From e1047302bdbfcac0f2331ebd5f6126a8b3c3b9b3 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 23 Nov 2020 00:15:40 -0800 Subject: [PATCH 001/474] [dev.regabi] cmd/compile/internal/types: add pos/sym/typ params to NewField These are almost always set, so might as well expect callers to provide them. They're also all required by go/types's corresponding New{Field,Func,Param,Var} functions, so this eases API compatibility. Passes toolstash-check. Change-Id: Ib3fa355d4961243cd285b41915e87652ae2c22f6 Reviewed-on: https://go-review.googlesource.com/c/go/+/272386 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky Reviewed-by: Cuong Manh Le Reviewed-by: Robert Griesemer --- src/cmd/compile/internal/gc/align.go | 7 ++--- src/cmd/compile/internal/gc/closure.go | 5 ++-- src/cmd/compile/internal/gc/dcl.go | 39 ++++--------------------- src/cmd/compile/internal/gc/iimport.go | 28 ++++-------------- src/cmd/compile/internal/gc/reflect.go | 12 +++----- src/cmd/compile/internal/gc/universe.go | 17 ++++++----- src/cmd/compile/internal/types/type.go | 11 +++++-- 7 files changed, 37 insertions(+), 82 deletions(-) diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go index a3a0c8fce822e..1f7631d19900e 100644 --- a/src/cmd/compile/internal/gc/align.go +++ b/src/cmd/compile/internal/gc/align.go @@ -74,11 +74,8 @@ func expandiface(t *types.Type) { // (including broken ones, if any) and add to t's // method set. for _, t1 := range m.Type.Fields().Slice() { - f := types.NewField() - f.Pos = m.Pos // preserve embedding position - f.Sym = t1.Sym - f.Type = t1.Type - f.SetBroke(t1.Broke()) + // Use m.Pos rather than t1.Pos to preserve embedding position. + f := types.NewField(m.Pos, t1.Sym, t1.Type) addMethod(f, false) } } diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index bd350f696e8ce..42a9b4f3e8876 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -7,6 +7,7 @@ package gc import ( "cmd/compile/internal/syntax" "cmd/compile/internal/types" + "cmd/internal/src" "fmt" ) @@ -266,10 +267,8 @@ func transformclosure(xfunc *Node) { v.SetClass(PPARAM) decls = append(decls, v) - fld := types.NewField() + fld := types.NewField(src.NoXPos, v.Sym, v.Type) fld.Nname = asTypesNode(v) - fld.Type = v.Type - fld.Sym = v.Sym params = append(params, fld) } diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 6e90eb4d65bcd..96c3a6faba9db 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -543,35 +543,19 @@ func structfield(n *Node) *types.Field { Fatalf("structfield: oops %v\n", n) } - f := types.NewField() - f.Pos = n.Pos - f.Sym = n.Sym - if n.Left != nil { n.Left = typecheck(n.Left, ctxType) n.Type = n.Left.Type n.Left = nil } - f.Type = n.Type - if f.Type == nil { - f.SetBroke(true) - } - + f := types.NewField(n.Pos, n.Sym, n.Type) if n.Embedded() { checkembeddedtype(n.Type) f.Embedded = 1 - } else { - f.Embedded = 0 } - - switch u := n.Val().U.(type) { - case string: - f.Note = u - default: - yyerror("field tag must be a string") - case nil: - // no-op + if n.HasVal() { + f.Note = n.Val().U.(string) } lineno = lno @@ -671,13 +655,7 @@ func interfacefield(n *Node) *types.Field { n.Left = nil } - f := types.NewField() - f.Pos = n.Pos - f.Sym = n.Sym - f.Type = n.Type - if f.Type == nil { - f.SetBroke(true) - } + f := types.NewField(n.Pos, n.Sym, n.Type) lineno = lno return f @@ -705,9 +683,7 @@ func fakeRecv() 
*Node { } func fakeRecvField() *types.Field { - f := types.NewField() - f.Type = types.FakeRecvType() - return f + return types.NewField(src.NoXPos, nil, types.FakeRecvType()) } // isifacemethod reports whether (field) m is @@ -920,10 +896,7 @@ func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) *types.F return f } - f := types.NewField() - f.Pos = lineno - f.Sym = msym - f.Type = t + f := types.NewField(lineno, msym, t) f.SetNointerface(nointerface) mt.Methods().Append(f) diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index c0114d0e53b16..376a167e166da 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -327,11 +327,7 @@ func (r *importReader) doDecl(n *Node) { recv := r.param() mtyp := r.signature(recv) - f := types.NewField() - f.Pos = mpos - f.Sym = msym - f.Type = mtyp - ms[i] = f + ms[i] = types.NewField(mpos, msym, mtyp) m := newfuncnamel(mpos, methodSym(recv.Type, msym)) m.Type = mtyp @@ -547,10 +543,7 @@ func (r *importReader) typ1() *types.Type { emb := r.bool() note := r.string() - f := types.NewField() - f.Pos = pos - f.Sym = sym - f.Type = typ + f := types.NewField(pos, sym, typ) if emb { f.Embedded = 1 } @@ -571,10 +564,7 @@ func (r *importReader) typ1() *types.Type { pos := r.pos() typ := r.typ() - f := types.NewField() - f.Pos = pos - f.Type = typ - embeddeds[i] = f + embeddeds[i] = types.NewField(pos, nil, typ) } methods := make([]*types.Field, r.uint64()) @@ -583,11 +573,7 @@ func (r *importReader) typ1() *types.Type { sym := r.ident() typ := r.signature(fakeRecvField()) - f := types.NewField() - f.Pos = pos - f.Sym = sym - f.Type = typ - methods[i] = f + methods[i] = types.NewField(pos, sym, typ) } t := types.New(TINTER) @@ -624,11 +610,7 @@ func (r *importReader) paramList() []*types.Field { } func (r *importReader) param() *types.Field { - f := types.NewField() - f.Pos = r.pos() - f.Sym = r.ident() - f.Type = r.typ() - return f + return types.NewField(r.pos(), r.ident(), r.typ()) } func (r *importReader) bool() bool { diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 9401eba7a5021..05e476b76b233 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -73,10 +73,8 @@ func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{}) } func makefield(name string, t *types.Type) *types.Field { - f := types.NewField() - f.Type = t - f.Sym = (*types.Pkg)(nil).Lookup(name) - return f + sym := (*types.Pkg)(nil).Lookup(name) + return types.NewField(src.NoXPos, sym, t) } // bmap makes the map bucket type given the type of the map. @@ -301,13 +299,11 @@ func hiter(t *types.Type) *types.Type { // stksize bytes of args. func deferstruct(stksize int64) *types.Type { makefield := func(name string, typ *types.Type) *types.Field { - f := types.NewField() - f.Type = typ // Unlike the global makefield function, this one needs to set Pkg // because these types might be compared (in SSA CSE sorting). // TODO: unify this makefield and the global one above. 
- f.Sym = &types.Sym{Name: name, Pkg: localpkg} - return f + sym := &types.Sym{Name: name, Pkg: localpkg} + return types.NewField(src.NoXPos, sym, typ) } argtype := types.NewArray(types.Types[TUINT8], stksize) argtype.Width = stksize diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index ff8cabd8e38c2..559d47da1a090 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -6,7 +6,10 @@ package gc -import "cmd/compile/internal/types" +import ( + "cmd/compile/internal/types" + "cmd/internal/src" +) // builtinpkg is a fake package that declares the universe block. var builtinpkg *types.Pkg @@ -355,16 +358,14 @@ func typeinit() { } func makeErrorInterface() *types.Type { - field := types.NewField() - field.Type = types.Types[TSTRING] - f := functypefield(fakeRecvField(), nil, []*types.Field{field}) + sig := functypefield(fakeRecvField(), nil, []*types.Field{ + types.NewField(src.NoXPos, nil, types.Types[TSTRING]), + }) - field = types.NewField() - field.Sym = lookup("Error") - field.Type = f + method := types.NewField(src.NoXPos, lookup("Error"), sig) t := types.New(TINTER) - t.SetInterface([]*types.Field{field}) + t.SetInterface([]*types.Field{method}) return t } diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index 023ab9af88aee..c6d14e9e0981d 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -583,10 +583,17 @@ func NewFuncArgs(f *Type) *Type { return t } -func NewField() *Field { - return &Field{ +func NewField(pos src.XPos, sym *Sym, typ *Type) *Field { + f := &Field{ + Pos: pos, + Sym: sym, + Type: typ, Offset: BADWIDTH, } + if typ == nil { + f.SetBroke(true) + } + return f } // SubstAny walks t, replacing instances of "any" with successive From b30c7a80443c6aed5a7f57ae4c57d691ea88ad9a Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 22 Nov 2020 13:47:55 -0800 Subject: [PATCH 002/474] [dev.regabi] cmd/compile/internal/gc: add MethodName for getting referenced method A common operation throughout the front end is getting the ONAME for a method used in a method selector, method expression, or method value. This CL adds MethodName as a uniform API for doing this for all of these kinds of nodes. For method selectors (ODOTMETH) and method expressions (ONAMEs where isMethodExpression reports true), we take advantage of the Node.Opt field to save the types.Field. This is the approach we already started taking in golang.org/cl/271217 (caching types.Field in Node.Opt for ODOT). For method values (OCALLPART), we continue using the existing callpartMethod helper function. Escape analysis already uses Node.Opt for tracking the method value's closure's data flow. A subsequent, automated refactoring CL will make more use of this method. For now, we just address a few cases in inl.go that aren't easily automated. Passes toolstash-check. 
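
For illustration (a sketch using the inl.go hunk below): a call site
that previously recovered the method's ONAME through its function
type,

	if inlfn := asNode(t.FuncType().Nname).Func; inlfn.Inl != nil {
		v.budget -= inlfn.Inl.Cost
	}

can now be written uniformly for method selectors, method
expressions, and method values as

	if inlfn := n.Left.MethodName().Func; inlfn.Inl != nil {
		v.budget -= inlfn.Inl.Cost
	}
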
Change-Id: Ic92b288b2d8b2fa7e18e3b68634326b8ef0d869b Reviewed-on: https://go-review.googlesource.com/c/go/+/272387 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Robert Griesemer --- src/cmd/compile/fmtmap_test.go | 1 - src/cmd/compile/internal/gc/closure.go | 1 + src/cmd/compile/internal/gc/inl.go | 11 ++--------- src/cmd/compile/internal/gc/syntax.go | 6 +++++- src/cmd/compile/internal/gc/typecheck.go | 21 +++++++++++++++++++++ 5 files changed, 29 insertions(+), 11 deletions(-) diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go index 0811df7f7b800..a8698de307931 100644 --- a/src/cmd/compile/fmtmap_test.go +++ b/src/cmd/compile/fmtmap_test.go @@ -50,7 +50,6 @@ var knownFormats = map[string]string{ "*cmd/compile/internal/types.Sym %v": "", "*cmd/compile/internal/types.Type %#L": "", "*cmd/compile/internal/types.Type %#v": "", - "*cmd/compile/internal/types.Type %+v": "", "*cmd/compile/internal/types.Type %-S": "", "*cmd/compile/internal/types.Type %0S": "", "*cmd/compile/internal/types.Type %L": "", diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index 42a9b4f3e8876..dd6640667de8b 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -435,6 +435,7 @@ func typecheckpartialcall(fn *Node, sym *types.Sym) { fn.Right = newname(sym) fn.Op = OCALLPART fn.Type = xfunc.Type + fn.SetOpt(nil) // clear types.Field from ODOTMETH } // makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 419056985fc14..1fab67391bd09 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -358,9 +358,6 @@ func (v *hairyVisitor) visit(n *Node) bool { if t == nil { Fatalf("no function type for [%p] %+v\n", n.Left, n.Left) } - if t.Nname() == nil { - Fatalf("no function definition for [%p] %+v\n", t, t) - } if isRuntimePkg(n.Left.Sym.Pkg) { fn := n.Left.Sym.Name if fn == "heapBits.nextArena" { @@ -372,7 +369,7 @@ func (v *hairyVisitor) visit(n *Node) bool { break } } - if inlfn := asNode(t.FuncType().Nname).Func; inlfn.Inl != nil { + if inlfn := n.Left.MethodName().Func; inlfn.Inl != nil { v.budget -= inlfn.Inl.Cost break } @@ -703,11 +700,7 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node { Fatalf("no function type for [%p] %+v\n", n.Left, n.Left) } - if n.Left.Type.Nname() == nil { - Fatalf("no function definition for [%p] %+v\n", n.Left.Type, n.Left.Type) - } - - n = mkinlcall(n, asNode(n.Left.Type.FuncType().Nname), maxCost, inlMap) + n = mkinlcall(n, n.Left.MethodName(), maxCost, inlMap) } lineno = lno diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go index 43358333b82e7..e46a0dadf3fa7 100644 --- a/src/cmd/compile/internal/gc/syntax.go +++ b/src/cmd/compile/internal/gc/syntax.go @@ -266,7 +266,11 @@ func (n *Node) Opt() interface{} { // SetOpt sets the optimizer data for the node, which must not have been used with SetVal. // SetOpt(nil) is ignored for Vals to simplify call sites that are clearing Opts. 
func (n *Node) SetOpt(x interface{}) { - if x == nil && n.HasVal() { + if x == nil { + if n.HasOpt() { + n.SetHasOpt(false) + n.E = nil + } return } if n.HasVal() { diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index c0b05035f033c..1c371c0e9daaf 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -2416,6 +2416,7 @@ func typecheckMethodExpr(n *Node) (res *Node) { n.Type = methodfunc(m.Type, n.Left.Type) n.Xoffset = 0 n.SetClass(PFUNC) + n.SetOpt(m) // methodSym already marked n.Sym as a function. // Issue 25065. Make sure that we emit the symbol for a local method. @@ -2538,6 +2539,7 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field { n.Xoffset = f2.Offset n.Type = f2.Type n.Op = ODOTMETH + n.SetOpt(f2) return f2 } @@ -4017,3 +4019,22 @@ func curpkg() *types.Pkg { return fnpkg(fn) } + +// MethodName returns the ONAME representing the method +// referenced by expression n, which must be a method selector, +// method expression, or method value. +func (n *Node) MethodName() *Node { + return asNode(n.MethodFunc().Type.Nname()) +} + +// MethodFunc is like MethodName, but returns the types.Field instead. +func (n *Node) MethodFunc() *types.Field { + switch { + case n.Op == ODOTMETH || n.isMethodExpression(): + return n.Opt().(*types.Field) + case n.Op == OCALLPART: + return callpartMethod(n) + } + Fatalf("unexpected node: %v (%v)", n, n.Op) + panic("unreachable") +} From d5928847debd0b16f89a5fd018646b2e3e9a8cb9 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 22 Nov 2020 10:45:44 -0800 Subject: [PATCH 003/474] [dev.regabi] cmd/compile/internal/gc: prep for Func.Nname removal refactoring There are three bits of method-handling code where we separately go from Field->Type and then Type->Node. By shuffling the code around a little to go Field->Type->Node in a single statement, we're able to more easily remove Type from the operation. Passes toolstash-check. Change-Id: Ife98216d70d3b867fa153449abef0e56a4fb242a Reviewed-on: https://go-review.googlesource.com/c/go/+/272388 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Robert Griesemer --- src/cmd/compile/internal/gc/bexport.go | 16 ++++++++++------ src/cmd/compile/internal/gc/dcl.go | 3 ++- src/cmd/compile/internal/gc/iexport.go | 5 ++--- src/cmd/compile/internal/gc/iimport.go | 11 +++-------- src/cmd/compile/internal/gc/typecheck.go | 2 +- 5 files changed, 18 insertions(+), 19 deletions(-) diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 10f21f86df581..f4720f8402912 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -12,6 +12,15 @@ type exporter struct { marked map[*types.Type]bool // types already seen by markType } +// markObject visits a reachable object. +func (p *exporter) markObject(n *Node) { + if n.Op == ONAME && n.Class() == PFUNC { + inlFlood(n) + } + + p.markType(n.Type) +} + // markType recursively visits types reachable from t to identify // functions whose inline bodies may be needed. 
func (p *exporter) markType(t *types.Type) { @@ -28,7 +37,7 @@ func (p *exporter) markType(t *types.Type) { if t.Sym != nil && t.Etype != TINTER { for _, m := range t.Methods().Slice() { if types.IsExported(m.Sym.Name) { - p.markType(m.Type) + p.markObject(asNode(m.Type.Nname())) } } } @@ -63,11 +72,6 @@ func (p *exporter) markType(t *types.Type) { } case TFUNC: - // If t is the type of a function or method, then - // t.Nname() is its ONAME. Mark its inline body and - // any recursively called functions for export. - inlFlood(asNode(t.Nname())) - for _, f := range t.Results().FieldSlice() { p.markType(f.Type) } diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 96c3a6faba9db..6af0369246ac5 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -824,7 +824,7 @@ func methodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sy // - msym is the method symbol // - t is function type (with receiver) // Returns a pointer to the existing or added Field; or nil if there's an error. -func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field { +func addmethod(n *Node, msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field { if msym == nil { Fatalf("no method symbol") } @@ -897,6 +897,7 @@ func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) *types.F } f := types.NewField(lineno, msym, t) + f.Type.SetNname(asTypesNode(n.Func.Nname)) f.SetNointerface(nointerface) mt.Methods().Append(f) diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 1f53d8ca7dc19..af5f1b70e47bf 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -243,14 +243,13 @@ const ( ) func iexport(out *bufio.Writer) { - // Mark inline bodies that are reachable through exported types. + // Mark inline bodies that are reachable through exported objects. // (Phase 0 of bexport.go.) { // TODO(mdempsky): Separate from bexport logic. p := &exporter{marked: make(map[*types.Type]bool)} for _, n := range exportlist { - sym := n.Sym - p.markType(asNode(sym.Def).Type) + p.markObject(n) } } diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 376a167e166da..de2ea3558c27a 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -327,19 +327,14 @@ func (r *importReader) doDecl(n *Node) { recv := r.param() mtyp := r.signature(recv) - ms[i] = types.NewField(mpos, msym, mtyp) - m := newfuncnamel(mpos, methodSym(recv.Type, msym)) m.Type = mtyp m.SetClass(PFUNC) // methodSym already marked m.Sym as a function. - // (comment from parser.go) - // inl.C's inlnode in on a dotmeth node expects to find the inlineable body as - // (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled - // out by typecheck's lookdot as this $$.ttype. So by providing - // this back link here we avoid special casing there. 
-			mtyp.SetNname(asTypesNode(m))
+			f := types.NewField(mpos, msym, mtyp)
+			f.Type.SetNname(asTypesNode(m))
+			ms[i] = f
 		}
 		t.Methods().Set(ms)
diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go
index 1c371c0e9daaf..d2e805a72f4ce 100644
--- a/src/cmd/compile/internal/gc/typecheck.go
+++ b/src/cmd/compile/internal/gc/typecheck.go
@@ -3412,7 +3412,7 @@ func typecheckfunc(n *Node) {
 	t.FuncType().Nname = asTypesNode(n.Func.Nname)
 	rcvr := t.Recv()
 	if rcvr != nil && n.Func.Shortname != nil {
-		m := addmethod(n.Func.Shortname, t, true, n.Func.Pragma&Nointerface != 0)
+		m := addmethod(n, n.Func.Shortname, t, true, n.Func.Pragma&Nointerface != 0)
 		if m == nil {
 			return
 		}

From c50c7a8c068aa4f6f9aaf288dac984c67197d0e0 Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Sun, 22 Nov 2020 20:43:16 -0800
Subject: [PATCH 004/474] [dev.regabi] cmd/compile/internal/gc: refactor to stop using Func.Nname

Automated refactoring produced by the rf script below to replace uses
of Func.Nname with Field.Nname or Node.MethodName as appropriate.

Some dead assignments to Func.Nname are left behind; these will be
removed in a subsequent remove-only CL.

Passes toolstash-check.

[git-generate]
cd src/cmd/compile/internal/gc
rf '
ex \
import "cmd/compile/internal/types"; \
var f *types.Field; \
var n *types.Node; \
f.Type.Nname() -> f.Nname; \
f.Type.SetNname(n) -> f.Nname = n; \
f.Type.FuncType().Nname -> f.Nname

ex \
var n *Node; \
asNode(n.Type.Nname()) -> n.MethodName(); \
asNode(n.Type.FuncType().Nname) -> n.MethodName(); \
asNode(callpartMethod(n).Type.Nname()) -> n.MethodName()
'

Change-Id: Iaae054324dfe7da6f5d8b8d57a1e05b58cc5968c
Reviewed-on: https://go-review.googlesource.com/c/go/+/272389
Trust: Matthew Dempsky
Run-TryBot: Matthew Dempsky
TryBot-Result: Go Bot
Reviewed-by: Robert Griesemer
---
 src/cmd/compile/internal/gc/bexport.go   | 2 +-
 src/cmd/compile/internal/gc/dcl.go       | 2 +-
 src/cmd/compile/internal/gc/escape.go    | 4 ++--
 src/cmd/compile/internal/gc/iexport.go   | 2 +-
 src/cmd/compile/internal/gc/iimport.go   | 4 ++--
 src/cmd/compile/internal/gc/initorder.go | 4 ++--
 src/cmd/compile/internal/gc/inl.go       | 6 +++---
 src/cmd/compile/internal/gc/scc.go       | 6 +++---
 src/cmd/compile/internal/gc/typecheck.go | 2 +-
 9 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go
index f4720f8402912..6564024a0c8dd 100644
--- a/src/cmd/compile/internal/gc/bexport.go
+++ b/src/cmd/compile/internal/gc/bexport.go
@@ -37,7 +37,7 @@ func (p *exporter) markType(t *types.Type) {
 	if t.Sym != nil && t.Etype != TINTER {
 		for _, m := range t.Methods().Slice() {
 			if types.IsExported(m.Sym.Name) {
-				p.markObject(asNode(m.Type.Nname()))
+				p.markObject(asNode(m.Nname))
 			}
 		}
 	}
diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go
index 6af0369246ac5..e1dc647f824ab 100644
--- a/src/cmd/compile/internal/gc/dcl.go
+++ b/src/cmd/compile/internal/gc/dcl.go
@@ -897,7 +897,7 @@ func addmethod(n *Node, msym *types.Sym, t *types.Type, local, nointerface bool)
 	}
 
 	f := types.NewField(lineno, msym, t)
-	f.Type.SetNname(asTypesNode(n.Func.Nname))
+	f.Nname = asTypesNode(n.Func.Nname)
 	f.SetNointerface(nointerface)
 
 	mt.Methods().Append(f)
diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go
index 618bdf78e259c..142eacf7d8afa 100644
--- a/src/cmd/compile/internal/gc/escape.go
+++ b/src/cmd/compile/internal/gc/escape.go
@@ -544,7 +544,7 @@ func (e *Escape) exprSkipInit(k EscHole,
n *Node) { for i := m.Type.NumResults(); i > 0; i-- { ks = append(ks, e.heapHole()) } - paramK := e.tagHole(ks, asNode(m.Type.Nname()), m.Type.Recv()) + paramK := e.tagHole(ks, asNode(m.Nname), m.Type.Recv()) e.expr(e.teeHole(paramK, closureK), n.Left) @@ -778,7 +778,7 @@ func (e *Escape) call(ks []EscHole, call, where *Node) { fn = v.Func.Closure.Func.Nname } case OCALLMETH: - fn = asNode(call.Left.Type.FuncType().Nname) + fn = call.Left.MethodName() } fntype := call.Left.Type diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index af5f1b70e47bf..47910eb3b9024 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -994,7 +994,7 @@ func (w *exportWriter) funcExt(n *Node) { func (w *exportWriter) methExt(m *types.Field) { w.bool(m.Nointerface()) - w.funcExt(asNode(m.Type.Nname())) + w.funcExt(asNode(m.Nname)) } func (w *exportWriter) linkname(s *types.Sym) { diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index de2ea3558c27a..a37730343acfb 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -333,7 +333,7 @@ func (r *importReader) doDecl(n *Node) { // methodSym already marked m.Sym as a function. f := types.NewField(mpos, msym, mtyp) - f.Type.SetNname(asTypesNode(m)) + f.Nname = asTypesNode(m) ms[i] = f } t.Methods().Set(ms) @@ -667,7 +667,7 @@ func (r *importReader) methExt(m *types.Field) { if r.bool() { m.SetNointerface(true) } - r.funcExt(asNode(m.Type.Nname())) + r.funcExt(asNode(m.Nname)) } func (r *importReader) linkname(s *types.Sym) { diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go index 41f1349bbe64f..2d7c0176d58fb 100644 --- a/src/cmd/compile/internal/gc/initorder.go +++ b/src/cmd/compile/internal/gc/initorder.go @@ -277,7 +277,7 @@ func (d *initDeps) visit(n *Node) bool { switch n.Op { case ONAME: if n.isMethodExpression() { - d.foundDep(asNode(n.Type.FuncType().Nname)) + d.foundDep(n.MethodName()) return false } @@ -290,7 +290,7 @@ func (d *initDeps) visit(n *Node) bool { d.inspectList(n.Func.Closure.Nbody) case ODOTMETH, OCALLPART: - d.foundDep(asNode(n.Type.FuncType().Nname)) + d.foundDep(n.MethodName()) } return true diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 1fab67391bd09..4908dc4463666 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -267,7 +267,7 @@ func inlFlood(n *Node) { switch n.Class() { case PFUNC: if n.isMethodExpression() { - inlFlood(asNode(n.Type.Nname())) + inlFlood(n.MethodName()) } else { inlFlood(n) exportsym(n) @@ -277,7 +277,7 @@ func inlFlood(n *Node) { } case ODOTMETH: - fn := asNode(n.Type.Nname()) + fn := n.MethodName() inlFlood(fn) case OCALLPART: @@ -714,7 +714,7 @@ func inlCallee(fn *Node) *Node { switch { case fn.Op == ONAME && fn.Class() == PFUNC: if fn.isMethodExpression() { - n := asNode(fn.Type.Nname()) + n := fn.MethodName() // Check that receiver type matches fn.Left. // TODO(mdempsky): Handle implicit dereference // of pointer receiver argument? 
diff --git a/src/cmd/compile/internal/gc/scc.go b/src/cmd/compile/internal/gc/scc.go index 5c7935aa87674..14f77d613ae0b 100644 --- a/src/cmd/compile/internal/gc/scc.go +++ b/src/cmd/compile/internal/gc/scc.go @@ -78,7 +78,7 @@ func (v *bottomUpVisitor) visit(n *Node) uint32 { case ONAME: if n.Class() == PFUNC { if n.isMethodExpression() { - n = asNode(n.Type.Nname()) + n = n.MethodName() } if n != nil && n.Name.Defn != nil { if m := v.visit(n.Name.Defn); m < min { @@ -87,14 +87,14 @@ func (v *bottomUpVisitor) visit(n *Node) uint32 { } } case ODOTMETH: - fn := asNode(n.Type.Nname()) + fn := n.MethodName() if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil { if m := v.visit(fn.Name.Defn); m < min { min = m } } case OCALLPART: - fn := asNode(callpartMethod(n).Type.Nname()) + fn := asNode(callpartMethod(n).Nname) if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil { if m := v.visit(fn.Name.Defn); m < min { min = m diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index d2e805a72f4ce..53a547c3bbc07 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -4024,7 +4024,7 @@ func curpkg() *types.Pkg { // referenced by expression n, which must be a method selector, // method expression, or method value. func (n *Node) MethodName() *Node { - return asNode(n.MethodFunc().Type.Nname()) + return asNode(n.MethodFunc().Nname) } // MethodFunc is like MethodName, but returns the types.Field instead. From c754f25241134eaa68c8f26ed5372cadeb49ef89 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 22 Nov 2020 20:45:42 -0800 Subject: [PATCH 005/474] [dev.regabi] cmd/compile/internal/types: remove Func.Nname Now that there's no code remaining that uses Func.Nname, we can get rid of it along with the remaining code that uselessly assigns to it. Passes toolstash-check. 
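
For reference, the dropped Nname field is a single *Node pointer,
which is exactly what the updated sizeof expectations below account
for:

	32-bit: 32 - 4 = 28 bytes
	64-bit: 56 - 8 = 48 bytes
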
Change-Id: I104ab3bb5122fb824c741bc6e4d9d54fefe5646e Reviewed-on: https://go-review.googlesource.com/c/go/+/272390 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Robert Griesemer --- src/cmd/compile/internal/gc/export.go | 1 - src/cmd/compile/internal/gc/inl.go | 4 ---- src/cmd/compile/internal/gc/reflect.go | 8 +------ src/cmd/compile/internal/gc/typecheck.go | 1 - src/cmd/compile/internal/types/sizeof_test.go | 2 +- src/cmd/compile/internal/types/type.go | 23 +------------------ 6 files changed, 3 insertions(+), 36 deletions(-) diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index c6917e0f810dd..5179b6c05be58 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -164,7 +164,6 @@ func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) { } n.Func = new(Func) - t.SetNname(asTypesNode(n)) if Debug.E != 0 { fmt.Printf("import func %v%S\n", s, t) diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 4908dc4463666..4aa561da6e9eb 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -221,10 +221,6 @@ func caninl(fn *Node) { Body: inlcopylist(fn.Nbody.Slice()), } - // hack, TODO, check for better way to link method nodes back to the thing with the ->inl - // this is so export can find the body of a method - fn.Type.FuncType().Nname = asTypesNode(n) - if Debug.m > 1 { fmt.Printf("%v: can inline %#v with cost %d as: %#v { %#v }\n", fn.Line(), n, inlineMaxBudget-visitor.budget, fn.Type, asNodes(n.Func.Inl.Body)) } else if Debug.m != 0 { diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 05e476b76b233..1ac7a8490f6ca 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -365,13 +365,7 @@ func methodfunc(f *types.Type, receiver *types.Type) *types.Type { out = append(out, d) } - t := functype(nil, in, out) - if f.Nname() != nil { - // Link to name of original method function. - t.SetNname(f.Nname()) - } - - return t + return functype(nil, in, out) } // methods returns the methods of the non-interface type t, sorted by name. 
diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 53a547c3bbc07..391115637ed1b 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -3409,7 +3409,6 @@ func typecheckfunc(n *Node) { return } n.Type = t - t.FuncType().Nname = asTypesNode(n.Func.Nname) rcvr := t.Recv() if rcvr != nil && n.Func.Shortname != nil { m := addmethod(n, n.Func.Shortname, t, true, n.Func.Pragma&Nointerface != 0) diff --git a/src/cmd/compile/internal/types/sizeof_test.go b/src/cmd/compile/internal/types/sizeof_test.go index ea947d8f417c5..0cf343e8f13a8 100644 --- a/src/cmd/compile/internal/types/sizeof_test.go +++ b/src/cmd/compile/internal/types/sizeof_test.go @@ -24,7 +24,7 @@ func TestSizeof(t *testing.T) { {Type{}, 52, 88}, {Map{}, 20, 40}, {Forward{}, 20, 32}, - {Func{}, 32, 56}, + {Func{}, 28, 48}, {Struct{}, 16, 32}, {Interface{}, 8, 16}, {Chan{}, 8, 16}, diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index c6d14e9e0981d..62c5c344845ad 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -247,8 +247,7 @@ type Func struct { Results *Type // function results Params *Type // function params - Nname *Node - pkg *Pkg + pkg *Pkg // Argwid is the total width of the function receiver, params, and results. // It gets calculated via a temporary TFUNCARGS type. @@ -807,26 +806,6 @@ func (t *Type) FuncArgs() *Type { return t.Extra.(FuncArgs).T } -// Nname returns the associated function's nname. -func (t *Type) Nname() *Node { - switch t.Etype { - case TFUNC: - return t.Extra.(*Func).Nname - } - Fatalf("Type.Nname %v %v", t.Etype, t) - return nil -} - -// Nname sets the associated function's nname. -func (t *Type) SetNname(n *Node) { - switch t.Etype { - case TFUNC: - t.Extra.(*Func).Nname = n - default: - Fatalf("Type.SetNname %v %v", t.Etype, t) - } -} - // IsFuncArgStruct reports whether t is a struct representing function parameters. func (t *Type) IsFuncArgStruct() bool { return t.Etype == TSTRUCT && t.Extra.(*Struct).Funarg != FunargNone From 7b144ed4f7a730f5c9375bca65010446ad9f4b73 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sun, 15 Nov 2020 11:40:25 -0500 Subject: [PATCH 006/474] [dev.regabi] cmd/compile: rewrite concurrentFlagOk to be clearer The current implementation copies Debug, clears a bunch of flags that are meant to be considered OK, and then checks the result against the zero value. But more flags are cleared than remain: it's easier to write and to understand to just check the ones that need checking. This phrasing also makes it safe to move more flags into the struct. It turns out that some of the flags being checked should probably not be checked, but this CL is meant to be a strict semantic no-op, so left a TODO to clean up the function a bit more later. 
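
In sketch form, the CL replaces the clear-and-compare idiom

	d := Debug
	d.N = 0 // disable optimizations
	// ... zero the other flags considered safe ...
	return d == DebugFlags{}

with a direct check of only the flags that block concurrency:

	return Debug.P == 0 &&
		Debug.E == 0 &&
		// ... the other unsafe flags ...
		Debug.r == 0
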
Change-Id: I7afe6d7b32b5b889c40dd339568e8602e02df9bc Reviewed-on: https://go-review.googlesource.com/c/go/+/271666 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/main.go | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index a6963a3d66e62..61742fc8ce721 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -1418,24 +1418,18 @@ func IsAlias(sym *types.Sym) bool { return sym.Def != nil && asNode(sym.Def).Sym != sym } -// By default, assume any debug flags are incompatible with concurrent -// compilation. A few are safe and potentially in common use for -// normal compiles, though; return true for those. +// concurrentFlagOk reports whether the current compiler flags +// are compatible with concurrent compilation. func concurrentFlagOk() bool { - // Report whether any debug flag that would prevent concurrent - // compilation is set, by zeroing out the allowed ones and then - // checking if the resulting struct is zero. - d := Debug - d.B = 0 // disable bounds checking - d.C = 0 // disable printing of columns in error messages - d.e = 0 // no limit on errors; errors all come from non-concurrent code - d.N = 0 // disable optimizations - d.l = 0 // disable inlining - d.w = 0 // all printing happens before compilation - d.W = 0 // all printing happens before compilation - d.S = 0 // printing disassembly happens at the end (but see concurrentBackendAllowed below) - - return d == DebugFlags{} + // TODO(rsc): Many of these are fine. Remove them. + return Debug.P == 0 && + Debug.E == 0 && + Debug.K == 0 && + Debug.L == 0 && + Debug.h == 0 && + Debug.j == 0 && + Debug.m == 0 && + Debug.r == 0 } func concurrentBackendAllowed() bool { From 5fd949e4bd18ec2068e614c17be0a74969dc13b8 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 19 Nov 2020 17:16:50 -0500 Subject: [PATCH 007/474] [dev.regabi] cmd/compile: initialize importMap lazily This sets up the next CL, moving importMap to a global zeroed struct. Change-Id: I1acc91b440d3da6e28fb32bd275fb3cd36db4e97 Reviewed-on: https://go-review.googlesource.com/c/go/+/272046 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/main.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 61742fc8ce721..d1b4161277ef9 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -877,11 +877,14 @@ func writebench(filename string) error { } var ( - importMap = map[string]string{} + importMap map[string]string packageFile map[string]string // nil means not in use ) func addImportMap(s string) { + if importMap == nil { + importMap = make(map[string]string) + } if strings.Count(s, "=") != 1 { log.Fatal("-importmap argument must be of the form source=actual") } @@ -894,6 +897,9 @@ func addImportMap(s string) { } func readImportCfg(file string) { + if importMap == nil { + importMap = make(map[string]string) + } packageFile = map[string]string{} data, err := ioutil.ReadFile(file) if err != nil { From 357c576878137c8840b702c64167470f1669f064 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 16 Nov 2020 11:08:38 -0500 Subject: [PATCH 008/474] [dev.regabi] cmd/compile: clean up error API Prepare for factoring the error API out of this package by cleaning it up. 
The doc comments use the intended new names, which will be introduced in the next CL. Change-Id: Ie4c8d4262422da32a9a9f750fda42c225b6b42a8 Reviewed-on: https://go-review.googlesource.com/c/go/+/272248 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/_rex.20201123151057 | 1 + src/cmd/compile/internal/gc/dcl.go | 3 - src/cmd/compile/internal/gc/go.go | 10 - src/cmd/compile/internal/gc/initorder.go | 4 +- src/cmd/compile/internal/gc/lex.go | 4 - src/cmd/compile/internal/gc/main.go | 44 +--- src/cmd/compile/internal/gc/mpfloat.go | 4 +- src/cmd/compile/internal/gc/mpint.go | 24 +-- src/cmd/compile/internal/gc/noder.go | 4 +- src/cmd/compile/internal/gc/pgen.go | 9 +- src/cmd/compile/internal/gc/print.go | 243 +++++++++++++++++++++++ src/cmd/compile/internal/gc/subr.go | 174 ---------------- src/cmd/compile/internal/gc/typecheck.go | 10 +- src/cmd/compile/internal/gc/walk.go | 3 +- 14 files changed, 283 insertions(+), 254 deletions(-) create mode 100644 src/_rex.20201123151057 create mode 100644 src/cmd/compile/internal/gc/print.go diff --git a/src/_rex.20201123151057 b/src/_rex.20201123151057 new file mode 100644 index 0000000000000..8b137891791fe --- /dev/null +++ b/src/_rex.20201123151057 @@ -0,0 +1 @@ + diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index e1dc647f824ab..d3b7590257084 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -19,9 +19,6 @@ var externdcl []*Node func testdclstack() { if !types.IsDclstackValid() { - if nerrors != 0 { - errorexit() - } Fatalf("mark left on the dclstack") } } diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index da6b6d6e7274e..c53fde7e242ba 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -102,16 +102,6 @@ var pragcgobuf [][]string var outfile string var linkobj string -// nerrors is the number of compiler errors reported -// since the last call to saveerrors. -var nerrors int - -// nsavederrors is the total number of compiler errors -// reported before the last call to saveerrors. -var nsavederrors int - -var nsyntaxerrors int - var decldepth int32 var nolocalimports bool diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go index 2d7c0176d58fb..102cb769db1a5 100644 --- a/src/cmd/compile/internal/gc/initorder.go +++ b/src/cmd/compile/internal/gc/initorder.go @@ -104,9 +104,7 @@ func initOrder(l []*Node) []*Node { // confused us and there might not be // a loop. Let the user fix those // first. - if nerrors > 0 { - errorexit() - } + ExitIfErrors() findInitLoopAndExit(firstLHS(n), new([]*Node)) Fatalf("initialization unfinished, but failed to identify loop") diff --git a/src/cmd/compile/internal/gc/lex.go b/src/cmd/compile/internal/gc/lex.go index 7cce371408ae8..c58479952ed1a 100644 --- a/src/cmd/compile/internal/gc/lex.go +++ b/src/cmd/compile/internal/gc/lex.go @@ -12,10 +12,6 @@ import ( "strings" ) -// lineno is the source position at the start of the most recently lexed token. 
-// TODO(gri) rename and eventually remove -var lineno src.XPos - func makePos(base *src.PosBase, line, col uint) src.XPos { return Ctxt.PosTable.XPos(src.MakePos(base, line, col)) } diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index d1b4161277ef9..89dbca0cf1b94 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -119,7 +119,7 @@ func usage() { } func hidePanic() { - if Debug_panic == 0 && nsavederrors+nerrors > 0 { + if Debug_panic == 0 && Errors() > 0 { // If we've already complained about things // in the program, don't bother complaining // about a panic too; let the user clean up @@ -567,7 +567,6 @@ func Main(archInit func(*Arch)) { initUniverse() dclcontext = PEXTERN - nerrors = 0 autogeneratedPos = makePos(src.NewFileBase("", ""), 1, 0) @@ -625,10 +624,10 @@ func Main(archInit func(*Arch)) { if n.Op == ODCLFUNC { Curfn = n decldepth = 1 - saveerrors() + errorsBefore := Errors() typecheckslice(Curfn.Nbody.Slice(), ctxStmt) checkreturn(Curfn) - if nerrors != 0 { + if Errors() > errorsBefore { Curfn.Nbody.Set(nil) // type errors; do not compile } // Now that we've checked whether n terminates, @@ -641,11 +640,9 @@ func Main(archInit func(*Arch)) { // check past phase 9 isn't sufficient, as we may exit with other errors // before then, thus skipping map key errors. checkMapKeys() - timings.AddEvent(fcount, "funcs") + ExitIfErrors() - if nsavederrors+nerrors != 0 { - errorexit() - } + timings.AddEvent(fcount, "funcs") fninit(xtop) @@ -660,12 +657,8 @@ func Main(archInit func(*Arch)) { } } capturevarscomplete = true - Curfn = nil - - if nsavederrors+nerrors != 0 { - errorexit() - } + ExitIfErrors() // Phase 5: Inlining timings.Start("fe", "inlining") @@ -674,14 +667,10 @@ func Main(archInit func(*Arch)) { // otherwise lazily when used or re-exported. for _, n := range importlist { if n.Func.Inl != nil { - saveerrors() typecheckinl(n) } } - - if nsavederrors+nerrors != 0 { - errorexit() - } + ExitIfErrors() } if Debug.l != 0 { @@ -793,10 +782,7 @@ func Main(archInit func(*Arch)) { // Check the map keys again, since we typechecked the external // declarations. checkMapKeys() - - if nerrors+nsavederrors != 0 { - errorexit() - } + ExitIfErrors() // Write object data to disk. timings.Start("be", "dumpobj") @@ -827,10 +813,7 @@ func Main(archInit func(*Arch)) { } logopt.FlushLoggedOpts(Ctxt, myimportpath) - - if nerrors+nsavederrors != 0 { - errorexit() - } + ExitIfErrors() flusherrors() timings.Stop() @@ -1011,11 +994,6 @@ func readSymABIs(file, myimportpath string) { } } -func saveerrors() { - nsavederrors += nerrors - nerrors = 0 -} - func arsize(b *bufio.Reader, name string) int { var buf [ArhdrSize]byte if _, err := io.ReadFull(b, buf[:]); err != nil { @@ -1396,7 +1374,7 @@ func clearImports() { // leave s->block set to cause redeclaration // errors if a conflicting top-level name is // introduced by a different file. - if !n.Name.Used() && nsyntaxerrors == 0 { + if !n.Name.Used() && SyntaxErrors() == 0 { unused = append(unused, importedPkg{n.Pos, n.Name.Pkg.Path, s.Name}) } s.Def = nil @@ -1405,7 +1383,7 @@ func clearImports() { if IsAlias(s) { // throw away top-level name left over // from previous import . 
"x" - if n.Name != nil && n.Name.Pack != nil && !n.Name.Pack.Name.Used() && nsyntaxerrors == 0 { + if n.Name != nil && n.Name.Pack != nil && !n.Name.Pack.Name.Used() && SyntaxErrors() == 0 { unused = append(unused, importedPkg{n.Name.Pack.Pos, n.Name.Pack.Name.Pkg.Path, ""}) n.Name.Pack.Name.SetUsed(true) } diff --git a/src/cmd/compile/internal/gc/mpfloat.go b/src/cmd/compile/internal/gc/mpfloat.go index 401aef319de7c..9962f4b41320b 100644 --- a/src/cmd/compile/internal/gc/mpfloat.go +++ b/src/cmd/compile/internal/gc/mpfloat.go @@ -136,7 +136,7 @@ func (a *Mpflt) Float64() float64 { x, _ := a.Val.Float64() // check for overflow - if math.IsInf(x, 0) && nsavederrors+nerrors == 0 { + if math.IsInf(x, 0) && Errors() == 0 { Fatalf("ovf in Mpflt Float64") } @@ -148,7 +148,7 @@ func (a *Mpflt) Float32() float64 { x := float64(x32) // check for overflow - if math.IsInf(x, 0) && nsavederrors+nerrors == 0 { + if math.IsInf(x, 0) && Errors() == 0 { Fatalf("ovf in Mpflt Float32") } diff --git a/src/cmd/compile/internal/gc/mpint.go b/src/cmd/compile/internal/gc/mpint.go index 340350bca7b2b..79eb60e65d2b0 100644 --- a/src/cmd/compile/internal/gc/mpint.go +++ b/src/cmd/compile/internal/gc/mpint.go @@ -72,7 +72,7 @@ func (a *Mpint) SetFloat(b *Mpflt) bool { func (a *Mpint) Add(b *Mpint) { if a.Ovf || b.Ovf { - if nsavederrors+nerrors == 0 { + if Errors() == 0 { Fatalf("ovf in Mpint Add") } a.SetOverflow() @@ -88,7 +88,7 @@ func (a *Mpint) Add(b *Mpint) { func (a *Mpint) Sub(b *Mpint) { if a.Ovf || b.Ovf { - if nsavederrors+nerrors == 0 { + if Errors() == 0 { Fatalf("ovf in Mpint Sub") } a.SetOverflow() @@ -104,7 +104,7 @@ func (a *Mpint) Sub(b *Mpint) { func (a *Mpint) Mul(b *Mpint) { if a.Ovf || b.Ovf { - if nsavederrors+nerrors == 0 { + if Errors() == 0 { Fatalf("ovf in Mpint Mul") } a.SetOverflow() @@ -120,7 +120,7 @@ func (a *Mpint) Mul(b *Mpint) { func (a *Mpint) Quo(b *Mpint) { if a.Ovf || b.Ovf { - if nsavederrors+nerrors == 0 { + if Errors() == 0 { Fatalf("ovf in Mpint Quo") } a.SetOverflow() @@ -137,7 +137,7 @@ func (a *Mpint) Quo(b *Mpint) { func (a *Mpint) Rem(b *Mpint) { if a.Ovf || b.Ovf { - if nsavederrors+nerrors == 0 { + if Errors() == 0 { Fatalf("ovf in Mpint Rem") } a.SetOverflow() @@ -154,7 +154,7 @@ func (a *Mpint) Rem(b *Mpint) { func (a *Mpint) Or(b *Mpint) { if a.Ovf || b.Ovf { - if nsavederrors+nerrors == 0 { + if Errors() == 0 { Fatalf("ovf in Mpint Or") } a.SetOverflow() @@ -166,7 +166,7 @@ func (a *Mpint) Or(b *Mpint) { func (a *Mpint) And(b *Mpint) { if a.Ovf || b.Ovf { - if nsavederrors+nerrors == 0 { + if Errors() == 0 { Fatalf("ovf in Mpint And") } a.SetOverflow() @@ -178,7 +178,7 @@ func (a *Mpint) And(b *Mpint) { func (a *Mpint) AndNot(b *Mpint) { if a.Ovf || b.Ovf { - if nsavederrors+nerrors == 0 { + if Errors() == 0 { Fatalf("ovf in Mpint AndNot") } a.SetOverflow() @@ -190,7 +190,7 @@ func (a *Mpint) AndNot(b *Mpint) { func (a *Mpint) Xor(b *Mpint) { if a.Ovf || b.Ovf { - if nsavederrors+nerrors == 0 { + if Errors() == 0 { Fatalf("ovf in Mpint Xor") } a.SetOverflow() @@ -202,7 +202,7 @@ func (a *Mpint) Xor(b *Mpint) { func (a *Mpint) Lsh(b *Mpint) { if a.Ovf || b.Ovf { - if nsavederrors+nerrors == 0 { + if Errors() == 0 { Fatalf("ovf in Mpint Lsh") } a.SetOverflow() @@ -229,7 +229,7 @@ func (a *Mpint) Lsh(b *Mpint) { func (a *Mpint) Rsh(b *Mpint) { if a.Ovf || b.Ovf { - if nsavederrors+nerrors == 0 { + if Errors() == 0 { Fatalf("ovf in Mpint Rsh") } a.SetOverflow() @@ -267,7 +267,7 @@ func (a *Mpint) Neg() { func (a *Mpint) Int64() int64 { if a.Ovf { - if 
nsavederrors+nerrors == 0 {
+		if Errors() == 0 {
 			Fatalf("constant overflow")
 		}
 		return 0
diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go
index 67d24ef0bc706..c7119f96f3ead 100644
--- a/src/cmd/compile/internal/gc/noder.go
+++ b/src/cmd/compile/internal/gc/noder.go
@@ -64,7 +64,7 @@ func parseFiles(filenames []string) uint {
 		lines += p.file.Lines
 		p.file = nil // release memory
 
-		if nsyntaxerrors != 0 {
+		if SyntaxErrors() != 0 {
 			errorexit()
 		}
 		// Always run testdclstack here, even when debug_dclstack is not set, as a sanity measure.
@@ -333,7 +333,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) {
 	val := p.basicLit(imp.Path)
 	ipkg := importfile(&val)
 	if ipkg == nil {
-		if nerrors == 0 {
+		if Errors() == 0 {
 			Fatalf("phase error in import")
 		}
 		return
diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go
index 353f4b08c9dcb..6dbb69281c4d4 100644
--- a/src/cmd/compile/internal/gc/pgen.go
+++ b/src/cmd/compile/internal/gc/pgen.go
@@ -198,7 +198,7 @@ func funccompile(fn *Node) {
 	}
 
 	if fn.Type == nil {
-		if nerrors == 0 {
+		if Errors() == 0 {
 			Fatalf("funccompile missing type")
 		}
 		return
@@ -224,10 +224,9 @@ func funccompile(fn *Node) {
 }
 
 func compile(fn *Node) {
-	saveerrors()
-
+	errorsBefore := Errors()
 	order(fn)
-	if nerrors != 0 {
+	if Errors() > errorsBefore {
 		return
 	}
@@ -237,7 +236,7 @@ func compile(fn *Node) {
 	fn.Func.initLSym(true)
 
 	walk(fn)
-	if nerrors != 0 {
+	if Errors() > errorsBefore {
 		return
 	}
 	if instrumenting {
diff --git a/src/cmd/compile/internal/gc/print.go b/src/cmd/compile/internal/gc/print.go
new file mode 100644
index 0000000000000..1dbd58df42ada
--- /dev/null
+++ b/src/cmd/compile/internal/gc/print.go
@@ -0,0 +1,243 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"cmd/internal/objabi"
+	"cmd/internal/src"
+	"fmt"
+	"os"
+	"runtime/debug"
+	"sort"
+	"strings"
+)
+
+// An errorMsg is a queued error message, waiting to be printed.
+type errorMsg struct {
+	pos src.XPos
+	msg string
+}
+
+// Pos is the current source position being processed,
+// printed by Errorf, ErrorfVers, Fatalf, and Warnf.
+var lineno src.XPos
+
+var (
+	errorMsgs       []errorMsg
+	numErrors       int // number of entries in errorMsgs that are errors (as opposed to warnings)
+	numSyntaxErrors int
+)
+
+// Errors returns the number of errors reported.
+func Errors() int {
+	return numErrors
+}
+
+// SyntaxErrors returns the number of syntax errors reported.
+func SyntaxErrors() int {
+	return numSyntaxErrors
+}
+
+// addErrorMsg adds a new errorMsg (which may be a warning) to errorMsgs.
+func addErrorMsg(pos src.XPos, format string, args ...interface{}) {
+	msg := fmt.Sprintf(format, args...)
+	// Only add the position if we know the position.
+	// See issue golang.org/issue/11361.
+	if pos.IsKnown() {
+		msg = fmt.Sprintf("%v: %s", linestr(pos), msg)
+	}
+	errorMsgs = append(errorMsgs, errorMsg{
+		pos: pos,
+		msg: msg + "\n",
+	})
+}
+
+// FmtPos formats pos as a file:line string.
+func linestr(pos src.XPos) string {
+	if Ctxt == nil {
+		return "???"
+	}
+	return Ctxt.OutermostPos(pos).Format(Debug.C == 0, Debug.L == 1)
+}
+
+// byPos sorts errors by source position.
+type byPos []errorMsg + +func (x byPos) Len() int { return len(x) } +func (x byPos) Less(i, j int) bool { return x[i].pos.Before(x[j].pos) } +func (x byPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +// FlushErrors sorts errors seen so far by line number, prints them to stdout, +// and empties the errors array. +func flusherrors() { + Ctxt.Bso.Flush() + if len(errorMsgs) == 0 { + return + } + sort.Stable(byPos(errorMsgs)) + for i, err := range errorMsgs { + if i == 0 || err.msg != errorMsgs[i-1].msg { + fmt.Printf("%s", err.msg) + } + } + errorMsgs = errorMsgs[:0] +} + +// lasterror keeps track of the most recently issued error, +// to avoid printing multiple error messages on the same line. +var lasterror struct { + syntax src.XPos // source position of last syntax error + other src.XPos // source position of last non-syntax error + msg string // error message of last non-syntax error +} + +// sameline reports whether two positions a, b are on the same line. +func sameline(a, b src.XPos) bool { + p := Ctxt.PosTable.Pos(a) + q := Ctxt.PosTable.Pos(b) + return p.Base() == q.Base() && p.Line() == q.Line() +} + +// Errorf reports a formatted error at the current line. +func yyerror(format string, args ...interface{}) { + yyerrorl(lineno, format, args...) +} + +// ErrorfAt reports a formatted error message at pos. +func yyerrorl(pos src.XPos, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + + if strings.HasPrefix(msg, "syntax error") { + numSyntaxErrors++ + // only one syntax error per line, no matter what error + if sameline(lasterror.syntax, pos) { + return + } + lasterror.syntax = pos + } else { + // only one of multiple equal non-syntax errors per line + // (flusherrors shows only one of them, so we filter them + // here as best as we can (they may not appear in order) + // so that we don't count them here and exit early, and + // then have nothing to show for.) + if sameline(lasterror.other, pos) && lasterror.msg == msg { + return + } + lasterror.other = pos + lasterror.msg = msg + } + + addErrorMsg(pos, "%s", msg) + numErrors++ + + hcrash() + if numErrors >= 10 && Debug.e == 0 { + flusherrors() + fmt.Printf("%v: too many errors\n", linestr(pos)) + errorexit() + } +} + +// ErrorfVers reports that a language feature (format, args) requires a later version of Go. +func yyerrorv(lang string, format string, args ...interface{}) { + yyerror("%s requires %s or later (-lang was set to %s; check go.mod)", fmt.Sprintf(format, args...), lang, flag_lang) +} + +// UpdateErrorDot is a clumsy hack that rewrites the last error, +// if it was "LINE: undefined: NAME", to be "LINE: undefined: NAME in EXPR". +// It is used to give better error messages for dot (selector) expressions. +func UpdateErrorDot(line string, name, expr string) { + if len(errorMsgs) == 0 { + return + } + e := &errorMsgs[len(errorMsgs)-1] + if strings.HasPrefix(e.msg, line) && e.msg == fmt.Sprintf("%v: undefined: %v\n", line, name) { + e.msg = fmt.Sprintf("%v: undefined: %v in %v\n", line, name, expr) + } +} + +// Warnf reports a formatted warning at the current line. +// In general the Go compiler does NOT generate warnings, +// so this should be used only when the user has opted in +// to additional output by setting a particular flag. +func Warn(format string, args ...interface{}) { + Warnl(lineno, format, args...) +} + +// WarnfAt reports a formatted warning at pos. 
+// In general the Go compiler does NOT generate warnings, +// so this should be used only when the user has opted in +// to additional output by setting a particular flag. +func Warnl(pos src.XPos, format string, args ...interface{}) { + addErrorMsg(pos, format, args...) + if Debug.m != 0 { + flusherrors() + } +} + +// Fatal reports a fatal error - an internal problem - at the current line and exits. +// If other errors have already been printed, then Fatal just quietly exits. +// (The internal problem may have been caused by incomplete information +// after the already-reported errors, so best to let users fix those and +// try again without being bothered about a spurious internal error.) +// +// But if no errors have been printed, or if -d panic has been specified, +// Fatal prints the error as an "internal compiler error". In a released build, +// it prints an error asking to file a bug report. In development builds, it +// prints a stack trace. +// +// If -h has been specified, Fatal panics to force the usual runtime info dump. +func Fatalf(format string, args ...interface{}) { + flusherrors() + + if Debug_panic != 0 || numErrors == 0 { + fmt.Printf("%v: internal compiler error: ", linestr(lineno)) + fmt.Printf(format, args...) + fmt.Printf("\n") + + // If this is a released compiler version, ask for a bug report. + if strings.HasPrefix(objabi.Version, "go") { + fmt.Printf("\n") + fmt.Printf("Please file a bug report including a short program that triggers the error.\n") + fmt.Printf("https://golang.org/issue/new\n") + } else { + // Not a release; dump a stack trace, too. + fmt.Println() + os.Stdout.Write(debug.Stack()) + fmt.Println() + } + } + + hcrash() + errorexit() +} + +// hcrash crashes the compiler when -h is set, to find out where a message is generated. +func hcrash() { + if Debug.h != 0 { + flusherrors() + if outfile != "" { + os.Remove(outfile) + } + panic("-h") + } +} + +// ErrorExit handles an error-status exit. +// It flushes any pending errors, removes the output file, and exits. +func errorexit() { + flusherrors() + if outfile != "" { + os.Remove(outfile) + } + os.Exit(2) +} + +// ExitIfErrors calls ErrorExit if any errors have been reported. +func ExitIfErrors() { + if Errors() > 0 { + errorexit() + } +} diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index defefd76b342b..9760823e96e2a 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -6,13 +6,10 @@ package gc import ( "cmd/compile/internal/types" - "cmd/internal/objabi" "cmd/internal/src" "crypto/md5" "encoding/binary" "fmt" - "os" - "runtime/debug" "sort" "strconv" "strings" @@ -21,13 +18,6 @@ import ( "unicode/utf8" ) -type Error struct { - pos src.XPos - msg string -} - -var errors []Error - // largeStack is info about a function whose stack frame is too large (rare). type largeStack struct { locals int64 @@ -41,170 +31,6 @@ var ( largeStackFrames []largeStack ) -func errorexit() { - flusherrors() - if outfile != "" { - os.Remove(outfile) - } - os.Exit(2) -} - -func adderrorname(n *Node) { - if n.Op != ODOT { - return - } - old := fmt.Sprintf("%v: undefined: %v\n", n.Line(), n.Left) - if len(errors) > 0 && errors[len(errors)-1].pos.Line() == n.Pos.Line() && errors[len(errors)-1].msg == old { - errors[len(errors)-1].msg = fmt.Sprintf("%v: undefined: %v in %v\n", n.Line(), n.Left, n) - } -} - -func adderr(pos src.XPos, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) 
- // Only add the position if know the position. - // See issue golang.org/issue/11361. - if pos.IsKnown() { - msg = fmt.Sprintf("%v: %s", linestr(pos), msg) - } - errors = append(errors, Error{ - pos: pos, - msg: msg + "\n", - }) -} - -// byPos sorts errors by source position. -type byPos []Error - -func (x byPos) Len() int { return len(x) } -func (x byPos) Less(i, j int) bool { return x[i].pos.Before(x[j].pos) } -func (x byPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -// flusherrors sorts errors seen so far by line number, prints them to stdout, -// and empties the errors array. -func flusherrors() { - Ctxt.Bso.Flush() - if len(errors) == 0 { - return - } - sort.Stable(byPos(errors)) - for i, err := range errors { - if i == 0 || err.msg != errors[i-1].msg { - fmt.Printf("%s", err.msg) - } - } - errors = errors[:0] -} - -func hcrash() { - if Debug.h != 0 { - flusherrors() - if outfile != "" { - os.Remove(outfile) - } - var x *int - *x = 0 - } -} - -func linestr(pos src.XPos) string { - return Ctxt.OutermostPos(pos).Format(Debug.C == 0, Debug.L == 1) -} - -// lasterror keeps track of the most recently issued error. -// It is used to avoid multiple error messages on the same -// line. -var lasterror struct { - syntax src.XPos // source position of last syntax error - other src.XPos // source position of last non-syntax error - msg string // error message of last non-syntax error -} - -// sameline reports whether two positions a, b are on the same line. -func sameline(a, b src.XPos) bool { - p := Ctxt.PosTable.Pos(a) - q := Ctxt.PosTable.Pos(b) - return p.Base() == q.Base() && p.Line() == q.Line() -} - -func yyerrorl(pos src.XPos, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - - if strings.HasPrefix(msg, "syntax error") { - nsyntaxerrors++ - // only one syntax error per line, no matter what error - if sameline(lasterror.syntax, pos) { - return - } - lasterror.syntax = pos - } else { - // only one of multiple equal non-syntax errors per line - // (flusherrors shows only one of them, so we filter them - // here as best as we can (they may not appear in order) - // so that we don't count them here and exit early, and - // then have nothing to show for.) - if sameline(lasterror.other, pos) && lasterror.msg == msg { - return - } - lasterror.other = pos - lasterror.msg = msg - } - - adderr(pos, "%s", msg) - - hcrash() - nerrors++ - if nsavederrors+nerrors >= 10 && Debug.e == 0 { - flusherrors() - fmt.Printf("%v: too many errors\n", linestr(pos)) - errorexit() - } -} - -func yyerrorv(lang string, format string, args ...interface{}) { - what := fmt.Sprintf(format, args...) - yyerrorl(lineno, "%s requires %s or later (-lang was set to %s; check go.mod)", what, lang, flag_lang) -} - -func yyerror(format string, args ...interface{}) { - yyerrorl(lineno, format, args...) -} - -func Warn(fmt_ string, args ...interface{}) { - Warnl(lineno, fmt_, args...) -} - -func Warnl(line src.XPos, fmt_ string, args ...interface{}) { - adderr(line, fmt_, args...) - if Debug.m != 0 { - flusherrors() - } -} - -func Fatalf(fmt_ string, args ...interface{}) { - flusherrors() - - if Debug_panic != 0 || nsavederrors+nerrors == 0 { - fmt.Printf("%v: internal compiler error: ", linestr(lineno)) - fmt.Printf(fmt_, args...) - fmt.Printf("\n") - - // If this is a released compiler version, ask for a bug report. 
- if strings.HasPrefix(objabi.Version, "go") { - fmt.Printf("\n") - fmt.Printf("Please file a bug report including a short program that triggers the error.\n") - fmt.Printf("https://golang.org/issue/new\n") - } else { - // Not a release; dump a stack trace, too. - fmt.Println() - os.Stdout.Write(debug.Stack()) - fmt.Println() - } - } - - hcrash() - errorexit() -} - // hasUniquePos reports whether n has a unique position that can be // used for reporting error messages. // diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 391115637ed1b..41f0c3f2a59f0 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -280,7 +280,7 @@ func typecheck(n *Node, top int) (res *Node) { yyerrorl(n.Pos, "constant definition loop%s", cycleTrace(cycleFor(n))) } - if nsavederrors+nerrors == 0 { + if Errors() == 0 { var trace string for i := len(typecheck_tcstack) - 1; i >= 0; i-- { x := typecheck_tcstack[i] @@ -891,7 +891,7 @@ func typecheck1(n *Node, top int) (res *Node) { t := n.Left.Type if t == nil { - adderrorname(n) + UpdateErrorDot(n.Line(), n.Left.String(), n.String()) n.Type = nil return n } @@ -3641,7 +3641,7 @@ func typecheckdef(n *Node) { if n.SubOp() != 0 { // like OPRINTN break } - if nsavederrors+nerrors > 0 { + if Errors() > 0 { // Can have undefined variables in x := foo // that make x have an n.name.Defn == nil. // If there are other errors anyway, don't @@ -3686,9 +3686,9 @@ func typecheckdef(n *Node) { n.SetWalkdef(1) setTypeNode(n, types.New(TFORW)) n.Type.Sym = n.Sym - nerrors0 := nerrors + errorsBefore := Errors() typecheckdeftype(n) - if n.Type.Etype == TFORW && nerrors > nerrors0 { + if n.Type.Etype == TFORW && Errors() > errorsBefore { // Something went wrong during type-checking, // but it was reported. Silence future errors. n.Type.SetBroke(true) diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index a7b6e7fcb3c76..a61cb3f651742 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -20,6 +20,7 @@ const zeroValSize = 1024 // must match value of runtime/map.go:maxZero func walk(fn *Node) { Curfn = fn + errorsBefore := Errors() if Debug.W != 0 { s := fmt.Sprintf("\nbefore walk %v", Curfn.Func.Nname.Sym) @@ -59,7 +60,7 @@ func walk(fn *Node) { } lineno = lno - if nerrors != 0 { + if Errors() > errorsBefore { return } walkstmtlist(Curfn.Nbody.Slice()) From e37597f7f0ad0be32d854c9b7b3556009b728538 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 16 Nov 2020 11:36:13 -0500 Subject: [PATCH 009/474] [dev.regabi] cmd/compile: rename a few 'base' identifiers We want to introduce a package cmd/compile/internal/base, and these will shadow it at points where it is needed. 
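As a minimal standalone sketch of the collision being avoided (illustrative
only, not part of this CL; it assumes the planned base package will export a
Fatalf helper, as later CLs in this series arrange):

    package gc

    import "cmd/compile/internal/base"

    func f() int64 {
        base.Fatalf("no errors yet") // OK: "base" still names the package here
        base := int64(0)             // local declaration shadows the package name
        // base.Fatalf("oops")       // would no longer compile: base is an int64
        return base
    }

Renaming such locals (base -> derefs, b, radix, off, leaf, sbase below) keeps
the package identifier usable throughout these functions.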
Change-Id: Ic936733fba1ccba8c2ca1fdedbd4d2989df4bbf4 Reviewed-on: https://go-review.googlesource.com/c/go/+/272249 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/escape.go | 20 ++++++++++---------- src/cmd/compile/internal/gc/lex.go | 4 ++-- src/cmd/compile/internal/gc/noder.go | 8 ++++---- src/cmd/compile/internal/gc/obj.go | 8 ++++---- src/cmd/compile/internal/gc/pgen.go | 10 +++++----- src/cmd/compile/internal/gc/swt.go | 6 +++--- src/cmd/compile/internal/gc/unsafe.go | 8 ++++---- 7 files changed, 32 insertions(+), 32 deletions(-) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 142eacf7d8afa..1fc51745f454a 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -1152,16 +1152,16 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc l := todo[len(todo)-1] todo = todo[:len(todo)-1] - base := l.derefs + derefs := l.derefs // If l.derefs < 0, then l's address flows to root. - addressOf := base < 0 + addressOf := derefs < 0 if addressOf { // For a flow path like "root = &l; l = x", // l's address flows to root, but x's does // not. We recognize this by lower bounding - // base at 0. - base = 0 + // derefs at 0. + derefs = 0 // If l's address flows to a non-transient // location, then l can't be transiently @@ -1181,15 +1181,15 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc if l.isName(PPARAM) { if (logopt.Enabled() || Debug.m >= 2) && !l.escapes { if Debug.m >= 2 { - fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", linestr(l.n.Pos), l.n, e.explainLoc(root), base) + fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", linestr(l.n.Pos), l.n, e.explainLoc(root), derefs) } explanation := e.explainPath(root, l) if logopt.Enabled() { logopt.LogOpt(l.n.Pos, "leak", "escape", e.curfn.funcname(), - fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, e.explainLoc(root), base), explanation) + fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, e.explainLoc(root), derefs), explanation) } } - l.leakTo(root, base) + l.leakTo(root, derefs) } // If l's address flows somewhere that @@ -1215,10 +1215,10 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc if edge.src.escapes { continue } - derefs := base + edge.derefs - if edge.src.walkgen != walkgen || edge.src.derefs > derefs { + d := derefs + edge.derefs + if edge.src.walkgen != walkgen || edge.src.derefs > d { edge.src.walkgen = walkgen - edge.src.derefs = derefs + edge.src.derefs = d edge.src.dst = l edge.src.dstEdgeIdx = i todo = append(todo, edge.src) diff --git a/src/cmd/compile/internal/gc/lex.go b/src/cmd/compile/internal/gc/lex.go index c58479952ed1a..f01891f3652a1 100644 --- a/src/cmd/compile/internal/gc/lex.go +++ b/src/cmd/compile/internal/gc/lex.go @@ -12,8 +12,8 @@ import ( "strings" ) -func makePos(base *src.PosBase, line, col uint) src.XPos { - return Ctxt.PosTable.XPos(src.MakePos(base, line, col)) +func makePos(b *src.PosBase, line, col uint) src.XPos { + return Ctxt.PosTable.XPos(src.MakePos(b, line, col)) } func isSpace(c rune) bool { diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index c7119f96f3ead..27bc9b5629e2f 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -1382,16 +1382,16 @@ func checkLangCompat(lit *syntax.BasicLit) { if s[0] != '0' { return } - base := s[1] - if base == 'b' || base == 'B' 
{ + radix := s[1] + if radix == 'b' || radix == 'B' { yyerrorv("go1.13", "binary literals") return } - if base == 'o' || base == 'O' { + if radix == 'o' || radix == 'O' { yyerrorv("go1.13", "0o/0O-style octal literals") return } - if lit.Kind != syntax.IntLit && (base == 'x' || base == 'X') { + if lit.Kind != syntax.IntLit && (radix == 'x' || radix == 'X') { yyerrorv("go1.13", "hexadecimal floating-point literals") } } diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 32aa7c5bb1ae0..8fe480b65f587 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -544,13 +544,13 @@ func dsymptrWeakOff(s *obj.LSym, off int, x *obj.LSym) int { // arr must be an ONAME. slicesym does not modify n. func slicesym(n, arr *Node, lencap int64) { s := n.Sym.Linksym() - base := n.Xoffset + off := n.Xoffset if arr.Op != ONAME { Fatalf("slicesym non-name arr %v", arr) } - s.WriteAddr(Ctxt, base, Widthptr, arr.Sym.Linksym(), arr.Xoffset) - s.WriteInt(Ctxt, base+sliceLenOffset, Widthptr, lencap) - s.WriteInt(Ctxt, base+sliceCapOffset, Widthptr, lencap) + s.WriteAddr(Ctxt, off, Widthptr, arr.Sym.Linksym(), arr.Xoffset) + s.WriteInt(Ctxt, off+sliceLenOffset, Widthptr, lencap) + s.WriteInt(Ctxt, off+sliceCapOffset, Widthptr, lencap) } // addrsym writes the static address of a to n. a must be an ONAME. diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 6dbb69281c4d4..9c1bd285ae7c7 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -698,20 +698,20 @@ func preInliningDcls(fnsym *obj.LSym) []*Node { // to do with its offset in the user variable. func stackOffset(slot ssa.LocalSlot) int32 { n := slot.N.(*Node) - var base int64 + var off int64 switch n.Class() { case PAUTO: if Ctxt.FixedFrameSize() == 0 { - base -= int64(Widthptr) + off -= int64(Widthptr) } if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" { // There is a word space for FP on ARM64 even if the frame pointer is disabled - base -= int64(Widthptr) + off -= int64(Widthptr) } case PPARAM, PPARAMOUT: - base += Ctxt.FixedFrameSize() + off += Ctxt.FixedFrameSize() } - return int32(base + n.Xoffset + slot.Off) + return int32(off + n.Xoffset + slot.Off) } // createComplexVar builds a single DWARF variable entry and location list. diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index 8d9fbe300e84b..9205f4142a0e7 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -720,9 +720,9 @@ func (s *typeSwitch) flush() { // less(i) should return a boolean expression. If it evaluates true, // then cases before i will be tested; otherwise, cases i and later. // -// base(i, nif) should setup nif (an OIF node) to test case i. In +// leaf(i, nif) should setup nif (an OIF node) to test case i. In // particular, it should set nif.Left and nif.Nbody. 
-func binarySearch(n int, out *Nodes, less func(i int) *Node, base func(i int, nif *Node)) { +func binarySearch(n int, out *Nodes, less func(i int) *Node, leaf func(i int, nif *Node)) { const binarySearchMin = 4 // minimum number of cases for binary search var do func(lo, hi int, out *Nodes) @@ -731,7 +731,7 @@ func binarySearch(n int, out *Nodes, less func(i int) *Node, base func(i int, ni if n < binarySearchMin { for i := lo; i < hi; i++ { nif := nod(OIF, nil, nil) - base(i, nif) + leaf(i, nif) lineno = lineno.WithNotStmt() nif.Left = typecheck(nif.Left, ctxExpr) nif.Left = defaultlit(nif.Left, nil) diff --git a/src/cmd/compile/internal/gc/unsafe.go b/src/cmd/compile/internal/gc/unsafe.go index 2233961561230..a3151e83bf04d 100644 --- a/src/cmd/compile/internal/gc/unsafe.go +++ b/src/cmd/compile/internal/gc/unsafe.go @@ -31,7 +31,7 @@ func evalunsafe(n *Node) int64 { // Since r->left may be mutated by typechecking, check it explicitly // first to track it correctly. n.Left.Left = typecheck(n.Left.Left, ctxExpr) - base := n.Left.Left + sbase := n.Left.Left n.Left = typecheck(n.Left, ctxExpr) if n.Left.Type == nil { @@ -48,15 +48,15 @@ func evalunsafe(n *Node) int64 { return 0 } - // Sum offsets for dots until we reach base. + // Sum offsets for dots until we reach sbase. var v int64 - for r := n.Left; r != base; r = r.Left { + for r := n.Left; r != sbase; r = r.Left { switch r.Op { case ODOTPTR: // For Offsetof(s.f), s may itself be a pointer, // but accessing f must not otherwise involve // indirection via embedded pointer types. - if r.Left != base { + if r.Left != sbase { yyerror("invalid expression %v: selector implies indirection of embedded %v", n, r.Left) return 0 } From 228b732ad988a457c0f3d42f6aeb0fe338a5c4ec Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 16 Nov 2020 12:18:09 -0500 Subject: [PATCH 010/474] [dev.regabi] cmd/compile: prepare for package ir The next CL will introduce a package ir to hold the IR definitions. This CL adjusts a few names and makes a few other minor changes to make the next CL - an automated one - smoother. Change-Id: Ie787a34732efd5b3d171bf0c1220b6dd91994ce3 Reviewed-on: https://go-review.googlesource.com/c/go/+/272251 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/escape.go | 35 ++++++++++++++++++++ src/cmd/compile/internal/gc/fmt.go | 42 ++++++++---------------- src/cmd/compile/internal/gc/iimport.go | 32 +++++++++--------- src/cmd/compile/internal/gc/subr.go | 9 ----- src/cmd/compile/internal/gc/typecheck.go | 14 ++++---- 5 files changed, 72 insertions(+), 60 deletions(-) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 1fc51745f454a..757b4652ca675 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -140,6 +140,41 @@ type EscEdge struct { notes *EscNote } +func init() { + EscFmt = escFmt +} + +// escFmt is called from node printing to print information about escape analysis results. +func escFmt(n *Node, short bool) string { + text := "" + switch n.Esc { + case EscUnknown: + break + + case EscHeap: + text = "esc(h)" + + case EscNone: + text = "esc(no)" + + case EscNever: + if !short { + text = "esc(N)" + } + + default: + text = fmt.Sprintf("esc(%d)", n.Esc) + } + + if e, ok := n.Opt().(*EscLocation); ok && e.loopDepth != 0 { + if text != "" { + text += " " + } + text += fmt.Sprintf("ld(%d)", e.loopDepth) + } + return text +} + // escapeFuncs performs escape analysis on a minimal batch of // functions. 
func escapeFuncs(fns []*Node, recursive bool) { diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index f92f5d0e884a7..f61ea8aaac915 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -415,19 +415,22 @@ func (n *Node) format(s fmt.State, verb rune, mode fmtMode) { } } +// EscFmt is set by the escape analysis code to add escape analysis details to the node print. +var EscFmt func(n *Node, short bool) string + // *Node details func (n *Node) jconv(s fmt.State, flag FmtFlag) { - c := flag & FmtShort + short := flag&FmtShort != 0 - // Useful to see which nodes in a Node Dump/dumplist are actually identical + // Useful to see which nodes in an AST printout are actually identical if Debug_dumpptrs != 0 { fmt.Fprintf(s, " p(%p)", n) } - if c == 0 && n.Name != nil && n.Name.Vargen != 0 { + if !short && n.Name != nil && n.Name.Vargen != 0 { fmt.Fprintf(s, " g(%d)", n.Name.Vargen) } - if Debug_dumpptrs != 0 && c == 0 && n.Name != nil && n.Name.Defn != nil { + if Debug_dumpptrs != 0 && !short && n.Name != nil && n.Name.Defn != nil { // Useful to see where Defn is set and what node it points to fmt.Fprintf(s, " defn(%p)", n.Name.Defn) } @@ -443,7 +446,7 @@ func (n *Node) jconv(s fmt.State, flag FmtFlag) { fmt.Fprintf(s, " l(%s%d)", pfx, n.Pos.Line()) } - if c == 0 && n.Xoffset != BADWIDTH { + if !short && n.Xoffset != BADWIDTH { fmt.Fprintf(s, " x(%d)", n.Xoffset) } @@ -455,30 +458,13 @@ func (n *Node) jconv(s fmt.State, flag FmtFlag) { fmt.Fprintf(s, " colas(%v)", n.Colas()) } - switch n.Esc { - case EscUnknown: - break - - case EscHeap: - fmt.Fprint(s, " esc(h)") - - case EscNone: - fmt.Fprint(s, " esc(no)") - - case EscNever: - if c == 0 { - fmt.Fprint(s, " esc(N)") + if EscFmt != nil { + if esc := EscFmt(n, short); esc != "" { + fmt.Fprintf(s, " %s", esc) } - - default: - fmt.Fprintf(s, " esc(%d)", n.Esc) - } - - if e, ok := n.Opt().(*EscLocation); ok && e.loopDepth != 0 { - fmt.Fprintf(s, " ld(%d)", e.loopDepth) } - if c == 0 && n.Typecheck() != 0 { + if !short && n.Typecheck() != 0 { fmt.Fprintf(s, " tc(%d)", n.Typecheck()) } @@ -518,11 +504,11 @@ func (n *Node) jconv(s fmt.State, flag FmtFlag) { fmt.Fprint(s, " nonnil") } - if c == 0 && n.HasCall() { + if !short && n.HasCall() { fmt.Fprint(s, " hascall") } - if c == 0 && n.Name != nil && n.Name.Used() { + if !short && n.Name != nil && n.Name.Used() { fmt.Fprint(s, " used") } } diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index a37730343acfb..df193cd8e1d86 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -98,16 +98,16 @@ func (r *intReader) uint64() uint64 { } func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) { - ir := &intReader{in, pkg} + ird := &intReader{in, pkg} - version := ir.uint64() + version := ird.uint64() if version != iexportVersion { yyerror("import %q: unknown export format version %d", pkg.Path, version) errorexit() } - sLen := ir.uint64() - dLen := ir.uint64() + sLen := ird.uint64() + dLen := ird.uint64() // Map string (and data) section into memory as a single large // string. This reduces heap fragmentation and allows @@ -138,10 +138,10 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) } // Declaration index. 
- for nPkgs := ir.uint64(); nPkgs > 0; nPkgs-- { - pkg := p.pkgAt(ir.uint64()) - pkgName := p.stringAt(ir.uint64()) - pkgHeight := int(ir.uint64()) + for nPkgs := ird.uint64(); nPkgs > 0; nPkgs-- { + pkg := p.pkgAt(ird.uint64()) + pkgName := p.stringAt(ird.uint64()) + pkgHeight := int(ird.uint64()) if pkg.Name == "" { pkg.Name = pkgName pkg.Height = pkgHeight @@ -158,9 +158,9 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) } } - for nSyms := ir.uint64(); nSyms > 0; nSyms-- { - s := pkg.Lookup(p.stringAt(ir.uint64())) - off := ir.uint64() + for nSyms := ird.uint64(); nSyms > 0; nSyms-- { + s := pkg.Lookup(p.stringAt(ird.uint64())) + off := ird.uint64() if _, ok := declImporter[s]; ok { continue @@ -177,12 +177,12 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) } // Inline body index. - for nPkgs := ir.uint64(); nPkgs > 0; nPkgs-- { - pkg := p.pkgAt(ir.uint64()) + for nPkgs := ird.uint64(); nPkgs > 0; nPkgs-- { + pkg := p.pkgAt(ird.uint64()) - for nSyms := ir.uint64(); nSyms > 0; nSyms-- { - s := pkg.Lookup(p.stringAt(ir.uint64())) - off := ir.uint64() + for nSyms := ird.uint64(); nSyms > 0; nSyms-- { + s := pkg.Lookup(p.stringAt(ird.uint64())) + off := ird.uint64() if _, ok := inlineImporter[s]; ok { continue diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 9760823e96e2a..849043bfe22ea 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -1585,15 +1585,6 @@ func liststmt(l []*Node) *Node { return n } -func (l Nodes) asblock() *Node { - n := nod(OBLOCK, nil, nil) - n.List = l - if l.Len() != 0 { - n.Pos = l.First().Pos - } - return n -} - func ngotype(n *Node) *types.Sym { if n.Type != nil { return typenamesym(n.Type) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 41f0c3f2a59f0..f13d9a3e2659b 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -3867,7 +3867,7 @@ func checkreturn(fn *Node) { } func deadcode(fn *Node) { - deadcodeslice(fn.Nbody) + deadcodeslice(&fn.Nbody) deadcodefn(fn) } @@ -3897,7 +3897,7 @@ func deadcodefn(fn *Node) { fn.Nbody.Set([]*Node{nod(OEMPTY, nil, nil)}) } -func deadcodeslice(nn Nodes) { +func deadcodeslice(nn *Nodes) { var lastLabel = -1 for i, n := range nn.Slice() { if n != nil && n.Op == OLABEL { @@ -3939,12 +3939,12 @@ func deadcodeslice(nn Nodes) { } } - deadcodeslice(n.Ninit) - deadcodeslice(n.Nbody) - deadcodeslice(n.List) - deadcodeslice(n.Rlist) + deadcodeslice(&n.Ninit) + deadcodeslice(&n.Nbody) + deadcodeslice(&n.List) + deadcodeslice(&n.Rlist) if cut { - *nn.slice = nn.Slice()[:i+1] + nn.Set(nn.Slice()[:i+1]) break } } From 1abb12fc97d87ea67ce87a04ad6500bdfe1dbb7d Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 17 Nov 2020 12:53:34 -0800 Subject: [PATCH 011/474] [dev.regabi] go/constant: optimize BitLen Avoids an unnecessary heap allocation when computing the bit length of int64 values. 
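A standalone sketch (not part of this CL) of the identity the new fast path
relies on: the bit length of |x| can be computed with math/bits instead of
routing through a heap-allocated big.Int, and the two methods agree on the
cases exercised by the new test below, including the -1<<63 edge case.

    package main

    import (
        "fmt"
        "math/big"
        "math/bits"
    )

    // bitLen mirrors the new int64Val path: bit length of |x|,
    // computed without allocating a big.Int.
    func bitLen(x int64) int {
        u := uint64(x)
        if x < 0 {
            u = uint64(-x) // wraps to 1<<63 when x == -1<<63, as desired
        }
        return 64 - bits.LeadingZeros64(u)
    }

    func main() {
        for _, x := range []int64{0, 1, -16, 1 << 61, 1 << 62, -1 << 62, -1 << 63} {
            fmt.Println(x, bitLen(x), big.NewInt(x).BitLen()) // the two columns agree
        }
    }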
Change-Id: I69dfc510e461daf3e83b0b7b6c0707f6526a32d0 Reviewed-on: https://go-review.googlesource.com/c/go/+/272646 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Robert Griesemer --- src/go/constant/value.go | 7 ++++++- src/go/constant/value_test.go | 21 +++++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/src/go/constant/value.go b/src/go/constant/value.go index 116c7575d9eeb..59606dc479572 100644 --- a/src/go/constant/value.go +++ b/src/go/constant/value.go @@ -17,6 +17,7 @@ import ( "go/token" "math" "math/big" + "math/bits" "strconv" "strings" "sync" @@ -610,7 +611,11 @@ func Make(x interface{}) Value { func BitLen(x Value) int { switch x := x.(type) { case int64Val: - return i64toi(x).val.BitLen() + u := uint64(x) + if x < 0 { + u = uint64(-x) + } + return 64 - bits.LeadingZeros64(u) case intVal: return x.val.BitLen() case unknownVal: diff --git a/src/go/constant/value_test.go b/src/go/constant/value_test.go index 1a5025cbbd492..1ad6784f9a164 100644 --- a/src/go/constant/value_test.go +++ b/src/go/constant/value_test.go @@ -655,3 +655,24 @@ func BenchmarkStringAdd(b *testing.B) { }) } } + +var bitLenTests = []struct { + val int64 + want int +}{ + {0, 0}, + {1, 1}, + {-16, 5}, + {1 << 61, 62}, + {1 << 62, 63}, + {-1 << 62, 63}, + {-1 << 63, 64}, +} + +func TestBitLen(t *testing.T) { + for _, test := range bitLenTests { + if got := BitLen(MakeInt64(test.val)); got != test.want { + t.Errorf("%v: got %v, want %v", test.val, got, test.want) + } + } +} From 96f3fb7244680fbb04549914384ced7afe433daf Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 23 Nov 2020 04:28:25 -0800 Subject: [PATCH 012/474] [dev.regabi] go/constant: avoid heap allocations in match When type switching from interface{} to T, and then returning the T as interface{} again, it's better to return the original interface{} value. This avoids needing to heap allocate the T for non-pointer-shaped types (i.e., int64Val, complexVal, stringVal). Change-Id: I25c83b3f9ec9bd2ffeec5a65279b68f4fcef8a19 Reviewed-on: https://go-review.googlesource.com/c/go/+/272647 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Robert Griesemer --- src/go/constant/value.go | 33 ++++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/src/go/constant/value.go b/src/go/constant/value.go index 59606dc479572..4a89ef3b945de 100644 --- a/src/go/constant/value.go +++ b/src/go/constant/value.go @@ -1023,52 +1023,55 @@ func match(x, y Value) (_, _ Value) { } // ord(x) <= ord(y) - switch x := x.(type) { + // Prefer to return the original x and y arguments when possible, + // to avoid unnecessary heap allocations. 
+ + switch x1 := x.(type) { case boolVal, *stringVal, complexVal: return x, y case int64Val: - switch y := y.(type) { + switch y.(type) { case int64Val: return x, y case intVal: - return i64toi(x), y + return i64toi(x1), y case ratVal: - return i64tor(x), y + return i64tor(x1), y case floatVal: - return i64tof(x), y + return i64tof(x1), y case complexVal: - return vtoc(x), y + return vtoc(x1), y } case intVal: - switch y := y.(type) { + switch y.(type) { case intVal: return x, y case ratVal: - return itor(x), y + return itor(x1), y case floatVal: - return itof(x), y + return itof(x1), y case complexVal: - return vtoc(x), y + return vtoc(x1), y } case ratVal: - switch y := y.(type) { + switch y.(type) { case ratVal: return x, y case floatVal: - return rtof(x), y + return rtof(x1), y case complexVal: - return vtoc(x), y + return vtoc(x1), y } case floatVal: - switch y := y.(type) { + switch y.(type) { case floatVal: return x, y case complexVal: - return vtoc(x), y + return vtoc(x1), y } } From 668e3a598f56d2c9618d800a163f3e784ba3ae0b Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 16 Nov 2020 08:44:40 -0800 Subject: [PATCH 013/474] [dev.regabi] cmd/compile: cleanup type switch typechecking Address outstanding TODO, which simplifies subsequent CLs. Now the compiler always type checks type-switch case clauses (like gccgo), but it treats clause variables as broken if an appropriate type cannot be determined for it (like go/types). Passes toolstash-check. Change-Id: Iedfe9cdf38c6865211e4b93391f1cf72c1bed136 Reviewed-on: https://go-review.googlesource.com/c/go/+/272648 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Robert Griesemer Trust: Matthew Dempsky --- src/cmd/compile/internal/gc/swt.go | 16 ++++++++-------- test/fixedbugs/bug340.go | 3 ++- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index 9205f4142a0e7..9ab5f0c2487e1 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -89,22 +89,22 @@ func typecheckTypeSwitch(n *Node) { if len(ls) == 1 { if ls[0].Op == OTYPE { vt = ls[0].Type - } else if ls[0].Op != OLITERAL { // TODO(mdempsky): Should be !ls[0].isNil() + } else if !ls[0].isNil() { // Invalid single-type case; // mark variable as broken. vt = nil } } - // TODO(mdempsky): It should be possible to - // still typecheck the case body. - if vt == nil { - continue - } - nvar := ncase.Rlist.First() nvar.Type = vt - nvar = typecheck(nvar, ctxExpr|ctxAssign) + if vt != nil { + nvar = typecheck(nvar, ctxExpr|ctxAssign) + } else { + // Clause variable is broken; prevent typechecking. + nvar.SetTypecheck(1) + nvar.SetWalkdef(1) + } ncase.Rlist.SetFirst(nvar) } diff --git a/test/fixedbugs/bug340.go b/test/fixedbugs/bug340.go index 118bbacc2202e..a067940408920 100644 --- a/test/fixedbugs/bug340.go +++ b/test/fixedbugs/bug340.go @@ -12,6 +12,7 @@ func main() { var x interface{} switch t := x.(type) { case 0: // ERROR "type" - t.x = 1 // ERROR "type interface \{\}|reference to undefined field or method" + t.x = 1 + x.x = 1 // ERROR "type interface \{\}|reference to undefined field or method" } } From 4af2decf3004261ff7cb500f511c6414a9d0f68a Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 15 Nov 2020 17:19:08 -0800 Subject: [PATCH 014/474] [dev.regabi] cmd/compile: add (unused) ONIL constant Subsequent CL will make use of ONIL. Split out separately so that the next CL can pass toolstash-check. 
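For orientation, a follow-up CL later in this series wires the new op in
roughly as follows; this is condensed from its subr.go changes, not code
added by this CL:

    // nodnil returns a node for the predeclared nil, represented by
    // its own ONIL op rather than an OLITERAL holding a CTNIL value.
    func nodnil() *Node {
        n := nod(ONIL, nil, nil)
        n.Type = types.Types[TNIL]
        return n
    }

    // isNil reports whether n is the predeclared nil. It checks n.Orig
    // because constant propagation may produce typed nil constants.
    func (n *Node) isNil() bool {
        return n.Orig.Op == ONIL
    }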
Change-Id: I49d77bedbe2cac4a5da149c925cda969e50b0b2d Reviewed-on: https://go-review.googlesource.com/c/go/+/272649 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Robert Griesemer Trust: Matthew Dempsky --- src/cmd/compile/internal/gc/op_string.go | 299 ++++++++++++----------- src/cmd/compile/internal/gc/syntax.go | 1 + 2 files changed, 151 insertions(+), 149 deletions(-) diff --git a/src/cmd/compile/internal/gc/op_string.go b/src/cmd/compile/internal/gc/op_string.go index 41d588309c01e..f7d31f912cb14 100644 --- a/src/cmd/compile/internal/gc/op_string.go +++ b/src/cmd/compile/internal/gc/op_string.go @@ -14,158 +14,159 @@ func _() { _ = x[OTYPE-3] _ = x[OPACK-4] _ = x[OLITERAL-5] - _ = x[OADD-6] - _ = x[OSUB-7] - _ = x[OOR-8] - _ = x[OXOR-9] - _ = x[OADDSTR-10] - _ = x[OADDR-11] - _ = x[OANDAND-12] - _ = x[OAPPEND-13] - _ = x[OBYTES2STR-14] - _ = x[OBYTES2STRTMP-15] - _ = x[ORUNES2STR-16] - _ = x[OSTR2BYTES-17] - _ = x[OSTR2BYTESTMP-18] - _ = x[OSTR2RUNES-19] - _ = x[OAS-20] - _ = x[OAS2-21] - _ = x[OAS2DOTTYPE-22] - _ = x[OAS2FUNC-23] - _ = x[OAS2MAPR-24] - _ = x[OAS2RECV-25] - _ = x[OASOP-26] - _ = x[OCALL-27] - _ = x[OCALLFUNC-28] - _ = x[OCALLMETH-29] - _ = x[OCALLINTER-30] - _ = x[OCALLPART-31] - _ = x[OCAP-32] - _ = x[OCLOSE-33] - _ = x[OCLOSURE-34] - _ = x[OCOMPLIT-35] - _ = x[OMAPLIT-36] - _ = x[OSTRUCTLIT-37] - _ = x[OARRAYLIT-38] - _ = x[OSLICELIT-39] - _ = x[OPTRLIT-40] - _ = x[OCONV-41] - _ = x[OCONVIFACE-42] - _ = x[OCONVNOP-43] - _ = x[OCOPY-44] - _ = x[ODCL-45] - _ = x[ODCLFUNC-46] - _ = x[ODCLFIELD-47] - _ = x[ODCLCONST-48] - _ = x[ODCLTYPE-49] - _ = x[ODELETE-50] - _ = x[ODOT-51] - _ = x[ODOTPTR-52] - _ = x[ODOTMETH-53] - _ = x[ODOTINTER-54] - _ = x[OXDOT-55] - _ = x[ODOTTYPE-56] - _ = x[ODOTTYPE2-57] - _ = x[OEQ-58] - _ = x[ONE-59] - _ = x[OLT-60] - _ = x[OLE-61] - _ = x[OGE-62] - _ = x[OGT-63] - _ = x[ODEREF-64] - _ = x[OINDEX-65] - _ = x[OINDEXMAP-66] - _ = x[OKEY-67] - _ = x[OSTRUCTKEY-68] - _ = x[OLEN-69] - _ = x[OMAKE-70] - _ = x[OMAKECHAN-71] - _ = x[OMAKEMAP-72] - _ = x[OMAKESLICE-73] - _ = x[OMAKESLICECOPY-74] - _ = x[OMUL-75] - _ = x[ODIV-76] - _ = x[OMOD-77] - _ = x[OLSH-78] - _ = x[ORSH-79] - _ = x[OAND-80] - _ = x[OANDNOT-81] - _ = x[ONEW-82] - _ = x[ONEWOBJ-83] - _ = x[ONOT-84] - _ = x[OBITNOT-85] - _ = x[OPLUS-86] - _ = x[ONEG-87] - _ = x[OOROR-88] - _ = x[OPANIC-89] - _ = x[OPRINT-90] - _ = x[OPRINTN-91] - _ = x[OPAREN-92] - _ = x[OSEND-93] - _ = x[OSLICE-94] - _ = x[OSLICEARR-95] - _ = x[OSLICESTR-96] - _ = x[OSLICE3-97] - _ = x[OSLICE3ARR-98] - _ = x[OSLICEHEADER-99] - _ = x[ORECOVER-100] - _ = x[ORECV-101] - _ = x[ORUNESTR-102] - _ = x[OSELRECV-103] - _ = x[OSELRECV2-104] - _ = x[OIOTA-105] - _ = x[OREAL-106] - _ = x[OIMAG-107] - _ = x[OCOMPLEX-108] - _ = x[OALIGNOF-109] - _ = x[OOFFSETOF-110] - _ = x[OSIZEOF-111] - _ = x[OBLOCK-112] - _ = x[OBREAK-113] - _ = x[OCASE-114] - _ = x[OCONTINUE-115] - _ = x[ODEFER-116] - _ = x[OEMPTY-117] - _ = x[OFALL-118] - _ = x[OFOR-119] - _ = x[OFORUNTIL-120] - _ = x[OGOTO-121] - _ = x[OIF-122] - _ = x[OLABEL-123] - _ = x[OGO-124] - _ = x[ORANGE-125] - _ = x[ORETURN-126] - _ = x[OSELECT-127] - _ = x[OSWITCH-128] - _ = x[OTYPESW-129] - _ = x[OTCHAN-130] - _ = x[OTMAP-131] - _ = x[OTSTRUCT-132] - _ = x[OTINTER-133] - _ = x[OTFUNC-134] - _ = x[OTARRAY-135] - _ = x[ODDD-136] - _ = x[OINLCALL-137] - _ = x[OEFACE-138] - _ = x[OITAB-139] - _ = x[OIDATA-140] - _ = x[OSPTR-141] - _ = x[OCLOSUREVAR-142] - _ = x[OCFUNC-143] - _ = x[OCHECKNIL-144] - _ = x[OVARDEF-145] - _ = x[OVARKILL-146] - _ = x[OVARLIVE-147] - _ = 
x[ORESULT-148] - _ = x[OINLMARK-149] - _ = x[ORETJMP-150] - _ = x[OGETG-151] - _ = x[OEND-152] + _ = x[ONIL-6] + _ = x[OADD-7] + _ = x[OSUB-8] + _ = x[OOR-9] + _ = x[OXOR-10] + _ = x[OADDSTR-11] + _ = x[OADDR-12] + _ = x[OANDAND-13] + _ = x[OAPPEND-14] + _ = x[OBYTES2STR-15] + _ = x[OBYTES2STRTMP-16] + _ = x[ORUNES2STR-17] + _ = x[OSTR2BYTES-18] + _ = x[OSTR2BYTESTMP-19] + _ = x[OSTR2RUNES-20] + _ = x[OAS-21] + _ = x[OAS2-22] + _ = x[OAS2DOTTYPE-23] + _ = x[OAS2FUNC-24] + _ = x[OAS2MAPR-25] + _ = x[OAS2RECV-26] + _ = x[OASOP-27] + _ = x[OCALL-28] + _ = x[OCALLFUNC-29] + _ = x[OCALLMETH-30] + _ = x[OCALLINTER-31] + _ = x[OCALLPART-32] + _ = x[OCAP-33] + _ = x[OCLOSE-34] + _ = x[OCLOSURE-35] + _ = x[OCOMPLIT-36] + _ = x[OMAPLIT-37] + _ = x[OSTRUCTLIT-38] + _ = x[OARRAYLIT-39] + _ = x[OSLICELIT-40] + _ = x[OPTRLIT-41] + _ = x[OCONV-42] + _ = x[OCONVIFACE-43] + _ = x[OCONVNOP-44] + _ = x[OCOPY-45] + _ = x[ODCL-46] + _ = x[ODCLFUNC-47] + _ = x[ODCLFIELD-48] + _ = x[ODCLCONST-49] + _ = x[ODCLTYPE-50] + _ = x[ODELETE-51] + _ = x[ODOT-52] + _ = x[ODOTPTR-53] + _ = x[ODOTMETH-54] + _ = x[ODOTINTER-55] + _ = x[OXDOT-56] + _ = x[ODOTTYPE-57] + _ = x[ODOTTYPE2-58] + _ = x[OEQ-59] + _ = x[ONE-60] + _ = x[OLT-61] + _ = x[OLE-62] + _ = x[OGE-63] + _ = x[OGT-64] + _ = x[ODEREF-65] + _ = x[OINDEX-66] + _ = x[OINDEXMAP-67] + _ = x[OKEY-68] + _ = x[OSTRUCTKEY-69] + _ = x[OLEN-70] + _ = x[OMAKE-71] + _ = x[OMAKECHAN-72] + _ = x[OMAKEMAP-73] + _ = x[OMAKESLICE-74] + _ = x[OMAKESLICECOPY-75] + _ = x[OMUL-76] + _ = x[ODIV-77] + _ = x[OMOD-78] + _ = x[OLSH-79] + _ = x[ORSH-80] + _ = x[OAND-81] + _ = x[OANDNOT-82] + _ = x[ONEW-83] + _ = x[ONEWOBJ-84] + _ = x[ONOT-85] + _ = x[OBITNOT-86] + _ = x[OPLUS-87] + _ = x[ONEG-88] + _ = x[OOROR-89] + _ = x[OPANIC-90] + _ = x[OPRINT-91] + _ = x[OPRINTN-92] + _ = x[OPAREN-93] + _ = x[OSEND-94] + _ = x[OSLICE-95] + _ = x[OSLICEARR-96] + _ = x[OSLICESTR-97] + _ = x[OSLICE3-98] + _ = x[OSLICE3ARR-99] + _ = x[OSLICEHEADER-100] + _ = x[ORECOVER-101] + _ = x[ORECV-102] + _ = x[ORUNESTR-103] + _ = x[OSELRECV-104] + _ = x[OSELRECV2-105] + _ = x[OIOTA-106] + _ = x[OREAL-107] + _ = x[OIMAG-108] + _ = x[OCOMPLEX-109] + _ = x[OALIGNOF-110] + _ = x[OOFFSETOF-111] + _ = x[OSIZEOF-112] + _ = x[OBLOCK-113] + _ = x[OBREAK-114] + _ = x[OCASE-115] + _ = x[OCONTINUE-116] + _ = x[ODEFER-117] + _ = x[OEMPTY-118] + _ = x[OFALL-119] + _ = x[OFOR-120] + _ = x[OFORUNTIL-121] + _ = x[OGOTO-122] + _ = x[OIF-123] + _ = x[OLABEL-124] + _ = x[OGO-125] + _ = x[ORANGE-126] + _ = x[ORETURN-127] + _ = x[OSELECT-128] + _ = x[OSWITCH-129] + _ = x[OTYPESW-130] + _ = x[OTCHAN-131] + _ = x[OTMAP-132] + _ = x[OTSTRUCT-133] + _ = x[OTINTER-134] + _ = x[OTFUNC-135] + _ = x[OTARRAY-136] + _ = x[ODDD-137] + _ = x[OINLCALL-138] + _ = x[OEFACE-139] + _ = x[OITAB-140] + _ = x[OIDATA-141] + _ = x[OSPTR-142] + _ = x[OCLOSUREVAR-143] + _ = x[OCFUNC-144] + _ = x[OCHECKNIL-145] + _ = x[OVARDEF-146] + _ = x[OVARKILL-147] + _ = x[OVARLIVE-148] + _ = x[ORESULT-149] + _ = x[OINLMARK-150] + _ = x[ORETJMP-151] + _ = x[OGETG-152] + _ = x[OEND-153] } -const _Op_name = 
"XXXNAMENONAMETYPEPACKLITERALADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFBLOCKBREAKCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDINLCALLEFACEITABIDATASPTRCLOSUREVARCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND" +const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFBLOCKBREAKCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDINLCALLEFACEITABIDATASPTRCLOSUREVARCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND" -var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 36, 39, 45, 49, 55, 61, 70, 82, 91, 100, 112, 121, 123, 126, 136, 143, 150, 157, 161, 165, 173, 181, 190, 198, 201, 206, 213, 220, 226, 235, 243, 251, 257, 261, 270, 277, 281, 284, 291, 299, 307, 314, 320, 323, 329, 336, 344, 348, 355, 363, 365, 367, 369, 371, 373, 375, 380, 385, 393, 396, 405, 408, 412, 420, 427, 436, 449, 452, 455, 458, 461, 464, 467, 473, 476, 482, 485, 491, 495, 498, 502, 507, 512, 518, 523, 527, 532, 540, 548, 554, 563, 574, 581, 585, 592, 599, 607, 611, 615, 619, 626, 633, 641, 647, 652, 657, 661, 669, 674, 679, 683, 686, 694, 698, 700, 705, 707, 712, 718, 724, 730, 736, 741, 745, 752, 758, 763, 769, 772, 779, 784, 788, 793, 797, 807, 812, 820, 826, 833, 840, 846, 853, 859, 863, 866} +var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 310, 317, 323, 326, 332, 339, 347, 351, 358, 366, 368, 370, 372, 374, 376, 378, 383, 388, 396, 399, 408, 411, 415, 423, 430, 439, 452, 455, 458, 461, 464, 467, 470, 476, 479, 485, 488, 494, 498, 501, 505, 510, 515, 521, 526, 530, 535, 543, 551, 557, 566, 577, 584, 588, 595, 602, 610, 614, 618, 622, 629, 636, 644, 650, 655, 660, 664, 672, 677, 682, 686, 689, 697, 701, 703, 708, 710, 715, 721, 727, 733, 739, 744, 748, 755, 761, 766, 772, 775, 782, 787, 791, 796, 800, 810, 815, 823, 829, 836, 843, 849, 856, 862, 866, 869} func (i Op) String() string { if i >= Op(len(_Op_index)-1) { diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go index e46a0dadf3fa7..b86510a294804 100644 --- 
a/src/cmd/compile/internal/gc/syntax.go +++ b/src/cmd/compile/internal/gc/syntax.go @@ -758,6 +758,7 @@ const ( OTYPE // type name OPACK // import OLITERAL // literal + ONIL // nil // expressions OADD // Left + Right From 88a9e2f9ad0ad3ef1e254e9150f4649e57b0a296 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Fri, 13 Nov 2020 20:38:21 -0800 Subject: [PATCH 015/474] [dev.regabi] cmd/compile: replace CTNIL with ONIL Properly speaking, "nil" is a zero value, not a constant. So go/constant does not have a representation for it. To allow replacing Val with constant.Value, we split out ONIL separately from OLITERAL so we can get rid of CTNIL. Passes toolstash-check. Change-Id: I4c8e60cae3b3c91bbac43b3b0cf2a4ade028d6cb Reviewed-on: https://go-review.googlesource.com/c/go/+/272650 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Robert Griesemer Trust: Matthew Dempsky --- src/cmd/compile/internal/gc/const.go | 19 +++++------------ src/cmd/compile/internal/gc/esc.go | 2 +- src/cmd/compile/internal/gc/escape.go | 2 +- src/cmd/compile/internal/gc/fmt.go | 12 +++++------ src/cmd/compile/internal/gc/iexport.go | 17 +++++++++------- src/cmd/compile/internal/gc/iimport.go | 22 ++++++++++++-------- src/cmd/compile/internal/gc/inl.go | 4 ++-- src/cmd/compile/internal/gc/obj.go | 9 +++++--- src/cmd/compile/internal/gc/order.go | 6 +++--- src/cmd/compile/internal/gc/sinit.go | 26 ++++++++++++++++-------- src/cmd/compile/internal/gc/ssa.go | 22 ++++++++++---------- src/cmd/compile/internal/gc/subr.go | 16 ++++++++------- src/cmd/compile/internal/gc/swt.go | 2 +- src/cmd/compile/internal/gc/syntax.go | 2 +- src/cmd/compile/internal/gc/typecheck.go | 9 +++++--- src/cmd/compile/internal/gc/universe.go | 4 +--- src/cmd/compile/internal/gc/walk.go | 11 +++++----- src/cmd/compile/internal/types/type.go | 2 +- 18 files changed, 100 insertions(+), 87 deletions(-) diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index b92c8d66b5afb..42ac3a26f8cbb 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -24,7 +24,6 @@ const ( CTCPLX CTSTR CTBOOL - CTNIL ) type Val struct { @@ -34,7 +33,6 @@ type Val struct { // *Mpflt float when Ctype() == CTFLT // *Mpcplx pair of floats when Ctype() == CTCPLX // string string when Ctype() == CTSTR - // *Nilval when Ctype() == CTNIL U interface{} } @@ -45,8 +43,6 @@ func (v Val) Ctype() Ctype { panic("unreachable") case nil: return CTxxx - case *NilVal: - return CTNIL case bool: return CTBOOL case *Mpint: @@ -71,8 +67,6 @@ func eqval(a, b Val) bool { default: Fatalf("unexpected Ctype for %T", a.U) panic("unreachable") - case *NilVal: - return true case bool: y := b.U.(bool) return x == y @@ -99,8 +93,6 @@ func (v Val) Interface() interface{} { default: Fatalf("unexpected Interface for %T", v.U) panic("unreachable") - case *NilVal: - return nil case bool, string: return x case *Mpint: @@ -112,8 +104,6 @@ func (v Val) Interface() interface{} { } } -type NilVal struct{} - // Int64Val returns n as an int64. // n must be an integer or rune constant. func (n *Node) Int64Val() int64 { @@ -245,7 +235,7 @@ func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Nod return n } - if n.Op == OLITERAL { + if n.Op == OLITERAL || n.Op == ONIL { // Can't always set n.Type directly on OLITERAL nodes. // See discussion on CL 20813. 
n = n.rawcopy() @@ -253,6 +243,9 @@ func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Nod // Nil is technically not a constant, so handle it specially. if n.Type.Etype == TNIL { + if n.Op != ONIL { + Fatalf("unexpected op: %v (%v)", n, n.Op) + } if t == nil { yyerror("use of untyped nil") n.SetDiag(true) @@ -1039,8 +1032,6 @@ func idealType(ct Ctype) *types.Type { return types.UntypedFloat case CTCPLX: return types.UntypedComplex - case CTNIL: - return types.Types[TNIL] } Fatalf("unexpected Ctype: %v", ct) return nil @@ -1189,7 +1180,7 @@ func indexconst(n *Node) int64 { // Expressions derived from nil, like string([]byte(nil)), while they // may be known at compile time, are not Go language constants. func (n *Node) isGoConst() bool { - return n.Op == OLITERAL && n.Val().Ctype() != CTNIL + return n.Op == OLITERAL } func hascallchan(n *Node) bool { diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go index 6f328ab5ea936..b7d1dfc92a5f5 100644 --- a/src/cmd/compile/internal/gc/esc.go +++ b/src/cmd/compile/internal/gc/esc.go @@ -152,7 +152,7 @@ func mayAffectMemory(n *Node) bool { // We're ignoring things like division by zero, index out of range, // and nil pointer dereference here. switch n.Op { - case ONAME, OCLOSUREVAR, OLITERAL: + case ONAME, OCLOSUREVAR, OLITERAL, ONIL: return false // Left+Right group. diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 757b4652ca675..bc0eb98d7625a 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -476,7 +476,7 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) { default: Fatalf("unexpected expr: %v", n) - case OLITERAL, OGETG, OCLOSUREVAR, OTYPE: + case OLITERAL, ONIL, OGETG, OCLOSUREVAR, OTYPE: // nop case ONAME: diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index f61ea8aaac915..9b57d131b1fa1 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -571,9 +571,6 @@ func (v Val) vconv(s fmt.State, flag FmtFlag) { case bool: fmt.Fprint(s, u) - case *NilVal: - fmt.Fprint(s, "nil") - default: fmt.Fprintf(s, "", v.Ctype()) } @@ -1207,6 +1204,7 @@ var opprec = []int{ OMAPLIT: 8, ONAME: 8, ONEW: 8, + ONIL: 8, ONONAME: 8, OOFFSETOF: 8, OPACK: 8, @@ -1323,6 +1321,9 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) { case OPAREN: mode.Fprintf(s, "(%v)", n.Left) + case ONIL: + fmt.Fprint(s, "nil") + case OLITERAL: // this is a bit of a mess if mode == FErr { if n.Orig != nil && n.Orig != n { @@ -1334,10 +1335,7 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) { return } } - if n.Val().Ctype() == CTNIL && n.Orig != nil && n.Orig != n { - n.Orig.exprfmt(s, prec, mode) - return - } + if n.Type != nil && !n.Type.IsUntyped() { // Need parens when type begins with what might // be misinterpreted as a unary operator: * or <-. diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 47910eb3b9024..b48a840d00000 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -759,8 +759,6 @@ func constTypeOf(typ *types.Type) Ctype { } switch typ.Etype { - case TCHAN, TFUNC, TMAP, TNIL, TINTER, TPTR, TSLICE, TUNSAFEPTR: - return CTNIL case TBOOL: return CTBOOL case TSTRING: @@ -790,9 +788,6 @@ func (w *exportWriter) value(typ *types.Type, v Val) { // and provides a useful consistency check. 
switch constTypeOf(typ) { - case CTNIL: - // Only one value; nothing to encode. - _ = v.U.(*NilVal) case CTBOOL: w.bool(v.U.(bool)) case CTSTR: @@ -1207,11 +1202,19 @@ func (w *exportWriter) expr(n *Node) { switch op := n.Op; op { // expressions // (somewhat closely following the structure of exprfmt in fmt.go) - case OLITERAL: - if n.Val().Ctype() == CTNIL && n.Orig != nil && n.Orig != n { + case ONIL: + if !n.Type.HasNil() { + Fatalf("unexpected type for nil: %v", n.Type) + } + if n.Orig != nil && n.Orig != n { w.expr(n.Orig) break } + w.op(OLITERAL) + w.pos(n.Pos) + w.typ(n.Type) + + case OLITERAL: w.op(OLITERAL) w.pos(n.Pos) w.value(n.Type, n.Val()) diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index df193cd8e1d86..ac565a6632c9a 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -293,7 +293,8 @@ func (r *importReader) doDecl(n *Node) { importalias(r.p.ipkg, pos, n.Sym, typ) case 'C': - typ, val := r.value() + typ := r.typ() + val := r.value(typ) importconst(r.p.ipkg, pos, n.Sym, typ, val) @@ -354,12 +355,8 @@ func (r *importReader) doDecl(n *Node) { } } -func (p *importReader) value() (typ *types.Type, v Val) { - typ = p.typ() - +func (p *importReader) value(typ *types.Type) (v Val) { switch constTypeOf(typ) { - case CTNIL: - v.U = &NilVal{} case CTBOOL: v.U = p.bool() case CTSTR: @@ -810,11 +807,20 @@ func (r *importReader) node() *Node { // case OPAREN: // unreachable - unpacked by exporter + // case ONIL: + // unreachable - mapped to OLITERAL + case OLITERAL: pos := r.pos() - typ, val := r.value() + typ := r.typ() - n := npos(pos, nodlit(val)) + var n *Node + if typ.HasNil() { + n = nodnil() + } else { + n = nodlit(r.value(typ)) + } + n = npos(pos, n) n.Type = typ return n diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 4aa561da6e9eb..a882e91dceedb 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -459,7 +459,7 @@ func inlcopy(n *Node) *Node { } switch n.Op { - case ONAME, OTYPE, OLITERAL: + case ONAME, OTYPE, OLITERAL, ONIL: return n } @@ -1322,7 +1322,7 @@ func (subst *inlsubst) node(n *Node) *Node { } return n - case OLITERAL, OTYPE: + case OLITERAL, ONIL, OTYPE: // If n is a named constant or type, we can continue // using it in the inline copy. Otherwise, make a copy // so we can update the line number. 
diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 8fe480b65f587..77f9afb44dada 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -591,12 +591,15 @@ func litsym(n, c *Node, wid int) { if n.Op != ONAME { Fatalf("litsym n op %v", n.Op) } - if c.Op != OLITERAL { - Fatalf("litsym c op %v", c.Op) - } if n.Sym == nil { Fatalf("litsym nil n sym") } + if c.Op == ONIL { + return + } + if c.Op != OLITERAL { + Fatalf("litsym c op %v", c.Op) + } s := n.Sym.Linksym() switch u := c.Val().U.(type) { case bool: diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 863de5b6c71b4..11c8b1fa25e5f 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -119,7 +119,7 @@ func (o *Order) cheapExpr(n *Node) *Node { } switch n.Op { - case ONAME, OLITERAL: + case ONAME, OLITERAL, ONIL: return n case OLEN, OCAP: l := o.cheapExpr(n.Left) @@ -143,7 +143,7 @@ func (o *Order) cheapExpr(n *Node) *Node { // The intended use is to apply to x when rewriting x += y into x = x + y. func (o *Order) safeExpr(n *Node) *Node { switch n.Op { - case ONAME, OLITERAL: + case ONAME, OLITERAL, ONIL: return n case ODOT, OLEN, OCAP: @@ -202,7 +202,7 @@ func isaddrokay(n *Node) bool { // The result of addrTemp MUST be assigned back to n, e.g. // n.Left = o.addrTemp(n.Left) func (o *Order) addrTemp(n *Node) *Node { - if consttype(n) != CTxxx { + if n.Op == OLITERAL || n.Op == ONIL { // TODO: expand this to all static composite literal nodes? n = defaultlit(n, nil) dowidth(n.Type) diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 212fcc022dba3..c199ff6317d0b 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -104,6 +104,9 @@ func (s *InitSchedule) staticcopy(l *Node, r *Node) bool { s.append(nod(OAS, l, conv(r, l.Type))) return true + case ONIL: + return true + case OLITERAL: if isZero(r) { return true @@ -139,7 +142,7 @@ func (s *InitSchedule) staticcopy(l *Node, r *Node) bool { e := &p.E[i] n.Xoffset = l.Xoffset + e.Xoffset n.Type = e.Expr.Type - if e.Expr.Op == OLITERAL { + if e.Expr.Op == OLITERAL || e.Expr.Op == ONIL { litsym(n, e.Expr, int(n.Type.Width)) continue } @@ -171,6 +174,9 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool { case ONAME: return s.staticcopy(l, r) + case ONIL: + return true + case OLITERAL: if isZero(r) { return true @@ -232,7 +238,7 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool { e := &p.E[i] n.Xoffset = l.Xoffset + e.Xoffset n.Type = e.Expr.Type - if e.Expr.Op == OLITERAL { + if e.Expr.Op == OLITERAL || e.Expr.Op == ONIL { litsym(n, e.Expr, int(n.Type.Width)) continue } @@ -269,13 +275,14 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool { for val.Op == OCONVIFACE { val = val.Left } + if val.Type.IsInterface() { // val is an interface type. // If val is nil, we can statically initialize l; // both words are zero and so there no work to do, so report success. // If val is non-nil, we have no concrete type to record, // and we won't be able to statically initialize its value, so report failure. - return Isconst(val, CTNIL) + return val.Op == ONIL } markTypeUsedInInterface(val.Type, l.Sym.Linksym()) @@ -296,7 +303,7 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool { // Emit data. if isdirectiface(val.Type) { - if Isconst(val, CTNIL) { + if val.Op == ONIL { // Nil is zero, nothing to do. 
return true } @@ -462,7 +469,7 @@ func isStaticCompositeLiteral(n *Node) bool { } } return true - case OLITERAL: + case OLITERAL, ONIL: return true case OCONVIFACE: // See staticassign's OCONVIFACE case for comments. @@ -471,9 +478,9 @@ func isStaticCompositeLiteral(n *Node) bool { val = val.Left } if val.Type.IsInterface() { - return Isconst(val, CTNIL) + return val.Op == ONIL } - if isdirectiface(val.Type) && Isconst(val, CTNIL) { + if isdirectiface(val.Type) && val.Op == ONIL { return true } return isStaticCompositeLiteral(val) @@ -1105,13 +1112,14 @@ func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n *Node) { func isZero(n *Node) bool { switch n.Op { + case ONIL: + return true + case OLITERAL: switch u := n.Val().U.(type) { default: Dump("unexpected literal", n) Fatalf("isZero") - case *NilVal: - return true case string: return u == "" case bool: diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 0b38e70cd2a95..709b2d434ed28 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -1993,7 +1993,7 @@ func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op { // expr converts the expression n to ssa, adds it to s and returns the ssa result. func (s *state) expr(n *Node) *ssa.Value { - if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) { + if hasUniquePos(n) { // ONAMEs and named OLITERALs have the line number // of the decl, not the use. See issue 14742. s.pushLine(n.Pos) @@ -2029,6 +2029,16 @@ func (s *state) expr(n *Node) *ssa.Value { case OCLOSUREVAR: addr := s.addr(n) return s.load(n.Type, addr) + case ONIL: + t := n.Type + switch { + case t.IsSlice(): + return s.constSlice(t) + case t.IsInterface(): + return s.constInterface(t) + default: + return s.constNil(t) + } case OLITERAL: switch u := n.Val().U.(type) { case *Mpint: @@ -2053,16 +2063,6 @@ func (s *state) expr(n *Node) *ssa.Value { return s.entryNewValue0A(ssa.OpConstString, n.Type, u) case bool: return s.constBool(u) - case *NilVal: - t := n.Type - switch { - case t.IsSlice(): - return s.constSlice(t) - case t.IsInterface(): - return s.constInterface(t) - default: - return s.constNil(t) - } case *Mpflt: switch n.Type.Size() { case 4: diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 849043bfe22ea..7c13aef21403b 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -41,7 +41,7 @@ func hasUniquePos(n *Node) bool { switch n.Op { case ONAME, OPACK: return false - case OLITERAL, OTYPE: + case OLITERAL, ONIL, OTYPE: if n.Sym != nil { return false } @@ -257,7 +257,9 @@ func nodintconst(v int64) *Node { } func nodnil() *Node { - return nodlit(Val{new(NilVal)}) + n := nod(ONIL, nil, nil) + n.Type = types.Types[TNIL] + return n } func nodbool(b bool) *Node { @@ -298,7 +300,7 @@ func treecopy(n *Node, pos src.XPos) *Node { // crashing (golang.org/issue/11361). fallthrough - case ONAME, ONONAME, OLITERAL, OTYPE: + case ONAME, ONONAME, OLITERAL, ONIL, OTYPE: return n } @@ -308,7 +310,7 @@ func treecopy(n *Node, pos src.XPos) *Node { func (n *Node) isNil() bool { // Check n.Orig because constant propagation may produce typed nil constants, // which don't exist in the Go spec. 
- return Isconst(n.Orig, CTNIL) + return n.Orig.Op == ONIL } func isptrto(t *types.Type, et types.EType) bool { @@ -807,7 +809,7 @@ func calcHasCall(n *Node) bool { } switch n.Op { - case OLITERAL, ONAME, OTYPE: + case OLITERAL, ONIL, ONAME, OTYPE: if n.HasCall() { Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n) } @@ -926,7 +928,7 @@ func safeexpr(n *Node, init *Nodes) *Node { } switch n.Op { - case ONAME, OLITERAL: + case ONAME, OLITERAL, ONIL: return n case ODOT, OLEN, OCAP: @@ -988,7 +990,7 @@ func copyexpr(n *Node, t *types.Type, init *Nodes) *Node { // result may not be assignable. func cheapexpr(n *Node, init *Nodes) *Node { switch n.Op { - case ONAME, OLITERAL: + case ONAME, OLITERAL, ONIL: return n } diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index 9ab5f0c2487e1..5f4e9e4b408d2 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -261,7 +261,7 @@ func walkExprSwitch(sw *Node) { } cond = walkexpr(cond, &sw.Ninit) - if cond.Op != OLITERAL { + if cond.Op != OLITERAL && cond.Op != ONIL { cond = copyexpr(cond, cond.Type, &sw.Nbody) } diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go index b86510a294804..f364ed1527fc7 100644 --- a/src/cmd/compile/internal/gc/syntax.go +++ b/src/cmd/compile/internal/gc/syntax.go @@ -294,7 +294,7 @@ func (n *Node) SetIota(x int64) { // Extra care must be taken when mutating such a node. func (n *Node) mayBeShared() bool { switch n.Op { - case ONAME, OLITERAL, OTYPE: + case ONAME, OLITERAL, ONIL, OTYPE: return true } return false diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index f13d9a3e2659b..32619b08d1fbe 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -363,7 +363,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = types.UntypedString } - case ONONAME: + case ONIL, ONONAME: ok |= ctxExpr case ONAME: @@ -1590,7 +1590,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = t if !t.IsSlice() { - if Isconst(args.First(), CTNIL) { + if args.First().isNil() { yyerror("first argument to append must be typed slice; have untyped nil") n.Type = nil return n @@ -3193,6 +3193,9 @@ func samesafeexpr(l *Node, r *Node) bool { case OLITERAL: return eqval(l.Val(), r.Val()) + + case ONIL: + return true } return false @@ -3596,7 +3599,7 @@ func typecheckdef(n *Node) { } if !e.isGoConst() { if !e.Diag() { - if Isconst(e, CTNIL) { + if e.Op == ONIL { yyerrorl(n.Pos, "const initializer cannot be nil") } else { yyerrorl(n.Pos, "const initializer %v is not a constant", e) diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index 559d47da1a090..32bf37e3228cb 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -157,9 +157,7 @@ func lexinit() { types.Types[TNIL] = types.New(TNIL) s = builtinpkg.Lookup("nil") - var v Val - v.U = new(NilVal) - s.Def = asTypesNode(nodlit(v)) + s.Def = asTypesNode(nodnil()) asNode(s.Def).Sym = s asNode(s.Def).Name = new(Name) diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index a61cb3f651742..ac43a8e1bea54 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -465,7 +465,7 @@ opswitch: case ONONAME, OEMPTY, OGETG, ONEWOBJ: - case OTYPE, ONAME, OLITERAL: + case OTYPE, ONAME, OLITERAL, ONIL: // TODO(mdempsky): Just return n; see 
discussion on CL 38655. // Perhaps refactor to use Node.mayBeShared for these instead. // If these return early, make sure to still call @@ -2277,7 +2277,7 @@ func varexpr(n *Node) bool { } switch n.Op { - case OLITERAL: + case OLITERAL, ONIL: return true case ONAME: @@ -2332,7 +2332,7 @@ func vmatch2(l *Node, r *Node) bool { case ONAME: return l == r - case OLITERAL: + case OLITERAL, ONIL: return false } @@ -2373,7 +2373,7 @@ func vmatch1(l *Node, r *Node) bool { return vmatch2(l, r) - case OLITERAL: + case OLITERAL, ONIL: return false } @@ -3190,7 +3190,7 @@ func eqfor(t *types.Type) (n *Node, needsize bool) { // The result of walkcompare MUST be assigned back to n, e.g. // n.Left = walkcompare(n.Left, init) func walkcompare(n *Node, init *Nodes) *Node { - if n.Left.Type.IsInterface() && n.Right.Type.IsInterface() && n.Left.Op != OLITERAL && n.Right.Op != OLITERAL { + if n.Left.Type.IsInterface() && n.Right.Type.IsInterface() && n.Left.Op != ONIL && n.Right.Op != ONIL { return walkcompareInterface(n, init) } @@ -3788,6 +3788,7 @@ func candiscard(n *Node) bool { OTYPE, OPACK, OLITERAL, + ONIL, OADD, OSUB, OOR, diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index 62c5c344845ad..82db9e4dbcba6 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -1265,7 +1265,7 @@ func (t *Type) IsPtrShaped() bool { // HasNil reports whether the set of values determined by t includes nil. func (t *Type) HasNil() bool { switch t.Etype { - case TCHAN, TFUNC, TINTER, TMAP, TPTR, TSLICE, TUNSAFEPTR: + case TCHAN, TFUNC, TINTER, TMAP, TNIL, TPTR, TSLICE, TUNSAFEPTR: return true } return false From 6dae48fb0ba772d30c664a8a31732a46e980e536 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Fri, 20 Nov 2020 13:23:58 -0800 Subject: [PATCH 016/474] [dev.regabi] cmd/compile: refactor type/value assertions Small refactoring to make subsequent CLs clearer. Passes toolstash-check. Change-Id: I1a6ae599f491220d44aaabae0b7bed4aff46ee92 Reviewed-on: https://go-review.googlesource.com/c/go/+/272651 Reviewed-by: Robert Griesemer Trust: Matthew Dempsky --- src/cmd/compile/internal/gc/const.go | 23 ++++++++++++++++++----- src/cmd/compile/internal/gc/iexport.go | 4 +--- src/cmd/compile/internal/gc/syntax.go | 3 +++ src/cmd/compile/internal/gc/typecheck.go | 2 +- 4 files changed, 23 insertions(+), 9 deletions(-) diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 42ac3a26f8cbb..4e7318cfc6d28 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -275,8 +275,8 @@ func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Nod if v.U == nil { break } - n.SetVal(v) n.Type = t + n.SetVal(v) return n case OPLUS, ONEG, OBITNOT, ONOT, OREAL, OIMAG: @@ -979,9 +979,6 @@ func setconst(n *Node, v Val) { Xoffset: BADWIDTH, } n.SetVal(v) - if vt := idealType(v.Ctype()); n.Type.IsUntyped() && n.Type != vt { - Fatalf("untyped type mismatch, have: %v, want: %v", n.Type, vt) - } // Check range. lno := setlineno(n) @@ -1000,6 +997,22 @@ func setconst(n *Node, v Val) { } } +func assertRepresents(t *types.Type, v Val) { + if !represents(t, v) { + Fatalf("%v does not represent %v", t, v) + } +} + +func represents(t *types.Type, v Val) bool { + if !t.IsUntyped() { + // TODO(mdempsky): Stricter handling of typed types. 
+ return true + } + + vt := idealType(v.Ctype()) + return t == vt +} + func setboolconst(n *Node, v bool) { setconst(n, Val{U: v}) } @@ -1013,8 +1026,8 @@ func setintconst(n *Node, v int64) { // nodlit returns a new untyped constant with value v. func nodlit(v Val) *Node { n := nod(OLITERAL, nil, nil) - n.SetVal(v) n.Type = idealType(v.Ctype()) + n.SetVal(v) return n } diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index b48a840d00000..c3385f785ac19 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -777,9 +777,7 @@ func constTypeOf(typ *types.Type) Ctype { } func (w *exportWriter) value(typ *types.Type, v Val) { - if vt := idealType(v.Ctype()); typ.IsUntyped() && typ != vt { - Fatalf("exporter: untyped type mismatch, have: %v, want: %v", typ, vt) - } + assertRepresents(typ, v) w.typ(typ) // Each type has only one admissible constant representation, diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go index f364ed1527fc7..de516dec69422 100644 --- a/src/cmd/compile/internal/gc/syntax.go +++ b/src/cmd/compile/internal/gc/syntax.go @@ -251,6 +251,9 @@ func (n *Node) SetVal(v Val) { Dump("have Opt", n) Fatalf("have Opt") } + if n.Op == OLITERAL { + assertRepresents(n.Type, v) + } n.SetHasVal(true) n.E = v.U } diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 32619b08d1fbe..443a3f7827dd3 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -3624,8 +3624,8 @@ func typecheckdef(n *Node) { e = convlit(e, t) } - n.SetVal(e.Val()) n.Type = e.Type + n.SetVal(e.Val()) case ONAME: if n.Name.Param.Ntype != nil { From c767d73227704ba4e22e366e89d1885f52d4b6cc Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Fri, 13 Nov 2020 18:33:19 -0800 Subject: [PATCH 017/474] [dev.regabi] cmd/compile: remove CTRUNE Since CL 255217, we've been able to rely on types.UntypedRune to identify untyped rune literals, rather than needing Mpint.Rune / CTRUNE. This makes way for switching to using go/constant, which doesn't have a separate notion of rune constants distinct from integer constants. Passes toolstash-check. 
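For example, after this change the noder marks a rune literal
through its type rather than through a flag on the value. A
condensed sketch of the noder.go hunk in this CL (identifiers are
the ones the CL itself uses):

    // An untyped rune literal is now an ordinary integer constant;
    // only its type records that it came from a rune literal.
    n := nodlit(p.basicLit(expr))
    if expr.Kind == syntax.RuneLit {
        n.Type = types.UntypedRune
    }

Code that previously asked the value for CTRUNE (printing in fmt.go,
defaulting in walkprint) now checks n.Type == types.UntypedRune
instead.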
Change-Id: I319861f4758aeea17345c101b167cb307e706a0e Reviewed-on: https://go-review.googlesource.com/c/go/+/272652 Reviewed-by: Robert Griesemer Trust: Matthew Dempsky --- src/cmd/compile/internal/gc/const.go | 86 +++++++++--------------- src/cmd/compile/internal/gc/fmt.go | 55 ++++++++------- src/cmd/compile/internal/gc/iimport.go | 1 - src/cmd/compile/internal/gc/mpint.go | 5 +- src/cmd/compile/internal/gc/noder.go | 4 +- src/cmd/compile/internal/gc/typecheck.go | 2 +- src/cmd/compile/internal/gc/walk.go | 5 +- 7 files changed, 71 insertions(+), 87 deletions(-) diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 4e7318cfc6d28..326f44a2feb8e 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -19,7 +19,6 @@ const ( CTxxx Ctype = iota CTINT - CTRUNE CTFLT CTCPLX CTSTR @@ -29,7 +28,7 @@ const ( type Val struct { // U contains one of: // bool bool when Ctype() == CTBOOL - // *Mpint int when Ctype() == CTINT, rune when Ctype() == CTRUNE + // *Mpint int when Ctype() == CTINT // *Mpflt float when Ctype() == CTFLT // *Mpcplx pair of floats when Ctype() == CTCPLX // string string when Ctype() == CTSTR @@ -37,7 +36,7 @@ type Val struct { } func (v Val) Ctype() Ctype { - switch x := v.U.(type) { + switch v.U.(type) { default: Fatalf("unexpected Ctype for %T", v.U) panic("unreachable") @@ -46,9 +45,6 @@ func (v Val) Ctype() Ctype { case bool: return CTBOOL case *Mpint: - if x.Rune { - return CTRUNE - } return CTINT case *Mpflt: return CTFLT @@ -384,7 +380,7 @@ func convertVal(v Val, t *types.Type, explicit bool) Val { return v } - case CTINT, CTRUNE: + case CTINT: if explicit && t.IsString() { return tostr(v) } @@ -449,11 +445,6 @@ func toflt(v Val) Val { func toint(v Val) Val { switch u := v.U.(type) { case *Mpint: - if u.Rune { - i := new(Mpint) - i.Set(u) - v.U = i - } case *Mpflt: i := new(Mpint) @@ -560,11 +551,7 @@ func consttype(n *Node) Ctype { } func Isconst(n *Node, ct Ctype) bool { - t := consttype(n) - - // If the caller is asking for CTINT, allow CTRUNE too. - // Makes life easier for back ends. - return t == ct || (ct == CTINT && t == CTRUNE) + return consttype(n) == ct } // evconst rewrites constant expressions into OLITERAL nodes. @@ -710,7 +697,7 @@ func compareOp(x Val, op Op, y Val) bool { return x != y } - case CTINT, CTRUNE: + case CTINT: x, y := x.U.(*Mpint), y.U.(*Mpint) return cmpZero(x.Cmp(y), op) @@ -784,11 +771,10 @@ Outer: return Val{U: x || y} } - case CTINT, CTRUNE: + case CTINT: x, y := x.U.(*Mpint), y.U.(*Mpint) u := new(Mpint) - u.Rune = x.Rune || y.Rune u.Set(x) switch op { case OADD: @@ -879,16 +865,15 @@ func unaryOp(op Op, x Val, t *types.Type) Val { switch op { case OPLUS: switch x.Ctype() { - case CTINT, CTRUNE, CTFLT, CTCPLX: + case CTINT, CTFLT, CTCPLX: return x } case ONEG: switch x.Ctype() { - case CTINT, CTRUNE: + case CTINT: x := x.U.(*Mpint) u := new(Mpint) - u.Rune = x.Rune u.Set(x) u.Neg() return Val{U: u} @@ -912,11 +897,10 @@ func unaryOp(op Op, x Val, t *types.Type) Val { case OBITNOT: switch x.Ctype() { - case CTINT, CTRUNE: + case CTINT: x := x.U.(*Mpint) u := new(Mpint) - u.Rune = x.Rune if t.IsSigned() || t.IsUntyped() { // Signed values change sign. 
u.SetInt64(-1) @@ -937,14 +921,11 @@ func unaryOp(op Op, x Val, t *types.Type) Val { } func shiftOp(x Val, op Op, y Val) Val { - if x.Ctype() != CTRUNE { - x = toint(x) - } + x = toint(x) y = toint(y) u := new(Mpint) u.Set(x.U.(*Mpint)) - u.Rune = x.U.(*Mpint).Rune switch op { case OLSH: u.Lsh(y.U.(*Mpint)) @@ -1010,7 +991,7 @@ func represents(t *types.Type, v Val) bool { } vt := idealType(v.Ctype()) - return t == vt + return t == vt || (t == types.UntypedRune && vt == types.UntypedInt) } func setboolconst(n *Node, v bool) { @@ -1039,8 +1020,6 @@ func idealType(ct Ctype) *types.Type { return types.UntypedBool case CTINT: return types.UntypedInt - case CTRUNE: - return types.UntypedRune case CTFLT: return types.UntypedFloat case CTCPLX: @@ -1091,31 +1070,30 @@ func defaultlit2(l *Node, r *Node, force bool) (*Node, *Node) { return l, r } -func ctype(t *types.Type) Ctype { - switch t { - case types.UntypedBool: - return CTBOOL - case types.UntypedString: - return CTSTR - case types.UntypedInt: - return CTINT - case types.UntypedRune: - return CTRUNE - case types.UntypedFloat: - return CTFLT - case types.UntypedComplex: - return CTCPLX +func mixUntyped(t1, t2 *types.Type) *types.Type { + if t1 == t2 { + return t1 + } + + rank := func(t *types.Type) int { + switch t { + case types.UntypedInt: + return 0 + case types.UntypedRune: + return 1 + case types.UntypedFloat: + return 2 + case types.UntypedComplex: + return 3 + } + Fatalf("bad type %v", t) + panic("unreachable") } - Fatalf("bad type %v", t) - panic("unreachable") -} -func mixUntyped(t1, t2 *types.Type) *types.Type { - t := t1 - if ctype(t2) > ctype(t1) { - t = t2 + if rank(t2) > rank(t1) { + return t2 } - return t + return t1 } func defaultType(t *types.Type) *types.Type { diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index 9b57d131b1fa1..740fdab977458 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -526,28 +526,12 @@ func (v Val) Format(s fmt.State, verb rune) { func (v Val) vconv(s fmt.State, flag FmtFlag) { switch u := v.U.(type) { case *Mpint: - if !u.Rune { - if flag&FmtSharp != 0 { - fmt.Fprint(s, u.String()) - return - } - fmt.Fprint(s, u.GoString()) + if flag&FmtSharp != 0 { + fmt.Fprint(s, u.String()) return } - - switch x := u.Int64(); { - case ' ' <= x && x < utf8.RuneSelf && x != '\\' && x != '\'': - fmt.Fprintf(s, "'%c'", int(x)) - - case 0 <= x && x < 1<<16: - fmt.Fprintf(s, "'\\u%04x'", uint(int(x))) - - case 0 <= x && x <= utf8.MaxRune: - fmt.Fprintf(s, "'\\U%08x'", uint64(x)) - - default: - fmt.Fprintf(s, "('\\x00' + %v)", u) - } + fmt.Fprint(s, u.GoString()) + return case *Mpflt: if flag&FmtSharp != 0 { @@ -1336,19 +1320,40 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) { } } + needUnparen := false if n.Type != nil && !n.Type.IsUntyped() { // Need parens when type begins with what might // be misinterpreted as a unary operator: * or <-. 
if n.Type.IsPtr() || (n.Type.IsChan() && n.Type.ChanDir() == types.Crecv) { - mode.Fprintf(s, "(%v)(%v)", n.Type, n.Val()) - return + mode.Fprintf(s, "(%v)(", n.Type) } else { - mode.Fprintf(s, "%v(%v)", n.Type, n.Val()) - return + mode.Fprintf(s, "%v(", n.Type) } + needUnparen = true } - mode.Fprintf(s, "%v", n.Val()) + if n.Type == types.UntypedRune { + u := n.Val().U.(*Mpint) + switch x := u.Int64(); { + case ' ' <= x && x < utf8.RuneSelf && x != '\\' && x != '\'': + fmt.Fprintf(s, "'%c'", int(x)) + + case 0 <= x && x < 1<<16: + fmt.Fprintf(s, "'\\u%04x'", uint(int(x))) + + case 0 <= x && x <= utf8.MaxRune: + fmt.Fprintf(s, "'\\U%08x'", uint64(x)) + + default: + fmt.Fprintf(s, "('\\x00' + %v)", u) + } + } else { + mode.Fprintf(s, "%v", n.Val()) + } + + if needUnparen { + mode.Fprintf(s, ")") + } // Special case: name used as local variable in export. // _ becomes ~b%d internally; print as _ for export diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index ac565a6632c9a..fc6b7ecb9ff7e 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -363,7 +363,6 @@ func (p *importReader) value(typ *types.Type) (v Val) { v.U = p.string() case CTINT: x := new(Mpint) - x.Rune = typ == types.UntypedRune p.mpint(&x.Val, typ) v.U = x case CTFLT: diff --git a/src/cmd/compile/internal/gc/mpint.go b/src/cmd/compile/internal/gc/mpint.go index 79eb60e65d2b0..199b2659d1588 100644 --- a/src/cmd/compile/internal/gc/mpint.go +++ b/src/cmd/compile/internal/gc/mpint.go @@ -13,9 +13,8 @@ import ( // Mpint represents an integer constant. type Mpint struct { - Val big.Int - Ovf bool // set if Val overflowed compiler limit (sticky) - Rune bool // set if syntax indicates default type rune + Val big.Int + Ovf bool // set if Val overflowed compiler limit (sticky) } func (a *Mpint) SetOverflow() { diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 27bc9b5629e2f..303b04cd46eed 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -656,6 +656,9 @@ func (p *noder) expr(expr syntax.Expr) *Node { return p.mkname(expr) case *syntax.BasicLit: n := nodlit(p.basicLit(expr)) + if expr.Kind == syntax.RuneLit { + n.Type = types.UntypedRune + } n.SetDiag(expr.Bad) // avoid follow-on errors if there was a syntax error return n case *syntax.CompositeLit: @@ -1428,7 +1431,6 @@ func (p *noder) basicLit(lit *syntax.BasicLit) Val { case syntax.RuneLit: x := new(Mpint) - x.Rune = true if !lit.Bad { u, _ := strconv.Unquote(s) var r rune diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 443a3f7827dd3..3fb59c8debf93 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -3724,7 +3724,7 @@ func checkmake(t *types.Type, arg string, np **Node) bool { // Do range checks for constants before defaultlit // to avoid redundant "constant NNN overflows int" errors. 
switch consttype(n) { - case CTINT, CTRUNE, CTFLT, CTCPLX: + case CTINT, CTFLT, CTCPLX: v := toint(n.Val()).U.(*Mpint) if v.CmpInt64(0) < 0 { yyerror("negative %s argument in make(%v)", arg, t) diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index ac43a8e1bea54..e7351d1792a8e 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -1931,10 +1931,11 @@ func walkprint(nn *Node, init *Nodes) *Node { calls := []*Node{mkcall("printlock", nil, init)} for i, n := range nn.List.Slice() { if n.Op == OLITERAL { - switch n.Val().Ctype() { - case CTRUNE: + if n.Type == types.UntypedRune { n = defaultlit(n, types.Runetype) + } + switch n.Val().Ctype() { case CTINT: n = defaultlit(n, types.Types[TINT64]) From 015423a15bcfae148d5121bcf4ba5b50d0847cd0 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 23 Nov 2020 21:48:38 -0800 Subject: [PATCH 018/474] [dev.regabi] strconv: add to bootstrap packages go/constant relies on strconv for parsing Go literals, while older versions of strconv either lack recent Go language features (e.g., Go 1.13's new numeric literals) or have errors (e.g., mishandling of carriage returns in raw string literals prior to Go 1.8). This requires two changes: 1. Splitting out the internal/bytealg dependency into a separate file, which can be easily substituted with a simple loop for bootstrap builds. 2. Updating eisel_lemire.go to not utilize Go 1.13 functionality (underscores in numeric literals and signed shift counts). Change-Id: Ib48a858a03b155eebdcd08d577aec2254337e70e Reviewed-on: https://go-review.googlesource.com/c/go/+/272749 Reviewed-by: Robert Griesemer Reviewed-by: Russ Cox Trust: Matthew Dempsky --- src/cmd/compile/internal/gc/dep_test.go | 2 +- src/cmd/dist/buildtool.go | 2 ++ src/strconv/bytealg.go | 14 ++++++++++++++ src/strconv/bytealg_bootstrap.go | 17 +++++++++++++++++ src/strconv/eisel_lemire.go | 16 ++++++++-------- src/strconv/quote.go | 6 ------ 6 files changed, 42 insertions(+), 15 deletions(-) create mode 100644 src/strconv/bytealg.go create mode 100644 src/strconv/bytealg_bootstrap.go diff --git a/src/cmd/compile/internal/gc/dep_test.go b/src/cmd/compile/internal/gc/dep_test.go index c1dac9338652b..a185bc9f547c5 100644 --- a/src/cmd/compile/internal/gc/dep_test.go +++ b/src/cmd/compile/internal/gc/dep_test.go @@ -18,7 +18,7 @@ func TestDeps(t *testing.T) { } for _, dep := range strings.Fields(strings.Trim(string(out), "[]")) { switch dep { - case "go/build", "go/token": + case "go/build", "go/scanner": t.Errorf("undesired dependency on %q", dep) } } diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go index 37b3d45977a88..e39f284db566d 100644 --- a/src/cmd/dist/buildtool.go +++ b/src/cmd/dist/buildtool.go @@ -96,6 +96,7 @@ var bootstrapDirs = []string{ "debug/elf", "debug/macho", "debug/pe", + "go/constant", "internal/goversion", "internal/race", "internal/unsafeheader", @@ -103,6 +104,7 @@ var bootstrapDirs = []string{ "math/big", "math/bits", "sort", + "strconv", } // File prefixes that are ignored by go/build anyway, and cause diff --git a/src/strconv/bytealg.go b/src/strconv/bytealg.go new file mode 100644 index 0000000000000..7f66f2a8bbc8c --- /dev/null +++ b/src/strconv/bytealg.go @@ -0,0 +1,14 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !compiler_bootstrap + +package strconv + +import "internal/bytealg" + +// contains reports whether the string contains the byte c. +func contains(s string, c byte) bool { + return bytealg.IndexByteString(s, c) != -1 +} diff --git a/src/strconv/bytealg_bootstrap.go b/src/strconv/bytealg_bootstrap.go new file mode 100644 index 0000000000000..a3a547d1b639a --- /dev/null +++ b/src/strconv/bytealg_bootstrap.go @@ -0,0 +1,17 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build compiler_bootstrap + +package strconv + +// contains reports whether the string contains the byte c. +func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} diff --git a/src/strconv/eisel_lemire.go b/src/strconv/eisel_lemire.go index 6c7f852eba806..fecd1b93451d0 100644 --- a/src/strconv/eisel_lemire.go +++ b/src/strconv/eisel_lemire.go @@ -29,7 +29,7 @@ func eiselLemire64(man uint64, exp10 int, neg bool) (f float64, ok bool) { // Exp10 Range. if man == 0 { if neg { - f = math.Float64frombits(0x80000000_00000000) // Negative zero. + f = math.Float64frombits(0x8000000000000000) // Negative zero. } return f, true } @@ -39,7 +39,7 @@ func eiselLemire64(man uint64, exp10 int, neg bool) (f float64, ok bool) { // Normalization. clz := bits.LeadingZeros64(man) - man <<= clz + man <<= uint(clz) const float64ExponentBias = 1023 retExp2 := uint64(217706*exp10>>16+64+float64ExponentBias) - uint64(clz) @@ -84,9 +84,9 @@ func eiselLemire64(man uint64, exp10 int, neg bool) (f float64, ok bool) { if retExp2-1 >= 0x7FF-1 { return 0, false } - retBits := retExp2<<52 | retMantissa&0x000FFFFF_FFFFFFFF + retBits := retExp2<<52 | retMantissa&0x000FFFFFFFFFFFFF if neg { - retBits |= 0x80000000_00000000 + retBits |= 0x8000000000000000 } return math.Float64frombits(retBits), true } @@ -114,7 +114,7 @@ func eiselLemire32(man uint64, exp10 int, neg bool) (f float32, ok bool) { // Normalization. clz := bits.LeadingZeros64(man) - man <<= clz + man <<= uint(clz) const float32ExponentBias = 127 retExp2 := uint64(217706*exp10>>16+64+float32ExponentBias) - uint64(clz) @@ -122,13 +122,13 @@ func eiselLemire32(man uint64, exp10 int, neg bool) (f float32, ok bool) { xHi, xLo := bits.Mul64(man, detailedPowersOfTen[exp10-detailedPowersOfTenMinExp10][1]) // Wider Approximation. - if xHi&0x3F_FFFFFFFF == 0x3F_FFFFFFFF && xLo+man < man { + if xHi&0x3FFFFFFFFF == 0x3FFFFFFFFF && xLo+man < man { yHi, yLo := bits.Mul64(man, detailedPowersOfTen[exp10-detailedPowersOfTenMinExp10][0]) mergedHi, mergedLo := xHi, xLo+yHi if mergedLo < xLo { mergedHi++ } - if mergedHi&0x3F_FFFFFFFF == 0x3F_FFFFFFFF && mergedLo+1 == 0 && yLo+man < man { + if mergedHi&0x3FFFFFFFFF == 0x3FFFFFFFFF && mergedLo+1 == 0 && yLo+man < man { return 0, false } xHi, xLo = mergedHi, mergedLo @@ -140,7 +140,7 @@ func eiselLemire32(man uint64, exp10 int, neg bool) (f float32, ok bool) { retExp2 -= 1 ^ msb // Half-way Ambiguity. 
- if xLo == 0 && xHi&0x3F_FFFFFFFF == 0 && retMantissa&3 == 1 { + if xLo == 0 && xHi&0x3FFFFFFFFF == 0 && retMantissa&3 == 1 { return 0, false } diff --git a/src/strconv/quote.go b/src/strconv/quote.go index bcbdbc514d21a..4ffa10b72efc9 100644 --- a/src/strconv/quote.go +++ b/src/strconv/quote.go @@ -7,7 +7,6 @@ package strconv import ( - "internal/bytealg" "unicode/utf8" ) @@ -436,11 +435,6 @@ func Unquote(s string) (string, error) { return string(buf), nil } -// contains reports whether the string contains the byte c. -func contains(s string, c byte) bool { - return bytealg.IndexByteString(s, c) != -1 -} - // bsearch16 returns the smallest i such that a[i] >= x. // If there is no such i, bsearch16 returns len(a). func bsearch16(a []uint16, x uint16) int { From 742c05e3bce2cf2f4631762cb5fb733d2a92bc91 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 23 Nov 2020 13:42:43 -0800 Subject: [PATCH 019/474] [dev.regabi] cmd/compile: prep refactoring for switching to go/constant This CL replaces gc.Ctype (along with its CTINT, etc. constants) with constant.Kind; renames Val.Ctype to Val.Kind; and replaces a handful of abstraction-violating patterns that can be readily expressed differently. The next commit will actually replace Val with constant.Value. Passes toolstash-check. [git-generate] cd src/cmd/compile/internal/gc sed -i 's/type Ctype uint8/type Ctype = constant.Kind/' const.go goimports -w const.go rf ' inline -rm Ctype mv Val.Ctype Val.Kind ex import "go/constant"; \ CTxxx -> constant.Unknown; \ CTINT -> constant.Int; \ CTFLT -> constant.Float; \ CTCPLX -> constant.Complex; \ CTBOOL -> constant.Bool; \ CTSTR -> constant.String rm CTxxx CTINT CTFLT CTCPLX CTBOOL CTSTR ex import "cmd/compile/internal/types"; \ var t *types.Type; \ var v, v2 Val; \ v.U.(*Mpint).Cmp(maxintval[TINT]) > 0 -> doesoverflow(v, types.Types[TINT]); \ v.U.(*Mpint).Cmp(v2.U.(*Mpint)) > 0 -> compareOp(v, OGT, v2); \ maxintval[t.Etype].Cmp(maxintval[TUINT]) <= 0 -> t.Size() <= types.Types[TUINT].Size(); \ maxintval[t.Etype].Cmp(maxintval[TUINT]) > 0 -> t.Size() > types.Types[TUINT].Size(); ' go test cmd/compile -u Change-Id: I6c22ec0597508845f88eee639a0d76cbaa66d08f Reviewed-on: https://go-review.googlesource.com/c/go/+/272653 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Robert Griesemer Trust: Matthew Dempsky --- src/cmd/compile/fmtmap_test.go | 4 +- src/cmd/compile/internal/gc/const.go | 130 ++++++++++------------- src/cmd/compile/internal/gc/dcl.go | 3 +- src/cmd/compile/internal/gc/export.go | 5 +- src/cmd/compile/internal/gc/fmt.go | 2 +- src/cmd/compile/internal/gc/iexport.go | 29 ++--- src/cmd/compile/internal/gc/iimport.go | 11 +- src/cmd/compile/internal/gc/inl.go | 3 +- src/cmd/compile/internal/gc/noder.go | 5 +- src/cmd/compile/internal/gc/obj.go | 3 +- src/cmd/compile/internal/gc/ssa.go | 11 +- src/cmd/compile/internal/gc/swt.go | 3 +- src/cmd/compile/internal/gc/typecheck.go | 47 ++++---- src/cmd/compile/internal/gc/walk.go | 51 ++++----- 14 files changed, 153 insertions(+), 154 deletions(-) diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go index a8698de307931..51134e4919d3a 100644 --- a/src/cmd/compile/fmtmap_test.go +++ b/src/cmd/compile/fmtmap_test.go @@ -85,8 +85,6 @@ var knownFormats = map[string]string{ "cmd/compile/internal/gc.Class %d": "", "cmd/compile/internal/gc.Class %s": "", "cmd/compile/internal/gc.Class %v": "", - "cmd/compile/internal/gc.Ctype %d": "", - "cmd/compile/internal/gc.Ctype %v": "", "cmd/compile/internal/gc.Nodes %#v": "", 
"cmd/compile/internal/gc.Nodes %+v": "", "cmd/compile/internal/gc.Nodes %.v": "", @@ -138,6 +136,8 @@ var knownFormats = map[string]string{ "float64 %.3f": "", "float64 %.6g": "", "float64 %g": "", + "go/constant.Kind %d": "", + "go/constant.Kind %v": "", "int %#x": "", "int %-12d": "", "int %-6d": "", diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 326f44a2feb8e..c30d24ae1a7e0 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -8,23 +8,11 @@ import ( "cmd/compile/internal/types" "cmd/internal/src" "fmt" + "go/constant" "math/big" "strings" ) -// Ctype describes the constant kind of an "ideal" (untyped) constant. -type Ctype uint8 - -const ( - CTxxx Ctype = iota - - CTINT - CTFLT - CTCPLX - CTSTR - CTBOOL -) - type Val struct { // U contains one of: // bool bool when Ctype() == CTBOOL @@ -35,28 +23,28 @@ type Val struct { U interface{} } -func (v Val) Ctype() Ctype { +func (v Val) Kind() constant.Kind { switch v.U.(type) { default: Fatalf("unexpected Ctype for %T", v.U) panic("unreachable") case nil: - return CTxxx + return constant.Unknown case bool: - return CTBOOL + return constant.Bool case *Mpint: - return CTINT + return constant.Int case *Mpflt: - return CTFLT + return constant.Float case *Mpcplx: - return CTCPLX + return constant.Complex case string: - return CTSTR + return constant.String } } func eqval(a, b Val) bool { - if a.Ctype() != b.Ctype() { + if a.Kind() != b.Kind() { return false } switch x := a.U.(type) { @@ -103,7 +91,7 @@ func (v Val) Interface() interface{} { // Int64Val returns n as an int64. // n must be an integer or rune constant. func (n *Node) Int64Val() int64 { - if !Isconst(n, CTINT) { + if !Isconst(n, constant.Int) { Fatalf("Int64Val(%v)", n) } return n.Val().U.(*Mpint).Int64() @@ -111,7 +99,7 @@ func (n *Node) Int64Val() int64 { // CanInt64 reports whether it is safe to call Int64Val() on n. func (n *Node) CanInt64() bool { - if !Isconst(n, CTINT) { + if !Isconst(n, constant.Int) { return false } @@ -123,7 +111,7 @@ func (n *Node) CanInt64() bool { // BoolVal returns n as a bool. // n must be a boolean constant. func (n *Node) BoolVal() bool { - if !Isconst(n, CTBOOL) { + if !Isconst(n, constant.Bool) { Fatalf("BoolVal(%v)", n) } return n.Val().U.(bool) @@ -132,7 +120,7 @@ func (n *Node) BoolVal() bool { // StringVal returns the value of a literal string Node as a string. // n must be a string constant. func (n *Node) StringVal() string { - if !Isconst(n, CTSTR) { + if !Isconst(n, constant.String) { Fatalf("StringVal(%v)", n) } return n.Val().U.(string) @@ -369,23 +357,23 @@ func operandType(op Op, t *types.Type) *types.Type { // If explicit is true, then conversions from integer to string are // also allowed. 
func convertVal(v Val, t *types.Type, explicit bool) Val { - switch ct := v.Ctype(); ct { - case CTBOOL: + switch ct := v.Kind(); ct { + case constant.Bool: if t.IsBoolean() { return v } - case CTSTR: + case constant.String: if t.IsString() { return v } - case CTINT: + case constant.Int: if explicit && t.IsString() { return tostr(v) } fallthrough - case CTFLT, CTCPLX: + case constant.Float, constant.Complex: switch { case t.IsInteger(): v = toint(v) @@ -543,14 +531,14 @@ func tostr(v Val) Val { return v } -func consttype(n *Node) Ctype { +func consttype(n *Node) constant.Kind { if n == nil || n.Op != OLITERAL { - return CTxxx + return constant.Unknown } - return n.Val().Ctype() + return n.Val().Kind() } -func Isconst(n *Node, ct Ctype) bool { +func Isconst(n *Node, ct constant.Kind) bool { return consttype(n) == ct } @@ -596,11 +584,11 @@ func evconst(n *Node) { // Merge adjacent constants in the argument list. s := n.List.Slice() for i1 := 0; i1 < len(s); i1++ { - if Isconst(s[i1], CTSTR) && i1+1 < len(s) && Isconst(s[i1+1], CTSTR) { + if Isconst(s[i1], constant.String) && i1+1 < len(s) && Isconst(s[i1+1], constant.String) { // merge from i1 up to but not including i2 var strs []string i2 := i1 - for i2 < len(s) && Isconst(s[i2], CTSTR) { + for i2 < len(s) && Isconst(s[i2], constant.String) { strs = append(strs, s[i2].StringVal()) i2++ } @@ -613,7 +601,7 @@ func evconst(n *Node) { } } - if len(s) == 1 && Isconst(s[0], CTSTR) { + if len(s) == 1 && Isconst(s[0], constant.String) { n.Op = OLITERAL n.SetVal(s[0].Val()) } else { @@ -623,7 +611,7 @@ func evconst(n *Node) { case OCAP, OLEN: switch nl.Type.Etype { case TSTRING: - if Isconst(nl, CTSTR) { + if Isconst(nl, constant.String) { setintconst(n, int64(len(nl.StringVal()))) } case TARRAY: @@ -674,9 +662,9 @@ func evconst(n *Node) { func match(x, y Val) (Val, Val) { switch { - case x.Ctype() == CTCPLX || y.Ctype() == CTCPLX: + case x.Kind() == constant.Complex || y.Kind() == constant.Complex: return tocplx(x), tocplx(y) - case x.Ctype() == CTFLT || y.Ctype() == CTFLT: + case x.Kind() == constant.Float || y.Kind() == constant.Float: return toflt(x), toflt(y) } @@ -687,8 +675,8 @@ func match(x, y Val) (Val, Val) { func compareOp(x Val, op Op, y Val) bool { x, y = match(x, y) - switch x.Ctype() { - case CTBOOL: + switch x.Kind() { + case constant.Bool: x, y := x.U.(bool), y.U.(bool) switch op { case OEQ: @@ -697,15 +685,15 @@ func compareOp(x Val, op Op, y Val) bool { return x != y } - case CTINT: + case constant.Int: x, y := x.U.(*Mpint), y.U.(*Mpint) return cmpZero(x.Cmp(y), op) - case CTFLT: + case constant.Float: x, y := x.U.(*Mpflt), y.U.(*Mpflt) return cmpZero(x.Cmp(y), op) - case CTCPLX: + case constant.Complex: x, y := x.U.(*Mpcplx), y.U.(*Mpcplx) eq := x.Real.Cmp(&y.Real) == 0 && x.Imag.Cmp(&y.Imag) == 0 switch op { @@ -715,7 +703,7 @@ func compareOp(x Val, op Op, y Val) bool { return !eq } - case CTSTR: + case constant.String: x, y := x.U.(string), y.U.(string) switch op { case OEQ: @@ -761,8 +749,8 @@ func binaryOp(x Val, op Op, y Val) Val { x, y = match(x, y) Outer: - switch x.Ctype() { - case CTBOOL: + switch x.Kind() { + case constant.Bool: x, y := x.U.(bool), y.U.(bool) switch op { case OANDAND: @@ -771,7 +759,7 @@ Outer: return Val{U: x || y} } - case CTINT: + case constant.Int: x, y := x.U.(*Mpint), y.U.(*Mpint) u := new(Mpint) @@ -808,7 +796,7 @@ Outer: } return Val{U: u} - case CTFLT: + case constant.Float: x, y := x.U.(*Mpflt), y.U.(*Mpflt) u := newMpflt() @@ -831,7 +819,7 @@ Outer: } return Val{U: u} - case CTCPLX: + case 
constant.Complex: x, y := x.U.(*Mpcplx), y.U.(*Mpcplx) u := newMpcmplx() @@ -864,28 +852,28 @@ Outer: func unaryOp(op Op, x Val, t *types.Type) Val { switch op { case OPLUS: - switch x.Ctype() { - case CTINT, CTFLT, CTCPLX: + switch x.Kind() { + case constant.Int, constant.Float, constant.Complex: return x } case ONEG: - switch x.Ctype() { - case CTINT: + switch x.Kind() { + case constant.Int: x := x.U.(*Mpint) u := new(Mpint) u.Set(x) u.Neg() return Val{U: u} - case CTFLT: + case constant.Float: x := x.U.(*Mpflt) u := newMpflt() u.Set(x) u.Neg() return Val{U: u} - case CTCPLX: + case constant.Complex: x := x.U.(*Mpcplx) u := newMpcmplx() u.Real.Set(&x.Real) @@ -896,8 +884,8 @@ func unaryOp(op Op, x Val, t *types.Type) Val { } case OBITNOT: - switch x.Ctype() { - case CTINT: + switch x.Kind() { + case constant.Int: x := x.U.(*Mpint) u := new(Mpint) @@ -967,12 +955,12 @@ func setconst(n *Node, v Val) { lineno = lno if !n.Type.IsUntyped() { - switch v.Ctype() { + switch v.Kind() { // Truncate precision for non-ideal float. - case CTFLT: + case constant.Float: n.SetVal(Val{truncfltlit(v.U.(*Mpflt), n.Type)}) // Truncate precision for non-ideal complex. - case CTCPLX: + case constant.Complex: n.SetVal(Val{trunccmplxlit(v.U.(*Mpcplx), n.Type)}) } } @@ -990,7 +978,7 @@ func represents(t *types.Type, v Val) bool { return true } - vt := idealType(v.Ctype()) + vt := idealType(v.Kind()) return t == vt || (t == types.UntypedRune && vt == types.UntypedInt) } @@ -1007,22 +995,22 @@ func setintconst(n *Node, v int64) { // nodlit returns a new untyped constant with value v. func nodlit(v Val) *Node { n := nod(OLITERAL, nil, nil) - n.Type = idealType(v.Ctype()) + n.Type = idealType(v.Kind()) n.SetVal(v) return n } -func idealType(ct Ctype) *types.Type { +func idealType(ct constant.Kind) *types.Type { switch ct { - case CTSTR: + case constant.String: return types.UntypedString - case CTBOOL: + case constant.Bool: return types.UntypedBool - case CTINT: + case constant.Int: return types.UntypedInt - case CTFLT: + case constant.Float: return types.UntypedFloat - case CTCPLX: + case constant.Complex: return types.UntypedComplex } Fatalf("unexpected Ctype: %v", ct) @@ -1121,7 +1109,7 @@ func defaultType(t *types.Type) *types.Type { } func smallintconst(n *Node) bool { - if n.Op == OLITERAL && Isconst(n, CTINT) && n.Type != nil { + if n.Op == OLITERAL && Isconst(n, constant.Int) && n.Type != nil { switch simtype[n.Type.Etype] { case TINT8, TUINT8, diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index d3b7590257084..e0a6f6ac92c58 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -10,6 +10,7 @@ import ( "cmd/internal/obj" "cmd/internal/src" "fmt" + "go/constant" "strings" ) @@ -637,7 +638,7 @@ func interfacefield(n *Node) *types.Field { Fatalf("interfacefield: oops %v\n", n) } - if n.Val().Ctype() != CTxxx { + if n.Val().Kind() != constant.Unknown { yyerror("interface method cannot have annotation") } diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 5179b6c05be58..15251062b4b81 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -9,6 +9,7 @@ import ( "cmd/internal/bio" "cmd/internal/src" "fmt" + "go/constant" ) var ( @@ -208,8 +209,8 @@ func dumpasmhdr() { } switch n.Op { case OLITERAL: - t := n.Val().Ctype() - if t == CTFLT || t == CTCPLX { + t := n.Val().Kind() + if t == constant.Float || t == constant.Complex { break } fmt.Fprintf(b, 
"#define const_%s %#v\n", n.Sym.Name, n.Val()) diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index 740fdab977458..650fb9681ef97 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -556,7 +556,7 @@ func (v Val) vconv(s fmt.State, flag FmtFlag) { fmt.Fprint(s, u) default: - fmt.Fprintf(s, "", v.Ctype()) + fmt.Fprintf(s, "", v.Kind()) } } diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index c3385f785ac19..d661fca2d1447 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -210,6 +210,7 @@ import ( "crypto/md5" "encoding/binary" "fmt" + "go/constant" "io" "math/big" "sort" @@ -748,28 +749,28 @@ func (w *exportWriter) param(f *types.Field) { w.typ(f.Type) } -func constTypeOf(typ *types.Type) Ctype { +func constTypeOf(typ *types.Type) constant.Kind { switch typ { case types.UntypedInt, types.UntypedRune: - return CTINT + return constant.Int case types.UntypedFloat: - return CTFLT + return constant.Float case types.UntypedComplex: - return CTCPLX + return constant.Complex } switch typ.Etype { case TBOOL: - return CTBOOL + return constant.Bool case TSTRING: - return CTSTR + return constant.String case TINT, TINT8, TINT16, TINT32, TINT64, TUINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINTPTR: - return CTINT + return constant.Int case TFLOAT32, TFLOAT64: - return CTFLT + return constant.Float case TCOMPLEX64, TCOMPLEX128: - return CTCPLX + return constant.Complex } Fatalf("unexpected constant type: %v", typ) @@ -786,15 +787,15 @@ func (w *exportWriter) value(typ *types.Type, v Val) { // and provides a useful consistency check. switch constTypeOf(typ) { - case CTBOOL: + case constant.Bool: w.bool(v.U.(bool)) - case CTSTR: + case constant.String: w.string(v.U.(string)) - case CTINT: + case constant.Int: w.mpint(&v.U.(*Mpint).Val, typ) - case CTFLT: + case constant.Float: w.mpfloat(&v.U.(*Mpflt).Val, typ) - case CTCPLX: + case constant.Complex: x := v.U.(*Mpcplx) w.mpfloat(&x.Real.Val, typ) w.mpfloat(&x.Imag.Val, typ) diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index fc6b7ecb9ff7e..0fa11c5f59ead 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -15,6 +15,7 @@ import ( "cmd/internal/src" "encoding/binary" "fmt" + "go/constant" "io" "math/big" "os" @@ -357,19 +358,19 @@ func (r *importReader) doDecl(n *Node) { func (p *importReader) value(typ *types.Type) (v Val) { switch constTypeOf(typ) { - case CTBOOL: + case constant.Bool: v.U = p.bool() - case CTSTR: + case constant.String: v.U = p.string() - case CTINT: + case constant.Int: x := new(Mpint) p.mpint(&x.Val, typ) v.U = x - case CTFLT: + case constant.Float: x := newMpflt() p.float(x, typ) v.U = x - case CTCPLX: + case constant.Complex: x := newMpcmplx() p.float(&x.Real, typ) p.float(&x.Imag, typ) diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index a882e91dceedb..6d07e156ea198 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -32,6 +32,7 @@ import ( "cmd/internal/obj" "cmd/internal/src" "fmt" + "go/constant" "strings" ) @@ -417,7 +418,7 @@ func (v *hairyVisitor) visit(n *Node) bool { } case OIF: - if Isconst(n.Left, CTBOOL) { + if Isconst(n.Left, constant.Bool) { // This if and the condition cost nothing. 
return v.visitList(n.Ninit) || v.visitList(n.Nbody) || v.visitList(n.Rlist) diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 303b04cd46eed..3ef8583f6d53a 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -6,6 +6,7 @@ package gc import ( "fmt" + "go/constant" "os" "path/filepath" "runtime" @@ -803,7 +804,7 @@ func (p *noder) sum(x syntax.Expr) *Node { chunks := make([]string, 0, 1) n := p.expr(x) - if Isconst(n, CTSTR) && n.Sym == nil { + if Isconst(n, constant.String) && n.Sym == nil { nstr = n chunks = append(chunks, nstr.StringVal()) } @@ -812,7 +813,7 @@ func (p *noder) sum(x syntax.Expr) *Node { add := adds[i] r := p.expr(add.Y) - if Isconst(r, CTSTR) && r.Sym == nil { + if Isconst(r, constant.String) && r.Sym == nil { if nstr != nil { // Collapse r into nstr instead of adding to n. chunks = append(chunks, r.StringVal()) diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 77f9afb44dada..499b8ef2e5712 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -13,6 +13,7 @@ import ( "crypto/sha256" "encoding/json" "fmt" + "go/constant" "io" "io/ioutil" "os" @@ -263,7 +264,7 @@ func dumpGlobalConst(n *Node) { case TUINTPTR: // ok case TIDEAL: - if !Isconst(n, CTINT) { + if !Isconst(n, constant.Int) { return } x := n.Val().U.(*Mpint) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 709b2d434ed28..e23a189d7176d 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -7,6 +7,7 @@ package gc import ( "encoding/binary" "fmt" + "go/constant" "html" "os" "path/filepath" @@ -1277,7 +1278,7 @@ func (s *state) stmt(n *Node) { // We're assigning a slicing operation back to its source. // Don't write back fields we aren't changing. See issue #14855. i, j, k := rhs.SliceBounds() - if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64Val() == 0) { + if i != nil && (i.Op == OLITERAL && i.Val().Kind() == constant.Int && i.Int64Val() == 0) { // [0:...] is the same as [:...] i = nil } @@ -1305,7 +1306,7 @@ func (s *state) stmt(n *Node) { s.assign(n.Left, r, deref, skip) case OIF: - if Isconst(n.Left, CTBOOL) { + if Isconst(n.Left, constant.Bool) { s.stmtList(n.Left.Ninit) if n.Left.BoolVal() { s.stmtList(n.Nbody) @@ -2093,7 +2094,7 @@ func (s *state) expr(n *Node) *ssa.Value { } default: - s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype()) + s.Fatalf("unhandled OLITERAL %v", n.Val().Kind()) return nil } case OCONVNOP: @@ -2617,7 +2618,7 @@ func (s *state) expr(n *Node) *ssa.Value { case OINDEX: switch { case n.Left.Type.IsString(): - if n.Bounded() && Isconst(n.Left, CTSTR) && Isconst(n.Right, CTINT) { + if n.Bounded() && Isconst(n.Left, constant.String) && Isconst(n.Right, constant.Int) { // Replace "abc"[1] with 'b'. // Delayed until now because "abc"[1] is not an ideal constant. // See test/fixedbugs/issue11370.go. 
@@ -2629,7 +2630,7 @@ func (s *state) expr(n *Node) *ssa.Value { i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) ptrtyp := s.f.Config.Types.BytePtr ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a) - if Isconst(n.Right, CTINT) { + if Isconst(n.Right, constant.Int) { ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64Val(), ptr) } else { ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i) diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index 5f4e9e4b408d2..068f1a34e1f5a 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -7,6 +7,7 @@ package gc import ( "cmd/compile/internal/types" "cmd/internal/src" + "go/constant" "sort" ) @@ -442,7 +443,7 @@ func (c *exprClause) test(exprname *Node) *Node { } // Optimize "switch true { ...}" and "switch false { ... }". - if Isconst(exprname, CTBOOL) && !c.lo.Type.IsInterface() { + if Isconst(exprname, constant.Bool) && !c.lo.Type.IsInterface() { if exprname.BoolVal() { return c.lo } else { diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 3fb59c8debf93..11c1ae38ea735 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -7,6 +7,7 @@ package gc import ( "cmd/compile/internal/types" "fmt" + "go/constant" "strings" ) @@ -359,7 +360,7 @@ func typecheck1(n *Node, top int) (res *Node) { case OLITERAL: ok |= ctxExpr - if n.Type == nil && n.Val().Ctype() == CTSTR { + if n.Type == nil && n.Val().Kind() == constant.String { n.Type = types.UntypedString } @@ -425,7 +426,7 @@ func typecheck1(n *Node, top int) (res *Node) { } else { n.Left = indexlit(typecheck(n.Left, ctxExpr)) l := n.Left - if consttype(l) != CTINT { + if consttype(l) != constant.Int { switch { case l.Type == nil: // Error already reported elsewhere. 
@@ -802,7 +803,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Right = nil } - if (op == ODIV || op == OMOD) && Isconst(r, CTINT) { + if (op == ODIV || op == OMOD) && Isconst(r, constant.Int) { if r.Val().U.(*Mpint).CmpInt64(0) == 0 { yyerror("division by zero") n.Type = nil @@ -1044,15 +1045,15 @@ func typecheck1(n *Node, top int) (res *Node) { break } - if !n.Bounded() && Isconst(n.Right, CTINT) { + if !n.Bounded() && Isconst(n.Right, constant.Int) { x := n.Right.Int64Val() if x < 0 { yyerror("invalid %s index %v (index must be non-negative)", why, n.Right) } else if t.IsArray() && x >= t.NumElem() { yyerror("invalid array index %v (out of bounds for %d-element array)", n.Right, t.NumElem()) - } else if Isconst(n.Left, CTSTR) && x >= int64(len(n.Left.StringVal())) { + } else if Isconst(n.Left, constant.String) && x >= int64(len(n.Left.StringVal())) { yyerror("invalid string index %v (out of bounds for %d-byte string)", n.Right, len(n.Left.StringVal())) - } else if n.Right.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 { + } else if doesoverflow(n.Right.Val(), types.Types[TINT]) { yyerror("invalid %s index %v (index too large)", why, n.Right) } } @@ -1147,15 +1148,15 @@ func typecheck1(n *Node, top int) (res *Node) { l = defaultlit(l, types.Types[TINT]) c = defaultlit(c, types.Types[TINT]) - if Isconst(l, CTINT) && l.Int64Val() < 0 { + if Isconst(l, constant.Int) && l.Int64Val() < 0 { Fatalf("len for OSLICEHEADER must be non-negative") } - if Isconst(c, CTINT) && c.Int64Val() < 0 { + if Isconst(c, constant.Int) && c.Int64Val() < 0 { Fatalf("cap for OSLICEHEADER must be non-negative") } - if Isconst(l, CTINT) && Isconst(c, CTINT) && l.Val().U.(*Mpint).Cmp(c.Val().U.(*Mpint)) > 0 { + if Isconst(l, constant.Int) && Isconst(c, constant.Int) && compareOp(l.Val(), OGT, c.Val()) { Fatalf("len larger than cap for OSLICEHEADER") } @@ -1196,8 +1197,8 @@ func typecheck1(n *Node, top int) (res *Node) { yyerror("non-integer len argument in OMAKESLICECOPY") } - if Isconst(n.Left, CTINT) { - if n.Left.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 { + if Isconst(n.Left, constant.Int) { + if doesoverflow(n.Left.Val(), types.Types[TINT]) { Fatalf("len for OMAKESLICECOPY too large") } if n.Left.Int64Val() < 0 { @@ -1773,7 +1774,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = nil return n } - if Isconst(l, CTINT) && r != nil && Isconst(r, CTINT) && l.Val().U.(*Mpint).Cmp(r.Val().U.(*Mpint)) > 0 { + if Isconst(l, constant.Int) && r != nil && Isconst(r, constant.Int) && compareOp(l.Val(), OGT, r.Val()) { yyerror("len larger than cap in make(%v)", t) n.Type = nil return n @@ -1865,7 +1866,7 @@ func typecheck1(n *Node, top int) (res *Node) { ls := n.List.Slice() for i1, n1 := range ls { // Special case for print: int constant is int64, not int. 
- if Isconst(n1, CTINT) { + if Isconst(n1, constant.Int) { ls[i1] = defaultlit(ls[i1], types.Types[TINT64]) } else { ls[i1] = defaultlit(ls[i1], nil) @@ -2187,10 +2188,10 @@ func checksliceindex(l *Node, r *Node, tp *types.Type) bool { } else if tp != nil && tp.NumElem() >= 0 && r.Int64Val() > tp.NumElem() { yyerror("invalid slice index %v (out of bounds for %d-element array)", r, tp.NumElem()) return false - } else if Isconst(l, CTSTR) && r.Int64Val() > int64(len(l.StringVal())) { + } else if Isconst(l, constant.String) && r.Int64Val() > int64(len(l.StringVal())) { yyerror("invalid slice index %v (out of bounds for %d-byte string)", r, len(l.StringVal())) return false - } else if r.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 { + } else if doesoverflow(r.Val(), types.Types[TINT]) { yyerror("invalid slice index %v (index too large)", r) return false } @@ -2200,7 +2201,7 @@ func checksliceindex(l *Node, r *Node, tp *types.Type) bool { } func checksliceconst(lo *Node, hi *Node) bool { - if lo != nil && hi != nil && lo.Op == OLITERAL && hi.Op == OLITERAL && lo.Val().U.(*Mpint).Cmp(hi.Val().U.(*Mpint)) > 0 { + if lo != nil && hi != nil && lo.Op == OLITERAL && hi.Op == OLITERAL && compareOp(lo.Val(), OGT, hi.Val()) { yyerror("invalid slice index: %v > %v", lo, hi) return false } @@ -3431,7 +3432,7 @@ func typecheckfunc(n *Node) { // The result of stringtoruneslit MUST be assigned back to n, e.g. // n.Left = stringtoruneslit(n.Left) func stringtoruneslit(n *Node) *Node { - if n.Left.Op != OLITERAL || n.Left.Val().Ctype() != CTSTR { + if n.Left.Op != OLITERAL || n.Left.Val().Kind() != constant.String { Fatalf("stringtoarraylit %v", n) } @@ -3724,7 +3725,7 @@ func checkmake(t *types.Type, arg string, np **Node) bool { // Do range checks for constants before defaultlit // to avoid redundant "constant NNN overflows int" errors. 
switch consttype(n) { - case CTINT, CTFLT, CTCPLX: + case constant.Int, constant.Float, constant.Complex: v := toint(n.Val()).U.(*Mpint) if v.CmpInt64(0) < 0 { yyerror("negative %s argument in make(%v)", arg, t) @@ -3885,11 +3886,11 @@ func deadcodefn(fn *Node) { } switch n.Op { case OIF: - if !Isconst(n.Left, CTBOOL) || n.Nbody.Len() > 0 || n.Rlist.Len() > 0 { + if !Isconst(n.Left, constant.Bool) || n.Nbody.Len() > 0 || n.Rlist.Len() > 0 { return } case OFOR: - if !Isconst(n.Left, CTBOOL) || n.Left.BoolVal() { + if !Isconst(n.Left, constant.Bool) || n.Left.BoolVal() { return } default: @@ -3917,7 +3918,7 @@ func deadcodeslice(nn *Nodes) { } if n.Op == OIF { n.Left = deadcodeexpr(n.Left) - if Isconst(n.Left, CTBOOL) { + if Isconst(n.Left, constant.Bool) { var body Nodes if n.Left.BoolVal() { n.Rlist = Nodes{} @@ -3961,7 +3962,7 @@ func deadcodeexpr(n *Node) *Node { case OANDAND: n.Left = deadcodeexpr(n.Left) n.Right = deadcodeexpr(n.Right) - if Isconst(n.Left, CTBOOL) { + if Isconst(n.Left, constant.Bool) { if n.Left.BoolVal() { return n.Right // true && x => x } else { @@ -3971,7 +3972,7 @@ func deadcodeexpr(n *Node) *Node { case OOROR: n.Left = deadcodeexpr(n.Left) n.Right = deadcodeexpr(n.Right) - if Isconst(n.Left, CTBOOL) { + if Isconst(n.Left, constant.Bool) { if n.Left.BoolVal() { return n.Left // true || x => true } else { diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index e7351d1792a8e..4bbc58ce13cf3 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -11,6 +11,7 @@ import ( "cmd/internal/sys" "encoding/binary" "fmt" + "go/constant" "strings" ) @@ -1045,15 +1046,15 @@ opswitch: } if t.IsArray() { n.SetBounded(bounded(r, t.NumElem())) - if Debug.m != 0 && n.Bounded() && !Isconst(n.Right, CTINT) { + if Debug.m != 0 && n.Bounded() && !Isconst(n.Right, constant.Int) { Warn("index bounds check elided") } if smallintconst(n.Right) && !n.Bounded() { yyerror("index out of bounds") } - } else if Isconst(n.Left, CTSTR) { + } else if Isconst(n.Left, constant.String) { n.SetBounded(bounded(r, int64(len(n.Left.StringVal())))) - if Debug.m != 0 && n.Bounded() && !Isconst(n.Right, CTINT) { + if Debug.m != 0 && n.Bounded() && !Isconst(n.Right, constant.Int) { Warn("index bounds check elided") } if smallintconst(n.Right) && !n.Bounded() { @@ -1061,8 +1062,8 @@ opswitch: } } - if Isconst(n.Right, CTINT) { - if n.Right.Val().U.(*Mpint).CmpInt64(0) < 0 || n.Right.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 { + if Isconst(n.Right, constant.Int) { + if n.Right.Val().U.(*Mpint).CmpInt64(0) < 0 || doesoverflow(n.Right.Val(), types.Types[TINT]) { yyerror("index out of bounds") } } @@ -1192,7 +1193,7 @@ opswitch: // Type checking guarantees that TIDEAL size is positive and fits in an int. // The case of size overflow when converting TUINT or TUINTPTR to TINT // will be handled by the negative range checks in makechan during runtime. - if size.Type.IsKind(TIDEAL) || maxintval[size.Type.Etype].Cmp(maxintval[TUINT]) <= 0 { + if size.Type.IsKind(TIDEAL) || size.Type.Size() <= types.Types[TUINT].Size() { fnname = "makechan" argtype = types.Types[TINT] } @@ -1222,7 +1223,7 @@ opswitch: // BUCKETSIZE runtime.makemap will allocate the buckets on the heap. // Maximum key and elem size is 128 bytes, larger objects // are stored with an indirection. So max bucket size is 2048+eps. 
- if !Isconst(hint, CTINT) || + if !Isconst(hint, constant.Int) || hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) <= 0 { // In case hint is larger than BUCKETSIZE runtime.makemap @@ -1256,7 +1257,7 @@ opswitch: } } - if Isconst(hint, CTINT) && hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) <= 0 { + if Isconst(hint, constant.Int) && hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) <= 0 { // Handling make(map[any]any) and // make(map[any]any, hint) where hint <= BUCKETSIZE // special allows for faster map initialization and @@ -1300,7 +1301,7 @@ opswitch: // See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function. // The case of hint overflow when converting TUINT or TUINTPTR to TINT // will be handled by the negative range checks in makemap during runtime. - if hint.Type.IsKind(TIDEAL) || maxintval[hint.Type.Etype].Cmp(maxintval[TUINT]) <= 0 { + if hint.Type.IsKind(TIDEAL) || hint.Type.Size() <= types.Types[TUINT].Size() { fnname = "makemap" argtype = types.Types[TINT] } @@ -1370,8 +1371,8 @@ opswitch: // Type checking guarantees that TIDEAL len/cap are positive and fit in an int. // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT // will be handled by the negative range checks in makeslice during runtime. - if (len.Type.IsKind(TIDEAL) || maxintval[len.Type.Etype].Cmp(maxintval[TUINT]) <= 0) && - (cap.Type.IsKind(TIDEAL) || maxintval[cap.Type.Etype].Cmp(maxintval[TUINT]) <= 0) { + if (len.Type.IsKind(TIDEAL) || len.Type.Size() <= types.Types[TUINT].Size()) && + (cap.Type.IsKind(TIDEAL) || cap.Type.Size() <= types.Types[TUINT].Size()) { fnname = "makeslice" argtype = types.Types[TINT] } @@ -1486,7 +1487,7 @@ opswitch: case OSTR2BYTES: s := n.Left - if Isconst(s, CTSTR) { + if Isconst(s, constant.String) { sc := s.StringVal() // Allocate a [n]byte of the right size. @@ -1914,7 +1915,7 @@ func walkprint(nn *Node, init *Nodes) *Node { t := make([]*Node, 0, len(s)) for i := 0; i < len(s); { var strs []string - for i < len(s) && Isconst(s[i], CTSTR) { + for i < len(s) && Isconst(s[i], constant.String) { strs = append(strs, s[i].StringVal()) i++ } @@ -1935,11 +1936,11 @@ func walkprint(nn *Node, init *Nodes) *Node { n = defaultlit(n, types.Runetype) } - switch n.Val().Ctype() { - case CTINT: + switch n.Val().Kind() { + case constant.Int: n = defaultlit(n, types.Types[TINT64]) - case CTFLT: + case constant.Float: n = defaultlit(n, types.Types[TFLOAT64]) } } @@ -1994,7 +1995,7 @@ func walkprint(nn *Node, init *Nodes) *Node { on = syslook("printbool") case TSTRING: cs := "" - if Isconst(n, CTSTR) { + if Isconst(n, constant.String) { cs = n.StringVal() } switch cs { @@ -2850,7 +2851,7 @@ func isAppendOfMake(n *Node) bool { // The care of overflow of the len argument to make will be handled by an explicit check of int(len) < 0 during runtime. y := second.Left - if !Isconst(y, CTINT) && maxintval[y.Type.Etype].Cmp(maxintval[TUINT]) > 0 { + if !Isconst(y, constant.Int) && y.Type.Size() > types.Types[TUINT].Size() { return false } @@ -3471,12 +3472,12 @@ func walkcompareString(n *Node, init *Nodes) *Node { // Rewrite comparisons to short constant strings as length+byte-wise comparisons. 
var cs, ncs *Node // const string, non-const string switch { - case Isconst(n.Left, CTSTR) && Isconst(n.Right, CTSTR): + case Isconst(n.Left, constant.String) && Isconst(n.Right, constant.String): // ignore; will be constant evaluated - case Isconst(n.Left, CTSTR): + case Isconst(n.Left, constant.String): cs = n.Left ncs = n.Right - case Isconst(n.Right, CTSTR): + case Isconst(n.Right, constant.String): cs = n.Right ncs = n.Left } @@ -3485,7 +3486,7 @@ func walkcompareString(n *Node, init *Nodes) *Node { // Our comparison below assumes that the non-constant string // is on the left hand side, so rewrite "" cmp x to x cmp "". // See issue 24817. - if Isconst(n.Left, CTSTR) { + if Isconst(n.Left, constant.String) { cmp = brrev(cmp) } @@ -3841,17 +3842,17 @@ func candiscard(n *Node) bool { // Discardable as long as we know it's not division by zero. case ODIV, OMOD: - if Isconst(n.Right, CTINT) && n.Right.Val().U.(*Mpint).CmpInt64(0) != 0 { + if Isconst(n.Right, constant.Int) && n.Right.Val().U.(*Mpint).CmpInt64(0) != 0 { break } - if Isconst(n.Right, CTFLT) && n.Right.Val().U.(*Mpflt).CmpFloat64(0) != 0 { + if Isconst(n.Right, constant.Float) && n.Right.Val().U.(*Mpflt).CmpFloat64(0) != 0 { break } return false // Discardable as long as we know it won't fail because of a bad size. case OMAKECHAN, OMAKEMAP: - if Isconst(n.Left, CTINT) && n.Left.Val().U.(*Mpint).CmpInt64(0) == 0 { + if Isconst(n.Left, constant.Int) && n.Left.Val().U.(*Mpint).CmpInt64(0) == 0 { break } return false From 8e2106327cf27b2d281a3e4432fcae552d4b29aa Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 18 Nov 2020 15:14:24 -0500 Subject: [PATCH 020/474] [dev.regabi] cmd/compile: clean up tests to know less about Node We want to refactor a bit, and these tests know too much about the layout of Nodes. Use standard constructors instead. Change-Id: I91f0325c89ea60086655414468c53419ebeacea4 Reviewed-on: https://go-review.googlesource.com/c/go/+/272626 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky TryBot-Result: Go Bot --- src/cmd/compile/internal/gc/pgen_test.go | 147 ++++++++++++----------- 1 file changed, 79 insertions(+), 68 deletions(-) diff --git a/src/cmd/compile/internal/gc/pgen_test.go b/src/cmd/compile/internal/gc/pgen_test.go index b1db29825c2c1..932ab47d02635 100644 --- a/src/cmd/compile/internal/gc/pgen_test.go +++ b/src/cmd/compile/internal/gc/pgen_test.go @@ -35,106 +35,110 @@ func markNeedZero(n *Node) *Node { return n } -func nodeWithClass(n Node, c Class) *Node { - n.SetClass(c) - n.Name = new(Name) - return &n -} - // Test all code paths for cmpstackvarlt. 
func TestCmpstackvar(t *testing.T) { + nod := func(xoffset int64, t *types.Type, s *types.Sym, cl Class) *Node { + if s == nil { + s = &types.Sym{Name: "."} + } + n := newname(s) + n.Type = t + n.Xoffset = xoffset + n.SetClass(cl) + return n + } testdata := []struct { a, b *Node lt bool }{ { - nodeWithClass(Node{}, PAUTO), - nodeWithClass(Node{}, PFUNC), + nod(0, nil, nil, PAUTO), + nod(0, nil, nil, PFUNC), false, }, { - nodeWithClass(Node{}, PFUNC), - nodeWithClass(Node{}, PAUTO), + nod(0, nil, nil, PFUNC), + nod(0, nil, nil, PAUTO), true, }, { - nodeWithClass(Node{Xoffset: 0}, PFUNC), - nodeWithClass(Node{Xoffset: 10}, PFUNC), + nod(0, nil, nil, PFUNC), + nod(10, nil, nil, PFUNC), true, }, { - nodeWithClass(Node{Xoffset: 20}, PFUNC), - nodeWithClass(Node{Xoffset: 10}, PFUNC), + nod(20, nil, nil, PFUNC), + nod(10, nil, nil, PFUNC), false, }, { - nodeWithClass(Node{Xoffset: 10}, PFUNC), - nodeWithClass(Node{Xoffset: 10}, PFUNC), + nod(10, nil, nil, PFUNC), + nod(10, nil, nil, PFUNC), false, }, { - nodeWithClass(Node{Xoffset: 10}, PPARAM), - nodeWithClass(Node{Xoffset: 20}, PPARAMOUT), + nod(10, nil, nil, PPARAM), + nod(20, nil, nil, PPARAMOUT), true, }, { - nodeWithClass(Node{Xoffset: 10}, PPARAMOUT), - nodeWithClass(Node{Xoffset: 20}, PPARAM), + nod(10, nil, nil, PPARAMOUT), + nod(20, nil, nil, PPARAM), true, }, { - markUsed(nodeWithClass(Node{}, PAUTO)), - nodeWithClass(Node{}, PAUTO), + markUsed(nod(0, nil, nil, PAUTO)), + nod(0, nil, nil, PAUTO), true, }, { - nodeWithClass(Node{}, PAUTO), - markUsed(nodeWithClass(Node{}, PAUTO)), + nod(0, nil, nil, PAUTO), + markUsed(nod(0, nil, nil, PAUTO)), false, }, { - nodeWithClass(Node{Type: typeWithoutPointers()}, PAUTO), - nodeWithClass(Node{Type: typeWithPointers()}, PAUTO), + nod(0, typeWithoutPointers(), nil, PAUTO), + nod(0, typeWithPointers(), nil, PAUTO), false, }, { - nodeWithClass(Node{Type: typeWithPointers()}, PAUTO), - nodeWithClass(Node{Type: typeWithoutPointers()}, PAUTO), + nod(0, typeWithPointers(), nil, PAUTO), + nod(0, typeWithoutPointers(), nil, PAUTO), true, }, { - markNeedZero(nodeWithClass(Node{Type: &types.Type{}}, PAUTO)), - nodeWithClass(Node{Type: &types.Type{}, Name: &Name{}}, PAUTO), + markNeedZero(nod(0, &types.Type{}, nil, PAUTO)), + nod(0, &types.Type{}, nil, PAUTO), true, }, { - nodeWithClass(Node{Type: &types.Type{}, Name: &Name{}}, PAUTO), - markNeedZero(nodeWithClass(Node{Type: &types.Type{}}, PAUTO)), + nod(0, &types.Type{}, nil, PAUTO), + markNeedZero(nod(0, &types.Type{}, nil, PAUTO)), false, }, { - nodeWithClass(Node{Type: &types.Type{Width: 1}, Name: &Name{}}, PAUTO), - nodeWithClass(Node{Type: &types.Type{Width: 2}, Name: &Name{}}, PAUTO), + nod(0, &types.Type{Width: 1}, nil, PAUTO), + nod(0, &types.Type{Width: 2}, nil, PAUTO), false, }, { - nodeWithClass(Node{Type: &types.Type{Width: 2}, Name: &Name{}}, PAUTO), - nodeWithClass(Node{Type: &types.Type{Width: 1}, Name: &Name{}}, PAUTO), + nod(0, &types.Type{Width: 2}, nil, PAUTO), + nod(0, &types.Type{Width: 1}, nil, PAUTO), true, }, { - nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO), - nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO), + nod(0, &types.Type{}, &types.Sym{Name: "abc"}, PAUTO), + nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, PAUTO), true, }, { - nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO), - nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO), + nod(0, &types.Type{}, &types.Sym{Name: "abc"}, PAUTO), + nod(0, &types.Type{}, 
&types.Sym{Name: "abc"}, PAUTO), false, }, { - nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO), - nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO), + nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, PAUTO), + nod(0, &types.Type{}, &types.Sym{Name: "abc"}, PAUTO), false, }, } @@ -151,35 +155,42 @@ func TestCmpstackvar(t *testing.T) { } func TestStackvarSort(t *testing.T) { + nod := func(xoffset int64, t *types.Type, s *types.Sym, cl Class) *Node { + n := newname(s) + n.Type = t + n.Xoffset = xoffset + n.SetClass(cl) + return n + } inp := []*Node{ - nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC), - nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO), - nodeWithClass(Node{Xoffset: 0, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC), - nodeWithClass(Node{Xoffset: 10, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC), - nodeWithClass(Node{Xoffset: 20, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC), - markUsed(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)), - nodeWithClass(Node{Type: typeWithoutPointers(), Sym: &types.Sym{}}, PAUTO), - nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO), - markNeedZero(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)), - nodeWithClass(Node{Type: &types.Type{Width: 1}, Sym: &types.Sym{}}, PAUTO), - nodeWithClass(Node{Type: &types.Type{Width: 2}, Sym: &types.Sym{}}, PAUTO), - nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO), - nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO), + nod(0, &types.Type{}, &types.Sym{}, PFUNC), + nod(0, &types.Type{}, &types.Sym{}, PAUTO), + nod(0, &types.Type{}, &types.Sym{}, PFUNC), + nod(10, &types.Type{}, &types.Sym{}, PFUNC), + nod(20, &types.Type{}, &types.Sym{}, PFUNC), + markUsed(nod(0, &types.Type{}, &types.Sym{}, PAUTO)), + nod(0, typeWithoutPointers(), &types.Sym{}, PAUTO), + nod(0, &types.Type{}, &types.Sym{}, PAUTO), + markNeedZero(nod(0, &types.Type{}, &types.Sym{}, PAUTO)), + nod(0, &types.Type{Width: 1}, &types.Sym{}, PAUTO), + nod(0, &types.Type{Width: 2}, &types.Sym{}, PAUTO), + nod(0, &types.Type{}, &types.Sym{Name: "abc"}, PAUTO), + nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, PAUTO), } want := []*Node{ - nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC), - nodeWithClass(Node{Xoffset: 0, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC), - nodeWithClass(Node{Xoffset: 10, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC), - nodeWithClass(Node{Xoffset: 20, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC), - markUsed(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)), - markNeedZero(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)), - nodeWithClass(Node{Type: &types.Type{Width: 2}, Sym: &types.Sym{}}, PAUTO), - nodeWithClass(Node{Type: &types.Type{Width: 1}, Sym: &types.Sym{}}, PAUTO), - nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO), - nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO), - nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO), - nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO), - nodeWithClass(Node{Type: typeWithoutPointers(), Sym: &types.Sym{}}, PAUTO), + nod(0, &types.Type{}, &types.Sym{}, PFUNC), + nod(0, &types.Type{}, &types.Sym{}, PFUNC), + nod(10, &types.Type{}, &types.Sym{}, PFUNC), + nod(20, &types.Type{}, &types.Sym{}, PFUNC), + markUsed(nod(0, &types.Type{}, 
&types.Sym{}, PAUTO)), + markNeedZero(nod(0, &types.Type{}, &types.Sym{}, PAUTO)), + nod(0, &types.Type{Width: 2}, &types.Sym{}, PAUTO), + nod(0, &types.Type{Width: 1}, &types.Sym{}, PAUTO), + nod(0, &types.Type{}, &types.Sym{}, PAUTO), + nod(0, &types.Type{}, &types.Sym{}, PAUTO), + nod(0, &types.Type{}, &types.Sym{Name: "abc"}, PAUTO), + nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, PAUTO), + nod(0, typeWithoutPointers(), &types.Sym{}, PAUTO), } sort.Sort(byStackVar(inp)) if !reflect.DeepEqual(want, inp) { From fd11a32c92a2621c6f52edec2a0339f4b7d794e8 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 16 Nov 2020 17:00:10 -0500 Subject: [PATCH 021/474] [dev.regabi] cmd/compile: clean up Node.Func The original meaning of type Func was "extra fields factored out of a few cases of type Node having to do with functions", but those specific cases didn't necessarily have any relation. A typical declared function is represented by an ODCLFUNC Node at its declaration and an ONAME node at its uses, and both those have a .Func field, but they are *different* Funcs. Similarly, a closure is represented both by an OCLOSURE Node for the value itself and an ODCLFUNC Node for the underlying function implementing the closure. Those too have *different* Funcs, and the Func.Closure field in one points to the other and vice versa. This has led to no end of confusion over the years. This CL elevates type Func to be the canonical identifier for a given Go function. This looks like a trivial CL but in fact is the result of a lot of scaffolding and rewriting, discarded once the result was achieved, to separate out the three different kinds of Func nodes into three separate fields, limited in use to each specific Node type, to understand which Func fields are used by which Node types and what the possible overlaps are. There were a few overlaps, most notably around closures, which led to more fields being added to type Func to keep them separate even though there is now a single Func instead of two different ones for each function. A future CL can and should change Curfn to be a *Func instead of a *Node, finally eliminating the confusion about whether Curfn is an ODCLFUNC node (as it is most of the time) or an ONAME node (as it is when type-checking an inlined function body). Although sizeof_test.go makes it look like Func is growing by two words, there are now half as many Funcs in a running compilation, so the memory footprint has actually been reduced substantially. 
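As a rough sketch (illustrative pseudocode, not code from this CL), the
new invariants are that every function is described by exactly one Func,
and each IR node that mentions the function points back at it:

	fn := dcl.Func         // dcl is the function's ODCLFUNC node
	fn.Decl == dcl         // the declaration code
	fn.Nname.Func == fn    // the ONAME node used for references
	fn.OClosure.Func == fn // the OCLOSURE node; non-nil only for function literals
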
Change-Id: I598bd96c95728093dc769a835d48f2154a406a61 Reviewed-on: https://go-review.googlesource.com/c/go/+/272253 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/closure.go | 155 ++++++++++----------- src/cmd/compile/internal/gc/dcl.go | 20 +-- src/cmd/compile/internal/gc/esc.go | 5 +- src/cmd/compile/internal/gc/escape.go | 6 +- src/cmd/compile/internal/gc/fmt.go | 12 +- src/cmd/compile/internal/gc/gen.go | 2 +- src/cmd/compile/internal/gc/iimport.go | 2 +- src/cmd/compile/internal/gc/initorder.go | 2 +- src/cmd/compile/internal/gc/inl.go | 10 +- src/cmd/compile/internal/gc/main.go | 6 +- src/cmd/compile/internal/gc/noder.go | 2 +- src/cmd/compile/internal/gc/order.go | 2 +- src/cmd/compile/internal/gc/pgen.go | 65 ++++++--- src/cmd/compile/internal/gc/scc.go | 2 +- src/cmd/compile/internal/gc/sinit.go | 2 +- src/cmd/compile/internal/gc/sizeof_test.go | 2 +- src/cmd/compile/internal/gc/subr.go | 3 +- src/cmd/compile/internal/gc/syntax.go | 86 ++++++------ src/cmd/compile/internal/gc/walk.go | 7 +- 19 files changed, 210 insertions(+), 181 deletions(-) diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index dd6640667de8b..577d6565f59d9 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -15,25 +15,25 @@ func (p *noder) funcLit(expr *syntax.FuncLit) *Node { xtype := p.typeExpr(expr.Type) ntype := p.typeExpr(expr.Type) - xfunc := p.nod(expr, ODCLFUNC, nil, nil) - xfunc.Func.SetIsHiddenClosure(Curfn != nil) - xfunc.Func.Nname = newfuncnamel(p.pos(expr), nblank.Sym) // filled in by typecheckclosure - xfunc.Func.Nname.Name.Param.Ntype = xtype - xfunc.Func.Nname.Name.Defn = xfunc + dcl := p.nod(expr, ODCLFUNC, nil, nil) + fn := dcl.Func + fn.SetIsHiddenClosure(Curfn != nil) + fn.Nname = newfuncnamel(p.pos(expr), nblank.Sym, fn) // filled in by typecheckclosure + fn.Nname.Name.Param.Ntype = xtype + fn.Nname.Name.Defn = dcl clo := p.nod(expr, OCLOSURE, nil, nil) - clo.Func.Ntype = ntype + clo.Func = fn + fn.ClosureType = ntype + fn.OClosure = clo - xfunc.Func.Closure = clo - clo.Func.Closure = xfunc - - p.funcBody(xfunc, expr.Body) + p.funcBody(dcl, expr.Body) // closure-specific variables are hanging off the // ordinary ones in the symbol table; see oldname. // unhook them. // make the list of pointers for the closure call. - for _, v := range xfunc.Func.Cvars.Slice() { + for _, v := range fn.ClosureVars.Slice() { // Unlink from v1; see comment in syntax.go type Param for these fields. v1 := v.Name.Defn v1.Name.Param.Innermost = v.Name.Param.Outer @@ -77,25 +77,26 @@ func (p *noder) funcLit(expr *syntax.FuncLit) *Node { // TODO: This creation of the named function should probably really be done in a // separate pass from type-checking. func typecheckclosure(clo *Node, top int) { - xfunc := clo.Func.Closure + fn := clo.Func + dcl := fn.Decl // Set current associated iota value, so iota can be used inside // function in ConstSpec, see issue #22344 if x := getIotaValue(); x >= 0 { - xfunc.SetIota(x) + dcl.SetIota(x) } - clo.Func.Ntype = typecheck(clo.Func.Ntype, ctxType) - clo.Type = clo.Func.Ntype.Type - clo.Func.Top = top + fn.ClosureType = typecheck(fn.ClosureType, ctxType) + clo.Type = fn.ClosureType.Type + fn.ClosureCalled = top&ctxCallee != 0 - // Do not typecheck xfunc twice, otherwise, we will end up pushing - // xfunc to xtop multiple times, causing initLSym called twice. 
+	// Do not typecheck dcl twice, otherwise, we will end up pushing
+	// dcl to xtop multiple times, causing initLSym called twice.
 	// See #30709
-	if xfunc.Typecheck() == 1 {
+	if dcl.Typecheck() == 1 {
 		return
 	}
 
-	for _, ln := range xfunc.Func.Cvars.Slice() {
+	for _, ln := range fn.ClosureVars.Slice() {
 		n := ln.Name.Defn
 		if !n.Name.Captured() {
 			n.Name.SetCaptured(true)
@@ -111,9 +112,9 @@ func typecheckclosure(clo *Node, top int) {
 		}
 	}
 
-	xfunc.Func.Nname.Sym = closurename(Curfn)
-	setNodeNameFunc(xfunc.Func.Nname)
-	xfunc = typecheck(xfunc, ctxStmt)
+	fn.Nname.Sym = closurename(Curfn)
+	setNodeNameFunc(fn.Nname)
+	dcl = typecheck(dcl, ctxStmt)
 
 	// Type check the body now, but only if we're inside a function.
 	// At top level (in a variable initialization: curfn==nil) we're not
@@ -121,15 +122,15 @@ func typecheckclosure(clo *Node, top int) {
 	// underlying closure function we create is added to xtop.
 	if Curfn != nil && clo.Type != nil {
 		oldfn := Curfn
-		Curfn = xfunc
+		Curfn = dcl
 		olddd := decldepth
 		decldepth = 1
-		typecheckslice(xfunc.Nbody.Slice(), ctxStmt)
+		typecheckslice(dcl.Nbody.Slice(), ctxStmt)
 		decldepth = olddd
 		Curfn = oldfn
 	}
 
-	xtop = append(xtop, xfunc)
+	xtop = append(xtop, dcl)
 }
 
 // globClosgen is like Func.Closgen, but for the global scope.
@@ -143,7 +144,7 @@ func closurename(outerfunc *Node) *types.Sym {
 	gen := &globClosgen
 
 	if outerfunc != nil {
-		if outerfunc.Func.Closure != nil {
+		if outerfunc.Func.OClosure != nil {
 			prefix = ""
 		}
 
@@ -169,12 +170,11 @@ var capturevarscomplete bool
 // by value or by reference.
 // We use value capturing for values <= 128 bytes that are never reassigned
 // after capturing (effectively constant).
-func capturevars(xfunc *Node) {
+func capturevars(dcl *Node) {
 	lno := lineno
-	lineno = xfunc.Pos
-
-	clo := xfunc.Func.Closure
-	cvars := xfunc.Func.Cvars.Slice()
+	lineno = dcl.Pos
+	fn := dcl.Func
+	cvars := fn.ClosureVars.Slice()
 	out := cvars[:0]
 	for _, v := range cvars {
 		if v.Type == nil {
@@ -216,21 +216,21 @@ func capturevars(xfunc *Node) {
 		}
 
 		outer = typecheck(outer, ctxExpr)
-		clo.Func.Enter.Append(outer)
+		fn.ClosureEnter.Append(outer)
 	}
 
-	xfunc.Func.Cvars.Set(out)
+	fn.ClosureVars.Set(out)
 	lineno = lno
 }
 
 // transformclosure is called in a separate phase after escape analysis.
 // It transforms closure bodies to properly reference captured variables.
-func transformclosure(xfunc *Node) {
+func transformclosure(dcl *Node) {
 	lno := lineno
-	lineno = xfunc.Pos
-	clo := xfunc.Func.Closure
+	lineno = dcl.Pos
+	fn := dcl.Func
 
-	if clo.Func.Top&ctxCallee != 0 {
+	if fn.ClosureCalled {
 		// If the closure is directly called, we transform it to a plain function call
 		// with variables passed as args. This avoids allocation of a closure object.
 		// Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE)
@@ -247,12 +247,12 @@ func transformclosure(xfunc *Node) {
 		//	}(byval, &byref, 42)
 
 		// f is ONAME of the actual function.
-		f := xfunc.Func.Nname
+		f := fn.Nname
 
 		// We are going to insert captured variables before input args.
 		var params []*types.Field
 		var decls []*Node
-		for _, v := range xfunc.Func.Cvars.Slice() {
+		for _, v := range fn.ClosureVars.Slice() {
 			if !v.Name.Byval() {
 				// If v of type T is captured by reference,
 				// we introduce function param &v *T
@@ -275,16 +275,16 @@ func transformclosure(xfunc *Node) {
 		if len(params) > 0 {
 			// Prepend params and decls.
 			f.Type.Params().SetFields(append(params, f.Type.Params().FieldSlice()...))
-			xfunc.Func.Dcl = append(decls, xfunc.Func.Dcl...)
+			fn.Dcl = append(decls, fn.Dcl...)
} dowidth(f.Type) - xfunc.Type = f.Type // update type of ODCLFUNC + dcl.Type = f.Type // update type of ODCLFUNC } else { // The closure is not called, so it is going to stay as closure. var body []*Node offset := int64(Widthptr) - for _, v := range xfunc.Func.Cvars.Slice() { + for _, v := range fn.ClosureVars.Slice() { // cv refers to the field inside of closure OSTRUCTLIT. cv := nod(OCLOSUREVAR, nil, nil) @@ -299,7 +299,7 @@ func transformclosure(xfunc *Node) { if v.Name.Byval() && v.Type.Width <= int64(2*Widthptr) { // If it is a small variable captured by value, downgrade it to PAUTO. v.SetClass(PAUTO) - xfunc.Func.Dcl = append(xfunc.Func.Dcl, v) + fn.Dcl = append(fn.Dcl, v) body = append(body, nod(OAS, v, cv)) } else { // Declare variable holding addresses taken from closure @@ -308,8 +308,8 @@ func transformclosure(xfunc *Node) { addr.Type = types.NewPtr(v.Type) addr.SetClass(PAUTO) addr.Name.SetUsed(true) - addr.Name.Curfn = xfunc - xfunc.Func.Dcl = append(xfunc.Func.Dcl, addr) + addr.Name.Curfn = dcl + fn.Dcl = append(fn.Dcl, addr) v.Name.Param.Heapaddr = addr if v.Name.Byval() { cv = nod(OADDR, cv, nil) @@ -320,8 +320,8 @@ func transformclosure(xfunc *Node) { if len(body) > 0 { typecheckslice(body, ctxStmt) - xfunc.Func.Enter.Set(body) - xfunc.Func.SetNeedctxt(true) + fn.Enter.Set(body) + fn.SetNeedctxt(true) } } @@ -331,19 +331,17 @@ func transformclosure(xfunc *Node) { // hasemptycvars reports whether closure clo has an // empty list of captured vars. func hasemptycvars(clo *Node) bool { - xfunc := clo.Func.Closure - return xfunc.Func.Cvars.Len() == 0 + return clo.Func.ClosureVars.Len() == 0 } // closuredebugruntimecheck applies boilerplate checks for debug flags // and compiling runtime func closuredebugruntimecheck(clo *Node) { if Debug_closure > 0 { - xfunc := clo.Func.Closure if clo.Esc == EscHeap { - Warnl(clo.Pos, "heap closure, captured vars = %v", xfunc.Func.Cvars) + Warnl(clo.Pos, "heap closure, captured vars = %v", clo.Func.ClosureVars) } else { - Warnl(clo.Pos, "stack closure, captured vars = %v", xfunc.Func.Cvars) + Warnl(clo.Pos, "stack closure, captured vars = %v", clo.Func.ClosureVars) } } if compiling_runtime && clo.Esc == EscHeap { @@ -371,7 +369,7 @@ func closureType(clo *Node) *types.Type { fields := []*Node{ namedfield(".F", types.Types[TUINTPTR]), } - for _, v := range clo.Func.Closure.Func.Cvars.Slice() { + for _, v := range clo.Func.ClosureVars.Slice() { typ := v.Type if !v.Name.Byval() { typ = types.NewPtr(typ) @@ -384,14 +382,14 @@ func closureType(clo *Node) *types.Type { } func walkclosure(clo *Node, init *Nodes) *Node { - xfunc := clo.Func.Closure + fn := clo.Func // If no closure vars, don't bother wrapping. 
if hasemptycvars(clo) { if Debug_closure > 0 { Warnl(clo.Pos, "closure converted to global") } - return xfunc.Func.Nname + return fn.Nname } closuredebugruntimecheck(clo) @@ -399,7 +397,7 @@ func walkclosure(clo *Node, init *Nodes) *Node { clos := nod(OCOMPLIT, nil, typenod(typ)) clos.Esc = clo.Esc - clos.List.Set(append([]*Node{nod(OCFUNC, xfunc.Func.Nname, nil)}, clo.Func.Enter.Slice()...)) + clos.List.Set(append([]*Node{nod(OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...)) clos = nod(OADDR, clos, nil) clos.Esc = clo.Esc @@ -419,8 +417,8 @@ func walkclosure(clo *Node, init *Nodes) *Node { return walkexpr(clos, init) } -func typecheckpartialcall(fn *Node, sym *types.Sym) { - switch fn.Op { +func typecheckpartialcall(dot *Node, sym *types.Sym) { + switch dot.Op { case ODOTINTER, ODOTMETH: break @@ -429,19 +427,19 @@ func typecheckpartialcall(fn *Node, sym *types.Sym) { } // Create top-level function. - xfunc := makepartialcall(fn, fn.Type, sym) - fn.Func = xfunc.Func - fn.Func.SetWrapper(true) - fn.Right = newname(sym) - fn.Op = OCALLPART - fn.Type = xfunc.Type - fn.SetOpt(nil) // clear types.Field from ODOTMETH + dcl := makepartialcall(dot, dot.Type, sym) + dcl.Func.SetWrapper(true) + dot.Op = OCALLPART + dot.Right = newname(sym) + dot.Type = dcl.Type + dot.Func = dcl.Func + dot.SetOpt(nil) // clear types.Field from ODOTMETH } // makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed // for partial calls. -func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node { - rcvrtype := fn.Left.Type +func makepartialcall(dot *Node, t0 *types.Type, meth *types.Sym) *Node { + rcvrtype := dot.Left.Type sym := methodSymSuffix(rcvrtype, meth, "-fm") if sym.Uniq() { @@ -468,9 +466,10 @@ func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node { tfn.List.Set(structargs(t0.Params(), true)) tfn.Rlist.Set(structargs(t0.Results(), false)) - xfunc := dclfunc(sym, tfn) - xfunc.Func.SetDupok(true) - xfunc.Func.SetNeedctxt(true) + dcl := dclfunc(sym, tfn) + fn := dcl.Func + fn.SetDupok(true) + fn.SetNeedctxt(true) tfn.Type.SetPkg(t0.Pkg()) @@ -502,20 +501,20 @@ func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node { } body = append(body, call) - xfunc.Nbody.Set(body) + dcl.Nbody.Set(body) funcbody() - xfunc = typecheck(xfunc, ctxStmt) + dcl = typecheck(dcl, ctxStmt) // Need to typecheck the body of the just-generated wrapper. // typecheckslice() requires that Curfn is set when processing an ORETURN. - Curfn = xfunc - typecheckslice(xfunc.Nbody.Slice(), ctxStmt) - sym.Def = asTypesNode(xfunc) - xtop = append(xtop, xfunc) + Curfn = dcl + typecheckslice(dcl.Nbody.Slice(), ctxStmt) + sym.Def = asTypesNode(dcl) + xtop = append(xtop, dcl) Curfn = savecurfn lineno = saveLineNo - return xfunc + return dcl } // partialCallType returns the struct type used to hold all the information diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index e0a6f6ac92c58..59888cce7e9f4 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -206,11 +206,13 @@ func newnoname(s *types.Sym) *Node { } // newfuncnamel generates a new name node for a function or method. -// TODO(rsc): Use an ODCLFUNC node instead. See comment in CL 7360. 
-func newfuncnamel(pos src.XPos, s *types.Sym) *Node { +func newfuncnamel(pos src.XPos, s *types.Sym, fn *Func) *Node { + if fn.Nname != nil { + Fatalf("newfuncnamel - already have name") + } n := newnamel(pos, s) - n.Func = new(Func) - n.Func.SetIsHiddenClosure(Curfn != nil) + n.Func = fn + fn.Nname = n return n } @@ -287,7 +289,7 @@ func oldname(s *types.Sym) *Node { c.Name.Param.Outer = n.Name.Param.Innermost n.Name.Param.Innermost = c - Curfn.Func.Cvars.Append(c) + Curfn.Func.ClosureVars.Append(c) } // return ref to closure var, not original @@ -388,10 +390,8 @@ func funchdr(n *Node) { types.Markdcl() - if n.Func.Nname != nil { + if n.Func.Nname != nil && n.Func.Nname.Name.Param.Ntype != nil { funcargs(n.Func.Nname.Name.Param.Ntype) - } else if n.Func.Ntype != nil { - funcargs(n.Func.Ntype) } else { funcargs2(n.Type) } @@ -973,7 +973,7 @@ func dclfunc(sym *types.Sym, tfn *Node) *Node { } fn := nod(ODCLFUNC, nil, nil) - fn.Func.Nname = newfuncnamel(lineno, sym) + fn.Func.Nname = newfuncnamel(lineno, sym, fn.Func) fn.Func.Nname.Name.Defn = fn fn.Func.Nname.Name.Param.Ntype = tfn setNodeNameFunc(fn.Func.Nname) @@ -1043,7 +1043,7 @@ func (c *nowritebarrierrecChecker) findExtraCalls(n *Node) bool { case ONAME: callee = arg.Name.Defn case OCLOSURE: - callee = arg.Func.Closure + callee = arg.Func.Decl default: Fatalf("expected ONAME or OCLOSURE node, got %+v", arg) } diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go index b7d1dfc92a5f5..c4159101f2008 100644 --- a/src/cmd/compile/internal/gc/esc.go +++ b/src/cmd/compile/internal/gc/esc.go @@ -259,8 +259,9 @@ func addrescapes(n *Node) { // heap in f, not in the inner closure. Flip over to f before calling moveToHeap. oldfn := Curfn Curfn = n.Name.Curfn - if Curfn.Func.Closure != nil && Curfn.Op == OCLOSURE { - Curfn = Curfn.Func.Closure + if Curfn.Op == OCLOSURE { + Curfn = Curfn.Func.Decl + panic("can't happen") } ln := lineno lineno = Curfn.Pos diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index bc0eb98d7625a..07cc5498259bb 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -623,7 +623,7 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) { k = e.spill(k, n) // Link addresses of captured variables to closure. 
- for _, v := range n.Func.Closure.Func.Cvars.Slice() { + for _, v := range n.Func.ClosureVars.Slice() { if v.Op == OXXX { // unnamed out argument; see dcl.go:/^funcargs continue } @@ -810,7 +810,7 @@ func (e *Escape) call(ks []EscHole, call, where *Node) { case v.Op == ONAME && v.Class() == PFUNC: fn = v case v.Op == OCLOSURE: - fn = v.Func.Closure.Func.Nname + fn = v.Func.Nname } case OCALLMETH: fn = call.Left.MethodName() @@ -1358,7 +1358,7 @@ func (e *Escape) outlives(l, other *EscLocation) bool { // // var u int // okay to stack allocate // *(func() *int { return &u }()) = 42 - if containsClosure(other.curfn, l.curfn) && l.curfn.Func.Closure.Func.Top&ctxCallee != 0 { + if containsClosure(other.curfn, l.curfn) && l.curfn.Func.ClosureCalled { return false } diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index 650fb9681ef97..e62a526eebc72 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -1417,7 +1417,7 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) { mode.Fprintf(s, "%v { %v }", n.Type, n.Nbody) return } - mode.Fprintf(s, "%v { %v }", n.Type, n.Func.Closure.Nbody) + mode.Fprintf(s, "%v { %v }", n.Type, n.Func.Decl.Nbody) case OCOMPLIT: if mode == FErr { @@ -1717,8 +1717,8 @@ func (n *Node) nodedump(s fmt.State, flag FmtFlag, mode fmtMode) { } } - if n.Op == OCLOSURE && n.Func.Closure != nil && n.Func.Closure.Func.Nname.Sym != nil { - mode.Fprintf(s, " fnName %v", n.Func.Closure.Func.Nname.Sym) + if n.Op == OCLOSURE && n.Func.Decl != nil && n.Func.Nname.Sym != nil { + mode.Fprintf(s, " fnName %v", n.Func.Nname.Sym) } if n.Sym != nil && n.Op != ONAME { mode.Fprintf(s, " %v", n.Sym) @@ -1735,12 +1735,12 @@ func (n *Node) nodedump(s fmt.State, flag FmtFlag, mode fmtMode) { if n.Right != nil { mode.Fprintf(s, "%v", n.Right) } - if n.Func != nil && n.Func.Closure != nil && n.Func.Closure.Nbody.Len() != 0 { + if n.Op == OCLOSURE && n.Func != nil && n.Func.Decl != nil && n.Func.Decl.Nbody.Len() != 0 { indent(s) // The function associated with a closure - mode.Fprintf(s, "%v-clofunc%v", n.Op, n.Func.Closure) + mode.Fprintf(s, "%v-clofunc%v", n.Op, n.Func.Decl) } - if n.Func != nil && n.Func.Dcl != nil && len(n.Func.Dcl) != 0 { + if n.Op == ODCLFUNC && n.Func != nil && n.Func.Dcl != nil && len(n.Func.Dcl) != 0 { indent(s) // The dcls for a func or closure mode.Fprintf(s, "%v-dcl%v", n.Op, asNodes(n.Func.Dcl)) diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go index 929653ebbd092..d882d6d672b3d 100644 --- a/src/cmd/compile/internal/gc/gen.go +++ b/src/cmd/compile/internal/gc/gen.go @@ -54,7 +54,7 @@ func tempAt(pos src.XPos, curfn *Node, t *types.Type) *Node { if curfn == nil { Fatalf("no curfn for tempAt") } - if curfn.Func.Closure != nil && curfn.Op == OCLOSURE { + if curfn.Op == OCLOSURE { Dump("tempAt", curfn) Fatalf("adding tempAt to wrong closure function") } diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 0fa11c5f59ead..a3a01e59cd594 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -329,7 +329,7 @@ func (r *importReader) doDecl(n *Node) { recv := r.param() mtyp := r.signature(recv) - m := newfuncnamel(mpos, methodSym(recv.Type, msym)) + m := newfuncnamel(mpos, methodSym(recv.Type, msym), new(Func)) m.Type = mtyp m.SetClass(PFUNC) // methodSym already marked m.Sym as a function. 
diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go index 102cb769db1a5..f82df04b7307b 100644 --- a/src/cmd/compile/internal/gc/initorder.go +++ b/src/cmd/compile/internal/gc/initorder.go @@ -285,7 +285,7 @@ func (d *initDeps) visit(n *Node) bool { } case OCLOSURE: - d.inspectList(n.Func.Closure.Nbody) + d.inspectList(n.Func.Decl.Nbody) case ODOTMETH, OCALLPART: d.foundDep(n.MethodName()) diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 6d07e156ea198..db53b2aae147c 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -722,7 +722,7 @@ func inlCallee(fn *Node) *Node { } return fn case fn.Op == OCLOSURE: - c := fn.Func.Closure + c := fn.Func.Decl caninl(c) return c.Func.Nname } @@ -806,7 +806,7 @@ func reassigned(n *Node) (bool, *Node) { // We need to walk the function body to check for reassignments so we follow the // linkage to the ODCLFUNC node as that is where body is held. if f.Op == OCLOSURE { - f = f.Func.Closure + f = f.Func.Decl } v := reassignVisitor{name: n} a := v.visitList(f.Nbody) @@ -976,8 +976,8 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { // Handle captured variables when inlining closures. if fn.Name.Defn != nil { - if c := fn.Name.Defn.Func.Closure; c != nil { - for _, v := range c.Func.Closure.Func.Cvars.Slice() { + if c := fn.Name.Defn.Func.OClosure; c != nil { + for _, v := range c.Func.ClosureVars.Slice() { if v.Op == OXXX { continue } @@ -987,7 +987,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { // NB: if we enabled inlining of functions containing OCLOSURE or refined // the reassigned check via some sort of copy propagation this would most // likely need to be changed to a loop to walk up to the correct Param - if o == nil || (o.Name.Curfn != Curfn && o.Name.Curfn.Func.Closure != Curfn) { + if o == nil || (o.Name.Curfn != Curfn && o.Name.Curfn.Func.OClosure != Curfn) { Fatalf("%v: unresolvable capture %v %v\n", n.Line(), fn, v) } diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 89dbca0cf1b94..cf4ec039f10a6 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -651,7 +651,7 @@ func Main(archInit func(*Arch)) { // because variables captured by value do not escape. timings.Start("fe", "capturevars") for _, n := range xtop { - if n.Op == ODCLFUNC && n.Func.Closure != nil { + if n.Op == ODCLFUNC && n.Func.OClosure != nil { Curfn = n capturevars(n) } @@ -724,7 +724,7 @@ func Main(archInit func(*Arch)) { // before walk reaches a call of a closure. 
timings.Start("fe", "xclosures") for _, n := range xtop { - if n.Op == ODCLFUNC && n.Func.Closure != nil { + if n.Op == ODCLFUNC && n.Func.OClosure != nil { Curfn = n transformclosure(n) } @@ -829,7 +829,7 @@ func Main(archInit func(*Arch)) { func numNonClosures(list []*Node) int { count := 0 for _, n := range list { - if n.Func.Closure == nil { + if n.Func.OClosure == nil { count++ } } diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 3ef8583f6d53a..f8c84a75bf365 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -537,7 +537,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node { name = nblank.Sym // filled in by typecheckfunc } - f.Func.Nname = newfuncnamel(p.pos(fun.Name), name) + f.Func.Nname = newfuncnamel(p.pos(fun.Name), name, f.Func) f.Func.Nname.Name.Defn = f f.Func.Nname.Name.Param.Ntype = t diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 11c8b1fa25e5f..a62d468c9c498 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -1256,7 +1256,7 @@ func (o *Order) expr(n, lhs *Node) *Node { } case OCLOSURE: - if n.Transient() && n.Func.Closure.Func.Cvars.Len() > 0 { + if n.Transient() && n.Func.ClosureVars.Len() > 0 { prealloc[n] = o.newTemp(closureType(n), false) } diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 9c1bd285ae7c7..0f0f6b7107478 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -404,30 +404,59 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S } } + // Back when there were two different *Funcs for a function, this code + // was not consistent about whether a particular *Node being processed + // was an ODCLFUNC or ONAME node. Partly this is because inlined function + // bodies have no ODCLFUNC node, which was it's own inconsistency. + // In any event, the handling of the two different nodes for DWARF purposes + // was subtly different, likely in unintended ways. CL 272253 merged the + // two nodes' Func fields, so that code sees the same *Func whether it is + // holding the ODCLFUNC or the ONAME. This resulted in changes in the + // DWARF output. To preserve the existing DWARF output and leave an + // intentional change for a future CL, this code does the following when + // fn.Op == ONAME: + // + // 1. Disallow use of createComplexVars in createDwarfVars. + // It was not possible to reach that code for an ONAME before, + // because the DebugInfo was set only on the ODCLFUNC Func. + // Calling into it in the ONAME case causes an index out of bounds panic. + // + // 2. Do not populate apdecls. fn.Func.Dcl was in the ODCLFUNC Func, + // not the ONAME Func. Populating apdecls for the ONAME case results + // in selected being populated after createSimpleVars is called in + // createDwarfVars, and then that causes the loop to skip all the entries + // in dcl, meaning that the RecordAutoType calls don't happen. + // + // These two adjustments keep toolstash -cmp working for now. + // Deciding the right answer is, as they say, future work. + isODCLFUNC := fn.Op == ODCLFUNC + var apdecls []*Node // Populate decls for fn. 
- for _, n := range fn.Func.Dcl { - if n.Op != ONAME { // might be OTYPE or OLITERAL - continue - } - switch n.Class() { - case PAUTO: - if !n.Name.Used() { - // Text == nil -> generating abstract function - if fnsym.Func().Text != nil { - Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)") + if isODCLFUNC { + for _, n := range fn.Func.Dcl { + if n.Op != ONAME { // might be OTYPE or OLITERAL + continue + } + switch n.Class() { + case PAUTO: + if !n.Name.Used() { + // Text == nil -> generating abstract function + if fnsym.Func().Text != nil { + Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)") + } + continue } + case PPARAM, PPARAMOUT: + default: continue } - case PPARAM, PPARAMOUT: - default: - continue + apdecls = append(apdecls, n) + fnsym.Func().RecordAutoType(ngotype(n).Linksym()) } - apdecls = append(apdecls, n) - fnsym.Func().RecordAutoType(ngotype(n).Linksym()) } - decls, dwarfVars := createDwarfVars(fnsym, fn.Func, apdecls) + decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn.Func, apdecls) // For each type referenced by the functions auto vars but not // already referenced by a dwarf var, attach a dummy relocation to @@ -575,12 +604,12 @@ func createComplexVars(fnsym *obj.LSym, fn *Func) ([]*Node, []*dwarf.Var, map[*N // createDwarfVars process fn, returning a list of DWARF variables and the // Nodes they represent. -func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dwarf.Var) { +func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *Func, apDecls []*Node) ([]*Node, []*dwarf.Var) { // Collect a raw list of DWARF vars. var vars []*dwarf.Var var decls []*Node var selected map[*Node]bool - if Ctxt.Flag_locationlists && Ctxt.Flag_optimize && fn.DebugInfo != nil { + if Ctxt.Flag_locationlists && Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK { decls, vars, selected = createComplexVars(fnsym, fn) } else { decls, vars, selected = createSimpleVars(fnsym, apDecls) diff --git a/src/cmd/compile/internal/gc/scc.go b/src/cmd/compile/internal/gc/scc.go index 14f77d613ae0b..8e41ebac29c65 100644 --- a/src/cmd/compile/internal/gc/scc.go +++ b/src/cmd/compile/internal/gc/scc.go @@ -101,7 +101,7 @@ func (v *bottomUpVisitor) visit(n *Node) uint32 { } } case OCLOSURE: - if m := v.visit(n.Func.Closure); m < min { + if m := v.visit(n.Func.Decl); m < min { min = m } } diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index c199ff6317d0b..5727245562420 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -261,7 +261,7 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool { } // Closures with no captured variables are globals, // so the assignment can be done at link time. 
-		pfuncsym(l, r.Func.Closure.Func.Nname)
+		pfuncsym(l, r.Func.Nname)
 		return true
 	}
 	closuredebugruntimecheck(r)
diff --git a/src/cmd/compile/internal/gc/sizeof_test.go b/src/cmd/compile/internal/gc/sizeof_test.go
index ce4a216c2e2da..2f2eba4c6755c 100644
--- a/src/cmd/compile/internal/gc/sizeof_test.go
+++ b/src/cmd/compile/internal/gc/sizeof_test.go
@@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) {
 		_32bit uintptr // size on 32bit platforms
 		_64bit uintptr // size on 64bit platforms
 	}{
-		{Func{}, 124, 224},
+		{Func{}, 132, 240},
 		{Name{}, 32, 56},
 		{Param{}, 24, 48},
 		{Node{}, 76, 128},
diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go
index 7c13aef21403b..1aa3af929c2f6 100644
--- a/src/cmd/compile/internal/gc/subr.go
+++ b/src/cmd/compile/internal/gc/subr.go
@@ -139,13 +139,14 @@ func nod(op Op, nleft, nright *Node) *Node {
 func nodl(pos src.XPos, op Op, nleft, nright *Node) *Node {
 	var n *Node
 	switch op {
-	case OCLOSURE, ODCLFUNC:
+	case ODCLFUNC:
 		var x struct {
 			n Node
 			f Func
 		}
 		n = &x.n
 		n.Func = &x.f
+		n.Func.Decl = n
 	case ONAME:
 		Fatalf("use newname instead")
 	case OLABEL, OPACK:
diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go
index de516dec69422..435fd78fce977 100644
--- a/src/cmd/compile/internal/gc/syntax.go
+++ b/src/cmd/compile/internal/gc/syntax.go
@@ -578,62 +578,66 @@ func (p *Param) SetEmbedFiles(list []string) {
 	*(*p.Extra).(*embedFileList) = list
 }
 
-// Functions
+// A Func corresponds to a single function in a Go program
+// (and vice versa: each function is denoted by exactly one *Func).
 //
-// A simple function declaration is represented as an ODCLFUNC node f
-// and an ONAME node n. They're linked to one another through
-// f.Func.Nname == n and n.Name.Defn == f. When functions are
-// referenced by name in an expression, the function's ONAME node is
-// used directly.
+// There are multiple nodes that represent a Func in the IR.
 //
-// Function names have n.Class() == PFUNC. This distinguishes them
-// from variables of function type.
+// The ONAME node (Func.Nname) is used for plain references to it.
+// The ODCLFUNC node (Func.Decl) is used for its declaration code.
+// The OCLOSURE node (Func.OClosure) is used for a reference to a
+// function literal.
 //
-// Confusingly, n.Func and f.Func both exist, but commonly point to
-// different Funcs. (Exception: an OCALLPART's Func does point to its
-// ODCLFUNC's Func.)
+// A Func for an imported function will have only an ONAME node.
+// A declared function or method has an ONAME and an ODCLFUNC.
+// A function literal is represented directly by an OCLOSURE, but it also
+// has an ODCLFUNC (and a matching ONAME) representing the compiled
+// underlying form of the closure, which accesses the captured variables
+// using a special data structure passed in a register.
 //
-// A method declaration is represented like functions, except n.Sym
+// A method declaration is represented like functions, except f.Sym
 // will be the qualified method name (e.g., "T.m") and
 // f.Func.Shortname is the bare method name (e.g., "m").
 //
-// Method expressions are represented as ONAME/PFUNC nodes like
-// function names, but their Left and Right fields still point to the
-// type and method, respectively. They can be distinguished from
-// normal functions with isMethodExpression. Also, unlike function
-// name nodes, method expression nodes exist for each method
-// expression. The declaration ONAME can be accessed with
-// x.Type.Nname(), where x is the method expression ONAME node.
+// A method expression (T.M) is represented as an ONAME node
+// like a function name would be, but n.Left and n.Right point to
+// the type and method, respectively. A method expression can
+// be distinguished from a normal function ONAME by checking
+// n.IsMethodExpression. Unlike ordinary ONAME nodes, each
+// distinct mention of a method expression in the source code
+// constructs a fresh ONAME node.
+// TODO(rsc): Method expressions deserve their own opcode
+// instead of violating invariants of ONAME.
 //
-// Method values are represented by ODOTMETH/ODOTINTER when called
-// immediately, and OCALLPART otherwise. They are like method
-// expressions, except that for ODOTMETH/ODOTINTER the method name is
-// stored in Sym instead of Right.
-//
-// Closures are represented by OCLOSURE node c. They link back and
-// forth with the ODCLFUNC via Func.Closure; that is, c.Func.Closure
-// == f and f.Func.Closure == c.
-//
-// Function bodies are stored in f.Nbody, and inline function bodies
-// are stored in n.Func.Inl. Pragmas are stored in f.Func.Pragma.
-//
-// Imported functions skip the ODCLFUNC, so n.Name.Defn is nil. They
-// also use Dcl instead of Inldcl.
-
-// Func holds Node fields used only with function-like nodes.
+// A method value (t.M) is represented by ODOTMETH/ODOTINTER
+// when it is called directly and by OCALLPART otherwise.
+// These are like method expressions, except that for ODOTMETH/ODOTINTER,
+// the method name is stored in Sym instead of Right.
+// Each OCALLPART ends up being implemented as a new
+// function, a bit like a closure, with its own ODCLFUNC.
+// The OCALLPART uses n.Func to record the linkage to
+// the generated ODCLFUNC (as n.Func.Decl), but there is no
+// pointer from the Func back to the OCALLPART.
 type Func struct {
+	Nname    *Node // ONAME node
+	Decl     *Node // ODCLFUNC node
+	OClosure *Node // OCLOSURE node
+
 	Shortname *types.Sym
+
 	// Extra entry code for the function. For example, allocate and initialize
-	// memory for escaping parameters. However, just for OCLOSURE, Enter is a
-	// list of ONAME nodes of captured variables
+	// memory for escaping parameters.
 	Enter Nodes
 	Exit  Nodes
-	// ONAME nodes for closure params, each should have closurevar set
-	Cvars Nodes
 	// ONAME nodes for all params/locals for this func/closure, does NOT
 	// include closurevars until transformclosure runs.
 	Dcl []*Node
 
+	ClosureEnter  Nodes // list of ONAME nodes of captured variables
+	ClosureType   *Node // closure representation type
+	ClosureCalled bool  // closure is only immediately called
+	ClosureVars   Nodes // closure params; each has closurevar set
+
 	// Parents records the parent scope of each scope within a
 	// function. The root scope (0) has no parent, so the i'th
 	// scope's parent is stored at Parents[i-1].
@@ -649,10 +653,6 @@ type Func struct {
 	FieldTrack map[*types.Sym]struct{}
 	DebugInfo  *ssa.FuncDebug
-	Ntype      *Node // signature
-	Top        int   // top context (ctxCallee, etc)
-	Closure    *Node // OCLOSURE <-> ODCLFUNC (see header comment above)
-	Nname      *Node // The ONAME node associated with an ODCLFUNC (both have same Type)
 	lsym       *obj.LSym
 
 	Inl *Inline
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
index 4bbc58ce13cf3..ae344fc8e101b 100644
--- a/src/cmd/compile/internal/gc/walk.go
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -562,12 +562,11 @@ opswitch:
 
 		// transformclosure already did all preparation work.
// Prepend captured variables to argument list. - n.List.Prepend(n.Left.Func.Enter.Slice()...) - - n.Left.Func.Enter.Set(nil) + n.List.Prepend(n.Left.Func.ClosureEnter.Slice()...) + n.Left.Func.ClosureEnter.Set(nil) // Replace OCLOSURE with ONAME/PFUNC. - n.Left = n.Left.Func.Closure.Func.Nname + n.Left = n.Left.Func.Nname // Update type of OCALLFUNC node. // Output arguments had not changed, but their offsets could. From 4f9d54e41d80f06b8806bcbb23c015572b78d9fc Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Tue, 24 Nov 2020 10:25:41 -0500 Subject: [PATCH 022/474] [dev.regabi] cmd/compile: add OMETHEXPR This CL is obviously OK but does not pass toolstash -cmp, because it renumbers the Op codes. In a separate CL so that we can use toolstash -cmp on the CL with real changes related to OMETHEXPR. Change-Id: I1db978e3f2652b3bdf51f7981a3ba5137641c8c7 Reviewed-on: https://go-review.googlesource.com/c/go/+/272866 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/op_string.go | 87 ++++++++++++------------ src/cmd/compile/internal/gc/syntax.go | 1 + 2 files changed, 45 insertions(+), 43 deletions(-) diff --git a/src/cmd/compile/internal/gc/op_string.go b/src/cmd/compile/internal/gc/op_string.go index f7d31f912cb14..16fd79e477880 100644 --- a/src/cmd/compile/internal/gc/op_string.go +++ b/src/cmd/compile/internal/gc/op_string.go @@ -121,52 +121,53 @@ func _() { _ = x[OALIGNOF-110] _ = x[OOFFSETOF-111] _ = x[OSIZEOF-112] - _ = x[OBLOCK-113] - _ = x[OBREAK-114] - _ = x[OCASE-115] - _ = x[OCONTINUE-116] - _ = x[ODEFER-117] - _ = x[OEMPTY-118] - _ = x[OFALL-119] - _ = x[OFOR-120] - _ = x[OFORUNTIL-121] - _ = x[OGOTO-122] - _ = x[OIF-123] - _ = x[OLABEL-124] - _ = x[OGO-125] - _ = x[ORANGE-126] - _ = x[ORETURN-127] - _ = x[OSELECT-128] - _ = x[OSWITCH-129] - _ = x[OTYPESW-130] - _ = x[OTCHAN-131] - _ = x[OTMAP-132] - _ = x[OTSTRUCT-133] - _ = x[OTINTER-134] - _ = x[OTFUNC-135] - _ = x[OTARRAY-136] - _ = x[ODDD-137] - _ = x[OINLCALL-138] - _ = x[OEFACE-139] - _ = x[OITAB-140] - _ = x[OIDATA-141] - _ = x[OSPTR-142] - _ = x[OCLOSUREVAR-143] - _ = x[OCFUNC-144] - _ = x[OCHECKNIL-145] - _ = x[OVARDEF-146] - _ = x[OVARKILL-147] - _ = x[OVARLIVE-148] - _ = x[ORESULT-149] - _ = x[OINLMARK-150] - _ = x[ORETJMP-151] - _ = x[OGETG-152] - _ = x[OEND-153] + _ = x[OMETHEXPR-113] + _ = x[OBLOCK-114] + _ = x[OBREAK-115] + _ = x[OCASE-116] + _ = x[OCONTINUE-117] + _ = x[ODEFER-118] + _ = x[OEMPTY-119] + _ = x[OFALL-120] + _ = x[OFOR-121] + _ = x[OFORUNTIL-122] + _ = x[OGOTO-123] + _ = x[OIF-124] + _ = x[OLABEL-125] + _ = x[OGO-126] + _ = x[ORANGE-127] + _ = x[ORETURN-128] + _ = x[OSELECT-129] + _ = x[OSWITCH-130] + _ = x[OTYPESW-131] + _ = x[OTCHAN-132] + _ = x[OTMAP-133] + _ = x[OTSTRUCT-134] + _ = x[OTINTER-135] + _ = x[OTFUNC-136] + _ = x[OTARRAY-137] + _ = x[ODDD-138] + _ = x[OINLCALL-139] + _ = x[OEFACE-140] + _ = x[OITAB-141] + _ = x[OIDATA-142] + _ = x[OSPTR-143] + _ = x[OCLOSUREVAR-144] + _ = x[OCFUNC-145] + _ = x[OCHECKNIL-146] + _ = x[OVARDEF-147] + _ = x[OVARKILL-148] + _ = x[OVARLIVE-149] + _ = x[ORESULT-150] + _ = x[OINLMARK-151] + _ = x[ORETJMP-152] + _ = x[OGETG-153] + _ = x[OEND-154] } -const _Op_name = 
"XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFBLOCKBREAKCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDINLCALLEFACEITABIDATASPTRCLOSUREVARCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND" +const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRBLOCKBREAKCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDINLCALLEFACEITABIDATASPTRCLOSUREVARCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND" -var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 310, 317, 323, 326, 332, 339, 347, 351, 358, 366, 368, 370, 372, 374, 376, 378, 383, 388, 396, 399, 408, 411, 415, 423, 430, 439, 452, 455, 458, 461, 464, 467, 470, 476, 479, 485, 488, 494, 498, 501, 505, 510, 515, 521, 526, 530, 535, 543, 551, 557, 566, 577, 584, 588, 595, 602, 610, 614, 618, 622, 629, 636, 644, 650, 655, 660, 664, 672, 677, 682, 686, 689, 697, 701, 703, 708, 710, 715, 721, 727, 733, 739, 744, 748, 755, 761, 766, 772, 775, 782, 787, 791, 796, 800, 810, 815, 823, 829, 836, 843, 849, 856, 862, 866, 869} +var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 310, 317, 323, 326, 332, 339, 347, 351, 358, 366, 368, 370, 372, 374, 376, 378, 383, 388, 396, 399, 408, 411, 415, 423, 430, 439, 452, 455, 458, 461, 464, 467, 470, 476, 479, 485, 488, 494, 498, 501, 505, 510, 515, 521, 526, 530, 535, 543, 551, 557, 566, 577, 584, 588, 595, 602, 610, 614, 618, 622, 629, 636, 644, 650, 658, 663, 668, 672, 680, 685, 690, 694, 697, 705, 709, 711, 716, 718, 723, 729, 735, 741, 747, 752, 756, 763, 769, 774, 780, 783, 790, 795, 799, 804, 808, 818, 823, 831, 837, 844, 851, 857, 864, 870, 874, 877} func (i Op) String() string { if i >= Op(len(_Op_index)-1) { diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go index 435fd78fce977..343d5b171c36b 100644 --- 
a/src/cmd/compile/internal/gc/syntax.go +++ b/src/cmd/compile/internal/gc/syntax.go @@ -891,6 +891,7 @@ const ( OALIGNOF // unsafe.Alignof(Left) OOFFSETOF // unsafe.Offsetof(Left) OSIZEOF // unsafe.Sizeof(Left) + OMETHEXPR // method expression // statements OBLOCK // { List } (block of code) From ee6132a698172a063ad2aa5b8d603f589c16e019 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 18 Nov 2020 11:25:29 -0500 Subject: [PATCH 023/474] [dev.regabi] cmd/compile: introduce OMETHEXPR instead of overloading ONAME A method expression today is an ONAME that has none of the invariants or properties of other ONAMEs and is always a special case (hence the Node.IsMethodExpression method). Remove the special cases by making a separate Op. Passes toolstash -cmp. Change-Id: I7667693c9155d5486a6924dbf75ebb59891c4afc Reviewed-on: https://go-review.googlesource.com/c/go/+/272867 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/escape.go | 2 +- src/cmd/compile/internal/gc/fmt.go | 8 +++--- src/cmd/compile/internal/gc/iexport.go | 14 ++++------ src/cmd/compile/internal/gc/initorder.go | 9 +++--- src/cmd/compile/internal/gc/inl.go | 35 ++++++++++++------------ src/cmd/compile/internal/gc/scc.go | 10 +++++-- src/cmd/compile/internal/gc/sinit.go | 14 +++++----- src/cmd/compile/internal/gc/ssa.go | 3 ++ src/cmd/compile/internal/gc/syntax.go | 18 +++--------- src/cmd/compile/internal/gc/typecheck.go | 10 +++---- src/cmd/compile/internal/gc/walk.go | 2 +- 11 files changed, 60 insertions(+), 65 deletions(-) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 07cc5498259bb..497151d02f33f 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -476,7 +476,7 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) { default: Fatalf("unexpected expr: %v", n) - case OLITERAL, ONIL, OGETG, OCLOSUREVAR, OTYPE: + case OLITERAL, ONIL, OGETG, OCLOSUREVAR, OTYPE, OMETHEXPR: // nop case ONAME: diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index e62a526eebc72..addb010e5c71e 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -1355,15 +1355,15 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) { mode.Fprintf(s, ")") } - // Special case: name used as local variable in export. - // _ becomes ~b%d internally; print as _ for export case ONAME: + // Special case: name used as local variable in export. + // _ becomes ~b%d internally; print as _ for export if mode == FErr && n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' { fmt.Fprint(s, "_") return } fallthrough - case OPACK, ONONAME: + case OPACK, ONONAME, OMETHEXPR: fmt.Fprint(s, smodeString(n.Sym, mode)) case OTYPE: @@ -1695,7 +1695,7 @@ func (n *Node) nodedump(s fmt.State, flag FmtFlag, mode fmtMode) { case OLITERAL: mode.Fprintf(s, "%v-%v%j", n.Op, n.Val(), n) - case ONAME, ONONAME: + case ONAME, ONONAME, OMETHEXPR: if n.Sym != nil { mode.Fprintf(s, "%v-%v%j", n.Op, n.Sym, n) } else { diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index d661fca2d1447..842025705b9c2 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -1218,18 +1218,16 @@ func (w *exportWriter) expr(n *Node) { w.pos(n.Pos) w.value(n.Type, n.Val()) - case ONAME: + case OMETHEXPR: // Special case: explicit name of func (*T) method(...) 
is turned into pkg.(*T).method, // but for export, this should be rendered as (*pkg.T).meth. // These nodes have the special property that they are names with a left OTYPE and a right ONAME. - if n.isMethodExpression() { - w.op(OXDOT) - w.pos(n.Pos) - w.expr(n.Left) // n.Left.Op == OTYPE - w.selector(n.Right.Sym) - break - } + w.op(OXDOT) + w.pos(n.Pos) + w.expr(n.Left) // n.Left.Op == OTYPE + w.selector(n.Right.Sym) + case ONAME: // Package scope name. if (n.Class() == PEXTERN || n.Class() == PFUNC) && !n.isBlank() { w.op(ONONAME) diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go index f82df04b7307b..ecbfc5631a3e6 100644 --- a/src/cmd/compile/internal/gc/initorder.go +++ b/src/cmd/compile/internal/gc/initorder.go @@ -273,12 +273,11 @@ func (d *initDeps) inspectList(l Nodes) { inspectList(l, d.visit) } // referenced by n, if any. func (d *initDeps) visit(n *Node) bool { switch n.Op { - case ONAME: - if n.isMethodExpression() { - d.foundDep(n.MethodName()) - return false - } + case OMETHEXPR: + d.foundDep(n.MethodName()) + return false + case ONAME: switch n.Class() { case PEXTERN, PFUNC: d.foundDep(n) diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index db53b2aae147c..0695b161f1249 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -260,15 +260,14 @@ func inlFlood(n *Node) { // because after inlining they might be callable. inspectList(asNodes(n.Func.Inl.Body), func(n *Node) bool { switch n.Op { + case OMETHEXPR: + inlFlood(n.MethodName()) + case ONAME: switch n.Class() { case PFUNC: - if n.isMethodExpression() { - inlFlood(n.MethodName()) - } else { - inlFlood(n) - exportsym(n) - } + inlFlood(n) + exportsym(n) case PEXTERN: exportsym(n) } @@ -709,17 +708,16 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node { func inlCallee(fn *Node) *Node { fn = staticValue(fn) switch { - case fn.Op == ONAME && fn.Class() == PFUNC: - if fn.isMethodExpression() { - n := fn.MethodName() - // Check that receiver type matches fn.Left. - // TODO(mdempsky): Handle implicit dereference - // of pointer receiver argument? - if n == nil || !types.Identical(n.Type.Recv().Type, fn.Left.Type) { - return nil - } - return n + case fn.Op == OMETHEXPR: + n := fn.MethodName() + // Check that receiver type matches fn.Left. + // TODO(mdempsky): Handle implicit dereference + // of pointer receiver argument? + if n == nil || !types.Identical(n.Type.Recv().Type, fn.Left.Type) { + return nil } + return n + case fn.Op == ONAME && fn.Class() == PFUNC: return fn case fn.Op == OCLOSURE: c := fn.Func.Decl @@ -963,7 +961,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { ninit.AppendNodes(&callee.Ninit) callee = callee.Left } - if callee.Op != ONAME && callee.Op != OCLOSURE { + if callee.Op != ONAME && callee.Op != OCLOSURE && callee.Op != OMETHEXPR { Fatalf("unexpected callee expression: %v", callee) } } @@ -1323,6 +1321,9 @@ func (subst *inlsubst) node(n *Node) *Node { } return n + case OMETHEXPR: + return n + case OLITERAL, ONIL, OTYPE: // If n is a named constant or type, we can continue // using it in the inline copy. 
Otherwise, make a copy diff --git a/src/cmd/compile/internal/gc/scc.go b/src/cmd/compile/internal/gc/scc.go index 8e41ebac29c65..891012cbc9f2b 100644 --- a/src/cmd/compile/internal/gc/scc.go +++ b/src/cmd/compile/internal/gc/scc.go @@ -77,15 +77,19 @@ func (v *bottomUpVisitor) visit(n *Node) uint32 { switch n.Op { case ONAME: if n.Class() == PFUNC { - if n.isMethodExpression() { - n = n.MethodName() - } if n != nil && n.Name.Defn != nil { if m := v.visit(n.Name.Defn); m < min { min = m } } } + case OMETHEXPR: + fn := n.MethodName() + if fn != nil && fn.Name.Defn != nil { + if m := v.visit(fn.Name.Defn); m < min { + min = m + } + } case ODOTMETH: fn := n.MethodName() if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil { diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 5727245562420..3b4056cf7d208 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -68,7 +68,7 @@ func (s *InitSchedule) tryStaticInit(n *Node) bool { // like staticassign but we are copying an already // initialized value r. func (s *InitSchedule) staticcopy(l *Node, r *Node) bool { - if r.Op != ONAME { + if r.Op != ONAME && r.Op != OMETHEXPR { return false } if r.Class() == PFUNC { @@ -95,7 +95,7 @@ func (s *InitSchedule) staticcopy(l *Node, r *Node) bool { } switch r.Op { - case ONAME: + case ONAME, OMETHEXPR: if s.staticcopy(l, r) { return true } @@ -171,7 +171,7 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool { } switch r.Op { - case ONAME: + case ONAME, OMETHEXPR: return s.staticcopy(l, r) case ONIL: @@ -383,7 +383,7 @@ func readonlystaticname(t *types.Type) *Node { } func (n *Node) isSimpleName() bool { - return n.Op == ONAME && n.Class() != PAUTOHEAP && n.Class() != PEXTERN + return (n.Op == ONAME || n.Op == OMETHEXPR) && n.Class() != PAUTOHEAP && n.Class() != PEXTERN } func litas(l *Node, r *Node, init *Nodes) { @@ -870,7 +870,7 @@ func anylit(n *Node, var_ *Node, init *Nodes) { default: Fatalf("anylit: not lit, op=%v node=%v", n.Op, n) - case ONAME: + case ONAME, OMETHEXPR: a := nod(OAS, var_, n) a = typecheck(a, ctxStmt) init.Append(a) @@ -1007,7 +1007,7 @@ func stataddr(nam *Node, n *Node) bool { } switch n.Op { - case ONAME: + case ONAME, OMETHEXPR: *nam = *n return true @@ -1172,7 +1172,7 @@ func genAsStatic(as *Node) { switch { case as.Right.Op == OLITERAL: litsym(&nam, as.Right, int(as.Right.Type.Width)) - case as.Right.Op == ONAME && as.Right.Class() == PFUNC: + case (as.Right.Op == ONAME || as.Right.Op == OMETHEXPR) && as.Right.Class() == PFUNC: pfuncsym(&nam, as.Right) default: Fatalf("genAsStatic: rhs %v", as.Right) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index e23a189d7176d..88ff8d684c5ec 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -2016,6 +2016,9 @@ func (s *state) expr(n *Node) *ssa.Value { case OCFUNC: aux := n.Left.Sym.Linksym() return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb) + case OMETHEXPR: + sym := funcsym(n.Sym).Linksym() + return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), sym, s.sb) case ONAME: if n.Class() == PFUNC { // "value" of a function is the address of the function's closure diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go index 343d5b171c36b..39f2996808ecb 100644 --- a/src/cmd/compile/internal/gc/syntax.go +++ b/src/cmd/compile/internal/gc/syntax.go @@ -303,11 +303,6 @@ func (n *Node) mayBeShared() bool { return 
false } -// isMethodExpression reports whether n represents a method expression T.M. -func (n *Node) isMethodExpression() bool { - return n.Op == ONAME && n.Left != nil && n.Left.Op == OTYPE && n.Right != nil && n.Right.Op == ONAME -} - // funcname returns the name (without the package) of the function n. func (n *Node) funcname() string { if n == nil || n.Func == nil || n.Func.Nname == nil { @@ -599,15 +594,10 @@ func (p *Param) SetEmbedFiles(list []string) { // will be the qualified method name (e.g., "T.m") and // f.Func.Shortname is the bare method name (e.g., "m"). // -// A method expression (T.M) is represented as an ONAME node -// like a function name would be, but n.Left and n.Right point to -// the type and method, respectively. A method expression can -// be distinguished from a normal function ONAME by checking -// n.IsMethodExpression. Unlike ordinary ONAME nodes, each -// distinct mention of a method expression in the source code -// constructs a fresh ONAME node. -// TODO(rsc): Method expressions deserve their own opcode -// instead of violating invariants of ONAME. +// A method expression (T.M) is represented as an OMETHEXPR node, +// in which n.Left and n.Right point to the type and method, respectively. +// Each distinct mention of a method expression in the source code +// constructs a fresh node. // // A method value (t.M) is represented by ODOTMETH/ODOTINTER // when it is called directly and by OCALLPART otherwise. diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 11c1ae38ea735..5cc7c8a34c4e0 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -2408,7 +2408,7 @@ func typecheckMethodExpr(n *Node) (res *Node) { return n } - n.Op = ONAME + n.Op = OMETHEXPR if n.Name == nil { n.Name = new(Name) } @@ -2668,7 +2668,7 @@ notenough: // call is the expression being called, not the overall call. // Method expressions have the form T.M, and the compiler has // rewritten those to ONAME nodes but left T in Left. - if call.isMethodExpression() { + if call.Op == OMETHEXPR { yyerror("not enough arguments in call to method expression %v%s", call, details) } else { yyerror("not enough arguments in call to %v%s", call, details) @@ -4032,10 +4032,10 @@ func (n *Node) MethodName() *Node { // MethodFunc is like MethodName, but returns the types.Field instead. func (n *Node) MethodFunc() *types.Field { - switch { - case n.Op == ODOTMETH || n.isMethodExpression(): + switch n.Op { + case ODOTMETH, OMETHEXPR: return n.Opt().(*types.Field) - case n.Op == OCALLPART: + case OCALLPART: return callpartMethod(n) } Fatalf("unexpected node: %v (%v)", n, n.Op) diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index ae344fc8e101b..7bf5281a67308 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -464,7 +464,7 @@ opswitch: Dump("walk", n) Fatalf("walkexpr: switch 1 unknown op %+S", n) - case ONONAME, OEMPTY, OGETG, ONEWOBJ: + case ONONAME, OEMPTY, OGETG, ONEWOBJ, OMETHEXPR: case OTYPE, ONAME, OLITERAL, ONIL: // TODO(mdempsky): Just return n; see discussion on CL 38655. From c22bc745c3b822cdf6da0ea2f9b5cac858e5a5ac Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Tue, 24 Nov 2020 11:07:48 -0500 Subject: [PATCH 024/474] [dev.regabi] cmd/compile: delete n.List after collapsing OADDSTR to OLITERAL The leftover n.List is clearly unnecessary, but it makes the inlining cost of the expression unnecessarily high. 
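For intuition, a toy sketch of the mechanism (the Node and cost below are
invented stand-ins, not the compiler's actual cost visitor): an inlining
budget walk charges for every node it can reach, so operands left dangling
in List after the fold to OLITERAL still cost budget.

	package main

	import "fmt"

	// Node is a drastically simplified stand-in for the compiler's *Node.
	type Node struct {
		Op   string
		List []*Node // child operands; stale once n is folded to a literal
	}

	// cost loosely mimics an inlining budget walk: one unit per node,
	// recursing through child slices such as List.
	func cost(n *Node) int {
		if n == nil {
			return 0
		}
		c := 1
		for _, child := range n.List {
			c += cost(child)
		}
		return c
	}

	func main() {
		// An OADDSTR collapsed to OLITERAL, old operand list still attached.
		folded := &Node{Op: "OLITERAL", List: []*Node{{Op: "OLITERAL"}, {Op: "OLITERAL"}}}
		fmt.Println(cost(folded)) // 3: stale operands are still charged

		folded.List = nil // what the one-line n.List.Set(nil) fix accomplishes
		fmt.Println(cost(folded)) // 1
	}

The toolstash log below shows the effect on a real function: the same body
can inline at cost 9 with this change versus cost 11 without it.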
This change breaks toolstash -cmp: # cmd/internal/src toolstash: compiler output differs, with optimizers disabled (-N) inconsistent log line: /tmp/go-build866291351/b230/_pkg_.a.log:77: /Users/rsc/go/src/cmd/internal/src/pos.go:275:6: can inline (*PosBase).SymFilename with cost 9 as: method(*PosBase) func() string { if b != nil { return b.symFilename }; return "gofile..??" } /tmp/go-build866291351/b230/_pkg_.a.stash.log:77: /Users/rsc/go/src/cmd/internal/src/pos.go:275:6: can inline (*PosBase).SymFilename with cost 11 as: method(*PosBase) func() string { if b != nil { return b.symFilename }; return "gofile..??" } Separated from other constant work so that the bigger CL can pass toolstash -cmp. Change-Id: I5c7ddbc8373207b5b9824eafb8639488da0ca1b7 Reviewed-on: https://go-review.googlesource.com/c/go/+/272868 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/const.go | 1 + 1 file changed, 1 insertion(+) diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index c30d24ae1a7e0..ebf3896a0a9a3 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -604,6 +604,7 @@ func evconst(n *Node) { if len(s) == 1 && Isconst(s[0], constant.String) { n.Op = OLITERAL n.SetVal(s[0].Val()) + n.List.Set(nil) } else { n.List.Set(s) } From 6826287c6b1ff2e3f23611472a9d81ac5e3aa89a Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Tue, 24 Nov 2020 09:37:54 -0500 Subject: [PATCH 025/474] [dev.regabi] cmd/compile: replace evconst with non-mutating version evconst is one of the largest sources of Op rewrites, which prevent separating different kinds of nodes (in this case, arithmetic nodes and OLITERAL nodes). The change in swt.go is necessary because otherwise the syntax graph ends up containing that OLEN expression multiple times, which violates the invariant that it's a tree except for ONAME, OLITERAL, and OTYPE nodes. (Before, the OLEN was overwritten by an OLITERAL, so the invariant still held, but now that we don't overwrite it, we need a different copy for each instance.) Passes toolstash -cmp. Change-Id: Ia004774ab6852fb384805d0f9f9f234b40842811 Reviewed-on: https://go-review.googlesource.com/c/go/+/272869 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/const.go | 115 +++++++++++++---------- src/cmd/compile/internal/gc/swt.go | 5 +- src/cmd/compile/internal/gc/typecheck.go | 15 ++- src/cmd/compile/internal/gc/walk.go | 4 +- 4 files changed, 76 insertions(+), 63 deletions(-) diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index ebf3896a0a9a3..18d5feb81398d 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -542,87 +542,105 @@ func Isconst(n *Node, ct constant.Kind) bool { return consttype(n) == ct } -// evconst rewrites constant expressions into OLITERAL nodes. -func evconst(n *Node) { +// evalConst returns a constant-evaluated expression equivalent to n. +// If n is not a constant, evalConst returns n. +// Otherwise, evalConst returns a new OLITERAL with the same value as n, +// and with .Orig pointing back to n. +func evalConst(n *Node) *Node { nl, nr := n.Left, n.Right // Pick off just the opcodes that can be constant evaluated. 
switch op := n.Op; op { case OPLUS, ONEG, OBITNOT, ONOT: if nl.Op == OLITERAL { - setconst(n, unaryOp(op, nl.Val(), n.Type)) + return origConst(n, unaryOp(op, nl.Val(), n.Type)) } case OADD, OSUB, OMUL, ODIV, OMOD, OOR, OXOR, OAND, OANDNOT, OOROR, OANDAND: if nl.Op == OLITERAL && nr.Op == OLITERAL { - setconst(n, binaryOp(nl.Val(), op, nr.Val())) + return origConst(n, binaryOp(nl.Val(), op, nr.Val())) } case OEQ, ONE, OLT, OLE, OGT, OGE: if nl.Op == OLITERAL && nr.Op == OLITERAL { - setboolconst(n, compareOp(nl.Val(), op, nr.Val())) + return origBoolConst(n, compareOp(nl.Val(), op, nr.Val())) } case OLSH, ORSH: if nl.Op == OLITERAL && nr.Op == OLITERAL { - setconst(n, shiftOp(nl.Val(), op, nr.Val())) + return origConst(n, shiftOp(nl.Val(), op, nr.Val())) } case OCONV, ORUNESTR: if okforconst[n.Type.Etype] && nl.Op == OLITERAL { - setconst(n, convertVal(nl.Val(), n.Type, true)) + return origConst(n, convertVal(nl.Val(), n.Type, true)) } case OCONVNOP: if okforconst[n.Type.Etype] && nl.Op == OLITERAL { // set so n.Orig gets OCONV instead of OCONVNOP n.Op = OCONV - setconst(n, nl.Val()) + return origConst(n, nl.Val()) } case OADDSTR: // Merge adjacent constants in the argument list. s := n.List.Slice() - for i1 := 0; i1 < len(s); i1++ { - if Isconst(s[i1], constant.String) && i1+1 < len(s) && Isconst(s[i1+1], constant.String) { - // merge from i1 up to but not including i2 + need := 0 + for i := 0; i < len(s); i++ { + if i == 0 || !Isconst(s[i-1], constant.String) || !Isconst(s[i], constant.String) { + // Can't merge s[i] into s[i-1]; need a slot in the list. + need++ + } + } + if need == len(s) { + return n + } + if need == 1 { + var strs []string + for _, c := range s { + strs = append(strs, c.StringVal()) + } + return origConst(n, Val{U: strings.Join(strs, "")}) + } + newList := make([]*Node, 0, need) + for i := 0; i < len(s); i++ { + if Isconst(s[i], constant.String) && i+1 < len(s) && Isconst(s[i+1], constant.String) { + // merge from i up to but not including i2 var strs []string - i2 := i1 + i2 := i for i2 < len(s) && Isconst(s[i2], constant.String) { strs = append(strs, s[i2].StringVal()) i2++ } - nl := *s[i1] - nl.Orig = &nl - nl.SetVal(Val{strings.Join(strs, "")}) - s[i1] = &nl - s = append(s[:i1+1], s[i2:]...) 
+ nl := origConst(s[i], Val{U: strings.Join(strs, "")}) + nl.Orig = nl // it's bigger than just s[i] + newList = append(newList, nl) + i = i2 - 1 + } else { + newList = append(newList, s[i]) } } - if len(s) == 1 && Isconst(s[0], constant.String) { - n.Op = OLITERAL - n.SetVal(s[0].Val()) - n.List.Set(nil) - } else { - n.List.Set(s) - } + n = n.copy() + n.List.Set(newList) + return n case OCAP, OLEN: switch nl.Type.Etype { case TSTRING: if Isconst(nl, constant.String) { - setintconst(n, int64(len(nl.StringVal()))) + return origIntConst(n, int64(len(nl.StringVal()))) } case TARRAY: if !hascallchan(nl) { - setintconst(n, nl.Type.NumElem()) + return origIntConst(n, nl.Type.NumElem()) } } case OALIGNOF, OOFFSETOF, OSIZEOF: - setintconst(n, evalunsafe(n)) + return origIntConst(n, evalunsafe(n)) case OREAL, OIMAG: if nl.Op == OLITERAL { @@ -647,7 +665,7 @@ func evconst(n *Node) { } re = im } - setconst(n, Val{re}) + return origConst(n, Val{re}) } case OCOMPLEX: @@ -656,9 +674,11 @@ func evconst(n *Node) { c := newMpcmplx() c.Real.Set(toflt(nl.Val()).U.(*Mpflt)) c.Imag.Set(toflt(nr.Val()).U.(*Mpflt)) - setconst(n, Val{c}) + return origConst(n, Val{c}) } } + + return n } func match(x, y Val) (Val, Val) { @@ -927,27 +947,21 @@ func shiftOp(x Val, op Op, y Val) Val { return Val{U: u} } -// setconst rewrites n as an OLITERAL with value v. -func setconst(n *Node, v Val) { - // If constant folding failed, mark n as broken and give up. +// origConst returns an OLITERAL with orig n and value v. +func origConst(n *Node, v Val) *Node { + // If constant folding was attempted (we were called) + // but it produced an invalid constant value, + // mark n as broken and give up. if v.U == nil { n.Type = nil - return - } - - // Ensure n.Orig still points to a semantically-equivalent - // expression after we rewrite n into a constant. - if n.Orig == n { - n.Orig = n.sepcopy() + return n } - *n = Node{ - Op: OLITERAL, - Pos: n.Pos, - Orig: n.Orig, - Type: n.Type, - Xoffset: BADWIDTH, - } + orig := n + n = nod(OLITERAL, nil, nil) + n.Orig = orig + n.Pos = orig.Pos + n.Type = orig.Type n.SetVal(v) // Check range. @@ -965,6 +979,7 @@ func setconst(n *Node, v Val) { n.SetVal(Val{trunccmplxlit(v.U.(*Mpcplx), n.Type)}) } } + return n } func assertRepresents(t *types.Type, v Val) { @@ -983,14 +998,14 @@ func represents(t *types.Type, v Val) bool { return t == vt || (t == types.UntypedRune && vt == types.UntypedInt) } -func setboolconst(n *Node, v bool) { - setconst(n, Val{U: v}) +func origBoolConst(n *Node, v bool) *Node { + return origConst(n, Val{U: v}) } -func setintconst(n *Node, v int64) { +func origIntConst(n *Node, v int64) *Node { u := new(Mpint) u.SetInt64(v) - setconst(n, Val{u}) + return origConst(n, Val{u}) } // nodlit returns a new untyped constant with value v. diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index 068f1a34e1f5a..8459bd7c181ef 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -386,14 +386,13 @@ func (s *exprSwitch) flush() { runs = append(runs, cc[start:]) // Perform two-level binary search. 
- nlen := nod(OLEN, s.exprname, nil) binarySearch(len(runs), &s.done, func(i int) *Node { - return nod(OLE, nlen, nodintconst(runLen(runs[i-1]))) + return nod(OLE, nod(OLEN, s.exprname, nil), nodintconst(runLen(runs[i-1]))) }, func(i int, nif *Node) { run := runs[i] - nif.Left = nod(OEQ, nlen, nodintconst(runLen(run))) + nif.Left = nod(OEQ, nod(OLEN, s.exprname, nil), nodintconst(runLen(run))) s.search(run, &nif.Nbody) }, ) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 5cc7c8a34c4e0..e014a0ba2d7d7 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -776,7 +776,7 @@ func typecheck1(n *Node, top int) (res *Node) { } if iscmp[n.Op] { - evconst(n) + n = evalConst(n) t = types.UntypedBool if n.Op != OLITERAL { l, r = defaultlit2(l, r, true) @@ -786,12 +786,13 @@ func typecheck1(n *Node, top int) (res *Node) { } if et == TSTRING && n.Op == OADD { - // create OADDSTR node with list of strings in x + y + z + (w + v) + ... - n.Op = OADDSTR - + // create or update OADDSTR node with list of strings in x + y + z + (w + v) + ... if l.Op == OADDSTR { - n.List.Set(l.List.Slice()) + orig := n + n = l + n.Pos = orig.Pos } else { + n = nodl(n.Pos, OADDSTR, nil, nil) n.List.Set1(l) } if r.Op == OADDSTR { @@ -799,8 +800,6 @@ func typecheck1(n *Node, top int) (res *Node) { } else { n.List.Append(r) } - n.Left = nil - n.Right = nil } if (op == ODIV || op == OMOD) && Isconst(r, constant.Int) { @@ -2091,7 +2090,7 @@ func typecheck1(n *Node, top int) (res *Node) { } } - evconst(n) + n = evalConst(n) if n.Op == OTYPE && top&ctxType == 0 { if !n.Type.Broke() { yyerror("type %v is not an expression", n.Type) diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 7bf5281a67308..9971fb0c0dd7f 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -513,7 +513,7 @@ opswitch: } if t.IsArray() { safeexpr(n.Left, init) - setintconst(n, t.NumElem()) + n = origIntConst(n, t.NumElem()) n.SetTypecheck(1) } @@ -1580,7 +1580,7 @@ opswitch: // walk of y%1 may have replaced it by 0. // Check whether n with its updated args is itself now a constant. t := n.Type - evconst(n) + n = evalConst(n) if n.Type != t { Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type) } From 7d72951229a4d55c5643d0ec7d7f7653d6efda3d Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Fri, 13 Nov 2020 23:36:48 -0800 Subject: [PATCH 026/474] [dev.regabi] cmd/compile: replace Val with go/constant.Value MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This replaces the compiler's legacy constant representation with go/constant, which is used by go/types. This should ease integrating with the new go/types-based type checker in the future. Performance difference is mixed, but there's still room for improvement. 
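As an aside before the benchmark table, here is a minimal self-contained
sketch of the go/constant API this CL adopts in place of Val. The calls
below are the standard library's own; the parallels drawn to the compiler
in the comments are illustrative only.

	package main

	import (
		"fmt"
		"go/constant"
		"go/token"
	)

	func main() {
		x := constant.MakeInt64(7)
		y := constant.MakeInt64(2)

		// Values carry a Kind, replacing the old type switch on Val.U.
		fmt.Println(x.Kind() == constant.Int) // true

		// Arithmetic goes through go/token operators. Plain token.QUO on
		// two ints yields an exact rational; token.QUO_ASSIGN forces the
		// integer division that evalConst needs (see the const.go diff).
		fmt.Println(constant.BinaryOp(x, token.QUO, y))        // 3.5
		fmt.Println(constant.BinaryOp(x, token.QUO_ASSIGN, y)) // 3

		// Comparisons and shifts have dedicated entry points.
		fmt.Println(constant.Compare(x, token.GTR, y)) // true
		fmt.Println(constant.Shift(x, token.SHL, 4))   // 112

		// Conversions that Mpint/Mpflt used to hand-roll.
		f := constant.ToFloat(x)
		fmt.Println(f.Kind() == constant.Float) // true
	}

go/constant keeps values exact (big integers and rationals) where it can,
which is why the compiler can drop its Mpint and Mpflt wrappers wholesale.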
name old time/op new time/op delta Template 280ms ± 6% 281ms ± 6% ~ (p=0.488 n=592+587) Unicode 132ms ±11% 129ms ±11% -2.61% (p=0.000 n=592+591) GoTypes 865ms ± 3% 866ms ± 3% +0.16% (p=0.019 n=572+577) Compiler 3.60s ± 3% 3.60s ± 3% ~ (p=0.083 n=578+582) SSA 8.27s ± 2% 8.28s ± 2% +0.14% (p=0.002 n=575+580) Flate 177ms ± 8% 176ms ± 8% ~ (p=0.133 n=580+590) GoParser 238ms ± 7% 237ms ± 6% ~ (p=0.569 n=587+591) Reflect 542ms ± 4% 543ms ± 4% ~ (p=0.064 n=581+579) Tar 244ms ± 6% 244ms ± 6% ~ (p=0.880 n=586+584) XML 322ms ± 5% 322ms ± 5% ~ (p=0.449 n=589+590) LinkCompiler 454ms ± 6% 453ms ± 6% ~ (p=0.249 n=585+583) ExternalLinkCompiler 1.35s ± 4% 1.35s ± 4% ~ (p=0.968 n=590+588) LinkWithoutDebugCompiler 279ms ± 7% 280ms ± 7% ~ (p=0.270 n=589+586) [Geo mean] 535ms 534ms -0.17% name old user-time/op new user-time/op delta Template 599ms ±22% 602ms ±21% ~ (p=0.377 n=588+590) Unicode 410ms ±43% 376ms ±39% -8.36% (p=0.000 n=596+586) GoTypes 1.96s ±15% 1.97s ±17% +0.70% (p=0.031 n=596+594) Compiler 7.47s ± 9% 7.50s ± 8% +0.38% (p=0.031 n=591+583) SSA 16.2s ± 4% 16.2s ± 5% ~ (p=0.617 n=531+531) Flate 298ms ±25% 292ms ±30% -2.14% (p=0.001 n=594+596) GoParser 379ms ±20% 381ms ±21% ~ (p=0.312 n=578+584) Reflect 1.24s ±20% 1.25s ±23% +0.88% (p=0.031 n=592+596) Tar 471ms ±23% 473ms ±21% ~ (p=0.616 n=593+587) XML 674ms ±20% 681ms ±21% +1.03% (p=0.050 n=584+587) LinkCompiler 842ms ±10% 839ms ±10% ~ (p=0.074 n=587+590) ExternalLinkCompiler 1.65s ± 7% 1.65s ± 7% ~ (p=0.767 n=590+585) LinkWithoutDebugCompiler 378ms ±11% 379ms ±12% ~ (p=0.677 n=591+586) [Geo mean] 1.02s 1.02s -0.52% name old alloc/op new alloc/op delta Template 37.4MB ± 0% 37.4MB ± 0% +0.06% (p=0.000 n=589+585) Unicode 29.6MB ± 0% 28.6MB ± 0% -3.11% (p=0.000 n=574+566) GoTypes 120MB ± 0% 120MB ± 0% -0.01% (p=0.000 n=594+593) Compiler 568MB ± 0% 568MB ± 0% -0.02% (p=0.000 n=588+591) SSA 1.45GB ± 0% 1.45GB ± 0% -0.16% (p=0.000 n=596+592) Flate 22.6MB ± 0% 22.5MB ± 0% -0.36% (p=0.000 n=593+595) GoParser 30.1MB ± 0% 30.1MB ± 0% -0.01% (p=0.000 n=590+594) Reflect 77.8MB ± 0% 77.8MB ± 0% ~ (p=0.631 n=584+591) Tar 34.1MB ± 0% 34.1MB ± 0% -0.04% (p=0.000 n=584+588) XML 43.6MB ± 0% 43.6MB ± 0% +0.07% (p=0.000 n=593+591) LinkCompiler 98.6MB ± 0% 98.6MB ± 0% ~ (p=0.096 n=590+589) ExternalLinkCompiler 89.6MB ± 0% 89.6MB ± 0% ~ (p=0.695 n=590+587) LinkWithoutDebugCompiler 57.2MB ± 0% 57.2MB ± 0% ~ (p=0.674 n=590+589) [Geo mean] 78.5MB 78.3MB -0.28% name old allocs/op new allocs/op delta Template 379k ± 0% 380k ± 0% +0.33% (p=0.000 n=593+590) Unicode 344k ± 0% 338k ± 0% -1.67% (p=0.000 n=594+589) GoTypes 1.30M ± 0% 1.31M ± 0% +0.19% (p=0.000 n=592+591) Compiler 5.40M ± 0% 5.41M ± 0% +0.23% (p=0.000 n=587+585) SSA 14.2M ± 0% 14.2M ± 0% +0.08% (p=0.000 n=594+591) Flate 231k ± 0% 230k ± 0% -0.42% (p=0.000 n=588+589) GoParser 314k ± 0% 315k ± 0% +0.16% (p=0.000 n=587+594) Reflect 975k ± 0% 976k ± 0% +0.10% (p=0.000 n=590+594) Tar 344k ± 0% 345k ± 0% +0.24% (p=0.000 n=595+590) XML 422k ± 0% 424k ± 0% +0.57% (p=0.000 n=590+589) LinkCompiler 538k ± 0% 538k ± 0% -0.00% (p=0.045 n=592+587) ExternalLinkCompiler 593k ± 0% 593k ± 0% ~ (p=0.171 n=588+587) LinkWithoutDebugCompiler 172k ± 0% 172k ± 0% ~ (p=0.996 n=590+585) [Geo mean] 685k 685k -0.02% name old maxRSS/op new maxRSS/op delta Template 53.7M ± 8% 53.8M ± 8% ~ (p=0.666 n=576+574) Unicode 54.4M ±12% 55.0M ±10% +1.15% (p=0.000 n=591+588) GoTypes 95.1M ± 4% 95.1M ± 4% ~ (p=0.948 n=589+591) Compiler 334M ± 6% 334M ± 6% ~ (p=0.875 n=592+593) SSA 792M ± 5% 791M ± 5% ~ (p=0.067 n=592+591) Flate 39.9M ±11% 40.0M ±10% ~ 
(p=0.131 n=596+596) GoParser 45.2M ±11% 45.3M ±11% ~ (p=0.353 n=592+590) Reflect 76.1M ± 5% 76.2M ± 5% ~ (p=0.114 n=594+594) Tar 49.4M ±10% 49.6M ± 9% +0.57% (p=0.015 n=590+593) XML 57.4M ± 9% 57.7M ± 8% +0.67% (p=0.000 n=592+580) LinkCompiler 183M ± 2% 183M ± 2% ~ (p=0.229 n=587+591) ExternalLinkCompiler 187M ± 2% 187M ± 3% ~ (p=0.362 n=571+562) LinkWithoutDebugCompiler 143M ± 3% 143M ± 3% ~ (p=0.350 n=584+586) [Geo mean] 103M 103M +0.23% Passes toolstash-check. Fixes #4617. Change-Id: Id4f6759b4afc5e002770091d0d4f6e272ee6cbdd Reviewed-on: https://go-review.googlesource.com/c/go/+/272654 Reviewed-by: Robert Griesemer Trust: Matthew Dempsky --- src/cmd/compile/fmtmap_test.go | 12 +- src/cmd/compile/internal/gc/const.go | 895 +++++++++-------------- src/cmd/compile/internal/gc/dcl.go | 4 +- src/cmd/compile/internal/gc/export.go | 2 +- src/cmd/compile/internal/gc/fmt.go | 84 +-- src/cmd/compile/internal/gc/go.go | 8 - src/cmd/compile/internal/gc/iexport.go | 30 +- src/cmd/compile/internal/gc/iimport.go | 36 +- src/cmd/compile/internal/gc/main.go | 7 +- src/cmd/compile/internal/gc/mpfloat.go | 357 --------- src/cmd/compile/internal/gc/mpint.go | 303 -------- src/cmd/compile/internal/gc/noder.go | 85 +-- src/cmd/compile/internal/gc/obj.go | 71 +- src/cmd/compile/internal/gc/sinit.go | 20 +- src/cmd/compile/internal/gc/ssa.go | 43 +- src/cmd/compile/internal/gc/subr.go | 9 +- src/cmd/compile/internal/gc/swt.go | 3 +- src/cmd/compile/internal/gc/syntax.go | 16 +- src/cmd/compile/internal/gc/typecheck.go | 55 +- src/cmd/compile/internal/gc/universe.go | 33 - src/cmd/compile/internal/gc/walk.go | 20 +- src/cmd/compile/internal/types/type.go | 14 +- test/fixedbugs/issue20232.go | 4 +- 23 files changed, 572 insertions(+), 1539 deletions(-) delete mode 100644 src/cmd/compile/internal/gc/mpfloat.go delete mode 100644 src/cmd/compile/internal/gc/mpint.go diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go index 51134e4919d3a..691eee3a1b289 100644 --- a/src/cmd/compile/fmtmap_test.go +++ b/src/cmd/compile/fmtmap_test.go @@ -22,8 +22,6 @@ package main_test var knownFormats = map[string]string{ "*bytes.Buffer %s": "", "*cmd/compile/internal/gc.EscLocation %v": "", - "*cmd/compile/internal/gc.Mpflt %v": "", - "*cmd/compile/internal/gc.Mpint %v": "", "*cmd/compile/internal/gc.Node %#v": "", "*cmd/compile/internal/gc.Node %+S": "", "*cmd/compile/internal/gc.Node %+v": "", @@ -60,9 +58,7 @@ var knownFormats = map[string]string{ "*cmd/internal/obj.Addr %v": "", "*cmd/internal/obj.LSym %v": "", "*math/big.Float %f": "", - "*math/big.Int %#x": "", "*math/big.Int %s": "", - "*math/big.Int %v": "", "[16]byte %x": "", "[]*cmd/compile/internal/ssa.Block %v": "", "[]*cmd/compile/internal/ssa.Value %v": "", @@ -91,9 +87,6 @@ var knownFormats = map[string]string{ "cmd/compile/internal/gc.Nodes %v": "", "cmd/compile/internal/gc.Op %#v": "", "cmd/compile/internal/gc.Op %v": "", - "cmd/compile/internal/gc.Val %#v": "", - "cmd/compile/internal/gc.Val %T": "", - "cmd/compile/internal/gc.Val %v": "", "cmd/compile/internal/gc.fmtMode %d": "", "cmd/compile/internal/gc.initKind %d": "", "cmd/compile/internal/gc.itag %v": "", @@ -134,10 +127,10 @@ var knownFormats = map[string]string{ "error %v": "", "float64 %.2f": "", "float64 %.3f": "", - "float64 %.6g": "", "float64 %g": "", - "go/constant.Kind %d": "", "go/constant.Kind %v": "", + "go/constant.Value %#v": "", + "go/constant.Value %v": "", "int %#x": "", "int %-12d": "", "int %-6d": "", @@ -155,7 +148,6 @@ var knownFormats = map[string]string{ "int32 
%v": "", "int32 %x": "", "int64 %#x": "", - "int64 %+d": "", "int64 %-10d": "", "int64 %.5d": "", "int64 %d": "", diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 18d5feb81398d..84f0b11712673 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -9,83 +9,80 @@ import ( "cmd/internal/src" "fmt" "go/constant" + "go/token" + "math" "math/big" "strings" + "unicode" ) -type Val struct { - // U contains one of: - // bool bool when Ctype() == CTBOOL - // *Mpint int when Ctype() == CTINT - // *Mpflt float when Ctype() == CTFLT - // *Mpcplx pair of floats when Ctype() == CTCPLX - // string string when Ctype() == CTSTR - U interface{} -} +const ( + // Maximum size in bits for big.Ints before signalling + // overflow and also mantissa precision for big.Floats. + Mpprec = 512 +) -func (v Val) Kind() constant.Kind { - switch v.U.(type) { +// ValueInterface returns the constant value stored in n as an interface{}. +// It returns int64s for ints and runes, float64s for floats, +// and complex128s for complex values. +func (n *Node) ValueInterface() interface{} { + switch v := n.Val(); v.Kind() { default: - Fatalf("unexpected Ctype for %T", v.U) + Fatalf("unexpected constant: %v", v) panic("unreachable") - case nil: - return constant.Unknown - case bool: - return constant.Bool - case *Mpint: - return constant.Int - case *Mpflt: - return constant.Float - case *Mpcplx: - return constant.Complex - case string: - return constant.String + case constant.Bool: + return constant.BoolVal(v) + case constant.String: + return constant.StringVal(v) + case constant.Int: + return int64Val(n.Type, v) + case constant.Float: + return float64Val(v) + case constant.Complex: + return complex(float64Val(constant.Real(v)), float64Val(constant.Imag(v))) } } -func eqval(a, b Val) bool { - if a.Kind() != b.Kind() { - return false +// int64Val returns v converted to int64. +// Note: if t is uint64, very large values will be converted to negative int64. +func int64Val(t *types.Type, v constant.Value) int64 { + if t.IsUnsigned() { + if x, ok := constant.Uint64Val(v); ok { + return int64(x) + } + } else { + if x, ok := constant.Int64Val(v); ok { + return x + } } - switch x := a.U.(type) { - default: - Fatalf("unexpected Ctype for %T", a.U) - panic("unreachable") - case bool: - y := b.U.(bool) - return x == y - case *Mpint: - y := b.U.(*Mpint) - return x.Cmp(y) == 0 - case *Mpflt: - y := b.U.(*Mpflt) - return x.Cmp(y) == 0 - case *Mpcplx: - y := b.U.(*Mpcplx) - return x.Real.Cmp(&y.Real) == 0 && x.Imag.Cmp(&y.Imag) == 0 - case string: - y := b.U.(string) - return x == y + Fatalf("%v out of range for %v", v, t) + panic("unreachable") +} + +func float64Val(v constant.Value) float64 { + if x, _ := constant.Float64Val(v); !math.IsInf(x, 0) { + return x + 0 // avoid -0 (should not be needed, but be conservative) } + Fatalf("bad float64 value: %v", v) + panic("unreachable") } -// Interface returns the constant value stored in v as an interface{}. -// It returns int64s for ints and runes, float64s for floats, -// complex128s for complex values, and nil for constant nils. 
-func (v Val) Interface() interface{} { - switch x := v.U.(type) { +func bigFloatVal(v constant.Value) *big.Float { + f := new(big.Float) + f.SetPrec(Mpprec) + switch u := constant.Val(v).(type) { + case int64: + f.SetInt64(u) + case *big.Int: + f.SetInt(u) + case *big.Float: + f.Set(u) + case *big.Rat: + f.SetRat(u) default: - Fatalf("unexpected Interface for %T", v.U) - panic("unreachable") - case bool, string: - return x - case *Mpint: - return x.Int64() - case *Mpflt: - return x.Float64() - case *Mpcplx: - return complex(x.Real.Float64(), x.Imag.Float64()) + Fatalf("unexpected: %v", u) } + return f } // Int64Val returns n as an int64. @@ -94,7 +91,11 @@ func (n *Node) Int64Val() int64 { if !Isconst(n, constant.Int) { Fatalf("Int64Val(%v)", n) } - return n.Val().U.(*Mpint).Int64() + x, ok := constant.Int64Val(n.Val()) + if !ok { + Fatalf("Int64Val(%v)", n) + } + return x } // CanInt64 reports whether it is safe to call Int64Val() on n. @@ -105,7 +106,21 @@ func (n *Node) CanInt64() bool { // if the value inside n cannot be represented as an int64, the // return value of Int64 is undefined - return n.Val().U.(*Mpint).CmpInt64(n.Int64Val()) == 0 + _, ok := constant.Int64Val(n.Val()) + return ok +} + +// Uint64Val returns n as an uint64. +// n must be an integer or rune constant. +func (n *Node) Uint64Val() uint64 { + if !Isconst(n, constant.Int) { + Fatalf("Uint64Val(%v)", n) + } + x, ok := constant.Uint64Val(n.Val()) + if !ok { + Fatalf("Uint64Val(%v)", n) + } + return x } // BoolVal returns n as a bool. @@ -114,7 +129,7 @@ func (n *Node) BoolVal() bool { if !Isconst(n, constant.Bool) { Fatalf("BoolVal(%v)", n) } - return n.Val().U.(bool) + return constant.BoolVal(n.Val()) } // StringVal returns the value of a literal string Node as a string. @@ -123,68 +138,48 @@ func (n *Node) StringVal() string { if !Isconst(n, constant.String) { Fatalf("StringVal(%v)", n) } - return n.Val().U.(string) + return constant.StringVal(n.Val()) } -// truncate float literal fv to 32-bit or 64-bit precision -// according to type; return truncated value. -func truncfltlit(oldv *Mpflt, t *types.Type) *Mpflt { - if t == nil { - return oldv +func roundFloat(v constant.Value, sz int64) constant.Value { + switch sz { + case 4: + f, _ := constant.Float32Val(v) + return makeFloat64(float64(f)) + case 8: + f, _ := constant.Float64Val(v) + return makeFloat64(f) } + Fatalf("unexpected size: %v", sz) + panic("unreachable") +} - if overflow(Val{oldv}, t) { +// truncate float literal fv to 32-bit or 64-bit precision +// according to type; return truncated value. +func truncfltlit(v constant.Value, t *types.Type) constant.Value { + if t.IsUntyped() || overflow(v, t) { // If there was overflow, simply continuing would set the // value to Inf which in turn would lead to spurious follow-on // errors. Avoid this by returning the existing value. - return oldv - } - - fv := newMpflt() - - // convert large precision literal floating - // into limited precision (float64 or float32) - switch t.Etype { - case types.TFLOAT32: - fv.SetFloat64(oldv.Float32()) - case types.TFLOAT64: - fv.SetFloat64(oldv.Float64()) - default: - Fatalf("truncfltlit: unexpected Etype %v", t.Etype) + return v } - return fv + return roundFloat(v, t.Size()) } // truncate Real and Imag parts of Mpcplx to 32-bit or 64-bit // precision, according to type; return truncated value. In case of // overflow, calls yyerror but does not truncate the input value. 
-func trunccmplxlit(oldv *Mpcplx, t *types.Type) *Mpcplx { - if t == nil { - return oldv - } - - if overflow(Val{oldv}, t) { +func trunccmplxlit(v constant.Value, t *types.Type) constant.Value { + if t.IsUntyped() || overflow(v, t) { // If there was overflow, simply continuing would set the // value to Inf which in turn would lead to spurious follow-on // errors. Avoid this by returning the existing value. - return oldv - } - - cv := newMpcmplx() - - switch t.Etype { - case types.TCOMPLEX64: - cv.Real.SetFloat64(oldv.Real.Float32()) - cv.Imag.SetFloat64(oldv.Imag.Float32()) - case types.TCOMPLEX128: - cv.Real.SetFloat64(oldv.Real.Float64()) - cv.Imag.SetFloat64(oldv.Imag.Float64()) - default: - Fatalf("trunccplxlit: unexpected Etype %v", t.Etype) + return v } - return cv + fsz := t.Size() / 2 + return makeComplex(roundFloat(constant.Real(v), fsz), roundFloat(constant.Imag(v), fsz)) } // TODO(mdempsky): Replace these with better APIs. @@ -256,7 +251,7 @@ func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Nod case OLITERAL: v := convertVal(n.Val(), t, explicit) - if v.U == nil { + if v.Kind() == constant.Unknown { break } n.Type = t @@ -356,7 +351,7 @@ func operandType(op Op, t *types.Type) *types.Type { // // If explicit is true, then conversions from integer to string are // also allowed. -func convertVal(v Val, t *types.Type, explicit bool) Val { +func convertVal(v constant.Value, t *types.Type, explicit bool) constant.Value { switch ct := v.Kind(); ct { case constant.Bool: if t.IsBoolean() { @@ -381,153 +376,131 @@ func convertVal(v Val, t *types.Type, explicit bool) Val { return v case t.IsFloat(): v = toflt(v) - v = Val{truncfltlit(v.U.(*Mpflt), t)} + v = truncfltlit(v, t) return v case t.IsComplex(): v = tocplx(v) - v = Val{trunccmplxlit(v.U.(*Mpcplx), t)} + v = trunccmplxlit(v, t) return v } } - return Val{} + return constant.MakeUnknown() } -func tocplx(v Val) Val { - switch u := v.U.(type) { - case *Mpint: - c := newMpcmplx() - c.Real.SetInt(u) - c.Imag.SetFloat64(0.0) - v.U = c - - case *Mpflt: - c := newMpcmplx() - c.Real.Set(u) - c.Imag.SetFloat64(0.0) - v.U = c - } - - return v +func tocplx(v constant.Value) constant.Value { + return constant.ToComplex(v) } -func toflt(v Val) Val { - switch u := v.U.(type) { - case *Mpint: - f := newMpflt() - f.SetInt(u) - v.U = f - - case *Mpcplx: - f := newMpflt() - f.Set(&u.Real) - if u.Imag.CmpFloat64(0) != 0 { - yyerror("constant %v truncated to real", u.GoString()) +func toflt(v constant.Value) constant.Value { + if v.Kind() == constant.Complex { + if constant.Sign(constant.Imag(v)) != 0 { + yyerror("constant %v truncated to real", v) } - v.U = f + v = constant.Real(v) } - return v + return constant.ToFloat(v) } -func toint(v Val) Val { - switch u := v.U.(type) { - case *Mpint: - - case *Mpflt: - i := new(Mpint) - if !i.SetFloat(u) { - if i.checkOverflow(0) { - yyerror("integer too large") - } else { - // The value of u cannot be represented as an integer; - // so we need to print an error message. - // Unfortunately some float values cannot be - // reasonably formatted for inclusion in an error - // message (example: 1 + 1e-100), so first we try to - // format the float; if the truncation resulted in - // something that looks like an integer we omit the - // value from the error message. - // (See issue #11371). 
- var t big.Float - t.Parse(u.GoString(), 10) - if t.IsInt() { - yyerror("constant truncated to integer") - } else { - yyerror("constant %v truncated to integer", u.GoString()) - } - } +func toint(v constant.Value) constant.Value { + if v.Kind() == constant.Complex { + if constant.Sign(constant.Imag(v)) != 0 { + yyerror("constant %v truncated to integer", v) } - v.U = i + v = constant.Real(v) + } - case *Mpcplx: - i := new(Mpint) - if !i.SetFloat(&u.Real) || u.Imag.CmpFloat64(0) != 0 { - yyerror("constant %v truncated to integer", u.GoString()) - } + if v := constant.ToInt(v); v.Kind() == constant.Int { + return v + } - v.U = i + // The value of v cannot be represented as an integer; + // so we need to print an error message. + // Unfortunately some float values cannot be + // reasonably formatted for inclusion in an error + // message (example: 1 + 1e-100), so first we try to + // format the float; if the truncation resulted in + // something that looks like an integer we omit the + // value from the error message. + // (See issue #11371). + f := bigFloatVal(v) + if f.MantExp(nil) > 2*Mpprec { + yyerror("integer too large") + } else { + var t big.Float + t.Parse(fmt.Sprint(v), 0) + if t.IsInt() { + yyerror("constant truncated to integer") + } else { + yyerror("constant %v truncated to integer", v) + } } - return v + // Prevent follow-on errors. + // TODO(mdempsky): Use constant.MakeUnknown() instead. + return constant.MakeInt64(1) } -func doesoverflow(v Val, t *types.Type) bool { - switch u := v.U.(type) { - case *Mpint: - if !t.IsInteger() { - Fatalf("overflow: %v integer constant", t) +// doesoverflow reports whether constant value v is too large +// to represent with type t. +func doesoverflow(v constant.Value, t *types.Type) bool { + switch { + case t.IsInteger(): + bits := uint(8 * t.Size()) + if t.IsUnsigned() { + x, ok := constant.Uint64Val(v) + return !ok || x>>bits != 0 } - return u.Cmp(minintval[t.Etype]) < 0 || u.Cmp(maxintval[t.Etype]) > 0 - - case *Mpflt: - if !t.IsFloat() { - Fatalf("overflow: %v floating-point constant", t) + x, ok := constant.Int64Val(v) + if x < 0 { + x = ^x } - return u.Cmp(minfltval[t.Etype]) <= 0 || u.Cmp(maxfltval[t.Etype]) >= 0 - - case *Mpcplx: - if !t.IsComplex() { - Fatalf("overflow: %v complex constant", t) + return !ok || x>>(bits-1) != 0 + case t.IsFloat(): + switch t.Size() { + case 4: + f, _ := constant.Float32Val(v) + return math.IsInf(float64(f), 0) + case 8: + f, _ := constant.Float64Val(v) + return math.IsInf(f, 0) } - return u.Real.Cmp(minfltval[t.Etype]) <= 0 || u.Real.Cmp(maxfltval[t.Etype]) >= 0 || - u.Imag.Cmp(minfltval[t.Etype]) <= 0 || u.Imag.Cmp(maxfltval[t.Etype]) >= 0 + case t.IsComplex(): + ft := floatForComplex(t) + return doesoverflow(constant.Real(v), ft) || doesoverflow(constant.Imag(v), ft) } - - return false + Fatalf("doesoverflow: %v, %v", v, t) + panic("unreachable") } -func overflow(v Val, t *types.Type) bool { +// overflow reports whether constant value v is too large +// to represent with type t, and emits an error message if so. +func overflow(v constant.Value, t *types.Type) bool { // v has already been converted // to appropriate form for t. - if t == nil || t.Etype == TIDEAL { + if t.IsUntyped() { return false } - - // Only uintptrs may be converted to pointers, which cannot overflow. 
- if t.IsPtr() || t.IsUnsafePtr() { - return false + if v.Kind() == constant.Int && constant.BitLen(v) > Mpprec { + yyerror("integer too large") + return true } - if doesoverflow(v, t) { - yyerror("constant %v overflows %v", v, t) + yyerror("constant %v overflows %v", vconv(v, 0), t) return true } - return false - } -func tostr(v Val) Val { - switch u := v.U.(type) { - case *Mpint: - var r rune = 0xFFFD - if u.Cmp(minintval[TINT32]) >= 0 && u.Cmp(maxintval[TINT32]) <= 0 { - r = rune(u.Int64()) +func tostr(v constant.Value) constant.Value { + if v.Kind() == constant.Int { + r := unicode.ReplacementChar + if x, ok := constant.Uint64Val(v); ok && x <= unicode.MaxRune { + r = rune(x) } - v.U = string(r) + v = constant.MakeString(string(r)) } - return v } @@ -542,6 +515,35 @@ func Isconst(n *Node, ct constant.Kind) bool { return consttype(n) == ct } +var tokenForOp = [...]token.Token{ + OPLUS: token.ADD, + ONEG: token.SUB, + ONOT: token.NOT, + OBITNOT: token.XOR, + + OADD: token.ADD, + OSUB: token.SUB, + OMUL: token.MUL, + ODIV: token.QUO, + OMOD: token.REM, + OOR: token.OR, + OXOR: token.XOR, + OAND: token.AND, + OANDNOT: token.AND_NOT, + OOROR: token.LOR, + OANDAND: token.LAND, + + OEQ: token.EQL, + ONE: token.NEQ, + OLT: token.LSS, + OLE: token.LEQ, + OGT: token.GTR, + OGE: token.GEQ, + + OLSH: token.SHL, + ORSH: token.SHR, +} + // evalConst returns a constant-evaluated expression equivalent to n. // If n is not a constant, evalConst returns n. // Otherwise, evalConst returns a new OLITERAL with the same value as n, @@ -553,22 +555,52 @@ func evalConst(n *Node) *Node { switch op := n.Op; op { case OPLUS, ONEG, OBITNOT, ONOT: if nl.Op == OLITERAL { - return origConst(n, unaryOp(op, nl.Val(), n.Type)) + var prec uint + if n.Type.IsUnsigned() { + prec = uint(n.Type.Size() * 8) + } + return origConst(n, constant.UnaryOp(tokenForOp[op], nl.Val(), prec)) } case OADD, OSUB, OMUL, ODIV, OMOD, OOR, OXOR, OAND, OANDNOT, OOROR, OANDAND: if nl.Op == OLITERAL && nr.Op == OLITERAL { - return origConst(n, binaryOp(nl.Val(), op, nr.Val())) + rval := nr.Val() + + // check for divisor underflow in complex division (see issue 20227) + if op == ODIV && n.Type.IsComplex() && constant.Sign(square(constant.Real(rval))) == 0 && constant.Sign(square(constant.Imag(rval))) == 0 { + yyerror("complex division by zero") + n.Type = nil + return n + } + if (op == ODIV || op == OMOD) && constant.Sign(rval) == 0 { + yyerror("division by zero") + n.Type = nil + return n + } + + tok := tokenForOp[op] + if op == ODIV && n.Type.IsInteger() { + tok = token.QUO_ASSIGN // integer division + } + return origConst(n, constant.BinaryOp(nl.Val(), tok, rval)) } case OEQ, ONE, OLT, OLE, OGT, OGE: if nl.Op == OLITERAL && nr.Op == OLITERAL { - return origBoolConst(n, compareOp(nl.Val(), op, nr.Val())) + return origBoolConst(n, constant.Compare(nl.Val(), tokenForOp[op], nr.Val())) } case OLSH, ORSH: if nl.Op == OLITERAL && nr.Op == OLITERAL { - return origConst(n, shiftOp(nl.Val(), op, nr.Val())) + // shiftBound from go/types; "so we can express smallestFloat64" + const shiftBound = 1023 - 1 + 52 + s, ok := constant.Uint64Val(nr.Val()) + if !ok || s > shiftBound { + yyerror("invalid shift count %v", nr) + n.Type = nil + break + } + return origConst(n, constant.Shift(toint(nl.Val()), tokenForOp[op], uint(s))) } case OCONV, ORUNESTR: @@ -601,7 +633,7 @@ func evalConst(n *Node) *Node { for _, c := range s { strs = append(strs, c.StringVal()) } - return origConst(n, Val{U: strings.Join(strs, "")}) + return origConst(n, 
constant.MakeString(strings.Join(strs, ""))) } newList := make([]*Node, 0, need) for i := 0; i < len(s); i++ { @@ -614,7 +646,7 @@ func evalConst(n *Node) *Node { i2++ } - nl := origConst(s[i], Val{U: strings.Join(strs, "")}) + nl := origConst(s[i], constant.MakeString(strings.Join(strs, ""))) nl.Orig = nl // it's bigger than just s[i] newList = append(newList, nl) i = i2 - 1 @@ -642,319 +674,84 @@ func evalConst(n *Node) *Node { case OALIGNOF, OOFFSETOF, OSIZEOF: return origIntConst(n, evalunsafe(n)) - case OREAL, OIMAG: + case OREAL: if nl.Op == OLITERAL { - var re, im *Mpflt - switch u := nl.Val().U.(type) { - case *Mpint: - re = newMpflt() - re.SetInt(u) - // im = 0 - case *Mpflt: - re = u - // im = 0 - case *Mpcplx: - re = &u.Real - im = &u.Imag - default: - Fatalf("impossible") - } - if n.Op == OIMAG { - if im == nil { - im = newMpflt() - } - re = im - } - return origConst(n, Val{re}) + return origConst(n, constant.Real(nl.Val())) + } + + case OIMAG: + if nl.Op == OLITERAL { + return origConst(n, constant.Imag(nl.Val())) } case OCOMPLEX: if nl.Op == OLITERAL && nr.Op == OLITERAL { - // make it a complex literal - c := newMpcmplx() - c.Real.Set(toflt(nl.Val()).U.(*Mpflt)) - c.Imag.Set(toflt(nr.Val()).U.(*Mpflt)) - return origConst(n, Val{c}) + return origConst(n, makeComplex(nl.Val(), nr.Val())) } } return n } -func match(x, y Val) (Val, Val) { - switch { - case x.Kind() == constant.Complex || y.Kind() == constant.Complex: - return tocplx(x), tocplx(y) - case x.Kind() == constant.Float || y.Kind() == constant.Float: - return toflt(x), toflt(y) +func makeInt(i *big.Int) constant.Value { + if i.IsInt64() { + return constant.Make(i.Int64()) // workaround #42640 (Int64Val(Make(big.NewInt(10))) returns (10, false), not (10, true)) } - - // Mixed int/rune are fine. 
- return x, y + return constant.Make(i) } -func compareOp(x Val, op Op, y Val) bool { - x, y = match(x, y) - - switch x.Kind() { - case constant.Bool: - x, y := x.U.(bool), y.U.(bool) - switch op { - case OEQ: - return x == y - case ONE: - return x != y - } - - case constant.Int: - x, y := x.U.(*Mpint), y.U.(*Mpint) - return cmpZero(x.Cmp(y), op) - - case constant.Float: - x, y := x.U.(*Mpflt), y.U.(*Mpflt) - return cmpZero(x.Cmp(y), op) - - case constant.Complex: - x, y := x.U.(*Mpcplx), y.U.(*Mpcplx) - eq := x.Real.Cmp(&y.Real) == 0 && x.Imag.Cmp(&y.Imag) == 0 - switch op { - case OEQ: - return eq - case ONE: - return !eq - } - - case constant.String: - x, y := x.U.(string), y.U.(string) - switch op { - case OEQ: - return x == y - case ONE: - return x != y - case OLT: - return x < y - case OLE: - return x <= y - case OGT: - return x > y - case OGE: - return x >= y - } +func makeFloat64(f float64) constant.Value { + if math.IsInf(f, 0) { + Fatalf("infinity is not a valid constant") } - - Fatalf("compareOp: bad comparison: %v %v %v", x, op, y) - panic("unreachable") + v := constant.MakeFloat64(f) + v = constant.ToFloat(v) // workaround #42641 (MakeFloat64(0).Kind() returns Int, not Float) + return v } -func cmpZero(x int, op Op) bool { - switch op { - case OEQ: - return x == 0 - case ONE: - return x != 0 - case OLT: - return x < 0 - case OLE: - return x <= 0 - case OGT: - return x > 0 - case OGE: - return x >= 0 - } - - Fatalf("cmpZero: want comparison operator, got %v", op) - panic("unreachable") +func makeComplex(real, imag constant.Value) constant.Value { + return constant.BinaryOp(constant.ToFloat(real), token.ADD, constant.MakeImag(constant.ToFloat(imag))) } -func binaryOp(x Val, op Op, y Val) Val { - x, y = match(x, y) - -Outer: - switch x.Kind() { - case constant.Bool: - x, y := x.U.(bool), y.U.(bool) - switch op { - case OANDAND: - return Val{U: x && y} - case OOROR: - return Val{U: x || y} - } - - case constant.Int: - x, y := x.U.(*Mpint), y.U.(*Mpint) - - u := new(Mpint) - u.Set(x) - switch op { - case OADD: - u.Add(y) - case OSUB: - u.Sub(y) - case OMUL: - u.Mul(y) - case ODIV: - if y.CmpInt64(0) == 0 { - yyerror("division by zero") - return Val{} - } - u.Quo(y) - case OMOD: - if y.CmpInt64(0) == 0 { - yyerror("division by zero") - return Val{} - } - u.Rem(y) - case OOR: - u.Or(y) - case OAND: - u.And(y) - case OANDNOT: - u.AndNot(y) - case OXOR: - u.Xor(y) - default: - break Outer - } - return Val{U: u} - - case constant.Float: - x, y := x.U.(*Mpflt), y.U.(*Mpflt) - - u := newMpflt() - u.Set(x) - switch op { - case OADD: - u.Add(y) - case OSUB: - u.Sub(y) - case OMUL: - u.Mul(y) - case ODIV: - if y.CmpFloat64(0) == 0 { - yyerror("division by zero") - return Val{} - } - u.Quo(y) - default: - break Outer - } - return Val{U: u} - - case constant.Complex: - x, y := x.U.(*Mpcplx), y.U.(*Mpcplx) - - u := newMpcmplx() - u.Real.Set(&x.Real) - u.Imag.Set(&x.Imag) - switch op { - case OADD: - u.Real.Add(&y.Real) - u.Imag.Add(&y.Imag) - case OSUB: - u.Real.Sub(&y.Real) - u.Imag.Sub(&y.Imag) - case OMUL: - u.Mul(y) - case ODIV: - if !u.Div(y) { - yyerror("complex division by zero") - return Val{} - } - default: - break Outer - } - return Val{U: u} - } +func square(x constant.Value) constant.Value { + return constant.BinaryOp(x, token.MUL, x) +} - Fatalf("binaryOp: bad operation: %v %v %v", x, op, y) - panic("unreachable") +// For matching historical "constant OP overflow" error messages. 
+var overflowNames = [...]string{ + OADD: "addition", + OSUB: "subtraction", + OMUL: "multiplication", + OLSH: "shift", } -func unaryOp(op Op, x Val, t *types.Type) Val { - switch op { - case OPLUS: - switch x.Kind() { - case constant.Int, constant.Float, constant.Complex: - return x - } +// origConst returns an OLITERAL with orig n and value v. +func origConst(n *Node, v constant.Value) *Node { + lno := setlineno(n) + v = convertVal(v, n.Type, false) + lineno = lno - case ONEG: - switch x.Kind() { - case constant.Int: - x := x.U.(*Mpint) - u := new(Mpint) - u.Set(x) - u.Neg() - return Val{U: u} - - case constant.Float: - x := x.U.(*Mpflt) - u := newMpflt() - u.Set(x) - u.Neg() - return Val{U: u} - - case constant.Complex: - x := x.U.(*Mpcplx) - u := newMpcmplx() - u.Real.Set(&x.Real) - u.Imag.Set(&x.Imag) - u.Real.Neg() - u.Imag.Neg() - return Val{U: u} + switch v.Kind() { + case constant.Unknown: + // If constant folding was attempted (we were called) + // but it produced an invalid constant value, + // mark n as broken and give up. + if Errors() == 0 { + Fatalf("should have reported an error") } + n.Type = nil + return n - case OBITNOT: - switch x.Kind() { - case constant.Int: - x := x.U.(*Mpint) - - u := new(Mpint) - if t.IsSigned() || t.IsUntyped() { - // Signed values change sign. - u.SetInt64(-1) - } else { - // Unsigned values invert their bits. - u.Set(maxintval[t.Etype]) + case constant.Int: + if constant.BitLen(v) > Mpprec { + what := overflowNames[n.Op] + if what == "" { + Fatalf("unexpected overflow: %v", n.Op) } - u.Xor(x) - return Val{U: u} + yyerror("constant %v overflow", what) + n.Type = nil + return n } - - case ONOT: - return Val{U: !x.U.(bool)} - } - - Fatalf("unaryOp: bad operation: %v %v", op, x) - panic("unreachable") -} - -func shiftOp(x Val, op Op, y Val) Val { - x = toint(x) - y = toint(y) - - u := new(Mpint) - u.Set(x.U.(*Mpint)) - switch op { - case OLSH: - u.Lsh(y.U.(*Mpint)) - case ORSH: - u.Rsh(y.U.(*Mpint)) - default: - Fatalf("shiftOp: bad operator: %v", op) - panic("unreachable") - } - return Val{U: u} -} - -// origConst returns an OLITERAL with orig n and value v. -func origConst(n *Node, v Val) *Node { - // If constant folding was attempted (we were called) - // but it produced an invalid constant value, - // mark n as broken and give up. - if v.U == nil { - n.Type = nil - return n } orig := n @@ -963,53 +760,45 @@ func origConst(n *Node, v Val) *Node { n.Pos = orig.Pos n.Type = orig.Type n.SetVal(v) - - // Check range. - lno := setlineno(n) - overflow(v, n.Type) - lineno = lno - - if !n.Type.IsUntyped() { - switch v.Kind() { - // Truncate precision for non-ideal float. - case constant.Float: - n.SetVal(Val{truncfltlit(v.U.(*Mpflt), n.Type)}) - // Truncate precision for non-ideal complex. - case constant.Complex: - n.SetVal(Val{trunccmplxlit(v.U.(*Mpcplx), n.Type)}) - } - } return n } -func assertRepresents(t *types.Type, v Val) { +func assertRepresents(t *types.Type, v constant.Value) { if !represents(t, v) { Fatalf("%v does not represent %v", t, v) } } -func represents(t *types.Type, v Val) bool { - if !t.IsUntyped() { - // TODO(mdempsky): Stricter handling of typed types. 
- return true +func represents(t *types.Type, v constant.Value) bool { + switch v.Kind() { + case constant.Unknown: + return okforconst[t.Etype] + case constant.Bool: + return t.IsBoolean() + case constant.String: + return t.IsString() + case constant.Int: + return t.IsInteger() + case constant.Float: + return t.IsFloat() + case constant.Complex: + return t.IsComplex() } - vt := idealType(v.Kind()) - return t == vt || (t == types.UntypedRune && vt == types.UntypedInt) + Fatalf("unexpected constant kind: %v", v) + panic("unreachable") } func origBoolConst(n *Node, v bool) *Node { - return origConst(n, Val{U: v}) + return origConst(n, constant.MakeBool(v)) } func origIntConst(n *Node, v int64) *Node { - u := new(Mpint) - u.SetInt64(v) - return origConst(n, Val{u}) + return origConst(n, constant.MakeInt64(v)) } // nodlit returns a new untyped constant with value v. -func nodlit(v Val) *Node { +func nodlit(v constant.Value) *Node { n := nod(OLITERAL, nil, nil) n.Type = idealType(v.Kind()) n.SetVal(v) @@ -1125,25 +914,10 @@ func defaultType(t *types.Type) *types.Type { } func smallintconst(n *Node) bool { - if n.Op == OLITERAL && Isconst(n, constant.Int) && n.Type != nil { - switch simtype[n.Type.Etype] { - case TINT8, - TUINT8, - TINT16, - TUINT16, - TINT32, - TUINT32, - TBOOL: - return true - - case TIDEAL, TINT64, TUINT64, TPTR: - v, ok := n.Val().U.(*Mpint) - if ok && v.Cmp(minintval[TINT32]) >= 0 && v.Cmp(maxintval[TINT32]) <= 0 { - return true - } - } + if n.Op == OLITERAL { + v, ok := constant.Int64Val(n.Val()) + return ok && int64(int32(v)) == v } - return false } @@ -1156,17 +930,18 @@ func indexconst(n *Node) int64 { if n.Op != OLITERAL { return -1 } + if !n.Type.IsInteger() && n.Type.Etype != TIDEAL { + return -1 + } - v := toint(n.Val()) // toint returns argument unchanged if not representable as an *Mpint - vi, ok := v.U.(*Mpint) - if !ok || vi.CmpInt64(0) < 0 { + v := toint(n.Val()) + if v.Kind() != constant.Int || constant.Sign(v) < 0 { return -1 } - if vi.Cmp(maxintval[TINT]) > 0 { + if doesoverflow(v, types.Types[TINT]) { return -2 } - - return vi.Int64() + return int64Val(types.Types[TINT], v) } // isGoConst reports whether n is a Go language constant (as opposed to a @@ -1276,7 +1051,7 @@ func (s *constSet) add(pos src.XPos, n *Node, what, where string) { case types.Runetype: typ = types.Types[TINT32] } - k := constSetKey{typ, n.Val().Interface()} + k := constSetKey{typ, n.ValueInterface()} if hasUniquePos(n) { pos = n.Pos @@ -1301,7 +1076,7 @@ func (s *constSet) add(pos src.XPos, n *Node, what, where string) { // TODO(mdempsky): This could probably be a fmt.go flag. 
func nodeAndVal(n *Node) string { show := n.String() - val := n.Val().Interface() + val := n.ValueInterface() if s := fmt.Sprintf("%#v", val); show != s { show += " (value " + s + ")" } diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 59888cce7e9f4..431142117428a 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -553,7 +553,7 @@ func structfield(n *Node) *types.Field { f.Embedded = 1 } if n.HasVal() { - f.Note = n.Val().U.(string) + f.Note = constant.StringVal(n.Val()) } lineno = lno @@ -638,7 +638,7 @@ func interfacefield(n *Node) *types.Field { Fatalf("interfacefield: oops %v\n", n) } - if n.Val().Kind() != constant.Unknown { + if n.HasVal() { yyerror("interface method cannot have annotation") } diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 15251062b4b81..9ee3b080b89e7 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -143,7 +143,7 @@ func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op Op, ctxt Class, t // importconst declares symbol s as an imported constant with type t and value val. // ipkg is the package being imported -func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val Val) { +func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val constant.Value) { n := importobj(ipkg, pos, s, OLITERAL, PEXTERN, t) if n == nil { // TODO: Check that value matches. return diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index addb010e5c71e..f9888aec41b4e 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -9,6 +9,7 @@ import ( "cmd/compile/internal/types" "cmd/internal/src" "fmt" + "go/constant" "io" "strconv" "strings" @@ -334,7 +335,7 @@ func (m fmtMode) prepareArgs(args []interface{}) { args[i] = (*fmtSymErr)(arg) case Nodes: args[i] = fmtNodesErr(arg) - case Val, int32, int64, string, types.EType: + case int32, int64, string, types.EType, constant.Value: // OK: printing these types doesn't depend on mode default: Fatalf("mode.prepareArgs type %T", arg) @@ -353,7 +354,7 @@ func (m fmtMode) prepareArgs(args []interface{}) { args[i] = (*fmtSymDbg)(arg) case Nodes: args[i] = fmtNodesDbg(arg) - case Val, int32, int64, string, types.EType: + case int32, int64, string, types.EType, constant.Value: // OK: printing these types doesn't depend on mode default: Fatalf("mode.prepareArgs type %T", arg) @@ -372,7 +373,7 @@ func (m fmtMode) prepareArgs(args []interface{}) { args[i] = (*fmtSymTypeId)(arg) case Nodes: args[i] = fmtNodesTypeId(arg) - case Val, int32, int64, string, types.EType: + case int32, int64, string, types.EType, constant.Value: // OK: printing these types doesn't depend on mode default: Fatalf("mode.prepareArgs type %T", arg) @@ -391,7 +392,7 @@ func (m fmtMode) prepareArgs(args []interface{}) { args[i] = (*fmtSymTypeIdName)(arg) case Nodes: args[i] = fmtNodesTypeIdName(arg) - case Val, int32, int64, string, types.EType: + case int32, int64, string, types.EType, constant.Value: // OK: printing these types doesn't depend on mode default: Fatalf("mode.prepareArgs type %T", arg) @@ -513,51 +514,37 @@ func (n *Node) jconv(s fmt.State, flag FmtFlag) { } } -func (v Val) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - v.vconv(s, fmtFlag(s, verb)) - - default: - fmt.Fprintf(s, "%%!%c(Val=%T)", verb, v) - } -} +func vconv(v constant.Value, flag FmtFlag) string { + if 
flag&FmtSharp == 0 && v.Kind() == constant.Complex { + real, imag := constant.Real(v), constant.Imag(v) -func (v Val) vconv(s fmt.State, flag FmtFlag) { - switch u := v.U.(type) { - case *Mpint: - if flag&FmtSharp != 0 { - fmt.Fprint(s, u.String()) - return + var re string + sre := constant.Sign(real) + if sre != 0 { + re = real.String() } - fmt.Fprint(s, u.GoString()) - return - case *Mpflt: - if flag&FmtSharp != 0 { - fmt.Fprint(s, u.String()) - return + var im string + sim := constant.Sign(imag) + if sim != 0 { + im = imag.String() } - fmt.Fprint(s, u.GoString()) - return - case *Mpcplx: - if flag&FmtSharp != 0 { - fmt.Fprint(s, u.String()) - return + switch { + case sre == 0 && sim == 0: + return "0" + case sre == 0: + return im + "i" + case sim == 0: + return re + case sim < 0: + return fmt.Sprintf("(%s%si)", re, im) + default: + return fmt.Sprintf("(%s+%si)", re, im) } - fmt.Fprint(s, u.GoString()) - return - - case string: - fmt.Fprint(s, strconv.Quote(u)) - - case bool: - fmt.Fprint(s, u) - - default: - fmt.Fprintf(s, "", v.Kind()) } + + return v.String() } /* @@ -1333,8 +1320,12 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) { } if n.Type == types.UntypedRune { - u := n.Val().U.(*Mpint) - switch x := u.Int64(); { + switch x, ok := constant.Int64Val(n.Val()); { + case !ok: + fallthrough + default: + fmt.Fprintf(s, "('\\x00' + %v)", n.Val()) + case ' ' <= x && x < utf8.RuneSelf && x != '\\' && x != '\'': fmt.Fprintf(s, "'%c'", int(x)) @@ -1343,12 +1334,9 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) { case 0 <= x && x <= utf8.MaxRune: fmt.Fprintf(s, "'\\U%08x'", uint64(x)) - - default: - fmt.Fprintf(s, "('\\x00' + %v)", u) } } else { - mode.Fprintf(s, "%v", n.Val()) + fmt.Fprint(s, vconv(n.Val(), fmtFlag(s, 'v'))) } if needUnparen { diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index c53fde7e242ba..1242fc06cb3b6 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -178,14 +178,6 @@ var ( iscmp [OEND]bool ) -var minintval [NTYPE]*Mpint - -var maxintval [NTYPE]*Mpint - -var minfltval [NTYPE]*Mpflt - -var maxfltval [NTYPE]*Mpflt - var xtop []*Node var exportlist []*Node diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 842025705b9c2..447f938a0ac35 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -777,7 +777,7 @@ func constTypeOf(typ *types.Type) constant.Kind { return 0 } -func (w *exportWriter) value(typ *types.Type, v Val) { +func (w *exportWriter) value(typ *types.Type, v constant.Value) { assertRepresents(typ, v) w.typ(typ) @@ -788,17 +788,16 @@ func (w *exportWriter) value(typ *types.Type, v Val) { switch constTypeOf(typ) { case constant.Bool: - w.bool(v.U.(bool)) + w.bool(constant.BoolVal(v)) case constant.String: - w.string(v.U.(string)) + w.string(constant.StringVal(v)) case constant.Int: - w.mpint(&v.U.(*Mpint).Val, typ) + w.mpint(v, typ) case constant.Float: - w.mpfloat(&v.U.(*Mpflt).Val, typ) + w.mpfloat(v, typ) case constant.Complex: - x := v.U.(*Mpcplx) - w.mpfloat(&x.Real.Val, typ) - w.mpfloat(&x.Imag.Val, typ) + w.mpfloat(constant.Real(v), typ) + w.mpfloat(constant.Imag(v), typ) } } @@ -847,15 +846,19 @@ func intSize(typ *types.Type) (signed bool, maxBytes uint) { // single byte. // // TODO(mdempsky): Is this level of complexity really worthwhile? 
-func (w *exportWriter) mpint(x *big.Int, typ *types.Type) { +func (w *exportWriter) mpint(x constant.Value, typ *types.Type) { signed, maxBytes := intSize(typ) - negative := x.Sign() < 0 + negative := constant.Sign(x) < 0 if !signed && negative { Fatalf("negative unsigned integer; type %v, value %v", typ, x) } - b := x.Bytes() + b := constant.Bytes(x) // little endian + for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 { + b[i], b[j] = b[j], b[i] + } + if len(b) > 0 && b[0] == 0 { Fatalf("leading zeros") } @@ -910,7 +913,8 @@ func (w *exportWriter) mpint(x *big.Int, typ *types.Type) { // mantissa is an integer. The value is written out as mantissa (as a // multi-precision integer) and then the exponent, except exponent is // omitted if mantissa is zero. -func (w *exportWriter) mpfloat(f *big.Float, typ *types.Type) { +func (w *exportWriter) mpfloat(v constant.Value, typ *types.Type) { + f := bigFloatVal(v) if f.IsInf() { Fatalf("infinite constant") } @@ -928,7 +932,7 @@ func (w *exportWriter) mpfloat(f *big.Float, typ *types.Type) { if acc != big.Exact { Fatalf("mantissa scaling failed for %f (%s)", f, acc) } - w.mpint(manti, typ) + w.mpint(makeInt(manti), typ) if manti.Sign() != 0 { w.int64(exp) } diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index a3a01e59cd594..3f50a940619b4 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -356,27 +356,24 @@ func (r *importReader) doDecl(n *Node) { } } -func (p *importReader) value(typ *types.Type) (v Val) { +func (p *importReader) value(typ *types.Type) constant.Value { switch constTypeOf(typ) { case constant.Bool: - v.U = p.bool() + return constant.MakeBool(p.bool()) case constant.String: - v.U = p.string() + return constant.MakeString(p.string()) case constant.Int: - x := new(Mpint) - p.mpint(&x.Val, typ) - v.U = x + var i big.Int + p.mpint(&i, typ) + return makeInt(&i) case constant.Float: - x := newMpflt() - p.float(x, typ) - v.U = x + return p.float(typ) case constant.Complex: - x := newMpcmplx() - p.float(&x.Real, typ) - p.float(&x.Imag, typ) - v.U = x + return makeComplex(p.float(typ), p.float(typ)) } - return + + Fatalf("unexpected value type: %v", typ) + panic("unreachable") } func (p *importReader) mpint(x *big.Int, typ *types.Type) { @@ -418,14 +415,15 @@ func (p *importReader) mpint(x *big.Int, typ *types.Type) { } } -func (p *importReader) float(x *Mpflt, typ *types.Type) { +func (p *importReader) float(typ *types.Type) constant.Value { var mant big.Int p.mpint(&mant, typ) - m := x.Val.SetInt(&mant) - if m.Sign() == 0 { - return + var f big.Float + f.SetInt(&mant) + if f.Sign() != 0 { + f.SetMantExp(&f, int(p.int64())) } - m.SetMantExp(m, int(p.int64())) + return constant.Make(&f) } func (r *importReader) ident() *types.Sym { diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index cf4ec039f10a6..fca1334a194b7 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -21,6 +21,7 @@ import ( "cmd/internal/sys" "flag" "fmt" + "go/constant" "internal/goversion" "io" "io/ioutil" @@ -1135,13 +1136,13 @@ func loadsys() { // imported so far. 
var myheight int -func importfile(f *Val) *types.Pkg { - path_, ok := f.U.(string) - if !ok { +func importfile(f constant.Value) *types.Pkg { + if f.Kind() != constant.String { yyerror("import path must be a string") return nil } + path_ := constant.StringVal(f) if len(path_) == 0 { yyerror("import path is empty") return nil diff --git a/src/cmd/compile/internal/gc/mpfloat.go b/src/cmd/compile/internal/gc/mpfloat.go deleted file mode 100644 index 9962f4b41320b..0000000000000 --- a/src/cmd/compile/internal/gc/mpfloat.go +++ /dev/null @@ -1,357 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gc - -import ( - "fmt" - "math" - "math/big" -) - -// implements float arithmetic - -const ( - // Maximum size in bits for Mpints before signalling - // overflow and also mantissa precision for Mpflts. - Mpprec = 512 - // Turn on for constant arithmetic debugging output. - Mpdebug = false -) - -// Mpflt represents a floating-point constant. -type Mpflt struct { - Val big.Float -} - -// Mpcplx represents a complex constant. -type Mpcplx struct { - Real Mpflt - Imag Mpflt -} - -// Use newMpflt (not new(Mpflt)!) to get the correct default precision. -func newMpflt() *Mpflt { - var a Mpflt - a.Val.SetPrec(Mpprec) - return &a -} - -// Use newMpcmplx (not new(Mpcplx)!) to get the correct default precision. -func newMpcmplx() *Mpcplx { - var a Mpcplx - a.Real = *newMpflt() - a.Imag = *newMpflt() - return &a -} - -func (a *Mpflt) SetInt(b *Mpint) { - if b.checkOverflow(0) { - // sign doesn't really matter but copy anyway - a.Val.SetInf(b.Val.Sign() < 0) - return - } - a.Val.SetInt(&b.Val) -} - -func (a *Mpflt) Set(b *Mpflt) { - a.Val.Set(&b.Val) -} - -func (a *Mpflt) Add(b *Mpflt) { - if Mpdebug { - fmt.Printf("\n%v + %v", a, b) - } - - a.Val.Add(&a.Val, &b.Val) - - if Mpdebug { - fmt.Printf(" = %v\n\n", a) - } -} - -func (a *Mpflt) AddFloat64(c float64) { - var b Mpflt - - b.SetFloat64(c) - a.Add(&b) -} - -func (a *Mpflt) Sub(b *Mpflt) { - if Mpdebug { - fmt.Printf("\n%v - %v", a, b) - } - - a.Val.Sub(&a.Val, &b.Val) - - if Mpdebug { - fmt.Printf(" = %v\n\n", a) - } -} - -func (a *Mpflt) Mul(b *Mpflt) { - if Mpdebug { - fmt.Printf("%v\n * %v\n", a, b) - } - - a.Val.Mul(&a.Val, &b.Val) - - if Mpdebug { - fmt.Printf(" = %v\n\n", a) - } -} - -func (a *Mpflt) MulFloat64(c float64) { - var b Mpflt - - b.SetFloat64(c) - a.Mul(&b) -} - -func (a *Mpflt) Quo(b *Mpflt) { - if Mpdebug { - fmt.Printf("%v\n / %v\n", a, b) - } - - a.Val.Quo(&a.Val, &b.Val) - - if Mpdebug { - fmt.Printf(" = %v\n\n", a) - } -} - -func (a *Mpflt) Cmp(b *Mpflt) int { - return a.Val.Cmp(&b.Val) -} - -func (a *Mpflt) CmpFloat64(c float64) int { - if c == 0 { - return a.Val.Sign() // common case shortcut - } - return a.Val.Cmp(big.NewFloat(c)) -} - -func (a *Mpflt) Float64() float64 { - x, _ := a.Val.Float64() - - // check for overflow - if math.IsInf(x, 0) && Errors() == 0 { - Fatalf("ovf in Mpflt Float64") - } - - return x + 0 // avoid -0 (should not be needed, but be conservative) -} - -func (a *Mpflt) Float32() float64 { - x32, _ := a.Val.Float32() - x := float64(x32) - - // check for overflow - if math.IsInf(x, 0) && Errors() == 0 { - Fatalf("ovf in Mpflt Float32") - } - - return x + 0 // avoid -0 (should not be needed, but be conservative) -} - -func (a *Mpflt) SetFloat64(c float64) { - if Mpdebug { - fmt.Printf("\nconst %g", c) - } - - // convert -0 to 0 - if c == 0 { - c = 0 - } - a.Val.SetFloat64(c) - - if 
Mpdebug { - fmt.Printf(" = %v\n", a) - } -} - -func (a *Mpflt) Neg() { - // avoid -0 - if a.Val.Sign() != 0 { - a.Val.Neg(&a.Val) - } -} - -func (a *Mpflt) SetString(as string) { - f, _, err := a.Val.Parse(as, 0) - if err != nil { - yyerror("malformed constant: %s (%v)", as, err) - a.Val.SetFloat64(0) - return - } - - if f.IsInf() { - yyerror("constant too large: %s", as) - a.Val.SetFloat64(0) - return - } - - // -0 becomes 0 - if f.Sign() == 0 && f.Signbit() { - a.Val.SetFloat64(0) - } -} - -func (f *Mpflt) String() string { - return f.Val.Text('b', 0) -} - -func (fvp *Mpflt) GoString() string { - // determine sign - sign := "" - f := &fvp.Val - if f.Sign() < 0 { - sign = "-" - f = new(big.Float).Abs(f) - } - - // Don't try to convert infinities (will not terminate). - if f.IsInf() { - return sign + "Inf" - } - - // Use exact fmt formatting if in float64 range (common case): - // proceed if f doesn't underflow to 0 or overflow to inf. - if x, _ := f.Float64(); f.Sign() == 0 == (x == 0) && !math.IsInf(x, 0) { - return fmt.Sprintf("%s%.6g", sign, x) - } - - // Out of float64 range. Do approximate manual to decimal - // conversion to avoid precise but possibly slow Float - // formatting. - // f = mant * 2**exp - var mant big.Float - exp := f.MantExp(&mant) // 0.5 <= mant < 1.0 - - // approximate float64 mantissa m and decimal exponent d - // f ~ m * 10**d - m, _ := mant.Float64() // 0.5 <= m < 1.0 - d := float64(exp) * (math.Ln2 / math.Ln10) // log_10(2) - - // adjust m for truncated (integer) decimal exponent e - e := int64(d) - m *= math.Pow(10, d-float64(e)) - - // ensure 1 <= m < 10 - switch { - case m < 1-0.5e-6: - // The %.6g format below rounds m to 5 digits after the - // decimal point. Make sure that m*10 < 10 even after - // rounding up: m*10 + 0.5e-5 < 10 => m < 1 - 0.5e6. - m *= 10 - e-- - case m >= 10: - m /= 10 - e++ - } - - return fmt.Sprintf("%s%.6ge%+d", sign, m, e) -} - -// complex multiply v *= rv -// (a, b) * (c, d) = (a*c - b*d, b*c + a*d) -func (v *Mpcplx) Mul(rv *Mpcplx) { - var ac, ad, bc, bd Mpflt - - ac.Set(&v.Real) - ac.Mul(&rv.Real) // ac - - bd.Set(&v.Imag) - bd.Mul(&rv.Imag) // bd - - bc.Set(&v.Imag) - bc.Mul(&rv.Real) // bc - - ad.Set(&v.Real) - ad.Mul(&rv.Imag) // ad - - v.Real.Set(&ac) - v.Real.Sub(&bd) // ac-bd - - v.Imag.Set(&bc) - v.Imag.Add(&ad) // bc+ad -} - -// complex divide v /= rv -// (a, b) / (c, d) = ((a*c + b*d), (b*c - a*d))/(c*c + d*d) -func (v *Mpcplx) Div(rv *Mpcplx) bool { - if rv.Real.CmpFloat64(0) == 0 && rv.Imag.CmpFloat64(0) == 0 { - return false - } - - var ac, ad, bc, bd, cc_plus_dd Mpflt - - cc_plus_dd.Set(&rv.Real) - cc_plus_dd.Mul(&rv.Real) // cc - - ac.Set(&rv.Imag) - ac.Mul(&rv.Imag) // dd - cc_plus_dd.Add(&ac) // cc+dd - - // We already checked that c and d are not both zero, but we can't - // assume that c²+d² != 0 follows, because for tiny values of c - // and/or d c²+d² can underflow to zero. Check that c²+d² is - // nonzero, return if it's not. 
- if cc_plus_dd.CmpFloat64(0) == 0 { - return false - } - - ac.Set(&v.Real) - ac.Mul(&rv.Real) // ac - - bd.Set(&v.Imag) - bd.Mul(&rv.Imag) // bd - - bc.Set(&v.Imag) - bc.Mul(&rv.Real) // bc - - ad.Set(&v.Real) - ad.Mul(&rv.Imag) // ad - - v.Real.Set(&ac) - v.Real.Add(&bd) // ac+bd - v.Real.Quo(&cc_plus_dd) // (ac+bd)/(cc+dd) - - v.Imag.Set(&bc) - v.Imag.Sub(&ad) // bc-ad - v.Imag.Quo(&cc_plus_dd) // (bc+ad)/(cc+dd) - - return true -} - -func (v *Mpcplx) String() string { - return fmt.Sprintf("(%s+%si)", v.Real.String(), v.Imag.String()) -} - -func (v *Mpcplx) GoString() string { - var re string - sre := v.Real.CmpFloat64(0) - if sre != 0 { - re = v.Real.GoString() - } - - var im string - sim := v.Imag.CmpFloat64(0) - if sim != 0 { - im = v.Imag.GoString() - } - - switch { - case sre == 0 && sim == 0: - return "0" - case sre == 0: - return im + "i" - case sim == 0: - return re - case sim < 0: - return fmt.Sprintf("(%s%si)", re, im) - default: - return fmt.Sprintf("(%s+%si)", re, im) - } -} diff --git a/src/cmd/compile/internal/gc/mpint.go b/src/cmd/compile/internal/gc/mpint.go deleted file mode 100644 index 199b2659d1588..0000000000000 --- a/src/cmd/compile/internal/gc/mpint.go +++ /dev/null @@ -1,303 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gc - -import ( - "fmt" - "math/big" -) - -// implements integer arithmetic - -// Mpint represents an integer constant. -type Mpint struct { - Val big.Int - Ovf bool // set if Val overflowed compiler limit (sticky) -} - -func (a *Mpint) SetOverflow() { - a.Val.SetUint64(1) // avoid spurious div-zero errors - a.Ovf = true -} - -func (a *Mpint) checkOverflow(extra int) bool { - // We don't need to be precise here, any reasonable upper limit would do. - // For now, use existing limit so we pass all the tests unchanged. 
- if a.Val.BitLen()+extra > Mpprec { - a.SetOverflow() - } - return a.Ovf -} - -func (a *Mpint) Set(b *Mpint) { - a.Val.Set(&b.Val) -} - -func (a *Mpint) SetFloat(b *Mpflt) bool { - // avoid converting huge floating-point numbers to integers - // (2*Mpprec is large enough to permit all tests to pass) - if b.Val.MantExp(nil) > 2*Mpprec { - a.SetOverflow() - return false - } - - if _, acc := b.Val.Int(&a.Val); acc == big.Exact { - return true - } - - const delta = 16 // a reasonably small number of bits > 0 - var t big.Float - t.SetPrec(Mpprec - delta) - - // try rounding down a little - t.SetMode(big.ToZero) - t.Set(&b.Val) - if _, acc := t.Int(&a.Val); acc == big.Exact { - return true - } - - // try rounding up a little - t.SetMode(big.AwayFromZero) - t.Set(&b.Val) - if _, acc := t.Int(&a.Val); acc == big.Exact { - return true - } - - a.Ovf = false - return false -} - -func (a *Mpint) Add(b *Mpint) { - if a.Ovf || b.Ovf { - if Errors() == 0 { - Fatalf("ovf in Mpint Add") - } - a.SetOverflow() - return - } - - a.Val.Add(&a.Val, &b.Val) - - if a.checkOverflow(0) { - yyerror("constant addition overflow") - } -} - -func (a *Mpint) Sub(b *Mpint) { - if a.Ovf || b.Ovf { - if Errors() == 0 { - Fatalf("ovf in Mpint Sub") - } - a.SetOverflow() - return - } - - a.Val.Sub(&a.Val, &b.Val) - - if a.checkOverflow(0) { - yyerror("constant subtraction overflow") - } -} - -func (a *Mpint) Mul(b *Mpint) { - if a.Ovf || b.Ovf { - if Errors() == 0 { - Fatalf("ovf in Mpint Mul") - } - a.SetOverflow() - return - } - - a.Val.Mul(&a.Val, &b.Val) - - if a.checkOverflow(0) { - yyerror("constant multiplication overflow") - } -} - -func (a *Mpint) Quo(b *Mpint) { - if a.Ovf || b.Ovf { - if Errors() == 0 { - Fatalf("ovf in Mpint Quo") - } - a.SetOverflow() - return - } - - a.Val.Quo(&a.Val, &b.Val) - - if a.checkOverflow(0) { - // can only happen for div-0 which should be checked elsewhere - yyerror("constant division overflow") - } -} - -func (a *Mpint) Rem(b *Mpint) { - if a.Ovf || b.Ovf { - if Errors() == 0 { - Fatalf("ovf in Mpint Rem") - } - a.SetOverflow() - return - } - - a.Val.Rem(&a.Val, &b.Val) - - if a.checkOverflow(0) { - // should never happen - yyerror("constant modulo overflow") - } -} - -func (a *Mpint) Or(b *Mpint) { - if a.Ovf || b.Ovf { - if Errors() == 0 { - Fatalf("ovf in Mpint Or") - } - a.SetOverflow() - return - } - - a.Val.Or(&a.Val, &b.Val) -} - -func (a *Mpint) And(b *Mpint) { - if a.Ovf || b.Ovf { - if Errors() == 0 { - Fatalf("ovf in Mpint And") - } - a.SetOverflow() - return - } - - a.Val.And(&a.Val, &b.Val) -} - -func (a *Mpint) AndNot(b *Mpint) { - if a.Ovf || b.Ovf { - if Errors() == 0 { - Fatalf("ovf in Mpint AndNot") - } - a.SetOverflow() - return - } - - a.Val.AndNot(&a.Val, &b.Val) -} - -func (a *Mpint) Xor(b *Mpint) { - if a.Ovf || b.Ovf { - if Errors() == 0 { - Fatalf("ovf in Mpint Xor") - } - a.SetOverflow() - return - } - - a.Val.Xor(&a.Val, &b.Val) -} - -func (a *Mpint) Lsh(b *Mpint) { - if a.Ovf || b.Ovf { - if Errors() == 0 { - Fatalf("ovf in Mpint Lsh") - } - a.SetOverflow() - return - } - - s := b.Int64() - if s < 0 || s >= Mpprec { - msg := "shift count too large" - if s < 0 { - msg = "invalid negative shift count" - } - yyerror("%s: %d", msg, s) - a.SetInt64(0) - return - } - - if a.checkOverflow(int(s)) { - yyerror("constant shift overflow") - return - } - a.Val.Lsh(&a.Val, uint(s)) -} - -func (a *Mpint) Rsh(b *Mpint) { - if a.Ovf || b.Ovf { - if Errors() == 0 { - Fatalf("ovf in Mpint Rsh") - } - a.SetOverflow() - return - } - - s := b.Int64() - if s < 0 { - 
yyerror("invalid negative shift count: %d", s) - if a.Val.Sign() < 0 { - a.SetInt64(-1) - } else { - a.SetInt64(0) - } - return - } - - a.Val.Rsh(&a.Val, uint(s)) -} - -func (a *Mpint) Cmp(b *Mpint) int { - return a.Val.Cmp(&b.Val) -} - -func (a *Mpint) CmpInt64(c int64) int { - if c == 0 { - return a.Val.Sign() // common case shortcut - } - return a.Val.Cmp(big.NewInt(c)) -} - -func (a *Mpint) Neg() { - a.Val.Neg(&a.Val) -} - -func (a *Mpint) Int64() int64 { - if a.Ovf { - if Errors() == 0 { - Fatalf("constant overflow") - } - return 0 - } - - return a.Val.Int64() -} - -func (a *Mpint) SetInt64(c int64) { - a.Val.SetInt64(c) -} - -func (a *Mpint) SetString(as string) { - _, ok := a.Val.SetString(as, 0) - if !ok { - // The lexer checks for correct syntax of the literal - // and reports detailed errors. Thus SetString should - // never fail (in theory it might run out of memory, - // but that wouldn't be reported as an error here). - Fatalf("malformed integer constant: %s", as) - return - } - if a.checkOverflow(0) { - yyerror("constant too large: %s", as) - } -} - -func (a *Mpint) GoString() string { - return a.Val.String() -} - -func (a *Mpint) String() string { - return fmt.Sprintf("%#x", &a.Val) -} diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index f8c84a75bf365..47b1958f18baf 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -7,6 +7,7 @@ package gc import ( "fmt" "go/constant" + "go/token" "os" "path/filepath" "runtime" @@ -331,8 +332,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) { p.checkUnused(pragma) } - val := p.basicLit(imp.Path) - ipkg := importfile(&val) + ipkg := importfile(p.basicLit(imp.Path)) if ipkg == nil { if Errors() == 0 { Fatalf("phase error in import") @@ -824,7 +824,7 @@ func (p *noder) sum(x syntax.Expr) *Node { chunks = append(chunks, nstr.StringVal()) } else { if len(chunks) > 1 { - nstr.SetVal(Val{U: strings.Join(chunks, "")}) + nstr.SetVal(constant.MakeString(strings.Join(chunks, ""))) } nstr = nil chunks = chunks[:0] @@ -832,7 +832,7 @@ func (p *noder) sum(x syntax.Expr) *Node { n = p.nod(add, OADD, n, r) } if len(chunks) > 1 { - nstr.SetVal(Val{U: strings.Join(chunks, "")}) + nstr.SetVal(constant.MakeString(strings.Join(chunks, ""))) } return n @@ -1400,64 +1400,43 @@ func checkLangCompat(lit *syntax.BasicLit) { } } -func (p *noder) basicLit(lit *syntax.BasicLit) Val { +func (p *noder) basicLit(lit *syntax.BasicLit) constant.Value { // We don't use the errors of the conversion routines to determine // if a literal string is valid because the conversion routines may // accept a wider syntax than the language permits. Rely on lit.Bad // instead. 
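The replacement below funnels every literal kind through constant.MakeFromLiteral, which parses a Go literal given its go/token kind and reports malformed input as an Unknown value rather than panicking, which is exactly the property the lit.Bad handling relies on. A minimal standalone sketch of that behavior (illustrative, not part of this patch):

package main

import (
	"fmt"
	"go/constant"
	"go/token"
)

func main() {
	f := constant.MakeFromLiteral("0x1p-2", token.FLOAT, 0)
	fmt.Println(f.Kind() == constant.Float, f) // true 0.25

	r := constant.MakeFromLiteral("'ä'", token.CHAR, 0)
	fmt.Println(constant.Int64Val(r)) // 228 true

	bad := constant.MakeFromLiteral("3..14", token.FLOAT, 0)
	fmt.Println(bad.Kind() == constant.Unknown) // true: malformed literal
}
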
- switch s := lit.Value; lit.Kind { - case syntax.IntLit: - checkLangCompat(lit) - x := new(Mpint) - if !lit.Bad { - x.SetString(s) - } - return Val{U: x} - - case syntax.FloatLit: - checkLangCompat(lit) - x := newMpflt() - if !lit.Bad { - x.SetString(s) - } - return Val{U: x} + if lit.Bad { + return constant.MakeUnknown() + } - case syntax.ImagLit: + switch lit.Kind { + case syntax.IntLit, syntax.FloatLit, syntax.ImagLit: checkLangCompat(lit) - x := newMpcmplx() - if !lit.Bad { - x.Imag.SetString(strings.TrimSuffix(s, "i")) - } - return Val{U: x} - - case syntax.RuneLit: - x := new(Mpint) - if !lit.Bad { - u, _ := strconv.Unquote(s) - var r rune - if len(u) == 1 { - r = rune(u[0]) - } else { - r, _ = utf8.DecodeRuneInString(u) - } - x.SetInt64(int64(r)) - } - return Val{U: x} + } - case syntax.StringLit: - var x string - if !lit.Bad { - if len(s) > 0 && s[0] == '`' { - // strip carriage returns from raw string - s = strings.Replace(s, "\r", "", -1) - } - x, _ = strconv.Unquote(s) - } - return Val{U: x} + v := constant.MakeFromLiteral(lit.Value, tokenForLitKind[lit.Kind], 0) + if v.Kind() == constant.Unknown { + // TODO(mdempsky): Better error message? + p.yyerrorpos(lit.Pos(), "malformed constant: %s", lit.Value) + } - default: - panic("unhandled BasicLit kind") + // go/constant uses big.Rat by default, which is more precise, but + // causes toolstash -cmp and some tests to fail. For now, convert + // to big.Float to match cmd/compile's historical precision. + // TODO(mdempsky): Remove. + if v.Kind() == constant.Float { + v = constant.Make(bigFloatVal(v)) } + + return v +} + +var tokenForLitKind = [...]token.Token{ + syntax.IntLit: token.INT, + syntax.RuneLit: token.CHAR, + syntax.FloatLit: token.FLOAT, + syntax.ImagLit: token.IMAG, + syntax.StringLit: token.STRING, } func (p *noder) name(name *syntax.Name) *types.Sym { diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 499b8ef2e5712..d51f50ccab5f2 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -250,33 +250,18 @@ func dumpGlobalConst(n *Node) { return } // only export integer constants for now - switch t.Etype { - case TINT8: - case TINT16: - case TINT32: - case TINT64: - case TINT: - case TUINT8: - case TUINT16: - case TUINT32: - case TUINT64: - case TUINT: - case TUINTPTR: - // ok - case TIDEAL: - if !Isconst(n, constant.Int) { - return - } - x := n.Val().U.(*Mpint) - if x.Cmp(minintval[TINT]) < 0 || x.Cmp(maxintval[TINT]) > 0 { + if !t.IsInteger() { + return + } + v := n.Val() + if t.IsUntyped() { + // Export untyped integers as int (if they fit). + t = types.Types[TINT] + if doesoverflow(v, t) { return } - // Ideal integers we export as int (if they fit). 
- t = types.Types[TINT] - default: - return } - Ctxt.DwarfIntConst(myimportpath, n.Sym.Name, typesymname(t), n.Int64Val()) + Ctxt.DwarfIntConst(myimportpath, n.Sym.Name, typesymname(t), int64Val(t, v)) } func dumpglobls() { @@ -595,6 +580,9 @@ func litsym(n, c *Node, wid int) { if n.Sym == nil { Fatalf("litsym nil n sym") } + if !types.Identical(n.Type, c.Type) { + Fatalf("litsym: type mismatch: %v has type %v, but %v has type %v", n, n.Type, c, c.Type) + } if c.Op == ONIL { return } @@ -602,16 +590,16 @@ func litsym(n, c *Node, wid int) { Fatalf("litsym c op %v", c.Op) } s := n.Sym.Linksym() - switch u := c.Val().U.(type) { - case bool: - i := int64(obj.Bool2int(u)) + switch u := c.Val(); u.Kind() { + case constant.Bool: + i := int64(obj.Bool2int(constant.BoolVal(u))) s.WriteInt(Ctxt, n.Xoffset, wid, i) - case *Mpint: - s.WriteInt(Ctxt, n.Xoffset, wid, u.Int64()) + case constant.Int: + s.WriteInt(Ctxt, n.Xoffset, wid, int64Val(n.Type, u)) - case *Mpflt: - f := u.Float64() + case constant.Float: + f, _ := constant.Float64Val(u) switch n.Type.Etype { case TFLOAT32: s.WriteFloat32(Ctxt, n.Xoffset, float32(f)) @@ -619,22 +607,23 @@ func litsym(n, c *Node, wid int) { s.WriteFloat64(Ctxt, n.Xoffset, f) } - case *Mpcplx: - r := u.Real.Float64() - i := u.Imag.Float64() + case constant.Complex: + re, _ := constant.Float64Val(constant.Real(u)) + im, _ := constant.Float64Val(constant.Imag(u)) switch n.Type.Etype { case TCOMPLEX64: - s.WriteFloat32(Ctxt, n.Xoffset, float32(r)) - s.WriteFloat32(Ctxt, n.Xoffset+4, float32(i)) + s.WriteFloat32(Ctxt, n.Xoffset, float32(re)) + s.WriteFloat32(Ctxt, n.Xoffset+4, float32(im)) case TCOMPLEX128: - s.WriteFloat64(Ctxt, n.Xoffset, r) - s.WriteFloat64(Ctxt, n.Xoffset+8, i) + s.WriteFloat64(Ctxt, n.Xoffset, re) + s.WriteFloat64(Ctxt, n.Xoffset+8, im) } - case string: - symdata := stringsym(n.Pos, u) + case constant.String: + i := constant.StringVal(u) + symdata := stringsym(n.Pos, i) s.WriteAddr(Ctxt, n.Xoffset, Widthptr, symdata, 0) - s.WriteInt(Ctxt, n.Xoffset+int64(Widthptr), Widthptr, int64(len(u))) + s.WriteInt(Ctxt, n.Xoffset+int64(Widthptr), Widthptr, int64(len(i))) default: Fatalf("litsym unhandled OLITERAL %v", c) diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 3b4056cf7d208..6da3c5e10b4bd 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -8,6 +8,7 @@ import ( "cmd/compile/internal/types" "cmd/internal/obj" "fmt" + "go/constant" ) type InitEntry struct { @@ -1116,20 +1117,13 @@ func isZero(n *Node) bool { return true case OLITERAL: - switch u := n.Val().U.(type) { + switch u := n.Val(); u.Kind() { + case constant.String: + return constant.StringVal(u) == "" + case constant.Bool: + return !constant.BoolVal(u) default: - Dump("unexpected literal", n) - Fatalf("isZero") - case string: - return u == "" - case bool: - return !u - case *Mpint: - return u.CmpInt64(0) == 0 - case *Mpflt: - return u.CmpFloat64(0) == 0 - case *Mpcplx: - return u.Real.CmpFloat64(0) == 0 && u.Imag.CmpFloat64(0) == 0 + return constant.Sign(u) == 0 } case OARRAYLIT: diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 88ff8d684c5ec..7a8dda2938166 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -2044,9 +2044,9 @@ func (s *state) expr(n *Node) *ssa.Value { return s.constNil(t) } case OLITERAL: - switch u := n.Val().U.(type) { - case *Mpint: - i := u.Int64() + switch u := n.Val(); u.Kind() { + case constant.Int: + i 
:= int64Val(n.Type, u) switch n.Type.Size() { case 1: return s.constInt8(n.Type, int8(i)) @@ -2060,44 +2060,45 @@ func (s *state) expr(n *Node) *ssa.Value { s.Fatalf("bad integer size %d", n.Type.Size()) return nil } - case string: - if u == "" { + case constant.String: + i := constant.StringVal(u) + if i == "" { return s.constEmptyString(n.Type) } - return s.entryNewValue0A(ssa.OpConstString, n.Type, u) - case bool: - return s.constBool(u) - case *Mpflt: + return s.entryNewValue0A(ssa.OpConstString, n.Type, i) + case constant.Bool: + return s.constBool(constant.BoolVal(u)) + case constant.Float: + f, _ := constant.Float64Val(u) switch n.Type.Size() { case 4: - return s.constFloat32(n.Type, u.Float32()) + return s.constFloat32(n.Type, f) case 8: - return s.constFloat64(n.Type, u.Float64()) + return s.constFloat64(n.Type, f) default: s.Fatalf("bad float size %d", n.Type.Size()) return nil } - case *Mpcplx: - r := &u.Real - i := &u.Imag + case constant.Complex: + re, _ := constant.Float64Val(constant.Real(u)) + im, _ := constant.Float64Val(constant.Imag(u)) switch n.Type.Size() { case 8: pt := types.Types[TFLOAT32] return s.newValue2(ssa.OpComplexMake, n.Type, - s.constFloat32(pt, r.Float32()), - s.constFloat32(pt, i.Float32())) + s.constFloat32(pt, re), + s.constFloat32(pt, im)) case 16: pt := types.Types[TFLOAT64] return s.newValue2(ssa.OpComplexMake, n.Type, - s.constFloat64(pt, r.Float64()), - s.constFloat64(pt, i.Float64())) + s.constFloat64(pt, re), + s.constFloat64(pt, im)) default: - s.Fatalf("bad float size %d", n.Type.Size()) + s.Fatalf("bad complex size %d", n.Type.Size()) return nil } - default: - s.Fatalf("unhandled OLITERAL %v", n.Val().Kind()) + s.Fatalf("unhandled OLITERAL %v", u.Kind()) return nil } case OCONVNOP: diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 1aa3af929c2f6..ebc5af63e19c9 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -10,6 +10,7 @@ import ( "crypto/md5" "encoding/binary" "fmt" + "go/constant" "sort" "strconv" "strings" @@ -252,9 +253,7 @@ func (x methcmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] } func (x methcmp) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) } func nodintconst(v int64) *Node { - u := new(Mpint) - u.SetInt64(v) - return nodlit(Val{u}) + return nodlit(constant.MakeInt64(v)) } func nodnil() *Node { @@ -264,11 +263,11 @@ func nodnil() *Node { } func nodbool(b bool) *Node { - return nodlit(Val{b}) + return nodlit(constant.MakeBool(b)) } func nodstr(s string) *Node { - return nodlit(Val{s}) + return nodlit(constant.MakeString(s)) } // treecopy recursively copies n, with the exception of diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index 8459bd7c181ef..c249a85b64e82 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -8,6 +8,7 @@ import ( "cmd/compile/internal/types" "cmd/internal/src" "go/constant" + "go/token" "sort" ) @@ -400,7 +401,7 @@ func (s *exprSwitch) flush() { } sort.Slice(cc, func(i, j int) bool { - return compareOp(cc[i].lo.Val(), OLT, cc[j].lo.Val()) + return constant.Compare(cc[i].lo.Val(), token.LSS, cc[j].lo.Val()) }) // Merge consecutive integer cases. 
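For reference, the case-sorting change above maps directly onto go/constant's API: constant.Compare takes a go/token comparison operator and works across compatible kinds, which is what lets it replace the deleted compareOp helper. A runnable sketch (illustrative, not part of this patch):

package main

import (
	"fmt"
	"go/constant"
	"go/token"
	"sort"
)

func main() {
	cc := []constant.Value{
		constant.MakeInt64(30),
		constant.MakeInt64(-2),
		constant.MakeInt64(7),
	}
	// Same shape as the exprSwitch.flush sort above: a three-way
	// comparison expressed with go/token operators.
	sort.Slice(cc, func(i, j int) bool {
		return constant.Compare(cc[i], token.LSS, cc[j])
	})
	fmt.Println(cc) // [-2 7 30]
}
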
diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go index 39f2996808ecb..3b585ea341794 100644 --- a/src/cmd/compile/internal/gc/syntax.go +++ b/src/cmd/compile/internal/gc/syntax.go @@ -12,6 +12,7 @@ import ( "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/src" + "go/constant" "sort" ) @@ -236,16 +237,17 @@ func (n *Node) MarkReadonly() { n.Sym.Linksym().Type = objabi.SRODATA } -// Val returns the Val for the node. -func (n *Node) Val() Val { +// Val returns the constant.Value for the node. +func (n *Node) Val() constant.Value { if !n.HasVal() { - return Val{} + return constant.MakeUnknown() } - return Val{n.E} + return *n.E.(*constant.Value) } -// SetVal sets the Val for the node, which must not have been used with SetOpt. -func (n *Node) SetVal(v Val) { +// SetVal sets the constant.Value for the node, +// which must not have been used with SetOpt. +func (n *Node) SetVal(v constant.Value) { if n.HasOpt() { Debug.h = 1 Dump("have Opt", n) @@ -255,7 +257,7 @@ func (n *Node) SetVal(v Val) { assertRepresents(n.Type, v) } n.SetHasVal(true) - n.E = v.U + n.E = &v } // Opt returns the optimizer data for the node. diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index e014a0ba2d7d7..d1bc781a54054 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -8,6 +8,7 @@ import ( "cmd/compile/internal/types" "fmt" "go/constant" + "go/token" "strings" ) @@ -361,7 +362,7 @@ func typecheck1(n *Node, top int) (res *Node) { ok |= ctxExpr if n.Type == nil && n.Val().Kind() == constant.String { - n.Type = types.UntypedString + Fatalf("string literal missing type") } case ONIL, ONONAME: @@ -446,12 +447,13 @@ func typecheck1(n *Node, top int) (res *Node) { return n } - bound := v.U.(*Mpint).Int64() - if bound < 0 { + if constant.Sign(v) < 0 { yyerror("array bound must be non-negative") n.Type = nil return n } + + bound, _ := constant.Int64Val(v) t = types.NewArray(r.Type, bound) } @@ -776,8 +778,9 @@ func typecheck1(n *Node, top int) (res *Node) { } if iscmp[n.Op] { - n = evalConst(n) t = types.UntypedBool + n.Type = t + n = evalConst(n) if n.Op != OLITERAL { l, r = defaultlit2(l, r, true) n.Left = l @@ -803,7 +806,7 @@ func typecheck1(n *Node, top int) (res *Node) { } if (op == ODIV || op == OMOD) && Isconst(r, constant.Int) { - if r.Val().U.(*Mpint).CmpInt64(0) == 0 { + if constant.Sign(r.Val()) == 0 { yyerror("division by zero") n.Type = nil return n @@ -1045,14 +1048,14 @@ func typecheck1(n *Node, top int) (res *Node) { } if !n.Bounded() && Isconst(n.Right, constant.Int) { - x := n.Right.Int64Val() - if x < 0 { + x := n.Right.Val() + if constant.Sign(x) < 0 { yyerror("invalid %s index %v (index must be non-negative)", why, n.Right) - } else if t.IsArray() && x >= t.NumElem() { + } else if t.IsArray() && constant.Compare(x, token.GEQ, constant.MakeInt64(t.NumElem())) { yyerror("invalid array index %v (out of bounds for %d-element array)", n.Right, t.NumElem()) - } else if Isconst(n.Left, constant.String) && x >= int64(len(n.Left.StringVal())) { + } else if Isconst(n.Left, constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(n.Left.StringVal())))) { yyerror("invalid string index %v (out of bounds for %d-byte string)", n.Right, len(n.Left.StringVal())) - } else if doesoverflow(n.Right.Val(), types.Types[TINT]) { + } else if doesoverflow(x, types.Types[TINT]) { yyerror("invalid %s index %v (index too large)", why, n.Right) } } @@ -1155,7 +1158,7 
@@ func typecheck1(n *Node, top int) (res *Node) { Fatalf("cap for OSLICEHEADER must be non-negative") } - if Isconst(l, constant.Int) && Isconst(c, constant.Int) && compareOp(l.Val(), OGT, c.Val()) { + if Isconst(l, constant.Int) && Isconst(c, constant.Int) && constant.Compare(l.Val(), token.GTR, c.Val()) { Fatalf("len larger than cap for OSLICEHEADER") } @@ -1200,7 +1203,7 @@ func typecheck1(n *Node, top int) (res *Node) { if doesoverflow(n.Left.Val(), types.Types[TINT]) { Fatalf("len for OMAKESLICECOPY too large") } - if n.Left.Int64Val() < 0 { + if constant.Sign(n.Left.Val()) < 0 { Fatalf("len for OMAKESLICECOPY must be non-negative") } } @@ -1773,7 +1776,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = nil return n } - if Isconst(l, constant.Int) && r != nil && Isconst(r, constant.Int) && compareOp(l.Val(), OGT, r.Val()) { + if Isconst(l, constant.Int) && r != nil && Isconst(r, constant.Int) && constant.Compare(l.Val(), token.GTR, r.Val()) { yyerror("len larger than cap in make(%v)", t) n.Type = nil return n @@ -2181,16 +2184,17 @@ func checksliceindex(l *Node, r *Node, tp *types.Type) bool { } if r.Op == OLITERAL { - if r.Int64Val() < 0 { + x := r.Val() + if constant.Sign(x) < 0 { yyerror("invalid slice index %v (index must be non-negative)", r) return false - } else if tp != nil && tp.NumElem() >= 0 && r.Int64Val() > tp.NumElem() { + } else if tp != nil && tp.NumElem() >= 0 && constant.Compare(x, token.GTR, constant.MakeInt64(tp.NumElem())) { yyerror("invalid slice index %v (out of bounds for %d-element array)", r, tp.NumElem()) return false - } else if Isconst(l, constant.String) && r.Int64Val() > int64(len(l.StringVal())) { + } else if Isconst(l, constant.String) && constant.Compare(x, token.GTR, constant.MakeInt64(int64(len(l.StringVal())))) { yyerror("invalid slice index %v (out of bounds for %d-byte string)", r, len(l.StringVal())) return false - } else if doesoverflow(r.Val(), types.Types[TINT]) { + } else if doesoverflow(x, types.Types[TINT]) { yyerror("invalid slice index %v (index too large)", r) return false } @@ -2200,7 +2204,7 @@ func checksliceindex(l *Node, r *Node, tp *types.Type) bool { } func checksliceconst(lo *Node, hi *Node) bool { - if lo != nil && hi != nil && lo.Op == OLITERAL && hi.Op == OLITERAL && compareOp(lo.Val(), OGT, hi.Val()) { + if lo != nil && hi != nil && lo.Op == OLITERAL && hi.Op == OLITERAL && constant.Compare(lo.Val(), token.GTR, hi.Val()) { yyerror("invalid slice index: %v > %v", lo, hi) return false } @@ -3192,7 +3196,7 @@ func samesafeexpr(l *Node, r *Node) bool { return samesafeexpr(l.Left, r.Left) && samesafeexpr(l.Right, r.Right) case OLITERAL: - return eqval(l.Val(), r.Val()) + return constant.Compare(l.Val(), token.EQL, r.Val()) case ONIL: return true @@ -3625,7 +3629,9 @@ func typecheckdef(n *Node) { } n.Type = e.Type - n.SetVal(e.Val()) + if n.Type != nil { + n.SetVal(e.Val()) + } case ONAME: if n.Name.Param.Ntype != nil { @@ -3723,14 +3729,13 @@ func checkmake(t *types.Type, arg string, np **Node) bool { // Do range checks for constants before defaultlit // to avoid redundant "constant NNN overflows int" errors. 
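checkmake's rewrite below reduces the range check to two go/constant queries: constant.Sign for negativity, and the compiler-internal doesoverflow for the upper bound. A hedged standalone equivalent, where the hypothetical fitsInt64 stands in for doesoverflow with the range hard-coded to int64 (an assumption for illustration; the real check depends on the target type):

package main

import (
	"fmt"
	"go/constant"
	"go/token"
	"math"
)

// fitsInt64 plays the role of doesoverflow in this sketch only,
// checking against the int64 range rather than the target's int.
func fitsInt64(v constant.Value) bool {
	return constant.Compare(v, token.GEQ, constant.MakeInt64(math.MinInt64)) &&
		constant.Compare(v, token.LEQ, constant.MakeInt64(math.MaxInt64))
}

func main() {
	n := constant.MakeFromLiteral("1e20", token.FLOAT, 0)
	fmt.Println(constant.Sign(n) < 0) // false: not a negative make() argument
	fmt.Println(fitsInt64(n))         // false: overflows int64
}
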
- switch consttype(n) { - case constant.Int, constant.Float, constant.Complex: - v := toint(n.Val()).U.(*Mpint) - if v.CmpInt64(0) < 0 { + if n.Op == OLITERAL { + v := toint(n.Val()) + if constant.Sign(v) < 0 { yyerror("negative %s argument in make(%v)", arg, t) return false } - if v.Cmp(maxintval[TINT]) > 0 { + if doesoverflow(v, types.Types[TINT]) { yyerror("%s argument too large in make(%v)", arg, t) return false } diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index 32bf37e3228cb..8c32f2f6d2cb4 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -209,8 +209,6 @@ func typeinit() { okforand[et] = true okforconst[et] = true issimple[et] = true - minintval[et] = new(Mpint) - maxintval[et] = new(Mpint) } if isFloat[et] { @@ -220,8 +218,6 @@ func typeinit() { okforarith[et] = true okforconst[et] = true issimple[et] = true - minfltval[et] = newMpflt() - maxfltval[et] = newMpflt() } if isComplex[et] { @@ -310,31 +306,6 @@ func typeinit() { iscmp[OEQ] = true iscmp[ONE] = true - maxintval[TINT8].SetString("0x7f") - minintval[TINT8].SetString("-0x80") - maxintval[TINT16].SetString("0x7fff") - minintval[TINT16].SetString("-0x8000") - maxintval[TINT32].SetString("0x7fffffff") - minintval[TINT32].SetString("-0x80000000") - maxintval[TINT64].SetString("0x7fffffffffffffff") - minintval[TINT64].SetString("-0x8000000000000000") - - maxintval[TUINT8].SetString("0xff") - maxintval[TUINT16].SetString("0xffff") - maxintval[TUINT32].SetString("0xffffffff") - maxintval[TUINT64].SetString("0xffffffffffffffff") - - // f is valid float if min < f < max. (min and max are not themselves valid.) - maxfltval[TFLOAT32].SetString("33554431p103") // 2^24-1 p (127-23) + 1/2 ulp - minfltval[TFLOAT32].SetString("-33554431p103") - maxfltval[TFLOAT64].SetString("18014398509481983p970") // 2^53-1 p (1023-52) + 1/2 ulp - minfltval[TFLOAT64].SetString("-18014398509481983p970") - - maxfltval[TCOMPLEX64] = maxfltval[TFLOAT32] - minfltval[TCOMPLEX64] = minfltval[TFLOAT32] - maxfltval[TCOMPLEX128] = maxfltval[TFLOAT64] - minfltval[TCOMPLEX128] = minfltval[TFLOAT64] - types.Types[TINTER] = types.New(TINTER) // empty interface // simple aliases @@ -410,10 +381,6 @@ func lexinit1() { } simtype[s.etype] = sameas - minfltval[s.etype] = minfltval[sameas] - maxfltval[s.etype] = maxfltval[sameas] - minintval[s.etype] = minintval[sameas] - maxintval[s.etype] = maxintval[sameas] t := types.New(s.etype) t.Sym = s1 diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 9971fb0c0dd7f..b1bac06fd0c68 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -12,6 +12,7 @@ import ( "encoding/binary" "fmt" "go/constant" + "go/token" "strings" ) @@ -1002,7 +1003,7 @@ opswitch: break opswitch } case TUINT64: - c := uint64(n.Right.Int64Val()) + c := n.Right.Uint64Val() if c < 1<<16 { break opswitch } @@ -1062,7 +1063,7 @@ opswitch: } if Isconst(n.Right, constant.Int) { - if n.Right.Val().U.(*Mpint).CmpInt64(0) < 0 || doesoverflow(n.Right.Val(), types.Types[TINT]) { + if v := n.Right.Val(); constant.Sign(v) < 0 || doesoverflow(v, types.Types[TINT]) { yyerror("index out of bounds") } } @@ -1223,7 +1224,7 @@ opswitch: // Maximum key and elem size is 128 bytes, larger objects // are stored with an indirection. So max bucket size is 2048+eps. 
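In the makemap hunks below, the Isconst guard stays even though constant.Compare accepts any operands: a comparison involving an Unknown value simply reports false, so the guard is what distinguishes "not a constant" from "a constant that fails the test". A small sketch of that semantics, using a stand-in value since BUCKETSIZE is compiler-internal (illustrative, not part of this patch):

package main

import (
	"fmt"
	"go/constant"
	"go/token"
)

func main() {
	hint := constant.MakeInt64(4)
	bucketsize := constant.MakeInt64(8) // stand-in for the compiler's BUCKETSIZE
	fmt.Println(constant.Compare(hint, token.LEQ, bucketsize)) // true

	// Comparisons with an Unknown operand are always false, hence the
	// explicit Isconst check before Compare in the code below.
	fmt.Println(constant.Compare(constant.MakeUnknown(), token.LEQ, bucketsize)) // false
}
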
if !Isconst(hint, constant.Int) || - hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) <= 0 { + constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(BUCKETSIZE)) { // In case hint is larger than BUCKETSIZE runtime.makemap // will allocate the buckets on the heap, see #20184 @@ -1256,7 +1257,7 @@ opswitch: } } - if Isconst(hint, constant.Int) && hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) <= 0 { + if Isconst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(BUCKETSIZE)) { // Handling make(map[any]any) and // make(map[any]any, hint) where hint <= BUCKETSIZE // special allows for faster map initialization and @@ -1588,8 +1589,8 @@ opswitch: n = typecheck(n, ctxExpr) // Emit string symbol now to avoid emitting // any concurrently during the backend. - if s, ok := n.Val().U.(string); ok { - _ = stringsym(n.Pos, s) + if v := n.Val(); v.Kind() == constant.String { + _ = stringsym(n.Pos, constant.StringVal(v)) } } @@ -3841,17 +3842,14 @@ func candiscard(n *Node) bool { // Discardable as long as we know it's not division by zero. case ODIV, OMOD: - if Isconst(n.Right, constant.Int) && n.Right.Val().U.(*Mpint).CmpInt64(0) != 0 { - break - } - if Isconst(n.Right, constant.Float) && n.Right.Val().U.(*Mpflt).CmpFloat64(0) != 0 { + if n.Right.Op == OLITERAL && constant.Sign(n.Right.Val()) != 0 { break } return false // Discardable as long as we know it won't fail because of a bad size. case OMAKECHAN, OMAKEMAP: - if Isconst(n.Left, constant.Int) && n.Left.Val().U.(*Mpint).CmpInt64(0) == 0 { + if Isconst(n.Left, constant.Int) && constant.Sign(n.Left.Val()) == 0 { break } return false diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index 82db9e4dbcba6..f1a01b64daa17 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -1212,7 +1212,7 @@ func (t *Type) IsInteger() bool { case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TINT, TUINT, TUINTPTR: return true } - return false + return t == UntypedInt || t == UntypedRune } func (t *Type) IsSigned() bool { @@ -1223,12 +1223,20 @@ func (t *Type) IsSigned() bool { return false } +func (t *Type) IsUnsigned() bool { + switch t.Etype { + case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR: + return true + } + return false +} + func (t *Type) IsFloat() bool { - return t.Etype == TFLOAT32 || t.Etype == TFLOAT64 + return t.Etype == TFLOAT32 || t.Etype == TFLOAT64 || t == UntypedFloat } func (t *Type) IsComplex() bool { - return t.Etype == TCOMPLEX64 || t.Etype == TCOMPLEX128 + return t.Etype == TCOMPLEX64 || t.Etype == TCOMPLEX128 || t == UntypedComplex } // IsPtr reports whether t is a regular Go pointer type. 
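The predicate changes above fold the untyped kinds into IsInteger, IsFloat, and IsComplex, so most call sites now just switch on a value's Kind and unwrap it with go/constant's typed accessors, as litsym, isZero, and ssa.expr do in earlier hunks. A compact standalone sketch of that pattern, where describe is a hypothetical helper invented for illustration (not part of this patch):

package main

import (
	"fmt"
	"go/constant"
)

// describe mirrors the unwrap pattern used by litsym and friends:
// switch on Kind, then call the matching typed accessor.
func describe(v constant.Value) string {
	switch v.Kind() {
	case constant.Bool:
		return fmt.Sprintf("bool %v", constant.BoolVal(v))
	case constant.String:
		return fmt.Sprintf("string %q", constant.StringVal(v))
	case constant.Int:
		i, exact := constant.Int64Val(v)
		return fmt.Sprintf("int %d (exact %v)", i, exact)
	case constant.Float:
		f, _ := constant.Float64Val(v)
		return fmt.Sprintf("float %g", f)
	case constant.Complex:
		re, _ := constant.Float64Val(constant.Real(v))
		im, _ := constant.Float64Val(constant.Imag(v))
		return fmt.Sprintf("complex %g%+gi", re, im)
	}
	return "unknown"
}

func main() {
	fmt.Println(describe(constant.MakeString("hi")))                // string "hi"
	fmt.Println(describe(constant.MakeFloat64(0.5)))                // float 0.5
	fmt.Println(describe(constant.MakeImag(constant.MakeInt64(2)))) // complex 0+2i
}
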
diff --git a/test/fixedbugs/issue20232.go b/test/fixedbugs/issue20232.go index f91c74936b13f..fbe8cdebfb57f 100644 --- a/test/fixedbugs/issue20232.go +++ b/test/fixedbugs/issue20232.go @@ -6,6 +6,6 @@ package main -const _ = 6e5518446744 // ERROR "malformed constant: 6e5518446744 \(exponent overflow\)" +const _ = 6e5518446744 // ERROR "malformed constant: 6e5518446744" const _ = 1e-1000000000 -const _ = 1e+1000000000 // ERROR "constant too large" +const _ = 1e+1000000000 From 484449c6416662c5453257c641d015c1fca681ea Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 24 Nov 2020 12:35:33 -0800 Subject: [PATCH 027/474] [dev.regabi] cmd/compile: remove file mistakenly added by CL 272248 Change-Id: Ib27a2ab499960cda3bedfd6c1d10a4038c519df5 Reviewed-on: https://go-review.googlesource.com/c/go/+/272986 Trust: Matthew Dempsky Reviewed-by: Russ Cox --- src/_rex.20201123151057 | 1 - 1 file changed, 1 deletion(-) delete mode 100644 src/_rex.20201123151057 diff --git a/src/_rex.20201123151057 b/src/_rex.20201123151057 deleted file mode 100644 index 8b137891791fe..0000000000000 --- a/src/_rex.20201123151057 +++ /dev/null @@ -1 +0,0 @@ - From 4a6b4fd13965fe8428c9177bdd824a48dff553c0 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 24 Nov 2020 15:52:13 -0800 Subject: [PATCH 028/474] [dev.regabi] add FatalfAt and fix Fatalf docs I've wanted a FatalfAt function for a while, but under the old "-l" suffix naming convention it would have been called "Fatalfl", which is just atrocious. Change-Id: If87f692ecdff478769426d4b054ac396e5c1e42e Reviewed-on: https://go-review.googlesource.com/c/go/+/273013 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/print.go | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/src/cmd/compile/internal/gc/print.go b/src/cmd/compile/internal/gc/print.go index 1dbd58df42ada..52585814f6d68 100644 --- a/src/cmd/compile/internal/gc/print.go +++ b/src/cmd/compile/internal/gc/print.go @@ -177,23 +177,39 @@ func Warnl(pos src.XPos, format string, args ...interface{}) { } } -// Fatal reports a fatal error - an internal problem - at the current line and exits. -// If other errors have already been printed, then Fatal just quietly exits. +// Fatalf reports a fatal error - an internal problem - at the current line and exits. +// If other errors have already been printed, then Fatalf just quietly exits. // (The internal problem may have been caused by incomplete information // after the already-reported errors, so best to let users fix those and // try again without being bothered about a spurious internal error.) // // But if no errors have been printed, or if -d panic has been specified, -// Fatal prints the error as an "internal compiler error". In a released build, +// Fatalf prints the error as an "internal compiler error". In a released build, // it prints an error asking to file a bug report. In development builds, it // prints a stack trace. // -// If -h has been specified, Fatal panics to force the usual runtime info dump. +// If -h has been specified, Fatalf panics to force the usual runtime info dump. func Fatalf(format string, args ...interface{}) { + FatalfAt(lineno, format, args...) +} + +// FatalfAt reports a fatal error - an internal problem - at pos and exits. +// If other errors have already been printed, then FatalfAt just quietly exits. 
+// (The internal problem may have been caused by incomplete information +// after the already-reported errors, so best to let users fix those and +// try again without being bothered about a spurious internal error.) +// +// But if no errors have been printed, or if -d panic has been specified, +// FatalfAt prints the error as an "internal compiler error". In a released build, +// it prints an error asking to file a bug report. In development builds, it +// prints a stack trace. +// +// If -h has been specified, FatalfAt panics to force the usual runtime info dump. +func FatalfAt(pos src.XPos, format string, args ...interface{}) { flusherrors() if Debug_panic != 0 || numErrors == 0 { - fmt.Printf("%v: internal compiler error: ", linestr(lineno)) + fmt.Printf("%v: internal compiler error: ", linestr(pos)) fmt.Printf(format, args...) fmt.Printf("\n") From 9e0e43d84d1bb653a74ccc7f90a80dfa9c665fbf Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Tue, 24 Nov 2020 22:09:57 -0500 Subject: [PATCH 029/474] [dev.regabi] cmd/compile: remove uses of dummy Per https://developers.google.com/style/inclusive-documentation, since we are editing some of this code anyway and it is easier to put the cleanup in a separate CL. Change-Id: Ib6b851f43f9cc0a57676564477d4ff22abb1cee5 Reviewed-on: https://go-review.googlesource.com/c/go/+/273106 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/align.go | 2 +- src/cmd/compile/internal/gc/escape.go | 2 +- src/cmd/compile/internal/gc/init.go | 21 +++-- src/cmd/compile/internal/gc/main.go | 4 +- src/cmd/compile/internal/gc/pgen.go | 2 +- src/cmd/compile/internal/gc/phi.go | 2 +- src/cmd/compile/internal/gc/ssa.go | 8 +- src/cmd/compile/internal/gc/typecheck.go | 4 +- src/cmd/compile/internal/ssa/export_test.go | 92 +++++++++---------- src/cmd/compile/internal/ssa/poset.go | 54 +++++------ src/cmd/compile/internal/ssa/regalloc.go | 2 +- .../compile/internal/syntax/dumper_test.go | 2 +- src/cmd/compile/internal/syntax/nodes.go | 2 +- .../compile/internal/syntax/printer_test.go | 2 +- src/cmd/compile/internal/types/type.go | 2 +- 15 files changed, 100 insertions(+), 101 deletions(-) diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go index 1f7631d19900e..563bd5030c07b 100644 --- a/src/cmd/compile/internal/gc/align.go +++ b/src/cmd/compile/internal/gc/align.go @@ -392,7 +392,7 @@ func dowidth(t *types.Type) { w = 1 // anything will do case TANY: - // dummy type; should be replaced before use. + // not a real type; should be replaced before use. Fatalf("dowidth any") case TSTRING: diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 497151d02f33f..50674e1a1a3c3 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -574,7 +574,7 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) { // parameters all flow to the heap. // // TODO(mdempsky): Change ks into a callback, so that - // we don't have to create this dummy slice? + // we don't have to create this slice? var ks []EscHole for i := m.Type.NumResults(); i > 0; i-- { ks = append(ks, e.heapHole()) diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index ec9cc4bddc54b..c3b66a2ad2aae 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -15,8 +15,9 @@ import ( // the name, normally "pkg.init", is altered to "pkg.init.0". var renameinitgen int -// Dummy function for autotmps generated during typechecking. 
-var dummyInitFn = nod(ODCLFUNC, nil, nil) +// Function collecting autotmps generated during typechecking, +// to be included in the package-level init function. +var initTodo = nod(ODCLFUNC, nil, nil) func renameinit() *types.Sym { s := lookupN("init.", renameinitgen) @@ -46,11 +47,11 @@ func fninit(n []*Node) { lineno = nf[0].Pos // prolog/epilog gets line number of first init stmt initializers := lookup("init") fn := dclfunc(initializers, nod(OTFUNC, nil, nil)) - for _, dcl := range dummyInitFn.Func.Dcl { + for _, dcl := range initTodo.Func.Dcl { dcl.Name.Curfn = fn } - fn.Func.Dcl = append(fn.Func.Dcl, dummyInitFn.Func.Dcl...) - dummyInitFn.Func.Dcl = nil + fn.Func.Dcl = append(fn.Func.Dcl, initTodo.Func.Dcl...) + initTodo.Func.Dcl = nil fn.Nbody.Set(nf) funcbody() @@ -62,13 +63,13 @@ func fninit(n []*Node) { xtop = append(xtop, fn) fns = append(fns, initializers.Linksym()) } - if dummyInitFn.Func.Dcl != nil { - // We only generate temps using dummyInitFn if there + if initTodo.Func.Dcl != nil { + // We only generate temps using initTodo if there // are package-scope initialization statements, so // something's weird if we get here. - Fatalf("dummyInitFn still has declarations") + Fatalf("initTodo still has declarations") } - dummyInitFn = nil + initTodo = nil // Record user init functions. for i := 0; i < renameinitgen; i++ { @@ -88,7 +89,7 @@ func fninit(n []*Node) { // Make an .inittask structure. sym := lookup(".inittask") nn := newname(sym) - nn.Type = types.Types[TUINT8] // dummy type + nn.Type = types.Types[TUINT8] // fake type nn.SetClass(PEXTERN) sym.Def = asTypesNode(nn) exportsym(nn) diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index fca1334a194b7..428bf31fa96c4 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -1254,9 +1254,7 @@ func importfile(f constant.Value) *types.Pkg { } } - // In the importfile, if we find: - // $$\n (textual format): not supported anymore - // $$B\n (binary format) : import directly, then feed the lexer a dummy statement + // Expect $$B\n to signal binary import format. // look for $$ var c byte diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 0f0f6b7107478..7c1d5543e312a 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -459,7 +459,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn.Func, apdecls) // For each type referenced by the functions auto vars but not - // already referenced by a dwarf var, attach a dummy relocation to + // already referenced by a dwarf var, attach an R_USETYPE relocation to // the function symbol to insure that the type included in DWARF // processing during linking. typesyms := []*obj.LSym{} diff --git a/src/cmd/compile/internal/gc/phi.go b/src/cmd/compile/internal/gc/phi.go index 5218cd0ef3d14..4beaa11a7e3f3 100644 --- a/src/cmd/compile/internal/gc/phi.go +++ b/src/cmd/compile/internal/gc/phi.go @@ -59,7 +59,7 @@ type phiState struct { hasDef *sparseSet // has a write of the variable we're processing // miscellaneous - placeholder *ssa.Value // dummy value to use as a "not set yet" placeholder. + placeholder *ssa.Value // value to use as a "not set yet" placeholder. 
} func (s *phiState) insertPhis() { diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 7a8dda2938166..f196bee4a225a 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -692,10 +692,10 @@ func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() } var ( - // dummy node for the memory variable + // marker node for the memory variable memVar = Node{Op: ONAME, Sym: &types.Sym{Name: "mem"}} - // dummy nodes for temporary variables + // marker nodes for temporary variables ptrVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ptr"}} lenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "len"}} newlenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "newlen"}} @@ -4793,7 +4793,7 @@ func (s *state) getMethodClosure(fn *Node) *ssa.Value { n2.SetClass(PFUNC) // n2.Sym already existed, so it's already marked as a function. n2.Pos = fn.Pos - n2.Type = types.Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it. + n2.Type = types.Types[TUINT8] // fake type for a static closure. Could use runtime.funcval if we had it. return s.expr(n2) } @@ -6054,7 +6054,7 @@ func (s *state) mem() *ssa.Value { func (s *state) addNamedValue(n *Node, v *ssa.Value) { if n.Class() == Pxxx { - // Don't track our dummy nodes (&memVar etc.). + // Don't track our marker nodes (&memVar etc.). return } if n.IsAutoTmp() { diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index d1bc781a54054..9cc1dee773ad9 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -2153,11 +2153,11 @@ func typecheckargs(n *Node) { // If we're outside of function context, then this call will // be executed during the generated init function. However, // init.go hasn't yet created it. Instead, associate the - // temporary variables with dummyInitFn for now, and init.go + // temporary variables with initTodo for now, and init.go // will reassociate them later when it's appropriate. static := Curfn == nil if static { - Curfn = dummyInitFn + Curfn = initTodo } for _, f := range t.FieldSlice() { t := temp(f.Type) diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go index b4c3e5cfdfb73..bfe94ff160273 100644 --- a/src/cmd/compile/internal/ssa/export_test.go +++ b/src/cmd/compile/internal/ssa/export_test.go @@ -36,10 +36,10 @@ func testConfigArch(tb testing.TB, arch string) *Conf { tb.Fatalf("unknown arch %s", arch) } if ctxt.Arch.PtrSize != 8 { - tb.Fatal("dummyTypes is 64-bit only") + tb.Fatal("testTypes is 64-bit only") } c := &Conf{ - config: NewConfig(arch, dummyTypes, ctxt, true), + config: NewConfig(arch, testTypes, ctxt, true), tb: tb, } return c @@ -53,108 +53,108 @@ type Conf struct { func (c *Conf) Frontend() Frontend { if c.fe == nil { - c.fe = DummyFrontend{t: c.tb, ctxt: c.config.ctxt} + c.fe = TestFrontend{t: c.tb, ctxt: c.config.ctxt} } return c.fe } -// DummyFrontend is a test-only frontend. +// TestFrontend is a test-only frontend. // It assumes 64 bit integers and pointers. 
-type DummyFrontend struct { +type TestFrontend struct { t testing.TB ctxt *obj.Link } -type DummyAuto struct { +type TestAuto struct { t *types.Type s string } -func (d *DummyAuto) Typ() *types.Type { +func (d *TestAuto) Typ() *types.Type { return d.t } -func (d *DummyAuto) String() string { +func (d *TestAuto) String() string { return d.s } -func (d *DummyAuto) StorageClass() StorageClass { +func (d *TestAuto) StorageClass() StorageClass { return ClassAuto } -func (d *DummyAuto) IsSynthetic() bool { +func (d *TestAuto) IsSynthetic() bool { return false } -func (d *DummyAuto) IsAutoTmp() bool { +func (d *TestAuto) IsAutoTmp() bool { return true } -func (DummyFrontend) StringData(s string) *obj.LSym { +func (TestFrontend) StringData(s string) *obj.LSym { return nil } -func (DummyFrontend) Auto(pos src.XPos, t *types.Type) GCNode { - return &DummyAuto{t: t, s: "aDummyAuto"} +func (TestFrontend) Auto(pos src.XPos, t *types.Type) GCNode { + return &TestAuto{t: t, s: "aTestAuto"} } -func (d DummyFrontend) SplitString(s LocalSlot) (LocalSlot, LocalSlot) { - return LocalSlot{N: s.N, Type: dummyTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.Int, Off: s.Off + 8} +func (d TestFrontend) SplitString(s LocalSlot) (LocalSlot, LocalSlot) { + return LocalSlot{N: s.N, Type: testTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.Int, Off: s.Off + 8} } -func (d DummyFrontend) SplitInterface(s LocalSlot) (LocalSlot, LocalSlot) { - return LocalSlot{N: s.N, Type: dummyTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.BytePtr, Off: s.Off + 8} +func (d TestFrontend) SplitInterface(s LocalSlot) (LocalSlot, LocalSlot) { + return LocalSlot{N: s.N, Type: testTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.BytePtr, Off: s.Off + 8} } -func (d DummyFrontend) SplitSlice(s LocalSlot) (LocalSlot, LocalSlot, LocalSlot) { +func (d TestFrontend) SplitSlice(s LocalSlot) (LocalSlot, LocalSlot, LocalSlot) { return LocalSlot{N: s.N, Type: s.Type.Elem().PtrTo(), Off: s.Off}, - LocalSlot{N: s.N, Type: dummyTypes.Int, Off: s.Off + 8}, - LocalSlot{N: s.N, Type: dummyTypes.Int, Off: s.Off + 16} + LocalSlot{N: s.N, Type: testTypes.Int, Off: s.Off + 8}, + LocalSlot{N: s.N, Type: testTypes.Int, Off: s.Off + 16} } -func (d DummyFrontend) SplitComplex(s LocalSlot) (LocalSlot, LocalSlot) { +func (d TestFrontend) SplitComplex(s LocalSlot) (LocalSlot, LocalSlot) { if s.Type.Size() == 16 { - return LocalSlot{N: s.N, Type: dummyTypes.Float64, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.Float64, Off: s.Off + 8} + return LocalSlot{N: s.N, Type: testTypes.Float64, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.Float64, Off: s.Off + 8} } - return LocalSlot{N: s.N, Type: dummyTypes.Float32, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.Float32, Off: s.Off + 4} + return LocalSlot{N: s.N, Type: testTypes.Float32, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.Float32, Off: s.Off + 4} } -func (d DummyFrontend) SplitInt64(s LocalSlot) (LocalSlot, LocalSlot) { +func (d TestFrontend) SplitInt64(s LocalSlot) (LocalSlot, LocalSlot) { if s.Type.IsSigned() { - return LocalSlot{N: s.N, Type: dummyTypes.Int32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: dummyTypes.UInt32, Off: s.Off} + return LocalSlot{N: s.N, Type: testTypes.Int32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: testTypes.UInt32, Off: s.Off} } - return LocalSlot{N: s.N, Type: dummyTypes.UInt32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: dummyTypes.UInt32, Off: s.Off} + return LocalSlot{N: s.N, Type: testTypes.UInt32, Off: s.Off + 4}, 
LocalSlot{N: s.N, Type: testTypes.UInt32, Off: s.Off} } -func (d DummyFrontend) SplitStruct(s LocalSlot, i int) LocalSlot { +func (d TestFrontend) SplitStruct(s LocalSlot, i int) LocalSlot { return LocalSlot{N: s.N, Type: s.Type.FieldType(i), Off: s.Off + s.Type.FieldOff(i)} } -func (d DummyFrontend) SplitArray(s LocalSlot) LocalSlot { +func (d TestFrontend) SplitArray(s LocalSlot) LocalSlot { return LocalSlot{N: s.N, Type: s.Type.Elem(), Off: s.Off} } -func (d DummyFrontend) SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot { +func (d TestFrontend) SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot { return LocalSlot{N: parent.N, Type: t, Off: offset} } -func (DummyFrontend) Line(_ src.XPos) string { +func (TestFrontend) Line(_ src.XPos) string { return "unknown.go:0" } -func (DummyFrontend) AllocFrame(f *Func) { +func (TestFrontend) AllocFrame(f *Func) { } -func (d DummyFrontend) Syslook(s string) *obj.LSym { +func (d TestFrontend) Syslook(s string) *obj.LSym { return d.ctxt.Lookup(s) } -func (DummyFrontend) UseWriteBarrier() bool { +func (TestFrontend) UseWriteBarrier() bool { return true // only writebarrier_test cares } -func (DummyFrontend) SetWBPos(pos src.XPos) { +func (TestFrontend) SetWBPos(pos src.XPos) { } -func (d DummyFrontend) Logf(msg string, args ...interface{}) { d.t.Logf(msg, args...) } -func (d DummyFrontend) Log() bool { return true } +func (d TestFrontend) Logf(msg string, args ...interface{}) { d.t.Logf(msg, args...) } +func (d TestFrontend) Log() bool { return true } -func (d DummyFrontend) Fatalf(_ src.XPos, msg string, args ...interface{}) { d.t.Fatalf(msg, args...) } -func (d DummyFrontend) Warnl(_ src.XPos, msg string, args ...interface{}) { d.t.Logf(msg, args...) } -func (d DummyFrontend) Debug_checknil() bool { return false } +func (d TestFrontend) Fatalf(_ src.XPos, msg string, args ...interface{}) { d.t.Fatalf(msg, args...) } +func (d TestFrontend) Warnl(_ src.XPos, msg string, args ...interface{}) { d.t.Logf(msg, args...) } +func (d TestFrontend) Debug_checknil() bool { return false } -func (d DummyFrontend) MyImportPath() string { +func (d TestFrontend) MyImportPath() string { return "my/import/path" } -var dummyTypes Types +var testTypes Types func init() { // Initialize just enough of the universe and the types package to make our tests function. @@ -198,12 +198,12 @@ func init() { t.Align = uint8(typ.width) types.Types[typ.et] = t } - dummyTypes.SetTypPtrs() + testTypes.SetTypPtrs() } -func (d DummyFrontend) DerefItab(sym *obj.LSym, off int64) *obj.LSym { return nil } +func (d TestFrontend) DerefItab(sym *obj.LSym, off int64) *obj.LSym { return nil } -func (d DummyFrontend) CanSSA(t *types.Type) bool { - // There are no un-SSAable types in dummy land. +func (d TestFrontend) CanSSA(t *types.Type) bool { + // There are no un-SSAable types in test land. return true } diff --git a/src/cmd/compile/internal/ssa/poset.go b/src/cmd/compile/internal/ssa/poset.go index f5a2b3a8c2104..1e04b48ba4612 100644 --- a/src/cmd/compile/internal/ssa/poset.go +++ b/src/cmd/compile/internal/ssa/poset.go @@ -136,13 +136,13 @@ type posetNode struct { // Most internal data structures are pre-allocated and flat, so for instance adding a // new relation does not cause any allocation. For performance reasons, // each node has only up to two outgoing edges (like a binary tree), so intermediate -// "dummy" nodes are required to represent more than two relations. 
For instance, +// "extra" nodes are required to represent more than two relations. For instance, // to record that A<I1, A<I2, A<I3, extra nodes are chained in: // -// dummy +// extra // / \ // r i1 // | // i2 // - dummy := po.newnode(nil) - po.changeroot(r, dummy) - po.upush(undoChangeRoot, dummy, newedge(r, false)) - po.addchild(dummy, r, false) - po.addchild(dummy, i1, false) + extra := po.newnode(nil) + po.changeroot(r, extra) + po.upush(undoChangeRoot, extra, newedge(r, false)) + po.addchild(extra, r, false) + po.addchild(extra, i1, false) po.addchild(i1, i2, strict) case f1 && f2: diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index 0339b073ae15f..4ed884c3e7cc1 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -104,7 +104,7 @@ // If b3 is the primary predecessor of b2, then we use x3 in b2 and // add a x4:CX->BX copy at the end of b4. // But the definition of x3 doesn't dominate b2. We should really -// insert a dummy phi at the start of b2 (x5=phi(x3,x4):BX) to keep +// insert an extra phi at the start of b2 (x5=phi(x3,x4):BX) to keep // SSA form. For now, we ignore this problem as remaining in strict // SSA form isn't needed after regalloc. We'll just leave the use // of x3 not dominated by the definition of x3, and the CX->BX copy diff --git a/src/cmd/compile/internal/syntax/dumper_test.go b/src/cmd/compile/internal/syntax/dumper_test.go index f84bd2d7056f0..22680dce786c5 100644 --- a/src/cmd/compile/internal/syntax/dumper_test.go +++ b/src/cmd/compile/internal/syntax/dumper_test.go @@ -13,7 +13,7 @@ func TestDump(t *testing.T) { t.Skip("skipping test in short mode") } - // provide a dummy error handler so parsing doesn't stop after first error + // provide a no-op error handler so parsing doesn't stop after first error ast, err := ParseFile(*src_, func(error) {}, nil, CheckBranches) if err != nil { t.Error(err) } diff --git a/src/cmd/compile/internal/syntax/nodes.go b/src/cmd/compile/internal/syntax/nodes.go index 815630fcd412e..487cab19fef9c 100644 --- a/src/cmd/compile/internal/syntax/nodes.go +++ b/src/cmd/compile/internal/syntax/nodes.go @@ -114,7 +114,7 @@ func (*decl) aDecl() {} // All declarations belonging to the same group point to the same Group node. type Group struct { - dummy int // not empty so we are guaranteed different Group instances + _ int // not empty so we are guaranteed different Group instances } // ---------------------------------------------------------------------------- diff --git a/src/cmd/compile/internal/syntax/printer_test.go b/src/cmd/compile/internal/syntax/printer_test.go index c3b9aca229c33..fe72e7a374fb8 100644 --- a/src/cmd/compile/internal/syntax/printer_test.go +++ b/src/cmd/compile/internal/syntax/printer_test.go @@ -18,7 +18,7 @@ func TestPrint(t *testing.T) { t.Skip("skipping test in short mode") } - // provide a dummy error handler so parsing doesn't stop after first error + // provide a no-op error handler so parsing doesn't stop after first error ast, err := ParseFile(*src_, func(error) {}, nil, 0) if err != nil { t.Error(err) } diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index f1a01b64daa17..b93409aac1a3c 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -10,7 +10,7 @@ import ( "fmt" ) -// Dummy Node so we can refer to *Node without actually +// Our own “Node” so we can refer to *gc.Node without actually // having a gc.Node. Necessary to break import cycles.
// TODO(gri) try to eliminate soon type Node struct{ _ int } From 9262909764ea63285805c87f8d41837a532fda62 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sun, 22 Nov 2020 12:09:08 -0500 Subject: [PATCH 030/474] [dev.regabi] cmd/compile: rewrite problematic use of Node fields For the upcoming rewrite to access methods, a few direct accesses are problematic for the automated tool, most notably direct copies or use of Node structs as opposed to pointers. Fix these manually. Passes toolstash -cmp. Change-Id: I8bdbb33216737c09e1edda284d5c414422d86284 Reviewed-on: https://go-review.googlesource.com/c/go/+/273006 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/esc.go | 4 +- src/cmd/compile/internal/gc/escape.go | 10 +- src/cmd/compile/internal/gc/iimport.go | 10 +- src/cmd/compile/internal/gc/sinit.go | 40 ++-- src/cmd/compile/internal/gc/ssa.go | 224 +++++++++++------------ src/cmd/compile/internal/gc/typecheck.go | 18 +- 6 files changed, 156 insertions(+), 150 deletions(-) diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go index c4159101f2008..6003f6608cc70 100644 --- a/src/cmd/compile/internal/gc/esc.go +++ b/src/cmd/compile/internal/gc/esc.go @@ -53,8 +53,8 @@ func funcSym(fn *Node) *types.Sym { // Walk hasn't generated (goto|label).Left.Sym.Label yet, so we'll cheat // and set it to one of the following two. Then in esc we'll clear it again. var ( - looping Node - nonlooping Node + looping = nod(OXXX, nil, nil) + nonlooping = nod(OXXX, nil, nil) ) func isSliceSelfAssign(dst, src *Node) bool { diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 50674e1a1a3c3..b6975c79a40d6 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -227,13 +227,13 @@ func (e *Escape) walkFunc(fn *Node) { inspectList(fn.Nbody, func(n *Node) bool { switch n.Op { case OLABEL: - n.Sym.Label = asTypesNode(&nonlooping) + n.Sym.Label = asTypesNode(nonlooping) case OGOTO: // If we visited the label before the goto, // then this is a looping label. 
- if n.Sym.Label == asTypesNode(&nonlooping) { - n.Sym.Label = asTypesNode(&looping) + if n.Sym.Label == asTypesNode(nonlooping) { + n.Sym.Label = asTypesNode(looping) } } @@ -309,11 +309,11 @@ func (e *Escape) stmt(n *Node) { case OLABEL: switch asNode(n.Sym.Label) { - case &nonlooping: + case nonlooping: if Debug.m > 2 { fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n) } - case &looping: + case looping: if Debug.m > 2 { fmt.Printf("%v: %v looping label\n", linestr(lineno), n) } diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 3f50a940619b4..352335a993aef 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -839,7 +839,8 @@ func (r *importReader) node() *Node { if s := r.ident(); s != nil { n.Left = npos(n.Pos, newnoname(s)) } - n.Right, _ = r.exprsOrNil() + right, _ := r.exprsOrNil() + n.Right = right return n // case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC: @@ -1021,7 +1022,9 @@ func (r *importReader) node() *Node { case OFOR: n := nodl(r.pos(), OFOR, nil, nil) n.Ninit.Set(r.stmtList()) - n.Left, n.Right = r.exprsOrNil() + left, right := r.exprsOrNil() + n.Left = left + n.Right = right n.Nbody.Set(r.stmtList()) return n @@ -1035,7 +1038,8 @@ func (r *importReader) node() *Node { case OSELECT, OSWITCH: n := nodl(r.pos(), op, nil, nil) n.Ninit.Set(r.stmtList()) - n.Left, _ = r.exprsOrNil() + left, _ := r.exprsOrNil() + n.Left = left n.List.Set(r.caseList(n)) return n diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 6da3c5e10b4bd..e15d558a78245 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -186,9 +186,8 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool { return true case OADDR: - var nam Node - if stataddr(&nam, r.Left) { - addrsym(l, &nam) + if nam := stataddr(r.Left); nam != nil { + addrsym(l, nam) return true } fallthrough @@ -609,11 +608,11 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) { // copy static to slice var_ = typecheck(var_, ctxExpr|ctxAssign) - var nam Node - if !stataddr(&nam, var_) || nam.Class() != PEXTERN { + nam := stataddr(var_) + if nam == nil || nam.Class() != PEXTERN { Fatalf("slicelit: %v", var_) } - slicesym(&nam, vstat, t.NumElem()) + slicesym(nam, vstat, t.NumElem()) return } @@ -1001,30 +1000,31 @@ func getlit(lit *Node) int { return -1 } -// stataddr sets nam to the static address of n and reports whether it succeeded. -func stataddr(nam *Node, n *Node) bool { +// stataddr returns the static address of n, if n has one, or else nil. 
+func stataddr(n *Node) *Node { if n == nil { - return false + return nil } switch n.Op { case ONAME, OMETHEXPR: - *nam = *n - return true + return n.sepcopy() case ODOT: - if !stataddr(nam, n.Left) { + nam := stataddr(n.Left) + if nam == nil { break } nam.Xoffset += n.Xoffset nam.Type = n.Type - return true + return nam case OINDEX: if n.Left.Type.IsSlice() { break } - if !stataddr(nam, n.Left) { + nam := stataddr(n.Left) + if nam == nil { break } l := getlit(n.Right) @@ -1038,10 +1038,10 @@ func stataddr(nam *Node, n *Node) bool { } nam.Xoffset += int64(l) * n.Type.Width nam.Type = n.Type - return true + return nam } - return false + return nil } func (s *InitSchedule) initplan(n *Node) { @@ -1158,16 +1158,16 @@ func genAsStatic(as *Node) { Fatalf("genAsStatic as.Left not typechecked") } - var nam Node - if !stataddr(&nam, as.Left) || (nam.Class() != PEXTERN && as.Left != nblank) { + nam := stataddr(as.Left) + if nam == nil || (nam.Class() != PEXTERN && as.Left != nblank) { Fatalf("genAsStatic: lhs %v", as.Left) } switch { case as.Right.Op == OLITERAL: - litsym(&nam, as.Right, int(as.Right.Type.Width)) + litsym(nam, as.Right, int(as.Right.Type.Width)) case (as.Right.Op == ONAME || as.Right.Op == OMETHEXPR) && as.Right.Class() == PFUNC: - pfuncsym(&nam, as.Right) + pfuncsym(nam, as.Right) default: Fatalf("genAsStatic: rhs %v", as.Right) } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index f196bee4a225a..f00f5d94a169c 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -388,7 +388,7 @@ func buildssa(fn *Node, worker int) *ssa.Func { s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR]) s.startBlock(s.f.Entry) - s.vars[&memVar] = s.startmem + s.vars[memVar] = s.startmem if s.hasOpenDefers { // Create the deferBits variable and stack slot. deferBits is a // bitmask showing which of the open-coded defers in this function @@ -397,7 +397,7 @@ func buildssa(fn *Node, worker int) *ssa.Func { s.deferBitsTemp = deferBitsTemp // For this value, AuxInt is initialized to zero by default startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[TUINT8]) - s.vars[&deferBitsVar] = startDeferBits + s.vars[deferBitsVar] = startDeferBits s.deferBitsAddr = s.addr(deferBitsTemp) s.store(types.Types[TUINT8], s.deferBitsAddr, startDeferBits) // Make sure that the deferBits stack slot is kept alive (for use @@ -405,7 +405,7 @@ func buildssa(fn *Node, worker int) *ssa.Func { // all checking code on deferBits in the function exit can be // eliminated, because the defer statements were all // unconditional. - s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false) + s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false) } // Generate addresses of local declarations @@ -691,18 +691,22 @@ func (s *state) Fatalf(msg string, args ...interface{}) { func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) 
} func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() } +func ssaMarker(name string) *Node { + return newname(&types.Sym{Name: name}) +} + var ( // marker node for the memory variable - memVar = Node{Op: ONAME, Sym: &types.Sym{Name: "mem"}} + memVar = ssaMarker("mem") // marker nodes for temporary variables - ptrVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ptr"}} - lenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "len"}} - newlenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "newlen"}} - capVar = Node{Op: ONAME, Sym: &types.Sym{Name: "cap"}} - typVar = Node{Op: ONAME, Sym: &types.Sym{Name: "typ"}} - okVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ok"}} - deferBitsVar = Node{Op: ONAME, Sym: &types.Sym{Name: "deferBits"}} + ptrVar = ssaMarker("ptr") + lenVar = ssaMarker("len") + newlenVar = ssaMarker("newlen") + capVar = ssaMarker("cap") + typVar = ssaMarker("typ") + okVar = ssaMarker("ok") + deferBitsVar = ssaMarker("deferBits") ) // startBlock sets the current block we're generating code in to b. @@ -1027,14 +1031,14 @@ func (s *state) rawLoad(t *types.Type, src *ssa.Value) *ssa.Value { } func (s *state) store(t *types.Type, dst, val *ssa.Value) { - s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem()) + s.vars[memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem()) } func (s *state) zero(t *types.Type, dst *ssa.Value) { s.instrument(t, dst, true) store := s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), dst, s.mem()) store.Aux = t - s.vars[&memVar] = store + s.vars[memVar] = store } func (s *state) move(t *types.Type, dst, src *ssa.Value) { @@ -1042,7 +1046,7 @@ func (s *state) move(t *types.Type, dst, src *ssa.Value) { s.instrument(t, dst, true) store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem()) store.Aux = t - s.vars[&memVar] = store + s.vars[memVar] = store } // stmtList converts the statement list n to SSA and adds it to s. @@ -1509,7 +1513,7 @@ func (s *state) stmt(n *Node) { case OVARDEF: if !s.canSSA(n.Left) { - s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left, s.mem(), false) + s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left, s.mem(), false) } case OVARKILL: // Insert a varkill op to record that a variable is no longer live. @@ -1517,7 +1521,7 @@ func (s *state) stmt(n *Node) { // varkill in the store chain is enough to keep it correctly ordered // with respect to call ops. if !s.canSSA(n.Left) { - s.vars[&memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left, s.mem(), false) + s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left, s.mem(), false) } case OVARLIVE: @@ -1530,7 +1534,7 @@ func (s *state) stmt(n *Node) { default: s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left) } - s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem()) + s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem()) case OCHECKNIL: p := s.expr(n.Left) @@ -1576,7 +1580,7 @@ func (s *state) exit() *ssa.Block { for _, n := range s.returns { addr := s.decladdrs[n] val := s.variable(n, n.Type) - s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem()) + s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem()) s.store(n.Type, addr, val) // TODO: if val is ever spilled, we'd like to use the // PPARAMOUT slot for spilling it. 
That won't happen @@ -2843,14 +2847,14 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value { c := s.newValue1(ssa.OpSliceCap, types.Types[TINT], slice) nl := s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs)) - cmp := s.newValue2(s.ssaOp(OLT, types.Types[TUINT]), types.Types[TBOOL], c, nl) - s.vars[&ptrVar] = p + cmp := s.newValue2(s.ssaOp(OLT, types.Types[TUINT]), types.Types[types.TBOOL], c, nl) + s.vars[ptrVar] = p if !inplace { - s.vars[&newlenVar] = nl - s.vars[&capVar] = c + s.vars[newlenVar] = nl + s.vars[capVar] = c } else { - s.vars[&lenVar] = l + s.vars[lenVar] = l } b := s.endBlock() @@ -2868,18 +2872,18 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value { if inplace { if sn.Op == ONAME && sn.Class() != PEXTERN { // Tell liveness we're about to build a new slice - s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem()) + s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem()) } capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceCapOffset, addr) s.store(types.Types[TINT], capaddr, r[2]) s.store(pt, addr, r[0]) // load the value we just stored to avoid having to spill it - s.vars[&ptrVar] = s.load(pt, addr) - s.vars[&lenVar] = r[1] // avoid a spill in the fast path + s.vars[ptrVar] = s.load(pt, addr) + s.vars[lenVar] = r[1] // avoid a spill in the fast path } else { - s.vars[&ptrVar] = r[0] - s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], r[1], s.constInt(types.Types[TINT], nargs)) - s.vars[&capVar] = r[2] + s.vars[ptrVar] = r[0] + s.vars[newlenVar] = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], r[1], s.constInt(types.Types[TINT], nargs)) + s.vars[capVar] = r[2] } b = s.endBlock() @@ -2889,7 +2893,7 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value { s.startBlock(assign) if inplace { - l = s.variable(&lenVar, types.Types[TINT]) // generates phi for len + l = s.variable(lenVar, types.Types[TINT]) // generates phi for len nl = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs)) lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceLenOffset, addr) s.store(types.Types[TINT], lenaddr, nl) @@ -2912,10 +2916,10 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value { } } - p = s.variable(&ptrVar, pt) // generates phi for ptr + p = s.variable(ptrVar, pt) // generates phi for ptr if !inplace { - nl = s.variable(&newlenVar, types.Types[TINT]) // generates phi for nl - c = s.variable(&capVar, types.Types[TINT]) // generates phi for cap + nl = s.variable(newlenVar, types.Types[TINT]) // generates phi for nl + c = s.variable(capVar, types.Types[TINT]) // generates phi for cap } p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l) for i, arg := range args { @@ -2927,13 +2931,13 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value { } } - delete(s.vars, &ptrVar) + delete(s.vars, ptrVar) if inplace { - delete(s.vars, &lenVar) + delete(s.vars, lenVar) return nil } - delete(s.vars, &newlenVar) - delete(s.vars, &capVar) + delete(s.vars, newlenVar) + delete(s.vars, capVar) // make result return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c) } @@ -3074,7 +3078,7 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) // If this assignment clobbers an entire local variable, then emit // OpVarDef so liveness analysis knows the variable is redefined. 
if base := clobberBase(left); base.Op == ONAME && base.Class() != PEXTERN && skip == 0 { - s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !base.IsAutoTmp()) + s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !base.IsAutoTmp()) } // Left is not ssa-able. Compute its address. @@ -3332,7 +3336,7 @@ func init() { add("runtime", "KeepAlive", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0]) - s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem()) + s.vars[memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem()) return nil }, all...) @@ -3380,79 +3384,79 @@ func init() { addF("runtime/internal/atomic", "Load", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem()) - s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) }, sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Load8", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[TUINT8], types.TypeMem), args[0], s.mem()) - s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT8], v) }, sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Load64", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem()) - s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) }, sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "LoadAcq", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem()) - s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) }, sys.PPC64, sys.S390X) addF("runtime/internal/atomic", "LoadAcq64", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem()) - s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) }, sys.PPC64) addF("runtime/internal/atomic", "Loadp", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem()) - s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v) }, sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Store", func(s *state, n *Node, args 
[]*ssa.Value) *ssa.Value { - s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem()) + s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Store8", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem()) + s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Store64", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem()) + s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "StorepNoWB", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem()) + s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "StoreRel", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - s.vars[&memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem()) + s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.PPC64, sys.S390X) addF("runtime/internal/atomic", "StoreRel64", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - s.vars[&memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem()) + s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.PPC64) @@ -3460,14 +3464,14 @@ func init() { addF("runtime/internal/atomic", "Xchg", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem()) - s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) }, sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Xchg64", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem()) - s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) }, sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) @@ -3512,7 +3516,7 @@ func init() { atomicXchgXaddEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) { v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem()) - s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v) } addF("runtime/internal/atomic", "Xchg", @@ -3525,14 
+3529,14 @@ func init() { addF("runtime/internal/atomic", "Xadd", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem()) - s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) }, sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Xadd64", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem()) - s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) }, sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) @@ -3546,29 +3550,29 @@ func init() { addF("runtime/internal/atomic", "Cas", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) - s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) - return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v) + v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v) }, sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Cas64", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) - s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) - return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v) + v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v) }, sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "CasRel", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) - s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) - return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v) + v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v) }, sys.PPC64) atomicCasEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) { - v := s.newValue4(op, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) - s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v) } @@ -3581,31 
+3585,31 @@ func init() { addF("runtime/internal/atomic", "And8", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem()) + s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X) addF("runtime/internal/atomic", "And", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem()) + s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X) addF("runtime/internal/atomic", "Or8", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem()) + s.vars[memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X) addF("runtime/internal/atomic", "Or", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem()) + s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X) atomicAndOrEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) { - s.vars[&memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem()) + s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem()) } addF("runtime/internal/atomic", "And8", @@ -4274,8 +4278,8 @@ func (s *state) openDeferRecord(n *Node) { // Update deferBits only after evaluation and storage to stack of // args/receiver/interface is successful. bitvalue := s.constInt8(types.Types[TUINT8], 1< empty // Need to load type from itab off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab) - s.vars[&typVar] = s.load(byteptr, off) + s.vars[typVar] = s.load(byteptr, off) s.endBlock() // itab is nil, might as well use that as the nil result. s.startBlock(bFail) - s.vars[&typVar] = itab + s.vars[typVar] = itab s.endBlock() // Merge point. @@ -5894,9 +5898,9 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { bFail.AddEdgeTo(bEnd) s.startBlock(bEnd) idata := s.newValue1(ssa.OpIData, n.Type, iface) - res = s.newValue2(ssa.OpIMake, n.Type, s.variable(&typVar, byteptr), idata) + res = s.newValue2(ssa.OpIMake, n.Type, s.variable(typVar, byteptr), idata) resok = cond - delete(s.vars, &typVar) + delete(s.vars, typVar) return } // converting to a nonempty interface needs a runtime call. @@ -5942,7 +5946,7 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { // unSSAable type, use temporary. // TODO: get rid of some of these temporaries. tmp = tempAt(n.Pos, s.curfn, n.Type) - s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem()) + s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem()) addr = s.addr(tmp) } @@ -5981,7 +5985,7 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { bEnd := s.f.NewBlock(ssa.BlockPlain) // Note that we need a new valVar each time (unlike okVar where we can // reuse the variable) because it might have a different type every time. 
- valVar := &Node{Op: ONAME, Sym: &types.Sym{Name: "val"}} + valVar := ssaMarker("val") // type assertion succeeded s.startBlock(bOk) @@ -5996,7 +6000,7 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface) s.move(n.Type, addr, p) } - s.vars[&okVar] = s.constBool(true) + s.vars[okVar] = s.constBool(true) s.endBlock() bOk.AddEdgeTo(bEnd) @@ -6007,7 +6011,7 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { } else { s.zero(n.Type, addr) } - s.vars[&okVar] = s.constBool(false) + s.vars[okVar] = s.constBool(false) s.endBlock() bFail.AddEdgeTo(bEnd) @@ -6018,10 +6022,10 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { delete(s.vars, valVar) } else { res = s.load(n.Type, addr) - s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem()) + s.vars[memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem()) } - resok = s.variable(&okVar, types.Types[TBOOL]) - delete(s.vars, &okVar) + resok = s.variable(okVar, types.Types[types.TBOOL]) + delete(s.vars, okVar) return res, resok } @@ -6049,12 +6053,12 @@ func (s *state) variable(name *Node, t *types.Type) *ssa.Value { } func (s *state) mem() *ssa.Value { - return s.variable(&memVar, types.TypeMem) + return s.variable(memVar, types.TypeMem) } func (s *state) addNamedValue(n *Node, v *ssa.Value) { if n.Class() == Pxxx { - // Don't track our marker nodes (&memVar etc.). + // Don't track our marker nodes (memVar etc.). return } if n.IsAutoTmp() { @@ -7064,17 +7068,9 @@ func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t } s := &types.Sym{Name: node.Sym.Name + suffix, Pkg: localpkg} - - n := &Node{ - Name: new(Name), - Op: ONAME, - Pos: parent.N.(*Node).Pos, - } - n.Orig = n - + n := newnamel(parent.N.(*Node).Pos, s) s.Def = asTypesNode(n) asNode(s.Def).Name.SetUsed(true) - n.Sym = s n.Type = t n.SetClass(PAUTO) n.Esc = EscNever diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 9cc1dee773ad9..a4acdfaed3dd6 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -1692,8 +1692,8 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = nil return n } - var why string - n.Op, why = convertop(n.Left.Op == OLITERAL, t, n.Type) + op, why := convertop(n.Left.Op == OLITERAL, t, n.Type) + n.Op = op if n.Op == OXXX { if !n.Diag() && !n.Type.Broke() && !n.Left.Diag() { yyerror("cannot convert %L to type %v%s", n.Left, n.Type, why) @@ -3021,7 +3021,8 @@ func typecheckarraylit(elemType *types.Type, bound int64, elts []*Node, ctx stri var key, length int64 for i, elt := range elts { setlineno(elt) - vp := &elts[i] + r := elts[i] + var kv *Node if elt.Op == OKEY { elt.Left = typecheck(elt.Left, ctxExpr) key = indexconst(elt.Left) @@ -3036,13 +3037,18 @@ func typecheckarraylit(elemType *types.Type, bound int64, elts []*Node, ctx stri } key = -(1 << 30) // stay negative for a while } - vp = &elt.Right + kv = elt + r = elt.Right } - r := *vp r = pushtype(r, elemType) r = typecheck(r, ctxExpr) - *vp = assignconv(r, elemType, ctx) + r = assignconv(r, elemType, ctx) + if kv != nil { + kv.Right = r + } else { + elts[i] = r + } if key >= 0 { if indices != nil { From d166ef6876850571d08288c63315db2b47c851f5 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Tue, 17 Nov 2020 11:18:45 -0500 Subject: [PATCH 031/474] [dev.regabi] cmd/compile: add Node field getters and setters The goal is to move Node to 
being an interface and then break up the one big struct into many implementations. Step 1 is to convert all current uses of Node to only use methods, so that the existing algorithms keep working even as the underlying implementations are adjusted. Step 0 - this CL - is to add the getters and setters for Step 1. Change-Id: I0570d8727c3ccb64113627bb9bebcb0dc39da07a Reviewed-on: https://go-review.googlesource.com/c/go/+/273007 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/syntax.go | 35 +++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go index 3b585ea341794..65ae7f23d8f01 100644 --- a/src/cmd/compile/internal/gc/syntax.go +++ b/src/cmd/compile/internal/gc/syntax.go @@ -63,6 +63,41 @@ type Node struct { aux uint8 } +func (n *Node) GetLeft() *Node { return n.Left } +func (n *Node) SetLeft(x *Node) { n.Left = x } +func (n *Node) GetRight() *Node { return n.Right } +func (n *Node) SetRight(x *Node) { n.Right = x } +func (n *Node) GetOrig() *Node { return n.Orig } +func (n *Node) SetOrig(x *Node) { n.Orig = x } +func (n *Node) GetType() *types.Type { return n.Type } +func (n *Node) SetType(x *types.Type) { n.Type = x } +func (n *Node) GetFunc() *Func { return n.Func } +func (n *Node) SetFunc(x *Func) { n.Func = x } +func (n *Node) GetName() *Name { return n.Name } +func (n *Node) SetName(x *Name) { n.Name = x } +func (n *Node) GetSym() *types.Sym { return n.Sym } +func (n *Node) SetSym(x *types.Sym) { n.Sym = x } +func (n *Node) GetPos() src.XPos { return n.Pos } +func (n *Node) SetPos(x src.XPos) { n.Pos = x } +func (n *Node) GetXoffset() int64 { return n.Xoffset } +func (n *Node) SetXoffset(x int64) { n.Xoffset = x } +func (n *Node) GetEsc() uint16 { return n.Esc } +func (n *Node) SetEsc(x uint16) { n.Esc = x } +func (n *Node) GetOp() Op { return n.Op } +func (n *Node) SetOp(x Op) { n.Op = x } +func (n *Node) GetNinit() Nodes { return n.Ninit } +func (n *Node) SetNinit(x Nodes) { n.Ninit = x } +func (n *Node) PtrNinit() *Nodes { return &n.Ninit } +func (n *Node) GetNbody() Nodes { return n.Nbody } +func (n *Node) SetNbody(x Nodes) { n.Nbody = x } +func (n *Node) PtrNbody() *Nodes { return &n.Nbody } +func (n *Node) GetList() Nodes { return n.List } +func (n *Node) SetList(x Nodes) { n.List = x } +func (n *Node) PtrList() *Nodes { return &n.List } +func (n *Node) GetRlist() Nodes { return n.Rlist } +func (n *Node) SetRlist(x Nodes) { n.Rlist = x } +func (n *Node) PtrRlist() *Nodes { return &n.Rlist } + func (n *Node) ResetAux() { n.aux = 0 } From 6e583d65abd2b044997430984c43b80cad398cc1 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Tue, 24 Nov 2020 23:58:36 -0500 Subject: [PATCH 032/474] [dev.regabi] cmd/compile: simplify fmt handling of Nodes The existing code introduces many types in what appears to be an attempt to avoid allocation when converting formatting argument lists. Simplify by accepting that allocation is going to happen, especially when Node itself turns into an interface. 
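
[Editor's note: a minimal sketch of the accessor migration that PATCH 030 and PATCH 031 prepare for. The node type, its fields, and the walk function below are invented for illustration; they are not the compiler's Node. The fields here are unexported, so plain method names work, whereas the CL must use Get*/Set* prefixes because its exported field names would collide with the methods.]

package main

import "fmt"

// node stands in for gc's Node struct. Fields are illustrative only.
type node struct {
	op          string
	left, right *node
}

// Accessor methods mirror the Get/Set pairs added by the CL. Once all
// callers go through these, the concrete struct can be replaced by an
// interface with many small implementations without touching callers.
func (n *node) Op() string       { return n.op }
func (n *node) Left() *node      { return n.left }
func (n *node) SetLeft(x *node)  { n.left = x }
func (n *node) Right() *node     { return n.right }
func (n *node) SetRight(x *node) { n.right = x }

// ops walks the tree through accessors only, so it keeps compiling
// unchanged after the representation swap.
func ops(n *node) []string {
	if n == nil {
		return nil
	}
	out := []string{n.Op()}
	out = append(out, ops(n.Left())...)
	out = append(out, ops(n.Right())...)
	return out
}

func main() {
	root := &node{op: "add"}
	root.SetLeft(&node{op: "lit"})
	root.SetRight(&node{op: "lit"})
	fmt.Println(ops(root)) // [add lit lit]
}

The point of the mechanical getter/setter step is exactly this property: algorithms written against methods are insulated from whether the receiver is one big struct or an interface.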
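
[Editor's note: a self-contained sketch of the wrapper pattern the fmt CL below adopts, pairing one {value, mode} struct per kind instead of one named type per kind per mode. The node type and the two modes are invented stand-ins for *gc.Node and FErr/FDbg.]

package main

import "fmt"

// fmtMode selects an output style, like FErr or FDbg in the CL.
type fmtMode int

const (
	modeErr fmtMode = iota
	modeDbg
)

// node is an illustrative stand-in for *gc.Node.
type node struct{ name string }

// fmtNode pairs a value with its mode: one wrapper type per kind,
// at the cost of one small allocation per wrapped argument.
type fmtNode struct {
	x *node
	m fmtMode
}

func (f *fmtNode) Format(s fmt.State, verb rune) {
	if f.m == modeDbg {
		fmt.Fprintf(s, "node(%s)", f.x.name)
		return
	}
	fmt.Fprint(s, f.x.name)
}

// prepareArgs wraps the known kinds in one loop, replacing the old
// per-mode switch that was duplicated once for every mode.
func (m fmtMode) prepareArgs(args []interface{}) {
	for i, arg := range args {
		if n, ok := arg.(*node); ok {
			args[i] = &fmtNode{n, m}
		}
	}
}

func (m fmtMode) Sprintf(format string, args ...interface{}) string {
	m.prepareArgs(args)
	return fmt.Sprintf(format, args...)
}

func main() {
	n := &node{name: "x"}
	fmt.Println(modeErr.Sprintf("%v", n)) // x
	fmt.Println(modeDbg.Sprintf("%v", n)) // node(x)
}

Carrying the mode in the wrapper keeps the number of types linear in the number of kinds rather than kinds times modes, which matters once Node becomes an interface and the set of kinds grows.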
Change-Id: I3c0d45ca01eace4924deb43c0ea7dc6d65943d08 Reviewed-on: https://go-review.googlesource.com/c/go/+/272929 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/fmt.go | 187 ++++++++--------------------- 1 file changed, 51 insertions(+), 136 deletions(-) diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index f9888aec41b4e..f995d2e2ec408 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -238,72 +238,49 @@ func (o Op) oconv(s fmt.State, flag FmtFlag, mode fmtMode) { fmt.Fprint(s, o.String()) } -type ( - fmtMode int - - fmtNodeErr Node - fmtNodeDbg Node - fmtNodeTypeId Node - fmtNodeTypeIdName Node - - fmtOpErr Op - fmtOpDbg Op - fmtOpTypeId Op - fmtOpTypeIdName Op - - fmtTypeErr types.Type - fmtTypeDbg types.Type - fmtTypeTypeId types.Type - fmtTypeTypeIdName types.Type - - fmtSymErr types.Sym - fmtSymDbg types.Sym - fmtSymTypeId types.Sym - fmtSymTypeIdName types.Sym - - fmtNodesErr Nodes - fmtNodesDbg Nodes - fmtNodesTypeId Nodes - fmtNodesTypeIdName Nodes -) +type fmtMode int -func (n *fmtNodeErr) Format(s fmt.State, verb rune) { (*Node)(n).format(s, verb, FErr) } -func (n *fmtNodeDbg) Format(s fmt.State, verb rune) { (*Node)(n).format(s, verb, FDbg) } -func (n *fmtNodeTypeId) Format(s fmt.State, verb rune) { (*Node)(n).format(s, verb, FTypeId) } -func (n *fmtNodeTypeIdName) Format(s fmt.State, verb rune) { (*Node)(n).format(s, verb, FTypeIdName) } -func (n *Node) Format(s fmt.State, verb rune) { n.format(s, verb, FErr) } - -func (o fmtOpErr) Format(s fmt.State, verb rune) { Op(o).format(s, verb, FErr) } -func (o fmtOpDbg) Format(s fmt.State, verb rune) { Op(o).format(s, verb, FDbg) } -func (o fmtOpTypeId) Format(s fmt.State, verb rune) { Op(o).format(s, verb, FTypeId) } -func (o fmtOpTypeIdName) Format(s fmt.State, verb rune) { Op(o).format(s, verb, FTypeIdName) } -func (o Op) Format(s fmt.State, verb rune) { o.format(s, verb, FErr) } - -func (t *fmtTypeErr) Format(s fmt.State, verb rune) { typeFormat((*types.Type)(t), s, verb, FErr) } -func (t *fmtTypeDbg) Format(s fmt.State, verb rune) { typeFormat((*types.Type)(t), s, verb, FDbg) } -func (t *fmtTypeTypeId) Format(s fmt.State, verb rune) { - typeFormat((*types.Type)(t), s, verb, FTypeId) +type fmtNode struct { + x *Node + m fmtMode } -func (t *fmtTypeTypeIdName) Format(s fmt.State, verb rune) { - typeFormat((*types.Type)(t), s, verb, FTypeIdName) + +func (f *fmtNode) Format(s fmt.State, verb rune) { f.x.format(s, verb, f.m) } + +type fmtOp struct { + x Op + m fmtMode } -// func (t *types.Type) Format(s fmt.State, verb rune) // in package types +func (f *fmtOp) Format(s fmt.State, verb rune) { f.x.format(s, verb, f.m) } -func (y *fmtSymErr) Format(s fmt.State, verb rune) { symFormat((*types.Sym)(y), s, verb, FErr) } -func (y *fmtSymDbg) Format(s fmt.State, verb rune) { symFormat((*types.Sym)(y), s, verb, FDbg) } -func (y *fmtSymTypeId) Format(s fmt.State, verb rune) { symFormat((*types.Sym)(y), s, verb, FTypeId) } -func (y *fmtSymTypeIdName) Format(s fmt.State, verb rune) { - symFormat((*types.Sym)(y), s, verb, FTypeIdName) +type fmtType struct { + x *types.Type + m fmtMode } -// func (y *types.Sym) Format(s fmt.State, verb rune) // in package types { y.format(s, verb, FErr) } +func (f *fmtType) Format(s fmt.State, verb rune) { typeFormat(f.x, s, verb, f.m) } + +type fmtSym struct { + x *types.Sym + m fmtMode +} -func (n fmtNodesErr) Format(s fmt.State, verb rune) { (Nodes)(n).format(s, verb, FErr) } -func (n 
fmtNodesDbg) Format(s fmt.State, verb rune) { (Nodes)(n).format(s, verb, FDbg) } -func (n fmtNodesTypeId) Format(s fmt.State, verb rune) { (Nodes)(n).format(s, verb, FTypeId) } -func (n fmtNodesTypeIdName) Format(s fmt.State, verb rune) { (Nodes)(n).format(s, verb, FTypeIdName) } -func (n Nodes) Format(s fmt.State, verb rune) { n.format(s, verb, FErr) } +func (f *fmtSym) Format(s fmt.State, verb rune) { symFormat(f.x, s, verb, f.m) } + +type fmtNodes struct { + x Nodes + m fmtMode +} + +func (f *fmtNodes) Format(s fmt.State, verb rune) { f.x.format(s, verb, f.m) } + +func (n *Node) Format(s fmt.State, verb rune) { n.format(s, verb, FErr) } +func (o Op) Format(s fmt.State, verb rune) { o.format(s, verb, FErr) } + +// func (t *types.Type) Format(s fmt.State, verb rune) // in package types +// func (y *types.Sym) Format(s fmt.State, verb rune) // in package types { y.format(s, verb, FErr) } +func (n Nodes) Format(s fmt.State, verb rune) { n.format(s, verb, FErr) } func (m fmtMode) Fprintf(s fmt.State, format string, args ...interface{}) { m.prepareArgs(args) @@ -321,85 +298,23 @@ func (m fmtMode) Sprint(args ...interface{}) string { } func (m fmtMode) prepareArgs(args []interface{}) { - switch m { - case FErr: - for i, arg := range args { - switch arg := arg.(type) { - case Op: - args[i] = fmtOpErr(arg) - case *Node: - args[i] = (*fmtNodeErr)(arg) - case *types.Type: - args[i] = (*fmtTypeErr)(arg) - case *types.Sym: - args[i] = (*fmtSymErr)(arg) - case Nodes: - args[i] = fmtNodesErr(arg) - case int32, int64, string, types.EType, constant.Value: - // OK: printing these types doesn't depend on mode - default: - Fatalf("mode.prepareArgs type %T", arg) - } - } - case FDbg: - for i, arg := range args { - switch arg := arg.(type) { - case Op: - args[i] = fmtOpDbg(arg) - case *Node: - args[i] = (*fmtNodeDbg)(arg) - case *types.Type: - args[i] = (*fmtTypeDbg)(arg) - case *types.Sym: - args[i] = (*fmtSymDbg)(arg) - case Nodes: - args[i] = fmtNodesDbg(arg) - case int32, int64, string, types.EType, constant.Value: - // OK: printing these types doesn't depend on mode - default: - Fatalf("mode.prepareArgs type %T", arg) - } - } - case FTypeId: - for i, arg := range args { - switch arg := arg.(type) { - case Op: - args[i] = fmtOpTypeId(arg) - case *Node: - args[i] = (*fmtNodeTypeId)(arg) - case *types.Type: - args[i] = (*fmtTypeTypeId)(arg) - case *types.Sym: - args[i] = (*fmtSymTypeId)(arg) - case Nodes: - args[i] = fmtNodesTypeId(arg) - case int32, int64, string, types.EType, constant.Value: - // OK: printing these types doesn't depend on mode - default: - Fatalf("mode.prepareArgs type %T", arg) - } - } - case FTypeIdName: - for i, arg := range args { - switch arg := arg.(type) { - case Op: - args[i] = fmtOpTypeIdName(arg) - case *Node: - args[i] = (*fmtNodeTypeIdName)(arg) - case *types.Type: - args[i] = (*fmtTypeTypeIdName)(arg) - case *types.Sym: - args[i] = (*fmtSymTypeIdName)(arg) - case Nodes: - args[i] = fmtNodesTypeIdName(arg) - case int32, int64, string, types.EType, constant.Value: - // OK: printing these types doesn't depend on mode - default: - Fatalf("mode.prepareArgs type %T", arg) - } + for i, arg := range args { + switch arg := arg.(type) { + case Op: + args[i] = &fmtOp{arg, m} + case *Node: + args[i] = &fmtNode{arg, m} + case *types.Type: + args[i] = &fmtType{arg, m} + case *types.Sym: + args[i] = &fmtSym{arg, m} + case Nodes: + args[i] = &fmtNodes{arg, m} + case int32, int64, string, types.EType, constant.Value: + // OK: printing these types doesn't depend on mode + default: + 
Fatalf("mode.prepareArgs type %T", arg) } - default: - Fatalf("mode.prepareArgs mode %d", m) } } From 18573aea3cc5098c5c27e357e15c507a05de5599 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 16 Nov 2020 00:59:30 -0500 Subject: [PATCH 033/474] [dev.regabi] cmd/compile: clean up flag handling [generated] The flag values have grown fairly haphazard, with no organization or even common naming convention. This CL moves all flag values into the Flag struct (formerly misnamed Debug), except for a few that live in Ctxt fields instead. This CL is entirely automated changes. A followup CL will make a few manual cleanups, leaving this CL completely automated and easier to regenerate during merge conflicts. Cleaning up flags is necessary because the printing routines look at some of them, and the printing routines need to move out of package gc to a new package shared by gc and any other packages that split out of gc. [git-generate] cd src/cmd/compile/internal/gc rf ' mv Debug Flag mv DebugFlags Flags mv Flags.e Flags.LowerE mv Flags.h Flags.LowerH mv Flags.j Flags.LowerJ mv Flags.l Flags.LowerL mv Flags.m Flags.LowerM mv Flags.r Flags.LowerR mv Flags.w Flags.LowerW mv Flags.P Flags.Percent mv compiling_runtime Flag.CompilingRuntime mv compiling_std Flag.Std mv localimport Flag.D mv asmhdr Flag.AsmHdr mv buildid Flag.BuildID mv nBackendWorkers Flag.LowerC mv pure_go Flag.Complete mv debugstr Flag.LowerD mv flagDWARF Flag.Dwarf mv genDwarfInline Flag.GenDwarfInl mv flag_installsuffix Flag.InstallSuffix mv flag_lang Flag.Lang mv linkobj Flag.LinkObj mv debuglive Flag.Live mv flag_msan Flag.MSan mv nolocalimports Flag.NoLocalImports mv outfile Flag.LowerO mv myimportpath Ctxt.Pkgpath mv writearchive Flag.Pack mv flag_race Flag.Race mv spectre Flag.Spectre mv trace Flag.LowerT mv pathPrefix Flag.TrimPath mv Debug_vlog Ctxt.Debugvlog mv use_writebarrier Flag.WB mv Main.flag_shared Flag.Shared mv Main.flag_dynlink Flag.Dynlink mv Main.goversion Flag.GoVersion mv Main.symabisPath Flag.SymABIs mv cpuprofile Flag.CPUProfile mv memprofile Flag.MemProfile mv traceprofile Flag.TraceProfile mv blockprofile Flag.BlockProfile mv mutexprofile Flag.MutexProfile mv benchfile Flag.Bench mv Main.smallFrames Flag.SmallFrames mv Main.jsonLogOpt Flag.JSON add Flag:$ \ Cfg struct{} mv embedCfg Flag.Cfg.Embed mv idirs Flag.Cfg.ImportDirs mv importMap Flag.Cfg.ImportMap mv packageFile Flag.Cfg.PackageFile mv spectreIndex Flag.Cfg.SpectreIndex mv addidir addImportDir mv main.go:/Wasm/-0,/ssaDump/-3 ParseFlags mv usage Flag Flags ParseFlags \ concurrentFlagOk concurrentBackendAllowed \ addImportDir addImportMap \ readImportCfg readEmbedCfg \ flag.go # Remove //go:generate line copied from main.go # along with two self-assignments from the merge. 
rm flag.go:/go:generate/-+ \ flag.go:/Ctxt.Pkgpath = Ctxt.Pkgpath/-+ \ flag.go:/Ctxt.Debugvlog = Ctxt.Debugvlog/-+ ' Change-Id: I10431c15fe7d9f48024d53141d4224d957dbf334 Reviewed-on: https://go-review.googlesource.com/c/go/+/271667 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/alg.go | 8 +- src/cmd/compile/internal/gc/closure.go | 4 +- src/cmd/compile/internal/gc/dcl.go | 2 +- src/cmd/compile/internal/gc/dwinl.go | 2 +- src/cmd/compile/internal/gc/embed.go | 39 +- src/cmd/compile/internal/gc/esc.go | 18 +- src/cmd/compile/internal/gc/escape.go | 32 +- src/cmd/compile/internal/gc/export.go | 16 +- src/cmd/compile/internal/gc/flag.go | 516 +++++++++++++++++++++++ src/cmd/compile/internal/gc/go.go | 53 +-- src/cmd/compile/internal/gc/gsubr.go | 8 +- src/cmd/compile/internal/gc/iimport.go | 4 +- src/cmd/compile/internal/gc/inl.go | 50 +-- src/cmd/compile/internal/gc/main.go | 512 ++-------------------- src/cmd/compile/internal/gc/noder.go | 18 +- src/cmd/compile/internal/gc/obj.go | 14 +- src/cmd/compile/internal/gc/order.go | 4 +- src/cmd/compile/internal/gc/pgen.go | 17 +- src/cmd/compile/internal/gc/plive.go | 8 +- src/cmd/compile/internal/gc/print.go | 18 +- src/cmd/compile/internal/gc/racewalk.go | 8 +- src/cmd/compile/internal/gc/range.go | 4 +- src/cmd/compile/internal/gc/reflect.go | 16 +- src/cmd/compile/internal/gc/select.go | 6 +- src/cmd/compile/internal/gc/sinit.go | 2 +- src/cmd/compile/internal/gc/ssa.go | 44 +- src/cmd/compile/internal/gc/subr.go | 8 +- src/cmd/compile/internal/gc/syntax.go | 6 +- src/cmd/compile/internal/gc/typecheck.go | 21 +- src/cmd/compile/internal/gc/util.go | 25 +- src/cmd/compile/internal/gc/walk.go | 26 +- 31 files changed, 752 insertions(+), 757 deletions(-) create mode 100644 src/cmd/compile/internal/gc/flag.go diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index 2f7fa27bb9100..c1d8de6bad426 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -282,7 +282,7 @@ func genhash(t *types.Type) *obj.LSym { } sym := typesymprefix(".hash", t) - if Debug.r != 0 { + if Flag.LowerR != 0 { fmt.Printf("genhash %v %v %v\n", closure, sym, t) } @@ -374,7 +374,7 @@ func genhash(t *types.Type) *obj.LSym { r.List.Append(nh) fn.Nbody.Append(r) - if Debug.r != 0 { + if Flag.LowerR != 0 { dumplist("genhash body", fn.Nbody) } @@ -509,7 +509,7 @@ func geneq(t *types.Type) *obj.LSym { return closure } sym := typesymprefix(".eq", t) - if Debug.r != 0 { + if Flag.LowerR != 0 { fmt.Printf("geneq %v\n", t) } @@ -753,7 +753,7 @@ func geneq(t *types.Type) *obj.LSym { // We should really do a generic CL that shares epilogues across // the board. See #24936. 
- if Debug.r != 0 { + if Flag.LowerR != 0 { dumplist("geneq body", fn.Nbody) } diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index 577d6565f59d9..f850cbe2801dc 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -203,7 +203,7 @@ func capturevars(dcl *Node) { outer = nod(OADDR, outer, nil) } - if Debug.m > 1 { + if Flag.LowerM > 1 { var name *types.Sym if v.Name.Curfn != nil && v.Name.Curfn.Func.Nname != nil { name = v.Name.Curfn.Func.Nname.Sym @@ -344,7 +344,7 @@ func closuredebugruntimecheck(clo *Node) { Warnl(clo.Pos, "stack closure, captured vars = %v", clo.Func.ClosureVars) } } - if compiling_runtime && clo.Esc == EscHeap { + if Flag.CompilingRuntime && clo.Esc == EscHeap { yyerrorl(clo.Pos, "heap-allocated closure, not allowed in runtime") } } diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 431142117428a..3f193e3a019b9 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -946,7 +946,7 @@ func makefuncsym(s *types.Sym) { if s.IsBlank() { return } - if compiling_runtime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") { + if Flag.CompilingRuntime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") { // runtime.getg(), getclosureptr(), getcallerpc(), and // getcallersp() are not real functions and so do not // get funcsyms. diff --git a/src/cmd/compile/internal/gc/dwinl.go b/src/cmd/compile/internal/gc/dwinl.go index bb5ae61cbb3b4..48d78f6cd7d2a 100644 --- a/src/cmd/compile/internal/gc/dwinl.go +++ b/src/cmd/compile/internal/gc/dwinl.go @@ -213,7 +213,7 @@ func genAbstractFunc(fn *obj.LSym) { if Debug_gendwarfinl != 0 { Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name) } - Ctxt.DwarfAbstractFunc(ifn, fn, myimportpath) + Ctxt.DwarfAbstractFunc(ifn, fn, Ctxt.Pkgpath) } // Undo any versioning performed when a name was written diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go index 103949c1f9f08..5559d628131b4 100644 --- a/src/cmd/compile/internal/gc/embed.go +++ b/src/cmd/compile/internal/gc/embed.go @@ -8,9 +8,7 @@ import ( "cmd/compile/internal/syntax" "cmd/compile/internal/types" "cmd/internal/obj" - "encoding/json" - "io/ioutil" - "log" + "path" "sort" "strconv" @@ -19,27 +17,6 @@ import ( var embedlist []*Node -var embedCfg struct { - Patterns map[string][]string - Files map[string]string -} - -func readEmbedCfg(file string) { - data, err := ioutil.ReadFile(file) - if err != nil { - log.Fatalf("-embedcfg: %v", err) - } - if err := json.Unmarshal(data, &embedCfg); err != nil { - log.Fatalf("%s: %v", file, err) - } - if embedCfg.Patterns == nil { - log.Fatalf("%s: invalid embedcfg: missing Patterns", file) - } - if embedCfg.Files == nil { - log.Fatalf("%s: invalid embedcfg: missing Files", file) - } -} - const ( embedUnknown = iota embedBytes @@ -69,7 +46,7 @@ func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []Pragma p.yyerrorpos(pos, "invalid go:embed: missing import \"embed\"") return exprs } - if embedCfg.Patterns == nil { + if Flag.Cfg.Embed.Patterns == nil { p.yyerrorpos(pos, "invalid go:embed: build system did not supply embed configuration") return exprs } @@ -98,12 +75,12 @@ func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []Pragma var list []string for _, e := range embeds { for _, pattern := range e.Patterns { - 
files, ok := embedCfg.Patterns[pattern] + files, ok := Flag.Cfg.Embed.Patterns[pattern] if !ok { p.yyerrorpos(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern) } for _, file := range files { - if embedCfg.Files[file] == "" { + if Flag.Cfg.Embed.Files[file] == "" { p.yyerrorpos(e.Pos, "invalid go:embed: build system did not map file: %s", file) continue } @@ -152,7 +129,7 @@ func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []Pragma // can't tell whether "string" and "byte" really mean "string" and "byte". // The result must be confirmed later, after type checking, using embedKind. func embedKindApprox(typ *Node) int { - if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && myimportpath == "embed")) { + if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && Ctxt.Pkgpath == "embed")) { return embedFiles } // These are not guaranteed to match only string and []byte - @@ -170,7 +147,7 @@ func embedKindApprox(typ *Node) int { // embedKind determines the kind of embedding variable. func embedKind(typ *types.Type) int { - if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && myimportpath == "embed")) { + if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && Ctxt.Pkgpath == "embed")) { return embedFiles } if typ == types.Types[TSTRING] { @@ -221,7 +198,7 @@ func initEmbed(v *Node) { case embedString, embedBytes: file := files[0] - fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], kind == embedString, nil) + fsym, size, err := fileStringSym(v.Pos, Flag.Cfg.Embed.Files[file], kind == embedString, nil) if err != nil { yyerrorl(v.Pos, "embed %s: %v", file, err) } @@ -257,7 +234,7 @@ func initEmbed(v *Node) { off = duintptr(slicedata, off, 0) off += hashSize } else { - fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], true, hash) + fsym, size, err := fileStringSym(v.Pos, Flag.Cfg.Embed.Files[file], true, hash) if err != nil { yyerrorl(v.Pos, "embed %s: %v", file, err) } diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go index 6003f6608cc70..74b85e1ae8772 100644 --- a/src/cmd/compile/internal/gc/esc.go +++ b/src/cmd/compile/internal/gc/esc.go @@ -283,10 +283,10 @@ func addrescapes(n *Node) { // moveToHeap records the parameter or local variable n as moved to the heap. func moveToHeap(n *Node) { - if Debug.r != 0 { + if Flag.LowerR != 0 { Dump("MOVE", n) } - if compiling_runtime { + if Flag.CompilingRuntime { yyerror("%v escapes to heap, not allowed in runtime", n) } if n.Class() == PAUTOHEAP { @@ -360,7 +360,7 @@ func moveToHeap(n *Node) { n.Xoffset = 0 n.Name.Param.Heapaddr = heapaddr n.Esc = EscHeap - if Debug.m != 0 { + if Flag.LowerM != 0 { Warnl(n.Pos, "moved to heap: %v", n) } } @@ -390,7 +390,7 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string { // but we are reusing the ability to annotate an individual function // argument and pass those annotations along to importing code. if f.Type.IsUintptr() { - if Debug.m != 0 { + if Flag.LowerM != 0 { Warnl(f.Pos, "assuming %v is unsafe uintptr", name()) } return unsafeUintptrTag @@ -405,11 +405,11 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string { // External functions are assumed unsafe, unless // //go:noescape is given before the declaration. 
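The embed.go hunks replace the file-local embedCfg with Flag.Cfg.Embed, whose shape is unchanged: a JSON map from go:embed patterns to matched files, plus a map from file names to their backing paths on disk. A standalone sketch of the two-level lookup varEmbed performs above, with the configuration data invented:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

func main() {
	// Same shape as Flag.Cfg.Embed: pattern -> files, file -> disk path.
	var cfg struct {
		Patterns map[string][]string
		Files    map[string]string
	}
	data := []byte(`{
		"Patterns": {"*.txt": ["hello.txt"]},
		"Files": {"hello.txt": "/tmp/build1234/hello.txt"}
	}`)
	if err := json.Unmarshal(data, &cfg); err != nil {
		log.Fatal(err)
	}
	for _, file := range cfg.Patterns["*.txt"] {
		fmt.Println(file, "=>", cfg.Files[file]) // hello.txt => /tmp/build1234/hello.txt
	}
}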
if fn.Func.Pragma&Noescape != 0 { - if Debug.m != 0 && f.Sym != nil { + if Flag.LowerM != 0 && f.Sym != nil { Warnl(f.Pos, "%v does not escape", name()) } } else { - if Debug.m != 0 && f.Sym != nil { + if Flag.LowerM != 0 && f.Sym != nil { Warnl(f.Pos, "leaking param: %v", name()) } esc.AddHeap(0) @@ -420,14 +420,14 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string { if fn.Func.Pragma&UintptrEscapes != 0 { if f.Type.IsUintptr() { - if Debug.m != 0 { + if Flag.LowerM != 0 { Warnl(f.Pos, "marking %v as escaping uintptr", name()) } return uintptrEscapesTag } if f.IsDDD() && f.Type.Elem().IsUintptr() { // final argument is ...uintptr. - if Debug.m != 0 { + if Flag.LowerM != 0 { Warnl(f.Pos, "marking %v as escaping ...uintptr", name()) } return uintptrEscapesTag @@ -449,7 +449,7 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string { esc := loc.paramEsc esc.Optimize() - if Debug.m != 0 && !loc.escapes { + if Flag.LowerM != 0 && !loc.escapes { if esc.Empty() { Warnl(f.Pos, "%v does not escape", name()) } diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index b6975c79a40d6..27645fb888720 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -205,7 +205,7 @@ func (e *Escape) initFunc(fn *Node) { Fatalf("unexpected node: %v", fn) } fn.Esc = EscFuncPlanned - if Debug.m > 3 { + if Flag.LowerM > 3 { Dump("escAnalyze", fn) } @@ -282,7 +282,7 @@ func (e *Escape) stmt(n *Node) { lineno = lno }() - if Debug.m > 2 { + if Flag.LowerM > 2 { fmt.Printf("%v:[%d] %v stmt: %v\n", linestr(lineno), e.loopDepth, funcSym(e.curfn), n) } @@ -310,11 +310,11 @@ func (e *Escape) stmt(n *Node) { case OLABEL: switch asNode(n.Sym.Label) { case nonlooping: - if Debug.m > 2 { + if Flag.LowerM > 2 { fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n) } case looping: - if Debug.m > 2 { + if Flag.LowerM > 2 { fmt.Printf("%v: %v looping label\n", linestr(lineno), n) } e.loopDepth++ @@ -752,7 +752,7 @@ func (e *Escape) addrs(l Nodes) []EscHole { func (e *Escape) assign(dst, src *Node, why string, where *Node) { // Filter out some no-op assignments for escape analysis. ignore := dst != nil && src != nil && isSelfAssign(dst, src) - if ignore && Debug.m != 0 { + if ignore && Flag.LowerM != 0 { Warnl(where.Pos, "%v ignoring self-assignment in %S", funcSym(e.curfn), where) } @@ -966,7 +966,7 @@ func (k EscHole) note(where *Node, why string) EscHole { if where == nil || why == "" { Fatalf("note: missing where/why") } - if Debug.m >= 2 || logopt.Enabled() { + if Flag.LowerM >= 2 || logopt.Enabled() { k.notes = &EscNote{ next: k.notes, where: where, @@ -1112,9 +1112,9 @@ func (e *Escape) flow(k EscHole, src *EscLocation) { return } if dst.escapes && k.derefs < 0 { // dst = &src - if Debug.m >= 2 || logopt.Enabled() { + if Flag.LowerM >= 2 || logopt.Enabled() { pos := linestr(src.n.Pos) - if Debug.m >= 2 { + if Flag.LowerM >= 2 { fmt.Printf("%s: %v escapes to heap:\n", pos, src.n) } explanation := e.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{}) @@ -1214,8 +1214,8 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc // that value flow for tagging the function // later. 
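As the esc.go and escape.go hunks show, the -m count (now Flag.LowerM) is a verbosity dial rather than a boolean: level 1 reports escape decisions, level 2 adds flow explanations, and levels 3 and 4 trace statements and dump whole functions. A toy version of that gating pattern, with logM and the messages invented for illustration:

package main

import (
	"flag"
	"fmt"
)

var lowerM = flag.Int("m", 0, "print optimization decisions; higher is noisier")

// logM prints only when -m is at or above the given level.
func logM(level int, format string, args ...interface{}) {
	if *lowerM >= level {
		fmt.Printf(format, args...)
	}
}

func main() {
	flag.Parse()
	logM(1, "x escapes to heap\n")          // -m
	logM(2, "  flow: y = &x\n")             // -m=2 adds flow explanations
	logM(3, "stmt: assignment, line 10\n")  // -m=3 traces statements
}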
if l.isName(PPARAM) { - if (logopt.Enabled() || Debug.m >= 2) && !l.escapes { - if Debug.m >= 2 { + if (logopt.Enabled() || Flag.LowerM >= 2) && !l.escapes { + if Flag.LowerM >= 2 { fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", linestr(l.n.Pos), l.n, e.explainLoc(root), derefs) } explanation := e.explainPath(root, l) @@ -1231,8 +1231,8 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc // outlives it, then l needs to be heap // allocated. if addressOf && !l.escapes { - if logopt.Enabled() || Debug.m >= 2 { - if Debug.m >= 2 { + if logopt.Enabled() || Flag.LowerM >= 2 { + if Flag.LowerM >= 2 { fmt.Printf("%s: %v escapes to heap:\n", linestr(l.n.Pos), l.n) } explanation := e.explainPath(root, l) @@ -1270,7 +1270,7 @@ func (e *Escape) explainPath(root, src *EscLocation) []*logopt.LoggedOpt { for { // Prevent infinite loop. if visited[src] { - if Debug.m >= 2 { + if Flag.LowerM >= 2 { fmt.Printf("%s: warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos) } break @@ -1298,7 +1298,7 @@ func (e *Escape) explainFlow(pos string, dst, srcloc *EscLocation, derefs int, n if derefs >= 0 { ops = strings.Repeat("*", derefs) } - print := Debug.m >= 2 + print := Flag.LowerM >= 2 flow := fmt.Sprintf(" flow: %s = %s%v:", e.explainLoc(dst), ops, e.explainLoc(srcloc)) if print { @@ -1452,7 +1452,7 @@ func (e *Escape) finish(fns []*Node) { if loc.escapes { if n.Op != ONAME { - if Debug.m != 0 { + if Flag.LowerM != 0 { Warnl(n.Pos, "%S escapes to heap", n) } if logopt.Enabled() { @@ -1462,7 +1462,7 @@ func (e *Escape) finish(fns []*Node) { n.Esc = EscHeap addrescapes(n) } else { - if Debug.m != 0 && n.Op != ONAME { + if Flag.LowerM != 0 && n.Op != ONAME { Warnl(n.Pos, "%S does not escape", n) } n.Esc = EscNone diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 9ee3b080b89e7..edd270323896c 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -32,7 +32,7 @@ func exportsym(n *Node) { } n.Sym.SetOnExportList(true) - if Debug.E != 0 { + if Flag.E != 0 { fmt.Printf("export symbol %v\n", n.Sym) } @@ -57,7 +57,7 @@ func autoexport(n *Node, ctxt Class) { if types.IsExported(n.Sym.Name) || initname(n.Sym.Name) { exportsym(n) } - if asmhdr != "" && !n.Sym.Asm() { + if Flag.AsmHdr != "" && !n.Sym.Asm() { n.Sym.SetAsm(true) asmlist = append(asmlist, n) } @@ -72,7 +72,7 @@ func dumpexport(bout *bio.Writer) { exportf(bout, "\n$$\n") if Debug_export != 0 { - fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", myimportpath, size) + fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", Ctxt.Pkgpath, size) } } @@ -151,7 +151,7 @@ func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val n.SetVal(val) - if Debug.E != 0 { + if Flag.E != 0 { fmt.Printf("import const %v %L = %v\n", s, t, val) } } @@ -166,7 +166,7 @@ func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) { n.Func = new(Func) - if Debug.E != 0 { + if Flag.E != 0 { fmt.Printf("import func %v%S\n", s, t) } } @@ -179,7 +179,7 @@ func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) { return } - if Debug.E != 0 { + if Flag.E != 0 { fmt.Printf("import var %v %L\n", s, t) } } @@ -192,13 +192,13 @@ func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) { return } - if Debug.E != 0 { + if Flag.E != 0 { fmt.Printf("import type %v = %L\n", s, t) } } func dumpasmhdr() { - b, err := bio.Create(asmhdr) + b, err := 
bio.Create(Flag.AsmHdr) if err != nil { Fatalf("%v", err) } diff --git a/src/cmd/compile/internal/gc/flag.go b/src/cmd/compile/internal/gc/flag.go new file mode 100644 index 0000000000000..3861c9a02850a --- /dev/null +++ b/src/cmd/compile/internal/gc/flag.go @@ -0,0 +1,516 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gc + +import ( + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "log" + "os" + "runtime" + "strconv" + "strings" + + "cmd/compile/internal/logopt" + "cmd/compile/internal/ssa" + "cmd/compile/internal/types" + "cmd/internal/dwarf" + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/sys" +) + +func usage() { + fmt.Fprintf(os.Stderr, "usage: compile [options] file.go...\n") + objabi.Flagprint(os.Stderr) + Exit(2) +} + +var Flag Flags + +// gc debug flags +type Flags struct { + Percent, B, C, E, + K, L, N, S, + W, LowerE, LowerH, LowerJ, + LowerL, LowerM, LowerR, LowerW int + CompilingRuntime bool + Std bool + D string + AsmHdr string + BuildID string + LowerC int + Complete bool + LowerD string + Dwarf bool + GenDwarfInl int + InstallSuffix string + Lang string + LinkObj string + Live int + MSan bool + NoLocalImports bool + LowerO string + Pack bool + Race bool + Spectre string + LowerT bool + TrimPath string + WB bool + Shared bool + Dynlink bool + GoVersion string + SymABIs string + CPUProfile string + MemProfile string + TraceProfile string + BlockProfile string + MutexProfile string + Bench string + SmallFrames bool + JSON string + + Cfg struct { + Embed struct { + Patterns map[string][]string + Files map[string]string + } + ImportDirs []string + ImportMap map[string]string + PackageFile map[string]string + SpectreIndex bool + } +} + +func ParseFlags() { + Wasm := objabi.GOARCH == "wasm" + + // Whether the limit for stack-allocated objects is much smaller than normal. + // This can be helpful for diagnosing certain causes of GC latency. See #27732. 
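The new flag.go collects what were dozens of package-level variables into the single Flag value above, filled in exactly once by ParseFlags and read everywhere else. A minimal sketch of the pattern with two invented fields, not the compiler's actual set:

package main

import (
	"flag"
	"fmt"
)

type Flags struct {
	Race   bool   // -race
	LowerO string // -o
}

var Flag Flags

// ParseFlags binds every option to a Flag field, then parses once.
func ParseFlags() {
	flag.BoolVar(&Flag.Race, "race", false, "enable race detector")
	flag.StringVar(&Flag.LowerO, "o", "", "write output to `file`")
	flag.Parse()
}

func main() {
	ParseFlags()
	if Flag.Race {
		fmt.Println("instrumenting for the race detector")
	}
	fmt.Println("output file:", Flag.LowerO)
}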
+ Flag.SmallFrames = false + Flag.JSON = "" + + flag.BoolVar(&Flag.CompilingRuntime, "+", false, "compiling runtime") + flag.BoolVar(&Flag.Std, "std", false, "compiling standard library") + flag.StringVar(&Flag.D, "D", "", "set relative `path` for local imports") + + objabi.Flagcount("%", "debug non-static initializers", &Flag.Percent) + objabi.Flagcount("B", "disable bounds checking", &Flag.B) + objabi.Flagcount("C", "disable printing of columns in error messages", &Flag.C) + objabi.Flagcount("E", "debug symbol export", &Flag.E) + objabi.Flagcount("K", "debug missing line numbers", &Flag.K) + objabi.Flagcount("L", "show full file names in error messages", &Flag.L) + objabi.Flagcount("N", "disable optimizations", &Flag.N) + objabi.Flagcount("S", "print assembly listing", &Flag.S) + objabi.Flagcount("W", "debug parse tree after type checking", &Flag.W) + objabi.Flagcount("e", "no limit on number of errors reported", &Flag.LowerE) + objabi.Flagcount("h", "halt on error", &Flag.LowerH) + objabi.Flagcount("j", "debug runtime-initialized variables", &Flag.LowerJ) + objabi.Flagcount("l", "disable inlining", &Flag.LowerL) + objabi.Flagcount("m", "print optimization decisions", &Flag.LowerM) + objabi.Flagcount("r", "debug generated wrappers", &Flag.LowerR) + objabi.Flagcount("w", "debug type checking", &Flag.LowerW) + + objabi.Flagfn1("I", "add `directory` to import search path", addImportDir) + objabi.AddVersionFlag() // -V + flag.StringVar(&Flag.AsmHdr, "asmhdr", "", "write assembly header to `file`") + flag.StringVar(&Flag.BuildID, "buildid", "", "record `id` as the build id in the export metadata") + flag.IntVar(&Flag.LowerC, "c", 1, "concurrency during compilation, 1 means no concurrency") + flag.BoolVar(&Flag.Complete, "complete", false, "compiling complete package (no C or assembly)") + flag.StringVar(&Flag.LowerD, "d", "", "print debug information about items in `list`; try -d help") + flag.BoolVar(&Flag.Dwarf, "dwarf", !Wasm, "generate DWARF symbols") + flag.BoolVar(&Ctxt.Flag_locationlists, "dwarflocationlists", true, "add location lists to DWARF in optimized mode") + flag.IntVar(&Flag.GenDwarfInl, "gendwarfinl", 2, "generate DWARF inline info records") + objabi.Flagfn1("embedcfg", "read go:embed configuration from `file`", readEmbedCfg) + objabi.Flagfn1("importmap", "add `definition` of the form source=actual to import map", addImportMap) + objabi.Flagfn1("importcfg", "read import configuration from `file`", readImportCfg) + flag.StringVar(&Flag.InstallSuffix, "installsuffix", "", "set pkg directory `suffix`") + flag.StringVar(&Flag.Lang, "lang", "", "release to compile for") + flag.StringVar(&Flag.LinkObj, "linkobj", "", "write linker-specific object to `file`") + objabi.Flagcount("live", "debug liveness analysis", &Flag.Live) + if sys.MSanSupported(objabi.GOOS, objabi.GOARCH) { + flag.BoolVar(&Flag.MSan, "msan", false, "build code compatible with C/C++ memory sanitizer") + } + flag.BoolVar(&Flag.NoLocalImports, "nolocalimports", false, "reject local (relative) imports") + flag.StringVar(&Flag.LowerO, "o", "", "write output to `file`") + flag.StringVar(&Ctxt.Pkgpath, "p", "", "set expected package import `path`") + flag.BoolVar(&Flag.Pack, "pack", false, "write to file.a instead of file.o") + if sys.RaceDetectorSupported(objabi.GOOS, objabi.GOARCH) { + flag.BoolVar(&Flag.Race, "race", false, "enable race detector") + } + flag.StringVar(&Flag.Spectre, "spectre", Flag.Spectre, "enable spectre mitigations in `list` (all, index, ret)") + if enableTrace { + flag.BoolVar(&Flag.LowerT, "t", 
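Most options above bind straight to a Flag field, but -I, -embedcfg, -importmap, and -importcfg go through objabi.Flagfn1, which runs a callback per occurrence instead of storing a value. The standard library's flag.Func has the same shape; a sketch of the -I handler, with importDirs invented here:

package main

import (
	"flag"
	"fmt"
)

func main() {
	var importDirs []string
	// Each -I dir appends to the search path, as addImportDir does above.
	flag.Func("I", "add `directory` to import search path", func(dir string) error {
		if dir != "" {
			importDirs = append(importDirs, dir)
		}
		return nil
	})
	flag.Parse()
	fmt.Println("import search path:", importDirs)
}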
false, "trace type-checking") + } + flag.StringVar(&Flag.TrimPath, "trimpath", "", "remove `prefix` from recorded source file paths") + flag.BoolVar(&Ctxt.Debugvlog, "v", false, "increase debug verbosity") + flag.BoolVar(&Flag.WB, "wb", true, "enable write barrier") + if supportsDynlink(thearch.LinkArch.Arch) { + flag.BoolVar(&Flag.Shared, "shared", false, "generate code that can be linked into a shared library") + flag.BoolVar(&Flag.Dynlink, "dynlink", false, "support references to Go symbols defined in other shared libraries") + flag.BoolVar(&Ctxt.Flag_linkshared, "linkshared", false, "generate code that will be linked against Go shared libraries") + } + flag.StringVar(&Flag.CPUProfile, "cpuprofile", "", "write cpu profile to `file`") + flag.StringVar(&Flag.MemProfile, "memprofile", "", "write memory profile to `file`") + flag.Int64Var(&memprofilerate, "memprofilerate", 0, "set runtime.MemProfileRate to `rate`") + flag.StringVar(&Flag.GoVersion, "goversion", "", "required version of the runtime") + flag.StringVar(&Flag.SymABIs, "symabis", "", "read symbol ABIs from `file`") + flag.StringVar(&Flag.TraceProfile, "traceprofile", "", "write an execution trace to `file`") + flag.StringVar(&Flag.BlockProfile, "blockprofile", "", "write block profile to `file`") + flag.StringVar(&Flag.MutexProfile, "mutexprofile", "", "write mutex profile to `file`") + flag.StringVar(&Flag.Bench, "bench", "", "append benchmark times to `file`") + flag.BoolVar(&Flag.SmallFrames, "smallframes", false, "reduce the size limit for stack allocated objects") + flag.BoolVar(&Ctxt.UseBASEntries, "dwarfbasentries", Ctxt.UseBASEntries, "use base address selection entries in DWARF") + flag.StringVar(&Flag.JSON, "json", "", "version,destination for JSON compiler/optimizer logging") + + objabi.Flagparse(usage) + + for _, f := range strings.Split(Flag.Spectre, ",") { + f = strings.TrimSpace(f) + switch f { + default: + log.Fatalf("unknown setting -spectre=%s", f) + case "": + // nothing + case "all": + Flag.Cfg.SpectreIndex = true + Ctxt.Retpoline = true + case "index": + Flag.Cfg.SpectreIndex = true + case "ret": + Ctxt.Retpoline = true + } + } + + if Flag.Cfg.SpectreIndex { + switch objabi.GOARCH { + case "amd64": + // ok + default: + log.Fatalf("GOARCH=%s does not support -spectre=index", objabi.GOARCH) + } + } + + // Record flags that affect the build result. (And don't + // record flags that don't, since that would cause spurious + // changes in the binary.) 
+ recordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre") + + if Flag.SmallFrames { + maxStackVarSize = 128 * 1024 + maxImplicitStackVarSize = 16 * 1024 + } + + Ctxt.Flag_shared = Flag.Dynlink || Flag.Shared + Ctxt.Flag_dynlink = Flag.Dynlink + Ctxt.Flag_optimize = Flag.N == 0 + + Ctxt.Debugasm = Flag.S + if Flag.Dwarf { + Ctxt.DebugInfo = debuginfo + Ctxt.GenAbstractFunc = genAbstractFunc + Ctxt.DwFixups = obj.NewDwarfFixupTable(Ctxt) + } else { + // turn off inline generation if no dwarf at all + Flag.GenDwarfInl = 0 + Ctxt.Flag_locationlists = false + } + + if flag.NArg() < 1 && Flag.LowerD != "help" && Flag.LowerD != "ssa/help" { + usage() + } + + if Flag.GoVersion != "" && Flag.GoVersion != runtime.Version() { + fmt.Printf("compile: version %q does not match go tool version %q\n", runtime.Version(), Flag.GoVersion) + Exit(2) + } + + checkLang() + + if Flag.SymABIs != "" { + readSymABIs(Flag.SymABIs, Ctxt.Pkgpath) + } + + thearch.LinkArch.Init(Ctxt) + + if Flag.LowerO == "" { + p := flag.Arg(0) + if i := strings.LastIndex(p, "/"); i >= 0 { + p = p[i+1:] + } + if runtime.GOOS == "windows" { + if i := strings.LastIndex(p, `\`); i >= 0 { + p = p[i+1:] + } + } + if i := strings.LastIndex(p, "."); i >= 0 { + p = p[:i] + } + suffix := ".o" + if Flag.Pack { + suffix = ".a" + } + Flag.LowerO = p + suffix + } + + startProfile() + + if Flag.Race && Flag.MSan { + log.Fatal("cannot use both -race and -msan") + } + if Flag.Race || Flag.MSan { + // -race and -msan imply -d=checkptr for now. + Debug_checkptr = 1 + } + if ispkgin(omit_pkgs) { + Flag.Race = false + Flag.MSan = false + } + if Flag.Race { + racepkg = types.NewPkg("runtime/race", "") + } + if Flag.MSan { + msanpkg = types.NewPkg("runtime/msan", "") + } + if Flag.Race || Flag.MSan { + instrumenting = true + } + + if Flag.CompilingRuntime && Flag.N != 0 { + log.Fatal("cannot disable optimizations while compiling runtime") + } + if Flag.LowerC < 1 { + log.Fatalf("-c must be at least 1, got %d", Flag.LowerC) + } + if Flag.LowerC > 1 && !concurrentBackendAllowed() { + log.Fatalf("cannot use concurrent backend compilation with provided flags; invoked as %v", os.Args) + } + if Ctxt.Flag_locationlists && len(Ctxt.Arch.DWARFRegisters) == 0 { + log.Fatalf("location lists requested but register mapping not available on %v", Ctxt.Arch.Name) + } + + // parse -d argument + if Flag.LowerD != "" { + Split: + for _, name := range strings.Split(Flag.LowerD, ",") { + if name == "" { + continue + } + // display help about the -d option itself and quit + if name == "help" { + fmt.Print(debugHelpHeader) + maxLen := len("ssa/help") + for _, t := range debugtab { + if len(t.name) > maxLen { + maxLen = len(t.name) + } + } + for _, t := range debugtab { + fmt.Printf("\t%-*s\t%s\n", maxLen, t.name, t.help) + } + // ssa options have their own help + fmt.Printf("\t%-*s\t%s\n", maxLen, "ssa/help", "print help about SSA debugging") + fmt.Print(debugHelpFooter) + os.Exit(0) + } + val, valstring, haveInt := 1, "", true + if i := strings.IndexAny(name, "=:"); i >= 0 { + var err error + name, valstring = name[:i], name[i+1:] + val, err = strconv.Atoi(valstring) + if err != nil { + val, haveInt = 1, false + } + } + for _, t := range debugtab { + if t.name != name { + continue + } + switch vp := t.val.(type) { + case nil: + // Ignore + case *string: + *vp = valstring + case *int: + if !haveInt { + log.Fatalf("invalid debug value %v", name) + } + *vp = val + default: + panic("bad debugtab type") + } + 
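recordFlags, just above, deliberately lists only build-affecting flags so that toggling a diagnostic like -m does not change the produced object file. Further down, the -o default is derived from the first source file: strip the directory, drop the extension, and append .o, or .a under -pack. That derivation as a worked function — defaultOutfile is an invented name, and the Windows backslash handling is omitted:

package main

import (
	"fmt"
	"strings"
)

// defaultOutfile mirrors the -o default: basename minus extension,
// plus .o, or .a when -pack is in effect.
func defaultOutfile(srcPath string, pack bool) string {
	p := srcPath
	if i := strings.LastIndex(p, "/"); i >= 0 {
		p = p[i+1:]
	}
	if i := strings.LastIndex(p, "."); i >= 0 {
		p = p[:i]
	}
	suffix := ".o"
	if pack {
		suffix = ".a"
	}
	return p + suffix
}

func main() {
	fmt.Println(defaultOutfile("src/foo/main.go", false)) // main.o
	fmt.Println(defaultOutfile("src/foo/main.go", true))  // main.a
}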
continue Split + } + // special case for ssa for now + if strings.HasPrefix(name, "ssa/") { + // expect form ssa/phase/flag + // e.g. -d=ssa/generic_cse/time + // _ in phase name also matches space + phase := name[4:] + flag := "debug" // default flag is debug + if i := strings.Index(phase, "/"); i >= 0 { + flag = phase[i+1:] + phase = phase[:i] + } + err := ssa.PhaseOption(phase, flag, val, valstring) + if err != "" { + log.Fatalf(err) + } + continue Split + } + log.Fatalf("unknown debug key -d %s\n", name) + } + } + + if Flag.CompilingRuntime { + // Runtime can't use -d=checkptr, at least not yet. + Debug_checkptr = 0 + + // Fuzzing the runtime isn't interesting either. + Debug_libfuzzer = 0 + } + + // set via a -d flag + Ctxt.Debugpcln = Debug_pctab + if Flag.Dwarf { + dwarf.EnableLogging(Debug_gendwarfinl != 0) + } + + if Debug_softfloat != 0 { + thearch.SoftFloat = true + } + + // enable inlining. for now: + // default: inlining on. (Debug.l == 1) + // -l: inlining off (Debug.l == 0) + // -l=2, -l=3: inlining on again, with extra debugging (Debug.l > 1) + if Flag.LowerL <= 1 { + Flag.LowerL = 1 - Flag.LowerL + } + + if Flag.JSON != "" { // parse version,destination from json logging optimization. + logopt.LogJsonOption(Flag.JSON) + } +} + +// concurrentFlagOk reports whether the current compiler flags +// are compatible with concurrent compilation. +func concurrentFlagOk() bool { + // TODO(rsc): Many of these are fine. Remove them. + return Flag.Percent == 0 && + Flag.E == 0 && + Flag.K == 0 && + Flag.L == 0 && + Flag.LowerH == 0 && + Flag.LowerJ == 0 && + Flag.LowerM == 0 && + Flag.LowerR == 0 +} + +func concurrentBackendAllowed() bool { + if !concurrentFlagOk() { + return false + } + + // Debug.S by itself is ok, because all printing occurs + // while writing the object file, and that is non-concurrent. + // Adding Debug_vlog, however, causes Debug.S to also print + // while flushing the plist, which happens concurrently. + if Ctxt.Debugvlog || Flag.LowerD != "" || Flag.Live > 0 { + return false + } + // TODO: Test and delete this condition. 
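Two details in the block above are easy to miss. First, the -l value is remapped so that the zero default means inlining on: Flag.LowerL of 0 becomes 1, an explicit -l yields 0 (inlining off), and -l=2 or higher is kept as-is for extra debugging. Second, -d parses comma-separated name[=value] entries with the value defaulting to 1, routing ssa/phase/flag entries to the SSA backend. The core -d syntax, minus debugtab and the ssa special case, fits in a few lines:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseDebug handles the -d list syntax: name, name=value, name:value.
func parseDebug(s string) map[string]int {
	m := make(map[string]int)
	for _, name := range strings.Split(s, ",") {
		if name == "" {
			continue
		}
		val := 1 // a bare name means 1
		if i := strings.IndexAny(name, "=:"); i >= 0 {
			if n, err := strconv.Atoi(name[i+1:]); err == nil {
				val = n
			}
			name = name[:i]
		}
		m[name] = val
	}
	return m
}

func main() {
	fmt.Println(parseDebug("checkptr=0,nil")) // map[checkptr:0 nil:1]
}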
+ if objabi.Fieldtrack_enabled != 0 { + return false + } + // TODO: fix races and enable the following flags + if Ctxt.Flag_shared || Ctxt.Flag_dynlink || Flag.Race { + return false + } + return true +} + +func addImportDir(dir string) { + if dir != "" { + Flag.Cfg.ImportDirs = append(Flag.Cfg.ImportDirs, dir) + } +} + +func addImportMap(s string) { + if Flag.Cfg.ImportMap == nil { + Flag.Cfg.ImportMap = make(map[string]string) + } + if strings.Count(s, "=") != 1 { + log.Fatal("-importmap argument must be of the form source=actual") + } + i := strings.Index(s, "=") + source, actual := s[:i], s[i+1:] + if source == "" || actual == "" { + log.Fatal("-importmap argument must be of the form source=actual; source and actual must be non-empty") + } + Flag.Cfg.ImportMap[source] = actual +} + +func readImportCfg(file string) { + if Flag.Cfg.ImportMap == nil { + Flag.Cfg.ImportMap = make(map[string]string) + } + Flag.Cfg.PackageFile = map[string]string{} + data, err := ioutil.ReadFile(file) + if err != nil { + log.Fatalf("-importcfg: %v", err) + } + + for lineNum, line := range strings.Split(string(data), "\n") { + lineNum++ // 1-based + line = strings.TrimSpace(line) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + + var verb, args string + if i := strings.Index(line, " "); i < 0 { + verb = line + } else { + verb, args = line[:i], strings.TrimSpace(line[i+1:]) + } + var before, after string + if i := strings.Index(args, "="); i >= 0 { + before, after = args[:i], args[i+1:] + } + switch verb { + default: + log.Fatalf("%s:%d: unknown directive %q", file, lineNum, verb) + case "importmap": + if before == "" || after == "" { + log.Fatalf(`%s:%d: invalid importmap: syntax is "importmap old=new"`, file, lineNum) + } + Flag.Cfg.ImportMap[before] = after + case "packagefile": + if before == "" || after == "" { + log.Fatalf(`%s:%d: invalid packagefile: syntax is "packagefile path=filename"`, file, lineNum) + } + Flag.Cfg.PackageFile[before] = after + } + } +} + +func readEmbedCfg(file string) { + data, err := ioutil.ReadFile(file) + if err != nil { + log.Fatalf("-embedcfg: %v", err) + } + if err := json.Unmarshal(data, &Flag.Cfg.Embed); err != nil { + log.Fatalf("%s: %v", file, err) + } + if Flag.Cfg.Embed.Patterns == nil { + log.Fatalf("%s: invalid embedcfg: missing Patterns", file) + } + if Flag.Cfg.Embed.Files == nil { + log.Fatalf("%s: invalid embedcfg: missing Files", file) + } +} diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index 1242fc06cb3b6..6cab03d7265b1 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -39,7 +39,7 @@ var ( // isRuntimePkg reports whether p is package runtime. func isRuntimePkg(p *types.Pkg) bool { - if compiling_runtime && p == localpkg { + if Flag.CompilingRuntime && p == localpkg { return true } return p.Path == "runtime" @@ -48,7 +48,7 @@ func isRuntimePkg(p *types.Pkg) bool { // isReflectPkg reports whether p is package reflect. 
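readImportCfg above accepts the importcfg format the go command generates: blank lines and # comments are skipped, and each remaining line is either importmap old=new or packagefile path=file. For example (all paths invented):

    # import config for compiling package foo
    importmap old.example/pkg=new.example/pkg
    packagefile fmt=/cache/aa/fmt.a
    packagefile new.example/pkg=/cache/bb/pkg.a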
func isReflectPkg(p *types.Pkg) bool { if p == localpkg { - return myimportpath == "reflect" + return Ctxt.Pkgpath == "reflect" } return p.Path == "reflect" } @@ -99,25 +99,8 @@ var ( var pragcgobuf [][]string -var outfile string -var linkobj string - var decldepth int32 -var nolocalimports bool - -// gc debug flags -type DebugFlags struct { - P, B, C, E, - K, L, N, S, - W, e, h, j, - l, m, r, w int -} - -var Debug DebugFlags - -var debugstr string - var Debug_checknil int var Debug_typeassert int @@ -145,12 +128,6 @@ var gopkg *types.Pkg // pseudo-package for method symbols on anonymous receiver var zerosize int64 -var myimportpath string - -var localimport string - -var asmhdr string - var simtype [NTYPE]types.EType var ( @@ -201,23 +178,6 @@ var nblank *Node var typecheckok bool -var compiling_runtime bool - -// Compiling the standard library -var compiling_std bool - -var use_writebarrier bool - -var pure_go bool - -var flag_installsuffix string - -var flag_race bool - -var flag_msan bool - -var flagDWARF bool - // Whether we are adding any sort of code instrumentation, such as // when the race detector is enabled. var instrumenting bool @@ -225,17 +185,8 @@ var instrumenting bool // Whether we are tracking lexical scopes for DWARF. var trackScopes bool -// Controls generation of DWARF inlined instance records. Zero -// disables, 1 emits inlined routines but suppresses var info, -// and 2 emits inlined routines with tracking of formals/locals. -var genDwarfInline int - -var debuglive int - var Ctxt *obj.Link -var writearchive bool - var nodfp *Node var disable_checknil int diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index d599a383e7b88..00d425a77cc86 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -58,7 +58,7 @@ type Progs struct { func newProgs(fn *Node, worker int) *Progs { pp := new(Progs) if Ctxt.CanReuseProgs() { - sz := len(sharedProgArray) / nBackendWorkers + sz := len(sharedProgArray) / Flag.LowerC pp.progcache = sharedProgArray[sz*worker : sz*(worker+1)] } pp.curfn = fn @@ -90,7 +90,7 @@ func (pp *Progs) NewProg() *obj.Prog { // Flush converts from pp to machine code. func (pp *Progs) Flush() { plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.curfn} - obj.Flushplist(Ctxt, plist, pp.NewProg, myimportpath) + obj.Flushplist(Ctxt, plist, pp.NewProg, Ctxt.Pkgpath) } // Free clears pp and any associated resources. @@ -133,7 +133,7 @@ func (pp *Progs) Prog(as obj.As) *obj.Prog { pp.clearp(pp.next) p.Link = pp.next - if !pp.pos.IsKnown() && Debug.K != 0 { + if !pp.pos.IsKnown() && Flag.K != 0 { Warn("prog: unknown position (line 0)") } @@ -278,7 +278,7 @@ func (f *Func) initLSym(hasBody bool) { // Clumsy but important. // See test/recover.go for test cases and src/reflect/value.go // for the actual functions being considered. 
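In the gsubr.go hunk above, newProgs carves the shared Prog cache into equal per-worker slices using the -c concurrency level (now Flag.LowerC), so concurrent backend workers never touch each other's entries. The slicing in isolation, with workerChunk invented:

package main

import "fmt"

// workerChunk returns worker w's share of a cache split evenly among
// n workers, the same slicing newProgs applies to sharedProgArray.
func workerChunk(cache []int, n, w int) []int {
	sz := len(cache) / n
	return cache[sz*w : sz*(w+1)]
}

func main() {
	cache := make([]int, 12)
	for w := 0; w < 3; w++ {
		fmt.Println("worker", w, "gets", len(workerChunk(cache, 3, w)), "entries")
	}
}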
- if myimportpath == "reflect" { + if Ctxt.Pkgpath == "reflect" { switch f.Nname.Sym.Name { case "callReflect", "callMethod": flag |= obj.WRAPPER diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 352335a993aef..a8a84b8cbc426 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -714,8 +714,8 @@ func (r *importReader) doInline(n *Node) { importlist = append(importlist, n) - if Debug.E > 0 && Debug.m > 2 { - if Debug.m > 3 { + if Flag.E > 0 && Flag.LowerM > 2 { + if Flag.LowerM > 3 { fmt.Printf("inl body for %v %#v: %+v\n", n, n.Type, asNodes(n.Func.Inl.Body)) } else { fmt.Printf("inl body for %v %#v: %v\n", n, n.Type, asNodes(n.Func.Inl.Body)) diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 0695b161f1249..50091e9c111f3 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -86,7 +86,7 @@ func typecheckinl(fn *Node) { return // typecheckinl on local function } - if Debug.m > 2 || Debug_export != 0 { + if Flag.LowerM > 2 || Debug_export != 0 { fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym, fn, asNodes(fn.Func.Inl.Body)) } @@ -118,10 +118,10 @@ func caninl(fn *Node) { } var reason string // reason, if any, that the function was not inlined - if Debug.m > 1 || logopt.Enabled() { + if Flag.LowerM > 1 || logopt.Enabled() { defer func() { if reason != "" { - if Debug.m > 1 { + if Flag.LowerM > 1 { fmt.Printf("%v: cannot inline %v: %s\n", fn.Line(), fn.Func.Nname, reason) } if logopt.Enabled() { @@ -138,7 +138,7 @@ func caninl(fn *Node) { } // If marked "go:norace" and -race compilation, don't inline. - if flag_race && fn.Func.Pragma&Norace != 0 { + if Flag.Race && fn.Func.Pragma&Norace != 0 { reason = "marked go:norace with -race compilation" return } @@ -189,7 +189,7 @@ func caninl(fn *Node) { defer n.Func.SetInlinabilityChecked(true) cc := int32(inlineExtraCallCost) - if Debug.l == 4 { + if Flag.LowerL == 4 { cc = 1 // this appears to yield better performance than 0. 
} @@ -222,9 +222,9 @@ func caninl(fn *Node) { Body: inlcopylist(fn.Nbody.Slice()), } - if Debug.m > 1 { + if Flag.LowerM > 1 { fmt.Printf("%v: can inline %#v with cost %d as: %#v { %#v }\n", fn.Line(), n, inlineMaxBudget-visitor.budget, fn.Type, asNodes(n.Func.Inl.Body)) - } else if Debug.m != 0 { + } else if Flag.LowerM != 0 { fmt.Printf("%v: can inline %v\n", fn.Line(), n) } if logopt.Enabled() { @@ -433,7 +433,7 @@ func (v *hairyVisitor) visit(n *Node) bool { v.budget-- // When debugging, don't stop early, to get full cost of inlining this function - if v.budget < 0 && Debug.m < 2 && !logopt.Enabled() { + if v.budget < 0 && Flag.LowerM < 2 && !logopt.Enabled() { return true } @@ -676,7 +676,7 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node { switch n.Op { case OCALLFUNC: - if Debug.m > 3 { + if Flag.LowerM > 3 { fmt.Printf("%v:call to func %+v\n", n.Line(), n.Left) } if isIntrinsicCall(n) { @@ -687,7 +687,7 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node { } case OCALLMETH: - if Debug.m > 3 { + if Flag.LowerM > 3 { fmt.Printf("%v:call to meth %L\n", n.Line(), n.Left.Right) } @@ -922,7 +922,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { } if inlMap[fn] { - if Debug.m > 1 { + if Flag.LowerM > 1 { fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", n.Line(), fn, Curfn.funcname()) } return n @@ -936,12 +936,12 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { } // We have a function node, and it has an inlineable body. - if Debug.m > 1 { + if Flag.LowerM > 1 { fmt.Printf("%v: inlining call to %v %#v { %#v }\n", n.Line(), fn.Sym, fn.Type, asNodes(fn.Func.Inl.Body)) - } else if Debug.m != 0 { + } else if Flag.LowerM != 0 { fmt.Printf("%v: inlining call to %v\n", n.Line(), fn) } - if Debug.m > 2 { + if Flag.LowerM > 2 { fmt.Printf("%v: Before inlining: %+v\n", n.Line(), n) } @@ -1026,7 +1026,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { } inlf := typecheck(inlvar(ln), ctxExpr) inlvars[ln] = inlf - if genDwarfInline > 0 { + if Flag.GenDwarfInl > 0 { if ln.Class() == PPARAM { inlf.Name.SetInlFormal(true) } else { @@ -1064,7 +1064,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { m = retvar(t, i) } - if genDwarfInline > 0 { + if Flag.GenDwarfInl > 0 { // Don't update the src.Pos on a return variable if it // was manufactured by the inliner (e.g. "~R2"); such vars // were not part of the original callee. @@ -1165,7 +1165,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { inlMark.Xoffset = int64(newIndex) ninit.Append(inlMark) - if genDwarfInline > 0 { + if Flag.GenDwarfInl > 0 { if !fn.Sym.Linksym().WasInlined() { Ctxt.DwFixups.SetPrecursorFunc(fn.Sym.Linksym(), fn) fn.Sym.Linksym().Set(obj.AttrWasInlined, true) @@ -1188,7 +1188,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { typecheckslice(body, ctxStmt) - if genDwarfInline > 0 { + if Flag.GenDwarfInl > 0 { for _, v := range inlfvars { v.Pos = subst.updatedPos(v.Pos) } @@ -1216,7 +1216,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { } } - if Debug.m > 2 { + if Flag.LowerM > 2 { fmt.Printf("%v: After inlining %+v\n\n", call.Line(), call) } @@ -1227,7 +1227,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { // PAUTO's in the calling functions, and link them off of the // PPARAM's, PAUTOS and PPARAMOUTs of the called function. 
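The caninl/hairyVisitor code above treats inlinability as a budget: each visited node costs at least one unit, calls cost inlineExtraCallCost more, and exhausting the budget marks the function too hairy — except under -m=2 or higher, where the walk continues so the full cost can be reported. A toy budgeted walk over an invented tree type, not the compiler's Node:

package main

import "fmt"

// node is a stand-in for the compiler's AST node.
type node struct {
	cost     int32
	children []*node
}

// tooHairy charges each node's cost against a shared budget and
// reports whether the budget ran out, like hairyVisitor.visit.
func tooHairy(n *node, budget *int32) bool {
	if n == nil {
		return false
	}
	*budget -= n.cost
	if *budget < 0 {
		return true
	}
	for _, c := range n.children {
		if tooHairy(c, budget) {
			return true
		}
	}
	return false
}

func main() {
	fn := &node{cost: 10, children: []*node{{cost: 30}, {cost: 50}}}
	budget := int32(80)
	fmt.Println("too hairy:", tooHairy(fn, &budget)) // true: total cost 90 exceeds 80
}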
func inlvar(var_ *Node) *Node { - if Debug.m > 3 { + if Flag.LowerM > 3 { fmt.Printf("inlvar %+v\n", var_) } @@ -1310,13 +1310,13 @@ func (subst *inlsubst) node(n *Node) *Node { switch n.Op { case ONAME: if inlvar := subst.inlvars[n]; inlvar != nil { // These will be set during inlnode - if Debug.m > 2 { + if Flag.LowerM > 2 { fmt.Printf("substituting name %+v -> %+v\n", n, inlvar) } return inlvar } - if Debug.m > 2 { + if Flag.LowerM > 2 { fmt.Printf("not substituting name %+v\n", n) } return n @@ -1449,21 +1449,21 @@ func devirtualizeCall(call *Node) { x = typecheck(x, ctxExpr|ctxCallee) switch x.Op { case ODOTMETH: - if Debug.m != 0 { + if Flag.LowerM != 0 { Warnl(call.Pos, "devirtualizing %v to %v", call.Left, typ) } call.Op = OCALLMETH call.Left = x case ODOTINTER: // Promoted method from embedded interface-typed field (#42279). - if Debug.m != 0 { + if Flag.LowerM != 0 { Warnl(call.Pos, "partially devirtualizing %v to %v", call.Left, typ) } call.Op = OCALLINTER call.Left = x default: // TODO(mdempsky): Turn back into Fatalf after more testing. - if Debug.m != 0 { + if Flag.LowerM != 0 { Warnl(call.Pos, "failed to devirtualize %v (%v)", x, x.Op) } return diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 428bf31fa96c4..8edc0d4495f40 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -10,7 +10,7 @@ import ( "bufio" "bytes" "cmd/compile/internal/logopt" - "cmd/compile/internal/ssa" + "cmd/compile/internal/types" "cmd/internal/bio" "cmd/internal/dwarf" @@ -35,12 +35,6 @@ import ( "strings" ) -var ( - buildid string - spectre string - spectreIndex bool -) - var ( Debug_append int Debug_checkptr int @@ -51,7 +45,6 @@ var ( Debug_libfuzzer int Debug_panic int Debug_slice int - Debug_vlog bool Debug_wb int Debug_pctab string Debug_locationlist int @@ -113,12 +106,6 @@ Key "pctab" supports values: "pctospadj", "pctofile", "pctoline", "pctoinline", "pctopcdata" ` -func usage() { - fmt.Fprintf(os.Stderr, "usage: compile [options] file.go...\n") - objabi.Flagprint(os.Stderr) - Exit(2) -} - func hidePanic() { if Debug_panic == 0 && Errors() > 0 { // If we've already complained about things @@ -139,7 +126,6 @@ func supportsDynlink(arch *sys.Arch) bool { // timing data for compiler phases var timings Timings -var benchfile string var nowritebarrierrecCheck *nowritebarrierrecChecker @@ -204,321 +190,7 @@ func Main(archInit func(*Arch)) { // pseudo-package used for methods with anonymous receivers gopkg = types.NewPkg("go", "") - Wasm := objabi.GOARCH == "wasm" - - // Whether the limit for stack-allocated objects is much smaller than normal. - // This can be helpful for diagnosing certain causes of GC latency. See #27732. 
- smallFrames := false - jsonLogOpt := "" - - flag.BoolVar(&compiling_runtime, "+", false, "compiling runtime") - flag.BoolVar(&compiling_std, "std", false, "compiling standard library") - flag.StringVar(&localimport, "D", "", "set relative `path` for local imports") - - objabi.Flagcount("%", "debug non-static initializers", &Debug.P) - objabi.Flagcount("B", "disable bounds checking", &Debug.B) - objabi.Flagcount("C", "disable printing of columns in error messages", &Debug.C) - objabi.Flagcount("E", "debug symbol export", &Debug.E) - objabi.Flagcount("K", "debug missing line numbers", &Debug.K) - objabi.Flagcount("L", "show full file names in error messages", &Debug.L) - objabi.Flagcount("N", "disable optimizations", &Debug.N) - objabi.Flagcount("S", "print assembly listing", &Debug.S) - objabi.Flagcount("W", "debug parse tree after type checking", &Debug.W) - objabi.Flagcount("e", "no limit on number of errors reported", &Debug.e) - objabi.Flagcount("h", "halt on error", &Debug.h) - objabi.Flagcount("j", "debug runtime-initialized variables", &Debug.j) - objabi.Flagcount("l", "disable inlining", &Debug.l) - objabi.Flagcount("m", "print optimization decisions", &Debug.m) - objabi.Flagcount("r", "debug generated wrappers", &Debug.r) - objabi.Flagcount("w", "debug type checking", &Debug.w) - - objabi.Flagfn1("I", "add `directory` to import search path", addidir) - objabi.AddVersionFlag() // -V - flag.StringVar(&asmhdr, "asmhdr", "", "write assembly header to `file`") - flag.StringVar(&buildid, "buildid", "", "record `id` as the build id in the export metadata") - flag.IntVar(&nBackendWorkers, "c", 1, "concurrency during compilation, 1 means no concurrency") - flag.BoolVar(&pure_go, "complete", false, "compiling complete package (no C or assembly)") - flag.StringVar(&debugstr, "d", "", "print debug information about items in `list`; try -d help") - flag.BoolVar(&flagDWARF, "dwarf", !Wasm, "generate DWARF symbols") - flag.BoolVar(&Ctxt.Flag_locationlists, "dwarflocationlists", true, "add location lists to DWARF in optimized mode") - flag.IntVar(&genDwarfInline, "gendwarfinl", 2, "generate DWARF inline info records") - objabi.Flagfn1("embedcfg", "read go:embed configuration from `file`", readEmbedCfg) - objabi.Flagfn1("importmap", "add `definition` of the form source=actual to import map", addImportMap) - objabi.Flagfn1("importcfg", "read import configuration from `file`", readImportCfg) - flag.StringVar(&flag_installsuffix, "installsuffix", "", "set pkg directory `suffix`") - flag.StringVar(&flag_lang, "lang", "", "release to compile for") - flag.StringVar(&linkobj, "linkobj", "", "write linker-specific object to `file`") - objabi.Flagcount("live", "debug liveness analysis", &debuglive) - if sys.MSanSupported(objabi.GOOS, objabi.GOARCH) { - flag.BoolVar(&flag_msan, "msan", false, "build code compatible with C/C++ memory sanitizer") - } - flag.BoolVar(&nolocalimports, "nolocalimports", false, "reject local (relative) imports") - flag.StringVar(&outfile, "o", "", "write output to `file`") - flag.StringVar(&myimportpath, "p", "", "set expected package import `path`") - flag.BoolVar(&writearchive, "pack", false, "write to file.a instead of file.o") - if sys.RaceDetectorSupported(objabi.GOOS, objabi.GOARCH) { - flag.BoolVar(&flag_race, "race", false, "enable race detector") - } - flag.StringVar(&spectre, "spectre", spectre, "enable spectre mitigations in `list` (all, index, ret)") - if enableTrace { - flag.BoolVar(&trace, "t", false, "trace type-checking") - } - flag.StringVar(&pathPrefix, 
"trimpath", "", "remove `prefix` from recorded source file paths") - flag.BoolVar(&Debug_vlog, "v", false, "increase debug verbosity") - flag.BoolVar(&use_writebarrier, "wb", true, "enable write barrier") - var flag_shared bool - var flag_dynlink bool - if supportsDynlink(thearch.LinkArch.Arch) { - flag.BoolVar(&flag_shared, "shared", false, "generate code that can be linked into a shared library") - flag.BoolVar(&flag_dynlink, "dynlink", false, "support references to Go symbols defined in other shared libraries") - flag.BoolVar(&Ctxt.Flag_linkshared, "linkshared", false, "generate code that will be linked against Go shared libraries") - } - flag.StringVar(&cpuprofile, "cpuprofile", "", "write cpu profile to `file`") - flag.StringVar(&memprofile, "memprofile", "", "write memory profile to `file`") - flag.Int64Var(&memprofilerate, "memprofilerate", 0, "set runtime.MemProfileRate to `rate`") - var goversion string - flag.StringVar(&goversion, "goversion", "", "required version of the runtime") - var symabisPath string - flag.StringVar(&symabisPath, "symabis", "", "read symbol ABIs from `file`") - flag.StringVar(&traceprofile, "traceprofile", "", "write an execution trace to `file`") - flag.StringVar(&blockprofile, "blockprofile", "", "write block profile to `file`") - flag.StringVar(&mutexprofile, "mutexprofile", "", "write mutex profile to `file`") - flag.StringVar(&benchfile, "bench", "", "append benchmark times to `file`") - flag.BoolVar(&smallFrames, "smallframes", false, "reduce the size limit for stack allocated objects") - flag.BoolVar(&Ctxt.UseBASEntries, "dwarfbasentries", Ctxt.UseBASEntries, "use base address selection entries in DWARF") - flag.StringVar(&jsonLogOpt, "json", "", "version,destination for JSON compiler/optimizer logging") - - objabi.Flagparse(usage) - - Ctxt.Pkgpath = myimportpath - - for _, f := range strings.Split(spectre, ",") { - f = strings.TrimSpace(f) - switch f { - default: - log.Fatalf("unknown setting -spectre=%s", f) - case "": - // nothing - case "all": - spectreIndex = true - Ctxt.Retpoline = true - case "index": - spectreIndex = true - case "ret": - Ctxt.Retpoline = true - } - } - - if spectreIndex { - switch objabi.GOARCH { - case "amd64": - // ok - default: - log.Fatalf("GOARCH=%s does not support -spectre=index", objabi.GOARCH) - } - } - - // Record flags that affect the build result. (And don't - // record flags that don't, since that would cause spurious - // changes in the binary.) 
- recordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre") - - if smallFrames { - maxStackVarSize = 128 * 1024 - maxImplicitStackVarSize = 16 * 1024 - } - - Ctxt.Flag_shared = flag_dynlink || flag_shared - Ctxt.Flag_dynlink = flag_dynlink - Ctxt.Flag_optimize = Debug.N == 0 - - Ctxt.Debugasm = Debug.S - Ctxt.Debugvlog = Debug_vlog - if flagDWARF { - Ctxt.DebugInfo = debuginfo - Ctxt.GenAbstractFunc = genAbstractFunc - Ctxt.DwFixups = obj.NewDwarfFixupTable(Ctxt) - } else { - // turn off inline generation if no dwarf at all - genDwarfInline = 0 - Ctxt.Flag_locationlists = false - } - - if flag.NArg() < 1 && debugstr != "help" && debugstr != "ssa/help" { - usage() - } - - if goversion != "" && goversion != runtime.Version() { - fmt.Printf("compile: version %q does not match go tool version %q\n", runtime.Version(), goversion) - Exit(2) - } - - checkLang() - - if symabisPath != "" { - readSymABIs(symabisPath, myimportpath) - } - - thearch.LinkArch.Init(Ctxt) - - if outfile == "" { - p := flag.Arg(0) - if i := strings.LastIndex(p, "/"); i >= 0 { - p = p[i+1:] - } - if runtime.GOOS == "windows" { - if i := strings.LastIndex(p, `\`); i >= 0 { - p = p[i+1:] - } - } - if i := strings.LastIndex(p, "."); i >= 0 { - p = p[:i] - } - suffix := ".o" - if writearchive { - suffix = ".a" - } - outfile = p + suffix - } - - startProfile() - - if flag_race && flag_msan { - log.Fatal("cannot use both -race and -msan") - } - if flag_race || flag_msan { - // -race and -msan imply -d=checkptr for now. - Debug_checkptr = 1 - } - if ispkgin(omit_pkgs) { - flag_race = false - flag_msan = false - } - if flag_race { - racepkg = types.NewPkg("runtime/race", "") - } - if flag_msan { - msanpkg = types.NewPkg("runtime/msan", "") - } - if flag_race || flag_msan { - instrumenting = true - } - - if compiling_runtime && Debug.N != 0 { - log.Fatal("cannot disable optimizations while compiling runtime") - } - if nBackendWorkers < 1 { - log.Fatalf("-c must be at least 1, got %d", nBackendWorkers) - } - if nBackendWorkers > 1 && !concurrentBackendAllowed() { - log.Fatalf("cannot use concurrent backend compilation with provided flags; invoked as %v", os.Args) - } - if Ctxt.Flag_locationlists && len(Ctxt.Arch.DWARFRegisters) == 0 { - log.Fatalf("location lists requested but register mapping not available on %v", Ctxt.Arch.Name) - } - - // parse -d argument - if debugstr != "" { - Split: - for _, name := range strings.Split(debugstr, ",") { - if name == "" { - continue - } - // display help about the -d option itself and quit - if name == "help" { - fmt.Print(debugHelpHeader) - maxLen := len("ssa/help") - for _, t := range debugtab { - if len(t.name) > maxLen { - maxLen = len(t.name) - } - } - for _, t := range debugtab { - fmt.Printf("\t%-*s\t%s\n", maxLen, t.name, t.help) - } - // ssa options have their own help - fmt.Printf("\t%-*s\t%s\n", maxLen, "ssa/help", "print help about SSA debugging") - fmt.Print(debugHelpFooter) - os.Exit(0) - } - val, valstring, haveInt := 1, "", true - if i := strings.IndexAny(name, "=:"); i >= 0 { - var err error - name, valstring = name[:i], name[i+1:] - val, err = strconv.Atoi(valstring) - if err != nil { - val, haveInt = 1, false - } - } - for _, t := range debugtab { - if t.name != name { - continue - } - switch vp := t.val.(type) { - case nil: - // Ignore - case *string: - *vp = valstring - case *int: - if !haveInt { - log.Fatalf("invalid debug value %v", name) - } - *vp = val - default: - panic("bad debugtab type") - } - 
continue Split - } - // special case for ssa for now - if strings.HasPrefix(name, "ssa/") { - // expect form ssa/phase/flag - // e.g. -d=ssa/generic_cse/time - // _ in phase name also matches space - phase := name[4:] - flag := "debug" // default flag is debug - if i := strings.Index(phase, "/"); i >= 0 { - flag = phase[i+1:] - phase = phase[:i] - } - err := ssa.PhaseOption(phase, flag, val, valstring) - if err != "" { - log.Fatalf(err) - } - continue Split - } - log.Fatalf("unknown debug key -d %s\n", name) - } - } - - if compiling_runtime { - // Runtime can't use -d=checkptr, at least not yet. - Debug_checkptr = 0 - - // Fuzzing the runtime isn't interesting either. - Debug_libfuzzer = 0 - } - - // set via a -d flag - Ctxt.Debugpcln = Debug_pctab - if flagDWARF { - dwarf.EnableLogging(Debug_gendwarfinl != 0) - } - - if Debug_softfloat != 0 { - thearch.SoftFloat = true - } - - // enable inlining. for now: - // default: inlining on. (Debug.l == 1) - // -l: inlining off (Debug.l == 0) - // -l=2, -l=3: inlining on again, with extra debugging (Debug.l > 1) - if Debug.l <= 1 { - Debug.l = 1 - Debug.l - } - - if jsonLogOpt != "" { // parse version,destination from json logging optimization. - logopt.LogJsonOption(jsonLogOpt) - } + ParseFlags() ssaDump = os.Getenv("GOSSAFUNC") ssaDir = os.Getenv("GOSSADIR") @@ -534,7 +206,7 @@ func Main(archInit func(*Arch)) { } } - trackScopes = flagDWARF + trackScopes = Flag.Dwarf Widthptr = thearch.LinkArch.PtrSize Widthreg = thearch.LinkArch.RegSize @@ -674,7 +346,7 @@ func Main(archInit func(*Arch)) { ExitIfErrors() } - if Debug.l != 0 { + if Flag.LowerL != 0 { // Find functions that can be inlined and clone them before walk expands them. visitBottomUp(xtop, func(list []*Node, recursive bool) { numfns := numNonClosures(list) @@ -685,7 +357,7 @@ func Main(archInit func(*Arch)) { // across more than one function. caninl(n) } else { - if Debug.m > 1 { + if Flag.LowerM > 1 { fmt.Printf("%v: cannot inline %v: recursive\n", n.Line(), n.Func.Nname) } } @@ -716,7 +388,7 @@ func Main(archInit func(*Arch)) { // checking. This must happen before transformclosure. // We'll do the final check after write barriers are // inserted. - if compiling_runtime { + if Flag.CompilingRuntime { nowritebarrierrecCheck = newNowritebarrierrecChecker() } @@ -768,9 +440,9 @@ func Main(archInit func(*Arch)) { // DWARF inlining gen so as to avoid problems with generated // method wrappers. if Ctxt.DwFixups != nil { - Ctxt.DwFixups.Finalize(myimportpath, Debug_gendwarfinl != 0) + Ctxt.DwFixups.Finalize(Ctxt.Pkgpath, Debug_gendwarfinl != 0) Ctxt.DwFixups = nil - genDwarfInline = 0 + Flag.GenDwarfInl = 0 } // Phase 9: Check external declarations. 
@@ -790,7 +462,7 @@ func Main(archInit func(*Arch)) { dumpdata() Ctxt.NumberSyms() dumpobj() - if asmhdr != "" { + if Flag.AsmHdr != "" { dumpasmhdr() } @@ -813,14 +485,14 @@ func Main(archInit func(*Arch)) { Fatalf("%d uncompiled functions", len(compilequeue)) } - logopt.FlushLoggedOpts(Ctxt, myimportpath) + logopt.FlushLoggedOpts(Ctxt, Ctxt.Pkgpath) ExitIfErrors() flusherrors() timings.Stop() - if benchfile != "" { - if err := writebench(benchfile); err != nil { + if Flag.Bench != "" { + if err := writebench(Flag.Bench); err != nil { log.Fatalf("cannot write benchmark data: %v", err) } } @@ -847,7 +519,7 @@ func writebench(filename string) error { fmt.Fprintln(&buf, "commit:", objabi.Version) fmt.Fprintln(&buf, "goos:", runtime.GOOS) fmt.Fprintln(&buf, "goarch:", runtime.GOARCH) - timings.Write(&buf, "BenchmarkCompile:"+myimportpath+":") + timings.Write(&buf, "BenchmarkCompile:"+Ctxt.Pkgpath+":") n, err := f.Write(buf.Bytes()) if err != nil { @@ -860,70 +532,6 @@ func writebench(filename string) error { return f.Close() } -var ( - importMap map[string]string - packageFile map[string]string // nil means not in use -) - -func addImportMap(s string) { - if importMap == nil { - importMap = make(map[string]string) - } - if strings.Count(s, "=") != 1 { - log.Fatal("-importmap argument must be of the form source=actual") - } - i := strings.Index(s, "=") - source, actual := s[:i], s[i+1:] - if source == "" || actual == "" { - log.Fatal("-importmap argument must be of the form source=actual; source and actual must be non-empty") - } - importMap[source] = actual -} - -func readImportCfg(file string) { - if importMap == nil { - importMap = make(map[string]string) - } - packageFile = map[string]string{} - data, err := ioutil.ReadFile(file) - if err != nil { - log.Fatalf("-importcfg: %v", err) - } - - for lineNum, line := range strings.Split(string(data), "\n") { - lineNum++ // 1-based - line = strings.TrimSpace(line) - if line == "" || strings.HasPrefix(line, "#") { - continue - } - - var verb, args string - if i := strings.Index(line, " "); i < 0 { - verb = line - } else { - verb, args = line[:i], strings.TrimSpace(line[i+1:]) - } - var before, after string - if i := strings.Index(args, "="); i >= 0 { - before, after = args[:i], args[i+1:] - } - switch verb { - default: - log.Fatalf("%s:%d: unknown directive %q", file, lineNum, verb) - case "importmap": - if before == "" || after == "" { - log.Fatalf(`%s:%d: invalid importmap: syntax is "importmap old=new"`, file, lineNum) - } - importMap[before] = after - case "packagefile": - if before == "" || after == "" { - log.Fatalf(`%s:%d: invalid packagefile: syntax is "packagefile path=filename"`, file, lineNum) - } - packageFile[before] = after - } - } -} - // symabiDefs and symabiRefs record the defined and referenced ABIs of // symbols required by non-Go code. 
These are keyed by link symbol // name, where the local package prefix is always `"".` @@ -1009,14 +617,6 @@ func arsize(b *bufio.Reader, name string) int { return i } -var idirs []string - -func addidir(dir string) { - if dir != "" { - idirs = append(idirs, dir) - } -} - func isDriveLetter(b byte) bool { return 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z' } @@ -1031,12 +631,12 @@ func islocalname(name string) bool { func findpkg(name string) (file string, ok bool) { if islocalname(name) { - if nolocalimports { + if Flag.NoLocalImports { return "", false } - if packageFile != nil { - file, ok = packageFile[name] + if Flag.Cfg.PackageFile != nil { + file, ok = Flag.Cfg.PackageFile[name] return file, ok } @@ -1062,12 +662,12 @@ func findpkg(name string) (file string, ok bool) { return "", false } - if packageFile != nil { - file, ok = packageFile[name] + if Flag.Cfg.PackageFile != nil { + file, ok = Flag.Cfg.PackageFile[name] return file, ok } - for _, dir := range idirs { + for _, dir := range Flag.Cfg.ImportDirs { file = fmt.Sprintf("%s/%s.a", dir, name) if _, err := os.Stat(file); err == nil { return file, true @@ -1081,13 +681,13 @@ func findpkg(name string) (file string, ok bool) { if objabi.GOROOT != "" { suffix := "" suffixsep := "" - if flag_installsuffix != "" { + if Flag.InstallSuffix != "" { suffixsep = "_" - suffix = flag_installsuffix - } else if flag_race { + suffix = Flag.InstallSuffix + } else if Flag.Race { suffixsep = "_" suffix = "race" - } else if flag_msan { + } else if Flag.MSan { suffixsep = "_" suffix = "msan" } @@ -1161,12 +761,12 @@ func importfile(f constant.Value) *types.Pkg { errorexit() } - if myimportpath != "" && path_ == myimportpath { + if Ctxt.Pkgpath != "" && path_ == Ctxt.Pkgpath { yyerror("import %q while compiling that package (import cycle)", path_) errorexit() } - if mapped, ok := importMap[path_]; ok { + if mapped, ok := Flag.Cfg.ImportMap[path_]; ok { path_ = mapped } @@ -1181,8 +781,8 @@ func importfile(f constant.Value) *types.Pkg { } prefix := Ctxt.Pathname - if localimport != "" { - prefix = localimport + if Flag.D != "" { + prefix = Flag.D } path_ = path.Join(prefix, path_) @@ -1308,7 +908,7 @@ func importfile(f constant.Value) *types.Pkg { } // assume files move (get installed) so don't record the full path - if packageFile != nil { + if Flag.Cfg.PackageFile != nil { // If using a packageFile map, assume path_ can be recorded directly. Ctxt.AddImport(path_, fingerprint) } else { @@ -1401,47 +1001,10 @@ func IsAlias(sym *types.Sym) bool { return sym.Def != nil && asNode(sym.Def).Sym != sym } -// concurrentFlagOk reports whether the current compiler flags -// are compatible with concurrent compilation. -func concurrentFlagOk() bool { - // TODO(rsc): Many of these are fine. Remove them. - return Debug.P == 0 && - Debug.E == 0 && - Debug.K == 0 && - Debug.L == 0 && - Debug.h == 0 && - Debug.j == 0 && - Debug.m == 0 && - Debug.r == 0 -} - -func concurrentBackendAllowed() bool { - if !concurrentFlagOk() { - return false - } - - // Debug.S by itself is ok, because all printing occurs - // while writing the object file, and that is non-concurrent. - // Adding Debug_vlog, however, causes Debug.S to also print - // while flushing the plist, which happens concurrently. - if Debug_vlog || debugstr != "" || debuglive > 0 { - return false - } - // TODO: Test and delete this condition. 
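After this patch, findpkg resolves an import in a fixed order: the -importcfg packagefile map is authoritative when present, then the -I directories are probed, and finally GOROOT/pkg with a suffix chosen by -installsuffix, -race, or -msan. A simplified sketch of that order — the GOOS/GOARCH path segment is hard-coded here and local-import handling is omitted:

package main

import (
	"fmt"
	"os"
)

// pkgSuffix mirrors the -installsuffix/-race/-msan choice in findpkg.
func pkgSuffix(installSuffix string, race, msan bool) string {
	switch {
	case installSuffix != "":
		return "_" + installSuffix
	case race:
		return "_race"
	case msan:
		return "_msan"
	}
	return ""
}

// findpkg sketches the post-patch lookup order: packagefile map first,
// then -I directories, then GOROOT.
func findpkg(name string, packageFile map[string]string, importDirs []string, goroot, suffix string) (string, bool) {
	if packageFile != nil {
		file, ok := packageFile[name]
		return file, ok
	}
	for _, dir := range importDirs {
		file := fmt.Sprintf("%s/%s.a", dir, name)
		if _, err := os.Stat(file); err == nil {
			return file, true
		}
	}
	if goroot != "" {
		file := fmt.Sprintf("%s/pkg/linux_amd64%s/%s.a", goroot, suffix, name)
		if _, err := os.Stat(file); err == nil {
			return file, true
		}
	}
	return "", false
}

func main() {
	pf := map[string]string{"fmt": "/cache/aa/fmt.a"}
	file, ok := findpkg("fmt", pf, nil, "", pkgSuffix("", true, false))
	fmt.Println(file, ok) // /cache/aa/fmt.a true
}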
- if objabi.Fieldtrack_enabled != 0 { - return false - } - // TODO: fix races and enable the following flags - if Ctxt.Flag_shared || Ctxt.Flag_dynlink || flag_race { - return false - } - return true -} - // recordFlags records the specified command-line flags to be placed // in the DWARF info. func recordFlags(flags ...string) { - if myimportpath == "" { + if Ctxt.Pkgpath == "" { // We can't record the flags if we don't know what the // package name is. return @@ -1484,7 +1047,7 @@ func recordFlags(flags ...string) { if cmd.Len() == 0 { return } - s := Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + myimportpath) + s := Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + Ctxt.Pkgpath) s.Type = objabi.SDWARFCUINFO // Sometimes (for example when building tests) we can link // together two package main archives. So allow dups. @@ -1496,7 +1059,7 @@ func recordFlags(flags ...string) { // recordPackageName records the name of the package being // compiled, so that the linker can save it in the compile unit's DIE. func recordPackageName() { - s := Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + myimportpath) + s := Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + Ctxt.Pkgpath) s.Type = objabi.SDWARFCUINFO // Sometimes (for example when building tests) we can link // together two package main archives. So allow dups. @@ -1505,9 +1068,6 @@ func recordPackageName() { s.P = []byte(localpkg.Name) } -// flag_lang is the language version we are compiling for, set by the -lang flag. -var flag_lang string - // currentLang returns the current language version. func currentLang() string { return fmt.Sprintf("go1.%d", goversion.Version) @@ -1548,23 +1108,23 @@ func langSupported(major, minor int, pkg *types.Pkg) bool { // checkLang verifies that the -lang flag holds a valid value, and // exits if not. It initializes data used by langSupported. func checkLang() { - if flag_lang == "" { + if Flag.Lang == "" { return } var err error - langWant, err = parseLang(flag_lang) + langWant, err = parseLang(Flag.Lang) if err != nil { - log.Fatalf("invalid value %q for -lang: %v", flag_lang, err) + log.Fatalf("invalid value %q for -lang: %v", Flag.Lang, err) } - if def := currentLang(); flag_lang != def { + if def := currentLang(); Flag.Lang != def { defVers, err := parseLang(def) if err != nil { log.Fatalf("internal error parsing default lang %q: %v", def, err) } if langWant.major > defVers.major || (langWant.major == defVers.major && langWant.minor > defVers.minor) { - log.Fatalf("invalid value %q for -lang: max known version is %q", flag_lang, def) + log.Fatalf("invalid value %q for -lang: max known version is %q", Flag.Lang, def) } } } diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 47b1958f18baf..2d3da884a2254 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -118,15 +118,13 @@ func (p *noder) yyerrorpos(pos syntax.Pos, format string, args ...interface{}) { yyerrorl(p.makeXPos(pos), format, args...) } -var pathPrefix string - // TODO(gri) Can we eliminate fileh in favor of absFilename? func fileh(name string) string { - return objabi.AbsFile("", name, pathPrefix) + return objabi.AbsFile("", name, Flag.TrimPath) } func absFilename(name string) string { - return objabi.AbsFile(Ctxt.Pathname, name, pathPrefix) + return objabi.AbsFile(Ctxt.Pathname, name, Flag.TrimPath) } // noder transforms package syntax's AST into a Node tree. 
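checkLang above compares the requested -lang release against the compiler's default as a (major, minor) pair, rejecting anything newer than the default. A standalone sketch — this parseLang is a simplified stand-in for the real parser:

package main

import (
	"fmt"
	"log"
)

type lang struct{ major, minor int }

// parseLang extracts major and minor from a string like "go1.15".
func parseLang(s string) (lang, error) {
	var l lang
	if _, err := fmt.Sscanf(s, "go%d.%d", &l.major, &l.minor); err != nil {
		return lang{}, fmt.Errorf("invalid language version %q", s)
	}
	return l, nil
}

func main() {
	want, err := parseLang("go1.15") // as if -lang=go1.15 were given
	if err != nil {
		log.Fatal(err)
	}
	def, _ := parseLang("go1.16") // the compiler's default release
	if want.major > def.major || (want.major == def.major && want.minor > def.minor) {
		log.Fatalf("max known version is go%d.%d", def.major, def.minor)
	}
	fmt.Printf("ok: compiling for go%d.%d\n", want.major, want.minor)
}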
@@ -269,10 +267,10 @@ func (p *noder) node() { } else { // Use the default object symbol name if the // user didn't provide one. - if myimportpath == "" { + if Ctxt.Pkgpath == "" { p.yyerrorpos(n.pos, "//go:linkname requires linkname argument or -p compiler flag") } else { - s.Linkname = objabi.PathToPrefix(myimportpath) + "." + n.local + s.Linkname = objabi.PathToPrefix(Ctxt.Pkgpath) + "." + n.local } } } @@ -561,7 +559,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node { yyerrorl(f.Pos, "can only use //go:noescape with external func implementations") } } else { - if pure_go || strings.HasPrefix(f.funcname(), "init.") { + if Flag.Complete || strings.HasPrefix(f.funcname(), "init.") { // Linknamed functions are allowed to have no body. Hopefully // the linkname target has a body. See issue 23311. isLinknamed := false @@ -1621,7 +1619,7 @@ func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.P // For security, we disallow //go:cgo_* directives other // than cgo_import_dynamic outside cgo-generated files. // Exception: they are allowed in the standard library, for runtime and syscall. - if !isCgoGeneratedFile(pos) && !compiling_std { + if !isCgoGeneratedFile(pos) && !Flag.Std { p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in cgo-generated code", text)}) } p.pragcgo(pos, text) @@ -1633,10 +1631,10 @@ func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.P } flag := pragmaFlag(verb) const runtimePragmas = Systemstack | Nowritebarrier | Nowritebarrierrec | Yeswritebarrierrec - if !compiling_runtime && flag&runtimePragmas != 0 { + if !Flag.CompilingRuntime && flag&runtimePragmas != 0 { p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in runtime", verb)}) } - if flag == 0 && !allowedStdPragmas[verb] && compiling_std { + if flag == 0 && !allowedStdPragmas[verb] && Flag.Std { p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s is not allowed in the standard library", verb)}) } pragma.Flag |= flag diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index d51f50ccab5f2..170d997cd69b0 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -47,12 +47,12 @@ const ( ) func dumpobj() { - if linkobj == "" { - dumpobj1(outfile, modeCompilerObj|modeLinkerObj) + if Flag.LinkObj == "" { + dumpobj1(Flag.LowerO, modeCompilerObj|modeLinkerObj) return } - dumpobj1(outfile, modeCompilerObj) - dumpobj1(linkobj, modeLinkerObj) + dumpobj1(Flag.LowerO, modeCompilerObj) + dumpobj1(Flag.LinkObj, modeLinkerObj) } func dumpobj1(outfile string, mode int) { @@ -79,8 +79,8 @@ func dumpobj1(outfile string, mode int) { func printObjHeader(bout *bio.Writer) { fmt.Fprintf(bout, "go object %s %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version, objabi.Expstring()) - if buildid != "" { - fmt.Fprintf(bout, "build id %q\n", buildid) + if Flag.BuildID != "" { + fmt.Fprintf(bout, "build id %q\n", Flag.BuildID) } if localpkg.Name == "main" { fmt.Fprintf(bout, "main\n") @@ -261,7 +261,7 @@ func dumpGlobalConst(n *Node) { return } } - Ctxt.DwarfIntConst(myimportpath, n.Sym.Name, typesymname(t), int64Val(t, v)) + Ctxt.DwarfIntConst(Ctxt.Pkgpath, n.Sym.Name, typesymname(t), int64Val(t, v)) } func dumpglobls() { diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index a62d468c9c498..ee0c8f2711084 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -50,7 +50,7 @@ type Order struct { // Order 
rewrites fn.Nbody to apply the ordering constraints // described in the comment at the top of the file. func order(fn *Node) { - if Debug.W > 1 { + if Flag.W > 1 { s := fmt.Sprintf("\nbefore order %v", fn.Func.Nname.Sym) dumplist(s, fn.Nbody) } @@ -323,7 +323,7 @@ func (o *Order) stmtList(l Nodes) { // and rewrites it to: // m = OMAKESLICECOPY([]T, x, s); nil func orderMakeSliceCopy(s []*Node) { - if Debug.N != 0 || instrumenting { + if Flag.N != 0 || instrumenting { return } diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 7c1d5543e312a..fe13a161bd508 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -22,8 +22,7 @@ import ( // "Portable" code generation. var ( - nBackendWorkers int // number of concurrent backend workers, set by a compiler flag - compilequeue []*Node // functions waiting to be compiled + compilequeue []*Node // functions waiting to be compiled ) func emitptrargsmap(fn *Node) { @@ -292,7 +291,7 @@ func compilenow(fn *Node) bool { if fn.IsMethod() && isInlinableButNotInlined(fn) { return false } - return nBackendWorkers == 1 && Debug_compilelater == 0 + return Flag.LowerC == 1 && Debug_compilelater == 0 } // isInlinableButNotInlined returns true if 'fn' was marked as an @@ -375,8 +374,8 @@ func compileFunctions() { } var wg sync.WaitGroup Ctxt.InParallel = true - c := make(chan *Node, nBackendWorkers) - for i := 0; i < nBackendWorkers; i++ { + c := make(chan *Node, Flag.LowerC) + for i := 0; i < Flag.LowerC; i++ { wg.Add(1) go func(worker int) { for fn := range c { @@ -482,7 +481,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes) var inlcalls dwarf.InlCalls - if genDwarfInline > 0 { + if Flag.GenDwarfInl > 0 { inlcalls = assembleInlines(fnsym, dwarfVars) } return scopes, inlcalls @@ -552,7 +551,7 @@ func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var { typename := dwarf.InfoPrefix + typesymname(n.Type) delete(fnsym.Func().Autot, ngotype(n).Linksym()) inlIndex := 0 - if genDwarfInline > 1 { + if Flag.GenDwarfInl > 1 { if n.Name.InlFormal() || n.Name.InlLocal() { inlIndex = posInlIndex(n.Pos) + 1 if n.Name.InlFormal() { @@ -673,7 +672,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *Func, apDecls []*Node) } } inlIndex := 0 - if genDwarfInline > 1 { + if Flag.GenDwarfInl > 1 { if n.Name.InlFormal() || n.Name.InlLocal() { inlIndex = posInlIndex(n.Pos) + 1 if n.Name.InlFormal() { @@ -762,7 +761,7 @@ func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var { delete(fnsym.Func().Autot, gotype) typename := dwarf.InfoPrefix + gotype.Name[len("type."):] inlIndex := 0 - if genDwarfInline > 1 { + if Flag.GenDwarfInl > 1 { if n.Name.InlFormal() || n.Name.InlLocal() { inlIndex = posInlIndex(n.Pos) + 1 if n.Name.InlFormal() { diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index a48173e0d65bf..5f4af06b80321 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -509,7 +509,7 @@ func allUnsafe(f *ssa.Func) bool { // go:nosplit functions are similar. Since safe points used to // be coupled with stack checks, go:nosplit often actually // means "no safe points in this function". - return compiling_runtime || f.NoSplit + return Flag.CompilingRuntime || f.NoSplit } // markUnsafePoints finds unsafe points and computes lv.unsafePoints. 
@@ -966,7 +966,7 @@ func (lv *Liveness) compact(b *ssa.Block) { } func (lv *Liveness) showlive(v *ssa.Value, live bvec) { - if debuglive == 0 || lv.fn.funcname() == "init" || strings.HasPrefix(lv.fn.funcname(), ".") { + if Flag.Live == 0 || lv.fn.funcname() == "init" || strings.HasPrefix(lv.fn.funcname(), ".") { return } if !(v == nil || v.Op.IsCall()) { @@ -1235,7 +1235,7 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap { lv.prologue() lv.solve() lv.epilogue() - if debuglive > 0 { + if Flag.Live > 0 { lv.showlive(nil, lv.stackMaps[0]) for _, b := range f.Blocks { for _, val := range b.Values { @@ -1245,7 +1245,7 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap { } } } - if debuglive >= 2 { + if Flag.Live >= 2 { lv.printDebug() } diff --git a/src/cmd/compile/internal/gc/print.go b/src/cmd/compile/internal/gc/print.go index 52585814f6d68..6b5f670812e9c 100644 --- a/src/cmd/compile/internal/gc/print.go +++ b/src/cmd/compile/internal/gc/print.go @@ -59,7 +59,7 @@ func linestr(pos src.XPos) string { if Ctxt == nil { return "???" } - return Ctxt.OutermostPos(pos).Format(Debug.C == 0, Debug.L == 1) + return Ctxt.OutermostPos(pos).Format(Flag.C == 0, Flag.L == 1) } // byPos sorts errors by source position. @@ -133,7 +133,7 @@ func yyerrorl(pos src.XPos, format string, args ...interface{}) { numErrors++ hcrash() - if numErrors >= 10 && Debug.e == 0 { + if numErrors >= 10 && Flag.LowerE == 0 { flusherrors() fmt.Printf("%v: too many errors\n", linestr(pos)) errorexit() @@ -142,7 +142,7 @@ func yyerrorl(pos src.XPos, format string, args ...interface{}) { // ErrorfVers reports that a language feature (format, args) requires a later version of Go. func yyerrorv(lang string, format string, args ...interface{}) { - yyerror("%s requires %s or later (-lang was set to %s; check go.mod)", fmt.Sprintf(format, args...), lang, flag_lang) + yyerror("%s requires %s or later (-lang was set to %s; check go.mod)", fmt.Sprintf(format, args...), lang, Flag.Lang) } // UpdateErrorDot is a clumsy hack that rewrites the last error, @@ -172,7 +172,7 @@ func Warn(format string, args ...interface{}) { // to additional output by setting a particular flag. func Warnl(pos src.XPos, format string, args ...interface{}) { addErrorMsg(pos, format, args...) - if Debug.m != 0 { + if Flag.LowerM != 0 { flusherrors() } } @@ -232,10 +232,10 @@ func FatalfAt(pos src.XPos, format string, args ...interface{}) { // hcrash crashes the compiler when -h is set, to find out where a message is generated. func hcrash() { - if Debug.h != 0 { + if Flag.LowerH != 0 { flusherrors() - if outfile != "" { - os.Remove(outfile) + if Flag.LowerO != "" { + os.Remove(Flag.LowerO) } panic("-h") } @@ -245,8 +245,8 @@ func hcrash() { // It flushes any pending errors, removes the output file, and exits. 
func errorexit() { flusherrors() - if outfile != "" { - os.Remove(outfile) + if Flag.LowerO != "" { + os.Remove(Flag.LowerO) } os.Exit(2) } diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go index 35526174010fa..733d19c024697 100644 --- a/src/cmd/compile/internal/gc/racewalk.go +++ b/src/cmd/compile/internal/gc/racewalk.go @@ -47,9 +47,9 @@ var omit_pkgs = []string{ var norace_inst_pkgs = []string{"sync", "sync/atomic"} func ispkgin(pkgs []string) bool { - if myimportpath != "" { + if Ctxt.Pkgpath != "" { for _, p := range pkgs { - if myimportpath == p { + if Ctxt.Pkgpath == p { return true } } @@ -63,11 +63,11 @@ func instrument(fn *Node) { return } - if !flag_race || !ispkgin(norace_inst_pkgs) { + if !Flag.Race || !ispkgin(norace_inst_pkgs) { fn.Func.SetInstrumentBody(true) } - if flag_race { + if Flag.Race { lno := lineno lineno = src.NoXPos diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index 1b4d765d4239e..44776e988e95e 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -466,7 +466,7 @@ func walkrange(n *Node) *Node { // // where == for keys of map m is reflexive. func isMapClear(n *Node) bool { - if Debug.N != 0 || instrumenting { + if Flag.N != 0 || instrumenting { return false } @@ -533,7 +533,7 @@ func mapClear(m *Node) *Node { // // Parameters are as in walkrange: "for v1, v2 = range a". func arrayClear(n, v1, v2, a *Node) bool { - if Debug.N != 0 || instrumenting { + if Flag.N != 0 || instrumenting { return false } diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 1ac7a8490f6ca..674a3bf3fbae3 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -488,14 +488,14 @@ func dimportpath(p *types.Pkg) { // If we are compiling the runtime package, there are two runtime packages around // -- localpkg and Runtimepkg. We don't want to produce import path symbols for // both of them, so just produce one for localpkg. - if myimportpath == "runtime" && p == Runtimepkg { + if Ctxt.Pkgpath == "runtime" && p == Runtimepkg { return } str := p.Path if p == localpkg { // Note: myimportpath != "", or else dgopkgpath won't call dimportpath. - str = myimportpath + str = Ctxt.Pkgpath } s := Ctxt.Lookup("type..importpath." + p.Prefix + ".") @@ -510,7 +510,7 @@ func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int { return duintptr(s, ot, 0) } - if pkg == localpkg && myimportpath == "" { + if pkg == localpkg && Ctxt.Pkgpath == "" { // If we don't know the full import path of the package being compiled // (i.e. -p was not passed on the compiler command line), emit a reference to // type..importpath.""., which the linker will rewrite using the correct import path. @@ -529,7 +529,7 @@ func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int { if pkg == nil { return duint32(s, ot, 0) } - if pkg == localpkg && myimportpath == "" { + if pkg == localpkg && Ctxt.Pkgpath == "" { // If we don't know the full import path of the package being compiled // (i.e. -p was not passed on the compiler command line), emit a reference to // type..importpath.""., which the linker will rewrite using the correct import path. 
@@ -1158,7 +1158,7 @@ func dtypesym(t *types.Type) *obj.LSym { dupok = obj.DUPOK } - if myimportpath != "runtime" || (tbase != types.Types[tbase.Etype] && tbase != types.Bytetype && tbase != types.Runetype && tbase != types.Errortype) { // int, float, etc + if Ctxt.Pkgpath != "runtime" || (tbase != types.Types[tbase.Etype] && tbase != types.Bytetype && tbase != types.Runetype && tbase != types.Errortype) { // int, float, etc // named types from other files are defined only by those files if tbase.Sym != nil && tbase.Sym.Pkg != localpkg { if i, ok := typeSymIdx[tbase]; ok { @@ -1613,7 +1613,7 @@ func dumpbasictypes() { // so this is as good as any. // another possible choice would be package main, // but using runtime means fewer copies in object files. - if myimportpath == "runtime" { + if Ctxt.Pkgpath == "runtime" { for i := types.EType(1); i <= TBOOL; i++ { dtypesym(types.NewPtr(types.Types[i])) } @@ -1629,10 +1629,10 @@ func dumpbasictypes() { // add paths for runtime and main, which 6l imports implicitly. dimportpath(Runtimepkg) - if flag_race { + if Flag.Race { dimportpath(racepkg) } - if flag_msan { + if Flag.MSan { dimportpath(msanpkg) } dimportpath(types.NewPkg("main", "")) diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go index 97e0424ce07f8..8e6b15af536af 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/gc/select.go @@ -255,7 +255,7 @@ func walkselectcases(cases *Nodes) []*Node { order := temp(types.NewArray(types.Types[TUINT16], 2*int64(ncas))) var pc0, pcs *Node - if flag_race { + if Flag.Race { pcs = temp(types.NewArray(types.Types[TUINTPTR], int64(ncas))) pc0 = typecheck(nod(OADDR, nod(OINDEX, pcs, nodintconst(0)), nil), ctxExpr) } else { @@ -308,7 +308,7 @@ func walkselectcases(cases *Nodes) []*Node { // TODO(mdempsky): There should be a cleaner way to // handle this. - if flag_race { + if Flag.Race { r = mkcall("selectsetpc", nil, nil, nod(OADDR, nod(OINDEX, pcs, nodintconst(int64(i))), nil)) init = append(init, r) } @@ -331,7 +331,7 @@ func walkselectcases(cases *Nodes) []*Node { // selv and order are no longer alive after selectgo. init = append(init, nod(OVARKILL, selv, nil)) init = append(init, nod(OVARKILL, order, nil)) - if flag_race { + if Flag.Race { init = append(init, nod(OVARKILL, pcs, nil)) } diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index e15d558a78245..741e0ef9a3a5e 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -40,7 +40,7 @@ func (s *InitSchedule) append(n *Node) { // staticInit adds an initialization statement n to the schedule. 
func (s *InitSchedule) staticInit(n *Node) { if !s.tryStaticInit(n) { - if Debug.P != 0 { + if Flag.Percent != 0 { Dump("nonstatic", n) } s.append(n) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index f00f5d94a169c..260df2f54f142 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -60,10 +60,10 @@ func initssaconfig() { _ = types.NewPtr(types.Types[TINT64]) // *int64 _ = types.NewPtr(types.Errortype) // *error types.NewPtrCacheEnabled = false - ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, Ctxt, Debug.N == 0) + ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, Ctxt, Flag.N == 0) ssaConfig.SoftFloat = thearch.SoftFloat - ssaConfig.Race = flag_race - ssaCaches = make([]ssa.Cache, nBackendWorkers) + ssaConfig.Race = Flag.Race + ssaCaches = make([]ssa.Cache, Flag.LowerC) // Set up some runtime functions we'll need to call. assertE2I = sysfunc("assertE2I") @@ -291,7 +291,7 @@ func buildssa(fn *Node, worker int) *ssa.Func { name := fn.funcname() printssa := false if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", or a package.name e.g. "compress/gzip.(*Reader).Reset" - printssa = name == ssaDump || myimportpath+"."+name == ssaDump + printssa = name == ssaDump || Ctxt.Pkgpath+"."+name == ssaDump } var astBuf *bytes.Buffer if printssa { @@ -342,7 +342,7 @@ func buildssa(fn *Node, worker int) *ssa.Func { if printssa { ssaDF := ssaDumpFile if ssaDir != "" { - ssaDF = filepath.Join(ssaDir, myimportpath+"."+name+".html") + ssaDF = filepath.Join(ssaDir, Ctxt.Pkgpath+"."+name+".html") ssaD := filepath.Dir(ssaDF) os.MkdirAll(ssaD, 0755) } @@ -358,7 +358,7 @@ func buildssa(fn *Node, worker int) *ssa.Func { s.fwdVars = map[*Node]*ssa.Value{} s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem) - s.hasOpenDefers = Debug.N == 0 && s.hasdefer && !s.curfn.Func.OpenCodedDeferDisallowed() + s.hasOpenDefers = Flag.N == 0 && s.hasdefer && !s.curfn.Func.OpenCodedDeferDisallowed() switch { case s.hasOpenDefers && (Ctxt.Flag_shared || Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386": // Don't support open-coded defers for 386 ONLY when using shared @@ -752,7 +752,7 @@ func (s *state) pushLine(line src.XPos) { // the frontend may emit node with line number missing, // use the parent line number in this case. line = s.peekPos() - if Debug.K != 0 { + if Flag.K != 0 { Warn("buildssa: unknown position (line 0)") } } else { @@ -988,13 +988,13 @@ func (s *state) instrument(t *types.Type, addr *ssa.Value, wr bool) { var fn *obj.LSym needWidth := false - if flag_msan { + if Flag.MSan { fn = msanread if wr { fn = msanwrite } needWidth = true - } else if flag_race && t.NumComponents(types.CountBlankFields) > 1 { + } else if Flag.Race && t.NumComponents(types.CountBlankFields) > 1 { // for composite objects we have to write every address // because a write might happen to any subobject. // composites with only one element don't have subobjects, though. @@ -1003,7 +1003,7 @@ func (s *state) instrument(t *types.Type, addr *ssa.Value, wr bool) { fn = racewriterange } needWidth = true - } else if flag_race { + } else if Flag.Race { // for non-composite objects we can write just the start // address, as any write must write the first byte. 
fn = raceread @@ -1090,7 +1090,7 @@ func (s *state) stmt(n *Node) { case OCALLMETH, OCALLINTER: s.callResult(n, callNormal) if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class() == PFUNC { - if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" || + if fn := n.Left.Sym.Name; Flag.CompilingRuntime && fn == "throw" || n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") { m := s.mem() b := s.endBlock() @@ -1225,7 +1225,7 @@ func (s *state) stmt(n *Node) { // Check whether we're writing the result of an append back to the same slice. // If so, we handle it specially to avoid write barriers on the fast // (non-growth) path. - if !samesafeexpr(n.Left, rhs.List.First()) || Debug.N != 0 { + if !samesafeexpr(n.Left, rhs.List.First()) || Flag.N != 0 { break } // If the slice can be SSA'd, it'll be on the stack, @@ -4130,9 +4130,9 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder { } pkg := sym.Pkg.Path if sym.Pkg == localpkg { - pkg = myimportpath + pkg = Ctxt.Pkgpath } - if flag_race && pkg == "sync/atomic" { + if Flag.Race && pkg == "sync/atomic" { // The race detector needs to be able to intercept these calls. // We can't intrinsify them. return nil @@ -4930,7 +4930,7 @@ func (s *state) addr(n *Node) *ssa.Value { // canSSA reports whether n is SSA-able. // n must be an ONAME (or an ODOT sequence with an ONAME base). func (s *state) canSSA(n *Node) bool { - if Debug.N != 0 { + if Flag.N != 0 { return false } for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) { @@ -5041,7 +5041,7 @@ func (s *state) nilCheck(ptr *ssa.Value) { func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value { idx = s.extendIndex(idx, len, kind, bounded) - if bounded || Debug.B != 0 { + if bounded || Flag.B != 0 { // If bounded or bounds checking is flag-disabled, then no check necessary, // just return the extended index. // @@ -5114,7 +5114,7 @@ func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo s.startBlock(bNext) // In Spectre index mode, apply an appropriate mask to avoid speculative out-of-bounds accesses. - if spectreIndex { + if Flag.Cfg.SpectreIndex { op := ssa.OpSpectreIndex if kind != ssa.BoundsIndex && kind != ssa.BoundsIndexU { op = ssa.OpSpectreSliceIndex @@ -6235,7 +6235,7 @@ func emitStackObjects(e *ssafn, pp *Progs) { p.To.Name = obj.NAME_EXTERN p.To.Sym = x - if debuglive != 0 { + if Flag.Live != 0 { for _, v := range vars { Warnl(v.Pos, "stack object %v %s", v, v.Type.String()) } @@ -6397,7 +6397,7 @@ func genssa(f *ssa.Func, pp *Progs) { } // Emit control flow instructions for block var next *ssa.Block - if i < len(f.Blocks)-1 && Debug.N == 0 { + if i < len(f.Blocks)-1 && Flag.N == 0 { // If -N, leave next==nil so every block with successors // ends in a JMP (except call blocks - plive doesn't like // select{send,recv} followed by a JMP call). 
Helps keep @@ -6705,7 +6705,7 @@ func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo } else { lo = s.newValue1(ssa.OpInt64Lo, types.Types[TUINT], idx) } - if bounded || Debug.B != 0 { + if bounded || Flag.B != 0 { return lo } bNext := s.f.NewBlock(ssa.BlockPlain) @@ -7117,7 +7117,7 @@ func (e *ssafn) Debug_checknil() bool { } func (e *ssafn) UseWriteBarrier() bool { - return use_writebarrier + return Flag.WB } func (e *ssafn) Syslook(name string) *obj.LSym { @@ -7142,7 +7142,7 @@ func (e *ssafn) SetWBPos(pos src.XPos) { } func (e *ssafn) MyImportPath() string { - return myimportpath + return Ctxt.Pkgpath } func (n *Node) Typ() *types.Type { diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index ebc5af63e19c9..32312e9545aeb 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -49,7 +49,7 @@ func hasUniquePos(n *Node) bool { } if !n.Pos.IsKnown() { - if Debug.K != 0 { + if Flag.K != 0 { Warn("setlineno: unknown position (line 0)") } return false @@ -1334,7 +1334,7 @@ func structargs(tl *types.Type, mustname bool) []*Node { // method - M func (t T)(), a TFIELD type struct // newnam - the eventual mangled name of this function func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { - if false && Debug.r != 0 { + if false && Flag.LowerR != 0 { fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam) } @@ -1407,7 +1407,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { fn.Nbody.Append(call) } - if false && Debug.r != 0 { + if false && Flag.LowerR != 0 { dumplist("genwrapper body", fn.Nbody) } @@ -1548,7 +1548,7 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool // the method does not exist for value types. rcvr := tm.Type.Recv().Type if rcvr.IsPtr() && !t0.IsPtr() && !followptr && !isifacemethod(tm.Type) { - if false && Debug.r != 0 { + if false && Flag.LowerR != 0 { yyerror("interface pointer mismatch") } diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go index 65ae7f23d8f01..75a7ae2c7af7c 100644 --- a/src/cmd/compile/internal/gc/syntax.go +++ b/src/cmd/compile/internal/gc/syntax.go @@ -284,7 +284,7 @@ func (n *Node) Val() constant.Value { // which must not have been used with SetOpt. func (n *Node) SetVal(v constant.Value) { if n.HasOpt() { - Debug.h = 1 + Flag.LowerH = 1 Dump("have Opt", n) Fatalf("have Opt") } @@ -314,7 +314,7 @@ func (n *Node) SetOpt(x interface{}) { return } if n.HasVal() { - Debug.h = 1 + Flag.LowerH = 1 Dump("have Val", n) Fatalf("have Val") } @@ -367,7 +367,7 @@ func (n *Node) pkgFuncName() string { } pkg := s.Pkg - p := myimportpath + p := Ctxt.Pkgpath if pkg != nil && pkg.Path != "" { p = pkg.Path } diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index a4acdfaed3dd6..7b299e553b57a 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -15,7 +15,6 @@ import ( // To enable tracing support (-t flag), set enableTrace to true. 
const enableTrace = false -var trace bool var traceIndent []byte var skipDowidthForTracing bool @@ -85,7 +84,7 @@ func resolve(n *Node) (res *Node) { } // only trace if there's work to do - if enableTrace && trace { + if enableTrace && Flag.LowerT { defer tracePrint("resolve", n)(&res) } @@ -212,7 +211,7 @@ func typecheck(n *Node, top int) (res *Node) { } // only trace if there's work to do - if enableTrace && trace { + if enableTrace && Flag.LowerT { defer tracePrint("typecheck", n)(&res) } @@ -326,7 +325,7 @@ func indexlit(n *Node) *Node { // The result of typecheck1 MUST be assigned back to n, e.g. // n.Left = typecheck1(n.Left, top) func typecheck1(n *Node, top int) (res *Node) { - if enableTrace && trace { + if enableTrace && Flag.LowerT { defer tracePrint("typecheck1", n)(&res) } @@ -2359,7 +2358,7 @@ func lookdot1(errnode *Node, s *types.Sym, t *types.Type, fs *types.Fields, dost // typecheckMethodExpr checks selector expressions (ODOT) where the // base expression is a type expression (OTYPE). func typecheckMethodExpr(n *Node) (res *Node) { - if enableTrace && trace { + if enableTrace && Flag.LowerT { defer tracePrint("typecheckMethodExpr", n)(&res) } @@ -2797,7 +2796,7 @@ func pushtype(n *Node, t *types.Type) *Node { // The result of typecheckcomplit MUST be assigned back to n, e.g. // n.Left = typecheckcomplit(n.Left) func typecheckcomplit(n *Node) (res *Node) { - if enableTrace && trace { + if enableTrace && Flag.LowerT { defer tracePrint("typecheckcomplit", n)(&res) } @@ -3215,7 +3214,7 @@ func samesafeexpr(l *Node, r *Node) bool { // if this assignment is the definition of a var on the left side, // fill in the var's type. func typecheckas(n *Node) { - if enableTrace && trace { + if enableTrace && Flag.LowerT { defer tracePrint("typecheckas", n)(nil) } @@ -3273,7 +3272,7 @@ func checkassignto(src *types.Type, dst *Node) { } func typecheckas2(n *Node) { - if enableTrace && trace { + if enableTrace && Flag.LowerT { defer tracePrint("typecheckas2", n)(nil) } @@ -3406,7 +3405,7 @@ out: // type check function definition func typecheckfunc(n *Node) { - if enableTrace && trace { + if enableTrace && Flag.LowerT { defer tracePrint("typecheckfunc", n)(nil) } @@ -3520,7 +3519,7 @@ func setUnderlying(t, underlying *types.Type) { } func typecheckdeftype(n *Node) { - if enableTrace && trace { + if enableTrace && Flag.LowerT { defer tracePrint("typecheckdeftype", n)(nil) } @@ -3540,7 +3539,7 @@ func typecheckdeftype(n *Node) { } func typecheckdef(n *Node) { - if enableTrace && trace { + if enableTrace && Flag.LowerT { defer tracePrint("typecheckdef", n)(nil) } diff --git a/src/cmd/compile/internal/gc/util.go b/src/cmd/compile/internal/gc/util.go index 58be2f82530a9..d1a5993daff37 100644 --- a/src/cmd/compile/internal/gc/util.go +++ b/src/cmd/compile/internal/gc/util.go @@ -32,18 +32,13 @@ func Exit(code int) { } var ( - blockprofile string - cpuprofile string - memprofile string memprofilerate int64 - traceprofile string traceHandler func(string) - mutexprofile string ) func startProfile() { - if cpuprofile != "" { - f, err := os.Create(cpuprofile) + if Flag.CPUProfile != "" { + f, err := os.Create(Flag.CPUProfile) if err != nil { Fatalf("%v", err) } @@ -52,11 +47,11 @@ func startProfile() { } atExit(pprof.StopCPUProfile) } - if memprofile != "" { + if Flag.MemProfile != "" { if memprofilerate != 0 { runtime.MemProfileRate = int(memprofilerate) } - f, err := os.Create(memprofile) + f, err := os.Create(Flag.MemProfile) if err != nil { Fatalf("%v", err) } @@ -75,8 +70,8 @@ func startProfile() 
{ // Not doing memory profiling; disable it entirely. runtime.MemProfileRate = 0 } - if blockprofile != "" { - f, err := os.Create(blockprofile) + if Flag.BlockProfile != "" { + f, err := os.Create(Flag.BlockProfile) if err != nil { Fatalf("%v", err) } @@ -86,8 +81,8 @@ func startProfile() { f.Close() }) } - if mutexprofile != "" { - f, err := os.Create(mutexprofile) + if Flag.MutexProfile != "" { + f, err := os.Create(Flag.MutexProfile) if err != nil { Fatalf("%v", err) } @@ -97,7 +92,7 @@ func startProfile() { f.Close() }) } - if traceprofile != "" && traceHandler != nil { - traceHandler(traceprofile) + if Flag.TraceProfile != "" && traceHandler != nil { + traceHandler(Flag.TraceProfile) } } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index b1bac06fd0c68..c2d8411a596e9 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -24,7 +24,7 @@ func walk(fn *Node) { Curfn = fn errorsBefore := Errors() - if Debug.W != 0 { + if Flag.W != 0 { s := fmt.Sprintf("\nbefore walk %v", Curfn.Func.Nname.Sym) dumplist(s, Curfn.Nbody) } @@ -66,14 +66,14 @@ func walk(fn *Node) { return } walkstmtlist(Curfn.Nbody.Slice()) - if Debug.W != 0 { + if Flag.W != 0 { s := fmt.Sprintf("after walk %v", Curfn.Func.Nname.Sym) dumplist(s, Curfn.Nbody) } zeroResults() heapmoves() - if Debug.W != 0 && Curfn.Func.Enter.Len() > 0 { + if Flag.W != 0 && Curfn.Func.Enter.Len() > 0 { s := fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym) dumplist(s, Curfn.Func.Enter) } @@ -186,7 +186,7 @@ func walkstmt(n *Node) *Node { case ODCL: v := n.Left if v.Class() == PAUTOHEAP { - if compiling_runtime { + if Flag.CompilingRuntime { yyerror("%v escapes to heap, not allowed in runtime", v) } if prealloc[v] == nil { @@ -439,7 +439,7 @@ func walkexpr(n *Node, init *Nodes) *Node { lno := setlineno(n) - if Debug.w > 1 { + if Flag.LowerW > 1 { Dump("before walk expr", n) } @@ -1046,7 +1046,7 @@ opswitch: } if t.IsArray() { n.SetBounded(bounded(r, t.NumElem())) - if Debug.m != 0 && n.Bounded() && !Isconst(n.Right, constant.Int) { + if Flag.LowerM != 0 && n.Bounded() && !Isconst(n.Right, constant.Int) { Warn("index bounds check elided") } if smallintconst(n.Right) && !n.Bounded() { @@ -1054,7 +1054,7 @@ opswitch: } } else if Isconst(n.Left, constant.String) { n.SetBounded(bounded(r, int64(len(n.Left.StringVal())))) - if Debug.m != 0 && n.Bounded() && !Isconst(n.Right, constant.Int) { + if Flag.LowerM != 0 && n.Bounded() && !Isconst(n.Right, constant.Int) { Warn("index bounds check elided") } if smallintconst(n.Right) && !n.Bounded() { @@ -1174,7 +1174,7 @@ opswitch: Fatalf("append outside assignment") case OCOPY: - n = copyany(n, init, instrumenting && !compiling_runtime) + n = copyany(n, init, instrumenting && !Flag.CompilingRuntime) // cannot use chanfn - closechan takes any, not chan any case OCLOSE: @@ -1596,7 +1596,7 @@ opswitch: updateHasCall(n) - if Debug.w != 0 && n != nil { + if Flag.LowerW != 0 && n != nil { Dump("after walk expr", n) } @@ -2784,7 +2784,7 @@ func appendslice(n *Node, init *Nodes) *Node { ptr1, len1 := nptr1.backingArrayPtrLen() ptr2, len2 := nptr2.backingArrayPtrLen() ncopy = mkcall1(fn, types.Types[TINT], &nodes, typename(elemtype), ptr1, len1, ptr2, len2) - } else if instrumenting && !compiling_runtime { + } else if instrumenting && !Flag.CompilingRuntime { // rely on runtime to instrument: // copy(s[len(l1):], l2) // l2 can be a slice or string. 
@@ -2827,7 +2827,7 @@ func appendslice(n *Node, init *Nodes) *Node { // isAppendOfMake reports whether n is of the form append(x , make([]T, y)...). // isAppendOfMake assumes n has already been typechecked. func isAppendOfMake(n *Node) bool { - if Debug.N != 0 || instrumenting { + if Flag.N != 0 || instrumenting { return false } @@ -3036,7 +3036,7 @@ func walkappend(n *Node, init *Nodes, dst *Node) *Node { // General case, with no function calls left as arguments. // Leave for gen, except that instrumentation requires old form. - if !instrumenting || compiling_runtime { + if !instrumenting || Flag.CompilingRuntime { return n } @@ -3991,7 +3991,7 @@ func canMergeLoads() bool { // isRuneCount reports whether n is of the form len([]rune(string)). // These are optimized into a call to runtime.countrunes. func isRuneCount(n *Node) bool { - return Debug.N == 0 && !instrumenting && n.Op == OLEN && n.Left.Op == OSTR2RUNES + return Flag.N == 0 && !instrumenting && n.Op == OLEN && n.Left.Op == OSTR2RUNES } func walkCheckPtrAlignment(n *Node, init *Nodes, count *Node) *Node { From 259fd8adbb15f2a44433c7b8b40a35e97992b345 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 24 Nov 2020 21:56:47 -0800 Subject: [PATCH 034/474] [dev.regabi] cmd/compile: fix reporting of overflow In the previous CL, I had incorrectly removed one of the error messages from issue20232.go, because I thought go/constant was just handling it. But actually the compiler was panicking in nodlit, because it didn't handle constant.Unknown. So this CL makes it leave n.Type == nil for unknown constant.Values. While here, also address #42732 by making sure to report an error message when origConst is called with an unknown constant.Value (as can happen when multiplying two floating-point constants overflows). Finally, add OXOR and OBITNOT to the list of operations to report errors about, since they're also constant expressions that can produce a constant with a greater bit length than their operands. Fixes #42732. Change-Id: I4a538fbae9b3ac4c553d7de5625dc0c87d9acce3 Reviewed-on: https://go-review.googlesource.com/c/go/+/272928 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/const.go | 45 +++++++++++++--------------- test/const2.go | 11 +++++++ test/fixedbugs/issue20232.go | 5 ++-- 3 files changed, 35 insertions(+), 26 deletions(-) diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 84f0b11712673..e72962124a383 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -718,11 +718,14 @@ func square(x constant.Value) constant.Value { } // For matching historical "constant OP overflow" error messages. +// TODO(mdempsky): Replace with error messages like go/types uses. var overflowNames = [...]string{ - OADD: "addition", - OSUB: "subtraction", - OMUL: "multiplication", - OLSH: "shift", + OADD: "addition", + OSUB: "subtraction", + OMUL: "multiplication", + OLSH: "shift", + OXOR: "bitwise XOR", + OBITNOT: "bitwise complement", } // origConst returns an OLITERAL with orig n and value v. @@ -732,32 +735,24 @@ func origConst(n *Node, v constant.Value) *Node { lineno = lno switch v.Kind() { + case constant.Int: + if constant.BitLen(v) <= Mpprec { + break + } + fallthrough case constant.Unknown: - // If constant folding was attempted (we were called) - // but it produced an invalid constant value, - // mark n as broken and give up. 
- if Errors() == 0 { - Fatalf("should have reported an error") + what := overflowNames[n.Op] + if what == "" { + Fatalf("unexpected overflow: %v", n.Op) } + yyerrorl(n.Pos, "constant %v overflow", what) n.Type = nil return n - - case constant.Int: - if constant.BitLen(v) > Mpprec { - what := overflowNames[n.Op] - if what == "" { - Fatalf("unexpected overflow: %v", n.Op) - } - yyerror("constant %v overflow", what) - n.Type = nil - return n - } } orig := n - n = nod(OLITERAL, nil, nil) + n = nodl(orig.Pos, OLITERAL, nil, nil) n.Orig = orig - n.Pos = orig.Pos n.Type = orig.Type n.SetVal(v) return n @@ -800,8 +795,10 @@ func origIntConst(n *Node, v int64) *Node { // nodlit returns a new untyped constant with value v. func nodlit(v constant.Value) *Node { n := nod(OLITERAL, nil, nil) - n.Type = idealType(v.Kind()) - n.SetVal(v) + if k := v.Kind(); k != constant.Unknown { + n.Type = idealType(k) + n.SetVal(v) + } return n } diff --git a/test/const2.go b/test/const2.go index 048d0cb9f3976..d104a2fa71941 100644 --- a/test/const2.go +++ b/test/const2.go @@ -19,3 +19,14 @@ const LargeB = LargeA * LargeA * LargeA const LargeC = LargeB * LargeB * LargeB // GC_ERROR "constant multiplication overflow" const AlsoLargeA = LargeA << 400 << 400 >> 400 >> 400 // GC_ERROR "constant shift overflow" + +// Issue #42732. + +const a = 1e+500000000 +const b = a * a // ERROR "constant multiplication overflow" +const c = b * b + +const MaxInt512 = (1<<256 - 1) * (1<<256 + 1) +const _ = MaxInt512 + 1 // ERROR "constant addition overflow" +const _ = MaxInt512 ^ -1 // ERROR "constant bitwise XOR overflow" +const _ = ^MaxInt512 // ERROR "constant bitwise complement overflow" diff --git a/test/fixedbugs/issue20232.go b/test/fixedbugs/issue20232.go index fbe8cdebfb57f..7a0300a4c42d1 100644 --- a/test/fixedbugs/issue20232.go +++ b/test/fixedbugs/issue20232.go @@ -6,6 +6,7 @@ package main -const _ = 6e5518446744 // ERROR "malformed constant: 6e5518446744" +const x = 6e5518446744 // ERROR "malformed constant: 6e5518446744" +const _ = x * x const _ = 1e-1000000000 -const _ = 1e+1000000000 +const _ = 1e+1000000000 // ERROR "malformed constant: 1e\+1000000000" From 756661c82a2ffa285c16f36d5a5290e057fa75bd Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 16 Nov 2020 01:15:33 -0500 Subject: [PATCH 035/474] [dev.regabi] cmd/compile: finish cleanup of Flag initialization Now that all flags are in a struct, use struct tags to set the usage messages and use reflection to walk the struct and register all the flags. Also move some flag usage back into main.go that shouldn't come with the rest of flag.go into package base. 
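For readers unfamiliar with the pattern, the sketch below shows tag-driven flag registration in miniature. It is a simplified illustration, not the compiler's code: DemoFlags, its fields, and register are invented for this example, and the real registerFlags in the diff below also handles pointer fields, counting flags, and func(string) flags.

package main

import (
	"flag"
	"fmt"
	"reflect"
	"strings"
)

// DemoFlags stands in for CmdFlags; the naming convention is the same
// (single letters stay upper-case, "LowerX" becomes "x", everything
// else is lower-cased), and usage text lives in the `help` struct tag.
type DemoFlags struct {
	N      int    `help:"disable optimizations"`
	LowerC int    `help:"concurrency during compilation"`
	Lang   string `help:"Go language version source code expects"`
}

// register walks the struct with reflection and registers one
// command-line flag per field, using the `help` tag as usage text.
func register(fs *flag.FlagSet, flags *DemoFlags) {
	v := reflect.ValueOf(flags).Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		name := strings.ToLower(f.Name)
		if len(f.Name) == 1 {
			name = f.Name
		} else if len(f.Name) == 6 && strings.HasPrefix(f.Name, "Lower") {
			name = strings.ToLower(f.Name[5:])
		}
		help := f.Tag.Get("help")
		switch p := v.Field(i).Addr().Interface().(type) {
		case *int:
			fs.IntVar(p, name, *p, help)
		case *string:
			fs.StringVar(p, name, *p, help)
		}
	}
}

func main() {
	var flags DemoFlags
	fs := flag.NewFlagSet("demo", flag.ExitOnError)
	register(fs, &flags)
	fs.Parse([]string{"-N=1", "-c=4", "-lang=go1.16"})
	fmt.Println(flags.N, flags.LowerC, flags.Lang) // 1 4 go1.16
}

Compared with the long hand-written list of flag.BoolVar/flag.StringVar calls being deleted below, the flag name and help text now live next to the field they control, so they cannot drift apart.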
Change-Id: Ie655582194906c9ab425c3d01ad8c304bc49bfe0 Reviewed-on: https://go-review.googlesource.com/c/go/+/271668 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/fmtmap_test.go | 1 + src/cmd/compile/internal/gc/flag.go | 447 ++++++++++++++-------------- src/cmd/compile/internal/gc/main.go | 75 ++++- 3 files changed, 298 insertions(+), 225 deletions(-) diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go index 691eee3a1b289..e32233bcaf802 100644 --- a/src/cmd/compile/fmtmap_test.go +++ b/src/cmd/compile/fmtmap_test.go @@ -168,6 +168,7 @@ var knownFormats = map[string]string{ "map[int64]uint32 %v": "", "math/big.Accuracy %s": "", "reflect.Type %s": "", + "reflect.Type %v": "", "rune %#U": "", "rune %c": "", "rune %q": "", diff --git a/src/cmd/compile/internal/gc/flag.go b/src/cmd/compile/internal/gc/flag.go index 3861c9a02850a..090287ef62835 100644 --- a/src/cmd/compile/internal/gc/flag.go +++ b/src/cmd/compile/internal/gc/flag.go @@ -11,15 +11,12 @@ import ( "io/ioutil" "log" "os" + "reflect" "runtime" "strconv" "strings" - "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" - "cmd/compile/internal/types" - "cmd/internal/dwarf" - "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/sys" ) @@ -30,195 +27,153 @@ func usage() { Exit(2) } -var Flag Flags - -// gc debug flags -type Flags struct { - Percent, B, C, E, - K, L, N, S, - W, LowerE, LowerH, LowerJ, - LowerL, LowerM, LowerR, LowerW int - CompilingRuntime bool - Std bool - D string - AsmHdr string - BuildID string - LowerC int - Complete bool - LowerD string - Dwarf bool - GenDwarfInl int - InstallSuffix string - Lang string - LinkObj string - Live int - MSan bool - NoLocalImports bool - LowerO string - Pack bool - Race bool - Spectre string - LowerT bool - TrimPath string - WB bool - Shared bool - Dynlink bool - GoVersion string - SymABIs string - CPUProfile string - MemProfile string - TraceProfile string - BlockProfile string - MutexProfile string - Bench string - SmallFrames bool - JSON string - +// Flag holds the parsed command-line flags. +// See ParseFlag for non-zero defaults. +var Flag CmdFlags + +// A CountFlag is a counting integer flag. +// It accepts -name=value to set the value directly, +// but it also accepts -name with no =value to increment the count. +type CountFlag int + +// CmdFlags defines the command-line flags (see var Flag). +// Each struct field is a different flag, by default named for the lower-case of the field name. +// If the flag name is a single letter, the default flag name is left upper-case. +// If the flag name is "Lower" followed by a single letter, the default flag name is the lower-case of the last letter. +// +// If this default flag name can't be made right, the `flag` struct tag can be used to replace it, +// but this should be done only in exceptional circumstances: it helps everyone if the flag name +// is obvious from the field name when the flag is used elsewhere in the compiler sources. +// The `flag:"-"` struct tag makes a field invisible to the flag logic and should also be used sparingly. +// +// Each field must have a `help` struct tag giving the flag help message. +// +// The allowed field types are bool, int, string, pointers to those (for values stored elsewhere), +// CountFlag (for a counting flag), and func(string) (for a flag that uses special code for parsing). 
+type CmdFlags struct { + // Single letters + B CountFlag "help:\"disable bounds checking\"" + C CountFlag "help:\"disable printing of columns in error messages\"" + D string "help:\"set relative `path` for local imports\"" + E CountFlag "help:\"debug symbol export\"" + I func(string) "help:\"add `directory` to import search path\"" + K CountFlag "help:\"debug missing line numbers\"" + L CountFlag "help:\"show full file names in error messages\"" + N CountFlag "help:\"disable optimizations\"" + S CountFlag "help:\"print assembly listing\"" + // V is added by objabi.AddVersionFlag + W CountFlag "help:\"debug parse tree after type checking\"" + + LowerC int "help:\"concurrency during compilation (1 means no concurrency)\"" + LowerD string "help:\"enable debugging settings; try -d help\"" + LowerE CountFlag "help:\"no limit on number of errors reported\"" + LowerH CountFlag "help:\"halt on error\"" + LowerJ CountFlag "help:\"debug runtime-initialized variables\"" + LowerL CountFlag "help:\"disable inlining\"" + LowerM CountFlag "help:\"print optimization decisions\"" + LowerO string "help:\"write output to `file`\"" + LowerP *string "help:\"set expected package import `path`\"" // &Ctxt.Pkgpath, set below + LowerR CountFlag "help:\"debug generated wrappers\"" + LowerT bool "help:\"enable tracing for debugging the compiler\"" + LowerW CountFlag "help:\"debug type checking\"" + LowerV *bool "help:\"increase debug verbosity\"" + + // Special characters + Percent int "flag:\"%\" help:\"debug non-static initializers\"" + CompilingRuntime bool "flag:\"+\" help:\"compiling runtime\"" + + // Longer names + AsmHdr string "help:\"write assembly header to `file`\"" + Bench string "help:\"append benchmark times to `file`\"" + BlockProfile string "help:\"write block profile to `file`\"" + BuildID string "help:\"record `id` as the build id in the export metadata\"" + CPUProfile string "help:\"write cpu profile to `file`\"" + Complete bool "help:\"compiling complete package (no C or assembly)\"" + Dwarf bool "help:\"generate DWARF symbols\"" + DwarfBASEntries *bool "help:\"use base address selection entries in DWARF\"" // &Ctxt.UseBASEntries, set below + DwarfLocationLists *bool "help:\"add location lists to DWARF in optimized mode\"" // &Ctxt.Flag_locationlists, set below + Dynlink *bool "help:\"support references to Go symbols defined in other shared libraries\"" // &Ctxt.Flag_dynlink, set below + EmbedCfg func(string) "help:\"read go:embed configuration from `file`\"" + GenDwarfInl int "help:\"generate DWARF inline info records\"" // 0=disabled, 1=funcs, 2=funcs+formals/locals + GoVersion string "help:\"required version of the runtime\"" + ImportCfg func(string) "help:\"read import configuration from `file`\"" + ImportMap func(string) "help:\"add `definition` of the form source=actual to import map\"" + InstallSuffix string "help:\"set pkg directory `suffix`\"" + JSON string "help:\"version,file for JSON compiler/optimizer detail output\"" + Lang string "help:\"Go language version source code expects\"" + LinkObj string "help:\"write linker-specific object to `file`\"" + LinkShared *bool "help:\"generate code that will be linked against Go shared libraries\"" // &Ctxt.Flag_linkshared, set below + Live CountFlag "help:\"debug liveness analysis\"" + MSan bool "help:\"build code compatible with C/C++ memory sanitizer\"" + MemProfile string "help:\"write memory profile to `file`\"" + MemProfileRate int64 "help:\"set runtime.MemProfileRate to `rate`\"" + MutexProfile string "help:\"write mutex profile to 
`file`\"" + NoLocalImports bool "help:\"reject local (relative) imports\"" + Pack bool "help:\"write to file.a instead of file.o\"" + Race bool "help:\"enable race detector\"" + Shared *bool "help:\"generate code that can be linked into a shared library\"" // &Ctxt.Flag_shared, set below + SmallFrames bool "help:\"reduce the size limit for stack allocated objects\"" // small stacks, to diagnose GC latency; see golang.org/issue/27732 + Spectre string "help:\"enable spectre mitigations in `list` (all, index, ret)\"" + Std bool "help:\"compiling standard library\"" + SymABIs string "help:\"read symbol ABIs from `file`\"" + TraceProfile string "help:\"write an execution trace to `file`\"" + TrimPath string "help:\"remove `prefix` from recorded source file paths\"" + WB bool "help:\"enable write barrier\"" // TODO: remove + + // Configuration derived from flags; not a flag itself. Cfg struct { - Embed struct { + Embed struct { // set by -embedcfg Patterns map[string][]string Files map[string]string } - ImportDirs []string - ImportMap map[string]string - PackageFile map[string]string - SpectreIndex bool + ImportDirs []string // appended to by -I + ImportMap map[string]string // set by -importmap OR -importcfg + PackageFile map[string]string // set by -importcfg; nil means not in use + SpectreIndex bool // set by -spectre=index or -spectre=all } } +// ParseFlags parses the command-line flags into Flag. func ParseFlags() { - Wasm := objabi.GOARCH == "wasm" - - // Whether the limit for stack-allocated objects is much smaller than normal. - // This can be helpful for diagnosing certain causes of GC latency. See #27732. - Flag.SmallFrames = false - Flag.JSON = "" - - flag.BoolVar(&Flag.CompilingRuntime, "+", false, "compiling runtime") - flag.BoolVar(&Flag.Std, "std", false, "compiling standard library") - flag.StringVar(&Flag.D, "D", "", "set relative `path` for local imports") - - objabi.Flagcount("%", "debug non-static initializers", &Flag.Percent) - objabi.Flagcount("B", "disable bounds checking", &Flag.B) - objabi.Flagcount("C", "disable printing of columns in error messages", &Flag.C) - objabi.Flagcount("E", "debug symbol export", &Flag.E) - objabi.Flagcount("K", "debug missing line numbers", &Flag.K) - objabi.Flagcount("L", "show full file names in error messages", &Flag.L) - objabi.Flagcount("N", "disable optimizations", &Flag.N) - objabi.Flagcount("S", "print assembly listing", &Flag.S) - objabi.Flagcount("W", "debug parse tree after type checking", &Flag.W) - objabi.Flagcount("e", "no limit on number of errors reported", &Flag.LowerE) - objabi.Flagcount("h", "halt on error", &Flag.LowerH) - objabi.Flagcount("j", "debug runtime-initialized variables", &Flag.LowerJ) - objabi.Flagcount("l", "disable inlining", &Flag.LowerL) - objabi.Flagcount("m", "print optimization decisions", &Flag.LowerM) - objabi.Flagcount("r", "debug generated wrappers", &Flag.LowerR) - objabi.Flagcount("w", "debug type checking", &Flag.LowerW) - - objabi.Flagfn1("I", "add `directory` to import search path", addImportDir) - objabi.AddVersionFlag() // -V - flag.StringVar(&Flag.AsmHdr, "asmhdr", "", "write assembly header to `file`") - flag.StringVar(&Flag.BuildID, "buildid", "", "record `id` as the build id in the export metadata") - flag.IntVar(&Flag.LowerC, "c", 1, "concurrency during compilation, 1 means no concurrency") - flag.BoolVar(&Flag.Complete, "complete", false, "compiling complete package (no C or assembly)") - flag.StringVar(&Flag.LowerD, "d", "", "print debug information about items in `list`; try -d help") 
- flag.BoolVar(&Flag.Dwarf, "dwarf", !Wasm, "generate DWARF symbols") - flag.BoolVar(&Ctxt.Flag_locationlists, "dwarflocationlists", true, "add location lists to DWARF in optimized mode") - flag.IntVar(&Flag.GenDwarfInl, "gendwarfinl", 2, "generate DWARF inline info records") - objabi.Flagfn1("embedcfg", "read go:embed configuration from `file`", readEmbedCfg) - objabi.Flagfn1("importmap", "add `definition` of the form source=actual to import map", addImportMap) - objabi.Flagfn1("importcfg", "read import configuration from `file`", readImportCfg) - flag.StringVar(&Flag.InstallSuffix, "installsuffix", "", "set pkg directory `suffix`") - flag.StringVar(&Flag.Lang, "lang", "", "release to compile for") - flag.StringVar(&Flag.LinkObj, "linkobj", "", "write linker-specific object to `file`") - objabi.Flagcount("live", "debug liveness analysis", &Flag.Live) - if sys.MSanSupported(objabi.GOOS, objabi.GOARCH) { - flag.BoolVar(&Flag.MSan, "msan", false, "build code compatible with C/C++ memory sanitizer") - } - flag.BoolVar(&Flag.NoLocalImports, "nolocalimports", false, "reject local (relative) imports") - flag.StringVar(&Flag.LowerO, "o", "", "write output to `file`") - flag.StringVar(&Ctxt.Pkgpath, "p", "", "set expected package import `path`") - flag.BoolVar(&Flag.Pack, "pack", false, "write to file.a instead of file.o") - if sys.RaceDetectorSupported(objabi.GOOS, objabi.GOARCH) { - flag.BoolVar(&Flag.Race, "race", false, "enable race detector") - } - flag.StringVar(&Flag.Spectre, "spectre", Flag.Spectre, "enable spectre mitigations in `list` (all, index, ret)") - if enableTrace { - flag.BoolVar(&Flag.LowerT, "t", false, "trace type-checking") - } - flag.StringVar(&Flag.TrimPath, "trimpath", "", "remove `prefix` from recorded source file paths") - flag.BoolVar(&Ctxt.Debugvlog, "v", false, "increase debug verbosity") - flag.BoolVar(&Flag.WB, "wb", true, "enable write barrier") - if supportsDynlink(thearch.LinkArch.Arch) { - flag.BoolVar(&Flag.Shared, "shared", false, "generate code that can be linked into a shared library") - flag.BoolVar(&Flag.Dynlink, "dynlink", false, "support references to Go symbols defined in other shared libraries") - flag.BoolVar(&Ctxt.Flag_linkshared, "linkshared", false, "generate code that will be linked against Go shared libraries") - } - flag.StringVar(&Flag.CPUProfile, "cpuprofile", "", "write cpu profile to `file`") - flag.StringVar(&Flag.MemProfile, "memprofile", "", "write memory profile to `file`") - flag.Int64Var(&memprofilerate, "memprofilerate", 0, "set runtime.MemProfileRate to `rate`") - flag.StringVar(&Flag.GoVersion, "goversion", "", "required version of the runtime") - flag.StringVar(&Flag.SymABIs, "symabis", "", "read symbol ABIs from `file`") - flag.StringVar(&Flag.TraceProfile, "traceprofile", "", "write an execution trace to `file`") - flag.StringVar(&Flag.BlockProfile, "blockprofile", "", "write block profile to `file`") - flag.StringVar(&Flag.MutexProfile, "mutexprofile", "", "write mutex profile to `file`") - flag.StringVar(&Flag.Bench, "bench", "", "append benchmark times to `file`") - flag.BoolVar(&Flag.SmallFrames, "smallframes", false, "reduce the size limit for stack allocated objects") - flag.BoolVar(&Ctxt.UseBASEntries, "dwarfbasentries", Ctxt.UseBASEntries, "use base address selection entries in DWARF") - flag.StringVar(&Flag.JSON, "json", "", "version,destination for JSON compiler/optimizer logging") + Flag.I = addImportDir + + Flag.LowerC = 1 + Flag.LowerP = &Ctxt.Pkgpath + Flag.LowerV = &Ctxt.Debugvlog + + Flag.Dwarf = objabi.GOARCH != 
"wasm" + Flag.DwarfBASEntries = &Ctxt.UseBASEntries + Flag.DwarfLocationLists = &Ctxt.Flag_locationlists + *Flag.DwarfLocationLists = true + Flag.Dynlink = &Ctxt.Flag_dynlink + Flag.EmbedCfg = readEmbedCfg + Flag.GenDwarfInl = 2 + Flag.ImportCfg = readImportCfg + Flag.ImportMap = addImportMap + Flag.LinkShared = &Ctxt.Flag_linkshared + Flag.Shared = &Ctxt.Flag_shared + Flag.WB = true + + Flag.Cfg.ImportMap = make(map[string]string) + objabi.AddVersionFlag() // -V + registerFlags() objabi.Flagparse(usage) - for _, f := range strings.Split(Flag.Spectre, ",") { - f = strings.TrimSpace(f) - switch f { - default: - log.Fatalf("unknown setting -spectre=%s", f) - case "": - // nothing - case "all": - Flag.Cfg.SpectreIndex = true - Ctxt.Retpoline = true - case "index": - Flag.Cfg.SpectreIndex = true - case "ret": - Ctxt.Retpoline = true - } + if Flag.MSan && !sys.MSanSupported(objabi.GOOS, objabi.GOARCH) { + log.Fatalf("%s/%s does not support -msan", objabi.GOOS, objabi.GOARCH) } - - if Flag.Cfg.SpectreIndex { - switch objabi.GOARCH { - case "amd64": - // ok - default: - log.Fatalf("GOARCH=%s does not support -spectre=index", objabi.GOARCH) - } + if Flag.Race && !sys.RaceDetectorSupported(objabi.GOOS, objabi.GOARCH) { + log.Fatalf("%s/%s does not support -race", objabi.GOOS, objabi.GOARCH) } - - // Record flags that affect the build result. (And don't - // record flags that don't, since that would cause spurious - // changes in the binary.) - recordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre") - - if Flag.SmallFrames { - maxStackVarSize = 128 * 1024 - maxImplicitStackVarSize = 16 * 1024 + if (*Flag.Shared || *Flag.Dynlink || *Flag.LinkShared) && !Ctxt.Arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X) { + log.Fatalf("%s/%s does not support -shared", objabi.GOOS, objabi.GOARCH) } + parseSpectre(Flag.Spectre) // left as string for recordFlags - Ctxt.Flag_shared = Flag.Dynlink || Flag.Shared - Ctxt.Flag_dynlink = Flag.Dynlink + Ctxt.Flag_shared = Ctxt.Flag_dynlink || Ctxt.Flag_shared Ctxt.Flag_optimize = Flag.N == 0 - - Ctxt.Debugasm = Flag.S - if Flag.Dwarf { - Ctxt.DebugInfo = debuginfo - Ctxt.GenAbstractFunc = genAbstractFunc - Ctxt.DwFixups = obj.NewDwarfFixupTable(Ctxt) - } else { - // turn off inline generation if no dwarf at all - Flag.GenDwarfInl = 0 - Ctxt.Flag_locationlists = false - } + Ctxt.Debugasm = int(Flag.S) if flag.NArg() < 1 && Flag.LowerD != "help" && Flag.LowerD != "ssa/help" { usage() @@ -229,14 +184,6 @@ func ParseFlags() { Exit(2) } - checkLang() - - if Flag.SymABIs != "" { - readSymABIs(Flag.SymABIs, Ctxt.Pkgpath) - } - - thearch.LinkArch.Init(Ctxt) - if Flag.LowerO == "" { p := flag.Arg(0) if i := strings.LastIndex(p, "/"); i >= 0 { @@ -257,8 +204,6 @@ func ParseFlags() { Flag.LowerO = p + suffix } - startProfile() - if Flag.Race && Flag.MSan { log.Fatal("cannot use both -race and -msan") } @@ -266,19 +211,6 @@ func ParseFlags() { // -race and -msan imply -d=checkptr for now. 
Debug_checkptr = 1 } - if ispkgin(omit_pkgs) { - Flag.Race = false - Flag.MSan = false - } - if Flag.Race { - racepkg = types.NewPkg("runtime/race", "") - } - if Flag.MSan { - msanpkg = types.NewPkg("runtime/msan", "") - } - if Flag.Race || Flag.MSan { - instrumenting = true - } if Flag.CompilingRuntime && Flag.N != 0 { log.Fatal("cannot disable optimizations while compiling runtime") @@ -289,9 +221,6 @@ func ParseFlags() { if Flag.LowerC > 1 && !concurrentBackendAllowed() { log.Fatalf("cannot use concurrent backend compilation with provided flags; invoked as %v", os.Args) } - if Ctxt.Flag_locationlists && len(Ctxt.Arch.DWARFRegisters) == 0 { - log.Fatalf("location lists requested but register mapping not available on %v", Ctxt.Arch.Name) - } // parse -d argument if Flag.LowerD != "" { @@ -376,24 +305,77 @@ func ParseFlags() { // set via a -d flag Ctxt.Debugpcln = Debug_pctab - if Flag.Dwarf { - dwarf.EnableLogging(Debug_gendwarfinl != 0) - } +} - if Debug_softfloat != 0 { - thearch.SoftFloat = true - } +// registerFlags adds flag registrations for all the fields in Flag. +// See the comment on type CmdFlags for the rules. +func registerFlags() { + var ( + boolType = reflect.TypeOf(bool(false)) + intType = reflect.TypeOf(int(0)) + stringType = reflect.TypeOf(string("")) + ptrBoolType = reflect.TypeOf(new(bool)) + ptrIntType = reflect.TypeOf(new(int)) + ptrStringType = reflect.TypeOf(new(string)) + countType = reflect.TypeOf(CountFlag(0)) + funcType = reflect.TypeOf((func(string))(nil)) + ) + + v := reflect.ValueOf(&Flag).Elem() + t := v.Type() + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Name == "Cfg" { + continue + } - // enable inlining. for now: - // default: inlining on. (Debug.l == 1) - // -l: inlining off (Debug.l == 0) - // -l=2, -l=3: inlining on again, with extra debugging (Debug.l > 1) - if Flag.LowerL <= 1 { - Flag.LowerL = 1 - Flag.LowerL - } + var name string + if len(f.Name) == 1 { + name = f.Name + } else if len(f.Name) == 6 && f.Name[:5] == "Lower" && 'A' <= f.Name[5] && f.Name[5] <= 'Z' { + name = string(rune(f.Name[5] + 'a' - 'A')) + } else { + name = strings.ToLower(f.Name) + } + if tag := f.Tag.Get("flag"); tag != "" { + name = tag + } + + help := f.Tag.Get("help") + if help == "" { + panic(fmt.Sprintf("base.Flag.%s is missing help text", f.Name)) + } + + if k := f.Type.Kind(); (k == reflect.Ptr || k == reflect.Func) && v.Field(i).IsNil() { + panic(fmt.Sprintf("base.Flag.%s is uninitialized %v", f.Name, f.Type)) + } - if Flag.JSON != "" { // parse version,destination from json logging optimization. 
- logopt.LogJsonOption(Flag.JSON) + switch f.Type { + case boolType: + p := v.Field(i).Addr().Interface().(*bool) + flag.BoolVar(p, name, *p, help) + case intType: + p := v.Field(i).Addr().Interface().(*int) + flag.IntVar(p, name, *p, help) + case stringType: + p := v.Field(i).Addr().Interface().(*string) + flag.StringVar(p, name, *p, help) + case ptrBoolType: + p := v.Field(i).Interface().(*bool) + flag.BoolVar(p, name, *p, help) + case ptrIntType: + p := v.Field(i).Interface().(*int) + flag.IntVar(p, name, *p, help) + case ptrStringType: + p := v.Field(i).Interface().(*string) + flag.StringVar(p, name, *p, help) + case countType: + p := (*int)(v.Field(i).Addr().Interface().(*CountFlag)) + objabi.Flagcount(name, help, p) + case funcType: + f := v.Field(i).Interface().(func(string)) + objabi.Flagfn1(name, help, f) + } } } @@ -514,3 +496,32 @@ func readEmbedCfg(file string) { log.Fatalf("%s: invalid embedcfg: missing Files", file) } } + +// parseSpectre parses the spectre configuration from the string s. +func parseSpectre(s string) { + for _, f := range strings.Split(s, ",") { + f = strings.TrimSpace(f) + switch f { + default: + log.Fatalf("unknown setting -spectre=%s", f) + case "": + // nothing + case "all": + Flag.Cfg.SpectreIndex = true + Ctxt.Retpoline = true + case "index": + Flag.Cfg.SpectreIndex = true + case "ret": + Ctxt.Retpoline = true + } + } + + if Flag.Cfg.SpectreIndex { + switch objabi.GOARCH { + case "amd64": + // ok + default: + log.Fatalf("GOARCH=%s does not support -spectre=index", objabi.GOARCH) + } + } +} diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 8edc0d4495f40..9cf988bca8ea6 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -18,7 +18,6 @@ import ( "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/src" - "cmd/internal/sys" "flag" "fmt" "go/constant" @@ -118,12 +117,6 @@ func hidePanic() { } } -// supportsDynlink reports whether or not the code generator for the given -// architecture supports the -shared and -dynlink flags. -func supportsDynlink(arch *sys.Arch) bool { - return arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X) -} - // timing data for compiler phases var timings Timings @@ -192,6 +185,74 @@ func Main(archInit func(*Arch)) { ParseFlags() + // Record flags that affect the build result. (And don't + // record flags that don't, since that would cause spurious + // changes in the binary.) + recordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre") + + if !enableTrace && Flag.LowerT { + log.Fatalf("compiler not built with support for -t") + } + + // Enable inlining (after recordFlags, to avoid recording the rewritten -l). For now: + // default: inlining on. 
(Flag.LowerL == 1) + // -l: inlining off (Flag.LowerL == 0) + // -l=2, -l=3: inlining on again, with extra debugging (Flag.LowerL > 1) + if Flag.LowerL <= 1 { + Flag.LowerL = 1 - Flag.LowerL + } + + if Flag.SmallFrames { + maxStackVarSize = 128 * 1024 + maxImplicitStackVarSize = 16 * 1024 + } + + if Flag.Dwarf { + Ctxt.DebugInfo = debuginfo + Ctxt.GenAbstractFunc = genAbstractFunc + Ctxt.DwFixups = obj.NewDwarfFixupTable(Ctxt) + } else { + // turn off inline generation if no dwarf at all + Flag.GenDwarfInl = 0 + Ctxt.Flag_locationlists = false + } + if Ctxt.Flag_locationlists && len(Ctxt.Arch.DWARFRegisters) == 0 { + log.Fatalf("location lists requested but register mapping not available on %v", Ctxt.Arch.Name) + } + + checkLang() + + if Flag.SymABIs != "" { + readSymABIs(Flag.SymABIs, Ctxt.Pkgpath) + } + + if ispkgin(omit_pkgs) { + Flag.Race = false + Flag.MSan = false + } + + thearch.LinkArch.Init(Ctxt) + startProfile() + if Flag.Race { + racepkg = types.NewPkg("runtime/race", "") + } + if Flag.MSan { + msanpkg = types.NewPkg("runtime/msan", "") + } + if Flag.Race || Flag.MSan { + instrumenting = true + } + if Flag.Dwarf { + dwarf.EnableLogging(Debug_gendwarfinl != 0) + } + if Debug_softfloat != 0 { + thearch.SoftFloat = true + } + + if Flag.JSON != "" { // parse version,destination from json logging optimization. + logopt.LogJsonOption(Flag.JSON) + } + ssaDump = os.Getenv("GOSSAFUNC") ssaDir = os.Getenv("GOSSADIR") if ssaDump != "" { From 3c240f5d17e4ad3ddd342645b63fe20ecbb7fcae Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 16 Nov 2020 01:17:25 -0500 Subject: [PATCH 036/474] [dev.regabi] cmd/compile: clean up debug flag (-d) handling [generated] The debug table is not as haphazard as flags, but there are still a few mismatches between command-line names and variable names. This CL moves them all into a consistent home (var Debug, like var Flag). Code updated automatically using the rf command below. A followup CL will make a few manual cleanups, leaving this CL completely automated and easier to regenerate during merge conflicts. 
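
For a sense of the net effect: call sites that read scattered globals now
read fields of a single struct. A minimal before/after sketch (the names
come from the rf script below; the snippet itself is illustrative, not
part of the generated CL):

	// before, e.g. in cmd/compile/internal/amd64:
	if gc.Debug_checknil != 0 && v.Pos.Line() > 1 {
		gc.Warnl(v.Pos, "generated nil check")
	}

	// after:
	if gc.Debug.Nil != 0 && v.Pos.Line() > 1 {
		gc.Warnl(v.Pos, "generated nil check")
	}
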
[git-generate] cd src/cmd/compile/internal/gc rf ' add main.go var Debug struct{} mv Debug_append Debug.Append mv Debug_checkptr Debug.Checkptr mv Debug_closure Debug.Closure mv Debug_compilelater Debug.CompileLater mv disable_checknil Debug.DisableNil mv debug_dclstack Debug.DclStack mv Debug_gcprog Debug.GCProg mv Debug_libfuzzer Debug.Libfuzzer mv Debug_checknil Debug.Nil mv Debug_panic Debug.Panic mv Debug_slice Debug.Slice mv Debug_typeassert Debug.TypeAssert mv Debug_wb Debug.WB mv Debug_export Debug.Export mv Debug_pctab Debug.PCTab mv Debug_locationlist Debug.LocationLists mv Debug_typecheckinl Debug.TypecheckInl mv Debug_gendwarfinl Debug.DwarfInl mv Debug_softfloat Debug.SoftFloat mv Debug_defer Debug.Defer mv Debug_dumpptrs Debug.DumpPtrs mv flag.go:/parse.-d/-1,/unknown.debug/+2 parseDebug mv debugtab Debug parseDebug \ debugHelpHeader debugHelpFooter \ debug.go # Remove //go:generate line copied from main.go rm debug.go:/go:generate/-+ ' Change-Id: I625761ca5659be4052f7161a83baa00df75cca91 Reviewed-on: https://go-review.googlesource.com/c/go/+/272246 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/amd64/ssa.go | 2 +- src/cmd/compile/internal/arm/ssa.go | 2 +- src/cmd/compile/internal/arm64/ssa.go | 2 +- src/cmd/compile/internal/gc/alg.go | 4 +- src/cmd/compile/internal/gc/closure.go | 4 +- src/cmd/compile/internal/gc/debug.go | 167 ++++++++++++++++++++++++ src/cmd/compile/internal/gc/dwinl.go | 6 +- src/cmd/compile/internal/gc/export.go | 8 +- src/cmd/compile/internal/gc/flag.go | 84 +----------- src/cmd/compile/internal/gc/fmt.go | 4 +- src/cmd/compile/internal/gc/go.go | 5 - src/cmd/compile/internal/gc/inl.go | 8 +- src/cmd/compile/internal/gc/main.go | 83 +----------- src/cmd/compile/internal/gc/order.go | 4 +- src/cmd/compile/internal/gc/pgen.go | 2 +- src/cmd/compile/internal/gc/print.go | 2 +- src/cmd/compile/internal/gc/reflect.go | 6 +- src/cmd/compile/internal/gc/sinit.go | 2 +- src/cmd/compile/internal/gc/ssa.go | 20 +-- src/cmd/compile/internal/gc/subr.go | 2 +- src/cmd/compile/internal/gc/syntax.go | 2 +- src/cmd/compile/internal/gc/walk.go | 6 +- src/cmd/compile/internal/mips/ssa.go | 2 +- src/cmd/compile/internal/mips64/ssa.go | 2 +- src/cmd/compile/internal/ppc64/ssa.go | 2 +- src/cmd/compile/internal/riscv64/ssa.go | 2 +- src/cmd/compile/internal/s390x/ssa.go | 2 +- src/cmd/compile/internal/wasm/ssa.go | 2 +- src/cmd/compile/internal/x86/ssa.go | 2 +- 29 files changed, 226 insertions(+), 213 deletions(-) create mode 100644 src/cmd/compile/internal/gc/debug.go diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 5ff05a0edd16c..1f2d626721ed5 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1164,7 +1164,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if logopt.Enabled() { logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) } - if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + if gc.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers gc.Warnl(v.Pos, "generated nil check") } case ssa.OpAMD64MOVBatomicload, ssa.OpAMD64MOVLatomicload, ssa.OpAMD64MOVQatomicload: diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go index 765a7715465ce..82a5172ec7f96 100644 --- a/src/cmd/compile/internal/arm/ssa.go +++ b/src/cmd/compile/internal/arm/ssa.go @@ -741,7 +741,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if logopt.Enabled() { 
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) } - if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + if gc.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers gc.Warnl(v.Pos, "generated nil check") } case ssa.OpARMLoweredZero: diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go index 22b28a9308bd3..dcbd8f947400c 100644 --- a/src/cmd/compile/internal/arm64/ssa.go +++ b/src/cmd/compile/internal/arm64/ssa.go @@ -1038,7 +1038,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if logopt.Enabled() { logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) } - if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers + if gc.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers gc.Warnl(v.Pos, "generated nil check") } case ssa.OpARM64Equal, diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index c1d8de6bad426..87b905ed59e0f 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -387,7 +387,7 @@ func genhash(t *types.Type) *obj.LSym { typecheckslice(fn.Nbody.Slice(), ctxStmt) Curfn = nil - if debug_dclstack != 0 { + if Debug.DclStack != 0 { testdclstack() } @@ -766,7 +766,7 @@ func geneq(t *types.Type) *obj.LSym { typecheckslice(fn.Nbody.Slice(), ctxStmt) Curfn = nil - if debug_dclstack != 0 { + if Debug.DclStack != 0 { testdclstack() } diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index f850cbe2801dc..c25a4469997e4 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -337,7 +337,7 @@ func hasemptycvars(clo *Node) bool { // closuredebugruntimecheck applies boilerplate checks for debug flags // and compiling runtime func closuredebugruntimecheck(clo *Node) { - if Debug_closure > 0 { + if Debug.Closure > 0 { if clo.Esc == EscHeap { Warnl(clo.Pos, "heap closure, captured vars = %v", clo.Func.ClosureVars) } else { @@ -386,7 +386,7 @@ func walkclosure(clo *Node, init *Nodes) *Node { // If no closure vars, don't bother wrapping. if hasemptycvars(clo) { - if Debug_closure > 0 { + if Debug.Closure > 0 { Warnl(clo.Pos, "closure converted to global") } return fn.Nname diff --git a/src/cmd/compile/internal/gc/debug.go b/src/cmd/compile/internal/gc/debug.go new file mode 100644 index 0000000000000..f6be3d57b0be5 --- /dev/null +++ b/src/cmd/compile/internal/gc/debug.go @@ -0,0 +1,167 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gc + +import ( + "fmt" + "log" + "os" + "strconv" + "strings" + + "cmd/compile/internal/ssa" + "cmd/internal/objabi" +) + +// Debug arguments. +// These can be specified with the -d flag, as in "-d nil" +// to set the debug_checknil variable. +// Multiple options can be comma-separated. 
+// Each option accepts an optional argument, as in "gcprog=2" +var debugtab = []struct { + name string + help string + val interface{} // must be *int or *string +}{ + {"append", "print information about append compilation", &Debug.Append}, + {"checkptr", "instrument unsafe pointer conversions", &Debug.Checkptr}, + {"closure", "print information about closure compilation", &Debug.Closure}, + {"compilelater", "compile functions as late as possible", &Debug.CompileLater}, + {"disablenil", "disable nil checks", &Debug.DisableNil}, + {"dclstack", "run internal dclstack check", &Debug.DclStack}, + {"dumpptrs", "show Node pointer values in Dump/dumplist output", &Debug.DumpPtrs}, + {"gcprog", "print dump of GC programs", &Debug.GCProg}, + {"libfuzzer", "coverage instrumentation for libfuzzer", &Debug.Libfuzzer}, + {"nil", "print information about nil checks", &Debug.Nil}, + {"panic", "do not hide any compiler panic", &Debug.Panic}, + {"slice", "print information about slice compilation", &Debug.Slice}, + {"typeassert", "print information about type assertion inlining", &Debug.TypeAssert}, + {"wb", "print information about write barriers", &Debug.WB}, + {"export", "print export data", &Debug.Export}, + {"pctab", "print named pc-value table", &Debug.PCTab}, + {"locationlists", "print information about DWARF location list creation", &Debug.LocationLists}, + {"typecheckinl", "eager typechecking of inline function bodies", &Debug.TypecheckInl}, + {"dwarfinl", "print information about DWARF inlined function creation", &Debug.DwarfInl}, + {"softfloat", "force compiler to emit soft-float code", &Debug.SoftFloat}, + {"defer", "print information about defer compilation", &Debug.Defer}, + {"fieldtrack", "enable fieldtracking", &objabi.Fieldtrack_enabled}, +} + +var Debug struct { + Append int + Checkptr int + Closure int + CompileLater int + DisableNil int + DclStack int + GCProg int + Libfuzzer int + Nil int + Panic int + Slice int + TypeAssert int + WB int + Export int + PCTab string + LocationLists int + TypecheckInl int + DwarfInl int + SoftFloat int + Defer int + DumpPtrs int +} + +func parseDebug() { + // parse -d argument + if Flag.LowerD != "" { + Split: + for _, name := range strings.Split(Flag.LowerD, ",") { + if name == "" { + continue + } + // display help about the -d option itself and quit + if name == "help" { + fmt.Print(debugHelpHeader) + maxLen := len("ssa/help") + for _, t := range debugtab { + if len(t.name) > maxLen { + maxLen = len(t.name) + } + } + for _, t := range debugtab { + fmt.Printf("\t%-*s\t%s\n", maxLen, t.name, t.help) + } + // ssa options have their own help + fmt.Printf("\t%-*s\t%s\n", maxLen, "ssa/help", "print help about SSA debugging") + fmt.Print(debugHelpFooter) + os.Exit(0) + } + val, valstring, haveInt := 1, "", true + if i := strings.IndexAny(name, "=:"); i >= 0 { + var err error + name, valstring = name[:i], name[i+1:] + val, err = strconv.Atoi(valstring) + if err != nil { + val, haveInt = 1, false + } + } + for _, t := range debugtab { + if t.name != name { + continue + } + switch vp := t.val.(type) { + case nil: + // Ignore + case *string: + *vp = valstring + case *int: + if !haveInt { + log.Fatalf("invalid debug value %v", name) + } + *vp = val + default: + panic("bad debugtab type") + } + continue Split + } + // special case for ssa for now + if strings.HasPrefix(name, "ssa/") { + // expect form ssa/phase/flag + // e.g. 
-d=ssa/generic_cse/time
+				// _ in phase name also matches space
+				phase := name[4:]
+				flag := "debug" // default flag is debug
+				if i := strings.Index(phase, "/"); i >= 0 {
+					flag = phase[i+1:]
+					phase = phase[:i]
+				}
+				err := ssa.PhaseOption(phase, flag, val, valstring)
+				if err != "" {
+					log.Fatalf(err)
+				}
+				continue Split
+			}
+			log.Fatalf("unknown debug key -d %s\n", name)
+		}
+	}
+}
+
+const debugHelpHeader = `usage: -d arg[,arg]* and arg is <key>[=<value>]
+
+<key> is one of:
+
+`
+
+const debugHelpFooter = `
+<value> is key-specific.
+
+Key "checkptr" supports values:
+	"0": instrumentation disabled
+	"1": conversions involving unsafe.Pointer are instrumented
+	"2": conversions to unsafe.Pointer force heap allocation
+
+Key "pctab" supports values:
+	"pctospadj", "pctofile", "pctoline", "pctoinline", "pctopcdata"
+`
diff --git a/src/cmd/compile/internal/gc/dwinl.go b/src/cmd/compile/internal/gc/dwinl.go
index 48d78f6cd7d2a..edde7a4cc530c 100644
--- a/src/cmd/compile/internal/gc/dwinl.go
+++ b/src/cmd/compile/internal/gc/dwinl.go
@@ -26,7 +26,7 @@ type varPos struct {
 func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
 	var inlcalls dwarf.InlCalls
 
-	if Debug_gendwarfinl != 0 {
+	if Debug.DwarfInl != 0 {
 		Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name)
 	}
 
@@ -181,7 +181,7 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
 	}
 
 	// Debugging
-	if Debug_gendwarfinl != 0 {
+	if Debug.DwarfInl != 0 {
 		dumpInlCalls(inlcalls)
 		dumpInlVars(dwVars)
 	}
@@ -210,7 +210,7 @@ func genAbstractFunc(fn *obj.LSym) {
 		Ctxt.Diag("failed to locate precursor fn for %v", fn)
 		return
 	}
-	if Debug_gendwarfinl != 0 {
+	if Debug.DwarfInl != 0 {
 		Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
 	}
 	Ctxt.DwarfAbstractFunc(ifn, fn, Ctxt.Pkgpath)
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go
index edd270323896c..48f77fa18293e 100644
--- a/src/cmd/compile/internal/gc/export.go
+++ b/src/cmd/compile/internal/gc/export.go
@@ -12,13 +12,9 @@ import (
 	"go/constant"
 )
 
-var (
-	Debug_export int // if set, print debugging information about export data
-)
-
 func exportf(bout *bio.Writer, format string, args ...interface{}) {
 	fmt.Fprintf(bout, format, args...)
-	if Debug_export != 0 {
+	if Debug.Export != 0 {
 		fmt.Printf(format, args...)
 	}
 }
@@ -71,7 +67,7 @@ func dumpexport(bout *bio.Writer) {
 	size := bout.Offset() - off
 	exportf(bout, "\n$$\n")
 
-	if Debug_export != 0 {
+	if Debug.Export != 0 {
 		fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", Ctxt.Pkgpath, size)
 	}
 }
diff --git a/src/cmd/compile/internal/gc/flag.go b/src/cmd/compile/internal/gc/flag.go
index 090287ef62835..06b0a88ba350a 100644
--- a/src/cmd/compile/internal/gc/flag.go
+++ b/src/cmd/compile/internal/gc/flag.go
@@ -13,10 +13,9 @@ import (
 	"os"
 	"reflect"
 	"runtime"
-	"strconv"
+
 	"strings"
 
-	"cmd/compile/internal/ssa"
 	"cmd/internal/objabi"
 	"cmd/internal/sys"
 )
@@ -209,7 +208,7 @@ func ParseFlags() {
 	}
 	if Flag.Race || Flag.MSan {
 		// -race and -msan imply -d=checkptr for now.
- Debug_checkptr = 1 + Debug.Checkptr = 1 } if Flag.CompilingRuntime && Flag.N != 0 { @@ -222,89 +221,18 @@ func ParseFlags() { log.Fatalf("cannot use concurrent backend compilation with provided flags; invoked as %v", os.Args) } - // parse -d argument - if Flag.LowerD != "" { - Split: - for _, name := range strings.Split(Flag.LowerD, ",") { - if name == "" { - continue - } - // display help about the -d option itself and quit - if name == "help" { - fmt.Print(debugHelpHeader) - maxLen := len("ssa/help") - for _, t := range debugtab { - if len(t.name) > maxLen { - maxLen = len(t.name) - } - } - for _, t := range debugtab { - fmt.Printf("\t%-*s\t%s\n", maxLen, t.name, t.help) - } - // ssa options have their own help - fmt.Printf("\t%-*s\t%s\n", maxLen, "ssa/help", "print help about SSA debugging") - fmt.Print(debugHelpFooter) - os.Exit(0) - } - val, valstring, haveInt := 1, "", true - if i := strings.IndexAny(name, "=:"); i >= 0 { - var err error - name, valstring = name[:i], name[i+1:] - val, err = strconv.Atoi(valstring) - if err != nil { - val, haveInt = 1, false - } - } - for _, t := range debugtab { - if t.name != name { - continue - } - switch vp := t.val.(type) { - case nil: - // Ignore - case *string: - *vp = valstring - case *int: - if !haveInt { - log.Fatalf("invalid debug value %v", name) - } - *vp = val - default: - panic("bad debugtab type") - } - continue Split - } - // special case for ssa for now - if strings.HasPrefix(name, "ssa/") { - // expect form ssa/phase/flag - // e.g. -d=ssa/generic_cse/time - // _ in phase name also matches space - phase := name[4:] - flag := "debug" // default flag is debug - if i := strings.Index(phase, "/"); i >= 0 { - flag = phase[i+1:] - phase = phase[:i] - } - err := ssa.PhaseOption(phase, flag, val, valstring) - if err != "" { - log.Fatalf(err) - } - continue Split - } - log.Fatalf("unknown debug key -d %s\n", name) - } - } + parseDebug() if Flag.CompilingRuntime { // Runtime can't use -d=checkptr, at least not yet. - Debug_checkptr = 0 + Debug.Checkptr = 0 // Fuzzing the runtime isn't interesting either. - Debug_libfuzzer = 0 + Debug.Libfuzzer = 0 } // set via a -d flag - Ctxt.Debugpcln = Debug_pctab + Ctxt.Debugpcln = Debug.PCTab } // registerFlags adds flag registrations for all the fields in Flag. 
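
For reviewers, the -d syntax handled by parseDebug is unchanged by this
move. Representative invocations (illustrative only; x.go stands in for any
input file), drawn from the comments and help code in debug.go above:

	go tool compile -d=help x.go                  # print the debugtab keys and help text, then exit
	go tool compile -d=nil x.go                   # int-valued keys default to 1
	go tool compile -d=nil=0,gcprog=2 x.go        # "=" or ":" sets an explicit value
	go tool compile -d=ssa/generic_cse/time x.go  # ssa/phase/flag form, handed to ssa.PhaseOption
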
diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index f995d2e2ec408..51e139e3190b8 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -339,14 +339,14 @@ func (n *Node) jconv(s fmt.State, flag FmtFlag) { short := flag&FmtShort != 0 // Useful to see which nodes in an AST printout are actually identical - if Debug_dumpptrs != 0 { + if Debug.DumpPtrs != 0 { fmt.Fprintf(s, " p(%p)", n) } if !short && n.Name != nil && n.Name.Vargen != 0 { fmt.Fprintf(s, " g(%d)", n.Name.Vargen) } - if Debug_dumpptrs != 0 && !short && n.Name != nil && n.Name.Defn != nil { + if Debug.DumpPtrs != 0 && !short && n.Name != nil && n.Name.Defn != nil { // Useful to see where Defn is set and what node it points to fmt.Fprintf(s, " defn(%p)", n.Name.Defn) } diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index 6cab03d7265b1..947dae476b119 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -101,9 +101,6 @@ var pragcgobuf [][]string var decldepth int32 -var Debug_checknil int -var Debug_typeassert int - var localpkg *types.Pkg // package being compiled var inimport bool // set during import @@ -189,8 +186,6 @@ var Ctxt *obj.Link var nodfp *Node -var disable_checknil int - var autogeneratedPos src.XPos // interface to back end diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 50091e9c111f3..fc467dd95aaf7 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -86,7 +86,7 @@ func typecheckinl(fn *Node) { return // typecheckinl on local function } - if Flag.LowerM > 2 || Debug_export != 0 { + if Flag.LowerM > 2 || Debug.Export != 0 { fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym, fn, asNodes(fn.Func.Inl.Body)) } @@ -144,7 +144,7 @@ func caninl(fn *Node) { } // If marked "go:nocheckptr" and -d checkptr compilation, don't inline. - if Debug_checkptr != 0 && fn.Func.Pragma&NoCheckPtr != 0 { + if Debug.Checkptr != 0 && fn.Func.Pragma&NoCheckPtr != 0 { reason = "marked go:nocheckptr" return } @@ -595,7 +595,7 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node { case OCALLMETH: // Prevent inlining some reflect.Value methods when using checkptr, // even when package reflect was compiled without it (#35073). - if s := n.Left.Sym; Debug_checkptr != 0 && isReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") { + if s := n.Left.Sym; Debug.Checkptr != 0 && isReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") { return n } } @@ -931,7 +931,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { defer func() { inlMap[fn] = false }() - if Debug_typecheckinl == 0 { + if Debug.TypecheckInl == 0 { typecheckinl(fn) } diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 9cf988bca8ea6..0d41f81a52b14 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -34,79 +34,8 @@ import ( "strings" ) -var ( - Debug_append int - Debug_checkptr int - Debug_closure int - Debug_compilelater int - debug_dclstack int - Debug_dumpptrs int - Debug_libfuzzer int - Debug_panic int - Debug_slice int - Debug_wb int - Debug_pctab string - Debug_locationlist int - Debug_typecheckinl int - Debug_gendwarfinl int - Debug_softfloat int - Debug_defer int -) - -// Debug arguments. 
-// These can be specified with the -d flag, as in "-d nil"
-// to set the debug_checknil variable.
-// Multiple options can be comma-separated.
-// Each option accepts an optional argument, as in "gcprog=2"
-var debugtab = []struct {
-	name string
-	help string
-	val  interface{} // must be *int or *string
-}{
-	{"append", "print information about append compilation", &Debug_append},
-	{"checkptr", "instrument unsafe pointer conversions", &Debug_checkptr},
-	{"closure", "print information about closure compilation", &Debug_closure},
-	{"compilelater", "compile functions as late as possible", &Debug_compilelater},
-	{"disablenil", "disable nil checks", &disable_checknil},
-	{"dclstack", "run internal dclstack check", &debug_dclstack},
-	{"dumpptrs", "show Node pointer values in Dump/dumplist output", &Debug_dumpptrs},
-	{"gcprog", "print dump of GC programs", &Debug_gcprog},
-	{"libfuzzer", "coverage instrumentation for libfuzzer", &Debug_libfuzzer},
-	{"nil", "print information about nil checks", &Debug_checknil},
-	{"panic", "do not hide any compiler panic", &Debug_panic},
-	{"slice", "print information about slice compilation", &Debug_slice},
-	{"typeassert", "print information about type assertion inlining", &Debug_typeassert},
-	{"wb", "print information about write barriers", &Debug_wb},
-	{"export", "print export data", &Debug_export},
-	{"pctab", "print named pc-value table", &Debug_pctab},
-	{"locationlists", "print information about DWARF location list creation", &Debug_locationlist},
-	{"typecheckinl", "eager typechecking of inline function bodies", &Debug_typecheckinl},
-	{"dwarfinl", "print information about DWARF inlined function creation", &Debug_gendwarfinl},
-	{"softfloat", "force compiler to emit soft-float code", &Debug_softfloat},
-	{"defer", "print information about defer compilation", &Debug_defer},
-	{"fieldtrack", "enable fieldtracking", &objabi.Fieldtrack_enabled},
-}
-
-const debugHelpHeader = `usage: -d arg[,arg]* and arg is <key>[=<value>]
-
-<key> is one of:
-
-`
-
-const debugHelpFooter = `
-<value> is key-specific.
-
-Key "checkptr" supports values:
-	"0": instrumentation disabled
-	"1": conversions involving unsafe.Pointer are instrumented
-	"2": conversions to unsafe.Pointer force heap allocation
-
-Key "pctab" supports values:
-	"pctospadj", "pctofile", "pctoline", "pctoinline", "pctopcdata"
-`
-
 func hidePanic() {
-	if Debug_panic == 0 && Errors() > 0 {
+	if Debug.Panic == 0 && Errors() > 0 {
 		// If we've already complained about things
 		// in the program, don't bother complaining
 		// about a panic too; let the user clean up
@@ -243,9 +172,9 @@ func Main(archInit func(*Arch)) {
 		instrumenting = true
 	}
 	if Flag.Dwarf {
-		dwarf.EnableLogging(Debug_gendwarfinl != 0)
+		dwarf.EnableLogging(Debug.DwarfInl != 0)
 	}
-	if Debug_softfloat != 0 {
+	if Debug.SoftFloat != 0 {
 		thearch.SoftFloat = true
 	}
 
@@ -396,7 +325,7 @@ func Main(archInit func(*Arch)) {
 
 	// Phase 5: Inlining
 	timings.Start("fe", "inlining")
-	if Debug_typecheckinl != 0 {
+	if Debug.TypecheckInl != 0 {
 		// Typecheck imported function bodies if Debug.l > 1,
 		// otherwise lazily when used or re-exported.
 		for _, n := range importlist {
@@ -501,7 +430,7 @@ func Main(archInit func(*Arch)) {
 	// DWARF inlining gen so as to avoid problems with generated
 	// method wrappers.
if Ctxt.DwFixups != nil { - Ctxt.DwFixups.Finalize(Ctxt.Pkgpath, Debug_gendwarfinl != 0) + Ctxt.DwFixups.Finalize(Ctxt.Pkgpath, Debug.DwarfInl != 0) Ctxt.DwFixups = nil Flag.GenDwarfInl = 0 } @@ -944,7 +873,7 @@ func importfile(f constant.Value) *types.Pkg { return nil case 'B': - if Debug_export != 0 { + if Debug.Export != 0 { fmt.Printf("importing %s (%s)\n", path_, file) } imp.ReadByte() // skip \n after $$B diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index ee0c8f2711084..90c08b1b7503e 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -384,7 +384,7 @@ func orderMakeSliceCopy(s []*Node) { // edge inserts coverage instrumentation for libfuzzer. func (o *Order) edge() { - if Debug_libfuzzer == 0 { + if Debug.Libfuzzer == 0 { return } @@ -998,7 +998,7 @@ func (o *Order) stmt(n *Node) { // For now just clean all the temporaries at the end. // In practice that's fine. case OSWITCH: - if Debug_libfuzzer != 0 && !hasDefaultCase(n) { + if Debug.Libfuzzer != 0 && !hasDefaultCase(n) { // Add empty "default:" case for instrumentation. n.List.Append(nod(OCASE, nil, nil)) } diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index fe13a161bd508..19a24a3235eb5 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -291,7 +291,7 @@ func compilenow(fn *Node) bool { if fn.IsMethod() && isInlinableButNotInlined(fn) { return false } - return Flag.LowerC == 1 && Debug_compilelater == 0 + return Flag.LowerC == 1 && Debug.CompileLater == 0 } // isInlinableButNotInlined returns true if 'fn' was marked as an diff --git a/src/cmd/compile/internal/gc/print.go b/src/cmd/compile/internal/gc/print.go index 6b5f670812e9c..345f433fe4024 100644 --- a/src/cmd/compile/internal/gc/print.go +++ b/src/cmd/compile/internal/gc/print.go @@ -208,7 +208,7 @@ func Fatalf(format string, args ...interface{}) { func FatalfAt(pos src.XPos, format string, args ...interface{}) { flusherrors() - if Debug_panic != 0 || numErrors == 0 { + if Debug.Panic != 0 || numErrors == 0 { fmt.Printf("%v: internal compiler error: ", linestr(pos)) fmt.Printf(format, args...) 
fmt.Printf("\n") diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 674a3bf3fbae3..11ccc15a253fa 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -1787,13 +1787,11 @@ type GCProg struct { w gcprog.Writer } -var Debug_gcprog int // set by -d gcprog - func (p *GCProg) init(lsym *obj.LSym) { p.lsym = lsym p.symoff = 4 // first 4 bytes hold program length p.w.Init(p.writeByte) - if Debug_gcprog > 0 { + if Debug.GCProg > 0 { fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym) p.w.Debug(os.Stderr) } @@ -1807,7 +1805,7 @@ func (p *GCProg) end() { p.w.End() duint32(p.lsym, 0, uint32(p.symoff-4)) ggloblsym(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL) - if Debug_gcprog > 0 { + if Debug.GCProg > 0 { fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym) } } diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 741e0ef9a3a5e..1f89baa3c0122 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -256,7 +256,7 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool { case OCLOSURE: if hasemptycvars(r) { - if Debug_closure > 0 { + if Debug.Closure > 0 { Warnl(r.Pos, "closure converted to global") } // Closures with no captured variables are globals, diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 260df2f54f142..f06f08e6ab6d9 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -1102,7 +1102,7 @@ func (s *state) stmt(n *Node) { } } case ODEFER: - if Debug_defer > 0 { + if Debug.Defer > 0 { var defertype string if s.hasOpenDefers { defertype = "open-coded" @@ -1232,12 +1232,12 @@ func (s *state) stmt(n *Node) { // so there will be no write barriers, // so there's no need to attempt to prevent them. if s.canSSA(n.Left) { - if Debug_append > 0 { // replicating old diagnostic message + if Debug.Append > 0 { // replicating old diagnostic message Warnl(n.Pos, "append: len-only update (in local slice)") } break } - if Debug_append > 0 { + if Debug.Append > 0 { Warnl(n.Pos, "append: len-only update") } s.append(rhs, true) @@ -5026,7 +5026,7 @@ func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value { // Used only for automatically inserted nil checks, // not for user code like 'x != nil'. func (s *state) nilCheck(ptr *ssa.Value) { - if disable_checknil != 0 || s.curfn.Func.NilCheckDisabled() { + if Debug.DisableNil != 0 || s.curfn.Func.NilCheckDisabled() { return } s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem()) @@ -5837,7 +5837,7 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { if n.Type.IsEmptyInterface() { // Converting to an empty interface. // Input could be an empty or nonempty interface. - if Debug_typeassert > 0 { + if Debug.TypeAssert > 0 { Warnl(n.Pos, "type assertion inlined") } @@ -5904,7 +5904,7 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { return } // converting to a nonempty interface needs a runtime call. 
- if Debug_typeassert > 0 { + if Debug.TypeAssert > 0 { Warnl(n.Pos, "type assertion not inlined") } if n.Left.Type.IsEmptyInterface() { @@ -5921,14 +5921,14 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { return s.rtcall(assertI2I, true, []*types.Type{n.Type}, target, iface)[0], nil } - if Debug_typeassert > 0 { + if Debug.TypeAssert > 0 { Warnl(n.Pos, "type assertion inlined") } // Converting to a concrete type. direct := isdirectiface(n.Type) itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface - if Debug_typeassert > 0 { + if Debug.TypeAssert > 0 { Warnl(n.Pos, "type assertion inlined") } var targetITab *ssa.Value @@ -6474,7 +6474,7 @@ func genssa(f *ssa.Func, pp *Progs) { } if Ctxt.Flag_locationlists { - e.curfn.Func.DebugInfo = ssa.BuildFuncDebug(Ctxt, f, Debug_locationlist > 1, stackOffset) + e.curfn.Func.DebugInfo = ssa.BuildFuncDebug(Ctxt, f, Debug.LocationLists > 1, stackOffset) bstart := s.bstart // Note that at this moment, Prog.Pc is a sequence number; it's // not a real PC until after assembly, so this mapping has to @@ -7113,7 +7113,7 @@ func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) { } func (e *ssafn) Debug_checknil() bool { - return Debug_checknil != 0 + return Debug.Nil != 0 } func (e *ssafn) UseWriteBarrier() bool { diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 32312e9545aeb..989d10a561d0d 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -1412,7 +1412,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { } funcbody() - if debug_dclstack != 0 { + if Debug.DclStack != 0 { testdclstack() } diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go index 75a7ae2c7af7c..f771a7184ea47 100644 --- a/src/cmd/compile/internal/gc/syntax.go +++ b/src/cmd/compile/internal/gc/syntax.go @@ -764,7 +764,7 @@ func (f *Func) SetInstrumentBody(b bool) { f.flags.set(funcInstrumentB func (f *Func) SetOpenCodedDeferDisallowed(b bool) { f.flags.set(funcOpenCodedDeferDisallowed, b) } func (f *Func) setWBPos(pos src.XPos) { - if Debug_wb != 0 { + if Debug.WB != 0 { Warnl(pos, "write barrier") } if !f.WBPos.IsKnown() { diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index c2d8411a596e9..de2733909e3e8 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -1685,7 +1685,7 @@ func reduceSlice(n *Node) *Node { n.SetSliceBounds(low, high, max) if (n.Op == OSLICE || n.Op == OSLICESTR) && low == nil && high == nil { // Reduce x[:] to x. - if Debug_slice > 0 { + if Debug.Slice > 0 { Warn("slice: omit slice operation") } return n.Left @@ -3262,7 +3262,7 @@ func walkcompare(n *Node, init *Nodes) *Node { switch t.Etype { default: - if Debug_libfuzzer != 0 && t.IsInteger() { + if Debug.Libfuzzer != 0 && t.IsInteger() { n.Left = cheapexpr(n.Left, init) n.Right = cheapexpr(n.Right, init) @@ -4087,5 +4087,5 @@ func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node { // function fn at a given level. See debugHelpFooter for defined // levels. 
func checkPtr(fn *Node, level int) bool { - return Debug_checkptr >= level && fn.Func.Pragma&NoCheckPtr == 0 + return Debug.Checkptr >= level && fn.Func.Pragma&NoCheckPtr == 0 } diff --git a/src/cmd/compile/internal/mips/ssa.go b/src/cmd/compile/internal/mips/ssa.go index 9d11c6bf53ae7..1d2e2c79e6ea9 100644 --- a/src/cmd/compile/internal/mips/ssa.go +++ b/src/cmd/compile/internal/mips/ssa.go @@ -766,7 +766,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if logopt.Enabled() { logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) } - if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + if gc.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers gc.Warnl(v.Pos, "generated nil check") } case ssa.OpMIPSFPFlagTrue, diff --git a/src/cmd/compile/internal/mips64/ssa.go b/src/cmd/compile/internal/mips64/ssa.go index 2727c4d8a8340..067b8158c939f 100644 --- a/src/cmd/compile/internal/mips64/ssa.go +++ b/src/cmd/compile/internal/mips64/ssa.go @@ -724,7 +724,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if logopt.Enabled() { logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) } - if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + if gc.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers gc.Warnl(v.Pos, "generated nil check") } case ssa.OpMIPS64FPFlagTrue, diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go index 3e20c44a4c72b..f0e7c4192336f 100644 --- a/src/cmd/compile/internal/ppc64/ssa.go +++ b/src/cmd/compile/internal/ppc64/ssa.go @@ -1852,7 +1852,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if logopt.Enabled() { logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) } - if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + if gc.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers gc.Warnl(v.Pos, "generated nil check") } diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go index 0beb5b4bd180a..d49927ee04e93 100644 --- a/src/cmd/compile/internal/riscv64/ssa.go +++ b/src/cmd/compile/internal/riscv64/ssa.go @@ -586,7 +586,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { gc.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = riscv.REG_ZERO - if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers + if gc.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers gc.Warnl(v.Pos, "generated nil check") } diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go index 8037357131c79..cb13f8d3c0d28 100644 --- a/src/cmd/compile/internal/s390x/ssa.go +++ b/src/cmd/compile/internal/s390x/ssa.go @@ -642,7 +642,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if logopt.Enabled() { logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) } - if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + if gc.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers gc.Warnl(v.Pos, "generated nil check") } case ssa.OpS390XMVC: diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go index a36fbca4e02bd..3f05515b9a6e5 100644 --- a/src/cmd/compile/internal/wasm/ssa.go +++ b/src/cmd/compile/internal/wasm/ssa.go @@ -165,7 +165,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if logopt.Enabled() { 
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) } - if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + if gc.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers gc.Warnl(v.Pos, "generated nil check") } diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go index fbf76d0c5efb9..65d7e75a533c2 100644 --- a/src/cmd/compile/internal/x86/ssa.go +++ b/src/cmd/compile/internal/x86/ssa.go @@ -850,7 +850,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if logopt.Enabled() { logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) } - if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + if gc.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers gc.Warnl(v.Pos, "generated nil check") } case ssa.OpClobber: From eb3086e5a8958723ae696ea48d4cc7981c6779fa Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 16 Nov 2020 01:44:47 -0500 Subject: [PATCH 037/474] [dev.regabi] cmd/compile: finish cleanup of Debug parsing Now that the debug settings are in a struct, use struct tags to set the usage messages and use reflection to populate debugtab, much like we did for the Flag struct. Change-Id: Id2ba30c30a9158c062527715a68bf4dd94679457 Reviewed-on: https://go-review.googlesource.com/c/go/+/272247 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/debug.go | 261 +++++++++++++++------------ src/cmd/compile/internal/gc/flag.go | 33 ++-- src/cmd/compile/internal/gc/main.go | 3 +- 3 files changed, 162 insertions(+), 135 deletions(-) diff --git a/src/cmd/compile/internal/gc/debug.go b/src/cmd/compile/internal/gc/debug.go index f6be3d57b0be5..98e6631e5b108 100644 --- a/src/cmd/compile/internal/gc/debug.go +++ b/src/cmd/compile/internal/gc/debug.go @@ -2,149 +2,176 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// Debug arguments, set by -d flag. + package gc import ( "fmt" "log" "os" + "reflect" "strconv" "strings" - "cmd/compile/internal/ssa" "cmd/internal/objabi" ) -// Debug arguments. -// These can be specified with the -d flag, as in "-d nil" -// to set the debug_checknil variable. -// Multiple options can be comma-separated. -// Each option accepts an optional argument, as in "gcprog=2" -var debugtab = []struct { +// Debug holds the parsed debugging configuration values. +var Debug = DebugFlags{ + Fieldtrack: &objabi.Fieldtrack_enabled, +} + +// DebugFlags defines the debugging configuration values (see var Debug). +// Each struct field is a different value, named for the lower-case of the field name. +// Each field must be an int or string and must have a `help` struct tag. +// +// The -d option takes a comma-separated list of settings. +// Each setting is name=value; for ints, name is short for name=1. 
+type DebugFlags struct { + Append int `help:"print information about append compilation"` + Checkptr int `help:"instrument unsafe pointer conversions"` + Closure int `help:"print information about closure compilation"` + CompileLater int `help:"compile functions as late as possible"` + DclStack int `help:"run internal dclstack check"` + Defer int `help:"print information about defer compilation"` + DisableNil int `help:"disable nil checks"` + DumpPtrs int `help:"show Node pointers values in dump output"` + DwarfInl int `help:"print information about DWARF inlined function creation"` + Export int `help:"print export data"` + Fieldtrack *int `help:"enable field tracking"` + GCProg int `help:"print dump of GC programs"` + Libfuzzer int `help:"enable coverage instrumentation for libfuzzer"` + LocationLists int `help:"print information about DWARF location list creation"` + Nil int `help:"print information about nil checks"` + PCTab string `help:"print named pc-value table"` + Panic int `help:"show all compiler panics"` + Slice int `help:"print information about slice compilation"` + SoftFloat int `help:"force compiler to emit soft-float code"` + TypeAssert int `help:"print information about type assertion inlining"` + TypecheckInl int `help:"eager typechecking of inline function bodies"` + WB int `help:"print information about write barriers"` + + any bool // set when any of the values have been set +} + +// Any reports whether any of the debug flags have been set. +func (d *DebugFlags) Any() bool { return d.any } + +type debugField struct { name string help string - val interface{} // must be *int or *string -}{ - {"append", "print information about append compilation", &Debug.Append}, - {"checkptr", "instrument unsafe pointer conversions", &Debug.Checkptr}, - {"closure", "print information about closure compilation", &Debug.Closure}, - {"compilelater", "compile functions as late as possible", &Debug.CompileLater}, - {"disablenil", "disable nil checks", &Debug.DisableNil}, - {"dclstack", "run internal dclstack check", &Debug.DclStack}, - {"dumpptrs", "show Node pointer values in Dump/dumplist output", &Debug.DumpPtrs}, - {"gcprog", "print dump of GC programs", &Debug.GCProg}, - {"libfuzzer", "coverage instrumentation for libfuzzer", &Debug.Libfuzzer}, - {"nil", "print information about nil checks", &Debug.Nil}, - {"panic", "do not hide any compiler panic", &Debug.Panic}, - {"slice", "print information about slice compilation", &Debug.Slice}, - {"typeassert", "print information about type assertion inlining", &Debug.TypeAssert}, - {"wb", "print information about write barriers", &Debug.WB}, - {"export", "print export data", &Debug.Export}, - {"pctab", "print named pc-value table", &Debug.PCTab}, - {"locationlists", "print information about DWARF location list creation", &Debug.LocationLists}, - {"typecheckinl", "eager typechecking of inline function bodies", &Debug.TypecheckInl}, - {"dwarfinl", "print information about DWARF inlined function creation", &Debug.DwarfInl}, - {"softfloat", "force compiler to emit soft-float code", &Debug.SoftFloat}, - {"defer", "print information about defer compilation", &Debug.Defer}, - {"fieldtrack", "enable fieldtracking", &objabi.Fieldtrack_enabled}, + val interface{} // *int or *string } -var Debug struct { - Append int - Checkptr int - Closure int - CompileLater int - DisableNil int - DclStack int - GCProg int - Libfuzzer int - Nil int - Panic int - Slice int - TypeAssert int - WB int - Export int - PCTab string - LocationLists int - TypecheckInl int - 
DwarfInl int - SoftFloat int - Defer int - DumpPtrs int +var debugTab []debugField + +func init() { + v := reflect.ValueOf(&Debug).Elem() + t := v.Type() + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Name == "any" { + continue + } + name := strings.ToLower(f.Name) + help := f.Tag.Get("help") + if help == "" { + panic(fmt.Sprintf("base.Debug.%s is missing help text", f.Name)) + } + ptr := v.Field(i).Addr().Interface() + switch ptr.(type) { + default: + panic(fmt.Sprintf("base.Debug.%s has invalid type %v (must be int or string)", f.Name, f.Type)) + case *int, *string: + // ok + case **int: + ptr = *ptr.(**int) // record the *int itself + } + debugTab = append(debugTab, debugField{name, help, ptr}) + } } -func parseDebug() { +// DebugSSA is called to set a -d ssa/... option. +// If nil, those options are reported as invalid options. +// If DebugSSA returns a non-empty string, that text is reported as a compiler error. +var DebugSSA func(phase, flag string, val int, valString string) string + +// parseDebug parses the -d debug string argument. +func parseDebug(debugstr string) { // parse -d argument - if Flag.LowerD != "" { - Split: - for _, name := range strings.Split(Flag.LowerD, ",") { - if name == "" { - continue - } - // display help about the -d option itself and quit - if name == "help" { - fmt.Print(debugHelpHeader) - maxLen := len("ssa/help") - for _, t := range debugtab { - if len(t.name) > maxLen { - maxLen = len(t.name) - } - } - for _, t := range debugtab { - fmt.Printf("\t%-*s\t%s\n", maxLen, t.name, t.help) + if debugstr == "" { + return + } + Debug.any = true +Split: + for _, name := range strings.Split(debugstr, ",") { + if name == "" { + continue + } + // display help about the -d option itself and quit + if name == "help" { + fmt.Print(debugHelpHeader) + maxLen := len("ssa/help") + for _, t := range debugTab { + if len(t.name) > maxLen { + maxLen = len(t.name) } - // ssa options have their own help - fmt.Printf("\t%-*s\t%s\n", maxLen, "ssa/help", "print help about SSA debugging") - fmt.Print(debugHelpFooter) - os.Exit(0) } - val, valstring, haveInt := 1, "", true - if i := strings.IndexAny(name, "=:"); i >= 0 { - var err error - name, valstring = name[:i], name[i+1:] - val, err = strconv.Atoi(valstring) - if err != nil { - val, haveInt = 1, false - } + for _, t := range debugTab { + fmt.Printf("\t%-*s\t%s\n", maxLen, t.name, t.help) } - for _, t := range debugtab { - if t.name != name { - continue - } - switch vp := t.val.(type) { - case nil: - // Ignore - case *string: - *vp = valstring - case *int: - if !haveInt { - log.Fatalf("invalid debug value %v", name) - } - *vp = val - default: - panic("bad debugtab type") - } - continue Split + // ssa options have their own help + fmt.Printf("\t%-*s\t%s\n", maxLen, "ssa/help", "print help about SSA debugging") + fmt.Print(debugHelpFooter) + os.Exit(0) + } + val, valstring, haveInt := 1, "", true + if i := strings.IndexAny(name, "=:"); i >= 0 { + var err error + name, valstring = name[:i], name[i+1:] + val, err = strconv.Atoi(valstring) + if err != nil { + val, haveInt = 1, false } - // special case for ssa for now - if strings.HasPrefix(name, "ssa/") { - // expect form ssa/phase/flag - // e.g. 
-d=ssa/generic_cse/time - // _ in phase name also matches space - phase := name[4:] - flag := "debug" // default flag is debug - if i := strings.Index(phase, "/"); i >= 0 { - flag = phase[i+1:] - phase = phase[:i] - } - err := ssa.PhaseOption(phase, flag, val, valstring) - if err != "" { - log.Fatalf(err) + } + for _, t := range debugTab { + if t.name != name { + continue + } + switch vp := t.val.(type) { + case nil: + // Ignore + case *string: + *vp = valstring + case *int: + if !haveInt { + log.Fatalf("invalid debug value %v", name) } - continue Split + *vp = val + default: + panic("bad debugtab type") + } + continue Split + } + // special case for ssa for now + if DebugSSA != nil && strings.HasPrefix(name, "ssa/") { + // expect form ssa/phase/flag + // e.g. -d=ssa/generic_cse/time + // _ in phase name also matches space + phase := name[4:] + flag := "debug" // default flag is debug + if i := strings.Index(phase, "/"); i >= 0 { + flag = phase[i+1:] + phase = phase[:i] + } + err := DebugSSA(phase, flag, val, valstring) + if err != "" { + log.Fatalf(err) } - log.Fatalf("unknown debug key -d %s\n", name) + continue Split } + log.Fatalf("unknown debug key -d %s\n", name) } } diff --git a/src/cmd/compile/internal/gc/flag.go b/src/cmd/compile/internal/gc/flag.go index 06b0a88ba350a..29aac3aa28027 100644 --- a/src/cmd/compile/internal/gc/flag.go +++ b/src/cmd/compile/internal/gc/flag.go @@ -63,19 +63,19 @@ type CmdFlags struct { // V is added by objabi.AddVersionFlag W CountFlag "help:\"debug parse tree after type checking\"" - LowerC int "help:\"concurrency during compilation (1 means no concurrency)\"" - LowerD string "help:\"enable debugging settings; try -d help\"" - LowerE CountFlag "help:\"no limit on number of errors reported\"" - LowerH CountFlag "help:\"halt on error\"" - LowerJ CountFlag "help:\"debug runtime-initialized variables\"" - LowerL CountFlag "help:\"disable inlining\"" - LowerM CountFlag "help:\"print optimization decisions\"" - LowerO string "help:\"write output to `file`\"" - LowerP *string "help:\"set expected package import `path`\"" // &Ctxt.Pkgpath, set below - LowerR CountFlag "help:\"debug generated wrappers\"" - LowerT bool "help:\"enable tracing for debugging the compiler\"" - LowerW CountFlag "help:\"debug type checking\"" - LowerV *bool "help:\"increase debug verbosity\"" + LowerC int "help:\"concurrency during compilation (1 means no concurrency)\"" + LowerD func(string) "help:\"enable debugging settings; try -d help\"" + LowerE CountFlag "help:\"no limit on number of errors reported\"" + LowerH CountFlag "help:\"halt on error\"" + LowerJ CountFlag "help:\"debug runtime-initialized variables\"" + LowerL CountFlag "help:\"disable inlining\"" + LowerM CountFlag "help:\"print optimization decisions\"" + LowerO string "help:\"write output to `file`\"" + LowerP *string "help:\"set expected package import `path`\"" // &Ctxt.Pkgpath, set below + LowerR CountFlag "help:\"debug generated wrappers\"" + LowerT bool "help:\"enable tracing for debugging the compiler\"" + LowerW CountFlag "help:\"debug type checking\"" + LowerV *bool "help:\"increase debug verbosity\"" // Special characters Percent int "flag:\"%\" help:\"debug non-static initializers\"" @@ -137,6 +137,7 @@ func ParseFlags() { Flag.I = addImportDir Flag.LowerC = 1 + Flag.LowerD = parseDebug Flag.LowerP = &Ctxt.Pkgpath Flag.LowerV = &Ctxt.Debugvlog @@ -174,7 +175,7 @@ func ParseFlags() { Ctxt.Flag_optimize = Flag.N == 0 Ctxt.Debugasm = int(Flag.S) - if flag.NArg() < 1 && Flag.LowerD != "help" && Flag.LowerD != 
"ssa/help" { + if flag.NArg() < 1 { usage() } @@ -221,8 +222,6 @@ func ParseFlags() { log.Fatalf("cannot use concurrent backend compilation with provided flags; invoked as %v", os.Args) } - parseDebug() - if Flag.CompilingRuntime { // Runtime can't use -d=checkptr, at least not yet. Debug.Checkptr = 0 @@ -330,7 +329,7 @@ func concurrentBackendAllowed() bool { // while writing the object file, and that is non-concurrent. // Adding Debug_vlog, however, causes Debug.S to also print // while flushing the plist, which happens concurrently. - if Ctxt.Debugvlog || Flag.LowerD != "" || Flag.Live > 0 { + if Ctxt.Debugvlog || Debug.Any() || Flag.Live > 0 { return false } // TODO: Test and delete this condition. diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 0d41f81a52b14..2794ba369454b 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -10,7 +10,7 @@ import ( "bufio" "bytes" "cmd/compile/internal/logopt" - + "cmd/compile/internal/ssa" "cmd/compile/internal/types" "cmd/internal/bio" "cmd/internal/dwarf" @@ -112,6 +112,7 @@ func Main(archInit func(*Arch)) { // pseudo-package used for methods with anonymous receivers gopkg = types.NewPkg("go", "") + DebugSSA = ssa.PhaseOption ParseFlags() // Record flags that affect the build result. (And don't From 26b66fd60b258d323d7b8df2c489d5bd292c0809 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 19 Nov 2020 20:49:23 -0500 Subject: [PATCH 038/474] [dev.regabi] cmd/compile: introduce cmd/compile/internal/base [generated] Move Flag, Debug, Ctxt, Exit, and error messages to new package cmd/compile/internal/base. These are the core functionality that everything in gc uses and which otherwise prevent splitting any other code out of gc into different packages. A minor milestone: the compiler source code no longer contains the string "yy". 
[git-generate] cd src/cmd/compile/internal/gc rf ' mv atExit AtExit mv Ctxt atExitFuncs AtExit Exit base.go mv lineno Pos mv linestr FmtPos mv flusherrors FlushErrors mv yyerror Errorf mv yyerrorl ErrorfAt mv yyerrorv ErrorfVers mv noder.yyerrorpos noder.errorAt mv Warnl WarnfAt mv errorexit ErrorExit mv base.go debug.go flag.go print.go cmd/compile/internal/base ' : # update comments sed -i '' 's/yyerrorl/ErrorfAt/g; s/yyerror/Errorf/g' *.go : # bootstrap.go is not built by default so invisible to rf sed -i '' 's/Fatalf/base.Fatalf/' bootstrap.go goimports -w bootstrap.go : # update cmd/dist to add internal/base cd ../../../dist sed -i '' '/internal.amd64/a\ "cmd/compile/internal/base", ' buildtool.go gofmt -w buildtool.go Change-Id: I59903c7084222d6eaee38823fd222159ba24a31a Reviewed-on: https://go-review.googlesource.com/c/go/+/272250 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/amd64/ggen.go | 3 +- src/cmd/compile/internal/amd64/ssa.go | 9 +- src/cmd/compile/internal/arm/ssa.go | 7 +- src/cmd/compile/internal/arm64/ssa.go | 7 +- src/cmd/compile/internal/base/base.go | 28 + .../compile/internal/{gc => base}/debug.go | 2 +- src/cmd/compile/internal/{gc => base}/flag.go | 3 +- .../compile/internal/{gc => base}/print.go | 53 +- src/cmd/compile/internal/gc/alg.go | 29 +- src/cmd/compile/internal/gc/align.go | 51 +- src/cmd/compile/internal/gc/bootstrap.go | 7 +- src/cmd/compile/internal/gc/bv.go | 12 +- src/cmd/compile/internal/gc/closure.go | 45 +- src/cmd/compile/internal/gc/const.go | 91 ++-- src/cmd/compile/internal/gc/dcl.go | 127 ++--- src/cmd/compile/internal/gc/dump.go | 3 +- src/cmd/compile/internal/gc/dwinl.go | 65 +-- src/cmd/compile/internal/gc/embed.go | 43 +- src/cmd/compile/internal/gc/esc.go | 53 +- src/cmd/compile/internal/gc/escape.go | 99 ++-- src/cmd/compile/internal/gc/export.go | 31 +- src/cmd/compile/internal/gc/fmt.go | 15 +- src/cmd/compile/internal/gc/gen.go | 9 +- src/cmd/compile/internal/gc/go.go | 7 +- src/cmd/compile/internal/gc/gsubr.go | 33 +- src/cmd/compile/internal/gc/iexport.go | 57 ++- src/cmd/compile/internal/gc/iimport.go | 75 +-- src/cmd/compile/internal/gc/init.go | 5 +- src/cmd/compile/internal/gc/initorder.go | 22 +- src/cmd/compile/internal/gc/inl.go | 121 ++--- src/cmd/compile/internal/gc/lex.go | 3 +- src/cmd/compile/internal/gc/main.go | 261 +++++----- src/cmd/compile/internal/gc/noder.go | 115 ++--- src/cmd/compile/internal/gc/obj.go | 109 ++-- src/cmd/compile/internal/gc/order.go | 45 +- src/cmd/compile/internal/gc/pgen.go | 69 +-- src/cmd/compile/internal/gc/plive.go | 41 +- src/cmd/compile/internal/gc/racewalk.go | 15 +- src/cmd/compile/internal/gc/range.go | 25 +- src/cmd/compile/internal/gc/reflect.go | 119 ++--- src/cmd/compile/internal/gc/scope.go | 3 +- src/cmd/compile/internal/gc/select.go | 43 +- src/cmd/compile/internal/gc/sinit.go | 47 +- src/cmd/compile/internal/gc/ssa.go | 143 +++--- src/cmd/compile/internal/gc/subr.go | 97 ++-- src/cmd/compile/internal/gc/swt.go | 51 +- src/cmd/compile/internal/gc/syntax.go | 33 +- src/cmd/compile/internal/gc/trace.go | 8 +- src/cmd/compile/internal/gc/typecheck.go | 477 +++++++++--------- src/cmd/compile/internal/gc/universe.go | 5 +- src/cmd/compile/internal/gc/unsafe.go | 14 +- src/cmd/compile/internal/gc/util.go | 59 +-- src/cmd/compile/internal/gc/walk.go | 179 +++---- src/cmd/compile/internal/mips/ggen.go | 5 +- src/cmd/compile/internal/mips/ssa.go | 7 +- src/cmd/compile/internal/mips64/ssa.go | 7 +- src/cmd/compile/internal/ppc64/ggen.go | 9 +- 
src/cmd/compile/internal/ppc64/ssa.go | 9 +- src/cmd/compile/internal/riscv64/ggen.go | 3 +- src/cmd/compile/internal/riscv64/ssa.go | 15 +- src/cmd/compile/internal/s390x/ggen.go | 3 +- src/cmd/compile/internal/s390x/ssa.go | 7 +- src/cmd/compile/internal/wasm/ssa.go | 7 +- src/cmd/compile/internal/x86/galign.go | 5 +- src/cmd/compile/internal/x86/ssa.go | 13 +- src/cmd/compile/main.go | 3 +- src/cmd/dist/buildtool.go | 2 + 67 files changed, 1626 insertions(+), 1542 deletions(-) create mode 100644 src/cmd/compile/internal/base/base.go rename src/cmd/compile/internal/{gc => base}/debug.go (99%) rename src/cmd/compile/internal/{gc => base}/flag.go (99%) rename src/cmd/compile/internal/{gc => base}/print.go (89%) diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go index 0c1456f4d0ded..ec98b8cca126c 100644 --- a/src/cmd/compile/internal/amd64/ggen.go +++ b/src/cmd/compile/internal/amd64/ggen.go @@ -5,6 +5,7 @@ package amd64 import ( + "cmd/compile/internal/base" "cmd/compile/internal/gc" "cmd/internal/obj" "cmd/internal/obj/x86" @@ -64,7 +65,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Pr if cnt%int64(gc.Widthreg) != 0 { // should only happen with nacl if cnt%int64(gc.Widthptr) != 0 { - gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt) + base.Fatalf("zerorange count not a multiple of widthptr %d", cnt) } if *state&ax == 0 { p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0) diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 1f2d626721ed5..5e3b962076b0d 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -8,6 +8,7 @@ import ( "fmt" "math" + "cmd/compile/internal/base" "cmd/compile/internal/gc" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" @@ -975,7 +976,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { r := v.Reg() // See the comments in cmd/internal/obj/x86/obj6.go // near CanUse1InsnTLS for a detailed explanation of these instructions. 
- if x86.CanUse1InsnTLS(gc.Ctxt) { + if x86.CanUse1InsnTLS(base.Ctxt) { // MOVQ (TLS), r p := s.Prog(x86.AMOVQ) p.From.Type = obj.TYPE_MEM @@ -1017,7 +1018,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { } p := s.Prog(mov) p.From.Type = obj.TYPE_ADDR - p.From.Offset = -gc.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures + p.From.Offset = -base.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures p.From.Name = obj.NAME_PARAM p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() @@ -1164,8 +1165,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if logopt.Enabled() { logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) } - if gc.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers - gc.Warnl(v.Pos, "generated nil check") + if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + base.WarnfAt(v.Pos, "generated nil check") } case ssa.OpAMD64MOVBatomicload, ssa.OpAMD64MOVLatomicload, ssa.OpAMD64MOVQatomicload: p := s.Prog(v.Op.Asm()) diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go index 82a5172ec7f96..7d34cc517005f 100644 --- a/src/cmd/compile/internal/arm/ssa.go +++ b/src/cmd/compile/internal/arm/ssa.go @@ -9,6 +9,7 @@ import ( "math" "math/bits" + "cmd/compile/internal/base" "cmd/compile/internal/gc" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" @@ -741,8 +742,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if logopt.Enabled() { logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) } - if gc.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers - gc.Warnl(v.Pos, "generated nil check") + if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + base.WarnfAt(v.Pos, "generated nil check") } case ssa.OpARMLoweredZero: // MOVW.P Rarg2, 4(R1) @@ -849,7 +850,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // caller's SP is FixedFrameSize below the address of the first arg p := s.Prog(arm.AMOVW) p.From.Type = obj.TYPE_ADDR - p.From.Offset = -gc.Ctxt.FixedFrameSize() + p.From.Offset = -base.Ctxt.FixedFrameSize() p.From.Name = obj.NAME_PARAM p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go index dcbd8f947400c..5e6f607708df2 100644 --- a/src/cmd/compile/internal/arm64/ssa.go +++ b/src/cmd/compile/internal/arm64/ssa.go @@ -7,6 +7,7 @@ package arm64 import ( "math" + "cmd/compile/internal/base" "cmd/compile/internal/gc" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" @@ -1038,8 +1039,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if logopt.Enabled() { logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) } - if gc.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers - gc.Warnl(v.Pos, "generated nil check") + if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers + base.WarnfAt(v.Pos, "generated nil check") } case ssa.OpARM64Equal, ssa.OpARM64NotEqual, @@ -1068,7 +1069,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // caller's SP is FixedFrameSize below the address of the first arg p := s.Prog(arm64.AMOVD) p.From.Type = obj.TYPE_ADDR - p.From.Offset = -gc.Ctxt.FixedFrameSize() + p.From.Offset = -base.Ctxt.FixedFrameSize() p.From.Name = obj.NAME_PARAM p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() diff --git a/src/cmd/compile/internal/base/base.go 
b/src/cmd/compile/internal/base/base.go new file mode 100644 index 0000000000000..e26b378472da9 --- /dev/null +++ b/src/cmd/compile/internal/base/base.go @@ -0,0 +1,28 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package base + +import ( + "os" + + "cmd/internal/obj" +) + +var Ctxt *obj.Link + +var atExitFuncs []func() + +func AtExit(f func()) { + atExitFuncs = append(atExitFuncs, f) +} + +func Exit(code int) { + for i := len(atExitFuncs) - 1; i >= 0; i-- { + f := atExitFuncs[i] + atExitFuncs = atExitFuncs[:i] + f() + } + os.Exit(code) +} diff --git a/src/cmd/compile/internal/gc/debug.go b/src/cmd/compile/internal/base/debug.go similarity index 99% rename from src/cmd/compile/internal/gc/debug.go rename to src/cmd/compile/internal/base/debug.go index 98e6631e5b108..45a552a4d95e3 100644 --- a/src/cmd/compile/internal/gc/debug.go +++ b/src/cmd/compile/internal/base/debug.go @@ -4,7 +4,7 @@ // Debug arguments, set by -d flag. -package gc +package base import ( "fmt" diff --git a/src/cmd/compile/internal/gc/flag.go b/src/cmd/compile/internal/base/flag.go similarity index 99% rename from src/cmd/compile/internal/gc/flag.go rename to src/cmd/compile/internal/base/flag.go index 29aac3aa28027..aadc70f49645f 100644 --- a/src/cmd/compile/internal/gc/flag.go +++ b/src/cmd/compile/internal/base/flag.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package base import ( "encoding/json" @@ -13,7 +13,6 @@ import ( "os" "reflect" "runtime" - "strings" "cmd/internal/objabi" diff --git a/src/cmd/compile/internal/gc/print.go b/src/cmd/compile/internal/base/print.go similarity index 89% rename from src/cmd/compile/internal/gc/print.go rename to src/cmd/compile/internal/base/print.go index 345f433fe4024..6831b3ada314f 100644 --- a/src/cmd/compile/internal/gc/print.go +++ b/src/cmd/compile/internal/base/print.go @@ -2,16 +2,17 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package base import ( - "cmd/internal/objabi" - "cmd/internal/src" "fmt" "os" "runtime/debug" "sort" "strings" + + "cmd/internal/objabi" + "cmd/internal/src" ) // An errorMsg is a queued error message, waiting to be printed. @@ -22,7 +23,7 @@ type errorMsg struct { // Pos is the current source position being processed, // printed by Errorf, ErrorfLang, Fatalf, and Warnf. -var lineno src.XPos +var Pos src.XPos var ( errorMsgs []errorMsg @@ -46,7 +47,7 @@ func addErrorMsg(pos src.XPos, format string, args ...interface{}) { // Only add the position if know the position. // See issue golang.org/issue/11361. if pos.IsKnown() { - msg = fmt.Sprintf("%v: %s", linestr(pos), msg) + msg = fmt.Sprintf("%v: %s", FmtPos(pos), msg) } errorMsgs = append(errorMsgs, errorMsg{ pos: pos, @@ -55,7 +56,7 @@ func addErrorMsg(pos src.XPos, format string, args ...interface{}) { } // FmtPos formats pos as a file:line string. -func linestr(pos src.XPos) string { +func FmtPos(pos src.XPos) string { if Ctxt == nil { return "???" } @@ -71,7 +72,7 @@ func (x byPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] } // FlushErrors sorts errors seen so far by line number, prints them to stdout, // and empties the errors array. 
-func flusherrors() { +func FlushErrors() { Ctxt.Bso.Flush() if len(errorMsgs) == 0 { return @@ -101,12 +102,12 @@ func sameline(a, b src.XPos) bool { } // Errorf reports a formatted error at the current line. -func yyerror(format string, args ...interface{}) { - yyerrorl(lineno, format, args...) +func Errorf(format string, args ...interface{}) { + ErrorfAt(Pos, format, args...) } // ErrorfAt reports a formatted error message at pos. -func yyerrorl(pos src.XPos, format string, args ...interface{}) { +func ErrorfAt(pos src.XPos, format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) if strings.HasPrefix(msg, "syntax error") { @@ -134,15 +135,15 @@ func yyerrorl(pos src.XPos, format string, args ...interface{}) { hcrash() if numErrors >= 10 && Flag.LowerE == 0 { - flusherrors() - fmt.Printf("%v: too many errors\n", linestr(pos)) - errorexit() + FlushErrors() + fmt.Printf("%v: too many errors\n", FmtPos(pos)) + ErrorExit() } } // ErrorfVers reports that a language feature (format, args) requires a later version of Go. -func yyerrorv(lang string, format string, args ...interface{}) { - yyerror("%s requires %s or later (-lang was set to %s; check go.mod)", fmt.Sprintf(format, args...), lang, Flag.Lang) +func ErrorfVers(lang string, format string, args ...interface{}) { + Errorf("%s requires %s or later (-lang was set to %s; check go.mod)", fmt.Sprintf(format, args...), lang, Flag.Lang) } // UpdateErrorDot is a clumsy hack that rewrites the last error, @@ -163,17 +164,17 @@ func UpdateErrorDot(line string, name, expr string) { // so this should be used only when the user has opted in // to additional output by setting a particular flag. func Warn(format string, args ...interface{}) { - Warnl(lineno, format, args...) + WarnfAt(Pos, format, args...) } // WarnfAt reports a formatted warning at pos. // In general the Go compiler does NOT generate warnings, // so this should be used only when the user has opted in // to additional output by setting a particular flag. -func Warnl(pos src.XPos, format string, args ...interface{}) { +func WarnfAt(pos src.XPos, format string, args ...interface{}) { addErrorMsg(pos, format, args...) if Flag.LowerM != 0 { - flusherrors() + FlushErrors() } } @@ -190,7 +191,7 @@ func Warnl(pos src.XPos, format string, args ...interface{}) { // // If -h has been specified, Fatalf panics to force the usual runtime info dump. func Fatalf(format string, args ...interface{}) { - FatalfAt(lineno, format, args...) + FatalfAt(Pos, format, args...) } // FatalfAt reports a fatal error - an internal problem - at pos and exits. @@ -206,10 +207,10 @@ func Fatalf(format string, args ...interface{}) { // // If -h has been specified, FatalfAt panics to force the usual runtime info dump. func FatalfAt(pos src.XPos, format string, args ...interface{}) { - flusherrors() + FlushErrors() if Debug.Panic != 0 || numErrors == 0 { - fmt.Printf("%v: internal compiler error: ", linestr(pos)) + fmt.Printf("%v: internal compiler error: ", FmtPos(pos)) fmt.Printf(format, args...) fmt.Printf("\n") @@ -227,13 +228,13 @@ func FatalfAt(pos src.XPos, format string, args ...interface{}) { } hcrash() - errorexit() + ErrorExit() } // hcrash crashes the compiler when -h is set, to find out where a message is generated. func hcrash() { if Flag.LowerH != 0 { - flusherrors() + FlushErrors() if Flag.LowerO != "" { os.Remove(Flag.LowerO) } @@ -243,8 +244,8 @@ func hcrash() { // ErrorExit handles an error-status exit. // It flushes any pending errors, removes the output file, and exits. 
-func errorexit() { - flusherrors() +func ErrorExit() { + FlushErrors() if Flag.LowerO != "" { os.Remove(Flag.LowerO) } @@ -254,6 +255,6 @@ func errorexit() { // ExitIfErrors calls ErrorExit if any errors have been reported. func ExitIfErrors() { if Errors() > 0 { - errorexit() + ErrorExit() } } diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index 87b905ed59e0f..517aaa4b814cf 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/obj" "fmt" @@ -203,7 +204,7 @@ func algtype1(t *types.Type) (AlgKind, *types.Type) { return ret, nil } - Fatalf("algtype1: unexpected type %v", t) + base.Fatalf("algtype1: unexpected type %v", t) return 0, nil } @@ -214,7 +215,7 @@ func genhash(t *types.Type) *obj.LSym { switch algtype(t) { default: // genhash is only called for types that have equality - Fatalf("genhash %v", t) + base.Fatalf("genhash %v", t) case AMEM0: return sysClosure("memhash0") case AMEM8: @@ -282,11 +283,11 @@ func genhash(t *types.Type) *obj.LSym { } sym := typesymprefix(".hash", t) - if Flag.LowerR != 0 { + if base.Flag.LowerR != 0 { fmt.Printf("genhash %v %v %v\n", closure, sym, t) } - lineno = autogeneratedPos // less confusing than end of input + base.Pos = autogeneratedPos // less confusing than end of input dclcontext = PEXTERN // func sym(p *T, h uintptr) uintptr @@ -374,7 +375,7 @@ func genhash(t *types.Type) *obj.LSym { r.List.Append(nh) fn.Nbody.Append(r) - if Flag.LowerR != 0 { + if base.Flag.LowerR != 0 { dumplist("genhash body", fn.Nbody) } @@ -387,7 +388,7 @@ func genhash(t *types.Type) *obj.LSym { typecheckslice(fn.Nbody.Slice(), ctxStmt) Curfn = nil - if Debug.DclStack != 0 { + if base.Debug.DclStack != 0 { testdclstack() } @@ -407,7 +408,7 @@ func hashfor(t *types.Type) *Node { switch a, _ := algtype1(t); a { case AMEM: - Fatalf("hashfor with AMEM type") + base.Fatalf("hashfor with AMEM type") case AINTER: sym = Runtimepkg.Lookup("interhash") case ANILINTER: @@ -509,13 +510,13 @@ func geneq(t *types.Type) *obj.LSym { return closure } sym := typesymprefix(".eq", t) - if Flag.LowerR != 0 { + if base.Flag.LowerR != 0 { fmt.Printf("geneq %v\n", t) } // Autogenerate code for equality of structs and arrays. - lineno = autogeneratedPos // less confusing than end of input + base.Pos = autogeneratedPos // less confusing than end of input dclcontext = PEXTERN // func sym(p, q *T) bool @@ -539,7 +540,7 @@ func geneq(t *types.Type) *obj.LSym { // so t must be either an array or a struct. switch t.Etype { default: - Fatalf("geneq %v", t) + base.Fatalf("geneq %v", t) case TARRAY: nelem := t.NumElem() @@ -753,7 +754,7 @@ func geneq(t *types.Type) *obj.LSym { // We should really do a generic CL that shares epilogues across // the board. See #24936. - if Flag.LowerR != 0 { + if base.Flag.LowerR != 0 { dumplist("geneq body", fn.Nbody) } @@ -766,7 +767,7 @@ func geneq(t *types.Type) *obj.LSym { typecheckslice(fn.Nbody.Slice(), ctxStmt) Curfn = nil - if Debug.DclStack != 0 { + if base.Debug.DclStack != 0 { testdclstack() } @@ -859,7 +860,7 @@ func eqstring(s, t *Node) (eqlen, eqmem *Node) { // eqtab must be evaluated before eqdata, and shortcircuiting is required. 
func eqinterface(s, t *Node) (eqtab, eqdata *Node) { if !types.Identical(s.Type, t.Type) { - Fatalf("eqinterface %v %v", s.Type, t.Type) + base.Fatalf("eqinterface %v %v", s.Type, t.Type) } // func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool) // func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool) @@ -949,7 +950,7 @@ func memrun(t *types.Type, start int) (size int64, next int) { // by padding. func ispaddedfield(t *types.Type, i int) bool { if !t.IsStruct() { - Fatalf("ispaddedfield called non-struct %v", t) + base.Fatalf("ispaddedfield called non-struct %v", t) } end := t.Width if i+1 < t.NumFields() { diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go index 563bd5030c07b..a8cbbfd322ccf 100644 --- a/src/cmd/compile/internal/gc/align.go +++ b/src/cmd/compile/internal/gc/align.go @@ -6,6 +6,7 @@ package gc import ( "bytes" + "cmd/compile/internal/base" "cmd/compile/internal/types" "fmt" "sort" @@ -21,7 +22,7 @@ var defercalc int func Rnd(o int64, r int64) int64 { if r < 1 || r > 8 || r&(r-1) != 0 { - Fatalf("rnd %d", r) + base.Fatalf("rnd %d", r) } return (o + r - 1) &^ (r - 1) } @@ -39,7 +40,7 @@ func expandiface(t *types.Type) { case langSupported(1, 14, t.Pkg()) && !explicit && types.Identical(m.Type, prev.Type): return default: - yyerrorl(m.Pos, "duplicate method %s", m.Sym.Name) + base.ErrorfAt(m.Pos, "duplicate method %s", m.Sym.Name) } methods = append(methods, m) } @@ -59,7 +60,7 @@ func expandiface(t *types.Type) { } if !m.Type.IsInterface() { - yyerrorl(m.Pos, "interface contains embedded non-interface %v", m.Type) + base.ErrorfAt(m.Pos, "interface contains embedded non-interface %v", m.Type) m.SetBroke(true) t.SetBroke(true) // Add to fields so that error messages @@ -83,7 +84,7 @@ func expandiface(t *types.Type) { sort.Sort(methcmp(methods)) if int64(len(methods)) >= thearch.MAXWIDTH/int64(Widthptr) { - yyerrorl(typePos(t), "interface too large") + base.ErrorfAt(typePos(t), "interface too large") } for i, m := range methods { m.Offset = int64(i) * int64(Widthptr) @@ -134,7 +135,7 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 { w := f.Type.Width if w < 0 { - Fatalf("invalid width %d", f.Type.Width) + base.Fatalf("invalid width %d", f.Type.Width) } if w == 0 { lastzero = o @@ -147,7 +148,7 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 { maxwidth = 1<<31 - 1 } if o >= maxwidth { - yyerrorl(typePos(errtype), "type %L too large", errtype) + base.ErrorfAt(typePos(errtype), "type %L too large", errtype) o = 8 // small but nonzero } } @@ -235,7 +236,7 @@ func reportTypeLoop(t *types.Type) { var l []*types.Type if !findTypeLoop(t, &l) { - Fatalf("failed to find type loop for: %v", t) + base.Fatalf("failed to find type loop for: %v", t) } // Rotate loop so that the earliest type declaration is first. @@ -250,11 +251,11 @@ func reportTypeLoop(t *types.Type) { var msg bytes.Buffer fmt.Fprintf(&msg, "invalid recursive type %v\n", l[0]) for _, t := range l { - fmt.Fprintf(&msg, "\t%v: %v refers to\n", linestr(typePos(t)), t) + fmt.Fprintf(&msg, "\t%v: %v refers to\n", base.FmtPos(typePos(t)), t) t.SetBroke(true) } - fmt.Fprintf(&msg, "\t%v: %v", linestr(typePos(l[0])), l[0]) - yyerrorl(typePos(l[0]), msg.String()) + fmt.Fprintf(&msg, "\t%v: %v", base.FmtPos(typePos(l[0])), l[0]) + base.ErrorfAt(typePos(l[0]), msg.String()) } // dowidth calculates and stores the size and alignment for t. 
@@ -268,7 +269,7 @@ func dowidth(t *types.Type) { return } if Widthptr == 0 { - Fatalf("dowidth without betypeinit") + base.Fatalf("dowidth without betypeinit") } if t == nil { @@ -292,7 +293,7 @@ func dowidth(t *types.Type) { return } t.SetBroke(true) - Fatalf("width not calculated: %v", t) + base.Fatalf("width not calculated: %v", t) } // break infinite recursion if the broken recursive type @@ -304,9 +305,9 @@ func dowidth(t *types.Type) { // defer checkwidth calls until after we're done defercheckwidth() - lno := lineno + lno := base.Pos if asNode(t.Nod) != nil { - lineno = asNode(t.Nod).Pos + base.Pos = asNode(t.Nod).Pos } t.Width = -2 @@ -327,7 +328,7 @@ func dowidth(t *types.Type) { var w int64 switch et { default: - Fatalf("dowidth: unknown type: %v", t) + base.Fatalf("dowidth: unknown type: %v", t) // compiler-specific stuff case TINT8, TUINT8, TBOOL: @@ -378,7 +379,7 @@ func dowidth(t *types.Type) { t1 := t.ChanArgs() dowidth(t1) // just in case if t1.Elem().Width >= 1<<16 { - yyerrorl(typePos(t1), "channel element type too large (>64kB)") + base.ErrorfAt(typePos(t1), "channel element type too large (>64kB)") } w = 1 // anything will do @@ -393,11 +394,11 @@ func dowidth(t *types.Type) { case TANY: // not a real type; should be replaced before use. - Fatalf("dowidth any") + base.Fatalf("dowidth any") case TSTRING: if sizeofString == 0 { - Fatalf("early dowidth string") + base.Fatalf("early dowidth string") } w = sizeofString t.Align = uint8(Widthptr) @@ -411,7 +412,7 @@ func dowidth(t *types.Type) { if t.Elem().Width != 0 { cap := (uint64(thearch.MAXWIDTH) - 1) / uint64(t.Elem().Width) if uint64(t.NumElem()) > cap { - yyerrorl(typePos(t), "type %L larger than address space", t) + base.ErrorfAt(typePos(t), "type %L larger than address space", t) } } w = t.NumElem() * t.Elem().Width @@ -427,7 +428,7 @@ func dowidth(t *types.Type) { case TSTRUCT: if t.IsFuncArgStruct() { - Fatalf("dowidth fn struct %v", t) + base.Fatalf("dowidth fn struct %v", t) } w = widstruct(t, t, 0, 1) @@ -447,24 +448,24 @@ func dowidth(t *types.Type) { w = widstruct(t1, t1.Results(), w, Widthreg) t1.Extra.(*types.Func).Argwid = w if w%int64(Widthreg) != 0 { - Warn("bad type %v %d\n", t1, w) + base.Warn("bad type %v %d\n", t1, w) } t.Align = 1 } if Widthptr == 4 && w != int64(int32(w)) { - yyerrorl(typePos(t), "type %v too large", t) + base.ErrorfAt(typePos(t), "type %v too large", t) } t.Width = w if t.Align == 0 { if w == 0 || w > 8 || w&(w-1) != 0 { - Fatalf("invalid alignment for %v", t) + base.Fatalf("invalid alignment for %v", t) } t.Align = uint8(w) } - lineno = lno + base.Pos = lno resumecheckwidth() } @@ -495,7 +496,7 @@ func checkwidth(t *types.Type) { // function arg structs should not be checked // outside of the enclosing function. 
if t.IsFuncArgStruct() { - Fatalf("checkwidth %v", t) + base.Fatalf("checkwidth %v", t) } if defercalc == 0 { diff --git a/src/cmd/compile/internal/gc/bootstrap.go b/src/cmd/compile/internal/gc/bootstrap.go index 967f75a9ac3a7..2e13d6b57acd4 100644 --- a/src/cmd/compile/internal/gc/bootstrap.go +++ b/src/cmd/compile/internal/gc/bootstrap.go @@ -6,8 +6,11 @@ package gc -import "runtime" +import ( + "cmd/compile/internal/base" + "runtime" +) func startMutexProfiling() { - Fatalf("mutex profiling unavailable in version %v", runtime.Version()) + base.Fatalf("mutex profiling unavailable in version %v", runtime.Version()) } diff --git a/src/cmd/compile/internal/gc/bv.go b/src/cmd/compile/internal/gc/bv.go index e32ab97ad52d2..d82851e7cb494 100644 --- a/src/cmd/compile/internal/gc/bv.go +++ b/src/cmd/compile/internal/gc/bv.go @@ -6,6 +6,8 @@ package gc import ( "math/bits" + + "cmd/compile/internal/base" ) const ( @@ -35,7 +37,7 @@ func bvbulkalloc(nbit int32, count int32) bulkBvec { nword := (nbit + wordBits - 1) / wordBits size := int64(nword) * int64(count) if int64(int32(size*4)) != size*4 { - Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size) + base.Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size) } return bulkBvec{ words: make([]uint32, size), @@ -52,7 +54,7 @@ func (b *bulkBvec) next() bvec { func (bv1 bvec) Eq(bv2 bvec) bool { if bv1.n != bv2.n { - Fatalf("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n) + base.Fatalf("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n) } for i, x := range bv1.b { if x != bv2.b[i] { @@ -68,7 +70,7 @@ func (dst bvec) Copy(src bvec) { func (bv bvec) Get(i int32) bool { if i < 0 || i >= bv.n { - Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n) + base.Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n) } mask := uint32(1 << uint(i%wordBits)) return bv.b[i>>wordShift]&mask != 0 @@ -76,7 +78,7 @@ func (bv bvec) Get(i int32) bool { func (bv bvec) Set(i int32) { if i < 0 || i >= bv.n { - Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n) + base.Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n) } mask := uint32(1 << uint(i%wordBits)) bv.b[i/wordBits] |= mask @@ -84,7 +86,7 @@ func (bv bvec) Set(i int32) { func (bv bvec) Unset(i int32) { if i < 0 || i >= bv.n { - Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.n) + base.Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.n) } mask := uint32(1 << uint(i%wordBits)) bv.b[i/wordBits] &^= mask diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index c25a4469997e4..ad255c9c06a6c 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/syntax" "cmd/compile/internal/types" "cmd/internal/src" @@ -101,7 +102,7 @@ func typecheckclosure(clo *Node, top int) { if !n.Name.Captured() { n.Name.SetCaptured(true) if n.Name.Decldepth == 0 { - Fatalf("typecheckclosure: var %S does not have decldepth assigned", n) + base.Fatalf("typecheckclosure: var %S does not have decldepth assigned", n) } // Ignore assignments to the variable in straightline code @@ -171,8 +172,8 @@ var capturevarscomplete bool // We use value capturing for values <= 128 bytes that are never reassigned // after capturing (effectively constant). 
func capturevars(dcl *Node) { - lno := lineno - lineno = dcl.Pos + lno := base.Pos + base.Pos = dcl.Pos fn := dcl.Func cvars := fn.ClosureVars.Slice() out := cvars[:0] @@ -203,7 +204,7 @@ func capturevars(dcl *Node) { outer = nod(OADDR, outer, nil) } - if Flag.LowerM > 1 { + if base.Flag.LowerM > 1 { var name *types.Sym if v.Name.Curfn != nil && v.Name.Curfn.Func.Nname != nil { name = v.Name.Curfn.Func.Nname.Sym @@ -212,7 +213,7 @@ func capturevars(dcl *Node) { if v.Name.Byval() { how = "value" } - Warnl(v.Pos, "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym, outermost.Name.Addrtaken(), outermost.Name.Assigned(), int32(v.Type.Width)) + base.WarnfAt(v.Pos, "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym, outermost.Name.Addrtaken(), outermost.Name.Assigned(), int32(v.Type.Width)) } outer = typecheck(outer, ctxExpr) @@ -220,14 +221,14 @@ func capturevars(dcl *Node) { } fn.ClosureVars.Set(out) - lineno = lno + base.Pos = lno } // transformclosure is called in a separate phase after escape analysis. // It transform closure bodies to properly reference captured variables. func transformclosure(dcl *Node) { - lno := lineno - lineno = dcl.Pos + lno := base.Pos + base.Pos = dcl.Pos fn := dcl.Func if fn.ClosureCalled { @@ -325,7 +326,7 @@ func transformclosure(dcl *Node) { } } - lineno = lno + base.Pos = lno } // hasemptycvars reports whether closure clo has an @@ -337,15 +338,15 @@ func hasemptycvars(clo *Node) bool { // closuredebugruntimecheck applies boilerplate checks for debug flags // and compiling runtime func closuredebugruntimecheck(clo *Node) { - if Debug.Closure > 0 { + if base.Debug.Closure > 0 { if clo.Esc == EscHeap { - Warnl(clo.Pos, "heap closure, captured vars = %v", clo.Func.ClosureVars) + base.WarnfAt(clo.Pos, "heap closure, captured vars = %v", clo.Func.ClosureVars) } else { - Warnl(clo.Pos, "stack closure, captured vars = %v", clo.Func.ClosureVars) + base.WarnfAt(clo.Pos, "stack closure, captured vars = %v", clo.Func.ClosureVars) } } - if Flag.CompilingRuntime && clo.Esc == EscHeap { - yyerrorl(clo.Pos, "heap-allocated closure, not allowed in runtime") + if base.Flag.CompilingRuntime && clo.Esc == EscHeap { + base.ErrorfAt(clo.Pos, "heap-allocated closure, not allowed in runtime") } } @@ -386,8 +387,8 @@ func walkclosure(clo *Node, init *Nodes) *Node { // If no closure vars, don't bother wrapping. if hasemptycvars(clo) { - if Debug.Closure > 0 { - Warnl(clo.Pos, "closure converted to global") + if base.Debug.Closure > 0 { + base.WarnfAt(clo.Pos, "closure converted to global") } return fn.Nname } @@ -423,7 +424,7 @@ func typecheckpartialcall(dot *Node, sym *types.Sym) { break default: - Fatalf("invalid typecheckpartialcall") + base.Fatalf("invalid typecheckpartialcall") } // Create top-level function. @@ -448,13 +449,13 @@ func makepartialcall(dot *Node, t0 *types.Type, meth *types.Sym) *Node { sym.SetUniq(true) savecurfn := Curfn - saveLineNo := lineno + saveLineNo := base.Pos Curfn = nil // Set line number equal to the line number where the method is declared. var m *types.Field if lookdot0(meth, rcvrtype, &m, false) == 1 && m.Pos.IsKnown() { - lineno = m.Pos + base.Pos = m.Pos } // Note: !m.Pos.IsKnown() happens for method expressions where // the method is implicitly declared. 
The Error method of the @@ -512,7 +513,7 @@ func makepartialcall(dot *Node, t0 *types.Type, meth *types.Sym) *Node { sym.Def = asTypesNode(dcl) xtop = append(xtop, dcl) Curfn = savecurfn - lineno = saveLineNo + base.Pos = saveLineNo return dcl } @@ -579,14 +580,14 @@ func walkpartialcall(n *Node, init *Nodes) *Node { // referenced by method value n. func callpartMethod(n *Node) *types.Field { if n.Op != OCALLPART { - Fatalf("expected OCALLPART, got %v", n) + base.Fatalf("expected OCALLPART, got %v", n) } // TODO(mdempsky): Optimize this. If necessary, // makepartialcall could save m for us somewhere. var m *types.Field if lookdot0(n.Right.Sym, n.Left.Type, &m, false) != 1 { - Fatalf("failed to find field for OCALLPART") + base.Fatalf("failed to find field for OCALLPART") } return m diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index e72962124a383..98473b4cfb015 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/src" "fmt" @@ -28,7 +29,7 @@ const ( func (n *Node) ValueInterface() interface{} { switch v := n.Val(); v.Kind() { default: - Fatalf("unexpected constant: %v", v) + base.Fatalf("unexpected constant: %v", v) panic("unreachable") case constant.Bool: return constant.BoolVal(v) @@ -55,7 +56,7 @@ func int64Val(t *types.Type, v constant.Value) int64 { return x } } - Fatalf("%v out of range for %v", v, t) + base.Fatalf("%v out of range for %v", v, t) panic("unreachable") } @@ -63,7 +64,7 @@ func float64Val(v constant.Value) float64 { if x, _ := constant.Float64Val(v); !math.IsInf(x, 0) { return x + 0 // avoid -0 (should not be needed, but be conservative) } - Fatalf("bad float64 value: %v", v) + base.Fatalf("bad float64 value: %v", v) panic("unreachable") } @@ -80,7 +81,7 @@ func bigFloatVal(v constant.Value) *big.Float { case *big.Rat: f.SetRat(u) default: - Fatalf("unexpected: %v", u) + base.Fatalf("unexpected: %v", u) } return f } @@ -89,11 +90,11 @@ func bigFloatVal(v constant.Value) *big.Float { // n must be an integer or rune constant. func (n *Node) Int64Val() int64 { if !Isconst(n, constant.Int) { - Fatalf("Int64Val(%v)", n) + base.Fatalf("Int64Val(%v)", n) } x, ok := constant.Int64Val(n.Val()) if !ok { - Fatalf("Int64Val(%v)", n) + base.Fatalf("Int64Val(%v)", n) } return x } @@ -114,11 +115,11 @@ func (n *Node) CanInt64() bool { // n must be an integer or rune constant. func (n *Node) Uint64Val() uint64 { if !Isconst(n, constant.Int) { - Fatalf("Uint64Val(%v)", n) + base.Fatalf("Uint64Val(%v)", n) } x, ok := constant.Uint64Val(n.Val()) if !ok { - Fatalf("Uint64Val(%v)", n) + base.Fatalf("Uint64Val(%v)", n) } return x } @@ -127,7 +128,7 @@ func (n *Node) Uint64Val() uint64 { // n must be a boolean constant. func (n *Node) BoolVal() bool { if !Isconst(n, constant.Bool) { - Fatalf("BoolVal(%v)", n) + base.Fatalf("BoolVal(%v)", n) } return constant.BoolVal(n.Val()) } @@ -136,7 +137,7 @@ func (n *Node) BoolVal() bool { // n must be a string constant. 
func (n *Node) StringVal() string { if !Isconst(n, constant.String) { - Fatalf("StringVal(%v)", n) + base.Fatalf("StringVal(%v)", n) } return constant.StringVal(n.Val()) } @@ -150,7 +151,7 @@ func roundFloat(v constant.Value, sz int64) constant.Value { f, _ := constant.Float64Val(v) return makeFloat64(f) } - Fatalf("unexpected size: %v", sz) + base.Fatalf("unexpected size: %v", sz) panic("unreachable") } @@ -169,7 +170,7 @@ func truncfltlit(v constant.Value, t *types.Type) constant.Value { // truncate Real and Imag parts of Mpcplx to 32-bit or 64-bit // precision, according to type; return truncated value. In case of -// overflow, calls yyerror but does not truncate the input value. +// overflow, calls Errorf but does not truncate the input value. func trunccmplxlit(v constant.Value, t *types.Type) constant.Value { if t.IsUntyped() || overflow(v, t) { // If there was overflow, simply continuing would set the @@ -199,10 +200,10 @@ func defaultlit(n *Node, t *types.Type) *Node { return convlit1(n, t, false, nil // message. func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Node { if explicit && t == nil { - Fatalf("explicit conversion missing type") + base.Fatalf("explicit conversion missing type") } if t != nil && t.IsUntyped() { - Fatalf("bad conversion to untyped: %v", t) + base.Fatalf("bad conversion to untyped: %v", t) } if n == nil || n.Type == nil { @@ -223,10 +224,10 @@ func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Nod // Nil is technically not a constant, so handle it specially. if n.Type.Etype == TNIL { if n.Op != ONIL { - Fatalf("unexpected op: %v (%v)", n, n.Op) + base.Fatalf("unexpected op: %v (%v)", n, n.Op) } if t == nil { - yyerror("use of untyped nil") + base.Errorf("use of untyped nil") n.SetDiag(true) n.Type = nil return n @@ -247,7 +248,7 @@ func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Nod switch n.Op { default: - Fatalf("unexpected untyped expression: %v", n) + base.Fatalf("unexpected untyped expression: %v", n) case OLITERAL: v := convertVal(n.Val(), t, explicit) @@ -287,7 +288,7 @@ func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Nod return n } if !types.Identical(n.Left.Type, n.Right.Type) { - yyerror("invalid operation: %v (mismatched types %v and %v)", n, n.Left.Type, n.Right.Type) + base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, n.Left.Type, n.Right.Type) n.Type = nil return n } @@ -306,7 +307,7 @@ func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Nod n.Left = convlit1(n.Left, t, explicit, nil) n.Type = n.Left.Type if n.Type != nil && !n.Type.IsInteger() { - yyerror("invalid operation: %v (shift of type %v)", n, n.Type) + base.Errorf("invalid operation: %v (shift of type %v)", n, n.Type) n.Type = nil } return n @@ -315,11 +316,11 @@ func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Nod if !n.Diag() { if !t.Broke() { if explicit { - yyerror("cannot convert %L to type %v", n, t) + base.Errorf("cannot convert %L to type %v", n, t) } else if context != nil { - yyerror("cannot use %L as type %v in %s", n, t, context()) + base.Errorf("cannot use %L as type %v in %s", n, t, context()) } else { - yyerror("cannot use %L as type %v", n, t) + base.Errorf("cannot use %L as type %v", n, t) } } n.SetDiag(true) @@ -395,7 +396,7 @@ func tocplx(v constant.Value) constant.Value { func toflt(v constant.Value) constant.Value { if v.Kind() == constant.Complex { if constant.Sign(constant.Imag(v)) 
!= 0 { - yyerror("constant %v truncated to real", v) + base.Errorf("constant %v truncated to real", v) } v = constant.Real(v) } @@ -406,7 +407,7 @@ func toflt(v constant.Value) constant.Value { func toint(v constant.Value) constant.Value { if v.Kind() == constant.Complex { if constant.Sign(constant.Imag(v)) != 0 { - yyerror("constant %v truncated to integer", v) + base.Errorf("constant %v truncated to integer", v) } v = constant.Real(v) } @@ -426,14 +427,14 @@ func toint(v constant.Value) constant.Value { // (See issue #11371). f := bigFloatVal(v) if f.MantExp(nil) > 2*Mpprec { - yyerror("integer too large") + base.Errorf("integer too large") } else { var t big.Float t.Parse(fmt.Sprint(v), 0) if t.IsInt() { - yyerror("constant truncated to integer") + base.Errorf("constant truncated to integer") } else { - yyerror("constant %v truncated to integer", v) + base.Errorf("constant %v truncated to integer", v) } } @@ -470,7 +471,7 @@ func doesoverflow(v constant.Value, t *types.Type) bool { ft := floatForComplex(t) return doesoverflow(constant.Real(v), ft) || doesoverflow(constant.Imag(v), ft) } - Fatalf("doesoverflow: %v, %v", v, t) + base.Fatalf("doesoverflow: %v, %v", v, t) panic("unreachable") } @@ -483,11 +484,11 @@ func overflow(v constant.Value, t *types.Type) bool { return false } if v.Kind() == constant.Int && constant.BitLen(v) > Mpprec { - yyerror("integer too large") + base.Errorf("integer too large") return true } if doesoverflow(v, t) { - yyerror("constant %v overflows %v", vconv(v, 0), t) + base.Errorf("constant %v overflows %v", vconv(v, 0), t) return true } return false @@ -568,12 +569,12 @@ func evalConst(n *Node) *Node { // check for divisor underflow in complex division (see issue 20227) if op == ODIV && n.Type.IsComplex() && constant.Sign(square(constant.Real(rval))) == 0 && constant.Sign(square(constant.Imag(rval))) == 0 { - yyerror("complex division by zero") + base.Errorf("complex division by zero") n.Type = nil return n } if (op == ODIV || op == OMOD) && constant.Sign(rval) == 0 { - yyerror("division by zero") + base.Errorf("division by zero") n.Type = nil return n } @@ -596,7 +597,7 @@ func evalConst(n *Node) *Node { const shiftBound = 1023 - 1 + 52 s, ok := constant.Uint64Val(nr.Val()) if !ok || s > shiftBound { - yyerror("invalid shift count %v", nr) + base.Errorf("invalid shift count %v", nr) n.Type = nil break } @@ -702,7 +703,7 @@ func makeInt(i *big.Int) constant.Value { func makeFloat64(f float64) constant.Value { if math.IsInf(f, 0) { - Fatalf("infinity is not a valid constant") + base.Fatalf("infinity is not a valid constant") } v := constant.MakeFloat64(f) v = constant.ToFloat(v) // workaround #42641 (MakeFloat64(0).Kind() returns Int, not Float) @@ -732,7 +733,7 @@ var overflowNames = [...]string{ func origConst(n *Node, v constant.Value) *Node { lno := setlineno(n) v = convertVal(v, n.Type, false) - lineno = lno + base.Pos = lno switch v.Kind() { case constant.Int: @@ -743,9 +744,9 @@ func origConst(n *Node, v constant.Value) *Node { case constant.Unknown: what := overflowNames[n.Op] if what == "" { - Fatalf("unexpected overflow: %v", n.Op) + base.Fatalf("unexpected overflow: %v", n.Op) } - yyerrorl(n.Pos, "constant %v overflow", what) + base.ErrorfAt(n.Pos, "constant %v overflow", what) n.Type = nil return n } @@ -760,7 +761,7 @@ func origConst(n *Node, v constant.Value) *Node { func assertRepresents(t *types.Type, v constant.Value) { if !represents(t, v) { - Fatalf("%v does not represent %v", t, v) + base.Fatalf("%v does not represent %v", t, v) } } @@ 
-780,7 +781,7 @@ func represents(t *types.Type, v constant.Value) bool { return t.IsComplex() } - Fatalf("unexpected constant kind: %v", v) + base.Fatalf("unexpected constant kind: %v", v) panic("unreachable") } @@ -815,7 +816,7 @@ func idealType(ct constant.Kind) *types.Type { case constant.Complex: return types.UntypedComplex } - Fatalf("unexpected Ctype: %v", ct) + base.Fatalf("unexpected Ctype: %v", ct) return nil } @@ -876,7 +877,7 @@ func mixUntyped(t1, t2 *types.Type) *types.Type { case types.UntypedComplex: return 3 } - Fatalf("bad type %v", t) + base.Fatalf("bad type %v", t) panic("unreachable") } @@ -906,7 +907,7 @@ func defaultType(t *types.Type) *types.Type { return types.Types[TCOMPLEX128] } - Fatalf("bad type %v", t) + base.Fatalf("bad type %v", t) return nil } @@ -1023,7 +1024,7 @@ func (s *constSet) add(pos src.XPos, n *Node, what, where string) { return } if n.Type.IsUntyped() { - Fatalf("%v is untyped", n) + base.Fatalf("%v is untyped", n) } // Consts are only duplicates if they have the same value and @@ -1059,9 +1060,9 @@ func (s *constSet) add(pos src.XPos, n *Node, what, where string) { } if prevPos, isDup := s.m[k]; isDup { - yyerrorl(pos, "duplicate %s %s in %s\n\tprevious %s at %v", + base.ErrorfAt(pos, "duplicate %s %s in %s\n\tprevious %s at %v", what, nodeAndVal(n), where, - what, linestr(prevPos)) + what, base.FmtPos(prevPos)) } else { s.m[k] = pos } diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 3f193e3a019b9..63a52a9f364cb 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -6,6 +6,7 @@ package gc import ( "bytes" + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/src" @@ -20,7 +21,7 @@ var externdcl []*Node func testdclstack() { if !types.IsDclstackValid() { - Fatalf("mark left on the dclstack") + base.Fatalf("mark left on the dclstack") } } @@ -31,7 +32,7 @@ func redeclare(pos src.XPos, s *types.Sym, where string) { if pkg == nil { pkg = s.Pkg } - yyerrorl(pos, "%v redeclared %s\n"+ + base.ErrorfAt(pos, "%v redeclared %s\n"+ "\tprevious declaration during import %q", s, where, pkg.Path) } else { prevPos := s.Lastlineno @@ -44,8 +45,8 @@ func redeclare(pos src.XPos, s *types.Sym, where string) { pos, prevPos = prevPos, pos } - yyerrorl(pos, "%v redeclared %s\n"+ - "\tprevious declaration at %v", s, where, linestr(prevPos)) + base.ErrorfAt(pos, "%v redeclared %s\n"+ + "\tprevious declaration at %v", s, where, base.FmtPos(prevPos)) } } @@ -71,22 +72,22 @@ func declare(n *Node, ctxt Class) { // kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later. 
if !inimport && !typecheckok && s.Pkg != localpkg { - yyerrorl(n.Pos, "cannot declare name %v", s) + base.ErrorfAt(n.Pos, "cannot declare name %v", s) } gen := 0 if ctxt == PEXTERN { if s.Name == "init" { - yyerrorl(n.Pos, "cannot declare init - must be func") + base.ErrorfAt(n.Pos, "cannot declare init - must be func") } if s.Name == "main" && s.Pkg.Name == "main" { - yyerrorl(n.Pos, "cannot declare main - must be func") + base.ErrorfAt(n.Pos, "cannot declare main - must be func") } externdcl = append(externdcl, n) } else { if Curfn == nil && ctxt == PAUTO { - lineno = n.Pos - Fatalf("automatic outside function") + base.Pos = n.Pos + base.Fatalf("automatic outside function") } if Curfn != nil && ctxt != PFUNC { Curfn.Func.Dcl = append(Curfn.Func.Dcl, n) @@ -115,7 +116,7 @@ func declare(n *Node, ctxt Class) { } s.Block = types.Block - s.Lastlineno = lineno + s.Lastlineno = base.Pos s.Def = asTypesNode(n) n.Name.Vargen = int32(gen) n.SetClass(ctxt) @@ -128,7 +129,7 @@ func declare(n *Node, ctxt Class) { func addvar(n *Node, t *types.Type, ctxt Class) { if n == nil || n.Sym == nil || (n.Op != ONAME && n.Op != ONONAME) || t == nil { - Fatalf("addvar: n=%v t=%v nil", n, t) + base.Fatalf("addvar: n=%v t=%v nil", n, t) } n.Op = ONAME @@ -165,7 +166,7 @@ func variter(vl []*Node, t *Node, el []*Node) []*Node { var e *Node if doexpr { if len(el) == 0 { - yyerror("assignment mismatch: %d variables but %d values", len(vl), nel) + base.Errorf("assignment mismatch: %d variables but %d values", len(vl), nel) break } e = el[0] @@ -189,7 +190,7 @@ func variter(vl []*Node, t *Node, el []*Node) []*Node { } if len(el) != 0 { - yyerror("assignment mismatch: %d variables but %d values", len(vl), nel) + base.Errorf("assignment mismatch: %d variables but %d values", len(vl), nel) } return init } @@ -197,7 +198,7 @@ func variter(vl []*Node, t *Node, el []*Node) []*Node { // newnoname returns a new ONONAME Node associated with symbol s. func newnoname(s *types.Sym) *Node { if s == nil { - Fatalf("newnoname nil") + base.Fatalf("newnoname nil") } n := nod(ONONAME, nil, nil) n.Sym = s @@ -208,7 +209,7 @@ func newnoname(s *types.Sym) *Node { // newfuncnamel generates a new name node for a function or method. func newfuncnamel(pos src.XPos, s *types.Sym, fn *Func) *Node { if fn.Nname != nil { - Fatalf("newfuncnamel - already have name") + base.Fatalf("newfuncnamel - already have name") } n := newnamel(pos, s) n.Func = fn @@ -304,7 +305,7 @@ func importName(sym *types.Sym) *Node { n := oldname(sym) if !types.IsExported(sym.Name) && sym.Pkg != localpkg { n.SetDiag(true) - yyerror("cannot refer to unexported name %s.%s", sym.Pkg.Name, sym.Name) + base.Errorf("cannot refer to unexported name %s.%s", sym.Pkg.Name, sym.Name) } return n } @@ -336,13 +337,13 @@ func colasdefn(left []*Node, defn *Node) { continue } if !colasname(n) { - yyerrorl(defn.Pos, "non-name %v on left side of :=", n) + base.ErrorfAt(defn.Pos, "non-name %v on left side of :=", n) nerr++ continue } if !n.Sym.Uniq() { - yyerrorl(defn.Pos, "%v repeated on left side of :=", n.Sym) + base.ErrorfAt(defn.Pos, "%v repeated on left side of :=", n.Sym) n.SetDiag(true) nerr++ continue @@ -362,7 +363,7 @@ func colasdefn(left []*Node, defn *Node) { } if nnew == 0 && nerr == 0 { - yyerrorl(defn.Pos, "no new variables on left side of :=") + base.ErrorfAt(defn.Pos, "no new variables on left side of :=") } } @@ -370,11 +371,11 @@ func colasdefn(left []*Node, defn *Node) { // interface field declaration. 
func ifacedcl(n *Node) { if n.Op != ODCLFIELD || n.Left == nil { - Fatalf("ifacedcl") + base.Fatalf("ifacedcl") } if n.Sym.IsBlank() { - yyerror("methods must have a unique non-blank name") + base.Errorf("methods must have a unique non-blank name") } } @@ -399,7 +400,7 @@ func funchdr(n *Node) { func funcargs(nt *Node) { if nt.Op != OTFUNC { - Fatalf("funcargs %v", nt.Op) + base.Fatalf("funcargs %v", nt.Op) } // re-start the variable generation number @@ -449,7 +450,7 @@ func funcargs(nt *Node) { func funcarg(n *Node, ctxt Class) { if n.Op != ODCLFIELD { - Fatalf("funcarg %v", n.Op) + base.Fatalf("funcarg %v", n.Op) } if n.Sym == nil { return @@ -469,7 +470,7 @@ func funcarg(n *Node, ctxt Class) { // used functype directly to parse the function's type. func funcargs2(t *types.Type) { if t.Etype != TFUNC { - Fatalf("funcargs2 %v", t) + base.Fatalf("funcargs2 %v", t) } for _, f := range t.Recvs().Fields().Slice() { @@ -522,23 +523,23 @@ func checkembeddedtype(t *types.Type) { if t.Sym == nil && t.IsPtr() { t = t.Elem() if t.IsInterface() { - yyerror("embedded type cannot be a pointer to interface") + base.Errorf("embedded type cannot be a pointer to interface") } } if t.IsPtr() || t.IsUnsafePtr() { - yyerror("embedded type cannot be a pointer") + base.Errorf("embedded type cannot be a pointer") } else if t.Etype == TFORW && !t.ForwardType().Embedlineno.IsKnown() { - t.ForwardType().Embedlineno = lineno + t.ForwardType().Embedlineno = base.Pos } } func structfield(n *Node) *types.Field { - lno := lineno - lineno = n.Pos + lno := base.Pos + base.Pos = n.Pos if n.Op != ODCLFIELD { - Fatalf("structfield: oops %v\n", n) + base.Fatalf("structfield: oops %v\n", n) } if n.Left != nil { @@ -556,7 +557,7 @@ func structfield(n *Node) *types.Field { f.Note = constant.StringVal(n.Val()) } - lineno = lno + base.Pos = lno return f } @@ -570,7 +571,7 @@ func checkdupfields(what string, fss ...[]*types.Field) { continue } if seen[f.Sym] { - yyerrorl(f.Pos, "duplicate %s %s", what, f.Sym.Name) + base.ErrorfAt(f.Pos, "duplicate %s %s", what, f.Sym.Name) continue } seen[f.Sym] = true @@ -631,15 +632,15 @@ func tofunargsfield(fields []*types.Field, funarg types.Funarg) *types.Type { } func interfacefield(n *Node) *types.Field { - lno := lineno - lineno = n.Pos + lno := base.Pos + base.Pos = n.Pos if n.Op != ODCLFIELD { - Fatalf("interfacefield: oops %v\n", n) + base.Fatalf("interfacefield: oops %v\n", n) } if n.HasVal() { - yyerror("interface method cannot have annotation") + base.Errorf("interface method cannot have annotation") } // MethodSpec = MethodName Signature | InterfaceTypeName . @@ -655,7 +656,7 @@ func interfacefield(n *Node) *types.Field { f := types.NewField(n.Pos, n.Sym, n.Type) - lineno = lno + base.Pos = lno return f } @@ -774,13 +775,13 @@ func methodSym(recv *types.Type, msym *types.Sym) *types.Sym { // start with a letter, number, or period. func methodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sym { if msym.IsBlank() { - Fatalf("blank method name") + base.Fatalf("blank method name") } rsym := recv.Sym if recv.IsPtr() { if rsym != nil { - Fatalf("declared pointer receiver type: %v", recv) + base.Fatalf("declared pointer receiver type: %v", recv) } rsym = recv.Elem().Sym } @@ -824,13 +825,13 @@ func methodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sy // Returns a pointer to the existing or added Field; or nil if there's an error. 
func addmethod(n *Node, msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field { if msym == nil { - Fatalf("no method symbol") + base.Fatalf("no method symbol") } // get parent type sym rf := t.Recv() // ptr to this structure if rf == nil { - yyerror("missing receiver") + base.Errorf("missing receiver") return nil } @@ -840,7 +841,7 @@ func addmethod(n *Node, msym *types.Sym, t *types.Type, local, nointerface bool) t := pa if t != nil && t.IsPtr() { if t.Sym != nil { - yyerror("invalid receiver type %v (%v is a pointer type)", pa, t) + base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t) return nil } t = t.Elem() @@ -850,21 +851,21 @@ func addmethod(n *Node, msym *types.Sym, t *types.Type, local, nointerface bool) case t == nil || t.Broke(): // rely on typecheck having complained before case t.Sym == nil: - yyerror("invalid receiver type %v (%v is not a defined type)", pa, t) + base.Errorf("invalid receiver type %v (%v is not a defined type)", pa, t) case t.IsPtr(): - yyerror("invalid receiver type %v (%v is a pointer type)", pa, t) + base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t) case t.IsInterface(): - yyerror("invalid receiver type %v (%v is an interface type)", pa, t) + base.Errorf("invalid receiver type %v (%v is an interface type)", pa, t) default: // Should have picked off all the reasons above, // but just in case, fall back to generic error. - yyerror("invalid receiver type %v (%L / %L)", pa, pa, t) + base.Errorf("invalid receiver type %v (%L / %L)", pa, pa, t) } return nil } if local && mt.Sym.Pkg != localpkg { - yyerror("cannot define new methods on non-local type %v", mt) + base.Errorf("cannot define new methods on non-local type %v", mt) return nil } @@ -875,7 +876,7 @@ func addmethod(n *Node, msym *types.Sym, t *types.Type, local, nointerface bool) if mt.IsStruct() { for _, f := range mt.Fields().Slice() { if f.Sym == msym { - yyerror("type %v has both field and method named %v", mt, msym) + base.Errorf("type %v has both field and method named %v", mt, msym) f.SetBroke(true) return nil } @@ -889,12 +890,12 @@ func addmethod(n *Node, msym *types.Sym, t *types.Type, local, nointerface bool) // types.Identical only checks that incoming and result parameters match, // so explicitly check that the receiver parameters match too. if !types.Identical(t, f.Type) || !types.Identical(t.Recv().Type, f.Type.Recv().Type) { - yyerror("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t) + base.Errorf("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t) } return f } - f := types.NewField(lineno, msym, t) + f := types.NewField(base.Pos, msym, t) f.Nname = asTypesNode(n.Func.Nname) f.SetNointerface(nointerface) @@ -923,7 +924,7 @@ func funcsym(s *types.Sym) *types.Sym { // When dynamically linking, the necessary function // symbols will be created explicitly with makefuncsym. // See the makefuncsym comment for details. - if !Ctxt.Flag_dynlink && !existed { + if !base.Ctxt.Flag_dynlink && !existed { funcsyms = append(funcsyms, s) } funcsymsmu.Unlock() @@ -940,13 +941,13 @@ func funcsym(s *types.Sym) *types.Sym { // So instead, when dynamic linking, we only create // the s·f stubs in s's package. 
func makefuncsym(s *types.Sym) { - if !Ctxt.Flag_dynlink { - Fatalf("makefuncsym dynlink") + if !base.Ctxt.Flag_dynlink { + base.Fatalf("makefuncsym dynlink") } if s.IsBlank() { return } - if Flag.CompilingRuntime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") { + if base.Flag.CompilingRuntime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") { // runtime.getg(), getclosureptr(), getcallerpc(), and // getcallersp() are not real functions and so do not // get funcsyms. @@ -960,7 +961,7 @@ func makefuncsym(s *types.Sym) { // setNodeNameFunc marks a node as a function. func setNodeNameFunc(n *Node) { if n.Op != ONAME || n.Class() != Pxxx { - Fatalf("expected ONAME/Pxxx node, got %v", n) + base.Fatalf("expected ONAME/Pxxx node, got %v", n) } n.SetClass(PFUNC) @@ -969,11 +970,11 @@ func setNodeNameFunc(n *Node) { func dclfunc(sym *types.Sym, tfn *Node) *Node { if tfn.Op != OTFUNC { - Fatalf("expected OTFUNC node, got %v", tfn) + base.Fatalf("expected OTFUNC node, got %v", tfn) } fn := nod(ODCLFUNC, nil, nil) - fn.Func.Nname = newfuncnamel(lineno, sym, fn.Func) + fn.Func.Nname = newfuncnamel(base.Pos, sym, fn.Func) fn.Func.Nname.Name.Defn = fn fn.Func.Nname.Name.Param.Ntype = tfn setNodeNameFunc(fn.Func.Nname) @@ -1045,10 +1046,10 @@ func (c *nowritebarrierrecChecker) findExtraCalls(n *Node) bool { case OCLOSURE: callee = arg.Func.Decl default: - Fatalf("expected ONAME or OCLOSURE node, got %+v", arg) + base.Fatalf("expected ONAME or OCLOSURE node, got %+v", arg) } if callee.Op != ODCLFUNC { - Fatalf("expected ODCLFUNC node, got %+v", callee) + base.Fatalf("expected ODCLFUNC node, got %+v", callee) } c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos}) return true @@ -1064,7 +1065,7 @@ func (c *nowritebarrierrecChecker) findExtraCalls(n *Node) bool { // This can be called concurrently for different from Nodes. func (c *nowritebarrierrecChecker) recordCall(from *Node, to *obj.LSym, pos src.XPos) { if from.Op != ODCLFUNC { - Fatalf("expected ODCLFUNC, got %v", from) + base.Fatalf("expected ODCLFUNC, got %v", from) } // We record this information on the *Func so this is // concurrent-safe. @@ -1105,7 +1106,7 @@ func (c *nowritebarrierrecChecker) check() { } // Check go:nowritebarrier functions. 
if n.Func.Pragma&Nowritebarrier != 0 && n.Func.WBPos.IsKnown() { - yyerrorl(n.Func.WBPos, "write barrier prohibited") + base.ErrorfAt(n.Func.WBPos, "write barrier prohibited") } } @@ -1133,10 +1134,10 @@ func (c *nowritebarrierrecChecker) check() { var err bytes.Buffer call := funcs[fn] for call.target != nil { - fmt.Fprintf(&err, "\n\t%v: called by %v", linestr(call.lineno), call.target.Func.Nname) + fmt.Fprintf(&err, "\n\t%v: called by %v", base.FmtPos(call.lineno), call.target.Func.Nname) call = funcs[call.target] } - yyerrorl(fn.Func.WBPos, "write barrier prohibited by caller; %v%s", fn.Func.Nname, err.String()) + base.ErrorfAt(fn.Func.WBPos, "write barrier prohibited by caller; %v%s", fn.Func.Nname, err.String()) continue } diff --git a/src/cmd/compile/internal/gc/dump.go b/src/cmd/compile/internal/gc/dump.go index 29eb1c1e48b6c..56dc474465737 100644 --- a/src/cmd/compile/internal/gc/dump.go +++ b/src/cmd/compile/internal/gc/dump.go @@ -9,6 +9,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/src" "fmt" @@ -146,7 +147,7 @@ func (p *dumper) dump(x reflect.Value, depth int) { x = reflect.ValueOf(v.Slice()) case src.XPos: - p.printf("%s", linestr(v)) + p.printf("%s", base.FmtPos(v)) return case *types.Node: diff --git a/src/cmd/compile/internal/gc/dwinl.go b/src/cmd/compile/internal/gc/dwinl.go index edde7a4cc530c..5da2871748848 100644 --- a/src/cmd/compile/internal/gc/dwinl.go +++ b/src/cmd/compile/internal/gc/dwinl.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/internal/dwarf" "cmd/internal/obj" "cmd/internal/src" @@ -26,8 +27,8 @@ type varPos struct { func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls { var inlcalls dwarf.InlCalls - if Debug.DwarfInl != 0 { - Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name) + if base.Debug.DwarfInl != 0 { + base.Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name) } // This maps inline index (from Ctxt.InlTree) to index in inlcalls.Calls @@ -106,7 +107,7 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls { } m = makePreinlineDclMap(fnsym) } else { - ifnlsym := Ctxt.InlTree.InlinedFunction(int(ii - 1)) + ifnlsym := base.Ctxt.InlTree.InlinedFunction(int(ii - 1)) m = makePreinlineDclMap(ifnlsym) } @@ -181,7 +182,7 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls { } // Debugging - if Debug.DwarfInl != 0 { + if base.Debug.DwarfInl != 0 { dumpInlCalls(inlcalls) dumpInlVars(dwVars) } @@ -205,15 +206,15 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls { // abstract function DIE for an inlined routine imported from a // previously compiled package. 
func genAbstractFunc(fn *obj.LSym) { - ifn := Ctxt.DwFixups.GetPrecursorFunc(fn) + ifn := base.Ctxt.DwFixups.GetPrecursorFunc(fn) if ifn == nil { - Ctxt.Diag("failed to locate precursor fn for %v", fn) + base.Ctxt.Diag("failed to locate precursor fn for %v", fn) return } - if Debug.DwarfInl != 0 { - Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name) + if base.Debug.DwarfInl != 0 { + base.Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name) } - Ctxt.DwarfAbstractFunc(ifn, fn, Ctxt.Pkgpath) + base.Ctxt.DwarfAbstractFunc(ifn, fn, base.Ctxt.Pkgpath) } // Undo any versioning performed when a name was written @@ -235,7 +236,7 @@ func makePreinlineDclMap(fnsym *obj.LSym) map[varPos]int { dcl := preInliningDcls(fnsym) m := make(map[varPos]int) for i, n := range dcl { - pos := Ctxt.InnermostPos(n.Pos) + pos := base.Ctxt.InnermostPos(n.Pos) vp := varPos{ DeclName: unversion(n.Sym.Name), DeclFile: pos.RelFilename(), @@ -243,7 +244,7 @@ func makePreinlineDclMap(fnsym *obj.LSym) map[varPos]int { DeclCol: pos.Col(), } if _, found := m[vp]; found { - Fatalf("child dcl collision on symbol %s within %v\n", n.Sym.Name, fnsym.Name) + base.Fatalf("child dcl collision on symbol %s within %v\n", n.Sym.Name, fnsym.Name) } m[vp] = i } @@ -260,17 +261,17 @@ func insertInlCall(dwcalls *dwarf.InlCalls, inlIdx int, imap map[int]int) int { // is one. We do this first so that parents appear before their // children in the resulting table. parCallIdx := -1 - parInlIdx := Ctxt.InlTree.Parent(inlIdx) + parInlIdx := base.Ctxt.InlTree.Parent(inlIdx) if parInlIdx >= 0 { parCallIdx = insertInlCall(dwcalls, parInlIdx, imap) } // Create new entry for this inline - inlinedFn := Ctxt.InlTree.InlinedFunction(inlIdx) - callXPos := Ctxt.InlTree.CallPos(inlIdx) - absFnSym := Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn) - pb := Ctxt.PosTable.Pos(callXPos).Base() - callFileSym := Ctxt.Lookup(pb.SymFilename()) + inlinedFn := base.Ctxt.InlTree.InlinedFunction(inlIdx) + callXPos := base.Ctxt.InlTree.CallPos(inlIdx) + absFnSym := base.Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn) + pb := base.Ctxt.PosTable.Pos(callXPos).Base() + callFileSym := base.Ctxt.Lookup(pb.SymFilename()) ic := dwarf.InlCall{ InlIndex: inlIdx, CallFile: callFileSym, @@ -298,7 +299,7 @@ func insertInlCall(dwcalls *dwarf.InlCalls, inlIdx int, imap map[int]int) int { // the index for a node from the inlined body of D will refer to the // call to D from C. Whew. 
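// A concrete reading of the above: if the InlTree has entry 0 for the
// call to C from main and entry 1 (with Parent(1) == 0) for the call
// to D from C, then a node copied from C's body carries index 0 and a
// node copied from D's body carries index 1 -- the entry describing
// the call to D, not D itself.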
func posInlIndex(xpos src.XPos) int { - pos := Ctxt.PosTable.Pos(xpos) + pos := base.Ctxt.PosTable.Pos(xpos) if b := pos.Base(); b != nil { ii := b.InliningIndex() if ii >= 0 { @@ -324,7 +325,7 @@ func addRange(calls []dwarf.InlCall, start, end int64, ii int, imap map[int]int) // Append range to correct inlined call callIdx, found := imap[ii] if !found { - Fatalf("can't find inlIndex %d in imap for prog at %d\n", ii, start) + base.Fatalf("can't find inlIndex %d in imap for prog at %d\n", ii, start) } call := &calls[callIdx] call.Ranges = append(call.Ranges, dwarf.Range{Start: start, End: end}) @@ -332,23 +333,23 @@ func addRange(calls []dwarf.InlCall, start, end int64, ii int, imap map[int]int) func dumpInlCall(inlcalls dwarf.InlCalls, idx, ilevel int) { for i := 0; i < ilevel; i++ { - Ctxt.Logf(" ") + base.Ctxt.Logf(" ") } ic := inlcalls.Calls[idx] - callee := Ctxt.InlTree.InlinedFunction(ic.InlIndex) - Ctxt.Logf(" %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name) + callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex) + base.Ctxt.Logf(" %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name) for _, f := range ic.InlVars { - Ctxt.Logf(" %v", f.Name) + base.Ctxt.Logf(" %v", f.Name) } - Ctxt.Logf(" ) C: (") + base.Ctxt.Logf(" ) C: (") for _, k := range ic.Children { - Ctxt.Logf(" %v", k) + base.Ctxt.Logf(" %v", k) } - Ctxt.Logf(" ) R:") + base.Ctxt.Logf(" ) R:") for _, r := range ic.Ranges { - Ctxt.Logf(" [%d,%d)", r.Start, r.End) + base.Ctxt.Logf(" [%d,%d)", r.Start, r.End) } - Ctxt.Logf("\n") + base.Ctxt.Logf("\n") for _, k := range ic.Children { dumpInlCall(inlcalls, k, ilevel+1) } @@ -373,7 +374,7 @@ func dumpInlVars(dwvars []*dwarf.Var) { if dwv.IsInAbstract { ia = 1 } - Ctxt.Logf("V%d: %s CI:%d II:%d IA:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, ia, typ) + base.Ctxt.Logf("V%d: %s CI:%d II:%d IA:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, ia, typ) } } @@ -410,7 +411,7 @@ func checkInlCall(funcName string, inlCalls dwarf.InlCalls, funcSize int64, idx, // Callee ic := inlCalls.Calls[idx] - callee := Ctxt.InlTree.InlinedFunction(ic.InlIndex).Name + callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex).Name calleeRanges := ic.Ranges // Caller @@ -418,14 +419,14 @@ func checkInlCall(funcName string, inlCalls dwarf.InlCalls, funcSize int64, idx, parentRanges := []dwarf.Range{dwarf.Range{Start: int64(0), End: funcSize}} if parentIdx != -1 { pic := inlCalls.Calls[parentIdx] - caller = Ctxt.InlTree.InlinedFunction(pic.InlIndex).Name + caller = base.Ctxt.InlTree.InlinedFunction(pic.InlIndex).Name parentRanges = pic.Ranges } // Callee ranges contained in caller ranges? 
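	// For example, with half-open [Start,End) intervals, caller ranges
	// [0,10) and [20,30) contain callee ranges [2,4) and [21,22); a
	// callee range such as [9,12) is not contained and is reported as
	// malformed below.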
c, m := rangesContainsAll(parentRanges, calleeRanges) if !c { - Fatalf("** malformed inlined routine range in %s: caller %s callee %s II=%d %s\n", funcName, caller, callee, idx, m) + base.Fatalf("** malformed inlined routine range in %s: caller %s callee %s II=%d %s\n", funcName, caller, callee, idx, m) } // Now visit kids diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go index 5559d628131b4..f6c1b7cdccf11 100644 --- a/src/cmd/compile/internal/gc/embed.go +++ b/src/cmd/compile/internal/gc/embed.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/syntax" "cmd/compile/internal/types" "cmd/internal/obj" @@ -43,30 +44,30 @@ func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []Pragma pos := embeds[0].Pos if !haveEmbed { - p.yyerrorpos(pos, "invalid go:embed: missing import \"embed\"") + p.errorAt(pos, "invalid go:embed: missing import \"embed\"") return exprs } - if Flag.Cfg.Embed.Patterns == nil { - p.yyerrorpos(pos, "invalid go:embed: build system did not supply embed configuration") + if base.Flag.Cfg.Embed.Patterns == nil { + p.errorAt(pos, "invalid go:embed: build system did not supply embed configuration") return exprs } if len(names) > 1 { - p.yyerrorpos(pos, "go:embed cannot apply to multiple vars") + p.errorAt(pos, "go:embed cannot apply to multiple vars") return exprs } if len(exprs) > 0 { - p.yyerrorpos(pos, "go:embed cannot apply to var with initializer") + p.errorAt(pos, "go:embed cannot apply to var with initializer") return exprs } if typ == nil { // Should not happen, since len(exprs) == 0 now. - p.yyerrorpos(pos, "go:embed cannot apply to var without type") + p.errorAt(pos, "go:embed cannot apply to var without type") return exprs } kind := embedKindApprox(typ) if kind == embedUnknown { - p.yyerrorpos(pos, "go:embed cannot apply to var of type %v", typ) + p.errorAt(pos, "go:embed cannot apply to var of type %v", typ) return exprs } @@ -75,13 +76,13 @@ func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []Pragma var list []string for _, e := range embeds { for _, pattern := range e.Patterns { - files, ok := Flag.Cfg.Embed.Patterns[pattern] + files, ok := base.Flag.Cfg.Embed.Patterns[pattern] if !ok { - p.yyerrorpos(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern) + p.errorAt(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern) } for _, file := range files { - if Flag.Cfg.Embed.Files[file] == "" { - p.yyerrorpos(e.Pos, "invalid go:embed: build system did not map file: %s", file) + if base.Flag.Cfg.Embed.Files[file] == "" { + p.errorAt(e.Pos, "invalid go:embed: build system did not map file: %s", file) continue } if !have[file] { @@ -103,7 +104,7 @@ func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []Pragma if kind == embedString || kind == embedBytes { if len(list) > 1 { - p.yyerrorpos(pos, "invalid go:embed: multiple files for type %v", typ) + p.errorAt(pos, "invalid go:embed: multiple files for type %v", typ) return exprs } } @@ -129,7 +130,7 @@ func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []Pragma // can't tell whether "string" and "byte" really mean "string" and "byte". // The result must be confirmed later, after type checking, using embedKind. 
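// For example, given
//
//	//go:embed hello.txt
//	var greeting string
//
// this syntactic pass classifies greeting as embedString, but only the
// later embedKind check (on the typechecked type) can rule out a
// locally shadowed "string".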
func embedKindApprox(typ *Node) int { - if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && Ctxt.Pkgpath == "embed")) { + if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && base.Ctxt.Pkgpath == "embed")) { return embedFiles } // These are not guaranteed to match only string and []byte - @@ -147,7 +148,7 @@ func embedKindApprox(typ *Node) int { // embedKind determines the kind of embedding variable. func embedKind(typ *types.Type) int { - if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && Ctxt.Pkgpath == "embed")) { + if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && base.Ctxt.Pkgpath == "embed")) { return embedFiles } if typ == types.Types[TSTRING] { @@ -194,13 +195,13 @@ func initEmbed(v *Node) { files := v.Name.Param.EmbedFiles() switch kind := embedKind(v.Type); kind { case embedUnknown: - yyerrorl(v.Pos, "go:embed cannot apply to var of type %v", v.Type) + base.ErrorfAt(v.Pos, "go:embed cannot apply to var of type %v", v.Type) case embedString, embedBytes: file := files[0] - fsym, size, err := fileStringSym(v.Pos, Flag.Cfg.Embed.Files[file], kind == embedString, nil) + fsym, size, err := fileStringSym(v.Pos, base.Flag.Cfg.Embed.Files[file], kind == embedString, nil) if err != nil { - yyerrorl(v.Pos, "embed %s: %v", file, err) + base.ErrorfAt(v.Pos, "embed %s: %v", file, err) } sym := v.Sym.Linksym() off := 0 @@ -211,7 +212,7 @@ func initEmbed(v *Node) { } case embedFiles: - slicedata := Ctxt.Lookup(`"".` + v.Sym.Name + `.files`) + slicedata := base.Ctxt.Lookup(`"".` + v.Sym.Name + `.files`) off := 0 // []files pointed at by Files off = dsymptr(slicedata, off, slicedata, 3*Widthptr) // []file, pointing just past slice @@ -234,13 +235,13 @@ func initEmbed(v *Node) { off = duintptr(slicedata, off, 0) off += hashSize } else { - fsym, size, err := fileStringSym(v.Pos, Flag.Cfg.Embed.Files[file], true, hash) + fsym, size, err := fileStringSym(v.Pos, base.Flag.Cfg.Embed.Files[file], true, hash) if err != nil { - yyerrorl(v.Pos, "embed %s: %v", file, err) + base.ErrorfAt(v.Pos, "embed %s: %v", file, err) } off = dsymptr(slicedata, off, fsym, 0) // data string off = duintptr(slicedata, off, uint64(size)) - off = int(slicedata.WriteBytes(Ctxt, int64(off), hash)) + off = int(slicedata.WriteBytes(base.Ctxt, int64(off), hash)) } } ggloblsym(slicedata, int32(off), obj.RODATA|obj.LOCAL) diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go index 74b85e1ae8772..5cf8c4a1c64f0 100644 --- a/src/cmd/compile/internal/gc/esc.go +++ b/src/cmd/compile/internal/gc/esc.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/types" "fmt" ) @@ -263,11 +264,11 @@ func addrescapes(n *Node) { Curfn = Curfn.Func.Decl panic("can't happen") } - ln := lineno - lineno = Curfn.Pos + ln := base.Pos + base.Pos = Curfn.Pos moveToHeap(n) Curfn = oldfn - lineno = ln + base.Pos = ln // ODOTPTR has already been introduced, // so these are the non-pointer ODOT and OINDEX. @@ -283,15 +284,15 @@ func addrescapes(n *Node) { // moveToHeap records the parameter or local variable n as moved to the heap. 
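// For example, in
//
//	func f() *int { x := 0; return &x }
//
// x escapes, so it is rewritten (roughly) into an indirection through
// a new pointer variable, recorded below as n.Name.Param.Heapaddr,
// that holds the address of the heap copy.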
func moveToHeap(n *Node) { - if Flag.LowerR != 0 { + if base.Flag.LowerR != 0 { Dump("MOVE", n) } - if Flag.CompilingRuntime { - yyerror("%v escapes to heap, not allowed in runtime", n) + if base.Flag.CompilingRuntime { + base.Errorf("%v escapes to heap, not allowed in runtime", n) } if n.Class() == PAUTOHEAP { Dump("n", n) - Fatalf("double move to heap") + base.Fatalf("double move to heap") } // Allocate a local stack variable to hold the pointer to the heap copy. @@ -311,7 +312,7 @@ func moveToHeap(n *Node) { // the function. if n.Class() == PPARAM || n.Class() == PPARAMOUT { if n.Xoffset == BADWIDTH { - Fatalf("addrescapes before param assignment") + base.Fatalf("addrescapes before param assignment") } // We rewrite n below to be a heap variable (indirection of heapaddr). @@ -350,7 +351,7 @@ func moveToHeap(n *Node) { } } if !found { - Fatalf("cannot find %v in local variable list", n) + base.Fatalf("cannot find %v in local variable list", n) } Curfn.Func.Dcl = append(Curfn.Func.Dcl, n) } @@ -360,8 +361,8 @@ func moveToHeap(n *Node) { n.Xoffset = 0 n.Name.Param.Heapaddr = heapaddr n.Esc = EscHeap - if Flag.LowerM != 0 { - Warnl(n.Pos, "moved to heap: %v", n) + if base.Flag.LowerM != 0 { + base.WarnfAt(n.Pos, "moved to heap: %v", n) } } @@ -390,8 +391,8 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string { // but we are reusing the ability to annotate an individual function // argument and pass those annotations along to importing code. if f.Type.IsUintptr() { - if Flag.LowerM != 0 { - Warnl(f.Pos, "assuming %v is unsafe uintptr", name()) + if base.Flag.LowerM != 0 { + base.WarnfAt(f.Pos, "assuming %v is unsafe uintptr", name()) } return unsafeUintptrTag } @@ -405,12 +406,12 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string { // External functions are assumed unsafe, unless // //go:noescape is given before the declaration. if fn.Func.Pragma&Noescape != 0 { - if Flag.LowerM != 0 && f.Sym != nil { - Warnl(f.Pos, "%v does not escape", name()) + if base.Flag.LowerM != 0 && f.Sym != nil { + base.WarnfAt(f.Pos, "%v does not escape", name()) } } else { - if Flag.LowerM != 0 && f.Sym != nil { - Warnl(f.Pos, "leaking param: %v", name()) + if base.Flag.LowerM != 0 && f.Sym != nil { + base.WarnfAt(f.Pos, "leaking param: %v", name()) } esc.AddHeap(0) } @@ -420,15 +421,15 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string { if fn.Func.Pragma&UintptrEscapes != 0 { if f.Type.IsUintptr() { - if Flag.LowerM != 0 { - Warnl(f.Pos, "marking %v as escaping uintptr", name()) + if base.Flag.LowerM != 0 { + base.WarnfAt(f.Pos, "marking %v as escaping uintptr", name()) } return uintptrEscapesTag } if f.IsDDD() && f.Type.Elem().IsUintptr() { // final argument is ...uintptr. - if Flag.LowerM != 0 { - Warnl(f.Pos, "marking %v as escaping ...uintptr", name()) + if base.Flag.LowerM != 0 { + base.WarnfAt(f.Pos, "marking %v as escaping ...uintptr", name()) } return uintptrEscapesTag } @@ -449,22 +450,22 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string { esc := loc.paramEsc esc.Optimize() - if Flag.LowerM != 0 && !loc.escapes { + if base.Flag.LowerM != 0 && !loc.escapes { if esc.Empty() { - Warnl(f.Pos, "%v does not escape", name()) + base.WarnfAt(f.Pos, "%v does not escape", name()) } if x := esc.Heap(); x >= 0 { if x == 0 { - Warnl(f.Pos, "leaking param: %v", name()) + base.WarnfAt(f.Pos, "leaking param: %v", name()) } else { // TODO(mdempsky): Mention level=x like below? 
- Warnl(f.Pos, "leaking param content: %v", name()) + base.WarnfAt(f.Pos, "leaking param content: %v", name()) } } for i := 0; i < numEscResults; i++ { if x := esc.Result(i); x >= 0 { res := fn.Type.Results().Field(i).Sym - Warnl(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x) + base.WarnfAt(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x) } } } diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 27645fb888720..aaf768d85ab8d 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/logopt" "cmd/compile/internal/types" "cmd/internal/src" @@ -180,7 +181,7 @@ func escFmt(n *Node, short bool) string { func escapeFuncs(fns []*Node, recursive bool) { for _, fn := range fns { if fn.Op != ODCLFUNC { - Fatalf("unexpected node: %v", fn) + base.Fatalf("unexpected node: %v", fn) } } @@ -202,10 +203,10 @@ func escapeFuncs(fns []*Node, recursive bool) { func (e *Escape) initFunc(fn *Node) { if fn.Op != ODCLFUNC || fn.Esc != EscFuncUnknown { - Fatalf("unexpected node: %v", fn) + base.Fatalf("unexpected node: %v", fn) } fn.Esc = EscFuncPlanned - if Flag.LowerM > 3 { + if base.Flag.LowerM > 3 { Dump("escAnalyze", fn) } @@ -279,18 +280,18 @@ func (e *Escape) stmt(n *Node) { lno := setlineno(n) defer func() { - lineno = lno + base.Pos = lno }() - if Flag.LowerM > 2 { - fmt.Printf("%v:[%d] %v stmt: %v\n", linestr(lineno), e.loopDepth, funcSym(e.curfn), n) + if base.Flag.LowerM > 2 { + fmt.Printf("%v:[%d] %v stmt: %v\n", base.FmtPos(base.Pos), e.loopDepth, funcSym(e.curfn), n) } e.stmts(n.Ninit) switch n.Op { default: - Fatalf("unexpected stmt: %v", n) + base.Fatalf("unexpected stmt: %v", n) case ODCLCONST, ODCLTYPE, OEMPTY, OFALL, OINLMARK: // nop @@ -310,16 +311,16 @@ func (e *Escape) stmt(n *Node) { case OLABEL: switch asNode(n.Sym.Label) { case nonlooping: - if Flag.LowerM > 2 { - fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n) + if base.Flag.LowerM > 2 { + fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n) } case looping: - if Flag.LowerM > 2 { - fmt.Printf("%v: %v looping label\n", linestr(lineno), n) + if base.Flag.LowerM > 2 { + fmt.Printf("%v: %v looping label\n", base.FmtPos(base.Pos), n) } e.loopDepth++ default: - Fatalf("label missing tag") + base.Fatalf("label missing tag") } n.Sym.Label = nil @@ -460,7 +461,7 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) { lno := setlineno(n) defer func() { - lineno = lno + base.Pos = lno }() uintptrEscapesHack := k.uintptrEscapesHack @@ -474,7 +475,7 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) { switch n.Op { default: - Fatalf("unexpected expr: %v", n) + base.Fatalf("unexpected expr: %v", n) case OLITERAL, ONIL, OGETG, OCLOSUREVAR, OTYPE, OMETHEXPR: // nop @@ -653,7 +654,7 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) { // for conversions from an unsafe.Pointer. 
func (e *Escape) unsafeValue(k EscHole, n *Node) { if n.Type.Etype != TUINTPTR { - Fatalf("unexpected type %v for %v", n.Type, n) + base.Fatalf("unexpected type %v for %v", n.Type, n) } e.stmts(n.Ninit) @@ -711,7 +712,7 @@ func (e *Escape) addr(n *Node) EscHole { switch n.Op { default: - Fatalf("unexpected addr: %v", n) + base.Fatalf("unexpected addr: %v", n) case ONAME: if n.Class() == PEXTERN { break @@ -752,8 +753,8 @@ func (e *Escape) addrs(l Nodes) []EscHole { func (e *Escape) assign(dst, src *Node, why string, where *Node) { // Filter out some no-op assignments for escape analysis. ignore := dst != nil && src != nil && isSelfAssign(dst, src) - if ignore && Flag.LowerM != 0 { - Warnl(where.Pos, "%v ignoring self-assignment in %S", funcSym(e.curfn), where) + if ignore && base.Flag.LowerM != 0 { + base.WarnfAt(where.Pos, "%v ignoring self-assignment in %S", funcSym(e.curfn), where) } k := e.addr(dst) @@ -797,7 +798,7 @@ func (e *Escape) call(ks []EscHole, call, where *Node) { switch call.Op { default: - Fatalf("unexpected call op: %v", call.Op) + base.Fatalf("unexpected call op: %v", call.Op) case OCALLFUNC, OCALLMETH, OCALLINTER: fixVariadicCall(call) @@ -936,7 +937,7 @@ func (e *Escape) tagHole(ks []EscHole, fn *Node, param *types.Field) EscHole { func (e *Escape) inMutualBatch(fn *Node) bool { if fn.Name.Defn != nil && fn.Name.Defn.Esc < EscFuncTagged { if fn.Name.Defn.Esc == EscFuncUnknown { - Fatalf("graph inconsistency") + base.Fatalf("graph inconsistency") } return true } @@ -964,9 +965,9 @@ type EscNote struct { func (k EscHole) note(where *Node, why string) EscHole { if where == nil || why == "" { - Fatalf("note: missing where/why") + base.Fatalf("note: missing where/why") } - if Flag.LowerM >= 2 || logopt.Enabled() { + if base.Flag.LowerM >= 2 || logopt.Enabled() { k.notes = &EscNote{ next: k.notes, where: where, @@ -979,7 +980,7 @@ func (k EscHole) note(where *Node, why string) EscHole { func (k EscHole) shift(delta int) EscHole { k.derefs += delta if k.derefs < -1 { - Fatalf("derefs underflow: %v", k.derefs) + base.Fatalf("derefs underflow: %v", k.derefs) } return k } @@ -1016,7 +1017,7 @@ func (e *Escape) teeHole(ks ...EscHole) EscHole { // *ltmp" and "l2 = ltmp" and return "ltmp = &_" // instead. 
if k.derefs < 0 { - Fatalf("teeHole: negative derefs") + base.Fatalf("teeHole: negative derefs") } e.flow(k, loc) @@ -1054,7 +1055,7 @@ func canonicalNode(n *Node) *Node { if n != nil && n.Op == ONAME && n.Name.IsClosureVar() { n = n.Name.Defn if n.Name.IsClosureVar() { - Fatalf("still closure var") + base.Fatalf("still closure var") } } @@ -1063,10 +1064,10 @@ func canonicalNode(n *Node) *Node { func (e *Escape) newLoc(n *Node, transient bool) *EscLocation { if e.curfn == nil { - Fatalf("e.curfn isn't set") + base.Fatalf("e.curfn isn't set") } if n != nil && n.Type != nil && n.Type.NotInHeap() { - yyerrorl(n.Pos, "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type) + base.ErrorfAt(n.Pos, "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type) } n = canonicalNode(n) @@ -1079,11 +1080,11 @@ func (e *Escape) newLoc(n *Node, transient bool) *EscLocation { e.allLocs = append(e.allLocs, loc) if n != nil { if n.Op == ONAME && n.Name.Curfn != e.curfn { - Fatalf("curfn mismatch: %v != %v", n.Name.Curfn, e.curfn) + base.Fatalf("curfn mismatch: %v != %v", n.Name.Curfn, e.curfn) } if n.HasOpt() { - Fatalf("%v already has a location", n) + base.Fatalf("%v already has a location", n) } n.SetOpt(loc) @@ -1112,9 +1113,9 @@ func (e *Escape) flow(k EscHole, src *EscLocation) { return } if dst.escapes && k.derefs < 0 { // dst = &src - if Flag.LowerM >= 2 || logopt.Enabled() { - pos := linestr(src.n.Pos) - if Flag.LowerM >= 2 { + if base.Flag.LowerM >= 2 || logopt.Enabled() { + pos := base.FmtPos(src.n.Pos) + if base.Flag.LowerM >= 2 { fmt.Printf("%s: %v escapes to heap:\n", pos, src.n) } explanation := e.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{}) @@ -1214,9 +1215,9 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc // that value flow for tagging the function // later. if l.isName(PPARAM) { - if (logopt.Enabled() || Flag.LowerM >= 2) && !l.escapes { - if Flag.LowerM >= 2 { - fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", linestr(l.n.Pos), l.n, e.explainLoc(root), derefs) + if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.escapes { + if base.Flag.LowerM >= 2 { + fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos), l.n, e.explainLoc(root), derefs) } explanation := e.explainPath(root, l) if logopt.Enabled() { @@ -1231,9 +1232,9 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc // outlives it, then l needs to be heap // allocated. if addressOf && !l.escapes { - if logopt.Enabled() || Flag.LowerM >= 2 { - if Flag.LowerM >= 2 { - fmt.Printf("%s: %v escapes to heap:\n", linestr(l.n.Pos), l.n) + if logopt.Enabled() || base.Flag.LowerM >= 2 { + if base.Flag.LowerM >= 2 { + fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos), l.n) } explanation := e.explainPath(root, l) if logopt.Enabled() { @@ -1265,12 +1266,12 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc // explainPath prints an explanation of how src flows to the walk root. func (e *Escape) explainPath(root, src *EscLocation) []*logopt.LoggedOpt { visited := make(map[*EscLocation]bool) - pos := linestr(src.n.Pos) + pos := base.FmtPos(src.n.Pos) var explanation []*logopt.LoggedOpt for { // Prevent infinite loop. 
if visited[src] { - if Flag.LowerM >= 2 { + if base.Flag.LowerM >= 2 { fmt.Printf("%s: warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos) } break @@ -1279,7 +1280,7 @@ func (e *Escape) explainPath(root, src *EscLocation) []*logopt.LoggedOpt { dst := src.dst edge := &dst.edges[src.dstEdgeIdx] if edge.src != src { - Fatalf("path inconsistency: %v != %v", edge.src, src) + base.Fatalf("path inconsistency: %v != %v", edge.src, src) } explanation = e.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation) @@ -1298,7 +1299,7 @@ func (e *Escape) explainFlow(pos string, dst, srcloc *EscLocation, derefs int, n if derefs >= 0 { ops = strings.Repeat("*", derefs) } - print := Flag.LowerM >= 2 + print := base.Flag.LowerM >= 2 flow := fmt.Sprintf(" flow: %s = %s%v:", e.explainLoc(dst), ops, e.explainLoc(srcloc)) if print { @@ -1316,7 +1317,7 @@ func (e *Escape) explainFlow(pos string, dst, srcloc *EscLocation, derefs int, n for note := notes; note != nil; note = note.next { if print { - fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, linestr(note.where.Pos)) + fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos)) } if logopt.Enabled() { explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos, "escflow", "escape", e.curfn.funcname(), @@ -1394,7 +1395,7 @@ func (e *Escape) outlives(l, other *EscLocation) bool { // containsClosure reports whether c is a closure contained within f. func containsClosure(f, c *Node) bool { if f.Op != ODCLFUNC || c.Op != ODCLFUNC { - Fatalf("bad containsClosure: %v, %v", f, c) + base.Fatalf("bad containsClosure: %v, %v", f, c) } // Common case. @@ -1452,8 +1453,8 @@ func (e *Escape) finish(fns []*Node) { if loc.escapes { if n.Op != ONAME { - if Flag.LowerM != 0 { - Warnl(n.Pos, "%S escapes to heap", n) + if base.Flag.LowerM != 0 { + base.WarnfAt(n.Pos, "%S escapes to heap", n) } if logopt.Enabled() { logopt.LogOpt(n.Pos, "escape", "escape", e.curfn.funcname()) @@ -1462,8 +1463,8 @@ func (e *Escape) finish(fns []*Node) { n.Esc = EscHeap addrescapes(n) } else { - if Flag.LowerM != 0 && n.Op != ONAME { - Warnl(n.Pos, "%S does not escape", n) + if base.Flag.LowerM != 0 && n.Op != ONAME { + base.WarnfAt(n.Pos, "%S does not escape", n) } n.Esc = EscNone if loc.transient { @@ -1516,7 +1517,7 @@ func (l *EscLeaks) add(i, derefs int) { func (l *EscLeaks) set(i, derefs int) { v := derefs + 1 if v < 0 { - Fatalf("invalid derefs count: %v", derefs) + base.Fatalf("invalid derefs count: %v", derefs) } if v > math.MaxUint8 { v = math.MaxUint8 diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 48f77fa18293e..1fa64fbe4497d 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/bio" "cmd/internal/src" @@ -14,7 +15,7 @@ import ( func exportf(bout *bio.Writer, format string, args ...interface{}) { fmt.Fprintf(bout, format, args...) - if Debug.Export != 0 { + if base.Debug.Export != 0 { fmt.Printf(format, args...) 
} } @@ -28,7 +29,7 @@ func exportsym(n *Node) { } n.Sym.SetOnExportList(true) - if Flag.E != 0 { + if base.Flag.E != 0 { fmt.Printf("export symbol %v\n", n.Sym) } @@ -53,7 +54,7 @@ func autoexport(n *Node, ctxt Class) { if types.IsExported(n.Sym.Name) || initname(n.Sym.Name) { exportsym(n) } - if Flag.AsmHdr != "" && !n.Sym.Asm() { + if base.Flag.AsmHdr != "" && !n.Sym.Asm() { n.Sym.SetAsm(true) asmlist = append(asmlist, n) } @@ -67,8 +68,8 @@ func dumpexport(bout *bio.Writer) { size := bout.Offset() - off exportf(bout, "\n$$\n") - if Debug.Export != 0 { - fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", Ctxt.Pkgpath, size) + if base.Debug.Export != 0 { + fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, size) } } @@ -80,7 +81,7 @@ func importsym(ipkg *types.Pkg, s *types.Sym, op Op) *Node { // is declarations for Runtimepkg, which are populated // by loadsys instead. if s.Pkg != Runtimepkg { - Fatalf("missing ONONAME for %v\n", s) + base.Fatalf("missing ONONAME for %v\n", s) } n = dclname(s) @@ -88,7 +89,7 @@ func importsym(ipkg *types.Pkg, s *types.Sym, op Op) *Node { s.Importdef = ipkg } if n.Op != ONONAME && n.Op != op { - redeclare(lineno, s, fmt.Sprintf("during import %q", ipkg.Path)) + redeclare(base.Pos, s, fmt.Sprintf("during import %q", ipkg.Path)) } return n } @@ -111,7 +112,7 @@ func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type { t := n.Type if t == nil { - Fatalf("importtype %v", s) + base.Fatalf("importtype %v", s) } return t } @@ -122,7 +123,7 @@ func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op Op, ctxt Class, t n := importsym(ipkg, s, op) if n.Op != ONONAME { if n.Op == op && (n.Class() != ctxt || !types.Identical(n.Type, t)) { - redeclare(lineno, s, fmt.Sprintf("during import %q", ipkg.Path)) + redeclare(base.Pos, s, fmt.Sprintf("during import %q", ipkg.Path)) } return nil } @@ -147,7 +148,7 @@ func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val n.SetVal(val) - if Flag.E != 0 { + if base.Flag.E != 0 { fmt.Printf("import const %v %L = %v\n", s, t, val) } } @@ -162,7 +163,7 @@ func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) { n.Func = new(Func) - if Flag.E != 0 { + if base.Flag.E != 0 { fmt.Printf("import func %v%S\n", s, t) } } @@ -175,7 +176,7 @@ func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) { return } - if Flag.E != 0 { + if base.Flag.E != 0 { fmt.Printf("import var %v %L\n", s, t) } } @@ -188,15 +189,15 @@ func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) { return } - if Flag.E != 0 { + if base.Flag.E != 0 { fmt.Printf("import type %v = %L\n", s, t) } } func dumpasmhdr() { - b, err := bio.Create(Flag.AsmHdr) + b, err := bio.Create(base.Flag.AsmHdr) if err != nil { - Fatalf("%v", err) + base.Fatalf("%v", err) } fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", localpkg.Name) for _, n := range asmlist { diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index 51e139e3190b8..9248eb22aa28d 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -6,6 +6,7 @@ package gc import ( "bytes" + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/src" "fmt" @@ -47,7 +48,7 @@ func fmtFlag(s fmt.State, verb rune) FmtFlag { flag |= FmtSign } if s.Flag(' ') { - Fatalf("FmtUnsigned in format string") + base.Fatalf("FmtUnsigned in format string") } if _, ok := s.Precision(); ok { flag |= FmtComma @@ -313,7 +314,7 @@ 
func (m fmtMode) prepareArgs(args []interface{}) { case int32, int64, string, types.EType, constant.Value: // OK: printing these types doesn't depend on mode default: - Fatalf("mode.prepareArgs type %T", arg) + base.Fatalf("mode.prepareArgs type %T", arg) } } } @@ -339,14 +340,14 @@ func (n *Node) jconv(s fmt.State, flag FmtFlag) { short := flag&FmtShort != 0 // Useful to see which nodes in an AST printout are actually identical - if Debug.DumpPtrs != 0 { + if base.Debug.DumpPtrs != 0 { fmt.Fprintf(s, " p(%p)", n) } if !short && n.Name != nil && n.Name.Vargen != 0 { fmt.Fprintf(s, " g(%d)", n.Name.Vargen) } - if Debug.DumpPtrs != 0 && !short && n.Name != nil && n.Name.Defn != nil { + if base.Debug.DumpPtrs != 0 && !short && n.Name != nil && n.Name.Defn != nil { // Useful to see where Defn is set and what node it points to fmt.Fprintf(s, " defn(%p)", n.Name.Defn) } @@ -817,7 +818,7 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited case mt.Hiter: b.WriteString("map.iter[") default: - Fatalf("unknown internal map type") + base.Fatalf("unknown internal map type") } tconv2(b, m.Key(), 0, mode, visited) b.WriteByte(']') @@ -1416,7 +1417,7 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) { case OSLICEHEADER: if n.List.Len() != 2 { - Fatalf("bad OSLICEHEADER list length %d", n.List.Len()) + base.Fatalf("bad OSLICEHEADER list length %d", n.List.Len()) } mode.Fprintf(s, "sliceheader{%v,%v,%v}", n.Left, n.List.First(), n.List.Second()) @@ -1806,7 +1807,7 @@ func (n *Node) nconv(s fmt.State, flag FmtFlag, mode fmtMode) { dumpdepth-- default: - Fatalf("unhandled %%N mode: %d", mode) + base.Fatalf("unhandled %%N mode: %d", mode) } } diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go index d882d6d672b3d..a70bddca81340 100644 --- a/src/cmd/compile/internal/gc/gen.go +++ b/src/cmd/compile/internal/gc/gen.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/src" @@ -52,14 +53,14 @@ func autotmpname(n int) string { // make a new Node off the books func tempAt(pos src.XPos, curfn *Node, t *types.Type) *Node { if curfn == nil { - Fatalf("no curfn for tempAt") + base.Fatalf("no curfn for tempAt") } if curfn.Op == OCLOSURE { Dump("tempAt", curfn) - Fatalf("adding tempAt to wrong closure function") + base.Fatalf("adding tempAt to wrong closure function") } if t == nil { - Fatalf("tempAt called with nil type") + base.Fatalf("tempAt called with nil type") } s := &types.Sym{ @@ -82,5 +83,5 @@ func tempAt(pos src.XPos, curfn *Node, t *types.Type) *Node { } func temp(t *types.Type) *Node { - return tempAt(lineno, Curfn, t) + return tempAt(base.Pos, Curfn, t) } diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index 947dae476b119..e9ff5aeb138af 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/ssa" "cmd/compile/internal/types" "cmd/internal/obj" @@ -39,7 +40,7 @@ var ( // isRuntimePkg reports whether p is package runtime. func isRuntimePkg(p *types.Pkg) bool { - if Flag.CompilingRuntime && p == localpkg { + if base.Flag.CompilingRuntime && p == localpkg { return true } return p.Path == "runtime" @@ -48,7 +49,7 @@ func isRuntimePkg(p *types.Pkg) bool { // isReflectPkg reports whether p is package reflect. 
func isReflectPkg(p *types.Pkg) bool { if p == localpkg { - return Ctxt.Pkgpath == "reflect" + return base.Ctxt.Pkgpath == "reflect" } return p.Path == "reflect" } @@ -182,8 +183,6 @@ var instrumenting bool // Whether we are tracking lexical scopes for DWARF. var trackScopes bool -var Ctxt *obj.Link - var nodfp *Node var autogeneratedPos src.XPos diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index 00d425a77cc86..92a3611cb7851 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -31,6 +31,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/ssa" "cmd/internal/obj" "cmd/internal/objabi" @@ -57,8 +58,8 @@ type Progs struct { // worker indicates which of the backend workers will use the Progs. func newProgs(fn *Node, worker int) *Progs { pp := new(Progs) - if Ctxt.CanReuseProgs() { - sz := len(sharedProgArray) / Flag.LowerC + if base.Ctxt.CanReuseProgs() { + sz := len(sharedProgArray) / base.Flag.LowerC pp.progcache = sharedProgArray[sz*worker : sz*(worker+1)] } pp.curfn = fn @@ -83,19 +84,19 @@ func (pp *Progs) NewProg() *obj.Prog { } else { p = new(obj.Prog) } - p.Ctxt = Ctxt + p.Ctxt = base.Ctxt return p } // Flush converts from pp to machine code. func (pp *Progs) Flush() { plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.curfn} - obj.Flushplist(Ctxt, plist, pp.NewProg, Ctxt.Pkgpath) + obj.Flushplist(base.Ctxt, plist, pp.NewProg, base.Ctxt.Pkgpath) } // Free clears pp and any associated resources. func (pp *Progs) Free() { - if Ctxt.CanReuseProgs() { + if base.Ctxt.CanReuseProgs() { // Clear progs to enable GC and avoid abuse. s := pp.progcache[:pp.cacheidx] for i := range s { @@ -133,8 +134,8 @@ func (pp *Progs) Prog(as obj.As) *obj.Prog { pp.clearp(pp.next) p.Link = pp.next - if !pp.pos.IsKnown() && Flag.K != 0 { - Warn("prog: unknown position (line 0)") + if !pp.pos.IsKnown() && base.Flag.K != 0 { + base.Warn("prog: unknown position (line 0)") } p.As = as @@ -174,7 +175,7 @@ func (pp *Progs) Appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16 func (pp *Progs) settext(fn *Node) { if pp.Text != nil { - Fatalf("Progs.settext called twice") + base.Fatalf("Progs.settext called twice") } ptxt := pp.Prog(obj.ATEXT) pp.Text = ptxt @@ -193,7 +194,7 @@ func (pp *Progs) settext(fn *Node) { // called for both functions with bodies and functions without bodies. func (f *Func) initLSym(hasBody bool) { if f.lsym != nil { - Fatalf("Func.initLSym called twice") + base.Fatalf("Func.initLSym called twice") } if nam := f.Nname; !nam.isBlank() { @@ -215,7 +216,7 @@ func (f *Func) initLSym(hasBody bool) { // using the expected ABI. want := obj.ABIInternal if f.lsym.ABI() != want { - Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.lsym.Name, f.lsym.ABI(), want) + base.Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.lsym.Name, f.lsym.ABI(), want) } } @@ -249,7 +250,7 @@ func (f *Func) initLSym(hasBody bool) { } asym.SetABI(aliasABI) asym.Set(obj.AttrDuplicateOK, true) - Ctxt.ABIAliases = append(Ctxt.ABIAliases, asym) + base.Ctxt.ABIAliases = append(base.Ctxt.ABIAliases, asym) } } @@ -278,14 +279,14 @@ func (f *Func) initLSym(hasBody bool) { // Clumsy but important. // See test/recover.go for test cases and src/reflect/value.go // for the actual functions being considered. 
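	// (Informal note: obj.WRAPPER marks the frame so that panic and
	// recover bookkeeping treats it as transparent, letting a recover
	// in a method called through reflect behave as if the method had
	// been called directly.)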
- if Ctxt.Pkgpath == "reflect" { + if base.Ctxt.Pkgpath == "reflect" { switch f.Nname.Sym.Name { case "callReflect", "callMethod": flag |= obj.WRAPPER } } - Ctxt.InitTextSym(f.lsym, flag) + base.Ctxt.InitTextSym(f.lsym, flag) } func ggloblnod(nam *Node) { @@ -298,7 +299,7 @@ func ggloblnod(nam *Node) { if nam.Type != nil && !nam.Type.HasPointers() { flags |= obj.NOPTR } - Ctxt.Globl(s, nam.Type.Width, flags) + base.Ctxt.Globl(s, nam.Type.Width, flags) if nam.Name.LibfuzzerExtraCounter() { s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER } @@ -315,7 +316,7 @@ func ggloblsym(s *obj.LSym, width int32, flags int16) { s.Set(obj.AttrLocal, true) flags &^= obj.LOCAL } - Ctxt.Globl(s, int64(width), int(flags)) + base.Ctxt.Globl(s, int64(width), int(flags)) } func Addrconst(a *obj.Addr, v int64) { @@ -326,7 +327,7 @@ func Addrconst(a *obj.Addr, v int64) { func Patch(p *obj.Prog, to *obj.Prog) { if p.To.Type != obj.TYPE_BRANCH { - Fatalf("patch: not a branch") + base.Fatalf("patch: not a branch") } p.To.SetTarget(to) p.To.Offset = to.Pc diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 447f938a0ac35..246a057ade132 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -204,6 +204,7 @@ package gc import ( "bufio" "bytes" + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/goobj" "cmd/internal/src" @@ -266,7 +267,7 @@ func iexport(out *bufio.Writer) { p.typIndex[pt] = uint64(i) } if len(p.typIndex) > predeclReserved { - Fatalf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved) + base.Fatalf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved) } // Initialize work queue with exported declarations. @@ -304,8 +305,8 @@ func iexport(out *bufio.Writer) { // Add fingerprint (used by linker object file). // Attach this to the end, so tools (e.g. gcimporter) don't care. - copy(Ctxt.Fingerprint[:], h.Sum(nil)[:]) - out.Write(Ctxt.Fingerprint[:]) + copy(base.Ctxt.Fingerprint[:], h.Sum(nil)[:]) + out.Write(base.Ctxt.Fingerprint[:]) } // writeIndex writes out an object index. mainIndex indicates whether @@ -394,7 +395,7 @@ func (p *iexporter) stringOff(s string) uint64 { // pushDecl adds n to the declaration work queue, if not already present. func (p *iexporter) pushDecl(n *Node) { if n.Sym == nil || asNode(n.Sym.Def) != n && n.Op != OTYPE { - Fatalf("weird Sym: %v, %v", n, n.Sym) + base.Fatalf("weird Sym: %v, %v", n, n.Sym) } // Don't export predeclared declarations. @@ -437,7 +438,7 @@ func (p *iexporter) doDecl(n *Node) { case PFUNC: if n.IsMethod() { - Fatalf("unexpected method: %v", n) + base.Fatalf("unexpected method: %v", n) } // Function. 
@@ -447,7 +448,7 @@ func (p *iexporter) doDecl(n *Node) { w.funcExt(n) default: - Fatalf("unexpected class: %v, %v", n, n.Class()) + base.Fatalf("unexpected class: %v, %v", n, n.Class()) } case OLITERAL: @@ -503,7 +504,7 @@ func (p *iexporter) doDecl(n *Node) { } default: - Fatalf("unexpected node: %v", n) + base.Fatalf("unexpected node: %v", n) } p.declIndex[n] = w.flush() @@ -523,7 +524,7 @@ func (p *iexporter) doInline(f *Node) { } func (w *exportWriter) pos(pos src.XPos) { - p := Ctxt.PosTable.Pos(pos) + p := base.Ctxt.PosTable.Pos(pos) file := p.Base().AbsFilename() line := int64(p.RelLine()) column := int64(p.RelCol()) @@ -579,7 +580,7 @@ func (w *exportWriter) qualifiedIdent(n *Node) { func (w *exportWriter) selector(s *types.Sym) { if w.currPkg == nil { - Fatalf("missing currPkg") + base.Fatalf("missing currPkg") } // Method selectors are rewritten into method symbols (of the @@ -594,7 +595,7 @@ func (w *exportWriter) selector(s *types.Sym) { pkg = localpkg } if s.Pkg != pkg { - Fatalf("package mismatch in selector: %v in package %q, but want %q", s, s.Pkg.Path, pkg.Path) + base.Fatalf("package mismatch in selector: %v in package %q, but want %q", s, s.Pkg.Path, pkg.Path) } } @@ -633,7 +634,7 @@ func (w *exportWriter) startType(k itag) { func (w *exportWriter) doTyp(t *types.Type) { if t.Sym != nil { if t.Sym.Pkg == builtinpkg || t.Sym.Pkg == unsafepkg { - Fatalf("builtin type missing from typIndex: %v", t) + base.Fatalf("builtin type missing from typIndex: %v", t) } w.startType(definedType) @@ -710,7 +711,7 @@ func (w *exportWriter) doTyp(t *types.Type) { } default: - Fatalf("unexpected type: %v", t) + base.Fatalf("unexpected type: %v", t) } } @@ -773,7 +774,7 @@ func constTypeOf(typ *types.Type) constant.Kind { return constant.Complex } - Fatalf("unexpected constant type: %v", typ) + base.Fatalf("unexpected constant type: %v", typ) return 0 } @@ -851,7 +852,7 @@ func (w *exportWriter) mpint(x constant.Value, typ *types.Type) { negative := constant.Sign(x) < 0 if !signed && negative { - Fatalf("negative unsigned integer; type %v, value %v", typ, x) + base.Fatalf("negative unsigned integer; type %v, value %v", typ, x) } b := constant.Bytes(x) // little endian @@ -860,10 +861,10 @@ func (w *exportWriter) mpint(x constant.Value, typ *types.Type) { } if len(b) > 0 && b[0] == 0 { - Fatalf("leading zeros") + base.Fatalf("leading zeros") } if uint(len(b)) > maxBytes { - Fatalf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x) + base.Fatalf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x) } maxSmall := 256 - maxBytes @@ -900,7 +901,7 @@ func (w *exportWriter) mpint(x constant.Value, typ *types.Type) { } } if n < maxSmall || n >= 256 { - Fatalf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n) + base.Fatalf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n) } w.data.WriteByte(byte(n)) @@ -916,7 +917,7 @@ func (w *exportWriter) mpint(x constant.Value, typ *types.Type) { func (w *exportWriter) mpfloat(v constant.Value, typ *types.Type) { f := bigFloatVal(v) if f.IsInf() { - Fatalf("infinite constant") + base.Fatalf("infinite constant") } // Break into f = mant × 2**exp, with 0.5 <= mant < 1. 
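	// Worked example, sketching the steps below: f = 6.25 splits as
	// mant = 0.78125, exp = 3; scaling mant up by its minimal
	// precision of 5 bits gives the integer mantissa 25 with exp
	// adjusted to -2, and 25 × 2**-2 == 6.25.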
@@ -930,7 +931,7 @@ func (w *exportWriter) mpfloat(v constant.Value, typ *types.Type) { manti, acc := mant.Int(nil) if acc != big.Exact { - Fatalf("mantissa scaling failed for %f (%s)", f, acc) + base.Fatalf("mantissa scaling failed for %f (%s)", f, acc) } w.mpint(makeInt(manti), typ) if manti.Sign() != 0 { @@ -1158,7 +1159,7 @@ func (w *exportWriter) stmt(n *Node) { w.string(n.Sym.Name) default: - Fatalf("exporter: CANNOT EXPORT: %v\nPlease notify gri@\n", n.Op) + base.Fatalf("exporter: CANNOT EXPORT: %v\nPlease notify gri@\n", n.Op) } } @@ -1169,7 +1170,7 @@ func (w *exportWriter) caseList(sw *Node) { w.uint64(uint64(len(cases))) for _, cas := range cases { if cas.Op != OCASE { - Fatalf("expected OCASE, got %v", cas) + base.Fatalf("expected OCASE, got %v", cas) } w.pos(cas.Pos) w.stmtList(cas.List) @@ -1207,7 +1208,7 @@ func (w *exportWriter) expr(n *Node) { // (somewhat closely following the structure of exprfmt in fmt.go) case ONIL: if !n.Type.HasNil() { - Fatalf("unexpected type for nil: %v", n.Type) + base.Fatalf("unexpected type for nil: %v", n.Type) } if n.Orig != nil && n.Orig != n { w.expr(n.Orig) @@ -1256,7 +1257,7 @@ func (w *exportWriter) expr(n *Node) { var s *types.Sym if n.Left != nil { if n.Left.Op != ONONAME { - Fatalf("expected ONONAME, got %v", n.Left) + base.Fatalf("expected ONONAME, got %v", n.Left) } s = n.Left.Sym } @@ -1365,7 +1366,7 @@ func (w *exportWriter) expr(n *Node) { if op == OAPPEND { w.bool(n.IsDDD()) } else if n.IsDDD() { - Fatalf("exporter: unexpected '...' with %v call", op) + base.Fatalf("exporter: unexpected '...' with %v call", op) } case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OGETG: @@ -1419,7 +1420,7 @@ func (w *exportWriter) expr(n *Node) { // has already been replaced with literals default: - Fatalf("cannot export %v (%d) node\n"+ + base.Fatalf("cannot export %v (%d) node\n"+ "\t==> please file an issue and assign to gri@", n.Op, int(n.Op)) } } @@ -1484,18 +1485,18 @@ func (w *exportWriter) localIdent(s *types.Sym, v int32) { // TODO(mdempsky): Fix autotmp hack. 
if i := strings.LastIndex(name, "."); i >= 0 && !strings.HasPrefix(name, ".autotmp_") { - Fatalf("unexpected dot in identifier: %v", name) + base.Fatalf("unexpected dot in identifier: %v", name) } if v > 0 { if strings.Contains(name, "·") { - Fatalf("exporter: unexpected · in symbol name") + base.Fatalf("exporter: unexpected · in symbol name") } name = fmt.Sprintf("%s·%d", name, v) } if !types.IsExported(name) && s.Pkg != w.currPkg { - Fatalf("weird package in name: %v => %v, not %q", s, name, w.currPkg.Path) + base.Fatalf("weird package in name: %v => %v, not %q", s, name, w.currPkg.Path) } w.string(name) diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index a8a84b8cbc426..cc0209ed03d72 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -8,6 +8,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/bio" "cmd/internal/goobj" @@ -60,7 +61,7 @@ func expandInline(fn *Node) { r := importReaderFor(fn, inlineImporter) if r == nil { - Fatalf("missing import reader for %v", fn) + base.Fatalf("missing import reader for %v", fn) } r.doInline(fn) @@ -83,8 +84,8 @@ type intReader struct { func (r *intReader) int64() int64 { i, err := binary.ReadVarint(r.Reader) if err != nil { - yyerror("import %q: read error: %v", r.pkg.Path, err) - errorexit() + base.Errorf("import %q: read error: %v", r.pkg.Path, err) + base.ErrorExit() } return i } @@ -92,8 +93,8 @@ func (r *intReader) int64() int64 { func (r *intReader) uint64() uint64 { i, err := binary.ReadUvarint(r.Reader) if err != nil { - yyerror("import %q: read error: %v", r.pkg.Path, err) - errorexit() + base.Errorf("import %q: read error: %v", r.pkg.Path, err) + base.ErrorExit() } return i } @@ -103,8 +104,8 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) version := ird.uint64() if version != iexportVersion { - yyerror("import %q: unknown export format version %d", pkg.Path, version) - errorexit() + base.Errorf("import %q: unknown export format version %d", pkg.Path, version) + base.ErrorExit() } sLen := ird.uint64() @@ -115,8 +116,8 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) // returning individual substrings very efficiently. data, err := mapFile(in.File(), in.Offset(), int64(sLen+dLen)) if err != nil { - yyerror("import %q: mapping input: %v", pkg.Path, err) - errorexit() + base.Errorf("import %q: mapping input: %v", pkg.Path, err) + base.ErrorExit() } stringData := data[:sLen] declData := data[sLen:] @@ -152,10 +153,10 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) pkg.Lookup("_").Def = asTypesNode(nblank) } else { if pkg.Name != pkgName { - Fatalf("conflicting package names %v and %v for path %q", pkg.Name, pkgName, pkg.Path) + base.Fatalf("conflicting package names %v and %v for path %q", pkg.Name, pkgName, pkg.Path) } if pkg.Height != pkgHeight { - Fatalf("conflicting package heights %v and %v for path %q", pkg.Height, pkgHeight, pkg.Path) + base.Fatalf("conflicting package heights %v and %v for path %q", pkg.Height, pkgHeight, pkg.Path) } } @@ -171,7 +172,7 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) // Create stub declaration. If used, this will // be overwritten by expandDecl. 
if s.Def != nil { - Fatalf("unexpected definition for %v: %v", s, asNode(s.Def)) + base.Fatalf("unexpected definition for %v: %v", s, asNode(s.Def)) } s.Def = asTypesNode(npos(src.NoXPos, dclname(s))) } @@ -195,8 +196,8 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) // Fingerprint. _, err = io.ReadFull(in, fingerprint[:]) if err != nil { - yyerror("import %s: error reading fingerprint", pkg.Path) - errorexit() + base.Errorf("import %s: error reading fingerprint", pkg.Path) + base.ErrorExit() } return fingerprint } @@ -218,7 +219,7 @@ func (p *iimporter) stringAt(off uint64) string { slen, n := binary.Uvarint(x[:n]) if n <= 0 { - Fatalf("varint failed") + base.Fatalf("varint failed") } spos := off + uint64(n) return p.stringData[spos : spos+slen] @@ -281,7 +282,7 @@ func (r *importReader) setPkg() { func (r *importReader) doDecl(n *Node) { if n.Op != ONONAME { - Fatalf("doDecl: unexpected Op for %v: %v", n.Sym, n.Op) + base.Fatalf("doDecl: unexpected Op for %v: %v", n.Sym, n.Op) } tag := r.byte() @@ -352,7 +353,7 @@ func (r *importReader) doDecl(n *Node) { r.varExt(n) default: - Fatalf("unexpected tag: %v", tag) + base.Fatalf("unexpected tag: %v", tag) } } @@ -372,7 +373,7 @@ func (p *importReader) value(typ *types.Type) constant.Value { return makeComplex(p.float(typ), p.float(typ)) } - Fatalf("unexpected value type: %v", typ) + base.Fatalf("unexpected value type: %v", typ) panic("unreachable") } @@ -405,7 +406,7 @@ func (p *importReader) mpint(x *big.Int, typ *types.Type) { v = -(n &^ 1) >> 1 } if v < 1 || uint(v) > maxBytes { - Fatalf("weird decoding: %v, %v => %v", n, signed, v) + base.Fatalf("weird decoding: %v, %v => %v", n, signed, v) } b := make([]byte, v) p.Read(b) @@ -462,10 +463,10 @@ func (r *importReader) pos() src.XPos { } if r.prevBase == nil { - Fatalf("missing posbase") + base.Fatalf("missing posbase") } pos := src.MakePos(r.prevBase, uint(r.prevLine), uint(r.prevColumn)) - return Ctxt.PosTable.XPos(pos) + return base.Ctxt.PosTable.XPos(pos) } func (r *importReader) typ() *types.Type { @@ -476,7 +477,7 @@ func (p *iimporter) typAt(off uint64) *types.Type { t, ok := p.typCache[off] if !ok { if off < predeclReserved { - Fatalf("predeclared type missing from cache: %d", off) + base.Fatalf("predeclared type missing from cache: %d", off) } t = p.newReader(off-predeclReserved, nil).typ1() p.typCache[off] = t @@ -487,7 +488,7 @@ func (p *iimporter) typAt(off uint64) *types.Type { func (r *importReader) typ1() *types.Type { switch k := r.kind(); k { default: - Fatalf("unexpected kind tag in %q: %v", r.p.ipkg.Path, k) + base.Fatalf("unexpected kind tag in %q: %v", r.p.ipkg.Path, k) return nil case definedType: @@ -502,7 +503,7 @@ func (r *importReader) typ1() *types.Type { expandDecl(n) } if n.Op != OTYPE { - Fatalf("expected OTYPE, got %v: %v, %v", n.Op, n.Sym, n) + base.Fatalf("expected OTYPE, got %v: %v, %v", n.Op, n.Sym, n) } return n.Type case pointerType: @@ -610,7 +611,7 @@ func (r *importReader) bool() bool { func (r *importReader) int64() int64 { n, err := binary.ReadVarint(r) if err != nil { - Fatalf("readVarint: %v", err) + base.Fatalf("readVarint: %v", err) } return n } @@ -618,7 +619,7 @@ func (r *importReader) int64() int64 { func (r *importReader) uint64() uint64 { n, err := binary.ReadUvarint(r) if err != nil { - Fatalf("readVarint: %v", err) + base.Fatalf("readVarint: %v", err) } return n } @@ -626,7 +627,7 @@ func (r *importReader) uint64() uint64 { func (r *importReader) byte() byte { x, err := r.ReadByte() if err != nil { - 
Fatalf("declReader.ReadByte: %v", err) + base.Fatalf("declReader.ReadByte: %v", err) } return x } @@ -674,7 +675,7 @@ func (r *importReader) symIdx(s *types.Sym) { idx := int32(r.int64()) if idx != -1 { if s.Linkname != "" { - Fatalf("bad index for linknamed symbol: %v %d\n", lsym, idx) + base.Fatalf("bad index for linknamed symbol: %v %d\n", lsym, idx) } lsym.SymIdx = idx lsym.Set(obj.AttrIndexed, true) @@ -695,7 +696,7 @@ var typeSymIdx = make(map[*types.Type][2]int64) func (r *importReader) doInline(n *Node) { if len(n.Func.Inl.Body) != 0 { - Fatalf("%v already has inline body", n) + base.Fatalf("%v already has inline body", n) } funchdr(n) @@ -714,8 +715,8 @@ func (r *importReader) doInline(n *Node) { importlist = append(importlist, n) - if Flag.E > 0 && Flag.LowerM > 2 { - if Flag.LowerM > 3 { + if base.Flag.E > 0 && base.Flag.LowerM > 2 { + if base.Flag.LowerM > 3 { fmt.Printf("inl body for %v %#v: %+v\n", n, n.Type, asNodes(n.Func.Inl.Body)) } else { fmt.Printf("inl body for %v %#v: %v\n", n, n.Type, asNodes(n.Func.Inl.Body)) @@ -793,7 +794,7 @@ func (r *importReader) exprList() []*Node { func (r *importReader) expr() *Node { n := r.node() if n != nil && n.Op == OBLOCK { - Fatalf("unexpected block node: %v", n) + base.Fatalf("unexpected block node: %v", n) } return n } @@ -854,11 +855,11 @@ func (r *importReader) node() *Node { case OSTRUCTLIT: // TODO(mdempsky): Export position information for OSTRUCTKEY nodes. - savedlineno := lineno - lineno = r.pos() - n := nodl(lineno, OCOMPLIT, nil, typenod(r.typ())) + savedlineno := base.Pos + base.Pos = r.pos() + n := nodl(base.Pos, OCOMPLIT, nil, typenod(r.typ())) n.List.Set(r.elemList()) // special handling of field names - lineno = savedlineno + base.Pos = savedlineno return n // case OARRAYLIT, OSLICELIT, OMAPLIT: @@ -1070,7 +1071,7 @@ func (r *importReader) node() *Node { return nil default: - Fatalf("cannot import %v (%d) node\n"+ + base.Fatalf("cannot import %v (%d) node\n"+ "\t==> please file an issue and assign to gri@", op, int(op)) panic("unreachable") // satisfy compiler } diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index c3b66a2ad2aae..9319faf6a015d 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/obj" ) @@ -44,7 +45,7 @@ func fninit(n []*Node) { // Make a function that contains all the initialization statements. if len(nf) > 0 { - lineno = nf[0].Pos // prolog/epilog gets line number of first init stmt + base.Pos = nf[0].Pos // prolog/epilog gets line number of first init stmt initializers := lookup("init") fn := dclfunc(initializers, nod(OTFUNC, nil, nil)) for _, dcl := range initTodo.Func.Dcl { @@ -67,7 +68,7 @@ func fninit(n []*Node) { // We only generate temps using initTodo if there // are package-scope initialization statements, so // something's weird if we get here. 
- Fatalf("initTodo still has declarations") + base.Fatalf("initTodo still has declarations") } initTodo = nil diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go index ecbfc5631a3e6..f553a3f057b60 100644 --- a/src/cmd/compile/internal/gc/initorder.go +++ b/src/cmd/compile/internal/gc/initorder.go @@ -8,6 +8,8 @@ import ( "bytes" "container/heap" "fmt" + + "cmd/compile/internal/base" ) // Package initialization @@ -89,7 +91,7 @@ func initOrder(l []*Node) []*Node { case ODCLCONST, ODCLFUNC, ODCLTYPE: // nop default: - Fatalf("unexpected package-level statement: %v", n) + base.Fatalf("unexpected package-level statement: %v", n) } } @@ -104,10 +106,10 @@ func initOrder(l []*Node) []*Node { // confused us and there might not be // a loop. Let the user fix those // first. - ExitIfErrors() + base.ExitIfErrors() findInitLoopAndExit(firstLHS(n), new([]*Node)) - Fatalf("initialization unfinished, but failed to identify loop") + base.Fatalf("initialization unfinished, but failed to identify loop") } } } @@ -115,7 +117,7 @@ func initOrder(l []*Node) []*Node { // Invariant consistency check. If this is non-zero, then we // should have found a cycle above. if len(o.blocking) != 0 { - Fatalf("expected empty map: %v", o.blocking) + base.Fatalf("expected empty map: %v", o.blocking) } return s.out @@ -123,7 +125,7 @@ func initOrder(l []*Node) []*Node { func (o *InitOrder) processAssign(n *Node) { if n.Initorder() != InitNotStarted || n.Xoffset != BADWIDTH { - Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Xoffset) + base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Xoffset) } n.SetInitorder(InitPending) @@ -154,7 +156,7 @@ func (o *InitOrder) flushReady(initialize func(*Node)) { for o.ready.Len() != 0 { n := heap.Pop(&o.ready).(*Node) if n.Initorder() != InitPending || n.Xoffset != 0 { - Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Xoffset) + base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Xoffset) } initialize(n) @@ -238,8 +240,8 @@ func reportInitLoopAndExit(l []*Node) { } fmt.Fprintf(&msg, "\t%v: %v", l[0].Line(), l[0]) - yyerrorl(l[0].Pos, msg.String()) - errorexit() + base.ErrorfAt(l[0].Pos, msg.String()) + base.ErrorExit() } // collectDeps returns all of the package-level functions and @@ -256,7 +258,7 @@ func collectDeps(n *Node, transitive bool) NodeSet { case ODCLFUNC: d.inspectList(n.Nbody) default: - Fatalf("unexpected Op: %v", n.Op) + base.Fatalf("unexpected Op: %v", n.Op) } return d.seen } @@ -347,6 +349,6 @@ func firstLHS(n *Node) *Node { return n.List.First() } - Fatalf("unexpected Op: %v", n.Op) + base.Fatalf("unexpected Op: %v", n.Op) return nil } diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index fc467dd95aaf7..d71ea9b5ed407 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -27,6 +27,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/logopt" "cmd/compile/internal/types" "cmd/internal/obj" @@ -60,7 +61,7 @@ func fnpkg(fn *Node) *types.Pkg { rcvr = rcvr.Elem() } if rcvr.Sym == nil { - Fatalf("receiver with no sym: [%v] %L (%v)", fn.Sym, fn, rcvr) + base.Fatalf("receiver with no sym: [%v] %L (%v)", fn.Sym, fn, rcvr) } return rcvr.Sym.Pkg } @@ -86,7 +87,7 @@ func typecheckinl(fn *Node) { return // typecheckinl on local function } - if Flag.LowerM > 2 || Debug.Export != 0 { + if base.Flag.LowerM > 2 || base.Debug.Export != 0 { fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym, fn, 
asNodes(fn.Func.Inl.Body)) } @@ -103,7 +104,7 @@ func typecheckinl(fn *Node) { fn.Func.Inl.Dcl = append(fn.Func.Inl.Dcl, fn.Func.Dcl...) fn.Func.Dcl = nil - lineno = lno + base.Pos = lno } // Caninl determines whether fn is inlineable. @@ -111,17 +112,17 @@ func typecheckinl(fn *Node) { // fn and ->nbody will already have been typechecked. func caninl(fn *Node) { if fn.Op != ODCLFUNC { - Fatalf("caninl %v", fn) + base.Fatalf("caninl %v", fn) } if fn.Func.Nname == nil { - Fatalf("caninl no nname %+v", fn) + base.Fatalf("caninl no nname %+v", fn) } var reason string // reason, if any, that the function was not inlined - if Flag.LowerM > 1 || logopt.Enabled() { + if base.Flag.LowerM > 1 || logopt.Enabled() { defer func() { if reason != "" { - if Flag.LowerM > 1 { + if base.Flag.LowerM > 1 { fmt.Printf("%v: cannot inline %v: %s\n", fn.Line(), fn.Func.Nname, reason) } if logopt.Enabled() { @@ -138,13 +139,13 @@ func caninl(fn *Node) { } // If marked "go:norace" and -race compilation, don't inline. - if Flag.Race && fn.Func.Pragma&Norace != 0 { + if base.Flag.Race && fn.Func.Pragma&Norace != 0 { reason = "marked go:norace with -race compilation" return } // If marked "go:nocheckptr" and -d checkptr compilation, don't inline. - if Debug.Checkptr != 0 && fn.Func.Pragma&NoCheckPtr != 0 { + if base.Debug.Checkptr != 0 && fn.Func.Pragma&NoCheckPtr != 0 { reason = "marked go:nocheckptr" return } @@ -179,7 +180,7 @@ func caninl(fn *Node) { } if fn.Typecheck() == 0 { - Fatalf("caninl on non-typechecked function %v", fn) + base.Fatalf("caninl on non-typechecked function %v", fn) } n := fn.Func.Nname @@ -189,7 +190,7 @@ func caninl(fn *Node) { defer n.Func.SetInlinabilityChecked(true) cc := int32(inlineExtraCallCost) - if Flag.LowerL == 4 { + if base.Flag.LowerL == 4 { cc = 1 // this appears to yield better performance than 0. } @@ -222,9 +223,9 @@ func caninl(fn *Node) { Body: inlcopylist(fn.Nbody.Slice()), } - if Flag.LowerM > 1 { + if base.Flag.LowerM > 1 { fmt.Printf("%v: can inline %#v with cost %d as: %#v { %#v }\n", fn.Line(), n, inlineMaxBudget-visitor.budget, fn.Type, asNodes(n.Func.Inl.Body)) - } else if Flag.LowerM != 0 { + } else if base.Flag.LowerM != 0 { fmt.Printf("%v: can inline %v\n", fn.Line(), n) } if logopt.Enabled() { @@ -239,10 +240,10 @@ func inlFlood(n *Node) { return } if n.Op != ONAME || n.Class() != PFUNC { - Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op, n.Class()) + base.Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op, n.Class()) } if n.Func == nil { - Fatalf("inlFlood: missing Func on %v", n) + base.Fatalf("inlFlood: missing Func on %v", n) } if n.Func.Inl == nil { return @@ -286,7 +287,7 @@ func inlFlood(n *Node) { // // When we do, we'll probably want: // inlFlood(n.Func.Closure.Func.Nname) - Fatalf("unexpected closure in inlinable function") + base.Fatalf("unexpected closure in inlinable function") } return true }) @@ -352,7 +353,7 @@ func (v *hairyVisitor) visit(n *Node) bool { case OCALLMETH: t := n.Left.Type if t == nil { - Fatalf("no function type for [%p] %+v\n", n.Left, n.Left) + base.Fatalf("no function type for [%p] %+v\n", n.Left, n.Left) } if isRuntimePkg(n.Left.Sym.Pkg) { fn := n.Left.Sym.Name @@ -413,7 +414,7 @@ func (v *hairyVisitor) visit(n *Node) bool { case OBREAK, OCONTINUE: if n.Sym != nil { // Should have short-circuited due to labeledControl above. 
- Fatalf("unexpected labeled break/continue: %v", n) + base.Fatalf("unexpected labeled break/continue: %v", n) } case OIF: @@ -433,7 +434,7 @@ func (v *hairyVisitor) visit(n *Node) bool { v.budget-- // When debugging, don't stop early, to get full cost of inlining this function - if v.budget < 0 && Flag.LowerM < 2 && !logopt.Enabled() { + if v.budget < 0 && base.Flag.LowerM < 2 && !logopt.Enabled() { return true } @@ -465,7 +466,7 @@ func inlcopy(n *Node) *Node { m := n.copy() if n.Op != OCALLPART && m.Func != nil { - Fatalf("unexpected Func: %v", m) + base.Fatalf("unexpected Func: %v", m) } m.Left = inlcopy(n.Left) m.Right = inlcopy(n.Right) @@ -517,7 +518,7 @@ func inlcalls(fn *Node) { inlMap := make(map[*Node]bool) fn = inlnode(fn, maxCost, inlMap) if fn != Curfn { - Fatalf("inlnode replaced curfn") + base.Fatalf("inlnode replaced curfn") } Curfn = savefn } @@ -548,7 +549,7 @@ func inlconv2expr(n *Node) *Node { // statements. func inlconv2list(n *Node) []*Node { if n.Op != OINLCALL || n.Rlist.Len() == 0 { - Fatalf("inlconv2list %+v\n", n) + base.Fatalf("inlconv2list %+v\n", n) } s := n.Rlist.Slice() @@ -595,7 +596,7 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node { case OCALLMETH: // Prevent inlining some reflect.Value methods when using checkptr, // even when package reflect was compiled without it (#35073). - if s := n.Left.Sym; Debug.Checkptr != 0 && isReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") { + if s := n.Left.Sym; base.Debug.Checkptr != 0 && isReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") { return n } } @@ -676,7 +677,7 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node { switch n.Op { case OCALLFUNC: - if Flag.LowerM > 3 { + if base.Flag.LowerM > 3 { fmt.Printf("%v:call to func %+v\n", n.Line(), n.Left) } if isIntrinsicCall(n) { @@ -687,19 +688,19 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node { } case OCALLMETH: - if Flag.LowerM > 3 { + if base.Flag.LowerM > 3 { fmt.Printf("%v:call to meth %L\n", n.Line(), n.Left.Right) } // typecheck should have resolved ODOTMETH->type, whose nname points to the actual function. if n.Left.Type == nil { - Fatalf("no function type for [%p] %+v\n", n.Left, n.Left) + base.Fatalf("no function type for [%p] %+v\n", n.Left, n.Left) } n = mkinlcall(n, n.Left.MethodName(), maxCost, inlMap) } - lineno = lno + base.Pos = lno return n } @@ -767,12 +768,12 @@ FindRHS: break FindRHS } } - Fatalf("%v missing from LHS of %v", n, defn) + base.Fatalf("%v missing from LHS of %v", n, defn) default: return nil } if rhs == nil { - Fatalf("RHS is nil: %v", defn) + base.Fatalf("RHS is nil: %v", defn) } unsafe, _ := reassigned(n) @@ -791,7 +792,7 @@ FindRHS: // TODO: handle initial declaration not including an assignment and followed by a single assignment? 
func reassigned(n *Node) (bool, *Node) { if n.Op != ONAME { - Fatalf("reassigned %v", n) + base.Fatalf("reassigned %v", n) } // no way to reliably check for no-reassignment of globals, assume it can be if n.Name.Curfn == nil { @@ -869,7 +870,7 @@ func inlParam(t *types.Field, as *Node, inlvars map[*Node]*Node) *Node { inlvar := inlvars[n] if inlvar == nil { - Fatalf("missing inlvar for %v", n) + base.Fatalf("missing inlvar for %v", n) } as.Ninit.Append(nod(ODCL, inlvar, nil)) inlvar.Name.Defn = as @@ -922,7 +923,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { } if inlMap[fn] { - if Flag.LowerM > 1 { + if base.Flag.LowerM > 1 { fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", n.Line(), fn, Curfn.funcname()) } return n @@ -931,17 +932,17 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { defer func() { inlMap[fn] = false }() - if Debug.TypecheckInl == 0 { + if base.Debug.TypecheckInl == 0 { typecheckinl(fn) } // We have a function node, and it has an inlineable body. - if Flag.LowerM > 1 { + if base.Flag.LowerM > 1 { fmt.Printf("%v: inlining call to %v %#v { %#v }\n", n.Line(), fn.Sym, fn.Type, asNodes(fn.Func.Inl.Body)) - } else if Flag.LowerM != 0 { + } else if base.Flag.LowerM != 0 { fmt.Printf("%v: inlining call to %v\n", n.Line(), fn) } - if Flag.LowerM > 2 { + if base.Flag.LowerM > 2 { fmt.Printf("%v: Before inlining: %+v\n", n.Line(), n) } @@ -962,7 +963,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { callee = callee.Left } if callee.Op != ONAME && callee.Op != OCLOSURE && callee.Op != OMETHEXPR { - Fatalf("unexpected callee expression: %v", callee) + base.Fatalf("unexpected callee expression: %v", callee) } } @@ -986,7 +987,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { // the reassigned check via some sort of copy propagation this would most // likely need to be changed to a loop to walk up to the correct Param if o == nil || (o.Name.Curfn != Curfn && o.Name.Curfn.Func.OClosure != Curfn) { - Fatalf("%v: unresolvable capture %v %v\n", n.Line(), fn, v) + base.Fatalf("%v: unresolvable capture %v %v\n", n.Line(), fn, v) } if v.Name.Byval() { @@ -1022,11 +1023,11 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { // this never actually happens. We currently // perform inlining before escape analysis, so // nothing should have moved to the heap yet. - Fatalf("impossible: %v", ln) + base.Fatalf("impossible: %v", ln) } inlf := typecheck(inlvar(ln), ctxExpr) inlvars[ln] = inlf - if Flag.GenDwarfInl > 0 { + if base.Flag.GenDwarfInl > 0 { if ln.Class() == PPARAM { inlf.Name.SetInlFormal(true) } else { @@ -1064,7 +1065,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { m = retvar(t, i) } - if Flag.GenDwarfInl > 0 { + if base.Flag.GenDwarfInl > 0 { // Don't update the src.Pos on a return variable if it // was manufactured by the inliner (e.g. "~R2"); such vars // were not part of the original callee. 
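// Illustrative sketch (assumed shape, not part of this CL): the OAS2
// built in mkinlcall assigns receiver and arguments to the callee's
// parameter temps in one colas statement, with the method receiver
// appended first:
//
//	~recv, ~arg1 := m, x   // for an inlined call m.Foo(x)
//
// which is why the following hunk insists that n.Left.Left, the receiver
// expression of an OCALLMETH, must be non-nil before it is appended to
// as.Rlist.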
@@ -1083,7 +1084,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { as.SetColas(true) if n.Op == OCALLMETH { if n.Left.Left == nil { - Fatalf("method call without receiver: %+v", n) + base.Fatalf("method call without receiver: %+v", n) } as.Rlist.Append(n.Left.Left) } @@ -1150,10 +1151,10 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { inlgen++ parent := -1 - if b := Ctxt.PosTable.Pos(n.Pos).Base(); b != nil { + if b := base.Ctxt.PosTable.Pos(n.Pos).Base(); b != nil { parent = b.InliningIndex() } - newIndex := Ctxt.InlTree.Add(parent, n.Pos, fn.Sym.Linksym()) + newIndex := base.Ctxt.InlTree.Add(parent, n.Pos, fn.Sym.Linksym()) // Add an inline mark just before the inlined body. // This mark is inline in the code so that it's a reasonable spot @@ -1165,9 +1166,9 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { inlMark.Xoffset = int64(newIndex) ninit.Append(inlMark) - if Flag.GenDwarfInl > 0 { + if base.Flag.GenDwarfInl > 0 { if !fn.Sym.Linksym().WasInlined() { - Ctxt.DwFixups.SetPrecursorFunc(fn.Sym.Linksym(), fn) + base.Ctxt.DwFixups.SetPrecursorFunc(fn.Sym.Linksym(), fn) fn.Sym.Linksym().Set(obj.AttrWasInlined, true) } } @@ -1188,7 +1189,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { typecheckslice(body, ctxStmt) - if Flag.GenDwarfInl > 0 { + if base.Flag.GenDwarfInl > 0 { for _, v := range inlfvars { v.Pos = subst.updatedPos(v.Pos) } @@ -1216,7 +1217,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { } } - if Flag.LowerM > 2 { + if base.Flag.LowerM > 2 { fmt.Printf("%v: After inlining %+v\n\n", call.Line(), call) } @@ -1227,7 +1228,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { // PAUTO's in the calling functions, and link them off of the // PPARAM's, PAUTOS and PPARAMOUTs of the called function. 
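// Illustrative sketch (not part of this CL): inlvar manufactures a fresh
// caller-local PAUTO for each callee variable, reusing the callee's
// symbol name and marking it used. Conceptually, inlining
//
//	func add(a, b int) int { return a + b }
//
// into f introduces temps in f roughly like
//
//	var a, b int // PAUTOs owned by f
//	a, b = x, y
//
// before the substituted body runs.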
func inlvar(var_ *Node) *Node { - if Flag.LowerM > 3 { + if base.Flag.LowerM > 3 { fmt.Printf("inlvar %+v\n", var_) } @@ -1310,13 +1311,13 @@ func (subst *inlsubst) node(n *Node) *Node { switch n.Op { case ONAME: if inlvar := subst.inlvars[n]; inlvar != nil { // These will be set during inlnode - if Flag.LowerM > 2 { + if base.Flag.LowerM > 2 { fmt.Printf("substituting name %+v -> %+v\n", n, inlvar) } return inlvar } - if Flag.LowerM > 2 { + if base.Flag.LowerM > 2 { fmt.Printf("not substituting name %+v\n", n) } return n @@ -1382,7 +1383,7 @@ func (subst *inlsubst) node(n *Node) *Node { m.Ninit.Set(nil) if n.Op == OCLOSURE { - Fatalf("cannot inline function containing closure: %+v", n) + base.Fatalf("cannot inline function containing closure: %+v", n) } m.Left = subst.node(n.Left) @@ -1396,7 +1397,7 @@ func (subst *inlsubst) node(n *Node) *Node { } func (subst *inlsubst) updatedPos(xpos src.XPos) src.XPos { - pos := Ctxt.PosTable.Pos(xpos) + pos := base.Ctxt.PosTable.Pos(xpos) oldbase := pos.Base() // can be nil newbase := subst.bases[oldbase] if newbase == nil { @@ -1404,7 +1405,7 @@ func (subst *inlsubst) updatedPos(xpos src.XPos) src.XPos { subst.bases[oldbase] = newbase } pos.SetBase(newbase) - return Ctxt.PosTable.XPos(pos) + return base.Ctxt.PosTable.XPos(pos) } func pruneUnusedAutos(ll []*Node, vis *hairyVisitor) []*Node { @@ -1449,22 +1450,22 @@ func devirtualizeCall(call *Node) { x = typecheck(x, ctxExpr|ctxCallee) switch x.Op { case ODOTMETH: - if Flag.LowerM != 0 { - Warnl(call.Pos, "devirtualizing %v to %v", call.Left, typ) + if base.Flag.LowerM != 0 { + base.WarnfAt(call.Pos, "devirtualizing %v to %v", call.Left, typ) } call.Op = OCALLMETH call.Left = x case ODOTINTER: // Promoted method from embedded interface-typed field (#42279). - if Flag.LowerM != 0 { - Warnl(call.Pos, "partially devirtualizing %v to %v", call.Left, typ) + if base.Flag.LowerM != 0 { + base.WarnfAt(call.Pos, "partially devirtualizing %v to %v", call.Left, typ) } call.Op = OCALLINTER call.Left = x default: // TODO(mdempsky): Turn back into Fatalf after more testing. - if Flag.LowerM != 0 { - Warnl(call.Pos, "failed to devirtualize %v (%v)", x, x.Op) + if base.Flag.LowerM != 0 { + base.WarnfAt(call.Pos, "failed to devirtualize %v (%v)", x, x.Op) } return } diff --git a/src/cmd/compile/internal/gc/lex.go b/src/cmd/compile/internal/gc/lex.go index f01891f3652a1..30ef4d0eb2035 100644 --- a/src/cmd/compile/internal/gc/lex.go +++ b/src/cmd/compile/internal/gc/lex.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/syntax" "cmd/internal/objabi" "cmd/internal/src" @@ -13,7 +14,7 @@ import ( ) func makePos(b *src.PosBase, line, col uint) src.XPos { - return Ctxt.PosTable.XPos(src.MakePos(b, line, col)) + return base.Ctxt.PosTable.XPos(src.MakePos(b, line, col)) } func isSpace(c rune) bool { diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 2794ba369454b..c66139027a61c 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -9,6 +9,7 @@ package gc import ( "bufio" "bytes" + "cmd/compile/internal/base" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" "cmd/compile/internal/types" @@ -35,13 +36,13 @@ import ( ) func hidePanic() { - if Debug.Panic == 0 && Errors() > 0 { + if base.Debug.Panic == 0 && base.Errors() > 0 { // If we've already complained about things // in the program, don't bother complaining // about a panic too; let the user clean up // the code and try again. 
if err := recover(); err != nil { - errorexit() + base.ErrorExit() } } } @@ -61,16 +62,16 @@ func Main(archInit func(*Arch)) { archInit(&thearch) - Ctxt = obj.Linknew(thearch.LinkArch) - Ctxt.DiagFunc = yyerror - Ctxt.DiagFlush = flusherrors - Ctxt.Bso = bufio.NewWriter(os.Stdout) + base.Ctxt = obj.Linknew(thearch.LinkArch) + base.Ctxt.DiagFunc = base.Errorf + base.Ctxt.DiagFlush = base.FlushErrors + base.Ctxt.Bso = bufio.NewWriter(os.Stdout) // UseBASEntries is preferred because it shaves about 2% off build time, but LLDB, dsymutil, and dwarfdump // on Darwin don't support it properly, especially since macOS 10.14 (Mojave). This is exposed as a flag // to allow testing with LLVM tools on Linux, and to help with reporting this bug to the LLVM project. // See bugs 31188 and 21945 (CLs 170638, 98075, 72371). - Ctxt.UseBASEntries = Ctxt.Headtype != objabi.Hdarwin + base.Ctxt.UseBASEntries = base.Ctxt.Headtype != objabi.Hdarwin localpkg = types.NewPkg("", "") localpkg.Prefix = "\"\"" @@ -112,15 +113,15 @@ func Main(archInit func(*Arch)) { // pseudo-package used for methods with anonymous receivers gopkg = types.NewPkg("go", "") - DebugSSA = ssa.PhaseOption - ParseFlags() + base.DebugSSA = ssa.PhaseOption + base.ParseFlags() // Record flags that affect the build result. (And don't // record flags that don't, since that would cause spurious // changes in the binary.) recordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre") - if !enableTrace && Flag.LowerT { + if !enableTrace && base.Flag.LowerT { log.Fatalf("compiler not built with support for -t") } @@ -128,59 +129,59 @@ func Main(archInit func(*Arch)) { // default: inlining on. (Flag.LowerL == 1) // -l: inlining off (Flag.LowerL == 0) // -l=2, -l=3: inlining on again, with extra debugging (Flag.LowerL > 1) - if Flag.LowerL <= 1 { - Flag.LowerL = 1 - Flag.LowerL + if base.Flag.LowerL <= 1 { + base.Flag.LowerL = 1 - base.Flag.LowerL } - if Flag.SmallFrames { + if base.Flag.SmallFrames { maxStackVarSize = 128 * 1024 maxImplicitStackVarSize = 16 * 1024 } - if Flag.Dwarf { - Ctxt.DebugInfo = debuginfo - Ctxt.GenAbstractFunc = genAbstractFunc - Ctxt.DwFixups = obj.NewDwarfFixupTable(Ctxt) + if base.Flag.Dwarf { + base.Ctxt.DebugInfo = debuginfo + base.Ctxt.GenAbstractFunc = genAbstractFunc + base.Ctxt.DwFixups = obj.NewDwarfFixupTable(base.Ctxt) } else { // turn off inline generation if no dwarf at all - Flag.GenDwarfInl = 0 - Ctxt.Flag_locationlists = false + base.Flag.GenDwarfInl = 0 + base.Ctxt.Flag_locationlists = false } - if Ctxt.Flag_locationlists && len(Ctxt.Arch.DWARFRegisters) == 0 { - log.Fatalf("location lists requested but register mapping not available on %v", Ctxt.Arch.Name) + if base.Ctxt.Flag_locationlists && len(base.Ctxt.Arch.DWARFRegisters) == 0 { + log.Fatalf("location lists requested but register mapping not available on %v", base.Ctxt.Arch.Name) } checkLang() - if Flag.SymABIs != "" { - readSymABIs(Flag.SymABIs, Ctxt.Pkgpath) + if base.Flag.SymABIs != "" { + readSymABIs(base.Flag.SymABIs, base.Ctxt.Pkgpath) } if ispkgin(omit_pkgs) { - Flag.Race = false - Flag.MSan = false + base.Flag.Race = false + base.Flag.MSan = false } - thearch.LinkArch.Init(Ctxt) + thearch.LinkArch.Init(base.Ctxt) startProfile() - if Flag.Race { + if base.Flag.Race { racepkg = types.NewPkg("runtime/race", "") } - if Flag.MSan { + if base.Flag.MSan { msanpkg = types.NewPkg("runtime/msan", "") } - if Flag.Race || Flag.MSan { + if base.Flag.Race || base.Flag.MSan { instrumenting = 
true } - if Flag.Dwarf { - dwarf.EnableLogging(Debug.DwarfInl != 0) + if base.Flag.Dwarf { + dwarf.EnableLogging(base.Debug.DwarfInl != 0) } - if Debug.SoftFloat != 0 { + if base.Debug.SoftFloat != 0 { thearch.SoftFloat = true } - if Flag.JSON != "" { // parse version,destination from json logging optimization. - logopt.LogJsonOption(Flag.JSON) + if base.Flag.JSON != "" { // parse version,destination from json logging optimization. + logopt.LogJsonOption(base.Flag.JSON) } ssaDump = os.Getenv("GOSSAFUNC") @@ -197,7 +198,7 @@ func Main(archInit func(*Arch)) { } } - trackScopes = Flag.Dwarf + trackScopes = base.Flag.Dwarf Widthptr = thearch.LinkArch.PtrSize Widthreg = thearch.LinkArch.RegSize @@ -207,7 +208,7 @@ func Main(archInit func(*Arch)) { // would lead to import cycles) types.Widthptr = Widthptr types.Dowidth = dowidth - types.Fatalf = Fatalf + types.Fatalf = base.Fatalf types.Sconv = func(s *types.Sym, flag, mode int) string { return sconv(s, FmtFlag(flag), fmtMode(mode)) } @@ -226,7 +227,7 @@ func Main(archInit func(*Arch)) { types.FmtLeft = int(FmtLeft) types.FmtUnsigned = int(FmtUnsigned) types.FErr = int(FErr) - types.Ctxt = Ctxt + types.Ctxt = base.Ctxt initUniverse() @@ -288,10 +289,10 @@ func Main(archInit func(*Arch)) { if n.Op == ODCLFUNC { Curfn = n decldepth = 1 - errorsBefore := Errors() + errorsBefore := base.Errors() typecheckslice(Curfn.Nbody.Slice(), ctxStmt) checkreturn(Curfn) - if Errors() > errorsBefore { + if base.Errors() > errorsBefore { Curfn.Nbody.Set(nil) // type errors; do not compile } // Now that we've checked whether n terminates, @@ -304,7 +305,7 @@ func Main(archInit func(*Arch)) { // check past phase 9 isn't sufficient, as we may exit with other errors // before then, thus skipping map key errors. checkMapKeys() - ExitIfErrors() + base.ExitIfErrors() timings.AddEvent(fcount, "funcs") @@ -322,11 +323,11 @@ func Main(archInit func(*Arch)) { } capturevarscomplete = true Curfn = nil - ExitIfErrors() + base.ExitIfErrors() // Phase 5: Inlining timings.Start("fe", "inlining") - if Debug.TypecheckInl != 0 { + if base.Debug.TypecheckInl != 0 { // Typecheck imported function bodies if Debug.l > 1, // otherwise lazily when used or re-exported. for _, n := range importlist { @@ -334,10 +335,10 @@ func Main(archInit func(*Arch)) { typecheckinl(n) } } - ExitIfErrors() + base.ExitIfErrors() } - if Flag.LowerL != 0 { + if base.Flag.LowerL != 0 { // Find functions that can be inlined and clone them before walk expands them. visitBottomUp(xtop, func(list []*Node, recursive bool) { numfns := numNonClosures(list) @@ -348,7 +349,7 @@ func Main(archInit func(*Arch)) { // across more than one function. caninl(n) } else { - if Flag.LowerM > 1 { + if base.Flag.LowerM > 1 { fmt.Printf("%v: cannot inline %v: recursive\n", n.Line(), n.Func.Nname) } } @@ -379,7 +380,7 @@ func Main(archInit func(*Arch)) { // checking. This must happen before transformclosure. // We'll do the final check after write barriers are // inserted. - if Flag.CompilingRuntime { + if base.Flag.CompilingRuntime { nowritebarrierrecCheck = newNowritebarrierrecChecker() } @@ -430,10 +431,10 @@ func Main(archInit func(*Arch)) { // Finalize DWARF inline routine DIEs, then explicitly turn off // DWARF inlining gen so as to avoid problems with generated // method wrappers. 
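// Illustrative note (context taken from earlier in Main, not new to this
// CL): DwFixups is only created when DWARF output is enabled,
//
//	if base.Flag.Dwarf {
//		base.Ctxt.DwFixups = obj.NewDwarfFixupTable(base.Ctxt)
//	}
//
// so the nil check below doubles as a "DWARF inlining enabled" test
// before finalizing inline DIEs and turning further generation off.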
- if Ctxt.DwFixups != nil { - Ctxt.DwFixups.Finalize(Ctxt.Pkgpath, Debug.DwarfInl != 0) - Ctxt.DwFixups = nil - Flag.GenDwarfInl = 0 + if base.Ctxt.DwFixups != nil { + base.Ctxt.DwFixups.Finalize(base.Ctxt.Pkgpath, base.Debug.DwarfInl != 0) + base.Ctxt.DwFixups = nil + base.Flag.GenDwarfInl = 0 } // Phase 9: Check external declarations. @@ -446,14 +447,14 @@ func Main(archInit func(*Arch)) { // Check the map keys again, since we typechecked the external // declarations. checkMapKeys() - ExitIfErrors() + base.ExitIfErrors() // Write object data to disk. timings.Start("be", "dumpobj") dumpdata() - Ctxt.NumberSyms() + base.Ctxt.NumberSyms() dumpobj() - if Flag.AsmHdr != "" { + if base.Flag.AsmHdr != "" { dumpasmhdr() } @@ -463,27 +464,27 @@ func Main(archInit func(*Arch)) { }) for _, large := range largeStackFrames { if large.callee != 0 { - yyerrorl(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20) + base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20) } else { - yyerrorl(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20) + base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20) } } if len(funcStack) != 0 { - Fatalf("funcStack is non-empty: %v", len(funcStack)) + base.Fatalf("funcStack is non-empty: %v", len(funcStack)) } if len(compilequeue) != 0 { - Fatalf("%d uncompiled functions", len(compilequeue)) + base.Fatalf("%d uncompiled functions", len(compilequeue)) } - logopt.FlushLoggedOpts(Ctxt, Ctxt.Pkgpath) - ExitIfErrors() + logopt.FlushLoggedOpts(base.Ctxt, base.Ctxt.Pkgpath) + base.ExitIfErrors() - flusherrors() + base.FlushErrors() timings.Stop() - if Flag.Bench != "" { - if err := writebench(Flag.Bench); err != nil { + if base.Flag.Bench != "" { + if err := writebench(base.Flag.Bench); err != nil { log.Fatalf("cannot write benchmark data: %v", err) } } @@ -510,7 +511,7 @@ func writebench(filename string) error { fmt.Fprintln(&buf, "commit:", objabi.Version) fmt.Fprintln(&buf, "goos:", runtime.GOOS) fmt.Fprintln(&buf, "goarch:", runtime.GOARCH) - timings.Write(&buf, "BenchmarkCompile:"+Ctxt.Pkgpath+":") + timings.Write(&buf, "BenchmarkCompile:"+base.Ctxt.Pkgpath+":") n, err := f.Write(buf.Bytes()) if err != nil { @@ -622,12 +623,12 @@ func islocalname(name string) bool { func findpkg(name string) (file string, ok bool) { if islocalname(name) { - if Flag.NoLocalImports { + if base.Flag.NoLocalImports { return "", false } - if Flag.Cfg.PackageFile != nil { - file, ok = Flag.Cfg.PackageFile[name] + if base.Flag.Cfg.PackageFile != nil { + file, ok = base.Flag.Cfg.PackageFile[name] return file, ok } @@ -649,16 +650,16 @@ func findpkg(name string) (file string, ok bool) { // don't want to see "encoding/../encoding/base64" // as different from "encoding/base64". 
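// Concrete example of the canonicalization enforced below (standard
// library behavior, not part of this CL):
//
//	path.Clean("encoding/../encoding/base64") // "encoding/base64"
//
// Any import path that Clean rewrites is therefore reported as
// non-canonical rather than silently accepted.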
if q := path.Clean(name); q != name { - yyerror("non-canonical import path %q (should be %q)", name, q) + base.Errorf("non-canonical import path %q (should be %q)", name, q) return "", false } - if Flag.Cfg.PackageFile != nil { - file, ok = Flag.Cfg.PackageFile[name] + if base.Flag.Cfg.PackageFile != nil { + file, ok = base.Flag.Cfg.PackageFile[name] return file, ok } - for _, dir := range Flag.Cfg.ImportDirs { + for _, dir := range base.Flag.Cfg.ImportDirs { file = fmt.Sprintf("%s/%s.a", dir, name) if _, err := os.Stat(file); err == nil { return file, true @@ -672,13 +673,13 @@ func findpkg(name string) (file string, ok bool) { if objabi.GOROOT != "" { suffix := "" suffixsep := "" - if Flag.InstallSuffix != "" { + if base.Flag.InstallSuffix != "" { suffixsep = "_" - suffix = Flag.InstallSuffix - } else if Flag.Race { + suffix = base.Flag.InstallSuffix + } else if base.Flag.Race { suffixsep = "_" suffix = "race" - } else if Flag.MSan { + } else if base.Flag.MSan { suffixsep = "_" suffix = "msan" } @@ -715,7 +716,7 @@ func loadsys() { case varTag: importvar(Runtimepkg, src.NoXPos, sym, typ) default: - Fatalf("unhandled declaration tag %v", d.tag) + base.Fatalf("unhandled declaration tag %v", d.tag) } } @@ -729,13 +730,13 @@ var myheight int func importfile(f constant.Value) *types.Pkg { if f.Kind() != constant.String { - yyerror("import path must be a string") + base.Errorf("import path must be a string") return nil } path_ := constant.StringVal(f) if len(path_) == 0 { - yyerror("import path is empty") + base.Errorf("import path is empty") return nil } @@ -748,16 +749,16 @@ func importfile(f constant.Value) *types.Pkg { // the main package, just as we reserve the import // path "math" to identify the standard math package. if path_ == "main" { - yyerror("cannot import \"main\"") - errorexit() + base.Errorf("cannot import \"main\"") + base.ErrorExit() } - if Ctxt.Pkgpath != "" && path_ == Ctxt.Pkgpath { - yyerror("import %q while compiling that package (import cycle)", path_) - errorexit() + if base.Ctxt.Pkgpath != "" && path_ == base.Ctxt.Pkgpath { + base.Errorf("import %q while compiling that package (import cycle)", path_) + base.ErrorExit() } - if mapped, ok := Flag.Cfg.ImportMap[path_]; ok { + if mapped, ok := base.Flag.Cfg.ImportMap[path_]; ok { path_ = mapped } @@ -767,13 +768,13 @@ func importfile(f constant.Value) *types.Pkg { if islocalname(path_) { if path_[0] == '/' { - yyerror("import path cannot be absolute path") + base.Errorf("import path cannot be absolute path") return nil } - prefix := Ctxt.Pathname - if Flag.D != "" { - prefix = Flag.D + prefix := base.Ctxt.Pathname + if base.Flag.D != "" { + prefix = base.Flag.D } path_ = path.Join(prefix, path_) @@ -784,8 +785,8 @@ func importfile(f constant.Value) *types.Pkg { file, found := findpkg(path_) if !found { - yyerror("can't find import: %q", path_) - errorexit() + base.Errorf("can't find import: %q", path_) + base.ErrorExit() } importpkg := types.NewPkg(path_, "") @@ -797,48 +798,48 @@ func importfile(f constant.Value) *types.Pkg { imp, err := bio.Open(file) if err != nil { - yyerror("can't open import: %q: %v", path_, err) - errorexit() + base.Errorf("can't open import: %q: %v", path_, err) + base.ErrorExit() } defer imp.Close() // check object header p, err := imp.ReadString('\n') if err != nil { - yyerror("import %s: reading input: %v", file, err) - errorexit() + base.Errorf("import %s: reading input: %v", file, err) + base.ErrorExit() } if p == "!\n" { // package archive // package export block should be first sz := 
arsize(imp.Reader, "__.PKGDEF") if sz <= 0 { - yyerror("import %s: not a package file", file) - errorexit() + base.Errorf("import %s: not a package file", file) + base.ErrorExit() } p, err = imp.ReadString('\n') if err != nil { - yyerror("import %s: reading input: %v", file, err) - errorexit() + base.Errorf("import %s: reading input: %v", file, err) + base.ErrorExit() } } if !strings.HasPrefix(p, "go object ") { - yyerror("import %s: not a go object file: %s", file, p) - errorexit() + base.Errorf("import %s: not a go object file: %s", file, p) + base.ErrorExit() } q := fmt.Sprintf("%s %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version, objabi.Expstring()) if p[10:] != q { - yyerror("import %s: object is [%s] expected [%s]", file, p[10:], q) - errorexit() + base.Errorf("import %s: object is [%s] expected [%s]", file, p[10:], q) + base.ErrorExit() } // process header lines for { p, err = imp.ReadString('\n') if err != nil { - yyerror("import %s: reading input: %v", file, err) - errorexit() + base.Errorf("import %s: reading input: %v", file, err) + base.ErrorExit() } if p == "\n" { break // header ends with blank line @@ -870,41 +871,41 @@ func importfile(f constant.Value) *types.Pkg { var fingerprint goobj.FingerprintType switch c { case '\n': - yyerror("cannot import %s: old export format no longer supported (recompile library)", path_) + base.Errorf("cannot import %s: old export format no longer supported (recompile library)", path_) return nil case 'B': - if Debug.Export != 0 { + if base.Debug.Export != 0 { fmt.Printf("importing %s (%s)\n", path_, file) } imp.ReadByte() // skip \n after $$B c, err = imp.ReadByte() if err != nil { - yyerror("import %s: reading input: %v", file, err) - errorexit() + base.Errorf("import %s: reading input: %v", file, err) + base.ErrorExit() } // Indexed format is distinguished by an 'i' byte, // whereas previous export formats started with 'c', 'd', or 'v'. if c != 'i' { - yyerror("import %s: unexpected package format byte: %v", file, c) - errorexit() + base.Errorf("import %s: unexpected package format byte: %v", file, c) + base.ErrorExit() } fingerprint = iimport(importpkg, imp) default: - yyerror("no import in %q", path_) - errorexit() + base.Errorf("no import in %q", path_) + base.ErrorExit() } // assume files move (get installed) so don't record the full path - if Flag.Cfg.PackageFile != nil { + if base.Flag.Cfg.PackageFile != nil { // If using a packageFile map, assume path_ can be recorded directly. - Ctxt.AddImport(path_, fingerprint) + base.Ctxt.AddImport(path_, fingerprint) } else { // For file "/Users/foo/go/pkg/darwin_amd64/math.a" record "math.a". 
- Ctxt.AddImport(file[len(file)-len(path_)-len(".a"):], fingerprint) + base.Ctxt.AddImport(file[len(file)-len(path_)-len(".a"):], fingerprint) } if importpkg.Height >= myheight { @@ -926,21 +927,21 @@ func pkgnotused(lineno src.XPos, path string, name string) { elem = elem[i+1:] } if name == "" || elem == name { - yyerrorl(lineno, "imported and not used: %q", path) + base.ErrorfAt(lineno, "imported and not used: %q", path) } else { - yyerrorl(lineno, "imported and not used: %q as %s", path, name) + base.ErrorfAt(lineno, "imported and not used: %q as %s", path, name) } } func mkpackage(pkgname string) { if localpkg.Name == "" { if pkgname == "_" { - yyerror("invalid package name _") + base.Errorf("invalid package name _") } localpkg.Name = pkgname } else { if pkgname != localpkg.Name { - yyerror("package %s; expected %s", pkgname, localpkg.Name) + base.Errorf("package %s; expected %s", pkgname, localpkg.Name) } } } @@ -964,7 +965,7 @@ func clearImports() { // leave s->block set to cause redeclaration // errors if a conflicting top-level name is // introduced by a different file. - if !n.Name.Used() && SyntaxErrors() == 0 { + if !n.Name.Used() && base.SyntaxErrors() == 0 { unused = append(unused, importedPkg{n.Pos, n.Name.Pkg.Path, s.Name}) } s.Def = nil @@ -973,7 +974,7 @@ func clearImports() { if IsAlias(s) { // throw away top-level name left over // from previous import . "x" - if n.Name != nil && n.Name.Pack != nil && !n.Name.Pack.Name.Used() && SyntaxErrors() == 0 { + if n.Name != nil && n.Name.Pack != nil && !n.Name.Pack.Name.Used() && base.SyntaxErrors() == 0 { unused = append(unused, importedPkg{n.Name.Pack.Pos, n.Name.Pack.Name.Pkg.Path, ""}) n.Name.Pack.Name.SetUsed(true) } @@ -995,7 +996,7 @@ func IsAlias(sym *types.Sym) bool { // recordFlags records the specified command-line flags to be placed // in the DWARF info. func recordFlags(flags ...string) { - if Ctxt.Pkgpath == "" { + if base.Ctxt.Pkgpath == "" { // We can't record the flags if we don't know what the // package name is. return @@ -1038,24 +1039,24 @@ func recordFlags(flags ...string) { if cmd.Len() == 0 { return } - s := Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + Ctxt.Pkgpath) + s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + base.Ctxt.Pkgpath) s.Type = objabi.SDWARFCUINFO // Sometimes (for example when building tests) we can link // together two package main archives. So allow dups. s.Set(obj.AttrDuplicateOK, true) - Ctxt.Data = append(Ctxt.Data, s) + base.Ctxt.Data = append(base.Ctxt.Data, s) s.P = cmd.Bytes()[1:] } // recordPackageName records the name of the package being // compiled, so that the linker can save it in the compile unit's DIE. func recordPackageName() { - s := Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + Ctxt.Pkgpath) + s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + base.Ctxt.Pkgpath) s.Type = objabi.SDWARFCUINFO // Sometimes (for example when building tests) we can link // together two package main archives. So allow dups. s.Set(obj.AttrDuplicateOK, true) - Ctxt.Data = append(Ctxt.Data, s) + base.Ctxt.Data = append(base.Ctxt.Data, s) s.P = []byte(localpkg.Name) } @@ -1099,23 +1100,23 @@ func langSupported(major, minor int, pkg *types.Pkg) bool { // checkLang verifies that the -lang flag holds a valid value, and // exits if not. It initializes data used by langSupported. 
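// Illustrative sketch (assumed semantics, not part of this CL): langWant
// holds the parsed (major, minor) pair from -lang, so under -lang=go1.12
// a go1.13-gated feature is refused:
//
//	langSupported(1, 13, localpkg) // false when langWant is {1, 12}
//
// checkLang below fills in langWant and rejects values newer than the
// compiler's own default language version.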
func checkLang() { - if Flag.Lang == "" { + if base.Flag.Lang == "" { return } var err error - langWant, err = parseLang(Flag.Lang) + langWant, err = parseLang(base.Flag.Lang) if err != nil { - log.Fatalf("invalid value %q for -lang: %v", Flag.Lang, err) + log.Fatalf("invalid value %q for -lang: %v", base.Flag.Lang, err) } - if def := currentLang(); Flag.Lang != def { + if def := currentLang(); base.Flag.Lang != def { defVers, err := parseLang(def) if err != nil { log.Fatalf("internal error parsing default lang %q: %v", def, err) } if langWant.major > defVers.major || (langWant.major == defVers.major && langWant.minor > defVers.minor) { - log.Fatalf("invalid value %q for -lang: max known version is %q", Flag.Lang, def) + log.Fatalf("invalid value %q for -lang: max known version is %q", base.Flag.Lang, def) } } } diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 2d3da884a2254..6dae2cd0a41d2 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -16,6 +16,7 @@ import ( "unicode" "unicode/utf8" + "cmd/compile/internal/base" "cmd/compile/internal/syntax" "cmd/compile/internal/types" "cmd/internal/obj" @@ -59,15 +60,15 @@ func parseFiles(filenames []string) uint { var lines uint for _, p := range noders { for e := range p.err { - p.yyerrorpos(e.Pos, "%s", e.Msg) + p.errorAt(e.Pos, "%s", e.Msg) } p.node() lines += p.file.Lines p.file = nil // release memory - if SyntaxErrors() != 0 { - errorexit() + if base.SyntaxErrors() != 0 { + base.ErrorExit() } // Always run testdclstack here, even when debug_dclstack is not set, as a sanity measure. testdclstack() @@ -111,20 +112,20 @@ func (p *noder) makeSrcPosBase(b0 *syntax.PosBase) *src.PosBase { } func (p *noder) makeXPos(pos syntax.Pos) (_ src.XPos) { - return Ctxt.PosTable.XPos(src.MakePos(p.makeSrcPosBase(pos.Base()), pos.Line(), pos.Col())) + return base.Ctxt.PosTable.XPos(src.MakePos(p.makeSrcPosBase(pos.Base()), pos.Line(), pos.Col())) } -func (p *noder) yyerrorpos(pos syntax.Pos, format string, args ...interface{}) { - yyerrorl(p.makeXPos(pos), format, args...) +func (p *noder) errorAt(pos syntax.Pos, format string, args ...interface{}) { + base.ErrorfAt(p.makeXPos(pos), format, args...) } // TODO(gri) Can we eliminate fileh in favor of absFilename? func fileh(name string) string { - return objabi.AbsFile("", name, Flag.TrimPath) + return objabi.AbsFile("", name, base.Flag.TrimPath) } func absFilename(name string) string { - return objabi.AbsFile(Ctxt.Pathname, name, Flag.TrimPath) + return objabi.AbsFile(base.Ctxt.Pathname, name, base.Flag.TrimPath) } // noder transforms package syntax's AST into a Node tree. @@ -162,8 +163,8 @@ func (p *noder) funcBody(fn *Node, block *syntax.BlockStmt) { } fn.Nbody.Set(body) - lineno = p.makeXPos(block.Rbrace) - fn.Func.Endlineno = lineno + base.Pos = p.makeXPos(block.Rbrace) + fn.Func.Endlineno = base.Pos } funcbody() @@ -193,7 +194,7 @@ func (p *noder) closeScope(pos syntax.Pos) { // no variables were declared in this scope, so we can retract it. 
if int(p.scope) != len(Curfn.Func.Parents) { - Fatalf("scope tracking inconsistency, no variables declared but scopes were not retracted") + base.Fatalf("scope tracking inconsistency, no variables declared but scopes were not retracted") } p.scope = Curfn.Func.Parents[p.scope-1] @@ -258,7 +259,7 @@ func (p *noder) node() { for _, n := range p.linknames { if !p.importedUnsafe { - p.yyerrorpos(n.pos, "//go:linkname only allowed in Go files that import \"unsafe\"") + p.errorAt(n.pos, "//go:linkname only allowed in Go files that import \"unsafe\"") continue } s := lookup(n.local) @@ -267,10 +268,10 @@ func (p *noder) node() { } else { // Use the default object symbol name if the // user didn't provide one. - if Ctxt.Pkgpath == "" { - p.yyerrorpos(n.pos, "//go:linkname requires linkname argument or -p compiler flag") + if base.Ctxt.Pkgpath == "" { + p.errorAt(n.pos, "//go:linkname requires linkname argument or -p compiler flag") } else { - s.Linkname = objabi.PathToPrefix(Ctxt.Pkgpath) + "." + n.local + s.Linkname = objabi.PathToPrefix(base.Ctxt.Pkgpath) + "." + n.local } } } @@ -288,7 +289,7 @@ func (p *noder) node() { } pragcgobuf = append(pragcgobuf, p.pragcgobuf...) - lineno = src.NoXPos + base.Pos = src.NoXPos clearImports() } @@ -332,8 +333,8 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) { ipkg := importfile(p.basicLit(imp.Path)) if ipkg == nil { - if Errors() == 0 { - Fatalf("phase error in import") + if base.Errors() == 0 { + base.Fatalf("phase error in import") } return } @@ -363,7 +364,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) { importdot(ipkg, pack) return case "init": - yyerrorl(pack.Pos, "cannot import package as init - init must be a func") + base.ErrorfAt(pack.Pos, "cannot import package as init - init must be a func") return case "_": return @@ -393,7 +394,7 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []*Node { // so at that point it hasn't seen the imports. // We're left to check now, just before applying the //go:embed lines. 
for _, e := range pragma.Embeds { - p.yyerrorpos(e.Pos, "//go:embed only allowed in Go files that import \"embed\"") + p.errorAt(e.Pos, "//go:embed only allowed in Go files that import \"embed\"") } } else { exprs = varEmbed(p, names, typ, exprs, pragma.Embeds) @@ -437,7 +438,7 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*Node { cs.typ, cs.values = typ, values } else { if typ != nil { - yyerror("const declaration cannot have type without expression") + base.Errorf("const declaration cannot have type without expression") } typ, values = cs.typ, cs.values } @@ -445,7 +446,7 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*Node { nn := make([]*Node, 0, len(names)) for i, n := range names { if i >= len(values) { - yyerror("missing value in const declaration") + base.Errorf("missing value in const declaration") break } v := values[i] @@ -464,7 +465,7 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*Node { } if len(values) > len(names) { - yyerror("extra expression in const declaration") + base.Errorf("extra expression in const declaration") } cs.iota++ @@ -493,7 +494,7 @@ func (p *noder) typeDecl(decl *syntax.TypeDecl) *Node { nod := p.nod(decl, ODCLTYPE, n, nil) if param.Alias() && !langSupported(1, 9, localpkg) { - yyerrorl(nod.Pos, "type aliases only supported as of -lang=go1.9") + base.ErrorfAt(nod.Pos, "type aliases only supported as of -lang=go1.9") } return nod } @@ -521,13 +522,13 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node { if name.Name == "init" { name = renameinit() if t.List.Len() > 0 || t.Rlist.Len() > 0 { - yyerrorl(f.Pos, "func init must have no arguments and no return values") + base.ErrorfAt(f.Pos, "func init must have no arguments and no return values") } } if localpkg.Name == "main" && name.Name == "main" { if t.List.Len() > 0 || t.Rlist.Len() > 0 { - yyerrorl(f.Pos, "func main must have no arguments and no return values") + base.ErrorfAt(f.Pos, "func main must have no arguments and no return values") } } } else { @@ -542,7 +543,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node { if pragma, ok := fun.Pragma.(*Pragma); ok { f.Func.Pragma = pragma.Flag & FuncPragmas if pragma.Flag&Systemstack != 0 && pragma.Flag&Nosplit != 0 { - yyerrorl(f.Pos, "go:nosplit and go:systemstack cannot be combined") + base.ErrorfAt(f.Pos, "go:nosplit and go:systemstack cannot be combined") } pragma.Flag &^= FuncPragmas p.checkUnused(pragma) @@ -556,10 +557,10 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node { if fun.Body != nil { if f.Func.Pragma&Noescape != 0 { - yyerrorl(f.Pos, "can only use //go:noescape with external func implementations") + base.ErrorfAt(f.Pos, "can only use //go:noescape with external func implementations") } } else { - if Flag.Complete || strings.HasPrefix(f.funcname(), "init.") { + if base.Flag.Complete || strings.HasPrefix(f.funcname(), "init.") { // Linknamed functions are allowed to have no body. Hopefully // the linkname target has a body. See issue 23311. isLinknamed := false @@ -570,7 +571,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node { } } if !isLinknamed { - yyerrorl(f.Pos, "missing function body") + base.ErrorfAt(f.Pos, "missing function body") } } } @@ -610,13 +611,13 @@ func (p *noder) param(param *syntax.Field, dddOk, final bool) *Node { if typ.Op == ODDD { if !dddOk { // We mark these as syntax errors to get automatic elimination - // of multiple such errors per line (see yyerrorl in subr.go). - yyerror("syntax error: cannot use ... 
in receiver or result parameter list") + // of multiple such errors per line (see ErrorfAt in subr.go). + base.Errorf("syntax error: cannot use ... in receiver or result parameter list") } else if !final { if param.Name == nil { - yyerror("syntax error: cannot use ... with non-final parameter") + base.Errorf("syntax error: cannot use ... with non-final parameter") } else { - p.yyerrorpos(param.Name.Pos(), "syntax error: cannot use ... with non-final parameter %s", param.Name.Value) + p.errorAt(param.Name.Pos(), "syntax error: cannot use ... with non-final parameter %s", param.Name.Value) } } typ.Op = OTARRAY @@ -670,7 +671,7 @@ func (p *noder) expr(expr syntax.Expr) *Node { l[i] = p.wrapname(expr.ElemList[i], e) } n.List.Set(l) - lineno = p.makeXPos(expr.Rbrace) + base.Pos = p.makeXPos(expr.Rbrace) return n case *syntax.KeyValueExpr: // use position of expr.Key rather than of expr (which has position of ':') @@ -752,7 +753,7 @@ func (p *noder) expr(expr syntax.Expr) *Node { if expr.Lhs != nil { n.Left = p.declName(expr.Lhs) if n.Left.isBlank() { - yyerror("invalid variable name %v in type switch", n.Left) + base.Errorf("invalid variable name %v in type switch", n.Left) } } return n @@ -916,12 +917,12 @@ func (p *noder) packname(expr syntax.Expr) *types.Sym { name := p.name(expr.X.(*syntax.Name)) def := asNode(name.Def) if def == nil { - yyerror("undefined: %v", name) + base.Errorf("undefined: %v", name) return name } var pkg *types.Pkg if def.Op != OPACK { - yyerror("%v is not a package", name) + base.Errorf("%v is not a package", name) pkg = localpkg } else { def.Name.SetUsed(true) @@ -1026,7 +1027,7 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *Node { op = OCONTINUE case syntax.Fallthrough: if !fallOK { - yyerror("fallthrough statement out of place") + base.Errorf("fallthrough statement out of place") } op = OFALL case syntax.Goto: @@ -1066,7 +1067,7 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *Node { break } if asNode(ln.Sym.Def) != ln { - yyerror("%s is shadowed during return", ln.Sym.Name) + base.Errorf("%s is shadowed during return", ln.Sym.Name) } } } @@ -1107,7 +1108,7 @@ func (p *noder) assignList(expr syntax.Expr, defn *Node, colas bool) []*Node { name, ok := expr.(*syntax.Name) if !ok { - p.yyerrorpos(expr.Pos(), "non-name %v on left side of :=", p.expr(expr)) + p.errorAt(expr.Pos(), "non-name %v on left side of :=", p.expr(expr)) newOrErr = true continue } @@ -1118,7 +1119,7 @@ func (p *noder) assignList(expr syntax.Expr, defn *Node, colas bool) []*Node { } if seen[sym] { - p.yyerrorpos(expr.Pos(), "%v repeated on left side of :=", sym) + p.errorAt(expr.Pos(), "%v repeated on left side of :=", sym) newOrErr = true continue } @@ -1138,7 +1139,7 @@ func (p *noder) assignList(expr syntax.Expr, defn *Node, colas bool) []*Node { } if !newOrErr { - yyerrorl(defn.Pos, "no new variables on left side of :=") + base.ErrorfAt(defn.Pos, "no new variables on left side of :=") } return res } @@ -1256,10 +1257,10 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *Node, rbrace n.Nbody.Set(p.stmtsFall(body, true)) if l := n.Nbody.Len(); l > 0 && n.Nbody.Index(l-1).Op == OFALL { if tswitch != nil { - yyerror("cannot fallthrough in type switch") + base.Errorf("cannot fallthrough in type switch") } if i+1 == len(clauses) { - yyerror("cannot fallthrough final case in switch") + base.Errorf("cannot fallthrough final case in switch") } } @@ -1378,7 +1379,7 @@ func checkLangCompat(lit *syntax.BasicLit) { } // len(s) > 2 if strings.Contains(s, "_") { - 
yyerrorv("go1.13", "underscores in numeric literals") + base.ErrorfVers("go1.13", "underscores in numeric literals") return } if s[0] != '0' { @@ -1386,15 +1387,15 @@ func checkLangCompat(lit *syntax.BasicLit) { } radix := s[1] if radix == 'b' || radix == 'B' { - yyerrorv("go1.13", "binary literals") + base.ErrorfVers("go1.13", "binary literals") return } if radix == 'o' || radix == 'O' { - yyerrorv("go1.13", "0o/0O-style octal literals") + base.ErrorfVers("go1.13", "0o/0O-style octal literals") return } if lit.Kind != syntax.IntLit && (radix == 'x' || radix == 'X') { - yyerrorv("go1.13", "hexadecimal floating-point literals") + base.ErrorfVers("go1.13", "hexadecimal floating-point literals") } } @@ -1415,7 +1416,7 @@ func (p *noder) basicLit(lit *syntax.BasicLit) constant.Value { v := constant.MakeFromLiteral(lit.Value, tokenForLitKind[lit.Kind], 0) if v.Kind() == constant.Unknown { // TODO(mdempsky): Better error message? - p.yyerrorpos(lit.Pos(), "malformed constant: %s", lit.Value) + p.errorAt(lit.Pos(), "malformed constant: %s", lit.Value) } // go/constant uses big.Rat by default, which is more precise, but @@ -1474,7 +1475,7 @@ func (p *noder) nodSym(orig syntax.Node, op Op, left *Node, sym *types.Sym) *Nod func (p *noder) pos(n syntax.Node) src.XPos { // TODO(gri): orig.Pos() should always be known - fix package syntax - xpos := lineno + xpos := base.Pos if pos := n.Pos(); pos.IsKnown() { xpos = p.makeXPos(pos) } @@ -1483,7 +1484,7 @@ func (p *noder) pos(n syntax.Node) src.XPos { func (p *noder) setlineno(n syntax.Node) { if n != nil { - lineno = p.pos(n) + base.Pos = p.pos(n) } } @@ -1525,12 +1526,12 @@ type PragmaEmbed struct { func (p *noder) checkUnused(pragma *Pragma) { for _, pos := range pragma.Pos { if pos.Flag&pragma.Flag != 0 { - p.yyerrorpos(pos.Pos, "misplaced compiler directive") + p.errorAt(pos.Pos, "misplaced compiler directive") } } if len(pragma.Embeds) > 0 { for _, e := range pragma.Embeds { - p.yyerrorpos(e.Pos, "misplaced go:embed directive") + p.errorAt(e.Pos, "misplaced go:embed directive") } } } @@ -1619,7 +1620,7 @@ func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.P // For security, we disallow //go:cgo_* directives other // than cgo_import_dynamic outside cgo-generated files. // Exception: they are allowed in the standard library, for runtime and syscall. 
- if !isCgoGeneratedFile(pos) && !Flag.Std { + if !isCgoGeneratedFile(pos) && !base.Flag.Std { p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in cgo-generated code", text)}) } p.pragcgo(pos, text) @@ -1631,10 +1632,10 @@ func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.P } flag := pragmaFlag(verb) const runtimePragmas = Systemstack | Nowritebarrier | Nowritebarrierrec | Yeswritebarrierrec - if !Flag.CompilingRuntime && flag&runtimePragmas != 0 { + if !base.Flag.CompilingRuntime && flag&runtimePragmas != 0 { p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in runtime", verb)}) } - if flag == 0 && !allowedStdPragmas[verb] && Flag.Std { + if flag == 0 && !allowedStdPragmas[verb] && base.Flag.Std { p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s is not allowed in the standard library", verb)}) } pragma.Flag |= flag diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 170d997cd69b0..6c659c91c7750 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/bio" "cmd/internal/obj" @@ -47,20 +48,20 @@ const ( ) func dumpobj() { - if Flag.LinkObj == "" { - dumpobj1(Flag.LowerO, modeCompilerObj|modeLinkerObj) + if base.Flag.LinkObj == "" { + dumpobj1(base.Flag.LowerO, modeCompilerObj|modeLinkerObj) return } - dumpobj1(Flag.LowerO, modeCompilerObj) - dumpobj1(Flag.LinkObj, modeLinkerObj) + dumpobj1(base.Flag.LowerO, modeCompilerObj) + dumpobj1(base.Flag.LinkObj, modeLinkerObj) } func dumpobj1(outfile string, mode int) { bout, err := bio.Create(outfile) if err != nil { - flusherrors() + base.FlushErrors() fmt.Printf("can't create %s: %v\n", outfile, err) - errorexit() + base.ErrorExit() } defer bout.Close() bout.WriteString("!\n") @@ -79,8 +80,8 @@ func dumpobj1(outfile string, mode int) { func printObjHeader(bout *bio.Writer) { fmt.Fprintf(bout, "go object %s %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version, objabi.Expstring()) - if Flag.BuildID != "" { - fmt.Fprintf(bout, "build id %q\n", Flag.BuildID) + if base.Flag.BuildID != "" { + fmt.Fprintf(bout, "build id %q\n", base.Flag.BuildID) } if localpkg.Name == "main" { fmt.Fprintf(bout, "main\n") @@ -169,13 +170,13 @@ func dumpdata() { addGCLocals() if exportlistLen != len(exportlist) { - Fatalf("exportlist changed after compile functions loop") + base.Fatalf("exportlist changed after compile functions loop") } if ptabsLen != len(ptabs) { - Fatalf("ptabs changed after compile functions loop") + base.Fatalf("ptabs changed after compile functions loop") } if itabsLen != len(itabs) { - Fatalf("itabs changed after compile functions loop") + base.Fatalf("itabs changed after compile functions loop") } } @@ -187,18 +188,18 @@ func dumpLinkerObj(bout *bio.Writer) { fmt.Fprintf(bout, "\n$$\n\n$$\n\n") fmt.Fprintf(bout, "\n$$ // cgo\n") if err := json.NewEncoder(bout).Encode(pragcgobuf); err != nil { - Fatalf("serializing pragcgobuf: %v", err) + base.Fatalf("serializing pragcgobuf: %v", err) } fmt.Fprintf(bout, "\n$$\n\n") } fmt.Fprintf(bout, "\n!\n") - obj.WriteObjFile(Ctxt, bout) + obj.WriteObjFile(base.Ctxt, bout) } func addptabs() { - if !Ctxt.Flag_dynlink || localpkg.Name != "main" { + if !base.Ctxt.Flag_dynlink || localpkg.Name != "main" { return } for _, exportn := range exportlist { @@ -228,7 +229,7 @@ func addptabs() { func dumpGlobal(n *Node) { if n.Type == nil { - Fatalf("external %v nil type\n", 
n) + base.Fatalf("external %v nil type\n", n) } if n.Class() == PFUNC { return @@ -261,7 +262,7 @@ func dumpGlobalConst(n *Node) { return } } - Ctxt.DwarfIntConst(Ctxt.Pkgpath, n.Sym.Name, typesymname(t), int64Val(t, v)) + base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym.Name, typesymname(t), int64Val(t, v)) } func dumpglobls() { @@ -293,7 +294,7 @@ func dumpglobls() { // This is done during the sequential phase after compilation, since // global symbols can't be declared during parallel compilation. func addGCLocals() { - for _, s := range Ctxt.Text { + for _, s := range base.Ctxt.Text { fn := s.Func() if fn == nil { continue @@ -316,9 +317,9 @@ func addGCLocals() { func duintxx(s *obj.LSym, off int, v uint64, wid int) int { if off&(wid-1) != 0 { - Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off) + base.Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off) } - s.WriteInt(Ctxt, int64(off), wid, int64(v)) + s.WriteInt(base.Ctxt, int64(off), wid, int64(v)) return off + wid } @@ -369,7 +370,7 @@ func stringsym(pos src.XPos, s string) (data *obj.LSym) { symname = strconv.Quote(s) } - symdata := Ctxt.Lookup(stringSymPrefix + symname) + symdata := base.Ctxt.Lookup(stringSymPrefix + symname) if !symdata.OnList() { off := dstringdata(symdata, 0, s, pos, "string") ggloblsym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL) @@ -447,7 +448,7 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj. var symdata *obj.LSym if readonly { symname := fmt.Sprintf(stringSymPattern, size, sum) - symdata = Ctxt.Lookup(stringSymPrefix + symname) + symdata = base.Ctxt.Lookup(stringSymPrefix + symname) if !symdata.OnList() { info := symdata.NewFileInfo() info.Name = file @@ -489,7 +490,7 @@ func slicedata(pos src.XPos, s string) *Node { func slicebytes(nam *Node, s string) { if nam.Op != ONAME { - Fatalf("slicebytes %v", nam) + base.Fatalf("slicebytes %v", nam) } slicesym(nam, slicedata(nam.Pos, s), int64(len(s))) } @@ -499,29 +500,29 @@ func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int // causing a cryptic error message by the linker. Check for oversize objects here // and provide a useful error message instead. 
if int64(len(t)) > 2e9 { - yyerrorl(pos, "%v with length %v is too big", what, len(t)) + base.ErrorfAt(pos, "%v with length %v is too big", what, len(t)) return 0 } - s.WriteString(Ctxt, int64(off), len(t), t) + s.WriteString(base.Ctxt, int64(off), len(t), t) return off + len(t) } func dsymptr(s *obj.LSym, off int, x *obj.LSym, xoff int) int { off = int(Rnd(int64(off), int64(Widthptr))) - s.WriteAddr(Ctxt, int64(off), Widthptr, x, int64(xoff)) + s.WriteAddr(base.Ctxt, int64(off), Widthptr, x, int64(xoff)) off += Widthptr return off } func dsymptrOff(s *obj.LSym, off int, x *obj.LSym) int { - s.WriteOff(Ctxt, int64(off), x, 0) + s.WriteOff(base.Ctxt, int64(off), x, 0) off += 4 return off } func dsymptrWeakOff(s *obj.LSym, off int, x *obj.LSym) int { - s.WriteWeakOff(Ctxt, int64(off), x, 0) + s.WriteWeakOff(base.Ctxt, int64(off), x, 0) off += 4 return off } @@ -532,79 +533,79 @@ func slicesym(n, arr *Node, lencap int64) { s := n.Sym.Linksym() off := n.Xoffset if arr.Op != ONAME { - Fatalf("slicesym non-name arr %v", arr) + base.Fatalf("slicesym non-name arr %v", arr) } - s.WriteAddr(Ctxt, off, Widthptr, arr.Sym.Linksym(), arr.Xoffset) - s.WriteInt(Ctxt, off+sliceLenOffset, Widthptr, lencap) - s.WriteInt(Ctxt, off+sliceCapOffset, Widthptr, lencap) + s.WriteAddr(base.Ctxt, off, Widthptr, arr.Sym.Linksym(), arr.Xoffset) + s.WriteInt(base.Ctxt, off+sliceLenOffset, Widthptr, lencap) + s.WriteInt(base.Ctxt, off+sliceCapOffset, Widthptr, lencap) } // addrsym writes the static address of a to n. a must be an ONAME. // Neither n nor a is modified. func addrsym(n, a *Node) { if n.Op != ONAME { - Fatalf("addrsym n op %v", n.Op) + base.Fatalf("addrsym n op %v", n.Op) } if n.Sym == nil { - Fatalf("addrsym nil n sym") + base.Fatalf("addrsym nil n sym") } if a.Op != ONAME { - Fatalf("addrsym a op %v", a.Op) + base.Fatalf("addrsym a op %v", a.Op) } s := n.Sym.Linksym() - s.WriteAddr(Ctxt, n.Xoffset, Widthptr, a.Sym.Linksym(), a.Xoffset) + s.WriteAddr(base.Ctxt, n.Xoffset, Widthptr, a.Sym.Linksym(), a.Xoffset) } // pfuncsym writes the static address of f to n. f must be a global function. // Neither n nor f is modified. func pfuncsym(n, f *Node) { if n.Op != ONAME { - Fatalf("pfuncsym n op %v", n.Op) + base.Fatalf("pfuncsym n op %v", n.Op) } if n.Sym == nil { - Fatalf("pfuncsym nil n sym") + base.Fatalf("pfuncsym nil n sym") } if f.Class() != PFUNC { - Fatalf("pfuncsym class not PFUNC %d", f.Class()) + base.Fatalf("pfuncsym class not PFUNC %d", f.Class()) } s := n.Sym.Linksym() - s.WriteAddr(Ctxt, n.Xoffset, Widthptr, funcsym(f.Sym).Linksym(), f.Xoffset) + s.WriteAddr(base.Ctxt, n.Xoffset, Widthptr, funcsym(f.Sym).Linksym(), f.Xoffset) } // litsym writes the static literal c to n. // Neither n nor c is modified. 
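// Illustrative note (not part of this CL): litsym writes the constant's
// raw bytes at n's offset, switching on the constant kind; a string
// constant, for example, becomes the pointer/length pair of a string
// header:
//
//	s.WriteAddr(base.Ctxt, n.Xoffset, Widthptr, symdata, 0)
//	s.WriteInt(base.Ctxt, n.Xoffset+int64(Widthptr), Widthptr, int64(len(i)))
//
// exactly as in the constant.String case further down.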
func litsym(n, c *Node, wid int) { if n.Op != ONAME { - Fatalf("litsym n op %v", n.Op) + base.Fatalf("litsym n op %v", n.Op) } if n.Sym == nil { - Fatalf("litsym nil n sym") + base.Fatalf("litsym nil n sym") } if !types.Identical(n.Type, c.Type) { - Fatalf("litsym: type mismatch: %v has type %v, but %v has type %v", n, n.Type, c, c.Type) + base.Fatalf("litsym: type mismatch: %v has type %v, but %v has type %v", n, n.Type, c, c.Type) } if c.Op == ONIL { return } if c.Op != OLITERAL { - Fatalf("litsym c op %v", c.Op) + base.Fatalf("litsym c op %v", c.Op) } s := n.Sym.Linksym() switch u := c.Val(); u.Kind() { case constant.Bool: i := int64(obj.Bool2int(constant.BoolVal(u))) - s.WriteInt(Ctxt, n.Xoffset, wid, i) + s.WriteInt(base.Ctxt, n.Xoffset, wid, i) case constant.Int: - s.WriteInt(Ctxt, n.Xoffset, wid, int64Val(n.Type, u)) + s.WriteInt(base.Ctxt, n.Xoffset, wid, int64Val(n.Type, u)) case constant.Float: f, _ := constant.Float64Val(u) switch n.Type.Etype { case TFLOAT32: - s.WriteFloat32(Ctxt, n.Xoffset, float32(f)) + s.WriteFloat32(base.Ctxt, n.Xoffset, float32(f)) case TFLOAT64: - s.WriteFloat64(Ctxt, n.Xoffset, f) + s.WriteFloat64(base.Ctxt, n.Xoffset, f) } case constant.Complex: @@ -612,20 +613,20 @@ func litsym(n, c *Node, wid int) { im, _ := constant.Float64Val(constant.Imag(u)) switch n.Type.Etype { case TCOMPLEX64: - s.WriteFloat32(Ctxt, n.Xoffset, float32(re)) - s.WriteFloat32(Ctxt, n.Xoffset+4, float32(im)) + s.WriteFloat32(base.Ctxt, n.Xoffset, float32(re)) + s.WriteFloat32(base.Ctxt, n.Xoffset+4, float32(im)) case TCOMPLEX128: - s.WriteFloat64(Ctxt, n.Xoffset, re) - s.WriteFloat64(Ctxt, n.Xoffset+8, im) + s.WriteFloat64(base.Ctxt, n.Xoffset, re) + s.WriteFloat64(base.Ctxt, n.Xoffset+8, im) } case constant.String: i := constant.StringVal(u) symdata := stringsym(n.Pos, i) - s.WriteAddr(Ctxt, n.Xoffset, Widthptr, symdata, 0) - s.WriteInt(Ctxt, n.Xoffset+int64(Widthptr), Widthptr, int64(len(i))) + s.WriteAddr(base.Ctxt, n.Xoffset, Widthptr, symdata, 0) + s.WriteInt(base.Ctxt, n.Xoffset+int64(Widthptr), Widthptr, int64(len(i))) default: - Fatalf("litsym unhandled OLITERAL %v", c) + base.Fatalf("litsym unhandled OLITERAL %v", c) } } diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 90c08b1b7503e..3b0f31669627b 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/src" "fmt" @@ -50,7 +51,7 @@ type Order struct { // Order rewrites fn.Nbody to apply the ordering constraints // described in the comment at the top of the file. 
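
litsym in the hunk above stores each literal kind field by field; in particular a complex constant becomes two float stores, with the imaginary part one float width past the real part (+4 for complex64, +8 for complex128). The in-memory representation agrees, as this sketch shows:

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        c := complex(3.0, 4.0) // complex128
        // Reinterpret as the two adjacent float64 words litsym writes.
        parts := *(*[2]float64)(unsafe.Pointer(&c))
        fmt.Println(parts[0], parts[1]) // 3 4
    }
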
func order(fn *Node) { - if Flag.W > 1 { + if base.Flag.W > 1 { s := fmt.Sprintf("\nbefore order %v", fn.Func.Nname.Sym) dumplist(s, fn.Nbody) } @@ -181,7 +182,7 @@ func (o *Order) safeExpr(n *Node) *Node { return typecheck(a, ctxExpr) default: - Fatalf("order.safeExpr %v", n.Op) + base.Fatalf("order.safeExpr %v", n.Op) return nil // not reached } } @@ -210,7 +211,7 @@ func (o *Order) addrTemp(n *Node) *Node { var s InitSchedule s.staticassign(vstat, n) if s.out != nil { - Fatalf("staticassign of const generated code: %+v", n) + base.Fatalf("staticassign of const generated code: %+v", n) } vstat = typecheck(vstat, ctxExpr) return vstat @@ -323,7 +324,7 @@ func (o *Order) stmtList(l Nodes) { // and rewrites it to: // m = OMAKESLICECOPY([]T, x, s); nil func orderMakeSliceCopy(s []*Node) { - if Flag.N != 0 || instrumenting { + if base.Flag.N != 0 || instrumenting { return } @@ -384,7 +385,7 @@ func orderMakeSliceCopy(s []*Node) { // edge inserts coverage instrumentation for libfuzzer. func (o *Order) edge() { - if Debug.Libfuzzer == 0 { + if base.Debug.Libfuzzer == 0 { return } @@ -450,7 +451,7 @@ func (o *Order) init(n *Node) { // For concurrency safety, don't mutate potentially shared nodes. // First, ensure that no work is required here. if n.Ninit.Len() > 0 { - Fatalf("order.init shared node with ninit") + base.Fatalf("order.init shared node with ninit") } return } @@ -463,7 +464,7 @@ func (o *Order) init(n *Node) { func (o *Order) call(n *Node) { if n.Ninit.Len() > 0 { // Caller should have already called o.init(n). - Fatalf("%v with unexpected ninit", n.Op) + base.Fatalf("%v with unexpected ninit", n.Op) } // Builtin functions. @@ -526,7 +527,7 @@ func (o *Order) call(n *Node) { func (o *Order) mapAssign(n *Node) { switch n.Op { default: - Fatalf("order.mapAssign %v", n.Op) + base.Fatalf("order.mapAssign %v", n.Op) case OAS, OASOP: if n.Left.Op == OINDEXMAP { @@ -582,7 +583,7 @@ func (o *Order) stmt(n *Node) { switch n.Op { default: - Fatalf("order.stmt %v", n.Op) + base.Fatalf("order.stmt %v", n.Op) case OVARKILL, OVARLIVE, OINLMARK: o.out = append(o.out, n) @@ -659,7 +660,7 @@ func (o *Order) stmt(n *Node) { _ = mapKeyReplaceStrConv(r.Right) r.Right = o.mapKeyTemp(r.Left.Type, r.Right) default: - Fatalf("order.stmt: %v", r.Op) + base.Fatalf("order.stmt: %v", r.Op) } o.okAs2(n) @@ -776,7 +777,7 @@ func (o *Order) stmt(n *Node) { orderBody := true switch n.Type.Etype { default: - Fatalf("order.stmt range %v", n.Type) + base.Fatalf("order.stmt range %v", n.Type) case TARRAY, TSLICE: if n.List.Len() < 2 || n.List.Second().isBlank() { @@ -843,7 +844,7 @@ func (o *Order) stmt(n *Node) { for _, n2 := range n.List.Slice() { if n2.Op != OCASE { - Fatalf("order select case %v", n2.Op) + base.Fatalf("order select case %v", n2.Op) } r := n2.Left setlineno(n2) @@ -851,7 +852,7 @@ func (o *Order) stmt(n *Node) { // Append any new body prologue to ninit. // The next loop will insert ninit into nbody. if n2.Ninit.Len() != 0 { - Fatalf("order select ninit") + base.Fatalf("order select ninit") } if r == nil { continue @@ -859,7 +860,7 @@ func (o *Order) stmt(n *Node) { switch r.Op { default: Dump("select case", r) - Fatalf("unknown op in select %v", r.Op) + base.Fatalf("unknown op in select %v", r.Op) // If this is case x := <-ch or case x, y := <-ch, the case has // the ODCL nodes to declare x and y. 
We want to delay that @@ -881,7 +882,7 @@ func (o *Order) stmt(n *Node) { if r.Ninit.Len() != 0 { dumplist("ninit", r.Ninit) - Fatalf("ninit on select recv") + base.Fatalf("ninit on select recv") } // case x = <-c @@ -943,7 +944,7 @@ func (o *Order) stmt(n *Node) { case OSEND: if r.Ninit.Len() != 0 { dumplist("ninit", r.Ninit) - Fatalf("ninit on select send") + base.Fatalf("ninit on select send") } // case c <- x @@ -998,7 +999,7 @@ func (o *Order) stmt(n *Node) { // For now just clean all the temporaries at the end. // In practice that's fine. case OSWITCH: - if Debug.Libfuzzer != 0 && !hasDefaultCase(n) { + if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) { // Add empty "default:" case for instrumentation. n.List.Append(nod(OCASE, nil, nil)) } @@ -1007,7 +1008,7 @@ func (o *Order) stmt(n *Node) { n.Left = o.expr(n.Left, nil) for _, ncas := range n.List.Slice() { if ncas.Op != OCASE { - Fatalf("order switch case %v", ncas.Op) + base.Fatalf("order switch case %v", ncas.Op) } o.exprListInPlace(ncas.List) orderBlock(&ncas.Nbody, o.free) @@ -1017,13 +1018,13 @@ func (o *Order) stmt(n *Node) { o.cleanTemp(t) } - lineno = lno + base.Pos = lno } func hasDefaultCase(n *Node) bool { for _, ncas := range n.List.Slice() { if ncas.Op != OCASE { - Fatalf("expected case, found %v", ncas.Op) + base.Fatalf("expected case, found %v", ncas.Op) } if ncas.List.Len() == 0 { return true @@ -1330,7 +1331,7 @@ func (o *Order) expr(n, lhs *Node) *Node { var dynamics []*Node for _, r := range entries { if r.Op != OKEY { - Fatalf("OMAPLIT entry not OKEY: %v\n", r) + base.Fatalf("OMAPLIT entry not OKEY: %v\n", r) } if !isStaticCompositeLiteral(r.Left) || !isStaticCompositeLiteral(r.Right) { @@ -1369,7 +1370,7 @@ func (o *Order) expr(n, lhs *Node) *Node { } } - lineno = lno + base.Pos = lno return n } diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 19a24a3235eb5..f10599dc2806b 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/ssa" "cmd/compile/internal/types" "cmd/internal/dwarf" @@ -29,7 +30,7 @@ func emitptrargsmap(fn *Node) { if fn.funcname() == "_" || fn.Func.Nname.Sym.Linkname != "" { return } - lsym := Ctxt.Lookup(fn.Func.lsym.Name + ".args_stackmap") + lsym := base.Ctxt.Lookup(fn.Func.lsym.Name + ".args_stackmap") nptr := int(fn.Type.ArgWidth() / int64(Widthptr)) bv := bvalloc(int32(nptr) * 2) @@ -164,7 +165,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { dowidth(n.Type) w := n.Type.Width if w >= thearch.MAXWIDTH || w < 0 { - Fatalf("bad width") + base.Fatalf("bad width") } if w == 0 && lastHasPtr { // Pad between a pointer-containing object and a zero-sized object. 
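
The OMAPLIT case in the order.go hunk above partitions map-literal entries: pairs where both key and value are static composite literals stay in the literal, everything else is peeled off into the dynamics list and becomes an ordinary indexed assignment. A source-level sketch of the effect (the compiler works on the IR, not the source):

    package main

    import "fmt"

    func main() {
        k, v := "c", 3

        // Written as: m := map[string]int{"a": 1, "b": 2, k: v}
        // After ordering, roughly:
        m := map[string]int{"a": 1, "b": 2} // static entries stay in the literal
        m[k] = v                            // dynamic entry becomes an assignment

        fmt.Println(m) // map[a:1 b:2 c:3]
    }
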
@@ -193,12 +194,12 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { func funccompile(fn *Node) { if Curfn != nil { - Fatalf("funccompile %v inside %v", fn.Func.Nname.Sym, Curfn.Func.Nname.Sym) + base.Fatalf("funccompile %v inside %v", fn.Func.Nname.Sym, Curfn.Func.Nname.Sym) } if fn.Type == nil { - if Errors() == 0 { - Fatalf("funccompile missing type") + if base.Errors() == 0 { + base.Fatalf("funccompile missing type") } return } @@ -223,9 +224,9 @@ func funccompile(fn *Node) { } func compile(fn *Node) { - errorsBefore := Errors() + errorsBefore := base.Errors() order(fn) - if Errors() > errorsBefore { + if base.Errors() > errorsBefore { return } @@ -235,7 +236,7 @@ func compile(fn *Node) { fn.Func.initLSym(true) walk(fn) - if Errors() > errorsBefore { + if base.Errors() > errorsBefore { return } if instrumenting { @@ -265,7 +266,7 @@ func compile(fn *Node) { // Also make sure we allocate a linker symbol // for the stack object data, for the same reason. if fn.Func.lsym.Func().StackObjects == nil { - fn.Func.lsym.Func().StackObjects = Ctxt.Lookup(fn.Func.lsym.Name + ".stkobj") + fn.Func.lsym.Func().StackObjects = base.Ctxt.Lookup(fn.Func.lsym.Name + ".stkobj") } } } @@ -291,7 +292,7 @@ func compilenow(fn *Node) bool { if fn.IsMethod() && isInlinableButNotInlined(fn) { return false } - return Flag.LowerC == 1 && Debug.CompileLater == 0 + return base.Flag.LowerC == 1 && base.Debug.CompileLater == 0 } // isInlinableButNotInlined returns true if 'fn' was marked as an @@ -373,9 +374,9 @@ func compileFunctions() { }) } var wg sync.WaitGroup - Ctxt.InParallel = true - c := make(chan *Node, Flag.LowerC) - for i := 0; i < Flag.LowerC; i++ { + base.Ctxt.InParallel = true + c := make(chan *Node, base.Flag.LowerC) + for i := 0; i < base.Flag.LowerC; i++ { wg.Add(1) go func(worker int) { for fn := range c { @@ -390,7 +391,7 @@ func compileFunctions() { close(c) compilequeue = nil wg.Wait() - Ctxt.InParallel = false + base.Ctxt.InParallel = false sizeCalculationDisabled = false } } @@ -399,7 +400,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S fn := curfn.(*Node) if fn.Func.Nname != nil { if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect { - Fatalf("unexpected fnsym: %v != %v", fnsym, expect) + base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect) } } @@ -442,7 +443,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S if !n.Name.Used() { // Text == nil -> generating abstract function if fnsym.Func().Text != nil { - Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)") + base.Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)") } continue } @@ -481,7 +482,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes) var inlcalls dwarf.InlCalls - if Flag.GenDwarfInl > 0 { + if base.Flag.GenDwarfInl > 0 { inlcalls = assembleInlines(fnsym, dwarfVars) } return scopes, inlcalls @@ -533,7 +534,7 @@ func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var { switch n.Class() { case PAUTO: abbrev = dwarf.DW_ABRV_AUTO - if Ctxt.FixedFrameSize() == 0 { + if base.Ctxt.FixedFrameSize() == 0 { offs -= int64(Widthptr) } if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" { @@ -543,15 +544,15 @@ func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var { case PPARAM, PPARAMOUT: abbrev = dwarf.DW_ABRV_PARAM - offs += Ctxt.FixedFrameSize() + offs += base.Ctxt.FixedFrameSize() default: - Fatalf("createSimpleVar 
unexpected class %v for node %v", n.Class(), n) + base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n) } typename := dwarf.InfoPrefix + typesymname(n.Type) delete(fnsym.Func().Autot, ngotype(n).Linksym()) inlIndex := 0 - if Flag.GenDwarfInl > 1 { + if base.Flag.GenDwarfInl > 1 { if n.Name.InlFormal() || n.Name.InlLocal() { inlIndex = posInlIndex(n.Pos) + 1 if n.Name.InlFormal() { @@ -559,14 +560,14 @@ func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var { } } } - declpos := Ctxt.InnermostPos(declPos(n)) + declpos := base.Ctxt.InnermostPos(declPos(n)) return &dwarf.Var{ Name: n.Sym.Name, IsReturnValue: n.Class() == PPARAMOUT, IsInlFormal: n.Name.InlFormal(), Abbrev: abbrev, StackOffset: int32(offs), - Type: Ctxt.Lookup(typename), + Type: base.Ctxt.Lookup(typename), DeclFile: declpos.RelFilename(), DeclLine: declpos.RelLine(), DeclCol: declpos.Col(), @@ -608,7 +609,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *Func, apDecls []*Node) var vars []*dwarf.Var var decls []*Node var selected map[*Node]bool - if Ctxt.Flag_locationlists && Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK { + if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK { decls, vars, selected = createComplexVars(fnsym, fn) } else { decls, vars, selected = createSimpleVars(fnsym, apDecls) @@ -672,7 +673,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *Func, apDecls []*Node) } } inlIndex := 0 - if Flag.GenDwarfInl > 1 { + if base.Flag.GenDwarfInl > 1 { if n.Name.InlFormal() || n.Name.InlLocal() { inlIndex = posInlIndex(n.Pos) + 1 if n.Name.InlFormal() { @@ -680,13 +681,13 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *Func, apDecls []*Node) } } } - declpos := Ctxt.InnermostPos(n.Pos) + declpos := base.Ctxt.InnermostPos(n.Pos) vars = append(vars, &dwarf.Var{ Name: n.Sym.Name, IsReturnValue: isReturnValue, Abbrev: abbrev, StackOffset: int32(n.Xoffset), - Type: Ctxt.Lookup(typename), + Type: base.Ctxt.Lookup(typename), DeclFile: declpos.RelFilename(), DeclLine: declpos.RelLine(), DeclCol: declpos.Col(), @@ -707,7 +708,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *Func, apDecls []*Node) // names of the variables may have been "versioned" to avoid conflicts // with local vars; disregard this versioning when sorting. 
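
compileFunctions in the pgen.go hunk above fans the queued functions out to base.Flag.LowerC workers over a buffered channel, with a WaitGroup to join them and base.Ctxt.InParallel set for the duration. The same pattern in miniature, with strings standing in for the queued *Node functions:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        work := []string{"f1", "f2", "f3", "f4"}
        const workers = 2 // stands in for base.Flag.LowerC

        var wg sync.WaitGroup
        c := make(chan string, workers)
        for i := 0; i < workers; i++ {
            wg.Add(1)
            go func(worker int) {
                defer wg.Done()
                for fn := range c {
                    fmt.Printf("worker %d: compiling %s\n", worker, fn)
                }
            }(i)
        }
        for _, fn := range work {
            c <- fn
        }
        close(c)
        wg.Wait()
    }
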
func preInliningDcls(fnsym *obj.LSym) []*Node { - fn := Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*Node) + fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*Node) var rdcl []*Node for _, n := range fn.Func.Inl.Dcl { c := n.Sym.Name[0] @@ -729,7 +730,7 @@ func stackOffset(slot ssa.LocalSlot) int32 { var off int64 switch n.Class() { case PAUTO: - if Ctxt.FixedFrameSize() == 0 { + if base.Ctxt.FixedFrameSize() == 0 { off -= int64(Widthptr) } if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" { @@ -737,7 +738,7 @@ func stackOffset(slot ssa.LocalSlot) int32 { off -= int64(Widthptr) } case PPARAM, PPARAMOUT: - off += Ctxt.FixedFrameSize() + off += base.Ctxt.FixedFrameSize() } return int32(off + n.Xoffset + slot.Off) } @@ -761,7 +762,7 @@ func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var { delete(fnsym.Func().Autot, gotype) typename := dwarf.InfoPrefix + gotype.Name[len("type."):] inlIndex := 0 - if Flag.GenDwarfInl > 1 { + if base.Flag.GenDwarfInl > 1 { if n.Name.InlFormal() || n.Name.InlLocal() { inlIndex = posInlIndex(n.Pos) + 1 if n.Name.InlFormal() { @@ -769,13 +770,13 @@ func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var { } } } - declpos := Ctxt.InnermostPos(n.Pos) + declpos := base.Ctxt.InnermostPos(n.Pos) dvar := &dwarf.Var{ Name: n.Sym.Name, IsReturnValue: n.Class() == PPARAMOUT, IsInlFormal: n.Name.InlFormal(), Abbrev: abbrev, - Type: Ctxt.Lookup(typename), + Type: base.Ctxt.Lookup(typename), // The stack offset is used as a sorting key, so for decomposed // variables just give it the first one. It's not used otherwise. // This won't work well if the first slot hasn't been assigned a stack @@ -790,7 +791,7 @@ func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var { list := debug.LocationLists[varID] if len(list) != 0 { dvar.PutLocationList = func(listSym, startPC dwarf.Sym) { - debug.PutLocationList(list, Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym)) + debug.PutLocationList(list, base.Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym)) } } return dvar diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index 5f4af06b80321..da2298480ad60 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -15,6 +15,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/ssa" "cmd/compile/internal/types" "cmd/internal/obj" @@ -226,7 +227,7 @@ func getvariables(fn *Node) ([]*Node, map[*Node]int32) { func (lv *Liveness) initcache() { if lv.cache.initialized { - Fatalf("liveness cache initialized twice") + base.Fatalf("liveness cache initialized twice") return } lv.cache.initialized = true @@ -341,7 +342,7 @@ func affectedNode(v *ssa.Value) (*Node, ssa.SymEffect) { case *Node: return a, e default: - Fatalf("weird aux: %s", v.LongString()) + base.Fatalf("weird aux: %s", v.LongString()) return nil, e } } @@ -406,7 +407,7 @@ func (lv *Liveness) blockEffects(b *ssa.Block) *BlockEffects { // on future calls with the same type t. 
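
stackOffset above folds three ingredients into a frame offset: the slot's own offset, one pointer word for the pushed return address on targets where FixedFrameSize() is zero, and one more for the saved frame pointer (frame-pointer targets and arm64). A standalone sketch; the 64-bit word size, zero fixed frame, and enabled frame pointer are assumptions here:

    package main

    import "fmt"

    func stackOffset(class string, xoff int64) int64 {
        const widthPtr = 8            // assumed 64-bit target
        const fixedFrameSize = 0      // e.g. amd64
        const framePointerEnabled = true

        var off int64
        switch class {
        case "PAUTO":
            if fixedFrameSize == 0 {
                off -= widthPtr // skip the pushed return address
            }
            if framePointerEnabled {
                off -= widthPtr // skip the saved frame pointer
            }
        case "PPARAM", "PPARAMOUT":
            off += fixedFrameSize
        }
        return off + xoff // the real code also adds slot.Off
    }

    func main() {
        fmt.Println(stackOffset("PAUTO", -16)) // -32
        fmt.Println(stackOffset("PPARAM", 8))  // 8
    }
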
func onebitwalktype1(t *types.Type, off int64, bv bvec) { if t.Align > 0 && off&int64(t.Align-1) != 0 { - Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off) + base.Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off) } if !t.HasPointers() { // Note: this case ensures that pointers to go:notinheap types @@ -417,14 +418,14 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) { switch t.Etype { case TPTR, TUNSAFEPTR, TFUNC, TCHAN, TMAP: if off&int64(Widthptr-1) != 0 { - Fatalf("onebitwalktype1: invalid alignment, %v", t) + base.Fatalf("onebitwalktype1: invalid alignment, %v", t) } bv.Set(int32(off / int64(Widthptr))) // pointer case TSTRING: // struct { byte *str; intgo len; } if off&int64(Widthptr-1) != 0 { - Fatalf("onebitwalktype1: invalid alignment, %v", t) + base.Fatalf("onebitwalktype1: invalid alignment, %v", t) } bv.Set(int32(off / int64(Widthptr))) //pointer in first slot @@ -433,7 +434,7 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) { // or, when isnilinter(t)==true: // struct { Type *type; void *data; } if off&int64(Widthptr-1) != 0 { - Fatalf("onebitwalktype1: invalid alignment, %v", t) + base.Fatalf("onebitwalktype1: invalid alignment, %v", t) } // The first word of an interface is a pointer, but we don't // treat it as such. @@ -452,7 +453,7 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) { case TSLICE: // struct { byte *array; uintgo len; uintgo cap; } if off&int64(Widthptr-1) != 0 { - Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t) + base.Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t) } bv.Set(int32(off / int64(Widthptr))) // pointer in first slot (BitsPointer) @@ -473,7 +474,7 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) { } default: - Fatalf("onebitwalktype1: unexpected type, %v", t) + base.Fatalf("onebitwalktype1: unexpected type, %v", t) } } @@ -509,7 +510,7 @@ func allUnsafe(f *ssa.Func) bool { // go:nosplit functions are similar. Since safe points used to // be coupled with stack checks, go:nosplit often actually // means "no safe points in this function". - return Flag.CompilingRuntime || f.NoSplit + return base.Flag.CompilingRuntime || f.NoSplit } // markUnsafePoints finds unsafe points and computes lv.unsafePoints. @@ -791,7 +792,7 @@ func (lv *Liveness) epilogue() { if n.Class() == PPARAMOUT { if n.Name.IsOutputParamHeapAddr() { // Just to be paranoid. Heap addresses are PAUTOs. - Fatalf("variable %v both output param and heap output param", n) + base.Fatalf("variable %v both output param and heap output param", n) } if n.Name.Param.Heapaddr != nil { // If this variable moved to the heap, then @@ -816,7 +817,7 @@ func (lv *Liveness) epilogue() { livedefer.Set(int32(i)) // It was already marked as Needzero when created. if !n.Name.Needzero() { - Fatalf("all pointer-containing defer arg slots should have Needzero set") + base.Fatalf("all pointer-containing defer arg slots should have Needzero set") } } } @@ -878,7 +879,7 @@ func (lv *Liveness) epilogue() { if b == lv.f.Entry { if index != 0 { - Fatalf("bad index for entry point: %v", index) + base.Fatalf("bad index for entry point: %v", index) } // Check to make sure only input variables are live. 
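
onebitwalktype1 above emits one bit per pointer-sized word: plain pointers, chans, maps, and funcs set their word; strings and slices set only their data word; interfaces set only their second word (the first points at an itab or _type that needs no scan). A reflect-based sketch of the same walk, assuming a 64-bit target and handling just the kinds the demo needs:

    package main

    import (
        "fmt"
        "reflect"
    )

    const ptrSize = 8 // Widthptr on a 64-bit target (assumption)

    func walk(t reflect.Type, off uintptr, bits []bool) {
        switch t.Kind() {
        case reflect.Ptr, reflect.UnsafePointer, reflect.Map, reflect.Chan, reflect.Func:
            bits[off/ptrSize] = true
        case reflect.String, reflect.Slice:
            bits[off/ptrSize] = true // data pointer in the first word only
        case reflect.Interface:
            bits[off/ptrSize+1] = true // data word only, per the comment above
        case reflect.Struct:
            for i := 0; i < t.NumField(); i++ {
                f := t.Field(i)
                walk(f.Type, off+f.Offset, bits)
            }
        }
    }

    func main() {
        type T struct {
            N int
            P *int
            S string
        }
        t := reflect.TypeOf(T{})
        bits := make([]bool, t.Size()/ptrSize)
        walk(t, 0, bits)
        fmt.Println(bits) // [false true true false]: P and S's data word
    }
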
@@ -889,7 +890,7 @@ func (lv *Liveness) epilogue() { if n.Class() == PPARAM { continue // ok } - Fatalf("bad live variable at entry of %v: %L", lv.fn.Func.Nname, n) + base.Fatalf("bad live variable at entry of %v: %L", lv.fn.Func.Nname, n) } // Record live variables. @@ -966,7 +967,7 @@ func (lv *Liveness) compact(b *ssa.Block) { } func (lv *Liveness) showlive(v *ssa.Value, live bvec) { - if Flag.Live == 0 || lv.fn.funcname() == "init" || strings.HasPrefix(lv.fn.funcname(), ".") { + if base.Flag.Live == 0 || lv.fn.funcname() == "init" || strings.HasPrefix(lv.fn.funcname(), ".") { return } if !(v == nil || v.Op.IsCall()) { @@ -1002,7 +1003,7 @@ func (lv *Liveness) showlive(v *ssa.Value, live bvec) { } } - Warnl(pos, s) + base.WarnfAt(pos, s) } func (lv *Liveness) printbvec(printed bool, name string, live bvec) bool { @@ -1088,7 +1089,7 @@ func (lv *Liveness) printDebug() { if b == lv.f.Entry { live := lv.stackMaps[0] - fmt.Printf("(%s) function entry\n", linestr(lv.fn.Func.Nname.Pos)) + fmt.Printf("(%s) function entry\n", base.FmtPos(lv.fn.Func.Nname.Pos)) fmt.Printf("\tlive=") printed = false for j, n := range lv.vars { @@ -1105,7 +1106,7 @@ func (lv *Liveness) printDebug() { } for _, v := range b.Values { - fmt.Printf("(%s) %v\n", linestr(v.Pos), v.LongString()) + fmt.Printf("(%s) %v\n", base.FmtPos(v.Pos), v.LongString()) pcdata := lv.livenessMap.Get(v) @@ -1214,7 +1215,7 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) { // These symbols will be added to Ctxt.Data by addGCLocals // after parallel compilation is done. makeSym := func(tmpSym *obj.LSym) *obj.LSym { - return Ctxt.LookupInit(fmt.Sprintf("gclocals·%x", md5.Sum(tmpSym.P)), func(lsym *obj.LSym) { + return base.Ctxt.LookupInit(fmt.Sprintf("gclocals·%x", md5.Sum(tmpSym.P)), func(lsym *obj.LSym) { lsym.P = tmpSym.P lsym.Set(obj.AttrContentAddressable, true) }) @@ -1235,7 +1236,7 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap { lv.prologue() lv.solve() lv.epilogue() - if Flag.Live > 0 { + if base.Flag.Live > 0 { lv.showlive(nil, lv.stackMaps[0]) for _, b := range f.Blocks { for _, val := range b.Values { @@ -1245,7 +1246,7 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap { } } } - if Flag.Live >= 2 { + if base.Flag.Live >= 2 { lv.printDebug() } diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go index 733d19c024697..20b4bc583b015 100644 --- a/src/cmd/compile/internal/gc/racewalk.go +++ b/src/cmd/compile/internal/gc/racewalk.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/src" "cmd/internal/sys" @@ -47,9 +48,9 @@ var omit_pkgs = []string{ var norace_inst_pkgs = []string{"sync", "sync/atomic"} func ispkgin(pkgs []string) bool { - if Ctxt.Pkgpath != "" { + if base.Ctxt.Pkgpath != "" { for _, p := range pkgs { - if Ctxt.Pkgpath == p { + if base.Ctxt.Pkgpath == p { return true } } @@ -63,13 +64,13 @@ func instrument(fn *Node) { return } - if !Flag.Race || !ispkgin(norace_inst_pkgs) { + if !base.Flag.Race || !ispkgin(norace_inst_pkgs) { fn.Func.SetInstrumentBody(true) } - if Flag.Race { - lno := lineno - lineno = src.NoXPos + if base.Flag.Race { + lno := base.Pos + base.Pos = src.NoXPos if thearch.LinkArch.Arch.Family != sys.AMD64 { fn.Func.Enter.Prepend(mkcall("racefuncenterfp", nil, nil)) @@ -88,6 +89,6 @@ func instrument(fn *Node) { fn.Func.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc)) fn.Func.Exit.Append(mkcall("racefuncexit", nil, nil)) } - lineno = lno + base.Pos = lno } } diff 
--git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index 44776e988e95e..568c5138ec30b 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/sys" "unicode/utf8" @@ -61,7 +62,7 @@ func typecheckrangeExpr(n *Node) { toomany := false switch t.Etype { default: - yyerrorl(n.Pos, "cannot range over %L", n.Right) + base.ErrorfAt(n.Pos, "cannot range over %L", n.Right) return case TARRAY, TSLICE: @@ -74,7 +75,7 @@ func typecheckrangeExpr(n *Node) { case TCHAN: if !t.ChanDir().CanRecv() { - yyerrorl(n.Pos, "invalid operation: range %v (receive from send-only type %v)", n.Right, n.Right.Type) + base.ErrorfAt(n.Pos, "invalid operation: range %v (receive from send-only type %v)", n.Right, n.Right.Type) return } @@ -90,7 +91,7 @@ func typecheckrangeExpr(n *Node) { } if n.List.Len() > 2 || toomany { - yyerrorl(n.Pos, "too many variables in range") + base.ErrorfAt(n.Pos, "too many variables in range") } var v1, v2 *Node @@ -117,7 +118,7 @@ func typecheckrangeExpr(n *Node) { v1.Type = t1 } else if v1.Type != nil { if op, why := assignop(t1, v1.Type); op == OXXX { - yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t1, v1, why) + base.ErrorfAt(n.Pos, "cannot assign type %v to %L in range%s", t1, v1, why) } } checkassign(n, v1) @@ -128,7 +129,7 @@ func typecheckrangeExpr(n *Node) { v2.Type = t2 } else if v2.Type != nil { if op, why := assignop(t2, v2.Type); op == OXXX { - yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t2, v2, why) + base.ErrorfAt(n.Pos, "cannot assign type %v to %L in range%s", t2, v2, why) } } checkassign(n, v2) @@ -160,7 +161,7 @@ func walkrange(n *Node) *Node { m := n.Right lno := setlineno(m) n = mapClear(m) - lineno = lno + base.Pos = lno return n } @@ -196,7 +197,7 @@ func walkrange(n *Node) *Node { } if v1 == nil && v2 != nil { - Fatalf("walkrange: v2 != nil while v1 == nil") + base.Fatalf("walkrange: v2 != nil while v1 == nil") } // n.List has no meaning anymore, clear it @@ -211,11 +212,11 @@ func walkrange(n *Node) *Node { var init []*Node switch t.Etype { default: - Fatalf("walkrange") + base.Fatalf("walkrange") case TARRAY, TSLICE: if arrayClear(n, v1, v2, a) { - lineno = lno + base.Pos = lno return n } @@ -454,7 +455,7 @@ func walkrange(n *Node) *Node { n = walkstmt(n) - lineno = lno + base.Pos = lno return n } @@ -466,7 +467,7 @@ func walkrange(n *Node) *Node { // // where == for keys of map m is reflexive. func isMapClear(n *Node) bool { - if Flag.N != 0 || instrumenting { + if base.Flag.N != 0 || instrumenting { return false } @@ -533,7 +534,7 @@ func mapClear(m *Node) *Node { // // Parameters are as in walkrange: "for v1, v2 = range a". func arrayClear(n, v1, v2, a *Node) bool { - if Flag.N != 0 || instrumenting { + if base.Flag.N != 0 || instrumenting { return false } diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 11ccc15a253fa..456903e7d75f3 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/gcprog" "cmd/internal/obj" @@ -131,52 +132,52 @@ func bmap(t *types.Type) *types.Type { // Check invariants that map code depends on. 
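
isMapClear in the range.go hunk above recognizes exactly this loop shape, a range over a map whose body is a single delete of the iteration key, and mapClear then replaces the loop with one runtime.mapclear call when optimizing (it is skipped under -N or when instrumenting):

    package main

    import "fmt"

    func main() {
        m := map[string]int{"a": 1, "b": 2}
        // This loop is pattern-matched and compiled to a single
        // runtime map-clear call rather than a per-key iteration.
        for k := range m {
            delete(m, k)
        }
        fmt.Println(len(m)) // 0
    }
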
if !IsComparable(t.Key()) { - Fatalf("unsupported map key type for %v", t) + base.Fatalf("unsupported map key type for %v", t) } if BUCKETSIZE < 8 { - Fatalf("bucket size too small for proper alignment") + base.Fatalf("bucket size too small for proper alignment") } if keytype.Align > BUCKETSIZE { - Fatalf("key align too big for %v", t) + base.Fatalf("key align too big for %v", t) } if elemtype.Align > BUCKETSIZE { - Fatalf("elem align too big for %v", t) + base.Fatalf("elem align too big for %v", t) } if keytype.Width > MAXKEYSIZE { - Fatalf("key size to large for %v", t) + base.Fatalf("key size to large for %v", t) } if elemtype.Width > MAXELEMSIZE { - Fatalf("elem size to large for %v", t) + base.Fatalf("elem size to large for %v", t) } if t.Key().Width > MAXKEYSIZE && !keytype.IsPtr() { - Fatalf("key indirect incorrect for %v", t) + base.Fatalf("key indirect incorrect for %v", t) } if t.Elem().Width > MAXELEMSIZE && !elemtype.IsPtr() { - Fatalf("elem indirect incorrect for %v", t) + base.Fatalf("elem indirect incorrect for %v", t) } if keytype.Width%int64(keytype.Align) != 0 { - Fatalf("key size not a multiple of key align for %v", t) + base.Fatalf("key size not a multiple of key align for %v", t) } if elemtype.Width%int64(elemtype.Align) != 0 { - Fatalf("elem size not a multiple of elem align for %v", t) + base.Fatalf("elem size not a multiple of elem align for %v", t) } if bucket.Align%keytype.Align != 0 { - Fatalf("bucket align not multiple of key align %v", t) + base.Fatalf("bucket align not multiple of key align %v", t) } if bucket.Align%elemtype.Align != 0 { - Fatalf("bucket align not multiple of elem align %v", t) + base.Fatalf("bucket align not multiple of elem align %v", t) } if keys.Offset%int64(keytype.Align) != 0 { - Fatalf("bad alignment of keys in bmap for %v", t) + base.Fatalf("bad alignment of keys in bmap for %v", t) } if elems.Offset%int64(elemtype.Align) != 0 { - Fatalf("bad alignment of elems in bmap for %v", t) + base.Fatalf("bad alignment of elems in bmap for %v", t) } // Double-check that overflow field is final memory in struct, // with no padding at end. if overflow.Offset != bucket.Width-int64(Widthptr) { - Fatalf("bad offset of overflow in bmap for %v", t) + base.Fatalf("bad offset of overflow in bmap for %v", t) } t.MapType().Bucket = bucket @@ -227,7 +228,7 @@ func hmap(t *types.Type) *types.Type { // The size of hmap should be 48 bytes on 64 bit // and 28 bytes on 32 bit platforms. 
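
The size invariant checked immediately below follows from the hmap layout the compiler keeps in lockstep with runtime/map.go: an int-sized count, 8 bytes of flags/B/noverflow/hash0, then four pointer words (buckets, oldbuckets, nevacuate, extra), hence 8 + 5*Widthptr:

    package main

    import "fmt"

    // hmapSize reproduces the arithmetic behind the check below.
    func hmapSize(widthPtr int64) int64 { return 8 + 5*widthPtr }

    func main() {
        fmt.Println(hmapSize(8)) // 48 on 64-bit platforms
        fmt.Println(hmapSize(4)) // 28 on 32-bit platforms
    }
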
if size := int64(8 + 5*Widthptr); hmap.Width != size { - Fatalf("hmap size not correct: got %d, want %d", hmap.Width, size) + base.Fatalf("hmap size not correct: got %d, want %d", hmap.Width, size) } t.MapType().Hmap = hmap @@ -288,7 +289,7 @@ func hiter(t *types.Type) *types.Type { hiter.SetFields(fields) dowidth(hiter) if hiter.Width != int64(12*Widthptr) { - Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*Widthptr) + base.Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*Widthptr) } t.MapType().Hiter = hiter hiter.StructType().Map = t @@ -391,10 +392,10 @@ func methods(t *types.Type) []*Sig { var ms []*Sig for _, f := range mt.AllMethods().Slice() { if !f.IsMethod() { - Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f) + base.Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f) } if f.Type.Recv() == nil { - Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f) + base.Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f) } if f.Nointerface() { continue @@ -450,12 +451,12 @@ func imethods(t *types.Type) []*Sig { continue } if f.Sym.IsBlank() { - Fatalf("unexpected blank symbol in interface method set") + base.Fatalf("unexpected blank symbol in interface method set") } if n := len(methods); n > 0 { last := methods[n-1] if !last.name.Less(f.Sym) { - Fatalf("sigcmp vs sortinter %v %v", last.name, f.Sym) + base.Fatalf("sigcmp vs sortinter %v %v", last.name, f.Sym) } } @@ -488,17 +489,17 @@ func dimportpath(p *types.Pkg) { // If we are compiling the runtime package, there are two runtime packages around // -- localpkg and Runtimepkg. We don't want to produce import path symbols for // both of them, so just produce one for localpkg. - if Ctxt.Pkgpath == "runtime" && p == Runtimepkg { + if base.Ctxt.Pkgpath == "runtime" && p == Runtimepkg { return } str := p.Path if p == localpkg { // Note: myimportpath != "", or else dgopkgpath won't call dimportpath. - str = Ctxt.Pkgpath + str = base.Ctxt.Pkgpath } - s := Ctxt.Lookup("type..importpath." + p.Prefix + ".") + s := base.Ctxt.Lookup("type..importpath." + p.Prefix + ".") ot := dnameData(s, 0, str, "", nil, false) ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA) s.Set(obj.AttrContentAddressable, true) @@ -510,13 +511,13 @@ func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int { return duintptr(s, ot, 0) } - if pkg == localpkg && Ctxt.Pkgpath == "" { + if pkg == localpkg && base.Ctxt.Pkgpath == "" { // If we don't know the full import path of the package being compiled // (i.e. -p was not passed on the compiler command line), emit a reference to // type..importpath.""., which the linker will rewrite using the correct import path. // Every package that imports this one directly defines the symbol. // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ. - ns := Ctxt.Lookup(`type..importpath."".`) + ns := base.Ctxt.Lookup(`type..importpath."".`) return dsymptr(s, ot, ns, 0) } @@ -529,13 +530,13 @@ func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int { if pkg == nil { return duint32(s, ot, 0) } - if pkg == localpkg && Ctxt.Pkgpath == "" { + if pkg == localpkg && base.Ctxt.Pkgpath == "" { // If we don't know the full import path of the package being compiled // (i.e. -p was not passed on the compiler command line), emit a reference to // type..importpath.""., which the linker will rewrite using the correct import path. // Every package that imports this one directly defines the symbol. // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ. 
- ns := Ctxt.Lookup(`type..importpath."".`) + ns := base.Ctxt.Lookup(`type..importpath."".`) return dsymptrOff(s, ot, ns) } @@ -546,7 +547,7 @@ func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int { // dnameField dumps a reflect.name for a struct field. func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int { if !types.IsExported(ft.Sym.Name) && ft.Sym.Pkg != spkg { - Fatalf("package mismatch for %v", ft.Sym) + base.Fatalf("package mismatch for %v", ft.Sym) } nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name)) return dsymptr(lsym, ot, nsym, 0) @@ -555,10 +556,10 @@ func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int { // dnameData writes the contents of a reflect.name into s at offset ot. func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int { if len(name) > 1<<16-1 { - Fatalf("name too long: %s", name) + base.Fatalf("name too long: %s", name) } if len(tag) > 1<<16-1 { - Fatalf("tag too long: %s", tag) + base.Fatalf("tag too long: %s", tag) } // Encode name and tag. See reflect/type.go for details. @@ -586,7 +587,7 @@ func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported b copy(tb[2:], tag) } - ot = int(s.WriteBytes(Ctxt, int64(ot), b)) + ot = int(s.WriteBytes(base.Ctxt, int64(ot), b)) if pkg != nil { ot = dgopkgpathOff(s, ot, pkg) @@ -623,7 +624,7 @@ func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym { sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount) dnameCount++ } - s := Ctxt.Lookup(sname) + s := base.Ctxt.Lookup(sname) if len(s.P) > 0 { return s } @@ -643,7 +644,7 @@ func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int { } noff := int(Rnd(int64(ot), int64(Widthptr))) if noff != ot { - Fatalf("unexpected alignment in dextratype for %v", t) + base.Fatalf("unexpected alignment in dextratype for %v", t) } for _, a := range m { @@ -655,11 +656,11 @@ func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int { dataAdd += uncommonSize(t) mcount := len(m) if mcount != int(uint16(mcount)) { - Fatalf("too many methods on %v: %d", t, mcount) + base.Fatalf("too many methods on %v: %d", t, mcount) } xcount := sort.Search(mcount, func(i int) bool { return !types.IsExported(m[i].name.Name) }) if dataAdd != int(uint32(dataAdd)) { - Fatalf("methods are too far away on %v: %d", t, dataAdd) + base.Fatalf("methods are too far away on %v: %d", t, dataAdd) } ot = duint16(lsym, ot, uint16(mcount)) @@ -788,7 +789,7 @@ func typeptrdata(t *types.Type) int64 { return lastPtrField.Offset + typeptrdata(lastPtrField.Type) default: - Fatalf("typeptrdata: unexpected type, %v", t) + base.Fatalf("typeptrdata: unexpected type, %v", t) return 0 } } @@ -888,7 +889,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { i = 1 } if i&(i-1) != 0 { - Fatalf("invalid alignment %d for %v", t.Align, t) + base.Fatalf("invalid alignment %d for %v", t.Align, t) } ot = duint8(lsym, ot, t.Align) // align ot = duint8(lsym, ot, t.Align) // fieldAlign @@ -979,7 +980,7 @@ func typesymprefix(prefix string, t *types.Type) *types.Sym { func typenamesym(t *types.Type) *types.Sym { if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() { - Fatalf("typenamesym %v", t) + base.Fatalf("typenamesym %v", t) } s := typesym(t) signatmu.Lock() @@ -1006,7 +1007,7 @@ func typename(t *types.Type) *Node { func itabname(t, itype *types.Type) *Node { if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() 
{ - Fatalf("itabname(%v, %v)", t, itype) + base.Fatalf("itabname(%v, %v)", t, itype) } s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString()) if s.Def == nil { @@ -1065,7 +1066,7 @@ func isreflexive(t *types.Type) bool { return true default: - Fatalf("bad type for map key: %v", t) + base.Fatalf("bad type for map key: %v", t) return false } } @@ -1095,7 +1096,7 @@ func needkeyupdate(t *types.Type) bool { return false default: - Fatalf("bad type for map key: %v", t) + base.Fatalf("bad type for map key: %v", t) return true } } @@ -1135,7 +1136,7 @@ func formalType(t *types.Type) *types.Type { func dtypesym(t *types.Type) *obj.LSym { t = formalType(t) if t.IsUntyped() { - Fatalf("dtypesym %v", t) + base.Fatalf("dtypesym %v", t) } s := typesym(t) @@ -1158,7 +1159,7 @@ func dtypesym(t *types.Type) *obj.LSym { dupok = obj.DUPOK } - if Ctxt.Pkgpath != "runtime" || (tbase != types.Types[tbase.Etype] && tbase != types.Bytetype && tbase != types.Runetype && tbase != types.Errortype) { // int, float, etc + if base.Ctxt.Pkgpath != "runtime" || (tbase != types.Types[tbase.Etype] && tbase != types.Bytetype && tbase != types.Runetype && tbase != types.Errortype) { // int, float, etc // named types from other files are defined only by those files if tbase.Sym != nil && tbase.Sym.Pkg != localpkg { if i, ok := typeSymIdx[tbase]; ok { @@ -1377,7 +1378,7 @@ func dtypesym(t *types.Type) *obj.LSym { ot = dsymptr(lsym, ot, dtypesym(f.Type), 0) offsetAnon := uint64(f.Offset) << 1 if offsetAnon>>1 != uint64(f.Offset) { - Fatalf("%v: bad field offset for %s", t, f.Sym.Name) + base.Fatalf("%v: bad field offset for %s", t, f.Sym.Name) } if f.Embedded != 0 { offsetAnon |= 1 @@ -1394,7 +1395,7 @@ func dtypesym(t *types.Type) *obj.LSym { // // When buildmode=shared, all types are in typelinks so the // runtime can deduplicate type pointers. - keep := Ctxt.Flag_dynlink + keep := base.Ctxt.Flag_dynlink if !keep && t.Sym == nil { // For an unnamed type, we only need the link if the type can // be created at run time by reflect.PtrTo and similar @@ -1471,7 +1472,7 @@ func genfun(t, it *types.Type) []*obj.LSym { } if len(sigs) != 0 { - Fatalf("incomplete itab") + base.Fatalf("incomplete itab") } return out @@ -1572,7 +1573,7 @@ func dumptabs() { // process ptabs if localpkg.Name == "main" && len(ptabs) > 0 { ot := 0 - s := Ctxt.Lookup("go.plugin.tabs") + s := base.Ctxt.Lookup("go.plugin.tabs") for _, p := range ptabs { // Dump ptab symbol into go.pluginsym package. // @@ -1591,7 +1592,7 @@ func dumptabs() { ggloblsym(s, int32(ot), int16(obj.RODATA)) ot = 0 - s = Ctxt.Lookup("go.plugin.exports") + s = base.Ctxt.Lookup("go.plugin.exports") for _, p := range ptabs { ot = dsymptr(s, ot, p.s.Linksym(), 0) } @@ -1613,7 +1614,7 @@ func dumpbasictypes() { // so this is as good as any. // another possible choice would be package main, // but using runtime means fewer copies in object files. - if Ctxt.Pkgpath == "runtime" { + if base.Ctxt.Pkgpath == "runtime" { for i := types.EType(1); i <= TBOOL; i++ { dtypesym(types.NewPtr(types.Types[i])) } @@ -1629,10 +1630,10 @@ func dumpbasictypes() { // add paths for runtime and main, which 6l imports implicitly. 
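
isreflexive above is the reason float and complex map keys (and aggregates containing them) get special treatment: NaN != NaN, so key equality is not reflexive, a lookup can never find a previously inserted NaN key, and every insert adds a fresh entry:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        m := map[float64]int{}
        m[math.NaN()] = 1
        m[math.NaN()] = 2
        fmt.Println(len(m)) // 2: two distinct NaN keys
    }
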
dimportpath(Runtimepkg) - if Flag.Race { + if base.Flag.Race { dimportpath(racepkg) } - if Flag.MSan { + if base.Flag.MSan { dimportpath(msanpkg) } dimportpath(types.NewPkg("main", "")) @@ -1767,7 +1768,7 @@ func fillptrmask(t *types.Type, ptrmask []byte) { func dgcprog(t *types.Type) (*obj.LSym, int64) { dowidth(t) if t.Width == BADWIDTH { - Fatalf("dgcprog: %v badwidth", t) + base.Fatalf("dgcprog: %v badwidth", t) } lsym := typesymprefix(".gcprog", t).Linksym() var p GCProg @@ -1776,7 +1777,7 @@ func dgcprog(t *types.Type) (*obj.LSym, int64) { offset := p.w.BitIndex() * int64(Widthptr) p.end() if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width { - Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width) + base.Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width) } return lsym, offset } @@ -1791,7 +1792,7 @@ func (p *GCProg) init(lsym *obj.LSym) { p.lsym = lsym p.symoff = 4 // first 4 bytes hold program length p.w.Init(p.writeByte) - if Debug.GCProg > 0 { + if base.Debug.GCProg > 0 { fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym) p.w.Debug(os.Stderr) } @@ -1805,7 +1806,7 @@ func (p *GCProg) end() { p.w.End() duint32(p.lsym, 0, uint32(p.symoff-4)) ggloblsym(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL) - if Debug.GCProg > 0 { + if base.Debug.GCProg > 0 { fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym) } } @@ -1821,7 +1822,7 @@ func (p *GCProg) emit(t *types.Type, offset int64) { } switch t.Etype { default: - Fatalf("GCProg.emit: unexpected type %v", t) + base.Fatalf("GCProg.emit: unexpected type %v", t) case TSTRING: p.w.Ptr(offset / int64(Widthptr)) @@ -1836,7 +1837,7 @@ func (p *GCProg) emit(t *types.Type, offset int64) { case TARRAY: if t.NumElem() == 0 { // should have been handled by haspointers check above - Fatalf("GCProg.emit: empty array") + base.Fatalf("GCProg.emit: empty array") } // Flatten array-of-array-of-array to just a big array by multiplying counts. @@ -1869,7 +1870,7 @@ func (p *GCProg) emit(t *types.Type, offset int64) { // size bytes of zeros. func zeroaddr(size int64) *Node { if size >= 1<<31 { - Fatalf("map elem too big %d", size) + base.Fatalf("map elem too big %d", size) } if zerosize < size { zerosize = size diff --git a/src/cmd/compile/internal/gc/scope.go b/src/cmd/compile/internal/gc/scope.go index e66b859e10069..ace1d6bd9c5ef 100644 --- a/src/cmd/compile/internal/gc/scope.go +++ b/src/cmd/compile/internal/gc/scope.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/internal/dwarf" "cmd/internal/obj" "cmd/internal/src" @@ -13,7 +14,7 @@ import ( // See golang.org/issue/20390. 
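
fillptrmask, whose hunk context appears above, packs the per-word pointer bits into bytes, eight words per byte with the low bit first; dgcprog's program encoding is used instead only when the mask form would be too large. A sketch of the packing:

    package main

    import "fmt"

    // fill packs per-word pointer bits LSB-first, as fillptrmask does.
    func fill(bits []bool) []byte {
        mask := make([]byte, (len(bits)+7)/8)
        for i, b := range bits {
            if b {
                mask[i/8] |= 1 << uint(i%8)
            }
        }
        return mask
    }

    func main() {
        // Pointer map for struct{ n int; p *int; s string } on 64-bit:
        // words 1 and 2 hold pointers (see the liveness sketch earlier).
        fmt.Printf("%08b\n", fill([]bool{false, true, true, false})) // [00000110]
    }
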
func xposBefore(p, q src.XPos) bool { - return Ctxt.PosTable.Pos(p).Before(Ctxt.PosTable.Pos(q)) + return base.Ctxt.PosTable.Pos(p).Before(base.Ctxt.PosTable.Pos(q)) } func findScope(marks []Mark, pos src.XPos) ScopeID { diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go index 8e6b15af536af..8d4c8d2be10e7 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/gc/select.go @@ -4,7 +4,10 @@ package gc -import "cmd/compile/internal/types" +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/types" +) // select func typecheckselect(sel *Node) { @@ -14,18 +17,18 @@ func typecheckselect(sel *Node) { for _, ncase := range sel.List.Slice() { if ncase.Op != OCASE { setlineno(ncase) - Fatalf("typecheckselect %v", ncase.Op) + base.Fatalf("typecheckselect %v", ncase.Op) } if ncase.List.Len() == 0 { // default if def != nil { - yyerrorl(ncase.Pos, "multiple defaults in select (first at %v)", def.Line()) + base.ErrorfAt(ncase.Pos, "multiple defaults in select (first at %v)", def.Line()) } else { def = ncase } } else if ncase.List.Len() > 1 { - yyerrorl(ncase.Pos, "select cases cannot be lists") + base.ErrorfAt(ncase.Pos, "select cases cannot be lists") } else { ncase.List.SetFirst(typecheck(ncase.List.First(), ctxStmt)) n := ncase.List.First() @@ -41,7 +44,7 @@ func typecheckselect(sel *Node) { // on the same line). This matches the approach before 1.10. pos = ncase.Pos } - yyerrorl(pos, "select case must be receive, send or assign recv") + base.ErrorfAt(pos, "select case must be receive, send or assign recv") // convert x = <-c into OSELRECV(x, <-c). // remove implicit conversions; the eventual assignment @@ -52,7 +55,7 @@ func typecheckselect(sel *Node) { } if n.Right.Op != ORECV { - yyerrorl(n.Pos, "select assignment must have receive on right hand side") + base.ErrorfAt(n.Pos, "select assignment must have receive on right hand side") break } @@ -61,7 +64,7 @@ func typecheckselect(sel *Node) { // convert x, ok = <-c into OSELRECV2(x, <-c) with ntest=ok case OAS2RECV: if n.Right.Op != ORECV { - yyerrorl(n.Pos, "select assignment must have receive on right hand side") + base.ErrorfAt(n.Pos, "select assignment must have receive on right hand side") break } @@ -84,13 +87,13 @@ func typecheckselect(sel *Node) { typecheckslice(ncase.Nbody.Slice(), ctxStmt) } - lineno = lno + base.Pos = lno } func walkselect(sel *Node) { lno := setlineno(sel) if sel.Nbody.Len() != 0 { - Fatalf("double walkselect") + base.Fatalf("double walkselect") } init := sel.Ninit.Slice() @@ -102,12 +105,12 @@ func walkselect(sel *Node) { sel.Nbody.Set(init) walkstmtlist(sel.Nbody.Slice()) - lineno = lno + base.Pos = lno } func walkselectcases(cases *Nodes) []*Node { ncas := cases.Len() - sellineno := lineno + sellineno := base.Pos // optimization: zero-case select if ncas == 0 { @@ -125,7 +128,7 @@ func walkselectcases(cases *Nodes) []*Node { n.Ninit.Set(nil) switch n.Op { default: - Fatalf("select %v", n.Op) + base.Fatalf("select %v", n.Op) case OSEND: // already ok @@ -202,7 +205,7 @@ func walkselectcases(cases *Nodes) []*Node { r.Ninit.Set(cas.Ninit.Slice()) switch n.Op { default: - Fatalf("select %v", n.Op) + base.Fatalf("select %v", n.Op) case OSEND: // if selectnbsend(c, v) { body } else { default body } @@ -245,7 +248,7 @@ func walkselectcases(cases *Nodes) []*Node { var init []*Node // generate sel-struct - lineno = sellineno + base.Pos = sellineno selv := temp(types.NewArray(scasetype(), int64(ncas))) r := nod(OAS, selv, nil) r = typecheck(r, 
ctxStmt) @@ -255,7 +258,7 @@ func walkselectcases(cases *Nodes) []*Node { order := temp(types.NewArray(types.Types[TUINT16], 2*int64(ncas))) var pc0, pcs *Node - if Flag.Race { + if base.Flag.Race { pcs = temp(types.NewArray(types.Types[TUINTPTR], int64(ncas))) pc0 = typecheck(nod(OADDR, nod(OINDEX, pcs, nodintconst(0)), nil), ctxExpr) } else { @@ -278,7 +281,7 @@ func walkselectcases(cases *Nodes) []*Node { var c, elem *Node switch n.Op { default: - Fatalf("select %v", n.Op) + base.Fatalf("select %v", n.Op) case OSEND: i = nsends nsends++ @@ -308,17 +311,17 @@ func walkselectcases(cases *Nodes) []*Node { // TODO(mdempsky): There should be a cleaner way to // handle this. - if Flag.Race { + if base.Flag.Race { r = mkcall("selectsetpc", nil, nil, nod(OADDR, nod(OINDEX, pcs, nodintconst(int64(i))), nil)) init = append(init, r) } } if nsends+nrecvs != ncas { - Fatalf("walkselectcases: miscount: %v + %v != %v", nsends, nrecvs, ncas) + base.Fatalf("walkselectcases: miscount: %v + %v != %v", nsends, nrecvs, ncas) } // run the select - lineno = sellineno + base.Pos = sellineno chosen := temp(types.Types[TINT]) recvOK := temp(types.Types[TBOOL]) r = nod(OAS2, nil, nil) @@ -331,7 +334,7 @@ func walkselectcases(cases *Nodes) []*Node { // selv and order are no longer alive after selectgo. init = append(init, nod(OVARKILL, selv, nil)) init = append(init, nod(OVARKILL, order, nil)) - if Flag.Race { + if base.Flag.Race { init = append(init, nod(OVARKILL, pcs, nil)) } diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 1f89baa3c0122..219435d6de39e 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/obj" "fmt" @@ -40,7 +41,7 @@ func (s *InitSchedule) append(n *Node) { // staticInit adds an initialization statement n to the schedule. func (s *InitSchedule) staticInit(n *Node) { if !s.tryStaticInit(n) { - if Flag.Percent != 0 { + if base.Flag.Percent != 0 { Dump("nonstatic", n) } s.append(n) @@ -62,7 +63,7 @@ func (s *InitSchedule) tryStaticInit(n *Node) bool { return true } lno := setlineno(n) - defer func() { lineno = lno }() + defer func() { base.Pos = lno }() return s.staticassign(n.Left, n.Right) } @@ -256,8 +257,8 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool { case OCLOSURE: if hasemptycvars(r) { - if Debug.Closure > 0 { - Warnl(r.Pos, "closure converted to global") + if base.Debug.Closure > 0 { + base.WarnfAt(r.Pos, "closure converted to global") } // Closures with no captured variables are globals, // so the assignment can be done at link time. 
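
The two-case lowering in the select.go hunk above compiles a send (or receive) paired with a default into a plain branch on a non-blocking runtime helper, runtime.selectnbsend in the send case, instead of the full selectgo machinery. Its behavior matches this ordinary select:

    package main

    import "fmt"

    // trySend is the source-level shape that gets lowered to
    // "if selectnbsend(c, v) { ... } else { ... }".
    func trySend(c chan int, v int) bool {
        select {
        case c <- v:
            return true
        default:
            return false
        }
    }

    func main() {
        c := make(chan int, 1)
        fmt.Println(trySend(c, 1)) // true: buffer has room
        fmt.Println(trySend(c, 2)) // false: would block, default taken
    }
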
@@ -462,7 +463,7 @@ func isStaticCompositeLiteral(n *Node) bool { case OSTRUCTLIT: for _, r := range n.List.Slice() { if r.Op != OSTRUCTKEY { - Fatalf("isStaticCompositeLiteral: rhs not OSTRUCTKEY: %v", r) + base.Fatalf("isStaticCompositeLiteral: rhs not OSTRUCTKEY: %v", r) } if !isStaticCompositeLiteral(r.Left) { return false @@ -517,7 +518,7 @@ func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes) if r.Op == OKEY { k = indexconst(r.Left) if k < 0 { - Fatalf("fixedlit: invalid index %v", r.Left) + base.Fatalf("fixedlit: invalid index %v", r.Left) } r = r.Right } @@ -531,7 +532,7 @@ func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes) case OSTRUCTLIT: splitnode = func(r *Node) (*Node, *Node) { if r.Op != OSTRUCTKEY { - Fatalf("fixedlit: rhs not OSTRUCTKEY: %v", r) + base.Fatalf("fixedlit: rhs not OSTRUCTKEY: %v", r) } if r.Sym.IsBlank() || isBlank { return nblank, r.Left @@ -540,7 +541,7 @@ func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes) return nodSym(ODOT, var_, r.Sym), r.Left } default: - Fatalf("fixedlit bad op: %v", n.Op) + base.Fatalf("fixedlit bad op: %v", n.Op) } for _, r := range n.List.Slice() { @@ -578,7 +579,7 @@ func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes) a = walkstmt(a) init.Append(a) default: - Fatalf("fixedlit: bad kind %d", kind) + base.Fatalf("fixedlit: bad kind %d", kind) } } @@ -610,7 +611,7 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) { var_ = typecheck(var_, ctxExpr|ctxAssign) nam := stataddr(var_) if nam == nil || nam.Class() != PEXTERN { - Fatalf("slicelit: %v", var_) + base.Fatalf("slicelit: %v", var_) } slicesym(nam, vstat, t.NumElem()) return @@ -709,7 +710,7 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) { if value.Op == OKEY { index = indexconst(value.Left) if index < 0 { - Fatalf("slicelit: invalid index %v", value.Left) + base.Fatalf("slicelit: invalid index %v", value.Left) } value = value.Right } @@ -770,7 +771,7 @@ func maplit(n *Node, m *Node, init *Nodes) { // All remaining entries are static. Double-check that. 
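
slicelit above stages a slice literal in pieces: the constant elements land in a static array symbol (vstat), the program gets its own automatic copy (the literal may be mutated later), the slice is pointed at that copy, and non-constant elements are stored afterwards. A source-level sketch of []int{1, 2, k}:

    package main

    import "fmt"

    func build(k int) []int {
        var statictmp = [3]int{1, 2, 0} // vstat: constant elements only
        arr := statictmp                // per-call copy of the static data
        s := arr[:]                     // slice points at the copy
        s[2] = k                        // dynamic element assigned afterwards
        return s
    }

    func main() {
        fmt.Println(build(9)) // [1 2 9]
    }
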
for _, r := range entries { if !isStaticCompositeLiteral(r.Left) || !isStaticCompositeLiteral(r.Right) { - Fatalf("maplit: entry is not a literal: %v", r) + base.Fatalf("maplit: entry is not a literal: %v", r) } } @@ -868,7 +869,7 @@ func anylit(n *Node, var_ *Node, init *Nodes) { t := n.Type switch n.Op { default: - Fatalf("anylit: not lit, op=%v node=%v", n.Op, n) + base.Fatalf("anylit: not lit, op=%v node=%v", n.Op, n) case ONAME, OMETHEXPR: a := nod(OAS, var_, n) @@ -877,7 +878,7 @@ func anylit(n *Node, var_ *Node, init *Nodes) { case OPTRLIT: if !t.IsPtr() { - Fatalf("anylit: not ptr") + base.Fatalf("anylit: not ptr") } var r *Node @@ -905,7 +906,7 @@ func anylit(n *Node, var_ *Node, init *Nodes) { case OSTRUCTLIT, OARRAYLIT: if !t.IsStruct() && !t.IsArray() { - Fatalf("anylit: not struct/array") + base.Fatalf("anylit: not struct/array") } if var_.isSimpleName() && n.List.Len() > 4 { @@ -951,7 +952,7 @@ func anylit(n *Node, var_ *Node, init *Nodes) { case OMAPLIT: if !t.IsMap() { - Fatalf("anylit: not map") + base.Fatalf("anylit: not map") } maplit(n, var_, init) } @@ -1052,7 +1053,7 @@ func (s *InitSchedule) initplan(n *Node) { s.initplans[n] = p switch n.Op { default: - Fatalf("initplan") + base.Fatalf("initplan") case OARRAYLIT, OSLICELIT: var k int64 @@ -1060,7 +1061,7 @@ func (s *InitSchedule) initplan(n *Node) { if a.Op == OKEY { k = indexconst(a.Left) if k < 0 { - Fatalf("initplan arraylit: invalid index %v", a.Left) + base.Fatalf("initplan arraylit: invalid index %v", a.Left) } a = a.Right } @@ -1071,7 +1072,7 @@ func (s *InitSchedule) initplan(n *Node) { case OSTRUCTLIT: for _, a := range n.List.Slice() { if a.Op != OSTRUCTKEY { - Fatalf("initplan structlit") + base.Fatalf("initplan structlit") } if a.Sym.IsBlank() { continue @@ -1082,7 +1083,7 @@ func (s *InitSchedule) initplan(n *Node) { case OMAPLIT: for _, a := range n.List.Slice() { if a.Op != OKEY { - Fatalf("initplan maplit") + base.Fatalf("initplan maplit") } s.addvalue(p, -1, a.Right) } @@ -1155,12 +1156,12 @@ func isvaluelit(n *Node) bool { func genAsStatic(as *Node) { if as.Left.Type == nil { - Fatalf("genAsStatic as.Left not typechecked") + base.Fatalf("genAsStatic as.Left not typechecked") } nam := stataddr(as.Left) if nam == nil || (nam.Class() != PEXTERN && as.Left != nblank) { - Fatalf("genAsStatic: lhs %v", as.Left) + base.Fatalf("genAsStatic: lhs %v", as.Left) } switch { @@ -1169,6 +1170,6 @@ func genAsStatic(as *Node) { case (as.Right.Op == ONAME || as.Right.Op == OMETHEXPR) && as.Right.Class() == PFUNC: pfuncsym(nam, as.Right) default: - Fatalf("genAsStatic: rhs %v", as.Right) + base.Fatalf("genAsStatic: rhs %v", as.Right) } } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index f06f08e6ab6d9..e892a01da08bf 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -15,6 +15,7 @@ import ( "bufio" "bytes" + "cmd/compile/internal/base" "cmd/compile/internal/ssa" "cmd/compile/internal/types" "cmd/internal/obj" @@ -60,10 +61,10 @@ func initssaconfig() { _ = types.NewPtr(types.Types[TINT64]) // *int64 _ = types.NewPtr(types.Errortype) // *error types.NewPtrCacheEnabled = false - ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, Ctxt, Flag.N == 0) + ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, base.Ctxt, base.Flag.N == 0) ssaConfig.SoftFloat = thearch.SoftFloat - ssaConfig.Race = Flag.Race - ssaCaches = make([]ssa.Cache, Flag.LowerC) + ssaConfig.Race = base.Flag.Race + ssaCaches = make([]ssa.Cache, base.Flag.LowerC) // 
Set up some runtime functions we'll need to call. assertE2I = sysfunc("assertE2I") @@ -240,7 +241,7 @@ func dvarint(x *obj.LSym, off int, v int64) int { // - Size of the argument // - Offset of where argument should be placed in the args frame when making call func (s *state) emitOpenDeferInfo() { - x := Ctxt.Lookup(s.curfn.Func.lsym.Name + ".opendefer") + x := base.Ctxt.Lookup(s.curfn.Func.lsym.Name + ".opendefer") s.curfn.Func.lsym.Func().OpenCodedDeferInfo = x off := 0 @@ -291,7 +292,7 @@ func buildssa(fn *Node, worker int) *ssa.Func { name := fn.funcname() printssa := false if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", or a package.name e.g. "compress/gzip.(*Reader).Reset" - printssa = name == ssaDump || Ctxt.Pkgpath+"."+name == ssaDump + printssa = name == ssaDump || base.Ctxt.Pkgpath+"."+name == ssaDump } var astBuf *bytes.Buffer if printssa { @@ -342,7 +343,7 @@ func buildssa(fn *Node, worker int) *ssa.Func { if printssa { ssaDF := ssaDumpFile if ssaDir != "" { - ssaDF = filepath.Join(ssaDir, Ctxt.Pkgpath+"."+name+".html") + ssaDF = filepath.Join(ssaDir, base.Ctxt.Pkgpath+"."+name+".html") ssaD := filepath.Dir(ssaDF) os.MkdirAll(ssaD, 0755) } @@ -358,9 +359,9 @@ func buildssa(fn *Node, worker int) *ssa.Func { s.fwdVars = map[*Node]*ssa.Value{} s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem) - s.hasOpenDefers = Flag.N == 0 && s.hasdefer && !s.curfn.Func.OpenCodedDeferDisallowed() + s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.Func.OpenCodedDeferDisallowed() switch { - case s.hasOpenDefers && (Ctxt.Flag_shared || Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386": + case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386": // Don't support open-coded defers for 386 ONLY when using shared // libraries, because there is extra code (added by rewriteToUseGot()) // preceding the deferreturn/ret code that is generated by gencallret() @@ -478,7 +479,7 @@ func buildssa(fn *Node, worker int) *ssa.Func { func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *Node) { // Read sources of target function fn. - fname := Ctxt.PosTable.Pos(fn.Pos).Filename() + fname := base.Ctxt.PosTable.Pos(fn.Pos).Filename() targetFn, err := readFuncLines(fname, fn.Pos.Line(), fn.Func.Endlineno.Line()) if err != nil { writer.Logf("cannot read sources for function %v: %v", fn, err) @@ -494,7 +495,7 @@ func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *Node) { } else { elno = fi.Name.Defn.Func.Endlineno } - fname := Ctxt.PosTable.Pos(fi.Pos).Filename() + fname := base.Ctxt.PosTable.Pos(fi.Pos).Filename() fnLines, err := readFuncLines(fname, fi.Pos.Line(), elno.Line()) if err != nil { writer.Logf("cannot read sources for inlined function %v: %v", fi, err) @@ -752,8 +753,8 @@ func (s *state) pushLine(line src.XPos) { // the frontend may emit node with line number missing, // use the parent line number in this case. 
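
hasOpenDefers above gates the open-coded scheme: rather than allocating a _defer record at run time, the compiler tracks which defer statements executed in a bitmask (deferBits) and emits the guarded calls inline at every exit, with the .opendefer FUNCDATA from emitOpenDeferInfo describing them for panic recovery. A hand-expanded sketch of what a function with one defer compiles to, conceptually:

    package main

    import "fmt"

    // Original: func g() { defer fmt.Println("bye"); fmt.Println("hi") }
    func g() {
        var deferBits uint8
        deferBits |= 1 // the defer statement was reached
        fmt.Println("hi")
        // Function exit: run recorded defers inline, in reverse order,
        // instead of walking a _defer chain.
        if deferBits&1 != 0 {
            fmt.Println("bye")
        }
    }

    func main() { g() }
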
line = s.peekPos() - if Flag.K != 0 { - Warn("buildssa: unknown position (line 0)") + if base.Flag.K != 0 { + base.Warn("buildssa: unknown position (line 0)") } } else { s.lastPos = line @@ -988,13 +989,13 @@ func (s *state) instrument(t *types.Type, addr *ssa.Value, wr bool) { var fn *obj.LSym needWidth := false - if Flag.MSan { + if base.Flag.MSan { fn = msanread if wr { fn = msanwrite } needWidth = true - } else if Flag.Race && t.NumComponents(types.CountBlankFields) > 1 { + } else if base.Flag.Race && t.NumComponents(types.CountBlankFields) > 1 { // for composite objects we have to write every address // because a write might happen to any subobject. // composites with only one element don't have subobjects, though. @@ -1003,7 +1004,7 @@ func (s *state) instrument(t *types.Type, addr *ssa.Value, wr bool) { fn = racewriterange } needWidth = true - } else if Flag.Race { + } else if base.Flag.Race { // for non-composite objects we can write just the start // address, as any write must write the first byte. fn = raceread @@ -1090,7 +1091,7 @@ func (s *state) stmt(n *Node) { case OCALLMETH, OCALLINTER: s.callResult(n, callNormal) if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class() == PFUNC { - if fn := n.Left.Sym.Name; Flag.CompilingRuntime && fn == "throw" || + if fn := n.Left.Sym.Name; base.Flag.CompilingRuntime && fn == "throw" || n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") { m := s.mem() b := s.endBlock() @@ -1102,7 +1103,7 @@ func (s *state) stmt(n *Node) { } } case ODEFER: - if Debug.Defer > 0 { + if base.Debug.Defer > 0 { var defertype string if s.hasOpenDefers { defertype = "open-coded" @@ -1111,7 +1112,7 @@ func (s *state) stmt(n *Node) { } else { defertype = "heap-allocated" } - Warnl(n.Pos, "%s defer", defertype) + base.WarnfAt(n.Pos, "%s defer", defertype) } if s.hasOpenDefers { s.openDeferRecord(n.Left) @@ -1225,20 +1226,20 @@ func (s *state) stmt(n *Node) { // Check whether we're writing the result of an append back to the same slice. // If so, we handle it specially to avoid write barriers on the fast // (non-growth) path. - if !samesafeexpr(n.Left, rhs.List.First()) || Flag.N != 0 { + if !samesafeexpr(n.Left, rhs.List.First()) || base.Flag.N != 0 { break } // If the slice can be SSA'd, it'll be on the stack, // so there will be no write barriers, // so there's no need to attempt to prevent them. 
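	// Illustrative sketch (not part of the patch): the pattern this special
	// case targets is an append whose result is written straight back to the
	// same slice, e.g.
	//
	//	s = append(s, v)
	//
	// On the non-growth path only the length field changes, so updating it in
	// place avoids a write barrier on the pointer field.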
if s.canSSA(n.Left) { - if Debug.Append > 0 { // replicating old diagnostic message - Warnl(n.Pos, "append: len-only update (in local slice)") + if base.Debug.Append > 0 { // replicating old diagnostic message + base.WarnfAt(n.Pos, "append: len-only update (in local slice)") } break } - if Debug.Append > 0 { - Warnl(n.Pos, "append: len-only update") + if base.Debug.Append > 0 { + base.WarnfAt(n.Pos, "append: len-only update") } s.append(rhs, true) return @@ -1814,7 +1815,7 @@ func floatForComplex(t *types.Type) *types.Type { case TCOMPLEX128: return types.Types[TFLOAT64] } - Fatalf("unexpected type: %v", t) + base.Fatalf("unexpected type: %v", t) return nil } @@ -1825,7 +1826,7 @@ func complexForFloat(t *types.Type) *types.Type { case TFLOAT64: return types.Types[TCOMPLEX128] } - Fatalf("unexpected type: %v", t) + base.Fatalf("unexpected type: %v", t) return nil } @@ -4130,9 +4131,9 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder { } pkg := sym.Pkg.Path if sym.Pkg == localpkg { - pkg = Ctxt.Pkgpath + pkg = base.Ctxt.Pkgpath } - if Flag.Race && pkg == "sync/atomic" { + if base.Flag.Race && pkg == "sync/atomic" { // The race detector needs to be able to intercept these calls. // We can't intrinsify them. return nil @@ -4172,7 +4173,7 @@ func (s *state) intrinsicCall(n *Node) *ssa.Value { if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 { x = x.Args[0] } - Warnl(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString()) + base.WarnfAt(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString()) } return v } @@ -4240,7 +4241,7 @@ func (s *state) openDeferRecord(n *Node) { } } else if n.Op == OCALLMETH { if fn.Op != ODOTMETH { - Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn) + base.Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn) } closureVal := s.getMethodClosure(fn) // We must always store the function value in a stack slot for the @@ -4250,7 +4251,7 @@ func (s *state) openDeferRecord(n *Node) { opendefer.closureNode = closure.Aux.(*Node) } else { if fn.Op != ODOTINTER { - Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op) + base.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op) } closure, rcvr := s.getClosureAndRcvr(fn) opendefer.closure = s.openDeferSave(nil, closure.Type, closure) @@ -4382,7 +4383,7 @@ func (s *state) openDeferExit() { // Generate code to call the function call of the defer, using the // closure/receiver/args that were stored in argtmps at the point // of the defer statement. - argStart := Ctxt.FixedFrameSize() + argStart := base.Ctxt.FixedFrameSize() fn := r.n.Left stksize := fn.Type.ArgWidth() var ACArgs []ssa.Param @@ -4499,7 +4500,7 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value { nf := res.NumFields() for i := 0; i < nf; i++ { fp := res.Field(i) - ACResults = append(ACResults, ssa.Param{Type: fp.Type, Offset: int32(fp.Offset + Ctxt.FixedFrameSize())}) + ACResults = append(ACResults, ssa.Param{Type: fp.Type, Offset: int32(fp.Offset + base.Ctxt.FixedFrameSize())}) } } @@ -4604,14 +4605,14 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value { } // Call runtime.deferprocStack with pointer to _defer record. 
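	// Rough sketch of what this path emits (names illustrative): for a defer
	// whose record can live in the frame, the compiler builds roughly
	//
	//	var d _defer               // record allocated on the stack
	//	// ...fields filled in by the code above...
	//	runtime.deferprocStack(&d) // runtime links the record in
	//
	// instead of calling deferproc, which would heap-allocate the record.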
- ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(Ctxt.FixedFrameSize())}) + ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(base.Ctxt.FixedFrameSize())}) aux := ssa.StaticAuxCall(deferprocStack, ACArgs, ACResults) if testLateExpansion { callArgs = append(callArgs, addr, s.mem()) call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) call.AddArgs(callArgs...) } else { - arg0 := s.constOffPtrSP(types.Types[TUINTPTR], Ctxt.FixedFrameSize()) + arg0 := s.constOffPtrSP(types.Types[TUINTPTR], base.Ctxt.FixedFrameSize()) s.store(types.Types[TUINTPTR], arg0, addr) call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem()) } @@ -4625,7 +4626,7 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value { } else { // Store arguments to stack, including defer/go arguments and receiver for method calls. // These are written in SP-offset order. - argStart := Ctxt.FixedFrameSize() + argStart := base.Ctxt.FixedFrameSize() // Defer/go args. if k != callNormal { // Write argsize and closure (args to newproc/deferproc). @@ -4766,13 +4767,13 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value { if testLateExpansion { return s.newValue1I(ssa.OpSelectNAddr, pt, 0, call) } - return s.constOffPtrSP(pt, fp.Offset+Ctxt.FixedFrameSize()) + return s.constOffPtrSP(pt, fp.Offset+base.Ctxt.FixedFrameSize()) } if testLateExpansion { return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call) } - return s.load(n.Type, s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize())) + return s.load(n.Type, s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+base.Ctxt.FixedFrameSize())) } // maybeNilCheckClosure checks if a nil check of a closure is needed in some @@ -4930,7 +4931,7 @@ func (s *state) addr(n *Node) *ssa.Value { // canSSA reports whether n is SSA-able. // n must be an ONAME (or an ODOT sequence with an ONAME base). func (s *state) canSSA(n *Node) bool { - if Flag.N != 0 { + if base.Flag.N != 0 { return false } for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) { @@ -5026,7 +5027,7 @@ func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value { // Used only for automatically inserted nil checks, // not for user code like 'x != nil'. func (s *state) nilCheck(ptr *ssa.Value) { - if Debug.DisableNil != 0 || s.curfn.Func.NilCheckDisabled() { + if base.Debug.DisableNil != 0 || s.curfn.Func.NilCheckDisabled() { return } s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem()) @@ -5041,7 +5042,7 @@ func (s *state) nilCheck(ptr *ssa.Value) { func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value { idx = s.extendIndex(idx, len, kind, bounded) - if bounded || Flag.B != 0 { + if bounded || base.Flag.B != 0 { // If bounded or bounds checking is flag-disabled, then no check necessary, // just return the extended index. // @@ -5114,7 +5115,7 @@ func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo s.startBlock(bNext) // In Spectre index mode, apply an appropriate mask to avoid speculative out-of-bounds accesses. 
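	// Conceptually (a hedged sketch, not the exact lowering): once the bounds
	// check has passed, Spectre index mode clamps the index branchlessly,
	//
	//	i = (i < length) ? i : 0 // e.g. a conditional move on amd64
	//
	// so that even a mispredicted speculative path cannot form an
	// out-of-bounds address.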
- if Flag.Cfg.SpectreIndex { + if base.Flag.Cfg.SpectreIndex { op := ssa.OpSpectreIndex if kind != ssa.BoundsIndex && kind != ssa.BoundsIndexU { op = ssa.OpSpectreSliceIndex @@ -5133,7 +5134,7 @@ func (s *state) check(cmp *ssa.Value, fn *obj.LSym) { b.Likely = ssa.BranchLikely bNext := s.f.NewBlock(ssa.BlockPlain) line := s.peekPos() - pos := Ctxt.PosTable.Pos(line) + pos := base.Ctxt.PosTable.Pos(line) fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()} bPanic := s.panics[fl] if bPanic == nil { @@ -5172,7 +5173,7 @@ func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value { func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value { s.prevCall = nil // Write args to the stack - off := Ctxt.FixedFrameSize() + off := base.Ctxt.FixedFrameSize() testLateExpansion := ssa.LateCallExpansionEnabledWithin(s.f) var ACArgs []ssa.Param var ACResults []ssa.Param @@ -5219,7 +5220,7 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args . b := s.endBlock() b.Kind = ssa.BlockExit b.SetControl(call) - call.AuxInt = off - Ctxt.FixedFrameSize() + call.AuxInt = off - base.Ctxt.FixedFrameSize() if len(results) > 0 { s.Fatalf("panic call can't have results") } @@ -5837,8 +5838,8 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { if n.Type.IsEmptyInterface() { // Converting to an empty interface. // Input could be an empty or nonempty interface. - if Debug.TypeAssert > 0 { - Warnl(n.Pos, "type assertion inlined") + if base.Debug.TypeAssert > 0 { + base.WarnfAt(n.Pos, "type assertion inlined") } // Get itab/type field from input. @@ -5904,8 +5905,8 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { return } // converting to a nonempty interface needs a runtime call. - if Debug.TypeAssert > 0 { - Warnl(n.Pos, "type assertion not inlined") + if base.Debug.TypeAssert > 0 { + base.WarnfAt(n.Pos, "type assertion not inlined") } if n.Left.Type.IsEmptyInterface() { if commaok { @@ -5921,15 +5922,15 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { return s.rtcall(assertI2I, true, []*types.Type{n.Type}, target, iface)[0], nil } - if Debug.TypeAssert > 0 { - Warnl(n.Pos, "type assertion inlined") + if base.Debug.TypeAssert > 0 { + base.WarnfAt(n.Pos, "type assertion inlined") } // Converting to a concrete type. 
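	// Hedged sketch of the inlined assertion: for a concrete target type T,
	//
	//	v, ok := i.(T)
	//
	// reduces to comparing i's type/itab word against the static descriptor
	// for T and, on success, extracting the data word (dereferenced when T is
	// not pointer-shaped). Only the interface-to-interface cases above need a
	// runtime call such as assertI2I.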
direct := isdirectiface(n.Type) itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface - if Debug.TypeAssert > 0 { - Warnl(n.Pos, "type assertion inlined") + if base.Debug.TypeAssert > 0 { + base.WarnfAt(n.Pos, "type assertion inlined") } var targetITab *ssa.Value if n.Left.Type.IsEmptyInterface() { @@ -6235,9 +6236,9 @@ func emitStackObjects(e *ssafn, pp *Progs) { p.To.Name = obj.NAME_EXTERN p.To.Sym = x - if Flag.Live != 0 { + if base.Flag.Live != 0 { for _, v := range vars { - Warnl(v.Pos, "stack object %v %s", v, v.Type.String()) + base.WarnfAt(v.Pos, "stack object %v %s", v, v.Type.String()) } } } @@ -6277,7 +6278,7 @@ func genssa(f *ssa.Func, pp *Progs) { s.ScratchFpMem = e.scratchFpMem - if Ctxt.Flag_locationlists { + if base.Ctxt.Flag_locationlists { if cap(f.Cache.ValueToProgAfter) < f.NumValues() { f.Cache.ValueToProgAfter = make([]*obj.Prog, f.NumValues()) } @@ -6373,7 +6374,7 @@ func genssa(f *ssa.Func, pp *Progs) { thearch.SSAGenValue(&s, v) } - if Ctxt.Flag_locationlists { + if base.Ctxt.Flag_locationlists { valueToProgAfter[v.ID] = s.pp.next } @@ -6397,7 +6398,7 @@ func genssa(f *ssa.Func, pp *Progs) { } // Emit control flow instructions for block var next *ssa.Block - if i < len(f.Blocks)-1 && Flag.N == 0 { + if i < len(f.Blocks)-1 && base.Flag.N == 0 { // If -N, leave next==nil so every block with successors // ends in a JMP (except call blocks - plive doesn't like // select{send,recv} followed by a JMP call). Helps keep @@ -6473,8 +6474,8 @@ func genssa(f *ssa.Func, pp *Progs) { } } - if Ctxt.Flag_locationlists { - e.curfn.Func.DebugInfo = ssa.BuildFuncDebug(Ctxt, f, Debug.LocationLists > 1, stackOffset) + if base.Ctxt.Flag_locationlists { + e.curfn.Func.DebugInfo = ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, stackOffset) bstart := s.bstart // Note that at this moment, Prog.Pc is a sequence number; it's // not a real PC until after assembly, so this mapping has to @@ -6705,7 +6706,7 @@ func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo } else { lo = s.newValue1(ssa.OpInt64Lo, types.Types[TUINT], idx) } - if bounded || Flag.B != 0 { + if bounded || base.Flag.B != 0 { return lo } bNext := s.f.NewBlock(ssa.BlockPlain) @@ -6807,7 +6808,7 @@ func CheckLoweredPhi(v *ssa.Value) { func CheckLoweredGetClosurePtr(v *ssa.Value) { entry := v.Block.Func.Entry if entry != v.Block || entry.Values[0] != v { - Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v) + base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v) } } @@ -6869,7 +6870,7 @@ func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog { case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64: p.To.Type = obj.TYPE_MEM default: - Fatalf("unknown indirect call family") + base.Fatalf("unknown indirect call family") } p.To.Reg = v.Args[0].Reg() } @@ -6884,7 +6885,7 @@ func (s *SSAGenState) PrepareCall(v *ssa.Value) { if !idx.StackMapValid() { // See Liveness.hasStackMap. if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == typedmemclr || sym.Fn == typedmemmove) { - Fatalf("missing stack map index for %v", v.LongString()) + base.Fatalf("missing stack map index for %v", v.LongString()) } } @@ -7085,7 +7086,7 @@ func (e *ssafn) CanSSA(t *types.Type) bool { } func (e *ssafn) Line(pos src.XPos) string { - return linestr(pos) + return base.FmtPos(pos) } // Log logs a message from the compiler. @@ -7101,23 +7102,23 @@ func (e *ssafn) Log() bool { // Fatal reports a compiler error and exits. 
func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) { - lineno = pos + base.Pos = pos nargs := append([]interface{}{e.curfn.funcname()}, args...) - Fatalf("'%s': "+msg, nargs...) + base.Fatalf("'%s': "+msg, nargs...) } // Warnl reports a "warning", which is usually flag-triggered // logging output for the benefit of tests. func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) { - Warnl(pos, fmt_, args...) + base.WarnfAt(pos, fmt_, args...) } func (e *ssafn) Debug_checknil() bool { - return Debug.Nil != 0 + return base.Debug.Nil != 0 } func (e *ssafn) UseWriteBarrier() bool { - return Flag.WB + return base.Flag.WB } func (e *ssafn) Syslook(name string) *obj.LSym { @@ -7142,7 +7143,7 @@ func (e *ssafn) SetWBPos(pos src.XPos) { } func (e *ssafn) MyImportPath() string { - return Ctxt.Pkgpath + return base.Ctxt.Pkgpath } func (n *Node) Typ() *types.Type { @@ -7157,7 +7158,7 @@ func (n *Node) StorageClass() ssa.StorageClass { case PAUTO: return ssa.ClassAuto default: - Fatalf("untranslatable storage class for %v: %s", n, n.Class()) + base.Fatalf("untranslatable storage class for %v: %s", n, n.Class()) return 0 } } diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 989d10a561d0d..00402a1bee65d 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/src" "crypto/md5" @@ -49,8 +50,8 @@ func hasUniquePos(n *Node) bool { } if !n.Pos.IsKnown() { - if Flag.K != 0 { - Warn("setlineno: unknown position (line 0)") + if base.Flag.K != 0 { + base.Warn("setlineno: unknown position (line 0)") } return false } @@ -59,9 +60,9 @@ func hasUniquePos(n *Node) bool { } func setlineno(n *Node) src.XPos { - lno := lineno + lno := base.Pos if n != nil && hasUniquePos(n) { - lineno = n.Pos + base.Pos = n.Pos } return lno } @@ -87,11 +88,11 @@ func lookupN(prefix string, n int) *types.Sym { // user labels. func autolabel(prefix string) *types.Sym { if prefix[0] != '.' 
{ - Fatalf("autolabel prefix must start with '.', have %q", prefix) + base.Fatalf("autolabel prefix must start with '.', have %q", prefix) } fn := Curfn if Curfn == nil { - Fatalf("autolabel outside function") + base.Fatalf("autolabel outside function") } n := fn.Func.Label fn.Func.Label++ @@ -112,7 +113,7 @@ func importdot(opkg *types.Pkg, pack *Node) { s1 := lookup(s.Name) if s1.Def != nil { pkgerror := fmt.Sprintf("during import %q", opkg.Path) - redeclare(lineno, s1, pkgerror) + redeclare(base.Pos, s1, pkgerror) continue } @@ -120,7 +121,7 @@ func importdot(opkg *types.Pkg, pack *Node) { s1.Block = s.Block if asNode(s1.Def).Name == nil { Dump("s1def", asNode(s1.Def)) - Fatalf("missing Name") + base.Fatalf("missing Name") } asNode(s1.Def).Name.Pack = pack s1.Origpkg = opkg @@ -129,12 +130,12 @@ func importdot(opkg *types.Pkg, pack *Node) { if n == 0 { // can't possibly be used - there were no symbols - yyerrorl(pack.Pos, "imported and not used: %q", opkg.Path) + base.ErrorfAt(pack.Pos, "imported and not used: %q", opkg.Path) } } func nod(op Op, nleft, nright *Node) *Node { - return nodl(lineno, op, nleft, nright) + return nodl(base.Pos, op, nleft, nright) } func nodl(pos src.XPos, op Op, nleft, nright *Node) *Node { @@ -149,7 +150,7 @@ func nodl(pos src.XPos, op Op, nleft, nright *Node) *Node { n.Func = &x.f n.Func.Decl = n case ONAME: - Fatalf("use newname instead") + base.Fatalf("use newname instead") case OLABEL, OPACK: var x struct { n Node @@ -171,7 +172,7 @@ func nodl(pos src.XPos, op Op, nleft, nright *Node) *Node { // newname returns a new ONAME Node associated with symbol s. func newname(s *types.Sym) *Node { - n := newnamel(lineno, s) + n := newnamel(base.Pos, s) n.Name.Curfn = Curfn return n } @@ -180,7 +181,7 @@ func newname(s *types.Sym) *Node { // The caller is responsible for setting n.Name.Curfn. func newnamel(pos src.XPos, s *types.Sym) *Node { if s == nil { - Fatalf("newnamel nil") + base.Fatalf("newnamel nil") } var x struct { @@ -203,7 +204,7 @@ func newnamel(pos src.XPos, s *types.Sym) *Node { // nodSym makes a Node with Op op and with the Left field set to left // and the Sym field set to sym. This is for ODOT and friends. 
func nodSym(op Op, left *Node, sym *types.Sym) *Node { - return nodlSym(lineno, op, left, sym) + return nodlSym(base.Pos, op, left, sym) } // nodlSym makes a Node with position Pos, with Op op, and with the Left field set to left @@ -290,7 +291,7 @@ func treecopy(n *Node, pos src.XPos) *Node { } if m.Name != nil && n.Op != ODCLFIELD { Dump("treecopy", n) - Fatalf("treecopy Name") + base.Fatalf("treecopy Name") } return m @@ -625,7 +626,7 @@ func assignconvfn(n *Node, t *types.Type, context func() string) *Node { } if t.Etype == TBLANK && n.Type.Etype == TNIL { - yyerror("use of untyped nil") + base.Errorf("use of untyped nil") } n = convlit1(n, t, false, context) @@ -654,7 +655,7 @@ func assignconvfn(n *Node, t *types.Type, context func() string) *Node { op, why := assignop(n.Type, t) if op == OXXX { - yyerror("cannot use %L as type %v in %s%s", n, t, context(), why) + base.Errorf("cannot use %L as type %v in %s%s", n, t, context(), why) op = OCONV } @@ -687,7 +688,7 @@ func (n *Node) SliceBounds() (low, high, max *Node) { s := n.List.Slice() return s[0], s[1], s[2] } - Fatalf("SliceBounds op %v: %v", n.Op, n) + base.Fatalf("SliceBounds op %v: %v", n.Op, n) return nil, nil, nil } @@ -697,7 +698,7 @@ func (n *Node) SetSliceBounds(low, high, max *Node) { switch n.Op { case OSLICE, OSLICEARR, OSLICESTR: if max != nil { - Fatalf("SetSliceBounds %v given three bounds", n.Op) + base.Fatalf("SetSliceBounds %v given three bounds", n.Op) } s := n.List.Slice() if s == nil { @@ -724,7 +725,7 @@ func (n *Node) SetSliceBounds(low, high, max *Node) { s[2] = max return } - Fatalf("SetSliceBounds op %v: %v", n.Op, n) + base.Fatalf("SetSliceBounds op %v: %v", n.Op, n) } // IsSlice3 reports whether o is a slice3 op (OSLICE3, OSLICE3ARR). @@ -736,7 +737,7 @@ func (o Op) IsSlice3() bool { case OSLICE3, OSLICE3ARR: return true } - Fatalf("IsSlice3 op %v", o) + base.Fatalf("IsSlice3 op %v", o) return false } @@ -746,7 +747,7 @@ func (n *Node) backingArrayPtrLen() (ptr, len *Node) { var init Nodes c := cheapexpr(n, &init) if c != n || init.Len() != 0 { - Fatalf("backingArrayPtrLen not cheap: %v", n) + base.Fatalf("backingArrayPtrLen not cheap: %v", n) } ptr = nod(OSPTR, n, nil) if n.Type.IsString() { @@ -763,7 +764,7 @@ func (n *Node) backingArrayPtrLen() (ptr, len *Node) { // associated with the label n, if any. func (n *Node) labeledControl() *Node { if n.Op != OLABEL { - Fatalf("labeledControl %v", n.Op) + base.Fatalf("labeledControl %v", n.Op) } ctl := n.Name.Defn if ctl == nil { @@ -779,7 +780,7 @@ func (n *Node) labeledControl() *Node { func syslook(name string) *Node { s := Runtimepkg.Lookup(name) if s == nil || s.Def == nil { - Fatalf("syslook: can't find runtime.%s", name) + base.Fatalf("syslook: can't find runtime.%s", name) } return asNode(s.Def) } @@ -811,7 +812,7 @@ func calcHasCall(n *Node) bool { switch n.Op { case OLITERAL, ONIL, ONAME, OTYPE: if n.HasCall() { - Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n) + base.Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n) } return false case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER: @@ -870,7 +871,7 @@ func badtype(op Op, tl, tr *types.Type) { } } - yyerror("illegal types for operand: %v%s", op, s) + base.Errorf("illegal types for operand: %v%s", op, s) } // brcom returns !(op). 
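// For reference, a small illustration of the two helpers below, using the
// pairs visible in this hunk:
//
//	brcom(OGE) == OLT // !(a >= b) is a < b
//	brrev(OGE) == OLE // a >= b iff b <= a (operands swapped)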
@@ -890,7 +891,7 @@ func brcom(op Op) Op { case OGE: return OLT } - Fatalf("brcom: no com for %v\n", op) + base.Fatalf("brcom: no com for %v\n", op) return op } @@ -911,7 +912,7 @@ func brrev(op Op) Op { case OGE: return OLE } - Fatalf("brrev: no rev for %v\n", op) + base.Fatalf("brrev: no rev for %v\n", op) return op } @@ -972,7 +973,7 @@ func safeexpr(n *Node, init *Nodes) *Node { // make a copy; must not be used as an lvalue if islvalue(n) { - Fatalf("missing lvalue case in safeexpr: %v", n) + base.Fatalf("missing lvalue case in safeexpr: %v", n) } return cheapexpr(n, init) } @@ -1161,7 +1162,7 @@ func adddot(n *Node) *Node { n.Left.SetImplicit(true) } case ambig: - yyerror("ambiguous selector %v", n) + base.Errorf("ambiguous selector %v", n) n.Left = nil } @@ -1334,7 +1335,7 @@ func structargs(tl *types.Type, mustname bool) []*Node { // method - M func (t T)(), a TFIELD type struct // newnam - the eventual mangled name of this function func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { - if false && Flag.LowerR != 0 { + if false && base.Flag.LowerR != 0 { fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam) } @@ -1350,7 +1351,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { return } - lineno = autogeneratedPos + base.Pos = autogeneratedPos dclcontext = PEXTERN tfn := nod(OTFUNC, nil, nil) @@ -1384,7 +1385,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { // the TOC to the appropriate value for that module. But if it returns // directly to the wrapper's caller, nothing will reset it to the correct // value for that function. - if !instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !isifacemethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && Ctxt.Flag_dynlink) { + if !instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !isifacemethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) { // generate tail call: adjust pointer receiver and jump to embedded method. dot = dot.Left // skip final .M // TODO(mdempsky): Remove dependency on dotlist. @@ -1407,12 +1408,12 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { fn.Nbody.Append(call) } - if false && Flag.LowerR != 0 { + if false && base.Flag.LowerR != 0 { dumplist("genwrapper body", fn.Nbody) } funcbody() - if Debug.DclStack != 0 { + if base.Debug.DclStack != 0 { testdclstack() } @@ -1464,7 +1465,7 @@ func ifacelookdot(s *types.Sym, t *types.Type, ignorecase bool) (m *types.Field, path, ambig := dotpath(s, t, &m, ignorecase) if path == nil { if ambig { - yyerror("%v.%v is ambiguous", t, s) + base.Errorf("%v.%v is ambiguous", t, s) } return nil, false } @@ -1477,7 +1478,7 @@ func ifacelookdot(s *types.Sym, t *types.Type, ignorecase bool) (m *types.Field, } if !m.IsMethod() { - yyerror("%v.%v is a field, not a method", t, s) + base.Errorf("%v.%v is a field, not a method", t, s) return nil, followptr } @@ -1548,8 +1549,8 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool // the method does not exist for value types. 
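		// Hedged illustration of the method-set rule enforced here:
		//
		//	type T struct{}
		//	func (*T) M() {}
		//	type I interface{ M() }
		//
		//	var _ I = &T{} // ok: *T's method set includes M
		//	var _ I = T{}  // error: M has a pointer receiver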
rcvr := tm.Type.Recv().Type if rcvr.IsPtr() && !t0.IsPtr() && !followptr && !isifacemethod(tm.Type) { - if false && Flag.LowerR != 0 { - yyerror("interface pointer mismatch") + if false && base.Flag.LowerR != 0 { + base.Errorf("interface pointer mismatch") } *m = im @@ -1624,40 +1625,40 @@ var reservedimports = []string{ func isbadimport(path string, allowSpace bool) bool { if strings.Contains(path, "\x00") { - yyerror("import path contains NUL") + base.Errorf("import path contains NUL") return true } for _, ri := range reservedimports { if path == ri { - yyerror("import path %q is reserved and cannot be used", path) + base.Errorf("import path %q is reserved and cannot be used", path) return true } } for _, r := range path { if r == utf8.RuneError { - yyerror("import path contains invalid UTF-8 sequence: %q", path) + base.Errorf("import path contains invalid UTF-8 sequence: %q", path) return true } if r < 0x20 || r == 0x7f { - yyerror("import path contains control character: %q", path) + base.Errorf("import path contains control character: %q", path) return true } if r == '\\' { - yyerror("import path contains backslash; use slash: %q", path) + base.Errorf("import path contains backslash; use slash: %q", path) return true } if !allowSpace && unicode.IsSpace(r) { - yyerror("import path contains space character: %q", path) + base.Errorf("import path contains space character: %q", path) return true } if strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r) { - yyerror("import path contains invalid character '%c': %q", r, path) + base.Errorf("import path contains invalid character '%c': %q", r, path) return true } } @@ -1709,7 +1710,7 @@ func itabType(itab *Node) *Node { // It follows the pointer if !isdirectiface(t). func ifaceData(pos src.XPos, n *Node, t *types.Type) *Node { if t.IsInterface() { - Fatalf("ifaceData interface: %v", t) + base.Fatalf("ifaceData interface: %v", t) } ptr := nodlSym(pos, OIDATA, n, nil) if isdirectiface(t) { @@ -1731,7 +1732,7 @@ func ifaceData(pos src.XPos, n *Node, t *types.Type) *Node { func typePos(t *types.Type) src.XPos { n := asNode(t.Nod) if n == nil || !n.Pos.IsKnown() { - Fatalf("bad type: %v", t) + base.Fatalf("bad type: %v", t) } return n.Pos } diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index c249a85b64e82..7befbdf06c15d 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/src" "go/constant" @@ -26,7 +27,7 @@ func typecheckTypeSwitch(n *Node) { n.Left.Right = typecheck(n.Left.Right, ctxExpr) t := n.Left.Right.Type if t != nil && !t.IsInterface() { - yyerrorl(n.Pos, "cannot type switch on non-interface value %L", n.Left.Right) + base.ErrorfAt(n.Pos, "cannot type switch on non-interface value %L", n.Left.Right) t = nil } @@ -34,7 +35,7 @@ func typecheckTypeSwitch(n *Node) { // declaration itself. So if there are no cases, we won't // notice that it went unused. 
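	// For example (hedged): a case-less type switch is the one place this
	// explicit check can fire,
	//
	//	var x interface{}
	//	switch v := x.(type) { // error: v declared but not used
	//	}
	//
	// since with cases present, v is implicitly redeclared in each case and
	// the ordinary unused-variable check applies there.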
if v := n.Left.Left; v != nil && !v.isBlank() && n.List.Len() == 0 { - yyerrorl(v.Pos, "%v declared but not used", v.Sym) + base.ErrorfAt(v.Pos, "%v declared but not used", v.Sym) } var defCase, nilCase *Node @@ -43,7 +44,7 @@ func typecheckTypeSwitch(n *Node) { ls := ncase.List.Slice() if len(ls) == 0 { // default: if defCase != nil { - yyerrorl(ncase.Pos, "multiple defaults in switch (first at %v)", defCase.Line()) + base.ErrorfAt(ncase.Pos, "multiple defaults in switch (first at %v)", defCase.Line()) } else { defCase = ncase } @@ -61,21 +62,21 @@ func typecheckTypeSwitch(n *Node) { switch { case n1.isNil(): // case nil: if nilCase != nil { - yyerrorl(ncase.Pos, "multiple nil cases in type switch (first at %v)", nilCase.Line()) + base.ErrorfAt(ncase.Pos, "multiple nil cases in type switch (first at %v)", nilCase.Line()) } else { nilCase = ncase } case n1.Op != OTYPE: - yyerrorl(ncase.Pos, "%L is not a type", n1) + base.ErrorfAt(ncase.Pos, "%L is not a type", n1) case !n1.Type.IsInterface() && !implements(n1.Type, t, &missing, &have, &ptr) && !missing.Broke(): if have != nil && !have.Broke() { - yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+ + base.ErrorfAt(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+ " (wrong type for %v method)\n\thave %v%S\n\twant %v%S", n.Left.Right, n1.Type, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) } else if ptr != 0 { - yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+ + base.ErrorfAt(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+ " (%v method has pointer receiver)", n.Left.Right, n1.Type, missing.Sym) } else { - yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+ + base.ErrorfAt(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+ " (missing %v method)", n.Left.Right, n1.Type, missing.Sym) } } @@ -135,7 +136,7 @@ func (s *typeSet) add(pos src.XPos, typ *types.Type) { prevs := s.m[ls] for _, prev := range prevs { if types.Identical(typ, prev.typ) { - yyerrorl(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, linestr(prev.pos)) + base.ErrorfAt(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, base.FmtPos(prev.pos)) return } } @@ -162,9 +163,9 @@ func typecheckExprSwitch(n *Node) { case !IsComparable(t): if t.IsStruct() { - yyerrorl(n.Pos, "cannot switch on %L (struct containing %v cannot be compared)", n.Left, IncomparableField(t).Type) + base.ErrorfAt(n.Pos, "cannot switch on %L (struct containing %v cannot be compared)", n.Left, IncomparableField(t).Type) } else { - yyerrorl(n.Pos, "cannot switch on %L", n.Left) + base.ErrorfAt(n.Pos, "cannot switch on %L", n.Left) } t = nil } @@ -176,7 +177,7 @@ func typecheckExprSwitch(n *Node) { ls := ncase.List.Slice() if len(ls) == 0 { // default: if defCase != nil { - yyerrorl(ncase.Pos, "multiple defaults in switch (first at %v)", defCase.Line()) + base.ErrorfAt(ncase.Pos, "multiple defaults in switch (first at %v)", defCase.Line()) } else { defCase = ncase } @@ -192,17 +193,17 @@ func typecheckExprSwitch(n *Node) { } if nilonly != "" && !n1.isNil() { - yyerrorl(ncase.Pos, "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left) + base.ErrorfAt(ncase.Pos, "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left) } else if t.IsInterface() && !n1.Type.IsInterface() && !IsComparable(n1.Type) { - yyerrorl(ncase.Pos, "invalid case %L in switch 
(incomparable type)", n1) + base.ErrorfAt(ncase.Pos, "invalid case %L in switch (incomparable type)", n1) } else { op1, _ := assignop(n1.Type, t) op2, _ := assignop(t, n1.Type) if op1 == OXXX && op2 == OXXX { if n.Left != nil { - yyerrorl(ncase.Pos, "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left, n1.Type, t) + base.ErrorfAt(ncase.Pos, "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left, n1.Type, t) } else { - yyerrorl(ncase.Pos, "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type) + base.ErrorfAt(ncase.Pos, "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type) } } } @@ -267,7 +268,7 @@ func walkExprSwitch(sw *Node) { cond = copyexpr(cond, cond.Type, &sw.Nbody) } - lineno = lno + base.Pos = lno s := exprSwitch{ exprname: cond, @@ -282,7 +283,7 @@ func walkExprSwitch(sw *Node) { // Process case dispatch. if ncase.List.Len() == 0 { if defaultGoto != nil { - Fatalf("duplicate default case not detected during typechecking") + base.Fatalf("duplicate default case not detected during typechecking") } defaultGoto = jmp } @@ -464,7 +465,7 @@ func allCaseExprsAreSideEffectFree(sw *Node) bool { for _, ncase := range sw.List.Slice() { if ncase.Op != OCASE { - Fatalf("switch string(byteslice) bad op: %v", ncase.Op) + base.Fatalf("switch string(byteslice) bad op: %v", ncase.Op) } for _, v := range ncase.List.Slice() { if v.Op != OLITERAL { @@ -517,7 +518,7 @@ func walkTypeSwitch(sw *Node) { // Use a similar strategy for non-empty interfaces. ifNil := nod(OIF, nil, nil) ifNil.Left = nod(OEQ, itab, nodnil()) - lineno = lineno.WithNotStmt() // disable statement marks after the first check. + base.Pos = base.Pos.WithNotStmt() // disable statement marks after the first check. ifNil.Left = typecheck(ifNil.Left, ctxExpr) ifNil.Left = defaultlit(ifNil.Left, nil) // ifNil.Nbody assigned at end. @@ -558,7 +559,7 @@ func walkTypeSwitch(sw *Node) { if ncase.List.Len() == 0 { // default: if defaultGoto != nil { - Fatalf("duplicate default case not detected during typechecking") + base.Fatalf("duplicate default case not detected during typechecking") } defaultGoto = jmp } @@ -566,7 +567,7 @@ func walkTypeSwitch(sw *Node) { for _, n1 := range ncase.List.Slice() { if n1.isNil() { // case nil: if nilGoto != nil { - Fatalf("duplicate nil case not detected during typechecking") + base.Fatalf("duplicate nil case not detected during typechecking") } nilGoto = jmp continue @@ -586,7 +587,7 @@ func walkTypeSwitch(sw *Node) { if singleType != nil { // We have a single concrete type. Extract the data. 
if singleType.IsInterface() { - Fatalf("singleType interface should have been handled in Add") + base.Fatalf("singleType interface should have been handled in Add") } val = ifaceData(ncase.Pos, s.facename, singleType) } @@ -733,7 +734,7 @@ func binarySearch(n int, out *Nodes, less func(i int) *Node, leaf func(i int, ni for i := lo; i < hi; i++ { nif := nod(OIF, nil, nil) leaf(i, nif) - lineno = lineno.WithNotStmt() + base.Pos = base.Pos.WithNotStmt() nif.Left = typecheck(nif.Left, ctxExpr) nif.Left = defaultlit(nif.Left, nil) out.Append(nif) @@ -745,7 +746,7 @@ func binarySearch(n int, out *Nodes, less func(i int) *Node, leaf func(i int, ni half := lo + n/2 nif := nod(OIF, nil, nil) nif.Left = less(half) - lineno = lineno.WithNotStmt() + base.Pos = base.Pos.WithNotStmt() nif.Left = typecheck(nif.Left, ctxExpr) nif.Left = defaultlit(nif.Left, nil) do(lo, half, &nif.Nbody) diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go index f771a7184ea47..11671fc54a05d 100644 --- a/src/cmd/compile/internal/gc/syntax.go +++ b/src/cmd/compile/internal/gc/syntax.go @@ -7,6 +7,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/ssa" "cmd/compile/internal/types" "cmd/internal/obj" @@ -106,7 +107,7 @@ func (n *Node) SubOp() Op { switch n.Op { case OASOP, ONAME: default: - Fatalf("unexpected op: %v", n.Op) + base.Fatalf("unexpected op: %v", n.Op) } return Op(n.aux) } @@ -115,21 +116,21 @@ func (n *Node) SetSubOp(op Op) { switch n.Op { case OASOP, ONAME: default: - Fatalf("unexpected op: %v", n.Op) + base.Fatalf("unexpected op: %v", n.Op) } n.aux = uint8(op) } func (n *Node) IndexMapLValue() bool { if n.Op != OINDEXMAP { - Fatalf("unexpected op: %v", n.Op) + base.Fatalf("unexpected op: %v", n.Op) } return n.aux != 0 } func (n *Node) SetIndexMapLValue(b bool) { if n.Op != OINDEXMAP { - Fatalf("unexpected op: %v", n.Op) + base.Fatalf("unexpected op: %v", n.Op) } if b { n.aux = 1 @@ -140,14 +141,14 @@ func (n *Node) SetIndexMapLValue(b bool) { func (n *Node) TChanDir() types.ChanDir { if n.Op != OTCHAN { - Fatalf("unexpected op: %v", n.Op) + base.Fatalf("unexpected op: %v", n.Op) } return types.ChanDir(n.aux) } func (n *Node) SetTChanDir(dir types.ChanDir) { if n.Op != OTCHAN { - Fatalf("unexpected op: %v", n.Op) + base.Fatalf("unexpected op: %v", n.Op) } n.aux = uint8(dir) } @@ -236,7 +237,7 @@ func (n *Node) SetEmbedded(b bool) { n.flags.set(nodeEmbedded, b) } // inserted before dereferencing. See state.exprPtr. func (n *Node) MarkNonNil() { if !n.Type.IsPtr() && !n.Type.IsUnsafePtr() { - Fatalf("MarkNonNil(%v), type %v", n, n.Type) + base.Fatalf("MarkNonNil(%v), type %v", n, n.Type) } n.flags.set(nodeNonNil, true) } @@ -255,7 +256,7 @@ func (n *Node) SetBounded(b bool) { // No length and cap checks needed // since new slice and copied over slice data have same length. default: - Fatalf("SetBounded(%v)", n) + base.Fatalf("SetBounded(%v)", n) } n.flags.set(nodeBounded, b) } @@ -263,7 +264,7 @@ func (n *Node) SetBounded(b bool) { // MarkReadonly indicates that n is an ONAME with readonly contents. func (n *Node) MarkReadonly() { if n.Op != ONAME { - Fatalf("Node.MarkReadonly %v", n.Op) + base.Fatalf("Node.MarkReadonly %v", n.Op) } n.Name.SetReadonly(true) // Mark the linksym as readonly immediately @@ -284,9 +285,9 @@ func (n *Node) Val() constant.Value { // which must not have been used with SetOpt. 
func (n *Node) SetVal(v constant.Value) { if n.HasOpt() { - Flag.LowerH = 1 + base.Flag.LowerH = 1 Dump("have Opt", n) - Fatalf("have Opt") + base.Fatalf("have Opt") } if n.Op == OLITERAL { assertRepresents(n.Type, v) @@ -314,9 +315,9 @@ func (n *Node) SetOpt(x interface{}) { return } if n.HasVal() { - Flag.LowerH = 1 + base.Flag.LowerH = 1 Dump("have Val", n) - Fatalf("have Val") + base.Fatalf("have Val") } n.SetHasOpt(true) n.E = x @@ -367,7 +368,7 @@ func (n *Node) pkgFuncName() string { } pkg := s.Pkg - p := Ctxt.Pkgpath + p := base.Ctxt.Pkgpath if pkg != nil && pkg.Path != "" { p = pkg.Path } @@ -764,8 +765,8 @@ func (f *Func) SetInstrumentBody(b bool) { f.flags.set(funcInstrumentB func (f *Func) SetOpenCodedDeferDisallowed(b bool) { f.flags.set(funcOpenCodedDeferDisallowed, b) } func (f *Func) setWBPos(pos src.XPos) { - if Debug.WB != 0 { - Warnl(pos, "write barrier") + if base.Debug.WB != 0 { + base.WarnfAt(pos, "write barrier") } if !f.WBPos.IsKnown() { f.WBPos = pos diff --git a/src/cmd/compile/internal/gc/trace.go b/src/cmd/compile/internal/gc/trace.go index ed4b5a268ddc6..c6eb23a09091c 100644 --- a/src/cmd/compile/internal/gc/trace.go +++ b/src/cmd/compile/internal/gc/trace.go @@ -9,6 +9,8 @@ package gc import ( "os" tracepkg "runtime/trace" + + "cmd/compile/internal/base" ) func init() { @@ -18,10 +20,10 @@ func init() { func traceHandlerGo17(traceprofile string) { f, err := os.Create(traceprofile) if err != nil { - Fatalf("%v", err) + base.Fatalf("%v", err) } if err := tracepkg.Start(f); err != nil { - Fatalf("%v", err) + base.Fatalf("%v", err) } - atExit(tracepkg.Stop) + base.AtExit(tracepkg.Stop) } diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 7b299e553b57a..b61b9b0525892 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/types" "fmt" "go/constant" @@ -25,7 +26,7 @@ func tracePrint(title string, n *Node) func(np **Node) { var pos, op string var tc uint8 if n != nil { - pos = linestr(n.Pos) + pos = base.FmtPos(n.Pos) op = n.Op.String() tc = n.Typecheck() } @@ -48,7 +49,7 @@ func tracePrint(title string, n *Node) func(np **Node) { var tc uint8 var typ *types.Type if n != nil { - pos = linestr(n.Pos) + pos = base.FmtPos(n.Pos) op = n.Op.String() tc = n.Typecheck() typ = n.Type @@ -84,13 +85,13 @@ func resolve(n *Node) (res *Node) { } // only trace if there's work to do - if enableTrace && Flag.LowerT { + if enableTrace && base.Flag.LowerT { defer tracePrint("resolve", n)(&res) } if n.Sym.Pkg != localpkg { if inimport { - Fatalf("recursive inimport") + base.Fatalf("recursive inimport") } inimport = true expandDecl(n) @@ -203,7 +204,7 @@ var typecheck_tcstack []*Node func typecheck(n *Node, top int) (res *Node) { // cannot type check until all the source has been parsed if !typecheckok { - Fatalf("early typecheck") + base.Fatalf("early typecheck") } if n == nil { @@ -211,7 +212,7 @@ func typecheck(n *Node, top int) (res *Node) { } // only trace if there's work to do - if enableTrace && Flag.LowerT { + if enableTrace && base.Flag.LowerT { defer tracePrint("typecheck", n)(&res) } @@ -233,7 +234,7 @@ func typecheck(n *Node, top int) (res *Node) { break default: - lineno = lno + base.Pos = lno return n } } @@ -245,7 +246,7 @@ func typecheck(n *Node, top int) (res *Node) { // We can already diagnose variables used as types. 
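		// Hedged example of the diagnostic:
		//
		//	var x int
		//	var y x // error: x is not a type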
case ONAME: if top&(ctxExpr|ctxType) == ctxType { - yyerror("%v is not a type", n) + base.Errorf("%v is not a type", n) } case OTYPE: @@ -263,34 +264,34 @@ func typecheck(n *Node, top int) (res *Node) { // with aliases that we can't handle properly yet. // Report an error rather than crashing later. if n.Name != nil && n.Name.Param.Alias() && n.Type == nil { - lineno = n.Pos - Fatalf("cannot handle alias type declaration (issue #25838): %v", n) + base.Pos = n.Pos + base.Fatalf("cannot handle alias type declaration (issue #25838): %v", n) } - lineno = lno + base.Pos = lno return n } } - yyerrorl(n.Pos, "invalid recursive type alias %v%s", n, cycleTrace(cycle)) + base.ErrorfAt(n.Pos, "invalid recursive type alias %v%s", n, cycleTrace(cycle)) } case OLITERAL: if top&(ctxExpr|ctxType) == ctxType { - yyerror("%v is not a type", n) + base.Errorf("%v is not a type", n) break } - yyerrorl(n.Pos, "constant definition loop%s", cycleTrace(cycleFor(n))) + base.ErrorfAt(n.Pos, "constant definition loop%s", cycleTrace(cycleFor(n))) } - if Errors() == 0 { + if base.Errors() == 0 { var trace string for i := len(typecheck_tcstack) - 1; i >= 0; i-- { x := typecheck_tcstack[i] trace += fmt.Sprintf("\n\t%v %v", x.Line(), x) } - yyerror("typechecking loop involving %v%s", n, trace) + base.Errorf("typechecking loop involving %v%s", n, trace) } - lineno = lno + base.Pos = lno return n } @@ -305,7 +306,7 @@ func typecheck(n *Node, top int) (res *Node) { typecheck_tcstack[last] = nil typecheck_tcstack = typecheck_tcstack[:last] - lineno = lno + base.Pos = lno return n } @@ -325,7 +326,7 @@ func indexlit(n *Node) *Node { // The result of typecheck1 MUST be assigned back to n, e.g. // n.Left = typecheck1(n.Left, top) func typecheck1(n *Node, top int) (res *Node) { - if enableTrace && Flag.LowerT { + if enableTrace && base.Flag.LowerT { defer tracePrint("typecheck1", n)(&res) } @@ -336,7 +337,7 @@ func typecheck1(n *Node, top int) (res *Node) { } if n.Op == ONAME && n.SubOp() != 0 && top&ctxCallee == 0 { - yyerror("use of builtin %v not in function call", n.Sym) + base.Errorf("use of builtin %v not in function call", n.Sym) n.Type = nil return n } @@ -354,14 +355,14 @@ func typecheck1(n *Node, top int) (res *Node) { default: Dump("typecheck", n) - Fatalf("typecheck %v", n.Op) + base.Fatalf("typecheck %v", n.Op) // names case OLITERAL: ok |= ctxExpr if n.Type == nil && n.Val().Kind() == constant.String { - Fatalf("string literal missing type") + base.Fatalf("string literal missing type") } case ONIL, ONONAME: @@ -379,7 +380,7 @@ func typecheck1(n *Node, top int) (res *Node) { if top&ctxAssign == 0 { // not a write to the variable if n.isBlank() { - yyerror("cannot use _ as value") + base.Errorf("cannot use _ as value") n.Type = nil return n } @@ -390,7 +391,7 @@ func typecheck1(n *Node, top int) (res *Node) { ok |= ctxExpr case OPACK: - yyerror("use of package %v without selector", n.Sym) + base.Errorf("use of package %v without selector", n.Sym) n.Type = nil return n @@ -419,7 +420,7 @@ func typecheck1(n *Node, top int) (res *Node) { } else if n.Left.Op == ODDD { if !n.Diag() { n.SetDiag(true) - yyerror("use of [...] array outside of array literal") + base.Errorf("use of [...] array outside of array literal") } n.Type = nil return n @@ -431,9 +432,9 @@ func typecheck1(n *Node, top int) (res *Node) { case l.Type == nil: // Error already reported elsewhere. 
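		// Hedged examples of the two diagnostics below:
		//
		//	var n int
		//	var a [n]int   // error: non-constant array bound n
		//	var b ["x"]int // error: invalid array bound "x"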
case l.Type.IsInteger() && l.Op != OLITERAL: - yyerror("non-constant array bound %v", l) + base.Errorf("non-constant array bound %v", l) default: - yyerror("invalid array bound %v", l) + base.Errorf("invalid array bound %v", l) } n.Type = nil return n @@ -441,13 +442,13 @@ func typecheck1(n *Node, top int) (res *Node) { v := l.Val() if doesoverflow(v, types.Types[TINT]) { - yyerror("array bound is too large") + base.Errorf("array bound is too large") n.Type = nil return n } if constant.Sign(v) < 0 { - yyerror("array bound must be non-negative") + base.Errorf("array bound must be non-negative") n.Type = nil return n } @@ -472,10 +473,10 @@ func typecheck1(n *Node, top int) (res *Node) { return n } if l.Type.NotInHeap() { - yyerror("incomplete (or unallocatable) map key not allowed") + base.Errorf("incomplete (or unallocatable) map key not allowed") } if r.Type.NotInHeap() { - yyerror("incomplete (or unallocatable) map value not allowed") + base.Errorf("incomplete (or unallocatable) map value not allowed") } setTypeNode(n, types.NewMap(l.Type, r.Type)) @@ -492,7 +493,7 @@ func typecheck1(n *Node, top int) (res *Node) { return n } if l.Type.NotInHeap() { - yyerror("chan of incomplete (or unallocatable) type not allowed") + base.Errorf("chan of incomplete (or unallocatable) type not allowed") } setTypeNode(n, types.NewChan(l.Type, n.TChanDir())) @@ -535,7 +536,7 @@ func typecheck1(n *Node, top int) (res *Node) { if !t.IsPtr() { if top&(ctxExpr|ctxStmt) != 0 { - yyerror("invalid indirect of %L", n.Left) + base.Errorf("invalid indirect of %L", n.Left) n.Type = nil return n } @@ -582,7 +583,7 @@ func typecheck1(n *Node, top int) (res *Node) { return n } if n.Implicit() && !okforarith[l.Type.Etype] { - yyerror("invalid operation: %v (non-numeric type %v)", n, l.Type) + base.Errorf("invalid operation: %v (non-numeric type %v)", n, l.Type) n.Type = nil return n } @@ -605,18 +606,18 @@ func typecheck1(n *Node, top int) (res *Node) { n.Right = r t := r.Type if !t.IsInteger() { - yyerror("invalid operation: %v (shift count type %v, must be integer)", n, r.Type) + base.Errorf("invalid operation: %v (shift count type %v, must be integer)", n, r.Type) n.Type = nil return n } if t.IsSigned() && !langSupported(1, 13, curpkg()) { - yyerrorv("go1.13", "invalid operation: %v (signed shift count type %v)", n, r.Type) + base.ErrorfVers("go1.13", "invalid operation: %v (signed shift count type %v)", n, r.Type) n.Type = nil return n } t = l.Type if t != nil && t.Etype != TIDEAL && !t.IsInteger() { - yyerror("invalid operation: %v (shift of type %v)", n, t) + base.Errorf("invalid operation: %v (shift of type %v)", n, t) n.Type = nil return n } @@ -636,12 +637,12 @@ func typecheck1(n *Node, top int) (res *Node) { // can't be converted to int (see issue #41500). 
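		// Hedged example: logical operands must already be boolean, with no
		// implicit conversion, so
		//
		//	var n int
		//	_ = n && true // error: operator && not defined on int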
if n.Op == OANDAND || n.Op == OOROR { if !n.Left.Type.IsBoolean() { - yyerror("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(n.Left.Type)) + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(n.Left.Type)) n.Type = nil return n } if !n.Right.Type.IsBoolean() { - yyerror("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(n.Right.Type)) + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(n.Right.Type)) n.Type = nil return n } @@ -678,7 +679,7 @@ func typecheck1(n *Node, top int) (res *Node) { aop, _ = assignop(l.Type, r.Type) if aop != OXXX { if r.Type.IsInterface() && !l.Type.IsInterface() && !IsComparable(l.Type) { - yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type)) + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type)) n.Type = nil return n } @@ -700,7 +701,7 @@ func typecheck1(n *Node, top int) (res *Node) { aop, _ = assignop(r.Type, l.Type) if aop != OXXX { if l.Type.IsInterface() && !r.Type.IsInterface() && !IsComparable(r.Type) { - yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type)) + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type)) n.Type = nil return n } @@ -727,7 +728,7 @@ func typecheck1(n *Node, top int) (res *Node) { return n } if l.Type.IsInterface() == r.Type.IsInterface() || aop == 0 { - yyerror("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type) + base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type) n.Type = nil return n } @@ -737,7 +738,7 @@ func typecheck1(n *Node, top int) (res *Node) { t = mixUntyped(l.Type, r.Type) } if dt := defaultType(t); !okfor[op][dt.Etype] { - yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t)) + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t)) n.Type = nil return n } @@ -745,32 +746,32 @@ func typecheck1(n *Node, top int) (res *Node) { // okfor allows any array == array, map == map, func == func. // restrict to slice/map/func == nil and nil == slice/map/func. 
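	// Hedged examples of the restrictions enforced below:
	//
	//	var s []int
	//	_ = s == nil // ok
	//	_ = s == s   // error: slice can only be compared to nil
	//
	// and likewise for map and func values.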
if l.Type.IsArray() && !IsComparable(l.Type) { - yyerror("invalid operation: %v (%v cannot be compared)", n, l.Type) + base.Errorf("invalid operation: %v (%v cannot be compared)", n, l.Type) n.Type = nil return n } if l.Type.IsSlice() && !l.isNil() && !r.isNil() { - yyerror("invalid operation: %v (slice can only be compared to nil)", n) + base.Errorf("invalid operation: %v (slice can only be compared to nil)", n) n.Type = nil return n } if l.Type.IsMap() && !l.isNil() && !r.isNil() { - yyerror("invalid operation: %v (map can only be compared to nil)", n) + base.Errorf("invalid operation: %v (map can only be compared to nil)", n) n.Type = nil return n } if l.Type.Etype == TFUNC && !l.isNil() && !r.isNil() { - yyerror("invalid operation: %v (func can only be compared to nil)", n) + base.Errorf("invalid operation: %v (func can only be compared to nil)", n) n.Type = nil return n } if l.Type.IsStruct() { if f := IncomparableField(l.Type); f != nil { - yyerror("invalid operation: %v (struct containing %v cannot be compared)", n, f.Type) + base.Errorf("invalid operation: %v (struct containing %v cannot be compared)", n, f.Type) n.Type = nil return n } @@ -806,7 +807,7 @@ func typecheck1(n *Node, top int) (res *Node) { if (op == ODIV || op == OMOD) && Isconst(r, constant.Int) { if constant.Sign(r.Val()) == 0 { - yyerror("division by zero") + base.Errorf("division by zero") n.Type = nil return n } @@ -824,7 +825,7 @@ func typecheck1(n *Node, top int) (res *Node) { return n } if !okfor[n.Op][defaultType(t).Etype] { - yyerror("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(t)) + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(t)) n.Type = nil return n } @@ -850,7 +851,7 @@ func typecheck1(n *Node, top int) (res *Node) { r := outervalue(n.Left) if r.Op == ONAME { if r.Orig != r { - Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean? + base.Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean? } r.Name.SetAddrtaken(true) if r.Name.IsClosureVar() && !capturevarscomplete { @@ -893,7 +894,7 @@ func typecheck1(n *Node, top int) (res *Node) { t := n.Left.Type if t == nil { - UpdateErrorDot(n.Line(), n.Left.String(), n.String()) + base.UpdateErrorDot(n.Line(), n.Left.String(), n.String()) n.Type = nil return n } @@ -920,7 +921,7 @@ func typecheck1(n *Node, top int) (res *Node) { } if n.Sym.IsBlank() { - yyerror("cannot refer to blank field or method") + base.Errorf("cannot refer to blank field or method") n.Type = nil return n } @@ -929,21 +930,21 @@ func typecheck1(n *Node, top int) (res *Node) { // Legitimate field or method lookup failed, try to explain the error switch { case t.IsEmptyInterface(): - yyerror("%v undefined (type %v is interface with no methods)", n, n.Left.Type) + base.Errorf("%v undefined (type %v is interface with no methods)", n, n.Left.Type) case t.IsPtr() && t.Elem().IsInterface(): // Pointer to interface is almost always a mistake. - yyerror("%v undefined (type %v is pointer to interface, not interface)", n, n.Left.Type) + base.Errorf("%v undefined (type %v is pointer to interface, not interface)", n, n.Left.Type) case lookdot(n, t, 1) != nil: // Field or method matches by name, but it is not exported. - yyerror("%v undefined (cannot refer to unexported field or method %v)", n, n.Sym) + base.Errorf("%v undefined (cannot refer to unexported field or method %v)", n, n.Sym) default: if mt := lookdot(n, t, 2); mt != nil && visible(mt.Sym) { // Case-insensitive lookup. 
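			// Hedged example: x.foo on a type that only defines Foo lands
			// here, so the message can point at the near-miss:
			//
			//	x.foo // undefined ... no field or method foo, but does have Foo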
- yyerror("%v undefined (type %v has no field or method %v, but does have %v)", n, n.Left.Type, n.Sym, mt.Sym) + base.Errorf("%v undefined (type %v has no field or method %v, but does have %v)", n, n.Left.Type, n.Sym, mt.Sym) } else { - yyerror("%v undefined (type %v has no field or method %v)", n, n.Left.Type, n.Sym) + base.Errorf("%v undefined (type %v has no field or method %v)", n, n.Left.Type, n.Sym) } } n.Type = nil @@ -974,7 +975,7 @@ func typecheck1(n *Node, top int) (res *Node) { return n } if !t.IsInterface() { - yyerror("invalid type assertion: %v (non-interface type %v on left)", n, t) + base.Errorf("invalid type assertion: %v (non-interface type %v on left)", n, t) n.Type = nil return n } @@ -993,15 +994,15 @@ func typecheck1(n *Node, top int) (res *Node) { var ptr int if !implements(n.Type, t, &missing, &have, &ptr) { if have != nil && have.Sym == missing.Sym { - yyerror("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+ + base.Errorf("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+ "\t\thave %v%0S\n\t\twant %v%0S", n.Type, t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) } else if ptr != 0 { - yyerror("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", n.Type, t, missing.Sym) + base.Errorf("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", n.Type, t, missing.Sym) } else if have != nil { - yyerror("impossible type assertion:\n\t%v does not implement %v (missing %v method)\n"+ + base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)\n"+ "\t\thave %v%0S\n\t\twant %v%0S", n.Type, t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) } else { - yyerror("impossible type assertion:\n\t%v does not implement %v (missing %v method)", n.Type, t, missing.Sym) + base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)", n.Type, t, missing.Sym) } n.Type = nil return n @@ -1023,7 +1024,7 @@ func typecheck1(n *Node, top int) (res *Node) { } switch t.Etype { default: - yyerror("invalid operation: %v (type %v does not support indexing)", n, t) + base.Errorf("invalid operation: %v (type %v does not support indexing)", n, t) n.Type = nil return n @@ -1042,20 +1043,20 @@ func typecheck1(n *Node, top int) (res *Node) { } if n.Right.Type != nil && !n.Right.Type.IsInteger() { - yyerror("non-integer %s index %v", why, n.Right) + base.Errorf("non-integer %s index %v", why, n.Right) break } if !n.Bounded() && Isconst(n.Right, constant.Int) { x := n.Right.Val() if constant.Sign(x) < 0 { - yyerror("invalid %s index %v (index must be non-negative)", why, n.Right) + base.Errorf("invalid %s index %v (index must be non-negative)", why, n.Right) } else if t.IsArray() && constant.Compare(x, token.GEQ, constant.MakeInt64(t.NumElem())) { - yyerror("invalid array index %v (out of bounds for %d-element array)", n.Right, t.NumElem()) + base.Errorf("invalid array index %v (out of bounds for %d-element array)", n.Right, t.NumElem()) } else if Isconst(n.Left, constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(n.Left.StringVal())))) { - yyerror("invalid string index %v (out of bounds for %d-byte string)", n.Right, len(n.Left.StringVal())) + base.Errorf("invalid string index %v (out of bounds for %d-byte string)", n.Right, len(n.Left.StringVal())) } else if doesoverflow(x, types.Types[TINT]) { - yyerror("invalid %s index %v (index too large)", why, 
n.Right) + base.Errorf("invalid %s index %v (index too large)", why, n.Right) } } @@ -1077,13 +1078,13 @@ func typecheck1(n *Node, top int) (res *Node) { return n } if !t.IsChan() { - yyerror("invalid operation: %v (receive from non-chan type %v)", n, t) + base.Errorf("invalid operation: %v (receive from non-chan type %v)", n, t) n.Type = nil return n } if !t.ChanDir().CanRecv() { - yyerror("invalid operation: %v (receive from send-only type %v)", n, t) + base.Errorf("invalid operation: %v (receive from send-only type %v)", n, t) n.Type = nil return n } @@ -1101,13 +1102,13 @@ func typecheck1(n *Node, top int) (res *Node) { return n } if !t.IsChan() { - yyerror("invalid operation: %v (send to non-chan type %v)", n, t) + base.Errorf("invalid operation: %v (send to non-chan type %v)", n, t) n.Type = nil return n } if !t.ChanDir().CanSend() { - yyerror("invalid operation: %v (send to receive-only type %v)", n, t) + base.Errorf("invalid operation: %v (send to receive-only type %v)", n, t) n.Type = nil return n } @@ -1120,7 +1121,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = nil case OSLICEHEADER: - // Errors here are Fatalf instead of yyerror because only the compiler + // Errors here are Fatalf instead of Errorf because only the compiler // can construct an OSLICEHEADER node. // Components used in OSLICEHEADER that are supplied by parsed source code // have already been typechecked in e.g. OMAKESLICE earlier. @@ -1128,19 +1129,19 @@ func typecheck1(n *Node, top int) (res *Node) { t := n.Type if t == nil { - Fatalf("no type specified for OSLICEHEADER") + base.Fatalf("no type specified for OSLICEHEADER") } if !t.IsSlice() { - Fatalf("invalid type %v for OSLICEHEADER", n.Type) + base.Fatalf("invalid type %v for OSLICEHEADER", n.Type) } if n.Left == nil || n.Left.Type == nil || !n.Left.Type.IsUnsafePtr() { - Fatalf("need unsafe.Pointer for OSLICEHEADER") + base.Fatalf("need unsafe.Pointer for OSLICEHEADER") } if x := n.List.Len(); x != 2 { - Fatalf("expected 2 params (len, cap) for OSLICEHEADER, got %d", x) + base.Fatalf("expected 2 params (len, cap) for OSLICEHEADER, got %d", x) } n.Left = typecheck(n.Left, ctxExpr) @@ -1150,22 +1151,22 @@ func typecheck1(n *Node, top int) (res *Node) { c = defaultlit(c, types.Types[TINT]) if Isconst(l, constant.Int) && l.Int64Val() < 0 { - Fatalf("len for OSLICEHEADER must be non-negative") + base.Fatalf("len for OSLICEHEADER must be non-negative") } if Isconst(c, constant.Int) && c.Int64Val() < 0 { - Fatalf("cap for OSLICEHEADER must be non-negative") + base.Fatalf("cap for OSLICEHEADER must be non-negative") } if Isconst(l, constant.Int) && Isconst(c, constant.Int) && constant.Compare(l.Val(), token.GTR, c.Val()) { - Fatalf("len larger than cap for OSLICEHEADER") + base.Fatalf("len larger than cap for OSLICEHEADER") } n.List.SetFirst(l) n.List.SetSecond(c) case OMAKESLICECOPY: - // Errors here are Fatalf instead of yyerror because only the compiler + // Errors here are Fatalf instead of Errorf because only the compiler // can construct an OMAKESLICECOPY node. // Components used in OMAKESCLICECOPY that are supplied by parsed source code // have already been typechecked in OMAKE and OCOPY earlier. 
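The OSLICEHEADER and OMAKESLICECOPY comments above rest on the split between the two reporting paths: Errorf is for mistakes in the user's source, Fatalf for states that only a compiler bug can produce. A minimal sketch of that contract, assuming a deliberately simplified base package (the real one also tracks positions, flags, and exit hooks):

package base

import (
	"fmt"
	"os"
)

var numErrors int

// Errorf reports a problem in the user's source and lets compilation
// continue so further diagnostics can still be collected.
func Errorf(format string, args ...interface{}) {
	numErrors++
	fmt.Fprintf(os.Stderr, "error: %s\n", fmt.Sprintf(format, args...))
}

// Errors returns how many user-level errors have been reported; callers
// compare it before and after a phase to detect new failures.
func Errors() int { return numErrors }

// Fatalf reports an internal compiler invariant violation, a state no
// user program should be able to reach, and aborts immediately.
func Fatalf(format string, args ...interface{}) {
	fmt.Fprintf(os.Stderr, "internal compiler error: %s\n", fmt.Sprintf(format, args...))
	os.Exit(2)
}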
@@ -1174,19 +1175,19 @@ func typecheck1(n *Node, top int) (res *Node) { t := n.Type if t == nil { - Fatalf("no type specified for OMAKESLICECOPY") + base.Fatalf("no type specified for OMAKESLICECOPY") } if !t.IsSlice() { - Fatalf("invalid type %v for OMAKESLICECOPY", n.Type) + base.Fatalf("invalid type %v for OMAKESLICECOPY", n.Type) } if n.Left == nil { - Fatalf("missing len argument for OMAKESLICECOPY") + base.Fatalf("missing len argument for OMAKESLICECOPY") } if n.Right == nil { - Fatalf("missing slice argument to copy for OMAKESLICECOPY") + base.Fatalf("missing slice argument to copy for OMAKESLICECOPY") } n.Left = typecheck(n.Left, ctxExpr) @@ -1195,15 +1196,15 @@ func typecheck1(n *Node, top int) (res *Node) { n.Left = defaultlit(n.Left, types.Types[TINT]) if !n.Left.Type.IsInteger() && n.Type.Etype != TIDEAL { - yyerror("non-integer len argument in OMAKESLICECOPY") + base.Errorf("non-integer len argument in OMAKESLICECOPY") } if Isconst(n.Left, constant.Int) { if doesoverflow(n.Left.Val(), types.Types[TINT]) { - Fatalf("len for OMAKESLICECOPY too large") + base.Fatalf("len for OMAKESLICECOPY too large") } if constant.Sign(n.Left.Val()) < 0 { - Fatalf("len for OMAKESLICECOPY must be non-negative") + base.Fatalf("len for OMAKESLICECOPY must be non-negative") } } @@ -1227,7 +1228,7 @@ func typecheck1(n *Node, top int) (res *Node) { } if l.Type.IsArray() { if !islvalue(n.Left) { - yyerror("invalid operation %v (slice of unaddressable value)", n) + base.Errorf("invalid operation %v (slice of unaddressable value)", n) n.Type = nil return n } @@ -1241,7 +1242,7 @@ func typecheck1(n *Node, top int) (res *Node) { var tp *types.Type if t.IsString() { if hasmax { - yyerror("invalid operation %v (3-index slice of string)", n) + base.Errorf("invalid operation %v (3-index slice of string)", n) n.Type = nil return n } @@ -1259,7 +1260,7 @@ func typecheck1(n *Node, top int) (res *Node) { } else if t.IsSlice() { n.Type = t } else { - yyerror("cannot slice %v (type %v)", l, t) + base.Errorf("cannot slice %v (type %v)", l, t) n.Type = nil return n } @@ -1293,7 +1294,7 @@ func typecheck1(n *Node, top int) (res *Node) { if l.Op == ONAME && l.SubOp() != 0 { if n.IsDDD() && l.SubOp() != OAPPEND { - yyerror("invalid use of ... with builtin %v", l) + base.Errorf("invalid use of ... with builtin %v", l) } // builtin: OLEN, OCAP, etc. @@ -1309,7 +1310,7 @@ func typecheck1(n *Node, top int) (res *Node) { if l.Op == OTYPE { if n.IsDDD() { if !l.Type.Broke() { - yyerror("invalid use of ... in type conversion to %v", l.Type) + base.Errorf("invalid use of ... 
in type conversion to %v", l.Type) } n.SetDiag(true) } @@ -1352,7 +1353,7 @@ func typecheck1(n *Node, top int) (res *Node) { tp := t.Recv().Type if l.Left == nil || !types.Identical(l.Left.Type, tp) { - Fatalf("method receiver") + base.Fatalf("method receiver") } default: @@ -1362,10 +1363,10 @@ func typecheck1(n *Node, top int) (res *Node) { if isBuiltinFuncName(name) && l.Name.Defn != nil { // be more specific when the function // name matches a predeclared function - yyerror("cannot call non-function %s (type %v), declared at %s", - name, t, linestr(l.Name.Defn.Pos)) + base.Errorf("cannot call non-function %s (type %v), declared at %s", + name, t, base.FmtPos(l.Name.Defn.Pos)) } else { - yyerror("cannot call non-function %s (type %v)", name, t) + base.Errorf("cannot call non-function %s (type %v)", name, t) } n.Type = nil return n @@ -1396,7 +1397,7 @@ func typecheck1(n *Node, top int) (res *Node) { // multiple return if top&(ctxMultiOK|ctxStmt) == 0 { - yyerror("multiple-value %v() in single-value context", l) + base.Errorf("multiple-value %v() in single-value context", l) break } @@ -1434,7 +1435,7 @@ func typecheck1(n *Node, top int) (res *Node) { ok = okforcap[t.Etype] } if !ok { - yyerror("invalid argument %L for %v", l, n.Op) + base.Errorf("invalid argument %L for %v", l, n.Op) n.Type = nil return n } @@ -1465,7 +1466,7 @@ func typecheck1(n *Node, top int) (res *Node) { case TCOMPLEX128: n.Type = types.Types[TFLOAT64] default: - yyerror("invalid argument %L for %v", l, n.Op) + base.Errorf("invalid argument %L for %v", l, n.Op) n.Type = nil return n } @@ -1492,7 +1493,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Right = r if !types.Identical(l.Type, r.Type) { - yyerror("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type) + base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type) n.Type = nil return n } @@ -1500,7 +1501,7 @@ func typecheck1(n *Node, top int) (res *Node) { var t *types.Type switch l.Type.Etype { default: - yyerror("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type) + base.Errorf("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type) n.Type = nil return n @@ -1529,13 +1530,13 @@ func typecheck1(n *Node, top int) (res *Node) { return n } if !t.IsChan() { - yyerror("invalid operation: %v (non-chan type %v)", n, t) + base.Errorf("invalid operation: %v (non-chan type %v)", n, t) n.Type = nil return n } if !t.ChanDir().CanSend() { - yyerror("invalid operation: %v (cannot close receive-only channel)", n) + base.Errorf("invalid operation: %v (cannot close receive-only channel)", n) n.Type = nil return n } @@ -1547,19 +1548,19 @@ func typecheck1(n *Node, top int) (res *Node) { typecheckargs(n) args := n.List if args.Len() == 0 { - yyerror("missing arguments to delete") + base.Errorf("missing arguments to delete") n.Type = nil return n } if args.Len() == 1 { - yyerror("missing second (key) argument to delete") + base.Errorf("missing second (key) argument to delete") n.Type = nil return n } if args.Len() != 2 { - yyerror("too many arguments to delete") + base.Errorf("too many arguments to delete") n.Type = nil return n } @@ -1567,7 +1568,7 @@ func typecheck1(n *Node, top int) (res *Node) { l := args.First() r := args.Second() if l.Type != nil && !l.Type.IsMap() { - yyerror("first argument to delete must be map; have %L", l.Type) + base.Errorf("first argument to delete must be map; have %L", l.Type) n.Type = nil return n } @@ -1579,7 +1580,7 @@ func 
typecheck1(n *Node, top int) (res *Node) { typecheckargs(n) args := n.List if args.Len() == 0 { - yyerror("missing arguments to append") + base.Errorf("missing arguments to append") n.Type = nil return n } @@ -1593,25 +1594,25 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = t if !t.IsSlice() { if args.First().isNil() { - yyerror("first argument to append must be typed slice; have untyped nil") + base.Errorf("first argument to append must be typed slice; have untyped nil") n.Type = nil return n } - yyerror("first argument to append must be slice; have %L", t) + base.Errorf("first argument to append must be slice; have %L", t) n.Type = nil return n } if n.IsDDD() { if args.Len() == 1 { - yyerror("cannot use ... on first argument to append") + base.Errorf("cannot use ... on first argument to append") n.Type = nil return n } if args.Len() != 2 { - yyerror("too many arguments to append") + base.Errorf("too many arguments to append") n.Type = nil return n } @@ -1658,25 +1659,25 @@ func typecheck1(n *Node, top int) (res *Node) { if types.Identical(n.Left.Type.Elem(), types.Bytetype) { break } - yyerror("arguments to copy have different element types: %L and string", n.Left.Type) + base.Errorf("arguments to copy have different element types: %L and string", n.Left.Type) n.Type = nil return n } if !n.Left.Type.IsSlice() || !n.Right.Type.IsSlice() { if !n.Left.Type.IsSlice() && !n.Right.Type.IsSlice() { - yyerror("arguments to copy must be slices; have %L, %L", n.Left.Type, n.Right.Type) + base.Errorf("arguments to copy must be slices; have %L, %L", n.Left.Type, n.Right.Type) } else if !n.Left.Type.IsSlice() { - yyerror("first argument to copy should be slice; have %L", n.Left.Type) + base.Errorf("first argument to copy should be slice; have %L", n.Left.Type) } else { - yyerror("second argument to copy should be slice or string; have %L", n.Right.Type) + base.Errorf("second argument to copy should be slice or string; have %L", n.Right.Type) } n.Type = nil return n } if !types.Identical(n.Left.Type.Elem(), n.Right.Type.Elem()) { - yyerror("arguments to copy have different element types: %L and %L", n.Left.Type, n.Right.Type) + base.Errorf("arguments to copy have different element types: %L and %L", n.Left.Type, n.Right.Type) n.Type = nil return n } @@ -1695,7 +1696,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Op = op if n.Op == OXXX { if !n.Diag() && !n.Type.Broke() && !n.Left.Diag() { - yyerror("cannot convert %L to type %v%s", n.Left, n.Type, why) + base.Errorf("cannot convert %L to type %v%s", n.Left, n.Type, why) n.SetDiag(true) } n.Op = OCONV @@ -1729,7 +1730,7 @@ func typecheck1(n *Node, top int) (res *Node) { ok |= ctxExpr args := n.List.Slice() if len(args) == 0 { - yyerror("missing argument to make") + base.Errorf("missing argument to make") n.Type = nil return n } @@ -1746,13 +1747,13 @@ func typecheck1(n *Node, top int) (res *Node) { i := 1 switch t.Etype { default: - yyerror("cannot make type %v", t) + base.Errorf("cannot make type %v", t) n.Type = nil return n case TSLICE: if i >= len(args) { - yyerror("missing len argument to make(%v)", t) + base.Errorf("missing len argument to make(%v)", t) n.Type = nil return n } @@ -1776,7 +1777,7 @@ func typecheck1(n *Node, top int) (res *Node) { return n } if Isconst(l, constant.Int) && r != nil && Isconst(r, constant.Int) && constant.Compare(l.Val(), token.GTR, r.Val()) { - yyerror("len larger than cap in make(%v)", t) + base.Errorf("len larger than cap in make(%v)", t) n.Type = nil return n } @@ -1828,7 +1829,7 @@ func 
typecheck1(n *Node, top int) (res *Node) { } if i < len(args) { - yyerror("too many arguments to make(%v)", t) + base.Errorf("too many arguments to make(%v)", t) n.Op = OMAKE n.Type = nil return n @@ -1840,7 +1841,7 @@ func typecheck1(n *Node, top int) (res *Node) { ok |= ctxExpr args := n.List if args.Len() == 0 { - yyerror("missing argument to new") + base.Errorf("missing argument to new") n.Type = nil return n } @@ -1853,7 +1854,7 @@ func typecheck1(n *Node, top int) (res *Node) { return n } if args.Len() > 1 { - yyerror("too many arguments to new(%v)", t) + base.Errorf("too many arguments to new(%v)", t) n.Type = nil return n } @@ -1890,7 +1891,7 @@ func typecheck1(n *Node, top int) (res *Node) { case ORECOVER: ok |= ctxExpr | ctxStmt if n.List.Len() != 0 { - yyerror("too many arguments to recover") + base.Errorf("too many arguments to recover") n.Type = nil return n } @@ -1913,14 +1914,14 @@ func typecheck1(n *Node, top int) (res *Node) { return n } if !t.IsInterface() { - Fatalf("OITAB of %v", t) + base.Fatalf("OITAB of %v", t) } n.Type = types.NewPtr(types.Types[TUINTPTR]) case OIDATA: // Whoever creates the OIDATA node must know a priori the concrete type at that moment, // usually by just having checked the OITAB. - Fatalf("cannot typecheck interface data %v", n) + base.Fatalf("cannot typecheck interface data %v", n) case OSPTR: ok |= ctxExpr @@ -1931,7 +1932,7 @@ func typecheck1(n *Node, top int) (res *Node) { return n } if !t.IsSlice() && !t.IsString() { - Fatalf("OSPTR of %v", t) + base.Fatalf("OSPTR of %v", t) } if t.IsString() { n.Type = types.NewPtr(types.Types[TUINT8]) @@ -2008,7 +2009,7 @@ func typecheck1(n *Node, top int) (res *Node) { if n.Left != nil { t := n.Left.Type if t != nil && !t.IsBoolean() { - yyerror("non-bool %L used as for condition", n.Left) + base.Errorf("non-bool %L used as for condition", n.Left) } } n.Right = typecheck(n.Right, ctxStmt) @@ -2026,7 +2027,7 @@ func typecheck1(n *Node, top int) (res *Node) { if n.Left != nil { t := n.Left.Type if t != nil && !t.IsBoolean() { - yyerror("non-bool %L used as if condition", n.Left) + base.Errorf("non-bool %L used as if condition", n.Left) } } typecheckslice(n.Nbody.Slice(), ctxStmt) @@ -2036,7 +2037,7 @@ func typecheck1(n *Node, top int) (res *Node) { ok |= ctxStmt typecheckargs(n) if Curfn == nil { - yyerror("return outside function") + base.Errorf("return outside function") n.Type = nil return n } @@ -2062,7 +2063,7 @@ func typecheck1(n *Node, top int) (res *Node) { typecheckrange(n) case OTYPESW: - yyerror("use of .(type) outside type switch") + base.Errorf("use of .(type) outside type switch") n.Type = nil return n @@ -2095,28 +2096,28 @@ func typecheck1(n *Node, top int) (res *Node) { n = evalConst(n) if n.Op == OTYPE && top&ctxType == 0 { if !n.Type.Broke() { - yyerror("type %v is not an expression", n.Type) + base.Errorf("type %v is not an expression", n.Type) } n.Type = nil return n } if top&(ctxExpr|ctxType) == ctxType && n.Op != OTYPE { - yyerror("%v is not a type", n) + base.Errorf("%v is not a type", n) n.Type = nil return n } // TODO(rsc): simplify if (top&(ctxCallee|ctxExpr|ctxType) != 0) && top&ctxStmt == 0 && ok&(ctxExpr|ctxType|ctxCallee) == 0 { - yyerror("%v used as value", n) + base.Errorf("%v used as value", n) n.Type = nil return n } if (top&ctxStmt != 0) && top&(ctxCallee|ctxExpr|ctxType) == 0 && ok&ctxStmt == 0 { if !n.Diag() { - yyerror("%v evaluated but not used", n) + base.Errorf("%v evaluated but not used", n) n.SetDiag(true) } @@ -2178,23 +2179,23 @@ func checksliceindex(l *Node, r 
*Node, tp *types.Type) bool { return false } if !t.IsInteger() { - yyerror("invalid slice index %v (type %v)", r, t) + base.Errorf("invalid slice index %v (type %v)", r, t) return false } if r.Op == OLITERAL { x := r.Val() if constant.Sign(x) < 0 { - yyerror("invalid slice index %v (index must be non-negative)", r) + base.Errorf("invalid slice index %v (index must be non-negative)", r) return false } else if tp != nil && tp.NumElem() >= 0 && constant.Compare(x, token.GTR, constant.MakeInt64(tp.NumElem())) { - yyerror("invalid slice index %v (out of bounds for %d-element array)", r, tp.NumElem()) + base.Errorf("invalid slice index %v (out of bounds for %d-element array)", r, tp.NumElem()) return false } else if Isconst(l, constant.String) && constant.Compare(x, token.GTR, constant.MakeInt64(int64(len(l.StringVal())))) { - yyerror("invalid slice index %v (out of bounds for %d-byte string)", r, len(l.StringVal())) + base.Errorf("invalid slice index %v (out of bounds for %d-byte string)", r, len(l.StringVal())) return false } else if doesoverflow(x, types.Types[TINT]) { - yyerror("invalid slice index %v (index too large)", r) + base.Errorf("invalid slice index %v (index too large)", r) return false } } @@ -2204,7 +2205,7 @@ func checksliceindex(l *Node, r *Node, tp *types.Type) bool { func checksliceconst(lo *Node, hi *Node) bool { if lo != nil && hi != nil && lo.Op == OLITERAL && hi.Op == OLITERAL && constant.Compare(lo.Val(), token.GTR, hi.Val()) { - yyerror("invalid slice index: %v > %v", lo, hi) + base.Errorf("invalid slice index: %v > %v", lo, hi) return false } @@ -2246,7 +2247,7 @@ func checkdefergo(n *Node) { if n.Left.Orig != nil && n.Left.Orig.Op == OCONV { break } - yyerrorl(n.Pos, "%s discards result of %v", what, n.Left) + base.ErrorfAt(n.Pos, "%s discards result of %v", what, n.Left) return } @@ -2260,7 +2261,7 @@ func checkdefergo(n *Node) { // The syntax made sure it was a call, so this must be // a conversion. n.SetDiag(true) - yyerrorl(n.Pos, "%s requires function call, not conversion", what) + base.ErrorfAt(n.Pos, "%s requires function call, not conversion", what) } } @@ -2291,13 +2292,13 @@ func onearg(n *Node, f string, args ...interface{}) bool { } if n.List.Len() == 0 { p := fmt.Sprintf(f, args...) - yyerror("missing argument to %s: %v", p, n) + base.Errorf("missing argument to %s: %v", p, n) return false } if n.List.Len() > 1 { p := fmt.Sprintf(f, args...) 
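The onearg and twoarg helpers being patched here back the arity diagnostics for builtins. Hypothetical user code (not from the patch) showing the two onearg messages; it deliberately does not compile:

package p

var s []int

var _ = len()     // missing argument to len: len()
var _ = len(s, s) // too many arguments to len: len(s, s)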
- yyerror("too many arguments to %s: %v", p, n) + base.Errorf("too many arguments to %s: %v", p, n) n.Left = n.List.First() n.List.Set(nil) return false @@ -2314,9 +2315,9 @@ func twoarg(n *Node) bool { } if n.List.Len() != 2 { if n.List.Len() < 2 { - yyerror("not enough arguments in call to %v", n) + base.Errorf("not enough arguments in call to %v", n) } else { - yyerror("too many arguments in call to %v", n) + base.Errorf("too many arguments in call to %v", n) } return false } @@ -2340,11 +2341,11 @@ func lookdot1(errnode *Node, s *types.Sym, t *types.Type, fs *types.Fields, dost } if r != nil { if errnode != nil { - yyerror("ambiguous selector %v", errnode) + base.Errorf("ambiguous selector %v", errnode) } else if t.IsPtr() { - yyerror("ambiguous selector (%v).%v", t, s) + base.Errorf("ambiguous selector (%v).%v", t, s) } else { - yyerror("ambiguous selector %v.%v", t, s) + base.Errorf("ambiguous selector %v.%v", t, s) } break } @@ -2358,7 +2359,7 @@ func lookdot1(errnode *Node, s *types.Sym, t *types.Type, fs *types.Fields, dost // typecheckMethodExpr checks selector expressions (ODOT) where the // base expression is a type expression (OTYPE). func typecheckMethodExpr(n *Node) (res *Node) { - if enableTrace && Flag.LowerT { + if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckMethodExpr", n)(&res) } @@ -2371,7 +2372,7 @@ func typecheckMethodExpr(n *Node) (res *Node) { } else { mt := methtype(t) if mt == nil { - yyerror("%v undefined (type %v has no method %v)", n, t, n.Sym) + base.Errorf("%v undefined (type %v has no method %v)", n, t, n.Sym) n.Type = nil return n } @@ -2394,18 +2395,18 @@ func typecheckMethodExpr(n *Node) (res *Node) { m := lookdot1(n, s, t, ms, 0) if m == nil { if lookdot1(n, s, t, ms, 1) != nil { - yyerror("%v undefined (cannot refer to unexported method %v)", n, s) + base.Errorf("%v undefined (cannot refer to unexported method %v)", n, s) } else if _, ambig := dotpath(s, t, nil, false); ambig { - yyerror("%v undefined (ambiguous selector)", n) // method or field + base.Errorf("%v undefined (ambiguous selector)", n) // method or field } else { - yyerror("%v undefined (type %v has no method %v)", n, t, s) + base.Errorf("%v undefined (type %v has no method %v)", n, t, s) } n.Type = nil return n } if !isMethodApplicable(t, m) { - yyerror("invalid method expression %v (needs pointer receiver: (*%v).%S)", n, t, s) + base.Errorf("invalid method expression %v (needs pointer receiver: (*%v).%S)", n, t, s) n.Type = nil return n } @@ -2423,7 +2424,7 @@ func typecheckMethodExpr(n *Node) (res *Node) { // methodSym already marked n.Sym as a function. // Issue 25065. Make sure that we emit the symbol for a local method. 
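typecheckMethodExpr, patched above, also vets method expressions. A hypothetical example of its pointer-receiver diagnostic (deliberately not compiling):

package p

type T int

func (t *T) M() {}

var _ = T.M    // invalid method expression T.M (needs pointer receiver: (*T).M)
var _ = (*T).M // fine: M is in the method set of *T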
- if Ctxt.Flag_dynlink && !inimport && (t.Sym == nil || t.Sym.Pkg == localpkg) { + if base.Ctxt.Flag_dynlink && !inimport && (t.Sym == nil || t.Sym.Pkg == localpkg) { makefuncsym(n.Sym) } @@ -2468,10 +2469,10 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field { return f1 } if f2 != nil { - yyerror("%v is both field and method", n.Sym) + base.Errorf("%v is both field and method", n.Sym) } if f1.Offset == BADWIDTH { - Fatalf("lookdot badwidth %v %p", f1, f1) + base.Fatalf("lookdot badwidth %v %p", f1, f1) } n.Xoffset = f1.Offset n.Type = f1.Type @@ -2509,7 +2510,7 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field { n.Left.SetImplicit(true) n.Left = typecheck(n.Left, ctxType|ctxExpr) } else if tt.IsPtr() && tt.Elem().IsPtr() && types.Identical(derefall(tt), derefall(rcvr)) { - yyerror("calling method %v with receiver %L requires explicit dereference", n.Sym, n.Left) + base.Errorf("calling method %v with receiver %L requires explicit dereference", n.Sym, n.Left) for tt.IsPtr() { // Stop one level early for method with pointer receiver. if rcvr.IsPtr() && !tt.Elem().IsPtr() { @@ -2521,7 +2522,7 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field { tt = tt.Elem() } } else { - Fatalf("method mismatch: %v for %v", rcvr, tt) + base.Fatalf("method mismatch: %v for %v", rcvr, tt) } } @@ -2574,8 +2575,8 @@ func typecheckaste(op Op, call *Node, isddd bool, tstruct *types.Type, nl Nodes, var t *types.Type var i int - lno := lineno - defer func() { lineno = lno }() + lno := base.Pos + defer func() { base.Pos = lno }() if tstruct.Broke() { return @@ -2656,9 +2657,9 @@ func typecheckaste(op Op, call *Node, isddd bool, tstruct *types.Type, nl Nodes, } if isddd { if call != nil { - yyerror("invalid use of ... in call to %v", call) + base.Errorf("invalid use of ... in call to %v", call) } else { - yyerror("invalid use of ... in %v", op) + base.Errorf("invalid use of ... in %v", op) } } return @@ -2671,12 +2672,12 @@ notenough: // Method expressions have the form T.M, and the compiler has // rewritten those to ONAME nodes but left T in Left. if call.Op == OMETHEXPR { - yyerror("not enough arguments in call to method expression %v%s", call, details) + base.Errorf("not enough arguments in call to method expression %v%s", call, details) } else { - yyerror("not enough arguments in call to %v%s", call, details) + base.Errorf("not enough arguments in call to %v%s", call, details) } } else { - yyerror("not enough arguments to %v%s", op, details) + base.Errorf("not enough arguments to %v%s", op, details) } if n != nil { n.SetDiag(true) @@ -2687,9 +2688,9 @@ notenough: toomany: details := errorDetails(nl, tstruct, isddd) if call != nil { - yyerror("too many arguments in call to %v%s", call, details) + base.Errorf("too many arguments in call to %v%s", call, details) } else { - yyerror("too many arguments to %v%s", op, details) + base.Errorf("too many arguments to %v%s", op, details) } } @@ -2729,7 +2730,7 @@ func sigrepr(t *types.Type, isddd bool) string { // Turn []T... argument to ...T for clearer error message. if isddd { if !t.IsSlice() { - Fatalf("bad type for ... argument: %v", t) + base.Fatalf("bad type for ... argument: %v", t) } return "..." 
+ t.Elem().String() } @@ -2754,7 +2755,7 @@ func (nl Nodes) sigerr(isddd bool) string { // type check composite func fielddup(name string, hash map[string]bool) { if hash[name] { - yyerror("duplicate field name in struct literal: %s", name) + base.Errorf("duplicate field name in struct literal: %s", name) return } hash[name] = true @@ -2796,17 +2797,17 @@ func pushtype(n *Node, t *types.Type) *Node { // The result of typecheckcomplit MUST be assigned back to n, e.g. // n.Left = typecheckcomplit(n.Left) func typecheckcomplit(n *Node) (res *Node) { - if enableTrace && Flag.LowerT { + if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckcomplit", n)(&res) } - lno := lineno + lno := base.Pos defer func() { - lineno = lno + base.Pos = lno }() if n.Right == nil { - yyerrorl(n.Pos, "missing type in composite literal") + base.ErrorfAt(n.Pos, "missing type in composite literal") n.Type = nil return n } @@ -2843,7 +2844,7 @@ func typecheckcomplit(n *Node) (res *Node) { switch t.Etype { default: - yyerror("invalid composite literal type %v", t) + base.Errorf("invalid composite literal type %v", t) n.Type = nil case TARRAY: @@ -2862,7 +2863,7 @@ func typecheckcomplit(n *Node) (res *Node) { setlineno(l) if l.Op != OKEY { n.List.SetIndex(i3, typecheck(l, ctxExpr)) - yyerror("missing key in map literal") + base.Errorf("missing key in map literal") continue } @@ -2870,7 +2871,7 @@ func typecheckcomplit(n *Node) (res *Node) { r = pushtype(r, t.Key()) r = typecheck(r, ctxExpr) l.Left = assignconv(r, t.Key(), "map key") - cs.add(lineno, l.Left, "key", "map literal") + cs.add(base.Pos, l.Left, "key", "map literal") r = l.Right r = pushtype(r, t.Elem()) @@ -2895,7 +2896,7 @@ func typecheckcomplit(n *Node) (res *Node) { ls[i] = n1 if i >= t.NumFields() { if !errored { - yyerror("too many values in %v", n) + base.Errorf("too many values in %v", n) errored = true } continue @@ -2904,7 +2905,7 @@ func typecheckcomplit(n *Node) (res *Node) { f := t.Field(i) s := f.Sym if s != nil && !types.IsExported(s.Name) && s.Pkg != localpkg { - yyerror("implicit assignment of unexported field '%s' in %v literal", s.Name, t) + base.Errorf("implicit assignment of unexported field '%s' in %v literal", s.Name, t) } // No pushtype allowed here. Must name fields for that. n1 = assignconv(n1, f.Type, "field value") @@ -2913,7 +2914,7 @@ func typecheckcomplit(n *Node) (res *Node) { ls[i] = n1 } if len(ls) < t.NumFields() { - yyerror("too few values in %v", n) + base.Errorf("too few values in %v", n) } } else { hash := make(map[string]bool) @@ -2935,7 +2936,7 @@ func typecheckcomplit(n *Node) (res *Node) { // so s will be non-nil, but an OXDOT // is never a valid struct literal key. if key.Sym == nil || key.Op == OXDOT || key.Sym.IsBlank() { - yyerror("invalid field name %v in struct initializer", key) + base.Errorf("invalid field name %v in struct initializer", key) l.Left = typecheck(l.Left, ctxExpr) continue } @@ -2955,7 +2956,7 @@ func typecheckcomplit(n *Node) (res *Node) { if l.Op != OSTRUCTKEY { if !errored { - yyerror("mixture of field:value and value initializers") + base.Errorf("mixture of field:value and value initializers") errored = true } ls[i] = typecheck(ls[i], ctxExpr) @@ -2966,18 +2967,18 @@ func typecheckcomplit(n *Node) (res *Node) { if f == nil { if ci := lookdot1(nil, l.Sym, t, t.Fields(), 2); ci != nil { // Case-insensitive lookup. 
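The composite-literal checks in the hunks above handle both keyed and positional forms. Hypothetical user code (not from the patch) that triggers three of the messages, so it deliberately does not compile:

package p

type T struct{ A, B int }

var _ = T{1, 2, 3}          // too many values in the struct literal
var _ = T{A: 1, 2}          // mixture of field:value and value initializers
var _ = map[string]int{"k"} // missing key in map literal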
if visible(ci.Sym) { - yyerror("unknown field '%v' in struct literal of type %v (but does have %v)", l.Sym, t, ci.Sym) + base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", l.Sym, t, ci.Sym) } else if nonexported(l.Sym) && l.Sym.Name == ci.Sym.Name { // Ensure exactness before the suggestion. - yyerror("cannot refer to unexported field '%v' in struct literal of type %v", l.Sym, t) + base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", l.Sym, t) } else { - yyerror("unknown field '%v' in struct literal of type %v", l.Sym, t) + base.Errorf("unknown field '%v' in struct literal of type %v", l.Sym, t) } continue } var f *types.Field p, _ := dotpath(l.Sym, t, &f, true) if p == nil || f.IsMethod() { - yyerror("unknown field '%v' in struct literal of type %v", l.Sym, t) + base.Errorf("unknown field '%v' in struct literal of type %v", l.Sym, t) continue } // dotpath returns the parent embedded types in reverse order. @@ -2986,7 +2987,7 @@ func typecheckcomplit(n *Node) (res *Node) { ep = append(ep, p[ei].field.Sym.Name) } ep = append(ep, l.Sym.Name) - yyerror("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), t) + base.Errorf("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), t) continue } fielddup(f.Sym.Name, hash) @@ -3028,9 +3029,9 @@ func typecheckarraylit(elemType *types.Type, bound int64, elts []*Node, ctx stri if key < 0 { if !elt.Left.Diag() { if key == -2 { - yyerror("index too large") + base.Errorf("index too large") } else { - yyerror("index must be non-negative integer constant") + base.Errorf("index must be non-negative integer constant") } elt.Left.SetDiag(true) } @@ -3052,14 +3053,14 @@ func typecheckarraylit(elemType *types.Type, bound int64, elts []*Node, ctx stri if key >= 0 { if indices != nil { if indices[key] { - yyerror("duplicate index in %s: %d", ctx, key) + base.Errorf("duplicate index in %s: %d", ctx, key) } else { indices[key] = true } } if bound >= 0 && key >= bound { - yyerror("array index %d out of bounds [0:%d]", key, bound) + base.Errorf("array index %d out of bounds [0:%d]", key, bound) bound = -1 } } @@ -3112,7 +3113,7 @@ func islvalue(n *Node) bool { func checklvalue(n *Node, verb string) { if !islvalue(n) { - yyerror("cannot %s %v", verb, n) + base.Errorf("cannot %s %v", verb, n) } } @@ -3143,13 +3144,13 @@ func checkassign(stmt *Node, n *Node) { switch { case n.Op == ODOT && n.Left.Op == OINDEXMAP: - yyerror("cannot assign to struct field %v in map", n) + base.Errorf("cannot assign to struct field %v in map", n) case (n.Op == OINDEX && n.Left.Type.IsString()) || n.Op == OSLICESTR: - yyerror("cannot assign to %v (strings are immutable)", n) + base.Errorf("cannot assign to %v (strings are immutable)", n) case n.Op == OLITERAL && n.Sym != nil && n.isGoConst(): - yyerror("cannot assign to %v (declared const)", n) + base.Errorf("cannot assign to %v (declared const)", n) default: - yyerror("cannot assign to %v", n) + base.Errorf("cannot assign to %v", n) } n.Type = nil } @@ -3214,7 +3215,7 @@ func samesafeexpr(l *Node, r *Node) bool { // if this assignment is the definition of a var on the left side, // fill in the var's type. 
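checkassign, just above, is the gatekeeper for assignability. A hypothetical snippet that hits each of its special cases (deliberately not compiling):

package p

type P struct{ X int }

var m = map[string]P{}

const c = 1

func f() {
	m["k"].X = 1 // cannot assign to struct field m["k"].X in map
	s := "hi"
	s[0] = 'H' // cannot assign to s[0] (strings are immutable)
	c = 2      // cannot assign to c (declared const)
}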
func typecheckas(n *Node) { - if enableTrace && Flag.LowerT { + if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckas", n)(nil) } @@ -3237,7 +3238,7 @@ func typecheckas(n *Node) { checkassign(n, n.Left) if n.Right != nil && n.Right.Type != nil { if n.Right.Type.IsFuncArgStruct() { - yyerror("assignment mismatch: 1 variable but %v returns %d values", n.Right.Left, n.Right.Type.NumFields()) + base.Errorf("assignment mismatch: 1 variable but %v returns %d values", n.Right.Left, n.Right.Type.NumFields()) // Multi-value RHS isn't actually valid for OAS; nil out // to indicate failed typechecking. n.Right.Type = nil @@ -3266,13 +3267,13 @@ func typecheckas(n *Node) { func checkassignto(src *types.Type, dst *Node) { if op, why := assignop(src, dst.Type); op == OXXX { - yyerror("cannot assign %v to %L in multiple assignment%s", src, dst, why) + base.Errorf("cannot assign %v to %L in multiple assignment%s", src, dst, why) return } } func typecheckas2(n *Node) { - if enableTrace && Flag.LowerT { + if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckas2", n)(nil) } @@ -3387,9 +3388,9 @@ func typecheckas2(n *Node) { mismatch: switch r.Op { default: - yyerror("assignment mismatch: %d variables but %d values", cl, cr) + base.Errorf("assignment mismatch: %d variables but %d values", cl, cr) case OCALLFUNC, OCALLMETH, OCALLINTER: - yyerror("assignment mismatch: %d variables but %v returns %d values", cl, r.Left, cr) + base.Errorf("assignment mismatch: %d variables but %v returns %d values", cl, r.Left, cr) } // second half of dance @@ -3405,7 +3406,7 @@ out: // type check function definition func typecheckfunc(n *Node) { - if enableTrace && Flag.LowerT { + if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckfunc", n)(nil) } @@ -3432,7 +3433,7 @@ func typecheckfunc(n *Node) { declare(n.Func.Nname, PFUNC) } - if Ctxt.Flag_dynlink && !inimport && n.Func.Nname != nil { + if base.Ctxt.Flag_dynlink && !inimport && n.Func.Nname != nil { makefuncsym(n.Func.Nname.Sym) } } @@ -3441,7 +3442,7 @@ func typecheckfunc(n *Node) { // n.Left = stringtoruneslit(n.Left) func stringtoruneslit(n *Node) *Node { if n.Left.Op != OLITERAL || n.Left.Val().Kind() != constant.String { - Fatalf("stringtoarraylit %v", n) + base.Fatalf("stringtoarraylit %v", n) } var l []*Node @@ -3463,7 +3464,7 @@ func checkMapKeys() { for _, n := range mapqueue { k := n.Type.MapType().Key if !k.Broke() && !IsComparable(k) { - yyerrorl(n.Pos, "invalid map key type %v", k) + base.ErrorfAt(n.Pos, "invalid map key type %v", k) } } mapqueue = nil @@ -3513,13 +3514,13 @@ func setUnderlying(t, underlying *types.Type) { // Double-check use of type as embedded type. 
if ft.Embedlineno.IsKnown() { if t.IsPtr() || t.IsUnsafePtr() { - yyerrorl(ft.Embedlineno, "embedded type cannot be a pointer") + base.ErrorfAt(ft.Embedlineno, "embedded type cannot be a pointer") } } } func typecheckdeftype(n *Node) { - if enableTrace && Flag.LowerT { + if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckdeftype", n)(nil) } @@ -3539,7 +3540,7 @@ func typecheckdeftype(n *Node) { } func typecheckdef(n *Node) { - if enableTrace && Flag.LowerT { + if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckdef", n)(nil) } @@ -3551,27 +3552,27 @@ func typecheckdef(n *Node) { // Note: adderrorname looks for this string and // adds context about the outer expression - yyerrorl(lineno, "undefined: %v", n.Sym) + base.ErrorfAt(base.Pos, "undefined: %v", n.Sym) } - lineno = lno + base.Pos = lno return } if n.Walkdef() == 1 { - lineno = lno + base.Pos = lno return } typecheckdefstack = append(typecheckdefstack, n) if n.Walkdef() == 2 { - flusherrors() + base.FlushErrors() fmt.Printf("typecheckdef loop:") for i := len(typecheckdefstack) - 1; i >= 0; i-- { n := typecheckdefstack[i] fmt.Printf(" %v", n.Sym) } fmt.Printf("\n") - Fatalf("typecheckdef loop") + base.Fatalf("typecheckdef loop") } n.SetWalkdef(2) @@ -3582,7 +3583,7 @@ func typecheckdef(n *Node) { switch n.Op { default: - Fatalf("typecheckdef %v", n.Op) + base.Fatalf("typecheckdef %v", n.Op) case OLITERAL: if n.Name.Param.Ntype != nil { @@ -3599,7 +3600,7 @@ func typecheckdef(n *Node) { n.Name.Defn = nil if e == nil { Dump("typecheckdef nil defn", n) - yyerrorl(n.Pos, "xxx") + base.ErrorfAt(n.Pos, "xxx") } e = typecheck(e, ctxExpr) @@ -3609,9 +3610,9 @@ func typecheckdef(n *Node) { if !e.isGoConst() { if !e.Diag() { if e.Op == ONIL { - yyerrorl(n.Pos, "const initializer cannot be nil") + base.ErrorfAt(n.Pos, "const initializer cannot be nil") } else { - yyerrorl(n.Pos, "const initializer %v is not a constant", e) + base.ErrorfAt(n.Pos, "const initializer %v is not a constant", e) } e.SetDiag(true) } @@ -3621,12 +3622,12 @@ func typecheckdef(n *Node) { t := n.Type if t != nil { if !okforconst[t.Etype] { - yyerrorl(n.Pos, "invalid constant type %v", t) + base.ErrorfAt(n.Pos, "invalid constant type %v", t) goto ret } if !e.Type.IsUntyped() && !types.Identical(t, e.Type) { - yyerrorl(n.Pos, "cannot use %L as type %v in const initializer", e, t) + base.ErrorfAt(n.Pos, "cannot use %L as type %v in const initializer", e, t) goto ret } @@ -3655,7 +3656,7 @@ func typecheckdef(n *Node) { if n.SubOp() != 0 { // like OPRINTN break } - if Errors() > 0 { + if base.Errors() > 0 { // Can have undefined variables in x := foo // that make x have an n.name.Defn == nil. // If there are other errors anyway, don't @@ -3663,7 +3664,7 @@ func typecheckdef(n *Node) { break } - Fatalf("var without type, init: %v", n.Sym) + base.Fatalf("var without type, init: %v", n.Sym) } if n.Name.Defn.Op == ONAME { @@ -3700,9 +3701,9 @@ func typecheckdef(n *Node) { n.SetWalkdef(1) setTypeNode(n, types.New(TFORW)) n.Type.Sym = n.Sym - errorsBefore := Errors() + errorsBefore := base.Errors() typecheckdeftype(n) - if n.Type.Etype == TFORW && Errors() > errorsBefore { + if n.Type.Etype == TFORW && base.Errors() > errorsBefore { // Something went wrong during type-checking, // but it was reported. Silence future errors. 
n.Type.SetBroke(true) @@ -3712,23 +3713,23 @@ func typecheckdef(n *Node) { ret: if n.Op != OLITERAL && n.Type != nil && n.Type.IsUntyped() { - Fatalf("got %v for %v", n.Type, n) + base.Fatalf("got %v for %v", n.Type, n) } last := len(typecheckdefstack) - 1 if typecheckdefstack[last] != n { - Fatalf("typecheckdefstack mismatch") + base.Fatalf("typecheckdefstack mismatch") } typecheckdefstack[last] = nil typecheckdefstack = typecheckdefstack[:last] - lineno = lno + base.Pos = lno n.SetWalkdef(1) } func checkmake(t *types.Type, arg string, np **Node) bool { n := *np if !n.Type.IsInteger() && n.Type.Etype != TIDEAL { - yyerror("non-integer %s argument in make(%v) - %v", arg, t, n.Type) + base.Errorf("non-integer %s argument in make(%v) - %v", arg, t, n.Type) return false } @@ -3737,11 +3738,11 @@ func checkmake(t *types.Type, arg string, np **Node) bool { if n.Op == OLITERAL { v := toint(n.Val()) if constant.Sign(v) < 0 { - yyerror("negative %s argument in make(%v)", arg, t) + base.Errorf("negative %s argument in make(%v)", arg, t) return false } if doesoverflow(v, types.Types[TINT]) { - yyerror("%s argument too large in make(%v)", arg, t) + base.Errorf("%s argument too large in make(%v)", arg, t) return false } } @@ -3874,7 +3875,7 @@ func checkreturn(fn *Node) { if fn.Type.NumResults() != 0 && fn.Nbody.Len() != 0 { markbreaklist(fn.Nbody, nil) if !fn.Nbody.isterminating() { - yyerrorl(fn.Func.Endlineno, "missing return at end of function") + base.ErrorfAt(fn.Func.Endlineno, "missing return at end of function") } } } @@ -4047,6 +4048,6 @@ func (n *Node) MethodFunc() *types.Field { case OCALLPART: return callpartMethod(n) } - Fatalf("unexpected node: %v (%v)", n, n.Op) + base.Fatalf("unexpected node: %v (%v)", n, n.Op) panic("unreachable") } diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index 8c32f2f6d2cb4..aa0ee4075dd18 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -7,6 +7,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/src" ) @@ -98,7 +99,7 @@ func lexinit() { for _, s := range &basicTypes { etype := s.etype if int(etype) >= len(types.Types) { - Fatalf("lexinit: %s bad etype", s.name) + base.Fatalf("lexinit: %s bad etype", s.name) } s2 := builtinpkg.Lookup(s.name) t := types.Types[etype] @@ -169,7 +170,7 @@ func lexinit() { func typeinit() { if Widthptr == 0 { - Fatalf("typeinit before betypeinit") + base.Fatalf("typeinit before betypeinit") } for et := types.EType(0); et < NTYPE; et++ { diff --git a/src/cmd/compile/internal/gc/unsafe.go b/src/cmd/compile/internal/gc/unsafe.go index a3151e83bf04d..a1c1c1bf6e291 100644 --- a/src/cmd/compile/internal/gc/unsafe.go +++ b/src/cmd/compile/internal/gc/unsafe.go @@ -4,6 +4,8 @@ package gc +import "cmd/compile/internal/base" + // evalunsafe evaluates a package unsafe operation and returns the result. func evalunsafe(n *Node) int64 { switch n.Op { @@ -23,7 +25,7 @@ func evalunsafe(n *Node) int64 { case OOFFSETOF: // must be a selector. 
if n.Left.Op != OXDOT { - yyerror("invalid expression %v", n) + base.Errorf("invalid expression %v", n) return 0 } @@ -41,10 +43,10 @@ func evalunsafe(n *Node) int64 { case ODOT, ODOTPTR: break case OCALLPART: - yyerror("invalid expression %v: argument is a method value", n) + base.Errorf("invalid expression %v: argument is a method value", n) return 0 default: - yyerror("invalid expression %v", n) + base.Errorf("invalid expression %v", n) return 0 } @@ -57,7 +59,7 @@ func evalunsafe(n *Node) int64 { // but accessing f must not otherwise involve // indirection via embedded pointer types. if r.Left != sbase { - yyerror("invalid expression %v: selector implies indirection of embedded %v", n, r.Left) + base.Errorf("invalid expression %v: selector implies indirection of embedded %v", n, r.Left) return 0 } fallthrough @@ -65,12 +67,12 @@ func evalunsafe(n *Node) int64 { v += r.Xoffset default: Dump("unsafenmagic", n.Left) - Fatalf("impossible %#v node after dot insertion", r.Op) + base.Fatalf("impossible %#v node after dot insertion", r.Op) } } return v } - Fatalf("unexpected op %v", n.Op) + base.Fatalf("unexpected op %v", n.Op) return 0 } diff --git a/src/cmd/compile/internal/gc/util.go b/src/cmd/compile/internal/gc/util.go index d1a5993daff37..597a29a940e56 100644 --- a/src/cmd/compile/internal/gc/util.go +++ b/src/cmd/compile/internal/gc/util.go @@ -8,27 +8,14 @@ import ( "os" "runtime" "runtime/pprof" + + "cmd/compile/internal/base" ) // Line returns n's position as a string. If n has been inlined, // it uses the outermost position where n has been inlined. func (n *Node) Line() string { - return linestr(n.Pos) -} - -var atExitFuncs []func() - -func atExit(f func()) { - atExitFuncs = append(atExitFuncs, f) -} - -func Exit(code int) { - for i := len(atExitFuncs) - 1; i >= 0; i-- { - f := atExitFuncs[i] - atExitFuncs = atExitFuncs[:i] - f() - } - os.Exit(code) + return base.FmtPos(n.Pos) } var ( @@ -37,25 +24,25 @@ var ( ) func startProfile() { - if Flag.CPUProfile != "" { - f, err := os.Create(Flag.CPUProfile) + if base.Flag.CPUProfile != "" { + f, err := os.Create(base.Flag.CPUProfile) if err != nil { - Fatalf("%v", err) + base.Fatalf("%v", err) } if err := pprof.StartCPUProfile(f); err != nil { - Fatalf("%v", err) + base.Fatalf("%v", err) } - atExit(pprof.StopCPUProfile) + base.AtExit(pprof.StopCPUProfile) } - if Flag.MemProfile != "" { + if base.Flag.MemProfile != "" { if memprofilerate != 0 { runtime.MemProfileRate = int(memprofilerate) } - f, err := os.Create(Flag.MemProfile) + f, err := os.Create(base.Flag.MemProfile) if err != nil { - Fatalf("%v", err) + base.Fatalf("%v", err) } - atExit(func() { + base.AtExit(func() { // Profile all outstanding allocations. runtime.GC() // compilebench parses the memory profile to extract memstats, @@ -63,36 +50,36 @@ func startProfile() { // See golang.org/issue/18641 and runtime/pprof/pprof.go:writeHeap. const writeLegacyFormat = 1 if err := pprof.Lookup("heap").WriteTo(f, writeLegacyFormat); err != nil { - Fatalf("%v", err) + base.Fatalf("%v", err) } }) } else { // Not doing memory profiling; disable it entirely. 
runtime.MemProfileRate = 0 } - if Flag.BlockProfile != "" { - f, err := os.Create(Flag.BlockProfile) + if base.Flag.BlockProfile != "" { + f, err := os.Create(base.Flag.BlockProfile) if err != nil { - Fatalf("%v", err) + base.Fatalf("%v", err) } runtime.SetBlockProfileRate(1) - atExit(func() { + base.AtExit(func() { pprof.Lookup("block").WriteTo(f, 0) f.Close() }) } - if Flag.MutexProfile != "" { - f, err := os.Create(Flag.MutexProfile) + if base.Flag.MutexProfile != "" { + f, err := os.Create(base.Flag.MutexProfile) if err != nil { - Fatalf("%v", err) + base.Fatalf("%v", err) } startMutexProfiling() - atExit(func() { + base.AtExit(func() { pprof.Lookup("mutex").WriteTo(f, 0) f.Close() }) } - if Flag.TraceProfile != "" && traceHandler != nil { - traceHandler(Flag.TraceProfile) + if base.Flag.TraceProfile != "" && traceHandler != nil { + traceHandler(base.Flag.TraceProfile) } } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index de2733909e3e8..d7cd7ddf27ca3 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/objabi" @@ -22,14 +23,14 @@ const zeroValSize = 1024 // must match value of runtime/map.go:maxZero func walk(fn *Node) { Curfn = fn - errorsBefore := Errors() + errorsBefore := base.Errors() - if Flag.W != 0 { + if base.Flag.W != 0 { s := fmt.Sprintf("\nbefore walk %v", Curfn.Func.Nname.Sym) dumplist(s, Curfn.Nbody) } - lno := lineno + lno := base.Pos // Final typecheck for any unused variables. for i, ln := range fn.Func.Dcl { @@ -54,26 +55,26 @@ func walk(fn *Node) { if defn.Left.Name.Used() { continue } - yyerrorl(defn.Left.Pos, "%v declared but not used", ln.Sym) + base.ErrorfAt(defn.Left.Pos, "%v declared but not used", ln.Sym) defn.Left.Name.SetUsed(true) // suppress repeats } else { - yyerrorl(ln.Pos, "%v declared but not used", ln.Sym) + base.ErrorfAt(ln.Pos, "%v declared but not used", ln.Sym) } } - lineno = lno - if Errors() > errorsBefore { + base.Pos = lno + if base.Errors() > errorsBefore { return } walkstmtlist(Curfn.Nbody.Slice()) - if Flag.W != 0 { + if base.Flag.W != 0 { s := fmt.Sprintf("after walk %v", Curfn.Func.Nname.Sym) dumplist(s, Curfn.Nbody) } zeroResults() heapmoves() - if Flag.W != 0 && Curfn.Func.Enter.Len() > 0 { + if base.Flag.W != 0 && Curfn.Func.Enter.Len() > 0 { s := fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym) dumplist(s, Curfn.Func.Enter) } @@ -116,9 +117,9 @@ func walkstmt(n *Node) *Node { switch n.Op { default: if n.Op == ONAME { - yyerror("%v is not a top level statement", n.Sym) + base.Errorf("%v is not a top level statement", n.Sym) } else { - yyerror("%v is not a top level statement", n.Op) + base.Errorf("%v is not a top level statement", n.Op) } Dump("nottop", n) @@ -144,7 +145,7 @@ func walkstmt(n *Node) *Node { ORECOVER, OGETG: if n.Typecheck() == 0 { - Fatalf("missing typecheck: %+v", n) + base.Fatalf("missing typecheck: %+v", n) } wascopy := n.Op == OCOPY init := n.Ninit @@ -159,7 +160,7 @@ func walkstmt(n *Node) *Node { // the value received. 
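Stepping back to the util.go hunks above: they delete gc's private exit hooks in favor of base.AtExit. Presumably the base side mirrors the helpers that were removed; a sketch under that assumption (the names AtExit and Exit are taken from the call sites, the body is inferred from the deleted code):

package base

import "os"

var atExitFuncs []func()

// AtExit schedules f to run when the compiler terminates through Exit.
func AtExit(f func()) {
	atExitFuncs = append(atExitFuncs, f)
}

// Exit runs the registered hooks in LIFO order, popping each one before
// calling it so a hook that itself exits cannot rerun the others, then
// terminates the process.
func Exit(code int) {
	for i := len(atExitFuncs) - 1; i >= 0; i-- {
		f := atExitFuncs[i]
		atExitFuncs = atExitFuncs[:i]
		f()
	}
	os.Exit(code)
}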
case ORECV: if n.Typecheck() == 0 { - Fatalf("missing typecheck: %+v", n) + base.Fatalf("missing typecheck: %+v", n) } init := n.Ninit n.Ninit.Set(nil) @@ -186,8 +187,8 @@ func walkstmt(n *Node) *Node { case ODCL: v := n.Left if v.Class() == PAUTOHEAP { - if Flag.CompilingRuntime { - yyerror("%v escapes to heap, not allowed in runtime", v) + if base.Flag.CompilingRuntime { + base.Errorf("%v escapes to heap, not allowed in runtime", v) } if prealloc[v] == nil { prealloc[v] = callnew(v.Type) @@ -202,7 +203,7 @@ func walkstmt(n *Node) *Node { walkstmtlist(n.List.Slice()) case OCASE: - yyerror("case statement out of place") + base.Errorf("case statement out of place") case ODEFER: Curfn.Func.SetHasDefer(true) @@ -291,7 +292,7 @@ func walkstmt(n *Node) *Node { if got, want := n.List.Len(), len(rl); got != want { // order should have rewritten multi-value function calls // with explicit OAS2FUNC nodes. - Fatalf("expected %v return arguments, have %v", want, got) + base.Fatalf("expected %v return arguments, have %v", want, got) } // move function calls out, to make reorder3's job easier. @@ -334,7 +335,7 @@ func walkstmt(n *Node) *Node { } if n.Op == ONAME { - Fatalf("walkstmt ended up with name: %+v", n) + base.Fatalf("walkstmt ended up with name: %+v", n) } return n } @@ -405,7 +406,7 @@ func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) { return "convT2I", true } } - Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie()) + base.Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie()) panic("unreachable") } @@ -429,7 +430,7 @@ func walkexpr(n *Node, init *Nodes) *Node { // not okay to use n->ninit when walking n, // because we might replace n with some other node // and would lose the init list. - Fatalf("walkexpr init == &n->ninit") + base.Fatalf("walkexpr init == &n->ninit") } if n.Ninit.Len() != 0 { @@ -439,16 +440,16 @@ func walkexpr(n *Node, init *Nodes) *Node { lno := setlineno(n) - if Flag.LowerW > 1 { + if base.Flag.LowerW > 1 { Dump("before walk expr", n) } if n.Typecheck() != 1 { - Fatalf("missed typecheck: %+v", n) + base.Fatalf("missed typecheck: %+v", n) } if n.Type.IsUntyped() { - Fatalf("expression has untyped type: %+v", n) + base.Fatalf("expression has untyped type: %+v", n) } if n.Op == ONAME && n.Class() == PAUTOHEAP { @@ -463,7 +464,7 @@ opswitch: switch n.Op { default: Dump("walk", n) - Fatalf("walkexpr: switch 1 unknown op %+S", n) + base.Fatalf("walkexpr: switch 1 unknown op %+S", n) case ONONAME, OEMPTY, OGETG, ONEWOBJ, OMETHEXPR: @@ -587,7 +588,7 @@ opswitch: // the mapassign call. mapAppend := n.Left.Op == OINDEXMAP && n.Right.Op == OAPPEND if mapAppend && !samesafeexpr(n.Left, n.Right.List.First()) { - Fatalf("not same expressions: %v != %v", n.Left, n.Right.List.First()) + base.Fatalf("not same expressions: %v != %v", n.Left, n.Right.List.First()) } n.Left = walkexpr(n.Left, init) @@ -638,7 +639,7 @@ opswitch: // x = append(...) 
r := n.Right if r.Type.Elem().NotInHeap() { - yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", r.Type.Elem()) + base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", r.Type.Elem()) } switch { case isAppendOfMake(r): @@ -1046,25 +1047,25 @@ opswitch: } if t.IsArray() { n.SetBounded(bounded(r, t.NumElem())) - if Flag.LowerM != 0 && n.Bounded() && !Isconst(n.Right, constant.Int) { - Warn("index bounds check elided") + if base.Flag.LowerM != 0 && n.Bounded() && !Isconst(n.Right, constant.Int) { + base.Warn("index bounds check elided") } if smallintconst(n.Right) && !n.Bounded() { - yyerror("index out of bounds") + base.Errorf("index out of bounds") } } else if Isconst(n.Left, constant.String) { n.SetBounded(bounded(r, int64(len(n.Left.StringVal())))) - if Flag.LowerM != 0 && n.Bounded() && !Isconst(n.Right, constant.Int) { - Warn("index bounds check elided") + if base.Flag.LowerM != 0 && n.Bounded() && !Isconst(n.Right, constant.Int) { + base.Warn("index bounds check elided") } if smallintconst(n.Right) && !n.Bounded() { - yyerror("index out of bounds") + base.Errorf("index out of bounds") } } if Isconst(n.Right, constant.Int) { if v := n.Right.Val(); constant.Sign(v) < 0 || doesoverflow(v, types.Types[TINT]) { - yyerror("index out of bounds") + base.Errorf("index out of bounds") } } @@ -1107,7 +1108,7 @@ opswitch: n.SetTypecheck(1) case ORECV: - Fatalf("walkexpr ORECV") // should see inside OAS only + base.Fatalf("walkexpr ORECV") // should see inside OAS only case OSLICEHEADER: n.Left = walkexpr(n.Left, init) @@ -1149,11 +1150,11 @@ opswitch: case ONEW: if n.Type.Elem().NotInHeap() { - yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type.Elem()) + base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type.Elem()) } if n.Esc == EscNone { if n.Type.Elem().Width >= maxImplicitStackVarSize { - Fatalf("large ONEW with EscNone: %v", n) + base.Fatalf("large ONEW with EscNone: %v", n) } r := temp(n.Type.Elem()) r = nod(OAS, r, nil) // zero temp @@ -1171,10 +1172,10 @@ opswitch: case OAPPEND: // order should make sure we only see OAS(node, OAPPEND), which we handle above. 
- Fatalf("append outside assignment") + base.Fatalf("append outside assignment") case OCOPY: - n = copyany(n, init, instrumenting && !Flag.CompilingRuntime) + n = copyany(n, init, instrumenting && !base.Flag.CompilingRuntime) // cannot use chanfn - closechan takes any, not chan any case OCLOSE: @@ -1320,17 +1321,17 @@ opswitch: } t := n.Type if t.Elem().NotInHeap() { - yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem()) + base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem()) } if n.Esc == EscNone { if why := heapAllocReason(n); why != "" { - Fatalf("%v has EscNone, but %v", n, why) + base.Fatalf("%v has EscNone, but %v", n, why) } // var arr [r]T // n = arr[:l] i := indexconst(r) if i < 0 { - Fatalf("walkexpr: invalid index %v", r) + base.Fatalf("walkexpr: invalid index %v", r) } // cap is constrained to [0,2^31) or [0,2^63) depending on whether @@ -1392,12 +1393,12 @@ opswitch: case OMAKESLICECOPY: if n.Esc == EscNone { - Fatalf("OMAKESLICECOPY with EscNone: %v", n) + base.Fatalf("OMAKESLICECOPY with EscNone: %v", n) } t := n.Type if t.Elem().NotInHeap() { - yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem()) + base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem()) } length := conv(n.Left, types.Types[TINT]) @@ -1583,7 +1584,7 @@ opswitch: t := n.Type n = evalConst(n) if n.Type != t { - Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type) + base.Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type) } if n.Op == OLITERAL { n = typecheck(n, ctxExpr) @@ -1596,11 +1597,11 @@ opswitch: updateHasCall(n) - if Flag.LowerW != 0 && n != nil { + if base.Flag.LowerW != 0 && n != nil { Dump("after walk expr", n) } - lineno = lno + base.Pos = lno return n } @@ -1685,8 +1686,8 @@ func reduceSlice(n *Node) *Node { n.SetSliceBounds(low, high, max) if (n.Op == OSLICE || n.Op == OSLICESTR) && low == nil && high == nil { // Reduce x[:] to x. 
- if Debug.Slice > 0 { - Warn("slice: omit slice operation") + if base.Debug.Slice > 0 { + base.Warn("slice: omit slice operation") } return n.Left } @@ -1736,7 +1737,7 @@ func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node { var nln, nrn Nodes nln.Set(nl) nrn.Set(nr) - Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), Curfn.funcname()) + base.Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), Curfn.funcname()) } return nn } @@ -1758,7 +1759,7 @@ func fncall(l *Node, rt *types.Type) bool { // expr-list = func() func ascompatet(nl Nodes, nr *types.Type) []*Node { if nl.Len() != nr.NumFields() { - Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields()) + base.Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields()) } var nn, mm Nodes @@ -1780,7 +1781,7 @@ func ascompatet(nl Nodes, nr *types.Type) []*Node { } res := nod(ORESULT, nil, nil) - res.Xoffset = Ctxt.FixedFrameSize() + r.Offset + res.Xoffset = base.Ctxt.FixedFrameSize() + r.Offset res.Type = r.Type res.SetTypecheck(1) @@ -1789,7 +1790,7 @@ func ascompatet(nl Nodes, nr *types.Type) []*Node { updateHasCall(a) if a.HasCall() { Dump("ascompatet ucount", a) - Fatalf("ascompatet: too many function calls evaluating parameters") + base.Fatalf("ascompatet: too many function calls evaluating parameters") } nn.Append(a) @@ -1811,7 +1812,7 @@ func mkdotargslice(typ *types.Type, args []*Node) *Node { n = typecheck(n, ctxExpr) if n.Type == nil { - Fatalf("mkdotargslice: typecheck failed") + base.Fatalf("mkdotargslice: typecheck failed") } return n } @@ -2069,7 +2070,7 @@ func isReflectHeaderDataField(l *Node) bool { func convas(n *Node, init *Nodes) *Node { if n.Op != OAS { - Fatalf("convas: not OAS %v", n.Op) + base.Fatalf("convas: not OAS %v", n.Op) } defer updateHasCall(n) @@ -2134,7 +2135,7 @@ func reorder3(all []*Node) []*Node { switch l.Op { default: - Fatalf("reorder3 unexpected lvalue %#v", l.Op) + base.Fatalf("reorder3 unexpected lvalue %#v", l.Op) case ONAME: break @@ -2182,7 +2183,7 @@ func outervalue(n *Node) *Node { for { switch n.Op { case OXDOT: - Fatalf("OXDOT in walk") + base.Fatalf("OXDOT in walk") case ODOT, OPAREN, OCONVNOP: n = n.Left continue @@ -2230,7 +2231,7 @@ func aliased(r *Node, all []*Node) bool { switch l.Class() { default: - Fatalf("unexpected class: %v, %v", l, l.Class()) + base.Fatalf("unexpected class: %v, %v", l, l.Class()) case PAUTOHEAP, PEXTERN: memwrite = true @@ -2317,7 +2318,7 @@ func varexpr(n *Node) bool { case ODOT: // but not ODOTPTR // Should have been handled in aliased. - Fatalf("varexpr unexpected ODOT") + base.Fatalf("varexpr unexpected ODOT") } // Be conservative. @@ -2468,25 +2469,25 @@ func returnsfromheap(params *types.Type) []*Node { // between the stack and the heap. The generated code is added to Curfn's // Enter and Exit lists. func heapmoves() { - lno := lineno - lineno = Curfn.Pos + lno := base.Pos + base.Pos = Curfn.Pos nn := paramstoheap(Curfn.Type.Recvs()) nn = append(nn, paramstoheap(Curfn.Type.Params())...) nn = append(nn, paramstoheap(Curfn.Type.Results())...) Curfn.Func.Enter.Append(nn...) - lineno = Curfn.Func.Endlineno + base.Pos = Curfn.Func.Endlineno Curfn.Func.Exit.Append(returnsfromheap(Curfn.Type.Results())...) 
- lineno = lno + base.Pos = lno } func vmkcall(fn *Node, t *types.Type, init *Nodes, va []*Node) *Node { if fn.Type == nil || fn.Type.Etype != TFUNC { - Fatalf("mkcall %v %v", fn, fn.Type) + base.Fatalf("mkcall %v %v", fn, fn.Type) } n := fn.Type.NumParams() if n != len(va) { - Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va)) + base.Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va)) } r := nod(OCALL, fn, nil) @@ -2552,12 +2553,12 @@ func byteindex(n *Node) *Node { func chanfn(name string, n int, t *types.Type) *Node { if !t.IsChan() { - Fatalf("chanfn %v", t) + base.Fatalf("chanfn %v", t) } fn := syslook(name) switch n { default: - Fatalf("chanfn %d", n) + base.Fatalf("chanfn %d", n) case 1: fn = substArgTypes(fn, t.Elem()) case 2: @@ -2568,7 +2569,7 @@ func chanfn(name string, n int, t *types.Type) *Node { func mapfn(name string, t *types.Type) *Node { if !t.IsMap() { - Fatalf("mapfn %v", t) + base.Fatalf("mapfn %v", t) } fn := syslook(name) fn = substArgTypes(fn, t.Key(), t.Elem(), t.Key(), t.Elem()) @@ -2577,7 +2578,7 @@ func mapfn(name string, t *types.Type) *Node { func mapfndel(name string, t *types.Type) *Node { if !t.IsMap() { - Fatalf("mapfn %v", t) + base.Fatalf("mapfn %v", t) } fn := syslook(name) fn = substArgTypes(fn, t.Key(), t.Elem(), t.Key()) @@ -2618,7 +2619,7 @@ func mapfast(t *types.Type) int { if Widthptr == 4 { return mapfast32ptr } - Fatalf("small pointer %v", t.Key()) + base.Fatalf("small pointer %v", t.Key()) case AMEM64: if !t.Key().HasPointers() { return mapfast64 @@ -2645,7 +2646,7 @@ func addstr(n *Node, init *Nodes) *Node { c := n.List.Len() if c < 2 { - Fatalf("addstr count %d too small", c) + base.Fatalf("addstr count %d too small", c) } buf := nodnil() @@ -2784,7 +2785,7 @@ func appendslice(n *Node, init *Nodes) *Node { ptr1, len1 := nptr1.backingArrayPtrLen() ptr2, len2 := nptr2.backingArrayPtrLen() ncopy = mkcall1(fn, types.Types[TINT], &nodes, typename(elemtype), ptr1, len1, ptr2, len2) - } else if instrumenting && !Flag.CompilingRuntime { + } else if instrumenting && !base.Flag.CompilingRuntime { // rely on runtime to instrument: // copy(s[len(l1):], l2) // l2 can be a slice or string. @@ -2827,12 +2828,12 @@ func appendslice(n *Node, init *Nodes) *Node { // isAppendOfMake reports whether n is of the form append(x , make([]T, y)...). // isAppendOfMake assumes n has already been typechecked. func isAppendOfMake(n *Node) bool { - if Flag.N != 0 || instrumenting { + if base.Flag.N != 0 || instrumenting { return false } if n.Typecheck() == 0 { - Fatalf("missing typecheck: %+v", n) + base.Fatalf("missing typecheck: %+v", n) } if n.Op != OAPPEND || !n.IsDDD() || n.List.Len() != 2 { @@ -3036,7 +3037,7 @@ func walkappend(n *Node, init *Nodes, dst *Node) *Node { // General case, with no function calls left as arguments. // Leave for gen, except that instrumentation requires old form. 
- if !instrumenting || Flag.CompilingRuntime { + if !instrumenting || base.Flag.CompilingRuntime { return n } @@ -3185,7 +3186,7 @@ func eqfor(t *types.Type) (n *Node, needsize bool) { }) return n, false } - Fatalf("eqfor %v", t) + base.Fatalf("eqfor %v", t) return nil, false } @@ -3262,7 +3263,7 @@ func walkcompare(n *Node, init *Nodes) *Node { switch t.Etype { default: - if Debug.Libfuzzer != 0 && t.IsInteger() { + if base.Debug.Libfuzzer != 0 && t.IsInteger() { n.Left = cheapexpr(n.Left, init) n.Right = cheapexpr(n.Right, init) @@ -3304,7 +3305,7 @@ func walkcompare(n *Node, init *Nodes) *Node { } paramType = types.Types[TUINT64] default: - Fatalf("unexpected integer size %d for %v", t.Size(), t) + base.Fatalf("unexpected integer size %d for %v", t.Size(), t) } init.Append(mkcall(fn, nil, init, tracecmpArg(l, paramType, init), tracecmpArg(r, paramType, init))) } @@ -3329,7 +3330,7 @@ func walkcompare(n *Node, init *Nodes) *Node { if !inline { // eq algs take pointers; cmpl and cmpr must be addressable if !islvalue(cmpl) || !islvalue(cmpr) { - Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr) + base.Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr) } fn, needsize := eqfor(t) @@ -3722,7 +3723,7 @@ func usefield(n *Node) { switch n.Op { default: - Fatalf("usefield %v", n.Op) + base.Fatalf("usefield %v", n.Op) case ODOT, ODOTPTR: break @@ -3739,10 +3740,10 @@ func usefield(n *Node) { } field := n.Opt().(*types.Field) if field == nil { - Fatalf("usefield %v %v without paramfld", n.Left.Type, n.Sym) + base.Fatalf("usefield %v %v without paramfld", n.Left.Type, n.Sym) } if field.Sym != n.Sym || field.Offset != n.Xoffset { - Fatalf("field inconsistency: %v,%v != %v,%v", field.Sym, field.Offset, n.Sym, n.Xoffset) + base.Fatalf("field inconsistency: %v,%v != %v,%v", field.Sym, field.Offset, n.Sym, n.Xoffset) } if !strings.Contains(field.Note, "go:\"track\"") { return @@ -3753,10 +3754,10 @@ func usefield(n *Node) { outer = outer.Elem() } if outer.Sym == nil { - yyerror("tracked field must be in named struct type") + base.Errorf("tracked field must be in named struct type") } if !types.IsExported(field.Sym.Name) { - yyerror("tracked field must be exported (upper case)") + base.Errorf("tracked field must be exported (upper case)") } sym := tracksym(outer, field) @@ -3968,7 +3969,7 @@ func substArgTypes(old *Node, types_ ...*types.Type) *Node { } n.Type = types.SubstAny(n.Type, &types_) if len(types_) > 0 { - Fatalf("substArgTypes: too many argument types") + base.Fatalf("substArgTypes: too many argument types") } return n } @@ -3991,17 +3992,17 @@ func canMergeLoads() bool { // isRuneCount reports whether n is of the form len([]rune(string)). // These are optimized into a call to runtime.countrunes. func isRuneCount(n *Node) bool { - return Flag.N == 0 && !instrumenting && n.Op == OLEN && n.Left.Op == OSTR2RUNES + return base.Flag.N == 0 && !instrumenting && n.Op == OLEN && n.Left.Op == OSTR2RUNES } func walkCheckPtrAlignment(n *Node, init *Nodes, count *Node) *Node { if !n.Type.IsPtr() { - Fatalf("expected pointer type: %v", n.Type) + base.Fatalf("expected pointer type: %v", n.Type) } elem := n.Type.Elem() if count != nil { if !elem.IsArray() { - Fatalf("expected array type: %v", elem) + base.Fatalf("expected array type: %v", elem) } elem = elem.Elem() } @@ -4031,7 +4032,7 @@ func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node { } else if opt != nil { // We use n.Opt() here because today it's not used for OCONVNOP. 
If that changes, // there's no guarantee that temporarily replacing it is safe, so just hard fail here. - Fatalf("unexpected Opt: %v", opt) + base.Fatalf("unexpected Opt: %v", opt) } n.SetOpt(&walkCheckPtrArithmeticMarker) defer n.SetOpt(nil) @@ -4087,5 +4088,5 @@ func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node { // function fn at a given level. See debugHelpFooter for defined // levels. func checkPtr(fn *Node, level int) bool { - return Debug.Checkptr >= level && fn.Func.Pragma&NoCheckPtr == 0 + return base.Debug.Checkptr >= level && fn.Func.Pragma&NoCheckPtr == 0 } diff --git a/src/cmd/compile/internal/mips/ggen.go b/src/cmd/compile/internal/mips/ggen.go index 5e867721c3fcf..2356267df7267 100644 --- a/src/cmd/compile/internal/mips/ggen.go +++ b/src/cmd/compile/internal/mips/ggen.go @@ -5,6 +5,7 @@ package mips import ( + "cmd/compile/internal/base" "cmd/compile/internal/gc" "cmd/internal/obj" "cmd/internal/obj/mips" @@ -18,7 +19,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { } if cnt < int64(4*gc.Widthptr) { for i := int64(0); i < cnt; i += int64(gc.Widthptr) { - p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, gc.Ctxt.FixedFrameSize()+off+i) + p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.FixedFrameSize()+off+i) } } else { //fmt.Printf("zerorange frame:%v, lo: %v, hi:%v \n", frame ,lo, hi) @@ -28,7 +29,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { // MOVW R0, (Widthptr)r1 // ADD $Widthptr, r1 // BNE r1, r2, loop - p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0) + p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0) p.Reg = mips.REGSP p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0) p.Reg = mips.REGRT1 diff --git a/src/cmd/compile/internal/mips/ssa.go b/src/cmd/compile/internal/mips/ssa.go index 1d2e2c79e6ea9..c37a2e07149d9 100644 --- a/src/cmd/compile/internal/mips/ssa.go +++ b/src/cmd/compile/internal/mips/ssa.go @@ -7,6 +7,7 @@ package mips import ( "math" + "cmd/compile/internal/base" "cmd/compile/internal/gc" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" @@ -766,8 +767,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if logopt.Enabled() { logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) } - if gc.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers - gc.Warnl(v.Pos, "generated nil check") + if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + base.WarnfAt(v.Pos, "generated nil check") } case ssa.OpMIPSFPFlagTrue, ssa.OpMIPSFPFlagFalse: @@ -796,7 +797,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // caller's SP is FixedFrameSize below the address of the first arg p := s.Prog(mips.AMOVW) p.From.Type = obj.TYPE_ADDR - p.From.Offset = -gc.Ctxt.FixedFrameSize() + p.From.Offset = -base.Ctxt.FixedFrameSize() p.From.Name = obj.NAME_PARAM p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() diff --git a/src/cmd/compile/internal/mips64/ssa.go b/src/cmd/compile/internal/mips64/ssa.go index 067b8158c939f..a7c10d8869b5d 100644 --- a/src/cmd/compile/internal/mips64/ssa.go +++ b/src/cmd/compile/internal/mips64/ssa.go @@ -7,6 +7,7 @@ package mips64 import ( "math" + "cmd/compile/internal/base" "cmd/compile/internal/gc" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" 
@@ -724,8 +725,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if logopt.Enabled() { logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) } - if gc.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers - gc.Warnl(v.Pos, "generated nil check") + if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + base.WarnfAt(v.Pos, "generated nil check") } case ssa.OpMIPS64FPFlagTrue, ssa.OpMIPS64FPFlagFalse: @@ -757,7 +758,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // caller's SP is FixedFrameSize below the address of the first arg p := s.Prog(mips.AMOVV) p.From.Type = obj.TYPE_ADDR - p.From.Offset = -gc.Ctxt.FixedFrameSize() + p.From.Offset = -base.Ctxt.FixedFrameSize() p.From.Name = obj.NAME_PARAM p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() diff --git a/src/cmd/compile/internal/ppc64/ggen.go b/src/cmd/compile/internal/ppc64/ggen.go index a5a772b49179e..8f5caf5f991b9 100644 --- a/src/cmd/compile/internal/ppc64/ggen.go +++ b/src/cmd/compile/internal/ppc64/ggen.go @@ -5,6 +5,7 @@ package ppc64 import ( + "cmd/compile/internal/base" "cmd/compile/internal/gc" "cmd/internal/obj" "cmd/internal/obj/ppc64" @@ -16,17 +17,17 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { } if cnt < int64(4*gc.Widthptr) { for i := int64(0); i < cnt; i += int64(gc.Widthptr) { - p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, gc.Ctxt.FixedFrameSize()+off+i) + p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, base.Ctxt.FixedFrameSize()+off+i) } } else if cnt <= int64(128*gc.Widthptr) { - p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0) + p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0) p.Reg = ppc64.REGSP p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) p.To.Name = obj.NAME_EXTERN p.To.Sym = gc.Duffzero p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr)) } else { - p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0) + p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0) p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0) p.Reg = ppc64.REGSP p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0) @@ -66,7 +67,7 @@ func ginsnopdefer(pp *gc.Progs) *obj.Prog { // on ppc64 in both shared and non-shared modes. 
ginsnop(pp) - if gc.Ctxt.Flag_shared { + if base.Ctxt.Flag_shared { p := pp.Prog(ppc64.AMOVD) p.From.Type = obj.TYPE_MEM p.From.Offset = 24 diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go index f0e7c4192336f..e3f0ee1a932f7 100644 --- a/src/cmd/compile/internal/ppc64/ssa.go +++ b/src/cmd/compile/internal/ppc64/ssa.go @@ -5,6 +5,7 @@ package ppc64 import ( + "cmd/compile/internal/base" "cmd/compile/internal/gc" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" @@ -473,7 +474,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // caller's SP is FixedFrameSize below the address of the first arg p := s.Prog(ppc64.AMOVD) p.From.Type = obj.TYPE_ADDR - p.From.Offset = -gc.Ctxt.FixedFrameSize() + p.From.Offset = -base.Ctxt.FixedFrameSize() p.From.Name = obj.NAME_PARAM p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() @@ -1784,7 +1785,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // Insert a hint this is not a subroutine return. pp.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: 1}) - if gc.Ctxt.Flag_shared { + if base.Ctxt.Flag_shared { // When compiling Go into PIC, the function we just // called via pointer might have been implemented in // a separate module and so overwritten the TOC @@ -1852,8 +1853,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if logopt.Enabled() { logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) } - if gc.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers - gc.Warnl(v.Pos, "generated nil check") + if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + base.WarnfAt(v.Pos, "generated nil check") } // These should be resolved by rules and not make it here. diff --git a/src/cmd/compile/internal/riscv64/ggen.go b/src/cmd/compile/internal/riscv64/ggen.go index f7c03fe7c2173..18905a4aea361 100644 --- a/src/cmd/compile/internal/riscv64/ggen.go +++ b/src/cmd/compile/internal/riscv64/ggen.go @@ -5,6 +5,7 @@ package riscv64 import ( + "cmd/compile/internal/base" "cmd/compile/internal/gc" "cmd/internal/obj" "cmd/internal/obj/riscv" @@ -16,7 +17,7 @@ func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { } // Adjust the frame to account for LR. 
- off += gc.Ctxt.FixedFrameSize() + off += base.Ctxt.FixedFrameSize() if cnt < int64(4*gc.Widthptr) { for i := int64(0); i < cnt; i += int64(gc.Widthptr) { diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go index d49927ee04e93..5a71b33c00cb6 100644 --- a/src/cmd/compile/internal/riscv64/ssa.go +++ b/src/cmd/compile/internal/riscv64/ssa.go @@ -5,6 +5,7 @@ package riscv64 import ( + "cmd/compile/internal/base" "cmd/compile/internal/gc" "cmd/compile/internal/ssa" "cmd/compile/internal/types" @@ -91,7 +92,7 @@ func loadByType(t *types.Type) obj.As { case 8: return riscv.AMOVD default: - gc.Fatalf("unknown float width for load %d in type %v", width, t) + base.Fatalf("unknown float width for load %d in type %v", width, t) return 0 } } @@ -118,7 +119,7 @@ func loadByType(t *types.Type) obj.As { case 8: return riscv.AMOV default: - gc.Fatalf("unknown width for load %d in type %v", width, t) + base.Fatalf("unknown width for load %d in type %v", width, t) return 0 } } @@ -134,7 +135,7 @@ func storeByType(t *types.Type) obj.As { case 8: return riscv.AMOVD default: - gc.Fatalf("unknown float width for store %d in type %v", width, t) + base.Fatalf("unknown float width for store %d in type %v", width, t) return 0 } } @@ -149,7 +150,7 @@ func storeByType(t *types.Type) obj.As { case 8: return riscv.AMOV default: - gc.Fatalf("unknown width for store %d in type %v", width, t) + base.Fatalf("unknown width for store %d in type %v", width, t) return 0 } } @@ -586,8 +587,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { gc.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = riscv.REG_ZERO - if gc.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers - gc.Warnl(v.Pos, "generated nil check") + if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers + base.WarnfAt(v.Pos, "generated nil check") } case ssa.OpRISCV64LoweredGetClosurePtr: @@ -598,7 +599,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // caller's SP is FixedFrameSize below the address of the first arg p := s.Prog(riscv.AMOV) p.From.Type = obj.TYPE_ADDR - p.From.Offset = -gc.Ctxt.FixedFrameSize() + p.From.Offset = -base.Ctxt.FixedFrameSize() p.From.Name = obj.NAME_PARAM p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() diff --git a/src/cmd/compile/internal/s390x/ggen.go b/src/cmd/compile/internal/s390x/ggen.go index 5a837d8574236..0e2f48bf4cc3e 100644 --- a/src/cmd/compile/internal/s390x/ggen.go +++ b/src/cmd/compile/internal/s390x/ggen.go @@ -5,6 +5,7 @@ package s390x import ( + "cmd/compile/internal/base" "cmd/compile/internal/gc" "cmd/internal/obj" "cmd/internal/obj/s390x" @@ -23,7 +24,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { } // Adjust the frame to account for LR. 
- off += gc.Ctxt.FixedFrameSize() + off += base.Ctxt.FixedFrameSize() reg := int16(s390x.REGSP) // If the off cannot fit in a 12-bit unsigned displacement then we diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go index cb13f8d3c0d28..366adffd986f6 100644 --- a/src/cmd/compile/internal/s390x/ssa.go +++ b/src/cmd/compile/internal/s390x/ssa.go @@ -7,6 +7,7 @@ package s390x import ( "math" + "cmd/compile/internal/base" "cmd/compile/internal/gc" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" @@ -573,7 +574,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // caller's SP is FixedFrameSize below the address of the first arg p := s.Prog(s390x.AMOVD) p.From.Type = obj.TYPE_ADDR - p.From.Offset = -gc.Ctxt.FixedFrameSize() + p.From.Offset = -base.Ctxt.FixedFrameSize() p.From.Name = obj.NAME_PARAM p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() @@ -642,8 +643,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if logopt.Enabled() { logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) } - if gc.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers - gc.Warnl(v.Pos, "generated nil check") + if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + base.WarnfAt(v.Pos, "generated nil check") } case ssa.OpS390XMVC: vo := v.AuxValAndOff() diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go index 3f05515b9a6e5..373dc431e54c6 100644 --- a/src/cmd/compile/internal/wasm/ssa.go +++ b/src/cmd/compile/internal/wasm/ssa.go @@ -5,6 +5,7 @@ package wasm import ( + "cmd/compile/internal/base" "cmd/compile/internal/gc" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" @@ -33,7 +34,7 @@ func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Pr return p } if cnt%8 != 0 { - gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt) + base.Fatalf("zerorange count not a multiple of widthptr %d", cnt) } for i := int64(0); i < cnt; i += 8 { @@ -165,8 +166,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if logopt.Enabled() { logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) } - if gc.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers - gc.Warnl(v.Pos, "generated nil check") + if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + base.WarnfAt(v.Pos, "generated nil check") } case ssa.OpWasmLoweredWB: diff --git a/src/cmd/compile/internal/x86/galign.go b/src/cmd/compile/internal/x86/galign.go index e137daa3fc590..7d628f9b7c5df 100644 --- a/src/cmd/compile/internal/x86/galign.go +++ b/src/cmd/compile/internal/x86/galign.go @@ -5,6 +5,7 @@ package x86 import ( + "cmd/compile/internal/base" "cmd/compile/internal/gc" "cmd/internal/obj/x86" "cmd/internal/objabi" @@ -24,10 +25,10 @@ func Init(arch *gc.Arch) { arch.SoftFloat = true case "387": fmt.Fprintf(os.Stderr, "unsupported setting GO386=387. 
Consider using GO386=softfloat instead.\n") - gc.Exit(1) + base.Exit(1) default: fmt.Fprintf(os.Stderr, "unsupported setting GO386=%s\n", v) - gc.Exit(1) + base.Exit(1) } diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go index 65d7e75a533c2..a3aaf03c95845 100644 --- a/src/cmd/compile/internal/x86/ssa.go +++ b/src/cmd/compile/internal/x86/ssa.go @@ -8,6 +8,7 @@ import ( "fmt" "math" + "cmd/compile/internal/base" "cmd/compile/internal/gc" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" @@ -480,9 +481,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Name = obj.NAME_EXTERN f := math.Float64frombits(uint64(v.AuxInt)) if v.Op == ssa.Op386MOVSDconst1 { - p.From.Sym = gc.Ctxt.Float64Sym(f) + p.From.Sym = base.Ctxt.Float64Sym(f) } else { - p.From.Sym = gc.Ctxt.Float32Sym(float32(f)) + p.From.Sym = base.Ctxt.Float32Sym(float32(f)) } p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() @@ -713,7 +714,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { r := v.Reg() // See the comments in cmd/internal/obj/x86/obj6.go // near CanUse1InsnTLS for a detailed explanation of these instructions. - if x86.CanUse1InsnTLS(gc.Ctxt) { + if x86.CanUse1InsnTLS(base.Ctxt) { // MOVL (TLS), r p := s.Prog(x86.AMOVL) p.From.Type = obj.TYPE_MEM @@ -749,7 +750,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // caller's SP is the address of the first arg p := s.Prog(x86.AMOVL) p.From.Type = obj.TYPE_ADDR - p.From.Offset = -gc.Ctxt.FixedFrameSize() // 0 on 386, just to be consistent with other architectures + p.From.Offset = -base.Ctxt.FixedFrameSize() // 0 on 386, just to be consistent with other architectures p.From.Name = obj.NAME_PARAM p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() @@ -850,8 +851,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if logopt.Enabled() { logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) } - if gc.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers - gc.Warnl(v.Pos, "generated nil check") + if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + base.WarnfAt(v.Pos, "generated nil check") } case ssa.OpClobber: p := s.Prog(x86.AMOVL) diff --git a/src/cmd/compile/main.go b/src/cmd/compile/main.go index 3aa64a5ce251b..5a33719d870a2 100644 --- a/src/cmd/compile/main.go +++ b/src/cmd/compile/main.go @@ -8,6 +8,7 @@ import ( "cmd/compile/internal/amd64" "cmd/compile/internal/arm" "cmd/compile/internal/arm64" + "cmd/compile/internal/base" "cmd/compile/internal/gc" "cmd/compile/internal/mips" "cmd/compile/internal/mips64" @@ -50,5 +51,5 @@ func main() { } gc.Main(archInit) - gc.Exit(0) + base.Exit(0) } diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go index e39f284db566d..f8e1f2f95131c 100644 --- a/src/cmd/dist/buildtool.go +++ b/src/cmd/dist/buildtool.go @@ -38,6 +38,7 @@ var bootstrapDirs = []string{ "cmd/cgo", "cmd/compile", "cmd/compile/internal/amd64", + "cmd/compile/internal/base", "cmd/compile/internal/arm", "cmd/compile/internal/arm64", "cmd/compile/internal/gc", @@ -72,6 +73,7 @@ var bootstrapDirs = []string{ "cmd/internal/sys", "cmd/link", "cmd/link/internal/amd64", + "cmd/compile/internal/base", "cmd/link/internal/arm", "cmd/link/internal/arm64", "cmd/link/internal/benchmark", From 331b8b4797bc4e134a8d8b78bf1c060689144145 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Tue, 24 Nov 2020 22:52:37 -0500 Subject: [PATCH 039/474] [dev.regabi] cmd/compile: move okforconst into its own declaration It needs to move into package ir, and we do not 
want all the rest. Change-Id: Ibcfa1ebc0e63fe3659267bf2fa7069e8a93de4e9 Reviewed-on: https://go-review.googlesource.com/c/go/+/272930 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/go.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index e9ff5aeb138af..d9b8f704a9362 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -145,9 +145,10 @@ var ( okforcap [NTYPE]bool okforlen [NTYPE]bool okforarith [NTYPE]bool - okforconst [NTYPE]bool ) +var okforconst [NTYPE]bool + var ( okfor [OEND][]bool iscmp [OEND]bool From 84e2bd611f9b62ec3b581f8a0d932dc4252ceb67 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 19 Nov 2020 21:09:22 -0500 Subject: [PATCH 040/474] [dev.regabi] cmd/compile: introduce cmd/compile/internal/ir [generated] If we want to break up package gc at all, we will need to move the compiler IR it defines into a separate package that can be imported by packages that gc itself imports. This CL does that. It also removes the TINT8 etc aliases so that all code is clear about which package things are coming from. This CL is automatically generated by the script below. See the comments in the script for details about the changes. [git-generate] cd src/cmd/compile/internal/gc rf ' # These names were never fully qualified # when the types package was added. # Do it now, to avoid confusion about where they live. inline -rm \ Txxx \ TINT8 \ TUINT8 \ TINT16 \ TUINT16 \ TINT32 \ TUINT32 \ TINT64 \ TUINT64 \ TINT \ TUINT \ TUINTPTR \ TCOMPLEX64 \ TCOMPLEX128 \ TFLOAT32 \ TFLOAT64 \ TBOOL \ TPTR \ TFUNC \ TSLICE \ TARRAY \ TSTRUCT \ TCHAN \ TMAP \ TINTER \ TFORW \ TANY \ TSTRING \ TUNSAFEPTR \ TIDEAL \ TNIL \ TBLANK \ TFUNCARGS \ TCHANARGS \ NTYPE \ BADWIDTH # esc.go and escape.go do not need to be split. # Append esc.go onto the end of escape.go. mv esc.go escape.go # Pull out the type format installation from func Main, # so it can be carried into package ir. mv Main:/Sconv.=/-0,/TypeLinkSym/-1 InstallTypeFormats # Names that need to be exported for use by code left in gc. mv Isconst IsConst mv asNode AsNode mv asNodes AsNodes mv asTypesNode AsTypesNode mv basicnames BasicTypeNames mv builtinpkg BuiltinPkg mv consttype ConstType mv dumplist DumpList mv fdumplist FDumpList mv fmtMode FmtMode mv goopnames OpNames mv inspect Inspect mv inspectList InspectList mv localpkg LocalPkg mv nblank BlankNode mv numImport NumImport mv opprec OpPrec mv origSym OrigSym mv stmtwithinit StmtWithInit mv dump DumpAny mv fdump FDumpAny mv nod Nod mv nodl NodAt mv newname NewName mv newnamel NewNameAt mv assertRepresents AssertValidTypeForConst mv represents ValidTypeForConst mv nodlit NewLiteral # Types and fields that need to be exported for use by gc. mv nowritebarrierrecCallSym SymAndPos mv SymAndPos.lineno SymAndPos.Pos mv SymAndPos.target SymAndPos.Sym mv Func.lsym Func.LSym mv Func.setWBPos Func.SetWBPos mv Func.numReturns Func.NumReturns mv Func.numDefers Func.NumDefers mv Func.nwbrCalls Func.NWBRCalls # initLSym is an algorithm left behind in gc, # not an operation on Func itself. mv Func.initLSym initLSym mv nodeQueue NodeQueue mv NodeQueue.empty NodeQueue.Empty mv NodeQueue.popLeft NodeQueue.PopLeft mv NodeQueue.pushRight NodeQueue.PushRight # Many methods on Node are actually algorithms that # would apply to any node implementation. # Those become plain functions. 
mv Node.funcname FuncName mv Node.isBlank IsBlank mv Node.isGoConst isGoConst mv Node.isNil IsNil mv Node.isParamHeapCopy isParamHeapCopy mv Node.isParamStackCopy isParamStackCopy mv Node.isSimpleName isSimpleName mv Node.mayBeShared MayBeShared mv Node.pkgFuncName PkgFuncName mv Node.backingArrayPtrLen backingArrayPtrLen mv Node.isterminating isTermNode mv Node.labeledControl labeledControl mv Nodes.isterminating isTermNodes mv Nodes.sigerr fmtSignature mv Node.MethodName methodExprName mv Node.MethodFunc methodExprFunc mv Node.IsMethod IsMethod # Every node will need to implement RawCopy; # Copy and SepCopy algorithms will use it. mv Node.rawcopy Node.RawCopy mv Node.copy Copy mv Node.sepcopy SepCopy # Extract Node.Format method body into func FmtNode, # but leave method wrapper behind. mv Node.Format:0,$ FmtNode # Formatting helpers that will apply to all node implementations. mv Node.Line Line mv Node.exprfmt exprFmt mv Node.jconv jconvFmt mv Node.modeString modeString mv Node.nconv nconvFmt mv Node.nodedump nodeDumpFmt mv Node.nodefmt nodeFmt mv Node.stmtfmt stmtFmt # Constant support needed for code moving to ir. mv okforconst OKForConst mv vconv FmtConst mv int64Val Int64Val mv float64Val Float64Val mv Node.ValueInterface ConstValue # Organize code into files. mv LocalPkg BuiltinPkg ir.go mv NumImport InstallTypeFormats Line fmt.go mv syntax.go Nod NodAt NewNameAt Class Pxxx PragmaFlag Nointerface SymAndPos \ AsNode AsTypesNode BlankNode OrigSym \ Node.SliceBounds Node.SetSliceBounds Op.IsSlice3 \ IsConst Node.Int64Val Node.CanInt64 Node.Uint64Val Node.BoolVal Node.StringVal \ Node.RawCopy SepCopy Copy \ IsNil IsBlank IsMethod \ Node.Typ Node.StorageClass node.go mv ConstType ConstValue Int64Val Float64Val AssertValidTypeForConst ValidTypeForConst NewLiteral idealType OKForConst val.go # Move files to new ir package. mv bitset.go class_string.go dump.go fmt.go \ ir.go node.go op_string.go val.go \ sizeof_test.go cmd/compile/internal/ir ' : # fix mkbuiltin.go to generate the changes made to builtin.go during rf sed -i '' ' s/\[T/[types.T/g s/\*Node/*ir.Node/g /internal\/types/c \ fmt.Fprintln(&b, `import (`) \ fmt.Fprintln(&b, ` "cmd/compile/internal/ir"`) \ fmt.Fprintln(&b, ` "cmd/compile/internal/types"`) \ fmt.Fprintln(&b, `)`) ' mkbuiltin.go gofmt -w mkbuiltin.go : # update cmd/dist to add internal/ir cd ../../../dist sed -i '' '/compile.internal.gc/a\ "cmd/compile/internal/ir", ' buildtool.go gofmt -w buildtool.go : # update cmd/compile TestFormats cd ../.. 
go install std cmd cd cmd/compile go test -u || go test # first one updates but fails; second passes Change-Id: I5f7caf6b20629b51970279e81231a3574d5b51db Reviewed-on: https://go-review.googlesource.com/c/go/+/273008 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/fmtmap_test.go | 40 +- src/cmd/compile/internal/arm/ssa.go | 3 +- src/cmd/compile/internal/arm64/ssa.go | 3 +- src/cmd/compile/internal/gc/alg.go | 271 +-- src/cmd/compile/internal/gc/align.go | 57 +- src/cmd/compile/internal/gc/bexport.go | 63 +- src/cmd/compile/internal/gc/bimport.go | 13 +- src/cmd/compile/internal/gc/builtin.go | 219 +- src/cmd/compile/internal/gc/closure.go | 121 +- src/cmd/compile/internal/gc/const.go | 432 ++-- src/cmd/compile/internal/gc/dcl.go | 329 ++- src/cmd/compile/internal/gc/embed.go | 29 +- src/cmd/compile/internal/gc/esc.go | 474 ---- src/cmd/compile/internal/gc/escape.go | 778 +++++-- src/cmd/compile/internal/gc/export.go | 55 +- src/cmd/compile/internal/gc/gen.go | 25 +- src/cmd/compile/internal/gc/go.go | 80 +- src/cmd/compile/internal/gc/gsubr.go | 45 +- src/cmd/compile/internal/gc/iexport.go | 297 +-- src/cmd/compile/internal/gc/iimport.go | 225 +- src/cmd/compile/internal/gc/init.go | 21 +- src/cmd/compile/internal/gc/initorder.go | 90 +- src/cmd/compile/internal/gc/inl.go | 413 ++-- src/cmd/compile/internal/gc/lex.go | 86 +- src/cmd/compile/internal/gc/main.go | 76 +- src/cmd/compile/internal/gc/mkbuiltin.go | 13 +- src/cmd/compile/internal/gc/noder.go | 387 ++-- src/cmd/compile/internal/gc/obj.go | 81 +- src/cmd/compile/internal/gc/order.go | 429 ++-- src/cmd/compile/internal/gc/pgen.go | 147 +- src/cmd/compile/internal/gc/pgen_test.go | 151 +- src/cmd/compile/internal/gc/phi.go | 35 +- src/cmd/compile/internal/gc/plive.go | 77 +- src/cmd/compile/internal/gc/racewalk.go | 9 +- src/cmd/compile/internal/gc/range.go | 199 +- src/cmd/compile/internal/gc/reflect.go | 321 +-- src/cmd/compile/internal/gc/scc.go | 40 +- src/cmd/compile/internal/gc/scope.go | 11 +- src/cmd/compile/internal/gc/select.go | 153 +- src/cmd/compile/internal/gc/sinit.go | 393 ++-- src/cmd/compile/internal/gc/ssa.go | 1976 ++++++++--------- src/cmd/compile/internal/gc/subr.go | 594 ++--- src/cmd/compile/internal/gc/swt.go | 203 +- src/cmd/compile/internal/gc/typecheck.go | 1009 ++++----- src/cmd/compile/internal/gc/types.go | 53 - src/cmd/compile/internal/gc/types_acc.go | 8 - src/cmd/compile/internal/gc/universe.go | 366 ++- src/cmd/compile/internal/gc/unsafe.go | 25 +- src/cmd/compile/internal/gc/util.go | 6 - src/cmd/compile/internal/gc/walk.go | 1539 ++++++------- src/cmd/compile/internal/{gc => ir}/bitset.go | 2 +- .../internal/{gc => ir}/class_string.go | 2 +- src/cmd/compile/internal/{gc => ir}/dump.go | 17 +- src/cmd/compile/internal/{gc => ir}/fmt.go | 269 ++- src/cmd/compile/internal/ir/ir.go | 12 + .../internal/{gc/syntax.go => ir/node.go} | 396 +++- .../compile/internal/{gc => ir}/op_string.go | 2 +- .../internal/{gc => ir}/sizeof_test.go | 2 +- src/cmd/compile/internal/ir/val.go | 120 + src/cmd/compile/internal/mips/ssa.go | 3 +- src/cmd/compile/internal/mips64/ssa.go | 3 +- src/cmd/compile/internal/ppc64/ssa.go | 3 +- src/cmd/compile/internal/riscv64/ssa.go | 3 +- src/cmd/compile/internal/wasm/ssa.go | 3 +- src/cmd/dist/buildtool.go | 1 + 65 files changed, 6666 insertions(+), 6642 deletions(-) delete mode 100644 src/cmd/compile/internal/gc/esc.go rename src/cmd/compile/internal/{gc => ir}/bitset.go (99%) rename src/cmd/compile/internal/{gc => ir}/class_string.go (98%) rename 
src/cmd/compile/internal/{gc => ir}/dump.go (96%) rename src/cmd/compile/internal/{gc => ir}/fmt.go (87%) create mode 100644 src/cmd/compile/internal/ir/ir.go rename src/cmd/compile/internal/{gc/syntax.go => ir/node.go} (82%) rename src/cmd/compile/internal/{gc => ir}/op_string.go (99%) rename src/cmd/compile/internal/{gc => ir}/sizeof_test.go (98%) create mode 100644 src/cmd/compile/internal/ir/val.go diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go index e32233bcaf802..404e89d0f2602 100644 --- a/src/cmd/compile/fmtmap_test.go +++ b/src/cmd/compile/fmtmap_test.go @@ -22,14 +22,14 @@ package main_test var knownFormats = map[string]string{ "*bytes.Buffer %s": "", "*cmd/compile/internal/gc.EscLocation %v": "", - "*cmd/compile/internal/gc.Node %#v": "", - "*cmd/compile/internal/gc.Node %+S": "", - "*cmd/compile/internal/gc.Node %+v": "", - "*cmd/compile/internal/gc.Node %L": "", - "*cmd/compile/internal/gc.Node %S": "", - "*cmd/compile/internal/gc.Node %j": "", - "*cmd/compile/internal/gc.Node %p": "", - "*cmd/compile/internal/gc.Node %v": "", + "*cmd/compile/internal/ir.Node %#v": "", + "*cmd/compile/internal/ir.Node %+S": "", + "*cmd/compile/internal/ir.Node %+v": "", + "*cmd/compile/internal/ir.Node %L": "", + "*cmd/compile/internal/ir.Node %S": "", + "*cmd/compile/internal/ir.Node %j": "", + "*cmd/compile/internal/ir.Node %p": "", + "*cmd/compile/internal/ir.Node %v": "", "*cmd/compile/internal/ssa.Block %s": "", "*cmd/compile/internal/ssa.Block %v": "", "*cmd/compile/internal/ssa.Func %s": "", @@ -78,18 +78,18 @@ var knownFormats = map[string]string{ "byte %q": "", "byte %v": "", "cmd/compile/internal/arm.shift %d": "", - "cmd/compile/internal/gc.Class %d": "", - "cmd/compile/internal/gc.Class %s": "", - "cmd/compile/internal/gc.Class %v": "", - "cmd/compile/internal/gc.Nodes %#v": "", - "cmd/compile/internal/gc.Nodes %+v": "", - "cmd/compile/internal/gc.Nodes %.v": "", - "cmd/compile/internal/gc.Nodes %v": "", - "cmd/compile/internal/gc.Op %#v": "", - "cmd/compile/internal/gc.Op %v": "", - "cmd/compile/internal/gc.fmtMode %d": "", "cmd/compile/internal/gc.initKind %d": "", "cmd/compile/internal/gc.itag %v": "", + "cmd/compile/internal/ir.Class %d": "", + "cmd/compile/internal/ir.Class %s": "", + "cmd/compile/internal/ir.Class %v": "", + "cmd/compile/internal/ir.FmtMode %d": "", + "cmd/compile/internal/ir.Nodes %#v": "", + "cmd/compile/internal/ir.Nodes %+v": "", + "cmd/compile/internal/ir.Nodes %.v": "", + "cmd/compile/internal/ir.Nodes %v": "", + "cmd/compile/internal/ir.Op %#v": "", + "cmd/compile/internal/ir.Op %v": "", "cmd/compile/internal/ssa.BranchPrediction %d": "", "cmd/compile/internal/ssa.Edge %v": "", "cmd/compile/internal/ssa.GCNode %v": "", @@ -162,8 +162,8 @@ var knownFormats = map[string]string{ "interface{} %q": "", "interface{} %s": "", "interface{} %v": "", - "map[*cmd/compile/internal/gc.Node]*cmd/compile/internal/ssa.Value %v": "", - "map[*cmd/compile/internal/gc.Node][]*cmd/compile/internal/gc.Node %v": "", + "map[*cmd/compile/internal/ir.Node]*cmd/compile/internal/ssa.Value %v": "", + "map[*cmd/compile/internal/ir.Node][]*cmd/compile/internal/ir.Node %v": "", "map[cmd/compile/internal/ssa.ID]uint32 %v": "", "map[int64]uint32 %v": "", "math/big.Accuracy %s": "", diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go index 7d34cc517005f..ff1dd8869e211 100644 --- a/src/cmd/compile/internal/arm/ssa.go +++ b/src/cmd/compile/internal/arm/ssa.go @@ -11,6 +11,7 @@ import ( "cmd/compile/internal/base" 
"cmd/compile/internal/gc" + "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" "cmd/compile/internal/types" @@ -545,7 +546,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case *obj.LSym: wantreg = "SB" gc.AddAux(&p.From, v) - case *gc.Node: + case *ir.Node: wantreg = "SP" gc.AddAux(&p.From, v) case nil: diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go index 5e6f607708df2..58c00dc3bd14c 100644 --- a/src/cmd/compile/internal/arm64/ssa.go +++ b/src/cmd/compile/internal/arm64/ssa.go @@ -9,6 +9,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/gc" + "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" "cmd/compile/internal/types" @@ -395,7 +396,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case *obj.LSym: wantreg = "SB" gc.AddAux(&p.From, v) - case *gc.Node: + case *ir.Node: wantreg = "SP" gc.AddAux(&p.From, v) case nil: diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index 517aaa4b814cf..cf82b9d5916e0 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/obj" "fmt" @@ -70,11 +71,11 @@ func EqCanPanic(t *types.Type) bool { switch t.Etype { default: return false - case TINTER: + case types.TINTER: return true - case TARRAY: + case types.TARRAY: return EqCanPanic(t.Elem()) - case TSTRUCT: + case types.TSTRUCT: for _, f := range t.FieldSlice() { if !f.Sym.IsBlank() && EqCanPanic(f.Type) { return true @@ -120,45 +121,45 @@ func algtype1(t *types.Type) (AlgKind, *types.Type) { } switch t.Etype { - case TANY, TFORW: + case types.TANY, types.TFORW: // will be defined later. return ANOEQ, t - case TINT8, TUINT8, TINT16, TUINT16, - TINT32, TUINT32, TINT64, TUINT64, - TINT, TUINT, TUINTPTR, - TBOOL, TPTR, - TCHAN, TUNSAFEPTR: + case types.TINT8, types.TUINT8, types.TINT16, types.TUINT16, + types.TINT32, types.TUINT32, types.TINT64, types.TUINT64, + types.TINT, types.TUINT, types.TUINTPTR, + types.TBOOL, types.TPTR, + types.TCHAN, types.TUNSAFEPTR: return AMEM, nil - case TFUNC, TMAP: + case types.TFUNC, types.TMAP: return ANOEQ, t - case TFLOAT32: + case types.TFLOAT32: return AFLOAT32, nil - case TFLOAT64: + case types.TFLOAT64: return AFLOAT64, nil - case TCOMPLEX64: + case types.TCOMPLEX64: return ACPLX64, nil - case TCOMPLEX128: + case types.TCOMPLEX128: return ACPLX128, nil - case TSTRING: + case types.TSTRING: return ASTRING, nil - case TINTER: + case types.TINTER: if t.IsEmptyInterface() { return ANILINTER, nil } return AINTER, nil - case TSLICE: + case types.TSLICE: return ANOEQ, t - case TARRAY: + case types.TARRAY: a, bad := algtype1(t.Elem()) switch a { case AMEM: @@ -178,7 +179,7 @@ func algtype1(t *types.Type) (AlgKind, *types.Type) { return ASPECIAL, nil - case TSTRUCT: + case types.TSTRUCT: fields := t.FieldSlice() // One-field struct is same as that one field alone. 
@@ -288,19 +289,19 @@ func genhash(t *types.Type) *obj.LSym { } base.Pos = autogeneratedPos // less confusing than end of input - dclcontext = PEXTERN + dclcontext = ir.PEXTERN // func sym(p *T, h uintptr) uintptr - tfn := nod(OTFUNC, nil, nil) + tfn := ir.Nod(ir.OTFUNC, nil, nil) tfn.List.Set2( namedfield("p", types.NewPtr(t)), - namedfield("h", types.Types[TUINTPTR]), + namedfield("h", types.Types[types.TUINTPTR]), ) - tfn.Rlist.Set1(anonfield(types.Types[TUINTPTR])) + tfn.Rlist.Set1(anonfield(types.Types[types.TUINTPTR])) fn := dclfunc(sym, tfn) - np := asNode(tfn.Type.Params().Field(0).Nname) - nh := asNode(tfn.Type.Params().Field(1).Nname) + np := ir.AsNode(tfn.Type.Params().Field(0).Nname) + nh := ir.AsNode(tfn.Type.Params().Field(1).Nname) switch t.Etype { case types.TARRAY: @@ -309,23 +310,23 @@ func genhash(t *types.Type) *obj.LSym { // pure memory. hashel := hashfor(t.Elem()) - n := nod(ORANGE, nil, nod(ODEREF, np, nil)) - ni := newname(lookup("i")) - ni.Type = types.Types[TINT] + n := ir.Nod(ir.ORANGE, nil, ir.Nod(ir.ODEREF, np, nil)) + ni := NewName(lookup("i")) + ni.Type = types.Types[types.TINT] n.List.Set1(ni) n.SetColas(true) colasdefn(n.List.Slice(), n) ni = n.List.First() // h = hashel(&p[i], h) - call := nod(OCALL, hashel, nil) + call := ir.Nod(ir.OCALL, hashel, nil) - nx := nod(OINDEX, np, ni) + nx := ir.Nod(ir.OINDEX, np, ni) nx.SetBounded(true) - na := nod(OADDR, nx, nil) + na := ir.Nod(ir.OADDR, nx, nil) call.List.Append(na) call.List.Append(nh) - n.Nbody.Append(nod(OAS, nh, call)) + n.Nbody.Append(ir.Nod(ir.OAS, nh, call)) fn.Nbody.Append(n) @@ -344,12 +345,12 @@ func genhash(t *types.Type) *obj.LSym { // Hash non-memory fields with appropriate hash function. if !IsRegularMemory(f.Type) { hashel := hashfor(f.Type) - call := nod(OCALL, hashel, nil) - nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages? - na := nod(OADDR, nx, nil) + call := ir.Nod(ir.OCALL, hashel, nil) + nx := nodSym(ir.OXDOT, np, f.Sym) // TODO: fields from other packages? + na := ir.Nod(ir.OADDR, nx, nil) call.List.Append(na) call.List.Append(nh) - fn.Nbody.Append(nod(OAS, nh, call)) + fn.Nbody.Append(ir.Nod(ir.OAS, nh, call)) i++ continue } @@ -359,24 +360,24 @@ func genhash(t *types.Type) *obj.LSym { // h = hashel(&p.first, size, h) hashel := hashmem(f.Type) - call := nod(OCALL, hashel, nil) - nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages? - na := nod(OADDR, nx, nil) + call := ir.Nod(ir.OCALL, hashel, nil) + nx := nodSym(ir.OXDOT, np, f.Sym) // TODO: fields from other packages? 
+ na := ir.Nod(ir.OADDR, nx, nil) call.List.Append(na) call.List.Append(nh) call.List.Append(nodintconst(size)) - fn.Nbody.Append(nod(OAS, nh, call)) + fn.Nbody.Append(ir.Nod(ir.OAS, nh, call)) i = next } } - r := nod(ORETURN, nil, nil) + r := ir.Nod(ir.ORETURN, nil, nil) r.List.Append(nh) fn.Nbody.Append(r) if base.Flag.LowerR != 0 { - dumplist("genhash body", fn.Nbody) + ir.DumpList("genhash body", fn.Nbody) } funcbody() @@ -403,7 +404,7 @@ func genhash(t *types.Type) *obj.LSym { return closure } -func hashfor(t *types.Type) *Node { +func hashfor(t *types.Type) *ir.Node { var sym *types.Sym switch a, _ := algtype1(t); a { @@ -429,13 +430,13 @@ func hashfor(t *types.Type) *Node { sym = typesymprefix(".hash", t) } - n := newname(sym) + n := NewName(sym) setNodeNameFunc(n) - n.Type = functype(nil, []*Node{ + n.Type = functype(nil, []*ir.Node{ anonfield(types.NewPtr(t)), - anonfield(types.Types[TUINTPTR]), - }, []*Node{ - anonfield(types.Types[TUINTPTR]), + anonfield(types.Types[types.TUINTPTR]), + }, []*ir.Node{ + anonfield(types.Types[types.TUINTPTR]), }) return n } @@ -517,20 +518,20 @@ func geneq(t *types.Type) *obj.LSym { // Autogenerate code for equality of structs and arrays. base.Pos = autogeneratedPos // less confusing than end of input - dclcontext = PEXTERN + dclcontext = ir.PEXTERN // func sym(p, q *T) bool - tfn := nod(OTFUNC, nil, nil) + tfn := ir.Nod(ir.OTFUNC, nil, nil) tfn.List.Set2( namedfield("p", types.NewPtr(t)), namedfield("q", types.NewPtr(t)), ) - tfn.Rlist.Set1(namedfield("r", types.Types[TBOOL])) + tfn.Rlist.Set1(namedfield("r", types.Types[types.TBOOL])) fn := dclfunc(sym, tfn) - np := asNode(tfn.Type.Params().Field(0).Nname) - nq := asNode(tfn.Type.Params().Field(1).Nname) - nr := asNode(tfn.Type.Results().Field(0).Nname) + np := ir.AsNode(tfn.Type.Params().Field(0).Nname) + nq := ir.AsNode(tfn.Type.Params().Field(1).Nname) + nr := ir.AsNode(tfn.Type.Results().Field(0).Nname) // Label to jump to if an equality test fails. neq := autolabel(".neq") @@ -542,7 +543,7 @@ func geneq(t *types.Type) *obj.LSym { default: base.Fatalf("geneq %v", t) - case TARRAY: + case types.TARRAY: nelem := t.NumElem() // checkAll generates code to check the equality of all array elements. @@ -566,15 +567,15 @@ func geneq(t *types.Type) *obj.LSym { // // TODO(josharian): consider doing some loop unrolling // for larger nelem as well, processing a few elements at a time in a loop. - checkAll := func(unroll int64, last bool, eq func(pi, qi *Node) *Node) { + checkAll := func(unroll int64, last bool, eq func(pi, qi *ir.Node) *ir.Node) { // checkIdx generates a node to check for equality at index i. - checkIdx := func(i *Node) *Node { + checkIdx := func(i *ir.Node) *ir.Node { // pi := p[i] - pi := nod(OINDEX, np, i) + pi := ir.Nod(ir.OINDEX, np, i) pi.SetBounded(true) pi.Type = t.Elem() // qi := q[i] - qi := nod(OINDEX, nq, i) + qi := ir.Nod(ir.OINDEX, nq, i) qi.SetBounded(true) qi.Type = t.Elem() return eq(pi, qi) @@ -588,68 +589,68 @@ func geneq(t *types.Type) *obj.LSym { // Generate a series of checks. for i := int64(0); i < nelem; i++ { // if check {} else { goto neq } - nif := nod(OIF, checkIdx(nodintconst(i)), nil) - nif.Rlist.Append(nodSym(OGOTO, nil, neq)) + nif := ir.Nod(ir.OIF, checkIdx(nodintconst(i)), nil) + nif.Rlist.Append(nodSym(ir.OGOTO, nil, neq)) fn.Nbody.Append(nif) } if last { - fn.Nbody.Append(nod(OAS, nr, checkIdx(nodintconst(nelem)))) + fn.Nbody.Append(ir.Nod(ir.OAS, nr, checkIdx(nodintconst(nelem)))) } } else { // Generate a for loop. 
// for i := 0; i < nelem; i++ - i := temp(types.Types[TINT]) - init := nod(OAS, i, nodintconst(0)) - cond := nod(OLT, i, nodintconst(nelem)) - post := nod(OAS, i, nod(OADD, i, nodintconst(1))) - loop := nod(OFOR, cond, post) + i := temp(types.Types[types.TINT]) + init := ir.Nod(ir.OAS, i, nodintconst(0)) + cond := ir.Nod(ir.OLT, i, nodintconst(nelem)) + post := ir.Nod(ir.OAS, i, ir.Nod(ir.OADD, i, nodintconst(1))) + loop := ir.Nod(ir.OFOR, cond, post) loop.Ninit.Append(init) // if eq(pi, qi) {} else { goto neq } - nif := nod(OIF, checkIdx(i), nil) - nif.Rlist.Append(nodSym(OGOTO, nil, neq)) + nif := ir.Nod(ir.OIF, checkIdx(i), nil) + nif.Rlist.Append(nodSym(ir.OGOTO, nil, neq)) loop.Nbody.Append(nif) fn.Nbody.Append(loop) if last { - fn.Nbody.Append(nod(OAS, nr, nodbool(true))) + fn.Nbody.Append(ir.Nod(ir.OAS, nr, nodbool(true))) } } } switch t.Elem().Etype { - case TSTRING: + case types.TSTRING: // Do two loops. First, check that all the lengths match (cheap). // Second, check that all the contents match (expensive). // TODO: when the array size is small, unroll the length match checks. - checkAll(3, false, func(pi, qi *Node) *Node { + checkAll(3, false, func(pi, qi *ir.Node) *ir.Node { // Compare lengths. eqlen, _ := eqstring(pi, qi) return eqlen }) - checkAll(1, true, func(pi, qi *Node) *Node { + checkAll(1, true, func(pi, qi *ir.Node) *ir.Node { // Compare contents. _, eqmem := eqstring(pi, qi) return eqmem }) - case TFLOAT32, TFLOAT64: - checkAll(2, true, func(pi, qi *Node) *Node { + case types.TFLOAT32, types.TFLOAT64: + checkAll(2, true, func(pi, qi *ir.Node) *ir.Node { // p[i] == q[i] - return nod(OEQ, pi, qi) + return ir.Nod(ir.OEQ, pi, qi) }) // TODO: pick apart structs, do them piecemeal too default: - checkAll(1, true, func(pi, qi *Node) *Node { + checkAll(1, true, func(pi, qi *ir.Node) *ir.Node { // p[i] == q[i] - return nod(OEQ, pi, qi) + return ir.Nod(ir.OEQ, pi, qi) }) } - case TSTRUCT: + case types.TSTRUCT: // Build a list of conditions to satisfy. // The conditions are a list-of-lists. Conditions are reorderable // within each inner list. The outer lists must be evaluated in order. - var conds [][]*Node - conds = append(conds, []*Node{}) - and := func(n *Node) { + var conds [][]*ir.Node + conds = append(conds, []*ir.Node{}) + and := func(n *ir.Node) { i := len(conds) - 1 conds[i] = append(conds[i], n) } @@ -669,21 +670,21 @@ func geneq(t *types.Type) *obj.LSym { if !IsRegularMemory(f.Type) { if EqCanPanic(f.Type) { // Enforce ordering by starting a new set of reorderable conditions. - conds = append(conds, []*Node{}) + conds = append(conds, []*ir.Node{}) } - p := nodSym(OXDOT, np, f.Sym) - q := nodSym(OXDOT, nq, f.Sym) + p := nodSym(ir.OXDOT, np, f.Sym) + q := nodSym(ir.OXDOT, nq, f.Sym) switch { case f.Type.IsString(): eqlen, eqmem := eqstring(p, q) and(eqlen) and(eqmem) default: - and(nod(OEQ, p, q)) + and(ir.Nod(ir.OEQ, p, q)) } if EqCanPanic(f.Type) { // Also enforce ordering after something that can panic. - conds = append(conds, []*Node{}) + conds = append(conds, []*ir.Node{}) } i++ continue @@ -708,10 +709,10 @@ func geneq(t *types.Type) *obj.LSym { // Sort conditions to put runtime calls last. // Preserve the rest of the ordering. 
- var flatConds []*Node + var flatConds []*ir.Node for _, c := range conds { - isCall := func(n *Node) bool { - return n.Op == OCALL || n.Op == OCALLFUNC + isCall := func(n *ir.Node) bool { + return n.Op == ir.OCALL || n.Op == ir.OCALLFUNC } sort.SliceStable(c, func(i, j int) bool { return !isCall(c[i]) && isCall(c[j]) @@ -720,42 +721,42 @@ func geneq(t *types.Type) *obj.LSym { } if len(flatConds) == 0 { - fn.Nbody.Append(nod(OAS, nr, nodbool(true))) + fn.Nbody.Append(ir.Nod(ir.OAS, nr, nodbool(true))) } else { for _, c := range flatConds[:len(flatConds)-1] { // if cond {} else { goto neq } - n := nod(OIF, c, nil) - n.Rlist.Append(nodSym(OGOTO, nil, neq)) + n := ir.Nod(ir.OIF, c, nil) + n.Rlist.Append(nodSym(ir.OGOTO, nil, neq)) fn.Nbody.Append(n) } - fn.Nbody.Append(nod(OAS, nr, flatConds[len(flatConds)-1])) + fn.Nbody.Append(ir.Nod(ir.OAS, nr, flatConds[len(flatConds)-1])) } } // ret: // return ret := autolabel(".ret") - fn.Nbody.Append(nodSym(OLABEL, nil, ret)) - fn.Nbody.Append(nod(ORETURN, nil, nil)) + fn.Nbody.Append(nodSym(ir.OLABEL, nil, ret)) + fn.Nbody.Append(ir.Nod(ir.ORETURN, nil, nil)) // neq: // r = false // return (or goto ret) - fn.Nbody.Append(nodSym(OLABEL, nil, neq)) - fn.Nbody.Append(nod(OAS, nr, nodbool(false))) + fn.Nbody.Append(nodSym(ir.OLABEL, nil, neq)) + fn.Nbody.Append(ir.Nod(ir.OAS, nr, nodbool(false))) if EqCanPanic(t) || hasCall(fn) { // Epilogue is large, so share it with the equal case. - fn.Nbody.Append(nodSym(OGOTO, nil, ret)) + fn.Nbody.Append(nodSym(ir.OGOTO, nil, ret)) } else { // Epilogue is small, so don't bother sharing. - fn.Nbody.Append(nod(ORETURN, nil, nil)) + fn.Nbody.Append(ir.Nod(ir.ORETURN, nil, nil)) } // TODO(khr): the epilogue size detection condition above isn't perfect. // We should really do a generic CL that shares epilogues across // the board. See #24936. if base.Flag.LowerR != 0 { - dumplist("geneq body", fn.Nbody) + ir.DumpList("geneq body", fn.Nbody) } funcbody() @@ -784,8 +785,8 @@ func geneq(t *types.Type) *obj.LSym { return closure } -func hasCall(n *Node) bool { - if n.Op == OCALL || n.Op == OCALLFUNC { +func hasCall(n *ir.Node) bool { + if n.Op == ir.OCALL || n.Op == ir.OCALLFUNC { return true } if n.Left != nil && hasCall(n.Left) { @@ -819,10 +820,10 @@ func hasCall(n *Node) bool { // eqfield returns the node // p.field == q.field -func eqfield(p *Node, q *Node, field *types.Sym) *Node { - nx := nodSym(OXDOT, p, field) - ny := nodSym(OXDOT, q, field) - ne := nod(OEQ, nx, ny) +func eqfield(p *ir.Node, q *ir.Node, field *types.Sym) *ir.Node { + nx := nodSym(ir.OXDOT, p, field) + ny := nodSym(ir.OXDOT, q, field) + ne := ir.Nod(ir.OEQ, nx, ny) return ne } @@ -832,23 +833,23 @@ func eqfield(p *Node, q *Node, field *types.Sym) *Node { // memequal(s.ptr, t.ptr, len(s)) // which can be used to construct string equality comparison. // eqlen must be evaluated before eqmem, and shortcircuiting is required. 
-func eqstring(s, t *Node) (eqlen, eqmem *Node) { - s = conv(s, types.Types[TSTRING]) - t = conv(t, types.Types[TSTRING]) - sptr := nod(OSPTR, s, nil) - tptr := nod(OSPTR, t, nil) - slen := conv(nod(OLEN, s, nil), types.Types[TUINTPTR]) - tlen := conv(nod(OLEN, t, nil), types.Types[TUINTPTR]) +func eqstring(s, t *ir.Node) (eqlen, eqmem *ir.Node) { + s = conv(s, types.Types[types.TSTRING]) + t = conv(t, types.Types[types.TSTRING]) + sptr := ir.Nod(ir.OSPTR, s, nil) + tptr := ir.Nod(ir.OSPTR, t, nil) + slen := conv(ir.Nod(ir.OLEN, s, nil), types.Types[types.TUINTPTR]) + tlen := conv(ir.Nod(ir.OLEN, t, nil), types.Types[types.TUINTPTR]) fn := syslook("memequal") - fn = substArgTypes(fn, types.Types[TUINT8], types.Types[TUINT8]) - call := nod(OCALL, fn, nil) - call.List.Append(sptr, tptr, slen.copy()) + fn = substArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8]) + call := ir.Nod(ir.OCALL, fn, nil) + call.List.Append(sptr, tptr, ir.Copy(slen)) call = typecheck(call, ctxExpr|ctxMultiOK) - cmp := nod(OEQ, slen, tlen) + cmp := ir.Nod(ir.OEQ, slen, tlen) cmp = typecheck(cmp, ctxExpr) - cmp.Type = types.Types[TBOOL] + cmp.Type = types.Types[types.TBOOL] return cmp, call } @@ -858,48 +859,48 @@ func eqstring(s, t *Node) (eqlen, eqmem *Node) { // ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate) // which can be used to construct interface equality comparison. // eqtab must be evaluated before eqdata, and shortcircuiting is required. -func eqinterface(s, t *Node) (eqtab, eqdata *Node) { +func eqinterface(s, t *ir.Node) (eqtab, eqdata *ir.Node) { if !types.Identical(s.Type, t.Type) { base.Fatalf("eqinterface %v %v", s.Type, t.Type) } // func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool) // func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool) - var fn *Node + var fn *ir.Node if s.Type.IsEmptyInterface() { fn = syslook("efaceeq") } else { fn = syslook("ifaceeq") } - stab := nod(OITAB, s, nil) - ttab := nod(OITAB, t, nil) - sdata := nod(OIDATA, s, nil) - tdata := nod(OIDATA, t, nil) - sdata.Type = types.Types[TUNSAFEPTR] - tdata.Type = types.Types[TUNSAFEPTR] + stab := ir.Nod(ir.OITAB, s, nil) + ttab := ir.Nod(ir.OITAB, t, nil) + sdata := ir.Nod(ir.OIDATA, s, nil) + tdata := ir.Nod(ir.OIDATA, t, nil) + sdata.Type = types.Types[types.TUNSAFEPTR] + tdata.Type = types.Types[types.TUNSAFEPTR] sdata.SetTypecheck(1) tdata.SetTypecheck(1) - call := nod(OCALL, fn, nil) + call := ir.Nod(ir.OCALL, fn, nil) call.List.Append(stab, sdata, tdata) call = typecheck(call, ctxExpr|ctxMultiOK) - cmp := nod(OEQ, stab, ttab) + cmp := ir.Nod(ir.OEQ, stab, ttab) cmp = typecheck(cmp, ctxExpr) - cmp.Type = types.Types[TBOOL] + cmp.Type = types.Types[types.TBOOL] return cmp, call } // eqmem returns the node // memequal(&p.field, &q.field [, size]) -func eqmem(p *Node, q *Node, field *types.Sym, size int64) *Node { - nx := nod(OADDR, nodSym(OXDOT, p, field), nil) - ny := nod(OADDR, nodSym(OXDOT, q, field), nil) +func eqmem(p *ir.Node, q *ir.Node, field *types.Sym, size int64) *ir.Node { + nx := ir.Nod(ir.OADDR, nodSym(ir.OXDOT, p, field), nil) + ny := ir.Nod(ir.OADDR, nodSym(ir.OXDOT, q, field), nil) nx = typecheck(nx, ctxExpr) ny = typecheck(ny, ctxExpr) fn, needsize := eqmemfunc(size, nx.Type.Elem()) - call := nod(OCALL, fn, nil) + call := ir.Nod(ir.OCALL, fn, nil) call.List.Append(nx) call.List.Append(ny) if needsize { @@ -909,7 +910,7 @@ func eqmem(p *Node, q *Node, field *types.Sym, size int64) *Node { return call } -func eqmemfunc(size int64, t *types.Type) (fn 
*Node, needsize bool) { +func eqmemfunc(size int64, t *types.Type) (fn *ir.Node, needsize bool) { switch size { default: fn = syslook("memequal") diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go index a8cbbfd322ccf..1bc8bf238f74e 100644 --- a/src/cmd/compile/internal/gc/align.go +++ b/src/cmd/compile/internal/gc/align.go @@ -7,6 +7,7 @@ package gc import ( "bytes" "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" "fmt" "sort" @@ -117,7 +118,7 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 { o = Rnd(o, int64(f.Type.Align)) } f.Offset = o - if n := asNode(f.Nname); n != nil { + if n := ir.AsNode(f.Nname); n != nil { // addrescapes has similar code to update these offsets. // Usually addrescapes runs after widstruct, // in which case we could drop this, @@ -197,7 +198,7 @@ func findTypeLoop(t *types.Type, path *[]*types.Type) bool { } *path = append(*path, t) - if p := asNode(t.Nod).Name.Param; p != nil && findTypeLoop(p.Ntype.Type, path) { + if p := ir.AsNode(t.Nod).Name.Param; p != nil && findTypeLoop(p.Ntype.Type, path) { return true } *path = (*path)[:len(*path)-1] @@ -205,17 +206,17 @@ func findTypeLoop(t *types.Type, path *[]*types.Type) bool { // Anonymous type. Recurse on contained types. switch t.Etype { - case TARRAY: + case types.TARRAY: if findTypeLoop(t.Elem(), path) { return true } - case TSTRUCT: + case types.TSTRUCT: for _, f := range t.Fields().Slice() { if findTypeLoop(f.Type, path) { return true } } - case TINTER: + case types.TINTER: for _, m := range t.Methods().Slice() { if m.Type.IsInterface() { // embedded interface if findTypeLoop(m.Type, path) { @@ -306,8 +307,8 @@ func dowidth(t *types.Type) { defercheckwidth() lno := base.Pos - if asNode(t.Nod) != nil { - base.Pos = asNode(t.Nod).Pos + if ir.AsNode(t.Nod) != nil { + base.Pos = ir.AsNode(t.Nod).Pos } t.Width = -2 @@ -315,7 +316,7 @@ func dowidth(t *types.Type) { et := t.Etype switch et { - case TFUNC, TCHAN, TMAP, TSTRING: + case types.TFUNC, types.TCHAN, types.TMAP, types.TSTRING: break // simtype == 0 during bootstrap @@ -331,41 +332,41 @@ func dowidth(t *types.Type) { base.Fatalf("dowidth: unknown type: %v", t) // compiler-specific stuff - case TINT8, TUINT8, TBOOL: + case types.TINT8, types.TUINT8, types.TBOOL: // bool is int8 w = 1 - case TINT16, TUINT16: + case types.TINT16, types.TUINT16: w = 2 - case TINT32, TUINT32, TFLOAT32: + case types.TINT32, types.TUINT32, types.TFLOAT32: w = 4 - case TINT64, TUINT64, TFLOAT64: + case types.TINT64, types.TUINT64, types.TFLOAT64: w = 8 t.Align = uint8(Widthreg) - case TCOMPLEX64: + case types.TCOMPLEX64: w = 8 t.Align = 4 - case TCOMPLEX128: + case types.TCOMPLEX128: w = 16 t.Align = uint8(Widthreg) - case TPTR: + case types.TPTR: w = int64(Widthptr) checkwidth(t.Elem()) - case TUNSAFEPTR: + case types.TUNSAFEPTR: w = int64(Widthptr) - case TINTER: // implemented as 2 pointers + case types.TINTER: // implemented as 2 pointers w = 2 * int64(Widthptr) t.Align = uint8(Widthptr) expandiface(t) - case TCHAN: // implemented as pointer + case types.TCHAN: // implemented as pointer w = int64(Widthptr) checkwidth(t.Elem()) @@ -375,7 +376,7 @@ func dowidth(t *types.Type) { t1 := types.NewChanArgs(t) checkwidth(t1) - case TCHANARGS: + case types.TCHANARGS: t1 := t.ChanArgs() dowidth(t1) // just in case if t1.Elem().Width >= 1<<16 { @@ -383,27 +384,27 @@ func dowidth(t *types.Type) { } w = 1 // anything will do - case TMAP: // implemented as pointer + case types.TMAP: // 
implemented as pointer w = int64(Widthptr) checkwidth(t.Elem()) checkwidth(t.Key()) - case TFORW: // should have been filled in + case types.TFORW: // should have been filled in reportTypeLoop(t) w = 1 // anything will do - case TANY: + case types.TANY: // not a real type; should be replaced before use. base.Fatalf("dowidth any") - case TSTRING: + case types.TSTRING: if sizeofString == 0 { base.Fatalf("early dowidth string") } w = sizeofString t.Align = uint8(Widthptr) - case TARRAY: + case types.TARRAY: if t.Elem() == nil { break } @@ -418,7 +419,7 @@ func dowidth(t *types.Type) { w = t.NumElem() * t.Elem().Width t.Align = t.Elem().Align - case TSLICE: + case types.TSLICE: if t.Elem() == nil { break } @@ -426,7 +427,7 @@ func dowidth(t *types.Type) { checkwidth(t.Elem()) t.Align = uint8(Widthptr) - case TSTRUCT: + case types.TSTRUCT: if t.IsFuncArgStruct() { base.Fatalf("dowidth fn struct %v", t) } @@ -434,14 +435,14 @@ func dowidth(t *types.Type) { // make fake type to check later to // trigger function argument computation. - case TFUNC: + case types.TFUNC: t1 := types.NewFuncArgs(t) checkwidth(t1) w = int64(Widthptr) // width of func type is pointer // function is 3 cated structures; // compute their widths as side-effect. - case TFUNCARGS: + case types.TFUNCARGS: t1 := t.FuncArgs() w = widstruct(t1, t1.Recvs(), 0, 0) w = widstruct(t1, t1.Params(), w, Widthreg) diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 6564024a0c8dd..ff33c6b5fcf73 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/ir" "cmd/compile/internal/types" ) @@ -13,8 +14,8 @@ type exporter struct { } // markObject visits a reachable object. -func (p *exporter) markObject(n *Node) { - if n.Op == ONAME && n.Class() == PFUNC { +func (p *exporter) markObject(n *ir.Node) { + if n.Op == ir.ONAME && n.Class() == ir.PFUNC { inlFlood(n) } @@ -34,10 +35,10 @@ func (p *exporter) markType(t *types.Type) { // only their unexpanded method set (i.e., exclusive of // interface embeddings), and the switch statement below // handles their full method set. - if t.Sym != nil && t.Etype != TINTER { + if t.Sym != nil && t.Etype != types.TINTER { for _, m := range t.Methods().Slice() { if types.IsExported(m.Sym.Name) { - p.markObject(asNode(m.Nname)) + p.markObject(ir.AsNode(m.Nname)) } } } @@ -52,31 +53,31 @@ func (p *exporter) markType(t *types.Type) { // the user already needs some way to construct values of // those types. 
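	// For example, marking a hypothetical exported type
	//
	//	type Pair struct{ X *Big }
	//
	// reaches *Big through the TSTRUCT case below, then Big itself
	// through TPTR, at which point Big's exported methods get
	// marked as well.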
switch t.Etype { - case TPTR, TARRAY, TSLICE: + case types.TPTR, types.TARRAY, types.TSLICE: p.markType(t.Elem()) - case TCHAN: + case types.TCHAN: if t.ChanDir().CanRecv() { p.markType(t.Elem()) } - case TMAP: + case types.TMAP: p.markType(t.Key()) p.markType(t.Elem()) - case TSTRUCT: + case types.TSTRUCT: for _, f := range t.FieldSlice() { if types.IsExported(f.Sym.Name) || f.Embedded != 0 { p.markType(f.Type) } } - case TFUNC: + case types.TFUNC: for _, f := range t.Results().FieldSlice() { p.markType(f.Type) } - case TINTER: + case types.TINTER: for _, f := range t.FieldSlice() { if types.IsExported(f.Sym.Name) { p.markType(f.Type) @@ -133,23 +134,23 @@ func predeclared() []*types.Type { // elements have been initialized before predecl = []*types.Type{ // basic types - types.Types[TBOOL], - types.Types[TINT], - types.Types[TINT8], - types.Types[TINT16], - types.Types[TINT32], - types.Types[TINT64], - types.Types[TUINT], - types.Types[TUINT8], - types.Types[TUINT16], - types.Types[TUINT32], - types.Types[TUINT64], - types.Types[TUINTPTR], - types.Types[TFLOAT32], - types.Types[TFLOAT64], - types.Types[TCOMPLEX64], - types.Types[TCOMPLEX128], - types.Types[TSTRING], + types.Types[types.TBOOL], + types.Types[types.TINT], + types.Types[types.TINT8], + types.Types[types.TINT16], + types.Types[types.TINT32], + types.Types[types.TINT64], + types.Types[types.TUINT], + types.Types[types.TUINT8], + types.Types[types.TUINT16], + types.Types[types.TUINT32], + types.Types[types.TUINT64], + types.Types[types.TUINTPTR], + types.Types[types.TFLOAT32], + types.Types[types.TFLOAT64], + types.Types[types.TCOMPLEX64], + types.Types[types.TCOMPLEX128], + types.Types[types.TSTRING], // basic type aliases types.Bytetype, @@ -165,16 +166,16 @@ func predeclared() []*types.Type { types.UntypedFloat, types.UntypedComplex, types.UntypedString, - types.Types[TNIL], + types.Types[types.TNIL], // package unsafe - types.Types[TUNSAFEPTR], + types.Types[types.TUNSAFEPTR], // invalid type (package contains errors) - types.Types[Txxx], + types.Types[types.Txxx], // any type, for builtin export data - types.Types[TANY], + types.Types[types.TANY], } } return predecl diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go index 911ac4c0dc7b6..e2dd276f46165 100644 --- a/src/cmd/compile/internal/gc/bimport.go +++ b/src/cmd/compile/internal/gc/bimport.go @@ -5,20 +5,15 @@ package gc import ( + "cmd/compile/internal/ir" "cmd/internal/src" ) -// numImport tracks how often a package with a given name is imported. -// It is used to provide a better error message (by using the package -// path to disambiguate) if a package that appears multiple times with -// the same name appears in an error message. 
-var numImport = make(map[string]int) - -func npos(pos src.XPos, n *Node) *Node { +func npos(pos src.XPos, n *ir.Node) *ir.Node { n.Pos = pos return n } -func builtinCall(op Op) *Node { - return nod(OCALL, mkname(builtinpkg.Lookup(goopnames[op])), nil) +func builtinCall(op ir.Op) *ir.Node { + return ir.Nod(ir.OCALL, mkname(ir.BuiltinPkg.Lookup(ir.OpNames[op])), nil) } diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go index fd95b657b2662..5016905f225d3 100644 --- a/src/cmd/compile/internal/gc/builtin.go +++ b/src/cmd/compile/internal/gc/builtin.go @@ -2,7 +2,10 @@ package gc -import "cmd/compile/internal/types" +import ( + "cmd/compile/internal/ir" + "cmd/compile/internal/types" +) var runtimeDecls = [...]struct { name string @@ -205,134 +208,134 @@ func runtimeTypes() []*types.Type { var typs [131]*types.Type typs[0] = types.Bytetype typs[1] = types.NewPtr(typs[0]) - typs[2] = types.Types[TANY] + typs[2] = types.Types[types.TANY] typs[3] = types.NewPtr(typs[2]) - typs[4] = functype(nil, []*Node{anonfield(typs[1])}, []*Node{anonfield(typs[3])}) - typs[5] = types.Types[TUINTPTR] - typs[6] = types.Types[TBOOL] - typs[7] = types.Types[TUNSAFEPTR] - typs[8] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []*Node{anonfield(typs[7])}) + typs[4] = functype(nil, []*ir.Node{anonfield(typs[1])}, []*ir.Node{anonfield(typs[3])}) + typs[5] = types.Types[types.TUINTPTR] + typs[6] = types.Types[types.TBOOL] + typs[7] = types.Types[types.TUNSAFEPTR] + typs[8] = functype(nil, []*ir.Node{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []*ir.Node{anonfield(typs[7])}) typs[9] = functype(nil, nil, nil) - typs[10] = types.Types[TINTER] - typs[11] = functype(nil, []*Node{anonfield(typs[10])}, nil) - typs[12] = types.Types[TINT32] + typs[10] = types.Types[types.TINTER] + typs[11] = functype(nil, []*ir.Node{anonfield(typs[10])}, nil) + typs[12] = types.Types[types.TINT32] typs[13] = types.NewPtr(typs[12]) - typs[14] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[10])}) - typs[15] = types.Types[TINT] - typs[16] = functype(nil, []*Node{anonfield(typs[15]), anonfield(typs[15])}, nil) - typs[17] = types.Types[TUINT] - typs[18] = functype(nil, []*Node{anonfield(typs[17]), anonfield(typs[15])}, nil) - typs[19] = functype(nil, []*Node{anonfield(typs[6])}, nil) - typs[20] = types.Types[TFLOAT64] - typs[21] = functype(nil, []*Node{anonfield(typs[20])}, nil) - typs[22] = types.Types[TINT64] - typs[23] = functype(nil, []*Node{anonfield(typs[22])}, nil) - typs[24] = types.Types[TUINT64] - typs[25] = functype(nil, []*Node{anonfield(typs[24])}, nil) - typs[26] = types.Types[TCOMPLEX128] - typs[27] = functype(nil, []*Node{anonfield(typs[26])}, nil) - typs[28] = types.Types[TSTRING] - typs[29] = functype(nil, []*Node{anonfield(typs[28])}, nil) - typs[30] = functype(nil, []*Node{anonfield(typs[2])}, nil) - typs[31] = functype(nil, []*Node{anonfield(typs[5])}, nil) + typs[14] = functype(nil, []*ir.Node{anonfield(typs[13])}, []*ir.Node{anonfield(typs[10])}) + typs[15] = types.Types[types.TINT] + typs[16] = functype(nil, []*ir.Node{anonfield(typs[15]), anonfield(typs[15])}, nil) + typs[17] = types.Types[types.TUINT] + typs[18] = functype(nil, []*ir.Node{anonfield(typs[17]), anonfield(typs[15])}, nil) + typs[19] = functype(nil, []*ir.Node{anonfield(typs[6])}, nil) + typs[20] = types.Types[types.TFLOAT64] + typs[21] = functype(nil, []*ir.Node{anonfield(typs[20])}, nil) + typs[22] = types.Types[types.TINT64] + typs[23] = 
functype(nil, []*ir.Node{anonfield(typs[22])}, nil) + typs[24] = types.Types[types.TUINT64] + typs[25] = functype(nil, []*ir.Node{anonfield(typs[24])}, nil) + typs[26] = types.Types[types.TCOMPLEX128] + typs[27] = functype(nil, []*ir.Node{anonfield(typs[26])}, nil) + typs[28] = types.Types[types.TSTRING] + typs[29] = functype(nil, []*ir.Node{anonfield(typs[28])}, nil) + typs[30] = functype(nil, []*ir.Node{anonfield(typs[2])}, nil) + typs[31] = functype(nil, []*ir.Node{anonfield(typs[5])}, nil) typs[32] = types.NewArray(typs[0], 32) typs[33] = types.NewPtr(typs[32]) - typs[34] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])}) - typs[35] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])}) - typs[36] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])}) - typs[37] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])}) + typs[34] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[28])}) + typs[35] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[28])}) + typs[36] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[28])}) + typs[37] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[28])}) typs[38] = types.NewSlice(typs[28]) - typs[39] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[38])}, []*Node{anonfield(typs[28])}) - typs[40] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[15])}) + typs[39] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[38])}, []*ir.Node{anonfield(typs[28])}) + typs[40] = functype(nil, []*ir.Node{anonfield(typs[28]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[15])}) typs[41] = types.NewArray(typs[0], 4) typs[42] = types.NewPtr(typs[41]) - typs[43] = functype(nil, []*Node{anonfield(typs[42]), anonfield(typs[22])}, []*Node{anonfield(typs[28])}) - typs[44] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])}) - typs[45] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])}) + typs[43] = functype(nil, []*ir.Node{anonfield(typs[42]), anonfield(typs[22])}, []*ir.Node{anonfield(typs[28])}) + typs[44] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[28])}) + typs[45] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[28])}) typs[46] = types.Runetype typs[47] = types.NewSlice(typs[46]) - typs[48] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[47])}, []*Node{anonfield(typs[28])}) + typs[48] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[47])}, []*ir.Node{anonfield(typs[28])}) typs[49] = types.NewSlice(typs[0]) - typs[50] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28])}, 
[]*Node{anonfield(typs[49])}) + typs[50] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[49])}) typs[51] = types.NewArray(typs[46], 32) typs[52] = types.NewPtr(typs[51]) - typs[53] = functype(nil, []*Node{anonfield(typs[52]), anonfield(typs[28])}, []*Node{anonfield(typs[47])}) - typs[54] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*Node{anonfield(typs[15])}) - typs[55] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[15])}, []*Node{anonfield(typs[46]), anonfield(typs[15])}) - typs[56] = functype(nil, []*Node{anonfield(typs[28])}, []*Node{anonfield(typs[15])}) - typs[57] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])}) - typs[58] = functype(nil, []*Node{anonfield(typs[2])}, []*Node{anonfield(typs[7])}) - typs[59] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])}) - typs[60] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[6])}) - typs[61] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil) - typs[62] = functype(nil, []*Node{anonfield(typs[1])}, nil) + typs[53] = functype(nil, []*ir.Node{anonfield(typs[52]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[47])}) + typs[54] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*ir.Node{anonfield(typs[15])}) + typs[55] = functype(nil, []*ir.Node{anonfield(typs[28]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[46]), anonfield(typs[15])}) + typs[56] = functype(nil, []*ir.Node{anonfield(typs[28])}, []*ir.Node{anonfield(typs[15])}) + typs[57] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[2])}, []*ir.Node{anonfield(typs[2])}) + typs[58] = functype(nil, []*ir.Node{anonfield(typs[2])}, []*ir.Node{anonfield(typs[7])}) + typs[59] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[2])}) + typs[60] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[2])}, []*ir.Node{anonfield(typs[2]), anonfield(typs[6])}) + typs[61] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil) + typs[62] = functype(nil, []*ir.Node{anonfield(typs[1])}, nil) typs[63] = types.NewPtr(typs[5]) - typs[64] = functype(nil, []*Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])}) - typs[65] = types.Types[TUINT32] - typs[66] = functype(nil, nil, []*Node{anonfield(typs[65])}) + typs[64] = functype(nil, []*ir.Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*ir.Node{anonfield(typs[6])}) + typs[65] = types.Types[types.TUINT32] + typs[66] = functype(nil, nil, []*ir.Node{anonfield(typs[65])}) typs[67] = types.NewMap(typs[2], typs[2]) - typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*Node{anonfield(typs[67])}) - typs[69] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*Node{anonfield(typs[67])}) - typs[70] = functype(nil, nil, []*Node{anonfield(typs[67])}) - typs[71] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3])}) - typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3])}) - typs[73] = functype(nil, 
[]*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])}) - typs[74] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[6])}) - typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[6])}) - typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[6])}) - typs[77] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil) - typs[78] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil) - typs[79] = functype(nil, []*Node{anonfield(typs[3])}, nil) - typs[80] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67])}, nil) + typs[68] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[67])}) + typs[69] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[67])}) + typs[70] = functype(nil, nil, []*ir.Node{anonfield(typs[67])}) + typs[71] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[3])}) + typs[72] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*ir.Node{anonfield(typs[3])}) + typs[73] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*ir.Node{anonfield(typs[3])}) + typs[74] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[3]), anonfield(typs[6])}) + typs[75] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*ir.Node{anonfield(typs[3]), anonfield(typs[6])}) + typs[76] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*ir.Node{anonfield(typs[3]), anonfield(typs[6])}) + typs[77] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil) + typs[78] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil) + typs[79] = functype(nil, []*ir.Node{anonfield(typs[3])}, nil) + typs[80] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67])}, nil) typs[81] = types.NewChan(typs[2], types.Cboth) - typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22])}, []*Node{anonfield(typs[81])}) - typs[83] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[81])}) + typs[82] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[22])}, []*ir.Node{anonfield(typs[81])}) + typs[83] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[81])}) typs[84] = types.NewChan(typs[2], types.Crecv) - typs[85] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, nil) - typs[86] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, []*Node{anonfield(typs[6])}) + typs[85] = functype(nil, []*ir.Node{anonfield(typs[84]), anonfield(typs[3])}, nil) + typs[86] = functype(nil, []*ir.Node{anonfield(typs[84]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[6])}) typs[87] = types.NewChan(typs[2], types.Csend) - typs[88] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, nil) 
+ typs[88] = functype(nil, []*ir.Node{anonfield(typs[87]), anonfield(typs[3])}, nil) typs[89] = types.NewArray(typs[0], 3) - typs[90] = tostruct([]*Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])}) - typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil) - typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil) - typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*Node{anonfield(typs[15])}) - typs[94] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, []*Node{anonfield(typs[6])}) - typs[95] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[84])}, []*Node{anonfield(typs[6])}) + typs[90] = tostruct([]*ir.Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])}) + typs[91] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil) + typs[92] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[3])}, nil) + typs[93] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[15])}) + typs[94] = functype(nil, []*ir.Node{anonfield(typs[87]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[6])}) + typs[95] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[84])}, []*ir.Node{anonfield(typs[6])}) typs[96] = types.NewPtr(typs[6]) - typs[97] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*Node{anonfield(typs[6])}) - typs[98] = functype(nil, []*Node{anonfield(typs[63])}, nil) - typs[99] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*Node{anonfield(typs[15]), anonfield(typs[6])}) - typs[100] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[7])}) - typs[101] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[7])}) - typs[102] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*Node{anonfield(typs[7])}) + typs[97] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*ir.Node{anonfield(typs[6])}) + typs[98] = functype(nil, []*ir.Node{anonfield(typs[63])}, nil) + typs[99] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*ir.Node{anonfield(typs[15]), anonfield(typs[6])}) + typs[100] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[7])}) + typs[101] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*ir.Node{anonfield(typs[7])}) + typs[102] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*ir.Node{anonfield(typs[7])}) typs[103] = types.NewSlice(typs[2]) - typs[104] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*Node{anonfield(typs[103])}) - typs[105] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), 
anonfield(typs[5])}, nil) - typs[106] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, nil) - typs[107] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*Node{anonfield(typs[6])}) - typs[108] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[6])}) - typs[109] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])}) - typs[110] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*Node{anonfield(typs[5])}) - typs[111] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, []*Node{anonfield(typs[5])}) - typs[112] = functype(nil, []*Node{anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[22])}) - typs[113] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, []*Node{anonfield(typs[24])}) - typs[114] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[22])}) - typs[115] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[24])}) - typs[116] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[65])}) - typs[117] = functype(nil, []*Node{anonfield(typs[22])}, []*Node{anonfield(typs[20])}) - typs[118] = functype(nil, []*Node{anonfield(typs[24])}, []*Node{anonfield(typs[20])}) - typs[119] = functype(nil, []*Node{anonfield(typs[65])}, []*Node{anonfield(typs[20])}) - typs[120] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[26])}, []*Node{anonfield(typs[26])}) - typs[121] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[5])}, nil) - typs[122] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil) + typs[104] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[103])}) + typs[105] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil) + typs[106] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[5])}, nil) + typs[107] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*ir.Node{anonfield(typs[6])}) + typs[108] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[6])}) + typs[109] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[7])}, []*ir.Node{anonfield(typs[6])}) + typs[110] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*ir.Node{anonfield(typs[5])}) + typs[111] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[5])}, []*ir.Node{anonfield(typs[5])}) + typs[112] = functype(nil, []*ir.Node{anonfield(typs[22]), anonfield(typs[22])}, []*ir.Node{anonfield(typs[22])}) + typs[113] = functype(nil, []*ir.Node{anonfield(typs[24]), anonfield(typs[24])}, []*ir.Node{anonfield(typs[24])}) + typs[114] = functype(nil, []*ir.Node{anonfield(typs[20])}, []*ir.Node{anonfield(typs[22])}) + typs[115] = functype(nil, []*ir.Node{anonfield(typs[20])}, []*ir.Node{anonfield(typs[24])}) + typs[116] = functype(nil, []*ir.Node{anonfield(typs[20])}, []*ir.Node{anonfield(typs[65])}) + typs[117] = functype(nil, []*ir.Node{anonfield(typs[22])}, []*ir.Node{anonfield(typs[20])}) + typs[118] = functype(nil, []*ir.Node{anonfield(typs[24])}, []*ir.Node{anonfield(typs[20])}) + typs[119] = functype(nil, []*ir.Node{anonfield(typs[65])}, []*ir.Node{anonfield(typs[20])}) + typs[120] = functype(nil, []*ir.Node{anonfield(typs[26]), anonfield(typs[26])}, []*ir.Node{anonfield(typs[26])}) + 
typs[121] = functype(nil, []*ir.Node{anonfield(typs[5]), anonfield(typs[5])}, nil) + typs[122] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil) typs[123] = types.NewSlice(typs[7]) - typs[124] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[123])}, nil) - typs[125] = types.Types[TUINT8] - typs[126] = functype(nil, []*Node{anonfield(typs[125]), anonfield(typs[125])}, nil) - typs[127] = types.Types[TUINT16] - typs[128] = functype(nil, []*Node{anonfield(typs[127]), anonfield(typs[127])}, nil) - typs[129] = functype(nil, []*Node{anonfield(typs[65]), anonfield(typs[65])}, nil) - typs[130] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, nil) + typs[124] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[123])}, nil) + typs[125] = types.Types[types.TUINT8] + typs[126] = functype(nil, []*ir.Node{anonfield(typs[125]), anonfield(typs[125])}, nil) + typs[127] = types.Types[types.TUINT16] + typs[128] = functype(nil, []*ir.Node{anonfield(typs[127]), anonfield(typs[127])}, nil) + typs[129] = functype(nil, []*ir.Node{anonfield(typs[65]), anonfield(typs[65])}, nil) + typs[130] = functype(nil, []*ir.Node{anonfield(typs[24]), anonfield(typs[24])}, nil) return typs[:] } diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index ad255c9c06a6c..e68d7103637fa 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -6,24 +6,25 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/syntax" "cmd/compile/internal/types" "cmd/internal/src" "fmt" ) -func (p *noder) funcLit(expr *syntax.FuncLit) *Node { +func (p *noder) funcLit(expr *syntax.FuncLit) *ir.Node { xtype := p.typeExpr(expr.Type) ntype := p.typeExpr(expr.Type) - dcl := p.nod(expr, ODCLFUNC, nil, nil) + dcl := p.nod(expr, ir.ODCLFUNC, nil, nil) fn := dcl.Func fn.SetIsHiddenClosure(Curfn != nil) - fn.Nname = newfuncnamel(p.pos(expr), nblank.Sym, fn) // filled in by typecheckclosure + fn.Nname = newfuncnamel(p.pos(expr), ir.BlankNode.Sym, fn) // filled in by typecheckclosure fn.Nname.Name.Param.Ntype = xtype fn.Nname.Name.Defn = dcl - clo := p.nod(expr, OCLOSURE, nil, nil) + clo := p.nod(expr, ir.OCLOSURE, nil, nil) clo.Func = fn fn.ClosureType = ntype fn.OClosure = clo @@ -77,7 +78,7 @@ func (p *noder) funcLit(expr *syntax.FuncLit) *Node { // function associated with the closure. // TODO: This creation of the named function should probably really be done in a // separate pass from type-checking. -func typecheckclosure(clo *Node, top int) { +func typecheckclosure(clo *ir.Node, top int) { fn := clo.Func dcl := fn.Decl // Set current associated iota value, so iota can be used inside @@ -139,7 +140,7 @@ var globClosgen int // closurename generates a new unique name for a closure within // outerfunc. -func closurename(outerfunc *Node) *types.Sym { +func closurename(outerfunc *ir.Node) *types.Sym { outer := "glob." prefix := "func" gen := &globClosgen @@ -149,12 +150,12 @@ func closurename(outerfunc *Node) *types.Sym { prefix = "" } - outer = outerfunc.funcname() + outer = ir.FuncName(outerfunc) // There may be multiple functions named "_". In those // cases, we can't use their individual Closgens as it // would lead to name clashes. - if !outerfunc.Func.Nname.isBlank() { + if !ir.IsBlank(outerfunc.Func.Nname) { gen = &outerfunc.Func.Closgen } } @@ -171,7 +172,7 @@ var capturevarscomplete bool // by value or by reference. 
// We use value capturing for values <= 128 bytes that are never reassigned // after capturing (effectively constant). -func capturevars(dcl *Node) { +func capturevars(dcl *ir.Node) { lno := base.Pos base.Pos = dcl.Pos fn := dcl.Func @@ -197,11 +198,11 @@ func capturevars(dcl *Node) { outermost := v.Name.Defn // out parameters will be assigned to implicitly upon return. - if outermost.Class() != PPARAMOUT && !outermost.Name.Addrtaken() && !outermost.Name.Assigned() && v.Type.Width <= 128 { + if outermost.Class() != ir.PPARAMOUT && !outermost.Name.Addrtaken() && !outermost.Name.Assigned() && v.Type.Width <= 128 { v.Name.SetByval(true) } else { outermost.Name.SetAddrtaken(true) - outer = nod(OADDR, outer, nil) + outer = ir.Nod(ir.OADDR, outer, nil) } if base.Flag.LowerM > 1 { @@ -226,7 +227,7 @@ func capturevars(dcl *Node) { // transformclosure is called in a separate phase after escape analysis. // It transform closure bodies to properly reference captured variables. -func transformclosure(dcl *Node) { +func transformclosure(dcl *ir.Node) { lno := base.Pos base.Pos = dcl.Pos fn := dcl.Func @@ -252,24 +253,24 @@ func transformclosure(dcl *Node) { // We are going to insert captured variables before input args. var params []*types.Field - var decls []*Node + var decls []*ir.Node for _, v := range fn.ClosureVars.Slice() { if !v.Name.Byval() { // If v of type T is captured by reference, // we introduce function param &v *T // and v remains PAUTOHEAP with &v heapaddr // (accesses will implicitly deref &v). - addr := newname(lookup("&" + v.Sym.Name)) + addr := NewName(lookup("&" + v.Sym.Name)) addr.Type = types.NewPtr(v.Type) v.Name.Param.Heapaddr = addr v = addr } - v.SetClass(PPARAM) + v.SetClass(ir.PPARAM) decls = append(decls, v) fld := types.NewField(src.NoXPos, v.Sym, v.Type) - fld.Nname = asTypesNode(v) + fld.Nname = ir.AsTypesNode(v) params = append(params, fld) } @@ -283,11 +284,11 @@ func transformclosure(dcl *Node) { dcl.Type = f.Type // update type of ODCLFUNC } else { // The closure is not called, so it is going to stay as closure. - var body []*Node + var body []*ir.Node offset := int64(Widthptr) for _, v := range fn.ClosureVars.Slice() { // cv refers to the field inside of closure OSTRUCTLIT. - cv := nod(OCLOSUREVAR, nil, nil) + cv := ir.Nod(ir.OCLOSUREVAR, nil, nil) cv.Type = v.Type if !v.Name.Byval() { @@ -299,23 +300,23 @@ func transformclosure(dcl *Node) { if v.Name.Byval() && v.Type.Width <= int64(2*Widthptr) { // If it is a small variable captured by value, downgrade it to PAUTO. - v.SetClass(PAUTO) + v.SetClass(ir.PAUTO) fn.Dcl = append(fn.Dcl, v) - body = append(body, nod(OAS, v, cv)) + body = append(body, ir.Nod(ir.OAS, v, cv)) } else { // Declare variable holding addresses taken from closure // and initialize in entry prologue. - addr := newname(lookup("&" + v.Sym.Name)) + addr := NewName(lookup("&" + v.Sym.Name)) addr.Type = types.NewPtr(v.Type) - addr.SetClass(PAUTO) + addr.SetClass(ir.PAUTO) addr.Name.SetUsed(true) addr.Name.Curfn = dcl fn.Dcl = append(fn.Dcl, addr) v.Name.Param.Heapaddr = addr if v.Name.Byval() { - cv = nod(OADDR, cv, nil) + cv = ir.Nod(ir.OADDR, cv, nil) } - body = append(body, nod(OAS, addr, cv)) + body = append(body, ir.Nod(ir.OAS, addr, cv)) } } @@ -331,13 +332,13 @@ func transformclosure(dcl *Node) { // hasemptycvars reports whether closure clo has an // empty list of captured vars. 
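// For example, a literal like func() int { return 42 } captures no
// variables; walkclosure relies on this to skip building a closure
// struct for such functions.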
-func hasemptycvars(clo *Node) bool { +func hasemptycvars(clo *ir.Node) bool { return clo.Func.ClosureVars.Len() == 0 } // closuredebugruntimecheck applies boilerplate checks for debug flags // and compiling runtime -func closuredebugruntimecheck(clo *Node) { +func closuredebugruntimecheck(clo *ir.Node) { if base.Debug.Closure > 0 { if clo.Esc == EscHeap { base.WarnfAt(clo.Pos, "heap closure, captured vars = %v", clo.Func.ClosureVars) @@ -353,7 +354,7 @@ func closuredebugruntimecheck(clo *Node) { // closureType returns the struct type used to hold all the information // needed in the closure for clo (clo must be a OCLOSURE node). // The address of a variable of the returned type can be cast to a func. -func closureType(clo *Node) *types.Type { +func closureType(clo *ir.Node) *types.Type { // Create closure in the form of a composite literal. // supposing the closure captures an int i and a string s // and has one float64 argument and no results, @@ -367,8 +368,8 @@ func closureType(clo *Node) *types.Type { // The information appears in the binary in the form of type descriptors; // the struct is unnamed so that closures in multiple packages with the // same struct type can share the descriptor. - fields := []*Node{ - namedfield(".F", types.Types[TUINTPTR]), + fields := []*ir.Node{ + namedfield(".F", types.Types[types.TUINTPTR]), } for _, v := range clo.Func.ClosureVars.Slice() { typ := v.Type @@ -382,7 +383,7 @@ func closureType(clo *Node) *types.Type { return typ } -func walkclosure(clo *Node, init *Nodes) *Node { +func walkclosure(clo *ir.Node, init *ir.Nodes) *ir.Node { fn := clo.Func // If no closure vars, don't bother wrapping. @@ -396,11 +397,11 @@ func walkclosure(clo *Node, init *Nodes) *Node { typ := closureType(clo) - clos := nod(OCOMPLIT, nil, typenod(typ)) + clos := ir.Nod(ir.OCOMPLIT, nil, typenod(typ)) clos.Esc = clo.Esc - clos.List.Set(append([]*Node{nod(OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...)) + clos.List.Set(append([]*ir.Node{ir.Nod(ir.OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...)) - clos = nod(OADDR, clos, nil) + clos = ir.Nod(ir.OADDR, clos, nil) clos.Esc = clo.Esc // Force type conversion from *struct to the func type. @@ -418,9 +419,9 @@ func walkclosure(clo *Node, init *Nodes) *Node { return walkexpr(clos, init) } -func typecheckpartialcall(dot *Node, sym *types.Sym) { +func typecheckpartialcall(dot *ir.Node, sym *types.Sym) { switch dot.Op { - case ODOTINTER, ODOTMETH: + case ir.ODOTINTER, ir.ODOTMETH: break default: @@ -430,8 +431,8 @@ func typecheckpartialcall(dot *Node, sym *types.Sym) { // Create top-level function. dcl := makepartialcall(dot, dot.Type, sym) dcl.Func.SetWrapper(true) - dot.Op = OCALLPART - dot.Right = newname(sym) + dot.Op = ir.OCALLPART + dot.Right = NewName(sym) dot.Type = dcl.Type dot.Func = dcl.Func dot.SetOpt(nil) // clear types.Field from ODOTMETH @@ -439,12 +440,12 @@ func typecheckpartialcall(dot *Node, sym *types.Sym) { // makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed // for partial calls. -func makepartialcall(dot *Node, t0 *types.Type, meth *types.Sym) *Node { +func makepartialcall(dot *ir.Node, t0 *types.Type, meth *types.Sym) *ir.Node { rcvrtype := dot.Left.Type sym := methodSymSuffix(rcvrtype, meth, "-fm") if sym.Uniq() { - return asNode(sym.Def) + return ir.AsNode(sym.Def) } sym.SetUniq(true) @@ -463,7 +464,7 @@ func makepartialcall(dot *Node, t0 *types.Type, meth *types.Sym) *Node { // number at the use of the method expression in this // case. See issue 29389. 
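	// The wrapper assembled below copies t0's parameters and results,
	// loads the receiver out of the closure word into ".this", and
	// calls the real method. As a rough sketch, not literal generated
	// source:
	//
	//	func wrapper(args...) (results...) {
	//		this := <receiver from closure>
	//		return this.M(args...)
	//	}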
- tfn := nod(OTFUNC, nil, nil) + tfn := ir.Nod(ir.OTFUNC, nil, nil) tfn.List.Set(structargs(t0.Params(), true)) tfn.Rlist.Set(structargs(t0.Results(), false)) @@ -476,27 +477,27 @@ func makepartialcall(dot *Node, t0 *types.Type, meth *types.Sym) *Node { // Declare and initialize variable holding receiver. - cv := nod(OCLOSUREVAR, nil, nil) + cv := ir.Nod(ir.OCLOSUREVAR, nil, nil) cv.Type = rcvrtype cv.Xoffset = Rnd(int64(Widthptr), int64(cv.Type.Align)) - ptr := newname(lookup(".this")) - declare(ptr, PAUTO) + ptr := NewName(lookup(".this")) + declare(ptr, ir.PAUTO) ptr.Name.SetUsed(true) - var body []*Node + var body []*ir.Node if rcvrtype.IsPtr() || rcvrtype.IsInterface() { ptr.Type = rcvrtype - body = append(body, nod(OAS, ptr, cv)) + body = append(body, ir.Nod(ir.OAS, ptr, cv)) } else { ptr.Type = types.NewPtr(rcvrtype) - body = append(body, nod(OAS, ptr, nod(OADDR, cv, nil))) + body = append(body, ir.Nod(ir.OAS, ptr, ir.Nod(ir.OADDR, cv, nil))) } - call := nod(OCALL, nodSym(OXDOT, ptr, meth), nil) + call := ir.Nod(ir.OCALL, nodSym(ir.OXDOT, ptr, meth), nil) call.List.Set(paramNnames(tfn.Type)) call.SetIsDDD(tfn.Type.IsVariadic()) if t0.NumResults() != 0 { - n := nod(ORETURN, nil, nil) + n := ir.Nod(ir.ORETURN, nil, nil) n.List.Set1(call) call = n } @@ -510,7 +511,7 @@ func makepartialcall(dot *Node, t0 *types.Type, meth *types.Sym) *Node { // typecheckslice() requires that Curfn is set when processing an ORETURN. Curfn = dcl typecheckslice(dcl.Nbody.Slice(), ctxStmt) - sym.Def = asTypesNode(dcl) + sym.Def = ir.AsTypesNode(dcl) xtop = append(xtop, dcl) Curfn = savecurfn base.Pos = saveLineNo @@ -521,16 +522,16 @@ func makepartialcall(dot *Node, t0 *types.Type, meth *types.Sym) *Node { // partialCallType returns the struct type used to hold all the information // needed in the closure for n (n must be a OCALLPART node). // The address of a variable of the returned type can be cast to a func. -func partialCallType(n *Node) *types.Type { - t := tostruct([]*Node{ - namedfield("F", types.Types[TUINTPTR]), +func partialCallType(n *ir.Node) *types.Type { + t := tostruct([]*ir.Node{ + namedfield("F", types.Types[types.TUINTPTR]), namedfield("R", n.Left.Type), }) t.SetNoalg(true) return t } -func walkpartialcall(n *Node, init *Nodes) *Node { +func walkpartialcall(n *ir.Node, init *ir.Nodes) *ir.Node { // Create closure in the form of a composite literal. // For x.M with receiver (x) type T, the generated code looks like: // @@ -544,21 +545,21 @@ func walkpartialcall(n *Node, init *Nodes) *Node { n.Left = cheapexpr(n.Left, init) n.Left = walkexpr(n.Left, nil) - tab := nod(OITAB, n.Left, nil) + tab := ir.Nod(ir.OITAB, n.Left, nil) tab = typecheck(tab, ctxExpr) - c := nod(OCHECKNIL, tab, nil) + c := ir.Nod(ir.OCHECKNIL, tab, nil) c.SetTypecheck(1) init.Append(c) } typ := partialCallType(n) - clos := nod(OCOMPLIT, nil, typenod(typ)) + clos := ir.Nod(ir.OCOMPLIT, nil, typenod(typ)) clos.Esc = n.Esc - clos.List.Set2(nod(OCFUNC, n.Func.Nname, nil), n.Left) + clos.List.Set2(ir.Nod(ir.OCFUNC, n.Func.Nname, nil), n.Left) - clos = nod(OADDR, clos, nil) + clos = ir.Nod(ir.OADDR, clos, nil) clos.Esc = n.Esc // Force type conversion from *struct to the func type. @@ -578,8 +579,8 @@ func walkpartialcall(n *Node, init *Nodes) *Node { // callpartMethod returns the *types.Field representing the method // referenced by method value n. 
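// The method's Sym is recorded on n.Right by typecheckpartialcall, so
// the field can be looked up again on the receiver's type; e.g. for
// x.M the search is for the name M on x's type.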
-func callpartMethod(n *Node) *types.Field { - if n.Op != OCALLPART { +func callpartMethod(n *ir.Node) *types.Field { + if n.Op != ir.OCALLPART { base.Fatalf("expected OCALLPART, got %v", n) } diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 98473b4cfb015..a557e20d46b4d 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/src" "fmt" @@ -23,51 +24,6 @@ const ( Mpprec = 512 ) -// ValueInterface returns the constant value stored in n as an interface{}. -// It returns int64s for ints and runes, float64s for floats, -// and complex128s for complex values. -func (n *Node) ValueInterface() interface{} { - switch v := n.Val(); v.Kind() { - default: - base.Fatalf("unexpected constant: %v", v) - panic("unreachable") - case constant.Bool: - return constant.BoolVal(v) - case constant.String: - return constant.StringVal(v) - case constant.Int: - return int64Val(n.Type, v) - case constant.Float: - return float64Val(v) - case constant.Complex: - return complex(float64Val(constant.Real(v)), float64Val(constant.Imag(v))) - } -} - -// int64Val returns v converted to int64. -// Note: if t is uint64, very large values will be converted to negative int64. -func int64Val(t *types.Type, v constant.Value) int64 { - if t.IsUnsigned() { - if x, ok := constant.Uint64Val(v); ok { - return int64(x) - } - } else { - if x, ok := constant.Int64Val(v); ok { - return x - } - } - base.Fatalf("%v out of range for %v", v, t) - panic("unreachable") -} - -func float64Val(v constant.Value) float64 { - if x, _ := constant.Float64Val(v); !math.IsInf(x, 0) { - return x + 0 // avoid -0 (should not be needed, but be conservative) - } - base.Fatalf("bad float64 value: %v", v) - panic("unreachable") -} - func bigFloatVal(v constant.Value) *big.Float { f := new(big.Float) f.SetPrec(Mpprec) @@ -86,62 +42,6 @@ func bigFloatVal(v constant.Value) *big.Float { return f } -// Int64Val returns n as an int64. -// n must be an integer or rune constant. -func (n *Node) Int64Val() int64 { - if !Isconst(n, constant.Int) { - base.Fatalf("Int64Val(%v)", n) - } - x, ok := constant.Int64Val(n.Val()) - if !ok { - base.Fatalf("Int64Val(%v)", n) - } - return x -} - -// CanInt64 reports whether it is safe to call Int64Val() on n. -func (n *Node) CanInt64() bool { - if !Isconst(n, constant.Int) { - return false - } - - // if the value inside n cannot be represented as an int64, the - // return value of Int64 is undefined - _, ok := constant.Int64Val(n.Val()) - return ok -} - -// Uint64Val returns n as an uint64. -// n must be an integer or rune constant. -func (n *Node) Uint64Val() uint64 { - if !Isconst(n, constant.Int) { - base.Fatalf("Uint64Val(%v)", n) - } - x, ok := constant.Uint64Val(n.Val()) - if !ok { - base.Fatalf("Uint64Val(%v)", n) - } - return x -} - -// BoolVal returns n as a bool. -// n must be a boolean constant. -func (n *Node) BoolVal() bool { - if !Isconst(n, constant.Bool) { - base.Fatalf("BoolVal(%v)", n) - } - return constant.BoolVal(n.Val()) -} - -// StringVal returns the value of a literal string Node as a string. -// n must be a string constant. 
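// The replacement helpers live in package ir; a call site that read
// n.ValueInterface() now reads
//
//	ir.ConstValue(n)
//
// and int64Val(t, v) becomes ir.Int64Val(t, v), as in indexconst and
// constSet.add below.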
-func (n *Node) StringVal() string { - if !Isconst(n, constant.String) { - base.Fatalf("StringVal(%v)", n) - } - return constant.StringVal(n.Val()) -} - func roundFloat(v constant.Value, sz int64) constant.Value { switch sz { case 4: @@ -184,8 +84,8 @@ func trunccmplxlit(v constant.Value, t *types.Type) constant.Value { } // TODO(mdempsky): Replace these with better APIs. -func convlit(n *Node, t *types.Type) *Node { return convlit1(n, t, false, nil) } -func defaultlit(n *Node, t *types.Type) *Node { return convlit1(n, t, false, nil) } +func convlit(n *ir.Node, t *types.Type) *ir.Node { return convlit1(n, t, false, nil) } +func defaultlit(n *ir.Node, t *types.Type) *ir.Node { return convlit1(n, t, false, nil) } // convlit1 converts an untyped expression n to type t. If n already // has a type, convlit1 has no effect. @@ -198,7 +98,7 @@ func defaultlit(n *Node, t *types.Type) *Node { return convlit1(n, t, false, nil // // If there's an error converting n to t, context is used in the error // message. -func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Node { +func convlit1(n *ir.Node, t *types.Type, explicit bool, context func() string) *ir.Node { if explicit && t == nil { base.Fatalf("explicit conversion missing type") } @@ -215,15 +115,15 @@ func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Nod return n } - if n.Op == OLITERAL || n.Op == ONIL { + if n.Op == ir.OLITERAL || n.Op == ir.ONIL { // Can't always set n.Type directly on OLITERAL nodes. // See discussion on CL 20813. - n = n.rawcopy() + n = n.RawCopy() } // Nil is technically not a constant, so handle it specially. - if n.Type.Etype == TNIL { - if n.Op != ONIL { + if n.Type.Etype == types.TNIL { + if n.Op != ir.ONIL { base.Fatalf("unexpected op: %v (%v)", n, n.Op) } if t == nil { @@ -242,7 +142,7 @@ func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Nod return n } - if t == nil || !okforconst[t.Etype] { + if t == nil || !ir.OKForConst[t.Etype] { t = defaultType(n.Type) } @@ -250,7 +150,7 @@ func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Nod default: base.Fatalf("unexpected untyped expression: %v", n) - case OLITERAL: + case ir.OLITERAL: v := convertVal(n.Val(), t, explicit) if v.Kind() == constant.Unknown { break @@ -259,7 +159,7 @@ func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Nod n.SetVal(v) return n - case OPLUS, ONEG, OBITNOT, ONOT, OREAL, OIMAG: + case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.OREAL, ir.OIMAG: ot := operandType(n.Op, t) if ot == nil { n = defaultlit(n, nil) @@ -274,7 +174,7 @@ func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Nod n.Type = t return n - case OADD, OSUB, OMUL, ODIV, OMOD, OOR, OXOR, OAND, OANDNOT, OOROR, OANDAND, OCOMPLEX: + case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT, ir.OOROR, ir.OANDAND, ir.OCOMPLEX: ot := operandType(n.Op, t) if ot == nil { n = defaultlit(n, nil) @@ -296,14 +196,14 @@ func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Nod n.Type = t return n - case OEQ, ONE, OLT, OLE, OGT, OGE: + case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: if !t.IsBoolean() { break } n.Type = t return n - case OLSH, ORSH: + case ir.OLSH, ir.ORSH: n.Left = convlit1(n.Left, t, explicit, nil) n.Type = n.Left.Type if n.Type != nil && !n.Type.IsInteger() { @@ -329,13 +229,13 @@ func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Nod return n } 
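// At the language level, the conversion and defaulting rules in this
// file play out like this standalone snippet (an illustration only,
// not compiler API):
//
//	const c = 1 << 40     // untyped integer constant
//	x := c / 1e2          // untyped constant arithmetic stays exact
//	fmt.Printf("%T\n", x) // float64: UntypedFloat defaults to float64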
-func operandType(op Op, t *types.Type) *types.Type { +func operandType(op ir.Op, t *types.Type) *types.Type { switch op { - case OCOMPLEX: + case ir.OCOMPLEX: if t.IsComplex() { return floatForComplex(t) } - case OREAL, OIMAG: + case ir.OREAL, ir.OIMAG: if t.IsFloat() { return complexForFloat(t) } @@ -488,7 +388,7 @@ func overflow(v constant.Value, t *types.Type) bool { return true } if doesoverflow(v, t) { - base.Errorf("constant %v overflows %v", vconv(v, 0), t) + base.Errorf("constant %v overflows %v", ir.FmtConst(v, 0), t) return true } return false @@ -505,57 +405,46 @@ func tostr(v constant.Value) constant.Value { return v } -func consttype(n *Node) constant.Kind { - if n == nil || n.Op != OLITERAL { - return constant.Unknown - } - return n.Val().Kind() -} - -func Isconst(n *Node, ct constant.Kind) bool { - return consttype(n) == ct -} - var tokenForOp = [...]token.Token{ - OPLUS: token.ADD, - ONEG: token.SUB, - ONOT: token.NOT, - OBITNOT: token.XOR, - - OADD: token.ADD, - OSUB: token.SUB, - OMUL: token.MUL, - ODIV: token.QUO, - OMOD: token.REM, - OOR: token.OR, - OXOR: token.XOR, - OAND: token.AND, - OANDNOT: token.AND_NOT, - OOROR: token.LOR, - OANDAND: token.LAND, - - OEQ: token.EQL, - ONE: token.NEQ, - OLT: token.LSS, - OLE: token.LEQ, - OGT: token.GTR, - OGE: token.GEQ, - - OLSH: token.SHL, - ORSH: token.SHR, + ir.OPLUS: token.ADD, + ir.ONEG: token.SUB, + ir.ONOT: token.NOT, + ir.OBITNOT: token.XOR, + + ir.OADD: token.ADD, + ir.OSUB: token.SUB, + ir.OMUL: token.MUL, + ir.ODIV: token.QUO, + ir.OMOD: token.REM, + ir.OOR: token.OR, + ir.OXOR: token.XOR, + ir.OAND: token.AND, + ir.OANDNOT: token.AND_NOT, + ir.OOROR: token.LOR, + ir.OANDAND: token.LAND, + + ir.OEQ: token.EQL, + ir.ONE: token.NEQ, + ir.OLT: token.LSS, + ir.OLE: token.LEQ, + ir.OGT: token.GTR, + ir.OGE: token.GEQ, + + ir.OLSH: token.SHL, + ir.ORSH: token.SHR, } // evalConst returns a constant-evaluated expression equivalent to n. // If n is not a constant, evalConst returns n. // Otherwise, evalConst returns a new OLITERAL with the same value as n, // and with .Orig pointing back to n. -func evalConst(n *Node) *Node { +func evalConst(n *ir.Node) *ir.Node { nl, nr := n.Left, n.Right // Pick off just the opcodes that can be constant evaluated. 
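	// For example, an expression such as 2 + 3i*2 arrives here as
	// OADD/OMUL over OLITERAL operands and folds, case by case, into
	// a single OLITERAL holding 2+6i.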
switch op := n.Op; op { - case OPLUS, ONEG, OBITNOT, ONOT: - if nl.Op == OLITERAL { + case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT: + if nl.Op == ir.OLITERAL { var prec uint if n.Type.IsUnsigned() { prec = uint(n.Type.Size() * 8) @@ -563,36 +452,36 @@ func evalConst(n *Node) *Node { return origConst(n, constant.UnaryOp(tokenForOp[op], nl.Val(), prec)) } - case OADD, OSUB, OMUL, ODIV, OMOD, OOR, OXOR, OAND, OANDNOT, OOROR, OANDAND: - if nl.Op == OLITERAL && nr.Op == OLITERAL { + case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT, ir.OOROR, ir.OANDAND: + if nl.Op == ir.OLITERAL && nr.Op == ir.OLITERAL { rval := nr.Val() // check for divisor underflow in complex division (see issue 20227) - if op == ODIV && n.Type.IsComplex() && constant.Sign(square(constant.Real(rval))) == 0 && constant.Sign(square(constant.Imag(rval))) == 0 { + if op == ir.ODIV && n.Type.IsComplex() && constant.Sign(square(constant.Real(rval))) == 0 && constant.Sign(square(constant.Imag(rval))) == 0 { base.Errorf("complex division by zero") n.Type = nil return n } - if (op == ODIV || op == OMOD) && constant.Sign(rval) == 0 { + if (op == ir.ODIV || op == ir.OMOD) && constant.Sign(rval) == 0 { base.Errorf("division by zero") n.Type = nil return n } tok := tokenForOp[op] - if op == ODIV && n.Type.IsInteger() { + if op == ir.ODIV && n.Type.IsInteger() { tok = token.QUO_ASSIGN // integer division } return origConst(n, constant.BinaryOp(nl.Val(), tok, rval)) } - case OEQ, ONE, OLT, OLE, OGT, OGE: - if nl.Op == OLITERAL && nr.Op == OLITERAL { + case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: + if nl.Op == ir.OLITERAL && nr.Op == ir.OLITERAL { return origBoolConst(n, constant.Compare(nl.Val(), tokenForOp[op], nr.Val())) } - case OLSH, ORSH: - if nl.Op == OLITERAL && nr.Op == OLITERAL { + case ir.OLSH, ir.ORSH: + if nl.Op == ir.OLITERAL && nr.Op == ir.OLITERAL { // shiftBound from go/types; "so we can express smallestFloat64" const shiftBound = 1023 - 1 + 52 s, ok := constant.Uint64Val(nr.Val()) @@ -604,24 +493,24 @@ func evalConst(n *Node) *Node { return origConst(n, constant.Shift(toint(nl.Val()), tokenForOp[op], uint(s))) } - case OCONV, ORUNESTR: - if okforconst[n.Type.Etype] && nl.Op == OLITERAL { + case ir.OCONV, ir.ORUNESTR: + if ir.OKForConst[n.Type.Etype] && nl.Op == ir.OLITERAL { return origConst(n, convertVal(nl.Val(), n.Type, true)) } - case OCONVNOP: - if okforconst[n.Type.Etype] && nl.Op == OLITERAL { + case ir.OCONVNOP: + if ir.OKForConst[n.Type.Etype] && nl.Op == ir.OLITERAL { // set so n.Orig gets OCONV instead of OCONVNOP - n.Op = OCONV + n.Op = ir.OCONV return origConst(n, nl.Val()) } - case OADDSTR: + case ir.OADDSTR: // Merge adjacent constants in the argument list. s := n.List.Slice() need := 0 for i := 0; i < len(s); i++ { - if i == 0 || !Isconst(s[i-1], constant.String) || !Isconst(s[i], constant.String) { + if i == 0 || !ir.IsConst(s[i-1], constant.String) || !ir.IsConst(s[i], constant.String) { // Can't merge s[i] into s[i-1]; need a slot in the list. 
need++ } @@ -636,13 +525,13 @@ func evalConst(n *Node) *Node { } return origConst(n, constant.MakeString(strings.Join(strs, ""))) } - newList := make([]*Node, 0, need) + newList := make([]*ir.Node, 0, need) for i := 0; i < len(s); i++ { - if Isconst(s[i], constant.String) && i+1 < len(s) && Isconst(s[i+1], constant.String) { + if ir.IsConst(s[i], constant.String) && i+1 < len(s) && ir.IsConst(s[i+1], constant.String) { // merge from i up to but not including i2 var strs []string i2 := i - for i2 < len(s) && Isconst(s[i2], constant.String) { + for i2 < len(s) && ir.IsConst(s[i2], constant.String) { strs = append(strs, s[i2].StringVal()) i2++ } @@ -656,37 +545,37 @@ func evalConst(n *Node) *Node { } } - n = n.copy() + n = ir.Copy(n) n.List.Set(newList) return n - case OCAP, OLEN: + case ir.OCAP, ir.OLEN: switch nl.Type.Etype { - case TSTRING: - if Isconst(nl, constant.String) { + case types.TSTRING: + if ir.IsConst(nl, constant.String) { return origIntConst(n, int64(len(nl.StringVal()))) } - case TARRAY: + case types.TARRAY: if !hascallchan(nl) { return origIntConst(n, nl.Type.NumElem()) } } - case OALIGNOF, OOFFSETOF, OSIZEOF: + case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF: return origIntConst(n, evalunsafe(n)) - case OREAL: - if nl.Op == OLITERAL { + case ir.OREAL: + if nl.Op == ir.OLITERAL { return origConst(n, constant.Real(nl.Val())) } - case OIMAG: - if nl.Op == OLITERAL { + case ir.OIMAG: + if nl.Op == ir.OLITERAL { return origConst(n, constant.Imag(nl.Val())) } - case OCOMPLEX: - if nl.Op == OLITERAL && nr.Op == OLITERAL { + case ir.OCOMPLEX: + if nl.Op == ir.OLITERAL && nr.Op == ir.OLITERAL { return origConst(n, makeComplex(nl.Val(), nr.Val())) } } @@ -721,16 +610,16 @@ func square(x constant.Value) constant.Value { // For matching historical "constant OP overflow" error messages. // TODO(mdempsky): Replace with error messages like go/types uses. var overflowNames = [...]string{ - OADD: "addition", - OSUB: "subtraction", - OMUL: "multiplication", - OLSH: "shift", - OXOR: "bitwise XOR", - OBITNOT: "bitwise complement", + ir.OADD: "addition", + ir.OSUB: "subtraction", + ir.OMUL: "multiplication", + ir.OLSH: "shift", + ir.OXOR: "bitwise XOR", + ir.OBITNOT: "bitwise complement", } // origConst returns an OLITERAL with orig n and value v. 
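// Keeping Orig lets diagnostics print the expression as written:
// folding 10/2, for instance, yields an OLITERAL with value 5 whose
// Orig still shows "10 / 2".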
-func origConst(n *Node, v constant.Value) *Node { +func origConst(n *ir.Node, v constant.Value) *ir.Node { lno := setlineno(n) v = convertVal(v, n.Type, false) base.Pos = lno @@ -752,81 +641,28 @@ func origConst(n *Node, v constant.Value) *Node { } orig := n - n = nodl(orig.Pos, OLITERAL, nil, nil) + n = ir.NodAt(orig.Pos, ir.OLITERAL, nil, nil) n.Orig = orig n.Type = orig.Type n.SetVal(v) return n } -func assertRepresents(t *types.Type, v constant.Value) { - if !represents(t, v) { - base.Fatalf("%v does not represent %v", t, v) - } -} - -func represents(t *types.Type, v constant.Value) bool { - switch v.Kind() { - case constant.Unknown: - return okforconst[t.Etype] - case constant.Bool: - return t.IsBoolean() - case constant.String: - return t.IsString() - case constant.Int: - return t.IsInteger() - case constant.Float: - return t.IsFloat() - case constant.Complex: - return t.IsComplex() - } - - base.Fatalf("unexpected constant kind: %v", v) - panic("unreachable") -} - -func origBoolConst(n *Node, v bool) *Node { +func origBoolConst(n *ir.Node, v bool) *ir.Node { return origConst(n, constant.MakeBool(v)) } -func origIntConst(n *Node, v int64) *Node { +func origIntConst(n *ir.Node, v int64) *ir.Node { return origConst(n, constant.MakeInt64(v)) } -// nodlit returns a new untyped constant with value v. -func nodlit(v constant.Value) *Node { - n := nod(OLITERAL, nil, nil) - if k := v.Kind(); k != constant.Unknown { - n.Type = idealType(k) - n.SetVal(v) - } - return n -} - -func idealType(ct constant.Kind) *types.Type { - switch ct { - case constant.String: - return types.UntypedString - case constant.Bool: - return types.UntypedBool - case constant.Int: - return types.UntypedInt - case constant.Float: - return types.UntypedFloat - case constant.Complex: - return types.UntypedComplex - } - base.Fatalf("unexpected Ctype: %v", ct) - return nil -} - // defaultlit on both nodes simultaneously; // if they're both ideal going in they better // get the same type going out. // force means must assign concrete (non-ideal) type. // The results of defaultlit2 MUST be assigned back to l and r, e.g. 
// n.Left, n.Right = defaultlit2(n.Left, n.Right, force) -func defaultlit2(l *Node, r *Node, force bool) (*Node, *Node) { +func defaultlit2(l *ir.Node, r *ir.Node, force bool) (*ir.Node, *ir.Node) { if l.Type == nil || r.Type == nil { return l, r } @@ -851,7 +687,7 @@ func defaultlit2(l *Node, r *Node, force bool) (*Node, *Node) { if l.Type.IsString() != r.Type.IsString() { return l, r } - if l.isNil() || r.isNil() { + if ir.IsNil(l) || ir.IsNil(r) { return l, r } @@ -888,31 +724,31 @@ func mixUntyped(t1, t2 *types.Type) *types.Type { } func defaultType(t *types.Type) *types.Type { - if !t.IsUntyped() || t.Etype == TNIL { + if !t.IsUntyped() || t.Etype == types.TNIL { return t } switch t { case types.UntypedBool: - return types.Types[TBOOL] + return types.Types[types.TBOOL] case types.UntypedString: - return types.Types[TSTRING] + return types.Types[types.TSTRING] case types.UntypedInt: - return types.Types[TINT] + return types.Types[types.TINT] case types.UntypedRune: return types.Runetype case types.UntypedFloat: - return types.Types[TFLOAT64] + return types.Types[types.TFLOAT64] case types.UntypedComplex: - return types.Types[TCOMPLEX128] + return types.Types[types.TCOMPLEX128] } base.Fatalf("bad type %v", t) return nil } -func smallintconst(n *Node) bool { - if n.Op == OLITERAL { +func smallintconst(n *ir.Node) bool { + if n.Op == ir.OLITERAL { v, ok := constant.Int64Val(n.Val()) return ok && int64(int32(v)) == v } @@ -924,11 +760,11 @@ func smallintconst(n *Node) bool { // If n is not a constant expression, not representable as an // integer, or negative, it returns -1. If n is too large, it // returns -2. -func indexconst(n *Node) int64 { - if n.Op != OLITERAL { +func indexconst(n *ir.Node) int64 { + if n.Op != ir.OLITERAL { return -1 } - if !n.Type.IsInteger() && n.Type.Etype != TIDEAL { + if !n.Type.IsInteger() && n.Type.Etype != types.TIDEAL { return -1 } @@ -936,10 +772,10 @@ func indexconst(n *Node) int64 { if v.Kind() != constant.Int || constant.Sign(v) < 0 { return -1 } - if doesoverflow(v, types.Types[TINT]) { + if doesoverflow(v, types.Types[types.TINT]) { return -2 } - return int64Val(types.Types[TINT], v) + return ir.Int64Val(types.Types[types.TINT], v) } // isGoConst reports whether n is a Go language constant (as opposed to a @@ -947,35 +783,35 @@ func indexconst(n *Node) int64 { // // Expressions derived from nil, like string([]byte(nil)), while they // may be known at compile time, are not Go language constants. -func (n *Node) isGoConst() bool { - return n.Op == OLITERAL +func isGoConst(n *ir.Node) bool { + return n.Op == ir.OLITERAL } -func hascallchan(n *Node) bool { +func hascallchan(n *ir.Node) bool { if n == nil { return false } switch n.Op { - case OAPPEND, - OCALL, - OCALLFUNC, - OCALLINTER, - OCALLMETH, - OCAP, - OCLOSE, - OCOMPLEX, - OCOPY, - ODELETE, - OIMAG, - OLEN, - OMAKE, - ONEW, - OPANIC, - OPRINT, - OPRINTN, - OREAL, - ORECOVER, - ORECV: + case ir.OAPPEND, + ir.OCALL, + ir.OCALLFUNC, + ir.OCALLINTER, + ir.OCALLMETH, + ir.OCAP, + ir.OCLOSE, + ir.OCOMPLEX, + ir.OCOPY, + ir.ODELETE, + ir.OIMAG, + ir.OLEN, + ir.OMAKE, + ir.ONEW, + ir.OPANIC, + ir.OPRINT, + ir.OPRINTN, + ir.OREAL, + ir.ORECOVER, + ir.ORECV: return true } @@ -1015,12 +851,12 @@ type constSetKey struct { // where are used in the error message. // // n must not be an untyped constant. 
-func (s *constSet) add(pos src.XPos, n *Node, what, where string) { - if n.Op == OCONVIFACE && n.Implicit() { +func (s *constSet) add(pos src.XPos, n *ir.Node, what, where string) { + if n.Op == ir.OCONVIFACE && n.Implicit() { n = n.Left } - if !n.isGoConst() { + if !isGoConst(n) { return } if n.Type.IsUntyped() { @@ -1045,11 +881,11 @@ func (s *constSet) add(pos src.XPos, n *Node, what, where string) { typ := n.Type switch typ { case types.Bytetype: - typ = types.Types[TUINT8] + typ = types.Types[types.TUINT8] case types.Runetype: - typ = types.Types[TINT32] + typ = types.Types[types.TINT32] } - k := constSetKey{typ, n.ValueInterface()} + k := constSetKey{typ, ir.ConstValue(n)} if hasUniquePos(n) { pos = n.Pos @@ -1072,9 +908,9 @@ func (s *constSet) add(pos src.XPos, n *Node, what, where string) { // the latter is non-obvious. // // TODO(mdempsky): This could probably be a fmt.go flag. -func nodeAndVal(n *Node) string { +func nodeAndVal(n *ir.Node) string { show := n.String() - val := n.ValueInterface() + val := ir.ConstValue(n) if s := fmt.Sprintf("%#v", val); show != s { show += " (value " + s + ")" } diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 63a52a9f364cb..6fee872fd2d4c 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -7,6 +7,7 @@ package gc import ( "bytes" "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/src" @@ -17,7 +18,7 @@ import ( // Declaration stack & operations -var externdcl []*Node +var externdcl []*ir.Node func testdclstack() { if !types.IsDclstackValid() { @@ -58,25 +59,25 @@ var declare_typegen int // declare records that Node n declares symbol n.Sym in the specified // declaration context. -func declare(n *Node, ctxt Class) { - if n.isBlank() { +func declare(n *ir.Node, ctxt ir.Class) { + if ir.IsBlank(n) { return } if n.Name == nil { // named OLITERAL needs Name; most OLITERALs don't. - n.Name = new(Name) + n.Name = new(ir.Name) } s := n.Sym // kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later. - if !inimport && !typecheckok && s.Pkg != localpkg { + if !inimport && !typecheckok && s.Pkg != ir.LocalPkg { base.ErrorfAt(n.Pos, "cannot declare name %v", s) } gen := 0 - if ctxt == PEXTERN { + if ctxt == ir.PEXTERN { if s.Name == "init" { base.ErrorfAt(n.Pos, "cannot declare init - must be func") } @@ -85,17 +86,17 @@ func declare(n *Node, ctxt Class) { } externdcl = append(externdcl, n) } else { - if Curfn == nil && ctxt == PAUTO { + if Curfn == nil && ctxt == ir.PAUTO { base.Pos = n.Pos base.Fatalf("automatic outside function") } - if Curfn != nil && ctxt != PFUNC { + if Curfn != nil && ctxt != ir.PFUNC { Curfn.Func.Dcl = append(Curfn.Func.Dcl, n) } - if n.Op == OTYPE { + if n.Op == ir.OTYPE { declare_typegen++ gen = declare_typegen - } else if n.Op == ONAME && ctxt == PAUTO && !strings.Contains(s.Name, "·") { + } else if n.Op == ir.ONAME && ctxt == ir.PAUTO && !strings.Contains(s.Name, "·") { vargen++ gen = vargen } @@ -103,58 +104,58 @@ func declare(n *Node, ctxt Class) { n.Name.Curfn = Curfn } - if ctxt == PAUTO { + if ctxt == ir.PAUTO { n.Xoffset = 0 } if s.Block == types.Block { // functype will print errors about duplicate function arguments. // Don't repeat the error here. 
- if ctxt != PPARAM && ctxt != PPARAMOUT { + if ctxt != ir.PPARAM && ctxt != ir.PPARAMOUT { redeclare(n.Pos, s, "in this block") } } s.Block = types.Block s.Lastlineno = base.Pos - s.Def = asTypesNode(n) + s.Def = ir.AsTypesNode(n) n.Name.Vargen = int32(gen) n.SetClass(ctxt) - if ctxt == PFUNC { + if ctxt == ir.PFUNC { n.Sym.SetFunc(true) } autoexport(n, ctxt) } -func addvar(n *Node, t *types.Type, ctxt Class) { - if n == nil || n.Sym == nil || (n.Op != ONAME && n.Op != ONONAME) || t == nil { +func addvar(n *ir.Node, t *types.Type, ctxt ir.Class) { + if n == nil || n.Sym == nil || (n.Op != ir.ONAME && n.Op != ir.ONONAME) || t == nil { base.Fatalf("addvar: n=%v t=%v nil", n, t) } - n.Op = ONAME + n.Op = ir.ONAME declare(n, ctxt) n.Type = t } // declare variables from grammar // new_name_list (type | [type] = expr_list) -func variter(vl []*Node, t *Node, el []*Node) []*Node { - var init []*Node +func variter(vl []*ir.Node, t *ir.Node, el []*ir.Node) []*ir.Node { + var init []*ir.Node doexpr := len(el) > 0 if len(el) == 1 && len(vl) > 1 { e := el[0] - as2 := nod(OAS2, nil, nil) + as2 := ir.Nod(ir.OAS2, nil, nil) as2.List.Set(vl) as2.Rlist.Set1(e) for _, v := range vl { - v.Op = ONAME + v.Op = ir.ONAME declare(v, dclcontext) v.Name.Param.Ntype = t v.Name.Defn = as2 if Curfn != nil { - init = append(init, nod(ODCL, v, nil)) + init = append(init, ir.Nod(ir.ODCL, v, nil)) } } @@ -163,7 +164,7 @@ func variter(vl []*Node, t *Node, el []*Node) []*Node { nel := len(el) for _, v := range vl { - var e *Node + var e *ir.Node if doexpr { if len(el) == 0 { base.Errorf("assignment mismatch: %d variables but %d values", len(vl), nel) @@ -173,15 +174,15 @@ func variter(vl []*Node, t *Node, el []*Node) []*Node { el = el[1:] } - v.Op = ONAME + v.Op = ir.ONAME declare(v, dclcontext) v.Name.Param.Ntype = t - if e != nil || Curfn != nil || v.isBlank() { + if e != nil || Curfn != nil || ir.IsBlank(v) { if Curfn != nil { - init = append(init, nod(ODCL, v, nil)) + init = append(init, ir.Nod(ir.ODCL, v, nil)) } - e = nod(OAS, v, e) + e = ir.Nod(ir.OAS, v, e) init = append(init, e) if e.Right != nil { v.Name.Defn = e @@ -196,22 +197,22 @@ func variter(vl []*Node, t *Node, el []*Node) []*Node { } // newnoname returns a new ONONAME Node associated with symbol s. -func newnoname(s *types.Sym) *Node { +func newnoname(s *types.Sym) *ir.Node { if s == nil { base.Fatalf("newnoname nil") } - n := nod(ONONAME, nil, nil) + n := ir.Nod(ir.ONONAME, nil, nil) n.Sym = s n.Xoffset = 0 return n } // newfuncnamel generates a new name node for a function or method. -func newfuncnamel(pos src.XPos, s *types.Sym, fn *Func) *Node { +func newfuncnamel(pos src.XPos, s *types.Sym, fn *ir.Func) *ir.Node { if fn.Nname != nil { base.Fatalf("newfuncnamel - already have name") } - n := newnamel(pos, s) + n := ir.NewNameAt(pos, s) n.Func = fn fn.Nname = n return n @@ -219,39 +220,39 @@ func newfuncnamel(pos src.XPos, s *types.Sym, fn *Func) *Node { // this generates a new name node for a name // being declared. 
-func dclname(s *types.Sym) *Node { - n := newname(s) - n.Op = ONONAME // caller will correct it +func dclname(s *types.Sym) *ir.Node { + n := NewName(s) + n.Op = ir.ONONAME // caller will correct it return n } -func typenod(t *types.Type) *Node { +func typenod(t *types.Type) *ir.Node { return typenodl(src.NoXPos, t) } -func typenodl(pos src.XPos, t *types.Type) *Node { +func typenodl(pos src.XPos, t *types.Type) *ir.Node { // if we copied another type with *t = *u // then t->nod might be out of date, so // check t->nod->type too - if asNode(t.Nod) == nil || asNode(t.Nod).Type != t { - t.Nod = asTypesNode(nodl(pos, OTYPE, nil, nil)) - asNode(t.Nod).Type = t - asNode(t.Nod).Sym = t.Sym + if ir.AsNode(t.Nod) == nil || ir.AsNode(t.Nod).Type != t { + t.Nod = ir.AsTypesNode(ir.NodAt(pos, ir.OTYPE, nil, nil)) + ir.AsNode(t.Nod).Type = t + ir.AsNode(t.Nod).Sym = t.Sym } - return asNode(t.Nod) + return ir.AsNode(t.Nod) } -func anonfield(typ *types.Type) *Node { +func anonfield(typ *types.Type) *ir.Node { return symfield(nil, typ) } -func namedfield(s string, typ *types.Type) *Node { +func namedfield(s string, typ *types.Type) *ir.Node { return symfield(lookup(s), typ) } -func symfield(s *types.Sym, typ *types.Type) *Node { - n := nodSym(ODCLFIELD, nil, s) +func symfield(s *types.Sym, typ *types.Type) *ir.Node { + n := nodSym(ir.ODCLFIELD, nil, s) n.Type = typ return n } @@ -260,8 +261,8 @@ func symfield(s *types.Sym, typ *types.Type) *Node { // If no such Node currently exists, an ONONAME Node is returned instead. // Automatically creates a new closure variable if the referenced symbol was // declared in a different (containing) function. -func oldname(s *types.Sym) *Node { - n := asNode(s.Def) +func oldname(s *types.Sym) *ir.Node { + n := ir.AsNode(s.Def) if n == nil { // Maybe a top-level declaration will come along later to // define s. resolve will check s.Def again once all input @@ -269,7 +270,7 @@ func oldname(s *types.Sym) *Node { return newnoname(s) } - if Curfn != nil && n.Op == ONAME && n.Name.Curfn != nil && n.Name.Curfn != Curfn { + if Curfn != nil && n.Op == ir.ONAME && n.Name.Curfn != nil && n.Name.Curfn != Curfn { // Inner func is referring to var in outer func. // // TODO(rsc): If there is an outer variable x and we @@ -279,8 +280,8 @@ func oldname(s *types.Sym) *Node { c := n.Name.Param.Innermost if c == nil || c.Name.Curfn != Curfn { // Do not have a closure var for the active closure yet; make one. - c = newname(s) - c.SetClass(PAUTOHEAP) + c = NewName(s) + c.SetClass(ir.PAUTOHEAP) c.Name.SetIsClosureVar(true) c.SetIsDDD(n.IsDDD()) c.Name.Defn = n @@ -301,9 +302,9 @@ func oldname(s *types.Sym) *Node { } // importName is like oldname, but it reports an error if sym is from another package and not exported. 
-func importName(sym *types.Sym) *Node { +func importName(sym *types.Sym) *ir.Node { n := oldname(sym) - if !types.IsExported(sym.Name) && sym.Pkg != localpkg { + if !types.IsExported(sym.Name) && sym.Pkg != ir.LocalPkg { n.SetDiag(true) base.Errorf("cannot refer to unexported name %s.%s", sym.Pkg.Name, sym.Name) } @@ -311,20 +312,20 @@ func importName(sym *types.Sym) *Node { } // := declarations -func colasname(n *Node) bool { +func colasname(n *ir.Node) bool { switch n.Op { - case ONAME, - ONONAME, - OPACK, - OTYPE, - OLITERAL: + case ir.ONAME, + ir.ONONAME, + ir.OPACK, + ir.OTYPE, + ir.OLITERAL: return n.Sym != nil } return false } -func colasdefn(left []*Node, defn *Node) { +func colasdefn(left []*ir.Node, defn *ir.Node) { for _, n := range left { if n.Sym != nil { n.Sym.SetUniq(true) @@ -333,7 +334,7 @@ func colasdefn(left []*Node, defn *Node) { var nnew, nerr int for i, n := range left { - if n.isBlank() { + if ir.IsBlank(n) { continue } if !colasname(n) { @@ -355,10 +356,10 @@ func colasdefn(left []*Node, defn *Node) { } nnew++ - n = newname(n.Sym) + n = NewName(n.Sym) declare(n, dclcontext) n.Name.Defn = defn - defn.Ninit.Append(nod(ODCL, n, nil)) + defn.Ninit.Append(ir.Nod(ir.ODCL, n, nil)) left[i] = n } @@ -369,8 +370,8 @@ func colasdefn(left []*Node, defn *Node) { // declare the arguments in an // interface field declaration. -func ifacedcl(n *Node) { - if n.Op != ODCLFIELD || n.Left == nil { +func ifacedcl(n *ir.Node) { + if n.Op != ir.ODCLFIELD || n.Left == nil { base.Fatalf("ifacedcl") } @@ -383,11 +384,11 @@ func ifacedcl(n *Node) { // and declare the arguments. // called in extern-declaration context // returns in auto-declaration context. -func funchdr(n *Node) { +func funchdr(n *ir.Node) { // change the declaration context from extern to auto funcStack = append(funcStack, funcStackEnt{Curfn, dclcontext}) Curfn = n - dclcontext = PAUTO + dclcontext = ir.PAUTO types.Markdcl() @@ -398,8 +399,8 @@ func funchdr(n *Node) { } } -func funcargs(nt *Node) { - if nt.Op != OTFUNC { +func funcargs(nt *ir.Node) { + if nt.Op != ir.OTFUNC { base.Fatalf("funcargs %v", nt.Op) } @@ -414,10 +415,10 @@ func funcargs(nt *Node) { // declare the receiver and in arguments. if nt.Left != nil { - funcarg(nt.Left, PPARAM) + funcarg(nt.Left, ir.PPARAM) } for _, n := range nt.List.Slice() { - funcarg(n, PPARAM) + funcarg(n, ir.PPARAM) } oldvargen := vargen @@ -442,21 +443,21 @@ func funcargs(nt *Node) { gen++ } - funcarg(n, PPARAMOUT) + funcarg(n, ir.PPARAMOUT) } vargen = oldvargen } -func funcarg(n *Node, ctxt Class) { - if n.Op != ODCLFIELD { +func funcarg(n *ir.Node, ctxt ir.Class) { + if n.Op != ir.ODCLFIELD { base.Fatalf("funcarg %v", n.Op) } if n.Sym == nil { return } - n.Right = newnamel(n.Pos, n.Sym) + n.Right = ir.NewNameAt(n.Pos, n.Sym) n.Right.Name.Param.Ntype = n.Left n.Right.SetIsDDD(n.IsDDD()) declare(n.Right, ctxt) @@ -469,27 +470,27 @@ func funcarg(n *Node, ctxt Class) { // This happens during import, where the hidden_fndcl rule has // used functype directly to parse the function's type. 
func funcargs2(t *types.Type) { - if t.Etype != TFUNC { + if t.Etype != types.TFUNC { base.Fatalf("funcargs2 %v", t) } for _, f := range t.Recvs().Fields().Slice() { - funcarg2(f, PPARAM) + funcarg2(f, ir.PPARAM) } for _, f := range t.Params().Fields().Slice() { - funcarg2(f, PPARAM) + funcarg2(f, ir.PPARAM) } for _, f := range t.Results().Fields().Slice() { - funcarg2(f, PPARAMOUT) + funcarg2(f, ir.PPARAMOUT) } } -func funcarg2(f *types.Field, ctxt Class) { +func funcarg2(f *types.Field, ctxt ir.Class) { if f.Sym == nil { return } - n := newnamel(f.Pos, f.Sym) - f.Nname = asTypesNode(n) + n := ir.NewNameAt(f.Pos, f.Sym) + f.Nname = ir.AsTypesNode(n) n.Type = f.Type n.SetIsDDD(f.IsDDD()) declare(n, ctxt) @@ -498,8 +499,8 @@ func funcarg2(f *types.Field, ctxt Class) { var funcStack []funcStackEnt // stack of previous values of Curfn/dclcontext type funcStackEnt struct { - curfn *Node - dclcontext Class + curfn *ir.Node + dclcontext ir.Class } // finish the body. @@ -529,16 +530,16 @@ func checkembeddedtype(t *types.Type) { if t.IsPtr() || t.IsUnsafePtr() { base.Errorf("embedded type cannot be a pointer") - } else if t.Etype == TFORW && !t.ForwardType().Embedlineno.IsKnown() { + } else if t.Etype == types.TFORW && !t.ForwardType().Embedlineno.IsKnown() { t.ForwardType().Embedlineno = base.Pos } } -func structfield(n *Node) *types.Field { +func structfield(n *ir.Node) *types.Field { lno := base.Pos base.Pos = n.Pos - if n.Op != ODCLFIELD { + if n.Op != ir.ODCLFIELD { base.Fatalf("structfield: oops %v\n", n) } @@ -581,8 +582,8 @@ func checkdupfields(what string, fss ...[]*types.Field) { // convert a parsed id/type list into // a type for struct/interface/arglist -func tostruct(l []*Node) *types.Type { - t := types.New(TSTRUCT) +func tostruct(l []*ir.Node) *types.Type { + t := types.New(types.TSTRUCT) fields := make([]*types.Field, len(l)) for i, n := range l { @@ -603,8 +604,8 @@ func tostruct(l []*Node) *types.Type { return t } -func tofunargs(l []*Node, funarg types.Funarg) *types.Type { - t := types.New(TSTRUCT) +func tofunargs(l []*ir.Node, funarg types.Funarg) *types.Type { + t := types.New(types.TSTRUCT) t.StructType().Funarg = funarg fields := make([]*types.Field, len(l)) @@ -613,7 +614,7 @@ func tofunargs(l []*Node, funarg types.Funarg) *types.Type { f.SetIsDDD(n.IsDDD()) if n.Right != nil { n.Right.Type = f.Type - f.Nname = asTypesNode(n.Right) + f.Nname = ir.AsTypesNode(n.Right) } if f.Broke() { t.SetBroke(true) @@ -625,17 +626,17 @@ func tofunargs(l []*Node, funarg types.Funarg) *types.Type { } func tofunargsfield(fields []*types.Field, funarg types.Funarg) *types.Type { - t := types.New(TSTRUCT) + t := types.New(types.TSTRUCT) t.StructType().Funarg = funarg t.SetFields(fields) return t } -func interfacefield(n *Node) *types.Field { +func interfacefield(n *ir.Node) *types.Field { lno := base.Pos base.Pos = n.Pos - if n.Op != ODCLFIELD { + if n.Op != ir.ODCLFIELD { base.Fatalf("interfacefield: oops %v\n", n) } @@ -660,11 +661,11 @@ func interfacefield(n *Node) *types.Field { return f } -func tointerface(l []*Node) *types.Type { +func tointerface(l []*ir.Node) *types.Type { if len(l) == 0 { - return types.Types[TINTER] + return types.Types[types.TINTER] } - t := types.New(TINTER) + t := types.New(types.TINTER) var fields []*types.Field for _, n := range l { f := interfacefield(n) @@ -677,7 +678,7 @@ func tointerface(l []*Node) *types.Type { return t } -func fakeRecv() *Node { +func fakeRecv() *ir.Node { return anonfield(types.FakeRecvType()) } @@ -693,12 +694,12 @@ func 
isifacemethod(f *types.Type) bool { } // turn a parsed function declaration into a type -func functype(this *Node, in, out []*Node) *types.Type { - t := types.New(TFUNC) +func functype(this *ir.Node, in, out []*ir.Node) *types.Type { + t := types.New(types.TFUNC) - var rcvr []*Node + var rcvr []*ir.Node if this != nil { - rcvr = []*Node{this} + rcvr = []*ir.Node{this} } t.FuncType().Receiver = tofunargs(rcvr, types.FunargRcvr) t.FuncType().Params = tofunargs(in, types.FunargParams) @@ -710,13 +711,13 @@ func functype(this *Node, in, out []*Node) *types.Type { t.SetBroke(true) } - t.FuncType().Outnamed = t.NumResults() > 0 && origSym(t.Results().Field(0).Sym) != nil + t.FuncType().Outnamed = t.NumResults() > 0 && ir.OrigSym(t.Results().Field(0).Sym) != nil return t } func functypefield(this *types.Field, in, out []*types.Field) *types.Type { - t := types.New(TFUNC) + t := types.New(types.TFUNC) var rcvr []*types.Field if this != nil { @@ -726,36 +727,11 @@ func functypefield(this *types.Field, in, out []*types.Field) *types.Type { t.FuncType().Params = tofunargsfield(in, types.FunargParams) t.FuncType().Results = tofunargsfield(out, types.FunargResults) - t.FuncType().Outnamed = t.NumResults() > 0 && origSym(t.Results().Field(0).Sym) != nil + t.FuncType().Outnamed = t.NumResults() > 0 && ir.OrigSym(t.Results().Field(0).Sym) != nil return t } -// origSym returns the original symbol written by the user. -func origSym(s *types.Sym) *types.Sym { - if s == nil { - return nil - } - - if len(s.Name) > 1 && s.Name[0] == '~' { - switch s.Name[1] { - case 'r': // originally an unnamed result - return nil - case 'b': // originally the blank identifier _ - // TODO(mdempsky): Does s.Pkg matter here? - return nblank.Sym - } - return s - } - - if strings.HasPrefix(s.Name, ".anon") { - // originally an unnamed or _ name (see subr.go: structargs) - return nil - } - - return s -} - // methodSym returns the method symbol representing a method name // associated with a specific receiver type. // @@ -823,7 +799,7 @@ func methodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sy // - msym is the method symbol // - t is function type (with receiver) // Returns a pointer to the existing or added Field; or nil if there's an error. -func addmethod(n *Node, msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field { +func addmethod(n *ir.Node, msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field { if msym == nil { base.Fatalf("no method symbol") } @@ -864,7 +840,7 @@ func addmethod(n *Node, msym *types.Sym, t *types.Type, local, nointerface bool) return nil } - if local && mt.Sym.Pkg != localpkg { + if local && mt.Sym.Pkg != ir.LocalPkg { base.Errorf("cannot define new methods on non-local type %v", mt) return nil } @@ -896,7 +872,7 @@ func addmethod(n *Node, msym *types.Sym, t *types.Type, local, nointerface bool) } f := types.NewField(base.Pos, msym, t) - f.Nname = asTypesNode(n.Func.Nname) + f.Nname = ir.AsTypesNode(n.Func.Nname) f.SetNointerface(nointerface) mt.Methods().Append(f) @@ -959,21 +935,21 @@ func makefuncsym(s *types.Sym) { } // setNodeNameFunc marks a node as a function. 
-func setNodeNameFunc(n *Node) { - if n.Op != ONAME || n.Class() != Pxxx { +func setNodeNameFunc(n *ir.Node) { + if n.Op != ir.ONAME || n.Class() != ir.Pxxx { base.Fatalf("expected ONAME/Pxxx node, got %v", n) } - n.SetClass(PFUNC) + n.SetClass(ir.PFUNC) n.Sym.SetFunc(true) } -func dclfunc(sym *types.Sym, tfn *Node) *Node { - if tfn.Op != OTFUNC { +func dclfunc(sym *types.Sym, tfn *ir.Node) *ir.Node { + if tfn.Op != ir.OTFUNC { base.Fatalf("expected OTFUNC node, got %v", tfn) } - fn := nod(ODCLFUNC, nil, nil) + fn := ir.Nod(ir.ODCLFUNC, nil, nil) fn.Func.Nname = newfuncnamel(base.Pos, sym, fn.Func) fn.Func.Nname.Name.Defn = fn fn.Func.Nname.Name.Param.Ntype = tfn @@ -987,27 +963,22 @@ type nowritebarrierrecChecker struct { // extraCalls contains extra function calls that may not be // visible during later analysis. It maps from the ODCLFUNC of // the caller to a list of callees. - extraCalls map[*Node][]nowritebarrierrecCall + extraCalls map[*ir.Node][]nowritebarrierrecCall // curfn is the current function during AST walks. - curfn *Node + curfn *ir.Node } type nowritebarrierrecCall struct { - target *Node // ODCLFUNC of caller or callee + target *ir.Node // ODCLFUNC of caller or callee lineno src.XPos // line of call } -type nowritebarrierrecCallSym struct { - target *obj.LSym // LSym of callee - lineno src.XPos // line of call -} - // newNowritebarrierrecChecker creates a nowritebarrierrecChecker. It // must be called before transformclosure and walk. func newNowritebarrierrecChecker() *nowritebarrierrecChecker { c := &nowritebarrierrecChecker{ - extraCalls: make(map[*Node][]nowritebarrierrecCall), + extraCalls: make(map[*ir.Node][]nowritebarrierrecCall), } // Find all systemstack calls and record their targets. In @@ -1016,39 +987,39 @@ func newNowritebarrierrecChecker() *nowritebarrierrecChecker { // directly. This has to happen before transformclosure since // it's a lot harder to work out the argument after. for _, n := range xtop { - if n.Op != ODCLFUNC { + if n.Op != ir.ODCLFUNC { continue } c.curfn = n - inspect(n, c.findExtraCalls) + ir.Inspect(n, c.findExtraCalls) } c.curfn = nil return c } -func (c *nowritebarrierrecChecker) findExtraCalls(n *Node) bool { - if n.Op != OCALLFUNC { +func (c *nowritebarrierrecChecker) findExtraCalls(n *ir.Node) bool { + if n.Op != ir.OCALLFUNC { return true } fn := n.Left - if fn == nil || fn.Op != ONAME || fn.Class() != PFUNC || fn.Name.Defn == nil { + if fn == nil || fn.Op != ir.ONAME || fn.Class() != ir.PFUNC || fn.Name.Defn == nil { return true } if !isRuntimePkg(fn.Sym.Pkg) || fn.Sym.Name != "systemstack" { return true } - var callee *Node + var callee *ir.Node arg := n.List.First() switch arg.Op { - case ONAME: + case ir.ONAME: callee = arg.Name.Defn - case OCLOSURE: + case ir.OCLOSURE: callee = arg.Func.Decl default: base.Fatalf("expected ONAME or OCLOSURE node, got %+v", arg) } - if callee.Op != ODCLFUNC { + if callee.Op != ir.ODCLFUNC { base.Fatalf("expected ODCLFUNC node, got %+v", callee) } c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos}) @@ -1063,17 +1034,17 @@ func (c *nowritebarrierrecChecker) findExtraCalls(n *Node) bool { // because that's all we know after we start SSA. // // This can be called concurrently for different from Nodes. 
-func (c *nowritebarrierrecChecker) recordCall(from *Node, to *obj.LSym, pos src.XPos) { - if from.Op != ODCLFUNC { +func (c *nowritebarrierrecChecker) recordCall(from *ir.Node, to *obj.LSym, pos src.XPos) { + if from.Op != ir.ODCLFUNC { base.Fatalf("expected ODCLFUNC, got %v", from) } // We record this information on the *Func so this is // concurrent-safe. fn := from.Func - if fn.nwbrCalls == nil { - fn.nwbrCalls = new([]nowritebarrierrecCallSym) + if fn.NWBRCalls == nil { + fn.NWBRCalls = new([]ir.SymAndPos) } - *fn.nwbrCalls = append(*fn.nwbrCalls, nowritebarrierrecCallSym{to, pos}) + *fn.NWBRCalls = append(*fn.NWBRCalls, ir.SymAndPos{Sym: to, Pos: pos}) } func (c *nowritebarrierrecChecker) check() { @@ -1081,39 +1052,39 @@ func (c *nowritebarrierrecChecker) check() { // capture all calls created by lowering, but this means we // only get to see the obj.LSyms of calls. symToFunc lets us // get back to the ODCLFUNCs. - symToFunc := make(map[*obj.LSym]*Node) + symToFunc := make(map[*obj.LSym]*ir.Node) // funcs records the back-edges of the BFS call graph walk. It // maps from the ODCLFUNC of each function that must not have // write barriers to the call that inhibits them. Functions // that are directly marked go:nowritebarrierrec are in this // map with a zero-valued nowritebarrierrecCall. This also // acts as the set of marks for the BFS of the call graph. - funcs := make(map[*Node]nowritebarrierrecCall) + funcs := make(map[*ir.Node]nowritebarrierrecCall) // q is the queue of ODCLFUNC Nodes to visit in BFS order. - var q nodeQueue + var q ir.NodeQueue for _, n := range xtop { - if n.Op != ODCLFUNC { + if n.Op != ir.ODCLFUNC { continue } - symToFunc[n.Func.lsym] = n + symToFunc[n.Func.LSym] = n // Make nowritebarrierrec functions BFS roots. - if n.Func.Pragma&Nowritebarrierrec != 0 { + if n.Func.Pragma&ir.Nowritebarrierrec != 0 { funcs[n] = nowritebarrierrecCall{} - q.pushRight(n) + q.PushRight(n) } // Check go:nowritebarrier functions. - if n.Func.Pragma&Nowritebarrier != 0 && n.Func.WBPos.IsKnown() { + if n.Func.Pragma&ir.Nowritebarrier != 0 && n.Func.WBPos.IsKnown() { base.ErrorfAt(n.Func.WBPos, "write barrier prohibited") } } // Perform a BFS of the call graph from all // go:nowritebarrierrec functions. - enqueue := func(src, target *Node, pos src.XPos) { - if target.Func.Pragma&Yeswritebarrierrec != 0 { + enqueue := func(src, target *ir.Node, pos src.XPos) { + if target.Func.Pragma&ir.Yeswritebarrierrec != 0 { // Don't flow into this function. return } @@ -1124,10 +1095,10 @@ func (c *nowritebarrierrecChecker) check() { // Record the path. funcs[target] = nowritebarrierrecCall{target: src, lineno: pos} - q.pushRight(target) + q.PushRight(target) } - for !q.empty() { - fn := q.popLeft() + for !q.Empty() { + fn := q.PopLeft() // Check fn. 
if fn.Func.WBPos.IsKnown() { @@ -1145,13 +1116,13 @@ func (c *nowritebarrierrecChecker) check() { for _, callee := range c.extraCalls[fn] { enqueue(fn, callee.target, callee.lineno) } - if fn.Func.nwbrCalls == nil { + if fn.Func.NWBRCalls == nil { continue } - for _, callee := range *fn.Func.nwbrCalls { - target := symToFunc[callee.target] + for _, callee := range *fn.Func.NWBRCalls { + target := symToFunc[callee.Sym] if target != nil { - enqueue(fn, target, callee.lineno) + enqueue(fn, target, callee.Pos) } } } diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go index f6c1b7cdccf11..636aa4a70edce 100644 --- a/src/cmd/compile/internal/gc/embed.go +++ b/src/cmd/compile/internal/gc/embed.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/syntax" "cmd/compile/internal/types" "cmd/internal/obj" @@ -16,7 +17,7 @@ import ( "strings" ) -var embedlist []*Node +var embedlist []*ir.Node const ( embedUnknown = iota @@ -27,7 +28,7 @@ const ( var numLocalEmbed int -func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []PragmaEmbed) (newExprs []*Node) { +func varEmbed(p *noder, names []*ir.Node, typ *ir.Node, exprs []*ir.Node, embeds []PragmaEmbed) (newExprs []*ir.Node) { haveEmbed := false for _, decl := range p.file.DeclList { imp, ok := decl.(*syntax.ImportDecl) @@ -110,14 +111,14 @@ func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []Pragma } v := names[0] - if dclcontext != PEXTERN { + if dclcontext != ir.PEXTERN { numLocalEmbed++ - v = newnamel(v.Pos, lookupN("embed.", numLocalEmbed)) - v.Sym.Def = asTypesNode(v) + v = ir.NewNameAt(v.Pos, lookupN("embed.", numLocalEmbed)) + v.Sym.Def = ir.AsTypesNode(v) v.Name.Param.Ntype = typ - v.SetClass(PEXTERN) + v.SetClass(ir.PEXTERN) externdcl = append(externdcl, v) - exprs = []*Node{v} + exprs = []*ir.Node{v} } v.Name.Param.SetEmbedFiles(list) @@ -129,18 +130,18 @@ func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []Pragma // The match is approximate because we haven't done scope resolution yet and // can't tell whether "string" and "byte" really mean "string" and "byte". // The result must be confirmed later, after type checking, using embedKind. -func embedKindApprox(typ *Node) int { - if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && base.Ctxt.Pkgpath == "embed")) { +func embedKindApprox(typ *ir.Node) int { + if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "embed")) { return embedFiles } // These are not guaranteed to match only string and []byte - // maybe the local package has redefined one of those words. // But it's the best we can do now during the noder. // The stricter check happens later, in initEmbed calling embedKind. - if typ.Sym != nil && typ.Sym.Name == "string" && typ.Sym.Pkg == localpkg { + if typ.Sym != nil && typ.Sym.Name == "string" && typ.Sym.Pkg == ir.LocalPkg { return embedString } - if typ.Op == OTARRAY && typ.Left == nil && typ.Right.Sym != nil && typ.Right.Sym.Name == "byte" && typ.Right.Sym.Pkg == localpkg { + if typ.Op == ir.OTARRAY && typ.Left == nil && typ.Right.Sym != nil && typ.Right.Sym.Name == "byte" && typ.Right.Sym.Pkg == ir.LocalPkg { return embedBytes } return embedUnknown @@ -148,10 +149,10 @@ func embedKindApprox(typ *Node) int { // embedKind determines the kind of embedding variable. 
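The three kinds it reports (embedString, embedBytes, embedFiles) correspond to the three declaration shapes //go:embed accepts. A minimal sketch, assuming a hello.txt file sits next to the source (hypothetical example, not part of this patch):

package main

import (
	"embed"
	"fmt"
)

//go:embed hello.txt
var s string // embedString: file contents as a string

//go:embed hello.txt
var b []byte // embedBytes: the same contents as a byte slice

//go:embed *.txt
var files embed.FS // embedFiles: a read-only file tree

func main() {
	fmt.Println(len(s), len(b))
	data, err := files.ReadFile("hello.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(data))
}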
func embedKind(typ *types.Type) int { - if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && base.Ctxt.Pkgpath == "embed")) { + if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "embed")) { return embedFiles } - if typ == types.Types[TSTRING] { + if typ == types.Types[types.TSTRING] { return embedString } if typ.Sym == nil && typ.IsSlice() && typ.Elem() == types.Bytetype { @@ -191,7 +192,7 @@ func dumpembeds() { // initEmbed emits the init data for a //go:embed variable, // which is either a string, a []byte, or an embed.FS. -func initEmbed(v *Node) { +func initEmbed(v *ir.Node) { files := v.Name.Param.EmbedFiles() switch kind := embedKind(v.Type); kind { case embedUnknown: diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go deleted file mode 100644 index 5cf8c4a1c64f0..0000000000000 --- a/src/cmd/compile/internal/gc/esc.go +++ /dev/null @@ -1,474 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gc - -import ( - "cmd/compile/internal/base" - "cmd/compile/internal/types" - "fmt" -) - -func escapes(all []*Node) { - visitBottomUp(all, escapeFuncs) -} - -const ( - EscFuncUnknown = 0 + iota - EscFuncPlanned - EscFuncStarted - EscFuncTagged -) - -func min8(a, b int8) int8 { - if a < b { - return a - } - return b -} - -func max8(a, b int8) int8 { - if a > b { - return a - } - return b -} - -const ( - EscUnknown = iota - EscNone // Does not escape to heap, result, or parameters. - EscHeap // Reachable from the heap - EscNever // By construction will not escape. -) - -// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way. -func funcSym(fn *Node) *types.Sym { - if fn == nil || fn.Func.Nname == nil { - return nil - } - return fn.Func.Nname.Sym -} - -// Mark labels that have no backjumps to them as not increasing e.loopdepth. -// Walk hasn't generated (goto|label).Left.Sym.Label yet, so we'll cheat -// and set it to one of the following two. Then in esc we'll clear it again. -var ( - looping = nod(OXXX, nil, nil) - nonlooping = nod(OXXX, nil, nil) -) - -func isSliceSelfAssign(dst, src *Node) bool { - // Detect the following special case. - // - // func (b *Buffer) Foo() { - // n, m := ... - // b.buf = b.buf[n:m] - // } - // - // This assignment is a no-op for escape analysis, - // it does not store any new pointers into b that were not already there. - // However, without this special case b will escape, because we assign to OIND/ODOTPTR. - // Here we assume that the statement will not contain calls, - // that is, that order will move any calls to init. - // Otherwise base ONAME value could change between the moments - // when we evaluate it for dst and for src. - - // dst is ONAME dereference. - if dst.Op != ODEREF && dst.Op != ODOTPTR || dst.Left.Op != ONAME { - return false - } - // src is a slice operation. - switch src.Op { - case OSLICE, OSLICE3, OSLICESTR: - // OK. - case OSLICEARR, OSLICE3ARR: - // Since arrays are embedded into containing object, - // slice of non-pointer array will introduce a new pointer into b that was not already there - // (pointer to b itself). After such assignment, if b contents escape, - // b escapes as well. If we ignore such OSLICEARR, we will conclude - // that b does not escape when b contents do. 
- // - // Pointer to an array is OK since it's not stored inside b directly. - // For slicing an array (not pointer to array), there is an implicit OADDR. - // We check that to determine non-pointer array slicing. - if src.Left.Op == OADDR { - return false - } - default: - return false - } - // slice is applied to ONAME dereference. - if src.Left.Op != ODEREF && src.Left.Op != ODOTPTR || src.Left.Left.Op != ONAME { - return false - } - // dst and src reference the same base ONAME. - return dst.Left == src.Left.Left -} - -// isSelfAssign reports whether assignment from src to dst can -// be ignored by the escape analysis as it's effectively a self-assignment. -func isSelfAssign(dst, src *Node) bool { - if isSliceSelfAssign(dst, src) { - return true - } - - // Detect trivial assignments that assign back to the same object. - // - // It covers these cases: - // val.x = val.y - // val.x[i] = val.y[j] - // val.x1.x2 = val.x1.y2 - // ... etc - // - // These assignments do not change assigned object lifetime. - - if dst == nil || src == nil || dst.Op != src.Op { - return false - } - - switch dst.Op { - case ODOT, ODOTPTR: - // Safe trailing accessors that are permitted to differ. - case OINDEX: - if mayAffectMemory(dst.Right) || mayAffectMemory(src.Right) { - return false - } - default: - return false - } - - // The expression prefix must be both "safe" and identical. - return samesafeexpr(dst.Left, src.Left) -} - -// mayAffectMemory reports whether evaluation of n may affect the program's -// memory state. If the expression can't affect memory state, then it can be -// safely ignored by the escape analysis. -func mayAffectMemory(n *Node) bool { - // We may want to use a list of "memory safe" ops instead of generally - // "side-effect free", which would include all calls and other ops that can - // allocate or change global state. For now, it's safer to start with the latter. - // - // We're ignoring things like division by zero, index out of range, - // and nil pointer dereference here. - switch n.Op { - case ONAME, OCLOSUREVAR, OLITERAL, ONIL: - return false - - // Left+Right group. - case OINDEX, OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD: - return mayAffectMemory(n.Left) || mayAffectMemory(n.Right) - - // Left group. - case ODOT, ODOTPTR, ODEREF, OCONVNOP, OCONV, OLEN, OCAP, - ONOT, OBITNOT, OPLUS, ONEG, OALIGNOF, OOFFSETOF, OSIZEOF: - return mayAffectMemory(n.Left) - - default: - return true - } -} - -// heapAllocReason returns the reason the given Node must be heap -// allocated, or the empty string if it doesn't. -func heapAllocReason(n *Node) string { - if n.Type == nil { - return "" - } - - // Parameters are always passed via the stack. 
- if n.Op == ONAME && (n.Class() == PPARAM || n.Class() == PPARAMOUT) { - return "" - } - - if n.Type.Width > maxStackVarSize { - return "too large for stack" - } - - if (n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= maxImplicitStackVarSize { - return "too large for stack" - } - - if n.Op == OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize { - return "too large for stack" - } - if n.Op == OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize { - return "too large for stack" - } - - if n.Op == OMAKESLICE { - r := n.Right - if r == nil { - r = n.Left - } - if !smallintconst(r) { - return "non-constant size" - } - if t := n.Type; t.Elem().Width != 0 && r.Int64Val() >= maxImplicitStackVarSize/t.Elem().Width { - return "too large for stack" - } - } - - return "" -} - -// addrescapes tags node n as having had its address taken -// by "increasing" the "value" of n.Esc to EscHeap. -// Storage is allocated as necessary to allow the address -// to be taken. -func addrescapes(n *Node) { - switch n.Op { - default: - // Unexpected Op, probably due to a previous type error. Ignore. - - case ODEREF, ODOTPTR: - // Nothing to do. - - case ONAME: - if n == nodfp { - break - } - - // if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping. - // on PPARAM it means something different. - if n.Class() == PAUTO && n.Esc == EscNever { - break - } - - // If a closure reference escapes, mark the outer variable as escaping. - if n.Name.IsClosureVar() { - addrescapes(n.Name.Defn) - break - } - - if n.Class() != PPARAM && n.Class() != PPARAMOUT && n.Class() != PAUTO { - break - } - - // This is a plain parameter or local variable that needs to move to the heap, - // but possibly for the function outside the one we're compiling. - // That is, if we have: - // - // func f(x int) { - // func() { - // global = &x - // } - // } - // - // then we're analyzing the inner closure but we need to move x to the - // heap in f, not in the inner closure. Flip over to f before calling moveToHeap. - oldfn := Curfn - Curfn = n.Name.Curfn - if Curfn.Op == OCLOSURE { - Curfn = Curfn.Func.Decl - panic("can't happen") - } - ln := base.Pos - base.Pos = Curfn.Pos - moveToHeap(n) - Curfn = oldfn - base.Pos = ln - - // ODOTPTR has already been introduced, - // so these are the non-pointer ODOT and OINDEX. - // In &x[0], if x is a slice, then x does not - // escape--the pointer inside x does, but that - // is always a heap pointer anyway. - case ODOT, OINDEX, OPAREN, OCONVNOP: - if !n.Left.Type.IsSlice() { - addrescapes(n.Left) - } - } -} - -// moveToHeap records the parameter or local variable n as moved to the heap. -func moveToHeap(n *Node) { - if base.Flag.LowerR != 0 { - Dump("MOVE", n) - } - if base.Flag.CompilingRuntime { - base.Errorf("%v escapes to heap, not allowed in runtime", n) - } - if n.Class() == PAUTOHEAP { - Dump("n", n) - base.Fatalf("double move to heap") - } - - // Allocate a local stack variable to hold the pointer to the heap copy. - // temp will add it to the function declaration list automatically. - heapaddr := temp(types.NewPtr(n.Type)) - heapaddr.Sym = lookup("&" + n.Sym.Name) - heapaddr.Orig.Sym = heapaddr.Sym - heapaddr.Pos = n.Pos - - // Unset AutoTemp to persist the &foo variable name through SSA to - // liveness analysis. - // TODO(mdempsky/drchase): Cleaner solution? - heapaddr.Name.SetAutoTemp(false) - - // Parameters have a local stack copy used at function start/end - // in addition to the copy in the heap that may live longer than - // the function. 
- if n.Class() == PPARAM || n.Class() == PPARAMOUT { - if n.Xoffset == BADWIDTH { - base.Fatalf("addrescapes before param assignment") - } - - // We rewrite n below to be a heap variable (indirection of heapaddr). - // Preserve a copy so we can still write code referring to the original, - // and substitute that copy into the function declaration list - // so that analyses of the local (on-stack) variables use it. - stackcopy := newname(n.Sym) - stackcopy.Type = n.Type - stackcopy.Xoffset = n.Xoffset - stackcopy.SetClass(n.Class()) - stackcopy.Name.Param.Heapaddr = heapaddr - if n.Class() == PPARAMOUT { - // Make sure the pointer to the heap copy is kept live throughout the function. - // The function could panic at any point, and then a defer could recover. - // Thus, we need the pointer to the heap copy always available so the - // post-deferreturn code can copy the return value back to the stack. - // See issue 16095. - heapaddr.Name.SetIsOutputParamHeapAddr(true) - } - n.Name.Param.Stackcopy = stackcopy - - // Substitute the stackcopy into the function variable list so that - // liveness and other analyses use the underlying stack slot - // and not the now-pseudo-variable n. - found := false - for i, d := range Curfn.Func.Dcl { - if d == n { - Curfn.Func.Dcl[i] = stackcopy - found = true - break - } - // Parameters are before locals, so can stop early. - // This limits the search even in functions with many local variables. - if d.Class() == PAUTO { - break - } - } - if !found { - base.Fatalf("cannot find %v in local variable list", n) - } - Curfn.Func.Dcl = append(Curfn.Func.Dcl, n) - } - - // Modify n in place so that uses of n now mean indirection of the heapaddr. - n.SetClass(PAUTOHEAP) - n.Xoffset = 0 - n.Name.Param.Heapaddr = heapaddr - n.Esc = EscHeap - if base.Flag.LowerM != 0 { - base.WarnfAt(n.Pos, "moved to heap: %v", n) - } -} - -// This special tag is applied to uintptr variables -// that we believe may hold unsafe.Pointers for -// calls into assembly functions. -const unsafeUintptrTag = "unsafe-uintptr" - -// This special tag is applied to uintptr parameters of functions -// marked go:uintptrescapes. -const uintptrEscapesTag = "uintptr-escapes" - -func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string { - name := func() string { - if f.Sym != nil { - return f.Sym.Name - } - return fmt.Sprintf("arg#%d", narg) - } - - if fn.Nbody.Len() == 0 { - // Assume that uintptr arguments must be held live across the call. - // This is most important for syscall.Syscall. - // See golang.org/issue/13372. - // This really doesn't have much to do with escape analysis per se, - // but we are reusing the ability to annotate an individual function - // argument and pass those annotations along to importing code. - if f.Type.IsUintptr() { - if base.Flag.LowerM != 0 { - base.WarnfAt(f.Pos, "assuming %v is unsafe uintptr", name()) - } - return unsafeUintptrTag - } - - if !f.Type.HasPointers() { // don't bother tagging for scalars - return "" - } - - var esc EscLeaks - - // External functions are assumed unsafe, unless - // //go:noescape is given before the declaration. 
- if fn.Func.Pragma&Noescape != 0 { - if base.Flag.LowerM != 0 && f.Sym != nil { - base.WarnfAt(f.Pos, "%v does not escape", name()) - } - } else { - if base.Flag.LowerM != 0 && f.Sym != nil { - base.WarnfAt(f.Pos, "leaking param: %v", name()) - } - esc.AddHeap(0) - } - - return esc.Encode() - } - - if fn.Func.Pragma&UintptrEscapes != 0 { - if f.Type.IsUintptr() { - if base.Flag.LowerM != 0 { - base.WarnfAt(f.Pos, "marking %v as escaping uintptr", name()) - } - return uintptrEscapesTag - } - if f.IsDDD() && f.Type.Elem().IsUintptr() { - // final argument is ...uintptr. - if base.Flag.LowerM != 0 { - base.WarnfAt(f.Pos, "marking %v as escaping ...uintptr", name()) - } - return uintptrEscapesTag - } - } - - if !f.Type.HasPointers() { // don't bother tagging for scalars - return "" - } - - // Unnamed parameters are unused and therefore do not escape. - if f.Sym == nil || f.Sym.IsBlank() { - var esc EscLeaks - return esc.Encode() - } - - n := asNode(f.Nname) - loc := e.oldLoc(n) - esc := loc.paramEsc - esc.Optimize() - - if base.Flag.LowerM != 0 && !loc.escapes { - if esc.Empty() { - base.WarnfAt(f.Pos, "%v does not escape", name()) - } - if x := esc.Heap(); x >= 0 { - if x == 0 { - base.WarnfAt(f.Pos, "leaking param: %v", name()) - } else { - // TODO(mdempsky): Mention level=x like below? - base.WarnfAt(f.Pos, "leaking param content: %v", name()) - } - } - for i := 0; i < numEscResults; i++ { - if x := esc.Result(i); x >= 0 { - res := fn.Type.Results().Field(i).Sym - base.WarnfAt(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x) - } - } - } - - return esc.Encode() -} diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index aaf768d85ab8d..a0aa516d9a43a 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/types" "cmd/internal/src" @@ -85,7 +86,7 @@ import ( type Escape struct { allLocs []*EscLocation - curfn *Node + curfn *ir.Node // loopDepth counts the current loop nesting depth within // curfn. It increments within each "for" loop and at each @@ -100,8 +101,8 @@ type Escape struct { // An EscLocation represents an abstract location that stores a Go // variable. type EscLocation struct { - n *Node // represented variable or expression, if any - curfn *Node // enclosing function + n *ir.Node // represented variable or expression, if any + curfn *ir.Node // enclosing function edges []EscEdge // incoming edges loopDepth int // loopDepth at declaration @@ -142,11 +143,11 @@ type EscEdge struct { } func init() { - EscFmt = escFmt + ir.EscFmt = escFmt } // escFmt is called from node printing to print information about escape analysis results. -func escFmt(n *Node, short bool) string { +func escFmt(n *ir.Node, short bool) string { text := "" switch n.Esc { case EscUnknown: @@ -178,9 +179,9 @@ func escFmt(n *Node, short bool) string { // escapeFuncs performs escape analysis on a minimal batch of // functions. 
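Such a batch is a strongly connected component of the call graph, analyzed bottom-up: mutually recursive functions are presented to escapeFuncs together, with recursive set to true. A hypothetical pair that would form one batch:

package main

import "fmt"

// even and odd call each other, so they form a single strongly
// connected component and are escape-analyzed as one batch.
func even(n int) bool {
	if n == 0 {
		return true
	}
	return odd(n - 1)
}

func odd(n int) bool {
	if n == 0 {
		return false
	}
	return even(n - 1)
}

func main() {
	fmt.Println(even(10), odd(7)) // true true
}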
-func escapeFuncs(fns []*Node, recursive bool) { +func escapeFuncs(fns []*ir.Node, recursive bool) { for _, fn := range fns { - if fn.Op != ODCLFUNC { + if fn.Op != ir.ODCLFUNC { base.Fatalf("unexpected node: %v", fn) } } @@ -201,13 +202,13 @@ func escapeFuncs(fns []*Node, recursive bool) { e.finish(fns) } -func (e *Escape) initFunc(fn *Node) { - if fn.Op != ODCLFUNC || fn.Esc != EscFuncUnknown { +func (e *Escape) initFunc(fn *ir.Node) { + if fn.Op != ir.ODCLFUNC || fn.Esc != EscFuncUnknown { base.Fatalf("unexpected node: %v", fn) } fn.Esc = EscFuncPlanned if base.Flag.LowerM > 3 { - Dump("escAnalyze", fn) + ir.Dump("escAnalyze", fn) } e.curfn = fn @@ -215,26 +216,26 @@ func (e *Escape) initFunc(fn *Node) { // Allocate locations for local variables. for _, dcl := range fn.Func.Dcl { - if dcl.Op == ONAME { + if dcl.Op == ir.ONAME { e.newLoc(dcl, false) } } } -func (e *Escape) walkFunc(fn *Node) { +func (e *Escape) walkFunc(fn *ir.Node) { fn.Esc = EscFuncStarted // Identify labels that mark the head of an unstructured loop. - inspectList(fn.Nbody, func(n *Node) bool { + ir.InspectList(fn.Nbody, func(n *ir.Node) bool { switch n.Op { - case OLABEL: - n.Sym.Label = asTypesNode(nonlooping) + case ir.OLABEL: + n.Sym.Label = ir.AsTypesNode(nonlooping) - case OGOTO: + case ir.OGOTO: // If we visited the label before the goto, // then this is a looping label. - if n.Sym.Label == asTypesNode(nonlooping) { - n.Sym.Label = asTypesNode(looping) + if n.Sym.Label == ir.AsTypesNode(nonlooping) { + n.Sym.Label = ir.AsTypesNode(looping) } } @@ -273,7 +274,7 @@ func (e *Escape) walkFunc(fn *Node) { // } // stmt evaluates a single Go statement. -func (e *Escape) stmt(n *Node) { +func (e *Escape) stmt(n *ir.Node) { if n == nil { return } @@ -293,23 +294,23 @@ func (e *Escape) stmt(n *Node) { default: base.Fatalf("unexpected stmt: %v", n) - case ODCLCONST, ODCLTYPE, OEMPTY, OFALL, OINLMARK: + case ir.ODCLCONST, ir.ODCLTYPE, ir.OEMPTY, ir.OFALL, ir.OINLMARK: // nop - case OBREAK, OCONTINUE, OGOTO: + case ir.OBREAK, ir.OCONTINUE, ir.OGOTO: // TODO(mdempsky): Handle dead code? - case OBLOCK: + case ir.OBLOCK: e.stmts(n.List) - case ODCL: + case ir.ODCL: // Record loop depth at declaration. 
- if !n.Left.isBlank() { + if !ir.IsBlank(n.Left) { e.dcl(n.Left) } - case OLABEL: - switch asNode(n.Sym.Label) { + case ir.OLABEL: + switch ir.AsNode(n.Sym.Label) { case nonlooping: if base.Flag.LowerM > 2 { fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n) @@ -324,19 +325,19 @@ func (e *Escape) stmt(n *Node) { } n.Sym.Label = nil - case OIF: + case ir.OIF: e.discard(n.Left) e.block(n.Nbody) e.block(n.Rlist) - case OFOR, OFORUNTIL: + case ir.OFOR, ir.OFORUNTIL: e.loopDepth++ e.discard(n.Left) e.stmt(n.Right) e.block(n.Nbody) e.loopDepth-- - case ORANGE: + case ir.ORANGE: // for List = range Right { Nbody } e.loopDepth++ ks := e.addrs(n.List) @@ -354,8 +355,8 @@ func (e *Escape) stmt(n *Node) { } e.expr(e.later(k), n.Right) - case OSWITCH: - typesw := n.Left != nil && n.Left.Op == OTYPESW + case ir.OSWITCH: + typesw := n.Left != nil && n.Left.Op == ir.OTYPESW var ks []EscHole for _, cas := range n.List.Slice() { // cases @@ -377,68 +378,68 @@ func (e *Escape) stmt(n *Node) { e.discard(n.Left) } - case OSELECT: + case ir.OSELECT: for _, cas := range n.List.Slice() { e.stmt(cas.Left) e.block(cas.Nbody) } - case OSELRECV: + case ir.OSELRECV: e.assign(n.Left, n.Right, "selrecv", n) - case OSELRECV2: + case ir.OSELRECV2: e.assign(n.Left, n.Right, "selrecv", n) e.assign(n.List.First(), nil, "selrecv", n) - case ORECV: + case ir.ORECV: // TODO(mdempsky): Consider e.discard(n.Left). e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit - case OSEND: + case ir.OSEND: e.discard(n.Left) e.assignHeap(n.Right, "send", n) - case OAS, OASOP: + case ir.OAS, ir.OASOP: e.assign(n.Left, n.Right, "assign", n) - case OAS2: + case ir.OAS2: for i, nl := range n.List.Slice() { e.assign(nl, n.Rlist.Index(i), "assign-pair", n) } - case OAS2DOTTYPE: // v, ok = x.(type) + case ir.OAS2DOTTYPE: // v, ok = x.(type) e.assign(n.List.First(), n.Right, "assign-pair-dot-type", n) e.assign(n.List.Second(), nil, "assign-pair-dot-type", n) - case OAS2MAPR: // v, ok = m[k] + case ir.OAS2MAPR: // v, ok = m[k] e.assign(n.List.First(), n.Right, "assign-pair-mapr", n) e.assign(n.List.Second(), nil, "assign-pair-mapr", n) - case OAS2RECV: // v, ok = <-ch + case ir.OAS2RECV: // v, ok = <-ch e.assign(n.List.First(), n.Right, "assign-pair-receive", n) e.assign(n.List.Second(), nil, "assign-pair-receive", n) - case OAS2FUNC: + case ir.OAS2FUNC: e.stmts(n.Right.Ninit) e.call(e.addrs(n.List), n.Right, nil) - case ORETURN: + case ir.ORETURN: results := e.curfn.Type.Results().FieldSlice() for i, v := range n.List.Slice() { - e.assign(asNode(results[i].Nname), v, "return", n) + e.assign(ir.AsNode(results[i].Nname), v, "return", n) } - case OCALLFUNC, OCALLMETH, OCALLINTER, OCLOSE, OCOPY, ODELETE, OPANIC, OPRINT, OPRINTN, ORECOVER: + case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER: e.call(nil, n, nil) - case OGO, ODEFER: + case ir.OGO, ir.ODEFER: e.stmts(n.Left.Ninit) e.call(nil, n.Left, n) - case ORETJMP: + case ir.ORETJMP: // TODO(mdempsky): What do? esc.go just ignores it. } } -func (e *Escape) stmts(l Nodes) { +func (e *Escape) stmts(l ir.Nodes) { for _, n := range l.Slice() { e.stmt(n) } } // block is like stmts, but preserves loopDepth. -func (e *Escape) block(l Nodes) { +func (e *Escape) block(l ir.Nodes) { old := e.loopDepth e.stmts(l) e.loopDepth = old @@ -446,7 +447,7 @@ func (e *Escape) block(l Nodes) { // expr models evaluating an expression n and flowing the result into // hole k. 
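For intuition about these flows at the source level: an address that reaches a location outliving the frame forces the variable to the heap. A minimal sketch (hypothetical example; compile with -gcflags=-m to see the diagnostics):

package main

// The result of &x flows into p, and p flows out through the return
// value, so escape analysis reports "moved to heap: x".
func leak() *int {
	x := 42
	p := &x
	return p
}

// Here &y only flows into a local that never leaves the frame,
// so y stays on the stack.
func stay() int {
	y := 7
	p := &y
	return *p
}

func main() {
	println(*leak() + stay())
}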
-func (e *Escape) expr(k EscHole, n *Node) { +func (e *Escape) expr(k EscHole, n *ir.Node) { if n == nil { return } @@ -454,7 +455,7 @@ func (e *Escape) expr(k EscHole, n *Node) { e.exprSkipInit(k, n) } -func (e *Escape) exprSkipInit(k EscHole, n *Node) { +func (e *Escape) exprSkipInit(k EscHole, n *ir.Node) { if n == nil { return } @@ -467,7 +468,7 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) { uintptrEscapesHack := k.uintptrEscapesHack k.uintptrEscapesHack = false - if uintptrEscapesHack && n.Op == OCONVNOP && n.Left.Type.IsUnsafePtr() { + if uintptrEscapesHack && n.Op == ir.OCONVNOP && n.Left.Type.IsUnsafePtr() { // nop } else if k.derefs >= 0 && !n.Type.HasPointers() { k = e.discardHole() @@ -477,32 +478,32 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) { default: base.Fatalf("unexpected expr: %v", n) - case OLITERAL, ONIL, OGETG, OCLOSUREVAR, OTYPE, OMETHEXPR: + case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OCLOSUREVAR, ir.OTYPE, ir.OMETHEXPR: // nop - case ONAME: - if n.Class() == PFUNC || n.Class() == PEXTERN { + case ir.ONAME: + if n.Class() == ir.PFUNC || n.Class() == ir.PEXTERN { return } e.flow(k, e.oldLoc(n)) - case OPLUS, ONEG, OBITNOT, ONOT: + case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT: e.discard(n.Left) - case OADD, OSUB, OOR, OXOR, OMUL, ODIV, OMOD, OLSH, ORSH, OAND, OANDNOT, OEQ, ONE, OLT, OLE, OGT, OGE, OANDAND, OOROR: + case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE, ir.OANDAND, ir.OOROR: e.discard(n.Left) e.discard(n.Right) - case OADDR: + case ir.OADDR: e.expr(k.addr(n, "address-of"), n.Left) // "address-of" - case ODEREF: + case ir.ODEREF: e.expr(k.deref(n, "indirection"), n.Left) // "indirection" - case ODOT, ODOTMETH, ODOTINTER: + case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER: e.expr(k.note(n, "dot"), n.Left) - case ODOTPTR: + case ir.ODOTPTR: e.expr(k.deref(n, "dot of pointer"), n.Left) // "dot of pointer" - case ODOTTYPE, ODOTTYPE2: + case ir.ODOTTYPE, ir.ODOTTYPE2: e.expr(k.dotType(n.Type, n, "dot"), n.Left) - case OINDEX: + case ir.OINDEX: if n.Left.Type.IsArray() { e.expr(k.note(n, "fixed-array-index-of"), n.Left) } else { @@ -510,17 +511,17 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) { e.expr(k.deref(n, "dot of pointer"), n.Left) } e.discard(n.Right) - case OINDEXMAP: + case ir.OINDEXMAP: e.discard(n.Left) e.discard(n.Right) - case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR: + case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR: e.expr(k.note(n, "slice"), n.Left) low, high, max := n.SliceBounds() e.discard(low) e.discard(high) e.discard(max) - case OCONV, OCONVNOP: + case ir.OCONV, ir.OCONVNOP: if checkPtr(e.curfn, 2) && n.Type.IsUnsafePtr() && n.Left.Type.IsPtr() { // When -d=checkptr=2 is enabled, treat // conversions to unsafe.Pointer as an @@ -534,35 +535,35 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) { } else { e.expr(k, n.Left) } - case OCONVIFACE: + case ir.OCONVIFACE: if !n.Left.Type.IsInterface() && !isdirectiface(n.Left.Type) { k = e.spill(k, n) } e.expr(k.note(n, "interface-converted"), n.Left) - case ORECV: + case ir.ORECV: e.discard(n.Left) - case OCALLMETH, OCALLFUNC, OCALLINTER, OLEN, OCAP, OCOMPLEX, OREAL, OIMAG, OAPPEND, OCOPY: + case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY: e.call([]EscHole{k}, n, nil) - case ONEW: + case ir.ONEW: e.spill(k, n) - case OMAKESLICE: + case ir.OMAKESLICE: e.spill(k, n) e.discard(n.Left) 
e.discard(n.Right) - case OMAKECHAN: + case ir.OMAKECHAN: e.discard(n.Left) - case OMAKEMAP: + case ir.OMAKEMAP: e.spill(k, n) e.discard(n.Left) - case ORECOVER: + case ir.ORECOVER: // nop - case OCALLPART: + case ir.OCALLPART: // Flow the receiver argument to both the closure and // to the receiver parameter. @@ -580,38 +581,38 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) { for i := m.Type.NumResults(); i > 0; i-- { ks = append(ks, e.heapHole()) } - paramK := e.tagHole(ks, asNode(m.Nname), m.Type.Recv()) + paramK := e.tagHole(ks, ir.AsNode(m.Nname), m.Type.Recv()) e.expr(e.teeHole(paramK, closureK), n.Left) - case OPTRLIT: + case ir.OPTRLIT: e.expr(e.spill(k, n), n.Left) - case OARRAYLIT: + case ir.OARRAYLIT: for _, elt := range n.List.Slice() { - if elt.Op == OKEY { + if elt.Op == ir.OKEY { elt = elt.Right } e.expr(k.note(n, "array literal element"), elt) } - case OSLICELIT: + case ir.OSLICELIT: k = e.spill(k, n) k.uintptrEscapesHack = uintptrEscapesHack // for ...uintptr parameters for _, elt := range n.List.Slice() { - if elt.Op == OKEY { + if elt.Op == ir.OKEY { elt = elt.Right } e.expr(k.note(n, "slice-literal-element"), elt) } - case OSTRUCTLIT: + case ir.OSTRUCTLIT: for _, elt := range n.List.Slice() { e.expr(k.note(n, "struct literal element"), elt.Left) } - case OMAPLIT: + case ir.OMAPLIT: e.spill(k, n) // Map keys and values are always stored in the heap. @@ -620,12 +621,12 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) { e.assignHeap(elt.Right, "map literal value", n) } - case OCLOSURE: + case ir.OCLOSURE: k = e.spill(k, n) // Link addresses of captured variables to closure. for _, v := range n.Func.ClosureVars.Slice() { - if v.Op == OXXX { // unnamed out argument; see dcl.go:/^funcargs + if v.Op == ir.OXXX { // unnamed out argument; see dcl.go:/^funcargs continue } @@ -637,11 +638,11 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) { e.expr(k.note(n, "captured by a closure"), v.Name.Defn) } - case ORUNES2STR, OBYTES2STR, OSTR2RUNES, OSTR2BYTES, ORUNESTR: + case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR: e.spill(k, n) e.discard(n.Left) - case OADDSTR: + case ir.OADDSTR: e.spill(k, n) // Arguments of OADDSTR never escape; @@ -652,32 +653,32 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) { // unsafeValue evaluates a uintptr-typed arithmetic expression looking // for conversions from an unsafe.Pointer. -func (e *Escape) unsafeValue(k EscHole, n *Node) { - if n.Type.Etype != TUINTPTR { +func (e *Escape) unsafeValue(k EscHole, n *ir.Node) { + if n.Type.Etype != types.TUINTPTR { base.Fatalf("unexpected type %v for %v", n.Type, n) } e.stmts(n.Ninit) switch n.Op { - case OCONV, OCONVNOP: + case ir.OCONV, ir.OCONVNOP: if n.Left.Type.IsUnsafePtr() { e.expr(k, n.Left) } else { e.discard(n.Left) } - case ODOTPTR: + case ir.ODOTPTR: if isReflectHeaderDataField(n) { e.expr(k.deref(n, "reflect.Header.Data"), n.Left) } else { e.discard(n.Left) } - case OPLUS, ONEG, OBITNOT: + case ir.OPLUS, ir.ONEG, ir.OBITNOT: e.unsafeValue(k, n.Left) - case OADD, OSUB, OOR, OXOR, OMUL, ODIV, OMOD, OAND, OANDNOT: + case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OAND, ir.OANDNOT: e.unsafeValue(k, n.Left) e.unsafeValue(k, n.Right) - case OLSH, ORSH: + case ir.OLSH, ir.ORSH: e.unsafeValue(k, n.Left) // RHS need not be uintptr-typed (#32959) and can't meaningfully // flow pointers anyway. 
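// For illustration, a minimal sketch of the operand shapes unsafeValue
// walks: only operands that can actually carry a pointer are followed.
//
//	func offset(x *[16]byte) uintptr {
//		p := unsafe.Pointer(x)
//		u := uintptr(p) + 8 // ir.OCONV from unsafe.Pointer: p flows onward
//		return u << 1       // ir.OLSH: only the left operand is walked (#32959)
//	}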
@@ -689,11 +690,11 @@ func (e *Escape) unsafeValue(k EscHole, n *Node) {

 // discard evaluates an expression n for side-effects, but discards
 // its value.
-func (e *Escape) discard(n *Node) {
+func (e *Escape) discard(n *ir.Node) {
 	e.expr(e.discardHole(), n)
 }

-func (e *Escape) discards(l Nodes) {
+func (e *Escape) discards(l ir.Nodes) {
 	for _, n := range l.Slice() {
 		e.discard(n)
 	}
@@ -701,8 +702,8 @@ func (e *Escape) discards(l Nodes) {

 // addr evaluates an addressable expression n and returns an EscHole
 // that represents storing into the represented location.
-func (e *Escape) addr(n *Node) EscHole {
-	if n == nil || n.isBlank() {
+func (e *Escape) addr(n *ir.Node) EscHole {
+	if n == nil || ir.IsBlank(n) {
 		// Can happen at least in OSELRECV.
 		// TODO(mdempsky): Anywhere else?
 		return e.discardHole()
@@ -713,23 +714,23 @@ func (e *Escape) addr(n *Node) EscHole {
 	switch n.Op {
 	default:
 		base.Fatalf("unexpected addr: %v", n)
-	case ONAME:
-		if n.Class() == PEXTERN {
+	case ir.ONAME:
+		if n.Class() == ir.PEXTERN {
 			break
 		}
 		k = e.oldLoc(n).asHole()
-	case ODOT:
+	case ir.ODOT:
 		k = e.addr(n.Left)
-	case OINDEX:
+	case ir.OINDEX:
 		e.discard(n.Right)
 		if n.Left.Type.IsArray() {
 			k = e.addr(n.Left)
 		} else {
 			e.discard(n.Left)
 		}
-	case ODEREF, ODOTPTR:
+	case ir.ODEREF, ir.ODOTPTR:
 		e.discard(n)
-	case OINDEXMAP:
+	case ir.OINDEXMAP:
 		e.discard(n.Left)
 		e.assignHeap(n.Right, "key of map put", n)
 	}
@@ -741,7 +742,7 @@ func (e *Escape) addr(n *Node) EscHole {
 	return k
 }

-func (e *Escape) addrs(l Nodes) []EscHole {
+func (e *Escape) addrs(l ir.Nodes) []EscHole {
 	var ks []EscHole
 	for _, n := range l.Slice() {
 		ks = append(ks, e.addr(n))
@@ -750,7 +751,7 @@ func (e *Escape) addrs(l Nodes) []EscHole {
 }

 // assign evaluates the assignment dst = src.
-func (e *Escape) assign(dst, src *Node, why string, where *Node) {
+func (e *Escape) assign(dst, src *ir.Node, why string, where *ir.Node) {
 	// Filter out some no-op assignments for escape analysis.
 	ignore := dst != nil && src != nil && isSelfAssign(dst, src)
 	if ignore && base.Flag.LowerM != 0 {
@@ -758,7 +759,7 @@ func (e *Escape) assign(dst, src *Node, why string, where *Node) {
 	}

 	k := e.addr(dst)
-	if dst != nil && dst.Op == ODOTPTR && isReflectHeaderDataField(dst) {
+	if dst != nil && dst.Op == ir.ODOTPTR && isReflectHeaderDataField(dst) {
 		e.unsafeValue(e.heapHole().note(where, why), src)
 	} else {
 		if ignore {
@@ -768,22 +769,22 @@ func (e *Escape) assign(dst, src *Node, why string, where *Node) {
 	}
 }

-func (e *Escape) assignHeap(src *Node, why string, where *Node) {
+func (e *Escape) assignHeap(src *ir.Node, why string, where *ir.Node) {
 	e.expr(e.heapHole().note(where, why), src)
 }

 // call evaluates a call expression, including builtin calls. ks
 // should contain the holes representing where the function callee's
 // results flow; where is the OGO/ODEFER context of the call, if any.
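// For illustration, a minimal sketch of the top-level defer special case
// handled below: a defer at loopDepth == 1 runs at most once, so its
// record and arguments only need to live until the function returns.
// (wipe is a stand-in for any callee.)
//
//	func process() {
//		var buf [64]byte
//		defer wipe(buf[:]) // top-level defer: where.Esc becomes EscNever
//		for i := 0; i < 3; i++ {
//			b := make([]byte, 8)
//			defer wipe(b) // inside a loop: may run many times, no special case
//		}
//	}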
-func (e *Escape) call(ks []EscHole, call, where *Node) { - topLevelDefer := where != nil && where.Op == ODEFER && e.loopDepth == 1 +func (e *Escape) call(ks []EscHole, call, where *ir.Node) { + topLevelDefer := where != nil && where.Op == ir.ODEFER && e.loopDepth == 1 if topLevelDefer { // force stack allocation of defer record, unless // open-coded defers are used (see ssa.go) where.Esc = EscNever } - argument := func(k EscHole, arg *Node) { + argument := func(k EscHole, arg *ir.Node) { if topLevelDefer { // Top level defers arguments don't escape to // heap, but they do need to last until end of @@ -800,21 +801,21 @@ func (e *Escape) call(ks []EscHole, call, where *Node) { default: base.Fatalf("unexpected call op: %v", call.Op) - case OCALLFUNC, OCALLMETH, OCALLINTER: + case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: fixVariadicCall(call) // Pick out the function callee, if statically known. - var fn *Node + var fn *ir.Node switch call.Op { - case OCALLFUNC: + case ir.OCALLFUNC: switch v := staticValue(call.Left); { - case v.Op == ONAME && v.Class() == PFUNC: + case v.Op == ir.ONAME && v.Class() == ir.PFUNC: fn = v - case v.Op == OCLOSURE: + case v.Op == ir.OCLOSURE: fn = v.Func.Nname } - case OCALLMETH: - fn = call.Left.MethodName() + case ir.OCALLMETH: + fn = methodExprName(call.Left) } fntype := call.Left.Type @@ -824,7 +825,7 @@ func (e *Escape) call(ks []EscHole, call, where *Node) { if ks != nil && fn != nil && e.inMutualBatch(fn) { for i, result := range fn.Type.Results().FieldSlice() { - e.expr(ks[i], asNode(result.Nname)) + e.expr(ks[i], ir.AsNode(result.Nname)) } } @@ -840,7 +841,7 @@ func (e *Escape) call(ks []EscHole, call, where *Node) { argument(e.tagHole(ks, fn, param), args[i]) } - case OAPPEND: + case ir.OAPPEND: args := call.List.Slice() // Appendee slice may flow directly to the result, if @@ -865,7 +866,7 @@ func (e *Escape) call(ks []EscHole, call, where *Node) { } } - case OCOPY: + case ir.OCOPY: argument(e.discardHole(), call.Left) copiedK := e.discardHole() @@ -874,17 +875,17 @@ func (e *Escape) call(ks []EscHole, call, where *Node) { } argument(copiedK, call.Right) - case OPANIC: + case ir.OPANIC: argument(e.heapHole(), call.Left) - case OCOMPLEX: + case ir.OCOMPLEX: argument(e.discardHole(), call.Left) argument(e.discardHole(), call.Right) - case ODELETE, OPRINT, OPRINTN, ORECOVER: + case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER: for _, arg := range call.List.Slice() { argument(e.discardHole(), arg) } - case OLEN, OCAP, OREAL, OIMAG, OCLOSE: + case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE: argument(e.discardHole(), call.Left) } } @@ -893,14 +894,14 @@ func (e *Escape) call(ks []EscHole, call, where *Node) { // ks should contain the holes representing where the function // callee's results flows. fn is the statically-known callee function, // if any. -func (e *Escape) tagHole(ks []EscHole, fn *Node, param *types.Field) EscHole { +func (e *Escape) tagHole(ks []EscHole, fn *ir.Node, param *types.Field) EscHole { // If this is a dynamic call, we can't rely on param.Note. if fn == nil { return e.heapHole() } if e.inMutualBatch(fn) { - return e.addr(asNode(param.Nname)) + return e.addr(ir.AsNode(param.Nname)) } // Call to previously tagged function. @@ -934,7 +935,7 @@ func (e *Escape) tagHole(ks []EscHole, fn *Node, param *types.Field) EscHole { // fn has not yet been analyzed, so its parameters and results // should be incorporated directly into the flow graph instead of // relying on its escape analysis tagging. 
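// For illustration, a minimal sketch of when inMutualBatch (below)
// reports true: visitBottomUp analyzes mutually recursive functions as
// one batch, and while the batch is in flight neither function has
// escape tags yet, so parameter and result flow is wired directly into
// the graph instead.
//
//	func even(n int, p *int) *int {
//		if n == 0 {
//			return p
//		}
//		return odd(n-1, p)
//	}
//
//	func odd(n int, p *int) *int {
//		if n == 0 {
//			return nil
//		}
//		return even(n-1, p)
//	}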
-func (e *Escape) inMutualBatch(fn *Node) bool { +func (e *Escape) inMutualBatch(fn *ir.Node) bool { if fn.Name.Defn != nil && fn.Name.Defn.Esc < EscFuncTagged { if fn.Name.Defn.Esc == EscFuncUnknown { base.Fatalf("graph inconsistency") @@ -959,11 +960,11 @@ type EscHole struct { type EscNote struct { next *EscNote - where *Node + where *ir.Node why string } -func (k EscHole) note(where *Node, why string) EscHole { +func (k EscHole) note(where *ir.Node, why string) EscHole { if where == nil || why == "" { base.Fatalf("note: missing where/why") } @@ -985,10 +986,10 @@ func (k EscHole) shift(delta int) EscHole { return k } -func (k EscHole) deref(where *Node, why string) EscHole { return k.shift(1).note(where, why) } -func (k EscHole) addr(where *Node, why string) EscHole { return k.shift(-1).note(where, why) } +func (k EscHole) deref(where *ir.Node, why string) EscHole { return k.shift(1).note(where, why) } +func (k EscHole) addr(where *ir.Node, why string) EscHole { return k.shift(-1).note(where, why) } -func (k EscHole) dotType(t *types.Type, where *Node, why string) EscHole { +func (k EscHole) dotType(t *types.Type, where *ir.Node, why string) EscHole { if !t.IsInterface() && !isdirectiface(t) { k = k.shift(1) } @@ -1025,7 +1026,7 @@ func (e *Escape) teeHole(ks ...EscHole) EscHole { return loc.asHole() } -func (e *Escape) dcl(n *Node) EscHole { +func (e *Escape) dcl(n *ir.Node) EscHole { loc := e.oldLoc(n) loc.loopDepth = e.loopDepth return loc.asHole() @@ -1034,7 +1035,7 @@ func (e *Escape) dcl(n *Node) EscHole { // spill allocates a new location associated with expression n, flows // its address to k, and returns a hole that flows values to it. It's // intended for use with most expressions that allocate storage. -func (e *Escape) spill(k EscHole, n *Node) EscHole { +func (e *Escape) spill(k EscHole, n *ir.Node) EscHole { loc := e.newLoc(n, true) e.flow(k.addr(n, "spill"), loc) return loc.asHole() @@ -1051,8 +1052,8 @@ func (e *Escape) later(k EscHole) EscHole { // canonicalNode returns the canonical *Node that n logically // represents. 
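// For illustration, a minimal sketch of the closure-variable case that
// canonicalNode (below) handles: each captured reference to n is a
// distinct closure-var ONAME, but all of them resolve to the single n
// declared in counter, so they share one EscLocation.
//
//	func counter() func() int {
//		n := 0
//		return func() int {
//			n++ // closure var; canonicalized to counter's n
//			return n
//		}
//	}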
-func canonicalNode(n *Node) *Node { - if n != nil && n.Op == ONAME && n.Name.IsClosureVar() { +func canonicalNode(n *ir.Node) *ir.Node { + if n != nil && n.Op == ir.ONAME && n.Name.IsClosureVar() { n = n.Name.Defn if n.Name.IsClosureVar() { base.Fatalf("still closure var") @@ -1062,7 +1063,7 @@ func canonicalNode(n *Node) *Node { return n } -func (e *Escape) newLoc(n *Node, transient bool) *EscLocation { +func (e *Escape) newLoc(n *ir.Node, transient bool) *EscLocation { if e.curfn == nil { base.Fatalf("e.curfn isn't set") } @@ -1079,7 +1080,7 @@ func (e *Escape) newLoc(n *Node, transient bool) *EscLocation { } e.allLocs = append(e.allLocs, loc) if n != nil { - if n.Op == ONAME && n.Name.Curfn != e.curfn { + if n.Op == ir.ONAME && n.Name.Curfn != e.curfn { base.Fatalf("curfn mismatch: %v != %v", n.Name.Curfn, e.curfn) } @@ -1095,7 +1096,7 @@ func (e *Escape) newLoc(n *Node, transient bool) *EscLocation { return loc } -func (e *Escape) oldLoc(n *Node) *EscLocation { +func (e *Escape) oldLoc(n *ir.Node) *EscLocation { n = canonicalNode(n) return n.Opt().(*EscLocation) } @@ -1120,7 +1121,7 @@ func (e *Escape) flow(k EscHole, src *EscLocation) { } explanation := e.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{}) if logopt.Enabled() { - logopt.LogOpt(src.n.Pos, "escapes", "escape", e.curfn.funcname(), fmt.Sprintf("%v escapes to heap", src.n), explanation) + logopt.LogOpt(src.n.Pos, "escapes", "escape", ir.FuncName(e.curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation) } } @@ -1214,14 +1215,14 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc // corresponding result parameter, then record // that value flow for tagging the function // later. - if l.isName(PPARAM) { + if l.isName(ir.PPARAM) { if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.escapes { if base.Flag.LowerM >= 2 { fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos), l.n, e.explainLoc(root), derefs) } explanation := e.explainPath(root, l) if logopt.Enabled() { - logopt.LogOpt(l.n.Pos, "leak", "escape", e.curfn.funcname(), + logopt.LogOpt(l.n.Pos, "leak", "escape", ir.FuncName(e.curfn), fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, e.explainLoc(root), derefs), explanation) } } @@ -1238,7 +1239,7 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc } explanation := e.explainPath(root, l) if logopt.Enabled() { - logopt.LogOpt(l.n.Pos, "escape", "escape", e.curfn.funcname(), fmt.Sprintf("%v escapes to heap", l.n), explanation) + logopt.LogOpt(l.n.Pos, "escape", "escape", ir.FuncName(e.curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation) } } l.escapes = true @@ -1312,7 +1313,7 @@ func (e *Escape) explainFlow(pos string, dst, srcloc *EscLocation, derefs int, n } else if srcloc != nil && srcloc.n != nil { epos = srcloc.n.Pos } - explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", e.curfn.funcname(), flow)) + explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", ir.FuncName(e.curfn), flow)) } for note := notes; note != nil; note = note.next { @@ -1320,7 +1321,7 @@ func (e *Escape) explainFlow(pos string, dst, srcloc *EscLocation, derefs int, n fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos)) } if logopt.Enabled() { - explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos, "escflow", "escape", e.curfn.funcname(), + explanation = append(explanation, 
logopt.NewLoggedOpt(note.where.Pos, "escflow", "escape", ir.FuncName(e.curfn), fmt.Sprintf(" from %v (%v)", note.where, note.why))) } } @@ -1335,7 +1336,7 @@ func (e *Escape) explainLoc(l *EscLocation) string { // TODO(mdempsky): Omit entirely. return "{temp}" } - if l.n.Op == ONAME { + if l.n.Op == ir.ONAME { return fmt.Sprintf("%v", l.n) } return fmt.Sprintf("{storage for %v}", l.n) @@ -1352,7 +1353,7 @@ func (e *Escape) outlives(l, other *EscLocation) bool { // We don't know what callers do with returned values, so // pessimistically we need to assume they flow to the heap and // outlive everything too. - if l.isName(PPARAMOUT) { + if l.isName(ir.PPARAMOUT) { // Exception: Directly called closures can return // locations allocated outside of them without forcing // them to the heap. For example: @@ -1393,8 +1394,8 @@ func (e *Escape) outlives(l, other *EscLocation) bool { } // containsClosure reports whether c is a closure contained within f. -func containsClosure(f, c *Node) bool { - if f.Op != ODCLFUNC || c.Op != ODCLFUNC { +func containsClosure(f, c *ir.Node) bool { + if f.Op != ir.ODCLFUNC || c.Op != ir.ODCLFUNC { base.Fatalf("bad containsClosure: %v, %v", f, c) } @@ -1414,7 +1415,7 @@ func containsClosure(f, c *Node) bool { func (l *EscLocation) leakTo(sink *EscLocation, derefs int) { // If sink is a result parameter and we can fit return bits // into the escape analysis tag, then record a return leak. - if sink.isName(PPARAMOUT) && sink.curfn == l.curfn { + if sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn { // TODO(mdempsky): Eliminate dependency on Vargen here. ri := int(sink.n.Name.Vargen) - 1 if ri < numEscResults { @@ -1428,7 +1429,7 @@ func (l *EscLocation) leakTo(sink *EscLocation, derefs int) { l.paramEsc.AddHeap(derefs) } -func (e *Escape) finish(fns []*Node) { +func (e *Escape) finish(fns []*ir.Node) { // Record parameter tags for package export data. for _, fn := range fns { fn.Esc = EscFuncTagged @@ -1452,18 +1453,18 @@ func (e *Escape) finish(fns []*Node) { // Update n.Esc based on escape analysis results. if loc.escapes { - if n.Op != ONAME { + if n.Op != ir.ONAME { if base.Flag.LowerM != 0 { base.WarnfAt(n.Pos, "%S escapes to heap", n) } if logopt.Enabled() { - logopt.LogOpt(n.Pos, "escape", "escape", e.curfn.funcname()) + logopt.LogOpt(n.Pos, "escape", "escape", ir.FuncName(e.curfn)) } } n.Esc = EscHeap addrescapes(n) } else { - if base.Flag.LowerM != 0 && n.Op != ONAME { + if base.Flag.LowerM != 0 && n.Op != ir.ONAME { base.WarnfAt(n.Pos, "%S does not escape", n) } n.Esc = EscNone @@ -1474,8 +1475,8 @@ func (e *Escape) finish(fns []*Node) { } } -func (l *EscLocation) isName(c Class) bool { - return l.n != nil && l.n.Op == ONAME && l.n.Class() == c +func (l *EscLocation) isName(c ir.Class) bool { + return l.n != nil && l.n.Op == ir.ONAME && l.n.Class() == c } const numEscResults = 7 @@ -1572,3 +1573,466 @@ func ParseLeaks(s string) EscLeaks { copy(l[:], s[4:]) return l } + +func escapes(all []*ir.Node) { + visitBottomUp(all, escapeFuncs) +} + +const ( + EscFuncUnknown = 0 + iota + EscFuncPlanned + EscFuncStarted + EscFuncTagged +) + +func min8(a, b int8) int8 { + if a < b { + return a + } + return b +} + +func max8(a, b int8) int8 { + if a > b { + return a + } + return b +} + +const ( + EscUnknown = iota + EscNone // Does not escape to heap, result, or parameters. + EscHeap // Reachable from the heap + EscNever // By construction will not escape. +) + +// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way. 
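// For illustration (file and line numbers are hypothetical), the finish
// pass above surfaces its verdicts under -gcflags=-m:
//
//	func leak() *int {
//		x := new(int)
//		return x // the allocation's location escapes: Esc = EscHeap
//	}
//
//	$ go build -gcflags=-m p.go
//	p.go:4:10: new(int) escapes to heap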
+func funcSym(fn *ir.Node) *types.Sym { + if fn == nil || fn.Func.Nname == nil { + return nil + } + return fn.Func.Nname.Sym +} + +// Mark labels that have no backjumps to them as not increasing e.loopdepth. +// Walk hasn't generated (goto|label).Left.Sym.Label yet, so we'll cheat +// and set it to one of the following two. Then in esc we'll clear it again. +var ( + looping = ir.Nod(ir.OXXX, nil, nil) + nonlooping = ir.Nod(ir.OXXX, nil, nil) +) + +func isSliceSelfAssign(dst, src *ir.Node) bool { + // Detect the following special case. + // + // func (b *Buffer) Foo() { + // n, m := ... + // b.buf = b.buf[n:m] + // } + // + // This assignment is a no-op for escape analysis, + // it does not store any new pointers into b that were not already there. + // However, without this special case b will escape, because we assign to OIND/ODOTPTR. + // Here we assume that the statement will not contain calls, + // that is, that order will move any calls to init. + // Otherwise base ONAME value could change between the moments + // when we evaluate it for dst and for src. + + // dst is ONAME dereference. + if dst.Op != ir.ODEREF && dst.Op != ir.ODOTPTR || dst.Left.Op != ir.ONAME { + return false + } + // src is a slice operation. + switch src.Op { + case ir.OSLICE, ir.OSLICE3, ir.OSLICESTR: + // OK. + case ir.OSLICEARR, ir.OSLICE3ARR: + // Since arrays are embedded into containing object, + // slice of non-pointer array will introduce a new pointer into b that was not already there + // (pointer to b itself). After such assignment, if b contents escape, + // b escapes as well. If we ignore such OSLICEARR, we will conclude + // that b does not escape when b contents do. + // + // Pointer to an array is OK since it's not stored inside b directly. + // For slicing an array (not pointer to array), there is an implicit OADDR. + // We check that to determine non-pointer array slicing. + if src.Left.Op == ir.OADDR { + return false + } + default: + return false + } + // slice is applied to ONAME dereference. + if src.Left.Op != ir.ODEREF && src.Left.Op != ir.ODOTPTR || src.Left.Left.Op != ir.ONAME { + return false + } + // dst and src reference the same base ONAME. + return dst.Left == src.Left.Left +} + +// isSelfAssign reports whether assignment from src to dst can +// be ignored by the escape analysis as it's effectively a self-assignment. +func isSelfAssign(dst, src *ir.Node) bool { + if isSliceSelfAssign(dst, src) { + return true + } + + // Detect trivial assignments that assign back to the same object. + // + // It covers these cases: + // val.x = val.y + // val.x[i] = val.y[j] + // val.x1.x2 = val.x1.y2 + // ... etc + // + // These assignments do not change assigned object lifetime. + + if dst == nil || src == nil || dst.Op != src.Op { + return false + } + + switch dst.Op { + case ir.ODOT, ir.ODOTPTR: + // Safe trailing accessors that are permitted to differ. + case ir.OINDEX: + if mayAffectMemory(dst.Right) || mayAffectMemory(src.Right) { + return false + } + default: + return false + } + + // The expression prefix must be both "safe" and identical. + return samesafeexpr(dst.Left, src.Left) +} + +// mayAffectMemory reports whether evaluation of n may affect the program's +// memory state. If the expression can't affect memory state, then it can be +// safely ignored by the escape analysis. 
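// For illustration, minimal sketches of the two self-assignment shapes
// recognized above:
//
//	type buffer struct{ buf []byte }
//
//	func (b *buffer) shrink(n, m int) {
//		b.buf = b.buf[n:m] // slice self-assignment: stores no new pointer into *b
//	}
//
//	type pair struct{ x, y *int }
//
//	func (v *pair) copyField() {
//		v.x = v.y // trivial self-assignment: same safe base expression v
//	}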
+func mayAffectMemory(n *ir.Node) bool { + // We may want to use a list of "memory safe" ops instead of generally + // "side-effect free", which would include all calls and other ops that can + // allocate or change global state. For now, it's safer to start with the latter. + // + // We're ignoring things like division by zero, index out of range, + // and nil pointer dereference here. + switch n.Op { + case ir.ONAME, ir.OCLOSUREVAR, ir.OLITERAL, ir.ONIL: + return false + + // Left+Right group. + case ir.OINDEX, ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD: + return mayAffectMemory(n.Left) || mayAffectMemory(n.Right) + + // Left group. + case ir.ODOT, ir.ODOTPTR, ir.ODEREF, ir.OCONVNOP, ir.OCONV, ir.OLEN, ir.OCAP, + ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF: + return mayAffectMemory(n.Left) + + default: + return true + } +} + +// heapAllocReason returns the reason the given Node must be heap +// allocated, or the empty string if it doesn't. +func heapAllocReason(n *ir.Node) string { + if n.Type == nil { + return "" + } + + // Parameters are always passed via the stack. + if n.Op == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) { + return "" + } + + if n.Type.Width > maxStackVarSize { + return "too large for stack" + } + + if (n.Op == ir.ONEW || n.Op == ir.OPTRLIT) && n.Type.Elem().Width >= maxImplicitStackVarSize { + return "too large for stack" + } + + if n.Op == ir.OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize { + return "too large for stack" + } + if n.Op == ir.OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize { + return "too large for stack" + } + + if n.Op == ir.OMAKESLICE { + r := n.Right + if r == nil { + r = n.Left + } + if !smallintconst(r) { + return "non-constant size" + } + if t := n.Type; t.Elem().Width != 0 && r.Int64Val() >= maxImplicitStackVarSize/t.Elem().Width { + return "too large for stack" + } + } + + return "" +} + +// addrescapes tags node n as having had its address taken +// by "increasing" the "value" of n.Esc to EscHeap. +// Storage is allocated as necessary to allow the address +// to be taken. +func addrescapes(n *ir.Node) { + switch n.Op { + default: + // Unexpected Op, probably due to a previous type error. Ignore. + + case ir.ODEREF, ir.ODOTPTR: + // Nothing to do. + + case ir.ONAME: + if n == nodfp { + break + } + + // if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping. + // on PPARAM it means something different. + if n.Class() == ir.PAUTO && n.Esc == EscNever { + break + } + + // If a closure reference escapes, mark the outer variable as escaping. + if n.Name.IsClosureVar() { + addrescapes(n.Name.Defn) + break + } + + if n.Class() != ir.PPARAM && n.Class() != ir.PPARAMOUT && n.Class() != ir.PAUTO { + break + } + + // This is a plain parameter or local variable that needs to move to the heap, + // but possibly for the function outside the one we're compiling. + // That is, if we have: + // + // func f(x int) { + // func() { + // global = &x + // } + // } + // + // then we're analyzing the inner closure but we need to move x to the + // heap in f, not in the inner closure. Flip over to f before calling moveToHeap. 
+ oldfn := Curfn + Curfn = n.Name.Curfn + if Curfn.Op == ir.OCLOSURE { + Curfn = Curfn.Func.Decl + panic("can't happen") + } + ln := base.Pos + base.Pos = Curfn.Pos + moveToHeap(n) + Curfn = oldfn + base.Pos = ln + + // ODOTPTR has already been introduced, + // so these are the non-pointer ODOT and OINDEX. + // In &x[0], if x is a slice, then x does not + // escape--the pointer inside x does, but that + // is always a heap pointer anyway. + case ir.ODOT, ir.OINDEX, ir.OPAREN, ir.OCONVNOP: + if !n.Left.Type.IsSlice() { + addrescapes(n.Left) + } + } +} + +// moveToHeap records the parameter or local variable n as moved to the heap. +func moveToHeap(n *ir.Node) { + if base.Flag.LowerR != 0 { + ir.Dump("MOVE", n) + } + if base.Flag.CompilingRuntime { + base.Errorf("%v escapes to heap, not allowed in runtime", n) + } + if n.Class() == ir.PAUTOHEAP { + ir.Dump("n", n) + base.Fatalf("double move to heap") + } + + // Allocate a local stack variable to hold the pointer to the heap copy. + // temp will add it to the function declaration list automatically. + heapaddr := temp(types.NewPtr(n.Type)) + heapaddr.Sym = lookup("&" + n.Sym.Name) + heapaddr.Orig.Sym = heapaddr.Sym + heapaddr.Pos = n.Pos + + // Unset AutoTemp to persist the &foo variable name through SSA to + // liveness analysis. + // TODO(mdempsky/drchase): Cleaner solution? + heapaddr.Name.SetAutoTemp(false) + + // Parameters have a local stack copy used at function start/end + // in addition to the copy in the heap that may live longer than + // the function. + if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT { + if n.Xoffset == types.BADWIDTH { + base.Fatalf("addrescapes before param assignment") + } + + // We rewrite n below to be a heap variable (indirection of heapaddr). + // Preserve a copy so we can still write code referring to the original, + // and substitute that copy into the function declaration list + // so that analyses of the local (on-stack) variables use it. + stackcopy := NewName(n.Sym) + stackcopy.Type = n.Type + stackcopy.Xoffset = n.Xoffset + stackcopy.SetClass(n.Class()) + stackcopy.Name.Param.Heapaddr = heapaddr + if n.Class() == ir.PPARAMOUT { + // Make sure the pointer to the heap copy is kept live throughout the function. + // The function could panic at any point, and then a defer could recover. + // Thus, we need the pointer to the heap copy always available so the + // post-deferreturn code can copy the return value back to the stack. + // See issue 16095. + heapaddr.Name.SetIsOutputParamHeapAddr(true) + } + n.Name.Param.Stackcopy = stackcopy + + // Substitute the stackcopy into the function variable list so that + // liveness and other analyses use the underlying stack slot + // and not the now-pseudo-variable n. + found := false + for i, d := range Curfn.Func.Dcl { + if d == n { + Curfn.Func.Dcl[i] = stackcopy + found = true + break + } + // Parameters are before locals, so can stop early. + // This limits the search even in functions with many local variables. + if d.Class() == ir.PAUTO { + break + } + } + if !found { + base.Fatalf("cannot find %v in local variable list", n) + } + Curfn.Func.Dcl = append(Curfn.Func.Dcl, n) + } + + // Modify n in place so that uses of n now mean indirection of the heapaddr. 
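// For illustration (conceptual pseudo-Go, not compiler output), the
// rewrite performed here for an escaping parameter is roughly:
//
//	func f(n int) *int { return &n }
//
// becoming
//
//	func f(n' int) *int { // n' is the stackcopy, still a PPARAM
//		&n := new(int)    // heapaddr, a PAUTO pointer named "&n"
//		*(&n) = n'        // the prologue copies the parameter in
//		return &n         // all uses of n now indirect through heapaddr
//	}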
+ n.SetClass(ir.PAUTOHEAP) + n.Xoffset = 0 + n.Name.Param.Heapaddr = heapaddr + n.Esc = EscHeap + if base.Flag.LowerM != 0 { + base.WarnfAt(n.Pos, "moved to heap: %v", n) + } +} + +// This special tag is applied to uintptr variables +// that we believe may hold unsafe.Pointers for +// calls into assembly functions. +const unsafeUintptrTag = "unsafe-uintptr" + +// This special tag is applied to uintptr parameters of functions +// marked go:uintptrescapes. +const uintptrEscapesTag = "uintptr-escapes" + +func (e *Escape) paramTag(fn *ir.Node, narg int, f *types.Field) string { + name := func() string { + if f.Sym != nil { + return f.Sym.Name + } + return fmt.Sprintf("arg#%d", narg) + } + + if fn.Nbody.Len() == 0 { + // Assume that uintptr arguments must be held live across the call. + // This is most important for syscall.Syscall. + // See golang.org/issue/13372. + // This really doesn't have much to do with escape analysis per se, + // but we are reusing the ability to annotate an individual function + // argument and pass those annotations along to importing code. + if f.Type.IsUintptr() { + if base.Flag.LowerM != 0 { + base.WarnfAt(f.Pos, "assuming %v is unsafe uintptr", name()) + } + return unsafeUintptrTag + } + + if !f.Type.HasPointers() { // don't bother tagging for scalars + return "" + } + + var esc EscLeaks + + // External functions are assumed unsafe, unless + // //go:noescape is given before the declaration. + if fn.Func.Pragma&ir.Noescape != 0 { + if base.Flag.LowerM != 0 && f.Sym != nil { + base.WarnfAt(f.Pos, "%v does not escape", name()) + } + } else { + if base.Flag.LowerM != 0 && f.Sym != nil { + base.WarnfAt(f.Pos, "leaking param: %v", name()) + } + esc.AddHeap(0) + } + + return esc.Encode() + } + + if fn.Func.Pragma&ir.UintptrEscapes != 0 { + if f.Type.IsUintptr() { + if base.Flag.LowerM != 0 { + base.WarnfAt(f.Pos, "marking %v as escaping uintptr", name()) + } + return uintptrEscapesTag + } + if f.IsDDD() && f.Type.Elem().IsUintptr() { + // final argument is ...uintptr. + if base.Flag.LowerM != 0 { + base.WarnfAt(f.Pos, "marking %v as escaping ...uintptr", name()) + } + return uintptrEscapesTag + } + } + + if !f.Type.HasPointers() { // don't bother tagging for scalars + return "" + } + + // Unnamed parameters are unused and therefore do not escape. + if f.Sym == nil || f.Sym.IsBlank() { + var esc EscLeaks + return esc.Encode() + } + + n := ir.AsNode(f.Nname) + loc := e.oldLoc(n) + esc := loc.paramEsc + esc.Optimize() + + if base.Flag.LowerM != 0 && !loc.escapes { + if esc.Empty() { + base.WarnfAt(f.Pos, "%v does not escape", name()) + } + if x := esc.Heap(); x >= 0 { + if x == 0 { + base.WarnfAt(f.Pos, "leaking param: %v", name()) + } else { + // TODO(mdempsky): Mention level=x like below? 
+ base.WarnfAt(f.Pos, "leaking param content: %v", name()) + } + } + for i := 0; i < numEscResults; i++ { + if x := esc.Result(i); x >= 0 { + res := fn.Type.Results().Field(i).Sym + base.WarnfAt(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x) + } + } + } + + return esc.Encode() +} diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 1fa64fbe4497d..36bbb75050749 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/bio" "cmd/internal/src" @@ -20,10 +21,10 @@ func exportf(bout *bio.Writer, format string, args ...interface{}) { } } -var asmlist []*Node +var asmlist []*ir.Node // exportsym marks n for export (or reexport). -func exportsym(n *Node) { +func exportsym(n *ir.Node) { if n.Sym.OnExportList() { return } @@ -40,14 +41,14 @@ func initname(s string) bool { return s == "init" } -func autoexport(n *Node, ctxt Class) { - if n.Sym.Pkg != localpkg { +func autoexport(n *ir.Node, ctxt ir.Class) { + if n.Sym.Pkg != ir.LocalPkg { return } - if (ctxt != PEXTERN && ctxt != PFUNC) || dclcontext != PEXTERN { + if (ctxt != ir.PEXTERN && ctxt != ir.PFUNC) || dclcontext != ir.PEXTERN { return } - if n.Type != nil && n.Type.IsKind(TFUNC) && n.IsMethod() { + if n.Type != nil && n.Type.IsKind(types.TFUNC) && ir.IsMethod(n) { return } @@ -73,8 +74,8 @@ func dumpexport(bout *bio.Writer) { } } -func importsym(ipkg *types.Pkg, s *types.Sym, op Op) *Node { - n := asNode(s.PkgDef()) +func importsym(ipkg *types.Pkg, s *types.Sym, op ir.Op) *ir.Node { + n := ir.AsNode(s.PkgDef()) if n == nil { // iimport should have created a stub ONONAME // declaration for all imported symbols. The exception @@ -85,10 +86,10 @@ func importsym(ipkg *types.Pkg, s *types.Sym, op Op) *Node { } n = dclname(s) - s.SetPkgDef(asTypesNode(n)) + s.SetPkgDef(ir.AsTypesNode(n)) s.Importdef = ipkg } - if n.Op != ONONAME && n.Op != op { + if n.Op != ir.ONONAME && n.Op != op { redeclare(base.Pos, s, fmt.Sprintf("during import %q", ipkg.Path)) } return n @@ -98,16 +99,16 @@ func importsym(ipkg *types.Pkg, s *types.Sym, op Op) *Node { // If no such type has been declared yet, a forward declaration is returned. // ipkg is the package being imported func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type { - n := importsym(ipkg, s, OTYPE) - if n.Op != OTYPE { - t := types.New(TFORW) + n := importsym(ipkg, s, ir.OTYPE) + if n.Op != ir.OTYPE { + t := types.New(types.TFORW) t.Sym = s - t.Nod = asTypesNode(n) + t.Nod = ir.AsTypesNode(n) - n.Op = OTYPE + n.Op = ir.OTYPE n.Pos = pos n.Type = t - n.SetClass(PEXTERN) + n.SetClass(ir.PEXTERN) } t := n.Type @@ -119,9 +120,9 @@ func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type { // importobj declares symbol s as an imported object representable by op. 
// ipkg is the package being imported -func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op Op, ctxt Class, t *types.Type) *Node { +func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) *ir.Node { n := importsym(ipkg, s, op) - if n.Op != ONONAME { + if n.Op != ir.ONONAME { if n.Op == op && (n.Class() != ctxt || !types.Identical(n.Type, t)) { redeclare(base.Pos, s, fmt.Sprintf("during import %q", ipkg.Path)) } @@ -131,7 +132,7 @@ func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op Op, ctxt Class, t n.Op = op n.Pos = pos n.SetClass(ctxt) - if ctxt == PFUNC { + if ctxt == ir.PFUNC { n.Sym.SetFunc(true) } n.Type = t @@ -141,7 +142,7 @@ func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op Op, ctxt Class, t // importconst declares symbol s as an imported constant with type t and value val. // ipkg is the package being imported func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val constant.Value) { - n := importobj(ipkg, pos, s, OLITERAL, PEXTERN, t) + n := importobj(ipkg, pos, s, ir.OLITERAL, ir.PEXTERN, t) if n == nil { // TODO: Check that value matches. return } @@ -156,12 +157,12 @@ func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val // importfunc declares symbol s as an imported function with type t. // ipkg is the package being imported func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) { - n := importobj(ipkg, pos, s, ONAME, PFUNC, t) + n := importobj(ipkg, pos, s, ir.ONAME, ir.PFUNC, t) if n == nil { return } - n.Func = new(Func) + n.Func = new(ir.Func) if base.Flag.E != 0 { fmt.Printf("import func %v%S\n", s, t) @@ -171,7 +172,7 @@ func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) { // importvar declares symbol s as an imported variable with type t. // ipkg is the package being imported func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) { - n := importobj(ipkg, pos, s, ONAME, PEXTERN, t) + n := importobj(ipkg, pos, s, ir.ONAME, ir.PEXTERN, t) if n == nil { return } @@ -184,7 +185,7 @@ func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) { // importalias declares symbol s as an imported type alias with type t. 
// ipkg is the package being imported func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) { - n := importobj(ipkg, pos, s, OTYPE, PEXTERN, t) + n := importobj(ipkg, pos, s, ir.OTYPE, ir.PEXTERN, t) if n == nil { return } @@ -199,20 +200,20 @@ func dumpasmhdr() { if err != nil { base.Fatalf("%v", err) } - fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", localpkg.Name) + fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", ir.LocalPkg.Name) for _, n := range asmlist { if n.Sym.IsBlank() { continue } switch n.Op { - case OLITERAL: + case ir.OLITERAL: t := n.Val().Kind() if t == constant.Float || t == constant.Complex { break } fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym.Name, n.Val()) - case OTYPE: + case ir.OTYPE: t := n.Type if !t.IsStruct() || t.StructType().Map != nil || t.IsFuncArgStruct() { break diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go index a70bddca81340..0f5294b17d185 100644 --- a/src/cmd/compile/internal/gc/gen.go +++ b/src/cmd/compile/internal/gc/gen.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/src" @@ -29,14 +30,14 @@ func sysvar(name string) *obj.LSym { // isParamStackCopy reports whether this is the on-stack copy of a // function parameter that moved to the heap. -func (n *Node) isParamStackCopy() bool { - return n.Op == ONAME && (n.Class() == PPARAM || n.Class() == PPARAMOUT) && n.Name.Param.Heapaddr != nil +func isParamStackCopy(n *ir.Node) bool { + return n.Op == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Name.Param.Heapaddr != nil } // isParamHeapCopy reports whether this is the on-heap copy of // a function parameter that moved to the heap. -func (n *Node) isParamHeapCopy() bool { - return n.Op == ONAME && n.Class() == PAUTOHEAP && n.Name.Param.Stackcopy != nil +func isParamHeapCopy(n *ir.Node) bool { + return n.Op == ir.ONAME && n.Class() == ir.PAUTOHEAP && n.Name.Param.Stackcopy != nil } // autotmpname returns the name for an autotmp variable numbered n. 
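// For illustration (a hypothetical call site, assuming the ".autotmp_"
// prefix produced by autotmpname): a pass needing a scratch int inside
// Curfn would write
//
//	tmp := tempAt(base.Pos, Curfn, types.Types[types.TINT])
//	// tmp is a PAUTO ONAME named ".autotmp_N" with Esc = EscNever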
@@ -51,12 +52,12 @@ func autotmpname(n int) string { } // make a new Node off the books -func tempAt(pos src.XPos, curfn *Node, t *types.Type) *Node { +func tempAt(pos src.XPos, curfn *ir.Node, t *types.Type) *ir.Node { if curfn == nil { base.Fatalf("no curfn for tempAt") } - if curfn.Op == OCLOSURE { - Dump("tempAt", curfn) + if curfn.Op == ir.OCLOSURE { + ir.Dump("tempAt", curfn) base.Fatalf("adding tempAt to wrong closure function") } if t == nil { @@ -65,12 +66,12 @@ func tempAt(pos src.XPos, curfn *Node, t *types.Type) *Node { s := &types.Sym{ Name: autotmpname(len(curfn.Func.Dcl)), - Pkg: localpkg, + Pkg: ir.LocalPkg, } - n := newnamel(pos, s) - s.Def = asTypesNode(n) + n := ir.NewNameAt(pos, s) + s.Def = ir.AsTypesNode(n) n.Type = t - n.SetClass(PAUTO) + n.SetClass(ir.PAUTO) n.Esc = EscNever n.Name.Curfn = curfn n.Name.SetUsed(true) @@ -82,6 +83,6 @@ func tempAt(pos src.XPos, curfn *Node, t *types.Type) *Node { return n.Orig } -func temp(t *types.Type) *Node { +func temp(t *types.Type) *ir.Node { return tempAt(base.Pos, Curfn, t) } diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index d9b8f704a9362..8642cc4a30566 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/ssa" "cmd/compile/internal/types" "cmd/internal/obj" @@ -13,10 +14,6 @@ import ( "sync" ) -const ( - BADWIDTH = types.BADWIDTH -) - var ( // maximum size variable which we will allocate on the stack. // This limit is for explicit variable declarations like "var x T" or "x := ...". @@ -40,7 +37,7 @@ var ( // isRuntimePkg reports whether p is package runtime. func isRuntimePkg(p *types.Pkg) bool { - if base.Flag.CompilingRuntime && p == localpkg { + if base.Flag.CompilingRuntime && p == ir.LocalPkg { return true } return p.Path == "runtime" @@ -48,31 +45,12 @@ func isRuntimePkg(p *types.Pkg) bool { // isReflectPkg reports whether p is package reflect. func isReflectPkg(p *types.Pkg) bool { - if p == localpkg { + if p == ir.LocalPkg { return base.Ctxt.Pkgpath == "reflect" } return p.Path == "reflect" } -// The Class of a variable/function describes the "storage class" -// of a variable or function. During parsing, storage classes are -// called declaration contexts. -type Class uint8 - -//go:generate stringer -type=Class -const ( - Pxxx Class = iota // no class; used during ssa conversion to indicate pseudo-variables - PEXTERN // global variables - PAUTO // local variables - PAUTOHEAP // local variables or parameters moved to heap - PPARAM // input arguments - PPARAMOUT // output results - PFUNC // global functions - - // Careful: Class is stored in three bits in Node.flags. 
- _ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3) -) - // Slices in the runtime are represented by three components: // // type slice struct { @@ -102,8 +80,6 @@ var pragcgobuf [][]string var decldepth int32 -var localpkg *types.Pkg // package being compiled - var inimport bool // set during import var itabpkg *types.Pkg // fake pkg for itab entries @@ -126,55 +102,51 @@ var gopkg *types.Pkg // pseudo-package for method symbols on anonymous receiver var zerosize int64 -var simtype [NTYPE]types.EType +var simtype [types.NTYPE]types.EType var ( - isInt [NTYPE]bool - isFloat [NTYPE]bool - isComplex [NTYPE]bool - issimple [NTYPE]bool + isInt [types.NTYPE]bool + isFloat [types.NTYPE]bool + isComplex [types.NTYPE]bool + issimple [types.NTYPE]bool ) var ( - okforeq [NTYPE]bool - okforadd [NTYPE]bool - okforand [NTYPE]bool - okfornone [NTYPE]bool - okforcmp [NTYPE]bool - okforbool [NTYPE]bool - okforcap [NTYPE]bool - okforlen [NTYPE]bool - okforarith [NTYPE]bool + okforeq [types.NTYPE]bool + okforadd [types.NTYPE]bool + okforand [types.NTYPE]bool + okfornone [types.NTYPE]bool + okforcmp [types.NTYPE]bool + okforbool [types.NTYPE]bool + okforcap [types.NTYPE]bool + okforlen [types.NTYPE]bool + okforarith [types.NTYPE]bool ) -var okforconst [NTYPE]bool - var ( - okfor [OEND][]bool - iscmp [OEND]bool + okfor [ir.OEND][]bool + iscmp [ir.OEND]bool ) -var xtop []*Node +var xtop []*ir.Node -var exportlist []*Node +var exportlist []*ir.Node -var importlist []*Node // imported functions and methods with inlinable bodies +var importlist []*ir.Node // imported functions and methods with inlinable bodies var ( funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym) funcsyms []*types.Sym ) -var dclcontext Class // PEXTERN/PAUTO +var dclcontext ir.Class // PEXTERN/PAUTO -var Curfn *Node +var Curfn *ir.Node var Widthptr int var Widthreg int -var nblank *Node - var typecheckok bool // Whether we are adding any sort of code instrumentation, such as @@ -184,7 +156,7 @@ var instrumenting bool // Whether we are tracking lexical scopes for DWARF. var trackScopes bool -var nodfp *Node +var nodfp *ir.Node var autogeneratedPos src.XPos @@ -221,7 +193,7 @@ var thearch Arch var ( staticuint64s, - zerobase *Node + zerobase *ir.Node assertE2I, assertE2I2, diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index 92a3611cb7851..cf1c85ce29265 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -32,6 +32,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/ssa" "cmd/internal/obj" "cmd/internal/objabi" @@ -46,7 +47,7 @@ type Progs struct { next *obj.Prog // next Prog pc int64 // virtual PC; count of Progs pos src.XPos // position to use for new Progs - curfn *Node // fn these Progs are for + curfn *ir.Node // fn these Progs are for progcache []obj.Prog // local progcache cacheidx int // first free element of progcache @@ -56,7 +57,7 @@ type Progs struct { // newProgs returns a new Progs for fn. // worker indicates which of the backend workers will use the Progs. 
-func newProgs(fn *Node, worker int) *Progs { +func newProgs(fn *ir.Node, worker int) *Progs { pp := new(Progs) if base.Ctxt.CanReuseProgs() { sz := len(sharedProgArray) / base.Flag.LowerC @@ -173,17 +174,17 @@ func (pp *Progs) Appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16 return q } -func (pp *Progs) settext(fn *Node) { +func (pp *Progs) settext(fn *ir.Node) { if pp.Text != nil { base.Fatalf("Progs.settext called twice") } ptxt := pp.Prog(obj.ATEXT) pp.Text = ptxt - fn.Func.lsym.Func().Text = ptxt + fn.Func.LSym.Func().Text = ptxt ptxt.From.Type = obj.TYPE_MEM ptxt.From.Name = obj.NAME_EXTERN - ptxt.From.Sym = fn.Func.lsym + ptxt.From.Sym = fn.Func.LSym } // initLSym defines f's obj.LSym and initializes it based on the @@ -192,36 +193,36 @@ func (pp *Progs) settext(fn *Node) { // // initLSym must be called exactly once per function and must be // called for both functions with bodies and functions without bodies. -func (f *Func) initLSym(hasBody bool) { - if f.lsym != nil { +func initLSym(f *ir.Func, hasBody bool) { + if f.LSym != nil { base.Fatalf("Func.initLSym called twice") } - if nam := f.Nname; !nam.isBlank() { - f.lsym = nam.Sym.Linksym() - if f.Pragma&Systemstack != 0 { - f.lsym.Set(obj.AttrCFunc, true) + if nam := f.Nname; !ir.IsBlank(nam) { + f.LSym = nam.Sym.Linksym() + if f.Pragma&ir.Systemstack != 0 { + f.LSym.Set(obj.AttrCFunc, true) } var aliasABI obj.ABI needABIAlias := false - defABI, hasDefABI := symabiDefs[f.lsym.Name] + defABI, hasDefABI := symabiDefs[f.LSym.Name] if hasDefABI && defABI == obj.ABI0 { // Symbol is defined as ABI0. Create an // Internal -> ABI0 wrapper. - f.lsym.SetABI(obj.ABI0) + f.LSym.SetABI(obj.ABI0) needABIAlias, aliasABI = true, obj.ABIInternal } else { // No ABI override. Check that the symbol is // using the expected ABI. want := obj.ABIInternal - if f.lsym.ABI() != want { - base.Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.lsym.Name, f.lsym.ABI(), want) + if f.LSym.ABI() != want { + base.Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.LSym.Name, f.LSym.ABI(), want) } } isLinknameExported := nam.Sym.Linkname != "" && (hasBody || hasDefABI) - if abi, ok := symabiRefs[f.lsym.Name]; (ok && abi == obj.ABI0) || isLinknameExported { + if abi, ok := symabiRefs[f.LSym.Name]; (ok && abi == obj.ABI0) || isLinknameExported { // Either 1) this symbol is definitely // referenced as ABI0 from this package; or 2) // this symbol is defined in this package but @@ -233,7 +234,7 @@ func (f *Func) initLSym(hasBody bool) { // since other packages may "pull" symbols // using linkname and we don't want to create // duplicate ABI wrappers. - if f.lsym.ABI() != obj.ABI0 { + if f.LSym.ABI() != obj.ABI0 { needABIAlias, aliasABI = true, obj.ABI0 } } @@ -244,9 +245,9 @@ func (f *Func) initLSym(hasBody bool) { // rather than looking them up. The uniqueness // of f.lsym ensures uniqueness of asym. 
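// For illustration (an assumed file layout), the classic case needing an
// alias is a Go declaration whose body lives in assembly:
//
//	// hash.go
//	//go:noescape
//	func block(h *digest, p []byte) // defined in hash_amd64.s, hence ABI0
//
// The assembly definition lands in symabiDefs as ABI0, the Go-side
// reference defaults to ABIInternal, and the SABIALIAS symbol built
// below lets both names resolve to the one implementation.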
asym := &obj.LSym{ - Name: f.lsym.Name, + Name: f.LSym.Name, Type: objabi.SABIALIAS, - R: []obj.Reloc{{Sym: f.lsym}}, // 0 size, so "informational" + R: []obj.Reloc{{Sym: f.LSym}}, // 0 size, so "informational" } asym.SetABI(aliasABI) asym.Set(obj.AttrDuplicateOK, true) @@ -269,7 +270,7 @@ func (f *Func) initLSym(hasBody bool) { if f.Needctxt() { flag |= obj.NEEDCTXT } - if f.Pragma&Nosplit != 0 { + if f.Pragma&ir.Nosplit != 0 { flag |= obj.NOSPLIT } if f.ReflectMethod() { @@ -286,10 +287,10 @@ func (f *Func) initLSym(hasBody bool) { } } - base.Ctxt.InitTextSym(f.lsym, flag) + base.Ctxt.InitTextSym(f.LSym, flag) } -func ggloblnod(nam *Node) { +func ggloblnod(nam *ir.Node) { s := nam.Sym.Linksym() s.Gotype = ngotype(nam).Linksym() flags := 0 diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 246a057ade132..212db2184ed0c 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -205,6 +205,7 @@ import ( "bufio" "bytes" "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/goobj" "cmd/internal/src" @@ -258,8 +259,8 @@ func iexport(out *bufio.Writer) { p := iexporter{ allPkgs: map[*types.Pkg]bool{}, stringIndex: map[string]uint64{}, - declIndex: map[*Node]uint64{}, - inlineIndex: map[*Node]uint64{}, + declIndex: map[*ir.Node]uint64{}, + inlineIndex: map[*ir.Node]uint64{}, typIndex: map[*types.Type]uint64{}, } @@ -278,8 +279,8 @@ func iexport(out *bufio.Writer) { // Loop until no more work. We use a queue because while // writing out inline bodies, we may discover additional // declarations that are needed. - for !p.declTodo.empty() { - p.doDecl(p.declTodo.popLeft()) + for !p.declTodo.Empty() { + p.doDecl(p.declTodo.PopLeft()) } // Append indices to data0 section. @@ -313,15 +314,15 @@ func iexport(out *bufio.Writer) { // we're writing out the main index, which is also read by // non-compiler tools and includes a complete package description // (i.e., name and height). -func (w *exportWriter) writeIndex(index map[*Node]uint64, mainIndex bool) { +func (w *exportWriter) writeIndex(index map[*ir.Node]uint64, mainIndex bool) { // Build a map from packages to objects from that package. - pkgObjs := map[*types.Pkg][]*Node{} + pkgObjs := map[*types.Pkg][]*ir.Node{} // For the main index, make sure to include every package that // we reference, even if we're not exporting (or reexporting) // any symbols from it. if mainIndex { - pkgObjs[localpkg] = nil + pkgObjs[ir.LocalPkg] = nil for pkg := range w.p.allPkgs { pkgObjs[pkg] = nil } @@ -367,14 +368,14 @@ type iexporter struct { // main index. allPkgs map[*types.Pkg]bool - declTodo nodeQueue + declTodo ir.NodeQueue strings intWriter stringIndex map[string]uint64 data0 intWriter - declIndex map[*Node]uint64 - inlineIndex map[*Node]uint64 + declIndex map[*ir.Node]uint64 + inlineIndex map[*ir.Node]uint64 typIndex map[*types.Type]uint64 } @@ -393,13 +394,13 @@ func (p *iexporter) stringOff(s string) uint64 { } // pushDecl adds n to the declaration work queue, if not already present. -func (p *iexporter) pushDecl(n *Node) { - if n.Sym == nil || asNode(n.Sym.Def) != n && n.Op != OTYPE { +func (p *iexporter) pushDecl(n *ir.Node) { + if n.Sym == nil || ir.AsNode(n.Sym.Def) != n && n.Op != ir.OTYPE { base.Fatalf("weird Sym: %v, %v", n, n.Sym) } // Don't export predeclared declarations. 
- if n.Sym.Pkg == builtinpkg || n.Sym.Pkg == unsafepkg { + if n.Sym.Pkg == ir.BuiltinPkg || n.Sym.Pkg == unsafepkg { return } @@ -408,7 +409,7 @@ func (p *iexporter) pushDecl(n *Node) { } p.declIndex[n] = ^uint64(0) // mark n present in work queue - p.declTodo.pushRight(n) + p.declTodo.PushRight(n) } // exportWriter handles writing out individual data section chunks. @@ -422,22 +423,22 @@ type exportWriter struct { prevColumn int64 } -func (p *iexporter) doDecl(n *Node) { +func (p *iexporter) doDecl(n *ir.Node) { w := p.newWriter() w.setPkg(n.Sym.Pkg, false) switch n.Op { - case ONAME: + case ir.ONAME: switch n.Class() { - case PEXTERN: + case ir.PEXTERN: // Variable. w.tag('V') w.pos(n.Pos) w.typ(n.Type) w.varExt(n) - case PFUNC: - if n.IsMethod() { + case ir.PFUNC: + if ir.IsMethod(n) { base.Fatalf("unexpected method: %v", n) } @@ -451,14 +452,14 @@ func (p *iexporter) doDecl(n *Node) { base.Fatalf("unexpected class: %v, %v", n, n.Class()) } - case OLITERAL: + case ir.OLITERAL: // Constant. n = typecheck(n, ctxExpr) w.tag('C') w.pos(n.Pos) w.value(n.Type, n.Val()) - case OTYPE: + case ir.OTYPE: if IsAlias(n.Sym) { // Alias. w.tag('A') @@ -514,11 +515,11 @@ func (w *exportWriter) tag(tag byte) { w.data.WriteByte(tag) } -func (p *iexporter) doInline(f *Node) { +func (p *iexporter) doInline(f *ir.Node) { w := p.newWriter() w.setPkg(fnpkg(f), false) - w.stmtList(asNodes(f.Func.Inl.Body)) + w.stmtList(ir.AsNodes(f.Func.Inl.Body)) p.inlineIndex[f] = w.flush() } @@ -569,7 +570,7 @@ func (w *exportWriter) pkg(pkg *types.Pkg) { w.string(pkg.Path) } -func (w *exportWriter) qualifiedIdent(n *Node) { +func (w *exportWriter) qualifiedIdent(n *ir.Node) { // Ensure any referenced declarations are written out too. w.p.pushDecl(n) @@ -592,7 +593,7 @@ func (w *exportWriter) selector(s *types.Sym) { } else { pkg := w.currPkg if types.IsExported(name) { - pkg = localpkg + pkg = ir.LocalPkg } if s.Pkg != pkg { base.Fatalf("package mismatch in selector: %v in package %q, but want %q", s, s.Pkg.Path, pkg.Path) @@ -633,7 +634,7 @@ func (w *exportWriter) startType(k itag) { func (w *exportWriter) doTyp(t *types.Type) { if t.Sym != nil { - if t.Sym.Pkg == builtinpkg || t.Sym.Pkg == unsafepkg { + if t.Sym.Pkg == ir.BuiltinPkg || t.Sym.Pkg == unsafepkg { base.Fatalf("builtin type missing from typIndex: %v", t) } @@ -643,35 +644,35 @@ func (w *exportWriter) doTyp(t *types.Type) { } switch t.Etype { - case TPTR: + case types.TPTR: w.startType(pointerType) w.typ(t.Elem()) - case TSLICE: + case types.TSLICE: w.startType(sliceType) w.typ(t.Elem()) - case TARRAY: + case types.TARRAY: w.startType(arrayType) w.uint64(uint64(t.NumElem())) w.typ(t.Elem()) - case TCHAN: + case types.TCHAN: w.startType(chanType) w.uint64(uint64(t.ChanDir())) w.typ(t.Elem()) - case TMAP: + case types.TMAP: w.startType(mapType) w.typ(t.Key()) w.typ(t.Elem()) - case TFUNC: + case types.TFUNC: w.startType(signatureType) w.setPkg(t.Pkg(), true) w.signature(t) - case TSTRUCT: + case types.TSTRUCT: w.startType(structType) w.setPkg(t.Pkg(), true) @@ -684,7 +685,7 @@ func (w *exportWriter) doTyp(t *types.Type) { w.string(f.Note) } - case TINTER: + case types.TINTER: var embeddeds, methods []*types.Field for _, m := range t.Methods().Slice() { if m.Sym != nil { @@ -719,7 +720,7 @@ func (w *exportWriter) setPkg(pkg *types.Pkg, write bool) { if pkg == nil { // TODO(mdempsky): Proactively set Pkg for types and // remove this fallback logic. 
- pkg = localpkg + pkg = ir.LocalPkg } if write { @@ -746,7 +747,7 @@ func (w *exportWriter) paramList(fs []*types.Field) { func (w *exportWriter) param(f *types.Field) { w.pos(f.Pos) - w.localIdent(origSym(f.Sym), 0) + w.localIdent(ir.OrigSym(f.Sym), 0) w.typ(f.Type) } @@ -761,16 +762,16 @@ func constTypeOf(typ *types.Type) constant.Kind { } switch typ.Etype { - case TBOOL: + case types.TBOOL: return constant.Bool - case TSTRING: + case types.TSTRING: return constant.String - case TINT, TINT8, TINT16, TINT32, TINT64, - TUINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINTPTR: + case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64, + types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR: return constant.Int - case TFLOAT32, TFLOAT64: + case types.TFLOAT32, types.TFLOAT64: return constant.Float - case TCOMPLEX64, TCOMPLEX128: + case types.TCOMPLEX64, types.TCOMPLEX128: return constant.Complex } @@ -779,7 +780,7 @@ func constTypeOf(typ *types.Type) constant.Kind { } func (w *exportWriter) value(typ *types.Type, v constant.Value) { - assertRepresents(typ, v) + ir.AssertValidTypeForConst(typ, v) w.typ(typ) // Each type has only one admissible constant representation, @@ -808,9 +809,9 @@ func intSize(typ *types.Type) (signed bool, maxBytes uint) { } switch typ.Etype { - case TFLOAT32, TCOMPLEX64: + case types.TFLOAT32, types.TCOMPLEX64: return true, 3 - case TFLOAT64, TCOMPLEX128: + case types.TFLOAT64, types.TCOMPLEX128: return true, 7 } @@ -820,7 +821,7 @@ func intSize(typ *types.Type) (signed bool, maxBytes uint) { // The go/types API doesn't expose sizes to importers, so they // don't know how big these types are. switch typ.Etype { - case TINT, TUINT, TUINTPTR: + case types.TINT, types.TUINT, types.TUINTPTR: maxBytes = 8 } @@ -954,12 +955,12 @@ func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } // Compiler-specific extensions. -func (w *exportWriter) varExt(n *Node) { +func (w *exportWriter) varExt(n *ir.Node) { w.linkname(n.Sym) w.symIdx(n.Sym) } -func (w *exportWriter) funcExt(n *Node) { +func (w *exportWriter) funcExt(n *ir.Node) { w.linkname(n.Sym) w.symIdx(n.Sym) @@ -993,7 +994,7 @@ func (w *exportWriter) funcExt(n *Node) { func (w *exportWriter) methExt(m *types.Field) { w.bool(m.Nointerface()) - w.funcExt(asNode(m.Nname)) + w.funcExt(ir.AsNode(m.Nname)) } func (w *exportWriter) linkname(s *types.Sym) { @@ -1029,15 +1030,15 @@ func (w *exportWriter) typeExt(t *types.Type) { // Inline bodies. -func (w *exportWriter) stmtList(list Nodes) { +func (w *exportWriter) stmtList(list ir.Nodes) { for _, n := range list.Slice() { w.node(n) } - w.op(OEND) + w.op(ir.OEND) } -func (w *exportWriter) node(n *Node) { - if opprec[n.Op] < 0 { +func (w *exportWriter) node(n *ir.Node) { + if ir.OpPrec[n.Op] < 0 { w.stmt(n) } else { w.expr(n) @@ -1046,8 +1047,8 @@ func (w *exportWriter) node(n *Node) { // Caution: stmt will emit more than one node for statement nodes n that have a non-empty // n.Ninit and where n cannot have a natural init section (such as in "if", "for", etc.). 
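// For illustration, a minimal sketch of the hoisting described above:
// only statements like "if", "for", and "switch" keep a natural init
// section, so for a node such as
//
//	x := f() // an ir.OAS whose Ninit carries hoisted temporaries
//
// stmt re-emits each init statement first and then the assignment
// itself, flattening the init section for the importer.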
-func (w *exportWriter) stmt(n *Node) {
-	if n.Ninit.Len() > 0 && !stmtwithinit(n.Op) {
+func (w *exportWriter) stmt(n *ir.Node) {
+	if n.Ninit.Len() > 0 && !ir.StmtWithInit(n.Op) {
 		// can't use stmtList here since we don't want the final OEND
 		for _, n := range n.Ninit.Slice() {
 			w.stmt(n)
@@ -1055,8 +1056,8 @@ func (w *exportWriter) stmt(n *Node) {
 	}
 
 	switch op := n.Op; op {
-	case ODCL:
-		w.op(ODCL)
+	case ir.ODCL:
+		w.op(ir.ODCL)
 		w.pos(n.Left.Pos)
 		w.localName(n.Left)
 		w.typ(n.Left.Type)
@@ -1064,19 +1065,19 @@ func (w *exportWriter) stmt(n *Node) {
 	// case ODCLFIELD:
 	//	unimplemented - handled by default case
 
-	case OAS:
+	case ir.OAS:
 		// Don't export "v = <zero>" initializing statements, hope they're always
 		// preceded by the DCL which will be re-parsed and typechecked to reproduce
 		// the "v = <zero>" again.
 		if n.Right != nil {
-			w.op(OAS)
+			w.op(ir.OAS)
 			w.pos(n.Pos)
 			w.expr(n.Left)
 			w.expr(n.Right)
 		}
 
-	case OASOP:
-		w.op(OASOP)
+	case ir.OASOP:
+		w.op(ir.OASOP)
 		w.pos(n.Pos)
 		w.op(n.SubOp())
 		w.expr(n.Left)
@@ -1084,54 +1085,54 @@ func (w *exportWriter) stmt(n *Node) {
 			w.expr(n.Right)
 		}
 
-	case OAS2:
-		w.op(OAS2)
+	case ir.OAS2:
+		w.op(ir.OAS2)
 		w.pos(n.Pos)
 		w.exprList(n.List)
 		w.exprList(n.Rlist)
 
-	case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
-		w.op(OAS2)
+	case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+		w.op(ir.OAS2)
 		w.pos(n.Pos)
 		w.exprList(n.List)
-		w.exprList(asNodes([]*Node{n.Right}))
+		w.exprList(ir.AsNodes([]*ir.Node{n.Right}))
 
-	case ORETURN:
-		w.op(ORETURN)
+	case ir.ORETURN:
+		w.op(ir.ORETURN)
 		w.pos(n.Pos)
 		w.exprList(n.List)
 
 	// case ORETJMP:
 	//	unreachable - generated by compiler for trampoline routines
 
-	case OGO, ODEFER:
+	case ir.OGO, ir.ODEFER:
 		w.op(op)
 		w.pos(n.Pos)
 		w.expr(n.Left)
 
-	case OIF:
-		w.op(OIF)
+	case ir.OIF:
+		w.op(ir.OIF)
 		w.pos(n.Pos)
 		w.stmtList(n.Ninit)
 		w.expr(n.Left)
 		w.stmtList(n.Nbody)
 		w.stmtList(n.Rlist)
 
-	case OFOR:
-		w.op(OFOR)
+	case ir.OFOR:
+		w.op(ir.OFOR)
 		w.pos(n.Pos)
 		w.stmtList(n.Ninit)
 		w.exprsOrNil(n.Left, n.Right)
 		w.stmtList(n.Nbody)
 
-	case ORANGE:
-		w.op(ORANGE)
+	case ir.ORANGE:
+		w.op(ir.ORANGE)
 		w.pos(n.Pos)
 		w.stmtList(n.List)
 		w.expr(n.Right)
 		w.stmtList(n.Nbody)
 
-	case OSELECT, OSWITCH:
+	case ir.OSELECT, ir.OSWITCH:
 		w.op(op)
 		w.pos(n.Pos)
 		w.stmtList(n.Ninit)
@@ -1141,19 +1142,19 @@ func (w *exportWriter) stmt(n *Node) {
 	// case OCASE:
 	//	handled by caseList
 
-	case OFALL:
-		w.op(OFALL)
+	case ir.OFALL:
+		w.op(ir.OFALL)
 		w.pos(n.Pos)
 
-	case OBREAK, OCONTINUE:
+	case ir.OBREAK, ir.OCONTINUE:
 		w.op(op)
 		w.pos(n.Pos)
 		w.exprsOrNil(n.Left, nil)
 
-	case OEMPTY:
+	case ir.OEMPTY:
 		// nothing to emit
 
-	case OGOTO, OLABEL:
+	case ir.OGOTO, ir.OLABEL:
 		w.op(op)
 		w.pos(n.Pos)
 		w.string(n.Sym.Name)
@@ -1163,13 +1164,13 @@ func (w *exportWriter) stmt(n *Node) {
 	}
 }
 
-func (w *exportWriter) caseList(sw *Node) {
-	namedTypeSwitch := sw.Op == OSWITCH && sw.Left != nil && sw.Left.Op == OTYPESW && sw.Left.Left != nil
+func (w *exportWriter) caseList(sw *ir.Node) {
+	namedTypeSwitch := sw.Op == ir.OSWITCH && sw.Left != nil && sw.Left.Op == ir.OTYPESW && sw.Left.Left != nil
 
 	cases := sw.List.Slice()
 	w.uint64(uint64(len(cases)))
 	for _, cas := range cases {
-		if cas.Op != OCASE {
+		if cas.Op != ir.OCASE {
 			base.Fatalf("expected OCASE, got %v", cas)
 		}
 		w.pos(cas.Pos)
@@ -1181,14 +1182,14 @@ func (w *exportWriter) caseList(sw *Node) {
 	}
 }
 
-func (w *exportWriter) exprList(list Nodes) {
+func (w *exportWriter) exprList(list ir.Nodes) {
 	for _, n := range list.Slice() {
 		w.expr(n)
 	}
-	w.op(OEND)
+	w.op(ir.OEND)
 }
 
-func (w *exportWriter) expr(n *Node) {
+func (w *exportWriter) expr(n *ir.Node) {
// from nodefmt (fmt.go) // // nodefmt reverts nodes back to their original - we don't need to do @@ -1199,14 +1200,14 @@ func (w *exportWriter) expr(n *Node) { // } // from exprfmt (fmt.go) - for n.Op == OPAREN || n.Implicit() && (n.Op == ODEREF || n.Op == OADDR || n.Op == ODOT || n.Op == ODOTPTR) { + for n.Op == ir.OPAREN || n.Implicit() && (n.Op == ir.ODEREF || n.Op == ir.OADDR || n.Op == ir.ODOT || n.Op == ir.ODOTPTR) { n = n.Left } switch op := n.Op; op { // expressions // (somewhat closely following the structure of exprfmt in fmt.go) - case ONIL: + case ir.ONIL: if !n.Type.HasNil() { base.Fatalf("unexpected type for nil: %v", n.Type) } @@ -1214,49 +1215,49 @@ func (w *exportWriter) expr(n *Node) { w.expr(n.Orig) break } - w.op(OLITERAL) + w.op(ir.OLITERAL) w.pos(n.Pos) w.typ(n.Type) - case OLITERAL: - w.op(OLITERAL) + case ir.OLITERAL: + w.op(ir.OLITERAL) w.pos(n.Pos) w.value(n.Type, n.Val()) - case OMETHEXPR: + case ir.OMETHEXPR: // Special case: explicit name of func (*T) method(...) is turned into pkg.(*T).method, // but for export, this should be rendered as (*pkg.T).meth. // These nodes have the special property that they are names with a left OTYPE and a right ONAME. - w.op(OXDOT) + w.op(ir.OXDOT) w.pos(n.Pos) w.expr(n.Left) // n.Left.Op == OTYPE w.selector(n.Right.Sym) - case ONAME: + case ir.ONAME: // Package scope name. - if (n.Class() == PEXTERN || n.Class() == PFUNC) && !n.isBlank() { - w.op(ONONAME) + if (n.Class() == ir.PEXTERN || n.Class() == ir.PFUNC) && !ir.IsBlank(n) { + w.op(ir.ONONAME) w.qualifiedIdent(n) break } // Function scope name. - w.op(ONAME) + w.op(ir.ONAME) w.localName(n) // case OPACK, ONONAME: // should have been resolved by typechecking - handled by default case - case OTYPE: - w.op(OTYPE) + case ir.OTYPE: + w.op(ir.OTYPE) w.typ(n.Type) - case OTYPESW: - w.op(OTYPESW) + case ir.OTYPESW: + w.op(ir.OTYPESW) w.pos(n.Pos) var s *types.Sym if n.Left != nil { - if n.Left.Op != ONONAME { + if n.Left.Op != ir.ONONAME { base.Fatalf("expected ONONAME, got %v", n.Left) } s = n.Left.Sym @@ -1273,149 +1274,149 @@ func (w *exportWriter) expr(n *Node) { // case OCOMPLIT: // should have been resolved by typechecking - handled by default case - case OPTRLIT: - w.op(OADDR) + case ir.OPTRLIT: + w.op(ir.OADDR) w.pos(n.Pos) w.expr(n.Left) - case OSTRUCTLIT: - w.op(OSTRUCTLIT) + case ir.OSTRUCTLIT: + w.op(ir.OSTRUCTLIT) w.pos(n.Pos) w.typ(n.Type) w.elemList(n.List) // special handling of field names - case OARRAYLIT, OSLICELIT, OMAPLIT: - w.op(OCOMPLIT) + case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT: + w.op(ir.OCOMPLIT) w.pos(n.Pos) w.typ(n.Type) w.exprList(n.List) - case OKEY: - w.op(OKEY) + case ir.OKEY: + w.op(ir.OKEY) w.pos(n.Pos) w.exprsOrNil(n.Left, n.Right) // case OSTRUCTKEY: // unreachable - handled in case OSTRUCTLIT by elemList - case OCALLPART: + case ir.OCALLPART: // An OCALLPART is an OXDOT before type checking. 
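// Aside: in source terms, an OCALLPART is a method value, and the exporter
// deliberately writes it back out in its pre-typecheck selector (OXDOT)
// form. A small, self-contained example of the construct (illustrative
// only, not compiler code):

package main

type T struct{}

func (T) M() {}

var mv = T{}.M // method value: OCALLPART after typechecking, exported as the selector T{}.M

func main() { mv() }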
- w.op(OXDOT) + w.op(ir.OXDOT) w.pos(n.Pos) w.expr(n.Left) // Right node should be ONAME w.selector(n.Right.Sym) - case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH: - w.op(OXDOT) + case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH: + w.op(ir.OXDOT) w.pos(n.Pos) w.expr(n.Left) w.selector(n.Sym) - case ODOTTYPE, ODOTTYPE2: - w.op(ODOTTYPE) + case ir.ODOTTYPE, ir.ODOTTYPE2: + w.op(ir.ODOTTYPE) w.pos(n.Pos) w.expr(n.Left) w.typ(n.Type) - case OINDEX, OINDEXMAP: - w.op(OINDEX) + case ir.OINDEX, ir.OINDEXMAP: + w.op(ir.OINDEX) w.pos(n.Pos) w.expr(n.Left) w.expr(n.Right) - case OSLICE, OSLICESTR, OSLICEARR: - w.op(OSLICE) + case ir.OSLICE, ir.OSLICESTR, ir.OSLICEARR: + w.op(ir.OSLICE) w.pos(n.Pos) w.expr(n.Left) low, high, _ := n.SliceBounds() w.exprsOrNil(low, high) - case OSLICE3, OSLICE3ARR: - w.op(OSLICE3) + case ir.OSLICE3, ir.OSLICE3ARR: + w.op(ir.OSLICE3) w.pos(n.Pos) w.expr(n.Left) low, high, max := n.SliceBounds() w.exprsOrNil(low, high) w.expr(max) - case OCOPY, OCOMPLEX: + case ir.OCOPY, ir.OCOMPLEX: // treated like other builtin calls (see e.g., OREAL) w.op(op) w.pos(n.Pos) w.expr(n.Left) w.expr(n.Right) - w.op(OEND) + w.op(ir.OEND) - case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, ORUNES2STR, OSTR2BYTES, OSTR2RUNES, ORUNESTR: - w.op(OCONV) + case ir.OCONV, ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2RUNES, ir.ORUNESTR: + w.op(ir.OCONV) w.pos(n.Pos) w.expr(n.Left) w.typ(n.Type) - case OREAL, OIMAG, OAPPEND, OCAP, OCLOSE, ODELETE, OLEN, OMAKE, ONEW, OPANIC, ORECOVER, OPRINT, OPRINTN: + case ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN: w.op(op) w.pos(n.Pos) if n.Left != nil { w.expr(n.Left) - w.op(OEND) + w.op(ir.OEND) } else { w.exprList(n.List) // emits terminating OEND } // only append() calls may contain '...' arguments - if op == OAPPEND { + if op == ir.OAPPEND { w.bool(n.IsDDD()) } else if n.IsDDD() { base.Fatalf("exporter: unexpected '...' 
with %v call", op) } - case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OGETG: - w.op(OCALL) + case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OGETG: + w.op(ir.OCALL) w.pos(n.Pos) w.stmtList(n.Ninit) w.expr(n.Left) w.exprList(n.List) w.bool(n.IsDDD()) - case OMAKEMAP, OMAKECHAN, OMAKESLICE: + case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE: w.op(op) // must keep separate from OMAKE for importer w.pos(n.Pos) w.typ(n.Type) switch { default: // empty list - w.op(OEND) + w.op(ir.OEND) case n.List.Len() != 0: // pre-typecheck w.exprList(n.List) // emits terminating OEND case n.Right != nil: w.expr(n.Left) w.expr(n.Right) - w.op(OEND) - case n.Left != nil && (n.Op == OMAKESLICE || !n.Left.Type.IsUntyped()): + w.op(ir.OEND) + case n.Left != nil && (n.Op == ir.OMAKESLICE || !n.Left.Type.IsUntyped()): w.expr(n.Left) - w.op(OEND) + w.op(ir.OEND) } // unary expressions - case OPLUS, ONEG, OADDR, OBITNOT, ODEREF, ONOT, ORECV: + case ir.OPLUS, ir.ONEG, ir.OADDR, ir.OBITNOT, ir.ODEREF, ir.ONOT, ir.ORECV: w.op(op) w.pos(n.Pos) w.expr(n.Left) // binary expressions - case OADD, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, OLT, - OLSH, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSEND, OSUB, OXOR: + case ir.OADD, ir.OAND, ir.OANDAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT, + ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.OOROR, ir.ORSH, ir.OSEND, ir.OSUB, ir.OXOR: w.op(op) w.pos(n.Pos) w.expr(n.Left) w.expr(n.Right) - case OADDSTR: - w.op(OADDSTR) + case ir.OADDSTR: + w.op(ir.OADDSTR) w.pos(n.Pos) w.exprList(n.List) - case ODCLCONST: + case ir.ODCLCONST: // if exporting, DCLCONST should just be removed as its usage // has already been replaced with literals @@ -1425,11 +1426,11 @@ func (w *exportWriter) expr(n *Node) { } } -func (w *exportWriter) op(op Op) { +func (w *exportWriter) op(op ir.Op) { w.uint64(uint64(op)) } -func (w *exportWriter) exprsOrNil(a, b *Node) { +func (w *exportWriter) exprsOrNil(a, b *ir.Node) { ab := 0 if a != nil { ab |= 1 @@ -1446,7 +1447,7 @@ func (w *exportWriter) exprsOrNil(a, b *Node) { } } -func (w *exportWriter) elemList(list Nodes) { +func (w *exportWriter) elemList(list ir.Nodes) { w.uint64(uint64(list.Len())) for _, n := range list.Slice() { w.selector(n.Sym) @@ -1454,7 +1455,7 @@ func (w *exportWriter) elemList(list Nodes) { } } -func (w *exportWriter) localName(n *Node) { +func (w *exportWriter) localName(n *ir.Node) { // Escape analysis happens after inline bodies are saved, but // we're using the same ONAME nodes, so we might still see // PAUTOHEAP here. @@ -1463,7 +1464,7 @@ func (w *exportWriter) localName(n *Node) { // PPARAM/PPARAMOUT, because we only want to include vargen in // non-param names. 
var v int32 - if n.Class() == PAUTO || (n.Class() == PAUTOHEAP && n.Name.Param.Stackcopy == nil) { + if n.Class() == ir.PAUTO || (n.Class() == ir.PAUTOHEAP && n.Name.Param.Stackcopy == nil) { v = n.Name.Vargen } diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index cc0209ed03d72..84386140bb990 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -9,6 +9,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/bio" "cmd/internal/goobj" @@ -40,8 +41,8 @@ var ( inlineImporter = map[*types.Sym]iimporterAndOffset{} ) -func expandDecl(n *Node) { - if n.Op != ONONAME { +func expandDecl(n *ir.Node) { + if n.Op != ir.ONONAME { return } @@ -54,7 +55,7 @@ func expandDecl(n *Node) { r.doDecl(n) } -func expandInline(fn *Node) { +func expandInline(fn *ir.Node) { if fn.Func.Inl.Body != nil { return } @@ -67,7 +68,7 @@ func expandInline(fn *Node) { r.doInline(fn) } -func importReaderFor(n *Node, importers map[*types.Sym]iimporterAndOffset) *importReader { +func importReaderFor(n *ir.Node, importers map[*types.Sym]iimporterAndOffset) *importReader { x, ok := importers[n.Sym] if !ok { return nil @@ -147,10 +148,10 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) if pkg.Name == "" { pkg.Name = pkgName pkg.Height = pkgHeight - numImport[pkgName]++ + ir.NumImport[pkgName]++ // TODO(mdempsky): This belongs somewhere else. - pkg.Lookup("_").Def = asTypesNode(nblank) + pkg.Lookup("_").Def = ir.AsTypesNode(ir.BlankNode) } else { if pkg.Name != pkgName { base.Fatalf("conflicting package names %v and %v for path %q", pkg.Name, pkgName, pkg.Path) @@ -172,9 +173,9 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) // Create stub declaration. If used, this will // be overwritten by expandDecl. if s.Def != nil { - base.Fatalf("unexpected definition for %v: %v", s, asNode(s.Def)) + base.Fatalf("unexpected definition for %v: %v", s, ir.AsNode(s.Def)) } - s.Def = asTypesNode(npos(src.NoXPos, dclname(s))) + s.Def = ir.AsTypesNode(npos(src.NoXPos, dclname(s))) } } @@ -280,8 +281,8 @@ func (r *importReader) setPkg() { r.currPkg = r.pkg() } -func (r *importReader) doDecl(n *Node) { - if n.Op != ONONAME { +func (r *importReader) doDecl(n *ir.Node) { + if n.Op != ir.ONONAME { base.Fatalf("doDecl: unexpected Op for %v: %v", n.Sym, n.Op) } @@ -330,13 +331,13 @@ func (r *importReader) doDecl(n *Node) { recv := r.param() mtyp := r.signature(recv) - m := newfuncnamel(mpos, methodSym(recv.Type, msym), new(Func)) + m := newfuncnamel(mpos, methodSym(recv.Type, msym), new(ir.Func)) m.Type = mtyp - m.SetClass(PFUNC) + m.SetClass(ir.PFUNC) // methodSym already marked m.Sym as a function. f := types.NewField(mpos, msym, mtyp) - f.Nname = asTypesNode(m) + f.Nname = ir.AsTypesNode(m) ms[i] = f } t.Methods().Set(ms) @@ -434,7 +435,7 @@ func (r *importReader) ident() *types.Sym { } pkg := r.currPkg if types.IsExported(name) { - pkg = localpkg + pkg = ir.LocalPkg } return pkg.Lookup(name) } @@ -498,11 +499,11 @@ func (r *importReader) typ1() *types.Type { // support inlining functions with local defined // types. Therefore, this must be a package-scope // type. 
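// Aside: the importer above is lazy — every imported symbol starts out as a
// cheap ONONAME stub, and expandDecl decodes the real declaration only when
// the symbol is actually used. The pattern, reduced to a sketch with
// hypothetical names:

package sketch

type sym struct {
	resolved bool
	body     string        // stands in for the decoded declaration
	load     func() string // reads the declaration from export data
}

// resolve is the expandDecl analogue: decode on first use, no-op after.
func (s *sym) resolve() {
	if s.resolved {
		return
	}
	s.body = s.load()
	s.resolved = true
}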
- n := asNode(r.qualifiedIdent().PkgDef()) - if n.Op == ONONAME { + n := ir.AsNode(r.qualifiedIdent().PkgDef()) + if n.Op == ir.ONONAME { expandDecl(n) } - if n.Op != OTYPE { + if n.Op != ir.OTYPE { base.Fatalf("expected OTYPE, got %v: %v, %v", n.Op, n.Sym, n) } return n.Type @@ -542,7 +543,7 @@ func (r *importReader) typ1() *types.Type { fs[i] = f } - t := types.New(TSTRUCT) + t := types.New(types.TSTRUCT) t.SetPkg(r.currPkg) t.SetFields(fs) return t @@ -567,7 +568,7 @@ func (r *importReader) typ1() *types.Type { methods[i] = types.NewField(pos, sym, typ) } - t := types.New(TINTER) + t := types.New(types.TINTER) t.SetPkg(r.currPkg) t.SetInterface(append(embeddeds, methods...)) @@ -634,12 +635,12 @@ func (r *importReader) byte() byte { // Compiler-specific extensions. -func (r *importReader) varExt(n *Node) { +func (r *importReader) varExt(n *ir.Node) { r.linkname(n.Sym) r.symIdx(n.Sym) } -func (r *importReader) funcExt(n *Node) { +func (r *importReader) funcExt(n *ir.Node) { r.linkname(n.Sym) r.symIdx(n.Sym) @@ -652,7 +653,7 @@ func (r *importReader) funcExt(n *Node) { // Inline body. if u := r.uint64(); u > 0 { - n.Func.Inl = &Inline{ + n.Func.Inl = &ir.Inline{ Cost: int32(u - 1), } n.Func.Endlineno = r.pos() @@ -663,7 +664,7 @@ func (r *importReader) methExt(m *types.Field) { if r.bool() { m.SetNointerface(true) } - r.funcExt(asNode(m.Nname)) + r.funcExt(ir.AsNode(m.Nname)) } func (r *importReader) linkname(s *types.Sym) { @@ -694,7 +695,7 @@ func (r *importReader) typeExt(t *types.Type) { // so we can use index to reference the symbol. var typeSymIdx = make(map[*types.Type][2]int64) -func (r *importReader) doInline(n *Node) { +func (r *importReader) doInline(n *ir.Node) { if len(n.Func.Inl.Body) != 0 { base.Fatalf("%v already has inline body", n) } @@ -709,7 +710,7 @@ func (r *importReader) doInline(n *Node) { // (not doing so can cause significant performance // degradation due to unnecessary calls to empty // functions). - body = []*Node{} + body = []*ir.Node{} } n.Func.Inl.Body = body @@ -717,9 +718,9 @@ func (r *importReader) doInline(n *Node) { if base.Flag.E > 0 && base.Flag.LowerM > 2 { if base.Flag.LowerM > 3 { - fmt.Printf("inl body for %v %#v: %+v\n", n, n.Type, asNodes(n.Func.Inl.Body)) + fmt.Printf("inl body for %v %#v: %+v\n", n, n.Type, ir.AsNodes(n.Func.Inl.Body)) } else { - fmt.Printf("inl body for %v %#v: %v\n", n, n.Type, asNodes(n.Func.Inl.Body)) + fmt.Printf("inl body for %v %#v: %v\n", n, n.Type, ir.AsNodes(n.Func.Inl.Body)) } } } @@ -739,15 +740,15 @@ func (r *importReader) doInline(n *Node) { // unrefined nodes (since this is what the importer uses). The respective case // entries are unreachable in the importer. -func (r *importReader) stmtList() []*Node { - var list []*Node +func (r *importReader) stmtList() []*ir.Node { + var list []*ir.Node for { n := r.node() if n == nil { break } // OBLOCK nodes may be created when importing ODCL nodes - unpack them - if n.Op == OBLOCK { + if n.Op == ir.OBLOCK { list = append(list, n.List.Slice()...) 
} else { list = append(list, n) @@ -757,18 +758,18 @@ func (r *importReader) stmtList() []*Node { return list } -func (r *importReader) caseList(sw *Node) []*Node { - namedTypeSwitch := sw.Op == OSWITCH && sw.Left != nil && sw.Left.Op == OTYPESW && sw.Left.Left != nil +func (r *importReader) caseList(sw *ir.Node) []*ir.Node { + namedTypeSwitch := sw.Op == ir.OSWITCH && sw.Left != nil && sw.Left.Op == ir.OTYPESW && sw.Left.Left != nil - cases := make([]*Node, r.uint64()) + cases := make([]*ir.Node, r.uint64()) for i := range cases { - cas := nodl(r.pos(), OCASE, nil, nil) + cas := ir.NodAt(r.pos(), ir.OCASE, nil, nil) cas.List.Set(r.stmtList()) if namedTypeSwitch { // Note: per-case variables will have distinct, dotted // names after import. That's okay: swt.go only needs // Sym for diagnostics anyway. - caseVar := newnamel(cas.Pos, r.ident()) + caseVar := ir.NewNameAt(cas.Pos, r.ident()) declare(caseVar, dclcontext) cas.Rlist.Set1(caseVar) caseVar.Name.Defn = sw.Left @@ -779,8 +780,8 @@ func (r *importReader) caseList(sw *Node) []*Node { return cases } -func (r *importReader) exprList() []*Node { - var list []*Node +func (r *importReader) exprList() []*ir.Node { + var list []*ir.Node for { n := r.expr() if n == nil { @@ -791,16 +792,16 @@ func (r *importReader) exprList() []*Node { return list } -func (r *importReader) expr() *Node { +func (r *importReader) expr() *ir.Node { n := r.node() - if n != nil && n.Op == OBLOCK { + if n != nil && n.Op == ir.OBLOCK { base.Fatalf("unexpected block node: %v", n) } return n } // TODO(gri) split into expr and stmt -func (r *importReader) node() *Node { +func (r *importReader) node() *ir.Node { switch op := r.op(); op { // expressions // case OPAREN: @@ -809,34 +810,34 @@ func (r *importReader) node() *Node { // case ONIL: // unreachable - mapped to OLITERAL - case OLITERAL: + case ir.OLITERAL: pos := r.pos() typ := r.typ() - var n *Node + var n *ir.Node if typ.HasNil() { n = nodnil() } else { - n = nodlit(r.value(typ)) + n = ir.NewLiteral(r.value(typ)) } n = npos(pos, n) n.Type = typ return n - case ONONAME: + case ir.ONONAME: return mkname(r.qualifiedIdent()) - case ONAME: + case ir.ONAME: return mkname(r.ident()) // case OPACK, ONONAME: // unreachable - should have been resolved by typechecking - case OTYPE: + case ir.OTYPE: return typenod(r.typ()) - case OTYPESW: - n := nodl(r.pos(), OTYPESW, nil, nil) + case ir.OTYPESW: + n := ir.NodAt(r.pos(), ir.OTYPESW, nil, nil) if s := r.ident(); s != nil { n.Left = npos(n.Pos, newnoname(s)) } @@ -853,11 +854,11 @@ func (r *importReader) node() *Node { // case OPTRLIT: // unreachable - mapped to case OADDR below by exporter - case OSTRUCTLIT: + case ir.OSTRUCTLIT: // TODO(mdempsky): Export position information for OSTRUCTKEY nodes. 
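// Aside: stmtList here mirrors the writer's convention — node lists are not
// length-prefixed but sentinel-terminated (OEND on the wire, a nil node once
// decoded). A minimal sketch of that framing (hypothetical op values):

package sketch

const opEnd = 0 // sentinel terminating every encoded list

func writeList(dst, ops []byte) []byte {
	dst = append(dst, ops...)
	return append(dst, opEnd) // terminator instead of a length prefix
}

func readList(src []byte) (ops, rest []byte) {
	for i, op := range src {
		if op == opEnd {
			return src[:i], src[i+1:]
		}
	}
	return src, nil // malformed input: terminator missing
}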
savedlineno := base.Pos base.Pos = r.pos() - n := nodl(base.Pos, OCOMPLIT, nil, typenod(r.typ())) + n := ir.NodAt(base.Pos, ir.OCOMPLIT, nil, typenod(r.typ())) n.List.Set(r.elemList()) // special handling of field names base.Pos = savedlineno return n @@ -865,15 +866,15 @@ func (r *importReader) node() *Node { // case OARRAYLIT, OSLICELIT, OMAPLIT: // unreachable - mapped to case OCOMPLIT below by exporter - case OCOMPLIT: - n := nodl(r.pos(), OCOMPLIT, nil, typenod(r.typ())) + case ir.OCOMPLIT: + n := ir.NodAt(r.pos(), ir.OCOMPLIT, nil, typenod(r.typ())) n.List.Set(r.exprList()) return n - case OKEY: + case ir.OKEY: pos := r.pos() left, right := r.exprsOrNil() - return nodl(pos, OKEY, left, right) + return ir.NodAt(pos, ir.OKEY, left, right) // case OSTRUCTKEY: // unreachable - handled in case OSTRUCTLIT by elemList @@ -884,28 +885,28 @@ func (r *importReader) node() *Node { // case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH: // unreachable - mapped to case OXDOT below by exporter - case OXDOT: + case ir.OXDOT: // see parser.new_dotname - return npos(r.pos(), nodSym(OXDOT, r.expr(), r.ident())) + return npos(r.pos(), nodSym(ir.OXDOT, r.expr(), r.ident())) // case ODOTTYPE, ODOTTYPE2: // unreachable - mapped to case ODOTTYPE below by exporter - case ODOTTYPE: - n := nodl(r.pos(), ODOTTYPE, r.expr(), nil) + case ir.ODOTTYPE: + n := ir.NodAt(r.pos(), ir.ODOTTYPE, r.expr(), nil) n.Type = r.typ() return n // case OINDEX, OINDEXMAP, OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR: // unreachable - mapped to cases below by exporter - case OINDEX: - return nodl(r.pos(), op, r.expr(), r.expr()) + case ir.OINDEX: + return ir.NodAt(r.pos(), op, r.expr(), r.expr()) - case OSLICE, OSLICE3: - n := nodl(r.pos(), op, r.expr(), nil) + case ir.OSLICE, ir.OSLICE3: + n := ir.NodAt(r.pos(), op, r.expr(), nil) low, high := r.exprsOrNil() - var max *Node + var max *ir.Node if n.Op.IsSlice3() { max = r.expr() } @@ -915,15 +916,15 @@ func (r *importReader) node() *Node { // case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, ORUNES2STR, OSTR2BYTES, OSTR2RUNES, ORUNESTR: // unreachable - mapped to OCONV case below by exporter - case OCONV: - n := nodl(r.pos(), OCONV, r.expr(), nil) + case ir.OCONV: + n := ir.NodAt(r.pos(), ir.OCONV, r.expr(), nil) n.Type = r.typ() return n - case OCOPY, OCOMPLEX, OREAL, OIMAG, OAPPEND, OCAP, OCLOSE, ODELETE, OLEN, OMAKE, ONEW, OPANIC, ORECOVER, OPRINT, OPRINTN: + case ir.OCOPY, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN: n := npos(r.pos(), builtinCall(op)) n.List.Set(r.exprList()) - if op == OAPPEND { + if op == ir.OAPPEND { n.SetIsDDD(r.bool()) } return n @@ -931,45 +932,45 @@ func (r *importReader) node() *Node { // case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OGETG: // unreachable - mapped to OCALL case below by exporter - case OCALL: - n := nodl(r.pos(), OCALL, nil, nil) + case ir.OCALL: + n := ir.NodAt(r.pos(), ir.OCALL, nil, nil) n.Ninit.Set(r.stmtList()) n.Left = r.expr() n.List.Set(r.exprList()) n.SetIsDDD(r.bool()) return n - case OMAKEMAP, OMAKECHAN, OMAKESLICE: - n := npos(r.pos(), builtinCall(OMAKE)) + case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE: + n := npos(r.pos(), builtinCall(ir.OMAKE)) n.List.Append(typenod(r.typ())) n.List.Append(r.exprList()...) 
return n // unary expressions - case OPLUS, ONEG, OADDR, OBITNOT, ODEREF, ONOT, ORECV: - return nodl(r.pos(), op, r.expr(), nil) + case ir.OPLUS, ir.ONEG, ir.OADDR, ir.OBITNOT, ir.ODEREF, ir.ONOT, ir.ORECV: + return ir.NodAt(r.pos(), op, r.expr(), nil) // binary expressions - case OADD, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, OLT, - OLSH, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSEND, OSUB, OXOR: - return nodl(r.pos(), op, r.expr(), r.expr()) + case ir.OADD, ir.OAND, ir.OANDAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT, + ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.OOROR, ir.ORSH, ir.OSEND, ir.OSUB, ir.OXOR: + return ir.NodAt(r.pos(), op, r.expr(), r.expr()) - case OADDSTR: + case ir.OADDSTR: pos := r.pos() list := r.exprList() x := npos(pos, list[0]) for _, y := range list[1:] { - x = nodl(pos, OADD, x, y) + x = ir.NodAt(pos, ir.OADD, x, y) } return x // -------------------------------------------------------------------- // statements - case ODCL: + case ir.ODCL: pos := r.pos() lhs := npos(pos, dclname(r.ident())) typ := typenod(r.typ()) - return npos(pos, liststmt(variter([]*Node{lhs}, typ, nil))) // TODO(gri) avoid list creation + return npos(pos, liststmt(variter([]*ir.Node{lhs}, typ, nil))) // TODO(gri) avoid list creation // case ODCLFIELD: // unimplemented @@ -977,11 +978,11 @@ func (r *importReader) node() *Node { // case OAS, OASWB: // unreachable - mapped to OAS case below by exporter - case OAS: - return nodl(r.pos(), OAS, r.expr(), r.expr()) + case ir.OAS: + return ir.NodAt(r.pos(), ir.OAS, r.expr(), r.expr()) - case OASOP: - n := nodl(r.pos(), OASOP, nil, nil) + case ir.OASOP: + n := ir.NodAt(r.pos(), ir.OASOP, nil, nil) n.SetSubOp(r.op()) n.Left = r.expr() if !r.bool() { @@ -995,33 +996,33 @@ func (r *importReader) node() *Node { // case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV: // unreachable - mapped to OAS2 case below by exporter - case OAS2: - n := nodl(r.pos(), OAS2, nil, nil) + case ir.OAS2: + n := ir.NodAt(r.pos(), ir.OAS2, nil, nil) n.List.Set(r.exprList()) n.Rlist.Set(r.exprList()) return n - case ORETURN: - n := nodl(r.pos(), ORETURN, nil, nil) + case ir.ORETURN: + n := ir.NodAt(r.pos(), ir.ORETURN, nil, nil) n.List.Set(r.exprList()) return n // case ORETJMP: // unreachable - generated by compiler for trampolin routines (not exported) - case OGO, ODEFER: - return nodl(r.pos(), op, r.expr(), nil) + case ir.OGO, ir.ODEFER: + return ir.NodAt(r.pos(), op, r.expr(), nil) - case OIF: - n := nodl(r.pos(), OIF, nil, nil) + case ir.OIF: + n := ir.NodAt(r.pos(), ir.OIF, nil, nil) n.Ninit.Set(r.stmtList()) n.Left = r.expr() n.Nbody.Set(r.stmtList()) n.Rlist.Set(r.stmtList()) return n - case OFOR: - n := nodl(r.pos(), OFOR, nil, nil) + case ir.OFOR: + n := ir.NodAt(r.pos(), ir.OFOR, nil, nil) n.Ninit.Set(r.stmtList()) left, right := r.exprsOrNil() n.Left = left @@ -1029,15 +1030,15 @@ func (r *importReader) node() *Node { n.Nbody.Set(r.stmtList()) return n - case ORANGE: - n := nodl(r.pos(), ORANGE, nil, nil) + case ir.ORANGE: + n := ir.NodAt(r.pos(), ir.ORANGE, nil, nil) n.List.Set(r.stmtList()) n.Right = r.expr() n.Nbody.Set(r.stmtList()) return n - case OSELECT, OSWITCH: - n := nodl(r.pos(), op, nil, nil) + case ir.OSELECT, ir.OSWITCH: + n := ir.NodAt(r.pos(), op, nil, nil) n.Ninit.Set(r.stmtList()) left, _ := r.exprsOrNil() n.Left = left @@ -1047,27 +1048,27 @@ func (r *importReader) node() *Node { // case OCASE: // handled by caseList - case OFALL: - n := nodl(r.pos(), OFALL, nil, nil) + case ir.OFALL: + n := ir.NodAt(r.pos(), ir.OFALL, nil, 
nil) return n - case OBREAK, OCONTINUE: + case ir.OBREAK, ir.OCONTINUE: pos := r.pos() left, _ := r.exprsOrNil() if left != nil { - left = newname(left.Sym) + left = NewName(left.Sym) } - return nodl(pos, op, left, nil) + return ir.NodAt(pos, op, left, nil) // case OEMPTY: // unreachable - not emitted by exporter - case OGOTO, OLABEL: - n := nodl(r.pos(), op, nil, nil) + case ir.OGOTO, ir.OLABEL: + n := ir.NodAt(r.pos(), op, nil, nil) n.Sym = lookup(r.string()) return n - case OEND: + case ir.OEND: return nil default: @@ -1077,21 +1078,21 @@ func (r *importReader) node() *Node { } } -func (r *importReader) op() Op { - return Op(r.uint64()) +func (r *importReader) op() ir.Op { + return ir.Op(r.uint64()) } -func (r *importReader) elemList() []*Node { +func (r *importReader) elemList() []*ir.Node { c := r.uint64() - list := make([]*Node, c) + list := make([]*ir.Node, c) for i := range list { s := r.ident() - list[i] = nodSym(OSTRUCTKEY, r.expr(), s) + list[i] = nodSym(ir.OSTRUCTKEY, r.expr(), s) } return list } -func (r *importReader) exprsOrNil() (a, b *Node) { +func (r *importReader) exprsOrNil() (a, b *ir.Node) { ab := r.uint64() if ab&1 != 0 { a = r.expr() diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index 9319faf6a015d..f3c302f6bee26 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/obj" ) @@ -18,7 +19,7 @@ var renameinitgen int // Function collecting autotmps generated during typechecking, // to be included in the package-level init function. -var initTodo = nod(ODCLFUNC, nil, nil) +var initTodo = ir.Nod(ir.ODCLFUNC, nil, nil) func renameinit() *types.Sym { s := lookupN("init.", renameinitgen) @@ -32,7 +33,7 @@ func renameinit() *types.Sym { // 1) Initialize all of the packages the current package depends on. // 2) Initialize all the variables that have initializers. // 3) Run any init functions. -func fninit(n []*Node) { +func fninit(n []*ir.Node) { nf := initOrder(n) var deps []*obj.LSym // initTask records for packages the current package depends on @@ -47,7 +48,7 @@ func fninit(n []*Node) { if len(nf) > 0 { base.Pos = nf[0].Pos // prolog/epilog gets line number of first init stmt initializers := lookup("init") - fn := dclfunc(initializers, nod(OTFUNC, nil, nil)) + fn := dclfunc(initializers, ir.Nod(ir.OTFUNC, nil, nil)) for _, dcl := range initTodo.Func.Dcl { dcl.Name.Curfn = fn } @@ -75,24 +76,24 @@ func fninit(n []*Node) { // Record user init functions. for i := 0; i < renameinitgen; i++ { s := lookupN("init.", i) - fn := asNode(s.Def).Name.Defn + fn := ir.AsNode(s.Def).Name.Defn // Skip init functions with empty bodies. - if fn.Nbody.Len() == 1 && fn.Nbody.First().Op == OEMPTY { + if fn.Nbody.Len() == 1 && fn.Nbody.First().Op == ir.OEMPTY { continue } fns = append(fns, s.Linksym()) } - if len(deps) == 0 && len(fns) == 0 && localpkg.Name != "main" && localpkg.Name != "runtime" { + if len(deps) == 0 && len(fns) == 0 && ir.LocalPkg.Name != "main" && ir.LocalPkg.Name != "runtime" { return // nothing to initialize } // Make an .inittask structure. 
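// Aside: the ordering fninit arranges is observable from ordinary Go —
// package-level variables initialize in dependency order, then the init
// functions run, then main:

package main

import "fmt"

var a = b + 1 // reads b, so b is initialized first despite source order
var b = f()

func f() int { fmt.Println("1: b = 1"); return 1 }

func init() { fmt.Printf("2: a = %d\n", a) }

func main() { fmt.Println("3: main") }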
sym := lookup(".inittask") - nn := newname(sym) - nn.Type = types.Types[TUINT8] // fake type - nn.SetClass(PEXTERN) - sym.Def = asTypesNode(nn) + nn := NewName(sym) + nn.Type = types.Types[types.TUINT8] // fake type + nn.SetClass(ir.PEXTERN) + sym.Def = ir.AsTypesNode(nn) exportsym(nn) lsym := sym.Linksym() ot := 0 diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go index f553a3f057b60..942cb95f2077b 100644 --- a/src/cmd/compile/internal/gc/initorder.go +++ b/src/cmd/compile/internal/gc/initorder.go @@ -10,6 +10,8 @@ import ( "fmt" "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" ) // Package initialization @@ -62,7 +64,7 @@ const ( type InitOrder struct { // blocking maps initialization assignments to the assignments // that depend on it. - blocking map[*Node][]*Node + blocking map[*ir.Node][]*ir.Node // ready is the queue of Pending initialization assignments // that are ready for initialization. @@ -73,22 +75,22 @@ type InitOrder struct { // package-level declarations (in declaration order) and outputs the // corresponding list of statements to include in the init() function // body. -func initOrder(l []*Node) []*Node { +func initOrder(l []*ir.Node) []*ir.Node { s := InitSchedule{ - initplans: make(map[*Node]*InitPlan), - inittemps: make(map[*Node]*Node), + initplans: make(map[*ir.Node]*InitPlan), + inittemps: make(map[*ir.Node]*ir.Node), } o := InitOrder{ - blocking: make(map[*Node][]*Node), + blocking: make(map[*ir.Node][]*ir.Node), } // Process all package-level assignment in declaration order. for _, n := range l { switch n.Op { - case OAS, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV: + case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV: o.processAssign(n) o.flushReady(s.staticInit) - case ODCLCONST, ODCLFUNC, ODCLTYPE: + case ir.ODCLCONST, ir.ODCLFUNC, ir.ODCLTYPE: // nop default: base.Fatalf("unexpected package-level statement: %v", n) @@ -99,7 +101,7 @@ func initOrder(l []*Node) []*Node { // have been a dependency cycle. for _, n := range l { switch n.Op { - case OAS, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV: + case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV: if n.Initorder() != InitDone { // If there have already been errors // printed, those errors may have @@ -108,7 +110,7 @@ func initOrder(l []*Node) []*Node { // first. base.ExitIfErrors() - findInitLoopAndExit(firstLHS(n), new([]*Node)) + findInitLoopAndExit(firstLHS(n), new([]*ir.Node)) base.Fatalf("initialization unfinished, but failed to identify loop") } } @@ -123,8 +125,8 @@ func initOrder(l []*Node) []*Node { return s.out } -func (o *InitOrder) processAssign(n *Node) { - if n.Initorder() != InitNotStarted || n.Xoffset != BADWIDTH { +func (o *InitOrder) processAssign(n *ir.Node) { + if n.Initorder() != InitNotStarted || n.Xoffset != types.BADWIDTH { base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Xoffset) } @@ -137,7 +139,7 @@ func (o *InitOrder) processAssign(n *Node) { defn := dep.Name.Defn // Skip dependencies on functions (PFUNC) and // variables already initialized (InitDone). - if dep.Class() != PEXTERN || defn.Initorder() == InitDone { + if dep.Class() != ir.PEXTERN || defn.Initorder() == InitDone { continue } n.Xoffset++ @@ -152,16 +154,16 @@ func (o *InitOrder) processAssign(n *Node) { // flushReady repeatedly applies initialize to the earliest (in // declaration order) assignment ready for initialization and updates // the inverse dependency ("blocking") graph. 
-func (o *InitOrder) flushReady(initialize func(*Node)) {
+func (o *InitOrder) flushReady(initialize func(*ir.Node)) {
 	for o.ready.Len() != 0 {
-		n := heap.Pop(&o.ready).(*Node)
+		n := heap.Pop(&o.ready).(*ir.Node)
 		if n.Initorder() != InitPending || n.Xoffset != 0 {
 			base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Xoffset)
 		}
 
 		initialize(n)
 		n.SetInitorder(InitDone)
-		n.Xoffset = BADWIDTH
+		n.Xoffset = types.BADWIDTH
 
 		blocked := o.blocking[n]
 		delete(o.blocking, n)
@@ -181,7 +183,7 @@ func (o *InitOrder) flushReady(initialize func(*Node)) {
 // path points to a slice used for tracking the sequence of
 // variables/functions visited. Using a pointer to a slice allows the
 // slice capacity to grow and limit reallocations.
-func findInitLoopAndExit(n *Node, path *[]*Node) {
+func findInitLoopAndExit(n *ir.Node, path *[]*ir.Node) {
 	// We implement a simple DFS loop-finding algorithm. This
 	// could be faster, but initialization cycles are rare.
 
@@ -194,14 +196,14 @@ func findInitLoopAndExit(n *Node, path *[]*Node) {
 	// There might be multiple loops involving n; by sorting
 	// references, we deterministically pick the one reported.
-	refers := collectDeps(n.Name.Defn, false).Sorted(func(ni, nj *Node) bool {
+	refers := collectDeps(n.Name.Defn, false).Sorted(func(ni, nj *ir.Node) bool {
 		return ni.Pos.Before(nj.Pos)
 	})
 
 	*path = append(*path, n)
 	for _, ref := range refers {
 		// Short-circuit variables that were initialized.
-		if ref.Class() == PEXTERN && ref.Name.Defn.Initorder() == InitDone {
+		if ref.Class() == ir.PEXTERN && ref.Name.Defn.Initorder() == InitDone {
 			continue
 		}
 
@@ -213,12 +215,12 @@ func findInitLoopAndExit(n *Node, path *[]*Node) {
 // reportInitLoopAndExit reports an initialization loop as an error
 // and exits. However, if l is not actually an initialization loop, it
 // simply returns instead.
-func reportInitLoopAndExit(l []*Node) {
+func reportInitLoopAndExit(l []*ir.Node) {
 	// Rotate loop so that the earliest variable declaration is at
 	// the start.
 	i := -1
 	for j, n := range l {
-		if n.Class() == PEXTERN && (i == -1 || n.Pos.Before(l[i].Pos)) {
+		if n.Class() == ir.PEXTERN && (i == -1 || n.Pos.Before(l[i].Pos)) {
 			i = j
 		}
 	}
@@ -236,9 +238,9 @@ func reportInitLoopAndExit(l []*Node) {
 	var msg bytes.Buffer
 	fmt.Fprintf(&msg, "initialization loop:\n")
 	for _, n := range l {
-		fmt.Fprintf(&msg, "\t%v: %v refers to\n", n.Line(), n)
+		fmt.Fprintf(&msg, "\t%v: %v refers to\n", ir.Line(n), n)
 	}
-	fmt.Fprintf(&msg, "\t%v: %v", l[0].Line(), l[0])
+	fmt.Fprintf(&msg, "\t%v: %v", ir.Line(l[0]), l[0])
 
 	base.ErrorfAt(l[0].Pos, msg.String())
 	base.ErrorExit()
@@ -248,14 +250,14 @@ func reportInitLoopAndExit(l []*Node) {
 // variables that declaration n depends on. If transitive is true,
 // then it also includes the transitive dependencies of any depended
 // upon functions (but not variables).
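// Aside: processAssign/flushReady above amount to Kahn's algorithm —
// dependency counting plus a ready queue, with a heap so ready assignments
// come out in declaration order. The skeleton with a plain FIFO and
// hypothetical types:

package sketch

type assign struct {
	deps    []*assign // assignments whose variables this one reads
	blocked []*assign // assignments waiting on this one
	ndeps   int
}

func schedule(all []*assign) []*assign {
	var ready, out []*assign
	for _, a := range all {
		a.ndeps = len(a.deps)
		for _, d := range a.deps {
			d.blocked = append(d.blocked, a)
		}
		if a.ndeps == 0 {
			ready = append(ready, a)
		}
	}
	for len(ready) > 0 {
		a := ready[0]
		ready = ready[1:]
		out = append(out, a)
		for _, b := range a.blocked {
			if b.ndeps--; b.ndeps == 0 {
				ready = append(ready, b)
			}
		}
	}
	return out // len(out) < len(all) signals an initialization cycle
}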
-func collectDeps(n *Node, transitive bool) NodeSet { +func collectDeps(n *ir.Node, transitive bool) ir.NodeSet { d := initDeps{transitive: transitive} switch n.Op { - case OAS: + case ir.OAS: d.inspect(n.Right) - case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV: + case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV: d.inspect(n.Right) - case ODCLFUNC: + case ir.ODCLFUNC: d.inspectList(n.Nbody) default: base.Fatalf("unexpected Op: %v", n.Op) @@ -265,31 +267,31 @@ func collectDeps(n *Node, transitive bool) NodeSet { type initDeps struct { transitive bool - seen NodeSet + seen ir.NodeSet } -func (d *initDeps) inspect(n *Node) { inspect(n, d.visit) } -func (d *initDeps) inspectList(l Nodes) { inspectList(l, d.visit) } +func (d *initDeps) inspect(n *ir.Node) { ir.Inspect(n, d.visit) } +func (d *initDeps) inspectList(l ir.Nodes) { ir.InspectList(l, d.visit) } // visit calls foundDep on any package-level functions or variables // referenced by n, if any. -func (d *initDeps) visit(n *Node) bool { +func (d *initDeps) visit(n *ir.Node) bool { switch n.Op { - case OMETHEXPR: - d.foundDep(n.MethodName()) + case ir.OMETHEXPR: + d.foundDep(methodExprName(n)) return false - case ONAME: + case ir.ONAME: switch n.Class() { - case PEXTERN, PFUNC: + case ir.PEXTERN, ir.PFUNC: d.foundDep(n) } - case OCLOSURE: + case ir.OCLOSURE: d.inspectList(n.Func.Decl.Nbody) - case ODOTMETH, OCALLPART: - d.foundDep(n.MethodName()) + case ir.ODOTMETH, ir.OCALLPART: + d.foundDep(methodExprName(n)) } return true @@ -297,7 +299,7 @@ func (d *initDeps) visit(n *Node) bool { // foundDep records that we've found a dependency on n by adding it to // seen. -func (d *initDeps) foundDep(n *Node) { +func (d *initDeps) foundDep(n *ir.Node) { // Can happen with method expressions involving interface // types; e.g., fixedbugs/issue4495.go. if n == nil { @@ -314,7 +316,7 @@ func (d *initDeps) foundDep(n *Node) { return } d.seen.Add(n) - if d.transitive && n.Class() == PFUNC { + if d.transitive && n.Class() == ir.PFUNC { d.inspectList(n.Name.Defn.Nbody) } } @@ -326,13 +328,13 @@ func (d *initDeps) foundDep(n *Node) { // an OAS node's Pos may not be unique. For example, given the // declaration "var a, b = f(), g()", "a" must be ordered before "b", // but both OAS nodes use the "=" token's position as their Pos. -type declOrder []*Node +type declOrder []*ir.Node func (s declOrder) Len() int { return len(s) } func (s declOrder) Less(i, j int) bool { return firstLHS(s[i]).Pos.Before(firstLHS(s[j]).Pos) } func (s declOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(*Node)) } +func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(*ir.Node)) } func (s *declOrder) Pop() interface{} { n := (*s)[len(*s)-1] *s = (*s)[:len(*s)-1] @@ -341,11 +343,11 @@ func (s *declOrder) Pop() interface{} { // firstLHS returns the first expression on the left-hand side of // assignment n. 
-func firstLHS(n *Node) *Node { +func firstLHS(n *ir.Node) *ir.Node { switch n.Op { - case OAS: + case ir.OAS: return n.Left - case OAS2DOTTYPE, OAS2FUNC, OAS2RECV, OAS2MAPR: + case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2RECV, ir.OAS2MAPR: return n.List.First() } diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index d71ea9b5ed407..f982b43fb92cb 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -28,6 +28,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/types" "cmd/internal/obj" @@ -52,8 +53,8 @@ const ( // Get the function's package. For ordinary functions it's on the ->sym, but for imported methods // the ->sym can be re-used in the local package, so peel it off the receiver's type. -func fnpkg(fn *Node) *types.Pkg { - if fn.IsMethod() { +func fnpkg(fn *ir.Node) *types.Pkg { + if ir.IsMethod(fn) { // method rcvr := fn.Type.Recv().Type @@ -72,7 +73,7 @@ func fnpkg(fn *Node) *types.Pkg { // Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck // because they're a copy of an already checked body. -func typecheckinl(fn *Node) { +func typecheckinl(fn *ir.Node) { lno := setlineno(fn) expandInline(fn) @@ -83,12 +84,12 @@ func typecheckinl(fn *Node) { // the ->inl of a local function has been typechecked before caninl copied it. pkg := fnpkg(fn) - if pkg == localpkg || pkg == nil { + if pkg == ir.LocalPkg || pkg == nil { return // typecheckinl on local function } if base.Flag.LowerM > 2 || base.Debug.Export != 0 { - fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym, fn, asNodes(fn.Func.Inl.Body)) + fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym, fn, ir.AsNodes(fn.Func.Inl.Body)) } savefn := Curfn @@ -110,8 +111,8 @@ func typecheckinl(fn *Node) { // Caninl determines whether fn is inlineable. // If so, caninl saves fn->nbody in fn->inl and substitutes it with a copy. // fn and ->nbody will already have been typechecked. -func caninl(fn *Node) { - if fn.Op != ODCLFUNC { +func caninl(fn *ir.Node) { + if fn.Op != ir.ODCLFUNC { base.Fatalf("caninl %v", fn) } if fn.Func.Nname == nil { @@ -123,43 +124,43 @@ func caninl(fn *Node) { defer func() { if reason != "" { if base.Flag.LowerM > 1 { - fmt.Printf("%v: cannot inline %v: %s\n", fn.Line(), fn.Func.Nname, reason) + fmt.Printf("%v: cannot inline %v: %s\n", ir.Line(fn), fn.Func.Nname, reason) } if logopt.Enabled() { - logopt.LogOpt(fn.Pos, "cannotInlineFunction", "inline", fn.funcname(), reason) + logopt.LogOpt(fn.Pos, "cannotInlineFunction", "inline", ir.FuncName(fn), reason) } } }() } // If marked "go:noinline", don't inline - if fn.Func.Pragma&Noinline != 0 { + if fn.Func.Pragma&ir.Noinline != 0 { reason = "marked go:noinline" return } // If marked "go:norace" and -race compilation, don't inline. - if base.Flag.Race && fn.Func.Pragma&Norace != 0 { + if base.Flag.Race && fn.Func.Pragma&ir.Norace != 0 { reason = "marked go:norace with -race compilation" return } // If marked "go:nocheckptr" and -d checkptr compilation, don't inline. - if base.Debug.Checkptr != 0 && fn.Func.Pragma&NoCheckPtr != 0 { + if base.Debug.Checkptr != 0 && fn.Func.Pragma&ir.NoCheckPtr != 0 { reason = "marked go:nocheckptr" return } // If marked "go:cgo_unsafe_args", don't inline, since the // function makes assumptions about its argument frame layout. 
- if fn.Func.Pragma&CgoUnsafeArgs != 0 { + if fn.Func.Pragma&ir.CgoUnsafeArgs != 0 { reason = "marked go:cgo_unsafe_args" return } // If marked as "go:uintptrescapes", don't inline, since the // escape information is lost during inlining. - if fn.Func.Pragma&UintptrEscapes != 0 { + if fn.Func.Pragma&ir.UintptrEscapes != 0 { reason = "marked as having an escaping uintptr argument" return } @@ -168,7 +169,7 @@ func caninl(fn *Node) { // granularity, so inlining yeswritebarrierrec functions can // confuse it (#22342). As a workaround, disallow inlining // them for now. - if fn.Func.Pragma&Yeswritebarrierrec != 0 { + if fn.Func.Pragma&ir.Yeswritebarrierrec != 0 { reason = "marked go:yeswritebarrierrec" return } @@ -206,7 +207,7 @@ func caninl(fn *Node) { visitor := hairyVisitor{ budget: inlineMaxBudget, extraCallCost: cc, - usedLocals: make(map[*Node]bool), + usedLocals: make(map[*ir.Node]bool), } if visitor.visitList(fn.Nbody) { reason = visitor.reason @@ -217,29 +218,29 @@ func caninl(fn *Node) { return } - n.Func.Inl = &Inline{ + n.Func.Inl = &ir.Inline{ Cost: inlineMaxBudget - visitor.budget, Dcl: inlcopylist(pruneUnusedAutos(n.Name.Defn.Func.Dcl, &visitor)), Body: inlcopylist(fn.Nbody.Slice()), } if base.Flag.LowerM > 1 { - fmt.Printf("%v: can inline %#v with cost %d as: %#v { %#v }\n", fn.Line(), n, inlineMaxBudget-visitor.budget, fn.Type, asNodes(n.Func.Inl.Body)) + fmt.Printf("%v: can inline %#v with cost %d as: %#v { %#v }\n", ir.Line(fn), n, inlineMaxBudget-visitor.budget, fn.Type, ir.AsNodes(n.Func.Inl.Body)) } else if base.Flag.LowerM != 0 { - fmt.Printf("%v: can inline %v\n", fn.Line(), n) + fmt.Printf("%v: can inline %v\n", ir.Line(fn), n) } if logopt.Enabled() { - logopt.LogOpt(fn.Pos, "canInlineFunction", "inline", fn.funcname(), fmt.Sprintf("cost: %d", inlineMaxBudget-visitor.budget)) + logopt.LogOpt(fn.Pos, "canInlineFunction", "inline", ir.FuncName(fn), fmt.Sprintf("cost: %d", inlineMaxBudget-visitor.budget)) } } // inlFlood marks n's inline body for export and recursively ensures // all called functions are marked too. -func inlFlood(n *Node) { +func inlFlood(n *ir.Node) { if n == nil { return } - if n.Op != ONAME || n.Class() != PFUNC { + if n.Op != ir.ONAME || n.Class() != ir.PFUNC { base.Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op, n.Class()) } if n.Func == nil { @@ -259,28 +260,28 @@ func inlFlood(n *Node) { // Recursively identify all referenced functions for // reexport. We want to include even non-called functions, // because after inlining they might be callable. - inspectList(asNodes(n.Func.Inl.Body), func(n *Node) bool { + ir.InspectList(ir.AsNodes(n.Func.Inl.Body), func(n *ir.Node) bool { switch n.Op { - case OMETHEXPR: - inlFlood(n.MethodName()) + case ir.OMETHEXPR: + inlFlood(methodExprName(n)) - case ONAME: + case ir.ONAME: switch n.Class() { - case PFUNC: + case ir.PFUNC: inlFlood(n) exportsym(n) - case PEXTERN: + case ir.PEXTERN: exportsym(n) } - case ODOTMETH: - fn := n.MethodName() + case ir.ODOTMETH: + fn := methodExprName(n) inlFlood(fn) - case OCALLPART: + case ir.OCALLPART: // Okay, because we don't yet inline indirect // calls to method values. - case OCLOSURE: + case ir.OCLOSURE: // If the closure is inlinable, we'll need to // flood it too. But today we don't support // inlining functions that contain closures. @@ -299,11 +300,11 @@ type hairyVisitor struct { budget int32 reason string extraCallCost int32 - usedLocals map[*Node]bool + usedLocals map[*ir.Node]bool } // Look for anything we want to punt on. 
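// Aside: the visitor below is a budgeted tree walk — every node costs at
// least one unit, some constructs punt outright, and the walk fails once the
// budget is spent. Stripped to a sketch (hypothetical node type; the real
// cost table is far richer):

package sketch

type node struct {
	op       string
	children []*node
}

type visitor struct {
	budget int32
	reason string
}

func (v *visitor) tooHairy(n *node) bool {
	if n == nil {
		return false
	}
	v.budget-- // default cost: one unit per node
	switch n.op {
	case "recover", "defer", "select":
		v.reason = "unhandled op " + n.op
		return true
	}
	if v.budget < 0 {
		v.reason = "function too complex"
		return true
	}
	for _, c := range n.children {
		if v.tooHairy(c) {
			return true
		}
	}
	return false
}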
-func (v *hairyVisitor) visitList(ll Nodes) bool { +func (v *hairyVisitor) visitList(ll ir.Nodes) bool { for _, n := range ll.Slice() { if v.visit(n) { return true @@ -312,19 +313,19 @@ func (v *hairyVisitor) visitList(ll Nodes) bool { return false } -func (v *hairyVisitor) visit(n *Node) bool { +func (v *hairyVisitor) visit(n *ir.Node) bool { if n == nil { return false } switch n.Op { // Call is okay if inlinable and we have the budget for the body. - case OCALLFUNC: + case ir.OCALLFUNC: // Functions that call runtime.getcaller{pc,sp} can not be inlined // because getcaller{pc,sp} expect a pointer to the caller's first argument. // // runtime.throw is a "cheap call" like panic in normal code. - if n.Left.Op == ONAME && n.Left.Class() == PFUNC && isRuntimePkg(n.Left.Sym.Pkg) { + if n.Left.Op == ir.ONAME && n.Left.Class() == ir.PFUNC && isRuntimePkg(n.Left.Sym.Pkg) { fn := n.Left.Sym.Name if fn == "getcallerpc" || fn == "getcallersp" { v.reason = "call to " + fn @@ -350,7 +351,7 @@ func (v *hairyVisitor) visit(n *Node) bool { v.budget -= v.extraCallCost // Call is okay if inlinable and we have the budget for the body. - case OCALLMETH: + case ir.OCALLMETH: t := n.Left.Type if t == nil { base.Fatalf("no function type for [%p] %+v\n", n.Left, n.Left) @@ -366,7 +367,7 @@ func (v *hairyVisitor) visit(n *Node) bool { break } } - if inlfn := n.Left.MethodName().Func; inlfn.Inl != nil { + if inlfn := methodExprName(n.Left).Func; inlfn.Inl != nil { v.budget -= inlfn.Inl.Cost break } @@ -374,58 +375,58 @@ func (v *hairyVisitor) visit(n *Node) bool { v.budget -= v.extraCallCost // Things that are too hairy, irrespective of the budget - case OCALL, OCALLINTER: + case ir.OCALL, ir.OCALLINTER: // Call cost for non-leaf inlining. v.budget -= v.extraCallCost - case OPANIC: + case ir.OPANIC: v.budget -= inlineExtraPanicCost - case ORECOVER: + case ir.ORECOVER: // recover matches the argument frame pointer to find // the right panic value, so it needs an argument frame. v.reason = "call to recover" return true - case OCLOSURE, - ORANGE, - OSELECT, - OGO, - ODEFER, - ODCLTYPE, // can't print yet - ORETJMP: + case ir.OCLOSURE, + ir.ORANGE, + ir.OSELECT, + ir.OGO, + ir.ODEFER, + ir.ODCLTYPE, // can't print yet + ir.ORETJMP: v.reason = "unhandled op " + n.Op.String() return true - case OAPPEND: + case ir.OAPPEND: v.budget -= inlineExtraAppendCost - case ODCLCONST, OEMPTY, OFALL: + case ir.ODCLCONST, ir.OEMPTY, ir.OFALL: // These nodes don't produce code; omit from inlining budget. return false - case OLABEL: + case ir.OLABEL: // TODO(mdempsky): Add support for inlining labeled control statements. - if n.labeledControl() != nil { + if labeledControl(n) != nil { v.reason = "labeled control" return true } - case OBREAK, OCONTINUE: + case ir.OBREAK, ir.OCONTINUE: if n.Sym != nil { // Should have short-circuited due to labeledControl above. base.Fatalf("unexpected labeled break/continue: %v", n) } - case OIF: - if Isconst(n.Left, constant.Bool) { + case ir.OIF: + if ir.IsConst(n.Left, constant.Bool) { // This if and the condition cost nothing. return v.visitList(n.Ninit) || v.visitList(n.Nbody) || v.visitList(n.Rlist) } - case ONAME: - if n.Class() == PAUTO { + case ir.ONAME: + if n.Class() == ir.PAUTO { v.usedLocals[n] = true } @@ -446,26 +447,26 @@ func (v *hairyVisitor) visit(n *Node) bool { // inlcopylist (together with inlcopy) recursively copies a list of nodes, except // that it keeps the same ONAME, OTYPE, and OLITERAL nodes. It is used for copying // the body and dcls of an inlineable function. 
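// Aside: inlcopy below is a deep copy that deliberately shares its leaves —
// names, types, and literals don't need duplicating for inlining, so copies
// may alias them. The same idea on a toy tree (hypothetical type):

package sketch

type tree struct {
	leaf        bool
	left, right *tree
}

func copyTree(n *tree) *tree {
	if n == nil {
		return nil
	}
	if n.leaf {
		return n // share leaves, as inlcopy shares ONAME/OTYPE/OLITERAL
	}
	m := *n // duplicate interior nodes
	m.left = copyTree(n.left)
	m.right = copyTree(n.right)
	return &m
}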
-func inlcopylist(ll []*Node) []*Node {
-	s := make([]*Node, 0, len(ll))
+func inlcopylist(ll []*ir.Node) []*ir.Node {
+	s := make([]*ir.Node, 0, len(ll))
 	for _, n := range ll {
 		s = append(s, inlcopy(n))
 	}
 	return s
 }
 
-func inlcopy(n *Node) *Node {
+func inlcopy(n *ir.Node) *ir.Node {
 	if n == nil {
 		return nil
 	}
 
 	switch n.Op {
-	case ONAME, OTYPE, OLITERAL, ONIL:
+	case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.ONIL:
 		return n
 	}
 
-	m := n.copy()
-	if n.Op != OCALLPART && m.Func != nil {
+	m := ir.Copy(n)
+	if n.Op != ir.OCALLPART && m.Func != nil {
 		base.Fatalf("unexpected Func: %v", m)
 	}
 	m.Left = inlcopy(n.Left)
@@ -478,7 +479,7 @@ func inlcopy(n *Node) *Node {
 	return m
 }
 
-func countNodes(n *Node) int {
+func countNodes(n *ir.Node) int {
 	if n == nil {
 		return 0
 	}
@@ -502,7 +503,7 @@ func countNodes(n *Node) int {
 
 // Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any
 // calls made to inlineable functions. This is the external entry point.
-func inlcalls(fn *Node) {
+func inlcalls(fn *ir.Node) {
 	savefn := Curfn
 	Curfn = fn
 	maxCost := int32(inlineMaxBudget)
@@ -515,7 +516,7 @@ func inlcalls(fn *Node) {
 	// but allow inlining if there is a recursion cycle of many functions.
 	// Most likely, the inlining will stop before we even hit the beginning of
 	// the cycle again, but the map catches the unusual case.
-	inlMap := make(map[*Node]bool)
+	inlMap := make(map[*ir.Node]bool)
 	fn = inlnode(fn, maxCost, inlMap)
 	if fn != Curfn {
 		base.Fatalf("inlnode replaced curfn")
@@ -524,8 +525,8 @@ func inlcalls(fn *Node) {
 }
 
 // Turn an OINLCALL into a statement.
-func inlconv2stmt(n *Node) {
-	n.Op = OBLOCK
+func inlconv2stmt(n *ir.Node) {
+	n.Op = ir.OBLOCK
 	// n->ninit stays
 	n.List.Set(n.Nbody.Slice())
 
@@ -537,7 +538,7 @@ func inlconv2stmt(n *Node) {
 // Turn an OINLCALL into a single valued expression.
 // The result of inlconv2expr MUST be assigned back to n, e.g.
 // n.Left = inlconv2expr(n.Left)
-func inlconv2expr(n *Node) *Node {
+func inlconv2expr(n *ir.Node) *ir.Node {
 	r := n.Rlist.First()
 	return addinit(r, append(n.Ninit.Slice(), n.Nbody.Slice()...))
 }
 
@@ -547,8 +548,8 @@ func inlconv2expr(n *Node) *Node {
 // containing the inlined statements on the first list element so
 // order will be preserved. Used in return, oas2func and call
 // statements.
-func inlconv2list(n *Node) []*Node {
-	if n.Op != OINLCALL || n.Rlist.Len() == 0 {
+func inlconv2list(n *ir.Node) []*ir.Node {
+	if n.Op != ir.OINLCALL || n.Rlist.Len() == 0 {
 		base.Fatalf("inlconv2list %+v\n", n)
 	}
 
@@ -557,7 +558,7 @@ func inlconv2list(n *Node) []*Node {
 	return s
 }
 
-func inlnodelist(l Nodes, maxCost int32, inlMap map[*Node]bool) {
+func inlnodelist(l ir.Nodes, maxCost int32, inlMap map[*ir.Node]bool) {
 	s := l.Slice()
 	for i := range s {
 		s[i] = inlnode(s[i], maxCost, inlMap)
@@ -577,23 +578,23 @@ func inlnodelist(l Nodes, maxCost int32, inlMap map[*Node]bool) {
 // shorter and less complicated.
 // The result of inlnode MUST be assigned back to n, e.g.
 // n.Left = inlnode(n.Left)
-func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node {
+func inlnode(n *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node {
 	if n == nil {
 		return n
 	}
 
 	switch n.Op {
-	case ODEFER, OGO:
+	case ir.ODEFER, ir.OGO:
 		switch n.Left.Op {
-		case OCALLFUNC, OCALLMETH:
+		case ir.OCALLFUNC, ir.OCALLMETH:
 			n.Left.SetNoInline(true)
 		}
 
 		// TODO do them here (or earlier),
		// so escape analysis can avoid more heapmoves.
- case OCLOSURE: + case ir.OCLOSURE: return n - case OCALLMETH: + case ir.OCALLMETH: // Prevent inlining some reflect.Value methods when using checkptr, // even when package reflect was compiled without it (#35073). if s := n.Left.Sym; base.Debug.Checkptr != 0 && isReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") { @@ -605,24 +606,24 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node { inlnodelist(n.Ninit, maxCost, inlMap) for _, n1 := range n.Ninit.Slice() { - if n1.Op == OINLCALL { + if n1.Op == ir.OINLCALL { inlconv2stmt(n1) } } n.Left = inlnode(n.Left, maxCost, inlMap) - if n.Left != nil && n.Left.Op == OINLCALL { + if n.Left != nil && n.Left.Op == ir.OINLCALL { n.Left = inlconv2expr(n.Left) } n.Right = inlnode(n.Right, maxCost, inlMap) - if n.Right != nil && n.Right.Op == OINLCALL { - if n.Op == OFOR || n.Op == OFORUNTIL { + if n.Right != nil && n.Right.Op == ir.OINLCALL { + if n.Op == ir.OFOR || n.Op == ir.OFORUNTIL { inlconv2stmt(n.Right) - } else if n.Op == OAS2FUNC { + } else if n.Op == ir.OAS2FUNC { n.Rlist.Set(inlconv2list(n.Right)) n.Right = nil - n.Op = OAS2 + n.Op = ir.OAS2 n.SetTypecheck(0) n = typecheck(n, ctxStmt) } else { @@ -631,16 +632,16 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node { } inlnodelist(n.List, maxCost, inlMap) - if n.Op == OBLOCK { + if n.Op == ir.OBLOCK { for _, n2 := range n.List.Slice() { - if n2.Op == OINLCALL { + if n2.Op == ir.OINLCALL { inlconv2stmt(n2) } } } else { s := n.List.Slice() for i1, n1 := range s { - if n1 != nil && n1.Op == OINLCALL { + if n1 != nil && n1.Op == ir.OINLCALL { s[i1] = inlconv2expr(s[i1]) } } @@ -649,8 +650,8 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node { inlnodelist(n.Rlist, maxCost, inlMap) s := n.Rlist.Slice() for i1, n1 := range s { - if n1.Op == OINLCALL { - if n.Op == OIF { + if n1.Op == ir.OINLCALL { + if n.Op == ir.OIF { inlconv2stmt(n1) } else { s[i1] = inlconv2expr(s[i1]) @@ -660,7 +661,7 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node { inlnodelist(n.Nbody, maxCost, inlMap) for _, n := range n.Nbody.Slice() { - if n.Op == OINLCALL { + if n.Op == ir.OINLCALL { inlconv2stmt(n) } } @@ -669,16 +670,16 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node { // transmogrify this node itself unless inhibited by the // switch at the top of this function. switch n.Op { - case OCALLFUNC, OCALLMETH: + case ir.OCALLFUNC, ir.OCALLMETH: if n.NoInline() { return n } } switch n.Op { - case OCALLFUNC: + case ir.OCALLFUNC: if base.Flag.LowerM > 3 { - fmt.Printf("%v:call to func %+v\n", n.Line(), n.Left) + fmt.Printf("%v:call to func %+v\n", ir.Line(n), n.Left) } if isIntrinsicCall(n) { break @@ -687,9 +688,9 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node { n = mkinlcall(n, fn, maxCost, inlMap) } - case OCALLMETH: + case ir.OCALLMETH: if base.Flag.LowerM > 3 { - fmt.Printf("%v:call to meth %L\n", n.Line(), n.Left.Right) + fmt.Printf("%v:call to meth %L\n", ir.Line(n), n.Left.Right) } // typecheck should have resolved ODOTMETH->type, whose nname points to the actual function. 
@@ -697,7 +698,7 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node { base.Fatalf("no function type for [%p] %+v\n", n.Left, n.Left) } - n = mkinlcall(n, n.Left.MethodName(), maxCost, inlMap) + n = mkinlcall(n, methodExprName(n.Left), maxCost, inlMap) } base.Pos = lno @@ -706,11 +707,11 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node { // inlCallee takes a function-typed expression and returns the underlying function ONAME // that it refers to if statically known. Otherwise, it returns nil. -func inlCallee(fn *Node) *Node { +func inlCallee(fn *ir.Node) *ir.Node { fn = staticValue(fn) switch { - case fn.Op == OMETHEXPR: - n := fn.MethodName() + case fn.Op == ir.OMETHEXPR: + n := methodExprName(fn) // Check that receiver type matches fn.Left. // TODO(mdempsky): Handle implicit dereference // of pointer receiver argument? @@ -718,9 +719,9 @@ func inlCallee(fn *Node) *Node { return nil } return n - case fn.Op == ONAME && fn.Class() == PFUNC: + case fn.Op == ir.ONAME && fn.Class() == ir.PFUNC: return fn - case fn.Op == OCLOSURE: + case fn.Op == ir.OCLOSURE: c := fn.Func.Decl caninl(c) return c.Func.Nname @@ -728,9 +729,9 @@ func inlCallee(fn *Node) *Node { return nil } -func staticValue(n *Node) *Node { +func staticValue(n *ir.Node) *ir.Node { for { - if n.Op == OCONVNOP { + if n.Op == ir.OCONVNOP { n = n.Left continue } @@ -746,8 +747,8 @@ func staticValue(n *Node) *Node { // staticValue1 implements a simple SSA-like optimization. If n is a local variable // that is initialized and never reassigned, staticValue1 returns the initializer // expression. Otherwise, it returns nil. -func staticValue1(n *Node) *Node { - if n.Op != ONAME || n.Class() != PAUTO || n.Name.Addrtaken() { +func staticValue1(n *ir.Node) *ir.Node { + if n.Op != ir.ONAME || n.Class() != ir.PAUTO || n.Name.Addrtaken() { return nil } @@ -756,12 +757,12 @@ func staticValue1(n *Node) *Node { return nil } - var rhs *Node + var rhs *ir.Node FindRHS: switch defn.Op { - case OAS: + case ir.OAS: rhs = defn.Right - case OAS2: + case ir.OAS2: for i, lhs := range defn.List.Slice() { if lhs == n { rhs = defn.Rlist.Index(i) @@ -790,8 +791,8 @@ FindRHS: // useful for -m output documenting the reason for inhibited optimizations. // NB: global variables are always considered to be re-assigned. // TODO: handle initial declaration not including an assignment and followed by a single assignment? -func reassigned(n *Node) (bool, *Node) { - if n.Op != ONAME { +func reassigned(n *ir.Node) (bool, *ir.Node) { + if n.Op != ir.ONAME { base.Fatalf("reassigned %v", n) } // no way to reliably check for no-reassignment of globals, assume it can be @@ -804,7 +805,7 @@ func reassigned(n *Node) (bool, *Node) { // of the corresponding ODCLFUNC. // We need to walk the function body to check for reassignments so we follow the // linkage to the ODCLFUNC node as that is where body is held. 
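// Aside: the payoff of staticValue/reassigned is visible in ordinary Go.
// Below, f is bound once to a function literal and never reassigned, so the
// inliner can treat the call through f as a direct call to the literal
// (illustrative example, not compiler code):

package main

func caller() int {
	f := func(x int) int { return x + 1 } // single assignment, address never taken
	return f(41)                          // staticValue resolves f to the literal
}

func main() { println(caller()) }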
- if f.Op == OCLOSURE { + if f.Op == ir.OCLOSURE { f = f.Func.Decl } v := reassignVisitor{name: n} @@ -813,19 +814,19 @@ func reassigned(n *Node) (bool, *Node) { } type reassignVisitor struct { - name *Node + name *ir.Node } -func (v *reassignVisitor) visit(n *Node) *Node { +func (v *reassignVisitor) visit(n *ir.Node) *ir.Node { if n == nil { return nil } switch n.Op { - case OAS: + case ir.OAS: if n.Left == v.name && n != v.name.Name.Defn { return n } - case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE: + case ir.OAS2, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2DOTTYPE: for _, p := range n.List.Slice() { if p == v.name && n != v.name.Name.Defn { return n @@ -853,7 +854,7 @@ func (v *reassignVisitor) visit(n *Node) *Node { return nil } -func (v *reassignVisitor) visitList(l Nodes) *Node { +func (v *reassignVisitor) visitList(l ir.Nodes) *ir.Node { for _, n := range l.Slice() { if a := v.visit(n); a != nil { return a @@ -862,17 +863,17 @@ func (v *reassignVisitor) visitList(l Nodes) *Node { return nil } -func inlParam(t *types.Field, as *Node, inlvars map[*Node]*Node) *Node { - n := asNode(t.Nname) - if n == nil || n.isBlank() { - return nblank +func inlParam(t *types.Field, as *ir.Node, inlvars map[*ir.Node]*ir.Node) *ir.Node { + n := ir.AsNode(t.Nname) + if n == nil || ir.IsBlank(n) { + return ir.BlankNode } inlvar := inlvars[n] if inlvar == nil { base.Fatalf("missing inlvar for %v", n) } - as.Ninit.Append(nod(ODCL, inlvar, nil)) + as.Ninit.Append(ir.Nod(ir.ODCL, inlvar, nil)) inlvar.Name.Defn = as return inlvar } @@ -886,11 +887,11 @@ var inlgen int // parameters. // The result of mkinlcall MUST be assigned back to n, e.g. // n.Left = mkinlcall(n.Left, fn, isddd) -func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { +func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node { if fn.Func.Inl == nil { if logopt.Enabled() { - logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", Curfn.funcname(), - fmt.Sprintf("%s cannot be inlined", fn.pkgFuncName())) + logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", ir.FuncName(Curfn), + fmt.Sprintf("%s cannot be inlined", ir.PkgFuncName(fn))) } return n } @@ -898,8 +899,8 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { // The inlined function body is too big. Typically we use this check to restrict // inlining into very big functions. See issue 26546 and 17566. if logopt.Enabled() { - logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", Curfn.funcname(), - fmt.Sprintf("cost %d of %s exceeds max large caller cost %d", fn.Func.Inl.Cost, fn.pkgFuncName(), maxCost)) + logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", ir.FuncName(Curfn), + fmt.Sprintf("cost %d of %s exceeds max large caller cost %d", fn.Func.Inl.Cost, ir.PkgFuncName(fn), maxCost)) } return n } @@ -907,7 +908,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { if fn == Curfn || fn.Name.Defn == Curfn { // Can't recursively inline a function into itself. 
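An illustrative set of recursive functions showing both guards here: a direct self-call is rejected immediately (fn == Curfn), and inlMap stops expansion once a function already on the current inlining path comes around again.

package main

func leafless(n int) int { // self-recursive: fn == Curfn blocks inlining
	if n == 0 {
		return 0
	}
	return leafless(n - 1)
}

func even(n int) bool {
	if n == 0 {
		return true
	}
	return odd(n - 1) // expanding odd re-enters the even/odd cycle,
}

func odd(n int) bool { // so inlMap cuts the expansion off
	if n == 0 {
		return false
	}
	return even(n - 1)
}

func main() { _, _ = leafless(3), even(4) }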
if logopt.Enabled() { - logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", Curfn.funcname())) + logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(Curfn))) } return n } @@ -924,7 +925,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { if inlMap[fn] { if base.Flag.LowerM > 1 { - fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", n.Line(), fn, Curfn.funcname()) + fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", ir.Line(n), fn, ir.FuncName(Curfn)) } return n } @@ -938,15 +939,15 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { // We have a function node, and it has an inlineable body. if base.Flag.LowerM > 1 { - fmt.Printf("%v: inlining call to %v %#v { %#v }\n", n.Line(), fn.Sym, fn.Type, asNodes(fn.Func.Inl.Body)) + fmt.Printf("%v: inlining call to %v %#v { %#v }\n", ir.Line(n), fn.Sym, fn.Type, ir.AsNodes(fn.Func.Inl.Body)) } else if base.Flag.LowerM != 0 { - fmt.Printf("%v: inlining call to %v\n", n.Line(), fn) + fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn) } if base.Flag.LowerM > 2 { - fmt.Printf("%v: Before inlining: %+v\n", n.Line(), n) + fmt.Printf("%v: Before inlining: %+v\n", ir.Line(n), n) } - if ssaDump != "" && ssaDump == Curfn.funcname() { + if ssaDump != "" && ssaDump == ir.FuncName(Curfn) { ssaDumpInlined = append(ssaDumpInlined, fn) } @@ -956,28 +957,28 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { // may contain side effects (e.g., added by addinit during // inlconv2expr or inlconv2list). Make sure to preserve these, // if necessary (#42703). - if n.Op == OCALLFUNC { + if n.Op == ir.OCALLFUNC { callee := n.Left - for callee.Op == OCONVNOP { + for callee.Op == ir.OCONVNOP { ninit.AppendNodes(&callee.Ninit) callee = callee.Left } - if callee.Op != ONAME && callee.Op != OCLOSURE && callee.Op != OMETHEXPR { + if callee.Op != ir.ONAME && callee.Op != ir.OCLOSURE && callee.Op != ir.OMETHEXPR { base.Fatalf("unexpected callee expression: %v", callee) } } // Make temp names to use instead of the originals. - inlvars := make(map[*Node]*Node) + inlvars := make(map[*ir.Node]*ir.Node) // record formals/locals for later post-processing - var inlfvars []*Node + var inlfvars []*ir.Node // Handle captured variables when inlining closures. 
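A sketch of the two capture flavors the code below distinguishes; variable names are illustrative. A read-only capture binds a value temporary, while a written capture gets an address temporary whose dereference replaces every use.

package main

import "fmt"

func main() {
	a, b := 1, 1

	// a is only read: captured by value (v.Name.Byval()), so the
	// inliner binds it with a plain temp, roughly `iv := a`.
	byval := func() int { return a + 1 }

	// b is written: captured by reference, so the inliner makes an
	// address temp (spelled "&b" internally) and substitutes every
	// occurrence of b with a dereference of that temp.
	byref := func() { b++ }

	fmt.Println(byval()) // 2
	byref()
	fmt.Println(b) // 2
}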
if fn.Name.Defn != nil { if c := fn.Name.Defn.Func.OClosure; c != nil { for _, v := range c.Func.ClosureVars.Slice() { - if v.Op == OXXX { + if v.Op == ir.OXXX { continue } @@ -987,38 +988,38 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { // the reassigned check via some sort of copy propagation this would most // likely need to be changed to a loop to walk up to the correct Param if o == nil || (o.Name.Curfn != Curfn && o.Name.Curfn.Func.OClosure != Curfn) { - base.Fatalf("%v: unresolvable capture %v %v\n", n.Line(), fn, v) + base.Fatalf("%v: unresolvable capture %v %v\n", ir.Line(n), fn, v) } if v.Name.Byval() { iv := typecheck(inlvar(v), ctxExpr) - ninit.Append(nod(ODCL, iv, nil)) - ninit.Append(typecheck(nod(OAS, iv, o), ctxStmt)) + ninit.Append(ir.Nod(ir.ODCL, iv, nil)) + ninit.Append(typecheck(ir.Nod(ir.OAS, iv, o), ctxStmt)) inlvars[v] = iv } else { - addr := newname(lookup("&" + v.Sym.Name)) + addr := NewName(lookup("&" + v.Sym.Name)) addr.Type = types.NewPtr(v.Type) ia := typecheck(inlvar(addr), ctxExpr) - ninit.Append(nod(ODCL, ia, nil)) - ninit.Append(typecheck(nod(OAS, ia, nod(OADDR, o, nil)), ctxStmt)) + ninit.Append(ir.Nod(ir.ODCL, ia, nil)) + ninit.Append(typecheck(ir.Nod(ir.OAS, ia, ir.Nod(ir.OADDR, o, nil)), ctxStmt)) inlvars[addr] = ia // When capturing by reference, all occurrence of the captured var // must be substituted with dereference of the temporary address - inlvars[v] = typecheck(nod(ODEREF, ia, nil), ctxExpr) + inlvars[v] = typecheck(ir.Nod(ir.ODEREF, ia, nil), ctxExpr) } } } } for _, ln := range fn.Func.Inl.Dcl { - if ln.Op != ONAME { + if ln.Op != ir.ONAME { continue } - if ln.Class() == PPARAMOUT { // return values handled below. + if ln.Class() == ir.PPARAMOUT { // return values handled below. continue } - if ln.isParamStackCopy() { // ignore the on-stack copy of a parameter that moved to the heap + if isParamStackCopy(ln) { // ignore the on-stack copy of a parameter that moved to the heap // TODO(mdempsky): Remove once I'm confident // this never actually happens. We currently // perform inlining before escape analysis, so @@ -1028,7 +1029,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { inlf := typecheck(inlvar(ln), ctxExpr) inlvars[ln] = inlf if base.Flag.GenDwarfInl > 0 { - if ln.Class() == PPARAM { + if ln.Class() == ir.PPARAM { inlf.Name.SetInlFormal(true) } else { inlf.Name.SetInlLocal(true) @@ -1039,8 +1040,8 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { } nreturns := 0 - inspectList(asNodes(fn.Func.Inl.Body), func(n *Node) bool { - if n != nil && n.Op == ORETURN { + ir.InspectList(ir.AsNodes(fn.Func.Inl.Body), func(n *ir.Node) bool { + if n != nil && n.Op == ir.ORETURN { nreturns++ } return true @@ -1052,10 +1053,10 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { delayretvars := nreturns == 1 // temporaries for return values. - var retvars []*Node + var retvars []*ir.Node for i, t := range fn.Type.Results().Fields().Slice() { - var m *Node - if n := asNode(t.Nname); n != nil && !n.isBlank() && !strings.HasPrefix(n.Sym.Name, "~r") { + var m *ir.Node + if n := ir.AsNode(t.Nname); n != nil && !ir.IsBlank(n) && !strings.HasPrefix(n.Sym.Name, "~r") { m = inlvar(n) m = typecheck(m, ctxExpr) inlvars[n] = m @@ -1080,9 +1081,9 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { } // Assign arguments to the parameters' temp names. 
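Before the parameter-assignment code that follows, a source-level sketch of the variadic case it handles (sum is illustrative): non-dotted extra arguments are packed into a slice literal, an empty tail becomes a typed nil, and a dotted call needs no packing at all.

package main

import "fmt"

func sum(xs ...int) int {
	t := 0
	for _, x := range xs {
		t += x
	}
	return t
}

func main() {
	fmt.Println(sum(1, 2, 3)) // packed: xs = []int{1, 2, 3} (the OCOMPLIT)
	fmt.Println(sum())        // empty: xs = []int(nil) (nodnil with param.Type)
	s := []int{4, 5}
	fmt.Println(sum(s...)) // dotted: s is passed through unchanged
}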
- as := nod(OAS2, nil, nil) + as := ir.Nod(ir.OAS2, nil, nil) as.SetColas(true) - if n.Op == OCALLMETH { + if n.Op == ir.OCALLMETH { if n.Left.Left == nil { base.Fatalf("method call without receiver: %+v", n) } @@ -1092,7 +1093,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { // For non-dotted calls to variadic functions, we assign the // variadic parameter's temp name separately. - var vas *Node + var vas *ir.Node if recv := fn.Type.Recv(); recv != nil { as.List.Append(inlParam(recv, as, inlvars)) @@ -1115,13 +1116,13 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { } varargs := as.List.Slice()[x:] - vas = nod(OAS, nil, nil) + vas = ir.Nod(ir.OAS, nil, nil) vas.Left = inlParam(param, vas, inlvars) if len(varargs) == 0 { vas.Right = nodnil() vas.Right.Type = param.Type } else { - vas.Right = nod(OCOMPLIT, nil, typenod(param.Type)) + vas.Right = ir.Nod(ir.OCOMPLIT, nil, typenod(param.Type)) vas.Right.List.Set(varargs) } } @@ -1139,8 +1140,8 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { if !delayretvars { // Zero the return parameters. for _, n := range retvars { - ninit.Append(nod(ODCL, n, nil)) - ras := nod(OAS, n, nil) + ninit.Append(ir.Nod(ir.ODCL, n, nil)) + ras := ir.Nod(ir.OAS, n, nil) ras = typecheck(ras, ctxStmt) ninit.Append(ras) } @@ -1161,7 +1162,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { // to put a breakpoint. Not sure if that's really necessary or not // (in which case it could go at the end of the function instead). // Note issue 28603. - inlMark := nod(OINLMARK, nil, nil) + inlMark := ir.Nod(ir.OINLMARK, nil, nil) inlMark.Pos = n.Pos.WithIsStmt() inlMark.Xoffset = int64(newIndex) ninit.Append(inlMark) @@ -1182,9 +1183,9 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { newInlIndex: newIndex, } - body := subst.list(asNodes(fn.Func.Inl.Body)) + body := subst.list(ir.AsNodes(fn.Func.Inl.Body)) - lab := nodSym(OLABEL, nil, retlabel) + lab := nodSym(ir.OLABEL, nil, retlabel) body = append(body, lab) typecheckslice(body, ctxStmt) @@ -1197,7 +1198,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { //dumplist("ninit post", ninit); - call := nod(OINLCALL, nil, nil) + call := ir.Nod(ir.OINLCALL, nil, nil) call.Ninit.Set(ninit.Slice()) call.Nbody.Set(body) call.Rlist.Set(retvars) @@ -1212,13 +1213,13 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { // luckily these are small. inlnodelist(call.Nbody, maxCost, inlMap) for _, n := range call.Nbody.Slice() { - if n.Op == OINLCALL { + if n.Op == ir.OINLCALL { inlconv2stmt(n) } } if base.Flag.LowerM > 2 { - fmt.Printf("%v: After inlining %+v\n\n", call.Line(), call) + fmt.Printf("%v: After inlining %+v\n\n", ir.Line(call), call) } return call @@ -1227,14 +1228,14 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node { // Every time we expand a function we generate a new set of tmpnames, // PAUTO's in the calling functions, and link them off of the // PPARAM's, PAUTOS and PPARAMOUTs of the called function. 
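The helpers that follow synthesize the caller-side temporaries; in effect an inlined call contributes PAUTO declarations like these to the caller. The real names are compiler-internal (~R0 and friends), shown here only in comments.

package main

import "fmt"

func max2(a, b int) int {
	if a > b {
		return a
	}
	return b
}

func main() {
	// Inlining max2(x, y) into main conceptually introduces
	// temporaries in main: one per parameter (via inlvar) and one
	// per result (via retvar, named ~R0, ~R1, ... internally):
	x, y := 3, 7
	pa, pb := x, y // parameter temps bound by the parameter OAS2
	var r0 int     // result temp, zeroed up front unless delayretvars applies
	if pa > pb {
		r0 = pa
	} else {
		r0 = pb
	}
	fmt.Println(r0, r0 == max2(x, y)) // 7 true
}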
-func inlvar(var_ *Node) *Node { +func inlvar(var_ *ir.Node) *ir.Node { if base.Flag.LowerM > 3 { fmt.Printf("inlvar %+v\n", var_) } - n := newname(var_.Sym) + n := NewName(var_.Sym) n.Type = var_.Type - n.SetClass(PAUTO) + n.SetClass(ir.PAUTO) n.Name.SetUsed(true) n.Name.Curfn = Curfn // the calling function, not the called one n.Name.SetAddrtaken(var_.Name.Addrtaken()) @@ -1244,10 +1245,10 @@ func inlvar(var_ *Node) *Node { } // Synthesize a variable to store the inlined function's results in. -func retvar(t *types.Field, i int) *Node { - n := newname(lookupN("~R", i)) +func retvar(t *types.Field, i int) *ir.Node { + n := NewName(lookupN("~R", i)) n.Type = t.Type - n.SetClass(PAUTO) + n.SetClass(ir.PAUTO) n.Name.SetUsed(true) n.Name.Curfn = Curfn // the calling function, not the called one Curfn.Func.Dcl = append(Curfn.Func.Dcl, n) @@ -1256,10 +1257,10 @@ func retvar(t *types.Field, i int) *Node { // Synthesize a variable to store the inlined function's arguments // when they come from a multiple return call. -func argvar(t *types.Type, i int) *Node { - n := newname(lookupN("~arg", i)) +func argvar(t *types.Type, i int) *ir.Node { + n := NewName(lookupN("~arg", i)) n.Type = t.Elem() - n.SetClass(PAUTO) + n.SetClass(ir.PAUTO) n.Name.SetUsed(true) n.Name.Curfn = Curfn // the calling function, not the called one Curfn.Func.Dcl = append(Curfn.Func.Dcl, n) @@ -1273,13 +1274,13 @@ type inlsubst struct { retlabel *types.Sym // Temporary result variables. - retvars []*Node + retvars []*ir.Node // Whether result variables should be initialized at the // "return" statement. delayretvars bool - inlvars map[*Node]*Node + inlvars map[*ir.Node]*ir.Node // bases maps from original PosBase to PosBase with an extra // inlined call frame. @@ -1291,8 +1292,8 @@ type inlsubst struct { } // list inlines a list of nodes. -func (subst *inlsubst) list(ll Nodes) []*Node { - s := make([]*Node, 0, ll.Len()) +func (subst *inlsubst) list(ll ir.Nodes) []*ir.Node { + s := make([]*ir.Node, 0, ll.Len()) for _, n := range ll.Slice() { s = append(s, subst.node(n)) } @@ -1303,13 +1304,13 @@ func (subst *inlsubst) list(ll Nodes) []*Node { // inlined function, substituting references to input/output // parameters with ones to the tmpnames, and substituting returns with // assignments to the output. -func (subst *inlsubst) node(n *Node) *Node { +func (subst *inlsubst) node(n *ir.Node) *ir.Node { if n == nil { return nil } switch n.Op { - case ONAME: + case ir.ONAME: if inlvar := subst.inlvars[n]; inlvar != nil { // These will be set during inlnode if base.Flag.LowerM > 2 { fmt.Printf("substituting name %+v -> %+v\n", n, inlvar) @@ -1322,10 +1323,10 @@ func (subst *inlsubst) node(n *Node) *Node { } return n - case OMETHEXPR: + case ir.OMETHEXPR: return n - case OLITERAL, ONIL, OTYPE: + case ir.OLITERAL, ir.ONIL, ir.OTYPE: // If n is a named constant or type, we can continue // using it in the inline copy. Otherwise, make a copy // so we can update the line number. @@ -1336,12 +1337,12 @@ func (subst *inlsubst) node(n *Node) *Node { // Since we don't handle bodies with closures, this return is guaranteed to belong to the current inlined function. // dump("Return before substitution", n); - case ORETURN: - m := nodSym(OGOTO, nil, subst.retlabel) + case ir.ORETURN: + m := nodSym(ir.OGOTO, nil, subst.retlabel) m.Ninit.Set(subst.list(n.Ninit)) if len(subst.retvars) != 0 && n.List.Len() != 0 { - as := nod(OAS2, nil, nil) + as := ir.Nod(ir.OAS2, nil, nil) // Make a shallow copy of retvars. 
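Continuing the ORETURN rewrite here: each return becomes an assignment to the result temporaries plus a goto to the shared return label appended after the body. A legal source-level analogue (the retlabel's real name carries an inlgen suffix):

package main

import "fmt"

func abs(x int) int {
	if x < 0 {
		return -x
	}
	return x
}

func main() {
	x := -3
	var r0 int // retvar
	{
		// substituted body of abs: every `return e` becomes
		// `r0 = e; goto ret`
		if x < 0 {
			r0 = -x
			goto ret
		}
		r0 = x
		goto ret
	}
ret: // the OLABEL appended after the substituted body
	fmt.Println(r0, r0 == abs(x)) // 3 true
}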
// Otherwise OINLCALL.Rlist will be the same list, @@ -1353,7 +1354,7 @@ func (subst *inlsubst) node(n *Node) *Node { if subst.delayretvars { for _, n := range as.List.Slice() { - as.Ninit.Append(nod(ODCL, n, nil)) + as.Ninit.Append(ir.Nod(ir.ODCL, n, nil)) n.Name.Defn = as } } @@ -1368,8 +1369,8 @@ func (subst *inlsubst) node(n *Node) *Node { // dump("Return after substitution", m); return m - case OGOTO, OLABEL: - m := n.copy() + case ir.OGOTO, ir.OLABEL: + m := ir.Copy(n) m.Pos = subst.updatedPos(m.Pos) m.Ninit.Set(nil) p := fmt.Sprintf("%s·%d", n.Sym.Name, inlgen) @@ -1378,11 +1379,11 @@ func (subst *inlsubst) node(n *Node) *Node { return m } - m := n.copy() + m := ir.Copy(n) m.Pos = subst.updatedPos(m.Pos) m.Ninit.Set(nil) - if n.Op == OCLOSURE { + if n.Op == ir.OCLOSURE { base.Fatalf("cannot inline function containing closure: %+v", n) } @@ -1408,10 +1409,10 @@ func (subst *inlsubst) updatedPos(xpos src.XPos) src.XPos { return base.Ctxt.PosTable.XPos(pos) } -func pruneUnusedAutos(ll []*Node, vis *hairyVisitor) []*Node { - s := make([]*Node, 0, len(ll)) +func pruneUnusedAutos(ll []*ir.Node, vis *hairyVisitor) []*ir.Node { + s := make([]*ir.Node, 0, len(ll)) for _, n := range ll { - if n.Class() == PAUTO { + if n.Class() == ir.PAUTO { if _, found := vis.usedLocals[n]; !found { continue } @@ -1423,19 +1424,19 @@ func pruneUnusedAutos(ll []*Node, vis *hairyVisitor) []*Node { // devirtualize replaces interface method calls within fn with direct // concrete-type method calls where applicable. -func devirtualize(fn *Node) { +func devirtualize(fn *ir.Node) { Curfn = fn - inspectList(fn.Nbody, func(n *Node) bool { - if n.Op == OCALLINTER { + ir.InspectList(fn.Nbody, func(n *ir.Node) bool { + if n.Op == ir.OCALLINTER { devirtualizeCall(n) } return true }) } -func devirtualizeCall(call *Node) { +func devirtualizeCall(call *ir.Node) { recv := staticValue(call.Left.Left) - if recv.Op != OCONVIFACE { + if recv.Op != ir.OCONVIFACE { return } @@ -1444,23 +1445,23 @@ func devirtualizeCall(call *Node) { return } - x := nodl(call.Left.Pos, ODOTTYPE, call.Left.Left, nil) + x := ir.NodAt(call.Left.Pos, ir.ODOTTYPE, call.Left.Left, nil) x.Type = typ - x = nodlSym(call.Left.Pos, OXDOT, x, call.Left.Sym) + x = nodlSym(call.Left.Pos, ir.OXDOT, x, call.Left.Sym) x = typecheck(x, ctxExpr|ctxCallee) switch x.Op { - case ODOTMETH: + case ir.ODOTMETH: if base.Flag.LowerM != 0 { base.WarnfAt(call.Pos, "devirtualizing %v to %v", call.Left, typ) } - call.Op = OCALLMETH + call.Op = ir.OCALLMETH call.Left = x - case ODOTINTER: + case ir.ODOTINTER: // Promoted method from embedded interface-typed field (#42279). if base.Flag.LowerM != 0 { base.WarnfAt(call.Pos, "partially devirtualizing %v to %v", call.Left, typ) } - call.Op = OCALLINTER + call.Op = ir.OCALLINTER call.Left = x default: // TODO(mdempsky): Turn back into Fatalf after more testing. diff --git a/src/cmd/compile/internal/gc/lex.go b/src/cmd/compile/internal/gc/lex.go index 30ef4d0eb2035..39d73867e4d79 100644 --- a/src/cmd/compile/internal/gc/lex.go +++ b/src/cmd/compile/internal/gc/lex.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/syntax" "cmd/internal/objabi" "cmd/internal/src" @@ -25,78 +26,51 @@ func isQuoted(s string) bool { return len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"' } -type PragmaFlag int16 - -const ( - // Func pragmas. 
- Nointerface PragmaFlag = 1 << iota - Noescape // func parameters don't escape - Norace // func must not have race detector annotations - Nosplit // func should not execute on separate stack - Noinline // func should not be inlined - NoCheckPtr // func should not be instrumented by checkptr - CgoUnsafeArgs // treat a pointer to one arg as a pointer to them all - UintptrEscapes // pointers converted to uintptr escape - - // Runtime-only func pragmas. - // See ../../../../runtime/README.md for detailed descriptions. - Systemstack // func must run on system stack - Nowritebarrier // emit compiler error instead of write barrier - Nowritebarrierrec // error on write barrier in this or recursive callees - Yeswritebarrierrec // cancels Nowritebarrierrec in this function and callees - - // Runtime and cgo type pragmas - NotInHeap // values of this type must not be heap allocated - - // Go command pragmas - GoBuildPragma -) - const ( - FuncPragmas = Nointerface | - Noescape | - Norace | - Nosplit | - Noinline | - NoCheckPtr | - CgoUnsafeArgs | - UintptrEscapes | - Systemstack | - Nowritebarrier | - Nowritebarrierrec | - Yeswritebarrierrec - - TypePragmas = NotInHeap + FuncPragmas = ir.Nointerface | + ir.Noescape | + ir.Norace | + ir.Nosplit | + ir.Noinline | + ir.NoCheckPtr | + ir.CgoUnsafeArgs | + ir.UintptrEscapes | + ir.Systemstack | + ir.Nowritebarrier | + ir.Nowritebarrierrec | + ir.Yeswritebarrierrec + + TypePragmas = ir.NotInHeap ) -func pragmaFlag(verb string) PragmaFlag { +func pragmaFlag(verb string) ir.PragmaFlag { switch verb { case "go:build": - return GoBuildPragma + return ir.GoBuildPragma case "go:nointerface": if objabi.Fieldtrack_enabled != 0 { - return Nointerface + return ir.Nointerface } case "go:noescape": - return Noescape + return ir.Noescape case "go:norace": - return Norace + return ir.Norace case "go:nosplit": - return Nosplit | NoCheckPtr // implies NoCheckPtr (see #34972) + return ir.Nosplit | ir.NoCheckPtr // implies NoCheckPtr (see #34972) case "go:noinline": - return Noinline + return ir.Noinline case "go:nocheckptr": - return NoCheckPtr + return ir.NoCheckPtr case "go:systemstack": - return Systemstack + return ir.Systemstack case "go:nowritebarrier": - return Nowritebarrier + return ir.Nowritebarrier case "go:nowritebarrierrec": - return Nowritebarrierrec | Nowritebarrier // implies Nowritebarrier + return ir.Nowritebarrierrec | ir.Nowritebarrier // implies Nowritebarrier case "go:yeswritebarrierrec": - return Yeswritebarrierrec + return ir.Yeswritebarrierrec case "go:cgo_unsafe_args": - return CgoUnsafeArgs | NoCheckPtr // implies NoCheckPtr (see #34968) + return ir.CgoUnsafeArgs | ir.NoCheckPtr // implies NoCheckPtr (see #34968) case "go:uintptrescapes": // For the next function declared in the file // any uintptr arguments may be pointer values @@ -109,9 +83,9 @@ func pragmaFlag(verb string) PragmaFlag { // call. The conversion to uintptr must appear // in the argument list. // Used in syscall/dll_windows.go. 
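A minimal, self-contained sketch of how these pragma bits compose, mirroring (not importing) the ir.PragmaFlag constants this patch moves; note how the implication comments above are encoded directly in the returned bit sets.

package main

import "fmt"

type PragmaFlag int16

const (
	Nosplit PragmaFlag = 1 << iota
	Noinline
	NoCheckPtr
	CgoUnsafeArgs
)

func pragmaFlag(verb string) PragmaFlag {
	switch verb {
	case "go:nosplit":
		return Nosplit | NoCheckPtr // implies NoCheckPtr (see #34972)
	case "go:noinline":
		return Noinline
	case "go:cgo_unsafe_args":
		return CgoUnsafeArgs | NoCheckPtr // implies NoCheckPtr (see #34968)
	}
	return 0
}

func main() {
	f := pragmaFlag("go:nosplit")
	fmt.Println(f&NoCheckPtr != 0) // true: the implication lives in the bits
}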
- return UintptrEscapes + return ir.UintptrEscapes case "go:notinheap": - return NotInHeap + return ir.NotInHeap } return 0 } diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index c66139027a61c..24e926602bf4e 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -10,6 +10,7 @@ import ( "bufio" "bytes" "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" "cmd/compile/internal/types" @@ -73,17 +74,17 @@ func Main(archInit func(*Arch)) { // See bugs 31188 and 21945 (CLs 170638, 98075, 72371). base.Ctxt.UseBASEntries = base.Ctxt.Headtype != objabi.Hdarwin - localpkg = types.NewPkg("", "") - localpkg.Prefix = "\"\"" + ir.LocalPkg = types.NewPkg("", "") + ir.LocalPkg.Prefix = "\"\"" // We won't know localpkg's height until after import // processing. In the mean time, set to MaxPkgHeight to ensure // height comparisons at least work until then. - localpkg.Height = types.MaxPkgHeight + ir.LocalPkg.Height = types.MaxPkgHeight // pseudo-package, for scoping - builtinpkg = types.NewPkg("go.builtin", "") // TODO(gri) name this package go.builtin? - builtinpkg.Prefix = "go.builtin" // not go%2ebuiltin + ir.BuiltinPkg = types.NewPkg("go.builtin", "") // TODO(gri) name this package go.builtin? + ir.BuiltinPkg.Prefix = "go.builtin" // not go%2ebuiltin // pseudo-package, accessed by import "unsafe" unsafepkg = types.NewPkg("unsafe", "unsafe") @@ -209,29 +210,18 @@ func Main(archInit func(*Arch)) { types.Widthptr = Widthptr types.Dowidth = dowidth types.Fatalf = base.Fatalf - types.Sconv = func(s *types.Sym, flag, mode int) string { - return sconv(s, FmtFlag(flag), fmtMode(mode)) - } - types.Tconv = func(t *types.Type, flag, mode int) string { - return tconv(t, FmtFlag(flag), fmtMode(mode)) - } - types.FormatSym = func(sym *types.Sym, s fmt.State, verb rune, mode int) { - symFormat(sym, s, verb, fmtMode(mode)) - } - types.FormatType = func(t *types.Type, s fmt.State, verb rune, mode int) { - typeFormat(t, s, verb, fmtMode(mode)) - } + ir.InstallTypeFormats() types.TypeLinkSym = func(t *types.Type) *obj.LSym { return typenamesym(t).Linksym() } - types.FmtLeft = int(FmtLeft) - types.FmtUnsigned = int(FmtUnsigned) - types.FErr = int(FErr) + types.FmtLeft = int(ir.FmtLeft) + types.FmtUnsigned = int(ir.FmtUnsigned) + types.FErr = int(ir.FErr) types.Ctxt = base.Ctxt initUniverse() - dclcontext = PEXTERN + dclcontext = ir.PEXTERN autogeneratedPos = makePos(src.NewFileBase("", ""), 1, 0) @@ -263,7 +253,7 @@ func Main(archInit func(*Arch)) { timings.Start("fe", "typecheck", "top1") for i := 0; i < len(xtop); i++ { n := xtop[i] - if op := n.Op; op != ODCL && op != OAS && op != OAS2 && (op != ODCLTYPE || !n.Left.Name.Param.Alias()) { + if op := n.Op; op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.Left.Name.Param.Alias()) { xtop[i] = typecheck(n, ctxStmt) } } @@ -275,7 +265,7 @@ func Main(archInit func(*Arch)) { timings.Start("fe", "typecheck", "top2") for i := 0; i < len(xtop); i++ { n := xtop[i] - if op := n.Op; op == ODCL || op == OAS || op == OAS2 || op == ODCLTYPE && n.Left.Name.Param.Alias() { + if op := n.Op; op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.Left.Name.Param.Alias() { xtop[i] = typecheck(n, ctxStmt) } } @@ -286,7 +276,7 @@ func Main(archInit func(*Arch)) { var fcount int64 for i := 0; i < len(xtop); i++ { n := xtop[i] - if n.Op == ODCLFUNC { + if n.Op == ir.ODCLFUNC { Curfn = n decldepth = 1 errorsBefore := 
base.Errors() @@ -316,7 +306,7 @@ func Main(archInit func(*Arch)) { // because variables captured by value do not escape. timings.Start("fe", "capturevars") for _, n := range xtop { - if n.Op == ODCLFUNC && n.Func.OClosure != nil { + if n.Op == ir.ODCLFUNC && n.Func.OClosure != nil { Curfn = n capturevars(n) } @@ -340,7 +330,7 @@ func Main(archInit func(*Arch)) { if base.Flag.LowerL != 0 { // Find functions that can be inlined and clone them before walk expands them. - visitBottomUp(xtop, func(list []*Node, recursive bool) { + visitBottomUp(xtop, func(list []*ir.Node, recursive bool) { numfns := numNonClosures(list) for _, n := range list { if !recursive || numfns > 1 { @@ -350,7 +340,7 @@ func Main(archInit func(*Arch)) { caninl(n) } else { if base.Flag.LowerM > 1 { - fmt.Printf("%v: cannot inline %v: recursive\n", n.Line(), n.Func.Nname) + fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Func.Nname) } } inlcalls(n) @@ -359,7 +349,7 @@ func Main(archInit func(*Arch)) { } for _, n := range xtop { - if n.Op == ODCLFUNC { + if n.Op == ir.ODCLFUNC { devirtualize(n) } } @@ -389,7 +379,7 @@ func Main(archInit func(*Arch)) { // before walk reaches a call of a closure. timings.Start("fe", "xclosures") for _, n := range xtop { - if n.Op == ODCLFUNC && n.Func.OClosure != nil { + if n.Op == ir.ODCLFUNC && n.Func.OClosure != nil { Curfn = n transformclosure(n) } @@ -412,7 +402,7 @@ func Main(archInit func(*Arch)) { fcount = 0 for i := 0; i < len(xtop); i++ { n := xtop[i] - if n.Op == ODCLFUNC { + if n.Op == ir.ODCLFUNC { funccompile(n) fcount++ } @@ -440,7 +430,7 @@ func Main(archInit func(*Arch)) { // Phase 9: Check external declarations. timings.Start("be", "externaldcls") for i, n := range externdcl { - if n.Op == ONAME { + if n.Op == ir.ONAME { externdcl[i] = typecheck(externdcl[i], ctxExpr) } } @@ -491,7 +481,7 @@ func Main(archInit func(*Arch)) { } // numNonClosures returns the number of functions in list which are not closures. -func numNonClosures(list []*Node) int { +func numNonClosures(list []*ir.Node) int { count := 0 for _, n := range list { if n.Func.OClosure == nil { @@ -934,14 +924,14 @@ func pkgnotused(lineno src.XPos, path string, name string) { } func mkpackage(pkgname string) { - if localpkg.Name == "" { + if ir.LocalPkg.Name == "" { if pkgname == "_" { base.Errorf("invalid package name _") } - localpkg.Name = pkgname + ir.LocalPkg.Name = pkgname } else { - if pkgname != localpkg.Name { - base.Errorf("package %s; expected %s", pkgname, localpkg.Name) + if pkgname != ir.LocalPkg.Name { + base.Errorf("package %s; expected %s", pkgname, ir.LocalPkg.Name) } } } @@ -954,12 +944,12 @@ func clearImports() { } var unused []importedPkg - for _, s := range localpkg.Syms { - n := asNode(s.Def) + for _, s := range ir.LocalPkg.Syms { + n := ir.AsNode(s.Def) if n == nil { continue } - if n.Op == OPACK { + if n.Op == ir.OPACK { // throw away top-level package name left over // from previous file. // leave s->block set to cause redeclaration @@ -990,7 +980,7 @@ func clearImports() { } func IsAlias(sym *types.Sym) bool { - return sym.Def != nil && asNode(sym.Def).Sym != sym + return sym.Def != nil && ir.AsNode(sym.Def).Sym != sym } // recordFlags records the specified command-line flags to be placed @@ -1057,7 +1047,7 @@ func recordPackageName() { // together two package main archives. So allow dups. 
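The devirtualize pass wired into Main above rewrites interface calls whose receiver is statically a known concrete value; a small example of a call it can rewrite (type and method names are illustrative).

package main

import "fmt"

type greeter struct{}

func (greeter) hello() { fmt.Println("hi") }

type helloer interface{ hello() }

func main() {
	var h helloer = greeter{} // staticValue(h) finds this OCONVIFACE
	h.hello()                 // OCALLINTER rewritten to a direct OCALLMETH,
	// reported as "devirtualizing h.hello to greeter" under -m
}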
s.Set(obj.AttrDuplicateOK, true) base.Ctxt.Data = append(base.Ctxt.Data, s) - s.P = []byte(localpkg.Name) + s.P = []byte(ir.LocalPkg.Name) } // currentLang returns the current language version. @@ -1084,9 +1074,9 @@ var langWant lang func langSupported(major, minor int, pkg *types.Pkg) bool { if pkg == nil { // TODO(mdempsky): Set Pkg for local types earlier. - pkg = localpkg + pkg = ir.LocalPkg } - if pkg != localpkg { + if pkg != ir.LocalPkg { // Assume imported packages passed type-checking. return true } diff --git a/src/cmd/compile/internal/gc/mkbuiltin.go b/src/cmd/compile/internal/gc/mkbuiltin.go index 63d2a12c079d6..8fa6d02f2c2c0 100644 --- a/src/cmd/compile/internal/gc/mkbuiltin.go +++ b/src/cmd/compile/internal/gc/mkbuiltin.go @@ -35,7 +35,10 @@ func main() { fmt.Fprintln(&b) fmt.Fprintln(&b, "package gc") fmt.Fprintln(&b) - fmt.Fprintln(&b, `import "cmd/compile/internal/types"`) + fmt.Fprintln(&b, `import (`) + fmt.Fprintln(&b, ` "cmd/compile/internal/ir"`) + fmt.Fprintln(&b, ` "cmd/compile/internal/types"`) + fmt.Fprintln(&b, `)`) mkbuiltin(&b, "runtime") @@ -144,12 +147,12 @@ func (i *typeInterner) mktype(t ast.Expr) string { case "rune": return "types.Runetype" } - return fmt.Sprintf("types.Types[T%s]", strings.ToUpper(t.Name)) + return fmt.Sprintf("types.Types[types.T%s]", strings.ToUpper(t.Name)) case *ast.SelectorExpr: if t.X.(*ast.Ident).Name != "unsafe" || t.Sel.Name != "Pointer" { log.Fatalf("unhandled type: %#v", t) } - return "types.Types[TUNSAFEPTR]" + return "types.Types[types.TUNSAFEPTR]" case *ast.ArrayType: if t.Len == nil { @@ -171,7 +174,7 @@ func (i *typeInterner) mktype(t ast.Expr) string { if len(t.Methods.List) != 0 { log.Fatal("non-empty interfaces unsupported") } - return "types.Types[TINTER]" + return "types.Types[types.TINTER]" case *ast.MapType: return fmt.Sprintf("types.NewMap(%s, %s)", i.subtype(t.Key), i.subtype(t.Value)) case *ast.StarExpr: @@ -204,7 +207,7 @@ func (i *typeInterner) fields(fl *ast.FieldList, keepNames bool) string { } } } - return fmt.Sprintf("[]*Node{%s}", strings.Join(res, ", ")) + return fmt.Sprintf("[]*ir.Node{%s}", strings.Join(res, ", ")) } func intconst(e ast.Expr) int64 { diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 6dae2cd0a41d2..eeed3740f063c 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -17,6 +17,7 @@ import ( "unicode/utf8" "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/syntax" "cmd/compile/internal/types" "cmd/internal/obj" @@ -74,7 +75,7 @@ func parseFiles(filenames []string) uint { testdclstack() } - localpkg.Height = myheight + ir.LocalPkg.Height = myheight return lines } @@ -140,7 +141,7 @@ type noder struct { linknames []linkname pragcgobuf [][]string err chan syntax.Error - scope ScopeID + scope ir.ScopeID importedUnsafe bool importedEmbed bool @@ -151,7 +152,7 @@ type noder struct { lastCloseScopePos syntax.Pos } -func (p *noder) funcBody(fn *Node, block *syntax.BlockStmt) { +func (p *noder) funcBody(fn *ir.Node, block *syntax.BlockStmt) { oldScope := p.scope p.scope = 0 funchdr(fn) @@ -159,7 +160,7 @@ func (p *noder) funcBody(fn *Node, block *syntax.BlockStmt) { if block != nil { body := p.stmts(block.List) if body == nil { - body = []*Node{nod(OEMPTY, nil, nil)} + body = []*ir.Node{ir.Nod(ir.OEMPTY, nil, nil)} } fn.Nbody.Set(body) @@ -177,7 +178,7 @@ func (p *noder) openScope(pos syntax.Pos) { if trackScopes { Curfn.Func.Parents = append(Curfn.Func.Parents, p.scope) p.scopeVars = 
append(p.scopeVars, len(Curfn.Func.Dcl)) - p.scope = ScopeID(len(Curfn.Func.Parents)) + p.scope = ir.ScopeID(len(Curfn.Func.Parents)) p.markScope(pos) } @@ -202,7 +203,7 @@ func (p *noder) closeScope(pos syntax.Pos) { nmarks := len(Curfn.Func.Marks) Curfn.Func.Marks[nmarks-1].Scope = p.scope - prevScope := ScopeID(0) + prevScope := ir.ScopeID(0) if nmarks >= 2 { prevScope = Curfn.Func.Marks[nmarks-2].Scope } @@ -223,7 +224,7 @@ func (p *noder) markScope(pos syntax.Pos) { if i := len(Curfn.Func.Marks); i > 0 && Curfn.Func.Marks[i-1].Pos == xpos { Curfn.Func.Marks[i-1].Scope = p.scope } else { - Curfn.Func.Marks = append(Curfn.Func.Marks, Mark{xpos, p.scope}) + Curfn.Func.Marks = append(Curfn.Func.Marks, ir.Mark{Pos: xpos, Scope: p.scope}) } } @@ -251,7 +252,7 @@ func (p *noder) node() { mkpackage(p.file.PkgName.Value) if pragma, ok := p.file.Pragma.(*Pragma); ok { - pragma.Flag &^= GoBuildPragma + pragma.Flag &^= ir.GoBuildPragma p.checkUnused(pragma) } @@ -293,7 +294,7 @@ func (p *noder) node() { clearImports() } -func (p *noder) decls(decls []syntax.Decl) (l []*Node) { +func (p *noder) decls(decls []syntax.Decl) (l []*ir.Node) { var cs constState for _, decl := range decls { @@ -355,7 +356,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) { my = lookup(ipkg.Name) } - pack := p.nod(imp, OPACK, nil, nil) + pack := p.nod(imp, ir.OPACK, nil, nil) pack.Sym = my pack.Name.Pkg = ipkg @@ -372,16 +373,16 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) { if my.Def != nil { redeclare(pack.Pos, my, "as imported package name") } - my.Def = asTypesNode(pack) + my.Def = ir.AsTypesNode(pack) my.Lastlineno = pack.Pos my.Block = 1 // at top level } -func (p *noder) varDecl(decl *syntax.VarDecl) []*Node { +func (p *noder) varDecl(decl *syntax.VarDecl) []*ir.Node { names := p.declNames(decl.NameList) typ := p.typeExprOrNil(decl.Type) - var exprs []*Node + var exprs []*ir.Node if decl.Values != nil { exprs = p.exprList(decl.Values) } @@ -413,12 +414,12 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []*Node { // constant declarations are handled correctly (e.g., issue 15550). 
type constState struct { group *syntax.Group - typ *Node - values []*Node + typ *ir.Node + values []*ir.Node iota int64 } -func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*Node { +func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*ir.Node { if decl.Group == nil || decl.Group != cs.group { *cs = constState{ group: decl.Group, @@ -432,7 +433,7 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*Node { names := p.declNames(decl.NameList) typ := p.typeExprOrNil(decl.Type) - var values []*Node + var values []*ir.Node if decl.Values != nil { values = p.exprList(decl.Values) cs.typ, cs.values = typ, values @@ -443,7 +444,7 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*Node { typ, values = cs.typ, cs.values } - nn := make([]*Node, 0, len(names)) + nn := make([]*ir.Node, 0, len(names)) for i, n := range names { if i >= len(values) { base.Errorf("missing value in const declaration") @@ -454,14 +455,14 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*Node { v = treecopy(v, n.Pos) } - n.Op = OLITERAL + n.Op = ir.OLITERAL declare(n, dclcontext) n.Name.Param.Ntype = typ n.Name.Defn = v n.SetIota(cs.iota) - nn = append(nn, p.nod(decl, ODCLCONST, n, nil)) + nn = append(nn, p.nod(decl, ir.ODCLCONST, n, nil)) } if len(values) > len(names) { @@ -473,9 +474,9 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*Node { return nn } -func (p *noder) typeDecl(decl *syntax.TypeDecl) *Node { +func (p *noder) typeDecl(decl *syntax.TypeDecl) *ir.Node { n := p.declName(decl.Name) - n.Op = OTYPE + n.Op = ir.OTYPE declare(n, dclcontext) // decl.Type may be nil but in that case we got a syntax error during parsing @@ -492,31 +493,31 @@ func (p *noder) typeDecl(decl *syntax.TypeDecl) *Node { p.checkUnused(pragma) } - nod := p.nod(decl, ODCLTYPE, n, nil) - if param.Alias() && !langSupported(1, 9, localpkg) { + nod := p.nod(decl, ir.ODCLTYPE, n, nil) + if param.Alias() && !langSupported(1, 9, ir.LocalPkg) { base.ErrorfAt(nod.Pos, "type aliases only supported as of -lang=go1.9") } return nod } -func (p *noder) declNames(names []*syntax.Name) []*Node { - nodes := make([]*Node, 0, len(names)) +func (p *noder) declNames(names []*syntax.Name) []*ir.Node { + nodes := make([]*ir.Node, 0, len(names)) for _, name := range names { nodes = append(nodes, p.declName(name)) } return nodes } -func (p *noder) declName(name *syntax.Name) *Node { +func (p *noder) declName(name *syntax.Name) *ir.Node { n := dclname(p.name(name)) n.Pos = p.pos(name) return n } -func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node { +func (p *noder) funcDecl(fun *syntax.FuncDecl) *ir.Node { name := p.name(fun.Name) t := p.signature(fun.Recv, fun.Type) - f := p.nod(fun, ODCLFUNC, nil, nil) + f := p.nod(fun, ir.ODCLFUNC, nil, nil) if fun.Recv == nil { if name.Name == "init" { @@ -526,14 +527,14 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node { } } - if localpkg.Name == "main" && name.Name == "main" { + if ir.LocalPkg.Name == "main" && name.Name == "main" { if t.List.Len() > 0 || t.Rlist.Len() > 0 { base.ErrorfAt(f.Pos, "func main must have no arguments and no return values") } } } else { f.Func.Shortname = name - name = nblank.Sym // filled in by typecheckfunc + name = ir.BlankNode.Sym // filled in by typecheckfunc } f.Func.Nname = newfuncnamel(p.pos(fun.Name), name, f.Func) @@ -542,7 +543,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node { if pragma, ok := fun.Pragma.(*Pragma); ok { f.Func.Pragma = pragma.Flag & 
FuncPragmas - if pragma.Flag&Systemstack != 0 && pragma.Flag&Nosplit != 0 { + if pragma.Flag&ir.Systemstack != 0 && pragma.Flag&ir.Nosplit != 0 { base.ErrorfAt(f.Pos, "go:nosplit and go:systemstack cannot be combined") } pragma.Flag &^= FuncPragmas @@ -550,22 +551,22 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node { } if fun.Recv == nil { - declare(f.Func.Nname, PFUNC) + declare(f.Func.Nname, ir.PFUNC) } p.funcBody(f, fun.Body) if fun.Body != nil { - if f.Func.Pragma&Noescape != 0 { + if f.Func.Pragma&ir.Noescape != 0 { base.ErrorfAt(f.Pos, "can only use //go:noescape with external func implementations") } } else { - if base.Flag.Complete || strings.HasPrefix(f.funcname(), "init.") { + if base.Flag.Complete || strings.HasPrefix(ir.FuncName(f), "init.") { // Linknamed functions are allowed to have no body. Hopefully // the linkname target has a body. See issue 23311. isLinknamed := false for _, n := range p.linknames { - if f.funcname() == n.local { + if ir.FuncName(f) == n.local { isLinknamed = true break } @@ -579,8 +580,8 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node { return f } -func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) *Node { - n := p.nod(typ, OTFUNC, nil, nil) +func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) *ir.Node { + n := p.nod(typ, ir.OTFUNC, nil, nil) if recv != nil { n.Left = p.param(recv, false, false) } @@ -589,8 +590,8 @@ func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) *Node { return n } -func (p *noder) params(params []*syntax.Field, dddOk bool) []*Node { - nodes := make([]*Node, 0, len(params)) +func (p *noder) params(params []*syntax.Field, dddOk bool) []*ir.Node { + nodes := make([]*ir.Node, 0, len(params)) for i, param := range params { p.setlineno(param) nodes = append(nodes, p.param(param, dddOk, i+1 == len(params))) @@ -598,17 +599,17 @@ func (p *noder) params(params []*syntax.Field, dddOk bool) []*Node { return nodes } -func (p *noder) param(param *syntax.Field, dddOk, final bool) *Node { +func (p *noder) param(param *syntax.Field, dddOk, final bool) *ir.Node { var name *types.Sym if param.Name != nil { name = p.name(param.Name) } typ := p.typeExpr(param.Type) - n := p.nodSym(param, ODCLFIELD, typ, name) + n := p.nodSym(param, ir.ODCLFIELD, typ, name) // rewrite ...T parameter - if typ.Op == ODDD { + if typ.Op == ir.ODDD { if !dddOk { // We mark these as syntax errors to get automatic elimination // of multiple such errors per line (see ErrorfAt in subr.go). @@ -620,7 +621,7 @@ func (p *noder) param(param *syntax.Field, dddOk, final bool) *Node { p.errorAt(param.Name.Pos(), "syntax error: cannot use ... 
with non-final parameter %s", param.Name.Value) } } - typ.Op = OTARRAY + typ.Op = ir.OTARRAY typ.Right = typ.Left typ.Left = nil n.SetIsDDD(true) @@ -632,22 +633,22 @@ func (p *noder) param(param *syntax.Field, dddOk, final bool) *Node { return n } -func (p *noder) exprList(expr syntax.Expr) []*Node { +func (p *noder) exprList(expr syntax.Expr) []*ir.Node { if list, ok := expr.(*syntax.ListExpr); ok { return p.exprs(list.ElemList) } - return []*Node{p.expr(expr)} + return []*ir.Node{p.expr(expr)} } -func (p *noder) exprs(exprs []syntax.Expr) []*Node { - nodes := make([]*Node, 0, len(exprs)) +func (p *noder) exprs(exprs []syntax.Expr) []*ir.Node { + nodes := make([]*ir.Node, 0, len(exprs)) for _, expr := range exprs { nodes = append(nodes, p.expr(expr)) } return nodes } -func (p *noder) expr(expr syntax.Expr) *Node { +func (p *noder) expr(expr syntax.Expr) *ir.Node { p.setlineno(expr) switch expr := expr.(type) { case nil, *syntax.BadExpr: @@ -655,14 +656,14 @@ func (p *noder) expr(expr syntax.Expr) *Node { case *syntax.Name: return p.mkname(expr) case *syntax.BasicLit: - n := nodlit(p.basicLit(expr)) + n := ir.NewLiteral(p.basicLit(expr)) if expr.Kind == syntax.RuneLit { n.Type = types.UntypedRune } n.SetDiag(expr.Bad) // avoid follow-on errors if there was a syntax error return n case *syntax.CompositeLit: - n := p.nod(expr, OCOMPLIT, nil, nil) + n := p.nod(expr, ir.OCOMPLIT, nil, nil) if expr.Type != nil { n.Right = p.expr(expr.Type) } @@ -675,30 +676,30 @@ func (p *noder) expr(expr syntax.Expr) *Node { return n case *syntax.KeyValueExpr: // use position of expr.Key rather than of expr (which has position of ':') - return p.nod(expr.Key, OKEY, p.expr(expr.Key), p.wrapname(expr.Value, p.expr(expr.Value))) + return p.nod(expr.Key, ir.OKEY, p.expr(expr.Key), p.wrapname(expr.Value, p.expr(expr.Value))) case *syntax.FuncLit: return p.funcLit(expr) case *syntax.ParenExpr: - return p.nod(expr, OPAREN, p.expr(expr.X), nil) + return p.nod(expr, ir.OPAREN, p.expr(expr.X), nil) case *syntax.SelectorExpr: // parser.new_dotname obj := p.expr(expr.X) - if obj.Op == OPACK { + if obj.Op == ir.OPACK { obj.Name.SetUsed(true) return importName(obj.Name.Pkg.Lookup(expr.Sel.Value)) } - n := nodSym(OXDOT, obj, p.name(expr.Sel)) + n := nodSym(ir.OXDOT, obj, p.name(expr.Sel)) n.Pos = p.pos(expr) // lineno may have been changed by p.expr(expr.X) return n case *syntax.IndexExpr: - return p.nod(expr, OINDEX, p.expr(expr.X), p.expr(expr.Index)) + return p.nod(expr, ir.OINDEX, p.expr(expr.X), p.expr(expr.Index)) case *syntax.SliceExpr: - op := OSLICE + op := ir.OSLICE if expr.Full { - op = OSLICE3 + op = ir.OSLICE3 } n := p.nod(expr, op, p.expr(expr.X), nil) - var index [3]*Node + var index [3]*ir.Node for i, x := range &expr.Index { if x != nil { index[i] = p.expr(x) @@ -707,7 +708,7 @@ func (p *noder) expr(expr syntax.Expr) *Node { n.SetSliceBounds(index[0], index[1], index[2]) return n case *syntax.AssertExpr: - return p.nod(expr, ODOTTYPE, p.expr(expr.X), p.typeExpr(expr.Type)) + return p.nod(expr, ir.ODOTTYPE, p.expr(expr.X), p.typeExpr(expr.Type)) case *syntax.Operation: if expr.Op == syntax.Add && expr.Y != nil { return p.sum(expr) @@ -718,23 +719,23 @@ func (p *noder) expr(expr syntax.Expr) *Node { } return p.nod(expr, p.binOp(expr.Op), x, p.expr(expr.Y)) case *syntax.CallExpr: - n := p.nod(expr, OCALL, p.expr(expr.Fun), nil) + n := p.nod(expr, ir.OCALL, p.expr(expr.Fun), nil) n.List.Set(p.exprs(expr.ArgList)) n.SetIsDDD(expr.HasDots) return n case *syntax.ArrayType: - var len *Node + var len *ir.Node if 
expr.Len != nil { len = p.expr(expr.Len) } else { - len = p.nod(expr, ODDD, nil, nil) + len = p.nod(expr, ir.ODDD, nil, nil) } - return p.nod(expr, OTARRAY, len, p.typeExpr(expr.Elem)) + return p.nod(expr, ir.OTARRAY, len, p.typeExpr(expr.Elem)) case *syntax.SliceType: - return p.nod(expr, OTARRAY, nil, p.typeExpr(expr.Elem)) + return p.nod(expr, ir.OTARRAY, nil, p.typeExpr(expr.Elem)) case *syntax.DotsType: - return p.nod(expr, ODDD, p.typeExpr(expr.Elem), nil) + return p.nod(expr, ir.ODDD, p.typeExpr(expr.Elem), nil) case *syntax.StructType: return p.structType(expr) case *syntax.InterfaceType: @@ -742,17 +743,17 @@ func (p *noder) expr(expr syntax.Expr) *Node { case *syntax.FuncType: return p.signature(nil, expr) case *syntax.MapType: - return p.nod(expr, OTMAP, p.typeExpr(expr.Key), p.typeExpr(expr.Value)) + return p.nod(expr, ir.OTMAP, p.typeExpr(expr.Key), p.typeExpr(expr.Value)) case *syntax.ChanType: - n := p.nod(expr, OTCHAN, p.typeExpr(expr.Elem), nil) + n := p.nod(expr, ir.OTCHAN, p.typeExpr(expr.Elem), nil) n.SetTChanDir(p.chanDir(expr.Dir)) return n case *syntax.TypeSwitchGuard: - n := p.nod(expr, OTYPESW, nil, p.expr(expr.X)) + n := p.nod(expr, ir.OTYPESW, nil, p.expr(expr.X)) if expr.Lhs != nil { n.Left = p.declName(expr.Lhs) - if n.Left.isBlank() { + if ir.IsBlank(n.Left) { base.Errorf("invalid variable name %v in type switch", n.Left) } } @@ -764,7 +765,7 @@ func (p *noder) expr(expr syntax.Expr) *Node { // sum efficiently handles very large summation expressions (such as // in issue #16394). In particular, it avoids left recursion and // collapses string literals. -func (p *noder) sum(x syntax.Expr) *Node { +func (p *noder) sum(x syntax.Expr) *ir.Node { // While we need to handle long sums with asymptotic // efficiency, the vast majority of sums are very small: ~95% // have only 2 or 3 operands, and ~99% of string literals are @@ -799,11 +800,11 @@ func (p *noder) sum(x syntax.Expr) *Node { // handle correctly. For now, we avoid these problems by // treating named string constants the same as non-constant // operands. - var nstr *Node + var nstr *ir.Node chunks := make([]string, 0, 1) n := p.expr(x) - if Isconst(n, constant.String) && n.Sym == nil { + if ir.IsConst(n, constant.String) && n.Sym == nil { nstr = n chunks = append(chunks, nstr.StringVal()) } @@ -812,7 +813,7 @@ func (p *noder) sum(x syntax.Expr) *Node { add := adds[i] r := p.expr(add.Y) - if Isconst(r, constant.String) && r.Sym == nil { + if ir.IsConst(r, constant.String) && r.Sym == nil { if nstr != nil { // Collapse r into nstr instead of adding to n. chunks = append(chunks, r.StringVal()) @@ -828,7 +829,7 @@ func (p *noder) sum(x syntax.Expr) *Node { nstr = nil chunks = chunks[:0] } - n = p.nod(add, OADD, n, r) + n = p.nod(add, ir.OADD, n, r) } if len(chunks) > 1 { nstr.SetVal(constant.MakeString(strings.Join(chunks, ""))) @@ -837,12 +838,12 @@ func (p *noder) sum(x syntax.Expr) *Node { return n } -func (p *noder) typeExpr(typ syntax.Expr) *Node { +func (p *noder) typeExpr(typ syntax.Expr) *ir.Node { // TODO(mdempsky): Be stricter? typecheck should handle errors anyway. 
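A small input showing what sum buys: the chain below is flattened iteratively (no left recursion), and the adjacent untyped string constants are collapsed, so the tree is roughly ("ab" + x) + "cd" instead of four nested OADDs.

package main

import "fmt"

func main() {
	x := "-"
	s := "a" + "b" + x + "c" + "d" // constants collapse around the variable
	fmt.Println(s)                 // ab-cd
}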
return p.expr(typ) } -func (p *noder) typeExprOrNil(typ syntax.Expr) *Node { +func (p *noder) typeExprOrNil(typ syntax.Expr) *ir.Node { if typ != nil { return p.expr(typ) } @@ -861,15 +862,15 @@ func (p *noder) chanDir(dir syntax.ChanDir) types.ChanDir { panic("unhandled ChanDir") } -func (p *noder) structType(expr *syntax.StructType) *Node { - l := make([]*Node, 0, len(expr.FieldList)) +func (p *noder) structType(expr *syntax.StructType) *ir.Node { + l := make([]*ir.Node, 0, len(expr.FieldList)) for i, field := range expr.FieldList { p.setlineno(field) - var n *Node + var n *ir.Node if field.Name == nil { n = p.embedded(field.Type) } else { - n = p.nodSym(field, ODCLFIELD, p.typeExpr(field.Type), p.name(field.Name)) + n = p.nodSym(field, ir.ODCLFIELD, p.typeExpr(field.Type), p.name(field.Name)) } if i < len(expr.TagList) && expr.TagList[i] != nil { n.SetVal(p.basicLit(expr.TagList[i])) @@ -878,29 +879,29 @@ func (p *noder) structType(expr *syntax.StructType) *Node { } p.setlineno(expr) - n := p.nod(expr, OTSTRUCT, nil, nil) + n := p.nod(expr, ir.OTSTRUCT, nil, nil) n.List.Set(l) return n } -func (p *noder) interfaceType(expr *syntax.InterfaceType) *Node { - l := make([]*Node, 0, len(expr.MethodList)) +func (p *noder) interfaceType(expr *syntax.InterfaceType) *ir.Node { + l := make([]*ir.Node, 0, len(expr.MethodList)) for _, method := range expr.MethodList { p.setlineno(method) - var n *Node + var n *ir.Node if method.Name == nil { - n = p.nodSym(method, ODCLFIELD, importName(p.packname(method.Type)), nil) + n = p.nodSym(method, ir.ODCLFIELD, importName(p.packname(method.Type)), nil) } else { mname := p.name(method.Name) sig := p.typeExpr(method.Type) sig.Left = fakeRecv() - n = p.nodSym(method, ODCLFIELD, sig, mname) + n = p.nodSym(method, ir.ODCLFIELD, sig, mname) ifacedcl(n) } l = append(l, n) } - n := p.nod(expr, OTINTER, nil, nil) + n := p.nod(expr, ir.OTINTER, nil, nil) n.List.Set(l) return n } @@ -915,15 +916,15 @@ func (p *noder) packname(expr syntax.Expr) *types.Sym { return name case *syntax.SelectorExpr: name := p.name(expr.X.(*syntax.Name)) - def := asNode(name.Def) + def := ir.AsNode(name.Def) if def == nil { base.Errorf("undefined: %v", name) return name } var pkg *types.Pkg - if def.Op != OPACK { + if def.Op != ir.OPACK { base.Errorf("%v is not a package", name) - pkg = localpkg + pkg = ir.LocalPkg } else { def.Name.SetUsed(true) pkg = def.Name.Pkg @@ -933,7 +934,7 @@ func (p *noder) packname(expr syntax.Expr) *types.Sym { panic(fmt.Sprintf("unexpected packname: %#v", expr)) } -func (p *noder) embedded(typ syntax.Expr) *Node { +func (p *noder) embedded(typ syntax.Expr) *ir.Node { op, isStar := typ.(*syntax.Operation) if isStar { if op.Op != syntax.Mul || op.Y != nil { @@ -943,25 +944,25 @@ func (p *noder) embedded(typ syntax.Expr) *Node { } sym := p.packname(typ) - n := p.nodSym(typ, ODCLFIELD, importName(sym), lookup(sym.Name)) + n := p.nodSym(typ, ir.ODCLFIELD, importName(sym), lookup(sym.Name)) n.SetEmbedded(true) if isStar { - n.Left = p.nod(op, ODEREF, n.Left, nil) + n.Left = p.nod(op, ir.ODEREF, n.Left, nil) } return n } -func (p *noder) stmts(stmts []syntax.Stmt) []*Node { +func (p *noder) stmts(stmts []syntax.Stmt) []*ir.Node { return p.stmtsFall(stmts, false) } -func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []*Node { - var nodes []*Node +func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []*ir.Node { + var nodes []*ir.Node for i, stmt := range stmts { s := p.stmtFall(stmt, fallOK && i+1 == len(stmts)) if s == nil { - } else if s.Op == OBLOCK 
&& s.Ninit.Len() == 0 { + } else if s.Op == ir.OBLOCK && s.Ninit.Len() == 0 { nodes = append(nodes, s.List.Slice()...) } else { nodes = append(nodes, s) @@ -970,11 +971,11 @@ func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []*Node { return nodes } -func (p *noder) stmt(stmt syntax.Stmt) *Node { +func (p *noder) stmt(stmt syntax.Stmt) *ir.Node { return p.stmtFall(stmt, false) } -func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *Node { +func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *ir.Node { p.setlineno(stmt) switch stmt := stmt.(type) { case *syntax.EmptyStmt: @@ -985,24 +986,24 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *Node { l := p.blockStmt(stmt) if len(l) == 0 { // TODO(mdempsky): Line number? - return nod(OEMPTY, nil, nil) + return ir.Nod(ir.OEMPTY, nil, nil) } return liststmt(l) case *syntax.ExprStmt: return p.wrapname(stmt, p.expr(stmt.X)) case *syntax.SendStmt: - return p.nod(stmt, OSEND, p.expr(stmt.Chan), p.expr(stmt.Value)) + return p.nod(stmt, ir.OSEND, p.expr(stmt.Chan), p.expr(stmt.Value)) case *syntax.DeclStmt: return liststmt(p.decls(stmt.DeclList)) case *syntax.AssignStmt: if stmt.Op != 0 && stmt.Op != syntax.Def { - n := p.nod(stmt, OASOP, p.expr(stmt.Lhs), p.expr(stmt.Rhs)) + n := p.nod(stmt, ir.OASOP, p.expr(stmt.Lhs), p.expr(stmt.Rhs)) n.SetImplicit(stmt.Rhs == syntax.ImplicitOne) n.SetSubOp(p.binOp(stmt.Op)) return n } - n := p.nod(stmt, OAS, nil, nil) // assume common case + n := p.nod(stmt, ir.OAS, nil, nil) // assume common case rhs := p.exprList(stmt.Rhs) lhs := p.assignList(stmt.Lhs, n, stmt.Op == syntax.Def) @@ -1012,26 +1013,26 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *Node { n.Left = lhs[0] n.Right = rhs[0] } else { - n.Op = OAS2 + n.Op = ir.OAS2 n.List.Set(lhs) n.Rlist.Set(rhs) } return n case *syntax.BranchStmt: - var op Op + var op ir.Op switch stmt.Tok { case syntax.Break: - op = OBREAK + op = ir.OBREAK case syntax.Continue: - op = OCONTINUE + op = ir.OCONTINUE case syntax.Fallthrough: if !fallOK { base.Errorf("fallthrough statement out of place") } - op = OFALL + op = ir.OFALL case syntax.Goto: - op = OGOTO + op = ir.OGOTO default: panic("unhandled BranchStmt") } @@ -1041,32 +1042,32 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *Node { } return n case *syntax.CallStmt: - var op Op + var op ir.Op switch stmt.Tok { case syntax.Defer: - op = ODEFER + op = ir.ODEFER case syntax.Go: - op = OGO + op = ir.OGO default: panic("unhandled CallStmt") } return p.nod(stmt, op, p.expr(stmt.Call), nil) case *syntax.ReturnStmt: - var results []*Node + var results []*ir.Node if stmt.Results != nil { results = p.exprList(stmt.Results) } - n := p.nod(stmt, ORETURN, nil, nil) + n := p.nod(stmt, ir.ORETURN, nil, nil) n.List.Set(results) if n.List.Len() == 0 && Curfn != nil { for _, ln := range Curfn.Func.Dcl { - if ln.Class() == PPARAM { + if ln.Class() == ir.PPARAM { continue } - if ln.Class() != PPARAMOUT { + if ln.Class() != ir.PPARAMOUT { break } - if asNode(ln.Sym.Def) != ln { + if ir.AsNode(ln.Sym.Def) != ln { base.Errorf("%s is shadowed during return", ln.Sym.Name) } } @@ -1084,7 +1085,7 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *Node { panic("unhandled Stmt") } -func (p *noder) assignList(expr syntax.Expr, defn *Node, colas bool) []*Node { +func (p *noder) assignList(expr syntax.Expr, defn *ir.Node, colas bool) []*ir.Node { if !colas { return p.exprList(expr) } @@ -1098,13 +1099,13 @@ func (p *noder) assignList(expr syntax.Expr, defn *Node, colas bool) []*Node { exprs = 
[]syntax.Expr{expr} } - res := make([]*Node, len(exprs)) + res := make([]*ir.Node, len(exprs)) seen := make(map[*types.Sym]bool, len(exprs)) newOrErr := false for i, expr := range exprs { p.setlineno(expr) - res[i] = nblank + res[i] = ir.BlankNode name, ok := expr.(*syntax.Name) if !ok { @@ -1131,10 +1132,10 @@ func (p *noder) assignList(expr syntax.Expr, defn *Node, colas bool) []*Node { } newOrErr = true - n := newname(sym) + n := NewName(sym) declare(n, dclcontext) n.Name.Defn = defn - defn.Ninit.Append(nod(ODCL, n, nil)) + defn.Ninit.Append(ir.Nod(ir.ODCL, n, nil)) res[i] = n } @@ -1144,16 +1145,16 @@ func (p *noder) assignList(expr syntax.Expr, defn *Node, colas bool) []*Node { return res } -func (p *noder) blockStmt(stmt *syntax.BlockStmt) []*Node { +func (p *noder) blockStmt(stmt *syntax.BlockStmt) []*ir.Node { p.openScope(stmt.Pos()) nodes := p.stmts(stmt.List) p.closeScope(stmt.Rbrace) return nodes } -func (p *noder) ifStmt(stmt *syntax.IfStmt) *Node { +func (p *noder) ifStmt(stmt *syntax.IfStmt) *ir.Node { p.openScope(stmt.Pos()) - n := p.nod(stmt, OIF, nil, nil) + n := p.nod(stmt, ir.OIF, nil, nil) if stmt.Init != nil { n.Ninit.Set1(p.stmt(stmt.Init)) } @@ -1163,7 +1164,7 @@ func (p *noder) ifStmt(stmt *syntax.IfStmt) *Node { n.Nbody.Set(p.blockStmt(stmt.Then)) if stmt.Else != nil { e := p.stmt(stmt.Else) - if e.Op == OBLOCK && e.Ninit.Len() == 0 { + if e.Op == ir.OBLOCK && e.Ninit.Len() == 0 { n.Rlist.Set(e.List.Slice()) } else { n.Rlist.Set1(e) @@ -1173,20 +1174,20 @@ func (p *noder) ifStmt(stmt *syntax.IfStmt) *Node { return n } -func (p *noder) forStmt(stmt *syntax.ForStmt) *Node { +func (p *noder) forStmt(stmt *syntax.ForStmt) *ir.Node { p.openScope(stmt.Pos()) - var n *Node + var n *ir.Node if r, ok := stmt.Init.(*syntax.RangeClause); ok { if stmt.Cond != nil || stmt.Post != nil { panic("unexpected RangeClause") } - n = p.nod(r, ORANGE, nil, p.expr(r.X)) + n = p.nod(r, ir.ORANGE, nil, p.expr(r.X)) if r.Lhs != nil { n.List.Set(p.assignList(r.Lhs, n, r.Def)) } } else { - n = p.nod(stmt, OFOR, nil, nil) + n = p.nod(stmt, ir.OFOR, nil, nil) if stmt.Init != nil { n.Ninit.Set1(p.stmt(stmt.Init)) } @@ -1202,9 +1203,9 @@ func (p *noder) forStmt(stmt *syntax.ForStmt) *Node { return n } -func (p *noder) switchStmt(stmt *syntax.SwitchStmt) *Node { +func (p *noder) switchStmt(stmt *syntax.SwitchStmt) *ir.Node { p.openScope(stmt.Pos()) - n := p.nod(stmt, OSWITCH, nil, nil) + n := p.nod(stmt, ir.OSWITCH, nil, nil) if stmt.Init != nil { n.Ninit.Set1(p.stmt(stmt.Init)) } @@ -1213,7 +1214,7 @@ func (p *noder) switchStmt(stmt *syntax.SwitchStmt) *Node { } tswitch := n.Left - if tswitch != nil && tswitch.Op != OTYPESW { + if tswitch != nil && tswitch.Op != ir.OTYPESW { tswitch = nil } n.List.Set(p.caseClauses(stmt.Body, tswitch, stmt.Rbrace)) @@ -1222,8 +1223,8 @@ func (p *noder) switchStmt(stmt *syntax.SwitchStmt) *Node { return n } -func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *Node, rbrace syntax.Pos) []*Node { - nodes := make([]*Node, 0, len(clauses)) +func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.Node, rbrace syntax.Pos) []*ir.Node { + nodes := make([]*ir.Node, 0, len(clauses)) for i, clause := range clauses { p.setlineno(clause) if i > 0 { @@ -1231,12 +1232,12 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *Node, rbrace } p.openScope(clause.Pos()) - n := p.nod(clause, OCASE, nil, nil) + n := p.nod(clause, ir.OCASE, nil, nil) if clause.Cases != nil { n.List.Set(p.exprList(clause.Cases)) } if tswitch != nil && 
tswitch.Left != nil { - nn := newname(tswitch.Left.Sym) + nn := NewName(tswitch.Left.Sym) declare(nn, dclcontext) n.Rlist.Set1(nn) // keep track of the instances for reporting unused @@ -1255,7 +1256,7 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *Node, rbrace } n.Nbody.Set(p.stmtsFall(body, true)) - if l := n.Nbody.Len(); l > 0 && n.Nbody.Index(l-1).Op == OFALL { + if l := n.Nbody.Len(); l > 0 && n.Nbody.Index(l-1).Op == ir.OFALL { if tswitch != nil { base.Errorf("cannot fallthrough in type switch") } @@ -1272,14 +1273,14 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *Node, rbrace return nodes } -func (p *noder) selectStmt(stmt *syntax.SelectStmt) *Node { - n := p.nod(stmt, OSELECT, nil, nil) +func (p *noder) selectStmt(stmt *syntax.SelectStmt) *ir.Node { + n := p.nod(stmt, ir.OSELECT, nil, nil) n.List.Set(p.commClauses(stmt.Body, stmt.Rbrace)) return n } -func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []*Node { - nodes := make([]*Node, 0, len(clauses)) +func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []*ir.Node { + nodes := make([]*ir.Node, 0, len(clauses)) for i, clause := range clauses { p.setlineno(clause) if i > 0 { @@ -1287,7 +1288,7 @@ func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []* } p.openScope(clause.Pos()) - n := p.nod(clause, OCASE, nil, nil) + n := p.nod(clause, ir.OCASE, nil, nil) if clause.Comm != nil { n.List.Set1(p.stmt(clause.Comm)) } @@ -1300,18 +1301,18 @@ func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []* return nodes } -func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) *Node { - lhs := p.nodSym(label, OLABEL, nil, p.name(label.Label)) +func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) *ir.Node { + lhs := p.nodSym(label, ir.OLABEL, nil, p.name(label.Label)) - var ls *Node + var ls *ir.Node if label.Stmt != nil { // TODO(mdempsky): Should always be present. ls = p.stmtFall(label.Stmt, fallOK) } lhs.Name.Defn = ls - l := []*Node{lhs} + l := []*ir.Node{lhs} if ls != nil { - if ls.Op == OBLOCK && ls.Ninit.Len() == 0 { + if ls.Op == ir.OBLOCK && ls.Ninit.Len() == 0 { l = append(l, ls.List.Slice()...) 
} else { l = append(l, ls) @@ -1320,50 +1321,50 @@ func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) *Node { return liststmt(l) } -var unOps = [...]Op{ - syntax.Recv: ORECV, - syntax.Mul: ODEREF, - syntax.And: OADDR, +var unOps = [...]ir.Op{ + syntax.Recv: ir.ORECV, + syntax.Mul: ir.ODEREF, + syntax.And: ir.OADDR, - syntax.Not: ONOT, - syntax.Xor: OBITNOT, - syntax.Add: OPLUS, - syntax.Sub: ONEG, + syntax.Not: ir.ONOT, + syntax.Xor: ir.OBITNOT, + syntax.Add: ir.OPLUS, + syntax.Sub: ir.ONEG, } -func (p *noder) unOp(op syntax.Operator) Op { +func (p *noder) unOp(op syntax.Operator) ir.Op { if uint64(op) >= uint64(len(unOps)) || unOps[op] == 0 { panic("invalid Operator") } return unOps[op] } -var binOps = [...]Op{ - syntax.OrOr: OOROR, - syntax.AndAnd: OANDAND, +var binOps = [...]ir.Op{ + syntax.OrOr: ir.OOROR, + syntax.AndAnd: ir.OANDAND, - syntax.Eql: OEQ, - syntax.Neq: ONE, - syntax.Lss: OLT, - syntax.Leq: OLE, - syntax.Gtr: OGT, - syntax.Geq: OGE, + syntax.Eql: ir.OEQ, + syntax.Neq: ir.ONE, + syntax.Lss: ir.OLT, + syntax.Leq: ir.OLE, + syntax.Gtr: ir.OGT, + syntax.Geq: ir.OGE, - syntax.Add: OADD, - syntax.Sub: OSUB, - syntax.Or: OOR, - syntax.Xor: OXOR, + syntax.Add: ir.OADD, + syntax.Sub: ir.OSUB, + syntax.Or: ir.OOR, + syntax.Xor: ir.OXOR, - syntax.Mul: OMUL, - syntax.Div: ODIV, - syntax.Rem: OMOD, - syntax.And: OAND, - syntax.AndNot: OANDNOT, - syntax.Shl: OLSH, - syntax.Shr: ORSH, + syntax.Mul: ir.OMUL, + syntax.Div: ir.ODIV, + syntax.Rem: ir.OMOD, + syntax.And: ir.OAND, + syntax.AndNot: ir.OANDNOT, + syntax.Shl: ir.OLSH, + syntax.Shr: ir.ORSH, } -func (p *noder) binOp(op syntax.Operator) Op { +func (p *noder) binOp(op syntax.Operator) ir.Op { if uint64(op) >= uint64(len(binOps)) || binOps[op] == 0 { panic("invalid Operator") } @@ -1374,7 +1375,7 @@ func (p *noder) binOp(op syntax.Operator) Op { // literal is not compatible with the current language version. func checkLangCompat(lit *syntax.BasicLit) { s := lit.Value - if len(s) <= 2 || langSupported(1, 13, localpkg) { + if len(s) <= 2 || langSupported(1, 13, ir.LocalPkg) { return } // len(s) > 2 @@ -1442,32 +1443,32 @@ func (p *noder) name(name *syntax.Name) *types.Sym { return lookup(name.Value) } -func (p *noder) mkname(name *syntax.Name) *Node { +func (p *noder) mkname(name *syntax.Name) *ir.Node { // TODO(mdempsky): Set line number? return mkname(p.name(name)) } -func (p *noder) wrapname(n syntax.Node, x *Node) *Node { +func (p *noder) wrapname(n syntax.Node, x *ir.Node) *ir.Node { // These nodes do not carry line numbers. // Introduce a wrapper node to give them the correct line. switch x.Op { - case OTYPE, OLITERAL: + case ir.OTYPE, ir.OLITERAL: if x.Sym == nil { break } fallthrough - case ONAME, ONONAME, OPACK: - x = p.nod(n, OPAREN, x, nil) + case ir.ONAME, ir.ONONAME, ir.OPACK: + x = p.nod(n, ir.OPAREN, x, nil) x.SetImplicit(true) } return x } -func (p *noder) nod(orig syntax.Node, op Op, left, right *Node) *Node { - return nodl(p.pos(orig), op, left, right) +func (p *noder) nod(orig syntax.Node, op ir.Op, left, right *ir.Node) *ir.Node { + return ir.NodAt(p.pos(orig), op, left, right) } -func (p *noder) nodSym(orig syntax.Node, op Op, left *Node, sym *types.Sym) *Node { +func (p *noder) nodSym(orig syntax.Node, op ir.Op, left *ir.Node, sym *types.Sym) *ir.Node { n := nodSym(op, left, sym) n.Pos = p.pos(orig) return n @@ -1508,13 +1509,13 @@ var allowedStdPragmas = map[string]bool{ // *Pragma is the value stored in a syntax.Pragma during parsing. 
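// A single Pragma can carry several directive bits at once; Pos keeps
// the source position of each individual flag so that a misplaced
// directive can later be reported at the exact line it appeared on.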
type Pragma struct { - Flag PragmaFlag // collected bits - Pos []PragmaPos // position of each individual flag + Flag ir.PragmaFlag // collected bits + Pos []PragmaPos // position of each individual flag Embeds []PragmaEmbed } type PragmaPos struct { - Flag PragmaFlag + Flag ir.PragmaFlag Pos syntax.Pos } @@ -1631,7 +1632,7 @@ func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.P verb = verb[:i] } flag := pragmaFlag(verb) - const runtimePragmas = Systemstack | Nowritebarrier | Nowritebarrierrec | Yeswritebarrierrec + const runtimePragmas = ir.Systemstack | ir.Nowritebarrier | ir.Nowritebarrierrec | ir.Yeswritebarrierrec if !base.Flag.CompilingRuntime && flag&runtimePragmas != 0 { p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in runtime", verb)}) } @@ -1667,7 +1668,7 @@ func safeArg(name string) bool { return '0' <= c && c <= '9' || 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || c == '.' || c == '_' || c == '/' || c >= utf8.RuneSelf } -func mkname(sym *types.Sym) *Node { +func mkname(sym *types.Sym) *ir.Node { n := oldname(sym) if n.Name != nil && n.Name.Pack != nil { n.Name.Pack.Name.SetUsed(true) diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 6c659c91c7750..2961dbf636a4f 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/bio" "cmd/internal/obj" @@ -83,7 +84,7 @@ func printObjHeader(bout *bio.Writer) { if base.Flag.BuildID != "" { fmt.Fprintf(bout, "build id %q\n", base.Flag.BuildID) } - if localpkg.Name == "main" { + if ir.LocalPkg.Name == "main" { fmt.Fprintf(bout, "main\n") } fmt.Fprintf(bout, "\n") // header ends with blank line @@ -141,7 +142,7 @@ func dumpdata() { for { for i := xtops; i < len(xtop); i++ { n := xtop[i] - if n.Op == ODCLFUNC { + if n.Op == ir.ODCLFUNC { funccompile(n) } } @@ -199,16 +200,16 @@ func dumpLinkerObj(bout *bio.Writer) { } func addptabs() { - if !base.Ctxt.Flag_dynlink || localpkg.Name != "main" { + if !base.Ctxt.Flag_dynlink || ir.LocalPkg.Name != "main" { return } for _, exportn := range exportlist { s := exportn.Sym - n := asNode(s.Def) + n := ir.AsNode(s.Def) if n == nil { continue } - if n.Op != ONAME { + if n.Op != ir.ONAME { continue } if !types.IsExported(s.Name) { @@ -217,37 +218,37 @@ func addptabs() { if s.Pkg.Name != "main" { continue } - if n.Type.Etype == TFUNC && n.Class() == PFUNC { + if n.Type.Etype == types.TFUNC && n.Class() == ir.PFUNC { // function - ptabs = append(ptabs, ptabEntry{s: s, t: asNode(s.Def).Type}) + ptabs = append(ptabs, ptabEntry{s: s, t: ir.AsNode(s.Def).Type}) } else { // variable - ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(asNode(s.Def).Type)}) + ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(ir.AsNode(s.Def).Type)}) } } } -func dumpGlobal(n *Node) { +func dumpGlobal(n *ir.Node) { if n.Type == nil { base.Fatalf("external %v nil type\n", n) } - if n.Class() == PFUNC { + if n.Class() == ir.PFUNC { return } - if n.Sym.Pkg != localpkg { + if n.Sym.Pkg != ir.LocalPkg { return } dowidth(n.Type) ggloblnod(n) } -func dumpGlobalConst(n *Node) { +func dumpGlobalConst(n *ir.Node) { // only export typed constants t := n.Type if t == nil { return } - if n.Sym.Pkg != localpkg { + if n.Sym.Pkg != ir.LocalPkg { return } // only export integer constants for now @@ -257,21 +258,21 @@ func dumpGlobalConst(n *Node) { v := n.Val() if t.IsUntyped() { // Export 
untyped integers as int (if they fit). - t = types.Types[TINT] + t = types.Types[types.TINT] if doesoverflow(v, t) { return } } - base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym.Name, typesymname(t), int64Val(t, v)) + base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym.Name, typesymname(t), ir.Int64Val(t, v)) } func dumpglobls() { // add globals for _, n := range externdcl { switch n.Op { - case ONAME: + case ir.ONAME: dumpGlobal(n) - case OLITERAL: + case ir.OLITERAL: dumpGlobalConst(n) } } @@ -474,12 +475,12 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj. var slicedataGen int -func slicedata(pos src.XPos, s string) *Node { +func slicedata(pos src.XPos, s string) *ir.Node { slicedataGen++ symname := fmt.Sprintf(".gobytes.%d", slicedataGen) - sym := localpkg.Lookup(symname) - symnode := newname(sym) - sym.Def = asTypesNode(symnode) + sym := ir.LocalPkg.Lookup(symname) + symnode := NewName(sym) + sym.Def = ir.AsTypesNode(symnode) lsym := sym.Linksym() off := dstringdata(lsym, 0, s, pos, "slice") @@ -488,8 +489,8 @@ func slicedata(pos src.XPos, s string) *Node { return symnode } -func slicebytes(nam *Node, s string) { - if nam.Op != ONAME { +func slicebytes(nam *ir.Node, s string) { + if nam.Op != ir.ONAME { base.Fatalf("slicebytes %v", nam) } slicesym(nam, slicedata(nam.Pos, s), int64(len(s))) @@ -529,10 +530,10 @@ func dsymptrWeakOff(s *obj.LSym, off int, x *obj.LSym) int { // slicesym writes a static slice symbol {&arr, lencap, lencap} to n. // arr must be an ONAME. slicesym does not modify n. -func slicesym(n, arr *Node, lencap int64) { +func slicesym(n, arr *ir.Node, lencap int64) { s := n.Sym.Linksym() off := n.Xoffset - if arr.Op != ONAME { + if arr.Op != ir.ONAME { base.Fatalf("slicesym non-name arr %v", arr) } s.WriteAddr(base.Ctxt, off, Widthptr, arr.Sym.Linksym(), arr.Xoffset) @@ -542,14 +543,14 @@ func slicesym(n, arr *Node, lencap int64) { // addrsym writes the static address of a to n. a must be an ONAME. // Neither n nor a is modified. -func addrsym(n, a *Node) { - if n.Op != ONAME { +func addrsym(n, a *ir.Node) { + if n.Op != ir.ONAME { base.Fatalf("addrsym n op %v", n.Op) } if n.Sym == nil { base.Fatalf("addrsym nil n sym") } - if a.Op != ONAME { + if a.Op != ir.ONAME { base.Fatalf("addrsym a op %v", a.Op) } s := n.Sym.Linksym() @@ -558,14 +559,14 @@ func addrsym(n, a *Node) { // pfuncsym writes the static address of f to n. f must be a global function. // Neither n nor f is modified. -func pfuncsym(n, f *Node) { - if n.Op != ONAME { +func pfuncsym(n, f *ir.Node) { + if n.Op != ir.ONAME { base.Fatalf("pfuncsym n op %v", n.Op) } if n.Sym == nil { base.Fatalf("pfuncsym nil n sym") } - if f.Class() != PFUNC { + if f.Class() != ir.PFUNC { base.Fatalf("pfuncsym class not PFUNC %d", f.Class()) } s := n.Sym.Linksym() @@ -574,8 +575,8 @@ func pfuncsym(n, f *Node) { // litsym writes the static literal c to n. // Neither n nor c is modified. 
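// For example, for a statically initialized
//
//	var x complex128 = 3 + 4i
//
// litsym writes the real part at x's offset and the imaginary part
// eight bytes later, so x needs no runtime initialization code.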
-func litsym(n, c *Node, wid int) { - if n.Op != ONAME { +func litsym(n, c *ir.Node, wid int) { + if n.Op != ir.ONAME { base.Fatalf("litsym n op %v", n.Op) } if n.Sym == nil { @@ -584,10 +585,10 @@ func litsym(n, c *Node, wid int) { if !types.Identical(n.Type, c.Type) { base.Fatalf("litsym: type mismatch: %v has type %v, but %v has type %v", n, n.Type, c, c.Type) } - if c.Op == ONIL { + if c.Op == ir.ONIL { return } - if c.Op != OLITERAL { + if c.Op != ir.OLITERAL { base.Fatalf("litsym c op %v", c.Op) } s := n.Sym.Linksym() @@ -597,14 +598,14 @@ func litsym(n, c *Node, wid int) { s.WriteInt(base.Ctxt, n.Xoffset, wid, i) case constant.Int: - s.WriteInt(base.Ctxt, n.Xoffset, wid, int64Val(n.Type, u)) + s.WriteInt(base.Ctxt, n.Xoffset, wid, ir.Int64Val(n.Type, u)) case constant.Float: f, _ := constant.Float64Val(u) switch n.Type.Etype { - case TFLOAT32: + case types.TFLOAT32: s.WriteFloat32(base.Ctxt, n.Xoffset, float32(f)) - case TFLOAT64: + case types.TFLOAT64: s.WriteFloat64(base.Ctxt, n.Xoffset, f) } @@ -612,10 +613,10 @@ func litsym(n, c *Node, wid int) { re, _ := constant.Float64Val(constant.Real(u)) im, _ := constant.Float64Val(constant.Imag(u)) switch n.Type.Etype { - case TCOMPLEX64: + case types.TCOMPLEX64: s.WriteFloat32(base.Ctxt, n.Xoffset, float32(re)) s.WriteFloat32(base.Ctxt, n.Xoffset+4, float32(im)) - case TCOMPLEX128: + case types.TCOMPLEX128: s.WriteFloat64(base.Ctxt, n.Xoffset, re) s.WriteFloat64(base.Ctxt, n.Xoffset+8, im) } diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 3b0f31669627b..25bdbd5a4143e 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/src" "fmt" @@ -43,27 +44,27 @@ import ( // Order holds state during the ordering process. type Order struct { - out []*Node // list of generated statements - temp []*Node // stack of temporary variables - free map[string][]*Node // free list of unused temporaries, by type.LongString(). + out []*ir.Node // list of generated statements + temp []*ir.Node // stack of temporary variables + free map[string][]*ir.Node // free list of unused temporaries, by type.LongString(). } // Order rewrites fn.Nbody to apply the ordering constraints // described in the comment at the top of the file. -func order(fn *Node) { +func order(fn *ir.Node) { if base.Flag.W > 1 { s := fmt.Sprintf("\nbefore order %v", fn.Func.Nname.Sym) - dumplist(s, fn.Nbody) + ir.DumpList(s, fn.Nbody) } - orderBlock(&fn.Nbody, map[string][]*Node{}) + orderBlock(&fn.Nbody, map[string][]*ir.Node{}) } // newTemp allocates a new temporary with the given type, // pushes it onto the temp stack, and returns it. // If clear is true, newTemp emits code to zero the temporary. -func (o *Order) newTemp(t *types.Type, clear bool) *Node { - var v *Node +func (o *Order) newTemp(t *types.Type, clear bool) *ir.Node { + var v *ir.Node // Note: LongString is close to the type equality we want, // but not exactly. We still need to double-check with types.Identical. 
key := t.LongString() @@ -81,7 +82,7 @@ func (o *Order) newTemp(t *types.Type, clear bool) *Node { v = temp(t) } if clear { - a := nod(OAS, v, nil) + a := ir.Nod(ir.OAS, v, nil) a = typecheck(a, ctxStmt) o.out = append(o.out, a) } @@ -102,9 +103,9 @@ func (o *Order) newTemp(t *types.Type, clear bool) *Node { // (The other candidate would be map access, but map access // returns a pointer to the result data instead of taking a pointer // to be filled in.) -func (o *Order) copyExpr(n *Node, t *types.Type, clear bool) *Node { +func (o *Order) copyExpr(n *ir.Node, t *types.Type, clear bool) *ir.Node { v := o.newTemp(t, clear) - a := nod(OAS, v, n) + a := ir.Nod(ir.OAS, v, n) a = typecheck(a, ctxStmt) o.out = append(o.out, a) return v @@ -114,20 +115,20 @@ func (o *Order) copyExpr(n *Node, t *types.Type, clear bool) *Node { // The definition of cheap is that n is a variable or constant. // If not, cheapExpr allocates a new tmp, emits tmp = n, // and then returns tmp. -func (o *Order) cheapExpr(n *Node) *Node { +func (o *Order) cheapExpr(n *ir.Node) *ir.Node { if n == nil { return nil } switch n.Op { - case ONAME, OLITERAL, ONIL: + case ir.ONAME, ir.OLITERAL, ir.ONIL: return n - case OLEN, OCAP: + case ir.OLEN, ir.OCAP: l := o.cheapExpr(n.Left) if l == n.Left { return n } - a := n.sepcopy() + a := ir.SepCopy(n) a.Left = l return typecheck(a, ctxExpr) } @@ -142,31 +143,31 @@ func (o *Order) cheapExpr(n *Node) *Node { // as assigning to the original n. // // The intended use is to apply to x when rewriting x += y into x = x + y. -func (o *Order) safeExpr(n *Node) *Node { +func (o *Order) safeExpr(n *ir.Node) *ir.Node { switch n.Op { - case ONAME, OLITERAL, ONIL: + case ir.ONAME, ir.OLITERAL, ir.ONIL: return n - case ODOT, OLEN, OCAP: + case ir.ODOT, ir.OLEN, ir.OCAP: l := o.safeExpr(n.Left) if l == n.Left { return n } - a := n.sepcopy() + a := ir.SepCopy(n) a.Left = l return typecheck(a, ctxExpr) - case ODOTPTR, ODEREF: + case ir.ODOTPTR, ir.ODEREF: l := o.cheapExpr(n.Left) if l == n.Left { return n } - a := n.sepcopy() + a := ir.SepCopy(n) a.Left = l return typecheck(a, ctxExpr) - case OINDEX, OINDEXMAP: - var l *Node + case ir.OINDEX, ir.OINDEXMAP: + var l *ir.Node if n.Left.Type.IsArray() { l = o.safeExpr(n.Left) } else { @@ -176,7 +177,7 @@ func (o *Order) safeExpr(n *Node) *Node { if l == n.Left && r == n.Right { return n } - a := n.sepcopy() + a := ir.SepCopy(n) a.Left = l a.Right = r return typecheck(a, ctxExpr) @@ -193,8 +194,8 @@ func (o *Order) safeExpr(n *Node) *Node { // of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay, // because we emit explicit VARKILL instructions marking the end of those // temporaries' lifetimes. -func isaddrokay(n *Node) bool { - return islvalue(n) && (n.Op != ONAME || n.Class() == PEXTERN || n.IsAutoTmp()) +func isaddrokay(n *ir.Node) bool { + return islvalue(n) && (n.Op != ir.ONAME || n.Class() == ir.PEXTERN || n.IsAutoTmp()) } // addrTemp ensures that n is okay to pass by address to runtime routines. @@ -202,8 +203,8 @@ func isaddrokay(n *Node) bool { // tmp = n, and then returns tmp. // The result of addrTemp MUST be assigned back to n, e.g. // n.Left = o.addrTemp(n.Left) -func (o *Order) addrTemp(n *Node) *Node { - if n.Op == OLITERAL || n.Op == ONIL { +func (o *Order) addrTemp(n *ir.Node) *ir.Node { + if n.Op == ir.OLITERAL || n.Op == ir.ONIL { // TODO: expand this to all static composite literal nodes? 
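// (A constant that must be addressable can be materialized as a
// readonly static symbol instead of a mutable stack temporary.)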
n = defaultlit(n, nil) dowidth(n.Type) @@ -224,7 +225,7 @@ func (o *Order) addrTemp(n *Node) *Node { // mapKeyTemp prepares n to be a key in a map runtime call and returns n. // It should only be used for map runtime calls which have *_fast* versions. -func (o *Order) mapKeyTemp(t *types.Type, n *Node) *Node { +func (o *Order) mapKeyTemp(t *types.Type, n *ir.Node) *ir.Node { // Most map calls need to take the address of the key. // Exception: map*_fast* calls. See golang.org/issue/19015. if mapfast(t) == mapslow { @@ -247,21 +248,21 @@ func (o *Order) mapKeyTemp(t *types.Type, n *Node) *Node { // It would be nice to handle these generally, but because // []byte keys are not allowed in maps, the use of string(k) // comes up in important cases in practice. See issue 3512. -func mapKeyReplaceStrConv(n *Node) bool { +func mapKeyReplaceStrConv(n *ir.Node) bool { var replaced bool switch n.Op { - case OBYTES2STR: - n.Op = OBYTES2STRTMP + case ir.OBYTES2STR: + n.Op = ir.OBYTES2STRTMP replaced = true - case OSTRUCTLIT: + case ir.OSTRUCTLIT: for _, elem := range n.List.Slice() { if mapKeyReplaceStrConv(elem.Left) { replaced = true } } - case OARRAYLIT: + case ir.OARRAYLIT: for _, elem := range n.List.Slice() { - if elem.Op == OKEY { + if elem.Op == ir.OKEY { elem = elem.Right } if mapKeyReplaceStrConv(elem) { @@ -292,11 +293,11 @@ func (o *Order) popTemp(mark ordermarker) { // cleanTempNoPop emits VARKILL instructions to *out // for each temporary above the mark on the temporary stack. // It does not pop the temporaries from the stack. -func (o *Order) cleanTempNoPop(mark ordermarker) []*Node { - var out []*Node +func (o *Order) cleanTempNoPop(mark ordermarker) []*ir.Node { + var out []*ir.Node for i := len(o.temp) - 1; i >= int(mark); i-- { n := o.temp[i] - kill := nod(OVARKILL, n, nil) + kill := ir.Nod(ir.OVARKILL, n, nil) kill = typecheck(kill, ctxStmt) out = append(out, kill) } @@ -311,7 +312,7 @@ func (o *Order) cleanTemp(top ordermarker) { } // stmtList orders each of the statements in the list. 
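// Each statement is offered to orderMakeSliceCopy together with its
// successors, so the pass below can fuse the two-statement pattern
// (a sketch, with hypothetical s, n, and T):
//
//	m := make([]T, n) // OMAKESLICE
//	copy(m, s)        // OCOPY
//
// into a single OMAKESLICECOPY, letting the runtime skip zeroing the
// elements that the copy immediately overwrites.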
-func (o *Order) stmtList(l Nodes) { +func (o *Order) stmtList(l ir.Nodes) { s := l.Slice() for i := range s { orderMakeSliceCopy(s[i:]) @@ -323,7 +324,7 @@ func (o *Order) stmtList(l Nodes) { // m = OMAKESLICE([]T, x); OCOPY(m, s) // and rewrites it to: // m = OMAKESLICECOPY([]T, x, s); nil -func orderMakeSliceCopy(s []*Node) { +func orderMakeSliceCopy(s []*ir.Node) { if base.Flag.N != 0 || instrumenting { return } @@ -335,17 +336,17 @@ func orderMakeSliceCopy(s []*Node) { asn := s[0] copyn := s[1] - if asn == nil || asn.Op != OAS { + if asn == nil || asn.Op != ir.OAS { return } - if asn.Left.Op != ONAME { + if asn.Left.Op != ir.ONAME { return } - if asn.Left.isBlank() { + if ir.IsBlank(asn.Left) { return } maken := asn.Right - if maken == nil || maken.Op != OMAKESLICE { + if maken == nil || maken.Op != ir.OMAKESLICE { return } if maken.Esc == EscNone { @@ -354,16 +355,16 @@ func orderMakeSliceCopy(s []*Node) { if maken.Left == nil || maken.Right != nil { return } - if copyn.Op != OCOPY { + if copyn.Op != ir.OCOPY { return } - if copyn.Left.Op != ONAME { + if copyn.Left.Op != ir.ONAME { return } if asn.Left.Sym != copyn.Left.Sym { return } - if copyn.Right.Op != ONAME { + if copyn.Right.Op != ir.ONAME { return } @@ -371,10 +372,10 @@ func orderMakeSliceCopy(s []*Node) { return } - maken.Op = OMAKESLICECOPY + maken.Op = ir.OMAKESLICECOPY maken.Right = copyn.Right // Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s) - maken.SetBounded(maken.Left.Op == OLEN && samesafeexpr(maken.Left.Left, copyn.Right)) + maken.SetBounded(maken.Left.Op == ir.OLEN && samesafeexpr(maken.Left.Left, copyn.Right)) maken = typecheck(maken, ctxExpr) @@ -391,12 +392,12 @@ func (o *Order) edge() { // Create a new uint8 counter to be allocated in section // __libfuzzer_extra_counters. - counter := staticname(types.Types[TUINT8]) + counter := staticname(types.Types[types.TUINT8]) counter.Name.SetLibfuzzerExtraCounter(true) // counter += 1 - incr := nod(OASOP, counter, nodintconst(1)) - incr.SetSubOp(OADD) + incr := ir.Nod(ir.OASOP, counter, nodintconst(1)) + incr.SetSubOp(ir.OADD) incr = typecheck(incr, ctxStmt) o.out = append(o.out, incr) @@ -405,7 +406,7 @@ func (o *Order) edge() { // orderBlock orders the block of statements in n into a new slice, // and then replaces the old slice in n with the new slice. // free is a map that can be used to obtain temporary variables by type. -func orderBlock(n *Nodes, free map[string][]*Node) { +func orderBlock(n *ir.Nodes, free map[string][]*ir.Node) { var order Order order.free = free mark := order.markTemp() @@ -419,7 +420,7 @@ func orderBlock(n *Nodes, free map[string][]*Node) { // leaves them as the init list of the final *np. // The result of exprInPlace MUST be assigned back to n, e.g. // n.Left = o.exprInPlace(n.Left) -func (o *Order) exprInPlace(n *Node) *Node { +func (o *Order) exprInPlace(n *ir.Node) *ir.Node { var order Order order.free = o.free n = order.expr(n, nil) @@ -436,7 +437,7 @@ func (o *Order) exprInPlace(n *Node) *Node { // The result of orderStmtInPlace MUST be assigned back to n, e.g. // n.Left = orderStmtInPlace(n.Left) // free is a map that can be used to obtain temporary variables by type. -func orderStmtInPlace(n *Node, free map[string][]*Node) *Node { +func orderStmtInPlace(n *ir.Node, free map[string][]*ir.Node) *ir.Node { var order Order order.free = free mark := order.markTemp() @@ -446,8 +447,8 @@ func orderStmtInPlace(n *Node, free map[string][]*Node) *Node { } // init moves n's init list to o.out. 
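// Nodes that can appear in multiple places in the tree, such as names
// and literals, must not be mutated under concurrent compilation, so
// for those init only checks that there is no pending Ninit to move.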
-func (o *Order) init(n *Node) { - if n.mayBeShared() { +func (o *Order) init(n *ir.Node) { + if ir.MayBeShared(n) { // For concurrency safety, don't mutate potentially shared nodes. // First, ensure that no work is required here. if n.Ninit.Len() > 0 { @@ -461,14 +462,14 @@ func (o *Order) init(n *Node) { // call orders the call expression n. // n.Op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY. -func (o *Order) call(n *Node) { +func (o *Order) call(n *ir.Node) { if n.Ninit.Len() > 0 { // Caller should have already called o.init(n). base.Fatalf("%v with unexpected ninit", n.Op) } // Builtin functions. - if n.Op != OCALLFUNC && n.Op != OCALLMETH && n.Op != OCALLINTER { + if n.Op != ir.OCALLFUNC && n.Op != ir.OCALLMETH && n.Op != ir.OCALLINTER { n.Left = o.expr(n.Left, nil) n.Right = o.expr(n.Right, nil) o.exprList(n.List) @@ -479,26 +480,26 @@ func (o *Order) call(n *Node) { n.Left = o.expr(n.Left, nil) o.exprList(n.List) - if n.Op == OCALLINTER { + if n.Op == ir.OCALLINTER { return } - keepAlive := func(arg *Node) { + keepAlive := func(arg *ir.Node) { // If the argument is really a pointer being converted to uintptr, // arrange for the pointer to be kept alive until the call returns, // by copying it into a temp and marking that temp // still alive when we pop the temp stack. - if arg.Op == OCONVNOP && arg.Left.Type.IsUnsafePtr() { + if arg.Op == ir.OCONVNOP && arg.Left.Type.IsUnsafePtr() { x := o.copyExpr(arg.Left, arg.Left.Type, false) arg.Left = x x.Name.SetAddrtaken(true) // ensure SSA keeps the x variable - n.Nbody.Append(typecheck(nod(OVARLIVE, x, nil), ctxStmt)) + n.Nbody.Append(typecheck(ir.Nod(ir.OVARLIVE, x, nil), ctxStmt)) } } // Check for "unsafe-uintptr" tag provided by escape analysis. for i, param := range n.Left.Type.Params().FieldSlice() { if param.Note == unsafeUintptrTag || param.Note == uintptrEscapesTag { - if arg := n.List.Index(i); arg.Op == OSLICELIT { + if arg := n.List.Index(i); arg.Op == ir.OSLICELIT { for _, elt := range arg.List.Slice() { keepAlive(elt) } @@ -524,16 +525,16 @@ func (o *Order) call(n *Node) { // cases they are also typically registerizable, so not much harm done. // And this only applies to the multiple-assignment form. // We could do a more precise analysis if needed, like in walk.go. -func (o *Order) mapAssign(n *Node) { +func (o *Order) mapAssign(n *ir.Node) { switch n.Op { default: base.Fatalf("order.mapAssign %v", n.Op) - case OAS, OASOP: - if n.Left.Op == OINDEXMAP { + case ir.OAS, ir.OASOP: + if n.Left.Op == ir.OINDEXMAP { // Make sure we evaluate the RHS before starting the map insert. // We need to make sure the RHS won't panic. See issue 22881. 
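// For example, in
//
//	m[k] = append(m[k], f()...)
//
// the arguments after the first are evaluated early, so a panic while
// computing them cannot happen halfway through the map insert.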
- if n.Right.Op == OAPPEND { + if n.Right.Op == ir.OAPPEND { s := n.Right.List.Slice()[1:] for i, n := range s { s[i] = o.cheapExpr(n) @@ -544,11 +545,11 @@ func (o *Order) mapAssign(n *Node) { } o.out = append(o.out, n) - case OAS2, OAS2DOTTYPE, OAS2MAPR, OAS2FUNC: - var post []*Node + case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2MAPR, ir.OAS2FUNC: + var post []*ir.Node for i, m := range n.List.Slice() { switch { - case m.Op == OINDEXMAP: + case m.Op == ir.OINDEXMAP: if !m.Left.IsAutoTmp() { m.Left = o.copyExpr(m.Left, m.Left.Type, false) } @@ -556,10 +557,10 @@ func (o *Order) mapAssign(n *Node) { m.Right = o.copyExpr(m.Right, m.Right.Type, false) } fallthrough - case instrumenting && n.Op == OAS2FUNC && !m.isBlank(): + case instrumenting && n.Op == ir.OAS2FUNC && !ir.IsBlank(m): t := o.newTemp(m.Type, false) n.List.SetIndex(i, t) - a := nod(OAS, m, t) + a := ir.Nod(ir.OAS, m, t) a = typecheck(a, ctxStmt) post = append(post, a) } @@ -573,7 +574,7 @@ func (o *Order) mapAssign(n *Node) { // stmt orders the statement n, appending to o.out. // Temporaries created during the statement are cleaned // up using VARKILL instructions as possible. -func (o *Order) stmt(n *Node) { +func (o *Order) stmt(n *ir.Node) { if n == nil { return } @@ -585,22 +586,22 @@ func (o *Order) stmt(n *Node) { default: base.Fatalf("order.stmt %v", n.Op) - case OVARKILL, OVARLIVE, OINLMARK: + case ir.OVARKILL, ir.OVARLIVE, ir.OINLMARK: o.out = append(o.out, n) - case OAS: + case ir.OAS: t := o.markTemp() n.Left = o.expr(n.Left, nil) n.Right = o.expr(n.Right, n.Left) o.mapAssign(n) o.cleanTemp(t) - case OASOP: + case ir.OASOP: t := o.markTemp() n.Left = o.expr(n.Left, nil) n.Right = o.expr(n.Right, nil) - if instrumenting || n.Left.Op == OINDEXMAP && (n.SubOp() == ODIV || n.SubOp() == OMOD) { + if instrumenting || n.Left.Op == ir.OINDEXMAP && (n.SubOp() == ir.ODIV || n.SubOp() == ir.OMOD) { // Rewrite m[k] op= r into m[k] = m[k] op r so // that we can ensure that if op panics // because r is zero, the panic happens before @@ -609,22 +610,22 @@ func (o *Order) stmt(n *Node) { n.Left = o.safeExpr(n.Left) l := treecopy(n.Left, src.NoXPos) - if l.Op == OINDEXMAP { + if l.Op == ir.OINDEXMAP { l.SetIndexMapLValue(false) } l = o.copyExpr(l, n.Left.Type, false) - n.Right = nod(n.SubOp(), l, n.Right) + n.Right = ir.Nod(n.SubOp(), l, n.Right) n.Right = typecheck(n.Right, ctxExpr) n.Right = o.expr(n.Right, nil) - n.Op = OAS + n.Op = ir.OAS n.ResetAux() } o.mapAssign(n) o.cleanTemp(t) - case OAS2: + case ir.OAS2: t := o.markTemp() o.exprList(n.List) o.exprList(n.Rlist) @@ -632,7 +633,7 @@ func (o *Order) stmt(n *Node) { o.cleanTemp(t) // Special: avoid copy of func call n.Right - case OAS2FUNC: + case ir.OAS2FUNC: t := o.markTemp() o.exprList(n.List) o.init(n.Right) @@ -646,14 +647,14 @@ func (o *Order) stmt(n *Node) { // // OAS2MAPR: make sure key is addressable if needed, // and make sure OINDEXMAP is not copied out. - case OAS2DOTTYPE, OAS2RECV, OAS2MAPR: + case ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OAS2MAPR: t := o.markTemp() o.exprList(n.List) switch r := n.Right; r.Op { - case ODOTTYPE2, ORECV: + case ir.ODOTTYPE2, ir.ORECV: r.Left = o.expr(r.Left, nil) - case OINDEXMAP: + case ir.OINDEXMAP: r.Left = o.expr(r.Left, nil) r.Right = o.expr(r.Right, nil) // See similar conversion for OINDEXMAP below. @@ -667,34 +668,34 @@ func (o *Order) stmt(n *Node) { o.cleanTemp(t) // Special: does not save n onto out. - case OBLOCK, OEMPTY: + case ir.OBLOCK, ir.OEMPTY: o.stmtList(n.List) // Special: n->left is not an expression; save as is. 
- case OBREAK, - OCONTINUE, - ODCL, - ODCLCONST, - ODCLTYPE, - OFALL, - OGOTO, - OLABEL, - ORETJMP: + case ir.OBREAK, + ir.OCONTINUE, + ir.ODCL, + ir.ODCLCONST, + ir.ODCLTYPE, + ir.OFALL, + ir.OGOTO, + ir.OLABEL, + ir.ORETJMP: o.out = append(o.out, n) // Special: handle call arguments. - case OCALLFUNC, OCALLINTER, OCALLMETH: + case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH: t := o.markTemp() o.call(n) o.out = append(o.out, n) o.cleanTemp(t) - case OCLOSE, - OCOPY, - OPRINT, - OPRINTN, - ORECOVER, - ORECV: + case ir.OCLOSE, + ir.OCOPY, + ir.OPRINT, + ir.OPRINTN, + ir.ORECOVER, + ir.ORECV: t := o.markTemp() n.Left = o.expr(n.Left, nil) n.Right = o.expr(n.Right, nil) @@ -704,14 +705,14 @@ func (o *Order) stmt(n *Node) { o.cleanTemp(t) // Special: order arguments to inner call but not call itself. - case ODEFER, OGO: + case ir.ODEFER, ir.OGO: t := o.markTemp() o.init(n.Left) o.call(n.Left) o.out = append(o.out, n) o.cleanTemp(t) - case ODELETE: + case ir.ODELETE: t := o.markTemp() n.List.SetFirst(o.expr(n.List.First(), nil)) n.List.SetSecond(o.expr(n.List.Second(), nil)) @@ -721,7 +722,7 @@ func (o *Order) stmt(n *Node) { // Clean temporaries from condition evaluation at // beginning of loop body and after for statement. - case OFOR: + case ir.OFOR: t := o.markTemp() n.Left = o.exprInPlace(n.Left) n.Nbody.Prepend(o.cleanTempNoPop(t)...) @@ -732,7 +733,7 @@ func (o *Order) stmt(n *Node) { // Clean temporaries from condition at // beginning of both branches. - case OIF: + case ir.OIF: t := o.markTemp() n.Left = o.exprInPlace(n.Left) n.Nbody.Prepend(o.cleanTempNoPop(t)...) @@ -744,7 +745,7 @@ func (o *Order) stmt(n *Node) { // Special: argument will be converted to interface using convT2E // so make sure it is an addressable temporary. - case OPANIC: + case ir.OPANIC: t := o.markTemp() n.Left = o.expr(n.Left, nil) if !n.Left.Type.IsInterface() { @@ -753,7 +754,7 @@ func (o *Order) stmt(n *Node) { o.out = append(o.out, n) o.cleanTemp(t) - case ORANGE: + case ir.ORANGE: // n.Right is the expression being ranged over. // order it, and then make a copy if we need one. // We almost always do, to ensure that we don't @@ -767,8 +768,8 @@ func (o *Order) stmt(n *Node) { // Mark []byte(str) range expression to reuse string backing storage. // It is safe because the storage cannot be mutated. - if n.Right.Op == OSTR2BYTES { - n.Right.Op = OSTR2BYTESTMP + if n.Right.Op == ir.OSTR2BYTES { + n.Right.Op = ir.OSTR2BYTESTMP } t := o.markTemp() @@ -779,28 +780,28 @@ func (o *Order) stmt(n *Node) { default: base.Fatalf("order.stmt range %v", n.Type) - case TARRAY, TSLICE: - if n.List.Len() < 2 || n.List.Second().isBlank() { + case types.TARRAY, types.TSLICE: + if n.List.Len() < 2 || ir.IsBlank(n.List.Second()) { // for i := range x will only use x once, to compute len(x). // No need to copy it. break } fallthrough - case TCHAN, TSTRING: + case types.TCHAN, types.TSTRING: // chan, string, slice, array ranges use value multiple times. // make copy. r := n.Right - if r.Type.IsString() && r.Type != types.Types[TSTRING] { - r = nod(OCONV, r, nil) - r.Type = types.Types[TSTRING] + if r.Type.IsString() && r.Type != types.Types[types.TSTRING] { + r = ir.Nod(ir.OCONV, r, nil) + r.Type = types.Types[types.TSTRING] r = typecheck(r, ctxExpr) } n.Right = o.copyExpr(r, r.Type, false) - case TMAP: + case types.TMAP: if isMapClear(n) { // Preserve the body of the map clear pattern so it can // be detected during walk. 
The loop body will not be used @@ -826,7 +827,7 @@ func (o *Order) stmt(n *Node) { o.out = append(o.out, n) o.cleanTemp(t) - case ORETURN: + case ir.ORETURN: o.exprList(n.List) o.out = append(o.out, n) @@ -839,11 +840,11 @@ func (o *Order) stmt(n *Node) { // reordered after the channel evaluation for a different // case (if p were nil, then the timing of the fault would // give this away). - case OSELECT: + case ir.OSELECT: t := o.markTemp() for _, n2 := range n.List.Slice() { - if n2.Op != OCASE { + if n2.Op != ir.OCASE { base.Fatalf("order select case %v", n2.Op) } r := n2.Left @@ -859,20 +860,20 @@ func (o *Order) stmt(n *Node) { } switch r.Op { default: - Dump("select case", r) + ir.Dump("select case", r) base.Fatalf("unknown op in select %v", r.Op) // If this is case x := <-ch or case x, y := <-ch, the case has // the ODCL nodes to declare x and y. We want to delay that // declaration (and possible allocation) until inside the case body. // Delete the ODCL nodes here and recreate them inside the body below. - case OSELRECV, OSELRECV2: + case ir.OSELRECV, ir.OSELRECV2: if r.Colas() { i := 0 - if r.Ninit.Len() != 0 && r.Ninit.First().Op == ODCL && r.Ninit.First().Left == r.Left { + if r.Ninit.Len() != 0 && r.Ninit.First().Op == ir.ODCL && r.Ninit.First().Left == r.Left { i++ } - if i < r.Ninit.Len() && r.Ninit.Index(i).Op == ODCL && r.List.Len() != 0 && r.Ninit.Index(i).Left == r.List.First() { + if i < r.Ninit.Len() && r.Ninit.Index(i).Op == ir.ODCL && r.List.Len() != 0 && r.Ninit.Index(i).Left == r.List.First() { i++ } if i >= r.Ninit.Len() { @@ -881,7 +882,7 @@ func (o *Order) stmt(n *Node) { } if r.Ninit.Len() != 0 { - dumplist("ninit", r.Ninit) + ir.DumpList("ninit", r.Ninit) base.Fatalf("ninit on select recv") } @@ -892,7 +893,7 @@ func (o *Order) stmt(n *Node) { // c is always evaluated; x and ok are only evaluated when assigned. r.Right.Left = o.expr(r.Right.Left, nil) - if r.Right.Left.Op != ONAME { + if r.Right.Left.Op != ir.ONAME { r.Right.Left = o.copyExpr(r.Right.Left, r.Right.Left.Type, false) } @@ -902,7 +903,7 @@ func (o *Order) stmt(n *Node) { // temporary per distinct type, sharing the temp among all receives // with that temp. Similarly one ok bool could be shared among all // the x,ok receives. Not worth doing until there's a clear need. 
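// A sketch of the rewrite performed here:
//
//	case x := <-ch:
//		body
//
// becomes roughly
//
//	case tmp := <-ch:
//		x := tmp
//		body
//
// so that x is declared, and possibly allocated, only if this case runs.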
- if r.Left != nil && r.Left.isBlank() { + if r.Left != nil && ir.IsBlank(r.Left) { r.Left = nil } if r.Left != nil { @@ -912,38 +913,38 @@ func (o *Order) stmt(n *Node) { tmp1 := r.Left if r.Colas() { - tmp2 := nod(ODCL, tmp1, nil) + tmp2 := ir.Nod(ir.ODCL, tmp1, nil) tmp2 = typecheck(tmp2, ctxStmt) n2.Ninit.Append(tmp2) } r.Left = o.newTemp(r.Right.Left.Type.Elem(), r.Right.Left.Type.Elem().HasPointers()) - tmp2 := nod(OAS, tmp1, r.Left) + tmp2 := ir.Nod(ir.OAS, tmp1, r.Left) tmp2 = typecheck(tmp2, ctxStmt) n2.Ninit.Append(tmp2) } - if r.List.Len() != 0 && r.List.First().isBlank() { + if r.List.Len() != 0 && ir.IsBlank(r.List.First()) { r.List.Set(nil) } if r.List.Len() != 0 { tmp1 := r.List.First() if r.Colas() { - tmp2 := nod(ODCL, tmp1, nil) + tmp2 := ir.Nod(ir.ODCL, tmp1, nil) tmp2 = typecheck(tmp2, ctxStmt) n2.Ninit.Append(tmp2) } - r.List.Set1(o.newTemp(types.Types[TBOOL], false)) + r.List.Set1(o.newTemp(types.Types[types.TBOOL], false)) tmp2 := okas(tmp1, r.List.First()) tmp2 = typecheck(tmp2, ctxStmt) n2.Ninit.Append(tmp2) } orderBlock(&n2.Ninit, o.free) - case OSEND: + case ir.OSEND: if r.Ninit.Len() != 0 { - dumplist("ninit", r.Ninit) + ir.DumpList("ninit", r.Ninit) base.Fatalf("ninit on select send") } @@ -977,7 +978,7 @@ func (o *Order) stmt(n *Node) { o.popTemp(t) // Special: value being sent is passed as a pointer; make it addressable. - case OSEND: + case ir.OSEND: t := o.markTemp() n.Left = o.expr(n.Left, nil) n.Right = o.expr(n.Right, nil) @@ -998,16 +999,16 @@ func (o *Order) stmt(n *Node) { // the if-else chain instead.) // For now just clean all the temporaries at the end. // In practice that's fine. - case OSWITCH: + case ir.OSWITCH: if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) { // Add empty "default:" case for instrumentation. - n.List.Append(nod(OCASE, nil, nil)) + n.List.Append(ir.Nod(ir.OCASE, nil, nil)) } t := o.markTemp() n.Left = o.expr(n.Left, nil) for _, ncas := range n.List.Slice() { - if ncas.Op != OCASE { + if ncas.Op != ir.OCASE { base.Fatalf("order switch case %v", ncas.Op) } o.exprListInPlace(ncas.List) @@ -1021,9 +1022,9 @@ func (o *Order) stmt(n *Node) { base.Pos = lno } -func hasDefaultCase(n *Node) bool { +func hasDefaultCase(n *ir.Node) bool { for _, ncas := range n.List.Slice() { - if ncas.Op != OCASE { + if ncas.Op != ir.OCASE { base.Fatalf("expected case, found %v", ncas.Op) } if ncas.List.Len() == 0 { @@ -1034,7 +1035,7 @@ func hasDefaultCase(n *Node) bool { } // exprList orders the expression list l into o. -func (o *Order) exprList(l Nodes) { +func (o *Order) exprList(l ir.Nodes) { s := l.Slice() for i := range s { s[i] = o.expr(s[i], nil) @@ -1043,7 +1044,7 @@ func (o *Order) exprList(l Nodes) { // exprListInPlace orders the expression list l but saves // the side effects on the individual expression ninit lists. -func (o *Order) exprListInPlace(l Nodes) { +func (o *Order) exprListInPlace(l ir.Nodes) { s := l.Slice() for i := range s { s[i] = o.exprInPlace(s[i]) @@ -1051,7 +1052,7 @@ func (o *Order) exprListInPlace(l Nodes) { } // prealloc[x] records the allocation to use for x. -var prealloc = map[*Node]*Node{} +var prealloc = map[*ir.Node]*ir.Node{} // expr orders a single expression, appending side // effects to o.out as needed. @@ -1060,7 +1061,7 @@ var prealloc = map[*Node]*Node{} // to avoid copying the result of the expression to a temporary.) // The result of expr MUST be assigned back to n, e.g. 
// n.Left = o.expr(n.Left, lhs) -func (o *Order) expr(n, lhs *Node) *Node { +func (o *Order) expr(n, lhs *ir.Node) *ir.Node { if n == nil { return n } @@ -1078,11 +1079,11 @@ func (o *Order) expr(n, lhs *Node) *Node { // Addition of strings turns into a function call. // Allocate a temporary to hold the strings. // Fewer than 5 strings use direct runtime helpers. - case OADDSTR: + case ir.OADDSTR: o.exprList(n.List) if n.List.Len() > 5 { - t := types.NewArray(types.Types[TSTRING], int64(n.List.Len())) + t := types.NewArray(types.Types[types.TSTRING], int64(n.List.Len())) prealloc[n] = o.newTemp(t, false) } @@ -1097,19 +1098,19 @@ func (o *Order) expr(n, lhs *Node) *Node { haslit := false for _, n1 := range n.List.Slice() { - hasbyte = hasbyte || n1.Op == OBYTES2STR - haslit = haslit || n1.Op == OLITERAL && len(n1.StringVal()) != 0 + hasbyte = hasbyte || n1.Op == ir.OBYTES2STR + haslit = haslit || n1.Op == ir.OLITERAL && len(n1.StringVal()) != 0 } if haslit && hasbyte { for _, n2 := range n.List.Slice() { - if n2.Op == OBYTES2STR { - n2.Op = OBYTES2STRTMP + if n2.Op == ir.OBYTES2STR { + n2.Op = ir.OBYTES2STRTMP } } } - case OINDEXMAP: + case ir.OINDEXMAP: n.Left = o.expr(n.Left, nil) n.Right = o.expr(n.Right, nil) needCopy := false @@ -1136,7 +1137,7 @@ func (o *Order) expr(n, lhs *Node) *Node { // concrete type (not interface) argument might need an addressable // temporary to pass to the runtime conversion routine. - case OCONVIFACE: + case ir.OCONVIFACE: n.Left = o.expr(n.Left, nil) if n.Left.Type.IsInterface() { break @@ -1148,21 +1149,21 @@ func (o *Order) expr(n, lhs *Node) *Node { n.Left = o.addrTemp(n.Left) } - case OCONVNOP: - if n.Type.IsKind(TUNSAFEPTR) && n.Left.Type.IsKind(TUINTPTR) && (n.Left.Op == OCALLFUNC || n.Left.Op == OCALLINTER || n.Left.Op == OCALLMETH) { + case ir.OCONVNOP: + if n.Type.IsKind(types.TUNSAFEPTR) && n.Left.Type.IsKind(types.TUINTPTR) && (n.Left.Op == ir.OCALLFUNC || n.Left.Op == ir.OCALLINTER || n.Left.Op == ir.OCALLMETH) { // When reordering unsafe.Pointer(f()) into a separate // statement, the conversion and function call must stay // together. See golang.org/issue/15329. o.init(n.Left) o.call(n.Left) - if lhs == nil || lhs.Op != ONAME || instrumenting { + if lhs == nil || lhs.Op != ir.ONAME || instrumenting { n = o.copyExpr(n, n.Type, false) } } else { n.Left = o.expr(n.Left, nil) } - case OANDAND, OOROR: + case ir.OANDAND, ir.OOROR: // ... = LHS && RHS // // var r bool @@ -1176,7 +1177,7 @@ func (o *Order) expr(n, lhs *Node) *Node { // Evaluate left-hand side. lhs := o.expr(n.Left, nil) - o.out = append(o.out, typecheck(nod(OAS, r, lhs), ctxStmt)) + o.out = append(o.out, typecheck(ir.Nod(ir.OAS, r, lhs), ctxStmt)) // Evaluate right-hand side, save generated code. saveout := o.out @@ -1184,14 +1185,14 @@ func (o *Order) expr(n, lhs *Node) *Node { t := o.markTemp() o.edge() rhs := o.expr(n.Right, nil) - o.out = append(o.out, typecheck(nod(OAS, r, rhs), ctxStmt)) + o.out = append(o.out, typecheck(ir.Nod(ir.OAS, r, rhs), ctxStmt)) o.cleanTemp(t) gen := o.out o.out = saveout // If left-hand side doesn't cause a short-circuit, issue right-hand side. 
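// Concretely, "z = x && y" is ordered as
//
//	r := x
//	if r {
//		r = y
//	}
//	z = r
//
// while for "||" the r = y assignment goes into the else branch.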
- nif := nod(OIF, r, nil) - if n.Op == OANDAND { + nif := ir.Nod(ir.OIF, r, nil) + if n.Op == ir.OANDAND { nif.Nbody.Set(gen) } else { nif.Rlist.Set(gen) @@ -1199,24 +1200,24 @@ func (o *Order) expr(n, lhs *Node) *Node { o.out = append(o.out, nif) n = r - case OCALLFUNC, - OCALLINTER, - OCALLMETH, - OCAP, - OCOMPLEX, - OCOPY, - OIMAG, - OLEN, - OMAKECHAN, - OMAKEMAP, - OMAKESLICE, - OMAKESLICECOPY, - ONEW, - OREAL, - ORECOVER, - OSTR2BYTES, - OSTR2BYTESTMP, - OSTR2RUNES: + case ir.OCALLFUNC, + ir.OCALLINTER, + ir.OCALLMETH, + ir.OCAP, + ir.OCOMPLEX, + ir.OCOPY, + ir.OIMAG, + ir.OLEN, + ir.OMAKECHAN, + ir.OMAKEMAP, + ir.OMAKESLICE, + ir.OMAKESLICECOPY, + ir.ONEW, + ir.OREAL, + ir.ORECOVER, + ir.OSTR2BYTES, + ir.OSTR2BYTESTMP, + ir.OSTR2RUNES: if isRuneCount(n) { // len([]rune(s)) is rewritten to runtime.countrunes(s) later. @@ -1225,11 +1226,11 @@ func (o *Order) expr(n, lhs *Node) *Node { o.call(n) } - if lhs == nil || lhs.Op != ONAME || instrumenting { + if lhs == nil || lhs.Op != ir.ONAME || instrumenting { n = o.copyExpr(n, n.Type, false) } - case OAPPEND: + case ir.OAPPEND: // Check for append(x, make([]T, y)...) . if isAppendOfMake(n) { n.List.SetFirst(o.expr(n.List.First(), nil)) // order x @@ -1238,11 +1239,11 @@ func (o *Order) expr(n, lhs *Node) *Node { o.exprList(n.List) } - if lhs == nil || lhs.Op != ONAME && !samesafeexpr(lhs, n.List.First()) { + if lhs == nil || lhs.Op != ir.ONAME && !samesafeexpr(lhs, n.List.First()) { n = o.copyExpr(n, n.Type, false) } - case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR: + case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR: n.Left = o.expr(n.Left, nil) low, high, max := n.SliceBounds() low = o.expr(low, nil) @@ -1252,16 +1253,16 @@ func (o *Order) expr(n, lhs *Node) *Node { max = o.expr(max, nil) max = o.cheapExpr(max) n.SetSliceBounds(low, high, max) - if lhs == nil || lhs.Op != ONAME && !samesafeexpr(lhs, n.Left) { + if lhs == nil || lhs.Op != ir.ONAME && !samesafeexpr(lhs, n.Left) { n = o.copyExpr(n, n.Type, false) } - case OCLOSURE: + case ir.OCLOSURE: if n.Transient() && n.Func.ClosureVars.Len() > 0 { prealloc[n] = o.newTemp(closureType(n), false) } - case OSLICELIT, OCALLPART: + case ir.OSLICELIT, ir.OCALLPART: n.Left = o.expr(n.Left, nil) n.Right = o.expr(n.Right, nil) o.exprList(n.List) @@ -1269,25 +1270,25 @@ func (o *Order) expr(n, lhs *Node) *Node { if n.Transient() { var t *types.Type switch n.Op { - case OSLICELIT: + case ir.OSLICELIT: t = types.NewArray(n.Type.Elem(), n.Right.Int64Val()) - case OCALLPART: + case ir.OCALLPART: t = partialCallType(n) } prealloc[n] = o.newTemp(t, false) } - case ODOTTYPE, ODOTTYPE2: + case ir.ODOTTYPE, ir.ODOTTYPE2: n.Left = o.expr(n.Left, nil) if !isdirectiface(n.Type) || instrumenting { n = o.copyExpr(n, n.Type, true) } - case ORECV: + case ir.ORECV: n.Left = o.expr(n.Left, nil) n = o.copyExpr(n, n.Type, true) - case OEQ, ONE, OLT, OLE, OGT, OGE: + case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: n.Left = o.expr(n.Left, nil) n.Right = o.expr(n.Right, nil) @@ -1297,11 +1298,11 @@ func (o *Order) expr(n, lhs *Node) *Node { // Mark string(byteSlice) arguments to reuse byteSlice backing // buffer during conversion. String comparison does not // memorize the strings for later use, so it is safe. 
- if n.Left.Op == OBYTES2STR { - n.Left.Op = OBYTES2STRTMP + if n.Left.Op == ir.OBYTES2STR { + n.Left.Op = ir.OBYTES2STRTMP } - if n.Right.Op == OBYTES2STR { - n.Right.Op = OBYTES2STRTMP + if n.Right.Op == ir.OBYTES2STR { + n.Right.Op = ir.OBYTES2STRTMP } case t.IsStruct() || t.IsArray(): @@ -1310,7 +1311,7 @@ func (o *Order) expr(n, lhs *Node) *Node { n.Left = o.addrTemp(n.Left) n.Right = o.addrTemp(n.Right) } - case OMAPLIT: + case ir.OMAPLIT: // Order map by converting: // map[int]int{ // a(): b(), @@ -1328,9 +1329,9 @@ func (o *Order) expr(n, lhs *Node) *Node { // See issue 26552. entries := n.List.Slice() statics := entries[:0] - var dynamics []*Node + var dynamics []*ir.Node for _, r := range entries { - if r.Op != OKEY { + if r.Op != ir.OKEY { base.Fatalf("OMAPLIT entry not OKEY: %v\n", r) } @@ -1357,14 +1358,14 @@ func (o *Order) expr(n, lhs *Node) *Node { // Emit the creation of the map (with all its static entries). m := o.newTemp(n.Type, false) - as := nod(OAS, m, n) + as := ir.Nod(ir.OAS, m, n) typecheck(as, ctxStmt) o.stmt(as) n = m // Emit eval+insert of dynamic entries, one at a time. for _, r := range dynamics { - as := nod(OAS, nod(OINDEX, n, r.Left), r.Right) + as := ir.Nod(ir.OAS, ir.Nod(ir.OINDEX, n, r.Left), r.Right) typecheck(as, ctxStmt) // Note: this converts the OINDEX to an OINDEXMAP o.stmt(as) } @@ -1376,11 +1377,11 @@ func (o *Order) expr(n, lhs *Node) *Node { // okas creates and returns an assignment of val to ok, // including an explicit conversion if necessary. -func okas(ok, val *Node) *Node { - if !ok.isBlank() { +func okas(ok, val *ir.Node) *ir.Node { + if !ir.IsBlank(ok) { val = conv(val, ok.Type) } - return nod(OAS, ok, val) + return ir.Nod(ir.OAS, ok, val) } // as2 orders OAS2XXXX nodes. It creates temporaries to ensure left-to-right assignment. @@ -1391,11 +1392,11 @@ func okas(ok, val *Node) *Node { // tmp1, tmp2, tmp3 = ... // a, b, a = tmp1, tmp2, tmp3 // This is necessary to ensure left to right assignment order. -func (o *Order) as2(n *Node) { - tmplist := []*Node{} - left := []*Node{} +func (o *Order) as2(n *ir.Node) { + tmplist := []*ir.Node{} + left := []*ir.Node{} for ni, l := range n.List.Slice() { - if !l.isBlank() { + if !ir.IsBlank(l) { tmp := o.newTemp(l.Type, l.Type.HasPointers()) n.List.SetIndex(ni, tmp) tmplist = append(tmplist, tmp) @@ -1405,7 +1406,7 @@ func (o *Order) as2(n *Node) { o.out = append(o.out, n) - as := nod(OAS2, nil, nil) + as := ir.Nod(ir.OAS2, nil, nil) as.List.Set(left) as.Rlist.Set(tmplist) as = typecheck(as, ctxStmt) @@ -1414,21 +1415,21 @@ func (o *Order) as2(n *Node) { // okAs2 orders OAS2XXX with ok. // Just like as2, this also adds temporaries to ensure left-to-right assignment. 
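// For example, "v, ok = m[k]" is ordered as
//
//	tmp1, tmp2 = m[k]
//	v = tmp1
//	ok = tmp2
//
// which keeps assignments left to right even if v or ok alias parts of
// the right-hand side.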
-func (o *Order) okAs2(n *Node) { - var tmp1, tmp2 *Node - if !n.List.First().isBlank() { +func (o *Order) okAs2(n *ir.Node) { + var tmp1, tmp2 *ir.Node + if !ir.IsBlank(n.List.First()) { typ := n.Right.Type tmp1 = o.newTemp(typ, typ.HasPointers()) } - if !n.List.Second().isBlank() { - tmp2 = o.newTemp(types.Types[TBOOL], false) + if !ir.IsBlank(n.List.Second()) { + tmp2 = o.newTemp(types.Types[types.TBOOL], false) } o.out = append(o.out, n) if tmp1 != nil { - r := nod(OAS, n.List.First(), tmp1) + r := ir.Nod(ir.OAS, n.List.First(), tmp1) r = typecheck(r, ctxStmt) o.mapAssign(r) n.List.SetFirst(tmp1) diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index f10599dc2806b..38f416c1c3842 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/ssa" "cmd/compile/internal/types" "cmd/internal/dwarf" @@ -23,14 +24,14 @@ import ( // "Portable" code generation. var ( - compilequeue []*Node // functions waiting to be compiled + compilequeue []*ir.Node // functions waiting to be compiled ) -func emitptrargsmap(fn *Node) { - if fn.funcname() == "_" || fn.Func.Nname.Sym.Linkname != "" { +func emitptrargsmap(fn *ir.Node) { + if ir.FuncName(fn) == "_" || fn.Func.Nname.Sym.Linkname != "" { return } - lsym := base.Ctxt.Lookup(fn.Func.lsym.Name + ".args_stackmap") + lsym := base.Ctxt.Lookup(fn.Func.LSym.Name + ".args_stackmap") nptr := int(fn.Type.ArgWidth() / int64(Widthptr)) bv := bvalloc(int32(nptr) * 2) @@ -41,7 +42,7 @@ func emitptrargsmap(fn *Node) { off := duint32(lsym, 0, uint32(nbitmap)) off = duint32(lsym, off, uint32(bv.n)) - if fn.IsMethod() { + if ir.IsMethod(fn) { onebitwalktype1(fn.Type.Recvs(), 0, bv) } if fn.Type.NumParams() > 0 { @@ -67,12 +68,12 @@ func emitptrargsmap(fn *Node) { // really means, in memory, things with pointers needing zeroing at // the top of the stack and increasing in size. // Non-autos sort on offset. -func cmpstackvarlt(a, b *Node) bool { - if (a.Class() == PAUTO) != (b.Class() == PAUTO) { - return b.Class() == PAUTO +func cmpstackvarlt(a, b *ir.Node) bool { + if (a.Class() == ir.PAUTO) != (b.Class() == ir.PAUTO) { + return b.Class() == ir.PAUTO } - if a.Class() != PAUTO { + if a.Class() != ir.PAUTO { return a.Xoffset < b.Xoffset } @@ -100,7 +101,7 @@ func cmpstackvarlt(a, b *Node) bool { } // byStackvar implements sort.Interface for []*Node using cmpstackvarlt. -type byStackVar []*Node +type byStackVar []*ir.Node func (s byStackVar) Len() int { return len(s) } func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) } @@ -113,28 +114,28 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { // Mark the PAUTO's unused. for _, ln := range fn.Dcl { - if ln.Class() == PAUTO { + if ln.Class() == ir.PAUTO { ln.Name.SetUsed(false) } } for _, l := range f.RegAlloc { if ls, ok := l.(ssa.LocalSlot); ok { - ls.N.(*Node).Name.SetUsed(true) + ls.N.(*ir.Node).Name.SetUsed(true) } } scratchUsed := false for _, b := range f.Blocks { for _, v := range b.Values { - if n, ok := v.Aux.(*Node); ok { + if n, ok := v.Aux.(*ir.Node); ok { switch n.Class() { - case PPARAM, PPARAMOUT: + case ir.PPARAM, ir.PPARAMOUT: // Don't modify nodfp; it is a global. 
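// (nodfp is the shared pseudo-variable for the frame pointer, so
// marking it used here would leak state between functions.)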
if n != nodfp { n.Name.SetUsed(true) } - case PAUTO: + case ir.PAUTO: n.Name.SetUsed(true) } } @@ -146,7 +147,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { } if f.Config.NeedsFpScratch && scratchUsed { - s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[TUINT64]) + s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[types.TUINT64]) } sort.Sort(byStackVar(fn.Dcl)) @@ -154,7 +155,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { // Reassign stack offsets of the locals that are used. lastHasPtr := false for i, n := range fn.Dcl { - if n.Op != ONAME || n.Class() != PAUTO { + if n.Op != ir.ONAME || n.Class() != ir.PAUTO { continue } if !n.Name.Used() { @@ -192,7 +193,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg)) } -func funccompile(fn *Node) { +func funccompile(fn *ir.Node) { if Curfn != nil { base.Fatalf("funccompile %v inside %v", fn.Func.Nname.Sym, Curfn.Func.Nname.Sym) } @@ -209,21 +210,21 @@ func funccompile(fn *Node) { if fn.Nbody.Len() == 0 { // Initialize ABI wrappers if necessary. - fn.Func.initLSym(false) + initLSym(fn.Func, false) emitptrargsmap(fn) return } - dclcontext = PAUTO + dclcontext = ir.PAUTO Curfn = fn compile(fn) Curfn = nil - dclcontext = PEXTERN + dclcontext = ir.PEXTERN } -func compile(fn *Node) { +func compile(fn *ir.Node) { errorsBefore := base.Errors() order(fn) if base.Errors() > errorsBefore { @@ -233,7 +234,7 @@ func compile(fn *Node) { // Set up the function's LSym early to avoid data races with the assemblers. // Do this before walk, as walk needs the LSym to set attributes/relocations // (e.g. in markTypeUsedInInterface). - fn.Func.initLSym(true) + initLSym(fn.Func, true) walk(fn) if base.Errors() > errorsBefore { @@ -246,7 +247,7 @@ func compile(fn *Node) { // From this point, there should be no uses of Curfn. Enforce that. Curfn = nil - if fn.funcname() == "_" { + if ir.FuncName(fn) == "_" { // We don't need to generate code for this function, just report errors in its body. // At this point we've generated any errors needed. // (Beyond here we generate only non-spec errors, like "stack frame too large".) @@ -260,13 +261,13 @@ func compile(fn *Node) { // phase of the compiler. for _, n := range fn.Func.Dcl { switch n.Class() { - case PPARAM, PPARAMOUT, PAUTO: + case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO: if livenessShouldTrack(n) && n.Name.Addrtaken() { dtypesym(n.Type) // Also make sure we allocate a linker symbol // for the stack object data, for the same reason. - if fn.Func.lsym.Func().StackObjects == nil { - fn.Func.lsym.Func().StackObjects = base.Ctxt.Lookup(fn.Func.lsym.Name + ".stkobj") + if fn.Func.LSym.Func().StackObjects == nil { + fn.Func.LSym.Func().StackObjects = base.Ctxt.Lookup(fn.Func.LSym.Name + ".stkobj") } } } @@ -283,13 +284,13 @@ func compile(fn *Node) { // If functions are not compiled immediately, // they are enqueued in compilequeue, // which is drained by compileFunctions. -func compilenow(fn *Node) bool { +func compilenow(fn *ir.Node) bool { // Issue 38068: if this function is a method AND an inline // candidate AND was not inlined (yet), put it onto the compile // queue instead of compiling it immediately. This is in case we // wind up inlining it into a method wrapper that is generated by // compiling a function later on in the xtop list. 
- if fn.IsMethod() && isInlinableButNotInlined(fn) { + if ir.IsMethod(fn) && isInlinableButNotInlined(fn) { return false } return base.Flag.LowerC == 1 && base.Debug.CompileLater == 0 @@ -298,7 +299,7 @@ func compilenow(fn *Node) bool { // isInlinableButNotInlined returns true if 'fn' was marked as an // inline candidate but then never inlined (presumably because we // found no call sites). -func isInlinableButNotInlined(fn *Node) bool { +func isInlinableButNotInlined(fn *ir.Node) bool { if fn.Func.Nname.Func.Inl == nil { return false } @@ -314,7 +315,7 @@ const maxStackSize = 1 << 30 // uses it to generate a plist, // and flushes that plist to machine code. // worker indicates which of the backend workers is doing the processing. -func compileSSA(fn *Node, worker int) { +func compileSSA(fn *ir.Node, worker int) { f := buildssa(fn, worker) // Note: check arg size to fix issue 25507. if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type.ArgWidth() >= maxStackSize { @@ -359,7 +360,7 @@ func compileFunctions() { sizeCalculationDisabled = true // not safe to calculate sizes concurrently if race.Enabled { // Randomize compilation order to try to shake out races. - tmp := make([]*Node, len(compilequeue)) + tmp := make([]*ir.Node, len(compilequeue)) perm := rand.Perm(len(compilequeue)) for i, v := range perm { tmp[v] = compilequeue[i] @@ -375,7 +376,7 @@ func compileFunctions() { } var wg sync.WaitGroup base.Ctxt.InParallel = true - c := make(chan *Node, base.Flag.LowerC) + c := make(chan *ir.Node, base.Flag.LowerC) for i := 0; i < base.Flag.LowerC; i++ { wg.Add(1) go func(worker int) { @@ -397,7 +398,7 @@ func compileFunctions() { } func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) { - fn := curfn.(*Node) + fn := curfn.(*ir.Node) if fn.Func.Nname != nil { if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect { base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect) @@ -429,17 +430,17 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S // // These two adjustments keep toolstash -cmp working for now. // Deciding the right answer is, as they say, future work. - isODCLFUNC := fn.Op == ODCLFUNC + isODCLFUNC := fn.Op == ir.ODCLFUNC - var apdecls []*Node + var apdecls []*ir.Node // Populate decls for fn. 
if isODCLFUNC { for _, n := range fn.Func.Dcl { - if n.Op != ONAME { // might be OTYPE or OLITERAL + if n.Op != ir.ONAME { // might be OTYPE or OLITERAL continue } switch n.Class() { - case PAUTO: + case ir.PAUTO: if !n.Name.Used() { // Text == nil -> generating abstract function if fnsym.Func().Text != nil { @@ -447,7 +448,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S } continue } - case PPARAM, PPARAMOUT: + case ir.PPARAM, ir.PPARAMOUT: default: continue } @@ -474,7 +475,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S } fnsym.Func().Autot = nil - var varScopes []ScopeID + var varScopes []ir.ScopeID for _, decl := range decls { pos := declPos(decl) varScopes = append(varScopes, findScope(fn.Func.Marks, pos)) @@ -488,7 +489,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S return scopes, inlcalls } -func declPos(decl *Node) src.XPos { +func declPos(decl *ir.Node) src.XPos { if decl.Name.Defn != nil && (decl.Name.Captured() || decl.Name.Byval()) { // It's not clear which position is correct for captured variables here: // * decl.Pos is the wrong position for captured variables, in the inner @@ -511,10 +512,10 @@ func declPos(decl *Node) src.XPos { // createSimpleVars creates a DWARF entry for every variable declared in the // function, claiming that they are permanently on the stack. -func createSimpleVars(fnsym *obj.LSym, apDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) { +func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Node) ([]*ir.Node, []*dwarf.Var, map[*ir.Node]bool) { var vars []*dwarf.Var - var decls []*Node - selected := make(map[*Node]bool) + var decls []*ir.Node + selected := make(map[*ir.Node]bool) for _, n := range apDecls { if n.IsAutoTmp() { continue @@ -527,12 +528,12 @@ func createSimpleVars(fnsym *obj.LSym, apDecls []*Node) ([]*Node, []*dwarf.Var, return decls, vars, selected } -func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var { +func createSimpleVar(fnsym *obj.LSym, n *ir.Node) *dwarf.Var { var abbrev int offs := n.Xoffset switch n.Class() { - case PAUTO: + case ir.PAUTO: abbrev = dwarf.DW_ABRV_AUTO if base.Ctxt.FixedFrameSize() == 0 { offs -= int64(Widthptr) @@ -542,7 +543,7 @@ func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var { offs -= int64(Widthptr) } - case PPARAM, PPARAMOUT: + case ir.PPARAM, ir.PPARAMOUT: abbrev = dwarf.DW_ABRV_PARAM offs += base.Ctxt.FixedFrameSize() default: @@ -563,7 +564,7 @@ func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var { declpos := base.Ctxt.InnermostPos(declPos(n)) return &dwarf.Var{ Name: n.Sym.Name, - IsReturnValue: n.Class() == PPARAMOUT, + IsReturnValue: n.Class() == ir.PPARAMOUT, IsInlFormal: n.Name.InlFormal(), Abbrev: abbrev, StackOffset: int32(offs), @@ -578,19 +579,19 @@ func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var { // createComplexVars creates recomposed DWARF vars with location lists, // suitable for describing optimized code. -func createComplexVars(fnsym *obj.LSym, fn *Func) ([]*Node, []*dwarf.Var, map[*Node]bool) { +func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Node, []*dwarf.Var, map[*ir.Node]bool) { debugInfo := fn.DebugInfo // Produce a DWARF variable entry for each user variable. 
- var decls []*Node + var decls []*ir.Node var vars []*dwarf.Var - ssaVars := make(map[*Node]bool) + ssaVars := make(map[*ir.Node]bool) for varID, dvar := range debugInfo.Vars { - n := dvar.(*Node) + n := dvar.(*ir.Node) ssaVars[n] = true for _, slot := range debugInfo.VarSlots[varID] { - ssaVars[debugInfo.Slots[slot].N.(*Node)] = true + ssaVars[debugInfo.Slots[slot].N.(*ir.Node)] = true } if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil { @@ -604,11 +605,11 @@ func createComplexVars(fnsym *obj.LSym, fn *Func) ([]*Node, []*dwarf.Var, map[*N // createDwarfVars process fn, returning a list of DWARF variables and the // Nodes they represent. -func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *Func, apDecls []*Node) ([]*Node, []*dwarf.Var) { +func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir.Node) ([]*ir.Node, []*dwarf.Var) { // Collect a raw list of DWARF vars. var vars []*dwarf.Var - var decls []*Node - var selected map[*Node]bool + var decls []*ir.Node + var selected map[*ir.Node]bool if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK { decls, vars, selected = createComplexVars(fnsym, fn) } else { @@ -640,7 +641,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *Func, apDecls []*Node) if c == '.' || n.Type.IsUntyped() { continue } - if n.Class() == PPARAM && !canSSAType(n.Type) { + if n.Class() == ir.PPARAM && !canSSAType(n.Type) { // SSA-able args get location lists, and may move in and // out of registers, so those are handled elsewhere. // Autos and named output params seem to get handled @@ -655,10 +656,10 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *Func, apDecls []*Node) typename := dwarf.InfoPrefix + typesymname(n.Type) decls = append(decls, n) abbrev := dwarf.DW_ABRV_AUTO_LOCLIST - isReturnValue := (n.Class() == PPARAMOUT) - if n.Class() == PPARAM || n.Class() == PPARAMOUT { + isReturnValue := (n.Class() == ir.PPARAMOUT) + if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT { abbrev = dwarf.DW_ABRV_PARAM_LOCLIST - } else if n.Class() == PAUTOHEAP { + } else if n.Class() == ir.PAUTOHEAP { // If dcl in question has been promoted to heap, do a bit // of extra work to recover original class (auto or param); // see issue 30908. This insures that we get the proper @@ -667,9 +668,9 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *Func, apDecls []*Node) // and not stack). // TODO(thanm): generate a better location expression stackcopy := n.Name.Param.Stackcopy - if stackcopy != nil && (stackcopy.Class() == PPARAM || stackcopy.Class() == PPARAMOUT) { + if stackcopy != nil && (stackcopy.Class() == ir.PPARAM || stackcopy.Class() == ir.PPARAMOUT) { abbrev = dwarf.DW_ABRV_PARAM_LOCLIST - isReturnValue = (stackcopy.Class() == PPARAMOUT) + isReturnValue = (stackcopy.Class() == ir.PPARAMOUT) } } inlIndex := 0 @@ -707,9 +708,9 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *Func, apDecls []*Node) // function that is not local to the package being compiled, then the // names of the variables may have been "versioned" to avoid conflicts // with local vars; disregard this versioning when sorting. 
-func preInliningDcls(fnsym *obj.LSym) []*Node { - fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*Node) - var rdcl []*Node +func preInliningDcls(fnsym *obj.LSym) []*ir.Node { + fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*ir.Node) + var rdcl []*ir.Node for _, n := range fn.Func.Inl.Dcl { c := n.Sym.Name[0] // Avoid reporting "_" parameters, since if there are more than @@ -726,10 +727,10 @@ func preInliningDcls(fnsym *obj.LSym) []*Node { // stack pointer, suitable for use in a DWARF location entry. This has nothing // to do with its offset in the user variable. func stackOffset(slot ssa.LocalSlot) int32 { - n := slot.N.(*Node) + n := slot.N.(*ir.Node) var off int64 switch n.Class() { - case PAUTO: + case ir.PAUTO: if base.Ctxt.FixedFrameSize() == 0 { off -= int64(Widthptr) } @@ -737,22 +738,22 @@ func stackOffset(slot ssa.LocalSlot) int32 { // There is a word space for FP on ARM64 even if the frame pointer is disabled off -= int64(Widthptr) } - case PPARAM, PPARAMOUT: + case ir.PPARAM, ir.PPARAMOUT: off += base.Ctxt.FixedFrameSize() } return int32(off + n.Xoffset + slot.Off) } // createComplexVar builds a single DWARF variable entry and location list. -func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var { +func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var { debug := fn.DebugInfo - n := debug.Vars[varID].(*Node) + n := debug.Vars[varID].(*ir.Node) var abbrev int switch n.Class() { - case PAUTO: + case ir.PAUTO: abbrev = dwarf.DW_ABRV_AUTO_LOCLIST - case PPARAM, PPARAMOUT: + case ir.PPARAM, ir.PPARAMOUT: abbrev = dwarf.DW_ABRV_PARAM_LOCLIST default: return nil @@ -773,7 +774,7 @@ func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var { declpos := base.Ctxt.InnermostPos(n.Pos) dvar := &dwarf.Var{ Name: n.Sym.Name, - IsReturnValue: n.Class() == PPARAMOUT, + IsReturnValue: n.Class() == ir.PPARAMOUT, IsInlFormal: n.Name.InlFormal(), Abbrev: abbrev, Type: base.Ctxt.Lookup(typename), diff --git a/src/cmd/compile/internal/gc/pgen_test.go b/src/cmd/compile/internal/gc/pgen_test.go index 932ab47d02635..9f1f00d46a500 100644 --- a/src/cmd/compile/internal/gc/pgen_test.go +++ b/src/cmd/compile/internal/gc/pgen_test.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/ir" "cmd/compile/internal/types" "reflect" "sort" @@ -12,133 +13,133 @@ import ( ) func typeWithoutPointers() *types.Type { - t := types.New(TSTRUCT) - f := &types.Field{Type: types.New(TINT)} + t := types.New(types.TSTRUCT) + f := &types.Field{Type: types.New(types.TINT)} t.SetFields([]*types.Field{f}) return t } func typeWithPointers() *types.Type { - t := types.New(TSTRUCT) - f := &types.Field{Type: types.NewPtr(types.New(TINT))} + t := types.New(types.TSTRUCT) + f := &types.Field{Type: types.NewPtr(types.New(types.TINT))} t.SetFields([]*types.Field{f}) return t } -func markUsed(n *Node) *Node { +func markUsed(n *ir.Node) *ir.Node { n.Name.SetUsed(true) return n } -func markNeedZero(n *Node) *Node { +func markNeedZero(n *ir.Node) *ir.Node { n.Name.SetNeedzero(true) return n } // Test all code paths for cmpstackvarlt. 
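// (Editor's summary, not part of the patch.) The ordering the cases
// below exercise: non-PAUTO entries sort first, among themselves by
// frame offset; among autos, used variables come before unused,
// pointer-bearing types before pointer-free, needzero before not,
// larger widths before smaller, and ties break on symbol name.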
func TestCmpstackvar(t *testing.T) { - nod := func(xoffset int64, t *types.Type, s *types.Sym, cl Class) *Node { + nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Node { if s == nil { s = &types.Sym{Name: "."} } - n := newname(s) + n := NewName(s) n.Type = t n.Xoffset = xoffset n.SetClass(cl) return n } testdata := []struct { - a, b *Node + a, b *ir.Node lt bool }{ { - nod(0, nil, nil, PAUTO), - nod(0, nil, nil, PFUNC), + nod(0, nil, nil, ir.PAUTO), + nod(0, nil, nil, ir.PFUNC), false, }, { - nod(0, nil, nil, PFUNC), - nod(0, nil, nil, PAUTO), + nod(0, nil, nil, ir.PFUNC), + nod(0, nil, nil, ir.PAUTO), true, }, { - nod(0, nil, nil, PFUNC), - nod(10, nil, nil, PFUNC), + nod(0, nil, nil, ir.PFUNC), + nod(10, nil, nil, ir.PFUNC), true, }, { - nod(20, nil, nil, PFUNC), - nod(10, nil, nil, PFUNC), + nod(20, nil, nil, ir.PFUNC), + nod(10, nil, nil, ir.PFUNC), false, }, { - nod(10, nil, nil, PFUNC), - nod(10, nil, nil, PFUNC), + nod(10, nil, nil, ir.PFUNC), + nod(10, nil, nil, ir.PFUNC), false, }, { - nod(10, nil, nil, PPARAM), - nod(20, nil, nil, PPARAMOUT), + nod(10, nil, nil, ir.PPARAM), + nod(20, nil, nil, ir.PPARAMOUT), true, }, { - nod(10, nil, nil, PPARAMOUT), - nod(20, nil, nil, PPARAM), + nod(10, nil, nil, ir.PPARAMOUT), + nod(20, nil, nil, ir.PPARAM), true, }, { - markUsed(nod(0, nil, nil, PAUTO)), - nod(0, nil, nil, PAUTO), + markUsed(nod(0, nil, nil, ir.PAUTO)), + nod(0, nil, nil, ir.PAUTO), true, }, { - nod(0, nil, nil, PAUTO), - markUsed(nod(0, nil, nil, PAUTO)), + nod(0, nil, nil, ir.PAUTO), + markUsed(nod(0, nil, nil, ir.PAUTO)), false, }, { - nod(0, typeWithoutPointers(), nil, PAUTO), - nod(0, typeWithPointers(), nil, PAUTO), + nod(0, typeWithoutPointers(), nil, ir.PAUTO), + nod(0, typeWithPointers(), nil, ir.PAUTO), false, }, { - nod(0, typeWithPointers(), nil, PAUTO), - nod(0, typeWithoutPointers(), nil, PAUTO), + nod(0, typeWithPointers(), nil, ir.PAUTO), + nod(0, typeWithoutPointers(), nil, ir.PAUTO), true, }, { - markNeedZero(nod(0, &types.Type{}, nil, PAUTO)), - nod(0, &types.Type{}, nil, PAUTO), + markNeedZero(nod(0, &types.Type{}, nil, ir.PAUTO)), + nod(0, &types.Type{}, nil, ir.PAUTO), true, }, { - nod(0, &types.Type{}, nil, PAUTO), - markNeedZero(nod(0, &types.Type{}, nil, PAUTO)), + nod(0, &types.Type{}, nil, ir.PAUTO), + markNeedZero(nod(0, &types.Type{}, nil, ir.PAUTO)), false, }, { - nod(0, &types.Type{Width: 1}, nil, PAUTO), - nod(0, &types.Type{Width: 2}, nil, PAUTO), + nod(0, &types.Type{Width: 1}, nil, ir.PAUTO), + nod(0, &types.Type{Width: 2}, nil, ir.PAUTO), false, }, { - nod(0, &types.Type{Width: 2}, nil, PAUTO), - nod(0, &types.Type{Width: 1}, nil, PAUTO), + nod(0, &types.Type{Width: 2}, nil, ir.PAUTO), + nod(0, &types.Type{Width: 1}, nil, ir.PAUTO), true, }, { - nod(0, &types.Type{}, &types.Sym{Name: "abc"}, PAUTO), - nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, PAUTO), + nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO), + nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO), true, }, { - nod(0, &types.Type{}, &types.Sym{Name: "abc"}, PAUTO), - nod(0, &types.Type{}, &types.Sym{Name: "abc"}, PAUTO), + nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO), + nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO), false, }, { - nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, PAUTO), - nod(0, &types.Type{}, &types.Sym{Name: "abc"}, PAUTO), + nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO), + nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO), false, }, } @@ -155,42 +156,42 @@ func TestCmpstackvar(t 
*testing.T) { } func TestStackvarSort(t *testing.T) { - nod := func(xoffset int64, t *types.Type, s *types.Sym, cl Class) *Node { - n := newname(s) + nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Node { + n := NewName(s) n.Type = t n.Xoffset = xoffset n.SetClass(cl) return n } - inp := []*Node{ - nod(0, &types.Type{}, &types.Sym{}, PFUNC), - nod(0, &types.Type{}, &types.Sym{}, PAUTO), - nod(0, &types.Type{}, &types.Sym{}, PFUNC), - nod(10, &types.Type{}, &types.Sym{}, PFUNC), - nod(20, &types.Type{}, &types.Sym{}, PFUNC), - markUsed(nod(0, &types.Type{}, &types.Sym{}, PAUTO)), - nod(0, typeWithoutPointers(), &types.Sym{}, PAUTO), - nod(0, &types.Type{}, &types.Sym{}, PAUTO), - markNeedZero(nod(0, &types.Type{}, &types.Sym{}, PAUTO)), - nod(0, &types.Type{Width: 1}, &types.Sym{}, PAUTO), - nod(0, &types.Type{Width: 2}, &types.Sym{}, PAUTO), - nod(0, &types.Type{}, &types.Sym{Name: "abc"}, PAUTO), - nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, PAUTO), + inp := []*ir.Node{ + nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC), + nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO), + nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC), + nod(10, &types.Type{}, &types.Sym{}, ir.PFUNC), + nod(20, &types.Type{}, &types.Sym{}, ir.PFUNC), + markUsed(nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO)), + nod(0, typeWithoutPointers(), &types.Sym{}, ir.PAUTO), + nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO), + markNeedZero(nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO)), + nod(0, &types.Type{Width: 1}, &types.Sym{}, ir.PAUTO), + nod(0, &types.Type{Width: 2}, &types.Sym{}, ir.PAUTO), + nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO), + nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO), } - want := []*Node{ - nod(0, &types.Type{}, &types.Sym{}, PFUNC), - nod(0, &types.Type{}, &types.Sym{}, PFUNC), - nod(10, &types.Type{}, &types.Sym{}, PFUNC), - nod(20, &types.Type{}, &types.Sym{}, PFUNC), - markUsed(nod(0, &types.Type{}, &types.Sym{}, PAUTO)), - markNeedZero(nod(0, &types.Type{}, &types.Sym{}, PAUTO)), - nod(0, &types.Type{Width: 2}, &types.Sym{}, PAUTO), - nod(0, &types.Type{Width: 1}, &types.Sym{}, PAUTO), - nod(0, &types.Type{}, &types.Sym{}, PAUTO), - nod(0, &types.Type{}, &types.Sym{}, PAUTO), - nod(0, &types.Type{}, &types.Sym{Name: "abc"}, PAUTO), - nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, PAUTO), - nod(0, typeWithoutPointers(), &types.Sym{}, PAUTO), + want := []*ir.Node{ + nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC), + nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC), + nod(10, &types.Type{}, &types.Sym{}, ir.PFUNC), + nod(20, &types.Type{}, &types.Sym{}, ir.PFUNC), + markUsed(nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO)), + markNeedZero(nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO)), + nod(0, &types.Type{Width: 2}, &types.Sym{}, ir.PAUTO), + nod(0, &types.Type{Width: 1}, &types.Sym{}, ir.PAUTO), + nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO), + nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO), + nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO), + nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO), + nod(0, typeWithoutPointers(), &types.Sym{}, ir.PAUTO), } sort.Sort(byStackVar(inp)) if !reflect.DeepEqual(want, inp) { diff --git a/src/cmd/compile/internal/gc/phi.go b/src/cmd/compile/internal/gc/phi.go index 4beaa11a7e3f3..2a88d4a5b4314 100644 --- a/src/cmd/compile/internal/gc/phi.go +++ b/src/cmd/compile/internal/gc/phi.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/ir" "cmd/compile/internal/ssa" "cmd/compile/internal/types" 
"cmd/internal/src" @@ -40,11 +41,11 @@ func (s *state) insertPhis() { } type phiState struct { - s *state // SSA state - f *ssa.Func // function to work on - defvars []map[*Node]*ssa.Value // defined variables at end of each block + s *state // SSA state + f *ssa.Func // function to work on + defvars []map[*ir.Node]*ssa.Value // defined variables at end of each block - varnum map[*Node]int32 // variable numbering + varnum map[*ir.Node]int32 // variable numbering // properties of the dominator tree idom []*ssa.Block // dominator parents @@ -70,15 +71,15 @@ func (s *phiState) insertPhis() { // Find all the variables for which we need to match up reads & writes. // This step prunes any basic-block-only variables from consideration. // Generate a numbering for these variables. - s.varnum = map[*Node]int32{} - var vars []*Node + s.varnum = map[*ir.Node]int32{} + var vars []*ir.Node var vartypes []*types.Type for _, b := range s.f.Blocks { for _, v := range b.Values { if v.Op != ssa.OpFwdRef { continue } - var_ := v.Aux.(*Node) + var_ := v.Aux.(*ir.Node) // Optimization: look back 1 block for the definition. if len(b.Preds) == 1 { @@ -183,7 +184,7 @@ levels: } } -func (s *phiState) insertVarPhis(n int, var_ *Node, defs []*ssa.Block, typ *types.Type) { +func (s *phiState) insertVarPhis(n int, var_ *ir.Node, defs []*ssa.Block, typ *types.Type) { priq := &s.priq q := s.q queued := s.queued @@ -318,7 +319,7 @@ func (s *phiState) resolveFwdRefs() { if v.Op != ssa.OpFwdRef { continue } - n := s.varnum[v.Aux.(*Node)] + n := s.varnum[v.Aux.(*ir.Node)] v.Op = ssa.OpCopy v.Aux = nil v.AddArg(values[n]) @@ -432,11 +433,11 @@ func (s *sparseSet) clear() { // Variant to use for small functions. type simplePhiState struct { - s *state // SSA state - f *ssa.Func // function to work on - fwdrefs []*ssa.Value // list of FwdRefs to be processed - defvars []map[*Node]*ssa.Value // defined variables at end of each block - reachable []bool // which blocks are reachable + s *state // SSA state + f *ssa.Func // function to work on + fwdrefs []*ssa.Value // list of FwdRefs to be processed + defvars []map[*ir.Node]*ssa.Value // defined variables at end of each block + reachable []bool // which blocks are reachable } func (s *simplePhiState) insertPhis() { @@ -449,7 +450,7 @@ func (s *simplePhiState) insertPhis() { continue } s.fwdrefs = append(s.fwdrefs, v) - var_ := v.Aux.(*Node) + var_ := v.Aux.(*ir.Node) if _, ok := s.defvars[b.ID][var_]; !ok { s.defvars[b.ID][var_] = v // treat FwdDefs as definitions. } @@ -463,7 +464,7 @@ loop: v := s.fwdrefs[len(s.fwdrefs)-1] s.fwdrefs = s.fwdrefs[:len(s.fwdrefs)-1] b := v.Block - var_ := v.Aux.(*Node) + var_ := v.Aux.(*ir.Node) if b == s.f.Entry { // No variable should be live at entry. s.s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, var_, v) @@ -511,7 +512,7 @@ loop: } // lookupVarOutgoing finds the variable's value at the end of block b. 
-func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ *Node, line src.XPos) *ssa.Value { +func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ *ir.Node, line src.XPos) *ssa.Value { for { if v := s.defvars[b.ID][var_]; v != nil { return v diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index da2298480ad60..f0895884668ad 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -16,6 +16,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/ssa" "cmd/compile/internal/types" "cmd/internal/obj" @@ -100,10 +101,10 @@ type BlockEffects struct { // A collection of global state used by liveness analysis. type Liveness struct { - fn *Node + fn *ir.Node f *ssa.Func - vars []*Node - idx map[*Node]int32 + vars []*ir.Node + idx map[*ir.Node]int32 stkptrsize int64 be []BlockEffects @@ -205,20 +206,20 @@ type progeffectscache struct { // nor do we care about non-local variables, // nor do we care about empty structs (handled by the pointer check), // nor do we care about the fake PAUTOHEAP variables. -func livenessShouldTrack(n *Node) bool { - return n.Op == ONAME && (n.Class() == PAUTO || n.Class() == PPARAM || n.Class() == PPARAMOUT) && n.Type.HasPointers() +func livenessShouldTrack(n *ir.Node) bool { + return n.Op == ir.ONAME && (n.Class() == ir.PAUTO || n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Type.HasPointers() } // getvariables returns the list of on-stack variables that we need to track // and a map for looking up indices by *Node. -func getvariables(fn *Node) ([]*Node, map[*Node]int32) { - var vars []*Node +func getvariables(fn *ir.Node) ([]*ir.Node, map[*ir.Node]int32) { + var vars []*ir.Node for _, n := range fn.Func.Dcl { if livenessShouldTrack(n) { vars = append(vars, n) } } - idx := make(map[*Node]int32, len(vars)) + idx := make(map[*ir.Node]int32, len(vars)) for i, n := range vars { idx[n] = int32(i) } @@ -234,7 +235,7 @@ func (lv *Liveness) initcache() { for i, node := range lv.vars { switch node.Class() { - case PPARAM: + case ir.PPARAM: // A return instruction with a p.to is a tail return, which brings // the stack pointer back up (if it ever went down) and then jumps // to a new function entirely. That form of instruction must read @@ -243,7 +244,7 @@ func (lv *Liveness) initcache() { // function runs. lv.cache.tailuevar = append(lv.cache.tailuevar, int32(i)) - case PPARAMOUT: + case ir.PPARAMOUT: // All results are live at every return point. // Note that this point is after escaping return values // are copied back to the stack using their PAUTOHEAP references. @@ -271,7 +272,7 @@ const ( // If v does not affect any tracked variables, it returns -1, 0. func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) { n, e := affectedNode(v) - if e == 0 || n == nil || n.Op != ONAME { // cheapest checks first + if e == 0 || n == nil || n.Op != ir.ONAME { // cheapest checks first return -1, 0 } @@ -311,7 +312,7 @@ func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) { } // affectedNode returns the *Node affected by v -func affectedNode(v *ssa.Value) (*Node, ssa.SymEffect) { +func affectedNode(v *ssa.Value) (*ir.Node, ssa.SymEffect) { // Special cases. 
switch v.Op { case ssa.OpLoadReg: @@ -322,9 +323,9 @@ func affectedNode(v *ssa.Value) (*Node, ssa.SymEffect) { return n, ssa.SymWrite case ssa.OpVarLive: - return v.Aux.(*Node), ssa.SymRead + return v.Aux.(*ir.Node), ssa.SymRead case ssa.OpVarDef, ssa.OpVarKill: - return v.Aux.(*Node), ssa.SymWrite + return v.Aux.(*ir.Node), ssa.SymWrite case ssa.OpKeepAlive: n, _ := AutoVar(v.Args[0]) return n, ssa.SymRead @@ -339,7 +340,7 @@ func affectedNode(v *ssa.Value) (*Node, ssa.SymEffect) { case nil, *obj.LSym: // ok, but no node return nil, e - case *Node: + case *ir.Node: return a, e default: base.Fatalf("weird aux: %s", v.LongString()) @@ -355,7 +356,7 @@ type livenessFuncCache struct { // Constructs a new liveness structure used to hold the global state of the // liveness computation. The cfg argument is a slice of *BasicBlocks and the // vars argument is a slice of *Nodes. -func newliveness(fn *Node, f *ssa.Func, vars []*Node, idx map[*Node]int32, stkptrsize int64) *Liveness { +func newliveness(fn *ir.Node, f *ssa.Func, vars []*ir.Node, idx map[*ir.Node]int32, stkptrsize int64) *Liveness { lv := &Liveness{ fn: fn, f: f, @@ -416,20 +417,20 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) { } switch t.Etype { - case TPTR, TUNSAFEPTR, TFUNC, TCHAN, TMAP: + case types.TPTR, types.TUNSAFEPTR, types.TFUNC, types.TCHAN, types.TMAP: if off&int64(Widthptr-1) != 0 { base.Fatalf("onebitwalktype1: invalid alignment, %v", t) } bv.Set(int32(off / int64(Widthptr))) // pointer - case TSTRING: + case types.TSTRING: // struct { byte *str; intgo len; } if off&int64(Widthptr-1) != 0 { base.Fatalf("onebitwalktype1: invalid alignment, %v", t) } bv.Set(int32(off / int64(Widthptr))) //pointer in first slot - case TINTER: + case types.TINTER: // struct { Itab *tab; void *data; } // or, when isnilinter(t)==true: // struct { Type *type; void *data; } @@ -450,14 +451,14 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) { // well as scan itabs to update their itab._type fields). bv.Set(int32(off/int64(Widthptr) + 1)) // pointer in second slot - case TSLICE: + case types.TSLICE: // struct { byte *array; uintgo len; uintgo cap; } if off&int64(Widthptr-1) != 0 { base.Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t) } bv.Set(int32(off / int64(Widthptr))) // pointer in first slot (BitsPointer) - case TARRAY: + case types.TARRAY: elt := t.Elem() if elt.Width == 0 { // Short-circuit for #20739. @@ -468,7 +469,7 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) { off += elt.Width } - case TSTRUCT: + case types.TSTRUCT: for _, f := range t.Fields().Slice() { onebitwalktype1(f.Type, off+f.Offset, bv) } @@ -481,7 +482,7 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) { // Generates live pointer value maps for arguments and local variables. The // this argument and the in arguments are always assumed live. The vars // argument is a slice of *Nodes. 
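// (Editor's example, not part of the patch.) onebitwalktype1 sets one
// bit per pointer-sized word that may hold a pointer: on a 64-bit
// target, a local of type struct{ p *byte; n int64; q *byte } occupies
// three words and contributes bits 0 and 2 to its stack map.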
-func (lv *Liveness) pointerMap(liveout bvec, vars []*Node, args, locals bvec) { +func (lv *Liveness) pointerMap(liveout bvec, vars []*ir.Node, args, locals bvec) { for i := int32(0); ; i++ { i = liveout.Next(i) if i < 0 { @@ -489,10 +490,10 @@ func (lv *Liveness) pointerMap(liveout bvec, vars []*Node, args, locals bvec) { } node := vars[i] switch node.Class() { - case PAUTO: + case ir.PAUTO: onebitwalktype1(node.Type, node.Xoffset+lv.stkptrsize, locals) - case PPARAM, PPARAMOUT: + case ir.PPARAM, ir.PPARAMOUT: onebitwalktype1(node.Type, node.Xoffset, args) } } @@ -789,7 +790,7 @@ func (lv *Liveness) epilogue() { // don't need to keep the stack copy live? if lv.fn.Func.HasDefer() { for i, n := range lv.vars { - if n.Class() == PPARAMOUT { + if n.Class() == ir.PPARAMOUT { if n.Name.IsOutputParamHeapAddr() { // Just to be paranoid. Heap addresses are PAUTOs. base.Fatalf("variable %v both output param and heap output param", n) @@ -887,7 +888,7 @@ func (lv *Liveness) epilogue() { if !liveout.Get(int32(i)) { continue } - if n.Class() == PPARAM { + if n.Class() == ir.PPARAM { continue // ok } base.Fatalf("bad live variable at entry of %v: %L", lv.fn.Func.Nname, n) @@ -920,7 +921,7 @@ func (lv *Liveness) epilogue() { // the only things that can possibly be live are the // input parameters. for j, n := range lv.vars { - if n.Class() != PPARAM && lv.stackMaps[0].Get(int32(j)) { + if n.Class() != ir.PPARAM && lv.stackMaps[0].Get(int32(j)) { lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Func.Nname, n) } } @@ -967,7 +968,7 @@ func (lv *Liveness) compact(b *ssa.Block) { } func (lv *Liveness) showlive(v *ssa.Value, live bvec) { - if base.Flag.Live == 0 || lv.fn.funcname() == "init" || strings.HasPrefix(lv.fn.funcname(), ".") { + if base.Flag.Live == 0 || ir.FuncName(lv.fn) == "init" || strings.HasPrefix(ir.FuncName(lv.fn), ".") { return } if !(v == nil || v.Op.IsCall()) { @@ -986,7 +987,7 @@ func (lv *Liveness) showlive(v *ssa.Value, live bvec) { s := "live at " if v == nil { - s += fmt.Sprintf("entry to %s:", lv.fn.funcname()) + s += fmt.Sprintf("entry to %s:", ir.FuncName(lv.fn)) } else if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil { fn := sym.Fn.Name if pos := strings.Index(fn, "."); pos >= 0 { @@ -1051,7 +1052,7 @@ func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool) bo // This format synthesizes the information used during the multiple passes // into a single presentation. func (lv *Liveness) printDebug() { - fmt.Printf("liveness: %s\n", lv.fn.funcname()) + fmt.Printf("liveness: %s\n", ir.FuncName(lv.fn)) for i, b := range lv.f.Blocks { if i > 0 { @@ -1163,10 +1164,10 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) { // Size args bitmaps to be just large enough to hold the largest pointer. // First, find the largest Xoffset node we care about. // (Nodes without pointers aren't in lv.vars; see livenessShouldTrack.) 
- var maxArgNode *Node + var maxArgNode *ir.Node for _, n := range lv.vars { switch n.Class() { - case PPARAM, PPARAMOUT: + case ir.PPARAM, ir.PPARAMOUT: if maxArgNode == nil || n.Xoffset > maxArgNode.Xoffset { maxArgNode = n } @@ -1265,7 +1266,7 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap { } // Emit the live pointer map data structures - ls := e.curfn.Func.lsym + ls := e.curfn.Func.LSym fninfo := ls.Func() fninfo.GCArgs, fninfo.GCLocals = lv.emit() @@ -1300,16 +1301,16 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap { func isfat(t *types.Type) bool { if t != nil { switch t.Etype { - case TSLICE, TSTRING, - TINTER: // maybe remove later + case types.TSLICE, types.TSTRING, + types.TINTER: // maybe remove later return true - case TARRAY: + case types.TARRAY: // Array of 1 element, check if element is fat if t.NumElem() == 1 { return isfat(t.Elem()) } return true - case TSTRUCT: + case types.TSTRUCT: // Struct with 1 field, check if field is fat if t.NumFields() == 1 { return isfat(t.Field(0).Type) diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go index 20b4bc583b015..d92749589f709 100644 --- a/src/cmd/compile/internal/gc/racewalk.go +++ b/src/cmd/compile/internal/gc/racewalk.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/src" "cmd/internal/sys" @@ -59,8 +60,8 @@ func ispkgin(pkgs []string) bool { return false } -func instrument(fn *Node) { - if fn.Func.Pragma&Norace != 0 { +func instrument(fn *ir.Node) { + if fn.Func.Pragma&ir.Norace != 0 { return } @@ -82,8 +83,8 @@ func instrument(fn *Node) { // This only works for amd64. This will not // work on arm or others that might support // race in the future. - nodpc := nodfp.copy() - nodpc.Type = types.Types[TUINTPTR] + nodpc := ir.Copy(nodfp) + nodpc.Type = types.Types[types.TUINTPTR] nodpc.Xoffset = int64(-Widthptr) fn.Func.Dcl = append(fn.Func.Dcl, nodpc) fn.Func.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc)) diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index 568c5138ec30b..edaec21f920c5 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -6,13 +6,14 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/sys" "unicode/utf8" ) // range -func typecheckrange(n *Node) { +func typecheckrange(n *ir.Node) { // Typechecking order is important here: // 0. first typecheck range expression (slice/map/chan), // it is evaluated only once and so logically it is not part of the loop. 
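For quick reference while reading the typecheckrangeExpr hunk below, the
iteration-variable types it assigns are (editor's summary, not part of
the patch):

	array/slice:       v1 int, v2 element type
	map:               v1 key type, v2 element type
	channel (recv ok): v1 element type, no v2 allowed
	string:            v1 int, v2 rune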
@@ -38,7 +39,7 @@ func typecheckrange(n *Node) { decldepth-- } -func typecheckrangeExpr(n *Node) { +func typecheckrangeExpr(n *ir.Node) { n.Right = typecheck(n.Right, ctxExpr) t := n.Right.Type @@ -65,15 +66,15 @@ func typecheckrangeExpr(n *Node) { base.ErrorfAt(n.Pos, "cannot range over %L", n.Right) return - case TARRAY, TSLICE: - t1 = types.Types[TINT] + case types.TARRAY, types.TSLICE: + t1 = types.Types[types.TINT] t2 = t.Elem() - case TMAP: + case types.TMAP: t1 = t.Key() t2 = t.Elem() - case TCHAN: + case types.TCHAN: if !t.ChanDir().CanRecv() { base.ErrorfAt(n.Pos, "invalid operation: range %v (receive from send-only type %v)", n.Right, n.Right.Type) return @@ -85,8 +86,8 @@ func typecheckrangeExpr(n *Node) { toomany = true } - case TSTRING: - t1 = types.Types[TINT] + case types.TSTRING: + t1 = types.Types[types.TINT] t2 = types.Runetype } @@ -94,7 +95,7 @@ func typecheckrangeExpr(n *Node) { base.ErrorfAt(n.Pos, "too many variables in range") } - var v1, v2 *Node + var v1, v2 *ir.Node if n.List.Len() != 0 { v1 = n.List.First() } @@ -106,7 +107,7 @@ func typecheckrangeExpr(n *Node) { // "if the second iteration variable is the blank identifier, the range // clause is equivalent to the same clause with only the first variable // present." - if v2.isBlank() { + if ir.IsBlank(v2) { if v1 != nil { n.List.Set1(v1) } @@ -117,7 +118,7 @@ func typecheckrangeExpr(n *Node) { if v1.Name != nil && v1.Name.Defn == n { v1.Type = t1 } else if v1.Type != nil { - if op, why := assignop(t1, v1.Type); op == OXXX { + if op, why := assignop(t1, v1.Type); op == ir.OXXX { base.ErrorfAt(n.Pos, "cannot assign type %v to %L in range%s", t1, v1, why) } } @@ -128,7 +129,7 @@ func typecheckrangeExpr(n *Node) { if v2.Name != nil && v2.Name.Defn == n { v2.Type = t2 } else if v2.Type != nil { - if op, why := assignop(t2, v2.Type); op == OXXX { + if op, why := assignop(t2, v2.Type); op == ir.OXXX { base.ErrorfAt(n.Pos, "cannot assign type %v to %L in range%s", t2, v2, why) } } @@ -156,7 +157,7 @@ func cheapComputableIndex(width int64) bool { // simpler forms. The result must be assigned back to n. // Node n may also be modified in place, and may also be // the returned node. -func walkrange(n *Node) *Node { +func walkrange(n *ir.Node) *ir.Node { if isMapClear(n) { m := n.Right lno := setlineno(m) @@ -178,7 +179,7 @@ func walkrange(n *Node) *Node { lno := setlineno(a) n.Right = nil - var v1, v2 *Node + var v1, v2 *ir.Node l := n.List.Len() if l > 0 { v1 = n.List.First() @@ -188,11 +189,11 @@ func walkrange(n *Node) *Node { v2 = n.List.Second() } - if v2.isBlank() { + if ir.IsBlank(v2) { v2 = nil } - if v1.isBlank() && v2 == nil { + if ir.IsBlank(v1) && v2 == nil { v1 = nil } @@ -204,17 +205,17 @@ func walkrange(n *Node) *Node { // to avoid erroneous processing by racewalk. n.List.Set(nil) - var ifGuard *Node + var ifGuard *ir.Node - translatedLoopOp := OFOR + translatedLoopOp := ir.OFOR - var body []*Node - var init []*Node + var body []*ir.Node + var init []*ir.Node switch t.Etype { default: base.Fatalf("walkrange") - case TARRAY, TSLICE: + case types.TARRAY, types.TSLICE: if arrayClear(n, v1, v2, a) { base.Pos = lno return n @@ -223,14 +224,14 @@ func walkrange(n *Node) *Node { // order.stmt arranged for a copy of the array/slice variable if needed. 
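		// (Editor's sketch, not part of the patch.) The general
		// slice/array lowering assembled below is roughly:
		//
		//	hv1, hn := 0, len(ha)
		//	for ; hv1 < hn; hv1 = hv1 + 1 {
		//		v1, v2 = hv1, ha[hv1] // when both vars are present
		//		body
		//	}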
ha := a - hv1 := temp(types.Types[TINT]) - hn := temp(types.Types[TINT]) + hv1 := temp(types.Types[types.TINT]) + hn := temp(types.Types[types.TINT]) - init = append(init, nod(OAS, hv1, nil)) - init = append(init, nod(OAS, hn, nod(OLEN, ha, nil))) + init = append(init, ir.Nod(ir.OAS, hv1, nil)) + init = append(init, ir.Nod(ir.OAS, hn, ir.Nod(ir.OLEN, ha, nil))) - n.Left = nod(OLT, hv1, hn) - n.Right = nod(OAS, hv1, nod(OADD, hv1, nodintconst(1))) + n.Left = ir.Nod(ir.OLT, hv1, hn) + n.Right = ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1))) // for range ha { body } if v1 == nil { @@ -239,21 +240,21 @@ func walkrange(n *Node) *Node { // for v1 := range ha { body } if v2 == nil { - body = []*Node{nod(OAS, v1, hv1)} + body = []*ir.Node{ir.Nod(ir.OAS, v1, hv1)} break } // for v1, v2 := range ha { body } if cheapComputableIndex(n.Type.Elem().Width) { // v1, v2 = hv1, ha[hv1] - tmp := nod(OINDEX, ha, hv1) + tmp := ir.Nod(ir.OINDEX, ha, hv1) tmp.SetBounded(true) // Use OAS2 to correctly handle assignments // of the form "v1, a[v1] := range". - a := nod(OAS2, nil, nil) + a := ir.Nod(ir.OAS2, nil, nil) a.List.Set2(v1, v2) a.Rlist.Set2(hv1, tmp) - body = []*Node{a} + body = []*ir.Node{a} break } @@ -269,20 +270,20 @@ func walkrange(n *Node) *Node { // TODO(austin): OFORUNTIL inhibits bounds-check // elimination on the index variable (see #20711). // Enhance the prove pass to understand this. - ifGuard = nod(OIF, nil, nil) - ifGuard.Left = nod(OLT, hv1, hn) - translatedLoopOp = OFORUNTIL + ifGuard = ir.Nod(ir.OIF, nil, nil) + ifGuard.Left = ir.Nod(ir.OLT, hv1, hn) + translatedLoopOp = ir.OFORUNTIL hp := temp(types.NewPtr(n.Type.Elem())) - tmp := nod(OINDEX, ha, nodintconst(0)) + tmp := ir.Nod(ir.OINDEX, ha, nodintconst(0)) tmp.SetBounded(true) - init = append(init, nod(OAS, hp, nod(OADDR, tmp, nil))) + init = append(init, ir.Nod(ir.OAS, hp, ir.Nod(ir.OADDR, tmp, nil))) // Use OAS2 to correctly handle assignments // of the form "v1, a[v1] := range". - a := nod(OAS2, nil, nil) + a := ir.Nod(ir.OAS2, nil, nil) a.List.Set2(v1, v2) - a.Rlist.Set2(hv1, nod(ODEREF, hp, nil)) + a.Rlist.Set2(hv1, ir.Nod(ir.ODEREF, hp, nil)) body = append(body, a) // Advance pointer as part of the late increment. @@ -290,11 +291,11 @@ func walkrange(n *Node) *Node { // This runs *after* the condition check, so we know // advancing the pointer is safe and won't go past the // end of the allocation. - a = nod(OAS, hp, addptr(hp, t.Elem().Width)) + a = ir.Nod(ir.OAS, hp, addptr(hp, t.Elem().Width)) a = typecheck(a, ctxStmt) n.List.Set1(a) - case TMAP: + case types.TMAP: // order.stmt allocated the iterator for us. // we only use a once, so no copy needed. 
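		// (Editor's sketch, not part of the patch.) The map lowering
		// assembled below is roughly:
		//
		//	var hit hiter
		//	for mapiterinit(maptype, ha, &hit); hit.key != nil; mapiternext(&hit) {
		//		v1, v2 = *hit.key, *hit.elem
		//		body
		//	}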
ha := a @@ -308,29 +309,29 @@ func walkrange(n *Node) *Node { fn := syslook("mapiterinit") fn = substArgTypes(fn, t.Key(), t.Elem(), th) - init = append(init, mkcall1(fn, nil, nil, typename(t), ha, nod(OADDR, hit, nil))) - n.Left = nod(ONE, nodSym(ODOT, hit, keysym), nodnil()) + init = append(init, mkcall1(fn, nil, nil, typename(t), ha, ir.Nod(ir.OADDR, hit, nil))) + n.Left = ir.Nod(ir.ONE, nodSym(ir.ODOT, hit, keysym), nodnil()) fn = syslook("mapiternext") fn = substArgTypes(fn, th) - n.Right = mkcall1(fn, nil, nil, nod(OADDR, hit, nil)) + n.Right = mkcall1(fn, nil, nil, ir.Nod(ir.OADDR, hit, nil)) - key := nodSym(ODOT, hit, keysym) - key = nod(ODEREF, key, nil) + key := nodSym(ir.ODOT, hit, keysym) + key = ir.Nod(ir.ODEREF, key, nil) if v1 == nil { body = nil } else if v2 == nil { - body = []*Node{nod(OAS, v1, key)} + body = []*ir.Node{ir.Nod(ir.OAS, v1, key)} } else { - elem := nodSym(ODOT, hit, elemsym) - elem = nod(ODEREF, elem, nil) - a := nod(OAS2, nil, nil) + elem := nodSym(ir.ODOT, hit, elemsym) + elem = ir.Nod(ir.ODEREF, elem, nil) + a := ir.Nod(ir.OAS2, nil, nil) a.List.Set2(v1, v2) a.Rlist.Set2(key, elem) - body = []*Node{a} + body = []*ir.Node{a} } - case TCHAN: + case types.TCHAN: // order.stmt arranged for a copy of the channel variable. ha := a @@ -339,27 +340,27 @@ func walkrange(n *Node) *Node { hv1 := temp(t.Elem()) hv1.SetTypecheck(1) if t.Elem().HasPointers() { - init = append(init, nod(OAS, hv1, nil)) + init = append(init, ir.Nod(ir.OAS, hv1, nil)) } - hb := temp(types.Types[TBOOL]) + hb := temp(types.Types[types.TBOOL]) - n.Left = nod(ONE, hb, nodbool(false)) - a := nod(OAS2RECV, nil, nil) + n.Left = ir.Nod(ir.ONE, hb, nodbool(false)) + a := ir.Nod(ir.OAS2RECV, nil, nil) a.SetTypecheck(1) a.List.Set2(hv1, hb) - a.Right = nod(ORECV, ha, nil) + a.Right = ir.Nod(ir.ORECV, ha, nil) n.Left.Ninit.Set1(a) if v1 == nil { body = nil } else { - body = []*Node{nod(OAS, v1, hv1)} + body = []*ir.Node{ir.Nod(ir.OAS, v1, hv1)} } // Zero hv1. This prevents hv1 from being the sole, inaccessible // reference to an otherwise GC-able value during the next channel receive. // See issue 15281. - body = append(body, nod(OAS, hv1, nil)) + body = append(body, ir.Nod(ir.OAS, hv1, nil)) - case TSTRING: + case types.TSTRING: // Transform string range statements like "for v1, v2 = range a" into // // ha := a @@ -378,35 +379,35 @@ func walkrange(n *Node) *Node { // order.stmt arranged for a copy of the string variable. 
ha := a - hv1 := temp(types.Types[TINT]) - hv1t := temp(types.Types[TINT]) + hv1 := temp(types.Types[types.TINT]) + hv1t := temp(types.Types[types.TINT]) hv2 := temp(types.Runetype) // hv1 := 0 - init = append(init, nod(OAS, hv1, nil)) + init = append(init, ir.Nod(ir.OAS, hv1, nil)) // hv1 < len(ha) - n.Left = nod(OLT, hv1, nod(OLEN, ha, nil)) + n.Left = ir.Nod(ir.OLT, hv1, ir.Nod(ir.OLEN, ha, nil)) if v1 != nil { // hv1t = hv1 - body = append(body, nod(OAS, hv1t, hv1)) + body = append(body, ir.Nod(ir.OAS, hv1t, hv1)) } // hv2 := rune(ha[hv1]) - nind := nod(OINDEX, ha, hv1) + nind := ir.Nod(ir.OINDEX, ha, hv1) nind.SetBounded(true) - body = append(body, nod(OAS, hv2, conv(nind, types.Runetype))) + body = append(body, ir.Nod(ir.OAS, hv2, conv(nind, types.Runetype))) // if hv2 < utf8.RuneSelf - nif := nod(OIF, nil, nil) - nif.Left = nod(OLT, hv2, nodintconst(utf8.RuneSelf)) + nif := ir.Nod(ir.OIF, nil, nil) + nif.Left = ir.Nod(ir.OLT, hv2, nodintconst(utf8.RuneSelf)) // hv1++ - nif.Nbody.Set1(nod(OAS, hv1, nod(OADD, hv1, nodintconst(1)))) + nif.Nbody.Set1(ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1)))) // } else { - eif := nod(OAS2, nil, nil) + eif := ir.Nod(ir.OAS2, nil, nil) nif.Rlist.Set1(eif) // hv2, hv1 = decoderune(ha, hv1) @@ -419,13 +420,13 @@ func walkrange(n *Node) *Node { if v1 != nil { if v2 != nil { // v1, v2 = hv1t, hv2 - a := nod(OAS2, nil, nil) + a := ir.Nod(ir.OAS2, nil, nil) a.List.Set2(v1, v2) a.Rlist.Set2(hv1t, hv2) body = append(body, a) } else { // v1 = hv1t - body = append(body, nod(OAS, v1, hv1t)) + body = append(body, ir.Nod(ir.OAS, v1, hv1t)) } } } @@ -466,17 +467,17 @@ func walkrange(n *Node) *Node { // } // // where == for keys of map m is reflexive. -func isMapClear(n *Node) bool { +func isMapClear(n *ir.Node) bool { if base.Flag.N != 0 || instrumenting { return false } - if n.Op != ORANGE || n.Type.Etype != TMAP || n.List.Len() != 1 { + if n.Op != ir.ORANGE || n.Type.Etype != types.TMAP || n.List.Len() != 1 { return false } k := n.List.First() - if k == nil || k.isBlank() { + if k == nil || ir.IsBlank(k) { return false } @@ -490,7 +491,7 @@ func isMapClear(n *Node) bool { } stmt := n.Nbody.First() // only stmt in body - if stmt == nil || stmt.Op != ODELETE { + if stmt == nil || stmt.Op != ir.ODELETE { return false } @@ -508,7 +509,7 @@ func isMapClear(n *Node) bool { } // mapClear constructs a call to runtime.mapclear for the map m. -func mapClear(m *Node) *Node { +func mapClear(m *ir.Node) *ir.Node { t := m.Type // instantiate mapclear(typ *type, hmap map[any]any) @@ -533,7 +534,7 @@ func mapClear(m *Node) *Node { // in which the evaluation of a is side-effect-free. // // Parameters are as in walkrange: "for v1, v2 = range a". 
-func arrayClear(n, v1, v2, a *Node) bool { +func arrayClear(n, v1, v2, a *ir.Node) bool { if base.Flag.N != 0 || instrumenting { return false } @@ -547,7 +548,7 @@ func arrayClear(n, v1, v2, a *Node) bool { } stmt := n.Nbody.First() // only stmt in body - if stmt.Op != OAS || stmt.Left.Op != OINDEX { + if stmt.Op != ir.OAS || stmt.Left.Op != ir.OINDEX { return false } @@ -567,32 +568,32 @@ func arrayClear(n, v1, v2, a *Node) bool { // memclr{NoHeap,Has}Pointers(hp, hn) // i = len(a) - 1 // } - n.Op = OIF + n.Op = ir.OIF n.Nbody.Set(nil) - n.Left = nod(ONE, nod(OLEN, a, nil), nodintconst(0)) + n.Left = ir.Nod(ir.ONE, ir.Nod(ir.OLEN, a, nil), nodintconst(0)) // hp = &a[0] - hp := temp(types.Types[TUNSAFEPTR]) + hp := temp(types.Types[types.TUNSAFEPTR]) - tmp := nod(OINDEX, a, nodintconst(0)) + tmp := ir.Nod(ir.OINDEX, a, nodintconst(0)) tmp.SetBounded(true) - tmp = nod(OADDR, tmp, nil) - tmp = convnop(tmp, types.Types[TUNSAFEPTR]) - n.Nbody.Append(nod(OAS, hp, tmp)) + tmp = ir.Nod(ir.OADDR, tmp, nil) + tmp = convnop(tmp, types.Types[types.TUNSAFEPTR]) + n.Nbody.Append(ir.Nod(ir.OAS, hp, tmp)) // hn = len(a) * sizeof(elem(a)) - hn := temp(types.Types[TUINTPTR]) + hn := temp(types.Types[types.TUINTPTR]) - tmp = nod(OLEN, a, nil) - tmp = nod(OMUL, tmp, nodintconst(elemsize)) - tmp = conv(tmp, types.Types[TUINTPTR]) - n.Nbody.Append(nod(OAS, hn, tmp)) + tmp = ir.Nod(ir.OLEN, a, nil) + tmp = ir.Nod(ir.OMUL, tmp, nodintconst(elemsize)) + tmp = conv(tmp, types.Types[types.TUINTPTR]) + n.Nbody.Append(ir.Nod(ir.OAS, hn, tmp)) - var fn *Node + var fn *ir.Node if a.Type.Elem().HasPointers() { // memclrHasPointers(hp, hn) - Curfn.Func.setWBPos(stmt.Pos) + Curfn.Func.SetWBPos(stmt.Pos) fn = mkcall("memclrHasPointers", nil, nil, hp, hn) } else { // memclrNoHeapPointers(hp, hn) @@ -602,7 +603,7 @@ func arrayClear(n, v1, v2, a *Node) bool { n.Nbody.Append(fn) // i = len(a) - 1 - v1 = nod(OAS, v1, nod(OSUB, nod(OLEN, a, nil), nodintconst(1))) + v1 = ir.Nod(ir.OAS, v1, ir.Nod(ir.OSUB, ir.Nod(ir.OLEN, a, nil), nodintconst(1))) n.Nbody.Append(v1) @@ -614,15 +615,15 @@ func arrayClear(n, v1, v2, a *Node) bool { } // addptr returns (*T)(uintptr(p) + n). -func addptr(p *Node, n int64) *Node { +func addptr(p *ir.Node, n int64) *ir.Node { t := p.Type - p = nod(OCONVNOP, p, nil) - p.Type = types.Types[TUINTPTR] + p = ir.Nod(ir.OCONVNOP, p, nil) + p.Type = types.Types[types.TUINTPTR] - p = nod(OADD, p, nodintconst(n)) + p = ir.Nod(ir.OADD, p, nodintconst(n)) - p = nod(OCONVNOP, p, nil) + p = ir.Nod(ir.OCONVNOP, p, nil) p.Type = t return p diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 456903e7d75f3..34047bfefa6ff 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/gcprog" "cmd/internal/obj" @@ -84,7 +85,7 @@ func bmap(t *types.Type) *types.Type { return t.MapType().Bucket } - bucket := types.New(TSTRUCT) + bucket := types.New(types.TSTRUCT) keytype := t.Key() elemtype := t.Elem() dowidth(keytype) @@ -99,7 +100,7 @@ func bmap(t *types.Type) *types.Type { field := make([]*types.Field, 0, 5) // The first field is: uint8 topbits[BUCKETSIZE]. 
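	// (Editor's sketch, not part of the patch.) With BUCKETSIZE == 8,
	// the bucket assembled below corresponds roughly to:
	//
	//	struct {
	//		topbits  [8]uint8
	//		keys     [8]keyType
	//		elems    [8]elemType
	//		overflow *bucket // uintptr when key and elem are pointer-free
	//	}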
- arr := types.NewArray(types.Types[TUINT8], BUCKETSIZE) + arr := types.NewArray(types.Types[types.TUINT8], BUCKETSIZE) field = append(field, makefield("topbits", arr)) arr = types.NewArray(keytype, BUCKETSIZE) @@ -120,7 +121,7 @@ func bmap(t *types.Type) *types.Type { // See comment on hmap.overflow in runtime/map.go. otyp := types.NewPtr(bucket) if !elemtype.HasPointers() && !keytype.HasPointers() { - otyp = types.Types[TUINTPTR] + otyp = types.Types[types.TUINTPTR] } overflow := makefield("overflow", otyp) field = append(field, overflow) @@ -209,18 +210,18 @@ func hmap(t *types.Type) *types.Type { // } // must match runtime/map.go:hmap. fields := []*types.Field{ - makefield("count", types.Types[TINT]), - makefield("flags", types.Types[TUINT8]), - makefield("B", types.Types[TUINT8]), - makefield("noverflow", types.Types[TUINT16]), - makefield("hash0", types.Types[TUINT32]), // Used in walk.go for OMAKEMAP. - makefield("buckets", types.NewPtr(bmap)), // Used in walk.go for OMAKEMAP. + makefield("count", types.Types[types.TINT]), + makefield("flags", types.Types[types.TUINT8]), + makefield("B", types.Types[types.TUINT8]), + makefield("noverflow", types.Types[types.TUINT16]), + makefield("hash0", types.Types[types.TUINT32]), // Used in walk.go for OMAKEMAP. + makefield("buckets", types.NewPtr(bmap)), // Used in walk.go for OMAKEMAP. makefield("oldbuckets", types.NewPtr(bmap)), - makefield("nevacuate", types.Types[TUINTPTR]), - makefield("extra", types.Types[TUNSAFEPTR]), + makefield("nevacuate", types.Types[types.TUINTPTR]), + makefield("extra", types.Types[types.TUNSAFEPTR]), } - hmap := types.New(TSTRUCT) + hmap := types.New(types.TSTRUCT) hmap.SetNoalg(true) hmap.SetFields(fields) dowidth(hmap) @@ -268,23 +269,23 @@ func hiter(t *types.Type) *types.Type { fields := []*types.Field{ makefield("key", types.NewPtr(t.Key())), // Used in range.go for TMAP. makefield("elem", types.NewPtr(t.Elem())), // Used in range.go for TMAP. - makefield("t", types.Types[TUNSAFEPTR]), + makefield("t", types.Types[types.TUNSAFEPTR]), makefield("h", types.NewPtr(hmap)), makefield("buckets", types.NewPtr(bmap)), makefield("bptr", types.NewPtr(bmap)), - makefield("overflow", types.Types[TUNSAFEPTR]), - makefield("oldoverflow", types.Types[TUNSAFEPTR]), - makefield("startBucket", types.Types[TUINTPTR]), - makefield("offset", types.Types[TUINT8]), - makefield("wrapped", types.Types[TBOOL]), - makefield("B", types.Types[TUINT8]), - makefield("i", types.Types[TUINT8]), - makefield("bucket", types.Types[TUINTPTR]), - makefield("checkBucket", types.Types[TUINTPTR]), + makefield("overflow", types.Types[types.TUNSAFEPTR]), + makefield("oldoverflow", types.Types[types.TUNSAFEPTR]), + makefield("startBucket", types.Types[types.TUINTPTR]), + makefield("offset", types.Types[types.TUINT8]), + makefield("wrapped", types.Types[types.TBOOL]), + makefield("B", types.Types[types.TUINT8]), + makefield("i", types.Types[types.TUINT8]), + makefield("bucket", types.Types[types.TUINTPTR]), + makefield("checkBucket", types.Types[types.TUINTPTR]), } // build iterator struct holding the above fields - hiter := types.New(TSTRUCT) + hiter := types.New(types.TSTRUCT) hiter.SetNoalg(true) hiter.SetFields(fields) dowidth(hiter) @@ -303,35 +304,35 @@ func deferstruct(stksize int64) *types.Type { // Unlike the global makefield function, this one needs to set Pkg // because these types might be compared (in SSA CSE sorting). // TODO: unify this makefield and the global one above. 
- sym := &types.Sym{Name: name, Pkg: localpkg} + sym := &types.Sym{Name: name, Pkg: ir.LocalPkg} return types.NewField(src.NoXPos, sym, typ) } - argtype := types.NewArray(types.Types[TUINT8], stksize) + argtype := types.NewArray(types.Types[types.TUINT8], stksize) argtype.Width = stksize argtype.Align = 1 // These fields must match the ones in runtime/runtime2.go:_defer and // cmd/compile/internal/gc/ssa.go:(*state).call. fields := []*types.Field{ - makefield("siz", types.Types[TUINT32]), - makefield("started", types.Types[TBOOL]), - makefield("heap", types.Types[TBOOL]), - makefield("openDefer", types.Types[TBOOL]), - makefield("sp", types.Types[TUINTPTR]), - makefield("pc", types.Types[TUINTPTR]), + makefield("siz", types.Types[types.TUINT32]), + makefield("started", types.Types[types.TBOOL]), + makefield("heap", types.Types[types.TBOOL]), + makefield("openDefer", types.Types[types.TBOOL]), + makefield("sp", types.Types[types.TUINTPTR]), + makefield("pc", types.Types[types.TUINTPTR]), // Note: the types here don't really matter. Defer structures // are always scanned explicitly during stack copying and GC, // so we make them uintptr type even though they are real pointers. - makefield("fn", types.Types[TUINTPTR]), - makefield("_panic", types.Types[TUINTPTR]), - makefield("link", types.Types[TUINTPTR]), - makefield("framepc", types.Types[TUINTPTR]), - makefield("varp", types.Types[TUINTPTR]), - makefield("fd", types.Types[TUINTPTR]), + makefield("fn", types.Types[types.TUINTPTR]), + makefield("_panic", types.Types[types.TUINTPTR]), + makefield("link", types.Types[types.TUINTPTR]), + makefield("framepc", types.Types[types.TUINTPTR]), + makefield("varp", types.Types[types.TUINTPTR]), + makefield("fd", types.Types[types.TUINTPTR]), makefield("args", argtype), } // build struct holding the above fields - s := types.New(TSTRUCT) + s := types.New(types.TSTRUCT) s.SetNoalg(true) s.SetFields(fields) s.Width = widstruct(s, s, 0, 1) @@ -346,7 +347,7 @@ func methodfunc(f *types.Type, receiver *types.Type) *types.Type { if receiver != nil { inLen++ } - in := make([]*Node, 0, inLen) + in := make([]*ir.Node, 0, inLen) if receiver != nil { d := anonfield(receiver) @@ -360,7 +361,7 @@ func methodfunc(f *types.Type, receiver *types.Type) *types.Type { } outLen := f.Results().Fields().Len() - out := make([]*Node, 0, outLen) + out := make([]*ir.Node, 0, outLen) for _, t := range f.Results().Fields().Slice() { d := anonfield(t.Type) out = append(out, d) @@ -447,7 +448,7 @@ func methods(t *types.Type) []*Sig { func imethods(t *types.Type) []*Sig { var methods []*Sig for _, f := range t.Fields().Slice() { - if f.Type.Etype != TFUNC || f.Sym == nil { + if f.Type.Etype != types.TFUNC || f.Sym == nil { continue } if f.Sym.IsBlank() { @@ -494,7 +495,7 @@ func dimportpath(p *types.Pkg) { } str := p.Path - if p == localpkg { + if p == ir.LocalPkg { // Note: myimportpath != "", or else dgopkgpath won't call dimportpath. str = base.Ctxt.Pkgpath } @@ -511,7 +512,7 @@ func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int { return duintptr(s, ot, 0) } - if pkg == localpkg && base.Ctxt.Pkgpath == "" { + if pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "" { // If we don't know the full import path of the package being compiled // (i.e. -p was not passed on the compiler command line), emit a reference to // type..importpath.""., which the linker will rewrite using the correct import path. 
@@ -530,7 +531,7 @@ func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int { if pkg == nil { return duint32(s, ot, 0) } - if pkg == localpkg && base.Ctxt.Pkgpath == "" { + if pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "" { // If we don't know the full import path of the package being compiled // (i.e. -p was not passed on the compiler command line), emit a reference to // type..importpath.""., which the linker will rewrite using the correct import path. @@ -674,7 +675,7 @@ func typePkg(t *types.Type) *types.Pkg { tsym := t.Sym if tsym == nil { switch t.Etype { - case TARRAY, TSLICE, TPTR, TCHAN: + case types.TARRAY, types.TSLICE, types.TPTR, types.TCHAN: if t.Elem() != nil { tsym = t.Elem().Sym } @@ -717,32 +718,32 @@ func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int { } var kinds = []int{ - TINT: objabi.KindInt, - TUINT: objabi.KindUint, - TINT8: objabi.KindInt8, - TUINT8: objabi.KindUint8, - TINT16: objabi.KindInt16, - TUINT16: objabi.KindUint16, - TINT32: objabi.KindInt32, - TUINT32: objabi.KindUint32, - TINT64: objabi.KindInt64, - TUINT64: objabi.KindUint64, - TUINTPTR: objabi.KindUintptr, - TFLOAT32: objabi.KindFloat32, - TFLOAT64: objabi.KindFloat64, - TBOOL: objabi.KindBool, - TSTRING: objabi.KindString, - TPTR: objabi.KindPtr, - TSTRUCT: objabi.KindStruct, - TINTER: objabi.KindInterface, - TCHAN: objabi.KindChan, - TMAP: objabi.KindMap, - TARRAY: objabi.KindArray, - TSLICE: objabi.KindSlice, - TFUNC: objabi.KindFunc, - TCOMPLEX64: objabi.KindComplex64, - TCOMPLEX128: objabi.KindComplex128, - TUNSAFEPTR: objabi.KindUnsafePointer, + types.TINT: objabi.KindInt, + types.TUINT: objabi.KindUint, + types.TINT8: objabi.KindInt8, + types.TUINT8: objabi.KindUint8, + types.TINT16: objabi.KindInt16, + types.TUINT16: objabi.KindUint16, + types.TINT32: objabi.KindInt32, + types.TUINT32: objabi.KindUint32, + types.TINT64: objabi.KindInt64, + types.TUINT64: objabi.KindUint64, + types.TUINTPTR: objabi.KindUintptr, + types.TFLOAT32: objabi.KindFloat32, + types.TFLOAT64: objabi.KindFloat64, + types.TBOOL: objabi.KindBool, + types.TSTRING: objabi.KindString, + types.TPTR: objabi.KindPtr, + types.TSTRUCT: objabi.KindStruct, + types.TINTER: objabi.KindInterface, + types.TCHAN: objabi.KindChan, + types.TMAP: objabi.KindMap, + types.TARRAY: objabi.KindArray, + types.TSLICE: objabi.KindSlice, + types.TFUNC: objabi.KindFunc, + types.TCOMPLEX64: objabi.KindComplex64, + types.TCOMPLEX128: objabi.KindComplex128, + types.TUNSAFEPTR: objabi.KindUnsafePointer, } // typeptrdata returns the length in bytes of the prefix of t @@ -753,32 +754,32 @@ func typeptrdata(t *types.Type) int64 { } switch t.Etype { - case TPTR, - TUNSAFEPTR, - TFUNC, - TCHAN, - TMAP: + case types.TPTR, + types.TUNSAFEPTR, + types.TFUNC, + types.TCHAN, + types.TMAP: return int64(Widthptr) - case TSTRING: + case types.TSTRING: // struct { byte *str; intgo len; } return int64(Widthptr) - case TINTER: + case types.TINTER: // struct { Itab *tab; void *data; } or // struct { Type *type; void *data; } // Note: see comment in plive.go:onebitwalktype1. return 2 * int64(Widthptr) - case TSLICE: + case types.TSLICE: // struct { byte *array; uintgo len; uintgo cap; } return int64(Widthptr) - case TARRAY: + case types.TARRAY: // haspointers already eliminated t.NumElem() == 0. return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem()) - case TSTRUCT: + case types.TSTRUCT: // Find the last field that has pointers. 
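		// (Editor's example, not part of the patch.) For
		// struct{ x int64; p *byte; y int64 } on a 64-bit target, the
		// last pointer field is p at offset 8, so typeptrdata is
		// 8 + typeptrdata(*byte) = 16: the GC never needs to scan
		// beyond the prefix that can contain pointers.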
var lastPtrField *types.Field for _, t1 := range t.Fields().Slice() { @@ -989,38 +990,38 @@ func typenamesym(t *types.Type) *types.Sym { return s } -func typename(t *types.Type) *Node { +func typename(t *types.Type) *ir.Node { s := typenamesym(t) if s.Def == nil { - n := newnamel(src.NoXPos, s) - n.Type = types.Types[TUINT8] - n.SetClass(PEXTERN) + n := ir.NewNameAt(src.NoXPos, s) + n.Type = types.Types[types.TUINT8] + n.SetClass(ir.PEXTERN) n.SetTypecheck(1) - s.Def = asTypesNode(n) + s.Def = ir.AsTypesNode(n) } - n := nod(OADDR, asNode(s.Def), nil) - n.Type = types.NewPtr(asNode(s.Def).Type) + n := ir.Nod(ir.OADDR, ir.AsNode(s.Def), nil) + n.Type = types.NewPtr(ir.AsNode(s.Def).Type) n.SetTypecheck(1) return n } -func itabname(t, itype *types.Type) *Node { +func itabname(t, itype *types.Type) *ir.Node { if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() { base.Fatalf("itabname(%v, %v)", t, itype) } s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString()) if s.Def == nil { - n := newname(s) - n.Type = types.Types[TUINT8] - n.SetClass(PEXTERN) + n := NewName(s) + n.Type = types.Types[types.TUINT8] + n.SetClass(ir.PEXTERN) n.SetTypecheck(1) - s.Def = asTypesNode(n) + s.Def = ir.AsTypesNode(n) itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()}) } - n := nod(OADDR, asNode(s.Def), nil) - n.Type = types.NewPtr(asNode(s.Def).Type) + n := ir.Nod(ir.OADDR, ir.AsNode(s.Def), nil) + n.Type = types.NewPtr(ir.AsNode(s.Def).Type) n.SetTypecheck(1) return n } @@ -1029,35 +1030,35 @@ func itabname(t, itype *types.Type) *Node { // That is, if x==x for all x of type t. func isreflexive(t *types.Type) bool { switch t.Etype { - case TBOOL, - TINT, - TUINT, - TINT8, - TUINT8, - TINT16, - TUINT16, - TINT32, - TUINT32, - TINT64, - TUINT64, - TUINTPTR, - TPTR, - TUNSAFEPTR, - TSTRING, - TCHAN: + case types.TBOOL, + types.TINT, + types.TUINT, + types.TINT8, + types.TUINT8, + types.TINT16, + types.TUINT16, + types.TINT32, + types.TUINT32, + types.TINT64, + types.TUINT64, + types.TUINTPTR, + types.TPTR, + types.TUNSAFEPTR, + types.TSTRING, + types.TCHAN: return true - case TFLOAT32, - TFLOAT64, - TCOMPLEX64, - TCOMPLEX128, - TINTER: + case types.TFLOAT32, + types.TFLOAT64, + types.TCOMPLEX64, + types.TCOMPLEX128, + types.TINTER: return false - case TARRAY: + case types.TARRAY: return isreflexive(t.Elem()) - case TSTRUCT: + case types.TSTRUCT: for _, t1 := range t.Fields().Slice() { if !isreflexive(t1.Type) { return false @@ -1075,19 +1076,19 @@ func isreflexive(t *types.Type) bool { // need the key to be updated. 
func needkeyupdate(t *types.Type) bool { switch t.Etype { - case TBOOL, TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, - TINT64, TUINT64, TUINTPTR, TPTR, TUNSAFEPTR, TCHAN: + case types.TBOOL, types.TINT, types.TUINT, types.TINT8, types.TUINT8, types.TINT16, types.TUINT16, types.TINT32, types.TUINT32, + types.TINT64, types.TUINT64, types.TUINTPTR, types.TPTR, types.TUNSAFEPTR, types.TCHAN: return false - case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, // floats and complex can be +0/-0 - TINTER, - TSTRING: // strings might have smaller backing stores + case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128, // floats and complex can be +0/-0 + types.TINTER, + types.TSTRING: // strings might have smaller backing stores return true - case TARRAY: + case types.TARRAY: return needkeyupdate(t.Elem()) - case TSTRUCT: + case types.TSTRUCT: for _, t1 := range t.Fields().Slice() { if needkeyupdate(t1.Type) { return true @@ -1104,13 +1105,13 @@ func needkeyupdate(t *types.Type) bool { // hashMightPanic reports whether the hash of a map key of type t might panic. func hashMightPanic(t *types.Type) bool { switch t.Etype { - case TINTER: + case types.TINTER: return true - case TARRAY: + case types.TARRAY: return hashMightPanic(t.Elem()) - case TSTRUCT: + case types.TSTRUCT: for _, t1 := range t.Fields().Slice() { if hashMightPanic(t1.Type) { return true @@ -1161,7 +1162,7 @@ func dtypesym(t *types.Type) *obj.LSym { if base.Ctxt.Pkgpath != "runtime" || (tbase != types.Types[tbase.Etype] && tbase != types.Bytetype && tbase != types.Runetype && tbase != types.Errortype) { // int, float, etc // named types from other files are defined only by those files - if tbase.Sym != nil && tbase.Sym.Pkg != localpkg { + if tbase.Sym != nil && tbase.Sym.Pkg != ir.LocalPkg { if i, ok := typeSymIdx[tbase]; ok { lsym.Pkg = tbase.Sym.Pkg.Prefix if t != tbase { @@ -1174,7 +1175,7 @@ func dtypesym(t *types.Type) *obj.LSym { return lsym } // TODO(mdempsky): Investigate whether this can happen. 
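
hashMightPanic likewise matches observable behavior: hashing an interface key whose dynamic type is unhashable panics at map access time, which is why interface-keyed maps need the panic-guard path. A small self-contained check, again plain user code:

	package main

	import "fmt"

	func main() {
		defer func() {
			// The lookup below panics while hashing the key:
			// "runtime error: hash of unhashable type []int".
			fmt.Println("recovered:", recover())
		}()
		m := map[interface{}]int{}
		m[[]int{1}] = 1 // dynamic type []int is unhashable
	}
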
- if tbase.Etype == TFORW { + if tbase.Etype == types.TFORW { return lsym } } @@ -1185,7 +1186,7 @@ func dtypesym(t *types.Type) *obj.LSym { ot = dcommontype(lsym, t) ot = dextratype(lsym, ot, t, 0) - case TARRAY: + case types.TARRAY: // ../../../../runtime/type.go:/arrayType s1 := dtypesym(t.Elem()) t2 := types.NewSlice(t.Elem()) @@ -1196,14 +1197,14 @@ func dtypesym(t *types.Type) *obj.LSym { ot = duintptr(lsym, ot, uint64(t.NumElem())) ot = dextratype(lsym, ot, t, 0) - case TSLICE: + case types.TSLICE: // ../../../../runtime/type.go:/sliceType s1 := dtypesym(t.Elem()) ot = dcommontype(lsym, t) ot = dsymptr(lsym, ot, s1, 0) ot = dextratype(lsym, ot, t, 0) - case TCHAN: + case types.TCHAN: // ../../../../runtime/type.go:/chanType s1 := dtypesym(t.Elem()) ot = dcommontype(lsym, t) @@ -1211,7 +1212,7 @@ func dtypesym(t *types.Type) *obj.LSym { ot = duintptr(lsym, ot, uint64(t.ChanDir())) ot = dextratype(lsym, ot, t, 0) - case TFUNC: + case types.TFUNC: for _, t1 := range t.Recvs().Fields().Slice() { dtypesym(t1.Type) } @@ -1250,7 +1251,7 @@ func dtypesym(t *types.Type) *obj.LSym { ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0) } - case TINTER: + case types.TINTER: m := imethods(t) n := len(m) for _, a := range m { @@ -1286,7 +1287,7 @@ func dtypesym(t *types.Type) *obj.LSym { } // ../../../../runtime/type.go:/mapType - case TMAP: + case types.TMAP: s1 := dtypesym(t.Key()) s2 := dtypesym(t.Elem()) s3 := dtypesym(bmap(t)) @@ -1326,8 +1327,8 @@ func dtypesym(t *types.Type) *obj.LSym { ot = duint32(lsym, ot, flags) ot = dextratype(lsym, ot, t, 0) - case TPTR: - if t.Elem().Etype == TANY { + case types.TPTR: + if t.Elem().Etype == types.TANY { // ../../../../runtime/type.go:/UnsafePointerType ot = dcommontype(lsym, t) ot = dextratype(lsym, ot, t, 0) @@ -1344,7 +1345,7 @@ func dtypesym(t *types.Type) *obj.LSym { // ../../../../runtime/type.go:/structType // for security, only the exported fields. - case TSTRUCT: + case types.TSTRUCT: fields := t.Fields().Slice() for _, t1 := range fields { dtypesym(t1.Type) @@ -1403,7 +1404,7 @@ func dtypesym(t *types.Type) *obj.LSym { // functions must return the existing type structure rather // than creating a new one. switch t.Etype { - case TPTR, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT: + case types.TPTR, types.TARRAY, types.TCHAN, types.TFUNC, types.TMAP, types.TSLICE, types.TSTRUCT: keep = true } } @@ -1515,10 +1516,10 @@ func addsignat(t *types.Type) { } } -func addsignats(dcls []*Node) { +func addsignats(dcls []*ir.Node) { // copy types from dcl list to signatset for _, n := range dcls { - if n.Op == OTYPE { + if n.Op == ir.OTYPE { addsignat(n.Type) } } @@ -1571,7 +1572,7 @@ func dumptabs() { } // process ptabs - if localpkg.Name == "main" && len(ptabs) > 0 { + if ir.LocalPkg.Name == "main" && len(ptabs) > 0 { ot := 0 s := base.Ctxt.Lookup("go.plugin.tabs") for _, p := range ptabs { @@ -1615,17 +1616,17 @@ func dumpbasictypes() { // another possible choice would be package main, // but using runtime means fewer copies in object files. if base.Ctxt.Pkgpath == "runtime" { - for i := types.EType(1); i <= TBOOL; i++ { + for i := types.EType(1); i <= types.TBOOL; i++ { dtypesym(types.NewPtr(types.Types[i])) } - dtypesym(types.NewPtr(types.Types[TSTRING])) - dtypesym(types.NewPtr(types.Types[TUNSAFEPTR])) + dtypesym(types.NewPtr(types.Types[types.TSTRING])) + dtypesym(types.NewPtr(types.Types[types.TUNSAFEPTR])) // emit type structs for error and func(error) string. // The latter is the type of an auto-generated wrapper. 
dtypesym(types.NewPtr(types.Errortype)) - dtypesym(functype(nil, []*Node{anonfield(types.Errortype)}, []*Node{anonfield(types.Types[TSTRING])})) + dtypesym(functype(nil, []*ir.Node{anonfield(types.Errortype)}, []*ir.Node{anonfield(types.Types[types.TSTRING])})) // add paths for runtime and main, which 6l imports implicitly. dimportpath(Runtimepkg) @@ -1767,7 +1768,7 @@ func fillptrmask(t *types.Type, ptrmask []byte) { // For non-trivial arrays, the program describes the full t.Width size. func dgcprog(t *types.Type) (*obj.LSym, int64) { dowidth(t) - if t.Width == BADWIDTH { + if t.Width == types.BADWIDTH { base.Fatalf("dgcprog: %v badwidth", t) } lsym := typesymprefix(".gcprog", t).Linksym() @@ -1824,17 +1825,17 @@ func (p *GCProg) emit(t *types.Type, offset int64) { default: base.Fatalf("GCProg.emit: unexpected type %v", t) - case TSTRING: + case types.TSTRING: p.w.Ptr(offset / int64(Widthptr)) - case TINTER: + case types.TINTER: // Note: the first word isn't a pointer. See comment in plive.go:onebitwalktype1. p.w.Ptr(offset/int64(Widthptr) + 1) - case TSLICE: + case types.TSLICE: p.w.Ptr(offset / int64(Widthptr)) - case TARRAY: + case types.TARRAY: if t.NumElem() == 0 { // should have been handled by haspointers check above base.Fatalf("GCProg.emit: empty array") @@ -1859,7 +1860,7 @@ func (p *GCProg) emit(t *types.Type, offset int64) { p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr)) p.w.Repeat(elem.Width/int64(Widthptr), count-1) - case TSTRUCT: + case types.TSTRUCT: for _, t1 := range t.Fields().Slice() { p.emit(t1.Type, offset+t1.Offset) } @@ -1868,7 +1869,7 @@ func (p *GCProg) emit(t *types.Type, offset int64) { // zeroaddr returns the address of a symbol with at least // size bytes of zeros. -func zeroaddr(size int64) *Node { +func zeroaddr(size int64) *ir.Node { if size >= 1<<31 { base.Fatalf("map elem too big %d", size) } @@ -1877,14 +1878,14 @@ func zeroaddr(size int64) *Node { } s := mappkg.Lookup("zero") if s.Def == nil { - x := newname(s) - x.Type = types.Types[TUINT8] - x.SetClass(PEXTERN) + x := NewName(s) + x.Type = types.Types[types.TUINT8] + x.SetClass(ir.PEXTERN) x.SetTypecheck(1) - s.Def = asTypesNode(x) + s.Def = ir.AsTypesNode(x) } - z := nod(OADDR, asNode(s.Def), nil) - z.Type = types.NewPtr(types.Types[TUINT8]) + z := ir.Nod(ir.OADDR, ir.AsNode(s.Def), nil) + z.Type = types.NewPtr(types.Types[types.TUINT8]) z.SetTypecheck(1) return z } diff --git a/src/cmd/compile/internal/gc/scc.go b/src/cmd/compile/internal/gc/scc.go index 891012cbc9f2b..ddde18e50541d 100644 --- a/src/cmd/compile/internal/gc/scc.go +++ b/src/cmd/compile/internal/gc/scc.go @@ -4,6 +4,8 @@ package gc +import "cmd/compile/internal/ir" + // Strongly connected components. // // Run analysis on minimal sets of mutually recursive functions @@ -30,10 +32,10 @@ package gc // when analyzing a set of mutually recursive functions. type bottomUpVisitor struct { - analyze func([]*Node, bool) + analyze func([]*ir.Node, bool) visitgen uint32 - nodeID map[*Node]uint32 - stack []*Node + nodeID map[*ir.Node]uint32 + stack []*ir.Node } // visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list. @@ -49,18 +51,18 @@ type bottomUpVisitor struct { // If recursive is false, the list consists of only a single function and its closures. // If recursive is true, the list may still contain only a single function, // if that function is itself recursive. 
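
For reference, the pruned Tarjan walk that bottomUpVisitor performs, as a standalone toy over a call graph keyed by function name instead of IR nodes. The visitor, graph, id, and analyze names here are invented stand-ins for the fields being renamed in this hunk, not compiler API:

	package main

	import "fmt"

	type visitor struct {
		graph   map[string][]string // caller -> callees
		analyze func(block []string, recursive bool)
		gen     uint32
		id      map[string]uint32
		stack   []string
	}

	func (v *visitor) visit(fn string) uint32 {
		if id := v.id[fn]; id > 0 {
			return id // already visited
		}
		v.gen++
		id := v.gen
		v.id[fn] = id
		v.gen++
		min := v.gen
		v.stack = append(v.stack, fn)

		for _, callee := range v.graph[fn] {
			if m := v.visit(callee); m < min {
				min = m
			}
		}

		// min == id: some callee found its way back to fn, so the block
		// on the stack above fn is a set of mutually recursive functions.
		// min == id+1: nothing reached back; fn is a lone function.
		if min == id || min == id+1 {
			recursive := min == id
			i := len(v.stack) - 1
			for v.stack[i] != fn {
				v.id[v.stack[i]] = ^uint32(0) // large id: won't lower a caller's min
				i--
			}
			v.id[fn] = ^uint32(0)
			block := v.stack[i:]
			v.stack = v.stack[:i]
			v.analyze(block, recursive)
		}
		return min
	}

	func main() {
		v := &visitor{
			graph: map[string][]string{"a": {"b"}, "b": {"c"}, "c": {"b"}},
			id:    map[string]uint32{},
		}
		v.analyze = func(block []string, recursive bool) {
			fmt.Println(block, "recursive:", recursive)
		}
		v.visit("a") // prints [b c] recursive: true, then [a] recursive: false
	}

The bottom-up order is the point: analyze sees every callee component before any of its callers, which is what escape analysis needs.
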
-func visitBottomUp(list []*Node, analyze func(list []*Node, recursive bool)) { +func visitBottomUp(list []*ir.Node, analyze func(list []*ir.Node, recursive bool)) { var v bottomUpVisitor v.analyze = analyze - v.nodeID = make(map[*Node]uint32) + v.nodeID = make(map[*ir.Node]uint32) for _, n := range list { - if n.Op == ODCLFUNC && !n.Func.IsHiddenClosure() { + if n.Op == ir.ODCLFUNC && !n.Func.IsHiddenClosure() { v.visit(n) } } } -func (v *bottomUpVisitor) visit(n *Node) uint32 { +func (v *bottomUpVisitor) visit(n *ir.Node) uint32 { if id := v.nodeID[n]; id > 0 { // already visited return id @@ -73,38 +75,38 @@ func (v *bottomUpVisitor) visit(n *Node) uint32 { min := v.visitgen v.stack = append(v.stack, n) - inspectList(n.Nbody, func(n *Node) bool { + ir.InspectList(n.Nbody, func(n *ir.Node) bool { switch n.Op { - case ONAME: - if n.Class() == PFUNC { + case ir.ONAME: + if n.Class() == ir.PFUNC { if n != nil && n.Name.Defn != nil { if m := v.visit(n.Name.Defn); m < min { min = m } } } - case OMETHEXPR: - fn := n.MethodName() + case ir.OMETHEXPR: + fn := methodExprName(n) if fn != nil && fn.Name.Defn != nil { if m := v.visit(fn.Name.Defn); m < min { min = m } } - case ODOTMETH: - fn := n.MethodName() - if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil { + case ir.ODOTMETH: + fn := methodExprName(n) + if fn != nil && fn.Op == ir.ONAME && fn.Class() == ir.PFUNC && fn.Name.Defn != nil { if m := v.visit(fn.Name.Defn); m < min { min = m } } - case OCALLPART: - fn := asNode(callpartMethod(n).Nname) - if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil { + case ir.OCALLPART: + fn := ir.AsNode(callpartMethod(n).Nname) + if fn != nil && fn.Op == ir.ONAME && fn.Class() == ir.PFUNC && fn.Name.Defn != nil { if m := v.visit(fn.Name.Defn); m < min { min = m } } - case OCLOSURE: + case ir.OCLOSURE: if m := v.visit(n.Func.Decl); m < min { min = m } diff --git a/src/cmd/compile/internal/gc/scope.go b/src/cmd/compile/internal/gc/scope.go index ace1d6bd9c5ef..b5ebce04bec46 100644 --- a/src/cmd/compile/internal/gc/scope.go +++ b/src/cmd/compile/internal/gc/scope.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/internal/dwarf" "cmd/internal/obj" "cmd/internal/src" @@ -17,7 +18,7 @@ func xposBefore(p, q src.XPos) bool { return base.Ctxt.PosTable.Pos(p).Before(base.Ctxt.PosTable.Pos(q)) } -func findScope(marks []Mark, pos src.XPos) ScopeID { +func findScope(marks []ir.Mark, pos src.XPos) ir.ScopeID { i := sort.Search(len(marks), func(i int) bool { return xposBefore(pos, marks[i].Pos) }) @@ -27,7 +28,7 @@ func findScope(marks []Mark, pos src.XPos) ScopeID { return marks[i-1].Scope } -func assembleScopes(fnsym *obj.LSym, fn *Node, dwarfVars []*dwarf.Var, varScopes []ScopeID) []dwarf.Scope { +func assembleScopes(fnsym *obj.LSym, fn *ir.Node, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope { // Initialize the DWARF scope tree based on lexical scopes. dwarfScopes := make([]dwarf.Scope, 1+len(fn.Func.Parents)) for i, parent := range fn.Func.Parents { @@ -40,7 +41,7 @@ func assembleScopes(fnsym *obj.LSym, fn *Node, dwarfVars []*dwarf.Var, varScopes } // scopeVariables assigns DWARF variable records to their scopes. 
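
findScope above is a plain binary search over marks sorted by position. A toy model, with int positions standing in for src.XPos and int IDs standing in for ir.ScopeID:

	package main

	import (
		"fmt"
		"sort"
	)

	// Each mark records, in source order, the position at which a new
	// lexical scope becomes current. The scope covering pos is the one
	// set by the last mark not after pos.
	type mark struct {
		pos   int
		scope int
	}

	func findScope(marks []mark, pos int) int {
		i := sort.Search(len(marks), func(i int) bool {
			return pos < marks[i].pos
		})
		if i == 0 {
			return 0 // before the first mark: the function's outermost scope
		}
		return marks[i-1].scope
	}

	func main() {
		marks := []mark{{pos: 10, scope: 1}, {pos: 20, scope: 2}, {pos: 30, scope: 1}}
		for _, pos := range []int{5, 15, 25, 35} {
			fmt.Println(pos, "->", findScope(marks, pos))
		}
		// Output: 5 -> 0, 15 -> 1, 25 -> 2, 35 -> 1
	}
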
-func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ScopeID, dwarfScopes []dwarf.Scope) { +func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ir.ScopeID, dwarfScopes []dwarf.Scope) { sort.Stable(varsByScopeAndOffset{dwarfVars, varScopes}) i0 := 0 @@ -57,7 +58,7 @@ func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ScopeID, dwarfScopes []d } // scopePCs assigns PC ranges to their scopes. -func scopePCs(fnsym *obj.LSym, marks []Mark, dwarfScopes []dwarf.Scope) { +func scopePCs(fnsym *obj.LSym, marks []ir.Mark, dwarfScopes []dwarf.Scope) { // If there aren't any child scopes (in particular, when scope // tracking is disabled), we can skip a whole lot of work. if len(marks) == 0 { @@ -90,7 +91,7 @@ func compactScopes(dwarfScopes []dwarf.Scope) []dwarf.Scope { type varsByScopeAndOffset struct { vars []*dwarf.Var - scopes []ScopeID + scopes []ir.ScopeID } func (v varsByScopeAndOffset) Len() int { diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go index 8d4c8d2be10e7..ed7db0aaf7b56 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/gc/select.go @@ -6,16 +6,17 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" ) // select -func typecheckselect(sel *Node) { - var def *Node +func typecheckselect(sel *ir.Node) { + var def *ir.Node lno := setlineno(sel) typecheckslice(sel.Ninit.Slice(), ctxStmt) for _, ncase := range sel.List.Slice() { - if ncase.Op != OCASE { + if ncase.Op != ir.OCASE { setlineno(ncase) base.Fatalf("typecheckselect %v", ncase.Op) } @@ -23,7 +24,7 @@ func typecheckselect(sel *Node) { if ncase.List.Len() == 0 { // default if def != nil { - base.ErrorfAt(ncase.Pos, "multiple defaults in select (first at %v)", def.Line()) + base.ErrorfAt(ncase.Pos, "multiple defaults in select (first at %v)", ir.Line(def)) } else { def = ncase } @@ -37,7 +38,7 @@ func typecheckselect(sel *Node) { switch n.Op { default: pos := n.Pos - if n.Op == ONAME { + if n.Op == ir.ONAME { // We don't have the right position for ONAME nodes (see #15459 and // others). Using ncase.Pos for now as it will provide the correct // line number (assuming the expression follows the "case" keyword @@ -49,37 +50,37 @@ func typecheckselect(sel *Node) { // convert x = <-c into OSELRECV(x, <-c). // remove implicit conversions; the eventual assignment // will reintroduce them. 
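
For orientation, the four communication-clause shapes the switch below classifies, written as ordinary Go:

	package main

	func main() {
		c := make(chan int, 1)
		var x int
		var ok bool
		select {
		case c <- 1: // OSEND: left alone
		case x = <-c: // OAS with a receive on the right: becomes OSELRECV
		case x, ok = <-c: // OAS2RECV: becomes OSELRECV2
		case <-c: // bare receive: wrapped in OSELRECV with a nil destination
		}
		_, _ = x, ok
	}
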
- case OAS: - if (n.Right.Op == OCONVNOP || n.Right.Op == OCONVIFACE) && n.Right.Implicit() { + case ir.OAS: + if (n.Right.Op == ir.OCONVNOP || n.Right.Op == ir.OCONVIFACE) && n.Right.Implicit() { n.Right = n.Right.Left } - if n.Right.Op != ORECV { + if n.Right.Op != ir.ORECV { base.ErrorfAt(n.Pos, "select assignment must have receive on right hand side") break } - n.Op = OSELRECV + n.Op = ir.OSELRECV // convert x, ok = <-c into OSELRECV2(x, <-c) with ntest=ok - case OAS2RECV: - if n.Right.Op != ORECV { + case ir.OAS2RECV: + if n.Right.Op != ir.ORECV { base.ErrorfAt(n.Pos, "select assignment must have receive on right hand side") break } - n.Op = OSELRECV2 + n.Op = ir.OSELRECV2 n.Left = n.List.First() n.List.Set1(n.List.Second()) // convert <-c into OSELRECV(N, <-c) - case ORECV: - n = nodl(n.Pos, OSELRECV, nil, n) + case ir.ORECV: + n = ir.NodAt(n.Pos, ir.OSELRECV, nil, n) n.SetTypecheck(1) ncase.Left = n - case OSEND: + case ir.OSEND: break } } @@ -90,7 +91,7 @@ func typecheckselect(sel *Node) { base.Pos = lno } -func walkselect(sel *Node) { +func walkselect(sel *ir.Node) { lno := setlineno(sel) if sel.Nbody.Len() != 0 { base.Fatalf("double walkselect") @@ -108,13 +109,13 @@ func walkselect(sel *Node) { base.Pos = lno } -func walkselectcases(cases *Nodes) []*Node { +func walkselectcases(cases *ir.Nodes) []*ir.Node { ncas := cases.Len() sellineno := base.Pos // optimization: zero-case select if ncas == 0 { - return []*Node{mkcall("block", nil, nil)} + return []*ir.Node{mkcall("block", nil, nil)} } // optimization: one-case select: single op. @@ -130,25 +131,25 @@ func walkselectcases(cases *Nodes) []*Node { default: base.Fatalf("select %v", n.Op) - case OSEND: + case ir.OSEND: // already ok - case OSELRECV, OSELRECV2: - if n.Op == OSELRECV || n.List.Len() == 0 { + case ir.OSELRECV, ir.OSELRECV2: + if n.Op == ir.OSELRECV || n.List.Len() == 0 { if n.Left == nil { n = n.Right } else { - n.Op = OAS + n.Op = ir.OAS } break } if n.Left == nil { - nblank = typecheck(nblank, ctxExpr|ctxAssign) - n.Left = nblank + ir.BlankNode = typecheck(ir.BlankNode, ctxExpr|ctxAssign) + n.Left = ir.BlankNode } - n.Op = OAS2 + n.Op = ir.OAS2 n.List.Prepend(n.Left) n.Rlist.Set1(n.Right) n.Right = nil @@ -161,13 +162,13 @@ func walkselectcases(cases *Nodes) []*Node { } l = append(l, cas.Nbody.Slice()...) - l = append(l, nod(OBREAK, nil, nil)) + l = append(l, ir.Nod(ir.OBREAK, nil, nil)) return l } // convert case value arguments to addresses. // this rewrite is used by both the general code and the next optimization. 
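
The one-case optimization earlier in this hunk means a select like the following never touches runtime.selectgo; it compiles down to the bare channel operation plus a break:

	package main

	import "fmt"

	func main() {
		c := make(chan int, 1)
		c <- 42

		// Single case: no scase array, no selectgo call, just a receive.
		select {
		case v := <-c:
			fmt.Println(v) // 42
		}
	}

Likewise, per the zero-case optimization above, an empty select {} compiles to a single call to runtime.block.
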
- var dflt *Node + var dflt *ir.Node for _, cas := range cases.Slice() { setlineno(cas) n := cas.Left @@ -176,17 +177,17 @@ func walkselectcases(cases *Nodes) []*Node { continue } switch n.Op { - case OSEND: - n.Right = nod(OADDR, n.Right, nil) + case ir.OSEND: + n.Right = ir.Nod(ir.OADDR, n.Right, nil) n.Right = typecheck(n.Right, ctxExpr) - case OSELRECV, OSELRECV2: - if n.Op == OSELRECV2 && n.List.Len() == 0 { - n.Op = OSELRECV + case ir.OSELRECV, ir.OSELRECV2: + if n.Op == ir.OSELRECV2 && n.List.Len() == 0 { + n.Op = ir.OSELRECV } if n.Left != nil { - n.Left = nod(OADDR, n.Left, nil) + n.Left = ir.Nod(ir.OADDR, n.Left, nil) n.Left = typecheck(n.Left, ctxExpr) } } @@ -201,66 +202,66 @@ func walkselectcases(cases *Nodes) []*Node { n := cas.Left setlineno(n) - r := nod(OIF, nil, nil) + r := ir.Nod(ir.OIF, nil, nil) r.Ninit.Set(cas.Ninit.Slice()) switch n.Op { default: base.Fatalf("select %v", n.Op) - case OSEND: + case ir.OSEND: // if selectnbsend(c, v) { body } else { default body } ch := n.Left - r.Left = mkcall1(chanfn("selectnbsend", 2, ch.Type), types.Types[TBOOL], &r.Ninit, ch, n.Right) + r.Left = mkcall1(chanfn("selectnbsend", 2, ch.Type), types.Types[types.TBOOL], &r.Ninit, ch, n.Right) - case OSELRECV: + case ir.OSELRECV: // if selectnbrecv(&v, c) { body } else { default body } ch := n.Right.Left elem := n.Left if elem == nil { elem = nodnil() } - r.Left = mkcall1(chanfn("selectnbrecv", 2, ch.Type), types.Types[TBOOL], &r.Ninit, elem, ch) + r.Left = mkcall1(chanfn("selectnbrecv", 2, ch.Type), types.Types[types.TBOOL], &r.Ninit, elem, ch) - case OSELRECV2: + case ir.OSELRECV2: // if selectnbrecv2(&v, &received, c) { body } else { default body } ch := n.Right.Left elem := n.Left if elem == nil { elem = nodnil() } - receivedp := nod(OADDR, n.List.First(), nil) + receivedp := ir.Nod(ir.OADDR, n.List.First(), nil) receivedp = typecheck(receivedp, ctxExpr) - r.Left = mkcall1(chanfn("selectnbrecv2", 2, ch.Type), types.Types[TBOOL], &r.Ninit, elem, receivedp, ch) + r.Left = mkcall1(chanfn("selectnbrecv2", 2, ch.Type), types.Types[types.TBOOL], &r.Ninit, elem, receivedp, ch) } r.Left = typecheck(r.Left, ctxExpr) r.Nbody.Set(cas.Nbody.Slice()) r.Rlist.Set(append(dflt.Ninit.Slice(), dflt.Nbody.Slice()...)) - return []*Node{r, nod(OBREAK, nil, nil)} + return []*ir.Node{r, ir.Nod(ir.OBREAK, nil, nil)} } if dflt != nil { ncas-- } - casorder := make([]*Node, ncas) + casorder := make([]*ir.Node, ncas) nsends, nrecvs := 0, 0 - var init []*Node + var init []*ir.Node // generate sel-struct base.Pos = sellineno selv := temp(types.NewArray(scasetype(), int64(ncas))) - r := nod(OAS, selv, nil) + r := ir.Nod(ir.OAS, selv, nil) r = typecheck(r, ctxStmt) init = append(init, r) // No initialization for order; runtime.selectgo is responsible for that. 
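
The two-case lowering earlier in this function is why a single case plus default is the idiomatic non-blocking channel operation: it becomes one selectnbsend or selectnbrecv call guarding an if/else, with no selectgo. A user-level view:

	package main

	import "fmt"

	func main() {
		c := make(chan int) // unbuffered, nothing ready

		select {
		case c <- 1:
			fmt.Println("sent")
		default:
			fmt.Println("would block") // taken: no receiver is ready
		}
	}

Only selects with two or more communicating cases reach the scase/selectgo machinery being rewritten below.
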
- order := temp(types.NewArray(types.Types[TUINT16], 2*int64(ncas))) + order := temp(types.NewArray(types.Types[types.TUINT16], 2*int64(ncas))) - var pc0, pcs *Node + var pc0, pcs *ir.Node if base.Flag.Race { - pcs = temp(types.NewArray(types.Types[TUINTPTR], int64(ncas))) - pc0 = typecheck(nod(OADDR, nod(OINDEX, pcs, nodintconst(0)), nil), ctxExpr) + pcs = temp(types.NewArray(types.Types[types.TUINTPTR], int64(ncas))) + pc0 = typecheck(ir.Nod(ir.OADDR, ir.Nod(ir.OINDEX, pcs, nodintconst(0)), nil), ctxExpr) } else { pc0 = nodnil() } @@ -278,16 +279,16 @@ func walkselectcases(cases *Nodes) []*Node { } var i int - var c, elem *Node + var c, elem *ir.Node switch n.Op { default: base.Fatalf("select %v", n.Op) - case OSEND: + case ir.OSEND: i = nsends nsends++ c = n.Left elem = n.Right - case OSELRECV, OSELRECV2: + case ir.OSELRECV, ir.OSELRECV2: nrecvs++ i = ncas - nrecvs c = n.Right.Left @@ -296,23 +297,23 @@ func walkselectcases(cases *Nodes) []*Node { casorder[i] = cas - setField := func(f string, val *Node) { - r := nod(OAS, nodSym(ODOT, nod(OINDEX, selv, nodintconst(int64(i))), lookup(f)), val) + setField := func(f string, val *ir.Node) { + r := ir.Nod(ir.OAS, nodSym(ir.ODOT, ir.Nod(ir.OINDEX, selv, nodintconst(int64(i))), lookup(f)), val) r = typecheck(r, ctxStmt) init = append(init, r) } - c = convnop(c, types.Types[TUNSAFEPTR]) + c = convnop(c, types.Types[types.TUNSAFEPTR]) setField("c", c) if elem != nil { - elem = convnop(elem, types.Types[TUNSAFEPTR]) + elem = convnop(elem, types.Types[types.TUNSAFEPTR]) setField("elem", elem) } // TODO(mdempsky): There should be a cleaner way to // handle this. if base.Flag.Race { - r = mkcall("selectsetpc", nil, nil, nod(OADDR, nod(OINDEX, pcs, nodintconst(int64(i))), nil)) + r = mkcall("selectsetpc", nil, nil, ir.Nod(ir.OADDR, ir.Nod(ir.OINDEX, pcs, nodintconst(int64(i))), nil)) init = append(init, r) } } @@ -322,9 +323,9 @@ func walkselectcases(cases *Nodes) []*Node { // run the select base.Pos = sellineno - chosen := temp(types.Types[TINT]) - recvOK := temp(types.Types[TBOOL]) - r = nod(OAS2, nil, nil) + chosen := temp(types.Types[types.TINT]) + recvOK := temp(types.Types[types.TBOOL]) + r = ir.Nod(ir.OAS2, nil, nil) r.List.Set2(chosen, recvOK) fn := syslook("selectgo") r.Rlist.Set1(mkcall1(fn, fn.Type.Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil))) @@ -332,46 +333,46 @@ func walkselectcases(cases *Nodes) []*Node { init = append(init, r) // selv and order are no longer alive after selectgo. 
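
selectgo's two results drive everything that follows: chosen picks the case to dispatch, and recvOK reports whether a receive got a real value. The user-visible consequence, in plain code:

	package main

	import "fmt"

	func main() {
		c := make(chan int)
		d := make(chan int)
		close(c)

		// Two receive cases force the general selectgo path; the closed
		// channel is chosen and recvOK comes back false, which the
		// dispatch code copies into ok for the OSELRECV2 case.
		select {
		case v, ok := <-c:
			fmt.Println(v, ok) // 0 false
		case v, ok := <-d:
			fmt.Println(v, ok) // never taken; d blocks forever
		}
	}
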
- init = append(init, nod(OVARKILL, selv, nil)) - init = append(init, nod(OVARKILL, order, nil)) + init = append(init, ir.Nod(ir.OVARKILL, selv, nil)) + init = append(init, ir.Nod(ir.OVARKILL, order, nil)) if base.Flag.Race { - init = append(init, nod(OVARKILL, pcs, nil)) + init = append(init, ir.Nod(ir.OVARKILL, pcs, nil)) } // dispatch cases - dispatch := func(cond, cas *Node) { + dispatch := func(cond, cas *ir.Node) { cond = typecheck(cond, ctxExpr) cond = defaultlit(cond, nil) - r := nod(OIF, cond, nil) + r := ir.Nod(ir.OIF, cond, nil) - if n := cas.Left; n != nil && n.Op == OSELRECV2 { - x := nod(OAS, n.List.First(), recvOK) + if n := cas.Left; n != nil && n.Op == ir.OSELRECV2 { + x := ir.Nod(ir.OAS, n.List.First(), recvOK) x = typecheck(x, ctxStmt) r.Nbody.Append(x) } r.Nbody.AppendNodes(&cas.Nbody) - r.Nbody.Append(nod(OBREAK, nil, nil)) + r.Nbody.Append(ir.Nod(ir.OBREAK, nil, nil)) init = append(init, r) } if dflt != nil { setlineno(dflt) - dispatch(nod(OLT, chosen, nodintconst(0)), dflt) + dispatch(ir.Nod(ir.OLT, chosen, nodintconst(0)), dflt) } for i, cas := range casorder { setlineno(cas) - dispatch(nod(OEQ, chosen, nodintconst(int64(i))), cas) + dispatch(ir.Nod(ir.OEQ, chosen, nodintconst(int64(i))), cas) } return init } // bytePtrToIndex returns a Node representing "(*byte)(&n[i])". -func bytePtrToIndex(n *Node, i int64) *Node { - s := nod(OADDR, nod(OINDEX, n, nodintconst(i)), nil) - t := types.NewPtr(types.Types[TUINT8]) +func bytePtrToIndex(n *ir.Node, i int64) *ir.Node { + s := ir.Nod(ir.OADDR, ir.Nod(ir.OINDEX, n, nodintconst(i)), nil) + t := types.NewPtr(types.Types[types.TUINT8]) return convnop(s, t) } @@ -380,9 +381,9 @@ var scase *types.Type // Keep in sync with src/runtime/select.go. func scasetype() *types.Type { if scase == nil { - scase = tostruct([]*Node{ - namedfield("c", types.Types[TUNSAFEPTR]), - namedfield("elem", types.Types[TUNSAFEPTR]), + scase = tostruct([]*ir.Node{ + namedfield("c", types.Types[types.TUNSAFEPTR]), + namedfield("elem", types.Types[types.TUNSAFEPTR]), }) scase.SetNoalg(true) } diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 219435d6de39e..d78b509127675 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/obj" "fmt" @@ -13,8 +14,8 @@ import ( ) type InitEntry struct { - Xoffset int64 // struct, array only - Expr *Node // bytes of run-time computed expressions + Xoffset int64 // struct, array only + Expr *ir.Node // bytes of run-time computed expressions } type InitPlan struct { @@ -28,21 +29,21 @@ type InitPlan struct { type InitSchedule struct { // out is the ordered list of dynamic initialization // statements. - out []*Node + out []*ir.Node - initplans map[*Node]*InitPlan - inittemps map[*Node]*Node + initplans map[*ir.Node]*InitPlan + inittemps map[*ir.Node]*ir.Node } -func (s *InitSchedule) append(n *Node) { +func (s *InitSchedule) append(n *ir.Node) { s.out = append(s.out, n) } // staticInit adds an initialization statement n to the schedule. 
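
A sketch of the split the schedule makes, as ordinary package-level Go; which initializers land directly in static data and which are appended to s.out and run from the generated init code:

	package main

	import "fmt"

	// Simple "l = r" forms the schedule executes statically: the bits
	// land in the object file and nothing is appended to s.out.
	var a = 42
	var b = [3]int{1, 2, 3}
	var c = &b // address of a global: resolved by the linker (addrsym)
	var d = b  // copy of an already-initialized global (staticcopy)

	// Forms that stay dynamic in this version: tryStaticInit fails, so
	// the assignment runs at startup.
	var e = f()
	var g = map[string]int{"x": 1} // OMAPLIT always takes the dynamic path

	func f() int { return 7 }

	func main() { fmt.Println(a, b, c, d, e, g) }
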
-func (s *InitSchedule) staticInit(n *Node) { +func (s *InitSchedule) staticInit(n *ir.Node) { if !s.tryStaticInit(n) { if base.Flag.Percent != 0 { - Dump("nonstatic", n) + ir.Dump("nonstatic", n) } s.append(n) } @@ -50,16 +51,16 @@ func (s *InitSchedule) staticInit(n *Node) { // tryStaticInit attempts to statically execute an initialization // statement and reports whether it succeeded. -func (s *InitSchedule) tryStaticInit(n *Node) bool { +func (s *InitSchedule) tryStaticInit(n *ir.Node) bool { // Only worry about simple "l = r" assignments. Multiple // variable/expression OAS2 assignments have already been // replaced by multiple simple OAS assignments, and the other // OAS2* assignments mostly necessitate dynamic execution // anyway. - if n.Op != OAS { + if n.Op != ir.OAS { return false } - if n.Left.isBlank() && candiscard(n.Right) { + if ir.IsBlank(n.Left) && candiscard(n.Right) { return true } lno := setlineno(n) @@ -69,21 +70,21 @@ func (s *InitSchedule) tryStaticInit(n *Node) bool { // like staticassign but we are copying an already // initialized value r. -func (s *InitSchedule) staticcopy(l *Node, r *Node) bool { - if r.Op != ONAME && r.Op != OMETHEXPR { +func (s *InitSchedule) staticcopy(l *ir.Node, r *ir.Node) bool { + if r.Op != ir.ONAME && r.Op != ir.OMETHEXPR { return false } - if r.Class() == PFUNC { + if r.Class() == ir.PFUNC { pfuncsym(l, r) return true } - if r.Class() != PEXTERN || r.Sym.Pkg != localpkg { + if r.Class() != ir.PEXTERN || r.Sym.Pkg != ir.LocalPkg { return false } if r.Name.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value return false } - if r.Name.Defn.Op != OAS { + if r.Name.Defn.Op != ir.OAS { return false } if r.Type.IsString() { // perhaps overwritten by cmd/link -X (#34675) @@ -92,73 +93,73 @@ func (s *InitSchedule) staticcopy(l *Node, r *Node) bool { orig := r r = r.Name.Defn.Right - for r.Op == OCONVNOP && !types.Identical(r.Type, l.Type) { + for r.Op == ir.OCONVNOP && !types.Identical(r.Type, l.Type) { r = r.Left } switch r.Op { - case ONAME, OMETHEXPR: + case ir.ONAME, ir.OMETHEXPR: if s.staticcopy(l, r) { return true } // We may have skipped past one or more OCONVNOPs, so // use conv to ensure r is assignable to l (#13263). - s.append(nod(OAS, l, conv(r, l.Type))) + s.append(ir.Nod(ir.OAS, l, conv(r, l.Type))) return true - case ONIL: + case ir.ONIL: return true - case OLITERAL: + case ir.OLITERAL: if isZero(r) { return true } litsym(l, r, int(l.Type.Width)) return true - case OADDR: - if a := r.Left; a.Op == ONAME { + case ir.OADDR: + if a := r.Left; a.Op == ir.ONAME { addrsym(l, a) return true } - case OPTRLIT: + case ir.OPTRLIT: switch r.Left.Op { - case OARRAYLIT, OSLICELIT, OSTRUCTLIT, OMAPLIT: + case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT: // copy pointer addrsym(l, s.inittemps[r]) return true } - case OSLICELIT: + case ir.OSLICELIT: // copy slice a := s.inittemps[r] slicesym(l, a, r.Right.Int64Val()) return true - case OARRAYLIT, OSTRUCTLIT: + case ir.OARRAYLIT, ir.OSTRUCTLIT: p := s.initplans[r] - n := l.copy() + n := ir.Copy(l) for i := range p.E { e := &p.E[i] n.Xoffset = l.Xoffset + e.Xoffset n.Type = e.Expr.Type - if e.Expr.Op == OLITERAL || e.Expr.Op == ONIL { + if e.Expr.Op == ir.OLITERAL || e.Expr.Op == ir.ONIL { litsym(n, e.Expr, int(n.Type.Width)) continue } - ll := n.sepcopy() + ll := ir.SepCopy(n) if s.staticcopy(ll, e.Expr) { continue } // Requires computation, but we're // copying someone else's computation. 
- rr := orig.sepcopy() + rr := ir.SepCopy(orig) rr.Type = ll.Type rr.Xoffset += e.Xoffset setlineno(rr) - s.append(nod(OAS, ll, rr)) + s.append(ir.Nod(ir.OAS, ll, rr)) } return true @@ -167,35 +168,35 @@ func (s *InitSchedule) staticcopy(l *Node, r *Node) bool { return false } -func (s *InitSchedule) staticassign(l *Node, r *Node) bool { - for r.Op == OCONVNOP { +func (s *InitSchedule) staticassign(l *ir.Node, r *ir.Node) bool { + for r.Op == ir.OCONVNOP { r = r.Left } switch r.Op { - case ONAME, OMETHEXPR: + case ir.ONAME, ir.OMETHEXPR: return s.staticcopy(l, r) - case ONIL: + case ir.ONIL: return true - case OLITERAL: + case ir.OLITERAL: if isZero(r) { return true } litsym(l, r, int(l.Type.Width)) return true - case OADDR: + case ir.OADDR: if nam := stataddr(r.Left); nam != nil { addrsym(l, nam) return true } fallthrough - case OPTRLIT: + case ir.OPTRLIT: switch r.Left.Op { - case OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT: + case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT: // Init pointer. a := staticname(r.Left.Type) @@ -204,20 +205,20 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool { // Init underlying literal. if !s.staticassign(a, r.Left) { - s.append(nod(OAS, a, r.Left)) + s.append(ir.Nod(ir.OAS, a, r.Left)) } return true } //dump("not static ptrlit", r); - case OSTR2BYTES: - if l.Class() == PEXTERN && r.Left.Op == OLITERAL { + case ir.OSTR2BYTES: + if l.Class() == ir.PEXTERN && r.Left.Op == ir.OLITERAL { sval := r.Left.StringVal() slicebytes(l, sval) return true } - case OSLICELIT: + case ir.OSLICELIT: s.initplan(r) // Init slice. bound := r.Right.Int64Val() @@ -230,32 +231,32 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool { l = a fallthrough - case OARRAYLIT, OSTRUCTLIT: + case ir.OARRAYLIT, ir.OSTRUCTLIT: s.initplan(r) p := s.initplans[r] - n := l.copy() + n := ir.Copy(l) for i := range p.E { e := &p.E[i] n.Xoffset = l.Xoffset + e.Xoffset n.Type = e.Expr.Type - if e.Expr.Op == OLITERAL || e.Expr.Op == ONIL { + if e.Expr.Op == ir.OLITERAL || e.Expr.Op == ir.ONIL { litsym(n, e.Expr, int(n.Type.Width)) continue } setlineno(e.Expr) - a := n.sepcopy() + a := ir.SepCopy(n) if !s.staticassign(a, e.Expr) { - s.append(nod(OAS, a, e.Expr)) + s.append(ir.Nod(ir.OAS, a, e.Expr)) } } return true - case OMAPLIT: + case ir.OMAPLIT: break - case OCLOSURE: + case ir.OCLOSURE: if hasemptycvars(r) { if base.Debug.Closure > 0 { base.WarnfAt(r.Pos, "closure converted to global") @@ -267,13 +268,13 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool { } closuredebugruntimecheck(r) - case OCONVIFACE: + case ir.OCONVIFACE: // This logic is mirrored in isStaticCompositeLiteral. // If you change something here, change it there, and vice versa. // Determine the underlying concrete type and value we are converting from. val := r - for val.Op == OCONVIFACE { + for val.Op == ir.OCONVIFACE { val = val.Left } @@ -283,12 +284,12 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool { // both words are zero and so there no work to do, so report success. // If val is non-nil, we have no concrete type to record, // and we won't be able to statically initialize its value, so report failure. - return val.Op == ONIL + return val.Op == ir.ONIL } markTypeUsedInInterface(val.Type, l.Sym.Linksym()) - var itab *Node + var itab *ir.Node if l.Type.IsEmptyInterface() { itab = typename(val.Type) } else { @@ -296,7 +297,7 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool { } // Create a copy of l to modify while we emit data. 
- n := l.copy() + n := ir.Copy(l) // Emit itab, advance offset. addrsym(n, itab.Left) // itab is an OADDR node @@ -304,23 +305,23 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool { // Emit data. if isdirectiface(val.Type) { - if val.Op == ONIL { + if val.Op == ir.ONIL { // Nil is zero, nothing to do. return true } // Copy val directly into n. n.Type = val.Type setlineno(val) - a := n.sepcopy() + a := ir.SepCopy(n) if !s.staticassign(a, val) { - s.append(nod(OAS, a, val)) + s.append(ir.Nod(ir.OAS, a, val)) } } else { // Construct temp to hold val, write pointer to temp into n. a := staticname(val.Type) s.inittemps[val] = a if !s.staticassign(a, val) { - s.append(nod(OAS, a, val)) + s.append(ir.Nod(ir.OAS, a, val)) } addrsym(n, a) } @@ -366,29 +367,29 @@ var statuniqgen int // name generator for static temps // staticname returns a name backed by a (writable) static data symbol. // Use readonlystaticname for read-only node. -func staticname(t *types.Type) *Node { +func staticname(t *types.Type) *ir.Node { // Don't use lookupN; it interns the resulting string, but these are all unique. - n := newname(lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen))) + n := NewName(lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen))) statuniqgen++ - addvar(n, t, PEXTERN) + addvar(n, t, ir.PEXTERN) n.Sym.Linksym().Set(obj.AttrLocal, true) return n } // readonlystaticname returns a name backed by a (writable) static data symbol. -func readonlystaticname(t *types.Type) *Node { +func readonlystaticname(t *types.Type) *ir.Node { n := staticname(t) n.MarkReadonly() n.Sym.Linksym().Set(obj.AttrContentAddressable, true) return n } -func (n *Node) isSimpleName() bool { - return (n.Op == ONAME || n.Op == OMETHEXPR) && n.Class() != PAUTOHEAP && n.Class() != PEXTERN +func isSimpleName(n *ir.Node) bool { + return (n.Op == ir.ONAME || n.Op == ir.OMETHEXPR) && n.Class() != ir.PAUTOHEAP && n.Class() != ir.PEXTERN } -func litas(l *Node, r *Node, init *Nodes) { - a := nod(OAS, l, r) +func litas(l *ir.Node, r *ir.Node, init *ir.Nodes) { + a := ir.Nod(ir.OAS, l, r) a = typecheck(a, ctxStmt) a = walkexpr(a, init) init.Append(a) @@ -404,15 +405,15 @@ const ( // getdyn calculates the initGenType for n. // If top is false, getdyn is recursing. -func getdyn(n *Node, top bool) initGenType { +func getdyn(n *ir.Node, top bool) initGenType { switch n.Op { default: - if n.isGoConst() { + if isGoConst(n) { return initConst } return initDynamic - case OSLICELIT: + case ir.OSLICELIT: if !top { return initDynamic } @@ -426,15 +427,15 @@ func getdyn(n *Node, top bool) initGenType { return initDynamic } - case OARRAYLIT, OSTRUCTLIT: + case ir.OARRAYLIT, ir.OSTRUCTLIT: } var mode initGenType for _, n1 := range n.List.Slice() { switch n1.Op { - case OKEY: + case ir.OKEY: n1 = n1.Right - case OSTRUCTKEY: + case ir.OSTRUCTKEY: n1 = n1.Left } mode |= getdyn(n1, false) @@ -446,13 +447,13 @@ func getdyn(n *Node, top bool) initGenType { } // isStaticCompositeLiteral reports whether n is a compile-time constant. 
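
Concretely, under the definition below (every leaf a constant or nil, and no slice literal anywhere in the tree):

	package main

	import "fmt"

	type point struct{ x, y int }

	// Accepted by isStaticCompositeLiteral.
	var ok1 = [2]point{{1, 2}, {3, 4}}
	var ok2 = point{x: 1}

	// Rejected: OSLICELIT is never static here (slicelit handles its
	// backing array separately), and a non-constant leaf forces dynamic
	// initialization.
	var no1 = []int{1, 2, 3}
	var no2 = point{x: n()}

	func n() int { return 1 }

	func main() { fmt.Println(ok1, ok2, no1, no2) }
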
-func isStaticCompositeLiteral(n *Node) bool { +func isStaticCompositeLiteral(n *ir.Node) bool { switch n.Op { - case OSLICELIT: + case ir.OSLICELIT: return false - case OARRAYLIT: + case ir.OARRAYLIT: for _, r := range n.List.Slice() { - if r.Op == OKEY { + if r.Op == ir.OKEY { r = r.Right } if !isStaticCompositeLiteral(r) { @@ -460,9 +461,9 @@ func isStaticCompositeLiteral(n *Node) bool { } } return true - case OSTRUCTLIT: + case ir.OSTRUCTLIT: for _, r := range n.List.Slice() { - if r.Op != OSTRUCTKEY { + if r.Op != ir.OSTRUCTKEY { base.Fatalf("isStaticCompositeLiteral: rhs not OSTRUCTKEY: %v", r) } if !isStaticCompositeLiteral(r.Left) { @@ -470,18 +471,18 @@ func isStaticCompositeLiteral(n *Node) bool { } } return true - case OLITERAL, ONIL: + case ir.OLITERAL, ir.ONIL: return true - case OCONVIFACE: + case ir.OCONVIFACE: // See staticassign's OCONVIFACE case for comments. val := n - for val.Op == OCONVIFACE { + for val.Op == ir.OCONVIFACE { val = val.Left } if val.Type.IsInterface() { - return val.Op == ONIL + return val.Op == ir.ONIL } - if isdirectiface(val.Type) && val.Op == ONIL { + if isdirectiface(val.Type) && val.Op == ir.ONIL { return true } return isStaticCompositeLiteral(val) @@ -508,37 +509,37 @@ const ( // fixedlit handles struct, array, and slice literals. // TODO: expand documentation. -func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes) { - isBlank := var_ == nblank - var splitnode func(*Node) (a *Node, value *Node) +func fixedlit(ctxt initContext, kind initKind, n *ir.Node, var_ *ir.Node, init *ir.Nodes) { + isBlank := var_ == ir.BlankNode + var splitnode func(*ir.Node) (a *ir.Node, value *ir.Node) switch n.Op { - case OARRAYLIT, OSLICELIT: + case ir.OARRAYLIT, ir.OSLICELIT: var k int64 - splitnode = func(r *Node) (*Node, *Node) { - if r.Op == OKEY { + splitnode = func(r *ir.Node) (*ir.Node, *ir.Node) { + if r.Op == ir.OKEY { k = indexconst(r.Left) if k < 0 { base.Fatalf("fixedlit: invalid index %v", r.Left) } r = r.Right } - a := nod(OINDEX, var_, nodintconst(k)) + a := ir.Nod(ir.OINDEX, var_, nodintconst(k)) k++ if isBlank { - a = nblank + a = ir.BlankNode } return a, r } - case OSTRUCTLIT: - splitnode = func(r *Node) (*Node, *Node) { - if r.Op != OSTRUCTKEY { + case ir.OSTRUCTLIT: + splitnode = func(r *ir.Node) (*ir.Node, *ir.Node) { + if r.Op != ir.OSTRUCTKEY { base.Fatalf("fixedlit: rhs not OSTRUCTKEY: %v", r) } if r.Sym.IsBlank() || isBlank { - return nblank, r.Left + return ir.BlankNode, r.Left } setlineno(r) - return nodSym(ODOT, var_, r.Sym), r.Left + return nodSym(ir.ODOT, var_, r.Sym), r.Left } default: base.Fatalf("fixedlit bad op: %v", n.Op) @@ -546,36 +547,36 @@ func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes) for _, r := range n.List.Slice() { a, value := splitnode(r) - if a == nblank && candiscard(value) { + if a == ir.BlankNode && candiscard(value) { continue } switch value.Op { - case OSLICELIT: + case ir.OSLICELIT: if (kind == initKindStatic && ctxt == inNonInitFunction) || (kind == initKindDynamic && ctxt == inInitFunction) { slicelit(ctxt, value, a, init) continue } - case OARRAYLIT, OSTRUCTLIT: + case ir.OARRAYLIT, ir.OSTRUCTLIT: fixedlit(ctxt, kind, value, a, init) continue } - islit := value.isGoConst() + islit := isGoConst(value) if (kind == initKindStatic && !islit) || (kind == initKindDynamic && islit) { continue } // build list of assignments: var[index] = expr setlineno(a) - a = nod(OAS, a, value) + a = ir.Nod(ir.OAS, a, value) a = typecheck(a, ctxStmt) switch kind { case 
initKindStatic: genAsStatic(a) case initKindDynamic, initKindLocalCode: - a = orderStmtInPlace(a, map[string][]*Node{}) + a = orderStmtInPlace(a, map[string][]*ir.Node{}) a = walkstmt(a) init.Append(a) default: @@ -585,8 +586,8 @@ func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes) } } -func isSmallSliceLit(n *Node) bool { - if n.Op != OSLICELIT { +func isSmallSliceLit(n *ir.Node) bool { + if n.Op != ir.OSLICELIT { return false } @@ -595,7 +596,7 @@ func isSmallSliceLit(n *Node) bool { return smallintconst(r) && (n.Type.Elem().Width == 0 || r.Int64Val() <= smallArrayBytes/n.Type.Elem().Width) } -func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) { +func slicelit(ctxt initContext, n *ir.Node, var_ *ir.Node, init *ir.Nodes) { // make an array type corresponding the number of elements we have t := types.NewArray(n.Type.Elem(), n.Right.Int64Val()) dowidth(t) @@ -610,7 +611,7 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) { // copy static to slice var_ = typecheck(var_, ctxExpr|ctxAssign) nam := stataddr(var_) - if nam == nil || nam.Class() != PEXTERN { + if nam == nil || nam.Class() != ir.PEXTERN { base.Fatalf("slicelit: %v", var_) } slicesym(nam, vstat, t.NumElem()) @@ -638,7 +639,7 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) { // if the literal contains constants, // make static initialized array (1),(2) - var vstat *Node + var vstat *ir.Node mode := getdyn(n, true) if mode&initConst != 0 && !isSmallSliceLit(n) { @@ -654,7 +655,7 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) { vauto := temp(types.NewPtr(t)) // set auto to point at new temp or heap (3 assign) - var a *Node + var a *ir.Node if x := prealloc[n]; x != nil { // temp allocated during order.go for dddarg if !types.Identical(t, x.Type) { @@ -662,43 +663,43 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) { } if vstat == nil { - a = nod(OAS, x, nil) + a = ir.Nod(ir.OAS, x, nil) a = typecheck(a, ctxStmt) init.Append(a) // zero new temp } else { // Declare that we're about to initialize all of x. // (Which happens at the *vauto = vstat below.) 
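
For readers following the numbered steps in this function's comments, here is the heap case of slicelit written out by hand for a half-constant literal. vstat, vauto, and dyn are invented names mirroring the temporaries above:

	package main

	import "fmt"

	func dyn() int { return 99 }

	func main() {
		// What slicelit arranges for s := []int{1, 2, dyn(), 4}:
		var vstat = [4]int{1, 2, 0, 4} // (1),(2) static array of the constant entries
		vauto := new([4]int)           // (3) allocate the backing array
		*vauto = vstat                 // (4) copy the static part
		vauto[2] = dyn()               // (5) patch in the dynamic entries
		s := vauto[:]                  // (6) make the slice out of it
		fmt.Println(s)                 // [1 2 99 4]
	}
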
- init.Append(nod(OVARDEF, x, nil)) + init.Append(ir.Nod(ir.OVARDEF, x, nil)) } - a = nod(OADDR, x, nil) + a = ir.Nod(ir.OADDR, x, nil) } else if n.Esc == EscNone { a = temp(t) if vstat == nil { - a = nod(OAS, temp(t), nil) + a = ir.Nod(ir.OAS, temp(t), nil) a = typecheck(a, ctxStmt) init.Append(a) // zero new temp a = a.Left } else { - init.Append(nod(OVARDEF, a, nil)) + init.Append(ir.Nod(ir.OVARDEF, a, nil)) } - a = nod(OADDR, a, nil) + a = ir.Nod(ir.OADDR, a, nil) } else { - a = nod(ONEW, nil, nil) + a = ir.Nod(ir.ONEW, nil, nil) a.List.Set1(typenod(t)) } - a = nod(OAS, vauto, a) + a = ir.Nod(ir.OAS, vauto, a) a = typecheck(a, ctxStmt) a = walkexpr(a, init) init.Append(a) if vstat != nil { // copy static to heap (4) - a = nod(ODEREF, vauto, nil) + a = ir.Nod(ir.ODEREF, vauto, nil) - a = nod(OAS, a, vstat) + a = ir.Nod(ir.OAS, a, vstat) a = typecheck(a, ctxStmt) a = walkexpr(a, init) init.Append(a) @@ -707,24 +708,24 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) { // put dynamics into array (5) var index int64 for _, value := range n.List.Slice() { - if value.Op == OKEY { + if value.Op == ir.OKEY { index = indexconst(value.Left) if index < 0 { base.Fatalf("slicelit: invalid index %v", value.Left) } value = value.Right } - a := nod(OINDEX, vauto, nodintconst(index)) + a := ir.Nod(ir.OINDEX, vauto, nodintconst(index)) a.SetBounded(true) index++ // TODO need to check bounds? switch value.Op { - case OSLICELIT: + case ir.OSLICELIT: break - case OARRAYLIT, OSTRUCTLIT: + case ir.OARRAYLIT, ir.OSTRUCTLIT: k := initKindDynamic if vstat == nil { // Generate both static and dynamic initializations. @@ -735,32 +736,32 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) { continue } - if vstat != nil && value.isGoConst() { // already set by copy from static value + if vstat != nil && isGoConst(value) { // already set by copy from static value continue } // build list of vauto[c] = expr setlineno(value) - a = nod(OAS, a, value) + a = ir.Nod(ir.OAS, a, value) a = typecheck(a, ctxStmt) - a = orderStmtInPlace(a, map[string][]*Node{}) + a = orderStmtInPlace(a, map[string][]*ir.Node{}) a = walkstmt(a) init.Append(a) } // make slice out of heap (6) - a = nod(OAS, var_, nod(OSLICE, vauto, nil)) + a = ir.Nod(ir.OAS, var_, ir.Nod(ir.OSLICE, vauto, nil)) a = typecheck(a, ctxStmt) - a = orderStmtInPlace(a, map[string][]*Node{}) + a = orderStmtInPlace(a, map[string][]*ir.Node{}) a = walkstmt(a) init.Append(a) } -func maplit(n *Node, m *Node, init *Nodes) { +func maplit(n *ir.Node, m *ir.Node, init *ir.Nodes) { // make the map var - a := nod(OMAKE, nil, nil) + a := ir.Nod(ir.OMAKE, nil, nil) a.Esc = n.Esc a.List.Set2(typenod(n.Type), nodintconst(int64(n.List.Len()))) litas(m, a, init) @@ -792,8 +793,8 @@ func maplit(n *Node, m *Node, init *Nodes) { vstatk := readonlystaticname(tk) vstate := readonlystaticname(te) - datak := nod(OARRAYLIT, nil, nil) - datae := nod(OARRAYLIT, nil, nil) + datak := ir.Nod(ir.OARRAYLIT, nil, nil) + datae := ir.Nod(ir.OARRAYLIT, nil, nil) for _, r := range entries { datak.List.Append(r.Left) datae.List.Append(r.Right) @@ -805,20 +806,20 @@ func maplit(n *Node, m *Node, init *Nodes) { // for i = 0; i < len(vstatk); i++ { // map[vstatk[i]] = vstate[i] // } - i := temp(types.Types[TINT]) - rhs := nod(OINDEX, vstate, i) + i := temp(types.Types[types.TINT]) + rhs := ir.Nod(ir.OINDEX, vstate, i) rhs.SetBounded(true) - kidx := nod(OINDEX, vstatk, i) + kidx := ir.Nod(ir.OINDEX, vstatk, i) kidx.SetBounded(true) - lhs := nod(OINDEX, m, kidx) + lhs := 
ir.Nod(ir.OINDEX, m, kidx) - zero := nod(OAS, i, nodintconst(0)) - cond := nod(OLT, i, nodintconst(tk.NumElem())) - incr := nod(OAS, i, nod(OADD, i, nodintconst(1))) - body := nod(OAS, lhs, rhs) + zero := ir.Nod(ir.OAS, i, nodintconst(0)) + cond := ir.Nod(ir.OLT, i, nodintconst(tk.NumElem())) + incr := ir.Nod(ir.OAS, i, ir.Nod(ir.OADD, i, nodintconst(1))) + body := ir.Nod(ir.OAS, lhs, rhs) - loop := nod(OFOR, cond, incr) + loop := ir.Nod(ir.OFOR, cond, incr) loop.Nbody.Set1(body) loop.Ninit.Set1(zero) @@ -839,88 +840,88 @@ func maplit(n *Node, m *Node, init *Nodes) { index, elem := r.Left, r.Right setlineno(index) - a := nod(OAS, tmpkey, index) + a := ir.Nod(ir.OAS, tmpkey, index) a = typecheck(a, ctxStmt) a = walkstmt(a) init.Append(a) setlineno(elem) - a = nod(OAS, tmpelem, elem) + a = ir.Nod(ir.OAS, tmpelem, elem) a = typecheck(a, ctxStmt) a = walkstmt(a) init.Append(a) setlineno(tmpelem) - a = nod(OAS, nod(OINDEX, m, tmpkey), tmpelem) + a = ir.Nod(ir.OAS, ir.Nod(ir.OINDEX, m, tmpkey), tmpelem) a = typecheck(a, ctxStmt) a = walkstmt(a) init.Append(a) } - a = nod(OVARKILL, tmpkey, nil) + a = ir.Nod(ir.OVARKILL, tmpkey, nil) a = typecheck(a, ctxStmt) init.Append(a) - a = nod(OVARKILL, tmpelem, nil) + a = ir.Nod(ir.OVARKILL, tmpelem, nil) a = typecheck(a, ctxStmt) init.Append(a) } -func anylit(n *Node, var_ *Node, init *Nodes) { +func anylit(n *ir.Node, var_ *ir.Node, init *ir.Nodes) { t := n.Type switch n.Op { default: base.Fatalf("anylit: not lit, op=%v node=%v", n.Op, n) - case ONAME, OMETHEXPR: - a := nod(OAS, var_, n) + case ir.ONAME, ir.OMETHEXPR: + a := ir.Nod(ir.OAS, var_, n) a = typecheck(a, ctxStmt) init.Append(a) - case OPTRLIT: + case ir.OPTRLIT: if !t.IsPtr() { base.Fatalf("anylit: not ptr") } - var r *Node + var r *ir.Node if n.Right != nil { // n.Right is stack temporary used as backing store. 
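
The OPTRLIT case here, written out by hand for the form where the literal's backing store is a stack temporary: zero the store first (the #18410 precaution in the hunk), then fill it through the pointer:

	package main

	import "fmt"

	type T struct{ a, b int }

	func main() {
		// p := &T{a: 1}, as anylit emits it in the stack-temporary case.
		var tmp T // zero the backing store first, just in case (#18410)
		p := &tmp
		p.a = 1 // anylit recurses and fills fields through the dereference
		fmt.Println(*p) // {1 0}
	}
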
- init.Append(nod(OAS, n.Right, nil)) // zero backing store, just in case (#18410) - r = nod(OADDR, n.Right, nil) + init.Append(ir.Nod(ir.OAS, n.Right, nil)) // zero backing store, just in case (#18410) + r = ir.Nod(ir.OADDR, n.Right, nil) r = typecheck(r, ctxExpr) } else { - r = nod(ONEW, nil, nil) + r = ir.Nod(ir.ONEW, nil, nil) r.SetTypecheck(1) r.Type = t r.Esc = n.Esc } r = walkexpr(r, init) - a := nod(OAS, var_, r) + a := ir.Nod(ir.OAS, var_, r) a = typecheck(a, ctxStmt) init.Append(a) - var_ = nod(ODEREF, var_, nil) + var_ = ir.Nod(ir.ODEREF, var_, nil) var_ = typecheck(var_, ctxExpr|ctxAssign) anylit(n.Left, var_, init) - case OSTRUCTLIT, OARRAYLIT: + case ir.OSTRUCTLIT, ir.OARRAYLIT: if !t.IsStruct() && !t.IsArray() { base.Fatalf("anylit: not struct/array") } - if var_.isSimpleName() && n.List.Len() > 4 { + if isSimpleName(var_) && n.List.Len() > 4 { // lay out static data vstat := readonlystaticname(t) ctxt := inInitFunction - if n.Op == OARRAYLIT { + if n.Op == ir.OARRAYLIT { ctxt = inNonInitFunction } fixedlit(ctxt, initKindStatic, n, vstat, init) // copy static to var - a := nod(OAS, var_, vstat) + a := ir.Nod(ir.OAS, var_, vstat) a = typecheck(a, ctxStmt) a = walkexpr(a, init) @@ -932,14 +933,14 @@ func anylit(n *Node, var_ *Node, init *Nodes) { } var components int64 - if n.Op == OARRAYLIT { + if n.Op == ir.OARRAYLIT { components = t.NumElem() } else { components = int64(t.NumFields()) } // initialization of an array or struct with unspecified components (missing fields or arrays) - if var_.isSimpleName() || int64(n.List.Len()) < components { - a := nod(OAS, var_, nil) + if isSimpleName(var_) || int64(n.List.Len()) < components { + a := ir.Nod(ir.OAS, var_, nil) a = typecheck(a, ctxStmt) a = walkexpr(a, init) init.Append(a) @@ -947,10 +948,10 @@ func anylit(n *Node, var_ *Node, init *Nodes) { fixedlit(inInitFunction, initKindLocalCode, n, var_, init) - case OSLICELIT: + case ir.OSLICELIT: slicelit(inInitFunction, n, var_, init) - case OMAPLIT: + case ir.OMAPLIT: if !t.IsMap() { base.Fatalf("anylit: not map") } @@ -958,7 +959,7 @@ func anylit(n *Node, var_ *Node, init *Nodes) { } } -func oaslit(n *Node, init *Nodes) bool { +func oaslit(n *ir.Node, init *ir.Nodes) bool { if n.Left == nil || n.Right == nil { // not a special composite literal assignment return false @@ -967,7 +968,7 @@ func oaslit(n *Node, init *Nodes) bool { // not a special composite literal assignment return false } - if !n.Left.isSimpleName() { + if !isSimpleName(n.Left) { // not a special composite literal assignment return false } @@ -981,7 +982,7 @@ func oaslit(n *Node, init *Nodes) bool { // not a special composite literal assignment return false - case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT: + case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT: if vmatch1(n.Left, n.Right) { // not a special composite literal assignment return false @@ -989,12 +990,12 @@ func oaslit(n *Node, init *Nodes) bool { anylit(n.Right, n.Left, init) } - n.Op = OEMPTY + n.Op = ir.OEMPTY n.Right = nil return true } -func getlit(lit *Node) int { +func getlit(lit *ir.Node) int { if smallintconst(lit) { return int(lit.Int64Val()) } @@ -1002,16 +1003,16 @@ func getlit(lit *Node) int { } // stataddr returns the static address of n, if n has one, or else nil. 
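
What stataddr computes, seen from the source side: for an addressable global expression built from field selections and constant indexes, symbol plus constant byte offset is known at compile time. The offsets below assume the usual layout (int64 size 8, int32 size 4):

	package main

	import "fmt"

	type inner struct{ a, b int32 }

	var g struct {
		pad int64
		in  [4]inner
	}

	// g.in[2].b sits at a constant offset from g's symbol:
	//   offset(in) + 2*sizeof(inner) + offset(b) = 8 + 16 + 4 = 28 bytes,
	// so this initializer needs no runtime code at all.
	var addr = &g.in[2].b

	func main() { fmt.Println(addr) }
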
-func stataddr(n *Node) *Node { +func stataddr(n *ir.Node) *ir.Node { if n == nil { return nil } switch n.Op { - case ONAME, OMETHEXPR: - return n.sepcopy() + case ir.ONAME, ir.OMETHEXPR: + return ir.SepCopy(n) - case ODOT: + case ir.ODOT: nam := stataddr(n.Left) if nam == nil { break @@ -1020,7 +1021,7 @@ func stataddr(n *Node) *Node { nam.Type = n.Type return nam - case OINDEX: + case ir.OINDEX: if n.Left.Type.IsSlice() { break } @@ -1045,7 +1046,7 @@ func stataddr(n *Node) *Node { return nil } -func (s *InitSchedule) initplan(n *Node) { +func (s *InitSchedule) initplan(n *ir.Node) { if s.initplans[n] != nil { return } @@ -1055,10 +1056,10 @@ func (s *InitSchedule) initplan(n *Node) { default: base.Fatalf("initplan") - case OARRAYLIT, OSLICELIT: + case ir.OARRAYLIT, ir.OSLICELIT: var k int64 for _, a := range n.List.Slice() { - if a.Op == OKEY { + if a.Op == ir.OKEY { k = indexconst(a.Left) if k < 0 { base.Fatalf("initplan arraylit: invalid index %v", a.Left) @@ -1069,9 +1070,9 @@ func (s *InitSchedule) initplan(n *Node) { k++ } - case OSTRUCTLIT: + case ir.OSTRUCTLIT: for _, a := range n.List.Slice() { - if a.Op != OSTRUCTKEY { + if a.Op != ir.OSTRUCTKEY { base.Fatalf("initplan structlit") } if a.Sym.IsBlank() { @@ -1080,9 +1081,9 @@ func (s *InitSchedule) initplan(n *Node) { s.addvalue(p, a.Xoffset, a.Left) } - case OMAPLIT: + case ir.OMAPLIT: for _, a := range n.List.Slice() { - if a.Op != OKEY { + if a.Op != ir.OKEY { base.Fatalf("initplan maplit") } s.addvalue(p, -1, a.Right) @@ -1090,7 +1091,7 @@ func (s *InitSchedule) initplan(n *Node) { } } -func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n *Node) { +func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n *ir.Node) { // special case: zero can be dropped entirely if isZero(n) { return @@ -1112,12 +1113,12 @@ func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n *Node) { p.E = append(p.E, InitEntry{Xoffset: xoffset, Expr: n}) } -func isZero(n *Node) bool { +func isZero(n *ir.Node) bool { switch n.Op { - case ONIL: + case ir.ONIL: return true - case OLITERAL: + case ir.OLITERAL: switch u := n.Val(); u.Kind() { case constant.String: return constant.StringVal(u) == "" @@ -1127,9 +1128,9 @@ func isZero(n *Node) bool { return constant.Sign(u) == 0 } - case OARRAYLIT: + case ir.OARRAYLIT: for _, n1 := range n.List.Slice() { - if n1.Op == OKEY { + if n1.Op == ir.OKEY { n1 = n1.Right } if !isZero(n1) { @@ -1138,7 +1139,7 @@ func isZero(n *Node) bool { } return true - case OSTRUCTLIT: + case ir.OSTRUCTLIT: for _, n1 := range n.List.Slice() { if !isZero(n1.Left) { return false @@ -1150,24 +1151,24 @@ func isZero(n *Node) bool { return false } -func isvaluelit(n *Node) bool { - return n.Op == OARRAYLIT || n.Op == OSTRUCTLIT +func isvaluelit(n *ir.Node) bool { + return n.Op == ir.OARRAYLIT || n.Op == ir.OSTRUCTLIT } -func genAsStatic(as *Node) { +func genAsStatic(as *ir.Node) { if as.Left.Type == nil { base.Fatalf("genAsStatic as.Left not typechecked") } nam := stataddr(as.Left) - if nam == nil || (nam.Class() != PEXTERN && as.Left != nblank) { + if nam == nil || (nam.Class() != ir.PEXTERN && as.Left != ir.BlankNode) { base.Fatalf("genAsStatic: lhs %v", as.Left) } switch { - case as.Right.Op == OLITERAL: + case as.Right.Op == ir.OLITERAL: litsym(nam, as.Right, int(as.Right.Type.Width)) - case (as.Right.Op == ONAME || as.Right.Op == OMETHEXPR) && as.Right.Class() == PFUNC: + case (as.Right.Op == ir.ONAME || as.Right.Op == ir.OMETHEXPR) && as.Right.Class() == ir.PFUNC: pfuncsym(nam, as.Right) default: 
base.Fatalf("genAsStatic: rhs %v", as.Right) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index e892a01da08bf..658ea28fbe228 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -16,6 +16,7 @@ import ( "bufio" "bytes" "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/ssa" "cmd/compile/internal/types" "cmd/internal/obj" @@ -39,7 +40,7 @@ const ssaDumpFile = "ssa.html" const maxOpenDefers = 8 // ssaDumpInlined holds all inlined functions when ssaDump contains a function name. -var ssaDumpInlined []*Node +var ssaDumpInlined []*ir.Node func initssaconfig() { types_ := ssa.NewTypes() @@ -50,16 +51,16 @@ func initssaconfig() { // Generate a few pointer types that are uncommon in the frontend but common in the backend. // Caching is disabled in the backend, so generating these here avoids allocations. - _ = types.NewPtr(types.Types[TINTER]) // *interface{} - _ = types.NewPtr(types.NewPtr(types.Types[TSTRING])) // **string - _ = types.NewPtr(types.NewSlice(types.Types[TINTER])) // *[]interface{} - _ = types.NewPtr(types.NewPtr(types.Bytetype)) // **byte - _ = types.NewPtr(types.NewSlice(types.Bytetype)) // *[]byte - _ = types.NewPtr(types.NewSlice(types.Types[TSTRING])) // *[]string - _ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[TUINT8]))) // ***uint8 - _ = types.NewPtr(types.Types[TINT16]) // *int16 - _ = types.NewPtr(types.Types[TINT64]) // *int64 - _ = types.NewPtr(types.Errortype) // *error + _ = types.NewPtr(types.Types[types.TINTER]) // *interface{} + _ = types.NewPtr(types.NewPtr(types.Types[types.TSTRING])) // **string + _ = types.NewPtr(types.NewSlice(types.Types[types.TINTER])) // *[]interface{} + _ = types.NewPtr(types.NewPtr(types.Bytetype)) // **byte + _ = types.NewPtr(types.NewSlice(types.Bytetype)) // *[]byte + _ = types.NewPtr(types.NewSlice(types.Types[types.TSTRING])) // *[]string + _ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[types.TUINT8]))) // ***uint8 + _ = types.NewPtr(types.Types[types.TINT16]) // *int16 + _ = types.NewPtr(types.Types[types.TINT64]) // *int64 + _ = types.NewPtr(types.Errortype) // *error types.NewPtrCacheEnabled = false ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, base.Ctxt, base.Flag.N == 0) ssaConfig.SoftFloat = thearch.SoftFloat @@ -185,9 +186,9 @@ func initssaconfig() { // function/method/interface call), where the receiver of a method call is // considered as the 0th parameter. This does not include the receiver of an // interface call. -func getParam(n *Node, i int) *types.Field { +func getParam(n *ir.Node, i int) *types.Field { t := n.Left.Type - if n.Op == OCALLMETH { + if n.Op == ir.OCALLMETH { if i == 0 { return t.Recv() } @@ -241,8 +242,8 @@ func dvarint(x *obj.LSym, off int, v int64) int { // - Size of the argument // - Offset of where argument should be placed in the args frame when making call func (s *state) emitOpenDeferInfo() { - x := base.Ctxt.Lookup(s.curfn.Func.lsym.Name + ".opendefer") - s.curfn.Func.lsym.Func().OpenCodedDeferInfo = x + x := base.Ctxt.Lookup(s.curfn.Func.LSym.Name + ".opendefer") + s.curfn.Func.LSym.Func().OpenCodedDeferInfo = x off := 0 // Compute maxargsize (max size of arguments for all defers) @@ -288,8 +289,8 @@ func (s *state) emitOpenDeferInfo() { // buildssa builds an SSA function for fn. // worker indicates which of the backend workers is doing the processing. 
-func buildssa(fn *Node, worker int) *ssa.Func { - name := fn.funcname() +func buildssa(fn *ir.Node, worker int) *ssa.Func { + name := ir.FuncName(fn) printssa := false if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", or a package.name e.g. "compress/gzip.(*Reader).Reset" printssa = name == ssaDump || base.Ctxt.Pkgpath+"."+name == ssaDump @@ -297,9 +298,9 @@ func buildssa(fn *Node, worker int) *ssa.Func { var astBuf *bytes.Buffer if printssa { astBuf = &bytes.Buffer{} - fdumplist(astBuf, "buildssa-enter", fn.Func.Enter) - fdumplist(astBuf, "buildssa-body", fn.Nbody) - fdumplist(astBuf, "buildssa-exit", fn.Func.Exit) + ir.FDumpList(astBuf, "buildssa-enter", fn.Func.Enter) + ir.FDumpList(astBuf, "buildssa-body", fn.Nbody) + ir.FDumpList(astBuf, "buildssa-exit", fn.Func.Exit) if ssaDumpStdout { fmt.Println("generating SSA for", name) fmt.Print(astBuf.String()) @@ -311,7 +312,7 @@ func buildssa(fn *Node, worker int) *ssa.Func { defer s.popLine() s.hasdefer = fn.Func.HasDefer() - if fn.Func.Pragma&CgoUnsafeArgs != 0 { + if fn.Func.Pragma&ir.CgoUnsafeArgs != 0 { s.cgoUnsafeArgs = true } @@ -330,7 +331,7 @@ func buildssa(fn *Node, worker int) *ssa.Func { s.f.Name = name s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH") s.f.PrintOrHtmlSSA = printssa - if fn.Func.Pragma&Nosplit != 0 { + if fn.Func.Pragma&ir.Nosplit != 0 { s.f.NoSplit = true } s.panics = map[funcLine]*ssa.Block{} @@ -355,8 +356,8 @@ func buildssa(fn *Node, worker int) *ssa.Func { // Allocate starting values s.labels = map[string]*ssaLabel{} - s.labeledNodes = map[*Node]*ssaLabel{} - s.fwdVars = map[*Node]*ssa.Value{} + s.labeledNodes = map[*ir.Node]*ssaLabel{} + s.fwdVars = map[*ir.Node]*ssa.Value{} s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem) s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.Func.OpenCodedDeferDisallowed() @@ -376,7 +377,7 @@ func buildssa(fn *Node, worker int) *ssa.Func { s.hasOpenDefers = false } if s.hasOpenDefers && - s.curfn.Func.numReturns*s.curfn.Func.numDefers > 15 { + s.curfn.Func.NumReturns*s.curfn.Func.NumDefers > 15 { // Since we are generating defer calls at every exit for // open-coded defers, skip doing open-coded defers if there are // too many returns (especially if there are multiple defers). @@ -385,8 +386,8 @@ func buildssa(fn *Node, worker int) *ssa.Func { s.hasOpenDefers = false } - s.sp = s.entryNewValue0(ssa.OpSP, types.Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead - s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR]) + s.sp = s.entryNewValue0(ssa.OpSP, types.Types[types.TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead + s.sb = s.entryNewValue0(ssa.OpSB, types.Types[types.TUINTPTR]) s.startBlock(s.f.Entry) s.vars[memVar] = s.startmem @@ -394,13 +395,13 @@ func buildssa(fn *Node, worker int) *ssa.Func { // Create the deferBits variable and stack slot. deferBits is a // bitmask showing which of the open-coded defers in this function // have been activated. 
- deferBitsTemp := tempAt(src.NoXPos, s.curfn, types.Types[TUINT8]) + deferBitsTemp := tempAt(src.NoXPos, s.curfn, types.Types[types.TUINT8]) s.deferBitsTemp = deferBitsTemp // For this value, AuxInt is initialized to zero by default - startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[TUINT8]) + startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[types.TUINT8]) s.vars[deferBitsVar] = startDeferBits s.deferBitsAddr = s.addr(deferBitsTemp) - s.store(types.Types[TUINT8], s.deferBitsAddr, startDeferBits) + s.store(types.Types[types.TUINT8], s.deferBitsAddr, startDeferBits) // Make sure that the deferBits stack slot is kept alive (for use // by panics) and stores to deferBits are not eliminated, even if // all checking code on deferBits in the function exit can be @@ -410,15 +411,15 @@ func buildssa(fn *Node, worker int) *ssa.Func { } // Generate addresses of local declarations - s.decladdrs = map[*Node]*ssa.Value{} + s.decladdrs = map[*ir.Node]*ssa.Value{} var args []ssa.Param var results []ssa.Param for _, n := range fn.Func.Dcl { switch n.Class() { - case PPARAM: + case ir.PPARAM: s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem) args = append(args, ssa.Param{Type: n.Type, Offset: int32(n.Xoffset)}) - case PPARAMOUT: + case ir.PPARAMOUT: s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem) results = append(results, ssa.Param{Type: n.Type, Offset: int32(n.Xoffset)}) if s.canSSA(n) { @@ -427,12 +428,12 @@ func buildssa(fn *Node, worker int) *ssa.Func { // the function. s.returns = append(s.returns, n) } - case PAUTO: + case ir.PAUTO: // processed at each use, to prevent Addr coming // before the decl. - case PAUTOHEAP: + case ir.PAUTOHEAP: // moved to heap - already handled by frontend - case PFUNC: + case ir.PFUNC: // local function - already handled by frontend default: s.Fatalf("local variable with class %v unimplemented", n.Class()) @@ -441,7 +442,7 @@ func buildssa(fn *Node, worker int) *ssa.Func { // Populate SSAable arguments. for _, n := range fn.Func.Dcl { - if n.Class() == PPARAM && s.canSSA(n) { + if n.Class() == ir.PPARAM && s.canSSA(n) { v := s.newValue0A(ssa.OpArg, n.Type, n) s.vars[n] = v s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself. @@ -477,7 +478,7 @@ func buildssa(fn *Node, worker int) *ssa.Func { return s.f } -func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *Node) { +func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Node) { // Read sources of target function fn. fname := base.Ctxt.PosTable.Pos(fn.Pos).Filename() targetFn, err := readFuncLines(fname, fn.Pos.Line(), fn.Func.Endlineno.Line()) @@ -565,24 +566,24 @@ func (s *state) updateUnsetPredPos(b *ssa.Block) { // Information about each open-coded defer. type openDeferInfo struct { // The ODEFER node representing the function call of the defer - n *Node + n *ir.Node // If defer call is closure call, the address of the argtmp where the // closure is stored. closure *ssa.Value // The node representing the argtmp where the closure is stored - used for // function, method, or interface call, to store a closure that panic // processing can use for this defer. 
- closureNode *Node + closureNode *ir.Node // If defer call is interface call, the address of the argtmp where the // receiver is stored rcvr *ssa.Value // The node representing the argtmp where the receiver is stored - rcvrNode *Node + rcvrNode *ir.Node // The addresses of the argtmps where the evaluated arguments of the defer // function call are stored. argVals []*ssa.Value // The nodes representing the argtmps where the args of the defer are stored - argNodes []*Node + argNodes []*ir.Node } type state struct { @@ -593,11 +594,11 @@ type state struct { f *ssa.Func // Node for function - curfn *Node + curfn *ir.Node // labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f labels map[string]*ssaLabel - labeledNodes map[*Node]*ssaLabel + labeledNodes map[*ir.Node]*ssaLabel // unlabeled break and continue statement tracking breakTo *ssa.Block // current target for plain break statement @@ -609,18 +610,18 @@ type state struct { // variable assignments in the current block (map from variable symbol to ssa value) // *Node is the unique identifier (an ONAME Node) for the variable. // TODO: keep a single varnum map, then make all of these maps slices instead? - vars map[*Node]*ssa.Value + vars map[*ir.Node]*ssa.Value // fwdVars are variables that are used before they are defined in the current block. // This map exists just to coalesce multiple references into a single FwdRef op. // *Node is the unique identifier (an ONAME Node) for the variable. - fwdVars map[*Node]*ssa.Value + fwdVars map[*ir.Node]*ssa.Value // all defined variables at the end of each block. Indexed by block ID. - defvars []map[*Node]*ssa.Value + defvars []map[*ir.Node]*ssa.Value // addresses of PPARAM and PPARAMOUT variables. - decladdrs map[*Node]*ssa.Value + decladdrs map[*ir.Node]*ssa.Value // starting values. Memory, stack pointer, and globals pointer startmem *ssa.Value @@ -628,7 +629,7 @@ type state struct { sb *ssa.Value // value representing address of where deferBits autotmp is stored deferBitsAddr *ssa.Value - deferBitsTemp *Node + deferBitsTemp *ir.Node // line number stack. The current line number is top of stack line []src.XPos @@ -640,7 +641,7 @@ type state struct { panics map[funcLine]*ssa.Block // list of PPARAMOUT (return) variables. - returns []*Node + returns []*ir.Node cgoUnsafeArgs bool hasdefer bool // whether the function contains a defer statement @@ -692,8 +693,8 @@ func (s *state) Fatalf(msg string, args ...interface{}) { func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) 
} func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() } -func ssaMarker(name string) *Node { - return newname(&types.Sym{Name: name}) +func ssaMarker(name string) *ir.Node { + return NewName(&types.Sym{Name: name}) } var ( @@ -716,7 +717,7 @@ func (s *state) startBlock(b *ssa.Block) { s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock) } s.curBlock = b - s.vars = map[*Node]*ssa.Value{} + s.vars = map[*ir.Node]*ssa.Value{} for n := range s.fwdVars { delete(s.fwdVars, n) } @@ -920,7 +921,7 @@ func (s *state) constEmptyString(t *types.Type) *ssa.Value { return s.f.ConstEmptyString(t) } func (s *state) constBool(c bool) *ssa.Value { - return s.f.ConstBool(types.Types[TBOOL], c) + return s.f.ConstBool(types.Types[types.TBOOL], c) } func (s *state) constInt8(t *types.Type, c int8) *ssa.Value { return s.f.ConstInt8(t, c) @@ -1017,7 +1018,7 @@ func (s *state) instrument(t *types.Type, addr *ssa.Value, wr bool) { args := []*ssa.Value{addr} if needWidth { - args = append(args, s.constInt(types.Types[TUINTPTR], w)) + args = append(args, s.constInt(types.Types[types.TUINTPTR], w)) } s.rtcall(fn, true, nil, args...) } @@ -1051,15 +1052,15 @@ func (s *state) move(t *types.Type, dst, src *ssa.Value) { } // stmtList converts the statement list n to SSA and adds it to s. -func (s *state) stmtList(l Nodes) { +func (s *state) stmtList(l ir.Nodes) { for _, n := range l.Slice() { s.stmt(n) } } // stmt converts the statement n to SSA and adds it to s. -func (s *state) stmt(n *Node) { - if !(n.Op == OVARKILL || n.Op == OVARLIVE || n.Op == OVARDEF) { +func (s *state) stmt(n *ir.Node) { + if !(n.Op == ir.OVARKILL || n.Op == ir.OVARLIVE || n.Op == ir.OVARDEF) { // OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging. s.pushLine(n.Pos) defer s.popLine() @@ -1067,30 +1068,30 @@ func (s *state) stmt(n *Node) { // If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere), // then this code is dead. Stop here. - if s.curBlock == nil && n.Op != OLABEL { + if s.curBlock == nil && n.Op != ir.OLABEL { return } s.stmtList(n.Ninit) switch n.Op { - case OBLOCK: + case ir.OBLOCK: s.stmtList(n.List) // No-ops - case OEMPTY, ODCLCONST, ODCLTYPE, OFALL: + case ir.OEMPTY, ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL: // Expression statements - case OCALLFUNC: + case ir.OCALLFUNC: if isIntrinsicCall(n) { s.intrinsicCall(n) return } fallthrough - case OCALLMETH, OCALLINTER: + case ir.OCALLMETH, ir.OCALLINTER: s.callResult(n, callNormal) - if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class() == PFUNC { + if n.Op == ir.OCALLFUNC && n.Left.Op == ir.ONAME && n.Left.Class() == ir.PFUNC { if fn := n.Left.Sym.Name; base.Flag.CompilingRuntime && fn == "throw" || n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") { m := s.mem() @@ -1102,7 +1103,7 @@ func (s *state) stmt(n *Node) { // go through SSA. 
} } - case ODEFER: + case ir.ODEFER: if base.Debug.Defer > 0 { var defertype string if s.hasOpenDefers { @@ -1123,10 +1124,10 @@ func (s *state) stmt(n *Node) { } s.callResult(n.Left, d) } - case OGO: + case ir.OGO: s.callResult(n.Left, callGo) - case OAS2DOTTYPE: + case ir.OAS2DOTTYPE: res, resok := s.dottype(n.Right, true) deref := false if !canSSAType(n.Right.Type) { @@ -1147,7 +1148,7 @@ func (s *state) stmt(n *Node) { s.assign(n.List.Second(), resok, false, 0) return - case OAS2FUNC: + case ir.OAS2FUNC: // We come here only when it is an intrinsic call returning two values. if !isIntrinsicCall(n.Right) { s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Right) @@ -1159,17 +1160,17 @@ func (s *state) stmt(n *Node) { s.assign(n.List.Second(), v2, false, 0) return - case ODCL: - if n.Left.Class() == PAUTOHEAP { + case ir.ODCL: + if n.Left.Class() == ir.PAUTOHEAP { s.Fatalf("DCL %v", n) } - case OLABEL: + case ir.OLABEL: sym := n.Sym lab := s.label(sym) // Associate label with its control flow node, if any - if ctl := n.labeledControl(); ctl != nil { + if ctl := labeledControl(n); ctl != nil { s.labeledNodes[ctl] = lab } @@ -1186,7 +1187,7 @@ func (s *state) stmt(n *Node) { } s.startBlock(lab.target) - case OGOTO: + case ir.OGOTO: sym := n.Sym lab := s.label(sym) @@ -1198,8 +1199,8 @@ func (s *state) stmt(n *Node) { b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block. b.AddEdgeTo(lab.target) - case OAS: - if n.Left == n.Right && n.Left.Op == ONAME { + case ir.OAS: + if n.Left == n.Right && n.Left.Op == ir.ONAME { // An x=x assignment. No point in doing anything // here. In addition, skipping this assignment // prevents generating: @@ -1214,7 +1215,7 @@ func (s *state) stmt(n *Node) { rhs := n.Right if rhs != nil { switch rhs.Op { - case OSTRUCTLIT, OARRAYLIT, OSLICELIT: + case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT: // All literals with nonzero fields have already been // rewritten during walk. Any that remain are just T{} // or equivalents. Use the zero value. @@ -1222,7 +1223,7 @@ func (s *state) stmt(n *Node) { s.Fatalf("literal with nonzero value in SSA: %v", rhs) } rhs = nil - case OAPPEND: + case ir.OAPPEND: // Check whether we're writing the result of an append back to the same slice. // If so, we handle it specially to avoid write barriers on the fast // (non-growth) path. @@ -1246,7 +1247,7 @@ func (s *state) stmt(n *Node) { } } - if n.Left.isBlank() { + if ir.IsBlank(n.Left) { // _ = rhs // Just evaluate rhs for side-effects. if rhs != nil { @@ -1279,11 +1280,11 @@ func (s *state) stmt(n *Node) { } var skip skipMask - if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) { + if rhs != nil && (rhs.Op == ir.OSLICE || rhs.Op == ir.OSLICE3 || rhs.Op == ir.OSLICESTR) && samesafeexpr(rhs.Left, n.Left) { // We're assigning a slicing operation back to its source. // Don't write back fields we aren't changing. See issue #14855. i, j, k := rhs.SliceBounds() - if i != nil && (i.Op == OLITERAL && i.Val().Kind() == constant.Int && i.Int64Val() == 0) { + if i != nil && (i.Op == ir.OLITERAL && i.Val().Kind() == constant.Int && i.Int64Val() == 0) { // [0:...] is the same as [:...] 
i = nil } @@ -1310,8 +1311,8 @@ func (s *state) stmt(n *Node) { s.assign(n.Left, r, deref, skip) - case OIF: - if Isconst(n.Left, constant.Bool) { + case ir.OIF: + if ir.IsConst(n.Left, constant.Bool) { s.stmtList(n.Left.Ninit) if n.Left.BoolVal() { s.stmtList(n.Nbody) @@ -1356,25 +1357,25 @@ func (s *state) stmt(n *Node) { } s.startBlock(bEnd) - case ORETURN: + case ir.ORETURN: s.stmtList(n.List) b := s.exit() b.Pos = s.lastPos.WithIsStmt() - case ORETJMP: + case ir.ORETJMP: s.stmtList(n.List) b := s.exit() b.Kind = ssa.BlockRetJmp // override BlockRet b.Aux = n.Sym.Linksym() - case OCONTINUE, OBREAK: + case ir.OCONTINUE, ir.OBREAK: var to *ssa.Block if n.Sym == nil { // plain break/continue switch n.Op { - case OCONTINUE: + case ir.OCONTINUE: to = s.continueTo - case OBREAK: + case ir.OBREAK: to = s.breakTo } } else { @@ -1382,9 +1383,9 @@ func (s *state) stmt(n *Node) { sym := n.Sym lab := s.label(sym) switch n.Op { - case OCONTINUE: + case ir.OCONTINUE: to = lab.continueTarget - case OBREAK: + case ir.OBREAK: to = lab.breakTarget } } @@ -1393,7 +1394,7 @@ func (s *state) stmt(n *Node) { b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block. b.AddEdgeTo(to) - case OFOR, OFORUNTIL: + case ir.OFOR, ir.OFORUNTIL: // OFOR: for Ninit; Left; Right { Nbody } // cond (Left); body (Nbody); incr (Right) // @@ -1409,7 +1410,7 @@ func (s *state) stmt(n *Node) { // first, jump to condition test (OFOR) or body (OFORUNTIL) b := s.endBlock() - if n.Op == OFOR { + if n.Op == ir.OFOR { b.AddEdgeTo(bCond) // generate code to test condition s.startBlock(bCond) @@ -1459,12 +1460,12 @@ func (s *state) stmt(n *Node) { if n.Right != nil { s.stmt(n.Right) } - if n.Op == OFOR { + if n.Op == ir.OFOR { if b := s.endBlock(); b != nil { b.AddEdgeTo(bCond) // It can happen that bIncr ends in a block containing only VARKILL, // and that muddles the debugging experience. - if n.Op != OFORUNTIL && b.Pos == src.NoXPos { + if n.Op != ir.OFORUNTIL && b.Pos == src.NoXPos { b.Pos = bCond.Pos } } @@ -1481,7 +1482,7 @@ func (s *state) stmt(n *Node) { s.startBlock(bEnd) - case OSWITCH, OSELECT: + case ir.OSWITCH, ir.OSELECT: // These have been mostly rewritten by the front end into their Nbody fields. // Our main task is to correctly hook up any break statements. bEnd := s.f.NewBlock(ssa.BlockPlain) @@ -1512,11 +1513,11 @@ func (s *state) stmt(n *Node) { } s.startBlock(bEnd) - case OVARDEF: + case ir.OVARDEF: if !s.canSSA(n.Left) { s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left, s.mem(), false) } - case OVARKILL: + case ir.OVARKILL: // Insert a varkill op to record that a variable is no longer live. // We only care about liveness info at call sites, so putting the // varkill in the store chain is enough to keep it correctly ordered @@ -1525,23 +1526,23 @@ func (s *state) stmt(n *Node) { s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left, s.mem(), false) } - case OVARLIVE: + case ir.OVARLIVE: // Insert a varlive op to record that a variable is still live. 
if !n.Left.Name.Addrtaken() { s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left) } switch n.Left.Class() { - case PAUTO, PPARAM, PPARAMOUT: + case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT: default: s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left) } s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem()) - case OCHECKNIL: + case ir.OCHECKNIL: p := s.expr(n.Left) s.nilCheck(p) - case OINLMARK: + case ir.OINLMARK: s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Xoffset, s.mem()) default: @@ -1600,180 +1601,180 @@ func (s *state) exit() *ssa.Block { } type opAndType struct { - op Op + op ir.Op etype types.EType } var opToSSA = map[opAndType]ssa.Op{ - opAndType{OADD, TINT8}: ssa.OpAdd8, - opAndType{OADD, TUINT8}: ssa.OpAdd8, - opAndType{OADD, TINT16}: ssa.OpAdd16, - opAndType{OADD, TUINT16}: ssa.OpAdd16, - opAndType{OADD, TINT32}: ssa.OpAdd32, - opAndType{OADD, TUINT32}: ssa.OpAdd32, - opAndType{OADD, TINT64}: ssa.OpAdd64, - opAndType{OADD, TUINT64}: ssa.OpAdd64, - opAndType{OADD, TFLOAT32}: ssa.OpAdd32F, - opAndType{OADD, TFLOAT64}: ssa.OpAdd64F, - - opAndType{OSUB, TINT8}: ssa.OpSub8, - opAndType{OSUB, TUINT8}: ssa.OpSub8, - opAndType{OSUB, TINT16}: ssa.OpSub16, - opAndType{OSUB, TUINT16}: ssa.OpSub16, - opAndType{OSUB, TINT32}: ssa.OpSub32, - opAndType{OSUB, TUINT32}: ssa.OpSub32, - opAndType{OSUB, TINT64}: ssa.OpSub64, - opAndType{OSUB, TUINT64}: ssa.OpSub64, - opAndType{OSUB, TFLOAT32}: ssa.OpSub32F, - opAndType{OSUB, TFLOAT64}: ssa.OpSub64F, - - opAndType{ONOT, TBOOL}: ssa.OpNot, - - opAndType{ONEG, TINT8}: ssa.OpNeg8, - opAndType{ONEG, TUINT8}: ssa.OpNeg8, - opAndType{ONEG, TINT16}: ssa.OpNeg16, - opAndType{ONEG, TUINT16}: ssa.OpNeg16, - opAndType{ONEG, TINT32}: ssa.OpNeg32, - opAndType{ONEG, TUINT32}: ssa.OpNeg32, - opAndType{ONEG, TINT64}: ssa.OpNeg64, - opAndType{ONEG, TUINT64}: ssa.OpNeg64, - opAndType{ONEG, TFLOAT32}: ssa.OpNeg32F, - opAndType{ONEG, TFLOAT64}: ssa.OpNeg64F, - - opAndType{OBITNOT, TINT8}: ssa.OpCom8, - opAndType{OBITNOT, TUINT8}: ssa.OpCom8, - opAndType{OBITNOT, TINT16}: ssa.OpCom16, - opAndType{OBITNOT, TUINT16}: ssa.OpCom16, - opAndType{OBITNOT, TINT32}: ssa.OpCom32, - opAndType{OBITNOT, TUINT32}: ssa.OpCom32, - opAndType{OBITNOT, TINT64}: ssa.OpCom64, - opAndType{OBITNOT, TUINT64}: ssa.OpCom64, - - opAndType{OIMAG, TCOMPLEX64}: ssa.OpComplexImag, - opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag, - opAndType{OREAL, TCOMPLEX64}: ssa.OpComplexReal, - opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal, - - opAndType{OMUL, TINT8}: ssa.OpMul8, - opAndType{OMUL, TUINT8}: ssa.OpMul8, - opAndType{OMUL, TINT16}: ssa.OpMul16, - opAndType{OMUL, TUINT16}: ssa.OpMul16, - opAndType{OMUL, TINT32}: ssa.OpMul32, - opAndType{OMUL, TUINT32}: ssa.OpMul32, - opAndType{OMUL, TINT64}: ssa.OpMul64, - opAndType{OMUL, TUINT64}: ssa.OpMul64, - opAndType{OMUL, TFLOAT32}: ssa.OpMul32F, - opAndType{OMUL, TFLOAT64}: ssa.OpMul64F, - - opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F, - opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F, - - opAndType{ODIV, TINT8}: ssa.OpDiv8, - opAndType{ODIV, TUINT8}: ssa.OpDiv8u, - opAndType{ODIV, TINT16}: ssa.OpDiv16, - opAndType{ODIV, TUINT16}: ssa.OpDiv16u, - opAndType{ODIV, TINT32}: ssa.OpDiv32, - opAndType{ODIV, TUINT32}: ssa.OpDiv32u, - opAndType{ODIV, TINT64}: ssa.OpDiv64, - opAndType{ODIV, TUINT64}: ssa.OpDiv64u, - - opAndType{OMOD, TINT8}: ssa.OpMod8, - opAndType{OMOD, TUINT8}: ssa.OpMod8u, - opAndType{OMOD, TINT16}: ssa.OpMod16, - opAndType{OMOD, TUINT16}: ssa.OpMod16u, - opAndType{OMOD, TINT32}: ssa.OpMod32, - opAndType{OMOD, 
TUINT32}: ssa.OpMod32u, - opAndType{OMOD, TINT64}: ssa.OpMod64, - opAndType{OMOD, TUINT64}: ssa.OpMod64u, - - opAndType{OAND, TINT8}: ssa.OpAnd8, - opAndType{OAND, TUINT8}: ssa.OpAnd8, - opAndType{OAND, TINT16}: ssa.OpAnd16, - opAndType{OAND, TUINT16}: ssa.OpAnd16, - opAndType{OAND, TINT32}: ssa.OpAnd32, - opAndType{OAND, TUINT32}: ssa.OpAnd32, - opAndType{OAND, TINT64}: ssa.OpAnd64, - opAndType{OAND, TUINT64}: ssa.OpAnd64, - - opAndType{OOR, TINT8}: ssa.OpOr8, - opAndType{OOR, TUINT8}: ssa.OpOr8, - opAndType{OOR, TINT16}: ssa.OpOr16, - opAndType{OOR, TUINT16}: ssa.OpOr16, - opAndType{OOR, TINT32}: ssa.OpOr32, - opAndType{OOR, TUINT32}: ssa.OpOr32, - opAndType{OOR, TINT64}: ssa.OpOr64, - opAndType{OOR, TUINT64}: ssa.OpOr64, - - opAndType{OXOR, TINT8}: ssa.OpXor8, - opAndType{OXOR, TUINT8}: ssa.OpXor8, - opAndType{OXOR, TINT16}: ssa.OpXor16, - opAndType{OXOR, TUINT16}: ssa.OpXor16, - opAndType{OXOR, TINT32}: ssa.OpXor32, - opAndType{OXOR, TUINT32}: ssa.OpXor32, - opAndType{OXOR, TINT64}: ssa.OpXor64, - opAndType{OXOR, TUINT64}: ssa.OpXor64, - - opAndType{OEQ, TBOOL}: ssa.OpEqB, - opAndType{OEQ, TINT8}: ssa.OpEq8, - opAndType{OEQ, TUINT8}: ssa.OpEq8, - opAndType{OEQ, TINT16}: ssa.OpEq16, - opAndType{OEQ, TUINT16}: ssa.OpEq16, - opAndType{OEQ, TINT32}: ssa.OpEq32, - opAndType{OEQ, TUINT32}: ssa.OpEq32, - opAndType{OEQ, TINT64}: ssa.OpEq64, - opAndType{OEQ, TUINT64}: ssa.OpEq64, - opAndType{OEQ, TINTER}: ssa.OpEqInter, - opAndType{OEQ, TSLICE}: ssa.OpEqSlice, - opAndType{OEQ, TFUNC}: ssa.OpEqPtr, - opAndType{OEQ, TMAP}: ssa.OpEqPtr, - opAndType{OEQ, TCHAN}: ssa.OpEqPtr, - opAndType{OEQ, TPTR}: ssa.OpEqPtr, - opAndType{OEQ, TUINTPTR}: ssa.OpEqPtr, - opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr, - opAndType{OEQ, TFLOAT64}: ssa.OpEq64F, - opAndType{OEQ, TFLOAT32}: ssa.OpEq32F, - - opAndType{ONE, TBOOL}: ssa.OpNeqB, - opAndType{ONE, TINT8}: ssa.OpNeq8, - opAndType{ONE, TUINT8}: ssa.OpNeq8, - opAndType{ONE, TINT16}: ssa.OpNeq16, - opAndType{ONE, TUINT16}: ssa.OpNeq16, - opAndType{ONE, TINT32}: ssa.OpNeq32, - opAndType{ONE, TUINT32}: ssa.OpNeq32, - opAndType{ONE, TINT64}: ssa.OpNeq64, - opAndType{ONE, TUINT64}: ssa.OpNeq64, - opAndType{ONE, TINTER}: ssa.OpNeqInter, - opAndType{ONE, TSLICE}: ssa.OpNeqSlice, - opAndType{ONE, TFUNC}: ssa.OpNeqPtr, - opAndType{ONE, TMAP}: ssa.OpNeqPtr, - opAndType{ONE, TCHAN}: ssa.OpNeqPtr, - opAndType{ONE, TPTR}: ssa.OpNeqPtr, - opAndType{ONE, TUINTPTR}: ssa.OpNeqPtr, - opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr, - opAndType{ONE, TFLOAT64}: ssa.OpNeq64F, - opAndType{ONE, TFLOAT32}: ssa.OpNeq32F, - - opAndType{OLT, TINT8}: ssa.OpLess8, - opAndType{OLT, TUINT8}: ssa.OpLess8U, - opAndType{OLT, TINT16}: ssa.OpLess16, - opAndType{OLT, TUINT16}: ssa.OpLess16U, - opAndType{OLT, TINT32}: ssa.OpLess32, - opAndType{OLT, TUINT32}: ssa.OpLess32U, - opAndType{OLT, TINT64}: ssa.OpLess64, - opAndType{OLT, TUINT64}: ssa.OpLess64U, - opAndType{OLT, TFLOAT64}: ssa.OpLess64F, - opAndType{OLT, TFLOAT32}: ssa.OpLess32F, - - opAndType{OLE, TINT8}: ssa.OpLeq8, - opAndType{OLE, TUINT8}: ssa.OpLeq8U, - opAndType{OLE, TINT16}: ssa.OpLeq16, - opAndType{OLE, TUINT16}: ssa.OpLeq16U, - opAndType{OLE, TINT32}: ssa.OpLeq32, - opAndType{OLE, TUINT32}: ssa.OpLeq32U, - opAndType{OLE, TINT64}: ssa.OpLeq64, - opAndType{OLE, TUINT64}: ssa.OpLeq64U, - opAndType{OLE, TFLOAT64}: ssa.OpLeq64F, - opAndType{OLE, TFLOAT32}: ssa.OpLeq32F, + opAndType{ir.OADD, types.TINT8}: ssa.OpAdd8, + opAndType{ir.OADD, types.TUINT8}: ssa.OpAdd8, + opAndType{ir.OADD, types.TINT16}: ssa.OpAdd16, + opAndType{ir.OADD, types.TUINT16}: 
ssa.OpAdd16, + opAndType{ir.OADD, types.TINT32}: ssa.OpAdd32, + opAndType{ir.OADD, types.TUINT32}: ssa.OpAdd32, + opAndType{ir.OADD, types.TINT64}: ssa.OpAdd64, + opAndType{ir.OADD, types.TUINT64}: ssa.OpAdd64, + opAndType{ir.OADD, types.TFLOAT32}: ssa.OpAdd32F, + opAndType{ir.OADD, types.TFLOAT64}: ssa.OpAdd64F, + + opAndType{ir.OSUB, types.TINT8}: ssa.OpSub8, + opAndType{ir.OSUB, types.TUINT8}: ssa.OpSub8, + opAndType{ir.OSUB, types.TINT16}: ssa.OpSub16, + opAndType{ir.OSUB, types.TUINT16}: ssa.OpSub16, + opAndType{ir.OSUB, types.TINT32}: ssa.OpSub32, + opAndType{ir.OSUB, types.TUINT32}: ssa.OpSub32, + opAndType{ir.OSUB, types.TINT64}: ssa.OpSub64, + opAndType{ir.OSUB, types.TUINT64}: ssa.OpSub64, + opAndType{ir.OSUB, types.TFLOAT32}: ssa.OpSub32F, + opAndType{ir.OSUB, types.TFLOAT64}: ssa.OpSub64F, + + opAndType{ir.ONOT, types.TBOOL}: ssa.OpNot, + + opAndType{ir.ONEG, types.TINT8}: ssa.OpNeg8, + opAndType{ir.ONEG, types.TUINT8}: ssa.OpNeg8, + opAndType{ir.ONEG, types.TINT16}: ssa.OpNeg16, + opAndType{ir.ONEG, types.TUINT16}: ssa.OpNeg16, + opAndType{ir.ONEG, types.TINT32}: ssa.OpNeg32, + opAndType{ir.ONEG, types.TUINT32}: ssa.OpNeg32, + opAndType{ir.ONEG, types.TINT64}: ssa.OpNeg64, + opAndType{ir.ONEG, types.TUINT64}: ssa.OpNeg64, + opAndType{ir.ONEG, types.TFLOAT32}: ssa.OpNeg32F, + opAndType{ir.ONEG, types.TFLOAT64}: ssa.OpNeg64F, + + opAndType{ir.OBITNOT, types.TINT8}: ssa.OpCom8, + opAndType{ir.OBITNOT, types.TUINT8}: ssa.OpCom8, + opAndType{ir.OBITNOT, types.TINT16}: ssa.OpCom16, + opAndType{ir.OBITNOT, types.TUINT16}: ssa.OpCom16, + opAndType{ir.OBITNOT, types.TINT32}: ssa.OpCom32, + opAndType{ir.OBITNOT, types.TUINT32}: ssa.OpCom32, + opAndType{ir.OBITNOT, types.TINT64}: ssa.OpCom64, + opAndType{ir.OBITNOT, types.TUINT64}: ssa.OpCom64, + + opAndType{ir.OIMAG, types.TCOMPLEX64}: ssa.OpComplexImag, + opAndType{ir.OIMAG, types.TCOMPLEX128}: ssa.OpComplexImag, + opAndType{ir.OREAL, types.TCOMPLEX64}: ssa.OpComplexReal, + opAndType{ir.OREAL, types.TCOMPLEX128}: ssa.OpComplexReal, + + opAndType{ir.OMUL, types.TINT8}: ssa.OpMul8, + opAndType{ir.OMUL, types.TUINT8}: ssa.OpMul8, + opAndType{ir.OMUL, types.TINT16}: ssa.OpMul16, + opAndType{ir.OMUL, types.TUINT16}: ssa.OpMul16, + opAndType{ir.OMUL, types.TINT32}: ssa.OpMul32, + opAndType{ir.OMUL, types.TUINT32}: ssa.OpMul32, + opAndType{ir.OMUL, types.TINT64}: ssa.OpMul64, + opAndType{ir.OMUL, types.TUINT64}: ssa.OpMul64, + opAndType{ir.OMUL, types.TFLOAT32}: ssa.OpMul32F, + opAndType{ir.OMUL, types.TFLOAT64}: ssa.OpMul64F, + + opAndType{ir.ODIV, types.TFLOAT32}: ssa.OpDiv32F, + opAndType{ir.ODIV, types.TFLOAT64}: ssa.OpDiv64F, + + opAndType{ir.ODIV, types.TINT8}: ssa.OpDiv8, + opAndType{ir.ODIV, types.TUINT8}: ssa.OpDiv8u, + opAndType{ir.ODIV, types.TINT16}: ssa.OpDiv16, + opAndType{ir.ODIV, types.TUINT16}: ssa.OpDiv16u, + opAndType{ir.ODIV, types.TINT32}: ssa.OpDiv32, + opAndType{ir.ODIV, types.TUINT32}: ssa.OpDiv32u, + opAndType{ir.ODIV, types.TINT64}: ssa.OpDiv64, + opAndType{ir.ODIV, types.TUINT64}: ssa.OpDiv64u, + + opAndType{ir.OMOD, types.TINT8}: ssa.OpMod8, + opAndType{ir.OMOD, types.TUINT8}: ssa.OpMod8u, + opAndType{ir.OMOD, types.TINT16}: ssa.OpMod16, + opAndType{ir.OMOD, types.TUINT16}: ssa.OpMod16u, + opAndType{ir.OMOD, types.TINT32}: ssa.OpMod32, + opAndType{ir.OMOD, types.TUINT32}: ssa.OpMod32u, + opAndType{ir.OMOD, types.TINT64}: ssa.OpMod64, + opAndType{ir.OMOD, types.TUINT64}: ssa.OpMod64u, + + opAndType{ir.OAND, types.TINT8}: ssa.OpAnd8, + opAndType{ir.OAND, types.TUINT8}: ssa.OpAnd8, + opAndType{ir.OAND, types.TINT16}: 
ssa.OpAnd16, + opAndType{ir.OAND, types.TUINT16}: ssa.OpAnd16, + opAndType{ir.OAND, types.TINT32}: ssa.OpAnd32, + opAndType{ir.OAND, types.TUINT32}: ssa.OpAnd32, + opAndType{ir.OAND, types.TINT64}: ssa.OpAnd64, + opAndType{ir.OAND, types.TUINT64}: ssa.OpAnd64, + + opAndType{ir.OOR, types.TINT8}: ssa.OpOr8, + opAndType{ir.OOR, types.TUINT8}: ssa.OpOr8, + opAndType{ir.OOR, types.TINT16}: ssa.OpOr16, + opAndType{ir.OOR, types.TUINT16}: ssa.OpOr16, + opAndType{ir.OOR, types.TINT32}: ssa.OpOr32, + opAndType{ir.OOR, types.TUINT32}: ssa.OpOr32, + opAndType{ir.OOR, types.TINT64}: ssa.OpOr64, + opAndType{ir.OOR, types.TUINT64}: ssa.OpOr64, + + opAndType{ir.OXOR, types.TINT8}: ssa.OpXor8, + opAndType{ir.OXOR, types.TUINT8}: ssa.OpXor8, + opAndType{ir.OXOR, types.TINT16}: ssa.OpXor16, + opAndType{ir.OXOR, types.TUINT16}: ssa.OpXor16, + opAndType{ir.OXOR, types.TINT32}: ssa.OpXor32, + opAndType{ir.OXOR, types.TUINT32}: ssa.OpXor32, + opAndType{ir.OXOR, types.TINT64}: ssa.OpXor64, + opAndType{ir.OXOR, types.TUINT64}: ssa.OpXor64, + + opAndType{ir.OEQ, types.TBOOL}: ssa.OpEqB, + opAndType{ir.OEQ, types.TINT8}: ssa.OpEq8, + opAndType{ir.OEQ, types.TUINT8}: ssa.OpEq8, + opAndType{ir.OEQ, types.TINT16}: ssa.OpEq16, + opAndType{ir.OEQ, types.TUINT16}: ssa.OpEq16, + opAndType{ir.OEQ, types.TINT32}: ssa.OpEq32, + opAndType{ir.OEQ, types.TUINT32}: ssa.OpEq32, + opAndType{ir.OEQ, types.TINT64}: ssa.OpEq64, + opAndType{ir.OEQ, types.TUINT64}: ssa.OpEq64, + opAndType{ir.OEQ, types.TINTER}: ssa.OpEqInter, + opAndType{ir.OEQ, types.TSLICE}: ssa.OpEqSlice, + opAndType{ir.OEQ, types.TFUNC}: ssa.OpEqPtr, + opAndType{ir.OEQ, types.TMAP}: ssa.OpEqPtr, + opAndType{ir.OEQ, types.TCHAN}: ssa.OpEqPtr, + opAndType{ir.OEQ, types.TPTR}: ssa.OpEqPtr, + opAndType{ir.OEQ, types.TUINTPTR}: ssa.OpEqPtr, + opAndType{ir.OEQ, types.TUNSAFEPTR}: ssa.OpEqPtr, + opAndType{ir.OEQ, types.TFLOAT64}: ssa.OpEq64F, + opAndType{ir.OEQ, types.TFLOAT32}: ssa.OpEq32F, + + opAndType{ir.ONE, types.TBOOL}: ssa.OpNeqB, + opAndType{ir.ONE, types.TINT8}: ssa.OpNeq8, + opAndType{ir.ONE, types.TUINT8}: ssa.OpNeq8, + opAndType{ir.ONE, types.TINT16}: ssa.OpNeq16, + opAndType{ir.ONE, types.TUINT16}: ssa.OpNeq16, + opAndType{ir.ONE, types.TINT32}: ssa.OpNeq32, + opAndType{ir.ONE, types.TUINT32}: ssa.OpNeq32, + opAndType{ir.ONE, types.TINT64}: ssa.OpNeq64, + opAndType{ir.ONE, types.TUINT64}: ssa.OpNeq64, + opAndType{ir.ONE, types.TINTER}: ssa.OpNeqInter, + opAndType{ir.ONE, types.TSLICE}: ssa.OpNeqSlice, + opAndType{ir.ONE, types.TFUNC}: ssa.OpNeqPtr, + opAndType{ir.ONE, types.TMAP}: ssa.OpNeqPtr, + opAndType{ir.ONE, types.TCHAN}: ssa.OpNeqPtr, + opAndType{ir.ONE, types.TPTR}: ssa.OpNeqPtr, + opAndType{ir.ONE, types.TUINTPTR}: ssa.OpNeqPtr, + opAndType{ir.ONE, types.TUNSAFEPTR}: ssa.OpNeqPtr, + opAndType{ir.ONE, types.TFLOAT64}: ssa.OpNeq64F, + opAndType{ir.ONE, types.TFLOAT32}: ssa.OpNeq32F, + + opAndType{ir.OLT, types.TINT8}: ssa.OpLess8, + opAndType{ir.OLT, types.TUINT8}: ssa.OpLess8U, + opAndType{ir.OLT, types.TINT16}: ssa.OpLess16, + opAndType{ir.OLT, types.TUINT16}: ssa.OpLess16U, + opAndType{ir.OLT, types.TINT32}: ssa.OpLess32, + opAndType{ir.OLT, types.TUINT32}: ssa.OpLess32U, + opAndType{ir.OLT, types.TINT64}: ssa.OpLess64, + opAndType{ir.OLT, types.TUINT64}: ssa.OpLess64U, + opAndType{ir.OLT, types.TFLOAT64}: ssa.OpLess64F, + opAndType{ir.OLT, types.TFLOAT32}: ssa.OpLess32F, + + opAndType{ir.OLE, types.TINT8}: ssa.OpLeq8, + opAndType{ir.OLE, types.TUINT8}: ssa.OpLeq8U, + opAndType{ir.OLE, types.TINT16}: ssa.OpLeq16, + opAndType{ir.OLE, 
types.TUINT16}: ssa.OpLeq16U, + opAndType{ir.OLE, types.TINT32}: ssa.OpLeq32, + opAndType{ir.OLE, types.TUINT32}: ssa.OpLeq32U, + opAndType{ir.OLE, types.TINT64}: ssa.OpLeq64, + opAndType{ir.OLE, types.TUINT64}: ssa.OpLeq64U, + opAndType{ir.OLE, types.TFLOAT64}: ssa.OpLeq64F, + opAndType{ir.OLE, types.TFLOAT32}: ssa.OpLeq32F, } func (s *state) concreteEtype(t *types.Type) types.EType { @@ -1781,25 +1782,25 @@ func (s *state) concreteEtype(t *types.Type) types.EType { switch e { default: return e - case TINT: + case types.TINT: if s.config.PtrSize == 8 { - return TINT64 + return types.TINT64 } - return TINT32 - case TUINT: + return types.TINT32 + case types.TUINT: if s.config.PtrSize == 8 { - return TUINT64 + return types.TUINT64 } - return TUINT32 - case TUINTPTR: + return types.TUINT32 + case types.TUINTPTR: if s.config.PtrSize == 8 { - return TUINT64 + return types.TUINT64 } - return TUINT32 + return types.TUINT32 } } -func (s *state) ssaOp(op Op, t *types.Type) ssa.Op { +func (s *state) ssaOp(op ir.Op, t *types.Type) ssa.Op { etype := s.concreteEtype(t) x, ok := opToSSA[opAndType{op, etype}] if !ok { @@ -1810,10 +1811,10 @@ func (s *state) ssaOp(op Op, t *types.Type) ssa.Op { func floatForComplex(t *types.Type) *types.Type { switch t.Etype { - case TCOMPLEX64: - return types.Types[TFLOAT32] - case TCOMPLEX128: - return types.Types[TFLOAT64] + case types.TCOMPLEX64: + return types.Types[types.TFLOAT32] + case types.TCOMPLEX128: + return types.Types[types.TFLOAT64] } base.Fatalf("unexpected type: %v", t) return nil @@ -1821,17 +1822,17 @@ func floatForComplex(t *types.Type) *types.Type { func complexForFloat(t *types.Type) *types.Type { switch t.Etype { - case TFLOAT32: - return types.Types[TCOMPLEX64] - case TFLOAT64: - return types.Types[TCOMPLEX128] + case types.TFLOAT32: + return types.Types[types.TCOMPLEX64] + case types.TFLOAT64: + return types.Types[types.TCOMPLEX128] } base.Fatalf("unexpected type: %v", t) return nil } type opAndTwoTypes struct { - op Op + op ir.Op etype1 types.EType etype2 types.EType } @@ -1849,145 +1850,145 @@ type twoOpsAndType struct { var fpConvOpToSSA = map[twoTypes]twoOpsAndType{ - twoTypes{TINT8, TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32}, - twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32}, - twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32}, - twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64}, - - twoTypes{TINT8, TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32}, - twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32}, - twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32}, - twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64}, - - twoTypes{TFLOAT32, TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32}, - twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32}, - twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32}, - twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64}, - - twoTypes{TFLOAT64, TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32}, - twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32}, - twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32}, - twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64}, + 
twoTypes{types.TINT8, types.TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, types.TINT32}, + twoTypes{types.TINT16, types.TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, types.TINT32}, + twoTypes{types.TINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, types.TINT32}, + twoTypes{types.TINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, types.TINT64}, + + twoTypes{types.TINT8, types.TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, types.TINT32}, + twoTypes{types.TINT16, types.TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, types.TINT32}, + twoTypes{types.TINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, types.TINT32}, + twoTypes{types.TINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, types.TINT64}, + + twoTypes{types.TFLOAT32, types.TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32}, + twoTypes{types.TFLOAT32, types.TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32}, + twoTypes{types.TFLOAT32, types.TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, types.TINT32}, + twoTypes{types.TFLOAT32, types.TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, types.TINT64}, + + twoTypes{types.TFLOAT64, types.TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32}, + twoTypes{types.TFLOAT64, types.TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32}, + twoTypes{types.TFLOAT64, types.TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, types.TINT32}, + twoTypes{types.TFLOAT64, types.TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, types.TINT64}, // unsigned - twoTypes{TUINT8, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32}, - twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32}, - twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned - twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto32F, branchy code expansion instead - - twoTypes{TUINT8, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32}, - twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32}, - twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned - twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto64F, branchy code expansion instead - - twoTypes{TFLOAT32, TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32}, - twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32}, - twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned - twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt32Fto64U, branchy code expansion instead - - twoTypes{TFLOAT64, TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32}, - twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32}, - twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned - twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt64Fto64U, branchy code expansion instead + twoTypes{types.TUINT8, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, types.TINT32}, + 
twoTypes{types.TUINT16, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, types.TINT32}, + twoTypes{types.TUINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, types.TINT64}, // go wide to dodge unsigned + twoTypes{types.TUINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto32F, branchy code expansion instead + + twoTypes{types.TUINT8, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, types.TINT32}, + twoTypes{types.TUINT16, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, types.TINT32}, + twoTypes{types.TUINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, types.TINT64}, // go wide to dodge unsigned + twoTypes{types.TUINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto64F, branchy code expansion instead + + twoTypes{types.TFLOAT32, types.TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32}, + twoTypes{types.TFLOAT32, types.TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32}, + twoTypes{types.TFLOAT32, types.TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned + twoTypes{types.TFLOAT32, types.TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt32Fto64U, branchy code expansion instead + + twoTypes{types.TFLOAT64, types.TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32}, + twoTypes{types.TFLOAT64, types.TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32}, + twoTypes{types.TFLOAT64, types.TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned + twoTypes{types.TFLOAT64, types.TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt64Fto64U, branchy code expansion instead // float - twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32}, - twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, TFLOAT64}, - twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, TFLOAT32}, - twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64}, + twoTypes{types.TFLOAT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, types.TFLOAT32}, + twoTypes{types.TFLOAT64, types.TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, types.TFLOAT64}, + twoTypes{types.TFLOAT32, types.TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, types.TFLOAT32}, + twoTypes{types.TFLOAT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, types.TFLOAT64}, } // this map is used only for 32-bit arch, and only includes the difference // on 32-bit arch, don't use int64<->float conversion for uint32 var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{ - twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32}, - twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32}, - twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32}, - twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32}, + twoTypes{types.TUINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, types.TUINT32}, + twoTypes{types.TUINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, types.TUINT32}, + twoTypes{types.TFLOAT32, types.TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, types.TUINT32}, + 
twoTypes{types.TFLOAT64, types.TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, types.TUINT32}, } // uint64<->float conversions, only on machines that have instructions for that var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{ - twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64}, - twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64}, - twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64}, - twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64}, + twoTypes{types.TUINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, types.TUINT64}, + twoTypes{types.TUINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, types.TUINT64}, + twoTypes{types.TFLOAT32, types.TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, types.TUINT64}, + twoTypes{types.TFLOAT64, types.TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, types.TUINT64}, } var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{ - opAndTwoTypes{OLSH, TINT8, TUINT8}: ssa.OpLsh8x8, - opAndTwoTypes{OLSH, TUINT8, TUINT8}: ssa.OpLsh8x8, - opAndTwoTypes{OLSH, TINT8, TUINT16}: ssa.OpLsh8x16, - opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16, - opAndTwoTypes{OLSH, TINT8, TUINT32}: ssa.OpLsh8x32, - opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32, - opAndTwoTypes{OLSH, TINT8, TUINT64}: ssa.OpLsh8x64, - opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64, - - opAndTwoTypes{OLSH, TINT16, TUINT8}: ssa.OpLsh16x8, - opAndTwoTypes{OLSH, TUINT16, TUINT8}: ssa.OpLsh16x8, - opAndTwoTypes{OLSH, TINT16, TUINT16}: ssa.OpLsh16x16, - opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16, - opAndTwoTypes{OLSH, TINT16, TUINT32}: ssa.OpLsh16x32, - opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32, - opAndTwoTypes{OLSH, TINT16, TUINT64}: ssa.OpLsh16x64, - opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64, - - opAndTwoTypes{OLSH, TINT32, TUINT8}: ssa.OpLsh32x8, - opAndTwoTypes{OLSH, TUINT32, TUINT8}: ssa.OpLsh32x8, - opAndTwoTypes{OLSH, TINT32, TUINT16}: ssa.OpLsh32x16, - opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16, - opAndTwoTypes{OLSH, TINT32, TUINT32}: ssa.OpLsh32x32, - opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32, - opAndTwoTypes{OLSH, TINT32, TUINT64}: ssa.OpLsh32x64, - opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64, - - opAndTwoTypes{OLSH, TINT64, TUINT8}: ssa.OpLsh64x8, - opAndTwoTypes{OLSH, TUINT64, TUINT8}: ssa.OpLsh64x8, - opAndTwoTypes{OLSH, TINT64, TUINT16}: ssa.OpLsh64x16, - opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16, - opAndTwoTypes{OLSH, TINT64, TUINT32}: ssa.OpLsh64x32, - opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32, - opAndTwoTypes{OLSH, TINT64, TUINT64}: ssa.OpLsh64x64, - opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64, - - opAndTwoTypes{ORSH, TINT8, TUINT8}: ssa.OpRsh8x8, - opAndTwoTypes{ORSH, TUINT8, TUINT8}: ssa.OpRsh8Ux8, - opAndTwoTypes{ORSH, TINT8, TUINT16}: ssa.OpRsh8x16, - opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16, - opAndTwoTypes{ORSH, TINT8, TUINT32}: ssa.OpRsh8x32, - opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32, - opAndTwoTypes{ORSH, TINT8, TUINT64}: ssa.OpRsh8x64, - opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64, - - opAndTwoTypes{ORSH, TINT16, TUINT8}: ssa.OpRsh16x8, - opAndTwoTypes{ORSH, TUINT16, TUINT8}: ssa.OpRsh16Ux8, - opAndTwoTypes{ORSH, TINT16, TUINT16}: ssa.OpRsh16x16, - opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16, - opAndTwoTypes{ORSH, TINT16, TUINT32}: ssa.OpRsh16x32, - 
opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32, - opAndTwoTypes{ORSH, TINT16, TUINT64}: ssa.OpRsh16x64, - opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64, - - opAndTwoTypes{ORSH, TINT32, TUINT8}: ssa.OpRsh32x8, - opAndTwoTypes{ORSH, TUINT32, TUINT8}: ssa.OpRsh32Ux8, - opAndTwoTypes{ORSH, TINT32, TUINT16}: ssa.OpRsh32x16, - opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16, - opAndTwoTypes{ORSH, TINT32, TUINT32}: ssa.OpRsh32x32, - opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32, - opAndTwoTypes{ORSH, TINT32, TUINT64}: ssa.OpRsh32x64, - opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64, - - opAndTwoTypes{ORSH, TINT64, TUINT8}: ssa.OpRsh64x8, - opAndTwoTypes{ORSH, TUINT64, TUINT8}: ssa.OpRsh64Ux8, - opAndTwoTypes{ORSH, TINT64, TUINT16}: ssa.OpRsh64x16, - opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16, - opAndTwoTypes{ORSH, TINT64, TUINT32}: ssa.OpRsh64x32, - opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32, - opAndTwoTypes{ORSH, TINT64, TUINT64}: ssa.OpRsh64x64, - opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64, -} - -func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op { + opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT8}: ssa.OpLsh8x8, + opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT8}: ssa.OpLsh8x8, + opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT16}: ssa.OpLsh8x16, + opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT16}: ssa.OpLsh8x16, + opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT32}: ssa.OpLsh8x32, + opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT32}: ssa.OpLsh8x32, + opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT64}: ssa.OpLsh8x64, + opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT64}: ssa.OpLsh8x64, + + opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT8}: ssa.OpLsh16x8, + opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT8}: ssa.OpLsh16x8, + opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT16}: ssa.OpLsh16x16, + opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT16}: ssa.OpLsh16x16, + opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT32}: ssa.OpLsh16x32, + opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT32}: ssa.OpLsh16x32, + opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT64}: ssa.OpLsh16x64, + opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT64}: ssa.OpLsh16x64, + + opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT8}: ssa.OpLsh32x8, + opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT8}: ssa.OpLsh32x8, + opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT16}: ssa.OpLsh32x16, + opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT16}: ssa.OpLsh32x16, + opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT32}: ssa.OpLsh32x32, + opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT32}: ssa.OpLsh32x32, + opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT64}: ssa.OpLsh32x64, + opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT64}: ssa.OpLsh32x64, + + opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT8}: ssa.OpLsh64x8, + opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT8}: ssa.OpLsh64x8, + opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT16}: ssa.OpLsh64x16, + opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT16}: ssa.OpLsh64x16, + opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT32}: ssa.OpLsh64x32, + opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT32}: ssa.OpLsh64x32, + opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT64}: ssa.OpLsh64x64, + opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT64}: ssa.OpLsh64x64, + + opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT8}: ssa.OpRsh8x8, + opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT8}: 
ssa.OpRsh8Ux8, + opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT16}: ssa.OpRsh8x16, + opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT16}: ssa.OpRsh8Ux16, + opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT32}: ssa.OpRsh8x32, + opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT32}: ssa.OpRsh8Ux32, + opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT64}: ssa.OpRsh8x64, + opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT64}: ssa.OpRsh8Ux64, + + opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT8}: ssa.OpRsh16x8, + opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT8}: ssa.OpRsh16Ux8, + opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT16}: ssa.OpRsh16x16, + opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT16}: ssa.OpRsh16Ux16, + opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT32}: ssa.OpRsh16x32, + opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT32}: ssa.OpRsh16Ux32, + opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT64}: ssa.OpRsh16x64, + opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT64}: ssa.OpRsh16Ux64, + + opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT8}: ssa.OpRsh32x8, + opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT8}: ssa.OpRsh32Ux8, + opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT16}: ssa.OpRsh32x16, + opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT16}: ssa.OpRsh32Ux16, + opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT32}: ssa.OpRsh32x32, + opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT32}: ssa.OpRsh32Ux32, + opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT64}: ssa.OpRsh32x64, + opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT64}: ssa.OpRsh32Ux64, + + opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT8}: ssa.OpRsh64x8, + opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT8}: ssa.OpRsh64Ux8, + opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT16}: ssa.OpRsh64x16, + opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT16}: ssa.OpRsh64Ux16, + opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT32}: ssa.OpRsh64x32, + opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT32}: ssa.OpRsh64Ux32, + opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT64}: ssa.OpRsh64x64, + opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT64}: ssa.OpRsh64Ux64, +} + +func (s *state) ssaShiftOp(op ir.Op, t *types.Type, u *types.Type) ssa.Op { etype1 := s.concreteEtype(t) etype2 := s.concreteEtype(u) x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}] @@ -1998,7 +1999,7 @@ func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op { } // expr converts the expression n to ssa, adds it to s and returns the ssa result. -func (s *state) expr(n *Node) *ssa.Value { +func (s *state) expr(n *ir.Node) *ssa.Value { if hasUniquePos(n) { // ONAMEs and named OLITERALs have the line number // of the decl, not the use. See issue 14742. 
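
The opToSSA, fpConvOpToSSA, and shiftOpToSSA tables above all follow one pattern: a map keyed by a small struct combining the IR operator with the concrete element type(s), consulted by ssaOp/ssaShiftOp, with a Fatalf on any missing entry. A minimal standalone sketch of that lookup pattern follows; the names myOp, myType, opTable, and pickOp are illustrative only and do not come from the compiler:

package main

import "fmt"

// Illustrative stand-ins for ir.Op and types.EType.
type myOp int
type myType int

const (
	opAdd myOp = iota
	opSub
)

const (
	tInt32 myType = iota
	tInt64
)

// opAndType mirrors the composite map key used above: an operator
// paired with a concrete element type.
type opAndType struct {
	op myOp
	t  myType
}

var opTable = map[opAndType]string{
	{opAdd, tInt32}: "Add32",
	{opAdd, tInt64}: "Add64",
	{opSub, tInt32}: "Sub32",
	{opSub, tInt64}: "Sub64",
}

// pickOp is the analogue of ssaOp: look up the (op, type) pair and
// fail loudly on an unhandled combination.
func pickOp(op myOp, t myType) string {
	x, ok := opTable[opAndType{op, t}]
	if !ok {
		panic(fmt.Sprintf("unhandled combination %d %d", op, t))
	}
	return x
}

func main() {
	fmt.Println(pickOp(opAdd, tInt64)) // Add64
}
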
@@ -2008,24 +2009,24 @@ func (s *state) expr(n *Node) *ssa.Value { s.stmtList(n.Ninit) switch n.Op { - case OBYTES2STRTMP: + case ir.OBYTES2STRTMP: slice := s.expr(n.Left) ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice) - len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice) + len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) return s.newValue2(ssa.OpStringMake, n.Type, ptr, len) - case OSTR2BYTESTMP: + case ir.OSTR2BYTESTMP: str := s.expr(n.Left) ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str) - len := s.newValue1(ssa.OpStringLen, types.Types[TINT], str) + len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], str) return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len) - case OCFUNC: + case ir.OCFUNC: aux := n.Left.Sym.Linksym() return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb) - case OMETHEXPR: + case ir.OMETHEXPR: sym := funcsym(n.Sym).Linksym() return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), sym, s.sb) - case ONAME: - if n.Class() == PFUNC { + case ir.ONAME: + if n.Class() == ir.PFUNC { // "value" of a function is the address of the function's closure sym := funcsym(n.Sym).Linksym() return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), sym, s.sb) @@ -2035,10 +2036,10 @@ func (s *state) expr(n *Node) *ssa.Value { } addr := s.addr(n) return s.load(n.Type, addr) - case OCLOSUREVAR: + case ir.OCLOSUREVAR: addr := s.addr(n) return s.load(n.Type, addr) - case ONIL: + case ir.ONIL: t := n.Type switch { case t.IsSlice(): @@ -2048,10 +2049,10 @@ func (s *state) expr(n *Node) *ssa.Value { default: return s.constNil(t) } - case OLITERAL: + case ir.OLITERAL: switch u := n.Val(); u.Kind() { case constant.Int: - i := int64Val(n.Type, u) + i := ir.Int64Val(n.Type, u) switch n.Type.Size() { case 1: return s.constInt8(n.Type, int8(i)) @@ -2089,12 +2090,12 @@ func (s *state) expr(n *Node) *ssa.Value { im, _ := constant.Float64Val(constant.Imag(u)) switch n.Type.Size() { case 8: - pt := types.Types[TFLOAT32] + pt := types.Types[types.TFLOAT32] return s.newValue2(ssa.OpComplexMake, n.Type, s.constFloat32(pt, re), s.constFloat32(pt, im)) case 16: - pt := types.Types[TFLOAT64] + pt := types.Types[types.TFLOAT64] return s.newValue2(ssa.OpComplexMake, n.Type, s.constFloat64(pt, re), s.constFloat64(pt, im)) @@ -2106,7 +2107,7 @@ func (s *state) expr(n *Node) *ssa.Value { s.Fatalf("unhandled OLITERAL %v", u.Kind()) return nil } - case OCONVNOP: + case ir.OCONVNOP: to := n.Type from := n.Left.Type @@ -2125,7 +2126,7 @@ func (s *state) expr(n *Node) *ssa.Value { v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type // CONVNOP closure - if to.Etype == TFUNC && from.IsPtrShaped() { + if to.Etype == types.TFUNC && from.IsPtrShaped() { return v } @@ -2140,7 +2141,7 @@ func (s *state) expr(n *Node) *ssa.Value { } // map <--> *hmap - if to.Etype == TMAP && from.IsPtr() && + if to.Etype == types.TMAP && from.IsPtr() && to.MapType().Hmap == from.Elem() { return v } @@ -2171,11 +2172,11 @@ func (s *state) expr(n *Node) *ssa.Value { // integer, same width, same sign return v - case OCONV: + case ir.OCONV: x := s.expr(n.Left) ft := n.Left.Type // from type tt := n.Type // to type - if ft.IsBoolean() && tt.IsKind(TUINT8) { + if ft.IsBoolean() && tt.IsKind(types.TUINT8) { // Bool -> uint8 is generated internally when indexing into runtime.staticbyte. 
return s.newValue1(ssa.OpCopy, n.Type, x) } @@ -2342,25 +2343,25 @@ func (s *state) expr(n *Node) *ssa.Value { s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype) return nil - case ODOTTYPE: + case ir.ODOTTYPE: res, _ := s.dottype(n, false) return res // binary ops - case OLT, OEQ, ONE, OLE, OGE, OGT: + case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT: a := s.expr(n.Left) b := s.expr(n.Right) if n.Left.Type.IsComplex() { pt := floatForComplex(n.Left.Type) - op := s.ssaOp(OEQ, pt) - r := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)) - i := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)) - c := s.newValue2(ssa.OpAndB, types.Types[TBOOL], r, i) + op := s.ssaOp(ir.OEQ, pt) + r := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)) + i := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)) + c := s.newValue2(ssa.OpAndB, types.Types[types.TBOOL], r, i) switch n.Op { - case OEQ: + case ir.OEQ: return c - case ONE: - return s.newValue1(ssa.OpNot, types.Types[TBOOL], c) + case ir.ONE: + return s.newValue1(ssa.OpNot, types.Types[types.TBOOL], c) default: s.Fatalf("ordered complex compare %v", n.Op) } @@ -2369,26 +2370,26 @@ func (s *state) expr(n *Node) *ssa.Value { // Convert OGE and OGT into OLE and OLT. op := n.Op switch op { - case OGE: - op, a, b = OLE, b, a - case OGT: - op, a, b = OLT, b, a + case ir.OGE: + op, a, b = ir.OLE, b, a + case ir.OGT: + op, a, b = ir.OLT, b, a } if n.Left.Type.IsFloat() { // float comparison - return s.newValueOrSfCall2(s.ssaOp(op, n.Left.Type), types.Types[TBOOL], a, b) + return s.newValueOrSfCall2(s.ssaOp(op, n.Left.Type), types.Types[types.TBOOL], a, b) } // integer comparison - return s.newValue2(s.ssaOp(op, n.Left.Type), types.Types[TBOOL], a, b) - case OMUL: + return s.newValue2(s.ssaOp(op, n.Left.Type), types.Types[types.TBOOL], a, b) + case ir.OMUL: a := s.expr(n.Left) b := s.expr(n.Right) if n.Type.IsComplex() { mulop := ssa.OpMul64F addop := ssa.OpAdd64F subop := ssa.OpSub64F - pt := floatForComplex(n.Type) // Could be Float32 or Float64 - wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancellation error + pt := floatForComplex(n.Type) // Could be Float32 or Float64 + wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error areal := s.newValue1(ssa.OpComplexReal, pt, a) breal := s.newValue1(ssa.OpComplexReal, pt, b) @@ -2419,7 +2420,7 @@ func (s *state) expr(n *Node) *ssa.Value { return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) - case ODIV: + case ir.ODIV: a := s.expr(n.Left) b := s.expr(n.Right) if n.Type.IsComplex() { @@ -2430,8 +2431,8 @@ func (s *state) expr(n *Node) *ssa.Value { addop := ssa.OpAdd64F subop := ssa.OpSub64F divop := ssa.OpDiv64F - pt := floatForComplex(n.Type) // Could be Float32 or Float64 - wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancellation error + pt := floatForComplex(n.Type) // Could be Float32 or Float64 + wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error areal := s.newValue1(ssa.OpComplexReal, pt, a) breal := s.newValue1(ssa.OpComplexReal, pt, b) @@ -2466,11 +2467,11 @@ func (s *state) expr(n *Node) *ssa.Value { return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b) } return s.intDivide(n, a, 
b) - case OMOD: + case ir.OMOD: a := s.expr(n.Left) b := s.expr(n.Right) return s.intDivide(n, a, b) - case OADD, OSUB: + case ir.OADD, ir.OSUB: a := s.expr(n.Left) b := s.expr(n.Right) if n.Type.IsComplex() { @@ -2484,26 +2485,26 @@ func (s *state) expr(n *Node) *ssa.Value { return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b) } return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) - case OAND, OOR, OXOR: + case ir.OAND, ir.OOR, ir.OXOR: a := s.expr(n.Left) b := s.expr(n.Right) return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) - case OANDNOT: + case ir.OANDNOT: a := s.expr(n.Left) b := s.expr(n.Right) - b = s.newValue1(s.ssaOp(OBITNOT, b.Type), b.Type, b) - return s.newValue2(s.ssaOp(OAND, n.Type), a.Type, a, b) - case OLSH, ORSH: + b = s.newValue1(s.ssaOp(ir.OBITNOT, b.Type), b.Type, b) + return s.newValue2(s.ssaOp(ir.OAND, n.Type), a.Type, a, b) + case ir.OLSH, ir.ORSH: a := s.expr(n.Left) b := s.expr(n.Right) bt := b.Type if bt.IsSigned() { - cmp := s.newValue2(s.ssaOp(OLE, bt), types.Types[TBOOL], s.zeroVal(bt), b) + cmp := s.newValue2(s.ssaOp(ir.OLE, bt), types.Types[types.TBOOL], s.zeroVal(bt), b) s.check(cmp, panicshift) bt = bt.ToUnsigned() } return s.newValue2(s.ssaShiftOp(n.Op, n.Type, bt), a.Type, a, b) - case OANDAND, OOROR: + case ir.OANDAND, ir.OOROR: // To implement OANDAND (and OOROR), we introduce a // new temporary variable to hold the result. The // variable is associated with the OANDAND node in the @@ -2530,10 +2531,10 @@ func (s *state) expr(n *Node) *ssa.Value { bRight := s.f.NewBlock(ssa.BlockPlain) bResult := s.f.NewBlock(ssa.BlockPlain) - if n.Op == OANDAND { + if n.Op == ir.OANDAND { b.AddEdgeTo(bRight) b.AddEdgeTo(bResult) - } else if n.Op == OOROR { + } else if n.Op == ir.OOROR { b.AddEdgeTo(bResult) b.AddEdgeTo(bRight) } @@ -2546,14 +2547,14 @@ func (s *state) expr(n *Node) *ssa.Value { b.AddEdgeTo(bResult) s.startBlock(bResult) - return s.variable(n, types.Types[TBOOL]) - case OCOMPLEX: + return s.variable(n, types.Types[types.TBOOL]) + case ir.OCOMPLEX: r := s.expr(n.Left) i := s.expr(n.Right) return s.newValue2(ssa.OpComplexMake, n.Type, r, i) // unary ops - case ONEG: + case ir.ONEG: a := s.expr(n.Left) if n.Type.IsComplex() { tp := floatForComplex(n.Type) @@ -2563,19 +2564,19 @@ func (s *state) expr(n *Node) *ssa.Value { s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a))) } return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a) - case ONOT, OBITNOT: + case ir.ONOT, ir.OBITNOT: a := s.expr(n.Left) return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a) - case OIMAG, OREAL: + case ir.OIMAG, ir.OREAL: a := s.expr(n.Left) return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a) - case OPLUS: + case ir.OPLUS: return s.expr(n.Left) - case OADDR: + case ir.OADDR: return s.addr(n.Left) - case ORESULT: + case ir.ORESULT: if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall { // Do the old thing addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset) @@ -2594,12 +2595,12 @@ func (s *state) expr(n *Node) *ssa.Value { return s.rawLoad(n.Type, addr) } - case ODEREF: + case ir.ODEREF: p := s.exprPtr(n.Left, n.Bounded(), n.Pos) return s.load(n.Type, p) - case ODOT: - if n.Left.Op == OSTRUCTLIT { + case ir.ODOT: + if n.Left.Op == ir.OSTRUCTLIT { // All literals with nonzero fields have already been // rewritten during walk. Any that remain are just T{} // or equivalents. Use the zero value. 
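The OANDAND/OOROR lowering above reads more easily as source-level control flow. Below is a minimal sketch (illustration only, not compiler code): the two closures stand in for the operand expressions, and the local `result` stands in for the temporary that `s.variable` later resolves into a phi at bResult.

	package main

	import "fmt"

	// andand mirrors the block structure built for ir.OANDAND: evaluate
	// the left operand into the result temporary, take the bRight edge
	// only when it is true, and read the temporary back at bResult,
	// which corresponds to the phi generated by s.variable(n, TBOOL).
	func andand(left, right func() bool) bool {
		result := left() // value merged at bResult when bRight is skipped
		if result {      // edge b -> bRight, taken only for a true left
			result = right() // right operand evaluated in its own block
		}
		return result // phi(result) at bResult
	}

	func main() {
		fmt.Println(andand(func() bool { return true }, func() bool { return false }))      // false
		fmt.Println(andand(func() bool { return false }, func() bool { panic("skipped") })) // false
	}

For ir.OOROR the two outgoing edges are simply attached in the opposite order, so the right operand runs only when the left is false.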
@@ -2619,32 +2620,32 @@ func (s *state) expr(n *Node) *ssa.Value { v := s.expr(n.Left) return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v) - case ODOTPTR: + case ir.ODOTPTR: p := s.exprPtr(n.Left, n.Bounded(), n.Pos) p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type), n.Xoffset, p) return s.load(n.Type, p) - case OINDEX: + case ir.OINDEX: switch { case n.Left.Type.IsString(): - if n.Bounded() && Isconst(n.Left, constant.String) && Isconst(n.Right, constant.Int) { + if n.Bounded() && ir.IsConst(n.Left, constant.String) && ir.IsConst(n.Right, constant.Int) { // Replace "abc"[1] with 'b'. // Delayed until now because "abc"[1] is not an ideal constant. // See test/fixedbugs/issue11370.go. - return s.newValue0I(ssa.OpConst8, types.Types[TUINT8], int64(int8(n.Left.StringVal()[n.Right.Int64Val()]))) + return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(n.Left.StringVal()[n.Right.Int64Val()]))) } a := s.expr(n.Left) i := s.expr(n.Right) - len := s.newValue1(ssa.OpStringLen, types.Types[TINT], a) + len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], a) i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) ptrtyp := s.f.Config.Types.BytePtr ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a) - if Isconst(n.Right, constant.Int) { + if ir.IsConst(n.Right, constant.Int) { ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64Val(), ptr) } else { ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i) } - return s.load(types.Types[TUINT8], ptr) + return s.load(types.Types[types.TUINT8], ptr) case n.Left.Type.IsSlice(): p := s.addr(n) return s.load(n.Left.Type.Elem(), p) @@ -2657,12 +2658,12 @@ func (s *state) expr(n *Node) *ssa.Value { if bound == 0 { // Bounds check will never succeed. Might as well // use constants for the bounds check. - z := s.constInt(types.Types[TINT], 0) + z := s.constInt(types.Types[types.TINT], 0) s.boundsCheck(z, z, ssa.BoundsIndex, false) // The return value won't be live, return junk. 
return s.newValue0(ssa.OpUnknown, n.Type) } - len := s.constInt(types.Types[TINT], bound) + len := s.constInt(types.Types[types.TINT], bound) s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) // checks i == 0 return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a) } @@ -2673,23 +2674,23 @@ func (s *state) expr(n *Node) *ssa.Value { return nil } - case OLEN, OCAP: + case ir.OLEN, ir.OCAP: switch { case n.Left.Type.IsSlice(): op := ssa.OpSliceLen - if n.Op == OCAP { + if n.Op == ir.OCAP { op = ssa.OpSliceCap } - return s.newValue1(op, types.Types[TINT], s.expr(n.Left)) + return s.newValue1(op, types.Types[types.TINT], s.expr(n.Left)) case n.Left.Type.IsString(): // string; not reachable for OCAP - return s.newValue1(ssa.OpStringLen, types.Types[TINT], s.expr(n.Left)) + return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.Left)) case n.Left.Type.IsMap(), n.Left.Type.IsChan(): return s.referenceTypeBuiltin(n, s.expr(n.Left)) default: // array - return s.constInt(types.Types[TINT], n.Left.Type.NumElem()) + return s.constInt(types.Types[types.TINT], n.Left.Type.NumElem()) } - case OSPTR: + case ir.OSPTR: a := s.expr(n.Left) if n.Left.Type.IsSlice() { return s.newValue1(ssa.OpSlicePtr, n.Type, a) @@ -2697,26 +2698,26 @@ func (s *state) expr(n *Node) *ssa.Value { return s.newValue1(ssa.OpStringPtr, n.Type, a) } - case OITAB: + case ir.OITAB: a := s.expr(n.Left) return s.newValue1(ssa.OpITab, n.Type, a) - case OIDATA: + case ir.OIDATA: a := s.expr(n.Left) return s.newValue1(ssa.OpIData, n.Type, a) - case OEFACE: + case ir.OEFACE: tab := s.expr(n.Left) data := s.expr(n.Right) return s.newValue2(ssa.OpIMake, n.Type, tab, data) - case OSLICEHEADER: + case ir.OSLICEHEADER: p := s.expr(n.Left) l := s.expr(n.List.First()) c := s.expr(n.List.Second()) return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c) - case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR: + case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR: v := s.expr(n.Left) var i, j, k *ssa.Value low, high, max := n.SliceBounds() @@ -2732,7 +2733,7 @@ func (s *state) expr(n *Node) *ssa.Value { p, l, c := s.slice(v, i, j, k, n.Bounded()) return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c) - case OSLICESTR: + case ir.OSLICESTR: v := s.expr(n.Left) var i, j *ssa.Value low, high, _ := n.SliceBounds() @@ -2745,22 +2746,22 @@ func (s *state) expr(n *Node) *ssa.Value { p, l, _ := s.slice(v, i, j, nil, n.Bounded()) return s.newValue2(ssa.OpStringMake, n.Type, p, l) - case OCALLFUNC: + case ir.OCALLFUNC: if isIntrinsicCall(n) { return s.intrinsicCall(n) } fallthrough - case OCALLINTER, OCALLMETH: + case ir.OCALLINTER, ir.OCALLMETH: return s.callResult(n, callNormal) - case OGETG: + case ir.OGETG: return s.newValue1(ssa.OpGetG, n.Type, s.mem()) - case OAPPEND: + case ir.OAPPEND: return s.append(n, false) - case OSTRUCTLIT, OARRAYLIT: + case ir.OSTRUCTLIT, ir.OARRAYLIT: // All literals with nonzero fields have already been // rewritten during walk. Any that remain are just T{} // or equivalents. Use the zero value. @@ -2769,7 +2770,7 @@ func (s *state) expr(n *Node) *ssa.Value { } return s.zeroVal(n.Type) - case ONEWOBJ: + case ir.ONEWOBJ: if n.Type.Elem().Size() == 0 { return s.newValue1A(ssa.OpAddr, n.Type, zerobaseSym, s.sb) } @@ -2789,7 +2790,7 @@ func (s *state) expr(n *Node) *ssa.Value { // If inplace is true, it writes the result of the OAPPEND expression n // back to the slice being appended to, and returns nil. // inplace MUST be set to false if the slice can be SSA'd. 
-func (s *state) append(n *Node, inplace bool) *ssa.Value { +func (s *state) append(n *ir.Node, inplace bool) *ssa.Value { // If inplace is false, process as expression "append(s, e1, e2, e3)": // // ptr, len, cap := s @@ -2844,11 +2845,11 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value { // Decide if we need to grow nargs := int64(n.List.Len() - 1) p := s.newValue1(ssa.OpSlicePtr, pt, slice) - l := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice) - c := s.newValue1(ssa.OpSliceCap, types.Types[TINT], slice) - nl := s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs)) + l := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) + c := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], slice) + nl := s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs)) - cmp := s.newValue2(s.ssaOp(OLT, types.Types[TUINT]), types.Types[types.TBOOL], c, nl) + cmp := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT]), types.Types[types.TBOOL], c, nl) s.vars[ptrVar] = p if !inplace { @@ -2868,22 +2869,22 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value { // Call growslice s.startBlock(grow) taddr := s.expr(n.Left) - r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[TINT], types.Types[TINT]}, taddr, p, l, c, nl) + r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[types.TINT], types.Types[types.TINT]}, taddr, p, l, c, nl) if inplace { - if sn.Op == ONAME && sn.Class() != PEXTERN { + if sn.Op == ir.ONAME && sn.Class() != ir.PEXTERN { // Tell liveness we're about to build a new slice s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem()) } capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceCapOffset, addr) - s.store(types.Types[TINT], capaddr, r[2]) + s.store(types.Types[types.TINT], capaddr, r[2]) s.store(pt, addr, r[0]) // load the value we just stored to avoid having to spill it s.vars[ptrVar] = s.load(pt, addr) s.vars[lenVar] = r[1] // avoid a spill in the fast path } else { s.vars[ptrVar] = r[0] - s.vars[newlenVar] = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], r[1], s.constInt(types.Types[TINT], nargs)) + s.vars[newlenVar] = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], r[1], s.constInt(types.Types[types.TINT], nargs)) s.vars[capVar] = r[2] } @@ -2894,10 +2895,10 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value { s.startBlock(assign) if inplace { - l = s.variable(lenVar, types.Types[TINT]) // generates phi for len - nl = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs)) + l = s.variable(lenVar, types.Types[types.TINT]) // generates phi for len + nl = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs)) lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceLenOffset, addr) - s.store(types.Types[TINT], lenaddr, nl) + s.store(types.Types[types.TINT], lenaddr, nl) } // Evaluate args @@ -2919,12 +2920,12 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value { p = s.variable(ptrVar, pt) // generates phi for ptr if !inplace { - nl = s.variable(newlenVar, types.Types[TINT]) // generates phi for nl - c = s.variable(capVar, types.Types[TINT]) // generates phi for cap + nl = s.variable(newlenVar, types.Types[types.TINT]) // generates phi for nl + c = s.variable(capVar, 
types.Types[types.TINT]) // generates phi for cap } p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l) for i, arg := range args { - addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[TINT], int64(i))) + addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[types.TINT], int64(i))) if arg.store { s.storeType(et, addr, arg.v, 0, true) } else { @@ -2947,9 +2948,9 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value { // if cond is true and no if cond is false. // This function is intended to handle && and || better than just calling // s.expr(cond) and branching on the result. -func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) { +func (s *state) condBranch(cond *ir.Node, yes, no *ssa.Block, likely int8) { switch cond.Op { - case OANDAND: + case ir.OANDAND: mid := s.f.NewBlock(ssa.BlockPlain) s.stmtList(cond.Ninit) s.condBranch(cond.Left, mid, no, max8(likely, 0)) @@ -2962,7 +2963,7 @@ func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) { // the likeliness of the first branch. // TODO: have the frontend give us branch prediction hints for // OANDAND and OOROR nodes (if it ever has such info). - case OOROR: + case ir.OOROR: mid := s.f.NewBlock(ssa.BlockPlain) s.stmtList(cond.Ninit) s.condBranch(cond.Left, yes, mid, min8(likely, 0)) @@ -2972,7 +2973,7 @@ func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) { // Note: if likely==-1, then both recursive calls pass -1. // If likely==1, then we don't have enough info to decide // the likelihood of the first branch. - case ONOT: + case ir.ONOT: s.stmtList(cond.Ninit) s.condBranch(cond.Left, no, yes, -likely) return @@ -2999,8 +3000,8 @@ const ( // If deref is true, then we do left = *right instead (and right has already been nil-checked). // If deref is true and right == nil, just do left = 0. // skip indicates assignments (at the top level) that can be avoided. -func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) { - if left.Op == ONAME && left.isBlank() { +func (s *state) assign(left *ir.Node, right *ssa.Value, deref bool, skip skipMask) { + if left.Op == ir.ONAME && ir.IsBlank(left) { return } t := left.Type @@ -3009,7 +3010,7 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) if deref { s.Fatalf("can SSA LHS %v but not RHS %s", left, right) } - if left.Op == ODOT { + if left.Op == ir.ODOT { // We're assigning to a field of an ssa-able value. // We need to build a new structure with the new value for the // field we're assigning and the old values for the other fields. @@ -3044,7 +3045,7 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) // TODO: do we need to update named values here? return } - if left.Op == OINDEX && left.Left.Type.IsArray() { + if left.Op == ir.OINDEX && left.Left.Type.IsArray() { s.pushLine(left.Pos) defer s.popLine() // We're assigning to an element of an ssa-able array. @@ -3056,7 +3057,7 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) if n == 0 { // The bounds check must fail. Might as well // ignore the actual index and just use zeros. 
- z := s.constInt(types.Types[TINT], 0) + z := s.constInt(types.Types[types.TINT], 0) s.boundsCheck(z, z, ssa.BoundsIndex, false) return } @@ -3064,7 +3065,7 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) s.Fatalf("assigning to non-1-length array") } // Rewrite to a = [1]{v} - len := s.constInt(types.Types[TINT], 1) + len := s.constInt(types.Types[types.TINT], 1) s.boundsCheck(i, len, ssa.BoundsIndex, false) // checks i == 0 v := s.newValue1(ssa.OpArrayMake1, t, right) s.assign(left.Left, v, false, 0) @@ -3078,7 +3079,7 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) // If this assignment clobbers an entire local variable, then emit // OpVarDef so liveness analysis knows the variable is redefined. - if base := clobberBase(left); base.Op == ONAME && base.Class() != PEXTERN && skip == 0 { + if base := clobberBase(left); base.Op == ir.ONAME && base.Class() != ir.PEXTERN && skip == 0 { s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !base.IsAutoTmp()) } @@ -3090,7 +3091,7 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) // is valid, even though they have type uintptr (#19168). // Mark it pointer type to signal the writebarrier pass to // insert a write barrier. - t = types.Types[TUNSAFEPTR] + t = types.Types[types.TUNSAFEPTR] } if deref { // Treat as a mem->mem move. @@ -3133,10 +3134,10 @@ func (s *state) zeroVal(t *types.Type) *ssa.Value { case t.IsComplex(): switch t.Size() { case 8: - z := s.constFloat32(types.Types[TFLOAT32], 0) + z := s.constFloat32(types.Types[types.TFLOAT32], 0) return s.entryNewValue2(ssa.OpComplexMake, t, z, z) case 16: - z := s.constFloat64(types.Types[TFLOAT64], 0) + z := s.constFloat64(types.Types[types.TFLOAT64], 0) return s.entryNewValue2(ssa.OpComplexMake, t, z, z) default: s.Fatalf("bad sized complex type %v", t) @@ -3190,38 +3191,38 @@ var softFloatOps map[ssa.Op]sfRtCallDef func softfloatInit() { // Some of these operations get transformed by sfcall. 
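	// A reading aid for the table below, inferred from sfcall: Sub is
	// lowered to fadd after negating the second operand, Neq to feq with
	// the result negated, and Less/Leq to fgt/fge with the operands
	// swapped, so each operation family needs only one runtime helper.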
softFloatOps = map[ssa.Op]sfRtCallDef{ - ssa.OpAdd32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32}, - ssa.OpAdd64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64}, - ssa.OpSub32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32}, - ssa.OpSub64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64}, - ssa.OpMul32F: sfRtCallDef{sysfunc("fmul32"), TFLOAT32}, - ssa.OpMul64F: sfRtCallDef{sysfunc("fmul64"), TFLOAT64}, - ssa.OpDiv32F: sfRtCallDef{sysfunc("fdiv32"), TFLOAT32}, - ssa.OpDiv64F: sfRtCallDef{sysfunc("fdiv64"), TFLOAT64}, - - ssa.OpEq64F: sfRtCallDef{sysfunc("feq64"), TBOOL}, - ssa.OpEq32F: sfRtCallDef{sysfunc("feq32"), TBOOL}, - ssa.OpNeq64F: sfRtCallDef{sysfunc("feq64"), TBOOL}, - ssa.OpNeq32F: sfRtCallDef{sysfunc("feq32"), TBOOL}, - ssa.OpLess64F: sfRtCallDef{sysfunc("fgt64"), TBOOL}, - ssa.OpLess32F: sfRtCallDef{sysfunc("fgt32"), TBOOL}, - ssa.OpLeq64F: sfRtCallDef{sysfunc("fge64"), TBOOL}, - ssa.OpLeq32F: sfRtCallDef{sysfunc("fge32"), TBOOL}, - - ssa.OpCvt32to32F: sfRtCallDef{sysfunc("fint32to32"), TFLOAT32}, - ssa.OpCvt32Fto32: sfRtCallDef{sysfunc("f32toint32"), TINT32}, - ssa.OpCvt64to32F: sfRtCallDef{sysfunc("fint64to32"), TFLOAT32}, - ssa.OpCvt32Fto64: sfRtCallDef{sysfunc("f32toint64"), TINT64}, - ssa.OpCvt64Uto32F: sfRtCallDef{sysfunc("fuint64to32"), TFLOAT32}, - ssa.OpCvt32Fto64U: sfRtCallDef{sysfunc("f32touint64"), TUINT64}, - ssa.OpCvt32to64F: sfRtCallDef{sysfunc("fint32to64"), TFLOAT64}, - ssa.OpCvt64Fto32: sfRtCallDef{sysfunc("f64toint32"), TINT32}, - ssa.OpCvt64to64F: sfRtCallDef{sysfunc("fint64to64"), TFLOAT64}, - ssa.OpCvt64Fto64: sfRtCallDef{sysfunc("f64toint64"), TINT64}, - ssa.OpCvt64Uto64F: sfRtCallDef{sysfunc("fuint64to64"), TFLOAT64}, - ssa.OpCvt64Fto64U: sfRtCallDef{sysfunc("f64touint64"), TUINT64}, - ssa.OpCvt32Fto64F: sfRtCallDef{sysfunc("f32to64"), TFLOAT64}, - ssa.OpCvt64Fto32F: sfRtCallDef{sysfunc("f64to32"), TFLOAT32}, + ssa.OpAdd32F: sfRtCallDef{sysfunc("fadd32"), types.TFLOAT32}, + ssa.OpAdd64F: sfRtCallDef{sysfunc("fadd64"), types.TFLOAT64}, + ssa.OpSub32F: sfRtCallDef{sysfunc("fadd32"), types.TFLOAT32}, + ssa.OpSub64F: sfRtCallDef{sysfunc("fadd64"), types.TFLOAT64}, + ssa.OpMul32F: sfRtCallDef{sysfunc("fmul32"), types.TFLOAT32}, + ssa.OpMul64F: sfRtCallDef{sysfunc("fmul64"), types.TFLOAT64}, + ssa.OpDiv32F: sfRtCallDef{sysfunc("fdiv32"), types.TFLOAT32}, + ssa.OpDiv64F: sfRtCallDef{sysfunc("fdiv64"), types.TFLOAT64}, + + ssa.OpEq64F: sfRtCallDef{sysfunc("feq64"), types.TBOOL}, + ssa.OpEq32F: sfRtCallDef{sysfunc("feq32"), types.TBOOL}, + ssa.OpNeq64F: sfRtCallDef{sysfunc("feq64"), types.TBOOL}, + ssa.OpNeq32F: sfRtCallDef{sysfunc("feq32"), types.TBOOL}, + ssa.OpLess64F: sfRtCallDef{sysfunc("fgt64"), types.TBOOL}, + ssa.OpLess32F: sfRtCallDef{sysfunc("fgt32"), types.TBOOL}, + ssa.OpLeq64F: sfRtCallDef{sysfunc("fge64"), types.TBOOL}, + ssa.OpLeq32F: sfRtCallDef{sysfunc("fge32"), types.TBOOL}, + + ssa.OpCvt32to32F: sfRtCallDef{sysfunc("fint32to32"), types.TFLOAT32}, + ssa.OpCvt32Fto32: sfRtCallDef{sysfunc("f32toint32"), types.TINT32}, + ssa.OpCvt64to32F: sfRtCallDef{sysfunc("fint64to32"), types.TFLOAT32}, + ssa.OpCvt32Fto64: sfRtCallDef{sysfunc("f32toint64"), types.TINT64}, + ssa.OpCvt64Uto32F: sfRtCallDef{sysfunc("fuint64to32"), types.TFLOAT32}, + ssa.OpCvt32Fto64U: sfRtCallDef{sysfunc("f32touint64"), types.TUINT64}, + ssa.OpCvt32to64F: sfRtCallDef{sysfunc("fint32to64"), types.TFLOAT64}, + ssa.OpCvt64Fto32: sfRtCallDef{sysfunc("f64toint32"), types.TINT32}, + ssa.OpCvt64to64F: sfRtCallDef{sysfunc("fint64to64"), types.TFLOAT64}, + ssa.OpCvt64Fto64: sfRtCallDef{sysfunc("f64toint64"), 
types.TINT64}, + ssa.OpCvt64Uto64F: sfRtCallDef{sysfunc("fuint64to64"), types.TFLOAT64}, + ssa.OpCvt64Fto64U: sfRtCallDef{sysfunc("f64touint64"), types.TUINT64}, + ssa.OpCvt32Fto64F: sfRtCallDef{sysfunc("f32to64"), types.TFLOAT64}, + ssa.OpCvt64Fto32F: sfRtCallDef{sysfunc("f64to32"), types.TFLOAT32}, } } @@ -3237,7 +3238,7 @@ func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) { args[0], args[1] = args[1], args[0] case ssa.OpSub32F, ssa.OpSub64F: - args[1] = s.newValue1(s.ssaOp(ONEG, types.Types[callDef.rtype]), args[1].Type, args[1]) + args[1] = s.newValue1(s.ssaOp(ir.ONEG, types.Types[callDef.rtype]), args[1].Type, args[1]) } result := s.rtcall(callDef.rtfn, true, []*types.Type{types.Types[callDef.rtype]}, args...)[0] @@ -3253,7 +3254,7 @@ var intrinsics map[intrinsicKey]intrinsicBuilder // An intrinsicBuilder converts a call node n into an ssa value that // implements that call as an intrinsic. args is a list of arguments to the func. -type intrinsicBuilder func(s *state, n *Node, args []*ssa.Value) *ssa.Value +type intrinsicBuilder func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value type intrinsicKey struct { arch *sys.Arch @@ -3318,7 +3319,7 @@ func init() { /******** runtime ********/ if !instrumenting { add("runtime", "slicebytetostringtmp", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { // Compiler frontend optimizations emit OBYTES2STRTMP nodes // for the backend instead of slicebytetostringtmp calls // when not instrumenting. @@ -3327,98 +3328,98 @@ func init() { all...) } addF("runtime/internal/math", "MulUintptr", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { if s.config.PtrSize == 4 { - return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[TUINT], types.Types[TUINT]), args[0], args[1]) + return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1]) } - return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[TUINT], types.Types[TUINT]), args[0], args[1]) + return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1]) }, sys.AMD64, sys.I386, sys.MIPS64) add("runtime", "KeepAlive", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0]) s.vars[memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem()) return nil }, all...) add("runtime", "getclosureptr", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr) }, all...) add("runtime", "getcallerpc", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr) }, all...) add("runtime", "getcallersp", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr) }, all...) 
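Every add/addF call in this init function follows the same shape: a builder closure registered under an (architecture, package path, function name) key. A self-contained sketch of that registry, with stand-in types where the real ones (*sys.Arch, *ir.Node, *ssa.Value) are not needed:

	package main

	import "fmt"

	// Stand-ins for *sys.Arch and intrinsicBuilder; the real builder
	// takes (*state, *ir.Node, []*ssa.Value) and returns an *ssa.Value.
	type arch string
	type builder func(args []string) string

	type key struct {
		arch arch
		pkg  string
		fn   string
	}

	var table = map[key]builder{}

	// addF registers one builder for (pkg, fn) on each listed
	// architecture, mirroring the addF calls above; add registers it
	// for every architecture (the all... slice).
	func addF(pkg, fn string, b builder, archs ...arch) {
		for _, a := range archs {
			table[key{a, pkg, fn}] = b
		}
	}

	func main() {
		addF("runtime/internal/sys", "Ctz32",
			func(args []string) string { return "Ctz32(" + args[0] + ")" },
			"amd64", "arm64")
		fmt.Println(table[key{"amd64", "runtime/internal/sys", "Ctz32"}]([]string{"x"}))
	}

findIntrinsic later performs the reverse lookup with the current target architecture, which is why each registration lists the architectures it is valid on.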
/******** runtime/internal/sys ********/ addF("runtime/internal/sys", "Ctz32", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) addF("runtime/internal/sys", "Ctz64", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) addF("runtime/internal/sys", "Bswap32", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpBswap32, types.Types[TUINT32], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpBswap32, types.Types[types.TUINT32], args[0]) }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X) addF("runtime/internal/sys", "Bswap64", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpBswap64, types.Types[TUINT64], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpBswap64, types.Types[types.TUINT64], args[0]) }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X) /******** runtime/internal/atomic ********/ addF("runtime/internal/atomic", "Load", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem()) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) - return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) }, sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Load8", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[TUINT8], types.TypeMem), args[0], s.mem()) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[types.TUINT8], types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) - return s.newValue1(ssa.OpSelect0, types.Types[TUINT8], v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v) }, sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Load64", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem()) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) - return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) }, sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "LoadAcq", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - v := 
s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem()) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) - return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) }, sys.PPC64, sys.S390X) addF("runtime/internal/atomic", "LoadAcq64", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem()) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) - return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) }, sys.PPC64) addF("runtime/internal/atomic", "Loadp", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v) @@ -3426,65 +3427,65 @@ func init() { sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Store", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Store8", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Store64", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "StorepNoWB", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "StoreRel", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.PPC64, sys.S390X) addF("runtime/internal/atomic", "StoreRel64", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem()) return nil }, 
sys.PPC64) addF("runtime/internal/atomic", "Xchg", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem()) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) - return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) }, sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Xchg64", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem()) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) - return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) }, sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) - type atomicOpEmitter func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) + type atomicOpEmitter func(s *state, n *ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.EType, emit atomicOpEmitter) intrinsicBuilder { - return func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { // Target Atomic feature is identified by dynamic detection - addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), arm64HasATOMICS, s.sb) - v := s.load(types.Types[TBOOL], addr) + addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), arm64HasATOMICS, s.sb) + v := s.load(types.Types[types.TBOOL], addr) b := s.endBlock() b.Kind = ssa.BlockIf b.SetControl(v) @@ -3507,7 +3508,7 @@ func init() { // Merge results. 
s.startBlock(bEnd) - if rtyp == TNIL { + if rtyp == types.TNIL { return nil } else { return s.variable(n, types.Types[rtyp]) @@ -3515,115 +3516,115 @@ func init() { } } - atomicXchgXaddEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) { + atomicXchgXaddEmitterARM64 := func(s *state, n *ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) { v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v) } addF("runtime/internal/atomic", "Xchg", - makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, TUINT32, TUINT32, atomicXchgXaddEmitterARM64), + makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64), sys.ARM64) addF("runtime/internal/atomic", "Xchg64", - makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, TUINT64, TUINT64, atomicXchgXaddEmitterARM64), + makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64), sys.ARM64) addF("runtime/internal/atomic", "Xadd", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem()) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) - return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) }, sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Xadd64", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem()) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) - return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) }, sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Xadd", - makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, TUINT32, TUINT32, atomicXchgXaddEmitterARM64), + makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64), sys.ARM64) addF("runtime/internal/atomic", "Xadd64", - makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, TUINT64, TUINT64, atomicXchgXaddEmitterARM64), + makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64), sys.ARM64) addF("runtime/internal/atomic", "Cas", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) s.vars[memVar] = 
s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v) }, sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Cas64", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v) }, sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "CasRel", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v) }, sys.PPC64) - atomicCasEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) { + atomicCasEmitterARM64 := func(s *state, n *ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) { v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v) } addF("runtime/internal/atomic", "Cas", - makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, TUINT32, TBOOL, atomicCasEmitterARM64), + makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, types.TUINT32, types.TBOOL, atomicCasEmitterARM64), sys.ARM64) addF("runtime/internal/atomic", "Cas64", - makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, TUINT64, TBOOL, atomicCasEmitterARM64), + makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, types.TUINT64, types.TBOOL, atomicCasEmitterARM64), sys.ARM64) addF("runtime/internal/atomic", "And8", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X) addF("runtime/internal/atomic", "And", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X) addF("runtime/internal/atomic", "Or8", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X) addF("runtime/internal/atomic", "Or", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X) - atomicAndOrEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ 
types.EType) { + atomicAndOrEmitterARM64 := func(s *state, n *ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) { s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem()) } addF("runtime/internal/atomic", "And8", - makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, TNIL, TNIL, atomicAndOrEmitterARM64), + makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64), sys.ARM64) addF("runtime/internal/atomic", "And", - makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, TNIL, TNIL, atomicAndOrEmitterARM64), + makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64), sys.ARM64) addF("runtime/internal/atomic", "Or8", - makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, TNIL, TNIL, atomicAndOrEmitterARM64), + makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64), sys.ARM64) addF("runtime/internal/atomic", "Or", - makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, TNIL, TNIL, atomicAndOrEmitterARM64), + makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64), sys.ARM64) alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...) @@ -3658,57 +3659,57 @@ func init() { /******** math ********/ addF("math", "Sqrt", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpSqrt, types.Types[TFLOAT64], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpSqrt, types.Types[types.TFLOAT64], args[0]) }, sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm) addF("math", "Trunc", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpTrunc, types.Types[TFLOAT64], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpTrunc, types.Types[types.TFLOAT64], args[0]) }, sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm) addF("math", "Ceil", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpCeil, types.Types[TFLOAT64], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpCeil, types.Types[types.TFLOAT64], args[0]) }, sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm) addF("math", "Floor", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpFloor, types.Types[TFLOAT64], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpFloor, types.Types[types.TFLOAT64], args[0]) }, sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm) addF("math", "Round", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpRound, types.Types[TFLOAT64], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpRound, types.Types[types.TFLOAT64], args[0]) }, sys.ARM64, sys.PPC64, sys.S390X) addF("math", "RoundToEven", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpRoundToEven, types.Types[TFLOAT64], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpRoundToEven, types.Types[types.TFLOAT64], args[0]) }, sys.ARM64, sys.S390X, sys.Wasm) addF("math", "Abs", - func(s 
*state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpAbs, types.Types[TFLOAT64], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpAbs, types.Types[types.TFLOAT64], args[0]) }, sys.ARM64, sys.ARM, sys.PPC64, sys.Wasm) addF("math", "Copysign", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue2(ssa.OpCopysign, types.Types[TFLOAT64], args[0], args[1]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue2(ssa.OpCopysign, types.Types[types.TFLOAT64], args[0], args[1]) }, sys.PPC64, sys.Wasm) addF("math", "FMA", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue3(ssa.OpFMA, types.Types[TFLOAT64], args[0], args[1], args[2]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2]) }, sys.ARM64, sys.PPC64, sys.S390X) addF("math", "FMA", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { if !s.config.UseFMA { s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64] - return s.variable(n, types.Types[TFLOAT64]) + return s.variable(n, types.Types[types.TFLOAT64]) } - v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[TBOOL], x86HasFMA) + v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasFMA) b := s.endBlock() b.Kind = ssa.BlockIf b.SetControl(v) @@ -3721,7 +3722,7 @@ func init() { // We have the intrinsic - use it directly. s.startBlock(bTrue) - s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[TFLOAT64], args[0], args[1], args[2]) + s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2]) s.endBlock().AddEdgeTo(bEnd) // Call the pure Go version. @@ -3731,17 +3732,17 @@ func init() { // Merge results. s.startBlock(bEnd) - return s.variable(n, types.Types[TFLOAT64]) + return s.variable(n, types.Types[types.TFLOAT64]) }, sys.AMD64) addF("math", "FMA", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { if !s.config.UseFMA { s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64] - return s.variable(n, types.Types[TFLOAT64]) + return s.variable(n, types.Types[types.TFLOAT64]) } - addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), armHasVFPv4, s.sb) - v := s.load(types.Types[TBOOL], addr) + addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), armHasVFPv4, s.sb) + v := s.load(types.Types[types.TBOOL], addr) b := s.endBlock() b.Kind = ssa.BlockIf b.SetControl(v) @@ -3754,7 +3755,7 @@ func init() { // We have the intrinsic - use it directly. s.startBlock(bTrue) - s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[TFLOAT64], args[0], args[1], args[2]) + s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2]) s.endBlock().AddEdgeTo(bEnd) // Call the pure Go version. @@ -3764,13 +3765,13 @@ func init() { // Merge results. 
s.startBlock(bEnd) - return s.variable(n, types.Types[TFLOAT64]) + return s.variable(n, types.Types[types.TFLOAT64]) }, sys.ARM) - makeRoundAMD64 := func(op ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[TBOOL], x86HasSSE41) + makeRoundAMD64 := func(op ssa.Op) func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasSSE41) b := s.endBlock() b.Kind = ssa.BlockIf b.SetControl(v) @@ -3783,7 +3784,7 @@ func init() { // We have the intrinsic - use it directly. s.startBlock(bTrue) - s.vars[n] = s.newValue1(op, types.Types[TFLOAT64], args[0]) + s.vars[n] = s.newValue1(op, types.Types[types.TFLOAT64], args[0]) s.endBlock().AddEdgeTo(bEnd) // Call the pure Go version. @@ -3793,7 +3794,7 @@ func init() { // Merge results. s.startBlock(bEnd) - return s.variable(n, types.Types[TFLOAT64]) + return s.variable(n, types.Types[types.TFLOAT64]) } } addF("math", "RoundToEven", @@ -3811,55 +3812,55 @@ func init() { /******** math/bits ********/ addF("math/bits", "TrailingZeros64", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) addF("math/bits", "TrailingZeros32", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) addF("math/bits", "TrailingZeros16", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0]) - c := s.constInt32(types.Types[TUINT32], 1<<16) - y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c) - return s.newValue1(ssa.OpCtz32, types.Types[TINT], y) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0]) + c := s.constInt32(types.Types[types.TUINT32], 1<<16) + y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c) + return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y) }, sys.MIPS) addF("math/bits", "TrailingZeros16", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpCtz16, types.Types[TINT], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpCtz16, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm) addF("math/bits", "TrailingZeros16", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0]) - c := s.constInt64(types.Types[TUINT64], 1<<16) - y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c) - return s.newValue1(ssa.OpCtz64, types.Types[TINT], y) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0]) + c := s.constInt64(types.Types[types.TUINT64], 1<<16) + y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c) + return 
s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y) }, sys.S390X, sys.PPC64) addF("math/bits", "TrailingZeros8", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0]) - c := s.constInt32(types.Types[TUINT32], 1<<8) - y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c) - return s.newValue1(ssa.OpCtz32, types.Types[TINT], y) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0]) + c := s.constInt32(types.Types[types.TUINT32], 1<<8) + y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c) + return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y) }, sys.MIPS) addF("math/bits", "TrailingZeros8", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpCtz8, types.Types[TINT], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpCtz8, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.ARM, sys.ARM64, sys.Wasm) addF("math/bits", "TrailingZeros8", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0]) - c := s.constInt64(types.Types[TUINT64], 1<<8) - y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c) - return s.newValue1(ssa.OpCtz64, types.Types[TINT], y) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0]) + c := s.constInt64(types.Types[types.TUINT64], 1<<8) + y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c) + return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y) }, sys.S390X) alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...) @@ -3867,116 +3868,116 @@ func init() { // ReverseBytes inlines correctly, no need to intrinsify it. // ReverseBytes16 lowers to a rotate, no need for anything special here. 
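The widening pattern used above for TrailingZeros16 and TrailingZeros8 on targets without a narrow Ctz deserves a note: ORing in a bit just above the operand's width guarantees the wide count never exceeds 16 (or 8), which is exactly what a zero input must return. A standalone check of the identity:

	package main

	import (
		"fmt"
		"math/bits"
	)

	// trailingZeros16 mimics the lowering above: zero-extend, OR in a
	// sentinel at bit 16, then count trailing zeros in the wide value.
	// For x == 0 the sentinel is the lowest set bit, giving 16, which
	// matches math/bits.TrailingZeros16's contract.
	func trailingZeros16(x uint16) int {
		y := uint32(x) | 1<<16
		return bits.TrailingZeros32(y)
	}

	func main() {
		fmt.Println(trailingZeros16(0))      // 16
		fmt.Println(trailingZeros16(0b1000)) // 3
		fmt.Println(trailingZeros16(1))      // 0
	}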
addF("math/bits", "Len64", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) addF("math/bits", "Len32", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.ARM64) addF("math/bits", "Len32", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { if s.config.PtrSize == 4 { - return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0]) + return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0]) } - x := s.newValue1(ssa.OpZeroExt32to64, types.Types[TUINT64], args[0]) - return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x) + x := s.newValue1(ssa.OpZeroExt32to64, types.Types[types.TUINT64], args[0]) + return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x) }, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) addF("math/bits", "Len16", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { if s.config.PtrSize == 4 { - x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0]) - return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x) + x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0]) + return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x) } - x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0]) - return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x) + x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0]) + return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x) }, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) addF("math/bits", "Len16", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpBitLen16, types.Types[TINT], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpBitLen16, types.Types[types.TINT], args[0]) }, sys.AMD64) addF("math/bits", "Len8", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { if s.config.PtrSize == 4 { - x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0]) - return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x) + x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0]) + return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x) } - x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0]) - return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x) + x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0]) + return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x) }, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) addF("math/bits", "Len8", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpBitLen8, types.Types[TINT], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpBitLen8, types.Types[types.TINT], args[0]) }, sys.AMD64) addF("math/bits", "Len", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, 
n *ir.Node, args []*ssa.Value) *ssa.Value { if s.config.PtrSize == 4 { - return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0]) + return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0]) } - return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0]) + return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) // LeadingZeros is handled because it trivially calls Len. addF("math/bits", "Reverse64", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0]) }, sys.ARM64) addF("math/bits", "Reverse32", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0]) }, sys.ARM64) addF("math/bits", "Reverse16", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpBitRev16, types.Types[TINT], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpBitRev16, types.Types[types.TINT], args[0]) }, sys.ARM64) addF("math/bits", "Reverse8", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpBitRev8, types.Types[TINT], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpBitRev8, types.Types[types.TINT], args[0]) }, sys.ARM64) addF("math/bits", "Reverse", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { if s.config.PtrSize == 4 { - return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0]) + return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0]) } - return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0]) + return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0]) }, sys.ARM64) addF("math/bits", "RotateLeft8", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue2(ssa.OpRotateLeft8, types.Types[TUINT8], args[0], args[1]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue2(ssa.OpRotateLeft8, types.Types[types.TUINT8], args[0], args[1]) }, sys.AMD64) addF("math/bits", "RotateLeft16", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue2(ssa.OpRotateLeft16, types.Types[TUINT16], args[0], args[1]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue2(ssa.OpRotateLeft16, types.Types[types.TUINT16], args[0], args[1]) }, sys.AMD64) addF("math/bits", "RotateLeft32", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue2(ssa.OpRotateLeft32, types.Types[TUINT32], args[0], args[1]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue2(ssa.OpRotateLeft32, types.Types[types.TUINT32], args[0], args[1]) }, sys.AMD64, sys.ARM, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm) addF("math/bits", "RotateLeft64", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue2(ssa.OpRotateLeft64, types.Types[TUINT64], args[0], args[1]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue2(ssa.OpRotateLeft64, types.Types[types.TUINT64], args[0], args[1]) }, sys.AMD64, sys.ARM64, sys.S390X, 
sys.PPC64, sys.Wasm) alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...) - makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[TBOOL], x86HasPOPCNT) + makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasPOPCNT) b := s.endBlock() b.Kind = ssa.BlockIf b.SetControl(v) @@ -3993,7 +3994,7 @@ func init() { if s.config.PtrSize == 4 { op = op32 } - s.vars[n] = s.newValue1(op, types.Types[TINT], args[0]) + s.vars[n] = s.newValue1(op, types.Types[types.TINT], args[0]) s.endBlock().AddEdgeTo(bEnd) // Call the pure Go version. @@ -4003,67 +4004,67 @@ func init() { // Merge results. s.startBlock(bEnd) - return s.variable(n, types.Types[TINT]) + return s.variable(n, types.Types[types.TINT]) } } addF("math/bits", "OnesCount64", makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64), sys.AMD64) addF("math/bits", "OnesCount64", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpPopCount64, types.Types[TINT], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpPopCount64, types.Types[types.TINT], args[0]) }, sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm) addF("math/bits", "OnesCount32", makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32), sys.AMD64) addF("math/bits", "OnesCount32", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpPopCount32, types.Types[TINT], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpPopCount32, types.Types[types.TINT], args[0]) }, sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm) addF("math/bits", "OnesCount16", makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16), sys.AMD64) addF("math/bits", "OnesCount16", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpPopCount16, types.Types[TINT], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpPopCount16, types.Types[types.TINT], args[0]) }, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm) addF("math/bits", "OnesCount8", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue1(ssa.OpPopCount8, types.Types[TINT], args[0]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpPopCount8, types.Types[types.TINT], args[0]) }, sys.S390X, sys.PPC64, sys.Wasm) addF("math/bits", "OnesCount", makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32), sys.AMD64) addF("math/bits", "Mul64", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1]) }, sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64) alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE) addF("math/bits", "Add64", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue3(ssa.OpAdd64carry, 
types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2]) }, sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X) alias("math/bits", "Add", "math/bits", "Add64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X) addF("math/bits", "Sub64", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2]) }, sys.AMD64, sys.ARM64, sys.S390X) alias("math/bits", "Sub", "math/bits", "Sub64", sys.ArchAMD64, sys.ArchARM64, sys.ArchS390X) addF("math/bits", "Div64", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { // check for divide-by-zero/overflow and panic with appropriate message - cmpZero := s.newValue2(s.ssaOp(ONE, types.Types[TUINT64]), types.Types[TBOOL], args[2], s.zeroVal(types.Types[TUINT64])) + cmpZero := s.newValue2(s.ssaOp(ir.ONE, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[2], s.zeroVal(types.Types[types.TUINT64])) s.check(cmpZero, panicdivide) - cmpOverflow := s.newValue2(s.ssaOp(OLT, types.Types[TUINT64]), types.Types[TBOOL], args[0], args[2]) + cmpOverflow := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[0], args[2]) s.check(cmpOverflow, panicoverflow) - return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2]) + return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2]) }, sys.AMD64) alias("math/bits", "Div", "math/bits", "Div64", sys.ArchAMD64) @@ -4117,8 +4118,8 @@ func init() { /******** math/big ********/ add("math/big", "mulWW", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1]) + func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1]) }, sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64LE, sys.ArchPPC64, sys.ArchS390X) } @@ -4130,7 +4131,7 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder { return nil } pkg := sym.Pkg.Path - if sym.Pkg == localpkg { + if sym.Pkg == ir.LocalPkg { pkg = base.Ctxt.Pkgpath } if base.Flag.Race && pkg == "sync/atomic" { @@ -4155,7 +4156,7 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder { return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}] } -func isIntrinsicCall(n *Node) bool { +func isIntrinsicCall(n *ir.Node) bool { if n == nil || n.Left == nil { return false } @@ -4163,7 +4164,7 @@ func isIntrinsicCall(n *Node) bool { } // intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation. 
-func (s *state) intrinsicCall(n *Node) *ssa.Value { +func (s *state) intrinsicCall(n *ir.Node) *ssa.Value { v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n)) if ssa.IntrinsicsDebug > 0 { x := v @@ -4179,15 +4180,15 @@ func (s *state) intrinsicCall(n *Node) *ssa.Value { } // intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them. -func (s *state) intrinsicArgs(n *Node) []*ssa.Value { +func (s *state) intrinsicArgs(n *ir.Node) []*ssa.Value { // Construct map of temps; see comments in s.call about the structure of n. - temps := map[*Node]*ssa.Value{} + temps := map[*ir.Node]*ssa.Value{} for _, a := range n.List.Slice() { - if a.Op != OAS { + if a.Op != ir.OAS { s.Fatalf("non-assignment as a temp function argument %v", a.Op) } l, r := a.Left, a.Right - if l.Op != ONAME { + if l.Op != ir.ONAME { s.Fatalf("non-ONAME temp function argument %v", a.Op) } // Evaluate and store to "temporary". @@ -4214,7 +4215,7 @@ func (s *state) intrinsicArgs(n *Node) []*ssa.Value { // call. We will also record funcdata information on where the args are stored // (as well as the deferBits variable), and this will enable us to run the proper // defer calls during panics. -func (s *state) openDeferRecord(n *Node) { +func (s *state) openDeferRecord(n *ir.Node) { // Do any needed expression evaluation for the args (including the // receiver, if any). This may be evaluating something like 'autotmp_3 = // once.mutex'. Such a statement will create a mapping in s.vars[] from @@ -4223,24 +4224,24 @@ func (s *state) openDeferRecord(n *Node) { s.stmtList(n.List) var args []*ssa.Value - var argNodes []*Node + var argNodes []*ir.Node opendefer := &openDeferInfo{ n: n, } fn := n.Left - if n.Op == OCALLFUNC { + if n.Op == ir.OCALLFUNC { // We must always store the function value in a stack slot for the // runtime panic code to use. But in the defer exit code, we will // call the function directly if it is a static function. closureVal := s.expr(fn) closure := s.openDeferSave(nil, fn.Type, closureVal) - opendefer.closureNode = closure.Aux.(*Node) - if !(fn.Op == ONAME && fn.Class() == PFUNC) { + opendefer.closureNode = closure.Aux.(*ir.Node) + if !(fn.Op == ir.ONAME && fn.Class() == ir.PFUNC) { opendefer.closure = closure } - } else if n.Op == OCALLMETH { - if fn.Op != ODOTMETH { + } else if n.Op == ir.OCALLMETH { + if fn.Op != ir.ODOTMETH { base.Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn) } closureVal := s.getMethodClosure(fn) @@ -4248,9 +4249,9 @@ func (s *state) openDeferRecord(n *Node) { // runtime panic code to use. But in the defer exit code, we will // call the method directly. closure := s.openDeferSave(nil, fn.Type, closureVal) - opendefer.closureNode = closure.Aux.(*Node) + opendefer.closureNode = closure.Aux.(*ir.Node) } else { - if fn.Op != ODOTINTER { + if fn.Op != ir.ODOTINTER { base.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op) } closure, rcvr := s.getClosureAndRcvr(fn) @@ -4258,8 +4259,8 @@ func (s *state) openDeferRecord(n *Node) { // Important to get the receiver type correct, so it is recognized // as a pointer for GC purposes. 
opendefer.rcvr = s.openDeferSave(nil, fn.Type.Recv().Type, rcvr) - opendefer.closureNode = opendefer.closure.Aux.(*Node) - opendefer.rcvrNode = opendefer.rcvr.Aux.(*Node) + opendefer.closureNode = opendefer.closure.Aux.(*ir.Node) + opendefer.rcvrNode = opendefer.rcvr.Aux.(*ir.Node) } for _, argn := range n.Rlist.Slice() { var v *ssa.Value @@ -4269,7 +4270,7 @@ func (s *state) openDeferRecord(n *Node) { v = s.openDeferSave(argn, argn.Type, nil) } args = append(args, v) - argNodes = append(argNodes, v.Aux.(*Node)) + argNodes = append(argNodes, v.Aux.(*ir.Node)) } opendefer.argVals = args opendefer.argNodes = argNodes @@ -4278,10 +4279,10 @@ // Update deferBits only after evaluation and storage to stack of // args/receiver/interface is successful. - bitvalue := s.constInt8(types.Types[TUINT8], 1<<uint(index)) + bitvalue := s.constInt8(types.Types[types.TUINT8], 1<<uint(index)) for i := len(s.openDefers) - 1; i >= 0; i-- { @@ -4357,12 +4358,12 @@ func (s *state) openDeferExit() { bCond := s.f.NewBlock(ssa.BlockPlain) bEnd := s.f.NewBlock(ssa.BlockPlain) - deferBits := s.variable(deferBitsVar, types.Types[TUINT8]) + deferBits := s.variable(deferBitsVar, types.Types[types.TUINT8]) // Generate code to check if the bit associated with the current // defer is set. - bitval := s.constInt8(types.Types[TUINT8], 1<<uint(i)) + bitval := s.constInt8(types.Types[types.TUINT8], 1<<uint(i)) if t.NumFields() > ssa.MaxStruct { return false } @@ -5011,7 +5012,7 @@ func canSSAType(t *types.Type) bool { } // exprPtr evaluates n to a pointer and nil-checks it. -func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value { +func (s *state) exprPtr(n *ir.Node, bounded bool, lineno src.XPos) *ssa.Value { p := s.expr(n) if bounded || n.NonNil() { if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 { @@ -5092,9 +5093,9 @@ func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo var cmp *ssa.Value if kind == ssa.BoundsIndex || kind == ssa.BoundsIndexU { - cmp = s.newValue2(ssa.OpIsInBounds, types.Types[TBOOL], idx, len) + cmp = s.newValue2(ssa.OpIsInBounds, types.Types[types.TBOOL], idx, len) } else { - cmp = s.newValue2(ssa.OpIsSliceInBounds, types.Types[TBOOL], idx, len) + cmp = s.newValue2(ssa.OpIsSliceInBounds, types.Types[types.TBOOL], idx, len) } b := s.endBlock() b.Kind = ssa.BlockIf @@ -5120,7 +5121,7 @@ func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo if kind != ssa.BoundsIndex && kind != ssa.BoundsIndexU { op = ssa.OpSpectreSliceIndex } - idx = s.newValue2(op, types.Types[TINT], idx, len) + idx = s.newValue2(op, types.Types[types.TINT], idx, len) } return idx @@ -5150,7 +5151,7 @@ func (s *state) check(cmp *ssa.Value, fn *obj.LSym) { s.startBlock(bNext) } -func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value { +func (s *state) intDivide(n *ir.Node, a, b *ssa.Value) *ssa.Value { needcheck := true switch b.Op { case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64: @@ -5160,7 +5161,7 @@ func (s *state) intDivide(n *ir.Node, a, b *ssa.Value) *ssa.Value { } if needcheck { // do a size-appropriate check for zero - cmp := s.newValue2(s.ssaOp(ONE, n.Type), types.Types[TBOOL], b, s.zeroVal(n.Type)) + cmp := s.newValue2(s.ssaOp(ir.ONE, n.Type), types.Types[types.TBOOL], b, s.zeroVal(n.Type)) s.check(cmp, panicdivide) } return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) @@ -5291,24 +5292,24 @@ func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip ski if skip&skipLen != 0 { return } - len := s.newValue1(ssa.OpStringLen, types.Types[TINT], right) + len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], right) lenAddr := s.newValue1I(ssa.OpOffPtr,
s.f.Config.Types.IntPtr, s.config.PtrSize, left) - s.store(types.Types[TINT], lenAddr, len) + s.store(types.Types[types.TINT], lenAddr, len) case t.IsSlice(): if skip&skipLen == 0 { - len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], right) + len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], right) lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left) - s.store(types.Types[TINT], lenAddr, len) + s.store(types.Types[types.TINT], lenAddr, len) } if skip&skipCap == 0 { - cap := s.newValue1(ssa.OpSliceCap, types.Types[TINT], right) + cap := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], right) capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left) - s.store(types.Types[TINT], capAddr, cap) + s.store(types.Types[types.TINT], capAddr, cap) } case t.IsInterface(): // itab field doesn't need a write barrier (even though it is a pointer). itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right) - s.store(types.Types[TUINTPTR], left, itab) + s.store(types.Types[types.TUINTPTR], left, itab) case t.IsStruct(): n := t.NumFields() for i := 0; i < n; i++ { @@ -5369,7 +5370,7 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) { // putArg evaluates n for the purpose of passing it as an argument to a function and returns the corresponding Param for the call. // If forLateExpandedCall is true, it returns the argument value to pass to the call operation. // If forLateExpandedCall is false, then the value is stored at the specified stack offset, and the returned value is nil. -func (s *state) putArg(n *Node, t *types.Type, off int64, forLateExpandedCall bool) (ssa.Param, *ssa.Value) { +func (s *state) putArg(n *ir.Node, t *types.Type, off int64, forLateExpandedCall bool) (ssa.Param, *ssa.Value) { var a *ssa.Value if forLateExpandedCall { if !canSSAType(t) { @@ -5383,7 +5384,7 @@ func (s *state) putArg(n *Node, t *types.Type, off int64, forLateExpandedCall bo return ssa.Param{Type: t, Offset: int32(off)}, a } -func (s *state) storeArgWithBase(n *Node, t *types.Type, base *ssa.Value, off int64) { +func (s *state) storeArgWithBase(n *ir.Node, t *types.Type, base *ssa.Value, off int64) { pt := types.NewPtr(t) var addr *ssa.Value if base == s.sp { @@ -5412,11 +5413,11 @@ func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value) switch { case t.IsSlice(): ptr = s.newValue1(ssa.OpSlicePtr, types.NewPtr(t.Elem()), v) - len = s.newValue1(ssa.OpSliceLen, types.Types[TINT], v) - cap = s.newValue1(ssa.OpSliceCap, types.Types[TINT], v) + len = s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v) + cap = s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], v) case t.IsString(): - ptr = s.newValue1(ssa.OpStringPtr, types.NewPtr(types.Types[TUINT8]), v) - len = s.newValue1(ssa.OpStringLen, types.Types[TINT], v) + ptr = s.newValue1(ssa.OpStringPtr, types.NewPtr(types.Types[types.TUINT8]), v) + len = s.newValue1(ssa.OpStringLen, types.Types[types.TINT], v) cap = len case t.IsPtr(): if !t.Elem().IsArray() { @@ -5424,7 +5425,7 @@ func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value) } s.nilCheck(v) ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), v) - len = s.constInt(types.Types[TINT], t.Elem().NumElem()) + len = s.constInt(types.Types[types.TINT], t.Elem().NumElem()) cap = len default: s.Fatalf("bad type in slice %v\n", t) @@ -5432,7 +5433,7 @@ func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value) // Set default values if i == 
nil { - i = s.constInt(types.Types[TINT], 0) + i = s.constInt(types.Types[types.TINT], 0) } if j == nil { j = len @@ -5470,18 +5471,18 @@ func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value) } // Word-sized integer operations. - subOp := s.ssaOp(OSUB, types.Types[TINT]) - mulOp := s.ssaOp(OMUL, types.Types[TINT]) - andOp := s.ssaOp(OAND, types.Types[TINT]) + subOp := s.ssaOp(ir.OSUB, types.Types[types.TINT]) + mulOp := s.ssaOp(ir.OMUL, types.Types[types.TINT]) + andOp := s.ssaOp(ir.OAND, types.Types[types.TINT]) // Calculate the length (rlen) and capacity (rcap) of the new slice. // For strings the capacity of the result is unimportant. However, // we use rcap to test if we've generated a zero-length slice. // Use length of strings for that. - rlen := s.newValue2(subOp, types.Types[TINT], j, i) + rlen := s.newValue2(subOp, types.Types[types.TINT], j, i) rcap := rlen if j != k && !t.IsString() { - rcap = s.newValue2(subOp, types.Types[TINT], k, i) + rcap = s.newValue2(subOp, types.Types[types.TINT], k, i) } if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 { @@ -5503,15 +5504,15 @@ func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value) // // Where mask(x) is 0 if x==0 and -1 if x>0 and stride is the width // of the element type. - stride := s.constInt(types.Types[TINT], ptr.Type.Elem().Width) + stride := s.constInt(types.Types[types.TINT], ptr.Type.Elem().Width) // The delta is the number of bytes to offset ptr by. - delta := s.newValue2(mulOp, types.Types[TINT], i, stride) + delta := s.newValue2(mulOp, types.Types[types.TINT], i, stride) // If we're slicing to the point where the capacity is zero, // zero out the delta. - mask := s.newValue1(ssa.OpSlicemask, types.Types[TINT], rcap) - delta = s.newValue2(andOp, types.Types[TINT], delta, mask) + mask := s.newValue1(ssa.OpSlicemask, types.Types[types.TINT], rcap) + delta = s.newValue2(andOp, types.Types[types.TINT], delta, mask) // Compute rptr = ptr + delta. rptr := s.newValue2(ssa.OpAddPtr, ptr.Type, ptr, delta) @@ -5544,15 +5545,15 @@ var u64_f32 = u642fcvtTab{ one: (*state).constInt64, } -func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { +func (s *state) uint64Tofloat64(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { return s.uint64Tofloat(&u64_f64, n, x, ft, tt) } -func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { +func (s *state) uint64Tofloat32(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { return s.uint64Tofloat(&u64_f32, n, x, ft, tt) } -func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { +func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { // if x >= 0 { // result = (floatY) x // } else { @@ -5578,7 +5579,7 @@ func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt // equal to 10000000001; that rounds up, and the 1 cannot // be lost else it would round down if the LSB of the // candidate mantissa is 0. 
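A minimal stand-alone rendering in plain Go of the unsigned-to-float branch described above; the helper name is hypothetical, not taken from this code:

	// uint64ToFloat64 is a hypothetical illustration: for values with the
	// top bit set, halve while keeping the low bit "sticky" so the final
	// doubling cannot round the wrong way.
	func uint64ToFloat64(x uint64) float64 {
		if x < 1<<63 {
			return float64(int64(x)) // fits in the signed range
		}
		y := x & 1    // y = x & 1
		z := x>>1 | y // z = x >> 1; z = z | y
		f := float64(int64(z))
		return f + f // result = result + result
	}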
- cmp := s.newValue2(cvttab.leq, types.Types[TBOOL], s.zeroVal(ft), x) + cmp := s.newValue2(cvttab.leq, types.Types[types.TBOOL], s.zeroVal(ft), x) b := s.endBlock() b.Kind = ssa.BlockIf b.SetControl(cmp) @@ -5625,21 +5626,21 @@ var u32_f32 = u322fcvtTab{ cvtF2F: ssa.OpCvt64Fto32F, } -func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { +func (s *state) uint32Tofloat64(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { return s.uint32Tofloat(&u32_f64, n, x, ft, tt) } -func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { +func (s *state) uint32Tofloat32(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { return s.uint32Tofloat(&u32_f32, n, x, ft, tt) } -func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { +func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { // if x >= 0 { // result = floatY(x) // } else { // result = floatY(float64(x) + (1<<32)) // } - cmp := s.newValue2(ssa.OpLeq32, types.Types[TBOOL], s.zeroVal(ft), x) + cmp := s.newValue2(ssa.OpLeq32, types.Types[types.TBOOL], s.zeroVal(ft), x) b := s.endBlock() b.Kind = ssa.BlockIf b.SetControl(cmp) @@ -5658,9 +5659,9 @@ func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt b.AddEdgeTo(bElse) s.startBlock(bElse) - a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[TFLOAT64], x) - twoToThe32 := s.constFloat64(types.Types[TFLOAT64], float64(1<<32)) - a2 := s.newValue2(ssa.OpAdd64F, types.Types[TFLOAT64], a1, twoToThe32) + a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[types.TFLOAT64], x) + twoToThe32 := s.constFloat64(types.Types[types.TFLOAT64], float64(1<<32)) + a2 := s.newValue2(ssa.OpAdd64F, types.Types[types.TFLOAT64], a1, twoToThe32) a3 := s.newValue1(cvttab.cvtF2F, tt, a2) s.vars[n] = a3 @@ -5672,7 +5673,7 @@ func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt } // referenceTypeBuiltin generates code for the len/cap builtins for maps and channels. 
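A matching plain-Go sketch of the uint32 branch above, again with a hypothetical name: reinterpreting the value as a signed int32 and adding 2^32 back is exact, since every uint32 is exactly representable in float64.

	// uint32ToFloat64 is a hypothetical illustration of the
	// floatY(float64(x) + (1<<32)) branch.
	func uint32ToFloat64(x uint32) float64 {
		s := int32(x) // negative exactly when the top bit is set
		if s >= 0 {
			return float64(s)
		}
		return float64(s) + (1 << 32) // undo the signed reinterpretation
	}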
-func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value { +func (s *state) referenceTypeBuiltin(n *ir.Node, x *ssa.Value) *ssa.Value { if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() { s.Fatalf("node must be a map or a channel") } @@ -5685,8 +5686,8 @@ func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value { // return *(((*int)n)+1) // } lenType := n.Type - nilValue := s.constNil(types.Types[TUINTPTR]) - cmp := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], x, nilValue) + nilValue := s.constNil(types.Types[types.TUINTPTR]) + cmp := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], x, nilValue) b := s.endBlock() b.Kind = ssa.BlockIf b.SetControl(cmp) @@ -5706,10 +5707,10 @@ func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value { b.AddEdgeTo(bElse) s.startBlock(bElse) switch n.Op { - case OLEN: + case ir.OLEN: // length is stored in the first word for map/chan s.vars[n] = s.load(lenType, x) - case OCAP: + case ir.OCAP: // capacity is stored in the second word for chan sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x) s.vars[n] = s.load(lenType, sw) @@ -5770,22 +5771,22 @@ var f64_u32 = f2uCvtTab{ cutoff: 1 << 31, } -func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { +func (s *state) float32ToUint64(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { return s.floatToUint(&f32_u64, n, x, ft, tt) } -func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { +func (s *state) float64ToUint64(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { return s.floatToUint(&f64_u64, n, x, ft, tt) } -func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { +func (s *state) float32ToUint32(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { return s.floatToUint(&f32_u32, n, x, ft, tt) } -func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { +func (s *state) float64ToUint32(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { return s.floatToUint(&f64_u32, n, x, ft, tt) } -func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { +func (s *state) floatToUint(cvttab *f2uCvtTab, n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { // cutoff:=1<<(intY_Size-1) // if x < floatX(cutoff) { // result = uintY(x) @@ -5795,7 +5796,7 @@ func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *ty // result = z | -(cutoff) // } cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff)) - cmp := s.newValue2(cvttab.ltf, types.Types[TBOOL], x, cutoff) + cmp := s.newValue2(cvttab.ltf, types.Types[types.TBOOL], x, cutoff) b := s.endBlock() b.Kind = ssa.BlockIf b.SetControl(cmp) @@ -5829,7 +5830,7 @@ func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *ty // dottype generates SSA for a type assertion node. // commaok indicates whether to panic or return a bool. // If commaok is false, resok will be nil. -func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { +func (s *state) dottype(n *ir.Node, commaok bool) (res, resok *ssa.Value) { iface := s.expr(n.Left) // input interface target := s.expr(n.Right) // target type byteptr := s.f.Config.Types.BytePtr @@ -5845,7 +5846,7 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { // Get itab/type field from input. itab := s.newValue1(ssa.OpITab, byteptr, iface) // Conversion succeeds iff that field is not nil. 
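Going the other way, a stand-alone sketch of the floatToUint cutoff scheme above (hypothetical name): values below 2^63 convert directly through the signed range; larger values are shifted down by the cutoff and the top bit is restored with an OR, since -(1<<63) and 1<<63 coincide in two's complement.

	// float64ToUint64 is a hypothetical illustration of the cutoff scheme.
	func float64ToUint64(x float64) uint64 {
		const cutoff = 1 << 63 // exactly representable in float64
		if x < cutoff {
			return uint64(int64(x)) // result = uintY(x)
		}
		y := x - cutoff       // y = x - floatX(cutoff)
		z := uint64(int64(y)) // z = uintY(y)
		return z | cutoff     // result = z | -(cutoff)
	}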
- cond := s.newValue2(ssa.OpNeqPtr, types.Types[TBOOL], itab, s.constNil(byteptr)) + cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr)) if n.Left.Type.IsEmptyInterface() && commaok { // Converting empty interface to empty interface with ,ok is just a nil check. @@ -5910,13 +5911,13 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { } if n.Left.Type.IsEmptyInterface() { if commaok { - call := s.rtcall(assertE2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface) + call := s.rtcall(assertE2I2, true, []*types.Type{n.Type, types.Types[types.TBOOL]}, target, iface) return call[0], call[1] } return s.rtcall(assertE2I, true, []*types.Type{n.Type}, target, iface)[0], nil } if commaok { - call := s.rtcall(assertI2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface) + call := s.rtcall(assertI2I2, true, []*types.Type{n.Type, types.Types[types.TBOOL]}, target, iface) return call[0], call[1] } return s.rtcall(assertI2I, true, []*types.Type{n.Type}, target, iface)[0], nil @@ -5941,7 +5942,7 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { targetITab = s.expr(n.List.First()) } - var tmp *Node // temporary for use with large types + var tmp *ir.Node // temporary for use with large types var addr *ssa.Value // address of tmp if commaok && !canSSAType(n.Type) { // unSSAable type, use temporary. @@ -5951,7 +5952,7 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { addr = s.addr(tmp) } - cond := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], itab, targetITab) + cond := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], itab, targetITab) b := s.endBlock() b.Kind = ssa.BlockIf b.SetControl(cond) @@ -6031,7 +6032,7 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { } // variable returns the value of a variable at the current location. -func (s *state) variable(name *Node, t *types.Type) *ssa.Value { +func (s *state) variable(name *ir.Node, t *types.Type) *ssa.Value { v := s.vars[name] if v != nil { return v @@ -6057,8 +6058,8 @@ func (s *state) mem() *ssa.Value { return s.variable(memVar, types.TypeMem) } -func (s *state) addNamedValue(n *Node, v *ssa.Value) { - if n.Class() == Pxxx { +func (s *state) addNamedValue(n *ir.Node, v *ssa.Value) { + if n.Class() == ir.Pxxx { // Don't track our marker nodes (memVar etc.). return } @@ -6066,12 +6067,12 @@ func (s *state) addNamedValue(n *Node, v *ssa.Value) { // Don't track temporary variables. return } - if n.Class() == PPARAMOUT { + if n.Class() == ir.PPARAMOUT { // Don't track named output values. This prevents return values // from being assigned too early. See #14591 and #14762. TODO: allow this. return } - if n.Class() == PAUTO && n.Xoffset != 0 { + if n.Class() == ir.PAUTO && n.Xoffset != 0 { s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset) } loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0} @@ -6110,7 +6111,7 @@ type SSAGenState struct { bstart []*obj.Prog // Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include PPC and Sparc V8. - ScratchFpMem *Node + ScratchFpMem *ir.Node maxarg int64 // largest frame size for arguments to calls made by the function @@ -6193,14 +6194,14 @@ func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) { } // byXoffset implements sort.Interface for []*Node using Xoffset as the ordering. 
-type byXoffset []*Node +type byXoffset []*ir.Node func (s byXoffset) Len() int { return len(s) } func (s byXoffset) Less(i, j int) bool { return s[i].Xoffset < s[j].Xoffset } func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func emitStackObjects(e *ssafn, pp *Progs) { - var vars []*Node + var vars []*ir.Node for _, n := range e.curfn.Func.Dcl { if livenessShouldTrack(n) && n.Name.Addrtaken() { vars = append(vars, n) @@ -6215,7 +6216,7 @@ func emitStackObjects(e *ssafn, pp *Progs) { // Populate the stack object data. // Format must match runtime/stack.go:stackObjectRecord. - x := e.curfn.Func.lsym.Func().StackObjects + x := e.curfn.Func.LSym.Func().StackObjects off := 0 off = duintptr(x, off, uint64(len(vars))) for _, v := range vars { @@ -6252,7 +6253,7 @@ func genssa(f *ssa.Func, pp *Progs) { s.livenessMap = liveness(e, f, pp) emitStackObjects(e, pp) - openDeferInfo := e.curfn.Func.lsym.Func().OpenCodedDeferInfo + openDeferInfo := e.curfn.Func.LSym.Func().OpenCodedDeferInfo if openDeferInfo != nil { // This function uses open-coded defers -- write out the funcdata // info that we computed at the end of genssa. @@ -6457,7 +6458,7 @@ func genssa(f *ssa.Func, pp *Progs) { // some of the inline marks. // Use this instruction instead. p.Pos = p.Pos.WithIsStmt() // promote position to a statement - pp.curfn.Func.lsym.Func().AddInlMark(p, inlMarks[m]) + pp.curfn.Func.LSym.Func().AddInlMark(p, inlMarks[m]) // Make the inline mark a real nop, so it doesn't generate any code. m.As = obj.ANOP m.Pos = src.NoXPos @@ -6469,7 +6470,7 @@ func genssa(f *ssa.Func, pp *Progs) { // Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction). for _, p := range inlMarkList { if p.As != obj.ANOP { - pp.curfn.Func.lsym.Func().AddInlMark(p, inlMarks[p]) + pp.curfn.Func.LSym.Func().AddInlMark(p, inlMarks[p]) } } } @@ -6489,7 +6490,7 @@ func genssa(f *ssa.Func, pp *Progs) { } return bstart[b].Pc case ssa.BlockEnd.ID: - return e.curfn.Func.lsym.Size + return e.curfn.Func.LSym.Size default: return valueToProgAfter[v].Pc } @@ -6591,7 +6592,7 @@ func defframe(s *SSAGenState, e *ssafn) { if !n.Name.Needzero() { continue } - if n.Class() != PAUTO { + if n.Class() != ir.PAUTO { e.Fatalf(n.Pos, "needzero class %d", n.Class()) } if n.Type.Size()%int64(Widthptr) != 0 || n.Xoffset%int64(Widthptr) != 0 || n.Type.Size() == 0 { @@ -6675,8 +6676,8 @@ func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) { case *obj.LSym: a.Name = obj.NAME_EXTERN a.Sym = n - case *Node: - if n.Class() == PPARAM || n.Class() == PPARAMOUT { + case *ir.Node: + if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT { a.Name = obj.NAME_PARAM a.Sym = n.Orig.Sym.Linksym() a.Offset += n.Xoffset @@ -6702,17 +6703,17 @@ func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo // high word and branch to out-of-bounds failure if it is not 0. 
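A stand-alone sketch of the extendIndex check described above, with hypothetical names: on a 32-bit target only the low word can serve as the index, so the bounds check must additionally fail whenever the high word is nonzero.

	// index32 is a hypothetical illustration of indexing a slice with a
	// 64-bit index on a 32-bit machine.
	func index32(b []byte, idx uint64) byte {
		if uint32(idx>>32) != 0 {
			panic("index out of range") // high word nonzero: never in bounds
		}
		return b[uint32(idx)] // normal bounds check on the low word
	}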
var lo *ssa.Value if idx.Type.IsSigned() { - lo = s.newValue1(ssa.OpInt64Lo, types.Types[TINT], idx) + lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TINT], idx) } else { - lo = s.newValue1(ssa.OpInt64Lo, types.Types[TUINT], idx) + lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TUINT], idx) } if bounded || base.Flag.B != 0 { return lo } bNext := s.f.NewBlock(ssa.BlockPlain) bPanic := s.f.NewBlock(ssa.BlockExit) - hi := s.newValue1(ssa.OpInt64Hi, types.Types[TUINT32], idx) - cmp := s.newValue2(ssa.OpEq32, types.Types[TBOOL], hi, s.constInt32(types.Types[TUINT32], 0)) + hi := s.newValue1(ssa.OpInt64Hi, types.Types[types.TUINT32], idx) + cmp := s.newValue2(ssa.OpEq32, types.Types[types.TBOOL], hi, s.constInt32(types.Types[types.TUINT32], 0)) if !idx.Type.IsSigned() { switch kind { case ssa.BoundsIndex: @@ -6781,7 +6782,7 @@ func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo s.Fatalf("bad unsigned index extension %s", idx.Type) } } - return s.newValue1(op, types.Types[TINT], idx) + return s.newValue1(op, types.Types[types.TINT], idx) } // CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values. @@ -6814,12 +6815,12 @@ func CheckLoweredGetClosurePtr(v *ssa.Value) { // AutoVar returns a *Node and int64 representing the auto variable and offset within it // where v should be spilled. -func AutoVar(v *ssa.Value) (*Node, int64) { +func AutoVar(v *ssa.Value) (*ir.Node, int64) { loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot) if v.Type.Size() > loc.Type.Size() { v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type) } - return loc.N.(*Node), loc.Off + return loc.N.(*ir.Node), loc.Off } func AddrAuto(a *obj.Addr, v *ssa.Value) { @@ -6828,7 +6829,7 @@ func AddrAuto(a *obj.Addr, v *ssa.Value) { a.Sym = n.Sym.Linksym() a.Reg = int16(thearch.REGSP) a.Offset = n.Xoffset + off - if n.Class() == PPARAM || n.Class() == PPARAMOUT { + if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT { a.Name = obj.NAME_PARAM } else { a.Name = obj.NAME_AUTO @@ -6925,7 +6926,7 @@ func (s *SSAGenState) UseArgs(n int64) { } // fieldIdx finds the index of the field referred to by the ODOT node n. -func fieldIdx(n *Node) int { +func fieldIdx(n *ir.Node) int { t := n.Left.Type f := n.Sym if !t.IsStruct() { @@ -6952,9 +6953,9 @@ func fieldIdx(n *Node) int { // ssafn holds frontend information about a function that the backend is processing. // It also exports a bunch of compiler services for the ssa backend. type ssafn struct { - curfn *Node + curfn *ir.Node strings map[string]*obj.LSym // map from constant string to data symbols - scratchFpMem *Node // temp for floating point register / memory moves on some architectures + scratchFpMem *ir.Node // temp for floating point register / memory moves on some architectures stksize int64 // stack size for current frame stkptrsize int64 // prefix of stack containing pointers log bool // print ssa debug to the stdout @@ -6980,8 +6981,8 @@ func (e *ssafn) Auto(pos src.XPos, t *types.Type) ssa.GCNode { } func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { - ptrType := types.NewPtr(types.Types[TUINT8]) - lenType := types.Types[TINT] + ptrType := types.NewPtr(types.Types[types.TUINT8]) + lenType := types.Types[types.TINT] // Split this string up into two separate variables. 
p := e.SplitSlot(&name, ".ptr", 0, ptrType) l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType) @@ -6989,9 +6990,9 @@ func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { } func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { - n := name.N.(*Node) - u := types.Types[TUINTPTR] - t := types.NewPtr(types.Types[TUINT8]) + n := name.N.(*ir.Node) + u := types.Types[types.TUINTPTR] + t := types.NewPtr(types.Types[types.TUINT8]) // Split this interface up into two separate variables. f := ".itab" if n.Type.IsEmptyInterface() { @@ -7004,7 +7005,7 @@ func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) { ptrType := types.NewPtr(name.Type.Elem()) - lenType := types.Types[TINT] + lenType := types.Types[types.TINT] p := e.SplitSlot(&name, ".ptr", 0, ptrType) l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType) c := e.SplitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType) @@ -7015,9 +7016,9 @@ func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) s := name.Type.Size() / 2 var t *types.Type if s == 8 { - t = types.Types[TFLOAT64] + t = types.Types[types.TFLOAT64] } else { - t = types.Types[TFLOAT32] + t = types.Types[types.TFLOAT32] } r := e.SplitSlot(&name, ".real", 0, t) i := e.SplitSlot(&name, ".imag", t.Size(), t) @@ -7027,14 +7028,14 @@ func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { var t *types.Type if name.Type.IsSigned() { - t = types.Types[TINT32] + t = types.Types[types.TINT32] } else { - t = types.Types[TUINT32] + t = types.Types[types.TUINT32] } if thearch.LinkArch.ByteOrder == binary.BigEndian { - return e.SplitSlot(&name, ".hi", 0, t), e.SplitSlot(&name, ".lo", t.Size(), types.Types[TUINT32]) + return e.SplitSlot(&name, ".hi", 0, t), e.SplitSlot(&name, ".lo", t.Size(), types.Types[types.TUINT32]) } - return e.SplitSlot(&name, ".hi", t.Size(), t), e.SplitSlot(&name, ".lo", 0, types.Types[TUINT32]) + return e.SplitSlot(&name, ".hi", t.Size(), t), e.SplitSlot(&name, ".lo", 0, types.Types[types.TUINT32]) } func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot { @@ -7046,7 +7047,7 @@ func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot { } func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot { - n := name.N.(*Node) + n := name.N.(*ir.Node) at := name.Type if at.NumElem() != 1 { e.Fatalf(n.Pos, "bad array size") @@ -7061,19 +7062,19 @@ func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym { // SplitSlot returns a slot representing the data of parent starting at offset. 
func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot { - node := parent.N.(*Node) + node := parent.N.(*ir.Node) - if node.Class() != PAUTO || node.Name.Addrtaken() { + if node.Class() != ir.PAUTO || node.Name.Addrtaken() { // addressed things and non-autos retain their parents (i.e., cannot truly be split) return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset} } - s := &types.Sym{Name: node.Sym.Name + suffix, Pkg: localpkg} - n := newnamel(parent.N.(*Node).Pos, s) - s.Def = asTypesNode(n) - asNode(s.Def).Name.SetUsed(true) + s := &types.Sym{Name: node.Sym.Name + suffix, Pkg: ir.LocalPkg} + n := ir.NewNameAt(parent.N.(*ir.Node).Pos, s) + s.Def = ir.AsTypesNode(n) + ir.AsNode(s.Def).Name.SetUsed(true) n.Type = t - n.SetClass(PAUTO) + n.SetClass(ir.PAUTO) n.Esc = EscNever n.Name.Curfn = e.curfn e.curfn.Func.Dcl = append(e.curfn.Func.Dcl, n) @@ -7103,7 +7104,7 @@ func (e *ssafn) Log() bool { // Fatal reports a compiler error and exits. func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) { base.Pos = pos - nargs := append([]interface{}{e.curfn.funcname()}, args...) + nargs := append([]interface{}{ir.FuncName(e.curfn)}, args...) base.Fatalf("'%s': "+msg, nargs...) } @@ -7139,35 +7140,18 @@ func (e *ssafn) Syslook(name string) *obj.LSym { } func (e *ssafn) SetWBPos(pos src.XPos) { - e.curfn.Func.setWBPos(pos) + e.curfn.Func.SetWBPos(pos) } func (e *ssafn) MyImportPath() string { return base.Ctxt.Pkgpath } -func (n *Node) Typ() *types.Type { - return n.Type -} -func (n *Node) StorageClass() ssa.StorageClass { - switch n.Class() { - case PPARAM: - return ssa.ClassParam - case PPARAMOUT: - return ssa.ClassParamOut - case PAUTO: - return ssa.ClassAuto - default: - base.Fatalf("untranslatable storage class for %v: %s", n, n.Class()) - return 0 - } -} - -func clobberBase(n *Node) *Node { - if n.Op == ODOT && n.Left.Type.NumFields() == 1 { +func clobberBase(n *ir.Node) *ir.Node { + if n.Op == ir.ODOT && n.Left.Type.NumFields() == 1 { return clobberBase(n.Left) } - if n.Op == OINDEX && n.Left.Type.IsArray() && n.Left.Type.NumElem() == 1 { + if n.Op == ir.OINDEX && n.Left.Type.IsArray() && n.Left.Type.NumElem() == 1 { return clobberBase(n.Left) } return n diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 00402a1bee65d..46f4153fe19fc 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/src" "crypto/md5" @@ -39,11 +40,11 @@ var ( // It's primarily used to distinguish references to named objects, // whose Pos will point back to their declaration position rather than // their usage position. 
-func hasUniquePos(n *Node) bool { +func hasUniquePos(n *ir.Node) bool { switch n.Op { - case ONAME, OPACK: + case ir.ONAME, ir.OPACK: return false - case OLITERAL, ONIL, OTYPE: + case ir.OLITERAL, ir.ONIL, ir.OTYPE: if n.Sym != nil { return false } @@ -59,7 +60,7 @@ func hasUniquePos(n *Node) bool { return true } -func setlineno(n *Node) src.XPos { +func setlineno(n *ir.Node) src.XPos { lno := base.Pos if n != nil && hasUniquePos(n) { base.Pos = n.Pos @@ -68,7 +69,7 @@ func setlineno(n *Node) src.XPos { } func lookup(name string) *types.Sym { - return localpkg.Lookup(name) + return ir.LocalPkg.Lookup(name) } // lookupN looks up the symbol starting with prefix and ending with @@ -77,7 +78,7 @@ func lookupN(prefix string, n int) *types.Sym { var buf [20]byte // plenty long enough for all current users copy(buf[:], prefix) b := strconv.AppendInt(buf[:len(prefix)], int64(n), 10) - return localpkg.LookupBytes(b) + return ir.LocalPkg.LookupBytes(b) } // autolabel generates a new Name node for use with @@ -101,7 +102,7 @@ func autolabel(prefix string) *types.Sym { // find all the exported symbols in package opkg // and make them available in the current package -func importdot(opkg *types.Pkg, pack *Node) { +func importdot(opkg *types.Pkg, pack *ir.Node) { n := 0 for _, s := range opkg.Syms { if s.Def == nil { @@ -119,11 +120,11 @@ func importdot(opkg *types.Pkg, pack *Node) { s1.Def = s.Def s1.Block = s.Block - if asNode(s1.Def).Name == nil { - Dump("s1def", asNode(s1.Def)) + if ir.AsNode(s1.Def).Name == nil { + ir.Dump("s1def", ir.AsNode(s1.Def)) base.Fatalf("missing Name") } - asNode(s1.Def).Name.Pack = pack + ir.AsNode(s1.Def).Name.Pack = pack s1.Origpkg = opkg n++ } @@ -134,118 +135,27 @@ func importdot(opkg *types.Pkg, pack *Node) { } } -func nod(op Op, nleft, nright *Node) *Node { - return nodl(base.Pos, op, nleft, nright) -} - -func nodl(pos src.XPos, op Op, nleft, nright *Node) *Node { - var n *Node - switch op { - case ODCLFUNC: - var x struct { - n Node - f Func - } - n = &x.n - n.Func = &x.f - n.Func.Decl = n - case ONAME: - base.Fatalf("use newname instead") - case OLABEL, OPACK: - var x struct { - n Node - m Name - } - n = &x.n - n.Name = &x.m - default: - n = new(Node) - } - n.Op = op - n.Left = nleft - n.Right = nright - n.Pos = pos - n.Xoffset = BADWIDTH - n.Orig = n - return n -} - // newname returns a new ONAME Node associated with symbol s. -func newname(s *types.Sym) *Node { - n := newnamel(base.Pos, s) +func NewName(s *types.Sym) *ir.Node { + n := ir.NewNameAt(base.Pos, s) n.Name.Curfn = Curfn return n } -// newnamel returns a new ONAME Node associated with symbol s at position pos. -// The caller is responsible for setting n.Name.Curfn. -func newnamel(pos src.XPos, s *types.Sym) *Node { - if s == nil { - base.Fatalf("newnamel nil") - } - - var x struct { - n Node - m Name - p Param - } - n := &x.n - n.Name = &x.m - n.Name.Param = &x.p - - n.Op = ONAME - n.Pos = pos - n.Orig = n - - n.Sym = s - return n -} - // nodSym makes a Node with Op op and with the Left field set to left // and the Sym field set to sym. This is for ODOT and friends. -func nodSym(op Op, left *Node, sym *types.Sym) *Node { +func nodSym(op ir.Op, left *ir.Node, sym *types.Sym) *ir.Node { return nodlSym(base.Pos, op, left, sym) } // nodlSym makes a Node with position Pos, with Op op, and with the Left field set to left // and the Sym field set to sym. This is for ODOT and friends. 
-func nodlSym(pos src.XPos, op Op, left *Node, sym *types.Sym) *Node { - n := nodl(pos, op, left, nil) +func nodlSym(pos src.XPos, op ir.Op, left *ir.Node, sym *types.Sym) *ir.Node { + n := ir.NodAt(pos, op, left, nil) n.Sym = sym return n } -// rawcopy returns a shallow copy of n. -// Note: copy or sepcopy (rather than rawcopy) is usually the -// correct choice (see comment with Node.copy, below). -func (n *Node) rawcopy() *Node { - copy := *n - return &copy -} - -// sepcopy returns a separate shallow copy of n, with the copy's -// Orig pointing to itself. -func (n *Node) sepcopy() *Node { - copy := *n - copy.Orig = &copy - return &copy -} - -// copy returns shallow copy of n and adjusts the copy's Orig if -// necessary: In general, if n.Orig points to itself, the copy's -// Orig should point to itself as well. Otherwise, if n is modified, -// the copy's Orig node appears modified, too, and then doesn't -// represent the original node anymore. -// (This caused the wrong complit Op to be used when printing error -// messages; see issues #26855, #27765). -func (n *Node) copy() *Node { - copy := *n - if n.Orig == n { - copy.Orig = &copy - } - return &copy -} - // methcmp sorts methods by symbol. type methcmp []*types.Field @@ -253,67 +163,60 @@ func (x methcmp) Len() int { return len(x) } func (x methcmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] } func (x methcmp) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) } -func nodintconst(v int64) *Node { - return nodlit(constant.MakeInt64(v)) +func nodintconst(v int64) *ir.Node { + return ir.NewLiteral(constant.MakeInt64(v)) } -func nodnil() *Node { - n := nod(ONIL, nil, nil) - n.Type = types.Types[TNIL] +func nodnil() *ir.Node { + n := ir.Nod(ir.ONIL, nil, nil) + n.Type = types.Types[types.TNIL] return n } -func nodbool(b bool) *Node { - return nodlit(constant.MakeBool(b)) +func nodbool(b bool) *ir.Node { + return ir.NewLiteral(constant.MakeBool(b)) } -func nodstr(s string) *Node { - return nodlit(constant.MakeString(s)) +func nodstr(s string) *ir.Node { + return ir.NewLiteral(constant.MakeString(s)) } // treecopy recursively copies n, with the exception of // ONAME, OLITERAL, OTYPE, and ONONAME leaves. // If pos.IsKnown(), it sets the source position of newly // allocated nodes to pos. -func treecopy(n *Node, pos src.XPos) *Node { +func treecopy(n *ir.Node, pos src.XPos) *ir.Node { if n == nil { return nil } switch n.Op { default: - m := n.sepcopy() + m := ir.SepCopy(n) m.Left = treecopy(n.Left, pos) m.Right = treecopy(n.Right, pos) m.List.Set(listtreecopy(n.List.Slice(), pos)) if pos.IsKnown() { m.Pos = pos } - if m.Name != nil && n.Op != ODCLFIELD { - Dump("treecopy", n) + if m.Name != nil && n.Op != ir.ODCLFIELD { + ir.Dump("treecopy", n) base.Fatalf("treecopy Name") } return m - case OPACK: + case ir.OPACK: // OPACK nodes are never valid in const value declarations, // but allow them like any other declared symbol to avoid // crashing (golang.org/issue/11361). fallthrough - case ONAME, ONONAME, OLITERAL, ONIL, OTYPE: + case ir.ONAME, ir.ONONAME, ir.OLITERAL, ir.ONIL, ir.OTYPE: return n } } -// isNil reports whether n represents the universal untyped zero value "nil". -func (n *Node) isNil() bool { - // Check n.Orig because constant propagation may produce typed nil constants, - // which don't exist in the Go spec.
- return n.Orig.Op == ONIL -} - func isptrto(t *types.Type, et types.EType) bool { if t == nil { return false @@ -331,13 +234,6 @@ func isptrto(t *types.Type, et types.EType) bool { return true } -func (n *Node) isBlank() bool { - if n == nil { - return false - } - return n.Sym.IsBlank() -} - // methtype returns the underlying type, if any, // that owns methods with receiver parameter t. // The result is either a named type or an anonymous struct. @@ -367,7 +263,7 @@ func methtype(t *types.Type) *types.Type { return t } switch t.Etype { - case TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRING, TSTRUCT: + case types.TARRAY, types.TCHAN, types.TFUNC, types.TMAP, types.TSLICE, types.TSTRING, types.TSTRUCT: return t } return nil @@ -377,17 +273,17 @@ func methtype(t *types.Type) *types.Type { // If so, return op code to use in conversion. // If not, return OXXX. In this case, the string return parameter may // hold a reason why. In all other cases, it'll be the empty string. -func assignop(src, dst *types.Type) (Op, string) { +func assignop(src, dst *types.Type) (ir.Op, string) { if src == dst { - return OCONVNOP, "" + return ir.OCONVNOP, "" } - if src == nil || dst == nil || src.Etype == TFORW || dst.Etype == TFORW || src.Orig == nil || dst.Orig == nil { - return OXXX, "" + if src == nil || dst == nil || src.Etype == types.TFORW || dst.Etype == types.TFORW || src.Orig == nil || dst.Orig == nil { + return ir.OXXX, "" } // 1. src type is identical to dst. if types.Identical(src, dst) { - return OCONVNOP, "" + return ir.OCONVNOP, "" } // 2. src and dst have identical underlying types @@ -401,31 +297,31 @@ func assignop(src, dst *types.Type) (Op, string) { if src.IsEmptyInterface() { // Conversion between two empty interfaces // requires no code. - return OCONVNOP, "" + return ir.OCONVNOP, "" } if (src.Sym == nil || dst.Sym == nil) && !src.IsInterface() { // Conversion between two types, at least one unnamed, // needs no conversion. The exception is nonempty interfaces // which need to have their itab updated. - return OCONVNOP, "" + return ir.OCONVNOP, "" } } // 3. dst is an interface type and src implements dst. - if dst.IsInterface() && src.Etype != TNIL { + if dst.IsInterface() && src.Etype != types.TNIL { var missing, have *types.Field var ptr int if implements(src, dst, &missing, &have, &ptr) { - return OCONVIFACE, "" + return ir.OCONVIFACE, "" } // we'll have complained about this method anyway, suppress spurious messages. 
if have != nil && have.Sym == missing.Sym && (have.Type.Broke() || missing.Type.Broke()) { - return OCONVIFACE, "" + return ir.OCONVIFACE, "" } var why string - if isptrto(src, TINTER) { + if isptrto(src, types.TINTER) { why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", src) } else if have != nil && have.Sym == missing.Sym && have.Nointerface() { why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", src, dst, missing.Sym) @@ -441,22 +337,22 @@ func assignop(src, dst *types.Type) (Op, string) { why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)", src, dst, missing.Sym) } - return OXXX, why + return ir.OXXX, why } - if isptrto(dst, TINTER) { + if isptrto(dst, types.TINTER) { why := fmt.Sprintf(":\n\t%v is pointer to interface, not interface", dst) - return OXXX, why + return ir.OXXX, why } - if src.IsInterface() && dst.Etype != TBLANK { + if src.IsInterface() && dst.Etype != types.TBLANK { var missing, have *types.Field var ptr int var why string if implements(dst, src, &missing, &have, &ptr) { why = ": need type assertion" } - return OXXX, why + return ir.OXXX, why } // 4. src is a bidirectional channel value, dst is a channel type, @@ -464,31 +360,31 @@ func assignop(src, dst *types.Type) (Op, string) { // either src or dst is not a named type. if src.IsChan() && src.ChanDir() == types.Cboth && dst.IsChan() { if types.Identical(src.Elem(), dst.Elem()) && (src.Sym == nil || dst.Sym == nil) { - return OCONVNOP, "" + return ir.OCONVNOP, "" } } // 5. src is the predeclared identifier nil and dst is a nillable type. - if src.Etype == TNIL { + if src.Etype == types.TNIL { switch dst.Etype { - case TPTR, - TFUNC, - TMAP, - TCHAN, - TINTER, - TSLICE: - return OCONVNOP, "" + case types.TPTR, + types.TFUNC, + types.TMAP, + types.TCHAN, + types.TINTER, + types.TSLICE: + return ir.OCONVNOP, "" } } // 6. rule about untyped constants - already converted by defaultlit. // 7. Any typed value can be assigned to the blank identifier. - if dst.Etype == TBLANK { - return OCONVNOP, "" + if dst.Etype == types.TBLANK { + return ir.OCONVNOP, "" } - return OXXX, "" + return ir.OXXX, "" } // Can we convert a value of type src to a value of type dst? @@ -496,12 +392,12 @@ func assignop(src, dst *types.Type) (Op, string) { // If not, return OXXX. In this case, the string return parameter may // hold a reason why. In all other cases, it'll be the empty string. // srcConstant indicates whether the value of type src is a constant. -func convertop(srcConstant bool, src, dst *types.Type) (Op, string) { +func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) { if src == dst { - return OCONVNOP, "" + return ir.OCONVNOP, "" } if src == nil || dst == nil { - return OXXX, "" + return ir.OXXX, "" } // Conversions from regular to go:notinheap are not allowed @@ -510,17 +406,17 @@ func convertop(srcConstant bool, src, dst *types.Type) (Op, string) { // (a) Disallow (*T) to (*U) where T is go:notinheap but U isn't. if src.IsPtr() && dst.IsPtr() && dst.Elem().NotInHeap() && !src.Elem().NotInHeap() { why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable), but %v is not", dst.Elem(), src.Elem()) - return OXXX, why + return ir.OXXX, why } // (b) Disallow string to []T where T is go:notinheap. 
if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Etype == types.Bytetype.Etype || dst.Elem().Etype == types.Runetype.Etype) { why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem()) - return OXXX, why + return ir.OXXX, why } // 1. src can be assigned to dst. op, why := assignop(src, dst) - if op != OXXX { + if op != ir.OXXX { return op, why } @@ -529,57 +425,57 @@ func convertop(srcConstant bool, src, dst *types.Type) (Op, string) { // with the good message from assignop. // Otherwise clear the error. if src.IsInterface() || dst.IsInterface() { - return OXXX, why + return ir.OXXX, why } // 2. Ignoring struct tags, src and dst have identical underlying types. if types.IdenticalIgnoreTags(src.Orig, dst.Orig) { - return OCONVNOP, "" + return ir.OCONVNOP, "" } // 3. src and dst are unnamed pointer types and, ignoring struct tags, // their base types have identical underlying types. if src.IsPtr() && dst.IsPtr() && src.Sym == nil && dst.Sym == nil { if types.IdenticalIgnoreTags(src.Elem().Orig, dst.Elem().Orig) { - return OCONVNOP, "" + return ir.OCONVNOP, "" } } // 4. src and dst are both integer or floating point types. if (src.IsInteger() || src.IsFloat()) && (dst.IsInteger() || dst.IsFloat()) { if simtype[src.Etype] == simtype[dst.Etype] { - return OCONVNOP, "" + return ir.OCONVNOP, "" } - return OCONV, "" + return ir.OCONV, "" } // 5. src and dst are both complex types. if src.IsComplex() && dst.IsComplex() { if simtype[src.Etype] == simtype[dst.Etype] { - return OCONVNOP, "" + return ir.OCONVNOP, "" } - return OCONV, "" + return ir.OCONV, "" } // Special case for constant conversions: any numeric // conversion is potentially okay. We'll validate further // within evconst. See #38117. if srcConstant && (src.IsInteger() || src.IsFloat() || src.IsComplex()) && (dst.IsInteger() || dst.IsFloat() || dst.IsComplex()) { - return OCONV, "" + return ir.OCONV, "" } // 6. src is an integer or has type []byte or []rune // and dst is a string type. if src.IsInteger() && dst.IsString() { - return ORUNESTR, "" + return ir.ORUNESTR, "" } if src.IsSlice() && dst.IsString() { if src.Elem().Etype == types.Bytetype.Etype { - return OBYTES2STR, "" + return ir.OBYTES2STR, "" } if src.Elem().Etype == types.Runetype.Etype { - return ORUNES2STR, "" + return ir.ORUNES2STR, "" } } @@ -587,45 +483,45 @@ func convertop(srcConstant bool, src, dst *types.Type) (Op, string) { // String to slice. if src.IsString() && dst.IsSlice() { if dst.Elem().Etype == types.Bytetype.Etype { - return OSTR2BYTES, "" + return ir.OSTR2BYTES, "" } if dst.Elem().Etype == types.Runetype.Etype { - return OSTR2RUNES, "" + return ir.OSTR2RUNES, "" } } // 8. src is a pointer or uintptr and dst is unsafe.Pointer. if (src.IsPtr() || src.IsUintptr()) && dst.IsUnsafePtr() { - return OCONVNOP, "" + return ir.OCONVNOP, "" } // 9. src is unsafe.Pointer and dst is a pointer or uintptr. if src.IsUnsafePtr() && (dst.IsPtr() || dst.IsUintptr()) { - return OCONVNOP, "" + return ir.OCONVNOP, "" } // src is map and dst is a pointer to corresponding hmap. // This rule is needed for the implementation detail that // go gc maps are implemented as a pointer to a hmap struct. 
- if src.Etype == TMAP && dst.IsPtr() && + if src.Etype == types.TMAP && dst.IsPtr() && src.MapType().Hmap == dst.Elem() { - return OCONVNOP, "" + return ir.OCONVNOP, "" } - return OXXX, "" + return ir.OXXX, "" } -func assignconv(n *Node, t *types.Type, context string) *Node { +func assignconv(n *ir.Node, t *types.Type, context string) *ir.Node { return assignconvfn(n, t, func() string { return context }) } // Convert node n for assignment to type t. -func assignconvfn(n *Node, t *types.Type, context func() string) *Node { +func assignconvfn(n *ir.Node, t *types.Type, context func() string) *ir.Node { if n == nil || n.Type == nil || n.Type.Broke() { return n } - if t.Etype == TBLANK && n.Type.Etype == TNIL { + if t.Etype == types.TBLANK && n.Type.Etype == types.TNIL { base.Errorf("use of untyped nil") } @@ -633,16 +529,16 @@ func assignconvfn(n *Node, t *types.Type, context func() string) *Node { if n.Type == nil { return n } - if t.Etype == TBLANK { + if t.Etype == types.TBLANK { return n } // Convert ideal bool from comparison to plain bool // if the next step is non-bool (like interface{}). if n.Type == types.UntypedBool && !t.IsBoolean() { - if n.Op == ONAME || n.Op == OLITERAL { - r := nod(OCONVNOP, n, nil) - r.Type = types.Types[TBOOL] + if n.Op == ir.ONAME || n.Op == ir.OLITERAL { + r := ir.Nod(ir.OCONVNOP, n, nil) + r.Type = types.Types[types.TBOOL] r.SetTypecheck(1) r.SetImplicit(true) n = r @@ -654,12 +550,12 @@ func assignconvfn(n *Node, t *types.Type, context func() string) *Node { } op, why := assignop(n.Type, t) - if op == OXXX { + if op == ir.OXXX { base.Errorf("cannot use %L as type %v in %s%s", n, t, context(), why) - op = OCONV + op = ir.OCONV } - r := nod(op, n, nil) + r := ir.Nod(op, n, nil) r.Type = t r.SetTypecheck(1) r.SetImplicit(true) @@ -667,103 +563,29 @@ func assignconvfn(n *Node, t *types.Type, context func() string) *Node { return r } -// IsMethod reports whether n is a method. -// n must be a function or a method. -func (n *Node) IsMethod() bool { - return n.Type.Recv() != nil -} - -// SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max]. -// n must be a slice expression. max is nil if n is a simple slice expression. -func (n *Node) SliceBounds() (low, high, max *Node) { - if n.List.Len() == 0 { - return nil, nil, nil - } - - switch n.Op { - case OSLICE, OSLICEARR, OSLICESTR: - s := n.List.Slice() - return s[0], s[1], nil - case OSLICE3, OSLICE3ARR: - s := n.List.Slice() - return s[0], s[1], s[2] - } - base.Fatalf("SliceBounds op %v: %v", n.Op, n) - return nil, nil, nil -} - -// SetSliceBounds sets n's slice bounds, where n is a slice expression. -// n must be a slice expression. If max is non-nil, n must be a full slice expression. -func (n *Node) SetSliceBounds(low, high, max *Node) { - switch n.Op { - case OSLICE, OSLICEARR, OSLICESTR: - if max != nil { - base.Fatalf("SetSliceBounds %v given three bounds", n.Op) - } - s := n.List.Slice() - if s == nil { - if low == nil && high == nil { - return - } - n.List.Set2(low, high) - return - } - s[0] = low - s[1] = high - return - case OSLICE3, OSLICE3ARR: - s := n.List.Slice() - if s == nil { - if low == nil && high == nil && max == nil { - return - } - n.List.Set3(low, high, max) - return - } - s[0] = low - s[1] = high - s[2] = max - return - } - base.Fatalf("SetSliceBounds op %v: %v", n.Op, n) -} - -// IsSlice3 reports whether o is a slice3 op (OSLICE3, OSLICE3ARR). -// o must be a slicing op. 
-func (o Op) IsSlice3() bool { - switch o { - case OSLICE, OSLICEARR, OSLICESTR: - return false - case OSLICE3, OSLICE3ARR: - return true - } - base.Fatalf("IsSlice3 op %v", o) - return false -} - // backingArrayPtrLen extracts the pointer and length from a slice or string. // This constructs two nodes referring to n, so n must be a cheapexpr. -func (n *Node) backingArrayPtrLen() (ptr, len *Node) { - var init Nodes +func backingArrayPtrLen(n *ir.Node) (ptr, len *ir.Node) { + var init ir.Nodes c := cheapexpr(n, &init) if c != n || init.Len() != 0 { base.Fatalf("backingArrayPtrLen not cheap: %v", n) } - ptr = nod(OSPTR, n, nil) + ptr = ir.Nod(ir.OSPTR, n, nil) if n.Type.IsString() { - ptr.Type = types.Types[TUINT8].PtrTo() + ptr.Type = types.Types[types.TUINT8].PtrTo() } else { ptr.Type = n.Type.Elem().PtrTo() } - len = nod(OLEN, n, nil) - len.Type = types.Types[TINT] + len = ir.Nod(ir.OLEN, n, nil) + len.Type = types.Types[types.TINT] return ptr, len } // labeledControl returns the control flow Node (for, switch, select) // associated with the label n, if any. -func (n *Node) labeledControl() *Node { - if n.Op != OLABEL { +func labeledControl(n *ir.Node) *ir.Node { + if n.Op != ir.OLABEL { base.Fatalf("labeledControl %v", n.Op) } ctl := n.Name.Defn @@ -771,18 +593,18 @@ func (n *Node) labeledControl() *Node { return nil } switch ctl.Op { - case OFOR, OFORUNTIL, OSWITCH, OSELECT: + case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OSELECT: return ctl } return nil } -func syslook(name string) *Node { +func syslook(name string) *ir.Node { s := Runtimepkg.Lookup(name) if s == nil || s.Def == nil { base.Fatalf("syslook: can't find runtime.%s", name) } - return asNode(s.Def) + return ir.AsNode(s.Def) } // typehash computes a hash value for type t to use in type switch statements. @@ -796,49 +618,49 @@ func typehash(t *types.Type) uint32 { // updateHasCall checks whether expression n contains any function // calls and sets the n.HasCall flag if so. -func updateHasCall(n *Node) { +func updateHasCall(n *ir.Node) { if n == nil { return } n.SetHasCall(calcHasCall(n)) } -func calcHasCall(n *Node) bool { +func calcHasCall(n *ir.Node) bool { if n.Ninit.Len() != 0 { // TODO(mdempsky): This seems overly conservative. return true } switch n.Op { - case OLITERAL, ONIL, ONAME, OTYPE: + case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OTYPE: if n.HasCall() { base.Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n) } return false - case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER: + case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: return true - case OANDAND, OOROR: + case ir.OANDAND, ir.OOROR: // hard with instrumented code if instrumenting { return true } - case OINDEX, OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR, - ODEREF, ODOTPTR, ODOTTYPE, ODIV, OMOD: + case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR, + ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODIV, ir.OMOD: // These ops might panic, make sure they are done // before we start marshaling args for a call. See issue 16760. return true // When using soft-float, these ops might be rewritten to function calls // so we ensure they are evaluated first. 
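
The panic-ordering cases above matter because walkexpr spills any expression whose HasCall flag is set to a temporary before marshaling call arguments. A small sketch of the hazard, per the issue 16760 reference above (the rewrite shown in the comments is illustrative, not the literal IR):

    package main

    func g() int { println("g ran"); return 1 }
    func f(x, y int) {}

    func main() {
    	y, z := 1, 0
    	// Because y/z may panic, calcHasCall reports true and the walker
    	// effectively evaluates it first:
    	//     tmp := y / z   // panics here, before g's side effects
    	//     f(tmp, g())
    	f(y/z, g())
    }
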
- case OADD, OSUB, ONEG, OMUL: + case ir.OADD, ir.OSUB, ir.ONEG, ir.OMUL: if thearch.SoftFloat && (isFloat[n.Type.Etype] || isComplex[n.Type.Etype]) { return true } - case OLT, OEQ, ONE, OLE, OGE, OGT: + case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT: if thearch.SoftFloat && (isFloat[n.Left.Type.Etype] || isComplex[n.Left.Type.Etype]) { return true } - case OCONV: + case ir.OCONV: if thearch.SoftFloat && ((isFloat[n.Type.Etype] || isComplex[n.Type.Etype]) || (isFloat[n.Left.Type.Etype] || isComplex[n.Left.Type.Etype])) { return true } @@ -853,7 +675,7 @@ func calcHasCall(n *Node) bool { return false } -func badtype(op Op, tl, tr *types.Type) { +func badtype(op ir.Op, tl, tr *types.Type) { var s string if tl != nil { s += fmt.Sprintf("\n\t%v", tl) @@ -876,20 +698,20 @@ func badtype(op Op, tl, tr *types.Type) { // brcom returns !(op). // For example, brcom(==) is !=. -func brcom(op Op) Op { +func brcom(op ir.Op) ir.Op { switch op { - case OEQ: - return ONE - case ONE: - return OEQ - case OLT: - return OGE - case OGT: - return OLE - case OLE: - return OGT - case OGE: - return OLT + case ir.OEQ: + return ir.ONE + case ir.ONE: + return ir.OEQ + case ir.OLT: + return ir.OGE + case ir.OGT: + return ir.OLE + case ir.OLE: + return ir.OGT + case ir.OGE: + return ir.OLT } base.Fatalf("brcom: no com for %v\n", op) return op @@ -897,20 +719,20 @@ func brcom(op Op) Op { // brrev returns reverse(op). // For example, Brrev(<) is >. -func brrev(op Op) Op { +func brrev(op ir.Op) ir.Op { switch op { - case OEQ: - return OEQ - case ONE: - return ONE - case OLT: - return OGT - case OGT: - return OLT - case OLE: - return OGE - case OGE: - return OLE + case ir.OEQ: + return ir.OEQ + case ir.ONE: + return ir.ONE + case ir.OLT: + return ir.OGT + case ir.OGT: + return ir.OLT + case ir.OLE: + return ir.OGE + case ir.OGE: + return ir.OLE } base.Fatalf("brrev: no rev for %v\n", op) return op @@ -918,7 +740,7 @@ func brrev(op Op) Op { // return side effect-free n, appending side effects to init. // result is assignable if n is. 
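
The distinction between the two tables above: brcom negates the comparison, while brrev swaps its operands. A quick runnable check (illustrative only):

    package main

    import "fmt"

    func main() {
    	for x := 0; x < 3; x++ {
    		for y := 0; y < 3; y++ {
    			fmt.Println(!(x < y) == (x >= y)) // brcom: OLT -> OGE
    			fmt.Println((x < y) == (y > x))   // brrev: OLT -> OGT
    		}
    	}
    }
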
-func safeexpr(n *Node, init *Nodes) *Node { +func safeexpr(n *ir.Node, init *ir.Nodes) *ir.Node { if n == nil { return nil } @@ -929,43 +751,43 @@ func safeexpr(n *Node, init *Nodes) *Node { } switch n.Op { - case ONAME, OLITERAL, ONIL: + case ir.ONAME, ir.OLITERAL, ir.ONIL: return n - case ODOT, OLEN, OCAP: + case ir.ODOT, ir.OLEN, ir.OCAP: l := safeexpr(n.Left, init) if l == n.Left { return n } - r := n.copy() + r := ir.Copy(n) r.Left = l r = typecheck(r, ctxExpr) r = walkexpr(r, init) return r - case ODOTPTR, ODEREF: + case ir.ODOTPTR, ir.ODEREF: l := safeexpr(n.Left, init) if l == n.Left { return n } - a := n.copy() + a := ir.Copy(n) a.Left = l a = walkexpr(a, init) return a - case OINDEX, OINDEXMAP: + case ir.OINDEX, ir.OINDEXMAP: l := safeexpr(n.Left, init) r := safeexpr(n.Right, init) if l == n.Left && r == n.Right { return n } - a := n.copy() + a := ir.Copy(n) a.Left = l a.Right = r a = walkexpr(a, init) return a - case OSTRUCTLIT, OARRAYLIT, OSLICELIT: + case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT: if isStaticCompositeLiteral(n) { return n } @@ -978,9 +800,9 @@ func safeexpr(n *Node, init *Nodes) *Node { return cheapexpr(n, init) } -func copyexpr(n *Node, t *types.Type, init *Nodes) *Node { +func copyexpr(n *ir.Node, t *types.Type, init *ir.Nodes) *ir.Node { l := temp(t) - a := nod(OAS, l, n) + a := ir.Nod(ir.OAS, l, n) a = typecheck(a, ctxStmt) a = walkexpr(a, init) init.Append(a) @@ -989,9 +811,9 @@ func copyexpr(n *Node, t *types.Type, init *Nodes) *Node { // return side-effect free and cheap n, appending side effects to init. // result may not be assignable. -func cheapexpr(n *Node, init *Nodes) *Node { +func cheapexpr(n *ir.Node, init *ir.Nodes) *ir.Node { switch n.Op { - case ONAME, OLITERAL, ONIL: + case ir.ONAME, ir.OLITERAL, ir.ONIL: return n } @@ -1135,7 +957,7 @@ func dotpath(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) ( // find missing fields that // will give shortest unique addressing. // modify the tree with missing type names. -func adddot(n *Node) *Node { +func adddot(n *ir.Node) *ir.Node { n.Left = typecheck(n.Left, ctxType|ctxExpr) if n.Left.Diag() { n.SetDiag(true) @@ -1145,7 +967,7 @@ func adddot(n *Node) *Node { return n } - if n.Left.Op == OTYPE { + if n.Left.Op == ir.OTYPE { return n } @@ -1158,7 +980,7 @@ func adddot(n *Node) *Node { case path != nil: // rebuild elided dots for c := len(path) - 1; c >= 0; c-- { - n.Left = nodSym(ODOT, n.Left, path[c].field.Sym) + n.Left = nodSym(ir.ODOT, n.Left, path[c].field.Sym) n.Left.SetImplicit(true) } case ambig: @@ -1294,8 +1116,8 @@ func expandmeth(t *types.Type) { } // Given funarg struct list, return list of ODCLFIELD Node fn args. -func structargs(tl *types.Type, mustname bool) []*Node { - var args []*Node +func structargs(tl *types.Type, mustname bool) []*ir.Node { + var args []*ir.Node gen := 0 for _, t := range tl.Fields().Slice() { s := t.Sym @@ -1341,20 +1163,20 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { // Only generate (*T).M wrappers for T.M in T's own package. if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && - rcvr.Elem().Sym != nil && rcvr.Elem().Sym.Pkg != localpkg { + rcvr.Elem().Sym != nil && rcvr.Elem().Sym.Pkg != ir.LocalPkg { return } // Only generate I.M wrappers for I in I's own package // but keep doing it for error.Error (was issue #29304). 
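
At the source level, the wrapper genwrapper builds for a value method promoted to a pointer receiver behaves roughly like the hand-written version below. This is a sketch: wrapperM is a hypothetical name, and the real wrapper is generated IR whose nil check calls runtime.panicwrap rather than panicking directly.

    package main

    type T struct{ n int }

    func (t T) M() int { return t.n }

    // Approximate behavior of the generated (*T).M wrapper:
    func wrapperM(t *T) int {
    	if t == nil {
    		panic("value method T.M called using nil *T pointer") // panicwrap
    	}
    	return (*t).M()
    }

    func main() {
    	var t T
    	_ = wrapperM(&t)
    }
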
- if rcvr.IsInterface() && rcvr.Sym != nil && rcvr.Sym.Pkg != localpkg && rcvr != types.Errortype { + if rcvr.IsInterface() && rcvr.Sym != nil && rcvr.Sym.Pkg != ir.LocalPkg && rcvr != types.Errortype { return } base.Pos = autogeneratedPos - dclcontext = PEXTERN + dclcontext = ir.PEXTERN - tfn := nod(OTFUNC, nil, nil) + tfn := ir.Nod(ir.OTFUNC, nil, nil) tfn.Left = namedfield(".this", rcvr) tfn.List.Set(structargs(method.Type.Params(), true)) tfn.Rlist.Set(structargs(method.Type.Results(), false)) @@ -1362,21 +1184,21 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { fn := dclfunc(newnam, tfn) fn.Func.SetDupok(true) - nthis := asNode(tfn.Type.Recv().Nname) + nthis := ir.AsNode(tfn.Type.Recv().Nname) methodrcvr := method.Type.Recv().Type // generate nil pointer check for better error if rcvr.IsPtr() && rcvr.Elem() == methodrcvr { // generating wrapper from *T to T. - n := nod(OIF, nil, nil) - n.Left = nod(OEQ, nthis, nodnil()) - call := nod(OCALL, syslook("panicwrap"), nil) + n := ir.Nod(ir.OIF, nil, nil) + n.Left = ir.Nod(ir.OEQ, nthis, nodnil()) + call := ir.Nod(ir.OCALL, syslook("panicwrap"), nil) n.Nbody.Set1(call) fn.Nbody.Append(n) } - dot := adddot(nodSym(OXDOT, nthis, method.Sym)) + dot := adddot(nodSym(ir.OXDOT, nthis, method.Sym)) // generate call // It's not possible to use a tail call when dynamic linking on ppc64le. The @@ -1390,18 +1212,18 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { dot = dot.Left // skip final .M // TODO(mdempsky): Remove dependency on dotlist. if !dotlist[0].field.Type.IsPtr() { - dot = nod(OADDR, dot, nil) + dot = ir.Nod(ir.OADDR, dot, nil) } - as := nod(OAS, nthis, convnop(dot, rcvr)) + as := ir.Nod(ir.OAS, nthis, convnop(dot, rcvr)) fn.Nbody.Append(as) - fn.Nbody.Append(nodSym(ORETJMP, nil, methodSym(methodrcvr, method.Sym))) + fn.Nbody.Append(nodSym(ir.ORETJMP, nil, methodSym(methodrcvr, method.Sym))) } else { fn.Func.SetWrapper(true) // ignore frame for panic+recover matching - call := nod(OCALL, dot, nil) + call := ir.Nod(ir.OCALL, dot, nil) call.List.Set(paramNnames(tfn.Type)) call.SetIsDDD(tfn.Type.IsVariadic()) if method.Type.NumResults() > 0 { - n := nod(ORETURN, nil, nil) + n := ir.Nod(ir.ORETURN, nil, nil) n.List.Set1(call) call = n } @@ -1409,7 +1231,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { } if false && base.Flag.LowerR != 0 { - dumplist("genwrapper body", fn.Nbody) + ir.DumpList("genwrapper body", fn.Nbody) } funcbody() @@ -1428,31 +1250,31 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym != nil { inlcalls(fn) } - escapeFuncs([]*Node{fn}, false) + escapeFuncs([]*ir.Node{fn}, false) Curfn = nil xtop = append(xtop, fn) } -func paramNnames(ft *types.Type) []*Node { - args := make([]*Node, ft.NumParams()) +func paramNnames(ft *types.Type) []*ir.Node { + args := make([]*ir.Node, ft.NumParams()) for i, f := range ft.Params().FieldSlice() { - args[i] = asNode(f.Nname) + args[i] = ir.AsNode(f.Nname) } return args } -func hashmem(t *types.Type) *Node { +func hashmem(t *types.Type) *ir.Node { sym := Runtimepkg.Lookup("memhash") - n := newname(sym) + n := NewName(sym) setNodeNameFunc(n) - n.Type = functype(nil, []*Node{ + n.Type = functype(nil, []*ir.Node{ anonfield(types.NewPtr(t)), - anonfield(types.Types[TUINTPTR]), - anonfield(types.Types[TUINTPTR]), - }, []*Node{ - anonfield(types.Types[TUINTPTR]), + anonfield(types.Types[types.TUINTPTR]), + 
anonfield(types.Types[types.TUINTPTR]), + }, []*ir.Node{ + anonfield(types.Types[types.TUINTPTR]), }) return n } @@ -1571,16 +1393,16 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool return true } -func listtreecopy(l []*Node, pos src.XPos) []*Node { - var out []*Node +func listtreecopy(l []*ir.Node, pos src.XPos) []*ir.Node { + var out []*ir.Node for _, n := range l { out = append(out, treecopy(n, pos)) } return out } -func liststmt(l []*Node) *Node { - n := nod(OBLOCK, nil, nil) +func liststmt(l []*ir.Node) *ir.Node { + n := ir.Nod(ir.OBLOCK, nil, nil) n.List.Set(l) if len(l) != 0 { n.Pos = l[0].Pos @@ -1588,7 +1410,7 @@ func liststmt(l []*Node) *Node { return n } -func ngotype(n *Node) *types.Sym { +func ngotype(n *ir.Node) *types.Sym { if n.Type != nil { return typenamesym(n.Type) } @@ -1597,13 +1419,13 @@ func ngotype(n *Node) *types.Sym { // The result of addinit MUST be assigned back to n, e.g. // n.Left = addinit(n.Left, init) -func addinit(n *Node, init []*Node) *Node { +func addinit(n *ir.Node, init []*ir.Node) *ir.Node { if len(init) == 0 { return n } - if n.mayBeShared() { + if ir.MayBeShared(n) { // Introduce OCONVNOP to hold init list. - n = nod(OCONVNOP, n, nil) + n = ir.Nod(ir.OCONVNOP, n, nil) n.Type = n.Left.Type n.SetTypecheck(1) } @@ -1674,20 +1496,20 @@ func isdirectiface(t *types.Type) bool { } switch t.Etype { - case TPTR: + case types.TPTR: // Pointers to notinheap types must be stored indirectly. See issue 42076. return !t.Elem().NotInHeap() - case TCHAN, - TMAP, - TFUNC, - TUNSAFEPTR: + case types.TCHAN, + types.TMAP, + types.TFUNC, + types.TUNSAFEPTR: return true - case TARRAY: + case types.TARRAY: // Array of 1 direct iface type can be direct. return t.NumElem() == 1 && isdirectiface(t.Elem()) - case TSTRUCT: + case types.TSTRUCT: // Struct with 1 field of direct iface type can be direct. return t.NumFields() == 1 && isdirectiface(t.Field(0).Type) } @@ -1696,9 +1518,9 @@ func isdirectiface(t *types.Type) bool { } // itabType loads the _type field from a runtime.itab struct. -func itabType(itab *Node) *Node { - typ := nodSym(ODOTPTR, itab, nil) - typ.Type = types.NewPtr(types.Types[TUINT8]) +func itabType(itab *ir.Node) *ir.Node { + typ := nodSym(ir.ODOTPTR, itab, nil) + typ.Type = types.NewPtr(types.Types[types.TUINT8]) typ.SetTypecheck(1) typ.Xoffset = int64(Widthptr) // offset of _type in runtime.itab typ.SetBounded(true) // guaranteed not to fault @@ -1708,11 +1530,11 @@ func itabType(itab *Node) *Node { // ifaceData loads the data field from an interface. // The concrete type must be known to have type t. // It follows the pointer if !isdirectiface(t). -func ifaceData(pos src.XPos, n *Node, t *types.Type) *Node { +func ifaceData(pos src.XPos, n *ir.Node, t *types.Type) *ir.Node { if t.IsInterface() { base.Fatalf("ifaceData interface: %v", t) } - ptr := nodlSym(pos, OIDATA, n, nil) + ptr := nodlSym(pos, ir.OIDATA, n, nil) if isdirectiface(t) { ptr.Type = t ptr.SetTypecheck(1) @@ -1720,7 +1542,7 @@ func ifaceData(pos src.XPos, n *Node, t *types.Type) *Node { } ptr.Type = types.NewPtr(t) ptr.SetTypecheck(1) - ind := nodl(pos, ODEREF, ptr, nil) + ind := ir.NodAt(pos, ir.ODEREF, ptr, nil) ind.Type = t ind.SetTypecheck(1) ind.SetBounded(true) @@ -1730,7 +1552,7 @@ func ifaceData(pos src.XPos, n *Node, t *types.Type) *Node { // typePos returns the position associated with t. // This is where t was declared or where it appeared as a type expression. 
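
The direct-interface classification above can be seen from the source side: a value is stored directly in the interface data word only when its representation is a single pointer. A sketch (wrap is an illustrative type name):

    package main

    import "fmt"

    type wrap struct{ p *int } // TSTRUCT with one direct field: direct

    func main() {
    	n := 42
    	var a interface{} = &n          // TPTR: direct
    	var b interface{} = wrap{&n}    // direct per the TSTRUCT case
    	var c interface{} = [1]*int{&n} // direct per the TARRAY case
    	var d interface{} = n           // TINT: stored indirectly
    	fmt.Println(a, b, c, d)
    }
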
func typePos(t *types.Type) src.XPos { - n := asNode(t.Nod) + n := ir.AsNode(t.Nod) if n == nil || !n.Pos.IsKnown() { base.Fatalf("bad type: %v", t) } diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index 7befbdf06c15d..f3195df79aa13 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/src" "go/constant" @@ -14,16 +15,16 @@ import ( ) // typecheckswitch typechecks a switch statement. -func typecheckswitch(n *Node) { +func typecheckswitch(n *ir.Node) { typecheckslice(n.Ninit.Slice(), ctxStmt) - if n.Left != nil && n.Left.Op == OTYPESW { + if n.Left != nil && n.Left.Op == ir.OTYPESW { typecheckTypeSwitch(n) } else { typecheckExprSwitch(n) } } -func typecheckTypeSwitch(n *Node) { +func typecheckTypeSwitch(n *ir.Node) { n.Left.Right = typecheck(n.Left.Right, ctxExpr) t := n.Left.Right.Type if t != nil && !t.IsInterface() { @@ -34,17 +35,17 @@ func typecheckTypeSwitch(n *Node) { // We don't actually declare the type switch's guarded // declaration itself. So if there are no cases, we won't // notice that it went unused. - if v := n.Left.Left; v != nil && !v.isBlank() && n.List.Len() == 0 { + if v := n.Left.Left; v != nil && !ir.IsBlank(v) && n.List.Len() == 0 { base.ErrorfAt(v.Pos, "%v declared but not used", v.Sym) } - var defCase, nilCase *Node + var defCase, nilCase *ir.Node var ts typeSet for _, ncase := range n.List.Slice() { ls := ncase.List.Slice() if len(ls) == 0 { // default: if defCase != nil { - base.ErrorfAt(ncase.Pos, "multiple defaults in switch (first at %v)", defCase.Line()) + base.ErrorfAt(ncase.Pos, "multiple defaults in switch (first at %v)", ir.Line(defCase)) } else { defCase = ncase } @@ -60,13 +61,13 @@ func typecheckTypeSwitch(n *Node) { var missing, have *types.Field var ptr int switch { - case n1.isNil(): // case nil: + case ir.IsNil(n1): // case nil: if nilCase != nil { - base.ErrorfAt(ncase.Pos, "multiple nil cases in type switch (first at %v)", nilCase.Line()) + base.ErrorfAt(ncase.Pos, "multiple nil cases in type switch (first at %v)", ir.Line(nilCase)) } else { nilCase = ncase } - case n1.Op != OTYPE: + case n1.Op != ir.OTYPE: base.ErrorfAt(ncase.Pos, "%L is not a type", n1) case !n1.Type.IsInterface() && !implements(n1.Type, t, &missing, &have, &ptr) && !missing.Broke(): if have != nil && !have.Broke() { @@ -81,7 +82,7 @@ func typecheckTypeSwitch(n *Node) { } } - if n1.Op == OTYPE { + if n1.Op == ir.OTYPE { ts.add(ncase.Pos, n1.Type) } } @@ -90,9 +91,9 @@ func typecheckTypeSwitch(n *Node) { // Assign the clause variable's type. vt := t if len(ls) == 1 { - if ls[0].Op == OTYPE { + if ls[0].Op == ir.OTYPE { vt = ls[0].Type - } else if !ls[0].isNil() { + } else if !ir.IsNil(ls[0]) { // Invalid single-type case; // mark variable as broken. 
vt = nil @@ -143,8 +144,8 @@ func (s *typeSet) add(pos src.XPos, typ *types.Type) { s.m[ls] = append(prevs, typeSetEntry{pos, typ}) } -func typecheckExprSwitch(n *Node) { - t := types.Types[TBOOL] +func typecheckExprSwitch(n *ir.Node) { + t := types.Types[types.TBOOL] if n.Left != nil { n.Left = typecheck(n.Left, ctxExpr) n.Left = defaultlit(n.Left, nil) @@ -156,7 +157,7 @@ func typecheckExprSwitch(n *Node) { switch { case t.IsMap(): nilonly = "map" - case t.Etype == TFUNC: + case t.Etype == types.TFUNC: nilonly = "func" case t.IsSlice(): nilonly = "slice" @@ -171,13 +172,13 @@ func typecheckExprSwitch(n *Node) { } } - var defCase *Node + var defCase *ir.Node var cs constSet for _, ncase := range n.List.Slice() { ls := ncase.List.Slice() if len(ls) == 0 { // default: if defCase != nil { - base.ErrorfAt(ncase.Pos, "multiple defaults in switch (first at %v)", defCase.Line()) + base.ErrorfAt(ncase.Pos, "multiple defaults in switch (first at %v)", ir.Line(defCase)) } else { defCase = ncase } @@ -192,14 +193,14 @@ func typecheckExprSwitch(n *Node) { continue } - if nilonly != "" && !n1.isNil() { + if nilonly != "" && !ir.IsNil(n1) { base.ErrorfAt(ncase.Pos, "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left) } else if t.IsInterface() && !n1.Type.IsInterface() && !IsComparable(n1.Type) { base.ErrorfAt(ncase.Pos, "invalid case %L in switch (incomparable type)", n1) } else { op1, _ := assignop(n1.Type, t) op2, _ := assignop(t, n1.Type) - if op1 == OXXX && op2 == OXXX { + if op1 == ir.OXXX && op2 == ir.OXXX { if n.Left != nil { base.ErrorfAt(ncase.Pos, "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left, n1.Type, t) } else { @@ -224,13 +225,13 @@ func typecheckExprSwitch(n *Node) { } // walkswitch walks a switch statement. -func walkswitch(sw *Node) { +func walkswitch(sw *ir.Node) { // Guard against double walk, see #25776. if sw.List.Len() == 0 && sw.Nbody.Len() > 0 { return // Was fatal, but eliminating every possible source of double-walking is hard } - if sw.Left != nil && sw.Left.Op == OTYPESW { + if sw.Left != nil && sw.Left.Op == ir.OTYPESW { walkTypeSwitch(sw) } else { walkExprSwitch(sw) @@ -239,7 +240,7 @@ func walkswitch(sw *Node) { // walkExprSwitch generates an AST implementing sw. sw is an // expression switch. -func walkExprSwitch(sw *Node) { +func walkExprSwitch(sw *ir.Node) { lno := setlineno(sw) cond := sw.Left @@ -259,12 +260,12 @@ func walkExprSwitch(sw *Node) { // because walkexpr will lower the string // conversion into a runtime call. // See issue 24937 for more discussion. - if cond.Op == OBYTES2STR && allCaseExprsAreSideEffectFree(sw) { - cond.Op = OBYTES2STRTMP + if cond.Op == ir.OBYTES2STR && allCaseExprsAreSideEffectFree(sw) { + cond.Op = ir.OBYTES2STRTMP } cond = walkexpr(cond, &sw.Ninit) - if cond.Op != OLITERAL && cond.Op != ONIL { + if cond.Op != ir.OLITERAL && cond.Op != ir.ONIL { cond = copyexpr(cond, cond.Type, &sw.Nbody) } @@ -274,11 +275,11 @@ func walkExprSwitch(sw *Node) { exprname: cond, } - var defaultGoto *Node - var body Nodes + var defaultGoto *ir.Node + var body ir.Nodes for _, ncase := range sw.List.Slice() { label := autolabel(".s") - jmp := npos(ncase.Pos, nodSym(OGOTO, nil, label)) + jmp := npos(ncase.Pos, nodSym(ir.OGOTO, nil, label)) // Process case dispatch. if ncase.List.Len() == 0 { @@ -293,10 +294,10 @@ func walkExprSwitch(sw *Node) { } // Process body. 
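
The dispatch-then-labeled-bodies shape being built here can be written out by hand. A rough source-level sketch of what walkExprSwitch emits for a small integer switch (the classify helper and label names are illustrative; the real dispatch is a binary search over clauses rather than a linear if chain):

    package main

    import "fmt"

    // Hand-written equivalent of the lowering for:
    //   switch x { case 1: ...; case 2, 3: ...; default: ... }
    func classify(x int) string {
    	var r string
    	if x == 1 {
    		goto s1
    	}
    	if x == 2 || x == 3 {
    		goto s2
    	}
    	goto sDefault
    s1:
    	r = "one"
    	goto done
    s2:
    	r = "two or three"
    	goto done
    sDefault:
    	r = "other"
    done:
    	return r
    }

    func main() { fmt.Println(classify(2)) }
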
- body.Append(npos(ncase.Pos, nodSym(OLABEL, nil, label))) + body.Append(npos(ncase.Pos, nodSym(ir.OLABEL, nil, label))) body.Append(ncase.Nbody.Slice()...) if fall, pos := hasFall(ncase.Nbody.Slice()); !fall { - br := nod(OBREAK, nil, nil) + br := ir.Nod(ir.OBREAK, nil, nil) br.Pos = pos body.Append(br) } @@ -304,7 +305,7 @@ func walkExprSwitch(sw *Node) { sw.List.Set(nil) if defaultGoto == nil { - br := nod(OBREAK, nil, nil) + br := ir.Nod(ir.OBREAK, nil, nil) br.Pos = br.Pos.WithNotStmt() defaultGoto = br } @@ -317,21 +318,21 @@ func walkExprSwitch(sw *Node) { // An exprSwitch walks an expression switch. type exprSwitch struct { - exprname *Node // value being switched on + exprname *ir.Node // value being switched on - done Nodes + done ir.Nodes clauses []exprClause } type exprClause struct { pos src.XPos - lo, hi *Node - jmp *Node + lo, hi *ir.Node + jmp *ir.Node } -func (s *exprSwitch) Add(pos src.XPos, expr, jmp *Node) { +func (s *exprSwitch) Add(pos src.XPos, expr, jmp *ir.Node) { c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp} - if okforcmp[s.exprname.Type.Etype] && expr.Op == OLITERAL { + if okforcmp[s.exprname.Type.Etype] && expr.Op == ir.OLITERAL { s.clauses = append(s.clauses, c) return } @@ -341,7 +342,7 @@ func (s *exprSwitch) Add(pos src.XPos, expr, jmp *Node) { s.flush() } -func (s *exprSwitch) Emit(out *Nodes) { +func (s *exprSwitch) Emit(out *ir.Nodes) { s.flush() out.AppendNodes(&s.done) } @@ -389,12 +390,12 @@ func (s *exprSwitch) flush() { // Perform two-level binary search. binarySearch(len(runs), &s.done, - func(i int) *Node { - return nod(OLE, nod(OLEN, s.exprname, nil), nodintconst(runLen(runs[i-1]))) + func(i int) *ir.Node { + return ir.Nod(ir.OLE, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(runs[i-1]))) }, - func(i int, nif *Node) { + func(i int, nif *ir.Node) { run := runs[i] - nif.Left = nod(OEQ, nod(OLEN, s.exprname, nil), nodintconst(runLen(run))) + nif.Left = ir.Nod(ir.OEQ, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(run))) s.search(run, &nif.Nbody) }, ) @@ -422,12 +423,12 @@ func (s *exprSwitch) flush() { s.search(cc, &s.done) } -func (s *exprSwitch) search(cc []exprClause, out *Nodes) { +func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) { binarySearch(len(cc), out, - func(i int) *Node { - return nod(OLE, s.exprname, cc[i-1].hi) + func(i int) *ir.Node { + return ir.Nod(ir.OLE, s.exprname, cc[i-1].hi) }, - func(i int, nif *Node) { + func(i int, nif *ir.Node) { c := &cc[i] nif.Left = c.test(s.exprname) nif.Nbody.Set1(c.jmp) @@ -435,27 +436,27 @@ func (s *exprSwitch) search(cc []exprClause, out *Nodes) { ) } -func (c *exprClause) test(exprname *Node) *Node { +func (c *exprClause) test(exprname *ir.Node) *ir.Node { // Integer range. if c.hi != c.lo { - low := nodl(c.pos, OGE, exprname, c.lo) - high := nodl(c.pos, OLE, exprname, c.hi) - return nodl(c.pos, OANDAND, low, high) + low := ir.NodAt(c.pos, ir.OGE, exprname, c.lo) + high := ir.NodAt(c.pos, ir.OLE, exprname, c.hi) + return ir.NodAt(c.pos, ir.OANDAND, low, high) } // Optimize "switch true { ...}" and "switch false { ... }". 
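
Two details of flush worth making concrete: runs of consecutive integer constants collapse into a single clause whose test is a range check, and string cases are first grouped by len(s) with a two-level binary search. A sketch of the merged integer test (inRun is an illustrative name):

    package main

    import "fmt"

    // Merged clauses "case 1, 2, 3:" test as lo <= x && x <= hi,
    // the OGE/OLE/OANDAND tree built by exprClause.test.
    func inRun(x int) bool {
    	return 1 <= x && x <= 3
    }

    func main() {
    	fmt.Println(inRun(0), inRun(2), inRun(3)) // false true true
    }
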
- if Isconst(exprname, constant.Bool) && !c.lo.Type.IsInterface() { + if ir.IsConst(exprname, constant.Bool) && !c.lo.Type.IsInterface() { if exprname.BoolVal() { return c.lo } else { - return nodl(c.pos, ONOT, c.lo, nil) + return ir.NodAt(c.pos, ir.ONOT, c.lo, nil) } } - return nodl(c.pos, OEQ, exprname, c.lo) + return ir.NodAt(c.pos, ir.OEQ, exprname, c.lo) } -func allCaseExprsAreSideEffectFree(sw *Node) bool { +func allCaseExprsAreSideEffectFree(sw *ir.Node) bool { // In theory, we could be more aggressive, allowing any // side-effect-free expressions in cases, but it's a bit // tricky because some of that information is unavailable due @@ -464,11 +465,11 @@ func allCaseExprsAreSideEffectFree(sw *Node) bool { // enough. for _, ncase := range sw.List.Slice() { - if ncase.Op != OCASE { + if ncase.Op != ir.OCASE { base.Fatalf("switch string(byteslice) bad op: %v", ncase.Op) } for _, v := range ncase.List.Slice() { - if v.Op != OLITERAL { + if v.Op != ir.OLITERAL { return false } } @@ -477,7 +478,7 @@ func allCaseExprsAreSideEffectFree(sw *Node) bool { } // hasFall reports whether stmts ends with a "fallthrough" statement. -func hasFall(stmts []*Node) (bool, src.XPos) { +func hasFall(stmts []*ir.Node) (bool, src.XPos) { // Search backwards for the index of the fallthrough // statement. Do not assume it'll be in the last // position, since in some cases (e.g. when the statement @@ -485,30 +486,30 @@ func hasFall(stmts []*Node) (bool, src.XPos) { // nodes will be at the end of the list. i := len(stmts) - 1 - for i >= 0 && stmts[i].Op == OVARKILL { + for i >= 0 && stmts[i].Op == ir.OVARKILL { i-- } if i < 0 { return false, src.NoXPos } - return stmts[i].Op == OFALL, stmts[i].Pos + return stmts[i].Op == ir.OFALL, stmts[i].Pos } // walkTypeSwitch generates an AST that implements sw, where sw is a // type switch. -func walkTypeSwitch(sw *Node) { +func walkTypeSwitch(sw *ir.Node) { var s typeSwitch s.facename = sw.Left.Right sw.Left = nil s.facename = walkexpr(s.facename, &sw.Ninit) s.facename = copyexpr(s.facename, s.facename.Type, &sw.Nbody) - s.okname = temp(types.Types[TBOOL]) + s.okname = temp(types.Types[types.TBOOL]) // Get interface descriptor word. // For empty interfaces this will be the type. // For non-empty interfaces this will be the itab. - itab := nod(OITAB, s.facename, nil) + itab := ir.Nod(ir.OITAB, s.facename, nil) // For empty interfaces, do: // if e._type == nil { @@ -516,8 +517,8 @@ func walkTypeSwitch(sw *Node) { // } // h := e._type.hash // Use a similar strategy for non-empty interfaces. - ifNil := nod(OIF, nil, nil) - ifNil.Left = nod(OEQ, itab, nodnil()) + ifNil := ir.Nod(ir.OIF, nil, nil) + ifNil.Left = ir.Nod(ir.OEQ, itab, nodnil()) base.Pos = base.Pos.WithNotStmt() // disable statement marks after the first check. ifNil.Left = typecheck(ifNil.Left, ctxExpr) ifNil.Left = defaultlit(ifNil.Left, nil) @@ -525,8 +526,8 @@ func walkTypeSwitch(sw *Node) { sw.Nbody.Append(ifNil) // Load hash from type or itab. 
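
The emitted prologue sketched in the comments above corresponds, at the source level, to a nil check followed by comma-ok assertions. A hand-written approximation (describe is an illustrative name; the real dispatch binary-searches the case types' hashes rather than testing each case in order):

    package main

    import "fmt"

    // Rough shape of the lowering for: switch v := e.(type) { ... }
    func describe(e interface{}) string {
    	if e == nil { // "if e._type == nil { goto nilCase }"
    		return "nil"
    	}
    	// h := e._type.hash; binary search over case-type hashes,
    	// then a comma-ok assertion confirms each candidate match:
    	if v, ok := e.(int); ok {
    		return fmt.Sprintf("int %d", v)
    	}
    	if v, ok := e.(string); ok {
    		return fmt.Sprintf("string %q", v)
    	}
    	return "other"
    }

    func main() { fmt.Println(describe(42)) }
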
- dotHash := nodSym(ODOTPTR, itab, nil) - dotHash.Type = types.Types[TUINT32] + dotHash := nodSym(ir.ODOTPTR, itab, nil) + dotHash.Type = types.Types[types.TUINT32] dotHash.SetTypecheck(1) if s.facename.Type.IsEmptyInterface() { dotHash.Xoffset = int64(2 * Widthptr) // offset of hash in runtime._type @@ -536,11 +537,11 @@ func walkTypeSwitch(sw *Node) { dotHash.SetBounded(true) // guaranteed not to fault s.hashname = copyexpr(dotHash, dotHash.Type, &sw.Nbody) - br := nod(OBREAK, nil, nil) - var defaultGoto, nilGoto *Node - var body Nodes + br := ir.Nod(ir.OBREAK, nil, nil) + var defaultGoto, nilGoto *ir.Node + var body ir.Nodes for _, ncase := range sw.List.Slice() { - var caseVar *Node + var caseVar *ir.Node if ncase.Rlist.Len() != 0 { caseVar = ncase.Rlist.First() } @@ -549,13 +550,13 @@ func walkTypeSwitch(sw *Node) { // we initialize the case variable as part of the type assertion. // In other cases, we initialize it in the body. var singleType *types.Type - if ncase.List.Len() == 1 && ncase.List.First().Op == OTYPE { + if ncase.List.Len() == 1 && ncase.List.First().Op == ir.OTYPE { singleType = ncase.List.First().Type } caseVarInitialized := false label := autolabel(".s") - jmp := npos(ncase.Pos, nodSym(OGOTO, nil, label)) + jmp := npos(ncase.Pos, nodSym(ir.OGOTO, nil, label)) if ncase.List.Len() == 0 { // default: if defaultGoto != nil { @@ -565,7 +566,7 @@ func walkTypeSwitch(sw *Node) { } for _, n1 := range ncase.List.Slice() { - if n1.isNil() { // case nil: + if ir.IsNil(n1) { // case nil: if nilGoto != nil { base.Fatalf("duplicate nil case not detected during typechecking") } @@ -581,7 +582,7 @@ func walkTypeSwitch(sw *Node) { } } - body.Append(npos(ncase.Pos, nodSym(OLABEL, nil, label))) + body.Append(npos(ncase.Pos, nodSym(ir.OLABEL, nil, label))) if caseVar != nil && !caseVarInitialized { val := s.facename if singleType != nil { @@ -591,9 +592,9 @@ func walkTypeSwitch(sw *Node) { } val = ifaceData(ncase.Pos, s.facename, singleType) } - l := []*Node{ - nodl(ncase.Pos, ODCL, caseVar, nil), - nodl(ncase.Pos, OAS, caseVar, val), + l := []*ir.Node{ + ir.NodAt(ncase.Pos, ir.ODCL, caseVar, nil), + ir.NodAt(ncase.Pos, ir.OAS, caseVar, val), } typecheckslice(l, ctxStmt) body.Append(l...) @@ -621,36 +622,36 @@ func walkTypeSwitch(sw *Node) { // A typeSwitch walks a type switch. type typeSwitch struct { // Temporary variables (i.e., ONAMEs) used by type switch dispatch logic: - facename *Node // value being type-switched on - hashname *Node // type hash of the value being type-switched on - okname *Node // boolean used for comma-ok type assertions + facename *ir.Node // value being type-switched on + hashname *ir.Node // type hash of the value being type-switched on + okname *ir.Node // boolean used for comma-ok type assertions - done Nodes + done ir.Nodes clauses []typeClause } type typeClause struct { hash uint32 - body Nodes + body ir.Nodes } -func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp *Node) { - var body Nodes +func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp *ir.Node) { + var body ir.Nodes if caseVar != nil { - l := []*Node{ - nodl(pos, ODCL, caseVar, nil), - nodl(pos, OAS, caseVar, nil), + l := []*ir.Node{ + ir.NodAt(pos, ir.ODCL, caseVar, nil), + ir.NodAt(pos, ir.OAS, caseVar, nil), } typecheckslice(l, ctxStmt) body.Append(l...) 
} else { - caseVar = nblank + caseVar = ir.BlankNode } // cv, ok = iface.(type) - as := nodl(pos, OAS2, nil, nil) + as := ir.NodAt(pos, ir.OAS2, nil, nil) as.List.Set2(caseVar, s.okname) // cv, ok = - dot := nodl(pos, ODOTTYPE, s.facename, nil) + dot := ir.NodAt(pos, ir.ODOTTYPE, s.facename, nil) dot.Type = typ // iface.(type) as.Rlist.Set1(dot) as = typecheck(as, ctxStmt) @@ -658,7 +659,7 @@ func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp *Node) { body.Append(as) // if ok { goto label } - nif := nodl(pos, OIF, nil, nil) + nif := ir.NodAt(pos, ir.OIF, nil, nil) nif.Left = s.okname nif.Nbody.Set1(jmp) body.Append(nif) @@ -675,7 +676,7 @@ func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp *Node) { s.done.AppendNodes(&body) } -func (s *typeSwitch) Emit(out *Nodes) { +func (s *typeSwitch) Emit(out *ir.Nodes) { s.flush() out.AppendNodes(&s.done) } @@ -702,14 +703,14 @@ func (s *typeSwitch) flush() { cc = merged binarySearch(len(cc), &s.done, - func(i int) *Node { - return nod(OLE, s.hashname, nodintconst(int64(cc[i-1].hash))) + func(i int) *ir.Node { + return ir.Nod(ir.OLE, s.hashname, nodintconst(int64(cc[i-1].hash))) }, - func(i int, nif *Node) { + func(i int, nif *ir.Node) { // TODO(mdempsky): Omit hash equality check if // there's only one type. c := cc[i] - nif.Left = nod(OEQ, s.hashname, nodintconst(int64(c.hash))) + nif.Left = ir.Nod(ir.OEQ, s.hashname, nodintconst(int64(c.hash))) nif.Nbody.AppendNodes(&c.body) }, ) @@ -724,15 +725,15 @@ func (s *typeSwitch) flush() { // // leaf(i, nif) should setup nif (an OIF node) to test case i. In // particular, it should set nif.Left and nif.Nbody. -func binarySearch(n int, out *Nodes, less func(i int) *Node, leaf func(i int, nif *Node)) { +func binarySearch(n int, out *ir.Nodes, less func(i int) *ir.Node, leaf func(i int, nif *ir.Node)) { const binarySearchMin = 4 // minimum number of cases for binary search - var do func(lo, hi int, out *Nodes) - do = func(lo, hi int, out *Nodes) { + var do func(lo, hi int, out *ir.Nodes) + do = func(lo, hi int, out *ir.Nodes) { n := hi - lo if n < binarySearchMin { for i := lo; i < hi; i++ { - nif := nod(OIF, nil, nil) + nif := ir.Nod(ir.OIF, nil, nil) leaf(i, nif) base.Pos = base.Pos.WithNotStmt() nif.Left = typecheck(nif.Left, ctxExpr) @@ -744,7 +745,7 @@ func binarySearch(n int, out *Nodes, less func(i int) *Node, leaf func(i int, ni } half := lo + n/2 - nif := nod(OIF, nil, nil) + nif := ir.Nod(ir.OIF, nil, nil) nif.Left = less(half) base.Pos = base.Pos.WithNotStmt() nif.Left = typecheck(nif.Left, ctxExpr) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index b61b9b0525892..78fdf100ad288 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" "fmt" "go/constant" @@ -19,7 +20,7 @@ const enableTrace = false var traceIndent []byte var skipDowidthForTracing bool -func tracePrint(title string, n *Node) func(np **Node) { +func tracePrint(title string, n *ir.Node) func(np **ir.Node) { indent := traceIndent // guard against nil @@ -36,7 +37,7 @@ func tracePrint(title string, n *Node) func(np **Node) { fmt.Printf("%s: %s%s %p %s %v tc=%d\n", pos, indent, title, n, op, n, tc) traceIndent = append(traceIndent, ". "...) 
- return func(np **Node) { + return func(np **ir.Node) { traceIndent = traceIndent[:len(traceIndent)-2] // if we have a result, use that @@ -76,11 +77,11 @@ const ( // marks variables that escape the local frame. // rewrites n.Op to be more specific in some cases. -var typecheckdefstack []*Node +var typecheckdefstack []*ir.Node // resolve ONONAME to definition, if any. -func resolve(n *Node) (res *Node) { - if n == nil || n.Op != ONONAME { +func resolve(n *ir.Node) (res *ir.Node) { + if n == nil || n.Op != ir.ONONAME { return n } @@ -89,7 +90,7 @@ func resolve(n *Node) (res *Node) { defer tracePrint("resolve", n)(&res) } - if n.Sym.Pkg != localpkg { + if n.Sym.Pkg != ir.LocalPkg { if inimport { base.Fatalf("recursive inimport") } @@ -99,12 +100,12 @@ func resolve(n *Node) (res *Node) { return n } - r := asNode(n.Sym.Def) + r := ir.AsNode(n.Sym.Def) if r == nil { return n } - if r.Op == OIOTA { + if r.Op == ir.OIOTA { if x := getIotaValue(); x >= 0 { return nodintconst(x) } @@ -114,41 +115,41 @@ func resolve(n *Node) (res *Node) { return r } -func typecheckslice(l []*Node, top int) { +func typecheckslice(l []*ir.Node, top int) { for i := range l { l[i] = typecheck(l[i], top) } } var _typekind = []string{ - TINT: "int", - TUINT: "uint", - TINT8: "int8", - TUINT8: "uint8", - TINT16: "int16", - TUINT16: "uint16", - TINT32: "int32", - TUINT32: "uint32", - TINT64: "int64", - TUINT64: "uint64", - TUINTPTR: "uintptr", - TCOMPLEX64: "complex64", - TCOMPLEX128: "complex128", - TFLOAT32: "float32", - TFLOAT64: "float64", - TBOOL: "bool", - TSTRING: "string", - TPTR: "pointer", - TUNSAFEPTR: "unsafe.Pointer", - TSTRUCT: "struct", - TINTER: "interface", - TCHAN: "chan", - TMAP: "map", - TARRAY: "array", - TSLICE: "slice", - TFUNC: "func", - TNIL: "nil", - TIDEAL: "untyped number", + types.TINT: "int", + types.TUINT: "uint", + types.TINT8: "int8", + types.TUINT8: "uint8", + types.TINT16: "int16", + types.TUINT16: "uint16", + types.TINT32: "int32", + types.TUINT32: "uint32", + types.TINT64: "int64", + types.TUINT64: "uint64", + types.TUINTPTR: "uintptr", + types.TCOMPLEX64: "complex64", + types.TCOMPLEX128: "complex128", + types.TFLOAT32: "float32", + types.TFLOAT64: "float64", + types.TBOOL: "bool", + types.TSTRING: "string", + types.TPTR: "pointer", + types.TUNSAFEPTR: "unsafe.Pointer", + types.TSTRUCT: "struct", + types.TINTER: "interface", + types.TCHAN: "chan", + types.TMAP: "map", + types.TARRAY: "array", + types.TSLICE: "slice", + types.TFUNC: "func", + types.TNIL: "nil", + types.TIDEAL: "untyped number", } func typekind(t *types.Type) string { @@ -165,7 +166,7 @@ func typekind(t *types.Type) string { return fmt.Sprintf("etype=%d", et) } -func cycleFor(start *Node) []*Node { +func cycleFor(start *ir.Node) []*ir.Node { // Find the start node in typecheck_tcstack. 
// We know that it must exist because each time we mark // a node with n.SetTypecheck(2) we push it on the stack, @@ -178,7 +179,7 @@ func cycleFor(start *Node) []*Node { } // collect all nodes with same Op - var cycle []*Node + var cycle []*ir.Node for _, n := range typecheck_tcstack[i:] { if n.Op == start.Op { cycle = append(cycle, n) @@ -188,20 +189,20 @@ func cycleFor(start *Node) []*Node { return cycle } -func cycleTrace(cycle []*Node) string { +func cycleTrace(cycle []*ir.Node) string { var s string for i, n := range cycle { - s += fmt.Sprintf("\n\t%v: %v uses %v", n.Line(), n, cycle[(i+1)%len(cycle)]) + s += fmt.Sprintf("\n\t%v: %v uses %v", ir.Line(n), n, cycle[(i+1)%len(cycle)]) } return s } -var typecheck_tcstack []*Node +var typecheck_tcstack []*ir.Node // typecheck type checks node n. // The result of typecheck MUST be assigned back to n, e.g. // n.Left = typecheck(n.Left, top) -func typecheck(n *Node, top int) (res *Node) { +func typecheck(n *ir.Node, top int) (res *ir.Node) { // cannot type check until all the source has been parsed if !typecheckok { base.Fatalf("early typecheck") @@ -219,7 +220,7 @@ func typecheck(n *Node, top int) (res *Node) { lno := setlineno(n) // Skip over parens. - for n.Op == OPAREN { + for n.Op == ir.OPAREN { n = n.Left } @@ -230,7 +231,7 @@ func typecheck(n *Node, top int) (res *Node) { // But re-typecheck ONAME/OTYPE/OLITERAL/OPACK node in case context has changed. if n.Typecheck() == 1 { switch n.Op { - case ONAME, OTYPE, OLITERAL, OPACK: + case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.OPACK: break default: @@ -244,12 +245,12 @@ func typecheck(n *Node, top int) (res *Node) { // otherwise a stack trace of typechecking. switch n.Op { // We can already diagnose variables used as types. - case ONAME: + case ir.ONAME: if top&(ctxExpr|ctxType) == ctxType { base.Errorf("%v is not a type", n) } - case OTYPE: + case ir.OTYPE: // Only report a type cycle if we are expecting a type. // Otherwise let other code report an error. if top&ctxType == ctxType { @@ -274,7 +275,7 @@ func typecheck(n *Node, top int) (res *Node) { base.ErrorfAt(n.Pos, "invalid recursive type alias %v%s", n, cycleTrace(cycle)) } - case OLITERAL: + case ir.OLITERAL: if top&(ctxExpr|ctxType) == ctxType { base.Errorf("%v is not a type", n) break @@ -286,7 +287,7 @@ func typecheck(n *Node, top int) (res *Node) { var trace string for i := len(typecheck_tcstack) - 1; i >= 0; i-- { x := typecheck_tcstack[i] - trace += fmt.Sprintf("\n\t%v %v", x.Line(), x) + trace += fmt.Sprintf("\n\t%v %v", ir.Line(x), x) } base.Errorf("typechecking loop involving %v%s", n, trace) } @@ -316,34 +317,34 @@ func typecheck(n *Node, top int) (res *Node) { // value of type int (see also checkmake for comparison). // The result of indexlit MUST be assigned back to n, e.g. // n.Left = indexlit(n.Left) -func indexlit(n *Node) *Node { - if n != nil && n.Type != nil && n.Type.Etype == TIDEAL { - return defaultlit(n, types.Types[TINT]) +func indexlit(n *ir.Node) *ir.Node { + if n != nil && n.Type != nil && n.Type.Etype == types.TIDEAL { + return defaultlit(n, types.Types[types.TINT]) } return n } // The result of typecheck1 MUST be assigned back to n, e.g. 
// n.Left = typecheck1(n.Left, top) -func typecheck1(n *Node, top int) (res *Node) { +func typecheck1(n *ir.Node, top int) (res *ir.Node) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheck1", n)(&res) } switch n.Op { - case OLITERAL, ONAME, ONONAME, OTYPE: + case ir.OLITERAL, ir.ONAME, ir.ONONAME, ir.OTYPE: if n.Sym == nil { break } - if n.Op == ONAME && n.SubOp() != 0 && top&ctxCallee == 0 { + if n.Op == ir.ONAME && n.SubOp() != 0 && top&ctxCallee == 0 { base.Errorf("use of builtin %v not in function call", n.Sym) n.Type = nil return n } typecheckdef(n) - if n.Op == ONONAME { + if n.Op == ir.ONONAME { n.Type = nil return n } @@ -353,22 +354,22 @@ func typecheck1(n *Node, top int) (res *Node) { switch n.Op { // until typecheck is complete, do nothing. default: - Dump("typecheck", n) + ir.Dump("typecheck", n) base.Fatalf("typecheck %v", n.Op) // names - case OLITERAL: + case ir.OLITERAL: ok |= ctxExpr if n.Type == nil && n.Val().Kind() == constant.String { base.Fatalf("string literal missing type") } - case ONIL, ONONAME: + case ir.ONIL, ir.ONONAME: ok |= ctxExpr - case ONAME: + case ir.ONAME: if n.Name.Decldepth == 0 { n.Name.Decldepth = decldepth } @@ -379,7 +380,7 @@ func typecheck1(n *Node, top int) (res *Node) { if top&ctxAssign == 0 { // not a write to the variable - if n.isBlank() { + if ir.IsBlank(n) { base.Errorf("cannot use _ as value") n.Type = nil return n @@ -390,23 +391,23 @@ func typecheck1(n *Node, top int) (res *Node) { ok |= ctxExpr - case OPACK: + case ir.OPACK: base.Errorf("use of package %v without selector", n.Sym) n.Type = nil return n - case ODDD: + case ir.ODDD: break // types (ODEREF is with exprs) - case OTYPE: + case ir.OTYPE: ok |= ctxType if n.Type == nil { return n } - case OTARRAY: + case ir.OTARRAY: ok |= ctxType r := typecheck(n.Right, ctxType) if r.Type == nil { @@ -417,7 +418,7 @@ func typecheck1(n *Node, top int) (res *Node) { var t *types.Type if n.Left == nil { t = types.NewSlice(r.Type) - } else if n.Left.Op == ODDD { + } else if n.Left.Op == ir.ODDD { if !n.Diag() { n.SetDiag(true) base.Errorf("use of [...] array outside of array literal") @@ -427,11 +428,11 @@ func typecheck1(n *Node, top int) (res *Node) { } else { n.Left = indexlit(typecheck(n.Left, ctxExpr)) l := n.Left - if consttype(l) != constant.Int { + if ir.ConstType(l) != constant.Int { switch { case l.Type == nil: // Error already reported elsewhere. 
- case l.Type.IsInteger() && l.Op != OLITERAL: + case l.Type.IsInteger() && l.Op != ir.OLITERAL: base.Errorf("non-constant array bound %v", l) default: base.Errorf("invalid array bound %v", l) @@ -441,7 +442,7 @@ func typecheck1(n *Node, top int) (res *Node) { } v := l.Val() - if doesoverflow(v, types.Types[TINT]) { + if doesoverflow(v, types.Types[types.TINT]) { base.Errorf("array bound is too large") n.Type = nil return n @@ -462,7 +463,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Right = nil checkwidth(t) - case OTMAP: + case ir.OTMAP: ok |= ctxType n.Left = typecheck(n.Left, ctxType) n.Right = typecheck(n.Right, ctxType) @@ -484,7 +485,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Left = nil n.Right = nil - case OTCHAN: + case ir.OTCHAN: ok |= ctxType n.Left = typecheck(n.Left, ctxType) l := n.Left @@ -500,16 +501,16 @@ func typecheck1(n *Node, top int) (res *Node) { n.Left = nil n.ResetAux() - case OTSTRUCT: + case ir.OTSTRUCT: ok |= ctxType setTypeNode(n, tostruct(n.List.Slice())) n.List.Set(nil) - case OTINTER: + case ir.OTINTER: ok |= ctxType setTypeNode(n, tointerface(n.List.Slice())) - case OTFUNC: + case ir.OTFUNC: ok |= ctxType setTypeNode(n, functype(n.Left, n.List.Slice(), n.Rlist.Slice())) n.Left = nil @@ -517,7 +518,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Rlist.Set(nil) // type or expr - case ODEREF: + case ir.ODEREF: n.Left = typecheck(n.Left, ctxExpr|ctxType) l := n.Left t := l.Type @@ -525,7 +526,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = nil return n } - if l.Op == OTYPE { + if l.Op == ir.OTYPE { ok |= ctxType setTypeNode(n, types.NewPtr(l.Type)) n.Left = nil @@ -548,30 +549,30 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = t.Elem() // arithmetic exprs - case OASOP, - OADD, - OAND, - OANDAND, - OANDNOT, - ODIV, - OEQ, - OGE, - OGT, - OLE, - OLT, - OLSH, - ORSH, - OMOD, - OMUL, - ONE, - OOR, - OOROR, - OSUB, - OXOR: - var l *Node - var op Op - var r *Node - if n.Op == OASOP { + case ir.OASOP, + ir.OADD, + ir.OAND, + ir.OANDAND, + ir.OANDNOT, + ir.ODIV, + ir.OEQ, + ir.OGE, + ir.OGT, + ir.OLE, + ir.OLT, + ir.OLSH, + ir.ORSH, + ir.OMOD, + ir.OMUL, + ir.ONE, + ir.OOR, + ir.OOROR, + ir.OSUB, + ir.OXOR: + var l *ir.Node + var op ir.Op + var r *ir.Node + if n.Op == ir.OASOP { ok |= ctxStmt n.Left = typecheck(n.Left, ctxExpr) n.Right = typecheck(n.Right, ctxExpr) @@ -601,8 +602,8 @@ func typecheck1(n *Node, top int) (res *Node) { } op = n.Op } - if op == OLSH || op == ORSH { - r = defaultlit(r, types.Types[TUINT]) + if op == ir.OLSH || op == ir.ORSH { + r = defaultlit(r, types.Types[types.TUINT]) n.Right = r t := r.Type if !t.IsInteger() { @@ -616,7 +617,7 @@ func typecheck1(n *Node, top int) (res *Node) { return n } t = l.Type - if t != nil && t.Etype != TIDEAL && !t.IsInteger() { + if t != nil && t.Etype != types.TIDEAL && !t.IsInteger() { base.Errorf("invalid operation: %v (shift of type %v)", n, t) n.Type = nil return n @@ -625,7 +626,7 @@ func typecheck1(n *Node, top int) (res *Node) { // no defaultlit for left // the outer context gives the type n.Type = l.Type - if (l.Type == types.UntypedFloat || l.Type == types.UntypedComplex) && r.Op == OLITERAL { + if (l.Type == types.UntypedFloat || l.Type == types.UntypedComplex) && r.Op == ir.OLITERAL { n.Type = types.UntypedInt } @@ -635,7 +636,7 @@ func typecheck1(n *Node, top int) (res *Node) { // For "x == x && len(s)", it's better to report that "len(s)" (type int) // can't be used with "&&" than to report that "x == x" (type untyped bool) // can't be converted 
to int (see issue #41500). - if n.Op == OANDAND || n.Op == OOROR { + if n.Op == ir.OANDAND || n.Op == ir.OOROR { if !n.Left.Type.IsBoolean() { base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(n.Left.Type)) n.Type = nil @@ -658,15 +659,15 @@ func typecheck1(n *Node, top int) (res *Node) { return n } t := l.Type - if t.Etype == TIDEAL { + if t.Etype == types.TIDEAL { t = r.Type } et := t.Etype - if et == TIDEAL { - et = TINT + if et == types.TIDEAL { + et = types.TINT } - aop := OXXX - if iscmp[n.Op] && t.Etype != TIDEAL && !types.Identical(l.Type, r.Type) { + aop := ir.OXXX + if iscmp[n.Op] && t.Etype != types.TIDEAL && !types.Identical(l.Type, r.Type) { // comparison is okay as long as one side is // assignable to the other. convert so they have // the same type. @@ -675,9 +676,9 @@ func typecheck1(n *Node, top int) (res *Node) { // in that case, check comparability of the concrete type. // The conversion allocates, so only do it if the concrete type is huge. converted := false - if r.Type.Etype != TBLANK { + if r.Type.Etype != types.TBLANK { aop, _ = assignop(l.Type, r.Type) - if aop != OXXX { + if aop != ir.OXXX { if r.Type.IsInterface() && !l.Type.IsInterface() && !IsComparable(l.Type) { base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type)) n.Type = nil @@ -686,7 +687,7 @@ func typecheck1(n *Node, top int) (res *Node) { dowidth(l.Type) if r.Type.IsInterface() == l.Type.IsInterface() || l.Type.Width >= 1<<16 { - l = nod(aop, l, nil) + l = ir.Nod(aop, l, nil) l.Type = r.Type l.SetTypecheck(1) n.Left = l @@ -697,9 +698,9 @@ func typecheck1(n *Node, top int) (res *Node) { } } - if !converted && l.Type.Etype != TBLANK { + if !converted && l.Type.Etype != types.TBLANK { aop, _ = assignop(r.Type, l.Type) - if aop != OXXX { + if aop != ir.OXXX { if l.Type.IsInterface() && !r.Type.IsInterface() && !IsComparable(r.Type) { base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type)) n.Type = nil @@ -708,7 +709,7 @@ func typecheck1(n *Node, top int) (res *Node) { dowidth(r.Type) if r.Type.IsInterface() == l.Type.IsInterface() || r.Type.Width >= 1<<16 { - r = nod(aop, r, nil) + r = ir.Nod(aop, r, nil) r.Type = l.Type r.SetTypecheck(1) n.Right = r @@ -721,7 +722,7 @@ func typecheck1(n *Node, top int) (res *Node) { et = t.Etype } - if t.Etype != TIDEAL && !types.Identical(l.Type, r.Type) { + if t.Etype != types.TIDEAL && !types.Identical(l.Type, r.Type) { l, r = defaultlit2(l, r, true) if l.Type == nil || r.Type == nil { n.Type = nil @@ -734,7 +735,7 @@ func typecheck1(n *Node, top int) (res *Node) { } } - if t.Etype == TIDEAL { + if t.Etype == types.TIDEAL { t = mixUntyped(l.Type, r.Type) } if dt := defaultType(t); !okfor[op][dt.Etype] { @@ -751,19 +752,19 @@ func typecheck1(n *Node, top int) (res *Node) { return n } - if l.Type.IsSlice() && !l.isNil() && !r.isNil() { + if l.Type.IsSlice() && !ir.IsNil(l) && !ir.IsNil(r) { base.Errorf("invalid operation: %v (slice can only be compared to nil)", n) n.Type = nil return n } - if l.Type.IsMap() && !l.isNil() && !r.isNil() { + if l.Type.IsMap() && !ir.IsNil(l) && !ir.IsNil(r) { base.Errorf("invalid operation: %v (map can only be compared to nil)", n) n.Type = nil return n } - if l.Type.Etype == TFUNC && !l.isNil() && !r.isNil() { + if l.Type.Etype == types.TFUNC && !ir.IsNil(l) && !ir.IsNil(r) { base.Errorf("invalid operation: %v (func can only be compared to nil)", n) n.Type = nil return n @@ -781,31 +782,31 @@ func typecheck1(n *Node, top int) 
(res *Node) { t = types.UntypedBool n.Type = t n = evalConst(n) - if n.Op != OLITERAL { + if n.Op != ir.OLITERAL { l, r = defaultlit2(l, r, true) n.Left = l n.Right = r } } - if et == TSTRING && n.Op == OADD { + if et == types.TSTRING && n.Op == ir.OADD { // create or update OADDSTR node with list of strings in x + y + z + (w + v) + ... - if l.Op == OADDSTR { + if l.Op == ir.OADDSTR { orig := n n = l n.Pos = orig.Pos } else { - n = nodl(n.Pos, OADDSTR, nil, nil) + n = ir.NodAt(n.Pos, ir.OADDSTR, nil, nil) n.List.Set1(l) } - if r.Op == OADDSTR { + if r.Op == ir.OADDSTR { n.List.AppendNodes(&r.List) } else { n.List.Append(r) } } - if (op == ODIV || op == OMOD) && Isconst(r, constant.Int) { + if (op == ir.ODIV || op == ir.OMOD) && ir.IsConst(r, constant.Int) { if constant.Sign(r.Val()) == 0 { base.Errorf("division by zero") n.Type = nil @@ -815,7 +816,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = t - case OBITNOT, ONEG, ONOT, OPLUS: + case ir.OBITNOT, ir.ONEG, ir.ONOT, ir.OPLUS: ok |= ctxExpr n.Left = typecheck(n.Left, ctxExpr) l := n.Left @@ -833,7 +834,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = t // exprs - case OADDR: + case ir.OADDR: ok |= ctxExpr n.Left = typecheck(n.Left, ctxExpr) @@ -843,13 +844,13 @@ func typecheck1(n *Node, top int) (res *Node) { } switch n.Left.Op { - case OARRAYLIT, OMAPLIT, OSLICELIT, OSTRUCTLIT: - n.Op = OPTRLIT + case ir.OARRAYLIT, ir.OMAPLIT, ir.OSLICELIT, ir.OSTRUCTLIT: + n.Op = ir.OPTRLIT default: checklvalue(n.Left, "take the address of") r := outervalue(n.Left) - if r.Op == ONAME { + if r.Op == ir.ONAME { if r.Orig != r { base.Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean? } @@ -871,17 +872,17 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = types.NewPtr(n.Left.Type) - case OCOMPLIT: + case ir.OCOMPLIT: ok |= ctxExpr n = typecheckcomplit(n) if n.Type == nil { return n } - case OXDOT, ODOT: - if n.Op == OXDOT { + case ir.OXDOT, ir.ODOT: + if n.Op == ir.OXDOT { n = adddot(n) - n.Op = ODOT + n.Op = ir.ODOT if n.Left == nil { n.Type = nil return n @@ -894,14 +895,14 @@ func typecheck1(n *Node, top int) (res *Node) { t := n.Left.Type if t == nil { - base.UpdateErrorDot(n.Line(), n.Left.String(), n.String()) + base.UpdateErrorDot(ir.Line(n), n.Left.String(), n.String()) n.Type = nil return n } s := n.Sym - if n.Left.Op == OTYPE { + if n.Left.Op == ir.OTYPE { n = typecheckMethodExpr(n) if n.Type == nil { return n @@ -916,7 +917,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = nil return n } - n.Op = ODOTPTR + n.Op = ir.ODOTPTR checkwidth(t) } @@ -952,7 +953,7 @@ func typecheck1(n *Node, top int) (res *Node) { } switch n.Op { - case ODOTINTER, ODOTMETH: + case ir.ODOTINTER, ir.ODOTMETH: if top&ctxCallee != 0 { ok |= ctxCallee } else { @@ -964,7 +965,7 @@ func typecheck1(n *Node, top int) (res *Node) { ok |= ctxExpr } - case ODOTTYPE: + case ir.ODOTTYPE: ok |= ctxExpr n.Left = typecheck(n.Left, ctxExpr) n.Left = defaultlit(n.Left, nil) @@ -1009,7 +1010,7 @@ func typecheck1(n *Node, top int) (res *Node) { } } - case OINDEX: + case ir.OINDEX: ok |= ctxExpr n.Left = typecheck(n.Left, ctxExpr) n.Left = defaultlit(n.Left, nil) @@ -1028,7 +1029,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = nil return n - case TSTRING, TARRAY, TSLICE: + case types.TSTRING, types.TARRAY, types.TSLICE: n.Right = indexlit(n.Right) if t.IsString() { n.Type = types.Bytetype @@ -1047,27 +1048,27 @@ func typecheck1(n *Node, top int) (res *Node) { break } - if !n.Bounded() && 
Isconst(n.Right, constant.Int) { + if !n.Bounded() && ir.IsConst(n.Right, constant.Int) { x := n.Right.Val() if constant.Sign(x) < 0 { base.Errorf("invalid %s index %v (index must be non-negative)", why, n.Right) } else if t.IsArray() && constant.Compare(x, token.GEQ, constant.MakeInt64(t.NumElem())) { base.Errorf("invalid array index %v (out of bounds for %d-element array)", n.Right, t.NumElem()) - } else if Isconst(n.Left, constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(n.Left.StringVal())))) { + } else if ir.IsConst(n.Left, constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(n.Left.StringVal())))) { base.Errorf("invalid string index %v (out of bounds for %d-byte string)", n.Right, len(n.Left.StringVal())) - } else if doesoverflow(x, types.Types[TINT]) { + } else if doesoverflow(x, types.Types[types.TINT]) { base.Errorf("invalid %s index %v (index too large)", why, n.Right) } } - case TMAP: + case types.TMAP: n.Right = assignconv(n.Right, t.Key(), "map index") n.Type = t.Elem() - n.Op = OINDEXMAP + n.Op = ir.OINDEXMAP n.ResetAux() } - case ORECV: + case ir.ORECV: ok |= ctxStmt | ctxExpr n.Left = typecheck(n.Left, ctxExpr) n.Left = defaultlit(n.Left, nil) @@ -1091,7 +1092,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = t.Elem() - case OSEND: + case ir.OSEND: ok |= ctxStmt n.Left = typecheck(n.Left, ctxExpr) n.Right = typecheck(n.Right, ctxExpr) @@ -1120,7 +1121,7 @@ func typecheck1(n *Node, top int) (res *Node) { } n.Type = nil - case OSLICEHEADER: + case ir.OSLICEHEADER: // Errors here are Fatalf instead of Errorf because only the compiler // can construct an OSLICEHEADER node. // Components used in OSLICEHEADER that are supplied by parsed source code @@ -1147,25 +1148,25 @@ func typecheck1(n *Node, top int) (res *Node) { n.Left = typecheck(n.Left, ctxExpr) l := typecheck(n.List.First(), ctxExpr) c := typecheck(n.List.Second(), ctxExpr) - l = defaultlit(l, types.Types[TINT]) - c = defaultlit(c, types.Types[TINT]) + l = defaultlit(l, types.Types[types.TINT]) + c = defaultlit(c, types.Types[types.TINT]) - if Isconst(l, constant.Int) && l.Int64Val() < 0 { + if ir.IsConst(l, constant.Int) && l.Int64Val() < 0 { base.Fatalf("len for OSLICEHEADER must be non-negative") } - if Isconst(c, constant.Int) && c.Int64Val() < 0 { + if ir.IsConst(c, constant.Int) && c.Int64Val() < 0 { base.Fatalf("cap for OSLICEHEADER must be non-negative") } - if Isconst(l, constant.Int) && Isconst(c, constant.Int) && constant.Compare(l.Val(), token.GTR, c.Val()) { + if ir.IsConst(l, constant.Int) && ir.IsConst(c, constant.Int) && constant.Compare(l.Val(), token.GTR, c.Val()) { base.Fatalf("len larger than cap for OSLICEHEADER") } n.List.SetFirst(l) n.List.SetSecond(c) - case OMAKESLICECOPY: + case ir.OMAKESLICECOPY: // Errors here are Fatalf instead of Errorf because only the compiler // can construct an OMAKESLICECOPY node. 
// Components used in OMAKESCLICECOPY that are supplied by parsed source code @@ -1193,14 +1194,14 @@ func typecheck1(n *Node, top int) (res *Node) { n.Left = typecheck(n.Left, ctxExpr) n.Right = typecheck(n.Right, ctxExpr) - n.Left = defaultlit(n.Left, types.Types[TINT]) + n.Left = defaultlit(n.Left, types.Types[types.TINT]) - if !n.Left.Type.IsInteger() && n.Type.Etype != TIDEAL { + if !n.Left.Type.IsInteger() && n.Type.Etype != types.TIDEAL { base.Errorf("non-integer len argument in OMAKESLICECOPY") } - if Isconst(n.Left, constant.Int) { - if doesoverflow(n.Left.Val(), types.Types[TINT]) { + if ir.IsConst(n.Left, constant.Int) { + if doesoverflow(n.Left.Val(), types.Types[types.TINT]) { base.Fatalf("len for OMAKESLICECOPY too large") } if constant.Sign(n.Left.Val()) < 0 { @@ -1208,7 +1209,7 @@ func typecheck1(n *Node, top int) (res *Node) { } } - case OSLICE, OSLICE3: + case ir.OSLICE, ir.OSLICE3: ok |= ctxExpr n.Left = typecheck(n.Left, ctxExpr) low, high, max := n.SliceBounds() @@ -1233,7 +1234,7 @@ func typecheck1(n *Node, top int) (res *Node) { return n } - n.Left = nod(OADDR, n.Left, nil) + n.Left = ir.Nod(ir.OADDR, n.Left, nil) n.Left.SetImplicit(true) n.Left = typecheck(n.Left, ctxExpr) l = n.Left @@ -1247,15 +1248,15 @@ func typecheck1(n *Node, top int) (res *Node) { return n } n.Type = t - n.Op = OSLICESTR + n.Op = ir.OSLICESTR } else if t.IsPtr() && t.Elem().IsArray() { tp = t.Elem() n.Type = types.NewSlice(tp.Elem()) dowidth(n.Type) if hasmax { - n.Op = OSLICE3ARR + n.Op = ir.OSLICE3ARR } else { - n.Op = OSLICEARR + n.Op = ir.OSLICEARR } } else if t.IsSlice() { n.Type = t @@ -1283,7 +1284,7 @@ func typecheck1(n *Node, top int) (res *Node) { } // call and call like - case OCALL: + case ir.OCALL: typecheckslice(n.Ninit.Slice(), ctxStmt) // imported rewritten f(g()) calls (#30907) n.Left = typecheck(n.Left, ctxExpr|ctxType|ctxCallee) if n.Left.Diag() { @@ -1292,8 +1293,8 @@ func typecheck1(n *Node, top int) (res *Node) { l := n.Left - if l.Op == ONAME && l.SubOp() != 0 { - if n.IsDDD() && l.SubOp() != OAPPEND { + if l.Op == ir.ONAME && l.SubOp() != 0 { + if n.IsDDD() && l.SubOp() != ir.OAPPEND { base.Errorf("invalid use of ... with builtin %v", l) } @@ -1307,7 +1308,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Left = defaultlit(n.Left, nil) l = n.Left - if l.Op == OTYPE { + if l.Op == ir.OTYPE { if n.IsDDD() { if !l.Type.Broke() { base.Errorf("invalid use of ... 
in type conversion to %v", l.Type) @@ -1321,7 +1322,7 @@ func typecheck1(n *Node, top int) (res *Node) { // turn CALL(type, arg) into CONV(arg) w/ type n.Left = nil - n.Op = OCONV + n.Op = ir.OCONV n.Type = l.Type if !onearg(n, "conversion to %v", l.Type) { n.Type = nil @@ -1340,11 +1341,11 @@ func typecheck1(n *Node, top int) (res *Node) { checkwidth(t) switch l.Op { - case ODOTINTER: - n.Op = OCALLINTER + case ir.ODOTINTER: + n.Op = ir.OCALLINTER - case ODOTMETH: - n.Op = OCALLMETH + case ir.ODOTMETH: + n.Op = ir.OCALLMETH // typecheckaste was used here but there wasn't enough // information further down the call chain to know if we @@ -1357,8 +1358,8 @@ func typecheck1(n *Node, top int) (res *Node) { } default: - n.Op = OCALLFUNC - if t.Etype != TFUNC { + n.Op = ir.OCALLFUNC + if t.Etype != types.TFUNC { name := l.String() if isBuiltinFuncName(name) && l.Name.Defn != nil { // be more specific when the function @@ -1373,7 +1374,7 @@ func typecheck1(n *Node, top int) (res *Node) { } } - typecheckaste(OCALL, n.Left, n.IsDDD(), t.Params(), n.List, func() string { return fmt.Sprintf("argument to %v", n.Left) }) + typecheckaste(ir.OCALL, n.Left, n.IsDDD(), t.Params(), n.List, func() string { return fmt.Sprintf("argument to %v", n.Left) }) ok |= ctxStmt if t.NumResults() == 0 { break @@ -1382,14 +1383,14 @@ func typecheck1(n *Node, top int) (res *Node) { if t.NumResults() == 1 { n.Type = l.Type.Results().Field(0).Type - if n.Op == OCALLFUNC && n.Left.Op == ONAME && isRuntimePkg(n.Left.Sym.Pkg) && n.Left.Sym.Name == "getg" { + if n.Op == ir.OCALLFUNC && n.Left.Op == ir.ONAME && isRuntimePkg(n.Left.Sym.Pkg) && n.Left.Sym.Name == "getg" { // Emit code for runtime.getg() directly instead of calling function. // Most such rewrites (for example the similar one for math.Sqrt) should be done in walk, // so that the ordering pass can make sure to preserve the semantics of the original code // (in particular, the exact time of the function call) by introducing temporaries. // In this case, we know getg() always returns the same result within a given function // and we want to avoid the temporaries, so we do the rewrite earlier than is typical. - n.Op = OGETG + n.Op = ir.OGETG } break @@ -1403,15 +1404,15 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = l.Type.Results() - case OALIGNOF, OOFFSETOF, OSIZEOF: + case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF: ok |= ctxExpr if !onearg(n, "%v", n.Op) { n.Type = nil return n } - n.Type = types.Types[TUINTPTR] + n.Type = types.Types[types.TUINTPTR] - case OCAP, OLEN: + case ir.OCAP, ir.OLEN: ok |= ctxExpr if !onearg(n, "%v", n.Op) { n.Type = nil @@ -1429,7 +1430,7 @@ func typecheck1(n *Node, top int) (res *Node) { } var ok bool - if n.Op == OLEN { + if n.Op == ir.OLEN { ok = okforlen[t.Etype] } else { ok = okforcap[t.Etype] @@ -1440,9 +1441,9 @@ func typecheck1(n *Node, top int) (res *Node) { return n } - n.Type = types.Types[TINT] + n.Type = types.Types[types.TINT] - case OREAL, OIMAG: + case ir.OREAL, ir.OIMAG: ok |= ctxExpr if !onearg(n, "%v", n.Op) { n.Type = nil @@ -1459,19 +1460,19 @@ func typecheck1(n *Node, top int) (res *Node) { // Determine result type. 
switch t.Etype { - case TIDEAL: + case types.TIDEAL: n.Type = types.UntypedFloat - case TCOMPLEX64: - n.Type = types.Types[TFLOAT32] - case TCOMPLEX128: - n.Type = types.Types[TFLOAT64] + case types.TCOMPLEX64: + n.Type = types.Types[types.TFLOAT32] + case types.TCOMPLEX128: + n.Type = types.Types[types.TFLOAT64] default: base.Errorf("invalid argument %L for %v", l, n.Op) n.Type = nil return n } - case OCOMPLEX: + case ir.OCOMPLEX: ok |= ctxExpr typecheckargs(n) if !twoarg(n) { @@ -1505,18 +1506,18 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = nil return n - case TIDEAL: + case types.TIDEAL: t = types.UntypedComplex - case TFLOAT32: - t = types.Types[TCOMPLEX64] + case types.TFLOAT32: + t = types.Types[types.TCOMPLEX64] - case TFLOAT64: - t = types.Types[TCOMPLEX128] + case types.TFLOAT64: + t = types.Types[types.TCOMPLEX128] } n.Type = t - case OCLOSE: + case ir.OCLOSE: if !onearg(n, "%v", n.Op) { n.Type = nil return n @@ -1543,7 +1544,7 @@ func typecheck1(n *Node, top int) (res *Node) { ok |= ctxStmt - case ODELETE: + case ir.ODELETE: ok |= ctxStmt typecheckargs(n) args := n.List @@ -1575,7 +1576,7 @@ func typecheck1(n *Node, top int) (res *Node) { args.SetSecond(assignconv(r, l.Type.Key(), "delete")) - case OAPPEND: + case ir.OAPPEND: ok |= ctxExpr typecheckargs(n) args := n.List @@ -1593,7 +1594,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = t if !t.IsSlice() { - if args.First().isNil() { + if ir.IsNil(args.First()) { base.Errorf("first argument to append must be typed slice; have untyped nil") n.Type = nil return n @@ -1617,8 +1618,8 @@ func typecheck1(n *Node, top int) (res *Node) { return n } - if t.Elem().IsKind(TUINT8) && args.Second().Type.IsString() { - args.SetSecond(defaultlit(args.Second(), types.Types[TSTRING])) + if t.Elem().IsKind(types.TUINT8) && args.Second().Type.IsString() { + args.SetSecond(defaultlit(args.Second(), types.Types[types.TSTRING])) break } @@ -1635,14 +1636,14 @@ func typecheck1(n *Node, top int) (res *Node) { checkwidth(as[i].Type) // ensure width is calculated for backend } - case OCOPY: + case ir.OCOPY: ok |= ctxStmt | ctxExpr typecheckargs(n) if !twoarg(n) { n.Type = nil return n } - n.Type = types.Types[TINT] + n.Type = types.Types[types.TINT] if n.Left.Type == nil || n.Right.Type == nil { n.Type = nil return n @@ -1682,7 +1683,7 @@ func typecheck1(n *Node, top int) (res *Node) { return n } - case OCONV: + case ir.OCONV: ok |= ctxExpr checkwidth(n.Type) // ensure width is calculated for backend n.Left = typecheck(n.Left, ctxExpr) @@ -1692,41 +1693,41 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = nil return n } - op, why := convertop(n.Left.Op == OLITERAL, t, n.Type) + op, why := convertop(n.Left.Op == ir.OLITERAL, t, n.Type) n.Op = op - if n.Op == OXXX { + if n.Op == ir.OXXX { if !n.Diag() && !n.Type.Broke() && !n.Left.Diag() { base.Errorf("cannot convert %L to type %v%s", n.Left, n.Type, why) n.SetDiag(true) } - n.Op = OCONV + n.Op = ir.OCONV n.Type = nil return n } switch n.Op { - case OCONVNOP: + case ir.OCONVNOP: if t.Etype == n.Type.Etype { switch t.Etype { - case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128: + case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128: // Floating point casts imply rounding and // so the conversion must be kept. - n.Op = OCONV + n.Op = ir.OCONV } } // do not convert to []byte literal. See CL 125796. // generated code and compiler memory footprint is better without it. 
- case OSTR2BYTES: + case ir.OSTR2BYTES: break - case OSTR2RUNES: - if n.Left.Op == OLITERAL { + case ir.OSTR2RUNES: + if n.Left.Op == ir.OLITERAL { n = stringtoruneslit(n) } } - case OMAKE: + case ir.OMAKE: ok |= ctxExpr args := n.List.Slice() if len(args) == 0 { @@ -1751,7 +1752,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = nil return n - case TSLICE: + case types.TSLICE: if i >= len(args) { base.Errorf("missing len argument to make(%v)", t) n.Type = nil @@ -1761,7 +1762,7 @@ func typecheck1(n *Node, top int) (res *Node) { l = args[i] i++ l = typecheck(l, ctxExpr) - var r *Node + var r *ir.Node if i < len(args) { r = args[i] i++ @@ -1776,7 +1777,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = nil return n } - if Isconst(l, constant.Int) && r != nil && Isconst(r, constant.Int) && constant.Compare(l.Val(), token.GTR, r.Val()) { + if ir.IsConst(l, constant.Int) && r != nil && ir.IsConst(r, constant.Int) && constant.Compare(l.Val(), token.GTR, r.Val()) { base.Errorf("len larger than cap in make(%v)", t) n.Type = nil return n @@ -1784,14 +1785,14 @@ func typecheck1(n *Node, top int) (res *Node) { n.Left = l n.Right = r - n.Op = OMAKESLICE + n.Op = ir.OMAKESLICE - case TMAP: + case types.TMAP: if i < len(args) { l = args[i] i++ l = typecheck(l, ctxExpr) - l = defaultlit(l, types.Types[TINT]) + l = defaultlit(l, types.Types[types.TINT]) if l.Type == nil { n.Type = nil return n @@ -1804,15 +1805,15 @@ func typecheck1(n *Node, top int) (res *Node) { } else { n.Left = nodintconst(0) } - n.Op = OMAKEMAP + n.Op = ir.OMAKEMAP - case TCHAN: + case types.TCHAN: l = nil if i < len(args) { l = args[i] i++ l = typecheck(l, ctxExpr) - l = defaultlit(l, types.Types[TINT]) + l = defaultlit(l, types.Types[types.TINT]) if l.Type == nil { n.Type = nil return n @@ -1825,19 +1826,19 @@ func typecheck1(n *Node, top int) (res *Node) { } else { n.Left = nodintconst(0) } - n.Op = OMAKECHAN + n.Op = ir.OMAKECHAN } if i < len(args) { base.Errorf("too many arguments to make(%v)", t) - n.Op = OMAKE + n.Op = ir.OMAKE n.Type = nil return n } n.Type = t - case ONEW: + case ir.ONEW: ok |= ctxExpr args := n.List if args.Len() == 0 { @@ -1862,33 +1863,33 @@ func typecheck1(n *Node, top int) (res *Node) { n.Left = l n.Type = types.NewPtr(t) - case OPRINT, OPRINTN: + case ir.OPRINT, ir.OPRINTN: ok |= ctxStmt typecheckargs(n) ls := n.List.Slice() for i1, n1 := range ls { // Special case for print: int constant is int64, not int. 
- if Isconst(n1, constant.Int) { - ls[i1] = defaultlit(ls[i1], types.Types[TINT64]) + if ir.IsConst(n1, constant.Int) { + ls[i1] = defaultlit(ls[i1], types.Types[types.TINT64]) } else { ls[i1] = defaultlit(ls[i1], nil) } } - case OPANIC: + case ir.OPANIC: ok |= ctxStmt if !onearg(n, "panic") { n.Type = nil return n } n.Left = typecheck(n.Left, ctxExpr) - n.Left = defaultlit(n.Left, types.Types[TINTER]) + n.Left = defaultlit(n.Left, types.Types[types.TINTER]) if n.Left.Type == nil { n.Type = nil return n } - case ORECOVER: + case ir.ORECOVER: ok |= ctxExpr | ctxStmt if n.List.Len() != 0 { base.Errorf("too many arguments to recover") @@ -1896,16 +1897,16 @@ func typecheck1(n *Node, top int) (res *Node) { return n } - n.Type = types.Types[TINTER] + n.Type = types.Types[types.TINTER] - case OCLOSURE: + case ir.OCLOSURE: ok |= ctxExpr typecheckclosure(n, top) if n.Type == nil { return n } - case OITAB: + case ir.OITAB: ok |= ctxExpr n.Left = typecheck(n.Left, ctxExpr) t := n.Left.Type @@ -1916,14 +1917,14 @@ func typecheck1(n *Node, top int) (res *Node) { if !t.IsInterface() { base.Fatalf("OITAB of %v", t) } - n.Type = types.NewPtr(types.Types[TUINTPTR]) + n.Type = types.NewPtr(types.Types[types.TUINTPTR]) - case OIDATA: + case ir.OIDATA: // Whoever creates the OIDATA node must know a priori the concrete type at that moment, // usually by just having checked the OITAB. base.Fatalf("cannot typecheck interface data %v", n) - case OSPTR: + case ir.OSPTR: ok |= ctxExpr n.Left = typecheck(n.Left, ctxExpr) t := n.Left.Type @@ -1935,72 +1936,72 @@ func typecheck1(n *Node, top int) (res *Node) { base.Fatalf("OSPTR of %v", t) } if t.IsString() { - n.Type = types.NewPtr(types.Types[TUINT8]) + n.Type = types.NewPtr(types.Types[types.TUINT8]) } else { n.Type = types.NewPtr(t.Elem()) } - case OCLOSUREVAR: + case ir.OCLOSUREVAR: ok |= ctxExpr - case OCFUNC: + case ir.OCFUNC: ok |= ctxExpr n.Left = typecheck(n.Left, ctxExpr) - n.Type = types.Types[TUINTPTR] + n.Type = types.Types[types.TUINTPTR] - case OCONVNOP: + case ir.OCONVNOP: ok |= ctxExpr n.Left = typecheck(n.Left, ctxExpr) // statements - case OAS: + case ir.OAS: ok |= ctxStmt typecheckas(n) // Code that creates temps does not bother to set defn, so do it here. - if n.Left.Op == ONAME && n.Left.IsAutoTmp() { + if n.Left.Op == ir.ONAME && n.Left.IsAutoTmp() { n.Left.Name.Defn = n } - case OAS2: + case ir.OAS2: ok |= ctxStmt typecheckas2(n) - case OBREAK, - OCONTINUE, - ODCL, - OEMPTY, - OGOTO, - OFALL, - OVARKILL, - OVARLIVE: + case ir.OBREAK, + ir.OCONTINUE, + ir.ODCL, + ir.OEMPTY, + ir.OGOTO, + ir.OFALL, + ir.OVARKILL, + ir.OVARLIVE: ok |= ctxStmt - case OLABEL: + case ir.OLABEL: ok |= ctxStmt decldepth++ if n.Sym.IsBlank() { // Empty identifier is valid but useless. // Eliminate now to simplify life later. // See issues 7538, 11589, 11593. 
- n.Op = OEMPTY + n.Op = ir.OEMPTY n.Left = nil } - case ODEFER: + case ir.ODEFER: ok |= ctxStmt n.Left = typecheck(n.Left, ctxStmt|ctxExpr) if !n.Left.Diag() { checkdefergo(n) } - case OGO: + case ir.OGO: ok |= ctxStmt n.Left = typecheck(n.Left, ctxStmt|ctxExpr) checkdefergo(n) - case OFOR, OFORUNTIL: + case ir.OFOR, ir.OFORUNTIL: ok |= ctxStmt typecheckslice(n.Ninit.Slice(), ctxStmt) decldepth++ @@ -2013,13 +2014,13 @@ func typecheck1(n *Node, top int) (res *Node) { } } n.Right = typecheck(n.Right, ctxStmt) - if n.Op == OFORUNTIL { + if n.Op == ir.OFORUNTIL { typecheckslice(n.List.Slice(), ctxStmt) } typecheckslice(n.Nbody.Slice(), ctxStmt) decldepth-- - case OIF: + case ir.OIF: ok |= ctxStmt typecheckslice(n.Ninit.Slice(), ctxStmt) n.Left = typecheck(n.Left, ctxExpr) @@ -2033,7 +2034,7 @@ func typecheck1(n *Node, top int) (res *Node) { typecheckslice(n.Nbody.Slice(), ctxStmt) typecheckslice(n.Rlist.Slice(), ctxStmt) - case ORETURN: + case ir.ORETURN: ok |= ctxStmt typecheckargs(n) if Curfn == nil { @@ -2045,47 +2046,47 @@ func typecheck1(n *Node, top int) (res *Node) { if Curfn.Type.FuncType().Outnamed && n.List.Len() == 0 { break } - typecheckaste(ORETURN, nil, false, Curfn.Type.Results(), n.List, func() string { return "return argument" }) + typecheckaste(ir.ORETURN, nil, false, Curfn.Type.Results(), n.List, func() string { return "return argument" }) - case ORETJMP: + case ir.ORETJMP: ok |= ctxStmt - case OSELECT: + case ir.OSELECT: ok |= ctxStmt typecheckselect(n) - case OSWITCH: + case ir.OSWITCH: ok |= ctxStmt typecheckswitch(n) - case ORANGE: + case ir.ORANGE: ok |= ctxStmt typecheckrange(n) - case OTYPESW: + case ir.OTYPESW: base.Errorf("use of .(type) outside type switch") n.Type = nil return n - case ODCLFUNC: + case ir.ODCLFUNC: ok |= ctxStmt typecheckfunc(n) - case ODCLCONST: + case ir.ODCLCONST: ok |= ctxStmt n.Left = typecheck(n.Left, ctxExpr) - case ODCLTYPE: + case ir.ODCLTYPE: ok |= ctxStmt n.Left = typecheck(n.Left, ctxType) checkwidth(n.Left.Type) } t := n.Type - if t != nil && !t.IsFuncArgStruct() && n.Op != OTYPE { + if t != nil && !t.IsFuncArgStruct() && n.Op != ir.OTYPE { switch t.Etype { - case TFUNC, // might have TANY; wait until it's called - TANY, TFORW, TIDEAL, TNIL, TBLANK: + case types.TFUNC, // might have TANY; wait until it's called + types.TANY, types.TFORW, types.TIDEAL, types.TNIL, types.TBLANK: break default: @@ -2094,7 +2095,7 @@ func typecheck1(n *Node, top int) (res *Node) { } n = evalConst(n) - if n.Op == OTYPE && top&ctxType == 0 { + if n.Op == ir.OTYPE && top&ctxType == 0 { if !n.Type.Broke() { base.Errorf("type %v is not an expression", n.Type) } @@ -2102,7 +2103,7 @@ func typecheck1(n *Node, top int) (res *Node) { return n } - if top&(ctxExpr|ctxType) == ctxType && n.Op != OTYPE { + if top&(ctxExpr|ctxType) == ctxType && n.Op != ir.OTYPE { base.Errorf("%v is not a type", n) n.Type = nil return n @@ -2128,7 +2129,7 @@ func typecheck1(n *Node, top int) (res *Node) { return n } -func typecheckargs(n *Node) { +func typecheckargs(n *ir.Node) { if n.List.Len() != 1 || n.IsDDD() { typecheckslice(n.List.Slice(), ctxExpr) return @@ -2144,10 +2145,10 @@ func typecheckargs(n *Node) { // Save n as n.Orig for fmt.go. 
if n.Orig == n { - n.Orig = n.sepcopy() + n.Orig = ir.SepCopy(n) } - as := nod(OAS2, nil, nil) + as := ir.Nod(ir.OAS2, nil, nil) as.Rlist.AppendNodes(&n.List) // If we're outside of function context, then this call will @@ -2161,7 +2162,7 @@ func typecheckargs(n *Node) { } for _, f := range t.FieldSlice() { t := temp(f.Type) - as.Ninit.Append(nod(ODCL, t, nil)) + as.Ninit.Append(ir.Nod(ir.ODCL, t, nil)) as.List.Append(t) n.List.Append(t) } @@ -2173,7 +2174,7 @@ func typecheckargs(n *Node) { n.Ninit.Append(as) } -func checksliceindex(l *Node, r *Node, tp *types.Type) bool { +func checksliceindex(l *ir.Node, r *ir.Node, tp *types.Type) bool { t := r.Type if t == nil { return false @@ -2183,7 +2184,7 @@ func checksliceindex(l *Node, r *Node, tp *types.Type) bool { return false } - if r.Op == OLITERAL { + if r.Op == ir.OLITERAL { x := r.Val() if constant.Sign(x) < 0 { base.Errorf("invalid slice index %v (index must be non-negative)", r) @@ -2191,10 +2192,10 @@ func checksliceindex(l *Node, r *Node, tp *types.Type) bool { } else if tp != nil && tp.NumElem() >= 0 && constant.Compare(x, token.GTR, constant.MakeInt64(tp.NumElem())) { base.Errorf("invalid slice index %v (out of bounds for %d-element array)", r, tp.NumElem()) return false - } else if Isconst(l, constant.String) && constant.Compare(x, token.GTR, constant.MakeInt64(int64(len(l.StringVal())))) { + } else if ir.IsConst(l, constant.String) && constant.Compare(x, token.GTR, constant.MakeInt64(int64(len(l.StringVal())))) { base.Errorf("invalid slice index %v (out of bounds for %d-byte string)", r, len(l.StringVal())) return false - } else if doesoverflow(x, types.Types[TINT]) { + } else if doesoverflow(x, types.Types[types.TINT]) { base.Errorf("invalid slice index %v (index too large)", r) return false } @@ -2203,8 +2204,8 @@ func checksliceindex(l *Node, r *Node, tp *types.Type) bool { return true } -func checksliceconst(lo *Node, hi *Node) bool { - if lo != nil && hi != nil && lo.Op == OLITERAL && hi.Op == OLITERAL && constant.Compare(lo.Val(), token.GTR, hi.Val()) { +func checksliceconst(lo *ir.Node, hi *ir.Node) bool { + if lo != nil && hi != nil && lo.Op == ir.OLITERAL && hi.Op == ir.OLITERAL && constant.Compare(lo.Val(), token.GTR, hi.Val()) { base.Errorf("invalid slice index: %v > %v", lo, hi) return false } @@ -2212,39 +2213,39 @@ func checksliceconst(lo *Node, hi *Node) bool { return true } -func checkdefergo(n *Node) { +func checkdefergo(n *ir.Node) { what := "defer" - if n.Op == OGO { + if n.Op == ir.OGO { what = "go" } switch n.Left.Op { // ok - case OCALLINTER, - OCALLMETH, - OCALLFUNC, - OCLOSE, - OCOPY, - ODELETE, - OPANIC, - OPRINT, - OPRINTN, - ORECOVER: + case ir.OCALLINTER, + ir.OCALLMETH, + ir.OCALLFUNC, + ir.OCLOSE, + ir.OCOPY, + ir.ODELETE, + ir.OPANIC, + ir.OPRINT, + ir.OPRINTN, + ir.ORECOVER: return - case OAPPEND, - OCAP, - OCOMPLEX, - OIMAG, - OLEN, - OMAKE, - OMAKESLICE, - OMAKECHAN, - OMAKEMAP, - ONEW, - OREAL, - OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof - if n.Left.Orig != nil && n.Left.Orig.Op == OCONV { + case ir.OAPPEND, + ir.OCAP, + ir.OCOMPLEX, + ir.OIMAG, + ir.OLEN, + ir.OMAKE, + ir.OMAKESLICE, + ir.OMAKECHAN, + ir.OMAKEMAP, + ir.ONEW, + ir.OREAL, + ir.OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof + if n.Left.Orig != nil && n.Left.Orig.Op == ir.OCONV { break } base.ErrorfAt(n.Pos, "%s discards result of %v", what, n.Left) @@ -2267,7 +2268,7 @@ func checkdefergo(n *Node) { // The result of implicitstar MUST be assigned back to n, e.g. 
// n.Left = implicitstar(n.Left) -func implicitstar(n *Node) *Node { +func implicitstar(n *ir.Node) *ir.Node { // insert implicit * if needed for fixed array t := n.Type if t == nil || !t.IsPtr() { @@ -2280,13 +2281,13 @@ func implicitstar(n *Node) *Node { if !t.IsArray() { return n } - n = nod(ODEREF, n, nil) + n = ir.Nod(ir.ODEREF, n, nil) n.SetImplicit(true) n = typecheck(n, ctxExpr) return n } -func onearg(n *Node, f string, args ...interface{}) bool { +func onearg(n *ir.Node, f string, args ...interface{}) bool { if n.Left != nil { return true } @@ -2309,7 +2310,7 @@ func onearg(n *Node, f string, args ...interface{}) bool { return true } -func twoarg(n *Node) bool { +func twoarg(n *ir.Node) bool { if n.Left != nil { return true } @@ -2327,7 +2328,7 @@ func twoarg(n *Node) bool { return true } -func lookdot1(errnode *Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field { +func lookdot1(errnode *ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field { var r *types.Field for _, f := range fs.Slice() { if dostrcmp != 0 && f.Sym.Name == s.Name { @@ -2358,7 +2359,7 @@ func lookdot1(errnode *Node, s *types.Sym, t *types.Type, fs *types.Fields, dost // typecheckMethodExpr checks selector expressions (ODOT) where the // base expression is a type expression (OTYPE). -func typecheckMethodExpr(n *Node) (res *Node) { +func typecheckMethodExpr(n *ir.Node) (res *ir.Node) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckMethodExpr", n)(&res) } @@ -2411,20 +2412,20 @@ func typecheckMethodExpr(n *Node) (res *Node) { return n } - n.Op = OMETHEXPR + n.Op = ir.OMETHEXPR if n.Name == nil { - n.Name = new(Name) + n.Name = new(ir.Name) } - n.Right = newname(n.Sym) + n.Right = NewName(n.Sym) n.Sym = methodSym(t, n.Sym) n.Type = methodfunc(m.Type, n.Left.Type) n.Xoffset = 0 - n.SetClass(PFUNC) + n.SetClass(ir.PFUNC) n.SetOpt(m) // methodSym already marked n.Sym as a function. // Issue 25065. Make sure that we emit the symbol for a local method. 
- if base.Ctxt.Flag_dynlink && !inimport && (t.Sym == nil || t.Sym.Pkg == localpkg) { + if base.Ctxt.Flag_dynlink && !inimport && (t.Sym == nil || t.Sym.Pkg == ir.LocalPkg) { makefuncsym(n.Sym) } @@ -2446,7 +2447,7 @@ func derefall(t *types.Type) *types.Type { return t } -func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field { +func lookdot(n *ir.Node, t *types.Type, dostrcmp int) *types.Field { s := n.Sym dowidth(t) @@ -2471,19 +2472,19 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field { if f2 != nil { base.Errorf("%v is both field and method", n.Sym) } - if f1.Offset == BADWIDTH { + if f1.Offset == types.BADWIDTH { base.Fatalf("lookdot badwidth %v %p", f1, f1) } n.Xoffset = f1.Offset n.Type = f1.Type if t.IsInterface() { if n.Left.Type.IsPtr() { - n.Left = nod(ODEREF, n.Left, nil) // implicitstar + n.Left = ir.Nod(ir.ODEREF, n.Left, nil) // implicitstar n.Left.SetImplicit(true) n.Left = typecheck(n.Left, ctxExpr) } - n.Op = ODOTINTER + n.Op = ir.ODOTINTER } else { n.SetOpt(f1) } @@ -2502,11 +2503,11 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field { if !types.Identical(rcvr, tt) { if rcvr.IsPtr() && types.Identical(rcvr.Elem(), tt) { checklvalue(n.Left, "call pointer method on") - n.Left = nod(OADDR, n.Left, nil) + n.Left = ir.Nod(ir.OADDR, n.Left, nil) n.Left.SetImplicit(true) n.Left = typecheck(n.Left, ctxType|ctxExpr) } else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) { - n.Left = nod(ODEREF, n.Left, nil) + n.Left = ir.Nod(ir.ODEREF, n.Left, nil) n.Left.SetImplicit(true) n.Left = typecheck(n.Left, ctxType|ctxExpr) } else if tt.IsPtr() && tt.Elem().IsPtr() && types.Identical(derefall(tt), derefall(rcvr)) { @@ -2516,7 +2517,7 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field { if rcvr.IsPtr() && !tt.Elem().IsPtr() { break } - n.Left = nod(ODEREF, n.Left, nil) + n.Left = ir.Nod(ir.ODEREF, n.Left, nil) n.Left.SetImplicit(true) n.Left = typecheck(n.Left, ctxType|ctxExpr) tt = tt.Elem() @@ -2528,11 +2529,11 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field { pll := n ll := n.Left - for ll.Left != nil && (ll.Op == ODOT || ll.Op == ODOTPTR || ll.Op == ODEREF) { + for ll.Left != nil && (ll.Op == ir.ODOT || ll.Op == ir.ODOTPTR || ll.Op == ir.ODEREF) { pll = ll ll = ll.Left } - if pll.Implicit() && ll.Type.IsPtr() && ll.Type.Sym != nil && asNode(ll.Type.Sym.Def) != nil && asNode(ll.Type.Sym.Def).Op == OTYPE { + if pll.Implicit() && ll.Type.IsPtr() && ll.Type.Sym != nil && ir.AsNode(ll.Type.Sym.Def) != nil && ir.AsNode(ll.Type.Sym.Def).Op == ir.OTYPE { // It is invalid to automatically dereference a named pointer type when selecting a method. // Make n.Left == ll to clarify error message. 
n.Left = ll @@ -2542,7 +2543,7 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field { n.Sym = methodSym(n.Left.Type, f2.Sym) n.Xoffset = f2.Offset n.Type = f2.Type - n.Op = ODOTMETH + n.Op = ir.ODOTMETH n.SetOpt(f2) return f2 @@ -2551,9 +2552,9 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field { return nil } -func nokeys(l Nodes) bool { +func nokeys(l ir.Nodes) bool { for _, n := range l.Slice() { - if n.Op == OKEY || n.Op == OSTRUCTKEY { + if n.Op == ir.OKEY || n.Op == ir.OSTRUCTKEY { return false } } @@ -2571,7 +2572,7 @@ func hasddd(t *types.Type) bool { } // typecheck assignment: type list = expression list -func typecheckaste(op Op, call *Node, isddd bool, tstruct *types.Type, nl Nodes, desc func() string) { +func typecheckaste(op ir.Op, call *ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes, desc func() string) { var t *types.Type var i int @@ -2582,7 +2583,7 @@ func typecheckaste(op Op, call *Node, isddd bool, tstruct *types.Type, nl Nodes, return } - var n *Node + var n *ir.Node if nl.Len() == 1 { n = nl.First() } @@ -2671,7 +2672,7 @@ notenough: // call is the expression being called, not the overall call. // Method expressions have the form T.M, and the compiler has // rewritten those to ONAME nodes but left T in Left. - if call.Op == OMETHEXPR { + if call.Op == ir.OMETHEXPR { base.Errorf("not enough arguments in call to method expression %v%s", call, details) } else { base.Errorf("not enough arguments in call to %v%s", call, details) @@ -2694,7 +2695,7 @@ toomany: } } -func errorDetails(nl Nodes, tstruct *types.Type, isddd bool) string { +func errorDetails(nl ir.Nodes, tstruct *types.Type, isddd bool) string { // If we don't know any type at a call site, let's suppress any return // message signatures. See Issue https://golang.org/issues/19012. if tstruct == nil { @@ -2706,7 +2707,7 @@ func errorDetails(nl Nodes, tstruct *types.Type, isddd bool) string { return "" } } - return fmt.Sprintf("\n\thave %s\n\twant %v", nl.sigerr(isddd), tstruct) + return fmt.Sprintf("\n\thave %s\n\twant %v", fmtSignature(nl, isddd), tstruct) } // sigrepr is a type's representation to the outside world, @@ -2720,7 +2721,7 @@ func sigrepr(t *types.Type, isddd bool) string { return "bool" } - if t.Etype == TIDEAL { + if t.Etype == types.TIDEAL { // "untyped number" is not commonly used // outside of the compiler, so let's use "number". // TODO(mdempsky): Revisit this. @@ -2738,7 +2739,7 @@ func sigrepr(t *types.Type, isddd bool) string { } // sigerr returns the signature of the types at the call or return. -func (nl Nodes) sigerr(isddd bool) string { +func fmtSignature(nl ir.Nodes, isddd bool) string { if nl.Len() < 1 { return "()" } @@ -2764,7 +2765,7 @@ func fielddup(name string, hash map[string]bool) { // iscomptype reports whether type t is a composite literal type. func iscomptype(t *types.Type) bool { switch t.Etype { - case TARRAY, TSLICE, TSTRUCT, TMAP: + case types.TARRAY, types.TSLICE, types.TSTRUCT, types.TMAP: return true default: return false @@ -2773,8 +2774,8 @@ func iscomptype(t *types.Type) bool { // pushtype adds elided type information for composite literals if // appropriate, and returns the resulting expression. -func pushtype(n *Node, t *types.Type) *Node { - if n == nil || n.Op != OCOMPLIT || n.Right != nil { +func pushtype(n *ir.Node, t *types.Type) *ir.Node { + if n == nil || n.Op != ir.OCOMPLIT || n.Right != nil { return n } @@ -2787,7 +2788,7 @@ func pushtype(n *Node, t *types.Type) *Node { // For *T, return &T{...}. 
n.Right = typenod(t.Elem()) - n = nodl(n.Pos, OADDR, n, nil) + n = ir.NodAt(n.Pos, ir.OADDR, n, nil) n.SetImplicit(true) } @@ -2796,7 +2797,7 @@ func pushtype(n *Node, t *types.Type) *Node { // The result of typecheckcomplit MUST be assigned back to n, e.g. // n.Left = typecheckcomplit(n.Left) -func typecheckcomplit(n *Node) (res *Node) { +func typecheckcomplit(n *ir.Node) (res *ir.Node) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckcomplit", n)(&res) } @@ -2813,12 +2814,12 @@ func typecheckcomplit(n *Node) (res *Node) { } // Save original node (including n.Right) - n.Orig = n.copy() + n.Orig = ir.Copy(n) setlineno(n.Right) // Need to handle [...]T arrays specially. - if n.Right.Op == OTARRAY && n.Right.Left != nil && n.Right.Left.Op == ODDD { + if n.Right.Op == ir.OTARRAY && n.Right.Left != nil && n.Right.Left.Op == ir.ODDD { n.Right.Right = typecheck(n.Right.Right, ctxType) if n.Right.Right.Type == nil { n.Type = nil @@ -2828,7 +2829,7 @@ func typecheckcomplit(n *Node) (res *Node) { length := typecheckarraylit(elemType, -1, n.List.Slice(), "array literal") - n.Op = OARRAYLIT + n.Op = ir.OARRAYLIT n.Type = types.NewArray(elemType, length) n.Right = nil return n @@ -2847,21 +2848,21 @@ func typecheckcomplit(n *Node) (res *Node) { base.Errorf("invalid composite literal type %v", t) n.Type = nil - case TARRAY: + case types.TARRAY: typecheckarraylit(t.Elem(), t.NumElem(), n.List.Slice(), "array literal") - n.Op = OARRAYLIT + n.Op = ir.OARRAYLIT n.Right = nil - case TSLICE: + case types.TSLICE: length := typecheckarraylit(t.Elem(), -1, n.List.Slice(), "slice literal") - n.Op = OSLICELIT + n.Op = ir.OSLICELIT n.Right = nodintconst(length) - case TMAP: + case types.TMAP: var cs constSet for i3, l := range n.List.Slice() { setlineno(l) - if l.Op != OKEY { + if l.Op != ir.OKEY { n.List.SetIndex(i3, typecheck(l, ctxExpr)) base.Errorf("missing key in map literal") continue @@ -2879,10 +2880,10 @@ func typecheckcomplit(n *Node) (res *Node) { l.Right = assignconv(r, t.Elem(), "map value") } - n.Op = OMAPLIT + n.Op = ir.OMAPLIT n.Right = nil - case TSTRUCT: + case types.TSTRUCT: // Need valid field offsets for Xoffset below. dowidth(t) @@ -2904,12 +2905,12 @@ func typecheckcomplit(n *Node) (res *Node) { f := t.Field(i) s := f.Sym - if s != nil && !types.IsExported(s.Name) && s.Pkg != localpkg { + if s != nil && !types.IsExported(s.Name) && s.Pkg != ir.LocalPkg { base.Errorf("implicit assignment of unexported field '%s' in %v literal", s.Name, t) } // No pushtype allowed here. Must name fields for that. n1 = assignconv(n1, f.Type, "field value") - n1 = nodSym(OSTRUCTKEY, n1, f.Sym) + n1 = nodSym(ir.OSTRUCTKEY, n1, f.Sym) n1.Xoffset = f.Offset ls[i] = n1 } @@ -2924,10 +2925,10 @@ func typecheckcomplit(n *Node) (res *Node) { for i, l := range ls { setlineno(l) - if l.Op == OKEY { + if l.Op == ir.OKEY { key := l.Left - l.Op = OSTRUCTKEY + l.Op = ir.OSTRUCTKEY l.Left = l.Right l.Right = nil @@ -2935,7 +2936,7 @@ func typecheckcomplit(n *Node) (res *Node) { // the field to the right of the dot, // so s will be non-nil, but an OXDOT // is never a valid struct literal key. - if key.Sym == nil || key.Op == OXDOT || key.Sym.IsBlank() { + if key.Sym == nil || key.Op == ir.OXDOT || key.Sym.IsBlank() { base.Errorf("invalid field name %v in struct initializer", key) l.Left = typecheck(l.Left, ctxExpr) continue @@ -2945,7 +2946,7 @@ func typecheckcomplit(n *Node) (res *Node) { // package, because of import dot. Redirect to correct sym // before we do the lookup. 
s := key.Sym - if s.Pkg != localpkg && types.IsExported(s.Name) { + if s.Pkg != ir.LocalPkg && types.IsExported(s.Name) { s1 := lookup(s.Name) if s1.Origpkg == s.Pkg { s = s1 @@ -2954,7 +2955,7 @@ func typecheckcomplit(n *Node) (res *Node) { l.Sym = s } - if l.Op != OSTRUCTKEY { + if l.Op != ir.OSTRUCTKEY { if !errored { base.Errorf("mixture of field:value and value initializers") errored = true @@ -2999,7 +3000,7 @@ func typecheckcomplit(n *Node) (res *Node) { } } - n.Op = OSTRUCTLIT + n.Op = ir.OSTRUCTLIT n.Right = nil } @@ -3007,12 +3008,12 @@ func typecheckcomplit(n *Node) (res *Node) { } // typecheckarraylit type-checks a sequence of slice/array literal elements. -func typecheckarraylit(elemType *types.Type, bound int64, elts []*Node, ctx string) int64 { +func typecheckarraylit(elemType *types.Type, bound int64, elts []*ir.Node, ctx string) int64 { // If there are key/value pairs, create a map to keep seen // keys so we can check for duplicate indices. var indices map[int64]bool for _, elt := range elts { - if elt.Op == OKEY { + if elt.Op == ir.OKEY { indices = make(map[int64]bool) break } @@ -3022,8 +3023,8 @@ func typecheckarraylit(elemType *types.Type, bound int64, elts []*Node, ctx stri for i, elt := range elts { setlineno(elt) r := elts[i] - var kv *Node - if elt.Op == OKEY { + var kv *ir.Node + if elt.Op == ir.OKEY { elt.Left = typecheck(elt.Left, ctxExpr) key = indexconst(elt.Left) if key < 0 { @@ -3076,7 +3077,7 @@ func typecheckarraylit(elemType *types.Type, bound int64, elts []*Node, ctx stri // visible reports whether sym is exported or locally defined. func visible(sym *types.Sym) bool { - return sym != nil && (types.IsExported(sym.Name) || sym.Pkg == localpkg) + return sym != nil && (types.IsExported(sym.Name) || sym.Pkg == ir.LocalPkg) } // nonexported reports whether sym is an unexported field. @@ -3085,9 +3086,9 @@ func nonexported(sym *types.Sym) bool { } // lvalue etc -func islvalue(n *Node) bool { +func islvalue(n *ir.Node) bool { switch n.Op { - case OINDEX: + case ir.OINDEX: if n.Left.Type != nil && n.Left.Type.IsArray() { return islvalue(n.Left) } @@ -3095,14 +3096,14 @@ func islvalue(n *Node) bool { return false } fallthrough - case ODEREF, ODOTPTR, OCLOSUREVAR: + case ir.ODEREF, ir.ODOTPTR, ir.OCLOSUREVAR: return true - case ODOT: + case ir.ODOT: return islvalue(n.Left) - case ONAME: - if n.Class() == PFUNC { + case ir.ONAME: + if n.Class() == ir.PFUNC { return false } return true @@ -3111,17 +3112,17 @@ func islvalue(n *Node) bool { return false } -func checklvalue(n *Node, verb string) { +func checklvalue(n *ir.Node, verb string) { if !islvalue(n) { base.Errorf("cannot %s %v", verb, n) } } -func checkassign(stmt *Node, n *Node) { +func checkassign(stmt *ir.Node, n *ir.Node) { // Variables declared in ORANGE are assigned on every iteration. 
- if n.Name == nil || n.Name.Defn != stmt || stmt.Op == ORANGE { + if n.Name == nil || n.Name.Defn != stmt || stmt.Op == ir.ORANGE { r := outervalue(n) - if r.Op == ONAME { + if r.Op == ir.ONAME { r.Name.SetAssigned(true) if r.Name.IsClosureVar() { r.Name.Defn.Name.SetAssigned(true) @@ -3132,7 +3133,7 @@ func checkassign(stmt *Node, n *Node) { if islvalue(n) { return } - if n.Op == OINDEXMAP { + if n.Op == ir.OINDEXMAP { n.SetIndexMapLValue(true) return } @@ -3143,11 +3144,11 @@ func checkassign(stmt *Node, n *Node) { } switch { - case n.Op == ODOT && n.Left.Op == OINDEXMAP: + case n.Op == ir.ODOT && n.Left.Op == ir.OINDEXMAP: base.Errorf("cannot assign to struct field %v in map", n) - case (n.Op == OINDEX && n.Left.Type.IsString()) || n.Op == OSLICESTR: + case (n.Op == ir.OINDEX && n.Left.Type.IsString()) || n.Op == ir.OSLICESTR: base.Errorf("cannot assign to %v (strings are immutable)", n) - case n.Op == OLITERAL && n.Sym != nil && n.isGoConst(): + case n.Op == ir.OLITERAL && n.Sym != nil && isGoConst(n): base.Errorf("cannot assign to %v (declared const)", n) default: base.Errorf("cannot assign to %v", n) @@ -3155,7 +3156,7 @@ func checkassign(stmt *Node, n *Node) { n.Type = nil } -func checkassignlist(stmt *Node, l Nodes) { +func checkassignlist(stmt *ir.Node, l ir.Nodes) { for _, n := range l.Slice() { checkassign(stmt, n) } @@ -3176,35 +3177,35 @@ func checkassignlist(stmt *Node, l Nodes) { // currently OK, since the only place samesafeexpr gets used on an // lvalue expression is for OSLICE and OAPPEND optimizations, and it // is correct in those settings. -func samesafeexpr(l *Node, r *Node) bool { +func samesafeexpr(l *ir.Node, r *ir.Node) bool { if l.Op != r.Op || !types.Identical(l.Type, r.Type) { return false } switch l.Op { - case ONAME, OCLOSUREVAR: + case ir.ONAME, ir.OCLOSUREVAR: return l == r - case ODOT, ODOTPTR: + case ir.ODOT, ir.ODOTPTR: return l.Sym != nil && r.Sym != nil && l.Sym == r.Sym && samesafeexpr(l.Left, r.Left) - case ODEREF, OCONVNOP, - ONOT, OBITNOT, OPLUS, ONEG: + case ir.ODEREF, ir.OCONVNOP, + ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG: return samesafeexpr(l.Left, r.Left) - case OCONV: + case ir.OCONV: // Some conversions can't be reused, such as []byte(str). // Allow only numeric-ish types. This is a bit conservative. return issimple[l.Type.Etype] && samesafeexpr(l.Left, r.Left) - case OINDEX, OINDEXMAP, - OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD: + case ir.OINDEX, ir.OINDEXMAP, + ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD: return samesafeexpr(l.Left, r.Left) && samesafeexpr(l.Right, r.Right) - case OLITERAL: + case ir.OLITERAL: return constant.Compare(l.Val(), token.EQL, r.Val()) - case ONIL: + case ir.ONIL: return true } @@ -3214,7 +3215,7 @@ func samesafeexpr(l *Node, r *Node) bool { // type check assignment. // if this assignment is the definition of a var on the left side, // fill in the var's type. 
-func typecheckas(n *Node) { +func typecheckas(n *ir.Node) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckas", n)(nil) } @@ -3260,19 +3261,19 @@ func typecheckas(n *Node) { if n.Left.Typecheck() == 0 { n.Left = typecheck(n.Left, ctxExpr|ctxAssign) } - if !n.Left.isBlank() { + if !ir.IsBlank(n.Left) { checkwidth(n.Left.Type) // ensure width is calculated for backend } } -func checkassignto(src *types.Type, dst *Node) { - if op, why := assignop(src, dst.Type); op == OXXX { +func checkassignto(src *types.Type, dst *ir.Node) { + if op, why := assignop(src, dst.Type); op == ir.OXXX { base.Errorf("cannot assign %v to %L in multiple assignment%s", src, dst, why) return } } -func typecheckas2(n *Node) { +func typecheckas2(n *ir.Node) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckas2", n)(nil) } @@ -3297,8 +3298,8 @@ func typecheckas2(n *Node) { } checkassignlist(n, n.List) - var l *Node - var r *Node + var l *ir.Node + var r *ir.Node if cl == cr { // easy ls := n.List.Slice() @@ -3326,7 +3327,7 @@ func typecheckas2(n *Node) { goto out } switch r.Op { - case OCALLMETH, OCALLINTER, OCALLFUNC: + case ir.OCALLMETH, ir.OCALLINTER, ir.OCALLFUNC: if !r.Type.IsFuncArgStruct() { break } @@ -3334,7 +3335,7 @@ func typecheckas2(n *Node) { if cr != cl { goto mismatch } - n.Op = OAS2FUNC + n.Op = ir.OAS2FUNC n.Right = r n.Rlist.Set(nil) for i, l := range n.List.Slice() { @@ -3356,15 +3357,15 @@ func typecheckas2(n *Node) { goto out } switch r.Op { - case OINDEXMAP, ORECV, ODOTTYPE: + case ir.OINDEXMAP, ir.ORECV, ir.ODOTTYPE: switch r.Op { - case OINDEXMAP: - n.Op = OAS2MAPR - case ORECV: - n.Op = OAS2RECV - case ODOTTYPE: - n.Op = OAS2DOTTYPE - r.Op = ODOTTYPE2 + case ir.OINDEXMAP: + n.Op = ir.OAS2MAPR + case ir.ORECV: + n.Op = ir.OAS2RECV + case ir.ODOTTYPE: + n.Op = ir.OAS2DOTTYPE + r.Op = ir.ODOTTYPE2 } n.Right = r n.Rlist.Set(nil) @@ -3376,10 +3377,10 @@ func typecheckas2(n *Node) { } l := n.List.Second() if l.Type != nil && !l.Type.IsBoolean() { - checkassignto(types.Types[TBOOL], l) + checkassignto(types.Types[types.TBOOL], l) } if l.Name != nil && l.Name.Defn == n && l.Name.Param.Ntype == nil { - l.Type = types.Types[TBOOL] + l.Type = types.Types[types.TBOOL] } goto out } @@ -3389,7 +3390,7 @@ mismatch: switch r.Op { default: base.Errorf("assignment mismatch: %d variables but %d values", cl, cr) - case OCALLFUNC, OCALLMETH, OCALLINTER: + case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: base.Errorf("assignment mismatch: %d variables but %v returns %d values", cl, r.Left, cr) } @@ -3405,13 +3406,13 @@ out: } // type check function definition -func typecheckfunc(n *Node) { +func typecheckfunc(n *ir.Node) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckfunc", n)(nil) } for _, ln := range n.Func.Dcl { - if ln.Op == ONAME && (ln.Class() == PPARAM || ln.Class() == PPARAMOUT) { + if ln.Op == ir.ONAME && (ln.Class() == ir.PPARAM || ln.Class() == ir.PPARAMOUT) { ln.Name.Decldepth = 1 } } @@ -3424,13 +3425,13 @@ func typecheckfunc(n *Node) { n.Type = t rcvr := t.Recv() if rcvr != nil && n.Func.Shortname != nil { - m := addmethod(n, n.Func.Shortname, t, true, n.Func.Pragma&Nointerface != 0) + m := addmethod(n, n.Func.Shortname, t, true, n.Func.Pragma&ir.Nointerface != 0) if m == nil { return } n.Func.Nname.Sym = methodSym(rcvr.Type, n.Func.Shortname) - declare(n.Func.Nname, PFUNC) + declare(n.Func.Nname, ir.PFUNC) } if base.Ctxt.Flag_dynlink && !inimport && n.Func.Nname != nil { @@ -3440,25 +3441,25 @@ func typecheckfunc(n *Node) { // The result of 
stringtoruneslit MUST be assigned back to n, e.g. // n.Left = stringtoruneslit(n.Left) -func stringtoruneslit(n *Node) *Node { - if n.Left.Op != OLITERAL || n.Left.Val().Kind() != constant.String { +func stringtoruneslit(n *ir.Node) *ir.Node { + if n.Left.Op != ir.OLITERAL || n.Left.Val().Kind() != constant.String { base.Fatalf("stringtoarraylit %v", n) } - var l []*Node + var l []*ir.Node i := 0 for _, r := range n.Left.StringVal() { - l = append(l, nod(OKEY, nodintconst(int64(i)), nodintconst(int64(r)))) + l = append(l, ir.Nod(ir.OKEY, nodintconst(int64(i)), nodintconst(int64(r)))) i++ } - nn := nod(OCOMPLIT, nil, typenod(n.Type)) + nn := ir.Nod(ir.OCOMPLIT, nil, typenod(n.Type)) nn.List.Set(l) nn = typecheck(nn, ctxExpr) return nn } -var mapqueue []*Node +var mapqueue []*ir.Node func checkMapKeys() { for _, n := range mapqueue { @@ -3471,13 +3472,13 @@ func checkMapKeys() { } func setUnderlying(t, underlying *types.Type) { - if underlying.Etype == TFORW { + if underlying.Etype == types.TFORW { // This type isn't computed yet; when it is, update n. underlying.ForwardType().Copyto = append(underlying.ForwardType().Copyto, t) return } - n := asNode(t.Nod) + n := ir.AsNode(t.Nod) ft := t.ForwardType() cache := t.Cache @@ -3485,7 +3486,7 @@ func setUnderlying(t, underlying *types.Type) { *t = *underlying // Restore unnecessarily clobbered attributes. - t.Nod = asTypesNode(n) + t.Nod = ir.AsTypesNode(n) t.Sym = n.Sym if n.Name != nil { t.Vargen = n.Name.Vargen @@ -3502,7 +3503,7 @@ func setUnderlying(t, underlying *types.Type) { } // Propagate go:notinheap pragma from the Name to the Type. - if n.Name != nil && n.Name.Param != nil && n.Name.Param.Pragma()&NotInHeap != 0 { + if n.Name != nil && n.Name.Param != nil && n.Name.Param.Pragma()&ir.NotInHeap != 0 { t.SetNotInHeap(true) } @@ -3519,7 +3520,7 @@ func setUnderlying(t, underlying *types.Type) { } } -func typecheckdeftype(n *Node) { +func typecheckdeftype(n *ir.Node) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckdeftype", n)(nil) } @@ -3539,14 +3540,14 @@ func typecheckdeftype(n *Node) { } } -func typecheckdef(n *Node) { +func typecheckdef(n *ir.Node) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckdef", n)(nil) } lno := setlineno(n) - if n.Op == ONONAME { + if n.Op == ir.ONONAME { if !n.Diag() { n.SetDiag(true) @@ -3585,7 +3586,7 @@ func typecheckdef(n *Node) { default: base.Fatalf("typecheckdef %v", n.Op) - case OLITERAL: + case ir.OLITERAL: if n.Name.Param.Ntype != nil { n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, ctxType) n.Type = n.Name.Param.Ntype.Type @@ -3599,7 +3600,7 @@ func typecheckdef(n *Node) { e := n.Name.Defn n.Name.Defn = nil if e == nil { - Dump("typecheckdef nil defn", n) + ir.Dump("typecheckdef nil defn", n) base.ErrorfAt(n.Pos, "xxx") } @@ -3607,9 +3608,9 @@ func typecheckdef(n *Node) { if e.Type == nil { goto ret } - if !e.isGoConst() { + if !isGoConst(e) { if !e.Diag() { - if e.Op == ONIL { + if e.Op == ir.ONIL { base.ErrorfAt(n.Pos, "const initializer cannot be nil") } else { base.ErrorfAt(n.Pos, "const initializer %v is not a constant", e) @@ -3621,7 +3622,7 @@ func typecheckdef(n *Node) { t := n.Type if t != nil { - if !okforconst[t.Etype] { + if !ir.OKForConst[t.Etype] { base.ErrorfAt(n.Pos, "invalid constant type %v", t) goto ret } @@ -3639,7 +3640,7 @@ func typecheckdef(n *Node) { n.SetVal(e.Val()) } - case ONAME: + case ir.ONAME: if n.Name.Param.Ntype != nil { n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, ctxType) n.Type = n.Name.Param.Ntype.Type @@ -3667,7 
+3668,7 @@ func typecheckdef(n *Node) { base.Fatalf("var without type, init: %v", n.Sym) } - if n.Name.Defn.Op == ONAME { + if n.Name.Defn.Op == ir.ONAME { n.Name.Defn = typecheck(n.Name.Defn, ctxExpr) n.Type = n.Name.Defn.Type break @@ -3675,7 +3676,7 @@ func typecheckdef(n *Node) { n.Name.Defn = typecheck(n.Name.Defn, ctxStmt) // fills in n.Type - case OTYPE: + case ir.OTYPE: if p := n.Name.Param; p.Alias() { // Type alias declaration: Simply use the rhs type - no need // to create a new type. @@ -3690,7 +3691,7 @@ func typecheckdef(n *Node) { // For package-level type aliases, set n.Sym.Def so we can identify // it as a type alias during export. See also #31959. if n.Name.Curfn == nil { - n.Sym.Def = asTypesNode(p.Ntype) + n.Sym.Def = ir.AsTypesNode(p.Ntype) } } break @@ -3699,11 +3700,11 @@ func typecheckdef(n *Node) { // regular type declaration defercheckwidth() n.SetWalkdef(1) - setTypeNode(n, types.New(TFORW)) + setTypeNode(n, types.New(types.TFORW)) n.Type.Sym = n.Sym errorsBefore := base.Errors() typecheckdeftype(n) - if n.Type.Etype == TFORW && base.Errors() > errorsBefore { + if n.Type.Etype == types.TFORW && base.Errors() > errorsBefore { // Something went wrong during type-checking, // but it was reported. Silence future errors. n.Type.SetBroke(true) @@ -3712,7 +3713,7 @@ func typecheckdef(n *Node) { } ret: - if n.Op != OLITERAL && n.Type != nil && n.Type.IsUntyped() { + if n.Op != ir.OLITERAL && n.Type != nil && n.Type.IsUntyped() { base.Fatalf("got %v for %v", n.Type, n) } last := len(typecheckdefstack) - 1 @@ -3726,22 +3727,22 @@ ret: n.SetWalkdef(1) } -func checkmake(t *types.Type, arg string, np **Node) bool { +func checkmake(t *types.Type, arg string, np **ir.Node) bool { n := *np - if !n.Type.IsInteger() && n.Type.Etype != TIDEAL { + if !n.Type.IsInteger() && n.Type.Etype != types.TIDEAL { base.Errorf("non-integer %s argument in make(%v) - %v", arg, t, n.Type) return false } // Do range checks for constants before defaultlit // to avoid redundant "constant NNN overflows int" errors. - if n.Op == OLITERAL { + if n.Op == ir.OLITERAL { v := toint(n.Val()) if constant.Sign(v) < 0 { base.Errorf("negative %s argument in make(%v)", arg, t) return false } - if doesoverflow(v, types.Types[TINT]) { + if doesoverflow(v, types.Types[types.TINT]) { base.Errorf("%s argument too large in make(%v)", arg, t) return false } @@ -3752,30 +3753,30 @@ func checkmake(t *types.Type, arg string, np **Node) bool { // are the same as for index expressions. Factor the code better; // for instance, indexlit might be called here and incorporate some // of the bounds checks done for make. 
- n = defaultlit(n, types.Types[TINT]) + n = defaultlit(n, types.Types[types.TINT]) *np = n return true } -func markbreak(n *Node, implicit *Node) { +func markbreak(n *ir.Node, implicit *ir.Node) { if n == nil { return } switch n.Op { - case OBREAK: + case ir.OBREAK: if n.Sym == nil { if implicit != nil { implicit.SetHasBreak(true) } } else { - lab := asNode(n.Sym.Label) + lab := ir.AsNode(n.Sym.Label) if lab != nil { lab.SetHasBreak(true) } } - case OFOR, OFORUNTIL, OSWITCH, OTYPESW, OSELECT, ORANGE: + case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OTYPESW, ir.OSELECT, ir.ORANGE: implicit = n fallthrough default: @@ -3788,17 +3789,17 @@ func markbreak(n *Node, implicit *Node) { } } -func markbreaklist(l Nodes, implicit *Node) { +func markbreaklist(l ir.Nodes, implicit *ir.Node) { s := l.Slice() for i := 0; i < len(s); i++ { n := s[i] if n == nil { continue } - if n.Op == OLABEL && i+1 < len(s) && n.Name.Defn == s[i+1] { + if n.Op == ir.OLABEL && i+1 < len(s) && n.Name.Defn == s[i+1] { switch n.Name.Defn.Op { - case OFOR, OFORUNTIL, OSWITCH, OTYPESW, OSELECT, ORANGE: - n.Sym.Label = asTypesNode(n.Name.Defn) + case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OTYPESW, ir.OSELECT, ir.ORANGE: + n.Sym.Label = ir.AsTypesNode(n.Name.Defn) markbreak(n.Name.Defn, n.Name.Defn) n.Sym.Label = nil i++ @@ -3811,31 +3812,31 @@ func markbreaklist(l Nodes, implicit *Node) { } // isterminating reports whether the Nodes list ends with a terminating statement. -func (l Nodes) isterminating() bool { +func isTermNodes(l ir.Nodes) bool { s := l.Slice() c := len(s) if c == 0 { return false } - return s[c-1].isterminating() + return isTermNode(s[c-1]) } // Isterminating reports whether the node n, the last one in a // statement list, is a terminating statement. -func (n *Node) isterminating() bool { +func isTermNode(n *ir.Node) bool { switch n.Op { // NOTE: OLABEL is treated as a separate statement, // not a separate prefix, so skipping to the last statement // in the block handles the labeled statement case by // skipping over the label. No case OLABEL here. - case OBLOCK: - return n.List.isterminating() + case ir.OBLOCK: + return isTermNodes(n.List) - case OGOTO, ORETURN, ORETJMP, OPANIC, OFALL: + case ir.OGOTO, ir.ORETURN, ir.ORETJMP, ir.OPANIC, ir.OFALL: return true - case OFOR, OFORUNTIL: + case ir.OFOR, ir.OFORUNTIL: if n.Left != nil { return false } @@ -3844,16 +3845,16 @@ func (n *Node) isterminating() bool { } return true - case OIF: - return n.Nbody.isterminating() && n.Rlist.isterminating() + case ir.OIF: + return isTermNodes(n.Nbody) && isTermNodes(n.Rlist) - case OSWITCH, OTYPESW, OSELECT: + case ir.OSWITCH, ir.OTYPESW, ir.OSELECT: if n.HasBreak() { return false } def := false for _, n1 := range n.List.Slice() { - if !n1.Nbody.isterminating() { + if !isTermNodes(n1.Nbody) { return false } if n1.List.Len() == 0 { // default @@ -3861,7 +3862,7 @@ func (n *Node) isterminating() bool { } } - if n.Op != OSELECT && !def { + if n.Op != ir.OSELECT && !def { return false } return true @@ -3871,21 +3872,21 @@ func (n *Node) isterminating() bool { } // checkreturn makes sure that fn terminates appropriately. 
-func checkreturn(fn *Node) { +func checkreturn(fn *ir.Node) { if fn.Type.NumResults() != 0 && fn.Nbody.Len() != 0 { markbreaklist(fn.Nbody, nil) - if !fn.Nbody.isterminating() { + if !isTermNodes(fn.Nbody) { base.ErrorfAt(fn.Func.Endlineno, "missing return at end of function") } } } -func deadcode(fn *Node) { +func deadcode(fn *ir.Node) { deadcodeslice(&fn.Nbody) deadcodefn(fn) } -func deadcodefn(fn *Node) { +func deadcodefn(fn *ir.Node) { if fn.Nbody.Len() == 0 { return } @@ -3895,12 +3896,12 @@ func deadcodefn(fn *Node) { return } switch n.Op { - case OIF: - if !Isconst(n.Left, constant.Bool) || n.Nbody.Len() > 0 || n.Rlist.Len() > 0 { + case ir.OIF: + if !ir.IsConst(n.Left, constant.Bool) || n.Nbody.Len() > 0 || n.Rlist.Len() > 0 { return } - case OFOR: - if !Isconst(n.Left, constant.Bool) || n.Left.BoolVal() { + case ir.OFOR: + if !ir.IsConst(n.Left, constant.Bool) || n.Left.BoolVal() { return } default: @@ -3908,13 +3909,13 @@ func deadcodefn(fn *Node) { } } - fn.Nbody.Set([]*Node{nod(OEMPTY, nil, nil)}) + fn.Nbody.Set([]*ir.Node{ir.Nod(ir.OEMPTY, nil, nil)}) } -func deadcodeslice(nn *Nodes) { +func deadcodeslice(nn *ir.Nodes) { var lastLabel = -1 for i, n := range nn.Slice() { - if n != nil && n.Op == OLABEL { + if n != nil && n.Op == ir.OLABEL { lastLabel = i } } @@ -3926,15 +3927,15 @@ func deadcodeslice(nn *Nodes) { if n == nil { continue } - if n.Op == OIF { + if n.Op == ir.OIF { n.Left = deadcodeexpr(n.Left) - if Isconst(n.Left, constant.Bool) { - var body Nodes + if ir.IsConst(n.Left, constant.Bool) { + var body ir.Nodes if n.Left.BoolVal() { - n.Rlist = Nodes{} + n.Rlist = ir.Nodes{} body = n.Nbody } else { - n.Nbody = Nodes{} + n.Nbody = ir.Nodes{} body = n.Rlist } // If "then" or "else" branch ends with panic or return statement, @@ -3944,7 +3945,7 @@ func deadcodeslice(nn *Nodes) { // might be the target of a goto. See issue 28616. if body := body.Slice(); len(body) != 0 { switch body[(len(body) - 1)].Op { - case ORETURN, ORETJMP, OPANIC: + case ir.ORETURN, ir.ORETJMP, ir.OPANIC: if i > lastLabel { cut = true } @@ -3964,25 +3965,25 @@ func deadcodeslice(nn *Nodes) { } } -func deadcodeexpr(n *Node) *Node { +func deadcodeexpr(n *ir.Node) *ir.Node { // Perform dead-code elimination on short-circuited boolean // expressions involving constants with the intent of // producing a constant 'if' condition. switch n.Op { - case OANDAND: + case ir.OANDAND: n.Left = deadcodeexpr(n.Left) n.Right = deadcodeexpr(n.Right) - if Isconst(n.Left, constant.Bool) { + if ir.IsConst(n.Left, constant.Bool) { if n.Left.BoolVal() { return n.Right // true && x => x } else { return n.Left // false && x => false } } - case OOROR: + case ir.OOROR: n.Left = deadcodeexpr(n.Left) n.Right = deadcodeexpr(n.Right) - if Isconst(n.Left, constant.Bool) { + if ir.IsConst(n.Left, constant.Bool) { if n.Left.BoolVal() { return n.Left // true || x => true } else { @@ -3994,17 +3995,17 @@ func deadcodeexpr(n *Node) *Node { } // setTypeNode sets n to an OTYPE node representing t. -func setTypeNode(n *Node, t *types.Type) { - n.Op = OTYPE +func setTypeNode(n *ir.Node, t *types.Type) { + n.Op = ir.OTYPE n.Type = t - n.Type.Nod = asTypesNode(n) + n.Type.Nod = ir.AsTypesNode(n) } // getIotaValue returns the current value for "iota", // or -1 if not within a ConstSpec. 
func getIotaValue() int64 { if i := len(typecheckdefstack); i > 0 { - if x := typecheckdefstack[i-1]; x.Op == OLITERAL { + if x := typecheckdefstack[i-1]; x.Op == ir.OLITERAL { return x.Iota() } } @@ -4021,12 +4022,12 @@ func curpkg() *types.Pkg { fn := Curfn if fn == nil { // Initialization expressions for package-scope variables. - return localpkg + return ir.LocalPkg } // TODO(mdempsky): Standardize on either ODCLFUNC or ONAME for // Curfn, rather than mixing them. - if fn.Op == ODCLFUNC { + if fn.Op == ir.ODCLFUNC { fn = fn.Func.Nname } @@ -4036,16 +4037,16 @@ func curpkg() *types.Pkg { // MethodName returns the ONAME representing the method // referenced by expression n, which must be a method selector, // method expression, or method value. -func (n *Node) MethodName() *Node { - return asNode(n.MethodFunc().Nname) +func methodExprName(n *ir.Node) *ir.Node { + return ir.AsNode(methodExprFunc(n).Nname) } // MethodFunc is like MethodName, but returns the types.Field instead. -func (n *Node) MethodFunc() *types.Field { +func methodExprFunc(n *ir.Node) *types.Field { switch n.Op { - case ODOTMETH, OMETHEXPR: + case ir.ODOTMETH, ir.OMETHEXPR: return n.Opt().(*types.Field) - case OCALLPART: + case ir.OCALLPART: return callpartMethod(n) } base.Fatalf("unexpected node: %v (%v)", n, n.Op) diff --git a/src/cmd/compile/internal/gc/types.go b/src/cmd/compile/internal/gc/types.go index 748f8458bdb51..e46735df28dbe 100644 --- a/src/cmd/compile/internal/gc/types.go +++ b/src/cmd/compile/internal/gc/types.go @@ -3,56 +3,3 @@ // license that can be found in the LICENSE file. package gc - -import ( - "cmd/compile/internal/types" -) - -// convenience constants -const ( - Txxx = types.Txxx - - TINT8 = types.TINT8 - TUINT8 = types.TUINT8 - TINT16 = types.TINT16 - TUINT16 = types.TUINT16 - TINT32 = types.TINT32 - TUINT32 = types.TUINT32 - TINT64 = types.TINT64 - TUINT64 = types.TUINT64 - TINT = types.TINT - TUINT = types.TUINT - TUINTPTR = types.TUINTPTR - - TCOMPLEX64 = types.TCOMPLEX64 - TCOMPLEX128 = types.TCOMPLEX128 - - TFLOAT32 = types.TFLOAT32 - TFLOAT64 = types.TFLOAT64 - - TBOOL = types.TBOOL - - TPTR = types.TPTR - TFUNC = types.TFUNC - TSLICE = types.TSLICE - TARRAY = types.TARRAY - TSTRUCT = types.TSTRUCT - TCHAN = types.TCHAN - TMAP = types.TMAP - TINTER = types.TINTER - TFORW = types.TFORW - TANY = types.TANY - TSTRING = types.TSTRING - TUNSAFEPTR = types.TUNSAFEPTR - - // pseudo-types for literals - TIDEAL = types.TIDEAL - TNIL = types.TNIL - TBLANK = types.TBLANK - - // pseudo-types for frame layout - TFUNCARGS = types.TFUNCARGS - TCHANARGS = types.TCHANARGS - - NTYPE = types.NTYPE -) diff --git a/src/cmd/compile/internal/gc/types_acc.go b/src/cmd/compile/internal/gc/types_acc.go index 7240f726f6296..d6d53f05cc949 100644 --- a/src/cmd/compile/internal/gc/types_acc.go +++ b/src/cmd/compile/internal/gc/types_acc.go @@ -6,11 +6,3 @@ // TODO(gri) try to eliminate these soon package gc - -import ( - "cmd/compile/internal/types" - "unsafe" -) - -func asNode(n *types.Node) *Node { return (*Node)(unsafe.Pointer(n)) } -func asTypesNode(n *Node) *types.Node { return (*types.Node)(unsafe.Pointer(n)) } diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index aa0ee4075dd18..bf31055dcc1be 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -8,31 +8,29 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/src" ) -// builtinpkg is a fake package 
that declares the universe block. -var builtinpkg *types.Pkg - var basicTypes = [...]struct { name string etype types.EType }{ - {"int8", TINT8}, - {"int16", TINT16}, - {"int32", TINT32}, - {"int64", TINT64}, - {"uint8", TUINT8}, - {"uint16", TUINT16}, - {"uint32", TUINT32}, - {"uint64", TUINT64}, - {"float32", TFLOAT32}, - {"float64", TFLOAT64}, - {"complex64", TCOMPLEX64}, - {"complex128", TCOMPLEX128}, - {"bool", TBOOL}, - {"string", TSTRING}, + {"int8", types.TINT8}, + {"int16", types.TINT16}, + {"int32", types.TINT32}, + {"int64", types.TINT64}, + {"uint8", types.TUINT8}, + {"uint16", types.TUINT16}, + {"uint32", types.TUINT32}, + {"uint64", types.TUINT64}, + {"float32", types.TFLOAT32}, + {"float64", types.TFLOAT64}, + {"complex64", types.TCOMPLEX64}, + {"complex128", types.TCOMPLEX128}, + {"bool", types.TBOOL}, + {"string", types.TSTRING}, } var typedefs = [...]struct { @@ -41,30 +39,30 @@ var typedefs = [...]struct { sameas32 types.EType sameas64 types.EType }{ - {"int", TINT, TINT32, TINT64}, - {"uint", TUINT, TUINT32, TUINT64}, - {"uintptr", TUINTPTR, TUINT32, TUINT64}, + {"int", types.TINT, types.TINT32, types.TINT64}, + {"uint", types.TUINT, types.TUINT32, types.TUINT64}, + {"uintptr", types.TUINTPTR, types.TUINT32, types.TUINT64}, } var builtinFuncs = [...]struct { name string - op Op + op ir.Op }{ - {"append", OAPPEND}, - {"cap", OCAP}, - {"close", OCLOSE}, - {"complex", OCOMPLEX}, - {"copy", OCOPY}, - {"delete", ODELETE}, - {"imag", OIMAG}, - {"len", OLEN}, - {"make", OMAKE}, - {"new", ONEW}, - {"panic", OPANIC}, - {"print", OPRINT}, - {"println", OPRINTN}, - {"real", OREAL}, - {"recover", ORECOVER}, + {"append", ir.OAPPEND}, + {"cap", ir.OCAP}, + {"close", ir.OCLOSE}, + {"complex", ir.OCOMPLEX}, + {"copy", ir.OCOPY}, + {"delete", ir.ODELETE}, + {"imag", ir.OIMAG}, + {"len", ir.OLEN}, + {"make", ir.OMAKE}, + {"new", ir.ONEW}, + {"panic", ir.OPANIC}, + {"print", ir.OPRINT}, + {"println", ir.OPRINTN}, + {"real", ir.OREAL}, + {"recover", ir.ORECOVER}, } // isBuiltinFuncName reports whether name matches a builtin function @@ -80,11 +78,11 @@ func isBuiltinFuncName(name string) bool { var unsafeFuncs = [...]struct { name string - op Op + op ir.Op }{ - {"Alignof", OALIGNOF}, - {"Offsetof", OOFFSETOF}, - {"Sizeof", OSIZEOF}, + {"Alignof", ir.OALIGNOF}, + {"Offsetof", ir.OOFFSETOF}, + {"Sizeof", ir.OSIZEOF}, } // initUniverse initializes the universe block. 
@@ -101,71 +99,71 @@ func lexinit() { if int(etype) >= len(types.Types) { base.Fatalf("lexinit: %s bad etype", s.name) } - s2 := builtinpkg.Lookup(s.name) + s2 := ir.BuiltinPkg.Lookup(s.name) t := types.Types[etype] if t == nil { t = types.New(etype) t.Sym = s2 - if etype != TANY && etype != TSTRING { + if etype != types.TANY && etype != types.TSTRING { dowidth(t) } types.Types[etype] = t } - s2.Def = asTypesNode(typenod(t)) - asNode(s2.Def).Name = new(Name) + s2.Def = ir.AsTypesNode(typenod(t)) + ir.AsNode(s2.Def).Name = new(ir.Name) } for _, s := range &builtinFuncs { - s2 := builtinpkg.Lookup(s.name) - s2.Def = asTypesNode(newname(s2)) - asNode(s2.Def).SetSubOp(s.op) + s2 := ir.BuiltinPkg.Lookup(s.name) + s2.Def = ir.AsTypesNode(NewName(s2)) + ir.AsNode(s2.Def).SetSubOp(s.op) } for _, s := range &unsafeFuncs { s2 := unsafepkg.Lookup(s.name) - s2.Def = asTypesNode(newname(s2)) - asNode(s2.Def).SetSubOp(s.op) + s2.Def = ir.AsTypesNode(NewName(s2)) + ir.AsNode(s2.Def).SetSubOp(s.op) } - types.UntypedString = types.New(TSTRING) - types.UntypedBool = types.New(TBOOL) - types.Types[TANY] = types.New(TANY) + types.UntypedString = types.New(types.TSTRING) + types.UntypedBool = types.New(types.TBOOL) + types.Types[types.TANY] = types.New(types.TANY) - s := builtinpkg.Lookup("true") - s.Def = asTypesNode(nodbool(true)) - asNode(s.Def).Sym = lookup("true") - asNode(s.Def).Name = new(Name) - asNode(s.Def).Type = types.UntypedBool + s := ir.BuiltinPkg.Lookup("true") + s.Def = ir.AsTypesNode(nodbool(true)) + ir.AsNode(s.Def).Sym = lookup("true") + ir.AsNode(s.Def).Name = new(ir.Name) + ir.AsNode(s.Def).Type = types.UntypedBool - s = builtinpkg.Lookup("false") - s.Def = asTypesNode(nodbool(false)) - asNode(s.Def).Sym = lookup("false") - asNode(s.Def).Name = new(Name) - asNode(s.Def).Type = types.UntypedBool + s = ir.BuiltinPkg.Lookup("false") + s.Def = ir.AsTypesNode(nodbool(false)) + ir.AsNode(s.Def).Sym = lookup("false") + ir.AsNode(s.Def).Name = new(ir.Name) + ir.AsNode(s.Def).Type = types.UntypedBool s = lookup("_") s.Block = -100 - s.Def = asTypesNode(newname(s)) - types.Types[TBLANK] = types.New(TBLANK) - asNode(s.Def).Type = types.Types[TBLANK] - nblank = asNode(s.Def) + s.Def = ir.AsTypesNode(NewName(s)) + types.Types[types.TBLANK] = types.New(types.TBLANK) + ir.AsNode(s.Def).Type = types.Types[types.TBLANK] + ir.BlankNode = ir.AsNode(s.Def) - s = builtinpkg.Lookup("_") + s = ir.BuiltinPkg.Lookup("_") s.Block = -100 - s.Def = asTypesNode(newname(s)) - types.Types[TBLANK] = types.New(TBLANK) - asNode(s.Def).Type = types.Types[TBLANK] - - types.Types[TNIL] = types.New(TNIL) - s = builtinpkg.Lookup("nil") - s.Def = asTypesNode(nodnil()) - asNode(s.Def).Sym = s - asNode(s.Def).Name = new(Name) - - s = builtinpkg.Lookup("iota") - s.Def = asTypesNode(nod(OIOTA, nil, nil)) - asNode(s.Def).Sym = s - asNode(s.Def).Name = new(Name) + s.Def = ir.AsTypesNode(NewName(s)) + types.Types[types.TBLANK] = types.New(types.TBLANK) + ir.AsNode(s.Def).Type = types.Types[types.TBLANK] + + types.Types[types.TNIL] = types.New(types.TNIL) + s = ir.BuiltinPkg.Lookup("nil") + s.Def = ir.AsTypesNode(nodnil()) + ir.AsNode(s.Def).Sym = s + ir.AsNode(s.Def).Name = new(ir.Name) + + s = ir.BuiltinPkg.Lookup("iota") + s.Def = ir.AsTypesNode(ir.Nod(ir.OIOTA, nil, nil)) + ir.AsNode(s.Def).Sym = s + ir.AsNode(s.Def).Name = new(ir.Name) } func typeinit() { @@ -173,42 +171,42 @@ func typeinit() { base.Fatalf("typeinit before betypeinit") } - for et := types.EType(0); et < NTYPE; et++ { + for et := types.EType(0); et < types.NTYPE; 
et++ { simtype[et] = et } - types.Types[TPTR] = types.New(TPTR) - dowidth(types.Types[TPTR]) + types.Types[types.TPTR] = types.New(types.TPTR) + dowidth(types.Types[types.TPTR]) - t := types.New(TUNSAFEPTR) - types.Types[TUNSAFEPTR] = t + t := types.New(types.TUNSAFEPTR) + types.Types[types.TUNSAFEPTR] = t t.Sym = unsafepkg.Lookup("Pointer") - t.Sym.Def = asTypesNode(typenod(t)) - asNode(t.Sym.Def).Name = new(Name) - dowidth(types.Types[TUNSAFEPTR]) + t.Sym.Def = ir.AsTypesNode(typenod(t)) + ir.AsNode(t.Sym.Def).Name = new(ir.Name) + dowidth(types.Types[types.TUNSAFEPTR]) - for et := TINT8; et <= TUINT64; et++ { + for et := types.TINT8; et <= types.TUINT64; et++ { isInt[et] = true } - isInt[TINT] = true - isInt[TUINT] = true - isInt[TUINTPTR] = true + isInt[types.TINT] = true + isInt[types.TUINT] = true + isInt[types.TUINTPTR] = true - isFloat[TFLOAT32] = true - isFloat[TFLOAT64] = true + isFloat[types.TFLOAT32] = true + isFloat[types.TFLOAT64] = true - isComplex[TCOMPLEX64] = true - isComplex[TCOMPLEX128] = true + isComplex[types.TCOMPLEX64] = true + isComplex[types.TCOMPLEX128] = true // initialize okfor - for et := types.EType(0); et < NTYPE; et++ { - if isInt[et] || et == TIDEAL { + for et := types.EType(0); et < types.NTYPE; et++ { + if isInt[et] || et == types.TIDEAL { okforeq[et] = true okforcmp[et] = true okforarith[et] = true okforadd[et] = true okforand[et] = true - okforconst[et] = true + ir.OKForConst[et] = true issimple[et] = true } @@ -217,7 +215,7 @@ func typeinit() { okforcmp[et] = true okforadd[et] = true okforarith[et] = true - okforconst[et] = true + ir.OKForConst[et] = true issimple[et] = true } @@ -225,43 +223,43 @@ func typeinit() { okforeq[et] = true okforadd[et] = true okforarith[et] = true - okforconst[et] = true + ir.OKForConst[et] = true issimple[et] = true } } - issimple[TBOOL] = true + issimple[types.TBOOL] = true - okforadd[TSTRING] = true + okforadd[types.TSTRING] = true - okforbool[TBOOL] = true + okforbool[types.TBOOL] = true - okforcap[TARRAY] = true - okforcap[TCHAN] = true - okforcap[TSLICE] = true + okforcap[types.TARRAY] = true + okforcap[types.TCHAN] = true + okforcap[types.TSLICE] = true - okforconst[TBOOL] = true - okforconst[TSTRING] = true + ir.OKForConst[types.TBOOL] = true + ir.OKForConst[types.TSTRING] = true - okforlen[TARRAY] = true - okforlen[TCHAN] = true - okforlen[TMAP] = true - okforlen[TSLICE] = true - okforlen[TSTRING] = true + okforlen[types.TARRAY] = true + okforlen[types.TCHAN] = true + okforlen[types.TMAP] = true + okforlen[types.TSLICE] = true + okforlen[types.TSTRING] = true - okforeq[TPTR] = true - okforeq[TUNSAFEPTR] = true - okforeq[TINTER] = true - okforeq[TCHAN] = true - okforeq[TSTRING] = true - okforeq[TBOOL] = true - okforeq[TMAP] = true // nil only; refined in typecheck - okforeq[TFUNC] = true // nil only; refined in typecheck - okforeq[TSLICE] = true // nil only; refined in typecheck - okforeq[TARRAY] = true // only if element type is comparable; refined in typecheck - okforeq[TSTRUCT] = true // only if all struct fields are comparable; refined in typecheck + okforeq[types.TPTR] = true + okforeq[types.TUNSAFEPTR] = true + okforeq[types.TINTER] = true + okforeq[types.TCHAN] = true + okforeq[types.TSTRING] = true + okforeq[types.TBOOL] = true + okforeq[types.TMAP] = true // nil only; refined in typecheck + okforeq[types.TFUNC] = true // nil only; refined in typecheck + okforeq[types.TSLICE] = true // nil only; refined in typecheck + okforeq[types.TARRAY] = true // only if element type is comparable; refined in typecheck + 
okforeq[types.TSTRUCT] = true // only if all struct fields are comparable; refined in typecheck - okforcmp[TSTRING] = true + okforcmp[types.TSTRING] = true var i int for i = 0; i < len(okfor); i++ { @@ -269,51 +267,51 @@ func typeinit() { } // binary - okfor[OADD] = okforadd[:] - okfor[OAND] = okforand[:] - okfor[OANDAND] = okforbool[:] - okfor[OANDNOT] = okforand[:] - okfor[ODIV] = okforarith[:] - okfor[OEQ] = okforeq[:] - okfor[OGE] = okforcmp[:] - okfor[OGT] = okforcmp[:] - okfor[OLE] = okforcmp[:] - okfor[OLT] = okforcmp[:] - okfor[OMOD] = okforand[:] - okfor[OMUL] = okforarith[:] - okfor[ONE] = okforeq[:] - okfor[OOR] = okforand[:] - okfor[OOROR] = okforbool[:] - okfor[OSUB] = okforarith[:] - okfor[OXOR] = okforand[:] - okfor[OLSH] = okforand[:] - okfor[ORSH] = okforand[:] + okfor[ir.OADD] = okforadd[:] + okfor[ir.OAND] = okforand[:] + okfor[ir.OANDAND] = okforbool[:] + okfor[ir.OANDNOT] = okforand[:] + okfor[ir.ODIV] = okforarith[:] + okfor[ir.OEQ] = okforeq[:] + okfor[ir.OGE] = okforcmp[:] + okfor[ir.OGT] = okforcmp[:] + okfor[ir.OLE] = okforcmp[:] + okfor[ir.OLT] = okforcmp[:] + okfor[ir.OMOD] = okforand[:] + okfor[ir.OMUL] = okforarith[:] + okfor[ir.ONE] = okforeq[:] + okfor[ir.OOR] = okforand[:] + okfor[ir.OOROR] = okforbool[:] + okfor[ir.OSUB] = okforarith[:] + okfor[ir.OXOR] = okforand[:] + okfor[ir.OLSH] = okforand[:] + okfor[ir.ORSH] = okforand[:] // unary - okfor[OBITNOT] = okforand[:] - okfor[ONEG] = okforarith[:] - okfor[ONOT] = okforbool[:] - okfor[OPLUS] = okforarith[:] + okfor[ir.OBITNOT] = okforand[:] + okfor[ir.ONEG] = okforarith[:] + okfor[ir.ONOT] = okforbool[:] + okfor[ir.OPLUS] = okforarith[:] // special - okfor[OCAP] = okforcap[:] - okfor[OLEN] = okforlen[:] + okfor[ir.OCAP] = okforcap[:] + okfor[ir.OLEN] = okforlen[:] // comparison - iscmp[OLT] = true - iscmp[OGT] = true - iscmp[OGE] = true - iscmp[OLE] = true - iscmp[OEQ] = true - iscmp[ONE] = true + iscmp[ir.OLT] = true + iscmp[ir.OGT] = true + iscmp[ir.OGE] = true + iscmp[ir.OLE] = true + iscmp[ir.OEQ] = true + iscmp[ir.ONE] = true - types.Types[TINTER] = types.New(TINTER) // empty interface + types.Types[types.TINTER] = types.New(types.TINTER) // empty interface // simple aliases - simtype[TMAP] = TPTR - simtype[TCHAN] = TPTR - simtype[TFUNC] = TPTR - simtype[TUNSAFEPTR] = TPTR + simtype[types.TMAP] = types.TPTR + simtype[types.TCHAN] = types.TPTR + simtype[types.TFUNC] = types.TPTR + simtype[types.TUNSAFEPTR] = types.TPTR slicePtrOffset = 0 sliceLenOffset = Rnd(slicePtrOffset+int64(Widthptr), int64(Widthptr)) @@ -323,29 +321,29 @@ func typeinit() { // string is same as slice wo the cap sizeofString = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr)) - dowidth(types.Types[TSTRING]) + dowidth(types.Types[types.TSTRING]) dowidth(types.UntypedString) } func makeErrorInterface() *types.Type { sig := functypefield(fakeRecvField(), nil, []*types.Field{ - types.NewField(src.NoXPos, nil, types.Types[TSTRING]), + types.NewField(src.NoXPos, nil, types.Types[types.TSTRING]), }) method := types.NewField(src.NoXPos, lookup("Error"), sig) - t := types.New(TINTER) + t := types.New(types.TINTER) t.SetInterface([]*types.Field{method}) return t } func lexinit1() { // error type - s := builtinpkg.Lookup("error") + s := ir.BuiltinPkg.Lookup("error") types.Errortype = makeErrorInterface() types.Errortype.Sym = s types.Errortype.Orig = makeErrorInterface() - s.Def = asTypesNode(typenod(types.Errortype)) + s.Def = ir.AsTypesNode(typenod(types.Errortype)) dowidth(types.Errortype) // We create separate byte and rune types for 
better error messages @@ -357,24 +355,24 @@ func lexinit1() { // type aliases, albeit at the cost of having to deal with it everywhere). // byte alias - s = builtinpkg.Lookup("byte") - types.Bytetype = types.New(TUINT8) + s = ir.BuiltinPkg.Lookup("byte") + types.Bytetype = types.New(types.TUINT8) types.Bytetype.Sym = s - s.Def = asTypesNode(typenod(types.Bytetype)) - asNode(s.Def).Name = new(Name) + s.Def = ir.AsTypesNode(typenod(types.Bytetype)) + ir.AsNode(s.Def).Name = new(ir.Name) dowidth(types.Bytetype) // rune alias - s = builtinpkg.Lookup("rune") - types.Runetype = types.New(TINT32) + s = ir.BuiltinPkg.Lookup("rune") + types.Runetype = types.New(types.TINT32) types.Runetype.Sym = s - s.Def = asTypesNode(typenod(types.Runetype)) - asNode(s.Def).Name = new(Name) + s.Def = ir.AsTypesNode(typenod(types.Runetype)) + ir.AsNode(s.Def).Name = new(ir.Name) dowidth(types.Runetype) // backend-dependent builtin types (e.g. int). for _, s := range &typedefs { - s1 := builtinpkg.Lookup(s.name) + s1 := ir.BuiltinPkg.Lookup(s.name) sameas := s.sameas32 if Widthptr == 8 { @@ -386,9 +384,9 @@ func lexinit1() { t := types.New(s.etype) t.Sym = s1 types.Types[s.etype] = t - s1.Def = asTypesNode(typenod(t)) - asNode(s1.Def).Name = new(Name) - s1.Origpkg = builtinpkg + s1.Def = ir.AsTypesNode(typenod(t)) + ir.AsNode(s1.Def).Name = new(ir.Name) + s1.Origpkg = ir.BuiltinPkg dowidth(t) } @@ -400,7 +398,7 @@ func finishUniverse() { // that we silently skip symbols that are already declared in the // package block rather than emitting a redeclared symbol error. - for _, s := range builtinpkg.Syms { + for _, s := range ir.BuiltinPkg.Syms { if s.Def == nil { continue } @@ -413,8 +411,8 @@ func finishUniverse() { s1.Block = s.Block } - nodfp = newname(lookup(".fp")) - nodfp.Type = types.Types[TINT32] - nodfp.SetClass(PPARAM) + nodfp = NewName(lookup(".fp")) + nodfp.Type = types.Types[types.TINT32] + nodfp.SetClass(ir.PPARAM) nodfp.Name.SetUsed(true) } diff --git a/src/cmd/compile/internal/gc/unsafe.go b/src/cmd/compile/internal/gc/unsafe.go index a1c1c1bf6e291..fce79a631964c 100644 --- a/src/cmd/compile/internal/gc/unsafe.go +++ b/src/cmd/compile/internal/gc/unsafe.go @@ -4,12 +4,15 @@ package gc -import "cmd/compile/internal/base" +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" +) // evalunsafe evaluates a package unsafe operation and returns the result. -func evalunsafe(n *Node) int64 { +func evalunsafe(n *ir.Node) int64 { switch n.Op { - case OALIGNOF, OSIZEOF: + case ir.OALIGNOF, ir.OSIZEOF: n.Left = typecheck(n.Left, ctxExpr) n.Left = defaultlit(n.Left, nil) tr := n.Left.Type @@ -17,14 +20,14 @@ func evalunsafe(n *Node) int64 { return 0 } dowidth(tr) - if n.Op == OALIGNOF { + if n.Op == ir.OALIGNOF { return int64(tr.Align) } return tr.Width - case OOFFSETOF: + case ir.OOFFSETOF: // must be a selector. - if n.Left.Op != OXDOT { + if n.Left.Op != ir.OXDOT { base.Errorf("invalid expression %v", n) return 0 } @@ -40,9 +43,9 @@ func evalunsafe(n *Node) int64 { return 0 } switch n.Left.Op { - case ODOT, ODOTPTR: + case ir.ODOT, ir.ODOTPTR: break - case OCALLPART: + case ir.OCALLPART: base.Errorf("invalid expression %v: argument is a method value", n) return 0 default: @@ -54,7 +57,7 @@ func evalunsafe(n *Node) int64 { var v int64 for r := n.Left; r != sbase; r = r.Left { switch r.Op { - case ODOTPTR: + case ir.ODOTPTR: // For Offsetof(s.f), s may itself be a pointer, // but accessing f must not otherwise involve // indirection via embedded pointer types. 
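
This walk is what makes unsafe.Offsetof work on promoted fields: every ODOT between the selector and its base contributes a constant offset, while an ODOTPTR past the base would mean the field is reachable only through a pointer, which is rejected. A user-level example of the accepted shape (the rejected one is noted in a comment, since it would not compile):

    package main

    import (
        "fmt"
        "unsafe"
    )

    type Inner struct {
        A byte
        B int64
    }

    type Outer struct {
        Flag  bool
        Inner // embedded by value: fields reachable without indirection
    }

    func main() {
        var o Outer
        // Offsetof(o.B) promotes through the embedded Inner; the loop in
        // evalunsafe sums the ODOT offsets (Inner within Outer, plus B
        // within Inner) into one constant.
        fmt.Println(unsafe.Offsetof(o.B))
        // Had Outer embedded *Inner instead, reaching B would need an
        // ODOTPTR indirection and evalunsafe would report an error.
    }
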
@@ -63,10 +66,10 @@ func evalunsafe(n *Node) int64 { return 0 } fallthrough - case ODOT: + case ir.ODOT: v += r.Xoffset default: - Dump("unsafenmagic", n.Left) + ir.Dump("unsafenmagic", n.Left) base.Fatalf("impossible %#v node after dot insertion", r.Op) } } diff --git a/src/cmd/compile/internal/gc/util.go b/src/cmd/compile/internal/gc/util.go index 597a29a940e56..4baddbc029a8d 100644 --- a/src/cmd/compile/internal/gc/util.go +++ b/src/cmd/compile/internal/gc/util.go @@ -12,12 +12,6 @@ import ( "cmd/compile/internal/base" ) -// Line returns n's position as a string. If n has been inlined, -// it uses the outermost position where n has been inlined. -func (n *Node) Line() string { - return base.FmtPos(n.Pos) -} - var ( memprofilerate int64 traceHandler func(string) diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index d7cd7ddf27ca3..619a413b9e49c 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/objabi" @@ -21,20 +22,20 @@ import ( const tmpstringbufsize = 32 const zeroValSize = 1024 // must match value of runtime/map.go:maxZero -func walk(fn *Node) { +func walk(fn *ir.Node) { Curfn = fn errorsBefore := base.Errors() if base.Flag.W != 0 { s := fmt.Sprintf("\nbefore walk %v", Curfn.Func.Nname.Sym) - dumplist(s, Curfn.Nbody) + ir.DumpList(s, Curfn.Nbody) } lno := base.Pos // Final typecheck for any unused variables. for i, ln := range fn.Func.Dcl { - if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) { + if ln.Op == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) { ln = typecheck(ln, ctxExpr|ctxAssign) fn.Func.Dcl[i] = ln } @@ -42,16 +43,16 @@ func walk(fn *Node) { // Propagate the used flag for typeswitch variables up to the NONAME in its definition. 
for _, ln := range fn.Func.Dcl { - if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) && ln.Name.Defn != nil && ln.Name.Defn.Op == OTYPESW && ln.Name.Used() { + if ln.Op == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) && ln.Name.Defn != nil && ln.Name.Defn.Op == ir.OTYPESW && ln.Name.Used() { ln.Name.Defn.Left.Name.SetUsed(true) } } for _, ln := range fn.Func.Dcl { - if ln.Op != ONAME || (ln.Class() != PAUTO && ln.Class() != PAUTOHEAP) || ln.Sym.Name[0] == '&' || ln.Name.Used() { + if ln.Op != ir.ONAME || (ln.Class() != ir.PAUTO && ln.Class() != ir.PAUTOHEAP) || ln.Sym.Name[0] == '&' || ln.Name.Used() { continue } - if defn := ln.Name.Defn; defn != nil && defn.Op == OTYPESW { + if defn := ln.Name.Defn; defn != nil && defn.Op == ir.OTYPESW { if defn.Left.Name.Used() { continue } @@ -69,32 +70,32 @@ func walk(fn *Node) { walkstmtlist(Curfn.Nbody.Slice()) if base.Flag.W != 0 { s := fmt.Sprintf("after walk %v", Curfn.Func.Nname.Sym) - dumplist(s, Curfn.Nbody) + ir.DumpList(s, Curfn.Nbody) } zeroResults() heapmoves() if base.Flag.W != 0 && Curfn.Func.Enter.Len() > 0 { s := fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym) - dumplist(s, Curfn.Func.Enter) + ir.DumpList(s, Curfn.Func.Enter) } } -func walkstmtlist(s []*Node) { +func walkstmtlist(s []*ir.Node) { for i := range s { s[i] = walkstmt(s[i]) } } -func paramoutheap(fn *Node) bool { +func paramoutheap(fn *ir.Node) bool { for _, ln := range fn.Func.Dcl { switch ln.Class() { - case PPARAMOUT: - if ln.isParamStackCopy() || ln.Name.Addrtaken() { + case ir.PPARAMOUT: + if isParamStackCopy(ln) || ln.Name.Addrtaken() { return true } - case PAUTO: + case ir.PAUTO: // stop early - parameters are over return false } @@ -105,7 +106,7 @@ func paramoutheap(fn *Node) bool { // The result of walkstmt MUST be assigned back to n, e.g. // n.Left = walkstmt(n.Left) -func walkstmt(n *Node) *Node { +func walkstmt(n *ir.Node) *ir.Node { if n == nil { return n } @@ -116,49 +117,49 @@ func walkstmt(n *Node) *Node { switch n.Op { default: - if n.Op == ONAME { + if n.Op == ir.ONAME { base.Errorf("%v is not a top level statement", n.Sym) } else { base.Errorf("%v is not a top level statement", n.Op) } - Dump("nottop", n) - - case OAS, - OASOP, - OAS2, - OAS2DOTTYPE, - OAS2RECV, - OAS2FUNC, - OAS2MAPR, - OCLOSE, - OCOPY, - OCALLMETH, - OCALLINTER, - OCALL, - OCALLFUNC, - ODELETE, - OSEND, - OPRINT, - OPRINTN, - OPANIC, - OEMPTY, - ORECOVER, - OGETG: + ir.Dump("nottop", n) + + case ir.OAS, + ir.OASOP, + ir.OAS2, + ir.OAS2DOTTYPE, + ir.OAS2RECV, + ir.OAS2FUNC, + ir.OAS2MAPR, + ir.OCLOSE, + ir.OCOPY, + ir.OCALLMETH, + ir.OCALLINTER, + ir.OCALL, + ir.OCALLFUNC, + ir.ODELETE, + ir.OSEND, + ir.OPRINT, + ir.OPRINTN, + ir.OPANIC, + ir.OEMPTY, + ir.ORECOVER, + ir.OGETG: if n.Typecheck() == 0 { base.Fatalf("missing typecheck: %+v", n) } - wascopy := n.Op == OCOPY + wascopy := n.Op == ir.OCOPY init := n.Ninit n.Ninit.Set(nil) n = walkexpr(n, &init) n = addinit(n, init.Slice()) - if wascopy && n.Op == OCONVNOP { - n.Op = OEMPTY // don't leave plain values as statements. + if wascopy && n.Op == ir.OCONVNOP { + n.Op = ir.OEMPTY // don't leave plain values as statements. } // special case for a receive where we throw away // the value received. 
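
Concretely, the receive-as-statement case covers code like the sketch below; it is lowered to a chanrecv1 call whose destination pointer is nil, so the received value is never stored anywhere (a rough description of the effect, inferred from the chanfn calls elsewhere in this file):

    package main

    func main() {
        c := make(chan int, 1)
        c <- 42
        // A receive used as a statement discards its value; walkstmt
        // rewrites it into a chanrecv1 call with a nil destination, so
        // no temporary is allocated for the result.
        <-c
    }
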
- case ORECV: + case ir.ORECV: if n.Typecheck() == 0 { base.Fatalf("missing typecheck: %+v", n) } @@ -171,44 +172,44 @@ func walkstmt(n *Node) *Node { n = addinit(n, init.Slice()) - case OBREAK, - OCONTINUE, - OFALL, - OGOTO, - OLABEL, - ODCLCONST, - ODCLTYPE, - OCHECKNIL, - OVARDEF, - OVARKILL, - OVARLIVE: + case ir.OBREAK, + ir.OCONTINUE, + ir.OFALL, + ir.OGOTO, + ir.OLABEL, + ir.ODCLCONST, + ir.ODCLTYPE, + ir.OCHECKNIL, + ir.OVARDEF, + ir.OVARKILL, + ir.OVARLIVE: break - case ODCL: + case ir.ODCL: v := n.Left - if v.Class() == PAUTOHEAP { + if v.Class() == ir.PAUTOHEAP { if base.Flag.CompilingRuntime { base.Errorf("%v escapes to heap, not allowed in runtime", v) } if prealloc[v] == nil { prealloc[v] = callnew(v.Type) } - nn := nod(OAS, v.Name.Param.Heapaddr, prealloc[v]) + nn := ir.Nod(ir.OAS, v.Name.Param.Heapaddr, prealloc[v]) nn.SetColas(true) nn = typecheck(nn, ctxStmt) return walkstmt(nn) } - case OBLOCK: + case ir.OBLOCK: walkstmtlist(n.List.Slice()) - case OCASE: + case ir.OCASE: base.Errorf("case statement out of place") - case ODEFER: + case ir.ODEFER: Curfn.Func.SetHasDefer(true) - Curfn.Func.numDefers++ - if Curfn.Func.numDefers > maxOpenDefers { + Curfn.Func.NumDefers++ + if Curfn.Func.NumDefers > maxOpenDefers { // Don't allow open-coded defers if there are more than // 8 defers in the function, since we use a single // byte to record active defers. @@ -220,22 +221,22 @@ func walkstmt(n *Node) *Node { Curfn.Func.SetOpenCodedDeferDisallowed(true) } fallthrough - case OGO: + case ir.OGO: switch n.Left.Op { - case OPRINT, OPRINTN: + case ir.OPRINT, ir.OPRINTN: n.Left = wrapCall(n.Left, &n.Ninit) - case ODELETE: + case ir.ODELETE: if mapfast(n.Left.List.First().Type) == mapslow { n.Left = wrapCall(n.Left, &n.Ninit) } else { n.Left = walkexpr(n.Left, &n.Ninit) } - case OCOPY: + case ir.OCOPY: n.Left = copyany(n.Left, &n.Ninit, true) - case OCALLFUNC, OCALLMETH, OCALLINTER: + case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: if n.Left.Nbody.Len() > 0 { n.Left = wrapCall(n.Left, &n.Ninit) } else { @@ -246,7 +247,7 @@ func walkstmt(n *Node) *Node { n.Left = walkexpr(n.Left, &n.Ninit) } - case OFOR, OFORUNTIL: + case ir.OFOR, ir.OFORUNTIL: if n.Left != nil { walkstmtlist(n.Left.Ninit.Slice()) init := n.Left.Ninit @@ -256,34 +257,34 @@ func walkstmt(n *Node) *Node { } n.Right = walkstmt(n.Right) - if n.Op == OFORUNTIL { + if n.Op == ir.OFORUNTIL { walkstmtlist(n.List.Slice()) } walkstmtlist(n.Nbody.Slice()) - case OIF: + case ir.OIF: n.Left = walkexpr(n.Left, &n.Ninit) walkstmtlist(n.Nbody.Slice()) walkstmtlist(n.Rlist.Slice()) - case ORETURN: - Curfn.Func.numReturns++ + case ir.ORETURN: + Curfn.Func.NumReturns++ if n.List.Len() == 0 { break } if (Curfn.Type.FuncType().Outnamed && n.List.Len() > 1) || paramoutheap(Curfn) { // assign to the function out parameters, // so that reorder3 can fix up conflicts - var rl []*Node + var rl []*ir.Node for _, ln := range Curfn.Func.Dcl { cl := ln.Class() - if cl == PAUTO || cl == PAUTOHEAP { + if cl == ir.PAUTO || cl == ir.PAUTOHEAP { break } - if cl == PPARAMOUT { - if ln.isParamStackCopy() { - ln = walkexpr(typecheck(nod(ODEREF, ln.Name.Param.Heapaddr, nil), ctxExpr), nil) + if cl == ir.PPARAMOUT { + if isParamStackCopy(ln) { + ln = walkexpr(typecheck(ir.Nod(ir.ODEREF, ln.Name.Param.Heapaddr, nil), ctxExpr), nil) } rl = append(rl, ln) } @@ -307,34 +308,34 @@ func walkstmt(n *Node) *Node { // For each return parameter (lhs), assign the corresponding result (rhs). 
lhs := Curfn.Type.Results() rhs := n.List.Slice() - res := make([]*Node, lhs.NumFields()) + res := make([]*ir.Node, lhs.NumFields()) for i, nl := range lhs.FieldSlice() { - nname := asNode(nl.Nname) - if nname.isParamHeapCopy() { + nname := ir.AsNode(nl.Nname) + if isParamHeapCopy(nname) { nname = nname.Name.Param.Stackcopy } - a := nod(OAS, nname, rhs[i]) + a := ir.Nod(ir.OAS, nname, rhs[i]) res[i] = convas(a, &n.Ninit) } n.List.Set(res) - case ORETJMP: + case ir.ORETJMP: break - case OINLMARK: + case ir.OINLMARK: break - case OSELECT: + case ir.OSELECT: walkselect(n) - case OSWITCH: + case ir.OSWITCH: walkswitch(n) - case ORANGE: + case ir.ORANGE: n = walkrange(n) } - if n.Op == ONAME { + if n.Op == ir.ONAME { base.Fatalf("walkstmt ended up with name: %+v", n) } return n @@ -345,20 +346,20 @@ func walkstmt(n *Node) *Node { // the types expressions are calculated. // compile-time constants are evaluated. // complex side effects like statements are appended to init -func walkexprlist(s []*Node, init *Nodes) { +func walkexprlist(s []*ir.Node, init *ir.Nodes) { for i := range s { s[i] = walkexpr(s[i], init) } } -func walkexprlistsafe(s []*Node, init *Nodes) { +func walkexprlistsafe(s []*ir.Node, init *ir.Nodes) { for i, n := range s { s[i] = safeexpr(n, init) s[i] = walkexpr(s[i], init) } } -func walkexprlistcheap(s []*Node, init *Nodes) { +func walkexprlistcheap(s []*ir.Node, init *ir.Nodes) { for i, n := range s { s[i] = cheapexpr(n, init) s[i] = walkexpr(s[i], init) @@ -381,7 +382,7 @@ func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) { return "convT16", false case from.Size() == 4 && from.Align == 4 && !from.HasPointers(): return "convT32", false - case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !from.HasPointers(): + case from.Size() == 8 && from.Align == types.Types[types.TUINT64].Align && !from.HasPointers(): return "convT64", false } if sc := from.SoleComponent(); sc != nil { @@ -412,7 +413,7 @@ func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) { // The result of walkexpr MUST be assigned back to n, e.g. // n.Left = walkexpr(n.Left, init) -func walkexpr(n *Node, init *Nodes) *Node { +func walkexpr(n *ir.Node, init *ir.Nodes) *ir.Node { if n == nil { return n } @@ -420,7 +421,7 @@ func walkexpr(n *Node, init *Nodes) *Node { // Eagerly checkwidth all expressions for the back end. 
if n.Type != nil && !n.Type.WidthCalculated() { switch n.Type.Etype { - case TBLANK, TNIL, TIDEAL: + case types.TBLANK, types.TNIL, types.TIDEAL: default: checkwidth(n.Type) } @@ -441,7 +442,7 @@ func walkexpr(n *Node, init *Nodes) *Node { lno := setlineno(n) if base.Flag.LowerW > 1 { - Dump("before walk expr", n) + ir.Dump("before walk expr", n) } if n.Typecheck() != 1 { @@ -452,8 +453,8 @@ func walkexpr(n *Node, init *Nodes) *Node { base.Fatalf("expression has untyped type: %+v", n) } - if n.Op == ONAME && n.Class() == PAUTOHEAP { - nn := nod(ODEREF, n.Name.Param.Heapaddr, nil) + if n.Op == ir.ONAME && n.Class() == ir.PAUTOHEAP { + nn := ir.Nod(ir.ODEREF, n.Name.Param.Heapaddr, nil) nn = typecheck(nn, ctxExpr) nn = walkexpr(nn, init) nn.Left.MarkNonNil() @@ -463,44 +464,44 @@ func walkexpr(n *Node, init *Nodes) *Node { opswitch: switch n.Op { default: - Dump("walk", n) + ir.Dump("walk", n) base.Fatalf("walkexpr: switch 1 unknown op %+S", n) - case ONONAME, OEMPTY, OGETG, ONEWOBJ, OMETHEXPR: + case ir.ONONAME, ir.OEMPTY, ir.OGETG, ir.ONEWOBJ, ir.OMETHEXPR: - case OTYPE, ONAME, OLITERAL, ONIL: + case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL: // TODO(mdempsky): Just return n; see discussion on CL 38655. // Perhaps refactor to use Node.mayBeShared for these instead. // If these return early, make sure to still call // stringsym for constant strings. - case ONOT, ONEG, OPLUS, OBITNOT, OREAL, OIMAG, ODOTMETH, ODOTINTER, - ODEREF, OSPTR, OITAB, OIDATA, OADDR: + case ir.ONOT, ir.ONEG, ir.OPLUS, ir.OBITNOT, ir.OREAL, ir.OIMAG, ir.ODOTMETH, ir.ODOTINTER, + ir.ODEREF, ir.OSPTR, ir.OITAB, ir.OIDATA, ir.OADDR: n.Left = walkexpr(n.Left, init) - case OEFACE, OAND, OANDNOT, OSUB, OMUL, OADD, OOR, OXOR, OLSH, ORSH: + case ir.OEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH: n.Left = walkexpr(n.Left, init) n.Right = walkexpr(n.Right, init) - case ODOT, ODOTPTR: + case ir.ODOT, ir.ODOTPTR: usefield(n) n.Left = walkexpr(n.Left, init) - case ODOTTYPE, ODOTTYPE2: + case ir.ODOTTYPE, ir.ODOTTYPE2: n.Left = walkexpr(n.Left, init) // Set up interface type addresses for back end. n.Right = typename(n.Type) - if n.Op == ODOTTYPE { + if n.Op == ir.ODOTTYPE { n.Right.Right = typename(n.Left.Type) } if !n.Type.IsInterface() && !n.Left.Type.IsEmptyInterface() { n.List.Set1(itabname(n.Type, n.Left.Type)) } - case OLEN, OCAP: + case ir.OLEN, ir.OCAP: if isRuneCount(n) { // Replace len([]rune(string)) with runtime.countrunes(string). - n = mkcall("countrunes", n.Type, init, conv(n.Left.Left, types.Types[TSTRING])) + n = mkcall("countrunes", n.Type, init, conv(n.Left.Left, types.Types[types.TSTRING])) break } @@ -519,7 +520,7 @@ opswitch: n.SetTypecheck(1) } - case OCOMPLEX: + case ir.OCOMPLEX: // Use results from call expression as arguments for complex. if n.Left == nil && n.Right == nil { n.Left = n.List.First() @@ -528,38 +529,38 @@ opswitch: n.Left = walkexpr(n.Left, init) n.Right = walkexpr(n.Right, init) - case OEQ, ONE, OLT, OLE, OGT, OGE: + case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: n = walkcompare(n, init) - case OANDAND, OOROR: + case ir.OANDAND, ir.OOROR: n.Left = walkexpr(n.Left, init) // cannot put side effects from n.Right on init, // because they cannot run before n.Left is checked. // save elsewhere and store on the eventual n.Right. 
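
The constraint on OANDAND/OOROR is visible in ordinary Go: whatever code is generated for the right operand must stay attached to that operand, because it may never run. For instance:

    package main

    import "fmt"

    func main() {
        var p *struct{ n int }
        // Any side effects lowered for p.n (here, the implicit nil
        // check) may only execute after p != nil evaluates to true,
        // which is why they are stored back onto n.Right rather than
        // hoisted into init.
        if p != nil && p.n > 0 {
            fmt.Println("positive")
        } else {
            fmt.Println("nil or non-positive")
        }
    }
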
- var ll Nodes + var ll ir.Nodes n.Right = walkexpr(n.Right, &ll) n.Right = addinit(n.Right, ll.Slice()) - case OPRINT, OPRINTN: + case ir.OPRINT, ir.OPRINTN: n = walkprint(n, init) - case OPANIC: + case ir.OPANIC: n = mkcall("gopanic", nil, init, n.Left) - case ORECOVER: - n = mkcall("gorecover", n.Type, init, nod(OADDR, nodfp, nil)) + case ir.ORECOVER: + n = mkcall("gorecover", n.Type, init, ir.Nod(ir.OADDR, nodfp, nil)) - case OCLOSUREVAR, OCFUNC: + case ir.OCLOSUREVAR, ir.OCFUNC: - case OCALLINTER, OCALLFUNC, OCALLMETH: - if n.Op == OCALLINTER { + case ir.OCALLINTER, ir.OCALLFUNC, ir.OCALLMETH: + if n.Op == ir.OCALLINTER { usemethod(n) markUsedIfaceMethod(n) } - if n.Op == OCALLFUNC && n.Left.Op == OCLOSURE { + if n.Op == ir.OCALLFUNC && n.Left.Op == ir.OCLOSURE { // Transform direct call of a closure to call of a normal function. // transformclosure already did all preparation work. @@ -581,12 +582,12 @@ opswitch: walkCall(n, init) - case OAS, OASOP: + case ir.OAS, ir.OASOP: init.AppendNodes(&n.Ninit) // Recognize m[k] = append(m[k], ...) so we can reuse // the mapassign call. - mapAppend := n.Left.Op == OINDEXMAP && n.Right.Op == OAPPEND + mapAppend := n.Left.Op == ir.OINDEXMAP && n.Right.Op == ir.OAPPEND if mapAppend && !samesafeexpr(n.Left, n.Right.List.First()) { base.Fatalf("not same expressions: %v != %v", n.Left, n.Right.List.First()) } @@ -598,12 +599,12 @@ opswitch: n.Right.List.SetFirst(n.Left) } - if n.Op == OASOP { + if n.Op == ir.OASOP { // Rewrite x op= y into x = x op y. - n.Right = nod(n.SubOp(), n.Left, n.Right) + n.Right = ir.Nod(n.SubOp(), n.Left, n.Right) n.Right = typecheck(n.Right, ctxExpr) - n.Op = OAS + n.Op = ir.OAS n.ResetAux() } @@ -624,18 +625,18 @@ opswitch: default: n.Right = walkexpr(n.Right, init) - case ORECV: + case ir.ORECV: // x = <-c; n.Left is x, n.Right.Left is c. // order.stmt made sure x is addressable. n.Right.Left = walkexpr(n.Right.Left, init) - n1 := nod(OADDR, n.Left, nil) + n1 := ir.Nod(ir.OADDR, n.Left, nil) r := n.Right.Left // the channel n = mkcall1(chanfn("chanrecv1", 2, r.Type), nil, init, r, n1) n = walkexpr(n, init) break opswitch - case OAPPEND: + case ir.OAPPEND: // x = append(...) r := n.Right if r.Type.Elem().NotInHeap() { @@ -651,7 +652,7 @@ opswitch: r = walkappend(r, init, n) } n.Right = r - if r.Op == OAPPEND { + if r.Op == ir.OAPPEND { // Left in place for back end. // Do not add a new write barrier. // Set up address of type for back end. @@ -666,16 +667,16 @@ opswitch: n = convas(n, init) } - case OAS2: + case ir.OAS2: init.AppendNodes(&n.Ninit) walkexprlistsafe(n.List.Slice(), init) walkexprlistsafe(n.Rlist.Slice(), init) - ll := ascompatee(OAS, n.List.Slice(), n.Rlist.Slice(), init) + ll := ascompatee(ir.OAS, n.List.Slice(), n.Rlist.Slice(), init) ll = reorder3(ll) n = liststmt(ll) // a,b,... = fn() - case OAS2FUNC: + case ir.OAS2FUNC: init.AppendNodes(&n.Ninit) r := n.Right @@ -693,26 +694,26 @@ opswitch: // x, y = <-c // order.stmt made sure x is addressable or blank. 
- case OAS2RECV: + case ir.OAS2RECV: init.AppendNodes(&n.Ninit) r := n.Right walkexprlistsafe(n.List.Slice(), init) r.Left = walkexpr(r.Left, init) - var n1 *Node - if n.List.First().isBlank() { + var n1 *ir.Node + if ir.IsBlank(n.List.First()) { n1 = nodnil() } else { - n1 = nod(OADDR, n.List.First(), nil) + n1 = ir.Nod(ir.OADDR, n.List.First(), nil) } fn := chanfn("chanrecv2", 2, r.Left.Type) ok := n.List.Second() - call := mkcall1(fn, types.Types[TBOOL], init, r.Left, n1) - n = nod(OAS, ok, call) + call := mkcall1(fn, types.Types[types.TBOOL], init, r.Left, n1) + n = ir.Nod(ir.OAS, ok, call) n = typecheck(n, ctxStmt) // a,b = m[i] - case OAS2MAPR: + case ir.OAS2MAPR: init.AppendNodes(&n.Ninit) r := n.Right @@ -722,14 +723,14 @@ opswitch: t := r.Left.Type fast := mapfast(t) - var key *Node + var key *ir.Node if fast != mapslow { // fast versions take key by value key = r.Right } else { // standard version takes key by reference // order.expr made sure key is addressable. - key = nod(OADDR, r.Right, nil) + key = ir.Nod(ir.OADDR, r.Right, nil) } // from: @@ -751,27 +752,27 @@ opswitch: // mapaccess2* returns a typed bool, but due to spec changes, // the boolean result of i.(T) is now untyped so we make it the // same type as the variable on the lhs. - if ok := n.List.Second(); !ok.isBlank() && ok.Type.IsBoolean() { + if ok := n.List.Second(); !ir.IsBlank(ok) && ok.Type.IsBoolean() { r.Type.Field(1).Type = ok.Type } n.Right = r - n.Op = OAS2FUNC + n.Op = ir.OAS2FUNC // don't generate a = *var if a is _ - if !a.isBlank() { + if !ir.IsBlank(a) { var_ := temp(types.NewPtr(t.Elem())) var_.SetTypecheck(1) var_.MarkNonNil() // mapaccess always returns a non-nil pointer n.List.SetFirst(var_) n = walkexpr(n, init) init.Append(n) - n = nod(OAS, a, nod(ODEREF, var_, nil)) + n = ir.Nod(ir.OAS, a, ir.Nod(ir.ODEREF, var_, nil)) } n = typecheck(n, ctxStmt) n = walkexpr(n, init) - case ODELETE: + case ir.ODELETE: init.AppendNodes(&n.Ninit) map_ := n.List.First() key := n.List.Second() @@ -782,26 +783,26 @@ opswitch: fast := mapfast(t) if fast == mapslow { // order.stmt made sure key is addressable. - key = nod(OADDR, key, nil) + key = ir.Nod(ir.OADDR, key, nil) } n = mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key) - case OAS2DOTTYPE: + case ir.OAS2DOTTYPE: walkexprlistsafe(n.List.Slice(), init) n.Right = walkexpr(n.Right, init) - case OCONVIFACE: + case ir.OCONVIFACE: n.Left = walkexpr(n.Left, init) fromType := n.Left.Type toType := n.Type - if !fromType.IsInterface() && !Curfn.Func.Nname.isBlank() { // skip unnamed functions (func _()) - markTypeUsedInInterface(fromType, Curfn.Func.lsym) + if !fromType.IsInterface() && !ir.IsBlank(Curfn.Func.Nname) { // skip unnamed functions (func _()) + markTypeUsedInInterface(fromType, Curfn.Func.LSym) } // typeword generates the type word of the interface value. - typeword := func() *Node { + typeword := func() *ir.Node { if toType.IsEmptyInterface() { return typename(fromType) } @@ -810,7 +811,7 @@ opswitch: // Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped. 
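
In source terms, the pointer-shaped fast path and the convT* fallback differ like this (a sketch of the observable split, not of the generated code):

    package main

    import "fmt"

    func main() {
        v := 42

        // *int is pointer-shaped (isdirectiface), so this conversion is
        // the two-word copy described above: {type word, pointer word},
        // with no runtime call.
        var direct interface{} = &v

        // int is not pointer-shaped; this conversion takes one of the
        // later branches: a readonly/stack value when the optimizations
        // below apply, or a convT* runtime helper otherwise.
        var boxed interface{} = v

        fmt.Println(direct, boxed)
    }
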
if isdirectiface(fromType) { - l := nod(OEFACE, typeword(), n.Left) + l := ir.Nod(ir.OEFACE, typeword(), n.Left) l.Type = toType l.SetTypecheck(n.Typecheck()) n = l @@ -818,20 +819,20 @@ opswitch: } if staticuint64s == nil { - staticuint64s = newname(Runtimepkg.Lookup("staticuint64s")) - staticuint64s.SetClass(PEXTERN) + staticuint64s = NewName(Runtimepkg.Lookup("staticuint64s")) + staticuint64s.SetClass(ir.PEXTERN) // The actual type is [256]uint64, but we use [256*8]uint8 so we can address // individual bytes. - staticuint64s.Type = types.NewArray(types.Types[TUINT8], 256*8) - zerobase = newname(Runtimepkg.Lookup("zerobase")) - zerobase.SetClass(PEXTERN) - zerobase.Type = types.Types[TUINTPTR] + staticuint64s.Type = types.NewArray(types.Types[types.TUINT8], 256*8) + zerobase = NewName(Runtimepkg.Lookup("zerobase")) + zerobase.SetClass(ir.PEXTERN) + zerobase.Type = types.Types[types.TUINTPTR] } // Optimize convT2{E,I} for many cases in which T is not pointer-shaped, // by using an existing addressable value identical to n.Left // or creating one on the stack. - var value *Node + var value *ir.Node switch { case fromType.Size() == 0: // n.Left is zero-sized. Use zerobase. @@ -842,25 +843,25 @@ opswitch: // and staticuint64s[n.Left * 8 + 7] on big-endian. n.Left = cheapexpr(n.Left, init) // byteindex widens n.Left so that the multiplication doesn't overflow. - index := nod(OLSH, byteindex(n.Left), nodintconst(3)) + index := ir.Nod(ir.OLSH, byteindex(n.Left), nodintconst(3)) if thearch.LinkArch.ByteOrder == binary.BigEndian { - index = nod(OADD, index, nodintconst(7)) + index = ir.Nod(ir.OADD, index, nodintconst(7)) } - value = nod(OINDEX, staticuint64s, index) + value = ir.Nod(ir.OINDEX, staticuint64s, index) value.SetBounded(true) - case n.Left.Class() == PEXTERN && n.Left.Name != nil && n.Left.Name.Readonly(): + case n.Left.Class() == ir.PEXTERN && n.Left.Name != nil && n.Left.Name.Readonly(): // n.Left is a readonly global; use it directly. value = n.Left case !fromType.IsInterface() && n.Esc == EscNone && fromType.Width <= 1024: // n.Left does not escape. Use a stack temporary initialized to n.Left. value = temp(fromType) - init.Append(typecheck(nod(OAS, value, n.Left), ctxStmt)) + init.Append(typecheck(ir.Nod(ir.OAS, value, n.Left), ctxStmt)) } if value != nil { // Value is identical to n.Left. // Construct the interface directly: {type/itab, &value}. - l := nod(OEFACE, typeword(), typecheck(nod(OADDR, value, nil), ctxExpr)) + l := ir.Nod(ir.OEFACE, typeword(), typecheck(ir.Nod(ir.OADDR, value, nil), ctxExpr)) l.Type = toType l.SetTypecheck(n.Typecheck()) n = l @@ -876,19 +877,19 @@ opswitch: if toType.IsEmptyInterface() && fromType.IsInterface() && !fromType.IsEmptyInterface() { // Evaluate the input interface. c := temp(fromType) - init.Append(nod(OAS, c, n.Left)) + init.Append(ir.Nod(ir.OAS, c, n.Left)) // Get the itab out of the interface. - tmp := temp(types.NewPtr(types.Types[TUINT8])) - init.Append(nod(OAS, tmp, typecheck(nod(OITAB, c, nil), ctxExpr))) + tmp := temp(types.NewPtr(types.Types[types.TUINT8])) + init.Append(ir.Nod(ir.OAS, tmp, typecheck(ir.Nod(ir.OITAB, c, nil), ctxExpr))) // Get the type out of the itab. - nif := nod(OIF, typecheck(nod(ONE, tmp, nodnil()), ctxExpr), nil) - nif.Nbody.Set1(nod(OAS, tmp, itabType(tmp))) + nif := ir.Nod(ir.OIF, typecheck(ir.Nod(ir.ONE, tmp, nodnil()), ctxExpr), nil) + nif.Nbody.Set1(ir.Nod(ir.OAS, tmp, itabType(tmp))) init.Append(nif) // Build the result. 
- e := nod(OEFACE, tmp, ifaceData(n.Pos, c, types.NewPtr(types.Types[TUINT8]))) + e := ir.Nod(ir.OEFACE, tmp, ifaceData(n.Pos, c, types.NewPtr(types.Types[types.TUINT8]))) e.Type = toType // assign type manually, typecheck doesn't understand OEFACE. e.SetTypecheck(1) n = e @@ -905,19 +906,19 @@ opswitch: dowidth(fromType) fn = substArgTypes(fn, fromType) dowidth(fn.Type) - call := nod(OCALL, fn, nil) + call := ir.Nod(ir.OCALL, fn, nil) call.List.Set1(n.Left) call = typecheck(call, ctxExpr) call = walkexpr(call, init) call = safeexpr(call, init) - e := nod(OEFACE, typeword(), call) + e := ir.Nod(ir.OEFACE, typeword(), call) e.Type = toType e.SetTypecheck(1) n = e break } - var tab *Node + var tab *ir.Node if fromType.IsInterface() { // convI2I tab = typename(toType) @@ -937,21 +938,21 @@ opswitch: if !islvalue(v) { v = copyexpr(v, v.Type, init) } - v = nod(OADDR, v, nil) + v = ir.Nod(ir.OADDR, v, nil) } dowidth(fromType) fn := syslook(fnname) fn = substArgTypes(fn, fromType, toType) dowidth(fn.Type) - n = nod(OCALL, fn, nil) + n = ir.Nod(ir.OCALL, fn, nil) n.List.Set2(tab, v) n = typecheck(n, ctxExpr) n = walkexpr(n, init) - case OCONV, OCONVNOP: + case ir.OCONV, ir.OCONVNOP: n.Left = walkexpr(n.Left, init) - if n.Op == OCONVNOP && checkPtr(Curfn, 1) { + if n.Op == ir.OCONVNOP && checkPtr(Curfn, 1) { if n.Type.IsPtr() && n.Left.Type.IsUnsafePtr() { // unsafe.Pointer to *T n = walkCheckPtrAlignment(n, init, nil) break @@ -962,22 +963,22 @@ opswitch: } } param, result := rtconvfn(n.Left.Type, n.Type) - if param == Txxx { + if param == types.Txxx { break } - fn := basicnames[param] + "to" + basicnames[result] + fn := ir.BasicTypeNames[param] + "to" + ir.BasicTypeNames[result] n = conv(mkcall(fn, types.Types[result], init, conv(n.Left, types.Types[param])), n.Type) - case ODIV, OMOD: + case ir.ODIV, ir.OMOD: n.Left = walkexpr(n.Left, init) n.Right = walkexpr(n.Right, init) // rewrite complex div into function call. et := n.Left.Type.Etype - if isComplex[et] && n.Op == ODIV { + if isComplex[et] && n.Op == ir.ODIV { t := n.Type - n = mkcall("complex128div", types.Types[TCOMPLEX128], init, conv(n.Left, types.Types[TCOMPLEX128]), conv(n.Right, types.Types[TCOMPLEX128])) + n = mkcall("complex128div", types.Types[types.TCOMPLEX128], init, conv(n.Left, types.Types[types.TCOMPLEX128]), conv(n.Right, types.Types[types.TCOMPLEX128])) n = conv(n, t) break } @@ -990,12 +991,12 @@ opswitch: // rewrite 64-bit div and mod on 32-bit architectures. // TODO: Remove this code once we can introduce // runtime calls late in SSA processing. - if Widthreg < 8 && (et == TINT64 || et == TUINT64) { - if n.Right.Op == OLITERAL { + if Widthreg < 8 && (et == types.TINT64 || et == types.TUINT64) { + if n.Right.Op == ir.OLITERAL { // Leave div/mod by constant powers of 2 or small 16-bit constants. // The SSA backend will handle those. 
switch et { - case TINT64: + case types.TINT64: c := n.Right.Int64Val() if c < 0 { c = -c @@ -1003,7 +1004,7 @@ opswitch: if c != 0 && c&(c-1) == 0 { break opswitch } - case TUINT64: + case types.TUINT64: c := n.Right.Uint64Val() if c < 1<<16 { break opswitch @@ -1014,12 +1015,12 @@ opswitch: } } var fn string - if et == TINT64 { + if et == types.TINT64 { fn = "int64" } else { fn = "uint64" } - if n.Op == ODIV { + if n.Op == ir.ODIV { fn += "div" } else { fn += "mod" @@ -1027,7 +1028,7 @@ opswitch: n = mkcall(fn, n.Type, init, conv(n.Left, types.Types[et]), conv(n.Right, types.Types[et])) } - case OINDEX: + case ir.OINDEX: n.Left = walkexpr(n.Left, init) // save the original node for bounds checking elision. @@ -1047,15 +1048,15 @@ opswitch: } if t.IsArray() { n.SetBounded(bounded(r, t.NumElem())) - if base.Flag.LowerM != 0 && n.Bounded() && !Isconst(n.Right, constant.Int) { + if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Right, constant.Int) { base.Warn("index bounds check elided") } if smallintconst(n.Right) && !n.Bounded() { base.Errorf("index out of bounds") } - } else if Isconst(n.Left, constant.String) { + } else if ir.IsConst(n.Left, constant.String) { n.SetBounded(bounded(r, int64(len(n.Left.StringVal())))) - if base.Flag.LowerM != 0 && n.Bounded() && !Isconst(n.Right, constant.Int) { + if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Right, constant.Int) { base.Warn("index bounds check elided") } if smallintconst(n.Right) && !n.Bounded() { @@ -1063,13 +1064,13 @@ opswitch: } } - if Isconst(n.Right, constant.Int) { - if v := n.Right.Val(); constant.Sign(v) < 0 || doesoverflow(v, types.Types[TINT]) { + if ir.IsConst(n.Right, constant.Int) { + if v := n.Right.Val(); constant.Sign(v) < 0 || doesoverflow(v, types.Types[types.TINT]) { base.Errorf("index out of bounds") } } - case OINDEXMAP: + case ir.OINDEXMAP: // Replace m[k] with *map{access1,assign}(maptype, m, &k) n.Left = walkexpr(n.Left, init) n.Right = walkexpr(n.Right, init) @@ -1082,7 +1083,7 @@ opswitch: if fast == mapslow { // standard version takes key by reference. // order.expr made sure key is addressable. - key = nod(OADDR, key, nil) + key = ir.Nod(ir.OADDR, key, nil) } n = mkcall1(mapfn(mapassign[fast], t), nil, init, typename(t), map_, key) } else { @@ -1091,7 +1092,7 @@ opswitch: if fast == mapslow { // standard version takes key by reference. // order.expr made sure key is addressable. - key = nod(OADDR, key, nil) + key = ir.Nod(ir.OADDR, key, nil) } if w := t.Elem().Width; w <= zeroValSize { @@ -1103,20 +1104,20 @@ opswitch: } n.Type = types.NewPtr(t.Elem()) n.MarkNonNil() // mapaccess1* and mapassign always return non-nil pointers. 
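
The OINDEXMAP lowering assembled here is why a missing key still reads as the zero value; a source-level sketch, with the helper names taken from this hunk:

    package main

    import "fmt"

    func main() {
        m := map[string]int{"a": 1}
        // A map read becomes *mapaccess1(maptype, m, &k) (or a mapfast
        // variant taking the key by value). The helper returns the
        // address of a shared zero value when the key is absent, never
        // nil, so the inserted ODEREF is unconditionally safe.
        fmt.Println(m["b"]) // 0
    }
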
- n = nod(ODEREF, n, nil) + n = ir.Nod(ir.ODEREF, n, nil) n.Type = t.Elem() n.SetTypecheck(1) - case ORECV: + case ir.ORECV: base.Fatalf("walkexpr ORECV") // should see inside OAS only - case OSLICEHEADER: + case ir.OSLICEHEADER: n.Left = walkexpr(n.Left, init) n.List.SetFirst(walkexpr(n.List.First(), init)) n.List.SetSecond(walkexpr(n.List.Second(), init)) - case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR: - checkSlice := checkPtr(Curfn, 1) && n.Op == OSLICE3ARR && n.Left.Op == OCONVNOP && n.Left.Left.Type.IsUnsafePtr() + case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR: + checkSlice := checkPtr(Curfn, 1) && n.Op == ir.OSLICE3ARR && n.Left.Op == ir.OCONVNOP && n.Left.Left.Type.IsUnsafePtr() if checkSlice { n.Left.Left = walkexpr(n.Left.Left, init) } else { @@ -1135,12 +1136,12 @@ opswitch: n.Left = walkCheckPtrAlignment(n.Left, init, max) } if n.Op.IsSlice3() { - if max != nil && max.Op == OCAP && samesafeexpr(n.Left, max.Left) { + if max != nil && max.Op == ir.OCAP && samesafeexpr(n.Left, max.Left) { // Reduce x[i:j:cap(x)] to x[i:j]. - if n.Op == OSLICE3 { - n.Op = OSLICE + if n.Op == ir.OSLICE3 { + n.Op = ir.OSLICE } else { - n.Op = OSLICEARR + n.Op = ir.OSLICEARR } n = reduceSlice(n) } @@ -1148,7 +1149,7 @@ opswitch: n = reduceSlice(n) } - case ONEW: + case ir.ONEW: if n.Type.Elem().NotInHeap() { base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type.Elem()) } @@ -1157,74 +1158,74 @@ opswitch: base.Fatalf("large ONEW with EscNone: %v", n) } r := temp(n.Type.Elem()) - r = nod(OAS, r, nil) // zero temp + r = ir.Nod(ir.OAS, r, nil) // zero temp r = typecheck(r, ctxStmt) init.Append(r) - r = nod(OADDR, r.Left, nil) + r = ir.Nod(ir.OADDR, r.Left, nil) r = typecheck(r, ctxExpr) n = r } else { n = callnew(n.Type.Elem()) } - case OADDSTR: + case ir.OADDSTR: n = addstr(n, init) - case OAPPEND: + case ir.OAPPEND: // order should make sure we only see OAS(node, OAPPEND), which we handle above. base.Fatalf("append outside assignment") - case OCOPY: + case ir.OCOPY: n = copyany(n, init, instrumenting && !base.Flag.CompilingRuntime) // cannot use chanfn - closechan takes any, not chan any - case OCLOSE: + case ir.OCLOSE: fn := syslook("closechan") fn = substArgTypes(fn, n.Left.Type) n = mkcall1(fn, nil, init, n.Left) - case OMAKECHAN: + case ir.OMAKECHAN: // When size fits into int, use makechan instead of // makechan64, which is faster and shorter on 32 bit platforms. size := n.Left fnname := "makechan64" - argtype := types.Types[TINT64] + argtype := types.Types[types.TINT64] // Type checking guarantees that TIDEAL size is positive and fits in an int. // The case of size overflow when converting TUINT or TUINTPTR to TINT // will be handled by the negative range checks in makechan during runtime. - if size.Type.IsKind(TIDEAL) || size.Type.Size() <= types.Types[TUINT].Size() { + if size.Type.IsKind(types.TIDEAL) || size.Type.Size() <= types.Types[types.TUINT].Size() { fnname = "makechan" - argtype = types.Types[TINT] + argtype = types.Types[types.TINT] } n = mkcall1(chanfn(fnname, 1, n.Type), n.Type, init, typename(n.Type), conv(size, argtype)) - case OMAKEMAP: + case ir.OMAKEMAP: t := n.Type hmapType := hmap(t) hint := n.Left // var h *hmap - var h *Node + var h *ir.Node if n.Esc == EscNone { // Allocate hmap on stack. 
// var hv hmap hv := temp(hmapType) - zero := nod(OAS, hv, nil) + zero := ir.Nod(ir.OAS, hv, nil) zero = typecheck(zero, ctxStmt) init.Append(zero) // h = &hv - h = nod(OADDR, hv, nil) + h = ir.Nod(ir.OADDR, hv, nil) // Allocate one bucket pointed to by hmap.buckets on stack if hint // is not larger than BUCKETSIZE. In case hint is larger than // BUCKETSIZE runtime.makemap will allocate the buckets on the heap. // Maximum key and elem size is 128 bytes, larger objects // are stored with an indirection. So max bucket size is 2048+eps. - if !Isconst(hint, constant.Int) || + if !ir.IsConst(hint, constant.Int) || constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(BUCKETSIZE)) { // In case hint is larger than BUCKETSIZE runtime.makemap @@ -1236,20 +1237,20 @@ opswitch: // h.buckets = b // } - nif := nod(OIF, nod(OLE, hint, nodintconst(BUCKETSIZE)), nil) + nif := ir.Nod(ir.OIF, ir.Nod(ir.OLE, hint, nodintconst(BUCKETSIZE)), nil) nif.SetLikely(true) // var bv bmap bv := temp(bmap(t)) - zero = nod(OAS, bv, nil) + zero = ir.Nod(ir.OAS, bv, nil) nif.Nbody.Append(zero) // b = &bv - b := nod(OADDR, bv, nil) + b := ir.Nod(ir.OADDR, bv, nil) // h.buckets = b bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap - na := nod(OAS, nodSym(ODOT, h, bsym), b) + na := ir.Nod(ir.OAS, nodSym(ir.ODOT, h, bsym), b) nif.Nbody.Append(na) nif = typecheck(nif, ctxStmt) @@ -1258,7 +1259,7 @@ opswitch: } } - if Isconst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(BUCKETSIZE)) { + if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(BUCKETSIZE)) { // Handling make(map[any]any) and // make(map[any]any, hint) where hint <= BUCKETSIZE // special allows for faster map initialization and @@ -1270,9 +1271,9 @@ opswitch: // Only need to initialize h.hash0 since // hmap h has been allocated on the stack already. // h.hash0 = fastrand() - rand := mkcall("fastrand", types.Types[TUINT32], init) + rand := mkcall("fastrand", types.Types[types.TUINT32], init) hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap - a := nod(OAS, nodSym(ODOT, h, hashsym), rand) + a := ir.Nod(ir.OAS, nodSym(ir.ODOT, h, hashsym), rand) a = typecheck(a, ctxStmt) a = walkexpr(a, init) init.Append(a) @@ -1296,15 +1297,15 @@ opswitch: // When hint fits into int, use makemap instead of // makemap64, which is faster and shorter on 32 bit platforms. fnname := "makemap64" - argtype := types.Types[TINT64] + argtype := types.Types[types.TINT64] // Type checking guarantees that TIDEAL hint is positive and fits in an int. // See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function. // The case of hint overflow when converting TUINT or TUINTPTR to TINT // will be handled by the negative range checks in makemap during runtime. 
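
Taken together, the branches above mean a small, non-escaping map can be created without calling makemap at all; only h.hash0 needs seeding. A sketch of code that should hit that path, assuming escape analysis keeps the map on the stack:

    package main

    func main() {
        // Constant hint <= BUCKETSIZE (8) and no escape: the hmap and
        // its single bucket both live on the stack, and initialization
        // reduces to h.hash0 = fastrand().
        m := make(map[int]int, 8)
        m[1] = 1
    }
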
- if hint.Type.IsKind(TIDEAL) || hint.Type.Size() <= types.Types[TUINT].Size() { + if hint.Type.IsKind(types.TIDEAL) || hint.Type.Size() <= types.Types[types.TUINT].Size() { fnname = "makemap" - argtype = types.Types[TINT] + argtype = types.Types[types.TINT] } fn := syslook(fnname) @@ -1312,7 +1313,7 @@ opswitch: n = mkcall1(fn, n.Type, init, typename(n.Type), conv(hint, argtype), h) } - case OMAKESLICE: + case ir.OMAKESLICE: l := n.Left r := n.Right if r == nil { @@ -1341,8 +1342,8 @@ opswitch: // if len < 0 { panicmakeslicelen() } // panicmakeslicecap() // } - nif := nod(OIF, nod(OGT, conv(l, types.Types[TUINT64]), nodintconst(i)), nil) - niflen := nod(OIF, nod(OLT, l, nodintconst(0)), nil) + nif := ir.Nod(ir.OIF, ir.Nod(ir.OGT, conv(l, types.Types[types.TUINT64]), nodintconst(i)), nil) + niflen := ir.Nod(ir.OIF, ir.Nod(ir.OLT, l, nodintconst(0)), nil) niflen.Nbody.Set1(mkcall("panicmakeslicelen", nil, init)) nif.Nbody.Append(niflen, mkcall("panicmakeslicecap", nil, init)) nif = typecheck(nif, ctxStmt) @@ -1350,10 +1351,10 @@ opswitch: t = types.NewArray(t.Elem(), i) // [r]T var_ := temp(t) - a := nod(OAS, var_, nil) // zero temp + a := ir.Nod(ir.OAS, var_, nil) // zero temp a = typecheck(a, ctxStmt) init.Append(a) - r := nod(OSLICE, var_, nil) // arr[:l] + r := ir.Nod(ir.OSLICE, var_, nil) // arr[:l] r.SetSliceBounds(nil, l, nil) r = conv(r, n.Type) // in case n.Type is named. r = typecheck(r, ctxExpr) @@ -1367,31 +1368,31 @@ opswitch: len, cap := l, r fnname := "makeslice64" - argtype := types.Types[TINT64] + argtype := types.Types[types.TINT64] // Type checking guarantees that TIDEAL len/cap are positive and fit in an int. // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT // will be handled by the negative range checks in makeslice during runtime. 
- if (len.Type.IsKind(TIDEAL) || len.Type.Size() <= types.Types[TUINT].Size()) && - (cap.Type.IsKind(TIDEAL) || cap.Type.Size() <= types.Types[TUINT].Size()) { + if (len.Type.IsKind(types.TIDEAL) || len.Type.Size() <= types.Types[types.TUINT].Size()) && + (cap.Type.IsKind(types.TIDEAL) || cap.Type.Size() <= types.Types[types.TUINT].Size()) { fnname = "makeslice" - argtype = types.Types[TINT] + argtype = types.Types[types.TINT] } - m := nod(OSLICEHEADER, nil, nil) + m := ir.Nod(ir.OSLICEHEADER, nil, nil) m.Type = t fn := syslook(fnname) - m.Left = mkcall1(fn, types.Types[TUNSAFEPTR], init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype)) + m.Left = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype)) m.Left.MarkNonNil() - m.List.Set2(conv(len, types.Types[TINT]), conv(cap, types.Types[TINT])) + m.List.Set2(conv(len, types.Types[types.TINT]), conv(cap, types.Types[types.TINT])) m = typecheck(m, ctxExpr) m = walkexpr(m, init) n = m } - case OMAKESLICECOPY: + case ir.OMAKESLICECOPY: if n.Esc == EscNone { base.Fatalf("OMAKESLICECOPY with EscNone: %v", n) } @@ -1401,9 +1402,9 @@ opswitch: base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem()) } - length := conv(n.Left, types.Types[TINT]) - copylen := nod(OLEN, n.Right, nil) - copyptr := nod(OSPTR, n.Right, nil) + length := conv(n.Left, types.Types[types.TINT]) + copylen := ir.Nod(ir.OLEN, n.Right, nil) + copyptr := ir.Nod(ir.OSPTR, n.Right, nil) if !t.Elem().HasPointers() && n.Bounded() { // When len(to)==len(from) and elements have no pointers: @@ -1412,25 +1413,25 @@ opswitch: // We do not check for overflow of len(to)*elem.Width here // since len(from) is an existing checked slice capacity // with same elem.Width for the from slice. - size := nod(OMUL, conv(length, types.Types[TUINTPTR]), conv(nodintconst(t.Elem().Width), types.Types[TUINTPTR])) + size := ir.Nod(ir.OMUL, conv(length, types.Types[types.TUINTPTR]), conv(nodintconst(t.Elem().Width), types.Types[types.TUINTPTR])) // instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer fn := syslook("mallocgc") - sh := nod(OSLICEHEADER, nil, nil) - sh.Left = mkcall1(fn, types.Types[TUNSAFEPTR], init, size, nodnil(), nodbool(false)) + sh := ir.Nod(ir.OSLICEHEADER, nil, nil) + sh.Left = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, nodnil(), nodbool(false)) sh.Left.MarkNonNil() sh.List.Set2(length, length) sh.Type = t s := temp(t) - r := typecheck(nod(OAS, s, sh), ctxStmt) + r := typecheck(ir.Nod(ir.OAS, s, sh), ctxStmt) r = walkexpr(r, init) init.Append(r) // instantiate memmove(to *any, frm *any, size uintptr) fn = syslook("memmove") fn = substArgTypes(fn, t.Elem(), t.Elem()) - ncopy := mkcall1(fn, nil, init, nod(OSPTR, s, nil), copyptr, size) + ncopy := mkcall1(fn, nil, init, ir.Nod(ir.OSPTR, s, nil), copyptr, size) ncopy = typecheck(ncopy, ctxStmt) ncopy = walkexpr(ncopy, init) init.Append(ncopy) @@ -1439,8 +1440,8 @@ opswitch: } else { // Replace make+copy with runtime.makeslicecopy. 
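
The pattern being fused is the familiar allocate-then-copy idiom; when the compiler recognizes it as OMAKESLICECOPY, the separate makeslice and copy calls collapse into one of the two branches in this hunk:

    package main

    import "fmt"

    func main() {
        src := []int{1, 2, 3}
        // Recognized as OMAKESLICECOPY: allocation and copy fuse into a
        // single runtime.makeslicecopy call (or, for pointer-free
        // element types with matching lengths, mallocgc + memmove as in
        // the first branch).
        dst := make([]int, len(src))
        copy(dst, src)
        fmt.Println(dst) // [1 2 3]
    }
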
// instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer fn := syslook("makeslicecopy") - s := nod(OSLICEHEADER, nil, nil) - s.Left = mkcall1(fn, types.Types[TUNSAFEPTR], init, typename(t.Elem()), length, copylen, conv(copyptr, types.Types[TUNSAFEPTR])) + s := ir.Nod(ir.OSLICEHEADER, nil, nil) + s.Left = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), length, copylen, conv(copyptr, types.Types[types.TUNSAFEPTR])) s.Left.MarkNonNil() s.List.Set2(length, length) s.Type = t @@ -1448,33 +1449,33 @@ opswitch: n = walkexpr(n, init) } - case ORUNESTR: + case ir.ORUNESTR: a := nodnil() if n.Esc == EscNone { - t := types.NewArray(types.Types[TUINT8], 4) - a = nod(OADDR, temp(t), nil) + t := types.NewArray(types.Types[types.TUINT8], 4) + a = ir.Nod(ir.OADDR, temp(t), nil) } // intstring(*[4]byte, rune) - n = mkcall("intstring", n.Type, init, a, conv(n.Left, types.Types[TINT64])) + n = mkcall("intstring", n.Type, init, a, conv(n.Left, types.Types[types.TINT64])) - case OBYTES2STR, ORUNES2STR: + case ir.OBYTES2STR, ir.ORUNES2STR: a := nodnil() if n.Esc == EscNone { // Create temporary buffer for string on stack. - t := types.NewArray(types.Types[TUINT8], tmpstringbufsize) - a = nod(OADDR, temp(t), nil) + t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize) + a = ir.Nod(ir.OADDR, temp(t), nil) } - if n.Op == ORUNES2STR { + if n.Op == ir.ORUNES2STR { // slicerunetostring(*[32]byte, []rune) string n = mkcall("slicerunetostring", n.Type, init, a, n.Left) } else { // slicebytetostring(*[32]byte, ptr *byte, n int) string n.Left = cheapexpr(n.Left, init) - ptr, len := n.Left.backingArrayPtrLen() + ptr, len := backingArrayPtrLen(n.Left) n = mkcall("slicebytetostring", n.Type, init, a, ptr, len) } - case OBYTES2STRTMP: + case ir.OBYTES2STRTMP: n.Left = walkexpr(n.Left, init) if !instrumenting { // Let the backend handle OBYTES2STRTMP directly @@ -1483,37 +1484,37 @@ opswitch: } // slicebytetostringtmp(ptr *byte, n int) string n.Left = cheapexpr(n.Left, init) - ptr, len := n.Left.backingArrayPtrLen() + ptr, len := backingArrayPtrLen(n.Left) n = mkcall("slicebytetostringtmp", n.Type, init, ptr, len) - case OSTR2BYTES: + case ir.OSTR2BYTES: s := n.Left - if Isconst(s, constant.String) { + if ir.IsConst(s, constant.String) { sc := s.StringVal() // Allocate a [n]byte of the right size. - t := types.NewArray(types.Types[TUINT8], int64(len(sc))) - var a *Node + t := types.NewArray(types.Types[types.TUINT8], int64(len(sc))) + var a *ir.Node if n.Esc == EscNone && len(sc) <= int(maxImplicitStackVarSize) { - a = nod(OADDR, temp(t), nil) + a = ir.Nod(ir.OADDR, temp(t), nil) } else { a = callnew(t) } p := temp(t.PtrTo()) // *[n]byte - init.Append(typecheck(nod(OAS, p, a), ctxStmt)) + init.Append(typecheck(ir.Nod(ir.OAS, p, a), ctxStmt)) // Copy from the static string data to the [n]byte. if len(sc) > 0 { - as := nod(OAS, - nod(ODEREF, p, nil), - nod(ODEREF, convnop(nod(OSPTR, s, nil), t.PtrTo()), nil)) + as := ir.Nod(ir.OAS, + ir.Nod(ir.ODEREF, p, nil), + ir.Nod(ir.ODEREF, convnop(ir.Nod(ir.OSPTR, s, nil), t.PtrTo()), nil)) as = typecheck(as, ctxStmt) as = walkstmt(as) init.Append(as) } // Slice the [n]byte to a []byte. - n.Op = OSLICEARR + n.Op = ir.OSLICEARR n.Left = p n = walkexpr(n, init) break @@ -1522,13 +1523,13 @@ opswitch: a := nodnil() if n.Esc == EscNone { // Create temporary buffer for slice on stack. 
- t := types.NewArray(types.Types[TUINT8], tmpstringbufsize) - a = nod(OADDR, temp(t), nil) + t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize) + a = ir.Nod(ir.OADDR, temp(t), nil) } // stringtoslicebyte(*32[byte], string) []byte - n = mkcall("stringtoslicebyte", n.Type, init, a, conv(s, types.Types[TSTRING])) + n = mkcall("stringtoslicebyte", n.Type, init, a, conv(s, types.Types[types.TSTRING])) - case OSTR2BYTESTMP: + case ir.OSTR2BYTESTMP: // []byte(string) conversion that creates a slice // referring to the actual string bytes. // This conversion is handled later by the backend and @@ -1538,17 +1539,17 @@ opswitch: // for i, c := range []byte(string) n.Left = walkexpr(n.Left, init) - case OSTR2RUNES: + case ir.OSTR2RUNES: a := nodnil() if n.Esc == EscNone { // Create temporary buffer for slice on stack. - t := types.NewArray(types.Types[TINT32], tmpstringbufsize) - a = nod(OADDR, temp(t), nil) + t := types.NewArray(types.Types[types.TINT32], tmpstringbufsize) + a = ir.Nod(ir.OADDR, temp(t), nil) } // stringtoslicerune(*[32]rune, string) []rune - n = mkcall("stringtoslicerune", n.Type, init, a, conv(n.Left, types.Types[TSTRING])) + n = mkcall("stringtoslicerune", n.Type, init, a, conv(n.Left, types.Types[types.TSTRING])) - case OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT, OPTRLIT: + case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT, ir.OPTRLIT: if isStaticCompositeLiteral(n) && !canSSAType(n.Type) { // n can be directly represented in the read-only data section. // Make direct reference to the static data. See issue 12841. @@ -1562,17 +1563,17 @@ opswitch: anylit(n, var_, init) n = var_ - case OSEND: + case ir.OSEND: n1 := n.Right n1 = assignconv(n1, n.Left.Type.Elem(), "chan send") n1 = walkexpr(n1, init) - n1 = nod(OADDR, n1, nil) + n1 = ir.Nod(ir.OADDR, n1, nil) n = mkcall1(chanfn("chansend1", 2, n.Left.Type), nil, init, n.Left, n1) - case OCLOSURE: + case ir.OCLOSURE: n = walkclosure(n, init) - case OCALLPART: + case ir.OCALLPART: n = walkpartialcall(n, init) } @@ -1586,7 +1587,7 @@ opswitch: if n.Type != t { base.Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type) } - if n.Op == OLITERAL { + if n.Op == ir.OLITERAL { n = typecheck(n, ctxExpr) // Emit string symbol now to avoid emitting // any concurrently during the backend. @@ -1598,7 +1599,7 @@ opswitch: updateHasCall(n) if base.Flag.LowerW != 0 && n != nil { - Dump("after walk expr", n) + ir.Dump("after walk expr", n) } base.Pos = lno @@ -1618,10 +1619,10 @@ func markTypeUsedInInterface(t *types.Type, from *obj.LSym) { // markUsedIfaceMethod marks that an interface method is used in the current // function. n is OCALLINTER node. -func markUsedIfaceMethod(n *Node) { +func markUsedIfaceMethod(n *ir.Node) { ityp := n.Left.Left.Type tsym := typenamesym(ityp).Linksym() - r := obj.Addrel(Curfn.Func.lsym) + r := obj.Addrel(Curfn.Func.LSym) r.Sym = tsym // n.Left.Xoffset is the method index * Widthptr (the offset of code pointer // in itab). @@ -1637,54 +1638,54 @@ func markUsedIfaceMethod(n *Node) { // If no such function is necessary, it returns (Txxx, Txxx). 
func rtconvfn(src, dst *types.Type) (param, result types.EType) { if thearch.SoftFloat { - return Txxx, Txxx + return types.Txxx, types.Txxx } switch thearch.LinkArch.Family { case sys.ARM, sys.MIPS: if src.IsFloat() { switch dst.Etype { - case TINT64, TUINT64: - return TFLOAT64, dst.Etype + case types.TINT64, types.TUINT64: + return types.TFLOAT64, dst.Etype } } if dst.IsFloat() { switch src.Etype { - case TINT64, TUINT64: - return src.Etype, TFLOAT64 + case types.TINT64, types.TUINT64: + return src.Etype, types.TFLOAT64 } } case sys.I386: if src.IsFloat() { switch dst.Etype { - case TINT64, TUINT64: - return TFLOAT64, dst.Etype - case TUINT32, TUINT, TUINTPTR: - return TFLOAT64, TUINT32 + case types.TINT64, types.TUINT64: + return types.TFLOAT64, dst.Etype + case types.TUINT32, types.TUINT, types.TUINTPTR: + return types.TFLOAT64, types.TUINT32 } } if dst.IsFloat() { switch src.Etype { - case TINT64, TUINT64: - return src.Etype, TFLOAT64 - case TUINT32, TUINT, TUINTPTR: - return TUINT32, TFLOAT64 + case types.TINT64, types.TUINT64: + return src.Etype, types.TFLOAT64 + case types.TUINT32, types.TUINT, types.TUINTPTR: + return types.TUINT32, types.TFLOAT64 } } } - return Txxx, Txxx + return types.Txxx, types.Txxx } // TODO(josharian): combine this with its caller and simplify -func reduceSlice(n *Node) *Node { +func reduceSlice(n *ir.Node) *ir.Node { low, high, max := n.SliceBounds() - if high != nil && high.Op == OLEN && samesafeexpr(n.Left, high.Left) { + if high != nil && high.Op == ir.OLEN && samesafeexpr(n.Left, high.Left) { // Reduce x[i:len(x)] to x[i:]. high = nil } n.SetSliceBounds(low, high, max) - if (n.Op == OSLICE || n.Op == OSLICESTR) && low == nil && high == nil { + if (n.Op == ir.OSLICE || n.Op == ir.OSLICESTR) && low == nil && high == nil { // Reduce x[:] to x. if base.Debug.Slice > 0 { base.Warn("slice: omit slice operation") @@ -1694,19 +1695,19 @@ func reduceSlice(n *Node) *Node { return n } -func ascompatee1(l *Node, r *Node, init *Nodes) *Node { +func ascompatee1(l *ir.Node, r *ir.Node, init *ir.Nodes) *ir.Node { // convas will turn map assigns into function calls, // making it impossible for reorder3 to work. - n := nod(OAS, l, r) + n := ir.Nod(ir.OAS, l, r) - if l.Op == OINDEXMAP { + if l.Op == ir.OINDEXMAP { return n } return convas(n, init) } -func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node { +func ascompatee(op ir.Op, nl, nr []*ir.Node, init *ir.Nodes) []*ir.Node { // check assign expression list to // an expression list. called in // expr-list = expr-list @@ -1719,14 +1720,14 @@ func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node { nr[i1] = safeexpr(nr[i1], init) } - var nn []*Node + var nn []*ir.Node i := 0 for ; i < len(nl); i++ { if i >= len(nr) { break } // Do not generate 'x = x' during return. See issue 4014. 
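The 'x = x' case skipped here arises whenever a named result is returned explicitly; a minimal sketch of the pattern issue 4014 is about:

    package main

    import "fmt"

    // With a named result, "return x" would otherwise lower to the
    // self-assignment x = x; ascompatee elides it (see issue 4014).
    func f() (x int) {
        x = 5
        return x
    }

    func main() {
        fmt.Println(f()) // 5
    }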
- if op == ORETURN && samesafeexpr(nl[i], nr[i]) { + if op == ir.ORETURN && samesafeexpr(nl[i], nr[i]) { continue } nn = append(nn, ascompatee1(nl[i], nr[i], init)) @@ -1734,17 +1735,17 @@ func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node { // cannot happen: caller checked that lists had same length if i < len(nl) || i < len(nr) { - var nln, nrn Nodes + var nln, nrn ir.Nodes nln.Set(nl) nrn.Set(nr) - base.Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), Curfn.funcname()) + base.Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), ir.FuncName(Curfn)) } return nn } // fncall reports whether assigning an rvalue of type rt to an lvalue l might involve a function call. -func fncall(l *Node, rt *types.Type) bool { - if l.HasCall() || l.Op == OINDEXMAP { +func fncall(l *ir.Node, rt *types.Type) bool { + if l.HasCall() || l.Op == ir.OINDEXMAP { return true } if types.Identical(l.Type, rt) { @@ -1757,14 +1758,14 @@ func fncall(l *Node, rt *types.Type) bool { // check assign type list to // an expression list. called in // expr-list = func() -func ascompatet(nl Nodes, nr *types.Type) []*Node { +func ascompatet(nl ir.Nodes, nr *types.Type) []*ir.Node { if nl.Len() != nr.NumFields() { base.Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields()) } - var nn, mm Nodes + var nn, mm ir.Nodes for i, l := range nl.Slice() { - if l.isBlank() { + if ir.IsBlank(l) { continue } r := nr.Field(i) @@ -1774,22 +1775,22 @@ func ascompatet(nl Nodes, nr *types.Type) []*Node { if fncall(l, r.Type) { tmp := temp(r.Type) tmp = typecheck(tmp, ctxExpr) - a := nod(OAS, l, tmp) + a := ir.Nod(ir.OAS, l, tmp) a = convas(a, &mm) mm.Append(a) l = tmp } - res := nod(ORESULT, nil, nil) + res := ir.Nod(ir.ORESULT, nil, nil) res.Xoffset = base.Ctxt.FixedFrameSize() + r.Offset res.Type = r.Type res.SetTypecheck(1) - a := nod(OAS, l, res) + a := ir.Nod(ir.OAS, l, res) a = convas(a, &nn) updateHasCall(a) if a.HasCall() { - Dump("ascompatet ucount", a) + ir.Dump("ascompatet ucount", a) base.Fatalf("ascompatet: too many function calls evaluating parameters") } @@ -1799,13 +1800,13 @@ func ascompatet(nl Nodes, nr *types.Type) []*Node { } // package all the arguments that match a ... T parameter into a []T. -func mkdotargslice(typ *types.Type, args []*Node) *Node { - var n *Node +func mkdotargslice(typ *types.Type, args []*ir.Node) *ir.Node { + var n *ir.Node if len(args) == 0 { n = nodnil() n.Type = typ } else { - n = nod(OCOMPLIT, nil, typenod(typ)) + n = ir.Nod(ir.OCOMPLIT, nil, typenod(typ)) n.List.Append(args...) n.SetImplicit(true) } @@ -1819,7 +1820,7 @@ func mkdotargslice(typ *types.Type, args []*Node) *Node { // fixVariadicCall rewrites calls to variadic functions to use an // explicit ... argument if one is not already present. -func fixVariadicCall(call *Node) { +func fixVariadicCall(call *ir.Node) { fntype := call.Left.Type if !fntype.IsVariadic() || call.IsDDD() { return @@ -1839,7 +1840,7 @@ func fixVariadicCall(call *Node) { call.SetIsDDD(true) } -func walkCall(n *Node, init *Nodes) { +func walkCall(n *ir.Node, init *ir.Nodes) { if n.Rlist.Len() != 0 { return // already walked } @@ -1851,8 +1852,8 @@ func walkCall(n *Node, init *Nodes) { walkexprlist(args, init) // If this is a method call, add the receiver at the beginning of the args. 
- if n.Op == OCALLMETH { - withRecv := make([]*Node, len(args)+1) + if n.Op == ir.OCALLMETH { + withRecv := make([]*ir.Node, len(args)+1) withRecv[0] = n.Left.Left n.Left.Left = nil copy(withRecv[1:], args) @@ -1863,12 +1864,12 @@ func walkCall(n *Node, init *Nodes) { // store that argument into a temporary variable, // to prevent that calls from clobbering arguments already on the stack. // When instrumenting, all arguments might require function calls. - var tempAssigns []*Node + var tempAssigns []*ir.Node for i, arg := range args { updateHasCall(arg) // Determine param type. var t *types.Type - if n.Op == OCALLMETH { + if n.Op == ir.OCALLMETH { if i == 0 { t = n.Left.Type.Recv().Type } else { @@ -1880,7 +1881,7 @@ func walkCall(n *Node, init *Nodes) { if instrumenting || fncall(arg, t) { // make assignment of fncall to tempAt tmp := temp(t) - a := nod(OAS, tmp, arg) + a := ir.Nod(ir.OAS, tmp, arg) a = convas(a, init) tempAssigns = append(tempAssigns, a) // replace arg with temp @@ -1893,14 +1894,14 @@ func walkCall(n *Node, init *Nodes) { } // generate code for print -func walkprint(nn *Node, init *Nodes) *Node { +func walkprint(nn *ir.Node, init *ir.Nodes) *ir.Node { // Hoist all the argument evaluation up before the lock. walkexprlistcheap(nn.List.Slice(), init) // For println, add " " between elements and "\n" at the end. - if nn.Op == OPRINTN { + if nn.Op == ir.OPRINTN { s := nn.List.Slice() - t := make([]*Node, 0, len(s)*2) + t := make([]*ir.Node, 0, len(s)*2) for i, n := range s { if i != 0 { t = append(t, nodstr(" ")) @@ -1913,10 +1914,10 @@ func walkprint(nn *Node, init *Nodes) *Node { // Collapse runs of constant strings. s := nn.List.Slice() - t := make([]*Node, 0, len(s)) + t := make([]*ir.Node, 0, len(s)) for i := 0; i < len(s); { var strs []string - for i < len(s) && Isconst(s[i], constant.String) { + for i < len(s) && ir.IsConst(s[i], constant.String) { strs = append(strs, s[i].StringVal()) i++ } @@ -1930,73 +1931,73 @@ func walkprint(nn *Node, init *Nodes) *Node { } nn.List.Set(t) - calls := []*Node{mkcall("printlock", nil, init)} + calls := []*ir.Node{mkcall("printlock", nil, init)} for i, n := range nn.List.Slice() { - if n.Op == OLITERAL { + if n.Op == ir.OLITERAL { if n.Type == types.UntypedRune { n = defaultlit(n, types.Runetype) } switch n.Val().Kind() { case constant.Int: - n = defaultlit(n, types.Types[TINT64]) + n = defaultlit(n, types.Types[types.TINT64]) case constant.Float: - n = defaultlit(n, types.Types[TFLOAT64]) + n = defaultlit(n, types.Types[types.TFLOAT64]) } } - if n.Op != OLITERAL && n.Type != nil && n.Type.Etype == TIDEAL { - n = defaultlit(n, types.Types[TINT64]) + if n.Op != ir.OLITERAL && n.Type != nil && n.Type.Etype == types.TIDEAL { + n = defaultlit(n, types.Types[types.TINT64]) } n = defaultlit(n, nil) nn.List.SetIndex(i, n) - if n.Type == nil || n.Type.Etype == TFORW { + if n.Type == nil || n.Type.Etype == types.TFORW { continue } - var on *Node + var on *ir.Node switch n.Type.Etype { - case TINTER: + case types.TINTER: if n.Type.IsEmptyInterface() { on = syslook("printeface") } else { on = syslook("printiface") } on = substArgTypes(on, n.Type) // any-1 - case TPTR: + case types.TPTR: if n.Type.Elem().NotInHeap() { on = syslook("printuintptr") - n = nod(OCONV, n, nil) - n.Type = types.Types[TUNSAFEPTR] - n = nod(OCONV, n, nil) - n.Type = types.Types[TUINTPTR] + n = ir.Nod(ir.OCONV, n, nil) + n.Type = types.Types[types.TUNSAFEPTR] + n = ir.Nod(ir.OCONV, n, nil) + n.Type = types.Types[types.TUINTPTR] break } fallthrough - case TCHAN, TMAP, 
TFUNC, TUNSAFEPTR: + case types.TCHAN, types.TMAP, types.TFUNC, types.TUNSAFEPTR: on = syslook("printpointer") on = substArgTypes(on, n.Type) // any-1 - case TSLICE: + case types.TSLICE: on = syslook("printslice") on = substArgTypes(on, n.Type) // any-1 - case TUINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINTPTR: + case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR: if isRuntimePkg(n.Type.Sym.Pkg) && n.Type.Sym.Name == "hex" { on = syslook("printhex") } else { on = syslook("printuint") } - case TINT, TINT8, TINT16, TINT32, TINT64: + case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64: on = syslook("printint") - case TFLOAT32, TFLOAT64: + case types.TFLOAT32, types.TFLOAT64: on = syslook("printfloat") - case TCOMPLEX64, TCOMPLEX128: + case types.TCOMPLEX64, types.TCOMPLEX128: on = syslook("printcomplex") - case TBOOL: + case types.TBOOL: on = syslook("printbool") - case TSTRING: + case types.TSTRING: cs := "" - if Isconst(n, constant.String) { + if ir.IsConst(n, constant.String) { cs = n.StringVal() } switch cs { @@ -2008,15 +2009,15 @@ func walkprint(nn *Node, init *Nodes) *Node { on = syslook("printstring") } default: - badtype(OPRINT, n.Type, nil) + badtype(ir.OPRINT, n.Type, nil) continue } - r := nod(OCALL, on, nil) + r := ir.Nod(ir.OCALL, on, nil) if params := on.Type.Params().FieldSlice(); len(params) > 0 { t := params[0].Type if !types.Identical(t, n.Type) { - n = nod(OCONV, n, nil) + n = ir.Nod(ir.OCONV, n, nil) n.Type = t } r.List.Append(n) @@ -2029,16 +2030,16 @@ func walkprint(nn *Node, init *Nodes) *Node { typecheckslice(calls, ctxStmt) walkexprlist(calls, init) - r := nod(OEMPTY, nil, nil) + r := ir.Nod(ir.OEMPTY, nil, nil) r = typecheck(r, ctxStmt) r = walkexpr(r, init) r.Ninit.Set(calls) return r } -func callnew(t *types.Type) *Node { +func callnew(t *types.Type) *ir.Node { dowidth(t) - n := nod(ONEWOBJ, typename(t), nil) + n := ir.Nod(ir.ONEWOBJ, typename(t), nil) n.Type = types.NewPtr(t) n.SetTypecheck(1) n.MarkNonNil() @@ -2047,16 +2048,16 @@ func callnew(t *types.Type) *Node { // isReflectHeaderDataField reports whether l is an expression p.Data // where p has type reflect.SliceHeader or reflect.StringHeader. -func isReflectHeaderDataField(l *Node) bool { - if l.Type != types.Types[TUINTPTR] { +func isReflectHeaderDataField(l *ir.Node) bool { + if l.Type != types.Types[types.TUINTPTR] { return false } var tsym *types.Sym switch l.Op { - case ODOT: + case ir.ODOT: tsym = l.Left.Type.Sym - case ODOTPTR: + case ir.ODOTPTR: tsym = l.Left.Type.Elem().Sym default: return false @@ -2068,8 +2069,8 @@ func isReflectHeaderDataField(l *Node) bool { return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader" } -func convas(n *Node, init *Nodes) *Node { - if n.Op != OAS { +func convas(n *ir.Node, init *ir.Nodes) *ir.Node { + if n.Op != ir.OAS { base.Fatalf("convas: not OAS %v", n.Op) } defer updateHasCall(n) @@ -2086,7 +2087,7 @@ func convas(n *Node, init *Nodes) *Node { return n } - if n.Left.isBlank() { + if ir.IsBlank(n.Left) { n.Right = defaultlit(n.Right, nil) return n } @@ -2106,25 +2107,25 @@ func convas(n *Node, init *Nodes) *Node { // be later use of an earlier lvalue. // // function calls have been removed. -func reorder3(all []*Node) []*Node { +func reorder3(all []*ir.Node) []*ir.Node { // If a needed expression may be affected by an // earlier assignment, make an early copy of that // expression and use the copy instead. 
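The aliasing hazard reorder3 guards against comes from the spec's parallel-assignment rule: every operand on both sides is evaluated before any store happens. A runnable sketch of the behavior that must be preserved:

    package main

    import "fmt"

    func main() {
        a := []int{1, 2, 3}
        i := 0
        // The index expression a[i] and both right-hand sides are
        // evaluated first, so a[i] still refers to a[0] even though
        // i is assigned 1 by the same statement.
        i, a[i] = 1, 20
        fmt.Println(i, a) // 1 [20 2 3]
    }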
- var early []*Node + var early []*ir.Node - var mapinit Nodes + var mapinit ir.Nodes for i, n := range all { l := n.Left // Save subexpressions needed on left side. // Drill through non-dereferences. for { - if l.Op == ODOT || l.Op == OPAREN { + if l.Op == ir.ODOT || l.Op == ir.OPAREN { l = l.Left continue } - if l.Op == OINDEX && l.Left.Type.IsArray() { + if l.Op == ir.OINDEX && l.Left.Type.IsArray() { l.Right = reorder3save(l.Right, all, i, &early) l = l.Left continue @@ -2137,17 +2138,17 @@ func reorder3(all []*Node) []*Node { default: base.Fatalf("reorder3 unexpected lvalue %#v", l.Op) - case ONAME: + case ir.ONAME: break - case OINDEX, OINDEXMAP: + case ir.OINDEX, ir.OINDEXMAP: l.Left = reorder3save(l.Left, all, i, &early) l.Right = reorder3save(l.Right, all, i, &early) - if l.Op == OINDEXMAP { + if l.Op == ir.OINDEXMAP { all[i] = convas(all[i], &mapinit) } - case ODEREF, ODOTPTR: + case ir.ODEREF, ir.ODOTPTR: l.Left = reorder3save(l.Left, all, i, &early) } @@ -2165,13 +2166,13 @@ func reorder3(all []*Node) []*Node { // replace *np with that temp. // The result of reorder3save MUST be assigned back to n, e.g. // n.Left = reorder3save(n.Left, all, i, early) -func reorder3save(n *Node, all []*Node, i int, early *[]*Node) *Node { +func reorder3save(n *ir.Node, all []*ir.Node, i int, early *[]*ir.Node) *ir.Node { if !aliased(n, all[:i]) { return n } q := temp(n.Type) - q = nod(OAS, q, n) + q = ir.Nod(ir.OAS, q, n) q = typecheck(q, ctxStmt) *early = append(*early, q) return q.Left @@ -2179,15 +2180,15 @@ func reorder3save(n *Node, all []*Node, i int, early *[]*Node) *Node { // what's the outer value that a write to n affects? // outer value means containing struct or array. -func outervalue(n *Node) *Node { +func outervalue(n *ir.Node) *ir.Node { for { switch n.Op { - case OXDOT: + case ir.OXDOT: base.Fatalf("OXDOT in walk") - case ODOT, OPAREN, OCONVNOP: + case ir.ODOT, ir.OPAREN, ir.OCONVNOP: n = n.Left continue - case OINDEX: + case ir.OINDEX: if n.Left.Type != nil && n.Left.Type.IsArray() { n = n.Left continue @@ -2200,14 +2201,14 @@ func outervalue(n *Node) *Node { // Is it possible that the computation of r might be // affected by assignments in all? -func aliased(r *Node, all []*Node) bool { +func aliased(r *ir.Node, all []*ir.Node) bool { if r == nil { return false } // Treat all fields of a struct as referring to the whole struct. // We could do better but we would have to keep track of the fields. - for r.Op == ODOT { + for r.Op == ir.ODOT { r = r.Left } @@ -2219,12 +2220,12 @@ func aliased(r *Node, all []*Node) bool { memwrite := false for _, as := range all { // We can ignore assignments to blank. - if as.Left.isBlank() { + if ir.IsBlank(as.Left) { continue } l := outervalue(as.Left) - if l.Op != ONAME { + if l.Op != ir.ONAME { memwrite = true continue } @@ -2233,11 +2234,11 @@ func aliased(r *Node, all []*Node) bool { default: base.Fatalf("unexpected class: %v, %v", l, l.Class()) - case PAUTOHEAP, PEXTERN: + case ir.PAUTOHEAP, ir.PEXTERN: memwrite = true continue - case PAUTO, PPARAM, PPARAMOUT: + case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT: if l.Name.Addrtaken() { memwrite = true continue @@ -2274,18 +2275,18 @@ func aliased(r *Node, all []*Node) bool { // does the evaluation of n only refer to variables // whose addresses have not been taken? 
// (and no other memory) -func varexpr(n *Node) bool { +func varexpr(n *ir.Node) bool { if n == nil { return true } switch n.Op { - case OLITERAL, ONIL: + case ir.OLITERAL, ir.ONIL: return true - case ONAME: + case ir.ONAME: switch n.Class() { - case PAUTO, PPARAM, PPARAMOUT: + case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT: if !n.Name.Addrtaken() { return true } @@ -2293,30 +2294,30 @@ func varexpr(n *Node) bool { return false - case OADD, - OSUB, - OOR, - OXOR, - OMUL, - ODIV, - OMOD, - OLSH, - ORSH, - OAND, - OANDNOT, - OPLUS, - ONEG, - OBITNOT, - OPAREN, - OANDAND, - OOROR, - OCONV, - OCONVNOP, - OCONVIFACE, - ODOTTYPE: + case ir.OADD, + ir.OSUB, + ir.OOR, + ir.OXOR, + ir.OMUL, + ir.ODIV, + ir.OMOD, + ir.OLSH, + ir.ORSH, + ir.OAND, + ir.OANDNOT, + ir.OPLUS, + ir.ONEG, + ir.OBITNOT, + ir.OPAREN, + ir.OANDAND, + ir.OOROR, + ir.OCONV, + ir.OCONVNOP, + ir.OCONVIFACE, + ir.ODOTTYPE: return varexpr(n.Left) && varexpr(n.Right) - case ODOT: // but not ODOTPTR + case ir.ODOT: // but not ODOTPTR // Should have been handled in aliased. base.Fatalf("varexpr unexpected ODOT") } @@ -2326,16 +2327,16 @@ func varexpr(n *Node) bool { } // is the name l mentioned in r? -func vmatch2(l *Node, r *Node) bool { +func vmatch2(l *ir.Node, r *ir.Node) bool { if r == nil { return false } switch r.Op { // match each right given left - case ONAME: + case ir.ONAME: return l == r - case OLITERAL, ONIL: + case ir.OLITERAL, ir.ONIL: return false } @@ -2355,15 +2356,15 @@ func vmatch2(l *Node, r *Node) bool { // is any name mentioned in l also mentioned in r? // called by sinit.go -func vmatch1(l *Node, r *Node) bool { +func vmatch1(l *ir.Node, r *ir.Node) bool { // isolate all left sides if l == nil || r == nil { return false } switch l.Op { - case ONAME: + case ir.ONAME: switch l.Class() { - case PPARAM, PAUTO: + case ir.PPARAM, ir.PAUTO: break default: @@ -2376,7 +2377,7 @@ func vmatch1(l *Node, r *Node) bool { return vmatch2(l, r) - case OLITERAL, ONIL: + case ir.OLITERAL, ir.ONIL: return false } @@ -2396,10 +2397,10 @@ func vmatch1(l *Node, r *Node) bool { // paramstoheap returns code to allocate memory for heap-escaped parameters // and to copy non-result parameters' values from the stack. -func paramstoheap(params *types.Type) []*Node { - var nn []*Node +func paramstoheap(params *types.Type) []*ir.Node { + var nn []*ir.Node for _, t := range params.Fields().Slice() { - v := asNode(t.Nname) + v := ir.AsNode(t.Nname) if v != nil && v.Sym != nil && strings.HasPrefix(v.Sym.Name, "~r") { // unnamed result v = nil } @@ -2408,9 +2409,9 @@ func paramstoheap(params *types.Type) []*Node { } if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil { - nn = append(nn, walkstmt(nod(ODCL, v, nil))) - if stackcopy.Class() == PPARAM { - nn = append(nn, walkstmt(typecheck(nod(OAS, v, stackcopy), ctxStmt))) + nn = append(nn, walkstmt(ir.Nod(ir.ODCL, v, nil))) + if stackcopy.Class() == ir.PPARAM { + nn = append(nn, walkstmt(typecheck(ir.Nod(ir.OAS, v, stackcopy), ctxStmt))) } } } @@ -2427,14 +2428,14 @@ func paramstoheap(params *types.Type) []*Node { // The generated code is added to Curfn's Enter list. func zeroResults() { for _, f := range Curfn.Type.Results().Fields().Slice() { - v := asNode(f.Nname) + v := ir.AsNode(f.Nname) if v != nil && v.Name.Param.Heapaddr != nil { // The local which points to the return value is the // thing that needs zeroing. This is already handled // by a Needzero annotation in plive.go:livenessepilogue. 
continue } - if v.isParamHeapCopy() { + if isParamHeapCopy(v) { // TODO(josharian/khr): Investigate whether we can switch to "continue" here, // and document more in either case. // In the review of CL 114797, Keith wrote (roughly): @@ -2444,21 +2445,21 @@ func zeroResults() { v = v.Name.Param.Stackcopy } // Zero the stack location containing f. - Curfn.Func.Enter.Append(nodl(Curfn.Pos, OAS, v, nil)) + Curfn.Func.Enter.Append(ir.NodAt(Curfn.Pos, ir.OAS, v, nil)) } } // returnsfromheap returns code to copy values for heap-escaped parameters // back to the stack. -func returnsfromheap(params *types.Type) []*Node { - var nn []*Node +func returnsfromheap(params *types.Type) []*ir.Node { + var nn []*ir.Node for _, t := range params.Fields().Slice() { - v := asNode(t.Nname) + v := ir.AsNode(t.Nname) if v == nil { continue } - if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil && stackcopy.Class() == PPARAMOUT { - nn = append(nn, walkstmt(typecheck(nod(OAS, stackcopy, v), ctxStmt))) + if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil && stackcopy.Class() == ir.PPARAMOUT { + nn = append(nn, walkstmt(typecheck(ir.Nod(ir.OAS, stackcopy, v), ctxStmt))) } } @@ -2480,8 +2481,8 @@ func heapmoves() { base.Pos = lno } -func vmkcall(fn *Node, t *types.Type, init *Nodes, va []*Node) *Node { - if fn.Type == nil || fn.Type.Etype != TFUNC { +func vmkcall(fn *ir.Node, t *types.Type, init *ir.Nodes, va []*ir.Node) *ir.Node { + if fn.Type == nil || fn.Type.Etype != types.TFUNC { base.Fatalf("mkcall %v %v", fn, fn.Type) } @@ -2490,7 +2491,7 @@ func vmkcall(fn *Node, t *types.Type, init *Nodes, va []*Node) *Node { base.Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va)) } - r := nod(OCALL, fn, nil) + r := ir.Nod(ir.OCALL, fn, nil) r.List.Set(va) if fn.Type.NumResults() > 0 { r = typecheck(r, ctxExpr|ctxMultiOK) @@ -2502,19 +2503,19 @@ func vmkcall(fn *Node, t *types.Type, init *Nodes, va []*Node) *Node { return r } -func mkcall(name string, t *types.Type, init *Nodes, args ...*Node) *Node { +func mkcall(name string, t *types.Type, init *ir.Nodes, args ...*ir.Node) *ir.Node { return vmkcall(syslook(name), t, init, args) } -func mkcall1(fn *Node, t *types.Type, init *Nodes, args ...*Node) *Node { +func mkcall1(fn *ir.Node, t *types.Type, init *ir.Nodes, args ...*ir.Node) *ir.Node { return vmkcall(fn, t, init, args) } -func conv(n *Node, t *types.Type) *Node { +func conv(n *ir.Node, t *types.Type) *ir.Node { if types.Identical(n.Type, t) { return n } - n = nod(OCONV, n, nil) + n = ir.Nod(ir.OCONV, n, nil) n.Type = t n = typecheck(n, ctxExpr) return n @@ -2522,11 +2523,11 @@ func conv(n *Node, t *types.Type) *Node { // convnop converts node n to type t using the OCONVNOP op // and typechecks the result with ctxExpr. -func convnop(n *Node, t *types.Type) *Node { +func convnop(n *ir.Node, t *types.Type) *ir.Node { if types.Identical(n.Type, t) { return n } - n = nod(OCONVNOP, n, nil) + n = ir.Nod(ir.OCONVNOP, n, nil) n.Type = t n = typecheck(n, ctxExpr) return n @@ -2535,23 +2536,23 @@ func convnop(n *Node, t *types.Type) *Node { // byteindex converts n, which is byte-sized, to an int used to index into an array. // We cannot use conv, because we allow converting bool to int here, // which is forbidden in user code. -func byteindex(n *Node) *Node { +func byteindex(n *ir.Node) *ir.Node { // We cannot convert from bool to int directly. // While converting from int8 to int is possible, it would yield // the wrong result for negative values. 
// Reinterpreting the value as an unsigned byte solves both cases. - if !types.Identical(n.Type, types.Types[TUINT8]) { - n = nod(OCONV, n, nil) - n.Type = types.Types[TUINT8] + if !types.Identical(n.Type, types.Types[types.TUINT8]) { + n = ir.Nod(ir.OCONV, n, nil) + n.Type = types.Types[types.TUINT8] n.SetTypecheck(1) } - n = nod(OCONV, n, nil) - n.Type = types.Types[TINT] + n = ir.Nod(ir.OCONV, n, nil) + n.Type = types.Types[types.TINT] n.SetTypecheck(1) return n } -func chanfn(name string, n int, t *types.Type) *Node { +func chanfn(name string, n int, t *types.Type) *ir.Node { if !t.IsChan() { base.Fatalf("chanfn %v", t) } @@ -2567,7 +2568,7 @@ func chanfn(name string, n int, t *types.Type) *Node { return fn } -func mapfn(name string, t *types.Type) *Node { +func mapfn(name string, t *types.Type) *ir.Node { if !t.IsMap() { base.Fatalf("mapfn %v", t) } @@ -2576,7 +2577,7 @@ func mapfn(name string, t *types.Type) *Node { return fn } -func mapfndel(name string, t *types.Type) *Node { +func mapfndel(name string, t *types.Type) *ir.Node { if !t.IsMap() { base.Fatalf("mapfn %v", t) } @@ -2635,13 +2636,13 @@ func mapfast(t *types.Type) int { return mapslow } -func writebarrierfn(name string, l *types.Type, r *types.Type) *Node { +func writebarrierfn(name string, l *types.Type, r *types.Type) *ir.Node { fn := syslook(name) fn = substArgTypes(fn, l, r) return fn } -func addstr(n *Node, init *Nodes) *Node { +func addstr(n *ir.Node, init *ir.Nodes) *ir.Node { // order.expr rewrote OADDSTR to have a list of strings. c := n.List.Len() @@ -2653,7 +2654,7 @@ func addstr(n *Node, init *Nodes) *Node { if n.Esc == EscNone { sz := int64(0) for _, n1 := range n.List.Slice() { - if n1.Op == OLITERAL { + if n1.Op == ir.OLITERAL { sz += int64(len(n1.StringVal())) } } @@ -2661,15 +2662,15 @@ func addstr(n *Node, init *Nodes) *Node { // Don't allocate the buffer if the result won't fit. if sz < tmpstringbufsize { // Create temporary buffer for result string on stack. - t := types.NewArray(types.Types[TUINT8], tmpstringbufsize) - buf = nod(OADDR, temp(t), nil) + t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize) + buf = ir.Nod(ir.OADDR, temp(t), nil) } } // build list of string arguments - args := []*Node{buf} + args := []*ir.Node{buf} for _, n2 := range n.List.Slice() { - args = append(args, conv(n2, types.Types[TSTRING])) + args = append(args, conv(n2, types.Types[types.TSTRING])) } var fn string @@ -2681,18 +2682,18 @@ func addstr(n *Node, init *Nodes) *Node { // large numbers of strings are passed to the runtime as a slice. fn = "concatstrings" - t := types.NewSlice(types.Types[TSTRING]) - slice := nod(OCOMPLIT, nil, typenod(t)) + t := types.NewSlice(types.Types[types.TSTRING]) + slice := ir.Nod(ir.OCOMPLIT, nil, typenod(t)) if prealloc[n] != nil { prealloc[slice] = prealloc[n] } slice.List.Set(args[1:]) // skip buf arg - args = []*Node{buf, slice} + args = []*ir.Node{buf, slice} slice.Esc = EscNone } cat := syslook(fn) - r := nod(OCALL, cat, nil) + r := ir.Nod(ir.OCALL, cat, nil) r.List.Set(args) r = typecheck(r, ctxExpr) r = walkexpr(r, init) @@ -2701,7 +2702,7 @@ func addstr(n *Node, init *Nodes) *Node { return r } -func walkAppendArgs(n *Node, init *Nodes) { +func walkAppendArgs(n *ir.Node, init *ir.Nodes) { walkexprlistsafe(n.List.Slice(), init) // walkexprlistsafe will leave OINDEX (s[n]) alone if both s @@ -2727,7 +2728,7 @@ func walkAppendArgs(n *Node, init *Nodes) { // s // // l2 is allowed to be a string. 
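A user-level sketch of the case the comment above allows, where the second operand of the append is a string rather than a slice:

    package main

    import "fmt"

    func main() {
        s := []byte("foo")
        s = append(s, "bar"...) // l2 is a string here
        fmt.Println(string(s))  // foobar
    }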
-func appendslice(n *Node, init *Nodes) *Node { +func appendslice(n *ir.Node, init *ir.Nodes) *ir.Node { walkAppendArgs(n, init) l1 := n.List.First() @@ -2735,82 +2736,82 @@ func appendslice(n *Node, init *Nodes) *Node { l2 = cheapexpr(l2, init) n.List.SetSecond(l2) - var nodes Nodes + var nodes ir.Nodes // var s []T s := temp(l1.Type) - nodes.Append(nod(OAS, s, l1)) // s = l1 + nodes.Append(ir.Nod(ir.OAS, s, l1)) // s = l1 elemtype := s.Type.Elem() // n := len(s) + len(l2) - nn := temp(types.Types[TINT]) - nodes.Append(nod(OAS, nn, nod(OADD, nod(OLEN, s, nil), nod(OLEN, l2, nil)))) + nn := temp(types.Types[types.TINT]) + nodes.Append(ir.Nod(ir.OAS, nn, ir.Nod(ir.OADD, ir.Nod(ir.OLEN, s, nil), ir.Nod(ir.OLEN, l2, nil)))) // if uint(n) > uint(cap(s)) - nif := nod(OIF, nil, nil) - nuint := conv(nn, types.Types[TUINT]) - scapuint := conv(nod(OCAP, s, nil), types.Types[TUINT]) - nif.Left = nod(OGT, nuint, scapuint) + nif := ir.Nod(ir.OIF, nil, nil) + nuint := conv(nn, types.Types[types.TUINT]) + scapuint := conv(ir.Nod(ir.OCAP, s, nil), types.Types[types.TUINT]) + nif.Left = ir.Nod(ir.OGT, nuint, scapuint) // instantiate growslice(typ *type, []any, int) []any fn := syslook("growslice") fn = substArgTypes(fn, elemtype, elemtype) // s = growslice(T, s, n) - nif.Nbody.Set1(nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(elemtype), s, nn))) + nif.Nbody.Set1(ir.Nod(ir.OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(elemtype), s, nn))) nodes.Append(nif) // s = s[:n] - nt := nod(OSLICE, s, nil) + nt := ir.Nod(ir.OSLICE, s, nil) nt.SetSliceBounds(nil, nn, nil) nt.SetBounded(true) - nodes.Append(nod(OAS, s, nt)) + nodes.Append(ir.Nod(ir.OAS, s, nt)) - var ncopy *Node + var ncopy *ir.Node if elemtype.HasPointers() { // copy(s[len(l1):], l2) - nptr1 := nod(OSLICE, s, nil) + nptr1 := ir.Nod(ir.OSLICE, s, nil) nptr1.Type = s.Type - nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil) + nptr1.SetSliceBounds(ir.Nod(ir.OLEN, l1, nil), nil, nil) nptr1 = cheapexpr(nptr1, &nodes) nptr2 := l2 - Curfn.Func.setWBPos(n.Pos) + Curfn.Func.SetWBPos(n.Pos) // instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int fn := syslook("typedslicecopy") fn = substArgTypes(fn, l1.Type.Elem(), l2.Type.Elem()) - ptr1, len1 := nptr1.backingArrayPtrLen() - ptr2, len2 := nptr2.backingArrayPtrLen() - ncopy = mkcall1(fn, types.Types[TINT], &nodes, typename(elemtype), ptr1, len1, ptr2, len2) + ptr1, len1 := backingArrayPtrLen(nptr1) + ptr2, len2 := backingArrayPtrLen(nptr2) + ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, typename(elemtype), ptr1, len1, ptr2, len2) } else if instrumenting && !base.Flag.CompilingRuntime { // rely on runtime to instrument: // copy(s[len(l1):], l2) // l2 can be a slice or string. 
- nptr1 := nod(OSLICE, s, nil) + nptr1 := ir.Nod(ir.OSLICE, s, nil) nptr1.Type = s.Type - nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil) + nptr1.SetSliceBounds(ir.Nod(ir.OLEN, l1, nil), nil, nil) nptr1 = cheapexpr(nptr1, &nodes) nptr2 := l2 - ptr1, len1 := nptr1.backingArrayPtrLen() - ptr2, len2 := nptr2.backingArrayPtrLen() + ptr1, len1 := backingArrayPtrLen(nptr1) + ptr2, len2 := backingArrayPtrLen(nptr2) fn := syslook("slicecopy") fn = substArgTypes(fn, ptr1.Type.Elem(), ptr2.Type.Elem()) - ncopy = mkcall1(fn, types.Types[TINT], &nodes, ptr1, len1, ptr2, len2, nodintconst(elemtype.Width)) + ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, nodintconst(elemtype.Width)) } else { // memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T)) - nptr1 := nod(OINDEX, s, nod(OLEN, l1, nil)) + nptr1 := ir.Nod(ir.OINDEX, s, ir.Nod(ir.OLEN, l1, nil)) nptr1.SetBounded(true) - nptr1 = nod(OADDR, nptr1, nil) + nptr1 = ir.Nod(ir.OADDR, nptr1, nil) - nptr2 := nod(OSPTR, l2, nil) + nptr2 := ir.Nod(ir.OSPTR, l2, nil) - nwid := cheapexpr(conv(nod(OLEN, l2, nil), types.Types[TUINTPTR]), &nodes) - nwid = nod(OMUL, nwid, nodintconst(elemtype.Width)) + nwid := cheapexpr(conv(ir.Nod(ir.OLEN, l2, nil), types.Types[types.TUINTPTR]), &nodes) + nwid = ir.Nod(ir.OMUL, nwid, nodintconst(elemtype.Width)) // instantiate func memmove(to *any, frm *any, length uintptr) fn := syslook("memmove") @@ -2827,7 +2828,7 @@ func appendslice(n *Node, init *Nodes) *Node { // isAppendOfMake reports whether n is of the form append(x , make([]T, y)...). // isAppendOfMake assumes n has already been typechecked. -func isAppendOfMake(n *Node) bool { +func isAppendOfMake(n *ir.Node) bool { if base.Flag.N != 0 || instrumenting { return false } @@ -2836,12 +2837,12 @@ func isAppendOfMake(n *Node) bool { base.Fatalf("missing typecheck: %+v", n) } - if n.Op != OAPPEND || !n.IsDDD() || n.List.Len() != 2 { + if n.Op != ir.OAPPEND || !n.IsDDD() || n.List.Len() != 2 { return false } second := n.List.Second() - if second.Op != OMAKESLICE || second.Right != nil { + if second.Op != ir.OMAKESLICE || second.Right != nil { return false } @@ -2852,7 +2853,7 @@ func isAppendOfMake(n *Node) bool { // The care of overflow of the len argument to make will be handled by an explicit check of int(len) < 0 during runtime. y := second.Left - if !Isconst(y, constant.Int) && y.Type.Size() > types.Types[TUINT].Size() { + if !ir.IsConst(y, constant.Int) && y.Type.Size() > types.Types[types.TUINT].Size() { return false } @@ -2886,11 +2887,11 @@ func isAppendOfMake(n *Node) bool { // } // } // s -func extendslice(n *Node, init *Nodes) *Node { +func extendslice(n *ir.Node, init *ir.Nodes) *ir.Node { // isAppendOfMake made sure all possible positive values of l2 fit into an uint. // The case of l2 overflow when converting from e.g. uint to int is handled by an explicit // check of l2 < 0 at runtime which is generated below. - l2 := conv(n.List.Second().Left, types.Types[TINT]) + l2 := conv(n.List.Second().Left, types.Types[types.TINT]) l2 = typecheck(l2, ctxExpr) n.List.SetSecond(l2) // walkAppendArgs expects l2 in n.List.Second(). 
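The append+make form that isAppendOfMake matched and extendslice now expands is, at the source level:

    package main

    import "fmt"

    func main() {
        x := []int{1, 2}
        // Rather than materializing the make([]int, 3) temporary and
        // copying it, extendslice grows x and zeroes the new tail in
        // place (the memclr calls below).
        x = append(x, make([]int, 3)...)
        fmt.Println(x) // [1 2 0 0 0]
    }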
@@ -2899,10 +2900,10 @@ func extendslice(n *Node, init *Nodes) *Node { l1 := n.List.First() l2 = n.List.Second() // re-read l2, as it may have been updated by walkAppendArgs - var nodes []*Node + var nodes []*ir.Node // if l2 >= 0 (likely happens), do nothing - nifneg := nod(OIF, nod(OGE, l2, nodintconst(0)), nil) + nifneg := ir.Nod(ir.OIF, ir.Nod(ir.OGE, l2, nodintconst(0)), nil) nifneg.SetLikely(true) // else panicmakeslicelen() @@ -2911,67 +2912,67 @@ func extendslice(n *Node, init *Nodes) *Node { // s := l1 s := temp(l1.Type) - nodes = append(nodes, nod(OAS, s, l1)) + nodes = append(nodes, ir.Nod(ir.OAS, s, l1)) elemtype := s.Type.Elem() // n := len(s) + l2 - nn := temp(types.Types[TINT]) - nodes = append(nodes, nod(OAS, nn, nod(OADD, nod(OLEN, s, nil), l2))) + nn := temp(types.Types[types.TINT]) + nodes = append(nodes, ir.Nod(ir.OAS, nn, ir.Nod(ir.OADD, ir.Nod(ir.OLEN, s, nil), l2))) // if uint(n) > uint(cap(s)) - nuint := conv(nn, types.Types[TUINT]) - capuint := conv(nod(OCAP, s, nil), types.Types[TUINT]) - nif := nod(OIF, nod(OGT, nuint, capuint), nil) + nuint := conv(nn, types.Types[types.TUINT]) + capuint := conv(ir.Nod(ir.OCAP, s, nil), types.Types[types.TUINT]) + nif := ir.Nod(ir.OIF, ir.Nod(ir.OGT, nuint, capuint), nil) // instantiate growslice(typ *type, old []any, newcap int) []any fn := syslook("growslice") fn = substArgTypes(fn, elemtype, elemtype) // s = growslice(T, s, n) - nif.Nbody.Set1(nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(elemtype), s, nn))) + nif.Nbody.Set1(ir.Nod(ir.OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(elemtype), s, nn))) nodes = append(nodes, nif) // s = s[:n] - nt := nod(OSLICE, s, nil) + nt := ir.Nod(ir.OSLICE, s, nil) nt.SetSliceBounds(nil, nn, nil) nt.SetBounded(true) - nodes = append(nodes, nod(OAS, s, nt)) + nodes = append(nodes, ir.Nod(ir.OAS, s, nt)) // lptr := &l1[0] l1ptr := temp(l1.Type.Elem().PtrTo()) - tmp := nod(OSPTR, l1, nil) - nodes = append(nodes, nod(OAS, l1ptr, tmp)) + tmp := ir.Nod(ir.OSPTR, l1, nil) + nodes = append(nodes, ir.Nod(ir.OAS, l1ptr, tmp)) // sptr := &s[0] sptr := temp(elemtype.PtrTo()) - tmp = nod(OSPTR, s, nil) - nodes = append(nodes, nod(OAS, sptr, tmp)) + tmp = ir.Nod(ir.OSPTR, s, nil) + nodes = append(nodes, ir.Nod(ir.OAS, sptr, tmp)) // hp := &s[len(l1)] - hp := nod(OINDEX, s, nod(OLEN, l1, nil)) + hp := ir.Nod(ir.OINDEX, s, ir.Nod(ir.OLEN, l1, nil)) hp.SetBounded(true) - hp = nod(OADDR, hp, nil) - hp = convnop(hp, types.Types[TUNSAFEPTR]) + hp = ir.Nod(ir.OADDR, hp, nil) + hp = convnop(hp, types.Types[types.TUNSAFEPTR]) // hn := l2 * sizeof(elem(s)) - hn := nod(OMUL, l2, nodintconst(elemtype.Width)) - hn = conv(hn, types.Types[TUINTPTR]) + hn := ir.Nod(ir.OMUL, l2, nodintconst(elemtype.Width)) + hn = conv(hn, types.Types[types.TUINTPTR]) clrname := "memclrNoHeapPointers" hasPointers := elemtype.HasPointers() if hasPointers { clrname = "memclrHasPointers" - Curfn.Func.setWBPos(n.Pos) + Curfn.Func.SetWBPos(n.Pos) } - var clr Nodes + var clr ir.Nodes clrfn := mkcall(clrname, nil, &clr, hp, hn) clr.Append(clrfn) if hasPointers { // if l1ptr == sptr - nifclr := nod(OIF, nod(OEQ, l1ptr, sptr), nil) + nifclr := ir.Nod(ir.OIF, ir.Nod(ir.OEQ, l1ptr, sptr), nil) nifclr.Nbody = clr nodes = append(nodes, nifclr) } else { @@ -3005,7 +3006,7 @@ func extendslice(n *Node, init *Nodes) *Node { // ... 
// }
 // s
-func walkappend(n *Node, init *Nodes, dst *Node) *Node {
+func walkappend(n *ir.Node, init *ir.Nodes, dst *ir.Node) *ir.Node {
 if !samesafeexpr(dst, n.List.First()) {
 n.List.SetFirst(safeexpr(n.List.First(), init))
 n.List.SetFirst(walkexpr(n.List.First(), init))
@@ -3041,39 +3042,39 @@ func walkappend(n *Node, init *Nodes, dst *Node) *Node {
 return n
 }

- var l []*Node
+ var l []*ir.Node

 ns := temp(nsrc.Type)
- l = append(l, nod(OAS, ns, nsrc)) // s = src
+ l = append(l, ir.Nod(ir.OAS, ns, nsrc)) // s = src

 na := nodintconst(int64(argc)) // const argc
- nx := nod(OIF, nil, nil) // if cap(s) - len(s) < argc
- nx.Left = nod(OLT, nod(OSUB, nod(OCAP, ns, nil), nod(OLEN, ns, nil)), na)
+ nx := ir.Nod(ir.OIF, nil, nil) // if cap(s) - len(s) < argc
+ nx.Left = ir.Nod(ir.OLT, ir.Nod(ir.OSUB, ir.Nod(ir.OCAP, ns, nil), ir.Nod(ir.OLEN, ns, nil)), na)

 fn := syslook("growslice") // growslice(<type>, old []T, mincap int) (ret []T)
 fn = substArgTypes(fn, ns.Type.Elem(), ns.Type.Elem())

- nx.Nbody.Set1(nod(OAS, ns,
+ nx.Nbody.Set1(ir.Nod(ir.OAS, ns,
 mkcall1(fn, ns.Type, &nx.Ninit, typename(ns.Type.Elem()), ns,
- nod(OADD, nod(OLEN, ns, nil), na))))
+ ir.Nod(ir.OADD, ir.Nod(ir.OLEN, ns, nil), na))))

 l = append(l, nx)

- nn := temp(types.Types[TINT])
- l = append(l, nod(OAS, nn, nod(OLEN, ns, nil))) // n = len(s)
+ nn := temp(types.Types[types.TINT])
+ l = append(l, ir.Nod(ir.OAS, nn, ir.Nod(ir.OLEN, ns, nil))) // n = len(s)

- nx = nod(OSLICE, ns, nil) // ...s[:n+argc]
- nx.SetSliceBounds(nil, nod(OADD, nn, na), nil)
+ nx = ir.Nod(ir.OSLICE, ns, nil) // ...s[:n+argc]
+ nx.SetSliceBounds(nil, ir.Nod(ir.OADD, nn, na), nil)
 nx.SetBounded(true)
- l = append(l, nod(OAS, ns, nx)) // s = s[:n+argc]
+ l = append(l, ir.Nod(ir.OAS, ns, nx)) // s = s[:n+argc]

 ls = n.List.Slice()[1:]
 for i, n := range ls {
- nx = nod(OINDEX, ns, nn) // s[n] ...
+ nx = ir.Nod(ir.OINDEX, ns, nn) // s[n] ...
 nx.SetBounded(true)
- l = append(l, nod(OAS, nx, n)) // s[n] = arg
+ l = append(l, ir.Nod(ir.OAS, nx, n)) // s[n] = arg
 if i+1 < len(ls) {
- l = append(l, nod(OAS, nn, nod(OADD, nn, nodintconst(1)))) // n = n + 1
+ l = append(l, ir.Nod(ir.OAS, nn, ir.Nod(ir.OADD, nn, nodintconst(1)))) // n = n + 1
 }
 }

@@ -3094,14 +3095,14 @@ func walkappend(n *Node, init *Nodes, dst *Node) *Node {
 //
 // Also works if b is a string.
 //
-func copyany(n *Node, init *Nodes, runtimecall bool) *Node {
+func copyany(n *ir.Node, init *ir.Nodes, runtimecall bool) *ir.Node {
 if n.Left.Type.Elem().HasPointers() {
- Curfn.Func.setWBPos(n.Pos)
+ Curfn.Func.SetWBPos(n.Pos)
 fn := writebarrierfn("typedslicecopy", n.Left.Type.Elem(), n.Right.Type.Elem())
 n.Left = cheapexpr(n.Left, init)
- ptrL, lenL := n.Left.backingArrayPtrLen()
+ ptrL, lenL := backingArrayPtrLen(n.Left)
 n.Right = cheapexpr(n.Right, init)
- ptrR, lenR := n.Right.backingArrayPtrLen()
+ ptrR, lenR := backingArrayPtrLen(n.Right)
 return mkcall1(fn, n.Type, init, typename(n.Left.Type.Elem()), ptrL, lenL, ptrR, lenR)
 }

 if runtimecall {
 // rely on runtime to instrument:
 // copy(n.Left, n.Right)
 // n.Right can be a slice or string.
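A short sketch of the string-source case copyany accepts, with the result truncated to the destination length as usual:

    package main

    import "fmt"

    func main() {
        dst := make([]byte, 4)
        n := copy(dst, "abcdef") // n.Right is a string here
        fmt.Println(n, string(dst)) // 4 abcd
    }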
n.Left = cheapexpr(n.Left, init) - ptrL, lenL := n.Left.backingArrayPtrLen() + ptrL, lenL := backingArrayPtrLen(n.Left) n.Right = cheapexpr(n.Right, init) - ptrR, lenR := n.Right.backingArrayPtrLen() + ptrR, lenR := backingArrayPtrLen(n.Right) fn := syslook("slicecopy") fn = substArgTypes(fn, ptrL.Type.Elem(), ptrR.Type.Elem()) @@ -3125,36 +3126,36 @@ func copyany(n *Node, init *Nodes, runtimecall bool) *Node { n.Right = walkexpr(n.Right, init) nl := temp(n.Left.Type) nr := temp(n.Right.Type) - var l []*Node - l = append(l, nod(OAS, nl, n.Left)) - l = append(l, nod(OAS, nr, n.Right)) + var l []*ir.Node + l = append(l, ir.Nod(ir.OAS, nl, n.Left)) + l = append(l, ir.Nod(ir.OAS, nr, n.Right)) - nfrm := nod(OSPTR, nr, nil) - nto := nod(OSPTR, nl, nil) + nfrm := ir.Nod(ir.OSPTR, nr, nil) + nto := ir.Nod(ir.OSPTR, nl, nil) - nlen := temp(types.Types[TINT]) + nlen := temp(types.Types[types.TINT]) // n = len(to) - l = append(l, nod(OAS, nlen, nod(OLEN, nl, nil))) + l = append(l, ir.Nod(ir.OAS, nlen, ir.Nod(ir.OLEN, nl, nil))) // if n > len(frm) { n = len(frm) } - nif := nod(OIF, nil, nil) + nif := ir.Nod(ir.OIF, nil, nil) - nif.Left = nod(OGT, nlen, nod(OLEN, nr, nil)) - nif.Nbody.Append(nod(OAS, nlen, nod(OLEN, nr, nil))) + nif.Left = ir.Nod(ir.OGT, nlen, ir.Nod(ir.OLEN, nr, nil)) + nif.Nbody.Append(ir.Nod(ir.OAS, nlen, ir.Nod(ir.OLEN, nr, nil))) l = append(l, nif) // if to.ptr != frm.ptr { memmove( ... ) } - ne := nod(OIF, nod(ONE, nto, nfrm), nil) + ne := ir.Nod(ir.OIF, ir.Nod(ir.ONE, nto, nfrm), nil) ne.SetLikely(true) l = append(l, ne) fn := syslook("memmove") fn = substArgTypes(fn, nl.Type.Elem(), nl.Type.Elem()) - nwid := temp(types.Types[TUINTPTR]) - setwid := nod(OAS, nwid, conv(nlen, types.Types[TUINTPTR])) + nwid := temp(types.Types[types.TUINTPTR]) + setwid := ir.Nod(ir.OAS, nwid, conv(nlen, types.Types[types.TUINTPTR])) ne.Nbody.Append(setwid) - nwid = nod(OMUL, nwid, nodintconst(nl.Type.Elem().Width)) + nwid = ir.Nod(ir.OMUL, nwid, nodintconst(nl.Type.Elem().Width)) call := mkcall1(fn, nil, init, nto, nfrm, nwid) ne.Nbody.Append(call) @@ -3164,7 +3165,7 @@ func copyany(n *Node, init *Nodes, runtimecall bool) *Node { return nlen } -func eqfor(t *types.Type) (n *Node, needsize bool) { +func eqfor(t *types.Type) (n *ir.Node, needsize bool) { // Should only arrive here with large memory or // a struct/array containing a non-memory field/element. // Small memory is handled inline, and single non-memory @@ -3176,13 +3177,13 @@ func eqfor(t *types.Type) (n *Node, needsize bool) { return n, true case ASPECIAL: sym := typesymprefix(".eq", t) - n := newname(sym) + n := NewName(sym) setNodeNameFunc(n) - n.Type = functype(nil, []*Node{ + n.Type = functype(nil, []*ir.Node{ anonfield(types.NewPtr(t)), anonfield(types.NewPtr(t)), - }, []*Node{ - anonfield(types.Types[TBOOL]), + }, []*ir.Node{ + anonfield(types.Types[types.TBOOL]), }) return n, false } @@ -3192,8 +3193,8 @@ func eqfor(t *types.Type) (n *Node, needsize bool) { // The result of walkcompare MUST be assigned back to n, e.g. // n.Left = walkcompare(n.Left, init) -func walkcompare(n *Node, init *Nodes) *Node { - if n.Left.Type.IsInterface() && n.Right.Type.IsInterface() && n.Left.Op != ONIL && n.Right.Op != ONIL { +func walkcompare(n *ir.Node, init *ir.Nodes) *ir.Node { + if n.Left.Type.IsInterface() && n.Right.Type.IsInterface() && n.Left.Op != ir.ONIL && n.Right.Op != ir.ONIL { return walkcompareInterface(n, init) } @@ -3218,31 +3219,31 @@ func walkcompare(n *Node, init *Nodes) *Node { // Handle both == and !=. 
eq := n.Op - andor := OOROR - if eq == OEQ { - andor = OANDAND + andor := ir.OOROR + if eq == ir.OEQ { + andor = ir.OANDAND } // Check for types equal. // For empty interface, this is: // l.tab == type(r) // For non-empty interface, this is: // l.tab != nil && l.tab._type == type(r) - var eqtype *Node - tab := nod(OITAB, l, nil) + var eqtype *ir.Node + tab := ir.Nod(ir.OITAB, l, nil) rtyp := typename(r.Type) if l.Type.IsEmptyInterface() { - tab.Type = types.NewPtr(types.Types[TUINT8]) + tab.Type = types.NewPtr(types.Types[types.TUINT8]) tab.SetTypecheck(1) - eqtype = nod(eq, tab, rtyp) + eqtype = ir.Nod(eq, tab, rtyp) } else { - nonnil := nod(brcom(eq), nodnil(), tab) - match := nod(eq, itabType(tab), rtyp) - eqtype = nod(andor, nonnil, match) + nonnil := ir.Nod(brcom(eq), nodnil(), tab) + match := ir.Nod(eq, itabType(tab), rtyp) + eqtype = ir.Nod(andor, nonnil, match) } // Check for data equal. - eqdata := nod(eq, ifaceData(n.Pos, l, r.Type), r) + eqdata := ir.Nod(eq, ifaceData(n.Pos, l, r.Type), r) // Put it all together. - expr := nod(andor, eqtype, eqdata) + expr := ir.Nod(andor, eqtype, eqdata) n = finishcompare(n, expr, init) return n } @@ -3272,10 +3273,10 @@ func walkcompare(n *Node, init *Nodes) *Node { // instead, and arrange for the constant // operand to be the first argument. l, r := n.Left, n.Right - if r.Op == OLITERAL { + if r.Op == ir.OLITERAL { l, r = r, l } - constcmp := l.Op == OLITERAL && r.Op != OLITERAL + constcmp := l.Op == ir.OLITERAL && r.Op != ir.OLITERAL var fn string var paramType *types.Type @@ -3285,44 +3286,44 @@ func walkcompare(n *Node, init *Nodes) *Node { if constcmp { fn = "libfuzzerTraceConstCmp1" } - paramType = types.Types[TUINT8] + paramType = types.Types[types.TUINT8] case 2: fn = "libfuzzerTraceCmp2" if constcmp { fn = "libfuzzerTraceConstCmp2" } - paramType = types.Types[TUINT16] + paramType = types.Types[types.TUINT16] case 4: fn = "libfuzzerTraceCmp4" if constcmp { fn = "libfuzzerTraceConstCmp4" } - paramType = types.Types[TUINT32] + paramType = types.Types[types.TUINT32] case 8: fn = "libfuzzerTraceCmp8" if constcmp { fn = "libfuzzerTraceConstCmp8" } - paramType = types.Types[TUINT64] + paramType = types.Types[types.TUINT64] default: base.Fatalf("unexpected integer size %d for %v", t.Size(), t) } init.Append(mkcall(fn, nil, init, tracecmpArg(l, paramType, init), tracecmpArg(r, paramType, init))) } return n - case TARRAY: + case types.TARRAY: // We can compare several elements at once with 2/4/8 byte integer compares inline = t.NumElem() <= 1 || (issimple[t.Elem().Etype] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize)) - case TSTRUCT: + case types.TSTRUCT: inline = t.NumComponents(types.IgnoreBlankFields) <= 4 } cmpl := n.Left - for cmpl != nil && cmpl.Op == OCONVNOP { + for cmpl != nil && cmpl.Op == ir.OCONVNOP { cmpl = cmpl.Left } cmpr := n.Right - for cmpr != nil && cmpr.Op == OCONVNOP { + for cmpr != nil && cmpr.Op == ir.OCONVNOP { cmpr = cmpr.Left } @@ -3334,32 +3335,32 @@ func walkcompare(n *Node, init *Nodes) *Node { } fn, needsize := eqfor(t) - call := nod(OCALL, fn, nil) - call.List.Append(nod(OADDR, cmpl, nil)) - call.List.Append(nod(OADDR, cmpr, nil)) + call := ir.Nod(ir.OCALL, fn, nil) + call.List.Append(ir.Nod(ir.OADDR, cmpl, nil)) + call.List.Append(ir.Nod(ir.OADDR, cmpr, nil)) if needsize { call.List.Append(nodintconst(t.Width)) } res := call - if n.Op != OEQ { - res = nod(ONOT, res, nil) + if n.Op != ir.OEQ { + res = ir.Nod(ir.ONOT, res, nil) } n = finishcompare(n, res, init) return n } // inline: build boolean 
expression comparing element by element - andor := OANDAND - if n.Op == ONE { - andor = OOROR + andor := ir.OANDAND + if n.Op == ir.ONE { + andor = ir.OOROR } - var expr *Node - compare := func(el, er *Node) { - a := nod(n.Op, el, er) + var expr *ir.Node + compare := func(el, er *ir.Node) { + a := ir.Nod(n.Op, el, er) if expr == nil { expr = a } else { - expr = nod(andor, expr, a) + expr = ir.Nod(andor, expr, a) } } cmpl = safeexpr(cmpl, init) @@ -3371,8 +3372,8 @@ func walkcompare(n *Node, init *Nodes) *Node { continue } compare( - nodSym(OXDOT, cmpl, sym), - nodSym(OXDOT, cmpr, sym), + nodSym(ir.OXDOT, cmpl, sym), + nodSym(ir.OXDOT, cmpr, sym), ) } } else { @@ -3385,45 +3386,45 @@ func walkcompare(n *Node, init *Nodes) *Node { var convType *types.Type switch { case remains >= 8 && combine64bit: - convType = types.Types[TINT64] + convType = types.Types[types.TINT64] step = 8 / t.Elem().Width case remains >= 4 && combine32bit: - convType = types.Types[TUINT32] + convType = types.Types[types.TUINT32] step = 4 / t.Elem().Width case remains >= 2 && combine16bit: - convType = types.Types[TUINT16] + convType = types.Types[types.TUINT16] step = 2 / t.Elem().Width default: step = 1 } if step == 1 { compare( - nod(OINDEX, cmpl, nodintconst(i)), - nod(OINDEX, cmpr, nodintconst(i)), + ir.Nod(ir.OINDEX, cmpl, nodintconst(i)), + ir.Nod(ir.OINDEX, cmpr, nodintconst(i)), ) i++ remains -= t.Elem().Width } else { elemType := t.Elem().ToUnsigned() - cmplw := nod(OINDEX, cmpl, nodintconst(i)) + cmplw := ir.Nod(ir.OINDEX, cmpl, nodintconst(i)) cmplw = conv(cmplw, elemType) // convert to unsigned cmplw = conv(cmplw, convType) // widen - cmprw := nod(OINDEX, cmpr, nodintconst(i)) + cmprw := ir.Nod(ir.OINDEX, cmpr, nodintconst(i)) cmprw = conv(cmprw, elemType) cmprw = conv(cmprw, convType) // For code like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ... // ssa will generate a single large load. for offset := int64(1); offset < step; offset++ { - lb := nod(OINDEX, cmpl, nodintconst(i+offset)) + lb := ir.Nod(ir.OINDEX, cmpl, nodintconst(i+offset)) lb = conv(lb, elemType) lb = conv(lb, convType) - lb = nod(OLSH, lb, nodintconst(8*t.Elem().Width*offset)) - cmplw = nod(OOR, cmplw, lb) - rb := nod(OINDEX, cmpr, nodintconst(i+offset)) + lb = ir.Nod(ir.OLSH, lb, nodintconst(8*t.Elem().Width*offset)) + cmplw = ir.Nod(ir.OOR, cmplw, lb) + rb := ir.Nod(ir.OINDEX, cmpr, nodintconst(i+offset)) rb = conv(rb, elemType) rb = conv(rb, convType) - rb = nod(OLSH, rb, nodintconst(8*t.Elem().Width*offset)) - cmprw = nod(OOR, cmprw, rb) + rb = ir.Nod(ir.OLSH, rb, nodintconst(8*t.Elem().Width*offset)) + cmprw = ir.Nod(ir.OOR, cmprw, rb) } compare(cmplw, cmprw) i += step @@ -3432,13 +3433,13 @@ func walkcompare(n *Node, init *Nodes) *Node { } } if expr == nil { - expr = nodbool(n.Op == OEQ) + expr = nodbool(n.Op == ir.OEQ) // We still need to use cmpl and cmpr, in case they contain // an expression which might panic. See issue 23837. t := temp(cmpl.Type) - a1 := nod(OAS, t, cmpl) + a1 := ir.Nod(ir.OAS, t, cmpl) a1 = typecheck(a1, ctxStmt) - a2 := nod(OAS, t, cmpr) + a2 := ir.Nod(ir.OAS, t, cmpr) a2 = typecheck(a2, ctxStmt) init.Append(a1, a2) } @@ -3446,39 +3447,39 @@ func walkcompare(n *Node, init *Nodes) *Node { return n } -func tracecmpArg(n *Node, t *types.Type, init *Nodes) *Node { +func tracecmpArg(n *ir.Node, t *types.Type, init *ir.Nodes) *ir.Node { // Ugly hack to avoid "constant -1 overflows uintptr" errors, etc. 
- if n.Op == OLITERAL && n.Type.IsSigned() && n.Int64Val() < 0 { + if n.Op == ir.OLITERAL && n.Type.IsSigned() && n.Int64Val() < 0 { n = copyexpr(n, n.Type, init) } return conv(n, t) } -func walkcompareInterface(n *Node, init *Nodes) *Node { +func walkcompareInterface(n *ir.Node, init *ir.Nodes) *ir.Node { n.Right = cheapexpr(n.Right, init) n.Left = cheapexpr(n.Left, init) eqtab, eqdata := eqinterface(n.Left, n.Right) - var cmp *Node - if n.Op == OEQ { - cmp = nod(OANDAND, eqtab, eqdata) + var cmp *ir.Node + if n.Op == ir.OEQ { + cmp = ir.Nod(ir.OANDAND, eqtab, eqdata) } else { - eqtab.Op = ONE - cmp = nod(OOROR, eqtab, nod(ONOT, eqdata, nil)) + eqtab.Op = ir.ONE + cmp = ir.Nod(ir.OOROR, eqtab, ir.Nod(ir.ONOT, eqdata, nil)) } return finishcompare(n, cmp, init) } -func walkcompareString(n *Node, init *Nodes) *Node { +func walkcompareString(n *ir.Node, init *ir.Nodes) *ir.Node { // Rewrite comparisons to short constant strings as length+byte-wise comparisons. - var cs, ncs *Node // const string, non-const string + var cs, ncs *ir.Node // const string, non-const string switch { - case Isconst(n.Left, constant.String) && Isconst(n.Right, constant.String): + case ir.IsConst(n.Left, constant.String) && ir.IsConst(n.Right, constant.String): // ignore; will be constant evaluated - case Isconst(n.Left, constant.String): + case ir.IsConst(n.Left, constant.String): cs = n.Left ncs = n.Right - case Isconst(n.Right, constant.String): + case ir.IsConst(n.Right, constant.String): cs = n.Right ncs = n.Left } @@ -3487,7 +3488,7 @@ func walkcompareString(n *Node, init *Nodes) *Node { // Our comparison below assumes that the non-constant string // is on the left hand side, so rewrite "" cmp x to x cmp "". // See issue 24817. - if Isconst(n.Left, constant.String) { + if ir.IsConst(n.Left, constant.String) { cmp = brrev(cmp) } @@ -3506,12 +3507,12 @@ func walkcompareString(n *Node, init *Nodes) *Node { combine64bit = thearch.LinkArch.RegSize >= 8 } - var and Op + var and ir.Op switch cmp { - case OEQ: - and = OANDAND - case ONE: - and = OOROR + case ir.OEQ: + and = ir.OANDAND + case ir.ONE: + and = ir.OOROR default: // Don't do byte-wise comparisons for <, <=, etc. // They're fairly complicated. @@ -3522,13 +3523,13 @@ func walkcompareString(n *Node, init *Nodes) *Node { if len(s) > 0 { ncs = safeexpr(ncs, init) } - r := nod(cmp, nod(OLEN, ncs, nil), nodintconst(int64(len(s)))) + r := ir.Nod(cmp, ir.Nod(ir.OLEN, ncs, nil), nodintconst(int64(len(s)))) remains := len(s) for i := 0; remains > 0; { if remains == 1 || !canCombineLoads { cb := nodintconst(int64(s[i])) - ncb := nod(OINDEX, ncs, nodintconst(int64(i))) - r = nod(and, r, nod(cmp, ncb, cb)) + ncb := ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i))) + r = ir.Nod(and, r, ir.Nod(cmp, ncb, cb)) remains-- i++ continue @@ -3537,31 +3538,31 @@ func walkcompareString(n *Node, init *Nodes) *Node { var convType *types.Type switch { case remains >= 8 && combine64bit: - convType = types.Types[TINT64] + convType = types.Types[types.TINT64] step = 8 case remains >= 4: - convType = types.Types[TUINT32] + convType = types.Types[types.TUINT32] step = 4 case remains >= 2: - convType = types.Types[TUINT16] + convType = types.Types[types.TUINT16] step = 2 } - ncsubstr := nod(OINDEX, ncs, nodintconst(int64(i))) + ncsubstr := ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i))) ncsubstr = conv(ncsubstr, convType) csubstr := int64(s[i]) // Calculate large constant from bytes as sequence of shifts and ors. // Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ... 
// ssa will combine this into a single large load.
 for offset := 1; offset < step; offset++ {
- b := nod(OINDEX, ncs, nodintconst(int64(i+offset)))
+ b := ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i+offset)))
 b = conv(b, convType)
- b = nod(OLSH, b, nodintconst(int64(8*offset)))
- ncsubstr = nod(OOR, ncsubstr, b)
+ b = ir.Nod(ir.OLSH, b, nodintconst(int64(8*offset)))
+ ncsubstr = ir.Nod(ir.OOR, ncsubstr, b)
 csubstr |= int64(s[i+offset]) << uint8(8*offset)
 }
 csubstrPart := nodintconst(csubstr)
 // Compare "step" bytes as once
- r = nod(and, r, nod(cmp, csubstrPart, ncsubstr))
+ r = ir.Nod(and, r, ir.Nod(cmp, csubstrPart, ncsubstr))
 remains -= step
 i += step
 }
@@ -3569,26 +3570,26 @@
 }
 }

- var r *Node
- if n.Op == OEQ || n.Op == ONE {
+ var r *ir.Node
+ if n.Op == ir.OEQ || n.Op == ir.ONE {
 // prepare for rewrite below
 n.Left = cheapexpr(n.Left, init)
 n.Right = cheapexpr(n.Right, init)
 eqlen, eqmem := eqstring(n.Left, n.Right)
 // quick check of len before full compare for == or !=.
 // memequal then tests equality up to length len.
- if n.Op == OEQ {
+ if n.Op == ir.OEQ {
 // len(left) == len(right) && memequal(left, right, len)
- r = nod(OANDAND, eqlen, eqmem)
+ r = ir.Nod(ir.OANDAND, eqlen, eqmem)
 } else {
 // len(left) != len(right) || !memequal(left, right, len)
- eqlen.Op = ONE
- r = nod(OOROR, eqlen, nod(ONOT, eqmem, nil))
+ eqlen.Op = ir.ONE
+ r = ir.Nod(ir.OOROR, eqlen, ir.Nod(ir.ONOT, eqmem, nil))
 }
 } else {
 // sys_cmpstring(s1, s2) :: 0
- r = mkcall("cmpstring", types.Types[TINT], init, conv(n.Left, types.Types[TSTRING]), conv(n.Right, types.Types[TSTRING]))
- r = nod(n.Op, r, nodintconst(0))
+ r = mkcall("cmpstring", types.Types[types.TINT], init, conv(n.Left, types.Types[types.TSTRING]), conv(n.Right, types.Types[types.TSTRING]))
+ r = ir.Nod(n.Op, r, nodintconst(0))
 }

 return finishcompare(n, r, init)
@@ -3596,7 +3597,7 @@ func walkcompareString(n *Node, init *Nodes) *Node {

 // The result of finishcompare MUST be assigned back to n, e.g.
 // n.Left = finishcompare(n.Left, x, r, init)
-func finishcompare(n, r *Node, init *Nodes) *Node {
+func finishcompare(n, r *ir.Node, init *ir.Nodes) *ir.Node {
 r = typecheck(r, ctxExpr)
 r = conv(r, n.Type)
 r = walkexpr(r, init)
@@ -3604,7 +3605,7 @@
 }

 // return 1 if integer n must be in range [0, max), 0 otherwise
-func bounded(n *Node, max int64) bool {
+func bounded(n *ir.Node, max int64) bool {
 if n.Type == nil || !n.Type.IsInteger() {
 return false
 }
@@ -3618,14 +3619,14 @@
 }

 switch n.Op {
- case OAND, OANDNOT:
+ case ir.OAND, ir.OANDNOT:
 v := int64(-1)
 switch {
 case smallintconst(n.Left):
 v = n.Left.Int64Val()
 case smallintconst(n.Right):
 v = n.Right.Int64Val()
- if n.Op == OANDNOT {
+ if n.Op == ir.OANDNOT {
 v = ^v
 if !sign {
 v &= 1<<uint(bits) - 1
 }
 }
 }
 if 0 <= v && v < max {
 return true
 }

- case OMOD:
+ case ir.OMOD:
 if !sign && smallintconst(n.Right) {
 v := n.Right.Int64Val()
 if 0 <= v && v <= max {
 return true
 }
 }

- case ODIV:
+ case ir.ODIV:
 if !sign && smallintconst(n.Right) {
 v := n.Right.Int64Val()
 for bits > 0 && v >= 2 {
@@ -3653,7 +3654,7 @@ func bounded(n *Node, max int64) bool {
 }
 }

- case ORSH:
+ case ir.ORSH:
 if !sign && smallintconst(n.Right) {
 v := n.Right.Int64Val()
 if v > int64(bits) {
@@ -3671,7 +3672,7 @@
 }

 // usemethod checks interface method calls for uses of reflect.Type.Method.
-func usemethod(n *Node) { +func usemethod(n *ir.Node) { t := n.Left.Type // Looking for either of: @@ -3694,7 +3695,7 @@ func usemethod(n *Node) { } if res1 == nil { - if p0.Type.Etype != TINT { + if p0.Type.Etype != types.TINT { return } } else { @@ -3712,11 +3713,11 @@ func usemethod(n *Node) { if s := res0.Type.Sym; s != nil && s.Name == "Method" && isReflectPkg(s.Pkg) { Curfn.Func.SetReflectMethod(true) // The LSym is initialized at this point. We need to set the attribute on the LSym. - Curfn.Func.lsym.Set(obj.AttrReflectMethod, true) + Curfn.Func.LSym.Set(obj.AttrReflectMethod, true) } } -func usefield(n *Node) { +func usefield(n *ir.Node) { if objabi.Fieldtrack_enabled == 0 { return } @@ -3725,7 +3726,7 @@ func usefield(n *Node) { default: base.Fatalf("usefield %v", n.Op) - case ODOT, ODOTPTR: + case ir.ODOT, ir.ODOTPTR: break } if n.Sym == nil { @@ -3767,7 +3768,7 @@ func usefield(n *Node) { Curfn.Func.FieldTrack[sym] = struct{}{} } -func candiscardlist(l Nodes) bool { +func candiscardlist(l ir.Nodes) bool { for _, n := range l.Slice() { if !candiscard(n) { return false @@ -3776,7 +3777,7 @@ func candiscardlist(l Nodes) bool { return true } -func candiscard(n *Node) bool { +func candiscard(n *ir.Node) bool { if n == nil { return true } @@ -3786,80 +3787,80 @@ func candiscard(n *Node) bool { return false // Discardable as long as the subpieces are. - case ONAME, - ONONAME, - OTYPE, - OPACK, - OLITERAL, - ONIL, - OADD, - OSUB, - OOR, - OXOR, - OADDSTR, - OADDR, - OANDAND, - OBYTES2STR, - ORUNES2STR, - OSTR2BYTES, - OSTR2RUNES, - OCAP, - OCOMPLIT, - OMAPLIT, - OSTRUCTLIT, - OARRAYLIT, - OSLICELIT, - OPTRLIT, - OCONV, - OCONVIFACE, - OCONVNOP, - ODOT, - OEQ, - ONE, - OLT, - OLE, - OGT, - OGE, - OKEY, - OSTRUCTKEY, - OLEN, - OMUL, - OLSH, - ORSH, - OAND, - OANDNOT, - ONEW, - ONOT, - OBITNOT, - OPLUS, - ONEG, - OOROR, - OPAREN, - ORUNESTR, - OREAL, - OIMAG, - OCOMPLEX: + case ir.ONAME, + ir.ONONAME, + ir.OTYPE, + ir.OPACK, + ir.OLITERAL, + ir.ONIL, + ir.OADD, + ir.OSUB, + ir.OOR, + ir.OXOR, + ir.OADDSTR, + ir.OADDR, + ir.OANDAND, + ir.OBYTES2STR, + ir.ORUNES2STR, + ir.OSTR2BYTES, + ir.OSTR2RUNES, + ir.OCAP, + ir.OCOMPLIT, + ir.OMAPLIT, + ir.OSTRUCTLIT, + ir.OARRAYLIT, + ir.OSLICELIT, + ir.OPTRLIT, + ir.OCONV, + ir.OCONVIFACE, + ir.OCONVNOP, + ir.ODOT, + ir.OEQ, + ir.ONE, + ir.OLT, + ir.OLE, + ir.OGT, + ir.OGE, + ir.OKEY, + ir.OSTRUCTKEY, + ir.OLEN, + ir.OMUL, + ir.OLSH, + ir.ORSH, + ir.OAND, + ir.OANDNOT, + ir.ONEW, + ir.ONOT, + ir.OBITNOT, + ir.OPLUS, + ir.ONEG, + ir.OOROR, + ir.OPAREN, + ir.ORUNESTR, + ir.OREAL, + ir.OIMAG, + ir.OCOMPLEX: break // Discardable as long as we know it's not division by zero. - case ODIV, OMOD: - if n.Right.Op == OLITERAL && constant.Sign(n.Right.Val()) != 0 { + case ir.ODIV, ir.OMOD: + if n.Right.Op == ir.OLITERAL && constant.Sign(n.Right.Val()) != 0 { break } return false // Discardable as long as we know it won't fail because of a bad size. - case OMAKECHAN, OMAKEMAP: - if Isconst(n.Left, constant.Int) && constant.Sign(n.Left.Val()) == 0 { + case ir.OMAKECHAN, ir.OMAKEMAP: + if ir.IsConst(n.Left, constant.Int) && constant.Sign(n.Left.Val()) == 0 { break } return false // Difficult to tell what sizes are okay. - case OMAKESLICE: + case ir.OMAKESLICE: return false - case OMAKESLICECOPY: + case ir.OMAKESLICECOPY: return false } @@ -3890,29 +3891,29 @@ var wrapCall_prgen int // The result of wrapCall MUST be assigned back to n, e.g. 
// n.Left = wrapCall(n.Left, init) -func wrapCall(n *Node, init *Nodes) *Node { +func wrapCall(n *ir.Node, init *ir.Nodes) *ir.Node { if n.Ninit.Len() != 0 { walkstmtlist(n.Ninit.Slice()) init.AppendNodes(&n.Ninit) } - isBuiltinCall := n.Op != OCALLFUNC && n.Op != OCALLMETH && n.Op != OCALLINTER + isBuiltinCall := n.Op != ir.OCALLFUNC && n.Op != ir.OCALLMETH && n.Op != ir.OCALLINTER // Turn f(a, b, []T{c, d, e}...) back into f(a, b, c, d, e). if !isBuiltinCall && n.IsDDD() { last := n.List.Len() - 1 - if va := n.List.Index(last); va.Op == OSLICELIT { + if va := n.List.Index(last); va.Op == ir.OSLICELIT { n.List.Set(append(n.List.Slice()[:last], va.List.Slice()...)) n.SetIsDDD(false) } } // origArgs keeps track of what argument is uintptr-unsafe/unsafe-uintptr conversion. - origArgs := make([]*Node, n.List.Len()) - t := nod(OTFUNC, nil, nil) + origArgs := make([]*ir.Node, n.List.Len()) + t := ir.Nod(ir.OTFUNC, nil, nil) for i, arg := range n.List.Slice() { s := lookupN("a", i) - if !isBuiltinCall && arg.Op == OCONVNOP && arg.Type.IsUintptr() && arg.Left.Type.IsUnsafePtr() { + if !isBuiltinCall && arg.Op == ir.OCONVNOP && arg.Type.IsUintptr() && arg.Left.Type.IsUnsafePtr() { origArgs[i] = arg arg = arg.Left n.List.SetIndex(i, arg) @@ -3929,13 +3930,13 @@ func wrapCall(n *Node, init *Nodes) *Node { if origArg == nil { continue } - arg := nod(origArg.Op, args[i], nil) + arg := ir.Nod(origArg.Op, args[i], nil) arg.Type = origArg.Type args[i] = arg } - call := nod(n.Op, nil, nil) + call := ir.Nod(n.Op, nil, nil) if !isBuiltinCall { - call.Op = OCALL + call.Op = ir.OCALL call.Left = n.Left call.SetIsDDD(n.IsDDD()) } @@ -3948,7 +3949,7 @@ func wrapCall(n *Node, init *Nodes) *Node { typecheckslice(fn.Nbody.Slice(), ctxStmt) xtop = append(xtop, fn) - call = nod(OCALL, nil, nil) + call = ir.Nod(ir.OCALL, nil, nil) call.Left = fn.Func.Nname call.List.Set(n.List.Slice()) call = typecheck(call, ctxStmt) @@ -3961,8 +3962,8 @@ func wrapCall(n *Node, init *Nodes) *Node { // type syntax expression n.Type. // The result of substArgTypes MUST be assigned back to old, e.g. // n.Left = substArgTypes(n.Left, t1, t2) -func substArgTypes(old *Node, types_ ...*types.Type) *Node { - n := old.copy() +func substArgTypes(old *ir.Node, types_ ...*types.Type) *ir.Node { + n := ir.Copy(old) for _, t := range types_ { dowidth(t) @@ -3991,11 +3992,11 @@ func canMergeLoads() bool { // isRuneCount reports whether n is of the form len([]rune(string)). // These are optimized into a call to runtime.countrunes. 
-func isRuneCount(n *Node) bool { - return base.Flag.N == 0 && !instrumenting && n.Op == OLEN && n.Left.Op == OSTR2RUNES +func isRuneCount(n *ir.Node) bool { + return base.Flag.N == 0 && !instrumenting && n.Op == ir.OLEN && n.Left.Op == ir.OSTR2RUNES } -func walkCheckPtrAlignment(n *Node, init *Nodes, count *Node) *Node { +func walkCheckPtrAlignment(n *ir.Node, init *ir.Nodes, count *ir.Node) *ir.Node { if !n.Type.IsPtr() { base.Fatalf("expected pointer type: %v", n.Type) } @@ -4017,13 +4018,13 @@ func walkCheckPtrAlignment(n *Node, init *Nodes, count *Node) *Node { } n.Left = cheapexpr(n.Left, init) - init.Append(mkcall("checkptrAlignment", nil, init, convnop(n.Left, types.Types[TUNSAFEPTR]), typename(elem), conv(count, types.Types[TUINTPTR]))) + init.Append(mkcall("checkptrAlignment", nil, init, convnop(n.Left, types.Types[types.TUNSAFEPTR]), typename(elem), conv(count, types.Types[types.TUINTPTR]))) return n } var walkCheckPtrArithmeticMarker byte -func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node { +func walkCheckPtrArithmetic(n *ir.Node, init *ir.Nodes) *ir.Node { // Calling cheapexpr(n, init) below leads to a recursive call // to walkexpr, which leads us back here again. Use n.Opt to // prevent infinite loops. @@ -4040,11 +4041,11 @@ func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node { // TODO(mdempsky): Make stricter. We only need to exempt // reflect.Value.Pointer and reflect.Value.UnsafeAddr. switch n.Left.Op { - case OCALLFUNC, OCALLMETH, OCALLINTER: + case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: return n } - if n.Left.Op == ODOTPTR && isReflectHeaderDataField(n.Left) { + if n.Left.Op == ir.ODOTPTR && isReflectHeaderDataField(n.Left) { return n } @@ -4054,19 +4055,19 @@ func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node { // "It is valid both to add and to subtract offsets from a // pointer in this way. It is also valid to use &^ to round // pointers, usually for alignment." - var originals []*Node - var walk func(n *Node) - walk = func(n *Node) { + var originals []*ir.Node + var walk func(n *ir.Node) + walk = func(n *ir.Node) { switch n.Op { - case OADD: + case ir.OADD: walk(n.Left) walk(n.Right) - case OSUB, OANDNOT: + case ir.OSUB, ir.OANDNOT: walk(n.Left) - case OCONVNOP: + case ir.OCONVNOP: if n.Left.Type.IsUnsafePtr() { n.Left = cheapexpr(n.Left, init) - originals = append(originals, convnop(n.Left, types.Types[TUNSAFEPTR])) + originals = append(originals, convnop(n.Left, types.Types[types.TUNSAFEPTR])) } } } @@ -4074,10 +4075,10 @@ func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node { n = cheapexpr(n, init) - slice := mkdotargslice(types.NewSlice(types.Types[TUNSAFEPTR]), originals) + slice := mkdotargslice(types.NewSlice(types.Types[types.TUNSAFEPTR]), originals) slice.Esc = EscNone - init.Append(mkcall("checkptrArithmetic", nil, init, convnop(n, types.Types[TUNSAFEPTR]), slice)) + init.Append(mkcall("checkptrArithmetic", nil, init, convnop(n, types.Types[types.TUNSAFEPTR]), slice)) // TODO(khr): Mark backing store of slice as dead. This will allow us to reuse // the backing store for multiple calls to checkptrArithmetic. @@ -4087,6 +4088,6 @@ func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node { // checkPtr reports whether pointer checking should be enabled for // function fn at a given level. See debugHelpFooter for defined // levels. 
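The two walkCheckPtr helpers above are the hooks for the -d=checkptr instrumentation (which is also switched on by -race and -msan). A small program that should trip the alignment check when built with -gcflags=all=-d=checkptr; the exact runtime failure message may vary by version:

    package main

    import "unsafe"

    func main() {
        buf := make([]byte, 16)
        // Under checkptr, the compiler wraps this conversion in a
        // checkptrAlignment call; &buf[1] is misaligned for int64,
        // so the check fails at run time.
        p := (*int64)(unsafe.Pointer(&buf[1]))
        _ = p
    }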
-func checkPtr(fn *Node, level int) bool { - return base.Debug.Checkptr >= level && fn.Func.Pragma&NoCheckPtr == 0 +func checkPtr(fn *ir.Node, level int) bool { + return base.Debug.Checkptr >= level && fn.Func.Pragma&ir.NoCheckPtr == 0 } diff --git a/src/cmd/compile/internal/gc/bitset.go b/src/cmd/compile/internal/ir/bitset.go similarity index 99% rename from src/cmd/compile/internal/gc/bitset.go rename to src/cmd/compile/internal/ir/bitset.go index ed5eea0a11be9..29f136296fd1e 100644 --- a/src/cmd/compile/internal/gc/bitset.go +++ b/src/cmd/compile/internal/ir/bitset.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package ir type bitset8 uint8 diff --git a/src/cmd/compile/internal/gc/class_string.go b/src/cmd/compile/internal/ir/class_string.go similarity index 98% rename from src/cmd/compile/internal/gc/class_string.go rename to src/cmd/compile/internal/ir/class_string.go index a4084a7535209..866bf1a6b5b78 100644 --- a/src/cmd/compile/internal/gc/class_string.go +++ b/src/cmd/compile/internal/ir/class_string.go @@ -1,6 +1,6 @@ // Code generated by "stringer -type=Class"; DO NOT EDIT. -package gc +package ir import "strconv" diff --git a/src/cmd/compile/internal/gc/dump.go b/src/cmd/compile/internal/ir/dump.go similarity index 96% rename from src/cmd/compile/internal/gc/dump.go rename to src/cmd/compile/internal/ir/dump.go index 56dc474465737..9306366e8ad31 100644 --- a/src/cmd/compile/internal/gc/dump.go +++ b/src/cmd/compile/internal/ir/dump.go @@ -6,22 +6,23 @@ // for debugging purposes. The code is customized for Node graphs // and may be used for an alternative view of the node structure. -package gc +package ir import ( - "cmd/compile/internal/base" - "cmd/compile/internal/types" - "cmd/internal/src" "fmt" "io" "os" "reflect" "regexp" + + "cmd/compile/internal/base" + "cmd/compile/internal/types" + "cmd/internal/src" ) // dump is like fdump but prints to stderr. -func dump(root interface{}, filter string, depth int) { - fdump(os.Stderr, root, filter, depth) +func DumpAny(root interface{}, filter string, depth int) { + FDumpAny(os.Stderr, root, filter, depth) } // fdump prints the structure of a rooted data structure @@ -41,7 +42,7 @@ func dump(root interface{}, filter string, depth int) { // rather than their type; struct fields with zero values or // non-matching field names are omitted, and "…" means recursion // depth has been reached or struct fields have been omitted. -func fdump(w io.Writer, root interface{}, filter string, depth int) { +func FDumpAny(w io.Writer, root interface{}, filter string, depth int) { if root == nil { fmt.Fprintln(w, "nil") return @@ -151,7 +152,7 @@ func (p *dumper) dump(x reflect.Value, depth int) { return case *types.Node: - x = reflect.ValueOf(asNode(v)) + x = reflect.ValueOf(AsNode(v)) } switch x.Kind() { diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/ir/fmt.go similarity index 87% rename from src/cmd/compile/internal/gc/fmt.go rename to src/cmd/compile/internal/ir/fmt.go index 9248eb22aa28d..5dea0880fc140 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -2,13 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-package gc +package ir import ( "bytes" - "cmd/compile/internal/base" - "cmd/compile/internal/types" - "cmd/internal/src" "fmt" "go/constant" "io" @@ -16,6 +13,10 @@ import ( "strings" "sync" "unicode/utf8" + + "cmd/compile/internal/base" + "cmd/compile/internal/types" + "cmd/internal/src" ) // A FmtFlag value is a set of flags (or 0). @@ -98,7 +99,7 @@ func fmtFlag(s fmt.State, verb rune) FmtFlag { // *types.Sym, *types.Type, and *Node types use the flags below to set the format mode const ( - FErr fmtMode = iota + FErr FmtMode = iota FDbg FTypeId FTypeIdName // same as FTypeId, but use package name instead of prefix @@ -131,7 +132,7 @@ const ( // %- v type identifiers with package name instead of prefix (typesym, dcommontype, typehash) // update returns the results of applying f to mode. -func (f FmtFlag) update(mode fmtMode) (FmtFlag, fmtMode) { +func (f FmtFlag) update(mode FmtMode) (FmtFlag, FmtMode) { switch { case f&FmtSign != 0: mode = FDbg @@ -147,7 +148,7 @@ func (f FmtFlag) update(mode fmtMode) (FmtFlag, fmtMode) { return f, mode } -var goopnames = []string{ +var OpNames = []string{ OADDR: "&", OADD: "+", OADDSTR: "+", @@ -217,7 +218,7 @@ func (o Op) GoString() string { return fmt.Sprintf("%#v", o) } -func (o Op) format(s fmt.State, verb rune, mode fmtMode) { +func (o Op) format(s fmt.State, verb rune, mode FmtMode) { switch verb { case 'v': o.oconv(s, fmtFlag(s, verb), mode) @@ -227,10 +228,10 @@ func (o Op) format(s fmt.State, verb rune, mode fmtMode) { } } -func (o Op) oconv(s fmt.State, flag FmtFlag, mode fmtMode) { +func (o Op) oconv(s fmt.State, flag FmtFlag, mode FmtMode) { if flag&FmtSharp != 0 || mode != FDbg { - if int(o) < len(goopnames) && goopnames[o] != "" { - fmt.Fprint(s, goopnames[o]) + if int(o) < len(OpNames) && OpNames[o] != "" { + fmt.Fprint(s, OpNames[o]) return } } @@ -239,66 +240,73 @@ func (o Op) oconv(s fmt.State, flag FmtFlag, mode fmtMode) { fmt.Fprint(s, o.String()) } -type fmtMode int +type FmtMode int type fmtNode struct { x *Node - m fmtMode + m FmtMode } func (f *fmtNode) Format(s fmt.State, verb rune) { f.x.format(s, verb, f.m) } type fmtOp struct { x Op - m fmtMode + m FmtMode } func (f *fmtOp) Format(s fmt.State, verb rune) { f.x.format(s, verb, f.m) } type fmtType struct { x *types.Type - m fmtMode + m FmtMode } func (f *fmtType) Format(s fmt.State, verb rune) { typeFormat(f.x, s, verb, f.m) } type fmtSym struct { x *types.Sym - m fmtMode + m FmtMode } func (f *fmtSym) Format(s fmt.State, verb rune) { symFormat(f.x, s, verb, f.m) } type fmtNodes struct { x Nodes - m fmtMode + m FmtMode } func (f *fmtNodes) Format(s fmt.State, verb rune) { f.x.format(s, verb, f.m) } -func (n *Node) Format(s fmt.State, verb rune) { n.format(s, verb, FErr) } -func (o Op) Format(s fmt.State, verb rune) { o.format(s, verb, FErr) } +func (n *Node) Format(s fmt.State, verb rune) { + FmtNode(n, s, verb) +} + +func FmtNode(n *Node, s fmt.State, verb rune) { + n.format(s, verb, FErr) +} + +func (o Op) Format(s fmt.State, verb rune) { o.format(s, verb, FErr) } // func (t *types.Type) Format(s fmt.State, verb rune) // in package types // func (y *types.Sym) Format(s fmt.State, verb rune) // in package types { y.format(s, verb, FErr) } func (n Nodes) Format(s fmt.State, verb rune) { n.format(s, verb, FErr) } -func (m fmtMode) Fprintf(s fmt.State, format string, args ...interface{}) { +func (m FmtMode) Fprintf(s fmt.State, format string, args ...interface{}) { m.prepareArgs(args) fmt.Fprintf(s, format, args...) 
} -func (m fmtMode) Sprintf(format string, args ...interface{}) string { +func (m FmtMode) Sprintf(format string, args ...interface{}) string { m.prepareArgs(args) return fmt.Sprintf(format, args...) } -func (m fmtMode) Sprint(args ...interface{}) string { +func (m FmtMode) Sprint(args ...interface{}) string { m.prepareArgs(args) return fmt.Sprint(args...) } -func (m fmtMode) prepareArgs(args []interface{}) { +func (m FmtMode) prepareArgs(args []interface{}) { for i, arg := range args { switch arg := arg.(type) { case Op: @@ -319,13 +327,13 @@ func (m fmtMode) prepareArgs(args []interface{}) { } } -func (n *Node) format(s fmt.State, verb rune, mode fmtMode) { +func (n *Node) format(s fmt.State, verb rune, mode FmtMode) { switch verb { case 'v', 'S', 'L': - n.nconv(s, fmtFlag(s, verb), mode) + nconvFmt(n, s, fmtFlag(s, verb), mode) case 'j': - n.jconv(s, fmtFlag(s, verb)) + jconvFmt(n, s, fmtFlag(s, verb)) default: fmt.Fprintf(s, "%%!%c(*Node=%p)", verb, n) @@ -336,7 +344,7 @@ func (n *Node) format(s fmt.State, verb rune, mode fmtMode) { var EscFmt func(n *Node, short bool) string // *Node details -func (n *Node) jconv(s fmt.State, flag FmtFlag) { +func jconvFmt(n *Node, s fmt.State, flag FmtFlag) { short := flag&FmtShort != 0 // Useful to see which nodes in an AST printout are actually identical @@ -363,7 +371,7 @@ func (n *Node) jconv(s fmt.State, flag FmtFlag) { fmt.Fprintf(s, " l(%s%d)", pfx, n.Pos.Line()) } - if !short && n.Xoffset != BADWIDTH { + if !short && n.Xoffset != types.BADWIDTH { fmt.Fprintf(s, " x(%d)", n.Xoffset) } @@ -430,7 +438,7 @@ func (n *Node) jconv(s fmt.State, flag FmtFlag) { } } -func vconv(v constant.Value, flag FmtFlag) string { +func FmtConst(v constant.Value, flag FmtFlag) string { if flag&FmtSharp == 0 && v.Kind() == constant.Complex { real, imag := constant.Real(v), constant.Imag(v) @@ -473,17 +481,17 @@ s%^ ........*\]%&~%g s%~ %%g */ -func symfmt(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode fmtMode) { +func symfmt(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode FmtMode) { if flag&FmtShort == 0 { switch mode { case FErr: // This is for the user - if s.Pkg == builtinpkg || s.Pkg == localpkg { + if s.Pkg == BuiltinPkg || s.Pkg == LocalPkg { b.WriteString(s.Name) return } // If the name was used by multiple packages, display the full path, - if s.Pkg.Name != "" && numImport[s.Pkg.Name] > 1 { + if s.Pkg.Name != "" && NumImport[s.Pkg.Name] > 1 { fmt.Fprintf(b, "%q.%s", s.Pkg.Path, s.Name) return } @@ -534,28 +542,28 @@ func symfmt(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode fmtMode) { b.WriteString(s.Name) } -var basicnames = []string{ - TINT: "int", - TUINT: "uint", - TINT8: "int8", - TUINT8: "uint8", - TINT16: "int16", - TUINT16: "uint16", - TINT32: "int32", - TUINT32: "uint32", - TINT64: "int64", - TUINT64: "uint64", - TUINTPTR: "uintptr", - TFLOAT32: "float32", - TFLOAT64: "float64", - TCOMPLEX64: "complex64", - TCOMPLEX128: "complex128", - TBOOL: "bool", - TANY: "any", - TSTRING: "string", - TNIL: "nil", - TIDEAL: "untyped number", - TBLANK: "blank", +var BasicTypeNames = []string{ + types.TINT: "int", + types.TUINT: "uint", + types.TINT8: "int8", + types.TUINT8: "uint8", + types.TINT16: "int16", + types.TUINT16: "uint16", + types.TINT32: "int32", + types.TUINT32: "uint32", + types.TINT64: "int64", + types.TUINT64: "uint64", + types.TUINTPTR: "uintptr", + types.TFLOAT32: "float32", + types.TFLOAT64: "float64", + types.TCOMPLEX64: "complex64", + types.TCOMPLEX128: "complex128", + types.TBOOL: "bool", + types.TANY: "any", + types.TSTRING: 
"string", + types.TNIL: "nil", + types.TIDEAL: "untyped number", + types.TBLANK: "blank", } var fmtBufferPool = sync.Pool{ @@ -564,7 +572,7 @@ var fmtBufferPool = sync.Pool{ }, } -func tconv(t *types.Type, flag FmtFlag, mode fmtMode) string { +func tconv(t *types.Type, flag FmtFlag, mode FmtMode) string { buf := fmtBufferPool.Get().(*bytes.Buffer) buf.Reset() defer fmtBufferPool.Put(buf) @@ -577,7 +585,7 @@ func tconv(t *types.Type, flag FmtFlag, mode fmtMode) string { // flag and mode control exactly what is printed. // Any types x that are already in the visited map get printed as @%d where %d=visited[x]. // See #16897 before changing the implementation of tconv. -func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited map[*types.Type]int) { +func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited map[*types.Type]int) { if off, ok := visited[t]; ok { // We've seen this type before, so we're trying to print it recursively. // Print a reference to it instead. @@ -648,7 +656,7 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited return } - if t.Sym.Pkg == localpkg && t.Vargen != 0 { + if t.Sym.Pkg == LocalPkg && t.Vargen != 0 { b.WriteString(mode.Sprintf("%v·%d", t.Sym, t.Vargen)) return } @@ -658,7 +666,7 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited return } - if int(t.Etype) < len(basicnames) && basicnames[t.Etype] != "" { + if int(t.Etype) < len(BasicTypeNames) && BasicTypeNames[t.Etype] != "" { var name string switch t { case types.UntypedBool: @@ -674,7 +682,7 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited case types.UntypedComplex: name = "untyped complex" default: - name = basicnames[t.Etype] + name = BasicTypeNames[t.Etype] } b.WriteString(name) return @@ -701,7 +709,7 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited defer delete(visited, t) switch t.Etype { - case TPTR: + case types.TPTR: b.WriteByte('*') switch mode { case FTypeId, FTypeIdName: @@ -712,17 +720,17 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited } tconv2(b, t.Elem(), 0, mode, visited) - case TARRAY: + case types.TARRAY: b.WriteByte('[') b.WriteString(strconv.FormatInt(t.NumElem(), 10)) b.WriteByte(']') tconv2(b, t.Elem(), 0, mode, visited) - case TSLICE: + case types.TSLICE: b.WriteString("[]") tconv2(b, t.Elem(), 0, mode, visited) - case TCHAN: + case types.TCHAN: switch t.ChanDir() { case types.Crecv: b.WriteString("<-chan ") @@ -741,13 +749,13 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited } } - case TMAP: + case types.TMAP: b.WriteString("map[") tconv2(b, t.Key(), 0, mode, visited) b.WriteByte(']') tconv2(b, t.Elem(), 0, mode, visited) - case TINTER: + case types.TINTER: if t.IsEmptyInterface() { b.WriteString("interface {}") break @@ -779,7 +787,7 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited } b.WriteByte('}') - case TFUNC: + case types.TFUNC: if flag&FmtShort != 0 { // no leading func } else { @@ -805,7 +813,7 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited tconv2(b, t.Results(), 0, mode, visited) } - case TSTRUCT: + case types.TSTRUCT: if m := t.StructType().Map; m != nil { mt := m.MapType() // Format the bucket struct for map[x]y as map.bucket[x]y. 
@@ -856,17 +864,17 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited b.WriteByte('}') } - case TFORW: + case types.TFORW: b.WriteString("undefined") if t.Sym != nil { b.WriteByte(' ') sconv2(b, t.Sym, 0, mode) } - case TUNSAFEPTR: + case types.TUNSAFEPTR: b.WriteString("unsafe.Pointer") - case Txxx: + case types.Txxx: b.WriteString("Txxx") default: // Don't know how to handle - fall back to detailed prints. @@ -875,7 +883,7 @@ } // Statements which may be rendered with a simplestmt as init. -func stmtwithinit(op Op) bool { +func StmtWithInit(op Op) bool { switch op { case OIF, OFOR, OFORUNTIL, OSWITCH: return true @@ -884,20 +892,20 @@ return false } -func (n *Node) stmtfmt(s fmt.State, mode fmtMode) { +func stmtFmt(n *Node, s fmt.State, mode FmtMode) { // some statements allow for an init, but at most one, // but we may have an arbitrary number added, eg by typecheck // and inlining. If it doesn't fit the syntax, emit an enclosing // block starting with the init statements. // if we can just say "for" n->ninit; ... then do so - simpleinit := n.Ninit.Len() == 1 && n.Ninit.First().Ninit.Len() == 0 && stmtwithinit(n.Op) + simpleinit := n.Ninit.Len() == 1 && n.Ninit.First().Ninit.Len() == 0 && StmtWithInit(n.Op) // otherwise, print the inits as separate statements complexinit := n.Ninit.Len() != 0 && !simpleinit && (mode != FErr) // but if it was for if/for/switch, put in an extra surrounding block to limit the scope extrablock := complexinit && StmtWithInit(n.Op) if extrablock { fmt.Fprint(s, "{") @@ -1064,7 +1072,7 @@ func (n *Node) stmtfmt(s fmt.State, mode fmtMode) { } } -var opprec = []int{ +var OpPrec = []int{ OALIGNOF: 8, OAPPEND: 8, OBYTES2STR: 8, @@ -1184,7 +1192,7 @@ var opprec = []int{ OEND: 0, } -func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) { +func exprFmt(n *Node, s fmt.State, prec int, mode FmtMode) { for n != nil && n.Implicit() && (n.Op == ODEREF || n.Op == OADDR) { n = n.Left } @@ -1194,7 +1202,7 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) { return } - nprec := opprec[n.Op] + nprec := OpPrec[n.Op] if n.Op == OTYPE && n.Sym != nil { nprec = 8 } @@ -1214,7 +1222,7 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) { case OLITERAL: // this is a bit of a mess if mode == FErr { if n.Orig != nil && n.Orig != n { - n.Orig.exprfmt(s, prec, mode) + exprFmt(n.Orig, s, prec, mode) return } if n.Sym != nil { @@ -1252,7 +1260,7 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) { fmt.Fprintf(s, "'\\U%08x'", uint64(x)) } } else { - fmt.Fprint(s, vconv(n.Val(), fmtFlag(s, 'v'))) + fmt.Fprint(s, FmtConst(n.Val(), fmtFlag(s, 'v'))) } if needUnparen { @@ -1369,7 +1377,7 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) { mode.Fprintf(s, "%v:%v", n.Sym, n.Left) case OCALLPART: - n.Left.exprfmt(s, nprec, mode) + exprFmt(n.Left, s, nprec, mode) if n.Right == nil || n.Right.Sym == nil { fmt.Fprint(s, ".<nil>") return } mode.Fprintf(s, ".%0S", n.Right.Sym) case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH: - n.Left.exprfmt(s, nprec, mode) + exprFmt(n.Left, s, nprec, mode) if n.Sym == nil { fmt.Fprint(s, ".<nil>") return } mode.Fprintf(s, ".%0S", n.Sym) case ODOTTYPE, ODOTTYPE2: -
n.Left.exprfmt(s, nprec, mode) + exprFmt(n.Left, s, nprec, mode) if n.Right != nil { mode.Fprintf(s, ".(%v)", n.Right) return @@ -1393,24 +1401,24 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) { mode.Fprintf(s, ".(%v)", n.Type) case OINDEX, OINDEXMAP: - n.Left.exprfmt(s, nprec, mode) + exprFmt(n.Left, s, nprec, mode) mode.Fprintf(s, "[%v]", n.Right) case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR: - n.Left.exprfmt(s, nprec, mode) + exprFmt(n.Left, s, nprec, mode) fmt.Fprint(s, "[") low, high, max := n.SliceBounds() if low != nil { - fmt.Fprint(s, low.modeString(mode)) + fmt.Fprint(s, modeString(low, mode)) } fmt.Fprint(s, ":") if high != nil { - fmt.Fprint(s, high.modeString(mode)) + fmt.Fprint(s, modeString(high, mode)) } if n.Op.IsSlice3() { fmt.Fprint(s, ":") if max != nil { - fmt.Fprint(s, max.modeString(mode)) + fmt.Fprint(s, modeString(max, mode)) } } fmt.Fprint(s, "]") @@ -1474,7 +1482,7 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) { mode.Fprintf(s, "%#v(%.v)", n.Op, n.List) case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG: - n.Left.exprfmt(s, nprec, mode) + exprFmt(n.Left, s, nprec, mode) if n.IsDDD() { mode.Fprintf(s, "(%.v...)", n.List) return @@ -1505,7 +1513,7 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) { if n.Left != nil && n.Left.Op == n.Op { fmt.Fprint(s, " ") } - n.Left.exprfmt(s, nprec+1, mode) + exprFmt(n.Left, s, nprec+1, mode) // Binary case OADD, @@ -1528,16 +1536,16 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) { OSEND, OSUB, OXOR: - n.Left.exprfmt(s, nprec, mode) + exprFmt(n.Left, s, nprec, mode) mode.Fprintf(s, " %#v ", n.Op) - n.Right.exprfmt(s, nprec+1, mode) + exprFmt(n.Right, s, nprec+1, mode) case OADDSTR: for i, n1 := range n.List.Slice() { if i != 0 { fmt.Fprint(s, " + ") } - n1.exprfmt(s, nprec, mode) + exprFmt(n1, s, nprec, mode) } case ODDD: mode.Fprintf(s, "...") @@ -1546,7 +1554,7 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) { } } -func (n *Node) nodefmt(s fmt.State, flag FmtFlag, mode fmtMode) { +func nodeFmt(n *Node, s fmt.State, flag FmtFlag, mode FmtMode) { t := n.Type // We almost always want the original. @@ -1556,7 +1564,7 @@ func (n *Node) nodefmt(s fmt.State, flag FmtFlag, mode fmtMode) { } if flag&FmtLong != 0 && t != nil { - if t.Etype == TNIL { + if t.Etype == types.TNIL { fmt.Fprint(s, "nil") } else if n.Op == ONAME && n.Name.AutoTemp() { mode.Fprintf(s, "%v value", t) @@ -1568,15 +1576,15 @@ func (n *Node) nodefmt(s fmt.State, flag FmtFlag, mode fmtMode) { // TODO inlining produces expressions with ninits. we can't print these yet. 
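The nprec/nprec+1 asymmetry in the binary cases above is the usual minimal-parentheses rule for left-associative operators: the right operand is rendered in a context one level tighter, so a+(b+c) keeps its parentheses while (a+b)+c prints bare. A toy version of the same rule (invented types, not compiler code):

    package main

    import "fmt"

    type expr struct {
        op          string // "" marks a leaf
        left, right *expr
        lit         string
    }

    var prec = map[string]int{"+": 1, "*": 2}

    // render parenthesizes a subexpression only when its operator binds
    // less tightly than the surrounding context demands.
    func render(e *expr, ctx int) string {
        if e.op == "" {
            return e.lit
        }
        p := prec[e.op]
        s := render(e.left, p) + e.op + render(e.right, p+1)
        if p < ctx {
            return "(" + s + ")"
        }
        return s
    }

    func main() {
        bc := &expr{op: "+", left: &expr{lit: "b"}, right: &expr{lit: "c"}}
        e := &expr{op: "+", left: &expr{lit: "a"}, right: bc}
        fmt.Println(render(e, 0)) // a+(b+c)
    }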
- if opprec[n.Op] < 0 { - n.stmtfmt(s, mode) + if OpPrec[n.Op] < 0 { + stmtFmt(n, s, mode) return } - n.exprfmt(s, 0, mode) + exprFmt(n, s, 0, mode) } -func (n *Node) nodedump(s fmt.State, flag FmtFlag, mode fmtMode) { +func nodeDumpFmt(n *Node, s fmt.State, flag FmtFlag, mode FmtMode) { recur := flag&FmtShort == 0 if recur { @@ -1647,7 +1655,7 @@ func (n *Node) nodedump(s fmt.State, flag FmtFlag, mode fmtMode) { if n.Op == ODCLFUNC && n.Func != nil && n.Func.Dcl != nil && len(n.Func.Dcl) != 0 { indent(s) // The dcls for a func or closure - mode.Fprintf(s, "%v-dcl%v", n.Op, asNodes(n.Func.Dcl)) + mode.Fprintf(s, "%v-dcl%v", n.Op, AsNodes(n.Func.Dcl)) } if n.List.Len() != 0 { indent(s) @@ -1667,7 +1675,7 @@ } // "%S" suppresses qualifying with package -func symFormat(s *types.Sym, f fmt.State, verb rune, mode fmtMode) { +func symFormat(s *types.Sym, f fmt.State, verb rune, mode FmtMode) { switch verb { case 'v', 'S': fmt.Fprint(f, sconv(s, fmtFlag(f, verb), mode)) @@ -1677,10 +1685,10 @@ } } -func smodeString(s *types.Sym, mode fmtMode) string { return sconv(s, 0, mode) } +func smodeString(s *types.Sym, mode FmtMode) string { return sconv(s, 0, mode) } // See #16897 before changing the implementation of sconv. -func sconv(s *types.Sym, flag FmtFlag, mode fmtMode) string { +func sconv(s *types.Sym, flag FmtFlag, mode FmtMode) string { if flag&FmtLong != 0 { panic("linksymfmt") } @@ -1701,7 +1709,7 @@ return types.InternString(buf.Bytes()) } -func sconv2(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode fmtMode) { +func sconv2(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode FmtMode) { if flag&FmtLong != 0 { panic("linksymfmt") } @@ -1718,7 +1726,7 @@ symfmt(b, s, flag, mode) } -func fldconv(b *bytes.Buffer, f *types.Field, flag FmtFlag, mode fmtMode, visited map[*types.Type]int, funarg types.Funarg) { +func fldconv(b *bytes.Buffer, f *types.Field, flag FmtFlag, mode FmtMode, visited map[*types.Type]int, funarg types.Funarg) { if f == nil { b.WriteString("<T>") return } @@ -1734,12 +1742,12 @@ func fldconv(b *bytes.Buffer, f *types.Field, flag FmtFlag, mode fmtMode, visite // Take the name from the original.
if mode == FErr { - s = origSym(s) + s = OrigSym(s) } if s != nil && f.Embedded == 0 { if funarg != types.FunargNone { - name = asNode(f.Nname).modeString(mode) + name = modeString(AsNode(f.Nname), mode) } else if flag&FmtLong != 0 { name = mode.Sprintf("%0S", s) if !types.IsExported(name) && flag&FmtUnsigned == 0 { name = smodeString(s, mode) // qualify non-exported names (used on structs) @@ -1775,7 +1783,7 @@ func fldconv(b *bytes.Buffer, f *types.Field, flag FmtFlag, mode fmtMode, visite // "%L" print definition, not name // "%S" omit 'func' and receiver from function types, short type names -func typeFormat(t *types.Type, s fmt.State, verb rune, mode fmtMode) { +func typeFormat(t *types.Type, s fmt.State, verb rune, mode FmtMode) { switch verb { case 'v', 'S', 'L': fmt.Fprint(s, tconv(t, fmtFlag(s, verb), mode)) @@ -1784,12 +1792,12 @@ } } -func (n *Node) String() string { return fmt.Sprint(n) } -func (n *Node) modeString(mode fmtMode) string { return mode.Sprint(n) } +func (n *Node) String() string { return fmt.Sprint(n) } +func modeString(n *Node, mode FmtMode) string { return mode.Sprint(n) } // "%L" suffix with "(type %T)" where possible // "%+S" in debug mode, don't recurse, no multiline output -func (n *Node) nconv(s fmt.State, flag FmtFlag, mode fmtMode) { +func nconvFmt(n *Node, s fmt.State, flag FmtFlag, mode FmtMode) { if n == nil { fmt.Fprint(s, "<N>") return } @@ -1799,11 +1807,11 @@ switch mode { case FErr: - n.nodefmt(s, flag, mode) + nodeFmt(n, s, flag, mode) case FDbg: dumpdepth++ - n.nodedump(s, flag, mode) + nodeDumpFmt(n, s, flag, mode) dumpdepth-- default: @@ -1811,7 +1819,7 @@ } } -func (l Nodes) format(s fmt.State, verb rune, mode fmtMode) { +func (l Nodes) format(s fmt.State, verb rune, mode FmtMode) { switch verb { case 'v': l.hconv(s, fmtFlag(s, verb), mode) @@ -1826,7 +1834,7 @@ func (n Nodes) String() string { } // Flags: all those of %N plus '.': separate with comma's instead of semicolons. -func (l Nodes) hconv(s fmt.State, flag FmtFlag, mode fmtMode) { +func (l Nodes) hconv(s fmt.State, flag FmtFlag, mode FmtMode) { if l.Len() == 0 && mode == FDbg { fmt.Fprint(s, "<nil>") return } @@ -1841,18 +1849,18 @@ } for i, n := range l.Slice() { - fmt.Fprint(s, n.modeString(mode)) + fmt.Fprint(s, modeString(n, mode)) if i+1 < l.Len() { fmt.Fprint(s, sep) } } } -func dumplist(s string, l Nodes) { +func DumpList(s string, l Nodes) { fmt.Printf("%s%+v\n", s, l) } -func fdumplist(w io.Writer, s string, l Nodes) { +func FDumpList(w io.Writer, s string, l Nodes) { fmt.Fprintf(w, "%s%+v\n", s, l) } @@ -1877,3 +1885,30 @@ func ellipsisIf(b bool) string { } return "" } + +// numImport tracks how often a package with a given name is imported. +// It is used to provide a better error message (by using the package +// path to disambiguate) if a package that appears multiple times with +// the same name appears in an error message.
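All of this rides on fmt.Formatter: once a type implements Format(fmt.State, rune), Printf hands it every verb and flag, which is how %v, %+v, %S, %L and %j can each select a different rendering of the same node. A minimal illustration with an invented type:

    package main

    import "fmt"

    type node struct{ name string }

    // Format routes verbs and flags much as ir.Node does, on a toy scale.
    func (n node) Format(s fmt.State, verb rune) {
        switch verb {
        case 'v':
            if s.Flag('+') {
                fmt.Fprintf(s, "node(%s, debug)", n.name) // %+v: debug mode
                return
            }
            fmt.Fprintf(s, "node(%s)", n.name)
        case 'S':
            fmt.Fprint(s, n.name) // short form
        default:
            fmt.Fprintf(s, "%%!%c(node=%s)", verb, n.name)
        }
    }

    func main() {
        n := node{name: "x"}
        fmt.Printf("%v %+v %S\n", n, n, n) // node(x) node(x, debug) x
    }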
+var NumImport = make(map[string]int) + +func InstallTypeFormats() { + types.Sconv = func(s *types.Sym, flag, mode int) string { + return sconv(s, FmtFlag(flag), FmtMode(mode)) + } + types.Tconv = func(t *types.Type, flag, mode int) string { + return tconv(t, FmtFlag(flag), FmtMode(mode)) + } + types.FormatSym = func(sym *types.Sym, s fmt.State, verb rune, mode int) { + symFormat(sym, s, verb, FmtMode(mode)) + } + types.FormatType = func(t *types.Type, s fmt.State, verb rune, mode int) { + typeFormat(t, s, verb, FmtMode(mode)) + } +} + +// Line returns n's position as a string. If n has been inlined, +// it uses the outermost position where n has been inlined. +func Line(n *Node) string { + return base.FmtPos(n.Pos) +} diff --git a/src/cmd/compile/internal/ir/ir.go b/src/cmd/compile/internal/ir/ir.go new file mode 100644 index 0000000000000..ad7f692b07782 --- /dev/null +++ b/src/cmd/compile/internal/ir/ir.go @@ -0,0 +1,12 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import "cmd/compile/internal/types" + +var LocalPkg *types.Pkg // package being compiled + +// builtinpkg is a fake package that declares the universe block. +var BuiltinPkg *types.Pkg diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/ir/node.go similarity index 82% rename from src/cmd/compile/internal/gc/syntax.go rename to src/cmd/compile/internal/ir/node.go index 11671fc54a05d..e6ed178f495eb 100644 --- a/src/cmd/compile/internal/gc/syntax.go +++ b/src/cmd/compile/internal/ir/node.go @@ -4,17 +4,20 @@ // “Abstract” syntax representation. -package gc +package ir import ( + "go/constant" + "sort" + "strings" + "unsafe" + "cmd/compile/internal/base" "cmd/compile/internal/ssa" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/src" - "go/constant" - "sort" ) // A Node is a single node in the syntax tree. @@ -290,7 +293,7 @@ func (n *Node) SetVal(v constant.Value) { base.Fatalf("have Opt") } if n.Op == OLITERAL { - assertRepresents(n.Type, v) + AssertValidTypeForConst(n.Type, v) } n.SetHasVal(true) n.E = &v @@ -333,7 +336,7 @@ func (n *Node) SetIota(x int64) { // mayBeShared reports whether n may occur in multiple places in the AST. // Extra care must be taken when mutating such a node. -func (n *Node) mayBeShared() bool { +func MayBeShared(n *Node) bool { switch n.Op { case ONAME, OLITERAL, ONIL, OTYPE: return true @@ -342,7 +345,7 @@ } // funcname returns the name (without the package) of the function n. -func (n *Node) funcname() string { +func FuncName(n *Node) string { if n == nil || n.Func == nil || n.Func.Nname == nil { return "<nil>" } @@ -353,7 +356,7 @@ // This differs from the compiler's internal convention where local functions lack a package // because the ultimate consumer of this is a human looking at an IDE; package is only empty // if the compilation package is actually the empty string.
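InstallTypeFormats exists because package types must not import ir, so ir pushes its formatters down through function-valued hooks at startup. The same decoupling pattern in miniature, with one file standing in for the two packages (all names invented):

    package main

    import "fmt"

    // typeSym plays the role of types.Sym: a low-level type that cannot
    // import its own pretty-printer.
    type typeSym struct{ name string }

    // symString is the hook the low-level side calls; it stays nil until
    // a higher layer installs an implementation, like types.Sconv above.
    var symString func(*typeSym) string

    func (s *typeSym) String() string {
        if symString == nil {
            return s.name // fallback before installation
        }
        return symString(s)
    }

    // installFormats plays the role of ir.InstallTypeFormats.
    func installFormats() {
        symString = func(s *typeSym) string { return "sym:" + s.name }
    }

    func main() {
        s := &typeSym{name: "Foo"}
        fmt.Println(s) // Foo
        installFormats()
        fmt.Println(s) // sym:Foo
    }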
-func (n *Node) pkgFuncName() string { +func PkgFuncName(n *Node) string { var s *types.Sym if n == nil { return "<nil>" } @@ -681,7 +684,7 @@ type Func struct { FieldTrack map[*types.Sym]struct{} DebugInfo *ssa.FuncDebug - lsym *obj.LSym + LSym *obj.LSym Inl *Inline @@ -693,13 +696,13 @@ Pragma PragmaFlag // go:xxx function annotations flags bitset16 - numDefers int // number of defer calls in the function - numReturns int // number of explicit returns in the function + NumDefers int // number of defer calls in the function + NumReturns int // number of explicit returns in the function // nwbrCalls records the LSyms of functions called by this // function for go:nowritebarrierrec analysis. Only filled in // if nowritebarrierrecCheck != nil. - nwbrCalls *[]nowritebarrierrecCallSym + NWBRCalls *[]SymAndPos } // An Inline holds fields used for function bodies that can be inlined. @@ -764,7 +767,7 @@ func (f *Func) SetExportInline(b bool) { f.flags.set(funcExportInlin func (f *Func) SetInstrumentBody(b bool) { f.flags.set(funcInstrumentBody, b) } func (f *Func) SetOpenCodedDeferDisallowed(b bool) { f.flags.set(funcOpenCodedDeferDisallowed, b) } -func (f *Func) setWBPos(pos src.XPos) { +func (f *Func) SetWBPos(pos src.XPos) { if base.Debug.WB != 0 { base.WarnfAt(pos, "write barrier") } @@ -996,7 +999,7 @@ const ( type Nodes struct{ slice *[]*Node } // asNodes returns a slice of *Node as a Nodes value. -func asNodes(s []*Node) Nodes { +func AsNodes(s []*Node) Nodes { return Nodes{&s} } @@ -1136,38 +1139,38 @@ func (n *Nodes) AppendNodes(n2 *Nodes) { // inspect invokes f on each node in an AST in depth-first order. // If f(n) returns false, inspect skips visiting n's children. -func inspect(n *Node, f func(*Node) bool) { +func Inspect(n *Node, f func(*Node) bool) { if n == nil || !f(n) { return } - inspectList(n.Ninit, f) - inspect(n.Left, f) - inspect(n.Right, f) - inspectList(n.List, f) - inspectList(n.Nbody, f) - inspectList(n.Rlist, f) + InspectList(n.Ninit, f) + Inspect(n.Left, f) + Inspect(n.Right, f) + InspectList(n.List, f) + InspectList(n.Nbody, f) + InspectList(n.Rlist, f) } -func inspectList(l Nodes, f func(*Node) bool) { +func InspectList(l Nodes, f func(*Node) bool) { for _, n := range l.Slice() { - inspect(n, f) + Inspect(n, f) } } // nodeQueue is a FIFO queue of *Node. The zero value of nodeQueue is // a ready-to-use empty queue. -type nodeQueue struct { +type NodeQueue struct { ring []*Node head, tail int } // empty reports whether q contains no Nodes. -func (q *nodeQueue) empty() bool { +func (q *NodeQueue) Empty() bool { return q.head == q.tail } // pushRight appends n to the right of the queue. -func (q *nodeQueue) pushRight(n *Node) { +func (q *NodeQueue) PushRight(n *Node) { if len(q.ring) == 0 { q.ring = make([]*Node, 16) } else if q.head+len(q.ring) == q.tail { @@ -1191,8 +1194,8 @@ // popLeft pops a node from the left of the queue. It panics if q is // empty.
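Inspect and InspectList, now exported, are the compiler's generic traversal: depth-first, with a false return pruning the subtree. The pruning behavior on a toy tree (a sketch, not the ir types):

    package main

    import "fmt"

    type node struct {
        op       string
        children []*node
    }

    // inspect mirrors ir.Inspect: visit depth-first, and skip a node's
    // children when the callback returns false.
    func inspect(n *node, f func(*node) bool) {
        if n == nil || !f(n) {
            return
        }
        for _, c := range n.children {
            inspect(c, f)
        }
    }

    func main() {
        tree := &node{op: "call", children: []*node{
            {op: "name"},
            {op: "func", children: []*node{{op: "return"}}},
        }}
        count := 0
        inspect(tree, func(n *node) bool {
            count++
            return n.op != "func" // don't descend into nested functions
        })
        fmt.Println(count) // 3: the "return" child is pruned
    }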
-func (q *nodeQueue) popLeft() *Node { - if q.empty() { +func (q *NodeQueue) PopLeft() *Node { + if q.Empty() { panic("dequeue empty") } n := q.ring[q.head%len(q.ring)] @@ -1226,3 +1229,342 @@ func (s NodeSet) Sorted(less func(*Node, *Node) bool) []*Node { sort.Slice(res, func(i, j int) bool { return less(res[i], res[j]) }) return res } + +func Nod(op Op, nleft, nright *Node) *Node { + return NodAt(base.Pos, op, nleft, nright) +} + +func NodAt(pos src.XPos, op Op, nleft, nright *Node) *Node { + var n *Node + switch op { + case ODCLFUNC: + var x struct { + n Node + f Func + } + n = &x.n + n.Func = &x.f + n.Func.Decl = n + case ONAME: + base.Fatalf("use newname instead") + case OLABEL, OPACK: + var x struct { + n Node + m Name + } + n = &x.n + n.Name = &x.m + default: + n = new(Node) + } + n.Op = op + n.Left = nleft + n.Right = nright + n.Pos = pos + n.Xoffset = types.BADWIDTH + n.Orig = n + return n +} + +// newnamel returns a new ONAME Node associated with symbol s at position pos. +// The caller is responsible for setting n.Name.Curfn. +func NewNameAt(pos src.XPos, s *types.Sym) *Node { + if s == nil { + base.Fatalf("newnamel nil") + } + + var x struct { + n Node + m Name + p Param + } + n := &x.n + n.Name = &x.m + n.Name.Param = &x.p + + n.Op = ONAME + n.Pos = pos + n.Orig = n + + n.Sym = s + return n +} + +// The Class of a variable/function describes the "storage class" +// of a variable or function. During parsing, storage classes are +// called declaration contexts. +type Class uint8 + +//go:generate stringer -type=Class +const ( + Pxxx Class = iota // no class; used during ssa conversion to indicate pseudo-variables + PEXTERN // global variables + PAUTO // local variables + PAUTOHEAP // local variables or parameters moved to heap + PPARAM // input arguments + PPARAMOUT // output results + PFUNC // global functions + + // Careful: Class is stored in three bits in Node.flags. + _ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3) +) + +type PragmaFlag int16 + +const ( + // Func pragmas. + Nointerface PragmaFlag = 1 << iota + Noescape // func parameters don't escape + Norace // func must not have race detector annotations + Nosplit // func should not execute on separate stack + Noinline // func should not be inlined + NoCheckPtr // func should not be instrumented by checkptr + CgoUnsafeArgs // treat a pointer to one arg as a pointer to them all + UintptrEscapes // pointers converted to uintptr escape + + // Runtime-only func pragmas. + // See ../../../../runtime/README.md for detailed descriptions. + Systemstack // func must run on system stack + Nowritebarrier // emit compiler error instead of write barrier + Nowritebarrierrec // error on write barrier in this or recursive callees + Yeswritebarrierrec // cancels Nowritebarrierrec in this function and callees + + // Runtime and cgo type pragmas + NotInHeap // values of this type must not be heap allocated + + // Go command pragmas + GoBuildPragma +) + +type SymAndPos struct { + Sym *obj.LSym // LSym of callee + Pos src.XPos // line of call +} + +func AsNode(n *types.Node) *Node { return (*Node)(unsafe.Pointer(n)) } + +func AsTypesNode(n *Node) *types.Node { return (*types.Node)(unsafe.Pointer(n)) } + +var BlankNode *Node + +// origSym returns the original symbol written by the user. 
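The anonymous struct in NodAt and NewNameAt is a deliberate allocation trick: declaring the Node together with its side structures as one composite local yields a single heap object once it escapes, instead of two or three separate ones. The idea in isolation (toy types):

    package main

    import "fmt"

    type node struct{ fn *funcInfo }
    type funcInfo struct{ decl *node }

    // newFuncNode packs a node and its funcInfo into one allocation,
    // as NodAt does for ODCLFUNC: one GC object, and the pair can
    // never be torn apart.
    func newFuncNode() *node {
        var x struct {
            n node
            f funcInfo
        }
        x.n.fn = &x.f
        x.f.decl = &x.n
        return &x.n
    }

    func main() {
        n := newFuncNode()
        fmt.Println(n.fn.decl == n) // true
    }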
+func OrigSym(s *types.Sym) *types.Sym { + if s == nil { + return nil + } + + if len(s.Name) > 1 && s.Name[0] == '~' { + switch s.Name[1] { + case 'r': // originally an unnamed result + return nil + case 'b': // originally the blank identifier _ + // TODO(mdempsky): Does s.Pkg matter here? + return BlankNode.Sym + } + return s + } + + if strings.HasPrefix(s.Name, ".anon") { + // originally an unnamed or _ name (see subr.go: structargs) + return nil + } + + return s +} + +// SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max]. +// n must be a slice expression. max is nil if n is a simple slice expression. +func (n *Node) SliceBounds() (low, high, max *Node) { + if n.List.Len() == 0 { + return nil, nil, nil + } + + switch n.Op { + case OSLICE, OSLICEARR, OSLICESTR: + s := n.List.Slice() + return s[0], s[1], nil + case OSLICE3, OSLICE3ARR: + s := n.List.Slice() + return s[0], s[1], s[2] + } + base.Fatalf("SliceBounds op %v: %v", n.Op, n) + return nil, nil, nil +} + +// SetSliceBounds sets n's slice bounds, where n is a slice expression. +// n must be a slice expression. If max is non-nil, n must be a full slice expression. +func (n *Node) SetSliceBounds(low, high, max *Node) { + switch n.Op { + case OSLICE, OSLICEARR, OSLICESTR: + if max != nil { + base.Fatalf("SetSliceBounds %v given three bounds", n.Op) + } + s := n.List.Slice() + if s == nil { + if low == nil && high == nil { + return + } + n.List.Set2(low, high) + return + } + s[0] = low + s[1] = high + return + case OSLICE3, OSLICE3ARR: + s := n.List.Slice() + if s == nil { + if low == nil && high == nil && max == nil { + return + } + n.List.Set3(low, high, max) + return + } + s[0] = low + s[1] = high + s[2] = max + return + } + base.Fatalf("SetSliceBounds op %v: %v", n.Op, n) +} + +// IsSlice3 reports whether o is a slice3 op (OSLICE3, OSLICE3ARR). +// o must be a slicing op. +func (o Op) IsSlice3() bool { + switch o { + case OSLICE, OSLICEARR, OSLICESTR: + return false + case OSLICE3, OSLICE3ARR: + return true + } + base.Fatalf("IsSlice3 op %v", o) + return false +} + +func IsConst(n *Node, ct constant.Kind) bool { + return ConstType(n) == ct +} + +// Int64Val returns n as an int64. +// n must be an integer or rune constant. +func (n *Node) Int64Val() int64 { + if !IsConst(n, constant.Int) { + base.Fatalf("Int64Val(%v)", n) + } + x, ok := constant.Int64Val(n.Val()) + if !ok { + base.Fatalf("Int64Val(%v)", n) + } + return x +} + +// CanInt64 reports whether it is safe to call Int64Val() on n. +func (n *Node) CanInt64() bool { + if !IsConst(n, constant.Int) { + return false + } + + // if the value inside n cannot be represented as an int64, the + // return value of Int64 is undefined + _, ok := constant.Int64Val(n.Val()) + return ok +} + +// Uint64Val returns n as an uint64. +// n must be an integer or rune constant. +func (n *Node) Uint64Val() uint64 { + if !IsConst(n, constant.Int) { + base.Fatalf("Uint64Val(%v)", n) + } + x, ok := constant.Uint64Val(n.Val()) + if !ok { + base.Fatalf("Uint64Val(%v)", n) + } + return x +} + +// BoolVal returns n as a bool. +// n must be a boolean constant. +func (n *Node) BoolVal() bool { + if !IsConst(n, constant.Bool) { + base.Fatalf("BoolVal(%v)", n) + } + return constant.BoolVal(n.Val()) +} + +// StringVal returns the value of a literal string Node as a string. +// n must be a string constant. 
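Int64Val, CanInt64 and the other accessors above are thin wrappers over go/constant's two-result functions, where the boolean reports exact representability; Fatalf fires only when a caller skips the check. The underlying behavior can be seen directly:

    package main

    import (
        "fmt"
        "go/constant"
        "go/token"
    )

    func main() {
        small := constant.MakeInt64(42)
        big := constant.Shift(constant.MakeInt64(1), token.SHL, 80) // 1 << 80

        // The bool is what CanInt64 inspects before Int64Val commits.
        if x, ok := constant.Int64Val(small); ok {
            fmt.Println("small =", x) // small = 42
        }
        if _, ok := constant.Int64Val(big); !ok {
            fmt.Println("1 << 80 does not fit in an int64")
        }
    }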
+func (n *Node) StringVal() string { + if !IsConst(n, constant.String) { + base.Fatalf("StringVal(%v)", n) + } + return constant.StringVal(n.Val()) +} + +// rawcopy returns a shallow copy of n. +// Note: copy or sepcopy (rather than rawcopy) is usually the +// correct choice (see comment with Node.copy, below). +func (n *Node) RawCopy() *Node { + copy := *n + return &copy +} + +// sepcopy returns a separate shallow copy of n, with the copy's +// Orig pointing to itself. +func SepCopy(n *Node) *Node { + copy := *n + copy.Orig = &copy + return &copy +} + +// copy returns shallow copy of n and adjusts the copy's Orig if +// necessary: In general, if n.Orig points to itself, the copy's +// Orig should point to itself as well. Otherwise, if n is modified, +// the copy's Orig node appears modified, too, and then doesn't +// represent the original node anymore. +// (This caused the wrong complit Op to be used when printing error +// messages; see issues #26855, #27765). +func Copy(n *Node) *Node { + copy := *n + if n.Orig == n { + copy.Orig = &copy + } + return &copy +} + +// isNil reports whether n represents the universal untyped zero value "nil". +func IsNil(n *Node) bool { + // Check n.Orig because constant propagation may produce typed nil constants, + // which don't exist in the Go spec. + return n.Orig.Op == ONIL +} + +func IsBlank(n *Node) bool { + if n == nil { + return false + } + return n.Sym.IsBlank() +} + +// IsMethod reports whether n is a method. +// n must be a function or a method. +func IsMethod(n *Node) bool { + return n.Type.Recv() != nil +} + +func (n *Node) Typ() *types.Type { + return n.Type +} + +func (n *Node) StorageClass() ssa.StorageClass { + switch n.Class() { + case PPARAM: + return ssa.ClassParam + case PPARAMOUT: + return ssa.ClassParamOut + case PAUTO: + return ssa.ClassAuto + default: + base.Fatalf("untranslatable storage class for %v: %s", n, n.Class()) + return 0 + } +} diff --git a/src/cmd/compile/internal/gc/op_string.go b/src/cmd/compile/internal/ir/op_string.go similarity index 99% rename from src/cmd/compile/internal/gc/op_string.go rename to src/cmd/compile/internal/ir/op_string.go index 16fd79e477880..d0d3778357e67 100644 --- a/src/cmd/compile/internal/gc/op_string.go +++ b/src/cmd/compile/internal/ir/op_string.go @@ -1,6 +1,6 @@ // Code generated by "stringer -type=Op -trimprefix=O"; DO NOT EDIT. -package gc +package ir import "strconv" diff --git a/src/cmd/compile/internal/gc/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go similarity index 98% rename from src/cmd/compile/internal/gc/sizeof_test.go rename to src/cmd/compile/internal/ir/sizeof_test.go index 2f2eba4c6755c..c5169b9092a0e 100644 --- a/src/cmd/compile/internal/gc/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package ir import ( "reflect" diff --git a/src/cmd/compile/internal/ir/val.go b/src/cmd/compile/internal/ir/val.go new file mode 100644 index 0000000000000..00b5bfd1ad4fd --- /dev/null +++ b/src/cmd/compile/internal/ir/val.go @@ -0,0 +1,120 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package ir + +import ( + "go/constant" + "math" + + "cmd/compile/internal/base" + "cmd/compile/internal/types" +) + +func ConstType(n *Node) constant.Kind { + if n == nil || n.Op != OLITERAL { + return constant.Unknown + } + return n.Val().Kind() +} + +// ValueInterface returns the constant value stored in n as an interface{}. +// It returns int64s for ints and runes, float64s for floats, +// and complex128s for complex values. +func ConstValue(n *Node) interface{} { + switch v := n.Val(); v.Kind() { + default: + base.Fatalf("unexpected constant: %v", v) + panic("unreachable") + case constant.Bool: + return constant.BoolVal(v) + case constant.String: + return constant.StringVal(v) + case constant.Int: + return Int64Val(n.Type, v) + case constant.Float: + return Float64Val(v) + case constant.Complex: + return complex(Float64Val(constant.Real(v)), Float64Val(constant.Imag(v))) + } +} + +// int64Val returns v converted to int64. +// Note: if t is uint64, very large values will be converted to negative int64. +func Int64Val(t *types.Type, v constant.Value) int64 { + if t.IsUnsigned() { + if x, ok := constant.Uint64Val(v); ok { + return int64(x) + } + } else { + if x, ok := constant.Int64Val(v); ok { + return x + } + } + base.Fatalf("%v out of range for %v", v, t) + panic("unreachable") +} + +func Float64Val(v constant.Value) float64 { + if x, _ := constant.Float64Val(v); !math.IsInf(x, 0) { + return x + 0 // avoid -0 (should not be needed, but be conservative) + } + base.Fatalf("bad float64 value: %v", v) + panic("unreachable") +} + +func AssertValidTypeForConst(t *types.Type, v constant.Value) { + if !ValidTypeForConst(t, v) { + base.Fatalf("%v does not represent %v", t, v) + } +} + +func ValidTypeForConst(t *types.Type, v constant.Value) bool { + switch v.Kind() { + case constant.Unknown: + return OKForConst[t.Etype] + case constant.Bool: + return t.IsBoolean() + case constant.String: + return t.IsString() + case constant.Int: + return t.IsInteger() + case constant.Float: + return t.IsFloat() + case constant.Complex: + return t.IsComplex() + } + + base.Fatalf("unexpected constant kind: %v", v) + panic("unreachable") +} + +// nodlit returns a new untyped constant with value v. 
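The curious x + 0 in Float64Val is an IEEE 754 trick: adding positive zero maps negative zero to positive zero and leaves every other finite value unchanged, so the compiler never hands back a -0 constant. Observable directly:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        x := math.Copysign(0, -1)        // -0.0
        fmt.Println(math.Signbit(x))     // true
        fmt.Println(math.Signbit(x + 0)) // false: -0 + 0 rounds to +0
    }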
+func NewLiteral(v constant.Value) *Node { + n := Nod(OLITERAL, nil, nil) + if k := v.Kind(); k != constant.Unknown { + n.Type = idealType(k) + n.SetVal(v) + } + return n +} + +func idealType(ct constant.Kind) *types.Type { + switch ct { + case constant.String: + return types.UntypedString + case constant.Bool: + return types.UntypedBool + case constant.Int: + return types.UntypedInt + case constant.Float: + return types.UntypedFloat + case constant.Complex: + return types.UntypedComplex + } + base.Fatalf("unexpected Ctype: %v", ct) + return nil +} + +var OKForConst [types.NTYPE]bool diff --git a/src/cmd/compile/internal/mips/ssa.go b/src/cmd/compile/internal/mips/ssa.go index c37a2e07149d9..87e6f5b0c7ae2 100644 --- a/src/cmd/compile/internal/mips/ssa.go +++ b/src/cmd/compile/internal/mips/ssa.go @@ -9,6 +9,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/gc" + "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" "cmd/compile/internal/types" @@ -288,7 +289,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case *obj.LSym: wantreg = "SB" gc.AddAux(&p.From, v) - case *gc.Node: + case *ir.Node: wantreg = "SP" gc.AddAux(&p.From, v) case nil: diff --git a/src/cmd/compile/internal/mips64/ssa.go b/src/cmd/compile/internal/mips64/ssa.go index a7c10d8869b5d..ea22c488aab97 100644 --- a/src/cmd/compile/internal/mips64/ssa.go +++ b/src/cmd/compile/internal/mips64/ssa.go @@ -9,6 +9,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/gc" + "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" "cmd/compile/internal/types" @@ -262,7 +263,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case *obj.LSym: wantreg = "SB" gc.AddAux(&p.From, v) - case *gc.Node: + case *ir.Node: wantreg = "SP" gc.AddAux(&p.From, v) case nil: diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go index e3f0ee1a932f7..848f27af843f4 100644 --- a/src/cmd/compile/internal/ppc64/ssa.go +++ b/src/cmd/compile/internal/ppc64/ssa.go @@ -7,6 +7,7 @@ package ppc64 import ( "cmd/compile/internal/base" "cmd/compile/internal/gc" + "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" "cmd/compile/internal/types" @@ -751,7 +752,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Reg = v.Reg() } - case *obj.LSym, *gc.Node: + case *obj.LSym, *ir.Node: p := s.Prog(ppc64.AMOVD) p.From.Type = obj.TYPE_ADDR p.From.Reg = v.Args[0].Reg() diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go index 5a71b33c00cb6..a3dc07fe03029 100644 --- a/src/cmd/compile/internal/riscv64/ssa.go +++ b/src/cmd/compile/internal/riscv64/ssa.go @@ -7,6 +7,7 @@ package riscv64 import ( "cmd/compile/internal/base" "cmd/compile/internal/gc" + "cmd/compile/internal/ir" "cmd/compile/internal/ssa" "cmd/compile/internal/types" "cmd/internal/obj" @@ -323,7 +324,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case *obj.LSym: wantreg = "SB" gc.AddAux(&p.From, v) - case *gc.Node: + case *ir.Node: wantreg = "SP" gc.AddAux(&p.From, v) case nil: diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go index 373dc431e54c6..1a8b5691ef09a 100644 --- a/src/cmd/compile/internal/wasm/ssa.go +++ b/src/cmd/compile/internal/wasm/ssa.go @@ -7,6 +7,7 @@ package wasm import ( "cmd/compile/internal/base" "cmd/compile/internal/gc" + "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" "cmd/compile/internal/types" @@ 
-236,7 +237,7 @@ func ssaGenValueOnStack(s *gc.SSAGenState, v *ssa.Value, extend bool) { switch v.Aux.(type) { case *obj.LSym: gc.AddAux(&p.From, v) - case *gc.Node: + case *ir.Node: p.From.Reg = v.Args[0].Reg() gc.AddAux(&p.From, v) default: diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go index f8e1f2f95131c..839579349a28d 100644 --- a/src/cmd/dist/buildtool.go +++ b/src/cmd/dist/buildtool.go @@ -42,6 +42,7 @@ var bootstrapDirs = []string{ "cmd/compile/internal/arm", "cmd/compile/internal/arm64", "cmd/compile/internal/gc", + "cmd/compile/internal/ir", "cmd/compile/internal/logopt", "cmd/compile/internal/mips", "cmd/compile/internal/mips64", From 048debb2246d17ecd19ccfd603e8544d5e7946a0 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Tue, 17 Nov 2020 21:47:56 -0500 Subject: [PATCH 041/474] =?UTF-8?q?[dev.regabi]=20cmd/compile:=20remove=20?= =?UTF-8?q?gc=20=E2=86=94=20ssa=20cycle=20hacks?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The cycle hacks existed because gc needed to import ssa which need to know about gc.Node. But now that's ir.Node, and there's no cycle anymore. Don't know how much it matters but LocalSlot is now one word shorter than before, because it holds a pointer instead of an interface for the *Node. That won't last long. Now that they're not necessary for interface satisfaction, IsSynthetic and IsAutoTmp can move to top-level ir functions. Change-Id: Ie511e93466cfa2b17d9a91afc4bd8d53fdb80453 Reviewed-on: https://go-review.googlesource.com/c/go/+/272931 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/fmtmap_test.go | 2 -- src/cmd/compile/internal/gc/order.go | 10 +++---- src/cmd/compile/internal/gc/pgen.go | 16 +++++------ src/cmd/compile/internal/gc/ssa.go | 25 ++++++++-------- src/cmd/compile/internal/gc/typecheck.go | 2 +- src/cmd/compile/internal/ir/node.go | 25 ++-------------- src/cmd/compile/internal/ir/sizeof_test.go | 2 +- src/cmd/compile/internal/ssa/config.go | 21 ++------------ src/cmd/compile/internal/ssa/deadstore.go | 29 ++++++++++--------- src/cmd/compile/internal/ssa/debug.go | 21 +++++++------- src/cmd/compile/internal/ssa/export_test.go | 32 ++++----------------- src/cmd/compile/internal/ssa/location.go | 3 +- src/cmd/compile/internal/ssa/nilcheck.go | 3 +- src/cmd/compile/internal/ssa/regalloc.go | 3 +- src/cmd/compile/internal/ssa/sizeof_test.go | 2 +- src/cmd/compile/internal/ssa/stackalloc.go | 3 +- 16 files changed, 73 insertions(+), 126 deletions(-) diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go index 404e89d0f2602..432d26a7b8751 100644 --- a/src/cmd/compile/fmtmap_test.go +++ b/src/cmd/compile/fmtmap_test.go @@ -81,7 +81,6 @@ var knownFormats = map[string]string{ "cmd/compile/internal/gc.initKind %d": "", "cmd/compile/internal/gc.itag %v": "", "cmd/compile/internal/ir.Class %d": "", - "cmd/compile/internal/ir.Class %s": "", "cmd/compile/internal/ir.Class %v": "", "cmd/compile/internal/ir.FmtMode %d": "", "cmd/compile/internal/ir.Nodes %#v": "", @@ -92,7 +91,6 @@ var knownFormats = map[string]string{ "cmd/compile/internal/ir.Op %v": "", "cmd/compile/internal/ssa.BranchPrediction %d": "", "cmd/compile/internal/ssa.Edge %v": "", - "cmd/compile/internal/ssa.GCNode %v": "", "cmd/compile/internal/ssa.ID %d": "", "cmd/compile/internal/ssa.ID %v": "", "cmd/compile/internal/ssa.LocalSlot %s": "", diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 
25bdbd5a4143e..3bd49e8094efc 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -195,7 +195,7 @@ func (o *Order) safeExpr(n *ir.Node) *ir.Node { // because we emit explicit VARKILL instructions marking the end of those // temporaries' lifetimes. func isaddrokay(n *ir.Node) bool { - return islvalue(n) && (n.Op != ir.ONAME || n.Class() == ir.PEXTERN || n.IsAutoTmp()) + return islvalue(n) && (n.Op != ir.ONAME || n.Class() == ir.PEXTERN || ir.IsAutoTmp(n)) } // addrTemp ensures that n is okay to pass by address to runtime routines. @@ -550,10 +550,10 @@ func (o *Order) mapAssign(n *ir.Node) { for i, m := range n.List.Slice() { switch { case m.Op == ir.OINDEXMAP: - if !m.Left.IsAutoTmp() { + if !ir.IsAutoTmp(m.Left) { m.Left = o.copyExpr(m.Left, m.Left.Type, false) } - if !m.Right.IsAutoTmp() { + if !ir.IsAutoTmp(m.Right) { m.Right = o.copyExpr(m.Right, m.Right.Type, false) } fallthrough @@ -952,11 +952,11 @@ func (o *Order) stmt(n *ir.Node) { // r->left is c, r->right is x, both are always evaluated. r.Left = o.expr(r.Left, nil) - if !r.Left.IsAutoTmp() { + if !ir.IsAutoTmp(r.Left) { r.Left = o.copyExpr(r.Left, r.Left.Type, false) } r.Right = o.expr(r.Right, nil) - if !r.Right.IsAutoTmp() { + if !ir.IsAutoTmp(r.Right) { r.Right = o.copyExpr(r.Right, r.Right.Type, false) } } diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 38f416c1c3842..6e7922ca5488b 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -121,7 +121,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { for _, l := range f.RegAlloc { if ls, ok := l.(ssa.LocalSlot); ok { - ls.N.(*ir.Node).Name.SetUsed(true) + ls.N.Name.SetUsed(true) } } @@ -517,7 +517,7 @@ func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Node) ([]*ir.Node, []*dwarf var decls []*ir.Node selected := make(map[*ir.Node]bool) for _, n := range apDecls { - if n.IsAutoTmp() { + if ir.IsAutoTmp(n) { continue } @@ -580,7 +580,7 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Node) *dwarf.Var { // createComplexVars creates recomposed DWARF vars with location lists, // suitable for describing optimized code. func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Node, []*dwarf.Var, map[*ir.Node]bool) { - debugInfo := fn.DebugInfo + debugInfo := fn.DebugInfo.(*ssa.FuncDebug) // Produce a DWARF variable entry for each user variable. var decls []*ir.Node @@ -588,10 +588,10 @@ func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Node, []*dwarf.Var, ssaVars := make(map[*ir.Node]bool) for varID, dvar := range debugInfo.Vars { - n := dvar.(*ir.Node) + n := dvar ssaVars[n] = true for _, slot := range debugInfo.VarSlots[varID] { - ssaVars[debugInfo.Slots[slot].N.(*ir.Node)] = true + ssaVars[debugInfo.Slots[slot].N] = true } if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil { @@ -727,7 +727,7 @@ func preInliningDcls(fnsym *obj.LSym) []*ir.Node { // stack pointer, suitable for use in a DWARF location entry. This has nothing // to do with its offset in the user variable. func stackOffset(slot ssa.LocalSlot) int32 { - n := slot.N.(*ir.Node) + n := slot.N var off int64 switch n.Class() { case ir.PAUTO: @@ -746,8 +746,8 @@ func stackOffset(slot ssa.LocalSlot) int32 { // createComplexVar builds a single DWARF variable entry and location list. 
func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var { - debug := fn.DebugInfo - n := debug.Vars[varID].(*ir.Node) + debug := fn.DebugInfo.(*ssa.FuncDebug) + n := debug.Vars[varID] var abbrev int switch n.Class() { diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 658ea28fbe228..5cee3fab85c5f 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -3080,7 +3080,7 @@ func (s *state) assign(left *ir.Node, right *ssa.Value, deref bool, skip skipMas // If this assignment clobbers an entire local variable, then emit // OpVarDef so liveness analysis knows the variable is redefined. if base := clobberBase(left); base.Op == ir.ONAME && base.Class() != ir.PEXTERN && skip == 0 { - s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !base.IsAutoTmp()) + s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !ir.IsAutoTmp(base)) } // Left is not ssa-able. Compute its address. @@ -3103,7 +3103,7 @@ func (s *state) assign(left *ir.Node, right *ssa.Value, deref bool, skip skipMas return } // Treat as a store. - s.storeType(t, addr, right, skip, !left.IsAutoTmp()) + s.storeType(t, addr, right, skip, !ir.IsAutoTmp(left)) } // zeroVal returns the zero value for type t. @@ -4860,7 +4860,7 @@ func (s *state) addr(n *ir.Node) *ssa.Value { s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs) return nil case ir.PAUTO: - return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !n.IsAutoTmp()) + return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !ir.IsAutoTmp(n)) case ir.PPARAMOUT: // Same as PAUTO -- cannot generate LEA early. // ensure that we reuse symbols for out parameters so @@ -6063,7 +6063,7 @@ func (s *state) addNamedValue(n *ir.Node, v *ssa.Value) { // Don't track our marker nodes (memVar etc.). return } - if n.IsAutoTmp() { + if ir.IsAutoTmp(n) { // Don't track temporary variables. return } @@ -6476,12 +6476,13 @@ func genssa(f *ssa.Func, pp *Progs) { } if base.Ctxt.Flag_locationlists { - e.curfn.Func.DebugInfo = ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, stackOffset) + debugInfo := ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, stackOffset) + e.curfn.Func.DebugInfo = debugInfo bstart := s.bstart // Note that at this moment, Prog.Pc is a sequence number; it's // not a real PC until after assembly, so this mapping has to // be done later. 
- e.curfn.Func.DebugInfo.GetPC = func(b, v ssa.ID) int64 { + debugInfo.GetPC = func(b, v ssa.ID) int64 { switch v { case ssa.BlockStart.ID: if b == f.Entry.ID { @@ -6820,7 +6821,7 @@ func AutoVar(v *ssa.Value) (*ir.Node, int64) { if v.Type.Size() > loc.Type.Size() { v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type) } - return loc.N.(*ir.Node), loc.Off + return loc.N, loc.Off } func AddrAuto(a *obj.Addr, v *ssa.Value) { @@ -6975,7 +6976,7 @@ func (e *ssafn) StringData(s string) *obj.LSym { return data } -func (e *ssafn) Auto(pos src.XPos, t *types.Type) ssa.GCNode { +func (e *ssafn) Auto(pos src.XPos, t *types.Type) *ir.Node { n := tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list return n } @@ -6990,7 +6991,7 @@ func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { } func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { - n := name.N.(*ir.Node) + n := name.N u := types.Types[types.TUINTPTR] t := types.NewPtr(types.Types[types.TUINT8]) // Split this interface up into two separate variables. @@ -7047,7 +7048,7 @@ func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot { } func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot { - n := name.N.(*ir.Node) + n := name.N at := name.Type if at.NumElem() != 1 { e.Fatalf(n.Pos, "bad array size") @@ -7062,7 +7063,7 @@ func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym { // SplitSlot returns a slot representing the data of parent starting at offset. func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot { - node := parent.N.(*ir.Node) + node := parent.N if node.Class() != ir.PAUTO || node.Name.Addrtaken() { // addressed things and non-autos retain their parents (i.e., cannot truly be split) @@ -7070,7 +7071,7 @@ func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t } s := &types.Sym{Name: node.Sym.Name + suffix, Pkg: ir.LocalPkg} - n := ir.NewNameAt(parent.N.(*ir.Node).Pos, s) + n := ir.NewNameAt(parent.N.Pos, s) s.Def = ir.AsTypesNode(n) ir.AsNode(s.Def).Name.SetUsed(true) n.Type = t diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 78fdf100ad288..318f315f16cbe 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -1960,7 +1960,7 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { typecheckas(n) // Code that creates temps does not bother to set defn, so do it here. - if n.Left.Op == ir.ONAME && n.Left.IsAutoTmp() { + if n.Left.Op == ir.ONAME && ir.IsAutoTmp(n.Left) { n.Left.Name.Defn = n } diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index e6ed178f495eb..cac9e6eb3eeb1 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -13,7 +13,6 @@ import ( "unsafe" "cmd/compile/internal/base" - "cmd/compile/internal/ssa" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/objabi" @@ -156,14 +155,14 @@ func (n *Node) SetTChanDir(dir types.ChanDir) { n.aux = uint8(dir) } -func (n *Node) IsSynthetic() bool { +func IsSynthetic(n *Node) bool { name := n.Sym.Name return name[0] == '.' || name[0] == '~' } // IsAutoTmp indicates if n was created by the compiler as a temporary, // based on the setting of the .AutoTemp flag in n's Name. 
-func (n *Node) IsAutoTmp() bool { +func IsAutoTmp(n *Node) bool { if n == nil || n.Op != ONAME { return false } @@ -683,7 +682,7 @@ type Func struct { Closgen int FieldTrack map[*types.Sym]struct{} - DebugInfo *ssa.FuncDebug + DebugInfo interface{} LSym *obj.LSym Inl *Inline @@ -1550,21 +1549,3 @@ func IsBlank(n *Node) bool { func IsMethod(n *Node) bool { return n.Type.Recv() != nil } - -func (n *Node) Typ() *types.Type { - return n.Type -} - -func (n *Node) StorageClass() ssa.StorageClass { - switch n.Class() { - case PPARAM: - return ssa.ClassParam - case PPARAMOUT: - return ssa.ClassParamOut - case PAUTO: - return ssa.ClassAuto - default: - base.Fatalf("untranslatable storage class for %v: %s", n, n.Class()) - return 0 - } -} diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go index c5169b9092a0e..1ec89c338d1e9 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {Func{}, 132, 240}, + {Func{}, 136, 248}, {Name{}, 32, 56}, {Param{}, 24, 48}, {Node{}, 76, 128}, diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go index 0fe0337ddfa30..62abbdc2238e4 100644 --- a/src/cmd/compile/internal/ssa/config.go +++ b/src/cmd/compile/internal/ssa/config.go @@ -5,6 +5,7 @@ package ssa import ( + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/objabi" @@ -138,7 +139,7 @@ type Frontend interface { // Auto returns a Node for an auto variable of the given type. // The SSA compiler uses this function to allocate space for spills. - Auto(src.XPos, *types.Type) GCNode + Auto(src.XPos, *types.Type) *ir.Node // Given the name for a compound type, returns the name we should use // for the parts of that compound type. @@ -178,24 +179,6 @@ type Frontend interface { MyImportPath() string } -// interface used to hold a *gc.Node (a stack variable). -// We'd use *gc.Node directly but that would lead to an import cycle. -type GCNode interface { - Typ() *types.Type - String() string - IsSynthetic() bool - IsAutoTmp() bool - StorageClass() StorageClass -} - -type StorageClass uint8 - -const ( - ClassAuto StorageClass = iota // local stack variable - ClassParam // argument - ClassParamOut // return value -) - const go116lateCallExpansion = true // LateCallExpansionEnabledWithin returns true if late call expansion should be tested diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go index 0664013b397ed..0f1cd4bc9fa1b 100644 --- a/src/cmd/compile/internal/ssa/deadstore.go +++ b/src/cmd/compile/internal/ssa/deadstore.go @@ -5,6 +5,7 @@ package ssa import ( + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/src" ) @@ -136,9 +137,9 @@ func dse(f *Func) { // reaches stores then we delete all the stores. The other operations will then // be eliminated by the dead code elimination pass. 
func elimDeadAutosGeneric(f *Func) { - addr := make(map[*Value]GCNode) // values that the address of the auto reaches - elim := make(map[*Value]GCNode) // values that could be eliminated if the auto is - used := make(map[GCNode]bool) // used autos that must be kept + addr := make(map[*Value]*ir.Node) // values that the address of the auto reaches + elim := make(map[*Value]*ir.Node) // values that could be eliminated if the auto is + used := make(map[*ir.Node]bool) // used autos that must be kept // visit the value and report whether any of the maps are updated visit := func(v *Value) (changed bool) { @@ -146,8 +147,8 @@ func elimDeadAutosGeneric(f *Func) { switch v.Op { case OpAddr, OpLocalAddr: // Propagate the address if it points to an auto. - n, ok := v.Aux.(GCNode) - if !ok || n.StorageClass() != ClassAuto { + n, ok := v.Aux.(*ir.Node) + if !ok || n.Class() != ir.PAUTO { return } if addr[v] == nil { @@ -157,8 +158,8 @@ func elimDeadAutosGeneric(f *Func) { return case OpVarDef, OpVarKill: // v should be eliminated if we eliminate the auto. - n, ok := v.Aux.(GCNode) - if !ok || n.StorageClass() != ClassAuto { + n, ok := v.Aux.(*ir.Node) + if !ok || n.Class() != ir.PAUTO { return } if elim[v] == nil { @@ -173,8 +174,8 @@ func elimDeadAutosGeneric(f *Func) { // for open-coded defers from being removed (since they // may not be used by the inline code, but will be used by // panic processing). - n, ok := v.Aux.(GCNode) - if !ok || n.StorageClass() != ClassAuto { + n, ok := v.Aux.(*ir.Node) + if !ok || n.Class() != ir.PAUTO { return } if !used[n] { @@ -221,7 +222,7 @@ func elimDeadAutosGeneric(f *Func) { } // Propagate any auto addresses through v. - node := GCNode(nil) + var node *ir.Node for _, a := range args { if n, ok := addr[a]; ok && !used[n] { if node == nil { @@ -298,15 +299,15 @@ func elimUnreadAutos(f *Func) { // Loop over all ops that affect autos taking note of which // autos we need and also stores that we might be able to // eliminate. - seen := make(map[GCNode]bool) + seen := make(map[*ir.Node]bool) var stores []*Value for _, b := range f.Blocks { for _, v := range b.Values { - n, ok := v.Aux.(GCNode) + n, ok := v.Aux.(*ir.Node) if !ok { continue } - if n.StorageClass() != ClassAuto { + if n.Class() != ir.PAUTO { continue } @@ -334,7 +335,7 @@ func elimUnreadAutos(f *Func) { // Eliminate stores to unread autos. for _, store := range stores { - n, _ := store.Aux.(GCNode) + n, _ := store.Aux.(*ir.Node) if seen[n] { continue } diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go index 6353f72897858..9de5f427c07a0 100644 --- a/src/cmd/compile/internal/ssa/debug.go +++ b/src/cmd/compile/internal/ssa/debug.go @@ -5,6 +5,7 @@ package ssa import ( + "cmd/compile/internal/ir" "cmd/internal/dwarf" "cmd/internal/obj" "encoding/hex" @@ -24,7 +25,7 @@ type FuncDebug struct { // Slots is all the slots used in the debug info, indexed by their SlotID. Slots []LocalSlot // The user variables, indexed by VarID. - Vars []GCNode + Vars []*ir.Node // The slots that make up each variable, indexed by VarID. VarSlots [][]SlotID // The location list data, indexed by VarID. Must be processed by PutLocationList. @@ -165,7 +166,7 @@ func (s *debugState) logf(msg string, args ...interface{}) { type debugState struct { // See FuncDebug. slots []LocalSlot - vars []GCNode + vars []*ir.Node varSlots [][]SlotID lists [][]byte @@ -189,7 +190,7 @@ type debugState struct { // The pending location list entry for each user variable, indexed by VarID. 
 	pendingEntries []pendingEntry
 
-	varParts map[GCNode][]SlotID
+	varParts map[*ir.Node][]SlotID
 	blockDebug      []BlockDebug
 	pendingSlotLocs []VarLoc
 	liveSlots       []liveSlot
@@ -346,7 +347,7 @@ func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset fu
 	}
 
 	if state.varParts == nil {
-		state.varParts = make(map[GCNode][]SlotID)
+		state.varParts = make(map[*ir.Node][]SlotID)
 	} else {
 		for n := range state.varParts {
 			delete(state.varParts, n)
@@ -360,7 +361,7 @@ func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset fu
 	state.vars = state.vars[:0]
 	for i, slot := range f.Names {
 		state.slots = append(state.slots, slot)
-		if slot.N.IsSynthetic() {
+		if ir.IsSynthetic(slot.N) {
 			continue
 		}
 
@@ -379,8 +380,8 @@ func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset fu
 	for _, b := range f.Blocks {
 		for _, v := range b.Values {
 			if v.Op == OpVarDef || v.Op == OpVarKill {
-				n := v.Aux.(GCNode)
-				if n.IsSynthetic() {
+				n := v.Aux.(*ir.Node)
+				if ir.IsSynthetic(n) {
 					continue
 				}
 
@@ -425,7 +426,7 @@ func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset fu
 	state.initializeCache(f, len(state.varParts), len(state.slots))
 
 	for i, slot := range f.Names {
-		if slot.N.IsSynthetic() {
+		if ir.IsSynthetic(slot.N) {
 			continue
 		}
 		for _, value := range f.NamedValues[slot] {
@@ -717,8 +718,8 @@ func (state *debugState) processValue(v *Value, vSlots []SlotID, vReg *Register)
 	switch {
 	case v.Op == OpVarDef, v.Op == OpVarKill:
-		n := v.Aux.(GCNode)
-		if n.IsSynthetic() {
+		n := v.Aux.(*ir.Node)
+		if ir.IsSynthetic(n) {
 			break
 		}
diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go
index bfe94ff160273..3d142a2272014 100644
--- a/src/cmd/compile/internal/ssa/export_test.go
+++ b/src/cmd/compile/internal/ssa/export_test.go
@@ -5,6 +5,7 @@ package ssa
 import (
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/arm64"
@@ -65,36 +66,13 @@ type TestFrontend struct {
 	ctxt *obj.Link
 }
 
-type TestAuto struct {
-	t *types.Type
-	s string
-}
-
-func (d *TestAuto) Typ() *types.Type {
-	return d.t
-}
-
-func (d *TestAuto) String() string {
-	return d.s
-}
-
-func (d *TestAuto) StorageClass() StorageClass {
-	return ClassAuto
-}
-
-func (d *TestAuto) IsSynthetic() bool {
-	return false
-}
-
-func (d *TestAuto) IsAutoTmp() bool {
-	return true
-}
-
 func (TestFrontend) StringData(s string) *obj.LSym {
 	return nil
 }
-func (TestFrontend) Auto(pos src.XPos, t *types.Type) GCNode {
-	return &TestAuto{t: t, s: "aTestAuto"}
+func (TestFrontend) Auto(pos src.XPos, t *types.Type) *ir.Node {
+	n := ir.NewNameAt(pos, &types.Sym{Name: "aFakeAuto"})
+	n.SetClass(ir.PAUTO)
+	return n
 }
 func (d TestFrontend) SplitString(s LocalSlot) (LocalSlot, LocalSlot) {
 	return LocalSlot{N: s.N, Type: testTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.Int, Off: s.Off + 8}
diff --git a/src/cmd/compile/internal/ssa/location.go b/src/cmd/compile/internal/ssa/location.go
index a333982389aa4..2f456c9f899b9 100644
--- a/src/cmd/compile/internal/ssa/location.go
+++ b/src/cmd/compile/internal/ssa/location.go
@@ -5,6 +5,7 @@ package ssa
 import (
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/types"
 	"fmt"
 )
@@ -59,7 +60,7 @@ func (r *Register) GCNum() int16 {
 // { N: len, Type: int, Off: 0, SplitOf: parent, SplitOffset: 8}
 // parent = &{N: s, Type: string}
 type LocalSlot struct {
-	N    GCNode      // an ONAME *gc.Node representing a stack location.
+	N    *ir.Node    // an ONAME *gc.Node representing a stack location.
 	Type *types.Type // type of slot
 	Off  int64       // offset of slot in N
diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go
index d1bad529e700c..e0ae0454ef358 100644
--- a/src/cmd/compile/internal/ssa/nilcheck.go
+++ b/src/cmd/compile/internal/ssa/nilcheck.go
@@ -5,6 +5,7 @@ package ssa
 import (
+	"cmd/compile/internal/ir"
 	"cmd/internal/objabi"
 	"cmd/internal/src"
 )
@@ -235,7 +236,7 @@ func nilcheckelim2(f *Func) {
 			continue
 		}
 		if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
-			if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(GCNode).Typ().HasPointers()) {
+			if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(*ir.Node).Type.HasPointers()) {
 				// These ops don't really change memory.
 				continue
 				// Note: OpVarDef requires that the defined variable not have pointers.
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go
index 4ed884c3e7cc1..984188393976e 100644
--- a/src/cmd/compile/internal/ssa/regalloc.go
+++ b/src/cmd/compile/internal/ssa/regalloc.go
@@ -114,6 +114,7 @@ package ssa
 import (
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/types"
 	"cmd/internal/objabi"
 	"cmd/internal/src"
@@ -1248,7 +1249,7 @@ func (s *regAllocState) regalloc(f *Func) {
 				// This forces later liveness analysis to make the
 				// value live at this point.
 				v.SetArg(0, s.makeSpill(a, b))
-			} else if _, ok := a.Aux.(GCNode); ok && vi.rematerializeable {
+			} else if _, ok := a.Aux.(*ir.Node); ok && vi.rematerializeable {
 				// Rematerializeable value with a gc.Node. This is the address of
 				// a stack object (e.g. an LEAQ). Keep the object live.
 				// Change it to VarLive, which is what plive expects for locals.
diff --git a/src/cmd/compile/internal/ssa/sizeof_test.go b/src/cmd/compile/internal/ssa/sizeof_test.go
index 60ada011e3e0b..a27002ee3ac3b 100644
--- a/src/cmd/compile/internal/ssa/sizeof_test.go
+++ b/src/cmd/compile/internal/ssa/sizeof_test.go
@@ -22,7 +22,7 @@ func TestSizeof(t *testing.T) {
 	}{
 		{Value{}, 72, 112},
 		{Block{}, 164, 304},
-		{LocalSlot{}, 32, 48},
+		{LocalSlot{}, 28, 40},
 		{valState{}, 28, 40},
 	}
diff --git a/src/cmd/compile/internal/ssa/stackalloc.go b/src/cmd/compile/internal/ssa/stackalloc.go
index 406a3c3ea53fa..eee0a21a6675f 100644
--- a/src/cmd/compile/internal/ssa/stackalloc.go
+++ b/src/cmd/compile/internal/ssa/stackalloc.go
@@ -7,6 +7,7 @@ package ssa
 import (
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/types"
 	"cmd/internal/src"
 	"fmt"
@@ -156,7 +157,7 @@ func (s *stackAllocState) stackalloc() {
 			if v.Aux == nil {
 				f.Fatalf("%s has nil Aux\n", v.LongString())
 			}
-			loc := LocalSlot{N: v.Aux.(GCNode), Type: v.Type, Off: v.AuxInt}
+			loc := LocalSlot{N: v.Aux.(*ir.Node), Type: v.Type, Off: v.AuxInt}
 			if f.pass.debug > stackDebug {
 				fmt.Printf("stackalloc %s to %s\n", v, loc)
 			}
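
The "one word shorter" note in the commit message is visible in the sizeof_test.go change above: LocalSlot shrinks from (32, 48) to (28, 40) bytes on 32/64-bit platforms once N is a *ir.Node rather than a GCNode interface, because a non-empty interface value is a two-word (itab, data) pair while a plain pointer is one word. A standalone sketch of that size difference (GCNode and Node here are illustrative stand-ins, not the compiler's types):

	package main

	import (
		"fmt"
		"unsafe"
	)

	// GCNode mimics the deleted ssa.GCNode interface; Node stands in for ir.Node.
	type GCNode interface {
		IsAutoTmp() bool
	}

	type Node struct{}

	func (*Node) IsAutoTmp() bool { return false }

	func main() {
		var viaInterface GCNode = (*Node)(nil) // two words: itab pointer + data pointer
		var viaPointer *Node                   // one word
		fmt.Println(unsafe.Sizeof(viaInterface), unsafe.Sizeof(viaPointer)) // 16 8 on 64-bit
	}
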
From 41ab6689edb1f51001feab0928e598050e2f6d32 Mon Sep 17 00:00:00 2001
From: Russ Cox
Date: Tue, 24 Nov 2020 20:47:32 -0500
Subject: [PATCH 042/474] [dev.regabi] cmd/compile: rewrite a few ++/--/+=/-=
 to prep for getters/setters [generated]

These are trivial rewrites that are only OK because it turns out that n
has no side effects. Separated into a different CL for easy inspection.

[git-generate]
cd src/cmd/compile/internal/gc
rf '
ex . ../ir ../ssa {
import "cmd/compile/internal/ir"
var n *ir.Node
var i int64
n.Xoffset++ -> n.Xoffset = n.Xoffset + 1
n.Xoffset-- -> n.Xoffset = n.Xoffset - 1
n.Xoffset += i -> n.Xoffset = n.Xoffset + i
n.Xoffset -= i -> n.Xoffset = n.Xoffset - i
}
'

Change-Id: If7b4b7f7cbdafeee988e04d03924ef0e1dd867b0
Reviewed-on: https://go-review.googlesource.com/c/go/+/272932
Trust: Russ Cox
Run-TryBot: Russ Cox
TryBot-Result: Go Bot
Reviewed-by: Matthew Dempsky
---
 src/cmd/compile/internal/gc/initorder.go | 4 ++--
 src/cmd/compile/internal/gc/sinit.go     | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go
index 942cb95f2077b..62294b5a90e53 100644
--- a/src/cmd/compile/internal/gc/initorder.go
+++ b/src/cmd/compile/internal/gc/initorder.go
@@ -142,7 +142,7 @@ func (o *InitOrder) processAssign(n *ir.Node) {
 		if dep.Class() != ir.PEXTERN || defn.Initorder() == InitDone {
 			continue
 		}
-		n.Xoffset++
+		n.Xoffset = n.Xoffset + 1
 		o.blocking[defn] = append(o.blocking[defn], n)
 	}
 
@@ -169,7 +169,7 @@ func (o *InitOrder) flushReady(initialize func(*ir.Node)) {
 		delete(o.blocking, n)
 
 		for _, m := range blocked {
-			m.Xoffset--
+			m.Xoffset = m.Xoffset - 1
 			if m.Xoffset == 0 {
 				heap.Push(&o.ready, m)
 			}
diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go
index d78b509127675..0ba7efb95ec71 100644
--- a/src/cmd/compile/internal/gc/sinit.go
+++ b/src/cmd/compile/internal/gc/sinit.go
@@ -157,7 +157,7 @@ func (s *InitSchedule) staticcopy(l *ir.Node, r *ir.Node) bool {
 			// copying someone else's computation.
 			rr := ir.SepCopy(orig)
 			rr.Type = ll.Type
-			rr.Xoffset += e.Xoffset
+			rr.Xoffset = rr.Xoffset + e.Xoffset
 			setlineno(rr)
 			s.append(ir.Nod(ir.OAS, ll, rr))
 		}
@@ -301,7 +301,7 @@ func (s *InitSchedule) staticassign(l *ir.Node, r *ir.Node) bool {
 		// Emit itab, advance offset.
 		addrsym(n, itab.Left) // itab is an OADDR node
-		n.Xoffset += int64(Widthptr)
+		n.Xoffset = n.Xoffset + int64(Widthptr)
 
 		// Emit data.
 		if isdirectiface(val.Type) {
@@ -1017,7 +1017,7 @@ func stataddr(n *ir.Node) *ir.Node {
 		if nam == nil {
 			break
 		}
-		nam.Xoffset += n.Xoffset
+		nam.Xoffset = nam.Xoffset + n.Xoffset
 		nam.Type = n.Type
 		return nam
 
@@ -1038,7 +1038,7 @@
 		if n.Type.Width != 0 && thearch.MAXWIDTH/n.Type.Width <= int64(l) {
 			break
 		}
-		nam.Xoffset += int64(l) * n.Type.Width
+		nam.Xoffset = nam.Xoffset + int64(l)*n.Type.Width
 		nam.Type = n.Type
 		return nam
 	}
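
The expanded assignments above matter because the next CL converts field accesses into method calls: n.Xoffset++ has no getter/setter spelling, while n.Xoffset = n.Xoffset + 1 rewrites mechanically into n.SetXoffset(n.Xoffset() + 1). As the commit message notes, this is only safe because evaluating n twice has no side effects. A minimal sketch with stand-in types (not the compiler's):

	package main

	import "fmt"

	// Node stands in for ir.Node with its offset field unexported, as the
	// later rename (Xoffset -> offset, with Offset/SetOffset accessors) does.
	type Node struct{ xoffset int64 }

	func (n *Node) Xoffset() int64     { return n.xoffset }
	func (n *Node) SetXoffset(x int64) { n.xoffset = x }

	func main() {
		n := new(Node)
		// n.xoffset++ has no accessor form, but the expanded assignment
		// produced by this CL converts token for token:
		//   n.Xoffset = n.Xoffset + 1  ->  n.SetXoffset(n.Xoffset() + 1)
		n.SetXoffset(n.Xoffset() + 1)
		fmt.Println(n.Xoffset()) // 1
	}
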
From acb4d1cef14529585266df1868045f80e37ae081 Mon Sep 17 00:00:00 2001
From: Russ Cox
Date: Sun, 22 Nov 2020 09:59:15 -0500
Subject: [PATCH 043/474] [dev.regabi] cmd/compile: use Node getters and
 setters [generated]

Now that we have all the getters and setters defined, use them
and unexport all the actual Node fields. This is the next step
toward replacing Node with an interface.

[git-generate]
cd src/cmd/compile/internal/gc
rf '
ex . ../ir ../ssa {
import "cmd/compile/internal/ir"
import "cmd/compile/internal/types"
import "cmd/internal/src"
var n, x *ir.Node
var op ir.Op
var t *types.Type
var f *ir.Func
var m *ir.Name
var s *types.Sym
var p src.XPos
var i int64
var e uint16
var nodes ir.Nodes

n.Op = op -> n.SetOp(op)
n.Left = x -> n.SetLeft(x)
n.Right = x -> n.SetRight(x)
n.Orig = x -> n.SetOrig(x)
n.Type = t -> n.SetType(t)
n.Func = f -> n.SetFunc(f)
n.Name = m -> n.SetName(m)
n.Sym = s -> n.SetSym(s)
n.Pos = p -> n.SetPos(p)
n.Xoffset = i -> n.SetXoffset(i)
n.Esc = e -> n.SetEsc(e)

n.Ninit.Append -> n.PtrNinit().Append
n.Ninit.AppendNodes -> n.PtrNinit().AppendNodes
n.Ninit.MoveNodes -> n.PtrNinit().MoveNodes
n.Ninit.Prepend -> n.PtrNinit().Prepend
n.Ninit.Set -> n.PtrNinit().Set
n.Ninit.Set1 -> n.PtrNinit().Set1
n.Ninit.Set2 -> n.PtrNinit().Set2
n.Ninit.Set3 -> n.PtrNinit().Set3
&n.Ninit -> n.PtrNinit()
n.Ninit = nodes -> n.SetNinit(nodes)

n.Nbody.Append -> n.PtrNbody().Append
n.Nbody.AppendNodes -> n.PtrNbody().AppendNodes
n.Nbody.MoveNodes -> n.PtrNbody().MoveNodes
n.Nbody.Prepend -> n.PtrNbody().Prepend
n.Nbody.Set -> n.PtrNbody().Set
n.Nbody.Set1 -> n.PtrNbody().Set1
n.Nbody.Set2 -> n.PtrNbody().Set2
n.Nbody.Set3 -> n.PtrNbody().Set3
&n.Nbody -> n.PtrNbody()
n.Nbody = nodes -> n.SetNbody(nodes)

n.List.Append -> n.PtrList().Append
n.List.AppendNodes -> n.PtrList().AppendNodes
n.List.MoveNodes -> n.PtrList().MoveNodes
n.List.Prepend -> n.PtrList().Prepend
n.List.Set -> n.PtrList().Set
n.List.Set1 -> n.PtrList().Set1
n.List.Set2 -> n.PtrList().Set2
n.List.Set3 -> n.PtrList().Set3
&n.List -> n.PtrList()
n.List = nodes -> n.SetList(nodes)

n.Rlist.Append -> n.PtrRlist().Append
n.Rlist.AppendNodes -> n.PtrRlist().AppendNodes
n.Rlist.MoveNodes -> n.PtrRlist().MoveNodes
n.Rlist.Prepend -> n.PtrRlist().Prepend
n.Rlist.Set -> n.PtrRlist().Set
n.Rlist.Set1 -> n.PtrRlist().Set1
n.Rlist.Set2 -> n.PtrRlist().Set2
n.Rlist.Set3 -> n.PtrRlist().Set3
&n.Rlist -> n.PtrRlist()
n.Rlist = nodes -> n.SetRlist(nodes)
}

ex . ../ir ../ssa {
import "cmd/compile/internal/ir"
var n *ir.Node
n.Op -> n.GetOp()
n.Left -> n.GetLeft()
n.Right -> n.GetRight()
n.Orig -> n.GetOrig()
n.Type -> n.GetType()
n.Func -> n.GetFunc()
n.Name -> n.GetName()
n.Sym -> n.GetSym()
n.Pos -> n.GetPos()
n.Xoffset -> n.GetXoffset()
n.Esc -> n.GetEsc()
avoid (*ir.Node).PtrNinit
avoid (*ir.Node).PtrNbody
avoid (*ir.Node).PtrList
avoid (*ir.Node).PtrRlist
n.Ninit -> n.GetNinit()
n.Nbody -> n.GetNbody()
n.List -> n.GetList()
n.Rlist -> n.GetRlist()
}
'

cd ../ir
rf '
mv Node.Op Node.op
mv Node.GetOp Node.Op
mv Node.Left Node.left
mv Node.GetLeft Node.Left
mv Node.Right Node.right
mv Node.GetRight Node.Right
mv Node.Orig Node.orig
mv Node.GetOrig Node.Orig
mv Node.Type Node.typ
mv Node.GetType Node.Type
mv Node.Func Node.fn
mv Node.GetFunc Node.Func
mv Node.Name Node.name
mv Node.GetName Node.Name

# All uses are in other Node methods already.
mv Node.E Node.e

mv Node.Sym Node.sym
mv Node.GetSym Node.Sym
mv Node.Pos Node.pos
mv Node.GetPos Node.Pos
mv Node.Esc Node.esc
mv Node.GetEsc Node.Esc

# While we are here, rename Xoffset to more idiomatic Offset.
mv Node.Xoffset Node.offset
mv Node.GetXoffset Node.Offset
mv Node.SetXoffset Node.SetOffset

# While we are here, rename Ninit, Nbody to more idiomatic Init, Body.
mv Node.Ninit Node.init
mv Node.GetNinit Node.Init
mv Node.PtrNinit Node.PtrInit
mv Node.SetNinit Node.SetInit
mv Node.Nbody Node.body
mv Node.GetNbody Node.Body
mv Node.PtrNbody Node.PtrBody
mv Node.SetNbody Node.SetBody
mv Node.List Node.list
mv Node.GetList Node.List
mv Node.Rlist Node.rlist
mv Node.GetRlist Node.Rlist

# Unexport these
mv Node.SetHasOpt Node.setHasOpt
mv Node.SetHasVal Node.setHasVal
'

Change-Id: I9894f633375c5237a29b6d6d7b89ba181b56ca3a
Reviewed-on: https://go-review.googlesource.com/c/go/+/273009
Trust: Russ Cox
Run-TryBot: Russ Cox
Reviewed-by: Matthew Dempsky
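
The script gives each list-valued field three distinct operations: a value getter for reads (n.Ninit, via the GetNinit intermediate, ends up as n.Init()), a pointer getter for in-place mutation (&n.Ninit becomes n.PtrNinit(), later n.PtrInit()), and a setter for wholesale replacement (n.Ninit = nodes becomes n.SetNinit(nodes), later n.SetInit(nodes)). A standalone sketch of that accessor triple with stand-in types (not the compiler's):

	package main

	import "fmt"

	// Nodes and Node are stand-ins for ir.Nodes and ir.Node, sketching the
	// accessor triple the script introduces for each list-valued field.
	type Nodes struct{ slice []string }

	func (ns *Nodes) Append(s ...string) { ns.slice = append(ns.slice, s...) }

	type Node struct{ init Nodes }

	func (n *Node) Init() Nodes     { return n.init }  // read:    n.Ninit     -> n.Init()
	func (n *Node) PtrInit() *Nodes { return &n.init } // mutate:  &n.Ninit    -> n.PtrInit()
	func (n *Node) SetInit(x Nodes) { n.init = x }     // replace: n.Ninit = x -> n.SetInit(x)

	func main() {
		n := new(Node)
		n.PtrInit().Append("stmt")       // was: n.Ninit.Append(stmt)
		fmt.Println(len(n.Init().slice)) // 1
	}
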
---
 src/cmd/compile/internal/gc/alg.go       |  154 +-
 src/cmd/compile/internal/gc/align.go     |   12 +-
 src/cmd/compile/internal/gc/bexport.go   |    4 +-
 src/cmd/compile/internal/gc/bimport.go   |    2 +-
 src/cmd/compile/internal/gc/closure.go   |  222 +--
 src/cmd/compile/internal/gc/const.go     |  172 +-
 src/cmd/compile/internal/gc/dcl.go       |  266 +--
 src/cmd/compile/internal/gc/dwinl.go     |    6 +-
 src/cmd/compile/internal/gc/embed.go     |   36 +-
 src/cmd/compile/internal/gc/escape.go    |  518 +++---
 src/cmd/compile/internal/gc/export.go    |   54 +-
 src/cmd/compile/internal/gc/gen.go       |   22 +-
 src/cmd/compile/internal/gc/gsubr.go     |   24 +-
 src/cmd/compile/internal/gc/iexport.go   |  338 ++--
 src/cmd/compile/internal/gc/iimport.go   |  144 +-
 src/cmd/compile/internal/gc/init.go      |   20 +-
 src/cmd/compile/internal/gc/initorder.go |   70 +-
 src/cmd/compile/internal/gc/inl.go       |  554 +++---
 src/cmd/compile/internal/gc/main.go      |   40 +-
 src/cmd/compile/internal/gc/noder.go     |  254 +--
 src/cmd/compile/internal/gc/obj.go       |  110 +-
 src/cmd/compile/internal/gc/order.go     |  604 +++----
 src/cmd/compile/internal/gc/pgen.go      |  178 +-
 src/cmd/compile/internal/gc/pgen_test.go |   12 +-
 src/cmd/compile/internal/gc/plive.go     |   48 +-
 src/cmd/compile/internal/gc/racewalk.go  |   18 +-
 src/cmd/compile/internal/gc/range.go     |  208 +--
 src/cmd/compile/internal/gc/reflect.go   |   16 +-
 src/cmd/compile/internal/gc/scc.go       |   26 +-
 src/cmd/compile/internal/gc/scope.go     |    6 +-
 src/cmd/compile/internal/gc/select.go    |  192 +--
 src/cmd/compile/internal/gc/sinit.go     |  356 ++--
 src/cmd/compile/internal/gc/ssa.go       | 1082 ++++++------
 src/cmd/compile/internal/gc/subr.go      |  206 +--
 src/cmd/compile/internal/gc/swt.go       |  278 +--
 src/cmd/compile/internal/gc/typecheck.go | 1970 +++++++++++-----------
 src/cmd/compile/internal/gc/universe.go  |   38 +-
 src/cmd/compile/internal/gc/unsafe.go    |   38 +-
 src/cmd/compile/internal/gc/walk.go      | 1404 +++++++--------
 src/cmd/compile/internal/ir/dump.go      |    2 +-
 src/cmd/compile/internal/ir/fmt.go       |  402 ++---
 src/cmd/compile/internal/ir/node.go      |  266 +--
 src/cmd/compile/internal/ir/val.go       |    6 +-
 src/cmd/compile/internal/ssa/nilcheck.go |    2 +-
 44 files changed, 5191 insertions(+), 5189 deletions(-)

diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go
index cf82b9d5916e0..ffd1682b35bcd 100644
--- a/src/cmd/compile/internal/gc/alg.go
+++ b/src/cmd/compile/internal/gc/alg.go
@@ -293,15 +293,15 @@ func genhash(t *types.Type) *obj.LSym {
 
 	// func sym(p *T, h uintptr) uintptr
 	tfn := ir.Nod(ir.OTFUNC, nil, nil)
-	tfn.List.Set2(
+	tfn.PtrList().Set2(
 		namedfield("p", types.NewPtr(t)),
 		namedfield("h", types.Types[types.TUINTPTR]),
 	)
-	tfn.Rlist.Set1(anonfield(types.Types[types.TUINTPTR]))
+	tfn.PtrRlist().Set1(anonfield(types.Types[types.TUINTPTR]))
 
 	fn := dclfunc(sym, tfn)
-	np := ir.AsNode(tfn.Type.Params().Field(0).Nname)
-	nh := ir.AsNode(tfn.Type.Params().Field(1).Nname)
+	np := ir.AsNode(tfn.Type().Params().Field(0).Nname)
+	nh := ir.AsNode(tfn.Type().Params().Field(1).Nname)
switch t.Etype { case types.TARRAY: @@ -312,11 +312,11 @@ func genhash(t *types.Type) *obj.LSym { n := ir.Nod(ir.ORANGE, nil, ir.Nod(ir.ODEREF, np, nil)) ni := NewName(lookup("i")) - ni.Type = types.Types[types.TINT] - n.List.Set1(ni) + ni.SetType(types.Types[types.TINT]) + n.PtrList().Set1(ni) n.SetColas(true) - colasdefn(n.List.Slice(), n) - ni = n.List.First() + colasdefn(n.List().Slice(), n) + ni = n.List().First() // h = hashel(&p[i], h) call := ir.Nod(ir.OCALL, hashel, nil) @@ -324,11 +324,11 @@ func genhash(t *types.Type) *obj.LSym { nx := ir.Nod(ir.OINDEX, np, ni) nx.SetBounded(true) na := ir.Nod(ir.OADDR, nx, nil) - call.List.Append(na) - call.List.Append(nh) - n.Nbody.Append(ir.Nod(ir.OAS, nh, call)) + call.PtrList().Append(na) + call.PtrList().Append(nh) + n.PtrBody().Append(ir.Nod(ir.OAS, nh, call)) - fn.Nbody.Append(n) + fn.PtrBody().Append(n) case types.TSTRUCT: // Walk the struct using memhash for runs of AMEM @@ -348,9 +348,9 @@ func genhash(t *types.Type) *obj.LSym { call := ir.Nod(ir.OCALL, hashel, nil) nx := nodSym(ir.OXDOT, np, f.Sym) // TODO: fields from other packages? na := ir.Nod(ir.OADDR, nx, nil) - call.List.Append(na) - call.List.Append(nh) - fn.Nbody.Append(ir.Nod(ir.OAS, nh, call)) + call.PtrList().Append(na) + call.PtrList().Append(nh) + fn.PtrBody().Append(ir.Nod(ir.OAS, nh, call)) i++ continue } @@ -363,37 +363,37 @@ func genhash(t *types.Type) *obj.LSym { call := ir.Nod(ir.OCALL, hashel, nil) nx := nodSym(ir.OXDOT, np, f.Sym) // TODO: fields from other packages? na := ir.Nod(ir.OADDR, nx, nil) - call.List.Append(na) - call.List.Append(nh) - call.List.Append(nodintconst(size)) - fn.Nbody.Append(ir.Nod(ir.OAS, nh, call)) + call.PtrList().Append(na) + call.PtrList().Append(nh) + call.PtrList().Append(nodintconst(size)) + fn.PtrBody().Append(ir.Nod(ir.OAS, nh, call)) i = next } } r := ir.Nod(ir.ORETURN, nil, nil) - r.List.Append(nh) - fn.Nbody.Append(r) + r.PtrList().Append(nh) + fn.PtrBody().Append(r) if base.Flag.LowerR != 0 { - ir.DumpList("genhash body", fn.Nbody) + ir.DumpList("genhash body", fn.Body()) } funcbody() - fn.Func.SetDupok(true) + fn.Func().SetDupok(true) fn = typecheck(fn, ctxStmt) Curfn = fn - typecheckslice(fn.Nbody.Slice(), ctxStmt) + typecheckslice(fn.Body().Slice(), ctxStmt) Curfn = nil if base.Debug.DclStack != 0 { testdclstack() } - fn.Func.SetNilCheckDisabled(true) + fn.Func().SetNilCheckDisabled(true) xtop = append(xtop, fn) // Build closure. It doesn't close over any variables, so @@ -432,12 +432,12 @@ func hashfor(t *types.Type) *ir.Node { n := NewName(sym) setNodeNameFunc(n) - n.Type = functype(nil, []*ir.Node{ + n.SetType(functype(nil, []*ir.Node{ anonfield(types.NewPtr(t)), anonfield(types.Types[types.TUINTPTR]), }, []*ir.Node{ anonfield(types.Types[types.TUINTPTR]), - }) + })) return n } @@ -522,16 +522,16 @@ func geneq(t *types.Type) *obj.LSym { // func sym(p, q *T) bool tfn := ir.Nod(ir.OTFUNC, nil, nil) - tfn.List.Set2( + tfn.PtrList().Set2( namedfield("p", types.NewPtr(t)), namedfield("q", types.NewPtr(t)), ) - tfn.Rlist.Set1(namedfield("r", types.Types[types.TBOOL])) + tfn.PtrRlist().Set1(namedfield("r", types.Types[types.TBOOL])) fn := dclfunc(sym, tfn) - np := ir.AsNode(tfn.Type.Params().Field(0).Nname) - nq := ir.AsNode(tfn.Type.Params().Field(1).Nname) - nr := ir.AsNode(tfn.Type.Results().Field(0).Nname) + np := ir.AsNode(tfn.Type().Params().Field(0).Nname) + nq := ir.AsNode(tfn.Type().Params().Field(1).Nname) + nr := ir.AsNode(tfn.Type().Results().Field(0).Nname) // Label to jump to if an equality test fails. 
neq := autolabel(".neq") @@ -573,11 +573,11 @@ func geneq(t *types.Type) *obj.LSym { // pi := p[i] pi := ir.Nod(ir.OINDEX, np, i) pi.SetBounded(true) - pi.Type = t.Elem() + pi.SetType(t.Elem()) // qi := q[i] qi := ir.Nod(ir.OINDEX, nq, i) qi.SetBounded(true) - qi.Type = t.Elem() + qi.SetType(t.Elem()) return eq(pi, qi) } @@ -590,11 +590,11 @@ func geneq(t *types.Type) *obj.LSym { for i := int64(0); i < nelem; i++ { // if check {} else { goto neq } nif := ir.Nod(ir.OIF, checkIdx(nodintconst(i)), nil) - nif.Rlist.Append(nodSym(ir.OGOTO, nil, neq)) - fn.Nbody.Append(nif) + nif.PtrRlist().Append(nodSym(ir.OGOTO, nil, neq)) + fn.PtrBody().Append(nif) } if last { - fn.Nbody.Append(ir.Nod(ir.OAS, nr, checkIdx(nodintconst(nelem)))) + fn.PtrBody().Append(ir.Nod(ir.OAS, nr, checkIdx(nodintconst(nelem)))) } } else { // Generate a for loop. @@ -604,14 +604,14 @@ func geneq(t *types.Type) *obj.LSym { cond := ir.Nod(ir.OLT, i, nodintconst(nelem)) post := ir.Nod(ir.OAS, i, ir.Nod(ir.OADD, i, nodintconst(1))) loop := ir.Nod(ir.OFOR, cond, post) - loop.Ninit.Append(init) + loop.PtrInit().Append(init) // if eq(pi, qi) {} else { goto neq } nif := ir.Nod(ir.OIF, checkIdx(i), nil) - nif.Rlist.Append(nodSym(ir.OGOTO, nil, neq)) - loop.Nbody.Append(nif) - fn.Nbody.Append(loop) + nif.PtrRlist().Append(nodSym(ir.OGOTO, nil, neq)) + loop.PtrBody().Append(nif) + fn.PtrBody().Append(loop) if last { - fn.Nbody.Append(ir.Nod(ir.OAS, nr, nodbool(true))) + fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(true))) } } } @@ -712,7 +712,7 @@ func geneq(t *types.Type) *obj.LSym { var flatConds []*ir.Node for _, c := range conds { isCall := func(n *ir.Node) bool { - return n.Op == ir.OCALL || n.Op == ir.OCALLFUNC + return n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC } sort.SliceStable(c, func(i, j int) bool { return !isCall(c[i]) && isCall(c[j]) @@ -721,51 +721,51 @@ func geneq(t *types.Type) *obj.LSym { } if len(flatConds) == 0 { - fn.Nbody.Append(ir.Nod(ir.OAS, nr, nodbool(true))) + fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(true))) } else { for _, c := range flatConds[:len(flatConds)-1] { // if cond {} else { goto neq } n := ir.Nod(ir.OIF, c, nil) - n.Rlist.Append(nodSym(ir.OGOTO, nil, neq)) - fn.Nbody.Append(n) + n.PtrRlist().Append(nodSym(ir.OGOTO, nil, neq)) + fn.PtrBody().Append(n) } - fn.Nbody.Append(ir.Nod(ir.OAS, nr, flatConds[len(flatConds)-1])) + fn.PtrBody().Append(ir.Nod(ir.OAS, nr, flatConds[len(flatConds)-1])) } } // ret: // return ret := autolabel(".ret") - fn.Nbody.Append(nodSym(ir.OLABEL, nil, ret)) - fn.Nbody.Append(ir.Nod(ir.ORETURN, nil, nil)) + fn.PtrBody().Append(nodSym(ir.OLABEL, nil, ret)) + fn.PtrBody().Append(ir.Nod(ir.ORETURN, nil, nil)) // neq: // r = false // return (or goto ret) - fn.Nbody.Append(nodSym(ir.OLABEL, nil, neq)) - fn.Nbody.Append(ir.Nod(ir.OAS, nr, nodbool(false))) + fn.PtrBody().Append(nodSym(ir.OLABEL, nil, neq)) + fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(false))) if EqCanPanic(t) || hasCall(fn) { // Epilogue is large, so share it with the equal case. - fn.Nbody.Append(nodSym(ir.OGOTO, nil, ret)) + fn.PtrBody().Append(nodSym(ir.OGOTO, nil, ret)) } else { // Epilogue is small, so don't bother sharing. - fn.Nbody.Append(ir.Nod(ir.ORETURN, nil, nil)) + fn.PtrBody().Append(ir.Nod(ir.ORETURN, nil, nil)) } // TODO(khr): the epilogue size detection condition above isn't perfect. // We should really do a generic CL that shares epilogues across // the board. See #24936. 
if base.Flag.LowerR != 0 { - ir.DumpList("geneq body", fn.Nbody) + ir.DumpList("geneq body", fn.Body()) } funcbody() - fn.Func.SetDupok(true) + fn.Func().SetDupok(true) fn = typecheck(fn, ctxStmt) Curfn = fn - typecheckslice(fn.Nbody.Slice(), ctxStmt) + typecheckslice(fn.Body().Slice(), ctxStmt) Curfn = nil if base.Debug.DclStack != 0 { @@ -776,7 +776,7 @@ func geneq(t *types.Type) *obj.LSym { // We are comparing a struct or an array, // neither of which can be nil, and our comparisons // are shallow. - fn.Func.SetNilCheckDisabled(true) + fn.Func().SetNilCheckDisabled(true) xtop = append(xtop, fn) // Generate a closure which points at the function we just generated. @@ -786,31 +786,31 @@ func geneq(t *types.Type) *obj.LSym { } func hasCall(n *ir.Node) bool { - if n.Op == ir.OCALL || n.Op == ir.OCALLFUNC { + if n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC { return true } - if n.Left != nil && hasCall(n.Left) { + if n.Left() != nil && hasCall(n.Left()) { return true } - if n.Right != nil && hasCall(n.Right) { + if n.Right() != nil && hasCall(n.Right()) { return true } - for _, x := range n.Ninit.Slice() { + for _, x := range n.Init().Slice() { if hasCall(x) { return true } } - for _, x := range n.Nbody.Slice() { + for _, x := range n.Body().Slice() { if hasCall(x) { return true } } - for _, x := range n.List.Slice() { + for _, x := range n.List().Slice() { if hasCall(x) { return true } } - for _, x := range n.Rlist.Slice() { + for _, x := range n.Rlist().Slice() { if hasCall(x) { return true } @@ -844,12 +844,12 @@ func eqstring(s, t *ir.Node) (eqlen, eqmem *ir.Node) { fn := syslook("memequal") fn = substArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8]) call := ir.Nod(ir.OCALL, fn, nil) - call.List.Append(sptr, tptr, ir.Copy(slen)) + call.PtrList().Append(sptr, tptr, ir.Copy(slen)) call = typecheck(call, ctxExpr|ctxMultiOK) cmp := ir.Nod(ir.OEQ, slen, tlen) cmp = typecheck(cmp, ctxExpr) - cmp.Type = types.Types[types.TBOOL] + cmp.SetType(types.Types[types.TBOOL]) return cmp, call } @@ -860,13 +860,13 @@ func eqstring(s, t *ir.Node) (eqlen, eqmem *ir.Node) { // which can be used to construct interface equality comparison. // eqtab must be evaluated before eqdata, and shortcircuiting is required. 
func eqinterface(s, t *ir.Node) (eqtab, eqdata *ir.Node) { - if !types.Identical(s.Type, t.Type) { - base.Fatalf("eqinterface %v %v", s.Type, t.Type) + if !types.Identical(s.Type(), t.Type()) { + base.Fatalf("eqinterface %v %v", s.Type(), t.Type()) } // func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool) // func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool) var fn *ir.Node - if s.Type.IsEmptyInterface() { + if s.Type().IsEmptyInterface() { fn = syslook("efaceeq") } else { fn = syslook("ifaceeq") @@ -876,18 +876,18 @@ func eqinterface(s, t *ir.Node) (eqtab, eqdata *ir.Node) { ttab := ir.Nod(ir.OITAB, t, nil) sdata := ir.Nod(ir.OIDATA, s, nil) tdata := ir.Nod(ir.OIDATA, t, nil) - sdata.Type = types.Types[types.TUNSAFEPTR] - tdata.Type = types.Types[types.TUNSAFEPTR] + sdata.SetType(types.Types[types.TUNSAFEPTR]) + tdata.SetType(types.Types[types.TUNSAFEPTR]) sdata.SetTypecheck(1) tdata.SetTypecheck(1) call := ir.Nod(ir.OCALL, fn, nil) - call.List.Append(stab, sdata, tdata) + call.PtrList().Append(stab, sdata, tdata) call = typecheck(call, ctxExpr|ctxMultiOK) cmp := ir.Nod(ir.OEQ, stab, ttab) cmp = typecheck(cmp, ctxExpr) - cmp.Type = types.Types[types.TBOOL] + cmp.SetType(types.Types[types.TBOOL]) return cmp, call } @@ -899,12 +899,12 @@ func eqmem(p *ir.Node, q *ir.Node, field *types.Sym, size int64) *ir.Node { nx = typecheck(nx, ctxExpr) ny = typecheck(ny, ctxExpr) - fn, needsize := eqmemfunc(size, nx.Type.Elem()) + fn, needsize := eqmemfunc(size, nx.Type().Elem()) call := ir.Nod(ir.OCALL, fn, nil) - call.List.Append(nx) - call.List.Append(ny) + call.PtrList().Append(nx) + call.PtrList().Append(ny) if needsize { - call.List.Append(nodintconst(size)) + call.PtrList().Append(nodintconst(size)) } return call diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go index 1bc8bf238f74e..edf7d263a33cb 100644 --- a/src/cmd/compile/internal/gc/align.go +++ b/src/cmd/compile/internal/gc/align.go @@ -126,11 +126,11 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 { // NOTE(rsc): This comment may be stale. // It's possible the ordering has changed and this is // now the common case. I'm not sure. - if n.Name.Param.Stackcopy != nil { - n.Name.Param.Stackcopy.Xoffset = o - n.Xoffset = 0 + if n.Name().Param.Stackcopy != nil { + n.Name().Param.Stackcopy.SetOffset(o) + n.SetOffset(0) } else { - n.Xoffset = o + n.SetOffset(o) } } @@ -198,7 +198,7 @@ func findTypeLoop(t *types.Type, path *[]*types.Type) bool { } *path = append(*path, t) - if p := ir.AsNode(t.Nod).Name.Param; p != nil && findTypeLoop(p.Ntype.Type, path) { + if p := ir.AsNode(t.Nod).Name().Param; p != nil && findTypeLoop(p.Ntype.Type(), path) { return true } *path = (*path)[:len(*path)-1] @@ -308,7 +308,7 @@ func dowidth(t *types.Type) { lno := base.Pos if ir.AsNode(t.Nod) != nil { - base.Pos = ir.AsNode(t.Nod).Pos + base.Pos = ir.AsNode(t.Nod).Pos() } t.Width = -2 diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index ff33c6b5fcf73..e36903cbe0906 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -15,11 +15,11 @@ type exporter struct { // markObject visits a reachable object. 
func (p *exporter) markObject(n *ir.Node) { - if n.Op == ir.ONAME && n.Class() == ir.PFUNC { + if n.Op() == ir.ONAME && n.Class() == ir.PFUNC { inlFlood(n) } - p.markType(n.Type) + p.markType(n.Type()) } // markType recursively visits types reachable from t to identify diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go index e2dd276f46165..603710d6b192a 100644 --- a/src/cmd/compile/internal/gc/bimport.go +++ b/src/cmd/compile/internal/gc/bimport.go @@ -10,7 +10,7 @@ import ( ) func npos(pos src.XPos, n *ir.Node) *ir.Node { - n.Pos = pos + n.SetPos(pos) return n } diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index e68d7103637fa..1b926ec17e48b 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -18,14 +18,14 @@ func (p *noder) funcLit(expr *syntax.FuncLit) *ir.Node { ntype := p.typeExpr(expr.Type) dcl := p.nod(expr, ir.ODCLFUNC, nil, nil) - fn := dcl.Func + fn := dcl.Func() fn.SetIsHiddenClosure(Curfn != nil) - fn.Nname = newfuncnamel(p.pos(expr), ir.BlankNode.Sym, fn) // filled in by typecheckclosure - fn.Nname.Name.Param.Ntype = xtype - fn.Nname.Name.Defn = dcl + fn.Nname = newfuncnamel(p.pos(expr), ir.BlankNode.Sym(), fn) // filled in by typecheckclosure + fn.Nname.Name().Param.Ntype = xtype + fn.Nname.Name().Defn = dcl clo := p.nod(expr, ir.OCLOSURE, nil, nil) - clo.Func = fn + clo.SetFunc(fn) fn.ClosureType = ntype fn.OClosure = clo @@ -37,8 +37,8 @@ func (p *noder) funcLit(expr *syntax.FuncLit) *ir.Node { // make the list of pointers for the closure call. for _, v := range fn.ClosureVars.Slice() { // Unlink from v1; see comment in syntax.go type Param for these fields. - v1 := v.Name.Defn - v1.Name.Param.Innermost = v.Name.Param.Outer + v1 := v.Name().Defn + v1.Name().Param.Innermost = v.Name().Param.Outer // If the closure usage of v is not dense, // we need to make it dense; now that we're out @@ -68,7 +68,7 @@ func (p *noder) funcLit(expr *syntax.FuncLit) *ir.Node { // obtains f3's v, creating it if necessary (as it is in the example). // // capturevars will decide whether to use v directly or &v. - v.Name.Param.Outer = oldname(v.Sym) + v.Name().Param.Outer = oldname(v.Sym()) } return clo @@ -79,7 +79,7 @@ func (p *noder) funcLit(expr *syntax.FuncLit) *ir.Node { // TODO: This creation of the named function should probably really be done in a // separate pass from type-checking. func typecheckclosure(clo *ir.Node, top int) { - fn := clo.Func + fn := clo.Func() dcl := fn.Decl // Set current associated iota value, so iota can be used inside // function in ConstSpec, see issue #22344 @@ -88,7 +88,7 @@ func typecheckclosure(clo *ir.Node, top int) { } fn.ClosureType = typecheck(fn.ClosureType, ctxType) - clo.Type = fn.ClosureType.Type + clo.SetType(fn.ClosureType.Type()) fn.ClosureCalled = top&ctxCallee != 0 // Do not typecheck dcl twice, otherwise, we will end up pushing @@ -99,22 +99,22 @@ func typecheckclosure(clo *ir.Node, top int) { } for _, ln := range fn.ClosureVars.Slice() { - n := ln.Name.Defn - if !n.Name.Captured() { - n.Name.SetCaptured(true) - if n.Name.Decldepth == 0 { + n := ln.Name().Defn + if !n.Name().Captured() { + n.Name().SetCaptured(true) + if n.Name().Decldepth == 0 { base.Fatalf("typecheckclosure: var %S does not have decldepth assigned", n) } // Ignore assignments to the variable in straightline code // preceding the first capturing by a closure. 
- if n.Name.Decldepth == decldepth { - n.Name.SetAssigned(false) + if n.Name().Decldepth == decldepth { + n.Name().SetAssigned(false) } } } - fn.Nname.Sym = closurename(Curfn) + fn.Nname.SetSym(closurename(Curfn)) setNodeNameFunc(fn.Nname) dcl = typecheck(dcl, ctxStmt) @@ -122,12 +122,12 @@ func typecheckclosure(clo *ir.Node, top int) { // At top level (in a variable initialization: curfn==nil) we're not // ready to type check code yet; we'll check it later, because the // underlying closure function we create is added to xtop. - if Curfn != nil && clo.Type != nil { + if Curfn != nil && clo.Type() != nil { oldfn := Curfn Curfn = dcl olddd := decldepth decldepth = 1 - typecheckslice(dcl.Nbody.Slice(), ctxStmt) + typecheckslice(dcl.Body().Slice(), ctxStmt) decldepth = olddd Curfn = oldfn } @@ -146,7 +146,7 @@ func closurename(outerfunc *ir.Node) *types.Sym { gen := &globClosgen if outerfunc != nil { - if outerfunc.Func.OClosure != nil { + if outerfunc.Func().OClosure != nil { prefix = "" } @@ -155,8 +155,8 @@ func closurename(outerfunc *ir.Node) *types.Sym { // There may be multiple functions named "_". In those // cases, we can't use their individual Closgens as it // would lead to name clashes. - if !ir.IsBlank(outerfunc.Func.Nname) { - gen = &outerfunc.Func.Closgen + if !ir.IsBlank(outerfunc.Func().Nname) { + gen = &outerfunc.Func().Closgen } } @@ -174,12 +174,12 @@ var capturevarscomplete bool // after capturing (effectively constant). func capturevars(dcl *ir.Node) { lno := base.Pos - base.Pos = dcl.Pos - fn := dcl.Func + base.Pos = dcl.Pos() + fn := dcl.Func() cvars := fn.ClosureVars.Slice() out := cvars[:0] for _, v := range cvars { - if v.Type == nil { + if v.Type() == nil { // If v.Type is nil, it means v looked like it // was going to be used in the closure, but // isn't. This happens in struct literals like @@ -192,29 +192,29 @@ func capturevars(dcl *ir.Node) { // type check the & of closed variables outside the closure, // so that the outer frame also grabs them and knows they escape. - dowidth(v.Type) + dowidth(v.Type()) - outer := v.Name.Param.Outer - outermost := v.Name.Defn + outer := v.Name().Param.Outer + outermost := v.Name().Defn // out parameters will be assigned to implicitly upon return. - if outermost.Class() != ir.PPARAMOUT && !outermost.Name.Addrtaken() && !outermost.Name.Assigned() && v.Type.Width <= 128 { - v.Name.SetByval(true) + if outermost.Class() != ir.PPARAMOUT && !outermost.Name().Addrtaken() && !outermost.Name().Assigned() && v.Type().Width <= 128 { + v.Name().SetByval(true) } else { - outermost.Name.SetAddrtaken(true) + outermost.Name().SetAddrtaken(true) outer = ir.Nod(ir.OADDR, outer, nil) } if base.Flag.LowerM > 1 { var name *types.Sym - if v.Name.Curfn != nil && v.Name.Curfn.Func.Nname != nil { - name = v.Name.Curfn.Func.Nname.Sym + if v.Name().Curfn != nil && v.Name().Curfn.Func().Nname != nil { + name = v.Name().Curfn.Func().Nname.Sym() } how := "ref" - if v.Name.Byval() { + if v.Name().Byval() { how = "value" } - base.WarnfAt(v.Pos, "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym, outermost.Name.Addrtaken(), outermost.Name.Assigned(), int32(v.Type.Width)) + base.WarnfAt(v.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym(), outermost.Name().Addrtaken(), outermost.Name().Assigned(), int32(v.Type().Width)) } outer = typecheck(outer, ctxExpr) @@ -229,8 +229,8 @@ func capturevars(dcl *ir.Node) { // It transform closure bodies to properly reference captured variables. 
func transformclosure(dcl *ir.Node) { lno := base.Pos - base.Pos = dcl.Pos - fn := dcl.Func + base.Pos = dcl.Pos() + fn := dcl.Func() if fn.ClosureCalled { // If the closure is directly called, we transform it to a plain function call @@ -255,33 +255,33 @@ func transformclosure(dcl *ir.Node) { var params []*types.Field var decls []*ir.Node for _, v := range fn.ClosureVars.Slice() { - if !v.Name.Byval() { + if !v.Name().Byval() { // If v of type T is captured by reference, // we introduce function param &v *T // and v remains PAUTOHEAP with &v heapaddr // (accesses will implicitly deref &v). - addr := NewName(lookup("&" + v.Sym.Name)) - addr.Type = types.NewPtr(v.Type) - v.Name.Param.Heapaddr = addr + addr := NewName(lookup("&" + v.Sym().Name)) + addr.SetType(types.NewPtr(v.Type())) + v.Name().Param.Heapaddr = addr v = addr } v.SetClass(ir.PPARAM) decls = append(decls, v) - fld := types.NewField(src.NoXPos, v.Sym, v.Type) + fld := types.NewField(src.NoXPos, v.Sym(), v.Type()) fld.Nname = ir.AsTypesNode(v) params = append(params, fld) } if len(params) > 0 { // Prepend params and decls. - f.Type.Params().SetFields(append(params, f.Type.Params().FieldSlice()...)) + f.Type().Params().SetFields(append(params, f.Type().Params().FieldSlice()...)) fn.Dcl = append(decls, fn.Dcl...) } - dowidth(f.Type) - dcl.Type = f.Type // update type of ODCLFUNC + dowidth(f.Type()) + dcl.SetType(f.Type()) // update type of ODCLFUNC } else { // The closure is not called, so it is going to stay as closure. var body []*ir.Node @@ -290,15 +290,15 @@ func transformclosure(dcl *ir.Node) { // cv refers to the field inside of closure OSTRUCTLIT. cv := ir.Nod(ir.OCLOSUREVAR, nil, nil) - cv.Type = v.Type - if !v.Name.Byval() { - cv.Type = types.NewPtr(v.Type) + cv.SetType(v.Type()) + if !v.Name().Byval() { + cv.SetType(types.NewPtr(v.Type())) } - offset = Rnd(offset, int64(cv.Type.Align)) - cv.Xoffset = offset - offset += cv.Type.Width + offset = Rnd(offset, int64(cv.Type().Align)) + cv.SetOffset(offset) + offset += cv.Type().Width - if v.Name.Byval() && v.Type.Width <= int64(2*Widthptr) { + if v.Name().Byval() && v.Type().Width <= int64(2*Widthptr) { // If it is a small variable captured by value, downgrade it to PAUTO. v.SetClass(ir.PAUTO) fn.Dcl = append(fn.Dcl, v) @@ -306,14 +306,14 @@ func transformclosure(dcl *ir.Node) { } else { // Declare variable holding addresses taken from closure // and initialize in entry prologue. - addr := NewName(lookup("&" + v.Sym.Name)) - addr.Type = types.NewPtr(v.Type) + addr := NewName(lookup("&" + v.Sym().Name)) + addr.SetType(types.NewPtr(v.Type())) addr.SetClass(ir.PAUTO) - addr.Name.SetUsed(true) - addr.Name.Curfn = dcl + addr.Name().SetUsed(true) + addr.Name().Curfn = dcl fn.Dcl = append(fn.Dcl, addr) - v.Name.Param.Heapaddr = addr - if v.Name.Byval() { + v.Name().Param.Heapaddr = addr + if v.Name().Byval() { cv = ir.Nod(ir.OADDR, cv, nil) } body = append(body, ir.Nod(ir.OAS, addr, cv)) @@ -333,21 +333,21 @@ func transformclosure(dcl *ir.Node) { // hasemptycvars reports whether closure clo has an // empty list of captured vars. 
func hasemptycvars(clo *ir.Node) bool { - return clo.Func.ClosureVars.Len() == 0 + return clo.Func().ClosureVars.Len() == 0 } // closuredebugruntimecheck applies boilerplate checks for debug flags // and compiling runtime func closuredebugruntimecheck(clo *ir.Node) { if base.Debug.Closure > 0 { - if clo.Esc == EscHeap { - base.WarnfAt(clo.Pos, "heap closure, captured vars = %v", clo.Func.ClosureVars) + if clo.Esc() == EscHeap { + base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func().ClosureVars) } else { - base.WarnfAt(clo.Pos, "stack closure, captured vars = %v", clo.Func.ClosureVars) + base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func().ClosureVars) } } - if base.Flag.CompilingRuntime && clo.Esc == EscHeap { - base.ErrorfAt(clo.Pos, "heap-allocated closure, not allowed in runtime") + if base.Flag.CompilingRuntime && clo.Esc() == EscHeap { + base.ErrorfAt(clo.Pos(), "heap-allocated closure, not allowed in runtime") } } @@ -371,12 +371,12 @@ func closureType(clo *ir.Node) *types.Type { fields := []*ir.Node{ namedfield(".F", types.Types[types.TUINTPTR]), } - for _, v := range clo.Func.ClosureVars.Slice() { - typ := v.Type - if !v.Name.Byval() { + for _, v := range clo.Func().ClosureVars.Slice() { + typ := v.Type() + if !v.Name().Byval() { typ = types.NewPtr(typ) } - fields = append(fields, symfield(v.Sym, typ)) + fields = append(fields, symfield(v.Sym(), typ)) } typ := tostruct(fields) typ.SetNoalg(true) @@ -384,12 +384,12 @@ func closureType(clo *ir.Node) *types.Type { } func walkclosure(clo *ir.Node, init *ir.Nodes) *ir.Node { - fn := clo.Func + fn := clo.Func() // If no closure vars, don't bother wrapping. if hasemptycvars(clo) { if base.Debug.Closure > 0 { - base.WarnfAt(clo.Pos, "closure converted to global") + base.WarnfAt(clo.Pos(), "closure converted to global") } return fn.Nname } @@ -398,21 +398,21 @@ func walkclosure(clo *ir.Node, init *ir.Nodes) *ir.Node { typ := closureType(clo) clos := ir.Nod(ir.OCOMPLIT, nil, typenod(typ)) - clos.Esc = clo.Esc - clos.List.Set(append([]*ir.Node{ir.Nod(ir.OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...)) + clos.SetEsc(clo.Esc()) + clos.PtrList().Set(append([]*ir.Node{ir.Nod(ir.OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...)) clos = ir.Nod(ir.OADDR, clos, nil) - clos.Esc = clo.Esc + clos.SetEsc(clo.Esc()) // Force type conversion from *struct to the func type. - clos = convnop(clos, clo.Type) + clos = convnop(clos, clo.Type()) // non-escaping temp to use, if any. if x := prealloc[clo]; x != nil { - if !types.Identical(typ, x.Type) { + if !types.Identical(typ, x.Type()) { panic("closure type does not match order's assigned type") } - clos.Left.Right = x + clos.Left().SetRight(x) delete(prealloc, clo) } @@ -420,7 +420,7 @@ func walkclosure(clo *ir.Node, init *ir.Nodes) *ir.Node { } func typecheckpartialcall(dot *ir.Node, sym *types.Sym) { - switch dot.Op { + switch dot.Op() { case ir.ODOTINTER, ir.ODOTMETH: break @@ -429,19 +429,19 @@ func typecheckpartialcall(dot *ir.Node, sym *types.Sym) { } // Create top-level function. 
- dcl := makepartialcall(dot, dot.Type, sym) - dcl.Func.SetWrapper(true) - dot.Op = ir.OCALLPART - dot.Right = NewName(sym) - dot.Type = dcl.Type - dot.Func = dcl.Func + dcl := makepartialcall(dot, dot.Type(), sym) + dcl.Func().SetWrapper(true) + dot.SetOp(ir.OCALLPART) + dot.SetRight(NewName(sym)) + dot.SetType(dcl.Type()) + dot.SetFunc(dcl.Func()) dot.SetOpt(nil) // clear types.Field from ODOTMETH } // makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed // for partial calls. func makepartialcall(dot *ir.Node, t0 *types.Type, meth *types.Sym) *ir.Node { - rcvrtype := dot.Left.Type + rcvrtype := dot.Left().Type() sym := methodSymSuffix(rcvrtype, meth, "-fm") if sym.Uniq() { @@ -465,52 +465,52 @@ func makepartialcall(dot *ir.Node, t0 *types.Type, meth *types.Sym) *ir.Node { // case. See issue 29389. tfn := ir.Nod(ir.OTFUNC, nil, nil) - tfn.List.Set(structargs(t0.Params(), true)) - tfn.Rlist.Set(structargs(t0.Results(), false)) + tfn.PtrList().Set(structargs(t0.Params(), true)) + tfn.PtrRlist().Set(structargs(t0.Results(), false)) dcl := dclfunc(sym, tfn) - fn := dcl.Func + fn := dcl.Func() fn.SetDupok(true) fn.SetNeedctxt(true) - tfn.Type.SetPkg(t0.Pkg()) + tfn.Type().SetPkg(t0.Pkg()) // Declare and initialize variable holding receiver. cv := ir.Nod(ir.OCLOSUREVAR, nil, nil) - cv.Type = rcvrtype - cv.Xoffset = Rnd(int64(Widthptr), int64(cv.Type.Align)) + cv.SetType(rcvrtype) + cv.SetOffset(Rnd(int64(Widthptr), int64(cv.Type().Align))) ptr := NewName(lookup(".this")) declare(ptr, ir.PAUTO) - ptr.Name.SetUsed(true) + ptr.Name().SetUsed(true) var body []*ir.Node if rcvrtype.IsPtr() || rcvrtype.IsInterface() { - ptr.Type = rcvrtype + ptr.SetType(rcvrtype) body = append(body, ir.Nod(ir.OAS, ptr, cv)) } else { - ptr.Type = types.NewPtr(rcvrtype) + ptr.SetType(types.NewPtr(rcvrtype)) body = append(body, ir.Nod(ir.OAS, ptr, ir.Nod(ir.OADDR, cv, nil))) } call := ir.Nod(ir.OCALL, nodSym(ir.OXDOT, ptr, meth), nil) - call.List.Set(paramNnames(tfn.Type)) - call.SetIsDDD(tfn.Type.IsVariadic()) + call.PtrList().Set(paramNnames(tfn.Type())) + call.SetIsDDD(tfn.Type().IsVariadic()) if t0.NumResults() != 0 { n := ir.Nod(ir.ORETURN, nil, nil) - n.List.Set1(call) + n.PtrList().Set1(call) call = n } body = append(body, call) - dcl.Nbody.Set(body) + dcl.PtrBody().Set(body) funcbody() dcl = typecheck(dcl, ctxStmt) // Need to typecheck the body of the just-generated wrapper. // typecheckslice() requires that Curfn is set when processing an ORETURN. Curfn = dcl - typecheckslice(dcl.Nbody.Slice(), ctxStmt) + typecheckslice(dcl.Body().Slice(), ctxStmt) sym.Def = ir.AsTypesNode(dcl) xtop = append(xtop, dcl) Curfn = savecurfn @@ -525,7 +525,7 @@ func makepartialcall(dot *ir.Node, t0 *types.Type, meth *types.Sym) *ir.Node { func partialCallType(n *ir.Node) *types.Type { t := tostruct([]*ir.Node{ namedfield("F", types.Types[types.TUINTPTR]), - namedfield("R", n.Left.Type), + namedfield("R", n.Left().Type()), }) t.SetNoalg(true) return t @@ -539,13 +539,13 @@ func walkpartialcall(n *ir.Node, init *ir.Nodes) *ir.Node { // // Like walkclosure above. - if n.Left.Type.IsInterface() { + if n.Left().Type().IsInterface() { // Trigger panic for method on nil interface now. // Otherwise it happens in the wrapper and is confusing. 
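// For example:
//
//	var r io.Reader // nil
//	f := r.Read     // panics here, when the method value is made
//
// The OCHECKNIL on the itab below surfaces the nil-interface panic at
// the method value expression itself rather than inside the generated
// wrapper, where the traceback would be confusing.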
- n.Left = cheapexpr(n.Left, init) - n.Left = walkexpr(n.Left, nil) + n.SetLeft(cheapexpr(n.Left(), init)) + n.SetLeft(walkexpr(n.Left(), nil)) - tab := ir.Nod(ir.OITAB, n.Left, nil) + tab := ir.Nod(ir.OITAB, n.Left(), nil) tab = typecheck(tab, ctxExpr) c := ir.Nod(ir.OCHECKNIL, tab, nil) @@ -556,21 +556,21 @@ func walkpartialcall(n *ir.Node, init *ir.Nodes) *ir.Node { typ := partialCallType(n) clos := ir.Nod(ir.OCOMPLIT, nil, typenod(typ)) - clos.Esc = n.Esc - clos.List.Set2(ir.Nod(ir.OCFUNC, n.Func.Nname, nil), n.Left) + clos.SetEsc(n.Esc()) + clos.PtrList().Set2(ir.Nod(ir.OCFUNC, n.Func().Nname, nil), n.Left()) clos = ir.Nod(ir.OADDR, clos, nil) - clos.Esc = n.Esc + clos.SetEsc(n.Esc()) // Force type conversion from *struct to the func type. - clos = convnop(clos, n.Type) + clos = convnop(clos, n.Type()) // non-escaping temp to use, if any. if x := prealloc[n]; x != nil { - if !types.Identical(typ, x.Type) { + if !types.Identical(typ, x.Type()) { panic("partial call type does not match order's assigned type") } - clos.Left.Right = x + clos.Left().SetRight(x) delete(prealloc, n) } @@ -580,14 +580,14 @@ func walkpartialcall(n *ir.Node, init *ir.Nodes) *ir.Node { // callpartMethod returns the *types.Field representing the method // referenced by method value n. func callpartMethod(n *ir.Node) *types.Field { - if n.Op != ir.OCALLPART { + if n.Op() != ir.OCALLPART { base.Fatalf("expected OCALLPART, got %v", n) } // TODO(mdempsky): Optimize this. If necessary, // makepartialcall could save m for us somewhere. var m *types.Field - if lookdot0(n.Right.Sym, n.Left.Type, &m, false) != 1 { + if lookdot0(n.Right().Sym(), n.Left().Type(), &m, false) != 1 { base.Fatalf("failed to find field for OCALLPART") } diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index a557e20d46b4d..27e54b46c83d4 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -106,30 +106,30 @@ func convlit1(n *ir.Node, t *types.Type, explicit bool, context func() string) * base.Fatalf("bad conversion to untyped: %v", t) } - if n == nil || n.Type == nil { + if n == nil || n.Type() == nil { // Allow sloppy callers. return n } - if !n.Type.IsUntyped() { + if !n.Type().IsUntyped() { // Already typed; nothing to do. return n } - if n.Op == ir.OLITERAL || n.Op == ir.ONIL { + if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL { // Can't always set n.Type directly on OLITERAL nodes. // See discussion on CL 20813. n = n.RawCopy() } // Nil is technically not a constant, so handle it specially. 
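// For example, `var p *int = nil` arrives with an untyped nil and a
// target type *int, so the nil simply takes on that type; a bare
// `x := nil` has no target type (t == nil) and is rejected below with
// "use of untyped nil".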
- if n.Type.Etype == types.TNIL { - if n.Op != ir.ONIL { - base.Fatalf("unexpected op: %v (%v)", n, n.Op) + if n.Type().Etype == types.TNIL { + if n.Op() != ir.ONIL { + base.Fatalf("unexpected op: %v (%v)", n, n.Op()) } if t == nil { base.Errorf("use of untyped nil") n.SetDiag(true) - n.Type = nil + n.SetType(nil) return n } @@ -138,15 +138,15 @@ func convlit1(n *ir.Node, t *types.Type, explicit bool, context func() string) * return n } - n.Type = t + n.SetType(t) return n } if t == nil || !ir.OKForConst[t.Etype] { - t = defaultType(n.Type) + t = defaultType(n.Type()) } - switch n.Op { + switch n.Op() { default: base.Fatalf("unexpected untyped expression: %v", n) @@ -155,60 +155,60 @@ func convlit1(n *ir.Node, t *types.Type, explicit bool, context func() string) * if v.Kind() == constant.Unknown { break } - n.Type = t + n.SetType(t) n.SetVal(v) return n case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.OREAL, ir.OIMAG: - ot := operandType(n.Op, t) + ot := operandType(n.Op(), t) if ot == nil { n = defaultlit(n, nil) break } - n.Left = convlit(n.Left, ot) - if n.Left.Type == nil { - n.Type = nil + n.SetLeft(convlit(n.Left(), ot)) + if n.Left().Type() == nil { + n.SetType(nil) return n } - n.Type = t + n.SetType(t) return n case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT, ir.OOROR, ir.OANDAND, ir.OCOMPLEX: - ot := operandType(n.Op, t) + ot := operandType(n.Op(), t) if ot == nil { n = defaultlit(n, nil) break } - n.Left = convlit(n.Left, ot) - n.Right = convlit(n.Right, ot) - if n.Left.Type == nil || n.Right.Type == nil { - n.Type = nil + n.SetLeft(convlit(n.Left(), ot)) + n.SetRight(convlit(n.Right(), ot)) + if n.Left().Type() == nil || n.Right().Type() == nil { + n.SetType(nil) return n } - if !types.Identical(n.Left.Type, n.Right.Type) { - base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, n.Left.Type, n.Right.Type) - n.Type = nil + if !types.Identical(n.Left().Type(), n.Right().Type()) { + base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, n.Left().Type(), n.Right().Type()) + n.SetType(nil) return n } - n.Type = t + n.SetType(t) return n case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: if !t.IsBoolean() { break } - n.Type = t + n.SetType(t) return n case ir.OLSH, ir.ORSH: - n.Left = convlit1(n.Left, t, explicit, nil) - n.Type = n.Left.Type - if n.Type != nil && !n.Type.IsInteger() { - base.Errorf("invalid operation: %v (shift of type %v)", n, n.Type) - n.Type = nil + n.SetLeft(convlit1(n.Left(), t, explicit, nil)) + n.SetType(n.Left().Type()) + if n.Type() != nil && !n.Type().IsInteger() { + base.Errorf("invalid operation: %v (shift of type %v)", n, n.Type()) + n.SetType(nil) } return n } @@ -225,7 +225,7 @@ func convlit1(n *ir.Node, t *types.Type, explicit bool, context func() string) * } n.SetDiag(true) } - n.Type = nil + n.SetType(nil) return n } @@ -439,75 +439,75 @@ var tokenForOp = [...]token.Token{ // Otherwise, evalConst returns a new OLITERAL with the same value as n, // and with .Orig pointing back to n. func evalConst(n *ir.Node) *ir.Node { - nl, nr := n.Left, n.Right + nl, nr := n.Left(), n.Right() // Pick off just the opcodes that can be constant evaluated. 
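// For example, 1+2, "a"+"b", and 7<<3 fold here because every operand
// is an OLITERAL; an expression with any non-constant operand falls
// out of the switch unchanged and is evaluated at run time.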
- switch op := n.Op; op { + switch op := n.Op(); op { case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT: - if nl.Op == ir.OLITERAL { + if nl.Op() == ir.OLITERAL { var prec uint - if n.Type.IsUnsigned() { - prec = uint(n.Type.Size() * 8) + if n.Type().IsUnsigned() { + prec = uint(n.Type().Size() * 8) } return origConst(n, constant.UnaryOp(tokenForOp[op], nl.Val(), prec)) } case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT, ir.OOROR, ir.OANDAND: - if nl.Op == ir.OLITERAL && nr.Op == ir.OLITERAL { + if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL { rval := nr.Val() // check for divisor underflow in complex division (see issue 20227) - if op == ir.ODIV && n.Type.IsComplex() && constant.Sign(square(constant.Real(rval))) == 0 && constant.Sign(square(constant.Imag(rval))) == 0 { + if op == ir.ODIV && n.Type().IsComplex() && constant.Sign(square(constant.Real(rval))) == 0 && constant.Sign(square(constant.Imag(rval))) == 0 { base.Errorf("complex division by zero") - n.Type = nil + n.SetType(nil) return n } if (op == ir.ODIV || op == ir.OMOD) && constant.Sign(rval) == 0 { base.Errorf("division by zero") - n.Type = nil + n.SetType(nil) return n } tok := tokenForOp[op] - if op == ir.ODIV && n.Type.IsInteger() { + if op == ir.ODIV && n.Type().IsInteger() { tok = token.QUO_ASSIGN // integer division } return origConst(n, constant.BinaryOp(nl.Val(), tok, rval)) } case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: - if nl.Op == ir.OLITERAL && nr.Op == ir.OLITERAL { + if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL { return origBoolConst(n, constant.Compare(nl.Val(), tokenForOp[op], nr.Val())) } case ir.OLSH, ir.ORSH: - if nl.Op == ir.OLITERAL && nr.Op == ir.OLITERAL { + if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL { // shiftBound from go/types; "so we can express smallestFloat64" const shiftBound = 1023 - 1 + 52 s, ok := constant.Uint64Val(nr.Val()) if !ok || s > shiftBound { base.Errorf("invalid shift count %v", nr) - n.Type = nil + n.SetType(nil) break } return origConst(n, constant.Shift(toint(nl.Val()), tokenForOp[op], uint(s))) } case ir.OCONV, ir.ORUNESTR: - if ir.OKForConst[n.Type.Etype] && nl.Op == ir.OLITERAL { - return origConst(n, convertVal(nl.Val(), n.Type, true)) + if ir.OKForConst[n.Type().Etype] && nl.Op() == ir.OLITERAL { + return origConst(n, convertVal(nl.Val(), n.Type(), true)) } case ir.OCONVNOP: - if ir.OKForConst[n.Type.Etype] && nl.Op == ir.OLITERAL { + if ir.OKForConst[n.Type().Etype] && nl.Op() == ir.OLITERAL { // set so n.Orig gets OCONV instead of OCONVNOP - n.Op = ir.OCONV + n.SetOp(ir.OCONV) return origConst(n, nl.Val()) } case ir.OADDSTR: // Merge adjacent constants in the argument list. 
- s := n.List.Slice() + s := n.List().Slice() need := 0 for i := 0; i < len(s); i++ { if i == 0 || !ir.IsConst(s[i-1], constant.String) || !ir.IsConst(s[i], constant.String) { @@ -537,7 +537,7 @@ func evalConst(n *ir.Node) *ir.Node { } nl := origConst(s[i], constant.MakeString(strings.Join(strs, ""))) - nl.Orig = nl // it's bigger than just s[i] + nl.SetOrig(nl) // it's bigger than just s[i] newList = append(newList, nl) i = i2 - 1 } else { @@ -546,18 +546,18 @@ func evalConst(n *ir.Node) *ir.Node { } n = ir.Copy(n) - n.List.Set(newList) + n.PtrList().Set(newList) return n case ir.OCAP, ir.OLEN: - switch nl.Type.Etype { + switch nl.Type().Etype { case types.TSTRING: if ir.IsConst(nl, constant.String) { return origIntConst(n, int64(len(nl.StringVal()))) } case types.TARRAY: if !hascallchan(nl) { - return origIntConst(n, nl.Type.NumElem()) + return origIntConst(n, nl.Type().NumElem()) } } @@ -565,17 +565,17 @@ func evalConst(n *ir.Node) *ir.Node { return origIntConst(n, evalunsafe(n)) case ir.OREAL: - if nl.Op == ir.OLITERAL { + if nl.Op() == ir.OLITERAL { return origConst(n, constant.Real(nl.Val())) } case ir.OIMAG: - if nl.Op == ir.OLITERAL { + if nl.Op() == ir.OLITERAL { return origConst(n, constant.Imag(nl.Val())) } case ir.OCOMPLEX: - if nl.Op == ir.OLITERAL && nr.Op == ir.OLITERAL { + if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL { return origConst(n, makeComplex(nl.Val(), nr.Val())) } } @@ -621,7 +621,7 @@ var overflowNames = [...]string{ // origConst returns an OLITERAL with orig n and value v. func origConst(n *ir.Node, v constant.Value) *ir.Node { lno := setlineno(n) - v = convertVal(v, n.Type, false) + v = convertVal(v, n.Type(), false) base.Pos = lno switch v.Kind() { @@ -631,19 +631,19 @@ func origConst(n *ir.Node, v constant.Value) *ir.Node { } fallthrough case constant.Unknown: - what := overflowNames[n.Op] + what := overflowNames[n.Op()] if what == "" { - base.Fatalf("unexpected overflow: %v", n.Op) + base.Fatalf("unexpected overflow: %v", n.Op()) } - base.ErrorfAt(n.Pos, "constant %v overflow", what) - n.Type = nil + base.ErrorfAt(n.Pos(), "constant %v overflow", what) + n.SetType(nil) return n } orig := n - n = ir.NodAt(orig.Pos, ir.OLITERAL, nil, nil) - n.Orig = orig - n.Type = orig.Type + n = ir.NodAt(orig.Pos(), ir.OLITERAL, nil, nil) + n.SetOrig(orig) + n.SetType(orig.Type()) n.SetVal(v) return n } @@ -663,16 +663,16 @@ func origIntConst(n *ir.Node, v int64) *ir.Node { // The results of defaultlit2 MUST be assigned back to l and r, e.g. // n.Left, n.Right = defaultlit2(n.Left, n.Right, force) func defaultlit2(l *ir.Node, r *ir.Node, force bool) (*ir.Node, *ir.Node) { - if l.Type == nil || r.Type == nil { + if l.Type() == nil || r.Type() == nil { return l, r } - if !l.Type.IsUntyped() { - r = convlit(r, l.Type) + if !l.Type().IsUntyped() { + r = convlit(r, l.Type()) return l, r } - if !r.Type.IsUntyped() { - l = convlit(l, r.Type) + if !r.Type().IsUntyped() { + l = convlit(l, r.Type()) return l, r } @@ -681,17 +681,17 @@ func defaultlit2(l *ir.Node, r *ir.Node, force bool) (*ir.Node, *ir.Node) { } // Can't mix bool with non-bool, string with non-string, or nil with anything (untyped). 
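// For example, in `1 + 2.5i` the untyped int and untyped complex
// operands both convert to the larger default kind, complex128. A mix
// like `true + 1` is deliberately left alone here and rejected later
// by the type checker.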
- if l.Type.IsBoolean() != r.Type.IsBoolean() { + if l.Type().IsBoolean() != r.Type().IsBoolean() { return l, r } - if l.Type.IsString() != r.Type.IsString() { + if l.Type().IsString() != r.Type().IsString() { return l, r } if ir.IsNil(l) || ir.IsNil(r) { return l, r } - t := defaultType(mixUntyped(l.Type, r.Type)) + t := defaultType(mixUntyped(l.Type(), r.Type())) l = convlit(l, t) r = convlit(r, t) return l, r @@ -748,7 +748,7 @@ func defaultType(t *types.Type) *types.Type { } func smallintconst(n *ir.Node) bool { - if n.Op == ir.OLITERAL { + if n.Op() == ir.OLITERAL { v, ok := constant.Int64Val(n.Val()) return ok && int64(int32(v)) == v } @@ -761,10 +761,10 @@ func smallintconst(n *ir.Node) bool { // integer, or negative, it returns -1. If n is too large, it // returns -2. func indexconst(n *ir.Node) int64 { - if n.Op != ir.OLITERAL { + if n.Op() != ir.OLITERAL { return -1 } - if !n.Type.IsInteger() && n.Type.Etype != types.TIDEAL { + if !n.Type().IsInteger() && n.Type().Etype != types.TIDEAL { return -1 } @@ -784,14 +784,14 @@ func indexconst(n *ir.Node) int64 { // Expressions derived from nil, like string([]byte(nil)), while they // may be known at compile time, are not Go language constants. func isGoConst(n *ir.Node) bool { - return n.Op == ir.OLITERAL + return n.Op() == ir.OLITERAL } func hascallchan(n *ir.Node) bool { if n == nil { return false } - switch n.Op { + switch n.Op() { case ir.OAPPEND, ir.OCALL, ir.OCALLFUNC, @@ -815,15 +815,15 @@ func hascallchan(n *ir.Node) bool { return true } - if hascallchan(n.Left) || hascallchan(n.Right) { + if hascallchan(n.Left()) || hascallchan(n.Right()) { return true } - for _, n1 := range n.List.Slice() { + for _, n1 := range n.List().Slice() { if hascallchan(n1) { return true } } - for _, n2 := range n.Rlist.Slice() { + for _, n2 := range n.Rlist().Slice() { if hascallchan(n2) { return true } @@ -852,14 +852,14 @@ type constSetKey struct { // // n must not be an untyped constant. func (s *constSet) add(pos src.XPos, n *ir.Node, what, where string) { - if n.Op == ir.OCONVIFACE && n.Implicit() { - n = n.Left + if n.Op() == ir.OCONVIFACE && n.Implicit() { + n = n.Left() } if !isGoConst(n) { return } - if n.Type.IsUntyped() { + if n.Type().IsUntyped() { base.Fatalf("%v is untyped", n) } @@ -878,7 +878,7 @@ func (s *constSet) add(pos src.XPos, n *ir.Node, what, where string) { // #21866 by treating all type aliases like byte/uint8 and // rune/int32. - typ := n.Type + typ := n.Type() switch typ { case types.Bytetype: typ = types.Types[types.TUINT8] @@ -888,7 +888,7 @@ func (s *constSet) add(pos src.XPos, n *ir.Node, what, where string) { k := constSetKey{typ, ir.ConstValue(n)} if hasUniquePos(n) { - pos = n.Pos + pos = n.Pos() } if s.m == nil { diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 6fee872fd2d4c..8b3274890f3c2 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -64,78 +64,78 @@ func declare(n *ir.Node, ctxt ir.Class) { return } - if n.Name == nil { + if n.Name() == nil { // named OLITERAL needs Name; most OLITERALs don't. - n.Name = new(ir.Name) + n.SetName(new(ir.Name)) } - s := n.Sym + s := n.Sym() // kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later. 
if !inimport && !typecheckok && s.Pkg != ir.LocalPkg { - base.ErrorfAt(n.Pos, "cannot declare name %v", s) + base.ErrorfAt(n.Pos(), "cannot declare name %v", s) } gen := 0 if ctxt == ir.PEXTERN { if s.Name == "init" { - base.ErrorfAt(n.Pos, "cannot declare init - must be func") + base.ErrorfAt(n.Pos(), "cannot declare init - must be func") } if s.Name == "main" && s.Pkg.Name == "main" { - base.ErrorfAt(n.Pos, "cannot declare main - must be func") + base.ErrorfAt(n.Pos(), "cannot declare main - must be func") } externdcl = append(externdcl, n) } else { if Curfn == nil && ctxt == ir.PAUTO { - base.Pos = n.Pos + base.Pos = n.Pos() base.Fatalf("automatic outside function") } if Curfn != nil && ctxt != ir.PFUNC { - Curfn.Func.Dcl = append(Curfn.Func.Dcl, n) + Curfn.Func().Dcl = append(Curfn.Func().Dcl, n) } - if n.Op == ir.OTYPE { + if n.Op() == ir.OTYPE { declare_typegen++ gen = declare_typegen - } else if n.Op == ir.ONAME && ctxt == ir.PAUTO && !strings.Contains(s.Name, "·") { + } else if n.Op() == ir.ONAME && ctxt == ir.PAUTO && !strings.Contains(s.Name, "·") { vargen++ gen = vargen } types.Pushdcl(s) - n.Name.Curfn = Curfn + n.Name().Curfn = Curfn } if ctxt == ir.PAUTO { - n.Xoffset = 0 + n.SetOffset(0) } if s.Block == types.Block { // functype will print errors about duplicate function arguments. // Don't repeat the error here. if ctxt != ir.PPARAM && ctxt != ir.PPARAMOUT { - redeclare(n.Pos, s, "in this block") + redeclare(n.Pos(), s, "in this block") } } s.Block = types.Block s.Lastlineno = base.Pos s.Def = ir.AsTypesNode(n) - n.Name.Vargen = int32(gen) + n.Name().Vargen = int32(gen) n.SetClass(ctxt) if ctxt == ir.PFUNC { - n.Sym.SetFunc(true) + n.Sym().SetFunc(true) } autoexport(n, ctxt) } func addvar(n *ir.Node, t *types.Type, ctxt ir.Class) { - if n == nil || n.Sym == nil || (n.Op != ir.ONAME && n.Op != ir.ONONAME) || t == nil { + if n == nil || n.Sym() == nil || (n.Op() != ir.ONAME && n.Op() != ir.ONONAME) || t == nil { base.Fatalf("addvar: n=%v t=%v nil", n, t) } - n.Op = ir.ONAME + n.SetOp(ir.ONAME) declare(n, ctxt) - n.Type = t + n.SetType(t) } // declare variables from grammar @@ -147,13 +147,13 @@ func variter(vl []*ir.Node, t *ir.Node, el []*ir.Node) []*ir.Node { if len(el) == 1 && len(vl) > 1 { e := el[0] as2 := ir.Nod(ir.OAS2, nil, nil) - as2.List.Set(vl) - as2.Rlist.Set1(e) + as2.PtrList().Set(vl) + as2.PtrRlist().Set1(e) for _, v := range vl { - v.Op = ir.ONAME + v.SetOp(ir.ONAME) declare(v, dclcontext) - v.Name.Param.Ntype = t - v.Name.Defn = as2 + v.Name().Param.Ntype = t + v.Name().Defn = as2 if Curfn != nil { init = append(init, ir.Nod(ir.ODCL, v, nil)) } @@ -174,9 +174,9 @@ func variter(vl []*ir.Node, t *ir.Node, el []*ir.Node) []*ir.Node { el = el[1:] } - v.Op = ir.ONAME + v.SetOp(ir.ONAME) declare(v, dclcontext) - v.Name.Param.Ntype = t + v.Name().Param.Ntype = t if e != nil || Curfn != nil || ir.IsBlank(v) { if Curfn != nil { @@ -184,8 +184,8 @@ func variter(vl []*ir.Node, t *ir.Node, el []*ir.Node) []*ir.Node { } e = ir.Nod(ir.OAS, v, e) init = append(init, e) - if e.Right != nil { - v.Name.Defn = e + if e.Right() != nil { + v.Name().Defn = e } } } @@ -202,8 +202,8 @@ func newnoname(s *types.Sym) *ir.Node { base.Fatalf("newnoname nil") } n := ir.Nod(ir.ONONAME, nil, nil) - n.Sym = s - n.Xoffset = 0 + n.SetSym(s) + n.SetOffset(0) return n } @@ -213,7 +213,7 @@ func newfuncnamel(pos src.XPos, s *types.Sym, fn *ir.Func) *ir.Node { base.Fatalf("newfuncnamel - already have name") } n := ir.NewNameAt(pos, s) - n.Func = fn + n.SetFunc(fn) fn.Nname = n return n } @@ -222,7 
+222,7 @@ func newfuncnamel(pos src.XPos, s *types.Sym, fn *ir.Func) *ir.Node { // being declared. func dclname(s *types.Sym) *ir.Node { n := NewName(s) - n.Op = ir.ONONAME // caller will correct it + n.SetOp(ir.ONONAME) // caller will correct it return n } @@ -234,10 +234,10 @@ func typenodl(pos src.XPos, t *types.Type) *ir.Node { // if we copied another type with *t = *u // then t->nod might be out of date, so // check t->nod->type too - if ir.AsNode(t.Nod) == nil || ir.AsNode(t.Nod).Type != t { + if ir.AsNode(t.Nod) == nil || ir.AsNode(t.Nod).Type() != t { t.Nod = ir.AsTypesNode(ir.NodAt(pos, ir.OTYPE, nil, nil)) - ir.AsNode(t.Nod).Type = t - ir.AsNode(t.Nod).Sym = t.Sym + ir.AsNode(t.Nod).SetType(t) + ir.AsNode(t.Nod).SetSym(t.Sym) } return ir.AsNode(t.Nod) @@ -253,7 +253,7 @@ func namedfield(s string, typ *types.Type) *ir.Node { func symfield(s *types.Sym, typ *types.Type) *ir.Node { n := nodSym(ir.ODCLFIELD, nil, s) - n.Type = typ + n.SetType(typ) return n } @@ -270,28 +270,28 @@ func oldname(s *types.Sym) *ir.Node { return newnoname(s) } - if Curfn != nil && n.Op == ir.ONAME && n.Name.Curfn != nil && n.Name.Curfn != Curfn { + if Curfn != nil && n.Op() == ir.ONAME && n.Name().Curfn != nil && n.Name().Curfn != Curfn { // Inner func is referring to var in outer func. // // TODO(rsc): If there is an outer variable x and we // are parsing x := 5 inside the closure, until we get to // the := it looks like a reference to the outer x so we'll // make x a closure variable unnecessarily. - c := n.Name.Param.Innermost - if c == nil || c.Name.Curfn != Curfn { + c := n.Name().Param.Innermost + if c == nil || c.Name().Curfn != Curfn { // Do not have a closure var for the active closure yet; make one. c = NewName(s) c.SetClass(ir.PAUTOHEAP) - c.Name.SetIsClosureVar(true) + c.Name().SetIsClosureVar(true) c.SetIsDDD(n.IsDDD()) - c.Name.Defn = n + c.Name().Defn = n // Link into list of active closure variables. // Popped from list in func funcLit. 
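// The bookkeeping here forms a per-symbol stack: Innermost points at
// the capture record for the innermost closure currently being parsed,
// and each record's Outer link points one enclosing closure outward,
// so doubly nested closures referring to the same x each get their own
// closure variable.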
- c.Name.Param.Outer = n.Name.Param.Innermost - n.Name.Param.Innermost = c + c.Name().Param.Outer = n.Name().Param.Innermost + n.Name().Param.Innermost = c - Curfn.Func.ClosureVars.Append(c) + Curfn.Func().ClosureVars.Append(c) } // return ref to closure var, not original @@ -313,13 +313,13 @@ func importName(sym *types.Sym) *ir.Node { // := declarations func colasname(n *ir.Node) bool { - switch n.Op { + switch n.Op() { case ir.ONAME, ir.ONONAME, ir.OPACK, ir.OTYPE, ir.OLITERAL: - return n.Sym != nil + return n.Sym() != nil } return false @@ -327,8 +327,8 @@ func colasname(n *ir.Node) bool { func colasdefn(left []*ir.Node, defn *ir.Node) { for _, n := range left { - if n.Sym != nil { - n.Sym.SetUniq(true) + if n.Sym() != nil { + n.Sym().SetUniq(true) } } @@ -338,44 +338,44 @@ func colasdefn(left []*ir.Node, defn *ir.Node) { continue } if !colasname(n) { - base.ErrorfAt(defn.Pos, "non-name %v on left side of :=", n) + base.ErrorfAt(defn.Pos(), "non-name %v on left side of :=", n) nerr++ continue } - if !n.Sym.Uniq() { - base.ErrorfAt(defn.Pos, "%v repeated on left side of :=", n.Sym) + if !n.Sym().Uniq() { + base.ErrorfAt(defn.Pos(), "%v repeated on left side of :=", n.Sym()) n.SetDiag(true) nerr++ continue } - n.Sym.SetUniq(false) - if n.Sym.Block == types.Block { + n.Sym().SetUniq(false) + if n.Sym().Block == types.Block { continue } nnew++ - n = NewName(n.Sym) + n = NewName(n.Sym()) declare(n, dclcontext) - n.Name.Defn = defn - defn.Ninit.Append(ir.Nod(ir.ODCL, n, nil)) + n.Name().Defn = defn + defn.PtrInit().Append(ir.Nod(ir.ODCL, n, nil)) left[i] = n } if nnew == 0 && nerr == 0 { - base.ErrorfAt(defn.Pos, "no new variables on left side of :=") + base.ErrorfAt(defn.Pos(), "no new variables on left side of :=") } } // declare the arguments in an // interface field declaration. func ifacedcl(n *ir.Node) { - if n.Op != ir.ODCLFIELD || n.Left == nil { + if n.Op() != ir.ODCLFIELD || n.Left() == nil { base.Fatalf("ifacedcl") } - if n.Sym.IsBlank() { + if n.Sym().IsBlank() { base.Errorf("methods must have a unique non-blank name") } } @@ -392,16 +392,16 @@ func funchdr(n *ir.Node) { types.Markdcl() - if n.Func.Nname != nil && n.Func.Nname.Name.Param.Ntype != nil { - funcargs(n.Func.Nname.Name.Param.Ntype) + if n.Func().Nname != nil && n.Func().Nname.Name().Param.Ntype != nil { + funcargs(n.Func().Nname.Name().Param.Ntype) } else { - funcargs2(n.Type) + funcargs2(n.Type()) } } func funcargs(nt *ir.Node) { - if nt.Op != ir.OTFUNC { - base.Fatalf("funcargs %v", nt.Op) + if nt.Op() != ir.OTFUNC { + base.Fatalf("funcargs %v", nt.Op()) } // re-start the variable generation number @@ -411,13 +411,13 @@ func funcargs(nt *ir.Node) { // TODO(mdempsky): This is ugly, and only necessary because // esc.go uses Vargen to figure out result parameters' index // within the result tuple. - vargen = nt.Rlist.Len() + vargen = nt.Rlist().Len() // declare the receiver and in arguments. - if nt.Left != nil { - funcarg(nt.Left, ir.PPARAM) + if nt.Left() != nil { + funcarg(nt.Left(), ir.PPARAM) } - for _, n := range nt.List.Slice() { + for _, n := range nt.List().Slice() { funcarg(n, ir.PPARAM) } @@ -425,21 +425,21 @@ func funcargs(nt *ir.Node) { vargen = 0 // declare the out arguments. - gen := nt.List.Len() - for _, n := range nt.Rlist.Slice() { - if n.Sym == nil { + gen := nt.List().Len() + for _, n := range nt.Rlist().Slice() { + if n.Sym() == nil { // Name so that escape analysis can track it. ~r stands for 'result'. 
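// For example, `func f(a int) (int, int)` names its results ~r1 and
// ~r2: gen starts at the count of declared inputs, keeping the
// generated names unique across the whole parameter tuple.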
- n.Sym = lookupN("~r", gen) + n.SetSym(lookupN("~r", gen)) gen++ } - if n.Sym.IsBlank() { + if n.Sym().IsBlank() { // Give it a name so we can assign to it during return. ~b stands for 'blank'. // The name must be different from ~r above because if you have // func f() (_ int) // func g() int // f is allowed to use a plain 'return' with no arguments, while g is not. // So the two cases must be distinguished. - n.Sym = lookupN("~b", gen) + n.SetSym(lookupN("~b", gen)) gen++ } @@ -450,20 +450,20 @@ func funcargs(nt *ir.Node) { } func funcarg(n *ir.Node, ctxt ir.Class) { - if n.Op != ir.ODCLFIELD { - base.Fatalf("funcarg %v", n.Op) + if n.Op() != ir.ODCLFIELD { + base.Fatalf("funcarg %v", n.Op()) } - if n.Sym == nil { + if n.Sym() == nil { return } - n.Right = ir.NewNameAt(n.Pos, n.Sym) - n.Right.Name.Param.Ntype = n.Left - n.Right.SetIsDDD(n.IsDDD()) - declare(n.Right, ctxt) + n.SetRight(ir.NewNameAt(n.Pos(), n.Sym())) + n.Right().Name().Param.Ntype = n.Left() + n.Right().SetIsDDD(n.IsDDD()) + declare(n.Right(), ctxt) vargen++ - n.Right.Name.Vargen = int32(vargen) + n.Right().Name().Vargen = int32(vargen) } // Same as funcargs, except run over an already constructed TFUNC. @@ -491,7 +491,7 @@ func funcarg2(f *types.Field, ctxt ir.Class) { } n := ir.NewNameAt(f.Pos, f.Sym) f.Nname = ir.AsTypesNode(n) - n.Type = f.Type + n.SetType(f.Type) n.SetIsDDD(f.IsDDD()) declare(n, ctxt) } @@ -537,21 +537,21 @@ func checkembeddedtype(t *types.Type) { func structfield(n *ir.Node) *types.Field { lno := base.Pos - base.Pos = n.Pos + base.Pos = n.Pos() - if n.Op != ir.ODCLFIELD { + if n.Op() != ir.ODCLFIELD { base.Fatalf("structfield: oops %v\n", n) } - if n.Left != nil { - n.Left = typecheck(n.Left, ctxType) - n.Type = n.Left.Type - n.Left = nil + if n.Left() != nil { + n.SetLeft(typecheck(n.Left(), ctxType)) + n.SetType(n.Left().Type()) + n.SetLeft(nil) } - f := types.NewField(n.Pos, n.Sym, n.Type) + f := types.NewField(n.Pos(), n.Sym(), n.Type()) if n.Embedded() { - checkembeddedtype(n.Type) + checkembeddedtype(n.Type()) f.Embedded = 1 } if n.HasVal() { @@ -612,9 +612,9 @@ func tofunargs(l []*ir.Node, funarg types.Funarg) *types.Type { for i, n := range l { f := structfield(n) f.SetIsDDD(n.IsDDD()) - if n.Right != nil { - n.Right.Type = f.Type - f.Nname = ir.AsTypesNode(n.Right) + if n.Right() != nil { + n.Right().SetType(f.Type) + f.Nname = ir.AsTypesNode(n.Right()) } if f.Broke() { t.SetBroke(true) @@ -634,9 +634,9 @@ func tofunargsfield(fields []*types.Field, funarg types.Funarg) *types.Type { func interfacefield(n *ir.Node) *types.Field { lno := base.Pos - base.Pos = n.Pos + base.Pos = n.Pos() - if n.Op != ir.ODCLFIELD { + if n.Op() != ir.ODCLFIELD { base.Fatalf("interfacefield: oops %v\n", n) } @@ -649,13 +649,13 @@ func interfacefield(n *ir.Node) *types.Field { // If Sym != nil, then Sym is MethodName and Left is Signature. // Otherwise, Left is InterfaceTypeName. 
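// For example, in `interface { Close() error; io.Reader }` the Close
// entry has Sym "Close" with its signature in Left, while the embedded
// io.Reader entry has a nil Sym and the named type io.Reader in Left.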
- if n.Left != nil { - n.Left = typecheck(n.Left, ctxType) - n.Type = n.Left.Type - n.Left = nil + if n.Left() != nil { + n.SetLeft(typecheck(n.Left(), ctxType)) + n.SetType(n.Left().Type()) + n.SetLeft(nil) } - f := types.NewField(n.Pos, n.Sym, n.Type) + f := types.NewField(n.Pos(), n.Sym(), n.Type()) base.Pos = lno return f @@ -872,7 +872,7 @@ func addmethod(n *ir.Node, msym *types.Sym, t *types.Type, local, nointerface bo } f := types.NewField(base.Pos, msym, t) - f.Nname = ir.AsTypesNode(n.Func.Nname) + f.Nname = ir.AsTypesNode(n.Func().Nname) f.SetNointerface(nointerface) mt.Methods().Append(f) @@ -936,26 +936,26 @@ func makefuncsym(s *types.Sym) { // setNodeNameFunc marks a node as a function. func setNodeNameFunc(n *ir.Node) { - if n.Op != ir.ONAME || n.Class() != ir.Pxxx { + if n.Op() != ir.ONAME || n.Class() != ir.Pxxx { base.Fatalf("expected ONAME/Pxxx node, got %v", n) } n.SetClass(ir.PFUNC) - n.Sym.SetFunc(true) + n.Sym().SetFunc(true) } func dclfunc(sym *types.Sym, tfn *ir.Node) *ir.Node { - if tfn.Op != ir.OTFUNC { + if tfn.Op() != ir.OTFUNC { base.Fatalf("expected OTFUNC node, got %v", tfn) } fn := ir.Nod(ir.ODCLFUNC, nil, nil) - fn.Func.Nname = newfuncnamel(base.Pos, sym, fn.Func) - fn.Func.Nname.Name.Defn = fn - fn.Func.Nname.Name.Param.Ntype = tfn - setNodeNameFunc(fn.Func.Nname) + fn.Func().Nname = newfuncnamel(base.Pos, sym, fn.Func()) + fn.Func().Nname.Name().Defn = fn + fn.Func().Nname.Name().Param.Ntype = tfn + setNodeNameFunc(fn.Func().Nname) funchdr(fn) - fn.Func.Nname.Name.Param.Ntype = typecheck(fn.Func.Nname.Name.Param.Ntype, ctxType) + fn.Func().Nname.Name().Param.Ntype = typecheck(fn.Func().Nname.Name().Param.Ntype, ctxType) return fn } @@ -987,7 +987,7 @@ func newNowritebarrierrecChecker() *nowritebarrierrecChecker { // directly. This has to happen before transformclosure since // it's a lot harder to work out the argument after. for _, n := range xtop { - if n.Op != ir.ODCLFUNC { + if n.Op() != ir.ODCLFUNC { continue } c.curfn = n @@ -998,31 +998,31 @@ func newNowritebarrierrecChecker() *nowritebarrierrecChecker { } func (c *nowritebarrierrecChecker) findExtraCalls(n *ir.Node) bool { - if n.Op != ir.OCALLFUNC { + if n.Op() != ir.OCALLFUNC { return true } - fn := n.Left - if fn == nil || fn.Op != ir.ONAME || fn.Class() != ir.PFUNC || fn.Name.Defn == nil { + fn := n.Left() + if fn == nil || fn.Op() != ir.ONAME || fn.Class() != ir.PFUNC || fn.Name().Defn == nil { return true } - if !isRuntimePkg(fn.Sym.Pkg) || fn.Sym.Name != "systemstack" { + if !isRuntimePkg(fn.Sym().Pkg) || fn.Sym().Name != "systemstack" { return true } var callee *ir.Node - arg := n.List.First() - switch arg.Op { + arg := n.List().First() + switch arg.Op() { case ir.ONAME: - callee = arg.Name.Defn + callee = arg.Name().Defn case ir.OCLOSURE: - callee = arg.Func.Decl + callee = arg.Func().Decl default: base.Fatalf("expected ONAME or OCLOSURE node, got %+v", arg) } - if callee.Op != ir.ODCLFUNC { + if callee.Op() != ir.ODCLFUNC { base.Fatalf("expected ODCLFUNC node, got %+v", callee) } - c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos}) + c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos()}) return true } @@ -1035,12 +1035,12 @@ func (c *nowritebarrierrecChecker) findExtraCalls(n *ir.Node) bool { // // This can be called concurrently for different from Nodes. 
func (c *nowritebarrierrecChecker) recordCall(from *ir.Node, to *obj.LSym, pos src.XPos) { - if from.Op != ir.ODCLFUNC { + if from.Op() != ir.ODCLFUNC { base.Fatalf("expected ODCLFUNC, got %v", from) } // We record this information on the *Func so this is // concurrent-safe. - fn := from.Func + fn := from.Func() if fn.NWBRCalls == nil { fn.NWBRCalls = new([]ir.SymAndPos) } @@ -1064,27 +1064,27 @@ func (c *nowritebarrierrecChecker) check() { var q ir.NodeQueue for _, n := range xtop { - if n.Op != ir.ODCLFUNC { + if n.Op() != ir.ODCLFUNC { continue } - symToFunc[n.Func.LSym] = n + symToFunc[n.Func().LSym] = n // Make nowritebarrierrec functions BFS roots. - if n.Func.Pragma&ir.Nowritebarrierrec != 0 { + if n.Func().Pragma&ir.Nowritebarrierrec != 0 { funcs[n] = nowritebarrierrecCall{} q.PushRight(n) } // Check go:nowritebarrier functions. - if n.Func.Pragma&ir.Nowritebarrier != 0 && n.Func.WBPos.IsKnown() { - base.ErrorfAt(n.Func.WBPos, "write barrier prohibited") + if n.Func().Pragma&ir.Nowritebarrier != 0 && n.Func().WBPos.IsKnown() { + base.ErrorfAt(n.Func().WBPos, "write barrier prohibited") } } // Perform a BFS of the call graph from all // go:nowritebarrierrec functions. enqueue := func(src, target *ir.Node, pos src.XPos) { - if target.Func.Pragma&ir.Yeswritebarrierrec != 0 { + if target.Func().Pragma&ir.Yeswritebarrierrec != 0 { // Don't flow into this function. return } @@ -1101,14 +1101,14 @@ func (c *nowritebarrierrecChecker) check() { fn := q.PopLeft() // Check fn. - if fn.Func.WBPos.IsKnown() { + if fn.Func().WBPos.IsKnown() { var err bytes.Buffer call := funcs[fn] for call.target != nil { - fmt.Fprintf(&err, "\n\t%v: called by %v", base.FmtPos(call.lineno), call.target.Func.Nname) + fmt.Fprintf(&err, "\n\t%v: called by %v", base.FmtPos(call.lineno), call.target.Func().Nname) call = funcs[call.target] } - base.ErrorfAt(fn.Func.WBPos, "write barrier prohibited by caller; %v%s", fn.Func.Nname, err.String()) + base.ErrorfAt(fn.Func().WBPos, "write barrier prohibited by caller; %v%s", fn.Func().Nname, err.String()) continue } @@ -1116,10 +1116,10 @@ func (c *nowritebarrierrecChecker) check() { for _, callee := range c.extraCalls[fn] { enqueue(fn, callee.target, callee.lineno) } - if fn.Func.NWBRCalls == nil { + if fn.Func().NWBRCalls == nil { continue } - for _, callee := range *fn.Func.NWBRCalls { + for _, callee := range *fn.Func().NWBRCalls { target := symToFunc[callee.Sym] if target != nil { enqueue(fn, target, callee.Pos) diff --git a/src/cmd/compile/internal/gc/dwinl.go b/src/cmd/compile/internal/gc/dwinl.go index 5da2871748848..1e4e43caadd65 100644 --- a/src/cmd/compile/internal/gc/dwinl.go +++ b/src/cmd/compile/internal/gc/dwinl.go @@ -236,15 +236,15 @@ func makePreinlineDclMap(fnsym *obj.LSym) map[varPos]int { dcl := preInliningDcls(fnsym) m := make(map[varPos]int) for i, n := range dcl { - pos := base.Ctxt.InnermostPos(n.Pos) + pos := base.Ctxt.InnermostPos(n.Pos()) vp := varPos{ - DeclName: unversion(n.Sym.Name), + DeclName: unversion(n.Sym().Name), DeclFile: pos.RelFilename(), DeclLine: pos.RelLine(), DeclCol: pos.Col(), } if _, found := m[vp]; found { - base.Fatalf("child dcl collision on symbol %s within %v\n", n.Sym.Name, fnsym.Name) + base.Fatalf("child dcl collision on symbol %s within %v\n", n.Sym().Name, fnsym.Name) } m[vp] = i } diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go index 636aa4a70edce..d515696add242 100644 --- a/src/cmd/compile/internal/gc/embed.go +++ b/src/cmd/compile/internal/gc/embed.go @@ -113,15 +113,15 @@ 
func varEmbed(p *noder, names []*ir.Node, typ *ir.Node, exprs []*ir.Node, embeds v := names[0] if dclcontext != ir.PEXTERN { numLocalEmbed++ - v = ir.NewNameAt(v.Pos, lookupN("embed.", numLocalEmbed)) - v.Sym.Def = ir.AsTypesNode(v) - v.Name.Param.Ntype = typ + v = ir.NewNameAt(v.Pos(), lookupN("embed.", numLocalEmbed)) + v.Sym().Def = ir.AsTypesNode(v) + v.Name().Param.Ntype = typ v.SetClass(ir.PEXTERN) externdcl = append(externdcl, v) exprs = []*ir.Node{v} } - v.Name.Param.SetEmbedFiles(list) + v.Name().Param.SetEmbedFiles(list) embedlist = append(embedlist, v) return exprs } @@ -131,17 +131,17 @@ func varEmbed(p *noder, names []*ir.Node, typ *ir.Node, exprs []*ir.Node, embeds // can't tell whether "string" and "byte" really mean "string" and "byte". // The result must be confirmed later, after type checking, using embedKind. func embedKindApprox(typ *ir.Node) int { - if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "embed")) { + if typ.Sym() != nil && typ.Sym().Name == "FS" && (typ.Sym().Pkg.Path == "embed" || (typ.Sym().Pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "embed")) { return embedFiles } // These are not guaranteed to match only string and []byte - // maybe the local package has redefined one of those words. // But it's the best we can do now during the noder. // The stricter check happens later, in initEmbed calling embedKind. - if typ.Sym != nil && typ.Sym.Name == "string" && typ.Sym.Pkg == ir.LocalPkg { + if typ.Sym() != nil && typ.Sym().Name == "string" && typ.Sym().Pkg == ir.LocalPkg { return embedString } - if typ.Op == ir.OTARRAY && typ.Left == nil && typ.Right.Sym != nil && typ.Right.Sym.Name == "byte" && typ.Right.Sym.Pkg == ir.LocalPkg { + if typ.Op() == ir.OTARRAY && typ.Left() == nil && typ.Right().Sym() != nil && typ.Right().Sym().Name == "byte" && typ.Right().Sym().Pkg == ir.LocalPkg { return embedBytes } return embedUnknown @@ -193,18 +193,18 @@ func dumpembeds() { // initEmbed emits the init data for a //go:embed variable, // which is either a string, a []byte, or an embed.FS. 
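// A typical declaration handled here:
//
//	//go:embed version.txt
//	var version string
//
// For the string and []byte kinds the file's contents become a single
// data symbol; for embed.FS the per-file table built below is emitted
// as well.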
func initEmbed(v *ir.Node) { - files := v.Name.Param.EmbedFiles() - switch kind := embedKind(v.Type); kind { + files := v.Name().Param.EmbedFiles() + switch kind := embedKind(v.Type()); kind { case embedUnknown: - base.ErrorfAt(v.Pos, "go:embed cannot apply to var of type %v", v.Type) + base.ErrorfAt(v.Pos(), "go:embed cannot apply to var of type %v", v.Type()) case embedString, embedBytes: file := files[0] - fsym, size, err := fileStringSym(v.Pos, base.Flag.Cfg.Embed.Files[file], kind == embedString, nil) + fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], kind == embedString, nil) if err != nil { - base.ErrorfAt(v.Pos, "embed %s: %v", file, err) + base.ErrorfAt(v.Pos(), "embed %s: %v", file, err) } - sym := v.Sym.Linksym() + sym := v.Sym().Linksym() off := 0 off = dsymptr(sym, off, fsym, 0) // data string off = duintptr(sym, off, uint64(size)) // len @@ -213,7 +213,7 @@ func initEmbed(v *ir.Node) { } case embedFiles: - slicedata := base.Ctxt.Lookup(`"".` + v.Sym.Name + `.files`) + slicedata := base.Ctxt.Lookup(`"".` + v.Sym().Name + `.files`) off := 0 // []files pointed at by Files off = dsymptr(slicedata, off, slicedata, 3*Widthptr) // []file, pointing just past slice @@ -228,7 +228,7 @@ func initEmbed(v *ir.Node) { const hashSize = 16 hash := make([]byte, hashSize) for _, file := range files { - off = dsymptr(slicedata, off, stringsym(v.Pos, file), 0) // file string + off = dsymptr(slicedata, off, stringsym(v.Pos(), file), 0) // file string off = duintptr(slicedata, off, uint64(len(file))) if strings.HasSuffix(file, "/") { // entry for directory - no data @@ -236,9 +236,9 @@ func initEmbed(v *ir.Node) { off = duintptr(slicedata, off, 0) off += hashSize } else { - fsym, size, err := fileStringSym(v.Pos, base.Flag.Cfg.Embed.Files[file], true, hash) + fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], true, hash) if err != nil { - base.ErrorfAt(v.Pos, "embed %s: %v", file, err) + base.ErrorfAt(v.Pos(), "embed %s: %v", file, err) } off = dsymptr(slicedata, off, fsym, 0) // data string off = duintptr(slicedata, off, uint64(size)) @@ -246,7 +246,7 @@ func initEmbed(v *ir.Node) { } } ggloblsym(slicedata, int32(off), obj.RODATA|obj.LOCAL) - sym := v.Sym.Linksym() + sym := v.Sym().Linksym() dsymptr(sym, 0, slicedata, 0) } } diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index a0aa516d9a43a..866bdf8a6f6a7 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -149,7 +149,7 @@ func init() { // escFmt is called from node printing to print information about escape analysis results. func escFmt(n *ir.Node, short bool) string { text := "" - switch n.Esc { + switch n.Esc() { case EscUnknown: break @@ -165,7 +165,7 @@ func escFmt(n *ir.Node, short bool) string { } default: - text = fmt.Sprintf("esc(%d)", n.Esc) + text = fmt.Sprintf("esc(%d)", n.Esc()) } if e, ok := n.Opt().(*EscLocation); ok && e.loopDepth != 0 { @@ -181,7 +181,7 @@ func escFmt(n *ir.Node, short bool) string { // functions. 
func escapeFuncs(fns []*ir.Node, recursive bool) { for _, fn := range fns { - if fn.Op != ir.ODCLFUNC { + if fn.Op() != ir.ODCLFUNC { base.Fatalf("unexpected node: %v", fn) } } @@ -203,10 +203,10 @@ func escapeFuncs(fns []*ir.Node, recursive bool) { } func (e *Escape) initFunc(fn *ir.Node) { - if fn.Op != ir.ODCLFUNC || fn.Esc != EscFuncUnknown { + if fn.Op() != ir.ODCLFUNC || fn.Esc() != EscFuncUnknown { base.Fatalf("unexpected node: %v", fn) } - fn.Esc = EscFuncPlanned + fn.SetEsc(EscFuncPlanned) if base.Flag.LowerM > 3 { ir.Dump("escAnalyze", fn) } @@ -215,27 +215,27 @@ func (e *Escape) initFunc(fn *ir.Node) { e.loopDepth = 1 // Allocate locations for local variables. - for _, dcl := range fn.Func.Dcl { - if dcl.Op == ir.ONAME { + for _, dcl := range fn.Func().Dcl { + if dcl.Op() == ir.ONAME { e.newLoc(dcl, false) } } } func (e *Escape) walkFunc(fn *ir.Node) { - fn.Esc = EscFuncStarted + fn.SetEsc(EscFuncStarted) // Identify labels that mark the head of an unstructured loop. - ir.InspectList(fn.Nbody, func(n *ir.Node) bool { - switch n.Op { + ir.InspectList(fn.Body(), func(n *ir.Node) bool { + switch n.Op() { case ir.OLABEL: - n.Sym.Label = ir.AsTypesNode(nonlooping) + n.Sym().Label = ir.AsTypesNode(nonlooping) case ir.OGOTO: // If we visited the label before the goto, // then this is a looping label. - if n.Sym.Label == ir.AsTypesNode(nonlooping) { - n.Sym.Label = ir.AsTypesNode(looping) + if n.Sym().Label == ir.AsTypesNode(nonlooping) { + n.Sym().Label = ir.AsTypesNode(looping) } } @@ -244,7 +244,7 @@ func (e *Escape) walkFunc(fn *ir.Node) { e.curfn = fn e.loopDepth = 1 - e.block(fn.Nbody) + e.block(fn.Body()) } // Below we implement the methods for walking the AST and recording @@ -288,9 +288,9 @@ func (e *Escape) stmt(n *ir.Node) { fmt.Printf("%v:[%d] %v stmt: %v\n", base.FmtPos(base.Pos), e.loopDepth, funcSym(e.curfn), n) } - e.stmts(n.Ninit) + e.stmts(n.Init()) - switch n.Op { + switch n.Op() { default: base.Fatalf("unexpected stmt: %v", n) @@ -301,16 +301,16 @@ func (e *Escape) stmt(n *ir.Node) { // TODO(mdempsky): Handle dead code? case ir.OBLOCK: - e.stmts(n.List) + e.stmts(n.List()) case ir.ODCL: // Record loop depth at declaration. - if !ir.IsBlank(n.Left) { - e.dcl(n.Left) + if !ir.IsBlank(n.Left()) { + e.dcl(n.Left()) } case ir.OLABEL: - switch ir.AsNode(n.Sym.Label) { + switch ir.AsNode(n.Sym().Label) { case nonlooping: if base.Flag.LowerM > 2 { fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n) @@ -323,109 +323,109 @@ func (e *Escape) stmt(n *ir.Node) { default: base.Fatalf("label missing tag") } - n.Sym.Label = nil + n.Sym().Label = nil case ir.OIF: - e.discard(n.Left) - e.block(n.Nbody) - e.block(n.Rlist) + e.discard(n.Left()) + e.block(n.Body()) + e.block(n.Rlist()) case ir.OFOR, ir.OFORUNTIL: e.loopDepth++ - e.discard(n.Left) - e.stmt(n.Right) - e.block(n.Nbody) + e.discard(n.Left()) + e.stmt(n.Right()) + e.block(n.Body()) e.loopDepth-- case ir.ORANGE: // for List = range Right { Nbody } e.loopDepth++ - ks := e.addrs(n.List) - e.block(n.Nbody) + ks := e.addrs(n.List()) + e.block(n.Body()) e.loopDepth-- // Right is evaluated outside the loop. 
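// For example, in `for _, p := range f()` the call f() is evaluated
// once before iteration begins, so its value flows at the enclosing
// loop depth; only the iteration variables live at the depth
// incremented above.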
k := e.discardHole() if len(ks) >= 2 { - if n.Right.Type.IsArray() { + if n.Right().Type().IsArray() { k = ks[1].note(n, "range") } else { k = ks[1].deref(n, "range-deref") } } - e.expr(e.later(k), n.Right) + e.expr(e.later(k), n.Right()) case ir.OSWITCH: - typesw := n.Left != nil && n.Left.Op == ir.OTYPESW + typesw := n.Left() != nil && n.Left().Op() == ir.OTYPESW var ks []EscHole - for _, cas := range n.List.Slice() { // cases - if typesw && n.Left.Left != nil { - cv := cas.Rlist.First() + for _, cas := range n.List().Slice() { // cases + if typesw && n.Left().Left() != nil { + cv := cas.Rlist().First() k := e.dcl(cv) // type switch variables have no ODCL. - if cv.Type.HasPointers() { - ks = append(ks, k.dotType(cv.Type, cas, "switch case")) + if cv.Type().HasPointers() { + ks = append(ks, k.dotType(cv.Type(), cas, "switch case")) } } - e.discards(cas.List) - e.block(cas.Nbody) + e.discards(cas.List()) + e.block(cas.Body()) } if typesw { - e.expr(e.teeHole(ks...), n.Left.Right) + e.expr(e.teeHole(ks...), n.Left().Right()) } else { - e.discard(n.Left) + e.discard(n.Left()) } case ir.OSELECT: - for _, cas := range n.List.Slice() { - e.stmt(cas.Left) - e.block(cas.Nbody) + for _, cas := range n.List().Slice() { + e.stmt(cas.Left()) + e.block(cas.Body()) } case ir.OSELRECV: - e.assign(n.Left, n.Right, "selrecv", n) + e.assign(n.Left(), n.Right(), "selrecv", n) case ir.OSELRECV2: - e.assign(n.Left, n.Right, "selrecv", n) - e.assign(n.List.First(), nil, "selrecv", n) + e.assign(n.Left(), n.Right(), "selrecv", n) + e.assign(n.List().First(), nil, "selrecv", n) case ir.ORECV: // TODO(mdempsky): Consider e.discard(n.Left). e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit case ir.OSEND: - e.discard(n.Left) - e.assignHeap(n.Right, "send", n) + e.discard(n.Left()) + e.assignHeap(n.Right(), "send", n) case ir.OAS, ir.OASOP: - e.assign(n.Left, n.Right, "assign", n) + e.assign(n.Left(), n.Right(), "assign", n) case ir.OAS2: - for i, nl := range n.List.Slice() { - e.assign(nl, n.Rlist.Index(i), "assign-pair", n) + for i, nl := range n.List().Slice() { + e.assign(nl, n.Rlist().Index(i), "assign-pair", n) } case ir.OAS2DOTTYPE: // v, ok = x.(type) - e.assign(n.List.First(), n.Right, "assign-pair-dot-type", n) - e.assign(n.List.Second(), nil, "assign-pair-dot-type", n) + e.assign(n.List().First(), n.Right(), "assign-pair-dot-type", n) + e.assign(n.List().Second(), nil, "assign-pair-dot-type", n) case ir.OAS2MAPR: // v, ok = m[k] - e.assign(n.List.First(), n.Right, "assign-pair-mapr", n) - e.assign(n.List.Second(), nil, "assign-pair-mapr", n) + e.assign(n.List().First(), n.Right(), "assign-pair-mapr", n) + e.assign(n.List().Second(), nil, "assign-pair-mapr", n) case ir.OAS2RECV: // v, ok = <-ch - e.assign(n.List.First(), n.Right, "assign-pair-receive", n) - e.assign(n.List.Second(), nil, "assign-pair-receive", n) + e.assign(n.List().First(), n.Right(), "assign-pair-receive", n) + e.assign(n.List().Second(), nil, "assign-pair-receive", n) case ir.OAS2FUNC: - e.stmts(n.Right.Ninit) - e.call(e.addrs(n.List), n.Right, nil) + e.stmts(n.Right().Init()) + e.call(e.addrs(n.List()), n.Right(), nil) case ir.ORETURN: - results := e.curfn.Type.Results().FieldSlice() - for i, v := range n.List.Slice() { + results := e.curfn.Type().Results().FieldSlice() + for i, v := range n.List().Slice() { e.assign(ir.AsNode(results[i].Nname), v, "return", n) } case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER: e.call(nil, n, nil) case ir.OGO, 
ir.ODEFER: - e.stmts(n.Left.Ninit) - e.call(nil, n.Left, n) + e.stmts(n.Left().Init()) + e.call(nil, n.Left(), n) case ir.ORETJMP: // TODO(mdempsky): What do? esc.go just ignores it. @@ -451,7 +451,7 @@ func (e *Escape) expr(k EscHole, n *ir.Node) { if n == nil { return } - e.stmts(n.Ninit) + e.stmts(n.Init()) e.exprSkipInit(k, n) } @@ -468,13 +468,13 @@ func (e *Escape) exprSkipInit(k EscHole, n *ir.Node) { uintptrEscapesHack := k.uintptrEscapesHack k.uintptrEscapesHack = false - if uintptrEscapesHack && n.Op == ir.OCONVNOP && n.Left.Type.IsUnsafePtr() { + if uintptrEscapesHack && n.Op() == ir.OCONVNOP && n.Left().Type().IsUnsafePtr() { // nop - } else if k.derefs >= 0 && !n.Type.HasPointers() { + } else if k.derefs >= 0 && !n.Type().HasPointers() { k = e.discardHole() } - switch n.Op { + switch n.Op() { default: base.Fatalf("unexpected expr: %v", n) @@ -488,61 +488,61 @@ func (e *Escape) exprSkipInit(k EscHole, n *ir.Node) { e.flow(k, e.oldLoc(n)) case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT: - e.discard(n.Left) + e.discard(n.Left()) case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE, ir.OANDAND, ir.OOROR: - e.discard(n.Left) - e.discard(n.Right) + e.discard(n.Left()) + e.discard(n.Right()) case ir.OADDR: - e.expr(k.addr(n, "address-of"), n.Left) // "address-of" + e.expr(k.addr(n, "address-of"), n.Left()) // "address-of" case ir.ODEREF: - e.expr(k.deref(n, "indirection"), n.Left) // "indirection" + e.expr(k.deref(n, "indirection"), n.Left()) // "indirection" case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER: - e.expr(k.note(n, "dot"), n.Left) + e.expr(k.note(n, "dot"), n.Left()) case ir.ODOTPTR: - e.expr(k.deref(n, "dot of pointer"), n.Left) // "dot of pointer" + e.expr(k.deref(n, "dot of pointer"), n.Left()) // "dot of pointer" case ir.ODOTTYPE, ir.ODOTTYPE2: - e.expr(k.dotType(n.Type, n, "dot"), n.Left) + e.expr(k.dotType(n.Type(), n, "dot"), n.Left()) case ir.OINDEX: - if n.Left.Type.IsArray() { - e.expr(k.note(n, "fixed-array-index-of"), n.Left) + if n.Left().Type().IsArray() { + e.expr(k.note(n, "fixed-array-index-of"), n.Left()) } else { // TODO(mdempsky): Fix why reason text. - e.expr(k.deref(n, "dot of pointer"), n.Left) + e.expr(k.deref(n, "dot of pointer"), n.Left()) } - e.discard(n.Right) + e.discard(n.Right()) case ir.OINDEXMAP: - e.discard(n.Left) - e.discard(n.Right) + e.discard(n.Left()) + e.discard(n.Right()) case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR: - e.expr(k.note(n, "slice"), n.Left) + e.expr(k.note(n, "slice"), n.Left()) low, high, max := n.SliceBounds() e.discard(low) e.discard(high) e.discard(max) case ir.OCONV, ir.OCONVNOP: - if checkPtr(e.curfn, 2) && n.Type.IsUnsafePtr() && n.Left.Type.IsPtr() { + if checkPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.Left().Type().IsPtr() { // When -d=checkptr=2 is enabled, treat // conversions to unsafe.Pointer as an // escaping operation. This allows better // runtime instrumentation, since we can more // easily detect object boundaries on the heap // than the stack. 
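// For example, under -d=checkptr=2 a conversion like
// `p := unsafe.Pointer(&x)` forces x to be heap allocated even if p
// never otherwise escapes, so the instrumentation can classify p by
// checking heap object bounds.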
- e.assignHeap(n.Left, "conversion to unsafe.Pointer", n) - } else if n.Type.IsUnsafePtr() && n.Left.Type.IsUintptr() { - e.unsafeValue(k, n.Left) + e.assignHeap(n.Left(), "conversion to unsafe.Pointer", n) + } else if n.Type().IsUnsafePtr() && n.Left().Type().IsUintptr() { + e.unsafeValue(k, n.Left()) } else { - e.expr(k, n.Left) + e.expr(k, n.Left()) } case ir.OCONVIFACE: - if !n.Left.Type.IsInterface() && !isdirectiface(n.Left.Type) { + if !n.Left().Type().IsInterface() && !isdirectiface(n.Left().Type()) { k = e.spill(k, n) } - e.expr(k.note(n, "interface-converted"), n.Left) + e.expr(k.note(n, "interface-converted"), n.Left()) case ir.ORECV: - e.discard(n.Left) + e.discard(n.Left()) case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY: e.call([]EscHole{k}, n, nil) @@ -552,13 +552,13 @@ func (e *Escape) exprSkipInit(k EscHole, n *ir.Node) { case ir.OMAKESLICE: e.spill(k, n) - e.discard(n.Left) - e.discard(n.Right) + e.discard(n.Left()) + e.discard(n.Right()) case ir.OMAKECHAN: - e.discard(n.Left) + e.discard(n.Left()) case ir.OMAKEMAP: e.spill(k, n) - e.discard(n.Left) + e.discard(n.Left()) case ir.ORECOVER: // nop @@ -583,15 +583,15 @@ func (e *Escape) exprSkipInit(k EscHole, n *ir.Node) { } paramK := e.tagHole(ks, ir.AsNode(m.Nname), m.Type.Recv()) - e.expr(e.teeHole(paramK, closureK), n.Left) + e.expr(e.teeHole(paramK, closureK), n.Left()) case ir.OPTRLIT: - e.expr(e.spill(k, n), n.Left) + e.expr(e.spill(k, n), n.Left()) case ir.OARRAYLIT: - for _, elt := range n.List.Slice() { - if elt.Op == ir.OKEY { - elt = elt.Right + for _, elt := range n.List().Slice() { + if elt.Op() == ir.OKEY { + elt = elt.Right() } e.expr(k.note(n, "array literal element"), elt) } @@ -600,89 +600,89 @@ func (e *Escape) exprSkipInit(k EscHole, n *ir.Node) { k = e.spill(k, n) k.uintptrEscapesHack = uintptrEscapesHack // for ...uintptr parameters - for _, elt := range n.List.Slice() { - if elt.Op == ir.OKEY { - elt = elt.Right + for _, elt := range n.List().Slice() { + if elt.Op() == ir.OKEY { + elt = elt.Right() } e.expr(k.note(n, "slice-literal-element"), elt) } case ir.OSTRUCTLIT: - for _, elt := range n.List.Slice() { - e.expr(k.note(n, "struct literal element"), elt.Left) + for _, elt := range n.List().Slice() { + e.expr(k.note(n, "struct literal element"), elt.Left()) } case ir.OMAPLIT: e.spill(k, n) // Map keys and values are always stored in the heap. - for _, elt := range n.List.Slice() { - e.assignHeap(elt.Left, "map literal key", n) - e.assignHeap(elt.Right, "map literal value", n) + for _, elt := range n.List().Slice() { + e.assignHeap(elt.Left(), "map literal key", n) + e.assignHeap(elt.Right(), "map literal value", n) } case ir.OCLOSURE: k = e.spill(k, n) // Link addresses of captured variables to closure. - for _, v := range n.Func.ClosureVars.Slice() { - if v.Op == ir.OXXX { // unnamed out argument; see dcl.go:/^funcargs + for _, v := range n.Func().ClosureVars.Slice() { + if v.Op() == ir.OXXX { // unnamed out argument; see dcl.go:/^funcargs continue } k := k - if !v.Name.Byval() { + if !v.Name().Byval() { k = k.addr(v, "reference") } - e.expr(k.note(n, "captured by a closure"), v.Name.Defn) + e.expr(k.note(n, "captured by a closure"), v.Name().Defn) } case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR: e.spill(k, n) - e.discard(n.Left) + e.discard(n.Left()) case ir.OADDSTR: e.spill(k, n) // Arguments of OADDSTR never escape; // runtime.concatstrings makes sure of that. 
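// concatstrings copies every operand's bytes into a freshly allocated
// result string, so in `s := a + b + c` none of a, b, or c escapes
// through the concatenation itself.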
- e.discards(n.List) + e.discards(n.List()) } } // unsafeValue evaluates a uintptr-typed arithmetic expression looking // for conversions from an unsafe.Pointer. func (e *Escape) unsafeValue(k EscHole, n *ir.Node) { - if n.Type.Etype != types.TUINTPTR { - base.Fatalf("unexpected type %v for %v", n.Type, n) + if n.Type().Etype != types.TUINTPTR { + base.Fatalf("unexpected type %v for %v", n.Type(), n) } - e.stmts(n.Ninit) + e.stmts(n.Init()) - switch n.Op { + switch n.Op() { case ir.OCONV, ir.OCONVNOP: - if n.Left.Type.IsUnsafePtr() { - e.expr(k, n.Left) + if n.Left().Type().IsUnsafePtr() { + e.expr(k, n.Left()) } else { - e.discard(n.Left) + e.discard(n.Left()) } case ir.ODOTPTR: if isReflectHeaderDataField(n) { - e.expr(k.deref(n, "reflect.Header.Data"), n.Left) + e.expr(k.deref(n, "reflect.Header.Data"), n.Left()) } else { - e.discard(n.Left) + e.discard(n.Left()) } case ir.OPLUS, ir.ONEG, ir.OBITNOT: - e.unsafeValue(k, n.Left) + e.unsafeValue(k, n.Left()) case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OAND, ir.OANDNOT: - e.unsafeValue(k, n.Left) - e.unsafeValue(k, n.Right) + e.unsafeValue(k, n.Left()) + e.unsafeValue(k, n.Right()) case ir.OLSH, ir.ORSH: - e.unsafeValue(k, n.Left) + e.unsafeValue(k, n.Left()) // RHS need not be uintptr-typed (#32959) and can't meaningfully // flow pointers anyway. - e.discard(n.Right) + e.discard(n.Right()) default: e.exprSkipInit(e.discardHole(), n) } @@ -711,7 +711,7 @@ func (e *Escape) addr(n *ir.Node) EscHole { k := e.heapHole() - switch n.Op { + switch n.Op() { default: base.Fatalf("unexpected addr: %v", n) case ir.ONAME: @@ -720,22 +720,22 @@ func (e *Escape) addr(n *ir.Node) EscHole { } k = e.oldLoc(n).asHole() case ir.ODOT: - k = e.addr(n.Left) + k = e.addr(n.Left()) case ir.OINDEX: - e.discard(n.Right) - if n.Left.Type.IsArray() { - k = e.addr(n.Left) + e.discard(n.Right()) + if n.Left().Type().IsArray() { + k = e.addr(n.Left()) } else { - e.discard(n.Left) + e.discard(n.Left()) } case ir.ODEREF, ir.ODOTPTR: e.discard(n) case ir.OINDEXMAP: - e.discard(n.Left) - e.assignHeap(n.Right, "key of map put", n) + e.discard(n.Left()) + e.assignHeap(n.Right(), "key of map put", n) } - if !n.Type.HasPointers() { + if !n.Type().HasPointers() { k = e.discardHole() } @@ -755,11 +755,11 @@ func (e *Escape) assign(dst, src *ir.Node, why string, where *ir.Node) { // Filter out some no-op assignments for escape analysis. ignore := dst != nil && src != nil && isSelfAssign(dst, src) if ignore && base.Flag.LowerM != 0 { - base.WarnfAt(where.Pos, "%v ignoring self-assignment in %S", funcSym(e.curfn), where) + base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %S", funcSym(e.curfn), where) } k := e.addr(dst) - if dst != nil && dst.Op == ir.ODOTPTR && isReflectHeaderDataField(dst) { + if dst != nil && dst.Op() == ir.ODOTPTR && isReflectHeaderDataField(dst) { e.unsafeValue(e.heapHole().note(where, why), src) } else { if ignore { @@ -777,11 +777,11 @@ func (e *Escape) assignHeap(src *ir.Node, why string, where *ir.Node) { // should contain the holes representing where the function callee's // results flows; where is the OGO/ODEFER context of the call, if any. 
func (e *Escape) call(ks []EscHole, call, where *ir.Node) { - topLevelDefer := where != nil && where.Op == ir.ODEFER && e.loopDepth == 1 + topLevelDefer := where != nil && where.Op() == ir.ODEFER && e.loopDepth == 1 if topLevelDefer { // force stack allocation of defer record, unless // open-coded defers are used (see ssa.go) - where.Esc = EscNever + where.SetEsc(EscNever) } argument := func(k EscHole, arg *ir.Node) { @@ -797,66 +797,66 @@ func (e *Escape) call(ks []EscHole, call, where *ir.Node) { e.expr(k.note(call, "call parameter"), arg) } - switch call.Op { + switch call.Op() { default: - base.Fatalf("unexpected call op: %v", call.Op) + base.Fatalf("unexpected call op: %v", call.Op()) case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: fixVariadicCall(call) // Pick out the function callee, if statically known. var fn *ir.Node - switch call.Op { + switch call.Op() { case ir.OCALLFUNC: - switch v := staticValue(call.Left); { - case v.Op == ir.ONAME && v.Class() == ir.PFUNC: + switch v := staticValue(call.Left()); { + case v.Op() == ir.ONAME && v.Class() == ir.PFUNC: fn = v - case v.Op == ir.OCLOSURE: - fn = v.Func.Nname + case v.Op() == ir.OCLOSURE: + fn = v.Func().Nname } case ir.OCALLMETH: - fn = methodExprName(call.Left) + fn = methodExprName(call.Left()) } - fntype := call.Left.Type + fntype := call.Left().Type() if fn != nil { - fntype = fn.Type + fntype = fn.Type() } if ks != nil && fn != nil && e.inMutualBatch(fn) { - for i, result := range fn.Type.Results().FieldSlice() { + for i, result := range fn.Type().Results().FieldSlice() { e.expr(ks[i], ir.AsNode(result.Nname)) } } if r := fntype.Recv(); r != nil { - argument(e.tagHole(ks, fn, r), call.Left.Left) + argument(e.tagHole(ks, fn, r), call.Left().Left()) } else { // Evaluate callee function expression. - argument(e.discardHole(), call.Left) + argument(e.discardHole(), call.Left()) } - args := call.List.Slice() + args := call.List().Slice() for i, param := range fntype.Params().FieldSlice() { argument(e.tagHole(ks, fn, param), args[i]) } case ir.OAPPEND: - args := call.List.Slice() + args := call.List().Slice() // Appendee slice may flow directly to the result, if // it has enough capacity. Alternatively, a new heap // slice might be allocated, and all slice elements // might flow to heap. 
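To make the OAPPEND reasoning above concrete, here is a hypothetical call shape (the names are illustrative, not from the compiler): if the appendee has spare capacity the result aliases its backing array, so the appendee flows to the result hole; otherwise a fresh array is allocated and every element may flow to the heap.

package main

// grow mirrors the case analyzed above: ks[0] receives the appendee's
// flow, and because the element type *int has pointers, the elements of
// the ...*int argument are treated as potentially heap-flowing.
func grow(dst []*int, extra ...*int) []*int {
	return append(dst, extra...)
}

func main() {
	x := 1
	_ = grow(make([]*int, 0, 4), &x)
}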
appendeeK := ks[0] - if args[0].Type.Elem().HasPointers() { + if args[0].Type().Elem().HasPointers() { appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice")) } argument(appendeeK, args[0]) if call.IsDDD() { appendedK := e.discardHole() - if args[1].Type.IsSlice() && args[1].Type.Elem().HasPointers() { + if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() { appendedK = e.heapHole().deref(call, "appended slice...") } argument(appendedK, args[1]) @@ -867,26 +867,26 @@ func (e *Escape) call(ks []EscHole, call, where *ir.Node) { } case ir.OCOPY: - argument(e.discardHole(), call.Left) + argument(e.discardHole(), call.Left()) copiedK := e.discardHole() - if call.Right.Type.IsSlice() && call.Right.Type.Elem().HasPointers() { + if call.Right().Type().IsSlice() && call.Right().Type().Elem().HasPointers() { copiedK = e.heapHole().deref(call, "copied slice") } - argument(copiedK, call.Right) + argument(copiedK, call.Right()) case ir.OPANIC: - argument(e.heapHole(), call.Left) + argument(e.heapHole(), call.Left()) case ir.OCOMPLEX: - argument(e.discardHole(), call.Left) - argument(e.discardHole(), call.Right) + argument(e.discardHole(), call.Left()) + argument(e.discardHole(), call.Right()) case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER: - for _, arg := range call.List.Slice() { + for _, arg := range call.List().Slice() { argument(e.discardHole(), arg) } case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE: - argument(e.discardHole(), call.Left) + argument(e.discardHole(), call.Left()) } } @@ -936,8 +936,8 @@ func (e *Escape) tagHole(ks []EscHole, fn *ir.Node, param *types.Field) EscHole // should be incorporated directly into the flow graph instead of // relying on its escape analysis tagging. func (e *Escape) inMutualBatch(fn *ir.Node) bool { - if fn.Name.Defn != nil && fn.Name.Defn.Esc < EscFuncTagged { - if fn.Name.Defn.Esc == EscFuncUnknown { + if fn.Name().Defn != nil && fn.Name().Defn.Esc() < EscFuncTagged { + if fn.Name().Defn.Esc() == EscFuncUnknown { base.Fatalf("graph inconsistency") } return true @@ -1053,9 +1053,9 @@ func (e *Escape) later(k EscHole) EscHole { // canonicalNode returns the canonical *Node that n logically // represents. 
func canonicalNode(n *ir.Node) *ir.Node { - if n != nil && n.Op == ir.ONAME && n.Name.IsClosureVar() { - n = n.Name.Defn - if n.Name.IsClosureVar() { + if n != nil && n.Op() == ir.ONAME && n.Name().IsClosureVar() { + n = n.Name().Defn + if n.Name().IsClosureVar() { base.Fatalf("still closure var") } } @@ -1067,8 +1067,8 @@ func (e *Escape) newLoc(n *ir.Node, transient bool) *EscLocation { if e.curfn == nil { base.Fatalf("e.curfn isn't set") } - if n != nil && n.Type != nil && n.Type.NotInHeap() { - base.ErrorfAt(n.Pos, "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type) + if n != nil && n.Type() != nil && n.Type().NotInHeap() { + base.ErrorfAt(n.Pos(), "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type()) } n = canonicalNode(n) @@ -1080,8 +1080,8 @@ func (e *Escape) newLoc(n *ir.Node, transient bool) *EscLocation { } e.allLocs = append(e.allLocs, loc) if n != nil { - if n.Op == ir.ONAME && n.Name.Curfn != e.curfn { - base.Fatalf("curfn mismatch: %v != %v", n.Name.Curfn, e.curfn) + if n.Op() == ir.ONAME && n.Name().Curfn != e.curfn { + base.Fatalf("curfn mismatch: %v != %v", n.Name().Curfn, e.curfn) } if n.HasOpt() { @@ -1115,13 +1115,13 @@ func (e *Escape) flow(k EscHole, src *EscLocation) { } if dst.escapes && k.derefs < 0 { // dst = &src if base.Flag.LowerM >= 2 || logopt.Enabled() { - pos := base.FmtPos(src.n.Pos) + pos := base.FmtPos(src.n.Pos()) if base.Flag.LowerM >= 2 { fmt.Printf("%s: %v escapes to heap:\n", pos, src.n) } explanation := e.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{}) if logopt.Enabled() { - logopt.LogOpt(src.n.Pos, "escapes", "escape", ir.FuncName(e.curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation) + logopt.LogOpt(src.n.Pos(), "escapes", "escape", ir.FuncName(e.curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation) } } @@ -1218,11 +1218,11 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc if l.isName(ir.PPARAM) { if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.escapes { if base.Flag.LowerM >= 2 { - fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos), l.n, e.explainLoc(root), derefs) + fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, e.explainLoc(root), derefs) } explanation := e.explainPath(root, l) if logopt.Enabled() { - logopt.LogOpt(l.n.Pos, "leak", "escape", ir.FuncName(e.curfn), + logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e.curfn), fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, e.explainLoc(root), derefs), explanation) } } @@ -1235,11 +1235,11 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc if addressOf && !l.escapes { if logopt.Enabled() || base.Flag.LowerM >= 2 { if base.Flag.LowerM >= 2 { - fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos), l.n) + fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n) } explanation := e.explainPath(root, l) if logopt.Enabled() { - logopt.LogOpt(l.n.Pos, "escape", "escape", ir.FuncName(e.curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation) + logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e.curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation) } } l.escapes = true @@ -1267,7 +1267,7 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc // explainPath prints an explanation of how src flows to the walk root. 
func (e *Escape) explainPath(root, src *EscLocation) []*logopt.LoggedOpt { visited := make(map[*EscLocation]bool) - pos := base.FmtPos(src.n.Pos) + pos := base.FmtPos(src.n.Pos()) var explanation []*logopt.LoggedOpt for { // Prevent infinite loop. @@ -1309,19 +1309,19 @@ func (e *Escape) explainFlow(pos string, dst, srcloc *EscLocation, derefs int, n if logopt.Enabled() { var epos src.XPos if notes != nil { - epos = notes.where.Pos + epos = notes.where.Pos() } else if srcloc != nil && srcloc.n != nil { - epos = srcloc.n.Pos + epos = srcloc.n.Pos() } explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", ir.FuncName(e.curfn), flow)) } for note := notes; note != nil; note = note.next { if print { - fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos)) + fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos())) } if logopt.Enabled() { - explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos, "escflow", "escape", ir.FuncName(e.curfn), + explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos(), "escflow", "escape", ir.FuncName(e.curfn), fmt.Sprintf(" from %v (%v)", note.where, note.why))) } } @@ -1336,7 +1336,7 @@ func (e *Escape) explainLoc(l *EscLocation) string { // TODO(mdempsky): Omit entirely. return "{temp}" } - if l.n.Op == ir.ONAME { + if l.n.Op() == ir.ONAME { return fmt.Sprintf("%v", l.n) } return fmt.Sprintf("{storage for %v}", l.n) @@ -1360,7 +1360,7 @@ func (e *Escape) outlives(l, other *EscLocation) bool { // // var u int // okay to stack allocate // *(func() *int { return &u }()) = 42 - if containsClosure(other.curfn, l.curfn) && l.curfn.Func.ClosureCalled { + if containsClosure(other.curfn, l.curfn) && l.curfn.Func().ClosureCalled { return false } @@ -1395,7 +1395,7 @@ func (e *Escape) outlives(l, other *EscLocation) bool { // containsClosure reports whether c is a closure contained within f. func containsClosure(f, c *ir.Node) bool { - if f.Op != ir.ODCLFUNC || c.Op != ir.ODCLFUNC { + if f.Op() != ir.ODCLFUNC || c.Op() != ir.ODCLFUNC { base.Fatalf("bad containsClosure: %v, %v", f, c) } @@ -1406,8 +1406,8 @@ func containsClosure(f, c *ir.Node) bool { // Closures within function Foo are named like "Foo.funcN..." // TODO(mdempsky): Better way to recognize this. - fn := f.Func.Nname.Sym.Name - cn := c.Func.Nname.Sym.Name + fn := f.Func().Nname.Sym().Name + cn := c.Func().Nname.Sym().Name return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.' } @@ -1417,7 +1417,7 @@ func (l *EscLocation) leakTo(sink *EscLocation, derefs int) { // into the escape analysis tag, then record a return leak. if sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn { // TODO(mdempsky): Eliminate dependency on Vargen here. - ri := int(sink.n.Name.Vargen) - 1 + ri := int(sink.n.Name().Vargen) - 1 if ri < numEscResults { // Leak to result parameter. l.paramEsc.AddResult(ri, derefs) @@ -1432,11 +1432,11 @@ func (l *EscLocation) leakTo(sink *EscLocation, derefs int) { func (e *Escape) finish(fns []*ir.Node) { // Record parameter tags for package export data. for _, fn := range fns { - fn.Esc = EscFuncTagged + fn.SetEsc(EscFuncTagged) narg := 0 for _, fs := range &types.RecvsParams { - for _, f := range fs(fn.Type).Fields().Slice() { + for _, f := range fs(fn.Type()).Fields().Slice() { narg++ f.Note = e.paramTag(fn, narg, f) } @@ -1453,21 +1453,21 @@ func (e *Escape) finish(fns []*ir.Node) { // Update n.Esc based on escape analysis results. 
if loc.escapes { - if n.Op != ir.ONAME { + if n.Op() != ir.ONAME { if base.Flag.LowerM != 0 { - base.WarnfAt(n.Pos, "%S escapes to heap", n) + base.WarnfAt(n.Pos(), "%S escapes to heap", n) } if logopt.Enabled() { - logopt.LogOpt(n.Pos, "escape", "escape", ir.FuncName(e.curfn)) + logopt.LogOpt(n.Pos(), "escape", "escape", ir.FuncName(e.curfn)) } } - n.Esc = EscHeap + n.SetEsc(EscHeap) addrescapes(n) } else { - if base.Flag.LowerM != 0 && n.Op != ir.ONAME { - base.WarnfAt(n.Pos, "%S does not escape", n) + if base.Flag.LowerM != 0 && n.Op() != ir.ONAME { + base.WarnfAt(n.Pos(), "%S does not escape", n) } - n.Esc = EscNone + n.SetEsc(EscNone) if loc.transient { n.SetTransient(true) } @@ -1476,7 +1476,7 @@ func (e *Escape) finish(fns []*ir.Node) { } func (l *EscLocation) isName(c ir.Class) bool { - return l.n != nil && l.n.Op == ir.ONAME && l.n.Class() == c + return l.n != nil && l.n.Op() == ir.ONAME && l.n.Class() == c } const numEscResults = 7 @@ -1608,10 +1608,10 @@ const ( // funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way. func funcSym(fn *ir.Node) *types.Sym { - if fn == nil || fn.Func.Nname == nil { + if fn == nil || fn.Func().Nname == nil { return nil } - return fn.Func.Nname.Sym + return fn.Func().Nname.Sym() } // Mark labels that have no backjumps to them as not increasing e.loopdepth. @@ -1639,11 +1639,11 @@ func isSliceSelfAssign(dst, src *ir.Node) bool { // when we evaluate it for dst and for src. // dst is ONAME dereference. - if dst.Op != ir.ODEREF && dst.Op != ir.ODOTPTR || dst.Left.Op != ir.ONAME { + if dst.Op() != ir.ODEREF && dst.Op() != ir.ODOTPTR || dst.Left().Op() != ir.ONAME { return false } // src is a slice operation. - switch src.Op { + switch src.Op() { case ir.OSLICE, ir.OSLICE3, ir.OSLICESTR: // OK. case ir.OSLICEARR, ir.OSLICE3ARR: @@ -1656,18 +1656,18 @@ func isSliceSelfAssign(dst, src *ir.Node) bool { // Pointer to an array is OK since it's not stored inside b directly. // For slicing an array (not pointer to array), there is an implicit OADDR. // We check that to determine non-pointer array slicing. - if src.Left.Op == ir.OADDR { + if src.Left().Op() == ir.OADDR { return false } default: return false } // slice is applied to ONAME dereference. - if src.Left.Op != ir.ODEREF && src.Left.Op != ir.ODOTPTR || src.Left.Left.Op != ir.ONAME { + if src.Left().Op() != ir.ODEREF && src.Left().Op() != ir.ODOTPTR || src.Left().Left().Op() != ir.ONAME { return false } // dst and src reference the same base ONAME. - return dst.Left == src.Left.Left + return dst.Left() == src.Left().Left() } // isSelfAssign reports whether assignment from src to dst can @@ -1687,15 +1687,15 @@ func isSelfAssign(dst, src *ir.Node) bool { // // These assignments do not change assigned object lifetime. - if dst == nil || src == nil || dst.Op != src.Op { + if dst == nil || src == nil || dst.Op() != src.Op() { return false } - switch dst.Op { + switch dst.Op() { case ir.ODOT, ir.ODOTPTR: // Safe trailing accessors that are permitted to differ. case ir.OINDEX: - if mayAffectMemory(dst.Right) || mayAffectMemory(src.Right) { + if mayAffectMemory(dst.Right()) || mayAffectMemory(src.Right()) { return false } default: @@ -1703,7 +1703,7 @@ func isSelfAssign(dst, src *ir.Node) bool { } // The expression prefix must be both "safe" and identical. 
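As a sketch of the assignment shapes this predicate accepts (the type and method below are invented for illustration): re-slicing a field into itself cannot extend the lifetime of the underlying array, so escape analysis skips the assignment, reporting it with -m as an ignored self-assignment.

package main

// buffer.trim is the isSliceSelfAssign pattern: dst is an ODOTPTR through
// the ONAME receiver b, and src slices the same base, so the assignment
// does not change the assigned object's lifetime.
type buffer struct{ buf []byte }

func (b *buffer) trim(i, j int) {
	b.buf = b.buf[i:j]
}

func main() {
	b := &buffer{buf: make([]byte, 8)}
	b.trim(2, 6)
}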
- return samesafeexpr(dst.Left, src.Left) + return samesafeexpr(dst.Left(), src.Left()) } // mayAffectMemory reports whether evaluation of n may affect the program's @@ -1716,18 +1716,18 @@ func mayAffectMemory(n *ir.Node) bool { // // We're ignoring things like division by zero, index out of range, // and nil pointer dereference here. - switch n.Op { + switch n.Op() { case ir.ONAME, ir.OCLOSUREVAR, ir.OLITERAL, ir.ONIL: return false // Left+Right group. case ir.OINDEX, ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD: - return mayAffectMemory(n.Left) || mayAffectMemory(n.Right) + return mayAffectMemory(n.Left()) || mayAffectMemory(n.Right()) // Left group. case ir.ODOT, ir.ODOTPTR, ir.ODEREF, ir.OCONVNOP, ir.OCONV, ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF: - return mayAffectMemory(n.Left) + return mayAffectMemory(n.Left()) default: return true @@ -1737,39 +1737,39 @@ func mayAffectMemory(n *ir.Node) bool { // heapAllocReason returns the reason the given Node must be heap // allocated, or the empty string if it doesn't. func heapAllocReason(n *ir.Node) string { - if n.Type == nil { + if n.Type() == nil { return "" } // Parameters are always passed via the stack. - if n.Op == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) { + if n.Op() == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) { return "" } - if n.Type.Width > maxStackVarSize { + if n.Type().Width > maxStackVarSize { return "too large for stack" } - if (n.Op == ir.ONEW || n.Op == ir.OPTRLIT) && n.Type.Elem().Width >= maxImplicitStackVarSize { + if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Width >= maxImplicitStackVarSize { return "too large for stack" } - if n.Op == ir.OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize { + if n.Op() == ir.OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize { return "too large for stack" } - if n.Op == ir.OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize { + if n.Op() == ir.OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize { return "too large for stack" } - if n.Op == ir.OMAKESLICE { - r := n.Right + if n.Op() == ir.OMAKESLICE { + r := n.Right() if r == nil { - r = n.Left + r = n.Left() } if !smallintconst(r) { return "non-constant size" } - if t := n.Type; t.Elem().Width != 0 && r.Int64Val() >= maxImplicitStackVarSize/t.Elem().Width { + if t := n.Type(); t.Elem().Width != 0 && r.Int64Val() >= maxImplicitStackVarSize/t.Elem().Width { return "too large for stack" } } @@ -1782,7 +1782,7 @@ func heapAllocReason(n *ir.Node) string { // Storage is allocated as necessary to allow the address // to be taken. func addrescapes(n *ir.Node) { - switch n.Op { + switch n.Op() { default: // Unexpected Op, probably due to a previous type error. Ignore. @@ -1796,13 +1796,13 @@ func addrescapes(n *ir.Node) { // if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping. // on PPARAM it means something different. - if n.Class() == ir.PAUTO && n.Esc == EscNever { + if n.Class() == ir.PAUTO && n.Esc() == EscNever { break } // If a closure reference escapes, mark the outer variable as escaping. - if n.Name.IsClosureVar() { - addrescapes(n.Name.Defn) + if n.Name().IsClosureVar() { + addrescapes(n.Name().Defn) break } @@ -1823,13 +1823,13 @@ func addrescapes(n *ir.Node) { // then we're analyzing the inner closure but we need to move x to the // heap in f, not in the inner closure. 
Flip over to f before calling moveToHeap. oldfn := Curfn - Curfn = n.Name.Curfn - if Curfn.Op == ir.OCLOSURE { - Curfn = Curfn.Func.Decl + Curfn = n.Name().Curfn + if Curfn.Op() == ir.OCLOSURE { + Curfn = Curfn.Func().Decl panic("can't happen") } ln := base.Pos - base.Pos = Curfn.Pos + base.Pos = Curfn.Pos() moveToHeap(n) Curfn = oldfn base.Pos = ln @@ -1840,8 +1840,8 @@ func addrescapes(n *ir.Node) { // escape--the pointer inside x does, but that // is always a heap pointer anyway. case ir.ODOT, ir.OINDEX, ir.OPAREN, ir.OCONVNOP: - if !n.Left.Type.IsSlice() { - addrescapes(n.Left) + if !n.Left().Type().IsSlice() { + addrescapes(n.Left()) } } } @@ -1861,21 +1861,21 @@ func moveToHeap(n *ir.Node) { // Allocate a local stack variable to hold the pointer to the heap copy. // temp will add it to the function declaration list automatically. - heapaddr := temp(types.NewPtr(n.Type)) - heapaddr.Sym = lookup("&" + n.Sym.Name) - heapaddr.Orig.Sym = heapaddr.Sym - heapaddr.Pos = n.Pos + heapaddr := temp(types.NewPtr(n.Type())) + heapaddr.SetSym(lookup("&" + n.Sym().Name)) + heapaddr.Orig().SetSym(heapaddr.Sym()) + heapaddr.SetPos(n.Pos()) // Unset AutoTemp to persist the &foo variable name through SSA to // liveness analysis. // TODO(mdempsky/drchase): Cleaner solution? - heapaddr.Name.SetAutoTemp(false) + heapaddr.Name().SetAutoTemp(false) // Parameters have a local stack copy used at function start/end // in addition to the copy in the heap that may live longer than // the function. if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT { - if n.Xoffset == types.BADWIDTH { + if n.Offset() == types.BADWIDTH { base.Fatalf("addrescapes before param assignment") } @@ -1883,28 +1883,28 @@ func moveToHeap(n *ir.Node) { // Preserve a copy so we can still write code referring to the original, // and substitute that copy into the function declaration list // so that analyses of the local (on-stack) variables use it. - stackcopy := NewName(n.Sym) - stackcopy.Type = n.Type - stackcopy.Xoffset = n.Xoffset + stackcopy := NewName(n.Sym()) + stackcopy.SetType(n.Type()) + stackcopy.SetOffset(n.Offset()) stackcopy.SetClass(n.Class()) - stackcopy.Name.Param.Heapaddr = heapaddr + stackcopy.Name().Param.Heapaddr = heapaddr if n.Class() == ir.PPARAMOUT { // Make sure the pointer to the heap copy is kept live throughout the function. // The function could panic at any point, and then a defer could recover. // Thus, we need the pointer to the heap copy always available so the // post-deferreturn code can copy the return value back to the stack. // See issue 16095. - heapaddr.Name.SetIsOutputParamHeapAddr(true) + heapaddr.Name().SetIsOutputParamHeapAddr(true) } - n.Name.Param.Stackcopy = stackcopy + n.Name().Param.Stackcopy = stackcopy // Substitute the stackcopy into the function variable list so that // liveness and other analyses use the underlying stack slot // and not the now-pseudo-variable n. found := false - for i, d := range Curfn.Func.Dcl { + for i, d := range Curfn.Func().Dcl { if d == n { - Curfn.Func.Dcl[i] = stackcopy + Curfn.Func().Dcl[i] = stackcopy found = true break } @@ -1917,16 +1917,16 @@ func moveToHeap(n *ir.Node) { if !found { base.Fatalf("cannot find %v in local variable list", n) } - Curfn.Func.Dcl = append(Curfn.Func.Dcl, n) + Curfn.Func().Dcl = append(Curfn.Func().Dcl, n) } // Modify n in place so that uses of n now mean indirection of the heapaddr. 
n.SetClass(ir.PAUTOHEAP) - n.Xoffset = 0 - n.Name.Param.Heapaddr = heapaddr - n.Esc = EscHeap + n.SetOffset(0) + n.Name().Param.Heapaddr = heapaddr + n.SetEsc(EscHeap) if base.Flag.LowerM != 0 { - base.WarnfAt(n.Pos, "moved to heap: %v", n) + base.WarnfAt(n.Pos(), "moved to heap: %v", n) } } @@ -1947,7 +1947,7 @@ func (e *Escape) paramTag(fn *ir.Node, narg int, f *types.Field) string { return fmt.Sprintf("arg#%d", narg) } - if fn.Nbody.Len() == 0 { + if fn.Body().Len() == 0 { // Assume that uintptr arguments must be held live across the call. // This is most important for syscall.Syscall. // See golang.org/issue/13372. @@ -1969,7 +1969,7 @@ func (e *Escape) paramTag(fn *ir.Node, narg int, f *types.Field) string { // External functions are assumed unsafe, unless // //go:noescape is given before the declaration. - if fn.Func.Pragma&ir.Noescape != 0 { + if fn.Func().Pragma&ir.Noescape != 0 { if base.Flag.LowerM != 0 && f.Sym != nil { base.WarnfAt(f.Pos, "%v does not escape", name()) } @@ -1983,7 +1983,7 @@ func (e *Escape) paramTag(fn *ir.Node, narg int, f *types.Field) string { return esc.Encode() } - if fn.Func.Pragma&ir.UintptrEscapes != 0 { + if fn.Func().Pragma&ir.UintptrEscapes != 0 { if f.Type.IsUintptr() { if base.Flag.LowerM != 0 { base.WarnfAt(f.Pos, "marking %v as escaping uintptr", name()) @@ -2028,7 +2028,7 @@ func (e *Escape) paramTag(fn *ir.Node, narg int, f *types.Field) string { } for i := 0; i < numEscResults; i++ { if x := esc.Result(i); x >= 0 { - res := fn.Type.Results().Field(i).Sym + res := fn.Type().Results().Field(i).Sym base.WarnfAt(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x) } } diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 36bbb75050749..1f0288a5911a3 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -25,13 +25,13 @@ var asmlist []*ir.Node // exportsym marks n for export (or reexport). 
func exportsym(n *ir.Node) { - if n.Sym.OnExportList() { + if n.Sym().OnExportList() { return } - n.Sym.SetOnExportList(true) + n.Sym().SetOnExportList(true) if base.Flag.E != 0 { - fmt.Printf("export symbol %v\n", n.Sym) + fmt.Printf("export symbol %v\n", n.Sym()) } exportlist = append(exportlist, n) @@ -42,21 +42,21 @@ func initname(s string) bool { } func autoexport(n *ir.Node, ctxt ir.Class) { - if n.Sym.Pkg != ir.LocalPkg { + if n.Sym().Pkg != ir.LocalPkg { return } if (ctxt != ir.PEXTERN && ctxt != ir.PFUNC) || dclcontext != ir.PEXTERN { return } - if n.Type != nil && n.Type.IsKind(types.TFUNC) && ir.IsMethod(n) { + if n.Type() != nil && n.Type().IsKind(types.TFUNC) && ir.IsMethod(n) { return } - if types.IsExported(n.Sym.Name) || initname(n.Sym.Name) { + if types.IsExported(n.Sym().Name) || initname(n.Sym().Name) { exportsym(n) } - if base.Flag.AsmHdr != "" && !n.Sym.Asm() { - n.Sym.SetAsm(true) + if base.Flag.AsmHdr != "" && !n.Sym().Asm() { + n.Sym().SetAsm(true) asmlist = append(asmlist, n) } } @@ -89,7 +89,7 @@ func importsym(ipkg *types.Pkg, s *types.Sym, op ir.Op) *ir.Node { s.SetPkgDef(ir.AsTypesNode(n)) s.Importdef = ipkg } - if n.Op != ir.ONONAME && n.Op != op { + if n.Op() != ir.ONONAME && n.Op() != op { redeclare(base.Pos, s, fmt.Sprintf("during import %q", ipkg.Path)) } return n @@ -100,18 +100,18 @@ func importsym(ipkg *types.Pkg, s *types.Sym, op ir.Op) *ir.Node { // ipkg is the package being imported func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type { n := importsym(ipkg, s, ir.OTYPE) - if n.Op != ir.OTYPE { + if n.Op() != ir.OTYPE { t := types.New(types.TFORW) t.Sym = s t.Nod = ir.AsTypesNode(n) - n.Op = ir.OTYPE - n.Pos = pos - n.Type = t + n.SetOp(ir.OTYPE) + n.SetPos(pos) + n.SetType(t) n.SetClass(ir.PEXTERN) } - t := n.Type + t := n.Type() if t == nil { base.Fatalf("importtype %v", s) } @@ -122,20 +122,20 @@ func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type { // ipkg is the package being imported func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) *ir.Node { n := importsym(ipkg, s, op) - if n.Op != ir.ONONAME { - if n.Op == op && (n.Class() != ctxt || !types.Identical(n.Type, t)) { + if n.Op() != ir.ONONAME { + if n.Op() == op && (n.Class() != ctxt || !types.Identical(n.Type(), t)) { redeclare(base.Pos, s, fmt.Sprintf("during import %q", ipkg.Path)) } return nil } - n.Op = op - n.Pos = pos + n.SetOp(op) + n.SetPos(pos) n.SetClass(ctxt) if ctxt == ir.PFUNC { - n.Sym.SetFunc(true) + n.Sym().SetFunc(true) } - n.Type = t + n.SetType(t) return n } @@ -162,7 +162,7 @@ func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) { return } - n.Func = new(ir.Func) + n.SetFunc(new(ir.Func)) if base.Flag.E != 0 { fmt.Printf("import func %v%S\n", s, t) @@ -202,26 +202,26 @@ func dumpasmhdr() { } fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", ir.LocalPkg.Name) for _, n := range asmlist { - if n.Sym.IsBlank() { + if n.Sym().IsBlank() { continue } - switch n.Op { + switch n.Op() { case ir.OLITERAL: t := n.Val().Kind() if t == constant.Float || t == constant.Complex { break } - fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym.Name, n.Val()) + fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym().Name, n.Val()) case ir.OTYPE: - t := n.Type + t := n.Type() if !t.IsStruct() || t.StructType().Map != nil || t.IsFuncArgStruct() { break } - fmt.Fprintf(b, "#define %s__size %d\n", n.Sym.Name, int(t.Width)) + fmt.Fprintf(b, "#define %s__size %d\n", n.Sym().Name, 
int(t.Width)) for _, f := range t.Fields().Slice() { if !f.Sym.IsBlank() { - fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym.Name, f.Sym.Name, int(f.Offset)) + fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym().Name, f.Sym.Name, int(f.Offset)) } } } diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go index 0f5294b17d185..d7320f3cccbbb 100644 --- a/src/cmd/compile/internal/gc/gen.go +++ b/src/cmd/compile/internal/gc/gen.go @@ -31,13 +31,13 @@ func sysvar(name string) *obj.LSym { // isParamStackCopy reports whether this is the on-stack copy of a // function parameter that moved to the heap. func isParamStackCopy(n *ir.Node) bool { - return n.Op == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Name.Param.Heapaddr != nil + return n.Op() == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Name().Param.Heapaddr != nil } // isParamHeapCopy reports whether this is the on-heap copy of // a function parameter that moved to the heap. func isParamHeapCopy(n *ir.Node) bool { - return n.Op == ir.ONAME && n.Class() == ir.PAUTOHEAP && n.Name.Param.Stackcopy != nil + return n.Op() == ir.ONAME && n.Class() == ir.PAUTOHEAP && n.Name().Param.Stackcopy != nil } // autotmpname returns the name for an autotmp variable numbered n. @@ -56,7 +56,7 @@ func tempAt(pos src.XPos, curfn *ir.Node, t *types.Type) *ir.Node { if curfn == nil { base.Fatalf("no curfn for tempAt") } - if curfn.Op == ir.OCLOSURE { + if curfn.Op() == ir.OCLOSURE { ir.Dump("tempAt", curfn) base.Fatalf("adding tempAt to wrong closure function") } @@ -65,22 +65,22 @@ func tempAt(pos src.XPos, curfn *ir.Node, t *types.Type) *ir.Node { } s := &types.Sym{ - Name: autotmpname(len(curfn.Func.Dcl)), + Name: autotmpname(len(curfn.Func().Dcl)), Pkg: ir.LocalPkg, } n := ir.NewNameAt(pos, s) s.Def = ir.AsTypesNode(n) - n.Type = t + n.SetType(t) n.SetClass(ir.PAUTO) - n.Esc = EscNever - n.Name.Curfn = curfn - n.Name.SetUsed(true) - n.Name.SetAutoTemp(true) - curfn.Func.Dcl = append(curfn.Func.Dcl, n) + n.SetEsc(EscNever) + n.Name().Curfn = curfn + n.Name().SetUsed(true) + n.Name().SetAutoTemp(true) + curfn.Func().Dcl = append(curfn.Func().Dcl, n) dowidth(t) - return n.Orig + return n.Orig() } func temp(t *types.Type) *ir.Node { diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index cf1c85ce29265..3416a00cd17cf 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -69,7 +69,7 @@ func newProgs(fn *ir.Node, worker int) *Progs { pp.next = pp.NewProg() pp.clearp(pp.next) - pp.pos = fn.Pos + pp.pos = fn.Pos() pp.settext(fn) // PCDATA tables implicitly start with index -1. 
pp.prevLive = LivenessIndex{-1, false} @@ -181,10 +181,10 @@ func (pp *Progs) settext(fn *ir.Node) { ptxt := pp.Prog(obj.ATEXT) pp.Text = ptxt - fn.Func.LSym.Func().Text = ptxt + fn.Func().LSym.Func().Text = ptxt ptxt.From.Type = obj.TYPE_MEM ptxt.From.Name = obj.NAME_EXTERN - ptxt.From.Sym = fn.Func.LSym + ptxt.From.Sym = fn.Func().LSym } // initLSym defines f's obj.LSym and initializes it based on the @@ -199,7 +199,7 @@ func initLSym(f *ir.Func, hasBody bool) { } if nam := f.Nname; !ir.IsBlank(nam) { - f.LSym = nam.Sym.Linksym() + f.LSym = nam.Sym().Linksym() if f.Pragma&ir.Systemstack != 0 { f.LSym.Set(obj.AttrCFunc, true) } @@ -221,7 +221,7 @@ func initLSym(f *ir.Func, hasBody bool) { } } - isLinknameExported := nam.Sym.Linkname != "" && (hasBody || hasDefABI) + isLinknameExported := nam.Sym().Linkname != "" && (hasBody || hasDefABI) if abi, ok := symabiRefs[f.LSym.Name]; (ok && abi == obj.ABI0) || isLinknameExported { // Either 1) this symbol is definitely // referenced as ABI0 from this package; or 2) @@ -281,7 +281,7 @@ func initLSym(f *ir.Func, hasBody bool) { // See test/recover.go for test cases and src/reflect/value.go // for the actual functions being considered. if base.Ctxt.Pkgpath == "reflect" { - switch f.Nname.Sym.Name { + switch f.Nname.Sym().Name { case "callReflect", "callMethod": flag |= obj.WRAPPER } @@ -291,20 +291,20 @@ func initLSym(f *ir.Func, hasBody bool) { } func ggloblnod(nam *ir.Node) { - s := nam.Sym.Linksym() + s := nam.Sym().Linksym() s.Gotype = ngotype(nam).Linksym() flags := 0 - if nam.Name.Readonly() { + if nam.Name().Readonly() { flags = obj.RODATA } - if nam.Type != nil && !nam.Type.HasPointers() { + if nam.Type() != nil && !nam.Type().HasPointers() { flags |= obj.NOPTR } - base.Ctxt.Globl(s, nam.Type.Width, flags) - if nam.Name.LibfuzzerExtraCounter() { + base.Ctxt.Globl(s, nam.Type().Width, flags) + if nam.Name().LibfuzzerExtraCounter() { s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER } - if nam.Sym.Linkname != "" { + if nam.Sym().Linkname != "" { // Make sure linkname'd symbol is non-package. When a symbol is // both imported and linkname'd, s.Pkg may not set to "_" in // types.Sym.Linksym because LSym already exists. Set it here. diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 212db2184ed0c..281e2de43dd0d 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -329,7 +329,7 @@ func (w *exportWriter) writeIndex(index map[*ir.Node]uint64, mainIndex bool) { } for n := range index { - pkgObjs[n.Sym.Pkg] = append(pkgObjs[n.Sym.Pkg], n) + pkgObjs[n.Sym().Pkg] = append(pkgObjs[n.Sym().Pkg], n) } var pkgs []*types.Pkg @@ -337,7 +337,7 @@ func (w *exportWriter) writeIndex(index map[*ir.Node]uint64, mainIndex bool) { pkgs = append(pkgs, pkg) sort.Slice(objs, func(i, j int) bool { - return objs[i].Sym.Name < objs[j].Sym.Name + return objs[i].Sym().Name < objs[j].Sym().Name }) } @@ -356,7 +356,7 @@ func (w *exportWriter) writeIndex(index map[*ir.Node]uint64, mainIndex bool) { objs := pkgObjs[pkg] w.uint64(uint64(len(objs))) for _, n := range objs { - w.string(n.Sym.Name) + w.string(n.Sym().Name) w.uint64(index[n]) } } @@ -395,12 +395,12 @@ func (p *iexporter) stringOff(s string) uint64 { // pushDecl adds n to the declaration work queue, if not already present. 
func (p *iexporter) pushDecl(n *ir.Node) { - if n.Sym == nil || ir.AsNode(n.Sym.Def) != n && n.Op != ir.OTYPE { - base.Fatalf("weird Sym: %v, %v", n, n.Sym) + if n.Sym() == nil || ir.AsNode(n.Sym().Def) != n && n.Op() != ir.OTYPE { + base.Fatalf("weird Sym: %v, %v", n, n.Sym()) } // Don't export predeclared declarations. - if n.Sym.Pkg == ir.BuiltinPkg || n.Sym.Pkg == unsafepkg { + if n.Sym().Pkg == ir.BuiltinPkg || n.Sym().Pkg == unsafepkg { return } @@ -425,16 +425,16 @@ type exportWriter struct { func (p *iexporter) doDecl(n *ir.Node) { w := p.newWriter() - w.setPkg(n.Sym.Pkg, false) + w.setPkg(n.Sym().Pkg, false) - switch n.Op { + switch n.Op() { case ir.ONAME: switch n.Class() { case ir.PEXTERN: // Variable. w.tag('V') - w.pos(n.Pos) - w.typ(n.Type) + w.pos(n.Pos()) + w.typ(n.Type()) w.varExt(n) case ir.PFUNC: @@ -444,8 +444,8 @@ func (p *iexporter) doDecl(n *ir.Node) { // Function. w.tag('F') - w.pos(n.Pos) - w.signature(n.Type) + w.pos(n.Pos()) + w.signature(n.Type()) w.funcExt(n) default: @@ -456,23 +456,23 @@ func (p *iexporter) doDecl(n *ir.Node) { // Constant. n = typecheck(n, ctxExpr) w.tag('C') - w.pos(n.Pos) - w.value(n.Type, n.Val()) + w.pos(n.Pos()) + w.value(n.Type(), n.Val()) case ir.OTYPE: - if IsAlias(n.Sym) { + if IsAlias(n.Sym()) { // Alias. w.tag('A') - w.pos(n.Pos) - w.typ(n.Type) + w.pos(n.Pos()) + w.typ(n.Type()) break } // Defined type. w.tag('T') - w.pos(n.Pos) + w.pos(n.Pos()) - underlying := n.Type.Orig + underlying := n.Type().Orig if underlying == types.Errortype.Orig { // For "type T error", use error as the // underlying type instead of error's own @@ -484,7 +484,7 @@ func (p *iexporter) doDecl(n *ir.Node) { } w.typ(underlying) - t := n.Type + t := n.Type() if t.IsInterface() { w.typeExt(t) break @@ -519,7 +519,7 @@ func (p *iexporter) doInline(f *ir.Node) { w := p.newWriter() w.setPkg(fnpkg(f), false) - w.stmtList(ir.AsNodes(f.Func.Inl.Body)) + w.stmtList(ir.AsNodes(f.Func().Inl.Body)) p.inlineIndex[f] = w.flush() } @@ -574,7 +574,7 @@ func (w *exportWriter) qualifiedIdent(n *ir.Node) { // Ensure any referenced declarations are written out too. w.p.pushDecl(n) - s := n.Sym + s := n.Sym() w.string(s.Name) w.pkg(s.Pkg) } @@ -956,36 +956,36 @@ func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } // Compiler-specific extensions. func (w *exportWriter) varExt(n *ir.Node) { - w.linkname(n.Sym) - w.symIdx(n.Sym) + w.linkname(n.Sym()) + w.symIdx(n.Sym()) } func (w *exportWriter) funcExt(n *ir.Node) { - w.linkname(n.Sym) - w.symIdx(n.Sym) + w.linkname(n.Sym()) + w.symIdx(n.Sym()) // Escape analysis. for _, fs := range &types.RecvsParams { - for _, f := range fs(n.Type).FieldSlice() { + for _, f := range fs(n.Type()).FieldSlice() { w.string(f.Note) } } // Inline body. - if n.Func.Inl != nil { - w.uint64(1 + uint64(n.Func.Inl.Cost)) - if n.Func.ExportInline() { + if n.Func().Inl != nil { + w.uint64(1 + uint64(n.Func().Inl.Cost)) + if n.Func().ExportInline() { w.p.doInline(n) } // Endlineno for inlined function. - if n.Name.Defn != nil { - w.pos(n.Name.Defn.Func.Endlineno) + if n.Name().Defn != nil { + w.pos(n.Name().Defn.Func().Endlineno) } else { // When the exported node was defined externally, // e.g. io exports atomic.(*Value).Load or bytes exports errors.New. // Keep it as we don't distinguish this case in iimport.go. 
- w.pos(n.Func.Endlineno) + w.pos(n.Func().Endlineno) } } else { w.uint64(0) @@ -1038,7 +1038,7 @@ func (w *exportWriter) stmtList(list ir.Nodes) { } func (w *exportWriter) node(n *ir.Node) { - if ir.OpPrec[n.Op] < 0 { + if ir.OpPrec[n.Op()] < 0 { w.stmt(n) } else { w.expr(n) @@ -1048,19 +1048,19 @@ func (w *exportWriter) node(n *ir.Node) { // Caution: stmt will emit more than one node for statement nodes n that have a non-empty // n.Ninit and where n cannot have a natural init section (such as in "if", "for", etc.). func (w *exportWriter) stmt(n *ir.Node) { - if n.Ninit.Len() > 0 && !ir.StmtWithInit(n.Op) { + if n.Init().Len() > 0 && !ir.StmtWithInit(n.Op()) { // can't use stmtList here since we don't want the final OEND - for _, n := range n.Ninit.Slice() { + for _, n := range n.Init().Slice() { w.stmt(n) } } - switch op := n.Op; op { + switch op := n.Op(); op { case ir.ODCL: w.op(ir.ODCL) - w.pos(n.Left.Pos) - w.localName(n.Left) - w.typ(n.Left.Type) + w.pos(n.Left().Pos()) + w.localName(n.Left()) + w.typ(n.Left().Type()) // case ODCLFIELD: // unimplemented - handled by default case @@ -1069,74 +1069,74 @@ func (w *exportWriter) stmt(n *ir.Node) { // Don't export "v = " initializing statements, hope they're always // preceded by the DCL which will be re-parsed and typecheck to reproduce // the "v = " again. - if n.Right != nil { + if n.Right() != nil { w.op(ir.OAS) - w.pos(n.Pos) - w.expr(n.Left) - w.expr(n.Right) + w.pos(n.Pos()) + w.expr(n.Left()) + w.expr(n.Right()) } case ir.OASOP: w.op(ir.OASOP) - w.pos(n.Pos) + w.pos(n.Pos()) w.op(n.SubOp()) - w.expr(n.Left) + w.expr(n.Left()) if w.bool(!n.Implicit()) { - w.expr(n.Right) + w.expr(n.Right()) } case ir.OAS2: w.op(ir.OAS2) - w.pos(n.Pos) - w.exprList(n.List) - w.exprList(n.Rlist) + w.pos(n.Pos()) + w.exprList(n.List()) + w.exprList(n.Rlist()) case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV: w.op(ir.OAS2) - w.pos(n.Pos) - w.exprList(n.List) - w.exprList(ir.AsNodes([]*ir.Node{n.Right})) + w.pos(n.Pos()) + w.exprList(n.List()) + w.exprList(ir.AsNodes([]*ir.Node{n.Right()})) case ir.ORETURN: w.op(ir.ORETURN) - w.pos(n.Pos) - w.exprList(n.List) + w.pos(n.Pos()) + w.exprList(n.List()) // case ORETJMP: // unreachable - generated by compiler for trampolin routines case ir.OGO, ir.ODEFER: w.op(op) - w.pos(n.Pos) - w.expr(n.Left) + w.pos(n.Pos()) + w.expr(n.Left()) case ir.OIF: w.op(ir.OIF) - w.pos(n.Pos) - w.stmtList(n.Ninit) - w.expr(n.Left) - w.stmtList(n.Nbody) - w.stmtList(n.Rlist) + w.pos(n.Pos()) + w.stmtList(n.Init()) + w.expr(n.Left()) + w.stmtList(n.Body()) + w.stmtList(n.Rlist()) case ir.OFOR: w.op(ir.OFOR) - w.pos(n.Pos) - w.stmtList(n.Ninit) - w.exprsOrNil(n.Left, n.Right) - w.stmtList(n.Nbody) + w.pos(n.Pos()) + w.stmtList(n.Init()) + w.exprsOrNil(n.Left(), n.Right()) + w.stmtList(n.Body()) case ir.ORANGE: w.op(ir.ORANGE) - w.pos(n.Pos) - w.stmtList(n.List) - w.expr(n.Right) - w.stmtList(n.Nbody) + w.pos(n.Pos()) + w.stmtList(n.List()) + w.expr(n.Right()) + w.stmtList(n.Body()) case ir.OSELECT, ir.OSWITCH: w.op(op) - w.pos(n.Pos) - w.stmtList(n.Ninit) - w.exprsOrNil(n.Left, nil) + w.pos(n.Pos()) + w.stmtList(n.Init()) + w.exprsOrNil(n.Left(), nil) w.caseList(n) // case OCASE: @@ -1144,41 +1144,41 @@ func (w *exportWriter) stmt(n *ir.Node) { case ir.OFALL: w.op(ir.OFALL) - w.pos(n.Pos) + w.pos(n.Pos()) case ir.OBREAK, ir.OCONTINUE: w.op(op) - w.pos(n.Pos) - w.exprsOrNil(n.Left, nil) + w.pos(n.Pos()) + w.exprsOrNil(n.Left(), nil) case ir.OEMPTY: // nothing to emit case ir.OGOTO, ir.OLABEL: w.op(op) - w.pos(n.Pos) - 
w.string(n.Sym.Name) + w.pos(n.Pos()) + w.string(n.Sym().Name) default: - base.Fatalf("exporter: CANNOT EXPORT: %v\nPlease notify gri@\n", n.Op) + base.Fatalf("exporter: CANNOT EXPORT: %v\nPlease notify gri@\n", n.Op()) } } func (w *exportWriter) caseList(sw *ir.Node) { - namedTypeSwitch := sw.Op == ir.OSWITCH && sw.Left != nil && sw.Left.Op == ir.OTYPESW && sw.Left.Left != nil + namedTypeSwitch := sw.Op() == ir.OSWITCH && sw.Left() != nil && sw.Left().Op() == ir.OTYPESW && sw.Left().Left() != nil - cases := sw.List.Slice() + cases := sw.List().Slice() w.uint64(uint64(len(cases))) for _, cas := range cases { - if cas.Op != ir.OCASE { + if cas.Op() != ir.OCASE { base.Fatalf("expected OCASE, got %v", cas) } - w.pos(cas.Pos) - w.stmtList(cas.List) + w.pos(cas.Pos()) + w.stmtList(cas.List()) if namedTypeSwitch { - w.localName(cas.Rlist.First()) + w.localName(cas.Rlist().First()) } - w.stmtList(cas.Nbody) + w.stmtList(cas.Body()) } } @@ -1200,38 +1200,38 @@ func (w *exportWriter) expr(n *ir.Node) { // } // from exprfmt (fmt.go) - for n.Op == ir.OPAREN || n.Implicit() && (n.Op == ir.ODEREF || n.Op == ir.OADDR || n.Op == ir.ODOT || n.Op == ir.ODOTPTR) { - n = n.Left + for n.Op() == ir.OPAREN || n.Implicit() && (n.Op() == ir.ODEREF || n.Op() == ir.OADDR || n.Op() == ir.ODOT || n.Op() == ir.ODOTPTR) { + n = n.Left() } - switch op := n.Op; op { + switch op := n.Op(); op { // expressions // (somewhat closely following the structure of exprfmt in fmt.go) case ir.ONIL: - if !n.Type.HasNil() { - base.Fatalf("unexpected type for nil: %v", n.Type) + if !n.Type().HasNil() { + base.Fatalf("unexpected type for nil: %v", n.Type()) } - if n.Orig != nil && n.Orig != n { - w.expr(n.Orig) + if n.Orig() != nil && n.Orig() != n { + w.expr(n.Orig()) break } w.op(ir.OLITERAL) - w.pos(n.Pos) - w.typ(n.Type) + w.pos(n.Pos()) + w.typ(n.Type()) case ir.OLITERAL: w.op(ir.OLITERAL) - w.pos(n.Pos) - w.value(n.Type, n.Val()) + w.pos(n.Pos()) + w.value(n.Type(), n.Val()) case ir.OMETHEXPR: // Special case: explicit name of func (*T) method(...) is turned into pkg.(*T).method, // but for export, this should be rendered as (*pkg.T).meth. // These nodes have the special property that they are names with a left OTYPE and a right ONAME. w.op(ir.OXDOT) - w.pos(n.Pos) - w.expr(n.Left) // n.Left.Op == OTYPE - w.selector(n.Right.Sym) + w.pos(n.Pos()) + w.expr(n.Left()) // n.Left.Op == OTYPE + w.selector(n.Right().Sym()) case ir.ONAME: // Package scope name. 
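For orientation between the hunks: the OMETHEXPR case above rewrites an explicit method expression for export. The source form involved looks like this (hypothetical type and method):

package main

type T struct{}

func (*T) M() {}

// f is an OMETHEXPR after type checking; the exporter emits it as an
// OXDOT so that importers render it as (*pkg.T).M rather than pkg.(*T).M.
var f = (*T).M

func main() {
	f(&T{})
}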
@@ -1250,20 +1250,20 @@ func (w *exportWriter) expr(n *ir.Node) { case ir.OTYPE: w.op(ir.OTYPE) - w.typ(n.Type) + w.typ(n.Type()) case ir.OTYPESW: w.op(ir.OTYPESW) - w.pos(n.Pos) + w.pos(n.Pos()) var s *types.Sym - if n.Left != nil { - if n.Left.Op != ir.ONONAME { - base.Fatalf("expected ONONAME, got %v", n.Left) + if n.Left() != nil { + if n.Left().Op() != ir.ONONAME { + base.Fatalf("expected ONONAME, got %v", n.Left()) } - s = n.Left.Sym + s = n.Left().Sym() } w.localIdent(s, 0) // declared pseudo-variable, if any - w.exprsOrNil(n.Right, nil) + w.exprsOrNil(n.Right(), nil) // case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC: // should have been resolved by typechecking - handled by default case @@ -1276,25 +1276,25 @@ func (w *exportWriter) expr(n *ir.Node) { case ir.OPTRLIT: w.op(ir.OADDR) - w.pos(n.Pos) - w.expr(n.Left) + w.pos(n.Pos()) + w.expr(n.Left()) case ir.OSTRUCTLIT: w.op(ir.OSTRUCTLIT) - w.pos(n.Pos) - w.typ(n.Type) - w.elemList(n.List) // special handling of field names + w.pos(n.Pos()) + w.typ(n.Type()) + w.elemList(n.List()) // special handling of field names case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT: w.op(ir.OCOMPLIT) - w.pos(n.Pos) - w.typ(n.Type) - w.exprList(n.List) + w.pos(n.Pos()) + w.typ(n.Type()) + w.exprList(n.List()) case ir.OKEY: w.op(ir.OKEY) - w.pos(n.Pos) - w.exprsOrNil(n.Left, n.Right) + w.pos(n.Pos()) + w.exprsOrNil(n.Left(), n.Right()) // case OSTRUCTKEY: // unreachable - handled in case OSTRUCTLIT by elemList @@ -1302,40 +1302,40 @@ func (w *exportWriter) expr(n *ir.Node) { case ir.OCALLPART: // An OCALLPART is an OXDOT before type checking. w.op(ir.OXDOT) - w.pos(n.Pos) - w.expr(n.Left) + w.pos(n.Pos()) + w.expr(n.Left()) // Right node should be ONAME - w.selector(n.Right.Sym) + w.selector(n.Right().Sym()) case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH: w.op(ir.OXDOT) - w.pos(n.Pos) - w.expr(n.Left) - w.selector(n.Sym) + w.pos(n.Pos()) + w.expr(n.Left()) + w.selector(n.Sym()) case ir.ODOTTYPE, ir.ODOTTYPE2: w.op(ir.ODOTTYPE) - w.pos(n.Pos) - w.expr(n.Left) - w.typ(n.Type) + w.pos(n.Pos()) + w.expr(n.Left()) + w.typ(n.Type()) case ir.OINDEX, ir.OINDEXMAP: w.op(ir.OINDEX) - w.pos(n.Pos) - w.expr(n.Left) - w.expr(n.Right) + w.pos(n.Pos()) + w.expr(n.Left()) + w.expr(n.Right()) case ir.OSLICE, ir.OSLICESTR, ir.OSLICEARR: w.op(ir.OSLICE) - w.pos(n.Pos) - w.expr(n.Left) + w.pos(n.Pos()) + w.expr(n.Left()) low, high, _ := n.SliceBounds() w.exprsOrNil(low, high) case ir.OSLICE3, ir.OSLICE3ARR: w.op(ir.OSLICE3) - w.pos(n.Pos) - w.expr(n.Left) + w.pos(n.Pos()) + w.expr(n.Left()) low, high, max := n.SliceBounds() w.exprsOrNil(low, high) w.expr(max) @@ -1343,25 +1343,25 @@ func (w *exportWriter) expr(n *ir.Node) { case ir.OCOPY, ir.OCOMPLEX: // treated like other builtin calls (see e.g., OREAL) w.op(op) - w.pos(n.Pos) - w.expr(n.Left) - w.expr(n.Right) + w.pos(n.Pos()) + w.expr(n.Left()) + w.expr(n.Right()) w.op(ir.OEND) case ir.OCONV, ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2RUNES, ir.ORUNESTR: w.op(ir.OCONV) - w.pos(n.Pos) - w.expr(n.Left) - w.typ(n.Type) + w.pos(n.Pos()) + w.expr(n.Left()) + w.typ(n.Type()) case ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN: w.op(op) - w.pos(n.Pos) - if n.Left != nil { - w.expr(n.Left) + w.pos(n.Pos()) + if n.Left() != nil { + w.expr(n.Left()) w.op(ir.OEND) } else { - w.exprList(n.List) // emits terminating OEND + w.exprList(n.List()) // emits terminating OEND } // only append() 
calls may contain '...' arguments if op == ir.OAPPEND { @@ -1372,49 +1372,49 @@ func (w *exportWriter) expr(n *ir.Node) { case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OGETG: w.op(ir.OCALL) - w.pos(n.Pos) - w.stmtList(n.Ninit) - w.expr(n.Left) - w.exprList(n.List) + w.pos(n.Pos()) + w.stmtList(n.Init()) + w.expr(n.Left()) + w.exprList(n.List()) w.bool(n.IsDDD()) case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE: w.op(op) // must keep separate from OMAKE for importer - w.pos(n.Pos) - w.typ(n.Type) + w.pos(n.Pos()) + w.typ(n.Type()) switch { default: // empty list w.op(ir.OEND) - case n.List.Len() != 0: // pre-typecheck - w.exprList(n.List) // emits terminating OEND - case n.Right != nil: - w.expr(n.Left) - w.expr(n.Right) + case n.List().Len() != 0: // pre-typecheck + w.exprList(n.List()) // emits terminating OEND + case n.Right() != nil: + w.expr(n.Left()) + w.expr(n.Right()) w.op(ir.OEND) - case n.Left != nil && (n.Op == ir.OMAKESLICE || !n.Left.Type.IsUntyped()): - w.expr(n.Left) + case n.Left() != nil && (n.Op() == ir.OMAKESLICE || !n.Left().Type().IsUntyped()): + w.expr(n.Left()) w.op(ir.OEND) } // unary expressions case ir.OPLUS, ir.ONEG, ir.OADDR, ir.OBITNOT, ir.ODEREF, ir.ONOT, ir.ORECV: w.op(op) - w.pos(n.Pos) - w.expr(n.Left) + w.pos(n.Pos()) + w.expr(n.Left()) // binary expressions case ir.OADD, ir.OAND, ir.OANDAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT, ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.OOROR, ir.ORSH, ir.OSEND, ir.OSUB, ir.OXOR: w.op(op) - w.pos(n.Pos) - w.expr(n.Left) - w.expr(n.Right) + w.pos(n.Pos()) + w.expr(n.Left()) + w.expr(n.Right()) case ir.OADDSTR: w.op(ir.OADDSTR) - w.pos(n.Pos) - w.exprList(n.List) + w.pos(n.Pos()) + w.exprList(n.List()) case ir.ODCLCONST: // if exporting, DCLCONST should just be removed as its usage @@ -1422,7 +1422,7 @@ func (w *exportWriter) expr(n *ir.Node) { default: base.Fatalf("cannot export %v (%d) node\n"+ - "\t==> please file an issue and assign to gri@", n.Op, int(n.Op)) + "\t==> please file an issue and assign to gri@", n.Op(), int(n.Op())) } } @@ -1450,8 +1450,8 @@ func (w *exportWriter) exprsOrNil(a, b *ir.Node) { func (w *exportWriter) elemList(list ir.Nodes) { w.uint64(uint64(list.Len())) for _, n := range list.Slice() { - w.selector(n.Sym) - w.expr(n.Left) + w.selector(n.Sym()) + w.expr(n.Left()) } } @@ -1464,11 +1464,11 @@ func (w *exportWriter) localName(n *ir.Node) { // PPARAM/PPARAMOUT, because we only want to include vargen in // non-param names. 
var v int32 - if n.Class() == ir.PAUTO || (n.Class() == ir.PAUTOHEAP && n.Name.Param.Stackcopy == nil) { - v = n.Name.Vargen + if n.Class() == ir.PAUTO || (n.Class() == ir.PAUTOHEAP && n.Name().Param.Stackcopy == nil) { + v = n.Name().Vargen } - w.localIdent(n.Sym, v) + w.localIdent(n.Sym(), v) } func (w *exportWriter) localIdent(s *types.Sym, v int32) { diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 84386140bb990..71063566659b2 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -42,7 +42,7 @@ var ( ) func expandDecl(n *ir.Node) { - if n.Op != ir.ONONAME { + if n.Op() != ir.ONONAME { return } @@ -56,7 +56,7 @@ func expandDecl(n *ir.Node) { } func expandInline(fn *ir.Node) { - if fn.Func.Inl.Body != nil { + if fn.Func().Inl.Body != nil { return } @@ -69,12 +69,12 @@ func expandInline(fn *ir.Node) { } func importReaderFor(n *ir.Node, importers map[*types.Sym]iimporterAndOffset) *importReader { - x, ok := importers[n.Sym] + x, ok := importers[n.Sym()] if !ok { return nil } - return x.p.newReader(x.off, n.Sym.Pkg) + return x.p.newReader(x.off, n.Sym().Pkg) } type intReader struct { @@ -282,8 +282,8 @@ func (r *importReader) setPkg() { } func (r *importReader) doDecl(n *ir.Node) { - if n.Op != ir.ONONAME { - base.Fatalf("doDecl: unexpected Op for %v: %v", n.Sym, n.Op) + if n.Op() != ir.ONONAME { + base.Fatalf("doDecl: unexpected Op for %v: %v", n.Sym(), n.Op()) } tag := r.byte() @@ -293,24 +293,24 @@ func (r *importReader) doDecl(n *ir.Node) { case 'A': typ := r.typ() - importalias(r.p.ipkg, pos, n.Sym, typ) + importalias(r.p.ipkg, pos, n.Sym(), typ) case 'C': typ := r.typ() val := r.value(typ) - importconst(r.p.ipkg, pos, n.Sym, typ, val) + importconst(r.p.ipkg, pos, n.Sym(), typ, val) case 'F': typ := r.signature(nil) - importfunc(r.p.ipkg, pos, n.Sym, typ) + importfunc(r.p.ipkg, pos, n.Sym(), typ) r.funcExt(n) case 'T': // Types can be recursive. We need to setup a stub // declaration before recursing. - t := importtype(r.p.ipkg, pos, n.Sym) + t := importtype(r.p.ipkg, pos, n.Sym()) // We also need to defer width calculations until // after the underlying type has been assigned. @@ -332,7 +332,7 @@ func (r *importReader) doDecl(n *ir.Node) { mtyp := r.signature(recv) m := newfuncnamel(mpos, methodSym(recv.Type, msym), new(ir.Func)) - m.Type = mtyp + m.SetType(mtyp) m.SetClass(ir.PFUNC) // methodSym already marked m.Sym as a function. @@ -350,7 +350,7 @@ func (r *importReader) doDecl(n *ir.Node) { case 'V': typ := r.typ() - importvar(r.p.ipkg, pos, n.Sym, typ) + importvar(r.p.ipkg, pos, n.Sym(), typ) r.varExt(n) default: @@ -500,13 +500,13 @@ func (r *importReader) typ1() *types.Type { // types. Therefore, this must be a package-scope // type. n := ir.AsNode(r.qualifiedIdent().PkgDef()) - if n.Op == ir.ONONAME { + if n.Op() == ir.ONONAME { expandDecl(n) } - if n.Op != ir.OTYPE { - base.Fatalf("expected OTYPE, got %v: %v, %v", n.Op, n.Sym, n) + if n.Op() != ir.OTYPE { + base.Fatalf("expected OTYPE, got %v: %v, %v", n.Op(), n.Sym(), n) } - return n.Type + return n.Type() case pointerType: return types.NewPtr(r.typ()) case sliceType: @@ -636,27 +636,27 @@ func (r *importReader) byte() byte { // Compiler-specific extensions. func (r *importReader) varExt(n *ir.Node) { - r.linkname(n.Sym) - r.symIdx(n.Sym) + r.linkname(n.Sym()) + r.symIdx(n.Sym()) } func (r *importReader) funcExt(n *ir.Node) { - r.linkname(n.Sym) - r.symIdx(n.Sym) + r.linkname(n.Sym()) + r.symIdx(n.Sym()) // Escape analysis. 
for _, fs := range &types.RecvsParams { - for _, f := range fs(n.Type).FieldSlice() { + for _, f := range fs(n.Type()).FieldSlice() { f.Note = r.string() } } // Inline body. if u := r.uint64(); u > 0 { - n.Func.Inl = &ir.Inline{ + n.Func().Inl = &ir.Inline{ Cost: int32(u - 1), } - n.Func.Endlineno = r.pos() + n.Func().Endlineno = r.pos() } } @@ -696,7 +696,7 @@ func (r *importReader) typeExt(t *types.Type) { var typeSymIdx = make(map[*types.Type][2]int64) func (r *importReader) doInline(n *ir.Node) { - if len(n.Func.Inl.Body) != 0 { + if len(n.Func().Inl.Body) != 0 { base.Fatalf("%v already has inline body", n) } @@ -712,15 +712,15 @@ func (r *importReader) doInline(n *ir.Node) { // functions). body = []*ir.Node{} } - n.Func.Inl.Body = body + n.Func().Inl.Body = body importlist = append(importlist, n) if base.Flag.E > 0 && base.Flag.LowerM > 2 { if base.Flag.LowerM > 3 { - fmt.Printf("inl body for %v %#v: %+v\n", n, n.Type, ir.AsNodes(n.Func.Inl.Body)) + fmt.Printf("inl body for %v %#v: %+v\n", n, n.Type(), ir.AsNodes(n.Func().Inl.Body)) } else { - fmt.Printf("inl body for %v %#v: %v\n", n, n.Type, ir.AsNodes(n.Func.Inl.Body)) + fmt.Printf("inl body for %v %#v: %v\n", n, n.Type(), ir.AsNodes(n.Func().Inl.Body)) } } } @@ -748,8 +748,8 @@ func (r *importReader) stmtList() []*ir.Node { break } // OBLOCK nodes may be created when importing ODCL nodes - unpack them - if n.Op == ir.OBLOCK { - list = append(list, n.List.Slice()...) + if n.Op() == ir.OBLOCK { + list = append(list, n.List().Slice()...) } else { list = append(list, n) } @@ -759,22 +759,22 @@ func (r *importReader) stmtList() []*ir.Node { } func (r *importReader) caseList(sw *ir.Node) []*ir.Node { - namedTypeSwitch := sw.Op == ir.OSWITCH && sw.Left != nil && sw.Left.Op == ir.OTYPESW && sw.Left.Left != nil + namedTypeSwitch := sw.Op() == ir.OSWITCH && sw.Left() != nil && sw.Left().Op() == ir.OTYPESW && sw.Left().Left() != nil cases := make([]*ir.Node, r.uint64()) for i := range cases { cas := ir.NodAt(r.pos(), ir.OCASE, nil, nil) - cas.List.Set(r.stmtList()) + cas.PtrList().Set(r.stmtList()) if namedTypeSwitch { // Note: per-case variables will have distinct, dotted // names after import. That's okay: swt.go only needs // Sym for diagnostics anyway. 
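The dotted per-case names mentioned above come from re-declaring the type-switch variable once per case. A sketch of the source shape the importer is rebuilding (the function is invented for illustration):

package main

import "fmt"

// describe contains an OTYPESW: x is logically a distinct variable in
// each case, which is why the importer declares a fresh caseVar per case
// and points its Defn at the switch guard.
func describe(v interface{}) string {
	switch x := v.(type) {
	case int:
		return fmt.Sprintf("int %d", x)
	case string:
		return fmt.Sprintf("string %q", x)
	default:
		return fmt.Sprintf("other %T", x)
	}
}

func main() {
	fmt.Println(describe(42), describe("go"))
}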
- caseVar := ir.NewNameAt(cas.Pos, r.ident()) + caseVar := ir.NewNameAt(cas.Pos(), r.ident()) declare(caseVar, dclcontext) - cas.Rlist.Set1(caseVar) - caseVar.Name.Defn = sw.Left + cas.PtrRlist().Set1(caseVar) + caseVar.Name().Defn = sw.Left() } - cas.Nbody.Set(r.stmtList()) + cas.PtrBody().Set(r.stmtList()) cases[i] = cas } return cases @@ -794,7 +794,7 @@ func (r *importReader) exprList() []*ir.Node { func (r *importReader) expr() *ir.Node { n := r.node() - if n != nil && n.Op == ir.OBLOCK { + if n != nil && n.Op() == ir.OBLOCK { base.Fatalf("unexpected block node: %v", n) } return n @@ -821,7 +821,7 @@ func (r *importReader) node() *ir.Node { n = ir.NewLiteral(r.value(typ)) } n = npos(pos, n) - n.Type = typ + n.SetType(typ) return n case ir.ONONAME: @@ -839,10 +839,10 @@ func (r *importReader) node() *ir.Node { case ir.OTYPESW: n := ir.NodAt(r.pos(), ir.OTYPESW, nil, nil) if s := r.ident(); s != nil { - n.Left = npos(n.Pos, newnoname(s)) + n.SetLeft(npos(n.Pos(), newnoname(s))) } right, _ := r.exprsOrNil() - n.Right = right + n.SetRight(right) return n // case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC: @@ -859,7 +859,7 @@ func (r *importReader) node() *ir.Node { savedlineno := base.Pos base.Pos = r.pos() n := ir.NodAt(base.Pos, ir.OCOMPLIT, nil, typenod(r.typ())) - n.List.Set(r.elemList()) // special handling of field names + n.PtrList().Set(r.elemList()) // special handling of field names base.Pos = savedlineno return n @@ -868,7 +868,7 @@ func (r *importReader) node() *ir.Node { case ir.OCOMPLIT: n := ir.NodAt(r.pos(), ir.OCOMPLIT, nil, typenod(r.typ())) - n.List.Set(r.exprList()) + n.PtrList().Set(r.exprList()) return n case ir.OKEY: @@ -894,7 +894,7 @@ func (r *importReader) node() *ir.Node { case ir.ODOTTYPE: n := ir.NodAt(r.pos(), ir.ODOTTYPE, r.expr(), nil) - n.Type = r.typ() + n.SetType(r.typ()) return n // case OINDEX, OINDEXMAP, OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR: @@ -907,7 +907,7 @@ func (r *importReader) node() *ir.Node { n := ir.NodAt(r.pos(), op, r.expr(), nil) low, high := r.exprsOrNil() var max *ir.Node - if n.Op.IsSlice3() { + if n.Op().IsSlice3() { max = r.expr() } n.SetSliceBounds(low, high, max) @@ -918,12 +918,12 @@ func (r *importReader) node() *ir.Node { case ir.OCONV: n := ir.NodAt(r.pos(), ir.OCONV, r.expr(), nil) - n.Type = r.typ() + n.SetType(r.typ()) return n case ir.OCOPY, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN: n := npos(r.pos(), builtinCall(op)) - n.List.Set(r.exprList()) + n.PtrList().Set(r.exprList()) if op == ir.OAPPEND { n.SetIsDDD(r.bool()) } @@ -934,16 +934,16 @@ func (r *importReader) node() *ir.Node { case ir.OCALL: n := ir.NodAt(r.pos(), ir.OCALL, nil, nil) - n.Ninit.Set(r.stmtList()) - n.Left = r.expr() - n.List.Set(r.exprList()) + n.PtrInit().Set(r.stmtList()) + n.SetLeft(r.expr()) + n.PtrList().Set(r.exprList()) n.SetIsDDD(r.bool()) return n case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE: n := npos(r.pos(), builtinCall(ir.OMAKE)) - n.List.Append(typenod(r.typ())) - n.List.Append(r.exprList()...) + n.PtrList().Append(typenod(r.typ())) + n.PtrList().Append(r.exprList()...) 
return n // unary expressions @@ -984,12 +984,12 @@ func (r *importReader) node() *ir.Node { case ir.OASOP: n := ir.NodAt(r.pos(), ir.OASOP, nil, nil) n.SetSubOp(r.op()) - n.Left = r.expr() + n.SetLeft(r.expr()) if !r.bool() { - n.Right = nodintconst(1) + n.SetRight(nodintconst(1)) n.SetImplicit(true) } else { - n.Right = r.expr() + n.SetRight(r.expr()) } return n @@ -998,13 +998,13 @@ func (r *importReader) node() *ir.Node { case ir.OAS2: n := ir.NodAt(r.pos(), ir.OAS2, nil, nil) - n.List.Set(r.exprList()) - n.Rlist.Set(r.exprList()) + n.PtrList().Set(r.exprList()) + n.PtrRlist().Set(r.exprList()) return n case ir.ORETURN: n := ir.NodAt(r.pos(), ir.ORETURN, nil, nil) - n.List.Set(r.exprList()) + n.PtrList().Set(r.exprList()) return n // case ORETJMP: @@ -1015,34 +1015,34 @@ func (r *importReader) node() *ir.Node { case ir.OIF: n := ir.NodAt(r.pos(), ir.OIF, nil, nil) - n.Ninit.Set(r.stmtList()) - n.Left = r.expr() - n.Nbody.Set(r.stmtList()) - n.Rlist.Set(r.stmtList()) + n.PtrInit().Set(r.stmtList()) + n.SetLeft(r.expr()) + n.PtrBody().Set(r.stmtList()) + n.PtrRlist().Set(r.stmtList()) return n case ir.OFOR: n := ir.NodAt(r.pos(), ir.OFOR, nil, nil) - n.Ninit.Set(r.stmtList()) + n.PtrInit().Set(r.stmtList()) left, right := r.exprsOrNil() - n.Left = left - n.Right = right - n.Nbody.Set(r.stmtList()) + n.SetLeft(left) + n.SetRight(right) + n.PtrBody().Set(r.stmtList()) return n case ir.ORANGE: n := ir.NodAt(r.pos(), ir.ORANGE, nil, nil) - n.List.Set(r.stmtList()) - n.Right = r.expr() - n.Nbody.Set(r.stmtList()) + n.PtrList().Set(r.stmtList()) + n.SetRight(r.expr()) + n.PtrBody().Set(r.stmtList()) return n case ir.OSELECT, ir.OSWITCH: n := ir.NodAt(r.pos(), op, nil, nil) - n.Ninit.Set(r.stmtList()) + n.PtrInit().Set(r.stmtList()) left, _ := r.exprsOrNil() - n.Left = left - n.List.Set(r.caseList(n)) + n.SetLeft(left) + n.PtrList().Set(r.caseList(n)) return n // case OCASE: @@ -1056,7 +1056,7 @@ func (r *importReader) node() *ir.Node { pos := r.pos() left, _ := r.exprsOrNil() if left != nil { - left = NewName(left.Sym) + left = NewName(left.Sym()) } return ir.NodAt(pos, op, left, nil) @@ -1065,7 +1065,7 @@ func (r *importReader) node() *ir.Node { case ir.OGOTO, ir.OLABEL: n := ir.NodAt(r.pos(), op, nil, nil) - n.Sym = lookup(r.string()) + n.SetSym(lookup(r.string())) return n case ir.OEND: diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index f3c302f6bee26..b66ee6f9533d3 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -46,16 +46,16 @@ func fninit(n []*ir.Node) { // Make a function that contains all the initialization statements. if len(nf) > 0 { - base.Pos = nf[0].Pos // prolog/epilog gets line number of first init stmt + base.Pos = nf[0].Pos() // prolog/epilog gets line number of first init stmt initializers := lookup("init") fn := dclfunc(initializers, ir.Nod(ir.OTFUNC, nil, nil)) - for _, dcl := range initTodo.Func.Dcl { - dcl.Name.Curfn = fn + for _, dcl := range initTodo.Func().Dcl { + dcl.Name().Curfn = fn } - fn.Func.Dcl = append(fn.Func.Dcl, initTodo.Func.Dcl...) - initTodo.Func.Dcl = nil + fn.Func().Dcl = append(fn.Func().Dcl, initTodo.Func().Dcl...) 
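// The append above plus the clearing below form the usual ownership
// hand-off idiom (generic sketch, names hypothetical):
//
//	dst.Dcl = append(dst.Dcl, src.Dcl...)
//	src.Dcl = nil // src must no longer reach these decls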
+ initTodo.Func().Dcl = nil - fn.Nbody.Set(nf) + fn.PtrBody().Set(nf) funcbody() fn = typecheck(fn, ctxStmt) @@ -65,7 +65,7 @@ func fninit(n []*ir.Node) { xtop = append(xtop, fn) fns = append(fns, initializers.Linksym()) } - if initTodo.Func.Dcl != nil { + if initTodo.Func().Dcl != nil { // We only generate temps using initTodo if there // are package-scope initialization statements, so // something's weird if we get here. @@ -76,9 +76,9 @@ func fninit(n []*ir.Node) { // Record user init functions. for i := 0; i < renameinitgen; i++ { s := lookupN("init.", i) - fn := ir.AsNode(s.Def).Name.Defn + fn := ir.AsNode(s.Def).Name().Defn // Skip init functions with empty bodies. - if fn.Nbody.Len() == 1 && fn.Nbody.First().Op == ir.OEMPTY { + if fn.Body().Len() == 1 && fn.Body().First().Op() == ir.OEMPTY { continue } fns = append(fns, s.Linksym()) @@ -91,7 +91,7 @@ func fninit(n []*ir.Node) { // Make an .inittask structure. sym := lookup(".inittask") nn := NewName(sym) - nn.Type = types.Types[types.TUINT8] // fake type + nn.SetType(types.Types[types.TUINT8]) // fake type nn.SetClass(ir.PEXTERN) sym.Def = ir.AsTypesNode(nn) exportsym(nn) diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go index 62294b5a90e53..71da72f0cfe93 100644 --- a/src/cmd/compile/internal/gc/initorder.go +++ b/src/cmd/compile/internal/gc/initorder.go @@ -86,7 +86,7 @@ func initOrder(l []*ir.Node) []*ir.Node { // Process all package-level assignment in declaration order. for _, n := range l { - switch n.Op { + switch n.Op() { case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV: o.processAssign(n) o.flushReady(s.staticInit) @@ -100,7 +100,7 @@ func initOrder(l []*ir.Node) []*ir.Node { // Check that all assignments are now Done; if not, there must // have been a dependency cycle. for _, n := range l { - switch n.Op { + switch n.Op() { case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV: if n.Initorder() != InitDone { // If there have already been errors @@ -126,27 +126,27 @@ func initOrder(l []*ir.Node) []*ir.Node { } func (o *InitOrder) processAssign(n *ir.Node) { - if n.Initorder() != InitNotStarted || n.Xoffset != types.BADWIDTH { - base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Xoffset) + if n.Initorder() != InitNotStarted || n.Offset() != types.BADWIDTH { + base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Offset()) } n.SetInitorder(InitPending) - n.Xoffset = 0 + n.SetOffset(0) // Compute number of variable dependencies and build the // inverse dependency ("blocking") graph. for dep := range collectDeps(n, true) { - defn := dep.Name.Defn + defn := dep.Name().Defn // Skip dependencies on functions (PFUNC) and // variables already initialized (InitDone). 
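// Offset is repurposed here as a pending-dependency counter, yielding a
// Kahn-style topological order: every unresolved dependency bumps the
// counter and records an inverse ("blocking") edge, and an assignment
// becomes schedulable once its counter reaches zero. Roughly (names
// hypothetical):
//
//	indegree[n]++ // one more blocker
//	blocking[dep] = append(blocking[dep], n)
//	// later, when dep finishes initializing:
//	if indegree[n]--; indegree[n] == 0 {
//		heap.Push(&ready, n)
//	}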
if dep.Class() != ir.PEXTERN || defn.Initorder() == InitDone { continue } - n.Xoffset = n.Xoffset + 1 + n.SetOffset(n.Offset() + 1) o.blocking[defn] = append(o.blocking[defn], n) } - if n.Xoffset == 0 { + if n.Offset() == 0 { heap.Push(&o.ready, n) } } @@ -157,20 +157,20 @@ func (o *InitOrder) processAssign(n *ir.Node) { func (o *InitOrder) flushReady(initialize func(*ir.Node)) { for o.ready.Len() != 0 { n := heap.Pop(&o.ready).(*ir.Node) - if n.Initorder() != InitPending || n.Xoffset != 0 { - base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Xoffset) + if n.Initorder() != InitPending || n.Offset() != 0 { + base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Offset()) } initialize(n) n.SetInitorder(InitDone) - n.Xoffset = types.BADWIDTH + n.SetOffset(types.BADWIDTH) blocked := o.blocking[n] delete(o.blocking, n) for _, m := range blocked { - m.Xoffset = m.Xoffset - 1 - if m.Xoffset == 0 { + m.SetOffset(m.Offset() - 1) + if m.Offset() == 0 { heap.Push(&o.ready, m) } } @@ -196,14 +196,14 @@ func findInitLoopAndExit(n *ir.Node, path *[]*ir.Node) { // There might be multiple loops involving n; by sorting // references, we deterministically pick the one reported. - refers := collectDeps(n.Name.Defn, false).Sorted(func(ni, nj *ir.Node) bool { - return ni.Pos.Before(nj.Pos) + refers := collectDeps(n.Name().Defn, false).Sorted(func(ni, nj *ir.Node) bool { + return ni.Pos().Before(nj.Pos()) }) *path = append(*path, n) for _, ref := range refers { // Short-circuit variables that were initialized. - if ref.Class() == ir.PEXTERN && ref.Name.Defn.Initorder() == InitDone { + if ref.Class() == ir.PEXTERN && ref.Name().Defn.Initorder() == InitDone { continue } @@ -220,7 +220,7 @@ func reportInitLoopAndExit(l []*ir.Node) { // the start. i := -1 for j, n := range l { - if n.Class() == ir.PEXTERN && (i == -1 || n.Pos.Before(l[i].Pos)) { + if n.Class() == ir.PEXTERN && (i == -1 || n.Pos().Before(l[i].Pos())) { i = j } } @@ -242,7 +242,7 @@ func reportInitLoopAndExit(l []*ir.Node) { } fmt.Fprintf(&msg, "\t%v: %v", ir.Line(l[0]), l[0]) - base.ErrorfAt(l[0].Pos, msg.String()) + base.ErrorfAt(l[0].Pos(), msg.String()) base.ErrorExit() } @@ -252,15 +252,15 @@ func reportInitLoopAndExit(l []*ir.Node) { // upon functions (but not variables). func collectDeps(n *ir.Node, transitive bool) ir.NodeSet { d := initDeps{transitive: transitive} - switch n.Op { + switch n.Op() { case ir.OAS: - d.inspect(n.Right) + d.inspect(n.Right()) case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV: - d.inspect(n.Right) + d.inspect(n.Right()) case ir.ODCLFUNC: - d.inspectList(n.Nbody) + d.inspectList(n.Body()) default: - base.Fatalf("unexpected Op: %v", n.Op) + base.Fatalf("unexpected Op: %v", n.Op()) } return d.seen } @@ -276,7 +276,7 @@ func (d *initDeps) inspectList(l ir.Nodes) { ir.InspectList(l, d.visit) } // visit calls foundDep on any package-level functions or variables // referenced by n, if any. func (d *initDeps) visit(n *ir.Node) bool { - switch n.Op { + switch n.Op() { case ir.OMETHEXPR: d.foundDep(methodExprName(n)) return false @@ -288,7 +288,7 @@ func (d *initDeps) visit(n *ir.Node) bool { } case ir.OCLOSURE: - d.inspectList(n.Func.Decl.Nbody) + d.inspectList(n.Func().Decl.Body()) case ir.ODOTMETH, ir.OCALLPART: d.foundDep(methodExprName(n)) @@ -308,7 +308,7 @@ func (d *initDeps) foundDep(n *ir.Node) { // Names without definitions aren't interesting as far as // initialization ordering goes. 
- if n.Name.Defn == nil { + if n.Name().Defn == nil { return } @@ -317,7 +317,7 @@ func (d *initDeps) foundDep(n *ir.Node) { } d.seen.Add(n) if d.transitive && n.Class() == ir.PFUNC { - d.inspectList(n.Name.Defn.Nbody) + d.inspectList(n.Name().Defn.Body()) } } @@ -330,9 +330,11 @@ func (d *initDeps) foundDep(n *ir.Node) { // but both OAS nodes use the "=" token's position as their Pos. type declOrder []*ir.Node -func (s declOrder) Len() int { return len(s) } -func (s declOrder) Less(i, j int) bool { return firstLHS(s[i]).Pos.Before(firstLHS(s[j]).Pos) } -func (s declOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s declOrder) Len() int { return len(s) } +func (s declOrder) Less(i, j int) bool { + return firstLHS(s[i]).Pos().Before(firstLHS(s[j]).Pos()) +} +func (s declOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(*ir.Node)) } func (s *declOrder) Pop() interface{} { @@ -344,13 +346,13 @@ func (s *declOrder) Pop() interface{} { // firstLHS returns the first expression on the left-hand side of // assignment n. func firstLHS(n *ir.Node) *ir.Node { - switch n.Op { + switch n.Op() { case ir.OAS: - return n.Left + return n.Left() case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2RECV, ir.OAS2MAPR: - return n.List.First() + return n.List().First() } - base.Fatalf("unexpected Op: %v", n.Op) + base.Fatalf("unexpected Op: %v", n.Op()) return nil } diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index f982b43fb92cb..f82c1282657a4 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -56,19 +56,19 @@ const ( func fnpkg(fn *ir.Node) *types.Pkg { if ir.IsMethod(fn) { // method - rcvr := fn.Type.Recv().Type + rcvr := fn.Type().Recv().Type if rcvr.IsPtr() { rcvr = rcvr.Elem() } if rcvr.Sym == nil { - base.Fatalf("receiver with no sym: [%v] %L (%v)", fn.Sym, fn, rcvr) + base.Fatalf("receiver with no sym: [%v] %L (%v)", fn.Sym(), fn, rcvr) } return rcvr.Sym.Pkg } // non-method - return fn.Sym.Pkg + return fn.Sym().Pkg } // Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck @@ -89,12 +89,12 @@ func typecheckinl(fn *ir.Node) { } if base.Flag.LowerM > 2 || base.Debug.Export != 0 { - fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym, fn, ir.AsNodes(fn.Func.Inl.Body)) + fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym(), fn, ir.AsNodes(fn.Func().Inl.Body)) } savefn := Curfn Curfn = fn - typecheckslice(fn.Func.Inl.Body, ctxStmt) + typecheckslice(fn.Func().Inl.Body, ctxStmt) Curfn = savefn // During expandInline (which imports fn.Func.Inl.Body), @@ -102,8 +102,8 @@ func typecheckinl(fn *ir.Node) { // to fn.Func.Inl.Dcl for consistency with how local functions // behave. (Append because typecheckinl may be called multiple // times.) - fn.Func.Inl.Dcl = append(fn.Func.Inl.Dcl, fn.Func.Dcl...) - fn.Func.Dcl = nil + fn.Func().Inl.Dcl = append(fn.Func().Inl.Dcl, fn.Func().Dcl...) + fn.Func().Dcl = nil base.Pos = lno } @@ -112,10 +112,10 @@ func typecheckinl(fn *ir.Node) { // If so, caninl saves fn->nbody in fn->inl and substitutes it with a copy. // fn and ->nbody will already have been typechecked. 
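// The checks at the top of caninl are ordered gates: the first one that
// fires becomes the -m diagnostic. A pragma is enough to opt out, e.g.:
//
//	//go:noinline
//	func coldPath() { /* ... */ } // reported as "marked go:noinline"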
func caninl(fn *ir.Node) { - if fn.Op != ir.ODCLFUNC { + if fn.Op() != ir.ODCLFUNC { base.Fatalf("caninl %v", fn) } - if fn.Func.Nname == nil { + if fn.Func().Nname == nil { base.Fatalf("caninl no nname %+v", fn) } @@ -124,43 +124,43 @@ func caninl(fn *ir.Node) { defer func() { if reason != "" { if base.Flag.LowerM > 1 { - fmt.Printf("%v: cannot inline %v: %s\n", ir.Line(fn), fn.Func.Nname, reason) + fmt.Printf("%v: cannot inline %v: %s\n", ir.Line(fn), fn.Func().Nname, reason) } if logopt.Enabled() { - logopt.LogOpt(fn.Pos, "cannotInlineFunction", "inline", ir.FuncName(fn), reason) + logopt.LogOpt(fn.Pos(), "cannotInlineFunction", "inline", ir.FuncName(fn), reason) } } }() } // If marked "go:noinline", don't inline - if fn.Func.Pragma&ir.Noinline != 0 { + if fn.Func().Pragma&ir.Noinline != 0 { reason = "marked go:noinline" return } // If marked "go:norace" and -race compilation, don't inline. - if base.Flag.Race && fn.Func.Pragma&ir.Norace != 0 { + if base.Flag.Race && fn.Func().Pragma&ir.Norace != 0 { reason = "marked go:norace with -race compilation" return } // If marked "go:nocheckptr" and -d checkptr compilation, don't inline. - if base.Debug.Checkptr != 0 && fn.Func.Pragma&ir.NoCheckPtr != 0 { + if base.Debug.Checkptr != 0 && fn.Func().Pragma&ir.NoCheckPtr != 0 { reason = "marked go:nocheckptr" return } // If marked "go:cgo_unsafe_args", don't inline, since the // function makes assumptions about its argument frame layout. - if fn.Func.Pragma&ir.CgoUnsafeArgs != 0 { + if fn.Func().Pragma&ir.CgoUnsafeArgs != 0 { reason = "marked go:cgo_unsafe_args" return } // If marked as "go:uintptrescapes", don't inline, since the // escape information is lost during inlining. - if fn.Func.Pragma&ir.UintptrEscapes != 0 { + if fn.Func().Pragma&ir.UintptrEscapes != 0 { reason = "marked as having an escaping uintptr argument" return } @@ -169,13 +169,13 @@ func caninl(fn *ir.Node) { // granularity, so inlining yeswritebarrierrec functions can // confuse it (#22342). As a workaround, disallow inlining // them for now. - if fn.Func.Pragma&ir.Yeswritebarrierrec != 0 { + if fn.Func().Pragma&ir.Yeswritebarrierrec != 0 { reason = "marked go:yeswritebarrierrec" return } // If fn has no body (is defined outside of Go), cannot inline it. 
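// A body-less declaration is typically backed by assembly or the linker,
// so there is nothing to copy into an Inl body, e.g.:
//
//	func add(x, y int64) int64 // implemented in add_amd64.s; Nbody is empty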
- if fn.Nbody.Len() == 0 { + if fn.Body().Len() == 0 { reason = "no function body" return } @@ -184,11 +184,11 @@ func caninl(fn *ir.Node) { base.Fatalf("caninl on non-typechecked function %v", fn) } - n := fn.Func.Nname - if n.Func.InlinabilityChecked() { + n := fn.Func().Nname + if n.Func().InlinabilityChecked() { return } - defer n.Func.SetInlinabilityChecked(true) + defer n.Func().SetInlinabilityChecked(true) cc := int32(inlineExtraCallCost) if base.Flag.LowerL == 4 { @@ -209,7 +209,7 @@ func caninl(fn *ir.Node) { extraCallCost: cc, usedLocals: make(map[*ir.Node]bool), } - if visitor.visitList(fn.Nbody) { + if visitor.visitList(fn.Body()) { reason = visitor.reason return } @@ -218,19 +218,19 @@ func caninl(fn *ir.Node) { return } - n.Func.Inl = &ir.Inline{ + n.Func().Inl = &ir.Inline{ Cost: inlineMaxBudget - visitor.budget, - Dcl: inlcopylist(pruneUnusedAutos(n.Name.Defn.Func.Dcl, &visitor)), - Body: inlcopylist(fn.Nbody.Slice()), + Dcl: inlcopylist(pruneUnusedAutos(n.Name().Defn.Func().Dcl, &visitor)), + Body: inlcopylist(fn.Body().Slice()), } if base.Flag.LowerM > 1 { - fmt.Printf("%v: can inline %#v with cost %d as: %#v { %#v }\n", ir.Line(fn), n, inlineMaxBudget-visitor.budget, fn.Type, ir.AsNodes(n.Func.Inl.Body)) + fmt.Printf("%v: can inline %#v with cost %d as: %#v { %#v }\n", ir.Line(fn), n, inlineMaxBudget-visitor.budget, fn.Type(), ir.AsNodes(n.Func().Inl.Body)) } else if base.Flag.LowerM != 0 { fmt.Printf("%v: can inline %v\n", ir.Line(fn), n) } if logopt.Enabled() { - logopt.LogOpt(fn.Pos, "canInlineFunction", "inline", ir.FuncName(fn), fmt.Sprintf("cost: %d", inlineMaxBudget-visitor.budget)) + logopt.LogOpt(fn.Pos(), "canInlineFunction", "inline", ir.FuncName(fn), fmt.Sprintf("cost: %d", inlineMaxBudget-visitor.budget)) } } @@ -240,28 +240,28 @@ func inlFlood(n *ir.Node) { if n == nil { return } - if n.Op != ir.ONAME || n.Class() != ir.PFUNC { - base.Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op, n.Class()) + if n.Op() != ir.ONAME || n.Class() != ir.PFUNC { + base.Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op(), n.Class()) } - if n.Func == nil { + if n.Func() == nil { base.Fatalf("inlFlood: missing Func on %v", n) } - if n.Func.Inl == nil { + if n.Func().Inl == nil { return } - if n.Func.ExportInline() { + if n.Func().ExportInline() { return } - n.Func.SetExportInline(true) + n.Func().SetExportInline(true) typecheckinl(n) // Recursively identify all referenced functions for // reexport. We want to include even non-called functions, // because after inlining they might be callable. - ir.InspectList(ir.AsNodes(n.Func.Inl.Body), func(n *ir.Node) bool { - switch n.Op { + ir.InspectList(ir.AsNodes(n.Func().Inl.Body), func(n *ir.Node) bool { + switch n.Op() { case ir.OMETHEXPR: inlFlood(methodExprName(n)) @@ -318,15 +318,15 @@ func (v *hairyVisitor) visit(n *ir.Node) bool { return false } - switch n.Op { + switch n.Op() { // Call is okay if inlinable and we have the budget for the body. case ir.OCALLFUNC: // Functions that call runtime.getcaller{pc,sp} can not be inlined // because getcaller{pc,sp} expect a pointer to the caller's first argument. // // runtime.throw is a "cheap call" like panic in normal code. 
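// The getcaller{pc,sp} restriction exists because those intrinsics are
// defined relative to their caller's frame, so inlining would change the
// answer, e.g. (sketch):
//
//	func who() uintptr { return getcallerpc() }
//	// if who were inlined into g, it would report g's caller, not g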
- if n.Left.Op == ir.ONAME && n.Left.Class() == ir.PFUNC && isRuntimePkg(n.Left.Sym.Pkg) { - fn := n.Left.Sym.Name + if n.Left().Op() == ir.ONAME && n.Left().Class() == ir.PFUNC && isRuntimePkg(n.Left().Sym().Pkg) { + fn := n.Left().Sym().Name if fn == "getcallerpc" || fn == "getcallersp" { v.reason = "call to " + fn return true @@ -342,8 +342,8 @@ func (v *hairyVisitor) visit(n *ir.Node) bool { break } - if fn := inlCallee(n.Left); fn != nil && fn.Func.Inl != nil { - v.budget -= fn.Func.Inl.Cost + if fn := inlCallee(n.Left()); fn != nil && fn.Func().Inl != nil { + v.budget -= fn.Func().Inl.Cost break } @@ -352,12 +352,12 @@ func (v *hairyVisitor) visit(n *ir.Node) bool { // Call is okay if inlinable and we have the budget for the body. case ir.OCALLMETH: - t := n.Left.Type + t := n.Left().Type() if t == nil { - base.Fatalf("no function type for [%p] %+v\n", n.Left, n.Left) + base.Fatalf("no function type for [%p] %+v\n", n.Left(), n.Left()) } - if isRuntimePkg(n.Left.Sym.Pkg) { - fn := n.Left.Sym.Name + if isRuntimePkg(n.Left().Sym().Pkg) { + fn := n.Left().Sym().Name if fn == "heapBits.nextArena" { // Special case: explicitly allow // mid-stack inlining of @@ -367,7 +367,7 @@ func (v *hairyVisitor) visit(n *ir.Node) bool { break } } - if inlfn := methodExprName(n.Left).Func; inlfn.Inl != nil { + if inlfn := methodExprName(n.Left()).Func(); inlfn.Inl != nil { v.budget -= inlfn.Inl.Cost break } @@ -395,7 +395,7 @@ func (v *hairyVisitor) visit(n *ir.Node) bool { ir.ODEFER, ir.ODCLTYPE, // can't print yet ir.ORETJMP: - v.reason = "unhandled op " + n.Op.String() + v.reason = "unhandled op " + n.Op().String() return true case ir.OAPPEND: @@ -413,16 +413,16 @@ func (v *hairyVisitor) visit(n *ir.Node) bool { } case ir.OBREAK, ir.OCONTINUE: - if n.Sym != nil { + if n.Sym() != nil { // Should have short-circuited due to labeledControl above. base.Fatalf("unexpected labeled break/continue: %v", n) } case ir.OIF: - if ir.IsConst(n.Left, constant.Bool) { + if ir.IsConst(n.Left(), constant.Bool) { // This if and the condition cost nothing. 
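// Only the OIF node and its condition are exempted; the branch bodies
// are still costed. That keeps const-guarded code from paying for the
// guard itself, e.g.:
//
//	const debug = false
//	if debug { println("trace") } // the if and "debug" add no cost;
//	                              // deadcode later drops the dead arm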
- return v.visitList(n.Ninit) || v.visitList(n.Nbody) || - v.visitList(n.Rlist) + return v.visitList(n.Init()) || v.visitList(n.Body()) || + v.visitList(n.Rlist()) } case ir.ONAME: @@ -439,9 +439,9 @@ func (v *hairyVisitor) visit(n *ir.Node) bool { return true } - return v.visit(n.Left) || v.visit(n.Right) || - v.visitList(n.List) || v.visitList(n.Rlist) || - v.visitList(n.Ninit) || v.visitList(n.Nbody) + return v.visit(n.Left()) || v.visit(n.Right()) || + v.visitList(n.List()) || v.visitList(n.Rlist()) || + v.visitList(n.Init()) || v.visitList(n.Body()) } // inlcopylist (together with inlcopy) recursively copies a list of nodes, except @@ -460,21 +460,21 @@ func inlcopy(n *ir.Node) *ir.Node { return nil } - switch n.Op { + switch n.Op() { case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.ONIL: return n } m := ir.Copy(n) - if n.Op != ir.OCALLPART && m.Func != nil { + if n.Op() != ir.OCALLPART && m.Func() != nil { base.Fatalf("unexpected Func: %v", m) } - m.Left = inlcopy(n.Left) - m.Right = inlcopy(n.Right) - m.List.Set(inlcopylist(n.List.Slice())) - m.Rlist.Set(inlcopylist(n.Rlist.Slice())) - m.Ninit.Set(inlcopylist(n.Ninit.Slice())) - m.Nbody.Set(inlcopylist(n.Nbody.Slice())) + m.SetLeft(inlcopy(n.Left())) + m.SetRight(inlcopy(n.Right())) + m.PtrList().Set(inlcopylist(n.List().Slice())) + m.PtrRlist().Set(inlcopylist(n.Rlist().Slice())) + m.PtrInit().Set(inlcopylist(n.Init().Slice())) + m.PtrBody().Set(inlcopylist(n.Body().Slice())) return m } @@ -484,18 +484,18 @@ func countNodes(n *ir.Node) int { return 0 } cnt := 1 - cnt += countNodes(n.Left) - cnt += countNodes(n.Right) - for _, n1 := range n.Ninit.Slice() { + cnt += countNodes(n.Left()) + cnt += countNodes(n.Right()) + for _, n1 := range n.Init().Slice() { cnt += countNodes(n1) } - for _, n1 := range n.Nbody.Slice() { + for _, n1 := range n.Body().Slice() { cnt += countNodes(n1) } - for _, n1 := range n.List.Slice() { + for _, n1 := range n.List().Slice() { cnt += countNodes(n1) } - for _, n1 := range n.Rlist.Slice() { + for _, n1 := range n.Rlist().Slice() { cnt += countNodes(n1) } return cnt @@ -526,21 +526,21 @@ func inlcalls(fn *ir.Node) { // Turn an OINLCALL into a statement. func inlconv2stmt(n *ir.Node) { - n.Op = ir.OBLOCK + n.SetOp(ir.OBLOCK) // n->ninit stays - n.List.Set(n.Nbody.Slice()) + n.PtrList().Set(n.Body().Slice()) - n.Nbody.Set(nil) - n.Rlist.Set(nil) + n.PtrBody().Set(nil) + n.PtrRlist().Set(nil) } // Turn an OINLCALL into a single valued expression. // The result of inlconv2expr MUST be assigned back to n, e.g. // n.Left = inlconv2expr(n.Left) func inlconv2expr(n *ir.Node) *ir.Node { - r := n.Rlist.First() - return addinit(r, append(n.Ninit.Slice(), n.Nbody.Slice()...)) + r := n.Rlist().First() + return addinit(r, append(n.Init().Slice(), n.Body().Slice()...)) } // Turn the rlist (with the return values) of the OINLCALL in @@ -549,12 +549,12 @@ func inlconv2expr(n *ir.Node) *ir.Node { // order will be preserved Used in return, oas2func and call // statements. 
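// In multi-value position the OINLCALL's result temporaries are spliced
// straight into the surrounding statement, e.g.:
//
//	a, b := f(x) // with f inlined: an OAS2 whose Rlist holds f's
//	             // result temps (~R0, ~R1)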
func inlconv2list(n *ir.Node) []*ir.Node { - if n.Op != ir.OINLCALL || n.Rlist.Len() == 0 { + if n.Op() != ir.OINLCALL || n.Rlist().Len() == 0 { base.Fatalf("inlconv2list %+v\n", n) } - s := n.Rlist.Slice() - s[0] = addinit(s[0], append(n.Ninit.Slice(), n.Nbody.Slice()...)) + s := n.Rlist().Slice() + s[0] = addinit(s[0], append(n.Init().Slice(), n.Body().Slice()...)) return s } @@ -583,11 +583,11 @@ func inlnode(n *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node { return n } - switch n.Op { + switch n.Op() { case ir.ODEFER, ir.OGO: - switch n.Left.Op { + switch n.Left().Op() { case ir.OCALLFUNC, ir.OCALLMETH: - n.Left.SetNoInline(true) + n.Left().SetNoInline(true) } // TODO do them here (or earlier), @@ -597,61 +597,61 @@ func inlnode(n *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node { case ir.OCALLMETH: // Prevent inlining some reflect.Value methods when using checkptr, // even when package reflect was compiled without it (#35073). - if s := n.Left.Sym; base.Debug.Checkptr != 0 && isReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") { + if s := n.Left().Sym(); base.Debug.Checkptr != 0 && isReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") { return n } } lno := setlineno(n) - inlnodelist(n.Ninit, maxCost, inlMap) - for _, n1 := range n.Ninit.Slice() { - if n1.Op == ir.OINLCALL { + inlnodelist(n.Init(), maxCost, inlMap) + for _, n1 := range n.Init().Slice() { + if n1.Op() == ir.OINLCALL { inlconv2stmt(n1) } } - n.Left = inlnode(n.Left, maxCost, inlMap) - if n.Left != nil && n.Left.Op == ir.OINLCALL { - n.Left = inlconv2expr(n.Left) + n.SetLeft(inlnode(n.Left(), maxCost, inlMap)) + if n.Left() != nil && n.Left().Op() == ir.OINLCALL { + n.SetLeft(inlconv2expr(n.Left())) } - n.Right = inlnode(n.Right, maxCost, inlMap) - if n.Right != nil && n.Right.Op == ir.OINLCALL { - if n.Op == ir.OFOR || n.Op == ir.OFORUNTIL { - inlconv2stmt(n.Right) - } else if n.Op == ir.OAS2FUNC { - n.Rlist.Set(inlconv2list(n.Right)) - n.Right = nil - n.Op = ir.OAS2 + n.SetRight(inlnode(n.Right(), maxCost, inlMap)) + if n.Right() != nil && n.Right().Op() == ir.OINLCALL { + if n.Op() == ir.OFOR || n.Op() == ir.OFORUNTIL { + inlconv2stmt(n.Right()) + } else if n.Op() == ir.OAS2FUNC { + n.PtrRlist().Set(inlconv2list(n.Right())) + n.SetRight(nil) + n.SetOp(ir.OAS2) n.SetTypecheck(0) n = typecheck(n, ctxStmt) } else { - n.Right = inlconv2expr(n.Right) + n.SetRight(inlconv2expr(n.Right())) } } - inlnodelist(n.List, maxCost, inlMap) - if n.Op == ir.OBLOCK { - for _, n2 := range n.List.Slice() { - if n2.Op == ir.OINLCALL { + inlnodelist(n.List(), maxCost, inlMap) + if n.Op() == ir.OBLOCK { + for _, n2 := range n.List().Slice() { + if n2.Op() == ir.OINLCALL { inlconv2stmt(n2) } } } else { - s := n.List.Slice() + s := n.List().Slice() for i1, n1 := range s { - if n1 != nil && n1.Op == ir.OINLCALL { + if n1 != nil && n1.Op() == ir.OINLCALL { s[i1] = inlconv2expr(s[i1]) } } } - inlnodelist(n.Rlist, maxCost, inlMap) - s := n.Rlist.Slice() + inlnodelist(n.Rlist(), maxCost, inlMap) + s := n.Rlist().Slice() for i1, n1 := range s { - if n1.Op == ir.OINLCALL { - if n.Op == ir.OIF { + if n1.Op() == ir.OINLCALL { + if n.Op() == ir.OIF { inlconv2stmt(n1) } else { s[i1] = inlconv2expr(s[i1]) @@ -659,9 +659,9 @@ func inlnode(n *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node { } } - inlnodelist(n.Nbody, maxCost, inlMap) - for _, n := range n.Nbody.Slice() { - if n.Op == ir.OINLCALL { + inlnodelist(n.Body(), maxCost, inlMap) + for _, n := range 
n.Body().Slice() { + if n.Op() == ir.OINLCALL { inlconv2stmt(n) } } @@ -669,36 +669,36 @@ func inlnode(n *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node { // with all the branches out of the way, it is now time to // transmogrify this node itself unless inhibited by the // switch at the top of this function. - switch n.Op { + switch n.Op() { case ir.OCALLFUNC, ir.OCALLMETH: if n.NoInline() { return n } } - switch n.Op { + switch n.Op() { case ir.OCALLFUNC: if base.Flag.LowerM > 3 { - fmt.Printf("%v:call to func %+v\n", ir.Line(n), n.Left) + fmt.Printf("%v:call to func %+v\n", ir.Line(n), n.Left()) } if isIntrinsicCall(n) { break } - if fn := inlCallee(n.Left); fn != nil && fn.Func.Inl != nil { + if fn := inlCallee(n.Left()); fn != nil && fn.Func().Inl != nil { n = mkinlcall(n, fn, maxCost, inlMap) } case ir.OCALLMETH: if base.Flag.LowerM > 3 { - fmt.Printf("%v:call to meth %L\n", ir.Line(n), n.Left.Right) + fmt.Printf("%v:call to meth %L\n", ir.Line(n), n.Left().Right()) } // typecheck should have resolved ODOTMETH->type, whose nname points to the actual function. - if n.Left.Type == nil { - base.Fatalf("no function type for [%p] %+v\n", n.Left, n.Left) + if n.Left().Type() == nil { + base.Fatalf("no function type for [%p] %+v\n", n.Left(), n.Left()) } - n = mkinlcall(n, methodExprName(n.Left), maxCost, inlMap) + n = mkinlcall(n, methodExprName(n.Left()), maxCost, inlMap) } base.Pos = lno @@ -710,29 +710,29 @@ func inlnode(n *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node { func inlCallee(fn *ir.Node) *ir.Node { fn = staticValue(fn) switch { - case fn.Op == ir.OMETHEXPR: + case fn.Op() == ir.OMETHEXPR: n := methodExprName(fn) // Check that receiver type matches fn.Left. // TODO(mdempsky): Handle implicit dereference // of pointer receiver argument? - if n == nil || !types.Identical(n.Type.Recv().Type, fn.Left.Type) { + if n == nil || !types.Identical(n.Type().Recv().Type, fn.Left().Type()) { return nil } return n - case fn.Op == ir.ONAME && fn.Class() == ir.PFUNC: + case fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC: return fn - case fn.Op == ir.OCLOSURE: - c := fn.Func.Decl + case fn.Op() == ir.OCLOSURE: + c := fn.Func().Decl caninl(c) - return c.Func.Nname + return c.Func().Nname } return nil } func staticValue(n *ir.Node) *ir.Node { for { - if n.Op == ir.OCONVNOP { - n = n.Left + if n.Op() == ir.OCONVNOP { + n = n.Left() continue } @@ -748,24 +748,24 @@ func staticValue(n *ir.Node) *ir.Node { // that is initialized and never reassigned, staticValue1 returns the initializer // expression. Otherwise, it returns nil. func staticValue1(n *ir.Node) *ir.Node { - if n.Op != ir.ONAME || n.Class() != ir.PAUTO || n.Name.Addrtaken() { + if n.Op() != ir.ONAME || n.Class() != ir.PAUTO || n.Name().Addrtaken() { return nil } - defn := n.Name.Defn + defn := n.Name().Defn if defn == nil { return nil } var rhs *ir.Node FindRHS: - switch defn.Op { + switch defn.Op() { case ir.OAS: - rhs = defn.Right + rhs = defn.Right() case ir.OAS2: - for i, lhs := range defn.List.Slice() { + for i, lhs := range defn.List().Slice() { if lhs == n { - rhs = defn.Rlist.Index(i) + rhs = defn.Rlist().Index(i) break FindRHS } } @@ -792,24 +792,24 @@ FindRHS: // NB: global variables are always considered to be re-assigned. // TODO: handle initial declaration not including an assignment and followed by a single assignment? 
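// This is the single-assignment analysis that staticValue leans on, e.g.:
//
//	g := f // PAUTO, assigned once, address never taken
//	g(1)   // staticValue(g) resolves to f, so inlCallee can consider f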
func reassigned(n *ir.Node) (bool, *ir.Node) { - if n.Op != ir.ONAME { + if n.Op() != ir.ONAME { base.Fatalf("reassigned %v", n) } // no way to reliably check for no-reassignment of globals, assume it can be - if n.Name.Curfn == nil { + if n.Name().Curfn == nil { return true, nil } - f := n.Name.Curfn + f := n.Name().Curfn // There just might be a good reason for this although this can be pretty surprising: // local variables inside a closure have Curfn pointing to the OCLOSURE node instead // of the corresponding ODCLFUNC. // We need to walk the function body to check for reassignments so we follow the // linkage to the ODCLFUNC node as that is where body is held. - if f.Op == ir.OCLOSURE { - f = f.Func.Decl + if f.Op() == ir.OCLOSURE { + f = f.Func().Decl } v := reassignVisitor{name: n} - a := v.visitList(f.Nbody) + a := v.visitList(f.Body()) return a != nil, a } @@ -821,34 +821,34 @@ func (v *reassignVisitor) visit(n *ir.Node) *ir.Node { if n == nil { return nil } - switch n.Op { + switch n.Op() { case ir.OAS: - if n.Left == v.name && n != v.name.Name.Defn { + if n.Left() == v.name && n != v.name.Name().Defn { return n } case ir.OAS2, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2DOTTYPE: - for _, p := range n.List.Slice() { - if p == v.name && n != v.name.Name.Defn { + for _, p := range n.List().Slice() { + if p == v.name && n != v.name.Name().Defn { return n } } } - if a := v.visit(n.Left); a != nil { + if a := v.visit(n.Left()); a != nil { return a } - if a := v.visit(n.Right); a != nil { + if a := v.visit(n.Right()); a != nil { return a } - if a := v.visitList(n.List); a != nil { + if a := v.visitList(n.List()); a != nil { return a } - if a := v.visitList(n.Rlist); a != nil { + if a := v.visitList(n.Rlist()); a != nil { return a } - if a := v.visitList(n.Ninit); a != nil { + if a := v.visitList(n.Init()); a != nil { return a } - if a := v.visitList(n.Nbody); a != nil { + if a := v.visitList(n.Body()); a != nil { return a } return nil @@ -873,8 +873,8 @@ func inlParam(t *types.Field, as *ir.Node, inlvars map[*ir.Node]*ir.Node) *ir.No if inlvar == nil { base.Fatalf("missing inlvar for %v", n) } - as.Ninit.Append(ir.Nod(ir.ODCL, inlvar, nil)) - inlvar.Name.Defn = as + as.PtrInit().Append(ir.Nod(ir.ODCL, inlvar, nil)) + inlvar.Name().Defn = as return inlvar } @@ -888,32 +888,32 @@ var inlgen int // The result of mkinlcall MUST be assigned back to n, e.g. // n.Left = mkinlcall(n.Left, fn, isddd) func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node { - if fn.Func.Inl == nil { + if fn.Func().Inl == nil { if logopt.Enabled() { - logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", ir.FuncName(Curfn), + logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(Curfn), fmt.Sprintf("%s cannot be inlined", ir.PkgFuncName(fn))) } return n } - if fn.Func.Inl.Cost > maxCost { + if fn.Func().Inl.Cost > maxCost { // The inlined function body is too big. Typically we use this check to restrict // inlining into very big functions. See issue 26546 and 17566. 
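// maxCost is how the big-caller restriction is applied: inlcalls passes
// a reduced budget when the caller itself is huge. Roughly (a sketch;
// the thresholds are constants near the top of inl.go):
//
//	maxCost := int32(inlineMaxBudget)
//	if countNodes(fn) >= inlineBigFunctionNodes {
//		maxCost = inlineBigFunctionMaxCost
//	}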
if logopt.Enabled() { - logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", ir.FuncName(Curfn), - fmt.Sprintf("cost %d of %s exceeds max large caller cost %d", fn.Func.Inl.Cost, ir.PkgFuncName(fn), maxCost)) + logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(Curfn), + fmt.Sprintf("cost %d of %s exceeds max large caller cost %d", fn.Func().Inl.Cost, ir.PkgFuncName(fn), maxCost)) } return n } - if fn == Curfn || fn.Name.Defn == Curfn { + if fn == Curfn || fn.Name().Defn == Curfn { // Can't recursively inline a function into itself. if logopt.Enabled() { - logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(Curfn))) + logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(Curfn))) } return n } - if instrumenting && isRuntimePkg(fn.Sym.Pkg) { + if instrumenting && isRuntimePkg(fn.Sym().Pkg) { // Runtime package must not be instrumented. // Instrument skips runtime package. However, some runtime code can be // inlined into other packages and instrumented there. To avoid this, @@ -939,7 +939,7 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node // We have a function node, and it has an inlineable body. if base.Flag.LowerM > 1 { - fmt.Printf("%v: inlining call to %v %#v { %#v }\n", ir.Line(n), fn.Sym, fn.Type, ir.AsNodes(fn.Func.Inl.Body)) + fmt.Printf("%v: inlining call to %v %#v { %#v }\n", ir.Line(n), fn.Sym(), fn.Type(), ir.AsNodes(fn.Func().Inl.Body)) } else if base.Flag.LowerM != 0 { fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn) } @@ -951,19 +951,19 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node ssaDumpInlined = append(ssaDumpInlined, fn) } - ninit := n.Ninit + ninit := n.Init() // For normal function calls, the function callee expression // may contain side effects (e.g., added by addinit during // inlconv2expr or inlconv2list). Make sure to preserve these, // if necessary (#42703). - if n.Op == ir.OCALLFUNC { - callee := n.Left - for callee.Op == ir.OCONVNOP { - ninit.AppendNodes(&callee.Ninit) - callee = callee.Left + if n.Op() == ir.OCALLFUNC { + callee := n.Left() + for callee.Op() == ir.OCONVNOP { + ninit.AppendNodes(callee.PtrInit()) + callee = callee.Left() } - if callee.Op != ir.ONAME && callee.Op != ir.OCLOSURE && callee.Op != ir.OMETHEXPR { + if callee.Op() != ir.ONAME && callee.Op() != ir.OCLOSURE && callee.Op() != ir.OMETHEXPR { base.Fatalf("unexpected callee expression: %v", callee) } } @@ -975,30 +975,30 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node var inlfvars []*ir.Node // Handle captured variables when inlining closures. 
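// Capture flavor drives the two paths below: a by-value capture becomes
// a plain temp, while a by-reference capture becomes a pointer temp
// named with an "&" prefix, e.g.:
//
//	x := 0
//	f := func() int { return x } // x only read: captured by value
//	g := func() { x++ }          // x mutated: captured by reference ("&x")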
- if fn.Name.Defn != nil { - if c := fn.Name.Defn.Func.OClosure; c != nil { - for _, v := range c.Func.ClosureVars.Slice() { - if v.Op == ir.OXXX { + if fn.Name().Defn != nil { + if c := fn.Name().Defn.Func().OClosure; c != nil { + for _, v := range c.Func().ClosureVars.Slice() { + if v.Op() == ir.OXXX { continue } - o := v.Name.Param.Outer + o := v.Name().Param.Outer // make sure the outer param matches the inlining location // NB: if we enabled inlining of functions containing OCLOSURE or refined // the reassigned check via some sort of copy propagation this would most // likely need to be changed to a loop to walk up to the correct Param - if o == nil || (o.Name.Curfn != Curfn && o.Name.Curfn.Func.OClosure != Curfn) { + if o == nil || (o.Name().Curfn != Curfn && o.Name().Curfn.Func().OClosure != Curfn) { base.Fatalf("%v: unresolvable capture %v %v\n", ir.Line(n), fn, v) } - if v.Name.Byval() { + if v.Name().Byval() { iv := typecheck(inlvar(v), ctxExpr) ninit.Append(ir.Nod(ir.ODCL, iv, nil)) ninit.Append(typecheck(ir.Nod(ir.OAS, iv, o), ctxStmt)) inlvars[v] = iv } else { - addr := NewName(lookup("&" + v.Sym.Name)) - addr.Type = types.NewPtr(v.Type) + addr := NewName(lookup("&" + v.Sym().Name)) + addr.SetType(types.NewPtr(v.Type())) ia := typecheck(inlvar(addr), ctxExpr) ninit.Append(ir.Nod(ir.ODCL, ia, nil)) ninit.Append(typecheck(ir.Nod(ir.OAS, ia, ir.Nod(ir.OADDR, o, nil)), ctxStmt)) @@ -1012,8 +1012,8 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node } } - for _, ln := range fn.Func.Inl.Dcl { - if ln.Op != ir.ONAME { + for _, ln := range fn.Func().Inl.Dcl { + if ln.Op() != ir.ONAME { continue } if ln.Class() == ir.PPARAMOUT { // return values handled below. @@ -1030,18 +1030,18 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node inlvars[ln] = inlf if base.Flag.GenDwarfInl > 0 { if ln.Class() == ir.PPARAM { - inlf.Name.SetInlFormal(true) + inlf.Name().SetInlFormal(true) } else { - inlf.Name.SetInlLocal(true) + inlf.Name().SetInlLocal(true) } - inlf.Pos = ln.Pos + inlf.SetPos(ln.Pos()) inlfvars = append(inlfvars, inlf) } } nreturns := 0 - ir.InspectList(ir.AsNodes(fn.Func.Inl.Body), func(n *ir.Node) bool { - if n != nil && n.Op == ir.ORETURN { + ir.InspectList(ir.AsNodes(fn.Func().Inl.Body), func(n *ir.Node) bool { + if n != nil && n.Op() == ir.ORETURN { nreturns++ } return true @@ -1054,9 +1054,9 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node // temporaries for return values. var retvars []*ir.Node - for i, t := range fn.Type.Results().Fields().Slice() { + for i, t := range fn.Type().Results().Fields().Slice() { var m *ir.Node - if n := ir.AsNode(t.Nname); n != nil && !ir.IsBlank(n) && !strings.HasPrefix(n.Sym.Name, "~r") { + if n := ir.AsNode(t.Nname); n != nil && !ir.IsBlank(n) && !strings.HasPrefix(n.Sym().Name, "~r") { m = inlvar(n) m = typecheck(m, ctxExpr) inlvars[n] = m @@ -1070,9 +1070,9 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node // Don't update the src.Pos on a return variable if it // was manufactured by the inliner (e.g. "~R2"); such vars // were not part of the original callee. 
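// Only results that were named in the source carry positions worth
// keeping, e.g.:
//
//	func f() (n int, _ string)
//	// "n" keeps its declared position; the anonymous result is the
//	// compiler-made ~r1, and an inliner-made temp would be ~R1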
- if !strings.HasPrefix(m.Sym.Name, "~R") { - m.Name.SetInlFormal(true) - m.Pos = t.Pos + if !strings.HasPrefix(m.Sym().Name, "~R") { + m.Name().SetInlFormal(true) + m.SetPos(t.Pos) inlfvars = append(inlfvars, m) } } @@ -1083,51 +1083,51 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node // Assign arguments to the parameters' temp names. as := ir.Nod(ir.OAS2, nil, nil) as.SetColas(true) - if n.Op == ir.OCALLMETH { - if n.Left.Left == nil { + if n.Op() == ir.OCALLMETH { + if n.Left().Left() == nil { base.Fatalf("method call without receiver: %+v", n) } - as.Rlist.Append(n.Left.Left) + as.PtrRlist().Append(n.Left().Left()) } - as.Rlist.Append(n.List.Slice()...) + as.PtrRlist().Append(n.List().Slice()...) // For non-dotted calls to variadic functions, we assign the // variadic parameter's temp name separately. var vas *ir.Node - if recv := fn.Type.Recv(); recv != nil { - as.List.Append(inlParam(recv, as, inlvars)) + if recv := fn.Type().Recv(); recv != nil { + as.PtrList().Append(inlParam(recv, as, inlvars)) } - for _, param := range fn.Type.Params().Fields().Slice() { + for _, param := range fn.Type().Params().Fields().Slice() { // For ordinary parameters or variadic parameters in // dotted calls, just add the variable to the // assignment list, and we're done. if !param.IsDDD() || n.IsDDD() { - as.List.Append(inlParam(param, as, inlvars)) + as.PtrList().Append(inlParam(param, as, inlvars)) continue } // Otherwise, we need to collect the remaining values // to pass as a slice. - x := as.List.Len() - for as.List.Len() < as.Rlist.Len() { - as.List.Append(argvar(param.Type, as.List.Len())) + x := as.List().Len() + for as.List().Len() < as.Rlist().Len() { + as.PtrList().Append(argvar(param.Type, as.List().Len())) } - varargs := as.List.Slice()[x:] + varargs := as.List().Slice()[x:] vas = ir.Nod(ir.OAS, nil, nil) - vas.Left = inlParam(param, vas, inlvars) + vas.SetLeft(inlParam(param, vas, inlvars)) if len(varargs) == 0 { - vas.Right = nodnil() - vas.Right.Type = param.Type + vas.SetRight(nodnil()) + vas.Right().SetType(param.Type) } else { - vas.Right = ir.Nod(ir.OCOMPLIT, nil, typenod(param.Type)) - vas.Right.List.Set(varargs) + vas.SetRight(ir.Nod(ir.OCOMPLIT, nil, typenod(param.Type))) + vas.Right().PtrList().Set(varargs) } } - if as.Rlist.Len() != 0 { + if as.Rlist().Len() != 0 { as = typecheck(as, ctxStmt) ninit.Append(as) } @@ -1152,10 +1152,10 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node inlgen++ parent := -1 - if b := base.Ctxt.PosTable.Pos(n.Pos).Base(); b != nil { + if b := base.Ctxt.PosTable.Pos(n.Pos()).Base(); b != nil { parent = b.InliningIndex() } - newIndex := base.Ctxt.InlTree.Add(parent, n.Pos, fn.Sym.Linksym()) + newIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), fn.Sym().Linksym()) // Add an inline mark just before the inlined body. // This mark is inline in the code so that it's a reasonable spot @@ -1163,14 +1163,14 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node // (in which case it could go at the end of the function instead). // Note issue 28603. 
inlMark := ir.Nod(ir.OINLMARK, nil, nil) - inlMark.Pos = n.Pos.WithIsStmt() - inlMark.Xoffset = int64(newIndex) + inlMark.SetPos(n.Pos().WithIsStmt()) + inlMark.SetOffset(int64(newIndex)) ninit.Append(inlMark) if base.Flag.GenDwarfInl > 0 { - if !fn.Sym.Linksym().WasInlined() { - base.Ctxt.DwFixups.SetPrecursorFunc(fn.Sym.Linksym(), fn) - fn.Sym.Linksym().Set(obj.AttrWasInlined, true) + if !fn.Sym().Linksym().WasInlined() { + base.Ctxt.DwFixups.SetPrecursorFunc(fn.Sym().Linksym(), fn) + fn.Sym().Linksym().Set(obj.AttrWasInlined, true) } } @@ -1183,7 +1183,7 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node newInlIndex: newIndex, } - body := subst.list(ir.AsNodes(fn.Func.Inl.Body)) + body := subst.list(ir.AsNodes(fn.Func().Inl.Body)) lab := nodSym(ir.OLABEL, nil, retlabel) body = append(body, lab) @@ -1192,17 +1192,17 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node if base.Flag.GenDwarfInl > 0 { for _, v := range inlfvars { - v.Pos = subst.updatedPos(v.Pos) + v.SetPos(subst.updatedPos(v.Pos())) } } //dumplist("ninit post", ninit); call := ir.Nod(ir.OINLCALL, nil, nil) - call.Ninit.Set(ninit.Slice()) - call.Nbody.Set(body) - call.Rlist.Set(retvars) - call.Type = n.Type + call.PtrInit().Set(ninit.Slice()) + call.PtrBody().Set(body) + call.PtrRlist().Set(retvars) + call.SetType(n.Type()) call.SetTypecheck(1) // transitive inlining @@ -1211,9 +1211,9 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node // instead we emit the things that the body needs // and each use must redo the inlining. // luckily these are small. - inlnodelist(call.Nbody, maxCost, inlMap) - for _, n := range call.Nbody.Slice() { - if n.Op == ir.OINLCALL { + inlnodelist(call.Body(), maxCost, inlMap) + for _, n := range call.Body().Slice() { + if n.Op() == ir.OINLCALL { inlconv2stmt(n) } } @@ -1233,25 +1233,25 @@ func inlvar(var_ *ir.Node) *ir.Node { fmt.Printf("inlvar %+v\n", var_) } - n := NewName(var_.Sym) - n.Type = var_.Type + n := NewName(var_.Sym()) + n.SetType(var_.Type()) n.SetClass(ir.PAUTO) - n.Name.SetUsed(true) - n.Name.Curfn = Curfn // the calling function, not the called one - n.Name.SetAddrtaken(var_.Name.Addrtaken()) + n.Name().SetUsed(true) + n.Name().Curfn = Curfn // the calling function, not the called one + n.Name().SetAddrtaken(var_.Name().Addrtaken()) - Curfn.Func.Dcl = append(Curfn.Func.Dcl, n) + Curfn.Func().Dcl = append(Curfn.Func().Dcl, n) return n } // Synthesize a variable to store the inlined function's results in. func retvar(t *types.Field, i int) *ir.Node { n := NewName(lookupN("~R", i)) - n.Type = t.Type + n.SetType(t.Type) n.SetClass(ir.PAUTO) - n.Name.SetUsed(true) - n.Name.Curfn = Curfn // the calling function, not the called one - Curfn.Func.Dcl = append(Curfn.Func.Dcl, n) + n.Name().SetUsed(true) + n.Name().Curfn = Curfn // the calling function, not the called one + Curfn.Func().Dcl = append(Curfn.Func().Dcl, n) return n } @@ -1259,11 +1259,11 @@ func retvar(t *types.Field, i int) *ir.Node { // when they come from a multiple return call. 
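// The "multiple return call" case is an argument spread, e.g.:
//
//	f(g()) // g's results feed f's parameters through ~arg0, ~arg1 temps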
func argvar(t *types.Type, i int) *ir.Node { n := NewName(lookupN("~arg", i)) - n.Type = t.Elem() + n.SetType(t.Elem()) n.SetClass(ir.PAUTO) - n.Name.SetUsed(true) - n.Name.Curfn = Curfn // the calling function, not the called one - Curfn.Func.Dcl = append(Curfn.Func.Dcl, n) + n.Name().SetUsed(true) + n.Name().Curfn = Curfn // the calling function, not the called one + Curfn.Func().Dcl = append(Curfn.Func().Dcl, n) return n } @@ -1309,7 +1309,7 @@ func (subst *inlsubst) node(n *ir.Node) *ir.Node { return nil } - switch n.Op { + switch n.Op() { case ir.ONAME: if inlvar := subst.inlvars[n]; inlvar != nil { // These will be set during inlnode if base.Flag.LowerM > 2 { @@ -1330,7 +1330,7 @@ func (subst *inlsubst) node(n *ir.Node) *ir.Node { // If n is a named constant or type, we can continue // using it in the inline copy. Otherwise, make a copy // so we can update the line number. - if n.Sym != nil { + if n.Sym() != nil { return n } @@ -1339,31 +1339,31 @@ func (subst *inlsubst) node(n *ir.Node) *ir.Node { // dump("Return before substitution", n); case ir.ORETURN: m := nodSym(ir.OGOTO, nil, subst.retlabel) - m.Ninit.Set(subst.list(n.Ninit)) + m.PtrInit().Set(subst.list(n.Init())) - if len(subst.retvars) != 0 && n.List.Len() != 0 { + if len(subst.retvars) != 0 && n.List().Len() != 0 { as := ir.Nod(ir.OAS2, nil, nil) // Make a shallow copy of retvars. // Otherwise OINLCALL.Rlist will be the same list, // and later walk and typecheck may clobber it. for _, n := range subst.retvars { - as.List.Append(n) + as.PtrList().Append(n) } - as.Rlist.Set(subst.list(n.List)) + as.PtrRlist().Set(subst.list(n.List())) if subst.delayretvars { - for _, n := range as.List.Slice() { - as.Ninit.Append(ir.Nod(ir.ODCL, n, nil)) - n.Name.Defn = as + for _, n := range as.List().Slice() { + as.PtrInit().Append(ir.Nod(ir.ODCL, n, nil)) + n.Name().Defn = as } } as = typecheck(as, ctxStmt) - m.Ninit.Append(as) + m.PtrInit().Append(as) } - typecheckslice(m.Ninit.Slice(), ctxStmt) + typecheckslice(m.Init().Slice(), ctxStmt) m = typecheck(m, ctxStmt) // dump("Return after substitution", m); @@ -1371,28 +1371,28 @@ func (subst *inlsubst) node(n *ir.Node) *ir.Node { case ir.OGOTO, ir.OLABEL: m := ir.Copy(n) - m.Pos = subst.updatedPos(m.Pos) - m.Ninit.Set(nil) - p := fmt.Sprintf("%s·%d", n.Sym.Name, inlgen) - m.Sym = lookup(p) + m.SetPos(subst.updatedPos(m.Pos())) + m.PtrInit().Set(nil) + p := fmt.Sprintf("%s·%d", n.Sym().Name, inlgen) + m.SetSym(lookup(p)) return m } m := ir.Copy(n) - m.Pos = subst.updatedPos(m.Pos) - m.Ninit.Set(nil) + m.SetPos(subst.updatedPos(m.Pos())) + m.PtrInit().Set(nil) - if n.Op == ir.OCLOSURE { + if n.Op() == ir.OCLOSURE { base.Fatalf("cannot inline function containing closure: %+v", n) } - m.Left = subst.node(n.Left) - m.Right = subst.node(n.Right) - m.List.Set(subst.list(n.List)) - m.Rlist.Set(subst.list(n.Rlist)) - m.Ninit.Set(append(m.Ninit.Slice(), subst.list(n.Ninit)...)) - m.Nbody.Set(subst.list(n.Nbody)) + m.SetLeft(subst.node(n.Left())) + m.SetRight(subst.node(n.Right())) + m.PtrList().Set(subst.list(n.List())) + m.PtrRlist().Set(subst.list(n.Rlist())) + m.PtrInit().Set(append(m.Init().Slice(), subst.list(n.Init())...)) + m.PtrBody().Set(subst.list(n.Body())) return m } @@ -1426,8 +1426,8 @@ func pruneUnusedAutos(ll []*ir.Node, vis *hairyVisitor) []*ir.Node { // concrete-type method calls where applicable. 
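// Devirtualization targets interface calls whose receiver provably holds
// a single concrete type, e.g.:
//
//	var w io.Writer = new(bytes.Buffer) // staticValue sees the OCONVIFACE
//	w.Write(p)                          // rewritten to the OCALLMETH
//	                                    // w.(*bytes.Buffer).Write(p)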
func devirtualize(fn *ir.Node) { Curfn = fn - ir.InspectList(fn.Nbody, func(n *ir.Node) bool { - if n.Op == ir.OCALLINTER { + ir.InspectList(fn.Body(), func(n *ir.Node) bool { + if n.Op() == ir.OCALLINTER { devirtualizeCall(n) } return true @@ -1435,38 +1435,38 @@ func devirtualize(fn *ir.Node) { } func devirtualizeCall(call *ir.Node) { - recv := staticValue(call.Left.Left) - if recv.Op != ir.OCONVIFACE { + recv := staticValue(call.Left().Left()) + if recv.Op() != ir.OCONVIFACE { return } - typ := recv.Left.Type + typ := recv.Left().Type() if typ.IsInterface() { return } - x := ir.NodAt(call.Left.Pos, ir.ODOTTYPE, call.Left.Left, nil) - x.Type = typ - x = nodlSym(call.Left.Pos, ir.OXDOT, x, call.Left.Sym) + x := ir.NodAt(call.Left().Pos(), ir.ODOTTYPE, call.Left().Left(), nil) + x.SetType(typ) + x = nodlSym(call.Left().Pos(), ir.OXDOT, x, call.Left().Sym()) x = typecheck(x, ctxExpr|ctxCallee) - switch x.Op { + switch x.Op() { case ir.ODOTMETH: if base.Flag.LowerM != 0 { - base.WarnfAt(call.Pos, "devirtualizing %v to %v", call.Left, typ) + base.WarnfAt(call.Pos(), "devirtualizing %v to %v", call.Left(), typ) } - call.Op = ir.OCALLMETH - call.Left = x + call.SetOp(ir.OCALLMETH) + call.SetLeft(x) case ir.ODOTINTER: // Promoted method from embedded interface-typed field (#42279). if base.Flag.LowerM != 0 { - base.WarnfAt(call.Pos, "partially devirtualizing %v to %v", call.Left, typ) + base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", call.Left(), typ) } - call.Op = ir.OCALLINTER - call.Left = x + call.SetOp(ir.OCALLINTER) + call.SetLeft(x) default: // TODO(mdempsky): Turn back into Fatalf after more testing. if base.Flag.LowerM != 0 { - base.WarnfAt(call.Pos, "failed to devirtualize %v (%v)", x, x.Op) + base.WarnfAt(call.Pos(), "failed to devirtualize %v (%v)", x, x.Op()) } return } @@ -1477,12 +1477,12 @@ func devirtualizeCall(call *ir.Node) { // Receiver parameter size may have changed; need to update // call.Type to get correct stack offsets for result // parameters. 
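// The offsets shift because an interface receiver is two words (type,
// data) while the concrete receiver may be a different size, so result
// parameters land at different frame offsets in the new signature.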
- checkwidth(x.Type) - switch ft := x.Type; ft.NumResults() { + checkwidth(x.Type()) + switch ft := x.Type(); ft.NumResults() { case 0: case 1: - call.Type = ft.Results().Field(0).Type + call.SetType(ft.Results().Field(0).Type) default: - call.Type = ft.Results() + call.SetType(ft.Results()) } } diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 24e926602bf4e..a7d605f3ba66b 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -253,7 +253,7 @@ func Main(archInit func(*Arch)) { timings.Start("fe", "typecheck", "top1") for i := 0; i < len(xtop); i++ { n := xtop[i] - if op := n.Op; op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.Left.Name.Param.Alias()) { + if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.Left().Name().Param.Alias()) { xtop[i] = typecheck(n, ctxStmt) } } @@ -265,7 +265,7 @@ func Main(archInit func(*Arch)) { timings.Start("fe", "typecheck", "top2") for i := 0; i < len(xtop); i++ { n := xtop[i] - if op := n.Op; op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.Left.Name.Param.Alias() { + if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.Left().Name().Param.Alias() { xtop[i] = typecheck(n, ctxStmt) } } @@ -276,14 +276,14 @@ func Main(archInit func(*Arch)) { var fcount int64 for i := 0; i < len(xtop); i++ { n := xtop[i] - if n.Op == ir.ODCLFUNC { + if n.Op() == ir.ODCLFUNC { Curfn = n decldepth = 1 errorsBefore := base.Errors() - typecheckslice(Curfn.Nbody.Slice(), ctxStmt) + typecheckslice(Curfn.Body().Slice(), ctxStmt) checkreturn(Curfn) if base.Errors() > errorsBefore { - Curfn.Nbody.Set(nil) // type errors; do not compile + Curfn.PtrBody().Set(nil) // type errors; do not compile } // Now that we've checked whether n terminates, // we can eliminate some obviously dead code. @@ -306,7 +306,7 @@ func Main(archInit func(*Arch)) { // because variables captured by value do not escape. timings.Start("fe", "capturevars") for _, n := range xtop { - if n.Op == ir.ODCLFUNC && n.Func.OClosure != nil { + if n.Op() == ir.ODCLFUNC && n.Func().OClosure != nil { Curfn = n capturevars(n) } @@ -321,7 +321,7 @@ func Main(archInit func(*Arch)) { // Typecheck imported function bodies if Debug.l > 1, // otherwise lazily when used or re-exported. for _, n := range importlist { - if n.Func.Inl != nil { + if n.Func().Inl != nil { typecheckinl(n) } } @@ -340,7 +340,7 @@ func Main(archInit func(*Arch)) { caninl(n) } else { if base.Flag.LowerM > 1 { - fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Func.Nname) + fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Func().Nname) } } inlcalls(n) @@ -349,7 +349,7 @@ func Main(archInit func(*Arch)) { } for _, n := range xtop { - if n.Op == ir.ODCLFUNC { + if n.Op() == ir.ODCLFUNC { devirtualize(n) } } @@ -379,7 +379,7 @@ func Main(archInit func(*Arch)) { // before walk reaches a call of a closure. timings.Start("fe", "xclosures") for _, n := range xtop { - if n.Op == ir.ODCLFUNC && n.Func.OClosure != nil { + if n.Op() == ir.ODCLFUNC && n.Func().OClosure != nil { Curfn = n transformclosure(n) } @@ -402,7 +402,7 @@ func Main(archInit func(*Arch)) { fcount = 0 for i := 0; i < len(xtop); i++ { n := xtop[i] - if n.Op == ir.ODCLFUNC { + if n.Op() == ir.ODCLFUNC { funccompile(n) fcount++ } @@ -430,7 +430,7 @@ func Main(archInit func(*Arch)) { // Phase 9: Check external declarations. 
timings.Start("be", "externaldcls") for i, n := range externdcl { - if n.Op == ir.ONAME { + if n.Op() == ir.ONAME { externdcl[i] = typecheck(externdcl[i], ctxExpr) } } @@ -484,7 +484,7 @@ func Main(archInit func(*Arch)) { func numNonClosures(list []*ir.Node) int { count := 0 for _, n := range list { - if n.Func.OClosure == nil { + if n.Func().OClosure == nil { count++ } } @@ -949,14 +949,14 @@ func clearImports() { if n == nil { continue } - if n.Op == ir.OPACK { + if n.Op() == ir.OPACK { // throw away top-level package name left over // from previous file. // leave s->block set to cause redeclaration // errors if a conflicting top-level name is // introduced by a different file. - if !n.Name.Used() && base.SyntaxErrors() == 0 { - unused = append(unused, importedPkg{n.Pos, n.Name.Pkg.Path, s.Name}) + if !n.Name().Used() && base.SyntaxErrors() == 0 { + unused = append(unused, importedPkg{n.Pos(), n.Name().Pkg.Path, s.Name}) } s.Def = nil continue @@ -964,9 +964,9 @@ func clearImports() { if IsAlias(s) { // throw away top-level name left over // from previous import . "x" - if n.Name != nil && n.Name.Pack != nil && !n.Name.Pack.Name.Used() && base.SyntaxErrors() == 0 { - unused = append(unused, importedPkg{n.Name.Pack.Pos, n.Name.Pack.Name.Pkg.Path, ""}) - n.Name.Pack.Name.SetUsed(true) + if n.Name() != nil && n.Name().Pack != nil && !n.Name().Pack.Name().Used() && base.SyntaxErrors() == 0 { + unused = append(unused, importedPkg{n.Name().Pack.Pos(), n.Name().Pack.Name().Pkg.Path, ""}) + n.Name().Pack.Name().SetUsed(true) } s.Def = nil continue @@ -980,7 +980,7 @@ func clearImports() { } func IsAlias(sym *types.Sym) bool { - return sym.Def != nil && ir.AsNode(sym.Def).Sym != sym + return sym.Def != nil && ir.AsNode(sym.Def).Sym() != sym } // recordFlags records the specified command-line flags to be placed diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index eeed3740f063c..98819fadde5aa 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -162,10 +162,10 @@ func (p *noder) funcBody(fn *ir.Node, block *syntax.BlockStmt) { if body == nil { body = []*ir.Node{ir.Nod(ir.OEMPTY, nil, nil)} } - fn.Nbody.Set(body) + fn.PtrBody().Set(body) base.Pos = p.makeXPos(block.Rbrace) - fn.Func.Endlineno = base.Pos + fn.Func().Endlineno = base.Pos } funcbody() @@ -176,9 +176,9 @@ func (p *noder) openScope(pos syntax.Pos) { types.Markdcl() if trackScopes { - Curfn.Func.Parents = append(Curfn.Func.Parents, p.scope) - p.scopeVars = append(p.scopeVars, len(Curfn.Func.Dcl)) - p.scope = ir.ScopeID(len(Curfn.Func.Parents)) + Curfn.Func().Parents = append(Curfn.Func().Parents, p.scope) + p.scopeVars = append(p.scopeVars, len(Curfn.Func().Dcl)) + p.scope = ir.ScopeID(len(Curfn.Func().Parents)) p.markScope(pos) } @@ -191,29 +191,29 @@ func (p *noder) closeScope(pos syntax.Pos) { if trackScopes { scopeVars := p.scopeVars[len(p.scopeVars)-1] p.scopeVars = p.scopeVars[:len(p.scopeVars)-1] - if scopeVars == len(Curfn.Func.Dcl) { + if scopeVars == len(Curfn.Func().Dcl) { // no variables were declared in this scope, so we can retract it. 
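// Retracting empty scopes keeps the emitted DWARF lexical-block tree
// minimal, e.g.:
//
//	{ x := 1; _ = x } // declares x: the scope is kept
//	{ doWork() }      // declares nothing: scope and its mark are dropped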
- if int(p.scope) != len(Curfn.Func.Parents) { + if int(p.scope) != len(Curfn.Func().Parents) { base.Fatalf("scope tracking inconsistency, no variables declared but scopes were not retracted") } - p.scope = Curfn.Func.Parents[p.scope-1] - Curfn.Func.Parents = Curfn.Func.Parents[:len(Curfn.Func.Parents)-1] + p.scope = Curfn.Func().Parents[p.scope-1] + Curfn.Func().Parents = Curfn.Func().Parents[:len(Curfn.Func().Parents)-1] - nmarks := len(Curfn.Func.Marks) - Curfn.Func.Marks[nmarks-1].Scope = p.scope + nmarks := len(Curfn.Func().Marks) + Curfn.Func().Marks[nmarks-1].Scope = p.scope prevScope := ir.ScopeID(0) if nmarks >= 2 { - prevScope = Curfn.Func.Marks[nmarks-2].Scope + prevScope = Curfn.Func().Marks[nmarks-2].Scope } - if Curfn.Func.Marks[nmarks-1].Scope == prevScope { - Curfn.Func.Marks = Curfn.Func.Marks[:nmarks-1] + if Curfn.Func().Marks[nmarks-1].Scope == prevScope { + Curfn.Func().Marks = Curfn.Func().Marks[:nmarks-1] } return } - p.scope = Curfn.Func.Parents[p.scope-1] + p.scope = Curfn.Func().Parents[p.scope-1] p.markScope(pos) } @@ -221,10 +221,10 @@ func (p *noder) closeScope(pos syntax.Pos) { func (p *noder) markScope(pos syntax.Pos) { xpos := p.makeXPos(pos) - if i := len(Curfn.Func.Marks); i > 0 && Curfn.Func.Marks[i-1].Pos == xpos { - Curfn.Func.Marks[i-1].Scope = p.scope + if i := len(Curfn.Func().Marks); i > 0 && Curfn.Func().Marks[i-1].Pos == xpos { + Curfn.Func().Marks[i-1].Scope = p.scope } else { - Curfn.Func.Marks = append(Curfn.Func.Marks, ir.Mark{Pos: xpos, Scope: p.scope}) + Curfn.Func().Marks = append(Curfn.Func().Marks, ir.Mark{Pos: xpos, Scope: p.scope}) } } @@ -357,24 +357,24 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) { } pack := p.nod(imp, ir.OPACK, nil, nil) - pack.Sym = my - pack.Name.Pkg = ipkg + pack.SetSym(my) + pack.Name().Pkg = ipkg switch my.Name { case ".": importdot(ipkg, pack) return case "init": - base.ErrorfAt(pack.Pos, "cannot import package as init - init must be a func") + base.ErrorfAt(pack.Pos(), "cannot import package as init - init must be a func") return case "_": return } if my.Def != nil { - redeclare(pack.Pos, my, "as imported package name") + redeclare(pack.Pos(), my, "as imported package name") } my.Def = ir.AsTypesNode(pack) - my.Lastlineno = pack.Pos + my.Lastlineno = pack.Pos() my.Block = 1 // at top level } @@ -452,14 +452,14 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*ir.Node { } v := values[i] if decl.Values == nil { - v = treecopy(v, n.Pos) + v = treecopy(v, n.Pos()) } - n.Op = ir.OLITERAL + n.SetOp(ir.OLITERAL) declare(n, dclcontext) - n.Name.Param.Ntype = typ - n.Name.Defn = v + n.Name().Param.Ntype = typ + n.Name().Defn = v n.SetIota(cs.iota) nn = append(nn, p.nod(decl, ir.ODCLCONST, n, nil)) @@ -476,13 +476,13 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*ir.Node { func (p *noder) typeDecl(decl *syntax.TypeDecl) *ir.Node { n := p.declName(decl.Name) - n.Op = ir.OTYPE + n.SetOp(ir.OTYPE) declare(n, dclcontext) // decl.Type may be nil but in that case we got a syntax error during parsing typ := p.typeExprOrNil(decl.Type) - param := n.Name.Param + param := n.Name().Param param.Ntype = typ param.SetAlias(decl.Alias) if pragma, ok := decl.Pragma.(*Pragma); ok { @@ -495,7 +495,7 @@ func (p *noder) typeDecl(decl *syntax.TypeDecl) *ir.Node { nod := p.nod(decl, ir.ODCLTYPE, n, nil) if param.Alias() && !langSupported(1, 9, ir.LocalPkg) { - base.ErrorfAt(nod.Pos, "type aliases only supported as of -lang=go1.9") + base.ErrorfAt(nod.Pos(), "type aliases only supported as 
of -lang=go1.9") } return nod } @@ -510,7 +510,7 @@ func (p *noder) declNames(names []*syntax.Name) []*ir.Node { func (p *noder) declName(name *syntax.Name) *ir.Node { n := dclname(p.name(name)) - n.Pos = p.pos(name) + n.SetPos(p.pos(name)) return n } @@ -522,43 +522,43 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *ir.Node { if fun.Recv == nil { if name.Name == "init" { name = renameinit() - if t.List.Len() > 0 || t.Rlist.Len() > 0 { - base.ErrorfAt(f.Pos, "func init must have no arguments and no return values") + if t.List().Len() > 0 || t.Rlist().Len() > 0 { + base.ErrorfAt(f.Pos(), "func init must have no arguments and no return values") } } if ir.LocalPkg.Name == "main" && name.Name == "main" { - if t.List.Len() > 0 || t.Rlist.Len() > 0 { - base.ErrorfAt(f.Pos, "func main must have no arguments and no return values") + if t.List().Len() > 0 || t.Rlist().Len() > 0 { + base.ErrorfAt(f.Pos(), "func main must have no arguments and no return values") } } } else { - f.Func.Shortname = name - name = ir.BlankNode.Sym // filled in by typecheckfunc + f.Func().Shortname = name + name = ir.BlankNode.Sym() // filled in by typecheckfunc } - f.Func.Nname = newfuncnamel(p.pos(fun.Name), name, f.Func) - f.Func.Nname.Name.Defn = f - f.Func.Nname.Name.Param.Ntype = t + f.Func().Nname = newfuncnamel(p.pos(fun.Name), name, f.Func()) + f.Func().Nname.Name().Defn = f + f.Func().Nname.Name().Param.Ntype = t if pragma, ok := fun.Pragma.(*Pragma); ok { - f.Func.Pragma = pragma.Flag & FuncPragmas + f.Func().Pragma = pragma.Flag & FuncPragmas if pragma.Flag&ir.Systemstack != 0 && pragma.Flag&ir.Nosplit != 0 { - base.ErrorfAt(f.Pos, "go:nosplit and go:systemstack cannot be combined") + base.ErrorfAt(f.Pos(), "go:nosplit and go:systemstack cannot be combined") } pragma.Flag &^= FuncPragmas p.checkUnused(pragma) } if fun.Recv == nil { - declare(f.Func.Nname, ir.PFUNC) + declare(f.Func().Nname, ir.PFUNC) } p.funcBody(f, fun.Body) if fun.Body != nil { - if f.Func.Pragma&ir.Noescape != 0 { - base.ErrorfAt(f.Pos, "can only use //go:noescape with external func implementations") + if f.Func().Pragma&ir.Noescape != 0 { + base.ErrorfAt(f.Pos(), "can only use //go:noescape with external func implementations") } } else { if base.Flag.Complete || strings.HasPrefix(ir.FuncName(f), "init.") { @@ -572,7 +572,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *ir.Node { } } if !isLinknamed { - base.ErrorfAt(f.Pos, "missing function body") + base.ErrorfAt(f.Pos(), "missing function body") } } } @@ -583,10 +583,10 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *ir.Node { func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) *ir.Node { n := p.nod(typ, ir.OTFUNC, nil, nil) if recv != nil { - n.Left = p.param(recv, false, false) + n.SetLeft(p.param(recv, false, false)) } - n.List.Set(p.params(typ.ParamList, true)) - n.Rlist.Set(p.params(typ.ResultList, false)) + n.PtrList().Set(p.params(typ.ParamList, true)) + n.PtrRlist().Set(p.params(typ.ResultList, false)) return n } @@ -609,7 +609,7 @@ func (p *noder) param(param *syntax.Field, dddOk, final bool) *ir.Node { n := p.nodSym(param, ir.ODCLFIELD, typ, name) // rewrite ...T parameter - if typ.Op == ir.ODDD { + if typ.Op() == ir.ODDD { if !dddOk { // We mark these as syntax errors to get automatic elimination // of multiple such errors per line (see ErrorfAt in subr.go). @@ -621,12 +621,12 @@ func (p *noder) param(param *syntax.Field, dddOk, final bool) *ir.Node { p.errorAt(param.Name.Pos(), "syntax error: cannot use ... 
with non-final parameter %s", param.Name.Value) } } - typ.Op = ir.OTARRAY - typ.Right = typ.Left - typ.Left = nil + typ.SetOp(ir.OTARRAY) + typ.SetRight(typ.Left()) + typ.SetLeft(nil) n.SetIsDDD(true) - if n.Left != nil { - n.Left.SetIsDDD(true) + if n.Left() != nil { + n.Left().SetIsDDD(true) } } @@ -658,20 +658,20 @@ func (p *noder) expr(expr syntax.Expr) *ir.Node { case *syntax.BasicLit: n := ir.NewLiteral(p.basicLit(expr)) if expr.Kind == syntax.RuneLit { - n.Type = types.UntypedRune + n.SetType(types.UntypedRune) } n.SetDiag(expr.Bad) // avoid follow-on errors if there was a syntax error return n case *syntax.CompositeLit: n := p.nod(expr, ir.OCOMPLIT, nil, nil) if expr.Type != nil { - n.Right = p.expr(expr.Type) + n.SetRight(p.expr(expr.Type)) } l := p.exprs(expr.ElemList) for i, e := range l { l[i] = p.wrapname(expr.ElemList[i], e) } - n.List.Set(l) + n.PtrList().Set(l) base.Pos = p.makeXPos(expr.Rbrace) return n case *syntax.KeyValueExpr: @@ -684,12 +684,12 @@ func (p *noder) expr(expr syntax.Expr) *ir.Node { case *syntax.SelectorExpr: // parser.new_dotname obj := p.expr(expr.X) - if obj.Op == ir.OPACK { - obj.Name.SetUsed(true) - return importName(obj.Name.Pkg.Lookup(expr.Sel.Value)) + if obj.Op() == ir.OPACK { + obj.Name().SetUsed(true) + return importName(obj.Name().Pkg.Lookup(expr.Sel.Value)) } n := nodSym(ir.OXDOT, obj, p.name(expr.Sel)) - n.Pos = p.pos(expr) // lineno may have been changed by p.expr(expr.X) + n.SetPos(p.pos(expr)) // lineno may have been changed by p.expr(expr.X) return n case *syntax.IndexExpr: return p.nod(expr, ir.OINDEX, p.expr(expr.X), p.expr(expr.Index)) @@ -720,7 +720,7 @@ func (p *noder) expr(expr syntax.Expr) *ir.Node { return p.nod(expr, p.binOp(expr.Op), x, p.expr(expr.Y)) case *syntax.CallExpr: n := p.nod(expr, ir.OCALL, p.expr(expr.Fun), nil) - n.List.Set(p.exprs(expr.ArgList)) + n.PtrList().Set(p.exprs(expr.ArgList)) n.SetIsDDD(expr.HasDots) return n @@ -752,9 +752,9 @@ func (p *noder) expr(expr syntax.Expr) *ir.Node { case *syntax.TypeSwitchGuard: n := p.nod(expr, ir.OTYPESW, nil, p.expr(expr.X)) if expr.Lhs != nil { - n.Left = p.declName(expr.Lhs) - if ir.IsBlank(n.Left) { - base.Errorf("invalid variable name %v in type switch", n.Left) + n.SetLeft(p.declName(expr.Lhs)) + if ir.IsBlank(n.Left()) { + base.Errorf("invalid variable name %v in type switch", n.Left()) } } return n @@ -804,7 +804,7 @@ func (p *noder) sum(x syntax.Expr) *ir.Node { chunks := make([]string, 0, 1) n := p.expr(x) - if ir.IsConst(n, constant.String) && n.Sym == nil { + if ir.IsConst(n, constant.String) && n.Sym() == nil { nstr = n chunks = append(chunks, nstr.StringVal()) } @@ -813,7 +813,7 @@ func (p *noder) sum(x syntax.Expr) *ir.Node { add := adds[i] r := p.expr(add.Y) - if ir.IsConst(r, constant.String) && r.Sym == nil { + if ir.IsConst(r, constant.String) && r.Sym() == nil { if nstr != nil { // Collapse r into nstr instead of adding to n. 
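				// Illustrative effect: in x + "a" + "b", the constant
				// operands are gathered into chunks and folded into a
				// single "ab" literal, so only one constant operand
				// survives per run of adjacent string literals.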
chunks = append(chunks, r.StringVal()) @@ -880,7 +880,7 @@ func (p *noder) structType(expr *syntax.StructType) *ir.Node { p.setlineno(expr) n := p.nod(expr, ir.OTSTRUCT, nil, nil) - n.List.Set(l) + n.PtrList().Set(l) return n } @@ -894,7 +894,7 @@ func (p *noder) interfaceType(expr *syntax.InterfaceType) *ir.Node { } else { mname := p.name(method.Name) sig := p.typeExpr(method.Type) - sig.Left = fakeRecv() + sig.SetLeft(fakeRecv()) n = p.nodSym(method, ir.ODCLFIELD, sig, mname) ifacedcl(n) } @@ -902,7 +902,7 @@ func (p *noder) interfaceType(expr *syntax.InterfaceType) *ir.Node { } n := p.nod(expr, ir.OTINTER, nil, nil) - n.List.Set(l) + n.PtrList().Set(l) return n } @@ -910,8 +910,8 @@ func (p *noder) packname(expr syntax.Expr) *types.Sym { switch expr := expr.(type) { case *syntax.Name: name := p.name(expr) - if n := oldname(name); n.Name != nil && n.Name.Pack != nil { - n.Name.Pack.Name.SetUsed(true) + if n := oldname(name); n.Name() != nil && n.Name().Pack != nil { + n.Name().Pack.Name().SetUsed(true) } return name case *syntax.SelectorExpr: @@ -922,12 +922,12 @@ func (p *noder) packname(expr syntax.Expr) *types.Sym { return name } var pkg *types.Pkg - if def.Op != ir.OPACK { + if def.Op() != ir.OPACK { base.Errorf("%v is not a package", name) pkg = ir.LocalPkg } else { - def.Name.SetUsed(true) - pkg = def.Name.Pkg + def.Name().SetUsed(true) + pkg = def.Name().Pkg } return pkg.Lookup(expr.Sel.Value) } @@ -948,7 +948,7 @@ func (p *noder) embedded(typ syntax.Expr) *ir.Node { n.SetEmbedded(true) if isStar { - n.Left = p.nod(op, ir.ODEREF, n.Left, nil) + n.SetLeft(p.nod(op, ir.ODEREF, n.Left(), nil)) } return n } @@ -962,8 +962,8 @@ func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []*ir.Node { for i, stmt := range stmts { s := p.stmtFall(stmt, fallOK && i+1 == len(stmts)) if s == nil { - } else if s.Op == ir.OBLOCK && s.Ninit.Len() == 0 { - nodes = append(nodes, s.List.Slice()...) + } else if s.Op() == ir.OBLOCK && s.Init().Len() == 0 { + nodes = append(nodes, s.List().Slice()...) 
} else { nodes = append(nodes, s) } @@ -1010,12 +1010,12 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *ir.Node { if len(lhs) == 1 && len(rhs) == 1 { // common case - n.Left = lhs[0] - n.Right = rhs[0] + n.SetLeft(lhs[0]) + n.SetRight(rhs[0]) } else { - n.Op = ir.OAS2 - n.List.Set(lhs) - n.Rlist.Set(rhs) + n.SetOp(ir.OAS2) + n.PtrList().Set(lhs) + n.PtrRlist().Set(rhs) } return n @@ -1038,7 +1038,7 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *ir.Node { } n := p.nod(stmt, op, nil, nil) if stmt.Label != nil { - n.Sym = p.name(stmt.Label) + n.SetSym(p.name(stmt.Label)) } return n case *syntax.CallStmt: @@ -1058,17 +1058,17 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *ir.Node { results = p.exprList(stmt.Results) } n := p.nod(stmt, ir.ORETURN, nil, nil) - n.List.Set(results) - if n.List.Len() == 0 && Curfn != nil { - for _, ln := range Curfn.Func.Dcl { + n.PtrList().Set(results) + if n.List().Len() == 0 && Curfn != nil { + for _, ln := range Curfn.Func().Dcl { if ln.Class() == ir.PPARAM { continue } if ln.Class() != ir.PPARAMOUT { break } - if ir.AsNode(ln.Sym.Def) != ln { - base.Errorf("%s is shadowed during return", ln.Sym.Name) + if ir.AsNode(ln.Sym().Def) != ln { + base.Errorf("%s is shadowed during return", ln.Sym().Name) } } } @@ -1134,13 +1134,13 @@ func (p *noder) assignList(expr syntax.Expr, defn *ir.Node, colas bool) []*ir.No newOrErr = true n := NewName(sym) declare(n, dclcontext) - n.Name.Defn = defn - defn.Ninit.Append(ir.Nod(ir.ODCL, n, nil)) + n.Name().Defn = defn + defn.PtrInit().Append(ir.Nod(ir.ODCL, n, nil)) res[i] = n } if !newOrErr { - base.ErrorfAt(defn.Pos, "no new variables on left side of :=") + base.ErrorfAt(defn.Pos(), "no new variables on left side of :=") } return res } @@ -1156,18 +1156,18 @@ func (p *noder) ifStmt(stmt *syntax.IfStmt) *ir.Node { p.openScope(stmt.Pos()) n := p.nod(stmt, ir.OIF, nil, nil) if stmt.Init != nil { - n.Ninit.Set1(p.stmt(stmt.Init)) + n.PtrInit().Set1(p.stmt(stmt.Init)) } if stmt.Cond != nil { - n.Left = p.expr(stmt.Cond) + n.SetLeft(p.expr(stmt.Cond)) } - n.Nbody.Set(p.blockStmt(stmt.Then)) + n.PtrBody().Set(p.blockStmt(stmt.Then)) if stmt.Else != nil { e := p.stmt(stmt.Else) - if e.Op == ir.OBLOCK && e.Ninit.Len() == 0 { - n.Rlist.Set(e.List.Slice()) + if e.Op() == ir.OBLOCK && e.Init().Len() == 0 { + n.PtrRlist().Set(e.List().Slice()) } else { - n.Rlist.Set1(e) + n.PtrRlist().Set1(e) } } p.closeAnotherScope() @@ -1184,21 +1184,21 @@ func (p *noder) forStmt(stmt *syntax.ForStmt) *ir.Node { n = p.nod(r, ir.ORANGE, nil, p.expr(r.X)) if r.Lhs != nil { - n.List.Set(p.assignList(r.Lhs, n, r.Def)) + n.PtrList().Set(p.assignList(r.Lhs, n, r.Def)) } } else { n = p.nod(stmt, ir.OFOR, nil, nil) if stmt.Init != nil { - n.Ninit.Set1(p.stmt(stmt.Init)) + n.PtrInit().Set1(p.stmt(stmt.Init)) } if stmt.Cond != nil { - n.Left = p.expr(stmt.Cond) + n.SetLeft(p.expr(stmt.Cond)) } if stmt.Post != nil { - n.Right = p.stmt(stmt.Post) + n.SetRight(p.stmt(stmt.Post)) } } - n.Nbody.Set(p.blockStmt(stmt.Body)) + n.PtrBody().Set(p.blockStmt(stmt.Body)) p.closeAnotherScope() return n } @@ -1207,17 +1207,17 @@ func (p *noder) switchStmt(stmt *syntax.SwitchStmt) *ir.Node { p.openScope(stmt.Pos()) n := p.nod(stmt, ir.OSWITCH, nil, nil) if stmt.Init != nil { - n.Ninit.Set1(p.stmt(stmt.Init)) + n.PtrInit().Set1(p.stmt(stmt.Init)) } if stmt.Tag != nil { - n.Left = p.expr(stmt.Tag) + n.SetLeft(p.expr(stmt.Tag)) } - tswitch := n.Left - if tswitch != nil && tswitch.Op != ir.OTYPESW { + tswitch := n.Left() + if tswitch != nil && 
tswitch.Op() != ir.OTYPESW { tswitch = nil } - n.List.Set(p.caseClauses(stmt.Body, tswitch, stmt.Rbrace)) + n.PtrList().Set(p.caseClauses(stmt.Body, tswitch, stmt.Rbrace)) p.closeScope(stmt.Rbrace) return n @@ -1234,14 +1234,14 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.Node, rbra n := p.nod(clause, ir.OCASE, nil, nil) if clause.Cases != nil { - n.List.Set(p.exprList(clause.Cases)) + n.PtrList().Set(p.exprList(clause.Cases)) } - if tswitch != nil && tswitch.Left != nil { - nn := NewName(tswitch.Left.Sym) + if tswitch != nil && tswitch.Left() != nil { + nn := NewName(tswitch.Left().Sym()) declare(nn, dclcontext) - n.Rlist.Set1(nn) + n.PtrRlist().Set1(nn) // keep track of the instances for reporting unused - nn.Name.Defn = tswitch + nn.Name().Defn = tswitch } // Trim trailing empty statements. We omit them from @@ -1255,8 +1255,8 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.Node, rbra body = body[:len(body)-1] } - n.Nbody.Set(p.stmtsFall(body, true)) - if l := n.Nbody.Len(); l > 0 && n.Nbody.Index(l-1).Op == ir.OFALL { + n.PtrBody().Set(p.stmtsFall(body, true)) + if l := n.Body().Len(); l > 0 && n.Body().Index(l-1).Op() == ir.OFALL { if tswitch != nil { base.Errorf("cannot fallthrough in type switch") } @@ -1275,7 +1275,7 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.Node, rbra func (p *noder) selectStmt(stmt *syntax.SelectStmt) *ir.Node { n := p.nod(stmt, ir.OSELECT, nil, nil) - n.List.Set(p.commClauses(stmt.Body, stmt.Rbrace)) + n.PtrList().Set(p.commClauses(stmt.Body, stmt.Rbrace)) return n } @@ -1290,9 +1290,9 @@ func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []* n := p.nod(clause, ir.OCASE, nil, nil) if clause.Comm != nil { - n.List.Set1(p.stmt(clause.Comm)) + n.PtrList().Set1(p.stmt(clause.Comm)) } - n.Nbody.Set(p.stmts(clause.Body)) + n.PtrBody().Set(p.stmts(clause.Body)) nodes = append(nodes, n) } if len(clauses) > 0 { @@ -1309,11 +1309,11 @@ func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) *ir.Node { ls = p.stmtFall(label.Stmt, fallOK) } - lhs.Name.Defn = ls + lhs.Name().Defn = ls l := []*ir.Node{lhs} if ls != nil { - if ls.Op == ir.OBLOCK && ls.Ninit.Len() == 0 { - l = append(l, ls.List.Slice()...) + if ls.Op() == ir.OBLOCK && ls.Init().Len() == 0 { + l = append(l, ls.List().Slice()...) } else { l = append(l, ls) } @@ -1451,9 +1451,9 @@ func (p *noder) mkname(name *syntax.Name) *ir.Node { func (p *noder) wrapname(n syntax.Node, x *ir.Node) *ir.Node { // These nodes do not carry line numbers. // Introduce a wrapper node to give them the correct line. 
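	// For example, a use of a named constant such as math.Pi shares a
	// single OLITERAL node; wrapping each use records the position of
	// that use rather than the position of the declaration.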
- switch x.Op { + switch x.Op() { case ir.OTYPE, ir.OLITERAL: - if x.Sym == nil { + if x.Sym() == nil { break } fallthrough @@ -1470,7 +1470,7 @@ func (p *noder) nod(orig syntax.Node, op ir.Op, left, right *ir.Node) *ir.Node { func (p *noder) nodSym(orig syntax.Node, op ir.Op, left *ir.Node, sym *types.Sym) *ir.Node { n := nodSym(op, left, sym) - n.Pos = p.pos(orig) + n.SetPos(p.pos(orig)) return n } @@ -1670,8 +1670,8 @@ func safeArg(name string) bool { func mkname(sym *types.Sym) *ir.Node { n := oldname(sym) - if n.Name != nil && n.Name.Pack != nil { - n.Name.Pack.Name.SetUsed(true) + if n.Name() != nil && n.Name().Pack != nil { + n.Name().Pack.Name().SetUsed(true) } return n } diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 2961dbf636a4f..9f0cefbd1cb9a 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -142,7 +142,7 @@ func dumpdata() { for { for i := xtops; i < len(xtop); i++ { n := xtop[i] - if n.Op == ir.ODCLFUNC { + if n.Op() == ir.ODCLFUNC { funccompile(n) } } @@ -204,12 +204,12 @@ func addptabs() { return } for _, exportn := range exportlist { - s := exportn.Sym + s := exportn.Sym() n := ir.AsNode(s.Def) if n == nil { continue } - if n.Op != ir.ONAME { + if n.Op() != ir.ONAME { continue } if !types.IsExported(s.Name) { @@ -218,37 +218,37 @@ func addptabs() { if s.Pkg.Name != "main" { continue } - if n.Type.Etype == types.TFUNC && n.Class() == ir.PFUNC { + if n.Type().Etype == types.TFUNC && n.Class() == ir.PFUNC { // function - ptabs = append(ptabs, ptabEntry{s: s, t: ir.AsNode(s.Def).Type}) + ptabs = append(ptabs, ptabEntry{s: s, t: ir.AsNode(s.Def).Type()}) } else { // variable - ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(ir.AsNode(s.Def).Type)}) + ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(ir.AsNode(s.Def).Type())}) } } } func dumpGlobal(n *ir.Node) { - if n.Type == nil { + if n.Type() == nil { base.Fatalf("external %v nil type\n", n) } if n.Class() == ir.PFUNC { return } - if n.Sym.Pkg != ir.LocalPkg { + if n.Sym().Pkg != ir.LocalPkg { return } - dowidth(n.Type) + dowidth(n.Type()) ggloblnod(n) } func dumpGlobalConst(n *ir.Node) { // only export typed constants - t := n.Type + t := n.Type() if t == nil { return } - if n.Sym.Pkg != ir.LocalPkg { + if n.Sym().Pkg != ir.LocalPkg { return } // only export integer constants for now @@ -263,13 +263,13 @@ func dumpGlobalConst(n *ir.Node) { return } } - base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym.Name, typesymname(t), ir.Int64Val(t, v)) + base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym().Name, typesymname(t), ir.Int64Val(t, v)) } func dumpglobls() { // add globals for _, n := range externdcl { - switch n.Op { + switch n.Op() { case ir.ONAME: dumpGlobal(n) case ir.OLITERAL: @@ -414,7 +414,7 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj. if readonly { sym = stringsym(pos, string(data)) } else { - sym = slicedata(pos, string(data)).Sym.Linksym() + sym = slicedata(pos, string(data)).Sym().Linksym() } if len(hash) > 0 { sum := sha256.Sum256(data) @@ -462,7 +462,7 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj. } else { // Emit a zero-length data symbol // and then fix up length and content to use file. 
- symdata = slicedata(pos, "").Sym.Linksym() + symdata = slicedata(pos, "").Sym().Linksym() symdata.Size = size symdata.Type = objabi.SNOPTRDATA info := symdata.NewFileInfo() @@ -490,10 +490,10 @@ func slicedata(pos src.XPos, s string) *ir.Node { } func slicebytes(nam *ir.Node, s string) { - if nam.Op != ir.ONAME { + if nam.Op() != ir.ONAME { base.Fatalf("slicebytes %v", nam) } - slicesym(nam, slicedata(nam.Pos, s), int64(len(s))) + slicesym(nam, slicedata(nam.Pos(), s), int64(len(s))) } func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int { @@ -531,12 +531,12 @@ func dsymptrWeakOff(s *obj.LSym, off int, x *obj.LSym) int { // slicesym writes a static slice symbol {&arr, lencap, lencap} to n. // arr must be an ONAME. slicesym does not modify n. func slicesym(n, arr *ir.Node, lencap int64) { - s := n.Sym.Linksym() - off := n.Xoffset - if arr.Op != ir.ONAME { + s := n.Sym().Linksym() + off := n.Offset() + if arr.Op() != ir.ONAME { base.Fatalf("slicesym non-name arr %v", arr) } - s.WriteAddr(base.Ctxt, off, Widthptr, arr.Sym.Linksym(), arr.Xoffset) + s.WriteAddr(base.Ctxt, off, Widthptr, arr.Sym().Linksym(), arr.Offset()) s.WriteInt(base.Ctxt, off+sliceLenOffset, Widthptr, lencap) s.WriteInt(base.Ctxt, off+sliceCapOffset, Widthptr, lencap) } @@ -544,88 +544,88 @@ func slicesym(n, arr *ir.Node, lencap int64) { // addrsym writes the static address of a to n. a must be an ONAME. // Neither n nor a is modified. func addrsym(n, a *ir.Node) { - if n.Op != ir.ONAME { - base.Fatalf("addrsym n op %v", n.Op) + if n.Op() != ir.ONAME { + base.Fatalf("addrsym n op %v", n.Op()) } - if n.Sym == nil { + if n.Sym() == nil { base.Fatalf("addrsym nil n sym") } - if a.Op != ir.ONAME { - base.Fatalf("addrsym a op %v", a.Op) + if a.Op() != ir.ONAME { + base.Fatalf("addrsym a op %v", a.Op()) } - s := n.Sym.Linksym() - s.WriteAddr(base.Ctxt, n.Xoffset, Widthptr, a.Sym.Linksym(), a.Xoffset) + s := n.Sym().Linksym() + s.WriteAddr(base.Ctxt, n.Offset(), Widthptr, a.Sym().Linksym(), a.Offset()) } // pfuncsym writes the static address of f to n. f must be a global function. // Neither n nor f is modified. func pfuncsym(n, f *ir.Node) { - if n.Op != ir.ONAME { - base.Fatalf("pfuncsym n op %v", n.Op) + if n.Op() != ir.ONAME { + base.Fatalf("pfuncsym n op %v", n.Op()) } - if n.Sym == nil { + if n.Sym() == nil { base.Fatalf("pfuncsym nil n sym") } if f.Class() != ir.PFUNC { base.Fatalf("pfuncsym class not PFUNC %d", f.Class()) } - s := n.Sym.Linksym() - s.WriteAddr(base.Ctxt, n.Xoffset, Widthptr, funcsym(f.Sym).Linksym(), f.Xoffset) + s := n.Sym().Linksym() + s.WriteAddr(base.Ctxt, n.Offset(), Widthptr, funcsym(f.Sym()).Linksym(), f.Offset()) } // litsym writes the static literal c to n. // Neither n nor c is modified. 
func litsym(n, c *ir.Node, wid int) { - if n.Op != ir.ONAME { - base.Fatalf("litsym n op %v", n.Op) + if n.Op() != ir.ONAME { + base.Fatalf("litsym n op %v", n.Op()) } - if n.Sym == nil { + if n.Sym() == nil { base.Fatalf("litsym nil n sym") } - if !types.Identical(n.Type, c.Type) { - base.Fatalf("litsym: type mismatch: %v has type %v, but %v has type %v", n, n.Type, c, c.Type) + if !types.Identical(n.Type(), c.Type()) { + base.Fatalf("litsym: type mismatch: %v has type %v, but %v has type %v", n, n.Type(), c, c.Type()) } - if c.Op == ir.ONIL { + if c.Op() == ir.ONIL { return } - if c.Op != ir.OLITERAL { - base.Fatalf("litsym c op %v", c.Op) + if c.Op() != ir.OLITERAL { + base.Fatalf("litsym c op %v", c.Op()) } - s := n.Sym.Linksym() + s := n.Sym().Linksym() switch u := c.Val(); u.Kind() { case constant.Bool: i := int64(obj.Bool2int(constant.BoolVal(u))) - s.WriteInt(base.Ctxt, n.Xoffset, wid, i) + s.WriteInt(base.Ctxt, n.Offset(), wid, i) case constant.Int: - s.WriteInt(base.Ctxt, n.Xoffset, wid, ir.Int64Val(n.Type, u)) + s.WriteInt(base.Ctxt, n.Offset(), wid, ir.Int64Val(n.Type(), u)) case constant.Float: f, _ := constant.Float64Val(u) - switch n.Type.Etype { + switch n.Type().Etype { case types.TFLOAT32: - s.WriteFloat32(base.Ctxt, n.Xoffset, float32(f)) + s.WriteFloat32(base.Ctxt, n.Offset(), float32(f)) case types.TFLOAT64: - s.WriteFloat64(base.Ctxt, n.Xoffset, f) + s.WriteFloat64(base.Ctxt, n.Offset(), f) } case constant.Complex: re, _ := constant.Float64Val(constant.Real(u)) im, _ := constant.Float64Val(constant.Imag(u)) - switch n.Type.Etype { + switch n.Type().Etype { case types.TCOMPLEX64: - s.WriteFloat32(base.Ctxt, n.Xoffset, float32(re)) - s.WriteFloat32(base.Ctxt, n.Xoffset+4, float32(im)) + s.WriteFloat32(base.Ctxt, n.Offset(), float32(re)) + s.WriteFloat32(base.Ctxt, n.Offset()+4, float32(im)) case types.TCOMPLEX128: - s.WriteFloat64(base.Ctxt, n.Xoffset, re) - s.WriteFloat64(base.Ctxt, n.Xoffset+8, im) + s.WriteFloat64(base.Ctxt, n.Offset(), re) + s.WriteFloat64(base.Ctxt, n.Offset()+8, im) } case constant.String: i := constant.StringVal(u) - symdata := stringsym(n.Pos, i) - s.WriteAddr(base.Ctxt, n.Xoffset, Widthptr, symdata, 0) - s.WriteInt(base.Ctxt, n.Xoffset+int64(Widthptr), Widthptr, int64(len(i))) + symdata := stringsym(n.Pos(), i) + s.WriteAddr(base.Ctxt, n.Offset(), Widthptr, symdata, 0) + s.WriteInt(base.Ctxt, n.Offset()+int64(Widthptr), Widthptr, int64(len(i))) default: base.Fatalf("litsym unhandled OLITERAL %v", c) diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 3bd49e8094efc..36a4095640603 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -53,11 +53,11 @@ type Order struct { // described in the comment at the top of the file. 
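// Illustrative effect: a statement such as
//
//	m[f()] = g()
//
// is rewritten so that f and g run in a fixed order, with their
// results saved in temporaries before the map assignment executes.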
func order(fn *ir.Node) { if base.Flag.W > 1 { - s := fmt.Sprintf("\nbefore order %v", fn.Func.Nname.Sym) - ir.DumpList(s, fn.Nbody) + s := fmt.Sprintf("\nbefore order %v", fn.Func().Nname.Sym()) + ir.DumpList(s, fn.Body()) } - orderBlock(&fn.Nbody, map[string][]*ir.Node{}) + orderBlock(fn.PtrBody(), map[string][]*ir.Node{}) } // newTemp allocates a new temporary with the given type, @@ -70,7 +70,7 @@ func (o *Order) newTemp(t *types.Type, clear bool) *ir.Node { key := t.LongString() a := o.free[key] for i, n := range a { - if types.Identical(t, n.Type) { + if types.Identical(t, n.Type()) { v = a[i] a[i] = a[len(a)-1] a = a[:len(a)-1] @@ -120,20 +120,20 @@ func (o *Order) cheapExpr(n *ir.Node) *ir.Node { return nil } - switch n.Op { + switch n.Op() { case ir.ONAME, ir.OLITERAL, ir.ONIL: return n case ir.OLEN, ir.OCAP: - l := o.cheapExpr(n.Left) - if l == n.Left { + l := o.cheapExpr(n.Left()) + if l == n.Left() { return n } a := ir.SepCopy(n) - a.Left = l + a.SetLeft(l) return typecheck(a, ctxExpr) } - return o.copyExpr(n, n.Type, false) + return o.copyExpr(n, n.Type(), false) } // safeExpr returns a safe version of n. @@ -144,46 +144,46 @@ func (o *Order) cheapExpr(n *ir.Node) *ir.Node { // // The intended use is to apply to x when rewriting x += y into x = x + y. func (o *Order) safeExpr(n *ir.Node) *ir.Node { - switch n.Op { + switch n.Op() { case ir.ONAME, ir.OLITERAL, ir.ONIL: return n case ir.ODOT, ir.OLEN, ir.OCAP: - l := o.safeExpr(n.Left) - if l == n.Left { + l := o.safeExpr(n.Left()) + if l == n.Left() { return n } a := ir.SepCopy(n) - a.Left = l + a.SetLeft(l) return typecheck(a, ctxExpr) case ir.ODOTPTR, ir.ODEREF: - l := o.cheapExpr(n.Left) - if l == n.Left { + l := o.cheapExpr(n.Left()) + if l == n.Left() { return n } a := ir.SepCopy(n) - a.Left = l + a.SetLeft(l) return typecheck(a, ctxExpr) case ir.OINDEX, ir.OINDEXMAP: var l *ir.Node - if n.Left.Type.IsArray() { - l = o.safeExpr(n.Left) + if n.Left().Type().IsArray() { + l = o.safeExpr(n.Left()) } else { - l = o.cheapExpr(n.Left) + l = o.cheapExpr(n.Left()) } - r := o.cheapExpr(n.Right) - if l == n.Left && r == n.Right { + r := o.cheapExpr(n.Right()) + if l == n.Left() && r == n.Right() { return n } a := ir.SepCopy(n) - a.Left = l - a.Right = r + a.SetLeft(l) + a.SetRight(r) return typecheck(a, ctxExpr) default: - base.Fatalf("order.safeExpr %v", n.Op) + base.Fatalf("order.safeExpr %v", n.Op()) return nil // not reached } } @@ -195,7 +195,7 @@ func (o *Order) safeExpr(n *ir.Node) *ir.Node { // because we emit explicit VARKILL instructions marking the end of those // temporaries' lifetimes. func isaddrokay(n *ir.Node) bool { - return islvalue(n) && (n.Op != ir.ONAME || n.Class() == ir.PEXTERN || ir.IsAutoTmp(n)) + return islvalue(n) && (n.Op() != ir.ONAME || n.Class() == ir.PEXTERN || ir.IsAutoTmp(n)) } // addrTemp ensures that n is okay to pass by address to runtime routines. @@ -204,11 +204,11 @@ func isaddrokay(n *ir.Node) bool { // The result of addrTemp MUST be assigned back to n, e.g. // n.Left = o.addrTemp(n.Left) func (o *Order) addrTemp(n *ir.Node) *ir.Node { - if n.Op == ir.OLITERAL || n.Op == ir.ONIL { + if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL { // TODO: expand this to all static composite literal nodes? 
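		// Sketch of the idea: constants are not addressable, so the 1
		// in ch <- 1 is materialized as a read-only static whose
		// address can be handed to the runtime, instead of copying it
		// into a fresh stack temporary at every use.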
n = defaultlit(n, nil) - dowidth(n.Type) - vstat := readonlystaticname(n.Type) + dowidth(n.Type()) + vstat := readonlystaticname(n.Type()) var s InitSchedule s.staticassign(vstat, n) if s.out != nil { @@ -220,7 +220,7 @@ func (o *Order) addrTemp(n *ir.Node) *ir.Node { if isaddrokay(n) { return n } - return o.copyExpr(n, n.Type, false) + return o.copyExpr(n, n.Type(), false) } // mapKeyTemp prepares n to be a key in a map runtime call and returns n. @@ -250,20 +250,20 @@ func (o *Order) mapKeyTemp(t *types.Type, n *ir.Node) *ir.Node { // comes up in important cases in practice. See issue 3512. func mapKeyReplaceStrConv(n *ir.Node) bool { var replaced bool - switch n.Op { + switch n.Op() { case ir.OBYTES2STR: - n.Op = ir.OBYTES2STRTMP + n.SetOp(ir.OBYTES2STRTMP) replaced = true case ir.OSTRUCTLIT: - for _, elem := range n.List.Slice() { - if mapKeyReplaceStrConv(elem.Left) { + for _, elem := range n.List().Slice() { + if mapKeyReplaceStrConv(elem.Left()) { replaced = true } } case ir.OARRAYLIT: - for _, elem := range n.List.Slice() { - if elem.Op == ir.OKEY { - elem = elem.Right + for _, elem := range n.List().Slice() { + if elem.Op() == ir.OKEY { + elem = elem.Right() } if mapKeyReplaceStrConv(elem) { replaced = true @@ -284,7 +284,7 @@ func (o *Order) markTemp() ordermarker { // which must have been returned by markTemp. func (o *Order) popTemp(mark ordermarker) { for _, n := range o.temp[mark:] { - key := n.Type.LongString() + key := n.Type().LongString() o.free[key] = append(o.free[key], n) } o.temp = o.temp[:mark] @@ -336,46 +336,46 @@ func orderMakeSliceCopy(s []*ir.Node) { asn := s[0] copyn := s[1] - if asn == nil || asn.Op != ir.OAS { + if asn == nil || asn.Op() != ir.OAS { return } - if asn.Left.Op != ir.ONAME { + if asn.Left().Op() != ir.ONAME { return } - if ir.IsBlank(asn.Left) { + if ir.IsBlank(asn.Left()) { return } - maken := asn.Right - if maken == nil || maken.Op != ir.OMAKESLICE { + maken := asn.Right() + if maken == nil || maken.Op() != ir.OMAKESLICE { return } - if maken.Esc == EscNone { + if maken.Esc() == EscNone { return } - if maken.Left == nil || maken.Right != nil { + if maken.Left() == nil || maken.Right() != nil { return } - if copyn.Op != ir.OCOPY { + if copyn.Op() != ir.OCOPY { return } - if copyn.Left.Op != ir.ONAME { + if copyn.Left().Op() != ir.ONAME { return } - if asn.Left.Sym != copyn.Left.Sym { + if asn.Left().Sym() != copyn.Left().Sym() { return } - if copyn.Right.Op != ir.ONAME { + if copyn.Right().Op() != ir.ONAME { return } - if copyn.Left.Sym == copyn.Right.Sym { + if copyn.Left().Sym() == copyn.Right().Sym() { return } - maken.Op = ir.OMAKESLICECOPY - maken.Right = copyn.Right + maken.SetOp(ir.OMAKESLICECOPY) + maken.SetRight(copyn.Right()) // Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s) - maken.SetBounded(maken.Left.Op == ir.OLEN && samesafeexpr(maken.Left.Left, copyn.Right)) + maken.SetBounded(maken.Left().Op() == ir.OLEN && samesafeexpr(maken.Left().Left(), copyn.Right())) maken = typecheck(maken, ctxExpr) @@ -393,7 +393,7 @@ func (o *Order) edge() { // Create a new uint8 counter to be allocated in section // __libfuzzer_extra_counters. counter := staticname(types.Types[types.TUINT8]) - counter.Name.SetLibfuzzerExtraCounter(true) + counter.Name().SetLibfuzzerExtraCounter(true) // counter += 1 incr := ir.Nod(ir.OASOP, counter, nodintconst(1)) @@ -451,36 +451,36 @@ func (o *Order) init(n *ir.Node) { if ir.MayBeShared(n) { // For concurrency safety, don't mutate potentially shared nodes. // First, ensure that no work is required here. 
- if n.Ninit.Len() > 0 { + if n.Init().Len() > 0 { base.Fatalf("order.init shared node with ninit") } return } - o.stmtList(n.Ninit) - n.Ninit.Set(nil) + o.stmtList(n.Init()) + n.PtrInit().Set(nil) } // call orders the call expression n. // n.Op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY. func (o *Order) call(n *ir.Node) { - if n.Ninit.Len() > 0 { + if n.Init().Len() > 0 { // Caller should have already called o.init(n). - base.Fatalf("%v with unexpected ninit", n.Op) + base.Fatalf("%v with unexpected ninit", n.Op()) } // Builtin functions. - if n.Op != ir.OCALLFUNC && n.Op != ir.OCALLMETH && n.Op != ir.OCALLINTER { - n.Left = o.expr(n.Left, nil) - n.Right = o.expr(n.Right, nil) - o.exprList(n.List) + if n.Op() != ir.OCALLFUNC && n.Op() != ir.OCALLMETH && n.Op() != ir.OCALLINTER { + n.SetLeft(o.expr(n.Left(), nil)) + n.SetRight(o.expr(n.Right(), nil)) + o.exprList(n.List()) return } fixVariadicCall(n) - n.Left = o.expr(n.Left, nil) - o.exprList(n.List) + n.SetLeft(o.expr(n.Left(), nil)) + o.exprList(n.List()) - if n.Op == ir.OCALLINTER { + if n.Op() == ir.OCALLINTER { return } keepAlive := func(arg *ir.Node) { @@ -488,19 +488,19 @@ func (o *Order) call(n *ir.Node) { // arrange for the pointer to be kept alive until the call returns, // by copying it into a temp and marking that temp // still alive when we pop the temp stack. - if arg.Op == ir.OCONVNOP && arg.Left.Type.IsUnsafePtr() { - x := o.copyExpr(arg.Left, arg.Left.Type, false) - arg.Left = x - x.Name.SetAddrtaken(true) // ensure SSA keeps the x variable - n.Nbody.Append(typecheck(ir.Nod(ir.OVARLIVE, x, nil), ctxStmt)) + if arg.Op() == ir.OCONVNOP && arg.Left().Type().IsUnsafePtr() { + x := o.copyExpr(arg.Left(), arg.Left().Type(), false) + arg.SetLeft(x) + x.Name().SetAddrtaken(true) // ensure SSA keeps the x variable + n.PtrBody().Append(typecheck(ir.Nod(ir.OVARLIVE, x, nil), ctxStmt)) } } // Check for "unsafe-uintptr" tag provided by escape analysis. - for i, param := range n.Left.Type.Params().FieldSlice() { + for i, param := range n.Left().Type().Params().FieldSlice() { if param.Note == unsafeUintptrTag || param.Note == uintptrEscapesTag { - if arg := n.List.Index(i); arg.Op == ir.OSLICELIT { - for _, elt := range arg.List.Slice() { + if arg := n.List().Index(i); arg.Op() == ir.OSLICELIT { + for _, elt := range arg.List().Slice() { keepAlive(elt) } } else { @@ -526,40 +526,40 @@ func (o *Order) call(n *ir.Node) { // And this only applies to the multiple-assignment form. // We could do a more precise analysis if needed, like in walk.go. func (o *Order) mapAssign(n *ir.Node) { - switch n.Op { + switch n.Op() { default: - base.Fatalf("order.mapAssign %v", n.Op) + base.Fatalf("order.mapAssign %v", n.Op()) case ir.OAS, ir.OASOP: - if n.Left.Op == ir.OINDEXMAP { + if n.Left().Op() == ir.OINDEXMAP { // Make sure we evaluate the RHS before starting the map insert. // We need to make sure the RHS won't panic. See issue 22881. 
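			// Roughly: m[k] = f() is ordered as
			//
			//	tmp := f()
			//	m[k] = tmp
			//
			// so that if f panics, no entry for k has been created.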
- if n.Right.Op == ir.OAPPEND { - s := n.Right.List.Slice()[1:] + if n.Right().Op() == ir.OAPPEND { + s := n.Right().List().Slice()[1:] for i, n := range s { s[i] = o.cheapExpr(n) } } else { - n.Right = o.cheapExpr(n.Right) + n.SetRight(o.cheapExpr(n.Right())) } } o.out = append(o.out, n) case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2MAPR, ir.OAS2FUNC: var post []*ir.Node - for i, m := range n.List.Slice() { + for i, m := range n.List().Slice() { switch { - case m.Op == ir.OINDEXMAP: - if !ir.IsAutoTmp(m.Left) { - m.Left = o.copyExpr(m.Left, m.Left.Type, false) + case m.Op() == ir.OINDEXMAP: + if !ir.IsAutoTmp(m.Left()) { + m.SetLeft(o.copyExpr(m.Left(), m.Left().Type(), false)) } - if !ir.IsAutoTmp(m.Right) { - m.Right = o.copyExpr(m.Right, m.Right.Type, false) + if !ir.IsAutoTmp(m.Right()) { + m.SetRight(o.copyExpr(m.Right(), m.Right().Type(), false)) } fallthrough - case instrumenting && n.Op == ir.OAS2FUNC && !ir.IsBlank(m): - t := o.newTemp(m.Type, false) - n.List.SetIndex(i, t) + case instrumenting && n.Op() == ir.OAS2FUNC && !ir.IsBlank(m): + t := o.newTemp(m.Type(), false) + n.List().SetIndex(i, t) a := ir.Nod(ir.OAS, m, t) a = typecheck(a, ctxStmt) post = append(post, a) @@ -582,43 +582,43 @@ func (o *Order) stmt(n *ir.Node) { lno := setlineno(n) o.init(n) - switch n.Op { + switch n.Op() { default: - base.Fatalf("order.stmt %v", n.Op) + base.Fatalf("order.stmt %v", n.Op()) case ir.OVARKILL, ir.OVARLIVE, ir.OINLMARK: o.out = append(o.out, n) case ir.OAS: t := o.markTemp() - n.Left = o.expr(n.Left, nil) - n.Right = o.expr(n.Right, n.Left) + n.SetLeft(o.expr(n.Left(), nil)) + n.SetRight(o.expr(n.Right(), n.Left())) o.mapAssign(n) o.cleanTemp(t) case ir.OASOP: t := o.markTemp() - n.Left = o.expr(n.Left, nil) - n.Right = o.expr(n.Right, nil) + n.SetLeft(o.expr(n.Left(), nil)) + n.SetRight(o.expr(n.Right(), nil)) - if instrumenting || n.Left.Op == ir.OINDEXMAP && (n.SubOp() == ir.ODIV || n.SubOp() == ir.OMOD) { + if instrumenting || n.Left().Op() == ir.OINDEXMAP && (n.SubOp() == ir.ODIV || n.SubOp() == ir.OMOD) { // Rewrite m[k] op= r into m[k] = m[k] op r so // that we can ensure that if op panics // because r is zero, the panic happens before // the map assignment. - n.Left = o.safeExpr(n.Left) + n.SetLeft(o.safeExpr(n.Left())) - l := treecopy(n.Left, src.NoXPos) - if l.Op == ir.OINDEXMAP { + l := treecopy(n.Left(), src.NoXPos) + if l.Op() == ir.OINDEXMAP { l.SetIndexMapLValue(false) } - l = o.copyExpr(l, n.Left.Type, false) - n.Right = ir.Nod(n.SubOp(), l, n.Right) - n.Right = typecheck(n.Right, ctxExpr) - n.Right = o.expr(n.Right, nil) + l = o.copyExpr(l, n.Left().Type(), false) + n.SetRight(ir.Nod(n.SubOp(), l, n.Right())) + n.SetRight(typecheck(n.Right(), ctxExpr)) + n.SetRight(o.expr(n.Right(), nil)) - n.Op = ir.OAS + n.SetOp(ir.OAS) n.ResetAux() } @@ -627,17 +627,17 @@ func (o *Order) stmt(n *ir.Node) { case ir.OAS2: t := o.markTemp() - o.exprList(n.List) - o.exprList(n.Rlist) + o.exprList(n.List()) + o.exprList(n.Rlist()) o.mapAssign(n) o.cleanTemp(t) // Special: avoid copy of func call n.Right case ir.OAS2FUNC: t := o.markTemp() - o.exprList(n.List) - o.init(n.Right) - o.call(n.Right) + o.exprList(n.List()) + o.init(n.Right()) + o.call(n.Right()) o.as2(n) o.cleanTemp(t) @@ -649,19 +649,19 @@ func (o *Order) stmt(n *ir.Node) { // and make sure OINDEXMAP is not copied out. 
case ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OAS2MAPR: t := o.markTemp() - o.exprList(n.List) + o.exprList(n.List()) - switch r := n.Right; r.Op { + switch r := n.Right(); r.Op() { case ir.ODOTTYPE2, ir.ORECV: - r.Left = o.expr(r.Left, nil) + r.SetLeft(o.expr(r.Left(), nil)) case ir.OINDEXMAP: - r.Left = o.expr(r.Left, nil) - r.Right = o.expr(r.Right, nil) + r.SetLeft(o.expr(r.Left(), nil)) + r.SetRight(o.expr(r.Right(), nil)) // See similar conversion for OINDEXMAP below. - _ = mapKeyReplaceStrConv(r.Right) - r.Right = o.mapKeyTemp(r.Left.Type, r.Right) + _ = mapKeyReplaceStrConv(r.Right()) + r.SetRight(o.mapKeyTemp(r.Left().Type(), r.Right())) default: - base.Fatalf("order.stmt: %v", r.Op) + base.Fatalf("order.stmt: %v", r.Op()) } o.okAs2(n) @@ -669,7 +669,7 @@ func (o *Order) stmt(n *ir.Node) { // Special: does not save n onto out. case ir.OBLOCK, ir.OEMPTY: - o.stmtList(n.List) + o.stmtList(n.List()) // Special: n->left is not an expression; save as is. case ir.OBREAK, @@ -697,26 +697,26 @@ func (o *Order) stmt(n *ir.Node) { ir.ORECOVER, ir.ORECV: t := o.markTemp() - n.Left = o.expr(n.Left, nil) - n.Right = o.expr(n.Right, nil) - o.exprList(n.List) - o.exprList(n.Rlist) + n.SetLeft(o.expr(n.Left(), nil)) + n.SetRight(o.expr(n.Right(), nil)) + o.exprList(n.List()) + o.exprList(n.Rlist()) o.out = append(o.out, n) o.cleanTemp(t) // Special: order arguments to inner call but not call itself. case ir.ODEFER, ir.OGO: t := o.markTemp() - o.init(n.Left) - o.call(n.Left) + o.init(n.Left()) + o.call(n.Left()) o.out = append(o.out, n) o.cleanTemp(t) case ir.ODELETE: t := o.markTemp() - n.List.SetFirst(o.expr(n.List.First(), nil)) - n.List.SetSecond(o.expr(n.List.Second(), nil)) - n.List.SetSecond(o.mapKeyTemp(n.List.First().Type, n.List.Second())) + n.List().SetFirst(o.expr(n.List().First(), nil)) + n.List().SetSecond(o.expr(n.List().Second(), nil)) + n.List().SetSecond(o.mapKeyTemp(n.List().First().Type(), n.List().Second())) o.out = append(o.out, n) o.cleanTemp(t) @@ -724,10 +724,10 @@ func (o *Order) stmt(n *ir.Node) { // beginning of loop body and after for statement. case ir.OFOR: t := o.markTemp() - n.Left = o.exprInPlace(n.Left) - n.Nbody.Prepend(o.cleanTempNoPop(t)...) - orderBlock(&n.Nbody, o.free) - n.Right = orderStmtInPlace(n.Right, o.free) + n.SetLeft(o.exprInPlace(n.Left())) + n.PtrBody().Prepend(o.cleanTempNoPop(t)...) + orderBlock(n.PtrBody(), o.free) + n.SetRight(orderStmtInPlace(n.Right(), o.free)) o.out = append(o.out, n) o.cleanTemp(t) @@ -735,21 +735,21 @@ func (o *Order) stmt(n *ir.Node) { // beginning of both branches. case ir.OIF: t := o.markTemp() - n.Left = o.exprInPlace(n.Left) - n.Nbody.Prepend(o.cleanTempNoPop(t)...) - n.Rlist.Prepend(o.cleanTempNoPop(t)...) + n.SetLeft(o.exprInPlace(n.Left())) + n.PtrBody().Prepend(o.cleanTempNoPop(t)...) + n.PtrRlist().Prepend(o.cleanTempNoPop(t)...) o.popTemp(t) - orderBlock(&n.Nbody, o.free) - orderBlock(&n.Rlist, o.free) + orderBlock(n.PtrBody(), o.free) + orderBlock(n.PtrRlist(), o.free) o.out = append(o.out, n) // Special: argument will be converted to interface using convT2E // so make sure it is an addressable temporary. case ir.OPANIC: t := o.markTemp() - n.Left = o.expr(n.Left, nil) - if !n.Left.Type.IsInterface() { - n.Left = o.addrTemp(n.Left) + n.SetLeft(o.expr(n.Left(), nil)) + if !n.Left().Type().IsInterface() { + n.SetLeft(o.addrTemp(n.Left())) } o.out = append(o.out, n) o.cleanTemp(t) @@ -768,20 +768,20 @@ func (o *Order) stmt(n *ir.Node) { // Mark []byte(str) range expression to reuse string backing storage. 
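		// For example, for _, b := range []byte(s) can iterate
		// directly over s's bytes without allocating a copy.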
// It is safe because the storage cannot be mutated. - if n.Right.Op == ir.OSTR2BYTES { - n.Right.Op = ir.OSTR2BYTESTMP + if n.Right().Op() == ir.OSTR2BYTES { + n.Right().SetOp(ir.OSTR2BYTESTMP) } t := o.markTemp() - n.Right = o.expr(n.Right, nil) + n.SetRight(o.expr(n.Right(), nil)) orderBody := true - switch n.Type.Etype { + switch n.Type().Etype { default: - base.Fatalf("order.stmt range %v", n.Type) + base.Fatalf("order.stmt range %v", n.Type()) case types.TARRAY, types.TSLICE: - if n.List.Len() < 2 || ir.IsBlank(n.List.Second()) { + if n.List().Len() < 2 || ir.IsBlank(n.List().Second()) { // for i := range x will only use x once, to compute len(x). // No need to copy it. break @@ -791,15 +791,15 @@ func (o *Order) stmt(n *ir.Node) { case types.TCHAN, types.TSTRING: // chan, string, slice, array ranges use value multiple times. // make copy. - r := n.Right + r := n.Right() - if r.Type.IsString() && r.Type != types.Types[types.TSTRING] { + if r.Type().IsString() && r.Type() != types.Types[types.TSTRING] { r = ir.Nod(ir.OCONV, r, nil) - r.Type = types.Types[types.TSTRING] + r.SetType(types.Types[types.TSTRING]) r = typecheck(r, ctxExpr) } - n.Right = o.copyExpr(r, r.Type, false) + n.SetRight(o.copyExpr(r, r.Type(), false)) case types.TMAP: if isMapClear(n) { @@ -813,22 +813,22 @@ func (o *Order) stmt(n *ir.Node) { // copy the map value in case it is a map literal. // TODO(rsc): Make tmp = literal expressions reuse tmp. // For maps tmp is just one word so it hardly matters. - r := n.Right - n.Right = o.copyExpr(r, r.Type, false) + r := n.Right() + n.SetRight(o.copyExpr(r, r.Type(), false)) // prealloc[n] is the temp for the iterator. // hiter contains pointers and needs to be zeroed. - prealloc[n] = o.newTemp(hiter(n.Type), true) + prealloc[n] = o.newTemp(hiter(n.Type()), true) } - o.exprListInPlace(n.List) + o.exprListInPlace(n.List()) if orderBody { - orderBlock(&n.Nbody, o.free) + orderBlock(n.PtrBody(), o.free) } o.out = append(o.out, n) o.cleanTemp(t) case ir.ORETURN: - o.exprList(n.List) + o.exprList(n.List()) o.out = append(o.out, n) // Special: clean case temporaries in each block entry. @@ -843,25 +843,25 @@ func (o *Order) stmt(n *ir.Node) { case ir.OSELECT: t := o.markTemp() - for _, n2 := range n.List.Slice() { - if n2.Op != ir.OCASE { - base.Fatalf("order select case %v", n2.Op) + for _, n2 := range n.List().Slice() { + if n2.Op() != ir.OCASE { + base.Fatalf("order select case %v", n2.Op()) } - r := n2.Left + r := n2.Left() setlineno(n2) // Append any new body prologue to ninit. // The next loop will insert ninit into nbody. - if n2.Ninit.Len() != 0 { + if n2.Init().Len() != 0 { base.Fatalf("order select ninit") } if r == nil { continue } - switch r.Op { + switch r.Op() { default: ir.Dump("select case", r) - base.Fatalf("unknown op in select %v", r.Op) + base.Fatalf("unknown op in select %v", r.Op()) // If this is case x := <-ch or case x, y := <-ch, the case has // the ODCL nodes to declare x and y. 
We want to delay that @@ -870,19 +870,19 @@ func (o *Order) stmt(n *ir.Node) { case ir.OSELRECV, ir.OSELRECV2: if r.Colas() { i := 0 - if r.Ninit.Len() != 0 && r.Ninit.First().Op == ir.ODCL && r.Ninit.First().Left == r.Left { + if r.Init().Len() != 0 && r.Init().First().Op() == ir.ODCL && r.Init().First().Left() == r.Left() { i++ } - if i < r.Ninit.Len() && r.Ninit.Index(i).Op == ir.ODCL && r.List.Len() != 0 && r.Ninit.Index(i).Left == r.List.First() { + if i < r.Init().Len() && r.Init().Index(i).Op() == ir.ODCL && r.List().Len() != 0 && r.Init().Index(i).Left() == r.List().First() { i++ } - if i >= r.Ninit.Len() { - r.Ninit.Set(nil) + if i >= r.Init().Len() { + r.PtrInit().Set(nil) } } - if r.Ninit.Len() != 0 { - ir.DumpList("ninit", r.Ninit) + if r.Init().Len() != 0 { + ir.DumpList("ninit", r.Init()) base.Fatalf("ninit on select recv") } @@ -891,10 +891,10 @@ func (o *Order) stmt(n *ir.Node) { // r->left is x, r->ntest is ok, r->right is ORECV, r->right->left is c. // r->left == N means 'case <-c'. // c is always evaluated; x and ok are only evaluated when assigned. - r.Right.Left = o.expr(r.Right.Left, nil) + r.Right().SetLeft(o.expr(r.Right().Left(), nil)) - if r.Right.Left.Op != ir.ONAME { - r.Right.Left = o.copyExpr(r.Right.Left, r.Right.Left.Type, false) + if r.Right().Left().Op() != ir.ONAME { + r.Right().SetLeft(o.copyExpr(r.Right().Left(), r.Right().Left().Type(), false)) } // Introduce temporary for receive and move actual copy into case body. @@ -903,75 +903,75 @@ func (o *Order) stmt(n *ir.Node) { // temporary per distinct type, sharing the temp among all receives // with that temp. Similarly one ok bool could be shared among all // the x,ok receives. Not worth doing until there's a clear need. - if r.Left != nil && ir.IsBlank(r.Left) { - r.Left = nil + if r.Left() != nil && ir.IsBlank(r.Left()) { + r.SetLeft(nil) } - if r.Left != nil { + if r.Left() != nil { // use channel element type for temporary to avoid conversions, // such as in case interfacevalue = <-intchan. // the conversion happens in the OAS instead. 
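				// Sketch: case x = <-ch becomes, in effect,
				//
				//	tmp := <-ch // tmp has ch's element type
				//	x = tmp     // conversion, if any, happens here
				//
				// with the copy moved into the case's body.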
- tmp1 := r.Left + tmp1 := r.Left() if r.Colas() { tmp2 := ir.Nod(ir.ODCL, tmp1, nil) tmp2 = typecheck(tmp2, ctxStmt) - n2.Ninit.Append(tmp2) + n2.PtrInit().Append(tmp2) } - r.Left = o.newTemp(r.Right.Left.Type.Elem(), r.Right.Left.Type.Elem().HasPointers()) - tmp2 := ir.Nod(ir.OAS, tmp1, r.Left) + r.SetLeft(o.newTemp(r.Right().Left().Type().Elem(), r.Right().Left().Type().Elem().HasPointers())) + tmp2 := ir.Nod(ir.OAS, tmp1, r.Left()) tmp2 = typecheck(tmp2, ctxStmt) - n2.Ninit.Append(tmp2) + n2.PtrInit().Append(tmp2) } - if r.List.Len() != 0 && ir.IsBlank(r.List.First()) { - r.List.Set(nil) + if r.List().Len() != 0 && ir.IsBlank(r.List().First()) { + r.PtrList().Set(nil) } - if r.List.Len() != 0 { - tmp1 := r.List.First() + if r.List().Len() != 0 { + tmp1 := r.List().First() if r.Colas() { tmp2 := ir.Nod(ir.ODCL, tmp1, nil) tmp2 = typecheck(tmp2, ctxStmt) - n2.Ninit.Append(tmp2) + n2.PtrInit().Append(tmp2) } - r.List.Set1(o.newTemp(types.Types[types.TBOOL], false)) - tmp2 := okas(tmp1, r.List.First()) + r.PtrList().Set1(o.newTemp(types.Types[types.TBOOL], false)) + tmp2 := okas(tmp1, r.List().First()) tmp2 = typecheck(tmp2, ctxStmt) - n2.Ninit.Append(tmp2) + n2.PtrInit().Append(tmp2) } - orderBlock(&n2.Ninit, o.free) + orderBlock(n2.PtrInit(), o.free) case ir.OSEND: - if r.Ninit.Len() != 0 { - ir.DumpList("ninit", r.Ninit) + if r.Init().Len() != 0 { + ir.DumpList("ninit", r.Init()) base.Fatalf("ninit on select send") } // case c <- x // r->left is c, r->right is x, both are always evaluated. - r.Left = o.expr(r.Left, nil) + r.SetLeft(o.expr(r.Left(), nil)) - if !ir.IsAutoTmp(r.Left) { - r.Left = o.copyExpr(r.Left, r.Left.Type, false) + if !ir.IsAutoTmp(r.Left()) { + r.SetLeft(o.copyExpr(r.Left(), r.Left().Type(), false)) } - r.Right = o.expr(r.Right, nil) - if !ir.IsAutoTmp(r.Right) { - r.Right = o.copyExpr(r.Right, r.Right.Type, false) + r.SetRight(o.expr(r.Right(), nil)) + if !ir.IsAutoTmp(r.Right()) { + r.SetRight(o.copyExpr(r.Right(), r.Right().Type(), false)) } } } // Now that we have accumulated all the temporaries, clean them. // Also insert any ninit queued during the previous loop. // (The temporary cleaning must follow that ninit work.) - for _, n3 := range n.List.Slice() { - orderBlock(&n3.Nbody, o.free) - n3.Nbody.Prepend(o.cleanTempNoPop(t)...) + for _, n3 := range n.List().Slice() { + orderBlock(n3.PtrBody(), o.free) + n3.PtrBody().Prepend(o.cleanTempNoPop(t)...) // TODO(mdempsky): Is this actually necessary? // walkselect appears to walk Ninit. - n3.Nbody.Prepend(n3.Ninit.Slice()...) - n3.Ninit.Set(nil) + n3.PtrBody().Prepend(n3.Init().Slice()...) + n3.PtrInit().Set(nil) } o.out = append(o.out, n) @@ -980,14 +980,14 @@ func (o *Order) stmt(n *ir.Node) { // Special: value being sent is passed as a pointer; make it addressable. case ir.OSEND: t := o.markTemp() - n.Left = o.expr(n.Left, nil) - n.Right = o.expr(n.Right, nil) + n.SetLeft(o.expr(n.Left(), nil)) + n.SetRight(o.expr(n.Right(), nil)) if instrumenting { // Force copying to the stack so that (chan T)(nil) <- x // is still instrumented as a read of x. - n.Right = o.copyExpr(n.Right, n.Right.Type, false) + n.SetRight(o.copyExpr(n.Right(), n.Right().Type(), false)) } else { - n.Right = o.addrTemp(n.Right) + n.SetRight(o.addrTemp(n.Right())) } o.out = append(o.out, n) o.cleanTemp(t) @@ -1002,17 +1002,17 @@ func (o *Order) stmt(n *ir.Node) { case ir.OSWITCH: if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) { // Add empty "default:" case for instrumentation. 
- n.List.Append(ir.Nod(ir.OCASE, nil, nil)) + n.PtrList().Append(ir.Nod(ir.OCASE, nil, nil)) } t := o.markTemp() - n.Left = o.expr(n.Left, nil) - for _, ncas := range n.List.Slice() { - if ncas.Op != ir.OCASE { - base.Fatalf("order switch case %v", ncas.Op) + n.SetLeft(o.expr(n.Left(), nil)) + for _, ncas := range n.List().Slice() { + if ncas.Op() != ir.OCASE { + base.Fatalf("order switch case %v", ncas.Op()) } - o.exprListInPlace(ncas.List) - orderBlock(&ncas.Nbody, o.free) + o.exprListInPlace(ncas.List()) + orderBlock(ncas.PtrBody(), o.free) } o.out = append(o.out, n) @@ -1023,11 +1023,11 @@ func (o *Order) stmt(n *ir.Node) { } func hasDefaultCase(n *ir.Node) bool { - for _, ncas := range n.List.Slice() { - if ncas.Op != ir.OCASE { - base.Fatalf("expected case, found %v", ncas.Op) + for _, ncas := range n.List().Slice() { + if ncas.Op() != ir.OCASE { + base.Fatalf("expected case, found %v", ncas.Op()) } - if ncas.List.Len() == 0 { + if ncas.List().Len() == 0 { return true } } @@ -1069,21 +1069,21 @@ func (o *Order) expr(n, lhs *ir.Node) *ir.Node { lno := setlineno(n) o.init(n) - switch n.Op { + switch n.Op() { default: - n.Left = o.expr(n.Left, nil) - n.Right = o.expr(n.Right, nil) - o.exprList(n.List) - o.exprList(n.Rlist) + n.SetLeft(o.expr(n.Left(), nil)) + n.SetRight(o.expr(n.Right(), nil)) + o.exprList(n.List()) + o.exprList(n.Rlist()) // Addition of strings turns into a function call. // Allocate a temporary to hold the strings. // Fewer than 5 strings use direct runtime helpers. case ir.OADDSTR: - o.exprList(n.List) + o.exprList(n.List()) - if n.List.Len() > 5 { - t := types.NewArray(types.Types[types.TSTRING], int64(n.List.Len())) + if n.List().Len() > 5 { + t := types.NewArray(types.Types[types.TSTRING], int64(n.List().Len())) prealloc[n] = o.newTemp(t, false) } @@ -1097,22 +1097,22 @@ func (o *Order) expr(n, lhs *ir.Node) *ir.Node { hasbyte := false haslit := false - for _, n1 := range n.List.Slice() { - hasbyte = hasbyte || n1.Op == ir.OBYTES2STR - haslit = haslit || n1.Op == ir.OLITERAL && len(n1.StringVal()) != 0 + for _, n1 := range n.List().Slice() { + hasbyte = hasbyte || n1.Op() == ir.OBYTES2STR + haslit = haslit || n1.Op() == ir.OLITERAL && len(n1.StringVal()) != 0 } if haslit && hasbyte { - for _, n2 := range n.List.Slice() { - if n2.Op == ir.OBYTES2STR { - n2.Op = ir.OBYTES2STRTMP + for _, n2 := range n.List().Slice() { + if n2.Op() == ir.OBYTES2STR { + n2.SetOp(ir.OBYTES2STRTMP) } } } case ir.OINDEXMAP: - n.Left = o.expr(n.Left, nil) - n.Right = o.expr(n.Right, nil) + n.SetLeft(o.expr(n.Left(), nil)) + n.SetRight(o.expr(n.Right(), nil)) needCopy := false if !n.IndexMapLValue() { @@ -1120,7 +1120,7 @@ func (o *Order) expr(n, lhs *ir.Node) *ir.Node { // can not be changed before the map index by forcing // the map index to happen immediately following the // conversions. See copyExpr a few lines below. - needCopy = mapKeyReplaceStrConv(n.Right) + needCopy = mapKeyReplaceStrConv(n.Right()) if instrumenting { // Race detector needs the copy so it can @@ -1130,37 +1130,37 @@ func (o *Order) expr(n, lhs *ir.Node) *ir.Node { } // key must be addressable - n.Right = o.mapKeyTemp(n.Left.Type, n.Right) + n.SetRight(o.mapKeyTemp(n.Left().Type(), n.Right())) if needCopy { - n = o.copyExpr(n, n.Type, false) + n = o.copyExpr(n, n.Type(), false) } // concrete type (not interface) argument might need an addressable // temporary to pass to the runtime conversion routine. 
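	// Sketch (assumed runtime shape): converting a struct value to an
	// interface may call a helper that conceptually takes the operand
	// by pointer, e.g. convT(typ, &tmp), so the value is spilled to an
	// addressable temporary first.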
case ir.OCONVIFACE: - n.Left = o.expr(n.Left, nil) - if n.Left.Type.IsInterface() { + n.SetLeft(o.expr(n.Left(), nil)) + if n.Left().Type().IsInterface() { break } - if _, needsaddr := convFuncName(n.Left.Type, n.Type); needsaddr || isStaticCompositeLiteral(n.Left) { + if _, needsaddr := convFuncName(n.Left().Type(), n.Type()); needsaddr || isStaticCompositeLiteral(n.Left()) { // Need a temp if we need to pass the address to the conversion function. // We also process static composite literal node here, making a named static global // whose address we can put directly in an interface (see OCONVIFACE case in walk). - n.Left = o.addrTemp(n.Left) + n.SetLeft(o.addrTemp(n.Left())) } case ir.OCONVNOP: - if n.Type.IsKind(types.TUNSAFEPTR) && n.Left.Type.IsKind(types.TUINTPTR) && (n.Left.Op == ir.OCALLFUNC || n.Left.Op == ir.OCALLINTER || n.Left.Op == ir.OCALLMETH) { + if n.Type().IsKind(types.TUNSAFEPTR) && n.Left().Type().IsKind(types.TUINTPTR) && (n.Left().Op() == ir.OCALLFUNC || n.Left().Op() == ir.OCALLINTER || n.Left().Op() == ir.OCALLMETH) { // When reordering unsafe.Pointer(f()) into a separate // statement, the conversion and function call must stay // together. See golang.org/issue/15329. - o.init(n.Left) - o.call(n.Left) - if lhs == nil || lhs.Op != ir.ONAME || instrumenting { - n = o.copyExpr(n, n.Type, false) + o.init(n.Left()) + o.call(n.Left()) + if lhs == nil || lhs.Op() != ir.ONAME || instrumenting { + n = o.copyExpr(n, n.Type(), false) } } else { - n.Left = o.expr(n.Left, nil) + n.SetLeft(o.expr(n.Left(), nil)) } case ir.OANDAND, ir.OOROR: @@ -1173,10 +1173,10 @@ func (o *Order) expr(n, lhs *ir.Node) *ir.Node { // } // ... = r - r := o.newTemp(n.Type, false) + r := o.newTemp(n.Type(), false) // Evaluate left-hand side. - lhs := o.expr(n.Left, nil) + lhs := o.expr(n.Left(), nil) o.out = append(o.out, typecheck(ir.Nod(ir.OAS, r, lhs), ctxStmt)) // Evaluate right-hand side, save generated code. @@ -1184,7 +1184,7 @@ func (o *Order) expr(n, lhs *ir.Node) *ir.Node { o.out = nil t := o.markTemp() o.edge() - rhs := o.expr(n.Right, nil) + rhs := o.expr(n.Right(), nil) o.out = append(o.out, typecheck(ir.Nod(ir.OAS, r, rhs), ctxStmt)) o.cleanTemp(t) gen := o.out @@ -1192,10 +1192,10 @@ func (o *Order) expr(n, lhs *ir.Node) *ir.Node { // If left-hand side doesn't cause a short-circuit, issue right-hand side. nif := ir.Nod(ir.OIF, r, nil) - if n.Op == ir.OANDAND { - nif.Nbody.Set(gen) + if n.Op() == ir.OANDAND { + nif.PtrBody().Set(gen) } else { - nif.Rlist.Set(gen) + nif.PtrRlist().Set(gen) } o.out = append(o.out, nif) n = r @@ -1221,30 +1221,30 @@ func (o *Order) expr(n, lhs *ir.Node) *ir.Node { if isRuneCount(n) { // len([]rune(s)) is rewritten to runtime.countrunes(s) later. - n.Left.Left = o.expr(n.Left.Left, nil) + n.Left().SetLeft(o.expr(n.Left().Left(), nil)) } else { o.call(n) } - if lhs == nil || lhs.Op != ir.ONAME || instrumenting { - n = o.copyExpr(n, n.Type, false) + if lhs == nil || lhs.Op() != ir.ONAME || instrumenting { + n = o.copyExpr(n, n.Type(), false) } case ir.OAPPEND: // Check for append(x, make([]T, y)...) . 
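		// Illustrative source form:
		//
		//	s = append(s, make([]T, n)...)
		//
		// Only x and y are ordered here; keeping the OMAKESLICE intact
		// is intended to let a later rewrite extend s by y zeroed
		// elements without materializing the temporary slice.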
if isAppendOfMake(n) { - n.List.SetFirst(o.expr(n.List.First(), nil)) // order x - n.List.Second().Left = o.expr(n.List.Second().Left, nil) // order y + n.List().SetFirst(o.expr(n.List().First(), nil)) // order x + n.List().Second().SetLeft(o.expr(n.List().Second().Left(), nil)) // order y } else { - o.exprList(n.List) + o.exprList(n.List()) } - if lhs == nil || lhs.Op != ir.ONAME && !samesafeexpr(lhs, n.List.First()) { - n = o.copyExpr(n, n.Type, false) + if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.List().First()) { + n = o.copyExpr(n, n.Type(), false) } case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR: - n.Left = o.expr(n.Left, nil) + n.SetLeft(o.expr(n.Left(), nil)) low, high, max := n.SliceBounds() low = o.expr(low, nil) low = o.cheapExpr(low) @@ -1253,25 +1253,25 @@ func (o *Order) expr(n, lhs *ir.Node) *ir.Node { max = o.expr(max, nil) max = o.cheapExpr(max) n.SetSliceBounds(low, high, max) - if lhs == nil || lhs.Op != ir.ONAME && !samesafeexpr(lhs, n.Left) { - n = o.copyExpr(n, n.Type, false) + if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.Left()) { + n = o.copyExpr(n, n.Type(), false) } case ir.OCLOSURE: - if n.Transient() && n.Func.ClosureVars.Len() > 0 { + if n.Transient() && n.Func().ClosureVars.Len() > 0 { prealloc[n] = o.newTemp(closureType(n), false) } case ir.OSLICELIT, ir.OCALLPART: - n.Left = o.expr(n.Left, nil) - n.Right = o.expr(n.Right, nil) - o.exprList(n.List) - o.exprList(n.Rlist) + n.SetLeft(o.expr(n.Left(), nil)) + n.SetRight(o.expr(n.Right(), nil)) + o.exprList(n.List()) + o.exprList(n.Rlist()) if n.Transient() { var t *types.Type - switch n.Op { + switch n.Op() { case ir.OSLICELIT: - t = types.NewArray(n.Type.Elem(), n.Right.Int64Val()) + t = types.NewArray(n.Type().Elem(), n.Right().Int64Val()) case ir.OCALLPART: t = partialCallType(n) } @@ -1279,37 +1279,37 @@ func (o *Order) expr(n, lhs *ir.Node) *ir.Node { } case ir.ODOTTYPE, ir.ODOTTYPE2: - n.Left = o.expr(n.Left, nil) - if !isdirectiface(n.Type) || instrumenting { - n = o.copyExpr(n, n.Type, true) + n.SetLeft(o.expr(n.Left(), nil)) + if !isdirectiface(n.Type()) || instrumenting { + n = o.copyExpr(n, n.Type(), true) } case ir.ORECV: - n.Left = o.expr(n.Left, nil) - n = o.copyExpr(n, n.Type, true) + n.SetLeft(o.expr(n.Left(), nil)) + n = o.copyExpr(n, n.Type(), true) case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: - n.Left = o.expr(n.Left, nil) - n.Right = o.expr(n.Right, nil) + n.SetLeft(o.expr(n.Left(), nil)) + n.SetRight(o.expr(n.Right(), nil)) - t := n.Left.Type + t := n.Left().Type() switch { case t.IsString(): // Mark string(byteSlice) arguments to reuse byteSlice backing // buffer during conversion. String comparison does not // memorize the strings for later use, so it is safe. - if n.Left.Op == ir.OBYTES2STR { - n.Left.Op = ir.OBYTES2STRTMP + if n.Left().Op() == ir.OBYTES2STR { + n.Left().SetOp(ir.OBYTES2STRTMP) } - if n.Right.Op == ir.OBYTES2STR { - n.Right.Op = ir.OBYTES2STRTMP + if n.Right().Op() == ir.OBYTES2STR { + n.Right().SetOp(ir.OBYTES2STRTMP) } case t.IsStruct() || t.IsArray(): // for complex comparisons, we need both args to be // addressable so we can pass them to the runtime. 
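	// E.g. a == b on struct or array operands becomes, roughly, a call of
	// the shape memequal(&a, &b, size) (a hedged sketch; the exact runtime
	// helper depends on the type), hence the address-taken temporaries.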
- n.Left = o.addrTemp(n.Left) - n.Right = o.addrTemp(n.Right) + n.SetLeft(o.addrTemp(n.Left())) + n.SetRight(o.addrTemp(n.Right())) } case ir.OMAPLIT: // Order map by converting: @@ -1327,15 +1327,15 @@ func (o *Order) expr(n, lhs *ir.Node) *ir.Node { // Without this special case, order would otherwise compute all // the keys and values before storing any of them to the map. // See issue 26552. - entries := n.List.Slice() + entries := n.List().Slice() statics := entries[:0] var dynamics []*ir.Node for _, r := range entries { - if r.Op != ir.OKEY { + if r.Op() != ir.OKEY { base.Fatalf("OMAPLIT entry not OKEY: %v\n", r) } - if !isStaticCompositeLiteral(r.Left) || !isStaticCompositeLiteral(r.Right) { + if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) { dynamics = append(dynamics, r) continue } @@ -1343,21 +1343,21 @@ func (o *Order) expr(n, lhs *ir.Node) *ir.Node { // Recursively ordering some static entries can change them to dynamic; // e.g., OCONVIFACE nodes. See #31777. r = o.expr(r, nil) - if !isStaticCompositeLiteral(r.Left) || !isStaticCompositeLiteral(r.Right) { + if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) { dynamics = append(dynamics, r) continue } statics = append(statics, r) } - n.List.Set(statics) + n.PtrList().Set(statics) if len(dynamics) == 0 { break } // Emit the creation of the map (with all its static entries). - m := o.newTemp(n.Type, false) + m := o.newTemp(n.Type(), false) as := ir.Nod(ir.OAS, m, n) typecheck(as, ctxStmt) o.stmt(as) @@ -1365,7 +1365,7 @@ func (o *Order) expr(n, lhs *ir.Node) *ir.Node { // Emit eval+insert of dynamic entries, one at a time. for _, r := range dynamics { - as := ir.Nod(ir.OAS, ir.Nod(ir.OINDEX, n, r.Left), r.Right) + as := ir.Nod(ir.OAS, ir.Nod(ir.OINDEX, n, r.Left()), r.Right()) typecheck(as, ctxStmt) // Note: this converts the OINDEX to an OINDEXMAP o.stmt(as) } @@ -1379,7 +1379,7 @@ func (o *Order) expr(n, lhs *ir.Node) *ir.Node { // including an explicit conversion if necessary. func okas(ok, val *ir.Node) *ir.Node { if !ir.IsBlank(ok) { - val = conv(val, ok.Type) + val = conv(val, ok.Type()) } return ir.Nod(ir.OAS, ok, val) } @@ -1395,10 +1395,10 @@ func okas(ok, val *ir.Node) *ir.Node { func (o *Order) as2(n *ir.Node) { tmplist := []*ir.Node{} left := []*ir.Node{} - for ni, l := range n.List.Slice() { + for ni, l := range n.List().Slice() { if !ir.IsBlank(l) { - tmp := o.newTemp(l.Type, l.Type.HasPointers()) - n.List.SetIndex(ni, tmp) + tmp := o.newTemp(l.Type(), l.Type().HasPointers()) + n.List().SetIndex(ni, tmp) tmplist = append(tmplist, tmp) left = append(left, l) } @@ -1407,8 +1407,8 @@ func (o *Order) as2(n *ir.Node) { o.out = append(o.out, n) as := ir.Nod(ir.OAS2, nil, nil) - as.List.Set(left) - as.Rlist.Set(tmplist) + as.PtrList().Set(left) + as.PtrRlist().Set(tmplist) as = typecheck(as, ctxStmt) o.stmt(as) } @@ -1417,27 +1417,27 @@ func (o *Order) as2(n *ir.Node) { // Just like as2, this also adds temporaries to ensure left-to-right assignment. 
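// For instance (an assumed example), v, ok := <-ch is rewritten as
//
//	tmp1, tmp2 := <-ch
//	v, ok = tmp1, tmp2
//
// so the operation completes before either destination is written.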
func (o *Order) okAs2(n *ir.Node) { var tmp1, tmp2 *ir.Node - if !ir.IsBlank(n.List.First()) { - typ := n.Right.Type + if !ir.IsBlank(n.List().First()) { + typ := n.Right().Type() tmp1 = o.newTemp(typ, typ.HasPointers()) } - if !ir.IsBlank(n.List.Second()) { + if !ir.IsBlank(n.List().Second()) { tmp2 = o.newTemp(types.Types[types.TBOOL], false) } o.out = append(o.out, n) if tmp1 != nil { - r := ir.Nod(ir.OAS, n.List.First(), tmp1) + r := ir.Nod(ir.OAS, n.List().First(), tmp1) r = typecheck(r, ctxStmt) o.mapAssign(r) - n.List.SetFirst(tmp1) + n.List().SetFirst(tmp1) } if tmp2 != nil { - r := okas(n.List.Second(), tmp2) + r := okas(n.List().Second(), tmp2) r = typecheck(r, ctxStmt) o.mapAssign(r) - n.List.SetSecond(tmp2) + n.List().SetSecond(tmp2) } } diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 6e7922ca5488b..5827b5a7a6d69 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -28,30 +28,30 @@ var ( ) func emitptrargsmap(fn *ir.Node) { - if ir.FuncName(fn) == "_" || fn.Func.Nname.Sym.Linkname != "" { + if ir.FuncName(fn) == "_" || fn.Func().Nname.Sym().Linkname != "" { return } - lsym := base.Ctxt.Lookup(fn.Func.LSym.Name + ".args_stackmap") + lsym := base.Ctxt.Lookup(fn.Func().LSym.Name + ".args_stackmap") - nptr := int(fn.Type.ArgWidth() / int64(Widthptr)) + nptr := int(fn.Type().ArgWidth() / int64(Widthptr)) bv := bvalloc(int32(nptr) * 2) nbitmap := 1 - if fn.Type.NumResults() > 0 { + if fn.Type().NumResults() > 0 { nbitmap = 2 } off := duint32(lsym, 0, uint32(nbitmap)) off = duint32(lsym, off, uint32(bv.n)) if ir.IsMethod(fn) { - onebitwalktype1(fn.Type.Recvs(), 0, bv) + onebitwalktype1(fn.Type().Recvs(), 0, bv) } - if fn.Type.NumParams() > 0 { - onebitwalktype1(fn.Type.Params(), 0, bv) + if fn.Type().NumParams() > 0 { + onebitwalktype1(fn.Type().Params(), 0, bv) } off = dbvec(lsym, off, bv) - if fn.Type.NumResults() > 0 { - onebitwalktype1(fn.Type.Results(), 0, bv) + if fn.Type().NumResults() > 0 { + onebitwalktype1(fn.Type().Results(), 0, bv) off = dbvec(lsym, off, bv) } @@ -74,30 +74,30 @@ func cmpstackvarlt(a, b *ir.Node) bool { } if a.Class() != ir.PAUTO { - return a.Xoffset < b.Xoffset + return a.Offset() < b.Offset() } - if a.Name.Used() != b.Name.Used() { - return a.Name.Used() + if a.Name().Used() != b.Name().Used() { + return a.Name().Used() } - ap := a.Type.HasPointers() - bp := b.Type.HasPointers() + ap := a.Type().HasPointers() + bp := b.Type().HasPointers() if ap != bp { return ap } - ap = a.Name.Needzero() - bp = b.Name.Needzero() + ap = a.Name().Needzero() + bp = b.Name().Needzero() if ap != bp { return ap } - if a.Type.Width != b.Type.Width { - return a.Type.Width > b.Type.Width + if a.Type().Width != b.Type().Width { + return a.Type().Width > b.Type().Width } - return a.Sym.Name < b.Sym.Name + return a.Sym().Name < b.Sym().Name } // byStackvar implements sort.Interface for []*Node using cmpstackvarlt. @@ -110,18 +110,18 @@ func (s byStackVar) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s *ssafn) AllocFrame(f *ssa.Func) { s.stksize = 0 s.stkptrsize = 0 - fn := s.curfn.Func + fn := s.curfn.Func() // Mark the PAUTO's unused. 
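	// (Sketch of the scheme: clear Used on every PAUTO here, re-mark the
	// slots the register allocator and liveness actually reference below,
	// then truncate fn.Dcl so unused locals never receive stack offsets.)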
for _, ln := range fn.Dcl { if ln.Class() == ir.PAUTO { - ln.Name.SetUsed(false) + ln.Name().SetUsed(false) } } for _, l := range f.RegAlloc { if ls, ok := l.(ssa.LocalSlot); ok { - ls.N.Name.SetUsed(true) + ls.N.Name().SetUsed(true) } } @@ -133,10 +133,10 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { case ir.PPARAM, ir.PPARAMOUT: // Don't modify nodfp; it is a global. if n != nodfp { - n.Name.SetUsed(true) + n.Name().SetUsed(true) } case ir.PAUTO: - n.Name.SetUsed(true) + n.Name().SetUsed(true) } } if !scratchUsed { @@ -155,16 +155,16 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { // Reassign stack offsets of the locals that are used. lastHasPtr := false for i, n := range fn.Dcl { - if n.Op != ir.ONAME || n.Class() != ir.PAUTO { + if n.Op() != ir.ONAME || n.Class() != ir.PAUTO { continue } - if !n.Name.Used() { + if !n.Name().Used() { fn.Dcl = fn.Dcl[:i] break } - dowidth(n.Type) - w := n.Type.Width + dowidth(n.Type()) + w := n.Type().Width if w >= thearch.MAXWIDTH || w < 0 { base.Fatalf("bad width") } @@ -176,8 +176,8 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { w = 1 } s.stksize += w - s.stksize = Rnd(s.stksize, int64(n.Type.Align)) - if n.Type.HasPointers() { + s.stksize = Rnd(s.stksize, int64(n.Type().Align)) + if n.Type().HasPointers() { s.stkptrsize = s.stksize lastHasPtr = true } else { @@ -186,7 +186,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) { s.stksize = Rnd(s.stksize, int64(Widthptr)) } - n.Xoffset = -s.stksize + n.SetOffset(-s.stksize) } s.stksize = Rnd(s.stksize, int64(Widthreg)) @@ -195,10 +195,10 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { func funccompile(fn *ir.Node) { if Curfn != nil { - base.Fatalf("funccompile %v inside %v", fn.Func.Nname.Sym, Curfn.Func.Nname.Sym) + base.Fatalf("funccompile %v inside %v", fn.Func().Nname.Sym(), Curfn.Func().Nname.Sym()) } - if fn.Type == nil { + if fn.Type() == nil { if base.Errors() == 0 { base.Fatalf("funccompile missing type") } @@ -206,11 +206,11 @@ func funccompile(fn *ir.Node) { } // assign parameter offsets - dowidth(fn.Type) + dowidth(fn.Type()) - if fn.Nbody.Len() == 0 { + if fn.Body().Len() == 0 { // Initialize ABI wrappers if necessary. - initLSym(fn.Func, false) + initLSym(fn.Func(), false) emitptrargsmap(fn) return } @@ -234,7 +234,7 @@ func compile(fn *ir.Node) { // Set up the function's LSym early to avoid data races with the assemblers. // Do this before walk, as walk needs the LSym to set attributes/relocations // (e.g. in markTypeUsedInInterface). - initLSym(fn.Func, true) + initLSym(fn.Func(), true) walk(fn) if base.Errors() > errorsBefore { @@ -259,15 +259,15 @@ func compile(fn *ir.Node) { // be types of stack objects. We need to do this here // because symbols must be allocated before the parallel // phase of the compiler. - for _, n := range fn.Func.Dcl { + for _, n := range fn.Func().Dcl { switch n.Class() { case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO: - if livenessShouldTrack(n) && n.Name.Addrtaken() { - dtypesym(n.Type) + if livenessShouldTrack(n) && n.Name().Addrtaken() { + dtypesym(n.Type()) // Also make sure we allocate a linker symbol // for the stack object data, for the same reason. 
- if fn.Func.LSym.Func().StackObjects == nil { - fn.Func.LSym.Func().StackObjects = base.Ctxt.Lookup(fn.Func.LSym.Name + ".stkobj") + if fn.Func().LSym.Func().StackObjects == nil { + fn.Func().LSym.Func().StackObjects = base.Ctxt.Lookup(fn.Func().LSym.Name + ".stkobj") } } } @@ -300,13 +300,13 @@ func compilenow(fn *ir.Node) bool { // inline candidate but then never inlined (presumably because we // found no call sites). func isInlinableButNotInlined(fn *ir.Node) bool { - if fn.Func.Nname.Func.Inl == nil { + if fn.Func().Nname.Func().Inl == nil { return false } - if fn.Sym == nil { + if fn.Sym() == nil { return true } - return !fn.Sym.Linksym().WasInlined() + return !fn.Sym().Linksym().WasInlined() } const maxStackSize = 1 << 30 @@ -318,9 +318,9 @@ const maxStackSize = 1 << 30 func compileSSA(fn *ir.Node, worker int) { f := buildssa(fn, worker) // Note: check arg size to fix issue 25507. - if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type.ArgWidth() >= maxStackSize { + if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type().ArgWidth() >= maxStackSize { largeStackFramesMu.Lock() - largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type.ArgWidth(), pos: fn.Pos}) + largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type().ArgWidth(), pos: fn.Pos()}) largeStackFramesMu.Unlock() return } @@ -336,14 +336,14 @@ func compileSSA(fn *ir.Node, worker int) { if pp.Text.To.Offset >= maxStackSize { largeStackFramesMu.Lock() locals := f.Frontend().(*ssafn).stksize - largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type.ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos}) + largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type().ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()}) largeStackFramesMu.Unlock() return } pp.Flush() // assemble, fill in boilerplate, etc. // fieldtrack must be called after pp.Flush. See issue 20014. - fieldtrack(pp.Text.From.Sym, fn.Func.FieldTrack) + fieldtrack(pp.Text.From.Sym, fn.Func().FieldTrack) } func init() { @@ -371,7 +371,7 @@ func compileFunctions() { // since they're most likely to be the slowest. // This helps avoid stragglers. sort.Slice(compilequeue, func(i, j int) bool { - return compilequeue[i].Nbody.Len() > compilequeue[j].Nbody.Len() + return compilequeue[i].Body().Len() > compilequeue[j].Body().Len() }) } var wg sync.WaitGroup @@ -399,8 +399,8 @@ func compileFunctions() { func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) { fn := curfn.(*ir.Node) - if fn.Func.Nname != nil { - if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect { + if fn.Func().Nname != nil { + if expect := fn.Func().Nname.Sym().Linksym(); fnsym != expect { base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect) } } @@ -430,18 +430,18 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S // // These two adjustments keep toolstash -cmp working for now. // Deciding the right answer is, as they say, future work. - isODCLFUNC := fn.Op == ir.ODCLFUNC + isODCLFUNC := fn.Op() == ir.ODCLFUNC var apdecls []*ir.Node // Populate decls for fn. 
if isODCLFUNC { - for _, n := range fn.Func.Dcl { - if n.Op != ir.ONAME { // might be OTYPE or OLITERAL + for _, n := range fn.Func().Dcl { + if n.Op() != ir.ONAME { // might be OTYPE or OLITERAL continue } switch n.Class() { case ir.PAUTO: - if !n.Name.Used() { + if !n.Name().Used() { // Text == nil -> generating abstract function if fnsym.Func().Text != nil { base.Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)") @@ -457,7 +457,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S } } - decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn.Func, apdecls) + decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn.Func(), apdecls) // For each type referenced by the functions auto vars but not // already referenced by a dwarf var, attach an R_USETYPE relocation to @@ -478,7 +478,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S var varScopes []ir.ScopeID for _, decl := range decls { pos := declPos(decl) - varScopes = append(varScopes, findScope(fn.Func.Marks, pos)) + varScopes = append(varScopes, findScope(fn.Func().Marks, pos)) } scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes) @@ -490,7 +490,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S } func declPos(decl *ir.Node) src.XPos { - if decl.Name.Defn != nil && (decl.Name.Captured() || decl.Name.Byval()) { + if decl.Name().Defn != nil && (decl.Name().Captured() || decl.Name().Byval()) { // It's not clear which position is correct for captured variables here: // * decl.Pos is the wrong position for captured variables, in the inner // function, but it is the right position in the outer function. @@ -505,9 +505,9 @@ func declPos(decl *ir.Node) src.XPos { // case statement. // This code is probably wrong for type switch variables that are also // captured. - return decl.Name.Defn.Pos + return decl.Name().Defn.Pos() } - return decl.Pos + return decl.Pos() } // createSimpleVars creates a DWARF entry for every variable declared in the @@ -530,7 +530,7 @@ func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Node) ([]*ir.Node, []*dwarf func createSimpleVar(fnsym *obj.LSym, n *ir.Node) *dwarf.Var { var abbrev int - offs := n.Xoffset + offs := n.Offset() switch n.Class() { case ir.PAUTO: @@ -550,22 +550,22 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Node) *dwarf.Var { base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n) } - typename := dwarf.InfoPrefix + typesymname(n.Type) + typename := dwarf.InfoPrefix + typesymname(n.Type()) delete(fnsym.Func().Autot, ngotype(n).Linksym()) inlIndex := 0 if base.Flag.GenDwarfInl > 1 { - if n.Name.InlFormal() || n.Name.InlLocal() { - inlIndex = posInlIndex(n.Pos) + 1 - if n.Name.InlFormal() { + if n.Name().InlFormal() || n.Name().InlLocal() { + inlIndex = posInlIndex(n.Pos()) + 1 + if n.Name().InlFormal() { abbrev = dwarf.DW_ABRV_PARAM } } } declpos := base.Ctxt.InnermostPos(declPos(n)) return &dwarf.Var{ - Name: n.Sym.Name, + Name: n.Sym().Name, IsReturnValue: n.Class() == ir.PPARAMOUT, - IsInlFormal: n.Name.InlFormal(), + IsInlFormal: n.Name().InlFormal(), Abbrev: abbrev, StackOffset: int32(offs), Type: base.Ctxt.Lookup(typename), @@ -637,11 +637,11 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir if _, found := selected[n]; found { continue } - c := n.Sym.Name[0] - if c == '.' || n.Type.IsUntyped() { + c := n.Sym().Name[0] + if c == '.' 
|| n.Type().IsUntyped() { continue } - if n.Class() == ir.PPARAM && !canSSAType(n.Type) { + if n.Class() == ir.PPARAM && !canSSAType(n.Type()) { // SSA-able args get location lists, and may move in and // out of registers, so those are handled elsewhere. // Autos and named output params seem to get handled @@ -653,7 +653,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir decls = append(decls, n) continue } - typename := dwarf.InfoPrefix + typesymname(n.Type) + typename := dwarf.InfoPrefix + typesymname(n.Type()) decls = append(decls, n) abbrev := dwarf.DW_ABRV_AUTO_LOCLIST isReturnValue := (n.Class() == ir.PPARAMOUT) @@ -667,7 +667,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir // misleading location for the param (we want pointer-to-heap // and not stack). // TODO(thanm): generate a better location expression - stackcopy := n.Name.Param.Stackcopy + stackcopy := n.Name().Param.Stackcopy if stackcopy != nil && (stackcopy.Class() == ir.PPARAM || stackcopy.Class() == ir.PPARAMOUT) { abbrev = dwarf.DW_ABRV_PARAM_LOCLIST isReturnValue = (stackcopy.Class() == ir.PPARAMOUT) @@ -675,19 +675,19 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir } inlIndex := 0 if base.Flag.GenDwarfInl > 1 { - if n.Name.InlFormal() || n.Name.InlLocal() { - inlIndex = posInlIndex(n.Pos) + 1 - if n.Name.InlFormal() { + if n.Name().InlFormal() || n.Name().InlLocal() { + inlIndex = posInlIndex(n.Pos()) + 1 + if n.Name().InlFormal() { abbrev = dwarf.DW_ABRV_PARAM_LOCLIST } } } - declpos := base.Ctxt.InnermostPos(n.Pos) + declpos := base.Ctxt.InnermostPos(n.Pos()) vars = append(vars, &dwarf.Var{ - Name: n.Sym.Name, + Name: n.Sym().Name, IsReturnValue: isReturnValue, Abbrev: abbrev, - StackOffset: int32(n.Xoffset), + StackOffset: int32(n.Offset()), Type: base.Ctxt.Lookup(typename), DeclFile: declpos.RelFilename(), DeclLine: declpos.RelLine(), @@ -711,11 +711,11 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir func preInliningDcls(fnsym *obj.LSym) []*ir.Node { fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*ir.Node) var rdcl []*ir.Node - for _, n := range fn.Func.Inl.Dcl { - c := n.Sym.Name[0] + for _, n := range fn.Func().Inl.Dcl { + c := n.Sym().Name[0] // Avoid reporting "_" parameters, since if there are more than // one, it can result in a collision later on, as in #23179. - if unversion(n.Sym.Name) == "_" || c == '.' || n.Type.IsUntyped() { + if unversion(n.Sym().Name) == "_" || c == '.' || n.Type().IsUntyped() { continue } rdcl = append(rdcl, n) @@ -741,7 +741,7 @@ func stackOffset(slot ssa.LocalSlot) int32 { case ir.PPARAM, ir.PPARAMOUT: off += base.Ctxt.FixedFrameSize() } - return int32(off + n.Xoffset + slot.Off) + return int32(off + n.Offset() + slot.Off) } // createComplexVar builds a single DWARF variable entry and location list. 
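// (Background, not new behavior in this change: a location list lets a
// variable that SSA has decomposed or kept in registers map to different
// machine locations per PC range, unlike the single static stack offset
// used by createSimpleVar above.)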
@@ -764,18 +764,18 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var typename := dwarf.InfoPrefix + gotype.Name[len("type."):] inlIndex := 0 if base.Flag.GenDwarfInl > 1 { - if n.Name.InlFormal() || n.Name.InlLocal() { - inlIndex = posInlIndex(n.Pos) + 1 - if n.Name.InlFormal() { + if n.Name().InlFormal() || n.Name().InlLocal() { + inlIndex = posInlIndex(n.Pos()) + 1 + if n.Name().InlFormal() { abbrev = dwarf.DW_ABRV_PARAM_LOCLIST } } } - declpos := base.Ctxt.InnermostPos(n.Pos) + declpos := base.Ctxt.InnermostPos(n.Pos()) dvar := &dwarf.Var{ - Name: n.Sym.Name, + Name: n.Sym().Name, IsReturnValue: n.Class() == ir.PPARAMOUT, - IsInlFormal: n.Name.InlFormal(), + IsInlFormal: n.Name().InlFormal(), Abbrev: abbrev, Type: base.Ctxt.Lookup(typename), // The stack offset is used as a sorting key, so for decomposed diff --git a/src/cmd/compile/internal/gc/pgen_test.go b/src/cmd/compile/internal/gc/pgen_test.go index 9f1f00d46a500..efdffe0256c9a 100644 --- a/src/cmd/compile/internal/gc/pgen_test.go +++ b/src/cmd/compile/internal/gc/pgen_test.go @@ -27,12 +27,12 @@ func typeWithPointers() *types.Type { } func markUsed(n *ir.Node) *ir.Node { - n.Name.SetUsed(true) + n.Name().SetUsed(true) return n } func markNeedZero(n *ir.Node) *ir.Node { - n.Name.SetNeedzero(true) + n.Name().SetNeedzero(true) return n } @@ -43,8 +43,8 @@ func TestCmpstackvar(t *testing.T) { s = &types.Sym{Name: "."} } n := NewName(s) - n.Type = t - n.Xoffset = xoffset + n.SetType(t) + n.SetOffset(xoffset) n.SetClass(cl) return n } @@ -158,8 +158,8 @@ func TestCmpstackvar(t *testing.T) { func TestStackvarSort(t *testing.T) { nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Node { n := NewName(s) - n.Type = t - n.Xoffset = xoffset + n.SetType(t) + n.SetOffset(xoffset) n.SetClass(cl) return n } diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index f0895884668ad..c1e523f7a0d2b 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -207,14 +207,14 @@ type progeffectscache struct { // nor do we care about empty structs (handled by the pointer check), // nor do we care about the fake PAUTOHEAP variables. func livenessShouldTrack(n *ir.Node) bool { - return n.Op == ir.ONAME && (n.Class() == ir.PAUTO || n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Type.HasPointers() + return n.Op() == ir.ONAME && (n.Class() == ir.PAUTO || n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Type().HasPointers() } // getvariables returns the list of on-stack variables that we need to track // and a map for looking up indices by *Node. func getvariables(fn *ir.Node) ([]*ir.Node, map[*ir.Node]int32) { var vars []*ir.Node - for _, n := range fn.Func.Dcl { + for _, n := range fn.Func().Dcl { if livenessShouldTrack(n) { vars = append(vars, n) } @@ -272,7 +272,7 @@ const ( // If v does not affect any tracked variables, it returns -1, 0. func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) { n, e := affectedNode(v) - if e == 0 || n == nil || n.Op != ir.ONAME { // cheapest checks first + if e == 0 || n == nil || n.Op() != ir.ONAME { // cheapest checks first return -1, 0 } @@ -282,7 +282,7 @@ func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) { // variable" ICEs (issue 19632). 
switch v.Op { case ssa.OpVarDef, ssa.OpVarKill, ssa.OpVarLive, ssa.OpKeepAlive: - if !n.Name.Used() { + if !n.Name().Used() { return -1, 0 } } @@ -297,7 +297,7 @@ func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) { if e&(ssa.SymRead|ssa.SymAddr) != 0 { effect |= uevar } - if e&ssa.SymWrite != 0 && (!isfat(n.Type) || v.Op == ssa.OpVarDef) { + if e&ssa.SymWrite != 0 && (!isfat(n.Type()) || v.Op == ssa.OpVarDef) { effect |= varkill } @@ -491,10 +491,10 @@ func (lv *Liveness) pointerMap(liveout bvec, vars []*ir.Node, args, locals bvec) node := vars[i] switch node.Class() { case ir.PAUTO: - onebitwalktype1(node.Type, node.Xoffset+lv.stkptrsize, locals) + onebitwalktype1(node.Type(), node.Offset()+lv.stkptrsize, locals) case ir.PPARAM, ir.PPARAMOUT: - onebitwalktype1(node.Type, node.Xoffset, args) + onebitwalktype1(node.Type(), node.Offset(), args) } } } @@ -788,14 +788,14 @@ func (lv *Liveness) epilogue() { // pointers to copy values back to the stack). // TODO: if the output parameter is heap-allocated, then we // don't need to keep the stack copy live? - if lv.fn.Func.HasDefer() { + if lv.fn.Func().HasDefer() { for i, n := range lv.vars { if n.Class() == ir.PPARAMOUT { - if n.Name.IsOutputParamHeapAddr() { + if n.Name().IsOutputParamHeapAddr() { // Just to be paranoid. Heap addresses are PAUTOs. base.Fatalf("variable %v both output param and heap output param", n) } - if n.Name.Param.Heapaddr != nil { + if n.Name().Param.Heapaddr != nil { // If this variable moved to the heap, then // its stack copy is not live. continue @@ -803,21 +803,21 @@ func (lv *Liveness) epilogue() { // Note: zeroing is handled by zeroResults in walk.go. livedefer.Set(int32(i)) } - if n.Name.IsOutputParamHeapAddr() { + if n.Name().IsOutputParamHeapAddr() { // This variable will be overwritten early in the function // prologue (from the result of a mallocgc) but we need to // zero it in case that malloc causes a stack scan. - n.Name.SetNeedzero(true) + n.Name().SetNeedzero(true) livedefer.Set(int32(i)) } - if n.Name.OpenDeferSlot() { + if n.Name().OpenDeferSlot() { // Open-coded defer args slots must be live // everywhere in a function, since a panic can // occur (almost) anywhere. Because it is live // everywhere, it must be zeroed on entry. livedefer.Set(int32(i)) // It was already marked as Needzero when created. - if !n.Name.Needzero() { + if !n.Name().Needzero() { base.Fatalf("all pointer-containing defer arg slots should have Needzero set") } } @@ -891,7 +891,7 @@ func (lv *Liveness) epilogue() { if n.Class() == ir.PPARAM { continue // ok } - base.Fatalf("bad live variable at entry of %v: %L", lv.fn.Func.Nname, n) + base.Fatalf("bad live variable at entry of %v: %L", lv.fn.Func().Nname, n) } // Record live variables. @@ -904,7 +904,7 @@ func (lv *Liveness) epilogue() { } // If we have an open-coded deferreturn call, make a liveness map for it. - if lv.fn.Func.OpenCodedDeferDisallowed() { + if lv.fn.Func().OpenCodedDeferDisallowed() { lv.livenessMap.deferreturn = LivenessDontCare } else { lv.livenessMap.deferreturn = LivenessIndex{ @@ -922,7 +922,7 @@ func (lv *Liveness) epilogue() { // input parameters. 
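	// (Illustrative: any non-parameter recorded live at entry would mean
	// liveness believes it is read before ever being written, which for a
	// stack local is always an analysis bug, hence the Fatalf below.)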
for j, n := range lv.vars { if n.Class() != ir.PPARAM && lv.stackMaps[0].Get(int32(j)) { - lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Func.Nname, n) + lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Func().Nname, n) } } } @@ -980,7 +980,7 @@ func (lv *Liveness) showlive(v *ssa.Value, live bvec) { return } - pos := lv.fn.Func.Nname.Pos + pos := lv.fn.Func().Nname.Pos() if v != nil { pos = v.Pos } @@ -1024,7 +1024,7 @@ func (lv *Liveness) printbvec(printed bool, name string, live bvec) bool { if !live.Get(int32(i)) { continue } - fmt.Printf("%s%s", comma, n.Sym.Name) + fmt.Printf("%s%s", comma, n.Sym().Name) comma = "," } return true @@ -1042,7 +1042,7 @@ func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool) bo } fmt.Printf("%s=", name) if x { - fmt.Printf("%s", lv.vars[pos].Sym.Name) + fmt.Printf("%s", lv.vars[pos].Sym().Name) } return true @@ -1090,7 +1090,7 @@ func (lv *Liveness) printDebug() { if b == lv.f.Entry { live := lv.stackMaps[0] - fmt.Printf("(%s) function entry\n", base.FmtPos(lv.fn.Func.Nname.Pos)) + fmt.Printf("(%s) function entry\n", base.FmtPos(lv.fn.Func().Nname.Pos())) fmt.Printf("\tlive=") printed = false for j, n := range lv.vars { @@ -1168,7 +1168,7 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) { for _, n := range lv.vars { switch n.Class() { case ir.PPARAM, ir.PPARAMOUT: - if maxArgNode == nil || n.Xoffset > maxArgNode.Xoffset { + if maxArgNode == nil || n.Offset() > maxArgNode.Offset() { maxArgNode = n } } @@ -1176,7 +1176,7 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) { // Next, find the offset of the largest pointer in the largest node. var maxArgs int64 if maxArgNode != nil { - maxArgs = maxArgNode.Xoffset + typeptrdata(maxArgNode.Type) + maxArgs = maxArgNode.Offset() + typeptrdata(maxArgNode.Type()) } // Size locals bitmaps to be stkptrsize sized. @@ -1266,7 +1266,7 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap { } // Emit the live pointer map data structures - ls := e.curfn.Func.LSym + ls := e.curfn.Func().LSym fninfo := ls.Func() fninfo.GCArgs, fninfo.GCLocals = lv.emit() diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go index d92749589f709..5ab2821187b23 100644 --- a/src/cmd/compile/internal/gc/racewalk.go +++ b/src/cmd/compile/internal/gc/racewalk.go @@ -61,12 +61,12 @@ func ispkgin(pkgs []string) bool { } func instrument(fn *ir.Node) { - if fn.Func.Pragma&ir.Norace != 0 { + if fn.Func().Pragma&ir.Norace != 0 { return } if !base.Flag.Race || !ispkgin(norace_inst_pkgs) { - fn.Func.SetInstrumentBody(true) + fn.Func().SetInstrumentBody(true) } if base.Flag.Race { @@ -74,8 +74,8 @@ func instrument(fn *ir.Node) { base.Pos = src.NoXPos if thearch.LinkArch.Arch.Family != sys.AMD64 { - fn.Func.Enter.Prepend(mkcall("racefuncenterfp", nil, nil)) - fn.Func.Exit.Append(mkcall("racefuncexit", nil, nil)) + fn.Func().Enter.Prepend(mkcall("racefuncenterfp", nil, nil)) + fn.Func().Exit.Append(mkcall("racefuncexit", nil, nil)) } else { // nodpc is the PC of the caller as extracted by @@ -84,11 +84,11 @@ func instrument(fn *ir.Node) { // work on arm or others that might support // race in the future. 
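		// In effect nodpc acts as a synthetic uintptr slot at
		// -Widthptr(FP); on a 64-bit target the call behaves roughly like
		//
		//	racefuncenter(*(*uintptr)(unsafe.Pointer(fp - 8)))
		//
		// (a sketch only), handing the caller's PC to the race runtime.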
nodpc := ir.Copy(nodfp) - nodpc.Type = types.Types[types.TUINTPTR] - nodpc.Xoffset = int64(-Widthptr) - fn.Func.Dcl = append(fn.Func.Dcl, nodpc) - fn.Func.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc)) - fn.Func.Exit.Append(mkcall("racefuncexit", nil, nil)) + nodpc.SetType(types.Types[types.TUINTPTR]) + nodpc.SetOffset(int64(-Widthptr)) + fn.Func().Dcl = append(fn.Func().Dcl, nodpc) + fn.Func().Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc)) + fn.Func().Exit.Append(mkcall("racefuncexit", nil, nil)) } base.Pos = lno } diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index edaec21f920c5..6a2a65c2dfff5 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -27,7 +27,7 @@ func typecheckrange(n *ir.Node) { // second half of dance, the first half being typecheckrangeExpr n.SetTypecheck(1) - ls := n.List.Slice() + ls := n.List().Slice() for i1, n1 := range ls { if n1.Typecheck() == 0 { ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign) @@ -35,21 +35,21 @@ func typecheckrange(n *ir.Node) { } decldepth++ - typecheckslice(n.Nbody.Slice(), ctxStmt) + typecheckslice(n.Body().Slice(), ctxStmt) decldepth-- } func typecheckrangeExpr(n *ir.Node) { - n.Right = typecheck(n.Right, ctxExpr) + n.SetRight(typecheck(n.Right(), ctxExpr)) - t := n.Right.Type + t := n.Right().Type() if t == nil { return } // delicate little dance. see typecheckas2 - ls := n.List.Slice() + ls := n.List().Slice() for i1, n1 := range ls { - if n1.Name == nil || n1.Name.Defn != n { + if n1.Name() == nil || n1.Name().Defn != n { ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign) } } @@ -57,13 +57,13 @@ func typecheckrangeExpr(n *ir.Node) { if t.IsPtr() && t.Elem().IsArray() { t = t.Elem() } - n.Type = t + n.SetType(t) var t1, t2 *types.Type toomany := false switch t.Etype { default: - base.ErrorfAt(n.Pos, "cannot range over %L", n.Right) + base.ErrorfAt(n.Pos(), "cannot range over %L", n.Right()) return case types.TARRAY, types.TSLICE: @@ -76,13 +76,13 @@ func typecheckrangeExpr(n *ir.Node) { case types.TCHAN: if !t.ChanDir().CanRecv() { - base.ErrorfAt(n.Pos, "invalid operation: range %v (receive from send-only type %v)", n.Right, n.Right.Type) + base.ErrorfAt(n.Pos(), "invalid operation: range %v (receive from send-only type %v)", n.Right(), n.Right().Type()) return } t1 = t.Elem() t2 = nil - if n.List.Len() == 2 { + if n.List().Len() == 2 { toomany = true } @@ -91,16 +91,16 @@ func typecheckrangeExpr(n *ir.Node) { t2 = types.Runetype } - if n.List.Len() > 2 || toomany { - base.ErrorfAt(n.Pos, "too many variables in range") + if n.List().Len() > 2 || toomany { + base.ErrorfAt(n.Pos(), "too many variables in range") } var v1, v2 *ir.Node - if n.List.Len() != 0 { - v1 = n.List.First() + if n.List().Len() != 0 { + v1 = n.List().First() } - if n.List.Len() > 1 { - v2 = n.List.Second() + if n.List().Len() > 1 { + v2 = n.List().Second() } // this is not only an optimization but also a requirement in the spec. @@ -109,28 +109,28 @@ func typecheckrangeExpr(n *ir.Node) { // present." 
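	// So, illustratively, these two loops must compile identically:
	//
	//	for i := range a { ... }
	//	for i, _ := range a { ... }
	//
	// which is why a blank second variable is dropped here.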
if ir.IsBlank(v2) { if v1 != nil { - n.List.Set1(v1) + n.PtrList().Set1(v1) } v2 = nil } if v1 != nil { - if v1.Name != nil && v1.Name.Defn == n { - v1.Type = t1 - } else if v1.Type != nil { - if op, why := assignop(t1, v1.Type); op == ir.OXXX { - base.ErrorfAt(n.Pos, "cannot assign type %v to %L in range%s", t1, v1, why) + if v1.Name() != nil && v1.Name().Defn == n { + v1.SetType(t1) + } else if v1.Type() != nil { + if op, why := assignop(t1, v1.Type()); op == ir.OXXX { + base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t1, v1, why) } } checkassign(n, v1) } if v2 != nil { - if v2.Name != nil && v2.Name.Defn == n { - v2.Type = t2 - } else if v2.Type != nil { - if op, why := assignop(t2, v2.Type); op == ir.OXXX { - base.ErrorfAt(n.Pos, "cannot assign type %v to %L in range%s", t2, v2, why) + if v2.Name() != nil && v2.Name().Defn == n { + v2.SetType(t2) + } else if v2.Type() != nil { + if op, why := assignop(t2, v2.Type()); op == ir.OXXX { + base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t2, v2, why) } } checkassign(n, v2) @@ -159,7 +159,7 @@ func cheapComputableIndex(width int64) bool { // the returned node. func walkrange(n *ir.Node) *ir.Node { if isMapClear(n) { - m := n.Right + m := n.Right() lno := setlineno(m) n = mapClear(m) base.Pos = lno @@ -173,20 +173,20 @@ func walkrange(n *ir.Node) *ir.Node { // hb: hidden bool // a, v1, v2: not hidden aggregate, val 1, 2 - t := n.Type + t := n.Type() - a := n.Right + a := n.Right() lno := setlineno(a) - n.Right = nil + n.SetRight(nil) var v1, v2 *ir.Node - l := n.List.Len() + l := n.List().Len() if l > 0 { - v1 = n.List.First() + v1 = n.List().First() } if l > 1 { - v2 = n.List.Second() + v2 = n.List().Second() } if ir.IsBlank(v2) { @@ -203,7 +203,7 @@ func walkrange(n *ir.Node) *ir.Node { // n.List has no meaning anymore, clear it // to avoid erroneous processing by racewalk. - n.List.Set(nil) + n.PtrList().Set(nil) var ifGuard *ir.Node @@ -230,8 +230,8 @@ func walkrange(n *ir.Node) *ir.Node { init = append(init, ir.Nod(ir.OAS, hv1, nil)) init = append(init, ir.Nod(ir.OAS, hn, ir.Nod(ir.OLEN, ha, nil))) - n.Left = ir.Nod(ir.OLT, hv1, hn) - n.Right = ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1))) + n.SetLeft(ir.Nod(ir.OLT, hv1, hn)) + n.SetRight(ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1)))) // for range ha { body } if v1 == nil { @@ -245,15 +245,15 @@ func walkrange(n *ir.Node) *ir.Node { } // for v1, v2 := range ha { body } - if cheapComputableIndex(n.Type.Elem().Width) { + if cheapComputableIndex(n.Type().Elem().Width) { // v1, v2 = hv1, ha[hv1] tmp := ir.Nod(ir.OINDEX, ha, hv1) tmp.SetBounded(true) // Use OAS2 to correctly handle assignments // of the form "v1, a[v1] := range". a := ir.Nod(ir.OAS2, nil, nil) - a.List.Set2(v1, v2) - a.Rlist.Set2(hv1, tmp) + a.PtrList().Set2(v1, v2) + a.PtrRlist().Set2(hv1, tmp) body = []*ir.Node{a} break } @@ -271,10 +271,10 @@ func walkrange(n *ir.Node) *ir.Node { // elimination on the index variable (see #20711). // Enhance the prove pass to understand this. 
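		// The generated shape is roughly (a pseudo-Go sketch):
		//
		//	if hv1 < hn {
		//		hp = &ha[0]
		//		for ; hv1 < hn; hv1++ {
		//			v1, v2 = hv1, *hp
		//			...                    // body
		//			hp = add(hp, elemsize) // late increment
		//		}
		//	}
		//
		// with the pointer advance kept late so hp never points past the
		// end of the backing array.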
ifGuard = ir.Nod(ir.OIF, nil, nil) - ifGuard.Left = ir.Nod(ir.OLT, hv1, hn) + ifGuard.SetLeft(ir.Nod(ir.OLT, hv1, hn)) translatedLoopOp = ir.OFORUNTIL - hp := temp(types.NewPtr(n.Type.Elem())) + hp := temp(types.NewPtr(n.Type().Elem())) tmp := ir.Nod(ir.OINDEX, ha, nodintconst(0)) tmp.SetBounded(true) init = append(init, ir.Nod(ir.OAS, hp, ir.Nod(ir.OADDR, tmp, nil))) @@ -282,8 +282,8 @@ func walkrange(n *ir.Node) *ir.Node { // Use OAS2 to correctly handle assignments // of the form "v1, a[v1] := range". a := ir.Nod(ir.OAS2, nil, nil) - a.List.Set2(v1, v2) - a.Rlist.Set2(hv1, ir.Nod(ir.ODEREF, hp, nil)) + a.PtrList().Set2(v1, v2) + a.PtrRlist().Set2(hv1, ir.Nod(ir.ODEREF, hp, nil)) body = append(body, a) // Advance pointer as part of the late increment. @@ -293,7 +293,7 @@ func walkrange(n *ir.Node) *ir.Node { // end of the allocation. a = ir.Nod(ir.OAS, hp, addptr(hp, t.Elem().Width)) a = typecheck(a, ctxStmt) - n.List.Set1(a) + n.PtrList().Set1(a) case types.TMAP: // order.stmt allocated the iterator for us. @@ -301,8 +301,8 @@ func walkrange(n *ir.Node) *ir.Node { ha := a hit := prealloc[n] - th := hit.Type - n.Left = nil + th := hit.Type() + n.SetLeft(nil) keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:hiter elemsym := th.Field(1).Sym // ditto @@ -310,11 +310,11 @@ func walkrange(n *ir.Node) *ir.Node { fn = substArgTypes(fn, t.Key(), t.Elem(), th) init = append(init, mkcall1(fn, nil, nil, typename(t), ha, ir.Nod(ir.OADDR, hit, nil))) - n.Left = ir.Nod(ir.ONE, nodSym(ir.ODOT, hit, keysym), nodnil()) + n.SetLeft(ir.Nod(ir.ONE, nodSym(ir.ODOT, hit, keysym), nodnil())) fn = syslook("mapiternext") fn = substArgTypes(fn, th) - n.Right = mkcall1(fn, nil, nil, ir.Nod(ir.OADDR, hit, nil)) + n.SetRight(mkcall1(fn, nil, nil, ir.Nod(ir.OADDR, hit, nil))) key := nodSym(ir.ODOT, hit, keysym) key = ir.Nod(ir.ODEREF, key, nil) @@ -326,8 +326,8 @@ func walkrange(n *ir.Node) *ir.Node { elem := nodSym(ir.ODOT, hit, elemsym) elem = ir.Nod(ir.ODEREF, elem, nil) a := ir.Nod(ir.OAS2, nil, nil) - a.List.Set2(v1, v2) - a.Rlist.Set2(key, elem) + a.PtrList().Set2(v1, v2) + a.PtrRlist().Set2(key, elem) body = []*ir.Node{a} } @@ -335,7 +335,7 @@ func walkrange(n *ir.Node) *ir.Node { // order.stmt arranged for a copy of the channel variable. 
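		// The loop becomes, approximately (sketch):
		//
		//	for {
		//		hv1, hb := <-ha
		//		if !hb {
		//			break
		//		}
		//		v1 = hv1
		//		...
		//	}
		//
		// where hb reports whether the receive succeeded (false once the
		// channel is closed and drained).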
ha := a - n.Left = nil + n.SetLeft(nil) hv1 := temp(t.Elem()) hv1.SetTypecheck(1) @@ -344,12 +344,12 @@ func walkrange(n *ir.Node) *ir.Node { } hb := temp(types.Types[types.TBOOL]) - n.Left = ir.Nod(ir.ONE, hb, nodbool(false)) + n.SetLeft(ir.Nod(ir.ONE, hb, nodbool(false))) a := ir.Nod(ir.OAS2RECV, nil, nil) a.SetTypecheck(1) - a.List.Set2(hv1, hb) - a.Right = ir.Nod(ir.ORECV, ha, nil) - n.Left.Ninit.Set1(a) + a.PtrList().Set2(hv1, hb) + a.SetRight(ir.Nod(ir.ORECV, ha, nil)) + n.Left().PtrInit().Set1(a) if v1 == nil { body = nil } else { @@ -387,7 +387,7 @@ func walkrange(n *ir.Node) *ir.Node { init = append(init, ir.Nod(ir.OAS, hv1, nil)) // hv1 < len(ha) - n.Left = ir.Nod(ir.OLT, hv1, ir.Nod(ir.OLEN, ha, nil)) + n.SetLeft(ir.Nod(ir.OLT, hv1, ir.Nod(ir.OLEN, ha, nil))) if v1 != nil { // hv1t = hv1 @@ -401,19 +401,19 @@ func walkrange(n *ir.Node) *ir.Node { // if hv2 < utf8.RuneSelf nif := ir.Nod(ir.OIF, nil, nil) - nif.Left = ir.Nod(ir.OLT, hv2, nodintconst(utf8.RuneSelf)) + nif.SetLeft(ir.Nod(ir.OLT, hv2, nodintconst(utf8.RuneSelf))) // hv1++ - nif.Nbody.Set1(ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1)))) + nif.PtrBody().Set1(ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1)))) // } else { eif := ir.Nod(ir.OAS2, nil, nil) - nif.Rlist.Set1(eif) + nif.PtrRlist().Set1(eif) // hv2, hv1 = decoderune(ha, hv1) - eif.List.Set2(hv2, hv1) + eif.PtrList().Set2(hv2, hv1) fn := syslook("decoderune") - eif.Rlist.Set1(mkcall1(fn, fn.Type.Results(), nil, ha, hv1)) + eif.PtrRlist().Set1(mkcall1(fn, fn.Type().Results(), nil, ha, hv1)) body = append(body, nif) @@ -421,8 +421,8 @@ func walkrange(n *ir.Node) *ir.Node { if v2 != nil { // v1, v2 = hv1t, hv2 a := ir.Nod(ir.OAS2, nil, nil) - a.List.Set2(v1, v2) - a.Rlist.Set2(hv1t, hv2) + a.PtrList().Set2(v1, v2) + a.PtrRlist().Set2(hv1t, hv2) body = append(body, a) } else { // v1 = hv1t @@ -431,26 +431,26 @@ func walkrange(n *ir.Node) *ir.Node { } } - n.Op = translatedLoopOp + n.SetOp(translatedLoopOp) typecheckslice(init, ctxStmt) if ifGuard != nil { - ifGuard.Ninit.Append(init...) + ifGuard.PtrInit().Append(init...) ifGuard = typecheck(ifGuard, ctxStmt) } else { - n.Ninit.Append(init...) + n.PtrInit().Append(init...) } - typecheckslice(n.Left.Ninit.Slice(), ctxStmt) + typecheckslice(n.Left().Init().Slice(), ctxStmt) - n.Left = typecheck(n.Left, ctxExpr) - n.Left = defaultlit(n.Left, nil) - n.Right = typecheck(n.Right, ctxStmt) + n.SetLeft(typecheck(n.Left(), ctxExpr)) + n.SetLeft(defaultlit(n.Left(), nil)) + n.SetRight(typecheck(n.Right(), ctxStmt)) typecheckslice(body, ctxStmt) - n.Nbody.Prepend(body...) + n.PtrBody().Prepend(body...) if ifGuard != nil { - ifGuard.Nbody.Set1(n) + ifGuard.PtrBody().Set1(n) n = ifGuard } @@ -472,36 +472,36 @@ func isMapClear(n *ir.Node) bool { return false } - if n.Op != ir.ORANGE || n.Type.Etype != types.TMAP || n.List.Len() != 1 { + if n.Op() != ir.ORANGE || n.Type().Etype != types.TMAP || n.List().Len() != 1 { return false } - k := n.List.First() + k := n.List().First() if k == nil || ir.IsBlank(k) { return false } // Require k to be a new variable name. 
- if k.Name == nil || k.Name.Defn != n { + if k.Name() == nil || k.Name().Defn != n { return false } - if n.Nbody.Len() != 1 { + if n.Body().Len() != 1 { return false } - stmt := n.Nbody.First() // only stmt in body - if stmt == nil || stmt.Op != ir.ODELETE { + stmt := n.Body().First() // only stmt in body + if stmt == nil || stmt.Op() != ir.ODELETE { return false } - m := n.Right - if !samesafeexpr(stmt.List.First(), m) || !samesafeexpr(stmt.List.Second(), k) { + m := n.Right() + if !samesafeexpr(stmt.List().First(), m) || !samesafeexpr(stmt.List().Second(), k) { return false } // Keys where equality is not reflexive can not be deleted from maps. - if !isreflexive(m.Type.Key()) { + if !isreflexive(m.Type().Key()) { return false } @@ -510,7 +510,7 @@ func isMapClear(n *ir.Node) bool { // mapClear constructs a call to runtime.mapclear for the map m. func mapClear(m *ir.Node) *ir.Node { - t := m.Type + t := m.Type() // instantiate mapclear(typ *type, hmap map[any]any) fn := syslook("mapclear") @@ -543,21 +543,21 @@ func arrayClear(n, v1, v2, a *ir.Node) bool { return false } - if n.Nbody.Len() != 1 || n.Nbody.First() == nil { + if n.Body().Len() != 1 || n.Body().First() == nil { return false } - stmt := n.Nbody.First() // only stmt in body - if stmt.Op != ir.OAS || stmt.Left.Op != ir.OINDEX { + stmt := n.Body().First() // only stmt in body + if stmt.Op() != ir.OAS || stmt.Left().Op() != ir.OINDEX { return false } - if !samesafeexpr(stmt.Left.Left, a) || !samesafeexpr(stmt.Left.Right, v1) { + if !samesafeexpr(stmt.Left().Left(), a) || !samesafeexpr(stmt.Left().Right(), v1) { return false } - elemsize := n.Type.Elem().Width - if elemsize <= 0 || !isZero(stmt.Right) { + elemsize := n.Type().Elem().Width + if elemsize <= 0 || !isZero(stmt.Right()) { return false } @@ -568,10 +568,10 @@ func arrayClear(n, v1, v2, a *ir.Node) bool { // memclr{NoHeap,Has}Pointers(hp, hn) // i = len(a) - 1 // } - n.Op = ir.OIF + n.SetOp(ir.OIF) - n.Nbody.Set(nil) - n.Left = ir.Nod(ir.ONE, ir.Nod(ir.OLEN, a, nil), nodintconst(0)) + n.PtrBody().Set(nil) + n.SetLeft(ir.Nod(ir.ONE, ir.Nod(ir.OLEN, a, nil), nodintconst(0))) // hp = &a[0] hp := temp(types.Types[types.TUNSAFEPTR]) @@ -580,7 +580,7 @@ func arrayClear(n, v1, v2, a *ir.Node) bool { tmp.SetBounded(true) tmp = ir.Nod(ir.OADDR, tmp, nil) tmp = convnop(tmp, types.Types[types.TUNSAFEPTR]) - n.Nbody.Append(ir.Nod(ir.OAS, hp, tmp)) + n.PtrBody().Append(ir.Nod(ir.OAS, hp, tmp)) // hn = len(a) * sizeof(elem(a)) hn := temp(types.Types[types.TUINTPTR]) @@ -588,43 +588,43 @@ func arrayClear(n, v1, v2, a *ir.Node) bool { tmp = ir.Nod(ir.OLEN, a, nil) tmp = ir.Nod(ir.OMUL, tmp, nodintconst(elemsize)) tmp = conv(tmp, types.Types[types.TUINTPTR]) - n.Nbody.Append(ir.Nod(ir.OAS, hn, tmp)) + n.PtrBody().Append(ir.Nod(ir.OAS, hn, tmp)) var fn *ir.Node - if a.Type.Elem().HasPointers() { + if a.Type().Elem().HasPointers() { // memclrHasPointers(hp, hn) - Curfn.Func.SetWBPos(stmt.Pos) + Curfn.Func().SetWBPos(stmt.Pos()) fn = mkcall("memclrHasPointers", nil, nil, hp, hn) } else { // memclrNoHeapPointers(hp, hn) fn = mkcall("memclrNoHeapPointers", nil, nil, hp, hn) } - n.Nbody.Append(fn) + n.PtrBody().Append(fn) // i = len(a) - 1 v1 = ir.Nod(ir.OAS, v1, ir.Nod(ir.OSUB, ir.Nod(ir.OLEN, a, nil), nodintconst(1))) - n.Nbody.Append(v1) + n.PtrBody().Append(v1) - n.Left = typecheck(n.Left, ctxExpr) - n.Left = defaultlit(n.Left, nil) - typecheckslice(n.Nbody.Slice(), ctxStmt) + n.SetLeft(typecheck(n.Left(), ctxExpr)) + n.SetLeft(defaultlit(n.Left(), nil)) + typecheckslice(n.Body().Slice(), 
ctxStmt) n = walkstmt(n) return true } // addptr returns (*T)(uintptr(p) + n). func addptr(p *ir.Node, n int64) *ir.Node { - t := p.Type + t := p.Type() p = ir.Nod(ir.OCONVNOP, p, nil) - p.Type = types.Types[types.TUINTPTR] + p.SetType(types.Types[types.TUINTPTR]) p = ir.Nod(ir.OADD, p, nodintconst(n)) p = ir.Nod(ir.OCONVNOP, p, nil) - p.Type = t + p.SetType(t) return p } diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 34047bfefa6ff..4559dd3a219f5 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -994,14 +994,14 @@ func typename(t *types.Type) *ir.Node { s := typenamesym(t) if s.Def == nil { n := ir.NewNameAt(src.NoXPos, s) - n.Type = types.Types[types.TUINT8] + n.SetType(types.Types[types.TUINT8]) n.SetClass(ir.PEXTERN) n.SetTypecheck(1) s.Def = ir.AsTypesNode(n) } n := ir.Nod(ir.OADDR, ir.AsNode(s.Def), nil) - n.Type = types.NewPtr(ir.AsNode(s.Def).Type) + n.SetType(types.NewPtr(ir.AsNode(s.Def).Type())) n.SetTypecheck(1) return n } @@ -1013,7 +1013,7 @@ func itabname(t, itype *types.Type) *ir.Node { s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString()) if s.Def == nil { n := NewName(s) - n.Type = types.Types[types.TUINT8] + n.SetType(types.Types[types.TUINT8]) n.SetClass(ir.PEXTERN) n.SetTypecheck(1) s.Def = ir.AsTypesNode(n) @@ -1021,7 +1021,7 @@ func itabname(t, itype *types.Type) *ir.Node { } n := ir.Nod(ir.OADDR, ir.AsNode(s.Def), nil) - n.Type = types.NewPtr(ir.AsNode(s.Def).Type) + n.SetType(types.NewPtr(ir.AsNode(s.Def).Type())) n.SetTypecheck(1) return n } @@ -1519,8 +1519,8 @@ func addsignat(t *types.Type) { func addsignats(dcls []*ir.Node) { // copy types from dcl list to signatset for _, n := range dcls { - if n.Op == ir.OTYPE { - addsignat(n.Type) + if n.Op() == ir.OTYPE { + addsignat(n.Type()) } } } @@ -1879,13 +1879,13 @@ func zeroaddr(size int64) *ir.Node { s := mappkg.Lookup("zero") if s.Def == nil { x := NewName(s) - x.Type = types.Types[types.TUINT8] + x.SetType(types.Types[types.TUINT8]) x.SetClass(ir.PEXTERN) x.SetTypecheck(1) s.Def = ir.AsTypesNode(x) } z := ir.Nod(ir.OADDR, ir.AsNode(s.Def), nil) - z.Type = types.NewPtr(types.Types[types.TUINT8]) + z.SetType(types.NewPtr(types.Types[types.TUINT8])) z.SetTypecheck(1) return z } diff --git a/src/cmd/compile/internal/gc/scc.go b/src/cmd/compile/internal/gc/scc.go index ddde18e50541d..880eff7595afd 100644 --- a/src/cmd/compile/internal/gc/scc.go +++ b/src/cmd/compile/internal/gc/scc.go @@ -56,7 +56,7 @@ func visitBottomUp(list []*ir.Node, analyze func(list []*ir.Node, recursive bool v.analyze = analyze v.nodeID = make(map[*ir.Node]uint32) for _, n := range list { - if n.Op == ir.ODCLFUNC && !n.Func.IsHiddenClosure() { + if n.Op() == ir.ODCLFUNC && !n.Func().IsHiddenClosure() { v.visit(n) } } @@ -75,46 +75,46 @@ func (v *bottomUpVisitor) visit(n *ir.Node) uint32 { min := v.visitgen v.stack = append(v.stack, n) - ir.InspectList(n.Nbody, func(n *ir.Node) bool { - switch n.Op { + ir.InspectList(n.Body(), func(n *ir.Node) bool { + switch n.Op() { case ir.ONAME: if n.Class() == ir.PFUNC { - if n != nil && n.Name.Defn != nil { - if m := v.visit(n.Name.Defn); m < min { + if n != nil && n.Name().Defn != nil { + if m := v.visit(n.Name().Defn); m < min { min = m } } } case ir.OMETHEXPR: fn := methodExprName(n) - if fn != nil && fn.Name.Defn != nil { - if m := v.visit(fn.Name.Defn); m < min { + if fn != nil && fn.Name().Defn != nil { + if m := v.visit(fn.Name().Defn); m < min { min = m } } case ir.ODOTMETH: fn := 
methodExprName(n) - if fn != nil && fn.Op == ir.ONAME && fn.Class() == ir.PFUNC && fn.Name.Defn != nil { - if m := v.visit(fn.Name.Defn); m < min { + if fn != nil && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC && fn.Name().Defn != nil { + if m := v.visit(fn.Name().Defn); m < min { min = m } } case ir.OCALLPART: fn := ir.AsNode(callpartMethod(n).Nname) - if fn != nil && fn.Op == ir.ONAME && fn.Class() == ir.PFUNC && fn.Name.Defn != nil { - if m := v.visit(fn.Name.Defn); m < min { + if fn != nil && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC && fn.Name().Defn != nil { + if m := v.visit(fn.Name().Defn); m < min { min = m } } case ir.OCLOSURE: - if m := v.visit(n.Func.Decl); m < min { + if m := v.visit(n.Func().Decl); m < min { min = m } } return true }) - if (min == id || min == id+1) && !n.Func.IsHiddenClosure() { + if (min == id || min == id+1) && !n.Func().IsHiddenClosure() { // This node is the root of a strongly connected component. // The original min passed to visitcodelist was v.nodeID[n]+1. diff --git a/src/cmd/compile/internal/gc/scope.go b/src/cmd/compile/internal/gc/scope.go index b5ebce04bec46..16e66dee6c6d6 100644 --- a/src/cmd/compile/internal/gc/scope.go +++ b/src/cmd/compile/internal/gc/scope.go @@ -30,13 +30,13 @@ func findScope(marks []ir.Mark, pos src.XPos) ir.ScopeID { func assembleScopes(fnsym *obj.LSym, fn *ir.Node, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope { // Initialize the DWARF scope tree based on lexical scopes. - dwarfScopes := make([]dwarf.Scope, 1+len(fn.Func.Parents)) - for i, parent := range fn.Func.Parents { + dwarfScopes := make([]dwarf.Scope, 1+len(fn.Func().Parents)) + for i, parent := range fn.Func().Parents { dwarfScopes[i+1].Parent = int32(parent) } scopeVariables(dwarfVars, varScopes, dwarfScopes) - scopePCs(fnsym, fn.Func.Marks, dwarfScopes) + scopePCs(fnsym, fn.Func().Marks, dwarfScopes) return compactScopes(dwarfScopes) } diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go index ed7db0aaf7b56..73b808b81543d 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/gc/select.go @@ -14,36 +14,36 @@ import ( func typecheckselect(sel *ir.Node) { var def *ir.Node lno := setlineno(sel) - typecheckslice(sel.Ninit.Slice(), ctxStmt) - for _, ncase := range sel.List.Slice() { - if ncase.Op != ir.OCASE { + typecheckslice(sel.Init().Slice(), ctxStmt) + for _, ncase := range sel.List().Slice() { + if ncase.Op() != ir.OCASE { setlineno(ncase) - base.Fatalf("typecheckselect %v", ncase.Op) + base.Fatalf("typecheckselect %v", ncase.Op()) } - if ncase.List.Len() == 0 { + if ncase.List().Len() == 0 { // default if def != nil { - base.ErrorfAt(ncase.Pos, "multiple defaults in select (first at %v)", ir.Line(def)) + base.ErrorfAt(ncase.Pos(), "multiple defaults in select (first at %v)", ir.Line(def)) } else { def = ncase } - } else if ncase.List.Len() > 1 { - base.ErrorfAt(ncase.Pos, "select cases cannot be lists") + } else if ncase.List().Len() > 1 { + base.ErrorfAt(ncase.Pos(), "select cases cannot be lists") } else { - ncase.List.SetFirst(typecheck(ncase.List.First(), ctxStmt)) - n := ncase.List.First() - ncase.Left = n - ncase.List.Set(nil) - switch n.Op { + ncase.List().SetFirst(typecheck(ncase.List().First(), ctxStmt)) + n := ncase.List().First() + ncase.SetLeft(n) + ncase.PtrList().Set(nil) + switch n.Op() { default: - pos := n.Pos - if n.Op == ir.ONAME { + pos := n.Pos() + if n.Op() == ir.ONAME { // We don't have the right position for ONAME nodes (see #15459 and // others). 
Using ncase.Pos for now as it will provide the correct // line number (assuming the expression follows the "case" keyword // on the same line). This matches the approach before 1.10. - pos = ncase.Pos + pos = ncase.Pos() } base.ErrorfAt(pos, "select case must be receive, send or assign recv") @@ -51,41 +51,41 @@ func typecheckselect(sel *ir.Node) { // remove implicit conversions; the eventual assignment // will reintroduce them. case ir.OAS: - if (n.Right.Op == ir.OCONVNOP || n.Right.Op == ir.OCONVIFACE) && n.Right.Implicit() { - n.Right = n.Right.Left + if (n.Right().Op() == ir.OCONVNOP || n.Right().Op() == ir.OCONVIFACE) && n.Right().Implicit() { + n.SetRight(n.Right().Left()) } - if n.Right.Op != ir.ORECV { - base.ErrorfAt(n.Pos, "select assignment must have receive on right hand side") + if n.Right().Op() != ir.ORECV { + base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side") break } - n.Op = ir.OSELRECV + n.SetOp(ir.OSELRECV) // convert x, ok = <-c into OSELRECV2(x, <-c) with ntest=ok case ir.OAS2RECV: - if n.Right.Op != ir.ORECV { - base.ErrorfAt(n.Pos, "select assignment must have receive on right hand side") + if n.Right().Op() != ir.ORECV { + base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side") break } - n.Op = ir.OSELRECV2 - n.Left = n.List.First() - n.List.Set1(n.List.Second()) + n.SetOp(ir.OSELRECV2) + n.SetLeft(n.List().First()) + n.PtrList().Set1(n.List().Second()) // convert <-c into OSELRECV(N, <-c) case ir.ORECV: - n = ir.NodAt(n.Pos, ir.OSELRECV, nil, n) + n = ir.NodAt(n.Pos(), ir.OSELRECV, nil, n) n.SetTypecheck(1) - ncase.Left = n + ncase.SetLeft(n) case ir.OSEND: break } } - typecheckslice(ncase.Nbody.Slice(), ctxStmt) + typecheckslice(ncase.Body().Slice(), ctxStmt) } base.Pos = lno @@ -93,18 +93,18 @@ func typecheckselect(sel *ir.Node) { func walkselect(sel *ir.Node) { lno := setlineno(sel) - if sel.Nbody.Len() != 0 { + if sel.Body().Len() != 0 { base.Fatalf("double walkselect") } - init := sel.Ninit.Slice() - sel.Ninit.Set(nil) + init := sel.Init().Slice() + sel.PtrInit().Set(nil) - init = append(init, walkselectcases(&sel.List)...) - sel.List.Set(nil) + init = append(init, walkselectcases(sel.PtrList())...) + sel.PtrList().Set(nil) - sel.Nbody.Set(init) - walkstmtlist(sel.Nbody.Slice()) + sel.PtrBody().Set(init) + walkstmtlist(sel.Body().Slice()) base.Pos = lno } @@ -122,38 +122,38 @@ func walkselectcases(cases *ir.Nodes) []*ir.Node { if ncas == 1 { cas := cases.First() setlineno(cas) - l := cas.Ninit.Slice() - if cas.Left != nil { // not default: - n := cas.Left - l = append(l, n.Ninit.Slice()...) - n.Ninit.Set(nil) - switch n.Op { + l := cas.Init().Slice() + if cas.Left() != nil { // not default: + n := cas.Left() + l = append(l, n.Init().Slice()...) 
+ n.PtrInit().Set(nil) + switch n.Op() { default: - base.Fatalf("select %v", n.Op) + base.Fatalf("select %v", n.Op()) case ir.OSEND: // already ok case ir.OSELRECV, ir.OSELRECV2: - if n.Op == ir.OSELRECV || n.List.Len() == 0 { - if n.Left == nil { - n = n.Right + if n.Op() == ir.OSELRECV || n.List().Len() == 0 { + if n.Left() == nil { + n = n.Right() } else { - n.Op = ir.OAS + n.SetOp(ir.OAS) } break } - if n.Left == nil { + if n.Left() == nil { ir.BlankNode = typecheck(ir.BlankNode, ctxExpr|ctxAssign) - n.Left = ir.BlankNode + n.SetLeft(ir.BlankNode) } - n.Op = ir.OAS2 - n.List.Prepend(n.Left) - n.Rlist.Set1(n.Right) - n.Right = nil - n.Left = nil + n.SetOp(ir.OAS2) + n.PtrList().Prepend(n.Left()) + n.PtrRlist().Set1(n.Right()) + n.SetRight(nil) + n.SetLeft(nil) n.SetTypecheck(0) n = typecheck(n, ctxStmt) } @@ -161,7 +161,7 @@ func walkselectcases(cases *ir.Nodes) []*ir.Node { l = append(l, n) } - l = append(l, cas.Nbody.Slice()...) + l = append(l, cas.Body().Slice()...) l = append(l, ir.Nod(ir.OBREAK, nil, nil)) return l } @@ -171,24 +171,24 @@ func walkselectcases(cases *ir.Nodes) []*ir.Node { var dflt *ir.Node for _, cas := range cases.Slice() { setlineno(cas) - n := cas.Left + n := cas.Left() if n == nil { dflt = cas continue } - switch n.Op { + switch n.Op() { case ir.OSEND: - n.Right = ir.Nod(ir.OADDR, n.Right, nil) - n.Right = typecheck(n.Right, ctxExpr) + n.SetRight(ir.Nod(ir.OADDR, n.Right(), nil)) + n.SetRight(typecheck(n.Right(), ctxExpr)) case ir.OSELRECV, ir.OSELRECV2: - if n.Op == ir.OSELRECV2 && n.List.Len() == 0 { - n.Op = ir.OSELRECV + if n.Op() == ir.OSELRECV2 && n.List().Len() == 0 { + n.SetOp(ir.OSELRECV) } - if n.Left != nil { - n.Left = ir.Nod(ir.OADDR, n.Left, nil) - n.Left = typecheck(n.Left, ctxExpr) + if n.Left() != nil { + n.SetLeft(ir.Nod(ir.OADDR, n.Left(), nil)) + n.SetLeft(typecheck(n.Left(), ctxExpr)) } } } @@ -200,43 +200,43 @@ func walkselectcases(cases *ir.Nodes) []*ir.Node { cas = cases.Second() } - n := cas.Left + n := cas.Left() setlineno(n) r := ir.Nod(ir.OIF, nil, nil) - r.Ninit.Set(cas.Ninit.Slice()) - switch n.Op { + r.PtrInit().Set(cas.Init().Slice()) + switch n.Op() { default: - base.Fatalf("select %v", n.Op) + base.Fatalf("select %v", n.Op()) case ir.OSEND: // if selectnbsend(c, v) { body } else { default body } - ch := n.Left - r.Left = mkcall1(chanfn("selectnbsend", 2, ch.Type), types.Types[types.TBOOL], &r.Ninit, ch, n.Right) + ch := n.Left() + r.SetLeft(mkcall1(chanfn("selectnbsend", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), ch, n.Right())) case ir.OSELRECV: // if selectnbrecv(&v, c) { body } else { default body } - ch := n.Right.Left - elem := n.Left + ch := n.Right().Left() + elem := n.Left() if elem == nil { elem = nodnil() } - r.Left = mkcall1(chanfn("selectnbrecv", 2, ch.Type), types.Types[types.TBOOL], &r.Ninit, elem, ch) + r.SetLeft(mkcall1(chanfn("selectnbrecv", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, ch)) case ir.OSELRECV2: // if selectnbrecv2(&v, &received, c) { body } else { default body } - ch := n.Right.Left - elem := n.Left + ch := n.Right().Left() + elem := n.Left() if elem == nil { elem = nodnil() } - receivedp := ir.Nod(ir.OADDR, n.List.First(), nil) + receivedp := ir.Nod(ir.OADDR, n.List().First(), nil) receivedp = typecheck(receivedp, ctxExpr) - r.Left = mkcall1(chanfn("selectnbrecv2", 2, ch.Type), types.Types[types.TBOOL], &r.Ninit, elem, receivedp, ch) + r.SetLeft(mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch)) } - r.Left = 
typecheck(r.Left, ctxExpr) - r.Nbody.Set(cas.Nbody.Slice()) - r.Rlist.Set(append(dflt.Ninit.Slice(), dflt.Nbody.Slice()...)) + r.SetLeft(typecheck(r.Left(), ctxExpr)) + r.PtrBody().Set(cas.Body().Slice()) + r.PtrRlist().Set(append(dflt.Init().Slice(), dflt.Body().Slice()...)) return []*ir.Node{r, ir.Nod(ir.OBREAK, nil, nil)} } @@ -270,29 +270,29 @@ func walkselectcases(cases *ir.Nodes) []*ir.Node { for _, cas := range cases.Slice() { setlineno(cas) - init = append(init, cas.Ninit.Slice()...) - cas.Ninit.Set(nil) + init = append(init, cas.Init().Slice()...) + cas.PtrInit().Set(nil) - n := cas.Left + n := cas.Left() if n == nil { // default: continue } var i int var c, elem *ir.Node - switch n.Op { + switch n.Op() { default: - base.Fatalf("select %v", n.Op) + base.Fatalf("select %v", n.Op()) case ir.OSEND: i = nsends nsends++ - c = n.Left - elem = n.Right + c = n.Left() + elem = n.Right() case ir.OSELRECV, ir.OSELRECV2: nrecvs++ i = ncas - nrecvs - c = n.Right.Left - elem = n.Left + c = n.Right().Left() + elem = n.Left() } casorder[i] = cas @@ -326,9 +326,9 @@ func walkselectcases(cases *ir.Nodes) []*ir.Node { chosen := temp(types.Types[types.TINT]) recvOK := temp(types.Types[types.TBOOL]) r = ir.Nod(ir.OAS2, nil, nil) - r.List.Set2(chosen, recvOK) + r.PtrList().Set2(chosen, recvOK) fn := syslook("selectgo") - r.Rlist.Set1(mkcall1(fn, fn.Type.Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil))) + r.PtrRlist().Set1(mkcall1(fn, fn.Type().Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil))) r = typecheck(r, ctxStmt) init = append(init, r) @@ -346,14 +346,14 @@ func walkselectcases(cases *ir.Nodes) []*ir.Node { r := ir.Nod(ir.OIF, cond, nil) - if n := cas.Left; n != nil && n.Op == ir.OSELRECV2 { - x := ir.Nod(ir.OAS, n.List.First(), recvOK) + if n := cas.Left(); n != nil && n.Op() == ir.OSELRECV2 { + x := ir.Nod(ir.OAS, n.List().First(), recvOK) x = typecheck(x, ctxStmt) - r.Nbody.Append(x) + r.PtrBody().Append(x) } - r.Nbody.AppendNodes(&cas.Nbody) - r.Nbody.Append(ir.Nod(ir.OBREAK, nil, nil)) + r.PtrBody().AppendNodes(cas.PtrBody()) + r.PtrBody().Append(ir.Nod(ir.OBREAK, nil, nil)) init = append(init, r) } diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 0ba7efb95ec71..c0f85a1e337bd 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -57,54 +57,54 @@ func (s *InitSchedule) tryStaticInit(n *ir.Node) bool { // replaced by multiple simple OAS assignments, and the other // OAS2* assignments mostly necessitate dynamic execution // anyway. - if n.Op != ir.OAS { + if n.Op() != ir.OAS { return false } - if ir.IsBlank(n.Left) && candiscard(n.Right) { + if ir.IsBlank(n.Left()) && candiscard(n.Right()) { return true } lno := setlineno(n) defer func() { base.Pos = lno }() - return s.staticassign(n.Left, n.Right) + return s.staticassign(n.Left(), n.Right()) } // like staticassign but we are copying an already // initialized value r. 
func (s *InitSchedule) staticcopy(l *ir.Node, r *ir.Node) bool { - if r.Op != ir.ONAME && r.Op != ir.OMETHEXPR { + if r.Op() != ir.ONAME && r.Op() != ir.OMETHEXPR { return false } if r.Class() == ir.PFUNC { pfuncsym(l, r) return true } - if r.Class() != ir.PEXTERN || r.Sym.Pkg != ir.LocalPkg { + if r.Class() != ir.PEXTERN || r.Sym().Pkg != ir.LocalPkg { return false } - if r.Name.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value + if r.Name().Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value return false } - if r.Name.Defn.Op != ir.OAS { + if r.Name().Defn.Op() != ir.OAS { return false } - if r.Type.IsString() { // perhaps overwritten by cmd/link -X (#34675) + if r.Type().IsString() { // perhaps overwritten by cmd/link -X (#34675) return false } orig := r - r = r.Name.Defn.Right + r = r.Name().Defn.Right() - for r.Op == ir.OCONVNOP && !types.Identical(r.Type, l.Type) { - r = r.Left + for r.Op() == ir.OCONVNOP && !types.Identical(r.Type(), l.Type()) { + r = r.Left() } - switch r.Op { + switch r.Op() { case ir.ONAME, ir.OMETHEXPR: if s.staticcopy(l, r) { return true } // We may have skipped past one or more OCONVNOPs, so // use conv to ensure r is assignable to l (#13263). - s.append(ir.Nod(ir.OAS, l, conv(r, l.Type))) + s.append(ir.Nod(ir.OAS, l, conv(r, l.Type()))) return true case ir.ONIL: @@ -114,17 +114,17 @@ func (s *InitSchedule) staticcopy(l *ir.Node, r *ir.Node) bool { if isZero(r) { return true } - litsym(l, r, int(l.Type.Width)) + litsym(l, r, int(l.Type().Width)) return true case ir.OADDR: - if a := r.Left; a.Op == ir.ONAME { + if a := r.Left(); a.Op() == ir.ONAME { addrsym(l, a) return true } case ir.OPTRLIT: - switch r.Left.Op { + switch r.Left().Op() { case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT: // copy pointer addrsym(l, s.inittemps[r]) @@ -134,7 +134,7 @@ func (s *InitSchedule) staticcopy(l *ir.Node, r *ir.Node) bool { case ir.OSLICELIT: // copy slice a := s.inittemps[r] - slicesym(l, a, r.Right.Int64Val()) + slicesym(l, a, r.Right().Int64Val()) return true case ir.OARRAYLIT, ir.OSTRUCTLIT: @@ -143,10 +143,10 @@ func (s *InitSchedule) staticcopy(l *ir.Node, r *ir.Node) bool { n := ir.Copy(l) for i := range p.E { e := &p.E[i] - n.Xoffset = l.Xoffset + e.Xoffset - n.Type = e.Expr.Type - if e.Expr.Op == ir.OLITERAL || e.Expr.Op == ir.ONIL { - litsym(n, e.Expr, int(n.Type.Width)) + n.SetOffset(l.Offset() + e.Xoffset) + n.SetType(e.Expr.Type()) + if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL { + litsym(n, e.Expr, int(n.Type().Width)) continue } ll := ir.SepCopy(n) @@ -156,8 +156,8 @@ func (s *InitSchedule) staticcopy(l *ir.Node, r *ir.Node) bool { // Requires computation, but we're // copying someone else's computation. 
rr := ir.SepCopy(orig) - rr.Type = ll.Type - rr.Xoffset = rr.Xoffset + e.Xoffset + rr.SetType(ll.Type()) + rr.SetOffset(rr.Offset() + e.Xoffset) setlineno(rr) s.append(ir.Nod(ir.OAS, ll, rr)) } @@ -169,11 +169,11 @@ func (s *InitSchedule) staticcopy(l *ir.Node, r *ir.Node) bool { } func (s *InitSchedule) staticassign(l *ir.Node, r *ir.Node) bool { - for r.Op == ir.OCONVNOP { - r = r.Left + for r.Op() == ir.OCONVNOP { + r = r.Left() } - switch r.Op { + switch r.Op() { case ir.ONAME, ir.OMETHEXPR: return s.staticcopy(l, r) @@ -184,36 +184,36 @@ func (s *InitSchedule) staticassign(l *ir.Node, r *ir.Node) bool { if isZero(r) { return true } - litsym(l, r, int(l.Type.Width)) + litsym(l, r, int(l.Type().Width)) return true case ir.OADDR: - if nam := stataddr(r.Left); nam != nil { + if nam := stataddr(r.Left()); nam != nil { addrsym(l, nam) return true } fallthrough case ir.OPTRLIT: - switch r.Left.Op { + switch r.Left().Op() { case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT: // Init pointer. - a := staticname(r.Left.Type) + a := staticname(r.Left().Type()) s.inittemps[r] = a addrsym(l, a) // Init underlying literal. - if !s.staticassign(a, r.Left) { - s.append(ir.Nod(ir.OAS, a, r.Left)) + if !s.staticassign(a, r.Left()) { + s.append(ir.Nod(ir.OAS, a, r.Left())) } return true } //dump("not static ptrlit", r); case ir.OSTR2BYTES: - if l.Class() == ir.PEXTERN && r.Left.Op == ir.OLITERAL { - sval := r.Left.StringVal() + if l.Class() == ir.PEXTERN && r.Left().Op() == ir.OLITERAL { + sval := r.Left().StringVal() slicebytes(l, sval) return true } @@ -221,8 +221,8 @@ func (s *InitSchedule) staticassign(l *ir.Node, r *ir.Node) bool { case ir.OSLICELIT: s.initplan(r) // Init slice. - bound := r.Right.Int64Val() - ta := types.NewArray(r.Type.Elem(), bound) + bound := r.Right().Int64Val() + ta := types.NewArray(r.Type().Elem(), bound) ta.SetNoalg(true) a := staticname(ta) s.inittemps[r] = a @@ -238,10 +238,10 @@ func (s *InitSchedule) staticassign(l *ir.Node, r *ir.Node) bool { n := ir.Copy(l) for i := range p.E { e := &p.E[i] - n.Xoffset = l.Xoffset + e.Xoffset - n.Type = e.Expr.Type - if e.Expr.Op == ir.OLITERAL || e.Expr.Op == ir.ONIL { - litsym(n, e.Expr, int(n.Type.Width)) + n.SetOffset(l.Offset() + e.Xoffset) + n.SetType(e.Expr.Type()) + if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL { + litsym(n, e.Expr, int(n.Type().Width)) continue } setlineno(e.Expr) @@ -259,11 +259,11 @@ func (s *InitSchedule) staticassign(l *ir.Node, r *ir.Node) bool { case ir.OCLOSURE: if hasemptycvars(r) { if base.Debug.Closure > 0 { - base.WarnfAt(r.Pos, "closure converted to global") + base.WarnfAt(r.Pos(), "closure converted to global") } // Closures with no captured variables are globals, // so the assignment can be done at link time. - pfuncsym(l, r.Func.Nname) + pfuncsym(l, r.Func().Nname) return true } closuredebugruntimecheck(r) @@ -274,43 +274,43 @@ func (s *InitSchedule) staticassign(l *ir.Node, r *ir.Node) bool { // Determine the underlying concrete type and value we are converting from. val := r - for val.Op == ir.OCONVIFACE { - val = val.Left + for val.Op() == ir.OCONVIFACE { + val = val.Left() } - if val.Type.IsInterface() { + if val.Type().IsInterface() { // val is an interface type. // If val is nil, we can statically initialize l; // both words are zero and so there no work to do, so report success. // If val is non-nil, we have no concrete type to record, // and we won't be able to statically initialize its value, so report failure. 
- return val.Op == ir.ONIL + return val.Op() == ir.ONIL } - markTypeUsedInInterface(val.Type, l.Sym.Linksym()) + markTypeUsedInInterface(val.Type(), l.Sym().Linksym()) var itab *ir.Node - if l.Type.IsEmptyInterface() { - itab = typename(val.Type) + if l.Type().IsEmptyInterface() { + itab = typename(val.Type()) } else { - itab = itabname(val.Type, l.Type) + itab = itabname(val.Type(), l.Type()) } // Create a copy of l to modify while we emit data. n := ir.Copy(l) // Emit itab, advance offset. - addrsym(n, itab.Left) // itab is an OADDR node - n.Xoffset = n.Xoffset + int64(Widthptr) + addrsym(n, itab.Left()) // itab is an OADDR node + n.SetOffset(n.Offset() + int64(Widthptr)) // Emit data. - if isdirectiface(val.Type) { - if val.Op == ir.ONIL { + if isdirectiface(val.Type()) { + if val.Op() == ir.ONIL { // Nil is zero, nothing to do. return true } // Copy val directly into n. - n.Type = val.Type + n.SetType(val.Type()) setlineno(val) a := ir.SepCopy(n) if !s.staticassign(a, val) { @@ -318,7 +318,7 @@ func (s *InitSchedule) staticassign(l *ir.Node, r *ir.Node) bool { } } else { // Construct temp to hold val, write pointer to temp into n. - a := staticname(val.Type) + a := staticname(val.Type()) s.inittemps[val] = a if !s.staticassign(a, val) { s.append(ir.Nod(ir.OAS, a, val)) @@ -372,7 +372,7 @@ func staticname(t *types.Type) *ir.Node { n := NewName(lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen))) statuniqgen++ addvar(n, t, ir.PEXTERN) - n.Sym.Linksym().Set(obj.AttrLocal, true) + n.Sym().Linksym().Set(obj.AttrLocal, true) return n } @@ -380,12 +380,12 @@ func staticname(t *types.Type) *ir.Node { func readonlystaticname(t *types.Type) *ir.Node { n := staticname(t) n.MarkReadonly() - n.Sym.Linksym().Set(obj.AttrContentAddressable, true) + n.Sym().Linksym().Set(obj.AttrContentAddressable, true) return n } func isSimpleName(n *ir.Node) bool { - return (n.Op == ir.ONAME || n.Op == ir.OMETHEXPR) && n.Class() != ir.PAUTOHEAP && n.Class() != ir.PEXTERN + return (n.Op() == ir.ONAME || n.Op() == ir.OMETHEXPR) && n.Class() != ir.PAUTOHEAP && n.Class() != ir.PEXTERN } func litas(l *ir.Node, r *ir.Node, init *ir.Nodes) { @@ -406,7 +406,7 @@ const ( // getdyn calculates the initGenType for n. // If top is false, getdyn is recursing. func getdyn(n *ir.Node, top bool) initGenType { - switch n.Op { + switch n.Op() { default: if isGoConst(n) { return initConst @@ -417,7 +417,7 @@ func getdyn(n *ir.Node, top bool) initGenType { if !top { return initDynamic } - if n.Right.Int64Val()/4 > int64(n.List.Len()) { + if n.Right().Int64Val()/4 > int64(n.List().Len()) { // <25% of entries have explicit values. // Very rough estimation, it takes 4 bytes of instructions // to initialize 1 byte of result. So don't use a static @@ -431,12 +431,12 @@ func getdyn(n *ir.Node, top bool) initGenType { } var mode initGenType - for _, n1 := range n.List.Slice() { - switch n1.Op { + for _, n1 := range n.List().Slice() { + switch n1.Op() { case ir.OKEY: - n1 = n1.Right + n1 = n1.Right() case ir.OSTRUCTKEY: - n1 = n1.Left + n1 = n1.Left() } mode |= getdyn(n1, false) if mode == initDynamic|initConst { @@ -448,13 +448,13 @@ func getdyn(n *ir.Node, top bool) initGenType { // isStaticCompositeLiteral reports whether n is a compile-time constant. 
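A few concrete cases may help before the function below (a sketch based on the switch it contains, not compiler output; note that OSLICELIT is rejected unconditionally because a slice literal always needs a runtime backing store):

	package main

	func f() int { return 3 }

	var (
		a = [3]int{1, 2, 3}          // OARRAYLIT with constant elements: static
		b = struct{ X, Y int }{1, 2} // OSTRUCTLIT with constant fields: static
		c = []int{1, 2, 3}           // OSLICELIT: never static (needs backing store)
		d = [1]int{f()}              // element requires evaluation: not static
	)
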
func isStaticCompositeLiteral(n *ir.Node) bool { - switch n.Op { + switch n.Op() { case ir.OSLICELIT: return false case ir.OARRAYLIT: - for _, r := range n.List.Slice() { - if r.Op == ir.OKEY { - r = r.Right + for _, r := range n.List().Slice() { + if r.Op() == ir.OKEY { + r = r.Right() } if !isStaticCompositeLiteral(r) { return false @@ -462,11 +462,11 @@ func isStaticCompositeLiteral(n *ir.Node) bool { } return true case ir.OSTRUCTLIT: - for _, r := range n.List.Slice() { - if r.Op != ir.OSTRUCTKEY { + for _, r := range n.List().Slice() { + if r.Op() != ir.OSTRUCTKEY { base.Fatalf("isStaticCompositeLiteral: rhs not OSTRUCTKEY: %v", r) } - if !isStaticCompositeLiteral(r.Left) { + if !isStaticCompositeLiteral(r.Left()) { return false } } @@ -476,13 +476,13 @@ func isStaticCompositeLiteral(n *ir.Node) bool { case ir.OCONVIFACE: // See staticassign's OCONVIFACE case for comments. val := n - for val.Op == ir.OCONVIFACE { - val = val.Left + for val.Op() == ir.OCONVIFACE { + val = val.Left() } - if val.Type.IsInterface() { - return val.Op == ir.ONIL + if val.Type().IsInterface() { + return val.Op() == ir.ONIL } - if isdirectiface(val.Type) && val.Op == ir.ONIL { + if isdirectiface(val.Type()) && val.Op() == ir.ONIL { return true } return isStaticCompositeLiteral(val) @@ -512,16 +512,16 @@ const ( func fixedlit(ctxt initContext, kind initKind, n *ir.Node, var_ *ir.Node, init *ir.Nodes) { isBlank := var_ == ir.BlankNode var splitnode func(*ir.Node) (a *ir.Node, value *ir.Node) - switch n.Op { + switch n.Op() { case ir.OARRAYLIT, ir.OSLICELIT: var k int64 splitnode = func(r *ir.Node) (*ir.Node, *ir.Node) { - if r.Op == ir.OKEY { - k = indexconst(r.Left) + if r.Op() == ir.OKEY { + k = indexconst(r.Left()) if k < 0 { - base.Fatalf("fixedlit: invalid index %v", r.Left) + base.Fatalf("fixedlit: invalid index %v", r.Left()) } - r = r.Right + r = r.Right() } a := ir.Nod(ir.OINDEX, var_, nodintconst(k)) k++ @@ -532,26 +532,26 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.Node, var_ *ir.Node, init * } case ir.OSTRUCTLIT: splitnode = func(r *ir.Node) (*ir.Node, *ir.Node) { - if r.Op != ir.OSTRUCTKEY { + if r.Op() != ir.OSTRUCTKEY { base.Fatalf("fixedlit: rhs not OSTRUCTKEY: %v", r) } - if r.Sym.IsBlank() || isBlank { - return ir.BlankNode, r.Left + if r.Sym().IsBlank() || isBlank { + return ir.BlankNode, r.Left() } setlineno(r) - return nodSym(ir.ODOT, var_, r.Sym), r.Left + return nodSym(ir.ODOT, var_, r.Sym()), r.Left() } default: - base.Fatalf("fixedlit bad op: %v", n.Op) + base.Fatalf("fixedlit bad op: %v", n.Op()) } - for _, r := range n.List.Slice() { + for _, r := range n.List().Slice() { a, value := splitnode(r) if a == ir.BlankNode && candiscard(value) { continue } - switch value.Op { + switch value.Op() { case ir.OSLICELIT: if (kind == initKindStatic && ctxt == inNonInitFunction) || (kind == initKindDynamic && ctxt == inInitFunction) { slicelit(ctxt, value, a, init) @@ -587,18 +587,18 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.Node, var_ *ir.Node, init * } func isSmallSliceLit(n *ir.Node) bool { - if n.Op != ir.OSLICELIT { + if n.Op() != ir.OSLICELIT { return false } - r := n.Right + r := n.Right() - return smallintconst(r) && (n.Type.Elem().Width == 0 || r.Int64Val() <= smallArrayBytes/n.Type.Elem().Width) + return smallintconst(r) && (n.Type().Elem().Width == 0 || r.Int64Val() <= smallArrayBytes/n.Type().Elem().Width) } func slicelit(ctxt initContext, n *ir.Node, var_ *ir.Node, init *ir.Nodes) { // make an array type corresponding the number of elements we have - t := 
types.NewArray(n.Type.Elem(), n.Right.Int64Val()) + t := types.NewArray(n.Type().Elem(), n.Right().Int64Val()) dowidth(t) if ctxt == inNonInitFunction { @@ -658,7 +658,7 @@ func slicelit(ctxt initContext, n *ir.Node, var_ *ir.Node, init *ir.Nodes) { var a *ir.Node if x := prealloc[n]; x != nil { // temp allocated during order.go for dddarg - if !types.Identical(t, x.Type) { + if !types.Identical(t, x.Type()) { panic("dotdotdot base type does not match order's assigned type") } @@ -673,13 +673,13 @@ func slicelit(ctxt initContext, n *ir.Node, var_ *ir.Node, init *ir.Nodes) { } a = ir.Nod(ir.OADDR, x, nil) - } else if n.Esc == EscNone { + } else if n.Esc() == EscNone { a = temp(t) if vstat == nil { a = ir.Nod(ir.OAS, temp(t), nil) a = typecheck(a, ctxStmt) init.Append(a) // zero new temp - a = a.Left + a = a.Left() } else { init.Append(ir.Nod(ir.OVARDEF, a, nil)) } @@ -687,7 +687,7 @@ func slicelit(ctxt initContext, n *ir.Node, var_ *ir.Node, init *ir.Nodes) { a = ir.Nod(ir.OADDR, a, nil) } else { a = ir.Nod(ir.ONEW, nil, nil) - a.List.Set1(typenod(t)) + a.PtrList().Set1(typenod(t)) } a = ir.Nod(ir.OAS, vauto, a) @@ -707,13 +707,13 @@ func slicelit(ctxt initContext, n *ir.Node, var_ *ir.Node, init *ir.Nodes) { // put dynamics into array (5) var index int64 - for _, value := range n.List.Slice() { - if value.Op == ir.OKEY { - index = indexconst(value.Left) + for _, value := range n.List().Slice() { + if value.Op() == ir.OKEY { + index = indexconst(value.Left()) if index < 0 { - base.Fatalf("slicelit: invalid index %v", value.Left) + base.Fatalf("slicelit: invalid index %v", value.Left()) } - value = value.Right + value = value.Right() } a := ir.Nod(ir.OINDEX, vauto, nodintconst(index)) a.SetBounded(true) @@ -721,7 +721,7 @@ func slicelit(ctxt initContext, n *ir.Node, var_ *ir.Node, init *ir.Nodes) { // TODO need to check bounds? - switch value.Op { + switch value.Op() { case ir.OSLICELIT: break @@ -762,16 +762,16 @@ func slicelit(ctxt initContext, n *ir.Node, var_ *ir.Node, init *ir.Nodes) { func maplit(n *ir.Node, m *ir.Node, init *ir.Nodes) { // make the map var a := ir.Nod(ir.OMAKE, nil, nil) - a.Esc = n.Esc - a.List.Set2(typenod(n.Type), nodintconst(int64(n.List.Len()))) + a.SetEsc(n.Esc()) + a.PtrList().Set2(typenod(n.Type()), nodintconst(int64(n.List().Len()))) litas(m, a, init) - entries := n.List.Slice() + entries := n.List().Slice() // The order pass already removed any dynamic (runtime-computed) entries. // All remaining entries are static. Double-check that. for _, r := range entries { - if !isStaticCompositeLiteral(r.Left) || !isStaticCompositeLiteral(r.Right) { + if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) { base.Fatalf("maplit: entry is not a literal: %v", r) } } @@ -780,8 +780,8 @@ func maplit(n *ir.Node, m *ir.Node, init *ir.Nodes) { // For a large number of entries, put them in an array and loop. 
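To make that strategy concrete: for a map literal with many static entries, maplit emits two read-only arrays holding the keys and elements plus a single insertion loop, instead of one map assignment per entry. Roughly, in source terms (identifiers here are illustrative, not compiler names):

	package main

	func largeMapLit() map[string]int {
		// vstatk / vstate in the code below: static, read-only backing arrays.
		keys := [...]string{"a", "b", "c"}
		elems := [...]int{1, 2, 3}

		m := make(map[string]int, len(keys))
		// The generated loop: for i = 0; i < len(keys); i++ { m[keys[i]] = elems[i] }
		for i := 0; i < len(keys); i++ {
			m[keys[i]] = elems[i]
		}
		return m
	}
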
// build types [count]Tindex and [count]Tvalue - tk := types.NewArray(n.Type.Key(), int64(len(entries))) - te := types.NewArray(n.Type.Elem(), int64(len(entries))) + tk := types.NewArray(n.Type().Key(), int64(len(entries))) + te := types.NewArray(n.Type().Elem(), int64(len(entries))) tk.SetNoalg(true) te.SetNoalg(true) @@ -796,8 +796,8 @@ func maplit(n *ir.Node, m *ir.Node, init *ir.Nodes) { datak := ir.Nod(ir.OARRAYLIT, nil, nil) datae := ir.Nod(ir.OARRAYLIT, nil, nil) for _, r := range entries { - datak.List.Append(r.Left) - datae.List.Append(r.Right) + datak.PtrList().Append(r.Left()) + datae.PtrList().Append(r.Right()) } fixedlit(inInitFunction, initKindStatic, datak, vstatk, init) fixedlit(inInitFunction, initKindStatic, datae, vstate, init) @@ -820,8 +820,8 @@ func maplit(n *ir.Node, m *ir.Node, init *ir.Nodes) { body := ir.Nod(ir.OAS, lhs, rhs) loop := ir.Nod(ir.OFOR, cond, incr) - loop.Nbody.Set1(body) - loop.Ninit.Set1(zero) + loop.PtrBody().Set1(body) + loop.PtrInit().Set1(zero) loop = typecheck(loop, ctxStmt) loop = walkstmt(loop) @@ -833,11 +833,11 @@ func maplit(n *ir.Node, m *ir.Node, init *ir.Nodes) { // Build list of var[c] = expr. // Use temporaries so that mapassign1 can have addressable key, elem. // TODO(josharian): avoid map key temporaries for mapfast_* assignments with literal keys. - tmpkey := temp(m.Type.Key()) - tmpelem := temp(m.Type.Elem()) + tmpkey := temp(m.Type().Key()) + tmpelem := temp(m.Type().Elem()) for _, r := range entries { - index, elem := r.Left, r.Right + index, elem := r.Left(), r.Right() setlineno(index) a := ir.Nod(ir.OAS, tmpkey, index) @@ -867,10 +867,10 @@ func maplit(n *ir.Node, m *ir.Node, init *ir.Nodes) { } func anylit(n *ir.Node, var_ *ir.Node, init *ir.Nodes) { - t := n.Type - switch n.Op { + t := n.Type() + switch n.Op() { default: - base.Fatalf("anylit: not lit, op=%v node=%v", n.Op, n) + base.Fatalf("anylit: not lit, op=%v node=%v", n.Op(), n) case ir.ONAME, ir.OMETHEXPR: a := ir.Nod(ir.OAS, var_, n) @@ -883,16 +883,16 @@ func anylit(n *ir.Node, var_ *ir.Node, init *ir.Nodes) { } var r *ir.Node - if n.Right != nil { + if n.Right() != nil { // n.Right is stack temporary used as backing store. 
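For context, a sketch of what this branch corresponds to at the source level (the escape decision itself is made earlier, by escape analysis and order, which is an assumption here rather than something this hunk shows):

	package main

	type point struct{ x, y int }

	// A &point{...} whose pointer escapes is lowered through the ONEW
	// branch below: new(point) followed by field stores.
	func escaping() *point {
		return &point{x: 1, y: 2}
	}

	// A non-escaping &point{...} instead aliases a zeroed stack temporary
	// (the n.Right backing store mentioned above) whose address is taken.
	func local() int {
		p := &point{x: 1, y: 2}
		return p.x + p.y
	}
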
- init.Append(ir.Nod(ir.OAS, n.Right, nil)) // zero backing store, just in case (#18410) - r = ir.Nod(ir.OADDR, n.Right, nil) + init.Append(ir.Nod(ir.OAS, n.Right(), nil)) // zero backing store, just in case (#18410) + r = ir.Nod(ir.OADDR, n.Right(), nil) r = typecheck(r, ctxExpr) } else { r = ir.Nod(ir.ONEW, nil, nil) r.SetTypecheck(1) - r.Type = t - r.Esc = n.Esc + r.SetType(t) + r.SetEsc(n.Esc()) } r = walkexpr(r, init) @@ -903,19 +903,19 @@ func anylit(n *ir.Node, var_ *ir.Node, init *ir.Nodes) { var_ = ir.Nod(ir.ODEREF, var_, nil) var_ = typecheck(var_, ctxExpr|ctxAssign) - anylit(n.Left, var_, init) + anylit(n.Left(), var_, init) case ir.OSTRUCTLIT, ir.OARRAYLIT: if !t.IsStruct() && !t.IsArray() { base.Fatalf("anylit: not struct/array") } - if isSimpleName(var_) && n.List.Len() > 4 { + if isSimpleName(var_) && n.List().Len() > 4 { // lay out static data vstat := readonlystaticname(t) ctxt := inInitFunction - if n.Op == ir.OARRAYLIT { + if n.Op() == ir.OARRAYLIT { ctxt = inNonInitFunction } fixedlit(ctxt, initKindStatic, n, vstat, init) @@ -933,13 +933,13 @@ func anylit(n *ir.Node, var_ *ir.Node, init *ir.Nodes) { } var components int64 - if n.Op == ir.OARRAYLIT { + if n.Op() == ir.OARRAYLIT { components = t.NumElem() } else { components = int64(t.NumFields()) } // initialization of an array or struct with unspecified components (missing fields or arrays) - if isSimpleName(var_) || int64(n.List.Len()) < components { + if isSimpleName(var_) || int64(n.List().Len()) < components { a := ir.Nod(ir.OAS, var_, nil) a = typecheck(a, ctxStmt) a = walkexpr(a, init) @@ -960,38 +960,38 @@ func anylit(n *ir.Node, var_ *ir.Node, init *ir.Nodes) { } func oaslit(n *ir.Node, init *ir.Nodes) bool { - if n.Left == nil || n.Right == nil { + if n.Left() == nil || n.Right() == nil { // not a special composite literal assignment return false } - if n.Left.Type == nil || n.Right.Type == nil { + if n.Left().Type() == nil || n.Right().Type() == nil { // not a special composite literal assignment return false } - if !isSimpleName(n.Left) { + if !isSimpleName(n.Left()) { // not a special composite literal assignment return false } - if !types.Identical(n.Left.Type, n.Right.Type) { + if !types.Identical(n.Left().Type(), n.Right().Type()) { // not a special composite literal assignment return false } - switch n.Right.Op { + switch n.Right().Op() { default: // not a special composite literal assignment return false case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT: - if vmatch1(n.Left, n.Right) { + if vmatch1(n.Left(), n.Right()) { // not a special composite literal assignment return false } - anylit(n.Right, n.Left, init) + anylit(n.Right(), n.Left(), init) } - n.Op = ir.OEMPTY - n.Right = nil + n.SetOp(ir.OEMPTY) + n.SetRight(nil) return true } @@ -1008,38 +1008,38 @@ func stataddr(n *ir.Node) *ir.Node { return nil } - switch n.Op { + switch n.Op() { case ir.ONAME, ir.OMETHEXPR: return ir.SepCopy(n) case ir.ODOT: - nam := stataddr(n.Left) + nam := stataddr(n.Left()) if nam == nil { break } - nam.Xoffset = nam.Xoffset + n.Xoffset - nam.Type = n.Type + nam.SetOffset(nam.Offset() + n.Offset()) + nam.SetType(n.Type()) return nam case ir.OINDEX: - if n.Left.Type.IsSlice() { + if n.Left().Type().IsSlice() { break } - nam := stataddr(n.Left) + nam := stataddr(n.Left()) if nam == nil { break } - l := getlit(n.Right) + l := getlit(n.Right()) if l < 0 { break } // Check for overflow. 
- if n.Type.Width != 0 && thearch.MAXWIDTH/n.Type.Width <= int64(l) { + if n.Type().Width != 0 && thearch.MAXWIDTH/n.Type().Width <= int64(l) { break } - nam.Xoffset = nam.Xoffset + int64(l)*n.Type.Width - nam.Type = n.Type + nam.SetOffset(nam.Offset() + int64(l)*n.Type().Width) + nam.SetType(n.Type()) return nam } @@ -1052,41 +1052,41 @@ func (s *InitSchedule) initplan(n *ir.Node) { } p := new(InitPlan) s.initplans[n] = p - switch n.Op { + switch n.Op() { default: base.Fatalf("initplan") case ir.OARRAYLIT, ir.OSLICELIT: var k int64 - for _, a := range n.List.Slice() { - if a.Op == ir.OKEY { - k = indexconst(a.Left) + for _, a := range n.List().Slice() { + if a.Op() == ir.OKEY { + k = indexconst(a.Left()) if k < 0 { - base.Fatalf("initplan arraylit: invalid index %v", a.Left) + base.Fatalf("initplan arraylit: invalid index %v", a.Left()) } - a = a.Right + a = a.Right() } - s.addvalue(p, k*n.Type.Elem().Width, a) + s.addvalue(p, k*n.Type().Elem().Width, a) k++ } case ir.OSTRUCTLIT: - for _, a := range n.List.Slice() { - if a.Op != ir.OSTRUCTKEY { + for _, a := range n.List().Slice() { + if a.Op() != ir.OSTRUCTKEY { base.Fatalf("initplan structlit") } - if a.Sym.IsBlank() { + if a.Sym().IsBlank() { continue } - s.addvalue(p, a.Xoffset, a.Left) + s.addvalue(p, a.Offset(), a.Left()) } case ir.OMAPLIT: - for _, a := range n.List.Slice() { - if a.Op != ir.OKEY { + for _, a := range n.List().Slice() { + if a.Op() != ir.OKEY { base.Fatalf("initplan maplit") } - s.addvalue(p, -1, a.Right) + s.addvalue(p, -1, a.Right()) } } } @@ -1114,7 +1114,7 @@ func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n *ir.Node) { } func isZero(n *ir.Node) bool { - switch n.Op { + switch n.Op() { case ir.ONIL: return true @@ -1129,9 +1129,9 @@ func isZero(n *ir.Node) bool { } case ir.OARRAYLIT: - for _, n1 := range n.List.Slice() { - if n1.Op == ir.OKEY { - n1 = n1.Right + for _, n1 := range n.List().Slice() { + if n1.Op() == ir.OKEY { + n1 = n1.Right() } if !isZero(n1) { return false @@ -1140,8 +1140,8 @@ func isZero(n *ir.Node) bool { return true case ir.OSTRUCTLIT: - for _, n1 := range n.List.Slice() { - if !isZero(n1.Left) { + for _, n1 := range n.List().Slice() { + if !isZero(n1.Left()) { return false } } @@ -1152,25 +1152,25 @@ func isZero(n *ir.Node) bool { } func isvaluelit(n *ir.Node) bool { - return n.Op == ir.OARRAYLIT || n.Op == ir.OSTRUCTLIT + return n.Op() == ir.OARRAYLIT || n.Op() == ir.OSTRUCTLIT } func genAsStatic(as *ir.Node) { - if as.Left.Type == nil { + if as.Left().Type() == nil { base.Fatalf("genAsStatic as.Left not typechecked") } - nam := stataddr(as.Left) - if nam == nil || (nam.Class() != ir.PEXTERN && as.Left != ir.BlankNode) { - base.Fatalf("genAsStatic: lhs %v", as.Left) + nam := stataddr(as.Left()) + if nam == nil || (nam.Class() != ir.PEXTERN && as.Left() != ir.BlankNode) { + base.Fatalf("genAsStatic: lhs %v", as.Left()) } switch { - case as.Right.Op == ir.OLITERAL: - litsym(nam, as.Right, int(as.Right.Type.Width)) - case (as.Right.Op == ir.ONAME || as.Right.Op == ir.OMETHEXPR) && as.Right.Class() == ir.PFUNC: - pfuncsym(nam, as.Right) + case as.Right().Op() == ir.OLITERAL: + litsym(nam, as.Right(), int(as.Right().Type().Width)) + case (as.Right().Op() == ir.ONAME || as.Right().Op() == ir.OMETHEXPR) && as.Right().Class() == ir.PFUNC: + pfuncsym(nam, as.Right()) default: - base.Fatalf("genAsStatic: rhs %v", as.Right) + base.Fatalf("genAsStatic: rhs %v", as.Right()) } } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 
5cee3fab85c5f..018b94d9d8011 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -187,8 +187,8 @@ func initssaconfig() { // considered as the 0th parameter. This does not include the receiver of an // interface call. func getParam(n *ir.Node, i int) *types.Field { - t := n.Left.Type - if n.Op == ir.OCALLMETH { + t := n.Left().Type() + if n.Op() == ir.OCALLMETH { if i == 0 { return t.Recv() } @@ -242,8 +242,8 @@ func dvarint(x *obj.LSym, off int, v int64) int { // - Size of the argument // - Offset of where argument should be placed in the args frame when making call func (s *state) emitOpenDeferInfo() { - x := base.Ctxt.Lookup(s.curfn.Func.LSym.Name + ".opendefer") - s.curfn.Func.LSym.Func().OpenCodedDeferInfo = x + x := base.Ctxt.Lookup(s.curfn.Func().LSym.Name + ".opendefer") + s.curfn.Func().LSym.Func().OpenCodedDeferInfo = x off := 0 // Compute maxargsize (max size of arguments for all defers) @@ -251,20 +251,20 @@ func (s *state) emitOpenDeferInfo() { var maxargsize int64 for i := len(s.openDefers) - 1; i >= 0; i-- { r := s.openDefers[i] - argsize := r.n.Left.Type.ArgWidth() + argsize := r.n.Left().Type().ArgWidth() if argsize > maxargsize { maxargsize = argsize } } off = dvarint(x, off, maxargsize) - off = dvarint(x, off, -s.deferBitsTemp.Xoffset) + off = dvarint(x, off, -s.deferBitsTemp.Offset()) off = dvarint(x, off, int64(len(s.openDefers))) // Write in reverse-order, for ease of running in that order at runtime for i := len(s.openDefers) - 1; i >= 0; i-- { r := s.openDefers[i] - off = dvarint(x, off, r.n.Left.Type.ArgWidth()) - off = dvarint(x, off, -r.closureNode.Xoffset) + off = dvarint(x, off, r.n.Left().Type().ArgWidth()) + off = dvarint(x, off, -r.closureNode.Offset()) numArgs := len(r.argNodes) if r.rcvrNode != nil { // If there's an interface receiver, treat/place it as the first @@ -274,13 +274,13 @@ func (s *state) emitOpenDeferInfo() { } off = dvarint(x, off, int64(numArgs)) if r.rcvrNode != nil { - off = dvarint(x, off, -r.rcvrNode.Xoffset) + off = dvarint(x, off, -r.rcvrNode.Offset()) off = dvarint(x, off, s.config.PtrSize) off = dvarint(x, off, 0) } for j, arg := range r.argNodes { f := getParam(r.n, j) - off = dvarint(x, off, -arg.Xoffset) + off = dvarint(x, off, -arg.Offset()) off = dvarint(x, off, f.Type.Size()) off = dvarint(x, off, f.Offset) } @@ -298,9 +298,9 @@ func buildssa(fn *ir.Node, worker int) *ssa.Func { var astBuf *bytes.Buffer if printssa { astBuf = &bytes.Buffer{} - ir.FDumpList(astBuf, "buildssa-enter", fn.Func.Enter) - ir.FDumpList(astBuf, "buildssa-body", fn.Nbody) - ir.FDumpList(astBuf, "buildssa-exit", fn.Func.Exit) + ir.FDumpList(astBuf, "buildssa-enter", fn.Func().Enter) + ir.FDumpList(astBuf, "buildssa-body", fn.Body()) + ir.FDumpList(astBuf, "buildssa-exit", fn.Func().Exit) if ssaDumpStdout { fmt.Println("generating SSA for", name) fmt.Print(astBuf.String()) @@ -308,11 +308,11 @@ func buildssa(fn *ir.Node, worker int) *ssa.Func { } var s state - s.pushLine(fn.Pos) + s.pushLine(fn.Pos()) defer s.popLine() - s.hasdefer = fn.Func.HasDefer() - if fn.Func.Pragma&ir.CgoUnsafeArgs != 0 { + s.hasdefer = fn.Func().HasDefer() + if fn.Func().Pragma&ir.CgoUnsafeArgs != 0 { s.cgoUnsafeArgs = true } @@ -324,14 +324,14 @@ func buildssa(fn *ir.Node, worker int) *ssa.Func { s.f = ssa.NewFunc(&fe) s.config = ssaConfig - s.f.Type = fn.Type + s.f.Type = fn.Type() s.f.Config = ssaConfig s.f.Cache = &ssaCaches[worker] s.f.Cache.Reset() s.f.Name = name s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH") s.f.PrintOrHtmlSSA = 
printssa - if fn.Func.Pragma&ir.Nosplit != 0 { + if fn.Func().Pragma&ir.Nosplit != 0 { s.f.NoSplit = true } s.panics = map[funcLine]*ssa.Block{} @@ -339,7 +339,7 @@ func buildssa(fn *ir.Node, worker int) *ssa.Func { // Allocate starting block s.f.Entry = s.f.NewBlock(ssa.BlockPlain) - s.f.Entry.Pos = fn.Pos + s.f.Entry.Pos = fn.Pos() if printssa { ssaDF := ssaDumpFile @@ -360,7 +360,7 @@ func buildssa(fn *ir.Node, worker int) *ssa.Func { s.fwdVars = map[*ir.Node]*ssa.Value{} s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem) - s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.Func.OpenCodedDeferDisallowed() + s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.Func().OpenCodedDeferDisallowed() switch { case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386": // Don't support open-coded defers for 386 ONLY when using shared @@ -369,7 +369,7 @@ func buildssa(fn *ir.Node, worker int) *ssa.Func { // that we don't track correctly. s.hasOpenDefers = false } - if s.hasOpenDefers && s.curfn.Func.Exit.Len() > 0 { + if s.hasOpenDefers && s.curfn.Func().Exit.Len() > 0 { // Skip doing open defers if there is any extra exit code (likely // copying heap-allocated return values or race detection), since // we will not generate that code in the case of the extra @@ -377,7 +377,7 @@ func buildssa(fn *ir.Node, worker int) *ssa.Func { s.hasOpenDefers = false } if s.hasOpenDefers && - s.curfn.Func.NumReturns*s.curfn.Func.NumDefers > 15 { + s.curfn.Func().NumReturns*s.curfn.Func().NumDefers > 15 { // Since we are generating defer calls at every exit for // open-coded defers, skip doing open-coded defers if there are // too many returns (especially if there are multiple defers). @@ -414,14 +414,14 @@ func buildssa(fn *ir.Node, worker int) *ssa.Func { s.decladdrs = map[*ir.Node]*ssa.Value{} var args []ssa.Param var results []ssa.Param - for _, n := range fn.Func.Dcl { + for _, n := range fn.Func().Dcl { switch n.Class() { case ir.PPARAM: - s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem) - args = append(args, ssa.Param{Type: n.Type, Offset: int32(n.Xoffset)}) + s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem) + args = append(args, ssa.Param{Type: n.Type(), Offset: int32(n.Offset())}) case ir.PPARAMOUT: - s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem) - results = append(results, ssa.Param{Type: n.Type, Offset: int32(n.Xoffset)}) + s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem) + results = append(results, ssa.Param{Type: n.Type(), Offset: int32(n.Offset())}) if s.canSSA(n) { // Save ssa-able PPARAMOUT variables so we can // store them back to the stack at the end of @@ -441,21 +441,21 @@ func buildssa(fn *ir.Node, worker int) *ssa.Func { } // Populate SSAable arguments. - for _, n := range fn.Func.Dcl { + for _, n := range fn.Func().Dcl { if n.Class() == ir.PPARAM && s.canSSA(n) { - v := s.newValue0A(ssa.OpArg, n.Type, n) + v := s.newValue0A(ssa.OpArg, n.Type(), n) s.vars[n] = v s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself. 
} } // Convert the AST-based IR to the SSA-based IR - s.stmtList(fn.Func.Enter) - s.stmtList(fn.Nbody) + s.stmtList(fn.Func().Enter) + s.stmtList(fn.Body()) // fallthrough to exit if s.curBlock != nil { - s.pushLine(fn.Func.Endlineno) + s.pushLine(fn.Func().Endlineno) s.exit() s.popLine() } @@ -480,8 +480,8 @@ func buildssa(fn *ir.Node, worker int) *ssa.Func { func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Node) { // Read sources of target function fn. - fname := base.Ctxt.PosTable.Pos(fn.Pos).Filename() - targetFn, err := readFuncLines(fname, fn.Pos.Line(), fn.Func.Endlineno.Line()) + fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename() + targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Func().Endlineno.Line()) if err != nil { writer.Logf("cannot read sources for function %v: %v", fn, err) } @@ -490,14 +490,14 @@ func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Node) { var inlFns []*ssa.FuncLines for _, fi := range ssaDumpInlined { var elno src.XPos - if fi.Name.Defn == nil { + if fi.Name().Defn == nil { // Endlineno is filled from exported data. - elno = fi.Func.Endlineno + elno = fi.Func().Endlineno } else { - elno = fi.Name.Defn.Func.Endlineno + elno = fi.Name().Defn.Func().Endlineno } - fname := base.Ctxt.PosTable.Pos(fi.Pos).Filename() - fnLines, err := readFuncLines(fname, fi.Pos.Line(), elno.Line()) + fname := base.Ctxt.PosTable.Pos(fi.Pos()).Filename() + fnLines, err := readFuncLines(fname, fi.Pos().Line(), elno.Line()) if err != nil { writer.Logf("cannot read sources for inlined function %v: %v", fi, err) continue @@ -974,7 +974,7 @@ func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Valu } func (s *state) instrument(t *types.Type, addr *ssa.Value, wr bool) { - if !s.curfn.Func.InstrumentBody() { + if !s.curfn.Func().InstrumentBody() { return } @@ -1060,23 +1060,23 @@ func (s *state) stmtList(l ir.Nodes) { // stmt converts the statement n to SSA and adds it to s. func (s *state) stmt(n *ir.Node) { - if !(n.Op == ir.OVARKILL || n.Op == ir.OVARLIVE || n.Op == ir.OVARDEF) { + if !(n.Op() == ir.OVARKILL || n.Op() == ir.OVARLIVE || n.Op() == ir.OVARDEF) { // OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging. - s.pushLine(n.Pos) + s.pushLine(n.Pos()) defer s.popLine() } // If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere), // then this code is dead. Stop here. 
- if s.curBlock == nil && n.Op != ir.OLABEL { + if s.curBlock == nil && n.Op() != ir.OLABEL { return } - s.stmtList(n.Ninit) - switch n.Op { + s.stmtList(n.Init()) + switch n.Op() { case ir.OBLOCK: - s.stmtList(n.List) + s.stmtList(n.List()) // No-ops case ir.OEMPTY, ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL: @@ -1091,9 +1091,9 @@ func (s *state) stmt(n *ir.Node) { case ir.OCALLMETH, ir.OCALLINTER: s.callResult(n, callNormal) - if n.Op == ir.OCALLFUNC && n.Left.Op == ir.ONAME && n.Left.Class() == ir.PFUNC { - if fn := n.Left.Sym.Name; base.Flag.CompilingRuntime && fn == "throw" || - n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") { + if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.ONAME && n.Left().Class() == ir.PFUNC { + if fn := n.Left().Sym().Name; base.Flag.CompilingRuntime && fn == "throw" || + n.Left().Sym().Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") { m := s.mem() b := s.endBlock() b.Kind = ssa.BlockExit @@ -1108,29 +1108,29 @@ func (s *state) stmt(n *ir.Node) { var defertype string if s.hasOpenDefers { defertype = "open-coded" - } else if n.Esc == EscNever { + } else if n.Esc() == EscNever { defertype = "stack-allocated" } else { defertype = "heap-allocated" } - base.WarnfAt(n.Pos, "%s defer", defertype) + base.WarnfAt(n.Pos(), "%s defer", defertype) } if s.hasOpenDefers { - s.openDeferRecord(n.Left) + s.openDeferRecord(n.Left()) } else { d := callDefer - if n.Esc == EscNever { + if n.Esc() == EscNever { d = callDeferStack } - s.callResult(n.Left, d) + s.callResult(n.Left(), d) } case ir.OGO: - s.callResult(n.Left, callGo) + s.callResult(n.Left(), callGo) case ir.OAS2DOTTYPE: - res, resok := s.dottype(n.Right, true) + res, resok := s.dottype(n.Right(), true) deref := false - if !canSSAType(n.Right.Type) { + if !canSSAType(n.Right().Type()) { if res.Op != ssa.OpLoad { s.Fatalf("dottype of non-load") } @@ -1144,29 +1144,29 @@ func (s *state) stmt(n *ir.Node) { deref = true res = res.Args[0] } - s.assign(n.List.First(), res, deref, 0) - s.assign(n.List.Second(), resok, false, 0) + s.assign(n.List().First(), res, deref, 0) + s.assign(n.List().Second(), resok, false, 0) return case ir.OAS2FUNC: // We come here only when it is an intrinsic call returning two values. 
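An example of source code that reaches this case (assuming the usual math/bits intrinsics): a two-result call recognized by the backend becomes a single tuple-valued SSA op, split apart with OpSelect0/OpSelect1 as the code below does.

	package main

	import "math/bits"

	// bits.Add64 is intrinsified on common targets; its two results come
	// from one SSA value via Select0 (sum) and Select1 (carry).
	func addWithCarry(x, y uint64) (uint64, uint64) {
		sum, carry := bits.Add64(x, y, 0)
		return sum, carry
	}
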
- if !isIntrinsicCall(n.Right) { - s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Right) - } - v := s.intrinsicCall(n.Right) - v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v) - v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v) - s.assign(n.List.First(), v1, false, 0) - s.assign(n.List.Second(), v2, false, 0) + if !isIntrinsicCall(n.Right()) { + s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Right()) + } + v := s.intrinsicCall(n.Right()) + v1 := s.newValue1(ssa.OpSelect0, n.List().First().Type(), v) + v2 := s.newValue1(ssa.OpSelect1, n.List().Second().Type(), v) + s.assign(n.List().First(), v1, false, 0) + s.assign(n.List().Second(), v2, false, 0) return case ir.ODCL: - if n.Left.Class() == ir.PAUTOHEAP { + if n.Left().Class() == ir.PAUTOHEAP { s.Fatalf("DCL %v", n) } case ir.OLABEL: - sym := n.Sym + sym := n.Sym() lab := s.label(sym) // Associate label with its control flow node, if any @@ -1188,7 +1188,7 @@ func (s *state) stmt(n *ir.Node) { s.startBlock(lab.target) case ir.OGOTO: - sym := n.Sym + sym := n.Sym() lab := s.label(sym) if lab.target == nil { @@ -1200,7 +1200,7 @@ func (s *state) stmt(n *ir.Node) { b.AddEdgeTo(lab.target) case ir.OAS: - if n.Left == n.Right && n.Left.Op == ir.ONAME { + if n.Left() == n.Right() && n.Left().Op() == ir.ONAME { // An x=x assignment. No point in doing anything // here. In addition, skipping this assignment // prevents generating: @@ -1212,9 +1212,9 @@ func (s *state) stmt(n *ir.Node) { } // Evaluate RHS. - rhs := n.Right + rhs := n.Right() if rhs != nil { - switch rhs.Op { + switch rhs.Op() { case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT: // All literals with nonzero fields have already been // rewritten during walk. Any that remain are just T{} @@ -1227,27 +1227,27 @@ func (s *state) stmt(n *ir.Node) { // Check whether we're writing the result of an append back to the same slice. // If so, we handle it specially to avoid write barriers on the fast // (non-growth) path. - if !samesafeexpr(n.Left, rhs.List.First()) || base.Flag.N != 0 { + if !samesafeexpr(n.Left(), rhs.List().First()) || base.Flag.N != 0 { break } // If the slice can be SSA'd, it'll be on the stack, // so there will be no write barriers, // so there's no need to attempt to prevent them. - if s.canSSA(n.Left) { + if s.canSSA(n.Left()) { if base.Debug.Append > 0 { // replicating old diagnostic message - base.WarnfAt(n.Pos, "append: len-only update (in local slice)") + base.WarnfAt(n.Pos(), "append: len-only update (in local slice)") } break } if base.Debug.Append > 0 { - base.WarnfAt(n.Pos, "append: len-only update") + base.WarnfAt(n.Pos(), "append: len-only update") } s.append(rhs, true) return } } - if ir.IsBlank(n.Left) { + if ir.IsBlank(n.Left()) { // _ = rhs // Just evaluate rhs for side-effects. if rhs != nil { @@ -1257,10 +1257,10 @@ func (s *state) stmt(n *ir.Node) { } var t *types.Type - if n.Right != nil { - t = n.Right.Type + if n.Right() != nil { + t = n.Right().Type() } else { - t = n.Left.Type + t = n.Left().Type() } var r *ssa.Value @@ -1280,11 +1280,11 @@ func (s *state) stmt(n *ir.Node) { } var skip skipMask - if rhs != nil && (rhs.Op == ir.OSLICE || rhs.Op == ir.OSLICE3 || rhs.Op == ir.OSLICESTR) && samesafeexpr(rhs.Left, n.Left) { + if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && samesafeexpr(rhs.Left(), n.Left()) { // We're assigning a slicing operation back to its source. // Don't write back fields we aren't changing. See issue #14855. 
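The pattern being optimized, for reference (a sketch; the skip mask computed below is the mechanism): when a slice is re-sliced into itself, header fields that provably cannot change are not stored back, which also avoids write barriers on the pointer word.

	package main

	func truncate(s []byte, n int) []byte {
		s = s[:n] // only the len field of the header needs a store;
		return s  // ptr and cap are unchanged, and [0:...] is normalized to [:...]
	}
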
i, j, k := rhs.SliceBounds() - if i != nil && (i.Op == ir.OLITERAL && i.Val().Kind() == constant.Int && i.Int64Val() == 0) { + if i != nil && (i.Op() == ir.OLITERAL && i.Val().Kind() == constant.Int && i.Int64Val() == 0) { // [0:...] is the same as [:...] i = nil } @@ -1309,15 +1309,15 @@ func (s *state) stmt(n *ir.Node) { } } - s.assign(n.Left, r, deref, skip) + s.assign(n.Left(), r, deref, skip) case ir.OIF: - if ir.IsConst(n.Left, constant.Bool) { - s.stmtList(n.Left.Ninit) - if n.Left.BoolVal() { - s.stmtList(n.Nbody) + if ir.IsConst(n.Left(), constant.Bool) { + s.stmtList(n.Left().Init()) + if n.Left().BoolVal() { + s.stmtList(n.Body()) } else { - s.stmtList(n.Rlist) + s.stmtList(n.Rlist()) } break } @@ -1328,29 +1328,29 @@ func (s *state) stmt(n *ir.Node) { likely = 1 } var bThen *ssa.Block - if n.Nbody.Len() != 0 { + if n.Body().Len() != 0 { bThen = s.f.NewBlock(ssa.BlockPlain) } else { bThen = bEnd } var bElse *ssa.Block - if n.Rlist.Len() != 0 { + if n.Rlist().Len() != 0 { bElse = s.f.NewBlock(ssa.BlockPlain) } else { bElse = bEnd } - s.condBranch(n.Left, bThen, bElse, likely) + s.condBranch(n.Left(), bThen, bElse, likely) - if n.Nbody.Len() != 0 { + if n.Body().Len() != 0 { s.startBlock(bThen) - s.stmtList(n.Nbody) + s.stmtList(n.Body()) if b := s.endBlock(); b != nil { b.AddEdgeTo(bEnd) } } - if n.Rlist.Len() != 0 { + if n.Rlist().Len() != 0 { s.startBlock(bElse) - s.stmtList(n.Rlist) + s.stmtList(n.Rlist()) if b := s.endBlock(); b != nil { b.AddEdgeTo(bEnd) } @@ -1358,21 +1358,21 @@ func (s *state) stmt(n *ir.Node) { s.startBlock(bEnd) case ir.ORETURN: - s.stmtList(n.List) + s.stmtList(n.List()) b := s.exit() b.Pos = s.lastPos.WithIsStmt() case ir.ORETJMP: - s.stmtList(n.List) + s.stmtList(n.List()) b := s.exit() b.Kind = ssa.BlockRetJmp // override BlockRet - b.Aux = n.Sym.Linksym() + b.Aux = n.Sym().Linksym() case ir.OCONTINUE, ir.OBREAK: var to *ssa.Block - if n.Sym == nil { + if n.Sym() == nil { // plain break/continue - switch n.Op { + switch n.Op() { case ir.OCONTINUE: to = s.continueTo case ir.OBREAK: @@ -1380,9 +1380,9 @@ func (s *state) stmt(n *ir.Node) { } } else { // labeled break/continue; look up the target - sym := n.Sym + sym := n.Sym() lab := s.label(sym) - switch n.Op { + switch n.Op() { case ir.OCONTINUE: to = lab.continueTarget case ir.OBREAK: @@ -1406,16 +1406,16 @@ func (s *state) stmt(n *ir.Node) { bEnd := s.f.NewBlock(ssa.BlockPlain) // ensure empty for loops have correct position; issue #30167 - bBody.Pos = n.Pos + bBody.Pos = n.Pos() // first, jump to condition test (OFOR) or body (OFORUNTIL) b := s.endBlock() - if n.Op == ir.OFOR { + if n.Op() == ir.OFOR { b.AddEdgeTo(bCond) // generate code to test condition s.startBlock(bCond) - if n.Left != nil { - s.condBranch(n.Left, bBody, bEnd, 1) + if n.Left() != nil { + s.condBranch(n.Left(), bBody, bEnd, 1) } else { b := s.endBlock() b.Kind = ssa.BlockPlain @@ -1440,7 +1440,7 @@ func (s *state) stmt(n *ir.Node) { // generate body s.startBlock(bBody) - s.stmtList(n.Nbody) + s.stmtList(n.Body()) // tear down continue/break s.continueTo = prevContinue @@ -1457,15 +1457,15 @@ func (s *state) stmt(n *ir.Node) { // generate incr (and, for OFORUNTIL, condition) s.startBlock(bIncr) - if n.Right != nil { - s.stmt(n.Right) + if n.Right() != nil { + s.stmt(n.Right()) } - if n.Op == ir.OFOR { + if n.Op() == ir.OFOR { if b := s.endBlock(); b != nil { b.AddEdgeTo(bCond) // It can happen that bIncr ends in a block containing only VARKILL, // and that muddles the debugging experience. 
- if n.Op != ir.OFORUNTIL && b.Pos == src.NoXPos { + if n.Op() != ir.OFORUNTIL && b.Pos == src.NoXPos { b.Pos = bCond.Pos } } @@ -1473,10 +1473,10 @@ func (s *state) stmt(n *ir.Node) { // bCond is unused in OFORUNTIL, so repurpose it. bLateIncr := bCond // test condition - s.condBranch(n.Left, bLateIncr, bEnd, 1) + s.condBranch(n.Left(), bLateIncr, bEnd, 1) // generate late increment s.startBlock(bLateIncr) - s.stmtList(n.List) + s.stmtList(n.List()) s.endBlock().AddEdgeTo(bBody) } @@ -1496,7 +1496,7 @@ func (s *state) stmt(n *ir.Node) { } // generate body code - s.stmtList(n.Nbody) + s.stmtList(n.Body()) s.breakTo = prevBreak if lab != nil { @@ -1514,39 +1514,39 @@ func (s *state) stmt(n *ir.Node) { s.startBlock(bEnd) case ir.OVARDEF: - if !s.canSSA(n.Left) { - s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left, s.mem(), false) + if !s.canSSA(n.Left()) { + s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left(), s.mem(), false) } case ir.OVARKILL: // Insert a varkill op to record that a variable is no longer live. // We only care about liveness info at call sites, so putting the // varkill in the store chain is enough to keep it correctly ordered // with respect to call ops. - if !s.canSSA(n.Left) { - s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left, s.mem(), false) + if !s.canSSA(n.Left()) { + s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left(), s.mem(), false) } case ir.OVARLIVE: // Insert a varlive op to record that a variable is still live. - if !n.Left.Name.Addrtaken() { - s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left) + if !n.Left().Name().Addrtaken() { + s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left()) } - switch n.Left.Class() { + switch n.Left().Class() { case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT: default: - s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left) + s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left()) } - s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem()) + s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left(), s.mem()) case ir.OCHECKNIL: - p := s.expr(n.Left) + p := s.expr(n.Left()) s.nilCheck(p) case ir.OINLMARK: - s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Xoffset, s.mem()) + s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Offset(), s.mem()) default: - s.Fatalf("unhandled stmt %v", n.Op) + s.Fatalf("unhandled stmt %v", n.Op()) } } @@ -1576,14 +1576,14 @@ func (s *state) exit() *ssa.Block { // Run exit code. Typically, this code copies heap-allocated PPARAMOUT // variables back to the stack. - s.stmtList(s.curfn.Func.Exit) + s.stmtList(s.curfn.Func().Exit) // Store SSAable PPARAMOUT variables back to stack locations. for _, n := range s.returns { addr := s.decladdrs[n] - val := s.variable(n, n.Type) + val := s.variable(n, n.Type()) s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem()) - s.store(n.Type, addr, val) + s.store(n.Type(), addr, val) // TODO: if val is ever spilled, we'd like to use the // PPARAMOUT slot for spilling it. That won't happen // currently. @@ -2003,44 +2003,44 @@ func (s *state) expr(n *ir.Node) *ssa.Value { if hasUniquePos(n) { // ONAMEs and named OLITERALs have the line number // of the decl, not the use. See issue 14742. 
- s.pushLine(n.Pos) + s.pushLine(n.Pos()) defer s.popLine() } - s.stmtList(n.Ninit) - switch n.Op { + s.stmtList(n.Init()) + switch n.Op() { case ir.OBYTES2STRTMP: - slice := s.expr(n.Left) + slice := s.expr(n.Left()) ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice) len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) - return s.newValue2(ssa.OpStringMake, n.Type, ptr, len) + return s.newValue2(ssa.OpStringMake, n.Type(), ptr, len) case ir.OSTR2BYTESTMP: - str := s.expr(n.Left) + str := s.expr(n.Left()) ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str) len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], str) - return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len) + return s.newValue3(ssa.OpSliceMake, n.Type(), ptr, len, len) case ir.OCFUNC: - aux := n.Left.Sym.Linksym() - return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb) + aux := n.Left().Sym().Linksym() + return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb) case ir.OMETHEXPR: - sym := funcsym(n.Sym).Linksym() - return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), sym, s.sb) + sym := funcsym(n.Sym()).Linksym() + return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb) case ir.ONAME: if n.Class() == ir.PFUNC { // "value" of a function is the address of the function's closure - sym := funcsym(n.Sym).Linksym() - return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), sym, s.sb) + sym := funcsym(n.Sym()).Linksym() + return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb) } if s.canSSA(n) { - return s.variable(n, n.Type) + return s.variable(n, n.Type()) } addr := s.addr(n) - return s.load(n.Type, addr) + return s.load(n.Type(), addr) case ir.OCLOSUREVAR: addr := s.addr(n) - return s.load(n.Type, addr) + return s.load(n.Type(), addr) case ir.ONIL: - t := n.Type + t := n.Type() switch { case t.IsSlice(): return s.constSlice(t) @@ -2052,55 +2052,55 @@ func (s *state) expr(n *ir.Node) *ssa.Value { case ir.OLITERAL: switch u := n.Val(); u.Kind() { case constant.Int: - i := ir.Int64Val(n.Type, u) - switch n.Type.Size() { + i := ir.Int64Val(n.Type(), u) + switch n.Type().Size() { case 1: - return s.constInt8(n.Type, int8(i)) + return s.constInt8(n.Type(), int8(i)) case 2: - return s.constInt16(n.Type, int16(i)) + return s.constInt16(n.Type(), int16(i)) case 4: - return s.constInt32(n.Type, int32(i)) + return s.constInt32(n.Type(), int32(i)) case 8: - return s.constInt64(n.Type, i) + return s.constInt64(n.Type(), i) default: - s.Fatalf("bad integer size %d", n.Type.Size()) + s.Fatalf("bad integer size %d", n.Type().Size()) return nil } case constant.String: i := constant.StringVal(u) if i == "" { - return s.constEmptyString(n.Type) + return s.constEmptyString(n.Type()) } - return s.entryNewValue0A(ssa.OpConstString, n.Type, i) + return s.entryNewValue0A(ssa.OpConstString, n.Type(), i) case constant.Bool: return s.constBool(constant.BoolVal(u)) case constant.Float: f, _ := constant.Float64Val(u) - switch n.Type.Size() { + switch n.Type().Size() { case 4: - return s.constFloat32(n.Type, f) + return s.constFloat32(n.Type(), f) case 8: - return s.constFloat64(n.Type, f) + return s.constFloat64(n.Type(), f) default: - s.Fatalf("bad float size %d", n.Type.Size()) + s.Fatalf("bad float size %d", n.Type().Size()) return nil } case constant.Complex: re, _ := constant.Float64Val(constant.Real(u)) im, _ := constant.Float64Val(constant.Imag(u)) - switch n.Type.Size() { + switch n.Type().Size() { case 8: pt := types.Types[types.TFLOAT32] - return 
s.newValue2(ssa.OpComplexMake, n.Type, + return s.newValue2(ssa.OpComplexMake, n.Type(), s.constFloat32(pt, re), s.constFloat32(pt, im)) case 16: pt := types.Types[types.TFLOAT64] - return s.newValue2(ssa.OpComplexMake, n.Type, + return s.newValue2(ssa.OpComplexMake, n.Type(), s.constFloat64(pt, re), s.constFloat64(pt, im)) default: - s.Fatalf("bad complex size %d", n.Type.Size()) + s.Fatalf("bad complex size %d", n.Type().Size()) return nil } default: @@ -2108,12 +2108,12 @@ func (s *state) expr(n *ir.Node) *ssa.Value { return nil } case ir.OCONVNOP: - to := n.Type - from := n.Left.Type + to := n.Type() + from := n.Left().Type() // Assume everything will work out, so set up our return value. // Anything interesting that happens from here is a fatal. - x := s.expr(n.Left) + x := s.expr(n.Left()) // Special case for not confusing GC and liveness. // We don't want pointers accidentally classified @@ -2173,12 +2173,12 @@ func (s *state) expr(n *ir.Node) *ssa.Value { return v case ir.OCONV: - x := s.expr(n.Left) - ft := n.Left.Type // from type - tt := n.Type // to type + x := s.expr(n.Left()) + ft := n.Left().Type() // from type + tt := n.Type() // to type if ft.IsBoolean() && tt.IsKind(types.TUINT8) { // Bool -> uint8 is generated internally when indexing into runtime.staticbyte. - return s.newValue1(ssa.OpCopy, n.Type, x) + return s.newValue1(ssa.OpCopy, n.Type(), x) } if ft.IsInteger() && tt.IsInteger() { var op ssa.Op @@ -2239,7 +2239,7 @@ func (s *state) expr(n *ir.Node) *ssa.Value { s.Fatalf("weird integer sign extension %v -> %v", ft, tt) } } - return s.newValue1(op, n.Type, x) + return s.newValue1(op, n.Type(), x) } if ft.IsFloat() || tt.IsFloat() { @@ -2286,12 +2286,12 @@ func (s *state) expr(n *ir.Node) *ssa.Value { if op2 == ssa.OpCopy { return x } - return s.newValueOrSfCall1(op2, n.Type, x) + return s.newValueOrSfCall1(op2, n.Type(), x) } if op2 == ssa.OpCopy { - return s.newValueOrSfCall1(op1, n.Type, x) + return s.newValueOrSfCall1(op1, n.Type(), x) } - return s.newValueOrSfCall1(op2, n.Type, s.newValueOrSfCall1(op1, types.Types[it], x)) + return s.newValueOrSfCall1(op2, n.Type(), s.newValueOrSfCall1(op1, types.Types[it], x)) } // Tricky 64-bit unsigned cases. 
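// For example, uint64-to-float (handled by uint64Tofloat, further down) can't
// be a single signed conversion when the top bit is set. A sketch of the
// usual lowering, with illustrative names:
//
//	if x >= 0 {
//		result = floatY(x)
//	} else {
//		y := x>>1 | x&1 // halve, keeping the low bit for rounding
//		result = floatY(y)
//		result += result
//	}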
if ft.IsInteger() { @@ -2340,7 +2340,7 @@ func (s *state) expr(n *ir.Node) *ssa.Value { s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x))) } - s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype) + s.Fatalf("unhandled OCONV %s -> %s", n.Left().Type().Etype, n.Type().Etype) return nil case ir.ODOTTYPE: @@ -2349,46 +2349,46 @@ func (s *state) expr(n *ir.Node) *ssa.Value { // binary ops case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT: - a := s.expr(n.Left) - b := s.expr(n.Right) - if n.Left.Type.IsComplex() { - pt := floatForComplex(n.Left.Type) + a := s.expr(n.Left()) + b := s.expr(n.Right()) + if n.Left().Type().IsComplex() { + pt := floatForComplex(n.Left().Type()) op := s.ssaOp(ir.OEQ, pt) r := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)) i := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)) c := s.newValue2(ssa.OpAndB, types.Types[types.TBOOL], r, i) - switch n.Op { + switch n.Op() { case ir.OEQ: return c case ir.ONE: return s.newValue1(ssa.OpNot, types.Types[types.TBOOL], c) default: - s.Fatalf("ordered complex compare %v", n.Op) + s.Fatalf("ordered complex compare %v", n.Op()) } } // Convert OGE and OGT into OLE and OLT. - op := n.Op + op := n.Op() switch op { case ir.OGE: op, a, b = ir.OLE, b, a case ir.OGT: op, a, b = ir.OLT, b, a } - if n.Left.Type.IsFloat() { + if n.Left().Type().IsFloat() { // float comparison - return s.newValueOrSfCall2(s.ssaOp(op, n.Left.Type), types.Types[types.TBOOL], a, b) + return s.newValueOrSfCall2(s.ssaOp(op, n.Left().Type()), types.Types[types.TBOOL], a, b) } // integer comparison - return s.newValue2(s.ssaOp(op, n.Left.Type), types.Types[types.TBOOL], a, b) + return s.newValue2(s.ssaOp(op, n.Left().Type()), types.Types[types.TBOOL], a, b) case ir.OMUL: - a := s.expr(n.Left) - b := s.expr(n.Right) - if n.Type.IsComplex() { + a := s.expr(n.Left()) + b := s.expr(n.Right()) + if n.Type().IsComplex() { mulop := ssa.OpMul64F addop := ssa.OpAdd64F subop := ssa.OpSub64F - pt := floatForComplex(n.Type) // Could be Float32 or Float64 + pt := floatForComplex(n.Type()) // Could be Float32 or Float64 wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error areal := s.newValue1(ssa.OpComplexReal, pt, a) @@ -2411,19 +2411,19 @@ func (s *state) expr(n *ir.Node) *ssa.Value { ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag) } - return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) + return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag) } - if n.Type.IsFloat() { - return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b) + if n.Type().IsFloat() { + return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b) } - return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) + return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b) case ir.ODIV: - a := s.expr(n.Left) - b := s.expr(n.Right) - if n.Type.IsComplex() { + a := s.expr(n.Left()) + b := s.expr(n.Right()) + if n.Type().IsComplex() { // TODO this is not executed because the front-end substitutes a runtime call. // That probably ought to change; with modest optimization the widen/narrow // conversions could all be elided in larger expression trees. 
@@ -2431,7 +2431,7 @@ func (s *state) expr(n *ir.Node) *ssa.Value { addop := ssa.OpAdd64F subop := ssa.OpSub64F divop := ssa.OpDiv64F - pt := floatForComplex(n.Type) // Could be Float32 or Float64 + pt := floatForComplex(n.Type()) // Could be Float32 or Float64 wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error areal := s.newValue1(ssa.OpComplexReal, pt, a) @@ -2461,49 +2461,49 @@ func (s *state) expr(n *ir.Node) *ssa.Value { xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal) ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag) } - return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) + return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag) } - if n.Type.IsFloat() { - return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b) + if n.Type().IsFloat() { + return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b) } return s.intDivide(n, a, b) case ir.OMOD: - a := s.expr(n.Left) - b := s.expr(n.Right) + a := s.expr(n.Left()) + b := s.expr(n.Right()) return s.intDivide(n, a, b) case ir.OADD, ir.OSUB: - a := s.expr(n.Left) - b := s.expr(n.Right) - if n.Type.IsComplex() { - pt := floatForComplex(n.Type) - op := s.ssaOp(n.Op, pt) - return s.newValue2(ssa.OpComplexMake, n.Type, + a := s.expr(n.Left()) + b := s.expr(n.Right()) + if n.Type().IsComplex() { + pt := floatForComplex(n.Type()) + op := s.ssaOp(n.Op(), pt) + return s.newValue2(ssa.OpComplexMake, n.Type(), s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)), s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))) } - if n.Type.IsFloat() { - return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b) + if n.Type().IsFloat() { + return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b) } - return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) + return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b) case ir.OAND, ir.OOR, ir.OXOR: - a := s.expr(n.Left) - b := s.expr(n.Right) - return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) + a := s.expr(n.Left()) + b := s.expr(n.Right()) + return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b) case ir.OANDNOT: - a := s.expr(n.Left) - b := s.expr(n.Right) + a := s.expr(n.Left()) + b := s.expr(n.Right()) b = s.newValue1(s.ssaOp(ir.OBITNOT, b.Type), b.Type, b) - return s.newValue2(s.ssaOp(ir.OAND, n.Type), a.Type, a, b) + return s.newValue2(s.ssaOp(ir.OAND, n.Type()), a.Type, a, b) case ir.OLSH, ir.ORSH: - a := s.expr(n.Left) - b := s.expr(n.Right) + a := s.expr(n.Left()) + b := s.expr(n.Right()) bt := b.Type if bt.IsSigned() { cmp := s.newValue2(s.ssaOp(ir.OLE, bt), types.Types[types.TBOOL], s.zeroVal(bt), b) s.check(cmp, panicshift) bt = bt.ToUnsigned() } - return s.newValue2(s.ssaShiftOp(n.Op, n.Type, bt), a.Type, a, b) + return s.newValue2(s.ssaShiftOp(n.Op(), n.Type(), bt), a.Type, a, b) case ir.OANDAND, ir.OOROR: // To implement OANDAND (and OOROR), we introduce a // new temporary variable to hold the result. The @@ -2518,7 +2518,7 @@ func (s *state) expr(n *ir.Node) *ssa.Value { // } // Using var in the subsequent block introduces the // necessary phi variable. 
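// Reading the variable back with s.variable(n, TBOOL) in bResult (below) is
// what materializes the phi: el reaches bResult along the short-circuit edge
// and er along the edge from bRight. In rough SSA form:
//
//	bResult:
//		v = Phi(el, er)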
- el := s.expr(n.Left) + el := s.expr(n.Left()) s.vars[n] = el b := s.endBlock() @@ -2531,16 +2531,16 @@ func (s *state) expr(n *ir.Node) *ssa.Value { bRight := s.f.NewBlock(ssa.BlockPlain) bResult := s.f.NewBlock(ssa.BlockPlain) - if n.Op == ir.OANDAND { + if n.Op() == ir.OANDAND { b.AddEdgeTo(bRight) b.AddEdgeTo(bResult) - } else if n.Op == ir.OOROR { + } else if n.Op() == ir.OOROR { b.AddEdgeTo(bResult) b.AddEdgeTo(bRight) } s.startBlock(bRight) - er := s.expr(n.Right) + er := s.expr(n.Right()) s.vars[n] = er b = s.endBlock() @@ -2549,65 +2549,65 @@ func (s *state) expr(n *ir.Node) *ssa.Value { s.startBlock(bResult) return s.variable(n, types.Types[types.TBOOL]) case ir.OCOMPLEX: - r := s.expr(n.Left) - i := s.expr(n.Right) - return s.newValue2(ssa.OpComplexMake, n.Type, r, i) + r := s.expr(n.Left()) + i := s.expr(n.Right()) + return s.newValue2(ssa.OpComplexMake, n.Type(), r, i) // unary ops case ir.ONEG: - a := s.expr(n.Left) - if n.Type.IsComplex() { - tp := floatForComplex(n.Type) - negop := s.ssaOp(n.Op, tp) - return s.newValue2(ssa.OpComplexMake, n.Type, + a := s.expr(n.Left()) + if n.Type().IsComplex() { + tp := floatForComplex(n.Type()) + negop := s.ssaOp(n.Op(), tp) + return s.newValue2(ssa.OpComplexMake, n.Type(), s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)), s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a))) } - return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a) + return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a) case ir.ONOT, ir.OBITNOT: - a := s.expr(n.Left) - return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a) + a := s.expr(n.Left()) + return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a) case ir.OIMAG, ir.OREAL: - a := s.expr(n.Left) - return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a) + a := s.expr(n.Left()) + return s.newValue1(s.ssaOp(n.Op(), n.Left().Type()), n.Type(), a) case ir.OPLUS: - return s.expr(n.Left) + return s.expr(n.Left()) case ir.OADDR: - return s.addr(n.Left) + return s.addr(n.Left()) case ir.ORESULT: if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall { // Do the old thing - addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset) - return s.rawLoad(n.Type, addr) + addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset()) + return s.rawLoad(n.Type(), addr) } - which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Xoffset) + which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Offset()) if which == -1 { // Do the old thing // TODO: Panic instead. - addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset) - return s.rawLoad(n.Type, addr) + addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset()) + return s.rawLoad(n.Type(), addr) } - if canSSAType(n.Type) { - return s.newValue1I(ssa.OpSelectN, n.Type, which, s.prevCall) + if canSSAType(n.Type()) { + return s.newValue1I(ssa.OpSelectN, n.Type(), which, s.prevCall) } else { - addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(n.Type), which, s.prevCall) - return s.rawLoad(n.Type, addr) + addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(n.Type()), which, s.prevCall) + return s.rawLoad(n.Type(), addr) } case ir.ODEREF: - p := s.exprPtr(n.Left, n.Bounded(), n.Pos) - return s.load(n.Type, p) + p := s.exprPtr(n.Left(), n.Bounded(), n.Pos()) + return s.load(n.Type(), p) case ir.ODOT: - if n.Left.Op == ir.OSTRUCTLIT { + if n.Left().Op() == ir.OSTRUCTLIT { // All literals with nonzero fields have already been // rewritten during walk. 
Any that remain are just T{} // or equivalents. Use the zero value. - if !isZero(n.Left) { - s.Fatalf("literal with nonzero value in SSA: %v", n.Left) + if !isZero(n.Left()) { + s.Fatalf("literal with nonzero value in SSA: %v", n.Left()) } - return s.zeroVal(n.Type) + return s.zeroVal(n.Type()) } // If n is addressable and can't be represented in // SSA, then load just the selected field. This @@ -2615,110 +2615,110 @@ func (s *state) expr(n *ir.Node) *ssa.Value { // instrumentation. if islvalue(n) && !s.canSSA(n) { p := s.addr(n) - return s.load(n.Type, p) + return s.load(n.Type(), p) } - v := s.expr(n.Left) - return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v) + v := s.expr(n.Left()) + return s.newValue1I(ssa.OpStructSelect, n.Type(), int64(fieldIdx(n)), v) case ir.ODOTPTR: - p := s.exprPtr(n.Left, n.Bounded(), n.Pos) - p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type), n.Xoffset, p) - return s.load(n.Type, p) + p := s.exprPtr(n.Left(), n.Bounded(), n.Pos()) + p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset(), p) + return s.load(n.Type(), p) case ir.OINDEX: switch { - case n.Left.Type.IsString(): - if n.Bounded() && ir.IsConst(n.Left, constant.String) && ir.IsConst(n.Right, constant.Int) { + case n.Left().Type().IsString(): + if n.Bounded() && ir.IsConst(n.Left(), constant.String) && ir.IsConst(n.Right(), constant.Int) { // Replace "abc"[1] with 'b'. // Delayed until now because "abc"[1] is not an ideal constant. // See test/fixedbugs/issue11370.go. - return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(n.Left.StringVal()[n.Right.Int64Val()]))) + return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(n.Left().StringVal()[n.Right().Int64Val()]))) } - a := s.expr(n.Left) - i := s.expr(n.Right) + a := s.expr(n.Left()) + i := s.expr(n.Right()) len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], a) i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) ptrtyp := s.f.Config.Types.BytePtr ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a) - if ir.IsConst(n.Right, constant.Int) { - ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64Val(), ptr) + if ir.IsConst(n.Right(), constant.Int) { + ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right().Int64Val(), ptr) } else { ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i) } return s.load(types.Types[types.TUINT8], ptr) - case n.Left.Type.IsSlice(): + case n.Left().Type().IsSlice(): p := s.addr(n) - return s.load(n.Left.Type.Elem(), p) - case n.Left.Type.IsArray(): - if canSSAType(n.Left.Type) { + return s.load(n.Left().Type().Elem(), p) + case n.Left().Type().IsArray(): + if canSSAType(n.Left().Type()) { // SSA can handle arrays of length at most 1. - bound := n.Left.Type.NumElem() - a := s.expr(n.Left) - i := s.expr(n.Right) + bound := n.Left().Type().NumElem() + a := s.expr(n.Left()) + i := s.expr(n.Right()) if bound == 0 { // Bounds check will never succeed. Might as well // use constants for the bounds check. z := s.constInt(types.Types[types.TINT], 0) s.boundsCheck(z, z, ssa.BoundsIndex, false) // The return value won't be live, return junk. 
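// OpUnknown carries no semantics; it is safe as a result here only because
// the constant bounds check above is guaranteed to panic, so the value can
// never be observed.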
- return s.newValue0(ssa.OpUnknown, n.Type) + return s.newValue0(ssa.OpUnknown, n.Type()) } len := s.constInt(types.Types[types.TINT], bound) s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) // checks i == 0 - return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a) + return s.newValue1I(ssa.OpArraySelect, n.Type(), 0, a) } p := s.addr(n) - return s.load(n.Left.Type.Elem(), p) + return s.load(n.Left().Type().Elem(), p) default: - s.Fatalf("bad type for index %v", n.Left.Type) + s.Fatalf("bad type for index %v", n.Left().Type()) return nil } case ir.OLEN, ir.OCAP: switch { - case n.Left.Type.IsSlice(): + case n.Left().Type().IsSlice(): op := ssa.OpSliceLen - if n.Op == ir.OCAP { + if n.Op() == ir.OCAP { op = ssa.OpSliceCap } - return s.newValue1(op, types.Types[types.TINT], s.expr(n.Left)) - case n.Left.Type.IsString(): // string; not reachable for OCAP - return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.Left)) - case n.Left.Type.IsMap(), n.Left.Type.IsChan(): - return s.referenceTypeBuiltin(n, s.expr(n.Left)) + return s.newValue1(op, types.Types[types.TINT], s.expr(n.Left())) + case n.Left().Type().IsString(): // string; not reachable for OCAP + return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.Left())) + case n.Left().Type().IsMap(), n.Left().Type().IsChan(): + return s.referenceTypeBuiltin(n, s.expr(n.Left())) default: // array - return s.constInt(types.Types[types.TINT], n.Left.Type.NumElem()) + return s.constInt(types.Types[types.TINT], n.Left().Type().NumElem()) } case ir.OSPTR: - a := s.expr(n.Left) - if n.Left.Type.IsSlice() { - return s.newValue1(ssa.OpSlicePtr, n.Type, a) + a := s.expr(n.Left()) + if n.Left().Type().IsSlice() { + return s.newValue1(ssa.OpSlicePtr, n.Type(), a) } else { - return s.newValue1(ssa.OpStringPtr, n.Type, a) + return s.newValue1(ssa.OpStringPtr, n.Type(), a) } case ir.OITAB: - a := s.expr(n.Left) - return s.newValue1(ssa.OpITab, n.Type, a) + a := s.expr(n.Left()) + return s.newValue1(ssa.OpITab, n.Type(), a) case ir.OIDATA: - a := s.expr(n.Left) - return s.newValue1(ssa.OpIData, n.Type, a) + a := s.expr(n.Left()) + return s.newValue1(ssa.OpIData, n.Type(), a) case ir.OEFACE: - tab := s.expr(n.Left) - data := s.expr(n.Right) - return s.newValue2(ssa.OpIMake, n.Type, tab, data) + tab := s.expr(n.Left()) + data := s.expr(n.Right()) + return s.newValue2(ssa.OpIMake, n.Type(), tab, data) case ir.OSLICEHEADER: - p := s.expr(n.Left) - l := s.expr(n.List.First()) - c := s.expr(n.List.Second()) - return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c) + p := s.expr(n.Left()) + l := s.expr(n.List().First()) + c := s.expr(n.List().Second()) + return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c) case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR: - v := s.expr(n.Left) + v := s.expr(n.Left()) var i, j, k *ssa.Value low, high, max := n.SliceBounds() if low != nil { @@ -2731,10 +2731,10 @@ func (s *state) expr(n *ir.Node) *ssa.Value { k = s.expr(max) } p, l, c := s.slice(v, i, j, k, n.Bounded()) - return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c) + return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c) case ir.OSLICESTR: - v := s.expr(n.Left) + v := s.expr(n.Left()) var i, j *ssa.Value low, high, _ := n.SliceBounds() if low != nil { @@ -2744,7 +2744,7 @@ func (s *state) expr(n *ir.Node) *ssa.Value { j = s.expr(high) } p, l, _ := s.slice(v, i, j, nil, n.Bounded()) - return s.newValue2(ssa.OpStringMake, n.Type, p, l) + return s.newValue2(ssa.OpStringMake, n.Type(), p, l) case ir.OCALLFUNC: if isIntrinsicCall(n) { @@ -2756,7 
+2756,7 @@ func (s *state) expr(n *ir.Node) *ssa.Value { return s.callResult(n, callNormal) case ir.OGETG: - return s.newValue1(ssa.OpGetG, n.Type, s.mem()) + return s.newValue1(ssa.OpGetG, n.Type(), s.mem()) case ir.OAPPEND: return s.append(n, false) @@ -2768,18 +2768,18 @@ func (s *state) expr(n *ir.Node) *ssa.Value { if !isZero(n) { s.Fatalf("literal with nonzero value in SSA: %v", n) } - return s.zeroVal(n.Type) + return s.zeroVal(n.Type()) case ir.ONEWOBJ: - if n.Type.Elem().Size() == 0 { - return s.newValue1A(ssa.OpAddr, n.Type, zerobaseSym, s.sb) + if n.Type().Elem().Size() == 0 { + return s.newValue1A(ssa.OpAddr, n.Type(), zerobaseSym, s.sb) } - typ := s.expr(n.Left) - vv := s.rtcall(newobject, true, []*types.Type{n.Type}, typ) + typ := s.expr(n.Left()) + vv := s.rtcall(newobject, true, []*types.Type{n.Type()}, typ) return vv[0] default: - s.Fatalf("unhandled expr %v", n.Op) + s.Fatalf("unhandled expr %v", n.Op()) return nil } } @@ -2824,16 +2824,16 @@ func (s *state) append(n *ir.Node, inplace bool) *ssa.Value { // *(ptr+len+1) = e2 // *(ptr+len+2) = e3 - et := n.Type.Elem() + et := n.Type().Elem() pt := types.NewPtr(et) // Evaluate slice - sn := n.List.First() // the slice node is the first in the list + sn := n.List().First() // the slice node is the first in the list var slice, addr *ssa.Value if inplace { addr = s.addr(sn) - slice = s.load(n.Type, addr) + slice = s.load(n.Type(), addr) } else { slice = s.expr(sn) } @@ -2843,7 +2843,7 @@ func (s *state) append(n *ir.Node, inplace bool) *ssa.Value { assign := s.f.NewBlock(ssa.BlockPlain) // Decide if we need to grow - nargs := int64(n.List.Len() - 1) + nargs := int64(n.List().Len() - 1) p := s.newValue1(ssa.OpSlicePtr, pt, slice) l := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) c := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], slice) @@ -2868,11 +2868,11 @@ func (s *state) append(n *ir.Node, inplace bool) *ssa.Value { // Call growslice s.startBlock(grow) - taddr := s.expr(n.Left) + taddr := s.expr(n.Left()) r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[types.TINT], types.Types[types.TINT]}, taddr, p, l, c, nl) if inplace { - if sn.Op == ir.ONAME && sn.Class() != ir.PEXTERN { + if sn.Op() == ir.ONAME && sn.Class() != ir.PEXTERN { // Tell liveness we're about to build a new slice s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem()) } @@ -2909,8 +2909,8 @@ func (s *state) append(n *ir.Node, inplace bool) *ssa.Value { store bool } args := make([]argRec, 0, nargs) - for _, n := range n.List.Slice()[1:] { - if canSSAType(n.Type) { + for _, n := range n.List().Slice()[1:] { + if canSSAType(n.Type()) { args = append(args, argRec{v: s.expr(n), store: true}) } else { v := s.addr(n) @@ -2941,7 +2941,7 @@ func (s *state) append(n *ir.Node, inplace bool) *ssa.Value { delete(s.vars, newlenVar) delete(s.vars, capVar) // make result - return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c) + return s.newValue3(ssa.OpSliceMake, n.Type(), p, nl, c) } // condBranch evaluates the boolean expression cond and branches to yes @@ -2949,13 +2949,13 @@ func (s *state) append(n *ir.Node, inplace bool) *ssa.Value { // This function is intended to handle && and || better than just calling // s.expr(cond) and branching on the result. 
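// For example, "if a && b { yes } else { no }" lowers to two conditional
// branches instead of materializing a boolean:
//
//	if !a { goto no }
//	if !b { goto no }
//	goto yes
//
// (a control-flow sketch, not the literal generated blocks).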
func (s *state) condBranch(cond *ir.Node, yes, no *ssa.Block, likely int8) { - switch cond.Op { + switch cond.Op() { case ir.OANDAND: mid := s.f.NewBlock(ssa.BlockPlain) - s.stmtList(cond.Ninit) - s.condBranch(cond.Left, mid, no, max8(likely, 0)) + s.stmtList(cond.Init()) + s.condBranch(cond.Left(), mid, no, max8(likely, 0)) s.startBlock(mid) - s.condBranch(cond.Right, yes, no, likely) + s.condBranch(cond.Right(), yes, no, likely) return // Note: if likely==1, then both recursive calls pass 1. // If likely==-1, then we don't have enough information to decide @@ -2965,17 +2965,17 @@ func (s *state) condBranch(cond *ir.Node, yes, no *ssa.Block, likely int8) { // OANDAND and OOROR nodes (if it ever has such info). case ir.OOROR: mid := s.f.NewBlock(ssa.BlockPlain) - s.stmtList(cond.Ninit) - s.condBranch(cond.Left, yes, mid, min8(likely, 0)) + s.stmtList(cond.Init()) + s.condBranch(cond.Left(), yes, mid, min8(likely, 0)) s.startBlock(mid) - s.condBranch(cond.Right, yes, no, likely) + s.condBranch(cond.Right(), yes, no, likely) return // Note: if likely==-1, then both recursive calls pass -1. // If likely==1, then we don't have enough info to decide // the likelihood of the first branch. case ir.ONOT: - s.stmtList(cond.Ninit) - s.condBranch(cond.Left, no, yes, -likely) + s.stmtList(cond.Init()) + s.condBranch(cond.Left(), no, yes, -likely) return } c := s.expr(cond) @@ -3001,16 +3001,16 @@ const ( // If deref is true and right == nil, just do left = 0. // skip indicates assignments (at the top level) that can be avoided. func (s *state) assign(left *ir.Node, right *ssa.Value, deref bool, skip skipMask) { - if left.Op == ir.ONAME && ir.IsBlank(left) { + if left.Op() == ir.ONAME && ir.IsBlank(left) { return } - t := left.Type + t := left.Type() dowidth(t) if s.canSSA(left) { if deref { s.Fatalf("can SSA LHS %v but not RHS %s", left, right) } - if left.Op == ir.ODOT { + if left.Op() == ir.ODOT { // We're assigning to a field of an ssa-able value. // We need to build a new structure with the new value for the // field we're assigning and the old values for the other fields. @@ -3021,12 +3021,12 @@ func (s *state) assign(left *ir.Node, right *ssa.Value, deref bool, skip skipMas // For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c} // Grab information about the structure type. - t := left.Left.Type + t := left.Left().Type() nf := t.NumFields() idx := fieldIdx(left) // Grab old value of structure. - old := s.expr(left.Left) + old := s.expr(left.Left()) // Make new structure. new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t) @@ -3041,19 +3041,19 @@ func (s *state) assign(left *ir.Node, right *ssa.Value, deref bool, skip skipMas } // Recursively assign the new value we've made to the base of the dot op. - s.assign(left.Left, new, false, 0) + s.assign(left.Left(), new, false, 0) // TODO: do we need to update named values here? return } - if left.Op == ir.OINDEX && left.Left.Type.IsArray() { - s.pushLine(left.Pos) + if left.Op() == ir.OINDEX && left.Left().Type().IsArray() { + s.pushLine(left.Pos()) defer s.popLine() // We're assigning to an element of an ssa-able array. // a[i] = v - t := left.Left.Type + t := left.Left().Type() n := t.NumElem() - i := s.expr(left.Right) // index + i := s.expr(left.Right()) // index if n == 0 { // The bounds check must fail. Might as well // ignore the actual index and just use zeros. 
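// In the n == 1 case below, the assignment rebuilds the whole array rather
// than storing through it: a[0] = v becomes, in effect, a = [1]T{v}
// (OpArrayMake1), which is then assigned recursively to the base so the
// array value itself stays SSA-able.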
@@ -3068,7 +3068,7 @@ func (s *state) assign(left *ir.Node, right *ssa.Value, deref bool, skip skipMas len := s.constInt(types.Types[types.TINT], 1) s.boundsCheck(i, len, ssa.BoundsIndex, false) // checks i == 0 v := s.newValue1(ssa.OpArrayMake1, t, right) - s.assign(left.Left, v, false, 0) + s.assign(left.Left(), v, false, 0) return } // Update variable assignment. @@ -3079,7 +3079,7 @@ func (s *state) assign(left *ir.Node, right *ssa.Value, deref bool, skip skipMas // If this assignment clobbers an entire local variable, then emit // OpVarDef so liveness analysis knows the variable is redefined. - if base := clobberBase(left); base.Op == ir.ONAME && base.Class() != ir.PEXTERN && skip == 0 { + if base := clobberBase(left); base.Op() == ir.ONAME && base.Class() != ir.PEXTERN && skip == 0 { s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !ir.IsAutoTmp(base)) } @@ -3323,7 +3323,7 @@ func init() { // Compiler frontend optimizations emit OBYTES2STRTMP nodes // for the backend instead of slicebytetostringtmp calls // when not instrumenting. - return s.newValue2(ssa.OpStringMake, n.Type, args[0], args[1]) + return s.newValue2(ssa.OpStringMake, n.Type(), args[0], args[1]) }, all...) } @@ -4157,15 +4157,15 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder { } func isIntrinsicCall(n *ir.Node) bool { - if n == nil || n.Left == nil { + if n == nil || n.Left() == nil { return false } - return findIntrinsic(n.Left.Sym) != nil + return findIntrinsic(n.Left().Sym()) != nil } // intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation. func (s *state) intrinsicCall(n *ir.Node) *ssa.Value { - v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n)) + v := findIntrinsic(n.Left().Sym())(s, n, s.intrinsicArgs(n)) if ssa.IntrinsicsDebug > 0 { x := v if x == nil { @@ -4174,7 +4174,7 @@ func (s *state) intrinsicCall(n *ir.Node) *ssa.Value { if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 { x = x.Args[0] } - base.WarnfAt(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString()) + base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.Left().Sym().Name, x.LongString()) } return v } @@ -4183,20 +4183,20 @@ func (s *state) intrinsicCall(n *ir.Node) *ssa.Value { func (s *state) intrinsicArgs(n *ir.Node) []*ssa.Value { // Construct map of temps; see comments in s.call about the structure of n. temps := map[*ir.Node]*ssa.Value{} - for _, a := range n.List.Slice() { - if a.Op != ir.OAS { - s.Fatalf("non-assignment as a temp function argument %v", a.Op) + for _, a := range n.List().Slice() { + if a.Op() != ir.OAS { + s.Fatalf("non-assignment as a temp function argument %v", a.Op()) } - l, r := a.Left, a.Right - if l.Op != ir.ONAME { - s.Fatalf("non-ONAME temp function argument %v", a.Op) + l, r := a.Left(), a.Right() + if l.Op() != ir.ONAME { + s.Fatalf("non-ONAME temp function argument %v", a.Op()) } // Evaluate and store to "temporary". // Walk ensures these temporaries are dead outside of n. temps[l] = s.expr(r) } - args := make([]*ssa.Value, n.Rlist.Len()) - for i, n := range n.Rlist.Slice() { + args := make([]*ssa.Value, n.Rlist().Len()) + for i, n := range n.Rlist().Slice() { // Store a value to an argument slot. if x, ok := temps[n]; ok { // This is a previously computed temporary. @@ -4221,7 +4221,7 @@ func (s *state) openDeferRecord(n *ir.Node) { // once.mutex'. 
Such a statement will create a mapping in s.vars[] from // the autotmp name to the evaluated SSA arg value, but won't do any // stores to the stack. - s.stmtList(n.List) + s.stmtList(n.List()) var args []*ssa.Value var argNodes []*ir.Node @@ -4229,45 +4229,45 @@ func (s *state) openDeferRecord(n *ir.Node) { opendefer := &openDeferInfo{ n: n, } - fn := n.Left - if n.Op == ir.OCALLFUNC { + fn := n.Left() + if n.Op() == ir.OCALLFUNC { // We must always store the function value in a stack slot for the // runtime panic code to use. But in the defer exit code, we will // call the function directly if it is a static function. closureVal := s.expr(fn) - closure := s.openDeferSave(nil, fn.Type, closureVal) + closure := s.openDeferSave(nil, fn.Type(), closureVal) opendefer.closureNode = closure.Aux.(*ir.Node) - if !(fn.Op == ir.ONAME && fn.Class() == ir.PFUNC) { + if !(fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC) { opendefer.closure = closure } - } else if n.Op == ir.OCALLMETH { - if fn.Op != ir.ODOTMETH { + } else if n.Op() == ir.OCALLMETH { + if fn.Op() != ir.ODOTMETH { base.Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn) } closureVal := s.getMethodClosure(fn) // We must always store the function value in a stack slot for the // runtime panic code to use. But in the defer exit code, we will // call the method directly. - closure := s.openDeferSave(nil, fn.Type, closureVal) + closure := s.openDeferSave(nil, fn.Type(), closureVal) opendefer.closureNode = closure.Aux.(*ir.Node) } else { - if fn.Op != ir.ODOTINTER { - base.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op) + if fn.Op() != ir.ODOTINTER { + base.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op()) } closure, rcvr := s.getClosureAndRcvr(fn) opendefer.closure = s.openDeferSave(nil, closure.Type, closure) // Important to get the receiver type correct, so it is recognized // as a pointer for GC purposes. - opendefer.rcvr = s.openDeferSave(nil, fn.Type.Recv().Type, rcvr) + opendefer.rcvr = s.openDeferSave(nil, fn.Type().Recv().Type, rcvr) opendefer.closureNode = opendefer.closure.Aux.(*ir.Node) opendefer.rcvrNode = opendefer.rcvr.Aux.(*ir.Node) } - for _, argn := range n.Rlist.Slice() { + for _, argn := range n.Rlist().Slice() { var v *ssa.Value - if canSSAType(argn.Type) { - v = s.openDeferSave(nil, argn.Type, s.expr(argn)) + if canSSAType(argn.Type()) { + v = s.openDeferSave(nil, argn.Type(), s.expr(argn)) } else { - v = s.openDeferSave(argn, argn.Type, nil) + v = s.openDeferSave(argn, argn.Type(), nil) } args = append(args, v) argNodes = append(argNodes, v.Aux.(*ir.Node)) @@ -4298,10 +4298,10 @@ func (s *state) openDeferSave(n *ir.Node, t *types.Type, val *ssa.Value) *ssa.Va if canSSA { pos = val.Pos } else { - pos = n.Pos + pos = n.Pos() } argTemp := tempAt(pos.WithNotStmt(), s.curfn, t) - argTemp.Name.SetOpenDeferSlot(true) + argTemp.Name().SetOpenDeferSlot(true) var addrArgTemp *ssa.Value // Use OpVarLive to make sure stack slots for the args, etc. are not // removed by dead-store elimination @@ -4312,14 +4312,14 @@ func (s *state) openDeferSave(n *ir.Node, t *types.Type, val *ssa.Value) *ssa.Va // associated defer call has been activated). 
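// What the next few lines do, roughly: instead of growing the current
// block's memory chain, the VarDef/VarLive ops and the address computation
// are spliced into the entry block's saved memory state (s.defvars of
// f.Entry), so the slot counts as defined and live from function entry on.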
s.defvars[s.f.Entry.ID][memVar] = s.entryNewValue1A(ssa.OpVarDef, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar]) s.defvars[s.f.Entry.ID][memVar] = s.entryNewValue1A(ssa.OpVarLive, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar]) - addrArgTemp = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.defvars[s.f.Entry.ID][memVar]) + addrArgTemp = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.defvars[s.f.Entry.ID][memVar]) } else { // Special case if we're still in the entry block. We can't use // the above code, since s.defvars[s.f.Entry.ID] isn't defined // until we end the entry block with s.endBlock(). s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, argTemp, s.mem(), false) s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false) - addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.mem(), false) + addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.mem(), false) } if t.HasPointers() { // Since we may use this argTemp during exit depending on the @@ -4327,7 +4327,7 @@ func (s *state) openDeferSave(n *ir.Node, t *types.Type, val *ssa.Value) *ssa.Va // Therefore, we must make sure it is zeroed out in the entry // block if it contains pointers, else GC may wrongly follow an // uninitialized pointer value. - argTemp.Name.SetNeedzero(true) + argTemp.Name().SetNeedzero(true) } if !canSSA { a := s.addr(n) @@ -4385,8 +4385,8 @@ func (s *state) openDeferExit() { // closure/receiver/args that were stored in argtmps at the point // of the defer statement. argStart := base.Ctxt.FixedFrameSize() - fn := r.n.Left - stksize := fn.Type.ArgWidth() + fn := r.n.Left() + stksize := fn.Type().ArgWidth() var ACArgs []ssa.Param var ACResults []ssa.Param var callArgs []*ssa.Value @@ -4437,7 +4437,7 @@ func (s *state) openDeferExit() { call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, aux, codeptr, v, s.mem()) } } else { - aux := ssa.StaticAuxCall(fn.Sym.Linksym(), ACArgs, ACResults) + aux := ssa.StaticAuxCall(fn.Sym().Linksym(), ACArgs, ACResults) if testLateExpansion { callArgs = append(callArgs, s.mem()) call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) @@ -4461,12 +4461,12 @@ func (s *state) openDeferExit() { s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false) } if r.rcvrNode != nil { - if r.rcvrNode.Type.HasPointers() { + if r.rcvrNode.Type().HasPointers() { s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false) } } for _, argNode := range r.argNodes { - if argNode.Type.HasPointers() { + if argNode.Type().HasPointers() { s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false) } } @@ -4492,11 +4492,11 @@ func (s *state) call(n *ir.Node, k callKind, returnResultAddr bool) *ssa.Value { var closure *ssa.Value // ptr to closure to run (if dynamic) var codeptr *ssa.Value // ptr to target code (if dynamic) var rcvr *ssa.Value // receiver to set - fn := n.Left + fn := n.Left() var ACArgs []ssa.Param var ACResults []ssa.Param var callArgs []*ssa.Value - res := n.Left.Type.Results() + res := n.Left().Type().Results() if k == callNormal { nf := res.NumFields() for i := 0; i < nf; i++ { @@ -4507,11 +4507,11 @@ func (s *state) call(n *ir.Node, k callKind, returnResultAddr bool) *ssa.Value { testLateExpansion := false - switch n.Op { + switch n.Op() { case ir.OCALLFUNC: 
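// A call to a statically known top-level function keeps only its symbol;
// any other callee is a closure value that must be evaluated here (and
// nil-checked for defer/go via maybeNilCheckClosure), with its code pointer
// loaded at the call site.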
testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f) - if k == callNormal && fn.Op == ir.ONAME && fn.Class() == ir.PFUNC { - sym = fn.Sym + if k == callNormal && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC { + sym = fn.Sym() break } closure = s.expr(fn) @@ -4521,20 +4521,20 @@ func (s *state) call(n *ir.Node, k callKind, returnResultAddr bool) *ssa.Value { s.maybeNilCheckClosure(closure, k) } case ir.OCALLMETH: - if fn.Op != ir.ODOTMETH { + if fn.Op() != ir.ODOTMETH { s.Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn) } testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f) if k == callNormal { - sym = fn.Sym + sym = fn.Sym() break } closure = s.getMethodClosure(fn) // Note: receiver is already present in n.Rlist, so we don't // want to set it here. case ir.OCALLINTER: - if fn.Op != ir.ODOTINTER { - s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op) + if fn.Op() != ir.ODOTINTER { + s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op()) } testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f) var iclosure *ssa.Value @@ -4545,20 +4545,20 @@ func (s *state) call(n *ir.Node, k callKind, returnResultAddr bool) *ssa.Value { closure = iclosure } } - dowidth(fn.Type) - stksize := fn.Type.ArgWidth() // includes receiver, args, and results + dowidth(fn.Type()) + stksize := fn.Type().ArgWidth() // includes receiver, args, and results // Run all assignments of temps. // The temps are introduced to avoid overwriting argument // slots when arguments themselves require function calls. - s.stmtList(n.List) + s.stmtList(n.List()) var call *ssa.Value if k == callDeferStack { testLateExpansion = ssa.LateCallExpansionEnabledWithin(s.f) // Make a defer struct d on the stack. t := deferstruct(stksize) - d := tempAt(n.Pos, s.curfn, t) + d := tempAt(n.Pos(), s.curfn, t) s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem()) addr := s.addr(d) @@ -4584,9 +4584,9 @@ func (s *state) call(n *ir.Node, k callKind, returnResultAddr bool) *ssa.Value { // 11: fd // Then, store all the arguments of the defer call. - ft := fn.Type + ft := fn.Type() off := t.FieldOff(12) - args := n.Rlist.Slice() + args := n.Rlist().Slice() // Set receiver (for interface calls). Always a pointer. if rcvr != nil { @@ -4594,7 +4594,7 @@ func (s *state) call(n *ir.Node, k callKind, returnResultAddr bool) *ssa.Value { s.store(types.Types[types.TUINTPTR], p, rcvr) } // Set receiver (for method calls). - if n.Op == ir.OCALLMETH { + if n.Op() == ir.OCALLMETH { f := ft.Recv() s.storeArgWithBase(args[0], f.Type, addr, off+f.Offset) args = args[1:] @@ -4662,9 +4662,9 @@ func (s *state) call(n *ir.Node, k callKind, returnResultAddr bool) *ssa.Value { } // Write args. 
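// Arguments are stored at fixed offsets from SP, mirroring the function
// type's layout: for OCALLMETH the receiver (args[0]) goes at the Recv()
// field's offset, then the remaining args at their parameter offsets.
// Schematically, for m.M(a, b):
//
//	SP+argStart+off(recv): m
//	SP+argStart+off(a):    a
//	SP+argStart+off(b):    b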
- t := n.Left.Type - args := n.Rlist.Slice() - if n.Op == ir.OCALLMETH { + t := n.Left().Type() + args := n.Rlist().Slice() + if n.Op() == ir.OCALLMETH { f := t.Recv() ACArg, arg := s.putArg(args[0], f.Type, argStart+f.Offset, testLateExpansion) ACArgs = append(ACArgs, ACArg) @@ -4729,7 +4729,7 @@ func (s *state) call(n *ir.Node, k callKind, returnResultAddr bool) *ssa.Value { call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(sym.Linksym(), ACArgs, ACResults), s.mem()) } default: - s.Fatalf("bad call type %v %v", n.Op, n) + s.Fatalf("bad call type %v %v", n.Op(), n) } call.AuxInt = stksize // Call operations carry the argsize of the callee along with them } @@ -4740,7 +4740,7 @@ func (s *state) call(n *ir.Node, k callKind, returnResultAddr bool) *ssa.Value { s.vars[memVar] = call } // Insert OVARLIVE nodes - s.stmtList(n.Nbody) + s.stmtList(n.Body()) // Finish block for defers if k == callDefer || k == callDeferStack { @@ -4774,7 +4774,7 @@ func (s *state) call(n *ir.Node, k callKind, returnResultAddr bool) *ssa.Value { if testLateExpansion { return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call) } - return s.load(n.Type, s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+base.Ctxt.FixedFrameSize())) + return s.load(n.Type(), s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+base.Ctxt.FixedFrameSize())) } // maybeNilCheckClosure checks if a nil check of a closure is needed in some @@ -4794,22 +4794,22 @@ func (s *state) getMethodClosure(fn *ir.Node) *ssa.Value { // Make a PFUNC node out of that, then evaluate it. // We get back an SSA value representing &sync.(*Mutex).Unlock·f. // We can then pass that to defer or go. - n2 := ir.NewNameAt(fn.Pos, fn.Sym) - n2.Name.Curfn = s.curfn + n2 := ir.NewNameAt(fn.Pos(), fn.Sym()) + n2.Name().Curfn = s.curfn n2.SetClass(ir.PFUNC) // n2.Sym already existed, so it's already marked as a function. - n2.Pos = fn.Pos - n2.Type = types.Types[types.TUINT8] // fake type for a static closure. Could use runtime.funcval if we had it. + n2.SetPos(fn.Pos()) + n2.SetType(types.Types[types.TUINT8]) // fake type for a static closure. Could use runtime.funcval if we had it. return s.expr(n2) } // getClosureAndRcvr returns values for the appropriate closure and receiver of an // interface call func (s *state) getClosureAndRcvr(fn *ir.Node) (*ssa.Value, *ssa.Value) { - i := s.expr(fn.Left) + i := s.expr(fn.Left()) itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i) s.nilCheck(itab) - itabidx := fn.Xoffset + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab + itabidx := fn.Offset() + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab) rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i) return closure, rcvr @@ -4830,21 +4830,21 @@ func etypesign(e types.EType) int8 { // addr converts the address of the expression n to SSA, adds it to s and returns the SSA result. // The value that the returned Value represents is guaranteed to be non-nil. func (s *state) addr(n *ir.Node) *ssa.Value { - if n.Op != ir.ONAME { - s.pushLine(n.Pos) + if n.Op() != ir.ONAME { + s.pushLine(n.Pos()) defer s.popLine() } - t := types.NewPtr(n.Type) - switch n.Op { + t := types.NewPtr(n.Type()) + switch n.Op() { case ir.ONAME: switch n.Class() { case ir.PEXTERN: // global variable - v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym.Linksym(), s.sb) + v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym().Linksym(), s.sb) // TODO: Make OpAddr use AuxInt as well as Aux. 
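// Since OpAddr carries only the symbol (hence the TODO above), a nonzero
// offset into a global is added with a separate OpOffPtr. Roughly, &g.f is
//
//	v = Addr {g} SB
//	v = OffPtr [off(f)] v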
- if n.Xoffset != 0 { - v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v) + if n.Offset() != 0 { + v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Offset(), v) } return v case ir.PPARAM: @@ -4873,44 +4873,44 @@ func (s *state) addr(n *ir.Node) *ssa.Value { case ir.ORESULT: // load return from callee if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall { - return s.constOffPtrSP(t, n.Xoffset) + return s.constOffPtrSP(t, n.Offset()) } - which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Xoffset) + which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Offset()) if which == -1 { // Do the old thing // TODO: Panic instead. - return s.constOffPtrSP(t, n.Xoffset) + return s.constOffPtrSP(t, n.Offset()) } x := s.newValue1I(ssa.OpSelectNAddr, t, which, s.prevCall) return x case ir.OINDEX: - if n.Left.Type.IsSlice() { - a := s.expr(n.Left) - i := s.expr(n.Right) + if n.Left().Type().IsSlice() { + a := s.expr(n.Left()) + i := s.expr(n.Right()) len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], a) i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) p := s.newValue1(ssa.OpSlicePtr, t, a) return s.newValue2(ssa.OpPtrIndex, t, p, i) } else { // array - a := s.addr(n.Left) - i := s.expr(n.Right) - len := s.constInt(types.Types[types.TINT], n.Left.Type.NumElem()) + a := s.addr(n.Left()) + i := s.expr(n.Right()) + len := s.constInt(types.Types[types.TINT], n.Left().Type().NumElem()) i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) - return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left.Type.Elem()), a, i) + return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left().Type().Elem()), a, i) } case ir.ODEREF: - return s.exprPtr(n.Left, n.Bounded(), n.Pos) + return s.exprPtr(n.Left(), n.Bounded(), n.Pos()) case ir.ODOT: - p := s.addr(n.Left) - return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p) + p := s.addr(n.Left()) + return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p) case ir.ODOTPTR: - p := s.exprPtr(n.Left, n.Bounded(), n.Pos) - return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p) + p := s.exprPtr(n.Left(), n.Bounded(), n.Pos()) + return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p) case ir.OCLOSUREVAR: - return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, + return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)) case ir.OCONVNOP: - addr := s.addr(n.Left) + addr := s.addr(n.Left()) return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH: return s.callAddr(n, callNormal) @@ -4924,7 +4924,7 @@ func (s *state) addr(n *ir.Node) *ssa.Value { } return v.Args[0] default: - s.Fatalf("unhandled addr %v", n.Op) + s.Fatalf("unhandled addr %v", n.Op()) return nil } } @@ -4935,13 +4935,13 @@ func (s *state) canSSA(n *ir.Node) bool { if base.Flag.N != 0 { return false } - for n.Op == ir.ODOT || (n.Op == ir.OINDEX && n.Left.Type.IsArray()) { - n = n.Left + for n.Op() == ir.ODOT || (n.Op() == ir.OINDEX && n.Left().Type().IsArray()) { + n = n.Left() } - if n.Op != ir.ONAME { + if n.Op() != ir.ONAME { return false } - if n.Name.Addrtaken() { + if n.Name().Addrtaken() { return false } if isParamHeapCopy(n) { @@ -4968,13 +4968,13 @@ func (s *state) canSSA(n *ir.Node) bool { return false } } - if n.Class() == ir.PPARAM && n.Sym != nil && n.Sym.Name == ".this" { + if n.Class() == ir.PPARAM && n.Sym() != nil && n.Sym().Name == ".this" { // wrappers generated by genwrapper need to update // the 
.this pointer in place. // TODO: treat as a PPARAMOUT? return false } - return canSSAType(n.Type) + return canSSAType(n.Type()) // TODO: try to make more variables SSAable? } @@ -5028,7 +5028,7 @@ func (s *state) exprPtr(n *ir.Node, bounded bool, lineno src.XPos) *ssa.Value { // Used only for automatically inserted nil checks, // not for user code like 'x != nil'. func (s *state) nilCheck(ptr *ssa.Value) { - if base.Debug.DisableNil != 0 || s.curfn.Func.NilCheckDisabled() { + if base.Debug.DisableNil != 0 || s.curfn.Func().NilCheckDisabled() { return } s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem()) @@ -5161,10 +5161,10 @@ func (s *state) intDivide(n *ir.Node, a, b *ssa.Value) *ssa.Value { } if needcheck { // do a size-appropriate check for zero - cmp := s.newValue2(s.ssaOp(ir.ONE, n.Type), types.Types[types.TBOOL], b, s.zeroVal(n.Type)) + cmp := s.newValue2(s.ssaOp(ir.ONE, n.Type()), types.Types[types.TBOOL], b, s.zeroVal(n.Type())) s.check(cmp, panicdivide) } - return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) + return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b) } // rtcall issues a call to the given runtime function fn with the listed args. @@ -5609,7 +5609,7 @@ func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *ir.Node, x *ssa.Value, ft, bElse.AddEdgeTo(bAfter) s.startBlock(bAfter) - return s.variable(n, n.Type) + return s.variable(n, n.Type()) } type u322fcvtTab struct { @@ -5669,12 +5669,12 @@ func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *ir.Node, x *ssa.Value, ft, bElse.AddEdgeTo(bAfter) s.startBlock(bAfter) - return s.variable(n, n.Type) + return s.variable(n, n.Type()) } // referenceTypeBuiltin generates code for the len/cap builtins for maps and channels. func (s *state) referenceTypeBuiltin(n *ir.Node, x *ssa.Value) *ssa.Value { - if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() { + if !n.Left().Type().IsMap() && !n.Left().Type().IsChan() { s.Fatalf("node must be a map or a channel") } // if n == nil { @@ -5685,7 +5685,7 @@ func (s *state) referenceTypeBuiltin(n *ir.Node, x *ssa.Value) *ssa.Value { // // cap // return *(((*int)n)+1) // } - lenType := n.Type + lenType := n.Type() nilValue := s.constNil(types.Types[types.TUINTPTR]) cmp := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], x, nilValue) b := s.endBlock() @@ -5706,7 +5706,7 @@ func (s *state) referenceTypeBuiltin(n *ir.Node, x *ssa.Value) *ssa.Value { b.AddEdgeTo(bElse) s.startBlock(bElse) - switch n.Op { + switch n.Op() { case ir.OLEN: // length is stored in the first word for map/chan s.vars[n] = s.load(lenType, x) @@ -5824,23 +5824,23 @@ func (s *state) floatToUint(cvttab *f2uCvtTab, n *ir.Node, x *ssa.Value, ft, tt bElse.AddEdgeTo(bAfter) s.startBlock(bAfter) - return s.variable(n, n.Type) + return s.variable(n, n.Type()) } // dottype generates SSA for a type assertion node. // commaok indicates whether to panic or return a bool. // If commaok is false, resok will be nil. func (s *state) dottype(n *ir.Node, commaok bool) (res, resok *ssa.Value) { - iface := s.expr(n.Left) // input interface - target := s.expr(n.Right) // target type + iface := s.expr(n.Left()) // input interface + target := s.expr(n.Right()) // target type byteptr := s.f.Config.Types.BytePtr - if n.Type.IsInterface() { - if n.Type.IsEmptyInterface() { + if n.Type().IsInterface() { + if n.Type().IsEmptyInterface() { // Converting to an empty interface. // Input could be an empty or nonempty interface. 
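// No itab lookup is needed in this case: the assertion succeeds iff the
// type/itab word of the input is non-nil. A sketch of the test built below:
//
//	itab := ITab(iface)
//	ok := itab != nil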
if base.Debug.TypeAssert > 0 { - base.WarnfAt(n.Pos, "type assertion inlined") + base.WarnfAt(n.Pos(), "type assertion inlined") } // Get itab/type field from input. @@ -5848,7 +5848,7 @@ func (s *state) dottype(n *ir.Node, commaok bool) (res, resok *ssa.Value) { // Conversion succeeds iff that field is not nil. cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr)) - if n.Left.Type.IsEmptyInterface() && commaok { + if n.Left().Type().IsEmptyInterface() && commaok { // Converting empty interface to empty interface with ,ok is just a nil check. return iface, cond } @@ -5870,15 +5870,15 @@ func (s *state) dottype(n *ir.Node, commaok bool) (res, resok *ssa.Value) { // On success, return (perhaps modified) input interface. s.startBlock(bOk) - if n.Left.Type.IsEmptyInterface() { + if n.Left().Type().IsEmptyInterface() { res = iface // Use input interface unchanged. return } // Load type out of itab, build interface with existing idata. off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab) typ := s.load(byteptr, off) - idata := s.newValue1(ssa.OpIData, n.Type, iface) - res = s.newValue2(ssa.OpIMake, n.Type, typ, idata) + idata := s.newValue1(ssa.OpIData, n.Type(), iface) + res = s.newValue2(ssa.OpIMake, n.Type(), typ, idata) return } @@ -5899,55 +5899,55 @@ func (s *state) dottype(n *ir.Node, commaok bool) (res, resok *ssa.Value) { bOk.AddEdgeTo(bEnd) bFail.AddEdgeTo(bEnd) s.startBlock(bEnd) - idata := s.newValue1(ssa.OpIData, n.Type, iface) - res = s.newValue2(ssa.OpIMake, n.Type, s.variable(typVar, byteptr), idata) + idata := s.newValue1(ssa.OpIData, n.Type(), iface) + res = s.newValue2(ssa.OpIMake, n.Type(), s.variable(typVar, byteptr), idata) resok = cond delete(s.vars, typVar) return } // converting to a nonempty interface needs a runtime call. if base.Debug.TypeAssert > 0 { - base.WarnfAt(n.Pos, "type assertion not inlined") + base.WarnfAt(n.Pos(), "type assertion not inlined") } - if n.Left.Type.IsEmptyInterface() { + if n.Left().Type().IsEmptyInterface() { if commaok { - call := s.rtcall(assertE2I2, true, []*types.Type{n.Type, types.Types[types.TBOOL]}, target, iface) + call := s.rtcall(assertE2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface) return call[0], call[1] } - return s.rtcall(assertE2I, true, []*types.Type{n.Type}, target, iface)[0], nil + return s.rtcall(assertE2I, true, []*types.Type{n.Type()}, target, iface)[0], nil } if commaok { - call := s.rtcall(assertI2I2, true, []*types.Type{n.Type, types.Types[types.TBOOL]}, target, iface) + call := s.rtcall(assertI2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface) return call[0], call[1] } - return s.rtcall(assertI2I, true, []*types.Type{n.Type}, target, iface)[0], nil + return s.rtcall(assertI2I, true, []*types.Type{n.Type()}, target, iface)[0], nil } if base.Debug.TypeAssert > 0 { - base.WarnfAt(n.Pos, "type assertion inlined") + base.WarnfAt(n.Pos(), "type assertion inlined") } // Converting to a concrete type. - direct := isdirectiface(n.Type) + direct := isdirectiface(n.Type()) itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface if base.Debug.TypeAssert > 0 { - base.WarnfAt(n.Pos, "type assertion inlined") + base.WarnfAt(n.Pos(), "type assertion inlined") } var targetITab *ssa.Value - if n.Left.Type.IsEmptyInterface() { + if n.Left().Type().IsEmptyInterface() { // Looking for pointer to target type. targetITab = target } else { // Looking for pointer to itab for target type and source interface. 
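// The assertion then reduces, roughly, to a single pointer comparison
//
//	ok := ITab(iface) == targetITab
//
// with the data word extracted via IData on success (used directly for
// pointer-shaped types, through a load or move otherwise).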
- targetITab = s.expr(n.List.First()) + targetITab = s.expr(n.List().First()) } var tmp *ir.Node // temporary for use with large types var addr *ssa.Value // address of tmp - if commaok && !canSSAType(n.Type) { + if commaok && !canSSAType(n.Type()) { // unSSAable type, use temporary. // TODO: get rid of some of these temporaries. - tmp = tempAt(n.Pos, s.curfn, n.Type) + tmp = tempAt(n.Pos(), s.curfn, n.Type()) s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem()) addr = s.addr(tmp) } @@ -5966,8 +5966,8 @@ func (s *state) dottype(n *ir.Node, commaok bool) (res, resok *ssa.Value) { if !commaok { // on failure, panic by calling panicdottype s.startBlock(bFail) - taddr := s.expr(n.Right.Right) - if n.Left.Type.IsEmptyInterface() { + taddr := s.expr(n.Right().Right()) + if n.Left().Type().IsEmptyInterface() { s.rtcall(panicdottypeE, false, nil, itab, target, taddr) } else { s.rtcall(panicdottypeI, false, nil, itab, target, taddr) @@ -5976,10 +5976,10 @@ func (s *state) dottype(n *ir.Node, commaok bool) (res, resok *ssa.Value) { // on success, return data from interface s.startBlock(bOk) if direct { - return s.newValue1(ssa.OpIData, n.Type, iface), nil + return s.newValue1(ssa.OpIData, n.Type(), iface), nil } - p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface) - return s.load(n.Type, p), nil + p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface) + return s.load(n.Type(), p), nil } // commaok is the more complicated case because we have @@ -5993,14 +5993,14 @@ func (s *state) dottype(n *ir.Node, commaok bool) (res, resok *ssa.Value) { s.startBlock(bOk) if tmp == nil { if direct { - s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface) + s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type(), iface) } else { - p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface) - s.vars[valVar] = s.load(n.Type, p) + p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface) + s.vars[valVar] = s.load(n.Type(), p) } } else { - p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface) - s.move(n.Type, addr, p) + p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface) + s.move(n.Type(), addr, p) } s.vars[okVar] = s.constBool(true) s.endBlock() @@ -6009,9 +6009,9 @@ func (s *state) dottype(n *ir.Node, commaok bool) (res, resok *ssa.Value) { // type assertion failed s.startBlock(bFail) if tmp == nil { - s.vars[valVar] = s.zeroVal(n.Type) + s.vars[valVar] = s.zeroVal(n.Type()) } else { - s.zero(n.Type, addr) + s.zero(n.Type(), addr) } s.vars[okVar] = s.constBool(false) s.endBlock() @@ -6020,10 +6020,10 @@ func (s *state) dottype(n *ir.Node, commaok bool) (res, resok *ssa.Value) { // merge point s.startBlock(bEnd) if tmp == nil { - res = s.variable(valVar, n.Type) + res = s.variable(valVar, n.Type()) delete(s.vars, valVar) } else { - res = s.load(n.Type, addr) + res = s.load(n.Type(), addr) s.vars[memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem()) } resok = s.variable(okVar, types.Types[types.TBOOL]) @@ -6072,10 +6072,10 @@ func (s *state) addNamedValue(n *ir.Node, v *ssa.Value) { // from being assigned too early. See #14591 and #14762. TODO: allow this. 
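// addNamedValue is what ties SSA values back to source-level variables:
// each (variable, type, offset) slot collects the values that carry it, and
// ssa.BuildFuncDebug (further down) turns those lists into location-list
// debug info. Skipping a variable here degrades its debug info, not the
// generated code.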
return } - if n.Class() == ir.PAUTO && n.Xoffset != 0 { - s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset) + if n.Class() == ir.PAUTO && n.Offset() != 0 { + s.Fatalf("AUTO var with offset %v %d", n, n.Offset()) } - loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0} + loc := ssa.LocalSlot{N: n, Type: n.Type(), Off: 0} values, ok := s.f.NamedValues[loc] if !ok { s.f.Names = append(s.f.Names, loc) @@ -6197,13 +6197,13 @@ func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) { type byXoffset []*ir.Node func (s byXoffset) Len() int { return len(s) } -func (s byXoffset) Less(i, j int) bool { return s[i].Xoffset < s[j].Xoffset } +func (s byXoffset) Less(i, j int) bool { return s[i].Offset() < s[j].Offset() } func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func emitStackObjects(e *ssafn, pp *Progs) { var vars []*ir.Node - for _, n := range e.curfn.Func.Dcl { - if livenessShouldTrack(n) && n.Name.Addrtaken() { + for _, n := range e.curfn.Func().Dcl { + if livenessShouldTrack(n) && n.Name().Addrtaken() { vars = append(vars, n) } } @@ -6216,18 +6216,18 @@ func emitStackObjects(e *ssafn, pp *Progs) { // Populate the stack object data. // Format must match runtime/stack.go:stackObjectRecord. - x := e.curfn.Func.LSym.Func().StackObjects + x := e.curfn.Func().LSym.Func().StackObjects off := 0 off = duintptr(x, off, uint64(len(vars))) for _, v := range vars { // Note: arguments and return values have non-negative Xoffset, // in which case the offset is relative to argp. // Locals have a negative Xoffset, in which case the offset is relative to varp. - off = duintptr(x, off, uint64(v.Xoffset)) - if !typesym(v.Type).Siggen() { - e.Fatalf(v.Pos, "stack object's type symbol not generated for type %s", v.Type) + off = duintptr(x, off, uint64(v.Offset())) + if !typesym(v.Type()).Siggen() { + e.Fatalf(v.Pos(), "stack object's type symbol not generated for type %s", v.Type()) } - off = dsymptr(x, off, dtypesym(v.Type), 0) + off = dsymptr(x, off, dtypesym(v.Type()), 0) } // Emit a funcdata pointing at the stack object data. @@ -6239,7 +6239,7 @@ func emitStackObjects(e *ssafn, pp *Progs) { if base.Flag.Live != 0 { for _, v := range vars { - base.WarnfAt(v.Pos, "stack object %v %s", v, v.Type.String()) + base.WarnfAt(v.Pos(), "stack object %v %s", v, v.Type().String()) } } } @@ -6253,7 +6253,7 @@ func genssa(f *ssa.Func, pp *Progs) { s.livenessMap = liveness(e, f, pp) emitStackObjects(e, pp) - openDeferInfo := e.curfn.Func.LSym.Func().OpenCodedDeferInfo + openDeferInfo := e.curfn.Func().LSym.Func().OpenCodedDeferInfo if openDeferInfo != nil { // This function uses open-coded defers -- write out the funcdata // info that we computed at the end of genssa. @@ -6458,7 +6458,7 @@ func genssa(f *ssa.Func, pp *Progs) { // some of the inline marks. // Use this instruction instead. p.Pos = p.Pos.WithIsStmt() // promote position to a statement - pp.curfn.Func.LSym.Func().AddInlMark(p, inlMarks[m]) + pp.curfn.Func().LSym.Func().AddInlMark(p, inlMarks[m]) // Make the inline mark a real nop, so it doesn't generate any code. m.As = obj.ANOP m.Pos = src.NoXPos @@ -6470,14 +6470,14 @@ func genssa(f *ssa.Func, pp *Progs) { // Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction). 
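// Recap of the scheme above: an inline mark records "an inlined body starts
// here" for stack traces. Marks that could piggyback on a neighboring real
// instruction were rewritten to zero-size ANOPs; the unmatched ones handled
// below are registered with the inlining tree and kept as actual nops.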
for _, p := range inlMarkList { if p.As != obj.ANOP { - pp.curfn.Func.LSym.Func().AddInlMark(p, inlMarks[p]) + pp.curfn.Func().LSym.Func().AddInlMark(p, inlMarks[p]) } } } if base.Ctxt.Flag_locationlists { debugInfo := ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, stackOffset) - e.curfn.Func.DebugInfo = debugInfo + e.curfn.Func().DebugInfo = debugInfo bstart := s.bstart // Note that at this moment, Prog.Pc is a sequence number; it's // not a real PC until after assembly, so this mapping has to @@ -6491,7 +6491,7 @@ func genssa(f *ssa.Func, pp *Progs) { } return bstart[b].Pc case ssa.BlockEnd.ID: - return e.curfn.Func.LSym.Size + return e.curfn.Func().LSym.Size default: return valueToProgAfter[v].Pc } @@ -6575,7 +6575,7 @@ func defframe(s *SSAGenState, e *ssafn) { // Fill in argument and frame size. pp.Text.To.Type = obj.TYPE_TEXTSIZE - pp.Text.To.Val = int32(Rnd(e.curfn.Type.ArgWidth(), int64(Widthreg))) + pp.Text.To.Val = int32(Rnd(e.curfn.Type().ArgWidth(), int64(Widthreg))) pp.Text.To.Offset = frame // Insert code to zero ambiguously live variables so that the @@ -6589,20 +6589,20 @@ func defframe(s *SSAGenState, e *ssafn) { var state uint32 // Iterate through declarations. They are sorted in decreasing Xoffset order. - for _, n := range e.curfn.Func.Dcl { - if !n.Name.Needzero() { + for _, n := range e.curfn.Func().Dcl { + if !n.Name().Needzero() { continue } if n.Class() != ir.PAUTO { - e.Fatalf(n.Pos, "needzero class %d", n.Class()) + e.Fatalf(n.Pos(), "needzero class %d", n.Class()) } - if n.Type.Size()%int64(Widthptr) != 0 || n.Xoffset%int64(Widthptr) != 0 || n.Type.Size() == 0 { - e.Fatalf(n.Pos, "var %L has size %d offset %d", n, n.Type.Size(), n.Xoffset) + if n.Type().Size()%int64(Widthptr) != 0 || n.Offset()%int64(Widthptr) != 0 || n.Type().Size() == 0 { + e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset()) } - if lo != hi && n.Xoffset+n.Type.Size() >= lo-int64(2*Widthreg) { + if lo != hi && n.Offset()+n.Type().Size() >= lo-int64(2*Widthreg) { // Merge with range we already have. - lo = n.Xoffset + lo = n.Offset() continue } @@ -6610,8 +6610,8 @@ func defframe(s *SSAGenState, e *ssafn) { p = thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state) // Set new range. - lo = n.Xoffset - hi = lo + n.Type.Size() + lo = n.Offset() + hi = lo + n.Type().Size() } // Zero final range. 
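// Illustrative sketch, not part of the patch: the defframe loop above
// coalesces ambiguously-live locals into contiguous runs so that
// thearch.ZeroRange is emitted once per run rather than once per variable.
// The same merge as a hypothetical standalone helper (the name
// mergeZeroRuns and the [2]int64 range form are invented for exposition;
// ranges arrive in decreasing start order, like Dcl above, and slack
// plays the role of 2*Widthreg):
//
//	func mergeZeroRuns(ranges [][2]int64, slack int64) (runs [][2]int64) {
//		var lo, hi int64 // current run; empty while lo == hi
//		for _, r := range ranges { // r is [start, end)
//			if lo != hi && r[1] >= lo-slack {
//				lo = r[0] // gap is small: extend the run downward
//				continue
//			}
//			if lo != hi {
//				runs = append(runs, [2]int64{lo, hi})
//			}
//			lo, hi = r[0], r[1] // start a new run
//		}
//		if lo != hi { // flush the last run, as "Zero final range" does
//			runs = append(runs, [2]int64{lo, hi})
//		}
//		return runs
//	}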
@@ -6680,13 +6680,13 @@ func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) { case *ir.Node: if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT { a.Name = obj.NAME_PARAM - a.Sym = n.Orig.Sym.Linksym() - a.Offset += n.Xoffset + a.Sym = n.Orig().Sym().Linksym() + a.Offset += n.Offset() break } a.Name = obj.NAME_AUTO - a.Sym = n.Sym.Linksym() - a.Offset += n.Xoffset + a.Sym = n.Sym().Linksym() + a.Offset += n.Offset() default: v.Fatalf("aux in %s not implemented %#v", v, v.Aux) } @@ -6827,9 +6827,9 @@ func AutoVar(v *ssa.Value) (*ir.Node, int64) { func AddrAuto(a *obj.Addr, v *ssa.Value) { n, off := AutoVar(v) a.Type = obj.TYPE_MEM - a.Sym = n.Sym.Linksym() + a.Sym = n.Sym().Linksym() a.Reg = int16(thearch.REGSP) - a.Offset = n.Xoffset + off + a.Offset = n.Offset() + off if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT { a.Name = obj.NAME_PARAM } else { @@ -6843,9 +6843,9 @@ func (s *SSAGenState) AddrScratch(a *obj.Addr) { } a.Type = obj.TYPE_MEM a.Name = obj.NAME_AUTO - a.Sym = s.ScratchFpMem.Sym.Linksym() + a.Sym = s.ScratchFpMem.Sym().Linksym() a.Reg = int16(thearch.REGSP) - a.Offset = s.ScratchFpMem.Xoffset + a.Offset = s.ScratchFpMem.Offset() } // Call returns a new CALL instruction for the SSA value v. @@ -6928,8 +6928,8 @@ func (s *SSAGenState) UseArgs(n int64) { // fieldIdx finds the index of the field referred to by the ODOT node n. func fieldIdx(n *ir.Node) int { - t := n.Left.Type - f := n.Sym + t := n.Left().Type() + f := n.Sym() if !t.IsStruct() { panic("ODOT's LHS is not a struct") } @@ -6940,7 +6940,7 @@ func fieldIdx(n *ir.Node) int { i++ continue } - if t1.Offset != n.Xoffset { + if t1.Offset != n.Offset() { panic("field offset doesn't match") } return i @@ -6971,7 +6971,7 @@ func (e *ssafn) StringData(s string) *obj.LSym { if e.strings == nil { e.strings = make(map[string]*obj.LSym) } - data := stringsym(e.curfn.Pos, s) + data := stringsym(e.curfn.Pos(), s) e.strings[s] = data return data } @@ -6996,7 +6996,7 @@ func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot t := types.NewPtr(types.Types[types.TUINT8]) // Split this interface up into two separate variables. f := ".itab" - if n.Type.IsEmptyInterface() { + if n.Type().IsEmptyInterface() { f = ".type" } c := e.SplitSlot(&name, f, 0, u) // see comment in plive.go:onebitwalktype1. 
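// Illustrative sketch, not part of the patch: SplitInterface above leans on
// the two-word runtime layout of interface values, roughly (field names are
// for exposition only; the real definitions live in package runtime):
//
//	type eface struct { // empty interface, split as ".type" + ".data"
//		typ  unsafe.Pointer // *runtime._type
//		data unsafe.Pointer
//	}
//	type iface struct { // non-empty interface, split as ".itab" + ".data"
//		tab  unsafe.Pointer // *runtime.itab
//		data unsafe.Pointer
//	}
//
// so the SplitSlot call shown carves out the descriptor word at offset 0,
// and a second split would take the data word at pointer-size offset.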
@@ -7051,7 +7051,7 @@ func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot { n := name.N at := name.Type if at.NumElem() != 1 { - e.Fatalf(n.Pos, "bad array size") + e.Fatalf(n.Pos(), "bad array size") } et := at.Elem() return e.SplitSlot(&name, "[0]", 0, et) @@ -7065,20 +7065,20 @@ func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym { func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot { node := parent.N - if node.Class() != ir.PAUTO || node.Name.Addrtaken() { + if node.Class() != ir.PAUTO || node.Name().Addrtaken() { // addressed things and non-autos retain their parents (i.e., cannot truly be split) return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset} } - s := &types.Sym{Name: node.Sym.Name + suffix, Pkg: ir.LocalPkg} - n := ir.NewNameAt(parent.N.Pos, s) + s := &types.Sym{Name: node.Sym().Name + suffix, Pkg: ir.LocalPkg} + n := ir.NewNameAt(parent.N.Pos(), s) s.Def = ir.AsTypesNode(n) - ir.AsNode(s.Def).Name.SetUsed(true) - n.Type = t + ir.AsNode(s.Def).Name().SetUsed(true) + n.SetType(t) n.SetClass(ir.PAUTO) - n.Esc = EscNever - n.Name.Curfn = e.curfn - e.curfn.Func.Dcl = append(e.curfn.Func.Dcl, n) + n.SetEsc(EscNever) + n.Name().Curfn = e.curfn + e.curfn.Func().Dcl = append(e.curfn.Func().Dcl, n) dowidth(t) return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset} } @@ -7141,7 +7141,7 @@ func (e *ssafn) Syslook(name string) *obj.LSym { } func (e *ssafn) SetWBPos(pos src.XPos) { - e.curfn.Func.SetWBPos(pos) + e.curfn.Func().SetWBPos(pos) } func (e *ssafn) MyImportPath() string { @@ -7149,11 +7149,11 @@ func (e *ssafn) MyImportPath() string { } func clobberBase(n *ir.Node) *ir.Node { - if n.Op == ir.ODOT && n.Left.Type.NumFields() == 1 { - return clobberBase(n.Left) + if n.Op() == ir.ODOT && n.Left().Type().NumFields() == 1 { + return clobberBase(n.Left()) } - if n.Op == ir.OINDEX && n.Left.Type.IsArray() && n.Left.Type.NumElem() == 1 { - return clobberBase(n.Left) + if n.Op() == ir.OINDEX && n.Left().Type().IsArray() && n.Left().Type().NumElem() == 1 { + return clobberBase(n.Left()) } return n } diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 46f4153fe19fc..542dc49bb0e09 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -41,16 +41,16 @@ var ( // whose Pos will point back to their declaration position rather than // their usage position. 
func hasUniquePos(n *ir.Node) bool { - switch n.Op { + switch n.Op() { case ir.ONAME, ir.OPACK: return false case ir.OLITERAL, ir.ONIL, ir.OTYPE: - if n.Sym != nil { + if n.Sym() != nil { return false } } - if !n.Pos.IsKnown() { + if !n.Pos().IsKnown() { if base.Flag.K != 0 { base.Warn("setlineno: unknown position (line 0)") } @@ -63,7 +63,7 @@ func hasUniquePos(n *ir.Node) bool { func setlineno(n *ir.Node) src.XPos { lno := base.Pos if n != nil && hasUniquePos(n) { - base.Pos = n.Pos + base.Pos = n.Pos() } return lno } @@ -95,8 +95,8 @@ func autolabel(prefix string) *types.Sym { if Curfn == nil { base.Fatalf("autolabel outside function") } - n := fn.Func.Label - fn.Func.Label++ + n := fn.Func().Label + fn.Func().Label++ return lookupN(prefix, int(n)) } @@ -120,25 +120,25 @@ func importdot(opkg *types.Pkg, pack *ir.Node) { s1.Def = s.Def s1.Block = s.Block - if ir.AsNode(s1.Def).Name == nil { + if ir.AsNode(s1.Def).Name() == nil { ir.Dump("s1def", ir.AsNode(s1.Def)) base.Fatalf("missing Name") } - ir.AsNode(s1.Def).Name.Pack = pack + ir.AsNode(s1.Def).Name().Pack = pack s1.Origpkg = opkg n++ } if n == 0 { // can't possibly be used - there were no symbols - base.ErrorfAt(pack.Pos, "imported and not used: %q", opkg.Path) + base.ErrorfAt(pack.Pos(), "imported and not used: %q", opkg.Path) } } // newname returns a new ONAME Node associated with symbol s. func NewName(s *types.Sym) *ir.Node { n := ir.NewNameAt(base.Pos, s) - n.Name.Curfn = Curfn + n.Name().Curfn = Curfn return n } @@ -152,7 +152,7 @@ func nodSym(op ir.Op, left *ir.Node, sym *types.Sym) *ir.Node { // and the Sym field set to sym. This is for ODOT and friends. func nodlSym(pos src.XPos, op ir.Op, left *ir.Node, sym *types.Sym) *ir.Node { n := ir.NodAt(pos, op, left, nil) - n.Sym = sym + n.SetSym(sym) return n } @@ -169,7 +169,7 @@ func nodintconst(v int64) *ir.Node { func nodnil() *ir.Node { n := ir.Nod(ir.ONIL, nil, nil) - n.Type = types.Types[types.TNIL] + n.SetType(types.Types[types.TNIL]) return n } @@ -190,16 +190,16 @@ func treecopy(n *ir.Node, pos src.XPos) *ir.Node { return nil } - switch n.Op { + switch n.Op() { default: m := ir.SepCopy(n) - m.Left = treecopy(n.Left, pos) - m.Right = treecopy(n.Right, pos) - m.List.Set(listtreecopy(n.List.Slice(), pos)) + m.SetLeft(treecopy(n.Left(), pos)) + m.SetRight(treecopy(n.Right(), pos)) + m.PtrList().Set(listtreecopy(n.List().Slice(), pos)) if pos.IsKnown() { - m.Pos = pos + m.SetPos(pos) } - if m.Name != nil && n.Op != ir.ODCLFIELD { + if m.Name() != nil && n.Op() != ir.ODCLFIELD { ir.Dump("treecopy", n) base.Fatalf("treecopy Name") } @@ -517,16 +517,16 @@ func assignconv(n *ir.Node, t *types.Type, context string) *ir.Node { // Convert node n for assignment to type t. func assignconvfn(n *ir.Node, t *types.Type, context func() string) *ir.Node { - if n == nil || n.Type == nil || n.Type.Broke() { + if n == nil || n.Type() == nil || n.Type().Broke() { return n } - if t.Etype == types.TBLANK && n.Type.Etype == types.TNIL { + if t.Etype == types.TBLANK && n.Type().Etype == types.TNIL { base.Errorf("use of untyped nil") } n = convlit1(n, t, false, context) - if n.Type == nil { + if n.Type() == nil { return n } if t.Etype == types.TBLANK { @@ -535,31 +535,31 @@ func assignconvfn(n *ir.Node, t *types.Type, context func() string) *ir.Node { // Convert ideal bool from comparison to plain bool // if the next step is non-bool (like interface{}). 
- if n.Type == types.UntypedBool && !t.IsBoolean() { - if n.Op == ir.ONAME || n.Op == ir.OLITERAL { + if n.Type() == types.UntypedBool && !t.IsBoolean() { + if n.Op() == ir.ONAME || n.Op() == ir.OLITERAL { r := ir.Nod(ir.OCONVNOP, n, nil) - r.Type = types.Types[types.TBOOL] + r.SetType(types.Types[types.TBOOL]) r.SetTypecheck(1) r.SetImplicit(true) n = r } } - if types.Identical(n.Type, t) { + if types.Identical(n.Type(), t) { return n } - op, why := assignop(n.Type, t) + op, why := assignop(n.Type(), t) if op == ir.OXXX { base.Errorf("cannot use %L as type %v in %s%s", n, t, context(), why) op = ir.OCONV } r := ir.Nod(op, n, nil) - r.Type = t + r.SetType(t) r.SetTypecheck(1) r.SetImplicit(true) - r.Orig = n.Orig + r.SetOrig(n.Orig()) return r } @@ -572,27 +572,27 @@ func backingArrayPtrLen(n *ir.Node) (ptr, len *ir.Node) { base.Fatalf("backingArrayPtrLen not cheap: %v", n) } ptr = ir.Nod(ir.OSPTR, n, nil) - if n.Type.IsString() { - ptr.Type = types.Types[types.TUINT8].PtrTo() + if n.Type().IsString() { + ptr.SetType(types.Types[types.TUINT8].PtrTo()) } else { - ptr.Type = n.Type.Elem().PtrTo() + ptr.SetType(n.Type().Elem().PtrTo()) } len = ir.Nod(ir.OLEN, n, nil) - len.Type = types.Types[types.TINT] + len.SetType(types.Types[types.TINT]) return ptr, len } // labeledControl returns the control flow Node (for, switch, select) // associated with the label n, if any. func labeledControl(n *ir.Node) *ir.Node { - if n.Op != ir.OLABEL { - base.Fatalf("labeledControl %v", n.Op) + if n.Op() != ir.OLABEL { + base.Fatalf("labeledControl %v", n.Op()) } - ctl := n.Name.Defn + ctl := n.Name().Defn if ctl == nil { return nil } - switch ctl.Op { + switch ctl.Op() { case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OSELECT: return ctl } @@ -626,12 +626,12 @@ func updateHasCall(n *ir.Node) { } func calcHasCall(n *ir.Node) bool { - if n.Ninit.Len() != 0 { + if n.Init().Len() != 0 { // TODO(mdempsky): This seems overly conservative. return true } - switch n.Op { + switch n.Op() { case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OTYPE: if n.HasCall() { base.Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n) @@ -653,23 +653,23 @@ func calcHasCall(n *ir.Node) bool { // When using soft-float, these ops might be rewritten to function calls // so we ensure they are evaluated first. 
case ir.OADD, ir.OSUB, ir.ONEG, ir.OMUL: - if thearch.SoftFloat && (isFloat[n.Type.Etype] || isComplex[n.Type.Etype]) { + if thearch.SoftFloat && (isFloat[n.Type().Etype] || isComplex[n.Type().Etype]) { return true } case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT: - if thearch.SoftFloat && (isFloat[n.Left.Type.Etype] || isComplex[n.Left.Type.Etype]) { + if thearch.SoftFloat && (isFloat[n.Left().Type().Etype] || isComplex[n.Left().Type().Etype]) { return true } case ir.OCONV: - if thearch.SoftFloat && ((isFloat[n.Type.Etype] || isComplex[n.Type.Etype]) || (isFloat[n.Left.Type.Etype] || isComplex[n.Left.Type.Etype])) { + if thearch.SoftFloat && ((isFloat[n.Type().Etype] || isComplex[n.Type().Etype]) || (isFloat[n.Left().Type().Etype] || isComplex[n.Left().Type().Etype])) { return true } } - if n.Left != nil && n.Left.HasCall() { + if n.Left() != nil && n.Left().HasCall() { return true } - if n.Right != nil && n.Right.HasCall() { + if n.Right() != nil && n.Right().HasCall() { return true } return false @@ -745,45 +745,45 @@ func safeexpr(n *ir.Node, init *ir.Nodes) *ir.Node { return nil } - if n.Ninit.Len() != 0 { - walkstmtlist(n.Ninit.Slice()) - init.AppendNodes(&n.Ninit) + if n.Init().Len() != 0 { + walkstmtlist(n.Init().Slice()) + init.AppendNodes(n.PtrInit()) } - switch n.Op { + switch n.Op() { case ir.ONAME, ir.OLITERAL, ir.ONIL: return n case ir.ODOT, ir.OLEN, ir.OCAP: - l := safeexpr(n.Left, init) - if l == n.Left { + l := safeexpr(n.Left(), init) + if l == n.Left() { return n } r := ir.Copy(n) - r.Left = l + r.SetLeft(l) r = typecheck(r, ctxExpr) r = walkexpr(r, init) return r case ir.ODOTPTR, ir.ODEREF: - l := safeexpr(n.Left, init) - if l == n.Left { + l := safeexpr(n.Left(), init) + if l == n.Left() { return n } a := ir.Copy(n) - a.Left = l + a.SetLeft(l) a = walkexpr(a, init) return a case ir.OINDEX, ir.OINDEXMAP: - l := safeexpr(n.Left, init) - r := safeexpr(n.Right, init) - if l == n.Left && r == n.Right { + l := safeexpr(n.Left(), init) + r := safeexpr(n.Right(), init) + if l == n.Left() && r == n.Right() { return n } a := ir.Copy(n) - a.Left = l - a.Right = r + a.SetLeft(l) + a.SetRight(r) a = walkexpr(a, init) return a @@ -812,12 +812,12 @@ func copyexpr(n *ir.Node, t *types.Type, init *ir.Nodes) *ir.Node { // return side-effect free and cheap n, appending side effects to init. // result may not be assignable. func cheapexpr(n *ir.Node, init *ir.Nodes) *ir.Node { - switch n.Op { + switch n.Op() { case ir.ONAME, ir.OLITERAL, ir.ONIL: return n } - return copyexpr(n, n.Type, init) + return copyexpr(n, n.Type(), init) } // Code to resolve elided DOTs in embedded types. @@ -958,20 +958,20 @@ func dotpath(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) ( // will give shortest unique addressing. // modify the tree with missing type names. 
func adddot(n *ir.Node) *ir.Node { - n.Left = typecheck(n.Left, ctxType|ctxExpr) - if n.Left.Diag() { + n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr)) + if n.Left().Diag() { n.SetDiag(true) } - t := n.Left.Type + t := n.Left().Type() if t == nil { return n } - if n.Left.Op == ir.OTYPE { + if n.Left().Op() == ir.OTYPE { return n } - s := n.Sym + s := n.Sym() if s == nil { return n } @@ -980,12 +980,12 @@ func adddot(n *ir.Node) *ir.Node { case path != nil: // rebuild elided dots for c := len(path) - 1; c >= 0; c-- { - n.Left = nodSym(ir.ODOT, n.Left, path[c].field.Sym) - n.Left.SetImplicit(true) + n.SetLeft(nodSym(ir.ODOT, n.Left(), path[c].field.Sym)) + n.Left().SetImplicit(true) } case ambig: base.Errorf("ambiguous selector %v", n) - n.Left = nil + n.SetLeft(nil) } return n @@ -1127,7 +1127,7 @@ func structargs(tl *types.Type, mustname bool) []*ir.Node { gen++ } a := symfield(s, t.Type) - a.Pos = t.Pos + a.SetPos(t.Pos) a.SetIsDDD(t.IsDDD()) args = append(args, a) } @@ -1177,14 +1177,14 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { dclcontext = ir.PEXTERN tfn := ir.Nod(ir.OTFUNC, nil, nil) - tfn.Left = namedfield(".this", rcvr) - tfn.List.Set(structargs(method.Type.Params(), true)) - tfn.Rlist.Set(structargs(method.Type.Results(), false)) + tfn.SetLeft(namedfield(".this", rcvr)) + tfn.PtrList().Set(structargs(method.Type.Params(), true)) + tfn.PtrRlist().Set(structargs(method.Type.Results(), false)) fn := dclfunc(newnam, tfn) - fn.Func.SetDupok(true) + fn.Func().SetDupok(true) - nthis := ir.AsNode(tfn.Type.Recv().Nname) + nthis := ir.AsNode(tfn.Type().Recv().Nname) methodrcvr := method.Type.Recv().Type @@ -1192,10 +1192,10 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { if rcvr.IsPtr() && rcvr.Elem() == methodrcvr { // generating wrapper from *T to T. n := ir.Nod(ir.OIF, nil, nil) - n.Left = ir.Nod(ir.OEQ, nthis, nodnil()) + n.SetLeft(ir.Nod(ir.OEQ, nthis, nodnil())) call := ir.Nod(ir.OCALL, syslook("panicwrap"), nil) - n.Nbody.Set1(call) - fn.Nbody.Append(n) + n.PtrBody().Set1(call) + fn.PtrBody().Append(n) } dot := adddot(nodSym(ir.OXDOT, nthis, method.Sym)) @@ -1209,29 +1209,29 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { // value for that function. if !instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !isifacemethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) { // generate tail call: adjust pointer receiver and jump to embedded method. - dot = dot.Left // skip final .M + dot = dot.Left() // skip final .M // TODO(mdempsky): Remove dependency on dotlist. 
if !dotlist[0].field.Type.IsPtr() { dot = ir.Nod(ir.OADDR, dot, nil) } as := ir.Nod(ir.OAS, nthis, convnop(dot, rcvr)) - fn.Nbody.Append(as) - fn.Nbody.Append(nodSym(ir.ORETJMP, nil, methodSym(methodrcvr, method.Sym))) + fn.PtrBody().Append(as) + fn.PtrBody().Append(nodSym(ir.ORETJMP, nil, methodSym(methodrcvr, method.Sym))) } else { - fn.Func.SetWrapper(true) // ignore frame for panic+recover matching + fn.Func().SetWrapper(true) // ignore frame for panic+recover matching call := ir.Nod(ir.OCALL, dot, nil) - call.List.Set(paramNnames(tfn.Type)) - call.SetIsDDD(tfn.Type.IsVariadic()) + call.PtrList().Set(paramNnames(tfn.Type())) + call.SetIsDDD(tfn.Type().IsVariadic()) if method.Type.NumResults() > 0 { n := ir.Nod(ir.ORETURN, nil, nil) - n.List.Set1(call) + n.PtrList().Set1(call) call = n } - fn.Nbody.Append(call) + fn.PtrBody().Append(call) } if false && base.Flag.LowerR != 0 { - ir.DumpList("genwrapper body", fn.Nbody) + ir.DumpList("genwrapper body", fn.Body()) } funcbody() @@ -1242,7 +1242,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { fn = typecheck(fn, ctxStmt) Curfn = fn - typecheckslice(fn.Nbody.Slice(), ctxStmt) + typecheckslice(fn.Body().Slice(), ctxStmt) // Inline calls within (*T).M wrappers. This is safe because we only // generate those wrappers within the same compilation unit as (T).M. @@ -1269,13 +1269,13 @@ func hashmem(t *types.Type) *ir.Node { n := NewName(sym) setNodeNameFunc(n) - n.Type = functype(nil, []*ir.Node{ + n.SetType(functype(nil, []*ir.Node{ anonfield(types.NewPtr(t)), anonfield(types.Types[types.TUINTPTR]), anonfield(types.Types[types.TUINTPTR]), }, []*ir.Node{ anonfield(types.Types[types.TUINTPTR]), - }) + })) return n } @@ -1403,16 +1403,16 @@ func listtreecopy(l []*ir.Node, pos src.XPos) []*ir.Node { func liststmt(l []*ir.Node) *ir.Node { n := ir.Nod(ir.OBLOCK, nil, nil) - n.List.Set(l) + n.PtrList().Set(l) if len(l) != 0 { - n.Pos = l[0].Pos + n.SetPos(l[0].Pos()) } return n } func ngotype(n *ir.Node) *types.Sym { - if n.Type != nil { - return typenamesym(n.Type) + if n.Type() != nil { + return typenamesym(n.Type()) } return nil } @@ -1426,11 +1426,11 @@ func addinit(n *ir.Node, init []*ir.Node) *ir.Node { if ir.MayBeShared(n) { // Introduce OCONVNOP to hold init list. n = ir.Nod(ir.OCONVNOP, n, nil) - n.Type = n.Left.Type + n.SetType(n.Left().Type()) n.SetTypecheck(1) } - n.Ninit.Prepend(init...) + n.PtrInit().Prepend(init...) n.SetHasCall(true) return n } @@ -1520,10 +1520,10 @@ func isdirectiface(t *types.Type) bool { // itabType loads the _type field from a runtime.itab struct. 
func itabType(itab *ir.Node) *ir.Node { typ := nodSym(ir.ODOTPTR, itab, nil) - typ.Type = types.NewPtr(types.Types[types.TUINT8]) + typ.SetType(types.NewPtr(types.Types[types.TUINT8])) typ.SetTypecheck(1) - typ.Xoffset = int64(Widthptr) // offset of _type in runtime.itab - typ.SetBounded(true) // guaranteed not to fault + typ.SetOffset(int64(Widthptr)) // offset of _type in runtime.itab + typ.SetBounded(true) // guaranteed not to fault return typ } @@ -1536,14 +1536,14 @@ func ifaceData(pos src.XPos, n *ir.Node, t *types.Type) *ir.Node { } ptr := nodlSym(pos, ir.OIDATA, n, nil) if isdirectiface(t) { - ptr.Type = t + ptr.SetType(t) ptr.SetTypecheck(1) return ptr } - ptr.Type = types.NewPtr(t) + ptr.SetType(types.NewPtr(t)) ptr.SetTypecheck(1) ind := ir.NodAt(pos, ir.ODEREF, ptr, nil) - ind.Type = t + ind.SetType(t) ind.SetTypecheck(1) ind.SetBounded(true) return ind @@ -1553,8 +1553,8 @@ func ifaceData(pos src.XPos, n *ir.Node, t *types.Type) *ir.Node { // This is where t was declared or where it appeared as a type expression. func typePos(t *types.Type) src.XPos { n := ir.AsNode(t.Nod) - if n == nil || !n.Pos.IsKnown() { + if n == nil || !n.Pos().IsKnown() { base.Fatalf("bad type: %v", t) } - return n.Pos + return n.Pos() } diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index f3195df79aa13..c85483fafaaf8 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -16,8 +16,8 @@ import ( // typecheckswitch typechecks a switch statement. func typecheckswitch(n *ir.Node) { - typecheckslice(n.Ninit.Slice(), ctxStmt) - if n.Left != nil && n.Left.Op == ir.OTYPESW { + typecheckslice(n.Init().Slice(), ctxStmt) + if n.Left() != nil && n.Left().Op() == ir.OTYPESW { typecheckTypeSwitch(n) } else { typecheckExprSwitch(n) @@ -25,27 +25,27 @@ func typecheckswitch(n *ir.Node) { } func typecheckTypeSwitch(n *ir.Node) { - n.Left.Right = typecheck(n.Left.Right, ctxExpr) - t := n.Left.Right.Type + n.Left().SetRight(typecheck(n.Left().Right(), ctxExpr)) + t := n.Left().Right().Type() if t != nil && !t.IsInterface() { - base.ErrorfAt(n.Pos, "cannot type switch on non-interface value %L", n.Left.Right) + base.ErrorfAt(n.Pos(), "cannot type switch on non-interface value %L", n.Left().Right()) t = nil } // We don't actually declare the type switch's guarded // declaration itself. So if there are no cases, we won't // notice that it went unused. 
- if v := n.Left.Left; v != nil && !ir.IsBlank(v) && n.List.Len() == 0 { - base.ErrorfAt(v.Pos, "%v declared but not used", v.Sym) + if v := n.Left().Left(); v != nil && !ir.IsBlank(v) && n.List().Len() == 0 { + base.ErrorfAt(v.Pos(), "%v declared but not used", v.Sym()) } var defCase, nilCase *ir.Node var ts typeSet - for _, ncase := range n.List.Slice() { - ls := ncase.List.Slice() + for _, ncase := range n.List().Slice() { + ls := ncase.List().Slice() if len(ls) == 0 { // default: if defCase != nil { - base.ErrorfAt(ncase.Pos, "multiple defaults in switch (first at %v)", ir.Line(defCase)) + base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase)) } else { defCase = ncase } @@ -54,7 +54,7 @@ func typecheckTypeSwitch(n *ir.Node) { for i := range ls { ls[i] = typecheck(ls[i], ctxExpr|ctxType) n1 := ls[i] - if t == nil || n1.Type == nil { + if t == nil || n1.Type() == nil { continue } @@ -63,36 +63,36 @@ func typecheckTypeSwitch(n *ir.Node) { switch { case ir.IsNil(n1): // case nil: if nilCase != nil { - base.ErrorfAt(ncase.Pos, "multiple nil cases in type switch (first at %v)", ir.Line(nilCase)) + base.ErrorfAt(ncase.Pos(), "multiple nil cases in type switch (first at %v)", ir.Line(nilCase)) } else { nilCase = ncase } - case n1.Op != ir.OTYPE: - base.ErrorfAt(ncase.Pos, "%L is not a type", n1) - case !n1.Type.IsInterface() && !implements(n1.Type, t, &missing, &have, &ptr) && !missing.Broke(): + case n1.Op() != ir.OTYPE: + base.ErrorfAt(ncase.Pos(), "%L is not a type", n1) + case !n1.Type().IsInterface() && !implements(n1.Type(), t, &missing, &have, &ptr) && !missing.Broke(): if have != nil && !have.Broke() { - base.ErrorfAt(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+ - " (wrong type for %v method)\n\thave %v%S\n\twant %v%S", n.Left.Right, n1.Type, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) + base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+ + " (wrong type for %v method)\n\thave %v%S\n\twant %v%S", n.Left().Right(), n1.Type(), missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) } else if ptr != 0 { - base.ErrorfAt(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+ - " (%v method has pointer receiver)", n.Left.Right, n1.Type, missing.Sym) + base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+ + " (%v method has pointer receiver)", n.Left().Right(), n1.Type(), missing.Sym) } else { - base.ErrorfAt(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+ - " (missing %v method)", n.Left.Right, n1.Type, missing.Sym) + base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+ + " (missing %v method)", n.Left().Right(), n1.Type(), missing.Sym) } } - if n1.Op == ir.OTYPE { - ts.add(ncase.Pos, n1.Type) + if n1.Op() == ir.OTYPE { + ts.add(ncase.Pos(), n1.Type()) } } - if ncase.Rlist.Len() != 0 { + if ncase.Rlist().Len() != 0 { // Assign the clause variable's type. vt := t if len(ls) == 1 { - if ls[0].Op == ir.OTYPE { - vt = ls[0].Type + if ls[0].Op() == ir.OTYPE { + vt = ls[0].Type() } else if !ir.IsNil(ls[0]) { // Invalid single-type case; // mark variable as broken. 
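// Illustrative sketch, not part of the patch: the clause-variable typing
// decided above, restated in source terms (case types invented for
// exposition):
//
//	switch v := x.(type) {
//	case *bytes.Buffer:       // single-type case: v has static type *bytes.Buffer
//	case nil:                 // v keeps the static type of x itself
//	case error, fmt.Stringer: // multi-type case: v also keeps the type of x
//	}
//
// Only a single-type case narrows v; every other shape leaves the clause
// variable with the switched interface type.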
@@ -100,8 +100,8 @@ func typecheckTypeSwitch(n *ir.Node) { } } - nvar := ncase.Rlist.First() - nvar.Type = vt + nvar := ncase.Rlist().First() + nvar.SetType(vt) if vt != nil { nvar = typecheck(nvar, ctxExpr|ctxAssign) } else { @@ -109,10 +109,10 @@ func typecheckTypeSwitch(n *ir.Node) { nvar.SetTypecheck(1) nvar.SetWalkdef(1) } - ncase.Rlist.SetFirst(nvar) + ncase.Rlist().SetFirst(nvar) } - typecheckslice(ncase.Nbody.Slice(), ctxStmt) + typecheckslice(ncase.Body().Slice(), ctxStmt) } } @@ -146,10 +146,10 @@ func (s *typeSet) add(pos src.XPos, typ *types.Type) { func typecheckExprSwitch(n *ir.Node) { t := types.Types[types.TBOOL] - if n.Left != nil { - n.Left = typecheck(n.Left, ctxExpr) - n.Left = defaultlit(n.Left, nil) - t = n.Left.Type + if n.Left() != nil { + n.SetLeft(typecheck(n.Left(), ctxExpr)) + n.SetLeft(defaultlit(n.Left(), nil)) + t = n.Left().Type() } var nilonly string @@ -164,9 +164,9 @@ func typecheckExprSwitch(n *ir.Node) { case !IsComparable(t): if t.IsStruct() { - base.ErrorfAt(n.Pos, "cannot switch on %L (struct containing %v cannot be compared)", n.Left, IncomparableField(t).Type) + base.ErrorfAt(n.Pos(), "cannot switch on %L (struct containing %v cannot be compared)", n.Left(), IncomparableField(t).Type) } else { - base.ErrorfAt(n.Pos, "cannot switch on %L", n.Left) + base.ErrorfAt(n.Pos(), "cannot switch on %L", n.Left()) } t = nil } @@ -174,11 +174,11 @@ func typecheckExprSwitch(n *ir.Node) { var defCase *ir.Node var cs constSet - for _, ncase := range n.List.Slice() { - ls := ncase.List.Slice() + for _, ncase := range n.List().Slice() { + ls := ncase.List().Slice() if len(ls) == 0 { // default: if defCase != nil { - base.ErrorfAt(ncase.Pos, "multiple defaults in switch (first at %v)", ir.Line(defCase)) + base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase)) } else { defCase = ncase } @@ -189,22 +189,22 @@ func typecheckExprSwitch(n *ir.Node) { ls[i] = typecheck(ls[i], ctxExpr) ls[i] = defaultlit(ls[i], t) n1 := ls[i] - if t == nil || n1.Type == nil { + if t == nil || n1.Type() == nil { continue } if nilonly != "" && !ir.IsNil(n1) { - base.ErrorfAt(ncase.Pos, "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left) - } else if t.IsInterface() && !n1.Type.IsInterface() && !IsComparable(n1.Type) { - base.ErrorfAt(ncase.Pos, "invalid case %L in switch (incomparable type)", n1) + base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left()) + } else if t.IsInterface() && !n1.Type().IsInterface() && !IsComparable(n1.Type()) { + base.ErrorfAt(ncase.Pos(), "invalid case %L in switch (incomparable type)", n1) } else { - op1, _ := assignop(n1.Type, t) - op2, _ := assignop(t, n1.Type) + op1, _ := assignop(n1.Type(), t) + op2, _ := assignop(t, n1.Type()) if op1 == ir.OXXX && op2 == ir.OXXX { - if n.Left != nil { - base.ErrorfAt(ncase.Pos, "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left, n1.Type, t) + if n.Left() != nil { + base.ErrorfAt(ncase.Pos(), "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left(), n1.Type(), t) } else { - base.ErrorfAt(ncase.Pos, "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type) + base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type()) } } } @@ -215,23 +215,23 @@ func typecheckExprSwitch(n *ir.Node) { // case GOARCH == "arm" && GOARM == "5": // case GOARCH == "arm": // which would both evaluate to false for non-ARM compiles. 
- if !n1.Type.IsBoolean() { - cs.add(ncase.Pos, n1, "case", "switch") + if !n1.Type().IsBoolean() { + cs.add(ncase.Pos(), n1, "case", "switch") } } - typecheckslice(ncase.Nbody.Slice(), ctxStmt) + typecheckslice(ncase.Body().Slice(), ctxStmt) } } // walkswitch walks a switch statement. func walkswitch(sw *ir.Node) { // Guard against double walk, see #25776. - if sw.List.Len() == 0 && sw.Nbody.Len() > 0 { + if sw.List().Len() == 0 && sw.Body().Len() > 0 { return // Was fatal, but eliminating every possible source of double-walking is hard } - if sw.Left != nil && sw.Left.Op == ir.OTYPESW { + if sw.Left() != nil && sw.Left().Op() == ir.OTYPESW { walkTypeSwitch(sw) } else { walkExprSwitch(sw) @@ -243,8 +243,8 @@ func walkswitch(sw *ir.Node) { func walkExprSwitch(sw *ir.Node) { lno := setlineno(sw) - cond := sw.Left - sw.Left = nil + cond := sw.Left() + sw.SetLeft(nil) // convert switch {...} to switch true {...} if cond == nil { @@ -260,13 +260,13 @@ func walkExprSwitch(sw *ir.Node) { // because walkexpr will lower the string // conversion into a runtime call. // See issue 24937 for more discussion. - if cond.Op == ir.OBYTES2STR && allCaseExprsAreSideEffectFree(sw) { - cond.Op = ir.OBYTES2STRTMP + if cond.Op() == ir.OBYTES2STR && allCaseExprsAreSideEffectFree(sw) { + cond.SetOp(ir.OBYTES2STRTMP) } - cond = walkexpr(cond, &sw.Ninit) - if cond.Op != ir.OLITERAL && cond.Op != ir.ONIL { - cond = copyexpr(cond, cond.Type, &sw.Nbody) + cond = walkexpr(cond, sw.PtrInit()) + if cond.Op() != ir.OLITERAL && cond.Op() != ir.ONIL { + cond = copyexpr(cond, cond.Type(), sw.PtrBody()) } base.Pos = lno @@ -277,43 +277,43 @@ func walkExprSwitch(sw *ir.Node) { var defaultGoto *ir.Node var body ir.Nodes - for _, ncase := range sw.List.Slice() { + for _, ncase := range sw.List().Slice() { label := autolabel(".s") - jmp := npos(ncase.Pos, nodSym(ir.OGOTO, nil, label)) + jmp := npos(ncase.Pos(), nodSym(ir.OGOTO, nil, label)) // Process case dispatch. - if ncase.List.Len() == 0 { + if ncase.List().Len() == 0 { if defaultGoto != nil { base.Fatalf("duplicate default case not detected during typechecking") } defaultGoto = jmp } - for _, n1 := range ncase.List.Slice() { - s.Add(ncase.Pos, n1, jmp) + for _, n1 := range ncase.List().Slice() { + s.Add(ncase.Pos(), n1, jmp) } // Process body. - body.Append(npos(ncase.Pos, nodSym(ir.OLABEL, nil, label))) - body.Append(ncase.Nbody.Slice()...) - if fall, pos := hasFall(ncase.Nbody.Slice()); !fall { + body.Append(npos(ncase.Pos(), nodSym(ir.OLABEL, nil, label))) + body.Append(ncase.Body().Slice()...) + if fall, pos := hasFall(ncase.Body().Slice()); !fall { br := ir.Nod(ir.OBREAK, nil, nil) - br.Pos = pos + br.SetPos(pos) body.Append(br) } } - sw.List.Set(nil) + sw.PtrList().Set(nil) if defaultGoto == nil { br := ir.Nod(ir.OBREAK, nil, nil) - br.Pos = br.Pos.WithNotStmt() + br.SetPos(br.Pos().WithNotStmt()) defaultGoto = br } - s.Emit(&sw.Nbody) - sw.Nbody.Append(defaultGoto) - sw.Nbody.AppendNodes(&body) - walkstmtlist(sw.Nbody.Slice()) + s.Emit(sw.PtrBody()) + sw.PtrBody().Append(defaultGoto) + sw.PtrBody().AppendNodes(&body) + walkstmtlist(sw.Body().Slice()) } // An exprSwitch walks an expression switch. 
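// Illustrative sketch, not part of the patch: walkExprSwitch above turns
//
//	switch x := f(); x {
//	case 1:
//		a()
//	case 2, 3:
//		b()
//	default:
//		c()
//	}
//
// into label/goto dispatch along the lines of
//
//	x := f()
//	if x == 1 { goto s0 }
//	if x == 2 || x == 3 { goto s1 }
//	goto def
//	s0: a(); goto brk
//	s1: b(); goto brk
//	def: c(); goto brk
//	brk:
//
// with labels minted by autolabel(".s"); exprSwitch.flush (below) then
// merges consecutive integer cases and binary-searches large case sets.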
@@ -332,7 +332,7 @@ type exprClause struct { func (s *exprSwitch) Add(pos src.XPos, expr, jmp *ir.Node) { c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp} - if okforcmp[s.exprname.Type.Etype] && expr.Op == ir.OLITERAL { + if okforcmp[s.exprname.Type().Etype] && expr.Op() == ir.OLITERAL { s.clauses = append(s.clauses, c) return } @@ -359,7 +359,7 @@ func (s *exprSwitch) flush() { // (e.g., sort.Slice doesn't need to invoke the less function // when there's only a single slice element). - if s.exprname.Type.IsString() && len(cc) >= 2 { + if s.exprname.Type().IsString() && len(cc) >= 2 { // Sort strings by length and then by value. It is // much cheaper to compare lengths than values, and // all we need here is consistency. We respect this @@ -395,8 +395,8 @@ func (s *exprSwitch) flush() { }, func(i int, nif *ir.Node) { run := runs[i] - nif.Left = ir.Nod(ir.OEQ, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(run))) - s.search(run, &nif.Nbody) + nif.SetLeft(ir.Nod(ir.OEQ, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(run)))) + s.search(run, nif.PtrBody()) }, ) return @@ -407,7 +407,7 @@ func (s *exprSwitch) flush() { }) // Merge consecutive integer cases. - if s.exprname.Type.IsInteger() { + if s.exprname.Type().IsInteger() { merged := cc[:1] for _, c := range cc[1:] { last := &merged[len(merged)-1] @@ -430,8 +430,8 @@ func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) { }, func(i int, nif *ir.Node) { c := &cc[i] - nif.Left = c.test(s.exprname) - nif.Nbody.Set1(c.jmp) + nif.SetLeft(c.test(s.exprname)) + nif.PtrBody().Set1(c.jmp) }, ) } @@ -445,7 +445,7 @@ func (c *exprClause) test(exprname *ir.Node) *ir.Node { } // Optimize "switch true { ...}" and "switch false { ... }". - if ir.IsConst(exprname, constant.Bool) && !c.lo.Type.IsInterface() { + if ir.IsConst(exprname, constant.Bool) && !c.lo.Type().IsInterface() { if exprname.BoolVal() { return c.lo } else { @@ -464,12 +464,12 @@ func allCaseExprsAreSideEffectFree(sw *ir.Node) bool { // Restricting to constants is simple and probably powerful // enough. - for _, ncase := range sw.List.Slice() { - if ncase.Op != ir.OCASE { - base.Fatalf("switch string(byteslice) bad op: %v", ncase.Op) + for _, ncase := range sw.List().Slice() { + if ncase.Op() != ir.OCASE { + base.Fatalf("switch string(byteslice) bad op: %v", ncase.Op()) } - for _, v := range ncase.List.Slice() { - if v.Op != ir.OLITERAL { + for _, v := range ncase.List().Slice() { + if v.Op() != ir.OLITERAL { return false } } @@ -486,24 +486,24 @@ func hasFall(stmts []*ir.Node) (bool, src.XPos) { // nodes will be at the end of the list. i := len(stmts) - 1 - for i >= 0 && stmts[i].Op == ir.OVARKILL { + for i >= 0 && stmts[i].Op() == ir.OVARKILL { i-- } if i < 0 { return false, src.NoXPos } - return stmts[i].Op == ir.OFALL, stmts[i].Pos + return stmts[i].Op() == ir.OFALL, stmts[i].Pos() } // walkTypeSwitch generates an AST that implements sw, where sw is a // type switch. func walkTypeSwitch(sw *ir.Node) { var s typeSwitch - s.facename = sw.Left.Right - sw.Left = nil + s.facename = sw.Left().Right() + sw.SetLeft(nil) - s.facename = walkexpr(s.facename, &sw.Ninit) - s.facename = copyexpr(s.facename, s.facename.Type, &sw.Nbody) + s.facename = walkexpr(s.facename, sw.PtrInit()) + s.facename = copyexpr(s.facename, s.facename.Type(), sw.PtrBody()) s.okname = temp(types.Types[types.TBOOL]) // Get interface descriptor word. @@ -518,54 +518,54 @@ func walkTypeSwitch(sw *ir.Node) { // h := e._type.hash // Use a similar strategy for non-empty interfaces. 
ifNil := ir.Nod(ir.OIF, nil, nil) - ifNil.Left = ir.Nod(ir.OEQ, itab, nodnil()) + ifNil.SetLeft(ir.Nod(ir.OEQ, itab, nodnil())) base.Pos = base.Pos.WithNotStmt() // disable statement marks after the first check. - ifNil.Left = typecheck(ifNil.Left, ctxExpr) - ifNil.Left = defaultlit(ifNil.Left, nil) + ifNil.SetLeft(typecheck(ifNil.Left(), ctxExpr)) + ifNil.SetLeft(defaultlit(ifNil.Left(), nil)) // ifNil.Nbody assigned at end. - sw.Nbody.Append(ifNil) + sw.PtrBody().Append(ifNil) // Load hash from type or itab. dotHash := nodSym(ir.ODOTPTR, itab, nil) - dotHash.Type = types.Types[types.TUINT32] + dotHash.SetType(types.Types[types.TUINT32]) dotHash.SetTypecheck(1) - if s.facename.Type.IsEmptyInterface() { - dotHash.Xoffset = int64(2 * Widthptr) // offset of hash in runtime._type + if s.facename.Type().IsEmptyInterface() { + dotHash.SetOffset(int64(2 * Widthptr)) // offset of hash in runtime._type } else { - dotHash.Xoffset = int64(2 * Widthptr) // offset of hash in runtime.itab + dotHash.SetOffset(int64(2 * Widthptr)) // offset of hash in runtime.itab } dotHash.SetBounded(true) // guaranteed not to fault - s.hashname = copyexpr(dotHash, dotHash.Type, &sw.Nbody) + s.hashname = copyexpr(dotHash, dotHash.Type(), sw.PtrBody()) br := ir.Nod(ir.OBREAK, nil, nil) var defaultGoto, nilGoto *ir.Node var body ir.Nodes - for _, ncase := range sw.List.Slice() { + for _, ncase := range sw.List().Slice() { var caseVar *ir.Node - if ncase.Rlist.Len() != 0 { - caseVar = ncase.Rlist.First() + if ncase.Rlist().Len() != 0 { + caseVar = ncase.Rlist().First() } // For single-type cases with an interface type, // we initialize the case variable as part of the type assertion. // In other cases, we initialize it in the body. var singleType *types.Type - if ncase.List.Len() == 1 && ncase.List.First().Op == ir.OTYPE { - singleType = ncase.List.First().Type + if ncase.List().Len() == 1 && ncase.List().First().Op() == ir.OTYPE { + singleType = ncase.List().First().Type() } caseVarInitialized := false label := autolabel(".s") - jmp := npos(ncase.Pos, nodSym(ir.OGOTO, nil, label)) + jmp := npos(ncase.Pos(), nodSym(ir.OGOTO, nil, label)) - if ncase.List.Len() == 0 { // default: + if ncase.List().Len() == 0 { // default: if defaultGoto != nil { base.Fatalf("duplicate default case not detected during typechecking") } defaultGoto = jmp } - for _, n1 := range ncase.List.Slice() { + for _, n1 := range ncase.List().Slice() { if ir.IsNil(n1) { // case nil: if nilGoto != nil { base.Fatalf("duplicate nil case not detected during typechecking") @@ -575,14 +575,14 @@ func walkTypeSwitch(sw *ir.Node) { } if singleType != nil && singleType.IsInterface() { - s.Add(ncase.Pos, n1.Type, caseVar, jmp) + s.Add(ncase.Pos(), n1.Type(), caseVar, jmp) caseVarInitialized = true } else { - s.Add(ncase.Pos, n1.Type, nil, jmp) + s.Add(ncase.Pos(), n1.Type(), nil, jmp) } } - body.Append(npos(ncase.Pos, nodSym(ir.OLABEL, nil, label))) + body.Append(npos(ncase.Pos(), nodSym(ir.OLABEL, nil, label))) if caseVar != nil && !caseVarInitialized { val := s.facename if singleType != nil { @@ -590,19 +590,19 @@ func walkTypeSwitch(sw *ir.Node) { if singleType.IsInterface() { base.Fatalf("singleType interface should have been handled in Add") } - val = ifaceData(ncase.Pos, s.facename, singleType) + val = ifaceData(ncase.Pos(), s.facename, singleType) } l := []*ir.Node{ - ir.NodAt(ncase.Pos, ir.ODCL, caseVar, nil), - ir.NodAt(ncase.Pos, ir.OAS, caseVar, val), + ir.NodAt(ncase.Pos(), ir.ODCL, caseVar, nil), + ir.NodAt(ncase.Pos(), ir.OAS, caseVar, val), } 
typecheckslice(l, ctxStmt) body.Append(l...) } - body.Append(ncase.Nbody.Slice()...) + body.Append(ncase.Body().Slice()...) body.Append(br) } - sw.List.Set(nil) + sw.PtrList().Set(nil) if defaultGoto == nil { defaultGoto = br @@ -610,13 +610,13 @@ func walkTypeSwitch(sw *ir.Node) { if nilGoto == nil { nilGoto = defaultGoto } - ifNil.Nbody.Set1(nilGoto) + ifNil.PtrBody().Set1(nilGoto) - s.Emit(&sw.Nbody) - sw.Nbody.Append(defaultGoto) - sw.Nbody.AppendNodes(&body) + s.Emit(sw.PtrBody()) + sw.PtrBody().Append(defaultGoto) + sw.PtrBody().AppendNodes(&body) - walkstmtlist(sw.Nbody.Slice()) + walkstmtlist(sw.Body().Slice()) } // A typeSwitch walks a type switch. @@ -650,18 +650,18 @@ func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp *ir.Node) { // cv, ok = iface.(type) as := ir.NodAt(pos, ir.OAS2, nil, nil) - as.List.Set2(caseVar, s.okname) // cv, ok = + as.PtrList().Set2(caseVar, s.okname) // cv, ok = dot := ir.NodAt(pos, ir.ODOTTYPE, s.facename, nil) - dot.Type = typ // iface.(type) - as.Rlist.Set1(dot) + dot.SetType(typ) // iface.(type) + as.PtrRlist().Set1(dot) as = typecheck(as, ctxStmt) as = walkexpr(as, &body) body.Append(as) // if ok { goto label } nif := ir.NodAt(pos, ir.OIF, nil, nil) - nif.Left = s.okname - nif.Nbody.Set1(jmp) + nif.SetLeft(s.okname) + nif.PtrBody().Set1(jmp) body.Append(nif) if !typ.IsInterface() { @@ -710,8 +710,8 @@ func (s *typeSwitch) flush() { // TODO(mdempsky): Omit hash equality check if // there's only one type. c := cc[i] - nif.Left = ir.Nod(ir.OEQ, s.hashname, nodintconst(int64(c.hash))) - nif.Nbody.AppendNodes(&c.body) + nif.SetLeft(ir.Nod(ir.OEQ, s.hashname, nodintconst(int64(c.hash)))) + nif.PtrBody().AppendNodes(&c.body) }, ) } @@ -736,22 +736,22 @@ func binarySearch(n int, out *ir.Nodes, less func(i int) *ir.Node, leaf func(i i nif := ir.Nod(ir.OIF, nil, nil) leaf(i, nif) base.Pos = base.Pos.WithNotStmt() - nif.Left = typecheck(nif.Left, ctxExpr) - nif.Left = defaultlit(nif.Left, nil) + nif.SetLeft(typecheck(nif.Left(), ctxExpr)) + nif.SetLeft(defaultlit(nif.Left(), nil)) out.Append(nif) - out = &nif.Rlist + out = nif.PtrRlist() } return } half := lo + n/2 nif := ir.Nod(ir.OIF, nil, nil) - nif.Left = less(half) + nif.SetLeft(less(half)) base.Pos = base.Pos.WithNotStmt() - nif.Left = typecheck(nif.Left, ctxExpr) - nif.Left = defaultlit(nif.Left, nil) - do(lo, half, &nif.Nbody) - do(half, hi, &nif.Rlist) + nif.SetLeft(typecheck(nif.Left(), ctxExpr)) + nif.SetLeft(defaultlit(nif.Left(), nil)) + do(lo, half, nif.PtrBody()) + do(half, hi, nif.PtrRlist()) out.Append(nif) } diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 318f315f16cbe..4bc7f035f5090 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -27,8 +27,8 @@ func tracePrint(title string, n *ir.Node) func(np **ir.Node) { var pos, op string var tc uint8 if n != nil { - pos = base.FmtPos(n.Pos) - op = n.Op.String() + pos = base.FmtPos(n.Pos()) + op = n.Op().String() tc = n.Typecheck() } @@ -50,10 +50,10 @@ func tracePrint(title string, n *ir.Node) func(np **ir.Node) { var tc uint8 var typ *types.Type if n != nil { - pos = base.FmtPos(n.Pos) - op = n.Op.String() + pos = base.FmtPos(n.Pos()) + op = n.Op().String() tc = n.Typecheck() - typ = n.Type + typ = n.Type() } skipDowidthForTracing = true @@ -81,7 +81,7 @@ var typecheckdefstack []*ir.Node // resolve ONONAME to definition, if any. 
func resolve(n *ir.Node) (res *ir.Node) { - if n == nil || n.Op != ir.ONONAME { + if n == nil || n.Op() != ir.ONONAME { return n } @@ -90,7 +90,7 @@ func resolve(n *ir.Node) (res *ir.Node) { defer tracePrint("resolve", n)(&res) } - if n.Sym.Pkg != ir.LocalPkg { + if n.Sym().Pkg != ir.LocalPkg { if inimport { base.Fatalf("recursive inimport") } @@ -100,12 +100,12 @@ func resolve(n *ir.Node) (res *ir.Node) { return n } - r := ir.AsNode(n.Sym.Def) + r := ir.AsNode(n.Sym().Def) if r == nil { return n } - if r.Op == ir.OIOTA { + if r.Op() == ir.OIOTA { if x := getIotaValue(); x >= 0 { return nodintconst(x) } @@ -181,7 +181,7 @@ func cycleFor(start *ir.Node) []*ir.Node { // collect all nodes with same Op var cycle []*ir.Node for _, n := range typecheck_tcstack[i:] { - if n.Op == start.Op { + if n.Op() == start.Op() { cycle = append(cycle, n) } } @@ -220,8 +220,8 @@ func typecheck(n *ir.Node, top int) (res *ir.Node) { lno := setlineno(n) // Skip over parens. - for n.Op == ir.OPAREN { - n = n.Left + for n.Op() == ir.OPAREN { + n = n.Left() } // Resolve definition of name and value of iota lazily. @@ -230,7 +230,7 @@ func typecheck(n *ir.Node, top int) (res *ir.Node) { // Skip typecheck if already done. // But re-typecheck ONAME/OTYPE/OLITERAL/OPACK node in case context has changed. if n.Typecheck() == 1 { - switch n.Op { + switch n.Op() { case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.OPACK: break @@ -243,7 +243,7 @@ func typecheck(n *ir.Node, top int) (res *ir.Node) { if n.Typecheck() == 2 { // Typechecking loop. Trying printing a meaningful message, // otherwise a stack trace of typechecking. - switch n.Op { + switch n.Op() { // We can already diagnose variables used as types. case ir.ONAME: if top&(ctxExpr|ctxType) == ctxType { @@ -259,20 +259,20 @@ func typecheck(n *ir.Node, top int) (res *ir.Node) { // are substituted. cycle := cycleFor(n) for _, n1 := range cycle { - if n1.Name != nil && !n1.Name.Param.Alias() { + if n1.Name() != nil && !n1.Name().Param.Alias() { // Cycle is ok. But if n is an alias type and doesn't // have a type yet, we have a recursive type declaration // with aliases that we can't handle properly yet. // Report an error rather than crashing later. - if n.Name != nil && n.Name.Param.Alias() && n.Type == nil { - base.Pos = n.Pos + if n.Name() != nil && n.Name().Param.Alias() && n.Type() == nil { + base.Pos = n.Pos() base.Fatalf("cannot handle alias type declaration (issue #25838): %v", n) } base.Pos = lno return n } } - base.ErrorfAt(n.Pos, "invalid recursive type alias %v%s", n, cycleTrace(cycle)) + base.ErrorfAt(n.Pos(), "invalid recursive type alias %v%s", n, cycleTrace(cycle)) } case ir.OLITERAL: @@ -280,7 +280,7 @@ func typecheck(n *ir.Node, top int) (res *ir.Node) { base.Errorf("%v is not a type", n) break } - base.ErrorfAt(n.Pos, "constant definition loop%s", cycleTrace(cycleFor(n))) + base.ErrorfAt(n.Pos(), "constant definition loop%s", cycleTrace(cycleFor(n))) } if base.Errors() == 0 { @@ -318,7 +318,7 @@ func typecheck(n *ir.Node, top int) (res *ir.Node) { // The result of indexlit MUST be assigned back to n, e.g. 
// n.Left = indexlit(n.Left) func indexlit(n *ir.Node) *ir.Node { - if n != nil && n.Type != nil && n.Type.Etype == types.TIDEAL { + if n != nil && n.Type() != nil && n.Type().Etype == types.TIDEAL { return defaultlit(n, types.Types[types.TINT]) } return n @@ -331,38 +331,38 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { defer tracePrint("typecheck1", n)(&res) } - switch n.Op { + switch n.Op() { case ir.OLITERAL, ir.ONAME, ir.ONONAME, ir.OTYPE: - if n.Sym == nil { + if n.Sym() == nil { break } - if n.Op == ir.ONAME && n.SubOp() != 0 && top&ctxCallee == 0 { - base.Errorf("use of builtin %v not in function call", n.Sym) - n.Type = nil + if n.Op() == ir.ONAME && n.SubOp() != 0 && top&ctxCallee == 0 { + base.Errorf("use of builtin %v not in function call", n.Sym()) + n.SetType(nil) return n } typecheckdef(n) - if n.Op == ir.ONONAME { - n.Type = nil + if n.Op() == ir.ONONAME { + n.SetType(nil) return n } } ok := 0 - switch n.Op { + switch n.Op() { // until typecheck is complete, do nothing. default: ir.Dump("typecheck", n) - base.Fatalf("typecheck %v", n.Op) + base.Fatalf("typecheck %v", n.Op()) // names case ir.OLITERAL: ok |= ctxExpr - if n.Type == nil && n.Val().Kind() == constant.String { + if n.Type() == nil && n.Val().Kind() == constant.String { base.Fatalf("string literal missing type") } @@ -370,8 +370,8 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { ok |= ctxExpr case ir.ONAME: - if n.Name.Decldepth == 0 { - n.Name.Decldepth = decldepth + if n.Name().Decldepth == 0 { + n.Name().Decldepth = decldepth } if n.SubOp() != 0 { ok |= ctxCallee @@ -382,18 +382,18 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { // not a write to the variable if ir.IsBlank(n) { base.Errorf("cannot use _ as value") - n.Type = nil + n.SetType(nil) return n } - n.Name.SetUsed(true) + n.Name().SetUsed(true) } ok |= ctxExpr case ir.OPACK: - base.Errorf("use of package %v without selector", n.Sym) - n.Type = nil + base.Errorf("use of package %v without selector", n.Sym()) + n.SetType(nil) return n case ir.ODDD: @@ -403,142 +403,142 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { case ir.OTYPE: ok |= ctxType - if n.Type == nil { + if n.Type() == nil { return n } case ir.OTARRAY: ok |= ctxType - r := typecheck(n.Right, ctxType) - if r.Type == nil { - n.Type = nil + r := typecheck(n.Right(), ctxType) + if r.Type() == nil { + n.SetType(nil) return n } var t *types.Type - if n.Left == nil { - t = types.NewSlice(r.Type) - } else if n.Left.Op == ir.ODDD { + if n.Left() == nil { + t = types.NewSlice(r.Type()) + } else if n.Left().Op() == ir.ODDD { if !n.Diag() { n.SetDiag(true) base.Errorf("use of [...] array outside of array literal") } - n.Type = nil + n.SetType(nil) return n } else { - n.Left = indexlit(typecheck(n.Left, ctxExpr)) - l := n.Left + n.SetLeft(indexlit(typecheck(n.Left(), ctxExpr))) + l := n.Left() if ir.ConstType(l) != constant.Int { switch { - case l.Type == nil: + case l.Type() == nil: // Error already reported elsewhere. 
- case l.Type.IsInteger() && l.Op != ir.OLITERAL: + case l.Type().IsInteger() && l.Op() != ir.OLITERAL: base.Errorf("non-constant array bound %v", l) default: base.Errorf("invalid array bound %v", l) } - n.Type = nil + n.SetType(nil) return n } v := l.Val() if doesoverflow(v, types.Types[types.TINT]) { base.Errorf("array bound is too large") - n.Type = nil + n.SetType(nil) return n } if constant.Sign(v) < 0 { base.Errorf("array bound must be non-negative") - n.Type = nil + n.SetType(nil) return n } bound, _ := constant.Int64Val(v) - t = types.NewArray(r.Type, bound) + t = types.NewArray(r.Type(), bound) } setTypeNode(n, t) - n.Left = nil - n.Right = nil + n.SetLeft(nil) + n.SetRight(nil) checkwidth(t) case ir.OTMAP: ok |= ctxType - n.Left = typecheck(n.Left, ctxType) - n.Right = typecheck(n.Right, ctxType) - l := n.Left - r := n.Right - if l.Type == nil || r.Type == nil { - n.Type = nil + n.SetLeft(typecheck(n.Left(), ctxType)) + n.SetRight(typecheck(n.Right(), ctxType)) + l := n.Left() + r := n.Right() + if l.Type() == nil || r.Type() == nil { + n.SetType(nil) return n } - if l.Type.NotInHeap() { + if l.Type().NotInHeap() { base.Errorf("incomplete (or unallocatable) map key not allowed") } - if r.Type.NotInHeap() { + if r.Type().NotInHeap() { base.Errorf("incomplete (or unallocatable) map value not allowed") } - setTypeNode(n, types.NewMap(l.Type, r.Type)) + setTypeNode(n, types.NewMap(l.Type(), r.Type())) mapqueue = append(mapqueue, n) // check map keys when all types are settled - n.Left = nil - n.Right = nil + n.SetLeft(nil) + n.SetRight(nil) case ir.OTCHAN: ok |= ctxType - n.Left = typecheck(n.Left, ctxType) - l := n.Left - if l.Type == nil { - n.Type = nil + n.SetLeft(typecheck(n.Left(), ctxType)) + l := n.Left() + if l.Type() == nil { + n.SetType(nil) return n } - if l.Type.NotInHeap() { + if l.Type().NotInHeap() { base.Errorf("chan of incomplete (or unallocatable) type not allowed") } - setTypeNode(n, types.NewChan(l.Type, n.TChanDir())) - n.Left = nil + setTypeNode(n, types.NewChan(l.Type(), n.TChanDir())) + n.SetLeft(nil) n.ResetAux() case ir.OTSTRUCT: ok |= ctxType - setTypeNode(n, tostruct(n.List.Slice())) - n.List.Set(nil) + setTypeNode(n, tostruct(n.List().Slice())) + n.PtrList().Set(nil) case ir.OTINTER: ok |= ctxType - setTypeNode(n, tointerface(n.List.Slice())) + setTypeNode(n, tointerface(n.List().Slice())) case ir.OTFUNC: ok |= ctxType - setTypeNode(n, functype(n.Left, n.List.Slice(), n.Rlist.Slice())) - n.Left = nil - n.List.Set(nil) - n.Rlist.Set(nil) + setTypeNode(n, functype(n.Left(), n.List().Slice(), n.Rlist().Slice())) + n.SetLeft(nil) + n.PtrList().Set(nil) + n.PtrRlist().Set(nil) // type or expr case ir.ODEREF: - n.Left = typecheck(n.Left, ctxExpr|ctxType) - l := n.Left - t := l.Type + n.SetLeft(typecheck(n.Left(), ctxExpr|ctxType)) + l := n.Left() + t := l.Type() if t == nil { - n.Type = nil + n.SetType(nil) return n } - if l.Op == ir.OTYPE { + if l.Op() == ir.OTYPE { ok |= ctxType - setTypeNode(n, types.NewPtr(l.Type)) - n.Left = nil + setTypeNode(n, types.NewPtr(l.Type())) + n.SetLeft(nil) // Ensure l.Type gets dowidth'd for the backend. Issue 20174. 
- checkwidth(l.Type) + checkwidth(l.Type()) break } if !t.IsPtr() { if top&(ctxExpr|ctxStmt) != 0 { - base.Errorf("invalid indirect of %L", n.Left) - n.Type = nil + base.Errorf("invalid indirect of %L", n.Left()) + n.SetType(nil) return n } @@ -546,7 +546,7 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { } ok |= ctxExpr - n.Type = t.Elem() + n.SetType(t.Elem()) // arithmetic exprs case ir.OASOP, @@ -572,62 +572,62 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { var l *ir.Node var op ir.Op var r *ir.Node - if n.Op == ir.OASOP { + if n.Op() == ir.OASOP { ok |= ctxStmt - n.Left = typecheck(n.Left, ctxExpr) - n.Right = typecheck(n.Right, ctxExpr) - l = n.Left - r = n.Right - checkassign(n, n.Left) - if l.Type == nil || r.Type == nil { - n.Type = nil + n.SetLeft(typecheck(n.Left(), ctxExpr)) + n.SetRight(typecheck(n.Right(), ctxExpr)) + l = n.Left() + r = n.Right() + checkassign(n, n.Left()) + if l.Type() == nil || r.Type() == nil { + n.SetType(nil) return n } - if n.Implicit() && !okforarith[l.Type.Etype] { - base.Errorf("invalid operation: %v (non-numeric type %v)", n, l.Type) - n.Type = nil + if n.Implicit() && !okforarith[l.Type().Etype] { + base.Errorf("invalid operation: %v (non-numeric type %v)", n, l.Type()) + n.SetType(nil) return n } // TODO(marvin): Fix Node.EType type union. op = n.SubOp() } else { ok |= ctxExpr - n.Left = typecheck(n.Left, ctxExpr) - n.Right = typecheck(n.Right, ctxExpr) - l = n.Left - r = n.Right - if l.Type == nil || r.Type == nil { - n.Type = nil + n.SetLeft(typecheck(n.Left(), ctxExpr)) + n.SetRight(typecheck(n.Right(), ctxExpr)) + l = n.Left() + r = n.Right() + if l.Type() == nil || r.Type() == nil { + n.SetType(nil) return n } - op = n.Op + op = n.Op() } if op == ir.OLSH || op == ir.ORSH { r = defaultlit(r, types.Types[types.TUINT]) - n.Right = r - t := r.Type + n.SetRight(r) + t := r.Type() if !t.IsInteger() { - base.Errorf("invalid operation: %v (shift count type %v, must be integer)", n, r.Type) - n.Type = nil + base.Errorf("invalid operation: %v (shift count type %v, must be integer)", n, r.Type()) + n.SetType(nil) return n } if t.IsSigned() && !langSupported(1, 13, curpkg()) { - base.ErrorfVers("go1.13", "invalid operation: %v (signed shift count type %v)", n, r.Type) - n.Type = nil + base.ErrorfVers("go1.13", "invalid operation: %v (signed shift count type %v)", n, r.Type()) + n.SetType(nil) return n } - t = l.Type + t = l.Type() if t != nil && t.Etype != types.TIDEAL && !t.IsInteger() { base.Errorf("invalid operation: %v (shift of type %v)", n, t) - n.Type = nil + n.SetType(nil) return n } // no defaultlit for left // the outer context gives the type - n.Type = l.Type - if (l.Type == types.UntypedFloat || l.Type == types.UntypedComplex) && r.Op == ir.OLITERAL { - n.Type = types.UntypedInt + n.SetType(l.Type()) + if (l.Type() == types.UntypedFloat || l.Type() == types.UntypedComplex) && r.Op() == ir.OLITERAL { + n.SetType(types.UntypedInt) } break @@ -636,15 +636,15 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { // For "x == x && len(s)", it's better to report that "len(s)" (type int) // can't be used with "&&" than to report that "x == x" (type untyped bool) // can't be converted to int (see issue #41500). 
- if n.Op == ir.OANDAND || n.Op == ir.OOROR { - if !n.Left.Type.IsBoolean() { - base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(n.Left.Type)) - n.Type = nil + if n.Op() == ir.OANDAND || n.Op() == ir.OOROR { + if !n.Left().Type().IsBoolean() { + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Left().Type())) + n.SetType(nil) return n } - if !n.Right.Type.IsBoolean() { - base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(n.Right.Type)) - n.Type = nil + if !n.Right().Type().IsBoolean() { + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Right().Type())) + n.SetType(nil) return n } } @@ -652,22 +652,22 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { // ideal mixed with non-ideal l, r = defaultlit2(l, r, false) - n.Left = l - n.Right = r - if l.Type == nil || r.Type == nil { - n.Type = nil + n.SetLeft(l) + n.SetRight(r) + if l.Type() == nil || r.Type() == nil { + n.SetType(nil) return n } - t := l.Type + t := l.Type() if t.Etype == types.TIDEAL { - t = r.Type + t = r.Type() } et := t.Etype if et == types.TIDEAL { et = types.TINT } aop := ir.OXXX - if iscmp[n.Op] && t.Etype != types.TIDEAL && !types.Identical(l.Type, r.Type) { + if iscmp[n.Op()] && t.Etype != types.TIDEAL && !types.Identical(l.Type(), r.Type()) { // comparison is okay as long as one side is // assignable to the other. convert so they have // the same type. @@ -676,235 +676,235 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { // in that case, check comparability of the concrete type. // The conversion allocates, so only do it if the concrete type is huge. converted := false - if r.Type.Etype != types.TBLANK { - aop, _ = assignop(l.Type, r.Type) + if r.Type().Etype != types.TBLANK { + aop, _ = assignop(l.Type(), r.Type()) if aop != ir.OXXX { - if r.Type.IsInterface() && !l.Type.IsInterface() && !IsComparable(l.Type) { - base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type)) - n.Type = nil + if r.Type().IsInterface() && !l.Type().IsInterface() && !IsComparable(l.Type()) { + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type())) + n.SetType(nil) return n } - dowidth(l.Type) - if r.Type.IsInterface() == l.Type.IsInterface() || l.Type.Width >= 1<<16 { + dowidth(l.Type()) + if r.Type().IsInterface() == l.Type().IsInterface() || l.Type().Width >= 1<<16 { l = ir.Nod(aop, l, nil) - l.Type = r.Type + l.SetType(r.Type()) l.SetTypecheck(1) - n.Left = l + n.SetLeft(l) } - t = r.Type + t = r.Type() converted = true } } - if !converted && l.Type.Etype != types.TBLANK { - aop, _ = assignop(r.Type, l.Type) + if !converted && l.Type().Etype != types.TBLANK { + aop, _ = assignop(r.Type(), l.Type()) if aop != ir.OXXX { - if l.Type.IsInterface() && !r.Type.IsInterface() && !IsComparable(r.Type) { - base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type)) - n.Type = nil + if l.Type().IsInterface() && !r.Type().IsInterface() && !IsComparable(r.Type()) { + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type())) + n.SetType(nil) return n } - dowidth(r.Type) - if r.Type.IsInterface() == l.Type.IsInterface() || r.Type.Width >= 1<<16 { + dowidth(r.Type()) + if r.Type().IsInterface() == l.Type().IsInterface() || r.Type().Width >= 1<<16 { r = ir.Nod(aop, r, nil) - r.Type = l.Type + r.SetType(l.Type()) r.SetTypecheck(1) - n.Right = 
r + n.SetRight(r) } - t = l.Type + t = l.Type() } } et = t.Etype } - if t.Etype != types.TIDEAL && !types.Identical(l.Type, r.Type) { + if t.Etype != types.TIDEAL && !types.Identical(l.Type(), r.Type()) { l, r = defaultlit2(l, r, true) - if l.Type == nil || r.Type == nil { - n.Type = nil + if l.Type() == nil || r.Type() == nil { + n.SetType(nil) return n } - if l.Type.IsInterface() == r.Type.IsInterface() || aop == 0 { - base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type) - n.Type = nil + if l.Type().IsInterface() == r.Type().IsInterface() || aop == 0 { + base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type()) + n.SetType(nil) return n } } if t.Etype == types.TIDEAL { - t = mixUntyped(l.Type, r.Type) + t = mixUntyped(l.Type(), r.Type()) } if dt := defaultType(t); !okfor[op][dt.Etype] { base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t)) - n.Type = nil + n.SetType(nil) return n } // okfor allows any array == array, map == map, func == func. // restrict to slice/map/func == nil and nil == slice/map/func. - if l.Type.IsArray() && !IsComparable(l.Type) { - base.Errorf("invalid operation: %v (%v cannot be compared)", n, l.Type) - n.Type = nil + if l.Type().IsArray() && !IsComparable(l.Type()) { + base.Errorf("invalid operation: %v (%v cannot be compared)", n, l.Type()) + n.SetType(nil) return n } - if l.Type.IsSlice() && !ir.IsNil(l) && !ir.IsNil(r) { + if l.Type().IsSlice() && !ir.IsNil(l) && !ir.IsNil(r) { base.Errorf("invalid operation: %v (slice can only be compared to nil)", n) - n.Type = nil + n.SetType(nil) return n } - if l.Type.IsMap() && !ir.IsNil(l) && !ir.IsNil(r) { + if l.Type().IsMap() && !ir.IsNil(l) && !ir.IsNil(r) { base.Errorf("invalid operation: %v (map can only be compared to nil)", n) - n.Type = nil + n.SetType(nil) return n } - if l.Type.Etype == types.TFUNC && !ir.IsNil(l) && !ir.IsNil(r) { + if l.Type().Etype == types.TFUNC && !ir.IsNil(l) && !ir.IsNil(r) { base.Errorf("invalid operation: %v (func can only be compared to nil)", n) - n.Type = nil + n.SetType(nil) return n } - if l.Type.IsStruct() { - if f := IncomparableField(l.Type); f != nil { + if l.Type().IsStruct() { + if f := IncomparableField(l.Type()); f != nil { base.Errorf("invalid operation: %v (struct containing %v cannot be compared)", n, f.Type) - n.Type = nil + n.SetType(nil) return n } } - if iscmp[n.Op] { + if iscmp[n.Op()] { t = types.UntypedBool - n.Type = t + n.SetType(t) n = evalConst(n) - if n.Op != ir.OLITERAL { + if n.Op() != ir.OLITERAL { l, r = defaultlit2(l, r, true) - n.Left = l - n.Right = r + n.SetLeft(l) + n.SetRight(r) } } - if et == types.TSTRING && n.Op == ir.OADD { + if et == types.TSTRING && n.Op() == ir.OADD { // create or update OADDSTR node with list of strings in x + y + z + (w + v) + ... 
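Aside (illustrative sketch, not part of the patch; identifiers are arbitrary): the OADDSTR rewrite below flattens chained string concatenation into a single node whose List holds every operand, so the backend can size and allocate the result once rather than once per `+`.

	package main

	import "fmt"

	func main() {
		a, b, c, d := "x", "y", "z", "w"
		// Parsed as ((a+b)+c)+d; after this rewrite the compiler holds one
		// OADDSTR node with List = [a, b, c, d], so the concatenation is
		// done with a single allocation instead of three intermediates.
		fmt.Println(a + b + c + d)
	}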
- if l.Op == ir.OADDSTR { + if l.Op() == ir.OADDSTR { orig := n n = l - n.Pos = orig.Pos + n.SetPos(orig.Pos()) } else { - n = ir.NodAt(n.Pos, ir.OADDSTR, nil, nil) - n.List.Set1(l) + n = ir.NodAt(n.Pos(), ir.OADDSTR, nil, nil) + n.PtrList().Set1(l) } - if r.Op == ir.OADDSTR { - n.List.AppendNodes(&r.List) + if r.Op() == ir.OADDSTR { + n.PtrList().AppendNodes(r.PtrList()) } else { - n.List.Append(r) + n.PtrList().Append(r) } } if (op == ir.ODIV || op == ir.OMOD) && ir.IsConst(r, constant.Int) { if constant.Sign(r.Val()) == 0 { base.Errorf("division by zero") - n.Type = nil + n.SetType(nil) return n } } - n.Type = t + n.SetType(t) case ir.OBITNOT, ir.ONEG, ir.ONOT, ir.OPLUS: ok |= ctxExpr - n.Left = typecheck(n.Left, ctxExpr) - l := n.Left - t := l.Type + n.SetLeft(typecheck(n.Left(), ctxExpr)) + l := n.Left() + t := l.Type() if t == nil { - n.Type = nil + n.SetType(nil) return n } - if !okfor[n.Op][defaultType(t).Etype] { - base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(t)) - n.Type = nil + if !okfor[n.Op()][defaultType(t).Etype] { + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(t)) + n.SetType(nil) return n } - n.Type = t + n.SetType(t) // exprs case ir.OADDR: ok |= ctxExpr - n.Left = typecheck(n.Left, ctxExpr) - if n.Left.Type == nil { - n.Type = nil + n.SetLeft(typecheck(n.Left(), ctxExpr)) + if n.Left().Type() == nil { + n.SetType(nil) return n } - switch n.Left.Op { + switch n.Left().Op() { case ir.OARRAYLIT, ir.OMAPLIT, ir.OSLICELIT, ir.OSTRUCTLIT: - n.Op = ir.OPTRLIT + n.SetOp(ir.OPTRLIT) default: - checklvalue(n.Left, "take the address of") - r := outervalue(n.Left) - if r.Op == ir.ONAME { - if r.Orig != r { + checklvalue(n.Left(), "take the address of") + r := outervalue(n.Left()) + if r.Op() == ir.ONAME { + if r.Orig() != r { base.Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean? } - r.Name.SetAddrtaken(true) - if r.Name.IsClosureVar() && !capturevarscomplete { + r.Name().SetAddrtaken(true) + if r.Name().IsClosureVar() && !capturevarscomplete { // Mark the original variable as Addrtaken so that capturevars // knows not to pass it by value. // But if the capturevars phase is complete, don't touch it, // in case l.Name's containing function has not yet been compiled. 
- r.Name.Defn.Name.SetAddrtaken(true) + r.Name().Defn.Name().SetAddrtaken(true) } } - n.Left = defaultlit(n.Left, nil) - if n.Left.Type == nil { - n.Type = nil + n.SetLeft(defaultlit(n.Left(), nil)) + if n.Left().Type() == nil { + n.SetType(nil) return n } } - n.Type = types.NewPtr(n.Left.Type) + n.SetType(types.NewPtr(n.Left().Type())) case ir.OCOMPLIT: ok |= ctxExpr n = typecheckcomplit(n) - if n.Type == nil { + if n.Type() == nil { return n } case ir.OXDOT, ir.ODOT: - if n.Op == ir.OXDOT { + if n.Op() == ir.OXDOT { n = adddot(n) - n.Op = ir.ODOT - if n.Left == nil { - n.Type = nil + n.SetOp(ir.ODOT) + if n.Left() == nil { + n.SetType(nil) return n } } - n.Left = typecheck(n.Left, ctxExpr|ctxType) + n.SetLeft(typecheck(n.Left(), ctxExpr|ctxType)) - n.Left = defaultlit(n.Left, nil) + n.SetLeft(defaultlit(n.Left(), nil)) - t := n.Left.Type + t := n.Left().Type() if t == nil { - base.UpdateErrorDot(ir.Line(n), n.Left.String(), n.String()) - n.Type = nil + base.UpdateErrorDot(ir.Line(n), n.Left().String(), n.String()) + n.SetType(nil) return n } - s := n.Sym + s := n.Sym() - if n.Left.Op == ir.OTYPE { + if n.Left().Op() == ir.OTYPE { n = typecheckMethodExpr(n) - if n.Type == nil { + if n.Type() == nil { return n } ok = ctxExpr @@ -914,16 +914,16 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { if t.IsPtr() && !t.Elem().IsInterface() { t = t.Elem() if t == nil { - n.Type = nil + n.SetType(nil) return n } - n.Op = ir.ODOTPTR + n.SetOp(ir.ODOTPTR) checkwidth(t) } - if n.Sym.IsBlank() { + if n.Sym().IsBlank() { base.Errorf("cannot refer to blank field or method") - n.Type = nil + n.SetType(nil) return n } @@ -931,28 +931,28 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { // Legitimate field or method lookup failed, try to explain the error switch { case t.IsEmptyInterface(): - base.Errorf("%v undefined (type %v is interface with no methods)", n, n.Left.Type) + base.Errorf("%v undefined (type %v is interface with no methods)", n, n.Left().Type()) case t.IsPtr() && t.Elem().IsInterface(): // Pointer to interface is almost always a mistake. - base.Errorf("%v undefined (type %v is pointer to interface, not interface)", n, n.Left.Type) + base.Errorf("%v undefined (type %v is pointer to interface, not interface)", n, n.Left().Type()) case lookdot(n, t, 1) != nil: // Field or method matches by name, but it is not exported. - base.Errorf("%v undefined (cannot refer to unexported field or method %v)", n, n.Sym) + base.Errorf("%v undefined (cannot refer to unexported field or method %v)", n, n.Sym()) default: if mt := lookdot(n, t, 2); mt != nil && visible(mt.Sym) { // Case-insensitive lookup. 
- base.Errorf("%v undefined (type %v has no field or method %v, but does have %v)", n, n.Left.Type, n.Sym, mt.Sym) + base.Errorf("%v undefined (type %v has no field or method %v, but does have %v)", n, n.Left().Type(), n.Sym(), mt.Sym) } else { - base.Errorf("%v undefined (type %v has no field or method %v)", n, n.Left.Type, n.Sym) + base.Errorf("%v undefined (type %v has no field or method %v)", n, n.Left().Type(), n.Sym()) } } - n.Type = nil + n.SetType(nil) return n } - switch n.Op { + switch n.Op() { case ir.ODOTINTER, ir.ODOTMETH: if top&ctxCallee != 0 { ok |= ctxCallee @@ -967,74 +967,74 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { case ir.ODOTTYPE: ok |= ctxExpr - n.Left = typecheck(n.Left, ctxExpr) - n.Left = defaultlit(n.Left, nil) - l := n.Left - t := l.Type + n.SetLeft(typecheck(n.Left(), ctxExpr)) + n.SetLeft(defaultlit(n.Left(), nil)) + l := n.Left() + t := l.Type() if t == nil { - n.Type = nil + n.SetType(nil) return n } if !t.IsInterface() { base.Errorf("invalid type assertion: %v (non-interface type %v on left)", n, t) - n.Type = nil + n.SetType(nil) return n } - if n.Right != nil { - n.Right = typecheck(n.Right, ctxType) - n.Type = n.Right.Type - n.Right = nil - if n.Type == nil { + if n.Right() != nil { + n.SetRight(typecheck(n.Right(), ctxType)) + n.SetType(n.Right().Type()) + n.SetRight(nil) + if n.Type() == nil { return n } } - if n.Type != nil && !n.Type.IsInterface() { + if n.Type() != nil && !n.Type().IsInterface() { var missing, have *types.Field var ptr int - if !implements(n.Type, t, &missing, &have, &ptr) { + if !implements(n.Type(), t, &missing, &have, &ptr) { if have != nil && have.Sym == missing.Sym { base.Errorf("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+ - "\t\thave %v%0S\n\t\twant %v%0S", n.Type, t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) + "\t\thave %v%0S\n\t\twant %v%0S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) } else if ptr != 0 { - base.Errorf("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", n.Type, t, missing.Sym) + base.Errorf("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", n.Type(), t, missing.Sym) } else if have != nil { base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)\n"+ - "\t\thave %v%0S\n\t\twant %v%0S", n.Type, t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) + "\t\thave %v%0S\n\t\twant %v%0S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) } else { - base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)", n.Type, t, missing.Sym) + base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)", n.Type(), t, missing.Sym) } - n.Type = nil + n.SetType(nil) return n } } case ir.OINDEX: ok |= ctxExpr - n.Left = typecheck(n.Left, ctxExpr) - n.Left = defaultlit(n.Left, nil) - n.Left = implicitstar(n.Left) - l := n.Left - n.Right = typecheck(n.Right, ctxExpr) - r := n.Right - t := l.Type - if t == nil || r.Type == nil { - n.Type = nil + n.SetLeft(typecheck(n.Left(), ctxExpr)) + n.SetLeft(defaultlit(n.Left(), nil)) + n.SetLeft(implicitstar(n.Left())) + l := n.Left() + n.SetRight(typecheck(n.Right(), ctxExpr)) + r := n.Right() + t := l.Type() + if t == nil || r.Type() == nil { + n.SetType(nil) return n } switch t.Etype { default: base.Errorf("invalid operation: %v (type %v does not support indexing)", n, t) - n.Type = nil + 
n.SetType(nil) return n case types.TSTRING, types.TARRAY, types.TSLICE: - n.Right = indexlit(n.Right) + n.SetRight(indexlit(n.Right())) if t.IsString() { - n.Type = types.Bytetype + n.SetType(types.Bytetype) } else { - n.Type = t.Elem() + n.SetType(t.Elem()) } why := "string" if t.IsArray() { @@ -1043,83 +1043,83 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { why = "slice" } - if n.Right.Type != nil && !n.Right.Type.IsInteger() { - base.Errorf("non-integer %s index %v", why, n.Right) + if n.Right().Type() != nil && !n.Right().Type().IsInteger() { + base.Errorf("non-integer %s index %v", why, n.Right()) break } - if !n.Bounded() && ir.IsConst(n.Right, constant.Int) { - x := n.Right.Val() + if !n.Bounded() && ir.IsConst(n.Right(), constant.Int) { + x := n.Right().Val() if constant.Sign(x) < 0 { - base.Errorf("invalid %s index %v (index must be non-negative)", why, n.Right) + base.Errorf("invalid %s index %v (index must be non-negative)", why, n.Right()) } else if t.IsArray() && constant.Compare(x, token.GEQ, constant.MakeInt64(t.NumElem())) { - base.Errorf("invalid array index %v (out of bounds for %d-element array)", n.Right, t.NumElem()) - } else if ir.IsConst(n.Left, constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(n.Left.StringVal())))) { - base.Errorf("invalid string index %v (out of bounds for %d-byte string)", n.Right, len(n.Left.StringVal())) + base.Errorf("invalid array index %v (out of bounds for %d-element array)", n.Right(), t.NumElem()) + } else if ir.IsConst(n.Left(), constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(n.Left().StringVal())))) { + base.Errorf("invalid string index %v (out of bounds for %d-byte string)", n.Right(), len(n.Left().StringVal())) } else if doesoverflow(x, types.Types[types.TINT]) { - base.Errorf("invalid %s index %v (index too large)", why, n.Right) + base.Errorf("invalid %s index %v (index too large)", why, n.Right()) } } case types.TMAP: - n.Right = assignconv(n.Right, t.Key(), "map index") - n.Type = t.Elem() - n.Op = ir.OINDEXMAP + n.SetRight(assignconv(n.Right(), t.Key(), "map index")) + n.SetType(t.Elem()) + n.SetOp(ir.OINDEXMAP) n.ResetAux() } case ir.ORECV: ok |= ctxStmt | ctxExpr - n.Left = typecheck(n.Left, ctxExpr) - n.Left = defaultlit(n.Left, nil) - l := n.Left - t := l.Type + n.SetLeft(typecheck(n.Left(), ctxExpr)) + n.SetLeft(defaultlit(n.Left(), nil)) + l := n.Left() + t := l.Type() if t == nil { - n.Type = nil + n.SetType(nil) return n } if !t.IsChan() { base.Errorf("invalid operation: %v (receive from non-chan type %v)", n, t) - n.Type = nil + n.SetType(nil) return n } if !t.ChanDir().CanRecv() { base.Errorf("invalid operation: %v (receive from send-only type %v)", n, t) - n.Type = nil + n.SetType(nil) return n } - n.Type = t.Elem() + n.SetType(t.Elem()) case ir.OSEND: ok |= ctxStmt - n.Left = typecheck(n.Left, ctxExpr) - n.Right = typecheck(n.Right, ctxExpr) - n.Left = defaultlit(n.Left, nil) - t := n.Left.Type + n.SetLeft(typecheck(n.Left(), ctxExpr)) + n.SetRight(typecheck(n.Right(), ctxExpr)) + n.SetLeft(defaultlit(n.Left(), nil)) + t := n.Left().Type() if t == nil { - n.Type = nil + n.SetType(nil) return n } if !t.IsChan() { base.Errorf("invalid operation: %v (send to non-chan type %v)", n, t) - n.Type = nil + n.SetType(nil) return n } if !t.ChanDir().CanSend() { base.Errorf("invalid operation: %v (send to receive-only type %v)", n, t) - n.Type = nil + n.SetType(nil) return n } - n.Right = assignconv(n.Right, t.Elem(), "send") - if n.Right.Type == nil { - 
n.Type = nil + n.SetRight(assignconv(n.Right(), t.Elem(), "send")) + if n.Right().Type() == nil { + n.SetType(nil) return n } - n.Type = nil + n.SetType(nil) case ir.OSLICEHEADER: // Errors here are Fatalf instead of Errorf because only the compiler @@ -1128,26 +1128,26 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { // have already been typechecked in e.g. OMAKESLICE earlier. ok |= ctxExpr - t := n.Type + t := n.Type() if t == nil { base.Fatalf("no type specified for OSLICEHEADER") } if !t.IsSlice() { - base.Fatalf("invalid type %v for OSLICEHEADER", n.Type) + base.Fatalf("invalid type %v for OSLICEHEADER", n.Type()) } - if n.Left == nil || n.Left.Type == nil || !n.Left.Type.IsUnsafePtr() { + if n.Left() == nil || n.Left().Type() == nil || !n.Left().Type().IsUnsafePtr() { base.Fatalf("need unsafe.Pointer for OSLICEHEADER") } - if x := n.List.Len(); x != 2 { + if x := n.List().Len(); x != 2 { base.Fatalf("expected 2 params (len, cap) for OSLICEHEADER, got %d", x) } - n.Left = typecheck(n.Left, ctxExpr) - l := typecheck(n.List.First(), ctxExpr) - c := typecheck(n.List.Second(), ctxExpr) + n.SetLeft(typecheck(n.Left(), ctxExpr)) + l := typecheck(n.List().First(), ctxExpr) + c := typecheck(n.List().Second(), ctxExpr) l = defaultlit(l, types.Types[types.TINT]) c = defaultlit(c, types.Types[types.TINT]) @@ -1163,8 +1163,8 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { base.Fatalf("len larger than cap for OSLICEHEADER") } - n.List.SetFirst(l) - n.List.SetSecond(c) + n.List().SetFirst(l) + n.List().SetSecond(c) case ir.OMAKESLICECOPY: // Errors here are Fatalf instead of Errorf because only the compiler @@ -1173,145 +1173,145 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { // have already been typechecked in OMAKE and OCOPY earlier. 
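Aside (a sketch under the assumption, consistent with the comment above, that OMAKESLICECOPY is only ever synthesized by the compiler, for example by fusing a make that immediately feeds a copy): user code cannot spell this op, which is why malformed nodes here are internal errors (Fatalf) rather than user diagnostics (Errorf). Identifiers below are arbitrary.

	package main

	func clone(src []byte) []byte {
		// A make whose result is immediately the destination of a copy is
		// the kind of pattern the compiler may fuse into a single
		// OMAKESLICECOPY, allocating and filling the slice in one step.
		dst := make([]byte, len(src))
		copy(dst, src)
		return dst
	}

	func main() { _ = clone([]byte("abc")) }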
ok |= ctxExpr - t := n.Type + t := n.Type() if t == nil { base.Fatalf("no type specified for OMAKESLICECOPY") } if !t.IsSlice() { - base.Fatalf("invalid type %v for OMAKESLICECOPY", n.Type) + base.Fatalf("invalid type %v for OMAKESLICECOPY", n.Type()) } - if n.Left == nil { + if n.Left() == nil { base.Fatalf("missing len argument for OMAKESLICECOPY") } - if n.Right == nil { + if n.Right() == nil { base.Fatalf("missing slice argument to copy for OMAKESLICECOPY") } - n.Left = typecheck(n.Left, ctxExpr) - n.Right = typecheck(n.Right, ctxExpr) + n.SetLeft(typecheck(n.Left(), ctxExpr)) + n.SetRight(typecheck(n.Right(), ctxExpr)) - n.Left = defaultlit(n.Left, types.Types[types.TINT]) + n.SetLeft(defaultlit(n.Left(), types.Types[types.TINT])) - if !n.Left.Type.IsInteger() && n.Type.Etype != types.TIDEAL { + if !n.Left().Type().IsInteger() && n.Type().Etype != types.TIDEAL { base.Errorf("non-integer len argument in OMAKESLICECOPY") } - if ir.IsConst(n.Left, constant.Int) { - if doesoverflow(n.Left.Val(), types.Types[types.TINT]) { + if ir.IsConst(n.Left(), constant.Int) { + if doesoverflow(n.Left().Val(), types.Types[types.TINT]) { base.Fatalf("len for OMAKESLICECOPY too large") } - if constant.Sign(n.Left.Val()) < 0 { + if constant.Sign(n.Left().Val()) < 0 { base.Fatalf("len for OMAKESLICECOPY must be non-negative") } } case ir.OSLICE, ir.OSLICE3: ok |= ctxExpr - n.Left = typecheck(n.Left, ctxExpr) + n.SetLeft(typecheck(n.Left(), ctxExpr)) low, high, max := n.SliceBounds() - hasmax := n.Op.IsSlice3() + hasmax := n.Op().IsSlice3() low = typecheck(low, ctxExpr) high = typecheck(high, ctxExpr) max = typecheck(max, ctxExpr) - n.Left = defaultlit(n.Left, nil) + n.SetLeft(defaultlit(n.Left(), nil)) low = indexlit(low) high = indexlit(high) max = indexlit(max) n.SetSliceBounds(low, high, max) - l := n.Left - if l.Type == nil { - n.Type = nil + l := n.Left() + if l.Type() == nil { + n.SetType(nil) return n } - if l.Type.IsArray() { - if !islvalue(n.Left) { + if l.Type().IsArray() { + if !islvalue(n.Left()) { base.Errorf("invalid operation %v (slice of unaddressable value)", n) - n.Type = nil + n.SetType(nil) return n } - n.Left = ir.Nod(ir.OADDR, n.Left, nil) - n.Left.SetImplicit(true) - n.Left = typecheck(n.Left, ctxExpr) - l = n.Left + n.SetLeft(ir.Nod(ir.OADDR, n.Left(), nil)) + n.Left().SetImplicit(true) + n.SetLeft(typecheck(n.Left(), ctxExpr)) + l = n.Left() } - t := l.Type + t := l.Type() var tp *types.Type if t.IsString() { if hasmax { base.Errorf("invalid operation %v (3-index slice of string)", n) - n.Type = nil + n.SetType(nil) return n } - n.Type = t - n.Op = ir.OSLICESTR + n.SetType(t) + n.SetOp(ir.OSLICESTR) } else if t.IsPtr() && t.Elem().IsArray() { tp = t.Elem() - n.Type = types.NewSlice(tp.Elem()) - dowidth(n.Type) + n.SetType(types.NewSlice(tp.Elem())) + dowidth(n.Type()) if hasmax { - n.Op = ir.OSLICE3ARR + n.SetOp(ir.OSLICE3ARR) } else { - n.Op = ir.OSLICEARR + n.SetOp(ir.OSLICEARR) } } else if t.IsSlice() { - n.Type = t + n.SetType(t) } else { base.Errorf("cannot slice %v (type %v)", l, t) - n.Type = nil + n.SetType(nil) return n } if low != nil && !checksliceindex(l, low, tp) { - n.Type = nil + n.SetType(nil) return n } if high != nil && !checksliceindex(l, high, tp) { - n.Type = nil + n.SetType(nil) return n } if max != nil && !checksliceindex(l, max, tp) { - n.Type = nil + n.SetType(nil) return n } if !checksliceconst(low, high) || !checksliceconst(low, max) || !checksliceconst(high, max) { - n.Type = nil + n.SetType(nil) return n } // call and call like case ir.OCALL: - 
typecheckslice(n.Ninit.Slice(), ctxStmt) // imported rewritten f(g()) calls (#30907) - n.Left = typecheck(n.Left, ctxExpr|ctxType|ctxCallee) - if n.Left.Diag() { + typecheckslice(n.Init().Slice(), ctxStmt) // imported rewritten f(g()) calls (#30907) + n.SetLeft(typecheck(n.Left(), ctxExpr|ctxType|ctxCallee)) + if n.Left().Diag() { n.SetDiag(true) } - l := n.Left + l := n.Left() - if l.Op == ir.ONAME && l.SubOp() != 0 { + if l.Op() == ir.ONAME && l.SubOp() != 0 { if n.IsDDD() && l.SubOp() != ir.OAPPEND { base.Errorf("invalid use of ... with builtin %v", l) } // builtin: OLEN, OCAP, etc. - n.Op = l.SubOp() - n.Left = n.Right - n.Right = nil + n.SetOp(l.SubOp()) + n.SetLeft(n.Right()) + n.SetRight(nil) n = typecheck1(n, top) return n } - n.Left = defaultlit(n.Left, nil) - l = n.Left - if l.Op == ir.OTYPE { + n.SetLeft(defaultlit(n.Left(), nil)) + l = n.Left() + if l.Op() == ir.OTYPE { if n.IsDDD() { - if !l.Type.Broke() { - base.Errorf("invalid use of ... in type conversion to %v", l.Type) + if !l.Type().Broke() { + base.Errorf("invalid use of ... in type conversion to %v", l.Type()) } n.SetDiag(true) } @@ -1320,12 +1320,12 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { ok |= ctxExpr // turn CALL(type, arg) into CONV(arg) w/ type - n.Left = nil + n.SetLeft(nil) - n.Op = ir.OCONV - n.Type = l.Type - if !onearg(n, "conversion to %v", l.Type) { - n.Type = nil + n.SetOp(ir.OCONV) + n.SetType(l.Type()) + if !onearg(n, "conversion to %v", l.Type()) { + n.SetType(nil) return n } n = typecheck1(n, top) @@ -1333,19 +1333,19 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { } typecheckargs(n) - t := l.Type + t := l.Type() if t == nil { - n.Type = nil + n.SetType(nil) return n } checkwidth(t) - switch l.Op { + switch l.Op() { case ir.ODOTINTER: - n.Op = ir.OCALLINTER + n.SetOp(ir.OCALLINTER) case ir.ODOTMETH: - n.Op = ir.OCALLMETH + n.SetOp(ir.OCALLMETH) // typecheckaste was used here but there wasn't enough // information further down the call chain to know if we @@ -1353,44 +1353,44 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { // It isn't necessary, so just do a sanity check. 
tp := t.Recv().Type - if l.Left == nil || !types.Identical(l.Left.Type, tp) { + if l.Left() == nil || !types.Identical(l.Left().Type(), tp) { base.Fatalf("method receiver") } default: - n.Op = ir.OCALLFUNC + n.SetOp(ir.OCALLFUNC) if t.Etype != types.TFUNC { name := l.String() - if isBuiltinFuncName(name) && l.Name.Defn != nil { + if isBuiltinFuncName(name) && l.Name().Defn != nil { // be more specific when the function // name matches a predeclared function base.Errorf("cannot call non-function %s (type %v), declared at %s", - name, t, base.FmtPos(l.Name.Defn.Pos)) + name, t, base.FmtPos(l.Name().Defn.Pos())) } else { base.Errorf("cannot call non-function %s (type %v)", name, t) } - n.Type = nil + n.SetType(nil) return n } } - typecheckaste(ir.OCALL, n.Left, n.IsDDD(), t.Params(), n.List, func() string { return fmt.Sprintf("argument to %v", n.Left) }) + typecheckaste(ir.OCALL, n.Left(), n.IsDDD(), t.Params(), n.List(), func() string { return fmt.Sprintf("argument to %v", n.Left()) }) ok |= ctxStmt if t.NumResults() == 0 { break } ok |= ctxExpr if t.NumResults() == 1 { - n.Type = l.Type.Results().Field(0).Type + n.SetType(l.Type().Results().Field(0).Type) - if n.Op == ir.OCALLFUNC && n.Left.Op == ir.ONAME && isRuntimePkg(n.Left.Sym.Pkg) && n.Left.Sym.Name == "getg" { + if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.ONAME && isRuntimePkg(n.Left().Sym().Pkg) && n.Left().Sym().Name == "getg" { // Emit code for runtime.getg() directly instead of calling function. // Most such rewrites (for example the similar one for math.Sqrt) should be done in walk, // so that the ordering pass can make sure to preserve the semantics of the original code // (in particular, the exact time of the function call) by introducing temporaries. // In this case, we know getg() always returns the same result within a given function // and we want to avoid the temporaries, so we do the rewrite earlier than is typical. - n.Op = ir.OGETG + n.SetOp(ir.OGETG) } break @@ -1402,73 +1402,73 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { break } - n.Type = l.Type.Results() + n.SetType(l.Type().Results()) case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF: ok |= ctxExpr - if !onearg(n, "%v", n.Op) { - n.Type = nil + if !onearg(n, "%v", n.Op()) { + n.SetType(nil) return n } - n.Type = types.Types[types.TUINTPTR] + n.SetType(types.Types[types.TUINTPTR]) case ir.OCAP, ir.OLEN: ok |= ctxExpr - if !onearg(n, "%v", n.Op) { - n.Type = nil + if !onearg(n, "%v", n.Op()) { + n.SetType(nil) return n } - n.Left = typecheck(n.Left, ctxExpr) - n.Left = defaultlit(n.Left, nil) - n.Left = implicitstar(n.Left) - l := n.Left - t := l.Type + n.SetLeft(typecheck(n.Left(), ctxExpr)) + n.SetLeft(defaultlit(n.Left(), nil)) + n.SetLeft(implicitstar(n.Left())) + l := n.Left() + t := l.Type() if t == nil { - n.Type = nil + n.SetType(nil) return n } var ok bool - if n.Op == ir.OLEN { + if n.Op() == ir.OLEN { ok = okforlen[t.Etype] } else { ok = okforcap[t.Etype] } if !ok { - base.Errorf("invalid argument %L for %v", l, n.Op) - n.Type = nil + base.Errorf("invalid argument %L for %v", l, n.Op()) + n.SetType(nil) return n } - n.Type = types.Types[types.TINT] + n.SetType(types.Types[types.TINT]) case ir.OREAL, ir.OIMAG: ok |= ctxExpr - if !onearg(n, "%v", n.Op) { - n.Type = nil + if !onearg(n, "%v", n.Op()) { + n.SetType(nil) return n } - n.Left = typecheck(n.Left, ctxExpr) - l := n.Left - t := l.Type + n.SetLeft(typecheck(n.Left(), ctxExpr)) + l := n.Left() + t := l.Type() if t == nil { - n.Type = nil + n.SetType(nil) return n } // Determine result type. 
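Aside (illustrative, not part of the patch): the switch below encodes the spec's typing of real and imag: complex64 yields float32, complex128 yields float64, and an untyped constant operand stays untyped (TIDEAL maps to UntypedFloat). For example:

	package main

	import "fmt"

	func main() {
		var a complex64 = 3 + 4i
		var b complex128 = 3 + 4i
		fmt.Printf("%T %T\n", real(a), real(b)) // float32 float64
		const c = imag(2 + 3i)                  // untyped: still an ideal constant
		fmt.Println(c)                          // 3
	}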
switch t.Etype { case types.TIDEAL: - n.Type = types.UntypedFloat + n.SetType(types.UntypedFloat) case types.TCOMPLEX64: - n.Type = types.Types[types.TFLOAT32] + n.SetType(types.Types[types.TFLOAT32]) case types.TCOMPLEX128: - n.Type = types.Types[types.TFLOAT64] + n.SetType(types.Types[types.TFLOAT64]) default: - base.Errorf("invalid argument %L for %v", l, n.Op) - n.Type = nil + base.Errorf("invalid argument %L for %v", l, n.Op()) + n.SetType(nil) return n } @@ -1476,34 +1476,34 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { ok |= ctxExpr typecheckargs(n) if !twoarg(n) { - n.Type = nil + n.SetType(nil) return n } - l := n.Left - r := n.Right - if l.Type == nil || r.Type == nil { - n.Type = nil + l := n.Left() + r := n.Right() + if l.Type() == nil || r.Type() == nil { + n.SetType(nil) return n } l, r = defaultlit2(l, r, false) - if l.Type == nil || r.Type == nil { - n.Type = nil + if l.Type() == nil || r.Type() == nil { + n.SetType(nil) return n } - n.Left = l - n.Right = r + n.SetLeft(l) + n.SetRight(r) - if !types.Identical(l.Type, r.Type) { - base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type) - n.Type = nil + if !types.Identical(l.Type(), r.Type()) { + base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type()) + n.SetType(nil) return n } var t *types.Type - switch l.Type.Etype { + switch l.Type().Etype { default: - base.Errorf("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type) - n.Type = nil + base.Errorf("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type()) + n.SetType(nil) return n case types.TIDEAL: @@ -1515,30 +1515,30 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { case types.TFLOAT64: t = types.Types[types.TCOMPLEX128] } - n.Type = t + n.SetType(t) case ir.OCLOSE: - if !onearg(n, "%v", n.Op) { - n.Type = nil + if !onearg(n, "%v", n.Op()) { + n.SetType(nil) return n } - n.Left = typecheck(n.Left, ctxExpr) - n.Left = defaultlit(n.Left, nil) - l := n.Left - t := l.Type + n.SetLeft(typecheck(n.Left(), ctxExpr)) + n.SetLeft(defaultlit(n.Left(), nil)) + l := n.Left() + t := l.Type() if t == nil { - n.Type = nil + n.SetType(nil) return n } if !t.IsChan() { base.Errorf("invalid operation: %v (non-chan type %v)", n, t) - n.Type = nil + n.SetType(nil) return n } if !t.ChanDir().CanSend() { base.Errorf("invalid operation: %v (cannot close receive-only channel)", n) - n.Type = nil + n.SetType(nil) return n } @@ -1547,78 +1547,78 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { case ir.ODELETE: ok |= ctxStmt typecheckargs(n) - args := n.List + args := n.List() if args.Len() == 0 { base.Errorf("missing arguments to delete") - n.Type = nil + n.SetType(nil) return n } if args.Len() == 1 { base.Errorf("missing second (key) argument to delete") - n.Type = nil + n.SetType(nil) return n } if args.Len() != 2 { base.Errorf("too many arguments to delete") - n.Type = nil + n.SetType(nil) return n } l := args.First() r := args.Second() - if l.Type != nil && !l.Type.IsMap() { - base.Errorf("first argument to delete must be map; have %L", l.Type) - n.Type = nil + if l.Type() != nil && !l.Type().IsMap() { + base.Errorf("first argument to delete must be map; have %L", l.Type()) + n.SetType(nil) return n } - args.SetSecond(assignconv(r, l.Type.Key(), "delete")) + args.SetSecond(assignconv(r, l.Type().Key(), "delete")) case ir.OAPPEND: ok |= ctxExpr typecheckargs(n) - args := n.List + args := n.List() if args.Len() == 0 { base.Errorf("missing arguments 
to append") - n.Type = nil + n.SetType(nil) return n } - t := args.First().Type + t := args.First().Type() if t == nil { - n.Type = nil + n.SetType(nil) return n } - n.Type = t + n.SetType(t) if !t.IsSlice() { if ir.IsNil(args.First()) { base.Errorf("first argument to append must be typed slice; have untyped nil") - n.Type = nil + n.SetType(nil) return n } base.Errorf("first argument to append must be slice; have %L", t) - n.Type = nil + n.SetType(nil) return n } if n.IsDDD() { if args.Len() == 1 { base.Errorf("cannot use ... on first argument to append") - n.Type = nil + n.SetType(nil) return n } if args.Len() != 2 { base.Errorf("too many arguments to append") - n.Type = nil + n.SetType(nil) return n } - if t.Elem().IsKind(types.TUINT8) && args.Second().Type.IsString() { + if t.Elem().IsKind(types.TUINT8) && args.Second().Type().IsString() { args.SetSecond(defaultlit(args.Second(), types.Types[types.TSTRING])) break } @@ -1629,90 +1629,90 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { as := args.Slice()[1:] for i, n := range as { - if n.Type == nil { + if n.Type() == nil { continue } as[i] = assignconv(n, t.Elem(), "append") - checkwidth(as[i].Type) // ensure width is calculated for backend + checkwidth(as[i].Type()) // ensure width is calculated for backend } case ir.OCOPY: ok |= ctxStmt | ctxExpr typecheckargs(n) if !twoarg(n) { - n.Type = nil + n.SetType(nil) return n } - n.Type = types.Types[types.TINT] - if n.Left.Type == nil || n.Right.Type == nil { - n.Type = nil + n.SetType(types.Types[types.TINT]) + if n.Left().Type() == nil || n.Right().Type() == nil { + n.SetType(nil) return n } - n.Left = defaultlit(n.Left, nil) - n.Right = defaultlit(n.Right, nil) - if n.Left.Type == nil || n.Right.Type == nil { - n.Type = nil + n.SetLeft(defaultlit(n.Left(), nil)) + n.SetRight(defaultlit(n.Right(), nil)) + if n.Left().Type() == nil || n.Right().Type() == nil { + n.SetType(nil) return n } // copy([]byte, string) - if n.Left.Type.IsSlice() && n.Right.Type.IsString() { - if types.Identical(n.Left.Type.Elem(), types.Bytetype) { + if n.Left().Type().IsSlice() && n.Right().Type().IsString() { + if types.Identical(n.Left().Type().Elem(), types.Bytetype) { break } - base.Errorf("arguments to copy have different element types: %L and string", n.Left.Type) - n.Type = nil + base.Errorf("arguments to copy have different element types: %L and string", n.Left().Type()) + n.SetType(nil) return n } - if !n.Left.Type.IsSlice() || !n.Right.Type.IsSlice() { - if !n.Left.Type.IsSlice() && !n.Right.Type.IsSlice() { - base.Errorf("arguments to copy must be slices; have %L, %L", n.Left.Type, n.Right.Type) - } else if !n.Left.Type.IsSlice() { - base.Errorf("first argument to copy should be slice; have %L", n.Left.Type) + if !n.Left().Type().IsSlice() || !n.Right().Type().IsSlice() { + if !n.Left().Type().IsSlice() && !n.Right().Type().IsSlice() { + base.Errorf("arguments to copy must be slices; have %L, %L", n.Left().Type(), n.Right().Type()) + } else if !n.Left().Type().IsSlice() { + base.Errorf("first argument to copy should be slice; have %L", n.Left().Type()) } else { - base.Errorf("second argument to copy should be slice or string; have %L", n.Right.Type) + base.Errorf("second argument to copy should be slice or string; have %L", n.Right().Type()) } - n.Type = nil + n.SetType(nil) return n } - if !types.Identical(n.Left.Type.Elem(), n.Right.Type.Elem()) { - base.Errorf("arguments to copy have different element types: %L and %L", n.Left.Type, n.Right.Type) - n.Type = nil + if 
!types.Identical(n.Left().Type().Elem(), n.Right().Type().Elem()) { + base.Errorf("arguments to copy have different element types: %L and %L", n.Left().Type(), n.Right().Type()) + n.SetType(nil) return n } case ir.OCONV: ok |= ctxExpr - checkwidth(n.Type) // ensure width is calculated for backend - n.Left = typecheck(n.Left, ctxExpr) - n.Left = convlit1(n.Left, n.Type, true, nil) - t := n.Left.Type - if t == nil || n.Type == nil { - n.Type = nil - return n - } - op, why := convertop(n.Left.Op == ir.OLITERAL, t, n.Type) - n.Op = op - if n.Op == ir.OXXX { - if !n.Diag() && !n.Type.Broke() && !n.Left.Diag() { - base.Errorf("cannot convert %L to type %v%s", n.Left, n.Type, why) + checkwidth(n.Type()) // ensure width is calculated for backend + n.SetLeft(typecheck(n.Left(), ctxExpr)) + n.SetLeft(convlit1(n.Left(), n.Type(), true, nil)) + t := n.Left().Type() + if t == nil || n.Type() == nil { + n.SetType(nil) + return n + } + op, why := convertop(n.Left().Op() == ir.OLITERAL, t, n.Type()) + n.SetOp(op) + if n.Op() == ir.OXXX { + if !n.Diag() && !n.Type().Broke() && !n.Left().Diag() { + base.Errorf("cannot convert %L to type %v%s", n.Left(), n.Type(), why) n.SetDiag(true) } - n.Op = ir.OCONV - n.Type = nil + n.SetOp(ir.OCONV) + n.SetType(nil) return n } - switch n.Op { + switch n.Op() { case ir.OCONVNOP: - if t.Etype == n.Type.Etype { + if t.Etype == n.Type().Etype { switch t.Etype { case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128: // Floating point casts imply rounding and // so the conversion must be kept. - n.Op = ir.OCONV + n.SetOp(ir.OCONV) } } @@ -1722,26 +1722,26 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { break case ir.OSTR2RUNES: - if n.Left.Op == ir.OLITERAL { + if n.Left().Op() == ir.OLITERAL { n = stringtoruneslit(n) } } case ir.OMAKE: ok |= ctxExpr - args := n.List.Slice() + args := n.List().Slice() if len(args) == 0 { base.Errorf("missing argument to make") - n.Type = nil + n.SetType(nil) return n } - n.List.Set(nil) + n.PtrList().Set(nil) l := args[0] l = typecheck(l, ctxType) - t := l.Type + t := l.Type() if t == nil { - n.Type = nil + n.SetType(nil) return n } @@ -1749,13 +1749,13 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { switch t.Etype { default: base.Errorf("cannot make type %v", t) - n.Type = nil + n.SetType(nil) return n case types.TSLICE: if i >= len(args) { base.Errorf("missing len argument to make(%v)", t) - n.Type = nil + n.SetType(nil) return n } @@ -1769,23 +1769,23 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { r = typecheck(r, ctxExpr) } - if l.Type == nil || (r != nil && r.Type == nil) { - n.Type = nil + if l.Type() == nil || (r != nil && r.Type() == nil) { + n.SetType(nil) return n } if !checkmake(t, "len", &l) || r != nil && !checkmake(t, "cap", &r) { - n.Type = nil + n.SetType(nil) return n } if ir.IsConst(l, constant.Int) && r != nil && ir.IsConst(r, constant.Int) && constant.Compare(l.Val(), token.GTR, r.Val()) { base.Errorf("len larger than cap in make(%v)", t) - n.Type = nil + n.SetType(nil) return n } - n.Left = l - n.Right = r - n.Op = ir.OMAKESLICE + n.SetLeft(l) + n.SetRight(r) + n.SetOp(ir.OMAKESLICE) case types.TMAP: if i < len(args) { @@ -1793,19 +1793,19 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { i++ l = typecheck(l, ctxExpr) l = defaultlit(l, types.Types[types.TINT]) - if l.Type == nil { - n.Type = nil + if l.Type() == nil { + n.SetType(nil) return n } if !checkmake(t, "size", &l) { - n.Type = nil + n.SetType(nil) return n } - n.Left = l + n.SetLeft(l) } else { - n.Left = 
nodintconst(0) + n.SetLeft(nodintconst(0)) } - n.Op = ir.OMAKEMAP + n.SetOp(ir.OMAKEMAP) case types.TCHAN: l = nil @@ -1814,59 +1814,59 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { i++ l = typecheck(l, ctxExpr) l = defaultlit(l, types.Types[types.TINT]) - if l.Type == nil { - n.Type = nil + if l.Type() == nil { + n.SetType(nil) return n } if !checkmake(t, "buffer", &l) { - n.Type = nil + n.SetType(nil) return n } - n.Left = l + n.SetLeft(l) } else { - n.Left = nodintconst(0) + n.SetLeft(nodintconst(0)) } - n.Op = ir.OMAKECHAN + n.SetOp(ir.OMAKECHAN) } if i < len(args) { base.Errorf("too many arguments to make(%v)", t) - n.Op = ir.OMAKE - n.Type = nil + n.SetOp(ir.OMAKE) + n.SetType(nil) return n } - n.Type = t + n.SetType(t) case ir.ONEW: ok |= ctxExpr - args := n.List + args := n.List() if args.Len() == 0 { base.Errorf("missing argument to new") - n.Type = nil + n.SetType(nil) return n } l := args.First() l = typecheck(l, ctxType) - t := l.Type + t := l.Type() if t == nil { - n.Type = nil + n.SetType(nil) return n } if args.Len() > 1 { base.Errorf("too many arguments to new(%v)", t) - n.Type = nil + n.SetType(nil) return n } - n.Left = l - n.Type = types.NewPtr(t) + n.SetLeft(l) + n.SetType(types.NewPtr(t)) case ir.OPRINT, ir.OPRINTN: ok |= ctxStmt typecheckargs(n) - ls := n.List.Slice() + ls := n.List().Slice() for i1, n1 := range ls { // Special case for print: int constant is int64, not int. if ir.IsConst(n1, constant.Int) { @@ -1879,45 +1879,45 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { case ir.OPANIC: ok |= ctxStmt if !onearg(n, "panic") { - n.Type = nil + n.SetType(nil) return n } - n.Left = typecheck(n.Left, ctxExpr) - n.Left = defaultlit(n.Left, types.Types[types.TINTER]) - if n.Left.Type == nil { - n.Type = nil + n.SetLeft(typecheck(n.Left(), ctxExpr)) + n.SetLeft(defaultlit(n.Left(), types.Types[types.TINTER])) + if n.Left().Type() == nil { + n.SetType(nil) return n } case ir.ORECOVER: ok |= ctxExpr | ctxStmt - if n.List.Len() != 0 { + if n.List().Len() != 0 { base.Errorf("too many arguments to recover") - n.Type = nil + n.SetType(nil) return n } - n.Type = types.Types[types.TINTER] + n.SetType(types.Types[types.TINTER]) case ir.OCLOSURE: ok |= ctxExpr typecheckclosure(n, top) - if n.Type == nil { + if n.Type() == nil { return n } case ir.OITAB: ok |= ctxExpr - n.Left = typecheck(n.Left, ctxExpr) - t := n.Left.Type + n.SetLeft(typecheck(n.Left(), ctxExpr)) + t := n.Left().Type() if t == nil { - n.Type = nil + n.SetType(nil) return n } if !t.IsInterface() { base.Fatalf("OITAB of %v", t) } - n.Type = types.NewPtr(types.Types[types.TUINTPTR]) + n.SetType(types.NewPtr(types.Types[types.TUINTPTR])) case ir.OIDATA: // Whoever creates the OIDATA node must know a priori the concrete type at that moment, @@ -1926,19 +1926,19 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { case ir.OSPTR: ok |= ctxExpr - n.Left = typecheck(n.Left, ctxExpr) - t := n.Left.Type + n.SetLeft(typecheck(n.Left(), ctxExpr)) + t := n.Left().Type() if t == nil { - n.Type = nil + n.SetType(nil) return n } if !t.IsSlice() && !t.IsString() { base.Fatalf("OSPTR of %v", t) } if t.IsString() { - n.Type = types.NewPtr(types.Types[types.TUINT8]) + n.SetType(types.NewPtr(types.Types[types.TUINT8])) } else { - n.Type = types.NewPtr(t.Elem()) + n.SetType(types.NewPtr(t.Elem())) } case ir.OCLOSUREVAR: @@ -1946,12 +1946,12 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { case ir.OCFUNC: ok |= ctxExpr - n.Left = typecheck(n.Left, ctxExpr) - n.Type = types.Types[types.TUINTPTR] + 
n.SetLeft(typecheck(n.Left(), ctxExpr)) + n.SetType(types.Types[types.TUINTPTR]) case ir.OCONVNOP: ok |= ctxExpr - n.Left = typecheck(n.Left, ctxExpr) + n.SetLeft(typecheck(n.Left(), ctxExpr)) // statements case ir.OAS: @@ -1960,8 +1960,8 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { typecheckas(n) // Code that creates temps does not bother to set defn, so do it here. - if n.Left.Op == ir.ONAME && ir.IsAutoTmp(n.Left) { - n.Left.Name.Defn = n + if n.Left().Op() == ir.ONAME && ir.IsAutoTmp(n.Left()) { + n.Left().Name().Defn = n } case ir.OAS2: @@ -1981,72 +1981,72 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { case ir.OLABEL: ok |= ctxStmt decldepth++ - if n.Sym.IsBlank() { + if n.Sym().IsBlank() { // Empty identifier is valid but useless. // Eliminate now to simplify life later. // See issues 7538, 11589, 11593. - n.Op = ir.OEMPTY - n.Left = nil + n.SetOp(ir.OEMPTY) + n.SetLeft(nil) } case ir.ODEFER: ok |= ctxStmt - n.Left = typecheck(n.Left, ctxStmt|ctxExpr) - if !n.Left.Diag() { + n.SetLeft(typecheck(n.Left(), ctxStmt|ctxExpr)) + if !n.Left().Diag() { checkdefergo(n) } case ir.OGO: ok |= ctxStmt - n.Left = typecheck(n.Left, ctxStmt|ctxExpr) + n.SetLeft(typecheck(n.Left(), ctxStmt|ctxExpr)) checkdefergo(n) case ir.OFOR, ir.OFORUNTIL: ok |= ctxStmt - typecheckslice(n.Ninit.Slice(), ctxStmt) + typecheckslice(n.Init().Slice(), ctxStmt) decldepth++ - n.Left = typecheck(n.Left, ctxExpr) - n.Left = defaultlit(n.Left, nil) - if n.Left != nil { - t := n.Left.Type + n.SetLeft(typecheck(n.Left(), ctxExpr)) + n.SetLeft(defaultlit(n.Left(), nil)) + if n.Left() != nil { + t := n.Left().Type() if t != nil && !t.IsBoolean() { - base.Errorf("non-bool %L used as for condition", n.Left) + base.Errorf("non-bool %L used as for condition", n.Left()) } } - n.Right = typecheck(n.Right, ctxStmt) - if n.Op == ir.OFORUNTIL { - typecheckslice(n.List.Slice(), ctxStmt) + n.SetRight(typecheck(n.Right(), ctxStmt)) + if n.Op() == ir.OFORUNTIL { + typecheckslice(n.List().Slice(), ctxStmt) } - typecheckslice(n.Nbody.Slice(), ctxStmt) + typecheckslice(n.Body().Slice(), ctxStmt) decldepth-- case ir.OIF: ok |= ctxStmt - typecheckslice(n.Ninit.Slice(), ctxStmt) - n.Left = typecheck(n.Left, ctxExpr) - n.Left = defaultlit(n.Left, nil) - if n.Left != nil { - t := n.Left.Type + typecheckslice(n.Init().Slice(), ctxStmt) + n.SetLeft(typecheck(n.Left(), ctxExpr)) + n.SetLeft(defaultlit(n.Left(), nil)) + if n.Left() != nil { + t := n.Left().Type() if t != nil && !t.IsBoolean() { - base.Errorf("non-bool %L used as if condition", n.Left) + base.Errorf("non-bool %L used as if condition", n.Left()) } } - typecheckslice(n.Nbody.Slice(), ctxStmt) - typecheckslice(n.Rlist.Slice(), ctxStmt) + typecheckslice(n.Body().Slice(), ctxStmt) + typecheckslice(n.Rlist().Slice(), ctxStmt) case ir.ORETURN: ok |= ctxStmt typecheckargs(n) if Curfn == nil { base.Errorf("return outside function") - n.Type = nil + n.SetType(nil) return n } - if Curfn.Type.FuncType().Outnamed && n.List.Len() == 0 { + if Curfn.Type().FuncType().Outnamed && n.List().Len() == 0 { break } - typecheckaste(ir.ORETURN, nil, false, Curfn.Type.Results(), n.List, func() string { return "return argument" }) + typecheckaste(ir.ORETURN, nil, false, Curfn.Type().Results(), n.List(), func() string { return "return argument" }) case ir.ORETJMP: ok |= ctxStmt @@ -2065,7 +2065,7 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { case ir.OTYPESW: base.Errorf("use of .(type) outside type switch") - n.Type = nil + n.SetType(nil) return n case ir.ODCLFUNC: @@ -2074,16 
+2074,16 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { case ir.ODCLCONST: ok |= ctxStmt - n.Left = typecheck(n.Left, ctxExpr) + n.SetLeft(typecheck(n.Left(), ctxExpr)) case ir.ODCLTYPE: ok |= ctxStmt - n.Left = typecheck(n.Left, ctxType) - checkwidth(n.Left.Type) + n.SetLeft(typecheck(n.Left(), ctxType)) + checkwidth(n.Left().Type()) } - t := n.Type - if t != nil && !t.IsFuncArgStruct() && n.Op != ir.OTYPE { + t := n.Type() + if t != nil && !t.IsFuncArgStruct() && n.Op() != ir.OTYPE { switch t.Etype { case types.TFUNC, // might have TANY; wait until it's called types.TANY, types.TFORW, types.TIDEAL, types.TNIL, types.TBLANK: @@ -2095,24 +2095,24 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { } n = evalConst(n) - if n.Op == ir.OTYPE && top&ctxType == 0 { - if !n.Type.Broke() { - base.Errorf("type %v is not an expression", n.Type) + if n.Op() == ir.OTYPE && top&ctxType == 0 { + if !n.Type().Broke() { + base.Errorf("type %v is not an expression", n.Type()) } - n.Type = nil + n.SetType(nil) return n } - if top&(ctxExpr|ctxType) == ctxType && n.Op != ir.OTYPE { + if top&(ctxExpr|ctxType) == ctxType && n.Op() != ir.OTYPE { base.Errorf("%v is not a type", n) - n.Type = nil + n.SetType(nil) return n } // TODO(rsc): simplify if (top&(ctxCallee|ctxExpr|ctxType) != 0) && top&ctxStmt == 0 && ok&(ctxExpr|ctxType|ctxCallee) == 0 { base.Errorf("%v used as value", n) - n.Type = nil + n.SetType(nil) return n } @@ -2122,7 +2122,7 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { n.SetDiag(true) } - n.Type = nil + n.SetType(nil) return n } @@ -2130,13 +2130,13 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { } func typecheckargs(n *ir.Node) { - if n.List.Len() != 1 || n.IsDDD() { - typecheckslice(n.List.Slice(), ctxExpr) + if n.List().Len() != 1 || n.IsDDD() { + typecheckslice(n.List().Slice(), ctxExpr) return } - typecheckslice(n.List.Slice(), ctxExpr|ctxMultiOK) - t := n.List.First().Type + typecheckslice(n.List().Slice(), ctxExpr|ctxMultiOK) + t := n.List().First().Type() if t == nil || !t.IsFuncArgStruct() { return } @@ -2144,12 +2144,12 @@ func typecheckargs(n *ir.Node) { // Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...). // Save n as n.Orig for fmt.go. - if n.Orig == n { - n.Orig = ir.SepCopy(n) + if n.Orig() == n { + n.SetOrig(ir.SepCopy(n)) } as := ir.Nod(ir.OAS2, nil, nil) - as.Rlist.AppendNodes(&n.List) + as.PtrRlist().AppendNodes(n.PtrList()) // If we're outside of function context, then this call will // be executed during the generated init function. 
However, @@ -2162,20 +2162,20 @@ func typecheckargs(n *ir.Node) { } for _, f := range t.FieldSlice() { t := temp(f.Type) - as.Ninit.Append(ir.Nod(ir.ODCL, t, nil)) - as.List.Append(t) - n.List.Append(t) + as.PtrInit().Append(ir.Nod(ir.ODCL, t, nil)) + as.PtrList().Append(t) + n.PtrList().Append(t) } if static { Curfn = nil } as = typecheck(as, ctxStmt) - n.Ninit.Append(as) + n.PtrInit().Append(as) } func checksliceindex(l *ir.Node, r *ir.Node, tp *types.Type) bool { - t := r.Type + t := r.Type() if t == nil { return false } @@ -2184,7 +2184,7 @@ func checksliceindex(l *ir.Node, r *ir.Node, tp *types.Type) bool { return false } - if r.Op == ir.OLITERAL { + if r.Op() == ir.OLITERAL { x := r.Val() if constant.Sign(x) < 0 { base.Errorf("invalid slice index %v (index must be non-negative)", r) @@ -2205,7 +2205,7 @@ func checksliceindex(l *ir.Node, r *ir.Node, tp *types.Type) bool { } func checksliceconst(lo *ir.Node, hi *ir.Node) bool { - if lo != nil && hi != nil && lo.Op == ir.OLITERAL && hi.Op == ir.OLITERAL && constant.Compare(lo.Val(), token.GTR, hi.Val()) { + if lo != nil && hi != nil && lo.Op() == ir.OLITERAL && hi.Op() == ir.OLITERAL && constant.Compare(lo.Val(), token.GTR, hi.Val()) { base.Errorf("invalid slice index: %v > %v", lo, hi) return false } @@ -2215,11 +2215,11 @@ func checksliceconst(lo *ir.Node, hi *ir.Node) bool { func checkdefergo(n *ir.Node) { what := "defer" - if n.Op == ir.OGO { + if n.Op() == ir.OGO { what = "go" } - switch n.Left.Op { + switch n.Left().Op() { // ok case ir.OCALLINTER, ir.OCALLMETH, @@ -2245,16 +2245,16 @@ func checkdefergo(n *ir.Node) { ir.ONEW, ir.OREAL, ir.OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof - if n.Left.Orig != nil && n.Left.Orig.Op == ir.OCONV { + if n.Left().Orig() != nil && n.Left().Orig().Op() == ir.OCONV { break } - base.ErrorfAt(n.Pos, "%s discards result of %v", what, n.Left) + base.ErrorfAt(n.Pos(), "%s discards result of %v", what, n.Left()) return } // type is broken or missing, most likely a method call on a broken type // we will warn about the broken type elsewhere. no need to emit a potentially confusing error - if n.Left.Type == nil || n.Left.Type.Broke() { + if n.Left().Type() == nil || n.Left().Type().Broke() { return } @@ -2262,7 +2262,7 @@ func checkdefergo(n *ir.Node) { // The syntax made sure it was a call, so this must be // a conversion. n.SetDiag(true) - base.ErrorfAt(n.Pos, "%s requires function call, not conversion", what) + base.ErrorfAt(n.Pos(), "%s requires function call, not conversion", what) } } @@ -2270,7 +2270,7 @@ func checkdefergo(n *ir.Node) { // n.Left = implicitstar(n.Left) func implicitstar(n *ir.Node) *ir.Node { // insert implicit * if needed for fixed array - t := n.Type + t := n.Type() if t == nil || !t.IsPtr() { return n } @@ -2288,43 +2288,43 @@ func implicitstar(n *ir.Node) *ir.Node { } func onearg(n *ir.Node, f string, args ...interface{}) bool { - if n.Left != nil { + if n.Left() != nil { return true } - if n.List.Len() == 0 { + if n.List().Len() == 0 { p := fmt.Sprintf(f, args...) base.Errorf("missing argument to %s: %v", p, n) return false } - if n.List.Len() > 1 { + if n.List().Len() > 1 { p := fmt.Sprintf(f, args...) 
base.Errorf("too many arguments to %s: %v", p, n) - n.Left = n.List.First() - n.List.Set(nil) + n.SetLeft(n.List().First()) + n.PtrList().Set(nil) return false } - n.Left = n.List.First() - n.List.Set(nil) + n.SetLeft(n.List().First()) + n.PtrList().Set(nil) return true } func twoarg(n *ir.Node) bool { - if n.Left != nil { + if n.Left() != nil { return true } - if n.List.Len() != 2 { - if n.List.Len() < 2 { + if n.List().Len() != 2 { + if n.List().Len() < 2 { base.Errorf("not enough arguments in call to %v", n) } else { base.Errorf("too many arguments in call to %v", n) } return false } - n.Left = n.List.First() - n.Right = n.List.Second() - n.List.Set(nil) + n.SetLeft(n.List().First()) + n.SetRight(n.List().Second()) + n.PtrList().Set(nil) return true } @@ -2364,7 +2364,7 @@ func typecheckMethodExpr(n *ir.Node) (res *ir.Node) { defer tracePrint("typecheckMethodExpr", n)(&res) } - t := n.Left.Type + t := n.Left().Type() // Compute the method set for t. var ms *types.Fields @@ -2373,8 +2373,8 @@ func typecheckMethodExpr(n *ir.Node) (res *ir.Node) { } else { mt := methtype(t) if mt == nil { - base.Errorf("%v undefined (type %v has no method %v)", n, t, n.Sym) - n.Type = nil + base.Errorf("%v undefined (type %v has no method %v)", n, t, n.Sym()) + n.SetType(nil) return n } expandmeth(mt) @@ -2392,7 +2392,7 @@ func typecheckMethodExpr(n *ir.Node) (res *ir.Node) { } } - s := n.Sym + s := n.Sym() m := lookdot1(n, s, t, ms, 0) if m == nil { if lookdot1(n, s, t, ms, 1) != nil { @@ -2402,31 +2402,31 @@ func typecheckMethodExpr(n *ir.Node) (res *ir.Node) { } else { base.Errorf("%v undefined (type %v has no method %v)", n, t, s) } - n.Type = nil + n.SetType(nil) return n } if !isMethodApplicable(t, m) { base.Errorf("invalid method expression %v (needs pointer receiver: (*%v).%S)", n, t, s) - n.Type = nil + n.SetType(nil) return n } - n.Op = ir.OMETHEXPR - if n.Name == nil { - n.Name = new(ir.Name) + n.SetOp(ir.OMETHEXPR) + if n.Name() == nil { + n.SetName(new(ir.Name)) } - n.Right = NewName(n.Sym) - n.Sym = methodSym(t, n.Sym) - n.Type = methodfunc(m.Type, n.Left.Type) - n.Xoffset = 0 + n.SetRight(NewName(n.Sym())) + n.SetSym(methodSym(t, n.Sym())) + n.SetType(methodfunc(m.Type, n.Left().Type())) + n.SetOffset(0) n.SetClass(ir.PFUNC) n.SetOpt(m) // methodSym already marked n.Sym as a function. // Issue 25065. Make sure that we emit the symbol for a local method. 
if base.Ctxt.Flag_dynlink && !inimport && (t.Sym == nil || t.Sym.Pkg == ir.LocalPkg) { - makefuncsym(n.Sym) + makefuncsym(n.Sym()) } return n @@ -2448,7 +2448,7 @@ func derefall(t *types.Type) *types.Type { } func lookdot(n *ir.Node, t *types.Type, dostrcmp int) *types.Field { - s := n.Sym + s := n.Sym() dowidth(t) var f1 *types.Field @@ -2457,7 +2457,7 @@ func lookdot(n *ir.Node, t *types.Type, dostrcmp int) *types.Field { } var f2 *types.Field - if n.Left.Type == t || n.Left.Type.Sym == nil { + if n.Left().Type() == t || n.Left().Type().Sym == nil { mt := methtype(t) if mt != nil { f2 = lookdot1(n, s, mt, mt.Methods(), dostrcmp) @@ -2470,21 +2470,21 @@ func lookdot(n *ir.Node, t *types.Type, dostrcmp int) *types.Field { return f1 } if f2 != nil { - base.Errorf("%v is both field and method", n.Sym) + base.Errorf("%v is both field and method", n.Sym()) } if f1.Offset == types.BADWIDTH { base.Fatalf("lookdot badwidth %v %p", f1, f1) } - n.Xoffset = f1.Offset - n.Type = f1.Type + n.SetOffset(f1.Offset) + n.SetType(f1.Type) if t.IsInterface() { - if n.Left.Type.IsPtr() { - n.Left = ir.Nod(ir.ODEREF, n.Left, nil) // implicitstar - n.Left.SetImplicit(true) - n.Left = typecheck(n.Left, ctxExpr) + if n.Left().Type().IsPtr() { + n.SetLeft(ir.Nod(ir.ODEREF, n.Left(), nil)) // implicitstar + n.Left().SetImplicit(true) + n.SetLeft(typecheck(n.Left(), ctxExpr)) } - n.Op = ir.ODOTINTER + n.SetOp(ir.ODOTINTER) } else { n.SetOpt(f1) } @@ -2497,29 +2497,29 @@ func lookdot(n *ir.Node, t *types.Type, dostrcmp int) *types.Field { // Already in the process of diagnosing an error. return f2 } - tt := n.Left.Type + tt := n.Left().Type() dowidth(tt) rcvr := f2.Type.Recv().Type if !types.Identical(rcvr, tt) { if rcvr.IsPtr() && types.Identical(rcvr.Elem(), tt) { - checklvalue(n.Left, "call pointer method on") - n.Left = ir.Nod(ir.OADDR, n.Left, nil) - n.Left.SetImplicit(true) - n.Left = typecheck(n.Left, ctxType|ctxExpr) + checklvalue(n.Left(), "call pointer method on") + n.SetLeft(ir.Nod(ir.OADDR, n.Left(), nil)) + n.Left().SetImplicit(true) + n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr)) } else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) { - n.Left = ir.Nod(ir.ODEREF, n.Left, nil) - n.Left.SetImplicit(true) - n.Left = typecheck(n.Left, ctxType|ctxExpr) + n.SetLeft(ir.Nod(ir.ODEREF, n.Left(), nil)) + n.Left().SetImplicit(true) + n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr)) } else if tt.IsPtr() && tt.Elem().IsPtr() && types.Identical(derefall(tt), derefall(rcvr)) { - base.Errorf("calling method %v with receiver %L requires explicit dereference", n.Sym, n.Left) + base.Errorf("calling method %v with receiver %L requires explicit dereference", n.Sym(), n.Left()) for tt.IsPtr() { // Stop one level early for method with pointer receiver. 
if rcvr.IsPtr() && !tt.Elem().IsPtr() { break } - n.Left = ir.Nod(ir.ODEREF, n.Left, nil) - n.Left.SetImplicit(true) - n.Left = typecheck(n.Left, ctxType|ctxExpr) + n.SetLeft(ir.Nod(ir.ODEREF, n.Left(), nil)) + n.Left().SetImplicit(true) + n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr)) tt = tt.Elem() } } else { @@ -2528,22 +2528,22 @@ func lookdot(n *ir.Node, t *types.Type, dostrcmp int) *types.Field { } pll := n - ll := n.Left - for ll.Left != nil && (ll.Op == ir.ODOT || ll.Op == ir.ODOTPTR || ll.Op == ir.ODEREF) { + ll := n.Left() + for ll.Left() != nil && (ll.Op() == ir.ODOT || ll.Op() == ir.ODOTPTR || ll.Op() == ir.ODEREF) { pll = ll - ll = ll.Left + ll = ll.Left() } - if pll.Implicit() && ll.Type.IsPtr() && ll.Type.Sym != nil && ir.AsNode(ll.Type.Sym.Def) != nil && ir.AsNode(ll.Type.Sym.Def).Op == ir.OTYPE { + if pll.Implicit() && ll.Type().IsPtr() && ll.Type().Sym != nil && ir.AsNode(ll.Type().Sym.Def) != nil && ir.AsNode(ll.Type().Sym.Def).Op() == ir.OTYPE { // It is invalid to automatically dereference a named pointer type when selecting a method. // Make n.Left == ll to clarify error message. - n.Left = ll + n.SetLeft(ll) return nil } - n.Sym = methodSym(n.Left.Type, f2.Sym) - n.Xoffset = f2.Offset - n.Type = f2.Type - n.Op = ir.ODOTMETH + n.SetSym(methodSym(n.Left().Type(), f2.Sym)) + n.SetOffset(f2.Offset) + n.SetType(f2.Type) + n.SetOp(ir.ODOTMETH) n.SetOpt(f2) return f2 @@ -2554,7 +2554,7 @@ func lookdot(n *ir.Node, t *types.Type, dostrcmp int) *types.Field { func nokeys(l ir.Nodes) bool { for _, n := range l.Slice() { - if n.Op == ir.OKEY || n.Op == ir.OSTRUCTKEY { + if n.Op() == ir.OKEY || n.Op() == ir.OSTRUCTKEY { return false } } @@ -2625,7 +2625,7 @@ func typecheckaste(op ir.Op, call *ir.Node, isddd bool, tstruct *types.Type, nl } n = nl.Index(i) setlineno(n) - if n.Type != nil { + if n.Type() != nil { nl.SetIndex(i, assignconvfn(n, t, desc)) } return @@ -2635,7 +2635,7 @@ func typecheckaste(op ir.Op, call *ir.Node, isddd bool, tstruct *types.Type, nl for ; i < nl.Len(); i++ { n = nl.Index(i) setlineno(n) - if n.Type != nil { + if n.Type() != nil { nl.SetIndex(i, assignconvfn(n, t.Elem(), desc)) } } @@ -2647,7 +2647,7 @@ func typecheckaste(op ir.Op, call *ir.Node, isddd bool, tstruct *types.Type, nl } n = nl.Index(i) setlineno(n) - if n.Type != nil { + if n.Type() != nil { nl.SetIndex(i, assignconvfn(n, t, desc)) } i++ @@ -2666,13 +2666,13 @@ func typecheckaste(op ir.Op, call *ir.Node, isddd bool, tstruct *types.Type, nl return notenough: - if n == nil || (!n.Diag() && n.Type != nil) { + if n == nil || (!n.Diag() && n.Type() != nil) { details := errorDetails(nl, tstruct, isddd) if call != nil { // call is the expression being called, not the overall call. // Method expressions have the form T.M, and the compiler has // rewritten those to ONAME nodes but left T in Left. 
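Aside (illustrative, not part of the patch): a method expression T.M denotes an ordinary function whose first parameter is the receiver, which is why the not-enough-arguments case below gets specialized wording. Example with arbitrary names:

	package main

	import "fmt"

	type point struct{ x, y int }

	func (p point) sum() int { return p.x + p.y }

	func main() {
		f := point.sum              // func(point) int: receiver is parameter 0
		fmt.Println(f(point{1, 2})) // 3
		// Calling it with no receiver argument, as in point.sum(), reports
		// "not enough arguments in call to method expression point.sum".
	}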
- if call.Op == ir.OMETHEXPR { + if call.Op() == ir.OMETHEXPR { base.Errorf("not enough arguments in call to method expression %v%s", call, details) } else { base.Errorf("not enough arguments in call to %v%s", call, details) @@ -2703,7 +2703,7 @@ func errorDetails(nl ir.Nodes, tstruct *types.Type, isddd bool) string { } // If any node has an unknown type, suppress it as well for _, n := range nl.Slice() { - if n.Type == nil { + if n.Type() == nil { return "" } } @@ -2747,7 +2747,7 @@ func fmtSignature(nl ir.Nodes, isddd bool) string { var typeStrings []string for i, n := range nl.Slice() { isdddArg := isddd && i == nl.Len()-1 - typeStrings = append(typeStrings, sigrepr(n.Type, isdddArg)) + typeStrings = append(typeStrings, sigrepr(n.Type(), isdddArg)) } return fmt.Sprintf("(%s)", strings.Join(typeStrings, ", ")) @@ -2775,20 +2775,20 @@ func iscomptype(t *types.Type) bool { // pushtype adds elided type information for composite literals if // appropriate, and returns the resulting expression. func pushtype(n *ir.Node, t *types.Type) *ir.Node { - if n == nil || n.Op != ir.OCOMPLIT || n.Right != nil { + if n == nil || n.Op() != ir.OCOMPLIT || n.Right() != nil { return n } switch { case iscomptype(t): // For T, return T{...}. - n.Right = typenod(t) + n.SetRight(typenod(t)) case t.IsPtr() && iscomptype(t.Elem()): // For *T, return &T{...}. - n.Right = typenod(t.Elem()) + n.SetRight(typenod(t.Elem())) - n = ir.NodAt(n.Pos, ir.OADDR, n, nil) + n = ir.NodAt(n.Pos(), ir.OADDR, n, nil) n.SetImplicit(true) } @@ -2807,90 +2807,90 @@ func typecheckcomplit(n *ir.Node) (res *ir.Node) { base.Pos = lno }() - if n.Right == nil { - base.ErrorfAt(n.Pos, "missing type in composite literal") - n.Type = nil + if n.Right() == nil { + base.ErrorfAt(n.Pos(), "missing type in composite literal") + n.SetType(nil) return n } // Save original node (including n.Right) - n.Orig = ir.Copy(n) + n.SetOrig(ir.Copy(n)) - setlineno(n.Right) + setlineno(n.Right()) // Need to handle [...]T arrays specially. 
- if n.Right.Op == ir.OTARRAY && n.Right.Left != nil && n.Right.Left.Op == ir.ODDD { - n.Right.Right = typecheck(n.Right.Right, ctxType) - if n.Right.Right.Type == nil { - n.Type = nil + if n.Right().Op() == ir.OTARRAY && n.Right().Left() != nil && n.Right().Left().Op() == ir.ODDD { + n.Right().SetRight(typecheck(n.Right().Right(), ctxType)) + if n.Right().Right().Type() == nil { + n.SetType(nil) return n } - elemType := n.Right.Right.Type + elemType := n.Right().Right().Type() - length := typecheckarraylit(elemType, -1, n.List.Slice(), "array literal") + length := typecheckarraylit(elemType, -1, n.List().Slice(), "array literal") - n.Op = ir.OARRAYLIT - n.Type = types.NewArray(elemType, length) - n.Right = nil + n.SetOp(ir.OARRAYLIT) + n.SetType(types.NewArray(elemType, length)) + n.SetRight(nil) return n } - n.Right = typecheck(n.Right, ctxType) - t := n.Right.Type + n.SetRight(typecheck(n.Right(), ctxType)) + t := n.Right().Type() if t == nil { - n.Type = nil + n.SetType(nil) return n } - n.Type = t + n.SetType(t) switch t.Etype { default: base.Errorf("invalid composite literal type %v", t) - n.Type = nil + n.SetType(nil) case types.TARRAY: - typecheckarraylit(t.Elem(), t.NumElem(), n.List.Slice(), "array literal") - n.Op = ir.OARRAYLIT - n.Right = nil + typecheckarraylit(t.Elem(), t.NumElem(), n.List().Slice(), "array literal") + n.SetOp(ir.OARRAYLIT) + n.SetRight(nil) case types.TSLICE: - length := typecheckarraylit(t.Elem(), -1, n.List.Slice(), "slice literal") - n.Op = ir.OSLICELIT - n.Right = nodintconst(length) + length := typecheckarraylit(t.Elem(), -1, n.List().Slice(), "slice literal") + n.SetOp(ir.OSLICELIT) + n.SetRight(nodintconst(length)) case types.TMAP: var cs constSet - for i3, l := range n.List.Slice() { + for i3, l := range n.List().Slice() { setlineno(l) - if l.Op != ir.OKEY { - n.List.SetIndex(i3, typecheck(l, ctxExpr)) + if l.Op() != ir.OKEY { + n.List().SetIndex(i3, typecheck(l, ctxExpr)) base.Errorf("missing key in map literal") continue } - r := l.Left + r := l.Left() r = pushtype(r, t.Key()) r = typecheck(r, ctxExpr) - l.Left = assignconv(r, t.Key(), "map key") - cs.add(base.Pos, l.Left, "key", "map literal") + l.SetLeft(assignconv(r, t.Key(), "map key")) + cs.add(base.Pos, l.Left(), "key", "map literal") - r = l.Right + r = l.Right() r = pushtype(r, t.Elem()) r = typecheck(r, ctxExpr) - l.Right = assignconv(r, t.Elem(), "map value") + l.SetRight(assignconv(r, t.Elem(), "map value")) } - n.Op = ir.OMAPLIT - n.Right = nil + n.SetOp(ir.OMAPLIT) + n.SetRight(nil) case types.TSTRUCT: // Need valid field offsets for Xoffset below. dowidth(t) errored := false - if n.List.Len() != 0 && nokeys(n.List) { + if n.List().Len() != 0 && nokeys(n.List()) { // simple list of variables - ls := n.List.Slice() + ls := n.List().Slice() for i, n1 := range ls { setlineno(n1) n1 = typecheck(n1, ctxExpr) @@ -2911,7 +2911,7 @@ func typecheckcomplit(n *ir.Node) (res *ir.Node) { // No pushtype allowed here. Must name fields for that. 
n1 = assignconv(n1, f.Type, "field value") n1 = nodSym(ir.OSTRUCTKEY, n1, f.Sym) - n1.Xoffset = f.Offset + n1.SetOffset(f.Offset) ls[i] = n1 } if len(ls) < t.NumFields() { @@ -2921,41 +2921,41 @@ func typecheckcomplit(n *ir.Node) (res *ir.Node) { hash := make(map[string]bool) // keyed list - ls := n.List.Slice() + ls := n.List().Slice() for i, l := range ls { setlineno(l) - if l.Op == ir.OKEY { - key := l.Left + if l.Op() == ir.OKEY { + key := l.Left() - l.Op = ir.OSTRUCTKEY - l.Left = l.Right - l.Right = nil + l.SetOp(ir.OSTRUCTKEY) + l.SetLeft(l.Right()) + l.SetRight(nil) // An OXDOT uses the Sym field to hold // the field to the right of the dot, // so s will be non-nil, but an OXDOT // is never a valid struct literal key. - if key.Sym == nil || key.Op == ir.OXDOT || key.Sym.IsBlank() { + if key.Sym() == nil || key.Op() == ir.OXDOT || key.Sym().IsBlank() { base.Errorf("invalid field name %v in struct initializer", key) - l.Left = typecheck(l.Left, ctxExpr) + l.SetLeft(typecheck(l.Left(), ctxExpr)) continue } // Sym might have resolved to name in other top-level // package, because of import dot. Redirect to correct sym // before we do the lookup. - s := key.Sym + s := key.Sym() if s.Pkg != ir.LocalPkg && types.IsExported(s.Name) { s1 := lookup(s.Name) if s1.Origpkg == s.Pkg { s = s1 } } - l.Sym = s + l.SetSym(s) } - if l.Op != ir.OSTRUCTKEY { + if l.Op() != ir.OSTRUCTKEY { if !errored { base.Errorf("mixture of field:value and value initializers") errored = true @@ -2964,22 +2964,22 @@ func typecheckcomplit(n *ir.Node) (res *ir.Node) { continue } - f := lookdot1(nil, l.Sym, t, t.Fields(), 0) + f := lookdot1(nil, l.Sym(), t, t.Fields(), 0) if f == nil { - if ci := lookdot1(nil, l.Sym, t, t.Fields(), 2); ci != nil { // Case-insensitive lookup. + if ci := lookdot1(nil, l.Sym(), t, t.Fields(), 2); ci != nil { // Case-insensitive lookup. if visible(ci.Sym) { - base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", l.Sym, t, ci.Sym) - } else if nonexported(l.Sym) && l.Sym.Name == ci.Sym.Name { // Ensure exactness before the suggestion. - base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", l.Sym, t) + base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", l.Sym(), t, ci.Sym) + } else if nonexported(l.Sym()) && l.Sym().Name == ci.Sym.Name { // Ensure exactness before the suggestion. + base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", l.Sym(), t) } else { - base.Errorf("unknown field '%v' in struct literal of type %v", l.Sym, t) + base.Errorf("unknown field '%v' in struct literal of type %v", l.Sym(), t) } continue } var f *types.Field - p, _ := dotpath(l.Sym, t, &f, true) + p, _ := dotpath(l.Sym(), t, &f, true) if p == nil || f.IsMethod() { - base.Errorf("unknown field '%v' in struct literal of type %v", l.Sym, t) + base.Errorf("unknown field '%v' in struct literal of type %v", l.Sym(), t) continue } // dotpath returns the parent embedded types in reverse order. @@ -2987,21 +2987,21 @@ func typecheckcomplit(n *ir.Node) (res *ir.Node) { for ei := len(p) - 1; ei >= 0; ei-- { ep = append(ep, p[ei].field.Sym.Name) } - ep = append(ep, l.Sym.Name) + ep = append(ep, l.Sym().Name) base.Errorf("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), t) continue } fielddup(f.Sym.Name, hash) - l.Xoffset = f.Offset + l.SetOffset(f.Offset) // No pushtype allowed here. Tried and rejected. 
- l.Left = typecheck(l.Left, ctxExpr) - l.Left = assignconv(l.Left, f.Type, "field value") + l.SetLeft(typecheck(l.Left(), ctxExpr)) + l.SetLeft(assignconv(l.Left(), f.Type, "field value")) } } - n.Op = ir.OSTRUCTLIT - n.Right = nil + n.SetOp(ir.OSTRUCTLIT) + n.SetRight(nil) } return n @@ -3013,7 +3013,7 @@ func typecheckarraylit(elemType *types.Type, bound int64, elts []*ir.Node, ctx s // keys so we can check for duplicate indices. var indices map[int64]bool for _, elt := range elts { - if elt.Op == ir.OKEY { + if elt.Op() == ir.OKEY { indices = make(map[int64]bool) break } @@ -3024,29 +3024,29 @@ func typecheckarraylit(elemType *types.Type, bound int64, elts []*ir.Node, ctx s setlineno(elt) r := elts[i] var kv *ir.Node - if elt.Op == ir.OKEY { - elt.Left = typecheck(elt.Left, ctxExpr) - key = indexconst(elt.Left) + if elt.Op() == ir.OKEY { + elt.SetLeft(typecheck(elt.Left(), ctxExpr)) + key = indexconst(elt.Left()) if key < 0 { - if !elt.Left.Diag() { + if !elt.Left().Diag() { if key == -2 { base.Errorf("index too large") } else { base.Errorf("index must be non-negative integer constant") } - elt.Left.SetDiag(true) + elt.Left().SetDiag(true) } key = -(1 << 30) // stay negative for a while } kv = elt - r = elt.Right + r = elt.Right() } r = pushtype(r, elemType) r = typecheck(r, ctxExpr) r = assignconv(r, elemType, ctx) if kv != nil { - kv.Right = r + kv.SetRight(r) } else { elts[i] = r } @@ -3087,12 +3087,12 @@ func nonexported(sym *types.Sym) bool { // lvalue etc func islvalue(n *ir.Node) bool { - switch n.Op { + switch n.Op() { case ir.OINDEX: - if n.Left.Type != nil && n.Left.Type.IsArray() { - return islvalue(n.Left) + if n.Left().Type() != nil && n.Left().Type().IsArray() { + return islvalue(n.Left()) } - if n.Left.Type != nil && n.Left.Type.IsString() { + if n.Left().Type() != nil && n.Left().Type().IsString() { return false } fallthrough @@ -3100,7 +3100,7 @@ func islvalue(n *ir.Node) bool { return true case ir.ODOT: - return islvalue(n.Left) + return islvalue(n.Left()) case ir.ONAME: if n.Class() == ir.PFUNC { @@ -3120,12 +3120,12 @@ func checklvalue(n *ir.Node, verb string) { func checkassign(stmt *ir.Node, n *ir.Node) { // Variables declared in ORANGE are assigned on every iteration. 
- if n.Name == nil || n.Name.Defn != stmt || stmt.Op == ir.ORANGE { + if n.Name() == nil || n.Name().Defn != stmt || stmt.Op() == ir.ORANGE { r := outervalue(n) - if r.Op == ir.ONAME { - r.Name.SetAssigned(true) - if r.Name.IsClosureVar() { - r.Name.Defn.Name.SetAssigned(true) + if r.Op() == ir.ONAME { + r.Name().SetAssigned(true) + if r.Name().IsClosureVar() { + r.Name().Defn.Name().SetAssigned(true) } } } @@ -3133,27 +3133,27 @@ func checkassign(stmt *ir.Node, n *ir.Node) { if islvalue(n) { return } - if n.Op == ir.OINDEXMAP { + if n.Op() == ir.OINDEXMAP { n.SetIndexMapLValue(true) return } // have already complained about n being invalid - if n.Type == nil { + if n.Type() == nil { return } switch { - case n.Op == ir.ODOT && n.Left.Op == ir.OINDEXMAP: + case n.Op() == ir.ODOT && n.Left().Op() == ir.OINDEXMAP: base.Errorf("cannot assign to struct field %v in map", n) - case (n.Op == ir.OINDEX && n.Left.Type.IsString()) || n.Op == ir.OSLICESTR: + case (n.Op() == ir.OINDEX && n.Left().Type().IsString()) || n.Op() == ir.OSLICESTR: base.Errorf("cannot assign to %v (strings are immutable)", n) - case n.Op == ir.OLITERAL && n.Sym != nil && isGoConst(n): + case n.Op() == ir.OLITERAL && n.Sym() != nil && isGoConst(n): base.Errorf("cannot assign to %v (declared const)", n) default: base.Errorf("cannot assign to %v", n) } - n.Type = nil + n.SetType(nil) } func checkassignlist(stmt *ir.Node, l ir.Nodes) { @@ -3178,29 +3178,29 @@ func checkassignlist(stmt *ir.Node, l ir.Nodes) { // lvalue expression is for OSLICE and OAPPEND optimizations, and it // is correct in those settings. func samesafeexpr(l *ir.Node, r *ir.Node) bool { - if l.Op != r.Op || !types.Identical(l.Type, r.Type) { + if l.Op() != r.Op() || !types.Identical(l.Type(), r.Type()) { return false } - switch l.Op { + switch l.Op() { case ir.ONAME, ir.OCLOSUREVAR: return l == r case ir.ODOT, ir.ODOTPTR: - return l.Sym != nil && r.Sym != nil && l.Sym == r.Sym && samesafeexpr(l.Left, r.Left) + return l.Sym() != nil && r.Sym() != nil && l.Sym() == r.Sym() && samesafeexpr(l.Left(), r.Left()) case ir.ODEREF, ir.OCONVNOP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG: - return samesafeexpr(l.Left, r.Left) + return samesafeexpr(l.Left(), r.Left()) case ir.OCONV: // Some conversions can't be reused, such as []byte(str). // Allow only numeric-ish types. This is a bit conservative. - return issimple[l.Type.Etype] && samesafeexpr(l.Left, r.Left) + return issimple[l.Type().Etype] && samesafeexpr(l.Left(), r.Left()) case ir.OINDEX, ir.OINDEXMAP, ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD: - return samesafeexpr(l.Left, r.Left) && samesafeexpr(l.Right, r.Right) + return samesafeexpr(l.Left(), r.Left()) && samesafeexpr(l.Right(), r.Right()) case ir.OLITERAL: return constant.Compare(l.Val(), token.EQL, r.Val()) @@ -3227,30 +3227,30 @@ func typecheckas(n *ir.Node) { // if the variable has a type (ntype) then typechecking // will not look at defn, so it is okay (and desirable, // so that the conversion below happens). - n.Left = resolve(n.Left) + n.SetLeft(resolve(n.Left())) - if n.Left.Name == nil || n.Left.Name.Defn != n || n.Left.Name.Param.Ntype != nil { - n.Left = typecheck(n.Left, ctxExpr|ctxAssign) + if n.Left().Name() == nil || n.Left().Name().Defn != n || n.Left().Name().Param.Ntype != nil { + n.SetLeft(typecheck(n.Left(), ctxExpr|ctxAssign)) } // Use ctxMultiOK so we can emit an "N variables but M values" error // to be consistent with typecheckas2 (#26616). 
- n.Right = typecheck(n.Right, ctxExpr|ctxMultiOK) - checkassign(n, n.Left) - if n.Right != nil && n.Right.Type != nil { - if n.Right.Type.IsFuncArgStruct() { - base.Errorf("assignment mismatch: 1 variable but %v returns %d values", n.Right.Left, n.Right.Type.NumFields()) + n.SetRight(typecheck(n.Right(), ctxExpr|ctxMultiOK)) + checkassign(n, n.Left()) + if n.Right() != nil && n.Right().Type() != nil { + if n.Right().Type().IsFuncArgStruct() { + base.Errorf("assignment mismatch: 1 variable but %v returns %d values", n.Right().Left(), n.Right().Type().NumFields()) // Multi-value RHS isn't actually valid for OAS; nil out // to indicate failed typechecking. - n.Right.Type = nil - } else if n.Left.Type != nil { - n.Right = assignconv(n.Right, n.Left.Type, "assignment") + n.Right().SetType(nil) + } else if n.Left().Type() != nil { + n.SetRight(assignconv(n.Right(), n.Left().Type(), "assignment")) } } - if n.Left.Name != nil && n.Left.Name.Defn == n && n.Left.Name.Param.Ntype == nil { - n.Right = defaultlit(n.Right, nil) - n.Left.Type = n.Right.Type + if n.Left().Name() != nil && n.Left().Name().Defn == n && n.Left().Name().Param.Ntype == nil { + n.SetRight(defaultlit(n.Right(), nil)) + n.Left().SetType(n.Right().Type()) } // second half of dance. @@ -3258,16 +3258,16 @@ func typecheckas(n *ir.Node) { // just to get it over with. see dance above. n.SetTypecheck(1) - if n.Left.Typecheck() == 0 { - n.Left = typecheck(n.Left, ctxExpr|ctxAssign) + if n.Left().Typecheck() == 0 { + n.SetLeft(typecheck(n.Left(), ctxExpr|ctxAssign)) } - if !ir.IsBlank(n.Left) { - checkwidth(n.Left.Type) // ensure width is calculated for backend + if !ir.IsBlank(n.Left()) { + checkwidth(n.Left().Type()) // ensure width is calculated for backend } } func checkassignto(src *types.Type, dst *ir.Node) { - if op, why := assignop(src, dst.Type); op == ir.OXXX { + if op, why := assignop(src, dst.Type()); op == ir.OXXX { base.Errorf("cannot assign %v to %L in multiple assignment%s", src, dst, why) return } @@ -3278,73 +3278,73 @@ func typecheckas2(n *ir.Node) { defer tracePrint("typecheckas2", n)(nil) } - ls := n.List.Slice() + ls := n.List().Slice() for i1, n1 := range ls { // delicate little dance. 
n1 = resolve(n1)
 		ls[i1] = n1
 
-		if n1.Name == nil || n1.Name.Defn != n || n1.Name.Param.Ntype != nil {
+		if n1.Name() == nil || n1.Name().Defn != n || n1.Name().Param.Ntype != nil {
 			ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
 		}
 	}
 
-	cl := n.List.Len()
-	cr := n.Rlist.Len()
+	cl := n.List().Len()
+	cr := n.Rlist().Len()
 	if cl > 1 && cr == 1 {
-		n.Rlist.SetFirst(typecheck(n.Rlist.First(), ctxExpr|ctxMultiOK))
+		n.Rlist().SetFirst(typecheck(n.Rlist().First(), ctxExpr|ctxMultiOK))
 	} else {
-		typecheckslice(n.Rlist.Slice(), ctxExpr)
+		typecheckslice(n.Rlist().Slice(), ctxExpr)
 	}
-	checkassignlist(n, n.List)
+	checkassignlist(n, n.List())
 
 	var l *ir.Node
 	var r *ir.Node
 	if cl == cr {
 		// easy
-		ls := n.List.Slice()
-		rs := n.Rlist.Slice()
+		ls := n.List().Slice()
+		rs := n.Rlist().Slice()
 		for il, nl := range ls {
 			nr := rs[il]
-			if nl.Type != nil && nr.Type != nil {
-				rs[il] = assignconv(nr, nl.Type, "assignment")
+			if nl.Type() != nil && nr.Type() != nil {
+				rs[il] = assignconv(nr, nl.Type(), "assignment")
 			}
-			if nl.Name != nil && nl.Name.Defn == n && nl.Name.Param.Ntype == nil {
+			if nl.Name() != nil && nl.Name().Defn == n && nl.Name().Param.Ntype == nil {
 				rs[il] = defaultlit(rs[il], nil)
-				nl.Type = rs[il].Type
+				nl.SetType(rs[il].Type())
 			}
 		}
 
 		goto out
 	}
 
-	l = n.List.First()
-	r = n.Rlist.First()
+	l = n.List().First()
+	r = n.Rlist().First()
 
 	// x,y,z = f()
 	if cr == 1 {
-		if r.Type == nil {
+		if r.Type() == nil {
 			goto out
 		}
-		switch r.Op {
+		switch r.Op() {
 		case ir.OCALLMETH, ir.OCALLINTER, ir.OCALLFUNC:
-			if !r.Type.IsFuncArgStruct() {
+			if !r.Type().IsFuncArgStruct() {
 				break
 			}
-			cr = r.Type.NumFields()
+			cr = r.Type().NumFields()
 			if cr != cl {
 				goto mismatch
 			}
-			n.Op = ir.OAS2FUNC
-			n.Right = r
-			n.Rlist.Set(nil)
-			for i, l := range n.List.Slice() {
-				f := r.Type.Field(i)
-				if f.Type != nil && l.Type != nil {
+			n.SetOp(ir.OAS2FUNC)
+			n.SetRight(r)
+			n.PtrRlist().Set(nil)
+			for i, l := range n.List().Slice() {
+				f := r.Type().Field(i)
+				if f.Type != nil && l.Type() != nil {
 					checkassignto(f.Type, l)
 				}
-				if l.Name != nil && l.Name.Defn == n && l.Name.Param.Ntype == nil {
-					l.Type = f.Type
+				if l.Name() != nil && l.Name().Defn == n && l.Name().Param.Ntype == nil {
+					l.SetType(f.Type)
 				}
 			}
 			goto out
@@ -3353,51 +3353,51 @@ func typecheckas2(n *ir.Node) {
 
 	// x, ok = y
 	if cl == 2 && cr == 1 {
-		if r.Type == nil {
+		if r.Type() == nil {
 			goto out
 		}
-		switch r.Op {
+		switch r.Op() {
 		case ir.OINDEXMAP, ir.ORECV, ir.ODOTTYPE:
-			switch r.Op {
+			switch r.Op() {
 			case ir.OINDEXMAP:
-				n.Op = ir.OAS2MAPR
+				n.SetOp(ir.OAS2MAPR)
 			case ir.ORECV:
-				n.Op = ir.OAS2RECV
+				n.SetOp(ir.OAS2RECV)
 			case ir.ODOTTYPE:
-				n.Op = ir.OAS2DOTTYPE
-				r.Op = ir.ODOTTYPE2
+				n.SetOp(ir.OAS2DOTTYPE)
+				r.SetOp(ir.ODOTTYPE2)
 			}
-			n.Right = r
-			n.Rlist.Set(nil)
-			if l.Type != nil {
-				checkassignto(r.Type, l)
+			n.SetRight(r)
+			n.PtrRlist().Set(nil)
+			if l.Type() != nil {
+				checkassignto(r.Type(), l)
 			}
-			if l.Name != nil && l.Name.Defn == n {
-				l.Type = r.Type
+			if l.Name() != nil && l.Name().Defn == n {
+				l.SetType(r.Type())
 			}
-			l := n.List.Second()
-			if l.Type != nil && !l.Type.IsBoolean() {
+			l := n.List().Second()
+			if l.Type() != nil && !l.Type().IsBoolean() {
 				checkassignto(types.Types[types.TBOOL], l)
 			}
-			if l.Name != nil && l.Name.Defn == n && l.Name.Param.Ntype == nil {
-				l.Type = types.Types[types.TBOOL]
+			if l.Name() != nil && l.Name().Defn == n && l.Name().Param.Ntype == nil {
+				l.SetType(types.Types[types.TBOOL])
 			}
 			goto out
 		}
 	}
 
mismatch:
-	switch r.Op {
+	switch r.Op() {
 	default:
 		base.Errorf("assignment mismatch: %d variables but %d values", cl, cr)
 	case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
-		base.Errorf("assignment mismatch: %d variables but %v returns %d values", cl, r.Left, cr)
+		base.Errorf("assignment mismatch: %d variables but %v returns %d values", cl, r.Left(), cr)
 	}
 
 	// second half of dance
out:
 	n.SetTypecheck(1)
-	ls = n.List.Slice()
+	ls = n.List().Slice()
 	for i1, n1 := range ls {
 		if n1.Typecheck() == 0 {
 			ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
@@ -3411,50 +3411,50 @@ func typecheckfunc(n *ir.Node) {
 		defer tracePrint("typecheckfunc", n)(nil)
 	}
 
-	for _, ln := range n.Func.Dcl {
-		if ln.Op == ir.ONAME && (ln.Class() == ir.PPARAM || ln.Class() == ir.PPARAMOUT) {
-			ln.Name.Decldepth = 1
+	for _, ln := range n.Func().Dcl {
+		if ln.Op() == ir.ONAME && (ln.Class() == ir.PPARAM || ln.Class() == ir.PPARAMOUT) {
+			ln.Name().Decldepth = 1
 		}
 	}
 
-	n.Func.Nname = typecheck(n.Func.Nname, ctxExpr|ctxAssign)
-	t := n.Func.Nname.Type
+	n.Func().Nname = typecheck(n.Func().Nname, ctxExpr|ctxAssign)
+	t := n.Func().Nname.Type()
 	if t == nil {
 		return
 	}
-	n.Type = t
+	n.SetType(t)
 	rcvr := t.Recv()
-	if rcvr != nil && n.Func.Shortname != nil {
-		m := addmethod(n, n.Func.Shortname, t, true, n.Func.Pragma&ir.Nointerface != 0)
+	if rcvr != nil && n.Func().Shortname != nil {
+		m := addmethod(n, n.Func().Shortname, t, true, n.Func().Pragma&ir.Nointerface != 0)
 		if m == nil {
 			return
 		}
 
-		n.Func.Nname.Sym = methodSym(rcvr.Type, n.Func.Shortname)
-		declare(n.Func.Nname, ir.PFUNC)
+		n.Func().Nname.SetSym(methodSym(rcvr.Type, n.Func().Shortname))
+		declare(n.Func().Nname, ir.PFUNC)
 	}
 
-	if base.Ctxt.Flag_dynlink && !inimport && n.Func.Nname != nil {
-		makefuncsym(n.Func.Nname.Sym)
+	if base.Ctxt.Flag_dynlink && !inimport && n.Func().Nname != nil {
+		makefuncsym(n.Func().Nname.Sym())
 	}
 }
 
 // The result of stringtoruneslit MUST be assigned back to n, e.g.
 // 	n.Left = stringtoruneslit(n.Left)
 func stringtoruneslit(n *ir.Node) *ir.Node {
-	if n.Left.Op != ir.OLITERAL || n.Left.Val().Kind() != constant.String {
+	if n.Left().Op() != ir.OLITERAL || n.Left().Val().Kind() != constant.String {
 		base.Fatalf("stringtoarraylit %v", n)
 	}
 
 	var l []*ir.Node
 	i := 0
-	for _, r := range n.Left.StringVal() {
+	for _, r := range n.Left().StringVal() {
 		l = append(l, ir.Nod(ir.OKEY, nodintconst(int64(i)), nodintconst(int64(r))))
 		i++
 	}
 
-	nn := ir.Nod(ir.OCOMPLIT, nil, typenod(n.Type))
-	nn.List.Set(l)
+	nn := ir.Nod(ir.OCOMPLIT, nil, typenod(n.Type()))
+	nn.PtrList().Set(l)
 	nn = typecheck(nn, ctxExpr)
 	return nn
 }
@@ -3463,9 +3463,9 @@ var mapqueue []*ir.Node
 
 func checkMapKeys() {
 	for _, n := range mapqueue {
-		k := n.Type.MapType().Key
+		k := n.Type().MapType().Key
 		if !k.Broke() && !IsComparable(k) {
-			base.ErrorfAt(n.Pos, "invalid map key type %v", k)
+			base.ErrorfAt(n.Pos(), "invalid map key type %v", k)
 		}
 	}
 	mapqueue = nil
@@ -3487,9 +3487,9 @@ func setUnderlying(t, underlying *types.Type) {
 
 	// Restore unnecessarily clobbered attributes.
 	t.Nod = ir.AsTypesNode(n)
-	t.Sym = n.Sym
-	if n.Name != nil {
-		t.Vargen = n.Name.Vargen
+	t.Sym = n.Sym()
+	if n.Name() != nil {
+		t.Vargen = n.Name().Vargen
 	}
 	t.Cache = cache
 	t.SetDeferwidth(false)
@@ -3503,7 +3503,7 @@ func setUnderlying(t, underlying *types.Type) {
 	}
 
 	// Propagate go:notinheap pragma from the Name to the Type.
-	if n.Name != nil && n.Name.Param != nil && n.Name.Param.Pragma()&ir.NotInHeap != 0 {
+	if n.Name() != nil && n.Name().Param != nil && n.Name().Param.Pragma()&ir.NotInHeap != 0 {
 		t.SetNotInHeap(true)
 	}
 
@@ -3526,17 +3526,17 @@ func typecheckdeftype(n *ir.Node) {
 	}
 	n.SetTypecheck(1)
 
-	n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, ctxType)
-	t := n.Name.Param.Ntype.Type
+	n.Name().Param.Ntype = typecheck(n.Name().Param.Ntype, ctxType)
+	t := n.Name().Param.Ntype.Type()
 	if t == nil {
 		n.SetDiag(true)
-		n.Type = nil
-	} else if n.Type == nil {
+		n.SetType(nil)
+	} else if n.Type() == nil {
 		n.SetDiag(true)
 	} else {
 		// copy new type and clear fields
 		// that don't come along.
-		setUnderlying(n.Type, t)
+		setUnderlying(n.Type(), t)
 	}
 }
 
@@ -3547,13 +3547,13 @@ func typecheckdef(n *ir.Node) {
 
 	lno := setlineno(n)
 
-	if n.Op == ir.ONONAME {
+	if n.Op() == ir.ONONAME {
 		if !n.Diag() {
 			n.SetDiag(true)
 
 			// Note: adderrorname looks for this string and
 			// adds context about the outer expression
-			base.ErrorfAt(base.Pos, "undefined: %v", n.Sym)
+			base.ErrorfAt(base.Pos, "undefined: %v", n.Sym())
 		}
 		base.Pos = lno
 		return
@@ -3570,7 +3570,7 @@
 		fmt.Printf("typecheckdef loop:")
 		for i := len(typecheckdefstack) - 1; i >= 0; i-- {
 			n := typecheckdefstack[i]
-			fmt.Printf(" %v", n.Sym)
+			fmt.Printf(" %v", n.Sym())
 		}
 		fmt.Printf("\n")
 		base.Fatalf("typecheckdef loop")
@@ -3578,82 +3578,82 @@
 
 	n.SetWalkdef(2)
 
-	if n.Type != nil || n.Sym == nil { // builtin or no name
+	if n.Type() != nil || n.Sym() == nil { // builtin or no name
 		goto ret
 	}
 
-	switch n.Op {
+	switch n.Op() {
 	default:
-		base.Fatalf("typecheckdef %v", n.Op)
+		base.Fatalf("typecheckdef %v", n.Op())
 
 	case ir.OLITERAL:
-		if n.Name.Param.Ntype != nil {
-			n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, ctxType)
-			n.Type = n.Name.Param.Ntype.Type
-			n.Name.Param.Ntype = nil
-			if n.Type == nil {
+		if n.Name().Param.Ntype != nil {
+			n.Name().Param.Ntype = typecheck(n.Name().Param.Ntype, ctxType)
+			n.SetType(n.Name().Param.Ntype.Type())
+			n.Name().Param.Ntype = nil
+			if n.Type() == nil {
 				n.SetDiag(true)
 				goto ret
 			}
 		}
 
-		e := n.Name.Defn
-		n.Name.Defn = nil
+		e := n.Name().Defn
+		n.Name().Defn = nil
 		if e == nil {
 			ir.Dump("typecheckdef nil defn", n)
-			base.ErrorfAt(n.Pos, "xxx")
+			base.ErrorfAt(n.Pos(), "xxx")
 		}
 
 		e = typecheck(e, ctxExpr)
-		if e.Type == nil {
+		if e.Type() == nil {
 			goto ret
 		}
 		if !isGoConst(e) {
 			if !e.Diag() {
-				if e.Op == ir.ONIL {
-					base.ErrorfAt(n.Pos, "const initializer cannot be nil")
+				if e.Op() == ir.ONIL {
+					base.ErrorfAt(n.Pos(), "const initializer cannot be nil")
 				} else {
-					base.ErrorfAt(n.Pos, "const initializer %v is not a constant", e)
+					base.ErrorfAt(n.Pos(), "const initializer %v is not a constant", e)
 				}
 				e.SetDiag(true)
 			}
 			goto ret
 		}
 
-		t := n.Type
+		t := n.Type()
 		if t != nil {
 			if !ir.OKForConst[t.Etype] {
-				base.ErrorfAt(n.Pos, "invalid constant type %v", t)
+				base.ErrorfAt(n.Pos(), "invalid constant type %v", t)
 				goto ret
 			}
 
-			if !e.Type.IsUntyped() && !types.Identical(t, e.Type) {
-				base.ErrorfAt(n.Pos, "cannot use %L as type %v in const initializer", e, t)
+			if !e.Type().IsUntyped() && !types.Identical(t, e.Type()) {
+				base.ErrorfAt(n.Pos(), "cannot use %L as type %v in const initializer", e, t)
 				goto ret
 			}
 
			e = convlit(e, t)
 		}
 
-		n.Type = e.Type
-		if n.Type != nil {
+		n.SetType(e.Type())
+		if n.Type() != nil {
 			n.SetVal(e.Val())
 		}
 
 	case ir.ONAME:
-		if n.Name.Param.Ntype != nil {
-			n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, ctxType)
-			n.Type = n.Name.Param.Ntype.Type
-			if n.Type == nil {
+		if n.Name().Param.Ntype != nil {
+			n.Name().Param.Ntype = typecheck(n.Name().Param.Ntype, ctxType)
+			n.SetType(n.Name().Param.Ntype.Type())
+			if n.Type() == nil {
 				n.SetDiag(true)
 				goto ret
 			}
 		}
 
-		if n.Type != nil {
+		if n.Type() != nil {
 			break
 		}
 
-		if n.Name.Defn == nil {
+		if n.Name().Defn == nil {
 			if n.SubOp() != 0 { // like OPRINTN
 				break
 			}
@@ -3665,33 +3665,33 @@
 				break
 			}
 
-			base.Fatalf("var without type, init: %v", n.Sym)
+			base.Fatalf("var without type, init: %v", n.Sym())
 		}
 
-		if n.Name.Defn.Op == ir.ONAME {
-			n.Name.Defn = typecheck(n.Name.Defn, ctxExpr)
-			n.Type = n.Name.Defn.Type
+		if n.Name().Defn.Op() == ir.ONAME {
+			n.Name().Defn = typecheck(n.Name().Defn, ctxExpr)
+			n.SetType(n.Name().Defn.Type())
 			break
 		}
 
-		n.Name.Defn = typecheck(n.Name.Defn, ctxStmt) // fills in n.Type
+		n.Name().Defn = typecheck(n.Name().Defn, ctxStmt) // fills in n.Type
 
 	case ir.OTYPE:
-		if p := n.Name.Param; p.Alias() {
+		if p := n.Name().Param; p.Alias() {
 			// Type alias declaration: Simply use the rhs type - no need
 			// to create a new type.
 			// If we have a syntax error, p.Ntype may be nil.
 			if p.Ntype != nil {
 				p.Ntype = typecheck(p.Ntype, ctxType)
-				n.Type = p.Ntype.Type
-				if n.Type == nil {
+				n.SetType(p.Ntype.Type())
+				if n.Type() == nil {
 					n.SetDiag(true)
 					goto ret
 				}
 				// For package-level type aliases, set n.Sym.Def so we can identify
 				// it as a type alias during export. See also #31959.
-				if n.Name.Curfn == nil {
-					n.Sym.Def = ir.AsTypesNode(p.Ntype)
+				if n.Name().Curfn == nil {
+					n.Sym().Def = ir.AsTypesNode(p.Ntype)
 				}
 			}
 			break
@@ -3701,20 +3701,20 @@
 		defercheckwidth()
 		n.SetWalkdef(1)
 		setTypeNode(n, types.New(types.TFORW))
-		n.Type.Sym = n.Sym
+		n.Type().Sym = n.Sym()
 		errorsBefore := base.Errors()
 		typecheckdeftype(n)
-		if n.Type.Etype == types.TFORW && base.Errors() > errorsBefore {
+		if n.Type().Etype == types.TFORW && base.Errors() > errorsBefore {
 			// Something went wrong during type-checking,
 			// but it was reported. Silence future errors.
-			n.Type.SetBroke(true)
+			n.Type().SetBroke(true)
 		}
 		resumecheckwidth()
 	}
 
ret:
-	if n.Op != ir.OLITERAL && n.Type != nil && n.Type.IsUntyped() {
-		base.Fatalf("got %v for %v", n.Type, n)
+	if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().IsUntyped() {
+		base.Fatalf("got %v for %v", n.Type(), n)
 	}
 	last := len(typecheckdefstack) - 1
 	if typecheckdefstack[last] != n {
@@ -3729,14 +3729,14 @@ ret:
 func checkmake(t *types.Type, arg string, np **ir.Node) bool {
 	n := *np
-	if !n.Type.IsInteger() && n.Type.Etype != types.TIDEAL {
-		base.Errorf("non-integer %s argument in make(%v) - %v", arg, t, n.Type)
+	if !n.Type().IsInteger() && n.Type().Etype != types.TIDEAL {
+		base.Errorf("non-integer %s argument in make(%v) - %v", arg, t, n.Type())
 		return false
 	}
 
 	// Do range checks for constants before defaultlit
 	// to avoid redundant "constant NNN overflows int" errors.
- if n.Op == ir.OLITERAL { + if n.Op() == ir.OLITERAL { v := toint(n.Val()) if constant.Sign(v) < 0 { base.Errorf("negative %s argument in make(%v)", arg, t) @@ -3764,14 +3764,14 @@ func markbreak(n *ir.Node, implicit *ir.Node) { return } - switch n.Op { + switch n.Op() { case ir.OBREAK: - if n.Sym == nil { + if n.Sym() == nil { if implicit != nil { implicit.SetHasBreak(true) } } else { - lab := ir.AsNode(n.Sym.Label) + lab := ir.AsNode(n.Sym().Label) if lab != nil { lab.SetHasBreak(true) } @@ -3780,12 +3780,12 @@ func markbreak(n *ir.Node, implicit *ir.Node) { implicit = n fallthrough default: - markbreak(n.Left, implicit) - markbreak(n.Right, implicit) - markbreaklist(n.Ninit, implicit) - markbreaklist(n.Nbody, implicit) - markbreaklist(n.List, implicit) - markbreaklist(n.Rlist, implicit) + markbreak(n.Left(), implicit) + markbreak(n.Right(), implicit) + markbreaklist(n.Init(), implicit) + markbreaklist(n.Body(), implicit) + markbreaklist(n.List(), implicit) + markbreaklist(n.Rlist(), implicit) } } @@ -3796,12 +3796,12 @@ func markbreaklist(l ir.Nodes, implicit *ir.Node) { if n == nil { continue } - if n.Op == ir.OLABEL && i+1 < len(s) && n.Name.Defn == s[i+1] { - switch n.Name.Defn.Op { + if n.Op() == ir.OLABEL && i+1 < len(s) && n.Name().Defn == s[i+1] { + switch n.Name().Defn.Op() { case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OTYPESW, ir.OSELECT, ir.ORANGE: - n.Sym.Label = ir.AsTypesNode(n.Name.Defn) - markbreak(n.Name.Defn, n.Name.Defn) - n.Sym.Label = nil + n.Sym().Label = ir.AsTypesNode(n.Name().Defn) + markbreak(n.Name().Defn, n.Name().Defn) + n.Sym().Label = nil i++ continue } @@ -3824,20 +3824,20 @@ func isTermNodes(l ir.Nodes) bool { // Isterminating reports whether the node n, the last one in a // statement list, is a terminating statement. func isTermNode(n *ir.Node) bool { - switch n.Op { + switch n.Op() { // NOTE: OLABEL is treated as a separate statement, // not a separate prefix, so skipping to the last statement // in the block handles the labeled statement case by // skipping over the label. No case OLABEL here. case ir.OBLOCK: - return isTermNodes(n.List) + return isTermNodes(n.List()) case ir.OGOTO, ir.ORETURN, ir.ORETJMP, ir.OPANIC, ir.OFALL: return true case ir.OFOR, ir.OFORUNTIL: - if n.Left != nil { + if n.Left() != nil { return false } if n.HasBreak() { @@ -3846,23 +3846,23 @@ func isTermNode(n *ir.Node) bool { return true case ir.OIF: - return isTermNodes(n.Nbody) && isTermNodes(n.Rlist) + return isTermNodes(n.Body()) && isTermNodes(n.Rlist()) case ir.OSWITCH, ir.OTYPESW, ir.OSELECT: if n.HasBreak() { return false } def := false - for _, n1 := range n.List.Slice() { - if !isTermNodes(n1.Nbody) { + for _, n1 := range n.List().Slice() { + if !isTermNodes(n1.Body()) { return false } - if n1.List.Len() == 0 { // default + if n1.List().Len() == 0 { // default def = true } } - if n.Op != ir.OSELECT && !def { + if n.Op() != ir.OSELECT && !def { return false } return true @@ -3873,35 +3873,35 @@ func isTermNode(n *ir.Node) bool { // checkreturn makes sure that fn terminates appropriately. 
func checkreturn(fn *ir.Node) { - if fn.Type.NumResults() != 0 && fn.Nbody.Len() != 0 { - markbreaklist(fn.Nbody, nil) - if !isTermNodes(fn.Nbody) { - base.ErrorfAt(fn.Func.Endlineno, "missing return at end of function") + if fn.Type().NumResults() != 0 && fn.Body().Len() != 0 { + markbreaklist(fn.Body(), nil) + if !isTermNodes(fn.Body()) { + base.ErrorfAt(fn.Func().Endlineno, "missing return at end of function") } } } func deadcode(fn *ir.Node) { - deadcodeslice(&fn.Nbody) + deadcodeslice(fn.PtrBody()) deadcodefn(fn) } func deadcodefn(fn *ir.Node) { - if fn.Nbody.Len() == 0 { + if fn.Body().Len() == 0 { return } - for _, n := range fn.Nbody.Slice() { - if n.Ninit.Len() > 0 { + for _, n := range fn.Body().Slice() { + if n.Init().Len() > 0 { return } - switch n.Op { + switch n.Op() { case ir.OIF: - if !ir.IsConst(n.Left, constant.Bool) || n.Nbody.Len() > 0 || n.Rlist.Len() > 0 { + if !ir.IsConst(n.Left(), constant.Bool) || n.Body().Len() > 0 || n.Rlist().Len() > 0 { return } case ir.OFOR: - if !ir.IsConst(n.Left, constant.Bool) || n.Left.BoolVal() { + if !ir.IsConst(n.Left(), constant.Bool) || n.Left().BoolVal() { return } default: @@ -3909,13 +3909,13 @@ func deadcodefn(fn *ir.Node) { } } - fn.Nbody.Set([]*ir.Node{ir.Nod(ir.OEMPTY, nil, nil)}) + fn.PtrBody().Set([]*ir.Node{ir.Nod(ir.OEMPTY, nil, nil)}) } func deadcodeslice(nn *ir.Nodes) { var lastLabel = -1 for i, n := range nn.Slice() { - if n != nil && n.Op == ir.OLABEL { + if n != nil && n.Op() == ir.OLABEL { lastLabel = i } } @@ -3927,16 +3927,16 @@ func deadcodeslice(nn *ir.Nodes) { if n == nil { continue } - if n.Op == ir.OIF { - n.Left = deadcodeexpr(n.Left) - if ir.IsConst(n.Left, constant.Bool) { + if n.Op() == ir.OIF { + n.SetLeft(deadcodeexpr(n.Left())) + if ir.IsConst(n.Left(), constant.Bool) { var body ir.Nodes - if n.Left.BoolVal() { - n.Rlist = ir.Nodes{} - body = n.Nbody + if n.Left().BoolVal() { + n.SetRlist(ir.Nodes{}) + body = n.Body() } else { - n.Nbody = ir.Nodes{} - body = n.Rlist + n.SetBody(ir.Nodes{}) + body = n.Rlist() } // If "then" or "else" branch ends with panic or return statement, // it is safe to remove all statements after this node. @@ -3944,7 +3944,7 @@ func deadcodeslice(nn *ir.Nodes) { // We must be careful not to deadcode-remove labels, as they // might be the target of a goto. See issue 28616. if body := body.Slice(); len(body) != 0 { - switch body[(len(body) - 1)].Op { + switch body[(len(body) - 1)].Op() { case ir.ORETURN, ir.ORETJMP, ir.OPANIC: if i > lastLabel { cut = true @@ -3954,10 +3954,10 @@ func deadcodeslice(nn *ir.Nodes) { } } - deadcodeslice(&n.Ninit) - deadcodeslice(&n.Nbody) - deadcodeslice(&n.List) - deadcodeslice(&n.Rlist) + deadcodeslice(n.PtrInit()) + deadcodeslice(n.PtrBody()) + deadcodeslice(n.PtrList()) + deadcodeslice(n.PtrRlist()) if cut { nn.Set(nn.Slice()[:i+1]) break @@ -3969,25 +3969,25 @@ func deadcodeexpr(n *ir.Node) *ir.Node { // Perform dead-code elimination on short-circuited boolean // expressions involving constants with the intent of // producing a constant 'if' condition. 
- switch n.Op { + switch n.Op() { case ir.OANDAND: - n.Left = deadcodeexpr(n.Left) - n.Right = deadcodeexpr(n.Right) - if ir.IsConst(n.Left, constant.Bool) { - if n.Left.BoolVal() { - return n.Right // true && x => x + n.SetLeft(deadcodeexpr(n.Left())) + n.SetRight(deadcodeexpr(n.Right())) + if ir.IsConst(n.Left(), constant.Bool) { + if n.Left().BoolVal() { + return n.Right() // true && x => x } else { - return n.Left // false && x => false + return n.Left() // false && x => false } } case ir.OOROR: - n.Left = deadcodeexpr(n.Left) - n.Right = deadcodeexpr(n.Right) - if ir.IsConst(n.Left, constant.Bool) { - if n.Left.BoolVal() { - return n.Left // true || x => true + n.SetLeft(deadcodeexpr(n.Left())) + n.SetRight(deadcodeexpr(n.Right())) + if ir.IsConst(n.Left(), constant.Bool) { + if n.Left().BoolVal() { + return n.Left() // true || x => true } else { - return n.Right // false || x => x + return n.Right() // false || x => x } } } @@ -3996,16 +3996,16 @@ func deadcodeexpr(n *ir.Node) *ir.Node { // setTypeNode sets n to an OTYPE node representing t. func setTypeNode(n *ir.Node, t *types.Type) { - n.Op = ir.OTYPE - n.Type = t - n.Type.Nod = ir.AsTypesNode(n) + n.SetOp(ir.OTYPE) + n.SetType(t) + n.Type().Nod = ir.AsTypesNode(n) } // getIotaValue returns the current value for "iota", // or -1 if not within a ConstSpec. func getIotaValue() int64 { if i := len(typecheckdefstack); i > 0 { - if x := typecheckdefstack[i-1]; x.Op == ir.OLITERAL { + if x := typecheckdefstack[i-1]; x.Op() == ir.OLITERAL { return x.Iota() } } @@ -4027,8 +4027,8 @@ func curpkg() *types.Pkg { // TODO(mdempsky): Standardize on either ODCLFUNC or ONAME for // Curfn, rather than mixing them. - if fn.Op == ir.ODCLFUNC { - fn = fn.Func.Nname + if fn.Op() == ir.ODCLFUNC { + fn = fn.Func().Nname } return fnpkg(fn) @@ -4043,12 +4043,12 @@ func methodExprName(n *ir.Node) *ir.Node { // MethodFunc is like MethodName, but returns the types.Field instead. 
func methodExprFunc(n *ir.Node) *types.Field { - switch n.Op { + switch n.Op() { case ir.ODOTMETH, ir.OMETHEXPR: return n.Opt().(*types.Field) case ir.OCALLPART: return callpartMethod(n) } - base.Fatalf("unexpected node: %v (%v)", n, n.Op) + base.Fatalf("unexpected node: %v (%v)", n, n.Op()) panic("unreachable") } diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index bf31055dcc1be..be22b7e9dbee9 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -110,7 +110,7 @@ func lexinit() { types.Types[etype] = t } s2.Def = ir.AsTypesNode(typenod(t)) - ir.AsNode(s2.Def).Name = new(ir.Name) + ir.AsNode(s2.Def).SetName(new(ir.Name)) } for _, s := range &builtinFuncs { @@ -131,39 +131,39 @@ func lexinit() { s := ir.BuiltinPkg.Lookup("true") s.Def = ir.AsTypesNode(nodbool(true)) - ir.AsNode(s.Def).Sym = lookup("true") - ir.AsNode(s.Def).Name = new(ir.Name) - ir.AsNode(s.Def).Type = types.UntypedBool + ir.AsNode(s.Def).SetSym(lookup("true")) + ir.AsNode(s.Def).SetName(new(ir.Name)) + ir.AsNode(s.Def).SetType(types.UntypedBool) s = ir.BuiltinPkg.Lookup("false") s.Def = ir.AsTypesNode(nodbool(false)) - ir.AsNode(s.Def).Sym = lookup("false") - ir.AsNode(s.Def).Name = new(ir.Name) - ir.AsNode(s.Def).Type = types.UntypedBool + ir.AsNode(s.Def).SetSym(lookup("false")) + ir.AsNode(s.Def).SetName(new(ir.Name)) + ir.AsNode(s.Def).SetType(types.UntypedBool) s = lookup("_") s.Block = -100 s.Def = ir.AsTypesNode(NewName(s)) types.Types[types.TBLANK] = types.New(types.TBLANK) - ir.AsNode(s.Def).Type = types.Types[types.TBLANK] + ir.AsNode(s.Def).SetType(types.Types[types.TBLANK]) ir.BlankNode = ir.AsNode(s.Def) s = ir.BuiltinPkg.Lookup("_") s.Block = -100 s.Def = ir.AsTypesNode(NewName(s)) types.Types[types.TBLANK] = types.New(types.TBLANK) - ir.AsNode(s.Def).Type = types.Types[types.TBLANK] + ir.AsNode(s.Def).SetType(types.Types[types.TBLANK]) types.Types[types.TNIL] = types.New(types.TNIL) s = ir.BuiltinPkg.Lookup("nil") s.Def = ir.AsTypesNode(nodnil()) - ir.AsNode(s.Def).Sym = s - ir.AsNode(s.Def).Name = new(ir.Name) + ir.AsNode(s.Def).SetSym(s) + ir.AsNode(s.Def).SetName(new(ir.Name)) s = ir.BuiltinPkg.Lookup("iota") s.Def = ir.AsTypesNode(ir.Nod(ir.OIOTA, nil, nil)) - ir.AsNode(s.Def).Sym = s - ir.AsNode(s.Def).Name = new(ir.Name) + ir.AsNode(s.Def).SetSym(s) + ir.AsNode(s.Def).SetName(new(ir.Name)) } func typeinit() { @@ -182,7 +182,7 @@ func typeinit() { types.Types[types.TUNSAFEPTR] = t t.Sym = unsafepkg.Lookup("Pointer") t.Sym.Def = ir.AsTypesNode(typenod(t)) - ir.AsNode(t.Sym.Def).Name = new(ir.Name) + ir.AsNode(t.Sym.Def).SetName(new(ir.Name)) dowidth(types.Types[types.TUNSAFEPTR]) for et := types.TINT8; et <= types.TUINT64; et++ { @@ -359,7 +359,7 @@ func lexinit1() { types.Bytetype = types.New(types.TUINT8) types.Bytetype.Sym = s s.Def = ir.AsTypesNode(typenod(types.Bytetype)) - ir.AsNode(s.Def).Name = new(ir.Name) + ir.AsNode(s.Def).SetName(new(ir.Name)) dowidth(types.Bytetype) // rune alias @@ -367,7 +367,7 @@ func lexinit1() { types.Runetype = types.New(types.TINT32) types.Runetype.Sym = s s.Def = ir.AsTypesNode(typenod(types.Runetype)) - ir.AsNode(s.Def).Name = new(ir.Name) + ir.AsNode(s.Def).SetName(new(ir.Name)) dowidth(types.Runetype) // backend-dependent builtin types (e.g. int). 
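The hunks in this change are mechanical: every direct read of an ir.Node field becomes a getter call (n.Op becomes n.Op(), n.Left becomes n.Left()) and every write becomes a setter call (n.Type = t becomes n.SetType(t)); list-valued fields additionally gain pointer accessors (n.PtrInit(), n.PtrList(), n.PtrBody()) for call sites that previously took &n.Ninit. What follows is a minimal, self-contained Go sketch of that accessor shape. It is hypothetical and heavily simplified for illustration; it is not the real cmd/compile/internal/ir API.

package main

import "fmt"

// Op stands in for ir.Op; two values suffice for the demo.
type Op int

const (
	ODEREF Op = iota
	ODOTINTER
)

// Node mimics the accessor style adopted above: fields are unexported
// and reached only through getter/setter pairs, so call sites no
// longer depend on the concrete field layout.
type Node struct {
	op   Op
	left *Node
	init []*Node
}

func (n *Node) Op() Op            { return n.op }
func (n *Node) SetOp(op Op)       { n.op = op }
func (n *Node) Left() *Node       { return n.left }
func (n *Node) SetLeft(l *Node)   { n.left = l }
func (n *Node) PtrInit() *[]*Node { return &n.init } // counterpart of taking &n.Ninit

func main() {
	n := &Node{op: ODOTINTER}
	n.SetLeft(&Node{op: ODEREF})
	*n.PtrInit() = append(*n.PtrInit(), n.Left())
	fmt.Println(n.Op(), n.Left().Op(), len(n.init)) // prints: 1 0 1
}

Routing every access through methods like this is what allows later changes to decouple call sites from the concrete struct layout without touching them again.
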
@@ -385,7 +385,7 @@ func lexinit1() { t.Sym = s1 types.Types[s.etype] = t s1.Def = ir.AsTypesNode(typenod(t)) - ir.AsNode(s1.Def).Name = new(ir.Name) + ir.AsNode(s1.Def).SetName(new(ir.Name)) s1.Origpkg = ir.BuiltinPkg dowidth(t) @@ -412,7 +412,7 @@ func finishUniverse() { } nodfp = NewName(lookup(".fp")) - nodfp.Type = types.Types[types.TINT32] + nodfp.SetType(types.Types[types.TINT32]) nodfp.SetClass(ir.PPARAM) - nodfp.Name.SetUsed(true) + nodfp.Name().SetUsed(true) } diff --git a/src/cmd/compile/internal/gc/unsafe.go b/src/cmd/compile/internal/gc/unsafe.go index fce79a631964c..c9b0dbcf2fcbf 100644 --- a/src/cmd/compile/internal/gc/unsafe.go +++ b/src/cmd/compile/internal/gc/unsafe.go @@ -11,23 +11,23 @@ import ( // evalunsafe evaluates a package unsafe operation and returns the result. func evalunsafe(n *ir.Node) int64 { - switch n.Op { + switch n.Op() { case ir.OALIGNOF, ir.OSIZEOF: - n.Left = typecheck(n.Left, ctxExpr) - n.Left = defaultlit(n.Left, nil) - tr := n.Left.Type + n.SetLeft(typecheck(n.Left(), ctxExpr)) + n.SetLeft(defaultlit(n.Left(), nil)) + tr := n.Left().Type() if tr == nil { return 0 } dowidth(tr) - if n.Op == ir.OALIGNOF { + if n.Op() == ir.OALIGNOF { return int64(tr.Align) } return tr.Width case ir.OOFFSETOF: // must be a selector. - if n.Left.Op != ir.OXDOT { + if n.Left().Op() != ir.OXDOT { base.Errorf("invalid expression %v", n) return 0 } @@ -35,14 +35,14 @@ func evalunsafe(n *ir.Node) int64 { // Remember base of selector to find it back after dot insertion. // Since r->left may be mutated by typechecking, check it explicitly // first to track it correctly. - n.Left.Left = typecheck(n.Left.Left, ctxExpr) - sbase := n.Left.Left + n.Left().SetLeft(typecheck(n.Left().Left(), ctxExpr)) + sbase := n.Left().Left() - n.Left = typecheck(n.Left, ctxExpr) - if n.Left.Type == nil { + n.SetLeft(typecheck(n.Left(), ctxExpr)) + if n.Left().Type() == nil { return 0 } - switch n.Left.Op { + switch n.Left().Op() { case ir.ODOT, ir.ODOTPTR: break case ir.OCALLPART: @@ -55,27 +55,27 @@ func evalunsafe(n *ir.Node) int64 { // Sum offsets for dots until we reach sbase. var v int64 - for r := n.Left; r != sbase; r = r.Left { - switch r.Op { + for r := n.Left(); r != sbase; r = r.Left() { + switch r.Op() { case ir.ODOTPTR: // For Offsetof(s.f), s may itself be a pointer, // but accessing f must not otherwise involve // indirection via embedded pointer types. 
- if r.Left != sbase { - base.Errorf("invalid expression %v: selector implies indirection of embedded %v", n, r.Left) + if r.Left() != sbase { + base.Errorf("invalid expression %v: selector implies indirection of embedded %v", n, r.Left()) return 0 } fallthrough case ir.ODOT: - v += r.Xoffset + v += r.Offset() default: - ir.Dump("unsafenmagic", n.Left) - base.Fatalf("impossible %#v node after dot insertion", r.Op) + ir.Dump("unsafenmagic", n.Left()) + base.Fatalf("impossible %#v node after dot insertion", r.Op()) } } return v } - base.Fatalf("unexpected op %v", n.Op) + base.Fatalf("unexpected op %v", n.Op()) return 0 } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 619a413b9e49c..77cf59bde8496 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -27,39 +27,39 @@ func walk(fn *ir.Node) { errorsBefore := base.Errors() if base.Flag.W != 0 { - s := fmt.Sprintf("\nbefore walk %v", Curfn.Func.Nname.Sym) - ir.DumpList(s, Curfn.Nbody) + s := fmt.Sprintf("\nbefore walk %v", Curfn.Func().Nname.Sym()) + ir.DumpList(s, Curfn.Body()) } lno := base.Pos // Final typecheck for any unused variables. - for i, ln := range fn.Func.Dcl { - if ln.Op == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) { + for i, ln := range fn.Func().Dcl { + if ln.Op() == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) { ln = typecheck(ln, ctxExpr|ctxAssign) - fn.Func.Dcl[i] = ln + fn.Func().Dcl[i] = ln } } // Propagate the used flag for typeswitch variables up to the NONAME in its definition. - for _, ln := range fn.Func.Dcl { - if ln.Op == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) && ln.Name.Defn != nil && ln.Name.Defn.Op == ir.OTYPESW && ln.Name.Used() { - ln.Name.Defn.Left.Name.SetUsed(true) + for _, ln := range fn.Func().Dcl { + if ln.Op() == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) && ln.Name().Defn != nil && ln.Name().Defn.Op() == ir.OTYPESW && ln.Name().Used() { + ln.Name().Defn.Left().Name().SetUsed(true) } } - for _, ln := range fn.Func.Dcl { - if ln.Op != ir.ONAME || (ln.Class() != ir.PAUTO && ln.Class() != ir.PAUTOHEAP) || ln.Sym.Name[0] == '&' || ln.Name.Used() { + for _, ln := range fn.Func().Dcl { + if ln.Op() != ir.ONAME || (ln.Class() != ir.PAUTO && ln.Class() != ir.PAUTOHEAP) || ln.Sym().Name[0] == '&' || ln.Name().Used() { continue } - if defn := ln.Name.Defn; defn != nil && defn.Op == ir.OTYPESW { - if defn.Left.Name.Used() { + if defn := ln.Name().Defn; defn != nil && defn.Op() == ir.OTYPESW { + if defn.Left().Name().Used() { continue } - base.ErrorfAt(defn.Left.Pos, "%v declared but not used", ln.Sym) - defn.Left.Name.SetUsed(true) // suppress repeats + base.ErrorfAt(defn.Left().Pos(), "%v declared but not used", ln.Sym()) + defn.Left().Name().SetUsed(true) // suppress repeats } else { - base.ErrorfAt(ln.Pos, "%v declared but not used", ln.Sym) + base.ErrorfAt(ln.Pos(), "%v declared but not used", ln.Sym()) } } @@ -67,17 +67,17 @@ func walk(fn *ir.Node) { if base.Errors() > errorsBefore { return } - walkstmtlist(Curfn.Nbody.Slice()) + walkstmtlist(Curfn.Body().Slice()) if base.Flag.W != 0 { - s := fmt.Sprintf("after walk %v", Curfn.Func.Nname.Sym) - ir.DumpList(s, Curfn.Nbody) + s := fmt.Sprintf("after walk %v", Curfn.Func().Nname.Sym()) + ir.DumpList(s, Curfn.Body()) } zeroResults() heapmoves() - if base.Flag.W != 0 && Curfn.Func.Enter.Len() > 0 { - s := fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym) - ir.DumpList(s, Curfn.Func.Enter) 
+ if base.Flag.W != 0 && Curfn.Func().Enter.Len() > 0 { + s := fmt.Sprintf("enter %v", Curfn.Func().Nname.Sym()) + ir.DumpList(s, Curfn.Func().Enter) } } @@ -88,10 +88,10 @@ func walkstmtlist(s []*ir.Node) { } func paramoutheap(fn *ir.Node) bool { - for _, ln := range fn.Func.Dcl { + for _, ln := range fn.Func().Dcl { switch ln.Class() { case ir.PPARAMOUT: - if isParamStackCopy(ln) || ln.Name.Addrtaken() { + if isParamStackCopy(ln) || ln.Name().Addrtaken() { return true } @@ -113,14 +113,14 @@ func walkstmt(n *ir.Node) *ir.Node { setlineno(n) - walkstmtlist(n.Ninit.Slice()) + walkstmtlist(n.Init().Slice()) - switch n.Op { + switch n.Op() { default: - if n.Op == ir.ONAME { - base.Errorf("%v is not a top level statement", n.Sym) + if n.Op() == ir.ONAME { + base.Errorf("%v is not a top level statement", n.Sym()) } else { - base.Errorf("%v is not a top level statement", n.Op) + base.Errorf("%v is not a top level statement", n.Op()) } ir.Dump("nottop", n) @@ -148,13 +148,13 @@ func walkstmt(n *ir.Node) *ir.Node { if n.Typecheck() == 0 { base.Fatalf("missing typecheck: %+v", n) } - wascopy := n.Op == ir.OCOPY - init := n.Ninit - n.Ninit.Set(nil) + wascopy := n.Op() == ir.OCOPY + init := n.Init() + n.PtrInit().Set(nil) n = walkexpr(n, &init) n = addinit(n, init.Slice()) - if wascopy && n.Op == ir.OCONVNOP { - n.Op = ir.OEMPTY // don't leave plain values as statements. + if wascopy && n.Op() == ir.OCONVNOP { + n.SetOp(ir.OEMPTY) // don't leave plain values as statements. } // special case for a receive where we throw away @@ -163,11 +163,11 @@ func walkstmt(n *ir.Node) *ir.Node { if n.Typecheck() == 0 { base.Fatalf("missing typecheck: %+v", n) } - init := n.Ninit - n.Ninit.Set(nil) + init := n.Init() + n.PtrInit().Set(nil) - n.Left = walkexpr(n.Left, &init) - n = mkcall1(chanfn("chanrecv1", 2, n.Left.Type), nil, &init, n.Left, nodnil()) + n.SetLeft(walkexpr(n.Left(), &init)) + n = mkcall1(chanfn("chanrecv1", 2, n.Left().Type()), nil, &init, n.Left(), nodnil()) n = walkexpr(n, &init) n = addinit(n, init.Slice()) @@ -186,138 +186,138 @@ func walkstmt(n *ir.Node) *ir.Node { break case ir.ODCL: - v := n.Left + v := n.Left() if v.Class() == ir.PAUTOHEAP { if base.Flag.CompilingRuntime { base.Errorf("%v escapes to heap, not allowed in runtime", v) } if prealloc[v] == nil { - prealloc[v] = callnew(v.Type) + prealloc[v] = callnew(v.Type()) } - nn := ir.Nod(ir.OAS, v.Name.Param.Heapaddr, prealloc[v]) + nn := ir.Nod(ir.OAS, v.Name().Param.Heapaddr, prealloc[v]) nn.SetColas(true) nn = typecheck(nn, ctxStmt) return walkstmt(nn) } case ir.OBLOCK: - walkstmtlist(n.List.Slice()) + walkstmtlist(n.List().Slice()) case ir.OCASE: base.Errorf("case statement out of place") case ir.ODEFER: - Curfn.Func.SetHasDefer(true) - Curfn.Func.NumDefers++ - if Curfn.Func.NumDefers > maxOpenDefers { + Curfn.Func().SetHasDefer(true) + Curfn.Func().NumDefers++ + if Curfn.Func().NumDefers > maxOpenDefers { // Don't allow open-coded defers if there are more than // 8 defers in the function, since we use a single // byte to record active defers. - Curfn.Func.SetOpenCodedDeferDisallowed(true) + Curfn.Func().SetOpenCodedDeferDisallowed(true) } - if n.Esc != EscNever { + if n.Esc() != EscNever { // If n.Esc is not EscNever, then this defer occurs in a loop, // so open-coded defers cannot be used in this function. 
- Curfn.Func.SetOpenCodedDeferDisallowed(true) + Curfn.Func().SetOpenCodedDeferDisallowed(true) } fallthrough case ir.OGO: - switch n.Left.Op { + switch n.Left().Op() { case ir.OPRINT, ir.OPRINTN: - n.Left = wrapCall(n.Left, &n.Ninit) + n.SetLeft(wrapCall(n.Left(), n.PtrInit())) case ir.ODELETE: - if mapfast(n.Left.List.First().Type) == mapslow { - n.Left = wrapCall(n.Left, &n.Ninit) + if mapfast(n.Left().List().First().Type()) == mapslow { + n.SetLeft(wrapCall(n.Left(), n.PtrInit())) } else { - n.Left = walkexpr(n.Left, &n.Ninit) + n.SetLeft(walkexpr(n.Left(), n.PtrInit())) } case ir.OCOPY: - n.Left = copyany(n.Left, &n.Ninit, true) + n.SetLeft(copyany(n.Left(), n.PtrInit(), true)) case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: - if n.Left.Nbody.Len() > 0 { - n.Left = wrapCall(n.Left, &n.Ninit) + if n.Left().Body().Len() > 0 { + n.SetLeft(wrapCall(n.Left(), n.PtrInit())) } else { - n.Left = walkexpr(n.Left, &n.Ninit) + n.SetLeft(walkexpr(n.Left(), n.PtrInit())) } default: - n.Left = walkexpr(n.Left, &n.Ninit) + n.SetLeft(walkexpr(n.Left(), n.PtrInit())) } case ir.OFOR, ir.OFORUNTIL: - if n.Left != nil { - walkstmtlist(n.Left.Ninit.Slice()) - init := n.Left.Ninit - n.Left.Ninit.Set(nil) - n.Left = walkexpr(n.Left, &init) - n.Left = addinit(n.Left, init.Slice()) + if n.Left() != nil { + walkstmtlist(n.Left().Init().Slice()) + init := n.Left().Init() + n.Left().PtrInit().Set(nil) + n.SetLeft(walkexpr(n.Left(), &init)) + n.SetLeft(addinit(n.Left(), init.Slice())) } - n.Right = walkstmt(n.Right) - if n.Op == ir.OFORUNTIL { - walkstmtlist(n.List.Slice()) + n.SetRight(walkstmt(n.Right())) + if n.Op() == ir.OFORUNTIL { + walkstmtlist(n.List().Slice()) } - walkstmtlist(n.Nbody.Slice()) + walkstmtlist(n.Body().Slice()) case ir.OIF: - n.Left = walkexpr(n.Left, &n.Ninit) - walkstmtlist(n.Nbody.Slice()) - walkstmtlist(n.Rlist.Slice()) + n.SetLeft(walkexpr(n.Left(), n.PtrInit())) + walkstmtlist(n.Body().Slice()) + walkstmtlist(n.Rlist().Slice()) case ir.ORETURN: - Curfn.Func.NumReturns++ - if n.List.Len() == 0 { + Curfn.Func().NumReturns++ + if n.List().Len() == 0 { break } - if (Curfn.Type.FuncType().Outnamed && n.List.Len() > 1) || paramoutheap(Curfn) { + if (Curfn.Type().FuncType().Outnamed && n.List().Len() > 1) || paramoutheap(Curfn) { // assign to the function out parameters, // so that reorder3 can fix up conflicts var rl []*ir.Node - for _, ln := range Curfn.Func.Dcl { + for _, ln := range Curfn.Func().Dcl { cl := ln.Class() if cl == ir.PAUTO || cl == ir.PAUTOHEAP { break } if cl == ir.PPARAMOUT { if isParamStackCopy(ln) { - ln = walkexpr(typecheck(ir.Nod(ir.ODEREF, ln.Name.Param.Heapaddr, nil), ctxExpr), nil) + ln = walkexpr(typecheck(ir.Nod(ir.ODEREF, ln.Name().Param.Heapaddr, nil), ctxExpr), nil) } rl = append(rl, ln) } } - if got, want := n.List.Len(), len(rl); got != want { + if got, want := n.List().Len(), len(rl); got != want { // order should have rewritten multi-value function calls // with explicit OAS2FUNC nodes. base.Fatalf("expected %v return arguments, have %v", want, got) } // move function calls out, to make reorder3's job easier. - walkexprlistsafe(n.List.Slice(), &n.Ninit) + walkexprlistsafe(n.List().Slice(), n.PtrInit()) - ll := ascompatee(n.Op, rl, n.List.Slice(), &n.Ninit) - n.List.Set(reorder3(ll)) + ll := ascompatee(n.Op(), rl, n.List().Slice(), n.PtrInit()) + n.PtrList().Set(reorder3(ll)) break } - walkexprlist(n.List.Slice(), &n.Ninit) + walkexprlist(n.List().Slice(), n.PtrInit()) // For each return parameter (lhs), assign the corresponding result (rhs). 
- lhs := Curfn.Type.Results() - rhs := n.List.Slice() + lhs := Curfn.Type().Results() + rhs := n.List().Slice() res := make([]*ir.Node, lhs.NumFields()) for i, nl := range lhs.FieldSlice() { nname := ir.AsNode(nl.Nname) if isParamHeapCopy(nname) { - nname = nname.Name.Param.Stackcopy + nname = nname.Name().Param.Stackcopy } a := ir.Nod(ir.OAS, nname, rhs[i]) - res[i] = convas(a, &n.Ninit) + res[i] = convas(a, n.PtrInit()) } - n.List.Set(res) + n.PtrList().Set(res) case ir.ORETJMP: break @@ -335,7 +335,7 @@ func walkstmt(n *ir.Node) *ir.Node { n = walkrange(n) } - if n.Op == ir.ONAME { + if n.Op() == ir.ONAME { base.Fatalf("walkstmt ended up with name: %+v", n) } return n @@ -419,24 +419,24 @@ func walkexpr(n *ir.Node, init *ir.Nodes) *ir.Node { } // Eagerly checkwidth all expressions for the back end. - if n.Type != nil && !n.Type.WidthCalculated() { - switch n.Type.Etype { + if n.Type() != nil && !n.Type().WidthCalculated() { + switch n.Type().Etype { case types.TBLANK, types.TNIL, types.TIDEAL: default: - checkwidth(n.Type) + checkwidth(n.Type()) } } - if init == &n.Ninit { + if init == n.PtrInit() { // not okay to use n->ninit when walking n, // because we might replace n with some other node // and would lose the init list. base.Fatalf("walkexpr init == &n->ninit") } - if n.Ninit.Len() != 0 { - walkstmtlist(n.Ninit.Slice()) - init.AppendNodes(&n.Ninit) + if n.Init().Len() != 0 { + walkstmtlist(n.Init().Slice()) + init.AppendNodes(n.PtrInit()) } lno := setlineno(n) @@ -449,20 +449,20 @@ func walkexpr(n *ir.Node, init *ir.Nodes) *ir.Node { base.Fatalf("missed typecheck: %+v", n) } - if n.Type.IsUntyped() { + if n.Type().IsUntyped() { base.Fatalf("expression has untyped type: %+v", n) } - if n.Op == ir.ONAME && n.Class() == ir.PAUTOHEAP { - nn := ir.Nod(ir.ODEREF, n.Name.Param.Heapaddr, nil) + if n.Op() == ir.ONAME && n.Class() == ir.PAUTOHEAP { + nn := ir.Nod(ir.ODEREF, n.Name().Param.Heapaddr, nil) nn = typecheck(nn, ctxExpr) nn = walkexpr(nn, init) - nn.Left.MarkNonNil() + nn.Left().MarkNonNil() return nn } opswitch: - switch n.Op { + switch n.Op() { default: ir.Dump("walk", n) base.Fatalf("walkexpr: switch 1 unknown op %+S", n) @@ -477,134 +477,134 @@ opswitch: case ir.ONOT, ir.ONEG, ir.OPLUS, ir.OBITNOT, ir.OREAL, ir.OIMAG, ir.ODOTMETH, ir.ODOTINTER, ir.ODEREF, ir.OSPTR, ir.OITAB, ir.OIDATA, ir.OADDR: - n.Left = walkexpr(n.Left, init) + n.SetLeft(walkexpr(n.Left(), init)) case ir.OEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH: - n.Left = walkexpr(n.Left, init) - n.Right = walkexpr(n.Right, init) + n.SetLeft(walkexpr(n.Left(), init)) + n.SetRight(walkexpr(n.Right(), init)) case ir.ODOT, ir.ODOTPTR: usefield(n) - n.Left = walkexpr(n.Left, init) + n.SetLeft(walkexpr(n.Left(), init)) case ir.ODOTTYPE, ir.ODOTTYPE2: - n.Left = walkexpr(n.Left, init) + n.SetLeft(walkexpr(n.Left(), init)) // Set up interface type addresses for back end. - n.Right = typename(n.Type) - if n.Op == ir.ODOTTYPE { - n.Right.Right = typename(n.Left.Type) + n.SetRight(typename(n.Type())) + if n.Op() == ir.ODOTTYPE { + n.Right().SetRight(typename(n.Left().Type())) } - if !n.Type.IsInterface() && !n.Left.Type.IsEmptyInterface() { - n.List.Set1(itabname(n.Type, n.Left.Type)) + if !n.Type().IsInterface() && !n.Left().Type().IsEmptyInterface() { + n.PtrList().Set1(itabname(n.Type(), n.Left().Type())) } case ir.OLEN, ir.OCAP: if isRuneCount(n) { // Replace len([]rune(string)) with runtime.countrunes(string). 
- n = mkcall("countrunes", n.Type, init, conv(n.Left.Left, types.Types[types.TSTRING])) + n = mkcall("countrunes", n.Type(), init, conv(n.Left().Left(), types.Types[types.TSTRING])) break } - n.Left = walkexpr(n.Left, init) + n.SetLeft(walkexpr(n.Left(), init)) // replace len(*[10]int) with 10. // delayed until now to preserve side effects. - t := n.Left.Type + t := n.Left().Type() if t.IsPtr() { t = t.Elem() } if t.IsArray() { - safeexpr(n.Left, init) + safeexpr(n.Left(), init) n = origIntConst(n, t.NumElem()) n.SetTypecheck(1) } case ir.OCOMPLEX: // Use results from call expression as arguments for complex. - if n.Left == nil && n.Right == nil { - n.Left = n.List.First() - n.Right = n.List.Second() + if n.Left() == nil && n.Right() == nil { + n.SetLeft(n.List().First()) + n.SetRight(n.List().Second()) } - n.Left = walkexpr(n.Left, init) - n.Right = walkexpr(n.Right, init) + n.SetLeft(walkexpr(n.Left(), init)) + n.SetRight(walkexpr(n.Right(), init)) case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: n = walkcompare(n, init) case ir.OANDAND, ir.OOROR: - n.Left = walkexpr(n.Left, init) + n.SetLeft(walkexpr(n.Left(), init)) // cannot put side effects from n.Right on init, // because they cannot run before n.Left is checked. // save elsewhere and store on the eventual n.Right. var ll ir.Nodes - n.Right = walkexpr(n.Right, &ll) - n.Right = addinit(n.Right, ll.Slice()) + n.SetRight(walkexpr(n.Right(), &ll)) + n.SetRight(addinit(n.Right(), ll.Slice())) case ir.OPRINT, ir.OPRINTN: n = walkprint(n, init) case ir.OPANIC: - n = mkcall("gopanic", nil, init, n.Left) + n = mkcall("gopanic", nil, init, n.Left()) case ir.ORECOVER: - n = mkcall("gorecover", n.Type, init, ir.Nod(ir.OADDR, nodfp, nil)) + n = mkcall("gorecover", n.Type(), init, ir.Nod(ir.OADDR, nodfp, nil)) case ir.OCLOSUREVAR, ir.OCFUNC: case ir.OCALLINTER, ir.OCALLFUNC, ir.OCALLMETH: - if n.Op == ir.OCALLINTER { + if n.Op() == ir.OCALLINTER { usemethod(n) markUsedIfaceMethod(n) } - if n.Op == ir.OCALLFUNC && n.Left.Op == ir.OCLOSURE { + if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.OCLOSURE { // Transform direct call of a closure to call of a normal function. // transformclosure already did all preparation work. // Prepend captured variables to argument list. - n.List.Prepend(n.Left.Func.ClosureEnter.Slice()...) - n.Left.Func.ClosureEnter.Set(nil) + n.PtrList().Prepend(n.Left().Func().ClosureEnter.Slice()...) + n.Left().Func().ClosureEnter.Set(nil) // Replace OCLOSURE with ONAME/PFUNC. - n.Left = n.Left.Func.Nname + n.SetLeft(n.Left().Func().Nname) // Update type of OCALLFUNC node. // Output arguments had not changed, but their offsets could. - if n.Left.Type.NumResults() == 1 { - n.Type = n.Left.Type.Results().Field(0).Type + if n.Left().Type().NumResults() == 1 { + n.SetType(n.Left().Type().Results().Field(0).Type) } else { - n.Type = n.Left.Type.Results() + n.SetType(n.Left().Type().Results()) } } walkCall(n, init) case ir.OAS, ir.OASOP: - init.AppendNodes(&n.Ninit) + init.AppendNodes(n.PtrInit()) // Recognize m[k] = append(m[k], ...) so we can reuse // the mapassign call. 
- mapAppend := n.Left.Op == ir.OINDEXMAP && n.Right.Op == ir.OAPPEND - if mapAppend && !samesafeexpr(n.Left, n.Right.List.First()) { - base.Fatalf("not same expressions: %v != %v", n.Left, n.Right.List.First()) + mapAppend := n.Left().Op() == ir.OINDEXMAP && n.Right().Op() == ir.OAPPEND + if mapAppend && !samesafeexpr(n.Left(), n.Right().List().First()) { + base.Fatalf("not same expressions: %v != %v", n.Left(), n.Right().List().First()) } - n.Left = walkexpr(n.Left, init) - n.Left = safeexpr(n.Left, init) + n.SetLeft(walkexpr(n.Left(), init)) + n.SetLeft(safeexpr(n.Left(), init)) if mapAppend { - n.Right.List.SetFirst(n.Left) + n.Right().List().SetFirst(n.Left()) } - if n.Op == ir.OASOP { + if n.Op() == ir.OASOP { // Rewrite x op= y into x = x op y. - n.Right = ir.Nod(n.SubOp(), n.Left, n.Right) - n.Right = typecheck(n.Right, ctxExpr) + n.SetRight(ir.Nod(n.SubOp(), n.Left(), n.Right())) + n.SetRight(typecheck(n.Right(), ctxExpr)) - n.Op = ir.OAS + n.SetOp(ir.OAS) n.ResetAux() } @@ -612,35 +612,35 @@ opswitch: break } - if n.Right == nil { + if n.Right() == nil { // TODO(austin): Check all "implicit zeroing" break } - if !instrumenting && isZero(n.Right) { + if !instrumenting && isZero(n.Right()) { break } - switch n.Right.Op { + switch n.Right().Op() { default: - n.Right = walkexpr(n.Right, init) + n.SetRight(walkexpr(n.Right(), init)) case ir.ORECV: // x = <-c; n.Left is x, n.Right.Left is c. // order.stmt made sure x is addressable. - n.Right.Left = walkexpr(n.Right.Left, init) + n.Right().SetLeft(walkexpr(n.Right().Left(), init)) - n1 := ir.Nod(ir.OADDR, n.Left, nil) - r := n.Right.Left // the channel - n = mkcall1(chanfn("chanrecv1", 2, r.Type), nil, init, r, n1) + n1 := ir.Nod(ir.OADDR, n.Left(), nil) + r := n.Right().Left() // the channel + n = mkcall1(chanfn("chanrecv1", 2, r.Type()), nil, init, r, n1) n = walkexpr(n, init) break opswitch case ir.OAPPEND: // x = append(...) - r := n.Right - if r.Type.Elem().NotInHeap() { - base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", r.Type.Elem()) + r := n.Right() + if r.Type().Elem().NotInHeap() { + base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", r.Type().Elem()) } switch { case isAppendOfMake(r): @@ -651,86 +651,86 @@ opswitch: default: r = walkappend(r, init, n) } - n.Right = r - if r.Op == ir.OAPPEND { + n.SetRight(r) + if r.Op() == ir.OAPPEND { // Left in place for back end. // Do not add a new write barrier. // Set up address of type for back end. - r.Left = typename(r.Type.Elem()) + r.SetLeft(typename(r.Type().Elem())) break opswitch } // Otherwise, lowered for race detector. // Treat as ordinary assignment. } - if n.Left != nil && n.Right != nil { + if n.Left() != nil && n.Right() != nil { n = convas(n, init) } case ir.OAS2: - init.AppendNodes(&n.Ninit) - walkexprlistsafe(n.List.Slice(), init) - walkexprlistsafe(n.Rlist.Slice(), init) - ll := ascompatee(ir.OAS, n.List.Slice(), n.Rlist.Slice(), init) + init.AppendNodes(n.PtrInit()) + walkexprlistsafe(n.List().Slice(), init) + walkexprlistsafe(n.Rlist().Slice(), init) + ll := ascompatee(ir.OAS, n.List().Slice(), n.Rlist().Slice(), init) ll = reorder3(ll) n = liststmt(ll) // a,b,... 
= fn() case ir.OAS2FUNC: - init.AppendNodes(&n.Ninit) + init.AppendNodes(n.PtrInit()) - r := n.Right - walkexprlistsafe(n.List.Slice(), init) + r := n.Right() + walkexprlistsafe(n.List().Slice(), init) r = walkexpr(r, init) if isIntrinsicCall(r) { - n.Right = r + n.SetRight(r) break } init.Append(r) - ll := ascompatet(n.List, r.Type) + ll := ascompatet(n.List(), r.Type()) n = liststmt(ll) // x, y = <-c // order.stmt made sure x is addressable or blank. case ir.OAS2RECV: - init.AppendNodes(&n.Ninit) + init.AppendNodes(n.PtrInit()) - r := n.Right - walkexprlistsafe(n.List.Slice(), init) - r.Left = walkexpr(r.Left, init) + r := n.Right() + walkexprlistsafe(n.List().Slice(), init) + r.SetLeft(walkexpr(r.Left(), init)) var n1 *ir.Node - if ir.IsBlank(n.List.First()) { + if ir.IsBlank(n.List().First()) { n1 = nodnil() } else { - n1 = ir.Nod(ir.OADDR, n.List.First(), nil) + n1 = ir.Nod(ir.OADDR, n.List().First(), nil) } - fn := chanfn("chanrecv2", 2, r.Left.Type) - ok := n.List.Second() - call := mkcall1(fn, types.Types[types.TBOOL], init, r.Left, n1) + fn := chanfn("chanrecv2", 2, r.Left().Type()) + ok := n.List().Second() + call := mkcall1(fn, types.Types[types.TBOOL], init, r.Left(), n1) n = ir.Nod(ir.OAS, ok, call) n = typecheck(n, ctxStmt) // a,b = m[i] case ir.OAS2MAPR: - init.AppendNodes(&n.Ninit) + init.AppendNodes(n.PtrInit()) - r := n.Right - walkexprlistsafe(n.List.Slice(), init) - r.Left = walkexpr(r.Left, init) - r.Right = walkexpr(r.Right, init) - t := r.Left.Type + r := n.Right() + walkexprlistsafe(n.List().Slice(), init) + r.SetLeft(walkexpr(r.Left(), init)) + r.SetRight(walkexpr(r.Right(), init)) + t := r.Left().Type() fast := mapfast(t) var key *ir.Node if fast != mapslow { // fast versions take key by value - key = r.Right + key = r.Right() } else { // standard version takes key by reference // order.expr made sure key is addressable. - key = ir.Nod(ir.OADDR, r.Right, nil) + key = ir.Nod(ir.OADDR, r.Right(), nil) } // from: @@ -738,32 +738,32 @@ opswitch: // to: // var,b = mapaccess2*(t, m, i) // a = *var - a := n.List.First() + a := n.List().First() if w := t.Elem().Width; w <= zeroValSize { fn := mapfn(mapaccess2[fast], t) - r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key) + r = mkcall1(fn, fn.Type().Results(), init, typename(t), r.Left(), key) } else { fn := mapfn("mapaccess2_fat", t) z := zeroaddr(w) - r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key, z) + r = mkcall1(fn, fn.Type().Results(), init, typename(t), r.Left(), key, z) } // mapaccess2* returns a typed bool, but due to spec changes, // the boolean result of i.(T) is now untyped so we make it the // same type as the variable on the lhs. 
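Note: the fixup just below only matters when the ok variable has a named boolean type; mapaccess2* returns a plain bool, so the second field of the call's result tuple is patched to match the lhs. Illustrative:

	type found bool
	var m map[string]int
	var v int
	var ok found
	v, ok = m["k"] // second result field's type is patched from bool to found
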
- if ok := n.List.Second(); !ir.IsBlank(ok) && ok.Type.IsBoolean() { - r.Type.Field(1).Type = ok.Type + if ok := n.List().Second(); !ir.IsBlank(ok) && ok.Type().IsBoolean() { + r.Type().Field(1).Type = ok.Type() } - n.Right = r - n.Op = ir.OAS2FUNC + n.SetRight(r) + n.SetOp(ir.OAS2FUNC) // don't generate a = *var if a is _ if !ir.IsBlank(a) { var_ := temp(types.NewPtr(t.Elem())) var_.SetTypecheck(1) var_.MarkNonNil() // mapaccess always returns a non-nil pointer - n.List.SetFirst(var_) + n.List().SetFirst(var_) n = walkexpr(n, init) init.Append(n) n = ir.Nod(ir.OAS, a, ir.Nod(ir.ODEREF, var_, nil)) @@ -773,13 +773,13 @@ opswitch: n = walkexpr(n, init) case ir.ODELETE: - init.AppendNodes(&n.Ninit) - map_ := n.List.First() - key := n.List.Second() + init.AppendNodes(n.PtrInit()) + map_ := n.List().First() + key := n.List().Second() map_ = walkexpr(map_, init) key = walkexpr(key, init) - t := map_.Type + t := map_.Type() fast := mapfast(t) if fast == mapslow { // order.stmt made sure key is addressable. @@ -788,17 +788,17 @@ opswitch: n = mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key) case ir.OAS2DOTTYPE: - walkexprlistsafe(n.List.Slice(), init) - n.Right = walkexpr(n.Right, init) + walkexprlistsafe(n.List().Slice(), init) + n.SetRight(walkexpr(n.Right(), init)) case ir.OCONVIFACE: - n.Left = walkexpr(n.Left, init) + n.SetLeft(walkexpr(n.Left(), init)) - fromType := n.Left.Type - toType := n.Type + fromType := n.Left().Type() + toType := n.Type() - if !fromType.IsInterface() && !ir.IsBlank(Curfn.Func.Nname) { // skip unnamed functions (func _()) - markTypeUsedInInterface(fromType, Curfn.Func.LSym) + if !fromType.IsInterface() && !ir.IsBlank(Curfn.Func().Nname) { // skip unnamed functions (func _()) + markTypeUsedInInterface(fromType, Curfn.Func().LSym) } // typeword generates the type word of the interface value. @@ -811,8 +811,8 @@ opswitch: // Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped. if isdirectiface(fromType) { - l := ir.Nod(ir.OEFACE, typeword(), n.Left) - l.Type = toType + l := ir.Nod(ir.OEFACE, typeword(), n.Left()) + l.SetType(toType) l.SetTypecheck(n.Typecheck()) n = l break @@ -823,10 +823,10 @@ opswitch: staticuint64s.SetClass(ir.PEXTERN) // The actual type is [256]uint64, but we use [256*8]uint8 so we can address // individual bytes. - staticuint64s.Type = types.NewArray(types.Types[types.TUINT8], 256*8) + staticuint64s.SetType(types.NewArray(types.Types[types.TUINT8], 256*8)) zerobase = NewName(Runtimepkg.Lookup("zerobase")) zerobase.SetClass(ir.PEXTERN) - zerobase.Type = types.Types[types.TUINTPTR] + zerobase.SetType(types.Types[types.TUINTPTR]) } // Optimize convT2{E,I} for many cases in which T is not pointer-shaped, @@ -836,33 +836,33 @@ opswitch: switch { case fromType.Size() == 0: // n.Left is zero-sized. Use zerobase. - cheapexpr(n.Left, init) // Evaluate n.Left for side-effects. See issue 19246. + cheapexpr(n.Left(), init) // Evaluate n.Left for side-effects. See issue 19246. value = zerobase case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()): // n.Left is a bool/byte. Use staticuint64s[n.Left * 8] on little-endian // and staticuint64s[n.Left * 8 + 7] on big-endian. - n.Left = cheapexpr(n.Left, init) + n.SetLeft(cheapexpr(n.Left(), init)) // byteindex widens n.Left so that the multiplication doesn't overflow. 
- index := ir.Nod(ir.OLSH, byteindex(n.Left), nodintconst(3)) + index := ir.Nod(ir.OLSH, byteindex(n.Left()), nodintconst(3)) if thearch.LinkArch.ByteOrder == binary.BigEndian { index = ir.Nod(ir.OADD, index, nodintconst(7)) } value = ir.Nod(ir.OINDEX, staticuint64s, index) value.SetBounded(true) - case n.Left.Class() == ir.PEXTERN && n.Left.Name != nil && n.Left.Name.Readonly(): + case n.Left().Class() == ir.PEXTERN && n.Left().Name() != nil && n.Left().Name().Readonly(): // n.Left is a readonly global; use it directly. - value = n.Left - case !fromType.IsInterface() && n.Esc == EscNone && fromType.Width <= 1024: + value = n.Left() + case !fromType.IsInterface() && n.Esc() == EscNone && fromType.Width <= 1024: // n.Left does not escape. Use a stack temporary initialized to n.Left. value = temp(fromType) - init.Append(typecheck(ir.Nod(ir.OAS, value, n.Left), ctxStmt)) + init.Append(typecheck(ir.Nod(ir.OAS, value, n.Left()), ctxStmt)) } if value != nil { // Value is identical to n.Left. // Construct the interface directly: {type/itab, &value}. l := ir.Nod(ir.OEFACE, typeword(), typecheck(ir.Nod(ir.OADDR, value, nil), ctxExpr)) - l.Type = toType + l.SetType(toType) l.SetTypecheck(n.Typecheck()) n = l break @@ -877,7 +877,7 @@ opswitch: if toType.IsEmptyInterface() && fromType.IsInterface() && !fromType.IsEmptyInterface() { // Evaluate the input interface. c := temp(fromType) - init.Append(ir.Nod(ir.OAS, c, n.Left)) + init.Append(ir.Nod(ir.OAS, c, n.Left())) // Get the itab out of the interface. tmp := temp(types.NewPtr(types.Types[types.TUINT8])) @@ -885,12 +885,12 @@ opswitch: // Get the type out of the itab. nif := ir.Nod(ir.OIF, typecheck(ir.Nod(ir.ONE, tmp, nodnil()), ctxExpr), nil) - nif.Nbody.Set1(ir.Nod(ir.OAS, tmp, itabType(tmp))) + nif.PtrBody().Set1(ir.Nod(ir.OAS, tmp, itabType(tmp))) init.Append(nif) // Build the result. - e := ir.Nod(ir.OEFACE, tmp, ifaceData(n.Pos, c, types.NewPtr(types.Types[types.TUINT8]))) - e.Type = toType // assign type manually, typecheck doesn't understand OEFACE. + e := ir.Nod(ir.OEFACE, tmp, ifaceData(n.Pos(), c, types.NewPtr(types.Types[types.TUINT8]))) + e.SetType(toType) // assign type manually, typecheck doesn't understand OEFACE. e.SetTypecheck(1) n = e break @@ -905,14 +905,14 @@ opswitch: fn := syslook(fnname) dowidth(fromType) fn = substArgTypes(fn, fromType) - dowidth(fn.Type) + dowidth(fn.Type()) call := ir.Nod(ir.OCALL, fn, nil) - call.List.Set1(n.Left) + call.PtrList().Set1(n.Left()) call = typecheck(call, ctxExpr) call = walkexpr(call, init) call = safeexpr(call, init) e := ir.Nod(ir.OEFACE, typeword(), call) - e.Type = toType + e.SetType(toType) e.SetTypecheck(1) n = e break @@ -927,7 +927,7 @@ opswitch: tab = typeword() } - v := n.Left + v := n.Left() if needsaddr { // Types of large or unknown size are passed by reference. // Orderexpr arranged for n.Left to be a temporary for all @@ -936,7 +936,7 @@ opswitch: // with non-interface cases, is not visible to order.stmt, so we // have to fall back on allocating a temp here. 
if !islvalue(v) { - v = copyexpr(v, v.Type, init) + v = copyexpr(v, v.Type(), init) } v = ir.Nod(ir.OADDR, v, nil) } @@ -944,41 +944,41 @@ opswitch: dowidth(fromType) fn := syslook(fnname) fn = substArgTypes(fn, fromType, toType) - dowidth(fn.Type) + dowidth(fn.Type()) n = ir.Nod(ir.OCALL, fn, nil) - n.List.Set2(tab, v) + n.PtrList().Set2(tab, v) n = typecheck(n, ctxExpr) n = walkexpr(n, init) case ir.OCONV, ir.OCONVNOP: - n.Left = walkexpr(n.Left, init) - if n.Op == ir.OCONVNOP && checkPtr(Curfn, 1) { - if n.Type.IsPtr() && n.Left.Type.IsUnsafePtr() { // unsafe.Pointer to *T + n.SetLeft(walkexpr(n.Left(), init)) + if n.Op() == ir.OCONVNOP && checkPtr(Curfn, 1) { + if n.Type().IsPtr() && n.Left().Type().IsUnsafePtr() { // unsafe.Pointer to *T n = walkCheckPtrAlignment(n, init, nil) break } - if n.Type.IsUnsafePtr() && n.Left.Type.IsUintptr() { // uintptr to unsafe.Pointer + if n.Type().IsUnsafePtr() && n.Left().Type().IsUintptr() { // uintptr to unsafe.Pointer n = walkCheckPtrArithmetic(n, init) break } } - param, result := rtconvfn(n.Left.Type, n.Type) + param, result := rtconvfn(n.Left().Type(), n.Type()) if param == types.Txxx { break } fn := ir.BasicTypeNames[param] + "to" + ir.BasicTypeNames[result] - n = conv(mkcall(fn, types.Types[result], init, conv(n.Left, types.Types[param])), n.Type) + n = conv(mkcall(fn, types.Types[result], init, conv(n.Left(), types.Types[param])), n.Type()) case ir.ODIV, ir.OMOD: - n.Left = walkexpr(n.Left, init) - n.Right = walkexpr(n.Right, init) + n.SetLeft(walkexpr(n.Left(), init)) + n.SetRight(walkexpr(n.Right(), init)) // rewrite complex div into function call. - et := n.Left.Type.Etype + et := n.Left().Type().Etype - if isComplex[et] && n.Op == ir.ODIV { - t := n.Type - n = mkcall("complex128div", types.Types[types.TCOMPLEX128], init, conv(n.Left, types.Types[types.TCOMPLEX128]), conv(n.Right, types.Types[types.TCOMPLEX128])) + if isComplex[et] && n.Op() == ir.ODIV { + t := n.Type() + n = mkcall("complex128div", types.Types[types.TCOMPLEX128], init, conv(n.Left(), types.Types[types.TCOMPLEX128]), conv(n.Right(), types.Types[types.TCOMPLEX128])) n = conv(n, t) break } @@ -992,12 +992,12 @@ opswitch: // TODO: Remove this code once we can introduce // runtime calls late in SSA processing. if Widthreg < 8 && (et == types.TINT64 || et == types.TUINT64) { - if n.Right.Op == ir.OLITERAL { + if n.Right().Op() == ir.OLITERAL { // Leave div/mod by constant powers of 2 or small 16-bit constants. // The SSA backend will handle those. switch et { case types.TINT64: - c := n.Right.Int64Val() + c := n.Right().Int64Val() if c < 0 { c = -c } @@ -1005,7 +1005,7 @@ opswitch: break opswitch } case types.TUINT64: - c := n.Right.Uint64Val() + c := n.Right().Uint64Val() if c < 1<<16 { break opswitch } @@ -1020,63 +1020,63 @@ opswitch: } else { fn = "uint64" } - if n.Op == ir.ODIV { + if n.Op() == ir.ODIV { fn += "div" } else { fn += "mod" } - n = mkcall(fn, n.Type, init, conv(n.Left, types.Types[et]), conv(n.Right, types.Types[et])) + n = mkcall(fn, n.Type(), init, conv(n.Left(), types.Types[et]), conv(n.Right(), types.Types[et])) } case ir.OINDEX: - n.Left = walkexpr(n.Left, init) + n.SetLeft(walkexpr(n.Left(), init)) // save the original node for bounds checking elision. // If it was a ODIV/OMOD walk might rewrite it. - r := n.Right + r := n.Right() - n.Right = walkexpr(n.Right, init) + n.SetRight(walkexpr(n.Right(), init)) // if range of type cannot exceed static array bound, // disable bounds check. 
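Note: bounded() (used just below) proves that an index expression cannot reach the array length, for example after masking, which lets the runtime check be dropped. Illustrative:

	var a [8]int
	var i int
	_ = a[i&7] // i&7 is always in [0, 8), so with -m the compiler
	           // reports "index bounds check elided"
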
if n.Bounded() { break } - t := n.Left.Type + t := n.Left().Type() if t != nil && t.IsPtr() { t = t.Elem() } if t.IsArray() { n.SetBounded(bounded(r, t.NumElem())) - if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Right, constant.Int) { + if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Right(), constant.Int) { base.Warn("index bounds check elided") } - if smallintconst(n.Right) && !n.Bounded() { + if smallintconst(n.Right()) && !n.Bounded() { base.Errorf("index out of bounds") } - } else if ir.IsConst(n.Left, constant.String) { - n.SetBounded(bounded(r, int64(len(n.Left.StringVal())))) - if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Right, constant.Int) { + } else if ir.IsConst(n.Left(), constant.String) { + n.SetBounded(bounded(r, int64(len(n.Left().StringVal())))) + if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Right(), constant.Int) { base.Warn("index bounds check elided") } - if smallintconst(n.Right) && !n.Bounded() { + if smallintconst(n.Right()) && !n.Bounded() { base.Errorf("index out of bounds") } } - if ir.IsConst(n.Right, constant.Int) { - if v := n.Right.Val(); constant.Sign(v) < 0 || doesoverflow(v, types.Types[types.TINT]) { + if ir.IsConst(n.Right(), constant.Int) { + if v := n.Right().Val(); constant.Sign(v) < 0 || doesoverflow(v, types.Types[types.TINT]) { base.Errorf("index out of bounds") } } case ir.OINDEXMAP: // Replace m[k] with *map{access1,assign}(maptype, m, &k) - n.Left = walkexpr(n.Left, init) - n.Right = walkexpr(n.Right, init) - map_ := n.Left - key := n.Right - t := map_.Type + n.SetLeft(walkexpr(n.Left(), init)) + n.SetRight(walkexpr(n.Right(), init)) + map_ := n.Left() + key := n.Right() + t := map_.Type() if n.IndexMapLValue() { // This m[k] expression is on the left-hand side of an assignment. fast := mapfast(t) @@ -1102,26 +1102,26 @@ opswitch: n = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, typename(t), map_, key, z) } } - n.Type = types.NewPtr(t.Elem()) + n.SetType(types.NewPtr(t.Elem())) n.MarkNonNil() // mapaccess1* and mapassign always return non-nil pointers. 
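Note: after this lowering, map reads and writes both go through runtime calls that return an element pointer (the fast variants take the key by value instead of by address). Roughly:

	v := m[k] // in effect: v = *mapaccess1(maptype, m, &k)
	m[k] = v  // in effect: *mapassign(maptype, m, &k) = v
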
n = ir.Nod(ir.ODEREF, n, nil) - n.Type = t.Elem() + n.SetType(t.Elem()) n.SetTypecheck(1) case ir.ORECV: base.Fatalf("walkexpr ORECV") // should see inside OAS only case ir.OSLICEHEADER: - n.Left = walkexpr(n.Left, init) - n.List.SetFirst(walkexpr(n.List.First(), init)) - n.List.SetSecond(walkexpr(n.List.Second(), init)) + n.SetLeft(walkexpr(n.Left(), init)) + n.List().SetFirst(walkexpr(n.List().First(), init)) + n.List().SetSecond(walkexpr(n.List().Second(), init)) case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR: - checkSlice := checkPtr(Curfn, 1) && n.Op == ir.OSLICE3ARR && n.Left.Op == ir.OCONVNOP && n.Left.Left.Type.IsUnsafePtr() + checkSlice := checkPtr(Curfn, 1) && n.Op() == ir.OSLICE3ARR && n.Left().Op() == ir.OCONVNOP && n.Left().Left().Type().IsUnsafePtr() if checkSlice { - n.Left.Left = walkexpr(n.Left.Left, init) + n.Left().SetLeft(walkexpr(n.Left().Left(), init)) } else { - n.Left = walkexpr(n.Left, init) + n.SetLeft(walkexpr(n.Left(), init)) } low, high, max := n.SliceBounds() low = walkexpr(low, init) @@ -1133,15 +1133,15 @@ opswitch: max = walkexpr(max, init) n.SetSliceBounds(low, high, max) if checkSlice { - n.Left = walkCheckPtrAlignment(n.Left, init, max) + n.SetLeft(walkCheckPtrAlignment(n.Left(), init, max)) } - if n.Op.IsSlice3() { - if max != nil && max.Op == ir.OCAP && samesafeexpr(n.Left, max.Left) { + if n.Op().IsSlice3() { + if max != nil && max.Op() == ir.OCAP && samesafeexpr(n.Left(), max.Left()) { // Reduce x[i:j:cap(x)] to x[i:j]. - if n.Op == ir.OSLICE3 { - n.Op = ir.OSLICE + if n.Op() == ir.OSLICE3 { + n.SetOp(ir.OSLICE) } else { - n.Op = ir.OSLICEARR + n.SetOp(ir.OSLICEARR) } n = reduceSlice(n) } @@ -1150,22 +1150,22 @@ opswitch: } case ir.ONEW: - if n.Type.Elem().NotInHeap() { - base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type.Elem()) + if n.Type().Elem().NotInHeap() { + base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type().Elem()) } - if n.Esc == EscNone { - if n.Type.Elem().Width >= maxImplicitStackVarSize { + if n.Esc() == EscNone { + if n.Type().Elem().Width >= maxImplicitStackVarSize { base.Fatalf("large ONEW with EscNone: %v", n) } - r := temp(n.Type.Elem()) + r := temp(n.Type().Elem()) r = ir.Nod(ir.OAS, r, nil) // zero temp r = typecheck(r, ctxStmt) init.Append(r) - r = ir.Nod(ir.OADDR, r.Left, nil) + r = ir.Nod(ir.OADDR, r.Left(), nil) r = typecheck(r, ctxExpr) n = r } else { - n = callnew(n.Type.Elem()) + n = callnew(n.Type().Elem()) } case ir.OADDSTR: @@ -1182,34 +1182,34 @@ opswitch: case ir.OCLOSE: fn := syslook("closechan") - fn = substArgTypes(fn, n.Left.Type) - n = mkcall1(fn, nil, init, n.Left) + fn = substArgTypes(fn, n.Left().Type()) + n = mkcall1(fn, nil, init, n.Left()) case ir.OMAKECHAN: // When size fits into int, use makechan instead of // makechan64, which is faster and shorter on 32 bit platforms. - size := n.Left + size := n.Left() fnname := "makechan64" argtype := types.Types[types.TINT64] // Type checking guarantees that TIDEAL size is positive and fits in an int. // The case of size overflow when converting TUINT or TUINTPTR to TINT // will be handled by the negative range checks in makechan during runtime. 
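Note: the size check below selects between the two runtime entry points. Sketch of the resulting calls:

	ch := make(chan int, n)
	// if n's type fits in int: makechan(chantype, int(n))
	// otherwise:               makechan64(chantype, int64(n))
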
- if size.Type.IsKind(types.TIDEAL) || size.Type.Size() <= types.Types[types.TUINT].Size() { + if size.Type().IsKind(types.TIDEAL) || size.Type().Size() <= types.Types[types.TUINT].Size() { fnname = "makechan" argtype = types.Types[types.TINT] } - n = mkcall1(chanfn(fnname, 1, n.Type), n.Type, init, typename(n.Type), conv(size, argtype)) + n = mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, typename(n.Type()), conv(size, argtype)) case ir.OMAKEMAP: - t := n.Type + t := n.Type() hmapType := hmap(t) - hint := n.Left + hint := n.Left() // var h *hmap var h *ir.Node - if n.Esc == EscNone { + if n.Esc() == EscNone { // Allocate hmap on stack. // var hv hmap @@ -1243,7 +1243,7 @@ opswitch: // var bv bmap bv := temp(bmap(t)) zero = ir.Nod(ir.OAS, bv, nil) - nif.Nbody.Append(zero) + nif.PtrBody().Append(zero) // b = &bv b := ir.Nod(ir.OADDR, bv, nil) @@ -1251,7 +1251,7 @@ opswitch: // h.buckets = b bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap na := ir.Nod(ir.OAS, nodSym(ir.ODOT, h, bsym), b) - nif.Nbody.Append(na) + nif.PtrBody().Append(na) nif = typecheck(nif, ctxStmt) nif = walkstmt(nif) @@ -1267,7 +1267,7 @@ opswitch: // For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false // and no buckets will be allocated by makemap. Therefore, // no buckets need to be allocated in this code path. - if n.Esc == EscNone { + if n.Esc() == EscNone { // Only need to initialize h.hash0 since // hmap h has been allocated on the stack already. // h.hash0 = fastrand() @@ -1283,10 +1283,10 @@ opswitch: // hmap on the heap and initialize hmap's hash0 field. fn := syslook("makemap_small") fn = substArgTypes(fn, t.Key(), t.Elem()) - n = mkcall1(fn, n.Type, init) + n = mkcall1(fn, n.Type(), init) } } else { - if n.Esc != EscNone { + if n.Esc() != EscNone { h = nodnil() } // Map initialization with a variable or large hint is @@ -1303,28 +1303,28 @@ opswitch: // See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function. // The case of hint overflow when converting TUINT or TUINTPTR to TINT // will be handled by the negative range checks in makemap during runtime. 
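Note: summarizing the OMAKEMAP strategy above and below, with BUCKETSIZE = 8. Rough sketch:

	m1 := make(map[string]int)    // no hint, or hint <= 8: in effect
	//   h := new(hmap) (a stack temp if non-escaping); h.hash0 = fastrand()
	m2 := make(map[string]int, n) // larger or non-constant hint:
	//   makemap(maptype, int(n), h)
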
- if hint.Type.IsKind(types.TIDEAL) || hint.Type.Size() <= types.Types[types.TUINT].Size() { + if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() { fnname = "makemap" argtype = types.Types[types.TINT] } fn := syslook(fnname) fn = substArgTypes(fn, hmapType, t.Key(), t.Elem()) - n = mkcall1(fn, n.Type, init, typename(n.Type), conv(hint, argtype), h) + n = mkcall1(fn, n.Type(), init, typename(n.Type()), conv(hint, argtype), h) } case ir.OMAKESLICE: - l := n.Left - r := n.Right + l := n.Left() + r := n.Right() if r == nil { r = safeexpr(l, init) l = r } - t := n.Type + t := n.Type() if t.Elem().NotInHeap() { base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem()) } - if n.Esc == EscNone { + if n.Esc() == EscNone { if why := heapAllocReason(n); why != "" { base.Fatalf("%v has EscNone, but %v", n, why) } @@ -1344,8 +1344,8 @@ opswitch: // } nif := ir.Nod(ir.OIF, ir.Nod(ir.OGT, conv(l, types.Types[types.TUINT64]), nodintconst(i)), nil) niflen := ir.Nod(ir.OIF, ir.Nod(ir.OLT, l, nodintconst(0)), nil) - niflen.Nbody.Set1(mkcall("panicmakeslicelen", nil, init)) - nif.Nbody.Append(niflen, mkcall("panicmakeslicecap", nil, init)) + niflen.PtrBody().Set1(mkcall("panicmakeslicelen", nil, init)) + nif.PtrBody().Append(niflen, mkcall("panicmakeslicecap", nil, init)) nif = typecheck(nif, ctxStmt) init.Append(nif) @@ -1356,7 +1356,7 @@ opswitch: init.Append(a) r := ir.Nod(ir.OSLICE, var_, nil) // arr[:l] r.SetSliceBounds(nil, l, nil) - r = conv(r, n.Type) // in case n.Type is named. + r = conv(r, n.Type()) // in case n.Type is named. r = typecheck(r, ctxExpr) r = walkexpr(r, init) n = r @@ -1373,19 +1373,19 @@ opswitch: // Type checking guarantees that TIDEAL len/cap are positive and fit in an int. // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT // will be handled by the negative range checks in makeslice during runtime. 
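Note: the resulting node is an OSLICEHEADER whose pointer comes from the runtime allocator. In effect:

	s := make([]byte, length, capacity)
	// becomes:
	//   p := makeslice(elemtype, length, capacity) // unsafe.Pointer, non-nil
	//   s := slice header {p, length, capacity}
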
- if (len.Type.IsKind(types.TIDEAL) || len.Type.Size() <= types.Types[types.TUINT].Size()) && - (cap.Type.IsKind(types.TIDEAL) || cap.Type.Size() <= types.Types[types.TUINT].Size()) { + if (len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size()) && + (cap.Type().IsKind(types.TIDEAL) || cap.Type().Size() <= types.Types[types.TUINT].Size()) { fnname = "makeslice" argtype = types.Types[types.TINT] } m := ir.Nod(ir.OSLICEHEADER, nil, nil) - m.Type = t + m.SetType(t) fn := syslook(fnname) - m.Left = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype)) - m.Left.MarkNonNil() - m.List.Set2(conv(len, types.Types[types.TINT]), conv(cap, types.Types[types.TINT])) + m.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype))) + m.Left().MarkNonNil() + m.PtrList().Set2(conv(len, types.Types[types.TINT]), conv(cap, types.Types[types.TINT])) m = typecheck(m, ctxExpr) m = walkexpr(m, init) @@ -1393,18 +1393,18 @@ opswitch: } case ir.OMAKESLICECOPY: - if n.Esc == EscNone { + if n.Esc() == EscNone { base.Fatalf("OMAKESLICECOPY with EscNone: %v", n) } - t := n.Type + t := n.Type() if t.Elem().NotInHeap() { base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem()) } - length := conv(n.Left, types.Types[types.TINT]) - copylen := ir.Nod(ir.OLEN, n.Right, nil) - copyptr := ir.Nod(ir.OSPTR, n.Right, nil) + length := conv(n.Left(), types.Types[types.TINT]) + copylen := ir.Nod(ir.OLEN, n.Right(), nil) + copyptr := ir.Nod(ir.OSPTR, n.Right(), nil) if !t.Elem().HasPointers() && n.Bounded() { // When len(to)==len(from) and elements have no pointers: @@ -1418,10 +1418,10 @@ opswitch: // instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer fn := syslook("mallocgc") sh := ir.Nod(ir.OSLICEHEADER, nil, nil) - sh.Left = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, nodnil(), nodbool(false)) - sh.Left.MarkNonNil() - sh.List.Set2(length, length) - sh.Type = t + sh.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, nodnil(), nodbool(false))) + sh.Left().MarkNonNil() + sh.PtrList().Set2(length, length) + sh.SetType(t) s := temp(t) r := typecheck(ir.Nod(ir.OAS, s, sh), ctxStmt) @@ -1441,61 +1441,61 @@ opswitch: // instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer fn := syslook("makeslicecopy") s := ir.Nod(ir.OSLICEHEADER, nil, nil) - s.Left = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), length, copylen, conv(copyptr, types.Types[types.TUNSAFEPTR])) - s.Left.MarkNonNil() - s.List.Set2(length, length) - s.Type = t + s.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), length, copylen, conv(copyptr, types.Types[types.TUNSAFEPTR]))) + s.Left().MarkNonNil() + s.PtrList().Set2(length, length) + s.SetType(t) n = typecheck(s, ctxExpr) n = walkexpr(n, init) } case ir.ORUNESTR: a := nodnil() - if n.Esc == EscNone { + if n.Esc() == EscNone { t := types.NewArray(types.Types[types.TUINT8], 4) a = ir.Nod(ir.OADDR, temp(t), nil) } // intstring(*[4]byte, rune) - n = mkcall("intstring", n.Type, init, a, conv(n.Left, types.Types[types.TINT64])) + n = mkcall("intstring", n.Type(), init, a, conv(n.Left(), types.Types[types.TINT64])) case ir.OBYTES2STR, ir.ORUNES2STR: a := nodnil() - if n.Esc == EscNone { + if n.Esc() == EscNone { // Create temporary buffer for string on stack. 
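Note: tmpstringbufsize is 32, so a non-escaping []byte-to-string conversion can build its result in a stack buffer and skip the heap allocation entirely. Call shape:

	var buf [32]byte // stack temporary (tmpstringbufsize)
	// s = slicebytetostring(&buf, ptr, n); buf is used if the result fits
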
t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize) a = ir.Nod(ir.OADDR, temp(t), nil) } - if n.Op == ir.ORUNES2STR { + if n.Op() == ir.ORUNES2STR { // slicerunetostring(*[32]byte, []rune) string - n = mkcall("slicerunetostring", n.Type, init, a, n.Left) + n = mkcall("slicerunetostring", n.Type(), init, a, n.Left()) } else { // slicebytetostring(*[32]byte, ptr *byte, n int) string - n.Left = cheapexpr(n.Left, init) - ptr, len := backingArrayPtrLen(n.Left) - n = mkcall("slicebytetostring", n.Type, init, a, ptr, len) + n.SetLeft(cheapexpr(n.Left(), init)) + ptr, len := backingArrayPtrLen(n.Left()) + n = mkcall("slicebytetostring", n.Type(), init, a, ptr, len) } case ir.OBYTES2STRTMP: - n.Left = walkexpr(n.Left, init) + n.SetLeft(walkexpr(n.Left(), init)) if !instrumenting { // Let the backend handle OBYTES2STRTMP directly // to avoid a function call to slicebytetostringtmp. break } // slicebytetostringtmp(ptr *byte, n int) string - n.Left = cheapexpr(n.Left, init) - ptr, len := backingArrayPtrLen(n.Left) - n = mkcall("slicebytetostringtmp", n.Type, init, ptr, len) + n.SetLeft(cheapexpr(n.Left(), init)) + ptr, len := backingArrayPtrLen(n.Left()) + n = mkcall("slicebytetostringtmp", n.Type(), init, ptr, len) case ir.OSTR2BYTES: - s := n.Left + s := n.Left() if ir.IsConst(s, constant.String) { sc := s.StringVal() // Allocate a [n]byte of the right size. t := types.NewArray(types.Types[types.TUINT8], int64(len(sc))) var a *ir.Node - if n.Esc == EscNone && len(sc) <= int(maxImplicitStackVarSize) { + if n.Esc() == EscNone && len(sc) <= int(maxImplicitStackVarSize) { a = ir.Nod(ir.OADDR, temp(t), nil) } else { a = callnew(t) @@ -1514,20 +1514,20 @@ opswitch: } // Slice the [n]byte to a []byte. - n.Op = ir.OSLICEARR - n.Left = p + n.SetOp(ir.OSLICEARR) + n.SetLeft(p) n = walkexpr(n, init) break } a := nodnil() - if n.Esc == EscNone { + if n.Esc() == EscNone { // Create temporary buffer for slice on stack. t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize) a = ir.Nod(ir.OADDR, temp(t), nil) } // stringtoslicebyte(*32[byte], string) []byte - n = mkcall("stringtoslicebyte", n.Type, init, a, conv(s, types.Types[types.TSTRING])) + n = mkcall("stringtoslicebyte", n.Type(), init, a, conv(s, types.Types[types.TSTRING])) case ir.OSTR2BYTESTMP: // []byte(string) conversion that creates a slice @@ -1537,38 +1537,38 @@ opswitch: // that know that the slice won't be mutated. // The only such case today is: // for i, c := range []byte(string) - n.Left = walkexpr(n.Left, init) + n.SetLeft(walkexpr(n.Left(), init)) case ir.OSTR2RUNES: a := nodnil() - if n.Esc == EscNone { + if n.Esc() == EscNone { // Create temporary buffer for slice on stack. t := types.NewArray(types.Types[types.TINT32], tmpstringbufsize) a = ir.Nod(ir.OADDR, temp(t), nil) } // stringtoslicerune(*[32]rune, string) []rune - n = mkcall("stringtoslicerune", n.Type, init, a, conv(n.Left, types.Types[types.TSTRING])) + n = mkcall("stringtoslicerune", n.Type(), init, a, conv(n.Left(), types.Types[types.TSTRING])) case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT, ir.OPTRLIT: - if isStaticCompositeLiteral(n) && !canSSAType(n.Type) { + if isStaticCompositeLiteral(n) && !canSSAType(n.Type()) { // n can be directly represented in the read-only data section. // Make direct reference to the static data. See issue 12841. 
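Note: this applies to composite literals built entirely from constants; the literal is emitted once as read-only static data (vstat) and the expression just references it, so code like the following copies from a static symbol instead of rebuilding the array on each call (issue 12841):

	func colors() [3]string {
		return [3]string{"red", "green", "blue"} // reference to readonly vstat
	}
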
- vstat := readonlystaticname(n.Type) + vstat := readonlystaticname(n.Type()) fixedlit(inInitFunction, initKindStatic, n, vstat, init) n = vstat n = typecheck(n, ctxExpr) break } - var_ := temp(n.Type) + var_ := temp(n.Type()) anylit(n, var_, init) n = var_ case ir.OSEND: - n1 := n.Right - n1 = assignconv(n1, n.Left.Type.Elem(), "chan send") + n1 := n.Right() + n1 = assignconv(n1, n.Left().Type().Elem(), "chan send") n1 = walkexpr(n1, init) n1 = ir.Nod(ir.OADDR, n1, nil) - n = mkcall1(chanfn("chansend1", 2, n.Left.Type), nil, init, n.Left, n1) + n = mkcall1(chanfn("chansend1", 2, n.Left().Type()), nil, init, n.Left(), n1) case ir.OCLOSURE: n = walkclosure(n, init) @@ -1582,17 +1582,17 @@ opswitch: // constants until walk. For example, if n is y%1 == 0, the // walk of y%1 may have replaced it by 0. // Check whether n with its updated args is itself now a constant. - t := n.Type + t := n.Type() n = evalConst(n) - if n.Type != t { - base.Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type) + if n.Type() != t { + base.Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type()) } - if n.Op == ir.OLITERAL { + if n.Op() == ir.OLITERAL { n = typecheck(n, ctxExpr) // Emit string symbol now to avoid emitting // any concurrently during the backend. if v := n.Val(); v.Kind() == constant.String { - _ = stringsym(n.Pos, constant.StringVal(v)) + _ = stringsym(n.Pos(), constant.StringVal(v)) } } @@ -1620,13 +1620,13 @@ func markTypeUsedInInterface(t *types.Type, from *obj.LSym) { // markUsedIfaceMethod marks that an interface method is used in the current // function. n is OCALLINTER node. func markUsedIfaceMethod(n *ir.Node) { - ityp := n.Left.Left.Type + ityp := n.Left().Left().Type() tsym := typenamesym(ityp).Linksym() - r := obj.Addrel(Curfn.Func.LSym) + r := obj.Addrel(Curfn.Func().LSym) r.Sym = tsym // n.Left.Xoffset is the method index * Widthptr (the offset of code pointer // in itab). - midx := n.Left.Xoffset / int64(Widthptr) + midx := n.Left().Offset() / int64(Widthptr) r.Add = ifaceMethodOffset(ityp, midx) r.Type = objabi.R_USEIFACEMETHOD } @@ -1680,17 +1680,17 @@ func rtconvfn(src, dst *types.Type) (param, result types.EType) { // TODO(josharian): combine this with its caller and simplify func reduceSlice(n *ir.Node) *ir.Node { low, high, max := n.SliceBounds() - if high != nil && high.Op == ir.OLEN && samesafeexpr(n.Left, high.Left) { + if high != nil && high.Op() == ir.OLEN && samesafeexpr(n.Left(), high.Left()) { // Reduce x[i:len(x)] to x[i:]. high = nil } n.SetSliceBounds(low, high, max) - if (n.Op == ir.OSLICE || n.Op == ir.OSLICESTR) && low == nil && high == nil { + if (n.Op() == ir.OSLICE || n.Op() == ir.OSLICESTR) && low == nil && high == nil { // Reduce x[:] to x. if base.Debug.Slice > 0 { base.Warn("slice: omit slice operation") } - return n.Left + return n.Left() } return n } @@ -1700,7 +1700,7 @@ func ascompatee1(l *ir.Node, r *ir.Node, init *ir.Nodes) *ir.Node { // making it impossible for reorder3 to work. n := ir.Nod(ir.OAS, l, r) - if l.Op == ir.OINDEXMAP { + if l.Op() == ir.OINDEXMAP { return n } @@ -1745,10 +1745,10 @@ func ascompatee(op ir.Op, nl, nr []*ir.Node, init *ir.Nodes) []*ir.Node { // fncall reports whether assigning an rvalue of type rt to an lvalue l might involve a function call. 
func fncall(l *ir.Node, rt *types.Type) bool { - if l.HasCall() || l.Op == ir.OINDEXMAP { + if l.HasCall() || l.Op() == ir.OINDEXMAP { return true } - if types.Identical(l.Type, rt) { + if types.Identical(l.Type(), rt) { return false } // There might be a conversion required, which might involve a runtime call. @@ -1782,8 +1782,8 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []*ir.Node { } res := ir.Nod(ir.ORESULT, nil, nil) - res.Xoffset = base.Ctxt.FixedFrameSize() + r.Offset - res.Type = r.Type + res.SetOffset(base.Ctxt.FixedFrameSize() + r.Offset) + res.SetType(r.Type) res.SetTypecheck(1) a := ir.Nod(ir.OAS, l, res) @@ -1804,15 +1804,15 @@ func mkdotargslice(typ *types.Type, args []*ir.Node) *ir.Node { var n *ir.Node if len(args) == 0 { n = nodnil() - n.Type = typ + n.SetType(typ) } else { n = ir.Nod(ir.OCOMPLIT, nil, typenod(typ)) - n.List.Append(args...) + n.PtrList().Append(args...) n.SetImplicit(true) } n = typecheck(n, ctxExpr) - if n.Type == nil { + if n.Type() == nil { base.Fatalf("mkdotargslice: typecheck failed") } return n @@ -1821,7 +1821,7 @@ func mkdotargslice(typ *types.Type, args []*ir.Node) *ir.Node { // fixVariadicCall rewrites calls to variadic functions to use an // explicit ... argument if one is not already present. func fixVariadicCall(call *ir.Node) { - fntype := call.Left.Type + fntype := call.Left().Type() if !fntype.IsVariadic() || call.IsDDD() { return } @@ -1829,33 +1829,33 @@ func fixVariadicCall(call *ir.Node) { vi := fntype.NumParams() - 1 vt := fntype.Params().Field(vi).Type - args := call.List.Slice() + args := call.List().Slice() extra := args[vi:] slice := mkdotargslice(vt, extra) for i := range extra { extra[i] = nil // allow GC } - call.List.Set(append(args[:vi], slice)) + call.PtrList().Set(append(args[:vi], slice)) call.SetIsDDD(true) } func walkCall(n *ir.Node, init *ir.Nodes) { - if n.Rlist.Len() != 0 { + if n.Rlist().Len() != 0 { return // already walked } - params := n.Left.Type.Params() - args := n.List.Slice() + params := n.Left().Type().Params() + args := n.List().Slice() - n.Left = walkexpr(n.Left, init) + n.SetLeft(walkexpr(n.Left(), init)) walkexprlist(args, init) // If this is a method call, add the receiver at the beginning of the args. - if n.Op == ir.OCALLMETH { + if n.Op() == ir.OCALLMETH { withRecv := make([]*ir.Node, len(args)+1) - withRecv[0] = n.Left.Left - n.Left.Left = nil + withRecv[0] = n.Left().Left() + n.Left().SetLeft(nil) copy(withRecv[1:], args) args = withRecv } @@ -1869,9 +1869,9 @@ func walkCall(n *ir.Node, init *ir.Nodes) { updateHasCall(arg) // Determine param type. var t *types.Type - if n.Op == ir.OCALLMETH { + if n.Op() == ir.OCALLMETH { if i == 0 { - t = n.Left.Type.Recv().Type + t = n.Left().Type().Recv().Type } else { t = params.Field(i - 1).Type } @@ -1889,18 +1889,18 @@ func walkCall(n *ir.Node, init *ir.Nodes) { } } - n.List.Set(tempAssigns) - n.Rlist.Set(args) + n.PtrList().Set(tempAssigns) + n.PtrRlist().Set(args) } // generate code for print func walkprint(nn *ir.Node, init *ir.Nodes) *ir.Node { // Hoist all the argument evaluation up before the lock. - walkexprlistcheap(nn.List.Slice(), init) + walkexprlistcheap(nn.List().Slice(), init) // For println, add " " between elements and "\n" at the end. 
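Note: println is first rewritten in terms of print with explicit separators, and each operand then lowers to a typed runtime print helper under printlock/printunlock. Roughly, for int operands:

	println(a, b)
	// becomes print(a, " ", b, "\n"), which lowers to, in effect:
	//   printlock(); printint(a); printstring(" ")
	//   printint(b); printstring("\n"); printunlock()
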
- if nn.Op == ir.OPRINTN { - s := nn.List.Slice() + if nn.Op() == ir.OPRINTN { + s := nn.List().Slice() t := make([]*ir.Node, 0, len(s)*2) for i, n := range s { if i != 0 { @@ -1909,11 +1909,11 @@ func walkprint(nn *ir.Node, init *ir.Nodes) *ir.Node { t = append(t, n) } t = append(t, nodstr("\n")) - nn.List.Set(t) + nn.PtrList().Set(t) } // Collapse runs of constant strings. - s := nn.List.Slice() + s := nn.List().Slice() t := make([]*ir.Node, 0, len(s)) for i := 0; i < len(s); { var strs []string @@ -1929,12 +1929,12 @@ func walkprint(nn *ir.Node, init *ir.Nodes) *ir.Node { i++ } } - nn.List.Set(t) + nn.PtrList().Set(t) calls := []*ir.Node{mkcall("printlock", nil, init)} - for i, n := range nn.List.Slice() { - if n.Op == ir.OLITERAL { - if n.Type == types.UntypedRune { + for i, n := range nn.List().Slice() { + if n.Op() == ir.OLITERAL { + if n.Type() == types.UntypedRune { n = defaultlit(n, types.Runetype) } @@ -1947,42 +1947,42 @@ func walkprint(nn *ir.Node, init *ir.Nodes) *ir.Node { } } - if n.Op != ir.OLITERAL && n.Type != nil && n.Type.Etype == types.TIDEAL { + if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Etype == types.TIDEAL { n = defaultlit(n, types.Types[types.TINT64]) } n = defaultlit(n, nil) - nn.List.SetIndex(i, n) - if n.Type == nil || n.Type.Etype == types.TFORW { + nn.List().SetIndex(i, n) + if n.Type() == nil || n.Type().Etype == types.TFORW { continue } var on *ir.Node - switch n.Type.Etype { + switch n.Type().Etype { case types.TINTER: - if n.Type.IsEmptyInterface() { + if n.Type().IsEmptyInterface() { on = syslook("printeface") } else { on = syslook("printiface") } - on = substArgTypes(on, n.Type) // any-1 + on = substArgTypes(on, n.Type()) // any-1 case types.TPTR: - if n.Type.Elem().NotInHeap() { + if n.Type().Elem().NotInHeap() { on = syslook("printuintptr") n = ir.Nod(ir.OCONV, n, nil) - n.Type = types.Types[types.TUNSAFEPTR] + n.SetType(types.Types[types.TUNSAFEPTR]) n = ir.Nod(ir.OCONV, n, nil) - n.Type = types.Types[types.TUINTPTR] + n.SetType(types.Types[types.TUINTPTR]) break } fallthrough case types.TCHAN, types.TMAP, types.TFUNC, types.TUNSAFEPTR: on = syslook("printpointer") - on = substArgTypes(on, n.Type) // any-1 + on = substArgTypes(on, n.Type()) // any-1 case types.TSLICE: on = syslook("printslice") - on = substArgTypes(on, n.Type) // any-1 + on = substArgTypes(on, n.Type()) // any-1 case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR: - if isRuntimePkg(n.Type.Sym.Pkg) && n.Type.Sym.Name == "hex" { + if isRuntimePkg(n.Type().Sym.Pkg) && n.Type().Sym.Name == "hex" { on = syslook("printhex") } else { on = syslook("printuint") @@ -2009,18 +2009,18 @@ func walkprint(nn *ir.Node, init *ir.Nodes) *ir.Node { on = syslook("printstring") } default: - badtype(ir.OPRINT, n.Type, nil) + badtype(ir.OPRINT, n.Type(), nil) continue } r := ir.Nod(ir.OCALL, on, nil) - if params := on.Type.Params().FieldSlice(); len(params) > 0 { + if params := on.Type().Params().FieldSlice(); len(params) > 0 { t := params[0].Type - if !types.Identical(t, n.Type) { + if !types.Identical(t, n.Type()) { n = ir.Nod(ir.OCONV, n, nil) - n.Type = t + n.SetType(t) } - r.List.Append(n) + r.PtrList().Append(n) } calls = append(calls, r) } @@ -2033,14 +2033,14 @@ func walkprint(nn *ir.Node, init *ir.Nodes) *ir.Node { r := ir.Nod(ir.OEMPTY, nil, nil) r = typecheck(r, ctxStmt) r = walkexpr(r, init) - r.Ninit.Set(calls) + r.PtrInit().Set(calls) return r } func callnew(t *types.Type) *ir.Node { dowidth(t) n := ir.Nod(ir.ONEWOBJ, typename(t), nil) - 
n.Type = types.NewPtr(t) + n.SetType(types.NewPtr(t)) n.SetTypecheck(1) n.MarkNonNil() return n @@ -2049,54 +2049,54 @@ func callnew(t *types.Type) *ir.Node { // isReflectHeaderDataField reports whether l is an expression p.Data // where p has type reflect.SliceHeader or reflect.StringHeader. func isReflectHeaderDataField(l *ir.Node) bool { - if l.Type != types.Types[types.TUINTPTR] { + if l.Type() != types.Types[types.TUINTPTR] { return false } var tsym *types.Sym - switch l.Op { + switch l.Op() { case ir.ODOT: - tsym = l.Left.Type.Sym + tsym = l.Left().Type().Sym case ir.ODOTPTR: - tsym = l.Left.Type.Elem().Sym + tsym = l.Left().Type().Elem().Sym default: return false } - if tsym == nil || l.Sym.Name != "Data" || tsym.Pkg.Path != "reflect" { + if tsym == nil || l.Sym().Name != "Data" || tsym.Pkg.Path != "reflect" { return false } return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader" } func convas(n *ir.Node, init *ir.Nodes) *ir.Node { - if n.Op != ir.OAS { - base.Fatalf("convas: not OAS %v", n.Op) + if n.Op() != ir.OAS { + base.Fatalf("convas: not OAS %v", n.Op()) } defer updateHasCall(n) n.SetTypecheck(1) - if n.Left == nil || n.Right == nil { + if n.Left() == nil || n.Right() == nil { return n } - lt := n.Left.Type - rt := n.Right.Type + lt := n.Left().Type() + rt := n.Right().Type() if lt == nil || rt == nil { return n } - if ir.IsBlank(n.Left) { - n.Right = defaultlit(n.Right, nil) + if ir.IsBlank(n.Left()) { + n.SetRight(defaultlit(n.Right(), nil)) return n } if !types.Identical(lt, rt) { - n.Right = assignconv(n.Right, lt, "assignment") - n.Right = walkexpr(n.Right, init) + n.SetRight(assignconv(n.Right(), lt, "assignment")) + n.SetRight(walkexpr(n.Right(), init)) } - dowidth(n.Right.Type) + dowidth(n.Right().Type()) return n } @@ -2115,45 +2115,45 @@ func reorder3(all []*ir.Node) []*ir.Node { var mapinit ir.Nodes for i, n := range all { - l := n.Left + l := n.Left() // Save subexpressions needed on left side. // Drill through non-dereferences. for { - if l.Op == ir.ODOT || l.Op == ir.OPAREN { - l = l.Left + if l.Op() == ir.ODOT || l.Op() == ir.OPAREN { + l = l.Left() continue } - if l.Op == ir.OINDEX && l.Left.Type.IsArray() { - l.Right = reorder3save(l.Right, all, i, &early) - l = l.Left + if l.Op() == ir.OINDEX && l.Left().Type().IsArray() { + l.SetRight(reorder3save(l.Right(), all, i, &early)) + l = l.Left() continue } break } - switch l.Op { + switch l.Op() { default: - base.Fatalf("reorder3 unexpected lvalue %#v", l.Op) + base.Fatalf("reorder3 unexpected lvalue %#v", l.Op()) case ir.ONAME: break case ir.OINDEX, ir.OINDEXMAP: - l.Left = reorder3save(l.Left, all, i, &early) - l.Right = reorder3save(l.Right, all, i, &early) - if l.Op == ir.OINDEXMAP { + l.SetLeft(reorder3save(l.Left(), all, i, &early)) + l.SetRight(reorder3save(l.Right(), all, i, &early)) + if l.Op() == ir.OINDEXMAP { all[i] = convas(all[i], &mapinit) } case ir.ODEREF, ir.ODOTPTR: - l.Left = reorder3save(l.Left, all, i, &early) + l.SetLeft(reorder3save(l.Left(), all, i, &early)) } // Save expression on right side. - all[i].Right = reorder3save(all[i].Right, all, i, &early) + all[i].SetRight(reorder3save(all[i].Right(), all, i, &early)) } early = append(mapinit.Slice(), early...) @@ -2171,26 +2171,26 @@ func reorder3save(n *ir.Node, all []*ir.Node, i int, early *[]*ir.Node) *ir.Node return n } - q := temp(n.Type) + q := temp(n.Type()) q = ir.Nod(ir.OAS, q, n) q = typecheck(q, ctxStmt) *early = append(*early, q) - return q.Left + return q.Left() } // what's the outer value that a write to n affects? 
// outer value means containing struct or array. func outervalue(n *ir.Node) *ir.Node { for { - switch n.Op { + switch n.Op() { case ir.OXDOT: base.Fatalf("OXDOT in walk") case ir.ODOT, ir.OPAREN, ir.OCONVNOP: - n = n.Left + n = n.Left() continue case ir.OINDEX: - if n.Left.Type != nil && n.Left.Type.IsArray() { - n = n.Left + if n.Left().Type() != nil && n.Left().Type().IsArray() { + n = n.Left() continue } } @@ -2208,8 +2208,8 @@ func aliased(r *ir.Node, all []*ir.Node) bool { // Treat all fields of a struct as referring to the whole struct. // We could do better but we would have to keep track of the fields. - for r.Op == ir.ODOT { - r = r.Left + for r.Op() == ir.ODOT { + r = r.Left() } // Look for obvious aliasing: a variable being assigned @@ -2220,12 +2220,12 @@ func aliased(r *ir.Node, all []*ir.Node) bool { memwrite := false for _, as := range all { // We can ignore assignments to blank. - if ir.IsBlank(as.Left) { + if ir.IsBlank(as.Left()) { continue } - l := outervalue(as.Left) - if l.Op != ir.ONAME { + l := outervalue(as.Left()) + if l.Op() != ir.ONAME { memwrite = true continue } @@ -2239,7 +2239,7 @@ func aliased(r *ir.Node, all []*ir.Node) bool { continue case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT: - if l.Name.Addrtaken() { + if l.Name().Addrtaken() { memwrite = true continue } @@ -2280,14 +2280,14 @@ func varexpr(n *ir.Node) bool { return true } - switch n.Op { + switch n.Op() { case ir.OLITERAL, ir.ONIL: return true case ir.ONAME: switch n.Class() { case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT: - if !n.Name.Addrtaken() { + if !n.Name().Addrtaken() { return true } } @@ -2315,7 +2315,7 @@ func varexpr(n *ir.Node) bool { ir.OCONVNOP, ir.OCONVIFACE, ir.ODOTTYPE: - return varexpr(n.Left) && varexpr(n.Right) + return varexpr(n.Left()) && varexpr(n.Right()) case ir.ODOT: // but not ODOTPTR // Should have been handled in aliased. 
@@ -2331,7 +2331,7 @@ func vmatch2(l *ir.Node, r *ir.Node) bool { if r == nil { return false } - switch r.Op { + switch r.Op() { // match each right given left case ir.ONAME: return l == r @@ -2340,13 +2340,13 @@ func vmatch2(l *ir.Node, r *ir.Node) bool { return false } - if vmatch2(l, r.Left) { + if vmatch2(l, r.Left()) { return true } - if vmatch2(l, r.Right) { + if vmatch2(l, r.Right()) { return true } - for _, n := range r.List.Slice() { + for _, n := range r.List().Slice() { if vmatch2(l, n) { return true } @@ -2361,7 +2361,7 @@ func vmatch1(l *ir.Node, r *ir.Node) bool { if l == nil || r == nil { return false } - switch l.Op { + switch l.Op() { case ir.ONAME: switch l.Class() { case ir.PPARAM, ir.PAUTO: @@ -2381,13 +2381,13 @@ func vmatch1(l *ir.Node, r *ir.Node) bool { return false } - if vmatch1(l.Left, r) { + if vmatch1(l.Left(), r) { return true } - if vmatch1(l.Right, r) { + if vmatch1(l.Right(), r) { return true } - for _, n := range l.List.Slice() { + for _, n := range l.List().Slice() { if vmatch1(n, r) { return true } @@ -2401,14 +2401,14 @@ func paramstoheap(params *types.Type) []*ir.Node { var nn []*ir.Node for _, t := range params.Fields().Slice() { v := ir.AsNode(t.Nname) - if v != nil && v.Sym != nil && strings.HasPrefix(v.Sym.Name, "~r") { // unnamed result + if v != nil && v.Sym() != nil && strings.HasPrefix(v.Sym().Name, "~r") { // unnamed result v = nil } if v == nil { continue } - if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil { + if stackcopy := v.Name().Param.Stackcopy; stackcopy != nil { nn = append(nn, walkstmt(ir.Nod(ir.ODCL, v, nil))) if stackcopy.Class() == ir.PPARAM { nn = append(nn, walkstmt(typecheck(ir.Nod(ir.OAS, v, stackcopy), ctxStmt))) @@ -2427,9 +2427,9 @@ func paramstoheap(params *types.Type) []*ir.Node { // even allocations to move params/results to the heap. // The generated code is added to Curfn's Enter list. func zeroResults() { - for _, f := range Curfn.Type.Results().Fields().Slice() { + for _, f := range Curfn.Type().Results().Fields().Slice() { v := ir.AsNode(f.Nname) - if v != nil && v.Name.Param.Heapaddr != nil { + if v != nil && v.Name().Param.Heapaddr != nil { // The local which points to the return value is the // thing that needs zeroing. This is already handled // by a Needzero annotation in plive.go:livenessepilogue. @@ -2442,10 +2442,10 @@ func zeroResults() { // I don't think the zeroing below matters. // The stack return value will never be marked as live anywhere in the function. // It is not written to until deferreturn returns. - v = v.Name.Param.Stackcopy + v = v.Name().Param.Stackcopy } // Zero the stack location containing f. - Curfn.Func.Enter.Append(ir.NodAt(Curfn.Pos, ir.OAS, v, nil)) + Curfn.Func().Enter.Append(ir.NodAt(Curfn.Pos(), ir.OAS, v, nil)) } } @@ -2458,7 +2458,7 @@ func returnsfromheap(params *types.Type) []*ir.Node { if v == nil { continue } - if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil && stackcopy.Class() == ir.PPARAMOUT { + if stackcopy := v.Name().Param.Stackcopy; stackcopy != nil && stackcopy.Class() == ir.PPARAMOUT { nn = append(nn, walkstmt(typecheck(ir.Nod(ir.OAS, stackcopy, v), ctxStmt))) } } @@ -2471,35 +2471,35 @@ func returnsfromheap(params *types.Type) []*ir.Node { // Enter and Exit lists. func heapmoves() { lno := base.Pos - base.Pos = Curfn.Pos - nn := paramstoheap(Curfn.Type.Recvs()) - nn = append(nn, paramstoheap(Curfn.Type.Params())...) - nn = append(nn, paramstoheap(Curfn.Type.Results())...) - Curfn.Func.Enter.Append(nn...) 
- base.Pos = Curfn.Func.Endlineno - Curfn.Func.Exit.Append(returnsfromheap(Curfn.Type.Results())...) + base.Pos = Curfn.Pos() + nn := paramstoheap(Curfn.Type().Recvs()) + nn = append(nn, paramstoheap(Curfn.Type().Params())...) + nn = append(nn, paramstoheap(Curfn.Type().Results())...) + Curfn.Func().Enter.Append(nn...) + base.Pos = Curfn.Func().Endlineno + Curfn.Func().Exit.Append(returnsfromheap(Curfn.Type().Results())...) base.Pos = lno } func vmkcall(fn *ir.Node, t *types.Type, init *ir.Nodes, va []*ir.Node) *ir.Node { - if fn.Type == nil || fn.Type.Etype != types.TFUNC { - base.Fatalf("mkcall %v %v", fn, fn.Type) + if fn.Type() == nil || fn.Type().Etype != types.TFUNC { + base.Fatalf("mkcall %v %v", fn, fn.Type()) } - n := fn.Type.NumParams() + n := fn.Type().NumParams() if n != len(va) { base.Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va)) } r := ir.Nod(ir.OCALL, fn, nil) - r.List.Set(va) - if fn.Type.NumResults() > 0 { + r.PtrList().Set(va) + if fn.Type().NumResults() > 0 { r = typecheck(r, ctxExpr|ctxMultiOK) } else { r = typecheck(r, ctxStmt) } r = walkexpr(r, init) - r.Type = t + r.SetType(t) return r } @@ -2512,11 +2512,11 @@ func mkcall1(fn *ir.Node, t *types.Type, init *ir.Nodes, args ...*ir.Node) *ir.N } func conv(n *ir.Node, t *types.Type) *ir.Node { - if types.Identical(n.Type, t) { + if types.Identical(n.Type(), t) { return n } n = ir.Nod(ir.OCONV, n, nil) - n.Type = t + n.SetType(t) n = typecheck(n, ctxExpr) return n } @@ -2524,11 +2524,11 @@ func conv(n *ir.Node, t *types.Type) *ir.Node { // convnop converts node n to type t using the OCONVNOP op // and typechecks the result with ctxExpr. func convnop(n *ir.Node, t *types.Type) *ir.Node { - if types.Identical(n.Type, t) { + if types.Identical(n.Type(), t) { return n } n = ir.Nod(ir.OCONVNOP, n, nil) - n.Type = t + n.SetType(t) n = typecheck(n, ctxExpr) return n } @@ -2541,13 +2541,13 @@ func byteindex(n *ir.Node) *ir.Node { // While converting from int8 to int is possible, it would yield // the wrong result for negative values. // Reinterpreting the value as an unsigned byte solves both cases. - if !types.Identical(n.Type, types.Types[types.TUINT8]) { + if !types.Identical(n.Type(), types.Types[types.TUINT8]) { n = ir.Nod(ir.OCONV, n, nil) - n.Type = types.Types[types.TUINT8] + n.SetType(types.Types[types.TUINT8]) n.SetTypecheck(1) } n = ir.Nod(ir.OCONV, n, nil) - n.Type = types.Types[types.TINT] + n.SetType(types.Types[types.TINT]) n.SetTypecheck(1) return n } @@ -2644,17 +2644,17 @@ func writebarrierfn(name string, l *types.Type, r *types.Type) *ir.Node { func addstr(n *ir.Node, init *ir.Nodes) *ir.Node { // order.expr rewrote OADDSTR to have a list of strings. 
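Note: with the operands gathered into a list, addstr lowers string addition to the runtime concatenation helpers: concatstring2 through concatstring5 for small operand counts, and concatstrings with a slice of operands beyond that, with an optional stack buffer when the result does not escape. In effect:

	s := a + b + c
	// becomes: s = concatstring3(buf, a, b, c) // buf != nil if non-escaping
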
- c := n.List.Len() + c := n.List().Len() if c < 2 { base.Fatalf("addstr count %d too small", c) } buf := nodnil() - if n.Esc == EscNone { + if n.Esc() == EscNone { sz := int64(0) - for _, n1 := range n.List.Slice() { - if n1.Op == ir.OLITERAL { + for _, n1 := range n.List().Slice() { + if n1.Op() == ir.OLITERAL { sz += int64(len(n1.StringVal())) } } @@ -2669,7 +2669,7 @@ func addstr(n *ir.Node, init *ir.Nodes) *ir.Node { // build list of string arguments args := []*ir.Node{buf} - for _, n2 := range n.List.Slice() { + for _, n2 := range n.List().Slice() { args = append(args, conv(n2, types.Types[types.TSTRING])) } @@ -2687,28 +2687,28 @@ func addstr(n *ir.Node, init *ir.Nodes) *ir.Node { if prealloc[n] != nil { prealloc[slice] = prealloc[n] } - slice.List.Set(args[1:]) // skip buf arg + slice.PtrList().Set(args[1:]) // skip buf arg args = []*ir.Node{buf, slice} - slice.Esc = EscNone + slice.SetEsc(EscNone) } cat := syslook(fn) r := ir.Nod(ir.OCALL, cat, nil) - r.List.Set(args) + r.PtrList().Set(args) r = typecheck(r, ctxExpr) r = walkexpr(r, init) - r.Type = n.Type + r.SetType(n.Type()) return r } func walkAppendArgs(n *ir.Node, init *ir.Nodes) { - walkexprlistsafe(n.List.Slice(), init) + walkexprlistsafe(n.List().Slice(), init) // walkexprlistsafe will leave OINDEX (s[n]) alone if both s // and n are name or literal, but those may index the slice we're // modifying here. Fix explicitly. - ls := n.List.Slice() + ls := n.List().Slice() for i1, n1 := range ls { ls[i1] = cheapexpr(n1, init) } @@ -2731,18 +2731,18 @@ func walkAppendArgs(n *ir.Node, init *ir.Nodes) { func appendslice(n *ir.Node, init *ir.Nodes) *ir.Node { walkAppendArgs(n, init) - l1 := n.List.First() - l2 := n.List.Second() + l1 := n.List().First() + l2 := n.List().Second() l2 = cheapexpr(l2, init) - n.List.SetSecond(l2) + n.List().SetSecond(l2) var nodes ir.Nodes // var s []T - s := temp(l1.Type) + s := temp(l1.Type()) nodes.Append(ir.Nod(ir.OAS, s, l1)) // s = l1 - elemtype := s.Type.Elem() + elemtype := s.Type().Elem() // n := len(s) + len(l2) nn := temp(types.Types[types.TINT]) @@ -2752,14 +2752,14 @@ func appendslice(n *ir.Node, init *ir.Nodes) *ir.Node { nif := ir.Nod(ir.OIF, nil, nil) nuint := conv(nn, types.Types[types.TUINT]) scapuint := conv(ir.Nod(ir.OCAP, s, nil), types.Types[types.TUINT]) - nif.Left = ir.Nod(ir.OGT, nuint, scapuint) + nif.SetLeft(ir.Nod(ir.OGT, nuint, scapuint)) // instantiate growslice(typ *type, []any, int) []any fn := syslook("growslice") fn = substArgTypes(fn, elemtype, elemtype) // s = growslice(T, s, n) - nif.Nbody.Set1(ir.Nod(ir.OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(elemtype), s, nn))) + nif.PtrBody().Set1(ir.Nod(ir.OAS, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn))) nodes.Append(nif) // s = s[:n] @@ -2772,17 +2772,17 @@ func appendslice(n *ir.Node, init *ir.Nodes) *ir.Node { if elemtype.HasPointers() { // copy(s[len(l1):], l2) nptr1 := ir.Nod(ir.OSLICE, s, nil) - nptr1.Type = s.Type + nptr1.SetType(s.Type()) nptr1.SetSliceBounds(ir.Nod(ir.OLEN, l1, nil), nil, nil) nptr1 = cheapexpr(nptr1, &nodes) nptr2 := l2 - Curfn.Func.SetWBPos(n.Pos) + Curfn.Func().SetWBPos(n.Pos()) // instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int fn := syslook("typedslicecopy") - fn = substArgTypes(fn, l1.Type.Elem(), l2.Type.Elem()) + fn = substArgTypes(fn, l1.Type().Elem(), l2.Type().Elem()) ptr1, len1 := backingArrayPtrLen(nptr1) ptr2, len2 := backingArrayPtrLen(nptr2) ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, 
typename(elemtype), ptr1, len1, ptr2, len2) @@ -2791,7 +2791,7 @@ func appendslice(n *ir.Node, init *ir.Nodes) *ir.Node { // copy(s[len(l1):], l2) // l2 can be a slice or string. nptr1 := ir.Nod(ir.OSLICE, s, nil) - nptr1.Type = s.Type + nptr1.SetType(s.Type()) nptr1.SetSliceBounds(ir.Nod(ir.OLEN, l1, nil), nil, nil) nptr1 = cheapexpr(nptr1, &nodes) nptr2 := l2 @@ -2800,7 +2800,7 @@ func appendslice(n *ir.Node, init *ir.Nodes) *ir.Node { ptr2, len2 := backingArrayPtrLen(nptr2) fn := syslook("slicecopy") - fn = substArgTypes(fn, ptr1.Type.Elem(), ptr2.Type.Elem()) + fn = substArgTypes(fn, ptr1.Type().Elem(), ptr2.Type().Elem()) ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, nodintconst(elemtype.Width)) } else { // memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T)) @@ -2837,12 +2837,12 @@ func isAppendOfMake(n *ir.Node) bool { base.Fatalf("missing typecheck: %+v", n) } - if n.Op != ir.OAPPEND || !n.IsDDD() || n.List.Len() != 2 { + if n.Op() != ir.OAPPEND || !n.IsDDD() || n.List().Len() != 2 { return false } - second := n.List.Second() - if second.Op != ir.OMAKESLICE || second.Right != nil { + second := n.List().Second() + if second.Op() != ir.OMAKESLICE || second.Right() != nil { return false } @@ -2852,8 +2852,8 @@ func isAppendOfMake(n *ir.Node) bool { // typecheck made sure that constant arguments to make are not negative and fit into an int. // The care of overflow of the len argument to make will be handled by an explicit check of int(len) < 0 during runtime. - y := second.Left - if !ir.IsConst(y, constant.Int) && y.Type.Size() > types.Types[types.TUINT].Size() { + y := second.Left() + if !ir.IsConst(y, constant.Int) && y.Type().Size() > types.Types[types.TUINT].Size() { return false } @@ -2891,14 +2891,14 @@ func extendslice(n *ir.Node, init *ir.Nodes) *ir.Node { // isAppendOfMake made sure all possible positive values of l2 fit into an uint. // The case of l2 overflow when converting from e.g. uint to int is handled by an explicit // check of l2 < 0 at runtime which is generated below. - l2 := conv(n.List.Second().Left, types.Types[types.TINT]) + l2 := conv(n.List().Second().Left(), types.Types[types.TINT]) l2 = typecheck(l2, ctxExpr) - n.List.SetSecond(l2) // walkAppendArgs expects l2 in n.List.Second(). + n.List().SetSecond(l2) // walkAppendArgs expects l2 in n.List.Second(). 
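For context, a sketch of the source-level pattern this code handles (an illustration, not part of the patch): isAppendOfMake matches append(s, make([]T, n)...), and extendslice then grows s in place with a zero-filled tail instead of materializing the temporary slice.

	package main

	// grow appends n zero bytes to s. The append(s, make([]byte, n)...)
	// form below is the one extendslice optimizes: s is grown directly
	// and the new tail is zero-filled, with no temporary slice allocated.
	func grow(s []byte, n int) []byte {
		return append(s, make([]byte, n)...)
	}

	func main() {
		s := grow([]byte{1, 2, 3}, 4)
		println(len(s)) // 7
	}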
walkAppendArgs(n, init) - l1 := n.List.First() - l2 = n.List.Second() // re-read l2, as it may have been updated by walkAppendArgs + l1 := n.List().First() + l2 = n.List().Second() // re-read l2, as it may have been updated by walkAppendArgs var nodes []*ir.Node @@ -2907,14 +2907,14 @@ func extendslice(n *ir.Node, init *ir.Nodes) *ir.Node { nifneg.SetLikely(true) // else panicmakeslicelen() - nifneg.Rlist.Set1(mkcall("panicmakeslicelen", nil, init)) + nifneg.PtrRlist().Set1(mkcall("panicmakeslicelen", nil, init)) nodes = append(nodes, nifneg) // s := l1 - s := temp(l1.Type) + s := temp(l1.Type()) nodes = append(nodes, ir.Nod(ir.OAS, s, l1)) - elemtype := s.Type.Elem() + elemtype := s.Type().Elem() // n := len(s) + l2 nn := temp(types.Types[types.TINT]) @@ -2930,7 +2930,7 @@ func extendslice(n *ir.Node, init *ir.Nodes) *ir.Node { fn = substArgTypes(fn, elemtype, elemtype) // s = growslice(T, s, n) - nif.Nbody.Set1(ir.Nod(ir.OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(elemtype), s, nn))) + nif.PtrBody().Set1(ir.Nod(ir.OAS, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn))) nodes = append(nodes, nif) // s = s[:n] @@ -2940,7 +2940,7 @@ func extendslice(n *ir.Node, init *ir.Nodes) *ir.Node { nodes = append(nodes, ir.Nod(ir.OAS, s, nt)) // lptr := &l1[0] - l1ptr := temp(l1.Type.Elem().PtrTo()) + l1ptr := temp(l1.Type().Elem().PtrTo()) tmp := ir.Nod(ir.OSPTR, l1, nil) nodes = append(nodes, ir.Nod(ir.OAS, l1ptr, tmp)) @@ -2963,7 +2963,7 @@ func extendslice(n *ir.Node, init *ir.Nodes) *ir.Node { hasPointers := elemtype.HasPointers() if hasPointers { clrname = "memclrHasPointers" - Curfn.Func.SetWBPos(n.Pos) + Curfn.Func().SetWBPos(n.Pos()) } var clr ir.Nodes @@ -2973,7 +2973,7 @@ func extendslice(n *ir.Node, init *ir.Nodes) *ir.Node { if hasPointers { // if l1ptr == sptr nifclr := ir.Nod(ir.OIF, ir.Nod(ir.OEQ, l1ptr, sptr), nil) - nifclr.Nbody = clr + nifclr.SetBody(clr) nodes = append(nodes, nifclr) } else { nodes = append(nodes, clr.Slice()...) @@ -3007,13 +3007,13 @@ func extendslice(n *ir.Node, init *ir.Nodes) *ir.Node { // } // s func walkappend(n *ir.Node, init *ir.Nodes, dst *ir.Node) *ir.Node { - if !samesafeexpr(dst, n.List.First()) { - n.List.SetFirst(safeexpr(n.List.First(), init)) - n.List.SetFirst(walkexpr(n.List.First(), init)) + if !samesafeexpr(dst, n.List().First()) { + n.List().SetFirst(safeexpr(n.List().First(), init)) + n.List().SetFirst(walkexpr(n.List().First(), init)) } - walkexprlistsafe(n.List.Slice()[1:], init) + walkexprlistsafe(n.List().Slice()[1:], init) - nsrc := n.List.First() + nsrc := n.List().First() // walkexprlistsafe will leave OINDEX (s[n]) alone if both s // and n are name or literal, but those may index the slice we're @@ -3021,17 +3021,17 @@ func walkappend(n *ir.Node, init *ir.Nodes, dst *ir.Node) *ir.Node { // Using cheapexpr also makes sure that the evaluation // of all arguments (and especially any panics) happen // before we begin to modify the slice in a visible way. 
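A self-contained illustration of the evaluation-order guarantee described above (a sketch of the observable behavior, not compiler code): if evaluating an append argument panics, the destination slice must not have been modified in any visible way.

	package main

	func bad() int { panic("boom") }

	func main() {
		s := []int{1}
		defer func() {
			recover()
			// bad() panicked before append changed s in any visible
			// way, so s is still the original one-element slice.
			println(len(s), s[0]) // 1 1
		}()
		s = append(s, 2, bad())
	}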
-	ls := n.List.Slice()[1:]
+	ls := n.List().Slice()[1:]
 	for i, n := range ls {
 		n = cheapexpr(n, init)
-		if !types.Identical(n.Type, nsrc.Type.Elem()) {
-			n = assignconv(n, nsrc.Type.Elem(), "append")
+		if !types.Identical(n.Type(), nsrc.Type().Elem()) {
+			n = assignconv(n, nsrc.Type().Elem(), "append")
 			n = walkexpr(n, init)
 		}
 		ls[i] = n
 	}
 
-	argc := n.List.Len() - 1
+	argc := n.List().Len() - 1
 	if argc < 1 {
 		return nsrc
 	}
@@ -3044,18 +3044,18 @@ func walkappend(n *ir.Node, init *ir.Nodes, dst *ir.Node) *ir.Node {
 
 	var l []*ir.Node
 
-	ns := temp(nsrc.Type)
+	ns := temp(nsrc.Type())
 	l = append(l, ir.Nod(ir.OAS, ns, nsrc)) // s = src
 
 	na := nodintconst(int64(argc)) // const argc
 	nx := ir.Nod(ir.OIF, nil, nil) // if cap(s) - len(s) < argc
-	nx.Left = ir.Nod(ir.OLT, ir.Nod(ir.OSUB, ir.Nod(ir.OCAP, ns, nil), ir.Nod(ir.OLEN, ns, nil)), na)
+	nx.SetLeft(ir.Nod(ir.OLT, ir.Nod(ir.OSUB, ir.Nod(ir.OCAP, ns, nil), ir.Nod(ir.OLEN, ns, nil)), na))
 
 	fn := syslook("growslice") // growslice(<type>, old []T, mincap int) (ret []T)
-	fn = substArgTypes(fn, ns.Type.Elem(), ns.Type.Elem())
+	fn = substArgTypes(fn, ns.Type().Elem(), ns.Type().Elem())
 
-	nx.Nbody.Set1(ir.Nod(ir.OAS, ns,
-		mkcall1(fn, ns.Type, &nx.Ninit, typename(ns.Type.Elem()), ns,
+	nx.PtrBody().Set1(ir.Nod(ir.OAS, ns,
+		mkcall1(fn, ns.Type(), nx.PtrInit(), typename(ns.Type().Elem()), ns,
 			ir.Nod(ir.OADD, ir.Nod(ir.OLEN, ns, nil), na))))
 
 	l = append(l, nx)
@@ -3068,7 +3068,7 @@ func walkappend(n *ir.Node, init *ir.Nodes, dst *ir.Node) *ir.Node {
 	nx.SetBounded(true)
 	l = append(l, ir.Nod(ir.OAS, ns, nx)) // s = s[:n+argc]
 
-	ls = n.List.Slice()[1:]
+	ls = n.List().Slice()[1:]
 	for i, n := range ls {
 		nx = ir.Nod(ir.OINDEX, ns, nn) // s[n] ...
 		nx.SetBounded(true)
@@ -3096,14 +3096,14 @@ func walkappend(n *ir.Node, init *ir.Nodes, dst *ir.Node) *ir.Node {
 // Also works if b is a string.
 //
 func copyany(n *ir.Node, init *ir.Nodes, runtimecall bool) *ir.Node {
-	if n.Left.Type.Elem().HasPointers() {
-		Curfn.Func.SetWBPos(n.Pos)
-		fn := writebarrierfn("typedslicecopy", n.Left.Type.Elem(), n.Right.Type.Elem())
-		n.Left = cheapexpr(n.Left, init)
-		ptrL, lenL := backingArrayPtrLen(n.Left)
-		n.Right = cheapexpr(n.Right, init)
-		ptrR, lenR := backingArrayPtrLen(n.Right)
-		return mkcall1(fn, n.Type, init, typename(n.Left.Type.Elem()), ptrL, lenL, ptrR, lenR)
+	if n.Left().Type().Elem().HasPointers() {
+		Curfn.Func().SetWBPos(n.Pos())
+		fn := writebarrierfn("typedslicecopy", n.Left().Type().Elem(), n.Right().Type().Elem())
+		n.SetLeft(cheapexpr(n.Left(), init))
+		ptrL, lenL := backingArrayPtrLen(n.Left())
+		n.SetRight(cheapexpr(n.Right(), init))
+		ptrR, lenR := backingArrayPtrLen(n.Right())
+		return mkcall1(fn, n.Type(), init, typename(n.Left().Type().Elem()), ptrL, lenL, ptrR, lenR)
 	}
 
 	if runtimecall {
@@ -3111,24 +3111,24 @@ func copyany(n *ir.Node, init *ir.Nodes, runtimecall bool) *ir.Node {
 		// copy(n.Left, n.Right)
 		// n.Right can be a slice or string.
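A usage-level reminder of the case handled here (a sketch, not part of the patch): copy accepts a string source, which this path lowers to a runtime slicecopy; element types containing pointers take the typedslicecopy branch above instead.

	package main

	func main() {
		dst := make([]byte, 3)
		// copy from a string source; only min(len(dst), len(src))
		// bytes are moved.
		n := copy(dst, "hello")
		println(n, string(dst)) // 3 hel
	}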
- n.Left = cheapexpr(n.Left, init) - ptrL, lenL := backingArrayPtrLen(n.Left) - n.Right = cheapexpr(n.Right, init) - ptrR, lenR := backingArrayPtrLen(n.Right) + n.SetLeft(cheapexpr(n.Left(), init)) + ptrL, lenL := backingArrayPtrLen(n.Left()) + n.SetRight(cheapexpr(n.Right(), init)) + ptrR, lenR := backingArrayPtrLen(n.Right()) fn := syslook("slicecopy") - fn = substArgTypes(fn, ptrL.Type.Elem(), ptrR.Type.Elem()) + fn = substArgTypes(fn, ptrL.Type().Elem(), ptrR.Type().Elem()) - return mkcall1(fn, n.Type, init, ptrL, lenL, ptrR, lenR, nodintconst(n.Left.Type.Elem().Width)) + return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, nodintconst(n.Left().Type().Elem().Width)) } - n.Left = walkexpr(n.Left, init) - n.Right = walkexpr(n.Right, init) - nl := temp(n.Left.Type) - nr := temp(n.Right.Type) + n.SetLeft(walkexpr(n.Left(), init)) + n.SetRight(walkexpr(n.Right(), init)) + nl := temp(n.Left().Type()) + nr := temp(n.Right().Type()) var l []*ir.Node - l = append(l, ir.Nod(ir.OAS, nl, n.Left)) - l = append(l, ir.Nod(ir.OAS, nr, n.Right)) + l = append(l, ir.Nod(ir.OAS, nl, n.Left())) + l = append(l, ir.Nod(ir.OAS, nr, n.Right())) nfrm := ir.Nod(ir.OSPTR, nr, nil) nto := ir.Nod(ir.OSPTR, nl, nil) @@ -3141,8 +3141,8 @@ func copyany(n *ir.Node, init *ir.Nodes, runtimecall bool) *ir.Node { // if n > len(frm) { n = len(frm) } nif := ir.Nod(ir.OIF, nil, nil) - nif.Left = ir.Nod(ir.OGT, nlen, ir.Nod(ir.OLEN, nr, nil)) - nif.Nbody.Append(ir.Nod(ir.OAS, nlen, ir.Nod(ir.OLEN, nr, nil))) + nif.SetLeft(ir.Nod(ir.OGT, nlen, ir.Nod(ir.OLEN, nr, nil))) + nif.PtrBody().Append(ir.Nod(ir.OAS, nlen, ir.Nod(ir.OLEN, nr, nil))) l = append(l, nif) // if to.ptr != frm.ptr { memmove( ... ) } @@ -3151,13 +3151,13 @@ func copyany(n *ir.Node, init *ir.Nodes, runtimecall bool) *ir.Node { l = append(l, ne) fn := syslook("memmove") - fn = substArgTypes(fn, nl.Type.Elem(), nl.Type.Elem()) + fn = substArgTypes(fn, nl.Type().Elem(), nl.Type().Elem()) nwid := temp(types.Types[types.TUINTPTR]) setwid := ir.Nod(ir.OAS, nwid, conv(nlen, types.Types[types.TUINTPTR])) - ne.Nbody.Append(setwid) - nwid = ir.Nod(ir.OMUL, nwid, nodintconst(nl.Type.Elem().Width)) + ne.PtrBody().Append(setwid) + nwid = ir.Nod(ir.OMUL, nwid, nodintconst(nl.Type().Elem().Width)) call := mkcall1(fn, nil, init, nto, nfrm, nwid) - ne.Nbody.Append(call) + ne.PtrBody().Append(call) typecheckslice(l, ctxStmt) walkstmtlist(l) @@ -3179,12 +3179,12 @@ func eqfor(t *types.Type) (n *ir.Node, needsize bool) { sym := typesymprefix(".eq", t) n := NewName(sym) setNodeNameFunc(n) - n.Type = functype(nil, []*ir.Node{ + n.SetType(functype(nil, []*ir.Node{ anonfield(types.NewPtr(t)), anonfield(types.NewPtr(t)), }, []*ir.Node{ anonfield(types.Types[types.TBOOL]), - }) + })) return n, false } base.Fatalf("eqfor %v", t) @@ -3194,31 +3194,31 @@ func eqfor(t *types.Type) (n *ir.Node, needsize bool) { // The result of walkcompare MUST be assigned back to n, e.g. 
// n.Left = walkcompare(n.Left, init) func walkcompare(n *ir.Node, init *ir.Nodes) *ir.Node { - if n.Left.Type.IsInterface() && n.Right.Type.IsInterface() && n.Left.Op != ir.ONIL && n.Right.Op != ir.ONIL { + if n.Left().Type().IsInterface() && n.Right().Type().IsInterface() && n.Left().Op() != ir.ONIL && n.Right().Op() != ir.ONIL { return walkcompareInterface(n, init) } - if n.Left.Type.IsString() && n.Right.Type.IsString() { + if n.Left().Type().IsString() && n.Right().Type().IsString() { return walkcompareString(n, init) } - n.Left = walkexpr(n.Left, init) - n.Right = walkexpr(n.Right, init) + n.SetLeft(walkexpr(n.Left(), init)) + n.SetRight(walkexpr(n.Right(), init)) // Given mixed interface/concrete comparison, // rewrite into types-equal && data-equal. // This is efficient, avoids allocations, and avoids runtime calls. - if n.Left.Type.IsInterface() != n.Right.Type.IsInterface() { + if n.Left().Type().IsInterface() != n.Right().Type().IsInterface() { // Preserve side-effects in case of short-circuiting; see #32187. - l := cheapexpr(n.Left, init) - r := cheapexpr(n.Right, init) + l := cheapexpr(n.Left(), init) + r := cheapexpr(n.Right(), init) // Swap so that l is the interface value and r is the concrete value. - if n.Right.Type.IsInterface() { + if n.Right().Type().IsInterface() { l, r = r, l } // Handle both == and !=. - eq := n.Op + eq := n.Op() andor := ir.OOROR if eq == ir.OEQ { andor = ir.OANDAND @@ -3230,9 +3230,9 @@ func walkcompare(n *ir.Node, init *ir.Nodes) *ir.Node { // l.tab != nil && l.tab._type == type(r) var eqtype *ir.Node tab := ir.Nod(ir.OITAB, l, nil) - rtyp := typename(r.Type) - if l.Type.IsEmptyInterface() { - tab.Type = types.NewPtr(types.Types[types.TUINT8]) + rtyp := typename(r.Type()) + if l.Type().IsEmptyInterface() { + tab.SetType(types.NewPtr(types.Types[types.TUINT8])) tab.SetTypecheck(1) eqtype = ir.Nod(eq, tab, rtyp) } else { @@ -3241,7 +3241,7 @@ func walkcompare(n *ir.Node, init *ir.Nodes) *ir.Node { eqtype = ir.Nod(andor, nonnil, match) } // Check for data equal. - eqdata := ir.Nod(eq, ifaceData(n.Pos, l, r.Type), r) + eqdata := ir.Nod(eq, ifaceData(n.Pos(), l, r.Type()), r) // Put it all together. expr := ir.Nod(andor, eqtype, eqdata) n = finishcompare(n, expr, init) @@ -3252,7 +3252,7 @@ func walkcompare(n *ir.Node, init *ir.Nodes) *ir.Node { // Otherwise back end handles it. // While we're here, decide whether to // inline or call an eq alg. - t := n.Left.Type + t := n.Left().Type() var inline bool maxcmpsize := int64(4) @@ -3265,18 +3265,18 @@ func walkcompare(n *ir.Node, init *ir.Nodes) *ir.Node { switch t.Etype { default: if base.Debug.Libfuzzer != 0 && t.IsInteger() { - n.Left = cheapexpr(n.Left, init) - n.Right = cheapexpr(n.Right, init) + n.SetLeft(cheapexpr(n.Left(), init)) + n.SetRight(cheapexpr(n.Right(), init)) // If exactly one comparison operand is // constant, invoke the constcmp functions // instead, and arrange for the constant // operand to be the first argument. 
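At the source level, the mixed interface/concrete rewrite above applies to comparisons like the one below. The commented expansion is only a rough sketch of the generated checks, using informal names for the interface's type and data words:

	package main

	func main() {
		var i interface{} = 42
		// Roughly: "type word of i is int, and the data word equals 42".
		// 42 is never converted to an interface, no runtime call is
		// needed, and short-circuit evaluation preserves side effects.
		println(i == 42) // true
	}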
- l, r := n.Left, n.Right - if r.Op == ir.OLITERAL { + l, r := n.Left(), n.Right() + if r.Op() == ir.OLITERAL { l, r = r, l } - constcmp := l.Op == ir.OLITERAL && r.Op != ir.OLITERAL + constcmp := l.Op() == ir.OLITERAL && r.Op() != ir.OLITERAL var fn string var paramType *types.Type @@ -3318,13 +3318,13 @@ func walkcompare(n *ir.Node, init *ir.Nodes) *ir.Node { inline = t.NumComponents(types.IgnoreBlankFields) <= 4 } - cmpl := n.Left - for cmpl != nil && cmpl.Op == ir.OCONVNOP { - cmpl = cmpl.Left + cmpl := n.Left() + for cmpl != nil && cmpl.Op() == ir.OCONVNOP { + cmpl = cmpl.Left() } - cmpr := n.Right - for cmpr != nil && cmpr.Op == ir.OCONVNOP { - cmpr = cmpr.Left + cmpr := n.Right() + for cmpr != nil && cmpr.Op() == ir.OCONVNOP { + cmpr = cmpr.Left() } // Chose not to inline. Call equality function directly. @@ -3336,13 +3336,13 @@ func walkcompare(n *ir.Node, init *ir.Nodes) *ir.Node { fn, needsize := eqfor(t) call := ir.Nod(ir.OCALL, fn, nil) - call.List.Append(ir.Nod(ir.OADDR, cmpl, nil)) - call.List.Append(ir.Nod(ir.OADDR, cmpr, nil)) + call.PtrList().Append(ir.Nod(ir.OADDR, cmpl, nil)) + call.PtrList().Append(ir.Nod(ir.OADDR, cmpr, nil)) if needsize { - call.List.Append(nodintconst(t.Width)) + call.PtrList().Append(nodintconst(t.Width)) } res := call - if n.Op != ir.OEQ { + if n.Op() != ir.OEQ { res = ir.Nod(ir.ONOT, res, nil) } n = finishcompare(n, res, init) @@ -3351,12 +3351,12 @@ func walkcompare(n *ir.Node, init *ir.Nodes) *ir.Node { // inline: build boolean expression comparing element by element andor := ir.OANDAND - if n.Op == ir.ONE { + if n.Op() == ir.ONE { andor = ir.OOROR } var expr *ir.Node compare := func(el, er *ir.Node) { - a := ir.Nod(n.Op, el, er) + a := ir.Nod(n.Op(), el, er) if expr == nil { expr = a } else { @@ -3433,10 +3433,10 @@ func walkcompare(n *ir.Node, init *ir.Nodes) *ir.Node { } } if expr == nil { - expr = nodbool(n.Op == ir.OEQ) + expr = nodbool(n.Op() == ir.OEQ) // We still need to use cmpl and cmpr, in case they contain // an expression which might panic. See issue 23837. - t := temp(cmpl.Type) + t := temp(cmpl.Type()) a1 := ir.Nod(ir.OAS, t, cmpl) a1 = typecheck(a1, ctxStmt) a2 := ir.Nod(ir.OAS, t, cmpr) @@ -3449,22 +3449,22 @@ func walkcompare(n *ir.Node, init *ir.Nodes) *ir.Node { func tracecmpArg(n *ir.Node, t *types.Type, init *ir.Nodes) *ir.Node { // Ugly hack to avoid "constant -1 overflows uintptr" errors, etc. - if n.Op == ir.OLITERAL && n.Type.IsSigned() && n.Int64Val() < 0 { - n = copyexpr(n, n.Type, init) + if n.Op() == ir.OLITERAL && n.Type().IsSigned() && n.Int64Val() < 0 { + n = copyexpr(n, n.Type(), init) } return conv(n, t) } func walkcompareInterface(n *ir.Node, init *ir.Nodes) *ir.Node { - n.Right = cheapexpr(n.Right, init) - n.Left = cheapexpr(n.Left, init) - eqtab, eqdata := eqinterface(n.Left, n.Right) + n.SetRight(cheapexpr(n.Right(), init)) + n.SetLeft(cheapexpr(n.Left(), init)) + eqtab, eqdata := eqinterface(n.Left(), n.Right()) var cmp *ir.Node - if n.Op == ir.OEQ { + if n.Op() == ir.OEQ { cmp = ir.Nod(ir.OANDAND, eqtab, eqdata) } else { - eqtab.Op = ir.ONE + eqtab.SetOp(ir.ONE) cmp = ir.Nod(ir.OOROR, eqtab, ir.Nod(ir.ONOT, eqdata, nil)) } return finishcompare(n, cmp, init) @@ -3474,21 +3474,21 @@ func walkcompareString(n *ir.Node, init *ir.Nodes) *ir.Node { // Rewrite comparisons to short constant strings as length+byte-wise comparisons. 
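The rewrite named above turns equality against a short constant string into a length test plus byte-wise tests; a behavioral sketch (the real lowering may merge bytes into 2- or 4-byte loads when canMergeLoads allows):

	package main

	func isGet(m string) bool {
		// Compiles roughly to:
		//   len(m) == 3 && m[0] == 'G' && m[1] == 'E' && m[2] == 'T'
		// avoiding a call into the runtime.
		return m == "GET"
	}

	func main() {
		println(isGet("GET"), isGet("PUT")) // true false
	}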
 	var cs, ncs *ir.Node // const string, non-const string
 	switch {
-	case ir.IsConst(n.Left, constant.String) && ir.IsConst(n.Right, constant.String):
+	case ir.IsConst(n.Left(), constant.String) && ir.IsConst(n.Right(), constant.String):
 		// ignore; will be constant evaluated
-	case ir.IsConst(n.Left, constant.String):
-		cs = n.Left
-		ncs = n.Right
-	case ir.IsConst(n.Right, constant.String):
-		cs = n.Right
-		ncs = n.Left
+	case ir.IsConst(n.Left(), constant.String):
+		cs = n.Left()
+		ncs = n.Right()
+	case ir.IsConst(n.Right(), constant.String):
+		cs = n.Right()
+		ncs = n.Left()
 	}
 	if cs != nil {
-		cmp := n.Op
+		cmp := n.Op()
 		// Our comparison below assumes that the non-constant string
 		// is on the left hand side, so rewrite "" cmp x to x cmp "".
 		// See issue 24817.
-		if ir.IsConst(n.Left, constant.String) {
+		if ir.IsConst(n.Left(), constant.String) {
 			cmp = brrev(cmp)
 		}
@@ -3571,25 +3571,25 @@ func walkcompareString(n *ir.Node, init *ir.Nodes) *ir.Node {
 	}
 
 	var r *ir.Node
-	if n.Op == ir.OEQ || n.Op == ir.ONE {
+	if n.Op() == ir.OEQ || n.Op() == ir.ONE {
 		// prepare for rewrite below
-		n.Left = cheapexpr(n.Left, init)
-		n.Right = cheapexpr(n.Right, init)
-		eqlen, eqmem := eqstring(n.Left, n.Right)
+		n.SetLeft(cheapexpr(n.Left(), init))
+		n.SetRight(cheapexpr(n.Right(), init))
+		eqlen, eqmem := eqstring(n.Left(), n.Right())
 		// quick check of len before full compare for == or !=.
 		// memequal then tests equality up to length len.
-		if n.Op == ir.OEQ {
+		if n.Op() == ir.OEQ {
 			// len(left) == len(right) && memequal(left, right, len)
 			r = ir.Nod(ir.OANDAND, eqlen, eqmem)
 		} else {
 			// len(left) != len(right) || !memequal(left, right, len)
-			eqlen.Op = ir.ONE
+			eqlen.SetOp(ir.ONE)
 			r = ir.Nod(ir.OOROR, eqlen, ir.Nod(ir.ONOT, eqmem, nil))
 		}
 	} else {
 		// sys_cmpstring(s1, s2) :: 0
-		r = mkcall("cmpstring", types.Types[types.TINT], init, conv(n.Left, types.Types[types.TSTRING]), conv(n.Right, types.Types[types.TSTRING]))
-		r = ir.Nod(n.Op, r, nodintconst(0))
+		r = mkcall("cmpstring", types.Types[types.TINT], init, conv(n.Left(), types.Types[types.TSTRING]), conv(n.Right(), types.Types[types.TSTRING]))
+		r = ir.Nod(n.Op(), r, nodintconst(0))
 	}
 
 	return finishcompare(n, r, init)
@@ -3599,34 +3599,34 @@ func walkcompareString(n *ir.Node, init *ir.Nodes) *ir.Node {
 // n.Left = finishcompare(n.Left, x, r, init)
 func finishcompare(n, r *ir.Node, init *ir.Nodes) *ir.Node {
 	r = typecheck(r, ctxExpr)
-	r = conv(r, n.Type)
+	r = conv(r, n.Type())
 	r = walkexpr(r, init)
 	return r
 }
 
 // return 1 if integer n must be in range [0, max), 0 otherwise
 func bounded(n *ir.Node, max int64) bool {
-	if n.Type == nil || !n.Type.IsInteger() {
+	if n.Type() == nil || !n.Type().IsInteger() {
 		return false
 	}
 
-	sign := n.Type.IsSigned()
-	bits := int32(8 * n.Type.Width)
+	sign := n.Type().IsSigned()
+	bits := int32(8 * n.Type().Width)
 
 	if smallintconst(n) {
 		v := n.Int64Val()
 		return 0 <= v && v < max
 	}
 
-	switch n.Op {
+	switch n.Op() {
 	case ir.OAND, ir.OANDNOT:
 		v := int64(-1)
 		switch {
-		case smallintconst(n.Left):
-			v = n.Left.Int64Val()
-		case smallintconst(n.Right):
-			v = n.Right.Int64Val()
-			if n.Op == ir.OANDNOT {
+		case smallintconst(n.Left()):
+			v = n.Left().Int64Val()
+		case smallintconst(n.Right()):
+			v = n.Right().Int64Val()
+			if n.Op() == ir.OANDNOT {
 				v = ^v
 				if !sign {
 					v &= 1<<uint(bits) - 1
@@ -3638,16 +3638,16 @@ func bounded(n *ir.Node, max int64) bool {
 	}
 
 	case ir.OMOD:
-		if !sign && smallintconst(n.Right) {
-			v := n.Right.Int64Val()
+		if !sign && smallintconst(n.Right()) {
+			v := n.Right().Int64Val()
 			if 0 <= v && v <= max {
 				return true
 			}
 		}
 
 	case ir.ODIV:
-		if !sign && smallintconst(n.Right) {
-			v := n.Right.Int64Val()
+		if !sign && smallintconst(n.Right()) {
+			v := n.Right().Int64Val()
 			for bits > 0 && v >= 2 {
 			bits--
 			v >>= 1
@@ -3655,8 +3655,8 @@ func bounded(n *ir.Node, max int64) bool {
 	}

 	case ir.ORSH:
-		if !sign && smallintconst(n.Right) {
-			v := n.Right.Int64Val()
-			if v > int64(bits) {
+		if !sign && smallintconst(n.Right()) {
+			v := n.Right().Int64Val()
+			if v > 
int64(bits) { return true } @@ -3673,7 +3673,7 @@ func bounded(n *ir.Node, max int64) bool { // usemethod checks interface method calls for uses of reflect.Type.Method. func usemethod(n *ir.Node) { - t := n.Left.Type + t := n.Left().Type() // Looking for either of: // Method(int) reflect.Method @@ -3711,9 +3711,9 @@ func usemethod(n *ir.Node) { // (including global variables such as numImports - was issue #19028). // Also need to check for reflect package itself (see Issue #38515). if s := res0.Type.Sym; s != nil && s.Name == "Method" && isReflectPkg(s.Pkg) { - Curfn.Func.SetReflectMethod(true) + Curfn.Func().SetReflectMethod(true) // The LSym is initialized at this point. We need to set the attribute on the LSym. - Curfn.Func.LSym.Set(obj.AttrReflectMethod, true) + Curfn.Func().LSym.Set(obj.AttrReflectMethod, true) } } @@ -3722,35 +3722,35 @@ func usefield(n *ir.Node) { return } - switch n.Op { + switch n.Op() { default: - base.Fatalf("usefield %v", n.Op) + base.Fatalf("usefield %v", n.Op()) case ir.ODOT, ir.ODOTPTR: break } - if n.Sym == nil { + if n.Sym() == nil { // No field name. This DOTPTR was built by the compiler for access // to runtime data structures. Ignore. return } - t := n.Left.Type + t := n.Left().Type() if t.IsPtr() { t = t.Elem() } field := n.Opt().(*types.Field) if field == nil { - base.Fatalf("usefield %v %v without paramfld", n.Left.Type, n.Sym) + base.Fatalf("usefield %v %v without paramfld", n.Left().Type(), n.Sym()) } - if field.Sym != n.Sym || field.Offset != n.Xoffset { - base.Fatalf("field inconsistency: %v,%v != %v,%v", field.Sym, field.Offset, n.Sym, n.Xoffset) + if field.Sym != n.Sym() || field.Offset != n.Offset() { + base.Fatalf("field inconsistency: %v,%v != %v,%v", field.Sym, field.Offset, n.Sym(), n.Offset()) } if !strings.Contains(field.Note, "go:\"track\"") { return } - outer := n.Left.Type + outer := n.Left().Type() if outer.IsPtr() { outer = outer.Elem() } @@ -3762,10 +3762,10 @@ func usefield(n *ir.Node) { } sym := tracksym(outer, field) - if Curfn.Func.FieldTrack == nil { - Curfn.Func.FieldTrack = make(map[*types.Sym]struct{}) + if Curfn.Func().FieldTrack == nil { + Curfn.Func().FieldTrack = make(map[*types.Sym]struct{}) } - Curfn.Func.FieldTrack[sym] = struct{}{} + Curfn.Func().FieldTrack[sym] = struct{}{} } func candiscardlist(l ir.Nodes) bool { @@ -3782,7 +3782,7 @@ func candiscard(n *ir.Node) bool { return true } - switch n.Op { + switch n.Op() { default: return false @@ -3844,14 +3844,14 @@ func candiscard(n *ir.Node) bool { // Discardable as long as we know it's not division by zero. case ir.ODIV, ir.OMOD: - if n.Right.Op == ir.OLITERAL && constant.Sign(n.Right.Val()) != 0 { + if n.Right().Op() == ir.OLITERAL && constant.Sign(n.Right().Val()) != 0 { break } return false // Discardable as long as we know it won't fail because of a bad size. 
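To make the division rule above concrete (an illustration, not part of the patch): a dead division is discardable only when the divisor is a nonzero constant, since that is the only case where it provably cannot panic.

	package main

	func f(x, y int) {
		_ = x / 3 // discardable: nonzero constant divisor, cannot panic
		_ = x / y // kept: y might be zero, so the division may panic
	}

	func main() { f(10, 2) }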
case ir.OMAKECHAN, ir.OMAKEMAP: - if ir.IsConst(n.Left, constant.Int) && constant.Sign(n.Left.Val()) == 0 { + if ir.IsConst(n.Left(), constant.Int) && constant.Sign(n.Left().Val()) == 0 { break } return false @@ -3864,7 +3864,7 @@ func candiscard(n *ir.Node) bool { return false } - if !candiscard(n.Left) || !candiscard(n.Right) || !candiscardlist(n.Ninit) || !candiscardlist(n.Nbody) || !candiscardlist(n.List) || !candiscardlist(n.Rlist) { + if !candiscard(n.Left()) || !candiscard(n.Right()) || !candiscardlist(n.Init()) || !candiscardlist(n.Body()) || !candiscardlist(n.List()) || !candiscardlist(n.Rlist()) { return false } @@ -3892,66 +3892,66 @@ var wrapCall_prgen int // The result of wrapCall MUST be assigned back to n, e.g. // n.Left = wrapCall(n.Left, init) func wrapCall(n *ir.Node, init *ir.Nodes) *ir.Node { - if n.Ninit.Len() != 0 { - walkstmtlist(n.Ninit.Slice()) - init.AppendNodes(&n.Ninit) + if n.Init().Len() != 0 { + walkstmtlist(n.Init().Slice()) + init.AppendNodes(n.PtrInit()) } - isBuiltinCall := n.Op != ir.OCALLFUNC && n.Op != ir.OCALLMETH && n.Op != ir.OCALLINTER + isBuiltinCall := n.Op() != ir.OCALLFUNC && n.Op() != ir.OCALLMETH && n.Op() != ir.OCALLINTER // Turn f(a, b, []T{c, d, e}...) back into f(a, b, c, d, e). if !isBuiltinCall && n.IsDDD() { - last := n.List.Len() - 1 - if va := n.List.Index(last); va.Op == ir.OSLICELIT { - n.List.Set(append(n.List.Slice()[:last], va.List.Slice()...)) + last := n.List().Len() - 1 + if va := n.List().Index(last); va.Op() == ir.OSLICELIT { + n.PtrList().Set(append(n.List().Slice()[:last], va.List().Slice()...)) n.SetIsDDD(false) } } // origArgs keeps track of what argument is uintptr-unsafe/unsafe-uintptr conversion. - origArgs := make([]*ir.Node, n.List.Len()) + origArgs := make([]*ir.Node, n.List().Len()) t := ir.Nod(ir.OTFUNC, nil, nil) - for i, arg := range n.List.Slice() { + for i, arg := range n.List().Slice() { s := lookupN("a", i) - if !isBuiltinCall && arg.Op == ir.OCONVNOP && arg.Type.IsUintptr() && arg.Left.Type.IsUnsafePtr() { + if !isBuiltinCall && arg.Op() == ir.OCONVNOP && arg.Type().IsUintptr() && arg.Left().Type().IsUnsafePtr() { origArgs[i] = arg - arg = arg.Left - n.List.SetIndex(i, arg) + arg = arg.Left() + n.List().SetIndex(i, arg) } - t.List.Append(symfield(s, arg.Type)) + t.PtrList().Append(symfield(s, arg.Type())) } wrapCall_prgen++ sym := lookupN("wrap·", wrapCall_prgen) fn := dclfunc(sym, t) - args := paramNnames(t.Type) + args := paramNnames(t.Type()) for i, origArg := range origArgs { if origArg == nil { continue } - arg := ir.Nod(origArg.Op, args[i], nil) - arg.Type = origArg.Type + arg := ir.Nod(origArg.Op(), args[i], nil) + arg.SetType(origArg.Type()) args[i] = arg } - call := ir.Nod(n.Op, nil, nil) + call := ir.Nod(n.Op(), nil, nil) if !isBuiltinCall { - call.Op = ir.OCALL - call.Left = n.Left + call.SetOp(ir.OCALL) + call.SetLeft(n.Left()) call.SetIsDDD(n.IsDDD()) } - call.List.Set(args) - fn.Nbody.Set1(call) + call.PtrList().Set(args) + fn.PtrBody().Set1(call) funcbody() fn = typecheck(fn, ctxStmt) - typecheckslice(fn.Nbody.Slice(), ctxStmt) + typecheckslice(fn.Body().Slice(), ctxStmt) xtop = append(xtop, fn) call = ir.Nod(ir.OCALL, nil, nil) - call.Left = fn.Func.Nname - call.List.Set(n.List.Slice()) + call.SetLeft(fn.Func().Nname) + call.PtrList().Set(n.List().Slice()) call = typecheck(call, ctxStmt) call = walkexpr(call, init) return call @@ -3968,7 +3968,7 @@ func substArgTypes(old *ir.Node, types_ ...*types.Type) *ir.Node { for _, t := range types_ { dowidth(t) } - n.Type = types.SubstAny(n.Type, 
&types_) + n.SetType(types.SubstAny(n.Type(), &types_)) if len(types_) > 0 { base.Fatalf("substArgTypes: too many argument types") } @@ -3993,14 +3993,14 @@ func canMergeLoads() bool { // isRuneCount reports whether n is of the form len([]rune(string)). // These are optimized into a call to runtime.countrunes. func isRuneCount(n *ir.Node) bool { - return base.Flag.N == 0 && !instrumenting && n.Op == ir.OLEN && n.Left.Op == ir.OSTR2RUNES + return base.Flag.N == 0 && !instrumenting && n.Op() == ir.OLEN && n.Left().Op() == ir.OSTR2RUNES } func walkCheckPtrAlignment(n *ir.Node, init *ir.Nodes, count *ir.Node) *ir.Node { - if !n.Type.IsPtr() { - base.Fatalf("expected pointer type: %v", n.Type) + if !n.Type().IsPtr() { + base.Fatalf("expected pointer type: %v", n.Type()) } - elem := n.Type.Elem() + elem := n.Type().Elem() if count != nil { if !elem.IsArray() { base.Fatalf("expected array type: %v", elem) @@ -4017,8 +4017,8 @@ func walkCheckPtrAlignment(n *ir.Node, init *ir.Nodes, count *ir.Node) *ir.Node count = nodintconst(1) } - n.Left = cheapexpr(n.Left, init) - init.Append(mkcall("checkptrAlignment", nil, init, convnop(n.Left, types.Types[types.TUNSAFEPTR]), typename(elem), conv(count, types.Types[types.TUINTPTR]))) + n.SetLeft(cheapexpr(n.Left(), init)) + init.Append(mkcall("checkptrAlignment", nil, init, convnop(n.Left(), types.Types[types.TUNSAFEPTR]), typename(elem), conv(count, types.Types[types.TUINTPTR]))) return n } @@ -4040,12 +4040,12 @@ func walkCheckPtrArithmetic(n *ir.Node, init *ir.Nodes) *ir.Node { // TODO(mdempsky): Make stricter. We only need to exempt // reflect.Value.Pointer and reflect.Value.UnsafeAddr. - switch n.Left.Op { + switch n.Left().Op() { case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: return n } - if n.Left.Op == ir.ODOTPTR && isReflectHeaderDataField(n.Left) { + if n.Left().Op() == ir.ODOTPTR && isReflectHeaderDataField(n.Left()) { return n } @@ -4058,25 +4058,25 @@ func walkCheckPtrArithmetic(n *ir.Node, init *ir.Nodes) *ir.Node { var originals []*ir.Node var walk func(n *ir.Node) walk = func(n *ir.Node) { - switch n.Op { + switch n.Op() { case ir.OADD: - walk(n.Left) - walk(n.Right) + walk(n.Left()) + walk(n.Right()) case ir.OSUB, ir.OANDNOT: - walk(n.Left) + walk(n.Left()) case ir.OCONVNOP: - if n.Left.Type.IsUnsafePtr() { - n.Left = cheapexpr(n.Left, init) - originals = append(originals, convnop(n.Left, types.Types[types.TUNSAFEPTR])) + if n.Left().Type().IsUnsafePtr() { + n.SetLeft(cheapexpr(n.Left(), init)) + originals = append(originals, convnop(n.Left(), types.Types[types.TUNSAFEPTR])) } } } - walk(n.Left) + walk(n.Left()) n = cheapexpr(n, init) slice := mkdotargslice(types.NewSlice(types.Types[types.TUNSAFEPTR]), originals) - slice.Esc = EscNone + slice.SetEsc(EscNone) init.Append(mkcall("checkptrArithmetic", nil, init, convnop(n, types.Types[types.TUNSAFEPTR]), slice)) // TODO(khr): Mark backing store of slice as dead. This will allow us to reuse @@ -4089,5 +4089,5 @@ func walkCheckPtrArithmetic(n *ir.Node, init *ir.Nodes) *ir.Node { // function fn at a given level. See debugHelpFooter for defined // levels. 
func checkPtr(fn *ir.Node, level int) bool { - return base.Debug.Checkptr >= level && fn.Func.Pragma&ir.NoCheckPtr == 0 + return base.Debug.Checkptr >= level && fn.Func().Pragma&ir.NoCheckPtr == 0 } diff --git a/src/cmd/compile/internal/ir/dump.go b/src/cmd/compile/internal/ir/dump.go index 9306366e8ad31..43d0742c73c33 100644 --- a/src/cmd/compile/internal/ir/dump.go +++ b/src/cmd/compile/internal/ir/dump.go @@ -205,7 +205,7 @@ func (p *dumper) dump(x reflect.Value, depth int) { isNode := false if n, ok := x.Interface().(Node); ok { isNode = true - p.printf("%s %s {", n.Op.String(), p.addr(x)) + p.printf("%s %s {", n.op.String(), p.addr(x)) } else { p.printf("%s {", typ) } diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 5dea0880fc140..e1e3813368b69 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -351,28 +351,28 @@ func jconvFmt(n *Node, s fmt.State, flag FmtFlag) { if base.Debug.DumpPtrs != 0 { fmt.Fprintf(s, " p(%p)", n) } - if !short && n.Name != nil && n.Name.Vargen != 0 { - fmt.Fprintf(s, " g(%d)", n.Name.Vargen) + if !short && n.Name() != nil && n.Name().Vargen != 0 { + fmt.Fprintf(s, " g(%d)", n.Name().Vargen) } - if base.Debug.DumpPtrs != 0 && !short && n.Name != nil && n.Name.Defn != nil { + if base.Debug.DumpPtrs != 0 && !short && n.Name() != nil && n.Name().Defn != nil { // Useful to see where Defn is set and what node it points to - fmt.Fprintf(s, " defn(%p)", n.Name.Defn) + fmt.Fprintf(s, " defn(%p)", n.Name().Defn) } - if n.Pos.IsKnown() { + if n.Pos().IsKnown() { pfx := "" - switch n.Pos.IsStmt() { + switch n.Pos().IsStmt() { case src.PosNotStmt: pfx = "_" // "-" would be confusing case src.PosIsStmt: pfx = "+" } - fmt.Fprintf(s, " l(%s%d)", pfx, n.Pos.Line()) + fmt.Fprintf(s, " l(%s%d)", pfx, n.Pos().Line()) } - if !short && n.Xoffset != types.BADWIDTH { - fmt.Fprintf(s, " x(%d)", n.Xoffset) + if !short && n.Offset() != types.BADWIDTH { + fmt.Fprintf(s, " x(%d)", n.Offset()) } if n.Class() != 0 { @@ -405,20 +405,20 @@ func jconvFmt(n *Node, s fmt.State, flag FmtFlag) { fmt.Fprintf(s, " embedded") } - if n.Op == ONAME { - if n.Name.Addrtaken() { + if n.Op() == ONAME { + if n.Name().Addrtaken() { fmt.Fprint(s, " addrtaken") } - if n.Name.Assigned() { + if n.Name().Assigned() { fmt.Fprint(s, " assigned") } - if n.Name.IsClosureVar() { + if n.Name().IsClosureVar() { fmt.Fprint(s, " closurevar") } - if n.Name.Captured() { + if n.Name().Captured() { fmt.Fprint(s, " captured") } - if n.Name.IsOutputParamHeapAddr() { + if n.Name().IsOutputParamHeapAddr() { fmt.Fprint(s, " outputparamheapaddr") } } @@ -433,7 +433,7 @@ func jconvFmt(n *Node, s fmt.State, flag FmtFlag) { fmt.Fprint(s, " hascall") } - if !short && n.Name != nil && n.Name.Used() { + if !short && n.Name() != nil && n.Name().Used() { fmt.Fprint(s, " used") } } @@ -899,31 +899,31 @@ func stmtFmt(n *Node, s fmt.State, mode FmtMode) { // block starting with the init statements. // if we can just say "for" n->ninit; ... 
then do so - simpleinit := n.Ninit.Len() == 1 && n.Ninit.First().Ninit.Len() == 0 && StmtWithInit(n.Op) + simpleinit := n.Init().Len() == 1 && n.Init().First().Init().Len() == 0 && StmtWithInit(n.Op()) // otherwise, print the inits as separate statements - complexinit := n.Ninit.Len() != 0 && !simpleinit && (mode != FErr) + complexinit := n.Init().Len() != 0 && !simpleinit && (mode != FErr) // but if it was for if/for/switch, put in an extra surrounding block to limit the scope - extrablock := complexinit && StmtWithInit(n.Op) + extrablock := complexinit && StmtWithInit(n.Op()) if extrablock { fmt.Fprint(s, "{") } if complexinit { - mode.Fprintf(s, " %v; ", n.Ninit) + mode.Fprintf(s, " %v; ", n.Init()) } - switch n.Op { + switch n.Op() { case ODCL: - mode.Fprintf(s, "var %v %v", n.Left.Sym, n.Left.Type) + mode.Fprintf(s, "var %v %v", n.Left().Sym(), n.Left().Type()) case ODCLFIELD: - if n.Sym != nil { - mode.Fprintf(s, "%v %v", n.Sym, n.Left) + if n.Sym() != nil { + mode.Fprintf(s, "%v %v", n.Sym(), n.Left()) } else { - mode.Fprintf(s, "%v", n.Left) + mode.Fprintf(s, "%v", n.Left()) } // Don't export "v = " initializing statements, hope they're always @@ -931,61 +931,61 @@ func stmtFmt(n *Node, s fmt.State, mode FmtMode) { // the "v = " again. case OAS: if n.Colas() && !complexinit { - mode.Fprintf(s, "%v := %v", n.Left, n.Right) + mode.Fprintf(s, "%v := %v", n.Left(), n.Right()) } else { - mode.Fprintf(s, "%v = %v", n.Left, n.Right) + mode.Fprintf(s, "%v = %v", n.Left(), n.Right()) } case OASOP: if n.Implicit() { if n.SubOp() == OADD { - mode.Fprintf(s, "%v++", n.Left) + mode.Fprintf(s, "%v++", n.Left()) } else { - mode.Fprintf(s, "%v--", n.Left) + mode.Fprintf(s, "%v--", n.Left()) } break } - mode.Fprintf(s, "%v %#v= %v", n.Left, n.SubOp(), n.Right) + mode.Fprintf(s, "%v %#v= %v", n.Left(), n.SubOp(), n.Right()) case OAS2: if n.Colas() && !complexinit { - mode.Fprintf(s, "%.v := %.v", n.List, n.Rlist) + mode.Fprintf(s, "%.v := %.v", n.List(), n.Rlist()) break } fallthrough case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV: - mode.Fprintf(s, "%.v = %v", n.List, n.Right) + mode.Fprintf(s, "%.v = %v", n.List(), n.Right()) case ORETURN: - mode.Fprintf(s, "return %.v", n.List) + mode.Fprintf(s, "return %.v", n.List()) case ORETJMP: - mode.Fprintf(s, "retjmp %v", n.Sym) + mode.Fprintf(s, "retjmp %v", n.Sym()) case OINLMARK: - mode.Fprintf(s, "inlmark %d", n.Xoffset) + mode.Fprintf(s, "inlmark %d", n.Offset()) case OGO: - mode.Fprintf(s, "go %v", n.Left) + mode.Fprintf(s, "go %v", n.Left()) case ODEFER: - mode.Fprintf(s, "defer %v", n.Left) + mode.Fprintf(s, "defer %v", n.Left()) case OIF: if simpleinit { - mode.Fprintf(s, "if %v; %v { %v }", n.Ninit.First(), n.Left, n.Nbody) + mode.Fprintf(s, "if %v; %v { %v }", n.Init().First(), n.Left(), n.Body()) } else { - mode.Fprintf(s, "if %v { %v }", n.Left, n.Nbody) + mode.Fprintf(s, "if %v { %v }", n.Left(), n.Body()) } - if n.Rlist.Len() != 0 { - mode.Fprintf(s, " else { %v }", n.Rlist) + if n.Rlist().Len() != 0 { + mode.Fprintf(s, " else { %v }", n.Rlist()) } case OFOR, OFORUNTIL: opname := "for" - if n.Op == OFORUNTIL { + if n.Op() == OFORUNTIL { opname = "foruntil" } if mode == FErr { // TODO maybe only if FmtShort, same below @@ -995,26 +995,26 @@ func stmtFmt(n *Node, s fmt.State, mode FmtMode) { fmt.Fprint(s, opname) if simpleinit { - mode.Fprintf(s, " %v;", n.Ninit.First()) - } else if n.Right != nil { + mode.Fprintf(s, " %v;", n.Init().First()) + } else if n.Right() != nil { fmt.Fprint(s, " ;") } - if n.Left != nil { - mode.Fprintf(s, " %v", 
n.Left) + if n.Left() != nil { + mode.Fprintf(s, " %v", n.Left()) } - if n.Right != nil { - mode.Fprintf(s, "; %v", n.Right) + if n.Right() != nil { + mode.Fprintf(s, "; %v", n.Right()) } else if simpleinit { fmt.Fprint(s, ";") } - if n.Op == OFORUNTIL && n.List.Len() != 0 { - mode.Fprintf(s, "; %v", n.List) + if n.Op() == OFORUNTIL && n.List().Len() != 0 { + mode.Fprintf(s, "; %v", n.List()) } - mode.Fprintf(s, " { %v }", n.Nbody) + mode.Fprintf(s, " { %v }", n.Body()) case ORANGE: if mode == FErr { @@ -1022,49 +1022,49 @@ func stmtFmt(n *Node, s fmt.State, mode FmtMode) { break } - if n.List.Len() == 0 { - mode.Fprintf(s, "for range %v { %v }", n.Right, n.Nbody) + if n.List().Len() == 0 { + mode.Fprintf(s, "for range %v { %v }", n.Right(), n.Body()) break } - mode.Fprintf(s, "for %.v = range %v { %v }", n.List, n.Right, n.Nbody) + mode.Fprintf(s, "for %.v = range %v { %v }", n.List(), n.Right(), n.Body()) case OSELECT, OSWITCH: if mode == FErr { - mode.Fprintf(s, "%v statement", n.Op) + mode.Fprintf(s, "%v statement", n.Op()) break } - mode.Fprintf(s, "%#v", n.Op) + mode.Fprintf(s, "%#v", n.Op()) if simpleinit { - mode.Fprintf(s, " %v;", n.Ninit.First()) + mode.Fprintf(s, " %v;", n.Init().First()) } - if n.Left != nil { - mode.Fprintf(s, " %v ", n.Left) + if n.Left() != nil { + mode.Fprintf(s, " %v ", n.Left()) } - mode.Fprintf(s, " { %v }", n.List) + mode.Fprintf(s, " { %v }", n.List()) case OCASE: - if n.List.Len() != 0 { - mode.Fprintf(s, "case %.v", n.List) + if n.List().Len() != 0 { + mode.Fprintf(s, "case %.v", n.List()) } else { fmt.Fprint(s, "default") } - mode.Fprintf(s, ": %v", n.Nbody) + mode.Fprintf(s, ": %v", n.Body()) case OBREAK, OCONTINUE, OGOTO, OFALL: - if n.Sym != nil { - mode.Fprintf(s, "%#v %v", n.Op, n.Sym) + if n.Sym() != nil { + mode.Fprintf(s, "%#v %v", n.Op(), n.Sym()) } else { - mode.Fprintf(s, "%#v", n.Op) + mode.Fprintf(s, "%#v", n.Op()) } case OEMPTY: break case OLABEL: - mode.Fprintf(s, "%v: ", n.Sym) + mode.Fprintf(s, "%v: ", n.Sym()) } if extrablock { @@ -1193,8 +1193,8 @@ var OpPrec = []int{ } func exprFmt(n *Node, s fmt.State, prec int, mode FmtMode) { - for n != nil && n.Implicit() && (n.Op == ODEREF || n.Op == OADDR) { - n = n.Left + for n != nil && n.Implicit() && (n.Op() == ODEREF || n.Op() == OADDR) { + n = n.Left() } if n == nil { @@ -1202,8 +1202,8 @@ func exprFmt(n *Node, s fmt.State, prec int, mode FmtMode) { return } - nprec := OpPrec[n.Op] - if n.Op == OTYPE && n.Sym != nil { + nprec := OpPrec[n.Op()] + if n.Op() == OTYPE && n.Sym() != nil { nprec = 8 } @@ -1212,38 +1212,38 @@ func exprFmt(n *Node, s fmt.State, prec int, mode FmtMode) { return } - switch n.Op { + switch n.Op() { case OPAREN: - mode.Fprintf(s, "(%v)", n.Left) + mode.Fprintf(s, "(%v)", n.Left()) case ONIL: fmt.Fprint(s, "nil") case OLITERAL: // this is a bit of a mess if mode == FErr { - if n.Orig != nil && n.Orig != n { - exprFmt(n.Orig, s, prec, mode) + if n.Orig() != nil && n.Orig() != n { + exprFmt(n.Orig(), s, prec, mode) return } - if n.Sym != nil { - fmt.Fprint(s, smodeString(n.Sym, mode)) + if n.Sym() != nil { + fmt.Fprint(s, smodeString(n.Sym(), mode)) return } } needUnparen := false - if n.Type != nil && !n.Type.IsUntyped() { + if n.Type() != nil && !n.Type().IsUntyped() { // Need parens when type begins with what might // be misinterpreted as a unary operator: * or <-. 
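The parenthesization rule in the comment above follows from Go's expression grammar; a small source-level sketch:

	package main

	func main() {
		// (*int)(nil) is a conversion of nil to *int. Without the
		// parens, *int(nil) would parse as *(int(nil)), a dereference
		// of an invalid conversion, so the printer emits parens for
		// types beginning with * or <-.
		p := (*int)(nil)
		println(p == nil) // true
	}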
- if n.Type.IsPtr() || (n.Type.IsChan() && n.Type.ChanDir() == types.Crecv) { - mode.Fprintf(s, "(%v)(", n.Type) + if n.Type().IsPtr() || (n.Type().IsChan() && n.Type().ChanDir() == types.Crecv) { + mode.Fprintf(s, "(%v)(", n.Type()) } else { - mode.Fprintf(s, "%v(", n.Type) + mode.Fprintf(s, "%v(", n.Type()) } needUnparen = true } - if n.Type == types.UntypedRune { + if n.Type() == types.UntypedRune { switch x, ok := constant.Int64Val(n.Val()); { case !ok: fallthrough @@ -1270,44 +1270,44 @@ func exprFmt(n *Node, s fmt.State, prec int, mode FmtMode) { case ONAME: // Special case: name used as local variable in export. // _ becomes ~b%d internally; print as _ for export - if mode == FErr && n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' { + if mode == FErr && n.Sym() != nil && n.Sym().Name[0] == '~' && n.Sym().Name[1] == 'b' { fmt.Fprint(s, "_") return } fallthrough case OPACK, ONONAME, OMETHEXPR: - fmt.Fprint(s, smodeString(n.Sym, mode)) + fmt.Fprint(s, smodeString(n.Sym(), mode)) case OTYPE: - if n.Type == nil && n.Sym != nil { - fmt.Fprint(s, smodeString(n.Sym, mode)) + if n.Type() == nil && n.Sym() != nil { + fmt.Fprint(s, smodeString(n.Sym(), mode)) return } - mode.Fprintf(s, "%v", n.Type) + mode.Fprintf(s, "%v", n.Type()) case OTARRAY: - if n.Left != nil { - mode.Fprintf(s, "[%v]%v", n.Left, n.Right) + if n.Left() != nil { + mode.Fprintf(s, "[%v]%v", n.Left(), n.Right()) return } - mode.Fprintf(s, "[]%v", n.Right) // happens before typecheck + mode.Fprintf(s, "[]%v", n.Right()) // happens before typecheck case OTMAP: - mode.Fprintf(s, "map[%v]%v", n.Left, n.Right) + mode.Fprintf(s, "map[%v]%v", n.Left(), n.Right()) case OTCHAN: switch n.TChanDir() { case types.Crecv: - mode.Fprintf(s, "<-chan %v", n.Left) + mode.Fprintf(s, "<-chan %v", n.Left()) case types.Csend: - mode.Fprintf(s, "chan<- %v", n.Left) + mode.Fprintf(s, "chan<- %v", n.Left()) default: - if n.Left != nil && n.Left.Op == OTCHAN && n.Left.Sym == nil && n.Left.TChanDir() == types.Crecv { - mode.Fprintf(s, "chan (%v)", n.Left) + if n.Left() != nil && n.Left().Op() == OTCHAN && n.Left().Sym() == nil && n.Left().TChanDir() == types.Crecv { + mode.Fprintf(s, "chan (%v)", n.Left()) } else { - mode.Fprintf(s, "chan %v", n.Left) + mode.Fprintf(s, "chan %v", n.Left()) } } @@ -1325,11 +1325,11 @@ func exprFmt(n *Node, s fmt.State, prec int, mode FmtMode) { fmt.Fprint(s, "func literal") return } - if n.Nbody.Len() != 0 { - mode.Fprintf(s, "%v { %v }", n.Type, n.Nbody) + if n.Body().Len() != 0 { + mode.Fprintf(s, "%v { %v }", n.Type(), n.Body()) return } - mode.Fprintf(s, "%v { %v }", n.Type, n.Func.Decl.Nbody) + mode.Fprintf(s, "%v { %v }", n.Type(), n.Func().Decl.Body()) case OCOMPLIT: if mode == FErr { @@ -1337,75 +1337,75 @@ func exprFmt(n *Node, s fmt.State, prec int, mode FmtMode) { mode.Fprintf(s, "... 
argument") return } - if n.Right != nil { - mode.Fprintf(s, "%v{%s}", n.Right, ellipsisIf(n.List.Len() != 0)) + if n.Right() != nil { + mode.Fprintf(s, "%v{%s}", n.Right(), ellipsisIf(n.List().Len() != 0)) return } fmt.Fprint(s, "composite literal") return } - mode.Fprintf(s, "(%v{ %.v })", n.Right, n.List) + mode.Fprintf(s, "(%v{ %.v })", n.Right(), n.List()) case OPTRLIT: - mode.Fprintf(s, "&%v", n.Left) + mode.Fprintf(s, "&%v", n.Left()) case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT: if mode == FErr { - mode.Fprintf(s, "%v{%s}", n.Type, ellipsisIf(n.List.Len() != 0)) + mode.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(n.List().Len() != 0)) return } - mode.Fprintf(s, "(%v{ %.v })", n.Type, n.List) + mode.Fprintf(s, "(%v{ %.v })", n.Type(), n.List()) case OKEY: - if n.Left != nil && n.Right != nil { - mode.Fprintf(s, "%v:%v", n.Left, n.Right) + if n.Left() != nil && n.Right() != nil { + mode.Fprintf(s, "%v:%v", n.Left(), n.Right()) return } - if n.Left == nil && n.Right != nil { - mode.Fprintf(s, ":%v", n.Right) + if n.Left() == nil && n.Right() != nil { + mode.Fprintf(s, ":%v", n.Right()) return } - if n.Left != nil && n.Right == nil { - mode.Fprintf(s, "%v:", n.Left) + if n.Left() != nil && n.Right() == nil { + mode.Fprintf(s, "%v:", n.Left()) return } fmt.Fprint(s, ":") case OSTRUCTKEY: - mode.Fprintf(s, "%v:%v", n.Sym, n.Left) + mode.Fprintf(s, "%v:%v", n.Sym(), n.Left()) case OCALLPART: - exprFmt(n.Left, s, nprec, mode) - if n.Right == nil || n.Right.Sym == nil { + exprFmt(n.Left(), s, nprec, mode) + if n.Right() == nil || n.Right().Sym() == nil { fmt.Fprint(s, ".") return } - mode.Fprintf(s, ".%0S", n.Right.Sym) + mode.Fprintf(s, ".%0S", n.Right().Sym()) case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH: - exprFmt(n.Left, s, nprec, mode) - if n.Sym == nil { + exprFmt(n.Left(), s, nprec, mode) + if n.Sym() == nil { fmt.Fprint(s, ".") return } - mode.Fprintf(s, ".%0S", n.Sym) + mode.Fprintf(s, ".%0S", n.Sym()) case ODOTTYPE, ODOTTYPE2: - exprFmt(n.Left, s, nprec, mode) - if n.Right != nil { - mode.Fprintf(s, ".(%v)", n.Right) + exprFmt(n.Left(), s, nprec, mode) + if n.Right() != nil { + mode.Fprintf(s, ".(%v)", n.Right()) return } - mode.Fprintf(s, ".(%v)", n.Type) + mode.Fprintf(s, ".(%v)", n.Type()) case OINDEX, OINDEXMAP: - exprFmt(n.Left, s, nprec, mode) - mode.Fprintf(s, "[%v]", n.Right) + exprFmt(n.Left(), s, nprec, mode) + mode.Fprintf(s, "[%v]", n.Right()) case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR: - exprFmt(n.Left, s, nprec, mode) + exprFmt(n.Left(), s, nprec, mode) fmt.Fprint(s, "[") low, high, max := n.SliceBounds() if low != nil { @@ -1415,7 +1415,7 @@ func exprFmt(n *Node, s fmt.State, prec int, mode FmtMode) { if high != nil { fmt.Fprint(s, modeString(high, mode)) } - if n.Op.IsSlice3() { + if n.Op().IsSlice3() { fmt.Fprint(s, ":") if max != nil { fmt.Fprint(s, modeString(max, mode)) @@ -1424,16 +1424,16 @@ func exprFmt(n *Node, s fmt.State, prec int, mode FmtMode) { fmt.Fprint(s, "]") case OSLICEHEADER: - if n.List.Len() != 2 { - base.Fatalf("bad OSLICEHEADER list length %d", n.List.Len()) + if n.List().Len() != 2 { + base.Fatalf("bad OSLICEHEADER list length %d", n.List().Len()) } - mode.Fprintf(s, "sliceheader{%v,%v,%v}", n.Left, n.List.First(), n.List.Second()) + mode.Fprintf(s, "sliceheader{%v,%v,%v}", n.Left(), n.List().First(), n.List().Second()) case OCOMPLEX, OCOPY: - if n.Left != nil { - mode.Fprintf(s, "%#v(%v, %v)", n.Op, n.Left, n.Right) + if n.Left() != nil { + mode.Fprintf(s, "%#v(%v, %v)", n.Op(), n.Left(), n.Right()) } else { - mode.Fprintf(s, 
"%#v(%.v)", n.Op, n.List) + mode.Fprintf(s, "%#v(%.v)", n.Op(), n.List()) } case OCONV, @@ -1444,15 +1444,15 @@ func exprFmt(n *Node, s fmt.State, prec int, mode FmtMode) { OSTR2BYTES, OSTR2RUNES, ORUNESTR: - if n.Type == nil || n.Type.Sym == nil { - mode.Fprintf(s, "(%v)", n.Type) + if n.Type() == nil || n.Type().Sym == nil { + mode.Fprintf(s, "(%v)", n.Type()) } else { - mode.Fprintf(s, "%v", n.Type) + mode.Fprintf(s, "%v", n.Type()) } - if n.Left != nil { - mode.Fprintf(s, "(%v)", n.Left) + if n.Left() != nil { + mode.Fprintf(s, "(%v)", n.Left()) } else { - mode.Fprintf(s, "(%.v)", n.List) + mode.Fprintf(s, "(%.v)", n.List()) } case OREAL, @@ -1471,49 +1471,49 @@ func exprFmt(n *Node, s fmt.State, prec int, mode FmtMode) { OSIZEOF, OPRINT, OPRINTN: - if n.Left != nil { - mode.Fprintf(s, "%#v(%v)", n.Op, n.Left) + if n.Left() != nil { + mode.Fprintf(s, "%#v(%v)", n.Op(), n.Left()) return } if n.IsDDD() { - mode.Fprintf(s, "%#v(%.v...)", n.Op, n.List) + mode.Fprintf(s, "%#v(%.v...)", n.Op(), n.List()) return } - mode.Fprintf(s, "%#v(%.v)", n.Op, n.List) + mode.Fprintf(s, "%#v(%.v)", n.Op(), n.List()) case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG: - exprFmt(n.Left, s, nprec, mode) + exprFmt(n.Left(), s, nprec, mode) if n.IsDDD() { - mode.Fprintf(s, "(%.v...)", n.List) + mode.Fprintf(s, "(%.v...)", n.List()) return } - mode.Fprintf(s, "(%.v)", n.List) + mode.Fprintf(s, "(%.v)", n.List()) case OMAKEMAP, OMAKECHAN, OMAKESLICE: - if n.List.Len() != 0 { // pre-typecheck - mode.Fprintf(s, "make(%v, %.v)", n.Type, n.List) + if n.List().Len() != 0 { // pre-typecheck + mode.Fprintf(s, "make(%v, %.v)", n.Type(), n.List()) return } - if n.Right != nil { - mode.Fprintf(s, "make(%v, %v, %v)", n.Type, n.Left, n.Right) + if n.Right() != nil { + mode.Fprintf(s, "make(%v, %v, %v)", n.Type(), n.Left(), n.Right()) return } - if n.Left != nil && (n.Op == OMAKESLICE || !n.Left.Type.IsUntyped()) { - mode.Fprintf(s, "make(%v, %v)", n.Type, n.Left) + if n.Left() != nil && (n.Op() == OMAKESLICE || !n.Left().Type().IsUntyped()) { + mode.Fprintf(s, "make(%v, %v)", n.Type(), n.Left()) return } - mode.Fprintf(s, "make(%v)", n.Type) + mode.Fprintf(s, "make(%v)", n.Type()) case OMAKESLICECOPY: - mode.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type, n.Left, n.Right) + mode.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type(), n.Left(), n.Right()) case OPLUS, ONEG, OADDR, OBITNOT, ODEREF, ONOT, ORECV: // Unary - mode.Fprintf(s, "%#v", n.Op) - if n.Left != nil && n.Left.Op == n.Op { + mode.Fprintf(s, "%#v", n.Op()) + if n.Left() != nil && n.Left().Op() == n.Op() { fmt.Fprint(s, " ") } - exprFmt(n.Left, s, nprec+1, mode) + exprFmt(n.Left(), s, nprec+1, mode) // Binary case OADD, @@ -1536,12 +1536,12 @@ func exprFmt(n *Node, s fmt.State, prec int, mode FmtMode) { OSEND, OSUB, OXOR: - exprFmt(n.Left, s, nprec, mode) - mode.Fprintf(s, " %#v ", n.Op) - exprFmt(n.Right, s, nprec+1, mode) + exprFmt(n.Left(), s, nprec, mode) + mode.Fprintf(s, " %#v ", n.Op()) + exprFmt(n.Right(), s, nprec+1, mode) case OADDSTR: - for i, n1 := range n.List.Slice() { + for i, n1 := range n.List().Slice() { if i != 0 { fmt.Fprint(s, " + ") } @@ -1550,23 +1550,23 @@ func exprFmt(n *Node, s fmt.State, prec int, mode FmtMode) { case ODDD: mode.Fprintf(s, "...") default: - mode.Fprintf(s, "", n.Op) + mode.Fprintf(s, "", n.Op()) } } func nodeFmt(n *Node, s fmt.State, flag FmtFlag, mode FmtMode) { - t := n.Type + t := n.Type() // We almost always want the original. // TODO(gri) Why the special case for OLITERAL? 
- if n.Op != OLITERAL && n.Orig != nil { - n = n.Orig + if n.Op() != OLITERAL && n.Orig() != nil { + n = n.Orig() } if flag&FmtLong != 0 && t != nil { if t.Etype == types.TNIL { fmt.Fprint(s, "nil") - } else if n.Op == ONAME && n.Name.AutoTemp() { + } else if n.Op() == ONAME && n.Name().AutoTemp() { mode.Fprintf(s, "%v value", t) } else { mode.Fprintf(s, "%v (type %v)", n, t) @@ -1576,7 +1576,7 @@ func nodeFmt(n *Node, s fmt.State, flag FmtFlag, mode FmtMode) { // TODO inlining produces expressions with ninits. we can't print these yet. - if OpPrec[n.Op] < 0 { + if OpPrec[n.Op()] < 0 { stmtFmt(n, s, mode) return } @@ -1594,82 +1594,82 @@ func nodeDumpFmt(n *Node, s fmt.State, flag FmtFlag, mode FmtMode) { return } - if n.Ninit.Len() != 0 { - mode.Fprintf(s, "%v-init%v", n.Op, n.Ninit) + if n.Init().Len() != 0 { + mode.Fprintf(s, "%v-init%v", n.Op(), n.Init()) indent(s) } } - switch n.Op { + switch n.Op() { default: - mode.Fprintf(s, "%v%j", n.Op, n) + mode.Fprintf(s, "%v%j", n.Op(), n) case OLITERAL: - mode.Fprintf(s, "%v-%v%j", n.Op, n.Val(), n) + mode.Fprintf(s, "%v-%v%j", n.Op(), n.Val(), n) case ONAME, ONONAME, OMETHEXPR: - if n.Sym != nil { - mode.Fprintf(s, "%v-%v%j", n.Op, n.Sym, n) + if n.Sym() != nil { + mode.Fprintf(s, "%v-%v%j", n.Op(), n.Sym(), n) } else { - mode.Fprintf(s, "%v%j", n.Op, n) + mode.Fprintf(s, "%v%j", n.Op(), n) } - if recur && n.Type == nil && n.Name != nil && n.Name.Param != nil && n.Name.Param.Ntype != nil { + if recur && n.Type() == nil && n.Name() != nil && n.Name().Param != nil && n.Name().Param.Ntype != nil { indent(s) - mode.Fprintf(s, "%v-ntype%v", n.Op, n.Name.Param.Ntype) + mode.Fprintf(s, "%v-ntype%v", n.Op(), n.Name().Param.Ntype) } case OASOP: - mode.Fprintf(s, "%v-%v%j", n.Op, n.SubOp(), n) + mode.Fprintf(s, "%v-%v%j", n.Op(), n.SubOp(), n) case OTYPE: - mode.Fprintf(s, "%v %v%j type=%v", n.Op, n.Sym, n, n.Type) - if recur && n.Type == nil && n.Name != nil && n.Name.Param != nil && n.Name.Param.Ntype != nil { + mode.Fprintf(s, "%v %v%j type=%v", n.Op(), n.Sym(), n, n.Type()) + if recur && n.Type() == nil && n.Name() != nil && n.Name().Param != nil && n.Name().Param.Ntype != nil { indent(s) - mode.Fprintf(s, "%v-ntype%v", n.Op, n.Name.Param.Ntype) + mode.Fprintf(s, "%v-ntype%v", n.Op(), n.Name().Param.Ntype) } } - if n.Op == OCLOSURE && n.Func.Decl != nil && n.Func.Nname.Sym != nil { - mode.Fprintf(s, " fnName %v", n.Func.Nname.Sym) + if n.Op() == OCLOSURE && n.Func().Decl != nil && n.Func().Nname.Sym() != nil { + mode.Fprintf(s, " fnName %v", n.Func().Nname.Sym()) } - if n.Sym != nil && n.Op != ONAME { - mode.Fprintf(s, " %v", n.Sym) + if n.Sym() != nil && n.Op() != ONAME { + mode.Fprintf(s, " %v", n.Sym()) } - if n.Type != nil { - mode.Fprintf(s, " %v", n.Type) + if n.Type() != nil { + mode.Fprintf(s, " %v", n.Type()) } if recur { - if n.Left != nil { - mode.Fprintf(s, "%v", n.Left) + if n.Left() != nil { + mode.Fprintf(s, "%v", n.Left()) } - if n.Right != nil { - mode.Fprintf(s, "%v", n.Right) + if n.Right() != nil { + mode.Fprintf(s, "%v", n.Right()) } - if n.Op == OCLOSURE && n.Func != nil && n.Func.Decl != nil && n.Func.Decl.Nbody.Len() != 0 { + if n.Op() == OCLOSURE && n.Func() != nil && n.Func().Decl != nil && n.Func().Decl.Body().Len() != 0 { indent(s) // The function associated with a closure - mode.Fprintf(s, "%v-clofunc%v", n.Op, n.Func.Decl) + mode.Fprintf(s, "%v-clofunc%v", n.Op(), n.Func().Decl) } - if n.Op == ODCLFUNC && n.Func != nil && n.Func.Dcl != nil && len(n.Func.Dcl) != 0 { + if n.Op() == ODCLFUNC && n.Func() != nil && 
n.Func().Dcl != nil && len(n.Func().Dcl) != 0 { indent(s) // The dcls for a func or closure - mode.Fprintf(s, "%v-dcl%v", n.Op, AsNodes(n.Func.Dcl)) + mode.Fprintf(s, "%v-dcl%v", n.Op(), AsNodes(n.Func().Dcl)) } - if n.List.Len() != 0 { + if n.List().Len() != 0 { indent(s) - mode.Fprintf(s, "%v-list%v", n.Op, n.List) + mode.Fprintf(s, "%v-list%v", n.Op(), n.List()) } - if n.Rlist.Len() != 0 { + if n.Rlist().Len() != 0 { indent(s) - mode.Fprintf(s, "%v-rlist%v", n.Op, n.Rlist) + mode.Fprintf(s, "%v-rlist%v", n.Op(), n.Rlist()) } - if n.Nbody.Len() != 0 { + if n.Body().Len() != 0 { indent(s) - mode.Fprintf(s, "%v-body%v", n.Op, n.Nbody) + mode.Fprintf(s, "%v-body%v", n.Op(), n.Body()) } } } @@ -1910,5 +1910,5 @@ func InstallTypeFormats() { // Line returns n's position as a string. If n has been inlined, // it uses the outermost position where n has been inlined. func Line(n *Node) string { - return base.FmtPos(n.Pos) + return base.FmtPos(n.Pos()) } diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index cac9e6eb3eeb1..dce1bfdbefa1d 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -26,25 +26,25 @@ import ( type Node struct { // Tree structure. // Generic recursive walks should follow these fields. - Left *Node - Right *Node - Ninit Nodes - Nbody Nodes - List Nodes - Rlist Nodes + left *Node + right *Node + init Nodes + body Nodes + list Nodes + rlist Nodes // most nodes - Type *types.Type - Orig *Node // original form, for printing, and tracking copies of ONAMEs + typ *types.Type + orig *Node // original form, for printing, and tracking copies of ONAMEs // func - Func *Func + fn *Func // ONAME, OTYPE, OPACK, OLABEL, some OLITERAL - Name *Name + name *Name - Sym *types.Sym // various - E interface{} // Opt or Val, see methods below + sym *types.Sym // various + e interface{} // Opt or Val, see methods below // Various. Usually an offset into a struct. For example: // - ONAME nodes that refer to local variables use it to identify their stack frame position. @@ -54,85 +54,85 @@ type Node struct { // - OINLMARK stores an index into the inlTree data structure. // - OCLOSURE uses it to store ambient iota value, if any. // Possibly still more uses. If you find any, document them. 
- Xoffset int64 + offset int64 - Pos src.XPos + pos src.XPos flags bitset32 - Esc uint16 // EscXXX + esc uint16 // EscXXX - Op Op + op Op aux uint8 } -func (n *Node) GetLeft() *Node { return n.Left } -func (n *Node) SetLeft(x *Node) { n.Left = x } -func (n *Node) GetRight() *Node { return n.Right } -func (n *Node) SetRight(x *Node) { n.Right = x } -func (n *Node) GetOrig() *Node { return n.Orig } -func (n *Node) SetOrig(x *Node) { n.Orig = x } -func (n *Node) GetType() *types.Type { return n.Type } -func (n *Node) SetType(x *types.Type) { n.Type = x } -func (n *Node) GetFunc() *Func { return n.Func } -func (n *Node) SetFunc(x *Func) { n.Func = x } -func (n *Node) GetName() *Name { return n.Name } -func (n *Node) SetName(x *Name) { n.Name = x } -func (n *Node) GetSym() *types.Sym { return n.Sym } -func (n *Node) SetSym(x *types.Sym) { n.Sym = x } -func (n *Node) GetPos() src.XPos { return n.Pos } -func (n *Node) SetPos(x src.XPos) { n.Pos = x } -func (n *Node) GetXoffset() int64 { return n.Xoffset } -func (n *Node) SetXoffset(x int64) { n.Xoffset = x } -func (n *Node) GetEsc() uint16 { return n.Esc } -func (n *Node) SetEsc(x uint16) { n.Esc = x } -func (n *Node) GetOp() Op { return n.Op } -func (n *Node) SetOp(x Op) { n.Op = x } -func (n *Node) GetNinit() Nodes { return n.Ninit } -func (n *Node) SetNinit(x Nodes) { n.Ninit = x } -func (n *Node) PtrNinit() *Nodes { return &n.Ninit } -func (n *Node) GetNbody() Nodes { return n.Nbody } -func (n *Node) SetNbody(x Nodes) { n.Nbody = x } -func (n *Node) PtrNbody() *Nodes { return &n.Nbody } -func (n *Node) GetList() Nodes { return n.List } -func (n *Node) SetList(x Nodes) { n.List = x } -func (n *Node) PtrList() *Nodes { return &n.List } -func (n *Node) GetRlist() Nodes { return n.Rlist } -func (n *Node) SetRlist(x Nodes) { n.Rlist = x } -func (n *Node) PtrRlist() *Nodes { return &n.Rlist } +func (n *Node) Left() *Node { return n.left } +func (n *Node) SetLeft(x *Node) { n.left = x } +func (n *Node) Right() *Node { return n.right } +func (n *Node) SetRight(x *Node) { n.right = x } +func (n *Node) Orig() *Node { return n.orig } +func (n *Node) SetOrig(x *Node) { n.orig = x } +func (n *Node) Type() *types.Type { return n.typ } +func (n *Node) SetType(x *types.Type) { n.typ = x } +func (n *Node) Func() *Func { return n.fn } +func (n *Node) SetFunc(x *Func) { n.fn = x } +func (n *Node) Name() *Name { return n.name } +func (n *Node) SetName(x *Name) { n.name = x } +func (n *Node) Sym() *types.Sym { return n.sym } +func (n *Node) SetSym(x *types.Sym) { n.sym = x } +func (n *Node) Pos() src.XPos { return n.pos } +func (n *Node) SetPos(x src.XPos) { n.pos = x } +func (n *Node) Offset() int64 { return n.offset } +func (n *Node) SetOffset(x int64) { n.offset = x } +func (n *Node) Esc() uint16 { return n.esc } +func (n *Node) SetEsc(x uint16) { n.esc = x } +func (n *Node) Op() Op { return n.op } +func (n *Node) SetOp(x Op) { n.op = x } +func (n *Node) Init() Nodes { return n.init } +func (n *Node) SetInit(x Nodes) { n.init = x } +func (n *Node) PtrInit() *Nodes { return &n.init } +func (n *Node) Body() Nodes { return n.body } +func (n *Node) SetBody(x Nodes) { n.body = x } +func (n *Node) PtrBody() *Nodes { return &n.body } +func (n *Node) List() Nodes { return n.list } +func (n *Node) SetList(x Nodes) { n.list = x } +func (n *Node) PtrList() *Nodes { return &n.list } +func (n *Node) Rlist() Nodes { return n.rlist } +func (n *Node) SetRlist(x Nodes) { n.rlist = x } +func (n *Node) PtrRlist() *Nodes { return &n.rlist } func (n *Node) ResetAux() { n.aux = 0 } 
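To show the shape of the API these accessors introduce, a self-contained sketch with stand-in types (not the compiler's actual ir package): fields become unexported and all access goes through methods, which keeps callers unchanged if the concrete struct is later hidden behind an interface.

	package main

	import "fmt"

	type node struct {
		left, right *node
		op          string
	}

	func (n *node) Left() *node      { return n.left }
	func (n *node) SetLeft(x *node)  { n.left = x }
	func (n *node) Right() *node     { return n.right }
	func (n *node) SetRight(x *node) { n.right = x }
	func (n *node) Op() string       { return n.op }

	// Generic recursive walks follow the accessors, the way the
	// rewritten passes traverse Left()/Right().
	func walk(n *node, visit func(*node)) {
		if n == nil {
			return
		}
		visit(n)
		walk(n.Left(), visit)
		walk(n.Right(), visit)
	}

	func main() {
		add := &node{op: "+"}
		add.SetLeft(&node{op: "x"})
		add.SetRight(&node{op: "1"})
		walk(add, func(n *node) { fmt.Print(n.Op(), " ") })
		fmt.Println() // prints: + x 1
	}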
func (n *Node) SubOp() Op { - switch n.Op { + switch n.Op() { case OASOP, ONAME: default: - base.Fatalf("unexpected op: %v", n.Op) + base.Fatalf("unexpected op: %v", n.Op()) } return Op(n.aux) } func (n *Node) SetSubOp(op Op) { - switch n.Op { + switch n.Op() { case OASOP, ONAME: default: - base.Fatalf("unexpected op: %v", n.Op) + base.Fatalf("unexpected op: %v", n.Op()) } n.aux = uint8(op) } func (n *Node) IndexMapLValue() bool { - if n.Op != OINDEXMAP { - base.Fatalf("unexpected op: %v", n.Op) + if n.Op() != OINDEXMAP { + base.Fatalf("unexpected op: %v", n.Op()) } return n.aux != 0 } func (n *Node) SetIndexMapLValue(b bool) { - if n.Op != OINDEXMAP { - base.Fatalf("unexpected op: %v", n.Op) + if n.Op() != OINDEXMAP { + base.Fatalf("unexpected op: %v", n.Op()) } if b { n.aux = 1 @@ -142,31 +142,31 @@ func (n *Node) SetIndexMapLValue(b bool) { } func (n *Node) TChanDir() types.ChanDir { - if n.Op != OTCHAN { - base.Fatalf("unexpected op: %v", n.Op) + if n.Op() != OTCHAN { + base.Fatalf("unexpected op: %v", n.Op()) } return types.ChanDir(n.aux) } func (n *Node) SetTChanDir(dir types.ChanDir) { - if n.Op != OTCHAN { - base.Fatalf("unexpected op: %v", n.Op) + if n.Op() != OTCHAN { + base.Fatalf("unexpected op: %v", n.Op()) } n.aux = uint8(dir) } func IsSynthetic(n *Node) bool { - name := n.Sym.Name + name := n.Sym().Name return name[0] == '.' || name[0] == '~' } // IsAutoTmp indicates if n was created by the compiler as a temporary, // based on the setting of the .AutoTemp flag in n's Name. func IsAutoTmp(n *Node) bool { - if n == nil || n.Op != ONAME { + if n == nil || n.Op() != ONAME { return false } - return n.Name.AutoTemp() + return n.Name().AutoTemp() } const ( @@ -229,8 +229,8 @@ func (n *Node) SetColas(b bool) { n.flags.set(nodeColas, b) } func (n *Node) SetTransient(b bool) { n.flags.set(nodeTransient, b) } func (n *Node) SetHasCall(b bool) { n.flags.set(nodeHasCall, b) } func (n *Node) SetLikely(b bool) { n.flags.set(nodeLikely, b) } -func (n *Node) SetHasVal(b bool) { n.flags.set(nodeHasVal, b) } -func (n *Node) SetHasOpt(b bool) { n.flags.set(nodeHasOpt, b) } +func (n *Node) setHasVal(b bool) { n.flags.set(nodeHasVal, b) } +func (n *Node) setHasOpt(b bool) { n.flags.set(nodeHasOpt, b) } func (n *Node) SetEmbedded(b bool) { n.flags.set(nodeEmbedded, b) } // MarkNonNil marks a pointer n as being guaranteed non-nil, @@ -238,8 +238,8 @@ func (n *Node) SetEmbedded(b bool) { n.flags.set(nodeEmbedded, b) } // During conversion to SSA, non-nil pointers won't have nil checks // inserted before dereferencing. See state.exprPtr. func (n *Node) MarkNonNil() { - if !n.Type.IsPtr() && !n.Type.IsUnsafePtr() { - base.Fatalf("MarkNonNil(%v), type %v", n, n.Type) + if !n.Type().IsPtr() && !n.Type().IsUnsafePtr() { + base.Fatalf("MarkNonNil(%v), type %v", n, n.Type()) } n.flags.set(nodeNonNil, true) } @@ -249,7 +249,7 @@ func (n *Node) MarkNonNil() { // When n is a dereferencing operation, n does not need nil checks. // When n is a makeslice+copy operation, n does not need length and cap checks. func (n *Node) SetBounded(b bool) { - switch n.Op { + switch n.Op() { case OINDEX, OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR: // No bounds checks needed. case ODOTPTR, ODEREF: @@ -265,14 +265,14 @@ func (n *Node) SetBounded(b bool) { // MarkReadonly indicates that n is an ONAME with readonly contents. 
 func (n *Node) MarkReadonly() {
-	if n.Op != ONAME {
-		base.Fatalf("Node.MarkReadonly %v", n.Op)
+	if n.Op() != ONAME {
+		base.Fatalf("Node.MarkReadonly %v", n.Op())
 	}
-	n.Name.SetReadonly(true)
+	n.Name().SetReadonly(true)
 	// Mark the linksym as readonly immediately
 	// so that the SSA backend can use this information.
 	// It will be overridden later during dumpglobls.
-	n.Sym.Linksym().Type = objabi.SRODATA
+	n.Sym().Linksym().Type = objabi.SRODATA
 }
 
 // Val returns the constant.Value for the node.
@@ -280,7 +280,7 @@ func (n *Node) Val() constant.Value {
 	if !n.HasVal() {
 		return constant.MakeUnknown()
 	}
-	return *n.E.(*constant.Value)
+	return *n.e.(*constant.Value)
 }
 
 // SetVal sets the constant.Value for the node,
@@ -291,11 +291,11 @@ func (n *Node) SetVal(v constant.Value) {
 		Dump("have Opt", n)
 		base.Fatalf("have Opt")
 	}
-	if n.Op == OLITERAL {
-		AssertValidTypeForConst(n.Type, v)
+	if n.Op() == OLITERAL {
+		AssertValidTypeForConst(n.Type(), v)
 	}
-	n.SetHasVal(true)
-	n.E = &v
+	n.setHasVal(true)
+	n.e = &v
 }
 
 // Opt returns the optimizer data for the node.
@@ -303,7 +303,7 @@ func (n *Node) Opt() interface{} {
 	if !n.HasOpt() {
 		return nil
 	}
-	return n.E
+	return n.e
 }
 
 // SetOpt sets the optimizer data for the node, which must not have been used with SetVal.
@@ -311,8 +311,8 @@ func (n *Node) Opt() interface{} {
 func (n *Node) SetOpt(x interface{}) {
 	if x == nil {
 		if n.HasOpt() {
-			n.SetHasOpt(false)
-			n.E = nil
+			n.setHasOpt(false)
+			n.e = nil
 		}
 		return
 	}
@@ -321,22 +321,22 @@ func (n *Node) SetOpt(x interface{}) {
 		Dump("have Val", n)
 		base.Fatalf("have Val")
 	}
-	n.SetHasOpt(true)
-	n.E = x
+	n.setHasOpt(true)
+	n.e = x
 }
 
 func (n *Node) Iota() int64 {
-	return n.Xoffset
+	return n.Offset()
 }
 
 func (n *Node) SetIota(x int64) {
-	n.Xoffset = x
+	n.SetOffset(x)
 }
 
 // MayBeShared reports whether n may occur in multiple places in the AST.
 // Extra care must be taken when mutating such a node.
 func MayBeShared(n *Node) bool {
-	switch n.Op {
+	switch n.Op() {
 	case ONAME, OLITERAL, ONIL, OTYPE:
 		return true
 	}
@@ -345,10 +345,10 @@ func MayBeShared(n *Node) bool {
 
 // FuncName returns the name (without the package) of the function n.
 func FuncName(n *Node) string {
-	if n == nil || n.Func == nil || n.Func.Nname == nil {
+	if n == nil || n.Func() == nil || n.Func().Nname == nil {
 		return ""
 	}
-	return n.Func.Nname.Sym.Name
+	return n.Func().Nname.Sym().Name
 }
 
 // PkgFuncName returns the name of the function referenced by n, with package prepended.
@@ -360,13 +360,13 @@ func PkgFuncName(n *Node) string { if n == nil { return "" } - if n.Op == ONAME { - s = n.Sym + if n.Op() == ONAME { + s = n.Sym() } else { - if n.Func == nil || n.Func.Nname == nil { + if n.Func() == nil || n.Func().Nname == nil { return "" } - s = n.Func.Nname.Sym + s = n.Func().Nname.Sym() } pkg := s.Pkg @@ -1142,12 +1142,12 @@ func Inspect(n *Node, f func(*Node) bool) { if n == nil || !f(n) { return } - InspectList(n.Ninit, f) - Inspect(n.Left, f) - Inspect(n.Right, f) - InspectList(n.List, f) - InspectList(n.Nbody, f) - InspectList(n.Rlist, f) + InspectList(n.Init(), f) + Inspect(n.Left(), f) + Inspect(n.Right(), f) + InspectList(n.List(), f) + InspectList(n.Body(), f) + InspectList(n.Rlist(), f) } func InspectList(l Nodes, f func(*Node) bool) { @@ -1242,8 +1242,8 @@ func NodAt(pos src.XPos, op Op, nleft, nright *Node) *Node { f Func } n = &x.n - n.Func = &x.f - n.Func.Decl = n + n.SetFunc(&x.f) + n.Func().Decl = n case ONAME: base.Fatalf("use newname instead") case OLABEL, OPACK: @@ -1252,16 +1252,16 @@ func NodAt(pos src.XPos, op Op, nleft, nright *Node) *Node { m Name } n = &x.n - n.Name = &x.m + n.SetName(&x.m) default: n = new(Node) } - n.Op = op - n.Left = nleft - n.Right = nright - n.Pos = pos - n.Xoffset = types.BADWIDTH - n.Orig = n + n.SetOp(op) + n.SetLeft(nleft) + n.SetRight(nright) + n.SetPos(pos) + n.SetOffset(types.BADWIDTH) + n.SetOrig(n) return n } @@ -1278,14 +1278,14 @@ func NewNameAt(pos src.XPos, s *types.Sym) *Node { p Param } n := &x.n - n.Name = &x.m - n.Name.Param = &x.p + n.SetName(&x.m) + n.Name().Param = &x.p - n.Op = ONAME - n.Pos = pos - n.Orig = n + n.SetOp(ONAME) + n.SetPos(pos) + n.SetOrig(n) - n.Sym = s + n.SetSym(s) return n } @@ -1358,7 +1358,7 @@ func OrigSym(s *types.Sym) *types.Sym { return nil case 'b': // originally the blank identifier _ // TODO(mdempsky): Does s.Pkg matter here? - return BlankNode.Sym + return BlankNode.Sym() } return s } @@ -1374,48 +1374,48 @@ func OrigSym(s *types.Sym) *types.Sym { // SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max]. // n must be a slice expression. max is nil if n is a simple slice expression. func (n *Node) SliceBounds() (low, high, max *Node) { - if n.List.Len() == 0 { + if n.List().Len() == 0 { return nil, nil, nil } - switch n.Op { + switch n.Op() { case OSLICE, OSLICEARR, OSLICESTR: - s := n.List.Slice() + s := n.List().Slice() return s[0], s[1], nil case OSLICE3, OSLICE3ARR: - s := n.List.Slice() + s := n.List().Slice() return s[0], s[1], s[2] } - base.Fatalf("SliceBounds op %v: %v", n.Op, n) + base.Fatalf("SliceBounds op %v: %v", n.Op(), n) return nil, nil, nil } // SetSliceBounds sets n's slice bounds, where n is a slice expression. // n must be a slice expression. If max is non-nil, n must be a full slice expression. 
 func (n *Node) SetSliceBounds(low, high, max *Node) {
-	switch n.Op {
+	switch n.Op() {
 	case OSLICE, OSLICEARR, OSLICESTR:
 		if max != nil {
-			base.Fatalf("SetSliceBounds %v given three bounds", n.Op)
+			base.Fatalf("SetSliceBounds %v given three bounds", n.Op())
 		}
-		s := n.List.Slice()
+		s := n.List().Slice()
 		if s == nil {
 			if low == nil && high == nil {
 				return
 			}
-			n.List.Set2(low, high)
+			n.PtrList().Set2(low, high)
 			return
 		}
 		s[0] = low
 		s[1] = high
 		return
 	case OSLICE3, OSLICE3ARR:
-		s := n.List.Slice()
+		s := n.List().Slice()
 		if s == nil {
 			if low == nil && high == nil && max == nil {
 				return
 			}
-			n.List.Set3(low, high, max)
+			n.PtrList().Set3(low, high, max)
 			return
 		}
 		s[0] = low
@@ -1423,7 +1423,7 @@ func (n *Node) SetSliceBounds(low, high, max *Node) {
 		s[2] = max
 		return
 	}
-	base.Fatalf("SetSliceBounds op %v: %v", n.Op, n)
+	base.Fatalf("SetSliceBounds op %v: %v", n.Op(), n)
 }
 
 // IsSlice3 reports whether o is a slice3 op (OSLICE3, OSLICE3ARR).
@@ -1511,7 +1511,7 @@ func (n *Node) RawCopy() *Node {
 // Orig pointing to itself.
 func SepCopy(n *Node) *Node {
 	copy := *n
-	copy.Orig = &copy
+	copy.orig = &copy
 	return &copy
 }
 
@@ -1524,8 +1524,8 @@ func SepCopy(n *Node) *Node {
 // messages; see issues #26855, #27765).
 func Copy(n *Node) *Node {
 	copy := *n
-	if n.Orig == n {
-		copy.Orig = &copy
+	if n.Orig() == n {
+		copy.orig = &copy
 	}
 	return &copy
 }
@@ -1534,18 +1534,18 @@ func Copy(n *Node) *Node {
 func IsNil(n *Node) bool {
 	// Check n.Orig because constant propagation may produce typed nil constants,
 	// which don't exist in the Go spec.
-	return n.Orig.Op == ONIL
+	return n.Orig().Op() == ONIL
 }
 
 func IsBlank(n *Node) bool {
 	if n == nil {
 		return false
 	}
-	return n.Sym.IsBlank()
+	return n.Sym().IsBlank()
 }
 
 // IsMethod reports whether n is a method.
 // n must be a function or a method.
 func IsMethod(n *Node) bool {
-	return n.Type.Recv() != nil
+	return n.Type().Recv() != nil
 }
diff --git a/src/cmd/compile/internal/ir/val.go b/src/cmd/compile/internal/ir/val.go
index 00b5bfd1ad4fd..6bcee7c01c72b 100644
--- a/src/cmd/compile/internal/ir/val.go
+++ b/src/cmd/compile/internal/ir/val.go
@@ -13,7 +13,7 @@ import (
 )
 
 func ConstType(n *Node) constant.Kind {
-	if n == nil || n.Op != OLITERAL {
+	if n == nil || n.Op() != OLITERAL {
 		return constant.Unknown
 	}
 	return n.Val().Kind()
@@ -32,7 +32,7 @@ func ConstValue(n *Node) interface{} {
 	case constant.String:
 		return constant.StringVal(v)
 	case constant.Int:
-		return Int64Val(n.Type, v)
+		return Int64Val(n.Type(), v)
 	case constant.Float:
 		return Float64Val(v)
 	case constant.Complex:
@@ -94,7 +94,7 @@ func ValidTypeForConst(t *types.Type, v constant.Value) bool {
 func NewLiteral(v constant.Value) *Node {
 	n := Nod(OLITERAL, nil, nil)
 	if k := v.Kind(); k != constant.Unknown {
-		n.Type = idealType(k)
+		n.SetType(idealType(k))
 		n.SetVal(v)
 	}
 	return n
diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go
index e0ae0454ef358..3c1fa600a3227 100644
--- a/src/cmd/compile/internal/ssa/nilcheck.go
+++ b/src/cmd/compile/internal/ssa/nilcheck.go
@@ -236,7 +236,7 @@ func nilcheckelim2(f *Func) {
 			continue
 		}
 		if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
-			if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(*ir.Node).Type.HasPointers()) {
+			if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(*ir.Node).Type().HasPointers()) {
 				// These ops don't really change memory.
 				continue
 				// Note: OpVarDef requires that the defined variable not have pointers.
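The accessor conversion above is mechanical, but it is what the next CLs build on: once every caller goes through Left()/SetLeft(), Op()/SetOp() and friends instead of reading struct fields directly, the fields can be unexported and the concrete type can later be hidden behind an interface without touching call sites again. A minimal, self-contained sketch of the shape (a toy node type with illustrative names, not the compiler's own):

package main

// node mimics the pattern above: unexported fields, exported
// accessors, so every use site funnels through the method set.
type node struct {
	op     string
	offset int64
	left   *node
}

func (n *node) Op() string        { return n.op }
func (n *node) SetOp(x string)    { n.op = x }
func (n *node) Offset() int64     { return n.offset }
func (n *node) SetOffset(x int64) { n.offset = x }
func (n *node) Left() *node       { return n.left }
func (n *node) SetLeft(x *node)   { n.left = x }

func main() {
	n := &node{}
	n.SetOp("ONAME")
	n.SetOffset(8)
	n.SetLeft(&node{})
	println(n.Op(), n.Offset(), n.Left() != nil)
}

In the real CLs the method list is far longer, but the mechanics are the same.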
From c26aead50c3c8226c51fb97a94852f2134b881aa Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 25 Nov 2020 00:30:58 -0500 Subject: [PATCH 044/474] [dev.regabi] cmd/compile: convert types.Node (a pointer) to types.IRNode (an interface) The pointer hack was nice and saved a word, but it's untenable in a world where nodes are themselves interfaces with different underlying types. Bite the bullet and use an interface to hold the Node when in types.Sym and types.Type. This has the nice benefit of removing AsTypesNode entirely. AsNode is still useful because of its nil handling. Change-Id: I298cba9ff788b956ee287283bec78010e8b601e5 Reviewed-on: https://go-review.googlesource.com/c/go/+/272933 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/closure.go | 4 +-- src/cmd/compile/internal/gc/dcl.go | 10 +++---- src/cmd/compile/internal/gc/embed.go | 2 +- src/cmd/compile/internal/gc/escape.go | 6 ++-- src/cmd/compile/internal/gc/export.go | 4 +-- src/cmd/compile/internal/gc/gen.go | 2 +- src/cmd/compile/internal/gc/iimport.go | 6 ++-- src/cmd/compile/internal/gc/init.go | 2 +- src/cmd/compile/internal/gc/noder.go | 2 +- src/cmd/compile/internal/gc/obj.go | 2 +- src/cmd/compile/internal/gc/reflect.go | 6 ++-- src/cmd/compile/internal/gc/ssa.go | 2 +- src/cmd/compile/internal/gc/typecheck.go | 8 +++--- src/cmd/compile/internal/gc/universe.go | 28 +++++++++---------- src/cmd/compile/internal/ir/dump.go | 3 -- src/cmd/compile/internal/ir/node.go | 10 ++++--- src/cmd/compile/internal/types/scope.go | 8 +++--- src/cmd/compile/internal/types/sizeof_test.go | 4 +-- src/cmd/compile/internal/types/sym.go | 4 +-- src/cmd/compile/internal/types/type.go | 14 +++++----- 20 files changed, 63 insertions(+), 64 deletions(-) diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index 1b926ec17e48b..2dce7b7f03768 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -270,7 +270,7 @@ func transformclosure(dcl *ir.Node) { decls = append(decls, v) fld := types.NewField(src.NoXPos, v.Sym(), v.Type()) - fld.Nname = ir.AsTypesNode(v) + fld.Nname = v params = append(params, fld) } @@ -511,7 +511,7 @@ func makepartialcall(dot *ir.Node, t0 *types.Type, meth *types.Sym) *ir.Node { // typecheckslice() requires that Curfn is set when processing an ORETURN. 
Curfn = dcl typecheckslice(dcl.Body().Slice(), ctxStmt) - sym.Def = ir.AsTypesNode(dcl) + sym.Def = dcl xtop = append(xtop, dcl) Curfn = savecurfn base.Pos = saveLineNo diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 8b3274890f3c2..8980c47e2c39e 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -118,7 +118,7 @@ func declare(n *ir.Node, ctxt ir.Class) { s.Block = types.Block s.Lastlineno = base.Pos - s.Def = ir.AsTypesNode(n) + s.Def = n n.Name().Vargen = int32(gen) n.SetClass(ctxt) if ctxt == ir.PFUNC { @@ -235,7 +235,7 @@ func typenodl(pos src.XPos, t *types.Type) *ir.Node { // then t->nod might be out of date, so // check t->nod->type too if ir.AsNode(t.Nod) == nil || ir.AsNode(t.Nod).Type() != t { - t.Nod = ir.AsTypesNode(ir.NodAt(pos, ir.OTYPE, nil, nil)) + t.Nod = ir.NodAt(pos, ir.OTYPE, nil, nil) ir.AsNode(t.Nod).SetType(t) ir.AsNode(t.Nod).SetSym(t.Sym) } @@ -490,7 +490,7 @@ func funcarg2(f *types.Field, ctxt ir.Class) { return } n := ir.NewNameAt(f.Pos, f.Sym) - f.Nname = ir.AsTypesNode(n) + f.Nname = n n.SetType(f.Type) n.SetIsDDD(f.IsDDD()) declare(n, ctxt) @@ -614,7 +614,7 @@ func tofunargs(l []*ir.Node, funarg types.Funarg) *types.Type { f.SetIsDDD(n.IsDDD()) if n.Right() != nil { n.Right().SetType(f.Type) - f.Nname = ir.AsTypesNode(n.Right()) + f.Nname = n.Right() } if f.Broke() { t.SetBroke(true) @@ -872,7 +872,7 @@ func addmethod(n *ir.Node, msym *types.Sym, t *types.Type, local, nointerface bo } f := types.NewField(base.Pos, msym, t) - f.Nname = ir.AsTypesNode(n.Func().Nname) + f.Nname = n.Func().Nname f.SetNointerface(nointerface) mt.Methods().Append(f) diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go index d515696add242..03703f68d5fa3 100644 --- a/src/cmd/compile/internal/gc/embed.go +++ b/src/cmd/compile/internal/gc/embed.go @@ -114,7 +114,7 @@ func varEmbed(p *noder, names []*ir.Node, typ *ir.Node, exprs []*ir.Node, embeds if dclcontext != ir.PEXTERN { numLocalEmbed++ v = ir.NewNameAt(v.Pos(), lookupN("embed.", numLocalEmbed)) - v.Sym().Def = ir.AsTypesNode(v) + v.Sym().Def = v v.Name().Param.Ntype = typ v.SetClass(ir.PEXTERN) externdcl = append(externdcl, v) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 866bdf8a6f6a7..f1786e74dcf10 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -229,13 +229,13 @@ func (e *Escape) walkFunc(fn *ir.Node) { ir.InspectList(fn.Body(), func(n *ir.Node) bool { switch n.Op() { case ir.OLABEL: - n.Sym().Label = ir.AsTypesNode(nonlooping) + n.Sym().Label = nonlooping case ir.OGOTO: // If we visited the label before the goto, // then this is a looping label. 
- if n.Sym().Label == ir.AsTypesNode(nonlooping) { - n.Sym().Label = ir.AsTypesNode(looping) + if n.Sym().Label == nonlooping { + n.Sym().Label = looping } } diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 1f0288a5911a3..ace461fc90fec 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -86,7 +86,7 @@ func importsym(ipkg *types.Pkg, s *types.Sym, op ir.Op) *ir.Node { } n = dclname(s) - s.SetPkgDef(ir.AsTypesNode(n)) + s.SetPkgDef(n) s.Importdef = ipkg } if n.Op() != ir.ONONAME && n.Op() != op { @@ -103,7 +103,7 @@ func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type { if n.Op() != ir.OTYPE { t := types.New(types.TFORW) t.Sym = s - t.Nod = ir.AsTypesNode(n) + t.Nod = n n.SetOp(ir.OTYPE) n.SetPos(pos) diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go index d7320f3cccbbb..a89ff528e5266 100644 --- a/src/cmd/compile/internal/gc/gen.go +++ b/src/cmd/compile/internal/gc/gen.go @@ -69,7 +69,7 @@ func tempAt(pos src.XPos, curfn *ir.Node, t *types.Type) *ir.Node { Pkg: ir.LocalPkg, } n := ir.NewNameAt(pos, s) - s.Def = ir.AsTypesNode(n) + s.Def = n n.SetType(t) n.SetClass(ir.PAUTO) n.SetEsc(EscNever) diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 71063566659b2..5d845d90e84dc 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -151,7 +151,7 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) ir.NumImport[pkgName]++ // TODO(mdempsky): This belongs somewhere else. - pkg.Lookup("_").Def = ir.AsTypesNode(ir.BlankNode) + pkg.Lookup("_").Def = ir.BlankNode } else { if pkg.Name != pkgName { base.Fatalf("conflicting package names %v and %v for path %q", pkg.Name, pkgName, pkg.Path) @@ -175,7 +175,7 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) if s.Def != nil { base.Fatalf("unexpected definition for %v: %v", s, ir.AsNode(s.Def)) } - s.Def = ir.AsTypesNode(npos(src.NoXPos, dclname(s))) + s.Def = npos(src.NoXPos, dclname(s)) } } @@ -337,7 +337,7 @@ func (r *importReader) doDecl(n *ir.Node) { // methodSym already marked m.Sym as a function. 
f := types.NewField(mpos, msym, mtyp) - f.Nname = ir.AsTypesNode(m) + f.Nname = m ms[i] = f } t.Methods().Set(ms) diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index b66ee6f9533d3..02a6175c6bf9d 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -93,7 +93,7 @@ func fninit(n []*ir.Node) { nn := NewName(sym) nn.SetType(types.Types[types.TUINT8]) // fake type nn.SetClass(ir.PEXTERN) - sym.Def = ir.AsTypesNode(nn) + sym.Def = nn exportsym(nn) lsym := sym.Linksym() ot := 0 diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 98819fadde5aa..d9642f4b672f9 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -373,7 +373,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) { if my.Def != nil { redeclare(pack.Pos(), my, "as imported package name") } - my.Def = ir.AsTypesNode(pack) + my.Def = pack my.Lastlineno = pack.Pos() my.Block = 1 // at top level } diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 9f0cefbd1cb9a..05f8358fdfa6d 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -480,7 +480,7 @@ func slicedata(pos src.XPos, s string) *ir.Node { symname := fmt.Sprintf(".gobytes.%d", slicedataGen) sym := ir.LocalPkg.Lookup(symname) symnode := NewName(sym) - sym.Def = ir.AsTypesNode(symnode) + sym.Def = symnode lsym := sym.Linksym() off := dstringdata(lsym, 0, s, pos, "slice") diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 4559dd3a219f5..664b3cc942139 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -997,7 +997,7 @@ func typename(t *types.Type) *ir.Node { n.SetType(types.Types[types.TUINT8]) n.SetClass(ir.PEXTERN) n.SetTypecheck(1) - s.Def = ir.AsTypesNode(n) + s.Def = n } n := ir.Nod(ir.OADDR, ir.AsNode(s.Def), nil) @@ -1016,7 +1016,7 @@ func itabname(t, itype *types.Type) *ir.Node { n.SetType(types.Types[types.TUINT8]) n.SetClass(ir.PEXTERN) n.SetTypecheck(1) - s.Def = ir.AsTypesNode(n) + s.Def = n itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()}) } @@ -1882,7 +1882,7 @@ func zeroaddr(size int64) *ir.Node { x.SetType(types.Types[types.TUINT8]) x.SetClass(ir.PEXTERN) x.SetTypecheck(1) - s.Def = ir.AsTypesNode(x) + s.Def = x } z := ir.Nod(ir.OADDR, ir.AsNode(s.Def), nil) z.SetType(types.NewPtr(types.Types[types.TUINT8])) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 018b94d9d8011..262aa0e95cf59 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -7072,7 +7072,7 @@ func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t s := &types.Sym{Name: node.Sym().Name + suffix, Pkg: ir.LocalPkg} n := ir.NewNameAt(parent.N.Pos(), s) - s.Def = ir.AsTypesNode(n) + s.Def = n ir.AsNode(s.Def).Name().SetUsed(true) n.SetType(t) n.SetClass(ir.PAUTO) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 4bc7f035f5090..0559dabe32ac9 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -3486,7 +3486,7 @@ func setUnderlying(t, underlying *types.Type) { *t = *underlying // Restore unnecessarily clobbered attributes. 
- t.Nod = ir.AsTypesNode(n) + t.Nod = n t.Sym = n.Sym() if n.Name() != nil { t.Vargen = n.Name().Vargen @@ -3691,7 +3691,7 @@ func typecheckdef(n *ir.Node) { // For package-level type aliases, set n.Sym.Def so we can identify // it as a type alias during export. See also #31959. if n.Name().Curfn == nil { - n.Sym().Def = ir.AsTypesNode(p.Ntype) + n.Sym().Def = p.Ntype } } break @@ -3799,7 +3799,7 @@ func markbreaklist(l ir.Nodes, implicit *ir.Node) { if n.Op() == ir.OLABEL && i+1 < len(s) && n.Name().Defn == s[i+1] { switch n.Name().Defn.Op() { case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OTYPESW, ir.OSELECT, ir.ORANGE: - n.Sym().Label = ir.AsTypesNode(n.Name().Defn) + n.Sym().Label = n.Name().Defn markbreak(n.Name().Defn, n.Name().Defn) n.Sym().Label = nil i++ @@ -3998,7 +3998,7 @@ func deadcodeexpr(n *ir.Node) *ir.Node { func setTypeNode(n *ir.Node, t *types.Type) { n.SetOp(ir.OTYPE) n.SetType(t) - n.Type().Nod = ir.AsTypesNode(n) + n.Type().Nod = n } // getIotaValue returns the current value for "iota", diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index be22b7e9dbee9..978e53ac159a7 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -109,19 +109,19 @@ func lexinit() { } types.Types[etype] = t } - s2.Def = ir.AsTypesNode(typenod(t)) + s2.Def = typenod(t) ir.AsNode(s2.Def).SetName(new(ir.Name)) } for _, s := range &builtinFuncs { s2 := ir.BuiltinPkg.Lookup(s.name) - s2.Def = ir.AsTypesNode(NewName(s2)) + s2.Def = NewName(s2) ir.AsNode(s2.Def).SetSubOp(s.op) } for _, s := range &unsafeFuncs { s2 := unsafepkg.Lookup(s.name) - s2.Def = ir.AsTypesNode(NewName(s2)) + s2.Def = NewName(s2) ir.AsNode(s2.Def).SetSubOp(s.op) } @@ -130,38 +130,38 @@ func lexinit() { types.Types[types.TANY] = types.New(types.TANY) s := ir.BuiltinPkg.Lookup("true") - s.Def = ir.AsTypesNode(nodbool(true)) + s.Def = nodbool(true) ir.AsNode(s.Def).SetSym(lookup("true")) ir.AsNode(s.Def).SetName(new(ir.Name)) ir.AsNode(s.Def).SetType(types.UntypedBool) s = ir.BuiltinPkg.Lookup("false") - s.Def = ir.AsTypesNode(nodbool(false)) + s.Def = nodbool(false) ir.AsNode(s.Def).SetSym(lookup("false")) ir.AsNode(s.Def).SetName(new(ir.Name)) ir.AsNode(s.Def).SetType(types.UntypedBool) s = lookup("_") s.Block = -100 - s.Def = ir.AsTypesNode(NewName(s)) + s.Def = NewName(s) types.Types[types.TBLANK] = types.New(types.TBLANK) ir.AsNode(s.Def).SetType(types.Types[types.TBLANK]) ir.BlankNode = ir.AsNode(s.Def) s = ir.BuiltinPkg.Lookup("_") s.Block = -100 - s.Def = ir.AsTypesNode(NewName(s)) + s.Def = NewName(s) types.Types[types.TBLANK] = types.New(types.TBLANK) ir.AsNode(s.Def).SetType(types.Types[types.TBLANK]) types.Types[types.TNIL] = types.New(types.TNIL) s = ir.BuiltinPkg.Lookup("nil") - s.Def = ir.AsTypesNode(nodnil()) + s.Def = nodnil() ir.AsNode(s.Def).SetSym(s) ir.AsNode(s.Def).SetName(new(ir.Name)) s = ir.BuiltinPkg.Lookup("iota") - s.Def = ir.AsTypesNode(ir.Nod(ir.OIOTA, nil, nil)) + s.Def = ir.Nod(ir.OIOTA, nil, nil) ir.AsNode(s.Def).SetSym(s) ir.AsNode(s.Def).SetName(new(ir.Name)) } @@ -181,7 +181,7 @@ func typeinit() { t := types.New(types.TUNSAFEPTR) types.Types[types.TUNSAFEPTR] = t t.Sym = unsafepkg.Lookup("Pointer") - t.Sym.Def = ir.AsTypesNode(typenod(t)) + t.Sym.Def = typenod(t) ir.AsNode(t.Sym.Def).SetName(new(ir.Name)) dowidth(types.Types[types.TUNSAFEPTR]) @@ -343,7 +343,7 @@ func lexinit1() { types.Errortype = makeErrorInterface() types.Errortype.Sym = s types.Errortype.Orig = makeErrorInterface() - s.Def = 
ir.AsTypesNode(typenod(types.Errortype)) + s.Def = typenod(types.Errortype) dowidth(types.Errortype) // We create separate byte and rune types for better error messages @@ -358,7 +358,7 @@ func lexinit1() { s = ir.BuiltinPkg.Lookup("byte") types.Bytetype = types.New(types.TUINT8) types.Bytetype.Sym = s - s.Def = ir.AsTypesNode(typenod(types.Bytetype)) + s.Def = typenod(types.Bytetype) ir.AsNode(s.Def).SetName(new(ir.Name)) dowidth(types.Bytetype) @@ -366,7 +366,7 @@ func lexinit1() { s = ir.BuiltinPkg.Lookup("rune") types.Runetype = types.New(types.TINT32) types.Runetype.Sym = s - s.Def = ir.AsTypesNode(typenod(types.Runetype)) + s.Def = typenod(types.Runetype) ir.AsNode(s.Def).SetName(new(ir.Name)) dowidth(types.Runetype) @@ -384,7 +384,7 @@ func lexinit1() { t := types.New(s.etype) t.Sym = s1 types.Types[s.etype] = t - s1.Def = ir.AsTypesNode(typenod(t)) + s1.Def = typenod(t) ir.AsNode(s1.Def).SetName(new(ir.Name)) s1.Origpkg = ir.BuiltinPkg diff --git a/src/cmd/compile/internal/ir/dump.go b/src/cmd/compile/internal/ir/dump.go index 43d0742c73c33..c4ea5af3d1feb 100644 --- a/src/cmd/compile/internal/ir/dump.go +++ b/src/cmd/compile/internal/ir/dump.go @@ -150,9 +150,6 @@ func (p *dumper) dump(x reflect.Value, depth int) { case src.XPos: p.printf("%s", base.FmtPos(v)) return - - case *types.Node: - x = reflect.ValueOf(AsNode(v)) } switch x.Kind() { diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index dce1bfdbefa1d..b42ca5b8a3c3c 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -10,7 +10,6 @@ import ( "go/constant" "sort" "strings" - "unsafe" "cmd/compile/internal/base" "cmd/compile/internal/types" @@ -1340,9 +1339,12 @@ type SymAndPos struct { Pos src.XPos // line of call } -func AsNode(n *types.Node) *Node { return (*Node)(unsafe.Pointer(n)) } - -func AsTypesNode(n *Node) *types.Node { return (*types.Node)(unsafe.Pointer(n)) } +func AsNode(n types.IRNode) *Node { + if n == nil { + return nil + } + return n.(*Node) +} var BlankNode *Node diff --git a/src/cmd/compile/internal/types/scope.go b/src/cmd/compile/internal/types/scope.go index 40d3d86ef11a4..33a02c543d153 100644 --- a/src/cmd/compile/internal/types/scope.go +++ b/src/cmd/compile/internal/types/scope.go @@ -15,7 +15,7 @@ var Block int32 // current block number // restored once the block scope ends. type dsym struct { sym *Sym // sym == nil indicates stack mark - def *Node + def IRNode block int32 lastlineno src.XPos // last declaration for diagnostic } @@ -79,16 +79,16 @@ func IsDclstackValid() bool { } // PkgDef returns the definition associated with s at package scope. -func (s *Sym) PkgDef() *Node { +func (s *Sym) PkgDef() IRNode { return *s.pkgDefPtr() } // SetPkgDef sets the definition associated with s at package scope. -func (s *Sym) SetPkgDef(n *Node) { +func (s *Sym) SetPkgDef(n IRNode) { *s.pkgDefPtr() = n } -func (s *Sym) pkgDefPtr() **Node { +func (s *Sym) pkgDefPtr() *IRNode { // Look for outermost saved declaration, which must be the // package scope definition, if present. 
for _, d := range dclstack { diff --git a/src/cmd/compile/internal/types/sizeof_test.go b/src/cmd/compile/internal/types/sizeof_test.go index 0cf343e8f13a8..2821d9a3c7054 100644 --- a/src/cmd/compile/internal/types/sizeof_test.go +++ b/src/cmd/compile/internal/types/sizeof_test.go @@ -20,8 +20,8 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {Sym{}, 52, 88}, - {Type{}, 52, 88}, + {Sym{}, 60, 104}, + {Type{}, 56, 96}, {Map{}, 20, 40}, {Forward{}, 20, 32}, {Func{}, 28, 48}, diff --git a/src/cmd/compile/internal/types/sym.go b/src/cmd/compile/internal/types/sym.go index 07bce4d5cdbb8..046104d0dcf79 100644 --- a/src/cmd/compile/internal/types/sym.go +++ b/src/cmd/compile/internal/types/sym.go @@ -33,12 +33,12 @@ type Sym struct { Name string // object name // saved and restored by dcopy - Def *Node // definition: ONAME OTYPE OPACK or OLITERAL + Def IRNode // definition: ONAME OTYPE OPACK or OLITERAL Block int32 // blocknumber to catch redeclaration Lastlineno src.XPos // last declaration for diagnostic flags bitset8 - Label *Node // corresponding label (ephemeral) + Label IRNode // corresponding label (ephemeral) Origpkg *Pkg // original package for . import } diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index b93409aac1a3c..8499a36edc8f5 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -10,10 +10,10 @@ import ( "fmt" ) -// Our own “Node” so we can refer to *gc.Node without actually -// having a gc.Node. Necessary to break import cycles. -// TODO(gri) try to eliminate soon -type Node struct{ _ int } +// IRNode represents an ir.Node, but without needing to import cmd/compile/internal/ir, +// which would cause an import cycle. The uses in other packages must type assert +// values of type IRNode to ir.Node or a more specific type. +type IRNode interface{ Type() *Type } //go:generate stringer -type EType -trimprefix T @@ -141,8 +141,8 @@ type Type struct { methods Fields allMethods Fields - Nod *Node // canonical OTYPE node - Orig *Type // original type (type literal or predefined type) + Nod IRNode // canonical OTYPE node + Orig *Type // original type (type literal or predefined type) // Cache of composite types, with this type being the element type. Cache struct { @@ -360,7 +360,7 @@ type Field struct { // For fields that represent function parameters, Nname points // to the associated ONAME Node. - Nname *Node + Nname IRNode // Offset in bytes of this field or method within its enclosing struct // or interface Type. From 4d0d9c2c5c35377b0662f2fd0995867919552251 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 25 Nov 2020 00:37:36 -0500 Subject: [PATCH 045/474] [dev.regabi] cmd/compile: introduce ir.INode interface for *ir.Node Define the interface for an IR node. The next CL will shuffle the names and leave us with ir.Node being the interface. 
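The one-line assertion var _ INode = (*Node)(nil) in the diff below is the safety net for this change: it costs nothing at runtime but turns any drift between the interface and the struct's method set into a build error rather than a failed type assertion later. A reduced, self-contained sketch of the idiom (Op/SetOp stand in for the full method list; the names are illustrative):

package main

// INode is a cut-down stand-in for the interface this CL defines.
type INode interface {
	Op() string
	SetOp(x string)
}

// Node is a stand-in for the concrete syntax-tree struct.
type Node struct{ op string }

func (n *Node) Op() string     { return n.op }
func (n *Node) SetOp(x string) { n.op = x }

// The blank assignment forces a compile-time check that *Node
// implements INode; removing SetOp above would break the build.
var _ INode = (*Node)(nil)

func main() {
	var n INode = &Node{}
	n.SetOp("ONAME")
	println(n.Op())
}

The diff below applies the same idiom to the full method list.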
Change-Id: Ifc40f7846d522cf99efa6b4e558bebb6db5218f9 Reviewed-on: https://go-review.googlesource.com/c/go/+/272934 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/fmt.go | 8 +- src/cmd/compile/internal/ir/node.go | 126 ++++++++++++++++++++++++++-- 2 files changed, 125 insertions(+), 9 deletions(-) diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index e1e3813368b69..9682bae39b151 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -247,7 +247,7 @@ type fmtNode struct { m FmtMode } -func (f *fmtNode) Format(s fmt.State, verb rune) { f.x.format(s, verb, f.m) } +func (f *fmtNode) Format(s fmt.State, verb rune) { nodeFormat(f.x, s, verb, f.m) } type fmtOp struct { x Op @@ -282,7 +282,7 @@ func (n *Node) Format(s fmt.State, verb rune) { } func FmtNode(n *Node, s fmt.State, verb rune) { - n.format(s, verb, FErr) + nodeFormat(n, s, verb, FErr) } func (o Op) Format(s fmt.State, verb rune) { o.format(s, verb, FErr) } @@ -313,6 +313,8 @@ func (m FmtMode) prepareArgs(args []interface{}) { args[i] = &fmtOp{arg, m} case *Node: args[i] = &fmtNode{arg, m} + case nil: + args[i] = &fmtNode{nil, m} // assume this was a node interface case *types.Type: args[i] = &fmtType{arg, m} case *types.Sym: @@ -327,7 +329,7 @@ func (m FmtMode) prepareArgs(args []interface{}) { } } -func (n *Node) format(s fmt.State, verb rune, mode FmtMode) { +func nodeFormat(n *Node, s fmt.State, verb rune, mode FmtMode) { switch verb { case 'v', 'S', 'L': nconvFmt(n, s, fmtFlag(s, verb), mode) diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index b42ca5b8a3c3c..d700c593906d8 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -7,6 +7,7 @@ package ir import ( + "fmt" "go/constant" "sort" "strings" @@ -18,6 +19,119 @@ import ( "cmd/internal/src" ) +// A Node is the abstract interface to an IR node. +type INode interface { + // Formatting + Format(s fmt.State, verb rune) + String() string + + // Source position. + Pos() src.XPos + SetPos(x src.XPos) + + // For making copies. Mainly used by Copy and SepCopy. + RawCopy() *Node + + // Abstract graph structure, for generic traversals. + Op() Op + SetOp(x Op) + Orig() *Node + SetOrig(x *Node) + SubOp() Op + SetSubOp(x Op) + Left() *Node + SetLeft(x *Node) + Right() *Node + SetRight(x *Node) + Init() Nodes + PtrInit() *Nodes + SetInit(x Nodes) + Body() Nodes + PtrBody() *Nodes + SetBody(x Nodes) + List() Nodes + SetList(x Nodes) + PtrList() *Nodes + Rlist() Nodes + SetRlist(x Nodes) + PtrRlist() *Nodes + + // Fields specific to certain Ops only. 
+	Type() *types.Type
+	SetType(t *types.Type)
+	Func() *Func
+	SetFunc(x *Func)
+	Name() *Name
+	SetName(x *Name)
+	Sym() *types.Sym
+	SetSym(x *types.Sym)
+	Offset() int64
+	SetOffset(x int64)
+	Class() Class
+	SetClass(x Class)
+	Likely() bool
+	SetLikely(x bool)
+	SliceBounds() (low, high, max *Node)
+	SetSliceBounds(low, high, max *Node)
+	Iota() int64
+	SetIota(x int64)
+	Colas() bool
+	SetColas(x bool)
+	NoInline() bool
+	SetNoInline(x bool)
+	Transient() bool
+	SetTransient(x bool)
+	Implicit() bool
+	SetImplicit(x bool)
+	IsDDD() bool
+	SetIsDDD(x bool)
+	Embedded() bool
+	SetEmbedded(x bool)
+	IndexMapLValue() bool
+	SetIndexMapLValue(x bool)
+	TChanDir() types.ChanDir
+	SetTChanDir(x types.ChanDir)
+	ResetAux()
+	HasBreak() bool
+	SetHasBreak(x bool)
+	MarkReadonly()
+	Val() constant.Value
+	HasVal() bool
+	SetVal(v constant.Value)
+	Int64Val() int64
+	Uint64Val() uint64
+	CanInt64() bool
+	BoolVal() bool
+	StringVal() string
+
+	// Storage for analysis passes.
+	Esc() uint16
+	SetEsc(x uint16)
+	Walkdef() uint8
+	SetWalkdef(x uint8)
+	Opt() interface{}
+	SetOpt(x interface{})
+	HasOpt() bool
+	Diag() bool
+	SetDiag(x bool)
+	Bounded() bool
+	SetBounded(x bool)
+	Typecheck() uint8
+	SetTypecheck(x uint8)
+	Initorder() uint8
+	SetInitorder(x uint8)
+	NonNil() bool
+	MarkNonNil()
+	HasCall() bool
+	SetHasCall(x bool)
+
+	// Only for SSA and should be removed when SSA starts
+	// using a more specific type than Node.
+	CanBeAnSSASym()
+}
+
+var _ INode = (*Node)(nil)
+
 // A Node is a single node in the syntax tree.
 // Actually the syntax tree is a syntax DAG, because there is only one
 // node with Op=ONAME for a given instance of a variable x.
@@ -1512,9 +1626,9 @@ func (n *Node) RawCopy() *Node {
 // SepCopy returns a separate shallow copy of n, with the copy's
 // Orig pointing to itself.
 func SepCopy(n *Node) *Node {
-	copy := *n
-	copy.orig = &copy
-	return &copy
+	n = n.RawCopy()
+	n.SetOrig(n)
+	return n
 }
 
 // Copy returns a shallow copy of n and adjusts the copy's Orig if
@@ -1525,11 +1639,11 @@ func SepCopy(n *Node) *Node {
 // (This caused the wrong complit Op to be used when printing error
 // messages; see issues #26855, #27765).
 func Copy(n *Node) *Node {
-	copy := *n
+	copy := n.RawCopy()
 	if n.Orig() == n {
-		copy.orig = &copy
+		copy.SetOrig(copy)
 	}
-	return &copy
+	return copy
 }
 
 // IsNil reports whether n represents the universal untyped zero value "nil".

From 41f3af9d04362a56c1af186af134c704a03fa97b Mon Sep 17 00:00:00 2001
From: Russ Cox
Date: Wed, 25 Nov 2020 01:11:56 -0500
Subject: [PATCH 046/474] [dev.regabi] cmd/compile: replace *Node type with an
 interface Node [generated]
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The plan is to introduce a Node interface that replaces the old *Node
pointer-to-struct. The previous CL defined an interface INode modeling
a *Node.

This CL:

- Changes all references outside internal/ir to use INode,
  along with many references inside internal/ir as well.
- Renames Node to node.
- Renames INode to Node.

So now ir.Node is an interface implemented by *ir.node, which is
otherwise inaccessible, and the code outside package ir is now
(clearly) using only the interface.

The usual rule is never to redefine an existing name with a new
meaning, so that old code that hasn't been updated gets an
"unknown name" error instead of more mysterious errors or silent
misbehavior.
That rule would caution against replacing Node-the-struct with Node-the-interface, as in this CL, because code that says *Node would now be using a pointer to an interface. But this CL is being landed at the same time as another that moves Node from gc to ir. So the net effect is to replace *gc.Node with ir.Node, which does follow the rule: any lingering references to gc.Node will be told it's gone, not silently start using pointers to interfaces. So the rule is followed by the CL sequence, just not this specific CL. Overall, the loss of inlining caused by using interfaces cuts the compiler speed by about 6%, a not insignificant amount. However, as we convert the representation to concrete structs that are not the giant Node over the next weeks, that speed should come back as more of the compiler starts operating directly on concrete types and the memory taken up by the graph of Nodes drops due to the more precise structs. Honestly, I was expecting worse. % benchstat bench.old bench.new name old time/op new time/op delta Template 168ms ± 4% 182ms ± 2% +8.34% (p=0.000 n=9+9) Unicode 72.2ms ±10% 82.5ms ± 6% +14.38% (p=0.000 n=9+9) GoTypes 563ms ± 8% 598ms ± 2% +6.14% (p=0.006 n=9+9) Compiler 2.89s ± 4% 3.04s ± 2% +5.37% (p=0.000 n=10+9) SSA 6.45s ± 4% 7.25s ± 5% +12.41% (p=0.000 n=9+10) Flate 105ms ± 2% 115ms ± 1% +9.66% (p=0.000 n=10+8) GoParser 144ms ±10% 152ms ± 2% +5.79% (p=0.011 n=9+8) Reflect 345ms ± 9% 370ms ± 4% +7.28% (p=0.001 n=10+9) Tar 149ms ± 9% 161ms ± 5% +8.05% (p=0.001 n=10+9) XML 190ms ± 3% 209ms ± 2% +9.54% (p=0.000 n=9+8) LinkCompiler 327ms ± 2% 325ms ± 2% ~ (p=0.382 n=8+8) ExternalLinkCompiler 1.77s ± 4% 1.73s ± 6% ~ (p=0.113 n=9+10) LinkWithoutDebugCompiler 214ms ± 4% 211ms ± 2% ~ (p=0.360 n=10+8) StdCmd 14.8s ± 3% 15.9s ± 1% +6.98% (p=0.000 n=10+9) [Geo mean] 480ms 510ms +6.31% name old user-time/op new user-time/op delta Template 223ms ± 3% 237ms ± 3% +6.16% (p=0.000 n=9+10) Unicode 103ms ± 6% 113ms ± 3% +9.53% (p=0.000 n=9+9) GoTypes 758ms ± 8% 800ms ± 2% +5.55% (p=0.003 n=10+9) Compiler 3.95s ± 2% 4.12s ± 2% +4.34% (p=0.000 n=10+9) SSA 9.43s ± 1% 9.74s ± 4% +3.25% (p=0.000 n=8+10) Flate 132ms ± 2% 141ms ± 2% +6.89% (p=0.000 n=9+9) GoParser 177ms ± 9% 183ms ± 4% ~ (p=0.050 n=9+9) Reflect 467ms ±10% 495ms ± 7% +6.17% (p=0.029 n=10+10) Tar 183ms ± 9% 197ms ± 5% +7.92% (p=0.001 n=10+10) XML 249ms ± 5% 268ms ± 4% +7.82% (p=0.000 n=10+9) LinkCompiler 544ms ± 5% 544ms ± 6% ~ (p=0.863 n=9+9) ExternalLinkCompiler 1.79s ± 4% 1.75s ± 6% ~ (p=0.075 n=10+10) LinkWithoutDebugCompiler 248ms ± 6% 246ms ± 2% ~ (p=0.965 n=10+8) [Geo mean] 483ms 504ms +4.41% [git-generate] cd src/cmd/compile/internal/ir : # We need to do the conversion in multiple steps, so we introduce : # a temporary type alias that will start out meaning the pointer-to-struct : # and then change to mean the interface. rf ' mv Node OldNode add node.go \ type Node = *OldNode ' : # It should work to do this ex in ir, but it misses test files, due to a bug in rf. : # Run the command in gc to handle gc's tests, and then again in ssa for ssa's tests. cd ../gc rf ' ex . ../arm ../riscv64 ../arm64 ../mips64 ../ppc64 ../mips ../wasm { import "cmd/compile/internal/ir" *ir.OldNode -> ir.Node } ' cd ../ssa rf ' ex { import "cmd/compile/internal/ir" *ir.OldNode -> ir.Node } ' : # Back in ir, finish conversion clumsily with sed, : # because type checking and circular aliases do not mix. 
cd ../ir sed -i '' ' /type Node = \*OldNode/d s/\*OldNode/Node/g s/^func (n Node)/func (n *OldNode)/ s/OldNode/node/g s/type INode interface/type Node interface/ s/var _ INode = (Node)(nil)/var _ Node = (*node)(nil)/ ' *.go gofmt -w *.go sed -i '' ' s/{Func{}, 136, 248}/{Func{}, 152, 280}/ s/{Name{}, 32, 56}/{Name{}, 44, 80}/ s/{Param{}, 24, 48}/{Param{}, 44, 88}/ s/{node{}, 76, 128}/{node{}, 88, 152}/ ' sizeof_test.go cd ../ssa sed -i '' ' s/{LocalSlot{}, 28, 40}/{LocalSlot{}, 32, 48}/ ' sizeof_test.go cd ../gc sed -i '' 's/\*ir.Node/ir.Node/' mkbuiltin.go cd ../../../.. go install std cmd cd cmd/compile go test -u || go test -u Change-Id: I196bbe3b648e4701662e4a2bada40bf155e2a553 Reviewed-on: https://go-review.googlesource.com/c/go/+/272935 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/fmtmap_test.go | 23 +- src/cmd/compile/internal/arm/ssa.go | 2 +- src/cmd/compile/internal/arm64/ssa.go | 2 +- src/cmd/compile/internal/gc/alg.go | 46 +-- src/cmd/compile/internal/gc/bexport.go | 2 +- src/cmd/compile/internal/gc/bimport.go | 4 +- src/cmd/compile/internal/gc/builtin.go | 182 +++++----- src/cmd/compile/internal/gc/closure.go | 40 +-- src/cmd/compile/internal/gc/const.go | 30 +- src/cmd/compile/internal/gc/dcl.go | 90 ++--- src/cmd/compile/internal/gc/embed.go | 10 +- src/cmd/compile/internal/gc/escape.go | 84 ++--- src/cmd/compile/internal/gc/export.go | 10 +- src/cmd/compile/internal/gc/gen.go | 8 +- src/cmd/compile/internal/gc/go.go | 12 +- src/cmd/compile/internal/gc/gsubr.go | 8 +- src/cmd/compile/internal/gc/iexport.go | 38 +- src/cmd/compile/internal/gc/iimport.go | 44 +-- src/cmd/compile/internal/gc/init.go | 2 +- src/cmd/compile/internal/gc/initorder.go | 38 +- src/cmd/compile/internal/gc/inl.go | 96 +++--- src/cmd/compile/internal/gc/main.go | 4 +- src/cmd/compile/internal/gc/mkbuiltin.go | 2 +- src/cmd/compile/internal/gc/noder.go | 126 +++---- src/cmd/compile/internal/gc/obj.go | 16 +- src/cmd/compile/internal/gc/order.go | 74 ++-- src/cmd/compile/internal/gc/pgen.go | 56 +-- src/cmd/compile/internal/gc/pgen_test.go | 14 +- src/cmd/compile/internal/gc/phi.go | 34 +- src/cmd/compile/internal/gc/plive.go | 28 +- src/cmd/compile/internal/gc/racewalk.go | 2 +- src/cmd/compile/internal/gc/range.go | 36 +- src/cmd/compile/internal/gc/reflect.go | 14 +- src/cmd/compile/internal/gc/scc.go | 14 +- src/cmd/compile/internal/gc/scope.go | 2 +- src/cmd/compile/internal/gc/select.go | 30 +- src/cmd/compile/internal/gc/sinit.go | 78 ++--- src/cmd/compile/internal/gc/ssa.go | 326 +++++++++--------- src/cmd/compile/internal/gc/subr.go | 74 ++-- src/cmd/compile/internal/gc/swt.go | 62 ++-- src/cmd/compile/internal/gc/typecheck.go | 118 +++---- src/cmd/compile/internal/gc/unsafe.go | 2 +- src/cmd/compile/internal/gc/walk.go | 202 +++++------ src/cmd/compile/internal/ir/dump.go | 2 +- src/cmd/compile/internal/ir/fmt.go | 32 +- src/cmd/compile/internal/ir/node.go | 364 ++++++++++---------- src/cmd/compile/internal/ir/sizeof_test.go | 8 +- src/cmd/compile/internal/ir/val.go | 6 +- src/cmd/compile/internal/mips/ssa.go | 2 +- src/cmd/compile/internal/mips64/ssa.go | 2 +- src/cmd/compile/internal/ppc64/ssa.go | 2 +- src/cmd/compile/internal/riscv64/ssa.go | 2 +- src/cmd/compile/internal/ssa/config.go | 2 +- src/cmd/compile/internal/ssa/deadstore.go | 20 +- src/cmd/compile/internal/ssa/debug.go | 12 +- src/cmd/compile/internal/ssa/export_test.go | 2 +- src/cmd/compile/internal/ssa/location.go | 2 +- src/cmd/compile/internal/ssa/nilcheck.go | 2 
+- src/cmd/compile/internal/ssa/regalloc.go | 2 +- src/cmd/compile/internal/ssa/sizeof_test.go | 2 +- src/cmd/compile/internal/ssa/stackalloc.go | 2 +- src/cmd/compile/internal/wasm/ssa.go | 2 +- 62 files changed, 1277 insertions(+), 1276 deletions(-) diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go index 432d26a7b8751..7a375604fd9f2 100644 --- a/src/cmd/compile/fmtmap_test.go +++ b/src/cmd/compile/fmtmap_test.go @@ -22,14 +22,7 @@ package main_test var knownFormats = map[string]string{ "*bytes.Buffer %s": "", "*cmd/compile/internal/gc.EscLocation %v": "", - "*cmd/compile/internal/ir.Node %#v": "", - "*cmd/compile/internal/ir.Node %+S": "", - "*cmd/compile/internal/ir.Node %+v": "", - "*cmd/compile/internal/ir.Node %L": "", - "*cmd/compile/internal/ir.Node %S": "", - "*cmd/compile/internal/ir.Node %j": "", - "*cmd/compile/internal/ir.Node %p": "", - "*cmd/compile/internal/ir.Node %v": "", + "*cmd/compile/internal/ir.node %v": "", "*cmd/compile/internal/ssa.Block %s": "", "*cmd/compile/internal/ssa.Block %v": "", "*cmd/compile/internal/ssa.Func %s": "", @@ -83,6 +76,14 @@ var knownFormats = map[string]string{ "cmd/compile/internal/ir.Class %d": "", "cmd/compile/internal/ir.Class %v": "", "cmd/compile/internal/ir.FmtMode %d": "", + "cmd/compile/internal/ir.Node %#v": "", + "cmd/compile/internal/ir.Node %+S": "", + "cmd/compile/internal/ir.Node %+v": "", + "cmd/compile/internal/ir.Node %L": "", + "cmd/compile/internal/ir.Node %S": "", + "cmd/compile/internal/ir.Node %j": "", + "cmd/compile/internal/ir.Node %p": "", + "cmd/compile/internal/ir.Node %v": "", "cmd/compile/internal/ir.Nodes %#v": "", "cmd/compile/internal/ir.Nodes %+v": "", "cmd/compile/internal/ir.Nodes %.v": "", @@ -160,9 +161,9 @@ var knownFormats = map[string]string{ "interface{} %q": "", "interface{} %s": "", "interface{} %v": "", - "map[*cmd/compile/internal/ir.Node]*cmd/compile/internal/ssa.Value %v": "", - "map[*cmd/compile/internal/ir.Node][]*cmd/compile/internal/ir.Node %v": "", - "map[cmd/compile/internal/ssa.ID]uint32 %v": "", + "map[cmd/compile/internal/ir.Node]*cmd/compile/internal/ssa.Value %v": "", + "map[cmd/compile/internal/ir.Node][]cmd/compile/internal/ir.Node %v": "", + "map[cmd/compile/internal/ssa.ID]uint32 %v": "", "map[int64]uint32 %v": "", "math/big.Accuracy %s": "", "reflect.Type %s": "", diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go index ff1dd8869e211..b34e2973b24d8 100644 --- a/src/cmd/compile/internal/arm/ssa.go +++ b/src/cmd/compile/internal/arm/ssa.go @@ -546,7 +546,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case *obj.LSym: wantreg = "SB" gc.AddAux(&p.From, v) - case *ir.Node: + case ir.Node: wantreg = "SP" gc.AddAux(&p.From, v) case nil: diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go index 58c00dc3bd14c..d5bd9687cfa7b 100644 --- a/src/cmd/compile/internal/arm64/ssa.go +++ b/src/cmd/compile/internal/arm64/ssa.go @@ -396,7 +396,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case *obj.LSym: wantreg = "SB" gc.AddAux(&p.From, v) - case *ir.Node: + case ir.Node: wantreg = "SP" gc.AddAux(&p.From, v) case nil: diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index ffd1682b35bcd..d2762126ade37 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -404,7 +404,7 @@ func genhash(t *types.Type) *obj.LSym { return closure } -func hashfor(t *types.Type) *ir.Node { +func hashfor(t *types.Type) ir.Node { 
var sym *types.Sym switch a, _ := algtype1(t); a { @@ -432,10 +432,10 @@ func hashfor(t *types.Type) *ir.Node { n := NewName(sym) setNodeNameFunc(n) - n.SetType(functype(nil, []*ir.Node{ + n.SetType(functype(nil, []ir.Node{ anonfield(types.NewPtr(t)), anonfield(types.Types[types.TUINTPTR]), - }, []*ir.Node{ + }, []ir.Node{ anonfield(types.Types[types.TUINTPTR]), })) return n @@ -567,9 +567,9 @@ func geneq(t *types.Type) *obj.LSym { // // TODO(josharian): consider doing some loop unrolling // for larger nelem as well, processing a few elements at a time in a loop. - checkAll := func(unroll int64, last bool, eq func(pi, qi *ir.Node) *ir.Node) { + checkAll := func(unroll int64, last bool, eq func(pi, qi ir.Node) ir.Node) { // checkIdx generates a node to check for equality at index i. - checkIdx := func(i *ir.Node) *ir.Node { + checkIdx := func(i ir.Node) ir.Node { // pi := p[i] pi := ir.Nod(ir.OINDEX, np, i) pi.SetBounded(true) @@ -621,24 +621,24 @@ func geneq(t *types.Type) *obj.LSym { // Do two loops. First, check that all the lengths match (cheap). // Second, check that all the contents match (expensive). // TODO: when the array size is small, unroll the length match checks. - checkAll(3, false, func(pi, qi *ir.Node) *ir.Node { + checkAll(3, false, func(pi, qi ir.Node) ir.Node { // Compare lengths. eqlen, _ := eqstring(pi, qi) return eqlen }) - checkAll(1, true, func(pi, qi *ir.Node) *ir.Node { + checkAll(1, true, func(pi, qi ir.Node) ir.Node { // Compare contents. _, eqmem := eqstring(pi, qi) return eqmem }) case types.TFLOAT32, types.TFLOAT64: - checkAll(2, true, func(pi, qi *ir.Node) *ir.Node { + checkAll(2, true, func(pi, qi ir.Node) ir.Node { // p[i] == q[i] return ir.Nod(ir.OEQ, pi, qi) }) // TODO: pick apart structs, do them piecemeal too default: - checkAll(1, true, func(pi, qi *ir.Node) *ir.Node { + checkAll(1, true, func(pi, qi ir.Node) ir.Node { // p[i] == q[i] return ir.Nod(ir.OEQ, pi, qi) }) @@ -648,9 +648,9 @@ func geneq(t *types.Type) *obj.LSym { // Build a list of conditions to satisfy. // The conditions are a list-of-lists. Conditions are reorderable // within each inner list. The outer lists must be evaluated in order. - var conds [][]*ir.Node - conds = append(conds, []*ir.Node{}) - and := func(n *ir.Node) { + var conds [][]ir.Node + conds = append(conds, []ir.Node{}) + and := func(n ir.Node) { i := len(conds) - 1 conds[i] = append(conds[i], n) } @@ -670,7 +670,7 @@ func geneq(t *types.Type) *obj.LSym { if !IsRegularMemory(f.Type) { if EqCanPanic(f.Type) { // Enforce ordering by starting a new set of reorderable conditions. - conds = append(conds, []*ir.Node{}) + conds = append(conds, []ir.Node{}) } p := nodSym(ir.OXDOT, np, f.Sym) q := nodSym(ir.OXDOT, nq, f.Sym) @@ -684,7 +684,7 @@ func geneq(t *types.Type) *obj.LSym { } if EqCanPanic(f.Type) { // Also enforce ordering after something that can panic. - conds = append(conds, []*ir.Node{}) + conds = append(conds, []ir.Node{}) } i++ continue @@ -709,9 +709,9 @@ func geneq(t *types.Type) *obj.LSym { // Sort conditions to put runtime calls last. // Preserve the rest of the ordering. 
- var flatConds []*ir.Node + var flatConds []ir.Node for _, c := range conds { - isCall := func(n *ir.Node) bool { + isCall := func(n ir.Node) bool { return n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC } sort.SliceStable(c, func(i, j int) bool { @@ -785,7 +785,7 @@ func geneq(t *types.Type) *obj.LSym { return closure } -func hasCall(n *ir.Node) bool { +func hasCall(n ir.Node) bool { if n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC { return true } @@ -820,7 +820,7 @@ func hasCall(n *ir.Node) bool { // eqfield returns the node // p.field == q.field -func eqfield(p *ir.Node, q *ir.Node, field *types.Sym) *ir.Node { +func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node { nx := nodSym(ir.OXDOT, p, field) ny := nodSym(ir.OXDOT, q, field) ne := ir.Nod(ir.OEQ, nx, ny) @@ -833,7 +833,7 @@ func eqfield(p *ir.Node, q *ir.Node, field *types.Sym) *ir.Node { // memequal(s.ptr, t.ptr, len(s)) // which can be used to construct string equality comparison. // eqlen must be evaluated before eqmem, and shortcircuiting is required. -func eqstring(s, t *ir.Node) (eqlen, eqmem *ir.Node) { +func eqstring(s, t ir.Node) (eqlen, eqmem ir.Node) { s = conv(s, types.Types[types.TSTRING]) t = conv(t, types.Types[types.TSTRING]) sptr := ir.Nod(ir.OSPTR, s, nil) @@ -859,13 +859,13 @@ func eqstring(s, t *ir.Node) (eqlen, eqmem *ir.Node) { // ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate) // which can be used to construct interface equality comparison. // eqtab must be evaluated before eqdata, and shortcircuiting is required. -func eqinterface(s, t *ir.Node) (eqtab, eqdata *ir.Node) { +func eqinterface(s, t ir.Node) (eqtab, eqdata ir.Node) { if !types.Identical(s.Type(), t.Type()) { base.Fatalf("eqinterface %v %v", s.Type(), t.Type()) } // func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool) // func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool) - var fn *ir.Node + var fn ir.Node if s.Type().IsEmptyInterface() { fn = syslook("efaceeq") } else { @@ -893,7 +893,7 @@ func eqinterface(s, t *ir.Node) (eqtab, eqdata *ir.Node) { // eqmem returns the node // memequal(&p.field, &q.field [, size]) -func eqmem(p *ir.Node, q *ir.Node, field *types.Sym, size int64) *ir.Node { +func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node { nx := ir.Nod(ir.OADDR, nodSym(ir.OXDOT, p, field), nil) ny := ir.Nod(ir.OADDR, nodSym(ir.OXDOT, q, field), nil) nx = typecheck(nx, ctxExpr) @@ -910,7 +910,7 @@ func eqmem(p *ir.Node, q *ir.Node, field *types.Sym, size int64) *ir.Node { return call } -func eqmemfunc(size int64, t *types.Type) (fn *ir.Node, needsize bool) { +func eqmemfunc(size int64, t *types.Type) (fn ir.Node, needsize bool) { switch size { default: fn = syslook("memequal") diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index e36903cbe0906..a470b842ff8d1 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -14,7 +14,7 @@ type exporter struct { } // markObject visits a reachable object. 
-func (p *exporter) markObject(n *ir.Node) { +func (p *exporter) markObject(n ir.Node) { if n.Op() == ir.ONAME && n.Class() == ir.PFUNC { inlFlood(n) } diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go index 603710d6b192a..c0c18e728e631 100644 --- a/src/cmd/compile/internal/gc/bimport.go +++ b/src/cmd/compile/internal/gc/bimport.go @@ -9,11 +9,11 @@ import ( "cmd/internal/src" ) -func npos(pos src.XPos, n *ir.Node) *ir.Node { +func npos(pos src.XPos, n ir.Node) ir.Node { n.SetPos(pos) return n } -func builtinCall(op ir.Op) *ir.Node { +func builtinCall(op ir.Op) ir.Node { return ir.Nod(ir.OCALL, mkname(ir.BuiltinPkg.Lookup(ir.OpNames[op])), nil) } diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go index 5016905f225d3..a57c6115595f0 100644 --- a/src/cmd/compile/internal/gc/builtin.go +++ b/src/cmd/compile/internal/gc/builtin.go @@ -210,132 +210,132 @@ func runtimeTypes() []*types.Type { typs[1] = types.NewPtr(typs[0]) typs[2] = types.Types[types.TANY] typs[3] = types.NewPtr(typs[2]) - typs[4] = functype(nil, []*ir.Node{anonfield(typs[1])}, []*ir.Node{anonfield(typs[3])}) + typs[4] = functype(nil, []ir.Node{anonfield(typs[1])}, []ir.Node{anonfield(typs[3])}) typs[5] = types.Types[types.TUINTPTR] typs[6] = types.Types[types.TBOOL] typs[7] = types.Types[types.TUNSAFEPTR] - typs[8] = functype(nil, []*ir.Node{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []*ir.Node{anonfield(typs[7])}) + typs[8] = functype(nil, []ir.Node{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []ir.Node{anonfield(typs[7])}) typs[9] = functype(nil, nil, nil) typs[10] = types.Types[types.TINTER] - typs[11] = functype(nil, []*ir.Node{anonfield(typs[10])}, nil) + typs[11] = functype(nil, []ir.Node{anonfield(typs[10])}, nil) typs[12] = types.Types[types.TINT32] typs[13] = types.NewPtr(typs[12]) - typs[14] = functype(nil, []*ir.Node{anonfield(typs[13])}, []*ir.Node{anonfield(typs[10])}) + typs[14] = functype(nil, []ir.Node{anonfield(typs[13])}, []ir.Node{anonfield(typs[10])}) typs[15] = types.Types[types.TINT] - typs[16] = functype(nil, []*ir.Node{anonfield(typs[15]), anonfield(typs[15])}, nil) + typs[16] = functype(nil, []ir.Node{anonfield(typs[15]), anonfield(typs[15])}, nil) typs[17] = types.Types[types.TUINT] - typs[18] = functype(nil, []*ir.Node{anonfield(typs[17]), anonfield(typs[15])}, nil) - typs[19] = functype(nil, []*ir.Node{anonfield(typs[6])}, nil) + typs[18] = functype(nil, []ir.Node{anonfield(typs[17]), anonfield(typs[15])}, nil) + typs[19] = functype(nil, []ir.Node{anonfield(typs[6])}, nil) typs[20] = types.Types[types.TFLOAT64] - typs[21] = functype(nil, []*ir.Node{anonfield(typs[20])}, nil) + typs[21] = functype(nil, []ir.Node{anonfield(typs[20])}, nil) typs[22] = types.Types[types.TINT64] - typs[23] = functype(nil, []*ir.Node{anonfield(typs[22])}, nil) + typs[23] = functype(nil, []ir.Node{anonfield(typs[22])}, nil) typs[24] = types.Types[types.TUINT64] - typs[25] = functype(nil, []*ir.Node{anonfield(typs[24])}, nil) + typs[25] = functype(nil, []ir.Node{anonfield(typs[24])}, nil) typs[26] = types.Types[types.TCOMPLEX128] - typs[27] = functype(nil, []*ir.Node{anonfield(typs[26])}, nil) + typs[27] = functype(nil, []ir.Node{anonfield(typs[26])}, nil) typs[28] = types.Types[types.TSTRING] - typs[29] = functype(nil, []*ir.Node{anonfield(typs[28])}, nil) - typs[30] = functype(nil, []*ir.Node{anonfield(typs[2])}, nil) - typs[31] = functype(nil, []*ir.Node{anonfield(typs[5])}, nil) + typs[29] = functype(nil, 
[]ir.Node{anonfield(typs[28])}, nil) + typs[30] = functype(nil, []ir.Node{anonfield(typs[2])}, nil) + typs[31] = functype(nil, []ir.Node{anonfield(typs[5])}, nil) typs[32] = types.NewArray(typs[0], 32) typs[33] = types.NewPtr(typs[32]) - typs[34] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[28])}) - typs[35] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[28])}) - typs[36] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[28])}) - typs[37] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[28])}) + typs[34] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])}) + typs[35] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])}) + typs[36] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])}) + typs[37] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])}) typs[38] = types.NewSlice(typs[28]) - typs[39] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[38])}, []*ir.Node{anonfield(typs[28])}) - typs[40] = functype(nil, []*ir.Node{anonfield(typs[28]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[15])}) + typs[39] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[38])}, []ir.Node{anonfield(typs[28])}) + typs[40] = functype(nil, []ir.Node{anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[15])}) typs[41] = types.NewArray(typs[0], 4) typs[42] = types.NewPtr(typs[41]) - typs[43] = functype(nil, []*ir.Node{anonfield(typs[42]), anonfield(typs[22])}, []*ir.Node{anonfield(typs[28])}) - typs[44] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[28])}) - typs[45] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[28])}) + typs[43] = functype(nil, []ir.Node{anonfield(typs[42]), anonfield(typs[22])}, []ir.Node{anonfield(typs[28])}) + typs[44] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []ir.Node{anonfield(typs[28])}) + typs[45] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15])}, []ir.Node{anonfield(typs[28])}) typs[46] = types.Runetype typs[47] = types.NewSlice(typs[46]) - typs[48] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[47])}, []*ir.Node{anonfield(typs[28])}) + typs[48] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[47])}, []ir.Node{anonfield(typs[28])}) typs[49] = types.NewSlice(typs[0]) - typs[50] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[49])}) + typs[50] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28])}, []ir.Node{anonfield(typs[49])}) typs[51] = types.NewArray(typs[46], 32) typs[52] = types.NewPtr(typs[51]) - typs[53] = functype(nil, []*ir.Node{anonfield(typs[52]), anonfield(typs[28])}, 
[]*ir.Node{anonfield(typs[47])}) - typs[54] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*ir.Node{anonfield(typs[15])}) - typs[55] = functype(nil, []*ir.Node{anonfield(typs[28]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[46]), anonfield(typs[15])}) - typs[56] = functype(nil, []*ir.Node{anonfield(typs[28])}, []*ir.Node{anonfield(typs[15])}) - typs[57] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[2])}, []*ir.Node{anonfield(typs[2])}) - typs[58] = functype(nil, []*ir.Node{anonfield(typs[2])}, []*ir.Node{anonfield(typs[7])}) - typs[59] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[2])}) - typs[60] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[2])}, []*ir.Node{anonfield(typs[2]), anonfield(typs[6])}) - typs[61] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil) - typs[62] = functype(nil, []*ir.Node{anonfield(typs[1])}, nil) + typs[53] = functype(nil, []ir.Node{anonfield(typs[52]), anonfield(typs[28])}, []ir.Node{anonfield(typs[47])}) + typs[54] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []ir.Node{anonfield(typs[15])}) + typs[55] = functype(nil, []ir.Node{anonfield(typs[28]), anonfield(typs[15])}, []ir.Node{anonfield(typs[46]), anonfield(typs[15])}) + typs[56] = functype(nil, []ir.Node{anonfield(typs[28])}, []ir.Node{anonfield(typs[15])}) + typs[57] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[2])}, []ir.Node{anonfield(typs[2])}) + typs[58] = functype(nil, []ir.Node{anonfield(typs[2])}, []ir.Node{anonfield(typs[7])}) + typs[59] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3])}, []ir.Node{anonfield(typs[2])}) + typs[60] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[2])}, []ir.Node{anonfield(typs[2]), anonfield(typs[6])}) + typs[61] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil) + typs[62] = functype(nil, []ir.Node{anonfield(typs[1])}, nil) typs[63] = types.NewPtr(typs[5]) - typs[64] = functype(nil, []*ir.Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*ir.Node{anonfield(typs[6])}) + typs[64] = functype(nil, []ir.Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []ir.Node{anonfield(typs[6])}) typs[65] = types.Types[types.TUINT32] - typs[66] = functype(nil, nil, []*ir.Node{anonfield(typs[65])}) + typs[66] = functype(nil, nil, []ir.Node{anonfield(typs[65])}) typs[67] = types.NewMap(typs[2], typs[2]) - typs[68] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[67])}) - typs[69] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[67])}) - typs[70] = functype(nil, nil, []*ir.Node{anonfield(typs[67])}) - typs[71] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[3])}) - typs[72] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*ir.Node{anonfield(typs[3])}) - typs[73] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*ir.Node{anonfield(typs[3])}) - typs[74] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[3]), 
anonfield(typs[6])}) - typs[75] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*ir.Node{anonfield(typs[3]), anonfield(typs[6])}) - typs[76] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*ir.Node{anonfield(typs[3]), anonfield(typs[6])}) - typs[77] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil) - typs[78] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil) - typs[79] = functype(nil, []*ir.Node{anonfield(typs[3])}, nil) - typs[80] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67])}, nil) + typs[68] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []ir.Node{anonfield(typs[67])}) + typs[69] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []ir.Node{anonfield(typs[67])}) + typs[70] = functype(nil, nil, []ir.Node{anonfield(typs[67])}) + typs[71] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []ir.Node{anonfield(typs[3])}) + typs[72] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []ir.Node{anonfield(typs[3])}) + typs[73] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []ir.Node{anonfield(typs[3])}) + typs[74] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []ir.Node{anonfield(typs[3]), anonfield(typs[6])}) + typs[75] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []ir.Node{anonfield(typs[3]), anonfield(typs[6])}) + typs[76] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []ir.Node{anonfield(typs[3]), anonfield(typs[6])}) + typs[77] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil) + typs[78] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil) + typs[79] = functype(nil, []ir.Node{anonfield(typs[3])}, nil) + typs[80] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67])}, nil) typs[81] = types.NewChan(typs[2], types.Cboth) - typs[82] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[22])}, []*ir.Node{anonfield(typs[81])}) - typs[83] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[81])}) + typs[82] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[22])}, []ir.Node{anonfield(typs[81])}) + typs[83] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15])}, []ir.Node{anonfield(typs[81])}) typs[84] = types.NewChan(typs[2], types.Crecv) - typs[85] = functype(nil, []*ir.Node{anonfield(typs[84]), anonfield(typs[3])}, nil) - typs[86] = functype(nil, []*ir.Node{anonfield(typs[84]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[6])}) + typs[85] = functype(nil, []ir.Node{anonfield(typs[84]), anonfield(typs[3])}, nil) + typs[86] = functype(nil, []ir.Node{anonfield(typs[84]), anonfield(typs[3])}, []ir.Node{anonfield(typs[6])}) typs[87] = types.NewChan(typs[2], types.Csend) - typs[88] = functype(nil, []*ir.Node{anonfield(typs[87]), anonfield(typs[3])}, nil) + typs[88] = functype(nil, []ir.Node{anonfield(typs[87]), anonfield(typs[3])}, nil) typs[89] = types.NewArray(typs[0], 3) - typs[90] = tostruct([]*ir.Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), 
namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])}) - typs[91] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil) - typs[92] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[3])}, nil) - typs[93] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[15])}) - typs[94] = functype(nil, []*ir.Node{anonfield(typs[87]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[6])}) - typs[95] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[84])}, []*ir.Node{anonfield(typs[6])}) + typs[90] = tostruct([]ir.Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])}) + typs[91] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil) + typs[92] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3])}, nil) + typs[93] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []ir.Node{anonfield(typs[15])}) + typs[94] = functype(nil, []ir.Node{anonfield(typs[87]), anonfield(typs[3])}, []ir.Node{anonfield(typs[6])}) + typs[95] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[84])}, []ir.Node{anonfield(typs[6])}) typs[96] = types.NewPtr(typs[6]) - typs[97] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*ir.Node{anonfield(typs[6])}) - typs[98] = functype(nil, []*ir.Node{anonfield(typs[63])}, nil) - typs[99] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*ir.Node{anonfield(typs[15]), anonfield(typs[6])}) - typs[100] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[7])}) - typs[101] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*ir.Node{anonfield(typs[7])}) - typs[102] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*ir.Node{anonfield(typs[7])}) + typs[97] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []ir.Node{anonfield(typs[6])}) + typs[98] = functype(nil, []ir.Node{anonfield(typs[63])}, nil) + typs[99] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []ir.Node{anonfield(typs[15]), anonfield(typs[6])}) + typs[100] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []ir.Node{anonfield(typs[7])}) + typs[101] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []ir.Node{anonfield(typs[7])}) + typs[102] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []ir.Node{anonfield(typs[7])}) typs[103] = types.NewSlice(typs[2]) - typs[104] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[103])}) - typs[105] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil) - typs[106] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[5])}, nil) - typs[107] = functype(nil, 
[]*ir.Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*ir.Node{anonfield(typs[6])}) - typs[108] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[6])}) - typs[109] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[7])}, []*ir.Node{anonfield(typs[6])}) - typs[110] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*ir.Node{anonfield(typs[5])}) - typs[111] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[5])}, []*ir.Node{anonfield(typs[5])}) - typs[112] = functype(nil, []*ir.Node{anonfield(typs[22]), anonfield(typs[22])}, []*ir.Node{anonfield(typs[22])}) - typs[113] = functype(nil, []*ir.Node{anonfield(typs[24]), anonfield(typs[24])}, []*ir.Node{anonfield(typs[24])}) - typs[114] = functype(nil, []*ir.Node{anonfield(typs[20])}, []*ir.Node{anonfield(typs[22])}) - typs[115] = functype(nil, []*ir.Node{anonfield(typs[20])}, []*ir.Node{anonfield(typs[24])}) - typs[116] = functype(nil, []*ir.Node{anonfield(typs[20])}, []*ir.Node{anonfield(typs[65])}) - typs[117] = functype(nil, []*ir.Node{anonfield(typs[22])}, []*ir.Node{anonfield(typs[20])}) - typs[118] = functype(nil, []*ir.Node{anonfield(typs[24])}, []*ir.Node{anonfield(typs[20])}) - typs[119] = functype(nil, []*ir.Node{anonfield(typs[65])}, []*ir.Node{anonfield(typs[20])}) - typs[120] = functype(nil, []*ir.Node{anonfield(typs[26]), anonfield(typs[26])}, []*ir.Node{anonfield(typs[26])}) - typs[121] = functype(nil, []*ir.Node{anonfield(typs[5]), anonfield(typs[5])}, nil) - typs[122] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil) + typs[104] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []ir.Node{anonfield(typs[103])}) + typs[105] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil) + typs[106] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[5])}, nil) + typs[107] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []ir.Node{anonfield(typs[6])}) + typs[108] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[3])}, []ir.Node{anonfield(typs[6])}) + typs[109] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[7])}, []ir.Node{anonfield(typs[6])}) + typs[110] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []ir.Node{anonfield(typs[5])}) + typs[111] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[5])}, []ir.Node{anonfield(typs[5])}) + typs[112] = functype(nil, []ir.Node{anonfield(typs[22]), anonfield(typs[22])}, []ir.Node{anonfield(typs[22])}) + typs[113] = functype(nil, []ir.Node{anonfield(typs[24]), anonfield(typs[24])}, []ir.Node{anonfield(typs[24])}) + typs[114] = functype(nil, []ir.Node{anonfield(typs[20])}, []ir.Node{anonfield(typs[22])}) + typs[115] = functype(nil, []ir.Node{anonfield(typs[20])}, []ir.Node{anonfield(typs[24])}) + typs[116] = functype(nil, []ir.Node{anonfield(typs[20])}, []ir.Node{anonfield(typs[65])}) + typs[117] = functype(nil, []ir.Node{anonfield(typs[22])}, []ir.Node{anonfield(typs[20])}) + typs[118] = functype(nil, []ir.Node{anonfield(typs[24])}, []ir.Node{anonfield(typs[20])}) + typs[119] = functype(nil, []ir.Node{anonfield(typs[65])}, []ir.Node{anonfield(typs[20])}) + typs[120] = functype(nil, []ir.Node{anonfield(typs[26]), anonfield(typs[26])}, []ir.Node{anonfield(typs[26])}) + typs[121] = functype(nil, []ir.Node{anonfield(typs[5]), anonfield(typs[5])}, 
nil) + typs[122] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil) typs[123] = types.NewSlice(typs[7]) - typs[124] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[123])}, nil) + typs[124] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[123])}, nil) typs[125] = types.Types[types.TUINT8] - typs[126] = functype(nil, []*ir.Node{anonfield(typs[125]), anonfield(typs[125])}, nil) + typs[126] = functype(nil, []ir.Node{anonfield(typs[125]), anonfield(typs[125])}, nil) typs[127] = types.Types[types.TUINT16] - typs[128] = functype(nil, []*ir.Node{anonfield(typs[127]), anonfield(typs[127])}, nil) - typs[129] = functype(nil, []*ir.Node{anonfield(typs[65]), anonfield(typs[65])}, nil) - typs[130] = functype(nil, []*ir.Node{anonfield(typs[24]), anonfield(typs[24])}, nil) + typs[128] = functype(nil, []ir.Node{anonfield(typs[127]), anonfield(typs[127])}, nil) + typs[129] = functype(nil, []ir.Node{anonfield(typs[65]), anonfield(typs[65])}, nil) + typs[130] = functype(nil, []ir.Node{anonfield(typs[24]), anonfield(typs[24])}, nil) return typs[:] } diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index 2dce7b7f03768..2901ae41d6eba 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -13,7 +13,7 @@ import ( "fmt" ) -func (p *noder) funcLit(expr *syntax.FuncLit) *ir.Node { +func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node { xtype := p.typeExpr(expr.Type) ntype := p.typeExpr(expr.Type) @@ -78,7 +78,7 @@ func (p *noder) funcLit(expr *syntax.FuncLit) *ir.Node { // function associated with the closure. // TODO: This creation of the named function should probably really be done in a // separate pass from type-checking. -func typecheckclosure(clo *ir.Node, top int) { +func typecheckclosure(clo ir.Node, top int) { fn := clo.Func() dcl := fn.Decl // Set current associated iota value, so iota can be used inside @@ -140,7 +140,7 @@ var globClosgen int // closurename generates a new unique name for a closure within // outerfunc. -func closurename(outerfunc *ir.Node) *types.Sym { +func closurename(outerfunc ir.Node) *types.Sym { outer := "glob." prefix := "func" gen := &globClosgen @@ -172,7 +172,7 @@ var capturevarscomplete bool // by value or by reference. // We use value capturing for values <= 128 bytes that are never reassigned // after capturing (effectively constant). -func capturevars(dcl *ir.Node) { +func capturevars(dcl ir.Node) { lno := base.Pos base.Pos = dcl.Pos() fn := dcl.Func() @@ -227,7 +227,7 @@ func capturevars(dcl *ir.Node) { // transformclosure is called in a separate phase after escape analysis. // It transform closure bodies to properly reference captured variables. -func transformclosure(dcl *ir.Node) { +func transformclosure(dcl ir.Node) { lno := base.Pos base.Pos = dcl.Pos() fn := dcl.Func() @@ -253,7 +253,7 @@ func transformclosure(dcl *ir.Node) { // We are going to insert captured variables before input args. var params []*types.Field - var decls []*ir.Node + var decls []ir.Node for _, v := range fn.ClosureVars.Slice() { if !v.Name().Byval() { // If v of type T is captured by reference, @@ -284,7 +284,7 @@ func transformclosure(dcl *ir.Node) { dcl.SetType(f.Type()) // update type of ODCLFUNC } else { // The closure is not called, so it is going to stay as closure. - var body []*ir.Node + var body []ir.Node offset := int64(Widthptr) for _, v := range fn.ClosureVars.Slice() { // cv refers to the field inside of closure OSTRUCTLIT. 
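The hunks above and below repeat the one mechanical pattern this change applies across the package: the concrete pointer type *ir.Node becomes the ir.Node interface, in parameters, results, slices ([]*ir.Node to []ir.Node), and map types alike. Here is a minimal, self-contained sketch of why call sites keep compiling after the swap; the stand-in names below are hypothetical and not part of the patch:

package main

import "fmt"

// Node stands in for the ir.Node interface; the real interface carries
// many more methods (Op, Pos, Type, Func, Sym, ...).
type Node interface {
	Op() int
}

// node stands in for the old concrete struct that *ir.Node pointed at.
type node struct{ op int }

// *node satisfies Node, so values that used to flow as *node (formerly
// *ir.Node) flow unchanged where Node (ir.Node) is now expected.
func (n *node) Op() int { return n.op }

// Before this change the signature would have been inspect(n *node);
// with the interface, existing callers need no edits.
func inspect(n Node) int { return n.Op() }

func main() {
	ns := []Node{&node{op: 1}, &node{op: 2}} // formerly []*node
	for _, n := range ns {
		fmt.Println(inspect(n))
	}
}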
@@ -332,13 +332,13 @@ func transformclosure(dcl *ir.Node) { // hasemptycvars reports whether closure clo has an // empty list of captured vars. -func hasemptycvars(clo *ir.Node) bool { +func hasemptycvars(clo ir.Node) bool { return clo.Func().ClosureVars.Len() == 0 } // closuredebugruntimecheck applies boilerplate checks for debug flags // and compiling runtime -func closuredebugruntimecheck(clo *ir.Node) { +func closuredebugruntimecheck(clo ir.Node) { if base.Debug.Closure > 0 { if clo.Esc() == EscHeap { base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func().ClosureVars) @@ -354,7 +354,7 @@ func closuredebugruntimecheck(clo *ir.Node) { // closureType returns the struct type used to hold all the information // needed in the closure for clo (clo must be a OCLOSURE node). // The address of a variable of the returned type can be cast to a func. -func closureType(clo *ir.Node) *types.Type { +func closureType(clo ir.Node) *types.Type { // Create closure in the form of a composite literal. // supposing the closure captures an int i and a string s // and has one float64 argument and no results, @@ -368,7 +368,7 @@ func closureType(clo *ir.Node) *types.Type { // The information appears in the binary in the form of type descriptors; // the struct is unnamed so that closures in multiple packages with the // same struct type can share the descriptor. - fields := []*ir.Node{ + fields := []ir.Node{ namedfield(".F", types.Types[types.TUINTPTR]), } for _, v := range clo.Func().ClosureVars.Slice() { @@ -383,7 +383,7 @@ func closureType(clo *ir.Node) *types.Type { return typ } -func walkclosure(clo *ir.Node, init *ir.Nodes) *ir.Node { +func walkclosure(clo ir.Node, init *ir.Nodes) ir.Node { fn := clo.Func() // If no closure vars, don't bother wrapping. @@ -399,7 +399,7 @@ func walkclosure(clo *ir.Node, init *ir.Nodes) *ir.Node { clos := ir.Nod(ir.OCOMPLIT, nil, typenod(typ)) clos.SetEsc(clo.Esc()) - clos.PtrList().Set(append([]*ir.Node{ir.Nod(ir.OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...)) + clos.PtrList().Set(append([]ir.Node{ir.Nod(ir.OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...)) clos = ir.Nod(ir.OADDR, clos, nil) clos.SetEsc(clo.Esc()) @@ -419,7 +419,7 @@ func walkclosure(clo *ir.Node, init *ir.Nodes) *ir.Node { return walkexpr(clos, init) } -func typecheckpartialcall(dot *ir.Node, sym *types.Sym) { +func typecheckpartialcall(dot ir.Node, sym *types.Sym) { switch dot.Op() { case ir.ODOTINTER, ir.ODOTMETH: break @@ -440,7 +440,7 @@ func typecheckpartialcall(dot *ir.Node, sym *types.Sym) { // makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed // for partial calls. -func makepartialcall(dot *ir.Node, t0 *types.Type, meth *types.Sym) *ir.Node { +func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) ir.Node { rcvrtype := dot.Left().Type() sym := methodSymSuffix(rcvrtype, meth, "-fm") @@ -484,7 +484,7 @@ func makepartialcall(dot *ir.Node, t0 *types.Type, meth *types.Sym) *ir.Node { ptr := NewName(lookup(".this")) declare(ptr, ir.PAUTO) ptr.Name().SetUsed(true) - var body []*ir.Node + var body []ir.Node if rcvrtype.IsPtr() || rcvrtype.IsInterface() { ptr.SetType(rcvrtype) body = append(body, ir.Nod(ir.OAS, ptr, cv)) @@ -522,8 +522,8 @@ func makepartialcall(dot *ir.Node, t0 *types.Type, meth *types.Sym) *ir.Node { // partialCallType returns the struct type used to hold all the information // needed in the closure for n (n must be a OCALLPART node). // The address of a variable of the returned type can be cast to a func. 
-func partialCallType(n *ir.Node) *types.Type { - t := tostruct([]*ir.Node{ +func partialCallType(n ir.Node) *types.Type { + t := tostruct([]ir.Node{ namedfield("F", types.Types[types.TUINTPTR]), namedfield("R", n.Left().Type()), }) @@ -531,7 +531,7 @@ func partialCallType(n *ir.Node) *types.Type { return t } -func walkpartialcall(n *ir.Node, init *ir.Nodes) *ir.Node { +func walkpartialcall(n ir.Node, init *ir.Nodes) ir.Node { // Create closure in the form of a composite literal. // For x.M with receiver (x) type T, the generated code looks like: // @@ -579,7 +579,7 @@ func walkpartialcall(n *ir.Node, init *ir.Nodes) *ir.Node { // callpartMethod returns the *types.Field representing the method // referenced by method value n. -func callpartMethod(n *ir.Node) *types.Field { +func callpartMethod(n ir.Node) *types.Field { if n.Op() != ir.OCALLPART { base.Fatalf("expected OCALLPART, got %v", n) } diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 27e54b46c83d4..4beb85245fde7 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -84,8 +84,8 @@ func trunccmplxlit(v constant.Value, t *types.Type) constant.Value { } // TODO(mdempsky): Replace these with better APIs. -func convlit(n *ir.Node, t *types.Type) *ir.Node { return convlit1(n, t, false, nil) } -func defaultlit(n *ir.Node, t *types.Type) *ir.Node { return convlit1(n, t, false, nil) } +func convlit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) } +func defaultlit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) } // convlit1 converts an untyped expression n to type t. If n already // has a type, convlit1 has no effect. @@ -98,7 +98,7 @@ func defaultlit(n *ir.Node, t *types.Type) *ir.Node { return convlit1(n, t, fals // // If there's an error converting n to t, context is used in the error // message. -func convlit1(n *ir.Node, t *types.Type, explicit bool, context func() string) *ir.Node { +func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir.Node { if explicit && t == nil { base.Fatalf("explicit conversion missing type") } @@ -438,7 +438,7 @@ var tokenForOp = [...]token.Token{ // If n is not a constant, evalConst returns n. // Otherwise, evalConst returns a new OLITERAL with the same value as n, // and with .Orig pointing back to n. -func evalConst(n *ir.Node) *ir.Node { +func evalConst(n ir.Node) ir.Node { nl, nr := n.Left(), n.Right() // Pick off just the opcodes that can be constant evaluated. @@ -525,7 +525,7 @@ func evalConst(n *ir.Node) *ir.Node { } return origConst(n, constant.MakeString(strings.Join(strs, ""))) } - newList := make([]*ir.Node, 0, need) + newList := make([]ir.Node, 0, need) for i := 0; i < len(s); i++ { if ir.IsConst(s[i], constant.String) && i+1 < len(s) && ir.IsConst(s[i+1], constant.String) { // merge from i up to but not including i2 @@ -619,7 +619,7 @@ var overflowNames = [...]string{ } // origConst returns an OLITERAL with orig n and value v. 
-func origConst(n *ir.Node, v constant.Value) *ir.Node { +func origConst(n ir.Node, v constant.Value) ir.Node { lno := setlineno(n) v = convertVal(v, n.Type(), false) base.Pos = lno @@ -648,11 +648,11 @@ func origConst(n *ir.Node, v constant.Value) *ir.Node { return n } -func origBoolConst(n *ir.Node, v bool) *ir.Node { +func origBoolConst(n ir.Node, v bool) ir.Node { return origConst(n, constant.MakeBool(v)) } -func origIntConst(n *ir.Node, v int64) *ir.Node { +func origIntConst(n ir.Node, v int64) ir.Node { return origConst(n, constant.MakeInt64(v)) } @@ -662,7 +662,7 @@ func origIntConst(n *ir.Node, v int64) *ir.Node { // force means must assign concrete (non-ideal) type. // The results of defaultlit2 MUST be assigned back to l and r, e.g. // n.Left, n.Right = defaultlit2(n.Left, n.Right, force) -func defaultlit2(l *ir.Node, r *ir.Node, force bool) (*ir.Node, *ir.Node) { +func defaultlit2(l ir.Node, r ir.Node, force bool) (ir.Node, ir.Node) { if l.Type() == nil || r.Type() == nil { return l, r } @@ -747,7 +747,7 @@ func defaultType(t *types.Type) *types.Type { return nil } -func smallintconst(n *ir.Node) bool { +func smallintconst(n ir.Node) bool { if n.Op() == ir.OLITERAL { v, ok := constant.Int64Val(n.Val()) return ok && int64(int32(v)) == v @@ -760,7 +760,7 @@ func smallintconst(n *ir.Node) bool { // If n is not a constant expression, not representable as an // integer, or negative, it returns -1. If n is too large, it // returns -2. -func indexconst(n *ir.Node) int64 { +func indexconst(n ir.Node) int64 { if n.Op() != ir.OLITERAL { return -1 } @@ -783,11 +783,11 @@ func indexconst(n *ir.Node) int64 { // // Expressions derived from nil, like string([]byte(nil)), while they // may be known at compile time, are not Go language constants. -func isGoConst(n *ir.Node) bool { +func isGoConst(n ir.Node) bool { return n.Op() == ir.OLITERAL } -func hascallchan(n *ir.Node) bool { +func hascallchan(n ir.Node) bool { if n == nil { return false } @@ -851,7 +851,7 @@ type constSetKey struct { // where are used in the error message. // // n must not be an untyped constant. -func (s *constSet) add(pos src.XPos, n *ir.Node, what, where string) { +func (s *constSet) add(pos src.XPos, n ir.Node, what, where string) { if n.Op() == ir.OCONVIFACE && n.Implicit() { n = n.Left() } @@ -908,7 +908,7 @@ func (s *constSet) add(pos src.XPos, n *ir.Node, what, where string) { // the latter is non-obvious. // // TODO(mdempsky): This could probably be a fmt.go flag. -func nodeAndVal(n *ir.Node) string { +func nodeAndVal(n ir.Node) string { show := n.String() val := ir.ConstValue(n) if s := fmt.Sprintf("%#v", val); show != s { diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 8980c47e2c39e..2a7be137c0698 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -18,7 +18,7 @@ import ( // Declaration stack & operations -var externdcl []*ir.Node +var externdcl []ir.Node func testdclstack() { if !types.IsDclstackValid() { @@ -59,7 +59,7 @@ var declare_typegen int // declare records that Node n declares symbol n.Sym in the specified // declaration context. 
-func declare(n *ir.Node, ctxt ir.Class) { +func declare(n ir.Node, ctxt ir.Class) { if ir.IsBlank(n) { return } @@ -128,7 +128,7 @@ func declare(n *ir.Node, ctxt ir.Class) { autoexport(n, ctxt) } -func addvar(n *ir.Node, t *types.Type, ctxt ir.Class) { +func addvar(n ir.Node, t *types.Type, ctxt ir.Class) { if n == nil || n.Sym() == nil || (n.Op() != ir.ONAME && n.Op() != ir.ONONAME) || t == nil { base.Fatalf("addvar: n=%v t=%v nil", n, t) } @@ -140,8 +140,8 @@ func addvar(n *ir.Node, t *types.Type, ctxt ir.Class) { // declare variables from grammar // new_name_list (type | [type] = expr_list) -func variter(vl []*ir.Node, t *ir.Node, el []*ir.Node) []*ir.Node { - var init []*ir.Node +func variter(vl []ir.Node, t ir.Node, el []ir.Node) []ir.Node { + var init []ir.Node doexpr := len(el) > 0 if len(el) == 1 && len(vl) > 1 { @@ -164,7 +164,7 @@ func variter(vl []*ir.Node, t *ir.Node, el []*ir.Node) []*ir.Node { nel := len(el) for _, v := range vl { - var e *ir.Node + var e ir.Node if doexpr { if len(el) == 0 { base.Errorf("assignment mismatch: %d variables but %d values", len(vl), nel) @@ -197,7 +197,7 @@ func variter(vl []*ir.Node, t *ir.Node, el []*ir.Node) []*ir.Node { } // newnoname returns a new ONONAME Node associated with symbol s. -func newnoname(s *types.Sym) *ir.Node { +func newnoname(s *types.Sym) ir.Node { if s == nil { base.Fatalf("newnoname nil") } @@ -208,7 +208,7 @@ func newnoname(s *types.Sym) *ir.Node { } // newfuncnamel generates a new name node for a function or method. -func newfuncnamel(pos src.XPos, s *types.Sym, fn *ir.Func) *ir.Node { +func newfuncnamel(pos src.XPos, s *types.Sym, fn *ir.Func) ir.Node { if fn.Nname != nil { base.Fatalf("newfuncnamel - already have name") } @@ -220,17 +220,17 @@ func newfuncnamel(pos src.XPos, s *types.Sym, fn *ir.Func) *ir.Node { // this generates a new name node for a name // being declared. -func dclname(s *types.Sym) *ir.Node { +func dclname(s *types.Sym) ir.Node { n := NewName(s) n.SetOp(ir.ONONAME) // caller will correct it return n } -func typenod(t *types.Type) *ir.Node { +func typenod(t *types.Type) ir.Node { return typenodl(src.NoXPos, t) } -func typenodl(pos src.XPos, t *types.Type) *ir.Node { +func typenodl(pos src.XPos, t *types.Type) ir.Node { // if we copied another type with *t = *u // then t->nod might be out of date, so // check t->nod->type too @@ -243,15 +243,15 @@ func typenodl(pos src.XPos, t *types.Type) *ir.Node { return ir.AsNode(t.Nod) } -func anonfield(typ *types.Type) *ir.Node { +func anonfield(typ *types.Type) ir.Node { return symfield(nil, typ) } -func namedfield(s string, typ *types.Type) *ir.Node { +func namedfield(s string, typ *types.Type) ir.Node { return symfield(lookup(s), typ) } -func symfield(s *types.Sym, typ *types.Type) *ir.Node { +func symfield(s *types.Sym, typ *types.Type) ir.Node { n := nodSym(ir.ODCLFIELD, nil, s) n.SetType(typ) return n @@ -261,7 +261,7 @@ func symfield(s *types.Sym, typ *types.Type) *ir.Node { // If no such Node currently exists, an ONONAME Node is returned instead. // Automatically creates a new closure variable if the referenced symbol was // declared in a different (containing) function. -func oldname(s *types.Sym) *ir.Node { +func oldname(s *types.Sym) ir.Node { n := ir.AsNode(s.Def) if n == nil { // Maybe a top-level declaration will come along later to @@ -302,7 +302,7 @@ func oldname(s *types.Sym) *ir.Node { } // importName is like oldname, but it reports an error if sym is from another package and not exported. 
-func importName(sym *types.Sym) *ir.Node { +func importName(sym *types.Sym) ir.Node { n := oldname(sym) if !types.IsExported(sym.Name) && sym.Pkg != ir.LocalPkg { n.SetDiag(true) @@ -312,7 +312,7 @@ func importName(sym *types.Sym) *ir.Node { } // := declarations -func colasname(n *ir.Node) bool { +func colasname(n ir.Node) bool { switch n.Op() { case ir.ONAME, ir.ONONAME, @@ -325,7 +325,7 @@ func colasname(n *ir.Node) bool { return false } -func colasdefn(left []*ir.Node, defn *ir.Node) { +func colasdefn(left []ir.Node, defn ir.Node) { for _, n := range left { if n.Sym() != nil { n.Sym().SetUniq(true) @@ -370,7 +370,7 @@ func colasdefn(left []*ir.Node, defn *ir.Node) { // declare the arguments in an // interface field declaration. -func ifacedcl(n *ir.Node) { +func ifacedcl(n ir.Node) { if n.Op() != ir.ODCLFIELD || n.Left() == nil { base.Fatalf("ifacedcl") } @@ -384,7 +384,7 @@ func ifacedcl(n *ir.Node) { // and declare the arguments. // called in extern-declaration context // returns in auto-declaration context. -func funchdr(n *ir.Node) { +func funchdr(n ir.Node) { // change the declaration context from extern to auto funcStack = append(funcStack, funcStackEnt{Curfn, dclcontext}) Curfn = n @@ -399,7 +399,7 @@ func funchdr(n *ir.Node) { } } -func funcargs(nt *ir.Node) { +func funcargs(nt ir.Node) { if nt.Op() != ir.OTFUNC { base.Fatalf("funcargs %v", nt.Op()) } @@ -449,7 +449,7 @@ func funcargs(nt *ir.Node) { vargen = oldvargen } -func funcarg(n *ir.Node, ctxt ir.Class) { +func funcarg(n ir.Node, ctxt ir.Class) { if n.Op() != ir.ODCLFIELD { base.Fatalf("funcarg %v", n.Op()) } @@ -499,7 +499,7 @@ func funcarg2(f *types.Field, ctxt ir.Class) { var funcStack []funcStackEnt // stack of previous values of Curfn/dclcontext type funcStackEnt struct { - curfn *ir.Node + curfn ir.Node dclcontext ir.Class } @@ -535,7 +535,7 @@ func checkembeddedtype(t *types.Type) { } } -func structfield(n *ir.Node) *types.Field { +func structfield(n ir.Node) *types.Field { lno := base.Pos base.Pos = n.Pos() @@ -582,7 +582,7 @@ func checkdupfields(what string, fss ...[]*types.Field) { // convert a parsed id/type list into // a type for struct/interface/arglist -func tostruct(l []*ir.Node) *types.Type { +func tostruct(l []ir.Node) *types.Type { t := types.New(types.TSTRUCT) fields := make([]*types.Field, len(l)) @@ -604,7 +604,7 @@ func tostruct(l []*ir.Node) *types.Type { return t } -func tofunargs(l []*ir.Node, funarg types.Funarg) *types.Type { +func tofunargs(l []ir.Node, funarg types.Funarg) *types.Type { t := types.New(types.TSTRUCT) t.StructType().Funarg = funarg @@ -632,7 +632,7 @@ func tofunargsfield(fields []*types.Field, funarg types.Funarg) *types.Type { return t } -func interfacefield(n *ir.Node) *types.Field { +func interfacefield(n ir.Node) *types.Field { lno := base.Pos base.Pos = n.Pos() @@ -661,7 +661,7 @@ func interfacefield(n *ir.Node) *types.Field { return f } -func tointerface(l []*ir.Node) *types.Type { +func tointerface(l []ir.Node) *types.Type { if len(l) == 0 { return types.Types[types.TINTER] } @@ -678,7 +678,7 @@ func tointerface(l []*ir.Node) *types.Type { return t } -func fakeRecv() *ir.Node { +func fakeRecv() ir.Node { return anonfield(types.FakeRecvType()) } @@ -694,12 +694,12 @@ func isifacemethod(f *types.Type) bool { } // turn a parsed function declaration into a type -func functype(this *ir.Node, in, out []*ir.Node) *types.Type { +func functype(this ir.Node, in, out []ir.Node) *types.Type { t := types.New(types.TFUNC) - var rcvr []*ir.Node + var rcvr []ir.Node if this != nil { - 
rcvr = []*ir.Node{this} + rcvr = []ir.Node{this} } t.FuncType().Receiver = tofunargs(rcvr, types.FunargRcvr) t.FuncType().Params = tofunargs(in, types.FunargParams) @@ -799,7 +799,7 @@ func methodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sy // - msym is the method symbol // - t is function type (with receiver) // Returns a pointer to the existing or added Field; or nil if there's an error. -func addmethod(n *ir.Node, msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field { +func addmethod(n ir.Node, msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field { if msym == nil { base.Fatalf("no method symbol") } @@ -935,7 +935,7 @@ func makefuncsym(s *types.Sym) { } // setNodeNameFunc marks a node as a function. -func setNodeNameFunc(n *ir.Node) { +func setNodeNameFunc(n ir.Node) { if n.Op() != ir.ONAME || n.Class() != ir.Pxxx { base.Fatalf("expected ONAME/Pxxx node, got %v", n) } @@ -944,7 +944,7 @@ func setNodeNameFunc(n *ir.Node) { n.Sym().SetFunc(true) } -func dclfunc(sym *types.Sym, tfn *ir.Node) *ir.Node { +func dclfunc(sym *types.Sym, tfn ir.Node) ir.Node { if tfn.Op() != ir.OTFUNC { base.Fatalf("expected OTFUNC node, got %v", tfn) } @@ -963,14 +963,14 @@ type nowritebarrierrecChecker struct { // extraCalls contains extra function calls that may not be // visible during later analysis. It maps from the ODCLFUNC of // the caller to a list of callees. - extraCalls map[*ir.Node][]nowritebarrierrecCall + extraCalls map[ir.Node][]nowritebarrierrecCall // curfn is the current function during AST walks. - curfn *ir.Node + curfn ir.Node } type nowritebarrierrecCall struct { - target *ir.Node // ODCLFUNC of caller or callee + target ir.Node // ODCLFUNC of caller or callee lineno src.XPos // line of call } @@ -978,7 +978,7 @@ type nowritebarrierrecCall struct { // must be called before transformclosure and walk. func newNowritebarrierrecChecker() *nowritebarrierrecChecker { c := &nowritebarrierrecChecker{ - extraCalls: make(map[*ir.Node][]nowritebarrierrecCall), + extraCalls: make(map[ir.Node][]nowritebarrierrecCall), } // Find all systemstack calls and record their targets. In @@ -997,7 +997,7 @@ func newNowritebarrierrecChecker() *nowritebarrierrecChecker { return c } -func (c *nowritebarrierrecChecker) findExtraCalls(n *ir.Node) bool { +func (c *nowritebarrierrecChecker) findExtraCalls(n ir.Node) bool { if n.Op() != ir.OCALLFUNC { return true } @@ -1009,7 +1009,7 @@ func (c *nowritebarrierrecChecker) findExtraCalls(n *ir.Node) bool { return true } - var callee *ir.Node + var callee ir.Node arg := n.List().First() switch arg.Op() { case ir.ONAME: @@ -1034,7 +1034,7 @@ func (c *nowritebarrierrecChecker) findExtraCalls(n *ir.Node) bool { // because that's all we know after we start SSA. // // This can be called concurrently for different from Nodes. -func (c *nowritebarrierrecChecker) recordCall(from *ir.Node, to *obj.LSym, pos src.XPos) { +func (c *nowritebarrierrecChecker) recordCall(from ir.Node, to *obj.LSym, pos src.XPos) { if from.Op() != ir.ODCLFUNC { base.Fatalf("expected ODCLFUNC, got %v", from) } @@ -1052,14 +1052,14 @@ func (c *nowritebarrierrecChecker) check() { // capture all calls created by lowering, but this means we // only get to see the obj.LSyms of calls. symToFunc lets us // get back to the ODCLFUNCs. - symToFunc := make(map[*obj.LSym]*ir.Node) + symToFunc := make(map[*obj.LSym]ir.Node) // funcs records the back-edges of the BFS call graph walk. 
It // maps from the ODCLFUNC of each function that must not have // write barriers to the call that inhibits them. Functions // that are directly marked go:nowritebarrierrec are in this // map with a zero-valued nowritebarrierrecCall. This also // acts as the set of marks for the BFS of the call graph. - funcs := make(map[*ir.Node]nowritebarrierrecCall) + funcs := make(map[ir.Node]nowritebarrierrecCall) // q is the queue of ODCLFUNC Nodes to visit in BFS order. var q ir.NodeQueue @@ -1083,7 +1083,7 @@ func (c *nowritebarrierrecChecker) check() { // Perform a BFS of the call graph from all // go:nowritebarrierrec functions. - enqueue := func(src, target *ir.Node, pos src.XPos) { + enqueue := func(src, target ir.Node, pos src.XPos) { if target.Func().Pragma&ir.Yeswritebarrierrec != 0 { // Don't flow into this function. return diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go index 03703f68d5fa3..33b05a5bf0889 100644 --- a/src/cmd/compile/internal/gc/embed.go +++ b/src/cmd/compile/internal/gc/embed.go @@ -17,7 +17,7 @@ import ( "strings" ) -var embedlist []*ir.Node +var embedlist []ir.Node const ( embedUnknown = iota @@ -28,7 +28,7 @@ const ( var numLocalEmbed int -func varEmbed(p *noder, names []*ir.Node, typ *ir.Node, exprs []*ir.Node, embeds []PragmaEmbed) (newExprs []*ir.Node) { +func varEmbed(p *noder, names []ir.Node, typ ir.Node, exprs []ir.Node, embeds []PragmaEmbed) (newExprs []ir.Node) { haveEmbed := false for _, decl := range p.file.DeclList { imp, ok := decl.(*syntax.ImportDecl) @@ -118,7 +118,7 @@ func varEmbed(p *noder, names []*ir.Node, typ *ir.Node, exprs []*ir.Node, embeds v.Name().Param.Ntype = typ v.SetClass(ir.PEXTERN) externdcl = append(externdcl, v) - exprs = []*ir.Node{v} + exprs = []ir.Node{v} } v.Name().Param.SetEmbedFiles(list) @@ -130,7 +130,7 @@ func varEmbed(p *noder, names []*ir.Node, typ *ir.Node, exprs []*ir.Node, embeds // The match is approximate because we haven't done scope resolution yet and // can't tell whether "string" and "byte" really mean "string" and "byte". // The result must be confirmed later, after type checking, using embedKind. -func embedKindApprox(typ *ir.Node) int { +func embedKindApprox(typ ir.Node) int { if typ.Sym() != nil && typ.Sym().Name == "FS" && (typ.Sym().Pkg.Path == "embed" || (typ.Sym().Pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "embed")) { return embedFiles } @@ -192,7 +192,7 @@ func dumpembeds() { // initEmbed emits the init data for a //go:embed variable, // which is either a string, a []byte, or an embed.FS. -func initEmbed(v *ir.Node) { +func initEmbed(v ir.Node) { files := v.Name().Param.EmbedFiles() switch kind := embedKind(v.Type()); kind { case embedUnknown: diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index f1786e74dcf10..783bc8c41dd12 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -86,7 +86,7 @@ import ( type Escape struct { allLocs []*EscLocation - curfn *ir.Node + curfn ir.Node // loopDepth counts the current loop nesting depth within // curfn. It increments within each "for" loop and at each @@ -101,8 +101,8 @@ type Escape struct { // An EscLocation represents an abstract location that stores a Go // variable. 
type EscLocation struct { - n *ir.Node // represented variable or expression, if any - curfn *ir.Node // enclosing function + n ir.Node // represented variable or expression, if any + curfn ir.Node // enclosing function edges []EscEdge // incoming edges loopDepth int // loopDepth at declaration @@ -147,7 +147,7 @@ func init() { } // escFmt is called from node printing to print information about escape analysis results. -func escFmt(n *ir.Node, short bool) string { +func escFmt(n ir.Node, short bool) string { text := "" switch n.Esc() { case EscUnknown: @@ -179,7 +179,7 @@ func escFmt(n *ir.Node, short bool) string { // escapeFuncs performs escape analysis on a minimal batch of // functions. -func escapeFuncs(fns []*ir.Node, recursive bool) { +func escapeFuncs(fns []ir.Node, recursive bool) { for _, fn := range fns { if fn.Op() != ir.ODCLFUNC { base.Fatalf("unexpected node: %v", fn) @@ -202,7 +202,7 @@ func escapeFuncs(fns []*ir.Node, recursive bool) { e.finish(fns) } -func (e *Escape) initFunc(fn *ir.Node) { +func (e *Escape) initFunc(fn ir.Node) { if fn.Op() != ir.ODCLFUNC || fn.Esc() != EscFuncUnknown { base.Fatalf("unexpected node: %v", fn) } @@ -222,11 +222,11 @@ func (e *Escape) initFunc(fn *ir.Node) { } } -func (e *Escape) walkFunc(fn *ir.Node) { +func (e *Escape) walkFunc(fn ir.Node) { fn.SetEsc(EscFuncStarted) // Identify labels that mark the head of an unstructured loop. - ir.InspectList(fn.Body(), func(n *ir.Node) bool { + ir.InspectList(fn.Body(), func(n ir.Node) bool { switch n.Op() { case ir.OLABEL: n.Sym().Label = nonlooping @@ -274,7 +274,7 @@ func (e *Escape) walkFunc(fn *ir.Node) { // } // stmt evaluates a single Go statement. -func (e *Escape) stmt(n *ir.Node) { +func (e *Escape) stmt(n ir.Node) { if n == nil { return } @@ -447,7 +447,7 @@ func (e *Escape) block(l ir.Nodes) { // expr models evaluating an expression n and flowing the result into // hole k. -func (e *Escape) expr(k EscHole, n *ir.Node) { +func (e *Escape) expr(k EscHole, n ir.Node) { if n == nil { return } @@ -455,7 +455,7 @@ func (e *Escape) expr(k EscHole, n *ir.Node) { e.exprSkipInit(k, n) } -func (e *Escape) exprSkipInit(k EscHole, n *ir.Node) { +func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { if n == nil { return } @@ -653,7 +653,7 @@ func (e *Escape) exprSkipInit(k EscHole, n *ir.Node) { // unsafeValue evaluates a uintptr-typed arithmetic expression looking // for conversions from an unsafe.Pointer. -func (e *Escape) unsafeValue(k EscHole, n *ir.Node) { +func (e *Escape) unsafeValue(k EscHole, n ir.Node) { if n.Type().Etype != types.TUINTPTR { base.Fatalf("unexpected type %v for %v", n.Type(), n) } @@ -690,7 +690,7 @@ func (e *Escape) unsafeValue(k EscHole, n *ir.Node) { // discard evaluates an expression n for side-effects, but discards // its value. -func (e *Escape) discard(n *ir.Node) { +func (e *Escape) discard(n ir.Node) { e.expr(e.discardHole(), n) } @@ -702,7 +702,7 @@ func (e *Escape) discards(l ir.Nodes) { // addr evaluates an addressable expression n and returns an EscHole // that represents storing into the represented location. -func (e *Escape) addr(n *ir.Node) EscHole { +func (e *Escape) addr(n ir.Node) EscHole { if n == nil || ir.IsBlank(n) { // Can happen at least in OSELRECV. // TODO(mdempsky): Anywhere else? @@ -751,7 +751,7 @@ func (e *Escape) addrs(l ir.Nodes) []EscHole { } // assign evaluates the assignment dst = src. 
-func (e *Escape) assign(dst, src *ir.Node, why string, where *ir.Node) { +func (e *Escape) assign(dst, src ir.Node, why string, where ir.Node) { // Filter out some no-op assignments for escape analysis. ignore := dst != nil && src != nil && isSelfAssign(dst, src) if ignore && base.Flag.LowerM != 0 { @@ -769,14 +769,14 @@ func (e *Escape) assign(dst, src *ir.Node, why string, where *ir.Node) { } } -func (e *Escape) assignHeap(src *ir.Node, why string, where *ir.Node) { +func (e *Escape) assignHeap(src ir.Node, why string, where ir.Node) { e.expr(e.heapHole().note(where, why), src) } // call evaluates a call expressions, including builtin calls. ks // should contain the holes representing where the function callee's // results flows; where is the OGO/ODEFER context of the call, if any. -func (e *Escape) call(ks []EscHole, call, where *ir.Node) { +func (e *Escape) call(ks []EscHole, call, where ir.Node) { topLevelDefer := where != nil && where.Op() == ir.ODEFER && e.loopDepth == 1 if topLevelDefer { // force stack allocation of defer record, unless @@ -784,7 +784,7 @@ func (e *Escape) call(ks []EscHole, call, where *ir.Node) { where.SetEsc(EscNever) } - argument := func(k EscHole, arg *ir.Node) { + argument := func(k EscHole, arg ir.Node) { if topLevelDefer { // Top level defers arguments don't escape to // heap, but they do need to last until end of @@ -805,7 +805,7 @@ func (e *Escape) call(ks []EscHole, call, where *ir.Node) { fixVariadicCall(call) // Pick out the function callee, if statically known. - var fn *ir.Node + var fn ir.Node switch call.Op() { case ir.OCALLFUNC: switch v := staticValue(call.Left()); { @@ -894,7 +894,7 @@ func (e *Escape) call(ks []EscHole, call, where *ir.Node) { // ks should contain the holes representing where the function // callee's results flows. fn is the statically-known callee function, // if any. -func (e *Escape) tagHole(ks []EscHole, fn *ir.Node, param *types.Field) EscHole { +func (e *Escape) tagHole(ks []EscHole, fn ir.Node, param *types.Field) EscHole { // If this is a dynamic call, we can't rely on param.Note. if fn == nil { return e.heapHole() @@ -935,7 +935,7 @@ func (e *Escape) tagHole(ks []EscHole, fn *ir.Node, param *types.Field) EscHole // fn has not yet been analyzed, so its parameters and results // should be incorporated directly into the flow graph instead of // relying on its escape analysis tagging. 
-func (e *Escape) inMutualBatch(fn *ir.Node) bool { +func (e *Escape) inMutualBatch(fn ir.Node) bool { if fn.Name().Defn != nil && fn.Name().Defn.Esc() < EscFuncTagged { if fn.Name().Defn.Esc() == EscFuncUnknown { base.Fatalf("graph inconsistency") @@ -960,11 +960,11 @@ type EscHole struct { type EscNote struct { next *EscNote - where *ir.Node + where ir.Node why string } -func (k EscHole) note(where *ir.Node, why string) EscHole { +func (k EscHole) note(where ir.Node, why string) EscHole { if where == nil || why == "" { base.Fatalf("note: missing where/why") } @@ -986,10 +986,10 @@ func (k EscHole) shift(delta int) EscHole { return k } -func (k EscHole) deref(where *ir.Node, why string) EscHole { return k.shift(1).note(where, why) } -func (k EscHole) addr(where *ir.Node, why string) EscHole { return k.shift(-1).note(where, why) } +func (k EscHole) deref(where ir.Node, why string) EscHole { return k.shift(1).note(where, why) } +func (k EscHole) addr(where ir.Node, why string) EscHole { return k.shift(-1).note(where, why) } -func (k EscHole) dotType(t *types.Type, where *ir.Node, why string) EscHole { +func (k EscHole) dotType(t *types.Type, where ir.Node, why string) EscHole { if !t.IsInterface() && !isdirectiface(t) { k = k.shift(1) } @@ -1026,7 +1026,7 @@ func (e *Escape) teeHole(ks ...EscHole) EscHole { return loc.asHole() } -func (e *Escape) dcl(n *ir.Node) EscHole { +func (e *Escape) dcl(n ir.Node) EscHole { loc := e.oldLoc(n) loc.loopDepth = e.loopDepth return loc.asHole() @@ -1035,7 +1035,7 @@ func (e *Escape) dcl(n *ir.Node) EscHole { // spill allocates a new location associated with expression n, flows // its address to k, and returns a hole that flows values to it. It's // intended for use with most expressions that allocate storage. -func (e *Escape) spill(k EscHole, n *ir.Node) EscHole { +func (e *Escape) spill(k EscHole, n ir.Node) EscHole { loc := e.newLoc(n, true) e.flow(k.addr(n, "spill"), loc) return loc.asHole() @@ -1052,7 +1052,7 @@ func (e *Escape) later(k EscHole) EscHole { // canonicalNode returns the canonical *Node that n logically // represents. -func canonicalNode(n *ir.Node) *ir.Node { +func canonicalNode(n ir.Node) ir.Node { if n != nil && n.Op() == ir.ONAME && n.Name().IsClosureVar() { n = n.Name().Defn if n.Name().IsClosureVar() { @@ -1063,7 +1063,7 @@ func canonicalNode(n *ir.Node) *ir.Node { return n } -func (e *Escape) newLoc(n *ir.Node, transient bool) *EscLocation { +func (e *Escape) newLoc(n ir.Node, transient bool) *EscLocation { if e.curfn == nil { base.Fatalf("e.curfn isn't set") } @@ -1096,7 +1096,7 @@ func (e *Escape) newLoc(n *ir.Node, transient bool) *EscLocation { return loc } -func (e *Escape) oldLoc(n *ir.Node) *EscLocation { +func (e *Escape) oldLoc(n ir.Node) *EscLocation { n = canonicalNode(n) return n.Opt().(*EscLocation) } @@ -1394,7 +1394,7 @@ func (e *Escape) outlives(l, other *EscLocation) bool { } // containsClosure reports whether c is a closure contained within f. -func containsClosure(f, c *ir.Node) bool { +func containsClosure(f, c ir.Node) bool { if f.Op() != ir.ODCLFUNC || c.Op() != ir.ODCLFUNC { base.Fatalf("bad containsClosure: %v, %v", f, c) } @@ -1429,7 +1429,7 @@ func (l *EscLocation) leakTo(sink *EscLocation, derefs int) { l.paramEsc.AddHeap(derefs) } -func (e *Escape) finish(fns []*ir.Node) { +func (e *Escape) finish(fns []ir.Node) { // Record parameter tags for package export data. 
for _, fn := range fns { fn.SetEsc(EscFuncTagged) @@ -1574,7 +1574,7 @@ func ParseLeaks(s string) EscLeaks { return l } -func escapes(all []*ir.Node) { +func escapes(all []ir.Node) { visitBottomUp(all, escapeFuncs) } @@ -1607,7 +1607,7 @@ const ( ) // funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way. -func funcSym(fn *ir.Node) *types.Sym { +func funcSym(fn ir.Node) *types.Sym { if fn == nil || fn.Func().Nname == nil { return nil } @@ -1622,7 +1622,7 @@ var ( nonlooping = ir.Nod(ir.OXXX, nil, nil) ) -func isSliceSelfAssign(dst, src *ir.Node) bool { +func isSliceSelfAssign(dst, src ir.Node) bool { // Detect the following special case. // // func (b *Buffer) Foo() { @@ -1672,7 +1672,7 @@ func isSliceSelfAssign(dst, src *ir.Node) bool { // isSelfAssign reports whether assignment from src to dst can // be ignored by the escape analysis as it's effectively a self-assignment. -func isSelfAssign(dst, src *ir.Node) bool { +func isSelfAssign(dst, src ir.Node) bool { if isSliceSelfAssign(dst, src) { return true } @@ -1709,7 +1709,7 @@ func isSelfAssign(dst, src *ir.Node) bool { // mayAffectMemory reports whether evaluation of n may affect the program's // memory state. If the expression can't affect memory state, then it can be // safely ignored by the escape analysis. -func mayAffectMemory(n *ir.Node) bool { +func mayAffectMemory(n ir.Node) bool { // We may want to use a list of "memory safe" ops instead of generally // "side-effect free", which would include all calls and other ops that can // allocate or change global state. For now, it's safer to start with the latter. @@ -1736,7 +1736,7 @@ func mayAffectMemory(n *ir.Node) bool { // heapAllocReason returns the reason the given Node must be heap // allocated, or the empty string if it doesn't. -func heapAllocReason(n *ir.Node) string { +func heapAllocReason(n ir.Node) string { if n.Type() == nil { return "" } @@ -1781,7 +1781,7 @@ func heapAllocReason(n *ir.Node) string { // by "increasing" the "value" of n.Esc to EscHeap. // Storage is allocated as necessary to allow the address // to be taken. -func addrescapes(n *ir.Node) { +func addrescapes(n ir.Node) { switch n.Op() { default: // Unexpected Op, probably due to a previous type error. Ignore. @@ -1847,7 +1847,7 @@ func addrescapes(n *ir.Node) { } // moveToHeap records the parameter or local variable n as moved to the heap. -func moveToHeap(n *ir.Node) { +func moveToHeap(n ir.Node) { if base.Flag.LowerR != 0 { ir.Dump("MOVE", n) } @@ -1939,7 +1939,7 @@ const unsafeUintptrTag = "unsafe-uintptr" // marked go:uintptrescapes. const uintptrEscapesTag = "uintptr-escapes" -func (e *Escape) paramTag(fn *ir.Node, narg int, f *types.Field) string { +func (e *Escape) paramTag(fn ir.Node, narg int, f *types.Field) string { name := func() string { if f.Sym != nil { return f.Sym.Name diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index ace461fc90fec..10033793bf7a7 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -21,10 +21,10 @@ func exportf(bout *bio.Writer, format string, args ...interface{}) { } } -var asmlist []*ir.Node +var asmlist []ir.Node // exportsym marks n for export (or reexport). 
-func exportsym(n *ir.Node) { +func exportsym(n ir.Node) { if n.Sym().OnExportList() { return } @@ -41,7 +41,7 @@ func initname(s string) bool { return s == "init" } -func autoexport(n *ir.Node, ctxt ir.Class) { +func autoexport(n ir.Node, ctxt ir.Class) { if n.Sym().Pkg != ir.LocalPkg { return } @@ -74,7 +74,7 @@ func dumpexport(bout *bio.Writer) { } } -func importsym(ipkg *types.Pkg, s *types.Sym, op ir.Op) *ir.Node { +func importsym(ipkg *types.Pkg, s *types.Sym, op ir.Op) ir.Node { n := ir.AsNode(s.PkgDef()) if n == nil { // iimport should have created a stub ONONAME @@ -120,7 +120,7 @@ func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type { // importobj declares symbol s as an imported object representable by op. // ipkg is the package being imported -func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) *ir.Node { +func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) ir.Node { n := importsym(ipkg, s, op) if n.Op() != ir.ONONAME { if n.Op() == op && (n.Class() != ctxt || !types.Identical(n.Type(), t)) { diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go index a89ff528e5266..44e918f2c1794 100644 --- a/src/cmd/compile/internal/gc/gen.go +++ b/src/cmd/compile/internal/gc/gen.go @@ -30,13 +30,13 @@ func sysvar(name string) *obj.LSym { // isParamStackCopy reports whether this is the on-stack copy of a // function parameter that moved to the heap. -func isParamStackCopy(n *ir.Node) bool { +func isParamStackCopy(n ir.Node) bool { return n.Op() == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Name().Param.Heapaddr != nil } // isParamHeapCopy reports whether this is the on-heap copy of // a function parameter that moved to the heap. -func isParamHeapCopy(n *ir.Node) bool { +func isParamHeapCopy(n ir.Node) bool { return n.Op() == ir.ONAME && n.Class() == ir.PAUTOHEAP && n.Name().Param.Stackcopy != nil } @@ -52,7 +52,7 @@ func autotmpname(n int) string { } // make a new Node off the books -func tempAt(pos src.XPos, curfn *ir.Node, t *types.Type) *ir.Node { +func tempAt(pos src.XPos, curfn ir.Node, t *types.Type) ir.Node { if curfn == nil { base.Fatalf("no curfn for tempAt") } @@ -83,6 +83,6 @@ func tempAt(pos src.XPos, curfn *ir.Node, t *types.Type) *ir.Node { return n.Orig() } -func temp(t *types.Type) *ir.Node { +func temp(t *types.Type) ir.Node { return tempAt(base.Pos, Curfn, t) } diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index 8642cc4a30566..84e6bc5faf388 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -128,11 +128,11 @@ var ( iscmp [ir.OEND]bool ) -var xtop []*ir.Node +var xtop []ir.Node -var exportlist []*ir.Node +var exportlist []ir.Node -var importlist []*ir.Node // imported functions and methods with inlinable bodies +var importlist []ir.Node // imported functions and methods with inlinable bodies var ( funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym) @@ -141,7 +141,7 @@ var ( var dclcontext ir.Class // PEXTERN/PAUTO -var Curfn *ir.Node +var Curfn ir.Node var Widthptr int @@ -156,7 +156,7 @@ var instrumenting bool // Whether we are tracking lexical scopes for DWARF. 
var trackScopes bool -var nodfp *ir.Node +var nodfp ir.Node var autogeneratedPos src.XPos @@ -193,7 +193,7 @@ var thearch Arch var ( staticuint64s, - zerobase *ir.Node + zerobase ir.Node assertE2I, assertE2I2, diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index 3416a00cd17cf..950033a8a3189 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -47,7 +47,7 @@ type Progs struct { next *obj.Prog // next Prog pc int64 // virtual PC; count of Progs pos src.XPos // position to use for new Progs - curfn *ir.Node // fn these Progs are for + curfn ir.Node // fn these Progs are for progcache []obj.Prog // local progcache cacheidx int // first free element of progcache @@ -57,7 +57,7 @@ type Progs struct { // newProgs returns a new Progs for fn. // worker indicates which of the backend workers will use the Progs. -func newProgs(fn *ir.Node, worker int) *Progs { +func newProgs(fn ir.Node, worker int) *Progs { pp := new(Progs) if base.Ctxt.CanReuseProgs() { sz := len(sharedProgArray) / base.Flag.LowerC @@ -174,7 +174,7 @@ func (pp *Progs) Appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16 return q } -func (pp *Progs) settext(fn *ir.Node) { +func (pp *Progs) settext(fn ir.Node) { if pp.Text != nil { base.Fatalf("Progs.settext called twice") } @@ -290,7 +290,7 @@ func initLSym(f *ir.Func, hasBody bool) { base.Ctxt.InitTextSym(f.LSym, flag) } -func ggloblnod(nam *ir.Node) { +func ggloblnod(nam ir.Node) { s := nam.Sym().Linksym() s.Gotype = ngotype(nam).Linksym() flags := 0 diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 281e2de43dd0d..ef52e40f21678 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -259,8 +259,8 @@ func iexport(out *bufio.Writer) { p := iexporter{ allPkgs: map[*types.Pkg]bool{}, stringIndex: map[string]uint64{}, - declIndex: map[*ir.Node]uint64{}, - inlineIndex: map[*ir.Node]uint64{}, + declIndex: map[ir.Node]uint64{}, + inlineIndex: map[ir.Node]uint64{}, typIndex: map[*types.Type]uint64{}, } @@ -314,9 +314,9 @@ func iexport(out *bufio.Writer) { // we're writing out the main index, which is also read by // non-compiler tools and includes a complete package description // (i.e., name and height). -func (w *exportWriter) writeIndex(index map[*ir.Node]uint64, mainIndex bool) { +func (w *exportWriter) writeIndex(index map[ir.Node]uint64, mainIndex bool) { // Build a map from packages to objects from that package. - pkgObjs := map[*types.Pkg][]*ir.Node{} + pkgObjs := map[*types.Pkg][]ir.Node{} // For the main index, make sure to include every package that // we reference, even if we're not exporting (or reexporting) @@ -374,8 +374,8 @@ type iexporter struct { stringIndex map[string]uint64 data0 intWriter - declIndex map[*ir.Node]uint64 - inlineIndex map[*ir.Node]uint64 + declIndex map[ir.Node]uint64 + inlineIndex map[ir.Node]uint64 typIndex map[*types.Type]uint64 } @@ -394,7 +394,7 @@ func (p *iexporter) stringOff(s string) uint64 { } // pushDecl adds n to the declaration work queue, if not already present. 
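
The pushDecl change that follows also shows the exporter's worklist discipline: a declaration is queued at most once, keyed by the node itself. A minimal standalone sketch of that pattern (exporter, pushDecl, and the string keys here are illustrative stand-ins, not the compiler's types):

	package main

	import "fmt"

	// pushDecl-style worklist: enqueue each item at most once by
	// recording it in an index map keyed by the item itself.
	type exporter struct {
		declIndex map[string]uint64
		todo      []string
	}

	func (p *exporter) pushDecl(n string) {
		if _, present := p.declIndex[n]; present {
			return // already queued or already written
		}
		p.declIndex[n] = uint64(len(p.declIndex)) // mark n as queued
		p.todo = append(p.todo, n)
	}

	func main() {
		p := &exporter{declIndex: map[string]uint64{}}
		p.pushDecl("f")
		p.pushDecl("g")
		p.pushDecl("f")     // deduplicated
		fmt.Println(p.todo) // [f g]
	}
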
-func (p *iexporter) pushDecl(n *ir.Node) { +func (p *iexporter) pushDecl(n ir.Node) { if n.Sym() == nil || ir.AsNode(n.Sym().Def) != n && n.Op() != ir.OTYPE { base.Fatalf("weird Sym: %v, %v", n, n.Sym()) } @@ -423,7 +423,7 @@ type exportWriter struct { prevColumn int64 } -func (p *iexporter) doDecl(n *ir.Node) { +func (p *iexporter) doDecl(n ir.Node) { w := p.newWriter() w.setPkg(n.Sym().Pkg, false) @@ -515,7 +515,7 @@ func (w *exportWriter) tag(tag byte) { w.data.WriteByte(tag) } -func (p *iexporter) doInline(f *ir.Node) { +func (p *iexporter) doInline(f ir.Node) { w := p.newWriter() w.setPkg(fnpkg(f), false) @@ -570,7 +570,7 @@ func (w *exportWriter) pkg(pkg *types.Pkg) { w.string(pkg.Path) } -func (w *exportWriter) qualifiedIdent(n *ir.Node) { +func (w *exportWriter) qualifiedIdent(n ir.Node) { // Ensure any referenced declarations are written out too. w.p.pushDecl(n) @@ -955,12 +955,12 @@ func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } // Compiler-specific extensions. -func (w *exportWriter) varExt(n *ir.Node) { +func (w *exportWriter) varExt(n ir.Node) { w.linkname(n.Sym()) w.symIdx(n.Sym()) } -func (w *exportWriter) funcExt(n *ir.Node) { +func (w *exportWriter) funcExt(n ir.Node) { w.linkname(n.Sym()) w.symIdx(n.Sym()) @@ -1037,7 +1037,7 @@ func (w *exportWriter) stmtList(list ir.Nodes) { w.op(ir.OEND) } -func (w *exportWriter) node(n *ir.Node) { +func (w *exportWriter) node(n ir.Node) { if ir.OpPrec[n.Op()] < 0 { w.stmt(n) } else { @@ -1047,7 +1047,7 @@ func (w *exportWriter) node(n *ir.Node) { // Caution: stmt will emit more than one node for statement nodes n that have a non-empty // n.Ninit and where n cannot have a natural init section (such as in "if", "for", etc.). -func (w *exportWriter) stmt(n *ir.Node) { +func (w *exportWriter) stmt(n ir.Node) { if n.Init().Len() > 0 && !ir.StmtWithInit(n.Op()) { // can't use stmtList here since we don't want the final OEND for _, n := range n.Init().Slice() { @@ -1095,7 +1095,7 @@ func (w *exportWriter) stmt(n *ir.Node) { w.op(ir.OAS2) w.pos(n.Pos()) w.exprList(n.List()) - w.exprList(ir.AsNodes([]*ir.Node{n.Right()})) + w.exprList(ir.AsNodes([]ir.Node{n.Right()})) case ir.ORETURN: w.op(ir.ORETURN) @@ -1164,7 +1164,7 @@ func (w *exportWriter) stmt(n *ir.Node) { } } -func (w *exportWriter) caseList(sw *ir.Node) { +func (w *exportWriter) caseList(sw ir.Node) { namedTypeSwitch := sw.Op() == ir.OSWITCH && sw.Left() != nil && sw.Left().Op() == ir.OTYPESW && sw.Left().Left() != nil cases := sw.List().Slice() @@ -1189,7 +1189,7 @@ func (w *exportWriter) exprList(list ir.Nodes) { w.op(ir.OEND) } -func (w *exportWriter) expr(n *ir.Node) { +func (w *exportWriter) expr(n ir.Node) { // from nodefmt (fmt.go) // // nodefmt reverts nodes back to their original - we don't need to do @@ -1430,7 +1430,7 @@ func (w *exportWriter) op(op ir.Op) { w.uint64(uint64(op)) } -func (w *exportWriter) exprsOrNil(a, b *ir.Node) { +func (w *exportWriter) exprsOrNil(a, b ir.Node) { ab := 0 if a != nil { ab |= 1 @@ -1455,7 +1455,7 @@ func (w *exportWriter) elemList(list ir.Nodes) { } } -func (w *exportWriter) localName(n *ir.Node) { +func (w *exportWriter) localName(n ir.Node) { // Escape analysis happens after inline bodies are saved, but // we're using the same ONAME nodes, so we might still see // PAUTOHEAP here. 
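
Before the iimport.go hunks, note why switching declIndex and inlineIndex from map[*ir.Node]uint64 to map[ir.Node]uint64 above is behavior-preserving: Go hashes an interface key by its dynamic type and value, so two interface values wrapping the same pointer are the same key. A minimal sketch (Node and name are hypothetical stand-ins for the compiler's types):

	package main

	import "fmt"

	// Node stands in for the ir.Node interface; *name stands in for
	// the one concrete node type, as *ir.Node used to be.
	type Node interface{ Sym() string }

	type name struct{ sym string }

	func (n *name) Sym() string { return n.sym }

	func main() {
		declIndex := map[Node]uint64{}

		n := &name{sym: "foo"}
		var a, b Node = n, n // two interface values, same pointer inside

		declIndex[a] = 42
		fmt.Println(declIndex[b]) // 42: same dynamic type and value, same key
	}
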
diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 5d845d90e84dc..77078c118a8d3 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -41,7 +41,7 @@ var ( inlineImporter = map[*types.Sym]iimporterAndOffset{} ) -func expandDecl(n *ir.Node) { +func expandDecl(n ir.Node) { if n.Op() != ir.ONONAME { return } @@ -55,7 +55,7 @@ func expandDecl(n *ir.Node) { r.doDecl(n) } -func expandInline(fn *ir.Node) { +func expandInline(fn ir.Node) { if fn.Func().Inl.Body != nil { return } @@ -68,7 +68,7 @@ func expandInline(fn *ir.Node) { r.doInline(fn) } -func importReaderFor(n *ir.Node, importers map[*types.Sym]iimporterAndOffset) *importReader { +func importReaderFor(n ir.Node, importers map[*types.Sym]iimporterAndOffset) *importReader { x, ok := importers[n.Sym()] if !ok { return nil @@ -281,7 +281,7 @@ func (r *importReader) setPkg() { r.currPkg = r.pkg() } -func (r *importReader) doDecl(n *ir.Node) { +func (r *importReader) doDecl(n ir.Node) { if n.Op() != ir.ONONAME { base.Fatalf("doDecl: unexpected Op for %v: %v", n.Sym(), n.Op()) } @@ -635,12 +635,12 @@ func (r *importReader) byte() byte { // Compiler-specific extensions. -func (r *importReader) varExt(n *ir.Node) { +func (r *importReader) varExt(n ir.Node) { r.linkname(n.Sym()) r.symIdx(n.Sym()) } -func (r *importReader) funcExt(n *ir.Node) { +func (r *importReader) funcExt(n ir.Node) { r.linkname(n.Sym()) r.symIdx(n.Sym()) @@ -695,7 +695,7 @@ func (r *importReader) typeExt(t *types.Type) { // so we can use index to reference the symbol. var typeSymIdx = make(map[*types.Type][2]int64) -func (r *importReader) doInline(n *ir.Node) { +func (r *importReader) doInline(n ir.Node) { if len(n.Func().Inl.Body) != 0 { base.Fatalf("%v already has inline body", n) } @@ -710,7 +710,7 @@ func (r *importReader) doInline(n *ir.Node) { // (not doing so can cause significant performance // degradation due to unnecessary calls to empty // functions). - body = []*ir.Node{} + body = []ir.Node{} } n.Func().Inl.Body = body @@ -740,8 +740,8 @@ func (r *importReader) doInline(n *ir.Node) { // unrefined nodes (since this is what the importer uses). The respective case // entries are unreachable in the importer. 
-func (r *importReader) stmtList() []*ir.Node { - var list []*ir.Node +func (r *importReader) stmtList() []ir.Node { + var list []ir.Node for { n := r.node() if n == nil { @@ -758,10 +758,10 @@ func (r *importReader) stmtList() []*ir.Node { return list } -func (r *importReader) caseList(sw *ir.Node) []*ir.Node { +func (r *importReader) caseList(sw ir.Node) []ir.Node { namedTypeSwitch := sw.Op() == ir.OSWITCH && sw.Left() != nil && sw.Left().Op() == ir.OTYPESW && sw.Left().Left() != nil - cases := make([]*ir.Node, r.uint64()) + cases := make([]ir.Node, r.uint64()) for i := range cases { cas := ir.NodAt(r.pos(), ir.OCASE, nil, nil) cas.PtrList().Set(r.stmtList()) @@ -780,8 +780,8 @@ func (r *importReader) caseList(sw *ir.Node) []*ir.Node { return cases } -func (r *importReader) exprList() []*ir.Node { - var list []*ir.Node +func (r *importReader) exprList() []ir.Node { + var list []ir.Node for { n := r.expr() if n == nil { @@ -792,7 +792,7 @@ func (r *importReader) exprList() []*ir.Node { return list } -func (r *importReader) expr() *ir.Node { +func (r *importReader) expr() ir.Node { n := r.node() if n != nil && n.Op() == ir.OBLOCK { base.Fatalf("unexpected block node: %v", n) @@ -801,7 +801,7 @@ func (r *importReader) expr() *ir.Node { } // TODO(gri) split into expr and stmt -func (r *importReader) node() *ir.Node { +func (r *importReader) node() ir.Node { switch op := r.op(); op { // expressions // case OPAREN: @@ -814,7 +814,7 @@ func (r *importReader) node() *ir.Node { pos := r.pos() typ := r.typ() - var n *ir.Node + var n ir.Node if typ.HasNil() { n = nodnil() } else { @@ -906,7 +906,7 @@ func (r *importReader) node() *ir.Node { case ir.OSLICE, ir.OSLICE3: n := ir.NodAt(r.pos(), op, r.expr(), nil) low, high := r.exprsOrNil() - var max *ir.Node + var max ir.Node if n.Op().IsSlice3() { max = r.expr() } @@ -970,7 +970,7 @@ func (r *importReader) node() *ir.Node { pos := r.pos() lhs := npos(pos, dclname(r.ident())) typ := typenod(r.typ()) - return npos(pos, liststmt(variter([]*ir.Node{lhs}, typ, nil))) // TODO(gri) avoid list creation + return npos(pos, liststmt(variter([]ir.Node{lhs}, typ, nil))) // TODO(gri) avoid list creation // case ODCLFIELD: // unimplemented @@ -1082,9 +1082,9 @@ func (r *importReader) op() ir.Op { return ir.Op(r.uint64()) } -func (r *importReader) elemList() []*ir.Node { +func (r *importReader) elemList() []ir.Node { c := r.uint64() - list := make([]*ir.Node, c) + list := make([]ir.Node, c) for i := range list { s := r.ident() list[i] = nodSym(ir.OSTRUCTKEY, r.expr(), s) @@ -1092,7 +1092,7 @@ func (r *importReader) elemList() []*ir.Node { return list } -func (r *importReader) exprsOrNil() (a, b *ir.Node) { +func (r *importReader) exprsOrNil() (a, b ir.Node) { ab := r.uint64() if ab&1 != 0 { a = r.expr() diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index 02a6175c6bf9d..2b7ecd1d05d84 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -33,7 +33,7 @@ func renameinit() *types.Sym { // 1) Initialize all of the packages the current package depends on. // 2) Initialize all the variables that have initializers. // 3) Run any init functions. 
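
fninit, whose change follows, builds the generated init function around the three steps just listed. In source terms, the guarantees it implements look like this (a self-contained example of the language rule, not compiler code):

	package main

	import "fmt"

	// Package-level variables are initialized in dependency order
	// before any init function runs, and init functions run before main.
	var a = b + 1 // initialized second: depends on b
	var b = f()   // initialized first

	func f() int { fmt.Println("init b"); return 1 }

	func init() { fmt.Println("init func, a =", a) }

	func main() { fmt.Println("main") }

	// Output:
	// init b
	// init func, a = 2
	// main
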
-func fninit(n []*ir.Node) { +func fninit(n []ir.Node) { nf := initOrder(n) var deps []*obj.LSym // initTask records for packages the current package depends on diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go index 71da72f0cfe93..1003f131b8fc6 100644 --- a/src/cmd/compile/internal/gc/initorder.go +++ b/src/cmd/compile/internal/gc/initorder.go @@ -64,7 +64,7 @@ const ( type InitOrder struct { // blocking maps initialization assignments to the assignments // that depend on it. - blocking map[*ir.Node][]*ir.Node + blocking map[ir.Node][]ir.Node // ready is the queue of Pending initialization assignments // that are ready for initialization. @@ -75,13 +75,13 @@ type InitOrder struct { // package-level declarations (in declaration order) and outputs the // corresponding list of statements to include in the init() function // body. -func initOrder(l []*ir.Node) []*ir.Node { +func initOrder(l []ir.Node) []ir.Node { s := InitSchedule{ - initplans: make(map[*ir.Node]*InitPlan), - inittemps: make(map[*ir.Node]*ir.Node), + initplans: make(map[ir.Node]*InitPlan), + inittemps: make(map[ir.Node]ir.Node), } o := InitOrder{ - blocking: make(map[*ir.Node][]*ir.Node), + blocking: make(map[ir.Node][]ir.Node), } // Process all package-level assignment in declaration order. @@ -110,7 +110,7 @@ func initOrder(l []*ir.Node) []*ir.Node { // first. base.ExitIfErrors() - findInitLoopAndExit(firstLHS(n), new([]*ir.Node)) + findInitLoopAndExit(firstLHS(n), new([]ir.Node)) base.Fatalf("initialization unfinished, but failed to identify loop") } } @@ -125,7 +125,7 @@ func initOrder(l []*ir.Node) []*ir.Node { return s.out } -func (o *InitOrder) processAssign(n *ir.Node) { +func (o *InitOrder) processAssign(n ir.Node) { if n.Initorder() != InitNotStarted || n.Offset() != types.BADWIDTH { base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Offset()) } @@ -154,9 +154,9 @@ func (o *InitOrder) processAssign(n *ir.Node) { // flushReady repeatedly applies initialize to the earliest (in // declaration order) assignment ready for initialization and updates // the inverse dependency ("blocking") graph. -func (o *InitOrder) flushReady(initialize func(*ir.Node)) { +func (o *InitOrder) flushReady(initialize func(ir.Node)) { for o.ready.Len() != 0 { - n := heap.Pop(&o.ready).(*ir.Node) + n := heap.Pop(&o.ready).(ir.Node) if n.Initorder() != InitPending || n.Offset() != 0 { base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Offset()) } @@ -183,7 +183,7 @@ func (o *InitOrder) flushReady(initialize func(*ir.Node)) { // path points to a slice used for tracking the sequence of // variables/functions visited. Using a pointer to a slice allows the // slice capacity to grow and limit reallocations. -func findInitLoopAndExit(n *ir.Node, path *[]*ir.Node) { +func findInitLoopAndExit(n ir.Node, path *[]ir.Node) { // We implement a simple DFS loop-finding algorithm. This // could be faster, but initialization cycles are rare. @@ -196,7 +196,7 @@ func findInitLoopAndExit(n *ir.Node, path *[]*ir.Node) { // There might be multiple loops involving n; by sorting // references, we deterministically pick the one reported. - refers := collectDeps(n.Name().Defn, false).Sorted(func(ni, nj *ir.Node) bool { + refers := collectDeps(n.Name().Defn, false).Sorted(func(ni, nj ir.Node) bool { return ni.Pos().Before(nj.Pos()) }) @@ -215,7 +215,7 @@ func findInitLoopAndExit(n *ir.Node, path *[]*ir.Node) { // reportInitLoopAndExit reports and initialization loop as an error // and exits. 
However, if l is not actually an initialization loop, it // simply returns instead. -func reportInitLoopAndExit(l []*ir.Node) { +func reportInitLoopAndExit(l []ir.Node) { // Rotate loop so that the earliest variable declaration is at // the start. i := -1 @@ -250,7 +250,7 @@ func reportInitLoopAndExit(l []*ir.Node) { // variables that declaration n depends on. If transitive is true, // then it also includes the transitive dependencies of any depended // upon functions (but not variables). -func collectDeps(n *ir.Node, transitive bool) ir.NodeSet { +func collectDeps(n ir.Node, transitive bool) ir.NodeSet { d := initDeps{transitive: transitive} switch n.Op() { case ir.OAS: @@ -270,12 +270,12 @@ type initDeps struct { seen ir.NodeSet } -func (d *initDeps) inspect(n *ir.Node) { ir.Inspect(n, d.visit) } +func (d *initDeps) inspect(n ir.Node) { ir.Inspect(n, d.visit) } func (d *initDeps) inspectList(l ir.Nodes) { ir.InspectList(l, d.visit) } // visit calls foundDep on any package-level functions or variables // referenced by n, if any. -func (d *initDeps) visit(n *ir.Node) bool { +func (d *initDeps) visit(n ir.Node) bool { switch n.Op() { case ir.OMETHEXPR: d.foundDep(methodExprName(n)) @@ -299,7 +299,7 @@ func (d *initDeps) visit(n *ir.Node) bool { // foundDep records that we've found a dependency on n by adding it to // seen. -func (d *initDeps) foundDep(n *ir.Node) { +func (d *initDeps) foundDep(n ir.Node) { // Can happen with method expressions involving interface // types; e.g., fixedbugs/issue4495.go. if n == nil { @@ -328,7 +328,7 @@ func (d *initDeps) foundDep(n *ir.Node) { // an OAS node's Pos may not be unique. For example, given the // declaration "var a, b = f(), g()", "a" must be ordered before "b", // but both OAS nodes use the "=" token's position as their Pos. -type declOrder []*ir.Node +type declOrder []ir.Node func (s declOrder) Len() int { return len(s) } func (s declOrder) Less(i, j int) bool { @@ -336,7 +336,7 @@ func (s declOrder) Less(i, j int) bool { } func (s declOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(*ir.Node)) } +func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(ir.Node)) } func (s *declOrder) Pop() interface{} { n := (*s)[len(*s)-1] *s = (*s)[:len(*s)-1] @@ -345,7 +345,7 @@ func (s *declOrder) Pop() interface{} { // firstLHS returns the first expression on the left-hand side of // assignment n. -func firstLHS(n *ir.Node) *ir.Node { +func firstLHS(n ir.Node) ir.Node { switch n.Op() { case ir.OAS: return n.Left() diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index f82c1282657a4..6310762c1f4d2 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -53,7 +53,7 @@ const ( // Get the function's package. For ordinary functions it's on the ->sym, but for imported methods // the ->sym can be re-used in the local package, so peel it off the receiver's type. -func fnpkg(fn *ir.Node) *types.Pkg { +func fnpkg(fn ir.Node) *types.Pkg { if ir.IsMethod(fn) { // method rcvr := fn.Type().Recv().Type @@ -73,7 +73,7 @@ func fnpkg(fn *ir.Node) *types.Pkg { // Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck // because they're a copy of an already checked body. -func typecheckinl(fn *ir.Node) { +func typecheckinl(fn ir.Node) { lno := setlineno(fn) expandInline(fn) @@ -111,7 +111,7 @@ func typecheckinl(fn *ir.Node) { // Caninl determines whether fn is inlineable. 
// If so, caninl saves fn->nbody in fn->inl and substitutes it with a copy. // fn and ->nbody will already have been typechecked. -func caninl(fn *ir.Node) { +func caninl(fn ir.Node) { if fn.Op() != ir.ODCLFUNC { base.Fatalf("caninl %v", fn) } @@ -207,7 +207,7 @@ func caninl(fn *ir.Node) { visitor := hairyVisitor{ budget: inlineMaxBudget, extraCallCost: cc, - usedLocals: make(map[*ir.Node]bool), + usedLocals: make(map[ir.Node]bool), } if visitor.visitList(fn.Body()) { reason = visitor.reason @@ -236,7 +236,7 @@ func caninl(fn *ir.Node) { // inlFlood marks n's inline body for export and recursively ensures // all called functions are marked too. -func inlFlood(n *ir.Node) { +func inlFlood(n ir.Node) { if n == nil { return } @@ -260,7 +260,7 @@ func inlFlood(n *ir.Node) { // Recursively identify all referenced functions for // reexport. We want to include even non-called functions, // because after inlining they might be callable. - ir.InspectList(ir.AsNodes(n.Func().Inl.Body), func(n *ir.Node) bool { + ir.InspectList(ir.AsNodes(n.Func().Inl.Body), func(n ir.Node) bool { switch n.Op() { case ir.OMETHEXPR: inlFlood(methodExprName(n)) @@ -300,7 +300,7 @@ type hairyVisitor struct { budget int32 reason string extraCallCost int32 - usedLocals map[*ir.Node]bool + usedLocals map[ir.Node]bool } // Look for anything we want to punt on. @@ -313,7 +313,7 @@ func (v *hairyVisitor) visitList(ll ir.Nodes) bool { return false } -func (v *hairyVisitor) visit(n *ir.Node) bool { +func (v *hairyVisitor) visit(n ir.Node) bool { if n == nil { return false } @@ -447,15 +447,15 @@ func (v *hairyVisitor) visit(n *ir.Node) bool { // inlcopylist (together with inlcopy) recursively copies a list of nodes, except // that it keeps the same ONAME, OTYPE, and OLITERAL nodes. It is used for copying // the body and dcls of an inlineable function. -func inlcopylist(ll []*ir.Node) []*ir.Node { - s := make([]*ir.Node, 0, len(ll)) +func inlcopylist(ll []ir.Node) []ir.Node { + s := make([]ir.Node, 0, len(ll)) for _, n := range ll { s = append(s, inlcopy(n)) } return s } -func inlcopy(n *ir.Node) *ir.Node { +func inlcopy(n ir.Node) ir.Node { if n == nil { return nil } @@ -479,7 +479,7 @@ func inlcopy(n *ir.Node) *ir.Node { return m } -func countNodes(n *ir.Node) int { +func countNodes(n ir.Node) int { if n == nil { return 0 } @@ -503,7 +503,7 @@ func countNodes(n *ir.Node) int { // Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any // calls made to inlineable functions. This is the external entry point. -func inlcalls(fn *ir.Node) { +func inlcalls(fn ir.Node) { savefn := Curfn Curfn = fn maxCost := int32(inlineMaxBudget) @@ -516,7 +516,7 @@ func inlcalls(fn *ir.Node) { // but allow inlining if there is a recursion cycle of many functions. // Most likely, the inlining will stop before we even hit the beginning of // the cycle again, but the map catches the unusual case. - inlMap := make(map[*ir.Node]bool) + inlMap := make(map[ir.Node]bool) fn = inlnode(fn, maxCost, inlMap) if fn != Curfn { base.Fatalf("inlnode replaced curfn") @@ -525,7 +525,7 @@ func inlcalls(fn *ir.Node) { } // Turn an OINLCALL into a statement. -func inlconv2stmt(n *ir.Node) { +func inlconv2stmt(n ir.Node) { n.SetOp(ir.OBLOCK) // n->ninit stays @@ -538,7 +538,7 @@ func inlconv2stmt(n *ir.Node) { // Turn an OINLCALL into a single valued expression. // The result of inlconv2expr MUST be assigned back to n, e.g. 
// n.Left = inlconv2expr(n.Left) -func inlconv2expr(n *ir.Node) *ir.Node { +func inlconv2expr(n ir.Node) ir.Node { r := n.Rlist().First() return addinit(r, append(n.Init().Slice(), n.Body().Slice()...)) } @@ -548,7 +548,7 @@ func inlconv2expr(n *ir.Node) *ir.Node { // containing the inlined statements on the first list element so // order will be preserved Used in return, oas2func and call // statements. -func inlconv2list(n *ir.Node) []*ir.Node { +func inlconv2list(n ir.Node) []ir.Node { if n.Op() != ir.OINLCALL || n.Rlist().Len() == 0 { base.Fatalf("inlconv2list %+v\n", n) } @@ -558,7 +558,7 @@ func inlconv2list(n *ir.Node) []*ir.Node { return s } -func inlnodelist(l ir.Nodes, maxCost int32, inlMap map[*ir.Node]bool) { +func inlnodelist(l ir.Nodes, maxCost int32, inlMap map[ir.Node]bool) { s := l.Slice() for i := range s { s[i] = inlnode(s[i], maxCost, inlMap) @@ -578,7 +578,7 @@ func inlnodelist(l ir.Nodes, maxCost int32, inlMap map[*ir.Node]bool) { // shorter and less complicated. // The result of inlnode MUST be assigned back to n, e.g. // n.Left = inlnode(n.Left) -func inlnode(n *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node { +func inlnode(n ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node { if n == nil { return n } @@ -707,7 +707,7 @@ func inlnode(n *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node { // inlCallee takes a function-typed expression and returns the underlying function ONAME // that it refers to if statically known. Otherwise, it returns nil. -func inlCallee(fn *ir.Node) *ir.Node { +func inlCallee(fn ir.Node) ir.Node { fn = staticValue(fn) switch { case fn.Op() == ir.OMETHEXPR: @@ -729,7 +729,7 @@ func inlCallee(fn *ir.Node) *ir.Node { return nil } -func staticValue(n *ir.Node) *ir.Node { +func staticValue(n ir.Node) ir.Node { for { if n.Op() == ir.OCONVNOP { n = n.Left() @@ -747,7 +747,7 @@ func staticValue(n *ir.Node) *ir.Node { // staticValue1 implements a simple SSA-like optimization. If n is a local variable // that is initialized and never reassigned, staticValue1 returns the initializer // expression. Otherwise, it returns nil. -func staticValue1(n *ir.Node) *ir.Node { +func staticValue1(n ir.Node) ir.Node { if n.Op() != ir.ONAME || n.Class() != ir.PAUTO || n.Name().Addrtaken() { return nil } @@ -757,7 +757,7 @@ func staticValue1(n *ir.Node) *ir.Node { return nil } - var rhs *ir.Node + var rhs ir.Node FindRHS: switch defn.Op() { case ir.OAS: @@ -791,7 +791,7 @@ FindRHS: // useful for -m output documenting the reason for inhibited optimizations. // NB: global variables are always considered to be re-assigned. // TODO: handle initial declaration not including an assignment and followed by a single assignment? 
-func reassigned(n *ir.Node) (bool, *ir.Node) { +func reassigned(n ir.Node) (bool, ir.Node) { if n.Op() != ir.ONAME { base.Fatalf("reassigned %v", n) } @@ -814,10 +814,10 @@ func reassigned(n *ir.Node) (bool, *ir.Node) { } type reassignVisitor struct { - name *ir.Node + name ir.Node } -func (v *reassignVisitor) visit(n *ir.Node) *ir.Node { +func (v *reassignVisitor) visit(n ir.Node) ir.Node { if n == nil { return nil } @@ -854,7 +854,7 @@ func (v *reassignVisitor) visit(n *ir.Node) *ir.Node { return nil } -func (v *reassignVisitor) visitList(l ir.Nodes) *ir.Node { +func (v *reassignVisitor) visitList(l ir.Nodes) ir.Node { for _, n := range l.Slice() { if a := v.visit(n); a != nil { return a @@ -863,7 +863,7 @@ func (v *reassignVisitor) visitList(l ir.Nodes) *ir.Node { return nil } -func inlParam(t *types.Field, as *ir.Node, inlvars map[*ir.Node]*ir.Node) *ir.Node { +func inlParam(t *types.Field, as ir.Node, inlvars map[ir.Node]ir.Node) ir.Node { n := ir.AsNode(t.Nname) if n == nil || ir.IsBlank(n) { return ir.BlankNode @@ -887,7 +887,7 @@ var inlgen int // parameters. // The result of mkinlcall MUST be assigned back to n, e.g. // n.Left = mkinlcall(n.Left, fn, isddd) -func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node { +func mkinlcall(n, fn ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node { if fn.Func().Inl == nil { if logopt.Enabled() { logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(Curfn), @@ -969,10 +969,10 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node } // Make temp names to use instead of the originals. - inlvars := make(map[*ir.Node]*ir.Node) + inlvars := make(map[ir.Node]ir.Node) // record formals/locals for later post-processing - var inlfvars []*ir.Node + var inlfvars []ir.Node // Handle captured variables when inlining closures. if fn.Name().Defn != nil { @@ -1040,7 +1040,7 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node } nreturns := 0 - ir.InspectList(ir.AsNodes(fn.Func().Inl.Body), func(n *ir.Node) bool { + ir.InspectList(ir.AsNodes(fn.Func().Inl.Body), func(n ir.Node) bool { if n != nil && n.Op() == ir.ORETURN { nreturns++ } @@ -1053,9 +1053,9 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node delayretvars := nreturns == 1 // temporaries for return values. - var retvars []*ir.Node + var retvars []ir.Node for i, t := range fn.Type().Results().Fields().Slice() { - var m *ir.Node + var m ir.Node if n := ir.AsNode(t.Nname); n != nil && !ir.IsBlank(n) && !strings.HasPrefix(n.Sym().Name, "~r") { m = inlvar(n) m = typecheck(m, ctxExpr) @@ -1093,7 +1093,7 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node // For non-dotted calls to variadic functions, we assign the // variadic parameter's temp name separately. - var vas *ir.Node + var vas ir.Node if recv := fn.Type().Recv(); recv != nil { as.PtrList().Append(inlParam(recv, as, inlvars)) @@ -1228,7 +1228,7 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node // Every time we expand a function we generate a new set of tmpnames, // PAUTO's in the calling functions, and link them off of the // PPARAM's, PAUTOS and PPARAMOUTs of the called function. -func inlvar(var_ *ir.Node) *ir.Node { +func inlvar(var_ ir.Node) ir.Node { if base.Flag.LowerM > 3 { fmt.Printf("inlvar %+v\n", var_) } @@ -1245,7 +1245,7 @@ func inlvar(var_ *ir.Node) *ir.Node { } // Synthesize a variable to store the inlined function's results in. 
-func retvar(t *types.Field, i int) *ir.Node { +func retvar(t *types.Field, i int) ir.Node { n := NewName(lookupN("~R", i)) n.SetType(t.Type) n.SetClass(ir.PAUTO) @@ -1257,7 +1257,7 @@ func retvar(t *types.Field, i int) *ir.Node { // Synthesize a variable to store the inlined function's arguments // when they come from a multiple return call. -func argvar(t *types.Type, i int) *ir.Node { +func argvar(t *types.Type, i int) ir.Node { n := NewName(lookupN("~arg", i)) n.SetType(t.Elem()) n.SetClass(ir.PAUTO) @@ -1274,13 +1274,13 @@ type inlsubst struct { retlabel *types.Sym // Temporary result variables. - retvars []*ir.Node + retvars []ir.Node // Whether result variables should be initialized at the // "return" statement. delayretvars bool - inlvars map[*ir.Node]*ir.Node + inlvars map[ir.Node]ir.Node // bases maps from original PosBase to PosBase with an extra // inlined call frame. @@ -1292,8 +1292,8 @@ type inlsubst struct { } // list inlines a list of nodes. -func (subst *inlsubst) list(ll ir.Nodes) []*ir.Node { - s := make([]*ir.Node, 0, ll.Len()) +func (subst *inlsubst) list(ll ir.Nodes) []ir.Node { + s := make([]ir.Node, 0, ll.Len()) for _, n := range ll.Slice() { s = append(s, subst.node(n)) } @@ -1304,7 +1304,7 @@ func (subst *inlsubst) list(ll ir.Nodes) []*ir.Node { // inlined function, substituting references to input/output // parameters with ones to the tmpnames, and substituting returns with // assignments to the output. -func (subst *inlsubst) node(n *ir.Node) *ir.Node { +func (subst *inlsubst) node(n ir.Node) ir.Node { if n == nil { return nil } @@ -1409,8 +1409,8 @@ func (subst *inlsubst) updatedPos(xpos src.XPos) src.XPos { return base.Ctxt.PosTable.XPos(pos) } -func pruneUnusedAutos(ll []*ir.Node, vis *hairyVisitor) []*ir.Node { - s := make([]*ir.Node, 0, len(ll)) +func pruneUnusedAutos(ll []ir.Node, vis *hairyVisitor) []ir.Node { + s := make([]ir.Node, 0, len(ll)) for _, n := range ll { if n.Class() == ir.PAUTO { if _, found := vis.usedLocals[n]; !found { @@ -1424,9 +1424,9 @@ func pruneUnusedAutos(ll []*ir.Node, vis *hairyVisitor) []*ir.Node { // devirtualize replaces interface method calls within fn with direct // concrete-type method calls where applicable. -func devirtualize(fn *ir.Node) { +func devirtualize(fn ir.Node) { Curfn = fn - ir.InspectList(fn.Body(), func(n *ir.Node) bool { + ir.InspectList(fn.Body(), func(n ir.Node) bool { if n.Op() == ir.OCALLINTER { devirtualizeCall(n) } @@ -1434,7 +1434,7 @@ func devirtualize(fn *ir.Node) { }) } -func devirtualizeCall(call *ir.Node) { +func devirtualizeCall(call ir.Node) { recv := staticValue(call.Left().Left()) if recv.Op() != ir.OCONVIFACE { return diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index a7d605f3ba66b..30ee57c02daa2 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -330,7 +330,7 @@ func Main(archInit func(*Arch)) { if base.Flag.LowerL != 0 { // Find functions that can be inlined and clone them before walk expands them. - visitBottomUp(xtop, func(list []*ir.Node, recursive bool) { + visitBottomUp(xtop, func(list []ir.Node, recursive bool) { numfns := numNonClosures(list) for _, n := range list { if !recursive || numfns > 1 { @@ -481,7 +481,7 @@ func Main(archInit func(*Arch)) { } // numNonClosures returns the number of functions in list which are not closures. 
-func numNonClosures(list []*ir.Node) int { +func numNonClosures(list []ir.Node) int { count := 0 for _, n := range list { if n.Func().OClosure == nil { diff --git a/src/cmd/compile/internal/gc/mkbuiltin.go b/src/cmd/compile/internal/gc/mkbuiltin.go index 8fa6d02f2c2c0..d763f1ebee8e9 100644 --- a/src/cmd/compile/internal/gc/mkbuiltin.go +++ b/src/cmd/compile/internal/gc/mkbuiltin.go @@ -207,7 +207,7 @@ func (i *typeInterner) fields(fl *ast.FieldList, keepNames bool) string { } } } - return fmt.Sprintf("[]*ir.Node{%s}", strings.Join(res, ", ")) + return fmt.Sprintf("[]ir.Node{%s}", strings.Join(res, ", ")) } func intconst(e ast.Expr) int64 { diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index d9642f4b672f9..950d50904795d 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -152,7 +152,7 @@ type noder struct { lastCloseScopePos syntax.Pos } -func (p *noder) funcBody(fn *ir.Node, block *syntax.BlockStmt) { +func (p *noder) funcBody(fn ir.Node, block *syntax.BlockStmt) { oldScope := p.scope p.scope = 0 funchdr(fn) @@ -160,7 +160,7 @@ func (p *noder) funcBody(fn *ir.Node, block *syntax.BlockStmt) { if block != nil { body := p.stmts(block.List) if body == nil { - body = []*ir.Node{ir.Nod(ir.OEMPTY, nil, nil)} + body = []ir.Node{ir.Nod(ir.OEMPTY, nil, nil)} } fn.PtrBody().Set(body) @@ -294,7 +294,7 @@ func (p *noder) node() { clearImports() } -func (p *noder) decls(decls []syntax.Decl) (l []*ir.Node) { +func (p *noder) decls(decls []syntax.Decl) (l []ir.Node) { var cs constState for _, decl := range decls { @@ -378,11 +378,11 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) { my.Block = 1 // at top level } -func (p *noder) varDecl(decl *syntax.VarDecl) []*ir.Node { +func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node { names := p.declNames(decl.NameList) typ := p.typeExprOrNil(decl.Type) - var exprs []*ir.Node + var exprs []ir.Node if decl.Values != nil { exprs = p.exprList(decl.Values) } @@ -414,12 +414,12 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []*ir.Node { // constant declarations are handled correctly (e.g., issue 15550). 
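
constState, defined next, carries a const group's last explicit type and expression list so a ConstSpec with no values repeats them under the incremented iota. That is exactly the language rule below (plain Go, independent of the compiler):

	package main

	import "fmt"

	const (
		KB = 1 << (10 * (iota + 1)) // iota = 0
		MB                          // repeats the expression with iota = 1
		GB                          // iota = 2
	)

	func main() { fmt.Println(KB, MB, GB) } // 1024 1048576 1073741824
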
type constState struct { group *syntax.Group - typ *ir.Node - values []*ir.Node + typ ir.Node + values []ir.Node iota int64 } -func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*ir.Node { +func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node { if decl.Group == nil || decl.Group != cs.group { *cs = constState{ group: decl.Group, @@ -433,7 +433,7 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*ir.Node { names := p.declNames(decl.NameList) typ := p.typeExprOrNil(decl.Type) - var values []*ir.Node + var values []ir.Node if decl.Values != nil { values = p.exprList(decl.Values) cs.typ, cs.values = typ, values @@ -444,7 +444,7 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*ir.Node { typ, values = cs.typ, cs.values } - nn := make([]*ir.Node, 0, len(names)) + nn := make([]ir.Node, 0, len(names)) for i, n := range names { if i >= len(values) { base.Errorf("missing value in const declaration") @@ -474,7 +474,7 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*ir.Node { return nn } -func (p *noder) typeDecl(decl *syntax.TypeDecl) *ir.Node { +func (p *noder) typeDecl(decl *syntax.TypeDecl) ir.Node { n := p.declName(decl.Name) n.SetOp(ir.OTYPE) declare(n, dclcontext) @@ -500,21 +500,21 @@ func (p *noder) typeDecl(decl *syntax.TypeDecl) *ir.Node { return nod } -func (p *noder) declNames(names []*syntax.Name) []*ir.Node { - nodes := make([]*ir.Node, 0, len(names)) +func (p *noder) declNames(names []*syntax.Name) []ir.Node { + nodes := make([]ir.Node, 0, len(names)) for _, name := range names { nodes = append(nodes, p.declName(name)) } return nodes } -func (p *noder) declName(name *syntax.Name) *ir.Node { +func (p *noder) declName(name *syntax.Name) ir.Node { n := dclname(p.name(name)) n.SetPos(p.pos(name)) return n } -func (p *noder) funcDecl(fun *syntax.FuncDecl) *ir.Node { +func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node { name := p.name(fun.Name) t := p.signature(fun.Recv, fun.Type) f := p.nod(fun, ir.ODCLFUNC, nil, nil) @@ -580,7 +580,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *ir.Node { return f } -func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) *ir.Node { +func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) ir.Node { n := p.nod(typ, ir.OTFUNC, nil, nil) if recv != nil { n.SetLeft(p.param(recv, false, false)) @@ -590,8 +590,8 @@ func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) *ir.Node { return n } -func (p *noder) params(params []*syntax.Field, dddOk bool) []*ir.Node { - nodes := make([]*ir.Node, 0, len(params)) +func (p *noder) params(params []*syntax.Field, dddOk bool) []ir.Node { + nodes := make([]ir.Node, 0, len(params)) for i, param := range params { p.setlineno(param) nodes = append(nodes, p.param(param, dddOk, i+1 == len(params))) @@ -599,7 +599,7 @@ func (p *noder) params(params []*syntax.Field, dddOk bool) []*ir.Node { return nodes } -func (p *noder) param(param *syntax.Field, dddOk, final bool) *ir.Node { +func (p *noder) param(param *syntax.Field, dddOk, final bool) ir.Node { var name *types.Sym if param.Name != nil { name = p.name(param.Name) @@ -633,22 +633,22 @@ func (p *noder) param(param *syntax.Field, dddOk, final bool) *ir.Node { return n } -func (p *noder) exprList(expr syntax.Expr) []*ir.Node { +func (p *noder) exprList(expr syntax.Expr) []ir.Node { if list, ok := expr.(*syntax.ListExpr); ok { return p.exprs(list.ElemList) } - return []*ir.Node{p.expr(expr)} + return []ir.Node{p.expr(expr)} } -func 
(p *noder) exprs(exprs []syntax.Expr) []*ir.Node { - nodes := make([]*ir.Node, 0, len(exprs)) +func (p *noder) exprs(exprs []syntax.Expr) []ir.Node { + nodes := make([]ir.Node, 0, len(exprs)) for _, expr := range exprs { nodes = append(nodes, p.expr(expr)) } return nodes } -func (p *noder) expr(expr syntax.Expr) *ir.Node { +func (p *noder) expr(expr syntax.Expr) ir.Node { p.setlineno(expr) switch expr := expr.(type) { case nil, *syntax.BadExpr: @@ -699,7 +699,7 @@ func (p *noder) expr(expr syntax.Expr) *ir.Node { op = ir.OSLICE3 } n := p.nod(expr, op, p.expr(expr.X), nil) - var index [3]*ir.Node + var index [3]ir.Node for i, x := range &expr.Index { if x != nil { index[i] = p.expr(x) @@ -725,7 +725,7 @@ func (p *noder) expr(expr syntax.Expr) *ir.Node { return n case *syntax.ArrayType: - var len *ir.Node + var len ir.Node if expr.Len != nil { len = p.expr(expr.Len) } else { @@ -765,7 +765,7 @@ func (p *noder) expr(expr syntax.Expr) *ir.Node { // sum efficiently handles very large summation expressions (such as // in issue #16394). In particular, it avoids left recursion and // collapses string literals. -func (p *noder) sum(x syntax.Expr) *ir.Node { +func (p *noder) sum(x syntax.Expr) ir.Node { // While we need to handle long sums with asymptotic // efficiency, the vast majority of sums are very small: ~95% // have only 2 or 3 operands, and ~99% of string literals are @@ -800,7 +800,7 @@ func (p *noder) sum(x syntax.Expr) *ir.Node { // handle correctly. For now, we avoid these problems by // treating named string constants the same as non-constant // operands. - var nstr *ir.Node + var nstr ir.Node chunks := make([]string, 0, 1) n := p.expr(x) @@ -838,12 +838,12 @@ func (p *noder) sum(x syntax.Expr) *ir.Node { return n } -func (p *noder) typeExpr(typ syntax.Expr) *ir.Node { +func (p *noder) typeExpr(typ syntax.Expr) ir.Node { // TODO(mdempsky): Be stricter? typecheck should handle errors anyway. 
return p.expr(typ) } -func (p *noder) typeExprOrNil(typ syntax.Expr) *ir.Node { +func (p *noder) typeExprOrNil(typ syntax.Expr) ir.Node { if typ != nil { return p.expr(typ) } @@ -862,11 +862,11 @@ func (p *noder) chanDir(dir syntax.ChanDir) types.ChanDir { panic("unhandled ChanDir") } -func (p *noder) structType(expr *syntax.StructType) *ir.Node { - l := make([]*ir.Node, 0, len(expr.FieldList)) +func (p *noder) structType(expr *syntax.StructType) ir.Node { + l := make([]ir.Node, 0, len(expr.FieldList)) for i, field := range expr.FieldList { p.setlineno(field) - var n *ir.Node + var n ir.Node if field.Name == nil { n = p.embedded(field.Type) } else { @@ -884,11 +884,11 @@ func (p *noder) structType(expr *syntax.StructType) *ir.Node { return n } -func (p *noder) interfaceType(expr *syntax.InterfaceType) *ir.Node { - l := make([]*ir.Node, 0, len(expr.MethodList)) +func (p *noder) interfaceType(expr *syntax.InterfaceType) ir.Node { + l := make([]ir.Node, 0, len(expr.MethodList)) for _, method := range expr.MethodList { p.setlineno(method) - var n *ir.Node + var n ir.Node if method.Name == nil { n = p.nodSym(method, ir.ODCLFIELD, importName(p.packname(method.Type)), nil) } else { @@ -934,7 +934,7 @@ func (p *noder) packname(expr syntax.Expr) *types.Sym { panic(fmt.Sprintf("unexpected packname: %#v", expr)) } -func (p *noder) embedded(typ syntax.Expr) *ir.Node { +func (p *noder) embedded(typ syntax.Expr) ir.Node { op, isStar := typ.(*syntax.Operation) if isStar { if op.Op != syntax.Mul || op.Y != nil { @@ -953,12 +953,12 @@ func (p *noder) embedded(typ syntax.Expr) *ir.Node { return n } -func (p *noder) stmts(stmts []syntax.Stmt) []*ir.Node { +func (p *noder) stmts(stmts []syntax.Stmt) []ir.Node { return p.stmtsFall(stmts, false) } -func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []*ir.Node { - var nodes []*ir.Node +func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []ir.Node { + var nodes []ir.Node for i, stmt := range stmts { s := p.stmtFall(stmt, fallOK && i+1 == len(stmts)) if s == nil { @@ -971,11 +971,11 @@ func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []*ir.Node { return nodes } -func (p *noder) stmt(stmt syntax.Stmt) *ir.Node { +func (p *noder) stmt(stmt syntax.Stmt) ir.Node { return p.stmtFall(stmt, false) } -func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *ir.Node { +func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node { p.setlineno(stmt) switch stmt := stmt.(type) { case *syntax.EmptyStmt: @@ -1053,7 +1053,7 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *ir.Node { } return p.nod(stmt, op, p.expr(stmt.Call), nil) case *syntax.ReturnStmt: - var results []*ir.Node + var results []ir.Node if stmt.Results != nil { results = p.exprList(stmt.Results) } @@ -1085,7 +1085,7 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *ir.Node { panic("unhandled Stmt") } -func (p *noder) assignList(expr syntax.Expr, defn *ir.Node, colas bool) []*ir.Node { +func (p *noder) assignList(expr syntax.Expr, defn ir.Node, colas bool) []ir.Node { if !colas { return p.exprList(expr) } @@ -1099,7 +1099,7 @@ func (p *noder) assignList(expr syntax.Expr, defn *ir.Node, colas bool) []*ir.No exprs = []syntax.Expr{expr} } - res := make([]*ir.Node, len(exprs)) + res := make([]ir.Node, len(exprs)) seen := make(map[*types.Sym]bool, len(exprs)) newOrErr := false @@ -1145,14 +1145,14 @@ func (p *noder) assignList(expr syntax.Expr, defn *ir.Node, colas bool) []*ir.No return res } -func (p *noder) blockStmt(stmt *syntax.BlockStmt) []*ir.Node { +func (p 
*noder) blockStmt(stmt *syntax.BlockStmt) []ir.Node { p.openScope(stmt.Pos()) nodes := p.stmts(stmt.List) p.closeScope(stmt.Rbrace) return nodes } -func (p *noder) ifStmt(stmt *syntax.IfStmt) *ir.Node { +func (p *noder) ifStmt(stmt *syntax.IfStmt) ir.Node { p.openScope(stmt.Pos()) n := p.nod(stmt, ir.OIF, nil, nil) if stmt.Init != nil { @@ -1174,9 +1174,9 @@ func (p *noder) ifStmt(stmt *syntax.IfStmt) *ir.Node { return n } -func (p *noder) forStmt(stmt *syntax.ForStmt) *ir.Node { +func (p *noder) forStmt(stmt *syntax.ForStmt) ir.Node { p.openScope(stmt.Pos()) - var n *ir.Node + var n ir.Node if r, ok := stmt.Init.(*syntax.RangeClause); ok { if stmt.Cond != nil || stmt.Post != nil { panic("unexpected RangeClause") @@ -1203,7 +1203,7 @@ func (p *noder) forStmt(stmt *syntax.ForStmt) *ir.Node { return n } -func (p *noder) switchStmt(stmt *syntax.SwitchStmt) *ir.Node { +func (p *noder) switchStmt(stmt *syntax.SwitchStmt) ir.Node { p.openScope(stmt.Pos()) n := p.nod(stmt, ir.OSWITCH, nil, nil) if stmt.Init != nil { @@ -1223,8 +1223,8 @@ func (p *noder) switchStmt(stmt *syntax.SwitchStmt) *ir.Node { return n } -func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.Node, rbrace syntax.Pos) []*ir.Node { - nodes := make([]*ir.Node, 0, len(clauses)) +func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch ir.Node, rbrace syntax.Pos) []ir.Node { + nodes := make([]ir.Node, 0, len(clauses)) for i, clause := range clauses { p.setlineno(clause) if i > 0 { @@ -1273,14 +1273,14 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.Node, rbra return nodes } -func (p *noder) selectStmt(stmt *syntax.SelectStmt) *ir.Node { +func (p *noder) selectStmt(stmt *syntax.SelectStmt) ir.Node { n := p.nod(stmt, ir.OSELECT, nil, nil) n.PtrList().Set(p.commClauses(stmt.Body, stmt.Rbrace)) return n } -func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []*ir.Node { - nodes := make([]*ir.Node, 0, len(clauses)) +func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []ir.Node { + nodes := make([]ir.Node, 0, len(clauses)) for i, clause := range clauses { p.setlineno(clause) if i > 0 { @@ -1301,16 +1301,16 @@ func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []* return nodes } -func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) *ir.Node { +func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node { lhs := p.nodSym(label, ir.OLABEL, nil, p.name(label.Label)) - var ls *ir.Node + var ls ir.Node if label.Stmt != nil { // TODO(mdempsky): Should always be present. ls = p.stmtFall(label.Stmt, fallOK) } lhs.Name().Defn = ls - l := []*ir.Node{lhs} + l := []ir.Node{lhs} if ls != nil { if ls.Op() == ir.OBLOCK && ls.Init().Len() == 0 { l = append(l, ls.List().Slice()...) @@ -1443,12 +1443,12 @@ func (p *noder) name(name *syntax.Name) *types.Sym { return lookup(name.Value) } -func (p *noder) mkname(name *syntax.Name) *ir.Node { +func (p *noder) mkname(name *syntax.Name) ir.Node { // TODO(mdempsky): Set line number? return mkname(p.name(name)) } -func (p *noder) wrapname(n syntax.Node, x *ir.Node) *ir.Node { +func (p *noder) wrapname(n syntax.Node, x ir.Node) ir.Node { // These nodes do not carry line numbers. // Introduce a wrapper node to give them the correct line. 
switch x.Op() { @@ -1464,11 +1464,11 @@ func (p *noder) wrapname(n syntax.Node, x *ir.Node) *ir.Node { return x } -func (p *noder) nod(orig syntax.Node, op ir.Op, left, right *ir.Node) *ir.Node { +func (p *noder) nod(orig syntax.Node, op ir.Op, left, right ir.Node) ir.Node { return ir.NodAt(p.pos(orig), op, left, right) } -func (p *noder) nodSym(orig syntax.Node, op ir.Op, left *ir.Node, sym *types.Sym) *ir.Node { +func (p *noder) nodSym(orig syntax.Node, op ir.Op, left ir.Node, sym *types.Sym) ir.Node { n := nodSym(op, left, sym) n.SetPos(p.pos(orig)) return n @@ -1668,7 +1668,7 @@ func safeArg(name string) bool { return '0' <= c && c <= '9' || 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || c == '.' || c == '_' || c == '/' || c >= utf8.RuneSelf } -func mkname(sym *types.Sym) *ir.Node { +func mkname(sym *types.Sym) ir.Node { n := oldname(sym) if n.Name() != nil && n.Name().Pack != nil { n.Name().Pack.Name().SetUsed(true) diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 05f8358fdfa6d..d566959d9eab9 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -228,7 +228,7 @@ func addptabs() { } } -func dumpGlobal(n *ir.Node) { +func dumpGlobal(n ir.Node) { if n.Type() == nil { base.Fatalf("external %v nil type\n", n) } @@ -242,7 +242,7 @@ func dumpGlobal(n *ir.Node) { ggloblnod(n) } -func dumpGlobalConst(n *ir.Node) { +func dumpGlobalConst(n ir.Node) { // only export typed constants t := n.Type() if t == nil { @@ -475,7 +475,7 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj. var slicedataGen int -func slicedata(pos src.XPos, s string) *ir.Node { +func slicedata(pos src.XPos, s string) ir.Node { slicedataGen++ symname := fmt.Sprintf(".gobytes.%d", slicedataGen) sym := ir.LocalPkg.Lookup(symname) @@ -489,7 +489,7 @@ func slicedata(pos src.XPos, s string) *ir.Node { return symnode } -func slicebytes(nam *ir.Node, s string) { +func slicebytes(nam ir.Node, s string) { if nam.Op() != ir.ONAME { base.Fatalf("slicebytes %v", nam) } @@ -530,7 +530,7 @@ func dsymptrWeakOff(s *obj.LSym, off int, x *obj.LSym) int { // slicesym writes a static slice symbol {&arr, lencap, lencap} to n. // arr must be an ONAME. slicesym does not modify n. -func slicesym(n, arr *ir.Node, lencap int64) { +func slicesym(n, arr ir.Node, lencap int64) { s := n.Sym().Linksym() off := n.Offset() if arr.Op() != ir.ONAME { @@ -543,7 +543,7 @@ func slicesym(n, arr *ir.Node, lencap int64) { // addrsym writes the static address of a to n. a must be an ONAME. // Neither n nor a is modified. -func addrsym(n, a *ir.Node) { +func addrsym(n, a ir.Node) { if n.Op() != ir.ONAME { base.Fatalf("addrsym n op %v", n.Op()) } @@ -559,7 +559,7 @@ func addrsym(n, a *ir.Node) { // pfuncsym writes the static address of f to n. f must be a global function. // Neither n nor f is modified. -func pfuncsym(n, f *ir.Node) { +func pfuncsym(n, f ir.Node) { if n.Op() != ir.ONAME { base.Fatalf("pfuncsym n op %v", n.Op()) } @@ -575,7 +575,7 @@ func pfuncsym(n, f *ir.Node) { // litsym writes the static literal c to n. // Neither n nor c is modified. 
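
litsym, below, is the last of this file's static writers (slicesym, addrsym, pfuncsym, litsym): together they cover initializers the compiler can lay out in the data section without generated init code. A rough source-level illustration (the mapping in the comments is approximate):

	package main

	import "fmt"

	var static = 42         // constant data: litsym territory
	var staticPtr = &static // address of a global: a static relocation, addrsym territory
	var dynamic = compute() // not statically evaluable: initialized by generated init code

	func compute() int { return static + 1 }

	func main() { fmt.Println(static, *staticPtr, dynamic) } // 42 42 43
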
-func litsym(n, c *ir.Node, wid int) { +func litsym(n, c ir.Node, wid int) { if n.Op() != ir.ONAME { base.Fatalf("litsym n op %v", n.Op()) } diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 36a4095640603..b7d713439ba79 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -44,27 +44,27 @@ import ( // Order holds state during the ordering process. type Order struct { - out []*ir.Node // list of generated statements - temp []*ir.Node // stack of temporary variables - free map[string][]*ir.Node // free list of unused temporaries, by type.LongString(). + out []ir.Node // list of generated statements + temp []ir.Node // stack of temporary variables + free map[string][]ir.Node // free list of unused temporaries, by type.LongString(). } // Order rewrites fn.Nbody to apply the ordering constraints // described in the comment at the top of the file. -func order(fn *ir.Node) { +func order(fn ir.Node) { if base.Flag.W > 1 { s := fmt.Sprintf("\nbefore order %v", fn.Func().Nname.Sym()) ir.DumpList(s, fn.Body()) } - orderBlock(fn.PtrBody(), map[string][]*ir.Node{}) + orderBlock(fn.PtrBody(), map[string][]ir.Node{}) } // newTemp allocates a new temporary with the given type, // pushes it onto the temp stack, and returns it. // If clear is true, newTemp emits code to zero the temporary. -func (o *Order) newTemp(t *types.Type, clear bool) *ir.Node { - var v *ir.Node +func (o *Order) newTemp(t *types.Type, clear bool) ir.Node { + var v ir.Node // Note: LongString is close to the type equality we want, // but not exactly. We still need to double-check with types.Identical. key := t.LongString() @@ -103,7 +103,7 @@ func (o *Order) newTemp(t *types.Type, clear bool) *ir.Node { // (The other candidate would be map access, but map access // returns a pointer to the result data instead of taking a pointer // to be filled in.) -func (o *Order) copyExpr(n *ir.Node, t *types.Type, clear bool) *ir.Node { +func (o *Order) copyExpr(n ir.Node, t *types.Type, clear bool) ir.Node { v := o.newTemp(t, clear) a := ir.Nod(ir.OAS, v, n) a = typecheck(a, ctxStmt) @@ -115,7 +115,7 @@ func (o *Order) copyExpr(n *ir.Node, t *types.Type, clear bool) *ir.Node { // The definition of cheap is that n is a variable or constant. // If not, cheapExpr allocates a new tmp, emits tmp = n, // and then returns tmp. -func (o *Order) cheapExpr(n *ir.Node) *ir.Node { +func (o *Order) cheapExpr(n ir.Node) ir.Node { if n == nil { return nil } @@ -143,7 +143,7 @@ func (o *Order) cheapExpr(n *ir.Node) *ir.Node { // as assigning to the original n. // // The intended use is to apply to x when rewriting x += y into x = x + y. -func (o *Order) safeExpr(n *ir.Node) *ir.Node { +func (o *Order) safeExpr(n ir.Node) ir.Node { switch n.Op() { case ir.ONAME, ir.OLITERAL, ir.ONIL: return n @@ -167,7 +167,7 @@ func (o *Order) safeExpr(n *ir.Node) *ir.Node { return typecheck(a, ctxExpr) case ir.OINDEX, ir.OINDEXMAP: - var l *ir.Node + var l ir.Node if n.Left().Type().IsArray() { l = o.safeExpr(n.Left()) } else { @@ -194,7 +194,7 @@ func (o *Order) safeExpr(n *ir.Node) *ir.Node { // of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay, // because we emit explicit VARKILL instructions marking the end of those // temporaries' lifetimes. 
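
The safeExpr hunks above protect the rewrite of x op= y: the side-effecting parts of the lvalue must be evaluated exactly once. In source terms (a sketch of the semantics being preserved, not the compiler's actual rewrite):

	package main

	import "fmt"

	func main() {
		xs := []int{1, 2, 3}
		i := 0
		next := func() int { i++; return i - 1 } // side-effecting index expression

		// Rewriting xs[next()] += 10 into xs[next()] = xs[next()] + 10
		// would run next() twice; safeExpr pins the index in a
		// temporary so it runs once:
		tmp := next()
		xs[tmp] = xs[tmp] + 10

		fmt.Println(xs, i) // [11 2 3] 1
	}
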
-func isaddrokay(n *ir.Node) bool { +func isaddrokay(n ir.Node) bool { return islvalue(n) && (n.Op() != ir.ONAME || n.Class() == ir.PEXTERN || ir.IsAutoTmp(n)) } @@ -203,7 +203,7 @@ func isaddrokay(n *ir.Node) bool { // tmp = n, and then returns tmp. // The result of addrTemp MUST be assigned back to n, e.g. // n.Left = o.addrTemp(n.Left) -func (o *Order) addrTemp(n *ir.Node) *ir.Node { +func (o *Order) addrTemp(n ir.Node) ir.Node { if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL { // TODO: expand this to all static composite literal nodes? n = defaultlit(n, nil) @@ -225,7 +225,7 @@ func (o *Order) addrTemp(n *ir.Node) *ir.Node { // mapKeyTemp prepares n to be a key in a map runtime call and returns n. // It should only be used for map runtime calls which have *_fast* versions. -func (o *Order) mapKeyTemp(t *types.Type, n *ir.Node) *ir.Node { +func (o *Order) mapKeyTemp(t *types.Type, n ir.Node) ir.Node { // Most map calls need to take the address of the key. // Exception: map*_fast* calls. See golang.org/issue/19015. if mapfast(t) == mapslow { @@ -248,7 +248,7 @@ func (o *Order) mapKeyTemp(t *types.Type, n *ir.Node) *ir.Node { // It would be nice to handle these generally, but because // []byte keys are not allowed in maps, the use of string(k) // comes up in important cases in practice. See issue 3512. -func mapKeyReplaceStrConv(n *ir.Node) bool { +func mapKeyReplaceStrConv(n ir.Node) bool { var replaced bool switch n.Op() { case ir.OBYTES2STR: @@ -293,8 +293,8 @@ func (o *Order) popTemp(mark ordermarker) { // cleanTempNoPop emits VARKILL instructions to *out // for each temporary above the mark on the temporary stack. // It does not pop the temporaries from the stack. -func (o *Order) cleanTempNoPop(mark ordermarker) []*ir.Node { - var out []*ir.Node +func (o *Order) cleanTempNoPop(mark ordermarker) []ir.Node { + var out []ir.Node for i := len(o.temp) - 1; i >= int(mark); i-- { n := o.temp[i] kill := ir.Nod(ir.OVARKILL, n, nil) @@ -324,7 +324,7 @@ func (o *Order) stmtList(l ir.Nodes) { // m = OMAKESLICE([]T, x); OCOPY(m, s) // and rewrites it to: // m = OMAKESLICECOPY([]T, x, s); nil -func orderMakeSliceCopy(s []*ir.Node) { +func orderMakeSliceCopy(s []ir.Node) { if base.Flag.N != 0 || instrumenting { return } @@ -406,7 +406,7 @@ func (o *Order) edge() { // orderBlock orders the block of statements in n into a new slice, // and then replaces the old slice in n with the new slice. // free is a map that can be used to obtain temporary variables by type. -func orderBlock(n *ir.Nodes, free map[string][]*ir.Node) { +func orderBlock(n *ir.Nodes, free map[string][]ir.Node) { var order Order order.free = free mark := order.markTemp() @@ -420,7 +420,7 @@ func orderBlock(n *ir.Nodes, free map[string][]*ir.Node) { // leaves them as the init list of the final *np. // The result of exprInPlace MUST be assigned back to n, e.g. // n.Left = o.exprInPlace(n.Left) -func (o *Order) exprInPlace(n *ir.Node) *ir.Node { +func (o *Order) exprInPlace(n ir.Node) ir.Node { var order Order order.free = o.free n = order.expr(n, nil) @@ -437,7 +437,7 @@ func (o *Order) exprInPlace(n *ir.Node) *ir.Node { // The result of orderStmtInPlace MUST be assigned back to n, e.g. // n.Left = orderStmtInPlace(n.Left) // free is a map that can be used to obtain temporary variables by type. 
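
orderStmtInPlace, next, threads the same free map through nested orderings so temporaries released by one statement can be reused by the next. A toy version of that pooling (temp and order here are illustrative; the real code keys the map by the type's LongString):

	package main

	import "fmt"

	type temp struct{ typ string }

	type order struct{ free map[string][]*temp }

	// newTemp reuses a pooled temporary of the right type if one is
	// free, and allocates a fresh one otherwise.
	func (o *order) newTemp(typ string) *temp {
		if ts := o.free[typ]; len(ts) > 0 {
			t := ts[len(ts)-1]
			o.free[typ] = ts[:len(ts)-1]
			return t
		}
		return &temp{typ: typ}
	}

	func (o *order) release(t *temp) { o.free[t.typ] = append(o.free[t.typ], t) }

	func main() {
		o := &order{free: map[string][]*temp{}}
		t1 := o.newTemp("int")
		o.release(t1)
		t2 := o.newTemp("int")
		fmt.Println(t1 == t2) // true: the slot was recycled
	}
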
-func orderStmtInPlace(n *ir.Node, free map[string][]*ir.Node) *ir.Node { +func orderStmtInPlace(n ir.Node, free map[string][]ir.Node) ir.Node { var order Order order.free = free mark := order.markTemp() @@ -447,7 +447,7 @@ func orderStmtInPlace(n *ir.Node, free map[string][]*ir.Node) *ir.Node { } // init moves n's init list to o.out. -func (o *Order) init(n *ir.Node) { +func (o *Order) init(n ir.Node) { if ir.MayBeShared(n) { // For concurrency safety, don't mutate potentially shared nodes. // First, ensure that no work is required here. @@ -462,7 +462,7 @@ func (o *Order) init(n *ir.Node) { // call orders the call expression n. // n.Op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY. -func (o *Order) call(n *ir.Node) { +func (o *Order) call(n ir.Node) { if n.Init().Len() > 0 { // Caller should have already called o.init(n). base.Fatalf("%v with unexpected ninit", n.Op()) @@ -483,7 +483,7 @@ func (o *Order) call(n *ir.Node) { if n.Op() == ir.OCALLINTER { return } - keepAlive := func(arg *ir.Node) { + keepAlive := func(arg ir.Node) { // If the argument is really a pointer being converted to uintptr, // arrange for the pointer to be kept alive until the call returns, // by copying it into a temp and marking that temp @@ -525,7 +525,7 @@ func (o *Order) call(n *ir.Node) { // cases they are also typically registerizable, so not much harm done. // And this only applies to the multiple-assignment form. // We could do a more precise analysis if needed, like in walk.go. -func (o *Order) mapAssign(n *ir.Node) { +func (o *Order) mapAssign(n ir.Node) { switch n.Op() { default: base.Fatalf("order.mapAssign %v", n.Op()) @@ -546,7 +546,7 @@ func (o *Order) mapAssign(n *ir.Node) { o.out = append(o.out, n) case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2MAPR, ir.OAS2FUNC: - var post []*ir.Node + var post []ir.Node for i, m := range n.List().Slice() { switch { case m.Op() == ir.OINDEXMAP: @@ -574,7 +574,7 @@ func (o *Order) mapAssign(n *ir.Node) { // stmt orders the statement n, appending to o.out. // Temporaries created during the statement are cleaned // up using VARKILL instructions as possible. -func (o *Order) stmt(n *ir.Node) { +func (o *Order) stmt(n ir.Node) { if n == nil { return } @@ -1022,7 +1022,7 @@ func (o *Order) stmt(n *ir.Node) { base.Pos = lno } -func hasDefaultCase(n *ir.Node) bool { +func hasDefaultCase(n ir.Node) bool { for _, ncas := range n.List().Slice() { if ncas.Op() != ir.OCASE { base.Fatalf("expected case, found %v", ncas.Op()) @@ -1052,7 +1052,7 @@ func (o *Order) exprListInPlace(l ir.Nodes) { } // prealloc[x] records the allocation to use for x. -var prealloc = map[*ir.Node]*ir.Node{} +var prealloc = map[ir.Node]ir.Node{} // expr orders a single expression, appending side // effects to o.out as needed. @@ -1061,7 +1061,7 @@ var prealloc = map[*ir.Node]*ir.Node{} // to avoid copying the result of the expression to a temporary.) // The result of expr MUST be assigned back to n, e.g. // n.Left = o.expr(n.Left, lhs) -func (o *Order) expr(n, lhs *ir.Node) *ir.Node { +func (o *Order) expr(n, lhs ir.Node) ir.Node { if n == nil { return n } @@ -1329,7 +1329,7 @@ func (o *Order) expr(n, lhs *ir.Node) *ir.Node { // See issue 26552. 
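
One mechanism in (*Order).call above is worth spelling out: keepAlive targets call sites where a pointer is laundered through uintptr as it is passed, the classic syscall-wrapper shape (trap and p are placeholders):

	// Only a uintptr crosses the call boundary, but the pointee must
	// survive for the duration of the call.
	syscall.Syscall(trap, uintptr(unsafe.Pointer(p)), 0, 0)

Order copies such an argument into a temporary and flags it so the temp-stack cleanup later emits OVARLIVE for it rather than OVARKILL, keeping the pointee visibly live until the call returns.
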
entries := n.List().Slice() statics := entries[:0] - var dynamics []*ir.Node + var dynamics []ir.Node for _, r := range entries { if r.Op() != ir.OKEY { base.Fatalf("OMAPLIT entry not OKEY: %v\n", r) @@ -1377,7 +1377,7 @@ func (o *Order) expr(n, lhs *ir.Node) *ir.Node { // okas creates and returns an assignment of val to ok, // including an explicit conversion if necessary. -func okas(ok, val *ir.Node) *ir.Node { +func okas(ok, val ir.Node) ir.Node { if !ir.IsBlank(ok) { val = conv(val, ok.Type()) } @@ -1392,9 +1392,9 @@ func okas(ok, val *ir.Node) *ir.Node { // tmp1, tmp2, tmp3 = ... // a, b, a = tmp1, tmp2, tmp3 // This is necessary to ensure left to right assignment order. -func (o *Order) as2(n *ir.Node) { - tmplist := []*ir.Node{} - left := []*ir.Node{} +func (o *Order) as2(n ir.Node) { + tmplist := []ir.Node{} + left := []ir.Node{} for ni, l := range n.List().Slice() { if !ir.IsBlank(l) { tmp := o.newTemp(l.Type(), l.Type().HasPointers()) @@ -1415,8 +1415,8 @@ func (o *Order) as2(n *ir.Node) { // okAs2 orders OAS2XXX with ok. // Just like as2, this also adds temporaries to ensure left-to-right assignment. -func (o *Order) okAs2(n *ir.Node) { - var tmp1, tmp2 *ir.Node +func (o *Order) okAs2(n ir.Node) { + var tmp1, tmp2 ir.Node if !ir.IsBlank(n.List().First()) { typ := n.Right().Type() tmp1 = o.newTemp(typ, typ.HasPointers()) diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 5827b5a7a6d69..221b733a070a6 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -24,10 +24,10 @@ import ( // "Portable" code generation. var ( - compilequeue []*ir.Node // functions waiting to be compiled + compilequeue []ir.Node // functions waiting to be compiled ) -func emitptrargsmap(fn *ir.Node) { +func emitptrargsmap(fn ir.Node) { if ir.FuncName(fn) == "_" || fn.Func().Nname.Sym().Linkname != "" { return } @@ -68,7 +68,7 @@ func emitptrargsmap(fn *ir.Node) { // really means, in memory, things with pointers needing zeroing at // the top of the stack and increasing in size. // Non-autos sort on offset. -func cmpstackvarlt(a, b *ir.Node) bool { +func cmpstackvarlt(a, b ir.Node) bool { if (a.Class() == ir.PAUTO) != (b.Class() == ir.PAUTO) { return b.Class() == ir.PAUTO } @@ -101,7 +101,7 @@ func cmpstackvarlt(a, b *ir.Node) bool { } // byStackvar implements sort.Interface for []*Node using cmpstackvarlt. -type byStackVar []*ir.Node +type byStackVar []ir.Node func (s byStackVar) Len() int { return len(s) } func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) } @@ -128,7 +128,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { scratchUsed := false for _, b := range f.Blocks { for _, v := range b.Values { - if n, ok := v.Aux.(*ir.Node); ok { + if n, ok := v.Aux.(ir.Node); ok { switch n.Class() { case ir.PPARAM, ir.PPARAMOUT: // Don't modify nodfp; it is a global. 
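
Concretely, the temporaries introduced by as2 above are what make aliasing multi-assignments follow the spec's evaluate-everything-then-assign rule; the classic swap

	x, y = y, x

is compiled, in effect, as

	tmp1, tmp2 := y, x
	x, y = tmp1, tmp2

so neither store can observe the other's effect mid-assignment.
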
@@ -193,7 +193,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg)) } -func funccompile(fn *ir.Node) { +func funccompile(fn ir.Node) { if Curfn != nil { base.Fatalf("funccompile %v inside %v", fn.Func().Nname.Sym(), Curfn.Func().Nname.Sym()) } @@ -224,7 +224,7 @@ func funccompile(fn *ir.Node) { dclcontext = ir.PEXTERN } -func compile(fn *ir.Node) { +func compile(fn ir.Node) { errorsBefore := base.Errors() order(fn) if base.Errors() > errorsBefore { @@ -284,7 +284,7 @@ func compile(fn *ir.Node) { // If functions are not compiled immediately, // they are enqueued in compilequeue, // which is drained by compileFunctions. -func compilenow(fn *ir.Node) bool { +func compilenow(fn ir.Node) bool { // Issue 38068: if this function is a method AND an inline // candidate AND was not inlined (yet), put it onto the compile // queue instead of compiling it immediately. This is in case we @@ -299,7 +299,7 @@ func compilenow(fn *ir.Node) bool { // isInlinableButNotInlined returns true if 'fn' was marked as an // inline candidate but then never inlined (presumably because we // found no call sites). -func isInlinableButNotInlined(fn *ir.Node) bool { +func isInlinableButNotInlined(fn ir.Node) bool { if fn.Func().Nname.Func().Inl == nil { return false } @@ -315,7 +315,7 @@ const maxStackSize = 1 << 30 // uses it to generate a plist, // and flushes that plist to machine code. // worker indicates which of the backend workers is doing the processing. -func compileSSA(fn *ir.Node, worker int) { +func compileSSA(fn ir.Node, worker int) { f := buildssa(fn, worker) // Note: check arg size to fix issue 25507. if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type().ArgWidth() >= maxStackSize { @@ -360,7 +360,7 @@ func compileFunctions() { sizeCalculationDisabled = true // not safe to calculate sizes concurrently if race.Enabled { // Randomize compilation order to try to shake out races. - tmp := make([]*ir.Node, len(compilequeue)) + tmp := make([]ir.Node, len(compilequeue)) perm := rand.Perm(len(compilequeue)) for i, v := range perm { tmp[v] = compilequeue[i] @@ -376,7 +376,7 @@ func compileFunctions() { } var wg sync.WaitGroup base.Ctxt.InParallel = true - c := make(chan *ir.Node, base.Flag.LowerC) + c := make(chan ir.Node, base.Flag.LowerC) for i := 0; i < base.Flag.LowerC; i++ { wg.Add(1) go func(worker int) { @@ -398,7 +398,7 @@ func compileFunctions() { } func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) { - fn := curfn.(*ir.Node) + fn := curfn.(ir.Node) if fn.Func().Nname != nil { if expect := fn.Func().Nname.Sym().Linksym(); fnsym != expect { base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect) @@ -432,7 +432,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S // Deciding the right answer is, as they say, future work. isODCLFUNC := fn.Op() == ir.ODCLFUNC - var apdecls []*ir.Node + var apdecls []ir.Node // Populate decls for fn. 
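
The concurrency in compileFunctions above is a plain buffered-channel fan-out; stripped to its skeleton (nWorkers stands in for base.Flag.LowerC):

	c := make(chan ir.Node, nWorkers)
	var wg sync.WaitGroup
	for i := 0; i < nWorkers; i++ {
		wg.Add(1)
		go func(worker int) {
			defer wg.Done()
			for fn := range c { // drain until close(c)
				compileSSA(fn, worker)
			}
		}(i)
	}
	for _, fn := range compilequeue {
		c <- fn
	}
	close(c)
	wg.Wait()

The details layered on top are orthogonal to the queue itself: sizeCalculationDisabled because size calculation is not concurrency-safe, and the rand.Perm shuffle under the race detector to shake out ordering-dependent bugs.
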
if isODCLFUNC { for _, n := range fn.Func().Dcl { @@ -489,7 +489,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S return scopes, inlcalls } -func declPos(decl *ir.Node) src.XPos { +func declPos(decl ir.Node) src.XPos { if decl.Name().Defn != nil && (decl.Name().Captured() || decl.Name().Byval()) { // It's not clear which position is correct for captured variables here: // * decl.Pos is the wrong position for captured variables, in the inner @@ -512,10 +512,10 @@ func declPos(decl *ir.Node) src.XPos { // createSimpleVars creates a DWARF entry for every variable declared in the // function, claiming that they are permanently on the stack. -func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Node) ([]*ir.Node, []*dwarf.Var, map[*ir.Node]bool) { +func createSimpleVars(fnsym *obj.LSym, apDecls []ir.Node) ([]ir.Node, []*dwarf.Var, map[ir.Node]bool) { var vars []*dwarf.Var - var decls []*ir.Node - selected := make(map[*ir.Node]bool) + var decls []ir.Node + selected := make(map[ir.Node]bool) for _, n := range apDecls { if ir.IsAutoTmp(n) { continue @@ -528,7 +528,7 @@ func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Node) ([]*ir.Node, []*dwarf return decls, vars, selected } -func createSimpleVar(fnsym *obj.LSym, n *ir.Node) *dwarf.Var { +func createSimpleVar(fnsym *obj.LSym, n ir.Node) *dwarf.Var { var abbrev int offs := n.Offset() @@ -579,13 +579,13 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Node) *dwarf.Var { // createComplexVars creates recomposed DWARF vars with location lists, // suitable for describing optimized code. -func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Node, []*dwarf.Var, map[*ir.Node]bool) { +func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]ir.Node, []*dwarf.Var, map[ir.Node]bool) { debugInfo := fn.DebugInfo.(*ssa.FuncDebug) // Produce a DWARF variable entry for each user variable. - var decls []*ir.Node + var decls []ir.Node var vars []*dwarf.Var - ssaVars := make(map[*ir.Node]bool) + ssaVars := make(map[ir.Node]bool) for varID, dvar := range debugInfo.Vars { n := dvar @@ -605,11 +605,11 @@ func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Node, []*dwarf.Var, // createDwarfVars process fn, returning a list of DWARF variables and the // Nodes they represent. -func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir.Node) ([]*ir.Node, []*dwarf.Var) { +func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []ir.Node) ([]ir.Node, []*dwarf.Var) { // Collect a raw list of DWARF vars. var vars []*dwarf.Var - var decls []*ir.Node - var selected map[*ir.Node]bool + var decls []ir.Node + var selected map[ir.Node]bool if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK { decls, vars, selected = createComplexVars(fnsym, fn) } else { @@ -708,9 +708,9 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir // function that is not local to the package being compiled, then the // names of the variables may have been "versioned" to avoid conflicts // with local vars; disregard this versioning when sorting. 
-func preInliningDcls(fnsym *obj.LSym) []*ir.Node { - fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*ir.Node) - var rdcl []*ir.Node +func preInliningDcls(fnsym *obj.LSym) []ir.Node { + fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(ir.Node) + var rdcl []ir.Node for _, n := range fn.Func().Inl.Dcl { c := n.Sym().Name[0] // Avoid reporting "_" parameters, since if there are more than diff --git a/src/cmd/compile/internal/gc/pgen_test.go b/src/cmd/compile/internal/gc/pgen_test.go index efdffe0256c9a..1984f9aa08558 100644 --- a/src/cmd/compile/internal/gc/pgen_test.go +++ b/src/cmd/compile/internal/gc/pgen_test.go @@ -26,19 +26,19 @@ func typeWithPointers() *types.Type { return t } -func markUsed(n *ir.Node) *ir.Node { +func markUsed(n ir.Node) ir.Node { n.Name().SetUsed(true) return n } -func markNeedZero(n *ir.Node) *ir.Node { +func markNeedZero(n ir.Node) ir.Node { n.Name().SetNeedzero(true) return n } // Test all code paths for cmpstackvarlt. func TestCmpstackvar(t *testing.T) { - nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Node { + nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) ir.Node { if s == nil { s = &types.Sym{Name: "."} } @@ -49,7 +49,7 @@ func TestCmpstackvar(t *testing.T) { return n } testdata := []struct { - a, b *ir.Node + a, b ir.Node lt bool }{ { @@ -156,14 +156,14 @@ func TestCmpstackvar(t *testing.T) { } func TestStackvarSort(t *testing.T) { - nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Node { + nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) ir.Node { n := NewName(s) n.SetType(t) n.SetOffset(xoffset) n.SetClass(cl) return n } - inp := []*ir.Node{ + inp := []ir.Node{ nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC), nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO), nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC), @@ -178,7 +178,7 @@ func TestStackvarSort(t *testing.T) { nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO), nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO), } - want := []*ir.Node{ + want := []ir.Node{ nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC), nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC), nod(10, &types.Type{}, &types.Sym{}, ir.PFUNC), diff --git a/src/cmd/compile/internal/gc/phi.go b/src/cmd/compile/internal/gc/phi.go index 2a88d4a5b4314..677bfc92df2f1 100644 --- a/src/cmd/compile/internal/gc/phi.go +++ b/src/cmd/compile/internal/gc/phi.go @@ -41,11 +41,11 @@ func (s *state) insertPhis() { } type phiState struct { - s *state // SSA state - f *ssa.Func // function to work on - defvars []map[*ir.Node]*ssa.Value // defined variables at end of each block + s *state // SSA state + f *ssa.Func // function to work on + defvars []map[ir.Node]*ssa.Value // defined variables at end of each block - varnum map[*ir.Node]int32 // variable numbering + varnum map[ir.Node]int32 // variable numbering // properties of the dominator tree idom []*ssa.Block // dominator parents @@ -71,15 +71,15 @@ func (s *phiState) insertPhis() { // Find all the variables for which we need to match up reads & writes. // This step prunes any basic-block-only variables from consideration. // Generate a numbering for these variables. - s.varnum = map[*ir.Node]int32{} - var vars []*ir.Node + s.varnum = map[ir.Node]int32{} + var vars []ir.Node var vartypes []*types.Type for _, b := range s.f.Blocks { for _, v := range b.Values { if v.Op != ssa.OpFwdRef { continue } - var_ := v.Aux.(*ir.Node) + var_ := v.Aux.(ir.Node) // Optimization: look back 1 block for the definition. 
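
To make the FwdRef machinery above concrete: while converting to SSA, a read of a variable whose reaching definition is not yet known gets an OpFwdRef placeholder, and this pass then places OpPhi values where distinct definitions meet. For a diamond like

	func f(cond bool) int {
		x := 1
		if cond {
			x = 2
		}
		return x // two definitions of x reach here
	}

the block holding the return gets a phi merging the two stores of x, and the placeholder is rewritten into a copy of that phi (the v.Op = ssa.OpCopy step in the resolveFwdRefs hunk that follows).
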
if len(b.Preds) == 1 { @@ -184,7 +184,7 @@ levels: } } -func (s *phiState) insertVarPhis(n int, var_ *ir.Node, defs []*ssa.Block, typ *types.Type) { +func (s *phiState) insertVarPhis(n int, var_ ir.Node, defs []*ssa.Block, typ *types.Type) { priq := &s.priq q := s.q queued := s.queued @@ -319,7 +319,7 @@ func (s *phiState) resolveFwdRefs() { if v.Op != ssa.OpFwdRef { continue } - n := s.varnum[v.Aux.(*ir.Node)] + n := s.varnum[v.Aux.(ir.Node)] v.Op = ssa.OpCopy v.Aux = nil v.AddArg(values[n]) @@ -433,11 +433,11 @@ func (s *sparseSet) clear() { // Variant to use for small functions. type simplePhiState struct { - s *state // SSA state - f *ssa.Func // function to work on - fwdrefs []*ssa.Value // list of FwdRefs to be processed - defvars []map[*ir.Node]*ssa.Value // defined variables at end of each block - reachable []bool // which blocks are reachable + s *state // SSA state + f *ssa.Func // function to work on + fwdrefs []*ssa.Value // list of FwdRefs to be processed + defvars []map[ir.Node]*ssa.Value // defined variables at end of each block + reachable []bool // which blocks are reachable } func (s *simplePhiState) insertPhis() { @@ -450,7 +450,7 @@ func (s *simplePhiState) insertPhis() { continue } s.fwdrefs = append(s.fwdrefs, v) - var_ := v.Aux.(*ir.Node) + var_ := v.Aux.(ir.Node) if _, ok := s.defvars[b.ID][var_]; !ok { s.defvars[b.ID][var_] = v // treat FwdDefs as definitions. } @@ -464,7 +464,7 @@ loop: v := s.fwdrefs[len(s.fwdrefs)-1] s.fwdrefs = s.fwdrefs[:len(s.fwdrefs)-1] b := v.Block - var_ := v.Aux.(*ir.Node) + var_ := v.Aux.(ir.Node) if b == s.f.Entry { // No variable should be live at entry. s.s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, var_, v) @@ -512,7 +512,7 @@ loop: } // lookupVarOutgoing finds the variable's value at the end of block b. -func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ *ir.Node, line src.XPos) *ssa.Value { +func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ ir.Node, line src.XPos) *ssa.Value { for { if v := s.defvars[b.ID][var_]; v != nil { return v diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index c1e523f7a0d2b..bd7696d859c21 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -101,10 +101,10 @@ type BlockEffects struct { // A collection of global state used by liveness analysis. type Liveness struct { - fn *ir.Node + fn ir.Node f *ssa.Func - vars []*ir.Node - idx map[*ir.Node]int32 + vars []ir.Node + idx map[ir.Node]int32 stkptrsize int64 be []BlockEffects @@ -206,20 +206,20 @@ type progeffectscache struct { // nor do we care about non-local variables, // nor do we care about empty structs (handled by the pointer check), // nor do we care about the fake PAUTOHEAP variables. -func livenessShouldTrack(n *ir.Node) bool { +func livenessShouldTrack(n ir.Node) bool { return n.Op() == ir.ONAME && (n.Class() == ir.PAUTO || n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Type().HasPointers() } // getvariables returns the list of on-stack variables that we need to track // and a map for looking up indices by *Node. 
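
As a worked instance of the livenessShouldTrack predicate above (a sketch, not code from this patch):

	func f(p *int, n int) *int { // p: PPARAM, pointer type    -> tracked
		var buf [32]byte          // PAUTO, no pointers         -> not tracked
		q := new(int)             // q: PAUTO, pointer type     -> tracked
		*q = n + *p + int(buf[0]) // n: PPARAM, no pointers     -> not tracked
		return q                  // result: PPARAMOUT, pointer -> tracked
	}

Only the tracked slots get bits in the emitted stack maps; pointer-free locals are simply invisible to the GC's scan of this frame, which is the point of the HasPointers check.
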
-func getvariables(fn *ir.Node) ([]*ir.Node, map[*ir.Node]int32) { - var vars []*ir.Node +func getvariables(fn ir.Node) ([]ir.Node, map[ir.Node]int32) { + var vars []ir.Node for _, n := range fn.Func().Dcl { if livenessShouldTrack(n) { vars = append(vars, n) } } - idx := make(map[*ir.Node]int32, len(vars)) + idx := make(map[ir.Node]int32, len(vars)) for i, n := range vars { idx[n] = int32(i) } @@ -312,7 +312,7 @@ func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) { } // affectedNode returns the *Node affected by v -func affectedNode(v *ssa.Value) (*ir.Node, ssa.SymEffect) { +func affectedNode(v *ssa.Value) (ir.Node, ssa.SymEffect) { // Special cases. switch v.Op { case ssa.OpLoadReg: @@ -323,9 +323,9 @@ func affectedNode(v *ssa.Value) (*ir.Node, ssa.SymEffect) { return n, ssa.SymWrite case ssa.OpVarLive: - return v.Aux.(*ir.Node), ssa.SymRead + return v.Aux.(ir.Node), ssa.SymRead case ssa.OpVarDef, ssa.OpVarKill: - return v.Aux.(*ir.Node), ssa.SymWrite + return v.Aux.(ir.Node), ssa.SymWrite case ssa.OpKeepAlive: n, _ := AutoVar(v.Args[0]) return n, ssa.SymRead @@ -340,7 +340,7 @@ func affectedNode(v *ssa.Value) (*ir.Node, ssa.SymEffect) { case nil, *obj.LSym: // ok, but no node return nil, e - case *ir.Node: + case ir.Node: return a, e default: base.Fatalf("weird aux: %s", v.LongString()) @@ -356,7 +356,7 @@ type livenessFuncCache struct { // Constructs a new liveness structure used to hold the global state of the // liveness computation. The cfg argument is a slice of *BasicBlocks and the // vars argument is a slice of *Nodes. -func newliveness(fn *ir.Node, f *ssa.Func, vars []*ir.Node, idx map[*ir.Node]int32, stkptrsize int64) *Liveness { +func newliveness(fn ir.Node, f *ssa.Func, vars []ir.Node, idx map[ir.Node]int32, stkptrsize int64) *Liveness { lv := &Liveness{ fn: fn, f: f, @@ -482,7 +482,7 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) { // Generates live pointer value maps for arguments and local variables. The // this argument and the in arguments are always assumed live. The vars // argument is a slice of *Nodes. -func (lv *Liveness) pointerMap(liveout bvec, vars []*ir.Node, args, locals bvec) { +func (lv *Liveness) pointerMap(liveout bvec, vars []ir.Node, args, locals bvec) { for i := int32(0); ; i++ { i = liveout.Next(i) if i < 0 { @@ -1164,7 +1164,7 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) { // Size args bitmaps to be just large enough to hold the largest pointer. // First, find the largest Xoffset node we care about. // (Nodes without pointers aren't in lv.vars; see livenessShouldTrack.) - var maxArgNode *ir.Node + var maxArgNode ir.Node for _, n := range lv.vars { switch n.Class() { case ir.PPARAM, ir.PPARAMOUT: diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go index 5ab2821187b23..c41d923f78652 100644 --- a/src/cmd/compile/internal/gc/racewalk.go +++ b/src/cmd/compile/internal/gc/racewalk.go @@ -60,7 +60,7 @@ func ispkgin(pkgs []string) bool { return false } -func instrument(fn *ir.Node) { +func instrument(fn ir.Node) { if fn.Func().Pragma&ir.Norace != 0 { return } diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index 6a2a65c2dfff5..0ff00cca44d06 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -13,7 +13,7 @@ import ( ) // range -func typecheckrange(n *ir.Node) { +func typecheckrange(n ir.Node) { // Typechecking order is important here: // 0. 
first typecheck range expression (slice/map/chan), // it is evaluated only once and so logically it is not part of the loop. @@ -39,7 +39,7 @@ func typecheckrange(n *ir.Node) { decldepth-- } -func typecheckrangeExpr(n *ir.Node) { +func typecheckrangeExpr(n ir.Node) { n.SetRight(typecheck(n.Right(), ctxExpr)) t := n.Right().Type() @@ -95,7 +95,7 @@ func typecheckrangeExpr(n *ir.Node) { base.ErrorfAt(n.Pos(), "too many variables in range") } - var v1, v2 *ir.Node + var v1, v2 ir.Node if n.List().Len() != 0 { v1 = n.List().First() } @@ -157,7 +157,7 @@ func cheapComputableIndex(width int64) bool { // simpler forms. The result must be assigned back to n. // Node n may also be modified in place, and may also be // the returned node. -func walkrange(n *ir.Node) *ir.Node { +func walkrange(n ir.Node) ir.Node { if isMapClear(n) { m := n.Right() lno := setlineno(m) @@ -179,7 +179,7 @@ func walkrange(n *ir.Node) *ir.Node { lno := setlineno(a) n.SetRight(nil) - var v1, v2 *ir.Node + var v1, v2 ir.Node l := n.List().Len() if l > 0 { v1 = n.List().First() @@ -205,12 +205,12 @@ func walkrange(n *ir.Node) *ir.Node { // to avoid erroneous processing by racewalk. n.PtrList().Set(nil) - var ifGuard *ir.Node + var ifGuard ir.Node translatedLoopOp := ir.OFOR - var body []*ir.Node - var init []*ir.Node + var body []ir.Node + var init []ir.Node switch t.Etype { default: base.Fatalf("walkrange") @@ -240,7 +240,7 @@ func walkrange(n *ir.Node) *ir.Node { // for v1 := range ha { body } if v2 == nil { - body = []*ir.Node{ir.Nod(ir.OAS, v1, hv1)} + body = []ir.Node{ir.Nod(ir.OAS, v1, hv1)} break } @@ -254,7 +254,7 @@ func walkrange(n *ir.Node) *ir.Node { a := ir.Nod(ir.OAS2, nil, nil) a.PtrList().Set2(v1, v2) a.PtrRlist().Set2(hv1, tmp) - body = []*ir.Node{a} + body = []ir.Node{a} break } @@ -321,14 +321,14 @@ func walkrange(n *ir.Node) *ir.Node { if v1 == nil { body = nil } else if v2 == nil { - body = []*ir.Node{ir.Nod(ir.OAS, v1, key)} + body = []ir.Node{ir.Nod(ir.OAS, v1, key)} } else { elem := nodSym(ir.ODOT, hit, elemsym) elem = ir.Nod(ir.ODEREF, elem, nil) a := ir.Nod(ir.OAS2, nil, nil) a.PtrList().Set2(v1, v2) a.PtrRlist().Set2(key, elem) - body = []*ir.Node{a} + body = []ir.Node{a} } case types.TCHAN: @@ -353,7 +353,7 @@ func walkrange(n *ir.Node) *ir.Node { if v1 == nil { body = nil } else { - body = []*ir.Node{ir.Nod(ir.OAS, v1, hv1)} + body = []ir.Node{ir.Nod(ir.OAS, v1, hv1)} } // Zero hv1. This prevents hv1 from being the sole, inaccessible // reference to an otherwise GC-able value during the next channel receive. @@ -467,7 +467,7 @@ func walkrange(n *ir.Node) *ir.Node { // } // // where == for keys of map m is reflexive. -func isMapClear(n *ir.Node) bool { +func isMapClear(n ir.Node) bool { if base.Flag.N != 0 || instrumenting { return false } @@ -509,7 +509,7 @@ func isMapClear(n *ir.Node) bool { } // mapClear constructs a call to runtime.mapclear for the map m. -func mapClear(m *ir.Node) *ir.Node { +func mapClear(m ir.Node) ir.Node { t := m.Type() // instantiate mapclear(typ *type, hmap map[any]any) @@ -534,7 +534,7 @@ func mapClear(m *ir.Node) *ir.Node { // in which the evaluation of a is side-effect-free. // // Parameters are as in walkrange: "for v1, v2 = range a". 
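
For the slice and array case, the rewrite in walkrange above amounts to an explicit index loop; schematically, with ha and hv1 as the hidden temporaries the code introduces:

	ha := a // range expression evaluated exactly once
	for hv1 := 0; hv1 < len(ha); hv1++ {
		v1, v2 = hv1, ha[hv1] // the OAS2 built via PtrList/PtrRlist above
		// ... body ...
	}

which is why typecheckrange insists on typechecking the range expression first: it is evaluated once, outside the loop proper.
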
-func arrayClear(n, v1, v2, a *ir.Node) bool { +func arrayClear(n, v1, v2, a ir.Node) bool { if base.Flag.N != 0 || instrumenting { return false } @@ -590,7 +590,7 @@ func arrayClear(n, v1, v2, a *ir.Node) bool { tmp = conv(tmp, types.Types[types.TUINTPTR]) n.PtrBody().Append(ir.Nod(ir.OAS, hn, tmp)) - var fn *ir.Node + var fn ir.Node if a.Type().Elem().HasPointers() { // memclrHasPointers(hp, hn) Curfn.Func().SetWBPos(stmt.Pos()) @@ -615,7 +615,7 @@ func arrayClear(n, v1, v2, a *ir.Node) bool { } // addptr returns (*T)(uintptr(p) + n). -func addptr(p *ir.Node, n int64) *ir.Node { +func addptr(p ir.Node, n int64) ir.Node { t := p.Type() p = ir.Nod(ir.OCONVNOP, p, nil) diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 664b3cc942139..dc9efc07fef0a 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -347,7 +347,7 @@ func methodfunc(f *types.Type, receiver *types.Type) *types.Type { if receiver != nil { inLen++ } - in := make([]*ir.Node, 0, inLen) + in := make([]ir.Node, 0, inLen) if receiver != nil { d := anonfield(receiver) @@ -361,7 +361,7 @@ func methodfunc(f *types.Type, receiver *types.Type) *types.Type { } outLen := f.Results().Fields().Len() - out := make([]*ir.Node, 0, outLen) + out := make([]ir.Node, 0, outLen) for _, t := range f.Results().Fields().Slice() { d := anonfield(t.Type) out = append(out, d) @@ -990,7 +990,7 @@ func typenamesym(t *types.Type) *types.Sym { return s } -func typename(t *types.Type) *ir.Node { +func typename(t *types.Type) ir.Node { s := typenamesym(t) if s.Def == nil { n := ir.NewNameAt(src.NoXPos, s) @@ -1006,7 +1006,7 @@ func typename(t *types.Type) *ir.Node { return n } -func itabname(t, itype *types.Type) *ir.Node { +func itabname(t, itype *types.Type) ir.Node { if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() { base.Fatalf("itabname(%v, %v)", t, itype) } @@ -1516,7 +1516,7 @@ func addsignat(t *types.Type) { } } -func addsignats(dcls []*ir.Node) { +func addsignats(dcls []ir.Node) { // copy types from dcl list to signatset for _, n := range dcls { if n.Op() == ir.OTYPE { @@ -1626,7 +1626,7 @@ func dumpbasictypes() { // The latter is the type of an auto-generated wrapper. dtypesym(types.NewPtr(types.Errortype)) - dtypesym(functype(nil, []*ir.Node{anonfield(types.Errortype)}, []*ir.Node{anonfield(types.Types[types.TSTRING])})) + dtypesym(functype(nil, []ir.Node{anonfield(types.Errortype)}, []ir.Node{anonfield(types.Types[types.TSTRING])})) // add paths for runtime and main, which 6l imports implicitly. dimportpath(Runtimepkg) @@ -1869,7 +1869,7 @@ func (p *GCProg) emit(t *types.Type, offset int64) { // zeroaddr returns the address of a symbol with at least // size bytes of zeros. -func zeroaddr(size int64) *ir.Node { +func zeroaddr(size int64) ir.Node { if size >= 1<<31 { base.Fatalf("map elem too big %d", size) } diff --git a/src/cmd/compile/internal/gc/scc.go b/src/cmd/compile/internal/gc/scc.go index 880eff7595afd..fe7956d5d590f 100644 --- a/src/cmd/compile/internal/gc/scc.go +++ b/src/cmd/compile/internal/gc/scc.go @@ -32,10 +32,10 @@ import "cmd/compile/internal/ir" // when analyzing a set of mutually recursive functions. 
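
Similarly, the arrayClear fast path above recognizes the idiomatic zeroing loop

	for i := range a {
		a[i] = zero // zero value of the element type
	}

and replaces it with a single runtime memclr over the backing store: hp is the base pointer, hn the byte length, and when the element type has pointers the call is memclrHasPointers, with SetWBPos recording where that barrier-equivalent store happens. addptr above is the low-level (*T)(uintptr(p) + n) helper for this kind of pointer arithmetic.
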
type bottomUpVisitor struct { - analyze func([]*ir.Node, bool) + analyze func([]ir.Node, bool) visitgen uint32 - nodeID map[*ir.Node]uint32 - stack []*ir.Node + nodeID map[ir.Node]uint32 + stack []ir.Node } // visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list. @@ -51,10 +51,10 @@ type bottomUpVisitor struct { // If recursive is false, the list consists of only a single function and its closures. // If recursive is true, the list may still contain only a single function, // if that function is itself recursive. -func visitBottomUp(list []*ir.Node, analyze func(list []*ir.Node, recursive bool)) { +func visitBottomUp(list []ir.Node, analyze func(list []ir.Node, recursive bool)) { var v bottomUpVisitor v.analyze = analyze - v.nodeID = make(map[*ir.Node]uint32) + v.nodeID = make(map[ir.Node]uint32) for _, n := range list { if n.Op() == ir.ODCLFUNC && !n.Func().IsHiddenClosure() { v.visit(n) @@ -62,7 +62,7 @@ func visitBottomUp(list []*ir.Node, analyze func(list []*ir.Node, recursive bool } } -func (v *bottomUpVisitor) visit(n *ir.Node) uint32 { +func (v *bottomUpVisitor) visit(n ir.Node) uint32 { if id := v.nodeID[n]; id > 0 { // already visited return id @@ -75,7 +75,7 @@ func (v *bottomUpVisitor) visit(n *ir.Node) uint32 { min := v.visitgen v.stack = append(v.stack, n) - ir.InspectList(n.Body(), func(n *ir.Node) bool { + ir.InspectList(n.Body(), func(n ir.Node) bool { switch n.Op() { case ir.ONAME: if n.Class() == ir.PFUNC { diff --git a/src/cmd/compile/internal/gc/scope.go b/src/cmd/compile/internal/gc/scope.go index 16e66dee6c6d6..fe4e1d185ad14 100644 --- a/src/cmd/compile/internal/gc/scope.go +++ b/src/cmd/compile/internal/gc/scope.go @@ -28,7 +28,7 @@ func findScope(marks []ir.Mark, pos src.XPos) ir.ScopeID { return marks[i-1].Scope } -func assembleScopes(fnsym *obj.LSym, fn *ir.Node, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope { +func assembleScopes(fnsym *obj.LSym, fn ir.Node, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope { // Initialize the DWARF scope tree based on lexical scopes. dwarfScopes := make([]dwarf.Scope, 1+len(fn.Func().Parents)) for i, parent := range fn.Func().Parents { diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go index 73b808b81543d..116b6f5b6e657 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/gc/select.go @@ -11,8 +11,8 @@ import ( ) // select -func typecheckselect(sel *ir.Node) { - var def *ir.Node +func typecheckselect(sel ir.Node) { + var def ir.Node lno := setlineno(sel) typecheckslice(sel.Init().Slice(), ctxStmt) for _, ncase := range sel.List().Slice() { @@ -91,7 +91,7 @@ func typecheckselect(sel *ir.Node) { base.Pos = lno } -func walkselect(sel *ir.Node) { +func walkselect(sel ir.Node) { lno := setlineno(sel) if sel.Body().Len() != 0 { base.Fatalf("double walkselect") @@ -109,13 +109,13 @@ func walkselect(sel *ir.Node) { base.Pos = lno } -func walkselectcases(cases *ir.Nodes) []*ir.Node { +func walkselectcases(cases *ir.Nodes) []ir.Node { ncas := cases.Len() sellineno := base.Pos // optimization: zero-case select if ncas == 0 { - return []*ir.Node{mkcall("block", nil, nil)} + return []ir.Node{mkcall("block", nil, nil)} } // optimization: one-case select: single op. @@ -168,7 +168,7 @@ func walkselectcases(cases *ir.Nodes) []*ir.Node { // convert case value arguments to addresses. // this rewrite is used by both the general code and the next optimization. 
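
For orientation, visitBottomUp above is a Tarjan-style strongly-connected-components walk over the static call graph; a typical caller looks like this sketch (escape analysis is one real user):

	visitBottomUp(xtop, func(list []ir.Node, recursive bool) {
		// list is one batch: a single function (plus its closures),
		// or a set of mutually recursive functions.
		// recursive reports whether the batch can call back into itself.
	})

The visitgen/min bookkeeping in visit is essentially the usual lowlink computation: a function closes its component exactly when nothing reachable from it was pushed earlier than itself.
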
- var dflt *ir.Node + var dflt ir.Node for _, cas := range cases.Slice() { setlineno(cas) n := cas.Left() @@ -237,16 +237,16 @@ func walkselectcases(cases *ir.Nodes) []*ir.Node { r.SetLeft(typecheck(r.Left(), ctxExpr)) r.PtrBody().Set(cas.Body().Slice()) r.PtrRlist().Set(append(dflt.Init().Slice(), dflt.Body().Slice()...)) - return []*ir.Node{r, ir.Nod(ir.OBREAK, nil, nil)} + return []ir.Node{r, ir.Nod(ir.OBREAK, nil, nil)} } if dflt != nil { ncas-- } - casorder := make([]*ir.Node, ncas) + casorder := make([]ir.Node, ncas) nsends, nrecvs := 0, 0 - var init []*ir.Node + var init []ir.Node // generate sel-struct base.Pos = sellineno @@ -258,7 +258,7 @@ func walkselectcases(cases *ir.Nodes) []*ir.Node { // No initialization for order; runtime.selectgo is responsible for that. order := temp(types.NewArray(types.Types[types.TUINT16], 2*int64(ncas))) - var pc0, pcs *ir.Node + var pc0, pcs ir.Node if base.Flag.Race { pcs = temp(types.NewArray(types.Types[types.TUINTPTR], int64(ncas))) pc0 = typecheck(ir.Nod(ir.OADDR, ir.Nod(ir.OINDEX, pcs, nodintconst(0)), nil), ctxExpr) @@ -279,7 +279,7 @@ func walkselectcases(cases *ir.Nodes) []*ir.Node { } var i int - var c, elem *ir.Node + var c, elem ir.Node switch n.Op() { default: base.Fatalf("select %v", n.Op()) @@ -297,7 +297,7 @@ func walkselectcases(cases *ir.Nodes) []*ir.Node { casorder[i] = cas - setField := func(f string, val *ir.Node) { + setField := func(f string, val ir.Node) { r := ir.Nod(ir.OAS, nodSym(ir.ODOT, ir.Nod(ir.OINDEX, selv, nodintconst(int64(i))), lookup(f)), val) r = typecheck(r, ctxStmt) init = append(init, r) @@ -340,7 +340,7 @@ func walkselectcases(cases *ir.Nodes) []*ir.Node { } // dispatch cases - dispatch := func(cond, cas *ir.Node) { + dispatch := func(cond, cas ir.Node) { cond = typecheck(cond, ctxExpr) cond = defaultlit(cond, nil) @@ -370,7 +370,7 @@ func walkselectcases(cases *ir.Nodes) []*ir.Node { } // bytePtrToIndex returns a Node representing "(*byte)(&n[i])". -func bytePtrToIndex(n *ir.Node, i int64) *ir.Node { +func bytePtrToIndex(n ir.Node, i int64) ir.Node { s := ir.Nod(ir.OADDR, ir.Nod(ir.OINDEX, n, nodintconst(i)), nil) t := types.NewPtr(types.Types[types.TUINT8]) return convnop(s, t) @@ -381,7 +381,7 @@ var scase *types.Type // Keep in sync with src/runtime/select.go. func scasetype() *types.Type { if scase == nil { - scase = tostruct([]*ir.Node{ + scase = tostruct([]ir.Node{ namedfield("c", types.Types[types.TUNSAFEPTR]), namedfield("elem", types.Types[types.TUNSAFEPTR]), }) diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index c0f85a1e337bd..e30663cfbb8fa 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -14,8 +14,8 @@ import ( ) type InitEntry struct { - Xoffset int64 // struct, array only - Expr *ir.Node // bytes of run-time computed expressions + Xoffset int64 // struct, array only + Expr ir.Node // bytes of run-time computed expressions } type InitPlan struct { @@ -29,18 +29,18 @@ type InitPlan struct { type InitSchedule struct { // out is the ordered list of dynamic initialization // statements. - out []*ir.Node + out []ir.Node - initplans map[*ir.Node]*InitPlan - inittemps map[*ir.Node]*ir.Node + initplans map[ir.Node]*InitPlan + inittemps map[ir.Node]ir.Node } -func (s *InitSchedule) append(n *ir.Node) { +func (s *InitSchedule) append(n ir.Node) { s.out = append(s.out, n) } // staticInit adds an initialization statement n to the schedule. 
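
Pulling the branches of walkselectcases above together: zero cases become a bare runtime block() call; one case compiles to the channel operation itself plus its body, with no select machinery at all; and a select with one real case plus a default lowers, in effect, to a non-blocking operation feeding a plain if (sent and full are placeholders):

	select {
	case c <- v:
		sent()
	default:
		full()
	}

	// becomes, in effect:
	if selectnbsend(c, v) {
		sent()
	} else {
		full()
	}

Everything larger funnels through a single selectgo call over the selv/order arrays, whose result indexes the casorder dispatch chain built at the end; the 2*ncas uint16 order array and the scase layout must match runtime.selectgo, hence scasetype's keep-in-sync warning.
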
-func (s *InitSchedule) staticInit(n *ir.Node) { +func (s *InitSchedule) staticInit(n ir.Node) { if !s.tryStaticInit(n) { if base.Flag.Percent != 0 { ir.Dump("nonstatic", n) @@ -51,7 +51,7 @@ func (s *InitSchedule) staticInit(n *ir.Node) { // tryStaticInit attempts to statically execute an initialization // statement and reports whether it succeeded. -func (s *InitSchedule) tryStaticInit(n *ir.Node) bool { +func (s *InitSchedule) tryStaticInit(n ir.Node) bool { // Only worry about simple "l = r" assignments. Multiple // variable/expression OAS2 assignments have already been // replaced by multiple simple OAS assignments, and the other @@ -70,7 +70,7 @@ func (s *InitSchedule) tryStaticInit(n *ir.Node) bool { // like staticassign but we are copying an already // initialized value r. -func (s *InitSchedule) staticcopy(l *ir.Node, r *ir.Node) bool { +func (s *InitSchedule) staticcopy(l ir.Node, r ir.Node) bool { if r.Op() != ir.ONAME && r.Op() != ir.OMETHEXPR { return false } @@ -168,7 +168,7 @@ func (s *InitSchedule) staticcopy(l *ir.Node, r *ir.Node) bool { return false } -func (s *InitSchedule) staticassign(l *ir.Node, r *ir.Node) bool { +func (s *InitSchedule) staticassign(l ir.Node, r ir.Node) bool { for r.Op() == ir.OCONVNOP { r = r.Left() } @@ -289,7 +289,7 @@ func (s *InitSchedule) staticassign(l *ir.Node, r *ir.Node) bool { markTypeUsedInInterface(val.Type(), l.Sym().Linksym()) - var itab *ir.Node + var itab ir.Node if l.Type().IsEmptyInterface() { itab = typename(val.Type()) } else { @@ -367,7 +367,7 @@ var statuniqgen int // name generator for static temps // staticname returns a name backed by a (writable) static data symbol. // Use readonlystaticname for read-only node. -func staticname(t *types.Type) *ir.Node { +func staticname(t *types.Type) ir.Node { // Don't use lookupN; it interns the resulting string, but these are all unique. n := NewName(lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen))) statuniqgen++ @@ -377,18 +377,18 @@ func staticname(t *types.Type) *ir.Node { } // readonlystaticname returns a name backed by a (writable) static data symbol. -func readonlystaticname(t *types.Type) *ir.Node { +func readonlystaticname(t *types.Type) ir.Node { n := staticname(t) n.MarkReadonly() n.Sym().Linksym().Set(obj.AttrContentAddressable, true) return n } -func isSimpleName(n *ir.Node) bool { +func isSimpleName(n ir.Node) bool { return (n.Op() == ir.ONAME || n.Op() == ir.OMETHEXPR) && n.Class() != ir.PAUTOHEAP && n.Class() != ir.PEXTERN } -func litas(l *ir.Node, r *ir.Node, init *ir.Nodes) { +func litas(l ir.Node, r ir.Node, init *ir.Nodes) { a := ir.Nod(ir.OAS, l, r) a = typecheck(a, ctxStmt) a = walkexpr(a, init) @@ -405,7 +405,7 @@ const ( // getdyn calculates the initGenType for n. // If top is false, getdyn is recursing. -func getdyn(n *ir.Node, top bool) initGenType { +func getdyn(n ir.Node, top bool) initGenType { switch n.Op() { default: if isGoConst(n) { @@ -447,7 +447,7 @@ func getdyn(n *ir.Node, top bool) initGenType { } // isStaticCompositeLiteral reports whether n is a compile-time constant. -func isStaticCompositeLiteral(n *ir.Node) bool { +func isStaticCompositeLiteral(n ir.Node) bool { switch n.Op() { case ir.OSLICELIT: return false @@ -509,13 +509,13 @@ const ( // fixedlit handles struct, array, and slice literals. // TODO: expand documentation. 
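
Concretely, the static-init machinery above lets package-level declarations like

	var a = [2]int{1, 2}
	var b = a

be resolved entirely at link time: staticassign lays out a's data symbol directly, and staticcopy then initializes b from the already-initialized a, so neither assignment survives into the generated init function. Only when tryStaticInit fails (after dumping the offending node if base.Flag.Percent is set) does the statement fall back onto the dynamic out list.
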
-func fixedlit(ctxt initContext, kind initKind, n *ir.Node, var_ *ir.Node, init *ir.Nodes) { +func fixedlit(ctxt initContext, kind initKind, n ir.Node, var_ ir.Node, init *ir.Nodes) { isBlank := var_ == ir.BlankNode - var splitnode func(*ir.Node) (a *ir.Node, value *ir.Node) + var splitnode func(ir.Node) (a ir.Node, value ir.Node) switch n.Op() { case ir.OARRAYLIT, ir.OSLICELIT: var k int64 - splitnode = func(r *ir.Node) (*ir.Node, *ir.Node) { + splitnode = func(r ir.Node) (ir.Node, ir.Node) { if r.Op() == ir.OKEY { k = indexconst(r.Left()) if k < 0 { @@ -531,7 +531,7 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.Node, var_ *ir.Node, init * return a, r } case ir.OSTRUCTLIT: - splitnode = func(r *ir.Node) (*ir.Node, *ir.Node) { + splitnode = func(r ir.Node) (ir.Node, ir.Node) { if r.Op() != ir.OSTRUCTKEY { base.Fatalf("fixedlit: rhs not OSTRUCTKEY: %v", r) } @@ -576,7 +576,7 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.Node, var_ *ir.Node, init * case initKindStatic: genAsStatic(a) case initKindDynamic, initKindLocalCode: - a = orderStmtInPlace(a, map[string][]*ir.Node{}) + a = orderStmtInPlace(a, map[string][]ir.Node{}) a = walkstmt(a) init.Append(a) default: @@ -586,7 +586,7 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.Node, var_ *ir.Node, init * } } -func isSmallSliceLit(n *ir.Node) bool { +func isSmallSliceLit(n ir.Node) bool { if n.Op() != ir.OSLICELIT { return false } @@ -596,7 +596,7 @@ func isSmallSliceLit(n *ir.Node) bool { return smallintconst(r) && (n.Type().Elem().Width == 0 || r.Int64Val() <= smallArrayBytes/n.Type().Elem().Width) } -func slicelit(ctxt initContext, n *ir.Node, var_ *ir.Node, init *ir.Nodes) { +func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) { // make an array type corresponding the number of elements we have t := types.NewArray(n.Type().Elem(), n.Right().Int64Val()) dowidth(t) @@ -639,7 +639,7 @@ func slicelit(ctxt initContext, n *ir.Node, var_ *ir.Node, init *ir.Nodes) { // if the literal contains constants, // make static initialized array (1),(2) - var vstat *ir.Node + var vstat ir.Node mode := getdyn(n, true) if mode&initConst != 0 && !isSmallSliceLit(n) { @@ -655,7 +655,7 @@ func slicelit(ctxt initContext, n *ir.Node, var_ *ir.Node, init *ir.Nodes) { vauto := temp(types.NewPtr(t)) // set auto to point at new temp or heap (3 assign) - var a *ir.Node + var a ir.Node if x := prealloc[n]; x != nil { // temp allocated during order.go for dddarg if !types.Identical(t, x.Type()) { @@ -745,7 +745,7 @@ func slicelit(ctxt initContext, n *ir.Node, var_ *ir.Node, init *ir.Nodes) { a = ir.Nod(ir.OAS, a, value) a = typecheck(a, ctxStmt) - a = orderStmtInPlace(a, map[string][]*ir.Node{}) + a = orderStmtInPlace(a, map[string][]ir.Node{}) a = walkstmt(a) init.Append(a) } @@ -754,12 +754,12 @@ func slicelit(ctxt initContext, n *ir.Node, var_ *ir.Node, init *ir.Nodes) { a = ir.Nod(ir.OAS, var_, ir.Nod(ir.OSLICE, vauto, nil)) a = typecheck(a, ctxStmt) - a = orderStmtInPlace(a, map[string][]*ir.Node{}) + a = orderStmtInPlace(a, map[string][]ir.Node{}) a = walkstmt(a) init.Append(a) } -func maplit(n *ir.Node, m *ir.Node, init *ir.Nodes) { +func maplit(n ir.Node, m ir.Node, init *ir.Nodes) { // make the map var a := ir.Nod(ir.OMAKE, nil, nil) a.SetEsc(n.Esc()) @@ -866,7 +866,7 @@ func maplit(n *ir.Node, m *ir.Node, init *ir.Nodes) { init.Append(a) } -func anylit(n *ir.Node, var_ *ir.Node, init *ir.Nodes) { +func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { t := n.Type() switch n.Op() { default: @@ -882,7 +882,7 @@ 
func anylit(n *ir.Node, var_ *ir.Node, init *ir.Nodes) { base.Fatalf("anylit: not ptr") } - var r *ir.Node + var r ir.Node if n.Right() != nil { // n.Right is stack temporary used as backing store. init.Append(ir.Nod(ir.OAS, n.Right(), nil)) // zero backing store, just in case (#18410) @@ -959,7 +959,7 @@ func anylit(n *ir.Node, var_ *ir.Node, init *ir.Nodes) { } } -func oaslit(n *ir.Node, init *ir.Nodes) bool { +func oaslit(n ir.Node, init *ir.Nodes) bool { if n.Left() == nil || n.Right() == nil { // not a special composite literal assignment return false @@ -995,7 +995,7 @@ func oaslit(n *ir.Node, init *ir.Nodes) bool { return true } -func getlit(lit *ir.Node) int { +func getlit(lit ir.Node) int { if smallintconst(lit) { return int(lit.Int64Val()) } @@ -1003,7 +1003,7 @@ func getlit(lit *ir.Node) int { } // stataddr returns the static address of n, if n has one, or else nil. -func stataddr(n *ir.Node) *ir.Node { +func stataddr(n ir.Node) ir.Node { if n == nil { return nil } @@ -1046,7 +1046,7 @@ func stataddr(n *ir.Node) *ir.Node { return nil } -func (s *InitSchedule) initplan(n *ir.Node) { +func (s *InitSchedule) initplan(n ir.Node) { if s.initplans[n] != nil { return } @@ -1091,7 +1091,7 @@ func (s *InitSchedule) initplan(n *ir.Node) { } } -func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n *ir.Node) { +func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n ir.Node) { // special case: zero can be dropped entirely if isZero(n) { return @@ -1113,7 +1113,7 @@ func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n *ir.Node) { p.E = append(p.E, InitEntry{Xoffset: xoffset, Expr: n}) } -func isZero(n *ir.Node) bool { +func isZero(n ir.Node) bool { switch n.Op() { case ir.ONIL: return true @@ -1151,11 +1151,11 @@ func isZero(n *ir.Node) bool { return false } -func isvaluelit(n *ir.Node) bool { +func isvaluelit(n ir.Node) bool { return n.Op() == ir.OARRAYLIT || n.Op() == ir.OSTRUCTLIT } -func genAsStatic(as *ir.Node) { +func genAsStatic(as ir.Node) { if as.Left().Type() == nil { base.Fatalf("genAsStatic as.Left not typechecked") } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 262aa0e95cf59..cb73532b48524 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -40,7 +40,7 @@ const ssaDumpFile = "ssa.html" const maxOpenDefers = 8 // ssaDumpInlined holds all inlined functions when ssaDump contains a function name. -var ssaDumpInlined []*ir.Node +var ssaDumpInlined []ir.Node func initssaconfig() { types_ := ssa.NewTypes() @@ -186,7 +186,7 @@ func initssaconfig() { // function/method/interface call), where the receiver of a method call is // considered as the 0th parameter. This does not include the receiver of an // interface call. -func getParam(n *ir.Node, i int) *types.Field { +func getParam(n ir.Node, i int) *types.Field { t := n.Left().Type() if n.Op() == ir.OCALLMETH { if i == 0 { @@ -289,7 +289,7 @@ func (s *state) emitOpenDeferInfo() { // buildssa builds an SSA function for fn. // worker indicates which of the backend workers is doing the processing. -func buildssa(fn *ir.Node, worker int) *ssa.Func { +func buildssa(fn ir.Node, worker int) *ssa.Func { name := ir.FuncName(fn) printssa := false if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", or a package.name e.g. 
"compress/gzip.(*Reader).Reset" @@ -356,8 +356,8 @@ func buildssa(fn *ir.Node, worker int) *ssa.Func { // Allocate starting values s.labels = map[string]*ssaLabel{} - s.labeledNodes = map[*ir.Node]*ssaLabel{} - s.fwdVars = map[*ir.Node]*ssa.Value{} + s.labeledNodes = map[ir.Node]*ssaLabel{} + s.fwdVars = map[ir.Node]*ssa.Value{} s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem) s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.Func().OpenCodedDeferDisallowed() @@ -411,7 +411,7 @@ func buildssa(fn *ir.Node, worker int) *ssa.Func { } // Generate addresses of local declarations - s.decladdrs = map[*ir.Node]*ssa.Value{} + s.decladdrs = map[ir.Node]*ssa.Value{} var args []ssa.Param var results []ssa.Param for _, n := range fn.Func().Dcl { @@ -478,7 +478,7 @@ func buildssa(fn *ir.Node, worker int) *ssa.Func { return s.f } -func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Node) { +func dumpSourcesColumn(writer *ssa.HTMLWriter, fn ir.Node) { // Read sources of target function fn. fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename() targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Func().Endlineno.Line()) @@ -566,24 +566,24 @@ func (s *state) updateUnsetPredPos(b *ssa.Block) { // Information about each open-coded defer. type openDeferInfo struct { // The ODEFER node representing the function call of the defer - n *ir.Node + n ir.Node // If defer call is closure call, the address of the argtmp where the // closure is stored. closure *ssa.Value // The node representing the argtmp where the closure is stored - used for // function, method, or interface call, to store a closure that panic // processing can use for this defer. - closureNode *ir.Node + closureNode ir.Node // If defer call is interface call, the address of the argtmp where the // receiver is stored rcvr *ssa.Value // The node representing the argtmp where the receiver is stored - rcvrNode *ir.Node + rcvrNode ir.Node // The addresses of the argtmps where the evaluated arguments of the defer // function call are stored. argVals []*ssa.Value // The nodes representing the argtmps where the args of the defer are stored - argNodes []*ir.Node + argNodes []ir.Node } type state struct { @@ -594,11 +594,11 @@ type state struct { f *ssa.Func // Node for function - curfn *ir.Node + curfn ir.Node // labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f labels map[string]*ssaLabel - labeledNodes map[*ir.Node]*ssaLabel + labeledNodes map[ir.Node]*ssaLabel // unlabeled break and continue statement tracking breakTo *ssa.Block // current target for plain break statement @@ -610,18 +610,18 @@ type state struct { // variable assignments in the current block (map from variable symbol to ssa value) // *Node is the unique identifier (an ONAME Node) for the variable. // TODO: keep a single varnum map, then make all of these maps slices instead? - vars map[*ir.Node]*ssa.Value + vars map[ir.Node]*ssa.Value // fwdVars are variables that are used before they are defined in the current block. // This map exists just to coalesce multiple references into a single FwdRef op. // *Node is the unique identifier (an ONAME Node) for the variable. - fwdVars map[*ir.Node]*ssa.Value + fwdVars map[ir.Node]*ssa.Value // all defined variables at the end of each block. Indexed by block ID. - defvars []map[*ir.Node]*ssa.Value + defvars []map[ir.Node]*ssa.Value // addresses of PPARAM and PPARAMOUT variables. - decladdrs map[*ir.Node]*ssa.Value + decladdrs map[ir.Node]*ssa.Value // starting values. 
Memory, stack pointer, and globals pointer startmem *ssa.Value @@ -629,7 +629,7 @@ type state struct { sb *ssa.Value // value representing address of where deferBits autotmp is stored deferBitsAddr *ssa.Value - deferBitsTemp *ir.Node + deferBitsTemp ir.Node // line number stack. The current line number is top of stack line []src.XPos @@ -641,7 +641,7 @@ type state struct { panics map[funcLine]*ssa.Block // list of PPARAMOUT (return) variables. - returns []*ir.Node + returns []ir.Node cgoUnsafeArgs bool hasdefer bool // whether the function contains a defer statement @@ -693,7 +693,7 @@ func (s *state) Fatalf(msg string, args ...interface{}) { func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) } func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() } -func ssaMarker(name string) *ir.Node { +func ssaMarker(name string) ir.Node { return NewName(&types.Sym{Name: name}) } @@ -717,7 +717,7 @@ func (s *state) startBlock(b *ssa.Block) { s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock) } s.curBlock = b - s.vars = map[*ir.Node]*ssa.Value{} + s.vars = map[ir.Node]*ssa.Value{} for n := range s.fwdVars { delete(s.fwdVars, n) } @@ -1059,7 +1059,7 @@ func (s *state) stmtList(l ir.Nodes) { } // stmt converts the statement n to SSA and adds it to s. -func (s *state) stmt(n *ir.Node) { +func (s *state) stmt(n ir.Node) { if !(n.Op() == ir.OVARKILL || n.Op() == ir.OVARLIVE || n.Op() == ir.OVARDEF) { // OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging. s.pushLine(n.Pos()) @@ -1999,7 +1999,7 @@ func (s *state) ssaShiftOp(op ir.Op, t *types.Type, u *types.Type) ssa.Op { } // expr converts the expression n to ssa, adds it to s and returns the ssa result. -func (s *state) expr(n *ir.Node) *ssa.Value { +func (s *state) expr(n ir.Node) *ssa.Value { if hasUniquePos(n) { // ONAMEs and named OLITERALs have the line number // of the decl, not the use. See issue 14742. @@ -2790,7 +2790,7 @@ func (s *state) expr(n *ir.Node) *ssa.Value { // If inplace is true, it writes the result of the OAPPEND expression n // back to the slice being appended to, and returns nil. // inplace MUST be set to false if the slice can be SSA'd. -func (s *state) append(n *ir.Node, inplace bool) *ssa.Value { +func (s *state) append(n ir.Node, inplace bool) *ssa.Value { // If inplace is false, process as expression "append(s, e1, e2, e3)": // // ptr, len, cap := s @@ -2948,7 +2948,7 @@ func (s *state) append(n *ir.Node, inplace bool) *ssa.Value { // if cond is true and no if cond is false. // This function is intended to handle && and || better than just calling // s.expr(cond) and branching on the result. -func (s *state) condBranch(cond *ir.Node, yes, no *ssa.Block, likely int8) { +func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) { switch cond.Op() { case ir.OANDAND: mid := s.f.NewBlock(ssa.BlockPlain) @@ -3000,7 +3000,7 @@ const ( // If deref is true, then we do left = *right instead (and right has already been nil-checked). // If deref is true and right == nil, just do left = 0. // skip indicates assignments (at the top level) that can be avoided. 
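
condBranch above is why a boolean operator that feeds a branch never materializes a bool. The OANDAND case, for instance, turns (a, b, yes, no are placeholders)

	if a() && b() {
		yes()
	} else {
		no()
	}

into chained control flow:

	if a() {
		if b() { yes() } else { no() }
	} else {
		no()
	}

built out of blocks: the first test branches into the mid block that evaluates b(), both false edges share the no target, and the likely hint is threaded through the nested condBranch calls.
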
-func (s *state) assign(left *ir.Node, right *ssa.Value, deref bool, skip skipMask) { +func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask) { if left.Op() == ir.ONAME && ir.IsBlank(left) { return } @@ -3254,7 +3254,7 @@ var intrinsics map[intrinsicKey]intrinsicBuilder // An intrinsicBuilder converts a call node n into an ssa value that // implements that call as an intrinsic. args is a list of arguments to the func. -type intrinsicBuilder func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value +type intrinsicBuilder func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value type intrinsicKey struct { arch *sys.Arch @@ -3319,7 +3319,7 @@ func init() { /******** runtime ********/ if !instrumenting { add("runtime", "slicebytetostringtmp", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { // Compiler frontend optimizations emit OBYTES2STRTMP nodes // for the backend instead of slicebytetostringtmp calls // when not instrumenting. @@ -3328,7 +3328,7 @@ func init() { all...) } addF("runtime/internal/math", "MulUintptr", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { if s.config.PtrSize == 4 { return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1]) } @@ -3336,90 +3336,90 @@ func init() { }, sys.AMD64, sys.I386, sys.MIPS64) add("runtime", "KeepAlive", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0]) s.vars[memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem()) return nil }, all...) add("runtime", "getclosureptr", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr) }, all...) add("runtime", "getcallerpc", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr) }, all...) add("runtime", "getcallersp", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr) }, all...) 
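
The intrinsics table being populated here is consulted when calls are lowered: if the (arch, package, function) key has a builder, the call node is replaced by whatever SSA the builder returns and no call is emitted. Schematically, on the consumption side (simplified from isIntrinsicCall/intrinsicCall):

	if v := findIntrinsic(n.Left().Sym()); v != nil {
		return v(s, n, s.intrinsicArgs(n)) // e.g. Ctz32 -> one OpCtz32 value
	}

so on the architectures listed with each addF entry, runtime/internal/sys.Ctz32 and friends cost a single machine instruction rather than a function call.
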
/******** runtime/internal/sys ********/ addF("runtime/internal/sys", "Ctz32", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) addF("runtime/internal/sys", "Ctz64", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) addF("runtime/internal/sys", "Bswap32", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpBswap32, types.Types[types.TUINT32], args[0]) }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X) addF("runtime/internal/sys", "Bswap64", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpBswap64, types.Types[types.TUINT64], args[0]) }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X) /******** runtime/internal/atomic ********/ addF("runtime/internal/atomic", "Load", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) }, sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Load8", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[types.TUINT8], types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v) }, sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Load64", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) }, sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "LoadAcq", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) }, sys.PPC64, sys.S390X) addF("runtime/internal/atomic", "LoadAcq64", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) }, sys.PPC64) addF("runtime/internal/atomic", 
"Loadp", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v) @@ -3427,62 +3427,62 @@ func init() { sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Store", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Store8", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Store64", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "StorepNoWB", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "StoreRel", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.PPC64, sys.S390X) addF("runtime/internal/atomic", "StoreRel64", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.PPC64) addF("runtime/internal/atomic", "Xchg", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) }, sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Xchg64", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) }, sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) - type atomicOpEmitter func(s *state, n *ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) + type atomicOpEmitter func(s *state, n 
ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.EType, emit atomicOpEmitter) intrinsicBuilder { - return func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + return func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { // Target Atomic feature is identified by dynamic detection addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), arm64HasATOMICS, s.sb) v := s.load(types.Types[types.TBOOL], addr) @@ -3516,7 +3516,7 @@ func init() { } } - atomicXchgXaddEmitterARM64 := func(s *state, n *ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) { + atomicXchgXaddEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) { v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v) @@ -3529,14 +3529,14 @@ func init() { sys.ARM64) addF("runtime/internal/atomic", "Xadd", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) }, sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Xadd64", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) @@ -3551,28 +3551,28 @@ func init() { sys.ARM64) addF("runtime/internal/atomic", "Cas", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v) }, sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Cas64", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v) }, sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "CasRel", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v) }, sys.PPC64) - atomicCasEmitterARM64 := func(s *state, n *ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) { + atomicCasEmitterARM64 := func(s *state, n 
ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) { v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v) @@ -3586,31 +3586,31 @@ func init() { sys.ARM64) addF("runtime/internal/atomic", "And8", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X) addF("runtime/internal/atomic", "And", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X) addF("runtime/internal/atomic", "Or8", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X) addF("runtime/internal/atomic", "Or", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X) - atomicAndOrEmitterARM64 := func(s *state, n *ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) { + atomicAndOrEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) { s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem()) } @@ -3659,52 +3659,52 @@ func init() { /******** math ********/ addF("math", "Sqrt", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpSqrt, types.Types[types.TFLOAT64], args[0]) }, sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm) addF("math", "Trunc", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpTrunc, types.Types[types.TFLOAT64], args[0]) }, sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm) addF("math", "Ceil", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpCeil, types.Types[types.TFLOAT64], args[0]) }, sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm) addF("math", "Floor", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpFloor, types.Types[types.TFLOAT64], args[0]) }, sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm) addF("math", "Round", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpRound, types.Types[types.TFLOAT64], args[0]) }, sys.ARM64, sys.PPC64, sys.S390X) addF("math", "RoundToEven", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpRoundToEven, types.Types[types.TFLOAT64], args[0]) }, sys.ARM64, sys.S390X, sys.Wasm) addF("math", "Abs", - 
func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpAbs, types.Types[types.TFLOAT64], args[0]) }, sys.ARM64, sys.ARM, sys.PPC64, sys.Wasm) addF("math", "Copysign", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue2(ssa.OpCopysign, types.Types[types.TFLOAT64], args[0], args[1]) }, sys.PPC64, sys.Wasm) addF("math", "FMA", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2]) }, sys.ARM64, sys.PPC64, sys.S390X) addF("math", "FMA", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { if !s.config.UseFMA { s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64] return s.variable(n, types.Types[types.TFLOAT64]) @@ -3736,7 +3736,7 @@ func init() { }, sys.AMD64) addF("math", "FMA", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { if !s.config.UseFMA { s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64] return s.variable(n, types.Types[types.TFLOAT64]) @@ -3769,8 +3769,8 @@ func init() { }, sys.ARM) - makeRoundAMD64 := func(op ssa.Op) func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { - return func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + makeRoundAMD64 := func(op ssa.Op) func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + return func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasSSE41) b := s.endBlock() b.Kind = ssa.BlockIf @@ -3812,17 +3812,17 @@ func init() { /******** math/bits ********/ addF("math/bits", "TrailingZeros64", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) addF("math/bits", "TrailingZeros32", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) addF("math/bits", "TrailingZeros16", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0]) c := s.constInt32(types.Types[types.TUINT32], 1<<16) y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c) @@ -3830,12 +3830,12 @@ func init() { }, sys.MIPS) addF("math/bits", "TrailingZeros16", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpCtz16, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm) addF("math/bits", "TrailingZeros16", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0]) c := s.constInt64(types.Types[types.TUINT64], 1<<16) y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c) @@ -3843,7 
+3843,7 @@ func init() { }, sys.S390X, sys.PPC64) addF("math/bits", "TrailingZeros8", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0]) c := s.constInt32(types.Types[types.TUINT32], 1<<8) y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c) @@ -3851,12 +3851,12 @@ func init() { }, sys.MIPS) addF("math/bits", "TrailingZeros8", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpCtz8, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.ARM, sys.ARM64, sys.Wasm) addF("math/bits", "TrailingZeros8", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0]) c := s.constInt64(types.Types[types.TUINT64], 1<<8) y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c) @@ -3868,17 +3868,17 @@ func init() { // ReverseBytes inlines correctly, no need to intrinsify it. // ReverseBytes16 lowers to a rotate, no need for anything special here. addF("math/bits", "Len64", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) addF("math/bits", "Len32", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.ARM64) addF("math/bits", "Len32", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { if s.config.PtrSize == 4 { return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0]) } @@ -3887,7 +3887,7 @@ func init() { }, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) addF("math/bits", "Len16", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { if s.config.PtrSize == 4 { x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0]) return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x) @@ -3897,12 +3897,12 @@ func init() { }, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) addF("math/bits", "Len16", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpBitLen16, types.Types[types.TINT], args[0]) }, sys.AMD64) addF("math/bits", "Len8", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { if s.config.PtrSize == 4 { x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0]) return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x) @@ -3912,12 +3912,12 @@ func init() { }, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) addF("math/bits", "Len8", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpBitLen8, types.Types[types.TINT], args[0]) }, sys.AMD64) addF("math/bits", "Len", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { if 
s.config.PtrSize == 4 { return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0]) } @@ -3926,27 +3926,27 @@ func init() { sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) // LeadingZeros is handled because it trivially calls Len. addF("math/bits", "Reverse64", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0]) }, sys.ARM64) addF("math/bits", "Reverse32", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0]) }, sys.ARM64) addF("math/bits", "Reverse16", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpBitRev16, types.Types[types.TINT], args[0]) }, sys.ARM64) addF("math/bits", "Reverse8", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpBitRev8, types.Types[types.TINT], args[0]) }, sys.ARM64) addF("math/bits", "Reverse", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { if s.config.PtrSize == 4 { return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0]) } @@ -3954,29 +3954,29 @@ func init() { }, sys.ARM64) addF("math/bits", "RotateLeft8", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue2(ssa.OpRotateLeft8, types.Types[types.TUINT8], args[0], args[1]) }, sys.AMD64) addF("math/bits", "RotateLeft16", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue2(ssa.OpRotateLeft16, types.Types[types.TUINT16], args[0], args[1]) }, sys.AMD64) addF("math/bits", "RotateLeft32", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue2(ssa.OpRotateLeft32, types.Types[types.TUINT32], args[0], args[1]) }, sys.AMD64, sys.ARM, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm) addF("math/bits", "RotateLeft64", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue2(ssa.OpRotateLeft64, types.Types[types.TUINT64], args[0], args[1]) }, sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm) alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...) 
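Several of the math/bits builders above branch on s.config.PtrSize, using the 32-bit op directly on 32-bit targets and zero-extending into the 64-bit op elsewhere. A source-level analogue of that dispatch, with ptrSize standing in for s.config.PtrSize:

    package main

    import (
        "fmt"
        "math/bits"
    )

    // bitLen mirrors the lowering of math/bits.Len: pick the op width from
    // the target pointer size, not from the value.
    func bitLen(x uint, ptrSize int64) int {
        if ptrSize == 4 {
            return bits.Len32(uint32(x))
        }
        return bits.Len64(uint64(x))
    }

    func main() {
        fmt.Println(bitLen(1024, 4), bitLen(1024, 8)) // 11 11
    }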
- makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { - return func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + return func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasPOPCNT) b := s.endBlock() b.Kind = ssa.BlockIf @@ -4011,7 +4011,7 @@ func init() { makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64), sys.AMD64) addF("math/bits", "OnesCount64", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpPopCount64, types.Types[types.TINT], args[0]) }, sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm) @@ -4019,7 +4019,7 @@ func init() { makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32), sys.AMD64) addF("math/bits", "OnesCount32", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpPopCount32, types.Types[types.TINT], args[0]) }, sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm) @@ -4027,12 +4027,12 @@ func init() { makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16), sys.AMD64) addF("math/bits", "OnesCount16", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpPopCount16, types.Types[types.TINT], args[0]) }, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm) addF("math/bits", "OnesCount8", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpPopCount8, types.Types[types.TINT], args[0]) }, sys.S390X, sys.PPC64, sys.Wasm) @@ -4040,25 +4040,25 @@ func init() { makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32), sys.AMD64) addF("math/bits", "Mul64", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1]) }, sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64) alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE) addF("math/bits", "Add64", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2]) }, sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X) alias("math/bits", "Add", "math/bits", "Add64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X) addF("math/bits", "Sub64", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2]) }, sys.AMD64, sys.ARM64, sys.S390X) alias("math/bits", "Sub", "math/bits", "Sub64", sys.ArchAMD64, sys.ArchARM64, sys.ArchS390X) addF("math/bits", "Div64", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { // check for divide-by-zero/overflow and panic with appropriate message cmpZero := 
s.newValue2(s.ssaOp(ir.ONE, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[2], s.zeroVal(types.Types[types.TUINT64])) s.check(cmpZero, panicdivide) @@ -4118,7 +4118,7 @@ func init() { /******** math/big ********/ add("math/big", "mulWW", - func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1]) }, sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64LE, sys.ArchPPC64, sys.ArchS390X) @@ -4156,7 +4156,7 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder { return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}] } -func isIntrinsicCall(n *ir.Node) bool { +func isIntrinsicCall(n ir.Node) bool { if n == nil || n.Left() == nil { return false } @@ -4164,7 +4164,7 @@ func isIntrinsicCall(n *ir.Node) bool { } // intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation. -func (s *state) intrinsicCall(n *ir.Node) *ssa.Value { +func (s *state) intrinsicCall(n ir.Node) *ssa.Value { v := findIntrinsic(n.Left().Sym())(s, n, s.intrinsicArgs(n)) if ssa.IntrinsicsDebug > 0 { x := v @@ -4180,9 +4180,9 @@ func (s *state) intrinsicCall(n *ir.Node) *ssa.Value { } // intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them. -func (s *state) intrinsicArgs(n *ir.Node) []*ssa.Value { +func (s *state) intrinsicArgs(n ir.Node) []*ssa.Value { // Construct map of temps; see comments in s.call about the structure of n. - temps := map[*ir.Node]*ssa.Value{} + temps := map[ir.Node]*ssa.Value{} for _, a := range n.List().Slice() { if a.Op() != ir.OAS { s.Fatalf("non-assignment as a temp function argument %v", a.Op()) @@ -4215,7 +4215,7 @@ func (s *state) intrinsicArgs(n *ir.Node) []*ssa.Value { // call. We will also record funcdata information on where the args are stored // (as well as the deferBits variable), and this will enable us to run the proper // defer calls during panics. -func (s *state) openDeferRecord(n *ir.Node) { +func (s *state) openDeferRecord(n ir.Node) { // Do any needed expression evaluation for the args (including the // receiver, if any). This may be evaluating something like 'autotmp_3 = // once.mutex'. Such a statement will create a mapping in s.vars[] from @@ -4224,7 +4224,7 @@ func (s *state) openDeferRecord(n *ir.Node) { s.stmtList(n.List()) var args []*ssa.Value - var argNodes []*ir.Node + var argNodes []ir.Node opendefer := &openDeferInfo{ n: n, @@ -4236,7 +4236,7 @@ func (s *state) openDeferRecord(n *ir.Node) { // call the function directly if it is a static function. closureVal := s.expr(fn) closure := s.openDeferSave(nil, fn.Type(), closureVal) - opendefer.closureNode = closure.Aux.(*ir.Node) + opendefer.closureNode = closure.Aux.(ir.Node) if !(fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC) { opendefer.closure = closure } @@ -4249,7 +4249,7 @@ func (s *state) openDeferRecord(n *ir.Node) { // runtime panic code to use. But in the defer exit code, we will // call the method directly. closure := s.openDeferSave(nil, fn.Type(), closureVal) - opendefer.closureNode = closure.Aux.(*ir.Node) + opendefer.closureNode = closure.Aux.(ir.Node) } else { if fn.Op() != ir.ODOTINTER { base.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op()) @@ -4259,8 +4259,8 @@ func (s *state) openDeferRecord(n *ir.Node) { // Important to get the receiver type correct, so it is recognized // as a pointer for GC purposes. 
opendefer.rcvr = s.openDeferSave(nil, fn.Type().Recv().Type, rcvr) - opendefer.closureNode = opendefer.closure.Aux.(*ir.Node) - opendefer.rcvrNode = opendefer.rcvr.Aux.(*ir.Node) + opendefer.closureNode = opendefer.closure.Aux.(ir.Node) + opendefer.rcvrNode = opendefer.rcvr.Aux.(ir.Node) } for _, argn := range n.Rlist().Slice() { var v *ssa.Value @@ -4270,7 +4270,7 @@ func (s *state) openDeferRecord(n *ir.Node) { v = s.openDeferSave(argn, argn.Type(), nil) } args = append(args, v) - argNodes = append(argNodes, v.Aux.(*ir.Node)) + argNodes = append(argNodes, v.Aux.(ir.Node)) } opendefer.argVals = args opendefer.argNodes = argNodes @@ -4292,7 +4292,7 @@ func (s *state) openDeferRecord(n *ir.Node) { // type t is non-SSAable, then n must be non-nil (and val should be nil) and n is // evaluated (via s.addr() below) to get the value that is to be stored. The // function returns an SSA value representing a pointer to the autotmp location. -func (s *state) openDeferSave(n *ir.Node, t *types.Type, val *ssa.Value) *ssa.Value { +func (s *state) openDeferSave(n ir.Node, t *types.Type, val *ssa.Value) *ssa.Value { canSSA := canSSAType(t) var pos src.XPos if canSSA { @@ -4476,17 +4476,17 @@ func (s *state) openDeferExit() { } } -func (s *state) callResult(n *ir.Node, k callKind) *ssa.Value { +func (s *state) callResult(n ir.Node, k callKind) *ssa.Value { return s.call(n, k, false) } -func (s *state) callAddr(n *ir.Node, k callKind) *ssa.Value { +func (s *state) callAddr(n ir.Node, k callKind) *ssa.Value { return s.call(n, k, true) } // Calls the function n using the specified call type. // Returns the address of the return value (or nil if none). -func (s *state) call(n *ir.Node, k callKind, returnResultAddr bool) *ssa.Value { +func (s *state) call(n ir.Node, k callKind, returnResultAddr bool) *ssa.Value { s.prevCall = nil var sym *types.Sym // target symbol (if static) var closure *ssa.Value // ptr to closure to run (if dynamic) @@ -4788,7 +4788,7 @@ func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) { } // getMethodClosure returns a value representing the closure for a method call -func (s *state) getMethodClosure(fn *ir.Node) *ssa.Value { +func (s *state) getMethodClosure(fn ir.Node) *ssa.Value { // Make a name n2 for the function. // fn.Sym might be sync.(*Mutex).Unlock. // Make a PFUNC node out of that, then evaluate it. @@ -4805,7 +4805,7 @@ func (s *state) getMethodClosure(fn *ir.Node) *ssa.Value { // getClosureAndRcvr returns values for the appropriate closure and receiver of an // interface call -func (s *state) getClosureAndRcvr(fn *ir.Node) (*ssa.Value, *ssa.Value) { +func (s *state) getClosureAndRcvr(fn ir.Node) (*ssa.Value, *ssa.Value) { i := s.expr(fn.Left()) itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i) s.nilCheck(itab) @@ -4829,7 +4829,7 @@ func etypesign(e types.EType) int8 { // addr converts the address of the expression n to SSA, adds it to s and returns the SSA result. // The value that the returned Value represents is guaranteed to be non-nil. -func (s *state) addr(n *ir.Node) *ssa.Value { +func (s *state) addr(n ir.Node) *ssa.Value { if n.Op() != ir.ONAME { s.pushLine(n.Pos()) defer s.popLine() @@ -4931,7 +4931,7 @@ func (s *state) addr(n *ir.Node) *ssa.Value { // canSSA reports whether n is SSA-able. // n must be an ONAME (or an ODOT sequence with an ONAME base). 
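A recurring mechanical change in these hunks is v.Aux.(*ir.Node) becoming v.Aux.(ir.Node). Aux is an interface{}, and a type assertion to an interface type succeeds whenever the stored value's dynamic type implements it, so the assertions keep working once nodes are handed around as the ir.Node interface. Illustrated with stand-in types:

    package main

    import "fmt"

    // Node stands in for ir.Node; nameNode for one concrete node kind.
    type Node interface{ Op() string }

    type nameNode struct{ op string }

    func (n *nameNode) Op() string { return n.op }

    func main() {
        var aux interface{} = &nameNode{op: "ONAME"} // stored, as in openDeferSave
        n := aux.(Node)                              // read back, as in openDeferRecord
        fmt.Println(n.Op())                          // ONAME
    }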
-func (s *state) canSSA(n *ir.Node) bool { +func (s *state) canSSA(n ir.Node) bool { if base.Flag.N != 0 { return false } @@ -5012,7 +5012,7 @@ func canSSAType(t *types.Type) bool { } // exprPtr evaluates n to a pointer and nil-checks it. -func (s *state) exprPtr(n *ir.Node, bounded bool, lineno src.XPos) *ssa.Value { +func (s *state) exprPtr(n ir.Node, bounded bool, lineno src.XPos) *ssa.Value { p := s.expr(n) if bounded || n.NonNil() { if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 { @@ -5151,7 +5151,7 @@ func (s *state) check(cmp *ssa.Value, fn *obj.LSym) { s.startBlock(bNext) } -func (s *state) intDivide(n *ir.Node, a, b *ssa.Value) *ssa.Value { +func (s *state) intDivide(n ir.Node, a, b *ssa.Value) *ssa.Value { needcheck := true switch b.Op { case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64: @@ -5370,7 +5370,7 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) { // putArg evaluates n for the purpose of passing it as an argument to a function and returns the corresponding Param for the call. // If forLateExpandedCall is true, it returns the argument value to pass to the call operation. // If forLateExpandedCall is false, then the value is stored at the specified stack offset, and the returned value is nil. -func (s *state) putArg(n *ir.Node, t *types.Type, off int64, forLateExpandedCall bool) (ssa.Param, *ssa.Value) { +func (s *state) putArg(n ir.Node, t *types.Type, off int64, forLateExpandedCall bool) (ssa.Param, *ssa.Value) { var a *ssa.Value if forLateExpandedCall { if !canSSAType(t) { @@ -5384,7 +5384,7 @@ func (s *state) putArg(n *ir.Node, t *types.Type, off int64, forLateExpandedCall return ssa.Param{Type: t, Offset: int32(off)}, a } -func (s *state) storeArgWithBase(n *ir.Node, t *types.Type, base *ssa.Value, off int64) { +func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off int64) { pt := types.NewPtr(t) var addr *ssa.Value if base == s.sp { @@ -5545,15 +5545,15 @@ var u64_f32 = u642fcvtTab{ one: (*state).constInt64, } -func (s *state) uint64Tofloat64(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { +func (s *state) uint64Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { return s.uint64Tofloat(&u64_f64, n, x, ft, tt) } -func (s *state) uint64Tofloat32(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { +func (s *state) uint64Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { return s.uint64Tofloat(&u64_f32, n, x, ft, tt) } -func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { +func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { // if x >= 0 { // result = (floatY) x // } else { @@ -5626,15 +5626,15 @@ var u32_f32 = u322fcvtTab{ cvtF2F: ssa.OpCvt64Fto32F, } -func (s *state) uint32Tofloat64(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { +func (s *state) uint32Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { return s.uint32Tofloat(&u32_f64, n, x, ft, tt) } -func (s *state) uint32Tofloat32(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { +func (s *state) uint32Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { return s.uint32Tofloat(&u32_f32, n, x, ft, tt) } -func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { +func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { // if x >= 0 { // 
result = floatY(x) // } else { @@ -5673,7 +5673,7 @@ func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *ir.Node, x *ssa.Value, ft, } // referenceTypeBuiltin generates code for the len/cap builtins for maps and channels. -func (s *state) referenceTypeBuiltin(n *ir.Node, x *ssa.Value) *ssa.Value { +func (s *state) referenceTypeBuiltin(n ir.Node, x *ssa.Value) *ssa.Value { if !n.Left().Type().IsMap() && !n.Left().Type().IsChan() { s.Fatalf("node must be a map or a channel") } @@ -5771,22 +5771,22 @@ var f64_u32 = f2uCvtTab{ cutoff: 1 << 31, } -func (s *state) float32ToUint64(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { +func (s *state) float32ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { return s.floatToUint(&f32_u64, n, x, ft, tt) } -func (s *state) float64ToUint64(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { +func (s *state) float64ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { return s.floatToUint(&f64_u64, n, x, ft, tt) } -func (s *state) float32ToUint32(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { +func (s *state) float32ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { return s.floatToUint(&f32_u32, n, x, ft, tt) } -func (s *state) float64ToUint32(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { +func (s *state) float64ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { return s.floatToUint(&f64_u32, n, x, ft, tt) } -func (s *state) floatToUint(cvttab *f2uCvtTab, n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { +func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { // cutoff:=1<<(intY_Size-1) // if x < floatX(cutoff) { // result = uintY(x) @@ -5830,7 +5830,7 @@ func (s *state) floatToUint(cvttab *f2uCvtTab, n *ir.Node, x *ssa.Value, ft, tt // dottype generates SSA for a type assertion node. // commaok indicates whether to panic or return a bool. // If commaok is false, resok will be nil. -func (s *state) dottype(n *ir.Node, commaok bool) (res, resok *ssa.Value) { +func (s *state) dottype(n ir.Node, commaok bool) (res, resok *ssa.Value) { iface := s.expr(n.Left()) // input interface target := s.expr(n.Right()) // target type byteptr := s.f.Config.Types.BytePtr @@ -5942,7 +5942,7 @@ func (s *state) dottype(n *ir.Node, commaok bool) (res, resok *ssa.Value) { targetITab = s.expr(n.List().First()) } - var tmp *ir.Node // temporary for use with large types + var tmp ir.Node // temporary for use with large types var addr *ssa.Value // address of tmp if commaok && !canSSAType(n.Type()) { // unSSAable type, use temporary. @@ -6032,7 +6032,7 @@ func (s *state) dottype(n *ir.Node, commaok bool) (res, resok *ssa.Value) { } // variable returns the value of a variable at the current location. -func (s *state) variable(name *ir.Node, t *types.Type) *ssa.Value { +func (s *state) variable(name ir.Node, t *types.Type) *ssa.Value { v := s.vars[name] if v != nil { return v @@ -6058,7 +6058,7 @@ func (s *state) mem() *ssa.Value { return s.variable(memVar, types.TypeMem) } -func (s *state) addNamedValue(n *ir.Node, v *ssa.Value) { +func (s *state) addNamedValue(n ir.Node, v *ssa.Value) { if n.Class() == ir.Pxxx { // Don't track our marker nodes (memVar etc.). return @@ -6111,7 +6111,7 @@ type SSAGenState struct { bstart []*obj.Prog // Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include PPC and Sparc V8. 
- ScratchFpMem *ir.Node + ScratchFpMem ir.Node maxarg int64 // largest frame size for arguments to calls made by the function @@ -6194,14 +6194,14 @@ func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) { } // byXoffset implements sort.Interface for []*Node using Xoffset as the ordering. -type byXoffset []*ir.Node +type byXoffset []ir.Node func (s byXoffset) Len() int { return len(s) } func (s byXoffset) Less(i, j int) bool { return s[i].Offset() < s[j].Offset() } func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func emitStackObjects(e *ssafn, pp *Progs) { - var vars []*ir.Node + var vars []ir.Node for _, n := range e.curfn.Func().Dcl { if livenessShouldTrack(n) && n.Name().Addrtaken() { vars = append(vars, n) @@ -6677,7 +6677,7 @@ func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) { case *obj.LSym: a.Name = obj.NAME_EXTERN a.Sym = n - case *ir.Node: + case ir.Node: if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT { a.Name = obj.NAME_PARAM a.Sym = n.Orig().Sym().Linksym() @@ -6816,7 +6816,7 @@ func CheckLoweredGetClosurePtr(v *ssa.Value) { // AutoVar returns a *Node and int64 representing the auto variable and offset within it // where v should be spilled. -func AutoVar(v *ssa.Value) (*ir.Node, int64) { +func AutoVar(v *ssa.Value) (ir.Node, int64) { loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot) if v.Type.Size() > loc.Type.Size() { v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type) @@ -6927,7 +6927,7 @@ func (s *SSAGenState) UseArgs(n int64) { } // fieldIdx finds the index of the field referred to by the ODOT node n. -func fieldIdx(n *ir.Node) int { +func fieldIdx(n ir.Node) int { t := n.Left().Type() f := n.Sym() if !t.IsStruct() { @@ -6954,9 +6954,9 @@ func fieldIdx(n *ir.Node) int { // ssafn holds frontend information about a function that the backend is processing. // It also exports a bunch of compiler services for the ssa backend. type ssafn struct { - curfn *ir.Node + curfn ir.Node strings map[string]*obj.LSym // map from constant string to data symbols - scratchFpMem *ir.Node // temp for floating point register / memory moves on some architectures + scratchFpMem ir.Node // temp for floating point register / memory moves on some architectures stksize int64 // stack size for current frame stkptrsize int64 // prefix of stack containing pointers log bool // print ssa debug to the stdout @@ -6976,7 +6976,7 @@ func (e *ssafn) StringData(s string) *obj.LSym { return data } -func (e *ssafn) Auto(pos src.XPos, t *types.Type) *ir.Node { +func (e *ssafn) Auto(pos src.XPos, t *types.Type) ir.Node { n := tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list return n } @@ -7148,7 +7148,7 @@ func (e *ssafn) MyImportPath() string { return base.Ctxt.Pkgpath } -func clobberBase(n *ir.Node) *ir.Node { +func clobberBase(n ir.Node) ir.Node { if n.Op() == ir.ODOT && n.Left().Type().NumFields() == 1 { return clobberBase(n.Left()) } diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 542dc49bb0e09..fcda219737f5f 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -40,7 +40,7 @@ var ( // It's primarily used to distinguish references to named objects, // whose Pos will point back to their declaration position rather than // their usage position. 
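The position helpers here (hasUniquePos, setlineno) now take the ir.Node interface, and setlineno's n != nil test compares an interface value against nil. One property of the new representation worth noting (a general Go rule, not something this patch changes): an ir.Node holding a typed nil pointer is itself non-nil, so callers must pass a genuinely empty interface rather than a wrapped nil concrete node. With stand-in types:

    package main

    import "fmt"

    type Node interface{ Pos() int }

    type nameNode struct{}

    func (*nameNode) Pos() int { return 0 }

    func main() {
        var p *nameNode // typed nil pointer
        var wrapped Node = p
        var empty Node // zero interface value

        fmt.Println(wrapped == nil) // false: dynamic type is *nameNode
        fmt.Println(empty == nil)   // true
    }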
-func hasUniquePos(n *ir.Node) bool { +func hasUniquePos(n ir.Node) bool { switch n.Op() { case ir.ONAME, ir.OPACK: return false @@ -60,7 +60,7 @@ func hasUniquePos(n *ir.Node) bool { return true } -func setlineno(n *ir.Node) src.XPos { +func setlineno(n ir.Node) src.XPos { lno := base.Pos if n != nil && hasUniquePos(n) { base.Pos = n.Pos() @@ -102,7 +102,7 @@ func autolabel(prefix string) *types.Sym { // find all the exported symbols in package opkg // and make them available in the current package -func importdot(opkg *types.Pkg, pack *ir.Node) { +func importdot(opkg *types.Pkg, pack ir.Node) { n := 0 for _, s := range opkg.Syms { if s.Def == nil { @@ -136,7 +136,7 @@ func importdot(opkg *types.Pkg, pack *ir.Node) { } // newname returns a new ONAME Node associated with symbol s. -func NewName(s *types.Sym) *ir.Node { +func NewName(s *types.Sym) ir.Node { n := ir.NewNameAt(base.Pos, s) n.Name().Curfn = Curfn return n @@ -144,13 +144,13 @@ func NewName(s *types.Sym) *ir.Node { // nodSym makes a Node with Op op and with the Left field set to left // and the Sym field set to sym. This is for ODOT and friends. -func nodSym(op ir.Op, left *ir.Node, sym *types.Sym) *ir.Node { +func nodSym(op ir.Op, left ir.Node, sym *types.Sym) ir.Node { return nodlSym(base.Pos, op, left, sym) } // nodlSym makes a Node with position Pos, with Op op, and with the Left field set to left // and the Sym field set to sym. This is for ODOT and friends. -func nodlSym(pos src.XPos, op ir.Op, left *ir.Node, sym *types.Sym) *ir.Node { +func nodlSym(pos src.XPos, op ir.Op, left ir.Node, sym *types.Sym) ir.Node { n := ir.NodAt(pos, op, left, nil) n.SetSym(sym) return n @@ -163,21 +163,21 @@ func (x methcmp) Len() int { return len(x) } func (x methcmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] } func (x methcmp) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) } -func nodintconst(v int64) *ir.Node { +func nodintconst(v int64) ir.Node { return ir.NewLiteral(constant.MakeInt64(v)) } -func nodnil() *ir.Node { +func nodnil() ir.Node { n := ir.Nod(ir.ONIL, nil, nil) n.SetType(types.Types[types.TNIL]) return n } -func nodbool(b bool) *ir.Node { +func nodbool(b bool) ir.Node { return ir.NewLiteral(constant.MakeBool(b)) } -func nodstr(s string) *ir.Node { +func nodstr(s string) ir.Node { return ir.NewLiteral(constant.MakeString(s)) } @@ -185,7 +185,7 @@ func nodstr(s string) *ir.Node { // ONAME, OLITERAL, OTYPE, and ONONAME leaves. // If pos.IsKnown(), it sets the source position of newly // allocated nodes to pos. -func treecopy(n *ir.Node, pos src.XPos) *ir.Node { +func treecopy(n ir.Node, pos src.XPos) ir.Node { if n == nil { return nil } @@ -511,12 +511,12 @@ func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) { return ir.OXXX, "" } -func assignconv(n *ir.Node, t *types.Type, context string) *ir.Node { +func assignconv(n ir.Node, t *types.Type, context string) ir.Node { return assignconvfn(n, t, func() string { return context }) } // Convert node n for assignment to type t. -func assignconvfn(n *ir.Node, t *types.Type, context func() string) *ir.Node { +func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node { if n == nil || n.Type() == nil || n.Type().Broke() { return n } @@ -565,7 +565,7 @@ func assignconvfn(n *ir.Node, t *types.Type, context func() string) *ir.Node { // backingArrayPtrLen extracts the pointer and length from a slice or string. // This constructs two nodes referring to n, so n must be a cheapexpr. 
-func backingArrayPtrLen(n *ir.Node) (ptr, len *ir.Node) { +func backingArrayPtrLen(n ir.Node) (ptr, len ir.Node) { var init ir.Nodes c := cheapexpr(n, &init) if c != n || init.Len() != 0 { @@ -584,7 +584,7 @@ func backingArrayPtrLen(n *ir.Node) (ptr, len *ir.Node) { // labeledControl returns the control flow Node (for, switch, select) // associated with the label n, if any. -func labeledControl(n *ir.Node) *ir.Node { +func labeledControl(n ir.Node) ir.Node { if n.Op() != ir.OLABEL { base.Fatalf("labeledControl %v", n.Op()) } @@ -599,7 +599,7 @@ func labeledControl(n *ir.Node) *ir.Node { return nil } -func syslook(name string) *ir.Node { +func syslook(name string) ir.Node { s := Runtimepkg.Lookup(name) if s == nil || s.Def == nil { base.Fatalf("syslook: can't find runtime.%s", name) @@ -618,14 +618,14 @@ func typehash(t *types.Type) uint32 { // updateHasCall checks whether expression n contains any function // calls and sets the n.HasCall flag if so. -func updateHasCall(n *ir.Node) { +func updateHasCall(n ir.Node) { if n == nil { return } n.SetHasCall(calcHasCall(n)) } -func calcHasCall(n *ir.Node) bool { +func calcHasCall(n ir.Node) bool { if n.Init().Len() != 0 { // TODO(mdempsky): This seems overly conservative. return true @@ -740,7 +740,7 @@ func brrev(op ir.Op) ir.Op { // return side effect-free n, appending side effects to init. // result is assignable if n is. -func safeexpr(n *ir.Node, init *ir.Nodes) *ir.Node { +func safeexpr(n ir.Node, init *ir.Nodes) ir.Node { if n == nil { return nil } @@ -800,7 +800,7 @@ func safeexpr(n *ir.Node, init *ir.Nodes) *ir.Node { return cheapexpr(n, init) } -func copyexpr(n *ir.Node, t *types.Type, init *ir.Nodes) *ir.Node { +func copyexpr(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node { l := temp(t) a := ir.Nod(ir.OAS, l, n) a = typecheck(a, ctxStmt) @@ -811,7 +811,7 @@ func copyexpr(n *ir.Node, t *types.Type, init *ir.Nodes) *ir.Node { // return side-effect free and cheap n, appending side effects to init. // result may not be assignable. -func cheapexpr(n *ir.Node, init *ir.Nodes) *ir.Node { +func cheapexpr(n ir.Node, init *ir.Nodes) ir.Node { switch n.Op() { case ir.ONAME, ir.OLITERAL, ir.ONIL: return n @@ -957,7 +957,7 @@ func dotpath(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) ( // find missing fields that // will give shortest unique addressing. // modify the tree with missing type names. -func adddot(n *ir.Node) *ir.Node { +func adddot(n ir.Node) ir.Node { n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr)) if n.Left().Diag() { n.SetDiag(true) @@ -1116,8 +1116,8 @@ func expandmeth(t *types.Type) { } // Given funarg struct list, return list of ODCLFIELD Node fn args. 
-func structargs(tl *types.Type, mustname bool) []*ir.Node { - var args []*ir.Node +func structargs(tl *types.Type, mustname bool) []ir.Node { + var args []ir.Node gen := 0 for _, t := range tl.Fields().Slice() { s := t.Sym @@ -1250,30 +1250,30 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym != nil { inlcalls(fn) } - escapeFuncs([]*ir.Node{fn}, false) + escapeFuncs([]ir.Node{fn}, false) Curfn = nil xtop = append(xtop, fn) } -func paramNnames(ft *types.Type) []*ir.Node { - args := make([]*ir.Node, ft.NumParams()) +func paramNnames(ft *types.Type) []ir.Node { + args := make([]ir.Node, ft.NumParams()) for i, f := range ft.Params().FieldSlice() { args[i] = ir.AsNode(f.Nname) } return args } -func hashmem(t *types.Type) *ir.Node { +func hashmem(t *types.Type) ir.Node { sym := Runtimepkg.Lookup("memhash") n := NewName(sym) setNodeNameFunc(n) - n.SetType(functype(nil, []*ir.Node{ + n.SetType(functype(nil, []ir.Node{ anonfield(types.NewPtr(t)), anonfield(types.Types[types.TUINTPTR]), anonfield(types.Types[types.TUINTPTR]), - }, []*ir.Node{ + }, []ir.Node{ anonfield(types.Types[types.TUINTPTR]), })) return n @@ -1393,15 +1393,15 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool return true } -func listtreecopy(l []*ir.Node, pos src.XPos) []*ir.Node { - var out []*ir.Node +func listtreecopy(l []ir.Node, pos src.XPos) []ir.Node { + var out []ir.Node for _, n := range l { out = append(out, treecopy(n, pos)) } return out } -func liststmt(l []*ir.Node) *ir.Node { +func liststmt(l []ir.Node) ir.Node { n := ir.Nod(ir.OBLOCK, nil, nil) n.PtrList().Set(l) if len(l) != 0 { @@ -1410,7 +1410,7 @@ func liststmt(l []*ir.Node) *ir.Node { return n } -func ngotype(n *ir.Node) *types.Sym { +func ngotype(n ir.Node) *types.Sym { if n.Type() != nil { return typenamesym(n.Type()) } @@ -1419,7 +1419,7 @@ func ngotype(n *ir.Node) *types.Sym { // The result of addinit MUST be assigned back to n, e.g. // n.Left = addinit(n.Left, init) -func addinit(n *ir.Node, init []*ir.Node) *ir.Node { +func addinit(n ir.Node, init []ir.Node) ir.Node { if len(init) == 0 { return n } @@ -1518,7 +1518,7 @@ func isdirectiface(t *types.Type) bool { } // itabType loads the _type field from a runtime.itab struct. -func itabType(itab *ir.Node) *ir.Node { +func itabType(itab ir.Node) ir.Node { typ := nodSym(ir.ODOTPTR, itab, nil) typ.SetType(types.NewPtr(types.Types[types.TUINT8])) typ.SetTypecheck(1) @@ -1530,7 +1530,7 @@ func itabType(itab *ir.Node) *ir.Node { // ifaceData loads the data field from an interface. // The concrete type must be known to have type t. // It follows the pointer if !isdirectiface(t). -func ifaceData(pos src.XPos, n *ir.Node, t *types.Type) *ir.Node { +func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node { if t.IsInterface() { base.Fatalf("ifaceData interface: %v", t) } diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index c85483fafaaf8..02d38ac4b1693 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -15,7 +15,7 @@ import ( ) // typecheckswitch typechecks a switch statement. 
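For reference, the source construct the type-switch half handles, written out in ordinary Go (each case binds the guard's symbol at that case's own type, and a symbol that goes unused in every clause triggers the "declared but not used" diagnostic seen below):

    package main

    import "fmt"

    func describe(x interface{}) string {
        switch v := x.(type) {
        case nil:
            return "nil"
        case int:
            return fmt.Sprintf("int %d", v)
        case fmt.Stringer:
            return "Stringer " + v.String()
        default:
            return fmt.Sprintf("%T", v)
        }
    }

    func main() {
        fmt.Println(describe(42), describe(nil), describe(1.5))
        // int 42 nil float64
    }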
-func typecheckswitch(n *ir.Node) { +func typecheckswitch(n ir.Node) { typecheckslice(n.Init().Slice(), ctxStmt) if n.Left() != nil && n.Left().Op() == ir.OTYPESW { typecheckTypeSwitch(n) @@ -24,7 +24,7 @@ func typecheckswitch(n *ir.Node) { } } -func typecheckTypeSwitch(n *ir.Node) { +func typecheckTypeSwitch(n ir.Node) { n.Left().SetRight(typecheck(n.Left().Right(), ctxExpr)) t := n.Left().Right().Type() if t != nil && !t.IsInterface() { @@ -39,7 +39,7 @@ func typecheckTypeSwitch(n *ir.Node) { base.ErrorfAt(v.Pos(), "%v declared but not used", v.Sym()) } - var defCase, nilCase *ir.Node + var defCase, nilCase ir.Node var ts typeSet for _, ncase := range n.List().Slice() { ls := ncase.List().Slice() @@ -144,7 +144,7 @@ func (s *typeSet) add(pos src.XPos, typ *types.Type) { s.m[ls] = append(prevs, typeSetEntry{pos, typ}) } -func typecheckExprSwitch(n *ir.Node) { +func typecheckExprSwitch(n ir.Node) { t := types.Types[types.TBOOL] if n.Left() != nil { n.SetLeft(typecheck(n.Left(), ctxExpr)) @@ -172,7 +172,7 @@ func typecheckExprSwitch(n *ir.Node) { } } - var defCase *ir.Node + var defCase ir.Node var cs constSet for _, ncase := range n.List().Slice() { ls := ncase.List().Slice() @@ -225,7 +225,7 @@ func typecheckExprSwitch(n *ir.Node) { } // walkswitch walks a switch statement. -func walkswitch(sw *ir.Node) { +func walkswitch(sw ir.Node) { // Guard against double walk, see #25776. if sw.List().Len() == 0 && sw.Body().Len() > 0 { return // Was fatal, but eliminating every possible source of double-walking is hard @@ -240,7 +240,7 @@ func walkswitch(sw *ir.Node) { // walkExprSwitch generates an AST implementing sw. sw is an // expression switch. -func walkExprSwitch(sw *ir.Node) { +func walkExprSwitch(sw ir.Node) { lno := setlineno(sw) cond := sw.Left() @@ -275,7 +275,7 @@ func walkExprSwitch(sw *ir.Node) { exprname: cond, } - var defaultGoto *ir.Node + var defaultGoto ir.Node var body ir.Nodes for _, ncase := range sw.List().Slice() { label := autolabel(".s") @@ -318,7 +318,7 @@ func walkExprSwitch(sw *ir.Node) { // An exprSwitch walks an expression switch. type exprSwitch struct { - exprname *ir.Node // value being switched on + exprname ir.Node // value being switched on done ir.Nodes clauses []exprClause @@ -326,11 +326,11 @@ type exprSwitch struct { type exprClause struct { pos src.XPos - lo, hi *ir.Node - jmp *ir.Node + lo, hi ir.Node + jmp ir.Node } -func (s *exprSwitch) Add(pos src.XPos, expr, jmp *ir.Node) { +func (s *exprSwitch) Add(pos src.XPos, expr, jmp ir.Node) { c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp} if okforcmp[s.exprname.Type().Etype] && expr.Op() == ir.OLITERAL { s.clauses = append(s.clauses, c) @@ -390,10 +390,10 @@ func (s *exprSwitch) flush() { // Perform two-level binary search. 
binarySearch(len(runs), &s.done, - func(i int) *ir.Node { + func(i int) ir.Node { return ir.Nod(ir.OLE, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(runs[i-1]))) }, - func(i int, nif *ir.Node) { + func(i int, nif ir.Node) { run := runs[i] nif.SetLeft(ir.Nod(ir.OEQ, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(run)))) s.search(run, nif.PtrBody()) @@ -425,10 +425,10 @@ func (s *exprSwitch) flush() { func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) { binarySearch(len(cc), out, - func(i int) *ir.Node { + func(i int) ir.Node { return ir.Nod(ir.OLE, s.exprname, cc[i-1].hi) }, - func(i int, nif *ir.Node) { + func(i int, nif ir.Node) { c := &cc[i] nif.SetLeft(c.test(s.exprname)) nif.PtrBody().Set1(c.jmp) @@ -436,7 +436,7 @@ func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) { ) } -func (c *exprClause) test(exprname *ir.Node) *ir.Node { +func (c *exprClause) test(exprname ir.Node) ir.Node { // Integer range. if c.hi != c.lo { low := ir.NodAt(c.pos, ir.OGE, exprname, c.lo) @@ -456,7 +456,7 @@ func (c *exprClause) test(exprname *ir.Node) *ir.Node { return ir.NodAt(c.pos, ir.OEQ, exprname, c.lo) } -func allCaseExprsAreSideEffectFree(sw *ir.Node) bool { +func allCaseExprsAreSideEffectFree(sw ir.Node) bool { // In theory, we could be more aggressive, allowing any // side-effect-free expressions in cases, but it's a bit // tricky because some of that information is unavailable due @@ -478,7 +478,7 @@ func allCaseExprsAreSideEffectFree(sw *ir.Node) bool { } // hasFall reports whether stmts ends with a "fallthrough" statement. -func hasFall(stmts []*ir.Node) (bool, src.XPos) { +func hasFall(stmts []ir.Node) (bool, src.XPos) { // Search backwards for the index of the fallthrough // statement. Do not assume it'll be in the last // position, since in some cases (e.g. when the statement @@ -497,7 +497,7 @@ func hasFall(stmts []*ir.Node) (bool, src.XPos) { // walkTypeSwitch generates an AST that implements sw, where sw is a // type switch. -func walkTypeSwitch(sw *ir.Node) { +func walkTypeSwitch(sw ir.Node) { var s typeSwitch s.facename = sw.Left().Right() sw.SetLeft(nil) @@ -538,10 +538,10 @@ func walkTypeSwitch(sw *ir.Node) { s.hashname = copyexpr(dotHash, dotHash.Type(), sw.PtrBody()) br := ir.Nod(ir.OBREAK, nil, nil) - var defaultGoto, nilGoto *ir.Node + var defaultGoto, nilGoto ir.Node var body ir.Nodes for _, ncase := range sw.List().Slice() { - var caseVar *ir.Node + var caseVar ir.Node if ncase.Rlist().Len() != 0 { caseVar = ncase.Rlist().First() } @@ -592,7 +592,7 @@ func walkTypeSwitch(sw *ir.Node) { } val = ifaceData(ncase.Pos(), s.facename, singleType) } - l := []*ir.Node{ + l := []ir.Node{ ir.NodAt(ncase.Pos(), ir.ODCL, caseVar, nil), ir.NodAt(ncase.Pos(), ir.OAS, caseVar, val), } @@ -622,9 +622,9 @@ func walkTypeSwitch(sw *ir.Node) { // A typeSwitch walks a type switch. 
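Both the expression form above and the type form below funnel into binarySearch; for string switches the arrangement is two-level, first on length, then on the value itself. A standalone sketch of the equivalent lookup (findCase is a hypothetical helper, not compiler code):

    package main

    import (
        "fmt"
        "sort"
    )

    // findCase mimics the arrangement flush sets up for string switches:
    // cases sorted by (len, value); search first on length, then on value.
    func findCase(cases []string, s string) int {
        i := sort.Search(len(cases), func(i int) bool {
            c := cases[i]
            return len(c) > len(s) || (len(c) == len(s) && c >= s)
        })
        if i < len(cases) && cases[i] == s {
            return i
        }
        return -1
    }

    func main() {
        cases := []string{"go", "ssa", "node", "types"} // sorted by (len, value)
        fmt.Println(findCase(cases, "node"), findCase(cases, "ir")) // 2 -1
    }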
type typeSwitch struct { // Temporary variables (i.e., ONAMEs) used by type switch dispatch logic: - facename *ir.Node // value being type-switched on - hashname *ir.Node // type hash of the value being type-switched on - okname *ir.Node // boolean used for comma-ok type assertions + facename ir.Node // value being type-switched on + hashname ir.Node // type hash of the value being type-switched on + okname ir.Node // boolean used for comma-ok type assertions done ir.Nodes clauses []typeClause @@ -635,10 +635,10 @@ type typeClause struct { body ir.Nodes } -func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp *ir.Node) { +func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp ir.Node) { var body ir.Nodes if caseVar != nil { - l := []*ir.Node{ + l := []ir.Node{ ir.NodAt(pos, ir.ODCL, caseVar, nil), ir.NodAt(pos, ir.OAS, caseVar, nil), } @@ -703,10 +703,10 @@ func (s *typeSwitch) flush() { cc = merged binarySearch(len(cc), &s.done, - func(i int) *ir.Node { + func(i int) ir.Node { return ir.Nod(ir.OLE, s.hashname, nodintconst(int64(cc[i-1].hash))) }, - func(i int, nif *ir.Node) { + func(i int, nif ir.Node) { // TODO(mdempsky): Omit hash equality check if // there's only one type. c := cc[i] @@ -725,7 +725,7 @@ func (s *typeSwitch) flush() { // // leaf(i, nif) should setup nif (an OIF node) to test case i. In // particular, it should set nif.Left and nif.Nbody. -func binarySearch(n int, out *ir.Nodes, less func(i int) *ir.Node, leaf func(i int, nif *ir.Node)) { +func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i int, nif ir.Node)) { const binarySearchMin = 4 // minimum number of cases for binary search var do func(lo, hi int, out *ir.Nodes) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 0559dabe32ac9..4e2f205312110 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -20,7 +20,7 @@ const enableTrace = false var traceIndent []byte var skipDowidthForTracing bool -func tracePrint(title string, n *ir.Node) func(np **ir.Node) { +func tracePrint(title string, n ir.Node) func(np *ir.Node) { indent := traceIndent // guard against nil @@ -37,7 +37,7 @@ func tracePrint(title string, n *ir.Node) func(np **ir.Node) { fmt.Printf("%s: %s%s %p %s %v tc=%d\n", pos, indent, title, n, op, n, tc) traceIndent = append(traceIndent, ". "...) - return func(np **ir.Node) { + return func(np *ir.Node) { traceIndent = traceIndent[:len(traceIndent)-2] // if we have a result, use that @@ -77,10 +77,10 @@ const ( // marks variables that escape the local frame. // rewrites n.Op to be more specific in some cases. -var typecheckdefstack []*ir.Node +var typecheckdefstack []ir.Node // resolve ONONAME to definition, if any. -func resolve(n *ir.Node) (res *ir.Node) { +func resolve(n ir.Node) (res ir.Node) { if n == nil || n.Op() != ir.ONONAME { return n } @@ -115,7 +115,7 @@ func resolve(n *ir.Node) (res *ir.Node) { return r } -func typecheckslice(l []*ir.Node, top int) { +func typecheckslice(l []ir.Node, top int) { for i := range l { l[i] = typecheck(l[i], top) } @@ -166,7 +166,7 @@ func typekind(t *types.Type) string { return fmt.Sprintf("etype=%d", et) } -func cycleFor(start *ir.Node) []*ir.Node { +func cycleFor(start ir.Node) []ir.Node { // Find the start node in typecheck_tcstack. 
// We know that it must exist because each time we mark // a node with n.SetTypecheck(2) we push it on the stack, @@ -179,7 +179,7 @@ func cycleFor(start *ir.Node) []*ir.Node { } // collect all nodes with same Op - var cycle []*ir.Node + var cycle []ir.Node for _, n := range typecheck_tcstack[i:] { if n.Op() == start.Op() { cycle = append(cycle, n) @@ -189,7 +189,7 @@ func cycleFor(start *ir.Node) []*ir.Node { return cycle } -func cycleTrace(cycle []*ir.Node) string { +func cycleTrace(cycle []ir.Node) string { var s string for i, n := range cycle { s += fmt.Sprintf("\n\t%v: %v uses %v", ir.Line(n), n, cycle[(i+1)%len(cycle)]) @@ -197,12 +197,12 @@ func cycleTrace(cycle []*ir.Node) string { return s } -var typecheck_tcstack []*ir.Node +var typecheck_tcstack []ir.Node // typecheck type checks node n. // The result of typecheck MUST be assigned back to n, e.g. // n.Left = typecheck(n.Left, top) -func typecheck(n *ir.Node, top int) (res *ir.Node) { +func typecheck(n ir.Node, top int) (res ir.Node) { // cannot type check until all the source has been parsed if !typecheckok { base.Fatalf("early typecheck") @@ -317,7 +317,7 @@ func typecheck(n *ir.Node, top int) (res *ir.Node) { // value of type int (see also checkmake for comparison). // The result of indexlit MUST be assigned back to n, e.g. // n.Left = indexlit(n.Left) -func indexlit(n *ir.Node) *ir.Node { +func indexlit(n ir.Node) ir.Node { if n != nil && n.Type() != nil && n.Type().Etype == types.TIDEAL { return defaultlit(n, types.Types[types.TINT]) } @@ -326,7 +326,7 @@ func indexlit(n *ir.Node) *ir.Node { // The result of typecheck1 MUST be assigned back to n, e.g. // n.Left = typecheck1(n.Left, top) -func typecheck1(n *ir.Node, top int) (res *ir.Node) { +func typecheck1(n ir.Node, top int) (res ir.Node) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheck1", n)(&res) } @@ -569,9 +569,9 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { ir.OOROR, ir.OSUB, ir.OXOR: - var l *ir.Node + var l ir.Node var op ir.Op - var r *ir.Node + var r ir.Node if n.Op() == ir.OASOP { ok |= ctxStmt n.SetLeft(typecheck(n.Left(), ctxExpr)) @@ -1762,7 +1762,7 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { l = args[i] i++ l = typecheck(l, ctxExpr) - var r *ir.Node + var r ir.Node if i < len(args) { r = args[i] i++ @@ -2129,7 +2129,7 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) { return n } -func typecheckargs(n *ir.Node) { +func typecheckargs(n ir.Node) { if n.List().Len() != 1 || n.IsDDD() { typecheckslice(n.List().Slice(), ctxExpr) return @@ -2174,7 +2174,7 @@ func typecheckargs(n *ir.Node) { n.PtrInit().Append(as) } -func checksliceindex(l *ir.Node, r *ir.Node, tp *types.Type) bool { +func checksliceindex(l ir.Node, r ir.Node, tp *types.Type) bool { t := r.Type() if t == nil { return false @@ -2204,7 +2204,7 @@ func checksliceindex(l *ir.Node, r *ir.Node, tp *types.Type) bool { return true } -func checksliceconst(lo *ir.Node, hi *ir.Node) bool { +func checksliceconst(lo ir.Node, hi ir.Node) bool { if lo != nil && hi != nil && lo.Op() == ir.OLITERAL && hi.Op() == ir.OLITERAL && constant.Compare(lo.Val(), token.GTR, hi.Val()) { base.Errorf("invalid slice index: %v > %v", lo, hi) return false @@ -2213,7 +2213,7 @@ func checksliceconst(lo *ir.Node, hi *ir.Node) bool { return true } -func checkdefergo(n *ir.Node) { +func checkdefergo(n ir.Node) { what := "defer" if n.Op() == ir.OGO { what = "go" @@ -2268,7 +2268,7 @@ func checkdefergo(n *ir.Node) { // The result of implicitstar MUST be assigned back to n, e.g. 
// n.Left = implicitstar(n.Left) -func implicitstar(n *ir.Node) *ir.Node { +func implicitstar(n ir.Node) ir.Node { // insert implicit * if needed for fixed array t := n.Type() if t == nil || !t.IsPtr() { @@ -2287,7 +2287,7 @@ func implicitstar(n *ir.Node) *ir.Node { return n } -func onearg(n *ir.Node, f string, args ...interface{}) bool { +func onearg(n ir.Node, f string, args ...interface{}) bool { if n.Left() != nil { return true } @@ -2310,7 +2310,7 @@ func onearg(n *ir.Node, f string, args ...interface{}) bool { return true } -func twoarg(n *ir.Node) bool { +func twoarg(n ir.Node) bool { if n.Left() != nil { return true } @@ -2328,7 +2328,7 @@ func twoarg(n *ir.Node) bool { return true } -func lookdot1(errnode *ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field { +func lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field { var r *types.Field for _, f := range fs.Slice() { if dostrcmp != 0 && f.Sym.Name == s.Name { @@ -2359,7 +2359,7 @@ func lookdot1(errnode *ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, d // typecheckMethodExpr checks selector expressions (ODOT) where the // base expression is a type expression (OTYPE). -func typecheckMethodExpr(n *ir.Node) (res *ir.Node) { +func typecheckMethodExpr(n ir.Node) (res ir.Node) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckMethodExpr", n)(&res) } @@ -2447,7 +2447,7 @@ func derefall(t *types.Type) *types.Type { return t } -func lookdot(n *ir.Node, t *types.Type, dostrcmp int) *types.Field { +func lookdot(n ir.Node, t *types.Type, dostrcmp int) *types.Field { s := n.Sym() dowidth(t) @@ -2572,7 +2572,7 @@ func hasddd(t *types.Type) bool { } // typecheck assignment: type list = expression list -func typecheckaste(op ir.Op, call *ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes, desc func() string) { +func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes, desc func() string) { var t *types.Type var i int @@ -2583,7 +2583,7 @@ func typecheckaste(op ir.Op, call *ir.Node, isddd bool, tstruct *types.Type, nl return } - var n *ir.Node + var n ir.Node if nl.Len() == 1 { n = nl.First() } @@ -2774,7 +2774,7 @@ func iscomptype(t *types.Type) bool { // pushtype adds elided type information for composite literals if // appropriate, and returns the resulting expression. -func pushtype(n *ir.Node, t *types.Type) *ir.Node { +func pushtype(n ir.Node, t *types.Type) ir.Node { if n == nil || n.Op() != ir.OCOMPLIT || n.Right() != nil { return n } @@ -2797,7 +2797,7 @@ func pushtype(n *ir.Node, t *types.Type) *ir.Node { // The result of typecheckcomplit MUST be assigned back to n, e.g. // n.Left = typecheckcomplit(n.Left) -func typecheckcomplit(n *ir.Node) (res *ir.Node) { +func typecheckcomplit(n ir.Node) (res ir.Node) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckcomplit", n)(&res) } @@ -3008,7 +3008,7 @@ func typecheckcomplit(n *ir.Node) (res *ir.Node) { } // typecheckarraylit type-checks a sequence of slice/array literal elements. -func typecheckarraylit(elemType *types.Type, bound int64, elts []*ir.Node, ctx string) int64 { +func typecheckarraylit(elemType *types.Type, bound int64, elts []ir.Node, ctx string) int64 { // If there are key/value pairs, create a map to keep seen // keys so we can check for duplicate indices. 
var indices map[int64]bool @@ -3023,7 +3023,7 @@ func typecheckarraylit(elemType *types.Type, bound int64, elts []*ir.Node, ctx s for i, elt := range elts { setlineno(elt) r := elts[i] - var kv *ir.Node + var kv ir.Node if elt.Op() == ir.OKEY { elt.SetLeft(typecheck(elt.Left(), ctxExpr)) key = indexconst(elt.Left()) @@ -3086,7 +3086,7 @@ func nonexported(sym *types.Sym) bool { } // lvalue etc -func islvalue(n *ir.Node) bool { +func islvalue(n ir.Node) bool { switch n.Op() { case ir.OINDEX: if n.Left().Type() != nil && n.Left().Type().IsArray() { @@ -3112,13 +3112,13 @@ func islvalue(n *ir.Node) bool { return false } -func checklvalue(n *ir.Node, verb string) { +func checklvalue(n ir.Node, verb string) { if !islvalue(n) { base.Errorf("cannot %s %v", verb, n) } } -func checkassign(stmt *ir.Node, n *ir.Node) { +func checkassign(stmt ir.Node, n ir.Node) { // Variables declared in ORANGE are assigned on every iteration. if n.Name() == nil || n.Name().Defn != stmt || stmt.Op() == ir.ORANGE { r := outervalue(n) @@ -3156,7 +3156,7 @@ func checkassign(stmt *ir.Node, n *ir.Node) { n.SetType(nil) } -func checkassignlist(stmt *ir.Node, l ir.Nodes) { +func checkassignlist(stmt ir.Node, l ir.Nodes) { for _, n := range l.Slice() { checkassign(stmt, n) } @@ -3177,7 +3177,7 @@ func checkassignlist(stmt *ir.Node, l ir.Nodes) { // currently OK, since the only place samesafeexpr gets used on an // lvalue expression is for OSLICE and OAPPEND optimizations, and it // is correct in those settings. -func samesafeexpr(l *ir.Node, r *ir.Node) bool { +func samesafeexpr(l ir.Node, r ir.Node) bool { if l.Op() != r.Op() || !types.Identical(l.Type(), r.Type()) { return false } @@ -3215,7 +3215,7 @@ func samesafeexpr(l *ir.Node, r *ir.Node) bool { // type check assignment. // if this assignment is the definition of a var on the left side, // fill in the var's type. -func typecheckas(n *ir.Node) { +func typecheckas(n ir.Node) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckas", n)(nil) } @@ -3266,14 +3266,14 @@ func typecheckas(n *ir.Node) { } } -func checkassignto(src *types.Type, dst *ir.Node) { +func checkassignto(src *types.Type, dst ir.Node) { if op, why := assignop(src, dst.Type()); op == ir.OXXX { base.Errorf("cannot assign %v to %L in multiple assignment%s", src, dst, why) return } } -func typecheckas2(n *ir.Node) { +func typecheckas2(n ir.Node) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckas2", n)(nil) } @@ -3298,8 +3298,8 @@ func typecheckas2(n *ir.Node) { } checkassignlist(n, n.List()) - var l *ir.Node - var r *ir.Node + var l ir.Node + var r ir.Node if cl == cr { // easy ls := n.List().Slice() @@ -3406,7 +3406,7 @@ out: } // type check function definition -func typecheckfunc(n *ir.Node) { +func typecheckfunc(n ir.Node) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckfunc", n)(nil) } @@ -3441,12 +3441,12 @@ func typecheckfunc(n *ir.Node) { // The result of stringtoruneslit MUST be assigned back to n, e.g. 
// n.Left = stringtoruneslit(n.Left) -func stringtoruneslit(n *ir.Node) *ir.Node { +func stringtoruneslit(n ir.Node) ir.Node { if n.Left().Op() != ir.OLITERAL || n.Left().Val().Kind() != constant.String { base.Fatalf("stringtoarraylit %v", n) } - var l []*ir.Node + var l []ir.Node i := 0 for _, r := range n.Left().StringVal() { l = append(l, ir.Nod(ir.OKEY, nodintconst(int64(i)), nodintconst(int64(r)))) @@ -3459,7 +3459,7 @@ func stringtoruneslit(n *ir.Node) *ir.Node { return nn } -var mapqueue []*ir.Node +var mapqueue []ir.Node func checkMapKeys() { for _, n := range mapqueue { @@ -3520,7 +3520,7 @@ func setUnderlying(t, underlying *types.Type) { } } -func typecheckdeftype(n *ir.Node) { +func typecheckdeftype(n ir.Node) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckdeftype", n)(nil) } @@ -3540,7 +3540,7 @@ func typecheckdeftype(n *ir.Node) { } } -func typecheckdef(n *ir.Node) { +func typecheckdef(n ir.Node) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckdef", n)(nil) } @@ -3727,7 +3727,7 @@ ret: n.SetWalkdef(1) } -func checkmake(t *types.Type, arg string, np **ir.Node) bool { +func checkmake(t *types.Type, arg string, np *ir.Node) bool { n := *np if !n.Type().IsInteger() && n.Type().Etype != types.TIDEAL { base.Errorf("non-integer %s argument in make(%v) - %v", arg, t, n.Type()) @@ -3759,7 +3759,7 @@ func checkmake(t *types.Type, arg string, np **ir.Node) bool { return true } -func markbreak(n *ir.Node, implicit *ir.Node) { +func markbreak(n ir.Node, implicit ir.Node) { if n == nil { return } @@ -3789,7 +3789,7 @@ func markbreak(n *ir.Node, implicit *ir.Node) { } } -func markbreaklist(l ir.Nodes, implicit *ir.Node) { +func markbreaklist(l ir.Nodes, implicit ir.Node) { s := l.Slice() for i := 0; i < len(s); i++ { n := s[i] @@ -3823,7 +3823,7 @@ func isTermNodes(l ir.Nodes) bool { // Isterminating reports whether the node n, the last one in a // statement list, is a terminating statement. -func isTermNode(n *ir.Node) bool { +func isTermNode(n ir.Node) bool { switch n.Op() { // NOTE: OLABEL is treated as a separate statement, // not a separate prefix, so skipping to the last statement @@ -3872,7 +3872,7 @@ func isTermNode(n *ir.Node) bool { } // checkreturn makes sure that fn terminates appropriately. -func checkreturn(fn *ir.Node) { +func checkreturn(fn ir.Node) { if fn.Type().NumResults() != 0 && fn.Body().Len() != 0 { markbreaklist(fn.Body(), nil) if !isTermNodes(fn.Body()) { @@ -3881,12 +3881,12 @@ func checkreturn(fn *ir.Node) { } } -func deadcode(fn *ir.Node) { +func deadcode(fn ir.Node) { deadcodeslice(fn.PtrBody()) deadcodefn(fn) } -func deadcodefn(fn *ir.Node) { +func deadcodefn(fn ir.Node) { if fn.Body().Len() == 0 { return } @@ -3909,7 +3909,7 @@ func deadcodefn(fn *ir.Node) { } } - fn.PtrBody().Set([]*ir.Node{ir.Nod(ir.OEMPTY, nil, nil)}) + fn.PtrBody().Set([]ir.Node{ir.Nod(ir.OEMPTY, nil, nil)}) } func deadcodeslice(nn *ir.Nodes) { @@ -3965,7 +3965,7 @@ func deadcodeslice(nn *ir.Nodes) { } } -func deadcodeexpr(n *ir.Node) *ir.Node { +func deadcodeexpr(n ir.Node) ir.Node { // Perform dead-code elimination on short-circuited boolean // expressions involving constants with the intent of // producing a constant 'if' condition. @@ -3995,7 +3995,7 @@ func deadcodeexpr(n *ir.Node) *ir.Node { } // setTypeNode sets n to an OTYPE node representing t. 
-func setTypeNode(n *ir.Node, t *types.Type) { +func setTypeNode(n ir.Node, t *types.Type) { n.SetOp(ir.OTYPE) n.SetType(t) n.Type().Nod = n @@ -4037,12 +4037,12 @@ func curpkg() *types.Pkg { // MethodName returns the ONAME representing the method // referenced by expression n, which must be a method selector, // method expression, or method value. -func methodExprName(n *ir.Node) *ir.Node { +func methodExprName(n ir.Node) ir.Node { return ir.AsNode(methodExprFunc(n).Nname) } // MethodFunc is like MethodName, but returns the types.Field instead. -func methodExprFunc(n *ir.Node) *types.Field { +func methodExprFunc(n ir.Node) *types.Field { switch n.Op() { case ir.ODOTMETH, ir.OMETHEXPR: return n.Opt().(*types.Field) diff --git a/src/cmd/compile/internal/gc/unsafe.go b/src/cmd/compile/internal/gc/unsafe.go index c9b0dbcf2fcbf..678924b229495 100644 --- a/src/cmd/compile/internal/gc/unsafe.go +++ b/src/cmd/compile/internal/gc/unsafe.go @@ -10,7 +10,7 @@ import ( ) // evalunsafe evaluates a package unsafe operation and returns the result. -func evalunsafe(n *ir.Node) int64 { +func evalunsafe(n ir.Node) int64 { switch n.Op() { case ir.OALIGNOF, ir.OSIZEOF: n.SetLeft(typecheck(n.Left(), ctxExpr)) diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 77cf59bde8496..db8791ee05780 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -22,7 +22,7 @@ import ( const tmpstringbufsize = 32 const zeroValSize = 1024 // must match value of runtime/map.go:maxZero -func walk(fn *ir.Node) { +func walk(fn ir.Node) { Curfn = fn errorsBefore := base.Errors() @@ -81,13 +81,13 @@ func walk(fn *ir.Node) { } } -func walkstmtlist(s []*ir.Node) { +func walkstmtlist(s []ir.Node) { for i := range s { s[i] = walkstmt(s[i]) } } -func paramoutheap(fn *ir.Node) bool { +func paramoutheap(fn ir.Node) bool { for _, ln := range fn.Func().Dcl { switch ln.Class() { case ir.PPARAMOUT: @@ -106,7 +106,7 @@ func paramoutheap(fn *ir.Node) bool { // The result of walkstmt MUST be assigned back to n, e.g. // n.Left = walkstmt(n.Left) -func walkstmt(n *ir.Node) *ir.Node { +func walkstmt(n ir.Node) ir.Node { if n == nil { return n } @@ -275,7 +275,7 @@ func walkstmt(n *ir.Node) *ir.Node { if (Curfn.Type().FuncType().Outnamed && n.List().Len() > 1) || paramoutheap(Curfn) { // assign to the function out parameters, // so that reorder3 can fix up conflicts - var rl []*ir.Node + var rl []ir.Node for _, ln := range Curfn.Func().Dcl { cl := ln.Class() @@ -308,7 +308,7 @@ func walkstmt(n *ir.Node) *ir.Node { // For each return parameter (lhs), assign the corresponding result (rhs). lhs := Curfn.Type().Results() rhs := n.List().Slice() - res := make([]*ir.Node, lhs.NumFields()) + res := make([]ir.Node, lhs.NumFields()) for i, nl := range lhs.FieldSlice() { nname := ir.AsNode(nl.Nname) if isParamHeapCopy(nname) { @@ -346,20 +346,20 @@ func walkstmt(n *ir.Node) *ir.Node { // the types expressions are calculated. // compile-time constants are evaluated. 
// complex side effects like statements are appended to init -func walkexprlist(s []*ir.Node, init *ir.Nodes) { +func walkexprlist(s []ir.Node, init *ir.Nodes) { for i := range s { s[i] = walkexpr(s[i], init) } } -func walkexprlistsafe(s []*ir.Node, init *ir.Nodes) { +func walkexprlistsafe(s []ir.Node, init *ir.Nodes) { for i, n := range s { s[i] = safeexpr(n, init) s[i] = walkexpr(s[i], init) } } -func walkexprlistcheap(s []*ir.Node, init *ir.Nodes) { +func walkexprlistcheap(s []ir.Node, init *ir.Nodes) { for i, n := range s { s[i] = cheapexpr(n, init) s[i] = walkexpr(s[i], init) @@ -413,7 +413,7 @@ func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) { // The result of walkexpr MUST be assigned back to n, e.g. // n.Left = walkexpr(n.Left, init) -func walkexpr(n *ir.Node, init *ir.Nodes) *ir.Node { +func walkexpr(n ir.Node, init *ir.Nodes) ir.Node { if n == nil { return n } @@ -700,7 +700,7 @@ opswitch: r := n.Right() walkexprlistsafe(n.List().Slice(), init) r.SetLeft(walkexpr(r.Left(), init)) - var n1 *ir.Node + var n1 ir.Node if ir.IsBlank(n.List().First()) { n1 = nodnil() } else { @@ -723,7 +723,7 @@ opswitch: t := r.Left().Type() fast := mapfast(t) - var key *ir.Node + var key ir.Node if fast != mapslow { // fast versions take key by value key = r.Right() @@ -802,7 +802,7 @@ opswitch: } // typeword generates the type word of the interface value. - typeword := func() *ir.Node { + typeword := func() ir.Node { if toType.IsEmptyInterface() { return typename(fromType) } @@ -832,7 +832,7 @@ opswitch: // Optimize convT2{E,I} for many cases in which T is not pointer-shaped, // by using an existing addressable value identical to n.Left // or creating one on the stack. - var value *ir.Node + var value ir.Node switch { case fromType.Size() == 0: // n.Left is zero-sized. Use zerobase. @@ -918,7 +918,7 @@ opswitch: break } - var tab *ir.Node + var tab ir.Node if fromType.IsInterface() { // convI2I tab = typename(toType) @@ -1208,7 +1208,7 @@ opswitch: hint := n.Left() // var h *hmap - var h *ir.Node + var h ir.Node if n.Esc() == EscNone { // Allocate hmap on stack. @@ -1494,7 +1494,7 @@ opswitch: // Allocate a [n]byte of the right size. t := types.NewArray(types.Types[types.TUINT8], int64(len(sc))) - var a *ir.Node + var a ir.Node if n.Esc() == EscNone && len(sc) <= int(maxImplicitStackVarSize) { a = ir.Nod(ir.OADDR, temp(t), nil) } else { @@ -1619,7 +1619,7 @@ func markTypeUsedInInterface(t *types.Type, from *obj.LSym) { // markUsedIfaceMethod marks that an interface method is used in the current // function. n is OCALLINTER node. -func markUsedIfaceMethod(n *ir.Node) { +func markUsedIfaceMethod(n ir.Node) { ityp := n.Left().Left().Type() tsym := typenamesym(ityp).Linksym() r := obj.Addrel(Curfn.Func().LSym) @@ -1678,7 +1678,7 @@ func rtconvfn(src, dst *types.Type) (param, result types.EType) { } // TODO(josharian): combine this with its caller and simplify -func reduceSlice(n *ir.Node) *ir.Node { +func reduceSlice(n ir.Node) ir.Node { low, high, max := n.SliceBounds() if high != nil && high.Op() == ir.OLEN && samesafeexpr(n.Left(), high.Left()) { // Reduce x[i:len(x)] to x[i:]. @@ -1695,7 +1695,7 @@ func reduceSlice(n *ir.Node) *ir.Node { return n } -func ascompatee1(l *ir.Node, r *ir.Node, init *ir.Nodes) *ir.Node { +func ascompatee1(l ir.Node, r ir.Node, init *ir.Nodes) ir.Node { // convas will turn map assigns into function calls, // making it impossible for reorder3 to work. 
n := ir.Nod(ir.OAS, l, r) @@ -1707,7 +1707,7 @@ func ascompatee1(l *ir.Node, r *ir.Node, init *ir.Nodes) *ir.Node { return convas(n, init) } -func ascompatee(op ir.Op, nl, nr []*ir.Node, init *ir.Nodes) []*ir.Node { +func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node { // check assign expression list to // an expression list. called in // expr-list = expr-list @@ -1720,7 +1720,7 @@ func ascompatee(op ir.Op, nl, nr []*ir.Node, init *ir.Nodes) []*ir.Node { nr[i1] = safeexpr(nr[i1], init) } - var nn []*ir.Node + var nn []ir.Node i := 0 for ; i < len(nl); i++ { if i >= len(nr) { @@ -1744,7 +1744,7 @@ func ascompatee(op ir.Op, nl, nr []*ir.Node, init *ir.Nodes) []*ir.Node { } // fncall reports whether assigning an rvalue of type rt to an lvalue l might involve a function call. -func fncall(l *ir.Node, rt *types.Type) bool { +func fncall(l ir.Node, rt *types.Type) bool { if l.HasCall() || l.Op() == ir.OINDEXMAP { return true } @@ -1758,7 +1758,7 @@ func fncall(l *ir.Node, rt *types.Type) bool { // check assign type list to // an expression list. called in // expr-list = func() -func ascompatet(nl ir.Nodes, nr *types.Type) []*ir.Node { +func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { if nl.Len() != nr.NumFields() { base.Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields()) } @@ -1800,8 +1800,8 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []*ir.Node { } // package all the arguments that match a ... T parameter into a []T. -func mkdotargslice(typ *types.Type, args []*ir.Node) *ir.Node { - var n *ir.Node +func mkdotargslice(typ *types.Type, args []ir.Node) ir.Node { + var n ir.Node if len(args) == 0 { n = nodnil() n.SetType(typ) @@ -1820,7 +1820,7 @@ func mkdotargslice(typ *types.Type, args []*ir.Node) *ir.Node { // fixVariadicCall rewrites calls to variadic functions to use an // explicit ... argument if one is not already present. -func fixVariadicCall(call *ir.Node) { +func fixVariadicCall(call ir.Node) { fntype := call.Left().Type() if !fntype.IsVariadic() || call.IsDDD() { return @@ -1840,7 +1840,7 @@ func fixVariadicCall(call *ir.Node) { call.SetIsDDD(true) } -func walkCall(n *ir.Node, init *ir.Nodes) { +func walkCall(n ir.Node, init *ir.Nodes) { if n.Rlist().Len() != 0 { return // already walked } @@ -1853,7 +1853,7 @@ func walkCall(n *ir.Node, init *ir.Nodes) { // If this is a method call, add the receiver at the beginning of the args. if n.Op() == ir.OCALLMETH { - withRecv := make([]*ir.Node, len(args)+1) + withRecv := make([]ir.Node, len(args)+1) withRecv[0] = n.Left().Left() n.Left().SetLeft(nil) copy(withRecv[1:], args) @@ -1864,7 +1864,7 @@ func walkCall(n *ir.Node, init *ir.Nodes) { // store that argument into a temporary variable, // to prevent that calls from clobbering arguments already on the stack. // When instrumenting, all arguments might require function calls. - var tempAssigns []*ir.Node + var tempAssigns []ir.Node for i, arg := range args { updateHasCall(arg) // Determine param type. @@ -1894,14 +1894,14 @@ func walkCall(n *ir.Node, init *ir.Nodes) { } // generate code for print -func walkprint(nn *ir.Node, init *ir.Nodes) *ir.Node { +func walkprint(nn ir.Node, init *ir.Nodes) ir.Node { // Hoist all the argument evaluation up before the lock. walkexprlistcheap(nn.List().Slice(), init) // For println, add " " between elements and "\n" at the end. 
if nn.Op() == ir.OPRINTN { s := nn.List().Slice() - t := make([]*ir.Node, 0, len(s)*2) + t := make([]ir.Node, 0, len(s)*2) for i, n := range s { if i != 0 { t = append(t, nodstr(" ")) @@ -1914,7 +1914,7 @@ func walkprint(nn *ir.Node, init *ir.Nodes) *ir.Node { // Collapse runs of constant strings. s := nn.List().Slice() - t := make([]*ir.Node, 0, len(s)) + t := make([]ir.Node, 0, len(s)) for i := 0; i < len(s); { var strs []string for i < len(s) && ir.IsConst(s[i], constant.String) { @@ -1931,7 +1931,7 @@ func walkprint(nn *ir.Node, init *ir.Nodes) *ir.Node { } nn.PtrList().Set(t) - calls := []*ir.Node{mkcall("printlock", nil, init)} + calls := []ir.Node{mkcall("printlock", nil, init)} for i, n := range nn.List().Slice() { if n.Op() == ir.OLITERAL { if n.Type() == types.UntypedRune { @@ -1956,7 +1956,7 @@ func walkprint(nn *ir.Node, init *ir.Nodes) *ir.Node { continue } - var on *ir.Node + var on ir.Node switch n.Type().Etype { case types.TINTER: if n.Type().IsEmptyInterface() { @@ -2037,7 +2037,7 @@ func walkprint(nn *ir.Node, init *ir.Nodes) *ir.Node { return r } -func callnew(t *types.Type) *ir.Node { +func callnew(t *types.Type) ir.Node { dowidth(t) n := ir.Nod(ir.ONEWOBJ, typename(t), nil) n.SetType(types.NewPtr(t)) @@ -2048,7 +2048,7 @@ func callnew(t *types.Type) *ir.Node { // isReflectHeaderDataField reports whether l is an expression p.Data // where p has type reflect.SliceHeader or reflect.StringHeader. -func isReflectHeaderDataField(l *ir.Node) bool { +func isReflectHeaderDataField(l ir.Node) bool { if l.Type() != types.Types[types.TUINTPTR] { return false } @@ -2069,7 +2069,7 @@ func isReflectHeaderDataField(l *ir.Node) bool { return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader" } -func convas(n *ir.Node, init *ir.Nodes) *ir.Node { +func convas(n ir.Node, init *ir.Nodes) ir.Node { if n.Op() != ir.OAS { base.Fatalf("convas: not OAS %v", n.Op()) } @@ -2107,11 +2107,11 @@ func convas(n *ir.Node, init *ir.Nodes) *ir.Node { // be later use of an earlier lvalue. // // function calls have been removed. -func reorder3(all []*ir.Node) []*ir.Node { +func reorder3(all []ir.Node) []ir.Node { // If a needed expression may be affected by an // earlier assignment, make an early copy of that // expression and use the copy instead. - var early []*ir.Node + var early []ir.Node var mapinit ir.Nodes for i, n := range all { @@ -2166,7 +2166,7 @@ func reorder3(all []*ir.Node) []*ir.Node { // replace *np with that temp. // The result of reorder3save MUST be assigned back to n, e.g. // n.Left = reorder3save(n.Left, all, i, early) -func reorder3save(n *ir.Node, all []*ir.Node, i int, early *[]*ir.Node) *ir.Node { +func reorder3save(n ir.Node, all []ir.Node, i int, early *[]ir.Node) ir.Node { if !aliased(n, all[:i]) { return n } @@ -2180,7 +2180,7 @@ func reorder3save(n *ir.Node, all []*ir.Node, i int, early *[]*ir.Node) *ir.Node // what's the outer value that a write to n affects? // outer value means containing struct or array. -func outervalue(n *ir.Node) *ir.Node { +func outervalue(n ir.Node) ir.Node { for { switch n.Op() { case ir.OXDOT: @@ -2201,7 +2201,7 @@ func outervalue(n *ir.Node) *ir.Node { // Is it possible that the computation of r might be // affected by assignments in all? -func aliased(r *ir.Node, all []*ir.Node) bool { +func aliased(r ir.Node, all []ir.Node) bool { if r == nil { return false } @@ -2275,7 +2275,7 @@ func aliased(r *ir.Node, all []*ir.Node) bool { // does the evaluation of n only refer to variables // whose addresses have not been taken? 
// (and no other memory) -func varexpr(n *ir.Node) bool { +func varexpr(n ir.Node) bool { if n == nil { return true } @@ -2327,7 +2327,7 @@ func varexpr(n *ir.Node) bool { } // is the name l mentioned in r? -func vmatch2(l *ir.Node, r *ir.Node) bool { +func vmatch2(l ir.Node, r ir.Node) bool { if r == nil { return false } @@ -2356,7 +2356,7 @@ func vmatch2(l *ir.Node, r *ir.Node) bool { // is any name mentioned in l also mentioned in r? // called by sinit.go -func vmatch1(l *ir.Node, r *ir.Node) bool { +func vmatch1(l ir.Node, r ir.Node) bool { // isolate all left sides if l == nil || r == nil { return false @@ -2397,8 +2397,8 @@ func vmatch1(l *ir.Node, r *ir.Node) bool { // paramstoheap returns code to allocate memory for heap-escaped parameters // and to copy non-result parameters' values from the stack. -func paramstoheap(params *types.Type) []*ir.Node { - var nn []*ir.Node +func paramstoheap(params *types.Type) []ir.Node { + var nn []ir.Node for _, t := range params.Fields().Slice() { v := ir.AsNode(t.Nname) if v != nil && v.Sym() != nil && strings.HasPrefix(v.Sym().Name, "~r") { // unnamed result @@ -2451,8 +2451,8 @@ func zeroResults() { // returnsfromheap returns code to copy values for heap-escaped parameters // back to the stack. -func returnsfromheap(params *types.Type) []*ir.Node { - var nn []*ir.Node +func returnsfromheap(params *types.Type) []ir.Node { + var nn []ir.Node for _, t := range params.Fields().Slice() { v := ir.AsNode(t.Nname) if v == nil { @@ -2481,7 +2481,7 @@ func heapmoves() { base.Pos = lno } -func vmkcall(fn *ir.Node, t *types.Type, init *ir.Nodes, va []*ir.Node) *ir.Node { +func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) ir.Node { if fn.Type() == nil || fn.Type().Etype != types.TFUNC { base.Fatalf("mkcall %v %v", fn, fn.Type()) } @@ -2503,15 +2503,15 @@ func vmkcall(fn *ir.Node, t *types.Type, init *ir.Nodes, va []*ir.Node) *ir.Node return r } -func mkcall(name string, t *types.Type, init *ir.Nodes, args ...*ir.Node) *ir.Node { +func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) ir.Node { return vmkcall(syslook(name), t, init, args) } -func mkcall1(fn *ir.Node, t *types.Type, init *ir.Nodes, args ...*ir.Node) *ir.Node { +func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) ir.Node { return vmkcall(fn, t, init, args) } -func conv(n *ir.Node, t *types.Type) *ir.Node { +func conv(n ir.Node, t *types.Type) ir.Node { if types.Identical(n.Type(), t) { return n } @@ -2523,7 +2523,7 @@ func conv(n *ir.Node, t *types.Type) *ir.Node { // convnop converts node n to type t using the OCONVNOP op // and typechecks the result with ctxExpr. -func convnop(n *ir.Node, t *types.Type) *ir.Node { +func convnop(n ir.Node, t *types.Type) ir.Node { if types.Identical(n.Type(), t) { return n } @@ -2536,7 +2536,7 @@ func convnop(n *ir.Node, t *types.Type) *ir.Node { // byteindex converts n, which is byte-sized, to an int used to index into an array. // We cannot use conv, because we allow converting bool to int here, // which is forbidden in user code. -func byteindex(n *ir.Node) *ir.Node { +func byteindex(n ir.Node) ir.Node { // We cannot convert from bool to int directly. // While converting from int8 to int is possible, it would yield // the wrong result for negative values. 
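The sign-extension hazard that byteindex's comment describes is ordinary Go
conversion behavior, so a tiny standalone snippet makes it concrete. This is
illustrative only, not compiler code: routing a byte through int8 sign-extends,
turning high byte values into negative indices, while converting the unsigned
byte directly zero-extends.

	package main

	import "fmt"

	func main() {
		b := byte(0xFF)
		fmt.Println(int(int8(b))) // -1: sign-extended, unusable as an array index
		fmt.Println(int(b))       // 255: zero-extended, safe as an array index
	}
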
@@ -2552,7 +2552,7 @@ func byteindex(n *ir.Node) *ir.Node { return n } -func chanfn(name string, n int, t *types.Type) *ir.Node { +func chanfn(name string, n int, t *types.Type) ir.Node { if !t.IsChan() { base.Fatalf("chanfn %v", t) } @@ -2568,7 +2568,7 @@ func chanfn(name string, n int, t *types.Type) *ir.Node { return fn } -func mapfn(name string, t *types.Type) *ir.Node { +func mapfn(name string, t *types.Type) ir.Node { if !t.IsMap() { base.Fatalf("mapfn %v", t) } @@ -2577,7 +2577,7 @@ func mapfn(name string, t *types.Type) *ir.Node { return fn } -func mapfndel(name string, t *types.Type) *ir.Node { +func mapfndel(name string, t *types.Type) ir.Node { if !t.IsMap() { base.Fatalf("mapfn %v", t) } @@ -2636,13 +2636,13 @@ func mapfast(t *types.Type) int { return mapslow } -func writebarrierfn(name string, l *types.Type, r *types.Type) *ir.Node { +func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node { fn := syslook(name) fn = substArgTypes(fn, l, r) return fn } -func addstr(n *ir.Node, init *ir.Nodes) *ir.Node { +func addstr(n ir.Node, init *ir.Nodes) ir.Node { // order.expr rewrote OADDSTR to have a list of strings. c := n.List().Len() @@ -2668,7 +2668,7 @@ func addstr(n *ir.Node, init *ir.Nodes) *ir.Node { } // build list of string arguments - args := []*ir.Node{buf} + args := []ir.Node{buf} for _, n2 := range n.List().Slice() { args = append(args, conv(n2, types.Types[types.TSTRING])) } @@ -2688,7 +2688,7 @@ func addstr(n *ir.Node, init *ir.Nodes) *ir.Node { prealloc[slice] = prealloc[n] } slice.PtrList().Set(args[1:]) // skip buf arg - args = []*ir.Node{buf, slice} + args = []ir.Node{buf, slice} slice.SetEsc(EscNone) } @@ -2702,7 +2702,7 @@ func addstr(n *ir.Node, init *ir.Nodes) *ir.Node { return r } -func walkAppendArgs(n *ir.Node, init *ir.Nodes) { +func walkAppendArgs(n ir.Node, init *ir.Nodes) { walkexprlistsafe(n.List().Slice(), init) // walkexprlistsafe will leave OINDEX (s[n]) alone if both s @@ -2728,7 +2728,7 @@ func walkAppendArgs(n *ir.Node, init *ir.Nodes) { // s // // l2 is allowed to be a string. -func appendslice(n *ir.Node, init *ir.Nodes) *ir.Node { +func appendslice(n ir.Node, init *ir.Nodes) ir.Node { walkAppendArgs(n, init) l1 := n.List().First() @@ -2768,7 +2768,7 @@ func appendslice(n *ir.Node, init *ir.Nodes) *ir.Node { nt.SetBounded(true) nodes.Append(ir.Nod(ir.OAS, s, nt)) - var ncopy *ir.Node + var ncopy ir.Node if elemtype.HasPointers() { // copy(s[len(l1):], l2) nptr1 := ir.Nod(ir.OSLICE, s, nil) @@ -2828,7 +2828,7 @@ func appendslice(n *ir.Node, init *ir.Nodes) *ir.Node { // isAppendOfMake reports whether n is of the form append(x , make([]T, y)...). // isAppendOfMake assumes n has already been typechecked. -func isAppendOfMake(n *ir.Node) bool { +func isAppendOfMake(n ir.Node) bool { if base.Flag.N != 0 || instrumenting { return false } @@ -2887,7 +2887,7 @@ func isAppendOfMake(n *ir.Node) bool { // } // } // s -func extendslice(n *ir.Node, init *ir.Nodes) *ir.Node { +func extendslice(n ir.Node, init *ir.Nodes) ir.Node { // isAppendOfMake made sure all possible positive values of l2 fit into an uint. // The case of l2 overflow when converting from e.g. uint to int is handled by an explicit // check of l2 < 0 at runtime which is generated below. 
@@ -2900,7 +2900,7 @@ func extendslice(n *ir.Node, init *ir.Nodes) *ir.Node { l1 := n.List().First() l2 = n.List().Second() // re-read l2, as it may have been updated by walkAppendArgs - var nodes []*ir.Node + var nodes []ir.Node // if l2 >= 0 (likely happens), do nothing nifneg := ir.Nod(ir.OIF, ir.Nod(ir.OGE, l2, nodintconst(0)), nil) @@ -3006,7 +3006,7 @@ func extendslice(n *ir.Node, init *ir.Nodes) *ir.Node { // ... // } // s -func walkappend(n *ir.Node, init *ir.Nodes, dst *ir.Node) *ir.Node { +func walkappend(n ir.Node, init *ir.Nodes, dst ir.Node) ir.Node { if !samesafeexpr(dst, n.List().First()) { n.List().SetFirst(safeexpr(n.List().First(), init)) n.List().SetFirst(walkexpr(n.List().First(), init)) @@ -3042,7 +3042,7 @@ func walkappend(n *ir.Node, init *ir.Nodes, dst *ir.Node) *ir.Node { return n } - var l []*ir.Node + var l []ir.Node ns := temp(nsrc.Type()) l = append(l, ir.Nod(ir.OAS, ns, nsrc)) // s = src @@ -3095,7 +3095,7 @@ func walkappend(n *ir.Node, init *ir.Nodes, dst *ir.Node) *ir.Node { // // Also works if b is a string. // -func copyany(n *ir.Node, init *ir.Nodes, runtimecall bool) *ir.Node { +func copyany(n ir.Node, init *ir.Nodes, runtimecall bool) ir.Node { if n.Left().Type().Elem().HasPointers() { Curfn.Func().SetWBPos(n.Pos()) fn := writebarrierfn("typedslicecopy", n.Left().Type().Elem(), n.Right().Type().Elem()) @@ -3126,7 +3126,7 @@ func copyany(n *ir.Node, init *ir.Nodes, runtimecall bool) *ir.Node { n.SetRight(walkexpr(n.Right(), init)) nl := temp(n.Left().Type()) nr := temp(n.Right().Type()) - var l []*ir.Node + var l []ir.Node l = append(l, ir.Nod(ir.OAS, nl, n.Left())) l = append(l, ir.Nod(ir.OAS, nr, n.Right())) @@ -3165,7 +3165,7 @@ func copyany(n *ir.Node, init *ir.Nodes, runtimecall bool) *ir.Node { return nlen } -func eqfor(t *types.Type) (n *ir.Node, needsize bool) { +func eqfor(t *types.Type) (n ir.Node, needsize bool) { // Should only arrive here with large memory or // a struct/array containing a non-memory field/element. // Small memory is handled inline, and single non-memory @@ -3179,10 +3179,10 @@ func eqfor(t *types.Type) (n *ir.Node, needsize bool) { sym := typesymprefix(".eq", t) n := NewName(sym) setNodeNameFunc(n) - n.SetType(functype(nil, []*ir.Node{ + n.SetType(functype(nil, []ir.Node{ anonfield(types.NewPtr(t)), anonfield(types.NewPtr(t)), - }, []*ir.Node{ + }, []ir.Node{ anonfield(types.Types[types.TBOOL]), })) return n, false @@ -3193,7 +3193,7 @@ func eqfor(t *types.Type) (n *ir.Node, needsize bool) { // The result of walkcompare MUST be assigned back to n, e.g. 
// n.Left = walkcompare(n.Left, init) -func walkcompare(n *ir.Node, init *ir.Nodes) *ir.Node { +func walkcompare(n ir.Node, init *ir.Nodes) ir.Node { if n.Left().Type().IsInterface() && n.Right().Type().IsInterface() && n.Left().Op() != ir.ONIL && n.Right().Op() != ir.ONIL { return walkcompareInterface(n, init) } @@ -3228,7 +3228,7 @@ func walkcompare(n *ir.Node, init *ir.Nodes) *ir.Node { // l.tab == type(r) // For non-empty interface, this is: // l.tab != nil && l.tab._type == type(r) - var eqtype *ir.Node + var eqtype ir.Node tab := ir.Nod(ir.OITAB, l, nil) rtyp := typename(r.Type()) if l.Type().IsEmptyInterface() { @@ -3354,8 +3354,8 @@ func walkcompare(n *ir.Node, init *ir.Nodes) *ir.Node { if n.Op() == ir.ONE { andor = ir.OOROR } - var expr *ir.Node - compare := func(el, er *ir.Node) { + var expr ir.Node + compare := func(el, er ir.Node) { a := ir.Nod(n.Op(), el, er) if expr == nil { expr = a @@ -3447,7 +3447,7 @@ func walkcompare(n *ir.Node, init *ir.Nodes) *ir.Node { return n } -func tracecmpArg(n *ir.Node, t *types.Type, init *ir.Nodes) *ir.Node { +func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node { // Ugly hack to avoid "constant -1 overflows uintptr" errors, etc. if n.Op() == ir.OLITERAL && n.Type().IsSigned() && n.Int64Val() < 0 { n = copyexpr(n, n.Type(), init) @@ -3456,11 +3456,11 @@ func tracecmpArg(n *ir.Node, t *types.Type, init *ir.Nodes) *ir.Node { return conv(n, t) } -func walkcompareInterface(n *ir.Node, init *ir.Nodes) *ir.Node { +func walkcompareInterface(n ir.Node, init *ir.Nodes) ir.Node { n.SetRight(cheapexpr(n.Right(), init)) n.SetLeft(cheapexpr(n.Left(), init)) eqtab, eqdata := eqinterface(n.Left(), n.Right()) - var cmp *ir.Node + var cmp ir.Node if n.Op() == ir.OEQ { cmp = ir.Nod(ir.OANDAND, eqtab, eqdata) } else { @@ -3470,9 +3470,9 @@ func walkcompareInterface(n *ir.Node, init *ir.Nodes) *ir.Node { return finishcompare(n, cmp, init) } -func walkcompareString(n *ir.Node, init *ir.Nodes) *ir.Node { +func walkcompareString(n ir.Node, init *ir.Nodes) ir.Node { // Rewrite comparisons to short constant strings as length+byte-wise comparisons. - var cs, ncs *ir.Node // const string, non-const string + var cs, ncs ir.Node // const string, non-const string switch { case ir.IsConst(n.Left(), constant.String) && ir.IsConst(n.Right(), constant.String): // ignore; will be constant evaluated @@ -3570,7 +3570,7 @@ func walkcompareString(n *ir.Node, init *ir.Nodes) *ir.Node { } } - var r *ir.Node + var r ir.Node if n.Op() == ir.OEQ || n.Op() == ir.ONE { // prepare for rewrite below n.SetLeft(cheapexpr(n.Left(), init)) @@ -3597,7 +3597,7 @@ func walkcompareString(n *ir.Node, init *ir.Nodes) *ir.Node { // The result of finishcompare MUST be assigned back to n, e.g. // n.Left = finishcompare(n.Left, x, r, init) -func finishcompare(n, r *ir.Node, init *ir.Nodes) *ir.Node { +func finishcompare(n, r ir.Node, init *ir.Nodes) ir.Node { r = typecheck(r, ctxExpr) r = conv(r, n.Type()) r = walkexpr(r, init) @@ -3605,7 +3605,7 @@ func finishcompare(n, r *ir.Node, init *ir.Nodes) *ir.Node { } // return 1 if integer n must be in range [0, max), 0 otherwise -func bounded(n *ir.Node, max int64) bool { +func bounded(n ir.Node, max int64) bool { if n.Type() == nil || !n.Type().IsInteger() { return false } @@ -3672,7 +3672,7 @@ func bounded(n *ir.Node, max int64) bool { } // usemethod checks interface method calls for uses of reflect.Type.Method. 
-func usemethod(n *ir.Node) { +func usemethod(n ir.Node) { t := n.Left().Type() // Looking for either of: @@ -3717,7 +3717,7 @@ func usemethod(n *ir.Node) { } } -func usefield(n *ir.Node) { +func usefield(n ir.Node) { if objabi.Fieldtrack_enabled == 0 { return } @@ -3777,7 +3777,7 @@ func candiscardlist(l ir.Nodes) bool { return true } -func candiscard(n *ir.Node) bool { +func candiscard(n ir.Node) bool { if n == nil { return true } @@ -3891,7 +3891,7 @@ var wrapCall_prgen int // The result of wrapCall MUST be assigned back to n, e.g. // n.Left = wrapCall(n.Left, init) -func wrapCall(n *ir.Node, init *ir.Nodes) *ir.Node { +func wrapCall(n ir.Node, init *ir.Nodes) ir.Node { if n.Init().Len() != 0 { walkstmtlist(n.Init().Slice()) init.AppendNodes(n.PtrInit()) @@ -3909,7 +3909,7 @@ func wrapCall(n *ir.Node, init *ir.Nodes) *ir.Node { } // origArgs keeps track of what argument is uintptr-unsafe/unsafe-uintptr conversion. - origArgs := make([]*ir.Node, n.List().Len()) + origArgs := make([]ir.Node, n.List().Len()) t := ir.Nod(ir.OTFUNC, nil, nil) for i, arg := range n.List().Slice() { s := lookupN("a", i) @@ -3962,7 +3962,7 @@ func wrapCall(n *ir.Node, init *ir.Nodes) *ir.Node { // type syntax expression n.Type. // The result of substArgTypes MUST be assigned back to old, e.g. // n.Left = substArgTypes(n.Left, t1, t2) -func substArgTypes(old *ir.Node, types_ ...*types.Type) *ir.Node { +func substArgTypes(old ir.Node, types_ ...*types.Type) ir.Node { n := ir.Copy(old) for _, t := range types_ { @@ -3992,11 +3992,11 @@ func canMergeLoads() bool { // isRuneCount reports whether n is of the form len([]rune(string)). // These are optimized into a call to runtime.countrunes. -func isRuneCount(n *ir.Node) bool { +func isRuneCount(n ir.Node) bool { return base.Flag.N == 0 && !instrumenting && n.Op() == ir.OLEN && n.Left().Op() == ir.OSTR2RUNES } -func walkCheckPtrAlignment(n *ir.Node, init *ir.Nodes, count *ir.Node) *ir.Node { +func walkCheckPtrAlignment(n ir.Node, init *ir.Nodes, count ir.Node) ir.Node { if !n.Type().IsPtr() { base.Fatalf("expected pointer type: %v", n.Type()) } @@ -4024,7 +4024,7 @@ func walkCheckPtrAlignment(n *ir.Node, init *ir.Nodes, count *ir.Node) *ir.Node var walkCheckPtrArithmeticMarker byte -func walkCheckPtrArithmetic(n *ir.Node, init *ir.Nodes) *ir.Node { +func walkCheckPtrArithmetic(n ir.Node, init *ir.Nodes) ir.Node { // Calling cheapexpr(n, init) below leads to a recursive call // to walkexpr, which leads us back here again. Use n.Opt to // prevent infinite loops. @@ -4055,9 +4055,9 @@ func walkCheckPtrArithmetic(n *ir.Node, init *ir.Nodes) *ir.Node { // "It is valid both to add and to subtract offsets from a // pointer in this way. It is also valid to use &^ to round // pointers, usually for alignment." - var originals []*ir.Node - var walk func(n *ir.Node) - walk = func(n *ir.Node) { + var originals []ir.Node + var walk func(n ir.Node) + walk = func(n ir.Node) { switch n.Op() { case ir.OADD: walk(n.Left()) @@ -4088,6 +4088,6 @@ func walkCheckPtrArithmetic(n *ir.Node, init *ir.Nodes) *ir.Node { // checkPtr reports whether pointer checking should be enabled for // function fn at a given level. See debugHelpFooter for defined // levels. 
-func checkPtr(fn *ir.Node, level int) bool { +func checkPtr(fn ir.Node, level int) bool { return base.Debug.Checkptr >= level && fn.Func().Pragma&ir.NoCheckPtr == 0 } diff --git a/src/cmd/compile/internal/ir/dump.go b/src/cmd/compile/internal/ir/dump.go index c4ea5af3d1feb..fe1410969f0ef 100644 --- a/src/cmd/compile/internal/ir/dump.go +++ b/src/cmd/compile/internal/ir/dump.go @@ -200,7 +200,7 @@ func (p *dumper) dump(x reflect.Value, depth int) { typ := x.Type() isNode := false - if n, ok := x.Interface().(Node); ok { + if n, ok := x.Interface().(node); ok { isNode = true p.printf("%s %s {", n.op.String(), p.addr(x)) } else { diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 9682bae39b151..f394219c0532b 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -243,7 +243,7 @@ func (o Op) oconv(s fmt.State, flag FmtFlag, mode FmtMode) { type FmtMode int type fmtNode struct { - x *Node + x Node m FmtMode } @@ -277,11 +277,11 @@ type fmtNodes struct { func (f *fmtNodes) Format(s fmt.State, verb rune) { f.x.format(s, verb, f.m) } -func (n *Node) Format(s fmt.State, verb rune) { +func (n *node) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func FmtNode(n *Node, s fmt.State, verb rune) { +func FmtNode(n Node, s fmt.State, verb rune) { nodeFormat(n, s, verb, FErr) } @@ -311,7 +311,7 @@ func (m FmtMode) prepareArgs(args []interface{}) { switch arg := arg.(type) { case Op: args[i] = &fmtOp{arg, m} - case *Node: + case Node: args[i] = &fmtNode{arg, m} case nil: args[i] = &fmtNode{nil, m} // assume this was a node interface @@ -329,7 +329,7 @@ func (m FmtMode) prepareArgs(args []interface{}) { } } -func nodeFormat(n *Node, s fmt.State, verb rune, mode FmtMode) { +func nodeFormat(n Node, s fmt.State, verb rune, mode FmtMode) { switch verb { case 'v', 'S', 'L': nconvFmt(n, s, fmtFlag(s, verb), mode) @@ -343,10 +343,10 @@ func nodeFormat(n *Node, s fmt.State, verb rune, mode FmtMode) { } // EscFmt is set by the escape analysis code to add escape analysis details to the node print. -var EscFmt func(n *Node, short bool) string +var EscFmt func(n Node, short bool) string // *Node details -func jconvFmt(n *Node, s fmt.State, flag FmtFlag) { +func jconvFmt(n Node, s fmt.State, flag FmtFlag) { short := flag&FmtShort != 0 // Useful to see which nodes in an AST printout are actually identical @@ -894,7 +894,7 @@ func StmtWithInit(op Op) bool { return false } -func stmtFmt(n *Node, s fmt.State, mode FmtMode) { +func stmtFmt(n Node, s fmt.State, mode FmtMode) { // some statements allow for an init, but at most one, // but we may have an arbitrary number added, eg by typecheck // and inlining. If it doesn't fit the syntax, emit an enclosing @@ -1194,7 +1194,7 @@ var OpPrec = []int{ OEND: 0, } -func exprFmt(n *Node, s fmt.State, prec int, mode FmtMode) { +func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) { for n != nil && n.Implicit() && (n.Op() == ODEREF || n.Op() == OADDR) { n = n.Left() } @@ -1556,7 +1556,7 @@ func exprFmt(n *Node, s fmt.State, prec int, mode FmtMode) { } } -func nodeFmt(n *Node, s fmt.State, flag FmtFlag, mode FmtMode) { +func nodeFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) { t := n.Type() // We almost always want the original. 
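Stepping back, the dump.go and fmt.go hunks above are the same mechanical
rewrite applied throughout this series: functions that took a concrete *Node
now take the Node interface, and the struct behind it becomes the unexported
node. A minimal sketch of the shape this leaves the package in (simplified
signatures for illustration; the real Op() returns an Op, not a string):

	package ir

	import "fmt"

	// Node is the abstract interface callers traffic in.
	type Node interface {
		Op() string
	}

	// node is the lone concrete implementation, kept unexported.
	type node struct{ op string }

	func (n *node) Op() string { return n.op }

	// Dump formerly took *Node; any Node now works, which leaves room
	// for alternative Node implementations later.
	func Dump(n Node) {
		fmt.Printf("%s\n", n.Op())
	}
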
@@ -1586,7 +1586,7 @@ func nodeFmt(n *Node, s fmt.State, flag FmtFlag, mode FmtMode) { exprFmt(n, s, 0, mode) } -func nodeDumpFmt(n *Node, s fmt.State, flag FmtFlag, mode FmtMode) { +func nodeDumpFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) { recur := flag&FmtShort == 0 if recur { @@ -1794,12 +1794,12 @@ func typeFormat(t *types.Type, s fmt.State, verb rune, mode FmtMode) { } } -func (n *Node) String() string { return fmt.Sprint(n) } -func modeString(n *Node, mode FmtMode) string { return mode.Sprint(n) } +func (n *node) String() string { return fmt.Sprint(n) } +func modeString(n Node, mode FmtMode) string { return mode.Sprint(n) } // "%L" suffix with "(type %T)" where possible // "%+S" in debug mode, don't recurse, no multiline output -func nconvFmt(n *Node, s fmt.State, flag FmtFlag, mode FmtMode) { +func nconvFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) { if n == nil { fmt.Fprint(s, "") return @@ -1866,7 +1866,7 @@ func FDumpList(w io.Writer, s string, l Nodes) { fmt.Fprintf(w, "%s%+v\n", s, l) } -func Dump(s string, n *Node) { +func Dump(s string, n Node) { fmt.Printf("%s [%p]%+v\n", s, n, n) } @@ -1911,6 +1911,6 @@ func InstallTypeFormats() { // Line returns n's position as a string. If n has been inlined, // it uses the outermost position where n has been inlined. -func Line(n *Node) string { +func Line(n Node) string { return base.FmtPos(n.Pos()) } diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index d700c593906d8..477d07f5029b5 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -20,7 +20,7 @@ import ( ) // A Node is the abstract interface to an IR node. -type INode interface { +type Node interface { // Formatting Format(s fmt.State, verb rune) String() string @@ -30,19 +30,19 @@ type INode interface { SetPos(x src.XPos) // For making copies. Mainly used by Copy and SepCopy. - RawCopy() *Node + RawCopy() Node // Abstract graph structure, for generic traversals. Op() Op SetOp(x Op) - Orig() *Node - SetOrig(x *Node) + Orig() Node + SetOrig(x Node) SubOp() Op SetSubOp(x Op) - Left() *Node - SetLeft(x *Node) - Right() *Node - SetRight(x *Node) + Left() Node + SetLeft(x Node) + Right() Node + SetRight(x Node) Init() Nodes PtrInit() *Nodes SetInit(x Nodes) @@ -71,8 +71,8 @@ type INode interface { SetClass(x Class) Likely() bool SetLikely(x bool) - SliceBounds() (low, high, max *Node) - SetSliceBounds(low, high, max *Node) + SliceBounds() (low, high, max Node) + SetSliceBounds(low, high, max Node) Iota() int64 SetIota(x int64) Colas() bool @@ -130,17 +130,17 @@ type INode interface { CanBeAnSSASym() } -var _ INode = (*Node)(nil) +var _ Node = (*node)(nil) // A Node is a single node in the syntax tree. // Actually the syntax tree is a syntax DAG, because there is only one // node with Op=ONAME for a given instance of a variable x. // The same is true for Op=OTYPE and Op=OLITERAL. See Node.mayBeShared. -type Node struct { +type node struct { // Tree structure. // Generic recursive walks should follow these fields. 
- left *Node - right *Node + left Node + right Node init Nodes body Nodes list Nodes @@ -148,7 +148,7 @@ type Node struct { // most nodes typ *types.Type - orig *Node // original form, for printing, and tracking copies of ONAMEs + orig Node // original form, for printing, and tracking copies of ONAMEs // func fn *Func @@ -179,46 +179,46 @@ type Node struct { aux uint8 } -func (n *Node) Left() *Node { return n.left } -func (n *Node) SetLeft(x *Node) { n.left = x } -func (n *Node) Right() *Node { return n.right } -func (n *Node) SetRight(x *Node) { n.right = x } -func (n *Node) Orig() *Node { return n.orig } -func (n *Node) SetOrig(x *Node) { n.orig = x } -func (n *Node) Type() *types.Type { return n.typ } -func (n *Node) SetType(x *types.Type) { n.typ = x } -func (n *Node) Func() *Func { return n.fn } -func (n *Node) SetFunc(x *Func) { n.fn = x } -func (n *Node) Name() *Name { return n.name } -func (n *Node) SetName(x *Name) { n.name = x } -func (n *Node) Sym() *types.Sym { return n.sym } -func (n *Node) SetSym(x *types.Sym) { n.sym = x } -func (n *Node) Pos() src.XPos { return n.pos } -func (n *Node) SetPos(x src.XPos) { n.pos = x } -func (n *Node) Offset() int64 { return n.offset } -func (n *Node) SetOffset(x int64) { n.offset = x } -func (n *Node) Esc() uint16 { return n.esc } -func (n *Node) SetEsc(x uint16) { n.esc = x } -func (n *Node) Op() Op { return n.op } -func (n *Node) SetOp(x Op) { n.op = x } -func (n *Node) Init() Nodes { return n.init } -func (n *Node) SetInit(x Nodes) { n.init = x } -func (n *Node) PtrInit() *Nodes { return &n.init } -func (n *Node) Body() Nodes { return n.body } -func (n *Node) SetBody(x Nodes) { n.body = x } -func (n *Node) PtrBody() *Nodes { return &n.body } -func (n *Node) List() Nodes { return n.list } -func (n *Node) SetList(x Nodes) { n.list = x } -func (n *Node) PtrList() *Nodes { return &n.list } -func (n *Node) Rlist() Nodes { return n.rlist } -func (n *Node) SetRlist(x Nodes) { n.rlist = x } -func (n *Node) PtrRlist() *Nodes { return &n.rlist } - -func (n *Node) ResetAux() { +func (n *node) Left() Node { return n.left } +func (n *node) SetLeft(x Node) { n.left = x } +func (n *node) Right() Node { return n.right } +func (n *node) SetRight(x Node) { n.right = x } +func (n *node) Orig() Node { return n.orig } +func (n *node) SetOrig(x Node) { n.orig = x } +func (n *node) Type() *types.Type { return n.typ } +func (n *node) SetType(x *types.Type) { n.typ = x } +func (n *node) Func() *Func { return n.fn } +func (n *node) SetFunc(x *Func) { n.fn = x } +func (n *node) Name() *Name { return n.name } +func (n *node) SetName(x *Name) { n.name = x } +func (n *node) Sym() *types.Sym { return n.sym } +func (n *node) SetSym(x *types.Sym) { n.sym = x } +func (n *node) Pos() src.XPos { return n.pos } +func (n *node) SetPos(x src.XPos) { n.pos = x } +func (n *node) Offset() int64 { return n.offset } +func (n *node) SetOffset(x int64) { n.offset = x } +func (n *node) Esc() uint16 { return n.esc } +func (n *node) SetEsc(x uint16) { n.esc = x } +func (n *node) Op() Op { return n.op } +func (n *node) SetOp(x Op) { n.op = x } +func (n *node) Init() Nodes { return n.init } +func (n *node) SetInit(x Nodes) { n.init = x } +func (n *node) PtrInit() *Nodes { return &n.init } +func (n *node) Body() Nodes { return n.body } +func (n *node) SetBody(x Nodes) { n.body = x } +func (n *node) PtrBody() *Nodes { return &n.body } +func (n *node) List() Nodes { return n.list } +func (n *node) SetList(x Nodes) { n.list = x } +func (n *node) PtrList() *Nodes { return &n.list } +func (n 
*node) Rlist() Nodes { return n.rlist } +func (n *node) SetRlist(x Nodes) { n.rlist = x } +func (n *node) PtrRlist() *Nodes { return &n.rlist } + +func (n *node) ResetAux() { n.aux = 0 } -func (n *Node) SubOp() Op { +func (n *node) SubOp() Op { switch n.Op() { case OASOP, ONAME: default: @@ -227,7 +227,7 @@ func (n *Node) SubOp() Op { return Op(n.aux) } -func (n *Node) SetSubOp(op Op) { +func (n *node) SetSubOp(op Op) { switch n.Op() { case OASOP, ONAME: default: @@ -236,14 +236,14 @@ func (n *Node) SetSubOp(op Op) { n.aux = uint8(op) } -func (n *Node) IndexMapLValue() bool { +func (n *node) IndexMapLValue() bool { if n.Op() != OINDEXMAP { base.Fatalf("unexpected op: %v", n.Op()) } return n.aux != 0 } -func (n *Node) SetIndexMapLValue(b bool) { +func (n *node) SetIndexMapLValue(b bool) { if n.Op() != OINDEXMAP { base.Fatalf("unexpected op: %v", n.Op()) } @@ -254,28 +254,28 @@ func (n *Node) SetIndexMapLValue(b bool) { } } -func (n *Node) TChanDir() types.ChanDir { +func (n *node) TChanDir() types.ChanDir { if n.Op() != OTCHAN { base.Fatalf("unexpected op: %v", n.Op()) } return types.ChanDir(n.aux) } -func (n *Node) SetTChanDir(dir types.ChanDir) { +func (n *node) SetTChanDir(dir types.ChanDir) { if n.Op() != OTCHAN { base.Fatalf("unexpected op: %v", n.Op()) } n.aux = uint8(dir) } -func IsSynthetic(n *Node) bool { +func IsSynthetic(n Node) bool { name := n.Sym().Name return name[0] == '.' || name[0] == '~' } // IsAutoTmp indicates if n was created by the compiler as a temporary, // based on the setting of the .AutoTemp flag in n's Name. -func IsAutoTmp(n *Node) bool { +func IsAutoTmp(n Node) bool { if n == nil || n.Op() != ONAME { return false } @@ -308,49 +308,49 @@ const ( _, nodeEmbedded // ODCLFIELD embedded type ) -func (n *Node) Class() Class { return Class(n.flags.get3(nodeClass)) } -func (n *Node) Walkdef() uint8 { return n.flags.get2(nodeWalkdef) } -func (n *Node) Typecheck() uint8 { return n.flags.get2(nodeTypecheck) } -func (n *Node) Initorder() uint8 { return n.flags.get2(nodeInitorder) } - -func (n *Node) HasBreak() bool { return n.flags&nodeHasBreak != 0 } -func (n *Node) NoInline() bool { return n.flags&nodeNoInline != 0 } -func (n *Node) Implicit() bool { return n.flags&nodeImplicit != 0 } -func (n *Node) IsDDD() bool { return n.flags&nodeIsDDD != 0 } -func (n *Node) Diag() bool { return n.flags&nodeDiag != 0 } -func (n *Node) Colas() bool { return n.flags&nodeColas != 0 } -func (n *Node) NonNil() bool { return n.flags&nodeNonNil != 0 } -func (n *Node) Transient() bool { return n.flags&nodeTransient != 0 } -func (n *Node) Bounded() bool { return n.flags&nodeBounded != 0 } -func (n *Node) HasCall() bool { return n.flags&nodeHasCall != 0 } -func (n *Node) Likely() bool { return n.flags&nodeLikely != 0 } -func (n *Node) HasVal() bool { return n.flags&nodeHasVal != 0 } -func (n *Node) HasOpt() bool { return n.flags&nodeHasOpt != 0 } -func (n *Node) Embedded() bool { return n.flags&nodeEmbedded != 0 } - -func (n *Node) SetClass(b Class) { n.flags.set3(nodeClass, uint8(b)) } -func (n *Node) SetWalkdef(b uint8) { n.flags.set2(nodeWalkdef, b) } -func (n *Node) SetTypecheck(b uint8) { n.flags.set2(nodeTypecheck, b) } -func (n *Node) SetInitorder(b uint8) { n.flags.set2(nodeInitorder, b) } - -func (n *Node) SetHasBreak(b bool) { n.flags.set(nodeHasBreak, b) } -func (n *Node) SetNoInline(b bool) { n.flags.set(nodeNoInline, b) } -func (n *Node) SetImplicit(b bool) { n.flags.set(nodeImplicit, b) } -func (n *Node) SetIsDDD(b bool) { n.flags.set(nodeIsDDD, b) } -func (n *Node) SetDiag(b 
bool) { n.flags.set(nodeDiag, b) } -func (n *Node) SetColas(b bool) { n.flags.set(nodeColas, b) } -func (n *Node) SetTransient(b bool) { n.flags.set(nodeTransient, b) } -func (n *Node) SetHasCall(b bool) { n.flags.set(nodeHasCall, b) } -func (n *Node) SetLikely(b bool) { n.flags.set(nodeLikely, b) } -func (n *Node) setHasVal(b bool) { n.flags.set(nodeHasVal, b) } -func (n *Node) setHasOpt(b bool) { n.flags.set(nodeHasOpt, b) } -func (n *Node) SetEmbedded(b bool) { n.flags.set(nodeEmbedded, b) } +func (n *node) Class() Class { return Class(n.flags.get3(nodeClass)) } +func (n *node) Walkdef() uint8 { return n.flags.get2(nodeWalkdef) } +func (n *node) Typecheck() uint8 { return n.flags.get2(nodeTypecheck) } +func (n *node) Initorder() uint8 { return n.flags.get2(nodeInitorder) } + +func (n *node) HasBreak() bool { return n.flags&nodeHasBreak != 0 } +func (n *node) NoInline() bool { return n.flags&nodeNoInline != 0 } +func (n *node) Implicit() bool { return n.flags&nodeImplicit != 0 } +func (n *node) IsDDD() bool { return n.flags&nodeIsDDD != 0 } +func (n *node) Diag() bool { return n.flags&nodeDiag != 0 } +func (n *node) Colas() bool { return n.flags&nodeColas != 0 } +func (n *node) NonNil() bool { return n.flags&nodeNonNil != 0 } +func (n *node) Transient() bool { return n.flags&nodeTransient != 0 } +func (n *node) Bounded() bool { return n.flags&nodeBounded != 0 } +func (n *node) HasCall() bool { return n.flags&nodeHasCall != 0 } +func (n *node) Likely() bool { return n.flags&nodeLikely != 0 } +func (n *node) HasVal() bool { return n.flags&nodeHasVal != 0 } +func (n *node) HasOpt() bool { return n.flags&nodeHasOpt != 0 } +func (n *node) Embedded() bool { return n.flags&nodeEmbedded != 0 } + +func (n *node) SetClass(b Class) { n.flags.set3(nodeClass, uint8(b)) } +func (n *node) SetWalkdef(b uint8) { n.flags.set2(nodeWalkdef, b) } +func (n *node) SetTypecheck(b uint8) { n.flags.set2(nodeTypecheck, b) } +func (n *node) SetInitorder(b uint8) { n.flags.set2(nodeInitorder, b) } + +func (n *node) SetHasBreak(b bool) { n.flags.set(nodeHasBreak, b) } +func (n *node) SetNoInline(b bool) { n.flags.set(nodeNoInline, b) } +func (n *node) SetImplicit(b bool) { n.flags.set(nodeImplicit, b) } +func (n *node) SetIsDDD(b bool) { n.flags.set(nodeIsDDD, b) } +func (n *node) SetDiag(b bool) { n.flags.set(nodeDiag, b) } +func (n *node) SetColas(b bool) { n.flags.set(nodeColas, b) } +func (n *node) SetTransient(b bool) { n.flags.set(nodeTransient, b) } +func (n *node) SetHasCall(b bool) { n.flags.set(nodeHasCall, b) } +func (n *node) SetLikely(b bool) { n.flags.set(nodeLikely, b) } +func (n *node) setHasVal(b bool) { n.flags.set(nodeHasVal, b) } +func (n *node) setHasOpt(b bool) { n.flags.set(nodeHasOpt, b) } +func (n *node) SetEmbedded(b bool) { n.flags.set(nodeEmbedded, b) } // MarkNonNil marks a pointer n as being guaranteed non-nil, // on all code paths, at all times. // During conversion to SSA, non-nil pointers won't have nil checks // inserted before dereferencing. See state.exprPtr. -func (n *Node) MarkNonNil() { +func (n *node) MarkNonNil() { if !n.Type().IsPtr() && !n.Type().IsUnsafePtr() { base.Fatalf("MarkNonNil(%v), type %v", n, n.Type()) } @@ -361,7 +361,7 @@ func (n *Node) MarkNonNil() { // When n is an index or slice operation, n does not need bounds checks. // When n is a dereferencing operation, n does not need nil checks. // When n is a makeslice+copy operation, n does not need length and cap checks. 
-func (n *Node) SetBounded(b bool) { +func (n *node) SetBounded(b bool) { switch n.Op() { case OINDEX, OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR: // No bounds checks needed. @@ -377,7 +377,7 @@ func (n *Node) SetBounded(b bool) { } // MarkReadonly indicates that n is an ONAME with readonly contents. -func (n *Node) MarkReadonly() { +func (n *node) MarkReadonly() { if n.Op() != ONAME { base.Fatalf("Node.MarkReadonly %v", n.Op()) } @@ -389,7 +389,7 @@ func (n *Node) MarkReadonly() { } // Val returns the constant.Value for the node. -func (n *Node) Val() constant.Value { +func (n *node) Val() constant.Value { if !n.HasVal() { return constant.MakeUnknown() } @@ -398,7 +398,7 @@ func (n *Node) Val() constant.Value { // SetVal sets the constant.Value for the node, // which must not have been used with SetOpt. -func (n *Node) SetVal(v constant.Value) { +func (n *node) SetVal(v constant.Value) { if n.HasOpt() { base.Flag.LowerH = 1 Dump("have Opt", n) @@ -412,7 +412,7 @@ func (n *Node) SetVal(v constant.Value) { } // Opt returns the optimizer data for the node. -func (n *Node) Opt() interface{} { +func (n *node) Opt() interface{} { if !n.HasOpt() { return nil } @@ -421,7 +421,7 @@ func (n *Node) Opt() interface{} { // SetOpt sets the optimizer data for the node, which must not have been used with SetVal. // SetOpt(nil) is ignored for Vals to simplify call sites that are clearing Opts. -func (n *Node) SetOpt(x interface{}) { +func (n *node) SetOpt(x interface{}) { if x == nil { if n.HasOpt() { n.setHasOpt(false) @@ -438,17 +438,17 @@ func (n *Node) SetOpt(x interface{}) { n.e = x } -func (n *Node) Iota() int64 { +func (n *node) Iota() int64 { return n.Offset() } -func (n *Node) SetIota(x int64) { +func (n *node) SetIota(x int64) { n.SetOffset(x) } // mayBeShared reports whether n may occur in multiple places in the AST. // Extra care must be taken when mutating such a node. -func MayBeShared(n *Node) bool { +func MayBeShared(n Node) bool { switch n.Op() { case ONAME, OLITERAL, ONIL, OTYPE: return true @@ -457,7 +457,7 @@ func MayBeShared(n *Node) bool { } // funcname returns the name (without the package) of the function n. -func FuncName(n *Node) string { +func FuncName(n Node) string { if n == nil || n.Func() == nil || n.Func().Nname == nil { return "" } @@ -468,7 +468,7 @@ func FuncName(n *Node) string { // This differs from the compiler's internal convention where local functions lack a package // because the ultimate consumer of this is a human looking at an IDE; package is only empty // if the compilation package is actually the empty string. -func PkgFuncName(n *Node) string { +func PkgFuncName(n Node) string { var s *types.Sym if n == nil { return "" @@ -494,19 +494,19 @@ func PkgFuncName(n *Node) string { } // The compiler needs *Node to be assignable to cmd/compile/internal/ssa.Sym. -func (n *Node) CanBeAnSSASym() { +func (n *node) CanBeAnSSASym() { } // Name holds Node fields used only by named nodes (ONAME, OTYPE, OPACK, OLABEL, some OLITERAL). type Name struct { - Pack *Node // real package for import . names + Pack Node // real package for import . names Pkg *types.Pkg // pkg for OPACK nodes // For a local variable (not param) or extern, the initializing assignment (OAS or OAS2). // For a closure var, the ONAME node of the outer captured variable - Defn *Node + Defn Node // The ODCLFUNC node (for a static function/method or a closure) in which // local variable or param is declared. 
- Curfn *Node + Curfn Node Param *Param // additional fields for ONAME, OTYPE Decldepth int32 // declaration loop depth, increased for every loop or label // Unique number for ONAME nodes within a function. Function outputs @@ -565,11 +565,11 @@ func (n *Name) SetOpenDeferSlot(b bool) { n.flags.set(nameOpenDeferSlot, func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtraCounter, b) } type Param struct { - Ntype *Node - Heapaddr *Node // temp holding heap address of param + Ntype Node + Heapaddr Node // temp holding heap address of param // ONAME PAUTOHEAP - Stackcopy *Node // the PPARAM/PPARAMOUT on-stack slot (moved func params only) + Stackcopy Node // the PPARAM/PPARAMOUT on-stack slot (moved func params only) // ONAME closure linkage // Consider: @@ -640,8 +640,8 @@ type Param struct { // // Because of the sharding of pieces of the node, x.Defn means x.Name.Defn // and x.Innermost/Outer means x.Name.Param.Innermost/Outer. - Innermost *Node - Outer *Node + Innermost Node + Outer Node // OTYPE & ONAME //go:embed info, // sharing storage to reduce gc.Param size. @@ -762,9 +762,9 @@ func (p *Param) SetEmbedFiles(list []string) { // the generated ODCLFUNC (as n.Func.Decl), but there is no // pointer from the Func back to the OCALLPART. type Func struct { - Nname *Node // ONAME node - Decl *Node // ODCLFUNC node - OClosure *Node // OCLOSURE node + Nname Node // ONAME node + Decl Node // ODCLFUNC node + OClosure Node // OCLOSURE node Shortname *types.Sym @@ -774,10 +774,10 @@ type Func struct { Exit Nodes // ONAME nodes for all params/locals for this func/closure, does NOT // include closurevars until transformclosure runs. - Dcl []*Node + Dcl []Node ClosureEnter Nodes // list of ONAME nodes of captured variables - ClosureType *Node // closure representation type + ClosureType Node // closure representation type ClosureCalled bool // closure is only immediately called ClosureVars Nodes // closure params; each has closurevar set @@ -822,8 +822,8 @@ type Inline struct { Cost int32 // heuristic cost of inlining this function // Copies of Func.Dcl and Nbody for use during inlining. - Dcl []*Node - Body []*Node + Dcl []Node + Body []Node } // A Mark represents a scope boundary. @@ -1108,17 +1108,17 @@ const ( // Nodes is a pointer to a slice of *Node. // For fields that are not used in most nodes, this is used instead of // a slice to save space. -type Nodes struct{ slice *[]*Node } +type Nodes struct{ slice *[]Node } // asNodes returns a slice of *Node as a Nodes value. -func AsNodes(s []*Node) Nodes { +func AsNodes(s []Node) Nodes { return Nodes{&s} } // Slice returns the entries in Nodes as a slice. // Changes to the slice entries (as in s[i] = n) will be reflected in // the Nodes. -func (n Nodes) Slice() []*Node { +func (n Nodes) Slice() []Node { if n.slice == nil { return nil } @@ -1135,25 +1135,25 @@ func (n Nodes) Len() int { // Index returns the i'th element of Nodes. // It panics if n does not have at least i+1 elements. -func (n Nodes) Index(i int) *Node { +func (n Nodes) Index(i int) Node { return (*n.slice)[i] } // First returns the first element of Nodes (same as n.Index(0)). // It panics if n has no elements. -func (n Nodes) First() *Node { +func (n Nodes) First() Node { return (*n.slice)[0] } // Second returns the second element of Nodes (same as n.Index(1)). // It panics if n has fewer than two elements. -func (n Nodes) Second() *Node { +func (n Nodes) Second() Node { return (*n.slice)[1] } // Set sets n to a slice. // This takes ownership of the slice. 
-func (n *Nodes) Set(s []*Node) { +func (n *Nodes) Set(s []Node) { if len(s) == 0 { n.slice = nil } else { @@ -1166,18 +1166,18 @@ func (n *Nodes) Set(s []*Node) { } // Set1 sets n to a slice containing a single node. -func (n *Nodes) Set1(n1 *Node) { - n.slice = &[]*Node{n1} +func (n *Nodes) Set1(n1 Node) { + n.slice = &[]Node{n1} } // Set2 sets n to a slice containing two nodes. -func (n *Nodes) Set2(n1, n2 *Node) { - n.slice = &[]*Node{n1, n2} +func (n *Nodes) Set2(n1, n2 Node) { + n.slice = &[]Node{n1, n2} } // Set3 sets n to a slice containing three nodes. -func (n *Nodes) Set3(n1, n2, n3 *Node) { - n.slice = &[]*Node{n1, n2, n3} +func (n *Nodes) Set3(n1, n2, n3 Node) { + n.slice = &[]Node{n1, n2, n3} } // MoveNodes sets n to the contents of n2, then clears n2. @@ -1188,35 +1188,35 @@ func (n *Nodes) MoveNodes(n2 *Nodes) { // SetIndex sets the i'th element of Nodes to node. // It panics if n does not have at least i+1 elements. -func (n Nodes) SetIndex(i int, node *Node) { +func (n Nodes) SetIndex(i int, node Node) { (*n.slice)[i] = node } // SetFirst sets the first element of Nodes to node. // It panics if n does not have at least one elements. -func (n Nodes) SetFirst(node *Node) { +func (n Nodes) SetFirst(node Node) { (*n.slice)[0] = node } // SetSecond sets the second element of Nodes to node. // It panics if n does not have at least two elements. -func (n Nodes) SetSecond(node *Node) { +func (n Nodes) SetSecond(node Node) { (*n.slice)[1] = node } // Addr returns the address of the i'th element of Nodes. // It panics if n does not have at least i+1 elements. -func (n Nodes) Addr(i int) **Node { +func (n Nodes) Addr(i int) *Node { return &(*n.slice)[i] } // Append appends entries to Nodes. -func (n *Nodes) Append(a ...*Node) { +func (n *Nodes) Append(a ...Node) { if len(a) == 0 { return } if n.slice == nil { - s := make([]*Node, len(a)) + s := make([]Node, len(a)) copy(s, a) n.slice = &s return @@ -1226,7 +1226,7 @@ func (n *Nodes) Append(a ...*Node) { // Prepend prepends entries to Nodes. // If a slice is passed in, this will take ownership of it. -func (n *Nodes) Prepend(a ...*Node) { +func (n *Nodes) Prepend(a ...Node) { if len(a) == 0 { return } @@ -1251,7 +1251,7 @@ func (n *Nodes) AppendNodes(n2 *Nodes) { // inspect invokes f on each node in an AST in depth-first order. // If f(n) returns false, inspect skips visiting n's children. -func Inspect(n *Node, f func(*Node) bool) { +func Inspect(n Node, f func(Node) bool) { if n == nil || !f(n) { return } @@ -1263,7 +1263,7 @@ func Inspect(n *Node, f func(*Node) bool) { InspectList(n.Rlist(), f) } -func InspectList(l Nodes, f func(*Node) bool) { +func InspectList(l Nodes, f func(Node) bool) { for _, n := range l.Slice() { Inspect(n, f) } @@ -1272,7 +1272,7 @@ func InspectList(l Nodes, f func(*Node) bool) { // nodeQueue is a FIFO queue of *Node. The zero value of nodeQueue is // a ready-to-use empty queue. type NodeQueue struct { - ring []*Node + ring []Node head, tail int } @@ -1282,12 +1282,12 @@ func (q *NodeQueue) Empty() bool { } // pushRight appends n to the right of the queue. -func (q *NodeQueue) PushRight(n *Node) { +func (q *NodeQueue) PushRight(n Node) { if len(q.ring) == 0 { - q.ring = make([]*Node, 16) + q.ring = make([]Node, 16) } else if q.head+len(q.ring) == q.tail { // Grow the ring. - nring := make([]*Node, len(q.ring)*2) + nring := make([]Node, len(q.ring)*2) // Copy the old elements. 
part := q.ring[q.head%len(q.ring):] if q.tail-q.head <= len(part) { @@ -1306,7 +1306,7 @@ func (q *NodeQueue) PushRight(n *Node) { // popLeft pops a node from the left of the queue. It panics if q is // empty. -func (q *NodeQueue) PopLeft() *Node { +func (q *NodeQueue) PopLeft() Node { if q.Empty() { panic("dequeue empty") } @@ -1316,25 +1316,25 @@ func (q *NodeQueue) PopLeft() *Node { } // NodeSet is a set of Nodes. -type NodeSet map[*Node]struct{} +type NodeSet map[Node]struct{} // Has reports whether s contains n. -func (s NodeSet) Has(n *Node) bool { +func (s NodeSet) Has(n Node) bool { _, isPresent := s[n] return isPresent } // Add adds n to s. -func (s *NodeSet) Add(n *Node) { +func (s *NodeSet) Add(n Node) { if *s == nil { - *s = make(map[*Node]struct{}) + *s = make(map[Node]struct{}) } (*s)[n] = struct{}{} } // Sorted returns s sorted according to less. -func (s NodeSet) Sorted(less func(*Node, *Node) bool) []*Node { - var res []*Node +func (s NodeSet) Sorted(less func(Node, Node) bool) []Node { + var res []Node for n := range s { res = append(res, n) } @@ -1342,16 +1342,16 @@ func (s NodeSet) Sorted(less func(*Node, *Node) bool) []*Node { return res } -func Nod(op Op, nleft, nright *Node) *Node { +func Nod(op Op, nleft, nright Node) Node { return NodAt(base.Pos, op, nleft, nright) } -func NodAt(pos src.XPos, op Op, nleft, nright *Node) *Node { - var n *Node +func NodAt(pos src.XPos, op Op, nleft, nright Node) Node { + var n Node switch op { case ODCLFUNC: var x struct { - n Node + n node f Func } n = &x.n @@ -1361,13 +1361,13 @@ func NodAt(pos src.XPos, op Op, nleft, nright *Node) *Node { base.Fatalf("use newname instead") case OLABEL, OPACK: var x struct { - n Node + n node m Name } n = &x.n n.SetName(&x.m) default: - n = new(Node) + n = new(node) } n.SetOp(op) n.SetLeft(nleft) @@ -1380,13 +1380,13 @@ func NodAt(pos src.XPos, op Op, nleft, nright *Node) *Node { // newnamel returns a new ONAME Node associated with symbol s at position pos. // The caller is responsible for setting n.Name.Curfn. -func NewNameAt(pos src.XPos, s *types.Sym) *Node { +func NewNameAt(pos src.XPos, s *types.Sym) Node { if s == nil { base.Fatalf("newnamel nil") } var x struct { - n Node + n node m Name p Param } @@ -1453,14 +1453,14 @@ type SymAndPos struct { Pos src.XPos // line of call } -func AsNode(n types.IRNode) *Node { +func AsNode(n types.IRNode) Node { if n == nil { return nil } - return n.(*Node) + return n.(Node) } -var BlankNode *Node +var BlankNode Node // origSym returns the original symbol written by the user. func OrigSym(s *types.Sym) *types.Sym { @@ -1489,7 +1489,7 @@ func OrigSym(s *types.Sym) *types.Sym { // SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max]. // n must be a slice expression. max is nil if n is a simple slice expression. -func (n *Node) SliceBounds() (low, high, max *Node) { +func (n *node) SliceBounds() (low, high, max Node) { if n.List().Len() == 0 { return nil, nil, nil } @@ -1508,7 +1508,7 @@ func (n *Node) SliceBounds() (low, high, max *Node) { // SetSliceBounds sets n's slice bounds, where n is a slice expression. // n must be a slice expression. If max is non-nil, n must be a full slice expression. 
-func (n *Node) SetSliceBounds(low, high, max *Node) {
+func (n *node) SetSliceBounds(low, high, max Node) {
 	switch n.Op() {
 	case OSLICE, OSLICEARR, OSLICESTR:
 		if max != nil {
@@ -1555,13 +1555,13 @@ func (o Op) IsSlice3() bool {
 	return false
 }
 
-func IsConst(n *Node, ct constant.Kind) bool {
+func IsConst(n Node, ct constant.Kind) bool {
 	return ConstType(n) == ct
 }
 
 // Int64Val returns n as an int64.
 // n must be an integer or rune constant.
-func (n *Node) Int64Val() int64 {
+func (n *node) Int64Val() int64 {
 	if !IsConst(n, constant.Int) {
 		base.Fatalf("Int64Val(%v)", n)
 	}
@@ -1573,7 +1573,7 @@ func (n *Node) Int64Val() int64 {
 }
 
 // CanInt64 reports whether it is safe to call Int64Val() on n.
-func (n *Node) CanInt64() bool {
+func (n *node) CanInt64() bool {
 	if !IsConst(n, constant.Int) {
 		return false
 	}
@@ -1586,7 +1586,7 @@ func (n *Node) CanInt64() bool {
 
 // Uint64Val returns n as an uint64.
 // n must be an integer or rune constant.
-func (n *Node) Uint64Val() uint64 {
+func (n *node) Uint64Val() uint64 {
 	if !IsConst(n, constant.Int) {
 		base.Fatalf("Uint64Val(%v)", n)
 	}
@@ -1599,7 +1599,7 @@ func (n *Node) Uint64Val() uint64 {
 
 // BoolVal returns n as a bool.
 // n must be a boolean constant.
-func (n *Node) BoolVal() bool {
+func (n *node) BoolVal() bool {
 	if !IsConst(n, constant.Bool) {
 		base.Fatalf("BoolVal(%v)", n)
 	}
@@ -1608,7 +1608,7 @@ func (n *Node) BoolVal() bool {
 
 // StringVal returns the value of a literal string Node as a string.
 // n must be a string constant.
-func (n *Node) StringVal() string {
+func (n *node) StringVal() string {
 	if !IsConst(n, constant.String) {
 		base.Fatalf("StringVal(%v)", n)
 	}
@@ -1618,14 +1618,14 @@ func (n *Node) StringVal() string {
 // rawcopy returns a shallow copy of n.
 // Note: copy or sepcopy (rather than rawcopy) is usually the
 // correct choice (see comment with Node.copy, below).
-func (n *Node) RawCopy() *Node {
+func (n *node) RawCopy() Node {
 	copy := *n
 	return &copy
 }
 
 // sepcopy returns a separate shallow copy of n, with the copy's
 // Orig pointing to itself.
-func SepCopy(n *Node) *Node {
+func SepCopy(n Node) Node {
 	n = n.RawCopy()
 	n.SetOrig(n)
 	return n
@@ -1638,7 +1638,7 @@ func SepCopy(n *Node) *Node {
 // represent the original node anymore.
 // (This caused the wrong complit Op to be used when printing error
 // messages; see issues #26855, #27765).
-func Copy(n *Node) *Node {
+func Copy(n Node) Node {
 	copy := n.RawCopy()
 	if n.Orig() == n {
 		copy.SetOrig(copy)
@@ -1647,13 +1647,13 @@ func Copy(n *Node) *Node {
 }
 
 // isNil reports whether n represents the universal untyped zero value "nil".
-func IsNil(n *Node) bool {
+func IsNil(n Node) bool {
 	// Check n.Orig because constant propagation may produce typed nil constants,
 	// which don't exist in the Go spec.
 	return n.Orig().Op() == ONIL
 }
 
-func IsBlank(n *Node) bool {
+func IsBlank(n Node) bool {
 	if n == nil {
 		return false
 	}
@@ -1662,6 +1662,6 @@ func IsBlank(n *Node) bool {
 
 // IsMethod reports whether n is a method.
 // n must be a function or a method.
-func IsMethod(n *Node) bool { +func IsMethod(n Node) bool { return n.Type().Recv() != nil } diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go index 1ec89c338d1e9..0a9542fa44e2a 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -20,10 +20,10 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {Func{}, 136, 248}, - {Name{}, 32, 56}, - {Param{}, 24, 48}, - {Node{}, 76, 128}, + {Func{}, 152, 280}, + {Name{}, 44, 80}, + {Param{}, 44, 88}, + {node{}, 88, 152}, } for _, tt := range tests { diff --git a/src/cmd/compile/internal/ir/val.go b/src/cmd/compile/internal/ir/val.go index 6bcee7c01c72b..9035e90084712 100644 --- a/src/cmd/compile/internal/ir/val.go +++ b/src/cmd/compile/internal/ir/val.go @@ -12,7 +12,7 @@ import ( "cmd/compile/internal/types" ) -func ConstType(n *Node) constant.Kind { +func ConstType(n Node) constant.Kind { if n == nil || n.Op() != OLITERAL { return constant.Unknown } @@ -22,7 +22,7 @@ func ConstType(n *Node) constant.Kind { // ValueInterface returns the constant value stored in n as an interface{}. // It returns int64s for ints and runes, float64s for floats, // and complex128s for complex values. -func ConstValue(n *Node) interface{} { +func ConstValue(n Node) interface{} { switch v := n.Val(); v.Kind() { default: base.Fatalf("unexpected constant: %v", v) @@ -91,7 +91,7 @@ func ValidTypeForConst(t *types.Type, v constant.Value) bool { } // nodlit returns a new untyped constant with value v. -func NewLiteral(v constant.Value) *Node { +func NewLiteral(v constant.Value) Node { n := Nod(OLITERAL, nil, nil) if k := v.Kind(); k != constant.Unknown { n.SetType(idealType(k)) diff --git a/src/cmd/compile/internal/mips/ssa.go b/src/cmd/compile/internal/mips/ssa.go index 87e6f5b0c7ae2..bd71b2fcd874f 100644 --- a/src/cmd/compile/internal/mips/ssa.go +++ b/src/cmd/compile/internal/mips/ssa.go @@ -289,7 +289,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case *obj.LSym: wantreg = "SB" gc.AddAux(&p.From, v) - case *ir.Node: + case ir.Node: wantreg = "SP" gc.AddAux(&p.From, v) case nil: diff --git a/src/cmd/compile/internal/mips64/ssa.go b/src/cmd/compile/internal/mips64/ssa.go index ea22c488aab97..bcadebde4e7cb 100644 --- a/src/cmd/compile/internal/mips64/ssa.go +++ b/src/cmd/compile/internal/mips64/ssa.go @@ -263,7 +263,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case *obj.LSym: wantreg = "SB" gc.AddAux(&p.From, v) - case *ir.Node: + case ir.Node: wantreg = "SP" gc.AddAux(&p.From, v) case nil: diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go index 848f27af843f4..32e9be8417523 100644 --- a/src/cmd/compile/internal/ppc64/ssa.go +++ b/src/cmd/compile/internal/ppc64/ssa.go @@ -752,7 +752,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Reg = v.Reg() } - case *obj.LSym, *ir.Node: + case *obj.LSym, ir.Node: p := s.Prog(ppc64.AMOVD) p.From.Type = obj.TYPE_ADDR p.From.Reg = v.Args[0].Reg() diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go index a3dc07fe03029..c81b6897a6bf6 100644 --- a/src/cmd/compile/internal/riscv64/ssa.go +++ b/src/cmd/compile/internal/riscv64/ssa.go @@ -324,7 +324,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case *obj.LSym: wantreg = "SB" gc.AddAux(&p.From, v) - case *ir.Node: + case ir.Node: wantreg = "SP" gc.AddAux(&p.From, v) case nil: diff --git 
a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go index 62abbdc2238e4..eeabd81d0391b 100644 --- a/src/cmd/compile/internal/ssa/config.go +++ b/src/cmd/compile/internal/ssa/config.go @@ -139,7 +139,7 @@ type Frontend interface { // Auto returns a Node for an auto variable of the given type. // The SSA compiler uses this function to allocate space for spills. - Auto(src.XPos, *types.Type) *ir.Node + Auto(src.XPos, *types.Type) ir.Node // Given the name for a compound type, returns the name we should use // for the parts of that compound type. diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go index 0f1cd4bc9fa1b..f3ef33d67040e 100644 --- a/src/cmd/compile/internal/ssa/deadstore.go +++ b/src/cmd/compile/internal/ssa/deadstore.go @@ -137,9 +137,9 @@ func dse(f *Func) { // reaches stores then we delete all the stores. The other operations will then // be eliminated by the dead code elimination pass. func elimDeadAutosGeneric(f *Func) { - addr := make(map[*Value]*ir.Node) // values that the address of the auto reaches - elim := make(map[*Value]*ir.Node) // values that could be eliminated if the auto is - used := make(map[*ir.Node]bool) // used autos that must be kept + addr := make(map[*Value]ir.Node) // values that the address of the auto reaches + elim := make(map[*Value]ir.Node) // values that could be eliminated if the auto is + used := make(map[ir.Node]bool) // used autos that must be kept // visit the value and report whether any of the maps are updated visit := func(v *Value) (changed bool) { @@ -147,7 +147,7 @@ func elimDeadAutosGeneric(f *Func) { switch v.Op { case OpAddr, OpLocalAddr: // Propagate the address if it points to an auto. - n, ok := v.Aux.(*ir.Node) + n, ok := v.Aux.(ir.Node) if !ok || n.Class() != ir.PAUTO { return } @@ -158,7 +158,7 @@ func elimDeadAutosGeneric(f *Func) { return case OpVarDef, OpVarKill: // v should be eliminated if we eliminate the auto. - n, ok := v.Aux.(*ir.Node) + n, ok := v.Aux.(ir.Node) if !ok || n.Class() != ir.PAUTO { return } @@ -174,7 +174,7 @@ func elimDeadAutosGeneric(f *Func) { // for open-coded defers from being removed (since they // may not be used by the inline code, but will be used by // panic processing). - n, ok := v.Aux.(*ir.Node) + n, ok := v.Aux.(ir.Node) if !ok || n.Class() != ir.PAUTO { return } @@ -222,7 +222,7 @@ func elimDeadAutosGeneric(f *Func) { } // Propagate any auto addresses through v. - var node *ir.Node + var node ir.Node for _, a := range args { if n, ok := addr[a]; ok && !used[n] { if node == nil { @@ -299,11 +299,11 @@ func elimUnreadAutos(f *Func) { // Loop over all ops that affect autos taking note of which // autos we need and also stores that we might be able to // eliminate. - seen := make(map[*ir.Node]bool) + seen := make(map[ir.Node]bool) var stores []*Value for _, b := range f.Blocks { for _, v := range b.Values { - n, ok := v.Aux.(*ir.Node) + n, ok := v.Aux.(ir.Node) if !ok { continue } @@ -335,7 +335,7 @@ func elimUnreadAutos(f *Func) { // Eliminate stores to unread autos. for _, store := range stores { - n, _ := store.Aux.(*ir.Node) + n, _ := store.Aux.(ir.Node) if seen[n] { continue } diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go index 9de5f427c07a0..0d660361b1f94 100644 --- a/src/cmd/compile/internal/ssa/debug.go +++ b/src/cmd/compile/internal/ssa/debug.go @@ -25,7 +25,7 @@ type FuncDebug struct { // Slots is all the slots used in the debug info, indexed by their SlotID. 
Slots []LocalSlot // The user variables, indexed by VarID. - Vars []*ir.Node + Vars []ir.Node // The slots that make up each variable, indexed by VarID. VarSlots [][]SlotID // The location list data, indexed by VarID. Must be processed by PutLocationList. @@ -166,7 +166,7 @@ func (s *debugState) logf(msg string, args ...interface{}) { type debugState struct { // See FuncDebug. slots []LocalSlot - vars []*ir.Node + vars []ir.Node varSlots [][]SlotID lists [][]byte @@ -190,7 +190,7 @@ type debugState struct { // The pending location list entry for each user variable, indexed by VarID. pendingEntries []pendingEntry - varParts map[*ir.Node][]SlotID + varParts map[ir.Node][]SlotID blockDebug []BlockDebug pendingSlotLocs []VarLoc liveSlots []liveSlot @@ -347,7 +347,7 @@ func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset fu } if state.varParts == nil { - state.varParts = make(map[*ir.Node][]SlotID) + state.varParts = make(map[ir.Node][]SlotID) } else { for n := range state.varParts { delete(state.varParts, n) @@ -380,7 +380,7 @@ func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset fu for _, b := range f.Blocks { for _, v := range b.Values { if v.Op == OpVarDef || v.Op == OpVarKill { - n := v.Aux.(*ir.Node) + n := v.Aux.(ir.Node) if ir.IsSynthetic(n) { continue } @@ -718,7 +718,7 @@ func (state *debugState) processValue(v *Value, vSlots []SlotID, vReg *Register) switch { case v.Op == OpVarDef, v.Op == OpVarKill: - n := v.Aux.(*ir.Node) + n := v.Aux.(ir.Node) if ir.IsSynthetic(n) { break } diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go index 3d142a2272014..df83383308cdb 100644 --- a/src/cmd/compile/internal/ssa/export_test.go +++ b/src/cmd/compile/internal/ssa/export_test.go @@ -69,7 +69,7 @@ type TestFrontend struct { func (TestFrontend) StringData(s string) *obj.LSym { return nil } -func (TestFrontend) Auto(pos src.XPos, t *types.Type) *ir.Node { +func (TestFrontend) Auto(pos src.XPos, t *types.Type) ir.Node { n := ir.NewNameAt(pos, &types.Sym{Name: "aFakeAuto"}) n.SetClass(ir.PAUTO) return n diff --git a/src/cmd/compile/internal/ssa/location.go b/src/cmd/compile/internal/ssa/location.go index 2f456c9f899b9..3dc3a81703984 100644 --- a/src/cmd/compile/internal/ssa/location.go +++ b/src/cmd/compile/internal/ssa/location.go @@ -60,7 +60,7 @@ func (r *Register) GCNum() int16 { // { N: len, Type: int, Off: 0, SplitOf: parent, SplitOffset: 8} // parent = &{N: s, Type: string} type LocalSlot struct { - N *ir.Node // an ONAME *gc.Node representing a stack location. + N ir.Node // an ONAME *gc.Node representing a stack location. Type *types.Type // type of slot Off int64 // offset of slot in N diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go index 3c1fa600a3227..b36f6b97e18c7 100644 --- a/src/cmd/compile/internal/ssa/nilcheck.go +++ b/src/cmd/compile/internal/ssa/nilcheck.go @@ -236,7 +236,7 @@ func nilcheckelim2(f *Func) { continue } if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() { - if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(*ir.Node).Type().HasPointers()) { + if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(ir.Node).Type().HasPointers()) { // These ops don't really change memory. continue // Note: OpVarDef requires that the defined variable not have pointers. 
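
Every hunk in this patch makes the same move: v.Aux now holds an
ir.Node interface value, so consumers assert against the interface
instead of the old *ir.Node pointer. A minimal sketch of the Aux
convention, where value and auxNode are invented stand-ins rather
than the real ssa.Value API:

	package sketch

	import "cmd/compile/internal/ir"

	// value stands in for ssa.Value; only the Aux field matters here.
	type value struct {
		Aux interface{}
	}

	// auxNode returns the ir.Node stored in Aux, if any. Asserting
	// against the interface accepts every Node implementation, where
	// the old *ir.Node assertion matched exactly one struct type.
	func auxNode(v *value) (ir.Node, bool) {
		n, ok := v.Aux.(ir.Node)
		return n, ok
	}
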
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index 984188393976e..459a9923f7b2d 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -1249,7 +1249,7 @@ func (s *regAllocState) regalloc(f *Func) { // This forces later liveness analysis to make the // value live at this point. v.SetArg(0, s.makeSpill(a, b)) - } else if _, ok := a.Aux.(*ir.Node); ok && vi.rematerializeable { + } else if _, ok := a.Aux.(ir.Node); ok && vi.rematerializeable { // Rematerializeable value with a gc.Node. This is the address of // a stack object (e.g. an LEAQ). Keep the object live. // Change it to VarLive, which is what plive expects for locals. diff --git a/src/cmd/compile/internal/ssa/sizeof_test.go b/src/cmd/compile/internal/ssa/sizeof_test.go index a27002ee3ac3b..60ada011e3e0b 100644 --- a/src/cmd/compile/internal/ssa/sizeof_test.go +++ b/src/cmd/compile/internal/ssa/sizeof_test.go @@ -22,7 +22,7 @@ func TestSizeof(t *testing.T) { }{ {Value{}, 72, 112}, {Block{}, 164, 304}, - {LocalSlot{}, 28, 40}, + {LocalSlot{}, 32, 48}, {valState{}, 28, 40}, } diff --git a/src/cmd/compile/internal/ssa/stackalloc.go b/src/cmd/compile/internal/ssa/stackalloc.go index eee0a21a6675f..5257d44cfeaf0 100644 --- a/src/cmd/compile/internal/ssa/stackalloc.go +++ b/src/cmd/compile/internal/ssa/stackalloc.go @@ -157,7 +157,7 @@ func (s *stackAllocState) stackalloc() { if v.Aux == nil { f.Fatalf("%s has nil Aux\n", v.LongString()) } - loc := LocalSlot{N: v.Aux.(*ir.Node), Type: v.Type, Off: v.AuxInt} + loc := LocalSlot{N: v.Aux.(ir.Node), Type: v.Type, Off: v.AuxInt} if f.pass.debug > stackDebug { fmt.Printf("stackalloc %s to %s\n", v, loc) } diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go index 1a8b5691ef09a..e7451381b4abe 100644 --- a/src/cmd/compile/internal/wasm/ssa.go +++ b/src/cmd/compile/internal/wasm/ssa.go @@ -237,7 +237,7 @@ func ssaGenValueOnStack(s *gc.SSAGenState, v *ssa.Value, extend bool) { switch v.Aux.(type) { case *obj.LSym: gc.AddAux(&p.From, v) - case *ir.Node: + case ir.Node: p.From.Reg = v.Args[0].Reg() gc.AddAux(&p.From, v) default: From 88e33f6ecb9ea44a464bd3863f8037bc081b2a6e Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Wed, 25 Nov 2020 14:02:46 -0800 Subject: [PATCH 047/474] [dev.regabi] cmd/compile: fix latent import/export issue with break/continue In CL 145200, I changed OBREAK, OCONTINUE, OGOTO, and OLABEL to just use Sym instead of Node. However, within the export data, I forgot to update the code for OBREAK and OCONTINUE. This isn't currently an issue because the inliner currently disallows these anyway, but it'll be an issue in the future once we add support for inlining them. Also, Russ independently ran into it in CL 273246. Updates #14768. 
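
For illustration, the unified encoding writes exactly one string per
statement, with "" standing in for a bare break or continue; a Go
label can never be the empty identifier, so the reader can safely
treat "" as "no label". A sketch of the round trip, where emit, next,
and lookup are hypothetical helpers rather than the real
exportWriter/importReader API:

	// writeLabel emits the label name, or "" when there is none.
	func writeLabel(emit func(string), sym *types.Sym) {
		label := ""
		if sym != nil {
			label = sym.Name
		}
		emit(label)
	}

	// readLabel restores a Sym only for a non-empty name.
	func readLabel(next func() string, lookup func(string) *types.Sym) *types.Sym {
		if label := next(); label != "" {
			return lookup(label)
		}
		return nil // unlabeled break or continue
	}
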
Change-Id: I94575df59c08a750b0dce1d3ce612aba7bfeeb76 Reviewed-on: https://go-review.googlesource.com/c/go/+/273270 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/iexport.go | 13 ++++++------- src/cmd/compile/internal/gc/iimport.go | 14 ++++---------- 2 files changed, 10 insertions(+), 17 deletions(-) diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index ef52e40f21678..7c42e43beead5 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -1146,18 +1146,17 @@ func (w *exportWriter) stmt(n ir.Node) { w.op(ir.OFALL) w.pos(n.Pos()) - case ir.OBREAK, ir.OCONTINUE: - w.op(op) - w.pos(n.Pos()) - w.exprsOrNil(n.Left(), nil) - case ir.OEMPTY: // nothing to emit - case ir.OGOTO, ir.OLABEL: + case ir.OBREAK, ir.OCONTINUE, ir.OGOTO, ir.OLABEL: w.op(op) w.pos(n.Pos()) - w.string(n.Sym().Name) + label := "" + if sym := n.Sym(); sym != nil { + label = sym.Name + } + w.string(label) default: base.Fatalf("exporter: CANNOT EXPORT: %v\nPlease notify gri@\n", n.Op()) diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 77078c118a8d3..066d956b9330c 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -1052,20 +1052,14 @@ func (r *importReader) node() ir.Node { n := ir.NodAt(r.pos(), ir.OFALL, nil, nil) return n - case ir.OBREAK, ir.OCONTINUE: - pos := r.pos() - left, _ := r.exprsOrNil() - if left != nil { - left = NewName(left.Sym()) - } - return ir.NodAt(pos, op, left, nil) - // case OEMPTY: // unreachable - not emitted by exporter - case ir.OGOTO, ir.OLABEL: + case ir.OBREAK, ir.OCONTINUE, ir.OGOTO, ir.OLABEL: n := ir.NodAt(r.pos(), op, nil, nil) - n.SetSym(lookup(r.string())) + if label := r.string(); label != "" { + n.SetSym(lookup(label)) + } return n case ir.OEND: From 65f4ec2faec54b7a3e70f2404132df9d83df11e0 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Fri, 27 Nov 2020 23:52:37 -0500 Subject: [PATCH 048/474] [dev.regabi] cmd/compile: cleanup label handling - The use of a label's Name.Defn to point at the named for/select/switch means that any rewrite of the for/select/switch must overwrite the original or else the pointer will dangle. Remove that pointer by adding the label name directly to the for/select/switch representation instead. - The only uses of a label's Sym.Label were ephemeral values during markbreak and escape analysis. Use a map for each. Although in general we are not going to replace all computed fields with maps (too slow), the one in markbreak is only for labeled for/select/switch, and the one in escape is for all labels, but even so, labels are fairly rare. In theory this cleanup should make it easy to allow labeled for/select/switch in inlined bodies, but this CL does not attempt that. It's only concerned with cleanup to enable a new Node representation. Passes buildall w/ toolstash -cmp. 
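
The looping/nonlooping classification that escape analysis keeps in
its new map reduces to: a label starts out nonlooping when declared,
and is upgraded to looping if a goto naming it appears afterwards,
i.e. a backward jump. A self-contained sketch, with the AST walk
flattened into an invented event list:

	type labelState int

	const (
		looping labelState = 1 + iota
		nonlooping
	)

	// event is a label declaration or a goto, in walk order.
	type event struct {
		isGoto bool
		name   string
	}

	func classify(events []event) map[string]labelState {
		labels := make(map[string]labelState)
		for _, ev := range events {
			if ev.isGoto {
				// Label already visited: the goto jumps backward.
				if labels[ev.name] == nonlooping {
					labels[ev.name] = looping
				}
			} else {
				labels[ev.name] = nonlooping
			}
		}
		return labels
	}
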
Change-Id: I7e36ee98d2ea40dbae94e6722d585f007b7afcfa Reviewed-on: https://go-review.googlesource.com/c/go/+/274086 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/escape.go | 28 +++++++---- src/cmd/compile/internal/gc/inl.go | 8 ++-- src/cmd/compile/internal/gc/noder.go | 9 +++- src/cmd/compile/internal/gc/ssa.go | 21 ++++----- src/cmd/compile/internal/gc/subr.go | 17 ------- src/cmd/compile/internal/gc/typecheck.go | 47 ++++++++----------- src/cmd/compile/internal/types/sizeof_test.go | 2 +- src/cmd/compile/internal/types/sym.go | 5 +- 8 files changed, 60 insertions(+), 77 deletions(-) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 783bc8c41dd12..34f52c743abb8 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -85,6 +85,7 @@ import ( type Escape struct { allLocs []*EscLocation + labels map[*types.Sym]labelState // known labels curfn ir.Node @@ -229,13 +230,16 @@ func (e *Escape) walkFunc(fn ir.Node) { ir.InspectList(fn.Body(), func(n ir.Node) bool { switch n.Op() { case ir.OLABEL: - n.Sym().Label = nonlooping + if e.labels == nil { + e.labels = make(map[*types.Sym]labelState) + } + e.labels[n.Sym()] = nonlooping case ir.OGOTO: // If we visited the label before the goto, // then this is a looping label. - if n.Sym().Label == nonlooping { - n.Sym().Label = looping + if e.labels[n.Sym()] == nonlooping { + e.labels[n.Sym()] = looping } } @@ -245,6 +249,10 @@ func (e *Escape) walkFunc(fn ir.Node) { e.curfn = fn e.loopDepth = 1 e.block(fn.Body()) + + if len(e.labels) != 0 { + base.FatalfAt(fn.Pos(), "leftover labels after walkFunc") + } } // Below we implement the methods for walking the AST and recording @@ -310,7 +318,7 @@ func (e *Escape) stmt(n ir.Node) { } case ir.OLABEL: - switch ir.AsNode(n.Sym().Label) { + switch e.labels[n.Sym()] { case nonlooping: if base.Flag.LowerM > 2 { fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n) @@ -323,7 +331,7 @@ func (e *Escape) stmt(n ir.Node) { default: base.Fatalf("label missing tag") } - n.Sym().Label = nil + delete(e.labels, n.Sym()) case ir.OIF: e.discard(n.Left()) @@ -1615,11 +1623,11 @@ func funcSym(fn ir.Node) *types.Sym { } // Mark labels that have no backjumps to them as not increasing e.loopdepth. -// Walk hasn't generated (goto|label).Left.Sym.Label yet, so we'll cheat -// and set it to one of the following two. Then in esc we'll clear it again. -var ( - looping = ir.Nod(ir.OXXX, nil, nil) - nonlooping = ir.Nod(ir.OXXX, nil, nil) +type labelState int + +const ( + looping labelState = 1 + iota + nonlooping ) func isSliceSelfAssign(dst, src ir.Node) bool { diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 6310762c1f4d2..d43d0d06af459 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -405,16 +405,16 @@ func (v *hairyVisitor) visit(n ir.Node) bool { // These nodes don't produce code; omit from inlining budget. return false - case ir.OLABEL: - // TODO(mdempsky): Add support for inlining labeled control statements. - if labeledControl(n) != nil { + case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH: + // ORANGE, OSELECT in "unhandled" above + if n.Sym() != nil { v.reason = "labeled control" return true } case ir.OBREAK, ir.OCONTINUE: if n.Sym() != nil { - // Should have short-circuited due to labeledControl above. + // Should have short-circuited due to labeled control error above. 
base.Fatalf("unexpected labeled break/continue: %v", n) } diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 950d50904795d..ecd50b87f69dd 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -1302,14 +1302,19 @@ func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []i } func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node { - lhs := p.nodSym(label, ir.OLABEL, nil, p.name(label.Label)) + sym := p.name(label.Label) + lhs := p.nodSym(label, ir.OLABEL, nil, sym) var ls ir.Node if label.Stmt != nil { // TODO(mdempsky): Should always be present. ls = p.stmtFall(label.Stmt, fallOK) + switch label.Stmt.(type) { + case *syntax.ForStmt, *syntax.SwitchStmt, *syntax.SelectStmt: + // Attach label directly to control statement too. + ls.SetSym(sym) + } } - lhs.Name().Defn = ls l := []ir.Node{lhs} if ls != nil { if ls.Op() == ir.OBLOCK && ls.Init().Len() == 0 { diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index cb73532b48524..bcc126f82e521 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -356,7 +356,6 @@ func buildssa(fn ir.Node, worker int) *ssa.Func { // Allocate starting values s.labels = map[string]*ssaLabel{} - s.labeledNodes = map[ir.Node]*ssaLabel{} s.fwdVars = map[ir.Node]*ssa.Value{} s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem) @@ -596,9 +595,8 @@ type state struct { // Node for function curfn ir.Node - // labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f - labels map[string]*ssaLabel - labeledNodes map[ir.Node]*ssaLabel + // labels in f + labels map[string]*ssaLabel // unlabeled break and continue statement tracking breakTo *ssa.Block // current target for plain break statement @@ -1169,11 +1167,6 @@ func (s *state) stmt(n ir.Node) { sym := n.Sym() lab := s.label(sym) - // Associate label with its control flow node, if any - if ctl := labeledControl(n); ctl != nil { - s.labeledNodes[ctl] = lab - } - // The label might already have a target block via a goto. if lab.target == nil { lab.target = s.f.NewBlock(ssa.BlockPlain) @@ -1431,9 +1424,10 @@ func (s *state) stmt(n ir.Node) { prevBreak := s.breakTo s.continueTo = bIncr s.breakTo = bEnd - lab := s.labeledNodes[n] - if lab != nil { + var lab *ssaLabel + if sym := n.Sym(); sym != nil { // labeled for loop + lab = s.label(sym) lab.continueTarget = bIncr lab.breakTarget = bEnd } @@ -1489,9 +1483,10 @@ func (s *state) stmt(n ir.Node) { prevBreak := s.breakTo s.breakTo = bEnd - lab := s.labeledNodes[n] - if lab != nil { + var lab *ssaLabel + if sym := n.Sym(); sym != nil { // labeled + lab = s.label(sym) lab.breakTarget = bEnd } diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index fcda219737f5f..d174ebd582b5c 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -582,23 +582,6 @@ func backingArrayPtrLen(n ir.Node) (ptr, len ir.Node) { return ptr, len } -// labeledControl returns the control flow Node (for, switch, select) -// associated with the label n, if any. 
-func labeledControl(n ir.Node) ir.Node { - if n.Op() != ir.OLABEL { - base.Fatalf("labeledControl %v", n.Op()) - } - ctl := n.Name().Defn - if ctl == nil { - return nil - } - switch ctl.Op() { - case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OSELECT: - return ctl - } - return nil -} - func syslook(name string) ir.Node { s := Runtimepkg.Lookup(name) if s == nil || s.Def == nil { diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 4e2f205312110..ede3778184bbb 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -3759,7 +3759,7 @@ func checkmake(t *types.Type, arg string, np *ir.Node) bool { return true } -func markbreak(n ir.Node, implicit ir.Node) { +func markbreak(labels *map[*types.Sym]ir.Node, n ir.Node, implicit ir.Node) { if n == nil { return } @@ -3771,43 +3771,35 @@ func markbreak(n ir.Node, implicit ir.Node) { implicit.SetHasBreak(true) } } else { - lab := ir.AsNode(n.Sym().Label) - if lab != nil { + if lab := (*labels)[n.Sym()]; lab != nil { lab.SetHasBreak(true) } } case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OTYPESW, ir.OSELECT, ir.ORANGE: implicit = n + if sym := n.Sym(); sym != nil { + if *labels == nil { + // Map creation delayed until we need it - most functions don't. + *labels = make(map[*types.Sym]ir.Node) + } + (*labels)[sym] = n + defer delete(*labels, sym) + } fallthrough default: - markbreak(n.Left(), implicit) - markbreak(n.Right(), implicit) - markbreaklist(n.Init(), implicit) - markbreaklist(n.Body(), implicit) - markbreaklist(n.List(), implicit) - markbreaklist(n.Rlist(), implicit) + markbreak(labels, n.Left(), implicit) + markbreak(labels, n.Right(), implicit) + markbreaklist(labels, n.Init(), implicit) + markbreaklist(labels, n.Body(), implicit) + markbreaklist(labels, n.List(), implicit) + markbreaklist(labels, n.Rlist(), implicit) } } -func markbreaklist(l ir.Nodes, implicit ir.Node) { +func markbreaklist(labels *map[*types.Sym]ir.Node, l ir.Nodes, implicit ir.Node) { s := l.Slice() for i := 0; i < len(s); i++ { - n := s[i] - if n == nil { - continue - } - if n.Op() == ir.OLABEL && i+1 < len(s) && n.Name().Defn == s[i+1] { - switch n.Name().Defn.Op() { - case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OTYPESW, ir.OSELECT, ir.ORANGE: - n.Sym().Label = n.Name().Defn - markbreak(n.Name().Defn, n.Name().Defn) - n.Sym().Label = nil - i++ - continue - } - } - - markbreak(n, implicit) + markbreak(labels, s[i], implicit) } } @@ -3874,7 +3866,8 @@ func isTermNode(n ir.Node) bool { // checkreturn makes sure that fn terminates appropriately. 
func checkreturn(fn ir.Node) { if fn.Type().NumResults() != 0 && fn.Body().Len() != 0 { - markbreaklist(fn.Body(), nil) + var labels map[*types.Sym]ir.Node + markbreaklist(&labels, fn.Body(), nil) if !isTermNodes(fn.Body()) { base.ErrorfAt(fn.Func().Endlineno, "missing return at end of function") } diff --git a/src/cmd/compile/internal/types/sizeof_test.go b/src/cmd/compile/internal/types/sizeof_test.go index 2821d9a3c7054..88a2fbba2f5eb 100644 --- a/src/cmd/compile/internal/types/sizeof_test.go +++ b/src/cmd/compile/internal/types/sizeof_test.go @@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {Sym{}, 60, 104}, + {Sym{}, 52, 88}, {Type{}, 56, 96}, {Map{}, 20, 40}, {Forward{}, 20, 32}, diff --git a/src/cmd/compile/internal/types/sym.go b/src/cmd/compile/internal/types/sym.go index 046104d0dcf79..7272f1f7861e5 100644 --- a/src/cmd/compile/internal/types/sym.go +++ b/src/cmd/compile/internal/types/sym.go @@ -33,13 +33,12 @@ type Sym struct { Name string // object name // saved and restored by dcopy - Def IRNode // definition: ONAME OTYPE OPACK or OLITERAL + Def IRNode // definition: ONAME OTYPE OPACK or OLITERAL Block int32 // blocknumber to catch redeclaration Lastlineno src.XPos // last declaration for diagnostic flags bitset8 - Label IRNode // corresponding label (ephemeral) - Origpkg *Pkg // original package for . import + Origpkg *Pkg // original package for . import } const ( From 0c65a2f31734021654ec5eebc270f8c84e5410c7 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sat, 28 Nov 2020 00:05:15 -0500 Subject: [PATCH 049/474] [dev.regabi] cmd/compile: drop Node.HasOpt method Node.HasOpt is only used once, and that use can use Opt instead. Interface is one method smaller. Passes buildall w/ toolstash -cmp. 
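
Sketched abstractly (the two interfaces below are invented for the
example; the real change is to ir.Node): a predicate that is
derivable from an existing accessor can move to the call site, and
the interface loses a method. This assumes, as the *node
implementation guarantees, that Opt returns nil exactly when no opt
data has been set:

	// Before: the interface carries accessor and predicate.
	type nodeWithHasOpt interface {
		Opt() interface{}
		HasOpt() bool
	}

	// After: only the accessor remains; the single caller in
	// escape analysis tests Opt against nil instead.
	type nodeWithOpt interface {
		Opt() interface{}
	}

	func hasLocation(n nodeWithOpt) bool {
		return n.Opt() != nil // replaces n.HasOpt()
	}
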
Change-Id: I6a9d5859a9977a8f4c9db70e166f50f0d8052160 Reviewed-on: https://go-review.googlesource.com/c/go/+/274087 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/escape.go | 2 +- src/cmd/compile/internal/ir/node.go | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 34f52c743abb8..6b6fb44a9942a 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -1092,7 +1092,7 @@ func (e *Escape) newLoc(n ir.Node, transient bool) *EscLocation { base.Fatalf("curfn mismatch: %v != %v", n.Name().Curfn, e.curfn) } - if n.HasOpt() { + if n.Opt() != nil { base.Fatalf("%v already has a location", n) } n.SetOpt(loc) diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 477d07f5029b5..acfddd2dc7d76 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -111,7 +111,6 @@ type Node interface { SetWalkdef(x uint8) Opt() interface{} SetOpt(x interface{}) - HasOpt() bool Diag() bool SetDiag(x bool) Bounded() bool @@ -325,7 +324,7 @@ func (n *node) Bounded() bool { return n.flags&nodeBounded != 0 } func (n *node) HasCall() bool { return n.flags&nodeHasCall != 0 } func (n *node) Likely() bool { return n.flags&nodeLikely != 0 } func (n *node) HasVal() bool { return n.flags&nodeHasVal != 0 } -func (n *node) HasOpt() bool { return n.flags&nodeHasOpt != 0 } +func (n *node) hasOpt() bool { return n.flags&nodeHasOpt != 0 } func (n *node) Embedded() bool { return n.flags&nodeEmbedded != 0 } func (n *node) SetClass(b Class) { n.flags.set3(nodeClass, uint8(b)) } @@ -399,7 +398,7 @@ func (n *node) Val() constant.Value { // SetVal sets the constant.Value for the node, // which must not have been used with SetOpt. func (n *node) SetVal(v constant.Value) { - if n.HasOpt() { + if n.hasOpt() { base.Flag.LowerH = 1 Dump("have Opt", n) base.Fatalf("have Opt") @@ -413,7 +412,7 @@ func (n *node) SetVal(v constant.Value) { // Opt returns the optimizer data for the node. func (n *node) Opt() interface{} { - if !n.HasOpt() { + if !n.hasOpt() { return nil } return n.e @@ -423,7 +422,7 @@ func (n *node) Opt() interface{} { // SetOpt(nil) is ignored for Vals to simplify call sites that are clearing Opts. func (n *node) SetOpt(x interface{}) { if x == nil { - if n.HasOpt() { + if n.hasOpt() { n.setHasOpt(false) n.e = nil } From 79a3d5ce158de1696256d58aa563ca7cd30f6c3f Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sat, 28 Nov 2020 00:14:38 -0500 Subject: [PATCH 050/474] [dev.regabi] cmd/compile: setup for new Node implementations Start a list of which ops are valid for the default node struct implementation (currently all of them). Add a Node implementation helper for a minimal node. Passes buildall w/ toolstash -cmp. 
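
To make the intended use concrete: an embedding type provides the
three methods the miniNode doc comment requires, plus its own fields
and accessors. BranchStmt below is a hypothetical example, not a
type added by this CL:

	// BranchStmt sketches a dedicated break/continue/goto node.
	type BranchStmt struct {
		miniNode
		Label *types.Sym // nil for an unlabeled break or continue
	}

	func NewBranchStmt(pos src.XPos, op Op, label *types.Sym) *BranchStmt {
		n := &BranchStmt{Label: label}
		n.pos = pos
		n.op = op // same package, so the unexported fields are settable
		return n
	}

	func (n *BranchStmt) String() string                { return fmt.Sprint(n) }
	func (n *BranchStmt) RawCopy() Node                 { c := *n; return &c }
	func (n *BranchStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
	func (n *BranchStmt) Sym() *types.Sym               { return n.Label }
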
Change-Id: I7ae45f2cf2be85013cb71ab00524be53f243e13d Reviewed-on: https://go-review.googlesource.com/c/go/+/274088 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/typecheck.go | 4 +- src/cmd/compile/internal/ir/bitset.go | 12 ++ src/cmd/compile/internal/ir/mini.go | 188 ++++++++++++++++ src/cmd/compile/internal/ir/node.go | 260 +++++++++++++++++++---- 4 files changed, 424 insertions(+), 40 deletions(-) create mode 100644 src/cmd/compile/internal/ir/mini.go diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index ede3778184bbb..9da464e1b6714 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -1694,8 +1694,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } op, why := convertop(n.Left().Op() == ir.OLITERAL, t, n.Type()) - n.SetOp(op) - if n.Op() == ir.OXXX { + if op == ir.OXXX { if !n.Diag() && !n.Type().Broke() && !n.Left().Diag() { base.Errorf("cannot convert %L to type %v%s", n.Left(), n.Type(), why) n.SetDiag(true) @@ -1705,6 +1704,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } + n.SetOp(op) switch n.Op() { case ir.OCONVNOP: if t.Etype == n.Type().Etype { diff --git a/src/cmd/compile/internal/ir/bitset.go b/src/cmd/compile/internal/ir/bitset.go index 29f136296fd1e..0c7bd542f60bb 100644 --- a/src/cmd/compile/internal/ir/bitset.go +++ b/src/cmd/compile/internal/ir/bitset.go @@ -14,6 +14,18 @@ func (f *bitset8) set(mask uint8, b bool) { } } +func (f bitset8) get2(shift uint8) uint8 { + return uint8(f>>shift) & 3 +} + +// set2 sets two bits in f using the bottom two bits of b. +func (f *bitset8) set2(shift uint8, b uint8) { + // Clear old bits. + *(*uint8)(f) &^= 3 << shift + // Set new bits. + *(*uint8)(f) |= uint8(b&3) << shift +} + type bitset16 uint16 func (f *bitset16) set(mask uint16, b bool) { diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go new file mode 100644 index 0000000000000..48dccf6a5f781 --- /dev/null +++ b/src/cmd/compile/internal/ir/mini.go @@ -0,0 +1,188 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import ( + "cmd/compile/internal/types" + "cmd/internal/src" + "fmt" + "go/constant" +) + +// A miniNode is a minimal node implementation, +// meant to be embedded as the first field in a larger node implementation, +// at a cost of 8 bytes. +// +// A miniNode is NOT a valid Node by itself: the embedding struct +// must at the least provide: +// +// func (n *MyNode) String() string { return fmt.Sprint(n) } +// func (n *MyNode) RawCopy() Node { c := *n; return &c } +// func (n *MyNode) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +// +// The embedding struct should also fill in n.op in its constructor, +// for more useful panic messages when invalid methods are called, +// instead of implementing Op itself. +// +type miniNode struct { + pos src.XPos // uint32 + op Op // uint8 + bits bitset8 + esc uint16 +} + +// op can be read, but not written. +// An embedding implementation can provide a SetOp if desired. +// (The panicking SetOp is with the other panics below.) 
+func (n *miniNode) Op() Op { return n.op } +func (n *miniNode) Pos() src.XPos { return n.pos } +func (n *miniNode) SetPos(x src.XPos) { n.pos = x } +func (n *miniNode) Esc() uint16 { return n.esc } +func (n *miniNode) SetEsc(x uint16) { n.esc = x } + +const ( + miniWalkdefShift = 0 + miniTypecheckShift = 2 + miniInitorderShift = 4 + miniDiag = 1 << 6 + miniHasCall = 1 << 7 // for miniStmt +) + +func (n *miniNode) Walkdef() uint8 { return n.bits.get2(miniWalkdefShift) } +func (n *miniNode) Typecheck() uint8 { return n.bits.get2(miniTypecheckShift) } +func (n *miniNode) Initorder() uint8 { return n.bits.get2(miniInitorderShift) } +func (n *miniNode) SetWalkdef(x uint8) { + if x > 3 { + panic(fmt.Sprintf("cannot SetWalkdef %d", x)) + } + n.bits.set2(miniWalkdefShift, x) +} +func (n *miniNode) SetTypecheck(x uint8) { + if x > 3 { + panic(fmt.Sprintf("cannot SetTypecheck %d", x)) + } + n.bits.set2(miniTypecheckShift, x) +} +func (n *miniNode) SetInitorder(x uint8) { + if x > 3 { + panic(fmt.Sprintf("cannot SetInitorder %d", x)) + } + n.bits.set2(miniInitorderShift, x) +} + +func (n *miniNode) Diag() bool { return n.bits&miniDiag != 0 } +func (n *miniNode) SetDiag(x bool) { n.bits.set(miniDiag, x) } + +// Empty, immutable graph structure. + +func (n *miniNode) Left() Node { return nil } +func (n *miniNode) Right() Node { return nil } +func (n *miniNode) Init() Nodes { return Nodes{} } +func (n *miniNode) PtrInit() *Nodes { return &immutableEmptyNodes } +func (n *miniNode) Body() Nodes { return Nodes{} } +func (n *miniNode) PtrBody() *Nodes { return &immutableEmptyNodes } +func (n *miniNode) List() Nodes { return Nodes{} } +func (n *miniNode) PtrList() *Nodes { return &immutableEmptyNodes } +func (n *miniNode) Rlist() Nodes { return Nodes{} } +func (n *miniNode) PtrRlist() *Nodes { return &immutableEmptyNodes } +func (n *miniNode) SetLeft(x Node) { + if x != nil { + panic(n.no("SetLeft")) + } +} +func (n *miniNode) SetRight(x Node) { + if x != nil { + panic(n.no("SetRight")) + } +} +func (n *miniNode) SetInit(x Nodes) { + if x != (Nodes{}) { + panic(n.no("SetInit")) + } +} +func (n *miniNode) SetBody(x Nodes) { + if x != (Nodes{}) { + panic(n.no("SetBody")) + } +} +func (n *miniNode) SetList(x Nodes) { + if x != (Nodes{}) { + panic(n.no("SetList")) + } +} +func (n *miniNode) SetRlist(x Nodes) { + if x != (Nodes{}) { + panic(n.no("SetRlist")) + } +} + +// Additional functionality unavailable. 
+ +func (n *miniNode) no(name string) string { return "cannot " + name + " on " + n.op.String() } + +func (n *miniNode) SetOp(Op) { panic(n.no("SetOp")) } +func (n *miniNode) SubOp() Op { panic(n.no("SubOp")) } +func (n *miniNode) SetSubOp(Op) { panic(n.no("SetSubOp")) } +func (n *miniNode) Type() *types.Type { return nil } +func (n *miniNode) SetType(*types.Type) { panic(n.no("SetType")) } +func (n *miniNode) Func() *Func { panic(n.no("Func")) } +func (n *miniNode) SetFunc(*Func) { panic(n.no("SetFunc")) } +func (n *miniNode) Name() *Name { return nil } +func (n *miniNode) SetName(*Name) { panic(n.no("SetName")) } +func (n *miniNode) Sym() *types.Sym { return nil } +func (n *miniNode) SetSym(*types.Sym) { panic(n.no("SetSym")) } +func (n *miniNode) Offset() int64 { return types.BADWIDTH } +func (n *miniNode) SetOffset(x int64) { panic(n.no("SetOffset")) } +func (n *miniNode) Class() Class { return Pxxx } +func (n *miniNode) SetClass(Class) { panic(n.no("SetClass")) } +func (n *miniNode) Likely() bool { panic(n.no("Likely")) } +func (n *miniNode) SetLikely(bool) { panic(n.no("SetLikely")) } +func (n *miniNode) SliceBounds() (low, high, max Node) { + panic(n.no("SliceBounds")) +} +func (n *miniNode) SetSliceBounds(low, high, max Node) { + panic(n.no("SetSliceBounds")) +} +func (n *miniNode) Iota() int64 { panic(n.no("Iota")) } +func (n *miniNode) SetIota(int64) { panic(n.no("SetIota")) } +func (n *miniNode) Colas() bool { return false } +func (n *miniNode) SetColas(bool) { panic(n.no("SetColas")) } +func (n *miniNode) NoInline() bool { panic(n.no("NoInline")) } +func (n *miniNode) SetNoInline(bool) { panic(n.no("SetNoInline")) } +func (n *miniNode) Transient() bool { panic(n.no("Transient")) } +func (n *miniNode) SetTransient(bool) { panic(n.no("SetTransient")) } +func (n *miniNode) Implicit() bool { return false } +func (n *miniNode) SetImplicit(bool) { panic(n.no("SetImplicit")) } +func (n *miniNode) IsDDD() bool { return false } +func (n *miniNode) SetIsDDD(bool) { panic(n.no("SetIsDDD")) } +func (n *miniNode) Embedded() bool { return false } +func (n *miniNode) SetEmbedded(bool) { panic(n.no("SetEmbedded")) } +func (n *miniNode) IndexMapLValue() bool { panic(n.no("IndexMapLValue")) } +func (n *miniNode) SetIndexMapLValue(bool) { panic(n.no("SetIndexMapLValue")) } +func (n *miniNode) ResetAux() { panic(n.no("ResetAux")) } +func (n *miniNode) HasBreak() bool { panic(n.no("HasBreak")) } +func (n *miniNode) SetHasBreak(bool) { panic(n.no("SetHasBreak")) } +func (n *miniNode) HasVal() bool { return false } +func (n *miniNode) Val() constant.Value { panic(n.no("Val")) } +func (n *miniNode) SetVal(v constant.Value) { panic(n.no("SetVal")) } +func (n *miniNode) Int64Val() int64 { panic(n.no("Int64Val")) } +func (n *miniNode) Uint64Val() uint64 { panic(n.no("Uint64Val")) } +func (n *miniNode) CanInt64() bool { panic(n.no("CanInt64")) } +func (n *miniNode) BoolVal() bool { panic(n.no("BoolVal")) } +func (n *miniNode) StringVal() string { panic(n.no("StringVal")) } +func (n *miniNode) HasCall() bool { panic(n.no("HasCall")) } +func (n *miniNode) SetHasCall(bool) { panic(n.no("SetHasCall")) } +func (n *miniNode) NonNil() bool { return false } +func (n *miniNode) MarkNonNil() { panic(n.no("MarkNonNil")) } +func (n *miniNode) Bounded() bool { return false } +func (n *miniNode) SetBounded(bool) { panic(n.no("SetBounded")) } +func (n *miniNode) Opt() interface{} { return nil } +func (n *miniNode) SetOpt(interface{}) { panic(n.no("SetOpt")) } +func (n *miniNode) MarkReadonly() { 
panic(n.no("MarkReadonly")) } +func (n *miniNode) TChanDir() types.ChanDir { panic(n.no("TChanDir")) } +func (n *miniNode) SetTChanDir(types.ChanDir) { panic(n.no("SetTChanDir")) } + +// TODO: Delete when CanBeAnSSASym is removed from Node itself. +func (*miniNode) CanBeAnSSASym() {} diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index acfddd2dc7d76..7a61355858fb1 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -199,7 +199,6 @@ func (n *node) SetOffset(x int64) { n.offset = x } func (n *node) Esc() uint16 { return n.esc } func (n *node) SetEsc(x uint16) { n.esc = x } func (n *node) Op() Op { return n.op } -func (n *node) SetOp(x Op) { n.op = x } func (n *node) Init() Nodes { return n.init } func (n *node) SetInit(x Nodes) { n.init = x } func (n *node) PtrInit() *Nodes { return &n.init } @@ -213,6 +212,13 @@ func (n *node) Rlist() Nodes { return n.rlist } func (n *node) SetRlist(x Nodes) { n.rlist = x } func (n *node) PtrRlist() *Nodes { return &n.rlist } +func (n *node) SetOp(op Op) { + if !okForNod[op] { + panic("cannot node.SetOp " + op.String()) + } + n.op = op +} + func (n *node) ResetAux() { n.aux = 0 } @@ -1109,6 +1115,10 @@ const ( // a slice to save space. type Nodes struct{ slice *[]Node } +// immutableEmptyNodes is an immutable, empty Nodes list. +// The methods that would modify it panic instead. +var immutableEmptyNodes = Nodes{} + // asNodes returns a slice of *Node as a Nodes value. func AsNodes(s []Node) Nodes { return Nodes{&s} @@ -1150,9 +1160,22 @@ func (n Nodes) Second() Node { return (*n.slice)[1] } +func (n *Nodes) mutate() { + if n == &immutableEmptyNodes { + panic("immutable Nodes.Set") + } +} + // Set sets n to a slice. // This takes ownership of the slice. func (n *Nodes) Set(s []Node) { + if n == &immutableEmptyNodes { + if len(s) == 0 { + // Allow immutableEmptyNodes.Set(nil) (a no-op). + return + } + n.mutate() + } if len(s) == 0 { n.slice = nil } else { @@ -1166,21 +1189,25 @@ func (n *Nodes) Set(s []Node) { // Set1 sets n to a slice containing a single node. func (n *Nodes) Set1(n1 Node) { + n.mutate() n.slice = &[]Node{n1} } // Set2 sets n to a slice containing two nodes. func (n *Nodes) Set2(n1, n2 Node) { + n.mutate() n.slice = &[]Node{n1, n2} } // Set3 sets n to a slice containing three nodes. func (n *Nodes) Set3(n1, n2, n3 Node) { + n.mutate() n.slice = &[]Node{n1, n2, n3} } // MoveNodes sets n to the contents of n2, then clears n2. func (n *Nodes) MoveNodes(n2 *Nodes) { + n.mutate() n.slice = n2.slice n2.slice = nil } @@ -1214,6 +1241,7 @@ func (n *Nodes) Append(a ...Node) { if len(a) == 0 { return } + n.mutate() if n.slice == nil { s := make([]Node, len(a)) copy(s, a) @@ -1229,6 +1257,7 @@ func (n *Nodes) Prepend(a ...Node) { if len(a) == 0 { return } + n.mutate() if n.slice == nil { n.slice = &a } else { @@ -1238,6 +1267,7 @@ func (n *Nodes) Prepend(a ...Node) { // AppendNodes appends the contents of *n2 to n, then clears n2. 
func (n *Nodes) AppendNodes(n2 *Nodes) { + n.mutate() switch { case n2.slice == nil: case n.slice == nil: @@ -1341,43 +1371,7 @@ func (s NodeSet) Sorted(less func(Node, Node) bool) []Node { return res } -func Nod(op Op, nleft, nright Node) Node { - return NodAt(base.Pos, op, nleft, nright) -} - -func NodAt(pos src.XPos, op Op, nleft, nright Node) Node { - var n Node - switch op { - case ODCLFUNC: - var x struct { - n node - f Func - } - n = &x.n - n.SetFunc(&x.f) - n.Func().Decl = n - case ONAME: - base.Fatalf("use newname instead") - case OLABEL, OPACK: - var x struct { - n node - m Name - } - n = &x.n - n.SetName(&x.m) - default: - n = new(node) - } - n.SetOp(op) - n.SetLeft(nleft) - n.SetRight(nright) - n.SetPos(pos) - n.SetOffset(types.BADWIDTH) - n.SetOrig(n) - return n -} - -// newnamel returns a new ONAME Node associated with symbol s at position pos. +// NewNameAt returns a new ONAME Node associated with symbol s at position pos. // The caller is responsible for setting n.Name.Curfn. func NewNameAt(pos src.XPos, s *types.Sym) Node { if s == nil { @@ -1664,3 +1658,193 @@ func IsBlank(n Node) bool { func IsMethod(n Node) bool { return n.Type().Recv() != nil } + +func Nod(op Op, nleft, nright Node) Node { + return NodAt(base.Pos, op, nleft, nright) +} + +func NodAt(pos src.XPos, op Op, nleft, nright Node) Node { + var n Node + switch op { + case ODCLFUNC: + var x struct { + n node + f Func + } + n = &x.n + n.SetFunc(&x.f) + n.Func().Decl = n + case OLABEL, OPACK: + var x struct { + n node + m Name + } + n = &x.n + n.SetName(&x.m) + default: + n = new(node) + } + n.SetOp(op) + n.SetLeft(nleft) + n.SetRight(nright) + n.SetPos(pos) + n.SetOffset(types.BADWIDTH) + n.SetOrig(n) + return n +} + +var okForNod = [OEND]bool{ + OADD: true, + OADDR: true, + OADDSTR: true, + OALIGNOF: true, + OAND: true, + OANDAND: true, + OANDNOT: true, + OAPPEND: true, + OARRAYLIT: true, + OAS: true, + OAS2: true, + OAS2DOTTYPE: true, + OAS2FUNC: true, + OAS2MAPR: true, + OAS2RECV: true, + OASOP: true, + OBITNOT: true, + OBLOCK: true, + OBREAK: true, + OBYTES2STR: true, + OBYTES2STRTMP: true, + OCALL: true, + OCALLFUNC: true, + OCALLINTER: true, + OCALLMETH: true, + OCALLPART: true, + OCAP: true, + OCASE: true, + OCFUNC: true, + OCHECKNIL: true, + OCLOSE: true, + OCLOSURE: true, + OCLOSUREVAR: true, + OCOMPLEX: true, + OCOMPLIT: true, + OCONTINUE: true, + OCONV: true, + OCONVIFACE: true, + OCONVNOP: true, + OCOPY: true, + ODCL: true, + ODCLCONST: true, + ODCLFIELD: true, + ODCLFUNC: true, + ODCLTYPE: true, + ODDD: true, + ODEFER: true, + ODELETE: true, + ODEREF: true, + ODIV: true, + ODOT: true, + ODOTINTER: true, + ODOTMETH: true, + ODOTPTR: true, + ODOTTYPE: true, + ODOTTYPE2: true, + OEFACE: true, + OEMPTY: true, + OEQ: true, + OFALL: true, + OFOR: true, + OFORUNTIL: true, + OGE: true, + OGETG: true, + OGO: true, + OGOTO: true, + OGT: true, + OIDATA: true, + OIF: true, + OIMAG: true, + OINDEX: true, + OINDEXMAP: true, + OINLCALL: true, + OINLMARK: true, + OIOTA: true, + OITAB: true, + OKEY: true, + OLABEL: true, + OLE: true, + OLEN: true, + OLITERAL: true, + OLSH: true, + OLT: true, + OMAKE: true, + OMAKECHAN: true, + OMAKEMAP: true, + OMAKESLICE: true, + OMAKESLICECOPY: true, + OMAPLIT: true, + OMETHEXPR: true, + OMOD: true, + OMUL: true, + ONAME: true, + ONE: true, + ONEG: true, + ONEW: true, + ONEWOBJ: true, + ONIL: true, + ONONAME: true, + ONOT: true, + OOFFSETOF: true, + OOR: true, + OOROR: true, + OPACK: true, + OPANIC: true, + OPAREN: true, + OPLUS: true, + OPRINT: true, + OPRINTN: true, + OPTRLIT: 
true, + ORANGE: true, + OREAL: true, + ORECOVER: true, + ORECV: true, + ORESULT: true, + ORETJMP: true, + ORETURN: true, + ORSH: true, + ORUNES2STR: true, + ORUNESTR: true, + OSELECT: true, + OSELRECV: true, + OSELRECV2: true, + OSEND: true, + OSIZEOF: true, + OSLICE: true, + OSLICE3: true, + OSLICE3ARR: true, + OSLICEARR: true, + OSLICEHEADER: true, + OSLICELIT: true, + OSLICESTR: true, + OSPTR: true, + OSTR2BYTES: true, + OSTR2BYTESTMP: true, + OSTR2RUNES: true, + OSTRUCTKEY: true, + OSTRUCTLIT: true, + OSUB: true, + OSWITCH: true, + OTARRAY: true, + OTCHAN: true, + OTFUNC: true, + OTINTER: true, + OTMAP: true, + OTSTRUCT: true, + OTYPE: true, + OTYPESW: true, + OVARDEF: true, + OVARKILL: true, + OVARLIVE: true, + OXDOT: true, + OXOR: true, +} From 171787efcd7a59c90f05a191c74bf5844f1c542a Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sat, 28 Nov 2020 00:36:44 -0500 Subject: [PATCH 051/474] [dev.regabi] cmd/compile: remove Orig, SetOrig from Node interface These are only needed for a few opcodes, and we can avoid wasting storage in every implementation by using the extension interface pattern with a helper function for access. Of course, in the current codebase, there is only one Node implementation (*node) and it has these methods, so there is no danger of a functional change in this particular CL. Passes buildall w/ toolstash -cmp. Change-Id: I440c6c232f1fe7b56b852a00dc530f8f49a6b12d Reviewed-on: https://go-review.googlesource.com/c/go/+/274089 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/const.go | 4 +-- src/cmd/compile/internal/gc/escape.go | 2 +- src/cmd/compile/internal/gc/gen.go | 2 +- src/cmd/compile/internal/gc/iexport.go | 4 +-- src/cmd/compile/internal/gc/ssa.go | 2 +- src/cmd/compile/internal/gc/subr.go | 2 +- src/cmd/compile/internal/gc/typecheck.go | 10 +++--- src/cmd/compile/internal/ir/fmt.go | 8 ++--- src/cmd/compile/internal/ir/node.go | 42 ++++++++++++++++++++---- 9 files changed, 52 insertions(+), 24 deletions(-) diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 4beb85245fde7..3c161d8e127d9 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -537,7 +537,7 @@ func evalConst(n ir.Node) ir.Node { } nl := origConst(s[i], constant.MakeString(strings.Join(strs, ""))) - nl.SetOrig(nl) // it's bigger than just s[i] + nl.(ir.OrigNode).SetOrig(nl) // it's bigger than just s[i] newList = append(newList, nl) i = i2 - 1 } else { @@ -642,7 +642,7 @@ func origConst(n ir.Node, v constant.Value) ir.Node { orig := n n = ir.NodAt(orig.Pos(), ir.OLITERAL, nil, nil) - n.SetOrig(orig) + n.(ir.OrigNode).SetOrig(orig) n.SetType(orig.Type()) n.SetVal(v) return n diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 6b6fb44a9942a..e3ac883e958cb 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -1871,7 +1871,7 @@ func moveToHeap(n ir.Node) { // temp will add it to the function declaration list automatically. 
heapaddr := temp(types.NewPtr(n.Type())) heapaddr.SetSym(lookup("&" + n.Sym().Name)) - heapaddr.Orig().SetSym(heapaddr.Sym()) + ir.Orig(heapaddr).SetSym(heapaddr.Sym()) heapaddr.SetPos(n.Pos()) // Unset AutoTemp to persist the &foo variable name through SSA to diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go index 44e918f2c1794..cb640c7ccf76f 100644 --- a/src/cmd/compile/internal/gc/gen.go +++ b/src/cmd/compile/internal/gc/gen.go @@ -80,7 +80,7 @@ func tempAt(pos src.XPos, curfn ir.Node, t *types.Type) ir.Node { dowidth(t) - return n.Orig() + return ir.Orig(n) } func temp(t *types.Type) ir.Node { diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 7c42e43beead5..c2ea599af40e3 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -1210,8 +1210,8 @@ func (w *exportWriter) expr(n ir.Node) { if !n.Type().HasNil() { base.Fatalf("unexpected type for nil: %v", n.Type()) } - if n.Orig() != nil && n.Orig() != n { - w.expr(n.Orig()) + if orig := ir.Orig(n); orig != nil && orig != n { + w.expr(orig) break } w.op(ir.OLITERAL) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index bcc126f82e521..1a13b14376e0b 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -6675,7 +6675,7 @@ func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) { case ir.Node: if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT { a.Name = obj.NAME_PARAM - a.Sym = n.Orig().Sym().Linksym() + a.Sym = ir.Orig(n).Sym().Linksym() a.Offset += n.Offset() break } diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index d174ebd582b5c..722876abf5b1d 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -559,7 +559,7 @@ func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node { r.SetType(t) r.SetTypecheck(1) r.SetImplicit(true) - r.SetOrig(n.Orig()) + r.(ir.OrigNode).SetOrig(ir.Orig(n)) return r } diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 9da464e1b6714..7037eddff0c6e 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -851,7 +851,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { checklvalue(n.Left(), "take the address of") r := outervalue(n.Left()) if r.Op() == ir.ONAME { - if r.Orig() != r { + if ir.Orig(r) != r { base.Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean? } r.Name().SetAddrtaken(true) @@ -2144,8 +2144,8 @@ func typecheckargs(n ir.Node) { // Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...). // Save n as n.Orig for fmt.go. 
- if n.Orig() == n { - n.SetOrig(ir.SepCopy(n)) + if ir.Orig(n) == n { + n.(ir.OrigNode).SetOrig(ir.SepCopy(n)) } as := ir.Nod(ir.OAS2, nil, nil) @@ -2245,7 +2245,7 @@ func checkdefergo(n ir.Node) { ir.ONEW, ir.OREAL, ir.OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof - if n.Left().Orig() != nil && n.Left().Orig().Op() == ir.OCONV { + if orig := ir.Orig(n.Left()); orig.Op() == ir.OCONV { break } base.ErrorfAt(n.Pos(), "%s discards result of %v", what, n.Left()) @@ -2814,7 +2814,7 @@ func typecheckcomplit(n ir.Node) (res ir.Node) { } // Save original node (including n.Right) - n.SetOrig(ir.Copy(n)) + n.(ir.OrigNode).SetOrig(ir.Copy(n)) setlineno(n.Right()) diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index f394219c0532b..24318d501f5f6 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -1223,8 +1223,8 @@ func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) { case OLITERAL: // this is a bit of a mess if mode == FErr { - if n.Orig() != nil && n.Orig() != n { - exprFmt(n.Orig(), s, prec, mode) + if orig := Orig(n); orig != nil && orig != n { + exprFmt(orig, s, prec, mode) return } if n.Sym() != nil { @@ -1561,8 +1561,8 @@ func nodeFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) { // We almost always want the original. // TODO(gri) Why the special case for OLITERAL? - if n.Op() != OLITERAL && n.Orig() != nil { - n = n.Orig() + if n.Op() != OLITERAL && Orig(n) != nil { + n = Orig(n) } if flag&FmtLong != 0 && t != nil { diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 7a61355858fb1..7e46673eab28d 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -35,8 +35,6 @@ type Node interface { // Abstract graph structure, for generic traversals. Op() Op SetOp(x Op) - Orig() Node - SetOrig(x Node) SubOp() Op SetSubOp(x Op) Left() Node @@ -1616,11 +1614,41 @@ func (n *node) RawCopy() Node { return © } +// A Node may implement the Orig and SetOrig method to +// maintain a pointer to the "unrewritten" form of a Node. +// If a Node does not implement OrigNode, it is its own Orig. +// +// Note that both SepCopy and Copy have definitions compatible +// with a Node that does not implement OrigNode: such a Node +// is its own Orig, and in that case, that's what both want to return +// anyway (SepCopy unconditionally, and Copy only when the input +// is its own Orig as well, but if the output does not implement +// OrigNode, then neither does the input, making the condition true). +type OrigNode interface { + Node + Orig() Node + SetOrig(Node) +} + +func Orig(n Node) Node { + if n, ok := n.(OrigNode); ok { + o := n.Orig() + if o == nil { + Dump("Orig nil", n) + base.Fatalf("Orig returned nil") + } + return o + } + return n +} + // sepcopy returns a separate shallow copy of n, with the copy's // Orig pointing to itself. func SepCopy(n Node) Node { n = n.RawCopy() - n.SetOrig(n) + if n, ok := n.(OrigNode); ok { + n.SetOrig(n) + } return n } @@ -1633,8 +1661,8 @@ func SepCopy(n Node) Node { // messages; see issues #26855, #27765). func Copy(n Node) Node { copy := n.RawCopy() - if n.Orig() == n { - copy.SetOrig(copy) + if n, ok := n.(OrigNode); ok && n.Orig() == n { + copy.(OrigNode).SetOrig(copy) } return copy } @@ -1643,7 +1671,7 @@ func Copy(n Node) Node { func IsNil(n Node) bool { // Check n.Orig because constant propagation may produce typed nil constants, // which don't exist in the Go spec. 
- return n.Orig().Op() == ONIL + return Orig(n).Op() == ONIL } func IsBlank(n Node) bool { @@ -1664,7 +1692,7 @@ func Nod(op Op, nleft, nright Node) Node { } func NodAt(pos src.XPos, op Op, nleft, nright Node) Node { - var n Node + var n *node switch op { case ODCLFUNC: var x struct { From b09dbc69132aeee3571867cd269f5273290a2255 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sat, 28 Nov 2020 00:54:58 -0500 Subject: [PATCH 052/474] [dev.regabi] cmd/compile: remove SetOp(OEMPTY) calls In preparation for OEMPTY being its own Node implementation, remove SetOp(OEMPTY) calls that assume other implementations can be turned into OEMPTY. Passes buildall w/ toolstash -cmp. Change-Id: Icac16d12548f35f52a5efa9d09dacf8260f42075 Reviewed-on: https://go-review.googlesource.com/c/go/+/274090 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/sinit.go | 5 +++-- src/cmd/compile/internal/gc/typecheck.go | 3 +-- src/cmd/compile/internal/gc/walk.go | 9 ++++++--- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index e30663cfbb8fa..fca81763c0f0a 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -959,6 +959,9 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { } } +// oaslit handles special composite literal assignments. +// It returns true if n's effects have been added to init, +// in which case n should be dropped from the program by the caller. func oaslit(n ir.Node, init *ir.Nodes) bool { if n.Left() == nil || n.Right() == nil { // not a special composite literal assignment @@ -990,8 +993,6 @@ func oaslit(n ir.Node, init *ir.Nodes) bool { anylit(n.Right(), n.Left(), init) } - n.SetOp(ir.OEMPTY) - n.SetRight(nil) return true } diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 7037eddff0c6e..0c4a3ad833a51 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -1985,8 +1985,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { // Empty identifier is valid but useless. // Eliminate now to simplify life later. // See issues 7538, 11589, 11593. - n.SetOp(ir.OEMPTY) - n.SetLeft(nil) + n = ir.NodAt(n.Pos(), ir.OEMPTY, nil, nil) } case ir.ODEFER: diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index db8791ee05780..87fe36b08a010 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -152,10 +152,12 @@ func walkstmt(n ir.Node) ir.Node { init := n.Init() n.PtrInit().Set(nil) n = walkexpr(n, &init) - n = addinit(n, init.Slice()) - if wascopy && n.Op() == ir.OCONVNOP { - n.SetOp(ir.OEMPTY) // don't leave plain values as statements. + if wascopy && n.Op() == ir.ONAME { + // copy rewrote to a statement list and a temp for the length. + // Throw away the temp to avoid plain values as statements. + n = ir.NodAt(n.Pos(), ir.OEMPTY, nil, nil) } + n = addinit(n, init.Slice()) // special case for a receive where we throw away // the value received. @@ -609,6 +611,7 @@ opswitch: } if oaslit(n, init) { + n = ir.NodAt(n.Pos(), ir.OEMPTY, nil, nil) break } From be3d8b40b5447f787174015260e85b5198e8f7e6 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sat, 28 Nov 2020 00:43:50 -0500 Subject: [PATCH 053/474] [dev.regabi] cmd/compile: ir.BranchStmt, add ir.EmptyStmt, ir.LabelStmt These are the first three specific implementations of Node. 
They are both a bit of a warmup and also working toward removing references to Name from Node types other than the proper named things - ONAME, ONONAME, OTYPE, OLITERAL. (In this case, BranchStmt and LabelStmt.) Passes buildall w/ toolstash -cmp. Change-Id: Ide816b162025ee4c858dd061d7c29ed633fb7baf Reviewed-on: https://go-review.googlesource.com/c/go/+/274091 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/mini.go | 4 +- src/cmd/compile/internal/ir/node.go | 13 ++--- src/cmd/compile/internal/ir/stmt.go | 83 +++++++++++++++++++++++++++++ 3 files changed, 92 insertions(+), 8 deletions(-) create mode 100644 src/cmd/compile/internal/ir/stmt.go diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go index 48dccf6a5f781..608c2bed81160 100644 --- a/src/cmd/compile/internal/ir/mini.go +++ b/src/cmd/compile/internal/ir/mini.go @@ -127,7 +127,7 @@ func (n *miniNode) SubOp() Op { panic(n.no("SubOp")) } func (n *miniNode) SetSubOp(Op) { panic(n.no("SetSubOp")) } func (n *miniNode) Type() *types.Type { return nil } func (n *miniNode) SetType(*types.Type) { panic(n.no("SetType")) } -func (n *miniNode) Func() *Func { panic(n.no("Func")) } +func (n *miniNode) Func() *Func { return nil } func (n *miniNode) SetFunc(*Func) { panic(n.no("SetFunc")) } func (n *miniNode) Name() *Name { return nil } func (n *miniNode) SetName(*Name) { panic(n.no("SetName")) } @@ -172,7 +172,7 @@ func (n *miniNode) Uint64Val() uint64 { panic(n.no("Uint64Val")) } func (n *miniNode) CanInt64() bool { panic(n.no("CanInt64")) } func (n *miniNode) BoolVal() bool { panic(n.no("BoolVal")) } func (n *miniNode) StringVal() string { panic(n.no("StringVal")) } -func (n *miniNode) HasCall() bool { panic(n.no("HasCall")) } +func (n *miniNode) HasCall() bool { return false } func (n *miniNode) SetHasCall(bool) { panic(n.no("SetHasCall")) } func (n *miniNode) NonNil() bool { return false } func (n *miniNode) MarkNonNil() { panic(n.no("MarkNonNil")) } diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 7e46673eab28d..cafe47493bfec 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -1702,13 +1702,19 @@ func NodAt(pos src.XPos, op Op, nleft, nright Node) Node { n = &x.n n.SetFunc(&x.f) n.Func().Decl = n - case OLABEL, OPACK: + case OPACK: var x struct { n node m Name } n = &x.n n.SetName(&x.m) + case OEMPTY: + return NewEmptyStmt(pos) + case OBREAK, OCONTINUE, OFALL, OGOTO: + return NewBranchStmt(pos, op, nil) + case OLABEL: + return NewLabelStmt(pos, nil) default: n = new(node) } @@ -1740,7 +1746,6 @@ var okForNod = [OEND]bool{ OASOP: true, OBITNOT: true, OBLOCK: true, - OBREAK: true, OBYTES2STR: true, OBYTES2STRTMP: true, OCALL: true, @@ -1757,7 +1762,6 @@ var okForNod = [OEND]bool{ OCLOSUREVAR: true, OCOMPLEX: true, OCOMPLIT: true, - OCONTINUE: true, OCONV: true, OCONVIFACE: true, OCONVNOP: true, @@ -1779,15 +1783,12 @@ var okForNod = [OEND]bool{ ODOTTYPE: true, ODOTTYPE2: true, OEFACE: true, - OEMPTY: true, OEQ: true, - OFALL: true, OFOR: true, OFORUNTIL: true, OGE: true, OGETG: true, OGO: true, - OGOTO: true, OGT: true, OIDATA: true, OIF: true, diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go new file mode 100644 index 0000000000000..5b89ff27a4734 --- /dev/null +++ b/src/cmd/compile/internal/ir/stmt.go @@ -0,0 +1,83 @@ +// Copyright 2020 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import ( + "cmd/compile/internal/types" + "cmd/internal/src" + "fmt" +) + +// A miniStmt is a miniNode with extra fields common to statements. +type miniStmt struct { + miniNode + init Nodes +} + +func (n *miniStmt) Init() Nodes { return n.init } +func (n *miniStmt) SetInit(x Nodes) { n.init = x } +func (n *miniStmt) PtrInit() *Nodes { return &n.init } +func (n *miniStmt) HasCall() bool { return n.bits&miniHasCall != 0 } +func (n *miniStmt) SetHasCall(b bool) { n.bits.set(miniHasCall, b) } + +// A BranchStmt is a break, continue, fallthrough, or goto statement. +type BranchStmt struct { + miniStmt + Label *types.Sym // label if present +} + +func NewBranchStmt(pos src.XPos, op Op, label *types.Sym) *BranchStmt { + switch op { + case OBREAK, OCONTINUE, OFALL, OGOTO: + // ok + default: + panic("NewBranch " + op.String()) + } + n := &BranchStmt{Label: label} + n.pos = pos + n.op = op + return n +} + +func (n *BranchStmt) String() string { return fmt.Sprint(n) } +func (n *BranchStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *BranchStmt) RawCopy() Node { c := *n; return &c } +func (n *BranchStmt) Sym() *types.Sym { return n.Label } +func (n *BranchStmt) SetSym(sym *types.Sym) { n.Label = sym } + +// An EmptyStmt is an empty statement +type EmptyStmt struct { + miniStmt +} + +func NewEmptyStmt(pos src.XPos) *EmptyStmt { + n := &EmptyStmt{} + n.pos = pos + n.op = OEMPTY + return n +} + +func (n *EmptyStmt) String() string { return fmt.Sprint(n) } +func (n *EmptyStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *EmptyStmt) RawCopy() Node { c := *n; return &c } + +// A LabelStmt is a label statement (just the label, not including the statement it labels). +type LabelStmt struct { + miniStmt + Label *types.Sym // "Label:" +} + +func NewLabelStmt(pos src.XPos, label *types.Sym) *LabelStmt { + n := &LabelStmt{Label: label} + n.pos = pos + n.op = OLABEL + return n +} + +func (n *LabelStmt) String() string { return fmt.Sprint(n) } +func (n *LabelStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *LabelStmt) RawCopy() Node { c := *n; return &c } +func (n *LabelStmt) Sym() *types.Sym { return n.Label } +func (n *LabelStmt) SetSym(x *types.Sym) { n.Label = x } From 420809ab08d28fbe8dbe0e8fa4159c7dc82d88ae Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sat, 28 Nov 2020 01:03:40 -0500 Subject: [PATCH 054/474] [dev.regabi] cmd/compile: move name code from node.go to name.go No code changes here, only copying of text. This will make the diffs in a future CL readable. Passes buildall w/ toolstash -cmp. Change-Id: I1b8d8b9ec9408859e36af5ff3bef7c6c10eac0d6 Reviewed-on: https://go-review.googlesource.com/c/go/+/274092 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/name.go | 376 ++++++++++++++++++++++++++++ src/cmd/compile/internal/ir/node.go | 364 --------------------------- 2 files changed, 376 insertions(+), 364 deletions(-) create mode 100644 src/cmd/compile/internal/ir/name.go diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go new file mode 100644 index 0000000000000..fc7a5049e07ef --- /dev/null +++ b/src/cmd/compile/internal/ir/name.go @@ -0,0 +1,376 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ir + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/types" + "cmd/internal/objabi" + "cmd/internal/src" + "go/constant" +) + +// Name holds Node fields used only by named nodes (ONAME, OTYPE, OPACK, OLABEL, some OLITERAL). +type Name struct { + Pack Node // real package for import . names + Pkg *types.Pkg // pkg for OPACK nodes + // For a local variable (not param) or extern, the initializing assignment (OAS or OAS2). + // For a closure var, the ONAME node of the outer captured variable + Defn Node + // The ODCLFUNC node (for a static function/method or a closure) in which + // local variable or param is declared. + Curfn Node + Param *Param // additional fields for ONAME, OTYPE + Decldepth int32 // declaration loop depth, increased for every loop or label + // Unique number for ONAME nodes within a function. Function outputs + // (results) are numbered starting at one, followed by function inputs + // (parameters), and then local variables. Vargen is used to distinguish + // local variables/params with the same name. + Vargen int32 + flags bitset16 +} + +type Param struct { + Ntype Node + Heapaddr Node // temp holding heap address of param + + // ONAME PAUTOHEAP + Stackcopy Node // the PPARAM/PPARAMOUT on-stack slot (moved func params only) + + // ONAME closure linkage + // Consider: + // + // func f() { + // x := 1 // x1 + // func() { + // use(x) // x2 + // func() { + // use(x) // x3 + // --- parser is here --- + // }() + // }() + // } + // + // There is an original declaration of x and then a chain of mentions of x + // leading into the current function. Each time x is mentioned in a new closure, + // we create a variable representing x for use in that specific closure, + // since the way you get to x is different in each closure. + // + // Let's number the specific variables as shown in the code: + // x1 is the original x, x2 is when mentioned in the closure, + // and x3 is when mentioned in the closure in the closure. + // + // We keep these linked (assume N > 1): + // + // - x1.Defn = original declaration statement for x (like most variables) + // - x1.Innermost = current innermost closure x (in this case x3), or nil for none + // - x1.IsClosureVar() = false + // + // - xN.Defn = x1, N > 1 + // - xN.IsClosureVar() = true, N > 1 + // - x2.Outer = nil + // - xN.Outer = x(N-1), N > 2 + // + // + // When we look up x in the symbol table, we always get x1. + // Then we can use x1.Innermost (if not nil) to get the x + // for the innermost known closure function, + // but the first reference in a closure will find either no x1.Innermost + // or an x1.Innermost with .Funcdepth < Funcdepth. + // In that case, a new xN must be created, linked in with: + // + // xN.Defn = x1 + // xN.Outer = x1.Innermost + // x1.Innermost = xN + // + // When we finish the function, we'll process its closure variables + // and find xN and pop it off the list using: + // + // x1 := xN.Defn + // x1.Innermost = xN.Outer + // + // We leave x1.Innermost set so that we can still get to the original + // variable quickly. Not shown here, but once we're + // done parsing a function and no longer need xN.Outer for the + // lexical x reference links as described above, funcLit + // recomputes xN.Outer as the semantic x reference link tree, + // even filling in x in intermediate closures that might not + // have mentioned it along the way to inner closures that did. + // See funcLit for details. 
+ // + // During the eventual compilation, then, for closure variables we have: + // + // xN.Defn = original variable + // xN.Outer = variable captured in next outward scope + // to make closure where xN appears + // + // Because of the sharding of pieces of the node, x.Defn means x.Name.Defn + // and x.Innermost/Outer means x.Name.Param.Innermost/Outer. + Innermost Node + Outer Node + + // OTYPE & ONAME //go:embed info, + // sharing storage to reduce gc.Param size. + // Extra is nil, or else *Extra is a *paramType or an *embedFileList. + Extra *interface{} +} + +// NewNameAt returns a new ONAME Node associated with symbol s at position pos. +// The caller is responsible for setting n.Name.Curfn. +func NewNameAt(pos src.XPos, s *types.Sym) Node { + if s == nil { + base.Fatalf("newnamel nil") + } + + var x struct { + n node + m Name + p Param + } + n := &x.n + n.SetName(&x.m) + n.Name().Param = &x.p + + n.SetOp(ONAME) + n.SetPos(pos) + n.SetOrig(n) + + n.SetSym(s) + return n +} + +type paramType struct { + flag PragmaFlag + alias bool +} + +// Pragma returns the PragmaFlag for p, which must be for an OTYPE. +func (p *Param) Pragma() PragmaFlag { + if p.Extra == nil { + return 0 + } + return (*p.Extra).(*paramType).flag +} + +// SetPragma sets the PragmaFlag for p, which must be for an OTYPE. +func (p *Param) SetPragma(flag PragmaFlag) { + if p.Extra == nil { + if flag == 0 { + return + } + p.Extra = new(interface{}) + *p.Extra = ¶mType{flag: flag} + return + } + (*p.Extra).(*paramType).flag = flag +} + +// Alias reports whether p, which must be for an OTYPE, is a type alias. +func (p *Param) Alias() bool { + if p.Extra == nil { + return false + } + t, ok := (*p.Extra).(*paramType) + if !ok { + return false + } + return t.alias +} + +// SetAlias sets whether p, which must be for an OTYPE, is a type alias. +func (p *Param) SetAlias(alias bool) { + if p.Extra == nil { + if !alias { + return + } + p.Extra = new(interface{}) + *p.Extra = ¶mType{alias: alias} + return + } + (*p.Extra).(*paramType).alias = alias +} + +type embedFileList []string + +// EmbedFiles returns the list of embedded files for p, +// which must be for an ONAME var. +func (p *Param) EmbedFiles() []string { + if p.Extra == nil { + return nil + } + return *(*p.Extra).(*embedFileList) +} + +// SetEmbedFiles sets the list of embedded files for p, +// which must be for an ONAME var. +func (p *Param) SetEmbedFiles(list []string) { + if p.Extra == nil { + if len(list) == 0 { + return + } + f := embedFileList(list) + p.Extra = new(interface{}) + *p.Extra = &f + return + } + *(*p.Extra).(*embedFileList) = list +} + +const ( + nameCaptured = 1 << iota // is the variable captured by a closure + nameReadonly + nameByval // is the variable captured by value or by reference + nameNeedzero // if it contains pointers, needs to be zeroed on function entry + nameAutoTemp // is the variable a temporary (implies no dwarf info. 
reset if escapes to heap) + nameUsed // for variable declared and not used error + nameIsClosureVar // PAUTOHEAP closure pseudo-variable; original at n.Name.Defn + nameIsOutputParamHeapAddr // pointer to a result parameter's heap copy + nameAssigned // is the variable ever assigned to + nameAddrtaken // address taken, even if not moved to heap + nameInlFormal // PAUTO created by inliner, derived from callee formal + nameInlLocal // PAUTO created by inliner, derived from callee local + nameOpenDeferSlot // if temporary var storing info for open-coded defers + nameLibfuzzerExtraCounter // if PEXTERN should be assigned to __libfuzzer_extra_counters section +) + +func (n *Name) Captured() bool { return n.flags&nameCaptured != 0 } +func (n *Name) Readonly() bool { return n.flags&nameReadonly != 0 } +func (n *Name) Byval() bool { return n.flags&nameByval != 0 } +func (n *Name) Needzero() bool { return n.flags&nameNeedzero != 0 } +func (n *Name) AutoTemp() bool { return n.flags&nameAutoTemp != 0 } +func (n *Name) Used() bool { return n.flags&nameUsed != 0 } +func (n *Name) IsClosureVar() bool { return n.flags&nameIsClosureVar != 0 } +func (n *Name) IsOutputParamHeapAddr() bool { return n.flags&nameIsOutputParamHeapAddr != 0 } +func (n *Name) Assigned() bool { return n.flags&nameAssigned != 0 } +func (n *Name) Addrtaken() bool { return n.flags&nameAddrtaken != 0 } +func (n *Name) InlFormal() bool { return n.flags&nameInlFormal != 0 } +func (n *Name) InlLocal() bool { return n.flags&nameInlLocal != 0 } +func (n *Name) OpenDeferSlot() bool { return n.flags&nameOpenDeferSlot != 0 } +func (n *Name) LibfuzzerExtraCounter() bool { return n.flags&nameLibfuzzerExtraCounter != 0 } + +func (n *Name) SetCaptured(b bool) { n.flags.set(nameCaptured, b) } +func (n *Name) SetReadonly(b bool) { n.flags.set(nameReadonly, b) } +func (n *Name) SetByval(b bool) { n.flags.set(nameByval, b) } +func (n *Name) SetNeedzero(b bool) { n.flags.set(nameNeedzero, b) } +func (n *Name) SetAutoTemp(b bool) { n.flags.set(nameAutoTemp, b) } +func (n *Name) SetUsed(b bool) { n.flags.set(nameUsed, b) } +func (n *Name) SetIsClosureVar(b bool) { n.flags.set(nameIsClosureVar, b) } +func (n *Name) SetIsOutputParamHeapAddr(b bool) { n.flags.set(nameIsOutputParamHeapAddr, b) } +func (n *Name) SetAssigned(b bool) { n.flags.set(nameAssigned, b) } +func (n *Name) SetAddrtaken(b bool) { n.flags.set(nameAddrtaken, b) } +func (n *Name) SetInlFormal(b bool) { n.flags.set(nameInlFormal, b) } +func (n *Name) SetInlLocal(b bool) { n.flags.set(nameInlLocal, b) } +func (n *Name) SetOpenDeferSlot(b bool) { n.flags.set(nameOpenDeferSlot, b) } +func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtraCounter, b) } + +// MarkReadonly indicates that n is an ONAME with readonly contents. +func (n *node) MarkReadonly() { + if n.Op() != ONAME { + base.Fatalf("Node.MarkReadonly %v", n.Op()) + } + n.Name().SetReadonly(true) + // Mark the linksym as readonly immediately + // so that the SSA backend can use this information. + // It will be overridden later during dumpglobls. + n.Sym().Linksym().Type = objabi.SRODATA +} + +// Val returns the constant.Value for the node. +func (n *node) Val() constant.Value { + if !n.HasVal() { + return constant.MakeUnknown() + } + return *n.e.(*constant.Value) +} + +// SetVal sets the constant.Value for the node, +// which must not have been used with SetOpt. 
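+// (Val and Opt share the node's single e field, so a node can hold a
+// constant value or optimizer data, but never both at once.)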
+func (n *node) SetVal(v constant.Value) { + if n.hasOpt() { + base.Flag.LowerH = 1 + Dump("have Opt", n) + base.Fatalf("have Opt") + } + if n.Op() == OLITERAL { + AssertValidTypeForConst(n.Type(), v) + } + n.setHasVal(true) + n.e = &v +} + +// Int64Val returns n as an int64. +// n must be an integer or rune constant. +func (n *node) Int64Val() int64 { + if !IsConst(n, constant.Int) { + base.Fatalf("Int64Val(%v)", n) + } + x, ok := constant.Int64Val(n.Val()) + if !ok { + base.Fatalf("Int64Val(%v)", n) + } + return x +} + +// CanInt64 reports whether it is safe to call Int64Val() on n. +func (n *node) CanInt64() bool { + if !IsConst(n, constant.Int) { + return false + } + + // if the value inside n cannot be represented as an int64, the + // return value of Int64 is undefined + _, ok := constant.Int64Val(n.Val()) + return ok +} + +// Uint64Val returns n as an uint64. +// n must be an integer or rune constant. +func (n *node) Uint64Val() uint64 { + if !IsConst(n, constant.Int) { + base.Fatalf("Uint64Val(%v)", n) + } + x, ok := constant.Uint64Val(n.Val()) + if !ok { + base.Fatalf("Uint64Val(%v)", n) + } + return x +} + +// BoolVal returns n as a bool. +// n must be a boolean constant. +func (n *node) BoolVal() bool { + if !IsConst(n, constant.Bool) { + base.Fatalf("BoolVal(%v)", n) + } + return constant.BoolVal(n.Val()) +} + +// StringVal returns the value of a literal string Node as a string. +// n must be a string constant. +func (n *node) StringVal() string { + if !IsConst(n, constant.String) { + base.Fatalf("StringVal(%v)", n) + } + return constant.StringVal(n.Val()) +} + +// The Class of a variable/function describes the "storage class" +// of a variable or function. During parsing, storage classes are +// called declaration contexts. +type Class uint8 + +//go:generate stringer -type=Class +const ( + Pxxx Class = iota // no class; used during ssa conversion to indicate pseudo-variables + PEXTERN // global variables + PAUTO // local variables + PAUTOHEAP // local variables or parameters moved to heap + PPARAM // input arguments + PPARAMOUT // output results + PFUNC // global functions + + // Careful: Class is stored in three bits in Node.flags. + _ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3) +) diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index cafe47493bfec..079871879d08b 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -15,7 +15,6 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/obj" - "cmd/internal/objabi" "cmd/internal/src" ) @@ -379,41 +378,6 @@ func (n *node) SetBounded(b bool) { n.flags.set(nodeBounded, b) } -// MarkReadonly indicates that n is an ONAME with readonly contents. -func (n *node) MarkReadonly() { - if n.Op() != ONAME { - base.Fatalf("Node.MarkReadonly %v", n.Op()) - } - n.Name().SetReadonly(true) - // Mark the linksym as readonly immediately - // so that the SSA backend can use this information. - // It will be overridden later during dumpglobls. - n.Sym().Linksym().Type = objabi.SRODATA -} - -// Val returns the constant.Value for the node. -func (n *node) Val() constant.Value { - if !n.HasVal() { - return constant.MakeUnknown() - } - return *n.e.(*constant.Value) -} - -// SetVal sets the constant.Value for the node, -// which must not have been used with SetOpt. 
-func (n *node) SetVal(v constant.Value) { - if n.hasOpt() { - base.Flag.LowerH = 1 - Dump("have Opt", n) - base.Fatalf("have Opt") - } - if n.Op() == OLITERAL { - AssertValidTypeForConst(n.Type(), v) - } - n.setHasVal(true) - n.e = &v -} - // Opt returns the optimizer data for the node. func (n *node) Opt() interface{} { if !n.hasOpt() { @@ -500,235 +464,6 @@ func PkgFuncName(n Node) string { func (n *node) CanBeAnSSASym() { } -// Name holds Node fields used only by named nodes (ONAME, OTYPE, OPACK, OLABEL, some OLITERAL). -type Name struct { - Pack Node // real package for import . names - Pkg *types.Pkg // pkg for OPACK nodes - // For a local variable (not param) or extern, the initializing assignment (OAS or OAS2). - // For a closure var, the ONAME node of the outer captured variable - Defn Node - // The ODCLFUNC node (for a static function/method or a closure) in which - // local variable or param is declared. - Curfn Node - Param *Param // additional fields for ONAME, OTYPE - Decldepth int32 // declaration loop depth, increased for every loop or label - // Unique number for ONAME nodes within a function. Function outputs - // (results) are numbered starting at one, followed by function inputs - // (parameters), and then local variables. Vargen is used to distinguish - // local variables/params with the same name. - Vargen int32 - flags bitset16 -} - -const ( - nameCaptured = 1 << iota // is the variable captured by a closure - nameReadonly - nameByval // is the variable captured by value or by reference - nameNeedzero // if it contains pointers, needs to be zeroed on function entry - nameAutoTemp // is the variable a temporary (implies no dwarf info. reset if escapes to heap) - nameUsed // for variable declared and not used error - nameIsClosureVar // PAUTOHEAP closure pseudo-variable; original at n.Name.Defn - nameIsOutputParamHeapAddr // pointer to a result parameter's heap copy - nameAssigned // is the variable ever assigned to - nameAddrtaken // address taken, even if not moved to heap - nameInlFormal // PAUTO created by inliner, derived from callee formal - nameInlLocal // PAUTO created by inliner, derived from callee local - nameOpenDeferSlot // if temporary var storing info for open-coded defers - nameLibfuzzerExtraCounter // if PEXTERN should be assigned to __libfuzzer_extra_counters section -) - -func (n *Name) Captured() bool { return n.flags&nameCaptured != 0 } -func (n *Name) Readonly() bool { return n.flags&nameReadonly != 0 } -func (n *Name) Byval() bool { return n.flags&nameByval != 0 } -func (n *Name) Needzero() bool { return n.flags&nameNeedzero != 0 } -func (n *Name) AutoTemp() bool { return n.flags&nameAutoTemp != 0 } -func (n *Name) Used() bool { return n.flags&nameUsed != 0 } -func (n *Name) IsClosureVar() bool { return n.flags&nameIsClosureVar != 0 } -func (n *Name) IsOutputParamHeapAddr() bool { return n.flags&nameIsOutputParamHeapAddr != 0 } -func (n *Name) Assigned() bool { return n.flags&nameAssigned != 0 } -func (n *Name) Addrtaken() bool { return n.flags&nameAddrtaken != 0 } -func (n *Name) InlFormal() bool { return n.flags&nameInlFormal != 0 } -func (n *Name) InlLocal() bool { return n.flags&nameInlLocal != 0 } -func (n *Name) OpenDeferSlot() bool { return n.flags&nameOpenDeferSlot != 0 } -func (n *Name) LibfuzzerExtraCounter() bool { return n.flags&nameLibfuzzerExtraCounter != 0 } - -func (n *Name) SetCaptured(b bool) { n.flags.set(nameCaptured, b) } -func (n *Name) SetReadonly(b bool) { n.flags.set(nameReadonly, b) } -func (n *Name) SetByval(b bool) { 
n.flags.set(nameByval, b) } -func (n *Name) SetNeedzero(b bool) { n.flags.set(nameNeedzero, b) } -func (n *Name) SetAutoTemp(b bool) { n.flags.set(nameAutoTemp, b) } -func (n *Name) SetUsed(b bool) { n.flags.set(nameUsed, b) } -func (n *Name) SetIsClosureVar(b bool) { n.flags.set(nameIsClosureVar, b) } -func (n *Name) SetIsOutputParamHeapAddr(b bool) { n.flags.set(nameIsOutputParamHeapAddr, b) } -func (n *Name) SetAssigned(b bool) { n.flags.set(nameAssigned, b) } -func (n *Name) SetAddrtaken(b bool) { n.flags.set(nameAddrtaken, b) } -func (n *Name) SetInlFormal(b bool) { n.flags.set(nameInlFormal, b) } -func (n *Name) SetInlLocal(b bool) { n.flags.set(nameInlLocal, b) } -func (n *Name) SetOpenDeferSlot(b bool) { n.flags.set(nameOpenDeferSlot, b) } -func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtraCounter, b) } - -type Param struct { - Ntype Node - Heapaddr Node // temp holding heap address of param - - // ONAME PAUTOHEAP - Stackcopy Node // the PPARAM/PPARAMOUT on-stack slot (moved func params only) - - // ONAME closure linkage - // Consider: - // - // func f() { - // x := 1 // x1 - // func() { - // use(x) // x2 - // func() { - // use(x) // x3 - // --- parser is here --- - // }() - // }() - // } - // - // There is an original declaration of x and then a chain of mentions of x - // leading into the current function. Each time x is mentioned in a new closure, - // we create a variable representing x for use in that specific closure, - // since the way you get to x is different in each closure. - // - // Let's number the specific variables as shown in the code: - // x1 is the original x, x2 is when mentioned in the closure, - // and x3 is when mentioned in the closure in the closure. - // - // We keep these linked (assume N > 1): - // - // - x1.Defn = original declaration statement for x (like most variables) - // - x1.Innermost = current innermost closure x (in this case x3), or nil for none - // - x1.IsClosureVar() = false - // - // - xN.Defn = x1, N > 1 - // - xN.IsClosureVar() = true, N > 1 - // - x2.Outer = nil - // - xN.Outer = x(N-1), N > 2 - // - // - // When we look up x in the symbol table, we always get x1. - // Then we can use x1.Innermost (if not nil) to get the x - // for the innermost known closure function, - // but the first reference in a closure will find either no x1.Innermost - // or an x1.Innermost with .Funcdepth < Funcdepth. - // In that case, a new xN must be created, linked in with: - // - // xN.Defn = x1 - // xN.Outer = x1.Innermost - // x1.Innermost = xN - // - // When we finish the function, we'll process its closure variables - // and find xN and pop it off the list using: - // - // x1 := xN.Defn - // x1.Innermost = xN.Outer - // - // We leave x1.Innermost set so that we can still get to the original - // variable quickly. Not shown here, but once we're - // done parsing a function and no longer need xN.Outer for the - // lexical x reference links as described above, funcLit - // recomputes xN.Outer as the semantic x reference link tree, - // even filling in x in intermediate closures that might not - // have mentioned it along the way to inner closures that did. - // See funcLit for details. 
- // - // During the eventual compilation, then, for closure variables we have: - // - // xN.Defn = original variable - // xN.Outer = variable captured in next outward scope - // to make closure where xN appears - // - // Because of the sharding of pieces of the node, x.Defn means x.Name.Defn - // and x.Innermost/Outer means x.Name.Param.Innermost/Outer. - Innermost Node - Outer Node - - // OTYPE & ONAME //go:embed info, - // sharing storage to reduce gc.Param size. - // Extra is nil, or else *Extra is a *paramType or an *embedFileList. - Extra *interface{} -} - -type paramType struct { - flag PragmaFlag - alias bool -} - -type embedFileList []string - -// Pragma returns the PragmaFlag for p, which must be for an OTYPE. -func (p *Param) Pragma() PragmaFlag { - if p.Extra == nil { - return 0 - } - return (*p.Extra).(*paramType).flag -} - -// SetPragma sets the PragmaFlag for p, which must be for an OTYPE. -func (p *Param) SetPragma(flag PragmaFlag) { - if p.Extra == nil { - if flag == 0 { - return - } - p.Extra = new(interface{}) - *p.Extra = ¶mType{flag: flag} - return - } - (*p.Extra).(*paramType).flag = flag -} - -// Alias reports whether p, which must be for an OTYPE, is a type alias. -func (p *Param) Alias() bool { - if p.Extra == nil { - return false - } - t, ok := (*p.Extra).(*paramType) - if !ok { - return false - } - return t.alias -} - -// SetAlias sets whether p, which must be for an OTYPE, is a type alias. -func (p *Param) SetAlias(alias bool) { - if p.Extra == nil { - if !alias { - return - } - p.Extra = new(interface{}) - *p.Extra = ¶mType{alias: alias} - return - } - (*p.Extra).(*paramType).alias = alias -} - -// EmbedFiles returns the list of embedded files for p, -// which must be for an ONAME var. -func (p *Param) EmbedFiles() []string { - if p.Extra == nil { - return nil - } - return *(*p.Extra).(*embedFileList) -} - -// SetEmbedFiles sets the list of embedded files for p, -// which must be for an ONAME var. -func (p *Param) SetEmbedFiles(list []string) { - if p.Extra == nil { - if len(list) == 0 { - return - } - f := embedFileList(list) - p.Extra = new(interface{}) - *p.Extra = &f - return - } - *(*p.Extra).(*embedFileList) = list -} - // A Func corresponds to a single function in a Go program // (and vice versa: each function is denoted by exactly one *Func). // @@ -1369,49 +1104,6 @@ func (s NodeSet) Sorted(less func(Node, Node) bool) []Node { return res } -// NewNameAt returns a new ONAME Node associated with symbol s at position pos. -// The caller is responsible for setting n.Name.Curfn. -func NewNameAt(pos src.XPos, s *types.Sym) Node { - if s == nil { - base.Fatalf("newnamel nil") - } - - var x struct { - n node - m Name - p Param - } - n := &x.n - n.SetName(&x.m) - n.Name().Param = &x.p - - n.SetOp(ONAME) - n.SetPos(pos) - n.SetOrig(n) - - n.SetSym(s) - return n -} - -// The Class of a variable/function describes the "storage class" -// of a variable or function. During parsing, storage classes are -// called declaration contexts. -type Class uint8 - -//go:generate stringer -type=Class -const ( - Pxxx Class = iota // no class; used during ssa conversion to indicate pseudo-variables - PEXTERN // global variables - PAUTO // local variables - PAUTOHEAP // local variables or parameters moved to heap - PPARAM // input arguments - PPARAMOUT // output results - PFUNC // global functions - - // Careful: Class is stored in three bits in Node.flags. 
- _ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3) -) - type PragmaFlag int16 const ( @@ -1550,62 +1242,6 @@ func IsConst(n Node, ct constant.Kind) bool { return ConstType(n) == ct } -// Int64Val returns n as an int64. -// n must be an integer or rune constant. -func (n *node) Int64Val() int64 { - if !IsConst(n, constant.Int) { - base.Fatalf("Int64Val(%v)", n) - } - x, ok := constant.Int64Val(n.Val()) - if !ok { - base.Fatalf("Int64Val(%v)", n) - } - return x -} - -// CanInt64 reports whether it is safe to call Int64Val() on n. -func (n *node) CanInt64() bool { - if !IsConst(n, constant.Int) { - return false - } - - // if the value inside n cannot be represented as an int64, the - // return value of Int64 is undefined - _, ok := constant.Int64Val(n.Val()) - return ok -} - -// Uint64Val returns n as an uint64. -// n must be an integer or rune constant. -func (n *node) Uint64Val() uint64 { - if !IsConst(n, constant.Int) { - base.Fatalf("Uint64Val(%v)", n) - } - x, ok := constant.Uint64Val(n.Val()) - if !ok { - base.Fatalf("Uint64Val(%v)", n) - } - return x -} - -// BoolVal returns n as a bool. -// n must be a boolean constant. -func (n *node) BoolVal() bool { - if !IsConst(n, constant.Bool) { - base.Fatalf("BoolVal(%v)", n) - } - return constant.BoolVal(n.Val()) -} - -// StringVal returns the value of a literal string Node as a string. -// n must be a string constant. -func (n *node) StringVal() string { - if !IsConst(n, constant.String) { - base.Fatalf("StringVal(%v)", n) - } - return constant.StringVal(n.Val()) -} - // rawcopy returns a shallow copy of n. // Note: copy or sepcopy (rather than rawcopy) is usually the // correct choice (see comment with Node.copy, below). From f6106d195db8bd7ef268e621f4a0d9ddbe9c58f6 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sat, 28 Nov 2020 01:11:49 -0500 Subject: [PATCH 055/474] [dev.regabi] cmd/compile: add ir.PkgName OPACK was using a whole Node and Name and Param to hold about three fields. Give it its own implementation. Passes buildall w/ toolstash -cmp. Change-Id: I85a28b43d37183b2062d337b0b1b2eea52884e8c Reviewed-on: https://go-review.googlesource.com/c/go/+/274093 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/main.go | 11 ++++----- src/cmd/compile/internal/gc/noder.go | 22 +++++++++--------- src/cmd/compile/internal/gc/subr.go | 4 ++-- src/cmd/compile/internal/ir/name.go | 26 +++++++++++++++++++--- src/cmd/compile/internal/ir/node.go | 8 +------ src/cmd/compile/internal/ir/sizeof_test.go | 2 +- 6 files changed, 44 insertions(+), 29 deletions(-) diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 30ee57c02daa2..7d2933f3602d8 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -955,8 +955,9 @@ func clearImports() { // leave s->block set to cause redeclaration // errors if a conflicting top-level name is // introduced by a different file. - if !n.Name().Used() && base.SyntaxErrors() == 0 { - unused = append(unused, importedPkg{n.Pos(), n.Name().Pkg.Path, s.Name}) + p := n.(*ir.PkgName) + if !p.Used && base.SyntaxErrors() == 0 { + unused = append(unused, importedPkg{p.Pos(), p.Pkg.Path, s.Name}) } s.Def = nil continue @@ -964,9 +965,9 @@ func clearImports() { if IsAlias(s) { // throw away top-level name left over // from previous import . 
"x" - if n.Name() != nil && n.Name().Pack != nil && !n.Name().Pack.Name().Used() && base.SyntaxErrors() == 0 { - unused = append(unused, importedPkg{n.Name().Pack.Pos(), n.Name().Pack.Name().Pkg.Path, ""}) - n.Name().Pack.Name().SetUsed(true) + if name := n.Name(); name != nil && name.PkgName != nil && !name.PkgName.Used && base.SyntaxErrors() == 0 { + unused = append(unused, importedPkg{name.PkgName.Pos(), name.PkgName.Pkg.Path, ""}) + name.PkgName.Used = true } s.Def = nil continue diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index ecd50b87f69dd..54915d76931fb 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -356,9 +356,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) { my = lookup(ipkg.Name) } - pack := p.nod(imp, ir.OPACK, nil, nil) - pack.SetSym(my) - pack.Name().Pkg = ipkg + pack := ir.NewPkgName(p.pos(imp), my, ipkg) switch my.Name { case ".": @@ -685,8 +683,9 @@ func (p *noder) expr(expr syntax.Expr) ir.Node { // parser.new_dotname obj := p.expr(expr.X) if obj.Op() == ir.OPACK { - obj.Name().SetUsed(true) - return importName(obj.Name().Pkg.Lookup(expr.Sel.Value)) + pack := obj.(*ir.PkgName) + pack.Used = true + return importName(pack.Pkg.Lookup(expr.Sel.Value)) } n := nodSym(ir.OXDOT, obj, p.name(expr.Sel)) n.SetPos(p.pos(expr)) // lineno may have been changed by p.expr(expr.X) @@ -910,8 +909,8 @@ func (p *noder) packname(expr syntax.Expr) *types.Sym { switch expr := expr.(type) { case *syntax.Name: name := p.name(expr) - if n := oldname(name); n.Name() != nil && n.Name().Pack != nil { - n.Name().Pack.Name().SetUsed(true) + if n := oldname(name); n.Name() != nil && n.Name().PkgName != nil { + n.Name().PkgName.Used = true } return name case *syntax.SelectorExpr: @@ -926,8 +925,9 @@ func (p *noder) packname(expr syntax.Expr) *types.Sym { base.Errorf("%v is not a package", name) pkg = ir.LocalPkg } else { - def.Name().SetUsed(true) - pkg = def.Name().Pkg + def := def.(*ir.PkgName) + def.Used = true + pkg = def.Pkg } return pkg.Lookup(expr.Sel.Value) } @@ -1675,8 +1675,8 @@ func safeArg(name string) bool { func mkname(sym *types.Sym) ir.Node { n := oldname(sym) - if n.Name() != nil && n.Name().Pack != nil { - n.Name().Pack.Name().SetUsed(true) + if n.Name() != nil && n.Name().PkgName != nil { + n.Name().PkgName.Used = true } return n } diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 722876abf5b1d..5c410ce3ba5e5 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -102,7 +102,7 @@ func autolabel(prefix string) *types.Sym { // find all the exported symbols in package opkg // and make them available in the current package -func importdot(opkg *types.Pkg, pack ir.Node) { +func importdot(opkg *types.Pkg, pack *ir.PkgName) { n := 0 for _, s := range opkg.Syms { if s.Def == nil { @@ -124,7 +124,7 @@ func importdot(opkg *types.Pkg, pack ir.Node) { ir.Dump("s1def", ir.AsNode(s1.Def)) base.Fatalf("missing Name") } - ir.AsNode(s1.Def).Name().Pack = pack + ir.AsNode(s1.Def).Name().PkgName = pack s1.Origpkg = opkg n++ } diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index fc7a5049e07ef..64d5d2a2ed56b 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -9,13 +9,13 @@ import ( "cmd/compile/internal/types" "cmd/internal/objabi" "cmd/internal/src" + "fmt" "go/constant" ) -// Name holds Node fields used only by named nodes (ONAME, OTYPE, 
OPACK, OLABEL, some OLITERAL). +// Name holds Node fields used only by named nodes (ONAME, OTYPE, some OLITERAL). type Name struct { - Pack Node // real package for import . names - Pkg *types.Pkg // pkg for OPACK nodes + PkgName *PkgName // real package for import . names // For a local variable (not param) or extern, the initializing assignment (OAS or OAS2). // For a closure var, the ONAME node of the outer captured variable Defn Node @@ -374,3 +374,23 @@ const ( // Careful: Class is stored in three bits in Node.flags. _ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3) ) + +// A Pack is an identifier referring to an imported package. +type PkgName struct { + miniNode + sym *types.Sym + Pkg *types.Pkg + Used bool +} + +func (p *PkgName) String() string { return fmt.Sprint(p) } +func (p *PkgName) Format(s fmt.State, verb rune) { FmtNode(p, s, verb) } +func (p *PkgName) RawCopy() Node { c := *p; return &c } +func (p *PkgName) Sym() *types.Sym { return p.sym } + +func NewPkgName(pos src.XPos, sym *types.Sym, pkg *types.Pkg) *PkgName { + p := &PkgName{sym: sym, Pkg: pkg} + p.op = OPACK + p.pos = pos + return p +} diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 079871879d08b..0023df97a80ef 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -1339,12 +1339,7 @@ func NodAt(pos src.XPos, op Op, nleft, nright Node) Node { n.SetFunc(&x.f) n.Func().Decl = n case OPACK: - var x struct { - n node - m Name - } - n = &x.n - n.SetName(&x.m) + return NewPkgName(pos, nil, nil) case OEMPTY: return NewEmptyStmt(pos) case OBREAK, OCONTINUE, OFALL, OGOTO: @@ -1462,7 +1457,6 @@ var okForNod = [OEND]bool{ OOFFSETOF: true, OOR: true, OOROR: true, - OPACK: true, OPANIC: true, OPAREN: true, OPLUS: true, diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go index 0a9542fa44e2a..a025cb59868ef 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -21,7 +21,7 @@ func TestSizeof(t *testing.T) { _64bit uintptr // size on 64bit platforms }{ {Func{}, 152, 280}, - {Name{}, 44, 80}, + {Name{}, 36, 64}, {Param{}, 44, 88}, {node{}, 88, 152}, } From 862f638a89c5ec721a53446fb1a74f9ad15893d5 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sat, 28 Nov 2020 01:41:13 -0500 Subject: [PATCH 056/474] [dev.regabi] cmd/compile: make ir.Name the ONAME Node implementation Before this CL, an ONAME Node was represented by three structs linked together: a node, a Name, and a Param. Previous CLs removed OLABEL and OPACK from the set of nodes that knew about Name. Now Name can be repurposed to *be* the ONAME Node implementation, replacing three linked structs totaling 152+64+88 = 304 bytes (64-bit) with a single 232-byte struct. Many expressions in the code become simpler as well, without having to use .Param. and sometimes even .Name(). (For a node n where n.Name() != nil, n.Name() == n.(*Name) now.) Passes buildall w/ toolstash -cmp. 
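
The effect of the pattern, as a minimal self-contained sketch (the names
below are stand-ins for illustration, not the compiler's actual
declarations):

    package main

    import "fmt"

    // Node stands in for ir.Node; only the method that matters
    // here is shown.
    type Node interface {
        Name() *Name
    }

    // miniNode stands in for the embedded base type that supplies
    // default method implementations; most nodes are unnamed, so
    // its Name returns nil.
    type miniNode struct{}

    func (n *miniNode) Name() *Name { return nil }

    // Name embeds the base and overrides Name to return itself,
    // so one struct is both the node and its name data.
    type Name struct {
        miniNode
        sym string // stand-in for *types.Sym
    }

    func (n *Name) Name() *Name { return n }

    func main() {
        var n Node = &Name{sym: "x"}
        // For a node n where n.Name() != nil, n.Name() == n.(*Name).
        fmt.Println(n.Name() == n.(*Name)) // true
    }

Embedding gives Name every default Node method for free; only the
methods that actually differ need overriding, which is what lets a
single struct replace the three linked ones.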
Change-Id: Ie719f1285c05623b9fd2faaa059e5b360a64b3be Reviewed-on: https://go-review.googlesource.com/c/go/+/274094 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/fmtmap_test.go | 1 + src/cmd/compile/internal/gc/alg.go | 2 +- src/cmd/compile/internal/gc/align.go | 10 +- src/cmd/compile/internal/gc/closure.go | 13 +- src/cmd/compile/internal/gc/dcl.go | 39 ++-- src/cmd/compile/internal/gc/embed.go | 6 +- src/cmd/compile/internal/gc/escape.go | 6 +- src/cmd/compile/internal/gc/gen.go | 10 +- src/cmd/compile/internal/gc/iexport.go | 2 +- src/cmd/compile/internal/gc/inl.go | 2 +- src/cmd/compile/internal/gc/main.go | 4 +- src/cmd/compile/internal/gc/noder.go | 17 +- src/cmd/compile/internal/gc/pgen.go | 2 +- src/cmd/compile/internal/gc/plive.go | 2 +- src/cmd/compile/internal/gc/subr.go | 2 +- src/cmd/compile/internal/gc/typecheck.go | 52 +++--- src/cmd/compile/internal/gc/universe.go | 9 - src/cmd/compile/internal/gc/walk.go | 24 +-- src/cmd/compile/internal/ir/expr.go | 47 +++++ src/cmd/compile/internal/ir/fmt.go | 8 +- src/cmd/compile/internal/ir/mini.go | 2 - src/cmd/compile/internal/ir/name.go | 200 ++++++++++----------- src/cmd/compile/internal/ir/node.go | 56 ++---- src/cmd/compile/internal/ir/sizeof_test.go | 5 +- 24 files changed, 256 insertions(+), 265 deletions(-) create mode 100644 src/cmd/compile/internal/ir/expr.go diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go index 7a375604fd9f2..978c83e5c2128 100644 --- a/src/cmd/compile/fmtmap_test.go +++ b/src/cmd/compile/fmtmap_test.go @@ -22,6 +22,7 @@ package main_test var knownFormats = map[string]string{ "*bytes.Buffer %s": "", "*cmd/compile/internal/gc.EscLocation %v": "", + "*cmd/compile/internal/ir.Name %v": "", "*cmd/compile/internal/ir.node %v": "", "*cmd/compile/internal/ssa.Block %s": "", "*cmd/compile/internal/ssa.Block %v": "", diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index d2762126ade37..356f0eada76f9 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -311,7 +311,7 @@ func genhash(t *types.Type) *obj.LSym { hashel := hashfor(t.Elem()) n := ir.Nod(ir.ORANGE, nil, ir.Nod(ir.ODEREF, np, nil)) - ni := NewName(lookup("i")) + ni := ir.Node(NewName(lookup("i"))) ni.SetType(types.Types[types.TINT]) n.PtrList().Set1(ni) n.SetColas(true) diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go index edf7d263a33cb..4f8f04d73d004 100644 --- a/src/cmd/compile/internal/gc/align.go +++ b/src/cmd/compile/internal/gc/align.go @@ -126,8 +126,8 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 { // NOTE(rsc): This comment may be stale. // It's possible the ordering has changed and this is // now the common case. I'm not sure. 
- if n.Name().Param.Stackcopy != nil { - n.Name().Param.Stackcopy.SetOffset(o) + if n.Name().Stackcopy != nil { + n.Name().Stackcopy.SetOffset(o) n.SetOffset(0) } else { n.SetOffset(o) @@ -198,8 +198,10 @@ func findTypeLoop(t *types.Type, path *[]*types.Type) bool { } *path = append(*path, t) - if p := ir.AsNode(t.Nod).Name().Param; p != nil && findTypeLoop(p.Ntype.Type(), path) { - return true + if n := ir.AsNode(t.Nod); n != nil { + if name := n.Name(); name != nil && name.Ntype != nil && findTypeLoop(name.Ntype.Type(), path) { + return true + } } *path = (*path)[:len(*path)-1] } else { diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index 2901ae41d6eba..7a1078326dee4 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -21,7 +21,7 @@ func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node { fn := dcl.Func() fn.SetIsHiddenClosure(Curfn != nil) fn.Nname = newfuncnamel(p.pos(expr), ir.BlankNode.Sym(), fn) // filled in by typecheckclosure - fn.Nname.Name().Param.Ntype = xtype + fn.Nname.Name().Ntype = xtype fn.Nname.Name().Defn = dcl clo := p.nod(expr, ir.OCLOSURE, nil, nil) @@ -38,7 +38,7 @@ func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node { for _, v := range fn.ClosureVars.Slice() { // Unlink from v1; see comment in syntax.go type Param for these fields. v1 := v.Name().Defn - v1.Name().Param.Innermost = v.Name().Param.Outer + v1.Name().Innermost = v.Name().Outer // If the closure usage of v is not dense, // we need to make it dense; now that we're out @@ -68,7 +68,7 @@ func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node { // obtains f3's v, creating it if necessary (as it is in the example). // // capturevars will decide whether to use v directly or &v. - v.Name().Param.Outer = oldname(v.Sym()) + v.Name().Outer = oldname(v.Sym()).(*ir.Name) } return clo @@ -194,7 +194,8 @@ func capturevars(dcl ir.Node) { // so that the outer frame also grabs them and knows they escape. dowidth(v.Type()) - outer := v.Name().Param.Outer + var outer ir.Node + outer = v.Name().Outer outermost := v.Name().Defn // out parameters will be assigned to implicitly upon return. @@ -262,7 +263,7 @@ func transformclosure(dcl ir.Node) { // (accesses will implicitly deref &v). addr := NewName(lookup("&" + v.Sym().Name)) addr.SetType(types.NewPtr(v.Type())) - v.Name().Param.Heapaddr = addr + v.Name().Heapaddr = addr v = addr } @@ -312,7 +313,7 @@ func transformclosure(dcl ir.Node) { addr.Name().SetUsed(true) addr.Name().Curfn = dcl fn.Dcl = append(fn.Dcl, addr) - v.Name().Param.Heapaddr = addr + v.Name().Heapaddr = addr if v.Name().Byval() { cv = ir.Nod(ir.OADDR, cv, nil) } diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 2a7be137c0698..04e6e7a596a7c 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -12,7 +12,6 @@ import ( "cmd/internal/obj" "cmd/internal/src" "fmt" - "go/constant" "strings" ) @@ -64,11 +63,6 @@ func declare(n ir.Node, ctxt ir.Class) { return } - if n.Name() == nil { - // named OLITERAL needs Name; most OLITERALs don't. - n.SetName(new(ir.Name)) - } - s := n.Sym() // kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later. 
@@ -152,7 +146,7 @@ func variter(vl []ir.Node, t ir.Node, el []ir.Node) []ir.Node { for _, v := range vl { v.SetOp(ir.ONAME) declare(v, dclcontext) - v.Name().Param.Ntype = t + v.Name().Ntype = t v.Name().Defn = as2 if Curfn != nil { init = append(init, ir.Nod(ir.ODCL, v, nil)) @@ -176,7 +170,7 @@ func variter(vl []ir.Node, t ir.Node, el []ir.Node) []ir.Node { v.SetOp(ir.ONAME) declare(v, dclcontext) - v.Name().Param.Ntype = t + v.Name().Ntype = t if e != nil || Curfn != nil || ir.IsBlank(v) { if Curfn != nil { @@ -201,9 +195,8 @@ func newnoname(s *types.Sym) ir.Node { if s == nil { base.Fatalf("newnoname nil") } - n := ir.Nod(ir.ONONAME, nil, nil) - n.SetSym(s) - n.SetOffset(0) + n := ir.NewNameAt(base.Pos, s) + n.SetOp(ir.ONONAME) return n } @@ -220,7 +213,7 @@ func newfuncnamel(pos src.XPos, s *types.Sym, fn *ir.Func) ir.Node { // this generates a new name node for a name // being declared. -func dclname(s *types.Sym) ir.Node { +func dclname(s *types.Sym) *ir.Name { n := NewName(s) n.SetOp(ir.ONONAME) // caller will correct it return n @@ -277,7 +270,7 @@ func oldname(s *types.Sym) ir.Node { // are parsing x := 5 inside the closure, until we get to // the := it looks like a reference to the outer x so we'll // make x a closure variable unnecessarily. - c := n.Name().Param.Innermost + c := n.Name().Innermost if c == nil || c.Name().Curfn != Curfn { // Do not have a closure var for the active closure yet; make one. c = NewName(s) @@ -288,8 +281,8 @@ func oldname(s *types.Sym) ir.Node { // Link into list of active closure variables. // Popped from list in func funcLit. - c.Name().Param.Outer = n.Name().Param.Innermost - n.Name().Param.Innermost = c + c.Name().Outer = n.Name().Innermost + n.Name().Innermost = c Curfn.Func().ClosureVars.Append(c) } @@ -392,8 +385,8 @@ func funchdr(n ir.Node) { types.Markdcl() - if n.Func().Nname != nil && n.Func().Nname.Name().Param.Ntype != nil { - funcargs(n.Func().Nname.Name().Param.Ntype) + if n.Func().Nname != nil && n.Func().Nname.Name().Ntype != nil { + funcargs(n.Func().Nname.Name().Ntype) } else { funcargs2(n.Type()) } @@ -458,7 +451,7 @@ func funcarg(n ir.Node, ctxt ir.Class) { } n.SetRight(ir.NewNameAt(n.Pos(), n.Sym())) - n.Right().Name().Param.Ntype = n.Left() + n.Right().Name().Ntype = n.Left() n.Right().SetIsDDD(n.IsDDD()) declare(n.Right(), ctxt) @@ -554,8 +547,8 @@ func structfield(n ir.Node) *types.Field { checkembeddedtype(n.Type()) f.Embedded = 1 } - if n.HasVal() { - f.Note = constant.StringVal(n.Val()) + if n.Opt() != nil { + f.Note = n.Opt().(string) } base.Pos = lno @@ -640,7 +633,7 @@ func interfacefield(n ir.Node) *types.Field { base.Fatalf("interfacefield: oops %v\n", n) } - if n.HasVal() { + if n.Opt() != nil { base.Errorf("interface method cannot have annotation") } @@ -952,10 +945,10 @@ func dclfunc(sym *types.Sym, tfn ir.Node) ir.Node { fn := ir.Nod(ir.ODCLFUNC, nil, nil) fn.Func().Nname = newfuncnamel(base.Pos, sym, fn.Func()) fn.Func().Nname.Name().Defn = fn - fn.Func().Nname.Name().Param.Ntype = tfn + fn.Func().Nname.Name().Ntype = tfn setNodeNameFunc(fn.Func().Nname) funchdr(fn) - fn.Func().Nname.Name().Param.Ntype = typecheck(fn.Func().Nname.Name().Param.Ntype, ctxType) + fn.Func().Nname.Name().Ntype = typecheck(fn.Func().Nname.Name().Ntype, ctxType) return fn } diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go index 33b05a5bf0889..1c8ccdadefeb5 100644 --- a/src/cmd/compile/internal/gc/embed.go +++ b/src/cmd/compile/internal/gc/embed.go @@ -115,13 +115,13 @@ func varEmbed(p *noder, 
names []ir.Node, typ ir.Node, exprs []ir.Node, embeds [] numLocalEmbed++ v = ir.NewNameAt(v.Pos(), lookupN("embed.", numLocalEmbed)) v.Sym().Def = v - v.Name().Param.Ntype = typ + v.Name().Ntype = typ v.SetClass(ir.PEXTERN) externdcl = append(externdcl, v) exprs = []ir.Node{v} } - v.Name().Param.SetEmbedFiles(list) + v.Name().SetEmbedFiles(list) embedlist = append(embedlist, v) return exprs } @@ -193,7 +193,7 @@ func dumpembeds() { // initEmbed emits the init data for a //go:embed variable, // which is either a string, a []byte, or an embed.FS. func initEmbed(v ir.Node) { - files := v.Name().Param.EmbedFiles() + files := v.Name().EmbedFiles() switch kind := embedKind(v.Type()); kind { case embedUnknown: base.ErrorfAt(v.Pos(), "go:embed cannot apply to var of type %v", v.Type()) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index e3ac883e958cb..351643ef5d4c3 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -1895,7 +1895,7 @@ func moveToHeap(n ir.Node) { stackcopy.SetType(n.Type()) stackcopy.SetOffset(n.Offset()) stackcopy.SetClass(n.Class()) - stackcopy.Name().Param.Heapaddr = heapaddr + stackcopy.Name().Heapaddr = heapaddr if n.Class() == ir.PPARAMOUT { // Make sure the pointer to the heap copy is kept live throughout the function. // The function could panic at any point, and then a defer could recover. @@ -1904,7 +1904,7 @@ func moveToHeap(n ir.Node) { // See issue 16095. heapaddr.Name().SetIsOutputParamHeapAddr(true) } - n.Name().Param.Stackcopy = stackcopy + n.Name().Stackcopy = stackcopy // Substitute the stackcopy into the function variable list so that // liveness and other analyses use the underlying stack slot @@ -1931,7 +1931,7 @@ func moveToHeap(n ir.Node) { // Modify n in place so that uses of n now mean indirection of the heapaddr. n.SetClass(ir.PAUTOHEAP) n.SetOffset(0) - n.Name().Param.Heapaddr = heapaddr + n.Name().Heapaddr = heapaddr n.SetEsc(EscHeap) if base.Flag.LowerM != 0 { base.WarnfAt(n.Pos(), "moved to heap: %v", n) diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go index cb640c7ccf76f..cf9e0d58bfcfd 100644 --- a/src/cmd/compile/internal/gc/gen.go +++ b/src/cmd/compile/internal/gc/gen.go @@ -31,13 +31,13 @@ func sysvar(name string) *obj.LSym { // isParamStackCopy reports whether this is the on-stack copy of a // function parameter that moved to the heap. func isParamStackCopy(n ir.Node) bool { - return n.Op() == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Name().Param.Heapaddr != nil + return n.Op() == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Name().Heapaddr != nil } // isParamHeapCopy reports whether this is the on-heap copy of // a function parameter that moved to the heap. func isParamHeapCopy(n ir.Node) bool { - return n.Op() == ir.ONAME && n.Class() == ir.PAUTOHEAP && n.Name().Param.Stackcopy != nil + return n.Op() == ir.ONAME && n.Class() == ir.PAUTOHEAP && n.Name().Stackcopy != nil } // autotmpname returns the name for an autotmp variable numbered n. 
@@ -52,7 +52,7 @@ func autotmpname(n int) string { } // make a new Node off the books -func tempAt(pos src.XPos, curfn ir.Node, t *types.Type) ir.Node { +func tempAt(pos src.XPos, curfn ir.Node, t *types.Type) *ir.Name { if curfn == nil { base.Fatalf("no curfn for tempAt") } @@ -80,9 +80,9 @@ func tempAt(pos src.XPos, curfn ir.Node, t *types.Type) ir.Node { dowidth(t) - return ir.Orig(n) + return n } -func temp(t *types.Type) ir.Node { +func temp(t *types.Type) *ir.Name { return tempAt(base.Pos, Curfn, t) } diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index c2ea599af40e3..88d3a6477c0cf 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -1463,7 +1463,7 @@ func (w *exportWriter) localName(n ir.Node) { // PPARAM/PPARAMOUT, because we only want to include vargen in // non-param names. var v int32 - if n.Class() == ir.PAUTO || (n.Class() == ir.PAUTOHEAP && n.Name().Param.Stackcopy == nil) { + if n.Class() == ir.PAUTO || (n.Class() == ir.PAUTOHEAP && n.Name().Stackcopy == nil) { v = n.Name().Vargen } diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index d43d0d06af459..102144aedf42d 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -982,7 +982,7 @@ func mkinlcall(n, fn ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node { continue } - o := v.Name().Param.Outer + o := v.Name().Outer // make sure the outer param matches the inlining location // NB: if we enabled inlining of functions containing OCLOSURE or refined // the reassigned check via some sort of copy propagation this would most diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 7d2933f3602d8..931626159d9d0 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -253,7 +253,7 @@ func Main(archInit func(*Arch)) { timings.Start("fe", "typecheck", "top1") for i := 0; i < len(xtop); i++ { n := xtop[i] - if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.Left().Name().Param.Alias()) { + if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.Left().Name().Alias()) { xtop[i] = typecheck(n, ctxStmt) } } @@ -265,7 +265,7 @@ func Main(archInit func(*Arch)) { timings.Start("fe", "typecheck", "top2") for i := 0; i < len(xtop); i++ { n := xtop[i] - if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.Left().Name().Param.Alias() { + if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.Left().Name().Alias() { xtop[i] = typecheck(n, ctxStmt) } } diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 54915d76931fb..cbe8a2405164b 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -456,7 +456,7 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node { n.SetOp(ir.OLITERAL) declare(n, dclcontext) - n.Name().Param.Ntype = typ + n.Name().Ntype = typ n.Name().Defn = v n.SetIota(cs.iota) @@ -480,19 +480,18 @@ func (p *noder) typeDecl(decl *syntax.TypeDecl) ir.Node { // decl.Type may be nil but in that case we got a syntax error during parsing typ := p.typeExprOrNil(decl.Type) - param := n.Name().Param - param.Ntype = typ - param.SetAlias(decl.Alias) + n.Ntype = typ + n.SetAlias(decl.Alias) if pragma, ok := decl.Pragma.(*Pragma); ok { if !decl.Alias { - 
param.SetPragma(pragma.Flag & TypePragmas) + n.SetPragma(pragma.Flag & TypePragmas) pragma.Flag &^= TypePragmas } p.checkUnused(pragma) } nod := p.nod(decl, ir.ODCLTYPE, n, nil) - if param.Alias() && !langSupported(1, 9, ir.LocalPkg) { + if n.Alias() && !langSupported(1, 9, ir.LocalPkg) { base.ErrorfAt(nod.Pos(), "type aliases only supported as of -lang=go1.9") } return nod @@ -506,7 +505,7 @@ func (p *noder) declNames(names []*syntax.Name) []ir.Node { return nodes } -func (p *noder) declName(name *syntax.Name) ir.Node { +func (p *noder) declName(name *syntax.Name) *ir.Name { n := dclname(p.name(name)) n.SetPos(p.pos(name)) return n @@ -537,7 +536,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node { f.Func().Nname = newfuncnamel(p.pos(fun.Name), name, f.Func()) f.Func().Nname.Name().Defn = f - f.Func().Nname.Name().Param.Ntype = t + f.Func().Nname.Name().Ntype = t if pragma, ok := fun.Pragma.(*Pragma); ok { f.Func().Pragma = pragma.Flag & FuncPragmas @@ -872,7 +871,7 @@ func (p *noder) structType(expr *syntax.StructType) ir.Node { n = p.nodSym(field, ir.ODCLFIELD, p.typeExpr(field.Type), p.name(field.Name)) } if i < len(expr.TagList) && expr.TagList[i] != nil { - n.SetVal(p.basicLit(expr.TagList[i])) + n.SetOpt(constant.StringVal(p.basicLit(expr.TagList[i]))) } l = append(l, n) } diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 221b733a070a6..b74b132632b9b 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -667,7 +667,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []ir. // misleading location for the param (we want pointer-to-heap // and not stack). // TODO(thanm): generate a better location expression - stackcopy := n.Name().Param.Stackcopy + stackcopy := n.Name().Stackcopy if stackcopy != nil && (stackcopy.Class() == ir.PPARAM || stackcopy.Class() == ir.PPARAMOUT) { abbrev = dwarf.DW_ABRV_PARAM_LOCLIST isReturnValue = (stackcopy.Class() == ir.PPARAMOUT) diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index bd7696d859c21..e3a9b2a1988d6 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -795,7 +795,7 @@ func (lv *Liveness) epilogue() { // Just to be paranoid. Heap addresses are PAUTOs. base.Fatalf("variable %v both output param and heap output param", n) } - if n.Name().Param.Heapaddr != nil { + if n.Name().Heapaddr != nil { // If this variable moved to the heap, then // its stack copy is not live. continue diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 5c410ce3ba5e5..28703205d6655 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -136,7 +136,7 @@ func importdot(opkg *types.Pkg, pack *ir.PkgName) { } // newname returns a new ONAME Node associated with symbol s. -func NewName(s *types.Sym) ir.Node { +func NewName(s *types.Sym) *ir.Name { n := ir.NewNameAt(base.Pos, s) n.Name().Curfn = Curfn return n diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 0c4a3ad833a51..4ab47fb4069a9 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -259,12 +259,12 @@ func typecheck(n ir.Node, top int) (res ir.Node) { // are substituted. cycle := cycleFor(n) for _, n1 := range cycle { - if n1.Name() != nil && !n1.Name().Param.Alias() { + if n1.Name() != nil && !n1.Name().Alias() { // Cycle is ok. 
But if n is an alias type and doesn't // have a type yet, we have a recursive type declaration // with aliases that we can't handle properly yet. // Report an error rather than crashing later. - if n.Name() != nil && n.Name().Param.Alias() && n.Type() == nil { + if n.Name() != nil && n.Name().Alias() && n.Type() == nil { base.Pos = n.Pos() base.Fatalf("cannot handle alias type declaration (issue #25838): %v", n) } @@ -2412,9 +2412,6 @@ func typecheckMethodExpr(n ir.Node) (res ir.Node) { } n.SetOp(ir.OMETHEXPR) - if n.Name() == nil { - n.SetName(new(ir.Name)) - } n.SetRight(NewName(n.Sym())) n.SetSym(methodSym(t, n.Sym())) n.SetType(methodfunc(m.Type, n.Left().Type())) @@ -3228,7 +3225,7 @@ func typecheckas(n ir.Node) { // so that the conversion below happens). n.SetLeft(resolve(n.Left())) - if n.Left().Name() == nil || n.Left().Name().Defn != n || n.Left().Name().Param.Ntype != nil { + if n.Left().Name() == nil || n.Left().Name().Defn != n || n.Left().Name().Ntype != nil { n.SetLeft(typecheck(n.Left(), ctxExpr|ctxAssign)) } @@ -3247,7 +3244,7 @@ func typecheckas(n ir.Node) { } } - if n.Left().Name() != nil && n.Left().Name().Defn == n && n.Left().Name().Param.Ntype == nil { + if n.Left().Name() != nil && n.Left().Name().Defn == n && n.Left().Name().Ntype == nil { n.SetRight(defaultlit(n.Right(), nil)) n.Left().SetType(n.Right().Type()) } @@ -3283,7 +3280,7 @@ func typecheckas2(n ir.Node) { n1 = resolve(n1) ls[i1] = n1 - if n1.Name() == nil || n1.Name().Defn != n || n1.Name().Param.Ntype != nil { + if n1.Name() == nil || n1.Name().Defn != n || n1.Name().Ntype != nil { ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign) } } @@ -3308,7 +3305,7 @@ func typecheckas2(n ir.Node) { if nl.Type() != nil && nr.Type() != nil { rs[il] = assignconv(nr, nl.Type(), "assignment") } - if nl.Name() != nil && nl.Name().Defn == n && nl.Name().Param.Ntype == nil { + if nl.Name() != nil && nl.Name().Defn == n && nl.Name().Ntype == nil { rs[il] = defaultlit(rs[il], nil) nl.SetType(rs[il].Type()) } @@ -3342,7 +3339,7 @@ func typecheckas2(n ir.Node) { if f.Type != nil && l.Type() != nil { checkassignto(f.Type, l) } - if l.Name() != nil && l.Name().Defn == n && l.Name().Param.Ntype == nil { + if l.Name() != nil && l.Name().Defn == n && l.Name().Ntype == nil { l.SetType(f.Type) } } @@ -3378,7 +3375,7 @@ func typecheckas2(n ir.Node) { if l.Type() != nil && !l.Type().IsBoolean() { checkassignto(types.Types[types.TBOOL], l) } - if l.Name() != nil && l.Name().Defn == n && l.Name().Param.Ntype == nil { + if l.Name() != nil && l.Name().Defn == n && l.Name().Ntype == nil { l.SetType(types.Types[types.TBOOL]) } goto out @@ -3502,7 +3499,7 @@ func setUnderlying(t, underlying *types.Type) { } // Propagate go:notinheap pragma from the Name to the Type. 
- if n.Name() != nil && n.Name().Param != nil && n.Name().Param.Pragma()&ir.NotInHeap != 0 { + if n.Name() != nil && n.Name().Pragma()&ir.NotInHeap != 0 { t.SetNotInHeap(true) } @@ -3525,8 +3522,8 @@ func typecheckdeftype(n ir.Node) { } n.SetTypecheck(1) - n.Name().Param.Ntype = typecheck(n.Name().Param.Ntype, ctxType) - t := n.Name().Param.Ntype.Type() + n.Name().Ntype = typecheck(n.Name().Ntype, ctxType) + t := n.Name().Ntype.Type() if t == nil { n.SetDiag(true) n.SetType(nil) @@ -3586,10 +3583,10 @@ func typecheckdef(n ir.Node) { base.Fatalf("typecheckdef %v", n.Op()) case ir.OLITERAL: - if n.Name().Param.Ntype != nil { - n.Name().Param.Ntype = typecheck(n.Name().Param.Ntype, ctxType) - n.SetType(n.Name().Param.Ntype.Type()) - n.Name().Param.Ntype = nil + if n.Name().Ntype != nil { + n.Name().Ntype = typecheck(n.Name().Ntype, ctxType) + n.SetType(n.Name().Ntype.Type()) + n.Name().Ntype = nil if n.Type() == nil { n.SetDiag(true) goto ret @@ -3640,9 +3637,9 @@ func typecheckdef(n ir.Node) { } case ir.ONAME: - if n.Name().Param.Ntype != nil { - n.Name().Param.Ntype = typecheck(n.Name().Param.Ntype, ctxType) - n.SetType(n.Name().Param.Ntype.Type()) + if n.Name().Ntype != nil { + n.Name().Ntype = typecheck(n.Name().Ntype, ctxType) + n.SetType(n.Name().Ntype.Type()) if n.Type() == nil { n.SetDiag(true) goto ret @@ -3676,21 +3673,22 @@ func typecheckdef(n ir.Node) { n.Name().Defn = typecheck(n.Name().Defn, ctxStmt) // fills in n.Type case ir.OTYPE: - if p := n.Name().Param; p.Alias() { + n := n.(*ir.Name) + if n.Alias() { // Type alias declaration: Simply use the rhs type - no need // to create a new type. // If we have a syntax error, p.Ntype may be nil. - if p.Ntype != nil { - p.Ntype = typecheck(p.Ntype, ctxType) - n.SetType(p.Ntype.Type()) + if n.Ntype != nil { + n.Ntype = typecheck(n.Ntype, ctxType) + n.SetType(n.Ntype.Type()) if n.Type() == nil { n.SetDiag(true) goto ret } // For package-level type aliases, set n.Sym.Def so we can identify // it as a type alias during export. See also #31959. 
- if n.Name().Curfn == nil { - n.Sym().Def = p.Ntype + if n.Curfn == nil { + n.Sym().Def = n.Ntype } } break diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index 978e53ac159a7..1068720748002 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -110,7 +110,6 @@ func lexinit() { types.Types[etype] = t } s2.Def = typenod(t) - ir.AsNode(s2.Def).SetName(new(ir.Name)) } for _, s := range &builtinFuncs { @@ -132,13 +131,11 @@ func lexinit() { s := ir.BuiltinPkg.Lookup("true") s.Def = nodbool(true) ir.AsNode(s.Def).SetSym(lookup("true")) - ir.AsNode(s.Def).SetName(new(ir.Name)) ir.AsNode(s.Def).SetType(types.UntypedBool) s = ir.BuiltinPkg.Lookup("false") s.Def = nodbool(false) ir.AsNode(s.Def).SetSym(lookup("false")) - ir.AsNode(s.Def).SetName(new(ir.Name)) ir.AsNode(s.Def).SetType(types.UntypedBool) s = lookup("_") @@ -158,12 +155,10 @@ func lexinit() { s = ir.BuiltinPkg.Lookup("nil") s.Def = nodnil() ir.AsNode(s.Def).SetSym(s) - ir.AsNode(s.Def).SetName(new(ir.Name)) s = ir.BuiltinPkg.Lookup("iota") s.Def = ir.Nod(ir.OIOTA, nil, nil) ir.AsNode(s.Def).SetSym(s) - ir.AsNode(s.Def).SetName(new(ir.Name)) } func typeinit() { @@ -182,7 +177,6 @@ func typeinit() { types.Types[types.TUNSAFEPTR] = t t.Sym = unsafepkg.Lookup("Pointer") t.Sym.Def = typenod(t) - ir.AsNode(t.Sym.Def).SetName(new(ir.Name)) dowidth(types.Types[types.TUNSAFEPTR]) for et := types.TINT8; et <= types.TUINT64; et++ { @@ -359,7 +353,6 @@ func lexinit1() { types.Bytetype = types.New(types.TUINT8) types.Bytetype.Sym = s s.Def = typenod(types.Bytetype) - ir.AsNode(s.Def).SetName(new(ir.Name)) dowidth(types.Bytetype) // rune alias @@ -367,7 +360,6 @@ func lexinit1() { types.Runetype = types.New(types.TINT32) types.Runetype.Sym = s s.Def = typenod(types.Runetype) - ir.AsNode(s.Def).SetName(new(ir.Name)) dowidth(types.Runetype) // backend-dependent builtin types (e.g. int). 
@@ -385,7 +377,6 @@ func lexinit1() { t.Sym = s1 types.Types[s.etype] = t s1.Def = typenod(t) - ir.AsNode(s1.Def).SetName(new(ir.Name)) s1.Origpkg = ir.BuiltinPkg dowidth(t) diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 87fe36b08a010..c05aa0c37217c 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -196,7 +196,7 @@ func walkstmt(n ir.Node) ir.Node { if prealloc[v] == nil { prealloc[v] = callnew(v.Type()) } - nn := ir.Nod(ir.OAS, v.Name().Param.Heapaddr, prealloc[v]) + nn := ir.Nod(ir.OAS, v.Name().Heapaddr, prealloc[v]) nn.SetColas(true) nn = typecheck(nn, ctxStmt) return walkstmt(nn) @@ -286,7 +286,7 @@ func walkstmt(n ir.Node) ir.Node { } if cl == ir.PPARAMOUT { if isParamStackCopy(ln) { - ln = walkexpr(typecheck(ir.Nod(ir.ODEREF, ln.Name().Param.Heapaddr, nil), ctxExpr), nil) + ln = walkexpr(typecheck(ir.Nod(ir.ODEREF, ln.Name().Heapaddr, nil), ctxExpr), nil) } rl = append(rl, ln) } @@ -314,7 +314,7 @@ func walkstmt(n ir.Node) ir.Node { for i, nl := range lhs.FieldSlice() { nname := ir.AsNode(nl.Nname) if isParamHeapCopy(nname) { - nname = nname.Name().Param.Stackcopy + nname = nname.Name().Stackcopy } a := ir.Nod(ir.OAS, nname, rhs[i]) res[i] = convas(a, n.PtrInit()) @@ -456,7 +456,7 @@ func walkexpr(n ir.Node, init *ir.Nodes) ir.Node { } if n.Op() == ir.ONAME && n.Class() == ir.PAUTOHEAP { - nn := ir.Nod(ir.ODEREF, n.Name().Param.Heapaddr, nil) + nn := ir.Nod(ir.ODEREF, n.Name().Heapaddr, nil) nn = typecheck(nn, ctxExpr) nn = walkexpr(nn, init) nn.Left().MarkNonNil() @@ -1160,7 +1160,7 @@ opswitch: if n.Type().Elem().Width >= maxImplicitStackVarSize { base.Fatalf("large ONEW with EscNone: %v", n) } - r := temp(n.Type().Elem()) + r := ir.Node(temp(n.Type().Elem())) r = ir.Nod(ir.OAS, r, nil) // zero temp r = typecheck(r, ctxStmt) init.Append(r) @@ -1776,7 +1776,7 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { // Any assignment to an lvalue that might cause a function call must be // deferred until all the returned values have been read. if fncall(l, r.Type) { - tmp := temp(r.Type) + tmp := ir.Node(temp(r.Type)) tmp = typecheck(tmp, ctxExpr) a := ir.Nod(ir.OAS, l, tmp) a = convas(a, &mm) @@ -2174,7 +2174,7 @@ func reorder3save(n ir.Node, all []ir.Node, i int, early *[]ir.Node) ir.Node { return n } - q := temp(n.Type()) + q := ir.Node(temp(n.Type())) q = ir.Nod(ir.OAS, q, n) q = typecheck(q, ctxStmt) *early = append(*early, q) @@ -2411,7 +2411,7 @@ func paramstoheap(params *types.Type) []ir.Node { continue } - if stackcopy := v.Name().Param.Stackcopy; stackcopy != nil { + if stackcopy := v.Name().Stackcopy; stackcopy != nil { nn = append(nn, walkstmt(ir.Nod(ir.ODCL, v, nil))) if stackcopy.Class() == ir.PPARAM { nn = append(nn, walkstmt(typecheck(ir.Nod(ir.OAS, v, stackcopy), ctxStmt))) @@ -2432,7 +2432,7 @@ func paramstoheap(params *types.Type) []ir.Node { func zeroResults() { for _, f := range Curfn.Type().Results().Fields().Slice() { v := ir.AsNode(f.Nname) - if v != nil && v.Name().Param.Heapaddr != nil { + if v != nil && v.Name().Heapaddr != nil { // The local which points to the return value is the // thing that needs zeroing. This is already handled // by a Needzero annotation in plive.go:livenessepilogue. @@ -2445,7 +2445,7 @@ func zeroResults() { // I don't think the zeroing below matters. // The stack return value will never be marked as live anywhere in the function. // It is not written to until deferreturn returns. 
-			v = v.Name().Param.Stackcopy
+			v = v.Name().Stackcopy
 		}
 		// Zero the stack location containing f.
 		Curfn.Func().Enter.Append(ir.NodAt(Curfn.Pos(), ir.OAS, v, nil))
@@ -2461,7 +2461,7 @@ func returnsfromheap(params *types.Type) []ir.Node {
 		if v == nil {
 			continue
 		}
-		if stackcopy := v.Name().Param.Stackcopy; stackcopy != nil && stackcopy.Class() == ir.PPARAMOUT {
+		if stackcopy := v.Name().Stackcopy; stackcopy != nil && stackcopy.Class() == ir.PPARAMOUT {
 			nn = append(nn, walkstmt(typecheck(ir.Nod(ir.OAS, stackcopy, v), ctxStmt)))
 		}
 	}
@@ -3155,7 +3155,7 @@ func copyany(n ir.Node, init *ir.Nodes, runtimecall bool) ir.Node {

 	fn := syslook("memmove")
 	fn = substArgTypes(fn, nl.Type().Elem(), nl.Type().Elem())
-	nwid := temp(types.Types[types.TUINTPTR])
+	nwid := ir.Node(temp(types.Types[types.TUINTPTR]))
 	setwid := ir.Nod(ir.OAS, nwid, conv(nlen, types.Types[types.TUINTPTR]))
 	ne.PtrBody().Append(setwid)
 	nwid = ir.Nod(ir.OMUL, nwid, nodintconst(nl.Type().Elem().Width))
diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
new file mode 100644
index 0000000000000..418351742e76a
--- /dev/null
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -0,0 +1,47 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+	"cmd/compile/internal/types"
+)
+
+// A miniExpr is a miniNode with extra fields common to expressions.
+// TODO(rsc): Once we are sure about the contents, compact the bools
+// into a bit field and leave extra bits available for implementations
+// embedding miniExpr. Right now there are ~60 unused bits sitting here.
+type miniExpr struct {
+	miniNode
+	typ   *types.Type
+	init  Nodes       // TODO(rsc): Don't require every Node to have an init
+	opt   interface{} // TODO(rsc): Don't require every Node to have an opt?
+ flags bitset8 +} + +const ( + miniExprHasCall = 1 << iota + miniExprImplicit + miniExprNonNil + miniExprTransient + miniExprBounded +) + +func (n *miniExpr) Type() *types.Type { return n.typ } +func (n *miniExpr) SetType(x *types.Type) { n.typ = x } +func (n *miniExpr) Opt() interface{} { return n.opt } +func (n *miniExpr) SetOpt(x interface{}) { n.opt = x } +func (n *miniExpr) HasCall() bool { return n.flags&miniExprHasCall != 0 } +func (n *miniExpr) SetHasCall(b bool) { n.flags.set(miniExprHasCall, b) } +func (n *miniExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } +func (n *miniExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } +func (n *miniExpr) NonNil() bool { return n.flags&miniExprNonNil != 0 } +func (n *miniExpr) MarkNonNil() { n.flags |= miniExprNonNil } +func (n *miniExpr) Transient() bool { return n.flags&miniExprTransient != 0 } +func (n *miniExpr) SetTransient(b bool) { n.flags.set(miniExprTransient, b) } +func (n *miniExpr) Bounded() bool { return n.flags&miniExprBounded != 0 } +func (n *miniExpr) SetBounded(b bool) { n.flags.set(miniExprBounded, b) } +func (n *miniExpr) Init() Nodes { return n.init } +func (n *miniExpr) PtrInit() *Nodes { return &n.init } +func (n *miniExpr) SetInit(x Nodes) { n.init = x } diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 24318d501f5f6..e749778030c97 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -1615,9 +1615,9 @@ func nodeDumpFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) { } else { mode.Fprintf(s, "%v%j", n.Op(), n) } - if recur && n.Type() == nil && n.Name() != nil && n.Name().Param != nil && n.Name().Param.Ntype != nil { + if recur && n.Type() == nil && n.Name() != nil && n.Name().Ntype != nil { indent(s) - mode.Fprintf(s, "%v-ntype%v", n.Op(), n.Name().Param.Ntype) + mode.Fprintf(s, "%v-ntype%v", n.Op(), n.Name().Ntype) } case OASOP: @@ -1625,9 +1625,9 @@ func nodeDumpFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) { case OTYPE: mode.Fprintf(s, "%v %v%j type=%v", n.Op(), n.Sym(), n, n.Type()) - if recur && n.Type() == nil && n.Name() != nil && n.Name().Param != nil && n.Name().Param.Ntype != nil { + if recur && n.Type() == nil && n.Name() != nil && n.Name().Ntype != nil { indent(s) - mode.Fprintf(s, "%v-ntype%v", n.Op(), n.Name().Param.Ntype) + mode.Fprintf(s, "%v-ntype%v", n.Op(), n.Name().Ntype) } } diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go index 608c2bed81160..248fe232cb88d 100644 --- a/src/cmd/compile/internal/ir/mini.go +++ b/src/cmd/compile/internal/ir/mini.go @@ -130,7 +130,6 @@ func (n *miniNode) SetType(*types.Type) { panic(n.no("SetType")) } func (n *miniNode) Func() *Func { return nil } func (n *miniNode) SetFunc(*Func) { panic(n.no("SetFunc")) } func (n *miniNode) Name() *Name { return nil } -func (n *miniNode) SetName(*Name) { panic(n.no("SetName")) } func (n *miniNode) Sym() *types.Sym { return nil } func (n *miniNode) SetSym(*types.Sym) { panic(n.no("SetSym")) } func (n *miniNode) Offset() int64 { return types.BADWIDTH } @@ -164,7 +163,6 @@ func (n *miniNode) SetIndexMapLValue(bool) { panic(n.no("SetIndexMapLValue")) func (n *miniNode) ResetAux() { panic(n.no("ResetAux")) } func (n *miniNode) HasBreak() bool { panic(n.no("HasBreak")) } func (n *miniNode) SetHasBreak(bool) { panic(n.no("SetHasBreak")) } -func (n *miniNode) HasVal() bool { return false } func (n *miniNode) Val() constant.Value { panic(n.no("Val")) } func (n *miniNode) SetVal(v 
constant.Value) { panic(n.no("SetVal")) } func (n *miniNode) Int64Val() int64 { panic(n.no("Int64Val")) } diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 64d5d2a2ed56b..d330745cfbb18 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -15,29 +15,38 @@ import ( // Name holds Node fields used only by named nodes (ONAME, OTYPE, some OLITERAL). type Name struct { + miniExpr + subOp Op // uint8 + class Class // uint8 + flags bitset16 + pragma PragmaFlag // int16 + sym *types.Sym + typ *types.Type + fn *Func + offset int64 + val constant.Value + orig Node + embedFiles *[]string // list of embedded files, for ONAME var + PkgName *PkgName // real package for import . names // For a local variable (not param) or extern, the initializing assignment (OAS or OAS2). // For a closure var, the ONAME node of the outer captured variable Defn Node // The ODCLFUNC node (for a static function/method or a closure) in which // local variable or param is declared. - Curfn Node - Param *Param // additional fields for ONAME, OTYPE - Decldepth int32 // declaration loop depth, increased for every loop or label + Curfn Node // Unique number for ONAME nodes within a function. Function outputs // (results) are numbered starting at one, followed by function inputs // (parameters), and then local variables. Vargen is used to distinguish // local variables/params with the same name. - Vargen int32 - flags bitset16 -} + Vargen int32 + Decldepth int32 // declaration loop depth, increased for every loop or label -type Param struct { Ntype Node - Heapaddr Node // temp holding heap address of param + Heapaddr *Name // temp holding heap address of param // ONAME PAUTOHEAP - Stackcopy Node // the PPARAM/PPARAMOUT on-stack slot (moved func params only) + Stackcopy *Name // the PPARAM/PPARAMOUT on-stack slot (moved func params only) // ONAME closure linkage // Consider: @@ -108,114 +117,88 @@ type Param struct { // // Because of the sharding of pieces of the node, x.Defn means x.Name.Defn // and x.Innermost/Outer means x.Name.Param.Innermost/Outer. - Innermost Node - Outer Node - - // OTYPE & ONAME //go:embed info, - // sharing storage to reduce gc.Param size. - // Extra is nil, or else *Extra is a *paramType or an *embedFileList. - Extra *interface{} + Innermost *Name + Outer *Name } // NewNameAt returns a new ONAME Node associated with symbol s at position pos. // The caller is responsible for setting n.Name.Curfn. -func NewNameAt(pos src.XPos, s *types.Sym) Node { - if s == nil { - base.Fatalf("newnamel nil") +func NewNameAt(pos src.XPos, sym *types.Sym) *Name { + if sym == nil { + base.Fatalf("NewNameAt nil") } + return newNameAt(pos, sym) +} - var x struct { - n node - m Name - p Param - } - n := &x.n - n.SetName(&x.m) - n.Name().Param = &x.p - - n.SetOp(ONAME) - n.SetPos(pos) - n.SetOrig(n) - - n.SetSym(s) +// newNameAt is like NewNameAt but allows sym == nil. 
+func newNameAt(pos src.XPos, sym *types.Sym) *Name { + n := new(Name) + n.op = ONAME + n.pos = pos + n.orig = n + n.sym = sym return n } -type paramType struct { - flag PragmaFlag - alias bool +func (n *Name) String() string { return fmt.Sprint(n) } +func (n *Name) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *Name) RawCopy() Node { c := *n; return &c } +func (n *Name) Name() *Name { return n } +func (n *Name) Sym() *types.Sym { return n.sym } +func (n *Name) SetSym(x *types.Sym) { n.sym = x } +func (n *Name) Orig() Node { return n.orig } +func (n *Name) SetOrig(x Node) { n.orig = x } +func (n *Name) SubOp() Op { return n.subOp } +func (n *Name) SetSubOp(x Op) { n.subOp = x } +func (n *Name) Class() Class { return n.class } +func (n *Name) SetClass(x Class) { n.class = x } +func (n *Name) Func() *Func { return n.fn } +func (n *Name) SetFunc(x *Func) { n.fn = x } +func (n *Name) Offset() int64 { return n.offset } +func (n *Name) SetOffset(x int64) { n.offset = x } +func (n *Name) Iota() int64 { return n.offset } +func (n *Name) SetIota(x int64) { n.offset = x } + +func (n *Name) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OLITERAL, ONONAME, ONAME, OTYPE, OIOTA: + n.op = op + } } // Pragma returns the PragmaFlag for p, which must be for an OTYPE. -func (p *Param) Pragma() PragmaFlag { - if p.Extra == nil { - return 0 - } - return (*p.Extra).(*paramType).flag -} +func (n *Name) Pragma() PragmaFlag { return n.pragma } // SetPragma sets the PragmaFlag for p, which must be for an OTYPE. -func (p *Param) SetPragma(flag PragmaFlag) { - if p.Extra == nil { - if flag == 0 { - return - } - p.Extra = new(interface{}) - *p.Extra = ¶mType{flag: flag} - return - } - (*p.Extra).(*paramType).flag = flag -} +func (n *Name) SetPragma(flag PragmaFlag) { n.pragma = flag } // Alias reports whether p, which must be for an OTYPE, is a type alias. -func (p *Param) Alias() bool { - if p.Extra == nil { - return false - } - t, ok := (*p.Extra).(*paramType) - if !ok { - return false - } - return t.alias -} +func (n *Name) Alias() bool { return n.flags&nameAlias != 0 } // SetAlias sets whether p, which must be for an OTYPE, is a type alias. -func (p *Param) SetAlias(alias bool) { - if p.Extra == nil { - if !alias { - return - } - p.Extra = new(interface{}) - *p.Extra = ¶mType{alias: alias} - return - } - (*p.Extra).(*paramType).alias = alias -} - -type embedFileList []string +func (n *Name) SetAlias(alias bool) { n.flags.set(nameAlias, alias) } // EmbedFiles returns the list of embedded files for p, // which must be for an ONAME var. -func (p *Param) EmbedFiles() []string { - if p.Extra == nil { +func (n *Name) EmbedFiles() []string { + if n.embedFiles == nil { return nil } - return *(*p.Extra).(*embedFileList) + return *n.embedFiles } // SetEmbedFiles sets the list of embedded files for p, // which must be for an ONAME var. 
-func (p *Param) SetEmbedFiles(list []string) { - if p.Extra == nil { - if len(list) == 0 { - return - } - f := embedFileList(list) - p.Extra = new(interface{}) - *p.Extra = &f +func (n *Name) SetEmbedFiles(list []string) { + if n.embedFiles == nil && list == nil { return } - *(*p.Extra).(*embedFileList) = list + if n.embedFiles == nil { + n.embedFiles = new([]string) + } + *n.embedFiles = list } const ( @@ -233,6 +216,8 @@ const ( nameInlLocal // PAUTO created by inliner, derived from callee local nameOpenDeferSlot // if temporary var storing info for open-coded defers nameLibfuzzerExtraCounter // if PEXTERN should be assigned to __libfuzzer_extra_counters section + nameIsDDD // is function argument a ... + nameAlias // is type name an alias ) func (n *Name) Captured() bool { return n.flags&nameCaptured != 0 } @@ -249,9 +234,10 @@ func (n *Name) InlFormal() bool { return n.flags&nameInlFormal != 0 func (n *Name) InlLocal() bool { return n.flags&nameInlLocal != 0 } func (n *Name) OpenDeferSlot() bool { return n.flags&nameOpenDeferSlot != 0 } func (n *Name) LibfuzzerExtraCounter() bool { return n.flags&nameLibfuzzerExtraCounter != 0 } +func (n *Name) IsDDD() bool { return n.flags&nameIsDDD != 0 } func (n *Name) SetCaptured(b bool) { n.flags.set(nameCaptured, b) } -func (n *Name) SetReadonly(b bool) { n.flags.set(nameReadonly, b) } +func (n *Name) setReadonly(b bool) { n.flags.set(nameReadonly, b) } func (n *Name) SetByval(b bool) { n.flags.set(nameByval, b) } func (n *Name) SetNeedzero(b bool) { n.flags.set(nameNeedzero, b) } func (n *Name) SetAutoTemp(b bool) { n.flags.set(nameAutoTemp, b) } @@ -264,13 +250,14 @@ func (n *Name) SetInlFormal(b bool) { n.flags.set(nameInlFormal, b) func (n *Name) SetInlLocal(b bool) { n.flags.set(nameInlLocal, b) } func (n *Name) SetOpenDeferSlot(b bool) { n.flags.set(nameOpenDeferSlot, b) } func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtraCounter, b) } +func (n *Name) SetIsDDD(b bool) { n.flags.set(nameIsDDD, b) } // MarkReadonly indicates that n is an ONAME with readonly contents. -func (n *node) MarkReadonly() { +func (n *Name) MarkReadonly() { if n.Op() != ONAME { base.Fatalf("Node.MarkReadonly %v", n.Op()) } - n.Name().SetReadonly(true) + n.Name().setReadonly(true) // Mark the linksym as readonly immediately // so that the SSA backend can use this information. // It will be overridden later during dumpglobls. @@ -278,31 +265,26 @@ func (n *node) MarkReadonly() { } // Val returns the constant.Value for the node. -func (n *node) Val() constant.Value { - if !n.HasVal() { +func (n *Name) Val() constant.Value { + if n.val == nil { return constant.MakeUnknown() } - return *n.e.(*constant.Value) + return n.val } // SetVal sets the constant.Value for the node, // which must not have been used with SetOpt. -func (n *node) SetVal(v constant.Value) { - if n.hasOpt() { - base.Flag.LowerH = 1 - Dump("have Opt", n) - base.Fatalf("have Opt") - } - if n.Op() == OLITERAL { - AssertValidTypeForConst(n.Type(), v) +func (n *Name) SetVal(v constant.Value) { + if n.op != OLITERAL { + panic(n.no("SetVal")) } - n.setHasVal(true) - n.e = &v + AssertValidTypeForConst(n.Type(), v) + n.val = v } // Int64Val returns n as an int64. // n must be an integer or rune constant. -func (n *node) Int64Val() int64 { +func (n *Name) Int64Val() int64 { if !IsConst(n, constant.Int) { base.Fatalf("Int64Val(%v)", n) } @@ -314,7 +296,7 @@ func (n *node) Int64Val() int64 { } // CanInt64 reports whether it is safe to call Int64Val() on n. 
-func (n *node) CanInt64() bool { +func (n *Name) CanInt64() bool { if !IsConst(n, constant.Int) { return false } @@ -327,7 +309,7 @@ func (n *node) CanInt64() bool { // Uint64Val returns n as an uint64. // n must be an integer or rune constant. -func (n *node) Uint64Val() uint64 { +func (n *Name) Uint64Val() uint64 { if !IsConst(n, constant.Int) { base.Fatalf("Uint64Val(%v)", n) } @@ -340,7 +322,7 @@ func (n *node) Uint64Val() uint64 { // BoolVal returns n as a bool. // n must be a boolean constant. -func (n *node) BoolVal() bool { +func (n *Name) BoolVal() bool { if !IsConst(n, constant.Bool) { base.Fatalf("BoolVal(%v)", n) } @@ -349,7 +331,7 @@ func (n *node) BoolVal() bool { // StringVal returns the value of a literal string Node as a string. // n must be a string constant. -func (n *node) StringVal() string { +func (n *Name) StringVal() string { if !IsConst(n, constant.String) { base.Fatalf("StringVal(%v)", n) } diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 0023df97a80ef..a6a24774b5c67 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -59,7 +59,6 @@ type Node interface { Func() *Func SetFunc(x *Func) Name() *Name - SetName(x *Name) Sym() *types.Sym SetSym(x *types.Sym) Offset() int64 @@ -93,7 +92,6 @@ type Node interface { SetHasBreak(x bool) MarkReadonly() Val() constant.Value - HasVal() bool SetVal(v constant.Value) Int64Val() int64 Uint64Val() uint64 @@ -149,11 +147,8 @@ type node struct { // func fn *Func - // ONAME, OTYPE, OPACK, OLABEL, some OLITERAL - name *Name - - sym *types.Sym // various - e interface{} // Opt or Val, see methods below + sym *types.Sym // various + opt interface{} // Various. Usually an offset into a struct. For example: // - ONAME nodes that refer to local variables use it to identify their stack frame position. 
@@ -185,8 +180,7 @@ func (n *node) Type() *types.Type { return n.typ } func (n *node) SetType(x *types.Type) { n.typ = x } func (n *node) Func() *Func { return n.fn } func (n *node) SetFunc(x *Func) { n.fn = x } -func (n *node) Name() *Name { return n.name } -func (n *node) SetName(x *Name) { n.name = x } +func (n *node) Name() *Name { return nil } func (n *node) Sym() *types.Sym { return n.sym } func (n *node) SetSym(x *types.Sym) { n.sym = x } func (n *node) Pos() src.XPos { return n.pos } @@ -208,6 +202,14 @@ func (n *node) PtrList() *Nodes { return &n.list } func (n *node) Rlist() Nodes { return n.rlist } func (n *node) SetRlist(x Nodes) { n.rlist = x } func (n *node) PtrRlist() *Nodes { return &n.rlist } +func (n *node) MarkReadonly() { panic("node.MarkReadOnly") } +func (n *node) Val() constant.Value { panic("node.Val") } +func (n *node) SetVal(constant.Value) { panic("node.SetVal") } +func (n *node) Int64Val() int64 { panic("node.Int64Val") } +func (n *node) CanInt64() bool { return false } +func (n *node) Uint64Val() uint64 { panic("node.Uint64Val") } +func (n *node) BoolVal() bool { panic("node.BoolVal") } +func (n *node) StringVal() string { panic("node.StringVal") } func (n *node) SetOp(op Op) { if !okForNod[op] { @@ -305,8 +307,6 @@ const ( _, nodeBounded // bounds check unnecessary _, nodeHasCall // expression contains a function call _, nodeLikely // if statement condition likely - _, nodeHasVal // node.E contains a Val - _, nodeHasOpt // node.E contains an Opt _, nodeEmbedded // ODCLFIELD embedded type ) @@ -326,8 +326,6 @@ func (n *node) Transient() bool { return n.flags&nodeTransient != 0 } func (n *node) Bounded() bool { return n.flags&nodeBounded != 0 } func (n *node) HasCall() bool { return n.flags&nodeHasCall != 0 } func (n *node) Likely() bool { return n.flags&nodeLikely != 0 } -func (n *node) HasVal() bool { return n.flags&nodeHasVal != 0 } -func (n *node) hasOpt() bool { return n.flags&nodeHasOpt != 0 } func (n *node) Embedded() bool { return n.flags&nodeEmbedded != 0 } func (n *node) SetClass(b Class) { n.flags.set3(nodeClass, uint8(b)) } @@ -344,8 +342,6 @@ func (n *node) SetColas(b bool) { n.flags.set(nodeColas, b) } func (n *node) SetTransient(b bool) { n.flags.set(nodeTransient, b) } func (n *node) SetHasCall(b bool) { n.flags.set(nodeHasCall, b) } func (n *node) SetLikely(b bool) { n.flags.set(nodeLikely, b) } -func (n *node) setHasVal(b bool) { n.flags.set(nodeHasVal, b) } -func (n *node) setHasOpt(b bool) { n.flags.set(nodeHasOpt, b) } func (n *node) SetEmbedded(b bool) { n.flags.set(nodeEmbedded, b) } // MarkNonNil marks a pointer n as being guaranteed non-nil, @@ -380,29 +376,13 @@ func (n *node) SetBounded(b bool) { // Opt returns the optimizer data for the node. func (n *node) Opt() interface{} { - if !n.hasOpt() { - return nil - } - return n.e + return n.opt } // SetOpt sets the optimizer data for the node, which must not have been used with SetVal. // SetOpt(nil) is ignored for Vals to simplify call sites that are clearing Opts. 
func (n *node) SetOpt(x interface{}) { - if x == nil { - if n.hasOpt() { - n.setHasOpt(false) - n.e = nil - } - return - } - if n.HasVal() { - base.Flag.LowerH = 1 - Dump("have Val", n) - base.Fatalf("have Val") - } - n.setHasOpt(true) - n.e = x + n.opt = x } func (n *node) Iota() int64 { @@ -1344,6 +1324,10 @@ func NodAt(pos src.XPos, op Op, nleft, nright Node) Node { return NewEmptyStmt(pos) case OBREAK, OCONTINUE, OFALL, OGOTO: return NewBranchStmt(pos, op, nil) + case OLITERAL, OTYPE, OIOTA: + n := newNameAt(pos, nil) + n.SetOp(op) + return n case OLABEL: return NewLabelStmt(pos, nil) default: @@ -1428,13 +1412,11 @@ var okForNod = [OEND]bool{ OINDEXMAP: true, OINLCALL: true, OINLMARK: true, - OIOTA: true, OITAB: true, OKEY: true, OLABEL: true, OLE: true, OLEN: true, - OLITERAL: true, OLSH: true, OLT: true, OMAKE: true, @@ -1446,13 +1428,11 @@ var okForNod = [OEND]bool{ OMETHEXPR: true, OMOD: true, OMUL: true, - ONAME: true, ONE: true, ONEG: true, ONEW: true, ONEWOBJ: true, ONIL: true, - ONONAME: true, ONOT: true, OOFFSETOF: true, OOR: true, @@ -1499,7 +1479,7 @@ var okForNod = [OEND]bool{ OTINTER: true, OTMAP: true, OTSTRUCT: true, - OTYPE: true, + OTYPE: true, // TODO: Remove once setTypeNode is gone. OTYPESW: true, OVARDEF: true, OVARKILL: true, diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go index a025cb59868ef..8a0b078b9b9f6 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -21,9 +21,8 @@ func TestSizeof(t *testing.T) { _64bit uintptr // size on 64bit platforms }{ {Func{}, 152, 280}, - {Name{}, 36, 64}, - {Param{}, 44, 88}, - {node{}, 88, 152}, + {Name{}, 132, 232}, + {node{}, 84, 144}, } for _, tt := range tests { From 65ae15ac5d43ad82f664e5a914d74c7549568c93 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sat, 28 Nov 2020 07:13:54 -0500 Subject: [PATCH 057/474] [dev.regabi] cmd/compile: move func code from node.go to func.go No code changes here, only copying of text. This will make the diffs in a future CL readable. Passes buildall w/ toolstash -cmp. Change-Id: I325a62e79edd82f1437769891ea63a32f51c0170 Reviewed-on: https://go-review.googlesource.com/c/go/+/274095 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/func.go | 216 ++++++++++++++++++++++++++++ src/cmd/compile/internal/ir/node.go | 205 -------------------------- 2 files changed, 216 insertions(+), 205 deletions(-) create mode 100644 src/cmd/compile/internal/ir/func.go diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go new file mode 100644 index 0000000000000..1566125955483 --- /dev/null +++ b/src/cmd/compile/internal/ir/func.go @@ -0,0 +1,216 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/src" +) + +// A Func corresponds to a single function in a Go program +// (and vice versa: each function is denoted by exactly one *Func). +// +// There are multiple nodes that represent a Func in the IR. +// +// The ONAME node (Func.Name) is used for plain references to it. +// The ODCLFUNC node (Func.Decl) is used for its declaration code. +// The OCLOSURE node (Func.Closure) is used for a reference to a +// function literal. 
+//
+// A Func for an imported function will have only an ONAME node.
+// A declared function or method has an ONAME and an ODCLFUNC.
+// A function literal is represented directly by an OCLOSURE, but it also
+// has an ODCLFUNC (and a matching ONAME) representing the compiled
+// underlying form of the closure, which accesses the captured variables
+// using a special data structure passed in a register.
+//
+// A method declaration is represented like functions, except f.Sym
+// will be the qualified method name (e.g., "T.m") and
+// f.Func.Shortname is the bare method name (e.g., "m").
+//
+// A method expression (T.M) is represented as an OMETHEXPR node,
+// in which n.Left and n.Right point to the type and method, respectively.
+// Each distinct mention of a method expression in the source code
+// constructs a fresh node.
+//
+// A method value (t.M) is represented by ODOTMETH/ODOTINTER
+// when it is called directly and by OCALLPART otherwise.
+// These are like method expressions, except that for ODOTMETH/ODOTINTER,
+// the method name is stored in Sym instead of Right.
+// Each OCALLPART ends up being implemented as a new
+// function, a bit like a closure, with its own ODCLFUNC.
+// The OCALLPART uses n.Func to record the linkage to
+// the generated ODCLFUNC (as n.Func.Decl), but there is no
+// pointer from the Func back to the OCALLPART.
+type Func struct {
+	Nname    Node // ONAME node
+	Decl     Node // ODCLFUNC node
+	OClosure Node // OCLOSURE node
+
+	Shortname *types.Sym
+
+	// Extra entry code for the function. For example, allocate and initialize
+	// memory for escaping parameters.
+	Enter Nodes
+	Exit  Nodes
+	// ONAME nodes for all params/locals for this func/closure, does NOT
+	// include closurevars until transformclosure runs.
+	Dcl []Node
+
+	ClosureEnter  Nodes // list of ONAME nodes of captured variables
+	ClosureType   Node  // closure representation type
+	ClosureCalled bool  // closure is only immediately called
+	ClosureVars   Nodes // closure params; each has closurevar set
+
+	// Parents records the parent scope of each scope within a
+	// function. The root scope (0) has no parent, so the i'th
+	// scope's parent is stored at Parents[i-1].
+	Parents []ScopeID
+
+	// Marks records scope boundary changes.
+	Marks []Mark
+
+	// Closgen tracks how many closures have been generated within
+	// this function. Used by closurename for creating unique
+	// function names.
+	Closgen int
+
+	FieldTrack map[*types.Sym]struct{}
+	DebugInfo  interface{}
+	LSym       *obj.LSym
+
+	Inl *Inline
+
+	Label int32 // largest auto-generated label in this function
+
+	Endlineno src.XPos
+	WBPos     src.XPos // position of first write barrier; see SetWBPos
+
+	Pragma PragmaFlag // go:xxx function annotations
+
+	flags      bitset16
+	NumDefers  int // number of defer calls in the function
+	NumReturns int // number of explicit returns in the function
+
+	// nwbrCalls records the LSyms of functions called by this
+	// function for go:nowritebarrierrec analysis. Only filled in
+	// if nowritebarrierrecCheck != nil.
+	NWBRCalls *[]SymAndPos
+}
+
+// An Inline holds fields used for function bodies that can be inlined.
+type Inline struct {
+	Cost int32 // heuristic cost of inlining this function
+
+	// Copies of Func.Dcl and Nbody for use during inlining.
+	Dcl  []Node
+	Body []Node
+}
+
+// A Mark represents a scope boundary.
+type Mark struct {
+	// Pos is the position of the token that marks the scope
+	// change.
+	Pos src.XPos
+
+	// Scope identifies the innermost scope to the right of Pos.
+ Scope ScopeID +} + +// A ScopeID represents a lexical scope within a function. +type ScopeID int32 + +const ( + funcDupok = 1 << iota // duplicate definitions ok + funcWrapper // is method wrapper + funcNeedctxt // function uses context register (has closure variables) + funcReflectMethod // function calls reflect.Type.Method or MethodByName + // true if closure inside a function; false if a simple function or a + // closure in a global variable initialization + funcIsHiddenClosure + funcHasDefer // contains a defer statement + funcNilCheckDisabled // disable nil checks when compiling this function + funcInlinabilityChecked // inliner has already determined whether the function is inlinable + funcExportInline // include inline body in export data + funcInstrumentBody // add race/msan instrumentation during SSA construction + funcOpenCodedDeferDisallowed // can't do open-coded defers +) + +type SymAndPos struct { + Sym *obj.LSym // LSym of callee + Pos src.XPos // line of call +} + +func (f *Func) Dupok() bool { return f.flags&funcDupok != 0 } +func (f *Func) Wrapper() bool { return f.flags&funcWrapper != 0 } +func (f *Func) Needctxt() bool { return f.flags&funcNeedctxt != 0 } +func (f *Func) ReflectMethod() bool { return f.flags&funcReflectMethod != 0 } +func (f *Func) IsHiddenClosure() bool { return f.flags&funcIsHiddenClosure != 0 } +func (f *Func) HasDefer() bool { return f.flags&funcHasDefer != 0 } +func (f *Func) NilCheckDisabled() bool { return f.flags&funcNilCheckDisabled != 0 } +func (f *Func) InlinabilityChecked() bool { return f.flags&funcInlinabilityChecked != 0 } +func (f *Func) ExportInline() bool { return f.flags&funcExportInline != 0 } +func (f *Func) InstrumentBody() bool { return f.flags&funcInstrumentBody != 0 } +func (f *Func) OpenCodedDeferDisallowed() bool { return f.flags&funcOpenCodedDeferDisallowed != 0 } + +func (f *Func) SetDupok(b bool) { f.flags.set(funcDupok, b) } +func (f *Func) SetWrapper(b bool) { f.flags.set(funcWrapper, b) } +func (f *Func) SetNeedctxt(b bool) { f.flags.set(funcNeedctxt, b) } +func (f *Func) SetReflectMethod(b bool) { f.flags.set(funcReflectMethod, b) } +func (f *Func) SetIsHiddenClosure(b bool) { f.flags.set(funcIsHiddenClosure, b) } +func (f *Func) SetHasDefer(b bool) { f.flags.set(funcHasDefer, b) } +func (f *Func) SetNilCheckDisabled(b bool) { f.flags.set(funcNilCheckDisabled, b) } +func (f *Func) SetInlinabilityChecked(b bool) { f.flags.set(funcInlinabilityChecked, b) } +func (f *Func) SetExportInline(b bool) { f.flags.set(funcExportInline, b) } +func (f *Func) SetInstrumentBody(b bool) { f.flags.set(funcInstrumentBody, b) } +func (f *Func) SetOpenCodedDeferDisallowed(b bool) { f.flags.set(funcOpenCodedDeferDisallowed, b) } + +func (f *Func) SetWBPos(pos src.XPos) { + if base.Debug.WB != 0 { + base.WarnfAt(pos, "write barrier") + } + if !f.WBPos.IsKnown() { + f.WBPos = pos + } +} + +// funcname returns the name (without the package) of the function n. +func FuncName(n Node) string { + if n == nil || n.Func() == nil || n.Func().Nname == nil { + return "" + } + return n.Func().Nname.Sym().Name +} + +// pkgFuncName returns the name of the function referenced by n, with package prepended. +// This differs from the compiler's internal convention where local functions lack a package +// because the ultimate consumer of this is a human looking at an IDE; package is only empty +// if the compilation package is actually the empty string. 
+func PkgFuncName(n Node) string { + var s *types.Sym + if n == nil { + return "" + } + if n.Op() == ONAME { + s = n.Sym() + } else { + if n.Func() == nil || n.Func().Nname == nil { + return "" + } + s = n.Func().Nname.Sym() + } + pkg := s.Pkg + + p := base.Ctxt.Pkgpath + if pkg != nil && pkg.Path != "" { + p = pkg.Path + } + if p == "" { + return s.Name + } + return p + "." + s.Name +} diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index a6a24774b5c67..1b01032c9b395 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -14,7 +14,6 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/types" - "cmd/internal/obj" "cmd/internal/src" ) @@ -403,209 +402,10 @@ func MayBeShared(n Node) bool { return false } -// funcname returns the name (without the package) of the function n. -func FuncName(n Node) string { - if n == nil || n.Func() == nil || n.Func().Nname == nil { - return "" - } - return n.Func().Nname.Sym().Name -} - -// pkgFuncName returns the name of the function referenced by n, with package prepended. -// This differs from the compiler's internal convention where local functions lack a package -// because the ultimate consumer of this is a human looking at an IDE; package is only empty -// if the compilation package is actually the empty string. -func PkgFuncName(n Node) string { - var s *types.Sym - if n == nil { - return "" - } - if n.Op() == ONAME { - s = n.Sym() - } else { - if n.Func() == nil || n.Func().Nname == nil { - return "" - } - s = n.Func().Nname.Sym() - } - pkg := s.Pkg - - p := base.Ctxt.Pkgpath - if pkg != nil && pkg.Path != "" { - p = pkg.Path - } - if p == "" { - return s.Name - } - return p + "." + s.Name -} - // The compiler needs *Node to be assignable to cmd/compile/internal/ssa.Sym. func (n *node) CanBeAnSSASym() { } -// A Func corresponds to a single function in a Go program -// (and vice versa: each function is denoted by exactly one *Func). -// -// There are multiple nodes that represent a Func in the IR. -// -// The ONAME node (Func.Name) is used for plain references to it. -// The ODCLFUNC node (Func.Decl) is used for its declaration code. -// The OCLOSURE node (Func.Closure) is used for a reference to a -// function literal. -// -// A Func for an imported function will have only an ONAME node. -// A declared function or method has an ONAME and an ODCLFUNC. -// A function literal is represented directly by an OCLOSURE, but it also -// has an ODCLFUNC (and a matching ONAME) representing the compiled -// underlying form of the closure, which accesses the captured variables -// using a special data structure passed in a register. -// -// A method declaration is represented like functions, except f.Sym -// will be the qualified method name (e.g., "T.m") and -// f.Func.Shortname is the bare method name (e.g., "m"). -// -// A method expression (T.M) is represented as an OMETHEXPR node, -// in which n.Left and n.Right point to the type and method, respectively. -// Each distinct mention of a method expression in the source code -// constructs a fresh node. -// -// A method value (t.M) is represented by ODOTMETH/ODOTINTER -// when it is called directly and by OCALLPART otherwise. -// These are like method expressions, except that for ODOTMETH/ODOTINTER, -// the method name is stored in Sym instead of Right. -// Each OCALLPART ends up being implemented as a new -// function, a bit like a closure, with its own ODCLFUNC. 
-// The OCALLPART has uses n.Func to record the linkage to -// the generated ODCLFUNC (as n.Func.Decl), but there is no -// pointer from the Func back to the OCALLPART. -type Func struct { - Nname Node // ONAME node - Decl Node // ODCLFUNC node - OClosure Node // OCLOSURE node - - Shortname *types.Sym - - // Extra entry code for the function. For example, allocate and initialize - // memory for escaping parameters. - Enter Nodes - Exit Nodes - // ONAME nodes for all params/locals for this func/closure, does NOT - // include closurevars until transformclosure runs. - Dcl []Node - - ClosureEnter Nodes // list of ONAME nodes of captured variables - ClosureType Node // closure representation type - ClosureCalled bool // closure is only immediately called - ClosureVars Nodes // closure params; each has closurevar set - - // Parents records the parent scope of each scope within a - // function. The root scope (0) has no parent, so the i'th - // scope's parent is stored at Parents[i-1]. - Parents []ScopeID - - // Marks records scope boundary changes. - Marks []Mark - - // Closgen tracks how many closures have been generated within - // this function. Used by closurename for creating unique - // function names. - Closgen int - - FieldTrack map[*types.Sym]struct{} - DebugInfo interface{} - LSym *obj.LSym - - Inl *Inline - - Label int32 // largest auto-generated label in this function - - Endlineno src.XPos - WBPos src.XPos // position of first write barrier; see SetWBPos - - Pragma PragmaFlag // go:xxx function annotations - - flags bitset16 - NumDefers int // number of defer calls in the function - NumReturns int // number of explicit returns in the function - - // nwbrCalls records the LSyms of functions called by this - // function for go:nowritebarrierrec analysis. Only filled in - // if nowritebarrierrecCheck != nil. - NWBRCalls *[]SymAndPos -} - -// An Inline holds fields used for function bodies that can be inlined. -type Inline struct { - Cost int32 // heuristic cost of inlining this function - - // Copies of Func.Dcl and Nbody for use during inlining. - Dcl []Node - Body []Node -} - -// A Mark represents a scope boundary. -type Mark struct { - // Pos is the position of the token that marks the scope - // change. - Pos src.XPos - - // Scope identifies the innermost scope to the right of Pos. - Scope ScopeID -} - -// A ScopeID represents a lexical scope within a function. 
-type ScopeID int32 - -const ( - funcDupok = 1 << iota // duplicate definitions ok - funcWrapper // is method wrapper - funcNeedctxt // function uses context register (has closure variables) - funcReflectMethod // function calls reflect.Type.Method or MethodByName - // true if closure inside a function; false if a simple function or a - // closure in a global variable initialization - funcIsHiddenClosure - funcHasDefer // contains a defer statement - funcNilCheckDisabled // disable nil checks when compiling this function - funcInlinabilityChecked // inliner has already determined whether the function is inlinable - funcExportInline // include inline body in export data - funcInstrumentBody // add race/msan instrumentation during SSA construction - funcOpenCodedDeferDisallowed // can't do open-coded defers -) - -func (f *Func) Dupok() bool { return f.flags&funcDupok != 0 } -func (f *Func) Wrapper() bool { return f.flags&funcWrapper != 0 } -func (f *Func) Needctxt() bool { return f.flags&funcNeedctxt != 0 } -func (f *Func) ReflectMethod() bool { return f.flags&funcReflectMethod != 0 } -func (f *Func) IsHiddenClosure() bool { return f.flags&funcIsHiddenClosure != 0 } -func (f *Func) HasDefer() bool { return f.flags&funcHasDefer != 0 } -func (f *Func) NilCheckDisabled() bool { return f.flags&funcNilCheckDisabled != 0 } -func (f *Func) InlinabilityChecked() bool { return f.flags&funcInlinabilityChecked != 0 } -func (f *Func) ExportInline() bool { return f.flags&funcExportInline != 0 } -func (f *Func) InstrumentBody() bool { return f.flags&funcInstrumentBody != 0 } -func (f *Func) OpenCodedDeferDisallowed() bool { return f.flags&funcOpenCodedDeferDisallowed != 0 } - -func (f *Func) SetDupok(b bool) { f.flags.set(funcDupok, b) } -func (f *Func) SetWrapper(b bool) { f.flags.set(funcWrapper, b) } -func (f *Func) SetNeedctxt(b bool) { f.flags.set(funcNeedctxt, b) } -func (f *Func) SetReflectMethod(b bool) { f.flags.set(funcReflectMethod, b) } -func (f *Func) SetIsHiddenClosure(b bool) { f.flags.set(funcIsHiddenClosure, b) } -func (f *Func) SetHasDefer(b bool) { f.flags.set(funcHasDefer, b) } -func (f *Func) SetNilCheckDisabled(b bool) { f.flags.set(funcNilCheckDisabled, b) } -func (f *Func) SetInlinabilityChecked(b bool) { f.flags.set(funcInlinabilityChecked, b) } -func (f *Func) SetExportInline(b bool) { f.flags.set(funcExportInline, b) } -func (f *Func) SetInstrumentBody(b bool) { f.flags.set(funcInstrumentBody, b) } -func (f *Func) SetOpenCodedDeferDisallowed(b bool) { f.flags.set(funcOpenCodedDeferDisallowed, b) } - -func (f *Func) SetWBPos(pos src.XPos) { - if base.Debug.WB != 0 { - base.WarnfAt(pos, "write barrier") - } - if !f.WBPos.IsKnown() { - f.WBPos = pos - } -} - //go:generate stringer -type=Op -trimprefix=O type Op uint8 @@ -1111,11 +911,6 @@ const ( GoBuildPragma ) -type SymAndPos struct { - Sym *obj.LSym // LSym of callee - Pos src.XPos // line of call -} - func AsNode(n types.IRNode) Node { if n == nil { return nil From c4bd0b7474f169a60acf66306a4a721f790e36c9 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sat, 28 Nov 2020 07:23:50 -0500 Subject: [PATCH 058/474] [dev.regabi] cmd/compile: make ir.Func the ODCLFUNC Node implementation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Before this CL, an ODCLFUNC Node was represented by both a node struct and a Func struct (and a Name for the ONAME, which isn't changing here). 
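Concretely, the old NodAt(ODCLFUNC) path (removed in the node.go hunk below)
allocated the two structs together and cross-linked them; a sketch of that
shape, with sizes taken from sizeof_test.go:

	var x struct {
		n node // generic IR node: 144 bytes on 64-bit
		f Func // function-specific data: 280 bytes on 64-bit
	}
	n := &x.n
	n.SetFunc(&x.f)
	n.Func().Decl = n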
Now Func can be repurposed as the ODCLFUNC implementation, replacing the two structs totaling 280+144 = 424 bytes (64-bit) with a single 320-byte struct. Using the *Func as the node also gives us a clear, typed answer to “which node should we use to represent functions?” The next CL will clean up uses. This CL is just the trivial change in representation. Passes buildall w/ toolstash -cmp. Change-Id: Ie6d670da91d6eb8d67a85f8f83630b9586dc7443 Reviewed-on: https://go-review.googlesource.com/c/go/+/274096 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/fmt.go | 7 +++++ src/cmd/compile/internal/ir/func.go | 34 ++++++++++++++++++++++ src/cmd/compile/internal/ir/node.go | 9 +----- src/cmd/compile/internal/ir/sizeof_test.go | 2 +- 4 files changed, 43 insertions(+), 9 deletions(-) diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index e749778030c97..3822c4c73bd77 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -1269,6 +1269,13 @@ func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) { mode.Fprintf(s, ")") } + case ODCLFUNC: + if sym := n.Sym(); sym != nil { + fmt.Fprint(s, smodeString(sym, mode)) + return + } + mode.Fprintf(s, "") + case ONAME: // Special case: name used as local variable in export. // _ becomes ~b%d internally; print as _ for export diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 1566125955483..57ec0707e9fc4 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -9,6 +9,7 @@ import ( "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/src" + "fmt" ) // A Func corresponds to a single function in a Go program @@ -47,6 +48,11 @@ import ( // the generated ODCLFUNC (as n.Func.Decl), but there is no // pointer from the Func back to the OCALLPART. type Func struct { + miniNode + typ *types.Type + body Nodes + iota int64 + Nname Node // ONAME node Decl Node // ODCLFUNC node OClosure Node // OCLOSURE node @@ -102,6 +108,34 @@ type Func struct { NWBRCalls *[]SymAndPos } +func NewFunc(pos src.XPos) *Func { + f := new(Func) + f.pos = pos + f.op = ODCLFUNC + f.Decl = f + f.iota = -1 + return f +} + +func (f *Func) String() string { return fmt.Sprint(f) } +func (f *Func) Format(s fmt.State, verb rune) { FmtNode(f, s, verb) } +func (f *Func) RawCopy() Node { panic(f.no("RawCopy")) } +func (f *Func) Func() *Func { return f } +func (f *Func) Body() Nodes { return f.body } +func (f *Func) PtrBody() *Nodes { return &f.body } +func (f *Func) SetBody(x Nodes) { f.body = x } +func (f *Func) Type() *types.Type { return f.typ } +func (f *Func) SetType(x *types.Type) { f.typ = x } +func (f *Func) Iota() int64 { return f.iota } +func (f *Func) SetIota(x int64) { f.iota = x } + +func (f *Func) Sym() *types.Sym { + if f.Nname != nil { + return f.Nname.Sym() + } + return nil +} + // An Inline holds fields used for function bodies that can be inlined. 
type Inline struct { Cost int32 // heuristic cost of inlining this function diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 1b01032c9b395..02a5d7769a4c3 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -1106,13 +1106,7 @@ func NodAt(pos src.XPos, op Op, nleft, nright Node) Node { var n *node switch op { case ODCLFUNC: - var x struct { - n node - f Func - } - n = &x.n - n.SetFunc(&x.f) - n.Func().Decl = n + return NewFunc(pos) case OPACK: return NewPkgName(pos, nil, nil) case OEMPTY: @@ -1179,7 +1173,6 @@ var okForNod = [OEND]bool{ ODCL: true, ODCLCONST: true, ODCLFIELD: true, - ODCLFUNC: true, ODCLTYPE: true, ODDD: true, ODEFER: true, diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go index 8a0b078b9b9f6..8597ad492ad32 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {Func{}, 152, 280}, + {Func{}, 180, 320}, {Name{}, 132, 232}, {node{}, 84, 144}, } From e84b27bec587e4393533e83e3ea1cbf1ed548425 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sat, 28 Nov 2020 07:31:18 -0500 Subject: [PATCH 059/474] [dev.regabi] cmd/compile: clean up Name and Func uses Now that we have specific types for ONAME and ODCLFUNC nodes (*Name and *Func), use them throughout the compiler to be more precise about what data is being operated on. This is a somewhat large CL, but once you start applying the types in a few places, you end up needing to apply them to many other places to keep everything type-checking. A lot of code also melts away as types are added. Passes buildall w/ toolstash -cmp. 
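A few representative signature changes from the hunks below give the
flavor of the tightening (before → after):

	var Curfn ir.Node             →  var Curfn *ir.Func
	func funchdr(n ir.Node)       →  func funchdr(fn *ir.Func)
	func capturevars(dcl ir.Node) →  func capturevars(fn *ir.Func)
	func moveToHeap(n ir.Node)    →  func moveToHeap(n *ir.Name)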
Change-Id: I21dd9b945d701c470332bac5394fca744a5b232d Reviewed-on: https://go-review.googlesource.com/c/go/+/274097 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/fmtmap_test.go | 8 +- src/cmd/compile/internal/gc/alg.go | 12 +- src/cmd/compile/internal/gc/bexport.go | 2 +- src/cmd/compile/internal/gc/closure.go | 142 ++++++------- src/cmd/compile/internal/gc/dcl.go | 121 ++++++----- src/cmd/compile/internal/gc/dwinl.go | 2 + src/cmd/compile/internal/gc/escape.go | 80 ++++--- src/cmd/compile/internal/gc/export.go | 6 +- src/cmd/compile/internal/gc/gen.go | 12 +- src/cmd/compile/internal/gc/go.go | 6 +- src/cmd/compile/internal/gc/gsubr.go | 12 +- src/cmd/compile/internal/gc/iexport.go | 16 +- src/cmd/compile/internal/gc/iimport.go | 34 +-- src/cmd/compile/internal/gc/init.go | 12 +- src/cmd/compile/internal/gc/initorder.go | 10 +- src/cmd/compile/internal/gc/inl.go | 231 ++++++++++----------- src/cmd/compile/internal/gc/main.go | 26 +-- src/cmd/compile/internal/gc/noder.go | 63 +++--- src/cmd/compile/internal/gc/obj.go | 2 +- src/cmd/compile/internal/gc/order.go | 6 +- src/cmd/compile/internal/gc/pgen.go | 72 ++++--- src/cmd/compile/internal/gc/pgen_test.go | 18 +- src/cmd/compile/internal/gc/plive.go | 22 +- src/cmd/compile/internal/gc/racewalk.go | 18 +- src/cmd/compile/internal/gc/range.go | 2 +- src/cmd/compile/internal/gc/scc.go | 30 +-- src/cmd/compile/internal/gc/ssa.go | 82 ++++---- src/cmd/compile/internal/gc/subr.go | 15 +- src/cmd/compile/internal/gc/typecheck.go | 56 ++--- src/cmd/compile/internal/gc/walk.go | 79 +++---- src/cmd/compile/internal/ir/fmt.go | 20 +- src/cmd/compile/internal/ir/func.go | 34 +-- src/cmd/compile/internal/ir/name.go | 7 +- src/cmd/compile/internal/ir/sizeof_test.go | 4 +- 34 files changed, 628 insertions(+), 634 deletions(-) diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go index 978c83e5c2128..e949a89d93312 100644 --- a/src/cmd/compile/fmtmap_test.go +++ b/src/cmd/compile/fmtmap_test.go @@ -22,6 +22,12 @@ package main_test var knownFormats = map[string]string{ "*bytes.Buffer %s": "", "*cmd/compile/internal/gc.EscLocation %v": "", + "*cmd/compile/internal/ir.Func %+v": "", + "*cmd/compile/internal/ir.Func %L": "", + "*cmd/compile/internal/ir.Func %v": "", + "*cmd/compile/internal/ir.Name %#v": "", + "*cmd/compile/internal/ir.Name %+v": "", + "*cmd/compile/internal/ir.Name %L": "", "*cmd/compile/internal/ir.Name %v": "", "*cmd/compile/internal/ir.node %v": "", "*cmd/compile/internal/ssa.Block %s": "", @@ -54,6 +60,7 @@ var knownFormats = map[string]string{ "*math/big.Float %f": "", "*math/big.Int %s": "", "[16]byte %x": "", + "[]*cmd/compile/internal/ir.Name %v": "", "[]*cmd/compile/internal/ssa.Block %v": "", "[]*cmd/compile/internal/ssa.Value %v": "", "[][]string %q": "", @@ -77,7 +84,6 @@ var knownFormats = map[string]string{ "cmd/compile/internal/ir.Class %d": "", "cmd/compile/internal/ir.Class %v": "", "cmd/compile/internal/ir.FmtMode %d": "", - "cmd/compile/internal/ir.Node %#v": "", "cmd/compile/internal/ir.Node %+S": "", "cmd/compile/internal/ir.Node %+v": "", "cmd/compile/internal/ir.Node %L": "", diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index 356f0eada76f9..b40a56fe39ee4 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -382,8 +382,8 @@ func genhash(t *types.Type) *obj.LSym { funcbody() - fn.Func().SetDupok(true) - fn = typecheck(fn, ctxStmt) + fn.SetDupok(true) + 
typecheckFunc(fn) Curfn = fn typecheckslice(fn.Body().Slice(), ctxStmt) @@ -393,7 +393,7 @@ func genhash(t *types.Type) *obj.LSym { testdclstack() } - fn.Func().SetNilCheckDisabled(true) + fn.SetNilCheckDisabled(true) xtop = append(xtop, fn) // Build closure. It doesn't close over any variables, so @@ -761,8 +761,8 @@ func geneq(t *types.Type) *obj.LSym { funcbody() - fn.Func().SetDupok(true) - fn = typecheck(fn, ctxStmt) + fn.SetDupok(true) + typecheckFunc(fn) Curfn = fn typecheckslice(fn.Body().Slice(), ctxStmt) @@ -776,7 +776,7 @@ func geneq(t *types.Type) *obj.LSym { // We are comparing a struct or an array, // neither of which can be nil, and our comparisons // are shallow. - fn.Func().SetNilCheckDisabled(true) + fn.SetNilCheckDisabled(true) xtop = append(xtop, fn) // Generate a closure which points at the function we just generated. diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index a470b842ff8d1..dbbac559ae63c 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -16,7 +16,7 @@ type exporter struct { // markObject visits a reachable object. func (p *exporter) markObject(n ir.Node) { if n.Op() == ir.ONAME && n.Class() == ir.PFUNC { - inlFlood(n) + inlFlood(n.(*ir.Name)) } p.markType(n.Type()) diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index 7a1078326dee4..0cf59ee0ebc4e 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -17,28 +17,27 @@ func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node { xtype := p.typeExpr(expr.Type) ntype := p.typeExpr(expr.Type) - dcl := p.nod(expr, ir.ODCLFUNC, nil, nil) - fn := dcl.Func() + fn := ir.NewFunc(p.pos(expr)) fn.SetIsHiddenClosure(Curfn != nil) - fn.Nname = newfuncnamel(p.pos(expr), ir.BlankNode.Sym(), fn) // filled in by typecheckclosure - fn.Nname.Name().Ntype = xtype - fn.Nname.Name().Defn = dcl + fn.Nname = newFuncNameAt(p.pos(expr), ir.BlankNode.Sym(), fn) // filled in by typecheckclosure + fn.Nname.Ntype = xtype + fn.Nname.Defn = fn clo := p.nod(expr, ir.OCLOSURE, nil, nil) clo.SetFunc(fn) fn.ClosureType = ntype fn.OClosure = clo - p.funcBody(dcl, expr.Body) + p.funcBody(fn, expr.Body) // closure-specific variables are hanging off the // ordinary ones in the symbol table; see oldname. // unhook them. // make the list of pointers for the closure call. - for _, v := range fn.ClosureVars.Slice() { + for _, v := range fn.ClosureVars { // Unlink from v1; see comment in syntax.go type Param for these fields. - v1 := v.Name().Defn - v1.Name().Innermost = v.Name().Outer + v1 := v.Defn + v1.Name().Innermost = v.Outer // If the closure usage of v is not dense, // we need to make it dense; now that we're out @@ -68,7 +67,7 @@ func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node { // obtains f3's v, creating it if necessary (as it is in the example). // // capturevars will decide whether to use v directly or &v. - v.Name().Outer = oldname(v.Sym()).(*ir.Name) + v.Outer = oldname(v.Sym()).(*ir.Name) } return clo @@ -80,26 +79,25 @@ func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node { // separate pass from type-checking. 
func typecheckclosure(clo ir.Node, top int) { fn := clo.Func() - dcl := fn.Decl // Set current associated iota value, so iota can be used inside // function in ConstSpec, see issue #22344 if x := getIotaValue(); x >= 0 { - dcl.SetIota(x) + fn.SetIota(x) } fn.ClosureType = typecheck(fn.ClosureType, ctxType) clo.SetType(fn.ClosureType.Type()) - fn.ClosureCalled = top&ctxCallee != 0 + fn.SetClosureCalled(top&ctxCallee != 0) - // Do not typecheck dcl twice, otherwise, we will end up pushing - // dcl to xtop multiple times, causing initLSym called twice. + // Do not typecheck fn twice, otherwise, we will end up pushing + // fn to xtop multiple times, causing initLSym called twice. // See #30709 - if dcl.Typecheck() == 1 { + if fn.Typecheck() == 1 { return } - for _, ln := range fn.ClosureVars.Slice() { - n := ln.Name().Defn + for _, ln := range fn.ClosureVars { + n := ln.Defn if !n.Name().Captured() { n.Name().SetCaptured(true) if n.Name().Decldepth == 0 { @@ -116,7 +114,7 @@ func typecheckclosure(clo ir.Node, top int) { fn.Nname.SetSym(closurename(Curfn)) setNodeNameFunc(fn.Nname) - dcl = typecheck(dcl, ctxStmt) + typecheckFunc(fn) // Type check the body now, but only if we're inside a function. // At top level (in a variable initialization: curfn==nil) we're not @@ -124,29 +122,29 @@ func typecheckclosure(clo ir.Node, top int) { // underlying closure function we create is added to xtop. if Curfn != nil && clo.Type() != nil { oldfn := Curfn - Curfn = dcl + Curfn = fn olddd := decldepth decldepth = 1 - typecheckslice(dcl.Body().Slice(), ctxStmt) + typecheckslice(fn.Body().Slice(), ctxStmt) decldepth = olddd Curfn = oldfn } - xtop = append(xtop, dcl) + xtop = append(xtop, fn) } // globClosgen is like Func.Closgen, but for the global scope. -var globClosgen int +var globClosgen int32 // closurename generates a new unique name for a closure within // outerfunc. -func closurename(outerfunc ir.Node) *types.Sym { +func closurename(outerfunc *ir.Func) *types.Sym { outer := "glob." prefix := "func" gen := &globClosgen if outerfunc != nil { - if outerfunc.Func().OClosure != nil { + if outerfunc.OClosure != nil { prefix = "" } @@ -155,8 +153,8 @@ func closurename(outerfunc ir.Node) *types.Sym { // There may be multiple functions named "_". In those // cases, we can't use their individual Closgens as it // would lead to name clashes. - if !ir.IsBlank(outerfunc.Func().Nname) { - gen = &outerfunc.Func().Closgen + if !ir.IsBlank(outerfunc.Nname) { + gen = &outerfunc.Closgen } } @@ -172,11 +170,10 @@ var capturevarscomplete bool // by value or by reference. // We use value capturing for values <= 128 bytes that are never reassigned // after capturing (effectively constant). -func capturevars(dcl ir.Node) { +func capturevars(fn *ir.Func) { lno := base.Pos - base.Pos = dcl.Pos() - fn := dcl.Func() - cvars := fn.ClosureVars.Slice() + base.Pos = fn.Pos() + cvars := fn.ClosureVars out := cvars[:0] for _, v := range cvars { if v.Type() == nil { @@ -195,12 +192,12 @@ func capturevars(dcl ir.Node) { dowidth(v.Type()) var outer ir.Node - outer = v.Name().Outer - outermost := v.Name().Defn + outer = v.Outer + outermost := v.Defn // out parameters will be assigned to implicitly upon return. 
if outermost.Class() != ir.PPARAMOUT && !outermost.Name().Addrtaken() && !outermost.Name().Assigned() && v.Type().Width <= 128 { - v.Name().SetByval(true) + v.SetByval(true) } else { outermost.Name().SetAddrtaken(true) outer = ir.Nod(ir.OADDR, outer, nil) @@ -208,11 +205,11 @@ func capturevars(dcl ir.Node) { if base.Flag.LowerM > 1 { var name *types.Sym - if v.Name().Curfn != nil && v.Name().Curfn.Func().Nname != nil { - name = v.Name().Curfn.Func().Nname.Sym() + if v.Curfn != nil && v.Curfn.Nname != nil { + name = v.Curfn.Sym() } how := "ref" - if v.Name().Byval() { + if v.Byval() { how = "value" } base.WarnfAt(v.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym(), outermost.Name().Addrtaken(), outermost.Name().Assigned(), int32(v.Type().Width)) @@ -222,18 +219,17 @@ func capturevars(dcl ir.Node) { fn.ClosureEnter.Append(outer) } - fn.ClosureVars.Set(out) + fn.ClosureVars = out base.Pos = lno } // transformclosure is called in a separate phase after escape analysis. // It transform closure bodies to properly reference captured variables. -func transformclosure(dcl ir.Node) { +func transformclosure(fn *ir.Func) { lno := base.Pos - base.Pos = dcl.Pos() - fn := dcl.Func() + base.Pos = fn.Pos() - if fn.ClosureCalled { + if fn.ClosureCalled() { // If the closure is directly called, we transform it to a plain function call // with variables passed as args. This avoids allocation of a closure object. // Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE) @@ -254,16 +250,16 @@ func transformclosure(dcl ir.Node) { // We are going to insert captured variables before input args. var params []*types.Field - var decls []ir.Node - for _, v := range fn.ClosureVars.Slice() { - if !v.Name().Byval() { + var decls []*ir.Name + for _, v := range fn.ClosureVars { + if !v.Byval() { // If v of type T is captured by reference, // we introduce function param &v *T // and v remains PAUTOHEAP with &v heapaddr // (accesses will implicitly deref &v). addr := NewName(lookup("&" + v.Sym().Name)) addr.SetType(types.NewPtr(v.Type())) - v.Name().Heapaddr = addr + v.Heapaddr = addr v = addr } @@ -282,24 +278,24 @@ func transformclosure(dcl ir.Node) { } dowidth(f.Type()) - dcl.SetType(f.Type()) // update type of ODCLFUNC + fn.SetType(f.Type()) // update type of ODCLFUNC } else { // The closure is not called, so it is going to stay as closure. var body []ir.Node offset := int64(Widthptr) - for _, v := range fn.ClosureVars.Slice() { + for _, v := range fn.ClosureVars { // cv refers to the field inside of closure OSTRUCTLIT. cv := ir.Nod(ir.OCLOSUREVAR, nil, nil) cv.SetType(v.Type()) - if !v.Name().Byval() { + if !v.Byval() { cv.SetType(types.NewPtr(v.Type())) } offset = Rnd(offset, int64(cv.Type().Align)) cv.SetOffset(offset) offset += cv.Type().Width - if v.Name().Byval() && v.Type().Width <= int64(2*Widthptr) { + if v.Byval() && v.Type().Width <= int64(2*Widthptr) { // If it is a small variable captured by value, downgrade it to PAUTO. 
v.SetClass(ir.PAUTO) fn.Dcl = append(fn.Dcl, v) @@ -310,11 +306,11 @@ func transformclosure(dcl ir.Node) { addr := NewName(lookup("&" + v.Sym().Name)) addr.SetType(types.NewPtr(v.Type())) addr.SetClass(ir.PAUTO) - addr.Name().SetUsed(true) - addr.Name().Curfn = dcl + addr.SetUsed(true) + addr.Curfn = fn fn.Dcl = append(fn.Dcl, addr) - v.Name().Heapaddr = addr - if v.Name().Byval() { + v.Heapaddr = addr + if v.Byval() { cv = ir.Nod(ir.OADDR, cv, nil) } body = append(body, ir.Nod(ir.OAS, addr, cv)) @@ -334,7 +330,7 @@ func transformclosure(dcl ir.Node) { // hasemptycvars reports whether closure clo has an // empty list of captured vars. func hasemptycvars(clo ir.Node) bool { - return clo.Func().ClosureVars.Len() == 0 + return len(clo.Func().ClosureVars) == 0 } // closuredebugruntimecheck applies boilerplate checks for debug flags @@ -372,9 +368,9 @@ func closureType(clo ir.Node) *types.Type { fields := []ir.Node{ namedfield(".F", types.Types[types.TUINTPTR]), } - for _, v := range clo.Func().ClosureVars.Slice() { + for _, v := range clo.Func().ClosureVars { typ := v.Type() - if !v.Name().Byval() { + if !v.Byval() { typ = types.NewPtr(typ) } fields = append(fields, symfield(v.Sym(), typ)) @@ -430,23 +426,24 @@ func typecheckpartialcall(dot ir.Node, sym *types.Sym) { } // Create top-level function. - dcl := makepartialcall(dot, dot.Type(), sym) - dcl.Func().SetWrapper(true) + fn := makepartialcall(dot, dot.Type(), sym) + fn.SetWrapper(true) + dot.SetOp(ir.OCALLPART) dot.SetRight(NewName(sym)) - dot.SetType(dcl.Type()) - dot.SetFunc(dcl.Func()) + dot.SetType(fn.Type()) + dot.SetFunc(fn) dot.SetOpt(nil) // clear types.Field from ODOTMETH } // makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed // for partial calls. -func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) ir.Node { +func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) *ir.Func { rcvrtype := dot.Left().Type() sym := methodSymSuffix(rcvrtype, meth, "-fm") if sym.Uniq() { - return ir.AsNode(sym.Def) + return ir.AsNode(sym.Def).(*ir.Func) } sym.SetUniq(true) @@ -469,8 +466,7 @@ func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) ir.Node { tfn.PtrList().Set(structargs(t0.Params(), true)) tfn.PtrRlist().Set(structargs(t0.Results(), false)) - dcl := dclfunc(sym, tfn) - fn := dcl.Func() + fn := dclfunc(sym, tfn) fn.SetDupok(true) fn.SetNeedctxt(true) @@ -484,7 +480,7 @@ func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) ir.Node { ptr := NewName(lookup(".this")) declare(ptr, ir.PAUTO) - ptr.Name().SetUsed(true) + ptr.SetUsed(true) var body []ir.Node if rcvrtype.IsPtr() || rcvrtype.IsInterface() { ptr.SetType(rcvrtype) @@ -504,20 +500,20 @@ func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) ir.Node { } body = append(body, call) - dcl.PtrBody().Set(body) + fn.PtrBody().Set(body) funcbody() - dcl = typecheck(dcl, ctxStmt) + typecheckFunc(fn) // Need to typecheck the body of the just-generated wrapper. // typecheckslice() requires that Curfn is set when processing an ORETURN. 
- Curfn = dcl - typecheckslice(dcl.Body().Slice(), ctxStmt) - sym.Def = dcl - xtop = append(xtop, dcl) + Curfn = fn + typecheckslice(fn.Body().Slice(), ctxStmt) + sym.Def = fn + xtop = append(xtop, fn) Curfn = savecurfn base.Pos = saveLineNo - return dcl + return fn } // partialCallType returns the struct type used to hold all the information diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 04e6e7a596a7c..2bcee269d9f1d 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -58,7 +58,7 @@ var declare_typegen int // declare records that Node n declares symbol n.Sym in the specified // declaration context. -func declare(n ir.Node, ctxt ir.Class) { +func declare(n *ir.Name, ctxt ir.Class) { if ir.IsBlank(n) { return } @@ -85,7 +85,7 @@ func declare(n ir.Node, ctxt ir.Class) { base.Fatalf("automatic outside function") } if Curfn != nil && ctxt != ir.PFUNC { - Curfn.Func().Dcl = append(Curfn.Func().Dcl, n) + Curfn.Dcl = append(Curfn.Dcl, n) } if n.Op() == ir.OTYPE { declare_typegen++ @@ -122,7 +122,7 @@ func declare(n ir.Node, ctxt ir.Class) { autoexport(n, ctxt) } -func addvar(n ir.Node, t *types.Type, ctxt ir.Class) { +func addvar(n *ir.Name, t *types.Type, ctxt ir.Class) { if n == nil || n.Sym() == nil || (n.Op() != ir.ONAME && n.Op() != ir.ONONAME) || t == nil { base.Fatalf("addvar: n=%v t=%v nil", n, t) } @@ -144,10 +144,11 @@ func variter(vl []ir.Node, t ir.Node, el []ir.Node) []ir.Node { as2.PtrList().Set(vl) as2.PtrRlist().Set1(e) for _, v := range vl { + v := v.(*ir.Name) v.SetOp(ir.ONAME) declare(v, dclcontext) - v.Name().Ntype = t - v.Name().Defn = as2 + v.Ntype = t + v.Defn = as2 if Curfn != nil { init = append(init, ir.Nod(ir.ODCL, v, nil)) } @@ -158,6 +159,7 @@ func variter(vl []ir.Node, t ir.Node, el []ir.Node) []ir.Node { nel := len(el) for _, v := range vl { + v := v.(*ir.Name) var e ir.Node if doexpr { if len(el) == 0 { @@ -170,7 +172,7 @@ func variter(vl []ir.Node, t ir.Node, el []ir.Node) []ir.Node { v.SetOp(ir.ONAME) declare(v, dclcontext) - v.Name().Ntype = t + v.Ntype = t if e != nil || Curfn != nil || ir.IsBlank(v) { if Curfn != nil { @@ -179,7 +181,7 @@ func variter(vl []ir.Node, t ir.Node, el []ir.Node) []ir.Node { e = ir.Nod(ir.OAS, v, e) init = append(init, e) if e.Right() != nil { - v.Name().Defn = e + v.Defn = e } } } @@ -200,10 +202,10 @@ func newnoname(s *types.Sym) ir.Node { return n } -// newfuncnamel generates a new name node for a function or method. -func newfuncnamel(pos src.XPos, s *types.Sym, fn *ir.Func) ir.Node { +// newFuncNameAt generates a new name node for a function or method. +func newFuncNameAt(pos src.XPos, s *types.Sym, fn *ir.Func) *ir.Name { if fn.Nname != nil { - base.Fatalf("newfuncnamel - already have name") + base.Fatalf("newFuncName - already have name") } n := ir.NewNameAt(pos, s) n.SetFunc(fn) @@ -271,20 +273,20 @@ func oldname(s *types.Sym) ir.Node { // the := it looks like a reference to the outer x so we'll // make x a closure variable unnecessarily. c := n.Name().Innermost - if c == nil || c.Name().Curfn != Curfn { + if c == nil || c.Curfn != Curfn { // Do not have a closure var for the active closure yet; make one. c = NewName(s) c.SetClass(ir.PAUTOHEAP) - c.Name().SetIsClosureVar(true) + c.SetIsClosureVar(true) c.SetIsDDD(n.IsDDD()) - c.Name().Defn = n + c.Defn = n // Link into list of active closure variables. // Popped from list in func funcLit. 
- c.Name().Outer = n.Name().Innermost + c.Outer = n.Name().Innermost n.Name().Innermost = c - Curfn.Func().ClosureVars.Append(c) + Curfn.ClosureVars = append(Curfn.ClosureVars, c) } // return ref to closure var, not original @@ -349,7 +351,7 @@ func colasdefn(left []ir.Node, defn ir.Node) { } nnew++ - n = NewName(n.Sym()) + n := NewName(n.Sym()) declare(n, dclcontext) n.Name().Defn = defn defn.PtrInit().Append(ir.Nod(ir.ODCL, n, nil)) @@ -377,18 +379,18 @@ func ifacedcl(n ir.Node) { // and declare the arguments. // called in extern-declaration context // returns in auto-declaration context. -func funchdr(n ir.Node) { +func funchdr(fn *ir.Func) { // change the declaration context from extern to auto funcStack = append(funcStack, funcStackEnt{Curfn, dclcontext}) - Curfn = n + Curfn = fn dclcontext = ir.PAUTO types.Markdcl() - if n.Func().Nname != nil && n.Func().Nname.Name().Ntype != nil { - funcargs(n.Func().Nname.Name().Ntype) + if fn.Nname != nil && fn.Nname.Ntype != nil { + funcargs(fn.Nname.Ntype) } else { - funcargs2(n.Type()) + funcargs2(fn.Type()) } } @@ -450,10 +452,11 @@ func funcarg(n ir.Node, ctxt ir.Class) { return } - n.SetRight(ir.NewNameAt(n.Pos(), n.Sym())) - n.Right().Name().Ntype = n.Left() - n.Right().SetIsDDD(n.IsDDD()) - declare(n.Right(), ctxt) + name := ir.NewNameAt(n.Pos(), n.Sym()) + n.SetRight(name) + name.Ntype = n.Left() + name.SetIsDDD(n.IsDDD()) + declare(name, ctxt) vargen++ n.Right().Name().Vargen = int32(vargen) @@ -492,7 +495,7 @@ func funcarg2(f *types.Field, ctxt ir.Class) { var funcStack []funcStackEnt // stack of previous values of Curfn/dclcontext type funcStackEnt struct { - curfn ir.Node + curfn *ir.Func dclcontext ir.Class } @@ -937,18 +940,18 @@ func setNodeNameFunc(n ir.Node) { n.Sym().SetFunc(true) } -func dclfunc(sym *types.Sym, tfn ir.Node) ir.Node { +func dclfunc(sym *types.Sym, tfn ir.Node) *ir.Func { if tfn.Op() != ir.OTFUNC { base.Fatalf("expected OTFUNC node, got %v", tfn) } - fn := ir.Nod(ir.ODCLFUNC, nil, nil) - fn.Func().Nname = newfuncnamel(base.Pos, sym, fn.Func()) - fn.Func().Nname.Name().Defn = fn - fn.Func().Nname.Name().Ntype = tfn - setNodeNameFunc(fn.Func().Nname) + fn := ir.NewFunc(base.Pos) + fn.Nname = newFuncNameAt(base.Pos, sym, fn) + fn.Nname.Defn = fn + fn.Nname.Ntype = tfn + setNodeNameFunc(fn.Nname) funchdr(fn) - fn.Func().Nname.Name().Ntype = typecheck(fn.Func().Nname.Name().Ntype, ctxType) + fn.Nname.Ntype = typecheck(fn.Nname.Ntype, ctxType) return fn } @@ -959,11 +962,11 @@ type nowritebarrierrecChecker struct { extraCalls map[ir.Node][]nowritebarrierrecCall // curfn is the current function during AST walks. 
- curfn ir.Node + curfn *ir.Func } type nowritebarrierrecCall struct { - target ir.Node // ODCLFUNC of caller or callee + target *ir.Func // caller or callee lineno src.XPos // line of call } @@ -983,7 +986,7 @@ func newNowritebarrierrecChecker() *nowritebarrierrecChecker { if n.Op() != ir.ODCLFUNC { continue } - c.curfn = n + c.curfn = n.(*ir.Func) ir.Inspect(n, c.findExtraCalls) } c.curfn = nil @@ -1002,13 +1005,13 @@ func (c *nowritebarrierrecChecker) findExtraCalls(n ir.Node) bool { return true } - var callee ir.Node + var callee *ir.Func arg := n.List().First() switch arg.Op() { case ir.ONAME: - callee = arg.Name().Defn + callee = arg.Name().Defn.(*ir.Func) case ir.OCLOSURE: - callee = arg.Func().Decl + callee = arg.Func() default: base.Fatalf("expected ONAME or OCLOSURE node, got %+v", arg) } @@ -1027,13 +1030,8 @@ func (c *nowritebarrierrecChecker) findExtraCalls(n ir.Node) bool { // because that's all we know after we start SSA. // // This can be called concurrently for different from Nodes. -func (c *nowritebarrierrecChecker) recordCall(from ir.Node, to *obj.LSym, pos src.XPos) { - if from.Op() != ir.ODCLFUNC { - base.Fatalf("expected ODCLFUNC, got %v", from) - } - // We record this information on the *Func so this is - // concurrent-safe. - fn := from.Func() +func (c *nowritebarrierrecChecker) recordCall(fn *ir.Func, to *obj.LSym, pos src.XPos) { + // We record this information on the *Func so this is concurrent-safe. if fn.NWBRCalls == nil { fn.NWBRCalls = new([]ir.SymAndPos) } @@ -1045,7 +1043,7 @@ func (c *nowritebarrierrecChecker) check() { // capture all calls created by lowering, but this means we // only get to see the obj.LSyms of calls. symToFunc lets us // get back to the ODCLFUNCs. - symToFunc := make(map[*obj.LSym]ir.Node) + symToFunc := make(map[*obj.LSym]*ir.Func) // funcs records the back-edges of the BFS call graph walk. It // maps from the ODCLFUNC of each function that must not have // write barriers to the call that inhibits them. Functions @@ -1060,24 +1058,25 @@ func (c *nowritebarrierrecChecker) check() { if n.Op() != ir.ODCLFUNC { continue } + fn := n.(*ir.Func) - symToFunc[n.Func().LSym] = n + symToFunc[fn.LSym] = fn // Make nowritebarrierrec functions BFS roots. - if n.Func().Pragma&ir.Nowritebarrierrec != 0 { - funcs[n] = nowritebarrierrecCall{} - q.PushRight(n) + if fn.Pragma&ir.Nowritebarrierrec != 0 { + funcs[fn] = nowritebarrierrecCall{} + q.PushRight(fn) } // Check go:nowritebarrier functions. - if n.Func().Pragma&ir.Nowritebarrier != 0 && n.Func().WBPos.IsKnown() { - base.ErrorfAt(n.Func().WBPos, "write barrier prohibited") + if fn.Pragma&ir.Nowritebarrier != 0 && fn.WBPos.IsKnown() { + base.ErrorfAt(fn.WBPos, "write barrier prohibited") } } // Perform a BFS of the call graph from all // go:nowritebarrierrec functions. - enqueue := func(src, target ir.Node, pos src.XPos) { - if target.Func().Pragma&ir.Yeswritebarrierrec != 0 { + enqueue := func(src, target *ir.Func, pos src.XPos) { + if target.Pragma&ir.Yeswritebarrierrec != 0 { // Don't flow into this function. return } @@ -1091,17 +1090,17 @@ func (c *nowritebarrierrecChecker) check() { q.PushRight(target) } for !q.Empty() { - fn := q.PopLeft() + fn := q.PopLeft().(*ir.Func) // Check fn. 
- if fn.Func().WBPos.IsKnown() { + if fn.WBPos.IsKnown() { var err bytes.Buffer call := funcs[fn] for call.target != nil { - fmt.Fprintf(&err, "\n\t%v: called by %v", base.FmtPos(call.lineno), call.target.Func().Nname) + fmt.Fprintf(&err, "\n\t%v: called by %v", base.FmtPos(call.lineno), call.target.Nname) call = funcs[call.target] } - base.ErrorfAt(fn.Func().WBPos, "write barrier prohibited by caller; %v%s", fn.Func().Nname, err.String()) + base.ErrorfAt(fn.WBPos, "write barrier prohibited by caller; %v%s", fn.Nname, err.String()) continue } @@ -1109,10 +1108,10 @@ func (c *nowritebarrierrecChecker) check() { for _, callee := range c.extraCalls[fn] { enqueue(fn, callee.target, callee.lineno) } - if fn.Func().NWBRCalls == nil { + if fn.NWBRCalls == nil { continue } - for _, callee := range *fn.Func().NWBRCalls { + for _, callee := range *fn.NWBRCalls { target := symToFunc[callee.Sym] if target != nil { enqueue(fn, target, callee.Pos) diff --git a/src/cmd/compile/internal/gc/dwinl.go b/src/cmd/compile/internal/gc/dwinl.go index 1e4e43caadd65..d9eb930037456 100644 --- a/src/cmd/compile/internal/gc/dwinl.go +++ b/src/cmd/compile/internal/gc/dwinl.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/internal/dwarf" "cmd/internal/obj" "cmd/internal/src" @@ -211,6 +212,7 @@ func genAbstractFunc(fn *obj.LSym) { base.Ctxt.Diag("failed to locate precursor fn for %v", fn) return } + _ = ifn.(*ir.Func) if base.Debug.DwarfInl != 0 { base.Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name) } diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 351643ef5d4c3..4bddb7f0f46c4 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -87,7 +87,7 @@ type Escape struct { allLocs []*EscLocation labels map[*types.Sym]labelState // known labels - curfn ir.Node + curfn *ir.Func // loopDepth counts the current loop nesting depth within // curfn. It increments within each "for" loop and at each @@ -103,7 +103,7 @@ type Escape struct { // variable. type EscLocation struct { n ir.Node // represented variable or expression, if any - curfn ir.Node // enclosing function + curfn *ir.Func // enclosing function edges []EscEdge // incoming edges loopDepth int // loopDepth at declaration @@ -180,7 +180,7 @@ func escFmt(n ir.Node, short bool) string { // escapeFuncs performs escape analysis on a minimal batch of // functions. -func escapeFuncs(fns []ir.Node, recursive bool) { +func escapeFuncs(fns []*ir.Func, recursive bool) { for _, fn := range fns { if fn.Op() != ir.ODCLFUNC { base.Fatalf("unexpected node: %v", fn) @@ -203,8 +203,8 @@ func escapeFuncs(fns []ir.Node, recursive bool) { e.finish(fns) } -func (e *Escape) initFunc(fn ir.Node) { - if fn.Op() != ir.ODCLFUNC || fn.Esc() != EscFuncUnknown { +func (e *Escape) initFunc(fn *ir.Func) { + if fn.Esc() != EscFuncUnknown { base.Fatalf("unexpected node: %v", fn) } fn.SetEsc(EscFuncPlanned) @@ -216,14 +216,14 @@ func (e *Escape) initFunc(fn ir.Node) { e.loopDepth = 1 // Allocate locations for local variables. - for _, dcl := range fn.Func().Dcl { + for _, dcl := range fn.Dcl { if dcl.Op() == ir.ONAME { e.newLoc(dcl, false) } } } -func (e *Escape) walkFunc(fn ir.Node) { +func (e *Escape) walkFunc(fn *ir.Func) { fn.SetEsc(EscFuncStarted) // Identify labels that mark the head of an unstructured loop. 
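The "unstructured loop" mentioned in the context line above is one built
from a label and a backward goto rather than a for statement; a minimal
illustration (not part of the patch):

	func count() {
		i := 0
	loop:
		if i < 10 {
			i++
			goto loop // back edge: "loop" is the head of an unstructured loop
		}
	}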
@@ -589,7 +589,8 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { for i := m.Type.NumResults(); i > 0; i-- { ks = append(ks, e.heapHole()) } - paramK := e.tagHole(ks, ir.AsNode(m.Nname), m.Type.Recv()) + name, _ := m.Nname.(*ir.Name) + paramK := e.tagHole(ks, name, m.Type.Recv()) e.expr(e.teeHole(paramK, closureK), n.Left()) @@ -633,17 +634,13 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { k = e.spill(k, n) // Link addresses of captured variables to closure. - for _, v := range n.Func().ClosureVars.Slice() { - if v.Op() == ir.OXXX { // unnamed out argument; see dcl.go:/^funcargs - continue - } - + for _, v := range n.Func().ClosureVars { k := k - if !v.Name().Byval() { + if !v.Byval() { k = k.addr(v, "reference") } - e.expr(k.note(n, "captured by a closure"), v.Name().Defn) + e.expr(k.note(n, "captured by a closure"), v.Defn) } case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR: @@ -813,12 +810,12 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) { fixVariadicCall(call) // Pick out the function callee, if statically known. - var fn ir.Node + var fn *ir.Name switch call.Op() { case ir.OCALLFUNC: switch v := staticValue(call.Left()); { case v.Op() == ir.ONAME && v.Class() == ir.PFUNC: - fn = v + fn = v.(*ir.Name) case v.Op() == ir.OCLOSURE: fn = v.Func().Nname } @@ -902,7 +899,7 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) { // ks should contain the holes representing where the function // callee's results flows. fn is the statically-known callee function, // if any. -func (e *Escape) tagHole(ks []EscHole, fn ir.Node, param *types.Field) EscHole { +func (e *Escape) tagHole(ks []EscHole, fn *ir.Name, param *types.Field) EscHole { // If this is a dynamic call, we can't rely on param.Note. if fn == nil { return e.heapHole() @@ -943,9 +940,9 @@ func (e *Escape) tagHole(ks []EscHole, fn ir.Node, param *types.Field) EscHole { // fn has not yet been analyzed, so its parameters and results // should be incorporated directly into the flow graph instead of // relying on its escape analysis tagging. -func (e *Escape) inMutualBatch(fn ir.Node) bool { - if fn.Name().Defn != nil && fn.Name().Defn.Esc() < EscFuncTagged { - if fn.Name().Defn.Esc() == EscFuncUnknown { +func (e *Escape) inMutualBatch(fn *ir.Name) bool { + if fn.Defn != nil && fn.Defn.Esc() < EscFuncTagged { + if fn.Defn.Esc() == EscFuncUnknown { base.Fatalf("graph inconsistency") } return true @@ -1368,7 +1365,7 @@ func (e *Escape) outlives(l, other *EscLocation) bool { // // var u int // okay to stack allocate // *(func() *int { return &u }()) = 42 - if containsClosure(other.curfn, l.curfn) && l.curfn.Func().ClosureCalled { + if containsClosure(other.curfn, l.curfn) && l.curfn.ClosureCalled() { return false } @@ -1402,11 +1399,7 @@ func (e *Escape) outlives(l, other *EscLocation) bool { } // containsClosure reports whether c is a closure contained within f. -func containsClosure(f, c ir.Node) bool { - if f.Op() != ir.ODCLFUNC || c.Op() != ir.ODCLFUNC { - base.Fatalf("bad containsClosure: %v, %v", f, c) - } - +func containsClosure(f, c *ir.Func) bool { // Common case. if f == c { return false @@ -1414,8 +1407,8 @@ func containsClosure(f, c ir.Node) bool { // Closures within function Foo are named like "Foo.funcN..." // TODO(mdempsky): Better way to recognize this. - fn := f.Func().Nname.Sym().Name - cn := c.Func().Nname.Sym().Name + fn := f.Sym().Name + cn := c.Sym().Name return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.' 
} @@ -1437,7 +1430,7 @@ func (l *EscLocation) leakTo(sink *EscLocation, derefs int) { l.paramEsc.AddHeap(derefs) } -func (e *Escape) finish(fns []ir.Node) { +func (e *Escape) finish(fns []*ir.Func) { // Record parameter tags for package export data. for _, fn := range fns { fn.SetEsc(EscFuncTagged) @@ -1614,12 +1607,12 @@ const ( EscNever // By construction will not escape. ) -// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way. -func funcSym(fn ir.Node) *types.Sym { - if fn == nil || fn.Func().Nname == nil { +// funcSym returns fn.Nname.Sym if no nils are encountered along the way. +func funcSym(fn *ir.Func) *types.Sym { + if fn == nil || fn.Nname == nil { return nil } - return fn.Func().Nname.Sym() + return fn.Sym() } // Mark labels that have no backjumps to them as not increasing e.loopdepth. @@ -1798,6 +1791,7 @@ func addrescapes(n ir.Node) { // Nothing to do. case ir.ONAME: + n := n.(*ir.Name) if n == nodfp { break } @@ -1832,10 +1826,6 @@ func addrescapes(n ir.Node) { // heap in f, not in the inner closure. Flip over to f before calling moveToHeap. oldfn := Curfn Curfn = n.Name().Curfn - if Curfn.Op() == ir.OCLOSURE { - Curfn = Curfn.Func().Decl - panic("can't happen") - } ln := base.Pos base.Pos = Curfn.Pos() moveToHeap(n) @@ -1855,7 +1845,7 @@ func addrescapes(n ir.Node) { } // moveToHeap records the parameter or local variable n as moved to the heap. -func moveToHeap(n ir.Node) { +func moveToHeap(n *ir.Name) { if base.Flag.LowerR != 0 { ir.Dump("MOVE", n) } @@ -1877,7 +1867,7 @@ func moveToHeap(n ir.Node) { // Unset AutoTemp to persist the &foo variable name through SSA to // liveness analysis. // TODO(mdempsky/drchase): Cleaner solution? - heapaddr.Name().SetAutoTemp(false) + heapaddr.SetAutoTemp(false) // Parameters have a local stack copy used at function start/end // in addition to the copy in the heap that may live longer than @@ -1895,14 +1885,14 @@ func moveToHeap(n ir.Node) { stackcopy.SetType(n.Type()) stackcopy.SetOffset(n.Offset()) stackcopy.SetClass(n.Class()) - stackcopy.Name().Heapaddr = heapaddr + stackcopy.Heapaddr = heapaddr if n.Class() == ir.PPARAMOUT { // Make sure the pointer to the heap copy is kept live throughout the function. // The function could panic at any point, and then a defer could recover. // Thus, we need the pointer to the heap copy always available so the // post-deferreturn code can copy the return value back to the stack. // See issue 16095. - heapaddr.Name().SetIsOutputParamHeapAddr(true) + heapaddr.SetIsOutputParamHeapAddr(true) } n.Name().Stackcopy = stackcopy @@ -1910,9 +1900,9 @@ func moveToHeap(n ir.Node) { // liveness and other analyses use the underlying stack slot // and not the now-pseudo-variable n. found := false - for i, d := range Curfn.Func().Dcl { + for i, d := range Curfn.Dcl { if d == n { - Curfn.Func().Dcl[i] = stackcopy + Curfn.Dcl[i] = stackcopy found = true break } @@ -1925,7 +1915,7 @@ func moveToHeap(n ir.Node) { if !found { base.Fatalf("cannot find %v in local variable list", n) } - Curfn.Func().Dcl = append(Curfn.Func().Dcl, n) + Curfn.Dcl = append(Curfn.Dcl, n) } // Modify n in place so that uses of n now mean indirection of the heapaddr. 
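For context on addrescapes and moveToHeap in the hunks above: they rewrite
a local whose address outlives its frame so that accesses go through a heap
copy linked via Heapaddr. A minimal example of code that forces this
(illustrative, not from the patch):

	func leak() *int {
		x := 42
		return &x // &x escapes the frame, so x gets a heap copy
	}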
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 10033793bf7a7..5cd379a7d38e3 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -161,8 +161,12 @@ func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) { if n == nil { return } + name := n.(*ir.Name) - n.SetFunc(new(ir.Func)) + fn := ir.NewFunc(pos) + fn.SetType(t) + name.SetFunc(fn) + fn.Nname = name if base.Flag.E != 0 { fmt.Printf("import func %v%S\n", s, t) diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go index cf9e0d58bfcfd..0d3f9392fbc0e 100644 --- a/src/cmd/compile/internal/gc/gen.go +++ b/src/cmd/compile/internal/gc/gen.go @@ -52,7 +52,7 @@ func autotmpname(n int) string { } // make a new Node off the books -func tempAt(pos src.XPos, curfn ir.Node, t *types.Type) *ir.Name { +func tempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name { if curfn == nil { base.Fatalf("no curfn for tempAt") } @@ -65,7 +65,7 @@ func tempAt(pos src.XPos, curfn ir.Node, t *types.Type) *ir.Name { } s := &types.Sym{ - Name: autotmpname(len(curfn.Func().Dcl)), + Name: autotmpname(len(curfn.Dcl)), Pkg: ir.LocalPkg, } n := ir.NewNameAt(pos, s) @@ -73,10 +73,10 @@ func tempAt(pos src.XPos, curfn ir.Node, t *types.Type) *ir.Name { n.SetType(t) n.SetClass(ir.PAUTO) n.SetEsc(EscNever) - n.Name().Curfn = curfn - n.Name().SetUsed(true) - n.Name().SetAutoTemp(true) - curfn.Func().Dcl = append(curfn.Func().Dcl, n) + n.Curfn = curfn + n.SetUsed(true) + n.SetAutoTemp(true) + curfn.Dcl = append(curfn.Dcl, n) dowidth(t) diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index 84e6bc5faf388..24393de53d6d1 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -132,7 +132,7 @@ var xtop []ir.Node var exportlist []ir.Node -var importlist []ir.Node // imported functions and methods with inlinable bodies +var importlist []*ir.Func // imported functions and methods with inlinable bodies var ( funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym) @@ -141,7 +141,7 @@ var ( var dclcontext ir.Class // PEXTERN/PAUTO -var Curfn ir.Node +var Curfn *ir.Func var Widthptr int @@ -156,7 +156,7 @@ var instrumenting bool // Whether we are tracking lexical scopes for DWARF. var trackScopes bool -var nodfp ir.Node +var nodfp *ir.Name var autogeneratedPos src.XPos diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index 950033a8a3189..79ca669dfbba9 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -47,7 +47,7 @@ type Progs struct { next *obj.Prog // next Prog pc int64 // virtual PC; count of Progs pos src.XPos // position to use for new Progs - curfn ir.Node // fn these Progs are for + curfn *ir.Func // fn these Progs are for progcache []obj.Prog // local progcache cacheidx int // first free element of progcache @@ -57,7 +57,7 @@ type Progs struct { // newProgs returns a new Progs for fn. // worker indicates which of the backend workers will use the Progs. 
-func newProgs(fn ir.Node, worker int) *Progs { +func newProgs(fn *ir.Func, worker int) *Progs { pp := new(Progs) if base.Ctxt.CanReuseProgs() { sz := len(sharedProgArray) / base.Flag.LowerC @@ -174,17 +174,17 @@ func (pp *Progs) Appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16 return q } -func (pp *Progs) settext(fn ir.Node) { +func (pp *Progs) settext(fn *ir.Func) { if pp.Text != nil { base.Fatalf("Progs.settext called twice") } ptxt := pp.Prog(obj.ATEXT) pp.Text = ptxt - fn.Func().LSym.Func().Text = ptxt + fn.LSym.Func().Text = ptxt ptxt.From.Type = obj.TYPE_MEM ptxt.From.Name = obj.NAME_EXTERN - ptxt.From.Sym = fn.Func().LSym + ptxt.From.Sym = fn.LSym } // initLSym defines f's obj.LSym and initializes it based on the @@ -281,7 +281,7 @@ func initLSym(f *ir.Func, hasBody bool) { // See test/recover.go for test cases and src/reflect/value.go // for the actual functions being considered. if base.Ctxt.Pkgpath == "reflect" { - switch f.Nname.Sym().Name { + switch f.Sym().Name { case "callReflect", "callMethod": flag |= obj.WRAPPER } diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 88d3a6477c0cf..3f5ec2e4ddbc1 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -429,6 +429,7 @@ func (p *iexporter) doDecl(n ir.Node) { switch n.Op() { case ir.ONAME: + n := n.(*ir.Name) switch n.Class() { case ir.PEXTERN: // Variable. @@ -515,7 +516,7 @@ func (w *exportWriter) tag(tag byte) { w.data.WriteByte(tag) } -func (p *iexporter) doInline(f ir.Node) { +func (p *iexporter) doInline(f *ir.Name) { w := p.newWriter() w.setPkg(fnpkg(f), false) @@ -960,7 +961,7 @@ func (w *exportWriter) varExt(n ir.Node) { w.symIdx(n.Sym()) } -func (w *exportWriter) funcExt(n ir.Node) { +func (w *exportWriter) funcExt(n *ir.Name) { w.linkname(n.Sym()) w.symIdx(n.Sym()) @@ -979,14 +980,7 @@ func (w *exportWriter) funcExt(n ir.Node) { } // Endlineno for inlined function. - if n.Name().Defn != nil { - w.pos(n.Name().Defn.Func().Endlineno) - } else { - // When the exported node was defined externally, - // e.g. io exports atomic.(*Value).Load or bytes exports errors.New. - // Keep it as we don't distinguish this case in iimport.go. 
- w.pos(n.Func().Endlineno) - } + w.pos(n.Func().Endlineno) } else { w.uint64(0) } @@ -994,7 +988,7 @@ func (w *exportWriter) funcExt(n ir.Node) { func (w *exportWriter) methExt(m *types.Field) { w.bool(m.Nointerface()) - w.funcExt(ir.AsNode(m.Nname)) + w.funcExt(ir.AsNode(m.Nname).(*ir.Name)) } func (w *exportWriter) linkname(s *types.Sym) { diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 066d956b9330c..5a50682ab21c9 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -41,7 +41,7 @@ var ( inlineImporter = map[*types.Sym]iimporterAndOffset{} ) -func expandDecl(n ir.Node) { +func expandDecl(n *ir.Name) { if n.Op() != ir.ONONAME { return } @@ -55,12 +55,12 @@ func expandDecl(n ir.Node) { r.doDecl(n) } -func expandInline(fn ir.Node) { - if fn.Func().Inl.Body != nil { +func expandInline(fn *ir.Func) { + if fn.Inl.Body != nil { return } - r := importReaderFor(fn, inlineImporter) + r := importReaderFor(fn.Nname, inlineImporter) if r == nil { base.Fatalf("missing import reader for %v", fn) } @@ -68,7 +68,7 @@ func expandInline(fn ir.Node) { r.doInline(fn) } -func importReaderFor(n ir.Node, importers map[*types.Sym]iimporterAndOffset) *importReader { +func importReaderFor(n *ir.Name, importers map[*types.Sym]iimporterAndOffset) *importReader { x, ok := importers[n.Sym()] if !ok { return nil @@ -331,7 +331,9 @@ func (r *importReader) doDecl(n ir.Node) { recv := r.param() mtyp := r.signature(recv) - m := newfuncnamel(mpos, methodSym(recv.Type, msym), new(ir.Func)) + fn := ir.NewFunc(mpos) + fn.SetType(mtyp) + m := newFuncNameAt(mpos, methodSym(recv.Type, msym), fn) m.SetType(mtyp) m.SetClass(ir.PFUNC) // methodSym already marked m.Sym as a function. @@ -501,7 +503,7 @@ func (r *importReader) typ1() *types.Type { // type. n := ir.AsNode(r.qualifiedIdent().PkgDef()) if n.Op() == ir.ONONAME { - expandDecl(n) + expandDecl(n.(*ir.Name)) } if n.Op() != ir.OTYPE { base.Fatalf("expected OTYPE, got %v: %v, %v", n.Op(), n.Sym(), n) @@ -695,12 +697,12 @@ func (r *importReader) typeExt(t *types.Type) { // so we can use index to reference the symbol. var typeSymIdx = make(map[*types.Type][2]int64) -func (r *importReader) doInline(n ir.Node) { - if len(n.Func().Inl.Body) != 0 { - base.Fatalf("%v already has inline body", n) +func (r *importReader) doInline(fn *ir.Func) { + if len(fn.Inl.Body) != 0 { + base.Fatalf("%v already has inline body", fn) } - funchdr(n) + funchdr(fn) body := r.stmtList() funcbody() if body == nil { @@ -712,15 +714,15 @@ func (r *importReader) doInline(n ir.Node) { // functions). 
body = []ir.Node{} } - n.Func().Inl.Body = body + fn.Inl.Body = body - importlist = append(importlist, n) + importlist = append(importlist, fn) if base.Flag.E > 0 && base.Flag.LowerM > 2 { if base.Flag.LowerM > 3 { - fmt.Printf("inl body for %v %#v: %+v\n", n, n.Type(), ir.AsNodes(n.Func().Inl.Body)) + fmt.Printf("inl body for %v %#v: %+v\n", fn, fn.Type(), ir.AsNodes(fn.Inl.Body)) } else { - fmt.Printf("inl body for %v %#v: %v\n", n, n.Type(), ir.AsNodes(n.Func().Inl.Body)) + fmt.Printf("inl body for %v %#v: %v\n", fn, fn.Type(), ir.AsNodes(fn.Inl.Body)) } } } @@ -772,7 +774,7 @@ func (r *importReader) caseList(sw ir.Node) []ir.Node { caseVar := ir.NewNameAt(cas.Pos(), r.ident()) declare(caseVar, dclcontext) cas.PtrRlist().Set1(caseVar) - caseVar.Name().Defn = sw.Left() + caseVar.Defn = sw.Left() } cas.PtrBody().Set(r.stmtList()) cases[i] = cas diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index 2b7ecd1d05d84..7f2a39ff464ff 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -19,7 +19,7 @@ var renameinitgen int // Function collecting autotmps generated during typechecking, // to be included in the package-level init function. -var initTodo = ir.Nod(ir.ODCLFUNC, nil, nil) +var initTodo = ir.NewFunc(base.Pos) func renameinit() *types.Sym { s := lookupN("init.", renameinitgen) @@ -49,23 +49,23 @@ func fninit(n []ir.Node) { base.Pos = nf[0].Pos() // prolog/epilog gets line number of first init stmt initializers := lookup("init") fn := dclfunc(initializers, ir.Nod(ir.OTFUNC, nil, nil)) - for _, dcl := range initTodo.Func().Dcl { + for _, dcl := range initTodo.Dcl { dcl.Name().Curfn = fn } - fn.Func().Dcl = append(fn.Func().Dcl, initTodo.Func().Dcl...) - initTodo.Func().Dcl = nil + fn.Dcl = append(fn.Dcl, initTodo.Dcl...) + initTodo.Dcl = nil fn.PtrBody().Set(nf) funcbody() - fn = typecheck(fn, ctxStmt) + typecheckFunc(fn) Curfn = fn typecheckslice(nf, ctxStmt) Curfn = nil xtop = append(xtop, fn) fns = append(fns, initializers.Linksym()) } - if initTodo.Func().Dcl != nil { + if initTodo.Dcl != nil { // We only generate temps using initTodo if there // are package-scope initialization statements, so // something's weird if we get here. diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go index 1003f131b8fc6..ea3d74d5ba084 100644 --- a/src/cmd/compile/internal/gc/initorder.go +++ b/src/cmd/compile/internal/gc/initorder.go @@ -284,11 +284,11 @@ func (d *initDeps) visit(n ir.Node) bool { case ir.ONAME: switch n.Class() { case ir.PEXTERN, ir.PFUNC: - d.foundDep(n) + d.foundDep(n.(*ir.Name)) } case ir.OCLOSURE: - d.inspectList(n.Func().Decl.Body()) + d.inspectList(n.Func().Body()) case ir.ODOTMETH, ir.OCALLPART: d.foundDep(methodExprName(n)) @@ -299,7 +299,7 @@ func (d *initDeps) visit(n ir.Node) bool { // foundDep records that we've found a dependency on n by adding it to // seen. -func (d *initDeps) foundDep(n ir.Node) { +func (d *initDeps) foundDep(n *ir.Name) { // Can happen with method expressions involving interface // types; e.g., fixedbugs/issue4495.go. if n == nil { @@ -308,7 +308,7 @@ func (d *initDeps) foundDep(n ir.Node) { // Names without definitions aren't interesting as far as // initialization ordering goes. 
- if n.Name().Defn == nil { + if n.Defn == nil { return } @@ -317,7 +317,7 @@ func (d *initDeps) foundDep(n ir.Node) { } d.seen.Add(n) if d.transitive && n.Class() == ir.PFUNC { - d.inspectList(n.Name().Defn.Body()) + d.inspectList(n.Defn.Body()) } } diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 102144aedf42d..20f145b8eba82 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -53,7 +53,7 @@ const ( // Get the function's package. For ordinary functions it's on the ->sym, but for imported methods // the ->sym can be re-used in the local package, so peel it off the receiver's type. -func fnpkg(fn ir.Node) *types.Pkg { +func fnpkg(fn *ir.Name) *types.Pkg { if ir.IsMethod(fn) { // method rcvr := fn.Type().Recv().Type @@ -73,8 +73,8 @@ func fnpkg(fn ir.Node) *types.Pkg { // Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck // because they're a copy of an already checked body. -func typecheckinl(fn ir.Node) { - lno := setlineno(fn) +func typecheckinl(fn *ir.Func) { + lno := setlineno(fn.Nname) expandInline(fn) @@ -82,19 +82,19 @@ func typecheckinl(fn ir.Node) { // their bodies may refer to unsafe as long as the package // was marked safe during import (which was checked then). // the ->inl of a local function has been typechecked before caninl copied it. - pkg := fnpkg(fn) + pkg := fnpkg(fn.Nname) if pkg == ir.LocalPkg || pkg == nil { return // typecheckinl on local function } if base.Flag.LowerM > 2 || base.Debug.Export != 0 { - fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym(), fn, ir.AsNodes(fn.Func().Inl.Body)) + fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym(), fn, ir.AsNodes(fn.Inl.Body)) } savefn := Curfn Curfn = fn - typecheckslice(fn.Func().Inl.Body, ctxStmt) + typecheckslice(fn.Inl.Body, ctxStmt) Curfn = savefn // During expandInline (which imports fn.Func.Inl.Body), @@ -102,8 +102,8 @@ func typecheckinl(fn ir.Node) { // to fn.Func.Inl.Dcl for consistency with how local functions // behave. (Append because typecheckinl may be called multiple // times.) - fn.Func().Inl.Dcl = append(fn.Func().Inl.Dcl, fn.Func().Dcl...) - fn.Func().Dcl = nil + fn.Inl.Dcl = append(fn.Inl.Dcl, fn.Dcl...) + fn.Dcl = nil base.Pos = lno } @@ -111,11 +111,8 @@ func typecheckinl(fn ir.Node) { // Caninl determines whether fn is inlineable. // If so, caninl saves fn->nbody in fn->inl and substitutes it with a copy. // fn and ->nbody will already have been typechecked. -func caninl(fn ir.Node) { - if fn.Op() != ir.ODCLFUNC { - base.Fatalf("caninl %v", fn) - } - if fn.Func().Nname == nil { +func caninl(fn *ir.Func) { + if fn.Nname == nil { base.Fatalf("caninl no nname %+v", fn) } @@ -124,7 +121,7 @@ func caninl(fn ir.Node) { defer func() { if reason != "" { if base.Flag.LowerM > 1 { - fmt.Printf("%v: cannot inline %v: %s\n", ir.Line(fn), fn.Func().Nname, reason) + fmt.Printf("%v: cannot inline %v: %s\n", ir.Line(fn), fn.Nname, reason) } if logopt.Enabled() { logopt.LogOpt(fn.Pos(), "cannotInlineFunction", "inline", ir.FuncName(fn), reason) @@ -134,33 +131,33 @@ func caninl(fn ir.Node) { } // If marked "go:noinline", don't inline - if fn.Func().Pragma&ir.Noinline != 0 { + if fn.Pragma&ir.Noinline != 0 { reason = "marked go:noinline" return } // If marked "go:norace" and -race compilation, don't inline. 
- if base.Flag.Race && fn.Func().Pragma&ir.Norace != 0 { + if base.Flag.Race && fn.Pragma&ir.Norace != 0 { reason = "marked go:norace with -race compilation" return } // If marked "go:nocheckptr" and -d checkptr compilation, don't inline. - if base.Debug.Checkptr != 0 && fn.Func().Pragma&ir.NoCheckPtr != 0 { + if base.Debug.Checkptr != 0 && fn.Pragma&ir.NoCheckPtr != 0 { reason = "marked go:nocheckptr" return } // If marked "go:cgo_unsafe_args", don't inline, since the // function makes assumptions about its argument frame layout. - if fn.Func().Pragma&ir.CgoUnsafeArgs != 0 { + if fn.Pragma&ir.CgoUnsafeArgs != 0 { reason = "marked go:cgo_unsafe_args" return } // If marked as "go:uintptrescapes", don't inline, since the // escape information is lost during inlining. - if fn.Func().Pragma&ir.UintptrEscapes != 0 { + if fn.Pragma&ir.UintptrEscapes != 0 { reason = "marked as having an escaping uintptr argument" return } @@ -169,7 +166,7 @@ func caninl(fn ir.Node) { // granularity, so inlining yeswritebarrierrec functions can // confuse it (#22342). As a workaround, disallow inlining // them for now. - if fn.Func().Pragma&ir.Yeswritebarrierrec != 0 { + if fn.Pragma&ir.Yeswritebarrierrec != 0 { reason = "marked go:yeswritebarrierrec" return } @@ -184,7 +181,7 @@ func caninl(fn ir.Node) { base.Fatalf("caninl on non-typechecked function %v", fn) } - n := fn.Func().Nname + n := fn.Nname if n.Func().InlinabilityChecked() { return } @@ -220,7 +217,7 @@ func caninl(fn ir.Node) { n.Func().Inl = &ir.Inline{ Cost: inlineMaxBudget - visitor.budget, - Dcl: inlcopylist(pruneUnusedAutos(n.Name().Defn.Func().Dcl, &visitor)), + Dcl: pruneUnusedAutos(n.Defn.Func().Dcl, &visitor), Body: inlcopylist(fn.Body().Slice()), } @@ -236,36 +233,38 @@ func caninl(fn ir.Node) { // inlFlood marks n's inline body for export and recursively ensures // all called functions are marked too. -func inlFlood(n ir.Node) { +func inlFlood(n *ir.Name) { if n == nil { return } if n.Op() != ir.ONAME || n.Class() != ir.PFUNC { base.Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op(), n.Class()) } - if n.Func() == nil { + fn := n.Func() + if fn == nil { base.Fatalf("inlFlood: missing Func on %v", n) } - if n.Func().Inl == nil { + if fn.Inl == nil { return } - if n.Func().ExportInline() { + if fn.ExportInline() { return } - n.Func().SetExportInline(true) + fn.SetExportInline(true) - typecheckinl(n) + typecheckinl(fn) // Recursively identify all referenced functions for // reexport. We want to include even non-called functions, // because after inlining they might be callable. - ir.InspectList(ir.AsNodes(n.Func().Inl.Body), func(n ir.Node) bool { + ir.InspectList(ir.AsNodes(fn.Inl.Body), func(n ir.Node) bool { switch n.Op() { - case ir.OMETHEXPR: + case ir.OMETHEXPR, ir.ODOTMETH: inlFlood(methodExprName(n)) case ir.ONAME: + n := n.(*ir.Name) switch n.Class() { case ir.PFUNC: inlFlood(n) @@ -274,10 +273,6 @@ func inlFlood(n ir.Node) { exportsym(n) } - case ir.ODOTMETH: - fn := methodExprName(n) - inlFlood(fn) - case ir.OCALLPART: // Okay, because we don't yet inline indirect // calls to method values. @@ -342,8 +337,8 @@ func (v *hairyVisitor) visit(n ir.Node) bool { break } - if fn := inlCallee(n.Left()); fn != nil && fn.Func().Inl != nil { - v.budget -= fn.Func().Inl.Cost + if fn := inlCallee(n.Left()); fn != nil && fn.Inl != nil { + v.budget -= fn.Inl.Cost break } @@ -503,7 +498,7 @@ func countNodes(n ir.Node) int { // Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any // calls made to inlineable functions. 
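caninl's early-outs read fn.Pragma directly now that fn is a *ir.Func. The pragma test itself is plain bitmask arithmetic; here is a self-contained sketch of that check-and-report shape, with hypothetical flag constants (the real ones live in package ir).

package main

import "fmt"

type PragmaFlag uint16

const (
	Noinline PragmaFlag = 1 << iota
	Norace
	CgoUnsafeArgs
)

// inlineBlocked mirrors the shape of the early-out checks above: the
// first matching flag yields a human-readable reason, and the empty
// string means no pragma forbids inlining.
func inlineBlocked(p PragmaFlag) string {
	switch {
	case p&Noinline != 0:
		return "marked go:noinline"
	case p&Norace != 0:
		return "marked go:norace with -race compilation"
	case p&CgoUnsafeArgs != 0:
		return "marked go:cgo_unsafe_args"
	}
	return ""
}

func main() {
	fmt.Println(inlineBlocked(Noinline | Norace)) // marked go:noinline
	fmt.Println(inlineBlocked(0) == "")           // true
}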
This is the external entry point. -func inlcalls(fn ir.Node) { +func inlcalls(fn *ir.Func) { savefn := Curfn Curfn = fn maxCost := int32(inlineMaxBudget) @@ -516,8 +511,8 @@ func inlcalls(fn ir.Node) { // but allow inlining if there is a recursion cycle of many functions. // Most likely, the inlining will stop before we even hit the beginning of // the cycle again, but the map catches the unusual case. - inlMap := make(map[ir.Node]bool) - fn = inlnode(fn, maxCost, inlMap) + inlMap := make(map[*ir.Func]bool) + fn = inlnode(fn, maxCost, inlMap).(*ir.Func) if fn != Curfn { base.Fatalf("inlnode replaced curfn") } @@ -558,7 +553,7 @@ func inlconv2list(n ir.Node) []ir.Node { return s } -func inlnodelist(l ir.Nodes, maxCost int32, inlMap map[ir.Node]bool) { +func inlnodelist(l ir.Nodes, maxCost int32, inlMap map[*ir.Func]bool) { s := l.Slice() for i := range s { s[i] = inlnode(s[i], maxCost, inlMap) @@ -578,7 +573,7 @@ func inlnodelist(l ir.Nodes, maxCost int32, inlMap map[ir.Node]bool) { // shorter and less complicated. // The result of inlnode MUST be assigned back to n, e.g. // n.Left = inlnode(n.Left) -func inlnode(n ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node { +func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool) ir.Node { if n == nil { return n } @@ -684,7 +679,7 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node { if isIntrinsicCall(n) { break } - if fn := inlCallee(n.Left()); fn != nil && fn.Func().Inl != nil { + if fn := inlCallee(n.Left()); fn != nil && fn.Inl != nil { n = mkinlcall(n, fn, maxCost, inlMap) } @@ -698,7 +693,7 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node { base.Fatalf("no function type for [%p] %+v\n", n.Left(), n.Left()) } - n = mkinlcall(n, methodExprName(n.Left()), maxCost, inlMap) + n = mkinlcall(n, methodExprName(n.Left()).Func(), maxCost, inlMap) } base.Pos = lno @@ -707,7 +702,7 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node { // inlCallee takes a function-typed expression and returns the underlying function ONAME // that it refers to if statically known. Otherwise, it returns nil. -func inlCallee(fn ir.Node) ir.Node { +func inlCallee(fn ir.Node) *ir.Func { fn = staticValue(fn) switch { case fn.Op() == ir.OMETHEXPR: @@ -718,13 +713,13 @@ func inlCallee(fn ir.Node) ir.Node { if n == nil || !types.Identical(n.Type().Recv().Type, fn.Left().Type()) { return nil } - return n + return n.Func() case fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC: - return fn + return fn.Func() case fn.Op() == ir.OCLOSURE: - c := fn.Func().Decl + c := fn.Func() caninl(c) - return c.Func().Nname + return c } return nil } @@ -777,7 +772,7 @@ FindRHS: base.Fatalf("RHS is nil: %v", defn) } - unsafe, _ := reassigned(n) + unsafe, _ := reassigned(n.(*ir.Name)) if unsafe { return nil } @@ -791,23 +786,15 @@ FindRHS: // useful for -m output documenting the reason for inhibited optimizations. // NB: global variables are always considered to be re-assigned. // TODO: handle initial declaration not including an assignment and followed by a single assignment? 
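inlcalls now seeds an inlMap keyed by *ir.Func to catch cycles of recursive inlining. The sketch below is a minimal stand-alone model of that pointer-keyed guard; Func and the toy call graph are hypothetical stand-ins, and as the comment above notes, the map only catches the unusual case where expansion winds all the way back to a function already being inlined.

package main

import "fmt"

type Func struct {
	name  string
	calls []*Func
}

// expand walks the call graph, refusing to re-enter a function that is
// already on the current expansion path: it marks fn before descending
// and unmarks it afterwards, in the spirit of mkinlcall's guard.
func expand(fn *Func, inlMap map[*Func]bool, depth int) {
	if inlMap[fn] {
		fmt.Printf("%*sskip %s (cycle)\n", depth*2, "", fn.name)
		return
	}
	inlMap[fn] = true
	fmt.Printf("%*sinline %s\n", depth*2, "", fn.name)
	for _, callee := range fn.calls {
		expand(callee, inlMap, depth+1)
	}
	inlMap[fn] = false
}

func main() {
	a := &Func{name: "a"}
	b := &Func{name: "b", calls: []*Func{a}}
	a.calls = []*Func{b} // a and b call each other
	expand(a, map[*Func]bool{}, 0)
}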
-func reassigned(n ir.Node) (bool, ir.Node) { +func reassigned(n *ir.Name) (bool, ir.Node) { if n.Op() != ir.ONAME { base.Fatalf("reassigned %v", n) } // no way to reliably check for no-reassignment of globals, assume it can be - if n.Name().Curfn == nil { + if n.Curfn == nil { return true, nil } - f := n.Name().Curfn - // There just might be a good reason for this although this can be pretty surprising: - // local variables inside a closure have Curfn pointing to the OCLOSURE node instead - // of the corresponding ODCLFUNC. - // We need to walk the function body to check for reassignments so we follow the - // linkage to the ODCLFUNC node as that is where body is held. - if f.Op() == ir.OCLOSURE { - f = f.Func().Decl - } + f := n.Curfn v := reassignVisitor{name: n} a := v.visitList(f.Body()) return a != nil, a @@ -863,13 +850,13 @@ func (v *reassignVisitor) visitList(l ir.Nodes) ir.Node { return nil } -func inlParam(t *types.Field, as ir.Node, inlvars map[ir.Node]ir.Node) ir.Node { +func inlParam(t *types.Field, as ir.Node, inlvars map[*ir.Name]ir.Node) ir.Node { n := ir.AsNode(t.Nname) if n == nil || ir.IsBlank(n) { return ir.BlankNode } - inlvar := inlvars[n] + inlvar := inlvars[n.(*ir.Name)] if inlvar == nil { base.Fatalf("missing inlvar for %v", n) } @@ -887,25 +874,25 @@ var inlgen int // parameters. // The result of mkinlcall MUST be assigned back to n, e.g. // n.Left = mkinlcall(n.Left, fn, isddd) -func mkinlcall(n, fn ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node { - if fn.Func().Inl == nil { +func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool) ir.Node { + if fn.Inl == nil { if logopt.Enabled() { logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(Curfn), fmt.Sprintf("%s cannot be inlined", ir.PkgFuncName(fn))) } return n } - if fn.Func().Inl.Cost > maxCost { + if fn.Inl.Cost > maxCost { // The inlined function body is too big. Typically we use this check to restrict // inlining into very big functions. See issue 26546 and 17566. if logopt.Enabled() { logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(Curfn), - fmt.Sprintf("cost %d of %s exceeds max large caller cost %d", fn.Func().Inl.Cost, ir.PkgFuncName(fn), maxCost)) + fmt.Sprintf("cost %d of %s exceeds max large caller cost %d", fn.Inl.Cost, ir.PkgFuncName(fn), maxCost)) } return n } - if fn == Curfn || fn.Name().Defn == Curfn { + if fn == Curfn { // Can't recursively inline a function into itself. if logopt.Enabled() { logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(Curfn))) @@ -939,7 +926,7 @@ func mkinlcall(n, fn ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node { // We have a function node, and it has an inlineable body. if base.Flag.LowerM > 1 { - fmt.Printf("%v: inlining call to %v %#v { %#v }\n", ir.Line(n), fn.Sym(), fn.Type(), ir.AsNodes(fn.Func().Inl.Body)) + fmt.Printf("%v: inlining call to %v %#v { %#v }\n", ir.Line(n), fn.Sym(), fn.Type(), ir.AsNodes(fn.Inl.Body)) } else if base.Flag.LowerM != 0 { fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn) } @@ -969,50 +956,48 @@ func mkinlcall(n, fn ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node { } // Make temp names to use instead of the originals. - inlvars := make(map[ir.Node]ir.Node) + inlvars := make(map[*ir.Name]ir.Node) // record formals/locals for later post-processing var inlfvars []ir.Node // Handle captured variables when inlining closures. 
- if fn.Name().Defn != nil { - if c := fn.Name().Defn.Func().OClosure; c != nil { - for _, v := range c.Func().ClosureVars.Slice() { - if v.Op() == ir.OXXX { - continue - } + if c := fn.OClosure; c != nil { + for _, v := range c.Func().ClosureVars { + if v.Op() == ir.OXXX { + continue + } - o := v.Name().Outer - // make sure the outer param matches the inlining location - // NB: if we enabled inlining of functions containing OCLOSURE or refined - // the reassigned check via some sort of copy propagation this would most - // likely need to be changed to a loop to walk up to the correct Param - if o == nil || (o.Name().Curfn != Curfn && o.Name().Curfn.Func().OClosure != Curfn) { - base.Fatalf("%v: unresolvable capture %v %v\n", ir.Line(n), fn, v) - } + o := v.Outer + // make sure the outer param matches the inlining location + // NB: if we enabled inlining of functions containing OCLOSURE or refined + // the reassigned check via some sort of copy propagation this would most + // likely need to be changed to a loop to walk up to the correct Param + if o == nil || (o.Curfn != Curfn && o.Curfn.OClosure != Curfn) { + base.Fatalf("%v: unresolvable capture %v %v\n", ir.Line(n), fn, v) + } - if v.Name().Byval() { - iv := typecheck(inlvar(v), ctxExpr) - ninit.Append(ir.Nod(ir.ODCL, iv, nil)) - ninit.Append(typecheck(ir.Nod(ir.OAS, iv, o), ctxStmt)) - inlvars[v] = iv - } else { - addr := NewName(lookup("&" + v.Sym().Name)) - addr.SetType(types.NewPtr(v.Type())) - ia := typecheck(inlvar(addr), ctxExpr) - ninit.Append(ir.Nod(ir.ODCL, ia, nil)) - ninit.Append(typecheck(ir.Nod(ir.OAS, ia, ir.Nod(ir.OADDR, o, nil)), ctxStmt)) - inlvars[addr] = ia - - // When capturing by reference, all occurrence of the captured var - // must be substituted with dereference of the temporary address - inlvars[v] = typecheck(ir.Nod(ir.ODEREF, ia, nil), ctxExpr) - } + if v.Byval() { + iv := typecheck(inlvar(v), ctxExpr) + ninit.Append(ir.Nod(ir.ODCL, iv, nil)) + ninit.Append(typecheck(ir.Nod(ir.OAS, iv, o), ctxStmt)) + inlvars[v] = iv + } else { + addr := NewName(lookup("&" + v.Sym().Name)) + addr.SetType(types.NewPtr(v.Type())) + ia := typecheck(inlvar(addr), ctxExpr) + ninit.Append(ir.Nod(ir.ODCL, ia, nil)) + ninit.Append(typecheck(ir.Nod(ir.OAS, ia, ir.Nod(ir.OADDR, o, nil)), ctxStmt)) + inlvars[addr] = ia + + // When capturing by reference, all occurrence of the captured var + // must be substituted with dereference of the temporary address + inlvars[v] = typecheck(ir.Nod(ir.ODEREF, ia, nil), ctxExpr) } } } - for _, ln := range fn.Func().Inl.Dcl { + for _, ln := range fn.Inl.Dcl { if ln.Op() != ir.ONAME { continue } @@ -1040,7 +1025,7 @@ func mkinlcall(n, fn ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node { } nreturns := 0 - ir.InspectList(ir.AsNodes(fn.Func().Inl.Body), func(n ir.Node) bool { + ir.InspectList(ir.AsNodes(fn.Inl.Body), func(n ir.Node) bool { if n != nil && n.Op() == ir.ORETURN { nreturns++ } @@ -1057,6 +1042,7 @@ func mkinlcall(n, fn ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node { for i, t := range fn.Type().Results().Fields().Slice() { var m ir.Node if n := ir.AsNode(t.Nname); n != nil && !ir.IsBlank(n) && !strings.HasPrefix(n.Sym().Name, "~r") { + n := n.(*ir.Name) m = inlvar(n) m = typecheck(m, ctxExpr) inlvars[n] = m @@ -1155,7 +1141,9 @@ func mkinlcall(n, fn ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node { if b := base.Ctxt.PosTable.Pos(n.Pos()).Base(); b != nil { parent = b.InliningIndex() } - newIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), fn.Sym().Linksym()) + + 
sym := fn.Sym().Linksym() + newIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym) // Add an inline mark just before the inlined body. // This mark is inline in the code so that it's a reasonable spot @@ -1168,9 +1156,9 @@ func mkinlcall(n, fn ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node { ninit.Append(inlMark) if base.Flag.GenDwarfInl > 0 { - if !fn.Sym().Linksym().WasInlined() { - base.Ctxt.DwFixups.SetPrecursorFunc(fn.Sym().Linksym(), fn) - fn.Sym().Linksym().Set(obj.AttrWasInlined, true) + if !sym.WasInlined() { + base.Ctxt.DwFixups.SetPrecursorFunc(sym, fn) + sym.Set(obj.AttrWasInlined, true) } } @@ -1183,7 +1171,7 @@ func mkinlcall(n, fn ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node { newInlIndex: newIndex, } - body := subst.list(ir.AsNodes(fn.Func().Inl.Body)) + body := subst.list(ir.AsNodes(fn.Inl.Body)) lab := nodSym(ir.OLABEL, nil, retlabel) body = append(body, lab) @@ -1236,11 +1224,11 @@ func inlvar(var_ ir.Node) ir.Node { n := NewName(var_.Sym()) n.SetType(var_.Type()) n.SetClass(ir.PAUTO) - n.Name().SetUsed(true) - n.Name().Curfn = Curfn // the calling function, not the called one - n.Name().SetAddrtaken(var_.Name().Addrtaken()) + n.SetUsed(true) + n.Curfn = Curfn // the calling function, not the called one + n.SetAddrtaken(var_.Name().Addrtaken()) - Curfn.Func().Dcl = append(Curfn.Func().Dcl, n) + Curfn.Dcl = append(Curfn.Dcl, n) return n } @@ -1249,9 +1237,9 @@ func retvar(t *types.Field, i int) ir.Node { n := NewName(lookupN("~R", i)) n.SetType(t.Type) n.SetClass(ir.PAUTO) - n.Name().SetUsed(true) - n.Name().Curfn = Curfn // the calling function, not the called one - Curfn.Func().Dcl = append(Curfn.Func().Dcl, n) + n.SetUsed(true) + n.Curfn = Curfn // the calling function, not the called one + Curfn.Dcl = append(Curfn.Dcl, n) return n } @@ -1261,9 +1249,9 @@ func argvar(t *types.Type, i int) ir.Node { n := NewName(lookupN("~arg", i)) n.SetType(t.Elem()) n.SetClass(ir.PAUTO) - n.Name().SetUsed(true) - n.Name().Curfn = Curfn // the calling function, not the called one - Curfn.Func().Dcl = append(Curfn.Func().Dcl, n) + n.SetUsed(true) + n.Curfn = Curfn // the calling function, not the called one + Curfn.Dcl = append(Curfn.Dcl, n) return n } @@ -1280,7 +1268,7 @@ type inlsubst struct { // "return" statement. delayretvars bool - inlvars map[ir.Node]ir.Node + inlvars map[*ir.Name]ir.Node // bases maps from original PosBase to PosBase with an extra // inlined call frame. @@ -1311,6 +1299,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { switch n.Op() { case ir.ONAME: + n := n.(*ir.Name) if inlvar := subst.inlvars[n]; inlvar != nil { // These will be set during inlnode if base.Flag.LowerM > 2 { fmt.Printf("substituting name %+v -> %+v\n", n, inlvar) @@ -1409,8 +1398,8 @@ func (subst *inlsubst) updatedPos(xpos src.XPos) src.XPos { return base.Ctxt.PosTable.XPos(pos) } -func pruneUnusedAutos(ll []ir.Node, vis *hairyVisitor) []ir.Node { - s := make([]ir.Node, 0, len(ll)) +func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name { + s := make([]*ir.Name, 0, len(ll)) for _, n := range ll { if n.Class() == ir.PAUTO { if _, found := vis.usedLocals[n]; !found { @@ -1424,7 +1413,7 @@ func pruneUnusedAutos(ll []ir.Node, vis *hairyVisitor) []ir.Node { // devirtualize replaces interface method calls within fn with direct // concrete-type method calls where applicable. 
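With inlsubst's inlvars map keyed by *ir.Name, only declared names (never arbitrary expressions) can be remapped during substitution. A minimal sketch of that remapping, with hypothetical stand-in types and a made-up temporary name:

package main

import "fmt"

type Name struct{ sym string }

type subst struct {
	inlvars map[*Name]*Name
}

// node rewrites a reference to a formal parameter or local of the
// inlined function into the caller-side temporary created for it;
// names with no entry pass through unchanged.
func (s *subst) node(n *Name) *Name {
	if inlvar := s.inlvars[n]; inlvar != nil {
		return inlvar
	}
	return n
}

func main() {
	param := &Name{sym: "x"}
	tmp := &Name{sym: "~R0"} // hypothetical caller-side temporary
	s := &subst{inlvars: map[*Name]*Name{param: tmp}}
	fmt.Println(s.node(param).sym) // ~R0
	other := &Name{sym: "y"}
	fmt.Println(s.node(other).sym) // y
}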
-func devirtualize(fn ir.Node) { +func devirtualize(fn *ir.Func) { Curfn = fn ir.InspectList(fn.Body(), func(n ir.Node) bool { if n.Op() == ir.OCALLINTER { diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 931626159d9d0..7bad05265d67a 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -277,7 +277,7 @@ func Main(archInit func(*Arch)) { for i := 0; i < len(xtop); i++ { n := xtop[i] if n.Op() == ir.ODCLFUNC { - Curfn = n + Curfn = n.(*ir.Func) decldepth = 1 errorsBefore := base.Errors() typecheckslice(Curfn.Body().Slice(), ctxStmt) @@ -307,8 +307,8 @@ func Main(archInit func(*Arch)) { timings.Start("fe", "capturevars") for _, n := range xtop { if n.Op() == ir.ODCLFUNC && n.Func().OClosure != nil { - Curfn = n - capturevars(n) + Curfn = n.(*ir.Func) + capturevars(Curfn) } } capturevarscomplete = true @@ -321,7 +321,7 @@ func Main(archInit func(*Arch)) { // Typecheck imported function bodies if Debug.l > 1, // otherwise lazily when used or re-exported. for _, n := range importlist { - if n.Func().Inl != nil { + if n.Inl != nil { typecheckinl(n) } } @@ -330,7 +330,7 @@ func Main(archInit func(*Arch)) { if base.Flag.LowerL != 0 { // Find functions that can be inlined and clone them before walk expands them. - visitBottomUp(xtop, func(list []ir.Node, recursive bool) { + visitBottomUp(xtop, func(list []*ir.Func, recursive bool) { numfns := numNonClosures(list) for _, n := range list { if !recursive || numfns > 1 { @@ -340,7 +340,7 @@ func Main(archInit func(*Arch)) { caninl(n) } else { if base.Flag.LowerM > 1 { - fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Func().Nname) + fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Nname) } } inlcalls(n) @@ -350,7 +350,7 @@ func Main(archInit func(*Arch)) { for _, n := range xtop { if n.Op() == ir.ODCLFUNC { - devirtualize(n) + devirtualize(n.(*ir.Func)) } } Curfn = nil @@ -380,8 +380,8 @@ func Main(archInit func(*Arch)) { timings.Start("fe", "xclosures") for _, n := range xtop { if n.Op() == ir.ODCLFUNC && n.Func().OClosure != nil { - Curfn = n - transformclosure(n) + Curfn = n.(*ir.Func) + transformclosure(Curfn) } } @@ -403,7 +403,7 @@ func Main(archInit func(*Arch)) { for i := 0; i < len(xtop); i++ { n := xtop[i] if n.Op() == ir.ODCLFUNC { - funccompile(n) + funccompile(n.(*ir.Func)) fcount++ } } @@ -481,10 +481,10 @@ func Main(archInit func(*Arch)) { } // numNonClosures returns the number of functions in list which are not closures. 
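The Main loops above re-establish a node's concrete type from its Op before asserting: checking n.Op() == ir.ODCLFUNC is what makes the n.(*ir.Func) assertion safe. A self-contained illustration of that check-then-assert idiom, with hypothetical stand-ins for the ir types:

package main

import "fmt"

type Op int

const (
	ODCLFUNC Op = iota
	OTHER
)

type Node interface{ Op() Op }

type Func struct{ name string }

func (*Func) Op() Op { return ODCLFUNC }

type stmt struct{}

func (stmt) Op() Op { return OTHER }

func main() {
	xtop := []Node{stmt{}, &Func{name: "main.init"}}
	for _, n := range xtop {
		if n.Op() == ODCLFUNC {
			fn := n.(*Func) // safe: ODCLFUNC implies the dynamic type *Func
			fmt.Println("compile", fn.name)
		}
	}
}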
-func numNonClosures(list []ir.Node) int { +func numNonClosures(list []*ir.Func) int { count := 0 - for _, n := range list { - if n.Func().OClosure == nil { + for _, fn := range list { + if fn.OClosure == nil { count++ } } diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index cbe8a2405164b..8ae5874d3b463 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -152,7 +152,7 @@ type noder struct { lastCloseScopePos syntax.Pos } -func (p *noder) funcBody(fn ir.Node, block *syntax.BlockStmt) { +func (p *noder) funcBody(fn *ir.Func, block *syntax.BlockStmt) { oldScope := p.scope p.scope = 0 funchdr(fn) @@ -165,7 +165,7 @@ func (p *noder) funcBody(fn ir.Node, block *syntax.BlockStmt) { fn.PtrBody().Set(body) base.Pos = p.makeXPos(block.Rbrace) - fn.Func().Endlineno = base.Pos + fn.Endlineno = base.Pos } funcbody() @@ -176,9 +176,9 @@ func (p *noder) openScope(pos syntax.Pos) { types.Markdcl() if trackScopes { - Curfn.Func().Parents = append(Curfn.Func().Parents, p.scope) - p.scopeVars = append(p.scopeVars, len(Curfn.Func().Dcl)) - p.scope = ir.ScopeID(len(Curfn.Func().Parents)) + Curfn.Parents = append(Curfn.Parents, p.scope) + p.scopeVars = append(p.scopeVars, len(Curfn.Dcl)) + p.scope = ir.ScopeID(len(Curfn.Parents)) p.markScope(pos) } @@ -191,29 +191,29 @@ func (p *noder) closeScope(pos syntax.Pos) { if trackScopes { scopeVars := p.scopeVars[len(p.scopeVars)-1] p.scopeVars = p.scopeVars[:len(p.scopeVars)-1] - if scopeVars == len(Curfn.Func().Dcl) { + if scopeVars == len(Curfn.Dcl) { // no variables were declared in this scope, so we can retract it. - if int(p.scope) != len(Curfn.Func().Parents) { + if int(p.scope) != len(Curfn.Parents) { base.Fatalf("scope tracking inconsistency, no variables declared but scopes were not retracted") } - p.scope = Curfn.Func().Parents[p.scope-1] - Curfn.Func().Parents = Curfn.Func().Parents[:len(Curfn.Func().Parents)-1] + p.scope = Curfn.Parents[p.scope-1] + Curfn.Parents = Curfn.Parents[:len(Curfn.Parents)-1] - nmarks := len(Curfn.Func().Marks) - Curfn.Func().Marks[nmarks-1].Scope = p.scope + nmarks := len(Curfn.Marks) + Curfn.Marks[nmarks-1].Scope = p.scope prevScope := ir.ScopeID(0) if nmarks >= 2 { - prevScope = Curfn.Func().Marks[nmarks-2].Scope + prevScope = Curfn.Marks[nmarks-2].Scope } - if Curfn.Func().Marks[nmarks-1].Scope == prevScope { - Curfn.Func().Marks = Curfn.Func().Marks[:nmarks-1] + if Curfn.Marks[nmarks-1].Scope == prevScope { + Curfn.Marks = Curfn.Marks[:nmarks-1] } return } - p.scope = Curfn.Func().Parents[p.scope-1] + p.scope = Curfn.Parents[p.scope-1] p.markScope(pos) } @@ -221,10 +221,10 @@ func (p *noder) closeScope(pos syntax.Pos) { func (p *noder) markScope(pos syntax.Pos) { xpos := p.makeXPos(pos) - if i := len(Curfn.Func().Marks); i > 0 && Curfn.Func().Marks[i-1].Pos == xpos { - Curfn.Func().Marks[i-1].Scope = p.scope + if i := len(Curfn.Marks); i > 0 && Curfn.Marks[i-1].Pos == xpos { + Curfn.Marks[i-1].Scope = p.scope } else { - Curfn.Func().Marks = append(Curfn.Func().Marks, ir.Mark{Pos: xpos, Scope: p.scope}) + Curfn.Marks = append(Curfn.Marks, ir.Mark{Pos: xpos, Scope: p.scope}) } } @@ -444,6 +444,7 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node { nn := make([]ir.Node, 0, len(names)) for i, n := range names { + n := n.(*ir.Name) if i >= len(values) { base.Errorf("missing value in const declaration") break @@ -456,8 +457,8 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node { 
n.SetOp(ir.OLITERAL) declare(n, dclcontext) - n.Name().Ntype = typ - n.Name().Defn = v + n.Ntype = typ + n.Defn = v n.SetIota(cs.iota) nn = append(nn, p.nod(decl, ir.ODCLCONST, n, nil)) @@ -514,7 +515,7 @@ func (p *noder) declName(name *syntax.Name) *ir.Name { func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node { name := p.name(fun.Name) t := p.signature(fun.Recv, fun.Type) - f := p.nod(fun, ir.ODCLFUNC, nil, nil) + f := ir.NewFunc(p.pos(fun)) if fun.Recv == nil { if name.Name == "init" { @@ -530,16 +531,16 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node { } } } else { - f.Func().Shortname = name + f.Shortname = name name = ir.BlankNode.Sym() // filled in by typecheckfunc } - f.Func().Nname = newfuncnamel(p.pos(fun.Name), name, f.Func()) - f.Func().Nname.Name().Defn = f - f.Func().Nname.Name().Ntype = t + f.Nname = newFuncNameAt(p.pos(fun.Name), name, f) + f.Nname.Defn = f + f.Nname.Ntype = t if pragma, ok := fun.Pragma.(*Pragma); ok { - f.Func().Pragma = pragma.Flag & FuncPragmas + f.Pragma = pragma.Flag & FuncPragmas if pragma.Flag&ir.Systemstack != 0 && pragma.Flag&ir.Nosplit != 0 { base.ErrorfAt(f.Pos(), "go:nosplit and go:systemstack cannot be combined") } @@ -548,13 +549,13 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node { } if fun.Recv == nil { - declare(f.Func().Nname, ir.PFUNC) + declare(f.Nname, ir.PFUNC) } p.funcBody(f, fun.Body) if fun.Body != nil { - if f.Func().Pragma&ir.Noescape != 0 { + if f.Pragma&ir.Noescape != 0 { base.ErrorfAt(f.Pos(), "can only use //go:noescape with external func implementations") } } else { @@ -1059,7 +1060,7 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node { n := p.nod(stmt, ir.ORETURN, nil, nil) n.PtrList().Set(results) if n.List().Len() == 0 && Curfn != nil { - for _, ln := range Curfn.Func().Dcl { + for _, ln := range Curfn.Dcl { if ln.Class() == ir.PPARAM { continue } @@ -1133,7 +1134,7 @@ func (p *noder) assignList(expr syntax.Expr, defn ir.Node, colas bool) []ir.Node newOrErr = true n := NewName(sym) declare(n, dclcontext) - n.Name().Defn = defn + n.Defn = defn defn.PtrInit().Append(ir.Nod(ir.ODCL, n, nil)) res[i] = n } @@ -1240,7 +1241,7 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch ir.Node, rbrac declare(nn, dclcontext) n.PtrRlist().Set1(nn) // keep track of the instances for reporting unused - nn.Name().Defn = tswitch + nn.Defn = tswitch } // Trim trailing empty statements. We omit them from diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index d566959d9eab9..7b5e3015c25ae 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -143,7 +143,7 @@ func dumpdata() { for i := xtops; i < len(xtop); i++ { n := xtop[i] if n.Op() == ir.ODCLFUNC { - funccompile(n) + funccompile(n.(*ir.Func)) } } xtops = len(xtop) diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index b7d713439ba79..6a91b8c91bdd4 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -51,9 +51,9 @@ type Order struct { // Order rewrites fn.Nbody to apply the ordering constraints // described in the comment at the top of the file. 
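The openScope/closeScope hunks above now touch Curfn's Parents, Marks, and Dcl fields directly. As a rough, runnable model of that bookkeeping (ScopeID, Func, and noder here are hypothetical stand-ins for the real types): a scope that closes with no new declarations is retracted entirely.

package main

import "fmt"

type ScopeID int32

type Func struct {
	Parents []ScopeID // Parents[i] is the parent of scope i+1
	Dcl     []string
}

type noder struct {
	scope     ScopeID
	scopeVars []int
	fn        *Func
}

func (p *noder) openScope() {
	p.fn.Parents = append(p.fn.Parents, p.scope)
	p.scopeVars = append(p.scopeVars, len(p.fn.Dcl))
	p.scope = ScopeID(len(p.fn.Parents))
}

func (p *noder) closeScope() {
	vars := p.scopeVars[len(p.scopeVars)-1]
	p.scopeVars = p.scopeVars[:len(p.scopeVars)-1]
	if vars == len(p.fn.Dcl) {
		// No variables were declared: retract the scope entirely.
		p.scope = p.fn.Parents[p.scope-1]
		p.fn.Parents = p.fn.Parents[:len(p.fn.Parents)-1]
		return
	}
	p.scope = p.fn.Parents[p.scope-1]
}

func main() {
	p := &noder{fn: &Func{}}
	p.openScope()
	p.fn.Dcl = append(p.fn.Dcl, "x") // declare x in scope 1
	p.openScope()                    // empty inner scope
	p.closeScope()                   // retracted
	p.closeScope()
	fmt.Println(len(p.fn.Parents), p.scope) // 1 0
}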
-func order(fn ir.Node) { +func order(fn *ir.Func) { if base.Flag.W > 1 { - s := fmt.Sprintf("\nbefore order %v", fn.Func().Nname.Sym()) + s := fmt.Sprintf("\nbefore order %v", fn.Sym()) ir.DumpList(s, fn.Body()) } @@ -1258,7 +1258,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { } case ir.OCLOSURE: - if n.Transient() && n.Func().ClosureVars.Len() > 0 { + if n.Transient() && len(n.Func().ClosureVars) > 0 { prealloc[n] = o.newTemp(closureType(n), false) } diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index b74b132632b9b..ea294ed66d5f4 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -24,14 +24,14 @@ import ( // "Portable" code generation. var ( - compilequeue []ir.Node // functions waiting to be compiled + compilequeue []*ir.Func // functions waiting to be compiled ) -func emitptrargsmap(fn ir.Node) { - if ir.FuncName(fn) == "_" || fn.Func().Nname.Sym().Linkname != "" { +func emitptrargsmap(fn *ir.Func) { + if ir.FuncName(fn) == "_" || fn.Sym().Linkname != "" { return } - lsym := base.Ctxt.Lookup(fn.Func().LSym.Name + ".args_stackmap") + lsym := base.Ctxt.Lookup(fn.LSym.Name + ".args_stackmap") nptr := int(fn.Type().ArgWidth() / int64(Widthptr)) bv := bvalloc(int32(nptr) * 2) @@ -68,7 +68,7 @@ func emitptrargsmap(fn ir.Node) { // really means, in memory, things with pointers needing zeroing at // the top of the stack and increasing in size. // Non-autos sort on offset. -func cmpstackvarlt(a, b ir.Node) bool { +func cmpstackvarlt(a, b *ir.Name) bool { if (a.Class() == ir.PAUTO) != (b.Class() == ir.PAUTO) { return b.Class() == ir.PAUTO } @@ -101,7 +101,7 @@ func cmpstackvarlt(a, b ir.Node) bool { } // byStackvar implements sort.Interface for []*Node using cmpstackvarlt. -type byStackVar []ir.Node +type byStackVar []*ir.Name func (s byStackVar) Len() int { return len(s) } func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) } @@ -110,7 +110,7 @@ func (s byStackVar) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s *ssafn) AllocFrame(f *ssa.Func) { s.stksize = 0 s.stkptrsize = 0 - fn := s.curfn.Func() + fn := s.curfn // Mark the PAUTO's unused. for _, ln := range fn.Dcl { @@ -193,9 +193,9 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg)) } -func funccompile(fn ir.Node) { +func funccompile(fn *ir.Func) { if Curfn != nil { - base.Fatalf("funccompile %v inside %v", fn.Func().Nname.Sym(), Curfn.Func().Nname.Sym()) + base.Fatalf("funccompile %v inside %v", fn.Sym(), Curfn.Sym()) } if fn.Type() == nil { @@ -210,21 +210,19 @@ func funccompile(fn ir.Node) { if fn.Body().Len() == 0 { // Initialize ABI wrappers if necessary. - initLSym(fn.Func(), false) + initLSym(fn, false) emitptrargsmap(fn) return } dclcontext = ir.PAUTO Curfn = fn - compile(fn) - Curfn = nil dclcontext = ir.PEXTERN } -func compile(fn ir.Node) { +func compile(fn *ir.Func) { errorsBefore := base.Errors() order(fn) if base.Errors() > errorsBefore { @@ -234,7 +232,7 @@ func compile(fn ir.Node) { // Set up the function's LSym early to avoid data races with the assemblers. // Do this before walk, as walk needs the LSym to set attributes/relocations // (e.g. in markTypeUsedInInterface). - initLSym(fn.Func(), true) + initLSym(fn, true) walk(fn) if base.Errors() > errorsBefore { @@ -259,15 +257,15 @@ func compile(fn ir.Node) { // be types of stack objects. We need to do this here // because symbols must be allocated before the parallel // phase of the compiler. 
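byStackVar can now be a slice of concrete *ir.Name. Here is a small runnable sketch of the same sort.Interface-over-a-less-function pattern with a simplified ordering (non-autos first, then larger autos before smaller ones); Name and its fields are hypothetical stand-ins, and the real cmpstackvarlt additionally weighs pointer content, needzero, used-ness, and symbol names.

package main

import (
	"fmt"
	"sort"
)

type Name struct {
	sym  string
	auto bool // PAUTO-like: a stack-allocated local
	size int64
}

func less(a, b *Name) bool {
	if a.auto != b.auto {
		return b.auto // non-autos sort ahead of autos
	}
	return a.size > b.size // larger autos first
}

type byStackVar []*Name

func (s byStackVar) Len() int           { return len(s) }
func (s byStackVar) Less(i, j int) bool { return less(s[i], s[j]) }
func (s byStackVar) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

func main() {
	vars := byStackVar{
		{sym: "x", auto: true, size: 8},
		{sym: "p", auto: false, size: 8},
		{sym: "buf", auto: true, size: 64},
	}
	sort.Sort(vars)
	for _, v := range vars {
		fmt.Println(v.sym) // p, buf, x
	}
}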
- for _, n := range fn.Func().Dcl { + for _, n := range fn.Dcl { switch n.Class() { case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO: if livenessShouldTrack(n) && n.Name().Addrtaken() { dtypesym(n.Type()) // Also make sure we allocate a linker symbol // for the stack object data, for the same reason. - if fn.Func().LSym.Func().StackObjects == nil { - fn.Func().LSym.Func().StackObjects = base.Ctxt.Lookup(fn.Func().LSym.Name + ".stkobj") + if fn.LSym.Func().StackObjects == nil { + fn.LSym.Func().StackObjects = base.Ctxt.Lookup(fn.LSym.Name + ".stkobj") } } } @@ -284,7 +282,7 @@ func compile(fn ir.Node) { // If functions are not compiled immediately, // they are enqueued in compilequeue, // which is drained by compileFunctions. -func compilenow(fn ir.Node) bool { +func compilenow(fn *ir.Func) bool { // Issue 38068: if this function is a method AND an inline // candidate AND was not inlined (yet), put it onto the compile // queue instead of compiling it immediately. This is in case we @@ -299,8 +297,8 @@ func compilenow(fn ir.Node) bool { // isInlinableButNotInlined returns true if 'fn' was marked as an // inline candidate but then never inlined (presumably because we // found no call sites). -func isInlinableButNotInlined(fn ir.Node) bool { - if fn.Func().Nname.Func().Inl == nil { +func isInlinableButNotInlined(fn *ir.Func) bool { + if fn.Inl == nil { return false } if fn.Sym() == nil { @@ -315,7 +313,7 @@ const maxStackSize = 1 << 30 // uses it to generate a plist, // and flushes that plist to machine code. // worker indicates which of the backend workers is doing the processing. -func compileSSA(fn ir.Node, worker int) { +func compileSSA(fn *ir.Func, worker int) { f := buildssa(fn, worker) // Note: check arg size to fix issue 25507. if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type().ArgWidth() >= maxStackSize { @@ -343,7 +341,7 @@ func compileSSA(fn ir.Node, worker int) { pp.Flush() // assemble, fill in boilerplate, etc. // fieldtrack must be called after pp.Flush. See issue 20014. - fieldtrack(pp.Text.From.Sym, fn.Func().FieldTrack) + fieldtrack(pp.Text.From.Sym, fn.FieldTrack) } func init() { @@ -360,7 +358,7 @@ func compileFunctions() { sizeCalculationDisabled = true // not safe to calculate sizes concurrently if race.Enabled { // Randomize compilation order to try to shake out races. - tmp := make([]ir.Node, len(compilequeue)) + tmp := make([]*ir.Func, len(compilequeue)) perm := rand.Perm(len(compilequeue)) for i, v := range perm { tmp[v] = compilequeue[i] @@ -376,7 +374,7 @@ func compileFunctions() { } var wg sync.WaitGroup base.Ctxt.InParallel = true - c := make(chan ir.Node, base.Flag.LowerC) + c := make(chan *ir.Func, base.Flag.LowerC) for i := 0; i < base.Flag.LowerC; i++ { wg.Add(1) go func(worker int) { @@ -398,9 +396,10 @@ func compileFunctions() { } func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) { - fn := curfn.(ir.Node) - if fn.Func().Nname != nil { - if expect := fn.Func().Nname.Sym().Linksym(); fnsym != expect { + fn := curfn.(*ir.Func) + + if fn.Nname != nil { + if expect := fn.Sym().Linksym(); fnsym != expect { base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect) } } @@ -430,12 +429,19 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S // // These two adjustments keep toolstash -cmp working for now. // Deciding the right answer is, as they say, future work. 
- isODCLFUNC := fn.Op() == ir.ODCLFUNC + // + // We can tell the difference between the old ODCLFUNC and ONAME + // cases by looking at the infosym.Name. If it's empty, DebugInfo is + // being called from (*obj.Link).populateDWARF, which used to use + // the ODCLFUNC. If it's non-empty (the name will end in $abstract), + // DebugInfo is being called from (*obj.Link).DwarfAbstractFunc, + // which used to use the ONAME form. + isODCLFUNC := infosym.Name == "" var apdecls []ir.Node // Populate decls for fn. if isODCLFUNC { - for _, n := range fn.Func().Dcl { + for _, n := range fn.Dcl { if n.Op() != ir.ONAME { // might be OTYPE or OLITERAL continue } @@ -457,7 +463,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S } } - decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn.Func(), apdecls) + decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn, apdecls) // For each type referenced by the functions auto vars but not // already referenced by a dwarf var, attach an R_USETYPE relocation to @@ -478,7 +484,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S var varScopes []ir.ScopeID for _, decl := range decls { pos := declPos(decl) - varScopes = append(varScopes, findScope(fn.Func().Marks, pos)) + varScopes = append(varScopes, findScope(fn.Marks, pos)) } scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes) @@ -709,9 +715,9 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []ir. // names of the variables may have been "versioned" to avoid conflicts // with local vars; disregard this versioning when sorting. func preInliningDcls(fnsym *obj.LSym) []ir.Node { - fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(ir.Node) + fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*ir.Func) var rdcl []ir.Node - for _, n := range fn.Func().Inl.Dcl { + for _, n := range fn.Inl.Dcl { c := n.Sym().Name[0] // Avoid reporting "_" parameters, since if there are more than // one, it can result in a collision later on, as in #23179. diff --git a/src/cmd/compile/internal/gc/pgen_test.go b/src/cmd/compile/internal/gc/pgen_test.go index 1984f9aa08558..35ce087af62ce 100644 --- a/src/cmd/compile/internal/gc/pgen_test.go +++ b/src/cmd/compile/internal/gc/pgen_test.go @@ -26,19 +26,19 @@ func typeWithPointers() *types.Type { return t } -func markUsed(n ir.Node) ir.Node { - n.Name().SetUsed(true) +func markUsed(n *ir.Name) *ir.Name { + n.SetUsed(true) return n } -func markNeedZero(n ir.Node) ir.Node { - n.Name().SetNeedzero(true) +func markNeedZero(n *ir.Name) *ir.Name { + n.SetNeedzero(true) return n } // Test all code paths for cmpstackvarlt. 
func TestCmpstackvar(t *testing.T) { - nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) ir.Node { + nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Name { if s == nil { s = &types.Sym{Name: "."} } @@ -49,7 +49,7 @@ func TestCmpstackvar(t *testing.T) { return n } testdata := []struct { - a, b ir.Node + a, b *ir.Name lt bool }{ { @@ -156,14 +156,14 @@ func TestCmpstackvar(t *testing.T) { } func TestStackvarSort(t *testing.T) { - nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) ir.Node { + nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Name { n := NewName(s) n.SetType(t) n.SetOffset(xoffset) n.SetClass(cl) return n } - inp := []ir.Node{ + inp := []*ir.Name{ nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC), nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO), nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC), @@ -178,7 +178,7 @@ func TestStackvarSort(t *testing.T) { nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO), nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO), } - want := []ir.Node{ + want := []*ir.Name{ nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC), nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC), nod(10, &types.Type{}, &types.Sym{}, ir.PFUNC), diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index e3a9b2a1988d6..6ad3140081ac4 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -101,7 +101,7 @@ type BlockEffects struct { // A collection of global state used by liveness analysis. type Liveness struct { - fn ir.Node + fn *ir.Func f *ssa.Func vars []ir.Node idx map[ir.Node]int32 @@ -212,9 +212,9 @@ func livenessShouldTrack(n ir.Node) bool { // getvariables returns the list of on-stack variables that we need to track // and a map for looking up indices by *Node. -func getvariables(fn ir.Node) ([]ir.Node, map[ir.Node]int32) { +func getvariables(fn *ir.Func) ([]ir.Node, map[ir.Node]int32) { var vars []ir.Node - for _, n := range fn.Func().Dcl { + for _, n := range fn.Dcl { if livenessShouldTrack(n) { vars = append(vars, n) } @@ -356,7 +356,7 @@ type livenessFuncCache struct { // Constructs a new liveness structure used to hold the global state of the // liveness computation. The cfg argument is a slice of *BasicBlocks and the // vars argument is a slice of *Nodes. -func newliveness(fn ir.Node, f *ssa.Func, vars []ir.Node, idx map[ir.Node]int32, stkptrsize int64) *Liveness { +func newliveness(fn *ir.Func, f *ssa.Func, vars []ir.Node, idx map[ir.Node]int32, stkptrsize int64) *Liveness { lv := &Liveness{ fn: fn, f: f, @@ -788,7 +788,7 @@ func (lv *Liveness) epilogue() { // pointers to copy values back to the stack). // TODO: if the output parameter is heap-allocated, then we // don't need to keep the stack copy live? - if lv.fn.Func().HasDefer() { + if lv.fn.HasDefer() { for i, n := range lv.vars { if n.Class() == ir.PPARAMOUT { if n.Name().IsOutputParamHeapAddr() { @@ -891,7 +891,7 @@ func (lv *Liveness) epilogue() { if n.Class() == ir.PPARAM { continue // ok } - base.Fatalf("bad live variable at entry of %v: %L", lv.fn.Func().Nname, n) + base.Fatalf("bad live variable at entry of %v: %L", lv.fn.Nname, n) } // Record live variables. @@ -904,7 +904,7 @@ func (lv *Liveness) epilogue() { } // If we have an open-coded deferreturn call, make a liveness map for it. 
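getvariables now ranges over fn.Dcl directly. A minimal stand-alone model of its filter-and-index step follows; Name and shouldTrack are hypothetical stand-ins, and in the real code the index map lives on Liveness.idx and positions feed the liveness bitmaps.

package main

import "fmt"

type Name struct {
	sym     string
	onStack bool
}

func shouldTrack(n *Name) bool { return n.onStack }

// getvariables keeps the trackable declarations and records each one's
// position, so bitmap bit i always corresponds to vars[i].
func getvariables(dcl []*Name) ([]*Name, map[*Name]int32) {
	var vars []*Name
	idx := make(map[*Name]int32)
	for _, n := range dcl {
		if shouldTrack(n) {
			idx[n] = int32(len(vars))
			vars = append(vars, n)
		}
	}
	return vars, idx
}

func main() {
	x := &Name{sym: "x", onStack: true}
	g := &Name{sym: "g"} // e.g. not stack-resident: not tracked
	y := &Name{sym: "y", onStack: true}
	vars, idx := getvariables([]*Name{x, g, y})
	fmt.Println(len(vars), idx[y]) // 2 1
}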
- if lv.fn.Func().OpenCodedDeferDisallowed() { + if lv.fn.OpenCodedDeferDisallowed() { lv.livenessMap.deferreturn = LivenessDontCare } else { lv.livenessMap.deferreturn = LivenessIndex{ @@ -922,7 +922,7 @@ func (lv *Liveness) epilogue() { // input parameters. for j, n := range lv.vars { if n.Class() != ir.PPARAM && lv.stackMaps[0].Get(int32(j)) { - lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Func().Nname, n) + lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Nname, n) } } } @@ -980,7 +980,7 @@ func (lv *Liveness) showlive(v *ssa.Value, live bvec) { return } - pos := lv.fn.Func().Nname.Pos() + pos := lv.fn.Nname.Pos() if v != nil { pos = v.Pos } @@ -1090,7 +1090,7 @@ func (lv *Liveness) printDebug() { if b == lv.f.Entry { live := lv.stackMaps[0] - fmt.Printf("(%s) function entry\n", base.FmtPos(lv.fn.Func().Nname.Pos())) + fmt.Printf("(%s) function entry\n", base.FmtPos(lv.fn.Nname.Pos())) fmt.Printf("\tlive=") printed = false for j, n := range lv.vars { @@ -1266,7 +1266,7 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap { } // Emit the live pointer map data structures - ls := e.curfn.Func().LSym + ls := e.curfn.LSym fninfo := ls.Func() fninfo.GCArgs, fninfo.GCLocals = lv.emit() diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go index c41d923f78652..6b5d53e80647e 100644 --- a/src/cmd/compile/internal/gc/racewalk.go +++ b/src/cmd/compile/internal/gc/racewalk.go @@ -60,13 +60,13 @@ func ispkgin(pkgs []string) bool { return false } -func instrument(fn ir.Node) { - if fn.Func().Pragma&ir.Norace != 0 { +func instrument(fn *ir.Func) { + if fn.Pragma&ir.Norace != 0 { return } if !base.Flag.Race || !ispkgin(norace_inst_pkgs) { - fn.Func().SetInstrumentBody(true) + fn.SetInstrumentBody(true) } if base.Flag.Race { @@ -74,8 +74,8 @@ func instrument(fn ir.Node) { base.Pos = src.NoXPos if thearch.LinkArch.Arch.Family != sys.AMD64 { - fn.Func().Enter.Prepend(mkcall("racefuncenterfp", nil, nil)) - fn.Func().Exit.Append(mkcall("racefuncexit", nil, nil)) + fn.Enter.Prepend(mkcall("racefuncenterfp", nil, nil)) + fn.Exit.Append(mkcall("racefuncexit", nil, nil)) } else { // nodpc is the PC of the caller as extracted by @@ -83,12 +83,12 @@ func instrument(fn ir.Node) { // This only works for amd64. This will not // work on arm or others that might support // race in the future. 
- nodpc := ir.Copy(nodfp) + nodpc := ir.Copy(nodfp).(*ir.Name) nodpc.SetType(types.Types[types.TUINTPTR]) nodpc.SetOffset(int64(-Widthptr)) - fn.Func().Dcl = append(fn.Func().Dcl, nodpc) - fn.Func().Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc)) - fn.Func().Exit.Append(mkcall("racefuncexit", nil, nil)) + fn.Dcl = append(fn.Dcl, nodpc) + fn.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc)) + fn.Exit.Append(mkcall("racefuncexit", nil, nil)) } base.Pos = lno } diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index 0ff00cca44d06..d52fad5fece24 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -593,7 +593,7 @@ func arrayClear(n, v1, v2, a ir.Node) bool { var fn ir.Node if a.Type().Elem().HasPointers() { // memclrHasPointers(hp, hn) - Curfn.Func().SetWBPos(stmt.Pos()) + Curfn.SetWBPos(stmt.Pos()) fn = mkcall("memclrHasPointers", nil, nil, hp, hn) } else { // memclrNoHeapPointers(hp, hn) diff --git a/src/cmd/compile/internal/gc/scc.go b/src/cmd/compile/internal/gc/scc.go index fe7956d5d590f..063aaa09bd2bd 100644 --- a/src/cmd/compile/internal/gc/scc.go +++ b/src/cmd/compile/internal/gc/scc.go @@ -32,10 +32,10 @@ import "cmd/compile/internal/ir" // when analyzing a set of mutually recursive functions. type bottomUpVisitor struct { - analyze func([]ir.Node, bool) + analyze func([]*ir.Func, bool) visitgen uint32 - nodeID map[ir.Node]uint32 - stack []ir.Node + nodeID map[*ir.Func]uint32 + stack []*ir.Func } // visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list. @@ -51,18 +51,18 @@ type bottomUpVisitor struct { // If recursive is false, the list consists of only a single function and its closures. // If recursive is true, the list may still contain only a single function, // if that function is itself recursive. 
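visitBottomUp's visitor is Tarjan-style strongly-connected-component detection, and the patch keys its nodeID map and stack by *ir.Func. The sketch below is a self-contained textbook variant with hypothetical types: the real visitor derives call edges by walking function bodies and uses a slightly different root test (min == id || min == id+1) instead of an explicit on-stack set.

package main

import "fmt"

type Func struct {
	name  string
	calls []*Func
}

type visitor struct {
	visitgen uint32
	id       map[*Func]uint32
	low      map[*Func]uint32
	onStack  map[*Func]bool
	stack    []*Func
}

func (v *visitor) visit(fn *Func) {
	v.visitgen++
	v.id[fn] = v.visitgen
	v.low[fn] = v.visitgen
	v.stack = append(v.stack, fn)
	v.onStack[fn] = true

	for _, callee := range fn.calls {
		if v.id[callee] == 0 {
			v.visit(callee)
			if v.low[callee] < v.low[fn] {
				v.low[fn] = v.low[callee]
			}
		} else if v.onStack[callee] && v.id[callee] < v.low[fn] {
			v.low[fn] = v.id[callee]
		}
	}

	if v.low[fn] == v.id[fn] {
		// fn roots an SCC: pop it and everything pushed after it, and
		// hand the whole group to the analysis as one unit.
		var scc []string
		for {
			n := len(v.stack) - 1
			f := v.stack[n]
			v.stack = v.stack[:n]
			v.onStack[f] = false
			scc = append(scc, f.name)
			if f == fn {
				break
			}
		}
		fmt.Println("analyze:", scc)
	}
}

func main() {
	a := &Func{name: "a"}
	b := &Func{name: "b", calls: []*Func{a}}
	a.calls = []*Func{b} // a <-> b: mutually recursive
	c := &Func{name: "c", calls: []*Func{a}}
	v := &visitor{id: map[*Func]uint32{}, low: map[*Func]uint32{}, onStack: map[*Func]bool{}}
	v.visit(c) // analyze: [b a], then analyze: [c]
}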
-func visitBottomUp(list []ir.Node, analyze func(list []ir.Node, recursive bool)) { +func visitBottomUp(list []ir.Node, analyze func(list []*ir.Func, recursive bool)) { var v bottomUpVisitor v.analyze = analyze - v.nodeID = make(map[ir.Node]uint32) + v.nodeID = make(map[*ir.Func]uint32) for _, n := range list { if n.Op() == ir.ODCLFUNC && !n.Func().IsHiddenClosure() { - v.visit(n) + v.visit(n.(*ir.Func)) } } } -func (v *bottomUpVisitor) visit(n ir.Node) uint32 { +func (v *bottomUpVisitor) visit(n *ir.Func) uint32 { if id := v.nodeID[n]; id > 0 { // already visited return id @@ -80,41 +80,41 @@ func (v *bottomUpVisitor) visit(n ir.Node) uint32 { case ir.ONAME: if n.Class() == ir.PFUNC { if n != nil && n.Name().Defn != nil { - if m := v.visit(n.Name().Defn); m < min { + if m := v.visit(n.Name().Defn.(*ir.Func)); m < min { min = m } } } case ir.OMETHEXPR: fn := methodExprName(n) - if fn != nil && fn.Name().Defn != nil { - if m := v.visit(fn.Name().Defn); m < min { + if fn != nil && fn.Defn != nil { + if m := v.visit(fn.Defn.(*ir.Func)); m < min { min = m } } case ir.ODOTMETH: fn := methodExprName(n) - if fn != nil && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC && fn.Name().Defn != nil { - if m := v.visit(fn.Name().Defn); m < min { + if fn != nil && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC && fn.Defn != nil { + if m := v.visit(fn.Defn.(*ir.Func)); m < min { min = m } } case ir.OCALLPART: fn := ir.AsNode(callpartMethod(n).Nname) if fn != nil && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC && fn.Name().Defn != nil { - if m := v.visit(fn.Name().Defn); m < min { + if m := v.visit(fn.Name().Defn.(*ir.Func)); m < min { min = m } } case ir.OCLOSURE: - if m := v.visit(n.Func().Decl); m < min { + if m := v.visit(n.Func()); m < min { min = m } } return true }) - if (min == id || min == id+1) && !n.Func().IsHiddenClosure() { + if (min == id || min == id+1) && !n.IsHiddenClosure() { // This node is the root of a strongly connected component. // The original min passed to visitcodelist was v.nodeID[n]+1. diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 1a13b14376e0b..91faf18a1d4a5 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -40,7 +40,7 @@ const ssaDumpFile = "ssa.html" const maxOpenDefers = 8 // ssaDumpInlined holds all inlined functions when ssaDump contains a function name. -var ssaDumpInlined []ir.Node +var ssaDumpInlined []*ir.Func func initssaconfig() { types_ := ssa.NewTypes() @@ -242,8 +242,8 @@ func dvarint(x *obj.LSym, off int, v int64) int { // - Size of the argument // - Offset of where argument should be placed in the args frame when making call func (s *state) emitOpenDeferInfo() { - x := base.Ctxt.Lookup(s.curfn.Func().LSym.Name + ".opendefer") - s.curfn.Func().LSym.Func().OpenCodedDeferInfo = x + x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer") + s.curfn.LSym.Func().OpenCodedDeferInfo = x off := 0 // Compute maxargsize (max size of arguments for all defers) @@ -289,7 +289,7 @@ func (s *state) emitOpenDeferInfo() { // buildssa builds an SSA function for fn. // worker indicates which of the backend workers is doing the processing. -func buildssa(fn ir.Node, worker int) *ssa.Func { +func buildssa(fn *ir.Func, worker int) *ssa.Func { name := ir.FuncName(fn) printssa := false if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", or a package.name e.g. 
"compress/gzip.(*Reader).Reset" @@ -298,9 +298,9 @@ func buildssa(fn ir.Node, worker int) *ssa.Func { var astBuf *bytes.Buffer if printssa { astBuf = &bytes.Buffer{} - ir.FDumpList(astBuf, "buildssa-enter", fn.Func().Enter) + ir.FDumpList(astBuf, "buildssa-enter", fn.Enter) ir.FDumpList(astBuf, "buildssa-body", fn.Body()) - ir.FDumpList(astBuf, "buildssa-exit", fn.Func().Exit) + ir.FDumpList(astBuf, "buildssa-exit", fn.Exit) if ssaDumpStdout { fmt.Println("generating SSA for", name) fmt.Print(astBuf.String()) @@ -311,8 +311,8 @@ func buildssa(fn ir.Node, worker int) *ssa.Func { s.pushLine(fn.Pos()) defer s.popLine() - s.hasdefer = fn.Func().HasDefer() - if fn.Func().Pragma&ir.CgoUnsafeArgs != 0 { + s.hasdefer = fn.HasDefer() + if fn.Pragma&ir.CgoUnsafeArgs != 0 { s.cgoUnsafeArgs = true } @@ -331,7 +331,7 @@ func buildssa(fn ir.Node, worker int) *ssa.Func { s.f.Name = name s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH") s.f.PrintOrHtmlSSA = printssa - if fn.Func().Pragma&ir.Nosplit != 0 { + if fn.Pragma&ir.Nosplit != 0 { s.f.NoSplit = true } s.panics = map[funcLine]*ssa.Block{} @@ -359,7 +359,7 @@ func buildssa(fn ir.Node, worker int) *ssa.Func { s.fwdVars = map[ir.Node]*ssa.Value{} s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem) - s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.Func().OpenCodedDeferDisallowed() + s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.OpenCodedDeferDisallowed() switch { case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386": // Don't support open-coded defers for 386 ONLY when using shared @@ -368,7 +368,7 @@ func buildssa(fn ir.Node, worker int) *ssa.Func { // that we don't track correctly. s.hasOpenDefers = false } - if s.hasOpenDefers && s.curfn.Func().Exit.Len() > 0 { + if s.hasOpenDefers && s.curfn.Exit.Len() > 0 { // Skip doing open defers if there is any extra exit code (likely // copying heap-allocated return values or race detection), since // we will not generate that code in the case of the extra @@ -376,7 +376,7 @@ func buildssa(fn ir.Node, worker int) *ssa.Func { s.hasOpenDefers = false } if s.hasOpenDefers && - s.curfn.Func().NumReturns*s.curfn.Func().NumDefers > 15 { + s.curfn.NumReturns*s.curfn.NumDefers > 15 { // Since we are generating defer calls at every exit for // open-coded defers, skip doing open-coded defers if there are // too many returns (especially if there are multiple defers). @@ -413,7 +413,7 @@ func buildssa(fn ir.Node, worker int) *ssa.Func { s.decladdrs = map[ir.Node]*ssa.Value{} var args []ssa.Param var results []ssa.Param - for _, n := range fn.Func().Dcl { + for _, n := range fn.Dcl { switch n.Class() { case ir.PPARAM: s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem) @@ -440,7 +440,7 @@ func buildssa(fn ir.Node, worker int) *ssa.Func { } // Populate SSAable arguments. 
- for _, n := range fn.Func().Dcl { + for _, n := range fn.Dcl { if n.Class() == ir.PPARAM && s.canSSA(n) { v := s.newValue0A(ssa.OpArg, n.Type(), n) s.vars[n] = v @@ -449,12 +449,12 @@ func buildssa(fn ir.Node, worker int) *ssa.Func { } // Convert the AST-based IR to the SSA-based IR - s.stmtList(fn.Func().Enter) + s.stmtList(fn.Enter) s.stmtList(fn.Body()) // fallthrough to exit if s.curBlock != nil { - s.pushLine(fn.Func().Endlineno) + s.pushLine(fn.Endlineno) s.exit() s.popLine() } @@ -477,10 +477,10 @@ func buildssa(fn ir.Node, worker int) *ssa.Func { return s.f } -func dumpSourcesColumn(writer *ssa.HTMLWriter, fn ir.Node) { +func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Func) { // Read sources of target function fn. fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename() - targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Func().Endlineno.Line()) + targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Endlineno.Line()) if err != nil { writer.Logf("cannot read sources for function %v: %v", fn, err) } @@ -488,13 +488,7 @@ func dumpSourcesColumn(writer *ssa.HTMLWriter, fn ir.Node) { // Read sources of inlined functions. var inlFns []*ssa.FuncLines for _, fi := range ssaDumpInlined { - var elno src.XPos - if fi.Name().Defn == nil { - // Endlineno is filled from exported data. - elno = fi.Func().Endlineno - } else { - elno = fi.Name().Defn.Func().Endlineno - } + elno := fi.Endlineno fname := base.Ctxt.PosTable.Pos(fi.Pos()).Filename() fnLines, err := readFuncLines(fname, fi.Pos().Line(), elno.Line()) if err != nil { @@ -593,7 +587,7 @@ type state struct { f *ssa.Func // Node for function - curfn ir.Node + curfn *ir.Func // labels in f labels map[string]*ssaLabel @@ -972,7 +966,7 @@ func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Valu } func (s *state) instrument(t *types.Type, addr *ssa.Value, wr bool) { - if !s.curfn.Func().InstrumentBody() { + if !s.curfn.InstrumentBody() { return } @@ -1571,7 +1565,7 @@ func (s *state) exit() *ssa.Block { // Run exit code. Typically, this code copies heap-allocated PPARAMOUT // variables back to the stack. - s.stmtList(s.curfn.Func().Exit) + s.stmtList(s.curfn.Exit) // Store SSAable PPARAMOUT variables back to stack locations. for _, n := range s.returns { @@ -4296,7 +4290,7 @@ func (s *state) openDeferSave(n ir.Node, t *types.Type, val *ssa.Value) *ssa.Val pos = n.Pos() } argTemp := tempAt(pos.WithNotStmt(), s.curfn, t) - argTemp.Name().SetOpenDeferSlot(true) + argTemp.SetOpenDeferSlot(true) var addrArgTemp *ssa.Value // Use OpVarLive to make sure stack slots for the args, etc. are not // removed by dead-store elimination @@ -4322,7 +4316,7 @@ func (s *state) openDeferSave(n ir.Node, t *types.Type, val *ssa.Value) *ssa.Val // Therefore, we must make sure it is zeroed out in the entry // block if it contains pointers, else GC may wrongly follow an // uninitialized pointer value. - argTemp.Name().SetNeedzero(true) + argTemp.SetNeedzero(true) } if !canSSA { a := s.addr(n) @@ -4790,7 +4784,7 @@ func (s *state) getMethodClosure(fn ir.Node) *ssa.Value { // We get back an SSA value representing &sync.(*Mutex).Unlock·f. // We can then pass that to defer or go. n2 := ir.NewNameAt(fn.Pos(), fn.Sym()) - n2.Name().Curfn = s.curfn + n2.Curfn = s.curfn n2.SetClass(ir.PFUNC) // n2.Sym already existed, so it's already marked as a function. 
n2.SetPos(fn.Pos()) @@ -5023,7 +5017,7 @@ func (s *state) exprPtr(n ir.Node, bounded bool, lineno src.XPos) *ssa.Value { // Used only for automatically inserted nil checks, // not for user code like 'x != nil'. func (s *state) nilCheck(ptr *ssa.Value) { - if base.Debug.DisableNil != 0 || s.curfn.Func().NilCheckDisabled() { + if base.Debug.DisableNil != 0 || s.curfn.NilCheckDisabled() { return } s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem()) @@ -6197,7 +6191,7 @@ func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func emitStackObjects(e *ssafn, pp *Progs) { var vars []ir.Node - for _, n := range e.curfn.Func().Dcl { + for _, n := range e.curfn.Dcl { if livenessShouldTrack(n) && n.Name().Addrtaken() { vars = append(vars, n) } @@ -6211,7 +6205,7 @@ func emitStackObjects(e *ssafn, pp *Progs) { // Populate the stack object data. // Format must match runtime/stack.go:stackObjectRecord. - x := e.curfn.Func().LSym.Func().StackObjects + x := e.curfn.LSym.Func().StackObjects off := 0 off = duintptr(x, off, uint64(len(vars))) for _, v := range vars { @@ -6248,7 +6242,7 @@ func genssa(f *ssa.Func, pp *Progs) { s.livenessMap = liveness(e, f, pp) emitStackObjects(e, pp) - openDeferInfo := e.curfn.Func().LSym.Func().OpenCodedDeferInfo + openDeferInfo := e.curfn.LSym.Func().OpenCodedDeferInfo if openDeferInfo != nil { // This function uses open-coded defers -- write out the funcdata // info that we computed at the end of genssa. @@ -6453,7 +6447,7 @@ func genssa(f *ssa.Func, pp *Progs) { // some of the inline marks. // Use this instruction instead. p.Pos = p.Pos.WithIsStmt() // promote position to a statement - pp.curfn.Func().LSym.Func().AddInlMark(p, inlMarks[m]) + pp.curfn.LSym.Func().AddInlMark(p, inlMarks[m]) // Make the inline mark a real nop, so it doesn't generate any code. m.As = obj.ANOP m.Pos = src.NoXPos @@ -6465,14 +6459,14 @@ func genssa(f *ssa.Func, pp *Progs) { // Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction). for _, p := range inlMarkList { if p.As != obj.ANOP { - pp.curfn.Func().LSym.Func().AddInlMark(p, inlMarks[p]) + pp.curfn.LSym.Func().AddInlMark(p, inlMarks[p]) } } } if base.Ctxt.Flag_locationlists { debugInfo := ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, stackOffset) - e.curfn.Func().DebugInfo = debugInfo + e.curfn.DebugInfo = debugInfo bstart := s.bstart // Note that at this moment, Prog.Pc is a sequence number; it's // not a real PC until after assembly, so this mapping has to @@ -6486,7 +6480,7 @@ func genssa(f *ssa.Func, pp *Progs) { } return bstart[b].Pc case ssa.BlockEnd.ID: - return e.curfn.Func().LSym.Size + return e.curfn.LSym.Size default: return valueToProgAfter[v].Pc } @@ -6584,7 +6578,7 @@ func defframe(s *SSAGenState, e *ssafn) { var state uint32 // Iterate through declarations. They are sorted in decreasing Xoffset order. - for _, n := range e.curfn.Func().Dcl { + for _, n := range e.curfn.Dcl { if !n.Name().Needzero() { continue } @@ -6949,7 +6943,7 @@ func fieldIdx(n ir.Node) int { // ssafn holds frontend information about a function that the backend is processing. // It also exports a bunch of compiler services for the ssa backend. 
type ssafn struct { - curfn ir.Node + curfn *ir.Func strings map[string]*obj.LSym // map from constant string to data symbols scratchFpMem ir.Node // temp for floating point register / memory moves on some architectures stksize int64 // stack size for current frame @@ -7072,8 +7066,8 @@ func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t n.SetType(t) n.SetClass(ir.PAUTO) n.SetEsc(EscNever) - n.Name().Curfn = e.curfn - e.curfn.Func().Dcl = append(e.curfn.Func().Dcl, n) + n.Curfn = e.curfn + e.curfn.Dcl = append(e.curfn.Dcl, n) dowidth(t) return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset} } @@ -7136,7 +7130,7 @@ func (e *ssafn) Syslook(name string) *obj.LSym { } func (e *ssafn) SetWBPos(pos src.XPos) { - e.curfn.Func().SetWBPos(pos) + e.curfn.SetWBPos(pos) } func (e *ssafn) MyImportPath() string { diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 28703205d6655..336465db9800d 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -95,8 +95,8 @@ func autolabel(prefix string) *types.Sym { if Curfn == nil { base.Fatalf("autolabel outside function") } - n := fn.Func().Label - fn.Func().Label++ + n := fn.Label + fn.Label++ return lookupN(prefix, int(n)) } @@ -138,7 +138,7 @@ func importdot(opkg *types.Pkg, pack *ir.PkgName) { // newname returns a new ONAME Node associated with symbol s. func NewName(s *types.Sym) *ir.Name { n := ir.NewNameAt(base.Pos, s) - n.Name().Curfn = Curfn + n.Curfn = Curfn return n } @@ -1165,7 +1165,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { tfn.PtrRlist().Set(structargs(method.Type.Results(), false)) fn := dclfunc(newnam, tfn) - fn.Func().SetDupok(true) + fn.SetDupok(true) nthis := ir.AsNode(tfn.Type().Recv().Nname) @@ -1201,7 +1201,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { fn.PtrBody().Append(as) fn.PtrBody().Append(nodSym(ir.ORETJMP, nil, methodSym(methodrcvr, method.Sym))) } else { - fn.Func().SetWrapper(true) // ignore frame for panic+recover matching + fn.SetWrapper(true) // ignore frame for panic+recover matching call := ir.Nod(ir.OCALL, dot, nil) call.PtrList().Set(paramNnames(tfn.Type())) call.SetIsDDD(tfn.Type().IsVariadic()) @@ -1222,8 +1222,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { testdclstack() } - fn = typecheck(fn, ctxStmt) - + typecheckFunc(fn) Curfn = fn typecheckslice(fn.Body().Slice(), ctxStmt) @@ -1233,7 +1232,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym != nil { inlcalls(fn) } - escapeFuncs([]ir.Node{fn}, false) + escapeFuncs([]*ir.Func{fn}, false) Curfn = nil xtop = append(xtop, fn) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 4ab47fb4069a9..7d19a2b58e91f 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -95,7 +95,7 @@ func resolve(n ir.Node) (res ir.Node) { base.Fatalf("recursive inimport") } inimport = true - expandDecl(n) + expandDecl(n.(*ir.Name)) inimport = false return n } @@ -199,6 +199,13 @@ func cycleTrace(cycle []ir.Node) string { var typecheck_tcstack []ir.Node +func typecheckFunc(fn *ir.Func) { + new := typecheck(fn, ctxStmt) + if new != fn { + base.Fatalf("typecheck changed func") + } +} + // typecheck type checks node n. 
// The result of typecheck MUST be assigned back to n, e.g.
 // 	n.Left = typecheck(n.Left, top)
@@ -2069,7 +2076,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 	case ir.ODCLFUNC:
 		ok |= ctxStmt
-		typecheckfunc(n)
+		typecheckfunc(n.(*ir.Func))
 
 	case ir.ODCLCONST:
 		ok |= ctxStmt
@@ -3402,36 +3409,38 @@ out:
 }
 
 // type check function definition
-func typecheckfunc(n ir.Node) {
+// To be called by typecheck, not directly.
+// (Call typecheckFunc instead.)
+func typecheckfunc(n *ir.Func) {
 	if enableTrace && base.Flag.LowerT {
 		defer tracePrint("typecheckfunc", n)(nil)
 	}
 
-	for _, ln := range n.Func().Dcl {
+	for _, ln := range n.Dcl {
 		if ln.Op() == ir.ONAME && (ln.Class() == ir.PPARAM || ln.Class() == ir.PPARAMOUT) {
 			ln.Name().Decldepth = 1
 		}
 	}
 
-	n.Func().Nname = typecheck(n.Func().Nname, ctxExpr|ctxAssign)
-	t := n.Func().Nname.Type()
+	n.Nname = typecheck(n.Nname, ctxExpr|ctxAssign).(*ir.Name)
+	t := n.Nname.Type()
 	if t == nil {
 		return
 	}
 	n.SetType(t)
 	rcvr := t.Recv()
-	if rcvr != nil && n.Func().Shortname != nil {
-		m := addmethod(n, n.Func().Shortname, t, true, n.Func().Pragma&ir.Nointerface != 0)
+	if rcvr != nil && n.Shortname != nil {
+		m := addmethod(n, n.Shortname, t, true, n.Pragma&ir.Nointerface != 0)
 		if m == nil {
 			return
 		}
 
-		n.Func().Nname.SetSym(methodSym(rcvr.Type, n.Func().Shortname))
-		declare(n.Func().Nname, ir.PFUNC)
+		n.Nname.SetSym(methodSym(rcvr.Type, n.Shortname))
+		declare(n.Nname, ir.PFUNC)
 	}
 
-	if base.Ctxt.Flag_dynlink && !inimport && n.Func().Nname != nil {
-		makefuncsym(n.Func().Nname.Sym())
+	if base.Ctxt.Flag_dynlink && !inimport && n.Nname != nil {
+		makefuncsym(n.Sym())
 	}
 }
 
@@ -3861,22 +3870,19 @@ func isTermNode(n ir.Node) bool {
 }
 
 // checkreturn makes sure that fn terminates appropriately.
-func checkreturn(fn ir.Node) {
+func checkreturn(fn *ir.Func) {
 	if fn.Type().NumResults() != 0 && fn.Body().Len() != 0 {
 		var labels map[*types.Sym]ir.Node
 		markbreaklist(&labels, fn.Body(), nil)
 		if !isTermNodes(fn.Body()) {
-			base.ErrorfAt(fn.Func().Endlineno, "missing return at end of function")
+			base.ErrorfAt(fn.Endlineno, "missing return at end of function")
 		}
 	}
 }
 
-func deadcode(fn ir.Node) {
+func deadcode(fn *ir.Func) {
 	deadcodeslice(fn.PtrBody())
-	deadcodefn(fn)
-}
 
-func deadcodefn(fn ir.Node) {
 	if fn.Body().Len() == 0 {
 		return
 	}
@@ -4014,21 +4020,15 @@ func curpkg() *types.Pkg {
 		// Initialization expressions for package-scope variables.
 		return ir.LocalPkg
 	}
-
-	// TODO(mdempsky): Standardize on either ODCLFUNC or ONAME for
-	// Curfn, rather than mixing them.
-	if fn.Op() == ir.ODCLFUNC {
-		fn = fn.Func().Nname
-	}
-
-	return fnpkg(fn)
+	return fnpkg(fn.Nname)
 }
 
 // MethodName returns the ONAME representing the method
 // referenced by expression n, which must be a method selector,
 // method expression, or method value.
-func methodExprName(n ir.Node) ir.Node {
-	return ir.AsNode(methodExprFunc(n).Nname)
+func methodExprName(n ir.Node) *ir.Name {
+	name, _ := ir.AsNode(methodExprFunc(n).Nname).(*ir.Name)
+	return name
 }
 
 // MethodFunc is like MethodName, but returns the types.Field instead.
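Note on the new typecheckFunc helper above: typechecking a function
declaration never replaces the node, so the helper asserts that
typecheck returns the same *ir.Func rather than requiring callers to
reassign the result. A sketch of the call-site pattern (the concrete
instance is the genwrapper change in subr.go above):

	// before: callers had to reassign the result
	fn = typecheck(fn, ctxStmt)

	// after: typecheckFunc asserts fn is returned unchanged
	typecheckFunc(fn)
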
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index c05aa0c37217c..d749dff827431 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -22,33 +22,33 @@ import ( const tmpstringbufsize = 32 const zeroValSize = 1024 // must match value of runtime/map.go:maxZero -func walk(fn ir.Node) { +func walk(fn *ir.Func) { Curfn = fn errorsBefore := base.Errors() if base.Flag.W != 0 { - s := fmt.Sprintf("\nbefore walk %v", Curfn.Func().Nname.Sym()) + s := fmt.Sprintf("\nbefore walk %v", Curfn.Sym()) ir.DumpList(s, Curfn.Body()) } lno := base.Pos // Final typecheck for any unused variables. - for i, ln := range fn.Func().Dcl { + for i, ln := range fn.Dcl { if ln.Op() == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) { - ln = typecheck(ln, ctxExpr|ctxAssign) - fn.Func().Dcl[i] = ln + ln = typecheck(ln, ctxExpr|ctxAssign).(*ir.Name) + fn.Dcl[i] = ln } } // Propagate the used flag for typeswitch variables up to the NONAME in its definition. - for _, ln := range fn.Func().Dcl { + for _, ln := range fn.Dcl { if ln.Op() == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) && ln.Name().Defn != nil && ln.Name().Defn.Op() == ir.OTYPESW && ln.Name().Used() { ln.Name().Defn.Left().Name().SetUsed(true) } } - for _, ln := range fn.Func().Dcl { + for _, ln := range fn.Dcl { if ln.Op() != ir.ONAME || (ln.Class() != ir.PAUTO && ln.Class() != ir.PAUTOHEAP) || ln.Sym().Name[0] == '&' || ln.Name().Used() { continue } @@ -69,15 +69,15 @@ func walk(fn ir.Node) { } walkstmtlist(Curfn.Body().Slice()) if base.Flag.W != 0 { - s := fmt.Sprintf("after walk %v", Curfn.Func().Nname.Sym()) + s := fmt.Sprintf("after walk %v", Curfn.Sym()) ir.DumpList(s, Curfn.Body()) } zeroResults() heapmoves() - if base.Flag.W != 0 && Curfn.Func().Enter.Len() > 0 { - s := fmt.Sprintf("enter %v", Curfn.Func().Nname.Sym()) - ir.DumpList(s, Curfn.Func().Enter) + if base.Flag.W != 0 && Curfn.Enter.Len() > 0 { + s := fmt.Sprintf("enter %v", Curfn.Sym()) + ir.DumpList(s, Curfn.Enter) } } @@ -87,8 +87,8 @@ func walkstmtlist(s []ir.Node) { } } -func paramoutheap(fn ir.Node) bool { - for _, ln := range fn.Func().Dcl { +func paramoutheap(fn *ir.Func) bool { + for _, ln := range fn.Dcl { switch ln.Class() { case ir.PPARAMOUT: if isParamStackCopy(ln) || ln.Name().Addrtaken() { @@ -209,18 +209,18 @@ func walkstmt(n ir.Node) ir.Node { base.Errorf("case statement out of place") case ir.ODEFER: - Curfn.Func().SetHasDefer(true) - Curfn.Func().NumDefers++ - if Curfn.Func().NumDefers > maxOpenDefers { + Curfn.SetHasDefer(true) + Curfn.NumDefers++ + if Curfn.NumDefers > maxOpenDefers { // Don't allow open-coded defers if there are more than // 8 defers in the function, since we use a single // byte to record active defers. - Curfn.Func().SetOpenCodedDeferDisallowed(true) + Curfn.SetOpenCodedDeferDisallowed(true) } if n.Esc() != EscNever { // If n.Esc is not EscNever, then this defer occurs in a loop, // so open-coded defers cannot be used in this function. 
- Curfn.Func().SetOpenCodedDeferDisallowed(true) + Curfn.SetOpenCodedDeferDisallowed(true) } fallthrough case ir.OGO: @@ -270,7 +270,7 @@ func walkstmt(n ir.Node) ir.Node { walkstmtlist(n.Rlist().Slice()) case ir.ORETURN: - Curfn.Func().NumReturns++ + Curfn.NumReturns++ if n.List().Len() == 0 { break } @@ -279,12 +279,13 @@ func walkstmt(n ir.Node) ir.Node { // so that reorder3 can fix up conflicts var rl []ir.Node - for _, ln := range Curfn.Func().Dcl { + for _, ln := range Curfn.Dcl { cl := ln.Class() if cl == ir.PAUTO || cl == ir.PAUTOHEAP { break } if cl == ir.PPARAMOUT { + var ln ir.Node = ln if isParamStackCopy(ln) { ln = walkexpr(typecheck(ir.Nod(ir.ODEREF, ln.Name().Heapaddr, nil), ctxExpr), nil) } @@ -800,8 +801,8 @@ opswitch: fromType := n.Left().Type() toType := n.Type() - if !fromType.IsInterface() && !ir.IsBlank(Curfn.Func().Nname) { // skip unnamed functions (func _()) - markTypeUsedInInterface(fromType, Curfn.Func().LSym) + if !fromType.IsInterface() && !ir.IsBlank(Curfn.Nname) { // skip unnamed functions (func _()) + markTypeUsedInInterface(fromType, Curfn.LSym) } // typeword generates the type word of the interface value. @@ -1625,7 +1626,7 @@ func markTypeUsedInInterface(t *types.Type, from *obj.LSym) { func markUsedIfaceMethod(n ir.Node) { ityp := n.Left().Left().Type() tsym := typenamesym(ityp).Linksym() - r := obj.Addrel(Curfn.Func().LSym) + r := obj.Addrel(Curfn.LSym) r.Sym = tsym // n.Left.Xoffset is the method index * Widthptr (the offset of code pointer // in itab). @@ -2448,7 +2449,7 @@ func zeroResults() { v = v.Name().Stackcopy } // Zero the stack location containing f. - Curfn.Func().Enter.Append(ir.NodAt(Curfn.Pos(), ir.OAS, v, nil)) + Curfn.Enter.Append(ir.NodAt(Curfn.Pos(), ir.OAS, v, nil)) } } @@ -2478,9 +2479,9 @@ func heapmoves() { nn := paramstoheap(Curfn.Type().Recvs()) nn = append(nn, paramstoheap(Curfn.Type().Params())...) nn = append(nn, paramstoheap(Curfn.Type().Results())...) - Curfn.Func().Enter.Append(nn...) - base.Pos = Curfn.Func().Endlineno - Curfn.Func().Exit.Append(returnsfromheap(Curfn.Type().Results())...) + Curfn.Enter.Append(nn...) + base.Pos = Curfn.Endlineno + Curfn.Exit.Append(returnsfromheap(Curfn.Type().Results())...) base.Pos = lno } @@ -2781,7 +2782,7 @@ func appendslice(n ir.Node, init *ir.Nodes) ir.Node { nptr2 := l2 - Curfn.Func().SetWBPos(n.Pos()) + Curfn.SetWBPos(n.Pos()) // instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int fn := syslook("typedslicecopy") @@ -2966,7 +2967,7 @@ func extendslice(n ir.Node, init *ir.Nodes) ir.Node { hasPointers := elemtype.HasPointers() if hasPointers { clrname = "memclrHasPointers" - Curfn.Func().SetWBPos(n.Pos()) + Curfn.SetWBPos(n.Pos()) } var clr ir.Nodes @@ -3100,7 +3101,7 @@ func walkappend(n ir.Node, init *ir.Nodes, dst ir.Node) ir.Node { // func copyany(n ir.Node, init *ir.Nodes, runtimecall bool) ir.Node { if n.Left().Type().Elem().HasPointers() { - Curfn.Func().SetWBPos(n.Pos()) + Curfn.SetWBPos(n.Pos()) fn := writebarrierfn("typedslicecopy", n.Left().Type().Elem(), n.Right().Type().Elem()) n.SetLeft(cheapexpr(n.Left(), init)) ptrL, lenL := backingArrayPtrLen(n.Left()) @@ -3714,9 +3715,9 @@ func usemethod(n ir.Node) { // (including global variables such as numImports - was issue #19028). // Also need to check for reflect package itself (see Issue #38515). if s := res0.Type.Sym; s != nil && s.Name == "Method" && isReflectPkg(s.Pkg) { - Curfn.Func().SetReflectMethod(true) + Curfn.SetReflectMethod(true) // The LSym is initialized at this point. 
We need to set the attribute on the LSym. - Curfn.Func().LSym.Set(obj.AttrReflectMethod, true) + Curfn.LSym.Set(obj.AttrReflectMethod, true) } } @@ -3765,10 +3766,10 @@ func usefield(n ir.Node) { } sym := tracksym(outer, field) - if Curfn.Func().FieldTrack == nil { - Curfn.Func().FieldTrack = make(map[*types.Sym]struct{}) + if Curfn.FieldTrack == nil { + Curfn.FieldTrack = make(map[*types.Sym]struct{}) } - Curfn.Func().FieldTrack[sym] = struct{}{} + Curfn.FieldTrack[sym] = struct{}{} } func candiscardlist(l ir.Nodes) bool { @@ -3948,12 +3949,12 @@ func wrapCall(n ir.Node, init *ir.Nodes) ir.Node { funcbody() - fn = typecheck(fn, ctxStmt) + typecheckFunc(fn) typecheckslice(fn.Body().Slice(), ctxStmt) xtop = append(xtop, fn) call = ir.Nod(ir.OCALL, nil, nil) - call.SetLeft(fn.Func().Nname) + call.SetLeft(fn.Nname) call.PtrList().Set(n.List().Slice()) call = typecheck(call, ctxStmt) call = walkexpr(call, init) @@ -4091,6 +4092,6 @@ func walkCheckPtrArithmetic(n ir.Node, init *ir.Nodes) ir.Node { // checkPtr reports whether pointer checking should be enabled for // function fn at a given level. See debugHelpFooter for defined // levels. -func checkPtr(fn ir.Node, level int) bool { - return base.Debug.Checkptr >= level && fn.Func().Pragma&ir.NoCheckPtr == 0 +func checkPtr(fn *ir.Func, level int) bool { + return base.Debug.Checkptr >= level && fn.Pragma&ir.NoCheckPtr == 0 } diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 3822c4c73bd77..a3999b6da0380 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -1338,7 +1338,7 @@ func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) { mode.Fprintf(s, "%v { %v }", n.Type(), n.Body()) return } - mode.Fprintf(s, "%v { %v }", n.Type(), n.Func().Decl.Body()) + mode.Fprintf(s, "%v { %v }", n.Type(), n.Func().Body()) case OCOMPLIT: if mode == FErr { @@ -1638,7 +1638,7 @@ func nodeDumpFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) { } } - if n.Op() == OCLOSURE && n.Func().Decl != nil && n.Func().Nname.Sym() != nil { + if n.Op() == OCLOSURE && n.Func() != nil && n.Func().Nname.Sym() != nil { mode.Fprintf(s, " fnName %v", n.Func().Nname.Sym()) } if n.Sym() != nil && n.Op() != ONAME { @@ -1656,15 +1656,15 @@ func nodeDumpFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) { if n.Right() != nil { mode.Fprintf(s, "%v", n.Right()) } - if n.Op() == OCLOSURE && n.Func() != nil && n.Func().Decl != nil && n.Func().Decl.Body().Len() != 0 { + if n.Op() == OCLOSURE && n.Func() != nil && n.Func().Body().Len() != 0 { indent(s) // The function associated with a closure - mode.Fprintf(s, "%v-clofunc%v", n.Op(), n.Func().Decl) + mode.Fprintf(s, "%v-clofunc%v", n.Op(), n.Func()) } if n.Op() == ODCLFUNC && n.Func() != nil && n.Func().Dcl != nil && len(n.Func().Dcl) != 0 { indent(s) // The dcls for a func or closure - mode.Fprintf(s, "%v-dcl%v", n.Op(), AsNodes(n.Func().Dcl)) + mode.Fprintf(s, "%v-dcl%v", n.Op(), asNameNodes(n.Func().Dcl)) } if n.List().Len() != 0 { indent(s) @@ -1683,6 +1683,16 @@ func nodeDumpFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) { } } +// asNameNodes copies list to a new Nodes. +// It should only be called in debug formatting and other low-performance contexts. 
+func asNameNodes(list []*Name) Nodes { + var ns Nodes + for _, n := range list { + ns.Append(n) + } + return ns +} + // "%S" suppresses qualifying with package func symFormat(s *types.Sym, f fmt.State, verb rune, mode FmtMode) { switch verb { diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 57ec0707e9fc4..92a24c838569f 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -53,9 +53,8 @@ type Func struct { body Nodes iota int64 - Nname Node // ONAME node - Decl Node // ODCLFUNC node - OClosure Node // OCLOSURE node + Nname *Name // ONAME node + OClosure Node // OCLOSURE node Shortname *types.Sym @@ -65,12 +64,11 @@ type Func struct { Exit Nodes // ONAME nodes for all params/locals for this func/closure, does NOT // include closurevars until transformclosure runs. - Dcl []Node + Dcl []*Name - ClosureEnter Nodes // list of ONAME nodes of captured variables - ClosureType Node // closure representation type - ClosureCalled bool // closure is only immediately called - ClosureVars Nodes // closure params; each has closurevar set + ClosureEnter Nodes // list of ONAME nodes (or OADDR-of-ONAME nodes, for output parameters) of captured variables + ClosureType Node // closure representation type + ClosureVars []*Name // closure params; each has closurevar set // Parents records the parent scope of each scope within a // function. The root scope (0) has no parent, so the i'th @@ -80,17 +78,17 @@ type Func struct { // Marks records scope boundary changes. Marks []Mark - // Closgen tracks how many closures have been generated within - // this function. Used by closurename for creating unique - // function names. - Closgen int - FieldTrack map[*types.Sym]struct{} DebugInfo interface{} LSym *obj.LSym Inl *Inline + // Closgen tracks how many closures have been generated within + // this function. Used by closurename for creating unique + // function names. + Closgen int32 + Label int32 // largest auto-generated label in this function Endlineno src.XPos @@ -99,8 +97,8 @@ type Func struct { Pragma PragmaFlag // go:xxx function annotations flags bitset16 - NumDefers int // number of defer calls in the function - NumReturns int // number of explicit returns in the function + NumDefers int32 // number of defer calls in the function + NumReturns int32 // number of explicit returns in the function // nwbrCalls records the LSyms of functions called by this // function for go:nowritebarrierrec analysis. Only filled in @@ -112,7 +110,6 @@ func NewFunc(pos src.XPos) *Func { f := new(Func) f.pos = pos f.op = ODCLFUNC - f.Decl = f f.iota = -1 return f } @@ -141,7 +138,7 @@ type Inline struct { Cost int32 // heuristic cost of inlining this function // Copies of Func.Dcl and Nbody for use during inlining. 
- Dcl []Node + Dcl []*Name Body []Node } @@ -172,6 +169,7 @@ const ( funcExportInline // include inline body in export data funcInstrumentBody // add race/msan instrumentation during SSA construction funcOpenCodedDeferDisallowed // can't do open-coded defers + funcClosureCalled // closure is only immediately called ) type SymAndPos struct { @@ -190,6 +188,7 @@ func (f *Func) InlinabilityChecked() bool { return f.flags&funcInlinability func (f *Func) ExportInline() bool { return f.flags&funcExportInline != 0 } func (f *Func) InstrumentBody() bool { return f.flags&funcInstrumentBody != 0 } func (f *Func) OpenCodedDeferDisallowed() bool { return f.flags&funcOpenCodedDeferDisallowed != 0 } +func (f *Func) ClosureCalled() bool { return f.flags&funcClosureCalled != 0 } func (f *Func) SetDupok(b bool) { f.flags.set(funcDupok, b) } func (f *Func) SetWrapper(b bool) { f.flags.set(funcWrapper, b) } @@ -202,6 +201,7 @@ func (f *Func) SetInlinabilityChecked(b bool) { f.flags.set(funcInlinabilit func (f *Func) SetExportInline(b bool) { f.flags.set(funcExportInline, b) } func (f *Func) SetInstrumentBody(b bool) { f.flags.set(funcInstrumentBody, b) } func (f *Func) SetOpenCodedDeferDisallowed(b bool) { f.flags.set(funcOpenCodedDeferDisallowed, b) } +func (f *Func) SetClosureCalled(b bool) { f.flags.set(funcClosureCalled, b) } func (f *Func) SetWBPos(pos src.XPos) { if base.Debug.WB != 0 { diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index d330745cfbb18..5546488fa7f16 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -32,9 +32,10 @@ type Name struct { // For a local variable (not param) or extern, the initializing assignment (OAS or OAS2). // For a closure var, the ONAME node of the outer captured variable Defn Node - // The ODCLFUNC node (for a static function/method or a closure) in which - // local variable or param is declared. - Curfn Node + + // The function, method, or closure in which local variable or param is declared. + Curfn *Func + // Unique number for ONAME nodes within a function. Function outputs // (results) are numbered starting at one, followed by function inputs // (parameters), and then local variables. Vargen is used to distinguish diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go index 8597ad492ad32..9321f765e0e28 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -20,8 +20,8 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {Func{}, 180, 320}, - {Name{}, 132, 232}, + {Func{}, 172, 296}, + {Name{}, 128, 224}, {node{}, 84, 144}, } From 4eaef981b5b5bac873256d63ffecaaa73fb5f28b Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 26 Nov 2020 00:47:44 -0500 Subject: [PATCH 060/474] [dev.regabi] cmd/compile: add ir.Closure, ir.ClosureRead Closures are another reference to Funcs, and it cleans up the code quite a bit to be clear about types. OCLOSUREVAR is renamed to OCLOSUREREAD to make clearer that it is unrelated to the list Func.ClosureVars. Passes buildall w/ toolstash -cmp. 
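As a simplified sketch of the effect (variable names follow the
transformclosure hunk below), captured-variable reads are now built
with a concrete, typed constructor instead of by mutating a generic
node field by field:

	// before: generic node, mutated after construction
	cv := ir.Nod(ir.OCLOSUREVAR, nil, nil)
	cv.SetType(typ)
	cv.SetOffset(offset)

	// after: concrete constructor
	cr := ir.NewClosureRead(typ, offset)
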
Change-Id: Id0d28df2d4d6e9954e34df7a39ea226995eee937 Reviewed-on: https://go-review.googlesource.com/c/go/+/274098 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/closure.go | 32 ++++++++---------- src/cmd/compile/internal/gc/escape.go | 4 +-- src/cmd/compile/internal/gc/inl.go | 4 +-- src/cmd/compile/internal/gc/ssa.go | 4 +-- src/cmd/compile/internal/gc/typecheck.go | 6 ++-- src/cmd/compile/internal/gc/walk.go | 2 +- src/cmd/compile/internal/ir/expr.go | 39 ++++++++++++++++++++++ src/cmd/compile/internal/ir/func.go | 4 +-- src/cmd/compile/internal/ir/node.go | 30 ++++++++--------- src/cmd/compile/internal/ir/op_string.go | 6 ++-- src/cmd/compile/internal/ir/sizeof_test.go | 2 +- 11 files changed, 82 insertions(+), 51 deletions(-) diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index 0cf59ee0ebc4e..e8a0617be3c8c 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -23,8 +23,7 @@ func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node { fn.Nname.Ntype = xtype fn.Nname.Defn = fn - clo := p.nod(expr, ir.OCLOSURE, nil, nil) - clo.SetFunc(fn) + clo := ir.NewClosureExpr(p.pos(expr), fn) fn.ClosureType = ntype fn.OClosure = clo @@ -285,21 +284,19 @@ func transformclosure(fn *ir.Func) { offset := int64(Widthptr) for _, v := range fn.ClosureVars { // cv refers to the field inside of closure OSTRUCTLIT. - cv := ir.Nod(ir.OCLOSUREVAR, nil, nil) - - cv.SetType(v.Type()) + typ := v.Type() if !v.Byval() { - cv.SetType(types.NewPtr(v.Type())) + typ = types.NewPtr(typ) } - offset = Rnd(offset, int64(cv.Type().Align)) - cv.SetOffset(offset) - offset += cv.Type().Width + offset = Rnd(offset, int64(typ.Align)) + cr := ir.NewClosureRead(typ, offset) + offset += typ.Width if v.Byval() && v.Type().Width <= int64(2*Widthptr) { // If it is a small variable captured by value, downgrade it to PAUTO. v.SetClass(ir.PAUTO) fn.Dcl = append(fn.Dcl, v) - body = append(body, ir.Nod(ir.OAS, v, cv)) + body = append(body, ir.Nod(ir.OAS, v, cr)) } else { // Declare variable holding addresses taken from closure // and initialize in entry prologue. @@ -310,10 +307,11 @@ func transformclosure(fn *ir.Func) { addr.Curfn = fn fn.Dcl = append(fn.Dcl, addr) v.Heapaddr = addr + var src ir.Node = cr if v.Byval() { - cv = ir.Nod(ir.OADDR, cv, nil) + src = ir.Nod(ir.OADDR, cr, nil) } - body = append(body, ir.Nod(ir.OAS, addr, cv)) + body = append(body, ir.Nod(ir.OAS, addr, src)) } } @@ -473,21 +471,17 @@ func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) *ir.Func { tfn.Type().SetPkg(t0.Pkg()) // Declare and initialize variable holding receiver. 
- - cv := ir.Nod(ir.OCLOSUREVAR, nil, nil) - cv.SetType(rcvrtype) - cv.SetOffset(Rnd(int64(Widthptr), int64(cv.Type().Align))) - + cr := ir.NewClosureRead(rcvrtype, Rnd(int64(Widthptr), int64(rcvrtype.Align))) ptr := NewName(lookup(".this")) declare(ptr, ir.PAUTO) ptr.SetUsed(true) var body []ir.Node if rcvrtype.IsPtr() || rcvrtype.IsInterface() { ptr.SetType(rcvrtype) - body = append(body, ir.Nod(ir.OAS, ptr, cv)) + body = append(body, ir.Nod(ir.OAS, ptr, cr)) } else { ptr.SetType(types.NewPtr(rcvrtype)) - body = append(body, ir.Nod(ir.OAS, ptr, ir.Nod(ir.OADDR, cv, nil))) + body = append(body, ir.Nod(ir.OAS, ptr, ir.Nod(ir.OADDR, cr, nil))) } call := ir.Nod(ir.OCALL, nodSym(ir.OXDOT, ptr, meth), nil) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 4bddb7f0f46c4..4cbc5d3851840 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -486,7 +486,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { default: base.Fatalf("unexpected expr: %v", n) - case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OCLOSUREVAR, ir.OTYPE, ir.OMETHEXPR: + case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OCLOSUREREAD, ir.OTYPE, ir.OMETHEXPR: // nop case ir.ONAME: @@ -1718,7 +1718,7 @@ func mayAffectMemory(n ir.Node) bool { // We're ignoring things like division by zero, index out of range, // and nil pointer dereference here. switch n.Op() { - case ir.ONAME, ir.OCLOSUREVAR, ir.OLITERAL, ir.ONIL: + case ir.ONAME, ir.OCLOSUREREAD, ir.OLITERAL, ir.ONIL: return false // Left+Right group. diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 20f145b8eba82..97f37a4716b2b 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -963,7 +963,7 @@ func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool) // Handle captured variables when inlining closures. 
if c := fn.OClosure; c != nil { - for _, v := range c.Func().ClosureVars { + for _, v := range fn.ClosureVars { if v.Op() == ir.OXXX { continue } @@ -973,7 +973,7 @@ func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool) // NB: if we enabled inlining of functions containing OCLOSURE or refined // the reassigned check via some sort of copy propagation this would most // likely need to be changed to a loop to walk up to the correct Param - if o == nil || (o.Curfn != Curfn && o.Curfn.OClosure != Curfn) { + if o == nil || o.Curfn != Curfn { base.Fatalf("%v: unresolvable capture %v %v\n", ir.Line(n), fn, v) } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 91faf18a1d4a5..10df6d5411bf2 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -2025,7 +2025,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { } addr := s.addr(n) return s.load(n.Type(), addr) - case ir.OCLOSUREVAR: + case ir.OCLOSUREREAD: addr := s.addr(n) return s.load(n.Type(), addr) case ir.ONIL: @@ -4895,7 +4895,7 @@ func (s *state) addr(n ir.Node) *ssa.Value { case ir.ODOTPTR: p := s.exprPtr(n.Left(), n.Bounded(), n.Pos()) return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p) - case ir.OCLOSUREVAR: + case ir.OCLOSUREREAD: return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)) case ir.OCONVNOP: diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 7d19a2b58e91f..8c2df77ffef92 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -1948,7 +1948,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetType(types.NewPtr(t.Elem())) } - case ir.OCLOSUREVAR: + case ir.OCLOSUREREAD: ok |= ctxExpr case ir.OCFUNC: @@ -3099,7 +3099,7 @@ func islvalue(n ir.Node) bool { return false } fallthrough - case ir.ODEREF, ir.ODOTPTR, ir.OCLOSUREVAR: + case ir.ODEREF, ir.ODOTPTR, ir.OCLOSUREREAD: return true case ir.ODOT: @@ -3186,7 +3186,7 @@ func samesafeexpr(l ir.Node, r ir.Node) bool { } switch l.Op() { - case ir.ONAME, ir.OCLOSUREVAR: + case ir.ONAME, ir.OCLOSUREREAD: return l == r case ir.ODOT, ir.ODOTPTR: diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index d749dff827431..e0e715716b1f5 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -555,7 +555,7 @@ opswitch: case ir.ORECOVER: n = mkcall("gorecover", n.Type(), init, ir.Nod(ir.OADDR, nodfp, nil)) - case ir.OCLOSUREVAR, ir.OCFUNC: + case ir.OCLOSUREREAD, ir.OCFUNC: case ir.OCALLINTER, ir.OCALLFUNC, ir.OCALLMETH: if n.Op() == ir.OCALLINTER { diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 418351742e76a..13774a2c7b915 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -6,6 +6,8 @@ package ir import ( "cmd/compile/internal/types" + "cmd/internal/src" + "fmt" ) // A miniStmt is a miniNode with extra fields common to expressions. @@ -45,3 +47,40 @@ func (n *miniExpr) SetBounded(b bool) { n.flags.set(miniExprBounded, b) } func (n *miniExpr) Init() Nodes { return n.init } func (n *miniExpr) PtrInit() *Nodes { return &n.init } func (n *miniExpr) SetInit(x Nodes) { n.init = x } + +// A ClosureExpr is a function literal expression. 
+type ClosureExpr struct { + miniExpr + fn *Func +} + +func NewClosureExpr(pos src.XPos, fn *Func) *ClosureExpr { + n := &ClosureExpr{fn: fn} + n.op = OCLOSURE + n.pos = pos + return n +} + +func (n *ClosureExpr) String() string { return fmt.Sprint(n) } +func (n *ClosureExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *ClosureExpr) RawCopy() Node { c := *n; return &c } +func (n *ClosureExpr) Func() *Func { return n.fn } + +// A ClosureRead denotes reading a variable stored within a closure struct. +type ClosureRead struct { + miniExpr + offset int64 +} + +func NewClosureRead(typ *types.Type, offset int64) *ClosureRead { + n := &ClosureRead{offset: offset} + n.typ = typ + n.op = OCLOSUREREAD + return n +} + +func (n *ClosureRead) String() string { return fmt.Sprint(n) } +func (n *ClosureRead) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *ClosureRead) RawCopy() Node { c := *n; return &c } +func (n *ClosureRead) Type() *types.Type { return n.typ } +func (n *ClosureRead) Offset() int64 { return n.offset } diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 92a24c838569f..9d2a8ad94bdfb 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -53,8 +53,8 @@ type Func struct { body Nodes iota int64 - Nname *Name // ONAME node - OClosure Node // OCLOSURE node + Nname *Name // ONAME node + OClosure *ClosureExpr // OCLOSURE node Shortname *types.Sym diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 02a5d7769a4c3..8e10569f6a63e 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -601,20 +601,20 @@ const ( OTARRAY // []int, [8]int, [N]int or [...]int // misc - ODDD // func f(args ...int) or f(l...) or var a = [...]int{0, 1, 2}. - OINLCALL // intermediary representation of an inlined call. - OEFACE // itable and data words of an empty-interface value. - OITAB // itable word of an interface value. - OIDATA // data word of an interface value in Left - OSPTR // base pointer of a slice or string. - OCLOSUREVAR // variable reference at beginning of closure function - OCFUNC // reference to c function pointer (not go func value) - OCHECKNIL // emit code to ensure pointer/interface not nil - OVARDEF // variable is about to be fully initialized - OVARKILL // variable is dead - OVARLIVE // variable is alive - ORESULT // result of a function call; Xoffset is stack offset - OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree. + ODDD // func f(args ...int) or f(l...) or var a = [...]int{0, 1, 2}. + OINLCALL // intermediary representation of an inlined call. + OEFACE // itable and data words of an empty-interface value. + OITAB // itable word of an interface value. + OIDATA // data word of an interface value in Left + OSPTR // base pointer of a slice or string. + OCLOSUREREAD // read from inside closure struct at beginning of closure function + OCFUNC // reference to c function pointer (not go func value) + OCHECKNIL // emit code to ensure pointer/interface not nil + OVARDEF // variable is about to be fully initialized + OVARKILL // variable is dead + OVARLIVE // variable is alive + ORESULT // result of a function call; Xoffset is stack offset + OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree. 
// arch-specific opcodes ORETJMP // return to other function @@ -1162,8 +1162,6 @@ var okForNod = [OEND]bool{ OCFUNC: true, OCHECKNIL: true, OCLOSE: true, - OCLOSURE: true, - OCLOSUREVAR: true, OCOMPLEX: true, OCOMPLIT: true, OCONV: true, diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go index d0d3778357e67..637c924dd550e 100644 --- a/src/cmd/compile/internal/ir/op_string.go +++ b/src/cmd/compile/internal/ir/op_string.go @@ -152,7 +152,7 @@ func _() { _ = x[OITAB-141] _ = x[OIDATA-142] _ = x[OSPTR-143] - _ = x[OCLOSUREVAR-144] + _ = x[OCLOSUREREAD-144] _ = x[OCFUNC-145] _ = x[OCHECKNIL-146] _ = x[OVARDEF-147] @@ -165,9 +165,9 @@ func _() { _ = x[OEND-154] } -const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRBLOCKBREAKCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDINLCALLEFACEITABIDATASPTRCLOSUREVARCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND" +const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRBLOCKBREAKCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDINLCALLEFACEITABIDATASPTRCLOSUREREADCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND" -var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 310, 317, 323, 326, 332, 339, 347, 351, 358, 366, 368, 370, 372, 374, 376, 378, 383, 388, 396, 399, 408, 411, 415, 423, 430, 439, 452, 455, 458, 461, 464, 467, 470, 476, 479, 485, 488, 494, 498, 501, 505, 510, 515, 521, 526, 530, 535, 543, 551, 557, 566, 577, 584, 588, 595, 602, 610, 614, 618, 622, 629, 636, 644, 650, 658, 663, 668, 672, 680, 685, 690, 694, 697, 705, 709, 711, 716, 718, 723, 729, 735, 741, 747, 752, 756, 763, 769, 774, 780, 783, 790, 795, 799, 804, 808, 818, 823, 831, 837, 844, 851, 857, 864, 870, 874, 877} +var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 
310, 317, 323, 326, 332, 339, 347, 351, 358, 366, 368, 370, 372, 374, 376, 378, 383, 388, 396, 399, 408, 411, 415, 423, 430, 439, 452, 455, 458, 461, 464, 467, 470, 476, 479, 485, 488, 494, 498, 501, 505, 510, 515, 521, 526, 530, 535, 543, 551, 557, 566, 577, 584, 588, 595, 602, 610, 614, 618, 622, 629, 636, 644, 650, 658, 663, 668, 672, 680, 685, 690, 694, 697, 705, 709, 711, 716, 718, 723, 729, 735, 741, 747, 752, 756, 763, 769, 774, 780, 783, 790, 795, 799, 804, 808, 819, 824, 832, 838, 845, 852, 858, 865, 871, 875, 878} func (i Op) String() string { if i >= Op(len(_Op_index)-1) { diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go index 9321f765e0e28..0859022a62a9b 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {Func{}, 172, 296}, + {Func{}, 168, 288}, {Name{}, 128, 224}, {node{}, 84, 144}, } From e5c6463e205e0dfe5df8af59c76fd1ee94feddd4 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 26 Nov 2020 01:05:39 -0500 Subject: [PATCH 061/474] [dev.regabi] cmd/compile: add ir.CallPartExpr Now there are no longer any generic nodes with a non-nil associated Func, so node.fn can be deleted. Also all manipulation of func fields is done with concrete types, so Node.SetFunc can be deleted, along with generic implementations. Passes buildall w/ toolstash -cmp. Change-Id: I4fee99870951ec9dc224f146d87b22e2bfe16889 Reviewed-on: https://go-review.googlesource.com/c/go/+/274099 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/closure.go | 10 +++------ src/cmd/compile/internal/gc/dcl.go | 2 +- src/cmd/compile/internal/gc/ssa.go | 8 +++++-- src/cmd/compile/internal/gc/typecheck.go | 2 +- src/cmd/compile/internal/gc/walk.go | 2 +- src/cmd/compile/internal/ir/expr.go | 26 ++++++++++++++++++++++ src/cmd/compile/internal/ir/mini.go | 1 - src/cmd/compile/internal/ir/node.go | 8 +------ src/cmd/compile/internal/ir/sizeof_test.go | 2 +- 9 files changed, 40 insertions(+), 21 deletions(-) diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index e8a0617be3c8c..58113977d5233 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -414,7 +414,7 @@ func walkclosure(clo ir.Node, init *ir.Nodes) ir.Node { return walkexpr(clos, init) } -func typecheckpartialcall(dot ir.Node, sym *types.Sym) { +func typecheckpartialcall(dot ir.Node, sym *types.Sym) *ir.CallPartExpr { switch dot.Op() { case ir.ODOTINTER, ir.ODOTMETH: break @@ -427,11 +427,7 @@ func typecheckpartialcall(dot ir.Node, sym *types.Sym) { fn := makepartialcall(dot, dot.Type(), sym) fn.SetWrapper(true) - dot.SetOp(ir.OCALLPART) - dot.SetRight(NewName(sym)) - dot.SetType(fn.Type()) - dot.SetFunc(fn) - dot.SetOpt(nil) // clear types.Field from ODOTMETH + return ir.NewCallPartExpr(dot.Pos(), dot.Left(), NewName(sym), fn) } // makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed @@ -522,7 +518,7 @@ func partialCallType(n ir.Node) *types.Type { return t } -func walkpartialcall(n ir.Node, init *ir.Nodes) ir.Node { +func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node { // Create closure in the form of a composite literal. 
// For x.M with receiver (x) type T, the generated code looks like: // diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 2bcee269d9f1d..5d1bde384a64d 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -795,7 +795,7 @@ func methodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sy // - msym is the method symbol // - t is function type (with receiver) // Returns a pointer to the existing or added Field; or nil if there's an error. -func addmethod(n ir.Node, msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field { +func addmethod(n *ir.Func, msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field { if msym == nil { base.Fatalf("no method symbol") } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 10df6d5411bf2..6d818be1322ef 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -4146,10 +4146,14 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder { } func isIntrinsicCall(n ir.Node) bool { - if n == nil || n.Left() == nil { + if n == nil { return false } - return findIntrinsic(n.Left().Sym()) != nil + name, ok := n.Left().(*ir.Name) + if !ok { + return false + } + return findIntrinsic(name.Sym()) != nil } // intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation. diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 8c2df77ffef92..0ed5009a22c0a 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -964,7 +964,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { if top&ctxCallee != 0 { ok |= ctxCallee } else { - typecheckpartialcall(n, s) + n = typecheckpartialcall(n, s) ok |= ctxExpr } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index e0e715716b1f5..e04413841a6e5 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -1578,7 +1578,7 @@ opswitch: n = walkclosure(n, init) case ir.OCALLPART: - n = walkpartialcall(n, init) + n = walkpartialcall(n.(*ir.CallPartExpr), init) } // Expressions that are constant at run time but not diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 13774a2c7b915..2c1391859972e 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -84,3 +84,29 @@ func (n *ClosureRead) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *ClosureRead) RawCopy() Node { c := *n; return &c } func (n *ClosureRead) Type() *types.Type { return n.typ } func (n *ClosureRead) Offset() int64 { return n.offset } + +// A CallPartExpr is a method expression X.Method (uncalled). 
+type CallPartExpr struct { + miniExpr + fn *Func + X Node + Method *Name +} + +func NewCallPartExpr(pos src.XPos, x Node, method *Name, fn *Func) *CallPartExpr { + n := &CallPartExpr{fn: fn, X: x, Method: method} + n.op = OCALLPART + n.pos = pos + n.typ = fn.Type() + n.fn = fn + return n +} + +func (n *CallPartExpr) String() string { return fmt.Sprint(n) } +func (n *CallPartExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *CallPartExpr) RawCopy() Node { c := *n; return &c } +func (n *CallPartExpr) Func() *Func { return n.fn } +func (n *CallPartExpr) Left() Node { return n.X } +func (n *CallPartExpr) Right() Node { return n.Method } +func (n *CallPartExpr) SetLeft(x Node) { n.X = x } +func (n *CallPartExpr) SetRight(x Node) { n.Method = x.(*Name) } diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go index 248fe232cb88d..338ded3308f32 100644 --- a/src/cmd/compile/internal/ir/mini.go +++ b/src/cmd/compile/internal/ir/mini.go @@ -128,7 +128,6 @@ func (n *miniNode) SetSubOp(Op) { panic(n.no("SetSubOp")) } func (n *miniNode) Type() *types.Type { return nil } func (n *miniNode) SetType(*types.Type) { panic(n.no("SetType")) } func (n *miniNode) Func() *Func { return nil } -func (n *miniNode) SetFunc(*Func) { panic(n.no("SetFunc")) } func (n *miniNode) Name() *Name { return nil } func (n *miniNode) Sym() *types.Sym { return nil } func (n *miniNode) SetSym(*types.Sym) { panic(n.no("SetSym")) } diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 8e10569f6a63e..f09727c36984b 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -56,7 +56,6 @@ type Node interface { Type() *types.Type SetType(t *types.Type) Func() *Func - SetFunc(x *Func) Name() *Name Sym() *types.Sym SetSym(x *types.Sym) @@ -143,9 +142,6 @@ type node struct { typ *types.Type orig Node // original form, for printing, and tracking copies of ONAMEs - // func - fn *Func - sym *types.Sym // various opt interface{} @@ -177,8 +173,7 @@ func (n *node) Orig() Node { return n.orig } func (n *node) SetOrig(x Node) { n.orig = x } func (n *node) Type() *types.Type { return n.typ } func (n *node) SetType(x *types.Type) { n.typ = x } -func (n *node) Func() *Func { return n.fn } -func (n *node) SetFunc(x *Func) { n.fn = x } +func (n *node) Func() *Func { return nil } func (n *node) Name() *Name { return nil } func (n *node) Sym() *types.Sym { return n.sym } func (n *node) SetSym(x *types.Sym) { n.sym = x } @@ -1156,7 +1151,6 @@ var okForNod = [OEND]bool{ OCALLFUNC: true, OCALLINTER: true, OCALLMETH: true, - OCALLPART: true, OCAP: true, OCASE: true, OCFUNC: true, diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go index 0859022a62a9b..2f31ba8d34dd3 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -22,7 +22,7 @@ func TestSizeof(t *testing.T) { }{ {Func{}, 168, 288}, {Name{}, 128, 224}, - {node{}, 84, 144}, + {node{}, 80, 136}, } for _, tt := range tests { From 1b84aabb01770ae65d28f951c65a9eb6c16441d7 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sat, 28 Nov 2020 09:07:48 -0500 Subject: [PATCH 062/474] [dev.regabi] cmd/compile: move typenod, typenodl to ir.TypeNode, ir.TypeNodeAt [generated] [git-generate] cd src/cmd/compile/internal/gc rf ' mv typenod TypeNode mv typenodl TypeNodeAt mv TypeNode TypeNodeAt type.go mv type.go cmd/compile/internal/ir ' Passes buildall w/ toolstash -cmp. 
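Call sites update mechanically under the rename; for example, from the
universe.go hunk below:

	// before (package gc)
	s2.Def = typenod(t)

	// after
	s2.Def = ir.TypeNode(t)
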
Change-Id: Id546a8cfae93074ebb1496490da7635800807faf Reviewed-on: https://go-review.googlesource.com/c/go/+/274100 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/closure.go | 4 ++-- src/cmd/compile/internal/gc/dcl.go | 17 --------------- src/cmd/compile/internal/gc/iexport.go | 2 +- src/cmd/compile/internal/gc/iimport.go | 10 ++++----- src/cmd/compile/internal/gc/inl.go | 2 +- src/cmd/compile/internal/gc/sinit.go | 4 ++-- src/cmd/compile/internal/gc/typecheck.go | 6 +++--- src/cmd/compile/internal/gc/universe.go | 12 +++++------ src/cmd/compile/internal/gc/walk.go | 4 ++-- src/cmd/compile/internal/ir/type.go | 27 ++++++++++++++++++++++++ 10 files changed, 49 insertions(+), 39 deletions(-) create mode 100644 src/cmd/compile/internal/ir/type.go diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index 58113977d5233..ee09e7876eed8 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -392,7 +392,7 @@ func walkclosure(clo ir.Node, init *ir.Nodes) ir.Node { typ := closureType(clo) - clos := ir.Nod(ir.OCOMPLIT, nil, typenod(typ)) + clos := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(typ)) clos.SetEsc(clo.Esc()) clos.PtrList().Set(append([]ir.Node{ir.Nod(ir.OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...)) @@ -542,7 +542,7 @@ func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node { typ := partialCallType(n) - clos := ir.Nod(ir.OCOMPLIT, nil, typenod(typ)) + clos := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(typ)) clos.SetEsc(n.Esc()) clos.PtrList().Set2(ir.Nod(ir.OCFUNC, n.Func().Nname, nil), n.Left()) diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 5d1bde384a64d..3d8f97d93dd51 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -221,23 +221,6 @@ func dclname(s *types.Sym) *ir.Name { return n } -func typenod(t *types.Type) ir.Node { - return typenodl(src.NoXPos, t) -} - -func typenodl(pos src.XPos, t *types.Type) ir.Node { - // if we copied another type with *t = *u - // then t->nod might be out of date, so - // check t->nod->type too - if ir.AsNode(t.Nod) == nil || ir.AsNode(t.Nod).Type() != t { - t.Nod = ir.NodAt(pos, ir.OTYPE, nil, nil) - ir.AsNode(t.Nod).SetType(t) - ir.AsNode(t.Nod).SetSym(t.Sym) - } - - return ir.AsNode(t.Nod) -} - func anonfield(typ *types.Type) ir.Node { return symfield(nil, typ) } diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 3f5ec2e4ddbc1..3f0f381974ea3 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -640,7 +640,7 @@ func (w *exportWriter) doTyp(t *types.Type) { } w.startType(definedType) - w.qualifiedIdent(typenod(t)) + w.qualifiedIdent(ir.TypeNode(t)) return } diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 5a50682ab21c9..88f6e36e0735f 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -836,7 +836,7 @@ func (r *importReader) node() ir.Node { // unreachable - should have been resolved by typechecking case ir.OTYPE: - return typenod(r.typ()) + return ir.TypeNode(r.typ()) case ir.OTYPESW: n := ir.NodAt(r.pos(), ir.OTYPESW, nil, nil) @@ -860,7 +860,7 @@ func (r *importReader) node() ir.Node { // TODO(mdempsky): Export position information for OSTRUCTKEY nodes. 
savedlineno := base.Pos base.Pos = r.pos() - n := ir.NodAt(base.Pos, ir.OCOMPLIT, nil, typenod(r.typ())) + n := ir.NodAt(base.Pos, ir.OCOMPLIT, nil, ir.TypeNode(r.typ())) n.PtrList().Set(r.elemList()) // special handling of field names base.Pos = savedlineno return n @@ -869,7 +869,7 @@ func (r *importReader) node() ir.Node { // unreachable - mapped to case OCOMPLIT below by exporter case ir.OCOMPLIT: - n := ir.NodAt(r.pos(), ir.OCOMPLIT, nil, typenod(r.typ())) + n := ir.NodAt(r.pos(), ir.OCOMPLIT, nil, ir.TypeNode(r.typ())) n.PtrList().Set(r.exprList()) return n @@ -944,7 +944,7 @@ func (r *importReader) node() ir.Node { case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE: n := npos(r.pos(), builtinCall(ir.OMAKE)) - n.PtrList().Append(typenod(r.typ())) + n.PtrList().Append(ir.TypeNode(r.typ())) n.PtrList().Append(r.exprList()...) return n @@ -971,7 +971,7 @@ func (r *importReader) node() ir.Node { case ir.ODCL: pos := r.pos() lhs := npos(pos, dclname(r.ident())) - typ := typenod(r.typ()) + typ := ir.TypeNode(r.typ()) return npos(pos, liststmt(variter([]ir.Node{lhs}, typ, nil))) // TODO(gri) avoid list creation // case ODCLFIELD: diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 97f37a4716b2b..bbbffebf5cec6 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -1108,7 +1108,7 @@ func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool) vas.SetRight(nodnil()) vas.Right().SetType(param.Type) } else { - vas.SetRight(ir.Nod(ir.OCOMPLIT, nil, typenod(param.Type))) + vas.SetRight(ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(param.Type))) vas.Right().PtrList().Set(varargs) } } diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index fca81763c0f0a..ff3d3281ddfa5 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -687,7 +687,7 @@ func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) { a = ir.Nod(ir.OADDR, a, nil) } else { a = ir.Nod(ir.ONEW, nil, nil) - a.PtrList().Set1(typenod(t)) + a.PtrList().Set1(ir.TypeNode(t)) } a = ir.Nod(ir.OAS, vauto, a) @@ -763,7 +763,7 @@ func maplit(n ir.Node, m ir.Node, init *ir.Nodes) { // make the map var a := ir.Nod(ir.OMAKE, nil, nil) a.SetEsc(n.Esc()) - a.PtrList().Set2(typenod(n.Type()), nodintconst(int64(n.List().Len()))) + a.PtrList().Set2(ir.TypeNode(n.Type()), nodintconst(int64(n.List().Len()))) litas(m, a, init) entries := n.List().Slice() diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 0ed5009a22c0a..a1b18097906db 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -2785,11 +2785,11 @@ func pushtype(n ir.Node, t *types.Type) ir.Node { switch { case iscomptype(t): // For T, return T{...}. - n.SetRight(typenod(t)) + n.SetRight(ir.TypeNode(t)) case t.IsPtr() && iscomptype(t.Elem()): // For *T, return &T{...}. 
- n.SetRight(typenod(t.Elem())) + n.SetRight(ir.TypeNode(t.Elem())) n = ir.NodAt(n.Pos(), ir.OADDR, n, nil) n.SetImplicit(true) @@ -3458,7 +3458,7 @@ func stringtoruneslit(n ir.Node) ir.Node { i++ } - nn := ir.Nod(ir.OCOMPLIT, nil, typenod(n.Type())) + nn := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(n.Type())) nn.PtrList().Set(l) nn = typecheck(nn, ctxExpr) return nn diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index 1068720748002..931135759ac84 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -109,7 +109,7 @@ func lexinit() { } types.Types[etype] = t } - s2.Def = typenod(t) + s2.Def = ir.TypeNode(t) } for _, s := range &builtinFuncs { @@ -176,7 +176,7 @@ func typeinit() { t := types.New(types.TUNSAFEPTR) types.Types[types.TUNSAFEPTR] = t t.Sym = unsafepkg.Lookup("Pointer") - t.Sym.Def = typenod(t) + t.Sym.Def = ir.TypeNode(t) dowidth(types.Types[types.TUNSAFEPTR]) for et := types.TINT8; et <= types.TUINT64; et++ { @@ -337,7 +337,7 @@ func lexinit1() { types.Errortype = makeErrorInterface() types.Errortype.Sym = s types.Errortype.Orig = makeErrorInterface() - s.Def = typenod(types.Errortype) + s.Def = ir.TypeNode(types.Errortype) dowidth(types.Errortype) // We create separate byte and rune types for better error messages @@ -352,14 +352,14 @@ func lexinit1() { s = ir.BuiltinPkg.Lookup("byte") types.Bytetype = types.New(types.TUINT8) types.Bytetype.Sym = s - s.Def = typenod(types.Bytetype) + s.Def = ir.TypeNode(types.Bytetype) dowidth(types.Bytetype) // rune alias s = ir.BuiltinPkg.Lookup("rune") types.Runetype = types.New(types.TINT32) types.Runetype.Sym = s - s.Def = typenod(types.Runetype) + s.Def = ir.TypeNode(types.Runetype) dowidth(types.Runetype) // backend-dependent builtin types (e.g. int). @@ -376,7 +376,7 @@ func lexinit1() { t := types.New(s.etype) t.Sym = s1 types.Types[s.etype] = t - s1.Def = typenod(t) + s1.Def = ir.TypeNode(t) s1.Origpkg = ir.BuiltinPkg dowidth(t) diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index e04413841a6e5..2376bfc093503 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -1810,7 +1810,7 @@ func mkdotargslice(typ *types.Type, args []ir.Node) ir.Node { n = nodnil() n.SetType(typ) } else { - n = ir.Nod(ir.OCOMPLIT, nil, typenod(typ)) + n = ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(typ)) n.PtrList().Append(args...) n.SetImplicit(true) } @@ -2687,7 +2687,7 @@ func addstr(n ir.Node, init *ir.Nodes) ir.Node { fn = "concatstrings" t := types.NewSlice(types.Types[types.TSTRING]) - slice := ir.Nod(ir.OCOMPLIT, nil, typenod(t)) + slice := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(t)) if prealloc[n] != nil { prealloc[slice] = prealloc[n] } diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go new file mode 100644 index 0000000000000..3409424fed2e8 --- /dev/null +++ b/src/cmd/compile/internal/ir/type.go @@ -0,0 +1,27 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ir + +import ( + "cmd/compile/internal/types" + "cmd/internal/src" +) + +func TypeNode(t *types.Type) Node { + return TypeNodeAt(src.NoXPos, t) +} + +func TypeNodeAt(pos src.XPos, t *types.Type) Node { + // if we copied another type with *t = *u + // then t->nod might be out of date, so + // check t->nod->type too + if AsNode(t.Nod) == nil || AsNode(t.Nod).Type() != t { + t.Nod = NodAt(pos, OTYPE, nil, nil) + AsNode(t.Nod).SetType(t) + AsNode(t.Nod).SetSym(t.Sym) + } + + return AsNode(t.Nod) +} From f0001e8867be0004a8bc13eaea8b59653a5d141e Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sun, 29 Nov 2020 08:09:01 -0500 Subject: [PATCH 063/474] [dev.regabi] cmd/compile: add OTSLICE Op This is not safe for toolstash -cmp and so is split into its own CL. Change-Id: Ic3254e68fb84a90a11ac5f0b59ef252135c23658 Reviewed-on: https://go-review.googlesource.com/c/go/+/274101 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/node.go | 1 + src/cmd/compile/internal/ir/op_string.go | 39 ++++++++++++------------ 2 files changed, 21 insertions(+), 19 deletions(-) diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index f09727c36984b..47c38c2ab5dde 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -594,6 +594,7 @@ const ( // list of result fields. OTFUNC OTARRAY // []int, [8]int, [N]int or [...]int + OTSLICE // to be used in future CL // misc ODDD // func f(args ...int) or f(l...) or var a = [...]int{0, 1, 2}. diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go index 637c924dd550e..faec164c7bfab 100644 --- a/src/cmd/compile/internal/ir/op_string.go +++ b/src/cmd/compile/internal/ir/op_string.go @@ -146,28 +146,29 @@ func _() { _ = x[OTINTER-135] _ = x[OTFUNC-136] _ = x[OTARRAY-137] - _ = x[ODDD-138] - _ = x[OINLCALL-139] - _ = x[OEFACE-140] - _ = x[OITAB-141] - _ = x[OIDATA-142] - _ = x[OSPTR-143] - _ = x[OCLOSUREREAD-144] - _ = x[OCFUNC-145] - _ = x[OCHECKNIL-146] - _ = x[OVARDEF-147] - _ = x[OVARKILL-148] - _ = x[OVARLIVE-149] - _ = x[ORESULT-150] - _ = x[OINLMARK-151] - _ = x[ORETJMP-152] - _ = x[OGETG-153] - _ = x[OEND-154] + _ = x[OTSLICE-138] + _ = x[ODDD-139] + _ = x[OINLCALL-140] + _ = x[OEFACE-141] + _ = x[OITAB-142] + _ = x[OIDATA-143] + _ = x[OSPTR-144] + _ = x[OCLOSUREREAD-145] + _ = x[OCFUNC-146] + _ = x[OCHECKNIL-147] + _ = x[OVARDEF-148] + _ = x[OVARKILL-149] + _ = x[OVARLIVE-150] + _ = x[ORESULT-151] + _ = x[OINLMARK-152] + _ = x[ORETJMP-153] + _ = x[OGETG-154] + _ = x[OEND-155] } -const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRBLOCKBREAKCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDINLCALLEFACEITABIDATASPTRCLOSUREREADCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND" +const _Op_name = 
"XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRBLOCKBREAKCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEDDDINLCALLEFACEITABIDATASPTRCLOSUREREADCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND" -var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 310, 317, 323, 326, 332, 339, 347, 351, 358, 366, 368, 370, 372, 374, 376, 378, 383, 388, 396, 399, 408, 411, 415, 423, 430, 439, 452, 455, 458, 461, 464, 467, 470, 476, 479, 485, 488, 494, 498, 501, 505, 510, 515, 521, 526, 530, 535, 543, 551, 557, 566, 577, 584, 588, 595, 602, 610, 614, 618, 622, 629, 636, 644, 650, 658, 663, 668, 672, 680, 685, 690, 694, 697, 705, 709, 711, 716, 718, 723, 729, 735, 741, 747, 752, 756, 763, 769, 774, 780, 783, 790, 795, 799, 804, 808, 819, 824, 832, 838, 845, 852, 858, 865, 871, 875, 878} +var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 310, 317, 323, 326, 332, 339, 347, 351, 358, 366, 368, 370, 372, 374, 376, 378, 383, 388, 396, 399, 408, 411, 415, 423, 430, 439, 452, 455, 458, 461, 464, 467, 470, 476, 479, 485, 488, 494, 498, 501, 505, 510, 515, 521, 526, 530, 535, 543, 551, 557, 566, 577, 584, 588, 595, 602, 610, 614, 618, 622, 629, 636, 644, 650, 658, 663, 668, 672, 680, 685, 690, 694, 697, 705, 709, 711, 716, 718, 723, 729, 735, 741, 747, 752, 756, 763, 769, 774, 780, 786, 789, 796, 801, 805, 810, 814, 825, 830, 838, 844, 851, 858, 864, 871, 877, 881, 884} func (i Op) String() string { if i >= Op(len(_Op_index)-1) { From d40869fcedb208b0bcf7e7d828db12f210a17dc6 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sun, 29 Nov 2020 09:38:52 -0500 Subject: [PATCH 064/474] [dev.regabi] cmd/compile: move gc.treecopy to ir.DeepCopy This is a general operation on IR nodes, so it belongs in ir. The copied implementation is adapted to support the extension pattern, allowing nodes to implement their own DeepCopy implementations if needed. This is the first step toward higher-level operations instead of Left, Right, etc. It will allow the new type syntax nodes to be properly immutable and opt out of those fine-grained methods. Passes buildall w/ toolstash -cmp. 
Change-Id: Ibd64061e01daf14aebc6586cb2eb2b12057ca85a Reviewed-on: https://go-review.googlesource.com/c/go/+/274102 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/noder.go | 2 +- src/cmd/compile/internal/gc/order.go | 8 +- src/cmd/compile/internal/gc/subr.go | 44 ---------- src/cmd/compile/internal/ir/copy.go | 127 +++++++++++++++++++++++++++ src/cmd/compile/internal/ir/node.go | 53 ----------- 5 files changed, 133 insertions(+), 101 deletions(-) create mode 100644 src/cmd/compile/internal/ir/copy.go diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 8ae5874d3b463..1c433b5d30085 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -451,7 +451,7 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node { } v := values[i] if decl.Values == nil { - v = treecopy(v, n.Pos()) + v = ir.DeepCopy(n.Pos(), v) } n.SetOp(ir.OLITERAL) diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 6a91b8c91bdd4..d4db7be9119f9 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -609,7 +609,10 @@ func (o *Order) stmt(n ir.Node) { n.SetLeft(o.safeExpr(n.Left())) - l := treecopy(n.Left(), src.NoXPos) + // TODO(rsc): Why is this DeepCopy? + // We should know enough about the form here + // to do something more provably shallower. + l := ir.DeepCopy(src.NoXPos, n.Left()) if l.Op() == ir.OINDEXMAP { l.SetIndexMapLValue(false) } @@ -1123,8 +1126,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { needCopy = mapKeyReplaceStrConv(n.Right()) if instrumenting { - // Race detector needs the copy so it can - // call treecopy on the result. + // Race detector needs the copy. needCopy = true } } diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 336465db9800d..25490246e6d0b 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -181,42 +181,6 @@ func nodstr(s string) ir.Node { return ir.NewLiteral(constant.MakeString(s)) } -// treecopy recursively copies n, with the exception of -// ONAME, OLITERAL, OTYPE, and ONONAME leaves. -// If pos.IsKnown(), it sets the source position of newly -// allocated nodes to pos. -func treecopy(n ir.Node, pos src.XPos) ir.Node { - if n == nil { - return nil - } - - switch n.Op() { - default: - m := ir.SepCopy(n) - m.SetLeft(treecopy(n.Left(), pos)) - m.SetRight(treecopy(n.Right(), pos)) - m.PtrList().Set(listtreecopy(n.List().Slice(), pos)) - if pos.IsKnown() { - m.SetPos(pos) - } - if m.Name() != nil && n.Op() != ir.ODCLFIELD { - ir.Dump("treecopy", n) - base.Fatalf("treecopy Name") - } - return m - - case ir.OPACK: - // OPACK nodes are never valid in const value declarations, - // but allow them like any other declared symbol to avoid - // crashing (golang.org/issue/11361). 
- fallthrough - - case ir.ONAME, ir.ONONAME, ir.OLITERAL, ir.ONIL, ir.OTYPE: - return n - - } -} - func isptrto(t *types.Type, et types.EType) bool { if t == nil { return false @@ -1375,14 +1339,6 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool return true } -func listtreecopy(l []ir.Node, pos src.XPos) []ir.Node { - var out []ir.Node - for _, n := range l { - out = append(out, treecopy(n, pos)) - } - return out -} - func liststmt(l []ir.Node) ir.Node { n := ir.Nod(ir.OBLOCK, nil, nil) n.PtrList().Set(l) diff --git a/src/cmd/compile/internal/ir/copy.go b/src/cmd/compile/internal/ir/copy.go new file mode 100644 index 0000000000000..7a1611d0d6e62 --- /dev/null +++ b/src/cmd/compile/internal/ir/copy.go @@ -0,0 +1,127 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import ( + "cmd/compile/internal/base" + "cmd/internal/src" +) + +// A Node may implement the Orig and SetOrig method to +// maintain a pointer to the "unrewritten" form of a Node. +// If a Node does not implement OrigNode, it is its own Orig. +// +// Note that both SepCopy and Copy have definitions compatible +// with a Node that does not implement OrigNode: such a Node +// is its own Orig, and in that case, that's what both want to return +// anyway (SepCopy unconditionally, and Copy only when the input +// is its own Orig as well, but if the output does not implement +// OrigNode, then neither does the input, making the condition true). +type OrigNode interface { + Node + Orig() Node + SetOrig(Node) +} + +// Orig returns the “original” node for n. +// If n implements OrigNode, Orig returns n.Orig(). +// Otherwise Orig returns n itself. +func Orig(n Node) Node { + if n, ok := n.(OrigNode); ok { + o := n.Orig() + if o == nil { + Dump("Orig nil", n) + base.Fatalf("Orig returned nil") + } + return o + } + return n +} + +// SepCopy returns a separate shallow copy of n, +// breaking any Orig link to any other nodes. +func SepCopy(n Node) Node { + n = n.RawCopy() + if n, ok := n.(OrigNode); ok { + n.SetOrig(n) + } + return n +} + +// Copy returns a shallow copy of n. +// If Orig(n) == n, then Orig(Copy(n)) == the copy. +// Otherwise the Orig link is preserved as well. +// +// The specific semantics surrounding Orig are subtle but right for most uses. +// See issues #26855 and #27765 for pitfalls. +func Copy(n Node) Node { + copy := n.RawCopy() + if n, ok := n.(OrigNode); ok && n.Orig() == n { + copy.(OrigNode).SetOrig(copy) + } + return copy +} + +// A Node can implement DeepCopyNode to provide a custom implementation +// of DeepCopy. If the compiler only needs access to a Node's structure during +// DeepCopy, then a Node can implement DeepCopyNode instead of providing +// fine-grained mutable access with Left, SetLeft, Right, SetRight, and so on. +type DeepCopyNode interface { + Node + DeepCopy(pos src.XPos) Node +} + +// DeepCopy returns a “deep” copy of n, with its entire structure copied +// (except for shared nodes like ONAME, ONONAME, OLITERAL, and OTYPE). +// If pos.IsKnown(), it sets the source position of newly allocated Nodes to pos. +// +// The default implementation is to traverse the Node graph, making +// a shallow copy of each node and then updating each field to point +// at shallow copies of children, recursively, using Left, SetLeft, and so on. 
+
+// If a Node wishes to provide an alternate implementation, it can
+// implement a DeepCopy method: see the DeepCopyNode interface.
+func DeepCopy(pos src.XPos, n Node) Node {
+	if n == nil {
+		return nil
+	}
+
+	if n, ok := n.(DeepCopyNode); ok {
+		return n.DeepCopy(pos)
+	}
+
+	switch n.Op() {
+	default:
+		m := SepCopy(n)
+		m.SetLeft(DeepCopy(pos, n.Left()))
+		m.SetRight(DeepCopy(pos, n.Right()))
+		m.PtrList().Set(deepCopyList(pos, n.List().Slice()))
+		if pos.IsKnown() {
+			m.SetPos(pos)
+		}
+		if m.Name() != nil {
+			Dump("DeepCopy", n)
+			base.Fatalf("DeepCopy Name")
+		}
+		return m
+
+	case OPACK:
+		// OPACK nodes are never valid in const value declarations,
+		// but allow them like any other declared symbol to avoid
+		// crashing (golang.org/issue/11361).
+		fallthrough
+
+	case ONAME, ONONAME, OLITERAL, ONIL, OTYPE:
+		return n
+	}
+}
+
+func deepCopyList(pos src.XPos, list []Node) []Node {
+	var out []Node
+	for _, n := range list {
+		out = append(out, DeepCopy(pos, n))
+	}
+	return out
+}
diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go
index 47c38c2ab5dde..653410d175be9 100644
--- a/src/cmd/compile/internal/ir/node.go
+++ b/src/cmd/compile/internal/ir/node.go
@@ -1021,59 +1021,6 @@ func (n *node) RawCopy() Node {
 	return &c
 }
 
-// A Node may implement the Orig and SetOrig method to
-// maintain a pointer to the "unrewritten" form of a Node.
-// If a Node does not implement OrigNode, it is its own Orig.
-//
-// Note that both SepCopy and Copy have definitions compatible
-// with a Node that does not implement OrigNode: such a Node
-// is its own Orig, and in that case, that's what both want to return
-// anyway (SepCopy unconditionally, and Copy only when the input
-// is its own Orig as well, but if the output does not implement
-// OrigNode, then neither does the input, making the condition true).
-type OrigNode interface {
-	Node
-	Orig() Node
-	SetOrig(Node)
-}
-
-func Orig(n Node) Node {
-	if n, ok := n.(OrigNode); ok {
-		o := n.Orig()
-		if o == nil {
-			Dump("Orig nil", n)
-			base.Fatalf("Orig returned nil")
-		}
-		return o
-	}
-	return n
-}
-
-// sepcopy returns a separate shallow copy of n, with the copy's
-// Orig pointing to itself.
-func SepCopy(n Node) Node {
-	n = n.RawCopy()
-	if n, ok := n.(OrigNode); ok {
-		n.SetOrig(n)
-	}
-	return n
-}
-
-// copy returns shallow copy of n and adjusts the copy's Orig if
-// necessary: In general, if n.Orig points to itself, the copy's
-// Orig should point to itself as well. Otherwise, if n is modified,
-// the copy's Orig node appears modified, too, and then doesn't
-// represent the original node anymore.
-// (This caused the wrong complit Op to be used when printing error
-// messages; see issues #26855, #27765).
-func Copy(n Node) Node {
-	copy := n.RawCopy()
-	if n, ok := n.(OrigNode); ok && n.Orig() == n {
-		copy.(OrigNode).SetOrig(copy)
-	}
-	return copy
-}
-
 // isNil reports whether n represents the universal untyped zero value "nil".
 func IsNil(n Node) bool {
 	// Check n.Orig because constant propagation may produce typed nil constants,
From 4e7685ef1afbf8b1cc692f2a1bd3b02e444d5901 Mon Sep 17 00:00:00 2001
From: Russ Cox
Date: Thu, 26 Nov 2020 07:02:13 -0500
Subject: [PATCH 065/474] [dev.regabi] cmd/compile: add custom type syntax
 Node implementations

The type syntax is reused to stand in for the actual type once
typechecked, to avoid updating all the possible references to
the original type syntax.
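For instance (an illustrative sketch; pos, keyExpr, and elemExpr are
stand-ins), a map type written in source stays one node for its entire
lifetime:

	n := ir.NewMapType(pos, keyExpr, elemExpr) // n.Op() == ir.OTMAP
	// Typechecking resolves the syntax to a *types.Type and rewrites
	// n in place, so every existing reference to n now sees an
	// ir.OTYPE node for the finished type.
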
So all these implementations allow changing their Op from the raw syntax like OTMAP to the finished form OTYPE, even though obviously the representation does not change. Passes buildall w/ toolstash -cmp. Change-Id: I4acca1a5b35fa2f48ee08e8f1e5a330a004c284b Reviewed-on: https://go-review.googlesource.com/c/go/+/274103 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky TryBot-Result: Go Bot --- src/cmd/compile/fmtmap_test.go | 1 + src/cmd/compile/internal/gc/alg.go | 21 +- src/cmd/compile/internal/gc/builtin.go | 182 +++++------ src/cmd/compile/internal/gc/closure.go | 10 +- src/cmd/compile/internal/gc/dcl.go | 134 ++++---- src/cmd/compile/internal/gc/embed.go | 8 +- src/cmd/compile/internal/gc/iexport.go | 3 - src/cmd/compile/internal/gc/iimport.go | 3 - src/cmd/compile/internal/gc/init.go | 2 +- src/cmd/compile/internal/gc/mkbuiltin.go | 2 +- src/cmd/compile/internal/gc/noder.go | 102 +++--- src/cmd/compile/internal/gc/reflect.go | 8 +- src/cmd/compile/internal/gc/select.go | 2 +- src/cmd/compile/internal/gc/subr.go | 22 +- src/cmd/compile/internal/gc/typecheck.go | 170 +++++----- src/cmd/compile/internal/gc/universe.go | 5 +- src/cmd/compile/internal/gc/walk.go | 11 +- src/cmd/compile/internal/ir/expr.go | 44 +++ src/cmd/compile/internal/ir/fmt.go | 48 ++- src/cmd/compile/internal/ir/mini.go | 9 + src/cmd/compile/internal/ir/name.go | 6 +- src/cmd/compile/internal/ir/node.go | 45 +-- src/cmd/compile/internal/ir/type.go | 376 ++++++++++++++++++++++- 23 files changed, 787 insertions(+), 427 deletions(-) diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go index e949a89d93312..32891aea66d55 100644 --- a/src/cmd/compile/fmtmap_test.go +++ b/src/cmd/compile/fmtmap_test.go @@ -95,6 +95,7 @@ var knownFormats = map[string]string{ "cmd/compile/internal/ir.Nodes %+v": "", "cmd/compile/internal/ir.Nodes %.v": "", "cmd/compile/internal/ir.Nodes %v": "", + "cmd/compile/internal/ir.Ntype %v": "", "cmd/compile/internal/ir.Op %#v": "", "cmd/compile/internal/ir.Op %v": "", "cmd/compile/internal/ssa.BranchPrediction %d": "", diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index b40a56fe39ee4..806417d03de31 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -292,12 +292,12 @@ func genhash(t *types.Type) *obj.LSym { dclcontext = ir.PEXTERN // func sym(p *T, h uintptr) uintptr - tfn := ir.Nod(ir.OTFUNC, nil, nil) - tfn.PtrList().Set2( + args := []*ir.Field{ namedfield("p", types.NewPtr(t)), namedfield("h", types.Types[types.TUINTPTR]), - ) - tfn.PtrRlist().Set1(anonfield(types.Types[types.TUINTPTR])) + } + results := []*ir.Field{anonfield(types.Types[types.TUINTPTR])} + tfn := ir.NewFuncType(base.Pos, nil, args, results) fn := dclfunc(sym, tfn) np := ir.AsNode(tfn.Type().Params().Field(0).Nname) @@ -432,10 +432,10 @@ func hashfor(t *types.Type) ir.Node { n := NewName(sym) setNodeNameFunc(n) - n.SetType(functype(nil, []ir.Node{ + n.SetType(functype(nil, []*ir.Field{ anonfield(types.NewPtr(t)), anonfield(types.Types[types.TUINTPTR]), - }, []ir.Node{ + }, []*ir.Field{ anonfield(types.Types[types.TUINTPTR]), })) return n @@ -521,12 +521,9 @@ func geneq(t *types.Type) *obj.LSym { dclcontext = ir.PEXTERN // func sym(p, q *T) bool - tfn := ir.Nod(ir.OTFUNC, nil, nil) - tfn.PtrList().Set2( - namedfield("p", types.NewPtr(t)), - namedfield("q", types.NewPtr(t)), - ) - tfn.PtrRlist().Set1(namedfield("r", types.Types[types.TBOOL])) + tfn := ir.NewFuncType(base.Pos, nil, + []*ir.Field{namedfield("p", 
types.NewPtr(t)), namedfield("q", types.NewPtr(t))}, + []*ir.Field{namedfield("r", types.Types[types.TBOOL])}) fn := dclfunc(sym, tfn) np := ir.AsNode(tfn.Type().Params().Field(0).Nname) diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go index a57c6115595f0..efca44c667640 100644 --- a/src/cmd/compile/internal/gc/builtin.go +++ b/src/cmd/compile/internal/gc/builtin.go @@ -210,132 +210,132 @@ func runtimeTypes() []*types.Type { typs[1] = types.NewPtr(typs[0]) typs[2] = types.Types[types.TANY] typs[3] = types.NewPtr(typs[2]) - typs[4] = functype(nil, []ir.Node{anonfield(typs[1])}, []ir.Node{anonfield(typs[3])}) + typs[4] = functype(nil, []*ir.Field{anonfield(typs[1])}, []*ir.Field{anonfield(typs[3])}) typs[5] = types.Types[types.TUINTPTR] typs[6] = types.Types[types.TBOOL] typs[7] = types.Types[types.TUNSAFEPTR] - typs[8] = functype(nil, []ir.Node{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []ir.Node{anonfield(typs[7])}) + typs[8] = functype(nil, []*ir.Field{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []*ir.Field{anonfield(typs[7])}) typs[9] = functype(nil, nil, nil) typs[10] = types.Types[types.TINTER] - typs[11] = functype(nil, []ir.Node{anonfield(typs[10])}, nil) + typs[11] = functype(nil, []*ir.Field{anonfield(typs[10])}, nil) typs[12] = types.Types[types.TINT32] typs[13] = types.NewPtr(typs[12]) - typs[14] = functype(nil, []ir.Node{anonfield(typs[13])}, []ir.Node{anonfield(typs[10])}) + typs[14] = functype(nil, []*ir.Field{anonfield(typs[13])}, []*ir.Field{anonfield(typs[10])}) typs[15] = types.Types[types.TINT] - typs[16] = functype(nil, []ir.Node{anonfield(typs[15]), anonfield(typs[15])}, nil) + typs[16] = functype(nil, []*ir.Field{anonfield(typs[15]), anonfield(typs[15])}, nil) typs[17] = types.Types[types.TUINT] - typs[18] = functype(nil, []ir.Node{anonfield(typs[17]), anonfield(typs[15])}, nil) - typs[19] = functype(nil, []ir.Node{anonfield(typs[6])}, nil) + typs[18] = functype(nil, []*ir.Field{anonfield(typs[17]), anonfield(typs[15])}, nil) + typs[19] = functype(nil, []*ir.Field{anonfield(typs[6])}, nil) typs[20] = types.Types[types.TFLOAT64] - typs[21] = functype(nil, []ir.Node{anonfield(typs[20])}, nil) + typs[21] = functype(nil, []*ir.Field{anonfield(typs[20])}, nil) typs[22] = types.Types[types.TINT64] - typs[23] = functype(nil, []ir.Node{anonfield(typs[22])}, nil) + typs[23] = functype(nil, []*ir.Field{anonfield(typs[22])}, nil) typs[24] = types.Types[types.TUINT64] - typs[25] = functype(nil, []ir.Node{anonfield(typs[24])}, nil) + typs[25] = functype(nil, []*ir.Field{anonfield(typs[24])}, nil) typs[26] = types.Types[types.TCOMPLEX128] - typs[27] = functype(nil, []ir.Node{anonfield(typs[26])}, nil) + typs[27] = functype(nil, []*ir.Field{anonfield(typs[26])}, nil) typs[28] = types.Types[types.TSTRING] - typs[29] = functype(nil, []ir.Node{anonfield(typs[28])}, nil) - typs[30] = functype(nil, []ir.Node{anonfield(typs[2])}, nil) - typs[31] = functype(nil, []ir.Node{anonfield(typs[5])}, nil) + typs[29] = functype(nil, []*ir.Field{anonfield(typs[28])}, nil) + typs[30] = functype(nil, []*ir.Field{anonfield(typs[2])}, nil) + typs[31] = functype(nil, []*ir.Field{anonfield(typs[5])}, nil) typs[32] = types.NewArray(typs[0], 32) typs[33] = types.NewPtr(typs[32]) - typs[34] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])}) - typs[35] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, 
[]ir.Node{anonfield(typs[28])}) - typs[36] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])}) - typs[37] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])}) + typs[34] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[28])}) + typs[35] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[28])}) + typs[36] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[28])}) + typs[37] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[28])}) typs[38] = types.NewSlice(typs[28]) - typs[39] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[38])}, []ir.Node{anonfield(typs[28])}) - typs[40] = functype(nil, []ir.Node{anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[15])}) + typs[39] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[38])}, []*ir.Field{anonfield(typs[28])}) + typs[40] = functype(nil, []*ir.Field{anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[15])}) typs[41] = types.NewArray(typs[0], 4) typs[42] = types.NewPtr(typs[41]) - typs[43] = functype(nil, []ir.Node{anonfield(typs[42]), anonfield(typs[22])}, []ir.Node{anonfield(typs[28])}) - typs[44] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []ir.Node{anonfield(typs[28])}) - typs[45] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15])}, []ir.Node{anonfield(typs[28])}) + typs[43] = functype(nil, []*ir.Field{anonfield(typs[42]), anonfield(typs[22])}, []*ir.Field{anonfield(typs[28])}) + typs[44] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[28])}) + typs[45] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[28])}) typs[46] = types.Runetype typs[47] = types.NewSlice(typs[46]) - typs[48] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[47])}, []ir.Node{anonfield(typs[28])}) + typs[48] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[47])}, []*ir.Field{anonfield(typs[28])}) typs[49] = types.NewSlice(typs[0]) - typs[50] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28])}, []ir.Node{anonfield(typs[49])}) + typs[50] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[49])}) typs[51] = types.NewArray(typs[46], 32) typs[52] = types.NewPtr(typs[51]) - typs[53] = functype(nil, []ir.Node{anonfield(typs[52]), anonfield(typs[28])}, []ir.Node{anonfield(typs[47])}) - typs[54] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []ir.Node{anonfield(typs[15])}) - typs[55] = functype(nil, []ir.Node{anonfield(typs[28]), anonfield(typs[15])}, []ir.Node{anonfield(typs[46]), anonfield(typs[15])}) - typs[56] = functype(nil, []ir.Node{anonfield(typs[28])}, []ir.Node{anonfield(typs[15])}) - typs[57] = functype(nil, []ir.Node{anonfield(typs[1]), 
anonfield(typs[2])}, []ir.Node{anonfield(typs[2])}) - typs[58] = functype(nil, []ir.Node{anonfield(typs[2])}, []ir.Node{anonfield(typs[7])}) - typs[59] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3])}, []ir.Node{anonfield(typs[2])}) - typs[60] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[2])}, []ir.Node{anonfield(typs[2]), anonfield(typs[6])}) - typs[61] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil) - typs[62] = functype(nil, []ir.Node{anonfield(typs[1])}, nil) + typs[53] = functype(nil, []*ir.Field{anonfield(typs[52]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[47])}) + typs[54] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*ir.Field{anonfield(typs[15])}) + typs[55] = functype(nil, []*ir.Field{anonfield(typs[28]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[46]), anonfield(typs[15])}) + typs[56] = functype(nil, []*ir.Field{anonfield(typs[28])}, []*ir.Field{anonfield(typs[15])}) + typs[57] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[2])}, []*ir.Field{anonfield(typs[2])}) + typs[58] = functype(nil, []*ir.Field{anonfield(typs[2])}, []*ir.Field{anonfield(typs[7])}) + typs[59] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[2])}) + typs[60] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[2])}, []*ir.Field{anonfield(typs[2]), anonfield(typs[6])}) + typs[61] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil) + typs[62] = functype(nil, []*ir.Field{anonfield(typs[1])}, nil) typs[63] = types.NewPtr(typs[5]) - typs[64] = functype(nil, []ir.Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []ir.Node{anonfield(typs[6])}) + typs[64] = functype(nil, []*ir.Field{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*ir.Field{anonfield(typs[6])}) typs[65] = types.Types[types.TUINT32] - typs[66] = functype(nil, nil, []ir.Node{anonfield(typs[65])}) + typs[66] = functype(nil, nil, []*ir.Field{anonfield(typs[65])}) typs[67] = types.NewMap(typs[2], typs[2]) - typs[68] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []ir.Node{anonfield(typs[67])}) - typs[69] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []ir.Node{anonfield(typs[67])}) - typs[70] = functype(nil, nil, []ir.Node{anonfield(typs[67])}) - typs[71] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []ir.Node{anonfield(typs[3])}) - typs[72] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []ir.Node{anonfield(typs[3])}) - typs[73] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []ir.Node{anonfield(typs[3])}) - typs[74] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []ir.Node{anonfield(typs[3]), anonfield(typs[6])}) - typs[75] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []ir.Node{anonfield(typs[3]), anonfield(typs[6])}) - typs[76] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []ir.Node{anonfield(typs[3]), anonfield(typs[6])}) - typs[77] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil) - typs[78] = functype(nil, 
[]ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil) - typs[79] = functype(nil, []ir.Node{anonfield(typs[3])}, nil) - typs[80] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67])}, nil) + typs[68] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[67])}) + typs[69] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[67])}) + typs[70] = functype(nil, nil, []*ir.Field{anonfield(typs[67])}) + typs[71] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[3])}) + typs[72] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*ir.Field{anonfield(typs[3])}) + typs[73] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*ir.Field{anonfield(typs[3])}) + typs[74] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[3]), anonfield(typs[6])}) + typs[75] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*ir.Field{anonfield(typs[3]), anonfield(typs[6])}) + typs[76] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*ir.Field{anonfield(typs[3]), anonfield(typs[6])}) + typs[77] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil) + typs[78] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil) + typs[79] = functype(nil, []*ir.Field{anonfield(typs[3])}, nil) + typs[80] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67])}, nil) typs[81] = types.NewChan(typs[2], types.Cboth) - typs[82] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[22])}, []ir.Node{anonfield(typs[81])}) - typs[83] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15])}, []ir.Node{anonfield(typs[81])}) + typs[82] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[22])}, []*ir.Field{anonfield(typs[81])}) + typs[83] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[81])}) typs[84] = types.NewChan(typs[2], types.Crecv) - typs[85] = functype(nil, []ir.Node{anonfield(typs[84]), anonfield(typs[3])}, nil) - typs[86] = functype(nil, []ir.Node{anonfield(typs[84]), anonfield(typs[3])}, []ir.Node{anonfield(typs[6])}) + typs[85] = functype(nil, []*ir.Field{anonfield(typs[84]), anonfield(typs[3])}, nil) + typs[86] = functype(nil, []*ir.Field{anonfield(typs[84]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[6])}) typs[87] = types.NewChan(typs[2], types.Csend) - typs[88] = functype(nil, []ir.Node{anonfield(typs[87]), anonfield(typs[3])}, nil) + typs[88] = functype(nil, []*ir.Field{anonfield(typs[87]), anonfield(typs[3])}, nil) typs[89] = types.NewArray(typs[0], 3) - typs[90] = tostruct([]ir.Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])}) - typs[91] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil) - typs[92] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3])}, nil) - typs[93] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, 
[]ir.Node{anonfield(typs[15])}) - typs[94] = functype(nil, []ir.Node{anonfield(typs[87]), anonfield(typs[3])}, []ir.Node{anonfield(typs[6])}) - typs[95] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[84])}, []ir.Node{anonfield(typs[6])}) + typs[90] = tostruct([]*ir.Field{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])}) + typs[91] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil) + typs[92] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[3])}, nil) + typs[93] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[15])}) + typs[94] = functype(nil, []*ir.Field{anonfield(typs[87]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[6])}) + typs[95] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[84])}, []*ir.Field{anonfield(typs[6])}) typs[96] = types.NewPtr(typs[6]) - typs[97] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []ir.Node{anonfield(typs[6])}) - typs[98] = functype(nil, []ir.Node{anonfield(typs[63])}, nil) - typs[99] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []ir.Node{anonfield(typs[15]), anonfield(typs[6])}) - typs[100] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []ir.Node{anonfield(typs[7])}) - typs[101] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []ir.Node{anonfield(typs[7])}) - typs[102] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []ir.Node{anonfield(typs[7])}) + typs[97] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*ir.Field{anonfield(typs[6])}) + typs[98] = functype(nil, []*ir.Field{anonfield(typs[63])}, nil) + typs[99] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*ir.Field{anonfield(typs[15]), anonfield(typs[6])}) + typs[100] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[7])}) + typs[101] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*ir.Field{anonfield(typs[7])}) + typs[102] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*ir.Field{anonfield(typs[7])}) typs[103] = types.NewSlice(typs[2]) - typs[104] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []ir.Node{anonfield(typs[103])}) - typs[105] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil) - typs[106] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[5])}, nil) - typs[107] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []ir.Node{anonfield(typs[6])}) - typs[108] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[3])}, []ir.Node{anonfield(typs[6])}) - typs[109] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[7])}, []ir.Node{anonfield(typs[6])}) - typs[110] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, 
[]ir.Node{anonfield(typs[5])}) - typs[111] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[5])}, []ir.Node{anonfield(typs[5])}) - typs[112] = functype(nil, []ir.Node{anonfield(typs[22]), anonfield(typs[22])}, []ir.Node{anonfield(typs[22])}) - typs[113] = functype(nil, []ir.Node{anonfield(typs[24]), anonfield(typs[24])}, []ir.Node{anonfield(typs[24])}) - typs[114] = functype(nil, []ir.Node{anonfield(typs[20])}, []ir.Node{anonfield(typs[22])}) - typs[115] = functype(nil, []ir.Node{anonfield(typs[20])}, []ir.Node{anonfield(typs[24])}) - typs[116] = functype(nil, []ir.Node{anonfield(typs[20])}, []ir.Node{anonfield(typs[65])}) - typs[117] = functype(nil, []ir.Node{anonfield(typs[22])}, []ir.Node{anonfield(typs[20])}) - typs[118] = functype(nil, []ir.Node{anonfield(typs[24])}, []ir.Node{anonfield(typs[20])}) - typs[119] = functype(nil, []ir.Node{anonfield(typs[65])}, []ir.Node{anonfield(typs[20])}) - typs[120] = functype(nil, []ir.Node{anonfield(typs[26]), anonfield(typs[26])}, []ir.Node{anonfield(typs[26])}) - typs[121] = functype(nil, []ir.Node{anonfield(typs[5]), anonfield(typs[5])}, nil) - typs[122] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil) + typs[104] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[103])}) + typs[105] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil) + typs[106] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[5])}, nil) + typs[107] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*ir.Field{anonfield(typs[6])}) + typs[108] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[6])}) + typs[109] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[7])}, []*ir.Field{anonfield(typs[6])}) + typs[110] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*ir.Field{anonfield(typs[5])}) + typs[111] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[5])}, []*ir.Field{anonfield(typs[5])}) + typs[112] = functype(nil, []*ir.Field{anonfield(typs[22]), anonfield(typs[22])}, []*ir.Field{anonfield(typs[22])}) + typs[113] = functype(nil, []*ir.Field{anonfield(typs[24]), anonfield(typs[24])}, []*ir.Field{anonfield(typs[24])}) + typs[114] = functype(nil, []*ir.Field{anonfield(typs[20])}, []*ir.Field{anonfield(typs[22])}) + typs[115] = functype(nil, []*ir.Field{anonfield(typs[20])}, []*ir.Field{anonfield(typs[24])}) + typs[116] = functype(nil, []*ir.Field{anonfield(typs[20])}, []*ir.Field{anonfield(typs[65])}) + typs[117] = functype(nil, []*ir.Field{anonfield(typs[22])}, []*ir.Field{anonfield(typs[20])}) + typs[118] = functype(nil, []*ir.Field{anonfield(typs[24])}, []*ir.Field{anonfield(typs[20])}) + typs[119] = functype(nil, []*ir.Field{anonfield(typs[65])}, []*ir.Field{anonfield(typs[20])}) + typs[120] = functype(nil, []*ir.Field{anonfield(typs[26]), anonfield(typs[26])}, []*ir.Field{anonfield(typs[26])}) + typs[121] = functype(nil, []*ir.Field{anonfield(typs[5]), anonfield(typs[5])}, nil) + typs[122] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil) typs[123] = types.NewSlice(typs[7]) - typs[124] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[123])}, nil) + typs[124] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[123])}, nil) typs[125] = types.Types[types.TUINT8] - typs[126] = 
functype(nil, []ir.Node{anonfield(typs[125]), anonfield(typs[125])}, nil) + typs[126] = functype(nil, []*ir.Field{anonfield(typs[125]), anonfield(typs[125])}, nil) typs[127] = types.Types[types.TUINT16] - typs[128] = functype(nil, []ir.Node{anonfield(typs[127]), anonfield(typs[127])}, nil) - typs[129] = functype(nil, []ir.Node{anonfield(typs[65]), anonfield(typs[65])}, nil) - typs[130] = functype(nil, []ir.Node{anonfield(typs[24]), anonfield(typs[24])}, nil) + typs[128] = functype(nil, []*ir.Field{anonfield(typs[127]), anonfield(typs[127])}, nil) + typs[129] = functype(nil, []*ir.Field{anonfield(typs[65]), anonfield(typs[65])}, nil) + typs[130] = functype(nil, []*ir.Field{anonfield(typs[24]), anonfield(typs[24])}, nil) return typs[:] } diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index ee09e7876eed8..0ba2858b8b4e8 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -363,7 +363,7 @@ func closureType(clo ir.Node) *types.Type { // The information appears in the binary in the form of type descriptors; // the struct is unnamed so that closures in multiple packages with the // same struct type can share the descriptor. - fields := []ir.Node{ + fields := []*ir.Field{ namedfield(".F", types.Types[types.TUINTPTR]), } for _, v := range clo.Func().ClosureVars { @@ -456,9 +456,9 @@ func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) *ir.Func { // number at the use of the method expression in this // case. See issue 29389. - tfn := ir.Nod(ir.OTFUNC, nil, nil) - tfn.PtrList().Set(structargs(t0.Params(), true)) - tfn.PtrRlist().Set(structargs(t0.Results(), false)) + tfn := ir.NewFuncType(base.Pos, nil, + structargs(t0.Params(), true), + structargs(t0.Results(), false)) fn := dclfunc(sym, tfn) fn.SetDupok(true) @@ -510,7 +510,7 @@ func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) *ir.Func { // needed in the closure for n (n must be a OCALLPART node). // The address of a variable of the returned type can be cast to a func. func partialCallType(n ir.Node) *types.Type { - t := tostruct([]ir.Node{ + t := tostruct([]*ir.Field{ namedfield("F", types.Types[types.TUINTPTR]), namedfield("R", n.Left().Type()), }) diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 3d8f97d93dd51..637587392aa44 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -134,7 +134,7 @@ func addvar(n *ir.Name, t *types.Type, ctxt ir.Class) { // declare variables from grammar // new_name_list (type | [type] = expr_list) -func variter(vl []ir.Node, t ir.Node, el []ir.Node) []ir.Node { +func variter(vl []ir.Node, t ir.Ntype, el []ir.Node) []ir.Node { var init []ir.Node doexpr := len(el) > 0 @@ -221,18 +221,16 @@ func dclname(s *types.Sym) *ir.Name { return n } -func anonfield(typ *types.Type) ir.Node { +func anonfield(typ *types.Type) *ir.Field { return symfield(nil, typ) } -func namedfield(s string, typ *types.Type) ir.Node { +func namedfield(s string, typ *types.Type) *ir.Field { return symfield(lookup(s), typ) } -func symfield(s *types.Sym, typ *types.Type) ir.Node { - n := nodSym(ir.ODCLFIELD, nil, s) - n.SetType(typ) - return n +func symfield(s *types.Sym, typ *types.Type) *ir.Field { + return ir.NewField(base.Pos, s, nil, typ) } // oldname returns the Node that declares symbol s in the current scope. 
@@ -279,7 +277,8 @@ func oldname(s *types.Sym) ir.Node { return n } -// importName is like oldname, but it reports an error if sym is from another package and not exported. +// importName is like oldname, +// but it reports an error if sym is from another package and not exported. func importName(sym *types.Sym) ir.Node { n := oldname(sym) if !types.IsExported(sym.Name) && sym.Pkg != ir.LocalPkg { @@ -348,12 +347,12 @@ func colasdefn(left []ir.Node, defn ir.Node) { // declare the arguments in an // interface field declaration. -func ifacedcl(n ir.Node) { - if n.Op() != ir.ODCLFIELD || n.Left() == nil { +func ifacedcl(n *ir.Field) { + if n.Sym == nil { base.Fatalf("ifacedcl") } - if n.Sym().IsBlank() { + if n.Sym.IsBlank() { base.Errorf("methods must have a unique non-blank name") } } @@ -371,13 +370,13 @@ func funchdr(fn *ir.Func) { types.Markdcl() if fn.Nname != nil && fn.Nname.Ntype != nil { - funcargs(fn.Nname.Ntype) + funcargs(fn.Nname.Ntype.(*ir.FuncType)) } else { funcargs2(fn.Type()) } } -func funcargs(nt ir.Node) { +func funcargs(nt *ir.FuncType) { if nt.Op() != ir.OTFUNC { base.Fatalf("funcargs %v", nt.Op()) } @@ -389,13 +388,13 @@ func funcargs(nt ir.Node) { // TODO(mdempsky): This is ugly, and only necessary because // esc.go uses Vargen to figure out result parameters' index // within the result tuple. - vargen = nt.Rlist().Len() + vargen = len(nt.Results) // declare the receiver and in arguments. - if nt.Left() != nil { - funcarg(nt.Left(), ir.PPARAM) + if nt.Recv != nil { + funcarg(nt.Recv, ir.PPARAM) } - for _, n := range nt.List().Slice() { + for _, n := range nt.Params { funcarg(n, ir.PPARAM) } @@ -403,21 +402,21 @@ func funcargs(nt ir.Node) { vargen = 0 // declare the out arguments. - gen := nt.List().Len() - for _, n := range nt.Rlist().Slice() { - if n.Sym() == nil { + gen := len(nt.Params) + for _, n := range nt.Results { + if n.Sym == nil { // Name so that escape analysis can track it. ~r stands for 'result'. - n.SetSym(lookupN("~r", gen)) + n.Sym = lookupN("~r", gen) gen++ } - if n.Sym().IsBlank() { + if n.Sym.IsBlank() { // Give it a name so we can assign to it during return. ~b stands for 'blank'. // The name must be different from ~r above because if you have // func f() (_ int) // func g() int // f is allowed to use a plain 'return' with no arguments, while g is not. // So the two cases must be distinguished. - n.SetSym(lookupN("~b", gen)) + n.Sym = lookupN("~b", gen) gen++ } @@ -427,22 +426,19 @@ func funcargs(nt ir.Node) { vargen = oldvargen } -func funcarg(n ir.Node, ctxt ir.Class) { - if n.Op() != ir.ODCLFIELD { - base.Fatalf("funcarg %v", n.Op()) - } - if n.Sym() == nil { +func funcarg(n *ir.Field, ctxt ir.Class) { + if n.Sym == nil { return } - name := ir.NewNameAt(n.Pos(), n.Sym()) - n.SetRight(name) - name.Ntype = n.Left() - name.SetIsDDD(n.IsDDD()) + name := ir.NewNameAt(n.Pos, n.Sym) + n.Decl = name + name.Ntype = n.Ntype + name.SetIsDDD(n.IsDDD) declare(name, ctxt) vargen++ - n.Right().Name().Vargen = int32(vargen) + n.Decl.Name().Vargen = int32(vargen) } // Same as funcargs, except run over an already constructed TFUNC. 
@@ -514,28 +510,22 @@ func checkembeddedtype(t *types.Type) { } } -func structfield(n ir.Node) *types.Field { +func structfield(n *ir.Field) *types.Field { lno := base.Pos - base.Pos = n.Pos() - - if n.Op() != ir.ODCLFIELD { - base.Fatalf("structfield: oops %v\n", n) - } + base.Pos = n.Pos - if n.Left() != nil { - n.SetLeft(typecheck(n.Left(), ctxType)) - n.SetType(n.Left().Type()) - n.SetLeft(nil) + if n.Ntype != nil { + n.Ntype = typecheckNtype(n.Ntype) + n.Type = n.Ntype.Type() + n.Ntype = nil } - f := types.NewField(n.Pos(), n.Sym(), n.Type()) - if n.Embedded() { - checkembeddedtype(n.Type()) + f := types.NewField(n.Pos, n.Sym, n.Type) + if n.Embedded { + checkembeddedtype(n.Type) f.Embedded = 1 } - if n.Opt() != nil { - f.Note = n.Opt().(string) - } + f.Note = n.Note base.Pos = lno return f @@ -561,7 +551,7 @@ func checkdupfields(what string, fss ...[]*types.Field) { // convert a parsed id/type list into // a type for struct/interface/arglist -func tostruct(l []ir.Node) *types.Type { +func tostruct(l []*ir.Field) *types.Type { t := types.New(types.TSTRUCT) fields := make([]*types.Field, len(l)) @@ -583,17 +573,17 @@ func tostruct(l []ir.Node) *types.Type { return t } -func tofunargs(l []ir.Node, funarg types.Funarg) *types.Type { +func tofunargs(l []*ir.Field, funarg types.Funarg) *types.Type { t := types.New(types.TSTRUCT) t.StructType().Funarg = funarg fields := make([]*types.Field, len(l)) for i, n := range l { f := structfield(n) - f.SetIsDDD(n.IsDDD()) - if n.Right() != nil { - n.Right().SetType(f.Type) - f.Nname = n.Right() + f.SetIsDDD(n.IsDDD) + if n.Decl != nil { + n.Decl.SetType(f.Type) + f.Nname = n.Decl } if f.Broke() { t.SetBroke(true) @@ -611,15 +601,11 @@ func tofunargsfield(fields []*types.Field, funarg types.Funarg) *types.Type { return t } -func interfacefield(n ir.Node) *types.Field { +func interfacefield(n *ir.Field) *types.Field { lno := base.Pos - base.Pos = n.Pos() - - if n.Op() != ir.ODCLFIELD { - base.Fatalf("interfacefield: oops %v\n", n) - } + base.Pos = n.Pos - if n.Opt() != nil { + if n.Note != "" { base.Errorf("interface method cannot have annotation") } @@ -628,19 +614,19 @@ func interfacefield(n ir.Node) *types.Field { // If Sym != nil, then Sym is MethodName and Left is Signature. // Otherwise, Left is InterfaceTypeName. 
- if n.Left() != nil { - n.SetLeft(typecheck(n.Left(), ctxType)) - n.SetType(n.Left().Type()) - n.SetLeft(nil) + if n.Ntype != nil { + n.Ntype = typecheckNtype(n.Ntype) + n.Type = n.Ntype.Type() + n.Ntype = nil } - f := types.NewField(n.Pos(), n.Sym(), n.Type()) + f := types.NewField(n.Pos, n.Sym, n.Type) base.Pos = lno return f } -func tointerface(l []ir.Node) *types.Type { +func tointerface(l []*ir.Field) *types.Type { if len(l) == 0 { return types.Types[types.TINTER] } @@ -657,7 +643,7 @@ func tointerface(l []ir.Node) *types.Type { return t } -func fakeRecv() ir.Node { +func fakeRecv() *ir.Field { return anonfield(types.FakeRecvType()) } @@ -673,12 +659,12 @@ func isifacemethod(f *types.Type) bool { } // turn a parsed function declaration into a type -func functype(this ir.Node, in, out []ir.Node) *types.Type { +func functype(this *ir.Field, in, out []*ir.Field) *types.Type { t := types.New(types.TFUNC) - var rcvr []ir.Node + var rcvr []*ir.Field if this != nil { - rcvr = []ir.Node{this} + rcvr = []*ir.Field{this} } t.FuncType().Receiver = tofunargs(rcvr, types.FunargRcvr) t.FuncType().Params = tofunargs(in, types.FunargParams) @@ -923,7 +909,7 @@ func setNodeNameFunc(n ir.Node) { n.Sym().SetFunc(true) } -func dclfunc(sym *types.Sym, tfn ir.Node) *ir.Func { +func dclfunc(sym *types.Sym, tfn ir.Ntype) *ir.Func { if tfn.Op() != ir.OTFUNC { base.Fatalf("expected OTFUNC node, got %v", tfn) } @@ -934,7 +920,7 @@ func dclfunc(sym *types.Sym, tfn ir.Node) *ir.Func { fn.Nname.Ntype = tfn setNodeNameFunc(fn.Nname) funchdr(fn) - fn.Nname.Ntype = typecheck(fn.Nname.Ntype, ctxType) + fn.Nname.Ntype = typecheckNtype(fn.Nname.Ntype) return fn } diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go index 1c8ccdadefeb5..d9bfd6f5edebe 100644 --- a/src/cmd/compile/internal/gc/embed.go +++ b/src/cmd/compile/internal/gc/embed.go @@ -28,7 +28,7 @@ const ( var numLocalEmbed int -func varEmbed(p *noder, names []ir.Node, typ ir.Node, exprs []ir.Node, embeds []PragmaEmbed) (newExprs []ir.Node) { +func varEmbed(p *noder, names []ir.Node, typ ir.Ntype, exprs []ir.Node, embeds []PragmaEmbed) (newExprs []ir.Node) { haveEmbed := false for _, decl := range p.file.DeclList { imp, ok := decl.(*syntax.ImportDecl) @@ -141,8 +141,10 @@ func embedKindApprox(typ ir.Node) int { if typ.Sym() != nil && typ.Sym().Name == "string" && typ.Sym().Pkg == ir.LocalPkg { return embedString } - if typ.Op() == ir.OTARRAY && typ.Left() == nil && typ.Right().Sym() != nil && typ.Right().Sym().Name == "byte" && typ.Right().Sym().Pkg == ir.LocalPkg { - return embedBytes + if typ, ok := typ.(*ir.SliceType); ok { + if sym := typ.Elem.Sym(); sym != nil && sym.Name == "byte" && sym.Pkg == ir.LocalPkg { + return embedBytes + } } return embedUnknown } diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 3f0f381974ea3..c9f5d0c85c401 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -1056,9 +1056,6 @@ func (w *exportWriter) stmt(n ir.Node) { w.localName(n.Left()) w.typ(n.Left().Type()) - // case ODCLFIELD: - // unimplemented - handled by default case - case ir.OAS: // Don't export "v = " initializing statements, hope they're always // preceded by the DCL which will be re-parsed and typecheck to reproduce diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 88f6e36e0735f..c219b70e0fc4b 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ 
b/src/cmd/compile/internal/gc/iimport.go @@ -974,9 +974,6 @@ func (r *importReader) node() ir.Node { typ := ir.TypeNode(r.typ()) return npos(pos, liststmt(variter([]ir.Node{lhs}, typ, nil))) // TODO(gri) avoid list creation - // case ODCLFIELD: - // unimplemented - // case OAS, OASWB: // unreachable - mapped to OAS case below by exporter diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index 7f2a39ff464ff..ed0218c0e2363 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -48,7 +48,7 @@ func fninit(n []ir.Node) { if len(nf) > 0 { base.Pos = nf[0].Pos() // prolog/epilog gets line number of first init stmt initializers := lookup("init") - fn := dclfunc(initializers, ir.Nod(ir.OTFUNC, nil, nil)) + fn := dclfunc(initializers, ir.NewFuncType(base.Pos, nil, nil, nil)) for _, dcl := range initTodo.Dcl { dcl.Name().Curfn = fn } diff --git a/src/cmd/compile/internal/gc/mkbuiltin.go b/src/cmd/compile/internal/gc/mkbuiltin.go index d763f1ebee8e9..5317484de9ec6 100644 --- a/src/cmd/compile/internal/gc/mkbuiltin.go +++ b/src/cmd/compile/internal/gc/mkbuiltin.go @@ -207,7 +207,7 @@ func (i *typeInterner) fields(fl *ast.FieldList, keepNames bool) string { } } } - return fmt.Sprintf("[]ir.Node{%s}", strings.Join(res, ", ")) + return fmt.Sprintf("[]*ir.Field{%s}", strings.Join(res, ", ")) } func intconst(e ast.Expr) int64 { diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 1c433b5d30085..e6c78d1afb5d9 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -412,7 +412,7 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node { // constant declarations are handled correctly (e.g., issue 15550). type constState struct { group *syntax.Group - typ ir.Node + typ ir.Ntype values []ir.Node iota int64 } @@ -578,18 +578,18 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node { return f } -func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) ir.Node { - n := p.nod(typ, ir.OTFUNC, nil, nil) +func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) *ir.FuncType { + var rcvr *ir.Field if recv != nil { - n.SetLeft(p.param(recv, false, false)) + rcvr = p.param(recv, false, false) } - n.PtrList().Set(p.params(typ.ParamList, true)) - n.PtrRlist().Set(p.params(typ.ResultList, false)) - return n + return ir.NewFuncType(p.pos(typ), rcvr, + p.params(typ.ParamList, true), + p.params(typ.ResultList, false)) } -func (p *noder) params(params []*syntax.Field, dddOk bool) []ir.Node { - nodes := make([]ir.Node, 0, len(params)) +func (p *noder) params(params []*syntax.Field, dddOk bool) []*ir.Field { + nodes := make([]*ir.Field, 0, len(params)) for i, param := range params { p.setlineno(param) nodes = append(nodes, p.param(param, dddOk, i+1 == len(params))) @@ -597,17 +597,17 @@ func (p *noder) params(params []*syntax.Field, dddOk bool) []ir.Node { return nodes } -func (p *noder) param(param *syntax.Field, dddOk, final bool) ir.Node { +func (p *noder) param(param *syntax.Field, dddOk, final bool) *ir.Field { var name *types.Sym if param.Name != nil { name = p.name(param.Name) } typ := p.typeExpr(param.Type) - n := p.nodSym(param, ir.ODCLFIELD, typ, name) + n := ir.NewField(p.pos(param), name, typ, nil) // rewrite ...T parameter - if typ.Op() == ir.ODDD { + if typ, ok := typ.(*ir.SliceType); ok && typ.DDD { if !dddOk { // We mark these as syntax errors to get automatic elimination // of multiple such errors per line (see ErrorfAt in 
subr.go). @@ -619,13 +619,8 @@ func (p *noder) param(param *syntax.Field, dddOk, final bool) ir.Node { p.errorAt(param.Name.Pos(), "syntax error: cannot use ... with non-final parameter %s", param.Name.Value) } } - typ.SetOp(ir.OTARRAY) - typ.SetRight(typ.Left()) - typ.SetLeft(nil) - n.SetIsDDD(true) - if n.Left() != nil { - n.Left().SetIsDDD(true) - } + typ.DDD = false + n.IsDDD = true } return n @@ -727,14 +722,14 @@ func (p *noder) expr(expr syntax.Expr) ir.Node { var len ir.Node if expr.Len != nil { len = p.expr(expr.Len) - } else { - len = p.nod(expr, ir.ODDD, nil, nil) } - return p.nod(expr, ir.OTARRAY, len, p.typeExpr(expr.Elem)) + return ir.NewArrayType(p.pos(expr), len, p.typeExpr(expr.Elem)) case *syntax.SliceType: - return p.nod(expr, ir.OTARRAY, nil, p.typeExpr(expr.Elem)) + return ir.NewSliceType(p.pos(expr), p.typeExpr(expr.Elem)) case *syntax.DotsType: - return p.nod(expr, ir.ODDD, p.typeExpr(expr.Elem), nil) + t := ir.NewSliceType(p.pos(expr), p.typeExpr(expr.Elem)) + t.DDD = true + return t case *syntax.StructType: return p.structType(expr) case *syntax.InterfaceType: @@ -742,11 +737,11 @@ func (p *noder) expr(expr syntax.Expr) ir.Node { case *syntax.FuncType: return p.signature(nil, expr) case *syntax.MapType: - return p.nod(expr, ir.OTMAP, p.typeExpr(expr.Key), p.typeExpr(expr.Value)) + return ir.NewMapType(p.pos(expr), + p.typeExpr(expr.Key), p.typeExpr(expr.Value)) case *syntax.ChanType: - n := p.nod(expr, ir.OTCHAN, p.typeExpr(expr.Elem), nil) - n.SetTChanDir(p.chanDir(expr.Dir)) - return n + return ir.NewChanType(p.pos(expr), + p.typeExpr(expr.Elem), p.chanDir(expr.Dir)) case *syntax.TypeSwitchGuard: n := p.nod(expr, ir.OTYPESW, nil, p.expr(expr.X)) @@ -837,14 +832,21 @@ func (p *noder) sum(x syntax.Expr) ir.Node { return n } -func (p *noder) typeExpr(typ syntax.Expr) ir.Node { +func (p *noder) typeExpr(typ syntax.Expr) ir.Ntype { // TODO(mdempsky): Be stricter? typecheck should handle errors anyway. 
- return p.expr(typ) + n := p.expr(typ) + if n == nil { + return nil + } + if _, ok := n.(ir.Ntype); !ok { + ir.Dump("NOT NTYPE", n) + } + return n.(ir.Ntype) } -func (p *noder) typeExprOrNil(typ syntax.Expr) ir.Node { +func (p *noder) typeExprOrNil(typ syntax.Expr) ir.Ntype { if typ != nil { - return p.expr(typ) + return p.typeExpr(typ) } return nil } @@ -862,47 +864,43 @@ func (p *noder) chanDir(dir syntax.ChanDir) types.ChanDir { } func (p *noder) structType(expr *syntax.StructType) ir.Node { - l := make([]ir.Node, 0, len(expr.FieldList)) + l := make([]*ir.Field, 0, len(expr.FieldList)) for i, field := range expr.FieldList { p.setlineno(field) - var n ir.Node + var n *ir.Field if field.Name == nil { n = p.embedded(field.Type) } else { - n = p.nodSym(field, ir.ODCLFIELD, p.typeExpr(field.Type), p.name(field.Name)) + n = ir.NewField(p.pos(field), p.name(field.Name), p.typeExpr(field.Type), nil) } if i < len(expr.TagList) && expr.TagList[i] != nil { - n.SetOpt(constant.StringVal(p.basicLit(expr.TagList[i]))) + n.Note = constant.StringVal(p.basicLit(expr.TagList[i])) } l = append(l, n) } p.setlineno(expr) - n := p.nod(expr, ir.OTSTRUCT, nil, nil) - n.PtrList().Set(l) - return n + return ir.NewStructType(p.pos(expr), l) } func (p *noder) interfaceType(expr *syntax.InterfaceType) ir.Node { - l := make([]ir.Node, 0, len(expr.MethodList)) + l := make([]*ir.Field, 0, len(expr.MethodList)) for _, method := range expr.MethodList { p.setlineno(method) - var n ir.Node + var n *ir.Field if method.Name == nil { - n = p.nodSym(method, ir.ODCLFIELD, importName(p.packname(method.Type)), nil) + n = ir.NewField(p.pos(method), nil, importName(p.packname(method.Type)).(ir.Ntype), nil) } else { mname := p.name(method.Name) - sig := p.typeExpr(method.Type) - sig.SetLeft(fakeRecv()) - n = p.nodSym(method, ir.ODCLFIELD, sig, mname) + sig := p.typeExpr(method.Type).(*ir.FuncType) + sig.Recv = fakeRecv() + n = ir.NewField(p.pos(method), mname, sig, nil) ifacedcl(n) } l = append(l, n) } - n := p.nod(expr, ir.OTINTER, nil, nil) - n.PtrList().Set(l) - return n + return ir.NewInterfaceType(p.pos(expr), l) } func (p *noder) packname(expr syntax.Expr) *types.Sym { @@ -934,7 +932,7 @@ func (p *noder) packname(expr syntax.Expr) *types.Sym { panic(fmt.Sprintf("unexpected packname: %#v", expr)) } -func (p *noder) embedded(typ syntax.Expr) ir.Node { +func (p *noder) embedded(typ syntax.Expr) *ir.Field { op, isStar := typ.(*syntax.Operation) if isStar { if op.Op != syntax.Mul || op.Y != nil { @@ -944,11 +942,11 @@ func (p *noder) embedded(typ syntax.Expr) ir.Node { } sym := p.packname(typ) - n := p.nodSym(typ, ir.ODCLFIELD, importName(sym), lookup(sym.Name)) - n.SetEmbedded(true) + n := ir.NewField(p.pos(typ), lookup(sym.Name), importName(sym).(ir.Ntype), nil) + n.Embedded = true if isStar { - n.SetLeft(p.nod(op, ir.ODEREF, n.Left(), nil)) + n.Ntype = ir.NewStarExpr(p.pos(op), n.Ntype) } return n } diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index dc9efc07fef0a..73d369f413b42 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -347,7 +347,7 @@ func methodfunc(f *types.Type, receiver *types.Type) *types.Type { if receiver != nil { inLen++ } - in := make([]ir.Node, 0, inLen) + in := make([]*ir.Field, 0, inLen) if receiver != nil { d := anonfield(receiver) @@ -356,12 +356,12 @@ func methodfunc(f *types.Type, receiver *types.Type) *types.Type { for _, t := range f.Params().Fields().Slice() { d := anonfield(t.Type) - 
d.SetIsDDD(t.IsDDD()) + d.IsDDD = t.IsDDD() in = append(in, d) } outLen := f.Results().Fields().Len() - out := make([]ir.Node, 0, outLen) + out := make([]*ir.Field, 0, outLen) for _, t := range f.Results().Fields().Slice() { d := anonfield(t.Type) out = append(out, d) @@ -1626,7 +1626,7 @@ func dumpbasictypes() { // The latter is the type of an auto-generated wrapper. dtypesym(types.NewPtr(types.Errortype)) - dtypesym(functype(nil, []ir.Node{anonfield(types.Errortype)}, []ir.Node{anonfield(types.Types[types.TSTRING])})) + dtypesym(functype(nil, []*ir.Field{anonfield(types.Errortype)}, []*ir.Field{anonfield(types.Types[types.TSTRING])})) // add paths for runtime and main, which 6l imports implicitly. dimportpath(Runtimepkg) diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go index 116b6f5b6e657..9668df082a753 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/gc/select.go @@ -381,7 +381,7 @@ var scase *types.Type // Keep in sync with src/runtime/select.go. func scasetype() *types.Type { if scase == nil { - scase = tostruct([]ir.Node{ + scase = tostruct([]*ir.Field{ namedfield("c", types.Types[types.TUNSAFEPTR]), namedfield("elem", types.Types[types.TUNSAFEPTR]), }) diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 25490246e6d0b..b1c9d24d991f2 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -1062,9 +1062,9 @@ func expandmeth(t *types.Type) { t.AllMethods().Set(ms) } -// Given funarg struct list, return list of ODCLFIELD Node fn args. -func structargs(tl *types.Type, mustname bool) []ir.Node { - var args []ir.Node +// Given funarg struct list, return list of fn args. +func structargs(tl *types.Type, mustname bool) []*ir.Field { + var args []*ir.Field gen := 0 for _, t := range tl.Fields().Slice() { s := t.Sym @@ -1074,8 +1074,8 @@ func structargs(tl *types.Type, mustname bool) []ir.Node { gen++ } a := symfield(s, t.Type) - a.SetPos(t.Pos) - a.SetIsDDD(t.IsDDD()) + a.Pos = t.Pos + a.IsDDD = t.IsDDD() args = append(args, a) } @@ -1123,10 +1123,10 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { base.Pos = autogeneratedPos dclcontext = ir.PEXTERN - tfn := ir.Nod(ir.OTFUNC, nil, nil) - tfn.SetLeft(namedfield(".this", rcvr)) - tfn.PtrList().Set(structargs(method.Type.Params(), true)) - tfn.PtrRlist().Set(structargs(method.Type.Results(), false)) + tfn := ir.NewFuncType(base.Pos, + namedfield(".this", rcvr), + structargs(method.Type.Params(), true), + structargs(method.Type.Results(), false)) fn := dclfunc(newnam, tfn) fn.SetDupok(true) @@ -1215,11 +1215,11 @@ func hashmem(t *types.Type) ir.Node { n := NewName(sym) setNodeNameFunc(n) - n.SetType(functype(nil, []ir.Node{ + n.SetType(functype(nil, []*ir.Field{ anonfield(types.NewPtr(t)), anonfield(types.Types[types.TUINTPTR]), anonfield(types.Types[types.TUINTPTR]), - }, []ir.Node{ + }, []*ir.Field{ anonfield(types.Types[types.TUINTPTR]), })) return n diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index a1b18097906db..19146e2a9e795 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -206,6 +206,10 @@ func typecheckFunc(fn *ir.Func) { } } +func typecheckNtype(n ir.Ntype) ir.Ntype { + return typecheck(n, ctxType).(ir.Ntype) +} + // typecheck type checks node n. // The result of typecheck MUST be assigned back to n, e.g. 
// n.Left = typecheck(n.Left, top) @@ -403,9 +407,6 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetType(nil) return n - case ir.ODDD: - break - // types (ODEREF is with exprs) case ir.OTYPE: ok |= ctxType @@ -414,70 +415,69 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } - case ir.OTARRAY: + case ir.OTSLICE: ok |= ctxType - r := typecheck(n.Right(), ctxType) - if r.Type() == nil { - n.SetType(nil) + n := n.(*ir.SliceType) + n.Elem = typecheck(n.Elem, ctxType) + if n.Elem.Type() == nil { return n } + t := types.NewSlice(n.Elem.Type()) + n.SetOTYPE(t) + checkwidth(t) - var t *types.Type - if n.Left() == nil { - t = types.NewSlice(r.Type()) - } else if n.Left().Op() == ir.ODDD { + case ir.OTARRAY: + ok |= ctxType + n := n.(*ir.ArrayType) + n.Elem = typecheck(n.Elem, ctxType) + if n.Elem.Type() == nil { + return n + } + if n.Len == nil { // [...]T if !n.Diag() { n.SetDiag(true) base.Errorf("use of [...] array outside of array literal") } - n.SetType(nil) return n - } else { - n.SetLeft(indexlit(typecheck(n.Left(), ctxExpr))) - l := n.Left() - if ir.ConstType(l) != constant.Int { - switch { - case l.Type() == nil: - // Error already reported elsewhere. - case l.Type().IsInteger() && l.Op() != ir.OLITERAL: - base.Errorf("non-constant array bound %v", l) - default: - base.Errorf("invalid array bound %v", l) - } - n.SetType(nil) - return n - } - - v := l.Val() - if doesoverflow(v, types.Types[types.TINT]) { - base.Errorf("array bound is too large") - n.SetType(nil) - return n + } + n.Len = indexlit(typecheck(n.Len, ctxExpr)) + size := n.Len + if ir.ConstType(size) != constant.Int { + switch { + case size.Type() == nil: + // Error already reported elsewhere. + case size.Type().IsInteger() && size.Op() != ir.OLITERAL: + base.Errorf("non-constant array bound %v", size) + default: + base.Errorf("invalid array bound %v", size) } + return n + } - if constant.Sign(v) < 0 { - base.Errorf("array bound must be non-negative") - n.SetType(nil) - return n - } + v := size.Val() + if doesoverflow(v, types.Types[types.TINT]) { + base.Errorf("array bound is too large") + return n + } - bound, _ := constant.Int64Val(v) - t = types.NewArray(r.Type(), bound) + if constant.Sign(v) < 0 { + base.Errorf("array bound must be non-negative") + return n } - setTypeNode(n, t) - n.SetLeft(nil) - n.SetRight(nil) + bound, _ := constant.Int64Val(v) + t := types.NewArray(n.Elem.Type(), bound) + n.SetOTYPE(t) checkwidth(t) case ir.OTMAP: ok |= ctxType - n.SetLeft(typecheck(n.Left(), ctxType)) - n.SetRight(typecheck(n.Right(), ctxType)) - l := n.Left() - r := n.Right() + n := n.(*ir.MapType) + n.Key = typecheck(n.Key, ctxType) + n.Elem = typecheck(n.Elem, ctxType) + l := n.Key + r := n.Elem if l.Type() == nil || r.Type() == nil { - n.SetType(nil) return n } if l.Type().NotInHeap() { @@ -486,48 +486,42 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { if r.Type().NotInHeap() { base.Errorf("incomplete (or unallocatable) map value not allowed") } - - setTypeNode(n, types.NewMap(l.Type(), r.Type())) + n.SetOTYPE(types.NewMap(l.Type(), r.Type())) mapqueue = append(mapqueue, n) // check map keys when all types are settled - n.SetLeft(nil) - n.SetRight(nil) case ir.OTCHAN: ok |= ctxType - n.SetLeft(typecheck(n.Left(), ctxType)) - l := n.Left() + n := n.(*ir.ChanType) + n.Elem = typecheck(n.Elem, ctxType) + l := n.Elem if l.Type() == nil { - n.SetType(nil) return n } if l.Type().NotInHeap() { base.Errorf("chan of incomplete (or unallocatable) type not allowed") } - - setTypeNode(n, types.NewChan(l.Type(), 
n.TChanDir())) - n.SetLeft(nil) - n.ResetAux() + n.SetOTYPE(types.NewChan(l.Type(), n.Dir)) case ir.OTSTRUCT: ok |= ctxType - setTypeNode(n, tostruct(n.List().Slice())) - n.PtrList().Set(nil) + n := n.(*ir.StructType) + n.SetOTYPE(tostruct(n.Fields)) case ir.OTINTER: ok |= ctxType - setTypeNode(n, tointerface(n.List().Slice())) + n := n.(*ir.InterfaceType) + n.SetOTYPE(tointerface(n.Methods)) case ir.OTFUNC: ok |= ctxType - setTypeNode(n, functype(n.Left(), n.List().Slice(), n.Rlist().Slice())) - n.SetLeft(nil) - n.PtrList().Set(nil) - n.PtrRlist().Set(nil) + n := n.(*ir.FuncType) + n.SetOTYPE(functype(n.Recv, n.Params, n.Results)) // type or expr case ir.ODEREF: - n.SetLeft(typecheck(n.Left(), ctxExpr|ctxType)) - l := n.Left() + n := n.(*ir.StarExpr) + n.X = typecheck(n.X, ctxExpr|ctxType) + l := n.X t := l.Type() if t == nil { n.SetType(nil) @@ -535,8 +529,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } if l.Op() == ir.OTYPE { ok |= ctxType - setTypeNode(n, types.NewPtr(l.Type())) - n.SetLeft(nil) + n.SetOTYPE(types.NewPtr(l.Type())) // Ensure l.Type gets dowidth'd for the backend. Issue 20174. checkwidth(l.Type()) break @@ -2822,16 +2815,14 @@ func typecheckcomplit(n ir.Node) (res ir.Node) { setlineno(n.Right()) // Need to handle [...]T arrays specially. - if n.Right().Op() == ir.OTARRAY && n.Right().Left() != nil && n.Right().Left().Op() == ir.ODDD { - n.Right().SetRight(typecheck(n.Right().Right(), ctxType)) - if n.Right().Right().Type() == nil { + if array, ok := n.Right().(*ir.ArrayType); ok && array.Elem != nil && array.Len == nil { + array.Elem = typecheck(array.Elem, ctxType) + elemType := array.Elem.Type() + if elemType == nil { n.SetType(nil) return n } - elemType := n.Right().Right().Type() - length := typecheckarraylit(elemType, -1, n.List().Slice(), "array literal") - n.SetOp(ir.OARRAYLIT) n.SetType(types.NewArray(elemType, length)) n.SetRight(nil) @@ -3464,7 +3455,7 @@ func stringtoruneslit(n ir.Node) ir.Node { return nn } -var mapqueue []ir.Node +var mapqueue []*ir.MapType func checkMapKeys() { for _, n := range mapqueue { @@ -3531,7 +3522,7 @@ func typecheckdeftype(n ir.Node) { } n.SetTypecheck(1) - n.Name().Ntype = typecheck(n.Name().Ntype, ctxType) + n.Name().Ntype = typecheckNtype(n.Name().Ntype) t := n.Name().Ntype.Type() if t == nil { n.SetDiag(true) @@ -3593,7 +3584,7 @@ func typecheckdef(n ir.Node) { case ir.OLITERAL: if n.Name().Ntype != nil { - n.Name().Ntype = typecheck(n.Name().Ntype, ctxType) + n.Name().Ntype = typecheckNtype(n.Name().Ntype) n.SetType(n.Name().Ntype.Type()) n.Name().Ntype = nil if n.Type() == nil { @@ -3647,7 +3638,7 @@ func typecheckdef(n ir.Node) { case ir.ONAME: if n.Name().Ntype != nil { - n.Name().Ntype = typecheck(n.Name().Ntype, ctxType) + n.Name().Ntype = typecheckNtype(n.Name().Ntype) n.SetType(n.Name().Ntype.Type()) if n.Type() == nil { n.SetDiag(true) @@ -3686,9 +3677,9 @@ func typecheckdef(n ir.Node) { if n.Alias() { // Type alias declaration: Simply use the rhs type - no need // to create a new type. - // If we have a syntax error, p.Ntype may be nil. + // If we have a syntax error, name.Ntype may be nil. 
if n.Ntype != nil { - n.Ntype = typecheck(n.Ntype, ctxType) + n.Ntype = typecheckNtype(n.Ntype) n.SetType(n.Ntype.Type()) if n.Type() == nil { n.SetDiag(true) @@ -3706,8 +3697,10 @@ func typecheckdef(n ir.Node) { // regular type declaration defercheckwidth() n.SetWalkdef(1) - setTypeNode(n, types.New(types.TFORW)) - n.Type().Sym = n.Sym() + t := types.New(types.TFORW) + t.Nod = n + t.Sym = n.Sym() + n.SetType(t) errorsBefore := base.Errors() typecheckdeftype(n) if n.Type().Etype == types.TFORW && base.Errors() > errorsBefore { @@ -3990,11 +3983,12 @@ func deadcodeexpr(n ir.Node) ir.Node { return n } -// setTypeNode sets n to an OTYPE node representing t. -func setTypeNode(n ir.Node, t *types.Type) { - n.SetOp(ir.OTYPE) +func toTypeNode(orig ir.Node, t *types.Type) ir.Node { + n := ir.Nod(ir.OTYPE, nil, nil) + n.SetPos(orig.Pos()) n.SetType(t) - n.Type().Nod = n + t.Nod = n + return n } // getIotaValue returns the current value for "iota", diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index 931135759ac84..d43545391cfc7 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -176,7 +176,10 @@ func typeinit() { t := types.New(types.TUNSAFEPTR) types.Types[types.TUNSAFEPTR] = t t.Sym = unsafepkg.Lookup("Pointer") - t.Sym.Def = ir.TypeNode(t) + n := ir.NewNameAt(src.NoXPos, t.Sym) // NewNameAt to get a package for use tracking + n.SetOp(ir.OTYPE) + n.SetType(t) + t.Sym.Def = n dowidth(types.Types[types.TUNSAFEPTR]) for et := types.TINT8; et <= types.TUINT64; et++ { diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 2376bfc093503..e7c88bd329610 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -853,7 +853,7 @@ opswitch: } value = ir.Nod(ir.OINDEX, staticuint64s, index) value.SetBounded(true) - case n.Left().Class() == ir.PEXTERN && n.Left().Name() != nil && n.Left().Name().Readonly(): + case n.Left().Name() != nil && n.Left().Class() == ir.PEXTERN && n.Left().Name().Readonly(): // n.Left is a readonly global; use it directly. value = n.Left() case !fromType.IsInterface() && n.Esc() == EscNone && fromType.Width <= 1024: @@ -3183,10 +3183,10 @@ func eqfor(t *types.Type) (n ir.Node, needsize bool) { sym := typesymprefix(".eq", t) n := NewName(sym) setNodeNameFunc(n) - n.SetType(functype(nil, []ir.Node{ + n.SetType(functype(nil, []*ir.Field{ anonfield(types.NewPtr(t)), anonfield(types.NewPtr(t)), - }, []ir.Node{ + }, []*ir.Field{ anonfield(types.Types[types.TBOOL]), })) return n, false @@ -3914,7 +3914,7 @@ func wrapCall(n ir.Node, init *ir.Nodes) ir.Node { // origArgs keeps track of what argument is uintptr-unsafe/unsafe-uintptr conversion. 
origArgs := make([]ir.Node, n.List().Len()) - t := ir.Nod(ir.OTFUNC, nil, nil) + var funcArgs []*ir.Field for i, arg := range n.List().Slice() { s := lookupN("a", i) if !isBuiltinCall && arg.Op() == ir.OCONVNOP && arg.Type().IsUintptr() && arg.Left().Type().IsUnsafePtr() { @@ -3922,8 +3922,9 @@ func wrapCall(n ir.Node, init *ir.Nodes) ir.Node { arg = arg.Left() n.List().SetIndex(i, arg) } - t.PtrList().Append(symfield(s, arg.Type())) + funcArgs = append(funcArgs, symfield(s, arg.Type())) } + t := ir.NewFuncType(base.Pos, nil, funcArgs, nil) wrapCall_prgen++ sym := lookupN("wrap·", wrapCall_prgen) diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 2c1391859972e..f8e5f7641ca15 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -110,3 +110,47 @@ func (n *CallPartExpr) Left() Node { return n.X } func (n *CallPartExpr) Right() Node { return n.Method } func (n *CallPartExpr) SetLeft(x Node) { n.X = x } func (n *CallPartExpr) SetRight(x Node) { n.Method = x.(*Name) } + +// A StarExpr is a dereference expression *X. +// It may end up being a value or a type. +type StarExpr struct { + miniExpr + X Node +} + +func NewStarExpr(pos src.XPos, x Node) *StarExpr { + n := &StarExpr{X: x} + n.op = ODEREF + n.pos = pos + return n +} + +func (n *StarExpr) String() string { return fmt.Sprint(n) } +func (n *StarExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *StarExpr) RawCopy() Node { c := *n; return &c } +func (n *StarExpr) Left() Node { return n.X } +func (n *StarExpr) SetLeft(x Node) { n.X = x } + +func (*StarExpr) CanBeNtype() {} + +// SetOTYPE changes n to be an OTYPE node returning t, +// like all the type nodes in type.go. +func (n *StarExpr) SetOTYPE(t *types.Type) { + n.op = OTYPE + n.X = nil + n.typ = t + if t.Nod == nil { + t.Nod = n + } +} + +func (n *StarExpr) DeepCopy(pos src.XPos) Node { + if n.op == OTYPE { + // Can't change types and no node references left. + return n + } + c := SepCopy(n).(*StarExpr) + c.pos = n.posOr(pos) + c.X = DeepCopy(pos, n.X) + return c +} diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index a3999b6da0380..c723bad4c9332 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -403,10 +403,6 @@ func jconvFmt(n Node, s fmt.State, flag FmtFlag) { fmt.Fprintf(s, " implicit(%v)", n.Implicit()) } - if n.Embedded() { - fmt.Fprintf(s, " embedded") - } - if n.Op() == ONAME { if n.Name().Addrtaken() { fmt.Fprint(s, " addrtaken") @@ -921,13 +917,6 @@ func stmtFmt(n Node, s fmt.State, mode FmtMode) { case ODCL: mode.Fprintf(s, "var %v %v", n.Left().Sym(), n.Left().Type()) - case ODCLFIELD: - if n.Sym() != nil { - mode.Fprintf(s, "%v %v", n.Sym(), n.Left()) - } else { - mode.Fprintf(s, "%v", n.Left()) - } - // Don't export "v = " initializing statements, hope they're always // preceded by the DCL which will be re-parsed and typechecked to reproduce // the "v = " again. 
@@ -1115,6 +1104,7 @@ var OpPrec = []int{
 OSTR2RUNES: 8,
 OSTRUCTLIT: 8,
 OTARRAY: 8,
+ OTSLICE: 8,
 OTCHAN: 8,
 OTFUNC: 8,
 OTINTER: 8,
@@ -1176,7 +1166,6 @@ var OpPrec = []int{
 OCASE: -1,
 OCONTINUE: -1,
 ODCL: -1,
- ODCLFIELD: -1,
 ODEFER: -1,
 OEMPTY: -1,
 OFALL: -1,
@@ -1294,29 +1283,40 @@ func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) {
 }
 mode.Fprintf(s, "%v", n.Type())

+ case OTSLICE:
+ n := n.(*SliceType)
+ if n.DDD {
+ mode.Fprintf(s, "...%v", n.Elem)
+ } else {
+ mode.Fprintf(s, "[]%v", n.Elem) // happens before typecheck
+ }
+
 case OTARRAY:
- if n.Left() != nil {
- mode.Fprintf(s, "[%v]%v", n.Left(), n.Right())
- return
+ n := n.(*ArrayType)
+ if n.Len == nil {
+ mode.Fprintf(s, "[...]%v", n.Elem)
+ } else {
+ mode.Fprintf(s, "[%v]%v", n.Len, n.Elem)
 }
- mode.Fprintf(s, "[]%v", n.Right()) // happens before typecheck

 case OTMAP:
- mode.Fprintf(s, "map[%v]%v", n.Left(), n.Right())
+ n := n.(*MapType)
+ mode.Fprintf(s, "map[%v]%v", n.Key, n.Elem)

 case OTCHAN:
- switch n.TChanDir() {
+ n := n.(*ChanType)
+ switch n.Dir {
 case types.Crecv:
- mode.Fprintf(s, "<-chan %v", n.Left())
+ mode.Fprintf(s, "<-chan %v", n.Elem)
 case types.Csend:
- mode.Fprintf(s, "chan<- %v", n.Left())
+ mode.Fprintf(s, "chan<- %v", n.Elem)
 default:
- if n.Left() != nil && n.Left().Op() == OTCHAN && n.Left().Sym() == nil && n.Left().TChanDir() == types.Crecv {
- mode.Fprintf(s, "chan (%v)", n.Left())
+ if n.Elem != nil && n.Elem.Op() == OTCHAN && n.Elem.(*ChanType).Dir == types.Crecv {
+ mode.Fprintf(s, "chan (%v)", n.Elem)
 } else {
- mode.Fprintf(s, "chan %v", n.Left())
+ mode.Fprintf(s, "chan %v", n.Elem)
 }
 }

@@ -1556,8 +1556,6 @@ func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) {
 }
 exprFmt(n1, s, nprec, mode)
 }
- case ODDD:
- mode.Fprintf(s, "...")
 default:
 mode.Fprintf(s, "<node %v>", n.Op())
 }
diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go
index 338ded3308f32..d73ec4ecd5ec0 100644
--- a/src/cmd/compile/internal/ir/mini.go
+++ b/src/cmd/compile/internal/ir/mini.go
@@ -33,6 +33,15 @@ type miniNode struct {
 esc uint16
 }

+// posOr returns pos if known, or else n.pos.
+// For use in DeepCopy.
+func (n *miniNode) posOr(pos src.XPos) src.XPos {
+ if pos.IsKnown() {
+ return pos
+ }
+ return n.pos
+}
+
 // op can be read, but not written.
 // An embedding implementation can provide a SetOp if desired.
 // (The panicking SetOp is with the other panics below.)
diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 5546488fa7f16..1bc6bea3b670b 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -43,7 +43,7 @@ type Name struct { Vargen int32 Decldepth int32 // declaration loop depth, increased for every loop or label - Ntype Node + Ntype Ntype Heapaddr *Name // temp holding heap address of param // ONAME PAUTOHEAP @@ -160,6 +160,8 @@ func (n *Name) SetOffset(x int64) { n.offset = x } func (n *Name) Iota() int64 { return n.offset } func (n *Name) SetIota(x int64) { n.offset = x } +func (*Name) CanBeNtype() {} + func (n *Name) SetOp(op Op) { switch op { default: @@ -371,6 +373,8 @@ func (p *PkgName) Format(s fmt.State, verb rune) { FmtNode(p, s, verb) } func (p *PkgName) RawCopy() Node { c := *p; return &c } func (p *PkgName) Sym() *types.Sym { return p.sym } +func (*PkgName) CanBeNtype() {} + func NewPkgName(pos src.XPos, sym *types.Sym, pkg *types.Pkg) *PkgName { p := &PkgName{sym: sym, Pkg: pkg} p.op = OPACK diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 653410d175be9..74557236ccb1d 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -79,12 +79,8 @@ type Node interface { SetImplicit(x bool) IsDDD() bool SetIsDDD(x bool) - Embedded() bool - SetEmbedded(x bool) IndexMapLValue() bool SetIndexMapLValue(x bool) - TChanDir() types.ChanDir - SetTChanDir(x types.ChanDir) ResetAux() HasBreak() bool SetHasBreak(x bool) @@ -205,6 +201,10 @@ func (n *node) Uint64Val() uint64 { panic("node.Uint64Val") } func (n *node) BoolVal() bool { panic("node.BoolVal") } func (n *node) StringVal() string { panic("node.StringVal") } +// node can be Ntype only because of OXDOT of undefined name. +// When that moves into its own syntax, can drop this. +func (n *node) CanBeNtype() {} + func (n *node) SetOp(op Op) { if !okForNod[op] { panic("cannot node.SetOp " + op.String()) @@ -252,20 +252,6 @@ func (n *node) SetIndexMapLValue(b bool) { } } -func (n *node) TChanDir() types.ChanDir { - if n.Op() != OTCHAN { - base.Fatalf("unexpected op: %v", n.Op()) - } - return types.ChanDir(n.aux) -} - -func (n *node) SetTChanDir(dir types.ChanDir) { - if n.Op() != OTCHAN { - base.Fatalf("unexpected op: %v", n.Op()) - } - n.aux = uint8(dir) -} - func IsSynthetic(n Node) bool { name := n.Sym().Name return name[0] == '.' 
|| name[0] == '~' @@ -301,7 +287,6 @@ const ( _, nodeBounded // bounds check unnecessary _, nodeHasCall // expression contains a function call _, nodeLikely // if statement condition likely - _, nodeEmbedded // ODCLFIELD embedded type ) func (n *node) Class() Class { return Class(n.flags.get3(nodeClass)) } @@ -320,7 +305,6 @@ func (n *node) Transient() bool { return n.flags&nodeTransient != 0 } func (n *node) Bounded() bool { return n.flags&nodeBounded != 0 } func (n *node) HasCall() bool { return n.flags&nodeHasCall != 0 } func (n *node) Likely() bool { return n.flags&nodeLikely != 0 } -func (n *node) Embedded() bool { return n.flags&nodeEmbedded != 0 } func (n *node) SetClass(b Class) { n.flags.set3(nodeClass, uint8(b)) } func (n *node) SetWalkdef(b uint8) { n.flags.set2(nodeWalkdef, b) } @@ -336,7 +320,6 @@ func (n *node) SetColas(b bool) { n.flags.set(nodeColas, b) } func (n *node) SetTransient(b bool) { n.flags.set(nodeTransient, b) } func (n *node) SetHasCall(b bool) { n.flags.set(nodeHasCall, b) } func (n *node) SetLikely(b bool) { n.flags.set(nodeLikely, b) } -func (n *node) SetEmbedded(b bool) { n.flags.set(nodeEmbedded, b) } // MarkNonNil marks a pointer n as being guaranteed non-nil, // on all code paths, at all times. @@ -474,7 +457,7 @@ const ( // Used during parsing but don't last. ODCLFUNC // func f() or func (r) f() - ODCLFIELD // struct field, interface field, or func/method argument/return value. + ODCLFIELD // UNUSED: TODO(rsc): Delete. ODCLCONST // const pi = 3.14 ODCLTYPE // type Int int or type Int = int @@ -593,11 +576,11 @@ const ( // OTFUNC: func() - Left is receiver field, List is list of param fields, Rlist is // list of result fields. OTFUNC - OTARRAY // []int, [8]int, [N]int or [...]int - OTSLICE // to be used in future CL + OTARRAY // [8]int or [...]int + OTSLICE // []int // misc - ODDD // func f(args ...int) or f(l...) or var a = [...]int{0, 1, 2}. + ODDD // UNUSED; TODO(rsc): Delete. OINLCALL // intermediary representation of an inlined call. OEFACE // itable and data words of an empty-interface value. OITAB // itable word of an interface value. @@ -1050,6 +1033,8 @@ func NodAt(pos src.XPos, op Op, nleft, nright Node) Node { switch op { case ODCLFUNC: return NewFunc(pos) + case ODEREF: + return NewStarExpr(pos, nleft) case OPACK: return NewPkgName(pos, nil, nil) case OEMPTY: @@ -1112,12 +1097,9 @@ var okForNod = [OEND]bool{ OCOPY: true, ODCL: true, ODCLCONST: true, - ODCLFIELD: true, ODCLTYPE: true, - ODDD: true, ODEFER: true, ODELETE: true, - ODEREF: true, ODIV: true, ODOT: true, ODOTINTER: true, @@ -1201,13 +1183,6 @@ var okForNod = [OEND]bool{ OSTRUCTLIT: true, OSUB: true, OSWITCH: true, - OTARRAY: true, - OTCHAN: true, - OTFUNC: true, - OTINTER: true, - OTMAP: true, - OTSTRUCT: true, - OTYPE: true, // TODO: Remove once setTypeNode is gone. OTYPESW: true, OVARDEF: true, OVARKILL: true, diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go index 3409424fed2e8..39411ed431976 100644 --- a/src/cmd/compile/internal/ir/type.go +++ b/src/cmd/compile/internal/ir/type.go @@ -7,21 +7,375 @@ package ir import ( "cmd/compile/internal/types" "cmd/internal/src" + "fmt" ) -func TypeNode(t *types.Type) Node { - return TypeNodeAt(src.NoXPos, t) +// Nodes that represent the syntax of a type before type-checking. +// After type-checking, they serve only as shells around a *types.Type. +// Calling TypeNode converts a *types.Type to a Node shell. + +// An Ntype is a Node that syntactically looks like a type. 
+// It can be the raw syntax for a type before typechecking,
+// or it can be an OTYPE with Type() set to a *types.Type.
+// Note that syntax doesn't guarantee it's a type: an expression
+// like *fmt is an Ntype (we don't know whether names are types yet),
+// but at least 1+1 is not an Ntype.
+type Ntype interface {
+ Node
+ CanBeNtype()
+}
+
+// A miniType is a minimal type syntax Node implementation,
+// to be embedded as the first field in a larger node implementation.
+type miniType struct {
+ miniNode
+ typ *types.Type
+}
+
+func (*miniType) CanBeNtype() {}
+
+func (n *miniType) Type() *types.Type { return n.typ }
+
+// setOTYPE changes n to be an OTYPE node returning t.
+// Rewriting the node in place this way should not be strictly
+// necessary (we should be able to update the uses with
+// proper OTYPE nodes), but it's mostly harmless and easy
+// to keep doing for now.
+//
+// setOTYPE also records t.Nod = self if t.Nod is not already set.
+// (Some types are shared by multiple OTYPE nodes, so only
+// the first such node is used as t.Nod.)
+func (n *miniType) setOTYPE(t *types.Type, self Node) {
+ if n.typ != nil {
+ panic(n.op.String() + " SetType: type already set")
+ }
+ n.op = OTYPE
+ n.typ = t
+
+ // t.Nod can be non-nil already
+ // in the case of shared *type.Types, like []byte or interface{}.
+ if t.Nod == nil {
+ t.Nod = self
+ }
+}
+
+func (n *miniType) Sym() *types.Sym { return nil } // for Format OTYPE
+func (n *miniType) Implicit() bool { return false } // for Format OTYPE
+
+// A ChanType represents a chan Elem syntax with the direction Dir.
+type ChanType struct {
+ miniType
+ Elem Node
+ Dir types.ChanDir
+}
+
+func NewChanType(pos src.XPos, elem Node, dir types.ChanDir) *ChanType {
+ n := &ChanType{Elem: elem, Dir: dir}
+ n.op = OTCHAN
+ n.pos = pos
+ return n
 }
 
-func TypeNodeAt(pos src.XPos, t *types.Type) Node {
- // if we copied another type with *t = *u
- // then t->nod might be out of date, so
- // check t->nod->type too
- if AsNode(t.Nod) == nil || AsNode(t.Nod).Type() != t {
- t.Nod = NodAt(pos, OTYPE, nil, nil)
- AsNode(t.Nod).SetType(t)
- AsNode(t.Nod).SetSym(t.Sym)
+func (n *ChanType) String() string { return fmt.Sprint(n) }
+func (n *ChanType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *ChanType) RawCopy() Node { c := *n; return &c }
+func (n *ChanType) SetOTYPE(t *types.Type) {
+ n.setOTYPE(t, n)
+ n.Elem = nil
+}
+
+func (n *ChanType) DeepCopy(pos src.XPos) Node {
+ if n.op == OTYPE {
+ // Can't change types and no node references left.
+ return n
 }
+ return NewChanType(n.posOr(pos), DeepCopy(pos, n.Elem), n.Dir)
+}
+
+// A MapType represents a map[Key]Value type syntax.
+type MapType struct {
+ miniType
+ Key Node
+ Elem Node
+}
+
+func NewMapType(pos src.XPos, key, elem Node) *MapType {
+ n := &MapType{Key: key, Elem: elem}
+ n.op = OTMAP
+ n.pos = pos
+ return n
+}
+
+func (n *MapType) String() string { return fmt.Sprint(n) }
+func (n *MapType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *MapType) RawCopy() Node { c := *n; return &c }
+func (n *MapType) SetOTYPE(t *types.Type) {
+ n.setOTYPE(t, n)
+ n.Key = nil
+ n.Elem = nil
+}
+
+func (n *MapType) DeepCopy(pos src.XPos) Node {
+ if n.op == OTYPE {
+ // Can't change types and no node references left.
+ return n
+ }
+ return NewMapType(n.posOr(pos), DeepCopy(pos, n.Key), DeepCopy(pos, n.Elem))
+}
+
+// A StructType represents a struct { ... } type syntax.
+type StructType struct { + miniType + Fields []*Field +} + +func NewStructType(pos src.XPos, fields []*Field) *StructType { + n := &StructType{Fields: fields} + n.op = OTSTRUCT + n.pos = pos + return n +} + +func (n *StructType) String() string { return fmt.Sprint(n) } +func (n *StructType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *StructType) RawCopy() Node { c := *n; return &c } +func (n *StructType) SetOTYPE(t *types.Type) { + n.setOTYPE(t, n) + n.Fields = nil +} + +func (n *StructType) DeepCopy(pos src.XPos) Node { + if n.op == OTYPE { + // Can't change types and no node references left. + return n + } + return NewStructType(n.posOr(pos), deepCopyFields(pos, n.Fields)) +} + +func deepCopyFields(pos src.XPos, fields []*Field) []*Field { + var out []*Field + for _, f := range fields { + out = append(out, f.deepCopy(pos)) + } + return out +} + +// An InterfaceType represents a struct { ... } type syntax. +type InterfaceType struct { + miniType + Methods []*Field +} + +func NewInterfaceType(pos src.XPos, methods []*Field) *InterfaceType { + n := &InterfaceType{Methods: methods} + n.op = OTINTER + n.pos = pos + return n +} + +func (n *InterfaceType) String() string { return fmt.Sprint(n) } +func (n *InterfaceType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *InterfaceType) RawCopy() Node { c := *n; return &c } +func (n *InterfaceType) SetOTYPE(t *types.Type) { + n.setOTYPE(t, n) + n.Methods = nil +} + +func (n *InterfaceType) DeepCopy(pos src.XPos) Node { + if n.op == OTYPE { + // Can't change types and no node references left. + return n + } + return NewInterfaceType(n.posOr(pos), deepCopyFields(pos, n.Methods)) +} + +// A FuncType represents a func(Args) Results type syntax. +type FuncType struct { + miniType + Recv *Field + Params []*Field + Results []*Field +} - return AsNode(t.Nod) +func NewFuncType(pos src.XPos, rcvr *Field, args, results []*Field) *FuncType { + n := &FuncType{Recv: rcvr, Params: args, Results: results} + n.op = OTFUNC + n.pos = pos + return n +} + +func (n *FuncType) String() string { return fmt.Sprint(n) } +func (n *FuncType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *FuncType) RawCopy() Node { c := *n; return &c } + +func (n *FuncType) SetOTYPE(t *types.Type) { + n.setOTYPE(t, n) + n.Recv = nil + n.Params = nil + n.Results = nil +} + +func (n *FuncType) DeepCopy(pos src.XPos) Node { + if n.op == OTYPE { + // Can't change types and no node references left. + return n + } + return NewFuncType(n.posOr(pos), + n.Recv.deepCopy(pos), + deepCopyFields(pos, n.Params), + deepCopyFields(pos, n.Results)) +} + +// A Field is a declared struct field, interface method, or function argument. +// It is not a Node. 
+type Field struct { + Pos src.XPos + Sym *types.Sym + Ntype Ntype + Type *types.Type + Embedded bool + IsDDD bool + Note string + Decl *Name +} + +func NewField(pos src.XPos, sym *types.Sym, ntyp Ntype, typ *types.Type) *Field { + return &Field{Pos: pos, Sym: sym, Ntype: ntyp, Type: typ} +} + +func (f *Field) String() string { + var typ string + if f.Type != nil { + typ = fmt.Sprint(f.Type) + } else { + typ = fmt.Sprint(f.Ntype) + } + if f.Sym != nil { + return fmt.Sprintf("%v %v", f.Sym, typ) + } + return typ +} + +func (f *Field) deepCopy(pos src.XPos) *Field { + if f == nil { + return nil + } + fpos := pos + if !pos.IsKnown() { + fpos = f.Pos + } + decl := f.Decl + if decl != nil { + decl = DeepCopy(pos, decl).(*Name) + } + ntype := f.Ntype + if ntype != nil { + ntype = DeepCopy(pos, ntype).(Ntype) + } + // No keyed literal here: if a new struct field is added, we want this to stop compiling. + return &Field{fpos, f.Sym, ntype, f.Type, f.Embedded, f.IsDDD, f.Note, decl} +} + +// A SliceType represents a []Elem type syntax. +// If DDD is true, it's the ...Elem at the end of a function list. +type SliceType struct { + miniType + Elem Node + DDD bool +} + +func NewSliceType(pos src.XPos, elem Node) *SliceType { + n := &SliceType{Elem: elem} + n.op = OTSLICE + n.pos = pos + return n +} + +func (n *SliceType) String() string { return fmt.Sprint(n) } +func (n *SliceType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *SliceType) RawCopy() Node { c := *n; return &c } +func (n *SliceType) SetOTYPE(t *types.Type) { + n.setOTYPE(t, n) + n.Elem = nil +} + +func (n *SliceType) DeepCopy(pos src.XPos) Node { + if n.op == OTYPE { + // Can't change types and no node references left. + return n + } + return NewSliceType(n.posOr(pos), DeepCopy(pos, n.Elem)) +} + +// An ArrayType represents a [Len]Elem type syntax. +// If Len is nil, the type is a [...]Elem in an array literal. +type ArrayType struct { + miniType + Len Node + Elem Node +} + +func NewArrayType(pos src.XPos, size Node, elem Node) *ArrayType { + n := &ArrayType{Len: size, Elem: elem} + n.op = OTARRAY + n.pos = pos + return n +} + +func (n *ArrayType) String() string { return fmt.Sprint(n) } +func (n *ArrayType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *ArrayType) RawCopy() Node { c := *n; return &c } + +func (n *ArrayType) DeepCopy(pos src.XPos) Node { + if n.op == OTYPE { + // Can't change types and no node references left. + return n + } + return NewArrayType(n.posOr(pos), DeepCopy(pos, n.Len), DeepCopy(pos, n.Elem)) +} + +func (n *ArrayType) SetOTYPE(t *types.Type) { + n.setOTYPE(t, n) + n.Len = nil + n.Elem = nil +} + +// A typeNode is a Node wrapper for type t. +type typeNode struct { + miniNode + typ *types.Type +} + +func newTypeNode(pos src.XPos, typ *types.Type) *typeNode { + n := &typeNode{typ: typ} + n.pos = pos + n.op = OTYPE + return n +} + +func (n *typeNode) String() string { return fmt.Sprint(n) } +func (n *typeNode) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *typeNode) RawCopy() Node { c := *n; return &c } +func (n *typeNode) Type() *types.Type { return n.typ } +func (n *typeNode) Sym() *types.Sym { return n.typ.Sym } +func (n *typeNode) CanBeNtype() {} + +// TypeNode returns the Node representing the type t. +func TypeNode(t *types.Type) Ntype { + return TypeNodeAt(src.NoXPos, t) +} + +// TypeNodeAt returns the Node representing the type t. +// If the node must be created, TypeNodeAt uses the position pos. 
+// TODO(rsc): Does anyone actually use position on these type nodes? +func TypeNodeAt(pos src.XPos, t *types.Type) Ntype { + // If we copied another type with *t = *u, + // then t.Nod might be out of date, so check t.Nod.Type() too. + n := AsNode(t.Nod) + if n == nil || n.Type() != t { + n := newTypeNode(pos, t) // t.Sym may be nil + t.Nod = n + return n + } + return n.(Ntype) } From ae1a3378092e25c7a7aa0100c2e29397f7bc2798 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sun, 29 Nov 2020 08:58:39 -0500 Subject: [PATCH 066/474] [dev.regabi] cmd/compile: remove ODCLFIELD and ODDD ops These are plain data now, not nodes (see previous CL). The opcode deletions are not safe for toolstash -cmp, so they are split into a separate CL. Change-Id: Icef8a01e190195a7539a35b92f42835d823e314a Reviewed-on: https://go-review.googlesource.com/c/go/+/274104 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/node.go | 2 - src/cmd/compile/internal/ir/op_string.go | 218 +++++++++++------------ 2 files changed, 108 insertions(+), 112 deletions(-) diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 74557236ccb1d..2850704ae1803 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -457,7 +457,6 @@ const ( // Used during parsing but don't last. ODCLFUNC // func f() or func (r) f() - ODCLFIELD // UNUSED: TODO(rsc): Delete. ODCLCONST // const pi = 3.14 ODCLTYPE // type Int int or type Int = int @@ -580,7 +579,6 @@ const ( OTSLICE // []int // misc - ODDD // UNUSED; TODO(rsc): Delete. OINLCALL // intermediary representation of an inlined call. OEFACE // itable and data words of an empty-interface value. OITAB // itable word of an interface value. 
diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go index faec164c7bfab..eefdc0ee590b8 100644 --- a/src/cmd/compile/internal/ir/op_string.go +++ b/src/cmd/compile/internal/ir/op_string.go @@ -56,119 +56,117 @@ func _() { _ = x[OCOPY-45] _ = x[ODCL-46] _ = x[ODCLFUNC-47] - _ = x[ODCLFIELD-48] - _ = x[ODCLCONST-49] - _ = x[ODCLTYPE-50] - _ = x[ODELETE-51] - _ = x[ODOT-52] - _ = x[ODOTPTR-53] - _ = x[ODOTMETH-54] - _ = x[ODOTINTER-55] - _ = x[OXDOT-56] - _ = x[ODOTTYPE-57] - _ = x[ODOTTYPE2-58] - _ = x[OEQ-59] - _ = x[ONE-60] - _ = x[OLT-61] - _ = x[OLE-62] - _ = x[OGE-63] - _ = x[OGT-64] - _ = x[ODEREF-65] - _ = x[OINDEX-66] - _ = x[OINDEXMAP-67] - _ = x[OKEY-68] - _ = x[OSTRUCTKEY-69] - _ = x[OLEN-70] - _ = x[OMAKE-71] - _ = x[OMAKECHAN-72] - _ = x[OMAKEMAP-73] - _ = x[OMAKESLICE-74] - _ = x[OMAKESLICECOPY-75] - _ = x[OMUL-76] - _ = x[ODIV-77] - _ = x[OMOD-78] - _ = x[OLSH-79] - _ = x[ORSH-80] - _ = x[OAND-81] - _ = x[OANDNOT-82] - _ = x[ONEW-83] - _ = x[ONEWOBJ-84] - _ = x[ONOT-85] - _ = x[OBITNOT-86] - _ = x[OPLUS-87] - _ = x[ONEG-88] - _ = x[OOROR-89] - _ = x[OPANIC-90] - _ = x[OPRINT-91] - _ = x[OPRINTN-92] - _ = x[OPAREN-93] - _ = x[OSEND-94] - _ = x[OSLICE-95] - _ = x[OSLICEARR-96] - _ = x[OSLICESTR-97] - _ = x[OSLICE3-98] - _ = x[OSLICE3ARR-99] - _ = x[OSLICEHEADER-100] - _ = x[ORECOVER-101] - _ = x[ORECV-102] - _ = x[ORUNESTR-103] - _ = x[OSELRECV-104] - _ = x[OSELRECV2-105] - _ = x[OIOTA-106] - _ = x[OREAL-107] - _ = x[OIMAG-108] - _ = x[OCOMPLEX-109] - _ = x[OALIGNOF-110] - _ = x[OOFFSETOF-111] - _ = x[OSIZEOF-112] - _ = x[OMETHEXPR-113] - _ = x[OBLOCK-114] - _ = x[OBREAK-115] - _ = x[OCASE-116] - _ = x[OCONTINUE-117] - _ = x[ODEFER-118] - _ = x[OEMPTY-119] - _ = x[OFALL-120] - _ = x[OFOR-121] - _ = x[OFORUNTIL-122] - _ = x[OGOTO-123] - _ = x[OIF-124] - _ = x[OLABEL-125] - _ = x[OGO-126] - _ = x[ORANGE-127] - _ = x[ORETURN-128] - _ = x[OSELECT-129] - _ = x[OSWITCH-130] - _ = x[OTYPESW-131] - _ = x[OTCHAN-132] - _ = x[OTMAP-133] - _ = x[OTSTRUCT-134] - _ = x[OTINTER-135] - _ = x[OTFUNC-136] - _ = x[OTARRAY-137] - _ = x[OTSLICE-138] - _ = x[ODDD-139] - _ = x[OINLCALL-140] - _ = x[OEFACE-141] - _ = x[OITAB-142] - _ = x[OIDATA-143] - _ = x[OSPTR-144] - _ = x[OCLOSUREREAD-145] - _ = x[OCFUNC-146] - _ = x[OCHECKNIL-147] - _ = x[OVARDEF-148] - _ = x[OVARKILL-149] - _ = x[OVARLIVE-150] - _ = x[ORESULT-151] - _ = x[OINLMARK-152] - _ = x[ORETJMP-153] - _ = x[OGETG-154] - _ = x[OEND-155] + _ = x[ODCLCONST-48] + _ = x[ODCLTYPE-49] + _ = x[ODELETE-50] + _ = x[ODOT-51] + _ = x[ODOTPTR-52] + _ = x[ODOTMETH-53] + _ = x[ODOTINTER-54] + _ = x[OXDOT-55] + _ = x[ODOTTYPE-56] + _ = x[ODOTTYPE2-57] + _ = x[OEQ-58] + _ = x[ONE-59] + _ = x[OLT-60] + _ = x[OLE-61] + _ = x[OGE-62] + _ = x[OGT-63] + _ = x[ODEREF-64] + _ = x[OINDEX-65] + _ = x[OINDEXMAP-66] + _ = x[OKEY-67] + _ = x[OSTRUCTKEY-68] + _ = x[OLEN-69] + _ = x[OMAKE-70] + _ = x[OMAKECHAN-71] + _ = x[OMAKEMAP-72] + _ = x[OMAKESLICE-73] + _ = x[OMAKESLICECOPY-74] + _ = x[OMUL-75] + _ = x[ODIV-76] + _ = x[OMOD-77] + _ = x[OLSH-78] + _ = x[ORSH-79] + _ = x[OAND-80] + _ = x[OANDNOT-81] + _ = x[ONEW-82] + _ = x[ONEWOBJ-83] + _ = x[ONOT-84] + _ = x[OBITNOT-85] + _ = x[OPLUS-86] + _ = x[ONEG-87] + _ = x[OOROR-88] + _ = x[OPANIC-89] + _ = x[OPRINT-90] + _ = x[OPRINTN-91] + _ = x[OPAREN-92] + _ = x[OSEND-93] + _ = x[OSLICE-94] + _ = x[OSLICEARR-95] + _ = x[OSLICESTR-96] + _ = x[OSLICE3-97] + _ = x[OSLICE3ARR-98] + _ = x[OSLICEHEADER-99] + _ = x[ORECOVER-100] + _ = x[ORECV-101] + _ = x[ORUNESTR-102] + _ = 
x[OSELRECV-103] + _ = x[OSELRECV2-104] + _ = x[OIOTA-105] + _ = x[OREAL-106] + _ = x[OIMAG-107] + _ = x[OCOMPLEX-108] + _ = x[OALIGNOF-109] + _ = x[OOFFSETOF-110] + _ = x[OSIZEOF-111] + _ = x[OMETHEXPR-112] + _ = x[OBLOCK-113] + _ = x[OBREAK-114] + _ = x[OCASE-115] + _ = x[OCONTINUE-116] + _ = x[ODEFER-117] + _ = x[OEMPTY-118] + _ = x[OFALL-119] + _ = x[OFOR-120] + _ = x[OFORUNTIL-121] + _ = x[OGOTO-122] + _ = x[OIF-123] + _ = x[OLABEL-124] + _ = x[OGO-125] + _ = x[ORANGE-126] + _ = x[ORETURN-127] + _ = x[OSELECT-128] + _ = x[OSWITCH-129] + _ = x[OTYPESW-130] + _ = x[OTCHAN-131] + _ = x[OTMAP-132] + _ = x[OTSTRUCT-133] + _ = x[OTINTER-134] + _ = x[OTFUNC-135] + _ = x[OTARRAY-136] + _ = x[OTSLICE-137] + _ = x[OINLCALL-138] + _ = x[OEFACE-139] + _ = x[OITAB-140] + _ = x[OIDATA-141] + _ = x[OSPTR-142] + _ = x[OCLOSUREREAD-143] + _ = x[OCFUNC-144] + _ = x[OCHECKNIL-145] + _ = x[OVARDEF-146] + _ = x[OVARKILL-147] + _ = x[OVARLIVE-148] + _ = x[ORESULT-149] + _ = x[OINLMARK-150] + _ = x[ORETJMP-151] + _ = x[OGETG-152] + _ = x[OEND-153] } -const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRBLOCKBREAKCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEDDDINLCALLEFACEITABIDATASPTRCLOSUREREADCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND" +const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRBLOCKBREAKCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCLOSUREREADCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND" -var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 310, 317, 323, 326, 332, 339, 347, 351, 358, 366, 368, 370, 372, 374, 376, 378, 383, 388, 396, 399, 408, 411, 415, 423, 430, 439, 452, 455, 458, 461, 464, 467, 470, 476, 479, 485, 488, 494, 498, 501, 505, 510, 515, 521, 526, 530, 535, 543, 551, 557, 566, 577, 584, 588, 595, 602, 610, 614, 618, 622, 629, 636, 644, 650, 658, 663, 668, 672, 680, 685, 690, 694, 697, 705, 709, 711, 716, 718, 723, 729, 735, 741, 747, 752, 756, 763, 769, 774, 780, 786, 
789, 796, 801, 805, 810, 814, 825, 830, 838, 844, 851, 858, 864, 871, 877, 881, 884} +var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 477, 480, 486, 490, 493, 497, 502, 507, 513, 518, 522, 527, 535, 543, 549, 558, 569, 576, 580, 587, 594, 602, 606, 610, 614, 621, 628, 636, 642, 650, 655, 660, 664, 672, 677, 682, 686, 689, 697, 701, 703, 708, 710, 715, 721, 727, 733, 739, 744, 748, 755, 761, 766, 772, 778, 785, 790, 794, 799, 803, 814, 819, 827, 833, 840, 847, 853, 860, 866, 870, 873} func (i Op) String() string { if i >= Op(len(_Op_index)-1) { From c6de5d8d1f56465869a9271753796da35c60f3e6 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 30 Nov 2020 13:50:05 -0800 Subject: [PATCH 067/474] [dev.regabi] cmd/compile: simplify export data representation of nil The handling of ONIL and Orig has been a mess for a while, and dates back to how fmt.go used to print out typed nils. That hasn't applied for a while, but we've kept dragging it along to appease toolstash with the intention of someday finally removing it. Today is that day. Change-Id: I9a441628e53068ab1993cd2b67b977574d8117b7 Reviewed-on: https://go-review.googlesource.com/c/go/+/274212 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky Reviewed-by: Russ Cox TryBot-Result: Go Bot --- src/cmd/compile/internal/gc/iexport.go | 6 +----- src/cmd/compile/internal/gc/iimport.go | 17 ++++++++--------- 2 files changed, 9 insertions(+), 14 deletions(-) diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index c9f5d0c85c401..f19acb8bc2523 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -1201,11 +1201,7 @@ func (w *exportWriter) expr(n ir.Node) { if !n.Type().HasNil() { base.Fatalf("unexpected type for nil: %v", n.Type()) } - if orig := ir.Orig(n); orig != nil && orig != n { - w.expr(orig) - break - } - w.op(ir.OLITERAL) + w.op(ir.ONIL) w.pos(n.Pos()) w.typ(n.Type()) diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index c219b70e0fc4b..57c5e621829c8 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -809,20 +809,19 @@ func (r *importReader) node() ir.Node { // case OPAREN: // unreachable - unpacked by exporter - // case ONIL: - // unreachable - mapped to OLITERAL + case ir.ONIL: + pos := r.pos() + typ := r.typ() + + n := npos(pos, nodnil()) + n.SetType(typ) + return n case ir.OLITERAL: pos := r.pos() typ := r.typ() - var n ir.Node - if typ.HasNil() { - n = nodnil() - } else { - n = ir.NewLiteral(r.value(typ)) - } - n = npos(pos, n) + n := npos(pos, ir.NewLiteral(r.value(typ))) n.SetType(typ) return n From 7c9b6b1ca249c14d358075da9678cd1c20041b21 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sat, 28 Nov 2020 15:28:18 -0500 Subject: [PATCH 068/474] [dev.regabi] cmd/compile: clean up in preparation for statement Nodes Using statement nodes restricts the set of valid SetOp operations, because you can't SetOp across representation. Rewrite various code to avoid crossing those as-yet-unintroduced boundaries. 
In particular, code like

	x, y := v.(T)
	x, y := f()
	x, y := m[k]
	x, y := <-c

starts out with Op = OAS2, and then it turns into a specific Op
OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV, and then later in walk
is lowered to an OAS2 again. In the middle, the specific forms move
the right-hand side from n.Rlist().First() to n.Right(), and then
the conversion to OAS2 moves it back. This is unnecessary and makes
it hard for these all to share an underlying Node implementation.

This CL changes these specific forms to leave the right-hand side in
n.Rlist().First().

Similarly, OSELRECV2 is really just a temporary form of OAS2. This CL
changes it to use the same fields too.

Finally, this CL fixes the printing of OAS2 nodes in ir/fmt.go, which
formerly printed n.Right() instead of n.Rlist(). This results in a
(correct!) update to cmd/compile/internal/logopt's expected output:
~R0 = <N> becomes ~R0 = &y.b.

Passes buildall w/ toolstash -cmp.

Change-Id: I164aa2e17dc55bfb292024de53d7d250192ad64a
Reviewed-on: https://go-review.googlesource.com/c/go/+/274105
Trust: Russ Cox
Reviewed-by: Matthew Dempsky
---
 src/cmd/compile/internal/gc/escape.go | 17 ++-
 src/cmd/compile/internal/gc/iexport.go | 8 +-
 src/cmd/compile/internal/gc/initorder.go | 2 +-
 src/cmd/compile/internal/gc/inl.go | 74 +++++-----
 src/cmd/compile/internal/gc/noder.go | 19 ++-
 src/cmd/compile/internal/gc/order.go | 136 +++++++++---------
 src/cmd/compile/internal/gc/range.go | 102 +++++++------
 src/cmd/compile/internal/gc/select.go | 99 ++++++-------
 src/cmd/compile/internal/gc/ssa.go | 10 +-
 src/cmd/compile/internal/gc/typecheck.go | 4 -
 src/cmd/compile/internal/gc/universe.go | 1 +
 src/cmd/compile/internal/gc/walk.go | 19 ++-
 src/cmd/compile/internal/ir/fmt.go | 9 +-
 src/cmd/compile/internal/ir/node.go | 4 +-
 .../compile/internal/logopt/logopt_test.go | 4 +-
 15 files changed, 241 insertions(+), 267 deletions(-)

diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go
index 4cbc5d3851840..f2fff02959518 100644
--- a/src/cmd/compile/internal/gc/escape.go
+++ b/src/cmd/compile/internal/gc/escape.go
@@ -394,8 +394,8 @@ func (e *Escape) stmt(n ir.Node) {
 case ir.OSELRECV:
 e.assign(n.Left(), n.Right(), "selrecv", n)
 case ir.OSELRECV2:
- e.assign(n.Left(), n.Right(), "selrecv", n)
- e.assign(n.List().First(), nil, "selrecv", n)
+ e.assign(n.List().First(), n.Rlist().First(), "selrecv", n)
+ e.assign(n.List().Second(), nil, "selrecv", n)
 case ir.ORECV:
 // TODO(mdempsky): Consider e.discard(n.Left).
e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit @@ -412,18 +412,18 @@ func (e *Escape) stmt(n ir.Node) { } case ir.OAS2DOTTYPE: // v, ok = x.(type) - e.assign(n.List().First(), n.Right(), "assign-pair-dot-type", n) + e.assign(n.List().First(), n.Rlist().First(), "assign-pair-dot-type", n) e.assign(n.List().Second(), nil, "assign-pair-dot-type", n) case ir.OAS2MAPR: // v, ok = m[k] - e.assign(n.List().First(), n.Right(), "assign-pair-mapr", n) + e.assign(n.List().First(), n.Rlist().First(), "assign-pair-mapr", n) e.assign(n.List().Second(), nil, "assign-pair-mapr", n) case ir.OAS2RECV: // v, ok = <-ch - e.assign(n.List().First(), n.Right(), "assign-pair-receive", n) + e.assign(n.List().First(), n.Rlist().First(), "assign-pair-receive", n) e.assign(n.List().Second(), nil, "assign-pair-receive", n) case ir.OAS2FUNC: - e.stmts(n.Right().Init()) - e.call(e.addrs(n.List()), n.Right(), nil) + e.stmts(n.Rlist().First().Init()) + e.call(e.addrs(n.List()), n.Rlist().First(), nil) case ir.ORETURN: results := e.curfn.Type().Results().FieldSlice() for i, v := range n.List().Slice() { @@ -709,8 +709,7 @@ func (e *Escape) discards(l ir.Nodes) { // that represents storing into the represented location. func (e *Escape) addr(n ir.Node) EscHole { if n == nil || ir.IsBlank(n) { - // Can happen at least in OSELRECV. - // TODO(mdempsky): Anywhere else? + // Can happen in select case, range, maybe others. return e.discardHole() } diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index f19acb8bc2523..d6c50c7285772 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -1076,18 +1076,12 @@ func (w *exportWriter) stmt(n ir.Node) { w.expr(n.Right()) } - case ir.OAS2: + case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV: w.op(ir.OAS2) w.pos(n.Pos()) w.exprList(n.List()) w.exprList(n.Rlist()) - case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV: - w.op(ir.OAS2) - w.pos(n.Pos()) - w.exprList(n.List()) - w.exprList(ir.AsNodes([]ir.Node{n.Right()})) - case ir.ORETURN: w.op(ir.ORETURN) w.pos(n.Pos()) diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go index ea3d74d5ba084..87a78ae0532f4 100644 --- a/src/cmd/compile/internal/gc/initorder.go +++ b/src/cmd/compile/internal/gc/initorder.go @@ -256,7 +256,7 @@ func collectDeps(n ir.Node, transitive bool) ir.NodeSet { case ir.OAS: d.inspect(n.Right()) case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV: - d.inspect(n.Right()) + d.inspect(n.Rlist().First()) case ir.ODCLFUNC: d.inspectList(n.Body()) default: diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index bbbffebf5cec6..97ecb9559b431 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -520,14 +520,11 @@ func inlcalls(fn *ir.Func) { } // Turn an OINLCALL into a statement. -func inlconv2stmt(n ir.Node) { - n.SetOp(ir.OBLOCK) - - // n->ninit stays - n.PtrList().Set(n.Body().Slice()) - - n.PtrBody().Set(nil) - n.PtrRlist().Set(nil) +func inlconv2stmt(inlcall ir.Node) ir.Node { + n := ir.NodAt(inlcall.Pos(), ir.OBLOCK, nil, nil) + n.SetList(inlcall.Body()) + n.SetInit(inlcall.Init()) + return n } // Turn an OINLCALL into a single valued expression. 
@@ -600,9 +597,10 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool) ir.Node { lno := setlineno(n) inlnodelist(n.Init(), maxCost, inlMap) - for _, n1 := range n.Init().Slice() { + init := n.Init().Slice() + for i, n1 := range init { if n1.Op() == ir.OINLCALL { - inlconv2stmt(n1) + init[i] = inlconv2stmt(n1) } } @@ -614,50 +612,49 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool) ir.Node { n.SetRight(inlnode(n.Right(), maxCost, inlMap)) if n.Right() != nil && n.Right().Op() == ir.OINLCALL { if n.Op() == ir.OFOR || n.Op() == ir.OFORUNTIL { - inlconv2stmt(n.Right()) - } else if n.Op() == ir.OAS2FUNC { - n.PtrRlist().Set(inlconv2list(n.Right())) - n.SetRight(nil) - n.SetOp(ir.OAS2) - n.SetTypecheck(0) - n = typecheck(n, ctxStmt) + n.SetRight(inlconv2stmt(n.Right())) } else { n.SetRight(inlconv2expr(n.Right())) } } inlnodelist(n.List(), maxCost, inlMap) + s := n.List().Slice() + convert := inlconv2expr if n.Op() == ir.OBLOCK { - for _, n2 := range n.List().Slice() { - if n2.Op() == ir.OINLCALL { - inlconv2stmt(n2) - } - } - } else { - s := n.List().Slice() - for i1, n1 := range s { - if n1 != nil && n1.Op() == ir.OINLCALL { - s[i1] = inlconv2expr(s[i1]) - } + convert = inlconv2stmt + } + for i, n1 := range s { + if n1 != nil && n1.Op() == ir.OINLCALL { + s[i] = convert(n1) } } inlnodelist(n.Rlist(), maxCost, inlMap) - s := n.Rlist().Slice() - for i1, n1 := range s { + + if n.Op() == ir.OAS2FUNC && n.Rlist().First().Op() == ir.OINLCALL { + n.PtrRlist().Set(inlconv2list(n.Rlist().First())) + n.SetOp(ir.OAS2) + n.SetTypecheck(0) + n = typecheck(n, ctxStmt) + } + + s = n.Rlist().Slice() + for i, n1 := range s { if n1.Op() == ir.OINLCALL { if n.Op() == ir.OIF { - inlconv2stmt(n1) + s[i] = inlconv2stmt(n1) } else { - s[i1] = inlconv2expr(s[i1]) + s[i] = inlconv2expr(n1) } } } inlnodelist(n.Body(), maxCost, inlMap) - for _, n := range n.Body().Slice() { - if n.Op() == ir.OINLCALL { - inlconv2stmt(n) + s = n.Body().Slice() + for i, n1 := range s { + if n1.Op() == ir.OINLCALL { + s[i] = inlconv2stmt(n1) } } @@ -1200,9 +1197,10 @@ func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool) // and each use must redo the inlining. // luckily these are small. 
inlnodelist(call.Body(), maxCost, inlMap) - for _, n := range call.Body().Slice() { - if n.Op() == ir.OINLCALL { - inlconv2stmt(n) + s := call.Body().Slice() + for i, n1 := range s { + if n1.Op() == ir.OINLCALL { + s[i] = inlconv2stmt(n1) } } diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index e6c78d1afb5d9..98a09f40069a3 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -1001,20 +1001,17 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node { return n } - n := p.nod(stmt, ir.OAS, nil, nil) // assume common case - rhs := p.exprList(stmt.Rhs) - lhs := p.assignList(stmt.Lhs, n, stmt.Op == syntax.Def) - - if len(lhs) == 1 && len(rhs) == 1 { - // common case - n.SetLeft(lhs[0]) - n.SetRight(rhs[0]) - } else { - n.SetOp(ir.OAS2) - n.PtrList().Set(lhs) + if list, ok := stmt.Lhs.(*syntax.ListExpr); ok && len(list.ElemList) != 1 || len(rhs) != 1 { + n := p.nod(stmt, ir.OAS2, nil, nil) + n.PtrList().Set(p.assignList(stmt.Lhs, n, stmt.Op == syntax.Def)) n.PtrRlist().Set(rhs) + return n } + + n := p.nod(stmt, ir.OAS, nil, nil) + n.SetLeft(p.assignList(stmt.Lhs, n, stmt.Op == syntax.Def)[0]) + n.SetRight(rhs[0]) return n case *syntax.BranchStmt: diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index d4db7be9119f9..66e279d85f83a 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -103,7 +103,11 @@ func (o *Order) newTemp(t *types.Type, clear bool) ir.Node { // (The other candidate would be map access, but map access // returns a pointer to the result data instead of taking a pointer // to be filled in.) +// TODO(rsc): t == n.Type() always; remove parameter. func (o *Order) copyExpr(n ir.Node, t *types.Type, clear bool) ir.Node { + if t != n.Type() { + panic("copyExpr") + } v := o.newTemp(t, clear) a := ir.Nod(ir.OAS, v, n) a = typecheck(a, ctxStmt) @@ -606,23 +610,19 @@ func (o *Order) stmt(n ir.Node) { // that we can ensure that if op panics // because r is zero, the panic happens before // the map assignment. - - n.SetLeft(o.safeExpr(n.Left())) - - // TODO(rsc): Why is this DeepCopy? - // We should know enough about the form here - // to do something more provably shallower. - l := ir.DeepCopy(src.NoXPos, n.Left()) - if l.Op() == ir.OINDEXMAP { - l.SetIndexMapLValue(false) + // DeepCopy is a big hammer here, but safeExpr + // makes sure there is nothing too deep being copied. 
+ l1 := o.safeExpr(n.Left()) + l2 := ir.DeepCopy(src.NoXPos, l1) + if l1.Op() == ir.OINDEXMAP { + l2.SetIndexMapLValue(false) } - l = o.copyExpr(l, n.Left().Type(), false) - n.SetRight(ir.Nod(n.SubOp(), l, n.Right())) - n.SetRight(typecheck(n.Right(), ctxExpr)) - n.SetRight(o.expr(n.Right(), nil)) - - n.SetOp(ir.OAS) - n.ResetAux() + l2 = o.copyExpr(l2, l2.Type(), false) + r := ir.NodAt(n.Pos(), n.SubOp(), l2, n.Right()) + r = typecheck(r, ctxExpr) + r = o.expr(r, nil) + n = ir.NodAt(n.Pos(), ir.OAS, l1, r) + n = typecheck(n, ctxStmt) } o.mapAssign(n) @@ -639,8 +639,8 @@ func (o *Order) stmt(n ir.Node) { case ir.OAS2FUNC: t := o.markTemp() o.exprList(n.List()) - o.init(n.Right()) - o.call(n.Right()) + o.init(n.Rlist().First()) + o.call(n.Rlist().First()) o.as2(n) o.cleanTemp(t) @@ -654,7 +654,7 @@ func (o *Order) stmt(n ir.Node) { t := o.markTemp() o.exprList(n.List()) - switch r := n.Right(); r.Op() { + switch r := n.Rlist().First(); r.Op() { case ir.ODOTTYPE2, ir.ORECV: r.SetLeft(o.expr(r.Left(), nil)) case ir.OINDEXMAP: @@ -866,38 +866,39 @@ func (o *Order) stmt(n ir.Node) { ir.Dump("select case", r) base.Fatalf("unknown op in select %v", r.Op()) - // If this is case x := <-ch or case x, y := <-ch, the case has - // the ODCL nodes to declare x and y. We want to delay that - // declaration (and possible allocation) until inside the case body. - // Delete the ODCL nodes here and recreate them inside the body below. case ir.OSELRECV, ir.OSELRECV2: + var dst, ok, recv ir.Node + if r.Op() == ir.OSELRECV { + // case x = <-c + // case <-c (dst is ir.BlankNode) + dst, ok, recv = r.Left(), ir.BlankNode, r.Right() + } else { + // case x, ok = <-c + dst, ok, recv = r.List().First(), r.List().Second(), r.Rlist().First() + } + + // If this is case x := <-ch or case x, y := <-ch, the case has + // the ODCL nodes to declare x and y. We want to delay that + // declaration (and possible allocation) until inside the case body. + // Delete the ODCL nodes here and recreate them inside the body below. if r.Colas() { - i := 0 - if r.Init().Len() != 0 && r.Init().First().Op() == ir.ODCL && r.Init().First().Left() == r.Left() { - i++ - } - if i < r.Init().Len() && r.Init().Index(i).Op() == ir.ODCL && r.List().Len() != 0 && r.Init().Index(i).Left() == r.List().First() { - i++ + init := r.Init().Slice() + if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].Left() == dst { + init = init[1:] } - if i >= r.Init().Len() { - r.PtrInit().Set(nil) + if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].Left() == ok { + init = init[1:] } + r.PtrInit().Set(init) } - if r.Init().Len() != 0 { ir.DumpList("ninit", r.Init()) base.Fatalf("ninit on select recv") } - // case x = <-c - // case x, ok = <-c - // r->left is x, r->ntest is ok, r->right is ORECV, r->right->left is c. - // r->left == N means 'case <-c'. - // c is always evaluated; x and ok are only evaluated when assigned. - r.Right().SetLeft(o.expr(r.Right().Left(), nil)) - - if r.Right().Left().Op() != ir.ONAME { - r.Right().SetLeft(o.copyExpr(r.Right().Left(), r.Right().Left().Type(), false)) + recv.SetLeft(o.expr(recv.Left(), nil)) + if recv.Left().Op() != ir.ONAME { + recv.SetLeft(o.copyExpr(recv.Left(), recv.Left().Type(), false)) } // Introduce temporary for receive and move actual copy into case body. @@ -906,42 +907,41 @@ func (o *Order) stmt(n ir.Node) { // temporary per distinct type, sharing the temp among all receives // with that temp. Similarly one ok bool could be shared among all // the x,ok receives. Not worth doing until there's a clear need. 
- if r.Left() != nil && ir.IsBlank(r.Left()) { - r.SetLeft(nil) - } - if r.Left() != nil { + if !ir.IsBlank(dst) { // use channel element type for temporary to avoid conversions, // such as in case interfacevalue = <-intchan. // the conversion happens in the OAS instead. - tmp1 := r.Left() - if r.Colas() { - tmp2 := ir.Nod(ir.ODCL, tmp1, nil) - tmp2 = typecheck(tmp2, ctxStmt) - n2.PtrInit().Append(tmp2) + dcl := ir.Nod(ir.ODCL, dst, nil) + dcl = typecheck(dcl, ctxStmt) + n2.PtrInit().Append(dcl) } - r.SetLeft(o.newTemp(r.Right().Left().Type().Elem(), r.Right().Left().Type().Elem().HasPointers())) - tmp2 := ir.Nod(ir.OAS, tmp1, r.Left()) - tmp2 = typecheck(tmp2, ctxStmt) - n2.PtrInit().Append(tmp2) + tmp := o.newTemp(recv.Left().Type().Elem(), recv.Left().Type().Elem().HasPointers()) + as := ir.Nod(ir.OAS, dst, tmp) + as = typecheck(as, ctxStmt) + n2.PtrInit().Append(as) + dst = tmp } - - if r.List().Len() != 0 && ir.IsBlank(r.List().First()) { - r.PtrList().Set(nil) - } - if r.List().Len() != 0 { - tmp1 := r.List().First() + if !ir.IsBlank(ok) { if r.Colas() { - tmp2 := ir.Nod(ir.ODCL, tmp1, nil) - tmp2 = typecheck(tmp2, ctxStmt) - n2.PtrInit().Append(tmp2) + dcl := ir.Nod(ir.ODCL, ok, nil) + dcl = typecheck(dcl, ctxStmt) + n2.PtrInit().Append(dcl) } - r.PtrList().Set1(o.newTemp(types.Types[types.TBOOL], false)) - tmp2 := okas(tmp1, r.List().First()) - tmp2 = typecheck(tmp2, ctxStmt) - n2.PtrInit().Append(tmp2) + tmp := o.newTemp(types.Types[types.TBOOL], false) + as := okas(ok, tmp) + as = typecheck(as, ctxStmt) + n2.PtrInit().Append(as) + ok = tmp + } + + if r.Op() == ir.OSELRECV { + r.SetLeft(dst) + } else { + r.List().SetIndex(0, dst) + r.List().SetIndex(1, ok) } orderBlock(n2.PtrInit(), o.free) @@ -1420,7 +1420,7 @@ func (o *Order) as2(n ir.Node) { func (o *Order) okAs2(n ir.Node) { var tmp1, tmp2 ir.Node if !ir.IsBlank(n.List().First()) { - typ := n.Right().Type() + typ := n.Rlist().First().Type() tmp1 = o.newTemp(typ, typ.HasPointers()) } diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index d52fad5fece24..2f2d7051c3dd4 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -157,15 +157,19 @@ func cheapComputableIndex(width int64) bool { // simpler forms. The result must be assigned back to n. // Node n may also be modified in place, and may also be // the returned node. -func walkrange(n ir.Node) ir.Node { - if isMapClear(n) { - m := n.Right() +func walkrange(nrange ir.Node) ir.Node { + if isMapClear(nrange) { + m := nrange.Right() lno := setlineno(m) - n = mapClear(m) + n := mapClear(m) base.Pos = lno return n } + nfor := ir.NodAt(nrange.Pos(), ir.OFOR, nil, nil) + nfor.SetInit(nrange.Init()) + nfor.SetSym(nrange.Sym()) + // variable name conventions: // ohv1, hv1, hv2: hidden (old) val 1, 2 // ha, hit: hidden aggregate, iterator @@ -173,20 +177,19 @@ func walkrange(n ir.Node) ir.Node { // hb: hidden bool // a, v1, v2: not hidden aggregate, val 1, 2 - t := n.Type() + t := nrange.Type() - a := n.Right() + a := nrange.Right() lno := setlineno(a) - n.SetRight(nil) var v1, v2 ir.Node - l := n.List().Len() + l := nrange.List().Len() if l > 0 { - v1 = n.List().First() + v1 = nrange.List().First() } if l > 1 { - v2 = n.List().Second() + v2 = nrange.List().Second() } if ir.IsBlank(v2) { @@ -201,14 +204,8 @@ func walkrange(n ir.Node) ir.Node { base.Fatalf("walkrange: v2 != nil while v1 == nil") } - // n.List has no meaning anymore, clear it - // to avoid erroneous processing by racewalk. 
- n.PtrList().Set(nil) - var ifGuard ir.Node - translatedLoopOp := ir.OFOR - var body []ir.Node var init []ir.Node switch t.Etype { @@ -216,9 +213,9 @@ func walkrange(n ir.Node) ir.Node { base.Fatalf("walkrange") case types.TARRAY, types.TSLICE: - if arrayClear(n, v1, v2, a) { + if nn := arrayClear(nrange, v1, v2, a); nn != nil { base.Pos = lno - return n + return nn } // order.stmt arranged for a copy of the array/slice variable if needed. @@ -230,8 +227,8 @@ func walkrange(n ir.Node) ir.Node { init = append(init, ir.Nod(ir.OAS, hv1, nil)) init = append(init, ir.Nod(ir.OAS, hn, ir.Nod(ir.OLEN, ha, nil))) - n.SetLeft(ir.Nod(ir.OLT, hv1, hn)) - n.SetRight(ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1)))) + nfor.SetLeft(ir.Nod(ir.OLT, hv1, hn)) + nfor.SetRight(ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1)))) // for range ha { body } if v1 == nil { @@ -245,7 +242,7 @@ func walkrange(n ir.Node) ir.Node { } // for v1, v2 := range ha { body } - if cheapComputableIndex(n.Type().Elem().Width) { + if cheapComputableIndex(nrange.Type().Elem().Width) { // v1, v2 = hv1, ha[hv1] tmp := ir.Nod(ir.OINDEX, ha, hv1) tmp.SetBounded(true) @@ -272,9 +269,9 @@ func walkrange(n ir.Node) ir.Node { // Enhance the prove pass to understand this. ifGuard = ir.Nod(ir.OIF, nil, nil) ifGuard.SetLeft(ir.Nod(ir.OLT, hv1, hn)) - translatedLoopOp = ir.OFORUNTIL + nfor.SetOp(ir.OFORUNTIL) - hp := temp(types.NewPtr(n.Type().Elem())) + hp := temp(types.NewPtr(nrange.Type().Elem())) tmp := ir.Nod(ir.OINDEX, ha, nodintconst(0)) tmp.SetBounded(true) init = append(init, ir.Nod(ir.OAS, hp, ir.Nod(ir.OADDR, tmp, nil))) @@ -293,16 +290,15 @@ func walkrange(n ir.Node) ir.Node { // end of the allocation. a = ir.Nod(ir.OAS, hp, addptr(hp, t.Elem().Width)) a = typecheck(a, ctxStmt) - n.PtrList().Set1(a) + nfor.PtrList().Set1(a) case types.TMAP: // order.stmt allocated the iterator for us. // we only use a once, so no copy needed. ha := a - hit := prealloc[n] + hit := prealloc[nrange] th := hit.Type() - n.SetLeft(nil) keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:hiter elemsym := th.Field(1).Sym // ditto @@ -310,11 +306,11 @@ func walkrange(n ir.Node) ir.Node { fn = substArgTypes(fn, t.Key(), t.Elem(), th) init = append(init, mkcall1(fn, nil, nil, typename(t), ha, ir.Nod(ir.OADDR, hit, nil))) - n.SetLeft(ir.Nod(ir.ONE, nodSym(ir.ODOT, hit, keysym), nodnil())) + nfor.SetLeft(ir.Nod(ir.ONE, nodSym(ir.ODOT, hit, keysym), nodnil())) fn = syslook("mapiternext") fn = substArgTypes(fn, th) - n.SetRight(mkcall1(fn, nil, nil, ir.Nod(ir.OADDR, hit, nil))) + nfor.SetRight(mkcall1(fn, nil, nil, ir.Nod(ir.OADDR, hit, nil))) key := nodSym(ir.ODOT, hit, keysym) key = ir.Nod(ir.ODEREF, key, nil) @@ -335,8 +331,6 @@ func walkrange(n ir.Node) ir.Node { // order.stmt arranged for a copy of the channel variable. 
ha := a - n.SetLeft(nil) - hv1 := temp(t.Elem()) hv1.SetTypecheck(1) if t.Elem().HasPointers() { @@ -344,12 +338,12 @@ func walkrange(n ir.Node) ir.Node { } hb := temp(types.Types[types.TBOOL]) - n.SetLeft(ir.Nod(ir.ONE, hb, nodbool(false))) + nfor.SetLeft(ir.Nod(ir.ONE, hb, nodbool(false))) a := ir.Nod(ir.OAS2RECV, nil, nil) a.SetTypecheck(1) a.PtrList().Set2(hv1, hb) - a.SetRight(ir.Nod(ir.ORECV, ha, nil)) - n.Left().PtrInit().Set1(a) + a.PtrRlist().Set1(ir.Nod(ir.ORECV, ha, nil)) + nfor.Left().PtrInit().Set1(a) if v1 == nil { body = nil } else { @@ -387,7 +381,7 @@ func walkrange(n ir.Node) ir.Node { init = append(init, ir.Nod(ir.OAS, hv1, nil)) // hv1 < len(ha) - n.SetLeft(ir.Nod(ir.OLT, hv1, ir.Nod(ir.OLEN, ha, nil))) + nfor.SetLeft(ir.Nod(ir.OLT, hv1, ir.Nod(ir.OLEN, ha, nil))) if v1 != nil { // hv1t = hv1 @@ -431,24 +425,25 @@ func walkrange(n ir.Node) ir.Node { } } - n.SetOp(translatedLoopOp) typecheckslice(init, ctxStmt) if ifGuard != nil { ifGuard.PtrInit().Append(init...) ifGuard = typecheck(ifGuard, ctxStmt) } else { - n.PtrInit().Append(init...) + nfor.PtrInit().Append(init...) } - typecheckslice(n.Left().Init().Slice(), ctxStmt) + typecheckslice(nfor.Left().Init().Slice(), ctxStmt) - n.SetLeft(typecheck(n.Left(), ctxExpr)) - n.SetLeft(defaultlit(n.Left(), nil)) - n.SetRight(typecheck(n.Right(), ctxStmt)) + nfor.SetLeft(typecheck(nfor.Left(), ctxExpr)) + nfor.SetLeft(defaultlit(nfor.Left(), nil)) + nfor.SetRight(typecheck(nfor.Right(), ctxStmt)) typecheckslice(body, ctxStmt) - n.PtrBody().Prepend(body...) + nfor.PtrBody().Append(body...) + nfor.PtrBody().Append(nrange.Body().Slice()...) + var n ir.Node = nfor if ifGuard != nil { ifGuard.PtrBody().Set1(n) n = ifGuard @@ -534,31 +529,31 @@ func mapClear(m ir.Node) ir.Node { // in which the evaluation of a is side-effect-free. // // Parameters are as in walkrange: "for v1, v2 = range a". -func arrayClear(n, v1, v2, a ir.Node) bool { +func arrayClear(loop, v1, v2, a ir.Node) ir.Node { if base.Flag.N != 0 || instrumenting { - return false + return nil } if v1 == nil || v2 != nil { - return false + return nil } - if n.Body().Len() != 1 || n.Body().First() == nil { - return false + if loop.Body().Len() != 1 || loop.Body().First() == nil { + return nil } - stmt := n.Body().First() // only stmt in body + stmt := loop.Body().First() // only stmt in body if stmt.Op() != ir.OAS || stmt.Left().Op() != ir.OINDEX { - return false + return nil } if !samesafeexpr(stmt.Left().Left(), a) || !samesafeexpr(stmt.Left().Right(), v1) { - return false + return nil } - elemsize := n.Type().Elem().Width + elemsize := loop.Type().Elem().Width if elemsize <= 0 || !isZero(stmt.Right()) { - return false + return nil } // Convert to @@ -568,8 +563,7 @@ func arrayClear(n, v1, v2, a ir.Node) bool { // memclr{NoHeap,Has}Pointers(hp, hn) // i = len(a) - 1 // } - n.SetOp(ir.OIF) - + n := ir.Nod(ir.OIF, nil, nil) n.PtrBody().Set(nil) n.SetLeft(ir.Nod(ir.ONE, ir.Nod(ir.OLEN, a, nil), nodintconst(0))) @@ -611,7 +605,7 @@ func arrayClear(n, v1, v2, a ir.Node) bool { n.SetLeft(defaultlit(n.Left(), nil)) typecheckslice(n.Body().Slice(), ctxStmt) n = walkstmt(n) - return true + return n } // addptr returns (*T)(uintptr(p) + n). 
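(Illustrative sketch, not part of the change: in source-level terms, the slice/array case above turns a range loop into an explicit for loop. With the hidden variables ha, hv1, hn as in the diff, a loop "for v1, v2 = range ha" whose element is cheap to index becomes roughly

	hv1 := 0
	hn := len(ha)
	for ; hv1 < hn; hv1 = hv1 + 1 {
		v1, v2 = hv1, ha[hv1]
		// original loop body
	}

built here as a fresh OFOR node, nfor, rather than by mutating the ORANGE node in place.)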
diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go index 9668df082a753..3afcef69f87ed 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/gc/select.go @@ -47,36 +47,30 @@ func typecheckselect(sel ir.Node) { } base.ErrorfAt(pos, "select case must be receive, send or assign recv") - // convert x = <-c into OSELRECV(x, <-c). - // remove implicit conversions; the eventual assignment - // will reintroduce them. case ir.OAS: + // convert x = <-c into OSELRECV(x, <-c). + // remove implicit conversions; the eventual assignment + // will reintroduce them. if (n.Right().Op() == ir.OCONVNOP || n.Right().Op() == ir.OCONVIFACE) && n.Right().Implicit() { n.SetRight(n.Right().Left()) } - if n.Right().Op() != ir.ORECV { base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side") break } - n.SetOp(ir.OSELRECV) - // convert x, ok = <-c into OSELRECV2(x, <-c) with ntest=ok case ir.OAS2RECV: - if n.Right().Op() != ir.ORECV { + // convert x, ok = <-c into OSELRECV2(x, <-c) with ntest=ok + if n.Rlist().First().Op() != ir.ORECV { base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side") break } - n.SetOp(ir.OSELRECV2) - n.SetLeft(n.List().First()) - n.PtrList().Set1(n.List().Second()) - // convert <-c into OSELRECV(N, <-c) case ir.ORECV: - n = ir.NodAt(n.Pos(), ir.OSELRECV, nil, n) - + // convert <-c into OSELRECV(_, <-c) + n = ir.NodAt(n.Pos(), ir.OSELRECV, ir.BlankNode, n) n.SetTypecheck(1) ncase.SetLeft(n) @@ -134,28 +128,19 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { case ir.OSEND: // already ok - case ir.OSELRECV, ir.OSELRECV2: - if n.Op() == ir.OSELRECV || n.List().Len() == 0 { - if n.Left() == nil { - n = n.Right() - } else { - n.SetOp(ir.OAS) - } + case ir.OSELRECV: + if ir.IsBlank(n.Left()) { + n = n.Right() break } + n.SetOp(ir.OAS) - if n.Left() == nil { - ir.BlankNode = typecheck(ir.BlankNode, ctxExpr|ctxAssign) - n.SetLeft(ir.BlankNode) + case ir.OSELRECV2: + if ir.IsBlank(n.List().First()) && ir.IsBlank(n.List().Second()) { + n = n.Rlist().First() + break } - - n.SetOp(ir.OAS2) - n.PtrList().Prepend(n.Left()) - n.PtrRlist().Set1(n.Right()) - n.SetRight(nil) - n.SetLeft(nil) - n.SetTypecheck(0) - n = typecheck(n, ctxStmt) + n.SetOp(ir.OAS2RECV) } l = append(l, n) @@ -176,20 +161,30 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { dflt = cas continue } + + // Lower x, _ = <-c to x = <-c. 
+ if n.Op() == ir.OSELRECV2 && ir.IsBlank(n.List().Second()) { + n = ir.NodAt(n.Pos(), ir.OSELRECV, n.List().First(), n.Rlist().First()) + n.SetTypecheck(1) + cas.SetLeft(n) + } + switch n.Op() { case ir.OSEND: n.SetRight(ir.Nod(ir.OADDR, n.Right(), nil)) n.SetRight(typecheck(n.Right(), ctxExpr)) - case ir.OSELRECV, ir.OSELRECV2: - if n.Op() == ir.OSELRECV2 && n.List().Len() == 0 { - n.SetOp(ir.OSELRECV) - } - - if n.Left() != nil { + case ir.OSELRECV: + if !ir.IsBlank(n.Left()) { n.SetLeft(ir.Nod(ir.OADDR, n.Left(), nil)) n.SetLeft(typecheck(n.Left(), ctxExpr)) } + + case ir.OSELRECV2: + if !ir.IsBlank(n.List().First()) { + n.List().SetIndex(0, ir.Nod(ir.OADDR, n.List().First(), nil)) + n.List().SetIndex(0, typecheck(n.List().First(), ctxExpr)) + } } } @@ -204,6 +199,7 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { setlineno(n) r := ir.Nod(ir.OIF, nil, nil) r.PtrInit().Set(cas.Init().Slice()) + var call ir.Node switch n.Op() { default: base.Fatalf("select %v", n.Op()) @@ -211,30 +207,30 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { case ir.OSEND: // if selectnbsend(c, v) { body } else { default body } ch := n.Left() - r.SetLeft(mkcall1(chanfn("selectnbsend", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), ch, n.Right())) + call = mkcall1(chanfn("selectnbsend", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), ch, n.Right()) case ir.OSELRECV: // if selectnbrecv(&v, c) { body } else { default body } ch := n.Right().Left() elem := n.Left() - if elem == nil { + if ir.IsBlank(elem) { elem = nodnil() } - r.SetLeft(mkcall1(chanfn("selectnbrecv", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, ch)) + call = mkcall1(chanfn("selectnbrecv", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, ch) case ir.OSELRECV2: // if selectnbrecv2(&v, &received, c) { body } else { default body } - ch := n.Right().Left() - elem := n.Left() - if elem == nil { + ch := n.Rlist().First().Left() + elem := n.List().First() + if ir.IsBlank(elem) { elem = nodnil() } - receivedp := ir.Nod(ir.OADDR, n.List().First(), nil) + receivedp := ir.Nod(ir.OADDR, n.List().Second(), nil) receivedp = typecheck(receivedp, ctxExpr) - r.SetLeft(mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch)) + call = mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch) } - r.SetLeft(typecheck(r.Left(), ctxExpr)) + r.SetLeft(typecheck(call, ctxExpr)) r.PtrBody().Set(cas.Body().Slice()) r.PtrRlist().Set(append(dflt.Init().Slice(), dflt.Body().Slice()...)) return []ir.Node{r, ir.Nod(ir.OBREAK, nil, nil)} @@ -288,11 +284,16 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { nsends++ c = n.Left() elem = n.Right() - case ir.OSELRECV, ir.OSELRECV2: + case ir.OSELRECV: nrecvs++ i = ncas - nrecvs c = n.Right().Left() elem = n.Left() + case ir.OSELRECV2: + nrecvs++ + i = ncas - nrecvs + c = n.Rlist().First().Left() + elem = n.List().First() } casorder[i] = cas @@ -305,7 +306,7 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { c = convnop(c, types.Types[types.TUNSAFEPTR]) setField("c", c) - if elem != nil { + if !ir.IsBlank(elem) { elem = convnop(elem, types.Types[types.TUNSAFEPTR]) setField("elem", elem) } @@ -347,7 +348,7 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { r := ir.Nod(ir.OIF, cond, nil) if n := cas.Left(); n != nil && n.Op() == ir.OSELRECV2 { - x := ir.Nod(ir.OAS, n.List().First(), recvOK) + x := ir.Nod(ir.OAS, n.List().Second(), recvOK) x = typecheck(x, ctxStmt) r.PtrBody().Append(x) } 
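(Illustrative sketch, not part of the change: for the common receive-plus-default select handled above,

	select {
	case x = <-c:
		body
	default:
		dflt
	}

is lowered to a direct call of the runtime helper named in the diff,

	if selectnbrecv(&x, c) {
		body
	} else {
		dflt
	}

with the OADDR rewrite above supplying the &x argument.)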
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 6d818be1322ef..4be6caa0e3123 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -1120,9 +1120,9 @@ func (s *state) stmt(n ir.Node) { s.callResult(n.Left(), callGo) case ir.OAS2DOTTYPE: - res, resok := s.dottype(n.Right(), true) + res, resok := s.dottype(n.Rlist().First(), true) deref := false - if !canSSAType(n.Right().Type()) { + if !canSSAType(n.Rlist().First().Type()) { if res.Op != ssa.OpLoad { s.Fatalf("dottype of non-load") } @@ -1142,10 +1142,10 @@ func (s *state) stmt(n ir.Node) { case ir.OAS2FUNC: // We come here only when it is an intrinsic call returning two values. - if !isIntrinsicCall(n.Right()) { - s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Right()) + if !isIntrinsicCall(n.Rlist().First()) { + s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Rlist().First()) } - v := s.intrinsicCall(n.Right()) + v := s.intrinsicCall(n.Rlist().First()) v1 := s.newValue1(ssa.OpSelect0, n.List().First().Type(), v) v2 := s.newValue1(ssa.OpSelect1, n.List().Second().Type(), v) s.assign(n.List().First(), v1, false, 0) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 19146e2a9e795..b5ace76552822 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -3330,8 +3330,6 @@ func typecheckas2(n ir.Node) { goto mismatch } n.SetOp(ir.OAS2FUNC) - n.SetRight(r) - n.PtrRlist().Set(nil) for i, l := range n.List().Slice() { f := r.Type().Field(i) if f.Type != nil && l.Type() != nil { @@ -3361,8 +3359,6 @@ func typecheckas2(n ir.Node) { n.SetOp(ir.OAS2DOTTYPE) r.SetOp(ir.ODOTTYPE2) } - n.SetRight(r) - n.PtrRlist().Set(nil) if l.Type() != nil { checkassignto(r.Type(), l) } diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index d43545391cfc7..c3c2c0492aa7d 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -144,6 +144,7 @@ func lexinit() { types.Types[types.TBLANK] = types.New(types.TBLANK) ir.AsNode(s.Def).SetType(types.Types[types.TBLANK]) ir.BlankNode = ir.AsNode(s.Def) + ir.BlankNode.SetTypecheck(1) s = ir.BuiltinPkg.Lookup("_") s.Block = -100 diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index e7c88bd329610..0a77cfbb3848f 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -604,11 +604,8 @@ opswitch: if n.Op() == ir.OASOP { // Rewrite x op= y into x = x op y. 
- n.SetRight(ir.Nod(n.SubOp(), n.Left(), n.Right())) - n.SetRight(typecheck(n.Right(), ctxExpr)) - - n.SetOp(ir.OAS) - n.ResetAux() + n = ir.Nod(ir.OAS, n.Left(), + typecheck(ir.Nod(n.SubOp(), n.Left(), n.Right()), ctxExpr)) } if oaslit(n, init) { @@ -683,12 +680,12 @@ opswitch: case ir.OAS2FUNC: init.AppendNodes(n.PtrInit()) - r := n.Right() + r := n.Rlist().First() walkexprlistsafe(n.List().Slice(), init) r = walkexpr(r, init) if isIntrinsicCall(r) { - n.SetRight(r) + n.PtrRlist().Set1(r) break } init.Append(r) @@ -701,7 +698,7 @@ opswitch: case ir.OAS2RECV: init.AppendNodes(n.PtrInit()) - r := n.Right() + r := n.Rlist().First() walkexprlistsafe(n.List().Slice(), init) r.SetLeft(walkexpr(r.Left(), init)) var n1 ir.Node @@ -720,7 +717,7 @@ opswitch: case ir.OAS2MAPR: init.AppendNodes(n.PtrInit()) - r := n.Right() + r := n.Rlist().First() walkexprlistsafe(n.List().Slice(), init) r.SetLeft(walkexpr(r.Left(), init)) r.SetRight(walkexpr(r.Right(), init)) @@ -759,7 +756,7 @@ opswitch: if ok := n.List().Second(); !ir.IsBlank(ok) && ok.Type().IsBoolean() { r.Type().Field(1).Type = ok.Type() } - n.SetRight(r) + n.PtrRlist().Set1(r) n.SetOp(ir.OAS2FUNC) // don't generate a = *var if a is _ @@ -793,7 +790,7 @@ opswitch: case ir.OAS2DOTTYPE: walkexprlistsafe(n.List().Slice(), init) - n.SetRight(walkexpr(n.Right(), init)) + n.PtrRlist().SetIndex(0, walkexpr(n.Rlist().First(), init)) case ir.OCONVIFACE: n.SetLeft(walkexpr(n.Left(), init)) diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index c723bad4c9332..4a08cca359c05 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -939,15 +939,12 @@ func stmtFmt(n Node, s fmt.State, mode FmtMode) { mode.Fprintf(s, "%v %#v= %v", n.Left(), n.SubOp(), n.Right()) - case OAS2: + case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV: if n.Colas() && !complexinit { mode.Fprintf(s, "%.v := %.v", n.List(), n.Rlist()) - break + } else { + mode.Fprintf(s, "%.v = %.v", n.List(), n.Rlist()) } - fallthrough - - case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV: - mode.Fprintf(s, "%.v = %v", n.List(), n.Right()) case ORETURN: mode.Fprintf(s, "return %.v", n.List()) diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 2850704ae1803..85f7f92a423c5 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -520,8 +520,8 @@ const ( ORECOVER // recover() ORECV // <-Left ORUNESTR // Type(Left) (Type is string, Left is rune) - OSELRECV // Left = <-Right.Left: (appears as .Left of OCASE; Right.Op == ORECV) - OSELRECV2 // List = <-Right.Left: (appears as .Left of OCASE; count(List) == 2, Right.Op == ORECV) + OSELRECV // like OAS: Left = Right where Right.Op = ORECV (appears as .Left of OCASE) + OSELRECV2 // like OAS2: List = Rlist where len(List)=2, len(Rlist)=1, Rlist[0].Op = ORECV (appears as .Left of OCASE) OIOTA // iota OREAL // real(Left) OIMAG // imag(Left) diff --git a/src/cmd/compile/internal/logopt/logopt_test.go b/src/cmd/compile/internal/logopt/logopt_test.go index 51bab49518c07..4a6ed2fac953f 100644 --- a/src/cmd/compile/internal/logopt/logopt_test.go +++ b/src/cmd/compile/internal/logopt/logopt_test.go @@ -132,7 +132,7 @@ func TestLogOpt(t *testing.T) { // Check at both 1 and 8-byte alignments. 
t.Run("Copy", func(t *testing.T) { const copyCode = `package x -func s128a1(x *[128]int8) [128]int8 { +func s128a1(x *[128]int8) [128]int8 { return *x } func s127a1(x *[127]int8) [127]int8 { @@ -219,7 +219,7 @@ func s15a8(x *[15]int64) [15]int64 { `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"},`+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from \u0026y.b (address-of)"},`+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":9},"end":{"line":4,"character":9}}},"message":"inlineLoc"},`+ - `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from ~R0 = \u003cN\u003e (assign-pair)"},`+ + `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from ~R0 = \u0026y.b (assign-pair)"},`+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: flow: ~r2 = ~R0:"},`+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: from return (*int)(~R0) (return)"}]}`) }) From 5fc192af56dd1a9977bf73175ab9e32232b4a14d Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sun, 29 Nov 2020 11:16:20 -0500 Subject: [PATCH 069/474] [dev.regabi] cmd/compile: clean up Order.copyExpr TODO Just a little cleaner to read. Passes buildall w/ toolstash -cmp. Change-Id: I27b9f09bf6756f74f1c01794444518ded1a7d625 Reviewed-on: https://go-review.googlesource.com/c/go/+/274106 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/order.go | 65 +++++++++++++++------------- 1 file changed, 35 insertions(+), 30 deletions(-) diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 66e279d85f83a..83cfb44474055 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -93,21 +93,26 @@ func (o *Order) newTemp(t *types.Type, clear bool) ir.Node { // copyExpr behaves like newTemp but also emits // code to initialize the temporary to the value n. -// -// The clear argument is provided for use when the evaluation -// of tmp = n turns into a function call that is passed a pointer -// to the temporary as the output space. If the call blocks before -// tmp has been written, the garbage collector will still treat the -// temporary as live, so we must zero it before entering that call. +func (o *Order) copyExpr(n ir.Node) ir.Node { + return o.copyExpr1(n, false) +} + +// copyExprClear is like copyExpr but clears the temp before assignment. +// It is provided for use when the evaluation of tmp = n turns into +// a function call that is passed a pointer to the temporary as the output space. +// If the call blocks before tmp has been written, +// the garbage collector will still treat the temporary as live, +// so we must zero it before entering that call. // Today, this only happens for channel receive operations. // (The other candidate would be map access, but map access // returns a pointer to the result data instead of taking a pointer // to be filled in.) -// TODO(rsc): t == n.Type() always; remove parameter. 
-func (o *Order) copyExpr(n ir.Node, t *types.Type, clear bool) ir.Node { - if t != n.Type() { - panic("copyExpr") - } +func (o *Order) copyExprClear(n ir.Node) ir.Node { + return o.copyExpr1(n, true) +} + +func (o *Order) copyExpr1(n ir.Node, clear bool) ir.Node { + t := n.Type() v := o.newTemp(t, clear) a := ir.Nod(ir.OAS, v, n) a = typecheck(a, ctxStmt) @@ -137,7 +142,7 @@ func (o *Order) cheapExpr(n ir.Node) ir.Node { return typecheck(a, ctxExpr) } - return o.copyExpr(n, n.Type(), false) + return o.copyExpr(n) } // safeExpr returns a safe version of n. @@ -224,7 +229,7 @@ func (o *Order) addrTemp(n ir.Node) ir.Node { if isaddrokay(n) { return n } - return o.copyExpr(n, n.Type(), false) + return o.copyExpr(n) } // mapKeyTemp prepares n to be a key in a map runtime call and returns n. @@ -493,7 +498,7 @@ func (o *Order) call(n ir.Node) { // by copying it into a temp and marking that temp // still alive when we pop the temp stack. if arg.Op() == ir.OCONVNOP && arg.Left().Type().IsUnsafePtr() { - x := o.copyExpr(arg.Left(), arg.Left().Type(), false) + x := o.copyExpr(arg.Left()) arg.SetLeft(x) x.Name().SetAddrtaken(true) // ensure SSA keeps the x variable n.PtrBody().Append(typecheck(ir.Nod(ir.OVARLIVE, x, nil), ctxStmt)) @@ -555,10 +560,10 @@ func (o *Order) mapAssign(n ir.Node) { switch { case m.Op() == ir.OINDEXMAP: if !ir.IsAutoTmp(m.Left()) { - m.SetLeft(o.copyExpr(m.Left(), m.Left().Type(), false)) + m.SetLeft(o.copyExpr(m.Left())) } if !ir.IsAutoTmp(m.Right()) { - m.SetRight(o.copyExpr(m.Right(), m.Right().Type(), false)) + m.SetRight(o.copyExpr(m.Right())) } fallthrough case instrumenting && n.Op() == ir.OAS2FUNC && !ir.IsBlank(m): @@ -617,7 +622,7 @@ func (o *Order) stmt(n ir.Node) { if l1.Op() == ir.OINDEXMAP { l2.SetIndexMapLValue(false) } - l2 = o.copyExpr(l2, l2.Type(), false) + l2 = o.copyExpr(l2) r := ir.NodAt(n.Pos(), n.SubOp(), l2, n.Right()) r = typecheck(r, ctxExpr) r = o.expr(r, nil) @@ -802,7 +807,7 @@ func (o *Order) stmt(n ir.Node) { r = typecheck(r, ctxExpr) } - n.SetRight(o.copyExpr(r, r.Type(), false)) + n.SetRight(o.copyExpr(r)) case types.TMAP: if isMapClear(n) { @@ -817,7 +822,7 @@ func (o *Order) stmt(n ir.Node) { // TODO(rsc): Make tmp = literal expressions reuse tmp. // For maps tmp is just one word so it hardly matters. r := n.Right() - n.SetRight(o.copyExpr(r, r.Type(), false)) + n.SetRight(o.copyExpr(r)) // prealloc[n] is the temp for the iterator. // hiter contains pointers and needs to be zeroed. @@ -898,7 +903,7 @@ func (o *Order) stmt(n ir.Node) { recv.SetLeft(o.expr(recv.Left(), nil)) if recv.Left().Op() != ir.ONAME { - recv.SetLeft(o.copyExpr(recv.Left(), recv.Left().Type(), false)) + recv.SetLeft(o.copyExpr(recv.Left())) } // Introduce temporary for receive and move actual copy into case body. @@ -956,11 +961,11 @@ func (o *Order) stmt(n ir.Node) { r.SetLeft(o.expr(r.Left(), nil)) if !ir.IsAutoTmp(r.Left()) { - r.SetLeft(o.copyExpr(r.Left(), r.Left().Type(), false)) + r.SetLeft(o.copyExpr(r.Left())) } r.SetRight(o.expr(r.Right(), nil)) if !ir.IsAutoTmp(r.Right()) { - r.SetRight(o.copyExpr(r.Right(), r.Right().Type(), false)) + r.SetRight(o.copyExpr(r.Right())) } } } @@ -988,7 +993,7 @@ func (o *Order) stmt(n ir.Node) { if instrumenting { // Force copying to the stack so that (chan T)(nil) <- x // is still instrumented as a read of x. 
- n.SetRight(o.copyExpr(n.Right(), n.Right().Type(), false)) + n.SetRight(o.copyExpr(n.Right())) } else { n.SetRight(o.addrTemp(n.Right())) } @@ -1134,7 +1139,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { // key must be addressable n.SetRight(o.mapKeyTemp(n.Left().Type(), n.Right())) if needCopy { - n = o.copyExpr(n, n.Type(), false) + n = o.copyExpr(n) } // concrete type (not interface) argument might need an addressable @@ -1159,7 +1164,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { o.init(n.Left()) o.call(n.Left()) if lhs == nil || lhs.Op() != ir.ONAME || instrumenting { - n = o.copyExpr(n, n.Type(), false) + n = o.copyExpr(n) } } else { n.SetLeft(o.expr(n.Left(), nil)) @@ -1229,7 +1234,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { } if lhs == nil || lhs.Op() != ir.ONAME || instrumenting { - n = o.copyExpr(n, n.Type(), false) + n = o.copyExpr(n) } case ir.OAPPEND: @@ -1242,7 +1247,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { } if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.List().First()) { - n = o.copyExpr(n, n.Type(), false) + n = o.copyExpr(n) } case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR: @@ -1256,7 +1261,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { max = o.cheapExpr(max) n.SetSliceBounds(low, high, max) if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.Left()) { - n = o.copyExpr(n, n.Type(), false) + n = o.copyExpr(n) } case ir.OCLOSURE: @@ -1283,12 +1288,12 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { case ir.ODOTTYPE, ir.ODOTTYPE2: n.SetLeft(o.expr(n.Left(), nil)) if !isdirectiface(n.Type()) || instrumenting { - n = o.copyExpr(n, n.Type(), true) + n = o.copyExprClear(n) } case ir.ORECV: n.SetLeft(o.expr(n.Left(), nil)) - n = o.copyExpr(n, n.Type(), true) + n = o.copyExprClear(n) case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: n.SetLeft(o.expr(n.Left(), nil)) From b7f67b75d2fe94098afb618adc1badc12ce6e21c Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sun, 29 Nov 2020 21:11:07 -0500 Subject: [PATCH 070/474] [dev.regabi] cmd/compile: clean up in preparation for expression Nodes Using expression nodes restricts the set of valid SetOp operations, because you can't SetOp across representation. Rewrite various code to avoid crossing those as-yet-unintroduced boundaries. This also includes choosing a single representation for any given Op. For example, OCLOSE starts out as an OCALL, so it starts with a List of one node and then moves that node to Left. That's no good with real data structures, so the code picks a single canonical implementation and prepares it during the conversion from one Op to the next. In this case, the conversion of an OCALL to an OCLOSE now creates a new node with Left initialized from the start. This pattern repeats. Passes buildall w/ toolstash -cmp. 
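To make the pattern concrete, here is a sketch condensed from the
typecheck.go hunk below (a paraphrase of the diff, not new code in
this CL). Builtin calls used to be rewritten in place, crossing
representations:

	n.SetOp(l.SubOp())
	n.SetLeft(n.Right())
	n.SetRight(nil)

With this CL they validate their arguments first and then construct
the specific node directly:

	arg, ok := needOneArg(n, "%v", n.Op())
	if !ok {
		n.SetType(nil)
		return n
	}
	n = ir.NodAt(n.Pos(), l.SubOp(), arg, nil)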
Change-Id: I55a0872c614d883cac9d64976c46aeeaa639e25d Reviewed-on: https://go-review.googlesource.com/c/go/+/274107 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/sinit.go | 8 +- src/cmd/compile/internal/gc/subr.go | 2 +- src/cmd/compile/internal/gc/typecheck.go | 185 ++++++++++------------- src/cmd/compile/internal/gc/walk.go | 7 +- 4 files changed, 85 insertions(+), 117 deletions(-) diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index ff3d3281ddfa5..8146f30377107 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -686,8 +686,7 @@ func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) { a = ir.Nod(ir.OADDR, a, nil) } else { - a = ir.Nod(ir.ONEW, nil, nil) - a.PtrList().Set1(ir.TypeNode(t)) + a = ir.Nod(ir.ONEW, ir.TypeNode(t), nil) } a = ir.Nod(ir.OAS, vauto, a) @@ -889,9 +888,8 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { r = ir.Nod(ir.OADDR, n.Right(), nil) r = typecheck(r, ctxExpr) } else { - r = ir.Nod(ir.ONEW, nil, nil) - r.SetTypecheck(1) - r.SetType(t) + r = ir.Nod(ir.ONEW, ir.TypeNode(n.Left().Type()), nil) + r = typecheck(r, ctxExpr) r.SetEsc(n.Esc()) } diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index b1c9d24d991f2..0163653d3bd9a 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -1472,7 +1472,7 @@ func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node { if t.IsInterface() { base.Fatalf("ifaceData interface: %v", t) } - ptr := nodlSym(pos, ir.OIDATA, n, nil) + ptr := ir.NodAt(pos, ir.OIDATA, n, nil) if isdirectiface(t) { ptr.SetType(t) ptr.SetTypecheck(1) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index b5ace76552822..f021ea48b14af 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -1065,7 +1065,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetRight(assignconv(n.Right(), t.Key(), "map index")) n.SetType(t.Elem()) n.SetOp(ir.OINDEXMAP) - n.ResetAux() + n.SetIndexMapLValue(false) } case ir.ORECV: @@ -1099,27 +1099,22 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetLeft(defaultlit(n.Left(), nil)) t := n.Left().Type() if t == nil { - n.SetType(nil) return n } if !t.IsChan() { base.Errorf("invalid operation: %v (send to non-chan type %v)", n, t) - n.SetType(nil) return n } if !t.ChanDir().CanSend() { base.Errorf("invalid operation: %v (send to receive-only type %v)", n, t) - n.SetType(nil) return n } n.SetRight(assignconv(n.Right(), t.Elem(), "send")) if n.Right().Type() == nil { - n.SetType(nil) return n } - n.SetType(nil) case ir.OSLICEHEADER: // Errors here are Fatalf instead of Errorf because only the compiler @@ -1299,9 +1294,44 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } // builtin: OLEN, OCAP, etc. 
- n.SetOp(l.SubOp()) - n.SetLeft(n.Right()) - n.SetRight(nil) + switch l.SubOp() { + default: + base.Fatalf("unknown builtin %v", l) + return n + + case ir.OAPPEND, ir.ODELETE, ir.OMAKE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER: + n.SetOp(l.SubOp()) + n.SetLeft(nil) + + case ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.OPANIC, ir.OREAL: + typecheckargs(n) + fallthrough + case ir.ONEW, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF: + arg, ok := needOneArg(n, "%v", n.Op()) + if !ok { + n.SetType(nil) + return n + } + old := n + n = ir.NodAt(n.Pos(), l.SubOp(), arg, nil) + n = addinit(n, old.Init().Slice()) // typecheckargs can add to old.Init + if l.SubOp() == ir.ONEW { + // Bug-compatibility with earlier version. + // This extra node is unnecessary but raises the inlining cost by 1. + n.SetList(old.List()) + } + + case ir.OCOMPLEX, ir.OCOPY: + typecheckargs(n) + arg1, arg2, ok := needTwoArgs(n) + if !ok { + n.SetType(nil) + return n + } + old := n + n = ir.NodAt(n.Pos(), l.SubOp(), arg1, arg2) + n = addinit(n, old.Init().Slice()) // typecheckargs can add to old.Init + } n = typecheck1(n, top) return n } @@ -1319,15 +1349,14 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { // pick off before type-checking arguments ok |= ctxExpr - // turn CALL(type, arg) into CONV(arg) w/ type - n.SetLeft(nil) - - n.SetOp(ir.OCONV) - n.SetType(l.Type()) - if !onearg(n, "conversion to %v", l.Type()) { + arg, ok := needOneArg(n, "conversion to %v", l.Type()) + if !ok { n.SetType(nil) return n } + + n = ir.NodAt(n.Pos(), ir.OCONV, arg, nil) + n.SetType(l.Type()) n = typecheck1(n, top) return n } @@ -1406,19 +1435,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF: ok |= ctxExpr - if !onearg(n, "%v", n.Op()) { - n.SetType(nil) - return n - } n.SetType(types.Types[types.TUINTPTR]) case ir.OCAP, ir.OLEN: ok |= ctxExpr - if !onearg(n, "%v", n.Op()) { - n.SetType(nil) - return n - } - n.SetLeft(typecheck(n.Left(), ctxExpr)) n.SetLeft(defaultlit(n.Left(), nil)) n.SetLeft(implicitstar(n.Left())) @@ -1445,11 +1465,6 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OREAL, ir.OIMAG: ok |= ctxExpr - if !onearg(n, "%v", n.Op()) { - n.SetType(nil) - return n - } - n.SetLeft(typecheck(n.Left(), ctxExpr)) l := n.Left() t := l.Type() @@ -1474,13 +1489,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OCOMPLEX: ok |= ctxExpr - typecheckargs(n) - if !twoarg(n) { - n.SetType(nil) - return n - } - l := n.Left() - r := n.Right() + l := typecheck(n.Left(), ctxExpr) + r := typecheck(n.Right(), ctxExpr) if l.Type() == nil || r.Type() == nil { n.SetType(nil) return n @@ -1518,10 +1528,6 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetType(t) case ir.OCLOSE: - if !onearg(n, "%v", n.Op()) { - n.SetType(nil) - return n - } n.SetLeft(typecheck(n.Left(), ctxExpr)) n.SetLeft(defaultlit(n.Left(), nil)) l := n.Left() @@ -1638,17 +1644,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OCOPY: ok |= ctxStmt | ctxExpr - typecheckargs(n) - if !twoarg(n) { - n.SetType(nil) - return n - } n.SetType(types.Types[types.TINT]) - if n.Left().Type() == nil || n.Right().Type() == nil { - n.SetType(nil) - return n - } + n.SetLeft(typecheck(n.Left(), ctxExpr)) n.SetLeft(defaultlit(n.Left(), nil)) + n.SetRight(typecheck(n.Right(), ctxExpr)) n.SetRight(defaultlit(n.Right(), nil)) if n.Left().Type() == nil || n.Right().Type() == nil { n.SetType(nil) @@ -1746,6 +1745,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } i := 1 + var nn ir.Node switch t.Etype { 
default: base.Errorf("cannot make type %v", t) @@ -1782,10 +1782,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetType(nil) return n } - - n.SetLeft(l) - n.SetRight(r) - n.SetOp(ir.OMAKESLICE) + nn = ir.NodAt(n.Pos(), ir.OMAKESLICE, l, r) case types.TMAP: if i < len(args) { @@ -1801,11 +1798,11 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetType(nil) return n } - n.SetLeft(l) } else { - n.SetLeft(nodintconst(0)) + l = nodintconst(0) } - n.SetOp(ir.OMAKEMAP) + nn = ir.NodAt(n.Pos(), ir.OMAKEMAP, l, nil) + nn.SetEsc(n.Esc()) case types.TCHAN: l = nil @@ -1822,44 +1819,35 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetType(nil) return n } - n.SetLeft(l) } else { - n.SetLeft(nodintconst(0)) + l = nodintconst(0) } - n.SetOp(ir.OMAKECHAN) + nn = ir.NodAt(n.Pos(), ir.OMAKECHAN, l, nil) } if i < len(args) { base.Errorf("too many arguments to make(%v)", t) - n.SetOp(ir.OMAKE) n.SetType(nil) return n } - n.SetType(t) + nn.SetType(t) + n = nn case ir.ONEW: ok |= ctxExpr - args := n.List() - if args.Len() == 0 { - base.Errorf("missing argument to new") - n.SetType(nil) - return n + if n.Left() == nil { + // Fatalf because the OCALL above checked for us, + // so this must be an internally-generated mistake. + base.Fatalf("missing argument to new") } - - l := args.First() + l := n.Left() l = typecheck(l, ctxType) t := l.Type() if t == nil { n.SetType(nil) return n } - if args.Len() > 1 { - base.Errorf("too many arguments to new(%v)", t) - n.SetType(nil) - return n - } - n.SetLeft(l) n.SetType(types.NewPtr(t)) @@ -1878,10 +1866,6 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OPANIC: ok |= ctxStmt - if !onearg(n, "panic") { - n.SetType(nil) - return n - } n.SetLeft(typecheck(n.Left(), ctxExpr)) n.SetLeft(defaultlit(n.Left(), types.Types[types.TINTER])) if n.Left().Type() == nil { @@ -2286,45 +2270,32 @@ func implicitstar(n ir.Node) ir.Node { return n } -func onearg(n ir.Node, f string, args ...interface{}) bool { - if n.Left() != nil { - return true - } +func needOneArg(n ir.Node, f string, args ...interface{}) (ir.Node, bool) { if n.List().Len() == 0 { p := fmt.Sprintf(f, args...) base.Errorf("missing argument to %s: %v", p, n) - return false + return nil, false } if n.List().Len() > 1 { p := fmt.Sprintf(f, args...) base.Errorf("too many arguments to %s: %v", p, n) - n.SetLeft(n.List().First()) - n.PtrList().Set(nil) - return false + return n.List().First(), false } - n.SetLeft(n.List().First()) - n.PtrList().Set(nil) - return true + return n.List().First(), true } -func twoarg(n ir.Node) bool { - if n.Left() != nil { - return true - } +func needTwoArgs(n ir.Node) (ir.Node, ir.Node, bool) { if n.List().Len() != 2 { if n.List().Len() < 2 { base.Errorf("not enough arguments in call to %v", n) } else { base.Errorf("too many arguments in call to %v", n) } - return false + return nil, nil, false } - n.SetLeft(n.List().First()) - n.SetRight(n.List().Second()) - n.PtrList().Set(nil) - return true + return n.List().First(), n.List().Second(), true } func lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field { @@ -2411,21 +2382,19 @@ func typecheckMethodExpr(n ir.Node) (res ir.Node) { return n } - n.SetOp(ir.OMETHEXPR) - n.SetRight(NewName(n.Sym())) - n.SetSym(methodSym(t, n.Sym())) - n.SetType(methodfunc(m.Type, n.Left().Type())) - n.SetOffset(0) - n.SetClass(ir.PFUNC) - n.SetOpt(m) - // methodSym already marked n.Sym as a function. 
+ me := ir.NodAt(n.Pos(), ir.OMETHEXPR, n.Left(), NewName(n.Sym())) + me.SetSym(methodSym(t, n.Sym())) + me.SetType(methodfunc(m.Type, n.Left().Type())) + me.SetOffset(0) + me.SetClass(ir.PFUNC) + me.SetOpt(m) // Issue 25065. Make sure that we emit the symbol for a local method. if base.Ctxt.Flag_dynlink && !inimport && (t.Sym == nil || t.Sym.Pkg == ir.LocalPkg) { - makefuncsym(n.Sym()) + makefuncsym(me.Sym()) } - return n + return me } // isMethodApplicable reports whether method m can be called on a diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 0a77cfbb3848f..511cdd3685e4d 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -1515,9 +1515,10 @@ opswitch: } // Slice the [n]byte to a []byte. - n.SetOp(ir.OSLICEARR) - n.SetLeft(p) - n = walkexpr(n, init) + slice := ir.NodAt(n.Pos(), ir.OSLICEARR, p, nil) + slice.SetType(n.Type()) + slice.SetTypecheck(1) + n = walkexpr(slice, init) break } From 2bc814cd18b582030f25d22e0a3e80d4d30b19cf Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 30 Nov 2020 14:16:39 -0500 Subject: [PATCH 071/474] [dev.regabi] cmd/compile: clean up ONEW node The list is no longer needed and can be deleted. Doing so reduces the inlining cost of any function containing an explicit call to new by 1 point, so this change is not toolstash -cmp safe. Change-Id: Id29e115d68e466a353708ab4b8c1021e9c85a628 Reviewed-on: https://go-review.googlesource.com/c/go/+/274132 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/typecheck.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index f021ea48b14af..874594d764dce 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -1315,11 +1315,6 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { old := n n = ir.NodAt(n.Pos(), l.SubOp(), arg, nil) n = addinit(n, old.Init().Slice()) // typecheckargs can add to old.Init - if l.SubOp() == ir.ONEW { - // Bug-compatibility with earlier version. - // This extra node is unnecessary but raises the inlining cost by 1. - n.SetList(old.List()) - } case ir.OCOMPLEX, ir.OCOPY: typecheckargs(n) From ffa68716a0d50acd29a8eae7874c7e8d02f757ca Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sun, 29 Nov 2020 21:23:47 -0500 Subject: [PATCH 072/474] [dev.regabi] cmd/compile: add custom statement Node implementations These are fairly rote implementations of structs appropriate to each Op (or group of Ops). The names of these are unknown except to ir.NodAt for now. A later, automated change will introduce direct use of the types throughout package gc. Passes buildall w/ toolstash -cmp. 
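As a concrete illustration, condensed from the diff below: ir.NodAt
now dispatches to per-Op constructors, for example

	case OAS, OSELRECV:
		n := NewAssignStmt(pos, nleft, nright)
		n.SetOp(op)
		return n

and each concrete type's SetOp accepts only the Ops that share its
representation (AssignStmt.SetOp panics on anything other than OAS
and OSELRECV), so a node can no longer be mutated across
representations.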
Change-Id: Ie9835fcd2b214fda5b2149e187af369d76534487 Reviewed-on: https://go-review.googlesource.com/c/go/+/274108 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/node.go | 78 ++-- src/cmd/compile/internal/ir/stmt.go | 535 +++++++++++++++++++++++++++- 2 files changed, 578 insertions(+), 35 deletions(-) diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 85f7f92a423c5..a4d19c39f8db3 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -1029,22 +1029,60 @@ func Nod(op Op, nleft, nright Node) Node { func NodAt(pos src.XPos, op Op, nleft, nright Node) Node { var n *node switch op { + case OAS, OSELRECV: + n := NewAssignStmt(pos, nleft, nright) + n.SetOp(op) + return n + case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV, OSELRECV2: + n := NewAssignListStmt(pos, nil, nil) + n.SetOp(op) + return n + case OASOP: + return NewAssignOpStmt(pos, OXXX, nleft, nright) + case OBLOCK: + return NewBlockStmt(pos, nil) + case OBREAK, OCONTINUE, OFALL, OGOTO, ORETJMP: + return NewBranchStmt(pos, op, nil) + case OCASE: + return NewCaseStmt(pos, nil, nil) + case ODCL, ODCLCONST, ODCLTYPE: + return NewDecl(pos, op, nleft) case ODCLFUNC: return NewFunc(pos) + case ODEFER: + return NewDeferStmt(pos, nleft) case ODEREF: return NewStarExpr(pos, nleft) - case OPACK: - return NewPkgName(pos, nil, nil) case OEMPTY: return NewEmptyStmt(pos) - case OBREAK, OCONTINUE, OFALL, OGOTO: - return NewBranchStmt(pos, op, nil) + case OFOR: + return NewForStmt(pos, nil, nleft, nright, nil) + case OGO: + return NewGoStmt(pos, nleft) + case OIF: + return NewIfStmt(pos, nleft, nil, nil) + case OINLMARK: + return NewInlineMarkStmt(pos, types.BADWIDTH) + case OLABEL: + return NewLabelStmt(pos, nil) case OLITERAL, OTYPE, OIOTA: n := newNameAt(pos, nil) n.SetOp(op) return n - case OLABEL: - return NewLabelStmt(pos, nil) + case OPACK: + return NewPkgName(pos, nil, nil) + case ORANGE: + return NewRangeStmt(pos, nil, nright, nil) + case ORETURN: + return NewReturnStmt(pos, nil) + case OSELECT: + return NewSelectStmt(pos, nil) + case OSEND: + return NewSendStmt(pos, nleft, nright) + case OSWITCH: + return NewSwitchStmt(pos, nleft, nil) + case OTYPESW: + return NewTypeSwitchGuard(pos, nleft, nright) default: n = new(node) } @@ -1067,15 +1105,7 @@ var okForNod = [OEND]bool{ OANDNOT: true, OAPPEND: true, OARRAYLIT: true, - OAS: true, - OAS2: true, - OAS2DOTTYPE: true, - OAS2FUNC: true, - OAS2MAPR: true, - OAS2RECV: true, - OASOP: true, OBITNOT: true, - OBLOCK: true, OBYTES2STR: true, OBYTES2STRTMP: true, OCALL: true, @@ -1083,7 +1113,6 @@ var okForNod = [OEND]bool{ OCALLINTER: true, OCALLMETH: true, OCAP: true, - OCASE: true, OCFUNC: true, OCHECKNIL: true, OCLOSE: true, @@ -1093,10 +1122,6 @@ var okForNod = [OEND]bool{ OCONVIFACE: true, OCONVNOP: true, OCOPY: true, - ODCL: true, - ODCLCONST: true, - ODCLTYPE: true, - ODEFER: true, ODELETE: true, ODIV: true, ODOT: true, @@ -1107,22 +1132,16 @@ var okForNod = [OEND]bool{ ODOTTYPE2: true, OEFACE: true, OEQ: true, - OFOR: true, - OFORUNTIL: true, OGE: true, OGETG: true, - OGO: true, OGT: true, OIDATA: true, - OIF: true, OIMAG: true, OINDEX: true, OINDEXMAP: true, OINLCALL: true, - OINLMARK: true, OITAB: true, OKEY: true, - OLABEL: true, OLE: true, OLEN: true, OLSH: true, @@ -1151,20 +1170,13 @@ var okForNod = [OEND]bool{ OPRINT: true, OPRINTN: true, OPTRLIT: true, - ORANGE: true, OREAL: true, ORECOVER: true, ORECV: true, ORESULT: true, - ORETJMP: true, - ORETURN: true, ORSH: true, 
ORUNES2STR: true, ORUNESTR: true, - OSELECT: true, - OSELRECV: true, - OSELRECV2: true, - OSEND: true, OSIZEOF: true, OSLICE: true, OSLICE3: true, @@ -1180,8 +1192,6 @@ var okForNod = [OEND]bool{ OSTRUCTKEY: true, OSTRUCTLIT: true, OSUB: true, - OSWITCH: true, - OTYPESW: true, OVARDEF: true, OVARKILL: true, OVARLIVE: true, diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index 5b89ff27a4734..251683551367f 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -10,6 +10,31 @@ import ( "fmt" ) +// A Decl is a declaration of a const, type, or var. (A declared func is a Func.) +// (This is not technically a statement but it's not worth its own file.) +type Decl struct { + miniNode + X Node // the thing being declared +} + +func NewDecl(pos src.XPos, op Op, x Node) *Decl { + n := &Decl{X: x} + n.pos = pos + switch op { + default: + panic("invalid Decl op " + op.String()) + case ODCL, ODCLCONST, ODCLTYPE: + n.op = op + } + return n +} + +func (n *Decl) String() string { return fmt.Sprint(n) } +func (n *Decl) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *Decl) RawCopy() Node { c := *n; return &c } +func (n *Decl) Left() Node { return n.X } +func (n *Decl) SetLeft(x Node) { n.X = x } + // A miniStmt is a miniNode with extra fields common to statements. type miniStmt struct { miniNode @@ -22,7 +47,148 @@ func (n *miniStmt) PtrInit() *Nodes { return &n.init } func (n *miniStmt) HasCall() bool { return n.bits&miniHasCall != 0 } func (n *miniStmt) SetHasCall(b bool) { n.bits.set(miniHasCall, b) } +// An AssignListStmt is an assignment statement with +// more than one item on at least one side: Lhs = Rhs. +// If Def is true, the assignment is a :=. +type AssignListStmt struct { + miniStmt + Lhs Nodes + Def bool + Rhs Nodes + offset int64 // for initorder +} + +func NewAssignListStmt(pos src.XPos, lhs, rhs []Node) *AssignListStmt { + n := &AssignListStmt{} + n.pos = pos + n.op = OAS2 + n.Lhs.Set(lhs) + n.Rhs.Set(rhs) + n.offset = types.BADWIDTH + return n +} + +func (n *AssignListStmt) String() string { return fmt.Sprint(n) } +func (n *AssignListStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *AssignListStmt) RawCopy() Node { c := *n; return &c } + +func (n *AssignListStmt) List() Nodes { return n.Lhs } +func (n *AssignListStmt) PtrList() *Nodes { return &n.Lhs } +func (n *AssignListStmt) SetList(x Nodes) { n.Lhs = x } +func (n *AssignListStmt) Rlist() Nodes { return n.Rhs } +func (n *AssignListStmt) PtrRlist() *Nodes { return &n.Rhs } +func (n *AssignListStmt) SetRlist(x Nodes) { n.Rhs = x } +func (n *AssignListStmt) Colas() bool { return n.Def } +func (n *AssignListStmt) SetColas(x bool) { n.Def = x } +func (n *AssignListStmt) Offset() int64 { return n.offset } +func (n *AssignListStmt) SetOffset(x int64) { n.offset = x } + +func (n *AssignListStmt) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV, OSELRECV2: + n.op = op + } +} + +// An AssignStmt is a simple assignment statement: X = Y. +// If Def is true, the assignment is a :=. 
+type AssignStmt struct { + miniStmt + X Node + Def bool + Y Node + offset int64 // for initorder +} + +func NewAssignStmt(pos src.XPos, x, y Node) *AssignStmt { + n := &AssignStmt{X: x, Y: y} + n.pos = pos + n.op = OAS + n.offset = types.BADWIDTH + return n +} + +func (n *AssignStmt) String() string { return fmt.Sprint(n) } +func (n *AssignStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *AssignStmt) RawCopy() Node { c := *n; return &c } + +func (n *AssignStmt) Left() Node { return n.X } +func (n *AssignStmt) SetLeft(x Node) { n.X = x } +func (n *AssignStmt) Right() Node { return n.Y } +func (n *AssignStmt) SetRight(y Node) { n.Y = y } +func (n *AssignStmt) Colas() bool { return n.Def } +func (n *AssignStmt) SetColas(x bool) { n.Def = x } +func (n *AssignStmt) Offset() int64 { return n.offset } +func (n *AssignStmt) SetOffset(x int64) { n.offset = x } + +func (n *AssignStmt) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OAS, OSELRECV: + n.op = op + } +} + +// An AssignOpStmt is an AsOp= assignment statement: X AsOp= Y. +type AssignOpStmt struct { + miniStmt + typ *types.Type + X Node + AsOp Op // OADD etc + Y Node + IncDec bool // actually ++ or -- +} + +func NewAssignOpStmt(pos src.XPos, op Op, x, y Node) *AssignOpStmt { + n := &AssignOpStmt{AsOp: op, X: x, Y: y} + n.pos = pos + n.op = OASOP + return n +} + +func (n *AssignOpStmt) String() string { return fmt.Sprint(n) } +func (n *AssignOpStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *AssignOpStmt) RawCopy() Node { c := *n; return &c } + +func (n *AssignOpStmt) Left() Node { return n.X } +func (n *AssignOpStmt) SetLeft(x Node) { n.X = x } +func (n *AssignOpStmt) Right() Node { return n.Y } +func (n *AssignOpStmt) SetRight(y Node) { n.Y = y } +func (n *AssignOpStmt) SubOp() Op { return n.AsOp } +func (n *AssignOpStmt) SetSubOp(x Op) { n.AsOp = x } +func (n *AssignOpStmt) Implicit() bool { return n.IncDec } +func (n *AssignOpStmt) SetImplicit(b bool) { n.IncDec = b } +func (n *AssignOpStmt) Type() *types.Type { return n.typ } +func (n *AssignOpStmt) SetType(x *types.Type) { n.typ = x } + +// A BlockStmt is a block: { List }. +type BlockStmt struct { + miniStmt + list Nodes +} + +func NewBlockStmt(pos src.XPos, list []Node) *BlockStmt { + n := &BlockStmt{} + n.pos = pos + n.op = OBLOCK + n.list.Set(list) + return n +} + +func (n *BlockStmt) String() string { return fmt.Sprint(n) } +func (n *BlockStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *BlockStmt) RawCopy() Node { c := *n; return &c } +func (n *BlockStmt) List() Nodes { return n.list } +func (n *BlockStmt) PtrList() *Nodes { return &n.list } +func (n *BlockStmt) SetList(x Nodes) { n.list = x } + // A BranchStmt is a break, continue, fallthrough, or goto statement. +// +// For back-end code generation, Op may also be RETJMP (return+jump), +// in which case the label names another function entirely. 
type BranchStmt struct { miniStmt Label *types.Sym // label if present @@ -30,7 +196,7 @@ type BranchStmt struct { func NewBranchStmt(pos src.XPos, op Op, label *types.Sym) *BranchStmt { switch op { - case OBREAK, OCONTINUE, OFALL, OGOTO: + case OBREAK, OCONTINUE, OFALL, OGOTO, ORETJMP: // ok default: panic("NewBranch " + op.String()) @@ -47,6 +213,59 @@ func (n *BranchStmt) RawCopy() Node { c := *n; return &c } func (n *BranchStmt) Sym() *types.Sym { return n.Label } func (n *BranchStmt) SetSym(sym *types.Sym) { n.Label = sym } +// A CaseStmt is a case statement in a switch or select: case List: Body. +type CaseStmt struct { + miniStmt + Vars Nodes // declared variable for this case in type switch + list Nodes // list of expressions for switch, early select + Comm Node // communication case (Exprs[0]) after select is type-checked + body Nodes +} + +func NewCaseStmt(pos src.XPos, list, body []Node) *CaseStmt { + n := &CaseStmt{} + n.pos = pos + n.op = OCASE + n.list.Set(list) + n.body.Set(body) + return n +} + +func (n *CaseStmt) String() string { return fmt.Sprint(n) } +func (n *CaseStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *CaseStmt) RawCopy() Node { c := *n; return &c } +func (n *CaseStmt) List() Nodes { return n.list } +func (n *CaseStmt) PtrList() *Nodes { return &n.list } +func (n *CaseStmt) SetList(x Nodes) { n.list = x } +func (n *CaseStmt) Body() Nodes { return n.body } +func (n *CaseStmt) PtrBody() *Nodes { return &n.body } +func (n *CaseStmt) SetBody(x Nodes) { n.body = x } +func (n *CaseStmt) Rlist() Nodes { return n.Vars } +func (n *CaseStmt) PtrRlist() *Nodes { return &n.Vars } +func (n *CaseStmt) SetRlist(x Nodes) { n.Vars = x } +func (n *CaseStmt) Left() Node { return n.Comm } +func (n *CaseStmt) SetLeft(x Node) { n.Comm = x } + +// A DeferStmt is a defer statement: defer Call. +type DeferStmt struct { + miniStmt + Call Node +} + +func NewDeferStmt(pos src.XPos, call Node) *DeferStmt { + n := &DeferStmt{Call: call} + n.pos = pos + n.op = ODEFER + return n +} + +func (n *DeferStmt) String() string { return fmt.Sprint(n) } +func (n *DeferStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *DeferStmt) RawCopy() Node { c := *n; return &c } + +func (n *DeferStmt) Left() Node { return n.Call } +func (n *DeferStmt) SetLeft(x Node) { n.Call = x } + // An EmptyStmt is an empty statement type EmptyStmt struct { miniStmt @@ -63,6 +282,123 @@ func (n *EmptyStmt) String() string { return fmt.Sprint(n) } func (n *EmptyStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *EmptyStmt) RawCopy() Node { c := *n; return &c } +// A ForStmt is a non-range for loop: for Init; Cond; Post { Body } +// Op can be OFOR or OFORUNTIL (!Cond). 
+type ForStmt struct {
+	miniStmt
+	Label *types.Sym
+	Cond Node
+	Post Node
+	Late Nodes
+	body Nodes
+	hasBreak bool
+}
+
+func NewForStmt(pos src.XPos, init []Node, cond, post Node, body []Node) *ForStmt {
+	n := &ForStmt{Cond: cond, Post: post}
+	n.pos = pos
+	n.op = OFOR
+	n.init.Set(init)
+	n.body.Set(body)
+	return n
+}
+
+func (n *ForStmt) String() string { return fmt.Sprint(n) }
+func (n *ForStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *ForStmt) RawCopy() Node { c := *n; return &c }
+func (n *ForStmt) Sym() *types.Sym { return n.Label }
+func (n *ForStmt) SetSym(x *types.Sym) { n.Label = x }
+func (n *ForStmt) Left() Node { return n.Cond }
+func (n *ForStmt) SetLeft(x Node) { n.Cond = x }
+func (n *ForStmt) Right() Node { return n.Post }
+func (n *ForStmt) SetRight(x Node) { n.Post = x }
+func (n *ForStmt) Body() Nodes { return n.body }
+func (n *ForStmt) PtrBody() *Nodes { return &n.body }
+func (n *ForStmt) SetBody(x Nodes) { n.body = x }
+func (n *ForStmt) List() Nodes { return n.Late }
+func (n *ForStmt) PtrList() *Nodes { return &n.Late }
+func (n *ForStmt) SetList(x Nodes) { n.Late = x }
+func (n *ForStmt) HasBreak() bool { return n.hasBreak }
+func (n *ForStmt) SetHasBreak(b bool) { n.hasBreak = b }
+
+func (n *ForStmt) SetOp(op Op) {
+	if op != OFOR && op != OFORUNTIL {
+		panic(n.no("SetOp " + op.String()))
+	}
+	n.op = op
+}
+
+// A GoStmt is a go statement: go Call.
+type GoStmt struct {
+	miniStmt
+	Call Node
+}
+
+func NewGoStmt(pos src.XPos, call Node) *GoStmt {
+	n := &GoStmt{Call: call}
+	n.pos = pos
+	n.op = OGO
+	return n
+}
+
+func (n *GoStmt) String() string { return fmt.Sprint(n) }
+func (n *GoStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *GoStmt) RawCopy() Node { c := *n; return &c }
+
+func (n *GoStmt) Left() Node { return n.Call }
+func (n *GoStmt) SetLeft(x Node) { n.Call = x }
+
+// An IfStmt is an if statement: if Init; Cond { Then } else { Else }.
+type IfStmt struct {
+	miniStmt
+	Cond Node
+	body Nodes
+	Else Nodes
+	likely bool // code layout hint
+}
+
+func NewIfStmt(pos src.XPos, cond Node, body, els []Node) *IfStmt {
+	n := &IfStmt{Cond: cond}
+	n.pos = pos
+	n.op = OIF
+	n.body.Set(body)
+	n.Else.Set(els)
+	return n
+}
+
+func (n *IfStmt) String() string { return fmt.Sprint(n) }
+func (n *IfStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *IfStmt) RawCopy() Node { c := *n; return &c }
+func (n *IfStmt) Left() Node { return n.Cond }
+func (n *IfStmt) SetLeft(x Node) { n.Cond = x }
+func (n *IfStmt) Body() Nodes { return n.body }
+func (n *IfStmt) PtrBody() *Nodes { return &n.body }
+func (n *IfStmt) SetBody(x Nodes) { n.body = x }
+func (n *IfStmt) Rlist() Nodes { return n.Else }
+func (n *IfStmt) PtrRlist() *Nodes { return &n.Else }
+func (n *IfStmt) SetRlist(x Nodes) { n.Else = x }
+func (n *IfStmt) Likely() bool { return n.likely }
+func (n *IfStmt) SetLikely(x bool) { n.likely = x }
+
+// An InlineMarkStmt is a marker placed just before an inlined body.
+type InlineMarkStmt struct {
+	miniStmt
+	Index int64
+}
+
+func NewInlineMarkStmt(pos src.XPos, index int64) *InlineMarkStmt {
+	n := &InlineMarkStmt{Index: index}
+	n.pos = pos
+	n.op = OINLMARK
+	return n
+}
+
+func (n *InlineMarkStmt) String() string { return fmt.Sprint(n) }
+func (n *InlineMarkStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *InlineMarkStmt) RawCopy() Node { c := *n; return &c }
+func (n *InlineMarkStmt) Offset() int64 { return n.Index }
+func (n *InlineMarkStmt) SetOffset(x int64) { n.Index = x }
+
 // A LabelStmt is a label statement (just the label, not including the statement it labels).
 type LabelStmt struct {
 	miniStmt
@@ -81,3 +417,200 @@ func (n *LabelStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
 func (n *LabelStmt) RawCopy() Node { c := *n; return &c }
 func (n *LabelStmt) Sym() *types.Sym { return n.Label }
 func (n *LabelStmt) SetSym(x *types.Sym) { n.Label = x }
+
+// A RangeStmt is a range loop: for Vars = range X { Body }
+// Op is ORANGE (range loops are lowered to ordinary for loops during walk).
+type RangeStmt struct {
+	miniStmt
+	Label *types.Sym
+	Vars Nodes // TODO(rsc): Replace with Key, Value Node
+	Def bool
+	X Node
+	body Nodes
+	hasBreak bool
+	typ *types.Type // TODO(rsc): Remove - use X.Type() instead
+}
+
+func NewRangeStmt(pos src.XPos, vars []Node, x Node, body []Node) *RangeStmt {
+	n := &RangeStmt{X: x}
+	n.pos = pos
+	n.op = ORANGE
+	n.Vars.Set(vars)
+	n.body.Set(body)
+	return n
+}
+
+func (n *RangeStmt) String() string { return fmt.Sprint(n) }
+func (n *RangeStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *RangeStmt) RawCopy() Node { c := *n; return &c }
+func (n *RangeStmt) Sym() *types.Sym { return n.Label }
+func (n *RangeStmt) SetSym(x *types.Sym) { n.Label = x }
+func (n *RangeStmt) Right() Node { return n.X }
+func (n *RangeStmt) SetRight(x Node) { n.X = x }
+func (n *RangeStmt) Body() Nodes { return n.body }
+func (n *RangeStmt) PtrBody() *Nodes { return &n.body }
+func (n *RangeStmt) SetBody(x Nodes) { n.body = x }
+func (n *RangeStmt) List() Nodes { return n.Vars }
+func (n *RangeStmt) PtrList() *Nodes { return &n.Vars }
+func (n *RangeStmt) SetList(x Nodes) { n.Vars = x }
+func (n *RangeStmt) HasBreak() bool { return n.hasBreak }
+func (n *RangeStmt) SetHasBreak(b bool) { n.hasBreak = b }
+func (n *RangeStmt) Colas() bool { return n.Def }
+func (n *RangeStmt) SetColas(b bool) { n.Def = b }
+func (n *RangeStmt) Type() *types.Type { return n.typ }
+func (n *RangeStmt) SetType(x *types.Type) { n.typ = x }
+
+// A ReturnStmt is a return statement.
+type ReturnStmt struct {
+	miniStmt
+	orig Node // for typecheckargs rewrite
+	Results Nodes // return list
+}
+
+func NewReturnStmt(pos src.XPos, results []Node) *ReturnStmt {
+	n := &ReturnStmt{}
+	n.pos = pos
+	n.op = ORETURN
+	n.orig = n
+	n.Results.Set(results)
+	return n
+}
+
+func (n *ReturnStmt) String() string { return fmt.Sprint(n) }
+func (n *ReturnStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *ReturnStmt) RawCopy() Node { c := *n; return &c }
+func (n *ReturnStmt) Orig() Node { return n.orig }
+func (n *ReturnStmt) SetOrig(x Node) { n.orig = x }
+func (n *ReturnStmt) List() Nodes { return n.Results }
+func (n *ReturnStmt) PtrList() *Nodes { return &n.Results }
+func (n *ReturnStmt) SetList(x Nodes) { n.Results = x }
+func (n *ReturnStmt) IsDDD() bool { return false } // typecheckargs asks
+
+// A SelectStmt is a select statement: select { Cases }.
+type SelectStmt struct { + miniStmt + Label *types.Sym + Cases Nodes + hasBreak bool + + // TODO(rsc): Instead of recording here, replace with a block? + Compiled Nodes // compiled form, after walkswitch +} + +func NewSelectStmt(pos src.XPos, cases []Node) *SelectStmt { + n := &SelectStmt{} + n.pos = pos + n.op = OSELECT + n.Cases.Set(cases) + return n +} + +func (n *SelectStmt) String() string { return fmt.Sprint(n) } +func (n *SelectStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *SelectStmt) RawCopy() Node { c := *n; return &c } +func (n *SelectStmt) List() Nodes { return n.Cases } +func (n *SelectStmt) PtrList() *Nodes { return &n.Cases } +func (n *SelectStmt) SetList(x Nodes) { n.Cases = x } +func (n *SelectStmt) Sym() *types.Sym { return n.Label } +func (n *SelectStmt) SetSym(x *types.Sym) { n.Label = x } +func (n *SelectStmt) HasBreak() bool { return n.hasBreak } +func (n *SelectStmt) SetHasBreak(x bool) { n.hasBreak = x } +func (n *SelectStmt) Body() Nodes { return n.Compiled } +func (n *SelectStmt) PtrBody() *Nodes { return &n.Compiled } +func (n *SelectStmt) SetBody(x Nodes) { n.Compiled = x } + +// A SendStmt is a send statement: X <- Y. +type SendStmt struct { + miniStmt + Chan Node + Value Node +} + +func NewSendStmt(pos src.XPos, ch, value Node) *SendStmt { + n := &SendStmt{Chan: ch, Value: value} + n.pos = pos + n.op = OSEND + return n +} + +func (n *SendStmt) String() string { return fmt.Sprint(n) } +func (n *SendStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *SendStmt) RawCopy() Node { c := *n; return &c } + +func (n *SendStmt) Left() Node { return n.Chan } +func (n *SendStmt) SetLeft(x Node) { n.Chan = x } +func (n *SendStmt) Right() Node { return n.Value } +func (n *SendStmt) SetRight(y Node) { n.Value = y } + +// A SwitchStmt is a switch statement: switch Init; Expr { Cases }. +type SwitchStmt struct { + miniStmt + Tag Node + Cases Nodes // list of *CaseStmt + Label *types.Sym + hasBreak bool + + // TODO(rsc): Instead of recording here, replace with a block? + Compiled Nodes // compiled form, after walkswitch +} + +func NewSwitchStmt(pos src.XPos, tag Node, cases []Node) *SwitchStmt { + n := &SwitchStmt{Tag: tag} + n.pos = pos + n.op = OSWITCH + n.Cases.Set(cases) + return n +} + +func (n *SwitchStmt) String() string { return fmt.Sprint(n) } +func (n *SwitchStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *SwitchStmt) RawCopy() Node { c := *n; return &c } +func (n *SwitchStmt) Left() Node { return n.Tag } +func (n *SwitchStmt) SetLeft(x Node) { n.Tag = x } +func (n *SwitchStmt) List() Nodes { return n.Cases } +func (n *SwitchStmt) PtrList() *Nodes { return &n.Cases } +func (n *SwitchStmt) SetList(x Nodes) { n.Cases = x } +func (n *SwitchStmt) Body() Nodes { return n.Compiled } +func (n *SwitchStmt) PtrBody() *Nodes { return &n.Compiled } +func (n *SwitchStmt) SetBody(x Nodes) { n.Compiled = x } +func (n *SwitchStmt) Sym() *types.Sym { return n.Label } +func (n *SwitchStmt) SetSym(x *types.Sym) { n.Label = x } +func (n *SwitchStmt) HasBreak() bool { return n.hasBreak } +func (n *SwitchStmt) SetHasBreak(x bool) { n.hasBreak = x } + +// A TypeSwitchGuard is the [Name :=] X.(type) in a type switch. 
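+// For example, in "switch v := x.(type) { ... }", the guard is
+// "v := x.(type)": Name is v and X is x. Name is nil when the guarded
+// expression is not assigned to a variable.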
+type TypeSwitchGuard struct { + miniNode + name *Name + X Node +} + +func NewTypeSwitchGuard(pos src.XPos, name, x Node) *TypeSwitchGuard { + n := &TypeSwitchGuard{X: x} + if name != nil { + n.name = name.(*Name) + } + n.pos = pos + n.op = OTYPESW + return n +} + +func (n *TypeSwitchGuard) String() string { return fmt.Sprint(n) } +func (n *TypeSwitchGuard) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *TypeSwitchGuard) RawCopy() Node { c := *n; return &c } + +func (n *TypeSwitchGuard) Left() Node { + if n.name == nil { + return nil + } + return n.name +} +func (n *TypeSwitchGuard) SetLeft(x Node) { + if x == nil { + n.name = nil + return + } + n.name = x.(*Name) +} +func (n *TypeSwitchGuard) Right() Node { return n.X } +func (n *TypeSwitchGuard) SetRight(x Node) { n.X = x } From 41ad4dec991c11d9e1efff27fc0b1568f5981c9c Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 30 Nov 2020 09:34:34 -0500 Subject: [PATCH 073/474] [dev.regabi] cmd/compile: fix -h The compile -h flag is *meant* to panic, so you can see the stack trace where the error is being printed. Make it do that again. Change-Id: Ieb0042863582d7a4c5d08d2f866a144962915b06 Reviewed-on: https://go-review.googlesource.com/c/go/+/274116 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/main.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 7bad05265d67a..718239484b7c3 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -43,6 +43,9 @@ func hidePanic() { // about a panic too; let the user clean up // the code and try again. if err := recover(); err != nil { + if err == "-h" { + panic(err) + } base.ErrorExit() } } From 0f9f27287b6eaac1634248e325aaab848e0dfd55 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 30 Nov 2020 00:01:26 -0800 Subject: [PATCH 074/474] [dev.regabi] cmd/compile: remove types.InitSyms It's not types's responsibility to understand how package initialization is implemented. Instead, have gc keep track of the order that packages were imported, and then look for inittask declarations. Also, use resolve to force importing of the inittask's export data, so that we can get the appropriate linker symbol index. (This is also why this CL doesn't satisfy "toolstash -cmp".) Change-Id: I5b706497d4a8d1c4439178863b4a8dba4da0f5a9 Reviewed-on: https://go-review.googlesource.com/c/go/+/274006 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Keith Randall --- src/cmd/compile/internal/gc/init.go | 14 ++++++++++++-- src/cmd/compile/internal/gc/noder.go | 3 +++ src/cmd/compile/internal/types/pkg.go | 6 ------ 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index ed0218c0e2363..b5fd2e7c758d2 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -27,6 +27,9 @@ func renameinit() *types.Sym { return s } +// List of imported packages, in source code order. See #31636. +var sourceOrderImports []*types.Pkg + // fninit makes an initialization record for the package. // See runtime/proc.go:initTask for its layout. // The 3 tasks for initialization are: @@ -40,8 +43,15 @@ func fninit(n []ir.Node) { var fns []*obj.LSym // functions to call for package initialization // Find imported packages with init tasks. 
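+	// Walking sourceOrderImports (rather than a list maintained by
+	// package types) keeps the dependency list in source code order;
+	// resolve forces the import's export data to be read so that the
+	// .inittask symbol gets a linker symbol index.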
- for _, s := range types.InitSyms { - deps = append(deps, s.Linksym()) + for _, pkg := range sourceOrderImports { + n := resolve(ir.AsNode(pkg.Lookup(".inittask").Def)) + if n == nil { + continue + } + if n.Op() != ir.ONAME || n.Class() != ir.PEXTERN { + base.Fatalf("bad inittask: %v", n) + } + deps = append(deps, n.Sym().Linksym()) } // Make a function that contains all the initialization statements. diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 98a09f40069a3..6a5afe7687f00 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -347,6 +347,9 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) { p.importedEmbed = true } + if !ipkg.Direct { + sourceOrderImports = append(sourceOrderImports, ipkg) + } ipkg.Direct = true var my *types.Sym diff --git a/src/cmd/compile/internal/types/pkg.go b/src/cmd/compile/internal/types/pkg.go index bcc6789509581..bf90570b537b3 100644 --- a/src/cmd/compile/internal/types/pkg.go +++ b/src/cmd/compile/internal/types/pkg.go @@ -84,9 +84,6 @@ func (pkg *Pkg) Lookup(name string) *Sym { return s } -// List of .inittask entries in imported packages, in source code order. -var InitSyms []*Sym - // LookupOK looks up name in pkg and reports whether it previously existed. func (pkg *Pkg) LookupOK(name string) (s *Sym, existed bool) { // TODO(gri) remove this check in favor of specialized lookup @@ -101,9 +98,6 @@ func (pkg *Pkg) LookupOK(name string) (s *Sym, existed bool) { Name: name, Pkg: pkg, } - if name == ".inittask" { - InitSyms = append(InitSyms, s) - } pkg.Syms[name] = s return s, false } From 9a5a11adfa0f5ead728641d8fb72244e03239547 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sun, 29 Nov 2020 21:25:47 -0500 Subject: [PATCH 075/474] [dev.regabi] cmd/compile: add custom expression Node implementations These are fairly rote implementations of structs appropriate to each Op (or group of Ops). The names of these are unknown except to ir.NodAt for now. A later, automated change will introduce direct use of the types throughout package gc. (This CL is expressions; the previous one was statements.) This is the last of the Ops that were previously handled by the generic node struct, so that struct and its methods can be and are deleted in this CL. Passes buildall w/ toolstash -cmp. 
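As a hedged sketch (constructor and op names are the ones in this CL;
pos, x, and y are hypothetical operands), the expression constructors
mirror the statement ones:

	// x + y as a typed node; NewBinaryExpr panics on non-binary ops.
	sum := ir.NewBinaryExpr(pos, ir.OADD, x, y)
	// ir.NodAt(pos, ir.OADD, x, y) now dispatches to the same
	// constructor, so callers can migrate incrementally.
	sum2 := ir.NodAt(pos, ir.OADD, x, y)
	_, _ = sum, sum2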
Change-Id: I1703f35f24dcd3f7c5782a278e53c3fe04e87c37 Reviewed-on: https://go-review.googlesource.com/c/go/+/274109 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/fmtmap_test.go | 2 +- src/cmd/compile/internal/ir/dump.go | 4 +- src/cmd/compile/internal/ir/expr.go | 725 ++++++++++++++++++++- src/cmd/compile/internal/ir/fmt.go | 5 - src/cmd/compile/internal/ir/node.go | 504 ++------------ src/cmd/compile/internal/ir/sizeof_test.go | 1 - 6 files changed, 782 insertions(+), 459 deletions(-) diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go index 32891aea66d55..09b06c4d939d0 100644 --- a/src/cmd/compile/fmtmap_test.go +++ b/src/cmd/compile/fmtmap_test.go @@ -29,7 +29,7 @@ var knownFormats = map[string]string{ "*cmd/compile/internal/ir.Name %+v": "", "*cmd/compile/internal/ir.Name %L": "", "*cmd/compile/internal/ir.Name %v": "", - "*cmd/compile/internal/ir.node %v": "", + "*cmd/compile/internal/ir.SliceExpr %v": "", "*cmd/compile/internal/ssa.Block %s": "", "*cmd/compile/internal/ssa.Block %v": "", "*cmd/compile/internal/ssa.Func %s": "", diff --git a/src/cmd/compile/internal/ir/dump.go b/src/cmd/compile/internal/ir/dump.go index fe1410969f0ef..bff3a408550a5 100644 --- a/src/cmd/compile/internal/ir/dump.go +++ b/src/cmd/compile/internal/ir/dump.go @@ -200,9 +200,9 @@ func (p *dumper) dump(x reflect.Value, depth int) { typ := x.Type() isNode := false - if n, ok := x.Interface().(node); ok { + if n, ok := x.Interface().(Node); ok { isNode = true - p.printf("%s %s {", n.op.String(), p.addr(x)) + p.printf("%s %s {", n.Op().String(), p.addr(x)) } else { p.printf("%s {", typ) } diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index f8e5f7641ca15..be9f486682150 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -5,6 +5,7 @@ package ir import ( + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/src" "fmt" @@ -48,6 +49,182 @@ func (n *miniExpr) Init() Nodes { return n.init } func (n *miniExpr) PtrInit() *Nodes { return &n.init } func (n *miniExpr) SetInit(x Nodes) { n.init = x } +func toNtype(x Node) Ntype { + if x == nil { + return nil + } + if _, ok := x.(Ntype); !ok { + Dump("not Ntype", x) + } + return x.(Ntype) +} + +// An AddStringExpr is a string concatenation Expr[0] + Exprs[1] + ... + Expr[len(Expr)-1]. +type AddStringExpr struct { + miniExpr + list Nodes +} + +func NewAddStringExpr(pos src.XPos, list []Node) *AddStringExpr { + n := &AddStringExpr{} + n.pos = pos + n.op = OADDSTR + n.list.Set(list) + return n +} + +func (n *AddStringExpr) String() string { return fmt.Sprint(n) } +func (n *AddStringExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *AddStringExpr) RawCopy() Node { c := *n; return &c } +func (n *AddStringExpr) List() Nodes { return n.list } +func (n *AddStringExpr) PtrList() *Nodes { return &n.list } +func (n *AddStringExpr) SetList(x Nodes) { n.list = x } + +// An AddrExpr is an address-of expression &X. +// It may end up being a normal address-of or an allocation of a composite literal. 
+type AddrExpr struct { + miniExpr + X Node + Alloc Node // preallocated storage if any +} + +func NewAddrExpr(pos src.XPos, x Node) *AddrExpr { + n := &AddrExpr{X: x} + n.op = OADDR + n.pos = pos + return n +} + +func (n *AddrExpr) String() string { return fmt.Sprint(n) } +func (n *AddrExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *AddrExpr) RawCopy() Node { c := *n; return &c } +func (n *AddrExpr) Left() Node { return n.X } +func (n *AddrExpr) SetLeft(x Node) { n.X = x } +func (n *AddrExpr) Right() Node { return n.Alloc } +func (n *AddrExpr) SetRight(x Node) { n.Alloc = x } + +func (n *AddrExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OADDR, OPTRLIT: + n.op = op + } +} + +// A BinaryExpr is a binary expression X Op Y, +// or Op(X, Y) for builtin functions that do not become calls. +type BinaryExpr struct { + miniExpr + X Node + Y Node +} + +func NewBinaryExpr(pos src.XPos, op Op, x, y Node) *BinaryExpr { + n := &BinaryExpr{X: x, Y: y} + n.pos = pos + n.SetOp(op) + return n +} + +func (n *BinaryExpr) String() string { return fmt.Sprint(n) } +func (n *BinaryExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *BinaryExpr) RawCopy() Node { c := *n; return &c } +func (n *BinaryExpr) Left() Node { return n.X } +func (n *BinaryExpr) SetLeft(x Node) { n.X = x } +func (n *BinaryExpr) Right() Node { return n.Y } +func (n *BinaryExpr) SetRight(y Node) { n.Y = y } + +func (n *BinaryExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OADD, OADDSTR, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, + OLSH, OLT, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSUB, OXOR, + OCOPY, OCOMPLEX, + OEFACE: + n.op = op + } +} + +// A CallExpr is a function call X(Args). +type CallExpr struct { + miniExpr + orig Node + X Node + Args Nodes + Rargs Nodes // TODO(rsc): Delete. + body Nodes // TODO(rsc): Delete. + DDD bool + noInline bool +} + +func NewCallExpr(pos src.XPos, fun Node, args []Node) *CallExpr { + n := &CallExpr{X: fun} + n.pos = pos + n.orig = n + n.op = OCALL + n.Args.Set(args) + return n +} + +func (n *CallExpr) String() string { return fmt.Sprint(n) } +func (n *CallExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *CallExpr) RawCopy() Node { c := *n; return &c } +func (n *CallExpr) Orig() Node { return n.orig } +func (n *CallExpr) SetOrig(x Node) { n.orig = x } +func (n *CallExpr) Left() Node { return n.X } +func (n *CallExpr) SetLeft(x Node) { n.X = x } +func (n *CallExpr) List() Nodes { return n.Args } +func (n *CallExpr) PtrList() *Nodes { return &n.Args } +func (n *CallExpr) SetList(x Nodes) { n.Args = x } +func (n *CallExpr) Rlist() Nodes { return n.Rargs } +func (n *CallExpr) PtrRlist() *Nodes { return &n.Rargs } +func (n *CallExpr) SetRlist(x Nodes) { n.Rargs = x } +func (n *CallExpr) IsDDD() bool { return n.DDD } +func (n *CallExpr) SetIsDDD(x bool) { n.DDD = x } +func (n *CallExpr) NoInline() bool { return n.noInline } +func (n *CallExpr) SetNoInline(x bool) { n.noInline = x } +func (n *CallExpr) Body() Nodes { return n.body } +func (n *CallExpr) PtrBody() *Nodes { return &n.body } +func (n *CallExpr) SetBody(x Nodes) { n.body = x } + +func (n *CallExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, + OAPPEND, ODELETE, OGETG, OMAKE, OPRINT, OPRINTN, ORECOVER: + n.op = op + } +} + +// A CallPartExpr is a method expression X.Method (uncalled). 
+type CallPartExpr struct { + miniExpr + fn *Func + X Node + Method *Name +} + +func NewCallPartExpr(pos src.XPos, x Node, method *Name, fn *Func) *CallPartExpr { + n := &CallPartExpr{fn: fn, X: x, Method: method} + n.op = OCALLPART + n.pos = pos + n.typ = fn.Type() + n.fn = fn + return n +} + +func (n *CallPartExpr) String() string { return fmt.Sprint(n) } +func (n *CallPartExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *CallPartExpr) RawCopy() Node { c := *n; return &c } +func (n *CallPartExpr) Func() *Func { return n.fn } +func (n *CallPartExpr) Left() Node { return n.X } +func (n *CallPartExpr) Right() Node { return n.Method } +func (n *CallPartExpr) SetLeft(x Node) { n.X = x } +func (n *CallPartExpr) SetRight(x Node) { n.Method = x.(*Name) } + // A ClosureExpr is a function literal expression. type ClosureExpr struct { miniExpr @@ -85,31 +262,477 @@ func (n *ClosureRead) RawCopy() Node { c := *n; return &c } func (n *ClosureRead) Type() *types.Type { return n.typ } func (n *ClosureRead) Offset() int64 { return n.offset } -// A CallPartExpr is a method expression X.Method (uncalled). -type CallPartExpr struct { +// A CompLitExpr is a composite literal Type{Vals}. +// Before type-checking, the type is Ntype. +type CompLitExpr struct { + miniExpr + orig Node + Ntype Ntype + list Nodes // initialized values +} + +func NewCompLitExpr(pos src.XPos, typ Ntype, list []Node) *CompLitExpr { + n := &CompLitExpr{Ntype: typ} + n.pos = pos + n.op = OCOMPLIT + n.list.Set(list) + n.orig = n + return n +} + +func (n *CompLitExpr) String() string { return fmt.Sprint(n) } +func (n *CompLitExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *CompLitExpr) RawCopy() Node { c := *n; return &c } +func (n *CompLitExpr) Orig() Node { return n.orig } +func (n *CompLitExpr) SetOrig(x Node) { n.orig = x } +func (n *CompLitExpr) Right() Node { return n.Ntype } +func (n *CompLitExpr) SetRight(x Node) { n.Ntype = toNtype(x) } +func (n *CompLitExpr) List() Nodes { return n.list } +func (n *CompLitExpr) PtrList() *Nodes { return &n.list } +func (n *CompLitExpr) SetList(x Nodes) { n.list = x } + +func (n *CompLitExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OARRAYLIT, OCOMPLIT, OMAPLIT, OSTRUCTLIT, OSLICELIT: + n.op = op + } +} + +// A ConvExpr is a conversion Type(X). +// It may end up being a value or a type. +type ConvExpr struct { + miniExpr + orig Node + X Node +} + +func NewConvExpr(pos src.XPos, op Op, typ *types.Type, x Node) *ConvExpr { + n := &ConvExpr{X: x} + n.pos = pos + n.typ = typ + n.SetOp(op) + n.orig = n + return n +} + +func (n *ConvExpr) String() string { return fmt.Sprint(n) } +func (n *ConvExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *ConvExpr) RawCopy() Node { c := *n; return &c } +func (n *ConvExpr) Orig() Node { return n.orig } +func (n *ConvExpr) SetOrig(x Node) { n.orig = x } +func (n *ConvExpr) Left() Node { return n.X } +func (n *ConvExpr) SetLeft(x Node) { n.X = x } + +func (n *ConvExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, OBYTES2STRTMP, ORUNES2STR, OSTR2BYTES, OSTR2BYTESTMP, OSTR2RUNES, ORUNESTR: + n.op = op + } +} + +// An IndexExpr is an index expression X[Y]. 
+type IndexExpr struct { + miniExpr + X Node + Index Node + Assigned bool +} + +func NewIndexExpr(pos src.XPos, x, index Node) *IndexExpr { + n := &IndexExpr{X: x, Index: index} + n.pos = pos + n.op = OINDEX + return n +} + +func (n *IndexExpr) String() string { return fmt.Sprint(n) } +func (n *IndexExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *IndexExpr) RawCopy() Node { c := *n; return &c } +func (n *IndexExpr) Left() Node { return n.X } +func (n *IndexExpr) SetLeft(x Node) { n.X = x } +func (n *IndexExpr) Right() Node { return n.Index } +func (n *IndexExpr) SetRight(y Node) { n.Index = y } +func (n *IndexExpr) IndexMapLValue() bool { return n.Assigned } +func (n *IndexExpr) SetIndexMapLValue(x bool) { n.Assigned = x } + +func (n *IndexExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OINDEX, OINDEXMAP: + n.op = op + } +} + +// A KeyExpr is an X:Y composite literal key. +// After type-checking, a key for a struct sets Sym to the field. +type KeyExpr struct { + miniExpr + Key Node + sym *types.Sym + Value Node + offset int64 +} + +func NewKeyExpr(pos src.XPos, key, value Node) *KeyExpr { + n := &KeyExpr{Key: key, Value: value} + n.pos = pos + n.op = OKEY + n.offset = types.BADWIDTH + return n +} + +func (n *KeyExpr) String() string { return fmt.Sprint(n) } +func (n *KeyExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *KeyExpr) RawCopy() Node { c := *n; return &c } +func (n *KeyExpr) Left() Node { return n.Key } +func (n *KeyExpr) SetLeft(x Node) { n.Key = x } +func (n *KeyExpr) Right() Node { return n.Value } +func (n *KeyExpr) SetRight(y Node) { n.Value = y } +func (n *KeyExpr) Sym() *types.Sym { return n.sym } +func (n *KeyExpr) SetSym(x *types.Sym) { n.sym = x } +func (n *KeyExpr) Offset() int64 { return n.offset } +func (n *KeyExpr) SetOffset(x int64) { n.offset = x } + +func (n *KeyExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OKEY, OSTRUCTKEY: + n.op = op + } +} + +// An InlinedCallExpr is an inlined function call. +type InlinedCallExpr struct { + miniExpr + body Nodes + ReturnVars Nodes +} + +func NewInlinedCallExpr(pos src.XPos, body, retvars []Node) *InlinedCallExpr { + n := &InlinedCallExpr{} + n.pos = pos + n.op = OINLCALL + n.body.Set(body) + n.ReturnVars.Set(retvars) + return n +} + +func (n *InlinedCallExpr) String() string { return fmt.Sprint(n) } +func (n *InlinedCallExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *InlinedCallExpr) RawCopy() Node { c := *n; return &c } +func (n *InlinedCallExpr) Body() Nodes { return n.body } +func (n *InlinedCallExpr) PtrBody() *Nodes { return &n.body } +func (n *InlinedCallExpr) SetBody(x Nodes) { n.body = x } +func (n *InlinedCallExpr) Rlist() Nodes { return n.ReturnVars } +func (n *InlinedCallExpr) PtrRlist() *Nodes { return &n.ReturnVars } +func (n *InlinedCallExpr) SetRlist(x Nodes) { n.ReturnVars = x } + +// A MakeExpr is a make expression: make(Type[, Len[, Cap]]). +// Op is OMAKECHAN, OMAKEMAP, OMAKESLICE, or OMAKESLICECOPY, +// but *not* OMAKE (that's a pre-typechecking CallExpr). 
+type MakeExpr struct { + miniExpr + Len Node + Cap Node +} + +func NewMakeExpr(pos src.XPos, op Op, len, cap Node) *MakeExpr { + n := &MakeExpr{Len: len, Cap: cap} + n.pos = pos + n.SetOp(op) + return n +} + +func (n *MakeExpr) String() string { return fmt.Sprint(n) } +func (n *MakeExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *MakeExpr) RawCopy() Node { c := *n; return &c } +func (n *MakeExpr) Left() Node { return n.Len } +func (n *MakeExpr) SetLeft(x Node) { n.Len = x } +func (n *MakeExpr) Right() Node { return n.Cap } +func (n *MakeExpr) SetRight(x Node) { n.Cap = x } + +func (n *MakeExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OMAKECHAN, OMAKEMAP, OMAKESLICE, OMAKESLICECOPY: + n.op = op + } +} + +// A MethodExpr is a method expression X.M (where X is an expression, not a type). +type MethodExpr struct { miniExpr - fn *Func X Node - Method *Name + M Node + sym *types.Sym + offset int64 + class Class } -func NewCallPartExpr(pos src.XPos, x Node, method *Name, fn *Func) *CallPartExpr { - n := &CallPartExpr{fn: fn, X: x, Method: method} - n.op = OCALLPART +func NewMethodExpr(pos src.XPos, op Op, x, m Node) *MethodExpr { + n := &MethodExpr{X: x, M: m} n.pos = pos - n.typ = fn.Type() - n.fn = fn + n.op = OMETHEXPR + n.offset = types.BADWIDTH return n } -func (n *CallPartExpr) String() string { return fmt.Sprint(n) } -func (n *CallPartExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *CallPartExpr) RawCopy() Node { c := *n; return &c } -func (n *CallPartExpr) Func() *Func { return n.fn } -func (n *CallPartExpr) Left() Node { return n.X } -func (n *CallPartExpr) Right() Node { return n.Method } -func (n *CallPartExpr) SetLeft(x Node) { n.X = x } -func (n *CallPartExpr) SetRight(x Node) { n.Method = x.(*Name) } +func (n *MethodExpr) String() string { return fmt.Sprint(n) } +func (n *MethodExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *MethodExpr) RawCopy() Node { c := *n; return &c } +func (n *MethodExpr) Left() Node { return n.X } +func (n *MethodExpr) SetLeft(x Node) { n.X = x } +func (n *MethodExpr) Right() Node { return n.M } +func (n *MethodExpr) SetRight(y Node) { n.M = y } +func (n *MethodExpr) Sym() *types.Sym { return n.sym } +func (n *MethodExpr) SetSym(x *types.Sym) { n.sym = x } +func (n *MethodExpr) Offset() int64 { return n.offset } +func (n *MethodExpr) SetOffset(x int64) { n.offset = x } +func (n *MethodExpr) Class() Class { return n.class } +func (n *MethodExpr) SetClass(x Class) { n.class = x } + +// A NilExpr represents the predefined untyped constant nil. +// (It may be copied and assigned a type, though.) +type NilExpr struct { + miniExpr + sym *types.Sym // TODO: Remove +} + +func NewNilExpr(pos src.XPos) *NilExpr { + n := &NilExpr{} + n.pos = pos + n.op = ONIL + return n +} + +func (n *NilExpr) String() string { return fmt.Sprint(n) } +func (n *NilExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *NilExpr) RawCopy() Node { c := *n; return &c } +func (n *NilExpr) Sym() *types.Sym { return n.sym } +func (n *NilExpr) SetSym(x *types.Sym) { n.sym = x } + +// A ParenExpr is a parenthesized expression (X). +// It may end up being a value or a type. 
+type ParenExpr struct { + miniExpr + X Node +} + +func NewParenExpr(pos src.XPos, x Node) *ParenExpr { + n := &ParenExpr{X: x} + n.op = OPAREN + n.pos = pos + return n +} + +func (n *ParenExpr) String() string { return fmt.Sprint(n) } +func (n *ParenExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *ParenExpr) RawCopy() Node { c := *n; return &c } +func (n *ParenExpr) Left() Node { return n.X } +func (n *ParenExpr) SetLeft(x Node) { n.X = x } + +func (*ParenExpr) CanBeNtype() {} + +// SetOTYPE changes n to be an OTYPE node returning t, +// like all the type nodes in type.go. +func (n *ParenExpr) SetOTYPE(t *types.Type) { + n.op = OTYPE + n.typ = t + if t.Nod == nil { + t.Nod = n + } +} + +// A ResultExpr represents a direct access to a result slot on the stack frame. +type ResultExpr struct { + miniExpr + offset int64 +} + +func NewResultExpr(pos src.XPos, typ *types.Type, offset int64) *ResultExpr { + n := &ResultExpr{offset: offset} + n.pos = pos + n.op = ORESULT + n.typ = typ + return n +} + +func (n *ResultExpr) String() string { return fmt.Sprint(n) } +func (n *ResultExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *ResultExpr) RawCopy() Node { c := *n; return &c } +func (n *ResultExpr) Offset() int64 { return n.offset } +func (n *ResultExpr) SetOffset(x int64) { n.offset = x } + +// A SelectorExpr is a selector expression X.Sym. +type SelectorExpr struct { + miniExpr + X Node + Sel *types.Sym + offset int64 +} + +func NewSelectorExpr(pos src.XPos, x Node, sel *types.Sym) *SelectorExpr { + n := &SelectorExpr{X: x, Sel: sel} + n.pos = pos + n.op = OXDOT + n.offset = types.BADWIDTH + return n +} + +func (n *SelectorExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OXDOT: + n.op = op + } +} + +func (n *SelectorExpr) String() string { return fmt.Sprint(n) } +func (n *SelectorExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *SelectorExpr) RawCopy() Node { c := *n; return &c } +func (n *SelectorExpr) Left() Node { return n.X } +func (n *SelectorExpr) SetLeft(x Node) { n.X = x } +func (n *SelectorExpr) Sym() *types.Sym { return n.Sel } +func (n *SelectorExpr) SetSym(x *types.Sym) { n.Sel = x } +func (n *SelectorExpr) Offset() int64 { return n.offset } +func (n *SelectorExpr) SetOffset(x int64) { n.offset = x } + +// Before type-checking, bytes.Buffer is a SelectorExpr. +// After type-checking it becomes a Name. +func (*SelectorExpr) CanBeNtype() {} + +// A SliceExpr is a slice expression X[Low:High] or X[Low:High:Max]. +type SliceExpr struct { + miniExpr + X Node + list Nodes // TODO(rsc): Use separate Nodes +} + +func NewSliceExpr(pos src.XPos, op Op, x Node) *SliceExpr { + n := &SliceExpr{X: x} + n.pos = pos + n.op = op + return n +} + +func (n *SliceExpr) String() string { return fmt.Sprint(n) } +func (n *SliceExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *SliceExpr) RawCopy() Node { c := *n; return &c } +func (n *SliceExpr) Left() Node { return n.X } +func (n *SliceExpr) SetLeft(x Node) { n.X = x } +func (n *SliceExpr) List() Nodes { return n.list } +func (n *SliceExpr) PtrList() *Nodes { return &n.list } +func (n *SliceExpr) SetList(x Nodes) { n.list = x } + +func (n *SliceExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR: + n.op = op + } +} + +// SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max]. 
+// n must be a slice expression. max is nil if n is a simple slice expression.
+func (n *SliceExpr) SliceBounds() (low, high, max Node) {
+	if n.list.Len() == 0 {
+		return nil, nil, nil
+	}
+
+	switch n.Op() {
+	case OSLICE, OSLICEARR, OSLICESTR:
+		s := n.list.Slice()
+		return s[0], s[1], nil
+	case OSLICE3, OSLICE3ARR:
+		s := n.list.Slice()
+		return s[0], s[1], s[2]
+	}
+	base.Fatalf("SliceBounds op %v: %v", n.Op(), n)
+	return nil, nil, nil
+}
+
+// SetSliceBounds sets n's slice bounds, where n is a slice expression.
+// n must be a slice expression. If max is non-nil, n must be a full slice expression.
+func (n *SliceExpr) SetSliceBounds(low, high, max Node) {
+	switch n.Op() {
+	case OSLICE, OSLICEARR, OSLICESTR:
+		if max != nil {
+			base.Fatalf("SetSliceBounds %v given three bounds", n.Op())
+		}
+		s := n.list.Slice()
+		if s == nil {
+			if low == nil && high == nil {
+				return
+			}
+			n.list.Set2(low, high)
+			return
+		}
+		s[0] = low
+		s[1] = high
+		return
+	case OSLICE3, OSLICE3ARR:
+		s := n.list.Slice()
+		if s == nil {
+			if low == nil && high == nil && max == nil {
+				return
+			}
+			n.list.Set3(low, high, max)
+			return
+		}
+		s[0] = low
+		s[1] = high
+		s[2] = max
+		return
+	}
+	base.Fatalf("SetSliceBounds op %v: %v", n.Op(), n)
+}
+
+// IsSlice3 reports whether o is a slice3 op (OSLICE3, OSLICE3ARR).
+// o must be a slicing op.
+func (o Op) IsSlice3() bool {
+	switch o {
+	case OSLICE, OSLICEARR, OSLICESTR:
+		return false
+	case OSLICE3, OSLICE3ARR:
+		return true
+	}
+	base.Fatalf("IsSlice3 op %v", o)
+	return false
+}
+
+// A SliceHeaderExpr constructs a slice header from its parts.
+type SliceHeaderExpr struct {
+	miniExpr
+	Ptr Node
+	lenCap Nodes // TODO(rsc): Split into two Node fields
+}
+
+func NewSliceHeaderExpr(pos src.XPos, typ *types.Type, ptr, len, cap Node) *SliceHeaderExpr {
+	n := &SliceHeaderExpr{Ptr: ptr}
+	n.pos = pos
+	n.op = OSLICEHEADER
+	n.typ = typ
+	n.lenCap.Set2(len, cap)
+	return n
+}
+
+func (n *SliceHeaderExpr) String() string { return fmt.Sprint(n) }
+func (n *SliceHeaderExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *SliceHeaderExpr) RawCopy() Node { c := *n; return &c }
+func (n *SliceHeaderExpr) Left() Node { return n.Ptr }
+func (n *SliceHeaderExpr) SetLeft(x Node) { n.Ptr = x }
+func (n *SliceHeaderExpr) List() Nodes { return n.lenCap }
+func (n *SliceHeaderExpr) PtrList() *Nodes { return &n.lenCap }
+func (n *SliceHeaderExpr) SetList(x Nodes) { n.lenCap = x }
 
 // A StarExpr is a dereference expression *X.
 // It may end up being a value or a type.
@@ -154,3 +777,71 @@ func (n *StarExpr) DeepCopy(pos src.XPos) Node {
 	c.X = DeepCopy(pos, n.X)
 	return c
 }
+
+// A TypeAssertExpr is a type assertion expression X.(Type).
+// Before type-checking, the type is Ntype.
+type TypeAssertExpr struct { + miniExpr + X Node + Ntype Node // TODO: Should be Ntype, but reused as address of type structure + Itab Nodes // Itab[0] is itab +} + +func NewTypeAssertExpr(pos src.XPos, x Node, typ Ntype) *TypeAssertExpr { + n := &TypeAssertExpr{X: x, Ntype: typ} + n.pos = pos + n.op = ODOTTYPE + return n +} + +func (n *TypeAssertExpr) String() string { return fmt.Sprint(n) } +func (n *TypeAssertExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *TypeAssertExpr) RawCopy() Node { c := *n; return &c } +func (n *TypeAssertExpr) Left() Node { return n.X } +func (n *TypeAssertExpr) SetLeft(x Node) { n.X = x } +func (n *TypeAssertExpr) Right() Node { return n.Ntype } +func (n *TypeAssertExpr) SetRight(x Node) { n.Ntype = x } // TODO: toNtype(x) +func (n *TypeAssertExpr) List() Nodes { return n.Itab } +func (n *TypeAssertExpr) PtrList() *Nodes { return &n.Itab } +func (n *TypeAssertExpr) SetList(x Nodes) { n.Itab = x } + +func (n *TypeAssertExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case ODOTTYPE, ODOTTYPE2: + n.op = op + } +} + +// A UnaryExpr is a unary expression Op X, +// or Op(X) for a builtin function that does not end up being a call. +type UnaryExpr struct { + miniExpr + X Node +} + +func NewUnaryExpr(pos src.XPos, op Op, x Node) *UnaryExpr { + n := &UnaryExpr{X: x} + n.pos = pos + n.SetOp(op) + return n +} + +func (n *UnaryExpr) String() string { return fmt.Sprint(n) } +func (n *UnaryExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *UnaryExpr) RawCopy() Node { c := *n; return &c } +func (n *UnaryExpr) Left() Node { return n.X } +func (n *UnaryExpr) SetLeft(x Node) { n.X = x } + +func (n *UnaryExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OBITNOT, ONEG, ONOT, OPLUS, ORECV, + OALIGNOF, OCAP, OCLOSE, OIMAG, OLEN, ONEW, + OOFFSETOF, OPANIC, OREAL, OSIZEOF, + OCHECKNIL, OCFUNC, OIDATA, OITAB, ONEWOBJ, OSPTR, OVARDEF, OVARKILL, OVARLIVE: + n.op = op + } +} diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 4a08cca359c05..a1114712222c8 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -277,10 +277,6 @@ type fmtNodes struct { func (f *fmtNodes) Format(s fmt.State, verb rune) { f.x.format(s, verb, f.m) } -func (n *node) Format(s fmt.State, verb rune) { - FmtNode(n, s, verb) -} - func FmtNode(n Node, s fmt.State, verb rune) { nodeFormat(n, s, verb, FErr) } @@ -1806,7 +1802,6 @@ func typeFormat(t *types.Type, s fmt.State, verb rune, mode FmtMode) { } } -func (n *node) String() string { return fmt.Sprint(n) } func modeString(n Node, mode FmtMode) string { return mode.Sprint(n) } // "%L" suffix with "(type %T)" where possible diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index a4d19c39f8db3..9b407b36c0418 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -118,140 +118,6 @@ type Node interface { CanBeAnSSASym() } -var _ Node = (*node)(nil) - -// A Node is a single node in the syntax tree. -// Actually the syntax tree is a syntax DAG, because there is only one -// node with Op=ONAME for a given instance of a variable x. -// The same is true for Op=OTYPE and Op=OLITERAL. See Node.mayBeShared. -type node struct { - // Tree structure. - // Generic recursive walks should follow these fields. 
- left Node - right Node - init Nodes - body Nodes - list Nodes - rlist Nodes - - // most nodes - typ *types.Type - orig Node // original form, for printing, and tracking copies of ONAMEs - - sym *types.Sym // various - opt interface{} - - // Various. Usually an offset into a struct. For example: - // - ONAME nodes that refer to local variables use it to identify their stack frame position. - // - ODOT, ODOTPTR, and ORESULT use it to indicate offset relative to their base address. - // - OSTRUCTKEY uses it to store the named field's offset. - // - Named OLITERALs use it to store their ambient iota value. - // - OINLMARK stores an index into the inlTree data structure. - // - OCLOSURE uses it to store ambient iota value, if any. - // Possibly still more uses. If you find any, document them. - offset int64 - - pos src.XPos - - flags bitset32 - - esc uint16 // EscXXX - - op Op - aux uint8 -} - -func (n *node) Left() Node { return n.left } -func (n *node) SetLeft(x Node) { n.left = x } -func (n *node) Right() Node { return n.right } -func (n *node) SetRight(x Node) { n.right = x } -func (n *node) Orig() Node { return n.orig } -func (n *node) SetOrig(x Node) { n.orig = x } -func (n *node) Type() *types.Type { return n.typ } -func (n *node) SetType(x *types.Type) { n.typ = x } -func (n *node) Func() *Func { return nil } -func (n *node) Name() *Name { return nil } -func (n *node) Sym() *types.Sym { return n.sym } -func (n *node) SetSym(x *types.Sym) { n.sym = x } -func (n *node) Pos() src.XPos { return n.pos } -func (n *node) SetPos(x src.XPos) { n.pos = x } -func (n *node) Offset() int64 { return n.offset } -func (n *node) SetOffset(x int64) { n.offset = x } -func (n *node) Esc() uint16 { return n.esc } -func (n *node) SetEsc(x uint16) { n.esc = x } -func (n *node) Op() Op { return n.op } -func (n *node) Init() Nodes { return n.init } -func (n *node) SetInit(x Nodes) { n.init = x } -func (n *node) PtrInit() *Nodes { return &n.init } -func (n *node) Body() Nodes { return n.body } -func (n *node) SetBody(x Nodes) { n.body = x } -func (n *node) PtrBody() *Nodes { return &n.body } -func (n *node) List() Nodes { return n.list } -func (n *node) SetList(x Nodes) { n.list = x } -func (n *node) PtrList() *Nodes { return &n.list } -func (n *node) Rlist() Nodes { return n.rlist } -func (n *node) SetRlist(x Nodes) { n.rlist = x } -func (n *node) PtrRlist() *Nodes { return &n.rlist } -func (n *node) MarkReadonly() { panic("node.MarkReadOnly") } -func (n *node) Val() constant.Value { panic("node.Val") } -func (n *node) SetVal(constant.Value) { panic("node.SetVal") } -func (n *node) Int64Val() int64 { panic("node.Int64Val") } -func (n *node) CanInt64() bool { return false } -func (n *node) Uint64Val() uint64 { panic("node.Uint64Val") } -func (n *node) BoolVal() bool { panic("node.BoolVal") } -func (n *node) StringVal() string { panic("node.StringVal") } - -// node can be Ntype only because of OXDOT of undefined name. -// When that moves into its own syntax, can drop this. 
-func (n *node) CanBeNtype() {} - -func (n *node) SetOp(op Op) { - if !okForNod[op] { - panic("cannot node.SetOp " + op.String()) - } - n.op = op -} - -func (n *node) ResetAux() { - n.aux = 0 -} - -func (n *node) SubOp() Op { - switch n.Op() { - case OASOP, ONAME: - default: - base.Fatalf("unexpected op: %v", n.Op()) - } - return Op(n.aux) -} - -func (n *node) SetSubOp(op Op) { - switch n.Op() { - case OASOP, ONAME: - default: - base.Fatalf("unexpected op: %v", n.Op()) - } - n.aux = uint8(op) -} - -func (n *node) IndexMapLValue() bool { - if n.Op() != OINDEXMAP { - base.Fatalf("unexpected op: %v", n.Op()) - } - return n.aux != 0 -} - -func (n *node) SetIndexMapLValue(b bool) { - if n.Op() != OINDEXMAP { - base.Fatalf("unexpected op: %v", n.Op()) - } - if b { - n.aux = 1 - } else { - n.aux = 0 - } -} - func IsSynthetic(n Node) bool { name := n.Sym().Name return name[0] == '.' || name[0] == '~' @@ -266,110 +132,6 @@ func IsAutoTmp(n Node) bool { return n.Name().AutoTemp() } -const ( - nodeClass, _ = iota, 1 << iota // PPARAM, PAUTO, PEXTERN, etc; three bits; first in the list because frequently accessed - _, _ // second nodeClass bit - _, _ // third nodeClass bit - nodeWalkdef, _ // tracks state during typecheckdef; 2 == loop detected; two bits - _, _ // second nodeWalkdef bit - nodeTypecheck, _ // tracks state during typechecking; 2 == loop detected; two bits - _, _ // second nodeTypecheck bit - nodeInitorder, _ // tracks state during init1; two bits - _, _ // second nodeInitorder bit - _, nodeHasBreak - _, nodeNoInline // used internally by inliner to indicate that a function call should not be inlined; set for OCALLFUNC and OCALLMETH only - _, nodeImplicit // implicit OADDR or ODEREF; ++/-- statement represented as OASOP - _, nodeIsDDD // is the argument variadic - _, nodeDiag // already printed error about this - _, nodeColas // OAS resulting from := - _, nodeNonNil // guaranteed to be non-nil - _, nodeTransient // storage can be reused immediately after this statement - _, nodeBounded // bounds check unnecessary - _, nodeHasCall // expression contains a function call - _, nodeLikely // if statement condition likely -) - -func (n *node) Class() Class { return Class(n.flags.get3(nodeClass)) } -func (n *node) Walkdef() uint8 { return n.flags.get2(nodeWalkdef) } -func (n *node) Typecheck() uint8 { return n.flags.get2(nodeTypecheck) } -func (n *node) Initorder() uint8 { return n.flags.get2(nodeInitorder) } - -func (n *node) HasBreak() bool { return n.flags&nodeHasBreak != 0 } -func (n *node) NoInline() bool { return n.flags&nodeNoInline != 0 } -func (n *node) Implicit() bool { return n.flags&nodeImplicit != 0 } -func (n *node) IsDDD() bool { return n.flags&nodeIsDDD != 0 } -func (n *node) Diag() bool { return n.flags&nodeDiag != 0 } -func (n *node) Colas() bool { return n.flags&nodeColas != 0 } -func (n *node) NonNil() bool { return n.flags&nodeNonNil != 0 } -func (n *node) Transient() bool { return n.flags&nodeTransient != 0 } -func (n *node) Bounded() bool { return n.flags&nodeBounded != 0 } -func (n *node) HasCall() bool { return n.flags&nodeHasCall != 0 } -func (n *node) Likely() bool { return n.flags&nodeLikely != 0 } - -func (n *node) SetClass(b Class) { n.flags.set3(nodeClass, uint8(b)) } -func (n *node) SetWalkdef(b uint8) { n.flags.set2(nodeWalkdef, b) } -func (n *node) SetTypecheck(b uint8) { n.flags.set2(nodeTypecheck, b) } -func (n *node) SetInitorder(b uint8) { n.flags.set2(nodeInitorder, b) } - -func (n *node) SetHasBreak(b bool) { n.flags.set(nodeHasBreak, b) } -func (n *node) 
SetNoInline(b bool) { n.flags.set(nodeNoInline, b) } -func (n *node) SetImplicit(b bool) { n.flags.set(nodeImplicit, b) } -func (n *node) SetIsDDD(b bool) { n.flags.set(nodeIsDDD, b) } -func (n *node) SetDiag(b bool) { n.flags.set(nodeDiag, b) } -func (n *node) SetColas(b bool) { n.flags.set(nodeColas, b) } -func (n *node) SetTransient(b bool) { n.flags.set(nodeTransient, b) } -func (n *node) SetHasCall(b bool) { n.flags.set(nodeHasCall, b) } -func (n *node) SetLikely(b bool) { n.flags.set(nodeLikely, b) } - -// MarkNonNil marks a pointer n as being guaranteed non-nil, -// on all code paths, at all times. -// During conversion to SSA, non-nil pointers won't have nil checks -// inserted before dereferencing. See state.exprPtr. -func (n *node) MarkNonNil() { - if !n.Type().IsPtr() && !n.Type().IsUnsafePtr() { - base.Fatalf("MarkNonNil(%v), type %v", n, n.Type()) - } - n.flags.set(nodeNonNil, true) -} - -// SetBounded indicates whether operation n does not need safety checks. -// When n is an index or slice operation, n does not need bounds checks. -// When n is a dereferencing operation, n does not need nil checks. -// When n is a makeslice+copy operation, n does not need length and cap checks. -func (n *node) SetBounded(b bool) { - switch n.Op() { - case OINDEX, OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR: - // No bounds checks needed. - case ODOTPTR, ODEREF: - // No nil check needed. - case OMAKESLICECOPY: - // No length and cap checks needed - // since new slice and copied over slice data have same length. - default: - base.Fatalf("SetBounded(%v)", n) - } - n.flags.set(nodeBounded, b) -} - -// Opt returns the optimizer data for the node. -func (n *node) Opt() interface{} { - return n.opt -} - -// SetOpt sets the optimizer data for the node, which must not have been used with SetVal. -// SetOpt(nil) is ignored for Vals to simplify call sites that are clearing Opts. -func (n *node) SetOpt(x interface{}) { - n.opt = x -} - -func (n *node) Iota() int64 { - return n.Offset() -} - -func (n *node) SetIota(x int64) { - n.SetOffset(x) -} - // mayBeShared reports whether n may occur in multiple places in the AST. // Extra care must be taken when mutating such a node. func MayBeShared(n Node) bool { @@ -380,10 +142,6 @@ func MayBeShared(n Node) bool { return false } -// The compiler needs *Node to be assignable to cmd/compile/internal/ssa.Sym. -func (n *node) CanBeAnSSASym() { -} - //go:generate stringer -type=Op -trimprefix=O type Op uint8 @@ -922,91 +680,15 @@ func OrigSym(s *types.Sym) *types.Sym { return s } -// SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max]. -// n must be a slice expression. max is nil if n is a simple slice expression. -func (n *node) SliceBounds() (low, high, max Node) { - if n.List().Len() == 0 { - return nil, nil, nil - } - - switch n.Op() { - case OSLICE, OSLICEARR, OSLICESTR: - s := n.List().Slice() - return s[0], s[1], nil - case OSLICE3, OSLICE3ARR: - s := n.List().Slice() - return s[0], s[1], s[2] - } - base.Fatalf("SliceBounds op %v: %v", n.Op(), n) - return nil, nil, nil -} - -// SetSliceBounds sets n's slice bounds, where n is a slice expression. -// n must be a slice expression. If max is non-nil, n must be a full slice expression. 
-func (n *node) SetSliceBounds(low, high, max Node) {
-	switch n.Op() {
-	case OSLICE, OSLICEARR, OSLICESTR:
-		if max != nil {
-			base.Fatalf("SetSliceBounds %v given three bounds", n.Op())
-		}
-		s := n.List().Slice()
-		if s == nil {
-			if low == nil && high == nil {
-				return
-			}
-			n.PtrList().Set2(low, high)
-			return
-		}
-		s[0] = low
-		s[1] = high
-		return
-	case OSLICE3, OSLICE3ARR:
-		s := n.List().Slice()
-		if s == nil {
-			if low == nil && high == nil && max == nil {
-				return
-			}
-			n.PtrList().Set3(low, high, max)
-			return
-		}
-		s[0] = low
-		s[1] = high
-		s[2] = max
-		return
-	}
-	base.Fatalf("SetSliceBounds op %v: %v", n.Op(), n)
-}
-
-// IsSlice3 reports whether o is a slice3 op (OSLICE3, OSLICE3ARR).
-// o must be a slicing op.
-func (o Op) IsSlice3() bool {
-	switch o {
-	case OSLICE, OSLICEARR, OSLICESTR:
-		return false
-	case OSLICE3, OSLICE3ARR:
-		return true
-	}
-	base.Fatalf("IsSlice3 op %v", o)
-	return false
-}
-
 func IsConst(n Node, ct constant.Kind) bool {
 	return ConstType(n) == ct
 }
 
-// rawcopy returns a shallow copy of n.
-// Note: copy or sepcopy (rather than rawcopy) is usually the
-// correct choice (see comment with Node.copy, below).
-func (n *node) RawCopy() Node {
-	copy := *n
-	return &copy
-}
-
 // isNil reports whether n represents the universal untyped zero value "nil".
 func IsNil(n Node) bool {
 	// Check n.Orig because constant propagation may produce typed nil constants,
 	// which don't exist in the Go spec.
-	return Orig(n).Op() == ONIL
+	return n != nil && Orig(n).Op() == ONIL
 }
 
 func IsBlank(n Node) bool {
@@ -1027,8 +709,26 @@ func Nod(op Op, nleft, nright Node) Node {
 }
 
 func NodAt(pos src.XPos, op Op, nleft, nright Node) Node {
-	var n *node
 	switch op {
+	default:
+		panic("NodAt " + op.String())
+	case OADD, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE,
+		OLSH, OLT, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSUB, OXOR,
+		OCOPY, OCOMPLEX,
+		OEFACE:
+		return NewBinaryExpr(pos, op, nleft, nright)
+	case OADDR, OPTRLIT:
+		return NewAddrExpr(pos, nleft)
+	case OADDSTR:
+		return NewAddStringExpr(pos, nil)
+	case OARRAYLIT, OCOMPLIT, OMAPLIT, OSTRUCTLIT, OSLICELIT:
+		var typ Ntype
+		if nright != nil {
+			typ = nright.(Ntype)
+		}
+		n := NewCompLitExpr(pos, typ, nil)
+		n.SetOp(op)
+		return n
 	case OAS, OSELRECV:
 		n := NewAssignStmt(pos, nleft, nright)
 		n.SetOp(op)
@@ -1039,12 +739,27 @@ func NodAt(pos src.XPos, op Op, nleft, nright Node) Node {
 		return n
 	case OASOP:
 		return NewAssignOpStmt(pos, OXXX, nleft, nright)
+	case OBITNOT, ONEG, ONOT, OPLUS, ORECV,
+		OALIGNOF, OCAP, OCLOSE, OIMAG, OLEN, ONEW, ONEWOBJ,
+		OOFFSETOF, OPANIC, OREAL, OSIZEOF,
+		OCHECKNIL, OCFUNC, OIDATA, OITAB, OSPTR, OVARDEF, OVARKILL, OVARLIVE:
+		if nright != nil {
+			panic("unary nright")
+		}
+		return NewUnaryExpr(pos, op, nleft)
 	case OBLOCK:
 		return NewBlockStmt(pos, nil)
 	case OBREAK, OCONTINUE, OFALL, OGOTO, ORETJMP:
 		return NewBranchStmt(pos, op, nil)
+	case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH,
+		OAPPEND, ODELETE, OGETG, OMAKE, OPRINT, OPRINTN, ORECOVER:
+		n := NewCallExpr(pos, nleft, nil)
+		n.SetOp(op)
+		return n
 	case OCASE:
 		return NewCaseStmt(pos, nil, nil)
+	case OCONV, OCONVIFACE, OCONVNOP, ORUNESTR:
+		return NewConvExpr(pos, op, nil, nleft)
 	case ODCL, ODCLCONST, ODCLTYPE:
 		return NewDecl(pos, op, nleft)
 	case ODCLFUNC:
 		return NewFunc(pos)
 	case ODEFER:
 		return NewDeferStmt(pos, nleft)
 	case ODEREF:
 		return NewStarExpr(pos, nleft)
+	case ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OXDOT:
+		n := NewSelectorExpr(pos, nleft, nil)
+		n.SetOp(op)
+		return n
+	case ODOTTYPE, 
ODOTTYPE2: + var typ Ntype + if nright != nil { + typ = nright.(Ntype) + } + n := NewTypeAssertExpr(pos, nleft, typ) + n.SetOp(op) + return n case OEMPTY: return NewEmptyStmt(pos) case OFOR: @@ -1061,140 +788,51 @@ func NodAt(pos src.XPos, op Op, nleft, nright Node) Node { return NewGoStmt(pos, nleft) case OIF: return NewIfStmt(pos, nleft, nil, nil) + case OINDEX, OINDEXMAP: + n := NewIndexExpr(pos, nleft, nright) + n.SetOp(op) + return n case OINLMARK: return NewInlineMarkStmt(pos, types.BADWIDTH) + case OKEY, OSTRUCTKEY: + n := NewKeyExpr(pos, nleft, nright) + n.SetOp(op) + return n case OLABEL: return NewLabelStmt(pos, nil) case OLITERAL, OTYPE, OIOTA: n := newNameAt(pos, nil) n.SetOp(op) return n + case OMAKECHAN, OMAKEMAP, OMAKESLICE, OMAKESLICECOPY: + return NewMakeExpr(pos, op, nleft, nright) + case OMETHEXPR: + return NewMethodExpr(pos, op, nleft, nright) + case ONIL: + return NewNilExpr(pos) case OPACK: return NewPkgName(pos, nil, nil) + case OPAREN: + return NewParenExpr(pos, nleft) case ORANGE: return NewRangeStmt(pos, nil, nright, nil) + case ORESULT: + return NewResultExpr(pos, nil, types.BADWIDTH) case ORETURN: return NewReturnStmt(pos, nil) case OSELECT: return NewSelectStmt(pos, nil) case OSEND: return NewSendStmt(pos, nleft, nright) + case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR: + return NewSliceExpr(pos, op, nleft) + case OSLICEHEADER: + return NewSliceHeaderExpr(pos, nil, nleft, nil, nil) case OSWITCH: return NewSwitchStmt(pos, nleft, nil) case OTYPESW: return NewTypeSwitchGuard(pos, nleft, nright) - default: - n = new(node) + case OINLCALL: + return NewInlinedCallExpr(pos, nil, nil) } - n.SetOp(op) - n.SetLeft(nleft) - n.SetRight(nright) - n.SetPos(pos) - n.SetOffset(types.BADWIDTH) - n.SetOrig(n) - return n -} - -var okForNod = [OEND]bool{ - OADD: true, - OADDR: true, - OADDSTR: true, - OALIGNOF: true, - OAND: true, - OANDAND: true, - OANDNOT: true, - OAPPEND: true, - OARRAYLIT: true, - OBITNOT: true, - OBYTES2STR: true, - OBYTES2STRTMP: true, - OCALL: true, - OCALLFUNC: true, - OCALLINTER: true, - OCALLMETH: true, - OCAP: true, - OCFUNC: true, - OCHECKNIL: true, - OCLOSE: true, - OCOMPLEX: true, - OCOMPLIT: true, - OCONV: true, - OCONVIFACE: true, - OCONVNOP: true, - OCOPY: true, - ODELETE: true, - ODIV: true, - ODOT: true, - ODOTINTER: true, - ODOTMETH: true, - ODOTPTR: true, - ODOTTYPE: true, - ODOTTYPE2: true, - OEFACE: true, - OEQ: true, - OGE: true, - OGETG: true, - OGT: true, - OIDATA: true, - OIMAG: true, - OINDEX: true, - OINDEXMAP: true, - OINLCALL: true, - OITAB: true, - OKEY: true, - OLE: true, - OLEN: true, - OLSH: true, - OLT: true, - OMAKE: true, - OMAKECHAN: true, - OMAKEMAP: true, - OMAKESLICE: true, - OMAKESLICECOPY: true, - OMAPLIT: true, - OMETHEXPR: true, - OMOD: true, - OMUL: true, - ONE: true, - ONEG: true, - ONEW: true, - ONEWOBJ: true, - ONIL: true, - ONOT: true, - OOFFSETOF: true, - OOR: true, - OOROR: true, - OPANIC: true, - OPAREN: true, - OPLUS: true, - OPRINT: true, - OPRINTN: true, - OPTRLIT: true, - OREAL: true, - ORECOVER: true, - ORECV: true, - ORESULT: true, - ORSH: true, - ORUNES2STR: true, - ORUNESTR: true, - OSIZEOF: true, - OSLICE: true, - OSLICE3: true, - OSLICE3ARR: true, - OSLICEARR: true, - OSLICEHEADER: true, - OSLICELIT: true, - OSLICESTR: true, - OSPTR: true, - OSTR2BYTES: true, - OSTR2BYTESTMP: true, - OSTR2RUNES: true, - OSTRUCTKEY: true, - OSTRUCTLIT: true, - OSUB: true, - OVARDEF: true, - OVARKILL: true, - OVARLIVE: true, - OXDOT: true, - OXOR: true, } diff --git a/src/cmd/compile/internal/ir/sizeof_test.go 
b/src/cmd/compile/internal/ir/sizeof_test.go index 2f31ba8d34dd3..4a133cb999153 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -22,7 +22,6 @@ func TestSizeof(t *testing.T) { }{ {Func{}, 168, 288}, {Name{}, 128, 224}, - {node{}, 80, 136}, } for _, tt := range tests { From 45f3b646d42d73a8a54c81ada0ef1ffc11dce592 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sun, 29 Nov 2020 23:06:02 -0500 Subject: [PATCH 076/474] [dev.regabi] cmd/compile: add OSTMTEXPR Op This CL only adds the new constant, which is not safe for toolstash -cmp. Change-Id: I774463a0ab5f57113d67a8888b6ac787be68510c Reviewed-on: https://go-review.googlesource.com/c/go/+/274110 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/node.go | 1 + src/cmd/compile/internal/ir/op_string.go | 87 ++++++++++++------------ 2 files changed, 45 insertions(+), 43 deletions(-) diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 9b407b36c0418..a93a87fb681ed 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -288,6 +288,7 @@ const ( OOFFSETOF // unsafe.Offsetof(Left) OSIZEOF // unsafe.Sizeof(Left) OMETHEXPR // method expression + OSTMTEXPR // statement expression (Init; Left) // statements OBLOCK // { List } (block of code) diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go index eefdc0ee590b8..96eee439741d4 100644 --- a/src/cmd/compile/internal/ir/op_string.go +++ b/src/cmd/compile/internal/ir/op_string.go @@ -121,52 +121,53 @@ func _() { _ = x[OOFFSETOF-110] _ = x[OSIZEOF-111] _ = x[OMETHEXPR-112] - _ = x[OBLOCK-113] - _ = x[OBREAK-114] - _ = x[OCASE-115] - _ = x[OCONTINUE-116] - _ = x[ODEFER-117] - _ = x[OEMPTY-118] - _ = x[OFALL-119] - _ = x[OFOR-120] - _ = x[OFORUNTIL-121] - _ = x[OGOTO-122] - _ = x[OIF-123] - _ = x[OLABEL-124] - _ = x[OGO-125] - _ = x[ORANGE-126] - _ = x[ORETURN-127] - _ = x[OSELECT-128] - _ = x[OSWITCH-129] - _ = x[OTYPESW-130] - _ = x[OTCHAN-131] - _ = x[OTMAP-132] - _ = x[OTSTRUCT-133] - _ = x[OTINTER-134] - _ = x[OTFUNC-135] - _ = x[OTARRAY-136] - _ = x[OTSLICE-137] - _ = x[OINLCALL-138] - _ = x[OEFACE-139] - _ = x[OITAB-140] - _ = x[OIDATA-141] - _ = x[OSPTR-142] - _ = x[OCLOSUREREAD-143] - _ = x[OCFUNC-144] - _ = x[OCHECKNIL-145] - _ = x[OVARDEF-146] - _ = x[OVARKILL-147] - _ = x[OVARLIVE-148] - _ = x[ORESULT-149] - _ = x[OINLMARK-150] - _ = x[ORETJMP-151] - _ = x[OGETG-152] - _ = x[OEND-153] + _ = x[OSTMTEXPR-113] + _ = x[OBLOCK-114] + _ = x[OBREAK-115] + _ = x[OCASE-116] + _ = x[OCONTINUE-117] + _ = x[ODEFER-118] + _ = x[OEMPTY-119] + _ = x[OFALL-120] + _ = x[OFOR-121] + _ = x[OFORUNTIL-122] + _ = x[OGOTO-123] + _ = x[OIF-124] + _ = x[OLABEL-125] + _ = x[OGO-126] + _ = x[ORANGE-127] + _ = x[ORETURN-128] + _ = x[OSELECT-129] + _ = x[OSWITCH-130] + _ = x[OTYPESW-131] + _ = x[OTCHAN-132] + _ = x[OTMAP-133] + _ = x[OTSTRUCT-134] + _ = x[OTINTER-135] + _ = x[OTFUNC-136] + _ = x[OTARRAY-137] + _ = x[OTSLICE-138] + _ = x[OINLCALL-139] + _ = x[OEFACE-140] + _ = x[OITAB-141] + _ = x[OIDATA-142] + _ = x[OSPTR-143] + _ = x[OCLOSUREREAD-144] + _ = x[OCFUNC-145] + _ = x[OCHECKNIL-146] + _ = x[OVARDEF-147] + _ = x[OVARKILL-148] + _ = x[OVARLIVE-149] + _ = x[ORESULT-150] + _ = x[OINLMARK-151] + _ = x[ORETJMP-152] + _ = x[OGETG-153] + _ = x[OEND-154] } -const _Op_name = 
"XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRBLOCKBREAKCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCLOSUREREADCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND" +const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCLOSUREREADCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND" -var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 477, 480, 486, 490, 493, 497, 502, 507, 513, 518, 522, 527, 535, 543, 549, 558, 569, 576, 580, 587, 594, 602, 606, 610, 614, 621, 628, 636, 642, 650, 655, 660, 664, 672, 677, 682, 686, 689, 697, 701, 703, 708, 710, 715, 721, 727, 733, 739, 744, 748, 755, 761, 766, 772, 778, 785, 790, 794, 799, 803, 814, 819, 827, 833, 840, 847, 853, 860, 866, 870, 873} +var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 477, 480, 486, 490, 493, 497, 502, 507, 513, 518, 522, 527, 535, 543, 549, 558, 569, 576, 580, 587, 594, 602, 606, 610, 614, 621, 628, 636, 642, 650, 658, 663, 668, 672, 680, 685, 690, 694, 697, 705, 709, 711, 716, 718, 723, 729, 735, 741, 747, 752, 756, 763, 769, 774, 780, 786, 793, 798, 802, 807, 811, 822, 827, 835, 841, 848, 855, 861, 868, 874, 878, 881} func (i Op) String() string { if i >= Op(len(_Op_index)-1) { From dadfc80bc173ce4475bc76231de5259d797b0522 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 30 Nov 2020 20:34:25 -0800 Subject: [PATCH 
077/474] [dev.regabi] cmd/compile: improve findTypeLoop

When checking if a defined type is part of a type loop, we can
short-circuit if it was defined in another package. We can assume any
package we import already successfully compiled, so any types it
contains cannot be part of a type loop.

This also allows us to simplify the logic for recursing into the type
used in the type declaration, because any defined type from this
package will have a properly set up node.

Change-Id: Ic024814d95533afd9e59f2103c8ddb22bd87e900
Reviewed-on: https://go-review.googlesource.com/c/go/+/274294
Trust: Matthew Dempsky
Run-TryBot: Matthew Dempsky
Reviewed-by: Cuong Manh Le
TryBot-Result: Go Bot
---
 src/cmd/compile/internal/gc/align.go | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go
index 4f8f04d73d004..ffae8dc27b6f3 100644
--- a/src/cmd/compile/internal/gc/align.go
+++ b/src/cmd/compile/internal/gc/align.go
@@ -190,6 +190,13 @@ func findTypeLoop(t *types.Type, path *[]*types.Type) bool {
 		// recurse on the type expression used in the type
 		// declaration.
 
+		// Type imported from package, so it can't be part of
+		// a type loop (otherwise that package should have
+		// failed to compile).
+		if t.Sym.Pkg != ir.LocalPkg {
+			return false
+		}
+
 		for i, x := range *path {
 			if x == t {
 				*path = (*path)[i:]
@@ -198,10 +205,8 @@
 		}
 		*path = append(*path, t)
 
-		if n := ir.AsNode(t.Nod); n != nil {
-			if name := n.Name(); name != nil && name.Ntype != nil && findTypeLoop(name.Ntype.Type(), path) {
-				return true
-			}
+		if findTypeLoop(ir.AsNode(t.Nod).Name().Ntype.Type(), path) {
+			return true
 		}
 		*path = (*path)[:len(*path)-1]
 	} else {

From 4da41fb3f8aa2e81b6ed371b617643042ba5e170 Mon Sep 17 00:00:00 2001
From: Russ Cox
Date: Mon, 30 Nov 2020 21:18:48 -0500
Subject: [PATCH 078/474] [dev.regabi] cmd/compile: use ir.Copy instead of
 direct use of RawCopy
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The ONIL export bug happened because the logic about maintaining an
“implicit” orig pointer in the comments around ir.Orig only applies to
Copy and SepCopy, not to direct use of RawCopy. I'd forgotten those
could exist.

The sole direct use of RawCopy was for the OLITERAL/ONIL case. The
ONIL case is now provably indistinguishable from Copy, since NilExpr
does not have an explicit Orig field, so for NilExpr RawCopy and Copy
are the same. The OLITERAL is not, but we can reconstruct the effect
with Copy+SetOrig to be explicit that we need the orig link.

The next CL will unexport RawCopy.

Also fix a typo in MapType doc comment.

Passes buildall w/ toolstash -cmp.

Change-Id: I876a85ff188e6d1cd4c0dfa385be32482e0de0d4
Reviewed-on: https://go-review.googlesource.com/c/go/+/274292
Trust: Russ Cox
Run-TryBot: Russ Cox
Reviewed-by: Matthew Dempsky
TryBot-Result: Go Bot
---
 src/cmd/compile/internal/gc/const.go | 7 ++++++-
 src/cmd/compile/internal/ir/type.go  | 2 +-
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go
index 3c161d8e127d9..4dee373bfa55c 100644
--- a/src/cmd/compile/internal/gc/const.go
+++ b/src/cmd/compile/internal/gc/const.go
@@ -118,7 +118,12 @@ func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir
 	if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
 		// Can't always set n.Type directly on OLITERAL nodes.
// See discussion on CL 20813. - n = n.RawCopy() + old := n + n = ir.Copy(old) + if old.Op() == ir.OLITERAL { + // Keep untyped constants in their original untyped syntax for error messages. + n.(ir.OrigNode).SetOrig(old) + } } // Nil is technically not a constant, so handle it specially. diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go index 39411ed431976..519a7291b0432 100644 --- a/src/cmd/compile/internal/ir/type.go +++ b/src/cmd/compile/internal/ir/type.go @@ -92,7 +92,7 @@ func (n *ChanType) DeepCopy(pos src.XPos) Node { return NewChanType(n.posOr(pos), DeepCopy(pos, n.Elem), n.Dir) } -// A MapType represents a map[Key]Value type syntax.u +// A MapType represents a map[Key]Value type syntax. type MapType struct { miniType Key Node From ecff7628ead3b0191f5fe191864ee47fcc90bb92 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 30 Nov 2020 21:20:45 -0500 Subject: [PATCH 079/474] [dev.regabi] cmd/compile: unexport Node.RawCopy RawCopy breaks the invariant that ir.Orig depends on for allowing nodes to omit keeping their own orig fields. Avoid surprises by unexporting it. The only use in package gc was removed in the previous CL. This one is a straight global search and replace RawCopy -> rawCopy. Change-Id: Ia99c0f4665bf7ed4f878cc44456d5fbdf33bab8d Reviewed-on: https://go-review.googlesource.com/c/go/+/274293 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky TryBot-Result: Go Bot --- src/cmd/compile/internal/ir/copy.go | 4 +-- src/cmd/compile/internal/ir/expr.go | 46 ++++++++++++++--------------- src/cmd/compile/internal/ir/func.go | 2 +- src/cmd/compile/internal/ir/mini.go | 2 +- src/cmd/compile/internal/ir/name.go | 4 +-- src/cmd/compile/internal/ir/node.go | 2 +- src/cmd/compile/internal/ir/stmt.go | 40 ++++++++++++------------- src/cmd/compile/internal/ir/type.go | 16 +++++----- 8 files changed, 58 insertions(+), 58 deletions(-) diff --git a/src/cmd/compile/internal/ir/copy.go b/src/cmd/compile/internal/ir/copy.go index 7a1611d0d6e62..a356074bb8435 100644 --- a/src/cmd/compile/internal/ir/copy.go +++ b/src/cmd/compile/internal/ir/copy.go @@ -43,7 +43,7 @@ func Orig(n Node) Node { // SepCopy returns a separate shallow copy of n, // breaking any Orig link to any other nodes. func SepCopy(n Node) Node { - n = n.RawCopy() + n = n.rawCopy() if n, ok := n.(OrigNode); ok { n.SetOrig(n) } @@ -57,7 +57,7 @@ func SepCopy(n Node) Node { // The specific semantics surrounding Orig are subtle but right for most uses. // See issues #26855 and #27765 for pitfalls. 
func Copy(n Node) Node { - copy := n.RawCopy() + copy := n.rawCopy() if n, ok := n.(OrigNode); ok && n.Orig() == n { copy.(OrigNode).SetOrig(copy) } diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index be9f486682150..87593520a1d29 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -75,7 +75,7 @@ func NewAddStringExpr(pos src.XPos, list []Node) *AddStringExpr { func (n *AddStringExpr) String() string { return fmt.Sprint(n) } func (n *AddStringExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *AddStringExpr) RawCopy() Node { c := *n; return &c } +func (n *AddStringExpr) rawCopy() Node { c := *n; return &c } func (n *AddStringExpr) List() Nodes { return n.list } func (n *AddStringExpr) PtrList() *Nodes { return &n.list } func (n *AddStringExpr) SetList(x Nodes) { n.list = x } @@ -97,7 +97,7 @@ func NewAddrExpr(pos src.XPos, x Node) *AddrExpr { func (n *AddrExpr) String() string { return fmt.Sprint(n) } func (n *AddrExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *AddrExpr) RawCopy() Node { c := *n; return &c } +func (n *AddrExpr) rawCopy() Node { c := *n; return &c } func (n *AddrExpr) Left() Node { return n.X } func (n *AddrExpr) SetLeft(x Node) { n.X = x } func (n *AddrExpr) Right() Node { return n.Alloc } @@ -129,7 +129,7 @@ func NewBinaryExpr(pos src.XPos, op Op, x, y Node) *BinaryExpr { func (n *BinaryExpr) String() string { return fmt.Sprint(n) } func (n *BinaryExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *BinaryExpr) RawCopy() Node { c := *n; return &c } +func (n *BinaryExpr) rawCopy() Node { c := *n; return &c } func (n *BinaryExpr) Left() Node { return n.X } func (n *BinaryExpr) SetLeft(x Node) { n.X = x } func (n *BinaryExpr) Right() Node { return n.Y } @@ -170,7 +170,7 @@ func NewCallExpr(pos src.XPos, fun Node, args []Node) *CallExpr { func (n *CallExpr) String() string { return fmt.Sprint(n) } func (n *CallExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *CallExpr) RawCopy() Node { c := *n; return &c } +func (n *CallExpr) rawCopy() Node { c := *n; return &c } func (n *CallExpr) Orig() Node { return n.orig } func (n *CallExpr) SetOrig(x Node) { n.orig = x } func (n *CallExpr) Left() Node { return n.X } @@ -218,7 +218,7 @@ func NewCallPartExpr(pos src.XPos, x Node, method *Name, fn *Func) *CallPartExpr func (n *CallPartExpr) String() string { return fmt.Sprint(n) } func (n *CallPartExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *CallPartExpr) RawCopy() Node { c := *n; return &c } +func (n *CallPartExpr) rawCopy() Node { c := *n; return &c } func (n *CallPartExpr) Func() *Func { return n.fn } func (n *CallPartExpr) Left() Node { return n.X } func (n *CallPartExpr) Right() Node { return n.Method } @@ -240,7 +240,7 @@ func NewClosureExpr(pos src.XPos, fn *Func) *ClosureExpr { func (n *ClosureExpr) String() string { return fmt.Sprint(n) } func (n *ClosureExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ClosureExpr) RawCopy() Node { c := *n; return &c } +func (n *ClosureExpr) rawCopy() Node { c := *n; return &c } func (n *ClosureExpr) Func() *Func { return n.fn } // A ClosureRead denotes reading a variable stored within a closure struct. 
@@ -258,7 +258,7 @@ func NewClosureRead(typ *types.Type, offset int64) *ClosureRead { func (n *ClosureRead) String() string { return fmt.Sprint(n) } func (n *ClosureRead) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ClosureRead) RawCopy() Node { c := *n; return &c } +func (n *ClosureRead) rawCopy() Node { c := *n; return &c } func (n *ClosureRead) Type() *types.Type { return n.typ } func (n *ClosureRead) Offset() int64 { return n.offset } @@ -282,7 +282,7 @@ func NewCompLitExpr(pos src.XPos, typ Ntype, list []Node) *CompLitExpr { func (n *CompLitExpr) String() string { return fmt.Sprint(n) } func (n *CompLitExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *CompLitExpr) RawCopy() Node { c := *n; return &c } +func (n *CompLitExpr) rawCopy() Node { c := *n; return &c } func (n *CompLitExpr) Orig() Node { return n.orig } func (n *CompLitExpr) SetOrig(x Node) { n.orig = x } func (n *CompLitExpr) Right() Node { return n.Ntype } @@ -319,7 +319,7 @@ func NewConvExpr(pos src.XPos, op Op, typ *types.Type, x Node) *ConvExpr { func (n *ConvExpr) String() string { return fmt.Sprint(n) } func (n *ConvExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ConvExpr) RawCopy() Node { c := *n; return &c } +func (n *ConvExpr) rawCopy() Node { c := *n; return &c } func (n *ConvExpr) Orig() Node { return n.orig } func (n *ConvExpr) SetOrig(x Node) { n.orig = x } func (n *ConvExpr) Left() Node { return n.X } @@ -351,7 +351,7 @@ func NewIndexExpr(pos src.XPos, x, index Node) *IndexExpr { func (n *IndexExpr) String() string { return fmt.Sprint(n) } func (n *IndexExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *IndexExpr) RawCopy() Node { c := *n; return &c } +func (n *IndexExpr) rawCopy() Node { c := *n; return &c } func (n *IndexExpr) Left() Node { return n.X } func (n *IndexExpr) SetLeft(x Node) { n.X = x } func (n *IndexExpr) Right() Node { return n.Index } @@ -388,7 +388,7 @@ func NewKeyExpr(pos src.XPos, key, value Node) *KeyExpr { func (n *KeyExpr) String() string { return fmt.Sprint(n) } func (n *KeyExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *KeyExpr) RawCopy() Node { c := *n; return &c } +func (n *KeyExpr) rawCopy() Node { c := *n; return &c } func (n *KeyExpr) Left() Node { return n.Key } func (n *KeyExpr) SetLeft(x Node) { n.Key = x } func (n *KeyExpr) Right() Node { return n.Value } @@ -425,7 +425,7 @@ func NewInlinedCallExpr(pos src.XPos, body, retvars []Node) *InlinedCallExpr { func (n *InlinedCallExpr) String() string { return fmt.Sprint(n) } func (n *InlinedCallExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *InlinedCallExpr) RawCopy() Node { c := *n; return &c } +func (n *InlinedCallExpr) rawCopy() Node { c := *n; return &c } func (n *InlinedCallExpr) Body() Nodes { return n.body } func (n *InlinedCallExpr) PtrBody() *Nodes { return &n.body } func (n *InlinedCallExpr) SetBody(x Nodes) { n.body = x } @@ -451,7 +451,7 @@ func NewMakeExpr(pos src.XPos, op Op, len, cap Node) *MakeExpr { func (n *MakeExpr) String() string { return fmt.Sprint(n) } func (n *MakeExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *MakeExpr) RawCopy() Node { c := *n; return &c } +func (n *MakeExpr) rawCopy() Node { c := *n; return &c } func (n *MakeExpr) Left() Node { return n.Len } func (n *MakeExpr) SetLeft(x Node) { n.Len = x } func (n *MakeExpr) Right() Node { return n.Cap } @@ -486,7 +486,7 @@ func NewMethodExpr(pos src.XPos, op Op, x, m Node) *MethodExpr { func (n 
*MethodExpr) String() string { return fmt.Sprint(n) } func (n *MethodExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *MethodExpr) RawCopy() Node { c := *n; return &c } +func (n *MethodExpr) rawCopy() Node { c := *n; return &c } func (n *MethodExpr) Left() Node { return n.X } func (n *MethodExpr) SetLeft(x Node) { n.X = x } func (n *MethodExpr) Right() Node { return n.M } @@ -514,7 +514,7 @@ func NewNilExpr(pos src.XPos) *NilExpr { func (n *NilExpr) String() string { return fmt.Sprint(n) } func (n *NilExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *NilExpr) RawCopy() Node { c := *n; return &c } +func (n *NilExpr) rawCopy() Node { c := *n; return &c } func (n *NilExpr) Sym() *types.Sym { return n.sym } func (n *NilExpr) SetSym(x *types.Sym) { n.sym = x } @@ -534,7 +534,7 @@ func NewParenExpr(pos src.XPos, x Node) *ParenExpr { func (n *ParenExpr) String() string { return fmt.Sprint(n) } func (n *ParenExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ParenExpr) RawCopy() Node { c := *n; return &c } +func (n *ParenExpr) rawCopy() Node { c := *n; return &c } func (n *ParenExpr) Left() Node { return n.X } func (n *ParenExpr) SetLeft(x Node) { n.X = x } @@ -566,7 +566,7 @@ func NewResultExpr(pos src.XPos, typ *types.Type, offset int64) *ResultExpr { func (n *ResultExpr) String() string { return fmt.Sprint(n) } func (n *ResultExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ResultExpr) RawCopy() Node { c := *n; return &c } +func (n *ResultExpr) rawCopy() Node { c := *n; return &c } func (n *ResultExpr) Offset() int64 { return n.offset } func (n *ResultExpr) SetOffset(x int64) { n.offset = x } @@ -597,7 +597,7 @@ func (n *SelectorExpr) SetOp(op Op) { func (n *SelectorExpr) String() string { return fmt.Sprint(n) } func (n *SelectorExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *SelectorExpr) RawCopy() Node { c := *n; return &c } +func (n *SelectorExpr) rawCopy() Node { c := *n; return &c } func (n *SelectorExpr) Left() Node { return n.X } func (n *SelectorExpr) SetLeft(x Node) { n.X = x } func (n *SelectorExpr) Sym() *types.Sym { return n.Sel } @@ -625,7 +625,7 @@ func NewSliceExpr(pos src.XPos, op Op, x Node) *SliceExpr { func (n *SliceExpr) String() string { return fmt.Sprint(n) } func (n *SliceExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *SliceExpr) RawCopy() Node { c := *n; return &c } +func (n *SliceExpr) rawCopy() Node { c := *n; return &c } func (n *SliceExpr) Left() Node { return n.X } func (n *SliceExpr) SetLeft(x Node) { n.X = x } func (n *SliceExpr) List() Nodes { return n.list } @@ -727,7 +727,7 @@ func NewSliceHeaderExpr(pos src.XPos, typ *types.Type, ptr, len, cap Node) *Slic func (n *SliceHeaderExpr) String() string { return fmt.Sprint(n) } func (n *SliceHeaderExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *SliceHeaderExpr) RawCopy() Node { c := *n; return &c } +func (n *SliceHeaderExpr) rawCopy() Node { c := *n; return &c } func (n *SliceHeaderExpr) Left() Node { return n.Ptr } func (n *SliceHeaderExpr) SetLeft(x Node) { n.Ptr = x } func (n *SliceHeaderExpr) List() Nodes { return n.lenCap } @@ -750,7 +750,7 @@ func NewStarExpr(pos src.XPos, x Node) *StarExpr { func (n *StarExpr) String() string { return fmt.Sprint(n) } func (n *StarExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *StarExpr) RawCopy() Node { c := *n; return &c } +func (n *StarExpr) rawCopy() Node { c := *n; return &c } func (n *StarExpr) 
Left() Node { return n.X } func (n *StarExpr) SetLeft(x Node) { n.X = x } @@ -796,7 +796,7 @@ func NewTypeAssertExpr(pos src.XPos, x Node, typ Ntype) *TypeAssertExpr { func (n *TypeAssertExpr) String() string { return fmt.Sprint(n) } func (n *TypeAssertExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *TypeAssertExpr) RawCopy() Node { c := *n; return &c } +func (n *TypeAssertExpr) rawCopy() Node { c := *n; return &c } func (n *TypeAssertExpr) Left() Node { return n.X } func (n *TypeAssertExpr) SetLeft(x Node) { n.X = x } func (n *TypeAssertExpr) Right() Node { return n.Ntype } @@ -830,7 +830,7 @@ func NewUnaryExpr(pos src.XPos, op Op, x Node) *UnaryExpr { func (n *UnaryExpr) String() string { return fmt.Sprint(n) } func (n *UnaryExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *UnaryExpr) RawCopy() Node { c := *n; return &c } +func (n *UnaryExpr) rawCopy() Node { c := *n; return &c } func (n *UnaryExpr) Left() Node { return n.X } func (n *UnaryExpr) SetLeft(x Node) { n.X = x } diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 9d2a8ad94bdfb..3fc8597ef0518 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -116,7 +116,7 @@ func NewFunc(pos src.XPos) *Func { func (f *Func) String() string { return fmt.Sprint(f) } func (f *Func) Format(s fmt.State, verb rune) { FmtNode(f, s, verb) } -func (f *Func) RawCopy() Node { panic(f.no("RawCopy")) } +func (f *Func) rawCopy() Node { panic(f.no("rawCopy")) } func (f *Func) Func() *Func { return f } func (f *Func) Body() Nodes { return f.body } func (f *Func) PtrBody() *Nodes { return &f.body } diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go index d73ec4ecd5ec0..909ca0220d479 100644 --- a/src/cmd/compile/internal/ir/mini.go +++ b/src/cmd/compile/internal/ir/mini.go @@ -19,7 +19,7 @@ import ( // must at the least provide: // // func (n *MyNode) String() string { return fmt.Sprint(n) } -// func (n *MyNode) RawCopy() Node { c := *n; return &c } +// func (n *MyNode) rawCopy() Node { c := *n; return &c } // func (n *MyNode) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } // // The embedding struct should also fill in n.op in its constructor, diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 1bc6bea3b670b..76abb454eedcd 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -143,7 +143,7 @@ func newNameAt(pos src.XPos, sym *types.Sym) *Name { func (n *Name) String() string { return fmt.Sprint(n) } func (n *Name) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *Name) RawCopy() Node { c := *n; return &c } +func (n *Name) rawCopy() Node { c := *n; return &c } func (n *Name) Name() *Name { return n } func (n *Name) Sym() *types.Sym { return n.sym } func (n *Name) SetSym(x *types.Sym) { n.sym = x } @@ -370,7 +370,7 @@ type PkgName struct { func (p *PkgName) String() string { return fmt.Sprint(p) } func (p *PkgName) Format(s fmt.State, verb rune) { FmtNode(p, s, verb) } -func (p *PkgName) RawCopy() Node { c := *p; return &c } +func (p *PkgName) rawCopy() Node { c := *p; return &c } func (p *PkgName) Sym() *types.Sym { return p.sym } func (*PkgName) CanBeNtype() {} diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index a93a87fb681ed..a7144eee44552 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -28,7 +28,7 @@ type Node 
interface { SetPos(x src.XPos) // For making copies. Mainly used by Copy and SepCopy. - RawCopy() Node + rawCopy() Node // Abstract graph structure, for generic traversals. Op() Op diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index 251683551367f..91714e38e3dd2 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -31,7 +31,7 @@ func NewDecl(pos src.XPos, op Op, x Node) *Decl { func (n *Decl) String() string { return fmt.Sprint(n) } func (n *Decl) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *Decl) RawCopy() Node { c := *n; return &c } +func (n *Decl) rawCopy() Node { c := *n; return &c } func (n *Decl) Left() Node { return n.X } func (n *Decl) SetLeft(x Node) { n.X = x } @@ -70,7 +70,7 @@ func NewAssignListStmt(pos src.XPos, lhs, rhs []Node) *AssignListStmt { func (n *AssignListStmt) String() string { return fmt.Sprint(n) } func (n *AssignListStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *AssignListStmt) RawCopy() Node { c := *n; return &c } +func (n *AssignListStmt) rawCopy() Node { c := *n; return &c } func (n *AssignListStmt) List() Nodes { return n.Lhs } func (n *AssignListStmt) PtrList() *Nodes { return &n.Lhs } @@ -112,7 +112,7 @@ func NewAssignStmt(pos src.XPos, x, y Node) *AssignStmt { func (n *AssignStmt) String() string { return fmt.Sprint(n) } func (n *AssignStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *AssignStmt) RawCopy() Node { c := *n; return &c } +func (n *AssignStmt) rawCopy() Node { c := *n; return &c } func (n *AssignStmt) Left() Node { return n.X } func (n *AssignStmt) SetLeft(x Node) { n.X = x } @@ -151,7 +151,7 @@ func NewAssignOpStmt(pos src.XPos, op Op, x, y Node) *AssignOpStmt { func (n *AssignOpStmt) String() string { return fmt.Sprint(n) } func (n *AssignOpStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *AssignOpStmt) RawCopy() Node { c := *n; return &c } +func (n *AssignOpStmt) rawCopy() Node { c := *n; return &c } func (n *AssignOpStmt) Left() Node { return n.X } func (n *AssignOpStmt) SetLeft(x Node) { n.X = x } @@ -180,7 +180,7 @@ func NewBlockStmt(pos src.XPos, list []Node) *BlockStmt { func (n *BlockStmt) String() string { return fmt.Sprint(n) } func (n *BlockStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *BlockStmt) RawCopy() Node { c := *n; return &c } +func (n *BlockStmt) rawCopy() Node { c := *n; return &c } func (n *BlockStmt) List() Nodes { return n.list } func (n *BlockStmt) PtrList() *Nodes { return &n.list } func (n *BlockStmt) SetList(x Nodes) { n.list = x } @@ -209,7 +209,7 @@ func NewBranchStmt(pos src.XPos, op Op, label *types.Sym) *BranchStmt { func (n *BranchStmt) String() string { return fmt.Sprint(n) } func (n *BranchStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *BranchStmt) RawCopy() Node { c := *n; return &c } +func (n *BranchStmt) rawCopy() Node { c := *n; return &c } func (n *BranchStmt) Sym() *types.Sym { return n.Label } func (n *BranchStmt) SetSym(sym *types.Sym) { n.Label = sym } @@ -233,7 +233,7 @@ func NewCaseStmt(pos src.XPos, list, body []Node) *CaseStmt { func (n *CaseStmt) String() string { return fmt.Sprint(n) } func (n *CaseStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *CaseStmt) RawCopy() Node { c := *n; return &c } +func (n *CaseStmt) rawCopy() Node { c := *n; return &c } func (n *CaseStmt) List() Nodes { return n.list } func (n *CaseStmt) PtrList() *Nodes { return &n.list } func (n 
*CaseStmt) SetList(x Nodes) { n.list = x } @@ -261,7 +261,7 @@ func NewDeferStmt(pos src.XPos, call Node) *DeferStmt { func (n *DeferStmt) String() string { return fmt.Sprint(n) } func (n *DeferStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *DeferStmt) RawCopy() Node { c := *n; return &c } +func (n *DeferStmt) rawCopy() Node { c := *n; return &c } func (n *DeferStmt) Left() Node { return n.Call } func (n *DeferStmt) SetLeft(x Node) { n.Call = x } @@ -280,7 +280,7 @@ func NewEmptyStmt(pos src.XPos) *EmptyStmt { func (n *EmptyStmt) String() string { return fmt.Sprint(n) } func (n *EmptyStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *EmptyStmt) RawCopy() Node { c := *n; return &c } +func (n *EmptyStmt) rawCopy() Node { c := *n; return &c } // A ForStmt is a non-range for loop: for Init; Cond; Post { Body } // Op can be OFOR or OFORUNTIL (!Cond). @@ -305,7 +305,7 @@ func NewForStmt(pos src.XPos, init []Node, cond, post Node, body []Node) *ForStm func (n *ForStmt) String() string { return fmt.Sprint(n) } func (n *ForStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ForStmt) RawCopy() Node { c := *n; return &c } +func (n *ForStmt) rawCopy() Node { c := *n; return &c } func (n *ForStmt) Sym() *types.Sym { return n.Label } func (n *ForStmt) SetSym(x *types.Sym) { n.Label = x } func (n *ForStmt) Left() Node { return n.Cond } @@ -343,7 +343,7 @@ func NewGoStmt(pos src.XPos, call Node) *GoStmt { func (n *GoStmt) String() string { return fmt.Sprint(n) } func (n *GoStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *GoStmt) RawCopy() Node { c := *n; return &c } +func (n *GoStmt) rawCopy() Node { c := *n; return &c } func (n *GoStmt) Left() Node { return n.Call } func (n *GoStmt) SetLeft(x Node) { n.Call = x } @@ -368,7 +368,7 @@ func NewIfStmt(pos src.XPos, cond Node, body, els []Node) *IfStmt { func (n *IfStmt) String() string { return fmt.Sprint(n) } func (n *IfStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *IfStmt) RawCopy() Node { c := *n; return &c } +func (n *IfStmt) rawCopy() Node { c := *n; return &c } func (n *IfStmt) Left() Node { return n.Cond } func (n *IfStmt) SetLeft(x Node) { n.Cond = x } func (n *IfStmt) Body() Nodes { return n.body } @@ -395,7 +395,7 @@ func NewInlineMarkStmt(pos src.XPos, index int64) *InlineMarkStmt { func (n *InlineMarkStmt) String() string { return fmt.Sprint(n) } func (n *InlineMarkStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *InlineMarkStmt) RawCopy() Node { c := *n; return &c } +func (n *InlineMarkStmt) rawCopy() Node { c := *n; return &c } func (n *InlineMarkStmt) Offset() int64 { return n.Index } func (n *InlineMarkStmt) SetOffset(x int64) { n.Index = x } @@ -414,7 +414,7 @@ func NewLabelStmt(pos src.XPos, label *types.Sym) *LabelStmt { func (n *LabelStmt) String() string { return fmt.Sprint(n) } func (n *LabelStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *LabelStmt) RawCopy() Node { c := *n; return &c } +func (n *LabelStmt) rawCopy() Node { c := *n; return &c } func (n *LabelStmt) Sym() *types.Sym { return n.Label } func (n *LabelStmt) SetSym(x *types.Sym) { n.Label = x } @@ -442,7 +442,7 @@ func NewRangeStmt(pos src.XPos, vars []Node, x Node, body []Node) *RangeStmt { func (n *RangeStmt) String() string { return fmt.Sprint(n) } func (n *RangeStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *RangeStmt) RawCopy() Node { c := *n; return &c } +func (n *RangeStmt) rawCopy() Node { c := 
*n; return &c } func (n *RangeStmt) Sym() *types.Sym { return n.Label } func (n *RangeStmt) SetSym(x *types.Sym) { n.Label = x } func (n *RangeStmt) Right() Node { return n.X } @@ -478,7 +478,7 @@ func NewReturnStmt(pos src.XPos, results []Node) *ReturnStmt { func (n *ReturnStmt) String() string { return fmt.Sprint(n) } func (n *ReturnStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ReturnStmt) RawCopy() Node { c := *n; return &c } +func (n *ReturnStmt) rawCopy() Node { c := *n; return &c } func (n *ReturnStmt) Orig() Node { return n.orig } func (n *ReturnStmt) SetOrig(x Node) { n.orig = x } func (n *ReturnStmt) List() Nodes { return n.Results } @@ -507,7 +507,7 @@ func NewSelectStmt(pos src.XPos, cases []Node) *SelectStmt { func (n *SelectStmt) String() string { return fmt.Sprint(n) } func (n *SelectStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *SelectStmt) RawCopy() Node { c := *n; return &c } +func (n *SelectStmt) rawCopy() Node { c := *n; return &c } func (n *SelectStmt) List() Nodes { return n.Cases } func (n *SelectStmt) PtrList() *Nodes { return &n.Cases } func (n *SelectStmt) SetList(x Nodes) { n.Cases = x } @@ -535,7 +535,7 @@ func NewSendStmt(pos src.XPos, ch, value Node) *SendStmt { func (n *SendStmt) String() string { return fmt.Sprint(n) } func (n *SendStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *SendStmt) RawCopy() Node { c := *n; return &c } +func (n *SendStmt) rawCopy() Node { c := *n; return &c } func (n *SendStmt) Left() Node { return n.Chan } func (n *SendStmt) SetLeft(x Node) { n.Chan = x } @@ -564,7 +564,7 @@ func NewSwitchStmt(pos src.XPos, tag Node, cases []Node) *SwitchStmt { func (n *SwitchStmt) String() string { return fmt.Sprint(n) } func (n *SwitchStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *SwitchStmt) RawCopy() Node { c := *n; return &c } +func (n *SwitchStmt) rawCopy() Node { c := *n; return &c } func (n *SwitchStmt) Left() Node { return n.Tag } func (n *SwitchStmt) SetLeft(x Node) { n.Tag = x } func (n *SwitchStmt) List() Nodes { return n.Cases } @@ -597,7 +597,7 @@ func NewTypeSwitchGuard(pos src.XPos, name, x Node) *TypeSwitchGuard { func (n *TypeSwitchGuard) String() string { return fmt.Sprint(n) } func (n *TypeSwitchGuard) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *TypeSwitchGuard) RawCopy() Node { c := *n; return &c } +func (n *TypeSwitchGuard) rawCopy() Node { c := *n; return &c } func (n *TypeSwitchGuard) Left() Node { if n.name == nil { diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go index 519a7291b0432..af8db15e8473f 100644 --- a/src/cmd/compile/internal/ir/type.go +++ b/src/cmd/compile/internal/ir/type.go @@ -78,7 +78,7 @@ func NewChanType(pos src.XPos, elem Node, dir types.ChanDir) *ChanType { func (n *ChanType) String() string { return fmt.Sprint(n) } func (n *ChanType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ChanType) RawCopy() Node { c := *n; return &c } +func (n *ChanType) rawCopy() Node { c := *n; return &c } func (n *ChanType) SetOTYPE(t *types.Type) { n.setOTYPE(t, n) n.Elem = nil @@ -108,7 +108,7 @@ func NewMapType(pos src.XPos, key, elem Node) *MapType { func (n *MapType) String() string { return fmt.Sprint(n) } func (n *MapType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *MapType) RawCopy() Node { c := *n; return &c } +func (n *MapType) rawCopy() Node { c := *n; return &c } func (n *MapType) SetOTYPE(t *types.Type) { n.setOTYPE(t, n) 
 	n.Key = nil
@@ -138,7 +138,7 @@ func NewStructType(pos src.XPos, fields []*Field) *StructType {
 
 func (n *StructType) String() string { return fmt.Sprint(n) }
 func (n *StructType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *StructType) RawCopy() Node { c := *n; return &c }
+func (n *StructType) rawCopy() Node { c := *n; return &c }
 func (n *StructType) SetOTYPE(t *types.Type) {
 	n.setOTYPE(t, n)
 	n.Fields = nil
@@ -175,7 +175,7 @@ func NewInterfaceType(pos src.XPos, methods []*Field) *InterfaceType {
 
 func (n *InterfaceType) String() string { return fmt.Sprint(n) }
 func (n *InterfaceType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *InterfaceType) RawCopy() Node { c := *n; return &c }
+func (n *InterfaceType) rawCopy() Node { c := *n; return &c }
 func (n *InterfaceType) SetOTYPE(t *types.Type) {
 	n.setOTYPE(t, n)
 	n.Methods = nil
@@ -206,7 +206,7 @@ func NewFuncType(pos src.XPos, rcvr *Field, args, results []*Field) *FuncType {
 
 func (n *FuncType) String() string { return fmt.Sprint(n) }
 func (n *FuncType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *FuncType) RawCopy() Node { c := *n; return &c }
+func (n *FuncType) rawCopy() Node { c := *n; return &c }
 
 func (n *FuncType) SetOTYPE(t *types.Type) {
 	n.setOTYPE(t, n)
@@ -293,7 +293,7 @@ func NewSliceType(pos src.XPos, elem Node) *SliceType {
 
 func (n *SliceType) String() string { return fmt.Sprint(n) }
 func (n *SliceType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *SliceType) RawCopy() Node { c := *n; return &c }
+func (n *SliceType) rawCopy() Node { c := *n; return &c }
 func (n *SliceType) SetOTYPE(t *types.Type) {
 	n.setOTYPE(t, n)
 	n.Elem = nil
@@ -324,7 +324,7 @@ func NewArrayType(pos src.XPos, size Node, elem Node) *ArrayType {
 
 func (n *ArrayType) String() string { return fmt.Sprint(n) }
 func (n *ArrayType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *ArrayType) RawCopy() Node { c := *n; return &c }
+func (n *ArrayType) rawCopy() Node { c := *n; return &c }
 
 func (n *ArrayType) DeepCopy(pos src.XPos) Node {
 	if n.op == OTYPE {
@@ -355,7 +355,7 @@ func newTypeNode(pos src.XPos, typ *types.Type) *typeNode {
 
 func (n *typeNode) String() string { return fmt.Sprint(n) }
 func (n *typeNode) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *typeNode) RawCopy() Node { c := *n; return &c }
+func (n *typeNode) rawCopy() Node { c := *n; return &c }
 func (n *typeNode) Type() *types.Type { return n.typ }
 func (n *typeNode) Sym() *types.Sym { return n.typ.Sym }
 func (n *typeNode) CanBeNtype() {}

From 2d6ff998edc0f3877ee24d28647a494491742f25 Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Sun, 29 Nov 2020 14:06:17 -0800
Subject: [PATCH 080/474] [dev.regabi] cmd/compile: process //go:linknames
 after declarations

Allows emitting errors about ineffectual //go:linkname directives.

In particular, this exposed: a typo in os2_aix.go; redundant (but
harmless) directives for libc_pipe in both os3_solaris.go and
syscall2_solaris.go; and a bunch of useless //go:linkname directives
in macOS wrapper code.

However, because there are also ineffectual directives in the vendored
macOS code from x/sys, that can't be an error just yet. So instead we
print a warning (including a heads-up that it will be promoted to an
error in Go 1.17) to prevent backsliding while we fix and re-vendor
that code.

Passes toolstash-check.
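For a concrete picture of what the new check catches, here is a minimal
hypothetical sketch (the local name below is invented for illustration;
it is not from this CL). A //go:linkname whose local symbol has no
corresponding declaration is ineffectual, since there is no object
symbol for the directive to attach to, and now draws the new warning:

	package p

	import _ "unsafe" // required for //go:linkname

	// "undeclaredName" is never declared in this package, so the
	// compiler now warns: //go:linkname must refer to declared
	// function or variable (will be an error in Go 1.17).
	//go:linkname undeclaredName runtime.fastrand

Adding a matching function or variable declaration for undeclaredName
silences the warning, because the local symbol's Def is then a proper
ONAME node.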
Change-Id: I59badeab5df0d8b3abfd14c6066e9bb00e840f73 Reviewed-on: https://go-review.googlesource.com/c/go/+/273986 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Russ Cox Trust: Matthew Dempsky --- src/cmd/compile/internal/gc/noder.go | 43 ++++--- .../x509/internal/macos/corefoundation.go | 9 -- src/crypto/x509/internal/macos/security.go | 4 - src/go/types/stdlib_test.go | 1 + src/runtime/os2_aix.go | 4 +- src/runtime/syscall2_solaris.go | 2 - src/syscall/mksyscall.pl | 3 - src/syscall/syscall_darwin.go | 3 - src/syscall/syscall_darwin_amd64.go | 1 - src/syscall/syscall_darwin_arm64.go | 1 - src/syscall/zsyscall_darwin_amd64.go | 121 ------------------ src/syscall/zsyscall_darwin_arm64.go | 121 ------------------ test/linkname2.go | 27 ++++ 13 files changed, 58 insertions(+), 282 deletions(-) create mode 100644 test/linkname2.go diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 6a5afe7687f00..1340068c722c0 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -75,6 +75,10 @@ func parseFiles(filenames []string) uint { testdclstack() } + for _, p := range noders { + p.processPragmas() + } + ir.LocalPkg.Height = myheight return lines @@ -258,23 +262,27 @@ func (p *noder) node() { xtop = append(xtop, p.decls(p.file.DeclList)...) - for _, n := range p.linknames { + base.Pos = src.NoXPos + clearImports() +} + +func (p *noder) processPragmas() { + for _, l := range p.linknames { if !p.importedUnsafe { - p.errorAt(n.pos, "//go:linkname only allowed in Go files that import \"unsafe\"") + p.errorAt(l.pos, "//go:linkname only allowed in Go files that import \"unsafe\"") continue } - s := lookup(n.local) - if n.remote != "" { - s.Linkname = n.remote - } else { - // Use the default object symbol name if the - // user didn't provide one. - if base.Ctxt.Pkgpath == "" { - p.errorAt(n.pos, "//go:linkname requires linkname argument or -p compiler flag") - } else { - s.Linkname = objabi.PathToPrefix(base.Ctxt.Pkgpath) + "." + n.local - } + n := ir.AsNode(lookup(l.local).Def) + if n == nil || n.Op() != ir.ONAME { + // TODO(mdempsky): Change to p.errorAt before Go 1.17 release. + base.WarnfAt(p.makeXPos(l.pos), "//go:linkname must refer to declared function or variable (will be an error in Go 1.17)") + continue } + if n.Sym().Linkname != "" { + p.errorAt(l.pos, "duplicate //go:linkname for %s", l.local) + continue + } + n.Sym().Linkname = l.remote } // The linker expects an ABI0 wrapper for all cgo-exported @@ -290,8 +298,6 @@ func (p *noder) node() { } pragcgobuf = append(pragcgobuf, p.pragcgobuf...) - base.Pos = src.NoXPos - clearImports() } func (p *noder) decls(decls []syntax.Decl) (l []ir.Node) { @@ -1592,6 +1598,13 @@ func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.P var target string if len(f) == 3 { target = f[2] + } else if base.Ctxt.Pkgpath != "" { + // Use the default object symbol name if the + // user didn't provide one. + target = objabi.PathToPrefix(base.Ctxt.Pkgpath) + "." 
+ f[1] + } else { + p.error(syntax.Error{Pos: pos, Msg: "//go:linkname requires linkname argument or -p compiler flag"}) + break } p.linknames = append(p.linknames, linkname{pos, f[1], target}) diff --git a/src/crypto/x509/internal/macos/corefoundation.go b/src/crypto/x509/internal/macos/corefoundation.go index 9b776d4b8571e..0572c6ccd8108 100644 --- a/src/crypto/x509/internal/macos/corefoundation.go +++ b/src/crypto/x509/internal/macos/corefoundation.go @@ -39,7 +39,6 @@ type CFString CFRef const kCFAllocatorDefault = 0 const kCFStringEncodingUTF8 = 0x08000100 -//go:linkname x509_CFStringCreateWithBytes x509_CFStringCreateWithBytes //go:cgo_import_dynamic x509_CFStringCreateWithBytes CFStringCreateWithBytes "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation" // StringToCFString returns a copy of the UTF-8 contents of s as a new CFString. @@ -52,7 +51,6 @@ func StringToCFString(s string) CFString { } func x509_CFStringCreateWithBytes_trampoline() -//go:linkname x509_CFDictionaryGetValueIfPresent x509_CFDictionaryGetValueIfPresent //go:cgo_import_dynamic x509_CFDictionaryGetValueIfPresent CFDictionaryGetValueIfPresent "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation" func CFDictionaryGetValueIfPresent(dict CFRef, key CFString) (value CFRef, ok bool) { @@ -67,7 +65,6 @@ func x509_CFDictionaryGetValueIfPresent_trampoline() const kCFNumberSInt32Type = 3 -//go:linkname x509_CFNumberGetValue x509_CFNumberGetValue //go:cgo_import_dynamic x509_CFNumberGetValue CFNumberGetValue "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation" func CFNumberGetValue(num CFRef) (int32, error) { @@ -81,7 +78,6 @@ func CFNumberGetValue(num CFRef) (int32, error) { } func x509_CFNumberGetValue_trampoline() -//go:linkname x509_CFDataGetLength x509_CFDataGetLength //go:cgo_import_dynamic x509_CFDataGetLength CFDataGetLength "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation" func CFDataGetLength(data CFRef) int { @@ -90,7 +86,6 @@ func CFDataGetLength(data CFRef) int { } func x509_CFDataGetLength_trampoline() -//go:linkname x509_CFDataGetBytePtr x509_CFDataGetBytePtr //go:cgo_import_dynamic x509_CFDataGetBytePtr CFDataGetBytePtr "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation" func CFDataGetBytePtr(data CFRef) uintptr { @@ -99,7 +94,6 @@ func CFDataGetBytePtr(data CFRef) uintptr { } func x509_CFDataGetBytePtr_trampoline() -//go:linkname x509_CFArrayGetCount x509_CFArrayGetCount //go:cgo_import_dynamic x509_CFArrayGetCount CFArrayGetCount "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation" func CFArrayGetCount(array CFRef) int { @@ -108,7 +102,6 @@ func CFArrayGetCount(array CFRef) int { } func x509_CFArrayGetCount_trampoline() -//go:linkname x509_CFArrayGetValueAtIndex x509_CFArrayGetValueAtIndex //go:cgo_import_dynamic x509_CFArrayGetValueAtIndex CFArrayGetValueAtIndex "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation" func CFArrayGetValueAtIndex(array CFRef, index int) CFRef { @@ -117,7 +110,6 @@ func CFArrayGetValueAtIndex(array CFRef, index int) CFRef { } func x509_CFArrayGetValueAtIndex_trampoline() -//go:linkname x509_CFEqual x509_CFEqual //go:cgo_import_dynamic x509_CFEqual CFEqual "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation" func CFEqual(a, b CFRef) bool { @@ -126,7 +118,6 @@ func CFEqual(a, b CFRef) bool { } func x509_CFEqual_trampoline() -//go:linkname x509_CFRelease 
x509_CFRelease //go:cgo_import_dynamic x509_CFRelease CFRelease "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation" func CFRelease(ref CFRef) { diff --git a/src/crypto/x509/internal/macos/security.go b/src/crypto/x509/internal/macos/security.go index 5e39e93666629..3163e3a4f727f 100644 --- a/src/crypto/x509/internal/macos/security.go +++ b/src/crypto/x509/internal/macos/security.go @@ -63,7 +63,6 @@ var ErrNoTrustSettings = errors.New("no trust settings found") const errSecNoTrustSettings = -25263 -//go:linkname x509_SecTrustSettingsCopyCertificates x509_SecTrustSettingsCopyCertificates //go:cgo_import_dynamic x509_SecTrustSettingsCopyCertificates SecTrustSettingsCopyCertificates "/System/Library/Frameworks/Security.framework/Versions/A/Security" func SecTrustSettingsCopyCertificates(domain SecTrustSettingsDomain) (certArray CFRef, err error) { @@ -80,7 +79,6 @@ func x509_SecTrustSettingsCopyCertificates_trampoline() const kSecFormatX509Cert int32 = 9 -//go:linkname x509_SecItemExport x509_SecItemExport //go:cgo_import_dynamic x509_SecItemExport SecItemExport "/System/Library/Frameworks/Security.framework/Versions/A/Security" func SecItemExport(cert CFRef) (data CFRef, err error) { @@ -95,7 +93,6 @@ func x509_SecItemExport_trampoline() const errSecItemNotFound = -25300 -//go:linkname x509_SecTrustSettingsCopyTrustSettings x509_SecTrustSettingsCopyTrustSettings //go:cgo_import_dynamic x509_SecTrustSettingsCopyTrustSettings SecTrustSettingsCopyTrustSettings "/System/Library/Frameworks/Security.framework/Versions/A/Security" func SecTrustSettingsCopyTrustSettings(cert CFRef, domain SecTrustSettingsDomain) (trustSettings CFRef, err error) { @@ -110,7 +107,6 @@ func SecTrustSettingsCopyTrustSettings(cert CFRef, domain SecTrustSettingsDomain } func x509_SecTrustSettingsCopyTrustSettings_trampoline() -//go:linkname x509_SecPolicyCopyProperties x509_SecPolicyCopyProperties //go:cgo_import_dynamic x509_SecPolicyCopyProperties SecPolicyCopyProperties "/System/Library/Frameworks/Security.framework/Versions/A/Security" func SecPolicyCopyProperties(policy CFRef) CFRef { diff --git a/src/go/types/stdlib_test.go b/src/go/types/stdlib_test.go index 669e7bec20f80..5bd43e688a041 100644 --- a/src/go/types/stdlib_test.go +++ b/src/go/types/stdlib_test.go @@ -156,6 +156,7 @@ func TestStdTest(t *testing.T) { testTestDir(t, filepath.Join(runtime.GOROOT(), "test"), "cmplxdivide.go", // also needs file cmplxdivide1.go - ignore "directive.go", // tests compiler rejection of bad directive placement - ignore + "linkname2.go", // go/types doesn't check validity of //go:xxx directives ) } diff --git a/src/runtime/os2_aix.go b/src/runtime/os2_aix.go index 428ff7f225c37..abd1010be93d7 100644 --- a/src/runtime/os2_aix.go +++ b/src/runtime/os2_aix.go @@ -18,11 +18,11 @@ import ( //go:cgo_import_dynamic libc___n_pthreads __n_pthreads "libpthread.a/shr_xpg5_64.o" //go:cgo_import_dynamic libc___mod_init __mod_init "libc.a/shr_64.o" -//go:linkname libc___n_pthreads libc___n_pthread +//go:linkname libc___n_pthreads libc___n_pthreads //go:linkname libc___mod_init libc___mod_init var ( - libc___n_pthread, + libc___n_pthreads, libc___mod_init libFunc ) diff --git a/src/runtime/syscall2_solaris.go b/src/runtime/syscall2_solaris.go index e098e8006a681..3310489202122 100644 --- a/src/runtime/syscall2_solaris.go +++ b/src/runtime/syscall2_solaris.go @@ -15,7 +15,6 @@ import _ "unsafe" // for go:linkname //go:cgo_import_dynamic libc_gethostname gethostname "libc.so" //go:cgo_import_dynamic libc_getpid getpid 
"libc.so" //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" -//go:cgo_import_dynamic libc_pipe pipe "libc.so" //go:cgo_import_dynamic libc_setgid setgid "libc.so" //go:cgo_import_dynamic libc_setgroups setgroups "libc.so" //go:cgo_import_dynamic libc_setsid setsid "libc.so" @@ -33,7 +32,6 @@ import _ "unsafe" // for go:linkname //go:linkname libc_gethostname libc_gethostname //go:linkname libc_getpid libc_getpid //go:linkname libc_ioctl libc_ioctl -//go:linkname libc_pipe libc_pipe //go:linkname libc_setgid libc_setgid //go:linkname libc_setgroups libc_setgroups //go:linkname libc_setsid libc_setsid diff --git a/src/syscall/mksyscall.pl b/src/syscall/mksyscall.pl index 25b40d7ba23cf..790df3825bbdf 100755 --- a/src/syscall/mksyscall.pl +++ b/src/syscall/mksyscall.pl @@ -343,9 +343,6 @@ ($) $trampolines{$funcname} = 1; # The assembly trampoline that jumps to the libc routine. $text .= "func ${funcname}_trampoline()\n"; - # Map syscall.funcname to just plain funcname. - # (The jump to this function is in the assembly trampoline, generated by mksyscallasm_darwin.go.) - $text .= "//go:linkname $funcname $funcname\n"; # Tell the linker that funcname can be found in libSystem using varname without the libc_ prefix. my $basename = substr $funcname, 5; $text .= "//go:cgo_import_dynamic $funcname $basename \"/usr/lib/libSystem.B.dylib\"\n\n"; diff --git a/src/syscall/syscall_darwin.go b/src/syscall/syscall_darwin.go index afdadbf89468d..162e94479fa31 100644 --- a/src/syscall/syscall_darwin.go +++ b/src/syscall/syscall_darwin.go @@ -115,7 +115,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { func libc_getfsstat_trampoline() -//go:linkname libc_getfsstat libc_getfsstat //go:cgo_import_dynamic libc_getfsstat getfsstat "/usr/lib/libSystem.B.dylib" func setattrlistTimes(path string, times []Timespec) error { @@ -148,7 +147,6 @@ func setattrlistTimes(path string, times []Timespec) error { func libc_setattrlist_trampoline() -//go:linkname libc_setattrlist libc_setattrlist //go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" func utimensat(dirfd int, path string, times *[2]Timespec, flag int) error { @@ -276,7 +274,6 @@ func fdopendir(fd int) (dir uintptr, err error) { func libc_fdopendir_trampoline() -//go:linkname libc_fdopendir libc_fdopendir //go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib" func readlen(fd int, buf *byte, nbuf int) (n int, err error) { diff --git a/src/syscall/syscall_darwin_amd64.go b/src/syscall/syscall_darwin_amd64.go index 23a4e5f996263..96fadf7837658 100644 --- a/src/syscall/syscall_darwin_amd64.go +++ b/src/syscall/syscall_darwin_amd64.go @@ -56,7 +56,6 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func libc_sendfile_trampoline() -//go:linkname libc_sendfile libc_sendfile //go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib" // Implemented in the runtime package (runtime/sys_darwin_64.go) diff --git a/src/syscall/syscall_darwin_arm64.go b/src/syscall/syscall_darwin_arm64.go index c824f6d89d839..d267a4ae6e202 100644 --- a/src/syscall/syscall_darwin_arm64.go +++ b/src/syscall/syscall_darwin_arm64.go @@ -56,7 +56,6 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func libc_sendfile_trampoline() -//go:linkname libc_sendfile libc_sendfile //go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib" // Implemented in the runtime package (runtime/sys_darwin_64.go) diff --git 
a/src/syscall/zsyscall_darwin_amd64.go b/src/syscall/zsyscall_darwin_amd64.go index 093739ebc77e7..ee726fb24d599 100644 --- a/src/syscall/zsyscall_darwin_amd64.go +++ b/src/syscall/zsyscall_darwin_amd64.go @@ -20,7 +20,6 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { func libc_getgroups_trampoline() -//go:linkname libc_getgroups libc_getgroups //go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -35,7 +34,6 @@ func setgroups(ngid int, gid *_Gid_t) (err error) { func libc_setgroups_trampoline() -//go:linkname libc_setgroups libc_setgroups //go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -51,7 +49,6 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err func libc_wait4_trampoline() -//go:linkname libc_wait4 libc_wait4 //go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -67,7 +64,6 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { func libc_accept_trampoline() -//go:linkname libc_accept libc_accept //go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -82,7 +78,6 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { func libc_bind_trampoline() -//go:linkname libc_bind libc_bind //go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -97,7 +92,6 @@ func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { func libc_connect_trampoline() -//go:linkname libc_connect libc_connect //go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -113,7 +107,6 @@ func socket(domain int, typ int, proto int) (fd int, err error) { func libc_socket_trampoline() -//go:linkname libc_socket libc_socket //go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -128,7 +121,6 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen func libc_getsockopt_trampoline() -//go:linkname libc_getsockopt libc_getsockopt //go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -143,7 +135,6 @@ func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) func libc_setsockopt_trampoline() -//go:linkname libc_setsockopt libc_setsockopt //go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -158,7 +149,6 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { func libc_getpeername_trampoline() -//go:linkname libc_getpeername libc_getpeername //go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -173,7 +163,6 @@ func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { func libc_getsockname_trampoline() -//go:linkname libc_getsockname libc_getsockname //go:cgo_import_dynamic libc_getsockname getsockname "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT @@ -188,7 +177,6 @@ func Shutdown(s int, how int) (err error) { func libc_shutdown_trampoline() -//go:linkname libc_shutdown libc_shutdown //go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -203,7 +191,6 @@ func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { func libc_socketpair_trampoline() -//go:linkname libc_socketpair libc_socketpair //go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -225,7 +212,6 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl func libc_recvfrom_trampoline() -//go:linkname libc_recvfrom libc_recvfrom //go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -246,7 +232,6 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( func libc_sendto_trampoline() -//go:linkname libc_sendto libc_sendto //go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -262,7 +247,6 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { func libc_recvmsg_trampoline() -//go:linkname libc_recvmsg libc_recvmsg //go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -278,7 +262,6 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { func libc_sendmsg_trampoline() -//go:linkname libc_sendmsg libc_sendmsg //go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -294,7 +277,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne func libc_kevent_trampoline() -//go:linkname libc_kevent libc_kevent //go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -314,7 +296,6 @@ func utimes(path string, timeval *[2]Timeval) (err error) { func libc_utimes_trampoline() -//go:linkname libc_utimes libc_utimes //go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -329,7 +310,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { func libc_futimes_trampoline() -//go:linkname libc_futimes libc_futimes //go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -345,7 +325,6 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { func libc_fcntl_trampoline() -//go:linkname libc_fcntl libc_fcntl //go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -360,7 +339,6 @@ func pipe(p *[2]int32) (err error) { func libc_pipe_trampoline() -//go:linkname libc_pipe libc_pipe //go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -375,7 +353,6 @@ func kill(pid int, signum int, posix int) (err error) { func libc_kill_trampoline() -//go:linkname libc_kill libc_kill //go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -395,7 
+372,6 @@ func Access(path string, mode uint32) (err error) { func libc_access_trampoline() -//go:linkname libc_access libc_access //go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -410,7 +386,6 @@ func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { func libc_adjtime_trampoline() -//go:linkname libc_adjtime libc_adjtime //go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -430,7 +405,6 @@ func Chdir(path string) (err error) { func libc_chdir_trampoline() -//go:linkname libc_chdir libc_chdir //go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -450,7 +424,6 @@ func Chflags(path string, flags int) (err error) { func libc_chflags_trampoline() -//go:linkname libc_chflags libc_chflags //go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -470,7 +443,6 @@ func Chmod(path string, mode uint32) (err error) { func libc_chmod_trampoline() -//go:linkname libc_chmod libc_chmod //go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -490,7 +462,6 @@ func Chown(path string, uid int, gid int) (err error) { func libc_chown_trampoline() -//go:linkname libc_chown libc_chown //go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -510,7 +481,6 @@ func Chroot(path string) (err error) { func libc_chroot_trampoline() -//go:linkname libc_chroot libc_chroot //go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -525,7 +495,6 @@ func Close(fd int) (err error) { func libc_close_trampoline() -//go:linkname libc_close libc_close //go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -540,7 +509,6 @@ func closedir(dir uintptr) (err error) { func libc_closedir_trampoline() -//go:linkname libc_closedir libc_closedir //go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -556,7 +524,6 @@ func Dup(fd int) (nfd int, err error) { func libc_dup_trampoline() -//go:linkname libc_dup libc_dup //go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -571,7 +538,6 @@ func Dup2(from int, to int) (err error) { func libc_dup2_trampoline() -//go:linkname libc_dup2 libc_dup2 //go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -596,7 +562,6 @@ func Exchangedata(path1 string, path2 string, options int) (err error) { func libc_exchangedata_trampoline() -//go:linkname libc_exchangedata libc_exchangedata //go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -611,7 +576,6 @@ func Fchdir(fd int) (err error) { func libc_fchdir_trampoline() -//go:linkname libc_fchdir libc_fchdir //go:cgo_import_dynamic libc_fchdir fchdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -626,7 
+590,6 @@ func Fchflags(fd int, flags int) (err error) { func libc_fchflags_trampoline() -//go:linkname libc_fchflags libc_fchflags //go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -641,7 +604,6 @@ func Fchmod(fd int, mode uint32) (err error) { func libc_fchmod_trampoline() -//go:linkname libc_fchmod libc_fchmod //go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -656,7 +618,6 @@ func Fchown(fd int, uid int, gid int) (err error) { func libc_fchown_trampoline() -//go:linkname libc_fchown libc_fchown //go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -671,7 +632,6 @@ func Flock(fd int, how int) (err error) { func libc_flock_trampoline() -//go:linkname libc_flock libc_flock //go:cgo_import_dynamic libc_flock flock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -687,7 +647,6 @@ func Fpathconf(fd int, name int) (val int, err error) { func libc_fpathconf_trampoline() -//go:linkname libc_fpathconf libc_fpathconf //go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -702,7 +661,6 @@ func Fsync(fd int) (err error) { func libc_fsync_trampoline() -//go:linkname libc_fsync libc_fsync //go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -717,7 +675,6 @@ func Ftruncate(fd int, length int64) (err error) { func libc_ftruncate_trampoline() -//go:linkname libc_ftruncate libc_ftruncate //go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -730,7 +687,6 @@ func Getdtablesize() (size int) { func libc_getdtablesize_trampoline() -//go:linkname libc_getdtablesize libc_getdtablesize //go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -743,7 +699,6 @@ func Getegid() (egid int) { func libc_getegid_trampoline() -//go:linkname libc_getegid libc_getegid //go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -756,7 +711,6 @@ func Geteuid() (uid int) { func libc_geteuid_trampoline() -//go:linkname libc_geteuid libc_geteuid //go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -769,7 +723,6 @@ func Getgid() (gid int) { func libc_getgid_trampoline() -//go:linkname libc_getgid libc_getgid //go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -785,7 +738,6 @@ func Getpgid(pid int) (pgid int, err error) { func libc_getpgid_trampoline() -//go:linkname libc_getpgid libc_getpgid //go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -798,7 +750,6 @@ func Getpgrp() (pgrp int) { func libc_getpgrp_trampoline() -//go:linkname libc_getpgrp libc_getpgrp //go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -811,7 +762,6 @@ func 
Getpid() (pid int) { func libc_getpid_trampoline() -//go:linkname libc_getpid libc_getpid //go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -824,7 +774,6 @@ func Getppid() (ppid int) { func libc_getppid_trampoline() -//go:linkname libc_getppid libc_getppid //go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -840,7 +789,6 @@ func Getpriority(which int, who int) (prio int, err error) { func libc_getpriority_trampoline() -//go:linkname libc_getpriority libc_getpriority //go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -855,7 +803,6 @@ func Getrlimit(which int, lim *Rlimit) (err error) { func libc_getrlimit_trampoline() -//go:linkname libc_getrlimit libc_getrlimit //go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -870,7 +817,6 @@ func Getrusage(who int, rusage *Rusage) (err error) { func libc_getrusage_trampoline() -//go:linkname libc_getrusage libc_getrusage //go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -886,7 +832,6 @@ func Getsid(pid int) (sid int, err error) { func libc_getsid_trampoline() -//go:linkname libc_getsid libc_getsid //go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -899,7 +844,6 @@ func Getuid() (uid int) { func libc_getuid_trampoline() -//go:linkname libc_getuid libc_getuid //go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -912,7 +856,6 @@ func Issetugid() (tainted bool) { func libc_issetugid_trampoline() -//go:linkname libc_issetugid libc_issetugid //go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -928,7 +871,6 @@ func Kqueue() (fd int, err error) { func libc_kqueue_trampoline() -//go:linkname libc_kqueue libc_kqueue //go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -948,7 +890,6 @@ func Lchown(path string, uid int, gid int) (err error) { func libc_lchown_trampoline() -//go:linkname libc_lchown libc_lchown //go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -973,7 +914,6 @@ func Link(path string, link string) (err error) { func libc_link_trampoline() -//go:linkname libc_link libc_link //go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -988,7 +928,6 @@ func Listen(s int, backlog int) (err error) { func libc_listen_trampoline() -//go:linkname libc_listen libc_listen //go:cgo_import_dynamic libc_listen listen "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1008,7 +947,6 @@ func Mkdir(path string, mode uint32) (err error) { func libc_mkdir_trampoline() -//go:linkname libc_mkdir libc_mkdir //go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1028,7 
+966,6 @@ func Mkfifo(path string, mode uint32) (err error) { func libc_mkfifo_trampoline() -//go:linkname libc_mkfifo libc_mkfifo //go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1048,7 +985,6 @@ func Mknod(path string, mode uint32, dev int) (err error) { func libc_mknod_trampoline() -//go:linkname libc_mknod libc_mknod //go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1069,7 +1005,6 @@ func Mlock(b []byte) (err error) { func libc_mlock_trampoline() -//go:linkname libc_mlock libc_mlock //go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1084,7 +1019,6 @@ func Mlockall(flags int) (err error) { func libc_mlockall_trampoline() -//go:linkname libc_mlockall libc_mlockall //go:cgo_import_dynamic libc_mlockall mlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1105,7 +1039,6 @@ func Mprotect(b []byte, prot int) (err error) { func libc_mprotect_trampoline() -//go:linkname libc_mprotect libc_mprotect //go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1126,7 +1059,6 @@ func Munlock(b []byte) (err error) { func libc_munlock_trampoline() -//go:linkname libc_munlock libc_munlock //go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1141,7 +1073,6 @@ func Munlockall() (err error) { func libc_munlockall_trampoline() -//go:linkname libc_munlockall libc_munlockall //go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1162,7 +1093,6 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { func libc_open_trampoline() -//go:linkname libc_open libc_open //go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1183,7 +1113,6 @@ func Pathconf(path string, name int) (val int, err error) { func libc_pathconf_trampoline() -//go:linkname libc_pathconf libc_pathconf //go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1205,7 +1134,6 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { func libc_pread_trampoline() -//go:linkname libc_pread libc_pread //go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1227,7 +1155,6 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { func libc_pwrite_trampoline() -//go:linkname libc_pwrite libc_pwrite //go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1249,7 +1176,6 @@ func read(fd int, p []byte) (n int, err error) { func libc_read_trampoline() -//go:linkname libc_read libc_read //go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1262,7 +1188,6 @@ func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { func libc_readdir_r_trampoline() -//go:linkname libc_readdir_r libc_readdir_r //go:cgo_import_dynamic 
libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1289,7 +1214,6 @@ func Readlink(path string, buf []byte) (n int, err error) { func libc_readlink_trampoline() -//go:linkname libc_readlink libc_readlink //go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1314,7 +1238,6 @@ func Rename(from string, to string) (err error) { func libc_rename_trampoline() -//go:linkname libc_rename libc_rename //go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1334,7 +1257,6 @@ func Revoke(path string) (err error) { func libc_revoke_trampoline() -//go:linkname libc_revoke libc_revoke //go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1354,7 +1276,6 @@ func Rmdir(path string) (err error) { func libc_rmdir_trampoline() -//go:linkname libc_rmdir libc_rmdir //go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1370,7 +1291,6 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { func libc_lseek_trampoline() -//go:linkname libc_lseek libc_lseek //go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1385,7 +1305,6 @@ func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { func libc_select_trampoline() -//go:linkname libc_select libc_select //go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1400,7 +1319,6 @@ func Setegid(egid int) (err error) { func libc_setegid_trampoline() -//go:linkname libc_setegid libc_setegid //go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1415,7 +1333,6 @@ func Seteuid(euid int) (err error) { func libc_seteuid_trampoline() -//go:linkname libc_seteuid libc_seteuid //go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1430,7 +1347,6 @@ func Setgid(gid int) (err error) { func libc_setgid_trampoline() -//go:linkname libc_setgid libc_setgid //go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1450,7 +1366,6 @@ func Setlogin(name string) (err error) { func libc_setlogin_trampoline() -//go:linkname libc_setlogin libc_setlogin //go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1465,7 +1380,6 @@ func Setpgid(pid int, pgid int) (err error) { func libc_setpgid_trampoline() -//go:linkname libc_setpgid libc_setpgid //go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1480,7 +1394,6 @@ func Setpriority(which int, who int, prio int) (err error) { func libc_setpriority_trampoline() -//go:linkname libc_setpriority libc_setpriority //go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1495,7 +1408,6 @@ func Setprivexec(flag int) 
(err error) { func libc_setprivexec_trampoline() -//go:linkname libc_setprivexec libc_setprivexec //go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1510,7 +1422,6 @@ func Setregid(rgid int, egid int) (err error) { func libc_setregid_trampoline() -//go:linkname libc_setregid libc_setregid //go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1525,7 +1436,6 @@ func Setreuid(ruid int, euid int) (err error) { func libc_setreuid_trampoline() -//go:linkname libc_setreuid libc_setreuid //go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1540,7 +1450,6 @@ func Setrlimit(which int, lim *Rlimit) (err error) { func libc_setrlimit_trampoline() -//go:linkname libc_setrlimit libc_setrlimit //go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1556,7 +1465,6 @@ func Setsid() (pid int, err error) { func libc_setsid_trampoline() -//go:linkname libc_setsid libc_setsid //go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1571,7 +1479,6 @@ func Settimeofday(tp *Timeval) (err error) { func libc_settimeofday_trampoline() -//go:linkname libc_settimeofday libc_settimeofday //go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1586,7 +1493,6 @@ func Setuid(uid int) (err error) { func libc_setuid_trampoline() -//go:linkname libc_setuid libc_setuid //go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1611,7 +1517,6 @@ func Symlink(path string, link string) (err error) { func libc_symlink_trampoline() -//go:linkname libc_symlink libc_symlink //go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1626,7 +1531,6 @@ func Sync() (err error) { func libc_sync_trampoline() -//go:linkname libc_sync libc_sync //go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1646,7 +1550,6 @@ func Truncate(path string, length int64) (err error) { func libc_truncate_trampoline() -//go:linkname libc_truncate libc_truncate //go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1659,7 +1562,6 @@ func Umask(newmask int) (oldmask int) { func libc_umask_trampoline() -//go:linkname libc_umask libc_umask //go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1679,7 +1581,6 @@ func Undelete(path string) (err error) { func libc_undelete_trampoline() -//go:linkname libc_undelete libc_undelete //go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1699,7 +1600,6 @@ func Unlink(path string) (err error) { func libc_unlink_trampoline() -//go:linkname libc_unlink libc_unlink //go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT @@ -1719,7 +1619,6 @@ func Unmount(path string, flags int) (err error) { func libc_unmount_trampoline() -//go:linkname libc_unmount libc_unmount //go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1741,7 +1640,6 @@ func write(fd int, p []byte) (n int, err error) { func libc_write_trampoline() -//go:linkname libc_write libc_write //go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1763,7 +1661,6 @@ func writev(fd int, iovecs []Iovec) (cnt uintptr, err error) { func libc_writev_trampoline() -//go:linkname libc_writev libc_writev //go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1779,7 +1676,6 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( func libc_mmap_trampoline() -//go:linkname libc_mmap libc_mmap //go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1794,7 +1690,6 @@ func munmap(addr uintptr, length uintptr) (err error) { func libc_munmap_trampoline() -//go:linkname libc_munmap libc_munmap //go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1810,7 +1705,6 @@ func fork() (pid int, err error) { func libc_fork_trampoline() -//go:linkname libc_fork libc_fork //go:cgo_import_dynamic libc_fork fork "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1825,7 +1719,6 @@ func ioctl(fd int, req int, arg int) (err error) { func libc_ioctl_trampoline() -//go:linkname libc_ioctl libc_ioctl //go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1850,7 +1743,6 @@ func execve(path *byte, argv **byte, envp **byte) (err error) { func libc_execve_trampoline() -//go:linkname libc_execve libc_execve //go:cgo_import_dynamic libc_execve execve "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1865,7 +1757,6 @@ func exit(res int) (err error) { func libc_exit_trampoline() -//go:linkname libc_exit libc_exit //go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1886,7 +1777,6 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) func libc_sysctl_trampoline() -//go:linkname libc_sysctl libc_sysctl //go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1917,7 +1807,6 @@ func unlinkat(fd int, path string, flags int) (err error) { func libc_unlinkat_trampoline() -//go:linkname libc_unlinkat libc_unlinkat //go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1938,7 +1827,6 @@ func openat(fd int, path string, flags int, perm uint32) (fdret int, err error) func libc_openat_trampoline() -//go:linkname libc_openat libc_openat //go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1960,7 +1848,6 @@ func getcwd(buf []byte) (n int, err error) { func libc_getcwd_trampoline() -//go:linkname 
libc_getcwd libc_getcwd //go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1975,7 +1862,6 @@ func Fstat(fd int, stat *Stat_t) (err error) { func libc_fstat64_trampoline() -//go:linkname libc_fstat64 libc_fstat64 //go:cgo_import_dynamic libc_fstat64 fstat64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1990,7 +1876,6 @@ func Fstatfs(fd int, stat *Statfs_t) (err error) { func libc_fstatfs64_trampoline() -//go:linkname libc_fstatfs64 libc_fstatfs64 //go:cgo_import_dynamic libc_fstatfs64 fstatfs64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2005,7 +1890,6 @@ func Gettimeofday(tp *Timeval) (err error) { func libc_gettimeofday_trampoline() -//go:linkname libc_gettimeofday libc_gettimeofday //go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2025,7 +1909,6 @@ func Lstat(path string, stat *Stat_t) (err error) { func libc_lstat64_trampoline() -//go:linkname libc_lstat64 libc_lstat64 //go:cgo_import_dynamic libc_lstat64 lstat64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2045,7 +1928,6 @@ func Stat(path string, stat *Stat_t) (err error) { func libc_stat64_trampoline() -//go:linkname libc_stat64 libc_stat64 //go:cgo_import_dynamic libc_stat64 stat64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2065,7 +1947,6 @@ func Statfs(path string, stat *Statfs_t) (err error) { func libc_statfs64_trampoline() -//go:linkname libc_statfs64 libc_statfs64 //go:cgo_import_dynamic libc_statfs64 statfs64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2085,7 +1966,6 @@ func fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { func libc_fstatat64_trampoline() -//go:linkname libc_fstatat64 libc_fstatat64 //go:cgo_import_dynamic libc_fstatat64 fstatat64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2101,5 +1981,4 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { func libc_ptrace_trampoline() -//go:linkname libc_ptrace libc_ptrace //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" diff --git a/src/syscall/zsyscall_darwin_arm64.go b/src/syscall/zsyscall_darwin_arm64.go index 7698b2503e85e..ac530f31084f6 100644 --- a/src/syscall/zsyscall_darwin_arm64.go +++ b/src/syscall/zsyscall_darwin_arm64.go @@ -20,7 +20,6 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { func libc_getgroups_trampoline() -//go:linkname libc_getgroups libc_getgroups //go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -35,7 +34,6 @@ func setgroups(ngid int, gid *_Gid_t) (err error) { func libc_setgroups_trampoline() -//go:linkname libc_setgroups libc_setgroups //go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -51,7 +49,6 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err func libc_wait4_trampoline() -//go:linkname libc_wait4 libc_wait4 //go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT @@ -67,7 +64,6 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { func libc_accept_trampoline() -//go:linkname libc_accept libc_accept //go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -82,7 +78,6 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { func libc_bind_trampoline() -//go:linkname libc_bind libc_bind //go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -97,7 +92,6 @@ func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { func libc_connect_trampoline() -//go:linkname libc_connect libc_connect //go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -113,7 +107,6 @@ func socket(domain int, typ int, proto int) (fd int, err error) { func libc_socket_trampoline() -//go:linkname libc_socket libc_socket //go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -128,7 +121,6 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen func libc_getsockopt_trampoline() -//go:linkname libc_getsockopt libc_getsockopt //go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -143,7 +135,6 @@ func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) func libc_setsockopt_trampoline() -//go:linkname libc_setsockopt libc_setsockopt //go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -158,7 +149,6 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { func libc_getpeername_trampoline() -//go:linkname libc_getpeername libc_getpeername //go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -173,7 +163,6 @@ func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { func libc_getsockname_trampoline() -//go:linkname libc_getsockname libc_getsockname //go:cgo_import_dynamic libc_getsockname getsockname "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -188,7 +177,6 @@ func Shutdown(s int, how int) (err error) { func libc_shutdown_trampoline() -//go:linkname libc_shutdown libc_shutdown //go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -203,7 +191,6 @@ func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { func libc_socketpair_trampoline() -//go:linkname libc_socketpair libc_socketpair //go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -225,7 +212,6 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl func libc_recvfrom_trampoline() -//go:linkname libc_recvfrom libc_recvfrom //go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -246,7 +232,6 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( func libc_sendto_trampoline() 
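// For orientation, the shape every wrapper in these generated zsyscall files
// shares: a Go function that calls the libc routine through its declared
// _trampoline, plus a //go:cgo_import_dynamic directive binding the
// unqualified libc_xxx symbol to the routine in libSystem. The deleted
// //go:linkname lines were identity mappings (libc_xxx -> libc_xxx); the
// assembly trampolines already reference the unqualified symbol, and since
// libc_xxx is never a declared Go function or variable, the stricter check
// added to noder.go above would flag them, so they can simply be dropped.
// A minimal sketch of the surviving pattern, using a hypothetical name:
//
//	func libc_example_trampoline()
//
//	//go:cgo_import_dynamic libc_example example "/usr/lib/libSystem.B.dylib"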
-//go:linkname libc_sendto libc_sendto //go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -262,7 +247,6 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { func libc_recvmsg_trampoline() -//go:linkname libc_recvmsg libc_recvmsg //go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -278,7 +262,6 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { func libc_sendmsg_trampoline() -//go:linkname libc_sendmsg libc_sendmsg //go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -294,7 +277,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne func libc_kevent_trampoline() -//go:linkname libc_kevent libc_kevent //go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -314,7 +296,6 @@ func utimes(path string, timeval *[2]Timeval) (err error) { func libc_utimes_trampoline() -//go:linkname libc_utimes libc_utimes //go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -329,7 +310,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { func libc_futimes_trampoline() -//go:linkname libc_futimes libc_futimes //go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -345,7 +325,6 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { func libc_fcntl_trampoline() -//go:linkname libc_fcntl libc_fcntl //go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -360,7 +339,6 @@ func pipe(p *[2]int32) (err error) { func libc_pipe_trampoline() -//go:linkname libc_pipe libc_pipe //go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -375,7 +353,6 @@ func kill(pid int, signum int, posix int) (err error) { func libc_kill_trampoline() -//go:linkname libc_kill libc_kill //go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -395,7 +372,6 @@ func Access(path string, mode uint32) (err error) { func libc_access_trampoline() -//go:linkname libc_access libc_access //go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -410,7 +386,6 @@ func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { func libc_adjtime_trampoline() -//go:linkname libc_adjtime libc_adjtime //go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -430,7 +405,6 @@ func Chdir(path string) (err error) { func libc_chdir_trampoline() -//go:linkname libc_chdir libc_chdir //go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -450,7 +424,6 @@ func Chflags(path string, flags int) (err error) { func libc_chflags_trampoline() -//go:linkname libc_chflags libc_chflags //go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; 
DO NOT EDIT @@ -470,7 +443,6 @@ func Chmod(path string, mode uint32) (err error) { func libc_chmod_trampoline() -//go:linkname libc_chmod libc_chmod //go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -490,7 +462,6 @@ func Chown(path string, uid int, gid int) (err error) { func libc_chown_trampoline() -//go:linkname libc_chown libc_chown //go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -510,7 +481,6 @@ func Chroot(path string) (err error) { func libc_chroot_trampoline() -//go:linkname libc_chroot libc_chroot //go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -525,7 +495,6 @@ func Close(fd int) (err error) { func libc_close_trampoline() -//go:linkname libc_close libc_close //go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -540,7 +509,6 @@ func closedir(dir uintptr) (err error) { func libc_closedir_trampoline() -//go:linkname libc_closedir libc_closedir //go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -556,7 +524,6 @@ func Dup(fd int) (nfd int, err error) { func libc_dup_trampoline() -//go:linkname libc_dup libc_dup //go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -571,7 +538,6 @@ func Dup2(from int, to int) (err error) { func libc_dup2_trampoline() -//go:linkname libc_dup2 libc_dup2 //go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -596,7 +562,6 @@ func Exchangedata(path1 string, path2 string, options int) (err error) { func libc_exchangedata_trampoline() -//go:linkname libc_exchangedata libc_exchangedata //go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -611,7 +576,6 @@ func Fchdir(fd int) (err error) { func libc_fchdir_trampoline() -//go:linkname libc_fchdir libc_fchdir //go:cgo_import_dynamic libc_fchdir fchdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -626,7 +590,6 @@ func Fchflags(fd int, flags int) (err error) { func libc_fchflags_trampoline() -//go:linkname libc_fchflags libc_fchflags //go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -641,7 +604,6 @@ func Fchmod(fd int, mode uint32) (err error) { func libc_fchmod_trampoline() -//go:linkname libc_fchmod libc_fchmod //go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -656,7 +618,6 @@ func Fchown(fd int, uid int, gid int) (err error) { func libc_fchown_trampoline() -//go:linkname libc_fchown libc_fchown //go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -671,7 +632,6 @@ func Flock(fd int, how int) (err error) { func libc_flock_trampoline() -//go:linkname libc_flock libc_flock //go:cgo_import_dynamic libc_flock flock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ 
-687,7 +647,6 @@ func Fpathconf(fd int, name int) (val int, err error) { func libc_fpathconf_trampoline() -//go:linkname libc_fpathconf libc_fpathconf //go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -702,7 +661,6 @@ func Fsync(fd int) (err error) { func libc_fsync_trampoline() -//go:linkname libc_fsync libc_fsync //go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -717,7 +675,6 @@ func Ftruncate(fd int, length int64) (err error) { func libc_ftruncate_trampoline() -//go:linkname libc_ftruncate libc_ftruncate //go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -730,7 +687,6 @@ func Getdtablesize() (size int) { func libc_getdtablesize_trampoline() -//go:linkname libc_getdtablesize libc_getdtablesize //go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -743,7 +699,6 @@ func Getegid() (egid int) { func libc_getegid_trampoline() -//go:linkname libc_getegid libc_getegid //go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -756,7 +711,6 @@ func Geteuid() (uid int) { func libc_geteuid_trampoline() -//go:linkname libc_geteuid libc_geteuid //go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -769,7 +723,6 @@ func Getgid() (gid int) { func libc_getgid_trampoline() -//go:linkname libc_getgid libc_getgid //go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -785,7 +738,6 @@ func Getpgid(pid int) (pgid int, err error) { func libc_getpgid_trampoline() -//go:linkname libc_getpgid libc_getpgid //go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -798,7 +750,6 @@ func Getpgrp() (pgrp int) { func libc_getpgrp_trampoline() -//go:linkname libc_getpgrp libc_getpgrp //go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -811,7 +762,6 @@ func Getpid() (pid int) { func libc_getpid_trampoline() -//go:linkname libc_getpid libc_getpid //go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -824,7 +774,6 @@ func Getppid() (ppid int) { func libc_getppid_trampoline() -//go:linkname libc_getppid libc_getppid //go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -840,7 +789,6 @@ func Getpriority(which int, who int) (prio int, err error) { func libc_getpriority_trampoline() -//go:linkname libc_getpriority libc_getpriority //go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -855,7 +803,6 @@ func Getrlimit(which int, lim *Rlimit) (err error) { func libc_getrlimit_trampoline() -//go:linkname libc_getrlimit libc_getrlimit //go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT @@ -870,7 +817,6 @@ func Getrusage(who int, rusage *Rusage) (err error) { func libc_getrusage_trampoline() -//go:linkname libc_getrusage libc_getrusage //go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -886,7 +832,6 @@ func Getsid(pid int) (sid int, err error) { func libc_getsid_trampoline() -//go:linkname libc_getsid libc_getsid //go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -899,7 +844,6 @@ func Getuid() (uid int) { func libc_getuid_trampoline() -//go:linkname libc_getuid libc_getuid //go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -912,7 +856,6 @@ func Issetugid() (tainted bool) { func libc_issetugid_trampoline() -//go:linkname libc_issetugid libc_issetugid //go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -928,7 +871,6 @@ func Kqueue() (fd int, err error) { func libc_kqueue_trampoline() -//go:linkname libc_kqueue libc_kqueue //go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -948,7 +890,6 @@ func Lchown(path string, uid int, gid int) (err error) { func libc_lchown_trampoline() -//go:linkname libc_lchown libc_lchown //go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -973,7 +914,6 @@ func Link(path string, link string) (err error) { func libc_link_trampoline() -//go:linkname libc_link libc_link //go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -988,7 +928,6 @@ func Listen(s int, backlog int) (err error) { func libc_listen_trampoline() -//go:linkname libc_listen libc_listen //go:cgo_import_dynamic libc_listen listen "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1008,7 +947,6 @@ func Mkdir(path string, mode uint32) (err error) { func libc_mkdir_trampoline() -//go:linkname libc_mkdir libc_mkdir //go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1028,7 +966,6 @@ func Mkfifo(path string, mode uint32) (err error) { func libc_mkfifo_trampoline() -//go:linkname libc_mkfifo libc_mkfifo //go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1048,7 +985,6 @@ func Mknod(path string, mode uint32, dev int) (err error) { func libc_mknod_trampoline() -//go:linkname libc_mknod libc_mknod //go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1069,7 +1005,6 @@ func Mlock(b []byte) (err error) { func libc_mlock_trampoline() -//go:linkname libc_mlock libc_mlock //go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1084,7 +1019,6 @@ func Mlockall(flags int) (err error) { func libc_mlockall_trampoline() -//go:linkname libc_mlockall libc_mlockall //go:cgo_import_dynamic libc_mlockall mlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ 
-1105,7 +1039,6 @@ func Mprotect(b []byte, prot int) (err error) { func libc_mprotect_trampoline() -//go:linkname libc_mprotect libc_mprotect //go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1126,7 +1059,6 @@ func Munlock(b []byte) (err error) { func libc_munlock_trampoline() -//go:linkname libc_munlock libc_munlock //go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1141,7 +1073,6 @@ func Munlockall() (err error) { func libc_munlockall_trampoline() -//go:linkname libc_munlockall libc_munlockall //go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1162,7 +1093,6 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { func libc_open_trampoline() -//go:linkname libc_open libc_open //go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1183,7 +1113,6 @@ func Pathconf(path string, name int) (val int, err error) { func libc_pathconf_trampoline() -//go:linkname libc_pathconf libc_pathconf //go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1205,7 +1134,6 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { func libc_pread_trampoline() -//go:linkname libc_pread libc_pread //go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1227,7 +1155,6 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { func libc_pwrite_trampoline() -//go:linkname libc_pwrite libc_pwrite //go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1249,7 +1176,6 @@ func read(fd int, p []byte) (n int, err error) { func libc_read_trampoline() -//go:linkname libc_read libc_read //go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1262,7 +1188,6 @@ func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { func libc_readdir_r_trampoline() -//go:linkname libc_readdir_r libc_readdir_r //go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1289,7 +1214,6 @@ func Readlink(path string, buf []byte) (n int, err error) { func libc_readlink_trampoline() -//go:linkname libc_readlink libc_readlink //go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1314,7 +1238,6 @@ func Rename(from string, to string) (err error) { func libc_rename_trampoline() -//go:linkname libc_rename libc_rename //go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1334,7 +1257,6 @@ func Revoke(path string) (err error) { func libc_revoke_trampoline() -//go:linkname libc_revoke libc_revoke //go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1354,7 +1276,6 @@ func Rmdir(path string) (err error) { func libc_rmdir_trampoline() -//go:linkname libc_rmdir libc_rmdir 
//go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1370,7 +1291,6 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { func libc_lseek_trampoline() -//go:linkname libc_lseek libc_lseek //go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1385,7 +1305,6 @@ func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { func libc_select_trampoline() -//go:linkname libc_select libc_select //go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1400,7 +1319,6 @@ func Setegid(egid int) (err error) { func libc_setegid_trampoline() -//go:linkname libc_setegid libc_setegid //go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1415,7 +1333,6 @@ func Seteuid(euid int) (err error) { func libc_seteuid_trampoline() -//go:linkname libc_seteuid libc_seteuid //go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1430,7 +1347,6 @@ func Setgid(gid int) (err error) { func libc_setgid_trampoline() -//go:linkname libc_setgid libc_setgid //go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1450,7 +1366,6 @@ func Setlogin(name string) (err error) { func libc_setlogin_trampoline() -//go:linkname libc_setlogin libc_setlogin //go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1465,7 +1380,6 @@ func Setpgid(pid int, pgid int) (err error) { func libc_setpgid_trampoline() -//go:linkname libc_setpgid libc_setpgid //go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1480,7 +1394,6 @@ func Setpriority(which int, who int, prio int) (err error) { func libc_setpriority_trampoline() -//go:linkname libc_setpriority libc_setpriority //go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1495,7 +1408,6 @@ func Setprivexec(flag int) (err error) { func libc_setprivexec_trampoline() -//go:linkname libc_setprivexec libc_setprivexec //go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1510,7 +1422,6 @@ func Setregid(rgid int, egid int) (err error) { func libc_setregid_trampoline() -//go:linkname libc_setregid libc_setregid //go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1525,7 +1436,6 @@ func Setreuid(ruid int, euid int) (err error) { func libc_setreuid_trampoline() -//go:linkname libc_setreuid libc_setreuid //go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1540,7 +1450,6 @@ func Setrlimit(which int, lim *Rlimit) (err error) { func libc_setrlimit_trampoline() -//go:linkname libc_setrlimit libc_setrlimit //go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT @@ -1556,7 +1465,6 @@ func Setsid() (pid int, err error) { func libc_setsid_trampoline() -//go:linkname libc_setsid libc_setsid //go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1571,7 +1479,6 @@ func Settimeofday(tp *Timeval) (err error) { func libc_settimeofday_trampoline() -//go:linkname libc_settimeofday libc_settimeofday //go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1586,7 +1493,6 @@ func Setuid(uid int) (err error) { func libc_setuid_trampoline() -//go:linkname libc_setuid libc_setuid //go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1611,7 +1517,6 @@ func Symlink(path string, link string) (err error) { func libc_symlink_trampoline() -//go:linkname libc_symlink libc_symlink //go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1626,7 +1531,6 @@ func Sync() (err error) { func libc_sync_trampoline() -//go:linkname libc_sync libc_sync //go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1646,7 +1550,6 @@ func Truncate(path string, length int64) (err error) { func libc_truncate_trampoline() -//go:linkname libc_truncate libc_truncate //go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1659,7 +1562,6 @@ func Umask(newmask int) (oldmask int) { func libc_umask_trampoline() -//go:linkname libc_umask libc_umask //go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1679,7 +1581,6 @@ func Undelete(path string) (err error) { func libc_undelete_trampoline() -//go:linkname libc_undelete libc_undelete //go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1699,7 +1600,6 @@ func Unlink(path string) (err error) { func libc_unlink_trampoline() -//go:linkname libc_unlink libc_unlink //go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1719,7 +1619,6 @@ func Unmount(path string, flags int) (err error) { func libc_unmount_trampoline() -//go:linkname libc_unmount libc_unmount //go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1741,7 +1640,6 @@ func write(fd int, p []byte) (n int, err error) { func libc_write_trampoline() -//go:linkname libc_write libc_write //go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1763,7 +1661,6 @@ func writev(fd int, iovecs []Iovec) (cnt uintptr, err error) { func libc_writev_trampoline() -//go:linkname libc_writev libc_writev //go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1779,7 +1676,6 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( func libc_mmap_trampoline() -//go:linkname libc_mmap libc_mmap //go:cgo_import_dynamic libc_mmap mmap 
"/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1794,7 +1690,6 @@ func munmap(addr uintptr, length uintptr) (err error) { func libc_munmap_trampoline() -//go:linkname libc_munmap libc_munmap //go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1810,7 +1705,6 @@ func fork() (pid int, err error) { func libc_fork_trampoline() -//go:linkname libc_fork libc_fork //go:cgo_import_dynamic libc_fork fork "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1825,7 +1719,6 @@ func ioctl(fd int, req int, arg int) (err error) { func libc_ioctl_trampoline() -//go:linkname libc_ioctl libc_ioctl //go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1850,7 +1743,6 @@ func execve(path *byte, argv **byte, envp **byte) (err error) { func libc_execve_trampoline() -//go:linkname libc_execve libc_execve //go:cgo_import_dynamic libc_execve execve "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1865,7 +1757,6 @@ func exit(res int) (err error) { func libc_exit_trampoline() -//go:linkname libc_exit libc_exit //go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1886,7 +1777,6 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) func libc_sysctl_trampoline() -//go:linkname libc_sysctl libc_sysctl //go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1917,7 +1807,6 @@ func unlinkat(fd int, path string, flags int) (err error) { func libc_unlinkat_trampoline() -//go:linkname libc_unlinkat libc_unlinkat //go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1938,7 +1827,6 @@ func openat(fd int, path string, flags int, perm uint32) (fdret int, err error) func libc_openat_trampoline() -//go:linkname libc_openat libc_openat //go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1960,7 +1848,6 @@ func getcwd(buf []byte) (n int, err error) { func libc_getcwd_trampoline() -//go:linkname libc_getcwd libc_getcwd //go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1975,7 +1862,6 @@ func Fstat(fd int, stat *Stat_t) (err error) { func libc_fstat_trampoline() -//go:linkname libc_fstat libc_fstat //go:cgo_import_dynamic libc_fstat fstat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1990,7 +1876,6 @@ func Fstatfs(fd int, stat *Statfs_t) (err error) { func libc_fstatfs_trampoline() -//go:linkname libc_fstatfs libc_fstatfs //go:cgo_import_dynamic libc_fstatfs fstatfs "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2005,7 +1890,6 @@ func Gettimeofday(tp *Timeval) (err error) { func libc_gettimeofday_trampoline() -//go:linkname libc_gettimeofday libc_gettimeofday //go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2025,7 +1909,6 @@ func Lstat(path string, stat *Stat_t) 
(err error) { func libc_lstat_trampoline() -//go:linkname libc_lstat libc_lstat //go:cgo_import_dynamic libc_lstat lstat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2045,7 +1928,6 @@ func Stat(path string, stat *Stat_t) (err error) { func libc_stat_trampoline() -//go:linkname libc_stat libc_stat //go:cgo_import_dynamic libc_stat stat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2065,7 +1947,6 @@ func Statfs(path string, stat *Statfs_t) (err error) { func libc_statfs_trampoline() -//go:linkname libc_statfs libc_statfs //go:cgo_import_dynamic libc_statfs statfs "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2085,7 +1966,6 @@ func fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { func libc_fstatat_trampoline() -//go:linkname libc_fstatat libc_fstatat //go:cgo_import_dynamic libc_fstatat fstatat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2101,5 +1981,4 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { func libc_ptrace_trampoline() -//go:linkname libc_ptrace libc_ptrace //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" diff --git a/test/linkname2.go b/test/linkname2.go new file mode 100644 index 0000000000000..cb7f9be3452a0 --- /dev/null +++ b/test/linkname2.go @@ -0,0 +1,27 @@ +// errorcheck + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests that errors are reported for misuse of linkname. +package p + +import _ "unsafe" + +type t int + +var x, y int + +//go:linkname x ok + +// ERROR "//go:linkname requires linkname argument or -p compiler flag" +// ERROR "//go:linkname must refer to declared function or variable" +// ERROR "//go:linkname must refer to declared function or variable" +// ERROR "duplicate //go:linkname for x" + +//line linkname2.go:18 +//go:linkname y +//go:linkname nonexist nonexist +//go:linkname t notvarfunc +//go:linkname x duplicate From f2311462ab6f2359006f42b7febd19ce95a9bbcf Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 1 Dec 2020 01:01:59 -0800 Subject: [PATCH 081/474] [dev.regabi] cmd/compile: cleanup type-checking of defined types The code for type-checking defined types was scattered between typecheckdef, typecheckdeftype, and setUnderlying. There was redundant work between them, and setUnderlying also needed to redo a lot of work because of its brute-force solution of just copying all Type fields. This CL reorders things so that as many of the defined type's fields as possible are set in advance (in typecheckdeftype), and then setUnderlying only copies over the details actually needed from the underlying type. Incidentally, this evidently improves our error handling for an existing test case, by allowing us to report an additional error. Passes toolstash/buildall.
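For orientation, the reordered flow condenses to the following sketch. It is not itself part of the diff; it is pieced together only from calls that appear in the typecheck.go hunk below:

	t := types.New(types.TFORW) // kind is filled in later by setUnderlying
	t.Sym = n.Sym()             // name-derived fields are set up front
	t.Vargen = n.Vargen
	t.Nod = n
	n.SetType(t)
	n.SetTypecheck(1)
	n.SetWalkdef(1)

	defercheckwidth()
	n.Ntype = typecheckNtype(n.Ntype) // type-check the underlying type expression
	if underlying := n.Ntype.Type(); underlying != nil {
		// Copies only Etype, Extra, Width, Align, and Orig,
		// instead of the old brute-force *t = *underlying.
		setUnderlying(t, underlying)
	}
	resumecheckwidth()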
Change-Id: Id59a24341e7e960edd1f7366c3e2356da91b9fe7 Reviewed-on: https://go-review.googlesource.com/c/go/+/274432 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/typecheck.go | 78 +++++++++++------------- test/fixedbugs/issue28079b.go | 2 +- 2 files changed, 38 insertions(+), 42 deletions(-) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 874594d764dce..d9ec06c531e97 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -3434,33 +3434,28 @@ func setUnderlying(t, underlying *types.Type) { return } - n := ir.AsNode(t.Nod) ft := t.ForwardType() - cache := t.Cache // TODO(mdempsky): Fix Type rekinding. - *t = *underlying + t.Etype = underlying.Etype + t.Extra = underlying.Extra + t.Width = underlying.Width + t.Align = underlying.Align + t.Orig = underlying.Orig - // Restore unnecessarily clobbered attributes. - t.Nod = n - t.Sym = n.Sym() - if n.Name() != nil { - t.Vargen = n.Name().Vargen + if underlying.NotInHeap() { + t.SetNotInHeap(true) + } + if underlying.Broke() { + t.SetBroke(true) } - t.Cache = cache - t.SetDeferwidth(false) // spec: "The declared type does not inherit any methods bound // to the existing type, but the method set of an interface // type [...] remains unchanged." - if !t.IsInterface() { - *t.Methods() = types.Fields{} - *t.AllMethods() = types.Fields{} - } - - // Propagate go:notinheap pragma from the Name to the Type. - if n.Name() != nil && n.Name().Pragma()&ir.NotInHeap != 0 { - t.SetNotInHeap(true) + if t.IsInterface() { + *t.Methods() = *underlying.Methods() + *t.AllMethods() = *underlying.AllMethods() } // Update types waiting on this type. @@ -3476,24 +3471,38 @@ func setUnderlying(t, underlying *types.Type) { } } -func typecheckdeftype(n ir.Node) { +func typecheckdeftype(n *ir.Name) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckdeftype", n)(nil) } + t := types.New(types.TFORW) + t.Sym = n.Sym() + t.Vargen = n.Vargen + t.Nod = n + if n.Pragma()&ir.NotInHeap != 0 { + t.SetNotInHeap(true) + } + + n.SetType(t) n.SetTypecheck(1) - n.Name().Ntype = typecheckNtype(n.Name().Ntype) - t := n.Name().Ntype.Type() - if t == nil { + n.SetWalkdef(1) + + defercheckwidth() + errorsBefore := base.Errors() + n.Ntype = typecheckNtype(n.Ntype) + if underlying := n.Ntype.Type(); underlying != nil { + setUnderlying(t, underlying) + } else { n.SetDiag(true) n.SetType(nil) - } else if n.Type() == nil { - n.SetDiag(true) - } else { - // copy new type and clear fields - // that don't come along. - setUnderlying(n.Type(), t) } + if t.Etype == types.TFORW && base.Errors() > errorsBefore { + // Something went wrong during type-checking, + // but it was reported. Silence future errors. + t.SetBroke(true) + } + resumecheckwidth() } func typecheckdef(n ir.Node) { @@ -3655,20 +3664,7 @@ func typecheckdef(n ir.Node) { } // regular type declaration - defercheckwidth() - n.SetWalkdef(1) - t := types.New(types.TFORW) - t.Nod = n - t.Sym = n.Sym() - n.SetType(t) - errorsBefore := base.Errors() typecheckdeftype(n) - if n.Type().Etype == types.TFORW && base.Errors() > errorsBefore { - // Something went wrong during type-checking, - // but it was reported. Silence future errors. 
- n.Type().SetBroke(true) - } - resumecheckwidth() } ret: diff --git a/test/fixedbugs/issue28079b.go b/test/fixedbugs/issue28079b.go index 47cc16dfb2f6c..9ff221baffd45 100644 --- a/test/fixedbugs/issue28079b.go +++ b/test/fixedbugs/issue28079b.go @@ -13,5 +13,5 @@ import "unsafe" type T [uintptr(unsafe.Pointer(nil))]int // ERROR "non-constant array bound" func f() { - _ = complex(1< Date: Tue, 1 Dec 2020 01:31:29 -0800 Subject: [PATCH 082/474] [dev.regabi] cmd/compile: move setUnderlying to package types Now that setUnderlying is decoupled from Nodes, it can be moved into package types, where it really belongs. [git-generate] cd src/cmd/compile/internal/gc rf ' mv setUnderlying SetUnderlying mv SetUnderlying typex.go mv typex.go cmd/compile/internal/types ' cd ../types rf ' mv typex.go type.go ' Change-Id: I76e2d4d8a6df599f24a731c4d8e5774ec83a119c Reviewed-on: https://go-review.googlesource.com/c/go/+/274433 Trust: Matthew Dempsky Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/iimport.go | 2 +- src/cmd/compile/internal/gc/typecheck.go | 46 +----------------------- src/cmd/compile/internal/types/type.go | 45 +++++++++++++++++++++++ 3 files changed, 47 insertions(+), 46 deletions(-) diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 57c5e621829c8..0696d05c11050 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -316,7 +316,7 @@ func (r *importReader) doDecl(n ir.Node) { // after the underlying type has been assigned. defercheckwidth() underlying := r.typ() - setUnderlying(t, underlying) + types.SetUnderlying(t, underlying) resumecheckwidth() if underlying.IsInterface() { diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index d9ec06c531e97..6858b51699dfa 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -3427,50 +3427,6 @@ func checkMapKeys() { mapqueue = nil } -func setUnderlying(t, underlying *types.Type) { - if underlying.Etype == types.TFORW { - // This type isn't computed yet; when it is, update n. - underlying.ForwardType().Copyto = append(underlying.ForwardType().Copyto, t) - return - } - - ft := t.ForwardType() - - // TODO(mdempsky): Fix Type rekinding. - t.Etype = underlying.Etype - t.Extra = underlying.Extra - t.Width = underlying.Width - t.Align = underlying.Align - t.Orig = underlying.Orig - - if underlying.NotInHeap() { - t.SetNotInHeap(true) - } - if underlying.Broke() { - t.SetBroke(true) - } - - // spec: "The declared type does not inherit any methods bound - // to the existing type, but the method set of an interface - // type [...] remains unchanged." - if t.IsInterface() { - *t.Methods() = *underlying.Methods() - *t.AllMethods() = *underlying.AllMethods() - } - - // Update types waiting on this type. - for _, w := range ft.Copyto { - setUnderlying(w, t) - } - - // Double-check use of type as embedded type. 
- if ft.Embedlineno.IsKnown() { - if t.IsPtr() || t.IsUnsafePtr() { - base.ErrorfAt(ft.Embedlineno, "embedded type cannot be a pointer") - } - } -} - func typecheckdeftype(n *ir.Name) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckdeftype", n)(nil) @@ -3492,7 +3448,7 @@ func typecheckdeftype(n *ir.Name) { errorsBefore := base.Errors() n.Ntype = typecheckNtype(n.Ntype) if underlying := n.Ntype.Type(); underlying != nil { - setUnderlying(t, underlying) + types.SetUnderlying(t, underlying) } else { n.SetDiag(true) n.SetType(nil) diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index 8499a36edc8f5..2a65b713be868 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -5,6 +5,7 @@ package types import ( + "cmd/compile/internal/base" "cmd/internal/obj" "cmd/internal/src" "fmt" @@ -1517,3 +1518,47 @@ var ( TypeVoid = newSSA("void") TypeInt128 = newSSA("int128") ) + +func SetUnderlying(t, underlying *Type) { + if underlying.Etype == TFORW { + // This type isn't computed yet; when it is, update n. + underlying.ForwardType().Copyto = append(underlying.ForwardType().Copyto, t) + return + } + + ft := t.ForwardType() + + // TODO(mdempsky): Fix Type rekinding. + t.Etype = underlying.Etype + t.Extra = underlying.Extra + t.Width = underlying.Width + t.Align = underlying.Align + t.Orig = underlying.Orig + + if underlying.NotInHeap() { + t.SetNotInHeap(true) + } + if underlying.Broke() { + t.SetBroke(true) + } + + // spec: "The declared type does not inherit any methods bound + // to the existing type, but the method set of an interface + // type [...] remains unchanged." + if t.IsInterface() { + *t.Methods() = *underlying.Methods() + *t.AllMethods() = *underlying.AllMethods() + } + + // Update types waiting on this type. + for _, w := range ft.Copyto { + SetUnderlying(w, t) + } + + // Double-check use of type as embedded type. + if ft.Embedlineno.IsKnown() { + if t.IsPtr() || t.IsUnsafePtr() { + base.ErrorfAt(ft.Embedlineno, "embedded type cannot be a pointer") + } + } +} From f37aa5e4e26a7212b6300e2021b8e6ea7000979b Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 1 Dec 2020 01:42:47 -0800 Subject: [PATCH 083/474] [dev.regabi] cmd/compile: add NewNamed The start of abstracting away Type fields. This adds a new constructor for named types, styled after go/types.NewNamed. Along with helper methods for SetNod and Pos, this allows hiding Nod. 
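As a usage sketch, construction of the predeclared error type now reads as follows (adapted directly from the universe.go hunk below):

	n := ir.NewNameAt(src.NoXPos, ir.BuiltinPkg.Lookup("error"))
	types.Errortype = types.NewNamed(n) // named forward type; sym and nod are taken from n
	types.Errortype.SetUnderlying(makeErrorInterface())
	n.SetOp(ir.OTYPE)
	n.SetType(types.Errortype)
	n.Sym().Def = n
	dowidth(types.Errortype)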
Change-Id: Ica107034b6346c7b523bf6ae2a34009e350a9aa8 Reviewed-on: https://go-review.googlesource.com/c/go/+/274434 Trust: Matthew Dempsky Reviewed-by: Russ Cox --- src/cmd/compile/fmtmap_test.go | 1 + src/cmd/compile/internal/gc/align.go | 6 +-- src/cmd/compile/internal/gc/export.go | 4 +- src/cmd/compile/internal/gc/iexport.go | 2 +- src/cmd/compile/internal/gc/iimport.go | 2 +- src/cmd/compile/internal/gc/subr.go | 8 ++-- src/cmd/compile/internal/gc/typecheck.go | 14 +------ src/cmd/compile/internal/gc/universe.go | 13 +++--- src/cmd/compile/internal/ir/expr.go | 8 +--- src/cmd/compile/internal/ir/type.go | 29 ++++---------- src/cmd/compile/internal/types/type.go | 51 +++++++++++++++++++++--- 11 files changed, 75 insertions(+), 63 deletions(-) diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go index 09b06c4d939d0..ca31705f72cf3 100644 --- a/src/cmd/compile/fmtmap_test.go +++ b/src/cmd/compile/fmtmap_test.go @@ -130,6 +130,7 @@ var knownFormats = map[string]string{ "cmd/compile/internal/types.EType %d": "", "cmd/compile/internal/types.EType %s": "", "cmd/compile/internal/types.EType %v": "", + "cmd/compile/internal/types.IRNode %v": "", "cmd/internal/obj.ABI %v": "", "error %v": "", "float64 %.2f": "", diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go index ffae8dc27b6f3..5171983af0d83 100644 --- a/src/cmd/compile/internal/gc/align.go +++ b/src/cmd/compile/internal/gc/align.go @@ -205,7 +205,7 @@ func findTypeLoop(t *types.Type, path *[]*types.Type) bool { } *path = append(*path, t) - if findTypeLoop(ir.AsNode(t.Nod).Name().Ntype.Type(), path) { + if findTypeLoop(t.Obj().(*ir.Name).Ntype.Type(), path) { return true } *path = (*path)[:len(*path)-1] @@ -314,8 +314,8 @@ func dowidth(t *types.Type) { defercheckwidth() lno := base.Pos - if ir.AsNode(t.Nod) != nil { - base.Pos = ir.AsNode(t.Nod).Pos() + if pos := t.Pos(); pos.IsKnown() { + base.Pos = pos } t.Width = -2 diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 5cd379a7d38e3..f803a17c60a25 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -101,9 +101,7 @@ func importsym(ipkg *types.Pkg, s *types.Sym, op ir.Op) ir.Node { func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type { n := importsym(ipkg, s, ir.OTYPE) if n.Op() != ir.OTYPE { - t := types.New(types.TFORW) - t.Sym = s - t.Nod = n + t := types.NewNamed(n) n.SetOp(ir.OTYPE) n.SetPos(pos) diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index d6c50c7285772..2dfce2659640c 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -640,7 +640,7 @@ func (w *exportWriter) doTyp(t *types.Type) { } w.startType(definedType) - w.qualifiedIdent(ir.TypeNode(t)) + w.qualifiedIdent(t.Obj().(*ir.Name)) return } diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 0696d05c11050..15f1b646f794c 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -316,7 +316,7 @@ func (r *importReader) doDecl(n ir.Node) { // after the underlying type has been assigned. 
defercheckwidth() underlying := r.typ() - types.SetUnderlying(t, underlying) + t.SetUnderlying(underlying) resumecheckwidth() if underlying.IsInterface() { diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 0163653d3bd9a..04c8c537bd955 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -1490,9 +1490,9 @@ func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node { // typePos returns the position associated with t. // This is where t was declared or where it appeared as a type expression. func typePos(t *types.Type) src.XPos { - n := ir.AsNode(t.Nod) - if n == nil || !n.Pos().IsKnown() { - base.Fatalf("bad type: %v", t) + if pos := t.Pos(); pos.IsKnown() { + return pos } - return n.Pos() + base.Fatalf("bad type: %v", t) + panic("unreachable") } diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 6858b51699dfa..dccb5ecdce8fa 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -3432,10 +3432,8 @@ func typecheckdeftype(n *ir.Name) { defer tracePrint("typecheckdeftype", n)(nil) } - t := types.New(types.TFORW) - t.Sym = n.Sym() + t := types.NewNamed(n) t.Vargen = n.Vargen - t.Nod = n if n.Pragma()&ir.NotInHeap != 0 { t.SetNotInHeap(true) } @@ -3448,7 +3446,7 @@ func typecheckdeftype(n *ir.Name) { errorsBefore := base.Errors() n.Ntype = typecheckNtype(n.Ntype) if underlying := n.Ntype.Type(); underlying != nil { - types.SetUnderlying(t, underlying) + t.SetUnderlying(underlying) } else { n.SetDiag(true) n.SetType(nil) @@ -3895,14 +3893,6 @@ func deadcodeexpr(n ir.Node) ir.Node { return n } -func toTypeNode(orig ir.Node, t *types.Type) ir.Node { - n := ir.Nod(ir.OTYPE, nil, nil) - n.SetPos(orig.Pos()) - n.SetType(t) - t.Nod = n - return n -} - // getIotaValue returns the current value for "iota", // or -1 if not within a ConstSpec. func getIotaValue() int64 { diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index c3c2c0492aa7d..31b49e05a502f 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -337,11 +337,12 @@ func makeErrorInterface() *types.Type { func lexinit1() { // error type - s := ir.BuiltinPkg.Lookup("error") - types.Errortype = makeErrorInterface() - types.Errortype.Sym = s - types.Errortype.Orig = makeErrorInterface() - s.Def = ir.TypeNode(types.Errortype) + n := ir.NewNameAt(src.NoXPos, ir.BuiltinPkg.Lookup("error")) + types.Errortype = types.NewNamed(n) + types.Errortype.SetUnderlying(makeErrorInterface()) + n.SetOp(ir.OTYPE) + n.SetType(types.Errortype) + n.Sym().Def = n dowidth(types.Errortype) // We create separate byte and rune types for better error messages @@ -353,7 +354,7 @@ func lexinit1() { // type aliases, albeit at the cost of having to deal with it everywhere). 
// byte alias - s = ir.BuiltinPkg.Lookup("byte") + s := ir.BuiltinPkg.Lookup("byte") types.Bytetype = types.New(types.TUINT8) types.Bytetype.Sym = s s.Def = ir.TypeNode(types.Bytetype) diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 87593520a1d29..2a7211cfdac36 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -545,9 +545,7 @@ func (*ParenExpr) CanBeNtype() {} func (n *ParenExpr) SetOTYPE(t *types.Type) { n.op = OTYPE n.typ = t - if t.Nod == nil { - t.Nod = n - } + t.SetNod(n) } // A ResultExpr represents a direct access to a result slot on the stack frame. @@ -762,9 +760,7 @@ func (n *StarExpr) SetOTYPE(t *types.Type) { n.op = OTYPE n.X = nil n.typ = t - if t.Nod == nil { - t.Nod = n - } + t.SetNod(n) } func (n *StarExpr) DeepCopy(pos src.XPos) Node { diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go index af8db15e8473f..446145b24c99a 100644 --- a/src/cmd/compile/internal/ir/type.go +++ b/src/cmd/compile/internal/ir/type.go @@ -5,6 +5,7 @@ package ir import ( + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/src" "fmt" @@ -51,12 +52,7 @@ func (n *miniType) setOTYPE(t *types.Type, self Node) { } n.op = OTYPE n.typ = t - - // t.Nod can be non-nil already - // in the case of shared *type.Types, like []byte or interface{}. - if t.Nod == nil { - t.Nod = self - } + t.SetNod(self) } func (n *miniType) Sym() *types.Sym { return nil } // for Format OTYPE @@ -362,20 +358,11 @@ func (n *typeNode) CanBeNtype() {} // TypeNode returns the Node representing the type t. func TypeNode(t *types.Type) Ntype { - return TypeNodeAt(src.NoXPos, t) -} - -// TypeNodeAt returns the Node representing the type t. -// If the node must be created, TypeNodeAt uses the position pos. -// TODO(rsc): Does anyone actually use position on these type nodes? -func TypeNodeAt(pos src.XPos, t *types.Type) Ntype { - // If we copied another type with *t = *u, - // then t.Nod might be out of date, so check t.Nod.Type() too. - n := AsNode(t.Nod) - if n == nil || n.Type() != t { - n := newTypeNode(pos, t) // t.Sym may be nil - t.Nod = n - return n + if n := t.Obj(); n != nil { + if n.Type() != t { + base.Fatalf("type skew: %v has type %v, but expected %v", n, n.Type(), t) + } + return n.(Ntype) } - return n.(Ntype) + return newTypeNode(src.NoXPos, t) } diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index 2a65b713be868..d6d56426a5b25 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -14,7 +14,11 @@ import ( // IRNode represents an ir.Node, but without needing to import cmd/compile/internal/ir, // which would cause an import cycle. The uses in other packages must type assert // values of type IRNode to ir.Node or a more specific type. -type IRNode interface{ Type() *Type } +type IRNode interface { + Pos() src.XPos + Sym() *Sym + Type() *Type +} //go:generate stringer -type EType -trimprefix T @@ -142,7 +146,7 @@ type Type struct { methods Fields allMethods Fields - Nod IRNode // canonical OTYPE node + nod IRNode // canonical OTYPE node Orig *Type // original type (type literal or predefined type) // Cache of composite types, with this type being the element type. 
@@ -180,6 +184,24 @@ func (t *Type) SetNoalg(b bool) { t.flags.set(typeNoalg, b) } func (t *Type) SetDeferwidth(b bool) { t.flags.set(typeDeferwidth, b) } func (t *Type) SetRecur(b bool) { t.flags.set(typeRecur, b) } +// SetNod associates t with syntax node n. +func (t *Type) SetNod(n IRNode) { + // t.nod can be non-nil already + // in the case of shared *Types, like []byte or interface{}. + if t.nod == nil { + t.nod = n + } +} + +// Pos returns a position associated with t, if any. +// This should only be used for diagnostics. +func (t *Type) Pos() src.XPos { + if t.nod != nil { + return t.nod.Pos() + } + return src.NoXPos +} + // Pkg returns the package that t appeared in. // // Pkg is only defined for function, struct, and interface types @@ -1519,7 +1541,24 @@ var ( TypeInt128 = newSSA("int128") ) -func SetUnderlying(t, underlying *Type) { +// NewNamed returns a new named type for the given type name. +func NewNamed(obj IRNode) *Type { + t := New(TFORW) + t.Sym = obj.Sym() + t.nod = obj + return t +} + +// Obj returns the type name for the named type t. +func (t *Type) Obj() IRNode { + if t.Sym != nil { + return t.nod + } + return nil +} + +// SetUnderlying sets the underlying type. +func (t *Type) SetUnderlying(underlying *Type) { if underlying.Etype == TFORW { // This type isn't computed yet; when it is, update n. underlying.ForwardType().Copyto = append(underlying.ForwardType().Copyto, t) @@ -1546,13 +1585,13 @@ func SetUnderlying(t, underlying *Type) { // to the existing type, but the method set of an interface // type [...] remains unchanged." if t.IsInterface() { - *t.Methods() = *underlying.Methods() - *t.AllMethods() = *underlying.AllMethods() + t.methods = underlying.methods + t.allMethods = underlying.allMethods } // Update types waiting on this type. for _, w := range ft.Copyto { - SetUnderlying(w, t) + w.SetUnderlying(t) } // Double-check use of type as embedded type. From a17c5e2fce9340ec19d4019490b38a7645f244df Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 1 Dec 2020 02:58:41 -0800 Subject: [PATCH 084/474] [dev.regabi] cmd/compile: add NewBasic and cleanup universe This CL introduces types.NewBasic, for creating the predeclared universal types, and reorganizes how the universe is initialized so that all predeclared types are uniformly constructed. There are now a bunch of Type fields that are no longer assigned outside of the package, so this CL also introduces some new accessor methods that a subsequent CL will mechanically introduce uses of. Change-Id: Ie7996c3d5f1ca46cd5bfe45ecc91ebfa6a7b6c7d Reviewed-on: https://go-review.googlesource.com/c/go/+/274435 Trust: Matthew Dempsky Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/universe.go | 194 +++++++++--------------- src/cmd/compile/internal/types/type.go | 21 ++- 2 files changed, 90 insertions(+), 125 deletions(-) diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index 31b49e05a502f..b1492659b458b 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -87,31 +87,80 @@ var unsafeFuncs = [...]struct { // initUniverse initializes the universe block. func initUniverse() { - lexinit() - typeinit() - lexinit1() -} + if Widthptr == 0 { + base.Fatalf("typeinit before betypeinit") + } -// lexinit initializes known symbols and the basic types. 
-func lexinit() { - for _, s := range &basicTypes { - etype := s.etype - if int(etype) >= len(types.Types) { - base.Fatalf("lexinit: %s bad etype", s.name) + slicePtrOffset = 0 + sliceLenOffset = Rnd(slicePtrOffset+int64(Widthptr), int64(Widthptr)) + sliceCapOffset = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr)) + sizeofSlice = Rnd(sliceCapOffset+int64(Widthptr), int64(Widthptr)) + + // string is same as slice wo the cap + sizeofString = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr)) + + for et := types.EType(0); et < types.NTYPE; et++ { + simtype[et] = et + } + + types.Types[types.TANY] = types.New(types.TANY) + types.Types[types.TINTER] = types.New(types.TINTER) // empty interface + + defBasic := func(kind types.EType, pkg *types.Pkg, name string) *types.Type { + sym := pkg.Lookup(name) + n := ir.NewNameAt(src.NoXPos, sym) + n.SetOp(ir.OTYPE) + t := types.NewBasic(kind, n) + n.SetType(t) + sym.Def = n + if kind != types.TANY { + dowidth(t) } - s2 := ir.BuiltinPkg.Lookup(s.name) - t := types.Types[etype] - if t == nil { - t = types.New(etype) - t.Sym = s2 - if etype != types.TANY && etype != types.TSTRING { - dowidth(t) - } - types.Types[etype] = t + return t + } + + for _, s := range &basicTypes { + types.Types[s.etype] = defBasic(s.etype, ir.BuiltinPkg, s.name) + } + + for _, s := range &typedefs { + sameas := s.sameas32 + if Widthptr == 8 { + sameas = s.sameas64 } - s2.Def = ir.TypeNode(t) + simtype[s.etype] = sameas + + types.Types[s.etype] = defBasic(s.etype, ir.BuiltinPkg, s.name) } + // We create separate byte and rune types for better error messages + // rather than just creating type alias *types.Sym's for the uint8 and + // int32 types. Hence, (bytetype|runtype).Sym.isAlias() is false. + // TODO(gri) Should we get rid of this special case (at the cost + // of less informative error messages involving bytes and runes)? + // (Alternatively, we could introduce an OTALIAS node representing + // type aliases, albeit at the cost of having to deal with it everywhere). 
+ types.Bytetype = defBasic(types.TUINT8, ir.BuiltinPkg, "byte") + types.Runetype = defBasic(types.TINT32, ir.BuiltinPkg, "rune") + + // error type + s := ir.BuiltinPkg.Lookup("error") + n := ir.NewNameAt(src.NoXPos, s) + n.SetOp(ir.OTYPE) + types.Errortype = types.NewNamed(n) + types.Errortype.SetUnderlying(makeErrorInterface()) + n.SetType(types.Errortype) + s.Def = n + dowidth(types.Errortype) + + types.Types[types.TUNSAFEPTR] = defBasic(types.TUNSAFEPTR, unsafepkg, "Pointer") + + // simple aliases + simtype[types.TMAP] = types.TPTR + simtype[types.TCHAN] = types.TPTR + simtype[types.TFUNC] = types.TPTR + simtype[types.TUNSAFEPTR] = types.TPTR + for _, s := range &builtinFuncs { s2 := ir.BuiltinPkg.Lookup(s.name) s2.Def = NewName(s2) @@ -124,19 +173,13 @@ func lexinit() { ir.AsNode(s2.Def).SetSubOp(s.op) } - types.UntypedString = types.New(types.TSTRING) - types.UntypedBool = types.New(types.TBOOL) - types.Types[types.TANY] = types.New(types.TANY) - - s := ir.BuiltinPkg.Lookup("true") + s = ir.BuiltinPkg.Lookup("true") s.Def = nodbool(true) ir.AsNode(s.Def).SetSym(lookup("true")) - ir.AsNode(s.Def).SetType(types.UntypedBool) s = ir.BuiltinPkg.Lookup("false") s.Def = nodbool(false) ir.AsNode(s.Def).SetSym(lookup("false")) - ir.AsNode(s.Def).SetType(types.UntypedBool) s = lookup("_") s.Block = -100 @@ -160,28 +203,6 @@ func lexinit() { s = ir.BuiltinPkg.Lookup("iota") s.Def = ir.Nod(ir.OIOTA, nil, nil) ir.AsNode(s.Def).SetSym(s) -} - -func typeinit() { - if Widthptr == 0 { - base.Fatalf("typeinit before betypeinit") - } - - for et := types.EType(0); et < types.NTYPE; et++ { - simtype[et] = et - } - - types.Types[types.TPTR] = types.New(types.TPTR) - dowidth(types.Types[types.TPTR]) - - t := types.New(types.TUNSAFEPTR) - types.Types[types.TUNSAFEPTR] = t - t.Sym = unsafepkg.Lookup("Pointer") - n := ir.NewNameAt(src.NoXPos, t.Sym) // NewNameAt to get a package for use tracking - n.SetOp(ir.OTYPE) - n.SetType(t) - t.Sym.Def = n - dowidth(types.Types[types.TUNSAFEPTR]) for et := types.TINT8; et <= types.TUINT64; et++ { isInt[et] = true @@ -259,8 +280,7 @@ func typeinit() { okforcmp[types.TSTRING] = true - var i int - for i = 0; i < len(okfor); i++ { + for i := range okfor { okfor[i] = okfornone[:] } @@ -302,25 +322,6 @@ func typeinit() { iscmp[ir.OLE] = true iscmp[ir.OEQ] = true iscmp[ir.ONE] = true - - types.Types[types.TINTER] = types.New(types.TINTER) // empty interface - - // simple aliases - simtype[types.TMAP] = types.TPTR - simtype[types.TCHAN] = types.TPTR - simtype[types.TFUNC] = types.TPTR - simtype[types.TUNSAFEPTR] = types.TPTR - - slicePtrOffset = 0 - sliceLenOffset = Rnd(slicePtrOffset+int64(Widthptr), int64(Widthptr)) - sliceCapOffset = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr)) - sizeofSlice = Rnd(sliceCapOffset+int64(Widthptr), int64(Widthptr)) - - // string is same as slice wo the cap - sizeofString = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr)) - - dowidth(types.Types[types.TSTRING]) - dowidth(types.UntypedString) } func makeErrorInterface() *types.Type { @@ -335,59 +336,6 @@ func makeErrorInterface() *types.Type { return t } -func lexinit1() { - // error type - n := ir.NewNameAt(src.NoXPos, ir.BuiltinPkg.Lookup("error")) - types.Errortype = types.NewNamed(n) - types.Errortype.SetUnderlying(makeErrorInterface()) - n.SetOp(ir.OTYPE) - n.SetType(types.Errortype) - n.Sym().Def = n - dowidth(types.Errortype) - - // We create separate byte and rune types for better error messages - // rather than just creating type alias *types.Sym's for the uint8 and - // 
int32 types. Hence, (bytetype|runtype).Sym.isAlias() is false. - // TODO(gri) Should we get rid of this special case (at the cost - // of less informative error messages involving bytes and runes)? - // (Alternatively, we could introduce an OTALIAS node representing - // type aliases, albeit at the cost of having to deal with it everywhere). - - // byte alias - s := ir.BuiltinPkg.Lookup("byte") - types.Bytetype = types.New(types.TUINT8) - types.Bytetype.Sym = s - s.Def = ir.TypeNode(types.Bytetype) - dowidth(types.Bytetype) - - // rune alias - s = ir.BuiltinPkg.Lookup("rune") - types.Runetype = types.New(types.TINT32) - types.Runetype.Sym = s - s.Def = ir.TypeNode(types.Runetype) - dowidth(types.Runetype) - - // backend-dependent builtin types (e.g. int). - for _, s := range &typedefs { - s1 := ir.BuiltinPkg.Lookup(s.name) - - sameas := s.sameas32 - if Widthptr == 8 { - sameas = s.sameas64 - } - - simtype[s.etype] = sameas - - t := types.New(s.etype) - t.Sym = s1 - types.Types[s.etype] = t - s1.Def = ir.TypeNode(t) - s1.Origpkg = ir.BuiltinPkg - - dowidth(t) - } -} - // finishUniverse makes the universe block visible within the current package. func finishUniverse() { // Operationally, this is similar to a dot import of builtinpkg, except diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index d6d56426a5b25..f0211a67fbd64 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -110,8 +110,8 @@ var ( Errortype *Type // Types to represent untyped string and boolean constants. - UntypedString *Type - UntypedBool *Type + UntypedString = New(TSTRING) + UntypedBool = New(TBOOL) // Types to represent untyped numeric constants. UntypedInt = New(TIDEAL) @@ -184,6 +184,15 @@ func (t *Type) SetNoalg(b bool) { t.flags.set(typeNoalg, b) } func (t *Type) SetDeferwidth(b bool) { t.flags.set(typeDeferwidth, b) } func (t *Type) SetRecur(b bool) { t.flags.set(typeRecur, b) } +// Kind returns the kind of type t. +func (t *Type) Kind() EType { return t.Etype } + +// Sym returns the name of type t. +func (t *Type) GetSym() *Sym { return t.Sym } + +// Underlying returns the underlying type of type t. +func (t *Type) Underlying() *Type { return t.Orig } + // SetNod associates t with syntax node n. func (t *Type) SetNod(n IRNode) { // t.nod can be non-nil already @@ -1601,3 +1610,11 @@ func (t *Type) SetUnderlying(underlying *Type) { } } } + +// NewBasic returns a new basic type of the given kind. +func NewBasic(kind EType, obj IRNode) *Type { + t := New(kind) + t.Sym = obj.Sym() + t.nod = obj + return t +} From a17c5e2fce9340ec19d4019490b38a7645f244df Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 1 Dec 2020 02:58:41 -0800 Subject: [PATCH 085/474] [dev.regabi] cmd/compile: only save ONAMEs on Curfn.Dcl There's not really any use to tracking function-scoped constants and types on Curfn.Dcl, and there's sloppy code that assumes all of the declarations are variables (e.g., cmpstackvarlt).
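The change itself is the extra condition in declare, shown in the dcl.go hunk below, so that only ONAME nodes are recorded:

	if Curfn != nil && ctxt != ir.PFUNC && n.Op() == ir.ONAME {
		Curfn.Dcl = append(Curfn.Dcl, n)
	}

Function-scoped constants (OLITERAL) and types (OTYPE) are still declared as before, just no longer appended to Curfn.Dcl. Dropping them shifts autotmp numbering, which is presumably why the test/live.go expectation below becomes a regexp.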
Change-Id: I5d10dc681dac2c161c7b73ba808403052ca0608e Reviewed-on: https://go-review.googlesource.com/c/go/+/274436 Reviewed-by: Russ Cox Trust: Matthew Dempsky --- src/cmd/compile/internal/gc/dcl.go | 2 +- test/live.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 637587392aa44..3b60496c5c689 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -84,7 +84,7 @@ func declare(n *ir.Name, ctxt ir.Class) { base.Pos = n.Pos() base.Fatalf("automatic outside function") } - if Curfn != nil && ctxt != ir.PFUNC { + if Curfn != nil && ctxt != ir.PFUNC && n.Op() == ir.ONAME { Curfn.Dcl = append(Curfn.Dcl, n) } if n.Op() == ir.OTYPE { diff --git a/test/live.go b/test/live.go index 3df7ab01af47f..d52ce7f007501 100644 --- a/test/live.go +++ b/test/live.go @@ -718,5 +718,5 @@ func f44(f func() [2]*int) interface{} { // ERROR "live at entry to f44: f" } ret := T{} ret.s[0] = f() - return ret // ERROR "stack object .autotmp_5 T" + return ret // ERROR "stack object .autotmp_[0-9]+ T" } From 5ffa275f3cab631483a1ce76a63fc4ede3d204e8 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 1 Dec 2020 03:25:29 -0800 Subject: [PATCH 086/474] [dev.regabi] cmd/compile: first pass at abstracting Type Passes toolstash/buildall. [git-generate] cd src/cmd/compile/internal/ssa rf ' ex . ../ir ../gc { import "cmd/compile/internal/types" var t *types.Type t.Etype -> t.Kind() t.Sym -> t.GetSym() t.Orig -> t.Underlying() } ' cd ../types rf ' mv EType Kind mv IRNode Object mv Type.Etype Type.kind mv Type.Sym Type.sym mv Type.Orig Type.underlying mv Type.Cache Type.cache mv Type.GetSym Type.Sym mv Bytetype ByteType mv Runetype RuneType mv Errortype ErrorType ' cd ../gc sed -i 's/Bytetype/ByteType/; s/Runetype/RuneType/' mkbuiltin.go git codereview gofmt go install cmd/compile/internal/... 
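The net effect at a typical call site, taking a line from the bexport.go hunk below as an example:

	// Before (direct field access):
	//	if t.Sym != nil && t.Etype != types.TINTER { ... }
	// After (accessor methods):
	if t.Sym() != nil && t.Kind() != types.TINTER {
		// ...
	}

Accesses to t.Orig likewise become t.Underlying(), per the ex block above.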
go test cmd/compile -u || go test cmd/compile Change-Id: Ibecb2d7100d3318a49238eb4a78d70acb49eedca Reviewed-on: https://go-review.googlesource.com/c/go/+/274437 Run-TryBot: Matthew Dempsky Reviewed-by: Russ Cox Trust: Matthew Dempsky --- src/cmd/compile/fmtmap_test.go | 8 +- src/cmd/compile/internal/gc/alg.go | 12 +- src/cmd/compile/internal/gc/align.go | 12 +- src/cmd/compile/internal/gc/bexport.go | 10 +- src/cmd/compile/internal/gc/builtin.go | 4 +- src/cmd/compile/internal/gc/const.go | 22 +-- src/cmd/compile/internal/gc/dcl.go | 18 +- src/cmd/compile/internal/gc/embed.go | 4 +- src/cmd/compile/internal/gc/escape.go | 2 +- src/cmd/compile/internal/gc/go.go | 2 +- src/cmd/compile/internal/gc/iexport.go | 18 +- src/cmd/compile/internal/gc/inl.go | 4 +- src/cmd/compile/internal/gc/mkbuiltin.go | 4 +- src/cmd/compile/internal/gc/obj.go | 6 +- src/cmd/compile/internal/gc/order.go | 2 +- src/cmd/compile/internal/gc/plive.go | 4 +- src/cmd/compile/internal/gc/range.go | 12 +- src/cmd/compile/internal/gc/reflect.go | 76 ++++---- src/cmd/compile/internal/gc/ssa.go | 56 +++--- src/cmd/compile/internal/gc/subr.go | 72 +++---- src/cmd/compile/internal/gc/swt.go | 4 +- src/cmd/compile/internal/gc/typecheck.go | 80 ++++---- src/cmd/compile/internal/gc/universe.go | 26 +-- src/cmd/compile/internal/gc/walk.go | 48 ++--- src/cmd/compile/internal/ir/fmt.go | 50 ++--- src/cmd/compile/internal/ir/node.go | 2 +- src/cmd/compile/internal/ir/type.go | 2 +- src/cmd/compile/internal/ir/val.go | 2 +- src/cmd/compile/internal/ssa/expand_calls.go | 16 +- src/cmd/compile/internal/ssa/export_test.go | 6 +- src/cmd/compile/internal/ssa/regalloc.go | 4 +- .../compile/internal/types/etype_string.go | 4 +- src/cmd/compile/internal/types/identity.go | 12 +- src/cmd/compile/internal/types/scope.go | 8 +- src/cmd/compile/internal/types/sym.go | 2 +- src/cmd/compile/internal/types/type.go | 182 +++++++++--------- 36 files changed, 398 insertions(+), 398 deletions(-) diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go index ca31705f72cf3..fde9c51b27bea 100644 --- a/src/cmd/compile/fmtmap_test.go +++ b/src/cmd/compile/fmtmap_test.go @@ -127,10 +127,10 @@ var knownFormats = map[string]string{ "cmd/compile/internal/syntax.position %s": "", "cmd/compile/internal/syntax.token %q": "", "cmd/compile/internal/syntax.token %s": "", - "cmd/compile/internal/types.EType %d": "", - "cmd/compile/internal/types.EType %s": "", - "cmd/compile/internal/types.EType %v": "", - "cmd/compile/internal/types.IRNode %v": "", + "cmd/compile/internal/types.Kind %d": "", + "cmd/compile/internal/types.Kind %s": "", + "cmd/compile/internal/types.Kind %v": "", + "cmd/compile/internal/types.Object %v": "", "cmd/internal/obj.ABI %v": "", "error %v": "", "float64 %.2f": "", diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index 806417d03de31..b2716399a5535 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -68,7 +68,7 @@ func IncomparableField(t *types.Type) *types.Field { // EqCanPanic reports whether == on type t could panic (has an interface somewhere). // t must be comparable. func EqCanPanic(t *types.Type) bool { - switch t.Etype { + switch t.Kind() { default: return false case types.TINTER: @@ -120,7 +120,7 @@ func algtype1(t *types.Type) (AlgKind, *types.Type) { return ANOEQ, t } - switch t.Etype { + switch t.Kind() { case types.TANY, types.TFORW: // will be defined later. 
return ANOEQ, t @@ -274,7 +274,7 @@ func genhash(t *types.Type) *obj.LSym { // (And the closure generated by genhash will also get // dead-code eliminated, as we call the subtype hashers // directly.) - switch t.Etype { + switch t.Kind() { case types.TARRAY: genhash(t.Elem()) case types.TSTRUCT: @@ -303,7 +303,7 @@ func genhash(t *types.Type) *obj.LSym { np := ir.AsNode(tfn.Type().Params().Field(0).Nname) nh := ir.AsNode(tfn.Type().Params().Field(1).Nname) - switch t.Etype { + switch t.Kind() { case types.TARRAY: // An array of pure memory would be handled by the // standard algorithm, so the element type must not be @@ -536,7 +536,7 @@ func geneq(t *types.Type) *obj.LSym { // We reach here only for types that have equality but // cannot be handled by the standard algorithms, // so t must be either an array or a struct. - switch t.Etype { + switch t.Kind() { default: base.Fatalf("geneq %v", t) @@ -613,7 +613,7 @@ func geneq(t *types.Type) *obj.LSym { } } - switch t.Elem().Etype { + switch t.Elem().Kind() { case types.TSTRING: // Do two loops. First, check that all the lengths match (cheap). // Second, check that all the contents match (expensive). diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go index 5171983af0d83..af426f5b245de 100644 --- a/src/cmd/compile/internal/gc/align.go +++ b/src/cmd/compile/internal/gc/align.go @@ -185,7 +185,7 @@ func findTypeLoop(t *types.Type, path *[]*types.Type) bool { // We implement a simple DFS loop-finding algorithm. This // could be faster, but type cycles are rare. - if t.Sym != nil { + if t.Sym() != nil { // Declared type. Check for loops and otherwise // recurse on the type expression used in the type // declaration. @@ -193,7 +193,7 @@ func findTypeLoop(t *types.Type, path *[]*types.Type) bool { // Type imported from package, so it can't be part of // a type loop (otherwise that package should have // failed to compile). - if t.Sym.Pkg != ir.LocalPkg { + if t.Sym().Pkg != ir.LocalPkg { return false } @@ -212,7 +212,7 @@ func findTypeLoop(t *types.Type, path *[]*types.Type) bool { } else { // Anonymous type. Recurse on contained types. - switch t.Etype { + switch t.Kind() { case types.TARRAY: if findTypeLoop(t.Elem(), path) { return true @@ -321,15 +321,15 @@ func dowidth(t *types.Type) { t.Width = -2 t.Align = 0 // 0 means use t.Width, below - et := t.Etype + et := t.Kind() switch et { case types.TFUNC, types.TCHAN, types.TMAP, types.TSTRING: break // simtype == 0 during bootstrap default: - if simtype[t.Etype] != 0 { - et = simtype[t.Etype] + if simtype[t.Kind()] != 0 { + et = simtype[t.Kind()] } } diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index dbbac559ae63c..43c4ce7150fe4 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -35,7 +35,7 @@ func (p *exporter) markType(t *types.Type) { // only their unexpanded method set (i.e., exclusive of // interface embeddings), and the switch statement below // handles their full method set. - if t.Sym != nil && t.Etype != types.TINTER { + if t.Sym() != nil && t.Kind() != types.TINTER { for _, m := range t.Methods().Slice() { if types.IsExported(m.Sym.Name) { p.markObject(ir.AsNode(m.Nname)) @@ -52,7 +52,7 @@ func (p *exporter) markType(t *types.Type) { // Notably, we don't mark function parameter types, because // the user already needs some way to construct values of // those types. 
- switch t.Etype { + switch t.Kind() { case types.TPTR, types.TARRAY, types.TSLICE: p.markType(t.Elem()) @@ -153,11 +153,11 @@ func predeclared() []*types.Type { types.Types[types.TSTRING], // basic type aliases - types.Bytetype, - types.Runetype, + types.ByteType, + types.RuneType, // error - types.Errortype, + types.ErrorType, // untyped types types.UntypedBool, diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go index efca44c667640..07e864dd2e466 100644 --- a/src/cmd/compile/internal/gc/builtin.go +++ b/src/cmd/compile/internal/gc/builtin.go @@ -206,7 +206,7 @@ var runtimeDecls = [...]struct { func runtimeTypes() []*types.Type { var typs [131]*types.Type - typs[0] = types.Bytetype + typs[0] = types.ByteType typs[1] = types.NewPtr(typs[0]) typs[2] = types.Types[types.TANY] typs[3] = types.NewPtr(typs[2]) @@ -252,7 +252,7 @@ func runtimeTypes() []*types.Type { typs[43] = functype(nil, []*ir.Field{anonfield(typs[42]), anonfield(typs[22])}, []*ir.Field{anonfield(typs[28])}) typs[44] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[28])}) typs[45] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[28])}) - typs[46] = types.Runetype + typs[46] = types.RuneType typs[47] = types.NewSlice(typs[46]) typs[48] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[47])}, []*ir.Field{anonfield(typs[28])}) typs[49] = types.NewSlice(typs[0]) diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 4dee373bfa55c..4a61c77630c45 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -127,7 +127,7 @@ func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir } // Nil is technically not a constant, so handle it specially. 
- if n.Type().Etype == types.TNIL { + if n.Type().Kind() == types.TNIL { if n.Op() != ir.ONIL { base.Fatalf("unexpected op: %v (%v)", n, n.Op()) } @@ -147,7 +147,7 @@ func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir return n } - if t == nil || !ir.OKForConst[t.Etype] { + if t == nil || !ir.OKForConst[t.Kind()] { t = defaultType(n.Type()) } @@ -245,7 +245,7 @@ func operandType(op ir.Op, t *types.Type) *types.Type { return complexForFloat(t) } default: - if okfor[op][t.Etype] { + if okfor[op][t.Kind()] { return t } } @@ -499,12 +499,12 @@ func evalConst(n ir.Node) ir.Node { } case ir.OCONV, ir.ORUNESTR: - if ir.OKForConst[n.Type().Etype] && nl.Op() == ir.OLITERAL { + if ir.OKForConst[n.Type().Kind()] && nl.Op() == ir.OLITERAL { return origConst(n, convertVal(nl.Val(), n.Type(), true)) } case ir.OCONVNOP: - if ir.OKForConst[n.Type().Etype] && nl.Op() == ir.OLITERAL { + if ir.OKForConst[n.Type().Kind()] && nl.Op() == ir.OLITERAL { // set so n.Orig gets OCONV instead of OCONVNOP n.SetOp(ir.OCONV) return origConst(n, nl.Val()) @@ -555,7 +555,7 @@ func evalConst(n ir.Node) ir.Node { return n case ir.OCAP, ir.OLEN: - switch nl.Type().Etype { + switch nl.Type().Kind() { case types.TSTRING: if ir.IsConst(nl, constant.String) { return origIntConst(n, int64(len(nl.StringVal()))) @@ -729,7 +729,7 @@ func mixUntyped(t1, t2 *types.Type) *types.Type { } func defaultType(t *types.Type) *types.Type { - if !t.IsUntyped() || t.Etype == types.TNIL { + if !t.IsUntyped() || t.Kind() == types.TNIL { return t } @@ -741,7 +741,7 @@ func defaultType(t *types.Type) *types.Type { case types.UntypedInt: return types.Types[types.TINT] case types.UntypedRune: - return types.Runetype + return types.RuneType case types.UntypedFloat: return types.Types[types.TFLOAT64] case types.UntypedComplex: @@ -769,7 +769,7 @@ func indexconst(n ir.Node) int64 { if n.Op() != ir.OLITERAL { return -1 } - if !n.Type().IsInteger() && n.Type().Etype != types.TIDEAL { + if !n.Type().IsInteger() && n.Type().Kind() != types.TIDEAL { return -1 } @@ -885,9 +885,9 @@ func (s *constSet) add(pos src.XPos, n ir.Node, what, where string) { typ := n.Type() switch typ { - case types.Bytetype: + case types.ByteType: typ = types.Types[types.TUINT8] - case types.Runetype: + case types.RuneType: typ = types.Types[types.TINT32] } k := constSetKey{typ, ir.ConstValue(n)} diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 3b60496c5c689..3d0bdaec7a4a1 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -445,7 +445,7 @@ func funcarg(n *ir.Field, ctxt ir.Class) { // This happens during import, where the hidden_fndcl rule has // used functype directly to parse the function's type. 
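defaultType above encodes the spec's defaulting rules for untyped constants: untyped rune now defaults to types.RuneType, untyped float to float64, and so on. The same table is observable from ordinary Go via short variable declarations:

package main

import "fmt"

func main() {
	// Each untyped constant takes its default type on assignment,
	// mirroring the switch in defaultType.
	i := 1    // untyped int     -> int
	r := 'x'  // untyped rune    -> rune (int32)
	f := 1.5  // untyped float   -> float64
	c := 2i   // untyped complex -> complex128
	s := "go" // untyped string  -> string
	fmt.Printf("%T %T %T %T %T\n", i, r, f, c, s)
	// Output: int int32 float64 complex128 string
}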
func funcargs2(t *types.Type) { - if t.Etype != types.TFUNC { + if t.Kind() != types.TFUNC { base.Fatalf("funcargs2 %v", t) } @@ -496,7 +496,7 @@ func checkembeddedtype(t *types.Type) { return } - if t.Sym == nil && t.IsPtr() { + if t.Sym() == nil && t.IsPtr() { t = t.Elem() if t.IsInterface() { base.Errorf("embedded type cannot be a pointer to interface") @@ -505,7 +505,7 @@ func checkembeddedtype(t *types.Type) { if t.IsPtr() || t.IsUnsafePtr() { base.Errorf("embedded type cannot be a pointer") - } else if t.Etype == types.TFORW && !t.ForwardType().Embedlineno.IsKnown() { + } else if t.Kind() == types.TFORW && !t.ForwardType().Embedlineno.IsKnown() { t.ForwardType().Embedlineno = base.Pos } } @@ -719,12 +719,12 @@ func methodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sy base.Fatalf("blank method name") } - rsym := recv.Sym + rsym := recv.Sym() if recv.IsPtr() { if rsym != nil { base.Fatalf("declared pointer receiver type: %v", recv) } - rsym = recv.Elem().Sym + rsym = recv.Elem().Sym() } // Find the package the receiver type appeared in. For @@ -777,11 +777,11 @@ func addmethod(n *ir.Func, msym *types.Sym, t *types.Type, local, nointerface bo } mt := methtype(rf.Type) - if mt == nil || mt.Sym == nil { + if mt == nil || mt.Sym() == nil { pa := rf.Type t := pa if t != nil && t.IsPtr() { - if t.Sym != nil { + if t.Sym() != nil { base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t) return nil } @@ -791,7 +791,7 @@ func addmethod(n *ir.Func, msym *types.Sym, t *types.Type, local, nointerface bo switch { case t == nil || t.Broke(): // rely on typecheck having complained before - case t.Sym == nil: + case t.Sym() == nil: base.Errorf("invalid receiver type %v (%v is not a defined type)", pa, t) case t.IsPtr(): base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t) @@ -805,7 +805,7 @@ func addmethod(n *ir.Func, msym *types.Sym, t *types.Type, local, nointerface bo return nil } - if local && mt.Sym.Pkg != ir.LocalPkg { + if local && mt.Sym().Pkg != ir.LocalPkg { base.Errorf("cannot define new methods on non-local type %v", mt) return nil } diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go index d9bfd6f5edebe..d6e42e4f03edf 100644 --- a/src/cmd/compile/internal/gc/embed.go +++ b/src/cmd/compile/internal/gc/embed.go @@ -151,13 +151,13 @@ func embedKindApprox(typ ir.Node) int { // embedKind determines the kind of embedding variable. func embedKind(typ *types.Type) int { - if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "embed")) { + if typ.Sym() != nil && typ.Sym().Name == "FS" && (typ.Sym().Pkg.Path == "embed" || (typ.Sym().Pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "embed")) { return embedFiles } if typ == types.Types[types.TSTRING] { return embedString } - if typ.Sym == nil && typ.IsSlice() && typ.Elem() == types.Bytetype { + if typ.Sym() == nil && typ.IsSlice() && typ.Elem() == types.ByteType { return embedBytes } return embedUnknown diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index f2fff02959518..b29896e5a48b8 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -659,7 +659,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { // unsafeValue evaluates a uintptr-typed arithmetic expression looking // for conversions from an unsafe.Pointer. 
func (e *Escape) unsafeValue(k EscHole, n ir.Node) { - if n.Type().Etype != types.TUINTPTR { + if n.Type().Kind() != types.TUINTPTR { base.Fatalf("unexpected type %v for %v", n.Type(), n) } diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index 24393de53d6d1..c493165c7688a 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -102,7 +102,7 @@ var gopkg *types.Pkg // pseudo-package for method symbols on anonymous receiver var zerosize int64 -var simtype [types.NTYPE]types.EType +var simtype [types.NTYPE]types.Kind var ( isInt [types.NTYPE]bool diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 2dfce2659640c..8f50868fc793e 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -473,15 +473,15 @@ func (p *iexporter) doDecl(n ir.Node) { w.tag('T') w.pos(n.Pos()) - underlying := n.Type().Orig - if underlying == types.Errortype.Orig { + underlying := n.Type().Underlying() + if underlying == types.ErrorType.Underlying() { // For "type T error", use error as the // underlying type instead of error's own // underlying anonymous interface. This // ensures consistency with how importers may // declare error (e.g., go/types uses nil Pkg // for predeclared objects). - underlying = types.Errortype + underlying = types.ErrorType } w.typ(underlying) @@ -634,8 +634,8 @@ func (w *exportWriter) startType(k itag) { } func (w *exportWriter) doTyp(t *types.Type) { - if t.Sym != nil { - if t.Sym.Pkg == ir.BuiltinPkg || t.Sym.Pkg == unsafepkg { + if t.Sym() != nil { + if t.Sym().Pkg == ir.BuiltinPkg || t.Sym().Pkg == unsafepkg { base.Fatalf("builtin type missing from typIndex: %v", t) } @@ -644,7 +644,7 @@ func (w *exportWriter) doTyp(t *types.Type) { return } - switch t.Etype { + switch t.Kind() { case types.TPTR: w.startType(pointerType) w.typ(t.Elem()) @@ -762,7 +762,7 @@ func constTypeOf(typ *types.Type) constant.Kind { return constant.Complex } - switch typ.Etype { + switch typ.Kind() { case types.TBOOL: return constant.Bool case types.TSTRING: @@ -809,7 +809,7 @@ func intSize(typ *types.Type) (signed bool, maxBytes uint) { return true, Mpprec / 8 } - switch typ.Etype { + switch typ.Kind() { case types.TFLOAT32, types.TCOMPLEX64: return true, 3 case types.TFLOAT64, types.TCOMPLEX128: @@ -821,7 +821,7 @@ func intSize(typ *types.Type) (signed bool, maxBytes uint) { // The go/types API doesn't expose sizes to importers, so they // don't know how big these types are. 
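t.Sym and t.Orig get the same accessor treatment, becoming t.Sym() and t.Underlying(); the latter name deliberately matches go/types, which this series is converging toward. A short go/types analogue of the two queries (this is the exported library, not the compiler's internal package):

package main

import (
	"fmt"
	"go/types"
)

func main() {
	// A named (defined) type has a type name and an underlying type.
	obj := types.NewTypeName(0, nil, "MyInt", nil)
	named := types.NewNamed(obj, types.Typ[types.Int], nil)
	fmt.Println(named.Obj().Name(), named.Underlying()) // MyInt int

	// An anonymous composite type has no name; it is its own underlying type.
	slice := types.NewSlice(types.Typ[types.Byte])
	fmt.Println(slice.Underlying() == slice) // true
}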
- switch typ.Etype { + switch typ.Kind() { case types.TINT, types.TUINT, types.TUINTPTR: maxBytes = 8 } diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 97ecb9559b431..b36a01e3898b6 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -61,10 +61,10 @@ func fnpkg(fn *ir.Name) *types.Pkg { if rcvr.IsPtr() { rcvr = rcvr.Elem() } - if rcvr.Sym == nil { + if rcvr.Sym() == nil { base.Fatalf("receiver with no sym: [%v] %L (%v)", fn.Sym(), fn, rcvr) } - return rcvr.Sym.Pkg + return rcvr.Sym().Pkg } // non-method diff --git a/src/cmd/compile/internal/gc/mkbuiltin.go b/src/cmd/compile/internal/gc/mkbuiltin.go index 5317484de9ec6..38aa6016457e2 100644 --- a/src/cmd/compile/internal/gc/mkbuiltin.go +++ b/src/cmd/compile/internal/gc/mkbuiltin.go @@ -143,9 +143,9 @@ func (i *typeInterner) mktype(t ast.Expr) string { case *ast.Ident: switch t.Name { case "byte": - return "types.Bytetype" + return "types.ByteType" case "rune": - return "types.Runetype" + return "types.RuneType" } return fmt.Sprintf("types.Types[types.T%s]", strings.ToUpper(t.Name)) case *ast.SelectorExpr: diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 7b5e3015c25ae..f65131417a38a 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -218,7 +218,7 @@ func addptabs() { if s.Pkg.Name != "main" { continue } - if n.Type().Etype == types.TFUNC && n.Class() == ir.PFUNC { + if n.Type().Kind() == types.TFUNC && n.Class() == ir.PFUNC { // function ptabs = append(ptabs, ptabEntry{s: s, t: ir.AsNode(s.Def).Type()}) } else { @@ -602,7 +602,7 @@ func litsym(n, c ir.Node, wid int) { case constant.Float: f, _ := constant.Float64Val(u) - switch n.Type().Etype { + switch n.Type().Kind() { case types.TFLOAT32: s.WriteFloat32(base.Ctxt, n.Offset(), float32(f)) case types.TFLOAT64: @@ -612,7 +612,7 @@ func litsym(n, c ir.Node, wid int) { case constant.Complex: re, _ := constant.Float64Val(constant.Real(u)) im, _ := constant.Float64Val(constant.Imag(u)) - switch n.Type().Etype { + switch n.Type().Kind() { case types.TCOMPLEX64: s.WriteFloat32(base.Ctxt, n.Offset(), float32(re)) s.WriteFloat32(base.Ctxt, n.Offset()+4, float32(im)) diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 83cfb44474055..c2e236537f7be 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -784,7 +784,7 @@ func (o *Order) stmt(n ir.Node) { n.SetRight(o.expr(n.Right(), nil)) orderBody := true - switch n.Type().Etype { + switch n.Type().Kind() { default: base.Fatalf("order.stmt range %v", n.Type()) diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index 6ad3140081ac4..f2555cc94160f 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -416,7 +416,7 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) { return } - switch t.Etype { + switch t.Kind() { case types.TPTR, types.TUNSAFEPTR, types.TFUNC, types.TCHAN, types.TMAP: if off&int64(Widthptr-1) != 0 { base.Fatalf("onebitwalktype1: invalid alignment, %v", t) @@ -1300,7 +1300,7 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap { // to fully initialize t. 
func isfat(t *types.Type) bool { if t != nil { - switch t.Etype { + switch t.Kind() { case types.TSLICE, types.TSTRING, types.TINTER: // maybe remove later return true diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index 2f2d7051c3dd4..e48642a854a5b 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -61,7 +61,7 @@ func typecheckrangeExpr(n ir.Node) { var t1, t2 *types.Type toomany := false - switch t.Etype { + switch t.Kind() { default: base.ErrorfAt(n.Pos(), "cannot range over %L", n.Right()) return @@ -88,7 +88,7 @@ func typecheckrangeExpr(n ir.Node) { case types.TSTRING: t1 = types.Types[types.TINT] - t2 = types.Runetype + t2 = types.RuneType } if n.List().Len() > 2 || toomany { @@ -208,7 +208,7 @@ func walkrange(nrange ir.Node) ir.Node { var body []ir.Node var init []ir.Node - switch t.Etype { + switch t.Kind() { default: base.Fatalf("walkrange") @@ -375,7 +375,7 @@ func walkrange(nrange ir.Node) ir.Node { hv1 := temp(types.Types[types.TINT]) hv1t := temp(types.Types[types.TINT]) - hv2 := temp(types.Runetype) + hv2 := temp(types.RuneType) // hv1 := 0 init = append(init, ir.Nod(ir.OAS, hv1, nil)) @@ -391,7 +391,7 @@ func walkrange(nrange ir.Node) ir.Node { // hv2 := rune(ha[hv1]) nind := ir.Nod(ir.OINDEX, ha, hv1) nind.SetBounded(true) - body = append(body, ir.Nod(ir.OAS, hv2, conv(nind, types.Runetype))) + body = append(body, ir.Nod(ir.OAS, hv2, conv(nind, types.RuneType))) // if hv2 < utf8.RuneSelf nif := ir.Nod(ir.OIF, nil, nil) @@ -467,7 +467,7 @@ func isMapClear(n ir.Node) bool { return false } - if n.Op() != ir.ORANGE || n.Type().Etype != types.TMAP || n.List().Len() != 1 { + if n.Op() != ir.ORANGE || n.Type().Kind() != types.TMAP || n.List().Len() != 1 { return false } diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 73d369f413b42..4ab3005ce8211 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -68,7 +68,7 @@ func imethodSize() int { return 4 + 4 } // Sizeof(runtime.imeth func commonSize() int { return 4*Widthptr + 8 + 8 } // Sizeof(runtime._type{}) func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{}) - if t.Sym == nil && len(methods(t)) == 0 { + if t.Sym() == nil && len(methods(t)) == 0 { return 0 } return 4 + 2 + 2 + 4 + 4 @@ -448,7 +448,7 @@ func methods(t *types.Type) []*Sig { func imethods(t *types.Type) []*Sig { var methods []*Sig for _, f := range t.Fields().Slice() { - if f.Type.Etype != types.TFUNC || f.Sym == nil { + if f.Type.Kind() != types.TFUNC || f.Sym == nil { continue } if f.Sym.IsBlank() { @@ -640,7 +640,7 @@ func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym { // backing array of the []method field is written (by dextratypeData). 
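The walkrange hunk above shows the string case: ranging over a string yields an int index and a types.RuneType element, with a fast path for bytes below utf8.RuneSelf and a runtime decode otherwise. The user-visible behavior, plus a hand-rolled equivalent of the slow path:

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	s := "aé" // 'a' is one byte; 'é' is two
	for i, r := range s {
		fmt.Printf("index %d rune %q\n", i, r)
	}
	// Equivalent of walkrange's slow path for a multi-byte rune.
	r, size := utf8.DecodeRuneInString(s[1:])
	fmt.Println(r == 'é', size) // true 2
}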
func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int { m := methods(t) - if t.Sym == nil && len(m) == 0 { + if t.Sym() == nil && len(m) == 0 { return ot } noff := int(Rnd(int64(ot), int64(Widthptr))) @@ -672,16 +672,16 @@ func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int { } func typePkg(t *types.Type) *types.Pkg { - tsym := t.Sym + tsym := t.Sym() if tsym == nil { - switch t.Etype { + switch t.Kind() { case types.TARRAY, types.TSLICE, types.TPTR, types.TCHAN: if t.Elem() != nil { - tsym = t.Elem().Sym + tsym = t.Elem().Sym() } } } - if tsym != nil && t != types.Types[t.Etype] && t != types.Errortype { + if tsym != nil && t != types.Types[t.Kind()] && t != types.ErrorType { return tsym.Pkg } return nil @@ -753,7 +753,7 @@ func typeptrdata(t *types.Type) int64 { return 0 } - switch t.Etype { + switch t.Kind() { case types.TPTR, types.TUNSAFEPTR, types.TFUNC, @@ -823,7 +823,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { var sptr *obj.LSym if !t.IsPtr() || t.IsPtrElem() { tptr := types.NewPtr(t) - if t.Sym != nil || methods(tptr) != nil { + if t.Sym() != nil || methods(tptr) != nil { sptrWeak = false } sptr = dtypesym(tptr) @@ -855,7 +855,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { if uncommonSize(t) != 0 { tflag |= tflagUncommon } - if t.Sym != nil && t.Sym.Name != "" { + if t.Sym() != nil && t.Sym().Name != "" { tflag |= tflagNamed } if IsRegularMemory(t) { @@ -872,12 +872,12 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { if !strings.HasPrefix(p, "*") { p = "*" + p tflag |= tflagExtraStar - if t.Sym != nil { - exported = types.IsExported(t.Sym.Name) + if t.Sym() != nil { + exported = types.IsExported(t.Sym().Name) } } else { - if t.Elem() != nil && t.Elem().Sym != nil { - exported = types.IsExported(t.Elem().Sym.Name) + if t.Elem() != nil && t.Elem().Sym() != nil { + exported = types.IsExported(t.Elem().Sym().Name) } } @@ -895,7 +895,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { ot = duint8(lsym, ot, t.Align) // align ot = duint8(lsym, ot, t.Align) // fieldAlign - i = kinds[t.Etype] + i = kinds[t.Kind()] if isdirectiface(t) { i |= objabi.KindDirectIface } @@ -1029,7 +1029,7 @@ func itabname(t, itype *types.Type) ir.Node { // isreflexive reports whether t has a reflexive equality operator. // That is, if x==x for all x of type t. func isreflexive(t *types.Type) bool { - switch t.Etype { + switch t.Kind() { case types.TBOOL, types.TINT, types.TUINT, @@ -1075,7 +1075,7 @@ func isreflexive(t *types.Type) bool { // needkeyupdate reports whether map updates with t as a key // need the key to be updated. func needkeyupdate(t *types.Type) bool { - switch t.Etype { + switch t.Kind() { case types.TBOOL, types.TINT, types.TUINT, types.TINT8, types.TUINT8, types.TINT16, types.TUINT16, types.TINT32, types.TUINT32, types.TINT64, types.TUINT64, types.TUINTPTR, types.TPTR, types.TUNSAFEPTR, types.TCHAN: return false @@ -1104,7 +1104,7 @@ func needkeyupdate(t *types.Type) bool { // hashMightPanic reports whether the hash of a map key of type t might panic. func hashMightPanic(t *types.Type) bool { - switch t.Etype { + switch t.Kind() { case types.TINTER: return true @@ -1128,8 +1128,8 @@ func hashMightPanic(t *types.Type) bool { // They've been separate internally to make error messages // better, but we have to merge them in the reflect tables. 
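isreflexive above asks whether x == x holds for every value of the type; floating point is the classic failure (NaN != NaN), which is why the map runtime cannot assume an equal key already exists. Observable from plain Go:

package main

import (
	"fmt"
	"math"
)

func main() {
	m := map[float64]int{}
	nan := math.NaN()
	m[nan] = 1
	m[nan] = 2 // not an update: NaN never equals the stored key
	fmt.Println(len(m), nan == nan) // 2 false
}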
func formalType(t *types.Type) *types.Type { - if t == types.Bytetype || t == types.Runetype { - return types.Types[t.Etype] + if t == types.ByteType || t == types.RuneType { + return types.Types[t.Kind()] } return t } @@ -1152,19 +1152,19 @@ func dtypesym(t *types.Type) *obj.LSym { // emit the type structures for int, float, etc. tbase := t - if t.IsPtr() && t.Sym == nil && t.Elem().Sym != nil { + if t.IsPtr() && t.Sym() == nil && t.Elem().Sym() != nil { tbase = t.Elem() } dupok := 0 - if tbase.Sym == nil { + if tbase.Sym() == nil { dupok = obj.DUPOK } - if base.Ctxt.Pkgpath != "runtime" || (tbase != types.Types[tbase.Etype] && tbase != types.Bytetype && tbase != types.Runetype && tbase != types.Errortype) { // int, float, etc + if base.Ctxt.Pkgpath != "runtime" || (tbase != types.Types[tbase.Kind()] && tbase != types.ByteType && tbase != types.RuneType && tbase != types.ErrorType) { // int, float, etc // named types from other files are defined only by those files - if tbase.Sym != nil && tbase.Sym.Pkg != ir.LocalPkg { + if tbase.Sym() != nil && tbase.Sym().Pkg != ir.LocalPkg { if i, ok := typeSymIdx[tbase]; ok { - lsym.Pkg = tbase.Sym.Pkg.Prefix + lsym.Pkg = tbase.Sym().Pkg.Prefix if t != tbase { lsym.SymIdx = int32(i[1]) } else { @@ -1175,13 +1175,13 @@ func dtypesym(t *types.Type) *obj.LSym { return lsym } // TODO(mdempsky): Investigate whether this can happen. - if tbase.Etype == types.TFORW { + if tbase.Kind() == types.TFORW { return lsym } } ot := 0 - switch t.Etype { + switch t.Kind() { default: ot = dcommontype(lsym, t) ot = dextratype(lsym, ot, t, 0) @@ -1262,8 +1262,8 @@ func dtypesym(t *types.Type) *obj.LSym { ot = dcommontype(lsym, t) var tpkg *types.Pkg - if t.Sym != nil && t != types.Types[t.Etype] && t != types.Errortype { - tpkg = t.Sym.Pkg + if t.Sym() != nil && t != types.Types[t.Kind()] && t != types.ErrorType { + tpkg = t.Sym().Pkg } ot = dgopkgpath(lsym, ot, tpkg) @@ -1328,7 +1328,7 @@ func dtypesym(t *types.Type) *obj.LSym { ot = dextratype(lsym, ot, t, 0) case types.TPTR: - if t.Elem().Etype == types.TANY { + if t.Elem().Kind() == types.TANY { // ../../../../runtime/type.go:/UnsafePointerType ot = dcommontype(lsym, t) ot = dextratype(lsym, ot, t, 0) @@ -1397,13 +1397,13 @@ func dtypesym(t *types.Type) *obj.LSym { // When buildmode=shared, all types are in typelinks so the // runtime can deduplicate type pointers. keep := base.Ctxt.Flag_dynlink - if !keep && t.Sym == nil { + if !keep && t.Sym() == nil { // For an unnamed type, we only need the link if the type can // be created at run time by reflect.PtrTo and similar // functions. If the type exists in the program, those // functions must return the existing type structure rather // than creating a new one. - switch t.Etype { + switch t.Kind() { case types.TPTR, types.TARRAY, types.TCHAN, types.TFUNC, types.TMAP, types.TSLICE, types.TSTRUCT: keep = true } @@ -1541,7 +1541,7 @@ func dumpsignats() { for _, ts := range signats { t := ts.t dtypesym(t) - if t.Sym != nil { + if t.Sym() != nil { dtypesym(types.NewPtr(t)) } } @@ -1616,7 +1616,7 @@ func dumpbasictypes() { // another possible choice would be package main, // but using runtime means fewer copies in object files. if base.Ctxt.Pkgpath == "runtime" { - for i := types.EType(1); i <= types.TBOOL; i++ { + for i := types.Kind(1); i <= types.TBOOL; i++ { dtypesym(types.NewPtr(types.Types[i])) } dtypesym(types.NewPtr(types.Types[types.TSTRING])) @@ -1624,9 +1624,9 @@ func dumpbasictypes() { // emit type structs for error and func(error) string. 
// The latter is the type of an auto-generated wrapper. - dtypesym(types.NewPtr(types.Errortype)) + dtypesym(types.NewPtr(types.ErrorType)) - dtypesym(functype(nil, []*ir.Field{anonfield(types.Errortype)}, []*ir.Field{anonfield(types.Types[types.TSTRING])})) + dtypesym(functype(nil, []*ir.Field{anonfield(types.ErrorType)}, []*ir.Field{anonfield(types.Types[types.TSTRING])})) // add paths for runtime and main, which 6l imports implicitly. dimportpath(Runtimepkg) @@ -1665,7 +1665,7 @@ func (a typesByString) Less(i, j int) bool { // will be equal for the above checks, but different in DWARF output. // Sort by source position to ensure deterministic order. // See issues 27013 and 30202. - if a[i].t.Etype == types.TINTER && a[i].t.Methods().Len() > 0 { + if a[i].t.Kind() == types.TINTER && a[i].t.Methods().Len() > 0 { return a[i].t.Methods().Index(0).Pos.Before(a[j].t.Methods().Index(0).Pos) } return false @@ -1821,7 +1821,7 @@ func (p *GCProg) emit(t *types.Type, offset int64) { p.w.Ptr(offset / int64(Widthptr)) return } - switch t.Etype { + switch t.Kind() { default: base.Fatalf("GCProg.emit: unexpected type %v", t) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 4be6caa0e3123..3e020d7b9269b 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -54,13 +54,13 @@ func initssaconfig() { _ = types.NewPtr(types.Types[types.TINTER]) // *interface{} _ = types.NewPtr(types.NewPtr(types.Types[types.TSTRING])) // **string _ = types.NewPtr(types.NewSlice(types.Types[types.TINTER])) // *[]interface{} - _ = types.NewPtr(types.NewPtr(types.Bytetype)) // **byte - _ = types.NewPtr(types.NewSlice(types.Bytetype)) // *[]byte + _ = types.NewPtr(types.NewPtr(types.ByteType)) // **byte + _ = types.NewPtr(types.NewSlice(types.ByteType)) // *[]byte _ = types.NewPtr(types.NewSlice(types.Types[types.TSTRING])) // *[]string _ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[types.TUINT8]))) // ***uint8 _ = types.NewPtr(types.Types[types.TINT16]) // *int16 _ = types.NewPtr(types.Types[types.TINT64]) // *int64 - _ = types.NewPtr(types.Errortype) // *error + _ = types.NewPtr(types.ErrorType) // *error types.NewPtrCacheEnabled = false ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, base.Ctxt, base.Flag.N == 0) ssaConfig.SoftFloat = thearch.SoftFloat @@ -1591,7 +1591,7 @@ func (s *state) exit() *ssa.Block { type opAndType struct { op ir.Op - etype types.EType + etype types.Kind } var opToSSA = map[opAndType]ssa.Op{ @@ -1766,8 +1766,8 @@ var opToSSA = map[opAndType]ssa.Op{ opAndType{ir.OLE, types.TFLOAT32}: ssa.OpLeq32F, } -func (s *state) concreteEtype(t *types.Type) types.EType { - e := t.Etype +func (s *state) concreteEtype(t *types.Type) types.Kind { + e := t.Kind() switch e { default: return e @@ -1799,7 +1799,7 @@ func (s *state) ssaOp(op ir.Op, t *types.Type) ssa.Op { } func floatForComplex(t *types.Type) *types.Type { - switch t.Etype { + switch t.Kind() { case types.TCOMPLEX64: return types.Types[types.TFLOAT32] case types.TCOMPLEX128: @@ -1810,7 +1810,7 @@ func floatForComplex(t *types.Type) *types.Type { } func complexForFloat(t *types.Type) *types.Type { - switch t.Etype { + switch t.Kind() { case types.TFLOAT32: return types.Types[types.TCOMPLEX64] case types.TFLOAT64: @@ -1822,19 +1822,19 @@ func complexForFloat(t *types.Type) *types.Type { type opAndTwoTypes struct { op ir.Op - etype1 types.EType - etype2 types.EType + etype1 types.Kind + etype2 types.Kind } type twoTypes struct { - etype1 types.EType - etype2 
types.EType + etype1 types.Kind + etype2 types.Kind } type twoOpsAndType struct { op1 ssa.Op op2 ssa.Op - intermediateType types.EType + intermediateType types.Kind } var fpConvOpToSSA = map[twoTypes]twoOpsAndType{ @@ -2115,12 +2115,12 @@ func (s *state) expr(n ir.Node) *ssa.Value { v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type // CONVNOP closure - if to.Etype == types.TFUNC && from.IsPtrShaped() { + if to.Kind() == types.TFUNC && from.IsPtrShaped() { return v } // named <--> unnamed type or typed <--> untyped const - if from.Etype == to.Etype { + if from.Kind() == to.Kind() { return v } @@ -2130,7 +2130,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { } // map <--> *hmap - if to.Etype == types.TMAP && from.IsPtr() && + if to.Kind() == types.TMAP && from.IsPtr() && to.MapType().Hmap == from.Elem() { return v } @@ -2141,8 +2141,8 @@ func (s *state) expr(n ir.Node) *ssa.Value { s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width) return nil } - if etypesign(from.Etype) != etypesign(to.Etype) { - s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype) + if etypesign(from.Kind()) != etypesign(to.Kind()) { + s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Kind(), to, to.Kind()) return nil } @@ -2153,7 +2153,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { return v } - if etypesign(from.Etype) == 0 { + if etypesign(from.Kind()) == 0 { s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to) return nil } @@ -2329,7 +2329,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x))) } - s.Fatalf("unhandled OCONV %s -> %s", n.Left().Type().Etype, n.Type().Etype) + s.Fatalf("unhandled OCONV %s -> %s", n.Left().Type().Kind(), n.Type().Kind()) return nil case ir.ODOTTYPE: @@ -3172,7 +3172,7 @@ const ( type sfRtCallDef struct { rtfn *obj.LSym - rtype types.EType + rtype types.Kind } var softFloatOps map[ssa.Op]sfRtCallDef @@ -3467,9 +3467,9 @@ func init() { }, sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) - type atomicOpEmitter func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) + type atomicOpEmitter func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.Kind) - makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.EType, emit atomicOpEmitter) intrinsicBuilder { + makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.Kind, emit atomicOpEmitter) intrinsicBuilder { return func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { // Target Atomic feature is identified by dynamic detection @@ -3505,7 +3505,7 @@ func init() { } } - atomicXchgXaddEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) { + atomicXchgXaddEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.Kind) { v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v) @@ -3561,7 +3561,7 @@ func init() { }, sys.PPC64) - atomicCasEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) { + atomicCasEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.Kind) { v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) s.vars[memVar] = 
s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v) @@ -3599,7 +3599,7 @@ func init() { }, sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X) - atomicAndOrEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) { + atomicAndOrEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.Kind) { s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem()) } @@ -4810,7 +4810,7 @@ func (s *state) getClosureAndRcvr(fn ir.Node) (*ssa.Value, *ssa.Value) { // etypesign returns the signed-ness of e, for integer/pointer etypes. // -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer. -func etypesign(e types.EType) int8 { +func etypesign(e types.Kind) int8 { switch e { case types.TINT8, types.TINT16, types.TINT32, types.TINT64, types.TINT: return -1 @@ -4980,7 +4980,7 @@ func canSSAType(t *types.Type) bool { // Too big and we'll introduce too much register pressure. return false } - switch t.Etype { + switch t.Kind() { case types.TARRAY: // We can't do larger arrays because dynamic indexing is // not supported on SSA variables. diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 04c8c537bd955..011a7ac5bc0f7 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -181,7 +181,7 @@ func nodstr(s string) ir.Node { return ir.NewLiteral(constant.MakeString(s)) } -func isptrto(t *types.Type, et types.EType) bool { +func isptrto(t *types.Type, et types.Kind) bool { if t == nil { return false } @@ -192,7 +192,7 @@ func isptrto(t *types.Type, et types.EType) bool { if t == nil { return false } - if t.Etype != et { + if t.Kind() != et { return false } return true @@ -208,7 +208,7 @@ func methtype(t *types.Type) *types.Type { // Strip away pointer if it's there. if t.IsPtr() { - if t.Sym != nil { + if t.Sym() != nil { return nil } t = t.Elem() @@ -218,15 +218,15 @@ func methtype(t *types.Type) *types.Type { } // Must be a named type or anonymous struct. - if t.Sym == nil && !t.IsStruct() { + if t.Sym() == nil && !t.IsStruct() { return nil } // Check types. - if issimple[t.Etype] { + if issimple[t.Kind()] { return t } - switch t.Etype { + switch t.Kind() { case types.TARRAY, types.TCHAN, types.TFUNC, types.TMAP, types.TSLICE, types.TSTRING, types.TSTRUCT: return t } @@ -241,7 +241,7 @@ func assignop(src, dst *types.Type) (ir.Op, string) { if src == dst { return ir.OCONVNOP, "" } - if src == nil || dst == nil || src.Etype == types.TFORW || dst.Etype == types.TFORW || src.Orig == nil || dst.Orig == nil { + if src == nil || dst == nil || src.Kind() == types.TFORW || dst.Kind() == types.TFORW || src.Underlying() == nil || dst.Underlying() == nil { return ir.OXXX, "" } @@ -257,13 +257,13 @@ func assignop(src, dst *types.Type) (ir.Op, string) { // we want to recompute the itab. Recomputing the itab ensures // that itabs are unique (thus an interface with a compile-time // type I has an itab with interface type I). - if types.Identical(src.Orig, dst.Orig) { + if types.Identical(src.Underlying(), dst.Underlying()) { if src.IsEmptyInterface() { // Conversion between two empty interfaces // requires no code. return ir.OCONVNOP, "" } - if (src.Sym == nil || dst.Sym == nil) && !src.IsInterface() { + if (src.Sym() == nil || dst.Sym() == nil) && !src.IsInterface() { // Conversion between two types, at least one unnamed, // needs no conversion. The exception is nonempty interfaces // which need to have their itab updated. 
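Rule 2 of assignop, above, is the identical-underlying-types rule: if at least one side is unnamed, assignment needs no conversion code (OCONVNOP), except for nonempty interfaces, whose itab must be recomputed. The source-level effect:

package main

import "fmt"

type MyBytes []byte

func main() {
	// []byte and MyBytes have identical underlying types and one side
	// is unnamed, so these assignments compile to no conversion code.
	var b []byte = MyBytes{1, 2}
	var m MyBytes = b
	fmt.Println(b, m) // [1 2] [1 2]
}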
@@ -272,7 +272,7 @@ func assignop(src, dst *types.Type) (ir.Op, string) { } // 3. dst is an interface type and src implements dst. - if dst.IsInterface() && src.Etype != types.TNIL { + if dst.IsInterface() && src.Kind() != types.TNIL { var missing, have *types.Field var ptr int if implements(src, dst, &missing, &have, &ptr) { @@ -309,7 +309,7 @@ func assignop(src, dst *types.Type) (ir.Op, string) { return ir.OXXX, why } - if src.IsInterface() && dst.Etype != types.TBLANK { + if src.IsInterface() && dst.Kind() != types.TBLANK { var missing, have *types.Field var ptr int var why string @@ -323,14 +323,14 @@ func assignop(src, dst *types.Type) (ir.Op, string) { // src and dst have identical element types, and // either src or dst is not a named type. if src.IsChan() && src.ChanDir() == types.Cboth && dst.IsChan() { - if types.Identical(src.Elem(), dst.Elem()) && (src.Sym == nil || dst.Sym == nil) { + if types.Identical(src.Elem(), dst.Elem()) && (src.Sym() == nil || dst.Sym() == nil) { return ir.OCONVNOP, "" } } // 5. src is the predeclared identifier nil and dst is a nillable type. - if src.Etype == types.TNIL { - switch dst.Etype { + if src.Kind() == types.TNIL { + switch dst.Kind() { case types.TPTR, types.TFUNC, types.TMAP, @@ -344,7 +344,7 @@ func assignop(src, dst *types.Type) (ir.Op, string) { // 6. rule about untyped constants - already converted by defaultlit. // 7. Any typed value can be assigned to the blank identifier. - if dst.Etype == types.TBLANK { + if dst.Kind() == types.TBLANK { return ir.OCONVNOP, "" } @@ -373,7 +373,7 @@ func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) { return ir.OXXX, why } // (b) Disallow string to []T where T is go:notinheap. - if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Etype == types.Bytetype.Etype || dst.Elem().Etype == types.Runetype.Etype) { + if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Kind() == types.ByteType.Kind() || dst.Elem().Kind() == types.RuneType.Kind()) { why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem()) return ir.OXXX, why } @@ -393,21 +393,21 @@ func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) { } // 2. Ignoring struct tags, src and dst have identical underlying types. - if types.IdenticalIgnoreTags(src.Orig, dst.Orig) { + if types.IdenticalIgnoreTags(src.Underlying(), dst.Underlying()) { return ir.OCONVNOP, "" } // 3. src and dst are unnamed pointer types and, ignoring struct tags, // their base types have identical underlying types. - if src.IsPtr() && dst.IsPtr() && src.Sym == nil && dst.Sym == nil { - if types.IdenticalIgnoreTags(src.Elem().Orig, dst.Elem().Orig) { + if src.IsPtr() && dst.IsPtr() && src.Sym() == nil && dst.Sym() == nil { + if types.IdenticalIgnoreTags(src.Elem().Underlying(), dst.Elem().Underlying()) { return ir.OCONVNOP, "" } } // 4. src and dst are both integer or floating point types. if (src.IsInteger() || src.IsFloat()) && (dst.IsInteger() || dst.IsFloat()) { - if simtype[src.Etype] == simtype[dst.Etype] { + if simtype[src.Kind()] == simtype[dst.Kind()] { return ir.OCONVNOP, "" } return ir.OCONV, "" @@ -415,7 +415,7 @@ func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) { // 5. src and dst are both complex types. 
if src.IsComplex() && dst.IsComplex() { - if simtype[src.Etype] == simtype[dst.Etype] { + if simtype[src.Kind()] == simtype[dst.Kind()] { return ir.OCONVNOP, "" } return ir.OCONV, "" @@ -435,10 +435,10 @@ func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) { } if src.IsSlice() && dst.IsString() { - if src.Elem().Etype == types.Bytetype.Etype { + if src.Elem().Kind() == types.ByteType.Kind() { return ir.OBYTES2STR, "" } - if src.Elem().Etype == types.Runetype.Etype { + if src.Elem().Kind() == types.RuneType.Kind() { return ir.ORUNES2STR, "" } } @@ -446,10 +446,10 @@ func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) { // 7. src is a string and dst is []byte or []rune. // String to slice. if src.IsString() && dst.IsSlice() { - if dst.Elem().Etype == types.Bytetype.Etype { + if dst.Elem().Kind() == types.ByteType.Kind() { return ir.OSTR2BYTES, "" } - if dst.Elem().Etype == types.Runetype.Etype { + if dst.Elem().Kind() == types.RuneType.Kind() { return ir.OSTR2RUNES, "" } } @@ -467,7 +467,7 @@ func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) { // src is map and dst is a pointer to corresponding hmap. // This rule is needed for the implementation detail that // go gc maps are implemented as a pointer to a hmap struct. - if src.Etype == types.TMAP && dst.IsPtr() && + if src.Kind() == types.TMAP && dst.IsPtr() && src.MapType().Hmap == dst.Elem() { return ir.OCONVNOP, "" } @@ -485,7 +485,7 @@ func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node { return n } - if t.Etype == types.TBLANK && n.Type().Etype == types.TNIL { + if t.Kind() == types.TBLANK && n.Type().Kind() == types.TNIL { base.Errorf("use of untyped nil") } @@ -493,7 +493,7 @@ func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node { if n.Type() == nil { return n } - if t.Etype == types.TBLANK { + if t.Kind() == types.TBLANK { return n } @@ -600,15 +600,15 @@ func calcHasCall(n ir.Node) bool { // When using soft-float, these ops might be rewritten to function calls // so we ensure they are evaluated first. case ir.OADD, ir.OSUB, ir.ONEG, ir.OMUL: - if thearch.SoftFloat && (isFloat[n.Type().Etype] || isComplex[n.Type().Etype]) { + if thearch.SoftFloat && (isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) { return true } case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT: - if thearch.SoftFloat && (isFloat[n.Left().Type().Etype] || isComplex[n.Left().Type().Etype]) { + if thearch.SoftFloat && (isFloat[n.Left().Type().Kind()] || isComplex[n.Left().Type().Kind()]) { return true } case ir.OCONV: - if thearch.SoftFloat && ((isFloat[n.Type().Etype] || isComplex[n.Type().Etype]) || (isFloat[n.Left().Type().Etype] || isComplex[n.Left().Type().Etype])) { + if thearch.SoftFloat && ((isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) || (isFloat[n.Left().Type().Kind()] || isComplex[n.Left().Type().Kind()])) { return true } } @@ -802,7 +802,7 @@ func lookdot0(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) } u = t - if t.Sym != nil && t.IsPtr() && !t.Elem().IsPtr() { + if t.Sym() != nil && t.IsPtr() && !t.Elem().IsPtr() { // If t is a defined pointer type, then x.m is shorthand for (*x).m. u = t.Elem() } @@ -1110,13 +1110,13 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { // Only generate (*T).M wrappers for T.M in T's own package. 
if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && - rcvr.Elem().Sym != nil && rcvr.Elem().Sym.Pkg != ir.LocalPkg { + rcvr.Elem().Sym() != nil && rcvr.Elem().Sym().Pkg != ir.LocalPkg { return } // Only generate I.M wrappers for I in I's own package // but keep doing it for error.Error (was issue #29304). - if rcvr.IsInterface() && rcvr.Sym != nil && rcvr.Sym.Pkg != ir.LocalPkg && rcvr != types.Errortype { + if rcvr.IsInterface() && rcvr.Sym() != nil && rcvr.Sym().Pkg != ir.LocalPkg && rcvr != types.ErrorType { return } @@ -1193,7 +1193,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { // Inline calls within (*T).M wrappers. This is safe because we only // generate those wrappers within the same compilation unit as (T).M. // TODO(mdempsky): Investigate why we can't enable this more generally. - if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym != nil { + if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym() != nil { inlcalls(fn) } escapeFuncs([]*ir.Func{fn}, false) @@ -1433,7 +1433,7 @@ func isdirectiface(t *types.Type) bool { return false } - switch t.Etype { + switch t.Kind() { case types.TPTR: // Pointers to notinheap types must be stored indirectly. See issue 42076. return !t.Elem().NotInHeap() diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index 02d38ac4b1693..30179e1dd63ef 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -157,7 +157,7 @@ func typecheckExprSwitch(n ir.Node) { switch { case t.IsMap(): nilonly = "map" - case t.Etype == types.TFUNC: + case t.Kind() == types.TFUNC: nilonly = "func" case t.IsSlice(): nilonly = "slice" @@ -332,7 +332,7 @@ type exprClause struct { func (s *exprSwitch) Add(pos src.XPos, expr, jmp ir.Node) { c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp} - if okforcmp[s.exprname.Type().Etype] && expr.Op() == ir.OLITERAL { + if okforcmp[s.exprname.Type().Kind()] && expr.Op() == ir.OLITERAL { s.clauses = append(s.clauses, c) return } diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index dccb5ecdce8fa..f120b4441323f 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -156,7 +156,7 @@ func typekind(t *types.Type) string { if t.IsUntyped() { return fmt.Sprintf("%v", t) } - et := t.Etype + et := t.Kind() if int(et) < len(_typekind) { s := _typekind[et] if s != "" { @@ -329,7 +329,7 @@ func typecheck(n ir.Node, top int) (res ir.Node) { // The result of indexlit MUST be assigned back to n, e.g. 
// n.Left = indexlit(n.Left) func indexlit(n ir.Node) ir.Node { - if n != nil && n.Type() != nil && n.Type().Etype == types.TIDEAL { + if n != nil && n.Type() != nil && n.Type().Kind() == types.TIDEAL { return defaultlit(n, types.Types[types.TINT]) } return n @@ -583,7 +583,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetType(nil) return n } - if n.Implicit() && !okforarith[l.Type().Etype] { + if n.Implicit() && !okforarith[l.Type().Kind()] { base.Errorf("invalid operation: %v (non-numeric type %v)", n, l.Type()) n.SetType(nil) return n @@ -617,7 +617,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } t = l.Type() - if t != nil && t.Etype != types.TIDEAL && !t.IsInteger() { + if t != nil && t.Kind() != types.TIDEAL && !t.IsInteger() { base.Errorf("invalid operation: %v (shift of type %v)", n, t) n.SetType(nil) return n @@ -659,15 +659,15 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } t := l.Type() - if t.Etype == types.TIDEAL { + if t.Kind() == types.TIDEAL { t = r.Type() } - et := t.Etype + et := t.Kind() if et == types.TIDEAL { et = types.TINT } aop := ir.OXXX - if iscmp[n.Op()] && t.Etype != types.TIDEAL && !types.Identical(l.Type(), r.Type()) { + if iscmp[n.Op()] && t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) { // comparison is okay as long as one side is // assignable to the other. convert so they have // the same type. @@ -676,7 +676,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { // in that case, check comparability of the concrete type. // The conversion allocates, so only do it if the concrete type is huge. converted := false - if r.Type().Etype != types.TBLANK { + if r.Type().Kind() != types.TBLANK { aop, _ = assignop(l.Type(), r.Type()) if aop != ir.OXXX { if r.Type().IsInterface() && !l.Type().IsInterface() && !IsComparable(l.Type()) { @@ -698,7 +698,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } } - if !converted && l.Type().Etype != types.TBLANK { + if !converted && l.Type().Kind() != types.TBLANK { aop, _ = assignop(r.Type(), l.Type()) if aop != ir.OXXX { if l.Type().IsInterface() && !r.Type().IsInterface() && !IsComparable(r.Type()) { @@ -719,10 +719,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } } - et = t.Etype + et = t.Kind() } - if t.Etype != types.TIDEAL && !types.Identical(l.Type(), r.Type()) { + if t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) { l, r = defaultlit2(l, r, true) if l.Type() == nil || r.Type() == nil { n.SetType(nil) @@ -735,10 +735,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } } - if t.Etype == types.TIDEAL { + if t.Kind() == types.TIDEAL { t = mixUntyped(l.Type(), r.Type()) } - if dt := defaultType(t); !okfor[op][dt.Etype] { + if dt := defaultType(t); !okfor[op][dt.Kind()] { base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t)) n.SetType(nil) return n @@ -764,7 +764,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } - if l.Type().Etype == types.TFUNC && !ir.IsNil(l) && !ir.IsNil(r) { + if l.Type().Kind() == types.TFUNC && !ir.IsNil(l) && !ir.IsNil(r) { base.Errorf("invalid operation: %v (func can only be compared to nil)", n) n.SetType(nil) return n @@ -825,7 +825,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetType(nil) return n } - if !okfor[n.Op()][defaultType(t).Etype] { + if !okfor[n.Op()][defaultType(t).Kind()] { base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(t)) n.SetType(nil) return n @@ -1023,7 +1023,7 
@@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetType(nil) return n } - switch t.Etype { + switch t.Kind() { default: base.Errorf("invalid operation: %v (type %v does not support indexing)", n, t) n.SetType(nil) @@ -1032,7 +1032,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case types.TSTRING, types.TARRAY, types.TSLICE: n.SetRight(indexlit(n.Right())) if t.IsString() { - n.SetType(types.Bytetype) + n.SetType(types.ByteType) } else { n.SetType(t.Elem()) } @@ -1191,7 +1191,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetLeft(defaultlit(n.Left(), types.Types[types.TINT])) - if !n.Left().Type().IsInteger() && n.Type().Etype != types.TIDEAL { + if !n.Left().Type().IsInteger() && n.Type().Kind() != types.TIDEAL { base.Errorf("non-integer len argument in OMAKESLICECOPY") } @@ -1383,7 +1383,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { default: n.SetOp(ir.OCALLFUNC) - if t.Etype != types.TFUNC { + if t.Kind() != types.TFUNC { name := l.String() if isBuiltinFuncName(name) && l.Name().Defn != nil { // be more specific when the function @@ -1446,9 +1446,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { var ok bool if n.Op() == ir.OLEN { - ok = okforlen[t.Etype] + ok = okforlen[t.Kind()] } else { - ok = okforcap[t.Etype] + ok = okforcap[t.Kind()] } if !ok { base.Errorf("invalid argument %L for %v", l, n.Op()) @@ -1469,7 +1469,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } // Determine result type. - switch t.Etype { + switch t.Kind() { case types.TIDEAL: n.SetType(types.UntypedFloat) case types.TCOMPLEX64: @@ -1505,7 +1505,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } var t *types.Type - switch l.Type().Etype { + switch l.Type().Kind() { default: base.Errorf("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type()) n.SetType(nil) @@ -1624,7 +1624,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { break } - args.SetSecond(assignconv(args.Second(), t.Orig, "append")) + args.SetSecond(assignconv(args.Second(), t.Underlying(), "append")) break } @@ -1651,7 +1651,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { // copy([]byte, string) if n.Left().Type().IsSlice() && n.Right().Type().IsString() { - if types.Identical(n.Left().Type().Elem(), types.Bytetype) { + if types.Identical(n.Left().Type().Elem(), types.ByteType) { break } base.Errorf("arguments to copy have different element types: %L and string", n.Left().Type()) @@ -1701,8 +1701,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetOp(op) switch n.Op() { case ir.OCONVNOP: - if t.Etype == n.Type().Etype { - switch t.Etype { + if t.Kind() == n.Type().Kind() { + switch t.Kind() { case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128: // Floating point casts imply rounding and // so the conversion must be kept. @@ -1741,7 +1741,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { i := 1 var nn ir.Node - switch t.Etype { + switch t.Kind() { default: base.Errorf("cannot make type %v", t) n.SetType(nil) @@ -2062,7 +2062,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { t := n.Type() if t != nil && !t.IsFuncArgStruct() && n.Op() != ir.OTYPE { - switch t.Etype { + switch t.Kind() { case types.TFUNC, // might have TANY; wait until it's called types.TANY, types.TFORW, types.TIDEAL, types.TNIL, types.TBLANK: break @@ -2352,7 +2352,7 @@ func typecheckMethodExpr(n ir.Node) (res ir.Node) { // types declared at package scope. 
However, we need // to make sure to generate wrappers for anonymous // receiver types too. - if mt.Sym == nil { + if mt.Sym() == nil { addsignat(t) } } @@ -2385,7 +2385,7 @@ func typecheckMethodExpr(n ir.Node) (res ir.Node) { me.SetOpt(m) // Issue 25065. Make sure that we emit the symbol for a local method. - if base.Ctxt.Flag_dynlink && !inimport && (t.Sym == nil || t.Sym.Pkg == ir.LocalPkg) { + if base.Ctxt.Flag_dynlink && !inimport && (t.Sym() == nil || t.Sym().Pkg == ir.LocalPkg) { makefuncsym(me.Sym()) } @@ -2417,7 +2417,7 @@ func lookdot(n ir.Node, t *types.Type, dostrcmp int) *types.Field { } var f2 *types.Field - if n.Left().Type() == t || n.Left().Type().Sym == nil { + if n.Left().Type() == t || n.Left().Type().Sym() == nil { mt := methtype(t) if mt != nil { f2 = lookdot1(n, s, mt, mt.Methods(), dostrcmp) @@ -2493,7 +2493,7 @@ func lookdot(n ir.Node, t *types.Type, dostrcmp int) *types.Field { pll = ll ll = ll.Left() } - if pll.Implicit() && ll.Type().IsPtr() && ll.Type().Sym != nil && ir.AsNode(ll.Type().Sym.Def) != nil && ir.AsNode(ll.Type().Sym.Def).Op() == ir.OTYPE { + if pll.Implicit() && ll.Type().IsPtr() && ll.Type().Sym() != nil && ir.AsNode(ll.Type().Sym().Def) != nil && ir.AsNode(ll.Type().Sym().Def).Op() == ir.OTYPE { // It is invalid to automatically dereference a named pointer type when selecting a method. // Make n.Left == ll to clarify error message. n.SetLeft(ll) @@ -2681,7 +2681,7 @@ func sigrepr(t *types.Type, isddd bool) string { return "bool" } - if t.Etype == types.TIDEAL { + if t.Kind() == types.TIDEAL { // "untyped number" is not commonly used // outside of the compiler, so let's use "number". // TODO(mdempsky): Revisit this. @@ -2724,7 +2724,7 @@ func fielddup(name string, hash map[string]bool) { // iscomptype reports whether type t is a composite literal type. func iscomptype(t *types.Type) bool { - switch t.Etype { + switch t.Kind() { case types.TARRAY, types.TSLICE, types.TSTRUCT, types.TMAP: return true default: @@ -2801,7 +2801,7 @@ func typecheckcomplit(n ir.Node) (res ir.Node) { } n.SetType(t) - switch t.Etype { + switch t.Kind() { default: base.Errorf("invalid composite literal type %v", t) n.SetType(nil) @@ -3154,7 +3154,7 @@ func samesafeexpr(l ir.Node, r ir.Node) bool { case ir.OCONV: // Some conversions can't be reused, such as []byte(str). // Allow only numeric-ish types. This is a bit conservative. - return issimple[l.Type().Etype] && samesafeexpr(l.Left(), r.Left()) + return issimple[l.Type().Kind()] && samesafeexpr(l.Left(), r.Left()) case ir.OINDEX, ir.OINDEXMAP, ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD: @@ -3451,7 +3451,7 @@ func typecheckdeftype(n *ir.Name) { n.SetDiag(true) n.SetType(nil) } - if t.Etype == types.TFORW && base.Errors() > errorsBefore { + if t.Kind() == types.TFORW && base.Errors() > errorsBefore { // Something went wrong during type-checking, // but it was reported. Silence future errors. 
t.SetBroke(true) @@ -3541,7 +3541,7 @@ func typecheckdef(n ir.Node) { t := n.Type() if t != nil { - if !ir.OKForConst[t.Etype] { + if !ir.OKForConst[t.Kind()] { base.ErrorfAt(n.Pos(), "invalid constant type %v", t) goto ret } @@ -3638,7 +3638,7 @@ ret: func checkmake(t *types.Type, arg string, np *ir.Node) bool { n := *np - if !n.Type().IsInteger() && n.Type().Etype != types.TIDEAL { + if !n.Type().IsInteger() && n.Type().Kind() != types.TIDEAL { base.Errorf("non-integer %s argument in make(%v) - %v", arg, t, n.Type()) return false } diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index b1492659b458b..49e50734c6a66 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -15,7 +15,7 @@ import ( var basicTypes = [...]struct { name string - etype types.EType + etype types.Kind }{ {"int8", types.TINT8}, {"int16", types.TINT16}, @@ -35,9 +35,9 @@ var basicTypes = [...]struct { var typedefs = [...]struct { name string - etype types.EType - sameas32 types.EType - sameas64 types.EType + etype types.Kind + sameas32 types.Kind + sameas64 types.Kind }{ {"int", types.TINT, types.TINT32, types.TINT64}, {"uint", types.TUINT, types.TUINT32, types.TUINT64}, @@ -99,14 +99,14 @@ func initUniverse() { // string is same as slice wo the cap sizeofString = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr)) - for et := types.EType(0); et < types.NTYPE; et++ { + for et := types.Kind(0); et < types.NTYPE; et++ { simtype[et] = et } types.Types[types.TANY] = types.New(types.TANY) types.Types[types.TINTER] = types.New(types.TINTER) // empty interface - defBasic := func(kind types.EType, pkg *types.Pkg, name string) *types.Type { + defBasic := func(kind types.Kind, pkg *types.Pkg, name string) *types.Type { sym := pkg.Lookup(name) n := ir.NewNameAt(src.NoXPos, sym) n.SetOp(ir.OTYPE) @@ -140,18 +140,18 @@ func initUniverse() { // of less informative error messages involving bytes and runes)? // (Alternatively, we could introduce an OTALIAS node representing // type aliases, albeit at the cost of having to deal with it everywhere). - types.Bytetype = defBasic(types.TUINT8, ir.BuiltinPkg, "byte") - types.Runetype = defBasic(types.TINT32, ir.BuiltinPkg, "rune") + types.ByteType = defBasic(types.TUINT8, ir.BuiltinPkg, "byte") + types.RuneType = defBasic(types.TINT32, ir.BuiltinPkg, "rune") // error type s := ir.BuiltinPkg.Lookup("error") n := ir.NewNameAt(src.NoXPos, s) n.SetOp(ir.OTYPE) - types.Errortype = types.NewNamed(n) - types.Errortype.SetUnderlying(makeErrorInterface()) - n.SetType(types.Errortype) + types.ErrorType = types.NewNamed(n) + types.ErrorType.SetUnderlying(makeErrorInterface()) + n.SetType(types.ErrorType) s.Def = n - dowidth(types.Errortype) + dowidth(types.ErrorType) types.Types[types.TUNSAFEPTR] = defBasic(types.TUNSAFEPTR, unsafepkg, "Pointer") @@ -218,7 +218,7 @@ func initUniverse() { isComplex[types.TCOMPLEX128] = true // initialize okfor - for et := types.EType(0); et < types.NTYPE; et++ { + for et := types.Kind(0); et < types.NTYPE; et++ { if isInt[et] || et == types.TIDEAL { okforeq[et] = true okforcmp[et] = true diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 511cdd3685e4d..b3af353c3fbce 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -423,7 +423,7 @@ func walkexpr(n ir.Node, init *ir.Nodes) ir.Node { // Eagerly checkwidth all expressions for the back end. 
if n.Type() != nil && !n.Type().WidthCalculated() { - switch n.Type().Etype { + switch n.Type().Kind() { case types.TBLANK, types.TNIL, types.TIDEAL: default: checkwidth(n.Type()) @@ -975,7 +975,7 @@ opswitch: n.SetRight(walkexpr(n.Right(), init)) // rewrite complex div into function call. - et := n.Left().Type().Etype + et := n.Left().Type().Kind() if isComplex[et] && n.Op() == ir.ODIV { t := n.Type() @@ -1638,7 +1638,7 @@ func markUsedIfaceMethod(n ir.Node) { // name can be derived from the names of the returned types. // // If no such function is necessary, it returns (Txxx, Txxx). -func rtconvfn(src, dst *types.Type) (param, result types.EType) { +func rtconvfn(src, dst *types.Type) (param, result types.Kind) { if thearch.SoftFloat { return types.Txxx, types.Txxx } @@ -1646,31 +1646,31 @@ func rtconvfn(src, dst *types.Type) (param, result types.EType) { switch thearch.LinkArch.Family { case sys.ARM, sys.MIPS: if src.IsFloat() { - switch dst.Etype { + switch dst.Kind() { case types.TINT64, types.TUINT64: - return types.TFLOAT64, dst.Etype + return types.TFLOAT64, dst.Kind() } } if dst.IsFloat() { - switch src.Etype { + switch src.Kind() { case types.TINT64, types.TUINT64: - return src.Etype, types.TFLOAT64 + return src.Kind(), types.TFLOAT64 } } case sys.I386: if src.IsFloat() { - switch dst.Etype { + switch dst.Kind() { case types.TINT64, types.TUINT64: - return types.TFLOAT64, dst.Etype + return types.TFLOAT64, dst.Kind() case types.TUINT32, types.TUINT, types.TUINTPTR: return types.TFLOAT64, types.TUINT32 } } if dst.IsFloat() { - switch src.Etype { + switch src.Kind() { case types.TINT64, types.TUINT64: - return src.Etype, types.TFLOAT64 + return src.Kind(), types.TFLOAT64 case types.TUINT32, types.TUINT, types.TUINTPTR: return types.TUINT32, types.TFLOAT64 } @@ -1937,7 +1937,7 @@ func walkprint(nn ir.Node, init *ir.Nodes) ir.Node { for i, n := range nn.List().Slice() { if n.Op() == ir.OLITERAL { if n.Type() == types.UntypedRune { - n = defaultlit(n, types.Runetype) + n = defaultlit(n, types.RuneType) } switch n.Val().Kind() { @@ -1949,17 +1949,17 @@ func walkprint(nn ir.Node, init *ir.Nodes) ir.Node { } } - if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Etype == types.TIDEAL { + if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Kind() == types.TIDEAL { n = defaultlit(n, types.Types[types.TINT64]) } n = defaultlit(n, nil) nn.List().SetIndex(i, n) - if n.Type() == nil || n.Type().Etype == types.TFORW { + if n.Type() == nil || n.Type().Kind() == types.TFORW { continue } var on ir.Node - switch n.Type().Etype { + switch n.Type().Kind() { case types.TINTER: if n.Type().IsEmptyInterface() { on = syslook("printeface") @@ -1984,7 +1984,7 @@ func walkprint(nn ir.Node, init *ir.Nodes) ir.Node { on = syslook("printslice") on = substArgTypes(on, n.Type()) // any-1 case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR: - if isRuntimePkg(n.Type().Sym.Pkg) && n.Type().Sym.Name == "hex" { + if isRuntimePkg(n.Type().Sym().Pkg) && n.Type().Sym().Name == "hex" { on = syslook("printhex") } else { on = syslook("printuint") @@ -2058,9 +2058,9 @@ func isReflectHeaderDataField(l ir.Node) bool { var tsym *types.Sym switch l.Op() { case ir.ODOT: - tsym = l.Left().Type().Sym + tsym = l.Left().Type().Sym() case ir.ODOTPTR: - tsym = l.Left().Type().Elem().Sym + tsym = l.Left().Type().Elem().Sym() default: return false } @@ -2484,7 +2484,7 @@ func heapmoves() { } func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) ir.Node { - if fn.Type() == 
nil || fn.Type().Etype != types.TFUNC { + if fn.Type() == nil || fn.Type().Kind() != types.TFUNC { base.Fatalf("mkcall %v %v", fn, fn.Type()) } @@ -3264,7 +3264,7 @@ func walkcompare(n ir.Node, init *ir.Nodes) ir.Node { maxcmpsize = 2 * int64(thearch.LinkArch.RegSize) } - switch t.Etype { + switch t.Kind() { default: if base.Debug.Libfuzzer != 0 && t.IsInteger() { n.SetLeft(cheapexpr(n.Left(), init)) @@ -3315,7 +3315,7 @@ func walkcompare(n ir.Node, init *ir.Nodes) ir.Node { return n case types.TARRAY: // We can compare several elements at once with 2/4/8 byte integer compares - inline = t.NumElem() <= 1 || (issimple[t.Elem().Etype] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize)) + inline = t.NumElem() <= 1 || (issimple[t.Elem().Kind()] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize)) case types.TSTRUCT: inline = t.NumComponents(types.IgnoreBlankFields) <= 4 } @@ -3697,7 +3697,7 @@ func usemethod(n ir.Node) { } if res1 == nil { - if p0.Type.Etype != types.TINT { + if p0.Type.Kind() != types.TINT { return } } else { @@ -3712,7 +3712,7 @@ func usemethod(n ir.Node) { // Note: Don't rely on res0.Type.String() since its formatting depends on multiple factors // (including global variables such as numImports - was issue #19028). // Also need to check for reflect package itself (see Issue #38515). - if s := res0.Type.Sym; s != nil && s.Name == "Method" && isReflectPkg(s.Pkg) { + if s := res0.Type.Sym(); s != nil && s.Name == "Method" && isReflectPkg(s.Pkg) { Curfn.SetReflectMethod(true) // The LSym is initialized at this point. We need to set the attribute on the LSym. Curfn.LSym.Set(obj.AttrReflectMethod, true) @@ -3756,7 +3756,7 @@ func usefield(n ir.Node) { if outer.IsPtr() { outer = outer.Elem() } - if outer.Sym == nil { + if outer.Sym() == nil { base.Errorf("tracked field must be in named struct type") } if !types.IsExported(field.Sym.Name) { diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index a1114712222c8..5bb1ed857c0ba 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -317,7 +317,7 @@ func (m FmtMode) prepareArgs(args []interface{}) { args[i] = &fmtSym{arg, m} case Nodes: args[i] = &fmtNodes{arg, m} - case int32, int64, string, types.EType, constant.Value: + case int32, int64, string, types.Kind, constant.Value: // OK: printing these types doesn't depend on mode default: base.Fatalf("mode.prepareArgs type %T", arg) @@ -590,18 +590,18 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited b.WriteString("") return } - if t.Etype == types.TSSA { + if t.Kind() == types.TSSA { b.WriteString(t.Extra.(string)) return } - if t.Etype == types.TTUPLE { + if t.Kind() == types.TTUPLE { b.WriteString(t.FieldType(0).String()) b.WriteByte(',') b.WriteString(t.FieldType(1).String()) return } - if t.Etype == types.TRESULTS { + if t.Kind() == types.TRESULTS { tys := t.Extra.(*types.Results).Types for i, et := range tys { if i > 0 { @@ -616,51 +616,51 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited if mode == FTypeIdName { flag |= FmtUnsigned } - if t == types.Bytetype || t == types.Runetype { + if t == types.ByteType || t == types.RuneType { // in %-T mode collapse rune and byte with their originals. 
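		// A sketch of the collapse (using only the cases visible below):
		// under FTypeId/FTypeIdName the alias is replaced by its original,
		// so ByteType formats as "uint8" and RuneType as "int32"; any
		// other mode prints the alias name itself:
		//
		//	FTypeId: ByteType -> "uint8", RuneType -> "int32"
		//	FErr:    ByteType -> "byte",  RuneType -> "rune"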
switch mode { case FTypeIdName, FTypeId: - t = types.Types[t.Etype] + t = types.Types[t.Kind()] default: - sconv2(b, t.Sym, FmtShort, mode) + sconv2(b, t.Sym(), FmtShort, mode) return } } - if t == types.Errortype { + if t == types.ErrorType { b.WriteString("error") return } // Unless the 'L' flag was specified, if the type has a name, just print that name. - if flag&FmtLong == 0 && t.Sym != nil && t != types.Types[t.Etype] { + if flag&FmtLong == 0 && t.Sym() != nil && t != types.Types[t.Kind()] { switch mode { case FTypeId, FTypeIdName: if flag&FmtShort != 0 { if t.Vargen != 0 { - sconv2(b, t.Sym, FmtShort, mode) + sconv2(b, t.Sym(), FmtShort, mode) fmt.Fprintf(b, "·%d", t.Vargen) return } - sconv2(b, t.Sym, FmtShort, mode) + sconv2(b, t.Sym(), FmtShort, mode) return } if mode == FTypeIdName { - sconv2(b, t.Sym, FmtUnsigned, mode) + sconv2(b, t.Sym(), FmtUnsigned, mode) return } - if t.Sym.Pkg == LocalPkg && t.Vargen != 0 { - b.WriteString(mode.Sprintf("%v·%d", t.Sym, t.Vargen)) + if t.Sym().Pkg == LocalPkg && t.Vargen != 0 { + b.WriteString(mode.Sprintf("%v·%d", t.Sym(), t.Vargen)) return } } - sconv2(b, t.Sym, 0, mode) + sconv2(b, t.Sym(), 0, mode) return } - if int(t.Etype) < len(BasicTypeNames) && BasicTypeNames[t.Etype] != "" { + if int(t.Kind()) < len(BasicTypeNames) && BasicTypeNames[t.Kind()] != "" { var name string switch t { case types.UntypedBool: @@ -676,14 +676,14 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited case types.UntypedComplex: name = "untyped complex" default: - name = BasicTypeNames[t.Etype] + name = BasicTypeNames[t.Kind()] } b.WriteString(name) return } if mode == FDbg { - b.WriteString(t.Etype.String()) + b.WriteString(t.Kind().String()) b.WriteByte('-') tconv2(b, t, flag, FErr, visited) return @@ -702,7 +702,7 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited visited[t] = b.Len() defer delete(visited, t) - switch t.Etype { + switch t.Kind() { case types.TPTR: b.WriteByte('*') switch mode { @@ -734,7 +734,7 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited tconv2(b, t.Elem(), 0, mode, visited) default: b.WriteString("chan ") - if t.Elem() != nil && t.Elem().IsChan() && t.Elem().Sym == nil && t.Elem().ChanDir() == types.Crecv { + if t.Elem() != nil && t.Elem().IsChan() && t.Elem().Sym() == nil && t.Elem().ChanDir() == types.Crecv { b.WriteByte('(') tconv2(b, t.Elem(), 0, mode, visited) b.WriteByte(')') @@ -860,9 +860,9 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited case types.TFORW: b.WriteString("undefined") - if t.Sym != nil { + if t.Sym() != nil { b.WriteByte(' ') - sconv2(b, t.Sym, 0, mode) + sconv2(b, t.Sym(), 0, mode) } case types.TUNSAFEPTR: @@ -872,7 +872,7 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited b.WriteString("Txxx") default: // Don't know how to handle - fall back to detailed prints. 
- b.WriteString(mode.Sprintf("%v <%v>", t.Etype, t.Sym)) + b.WriteString(mode.Sprintf("%v <%v>", t.Kind(), t.Sym())) } } @@ -1446,7 +1446,7 @@ func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) { OSTR2BYTES, OSTR2RUNES, ORUNESTR: - if n.Type() == nil || n.Type().Sym == nil { + if n.Type() == nil || n.Type().Sym() == nil { mode.Fprintf(s, "(%v)", n.Type()) } else { mode.Fprintf(s, "%v", n.Type()) @@ -1564,7 +1564,7 @@ func nodeFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) { } if flag&FmtLong != 0 && t != nil { - if t.Etype == types.TNIL { + if t.Kind() == types.TNIL { fmt.Fprint(s, "nil") } else if n.Op() == ONAME && n.Name().AutoTemp() { mode.Fprintf(s, "%v value", t) diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index a7144eee44552..fc4c5939296ca 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -647,7 +647,7 @@ const ( GoBuildPragma ) -func AsNode(n types.IRNode) Node { +func AsNode(n types.Object) Node { if n == nil { return nil } diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go index 446145b24c99a..d2f5bb9239949 100644 --- a/src/cmd/compile/internal/ir/type.go +++ b/src/cmd/compile/internal/ir/type.go @@ -353,7 +353,7 @@ func (n *typeNode) String() string { return fmt.Sprint(n) } func (n *typeNode) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *typeNode) rawCopy() Node { c := *n; return &c } func (n *typeNode) Type() *types.Type { return n.typ } -func (n *typeNode) Sym() *types.Sym { return n.typ.Sym } +func (n *typeNode) Sym() *types.Sym { return n.typ.Sym() } func (n *typeNode) CanBeNtype() {} // TypeNode returns the Node representing the type t. diff --git a/src/cmd/compile/internal/ir/val.go b/src/cmd/compile/internal/ir/val.go index 9035e90084712..aae965bb4c977 100644 --- a/src/cmd/compile/internal/ir/val.go +++ b/src/cmd/compile/internal/ir/val.go @@ -73,7 +73,7 @@ func AssertValidTypeForConst(t *types.Type, v constant.Value) { func ValidTypeForConst(t *types.Type, v constant.Value) bool { switch v.Kind() { case constant.Unknown: - return OKForConst[t.Etype] + return OKForConst[t.Kind()] case constant.Bool: return t.IsBoolean() case constant.String: diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go index f266e49327eee..0eba238d81aee 100644 --- a/src/cmd/compile/internal/ssa/expand_calls.go +++ b/src/cmd/compile/internal/ssa/expand_calls.go @@ -69,7 +69,7 @@ func expandCalls(f *Func) { // intPairTypes returns the pair of 32-bit int types needed to encode a 64-bit integer type on a target // that has no 64-bit integer registers. 
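	// A sketch of the resulting pair (the low half is assumed to stay
	// unsigned; the body below only flips the high half's signedness):
	//
	//	tHi, tLo := intPairTypes(types.TINT64)  // tHi = int32,  tLo = uint32
	//	tHi, tLo  = intPairTypes(types.TUINT64) // tHi = uint32, tLo = uint32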
- intPairTypes := func(et types.EType) (tHi, tLo *types.Type) { + intPairTypes := func(et types.Kind) (tHi, tLo *types.Type) { tHi = typ.UInt32 if et == types.TINT64 { tHi = typ.Int32 @@ -294,7 +294,7 @@ func expandCalls(f *Func) { case OpStructSelect: w := selector.Args[0] var ls []LocalSlot - if w.Type.Etype != types.TSTRUCT { // IData artifact + if w.Type.Kind() != types.TSTRUCT { // IData artifact ls = rewriteSelect(leaf, w, offset) } else { ls = rewriteSelect(leaf, w, offset+w.Type.FieldOff(int(selector.AuxInt))) @@ -383,7 +383,7 @@ func expandCalls(f *Func) { decomposeOne func(pos src.XPos, b *Block, base, source, mem *Value, t1 *types.Type, offArg, offStore int64) *Value, decomposeTwo func(pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value) *Value { u := source.Type - switch u.Etype { + switch u.Kind() { case types.TARRAY: elem := u.Elem() for i := int64(0); i < u.NumElem(); i++ { @@ -403,7 +403,7 @@ func expandCalls(f *Func) { if t.Width == regSize { break } - tHi, tLo := intPairTypes(t.Etype) + tHi, tLo := intPairTypes(t.Kind()) mem = decomposeOne(pos, b, base, source, mem, tHi, source.AuxInt+hiOffset, offset+hiOffset) pos = pos.WithNotStmt() return decomposeOne(pos, b, base, source, mem, tLo, source.AuxInt+lowOffset, offset+lowOffset) @@ -491,7 +491,7 @@ func expandCalls(f *Func) { return storeArgOrLoad(pos, b, base, source.Args[0], mem, t.Elem(), offset) case OpInt64Make: - tHi, tLo := intPairTypes(t.Etype) + tHi, tLo := intPairTypes(t.Kind()) mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, tHi, offset+hiOffset) pos = pos.WithNotStmt() return storeArgOrLoad(pos, b, base, source.Args[1], mem, tLo, offset+lowOffset) @@ -524,7 +524,7 @@ func expandCalls(f *Func) { } // For nodes that cannot be taken apart -- OpSelectN, other structure selectors. - switch t.Etype { + switch t.Kind() { case types.TARRAY: elt := t.Elem() if source.Type != t && t.NumElem() == 1 && elt.Width == t.Width && t.Width == regSize { @@ -576,7 +576,7 @@ func expandCalls(f *Func) { if t.Width == regSize { break } - tHi, tLo := intPairTypes(t.Etype) + tHi, tLo := intPairTypes(t.Kind()) sel := source.Block.NewValue1(pos, OpInt64Hi, tHi, source) mem = storeArgOrLoad(pos, b, base, sel, mem, tHi, offset+hiOffset) pos = pos.WithNotStmt() @@ -873,7 +873,7 @@ func expandCalls(f *Func) { offset := int64(0) switch v.Op { case OpStructSelect: - if w.Type.Etype == types.TSTRUCT { + if w.Type.Kind() == types.TSTRUCT { offset = w.Type.FieldOff(int(v.AuxInt)) } else { // Immediate interface data artifact, offset is zero. f.Fatalf("Expand calls interface data problem, func %s, v=%s, w=%s\n", f.Name, v.LongString(), w.LongString()) diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go index df83383308cdb..5a81f76cebcc3 100644 --- a/src/cmd/compile/internal/ssa/export_test.go +++ b/src/cmd/compile/internal/ssa/export_test.go @@ -140,7 +140,7 @@ func init() { // so this test setup can share it. 
types.Tconv = func(t *types.Type, flag, mode int) string { - return t.Etype.String() + return t.Kind().String() } types.Sconv = func(s *types.Sym, flag, mode int) string { return "sym" @@ -149,13 +149,13 @@ func init() { fmt.Fprintf(s, "sym") } types.FormatType = func(t *types.Type, s fmt.State, verb rune, mode int) { - fmt.Fprintf(s, "%v", t.Etype) + fmt.Fprintf(s, "%v", t.Kind()) } types.Dowidth = func(t *types.Type) {} for _, typ := range [...]struct { width int64 - et types.EType + et types.Kind }{ {1, types.TINT8}, {1, types.TUINT8}, diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index 459a9923f7b2d..376ca975123c9 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -783,9 +783,9 @@ func (s *regAllocState) compatRegs(t *types.Type) regMask { return 0 } if t.IsFloat() || t == types.TypeInt128 { - if t.Etype == types.TFLOAT32 && s.f.Config.fp32RegMask != 0 { + if t.Kind() == types.TFLOAT32 && s.f.Config.fp32RegMask != 0 { m = s.f.Config.fp32RegMask - } else if t.Etype == types.TFLOAT64 && s.f.Config.fp64RegMask != 0 { + } else if t.Kind() == types.TFLOAT64 && s.f.Config.fp64RegMask != 0 { m = s.f.Config.fp64RegMask } else { m = s.f.Config.fpRegMask diff --git a/src/cmd/compile/internal/types/etype_string.go b/src/cmd/compile/internal/types/etype_string.go index 14fd5b71df801..e7698296abf26 100644 --- a/src/cmd/compile/internal/types/etype_string.go +++ b/src/cmd/compile/internal/types/etype_string.go @@ -52,8 +52,8 @@ const _EType_name = "xxxINT8UINT8INT16UINT16INT32UINT32INT64UINT64INTUINTUINTPTR var _EType_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 99, 103, 108, 113, 119, 123, 126, 131, 135, 138, 144, 153, 158, 161, 166, 174, 182, 185, 190, 197, 202} -func (i EType) String() string { - if i >= EType(len(_EType_index)-1) { +func (i Kind) String() string { + if i >= Kind(len(_EType_index)-1) { return "EType(" + strconv.FormatInt(int64(i), 10) + ")" } return _EType_name[_EType_index[i]:_EType_index[i+1]] diff --git a/src/cmd/compile/internal/types/identity.go b/src/cmd/compile/internal/types/identity.go index a77f514df96be..9bc636d7ffe41 100644 --- a/src/cmd/compile/internal/types/identity.go +++ b/src/cmd/compile/internal/types/identity.go @@ -25,17 +25,17 @@ func identical(t1, t2 *Type, cmpTags bool, assumedEqual map[typePair]struct{}) b if t1 == t2 { return true } - if t1 == nil || t2 == nil || t1.Etype != t2.Etype || t1.Broke() || t2.Broke() { + if t1 == nil || t2 == nil || t1.kind != t2.kind || t1.Broke() || t2.Broke() { return false } - if t1.Sym != nil || t2.Sym != nil { + if t1.sym != nil || t2.sym != nil { // Special case: we keep byte/uint8 and rune/int32 // separate for error messages. Treat them as equal. 
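	// For example (a sketch of just these cases):
	//
	//	identical(ByteType, Types[TUINT8], ...) // true
	//	identical(RuneType, Types[TINT32], ...) // true
	//	identical(ByteType, RuneType, ...)      // false: kinds differ, caught above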
- switch t1.Etype { + switch t1.kind { case TUINT8: - return (t1 == Types[TUINT8] || t1 == Bytetype) && (t2 == Types[TUINT8] || t2 == Bytetype) + return (t1 == Types[TUINT8] || t1 == ByteType) && (t2 == Types[TUINT8] || t2 == ByteType) case TINT32: - return (t1 == Types[TINT32] || t1 == Runetype) && (t2 == Types[TINT32] || t2 == Runetype) + return (t1 == Types[TINT32] || t1 == RuneType) && (t2 == Types[TINT32] || t2 == RuneType) default: return false } @@ -52,7 +52,7 @@ func identical(t1, t2 *Type, cmpTags bool, assumedEqual map[typePair]struct{}) b } assumedEqual[typePair{t1, t2}] = struct{}{} - switch t1.Etype { + switch t1.kind { case TIDEAL: // Historically, cmd/compile used a single "untyped // number" type, so all untyped number types were diff --git a/src/cmd/compile/internal/types/scope.go b/src/cmd/compile/internal/types/scope.go index 33a02c543d153..37ac90a0250b6 100644 --- a/src/cmd/compile/internal/types/scope.go +++ b/src/cmd/compile/internal/types/scope.go @@ -15,7 +15,7 @@ var Block int32 // current block number // restored once the block scope ends. type dsym struct { sym *Sym // sym == nil indicates stack mark - def IRNode + def Object block int32 lastlineno src.XPos // last declaration for diagnostic } @@ -79,16 +79,16 @@ func IsDclstackValid() bool { } // PkgDef returns the definition associated with s at package scope. -func (s *Sym) PkgDef() IRNode { +func (s *Sym) PkgDef() Object { return *s.pkgDefPtr() } // SetPkgDef sets the definition associated with s at package scope. -func (s *Sym) SetPkgDef(n IRNode) { +func (s *Sym) SetPkgDef(n Object) { *s.pkgDefPtr() = n } -func (s *Sym) pkgDefPtr() *IRNode { +func (s *Sym) pkgDefPtr() *Object { // Look for outermost saved declaration, which must be the // package scope definition, if present. for _, d := range dclstack { diff --git a/src/cmd/compile/internal/types/sym.go b/src/cmd/compile/internal/types/sym.go index 7272f1f7861e5..490222d843522 100644 --- a/src/cmd/compile/internal/types/sym.go +++ b/src/cmd/compile/internal/types/sym.go @@ -33,7 +33,7 @@ type Sym struct { Name string // object name // saved and restored by dcopy - Def IRNode // definition: ONAME OTYPE OPACK or OLITERAL + Def Object // definition: ONAME OTYPE OPACK or OLITERAL Block int32 // blocknumber to catch redeclaration Lastlineno src.XPos // last declaration for diagnostic diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index f0211a67fbd64..36aac53124b10 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -14,7 +14,7 @@ import ( // IRNode represents an ir.Node, but without needing to import cmd/compile/internal/ir, // which would cause an import cycle. The uses in other packages must type assert // values of type IRNode to ir.Node or a more specific type. -type IRNode interface { +type Object interface { Pos() src.XPos Sym() *Sym Type() *Type @@ -23,10 +23,10 @@ type IRNode interface { //go:generate stringer -type EType -trimprefix T // EType describes a kind of type. -type EType uint8 +type Kind uint8 const ( - Txxx EType = iota + Txxx Kind = iota TINT8 TUINT8 @@ -103,11 +103,11 @@ var Types [NTYPE]*Type var ( // Predeclared alias types. Kept separate for better error messages. - Bytetype *Type - Runetype *Type + ByteType *Type + RuneType *Type // Predeclared error interface type. - Errortype *Type + ErrorType *Type // Types to represent untyped string and boolean constants. 
UntypedString = New(TSTRING) @@ -146,19 +146,19 @@ type Type struct { methods Fields allMethods Fields - nod IRNode // canonical OTYPE node - Orig *Type // original type (type literal or predefined type) + nod Object // canonical OTYPE node + underlying *Type // original type (type literal or predefined type) // Cache of composite types, with this type being the element type. - Cache struct { + cache struct { ptr *Type // *T, or nil slice *Type // []T, or nil } - Sym *Sym // symbol containing name, for named types + sym *Sym // symbol containing name, for named types Vargen int32 // unique name for OTYPE/ONAME - Etype EType // kind of type + kind Kind // kind of type Align uint8 // the required alignment of this type, in bytes (0 means Width and Align have not yet been computed) flags bitset8 @@ -185,16 +185,16 @@ func (t *Type) SetDeferwidth(b bool) { t.flags.set(typeDeferwidth, b) } func (t *Type) SetRecur(b bool) { t.flags.set(typeRecur, b) } // Kind returns the kind of type t. -func (t *Type) Kind() EType { return t.Etype } +func (t *Type) Kind() Kind { return t.kind } // Sym returns the name of type t. -func (t *Type) GetSym() *Sym { return t.Sym } +func (t *Type) Sym() *Sym { return t.sym } // Underlying returns the underlying type of type t. -func (t *Type) Underlying() *Type { return t.Orig } +func (t *Type) Underlying() *Type { return t.underlying } // SetNod associates t with syntax node n. -func (t *Type) SetNod(n IRNode) { +func (t *Type) SetNod(n Object) { // t.nod can be non-nil already // in the case of shared *Types, like []byte or interface{}. if t.nod == nil { @@ -218,7 +218,7 @@ func (t *Type) Pos() src.XPos { // cmd/compile itself, but we need to track it because it's exposed by // the go/types API. func (t *Type) Pkg() *Pkg { - switch t.Etype { + switch t.kind { case TFUNC: return t.Extra.(*Func).pkg case TSTRUCT: @@ -233,7 +233,7 @@ func (t *Type) Pkg() *Pkg { // SetPkg sets the package that t appeared in. func (t *Type) SetPkg(pkg *Pkg) { - switch t.Etype { + switch t.kind { case TFUNC: t.Extra.(*Func).pkg = pkg case TSTRUCT: @@ -392,7 +392,7 @@ type Field struct { // For fields that represent function parameters, Nname points // to the associated ONAME Node. - Nname IRNode + Nname Object // Offset in bytes of this field or method within its enclosing struct // or interface Type. @@ -420,7 +420,7 @@ func (f *Field) End() int64 { // IsMethod reports whether f represents a method rather than a struct field. func (f *Field) IsMethod() bool { - return f.Type.Etype == TFUNC && f.Type.Recv() != nil + return f.Type.kind == TFUNC && f.Type.Recv() != nil } // Fields is a pointer to a slice of *Field. @@ -475,14 +475,14 @@ func (f *Fields) Append(s ...*Field) { } // New returns a new Type of the specified kind. -func New(et EType) *Type { +func New(et Kind) *Type { t := &Type{ - Etype: et, + kind: et, Width: BADWIDTH, } - t.Orig = t + t.underlying = t // TODO(josharian): lazily initialize some of these? - switch t.Etype { + switch t.kind { case TMAP: t.Extra = new(Map) case TFORW: @@ -522,7 +522,7 @@ func NewArray(elem *Type, bound int64) *Type { // NewSlice returns the slice Type with element type elem. 
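// Repeated calls with the same element type return one canonical slice
// type (a sketch of the cache below):
//
//	s1 := NewSlice(elem)
//	s2 := NewSlice(elem) // s1 == s2, reused via elem.cache.slice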
func NewSlice(elem *Type) *Type { - if t := elem.Cache.slice; t != nil { + if t := elem.cache.slice; t != nil { if t.Elem() != elem { Fatalf("elem mismatch") } @@ -531,7 +531,7 @@ func NewSlice(elem *Type) *Type { t := New(TSLICE) t.Extra = Slice{Elem: elem} - elem.Cache.slice = t + elem.cache.slice = t return t } @@ -583,7 +583,7 @@ func NewPtr(elem *Type) *Type { Fatalf("NewPtr: pointer to elem Type is nil") } - if t := elem.Cache.ptr; t != nil { + if t := elem.cache.ptr; t != nil { if t.Elem() != elem { Fatalf("NewPtr: elem mismatch") } @@ -595,7 +595,7 @@ func NewPtr(elem *Type) *Type { t.Width = int64(Widthptr) t.Align = uint8(Widthptr) if NewPtrCacheEnabled { - elem.Cache.ptr = t + elem.cache.ptr = t } return t } @@ -634,7 +634,7 @@ func SubstAny(t *Type, types *[]*Type) *Type { return nil } - switch t.Etype { + switch t.kind { default: // Leave the type unchanged. @@ -718,7 +718,7 @@ func (t *Type) copy() *Type { } nt := *t // copy any *T Extra fields, to avoid aliasing - switch t.Etype { + switch t.kind { case TMAP: x := *t.Extra.(*Map) nt.Extra = &x @@ -744,8 +744,8 @@ func (t *Type) copy() *Type { Fatalf("ssa types cannot be copied") } // TODO(mdempsky): Find out why this is necessary and explain. - if t.Orig == t { - nt.Orig = &nt + if t.underlying == t { + nt.underlying = &nt } return &nt } @@ -755,8 +755,8 @@ func (f *Field) Copy() *Field { return &nf } -func (t *Type) wantEtype(et EType) { - if t.Etype != et { +func (t *Type) wantEtype(et Kind) { + if t.kind != et { Fatalf("want %v, but have %v", et, t) } } @@ -810,7 +810,7 @@ func (t *Type) Key() *Type { // Elem returns the type of elements of t. // Usable with pointers, channels, arrays, slices, and maps. func (t *Type) Elem() *Type { - switch t.Etype { + switch t.kind { case TPTR: return t.Extra.(Ptr).Elem case TARRAY: @@ -822,7 +822,7 @@ func (t *Type) Elem() *Type { case TMAP: return t.Extra.(*Map).Elem } - Fatalf("Type.Elem %s", t.Etype) + Fatalf("Type.Elem %s", t.kind) return nil } @@ -840,7 +840,7 @@ func (t *Type) FuncArgs() *Type { // IsFuncArgStruct reports whether t is a struct representing function parameters. func (t *Type) IsFuncArgStruct() bool { - return t.Etype == TSTRUCT && t.Extra.(*Struct).Funarg != FunargNone + return t.kind == TSTRUCT && t.Extra.(*Struct).Funarg != FunargNone } func (t *Type) Methods() *Fields { @@ -854,7 +854,7 @@ func (t *Type) AllMethods() *Fields { } func (t *Type) Fields() *Fields { - switch t.Etype { + switch t.kind { case TSTRUCT: return &t.Extra.(*Struct).fields case TINTER: @@ -919,7 +919,7 @@ func (t *Type) ArgWidth() int64 { } func (t *Type) Size() int64 { - if t.Etype == TSSA { + if t.kind == TSSA { if t == TypeInt128 { return 16 } @@ -935,7 +935,7 @@ func (t *Type) Alignment() int64 { } func (t *Type) SimpleString() string { - return t.Etype.String() + return t.kind.String() } // Cmp is a comparison between values a and b. @@ -1019,31 +1019,31 @@ func (t *Type) cmp(x *Type) Cmp { return CMPgt } - if t.Etype != x.Etype { - return cmpForNe(t.Etype < x.Etype) + if t.kind != x.kind { + return cmpForNe(t.kind < x.kind) } - if t.Sym != nil || x.Sym != nil { + if t.sym != nil || x.sym != nil { // Special case: we keep byte and uint8 separate // for error messages. Treat them as equal. 
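	// For example (sketch): ByteType.cmp(Types[TUINT8]) is CMPeq here
	// even though the two are distinct *Type values.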
- switch t.Etype { + switch t.kind { case TUINT8: - if (t == Types[TUINT8] || t == Bytetype) && (x == Types[TUINT8] || x == Bytetype) { + if (t == Types[TUINT8] || t == ByteType) && (x == Types[TUINT8] || x == ByteType) { return CMPeq } case TINT32: - if (t == Types[Runetype.Etype] || t == Runetype) && (x == Types[Runetype.Etype] || x == Runetype) { + if (t == Types[RuneType.kind] || t == RuneType) && (x == Types[RuneType.kind] || x == RuneType) { return CMPeq } } } - if c := t.Sym.cmpsym(x.Sym); c != CMPeq { + if c := t.sym.cmpsym(x.sym); c != CMPeq { return c } - if x.Sym != nil { + if x.sym != nil { // Syms non-nil, if vargens match then equal. if t.Vargen != x.Vargen { return cmpForNe(t.Vargen < x.Vargen) @@ -1052,7 +1052,7 @@ func (t *Type) cmp(x *Type) Cmp { } // both syms nil, look at structure below. - switch t.Etype { + switch t.kind { case TBOOL, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TUNSAFEPTR, TUINTPTR, TINT8, TINT16, TINT32, TINT64, TINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINT: return CMPeq @@ -1209,15 +1209,15 @@ func (t *Type) cmp(x *Type) Cmp { } // IsKind reports whether t is a Type of the specified kind. -func (t *Type) IsKind(et EType) bool { - return t != nil && t.Etype == et +func (t *Type) IsKind(et Kind) bool { + return t != nil && t.kind == et } func (t *Type) IsBoolean() bool { - return t.Etype == TBOOL + return t.kind == TBOOL } -var unsignedEType = [...]EType{ +var unsignedEType = [...]Kind{ TINT8: TUINT8, TUINT8: TUINT8, TINT16: TUINT16, @@ -1236,11 +1236,11 @@ func (t *Type) ToUnsigned() *Type { if !t.IsInteger() { Fatalf("unsignedType(%v)", t) } - return Types[unsignedEType[t.Etype]] + return Types[unsignedEType[t.kind]] } func (t *Type) IsInteger() bool { - switch t.Etype { + switch t.kind { case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TINT, TUINT, TUINTPTR: return true } @@ -1248,7 +1248,7 @@ func (t *Type) IsInteger() bool { } func (t *Type) IsSigned() bool { - switch t.Etype { + switch t.kind { case TINT8, TINT16, TINT32, TINT64, TINT: return true } @@ -1256,7 +1256,7 @@ func (t *Type) IsSigned() bool { } func (t *Type) IsUnsigned() bool { - switch t.Etype { + switch t.kind { case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR: return true } @@ -1264,32 +1264,32 @@ func (t *Type) IsUnsigned() bool { } func (t *Type) IsFloat() bool { - return t.Etype == TFLOAT32 || t.Etype == TFLOAT64 || t == UntypedFloat + return t.kind == TFLOAT32 || t.kind == TFLOAT64 || t == UntypedFloat } func (t *Type) IsComplex() bool { - return t.Etype == TCOMPLEX64 || t.Etype == TCOMPLEX128 || t == UntypedComplex + return t.kind == TCOMPLEX64 || t.kind == TCOMPLEX128 || t == UntypedComplex } // IsPtr reports whether t is a regular Go pointer type. // This does not include unsafe.Pointer. func (t *Type) IsPtr() bool { - return t.Etype == TPTR + return t.kind == TPTR } // IsPtrElem reports whether t is the element of a pointer (to t). func (t *Type) IsPtrElem() bool { - return t.Cache.ptr != nil + return t.cache.ptr != nil } // IsUnsafePtr reports whether t is an unsafe pointer. func (t *Type) IsUnsafePtr() bool { - return t.Etype == TUNSAFEPTR + return t.kind == TUNSAFEPTR } // IsUintptr reports whether t is an uintptr. func (t *Type) IsUintptr() bool { - return t.Etype == TUINTPTR + return t.kind == TUINTPTR } // IsPtrShaped reports whether t is represented by a single machine pointer. @@ -1298,13 +1298,13 @@ func (t *Type) IsUintptr() bool { // that consist of a single pointer shaped type. // TODO(mdempsky): Should it? 
See golang.org/issue/15028. func (t *Type) IsPtrShaped() bool { - return t.Etype == TPTR || t.Etype == TUNSAFEPTR || - t.Etype == TMAP || t.Etype == TCHAN || t.Etype == TFUNC + return t.kind == TPTR || t.kind == TUNSAFEPTR || + t.kind == TMAP || t.kind == TCHAN || t.kind == TFUNC } // HasNil reports whether the set of values determined by t includes nil. func (t *Type) HasNil() bool { - switch t.Etype { + switch t.kind { case TCHAN, TFUNC, TINTER, TMAP, TNIL, TPTR, TSLICE, TUNSAFEPTR: return true } @@ -1312,31 +1312,31 @@ func (t *Type) HasNil() bool { } func (t *Type) IsString() bool { - return t.Etype == TSTRING + return t.kind == TSTRING } func (t *Type) IsMap() bool { - return t.Etype == TMAP + return t.kind == TMAP } func (t *Type) IsChan() bool { - return t.Etype == TCHAN + return t.kind == TCHAN } func (t *Type) IsSlice() bool { - return t.Etype == TSLICE + return t.kind == TSLICE } func (t *Type) IsArray() bool { - return t.Etype == TARRAY + return t.kind == TARRAY } func (t *Type) IsStruct() bool { - return t.Etype == TSTRUCT + return t.kind == TSTRUCT } func (t *Type) IsInterface() bool { - return t.Etype == TINTER + return t.kind == TINTER } // IsEmptyInterface reports whether t is an empty interface type. @@ -1352,7 +1352,7 @@ func (t *Type) NumFields() int { return t.Fields().Len() } func (t *Type) FieldType(i int) *Type { - if t.Etype == TTUPLE { + if t.kind == TTUPLE { switch i { case 0: return t.Extra.(*Tuple).first @@ -1362,7 +1362,7 @@ func (t *Type) FieldType(i int) *Type { panic("bad tuple index") } } - if t.Etype == TRESULTS { + if t.kind == TRESULTS { return t.Extra.(*Results).Types[i] } return t.Field(i).Type @@ -1393,7 +1393,7 @@ const ( // (and their comprised elements) are excluded from the count. // struct { x, y [3]int } has six components; [10]struct{ x, y string } has twenty. func (t *Type) NumComponents(countBlank componentsIncludeBlankFields) int64 { - switch t.Etype { + switch t.kind { case TSTRUCT: if t.IsFuncArgStruct() { Fatalf("NumComponents func arg struct") @@ -1416,7 +1416,7 @@ func (t *Type) NumComponents(countBlank componentsIncludeBlankFields) int64 { // if there is exactly one. Otherwise, it returns nil. // Components are counted as in NumComponents, including blank fields. func (t *Type) SoleComponent() *Type { - switch t.Etype { + switch t.kind { case TSTRUCT: if t.IsFuncArgStruct() { Fatalf("SoleComponent func arg struct") @@ -1442,10 +1442,10 @@ func (t *Type) ChanDir() ChanDir { } func (t *Type) IsMemory() bool { - if t == TypeMem || t.Etype == TTUPLE && t.Extra.(*Tuple).second == TypeMem { + if t == TypeMem || t.kind == TTUPLE && t.Extra.(*Tuple).second == TypeMem { return true } - if t.Etype == TRESULTS { + if t.kind == TRESULTS { if types := t.Extra.(*Results).Types; len(types) > 0 && types[len(types)-1] == TypeMem { return true } @@ -1454,8 +1454,8 @@ func (t *Type) IsMemory() bool { } func (t *Type) IsFlags() bool { return t == TypeFlags } func (t *Type) IsVoid() bool { return t == TypeVoid } -func (t *Type) IsTuple() bool { return t.Etype == TTUPLE } -func (t *Type) IsResults() bool { return t.Etype == TRESULTS } +func (t *Type) IsTuple() bool { return t.kind == TTUPLE } +func (t *Type) IsResults() bool { return t.kind == TRESULTS } // IsUntyped reports whether t is an untyped type. 
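// For example (a sketch; assumes UntypedInt is a TIDEAL type, matching
// the untyped constant types declared above):
//
//	UntypedInt.IsUntyped()  // true (kind TIDEAL)
//	Types[TINT].IsUntyped() // false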
func (t *Type) IsUntyped() bool { @@ -1465,7 +1465,7 @@ func (t *Type) IsUntyped() bool { if t == UntypedString || t == UntypedBool { return true } - switch t.Etype { + switch t.kind { case TNIL, TIDEAL: return true } @@ -1475,7 +1475,7 @@ func (t *Type) IsUntyped() bool { // HasPointers reports whether t contains a heap pointer. // Note that this function ignores pointers to go:notinheap types. func (t *Type) HasPointers() bool { - switch t.Etype { + switch t.kind { case TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TUINTPTR, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TBOOL, TSSA: return false @@ -1551,16 +1551,16 @@ var ( ) // NewNamed returns a new named type for the given type name. -func NewNamed(obj IRNode) *Type { +func NewNamed(obj Object) *Type { t := New(TFORW) - t.Sym = obj.Sym() + t.sym = obj.Sym() t.nod = obj return t } // Obj returns the type name for the named type t. -func (t *Type) Obj() IRNode { - if t.Sym != nil { +func (t *Type) Obj() Object { + if t.sym != nil { return t.nod } return nil @@ -1568,7 +1568,7 @@ func (t *Type) Obj() IRNode { // SetUnderlying sets the underlying type. func (t *Type) SetUnderlying(underlying *Type) { - if underlying.Etype == TFORW { + if underlying.kind == TFORW { // This type isn't computed yet; when it is, update n. underlying.ForwardType().Copyto = append(underlying.ForwardType().Copyto, t) return @@ -1577,11 +1577,11 @@ func (t *Type) SetUnderlying(underlying *Type) { ft := t.ForwardType() // TODO(mdempsky): Fix Type rekinding. - t.Etype = underlying.Etype + t.kind = underlying.kind t.Extra = underlying.Extra t.Width = underlying.Width t.Align = underlying.Align - t.Orig = underlying.Orig + t.underlying = underlying.underlying if underlying.NotInHeap() { t.SetNotInHeap(true) @@ -1612,9 +1612,9 @@ func (t *Type) SetUnderlying(underlying *Type) { } // NewNamed returns a new basic type of the given kind. -func NewBasic(kind EType, obj IRNode) *Type { +func NewBasic(kind Kind, obj Object) *Type { t := New(kind) - t.Sym = obj.Sym() + t.sym = obj.Sym() t.nod = obj return t } From 1408d26ccca5f770e29785ddd442523416de2dd6 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 1 Dec 2020 11:37:30 -0800 Subject: [PATCH 087/474] [dev.regabi] cmd/compile: cleanup some leftover cruft Just clearing away some scaffolding artifacts from previous refactorings. 
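The removed pattern is the redundant round-trip through accessors: on a
*ir.Name, n.Name() returns n itself, and with types.Object carrying Sym
and Type directly, wrapping sym.Def in ir.AsNode before calling them is
a no-op. A sketch of the before/after shapes (see the rewrites below):

	ln.Name().SetUsed(true)  ->  ln.SetUsed(true)
	ir.AsNode(s.Def).Type()  ->  s.Def.Type()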
[git-generate] cd src/cmd/compile/internal/gc rf ' ex { import "cmd/compile/internal/ir" import "cmd/compile/internal/types" var n *ir.Name; n.Name() -> n var f *ir.Func; f.Func() -> f var o types.Object ir.AsNode(o).Sym() -> o.Sym() ir.AsNode(o).Type() -> o.Type() ir.AsNode(o).(*ir.Name) -> o.(*ir.Name) ir.AsNode(o).(*ir.Func) -> o.(*ir.Func) var x ir.Node ir.AsNode(o) != x -> o != x } ' Change-Id: I946ec344bd7ee274900a392da53b95308ceaade4 Reviewed-on: https://go-review.googlesource.com/c/go/+/274592 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/closure.go | 2 +- src/cmd/compile/internal/gc/dcl.go | 10 +++++----- src/cmd/compile/internal/gc/escape.go | 10 +++++----- src/cmd/compile/internal/gc/iexport.go | 4 ++-- src/cmd/compile/internal/gc/init.go | 2 +- src/cmd/compile/internal/gc/main.go | 2 +- src/cmd/compile/internal/gc/noder.go | 2 +- src/cmd/compile/internal/gc/obj.go | 4 ++-- src/cmd/compile/internal/gc/pgen.go | 16 ++++++++-------- src/cmd/compile/internal/gc/reflect.go | 4 ++-- src/cmd/compile/internal/gc/ssa.go | 4 ++-- src/cmd/compile/internal/gc/typecheck.go | 6 +++--- src/cmd/compile/internal/gc/universe.go | 2 +- src/cmd/compile/internal/gc/walk.go | 10 +++++----- 14 files changed, 39 insertions(+), 39 deletions(-) diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index 0ba2858b8b4e8..e33a561bd4bbd 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -437,7 +437,7 @@ func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) *ir.Func { sym := methodSymSuffix(rcvrtype, meth, "-fm") if sym.Uniq() { - return ir.AsNode(sym.Def).(*ir.Func) + return sym.Def.(*ir.Func) } sym.SetUniq(true) diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 3d0bdaec7a4a1..dd59d829fedf2 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -95,7 +95,7 @@ func declare(n *ir.Name, ctxt ir.Class) { gen = vargen } types.Pushdcl(s) - n.Name().Curfn = Curfn + n.Curfn = Curfn } if ctxt == ir.PAUTO { @@ -113,7 +113,7 @@ func declare(n *ir.Name, ctxt ir.Class) { s.Block = types.Block s.Lastlineno = base.Pos s.Def = n - n.Name().Vargen = int32(gen) + n.Vargen = int32(gen) n.SetClass(ctxt) if ctxt == ir.PFUNC { n.Sym().SetFunc(true) @@ -335,7 +335,7 @@ func colasdefn(left []ir.Node, defn ir.Node) { nnew++ n := NewName(n.Sym()) declare(n, dclcontext) - n.Name().Defn = defn + n.Defn = defn defn.PtrInit().Append(ir.Nod(ir.ODCL, n, nil)) left[i] = n } @@ -438,7 +438,7 @@ func funcarg(n *ir.Field, ctxt ir.Class) { declare(name, ctxt) vargen++ - n.Decl.Name().Vargen = int32(vargen) + n.Decl.Vargen = int32(vargen) } // Same as funcargs, except run over an already constructed TFUNC. @@ -837,7 +837,7 @@ func addmethod(n *ir.Func, msym *types.Sym, t *types.Type, local, nointerface bo } f := types.NewField(base.Pos, msym, t) - f.Nname = n.Func().Nname + f.Nname = n.Nname f.SetNointerface(nointerface) mt.Methods().Append(f) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index b29896e5a48b8..c1397717302d2 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -1802,8 +1802,8 @@ func addrescapes(n ir.Node) { } // If a closure reference escapes, mark the outer variable as escaping. 
- if n.Name().IsClosureVar() { - addrescapes(n.Name().Defn) + if n.IsClosureVar() { + addrescapes(n.Defn) break } @@ -1824,7 +1824,7 @@ func addrescapes(n ir.Node) { // then we're analyzing the inner closure but we need to move x to the // heap in f, not in the inner closure. Flip over to f before calling moveToHeap. oldfn := Curfn - Curfn = n.Name().Curfn + Curfn = n.Curfn ln := base.Pos base.Pos = Curfn.Pos() moveToHeap(n) @@ -1893,7 +1893,7 @@ func moveToHeap(n *ir.Name) { // See issue 16095. heapaddr.SetIsOutputParamHeapAddr(true) } - n.Name().Stackcopy = stackcopy + n.Stackcopy = stackcopy // Substitute the stackcopy into the function variable list so that // liveness and other analyses use the underlying stack slot @@ -1920,7 +1920,7 @@ func moveToHeap(n *ir.Name) { // Modify n in place so that uses of n now mean indirection of the heapaddr. n.SetClass(ir.PAUTOHEAP) n.SetOffset(0) - n.Name().Heapaddr = heapaddr + n.Heapaddr = heapaddr n.SetEsc(EscHeap) if base.Flag.LowerM != 0 { base.WarnfAt(n.Pos(), "moved to heap: %v", n) diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 8f50868fc793e..2231f493dd6ef 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -395,7 +395,7 @@ func (p *iexporter) stringOff(s string) uint64 { // pushDecl adds n to the declaration work queue, if not already present. func (p *iexporter) pushDecl(n ir.Node) { - if n.Sym() == nil || ir.AsNode(n.Sym().Def) != n && n.Op() != ir.OTYPE { + if n.Sym() == nil || n.Sym().Def != n && n.Op() != ir.OTYPE { base.Fatalf("weird Sym: %v, %v", n, n.Sym()) } @@ -988,7 +988,7 @@ func (w *exportWriter) funcExt(n *ir.Name) { func (w *exportWriter) methExt(m *types.Field) { w.bool(m.Nointerface()) - w.funcExt(ir.AsNode(m.Nname).(*ir.Name)) + w.funcExt(m.Nname.(*ir.Name)) } func (w *exportWriter) linkname(s *types.Sym) { diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index b5fd2e7c758d2..e67a032c5d6c6 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -60,7 +60,7 @@ func fninit(n []ir.Node) { initializers := lookup("init") fn := dclfunc(initializers, ir.NewFuncType(base.Pos, nil, nil, nil)) for _, dcl := range initTodo.Dcl { - dcl.Name().Curfn = fn + dcl.Curfn = fn } fn.Dcl = append(fn.Dcl, initTodo.Dcl...) 
initTodo.Dcl = nil diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 718239484b7c3..96031fe511490 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -984,7 +984,7 @@ func clearImports() { } func IsAlias(sym *types.Sym) bool { - return sym.Def != nil && ir.AsNode(sym.Def).Sym() != sym + return sym.Def != nil && sym.Def.Sym() != sym } // recordFlags records the specified command-line flags to be placed diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 1340068c722c0..de7dcda15eaeb 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -1071,7 +1071,7 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node { if ln.Class() != ir.PPARAMOUT { break } - if ir.AsNode(ln.Sym().Def) != ln { + if ln.Sym().Def != ln { base.Errorf("%s is shadowed during return", ln.Sym().Name) } } diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index f65131417a38a..21a50257b88a2 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -220,10 +220,10 @@ func addptabs() { } if n.Type().Kind() == types.TFUNC && n.Class() == ir.PFUNC { // function - ptabs = append(ptabs, ptabEntry{s: s, t: ir.AsNode(s.Def).Type()}) + ptabs = append(ptabs, ptabEntry{s: s, t: s.Def.Type()}) } else { // variable - ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(ir.AsNode(s.Def).Type())}) + ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(s.Def.Type())}) } } } diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index ea294ed66d5f4..1da0929290626 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -77,8 +77,8 @@ func cmpstackvarlt(a, b *ir.Name) bool { return a.Offset() < b.Offset() } - if a.Name().Used() != b.Name().Used() { - return a.Name().Used() + if a.Used() != b.Used() { + return a.Used() } ap := a.Type().HasPointers() @@ -87,8 +87,8 @@ func cmpstackvarlt(a, b *ir.Name) bool { return ap } - ap = a.Name().Needzero() - bp = b.Name().Needzero() + ap = a.Needzero() + bp = b.Needzero() if ap != bp { return ap } @@ -115,7 +115,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { // Mark the PAUTO's unused. for _, ln := range fn.Dcl { if ln.Class() == ir.PAUTO { - ln.Name().SetUsed(false) + ln.SetUsed(false) } } @@ -158,7 +158,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { if n.Op() != ir.ONAME || n.Class() != ir.PAUTO { continue } - if !n.Name().Used() { + if !n.Used() { fn.Dcl = fn.Dcl[:i] break } @@ -260,7 +260,7 @@ func compile(fn *ir.Func) { for _, n := range fn.Dcl { switch n.Class() { case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO: - if livenessShouldTrack(n) && n.Name().Addrtaken() { + if livenessShouldTrack(n) && n.Addrtaken() { dtypesym(n.Type()) // Also make sure we allocate a linker symbol // for the stack object data, for the same reason. 
@@ -447,7 +447,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S } switch n.Class() { case ir.PAUTO: - if !n.Name().Used() { + if !n.Used() { // Text == nil -> generating abstract function if fnsym.Func().Text != nil { base.Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)") diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 4ab3005ce8211..06b91ddae64bc 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -1001,7 +1001,7 @@ func typename(t *types.Type) ir.Node { } n := ir.Nod(ir.OADDR, ir.AsNode(s.Def), nil) - n.SetType(types.NewPtr(ir.AsNode(s.Def).Type())) + n.SetType(types.NewPtr(s.Def.Type())) n.SetTypecheck(1) return n } @@ -1021,7 +1021,7 @@ func itabname(t, itype *types.Type) ir.Node { } n := ir.Nod(ir.OADDR, ir.AsNode(s.Def), nil) - n.SetType(types.NewPtr(ir.AsNode(s.Def).Type())) + n.SetType(types.NewPtr(s.Def.Type())) n.SetTypecheck(1) return n } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 3e020d7b9269b..60e65e4b11c92 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -6196,7 +6196,7 @@ func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func emitStackObjects(e *ssafn, pp *Progs) { var vars []ir.Node for _, n := range e.curfn.Dcl { - if livenessShouldTrack(n) && n.Name().Addrtaken() { + if livenessShouldTrack(n) && n.Addrtaken() { vars = append(vars, n) } } @@ -6583,7 +6583,7 @@ func defframe(s *SSAGenState, e *ssafn) { // Iterate through declarations. They are sorted in decreasing Xoffset order. for _, n := range e.curfn.Dcl { - if !n.Name().Needzero() { + if !n.Needzero() { continue } if n.Class() != ir.PAUTO { diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index f120b4441323f..20ef3fc70a7db 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -2493,7 +2493,7 @@ func lookdot(n ir.Node, t *types.Type, dostrcmp int) *types.Field { pll = ll ll = ll.Left() } - if pll.Implicit() && ll.Type().IsPtr() && ll.Type().Sym() != nil && ir.AsNode(ll.Type().Sym().Def) != nil && ir.AsNode(ll.Type().Sym().Def).Op() == ir.OTYPE { + if pll.Implicit() && ll.Type().IsPtr() && ll.Type().Sym() != nil && ll.Type().Sym().Def != nil && ir.AsNode(ll.Type().Sym().Def).Op() == ir.OTYPE { // It is invalid to automatically dereference a named pointer type when selecting a method. // Make n.Left == ll to clarify error message. n.SetLeft(ll) @@ -3369,7 +3369,7 @@ func typecheckfunc(n *ir.Func) { for _, ln := range n.Dcl { if ln.Op() == ir.ONAME && (ln.Class() == ir.PPARAM || ln.Class() == ir.PPARAMOUT) { - ln.Name().Decldepth = 1 + ln.Decldepth = 1 } } @@ -3923,7 +3923,7 @@ func curpkg() *types.Pkg { // referenced by expression n, which must be a method selector, // method expression, or method value. 
func methodExprName(n ir.Node) *ir.Name { - name, _ := ir.AsNode(methodExprFunc(n).Nname).(*ir.Name) + name, _ := methodExprFunc(n).Nname.(*ir.Name) return name } diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index 49e50734c6a66..b554674fbc686 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -358,5 +358,5 @@ func finishUniverse() { nodfp = NewName(lookup(".fp")) nodfp.SetType(types.Types[types.TINT32]) nodfp.SetClass(ir.PPARAM) - nodfp.Name().SetUsed(true) + nodfp.SetUsed(true) } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index b3af353c3fbce..be6f1539b93cd 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -43,16 +43,16 @@ func walk(fn *ir.Func) { // Propagate the used flag for typeswitch variables up to the NONAME in its definition. for _, ln := range fn.Dcl { - if ln.Op() == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) && ln.Name().Defn != nil && ln.Name().Defn.Op() == ir.OTYPESW && ln.Name().Used() { - ln.Name().Defn.Left().Name().SetUsed(true) + if ln.Op() == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) && ln.Defn != nil && ln.Defn.Op() == ir.OTYPESW && ln.Used() { + ln.Defn.Left().Name().SetUsed(true) } } for _, ln := range fn.Dcl { - if ln.Op() != ir.ONAME || (ln.Class() != ir.PAUTO && ln.Class() != ir.PAUTOHEAP) || ln.Sym().Name[0] == '&' || ln.Name().Used() { + if ln.Op() != ir.ONAME || (ln.Class() != ir.PAUTO && ln.Class() != ir.PAUTOHEAP) || ln.Sym().Name[0] == '&' || ln.Used() { continue } - if defn := ln.Name().Defn; defn != nil && defn.Op() == ir.OTYPESW { + if defn := ln.Defn; defn != nil && defn.Op() == ir.OTYPESW { if defn.Left().Name().Used() { continue } @@ -91,7 +91,7 @@ func paramoutheap(fn *ir.Func) bool { for _, ln := range fn.Dcl { switch ln.Class() { case ir.PPARAMOUT: - if isParamStackCopy(ln) || ln.Name().Addrtaken() { + if isParamStackCopy(ln) || ln.Addrtaken() { return true } From 15085f89746762e0919fa257feac3eb5b996e6db Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 1 Dec 2020 13:38:26 -0800 Subject: [PATCH 088/474] [dev.regabi] cmd/compile: tweak hash bucket type descriptor There's no need for the bucket type to be precise. The compiler doesn't actually generate code that references these fields; it just needs it for size and GC bitmap calculations. However, changing the type field does alter the runtime type descriptor and relocations emitted by the compiler, so this change isn't safe for toolstash. Change-Id: Icf79d6c4326515889b13435a575d618e3bbfbcd7 Reviewed-on: https://go-review.googlesource.com/c/go/+/274712 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/reflect.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 06b91ddae64bc..0b860b5f7a404 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -85,7 +85,6 @@ func bmap(t *types.Type) *types.Type { return t.MapType().Bucket } - bucket := types.New(types.TSTRUCT) keytype := t.Key() elemtype := t.Elem() dowidth(keytype) @@ -119,7 +118,7 @@ func bmap(t *types.Type) *types.Type { // Arrange for the bucket to have no pointers by changing // the type of the overflow field to uintptr in this case. 
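	// For example (a sketch): map[int]int gets a uintptr overflow field
	// and a pointer-free bucket, while map[string]int keeps a
	// pointer-shaped overflow field because its keys contain pointers.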
// See comment on hmap.overflow in runtime/map.go. - otyp := types.NewPtr(bucket) + otyp := types.Types[types.TUNSAFEPTR] if !elemtype.HasPointers() && !keytype.HasPointers() { otyp = types.Types[types.TUINTPTR] } @@ -127,6 +126,7 @@ func bmap(t *types.Type) *types.Type { field = append(field, overflow) // link up fields + bucket := types.New(types.TSTRUCT) bucket.SetNoalg(true) bucket.SetFields(field[:]) dowidth(bucket) From 77a71e0057357b0567cc5036f7e0f903d82705bb Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 1 Dec 2020 13:07:35 -0800 Subject: [PATCH 089/474] [dev.regabi] cmd/compile: add Interface, Signature, and Struct constructors This CL adds the remaining constructors needed to abstract away construction of Types, and updates the compiler to use them throughout. There's now just a couple uses within test cases to remove. While at it, I also replace the Func.Outnamed field with a simple helper function, which reduces the size of function types somewhat. Passes toolstash/buildall. Change-Id: If1aa1095c98ae34b00380d0b3531bd63c10ce885 Reviewed-on: https://go-review.googlesource.com/c/go/+/274713 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/dcl.go | 182 ++++++------------ src/cmd/compile/internal/gc/iimport.go | 8 +- src/cmd/compile/internal/gc/pgen_test.go | 15 +- src/cmd/compile/internal/gc/reflect.go | 12 +- src/cmd/compile/internal/gc/typecheck.go | 2 +- src/cmd/compile/internal/gc/universe.go | 10 +- src/cmd/compile/internal/gc/walk.go | 2 +- src/cmd/compile/internal/types/sizeof_test.go | 2 +- src/cmd/compile/internal/types/type.go | 59 +++++- 9 files changed, 137 insertions(+), 155 deletions(-) diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index dd59d829fedf2..e0c87d4517c48 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -369,7 +369,7 @@ func funchdr(fn *ir.Func) { types.Markdcl() - if fn.Nname != nil && fn.Nname.Ntype != nil { + if fn.Nname.Ntype != nil { funcargs(fn.Nname.Ntype.(*ir.FuncType)) } else { funcargs2(fn.Type()) @@ -510,27 +510,6 @@ func checkembeddedtype(t *types.Type) { } } -func structfield(n *ir.Field) *types.Field { - lno := base.Pos - base.Pos = n.Pos - - if n.Ntype != nil { - n.Ntype = typecheckNtype(n.Ntype) - n.Type = n.Ntype.Type() - n.Ntype = nil - } - - f := types.NewField(n.Pos, n.Sym, n.Type) - if n.Embedded { - checkembeddedtype(n.Type) - f.Embedded = 1 - } - f.Note = n.Note - - base.Pos = lno - return f -} - // checkdupfields emits errors for duplicately named fields or methods in // a list of struct or interface types. 
func checkdupfields(what string, fss ...[]*types.Field) { @@ -552,95 +531,49 @@ func checkdupfields(what string, fss ...[]*types.Field) { // convert a parsed id/type list into // a type for struct/interface/arglist func tostruct(l []*ir.Field) *types.Type { - t := types.New(types.TSTRUCT) + lno := base.Pos fields := make([]*types.Field, len(l)) for i, n := range l { - f := structfield(n) - if f.Broke() { - t.SetBroke(true) - } - fields[i] = f - } - t.SetFields(fields) - - checkdupfields("field", t.FieldSlice()) - - if !t.Broke() { - checkwidth(t) - } + base.Pos = n.Pos - return t -} - -func tofunargs(l []*ir.Field, funarg types.Funarg) *types.Type { - t := types.New(types.TSTRUCT) - t.StructType().Funarg = funarg - - fields := make([]*types.Field, len(l)) - for i, n := range l { - f := structfield(n) - f.SetIsDDD(n.IsDDD) - if n.Decl != nil { - n.Decl.SetType(f.Type) - f.Nname = n.Decl + if n.Ntype != nil { + n.Type = typecheckNtype(n.Ntype).Type() + n.Ntype = nil } - if f.Broke() { - t.SetBroke(true) + f := types.NewField(n.Pos, n.Sym, n.Type) + if n.Embedded { + checkembeddedtype(n.Type) + f.Embedded = 1 } + f.Note = n.Note fields[i] = f } - t.SetFields(fields) - return t -} + checkdupfields("field", fields) -func tofunargsfield(fields []*types.Field, funarg types.Funarg) *types.Type { - t := types.New(types.TSTRUCT) - t.StructType().Funarg = funarg - t.SetFields(fields) - return t + base.Pos = lno + return types.NewStruct(fields) } -func interfacefield(n *ir.Field) *types.Field { - lno := base.Pos - base.Pos = n.Pos - - if n.Note != "" { - base.Errorf("interface method cannot have annotation") +func tointerface(nmethods []*ir.Field) *types.Type { + if len(nmethods) == 0 { + return types.Types[types.TINTER] } - // MethodSpec = MethodName Signature | InterfaceTypeName . - // - // If Sym != nil, then Sym is MethodName and Left is Signature. - // Otherwise, Left is InterfaceTypeName. 
+ lno := base.Pos - if n.Ntype != nil { - n.Ntype = typecheckNtype(n.Ntype) - n.Type = n.Ntype.Type() - n.Ntype = nil + methods := make([]*types.Field, len(nmethods)) + for i, n := range nmethods { + base.Pos = n.Pos + if n.Ntype != nil { + n.Type = typecheckNtype(n.Ntype).Type() + n.Ntype = nil + } + methods[i] = types.NewField(n.Pos, n.Sym, n.Type) } - f := types.NewField(n.Pos, n.Sym, n.Type) - base.Pos = lno - return f -} - -func tointerface(l []*ir.Field) *types.Type { - if len(l) == 0 { - return types.Types[types.TINTER] - } - t := types.New(types.TINTER) - var fields []*types.Field - for _, n := range l { - f := interfacefield(n) - if f.Broke() { - t.SetBroke(true) - } - fields = append(fields, f) - } - t.SetInterface(fields) - return t + return types.NewInterface(methods) } func fakeRecv() *ir.Field { @@ -659,42 +592,47 @@ func isifacemethod(f *types.Type) bool { } // turn a parsed function declaration into a type -func functype(this *ir.Field, in, out []*ir.Field) *types.Type { - t := types.New(types.TFUNC) - - var rcvr []*ir.Field - if this != nil { - rcvr = []*ir.Field{this} - } - t.FuncType().Receiver = tofunargs(rcvr, types.FunargRcvr) - t.FuncType().Params = tofunargs(in, types.FunargParams) - t.FuncType().Results = tofunargs(out, types.FunargResults) +func functype(nrecv *ir.Field, nparams, nresults []*ir.Field) *types.Type { + funarg := func(n *ir.Field) *types.Field { + lno := base.Pos + base.Pos = n.Pos + + if n.Ntype != nil { + n.Type = typecheckNtype(n.Ntype).Type() + n.Ntype = nil + } - checkdupfields("argument", t.Recvs().FieldSlice(), t.Params().FieldSlice(), t.Results().FieldSlice()) + f := types.NewField(n.Pos, n.Sym, n.Type) + f.SetIsDDD(n.IsDDD) + if n.Decl != nil { + n.Decl.SetType(f.Type) + f.Nname = n.Decl + } - if t.Recvs().Broke() || t.Results().Broke() || t.Params().Broke() { - t.SetBroke(true) + base.Pos = lno + return f + } + funargs := func(nn []*ir.Field) []*types.Field { + res := make([]*types.Field, len(nn)) + for i, n := range nn { + res[i] = funarg(n) + } + return res } - t.FuncType().Outnamed = t.NumResults() > 0 && ir.OrigSym(t.Results().Field(0).Sym) != nil + var recv *types.Field + if nrecv != nil { + recv = funarg(nrecv) + } + t := types.NewSignature(recv, funargs(nparams), funargs(nresults)) + checkdupfields("argument", t.Recvs().FieldSlice(), t.Params().FieldSlice(), t.Results().FieldSlice()) return t } -func functypefield(this *types.Field, in, out []*types.Field) *types.Type { - t := types.New(types.TFUNC) - - var rcvr []*types.Field - if this != nil { - rcvr = []*types.Field{this} - } - t.FuncType().Receiver = tofunargsfield(rcvr, types.FunargRcvr) - t.FuncType().Params = tofunargsfield(in, types.FunargParams) - t.FuncType().Results = tofunargsfield(out, types.FunargResults) - - t.FuncType().Outnamed = t.NumResults() > 0 && ir.OrigSym(t.Results().Field(0).Sym) != nil - - return t +func hasNamedResults(fn *ir.Func) bool { + typ := fn.Type() + return typ.NumResults() > 0 && ir.OrigSym(typ.Results().Field(0).Sym) != nil } // methodSym returns the method symbol representing a method name diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 15f1b646f794c..1bb98415641cb 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -545,9 +545,8 @@ func (r *importReader) typ1() *types.Type { fs[i] = f } - t := types.New(types.TSTRUCT) + t := types.NewStruct(fs) t.SetPkg(r.currPkg) - t.SetFields(fs) return t case interfaceType: @@ -570,9 +569,8 @@ func (r 
*importReader) typ1() *types.Type { methods[i] = types.NewField(pos, sym, typ) } - t := types.New(types.TINTER) + t := types.NewInterface(append(embeddeds, methods...)) t.SetPkg(r.currPkg) - t.SetInterface(append(embeddeds, methods...)) // Ensure we expand the interface in the frontend (#25055). checkwidth(t) @@ -590,7 +588,7 @@ func (r *importReader) signature(recv *types.Field) *types.Type { if n := len(params); n > 0 { params[n-1].SetIsDDD(r.bool()) } - t := functypefield(recv, params, results) + t := types.NewSignature(recv, params, results) t.SetPkg(r.currPkg) return t } diff --git a/src/cmd/compile/internal/gc/pgen_test.go b/src/cmd/compile/internal/gc/pgen_test.go index 35ce087af62ce..710bc325349d1 100644 --- a/src/cmd/compile/internal/gc/pgen_test.go +++ b/src/cmd/compile/internal/gc/pgen_test.go @@ -7,23 +7,22 @@ package gc import ( "cmd/compile/internal/ir" "cmd/compile/internal/types" + "cmd/internal/src" "reflect" "sort" "testing" ) func typeWithoutPointers() *types.Type { - t := types.New(types.TSTRUCT) - f := &types.Field{Type: types.New(types.TINT)} - t.SetFields([]*types.Field{f}) - return t + return types.NewStruct([]*types.Field{ + types.NewField(src.NoXPos, nil, types.New(types.TINT)), + }) } func typeWithPointers() *types.Type { - t := types.New(types.TSTRUCT) - f := &types.Field{Type: types.NewPtr(types.New(types.TINT))} - t.SetFields([]*types.Field{f}) - return t + return types.NewStruct([]*types.Field{ + types.NewField(src.NoXPos, nil, types.NewPtr(types.New(types.TINT))), + }) } func markUsed(n *ir.Name) *ir.Name { diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 0b860b5f7a404..b249310df0078 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -126,9 +126,8 @@ func bmap(t *types.Type) *types.Type { field = append(field, overflow) // link up fields - bucket := types.New(types.TSTRUCT) + bucket := types.NewStruct(field[:]) bucket.SetNoalg(true) - bucket.SetFields(field[:]) dowidth(bucket) // Check invariants that map code depends on. 
@@ -221,9 +220,8 @@ func hmap(t *types.Type) *types.Type { makefield("extra", types.Types[types.TUNSAFEPTR]), } - hmap := types.New(types.TSTRUCT) + hmap := types.NewStruct(fields) hmap.SetNoalg(true) - hmap.SetFields(fields) dowidth(hmap) // The size of hmap should be 48 bytes on 64 bit @@ -285,9 +283,8 @@ func hiter(t *types.Type) *types.Type { } // build iterator struct holding the above fields - hiter := types.New(types.TSTRUCT) + hiter := types.NewStruct(fields) hiter.SetNoalg(true) - hiter.SetFields(fields) dowidth(hiter) if hiter.Width != int64(12*Widthptr) { base.Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*Widthptr) @@ -332,9 +329,8 @@ func deferstruct(stksize int64) *types.Type { } // build struct holding the above fields - s := types.New(types.TSTRUCT) + s := types.NewStruct(fields) s.SetNoalg(true) - s.SetFields(fields) s.Width = widstruct(s, s, 0, 1) s.Align = uint8(Widthptr) return s diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 20ef3fc70a7db..2a0caad4693f3 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -2021,7 +2021,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } - if Curfn.Type().FuncType().Outnamed && n.List().Len() == 0 { + if hasNamedResults(Curfn) && n.List().Len() == 0 { break } typecheckaste(ir.ORETURN, nil, false, Curfn.Type().Results(), n.List(), func() string { return "return argument" }) diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index b554674fbc686..1c744dc367042 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -104,7 +104,7 @@ func initUniverse() { } types.Types[types.TANY] = types.New(types.TANY) - types.Types[types.TINTER] = types.New(types.TINTER) // empty interface + types.Types[types.TINTER] = types.NewInterface(nil) defBasic := func(kind types.Kind, pkg *types.Pkg, name string) *types.Type { sym := pkg.Lookup(name) @@ -325,15 +325,11 @@ func initUniverse() { } func makeErrorInterface() *types.Type { - sig := functypefield(fakeRecvField(), nil, []*types.Field{ + sig := types.NewSignature(fakeRecvField(), nil, []*types.Field{ types.NewField(src.NoXPos, nil, types.Types[types.TSTRING]), }) - method := types.NewField(src.NoXPos, lookup("Error"), sig) - - t := types.New(types.TINTER) - t.SetInterface([]*types.Field{method}) - return t + return types.NewInterface([]*types.Field{method}) } // finishUniverse makes the universe block visible within the current package. 
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index be6f1539b93cd..183a7acc1b5b3 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -274,7 +274,7 @@ func walkstmt(n ir.Node) ir.Node { if n.List().Len() == 0 { break } - if (Curfn.Type().FuncType().Outnamed && n.List().Len() > 1) || paramoutheap(Curfn) { + if (hasNamedResults(Curfn) && n.List().Len() > 1) || paramoutheap(Curfn) { // assign to the function out parameters, // so that reorder3 can fix up conflicts var rl []ir.Node diff --git a/src/cmd/compile/internal/types/sizeof_test.go b/src/cmd/compile/internal/types/sizeof_test.go index 88a2fbba2f5eb..72a35bc7daadc 100644 --- a/src/cmd/compile/internal/types/sizeof_test.go +++ b/src/cmd/compile/internal/types/sizeof_test.go @@ -24,7 +24,7 @@ func TestSizeof(t *testing.T) { {Type{}, 56, 96}, {Map{}, 20, 40}, {Forward{}, 20, 32}, - {Func{}, 28, 48}, + {Func{}, 24, 40}, {Struct{}, 16, 32}, {Interface{}, 8, 16}, {Chan{}, 8, 16}, diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index 36aac53124b10..2eff8e3ba47f6 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -285,8 +285,6 @@ type Func struct { // It gets calculated via a temporary TFUNCARGS type. // Note that TFUNC's Width is Widthptr. Argwid int64 - - Outnamed bool } // FuncType returns t's extra func-specific fields. @@ -1618,3 +1616,60 @@ func NewBasic(kind Kind, obj Object) *Type { t.nod = obj return t } + +// NewInterface returns a new interface for the given methods and +// embedded types. Embedded types are specified as fields with no Sym. +func NewInterface(methods []*Field) *Type { + t := New(TINTER) + t.SetInterface(methods) + if anyBroke(methods) { + t.SetBroke(true) + } + return t +} + +// NewSignature returns a new function type for the given receiver, +// parameters, and results, any of which may be nil. +func NewSignature(recv *Field, params, results []*Field) *Type { + var recvs []*Field + if recv != nil { + recvs = []*Field{recv} + } + + t := New(TFUNC) + ft := t.FuncType() + + funargs := func(fields []*Field, funarg Funarg) *Type { + s := NewStruct(fields) + s.StructType().Funarg = funarg + if s.Broke() { + t.SetBroke(true) + } + return s + } + + ft.Receiver = funargs(recvs, FunargRcvr) + ft.Params = funargs(params, FunargParams) + ft.Results = funargs(results, FunargResults) + + return t +} + +// NewStruct returns a new struct with the given fields. +func NewStruct(fields []*Field) *Type { + t := New(TSTRUCT) + t.SetFields(fields) + if anyBroke(fields) { + t.SetBroke(true) + } + return t +} + +func anyBroke(fields []*Field) bool { + for _, f := range fields { + if f.Broke() { + return true + } + } + return false +} From 42e46f4ae0c4f3d6bf7f3920fa936f056ea485c4 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 1 Dec 2020 22:11:56 -0800 Subject: [PATCH 090/474] [dev.regabi] cmd/compile: comment out //go:linkname warning It's noisy and not doing any harm, and we still have an entire release cycle to revisit and address the issue properly. 
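For reference, the suppressed warning fires when a //go:linkname names a
local symbol that is never declared, as exercised by test/linkname2.go in
the diff below; a minimal reproducer looks roughly like this sketch:

	package p

	import _ "unsafe" // required for //go:linkname

	//go:linkname nonexist nonexist

Because nonexist has no Go declaration, lookup finds no ONAME for it, and
the now-commented-out WarnfAt call would have reported it.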
Updates #42938 Change-Id: I1de5cfb495a8148c9c08b215deba38f2617fb467 Reviewed-on: https://go-review.googlesource.com/c/go/+/274732 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/gc/noder.go | 2 +- test/linkname2.go | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index de7dcda15eaeb..e5677f921fc80 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -275,7 +275,7 @@ func (p *noder) processPragmas() { n := ir.AsNode(lookup(l.local).Def) if n == nil || n.Op() != ir.ONAME { // TODO(mdempsky): Change to p.errorAt before Go 1.17 release. - base.WarnfAt(p.makeXPos(l.pos), "//go:linkname must refer to declared function or variable (will be an error in Go 1.17)") + // base.WarnfAt(p.makeXPos(l.pos), "//go:linkname must refer to declared function or variable (will be an error in Go 1.17)") continue } if n.Sym().Linkname != "" { diff --git a/test/linkname2.go b/test/linkname2.go index cb7f9be3452a0..43e66a584958b 100644 --- a/test/linkname2.go +++ b/test/linkname2.go @@ -16,10 +16,13 @@ var x, y int //go:linkname x ok // ERROR "//go:linkname requires linkname argument or -p compiler flag" -// ERROR "//go:linkname must refer to declared function or variable" -// ERROR "//go:linkname must refer to declared function or variable" +// BAD: want error "//go:linkname must refer to declared function or variable" +// BAD: want error "//go:linkname must refer to declared function or variable" // ERROR "duplicate //go:linkname for x" +// The two BAD lines are just waiting for #42938 before we can +// re-enable the errors. + //line linkname2.go:18 //go:linkname y //go:linkname nonexist nonexist From c10b0ad628b4c7dd0f327c583702364abebb5132 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 1 Dec 2020 23:05:53 -0800 Subject: [PATCH 091/474] [dev.regabi] cmd/compile: add Pkg parameter to type constructors Allows getting rid of the SetPkg method and also addresses a long-standing TODO in the exporter. Suggested by rsc@. Passes buildall w/ toolstash -cmp. Change-Id: Ib294f75f1350572efb2e0d993d49efef884de3d4 Reviewed-on: https://go-review.googlesource.com/c/go/+/274440 Trust: Matthew Dempsky Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/gc/closure.go | 2 -- src/cmd/compile/internal/gc/dcl.go | 6 ++-- src/cmd/compile/internal/gc/iexport.go | 6 ++-- src/cmd/compile/internal/gc/iimport.go | 11 ++------ src/cmd/compile/internal/gc/pgen_test.go | 4 +-- src/cmd/compile/internal/gc/reflect.go | 8 +++--- src/cmd/compile/internal/gc/universe.go | 6 ++-- src/cmd/compile/internal/types/type.go | 36 ++++++++++-------------- 8 files changed, 32 insertions(+), 47 deletions(-) diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index e33a561bd4bbd..a5441a037a0bb 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -464,8 +464,6 @@ func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) *ir.Func { fn.SetDupok(true) fn.SetNeedctxt(true) - tfn.Type().SetPkg(t0.Pkg()) - // Declare and initialize variable holding receiver. 
cr := ir.NewClosureRead(rcvrtype, Rnd(int64(Widthptr), int64(rcvrtype.Align))) ptr := NewName(lookup(".this")) diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index e0c87d4517c48..87b389b98b74a 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -552,7 +552,7 @@ func tostruct(l []*ir.Field) *types.Type { checkdupfields("field", fields) base.Pos = lno - return types.NewStruct(fields) + return types.NewStruct(ir.LocalPkg, fields) } func tointerface(nmethods []*ir.Field) *types.Type { @@ -573,7 +573,7 @@ func tointerface(nmethods []*ir.Field) *types.Type { } base.Pos = lno - return types.NewInterface(methods) + return types.NewInterface(ir.LocalPkg, methods) } func fakeRecv() *ir.Field { @@ -625,7 +625,7 @@ func functype(nrecv *ir.Field, nparams, nresults []*ir.Field) *types.Type { recv = funarg(nrecv) } - t := types.NewSignature(recv, funargs(nparams), funargs(nresults)) + t := types.NewSignature(ir.LocalPkg, recv, funargs(nparams), funargs(nresults)) checkdupfields("argument", t.Recvs().FieldSlice(), t.Params().FieldSlice(), t.Results().FieldSlice()) return t } diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 2231f493dd6ef..7b21efb8c2b95 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -718,10 +718,8 @@ func (w *exportWriter) doTyp(t *types.Type) { } func (w *exportWriter) setPkg(pkg *types.Pkg, write bool) { - if pkg == nil { - // TODO(mdempsky): Proactively set Pkg for types and - // remove this fallback logic. - pkg = ir.LocalPkg + if pkg == types.NoPkg { + base.Fatalf("missing pkg") } if write { diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 1bb98415641cb..b6653dabda3c9 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -545,9 +545,7 @@ func (r *importReader) typ1() *types.Type { fs[i] = f } - t := types.NewStruct(fs) - t.SetPkg(r.currPkg) - return t + return types.NewStruct(r.currPkg, fs) case interfaceType: r.setPkg() @@ -569,8 +567,7 @@ func (r *importReader) typ1() *types.Type { methods[i] = types.NewField(pos, sym, typ) } - t := types.NewInterface(append(embeddeds, methods...)) - t.SetPkg(r.currPkg) + t := types.NewInterface(r.currPkg, append(embeddeds, methods...)) // Ensure we expand the interface in the frontend (#25055). 
checkwidth(t) @@ -588,9 +585,7 @@ func (r *importReader) signature(recv *types.Field) *types.Type { if n := len(params); n > 0 { params[n-1].SetIsDDD(r.bool()) } - t := types.NewSignature(recv, params, results) - t.SetPkg(r.currPkg) - return t + return types.NewSignature(r.currPkg, recv, params, results) } func (r *importReader) paramList() []*types.Field { diff --git a/src/cmd/compile/internal/gc/pgen_test.go b/src/cmd/compile/internal/gc/pgen_test.go index 710bc325349d1..473df82a0d709 100644 --- a/src/cmd/compile/internal/gc/pgen_test.go +++ b/src/cmd/compile/internal/gc/pgen_test.go @@ -14,13 +14,13 @@ import ( ) func typeWithoutPointers() *types.Type { - return types.NewStruct([]*types.Field{ + return types.NewStruct(types.NoPkg, []*types.Field{ types.NewField(src.NoXPos, nil, types.New(types.TINT)), }) } func typeWithPointers() *types.Type { - return types.NewStruct([]*types.Field{ + return types.NewStruct(types.NoPkg, []*types.Field{ types.NewField(src.NoXPos, nil, types.NewPtr(types.New(types.TINT))), }) } diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index b249310df0078..42139b7135980 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -126,7 +126,7 @@ func bmap(t *types.Type) *types.Type { field = append(field, overflow) // link up fields - bucket := types.NewStruct(field[:]) + bucket := types.NewStruct(types.NoPkg, field[:]) bucket.SetNoalg(true) dowidth(bucket) @@ -220,7 +220,7 @@ func hmap(t *types.Type) *types.Type { makefield("extra", types.Types[types.TUNSAFEPTR]), } - hmap := types.NewStruct(fields) + hmap := types.NewStruct(types.NoPkg, fields) hmap.SetNoalg(true) dowidth(hmap) @@ -283,7 +283,7 @@ func hiter(t *types.Type) *types.Type { } // build iterator struct holding the above fields - hiter := types.NewStruct(fields) + hiter := types.NewStruct(types.NoPkg, fields) hiter.SetNoalg(true) dowidth(hiter) if hiter.Width != int64(12*Widthptr) { @@ -329,7 +329,7 @@ func deferstruct(stksize int64) *types.Type { } // build struct holding the above fields - s := types.NewStruct(fields) + s := types.NewStruct(types.NoPkg, fields) s.SetNoalg(true) s.Width = widstruct(s, s, 0, 1) s.Align = uint8(Widthptr) diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index 1c744dc367042..b315502964689 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -104,7 +104,7 @@ func initUniverse() { } types.Types[types.TANY] = types.New(types.TANY) - types.Types[types.TINTER] = types.NewInterface(nil) + types.Types[types.TINTER] = types.NewInterface(ir.LocalPkg, nil) defBasic := func(kind types.Kind, pkg *types.Pkg, name string) *types.Type { sym := pkg.Lookup(name) @@ -325,11 +325,11 @@ func initUniverse() { } func makeErrorInterface() *types.Type { - sig := types.NewSignature(fakeRecvField(), nil, []*types.Field{ + sig := types.NewSignature(types.NoPkg, fakeRecvField(), nil, []*types.Field{ types.NewField(src.NoXPos, nil, types.Types[types.TSTRING]), }) method := types.NewField(src.NoXPos, lookup("Error"), sig) - return types.NewInterface([]*types.Field{method}) + return types.NewInterface(types.NoPkg, []*types.Field{method}) } // finishUniverse makes the universe block visible within the current package. 
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index 2eff8e3ba47f6..2c42e5579d1e5 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -211,6 +211,11 @@ func (t *Type) Pos() src.XPos { return src.NoXPos } +// NoPkg is a nil *Pkg value for clarity. +// It's intended for use when constructing types that aren't exported +// and thus don't need to be associated with any package. +var NoPkg *Pkg = nil + // Pkg returns the package that t appeared in. // // Pkg is only defined for function, struct, and interface types @@ -231,20 +236,6 @@ func (t *Type) Pkg() *Pkg { } } -// SetPkg sets the package that t appeared in. -func (t *Type) SetPkg(pkg *Pkg) { - switch t.kind { - case TFUNC: - t.Extra.(*Func).pkg = pkg - case TSTRUCT: - t.Extra.(*Struct).pkg = pkg - case TINTER: - t.Extra.(*Interface).pkg = pkg - default: - Fatalf("Pkg: unexpected kind: %v", t) - } -} - // Map contains Type fields specific to maps. type Map struct { Key *Type // Key type @@ -1609,7 +1600,7 @@ func (t *Type) SetUnderlying(underlying *Type) { } } -// NewNamed returns a new basic type of the given kind. +// NewBasic returns a new basic type of the given kind. func NewBasic(kind Kind, obj Object) *Type { t := New(kind) t.sym = obj.Sym() @@ -1619,18 +1610,19 @@ func NewBasic(kind Kind, obj Object) *Type { // NewInterface returns a new interface for the given methods and // embedded types. Embedded types are specified as fields with no Sym. -func NewInterface(methods []*Field) *Type { +func NewInterface(pkg *Pkg, methods []*Field) *Type { t := New(TINTER) t.SetInterface(methods) if anyBroke(methods) { t.SetBroke(true) } + t.Extra.(*Interface).pkg = pkg return t } -// NewSignature returns a new function type for the given receiver, -// parameters, and results, any of which may be nil. -func NewSignature(recv *Field, params, results []*Field) *Type { +// NewSignature returns a new function type for the given receiver, +// parameters, and results, any of which may be nil. +func NewSignature(pkg *Pkg, recv *Field, params, results []*Field) *Type { var recvs []*Field if recv != nil { recvs = []*Field{recv} @@ -1640,7 +1632,7 @@ func NewSignature(recv *Field, params, results []*Field) *Type { ft := t.FuncType() funargs := func(fields []*Field, funarg Funarg) *Type { - s := NewStruct(fields) + s := NewStruct(NoPkg, fields) s.StructType().Funarg = funarg if s.Broke() { t.SetBroke(true) @@ -1651,17 +1643,19 @@ func NewSignature(recv *Field, params, results []*Field) *Type { ft.Receiver = funargs(recvs, FunargRcvr) ft.Params = funargs(params, FunargParams) ft.Results = funargs(results, FunargResults) + ft.pkg = pkg return t } // NewStruct returns a new struct with the given fields. -func NewStruct(fields []*Field) *Type { +func NewStruct(pkg *Pkg, fields []*Field) *Type { t := New(TSTRUCT) t.SetFields(fields) if anyBroke(fields) { t.SetBroke(true) } + t.Extra.(*Struct).pkg = pkg return t } From c769d393de3d735d32aa9c8917afcd0394e5ac57 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 1 Dec 2020 23:55:03 -0800 Subject: [PATCH 092/474] [dev.regabi] cmd/compile: add ir.NewDeclNameAt This allows directly creating an ONONAME, which is a primordial Name before having its Op initialized. Then after an Op is assigned, we never allow it to be reassigned. Passes buildall w/ toolstash -cmp. 
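The intended life cycle of such a Name is roughly as follows (a sketch
distilled from the diffs below, not code from the tree):

	// A forward reference starts as a primordial ONONAME ...
	n := ir.NewDeclNameAt(pos, sym)

	// ... and receives its real Op exactly once, when the
	// declaration is finally seen. A second SetOp would Fatalf,
	// since n's Op is no longer ONONAME.
	n.SetOp(ir.OTYPE)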
Change-Id: Ibc2f413dc68c0af6a96abfe653c25ce31b184287 Reviewed-on: https://go-review.googlesource.com/c/go/+/274620 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/gc/dcl.go | 30 +------------------------ src/cmd/compile/internal/gc/export.go | 2 +- src/cmd/compile/internal/gc/iimport.go | 6 ++--- src/cmd/compile/internal/gc/noder.go | 4 +--- src/cmd/compile/internal/gc/sinit.go | 3 ++- src/cmd/compile/internal/gc/universe.go | 4 ++-- src/cmd/compile/internal/ir/name.go | 22 +++++++++++++----- src/cmd/compile/internal/ir/node.go | 4 +--- 8 files changed, 28 insertions(+), 47 deletions(-) diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 87b389b98b74a..ce13f0bdfcc67 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -122,16 +122,6 @@ func declare(n *ir.Name, ctxt ir.Class) { autoexport(n, ctxt) } -func addvar(n *ir.Name, t *types.Type, ctxt ir.Class) { - if n == nil || n.Sym() == nil || (n.Op() != ir.ONAME && n.Op() != ir.ONONAME) || t == nil { - base.Fatalf("addvar: n=%v t=%v nil", n, t) - } - - n.SetOp(ir.ONAME) - declare(n, ctxt) - n.SetType(t) -} - // declare variables from grammar // new_name_list (type | [type] = expr_list) func variter(vl []ir.Node, t ir.Ntype, el []ir.Node) []ir.Node { @@ -192,16 +182,6 @@ func variter(vl []ir.Node, t ir.Ntype, el []ir.Node) []ir.Node { return init } -// newnoname returns a new ONONAME Node associated with symbol s. -func newnoname(s *types.Sym) ir.Node { - if s == nil { - base.Fatalf("newnoname nil") - } - n := ir.NewNameAt(base.Pos, s) - n.SetOp(ir.ONONAME) - return n -} - // newFuncNameAt generates a new name node for a function or method. func newFuncNameAt(pos src.XPos, s *types.Sym, fn *ir.Func) *ir.Name { if fn.Nname != nil { @@ -213,14 +193,6 @@ func newFuncNameAt(pos src.XPos, s *types.Sym, fn *ir.Func) *ir.Name { return n } -// this generates a new name node for a name -// being declared. -func dclname(s *types.Sym) *ir.Name { - n := NewName(s) - n.SetOp(ir.ONONAME) // caller will correct it - return n -} - func anonfield(typ *types.Type) *ir.Field { return symfield(nil, typ) } @@ -243,7 +215,7 @@ func oldname(s *types.Sym) ir.Node { // Maybe a top-level declaration will come along later to // define s. resolve will check s.Def again once all input // source has been processed. 
- return newnoname(s) + return ir.NewDeclNameAt(base.Pos, s) } if Curfn != nil && n.Op() == ir.ONAME && n.Name().Curfn != nil && n.Name().Curfn != Curfn { diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index f803a17c60a25..44fc70be037fe 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -85,7 +85,7 @@ func importsym(ipkg *types.Pkg, s *types.Sym, op ir.Op) ir.Node { base.Fatalf("missing ONONAME for %v\n", s) } - n = dclname(s) + n = ir.NewDeclNameAt(src.NoXPos, s) s.SetPkgDef(n) s.Importdef = ipkg } diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index b6653dabda3c9..419db285b58cc 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -175,7 +175,7 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) if s.Def != nil { base.Fatalf("unexpected definition for %v: %v", s, ir.AsNode(s.Def)) } - s.Def = npos(src.NoXPos, dclname(s)) + s.Def = ir.NewDeclNameAt(src.NoXPos, s) } } @@ -833,7 +833,7 @@ func (r *importReader) node() ir.Node { case ir.OTYPESW: n := ir.NodAt(r.pos(), ir.OTYPESW, nil, nil) if s := r.ident(); s != nil { - n.SetLeft(npos(n.Pos(), newnoname(s))) + n.SetLeft(ir.NewDeclNameAt(n.Pos(), s)) } right, _ := r.exprsOrNil() n.SetRight(right) @@ -962,7 +962,7 @@ func (r *importReader) node() ir.Node { // statements case ir.ODCL: pos := r.pos() - lhs := npos(pos, dclname(r.ident())) + lhs := ir.NewDeclNameAt(pos, r.ident()) typ := ir.TypeNode(r.typ()) return npos(pos, liststmt(variter([]ir.Node{lhs}, typ, nil))) // TODO(gri) avoid list creation diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index e5677f921fc80..4c81657628331 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -516,9 +516,7 @@ func (p *noder) declNames(names []*syntax.Name) []ir.Node { } func (p *noder) declName(name *syntax.Name) *ir.Name { - n := dclname(p.name(name)) - n.SetPos(p.pos(name)) - return n + return ir.NewDeclNameAt(p.pos(name), p.name(name)) } func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node { diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 8146f30377107..2dc4281857e01 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -371,7 +371,8 @@ func staticname(t *types.Type) ir.Node { // Don't use lookupN; it interns the resulting string, but these are all unique. 
n := NewName(lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen))) statuniqgen++ - addvar(n, t, ir.PEXTERN) + declare(n, ir.PEXTERN) + n.SetType(t) n.Sym().Linksym().Set(obj.AttrLocal, true) return n } diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index b315502964689..f9984cbe945eb 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -108,7 +108,7 @@ func initUniverse() { defBasic := func(kind types.Kind, pkg *types.Pkg, name string) *types.Type { sym := pkg.Lookup(name) - n := ir.NewNameAt(src.NoXPos, sym) + n := ir.NewDeclNameAt(src.NoXPos, sym) n.SetOp(ir.OTYPE) t := types.NewBasic(kind, n) n.SetType(t) @@ -145,7 +145,7 @@ func initUniverse() { // error type s := ir.BuiltinPkg.Lookup("error") - n := ir.NewNameAt(src.NoXPos, s) + n := ir.NewDeclNameAt(src.NoXPos, s) n.SetOp(ir.OTYPE) types.ErrorType = types.NewNamed(n) types.ErrorType.SetUnderlying(makeErrorInterface()) diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 76abb454eedcd..3c62800ad31ef 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -123,18 +123,27 @@ type Name struct { } // NewNameAt returns a new ONAME Node associated with symbol s at position pos. -// The caller is responsible for setting n.Name.Curfn. +// The caller is responsible for setting Curfn. func NewNameAt(pos src.XPos, sym *types.Sym) *Name { if sym == nil { base.Fatalf("NewNameAt nil") } - return newNameAt(pos, sym) + return newNameAt(pos, ONAME, sym) +} + +// NewDeclNameAt returns a new ONONAME Node associated with symbol s at position pos. +// The caller is responsible for setting Curfn. +func NewDeclNameAt(pos src.XPos, sym *types.Sym) *Name { + if sym == nil { + base.Fatalf("NewDeclNameAt nil") + } + return newNameAt(pos, ONONAME, sym) } // newNameAt is like NewNameAt but allows sym == nil. -func newNameAt(pos src.XPos, sym *types.Sym) *Name { +func newNameAt(pos src.XPos, op Op, sym *types.Sym) *Name { n := new(Name) - n.op = ONAME + n.op = op n.pos = pos n.orig = n n.sym = sym @@ -163,10 +172,13 @@ func (n *Name) SetIota(x int64) { n.offset = x } func (*Name) CanBeNtype() {} func (n *Name) SetOp(op Op) { + if n.op != ONONAME { + base.Fatalf("%v already has Op %v", n, n.op) + } switch op { default: panic(n.no("SetOp " + op.String())) - case OLITERAL, ONONAME, ONAME, OTYPE, OIOTA: + case OLITERAL, ONAME, OTYPE, OIOTA: n.op = op } } diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index fc4c5939296ca..d121cc19d4017 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -802,9 +802,7 @@ func NodAt(pos src.XPos, op Op, nleft, nright Node) Node { case OLABEL: return NewLabelStmt(pos, nil) case OLITERAL, OTYPE, OIOTA: - n := newNameAt(pos, nil) - n.SetOp(op) - return n + return newNameAt(pos, op, nil) case OMAKECHAN, OMAKEMAP, OMAKESLICE, OMAKESLICECOPY: return NewMakeExpr(pos, op, nleft, nright) case OMETHEXPR: From ec5f349b2291fa3c0a30d8859c84f7476a1d14a2 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 30 Nov 2020 21:56:24 -0500 Subject: [PATCH 093/474] [dev.regabi] cmd/compile: merge OBLOCK and OEMPTY OEMPTY is an empty *statement*, but it confusingly gets handled as an expression in a few places. More confusingly, OEMPTY often has an init list, making it not empty at all. Replace uses and analysis of OEMPTY with OBLOCK instead. Passes buildall w/ toolstash -cmp. 
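For example, a blank label compiles to exactly this kind of no-op
statement (a sketch; see the typecheck.go hunk below):

	func f(x int) {
	_:	// valid but useless; eliminated during typechecking
		x++
	}

Before this CL the eliminated label became an OEMPTY node; now it becomes
an OBLOCK with an empty List, so later passes only need to recognize one
"does nothing" statement op.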
Change-Id: I8d4fcef151e4f441fa19b1b96da5272d778131d6 Reviewed-on: https://go-review.googlesource.com/c/go/+/274594 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/escape.go | 2 +- src/cmd/compile/internal/gc/iexport.go | 14 +++++++++++--- src/cmd/compile/internal/gc/iimport.go | 4 +++- src/cmd/compile/internal/gc/init.go | 6 ++++-- src/cmd/compile/internal/gc/inl.go | 7 ++++++- src/cmd/compile/internal/gc/noder.go | 12 +++++++----- src/cmd/compile/internal/gc/order.go | 2 +- src/cmd/compile/internal/gc/ssa.go | 2 +- src/cmd/compile/internal/gc/typecheck.go | 9 ++++++--- src/cmd/compile/internal/gc/walk.go | 13 ++++++------- src/cmd/compile/internal/ir/fmt.go | 10 ++++++---- src/cmd/compile/internal/ir/node.go | 4 +--- src/cmd/compile/internal/ir/stmt.go | 16 ---------------- 13 files changed, 53 insertions(+), 48 deletions(-) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index c1397717302d2..9fc3dd27788d7 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -302,7 +302,7 @@ func (e *Escape) stmt(n ir.Node) { default: base.Fatalf("unexpected stmt: %v", n) - case ir.ODCLCONST, ir.ODCLTYPE, ir.OEMPTY, ir.OFALL, ir.OINLMARK: + case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL, ir.OINLMARK: // nop case ir.OBREAK, ir.OCONTINUE, ir.OGOTO: diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 7b21efb8c2b95..85518bc9391a1 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -1048,6 +1048,17 @@ func (w *exportWriter) stmt(n ir.Node) { } switch op := n.Op(); op { + case ir.OBLOCK: + // No OBLOCK in export data. + // Inline content into this statement list, + // like the init list above. + // (At the moment neither the parser nor the typechecker + // generate OBLOCK nodes except to denote an empty + // function body, although that may change.) + for _, n := range n.List().Slice() { + w.stmt(n) + } + case ir.ODCL: w.op(ir.ODCL) w.pos(n.Left().Pos()) @@ -1129,9 +1140,6 @@ func (w *exportWriter) stmt(n ir.Node) { w.op(ir.OFALL) w.pos(n.Pos()) - case ir.OEMPTY: - // nothing to emit - case ir.OBREAK, ir.OCONTINUE, ir.OGOTO, ir.OLABEL: w.op(op) w.pos(n.Pos()) diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 419db285b58cc..1d9baed5ad812 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -742,7 +742,9 @@ func (r *importReader) stmtList() []ir.Node { if n == nil { break } - // OBLOCK nodes may be created when importing ODCL nodes - unpack them + // OBLOCK nodes are not written to the import data directly, + // but the handling of ODCL calls liststmt, which creates one. + // Inline them into the statement list. if n.Op() == ir.OBLOCK { list = append(list, n.List().Slice()...) } else { diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index e67a032c5d6c6..dc825b24218a3 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -88,8 +88,10 @@ func fninit(n []ir.Node) { s := lookupN("init.", i) fn := ir.AsNode(s.Def).Name().Defn // Skip init functions with empty bodies. 
- if fn.Body().Len() == 1 && fn.Body().First().Op() == ir.OEMPTY { - continue + if fn.Body().Len() == 1 { + if stmt := fn.Body().First(); stmt.Op() == ir.OBLOCK && stmt.List().Len() == 0 { + continue + } } fns = append(fns, s.Linksym()) } diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index b36a01e3898b6..89c9873c1dae3 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -396,7 +396,7 @@ func (v *hairyVisitor) visit(n ir.Node) bool { case ir.OAPPEND: v.budget -= inlineExtraAppendCost - case ir.ODCLCONST, ir.OEMPTY, ir.OFALL: + case ir.ODCLCONST, ir.OFALL: // These nodes don't produce code; omit from inlining budget. return false @@ -425,6 +425,11 @@ func (v *hairyVisitor) visit(n ir.Node) bool { v.usedLocals[n] = true } + case ir.OBLOCK: + // The only OBLOCK we should see at this point is an empty one. + // In any event, let the visitList(n.List()) below take care of the statements, + // and don't charge for the OBLOCK itself. The ++ undoes the -- below. + v.budget++ } v.budget-- diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 4c81657628331..9352463f18c47 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -164,7 +164,7 @@ func (p *noder) funcBody(fn *ir.Func, block *syntax.BlockStmt) { if block != nil { body := p.stmts(block.List) if body == nil { - body = []ir.Node{ir.Nod(ir.OEMPTY, nil, nil)} + body = []ir.Node{ir.Nod(ir.OBLOCK, nil, nil)} } fn.PtrBody().Set(body) @@ -967,7 +967,9 @@ func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []ir.Node { for i, stmt := range stmts { s := p.stmtFall(stmt, fallOK && i+1 == len(stmts)) if s == nil { - } else if s.Op() == ir.OBLOCK && s.Init().Len() == 0 { + } else if s.Op() == ir.OBLOCK && s.List().Len() > 0 { + // Inline non-empty block. + // Empty blocks must be preserved for checkreturn. nodes = append(nodes, s.List().Slice()...) } else { nodes = append(nodes, s) @@ -991,7 +993,7 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node { l := p.blockStmt(stmt) if len(l) == 0 { // TODO(mdempsky): Line number? - return ir.Nod(ir.OEMPTY, nil, nil) + return ir.Nod(ir.OBLOCK, nil, nil) } return liststmt(l) case *syntax.ExprStmt: @@ -1166,7 +1168,7 @@ func (p *noder) ifStmt(stmt *syntax.IfStmt) ir.Node { n.PtrBody().Set(p.blockStmt(stmt.Then)) if stmt.Else != nil { e := p.stmt(stmt.Else) - if e.Op() == ir.OBLOCK && e.Init().Len() == 0 { + if e.Op() == ir.OBLOCK { n.PtrRlist().Set(e.List().Slice()) } else { n.PtrRlist().Set1(e) @@ -1319,7 +1321,7 @@ func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node { l := []ir.Node{lhs} if ls != nil { - if ls.Op() == ir.OBLOCK && ls.Init().Len() == 0 { + if ls.Op() == ir.OBLOCK { l = append(l, ls.List().Slice()...) } else { l = append(l, ls) diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index c2e236537f7be..352e9c473b51c 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -676,7 +676,7 @@ func (o *Order) stmt(n ir.Node) { o.cleanTemp(t) // Special: does not save n onto out. - case ir.OBLOCK, ir.OEMPTY: + case ir.OBLOCK: o.stmtList(n.List()) // Special: n->left is not an expression; save as is. 
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 60e65e4b11c92..7c74054b6032a 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -1071,7 +1071,7 @@ func (s *state) stmt(n ir.Node) { s.stmtList(n.List()) // No-ops - case ir.OEMPTY, ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL: + case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL: // Expression statements case ir.OCALLFUNC: diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 2a0caad4693f3..5a073ac32462e 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -1950,13 +1950,16 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OBREAK, ir.OCONTINUE, ir.ODCL, - ir.OEMPTY, ir.OGOTO, ir.OFALL, ir.OVARKILL, ir.OVARLIVE: ok |= ctxStmt + case ir.OBLOCK: + ok |= ctxStmt + typecheckslice(n.List().Slice(), ctxStmt) + case ir.OLABEL: ok |= ctxStmt decldepth++ @@ -1964,7 +1967,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { // Empty identifier is valid but useless. // Eliminate now to simplify life later. // See issues 7538, 11589, 11593. - n = ir.NodAt(n.Pos(), ir.OEMPTY, nil, nil) + n = ir.NodAt(n.Pos(), ir.OBLOCK, nil, nil) } case ir.ODEFER: @@ -3808,7 +3811,7 @@ func deadcode(fn *ir.Func) { } } - fn.PtrBody().Set([]ir.Node{ir.Nod(ir.OEMPTY, nil, nil)}) + fn.PtrBody().Set([]ir.Node{ir.Nod(ir.OBLOCK, nil, nil)}) } func deadcodeslice(nn *ir.Nodes) { diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 183a7acc1b5b3..7e8ae22e4e1fe 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -142,7 +142,6 @@ func walkstmt(n ir.Node) ir.Node { ir.OPRINT, ir.OPRINTN, ir.OPANIC, - ir.OEMPTY, ir.ORECOVER, ir.OGETG: if n.Typecheck() == 0 { @@ -155,7 +154,7 @@ func walkstmt(n ir.Node) ir.Node { if wascopy && n.Op() == ir.ONAME { // copy rewrote to a statement list and a temp for the length. // Throw away the temp to avoid plain values as statements. - n = ir.NodAt(n.Pos(), ir.OEMPTY, nil, nil) + n = ir.NodAt(n.Pos(), ir.OBLOCK, nil, nil) } n = addinit(n, init.Slice()) @@ -470,7 +469,7 @@ opswitch: ir.Dump("walk", n) base.Fatalf("walkexpr: switch 1 unknown op %+S", n) - case ir.ONONAME, ir.OEMPTY, ir.OGETG, ir.ONEWOBJ, ir.OMETHEXPR: + case ir.ONONAME, ir.OGETG, ir.ONEWOBJ, ir.OMETHEXPR: case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL: // TODO(mdempsky): Just return n; see discussion on CL 38655. 
@@ -609,7 +608,7 @@ opswitch: } if oaslit(n, init) { - n = ir.NodAt(n.Pos(), ir.OEMPTY, nil, nil) + n = ir.NodAt(n.Pos(), ir.OBLOCK, nil, nil) break } @@ -2032,10 +2031,10 @@ func walkprint(nn ir.Node, init *ir.Nodes) ir.Node { typecheckslice(calls, ctxStmt) walkexprlist(calls, init) - r := ir.Nod(ir.OEMPTY, nil, nil) + r := ir.Nod(ir.OBLOCK, nil, nil) r = typecheck(r, ctxStmt) - r = walkexpr(r, init) - r.PtrInit().Set(calls) + r = walkstmt(r) + r.PtrList().Set(calls) return r } diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 5bb1ed857c0ba..9486d8b021d54 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -942,6 +942,11 @@ func stmtFmt(n Node, s fmt.State, mode FmtMode) { mode.Fprintf(s, "%.v = %.v", n.List(), n.Rlist()) } + case OBLOCK: + if n.List().Len() != 0 { + mode.Fprintf(s, "%v", n.List()) + } + case ORETURN: mode.Fprintf(s, "return %.v", n.List()) @@ -1044,9 +1049,6 @@ func stmtFmt(n Node, s fmt.State, mode FmtMode) { mode.Fprintf(s, "%#v", n.Op()) } - case OEMPTY: - break - case OLABEL: mode.Fprintf(s, "%v: ", n.Sym()) } @@ -1155,12 +1157,12 @@ var OpPrec = []int{ OAS2MAPR: -1, OAS2RECV: -1, OASOP: -1, + OBLOCK: -1, OBREAK: -1, OCASE: -1, OCONTINUE: -1, ODCL: -1, ODEFER: -1, - OEMPTY: -1, OFALL: -1, OFOR: -1, OFORUNTIL: -1, diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index d121cc19d4017..06bc48e9ca2b1 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -301,7 +301,7 @@ const ( OCASE OCONTINUE // continue [Sym] ODEFER // defer Left (Left must be call) - OEMPTY // no-op (empty statement) + OEMPTY // TODO(rsc): Delete. (Use OBLOCK instead.) OFALL // fallthrough OFOR // for Ninit; Left; Right { Nbody } // OFORUNTIL is like OFOR, but the test (Left) is applied after the body: @@ -781,8 +781,6 @@ func NodAt(pos src.XPos, op Op, nleft, nright Node) Node { n := NewTypeAssertExpr(pos, nleft, typ) n.SetOp(op) return n - case OEMPTY: - return NewEmptyStmt(pos) case OFOR: return NewForStmt(pos, nil, nleft, nright, nil) case OGO: diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index 91714e38e3dd2..a6bbab48896b7 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -266,22 +266,6 @@ func (n *DeferStmt) rawCopy() Node { c := *n; return &c } func (n *DeferStmt) Left() Node { return n.Call } func (n *DeferStmt) SetLeft(x Node) { n.Call = x } -// An EmptyStmt is an empty statement -type EmptyStmt struct { - miniStmt -} - -func NewEmptyStmt(pos src.XPos) *EmptyStmt { - n := &EmptyStmt{} - n.pos = pos - n.op = OEMPTY - return n -} - -func (n *EmptyStmt) String() string { return fmt.Sprint(n) } -func (n *EmptyStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *EmptyStmt) rawCopy() Node { c := *n; return &c } - // A ForStmt is a non-range for loop: for Init; Cond; Post { Body } // Op can be OFOR or OFORUNTIL (!Cond). type ForStmt struct { From ecc8d15bc5f48916176c4bba809b327a72b66e2c Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 30 Nov 2020 21:59:44 -0500 Subject: [PATCH 094/474] [dev.regabi] cmd/compile: delete OEMPTY Not toolstash -cmp safe, so split into its own CL. 
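The op_string.go diff below is mechanical: deleting OEMPTY renumbers every
later Op constant, so the stringer table shifts all subsequent names and
indices down by one. It is presumably regenerated rather than hand-edited,
with something like:

	stringer -type=Op -trimprefix=O node.go

(the exact go:generate invocation is an assumption here, inferred from the
generated file's naming convention).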
Change-Id: I523227514a95e19c9afe17556d754a03164bce76 Reviewed-on: https://go-review.googlesource.com/c/go/+/274595 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/node.go | 1 - src/cmd/compile/internal/ir/op_string.go | 75 ++++++++++++------------ 2 files changed, 37 insertions(+), 39 deletions(-) diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 06bc48e9ca2b1..cc3ac5765df69 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -301,7 +301,6 @@ const ( OCASE OCONTINUE // continue [Sym] ODEFER // defer Left (Left must be call) - OEMPTY // TODO(rsc): Delete. (Use OBLOCK instead.) OFALL // fallthrough OFOR // for Ninit; Left; Right { Nbody } // OFORUNTIL is like OFOR, but the test (Left) is applied after the body: diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go index 96eee439741d4..bb5e16fbbc9e0 100644 --- a/src/cmd/compile/internal/ir/op_string.go +++ b/src/cmd/compile/internal/ir/op_string.go @@ -127,47 +127,46 @@ func _() { _ = x[OCASE-116] _ = x[OCONTINUE-117] _ = x[ODEFER-118] - _ = x[OEMPTY-119] - _ = x[OFALL-120] - _ = x[OFOR-121] - _ = x[OFORUNTIL-122] - _ = x[OGOTO-123] - _ = x[OIF-124] - _ = x[OLABEL-125] - _ = x[OGO-126] - _ = x[ORANGE-127] - _ = x[ORETURN-128] - _ = x[OSELECT-129] - _ = x[OSWITCH-130] - _ = x[OTYPESW-131] - _ = x[OTCHAN-132] - _ = x[OTMAP-133] - _ = x[OTSTRUCT-134] - _ = x[OTINTER-135] - _ = x[OTFUNC-136] - _ = x[OTARRAY-137] - _ = x[OTSLICE-138] - _ = x[OINLCALL-139] - _ = x[OEFACE-140] - _ = x[OITAB-141] - _ = x[OIDATA-142] - _ = x[OSPTR-143] - _ = x[OCLOSUREREAD-144] - _ = x[OCFUNC-145] - _ = x[OCHECKNIL-146] - _ = x[OVARDEF-147] - _ = x[OVARKILL-148] - _ = x[OVARLIVE-149] - _ = x[ORESULT-150] - _ = x[OINLMARK-151] - _ = x[ORETJMP-152] - _ = x[OGETG-153] - _ = x[OEND-154] + _ = x[OFALL-119] + _ = x[OFOR-120] + _ = x[OFORUNTIL-121] + _ = x[OGOTO-122] + _ = x[OIF-123] + _ = x[OLABEL-124] + _ = x[OGO-125] + _ = x[ORANGE-126] + _ = x[ORETURN-127] + _ = x[OSELECT-128] + _ = x[OSWITCH-129] + _ = x[OTYPESW-130] + _ = x[OTCHAN-131] + _ = x[OTMAP-132] + _ = x[OTSTRUCT-133] + _ = x[OTINTER-134] + _ = x[OTFUNC-135] + _ = x[OTARRAY-136] + _ = x[OTSLICE-137] + _ = x[OINLCALL-138] + _ = x[OEFACE-139] + _ = x[OITAB-140] + _ = x[OIDATA-141] + _ = x[OSPTR-142] + _ = x[OCLOSUREREAD-143] + _ = x[OCFUNC-144] + _ = x[OCHECKNIL-145] + _ = x[OVARDEF-146] + _ = x[OVARKILL-147] + _ = x[OVARLIVE-148] + _ = x[ORESULT-149] + _ = x[OINLMARK-150] + _ = x[ORETJMP-151] + _ = x[OGETG-152] + _ = x[OEND-153] } -const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCLOSUREREADCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND" +const _Op_name = 
"XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCLOSUREREADCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND" -var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 477, 480, 486, 490, 493, 497, 502, 507, 513, 518, 522, 527, 535, 543, 549, 558, 569, 576, 580, 587, 594, 602, 606, 610, 614, 621, 628, 636, 642, 650, 658, 663, 668, 672, 680, 685, 690, 694, 697, 705, 709, 711, 716, 718, 723, 729, 735, 741, 747, 752, 756, 763, 769, 774, 780, 786, 793, 798, 802, 807, 811, 822, 827, 835, 841, 848, 855, 861, 868, 874, 878, 881} +var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 477, 480, 486, 490, 493, 497, 502, 507, 513, 518, 522, 527, 535, 543, 549, 558, 569, 576, 580, 587, 594, 602, 606, 610, 614, 621, 628, 636, 642, 650, 658, 663, 668, 672, 680, 685, 689, 692, 700, 704, 706, 711, 713, 718, 724, 730, 736, 742, 747, 751, 758, 764, 769, 775, 781, 788, 793, 797, 802, 806, 817, 822, 830, 836, 843, 850, 856, 863, 869, 873, 876} func (i Op) String() string { if i >= Op(len(_Op_index)-1) { From 64bc656aed3ba7539a85f6b52f2aa933c9ce8130 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 30 Nov 2020 23:49:25 -0500 Subject: [PATCH 095/474] [dev.regabi] cmd/compile: use explicit block statements for init For statements like goto that don't need an init, use an explicit block statement instead of forcing them to have one. There is also one call to addinit that is being replaced with a block. That call is the source of much of my confusion regarding init statements: walkstmt calls addinit on a statement, whereas all the other uses of addinit are on expressions. After this CL, they're all expressions. Passes buildall w/ toolstash -cmp. 
Change-Id: Ifdef9d318c236dc1a7567f9e9ef4a6bedd3fe81f Reviewed-on: https://go-review.googlesource.com/c/go/+/274597 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/inl.go | 21 +++++++---------- src/cmd/compile/internal/gc/walk.go | 36 ++++++++++++++++++++--------- 2 files changed, 33 insertions(+), 24 deletions(-) diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 89c9873c1dae3..fd8e9cfd46486 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -527,8 +527,8 @@ func inlcalls(fn *ir.Func) { // Turn an OINLCALL into a statement. func inlconv2stmt(inlcall ir.Node) ir.Node { n := ir.NodAt(inlcall.Pos(), ir.OBLOCK, nil, nil) - n.SetList(inlcall.Body()) - n.SetInit(inlcall.Init()) + n.SetList(inlcall.Init()) + n.PtrList().AppendNodes(inlcall.PtrBody()) return n } @@ -543,7 +543,7 @@ func inlconv2expr(n ir.Node) ir.Node { // Turn the rlist (with the return values) of the OINLCALL in // n into an expression list lumping the ninit and body // containing the inlined statements on the first list element so -// order will be preserved Used in return, oas2func and call +// order will be preserved. Used in return, oas2func and call // statements. func inlconv2list(n ir.Node) []ir.Node { if n.Op() != ir.OINLCALL || n.Rlist().Len() == 0 { @@ -1330,9 +1330,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { // dump("Return before substitution", n); case ir.ORETURN: - m := nodSym(ir.OGOTO, nil, subst.retlabel) - m.PtrInit().Set(subst.list(n.Init())) - + init := subst.list(n.Init()) if len(subst.retvars) != 0 && n.List().Len() != 0 { as := ir.Nod(ir.OAS2, nil, nil) @@ -1352,14 +1350,11 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { } as = typecheck(as, ctxStmt) - m.PtrInit().Append(as) + init = append(init, as) } - - typecheckslice(m.Init().Slice(), ctxStmt) - m = typecheck(m, ctxStmt) - - // dump("Return after substitution", m); - return m + init = append(init, nodSym(ir.OGOTO, nil, subst.retlabel)) + typecheckslice(init, ctxStmt) + return ir.NewBlockStmt(base.Pos, init) case ir.OGOTO, ir.OLABEL: m := ir.Copy(n) diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 7e8ae22e4e1fe..f439237936ff9 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -147,16 +147,25 @@ func walkstmt(n ir.Node) ir.Node { if n.Typecheck() == 0 { base.Fatalf("missing typecheck: %+v", n) } - wascopy := n.Op() == ir.OCOPY init := n.Init() n.PtrInit().Set(nil) n = walkexpr(n, &init) - if wascopy && n.Op() == ir.ONAME { + if n.Op() == ir.ONAME { // copy rewrote to a statement list and a temp for the length. // Throw away the temp to avoid plain values as statements. - n = ir.NodAt(n.Pos(), ir.OBLOCK, nil, nil) + n = ir.NewBlockStmt(n.Pos(), init.Slice()) + init.Set(nil) + } + if init.Len() > 0 { + switch n.Op() { + case ir.OAS, ir.OAS2, ir.OBLOCK: + n.PtrInit().Prepend(init.Slice()...) + + default: + init.Append(n) + n = ir.NewBlockStmt(n.Pos(), init.Slice()) + } } - n = addinit(n, init.Slice()) // special case for a receive where we throw away // the value received. 
@@ -223,29 +232,34 @@ func walkstmt(n ir.Node) ir.Node { } fallthrough case ir.OGO: + var init ir.Nodes switch n.Left().Op() { case ir.OPRINT, ir.OPRINTN: - n.SetLeft(wrapCall(n.Left(), n.PtrInit())) + n.SetLeft(wrapCall(n.Left(), &init)) case ir.ODELETE: if mapfast(n.Left().List().First().Type()) == mapslow { - n.SetLeft(wrapCall(n.Left(), n.PtrInit())) + n.SetLeft(wrapCall(n.Left(), &init)) } else { - n.SetLeft(walkexpr(n.Left(), n.PtrInit())) + n.SetLeft(walkexpr(n.Left(), &init)) } case ir.OCOPY: - n.SetLeft(copyany(n.Left(), n.PtrInit(), true)) + n.SetLeft(copyany(n.Left(), &init, true)) case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: if n.Left().Body().Len() > 0 { - n.SetLeft(wrapCall(n.Left(), n.PtrInit())) + n.SetLeft(wrapCall(n.Left(), &init)) } else { - n.SetLeft(walkexpr(n.Left(), n.PtrInit())) + n.SetLeft(walkexpr(n.Left(), &init)) } default: - n.SetLeft(walkexpr(n.Left(), n.PtrInit())) + n.SetLeft(walkexpr(n.Left(), &init)) + } + if init.Len() > 0 { + init.Append(n) + n = ir.NewBlockStmt(n.Pos(), init.Slice()) } case ir.OFOR, ir.OFORUNTIL: From 5a3b6796cdb1833929805262b6f843c0e82fa7e1 Mon Sep 17 00:00:00 2001 From: Dan Scales Date: Tue, 1 Dec 2020 20:51:18 -0800 Subject: [PATCH 096/474] [dev.regabi] cmd/compile: remove extra typ field in Name struct Noticed the typ field was duplicated, since it is also in miniExpr inside Name. Also clarified the comments for Func, now that it is actually the ODCLFUNC node. Change-Id: Ia483a0ad34bb409cd92c43d4ae0a6852f9e4f644 Reviewed-on: https://go-review.googlesource.com/c/go/+/274619 Run-TryBot: Dan Scales TryBot-Result: Go Bot Trust: Dan Scales Reviewed-by: Russ Cox --- src/cmd/compile/internal/ir/func.go | 15 ++++++++------- src/cmd/compile/internal/ir/name.go | 1 - src/cmd/compile/internal/ir/sizeof_test.go | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 3fc8597ef0518..98830fb502d64 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -17,13 +17,14 @@ import ( // // There are multiple nodes that represent a Func in the IR. // -// The ONAME node (Func.Name) is used for plain references to it. -// The ODCLFUNC node (Func.Decl) is used for its declaration code. -// The OCLOSURE node (Func.Closure) is used for a reference to a +// The ONAME node (Func.Nname) is used for plain references to it. +// The ODCLFUNC node (the Func itself) is used for its declaration code. +// The OCLOSURE node (Func.OClosure) is used for a reference to a // function literal. // -// A Func for an imported function will have only an ONAME node. -// A declared function or method has an ONAME and an ODCLFUNC. +// An imported function will have an ONAME node which points to a Func +// with an empty body. +// A declared function or method has an ODCLFUNC (the Func itself) and an ONAME. // A function literal is represented directly by an OCLOSURE, but it also // has an ODCLFUNC (and a matching ONAME) representing the compiled // underlying form of the closure, which accesses the captured variables @@ -44,8 +45,8 @@ import ( // the method name is stored in Sym instead of Right. // Each OCALLPART ends up being implemented as a new // function, a bit like a closure, with its own ODCLFUNC. 
-// The OCALLPART has uses n.Func to record the linkage to
-// the generated ODCLFUNC (as n.Func.Decl), but there is no
+// The OCALLPART uses n.Func to record the linkage to
+// the generated ODCLFUNC, but there is no
 // pointer from the Func back to the OCALLPART.
 type Func struct {
 	miniNode
diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go
index 3c62800ad31ef..1d886bb9a1af6 100644
--- a/src/cmd/compile/internal/ir/name.go
+++ b/src/cmd/compile/internal/ir/name.go
@@ -21,7 +21,6 @@ type Name struct {
 	flags     bitset16
 	pragma    PragmaFlag // int16
 	sym       *types.Sym
-	typ       *types.Type
 	fn        *Func
 	offset    int64
 	val       constant.Value
diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go
index 4a133cb999153..181f1462fe97c 100644
--- a/src/cmd/compile/internal/ir/sizeof_test.go
+++ b/src/cmd/compile/internal/ir/sizeof_test.go
@@ -21,7 +21,7 @@ func TestSizeof(t *testing.T) {
 		_64bit uintptr // size on 64bit platforms
 	}{
 		{Func{}, 168, 288},
-		{Name{}, 128, 224},
+		{Name{}, 124, 216},
 	}
 
 	for _, tt := range tests {

From 00e572779077737d409ed57194510ec42c520b34 Mon Sep 17 00:00:00 2001
From: Cuong Manh Le
Date: Wed, 2 Dec 2020 14:40:48 +0700
Subject: [PATCH 097/474] [dev.regabi] cmd/compile: remove okAs

The check for blank in okAs is redundant with what its callers have
already done, so just inline the conversion at the call sites instead.

Passes toolstash-check.

Change-Id: I606105e2d2cf8e80214722a13c3101c464d20d82
Reviewed-on: https://go-review.googlesource.com/c/go/+/274793
Trust: Cuong Manh Le
Run-TryBot: Cuong Manh Le
TryBot-Result: Go Bot
Reviewed-by: Matthew Dempsky
---
 src/cmd/compile/internal/gc/order.go | 13 ++-----------
 1 file changed, 2 insertions(+), 11 deletions(-)

diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go
index 352e9c473b51c..7816e684dc089 100644
--- a/src/cmd/compile/internal/gc/order.go
+++ b/src/cmd/compile/internal/gc/order.go
@@ -936,7 +936,7 @@ func (o *Order) stmt(n ir.Node) {
 			}
 
 			tmp := o.newTemp(types.Types[types.TBOOL], false)
-			as := okas(ok, tmp)
+			as := ir.Nod(ir.OAS, ok, conv(tmp, ok.Type()))
 			as = typecheck(as, ctxStmt)
 			n2.PtrInit().Append(as)
 			ok = tmp
@@ -1382,15 +1382,6 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node {
 	return n
 }
 
-// okas creates and returns an assignment of val to ok,
-// including an explicit conversion if necessary.
-func okas(ok, val ir.Node) ir.Node {
-	if !ir.IsBlank(ok) {
-		val = conv(val, ok.Type())
-	}
-	return ir.Nod(ir.OAS, ok, val)
-}
-
 // as2 orders OAS2XXXX nodes. It creates temporaries to ensure left-to-right assignment.
 // The caller should order the right-hand side of the assignment before calling order.as2.
 // It rewrites,
@@ -1442,7 +1433,7 @@ func (o *Order) okAs2(n ir.Node) {
 		n.List().SetFirst(tmp1)
 	}
 	if tmp2 != nil {
-		r := okas(n.List().Second(), tmp2)
+		r := ir.Nod(ir.OAS, n.List().Second(), conv(tmp2, n.List().Second().Type()))
 		r = typecheck(r, ctxStmt)
 		o.mapAssign(r)
 		n.List().SetSecond(tmp2)

From 59b8916d482bdca933885881dff54365432ec9f5 Mon Sep 17 00:00:00 2001
From: Russ Cox
Date: Tue, 1 Dec 2020 12:02:16 -0500
Subject: [PATCH 098/474] [dev.regabi] cmd/compile: handle OCONVNOP better in ssa

This CL improves handling of OCONVNOP nodes during ssa generation,
so it is not toolstash safe.

An OCONVNOP wrapper is necessary for the "for" condition of certain
compiled range loops, and the boolean evaluator was not looking
through such wrappers properly, generating unnecessary temporaries.
That change saved 8k of the (13 MB) go binary.
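The fix for that is the new ir.OCONVNOP case in condBranch (see the
ssa.go diff below), which peels the wrapper and branches on the
underlying condition directly,

	case ir.OCONVNOP:
		s.stmtList(cond.Init())
		s.condBranch(cond.Left(), yes, no, likely)
		return

instead of falling through to s.expr(cond), which materialized the
boolean into a temporary before branching.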
The other changes just streamline the handling of OCONVNOP to be more like what OSTMTEXPR will be like. They have no effect on output size but do tweak the ssa graph a little, which causes different register decisions and therefore different output. Change-Id: I9e1dcd413b60944e21554c3e3f2bdc9adcee7634 Reviewed-on: https://go-review.googlesource.com/c/go/+/274598 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Keith Randall --- src/cmd/compile/internal/gc/ssa.go | 10 ++++++++++ src/cmd/compile/internal/gc/walk.go | 3 +++ 2 files changed, 13 insertions(+) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 7c74054b6032a..d53bd1aa4fb1b 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -2103,6 +2103,9 @@ func (s *state) expr(n ir.Node) *ssa.Value { // Assume everything will work out, so set up our return value. // Anything interesting that happens from here is a fatal. x := s.expr(n.Left()) + if to == from { + return x + } // Special case for not confusing GC and liveness. // We don't want pointers accidentally classified @@ -2966,6 +2969,10 @@ func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) { s.stmtList(cond.Init()) s.condBranch(cond.Left(), no, yes, -likely) return + case ir.OCONVNOP: + s.stmtList(cond.Init()) + s.condBranch(cond.Left(), yes, no, likely) + return } c := s.expr(cond) b := s.endBlock() @@ -4903,6 +4910,9 @@ func (s *state) addr(n ir.Node) *ssa.Value { return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)) case ir.OCONVNOP: + if n.Type() == n.Left().Type() { + return s.addr(n.Left()) + } addr := s.addr(n.Left()) return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH: diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index f439237936ff9..c0f447f1a221f 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -966,6 +966,9 @@ opswitch: case ir.OCONV, ir.OCONVNOP: n.SetLeft(walkexpr(n.Left(), init)) + if n.Op() == ir.OCONVNOP && n.Type() == n.Left().Type() { + return n.Left() + } if n.Op() == ir.OCONVNOP && checkPtr(Curfn, 1) { if n.Type().IsPtr() && n.Left().Type().IsUnsafePtr() { // unsafe.Pointer to *T n = walkCheckPtrAlignment(n, init, nil) From 7e81135be7b264517cf2ae17dec0fdbafc4c6841 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 2 Dec 2020 17:03:18 -0500 Subject: [PATCH 099/474] [dev.regabi] cmd/compile: rename addinit(n, init) to initExpr(init, n) Recreated manually to push below some CLs it depended on. 
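
As the diffs below show, a typical call site changes from

	n.SetLeft(addinit(n.Left(), init.Slice()))

to

	n.SetLeft(initExpr(init.Slice(), n.Left()))

so the arguments read in evaluation order: the init statements run
before the expression they are attached to.
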
Change-Id: I1b3316fcdce39cbb33e5cbb471f5cd1cd2efc1f5 Reviewed-on: https://go-review.googlesource.com/c/go/+/274599 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/inl.go | 4 ++-- src/cmd/compile/internal/gc/order.go | 2 +- src/cmd/compile/internal/gc/subr.go | 6 +++--- src/cmd/compile/internal/gc/typecheck.go | 4 ++-- src/cmd/compile/internal/gc/walk.go | 6 +++--- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index fd8e9cfd46486..42125f38f3e27 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -537,7 +537,7 @@ func inlconv2stmt(inlcall ir.Node) ir.Node { // n.Left = inlconv2expr(n.Left) func inlconv2expr(n ir.Node) ir.Node { r := n.Rlist().First() - return addinit(r, append(n.Init().Slice(), n.Body().Slice()...)) + return initExpr(append(n.Init().Slice(), n.Body().Slice()...), r) } // Turn the rlist (with the return values) of the OINLCALL in @@ -551,7 +551,7 @@ func inlconv2list(n ir.Node) []ir.Node { } s := n.Rlist().Slice() - s[0] = addinit(s[0], append(n.Init().Slice(), n.Body().Slice()...)) + s[0] = initExpr(append(n.Init().Slice(), n.Body().Slice()...), s[0]) return s } diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 7816e684dc089..e4175bbf36817 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -433,7 +433,7 @@ func (o *Order) exprInPlace(n ir.Node) ir.Node { var order Order order.free = o.free n = order.expr(n, nil) - n = addinit(n, order.out) + n = initExpr(order.out, n) // insert new temporaries from order // at head of outer list. diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 011a7ac5bc0f7..970f78b355ad3 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -1355,9 +1355,9 @@ func ngotype(n ir.Node) *types.Sym { return nil } -// The result of addinit MUST be assigned back to n, e.g. -// n.Left = addinit(n.Left, init) -func addinit(n ir.Node, init []ir.Node) ir.Node { +// The result of initExpr MUST be assigned back to n, e.g. 
+// n.Left = initExpr(init, n.Left) +func initExpr(init []ir.Node, n ir.Node) ir.Node { if len(init) == 0 { return n } diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 5a073ac32462e..55443ba596e21 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -1314,7 +1314,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } old := n n = ir.NodAt(n.Pos(), l.SubOp(), arg, nil) - n = addinit(n, old.Init().Slice()) // typecheckargs can add to old.Init + n = initExpr(old.Init().Slice(), n) // typecheckargs can add to old.Init case ir.OCOMPLEX, ir.OCOPY: typecheckargs(n) @@ -1325,7 +1325,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } old := n n = ir.NodAt(n.Pos(), l.SubOp(), arg1, arg2) - n = addinit(n, old.Init().Slice()) // typecheckargs can add to old.Init + n = initExpr(old.Init().Slice(), n) // typecheckargs can add to old.Init } n = typecheck1(n, top) return n diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index c0f447f1a221f..e72015c05e8d7 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -180,7 +180,7 @@ func walkstmt(n ir.Node) ir.Node { n = mkcall1(chanfn("chanrecv1", 2, n.Left().Type()), nil, &init, n.Left(), nodnil()) n = walkexpr(n, &init) - n = addinit(n, init.Slice()) + n = initExpr(init.Slice(), n) case ir.OBREAK, ir.OCONTINUE, @@ -268,7 +268,7 @@ func walkstmt(n ir.Node) ir.Node { init := n.Left().Init() n.Left().PtrInit().Set(nil) n.SetLeft(walkexpr(n.Left(), &init)) - n.SetLeft(addinit(n.Left(), init.Slice())) + n.SetLeft(initExpr(init.Slice(), n.Left())) } n.SetRight(walkstmt(n.Right())) @@ -557,7 +557,7 @@ opswitch: var ll ir.Nodes n.SetRight(walkexpr(n.Right(), &ll)) - n.SetRight(addinit(n.Right(), ll.Slice())) + n.SetRight(initExpr(ll.Slice(), n.Right())) case ir.OPRINT, ir.OPRINTN: n = walkprint(n, init) From beb5e0540406e2281a7502a2009db752668219da Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Wed, 2 Dec 2020 23:55:42 -0800 Subject: [PATCH 100/474] [dev.regabi] cmd/compile: refactoring prep for ConstExpr The next CL adds ConstExpr, which is a more memory efficient representation for constant expressions than Name. However, currently a bunch of Val helper methods are defined on Name. This CL changes them into standalone functions that work with any Node.Val implementation. There's also an existing standalone function named Int64Val, which takes a Type argument to specify what type of integer is expected. So to avoid collisions, this CL renames it to IntVal. Passes buildall w/ toolstash -cmp. 
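
At a call site the rewrite reads, for example (taken from the walk.go
hunks below):

	c := n.Right().Int64Val()   // before: method on the node
	c := ir.Int64Val(n.Right()) // after: standalone function

and likewise for CanInt64, Uint64Val, BoolVal, and StringVal.
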
[git-generate] cd src/cmd/compile/internal/ir rf 'mv Int64Val IntVal' sed -i -E -e 's/\(n \*Name\) (CanInt64|((I|Ui)nt64|Bool|String)Val)\(/\1(n Node/' name.go cd ../gc rf ' ex { import "cmd/compile/internal/ir" var n ir.Node n.CanInt64() -> ir.CanInt64(n) n.Int64Val() -> ir.Int64Val(n) n.Uint64Val() -> ir.Uint64Val(n) n.BoolVal() -> ir.BoolVal(n) n.StringVal() -> ir.StringVal(n) } ' cd ../ir rf ' mv CanInt64 Int64Val Uint64Val BoolVal StringVal val.go rm Node.CanInt64 Node.Int64Val Node.Uint64Val Node.BoolVal Node.StringVal ' Change-Id: I003140bda1690d770fd608bdd087e6d4ff00fb1f Reviewed-on: https://go-review.googlesource.com/c/go/+/275032 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/const.go | 8 ++-- src/cmd/compile/internal/gc/escape.go | 2 +- src/cmd/compile/internal/gc/noder.go | 6 +-- src/cmd/compile/internal/gc/obj.go | 4 +- src/cmd/compile/internal/gc/order.go | 4 +- src/cmd/compile/internal/gc/sinit.go | 14 +++--- src/cmd/compile/internal/gc/ssa.go | 10 ++-- src/cmd/compile/internal/gc/swt.go | 10 ++-- src/cmd/compile/internal/gc/typecheck.go | 22 ++++----- src/cmd/compile/internal/gc/walk.go | 30 ++++++------ src/cmd/compile/internal/ir/name.go | 56 ---------------------- src/cmd/compile/internal/ir/node.go | 5 -- src/cmd/compile/internal/ir/val.go | 60 +++++++++++++++++++++++- 13 files changed, 113 insertions(+), 118 deletions(-) diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 4a61c77630c45..8771d82cfa009 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -526,7 +526,7 @@ func evalConst(n ir.Node) ir.Node { if need == 1 { var strs []string for _, c := range s { - strs = append(strs, c.StringVal()) + strs = append(strs, ir.StringVal(c)) } return origConst(n, constant.MakeString(strings.Join(strs, ""))) } @@ -537,7 +537,7 @@ func evalConst(n ir.Node) ir.Node { var strs []string i2 := i for i2 < len(s) && ir.IsConst(s[i2], constant.String) { - strs = append(strs, s[i2].StringVal()) + strs = append(strs, ir.StringVal(s[i2])) i2++ } @@ -558,7 +558,7 @@ func evalConst(n ir.Node) ir.Node { switch nl.Type().Kind() { case types.TSTRING: if ir.IsConst(nl, constant.String) { - return origIntConst(n, int64(len(nl.StringVal()))) + return origIntConst(n, int64(len(ir.StringVal(nl)))) } case types.TARRAY: if !hascallchan(nl) { @@ -780,7 +780,7 @@ func indexconst(n ir.Node) int64 { if doesoverflow(v, types.Types[types.TINT]) { return -2 } - return ir.Int64Val(types.Types[types.TINT], v) + return ir.IntVal(types.Types[types.TINT], v) } // isGoConst reports whether n is a Go language constant (as opposed to a diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 9fc3dd27788d7..622edb98203c9 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -1769,7 +1769,7 @@ func heapAllocReason(n ir.Node) string { if !smallintconst(r) { return "non-constant size" } - if t := n.Type(); t.Elem().Width != 0 && r.Int64Val() >= maxImplicitStackVarSize/t.Elem().Width { + if t := n.Type(); t.Elem().Width != 0 && ir.Int64Val(r) >= maxImplicitStackVarSize/t.Elem().Width { return "too large for stack" } } diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 9352463f18c47..61320123a8a2c 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -807,7 +807,7 @@ func (p *noder) sum(x 
syntax.Expr) ir.Node { n := p.expr(x) if ir.IsConst(n, constant.String) && n.Sym() == nil { nstr = n - chunks = append(chunks, nstr.StringVal()) + chunks = append(chunks, ir.StringVal(nstr)) } for i := len(adds) - 1; i >= 0; i-- { @@ -817,12 +817,12 @@ func (p *noder) sum(x syntax.Expr) ir.Node { if ir.IsConst(r, constant.String) && r.Sym() == nil { if nstr != nil { // Collapse r into nstr instead of adding to n. - chunks = append(chunks, r.StringVal()) + chunks = append(chunks, ir.StringVal(r)) continue } nstr = r - chunks = append(chunks, nstr.StringVal()) + chunks = append(chunks, ir.StringVal(nstr)) } else { if len(chunks) > 1 { nstr.SetVal(constant.MakeString(strings.Join(chunks, ""))) diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 21a50257b88a2..b1701b30a1286 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -263,7 +263,7 @@ func dumpGlobalConst(n ir.Node) { return } } - base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym().Name, typesymname(t), ir.Int64Val(t, v)) + base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym().Name, typesymname(t), ir.IntVal(t, v)) } func dumpglobls() { @@ -598,7 +598,7 @@ func litsym(n, c ir.Node, wid int) { s.WriteInt(base.Ctxt, n.Offset(), wid, i) case constant.Int: - s.WriteInt(base.Ctxt, n.Offset(), wid, ir.Int64Val(n.Type(), u)) + s.WriteInt(base.Ctxt, n.Offset(), wid, ir.IntVal(n.Type(), u)) case constant.Float: f, _ := constant.Float64Val(u) diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index e4175bbf36817..5440806e8ecb7 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -1107,7 +1107,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { haslit := false for _, n1 := range n.List().Slice() { hasbyte = hasbyte || n1.Op() == ir.OBYTES2STR - haslit = haslit || n1.Op() == ir.OLITERAL && len(n1.StringVal()) != 0 + haslit = haslit || n1.Op() == ir.OLITERAL && len(ir.StringVal(n1)) != 0 } if haslit && hasbyte { @@ -1278,7 +1278,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { var t *types.Type switch n.Op() { case ir.OSLICELIT: - t = types.NewArray(n.Type().Elem(), n.Right().Int64Val()) + t = types.NewArray(n.Type().Elem(), ir.Int64Val(n.Right())) case ir.OCALLPART: t = partialCallType(n) } diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 2dc4281857e01..3ef976d8aa7d3 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -134,7 +134,7 @@ func (s *InitSchedule) staticcopy(l ir.Node, r ir.Node) bool { case ir.OSLICELIT: // copy slice a := s.inittemps[r] - slicesym(l, a, r.Right().Int64Val()) + slicesym(l, a, ir.Int64Val(r.Right())) return true case ir.OARRAYLIT, ir.OSTRUCTLIT: @@ -213,7 +213,7 @@ func (s *InitSchedule) staticassign(l ir.Node, r ir.Node) bool { case ir.OSTR2BYTES: if l.Class() == ir.PEXTERN && r.Left().Op() == ir.OLITERAL { - sval := r.Left().StringVal() + sval := ir.StringVal(r.Left()) slicebytes(l, sval) return true } @@ -221,7 +221,7 @@ func (s *InitSchedule) staticassign(l ir.Node, r ir.Node) bool { case ir.OSLICELIT: s.initplan(r) // Init slice. 
- bound := r.Right().Int64Val() + bound := ir.Int64Val(r.Right()) ta := types.NewArray(r.Type().Elem(), bound) ta.SetNoalg(true) a := staticname(ta) @@ -418,7 +418,7 @@ func getdyn(n ir.Node, top bool) initGenType { if !top { return initDynamic } - if n.Right().Int64Val()/4 > int64(n.List().Len()) { + if ir.Int64Val(n.Right())/4 > int64(n.List().Len()) { // <25% of entries have explicit values. // Very rough estimation, it takes 4 bytes of instructions // to initialize 1 byte of result. So don't use a static @@ -594,12 +594,12 @@ func isSmallSliceLit(n ir.Node) bool { r := n.Right() - return smallintconst(r) && (n.Type().Elem().Width == 0 || r.Int64Val() <= smallArrayBytes/n.Type().Elem().Width) + return smallintconst(r) && (n.Type().Elem().Width == 0 || ir.Int64Val(r) <= smallArrayBytes/n.Type().Elem().Width) } func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) { // make an array type corresponding the number of elements we have - t := types.NewArray(n.Type().Elem(), n.Right().Int64Val()) + t := types.NewArray(n.Type().Elem(), ir.Int64Val(n.Right())) dowidth(t) if ctxt == inNonInitFunction { @@ -997,7 +997,7 @@ func oaslit(n ir.Node, init *ir.Nodes) bool { func getlit(lit ir.Node) int { if smallintconst(lit) { - return int(lit.Int64Val()) + return int(ir.Int64Val(lit)) } return -1 } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index d53bd1aa4fb1b..89918e21333cb 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -1271,7 +1271,7 @@ func (s *state) stmt(n ir.Node) { // We're assigning a slicing operation back to its source. // Don't write back fields we aren't changing. See issue #14855. i, j, k := rhs.SliceBounds() - if i != nil && (i.Op() == ir.OLITERAL && i.Val().Kind() == constant.Int && i.Int64Val() == 0) { + if i != nil && (i.Op() == ir.OLITERAL && i.Val().Kind() == constant.Int && ir.Int64Val(i) == 0) { // [0:...] is the same as [:...] i = nil } @@ -1301,7 +1301,7 @@ func (s *state) stmt(n ir.Node) { case ir.OIF: if ir.IsConst(n.Left(), constant.Bool) { s.stmtList(n.Left().Init()) - if n.Left().BoolVal() { + if ir.BoolVal(n.Left()) { s.stmtList(n.Body()) } else { s.stmtList(n.Rlist()) @@ -2041,7 +2041,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { case ir.OLITERAL: switch u := n.Val(); u.Kind() { case constant.Int: - i := ir.Int64Val(n.Type(), u) + i := ir.IntVal(n.Type(), u) switch n.Type().Size() { case 1: return s.constInt8(n.Type(), int8(i)) @@ -2624,7 +2624,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { // Replace "abc"[1] with 'b'. // Delayed until now because "abc"[1] is not an ideal constant. // See test/fixedbugs/issue11370.go. 
- return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(n.Left().StringVal()[n.Right().Int64Val()]))) + return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(ir.StringVal(n.Left())[ir.Int64Val(n.Right())]))) } a := s.expr(n.Left()) i := s.expr(n.Right()) @@ -2633,7 +2633,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { ptrtyp := s.f.Config.Types.BytePtr ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a) if ir.IsConst(n.Right(), constant.Int) { - ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right().Int64Val(), ptr) + ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, ir.Int64Val(n.Right()), ptr) } else { ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i) } diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index 30179e1dd63ef..e241721588819 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -365,8 +365,8 @@ func (s *exprSwitch) flush() { // all we need here is consistency. We respect this // sorting below. sort.Slice(cc, func(i, j int) bool { - si := cc[i].lo.StringVal() - sj := cc[j].lo.StringVal() + si := ir.StringVal(cc[i].lo) + sj := ir.StringVal(cc[j].lo) if len(si) != len(sj) { return len(si) < len(sj) } @@ -375,7 +375,7 @@ func (s *exprSwitch) flush() { // runLen returns the string length associated with a // particular run of exprClauses. - runLen := func(run []exprClause) int64 { return int64(len(run[0].lo.StringVal())) } + runLen := func(run []exprClause) int64 { return int64(len(ir.StringVal(run[0].lo))) } // Collapse runs of consecutive strings with the same length. var runs [][]exprClause @@ -411,7 +411,7 @@ func (s *exprSwitch) flush() { merged := cc[:1] for _, c := range cc[1:] { last := &merged[len(merged)-1] - if last.jmp == c.jmp && last.hi.Int64Val()+1 == c.lo.Int64Val() { + if last.jmp == c.jmp && ir.Int64Val(last.hi)+1 == ir.Int64Val(c.lo) { last.hi = c.lo } else { merged = append(merged, c) @@ -446,7 +446,7 @@ func (c *exprClause) test(exprname ir.Node) ir.Node { // Optimize "switch true { ...}" and "switch false { ... }". 
if ir.IsConst(exprname, constant.Bool) && !c.lo.Type().IsInterface() { - if exprname.BoolVal() { + if ir.BoolVal(exprname) { return c.lo } else { return ir.NodAt(c.pos, ir.ONOT, c.lo, nil) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 55443ba596e21..b19481311b6c1 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -1054,8 +1054,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { base.Errorf("invalid %s index %v (index must be non-negative)", why, n.Right()) } else if t.IsArray() && constant.Compare(x, token.GEQ, constant.MakeInt64(t.NumElem())) { base.Errorf("invalid array index %v (out of bounds for %d-element array)", n.Right(), t.NumElem()) - } else if ir.IsConst(n.Left(), constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(n.Left().StringVal())))) { - base.Errorf("invalid string index %v (out of bounds for %d-byte string)", n.Right(), len(n.Left().StringVal())) + } else if ir.IsConst(n.Left(), constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(ir.StringVal(n.Left()))))) { + base.Errorf("invalid string index %v (out of bounds for %d-byte string)", n.Right(), len(ir.StringVal(n.Left()))) } else if doesoverflow(x, types.Types[types.TINT]) { base.Errorf("invalid %s index %v (index too large)", why, n.Right()) } @@ -1146,11 +1146,11 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { l = defaultlit(l, types.Types[types.TINT]) c = defaultlit(c, types.Types[types.TINT]) - if ir.IsConst(l, constant.Int) && l.Int64Val() < 0 { + if ir.IsConst(l, constant.Int) && ir.Int64Val(l) < 0 { base.Fatalf("len for OSLICEHEADER must be non-negative") } - if ir.IsConst(c, constant.Int) && c.Int64Val() < 0 { + if ir.IsConst(c, constant.Int) && ir.Int64Val(c) < 0 { base.Fatalf("cap for OSLICEHEADER must be non-negative") } @@ -2173,8 +2173,8 @@ func checksliceindex(l ir.Node, r ir.Node, tp *types.Type) bool { } else if tp != nil && tp.NumElem() >= 0 && constant.Compare(x, token.GTR, constant.MakeInt64(tp.NumElem())) { base.Errorf("invalid slice index %v (out of bounds for %d-element array)", r, tp.NumElem()) return false - } else if ir.IsConst(l, constant.String) && constant.Compare(x, token.GTR, constant.MakeInt64(int64(len(l.StringVal())))) { - base.Errorf("invalid slice index %v (out of bounds for %d-byte string)", r, len(l.StringVal())) + } else if ir.IsConst(l, constant.String) && constant.Compare(x, token.GTR, constant.MakeInt64(int64(len(ir.StringVal(l))))) { + base.Errorf("invalid slice index %v (out of bounds for %d-byte string)", r, len(ir.StringVal(l))) return false } else if doesoverflow(x, types.Types[types.TINT]) { base.Errorf("invalid slice index %v (index too large)", r) @@ -3407,7 +3407,7 @@ func stringtoruneslit(n ir.Node) ir.Node { var l []ir.Node i := 0 - for _, r := range n.Left().StringVal() { + for _, r := range ir.StringVal(n.Left()) { l = append(l, ir.Nod(ir.OKEY, nodintconst(int64(i)), nodintconst(int64(r)))) i++ } @@ -3803,7 +3803,7 @@ func deadcode(fn *ir.Func) { return } case ir.OFOR: - if !ir.IsConst(n.Left(), constant.Bool) || n.Left().BoolVal() { + if !ir.IsConst(n.Left(), constant.Bool) || ir.BoolVal(n.Left()) { return } default: @@ -3833,7 +3833,7 @@ func deadcodeslice(nn *ir.Nodes) { n.SetLeft(deadcodeexpr(n.Left())) if ir.IsConst(n.Left(), constant.Bool) { var body ir.Nodes - if n.Left().BoolVal() { + if ir.BoolVal(n.Left()) { n.SetRlist(ir.Nodes{}) body = n.Body() } else { @@ -3876,7 +3876,7 @@ func 
deadcodeexpr(n ir.Node) ir.Node { n.SetLeft(deadcodeexpr(n.Left())) n.SetRight(deadcodeexpr(n.Right())) if ir.IsConst(n.Left(), constant.Bool) { - if n.Left().BoolVal() { + if ir.BoolVal(n.Left()) { return n.Right() // true && x => x } else { return n.Left() // false && x => false @@ -3886,7 +3886,7 @@ func deadcodeexpr(n ir.Node) ir.Node { n.SetLeft(deadcodeexpr(n.Left())) n.SetRight(deadcodeexpr(n.Right())) if ir.IsConst(n.Left(), constant.Bool) { - if n.Left().BoolVal() { + if ir.BoolVal(n.Left()) { return n.Left() // true || x => true } else { return n.Right() // false || x => x diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index e72015c05e8d7..ce7de1396b345 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -1014,7 +1014,7 @@ opswitch: // The SSA backend will handle those. switch et { case types.TINT64: - c := n.Right().Int64Val() + c := ir.Int64Val(n.Right()) if c < 0 { c = -c } @@ -1022,7 +1022,7 @@ opswitch: break opswitch } case types.TUINT64: - c := n.Right().Uint64Val() + c := ir.Uint64Val(n.Right()) if c < 1<<16 { break opswitch } @@ -1072,7 +1072,7 @@ opswitch: base.Errorf("index out of bounds") } } else if ir.IsConst(n.Left(), constant.String) { - n.SetBounded(bounded(r, int64(len(n.Left().StringVal())))) + n.SetBounded(bounded(r, int64(len(ir.StringVal(n.Left()))))) if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Right(), constant.Int) { base.Warn("index bounds check elided") } @@ -1507,7 +1507,7 @@ opswitch: case ir.OSTR2BYTES: s := n.Left() if ir.IsConst(s, constant.String) { - sc := s.StringVal() + sc := ir.StringVal(s) // Allocate a [n]byte of the right size. t := types.NewArray(types.Types[types.TUINT8], int64(len(sc))) @@ -1936,7 +1936,7 @@ func walkprint(nn ir.Node, init *ir.Nodes) ir.Node { for i := 0; i < len(s); { var strs []string for i < len(s) && ir.IsConst(s[i], constant.String) { - strs = append(strs, s[i].StringVal()) + strs = append(strs, ir.StringVal(s[i])) i++ } if len(strs) > 0 { @@ -2016,7 +2016,7 @@ func walkprint(nn ir.Node, init *ir.Nodes) ir.Node { case types.TSTRING: cs := "" if ir.IsConst(n, constant.String) { - cs = n.StringVal() + cs = ir.StringVal(n) } switch cs { case " ": @@ -2673,7 +2673,7 @@ func addstr(n ir.Node, init *ir.Nodes) ir.Node { sz := int64(0) for _, n1 := range n.List().Slice() { if n1.Op() == ir.OLITERAL { - sz += int64(len(n1.StringVal())) + sz += int64(len(ir.StringVal(n1))) } } @@ -3467,7 +3467,7 @@ func walkcompare(n ir.Node, init *ir.Nodes) ir.Node { func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node { // Ugly hack to avoid "constant -1 overflows uintptr" errors, etc. - if n.Op() == ir.OLITERAL && n.Type().IsSigned() && n.Int64Val() < 0 { + if n.Op() == ir.OLITERAL && n.Type().IsSigned() && ir.Int64Val(n) < 0 { n = copyexpr(n, n.Type(), init) } @@ -3537,7 +3537,7 @@ func walkcompareString(n ir.Node, init *ir.Nodes) ir.Node { // Length-only checks are ok, though. 
maxRewriteLen = 0 } - if s := cs.StringVal(); len(s) <= maxRewriteLen { + if s := ir.StringVal(cs); len(s) <= maxRewriteLen { if len(s) > 0 { ncs = safeexpr(ncs, init) } @@ -3632,7 +3632,7 @@ func bounded(n ir.Node, max int64) bool { bits := int32(8 * n.Type().Width) if smallintconst(n) { - v := n.Int64Val() + v := ir.Int64Val(n) return 0 <= v && v < max } @@ -3641,9 +3641,9 @@ func bounded(n ir.Node, max int64) bool { v := int64(-1) switch { case smallintconst(n.Left()): - v = n.Left().Int64Val() + v = ir.Int64Val(n.Left()) case smallintconst(n.Right()): - v = n.Right().Int64Val() + v = ir.Int64Val(n.Right()) if n.Op() == ir.OANDNOT { v = ^v if !sign { @@ -3657,7 +3657,7 @@ func bounded(n ir.Node, max int64) bool { case ir.OMOD: if !sign && smallintconst(n.Right()) { - v := n.Right().Int64Val() + v := ir.Int64Val(n.Right()) if 0 <= v && v <= max { return true } @@ -3665,7 +3665,7 @@ func bounded(n ir.Node, max int64) bool { case ir.ODIV: if !sign && smallintconst(n.Right()) { - v := n.Right().Int64Val() + v := ir.Int64Val(n.Right()) for bits > 0 && v >= 2 { bits-- v >>= 1 @@ -3674,7 +3674,7 @@ func bounded(n ir.Node, max int64) bool { case ir.ORSH: if !sign && smallintconst(n.Right()) { - v := n.Right().Int64Val() + v := ir.Int64Val(n.Right()) if v > int64(bits) { return true } diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 1d886bb9a1af6..aeeb63d2d65fc 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -296,62 +296,6 @@ func (n *Name) SetVal(v constant.Value) { n.val = v } -// Int64Val returns n as an int64. -// n must be an integer or rune constant. -func (n *Name) Int64Val() int64 { - if !IsConst(n, constant.Int) { - base.Fatalf("Int64Val(%v)", n) - } - x, ok := constant.Int64Val(n.Val()) - if !ok { - base.Fatalf("Int64Val(%v)", n) - } - return x -} - -// CanInt64 reports whether it is safe to call Int64Val() on n. -func (n *Name) CanInt64() bool { - if !IsConst(n, constant.Int) { - return false - } - - // if the value inside n cannot be represented as an int64, the - // return value of Int64 is undefined - _, ok := constant.Int64Val(n.Val()) - return ok -} - -// Uint64Val returns n as an uint64. -// n must be an integer or rune constant. -func (n *Name) Uint64Val() uint64 { - if !IsConst(n, constant.Int) { - base.Fatalf("Uint64Val(%v)", n) - } - x, ok := constant.Uint64Val(n.Val()) - if !ok { - base.Fatalf("Uint64Val(%v)", n) - } - return x -} - -// BoolVal returns n as a bool. -// n must be a boolean constant. -func (n *Name) BoolVal() bool { - if !IsConst(n, constant.Bool) { - base.Fatalf("BoolVal(%v)", n) - } - return constant.BoolVal(n.Val()) -} - -// StringVal returns the value of a literal string Node as a string. -// n must be a string constant. -func (n *Name) StringVal() string { - if !IsConst(n, constant.String) { - base.Fatalf("StringVal(%v)", n) - } - return constant.StringVal(n.Val()) -} - // The Class of a variable/function describes the "storage class" // of a variable or function. During parsing, storage classes are // called declaration contexts. diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index cc3ac5765df69..42ba4cb0e98e7 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -87,11 +87,6 @@ type Node interface { MarkReadonly() Val() constant.Value SetVal(v constant.Value) - Int64Val() int64 - Uint64Val() uint64 - CanInt64() bool - BoolVal() bool - StringVal() string // Storage for analysis passes. 
Esc() uint16 diff --git a/src/cmd/compile/internal/ir/val.go b/src/cmd/compile/internal/ir/val.go index aae965bb4c977..ad0df5508d926 100644 --- a/src/cmd/compile/internal/ir/val.go +++ b/src/cmd/compile/internal/ir/val.go @@ -32,7 +32,7 @@ func ConstValue(n Node) interface{} { case constant.String: return constant.StringVal(v) case constant.Int: - return Int64Val(n.Type(), v) + return IntVal(n.Type(), v) case constant.Float: return Float64Val(v) case constant.Complex: @@ -42,7 +42,7 @@ func ConstValue(n Node) interface{} { // int64Val returns v converted to int64. // Note: if t is uint64, very large values will be converted to negative int64. -func Int64Val(t *types.Type, v constant.Value) int64 { +func IntVal(t *types.Type, v constant.Value) int64 { if t.IsUnsigned() { if x, ok := constant.Uint64Val(v); ok { return int64(x) @@ -118,3 +118,59 @@ func idealType(ct constant.Kind) *types.Type { } var OKForConst [types.NTYPE]bool + +// CanInt64 reports whether it is safe to call Int64Val() on n. +func CanInt64(n Node) bool { + if !IsConst(n, constant.Int) { + return false + } + + // if the value inside n cannot be represented as an int64, the + // return value of Int64 is undefined + _, ok := constant.Int64Val(n.Val()) + return ok +} + +// Int64Val returns n as an int64. +// n must be an integer or rune constant. +func Int64Val(n Node) int64 { + if !IsConst(n, constant.Int) { + base.Fatalf("Int64Val(%v)", n) + } + x, ok := constant.Int64Val(n.Val()) + if !ok { + base.Fatalf("Int64Val(%v)", n) + } + return x +} + +// Uint64Val returns n as an uint64. +// n must be an integer or rune constant. +func Uint64Val(n Node) uint64 { + if !IsConst(n, constant.Int) { + base.Fatalf("Uint64Val(%v)", n) + } + x, ok := constant.Uint64Val(n.Val()) + if !ok { + base.Fatalf("Uint64Val(%v)", n) + } + return x +} + +// BoolVal returns n as a bool. +// n must be a boolean constant. +func BoolVal(n Node) bool { + if !IsConst(n, constant.Bool) { + base.Fatalf("BoolVal(%v)", n) + } + return constant.BoolVal(n.Val()) +} + +// StringVal returns the value of a literal string Node as a string. +// n must be a string constant. +func StringVal(n Node) string { + if !IsConst(n, constant.String) { + base.Fatalf("StringVal(%v)", n) + } + return constant.StringVal(n.Val()) +} From a2058bac21f40925a33d7f99622c967b65827f29 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Wed, 2 Dec 2020 19:26:56 -0800 Subject: [PATCH 101/474] [dev.regabi] cmd/compile: add ConstExpr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, we represent constant-folded expressions with Name, which is suboptimal because Name has a lot of fields to support declared names (which are irrelevant to constant-folded expressions), while constant expressions are fairly common. This CL introduces a new lightweight ConstExpr type that simply wraps an existing expression and associates it with a value. Passes buildall w/ toolstash -cmp. 
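
For a sense of the shape: constant folding now wraps the original
expression instead of copying it into a Name. A sketch using the
constructor added below, where v is the folded constant.Value:

	n = ir.NewConstExpr(v, n)
	// n.Op() == ir.OLITERAL, n.Val() == v, and ir.Orig(n) is the
	// original expression, kept for error messages.
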
name old time/op new time/op delta Template 252ms ± 3% 254ms ± 1% ~ (p=0.821 n=12+10) Unicode 120ms ± 2% 107ms ± 7% -11.09% (p=0.000 n=12+12) GoTypes 918ms ± 2% 918ms ± 1% ~ (p=0.974 n=12+10) Compiler 5.19s ± 1% 5.18s ± 0% ~ (p=0.190 n=12+11) SSA 12.4s ± 1% 12.3s ± 1% ~ (p=0.283 n=10+12) Flate 152ms ± 2% 148ms ± 4% -2.68% (p=0.007 n=10+12) GoParser 212ms ± 1% 211ms ± 2% ~ (p=0.674 n=10+12) Reflect 543ms ± 3% 542ms ± 3% ~ (p=0.799 n=12+12) Tar 224ms ± 2% 225ms ± 2% ~ (p=0.378 n=12+12) XML 292ms ± 1% 299ms ± 3% +2.18% (p=0.006 n=10+12) name old user-time/op new user-time/op delta Template 243ms ± 4% 244ms ± 5% ~ (p=0.887 n=12+12) Unicode 112ms ± 6% 100ms ±10% -10.76% (p=0.000 n=12+12) GoTypes 898ms ± 3% 895ms ± 3% ~ (p=0.671 n=12+12) Compiler 5.10s ± 1% 5.08s ± 1% ~ (p=0.104 n=12+11) SSA 12.2s ± 2% 12.1s ± 1% ~ (p=0.487 n=11+12) Flate 144ms ± 6% 145ms ± 5% ~ (p=0.695 n=12+11) GoParser 205ms ± 5% 204ms ± 3% ~ (p=0.514 n=12+12) Reflect 528ms ± 3% 531ms ± 4% ~ (p=0.630 n=12+12) Tar 218ms ± 4% 219ms ± 3% ~ (p=0.843 n=12+12) XML 284ms ± 5% 291ms ± 5% ~ (p=0.069 n=11+12) name old alloc/op new alloc/op delta Template 37.0MB ± 0% 36.7MB ± 0% -0.72% (p=0.000 n=12+12) Unicode 31.9MB ± 0% 29.5MB ± 0% -7.60% (p=0.000 n=12+12) GoTypes 119MB ± 0% 118MB ± 0% -0.40% (p=0.000 n=12+12) Compiler 629MB ± 0% 626MB ± 0% -0.36% (p=0.000 n=11+12) SSA 1.45GB ± 0% 1.43GB ± 0% -0.78% (p=0.000 n=12+12) Flate 22.2MB ± 0% 21.9MB ± 0% -1.12% (p=0.000 n=12+12) GoParser 29.4MB ± 0% 29.3MB ± 0% -0.36% (p=0.000 n=12+12) Reflect 76.1MB ± 0% 75.8MB ± 0% -0.38% (p=0.000 n=12+12) Tar 33.4MB ± 0% 33.2MB ± 0% -0.61% (p=0.000 n=12+12) XML 43.2MB ± 0% 42.8MB ± 0% -1.03% (p=0.000 n=11+12) name old allocs/op new allocs/op delta Template 375k ± 0% 375k ± 0% ~ (p=0.854 n=12+12) Unicode 300k ± 0% 300k ± 0% ~ (p=0.766 n=12+12) GoTypes 1.30M ± 0% 1.30M ± 0% ~ (p=0.272 n=12+12) Compiler 5.89M ± 0% 5.89M ± 0% ~ (p=0.478 n=12+12) SSA 14.0M ± 0% 14.0M ± 0% ~ (p=0.266 n=12+12) Flate 226k ± 0% 226k ± 0% ~ (p=0.898 n=12+12) GoParser 313k ± 0% 313k ± 0% -0.01% (p=0.042 n=12+11) Reflect 971k ± 0% 971k ± 0% ~ (p=0.080 n=12+12) Tar 342k ± 0% 342k ± 0% ~ (p=0.600 n=12+12) XML 416k ± 0% 416k ± 0% ~ (p=0.217 n=11+12) name old maxRSS/op new maxRSS/op delta Template 43.1M ± 5% 42.5M ± 5% ~ (p=0.086 n=12+12) Unicode 49.4M ± 2% 47.0M ± 2% -4.88% (p=0.000 n=12+12) GoTypes 85.3M ± 2% 84.6M ± 2% -0.84% (p=0.047 n=11+11) Compiler 394M ± 3% 386M ± 2% -1.97% (p=0.000 n=10+11) SSA 847M ± 4% 821M ± 2% -2.98% (p=0.000 n=11+12) Flate 36.0M ± 7% 35.2M ± 7% ~ (p=0.128 n=12+12) GoParser 39.4M ± 7% 39.5M ± 4% ~ (p=0.413 n=12+11) Reflect 64.0M ± 3% 63.6M ± 3% ~ (p=0.413 n=11+12) Tar 43.3M ± 5% 43.3M ± 5% ~ (p=0.503 n=12+12) XML 47.6M ± 4% 46.4M ± 2% -2.46% (p=0.013 n=11+12) Change-Id: If5781be346351c30b2228807211b5e57f777c506 Reviewed-on: https://go-review.googlesource.com/c/go/+/275033 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Russ Cox Trust: Matthew Dempsky --- src/cmd/compile/internal/gc/const.go | 27 +++++++-------------------- src/cmd/compile/internal/ir/expr.go | 25 +++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 20 deletions(-) diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 8771d82cfa009..9aa65f97b6fe7 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -115,22 +115,12 @@ func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir return n } - if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL { - // Can't always set n.Type 
directly on OLITERAL nodes. - // See discussion on CL 20813. - old := n - n = ir.Copy(old) - if old.Op() == ir.OLITERAL { - // Keep untyped constants in their original untyped syntax for error messages. - n.(ir.OrigNode).SetOrig(old) - } - } - // Nil is technically not a constant, so handle it specially. if n.Type().Kind() == types.TNIL { if n.Op() != ir.ONIL { base.Fatalf("unexpected op: %v (%v)", n, n.Op()) } + n = ir.Copy(n) if t == nil { base.Errorf("use of untyped nil") n.SetDiag(true) @@ -158,10 +148,11 @@ func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir case ir.OLITERAL: v := convertVal(n.Val(), t, explicit) if v.Kind() == constant.Unknown { + n = ir.NewConstExpr(n.Val(), n) break } + n = ir.NewConstExpr(v, n) n.SetType(t) - n.SetVal(v) return n case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.OREAL, ir.OIMAG: @@ -541,8 +532,9 @@ func evalConst(n ir.Node) ir.Node { i2++ } - nl := origConst(s[i], constant.MakeString(strings.Join(strs, ""))) - nl.(ir.OrigNode).SetOrig(nl) // it's bigger than just s[i] + nl := ir.Copy(n) + nl.PtrList().Set(s[i:i2]) + nl = origConst(nl, constant.MakeString(strings.Join(strs, ""))) newList = append(newList, nl) i = i2 - 1 } else { @@ -645,12 +637,7 @@ func origConst(n ir.Node, v constant.Value) ir.Node { return n } - orig := n - n = ir.NodAt(orig.Pos(), ir.OLITERAL, nil, nil) - n.(ir.OrigNode).SetOrig(orig) - n.SetType(orig.Type()) - n.SetVal(v) - return n + return ir.NewConstExpr(v, n) } func origBoolConst(n ir.Node, v bool) ir.Node { diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 2a7211cfdac36..412b7a18f014a 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -9,6 +9,7 @@ import ( "cmd/compile/internal/types" "cmd/internal/src" "fmt" + "go/constant" ) // A miniStmt is a miniNode with extra fields common to expressions. @@ -300,6 +301,30 @@ func (n *CompLitExpr) SetOp(op Op) { } } +type ConstExpr struct { + miniExpr + val constant.Value + orig Node +} + +func NewConstExpr(val constant.Value, orig Node) Node { + n := &ConstExpr{orig: orig, val: val} + n.op = OLITERAL + n.pos = orig.Pos() + n.SetType(orig.Type()) + n.SetTypecheck(orig.Typecheck()) + n.SetDiag(orig.Diag()) + return n +} + +func (n *ConstExpr) String() string { return fmt.Sprint(n) } +func (n *ConstExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *ConstExpr) rawCopy() Node { c := *n; return &c } +func (n *ConstExpr) Sym() *types.Sym { return n.orig.Sym() } +func (n *ConstExpr) Orig() Node { return n.orig } +func (n *ConstExpr) SetOrig(orig Node) { n.orig = orig } +func (n *ConstExpr) Val() constant.Value { return n.val } + // A ConvExpr is a conversion Type(X). // It may end up being a value or a type. type ConvExpr struct { From 351bc2f38c4291c01299c2add16f1f5a96e54bb4 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Wed, 2 Dec 2020 21:38:20 -0800 Subject: [PATCH 102/474] [dev.regabi] cmd/compile: store types.Field on {Selector,CallPart}Expr It's useful to have quick access to the types.Field that a given selector or method value expression refer to. Previously we abused Opt for this, but couldn't do that for OCALLPART because escape analysis uses Opt. Now that we have more flexibility, we can simply add additional pointer fields for this. This also allows getting rid of an unneeded ONAME node for OCALLPART. Passes buildall w/ toolstash -cmp. 
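
Callers can then reach the field directly, as the typecheck.go and
walk.go hunks below do:

	field := n.(*ir.SelectorExpr).Selection // previously: n.Opt().(*types.Field)
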
Change-Id: I980d7bdb19abfd0b6f58a232876861b88dee1e47 Reviewed-on: https://go-review.googlesource.com/c/go/+/275034 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Russ Cox Trust: Matthew Dempsky --- src/cmd/compile/internal/gc/closure.go | 15 ++------------- src/cmd/compile/internal/gc/iexport.go | 3 +-- src/cmd/compile/internal/gc/inl.go | 3 +++ src/cmd/compile/internal/gc/typecheck.go | 14 +++++++------- src/cmd/compile/internal/gc/walk.go | 2 +- src/cmd/compile/internal/ir/expr.go | 15 ++++++++------- src/cmd/compile/internal/ir/fmt.go | 4 ++-- 7 files changed, 24 insertions(+), 32 deletions(-) diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index a5441a037a0bb..01e5a953de8d5 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -427,7 +427,7 @@ func typecheckpartialcall(dot ir.Node, sym *types.Sym) *ir.CallPartExpr { fn := makepartialcall(dot, dot.Type(), sym) fn.SetWrapper(true) - return ir.NewCallPartExpr(dot.Pos(), dot.Left(), NewName(sym), fn) + return ir.NewCallPartExpr(dot.Pos(), dot.Left(), dot.(*ir.SelectorExpr).Selection, fn) } // makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed @@ -565,16 +565,5 @@ func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node { // callpartMethod returns the *types.Field representing the method // referenced by method value n. func callpartMethod(n ir.Node) *types.Field { - if n.Op() != ir.OCALLPART { - base.Fatalf("expected OCALLPART, got %v", n) - } - - // TODO(mdempsky): Optimize this. If necessary, - // makepartialcall could save m for us somewhere. - var m *types.Field - if lookdot0(n.Right().Sym(), n.Left().Type(), &m, false) != 1 { - base.Fatalf("failed to find field for OCALLPART") - } - - return m + return n.(*ir.CallPartExpr).Method } diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 85518bc9391a1..bb6f2b11e62e4 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -1290,8 +1290,7 @@ func (w *exportWriter) expr(n ir.Node) { w.op(ir.OXDOT) w.pos(n.Pos()) w.expr(n.Left()) - // Right node should be ONAME - w.selector(n.Right().Sym()) + w.selector(n.Sym()) case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH: w.op(ir.OXDOT) diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 42125f38f3e27..64f1b062be57d 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -430,6 +430,9 @@ func (v *hairyVisitor) visit(n ir.Node) bool { // In any event, let the visitList(n.List()) below take care of the statements, // and don't charge for the OBLOCK itself. The ++ undoes the -- below. v.budget++ + + case ir.OCALLPART: + v.budget-- // Hack for toolstash -cmp. } v.budget-- diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index b19481311b6c1..e2100481aa6fa 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -2385,7 +2385,7 @@ func typecheckMethodExpr(n ir.Node) (res ir.Node) { me.SetType(methodfunc(m.Type, n.Left().Type())) me.SetOffset(0) me.SetClass(ir.PFUNC) - me.SetOpt(m) + me.(*ir.MethodExpr).Method = m // Issue 25065. Make sure that we emit the symbol for a local method. 
if base.Ctxt.Flag_dynlink && !inimport && (t.Sym() == nil || t.Sym().Pkg == ir.LocalPkg) { @@ -2448,10 +2448,8 @@ func lookdot(n ir.Node, t *types.Type, dostrcmp int) *types.Field { } n.SetOp(ir.ODOTINTER) - } else { - n.SetOpt(f1) } - + n.(*ir.SelectorExpr).Selection = f1 return f1 } @@ -2507,7 +2505,7 @@ func lookdot(n ir.Node, t *types.Type, dostrcmp int) *types.Field { n.SetOffset(f2.Offset) n.SetType(f2.Type) n.SetOp(ir.ODOTMETH) - n.SetOpt(f2) + n.(*ir.SelectorExpr).Selection = f2 return f2 } @@ -3933,8 +3931,10 @@ func methodExprName(n ir.Node) *ir.Name { // MethodFunc is like MethodName, but returns the types.Field instead. func methodExprFunc(n ir.Node) *types.Field { switch n.Op() { - case ir.ODOTMETH, ir.OMETHEXPR: - return n.Opt().(*types.Field) + case ir.ODOTMETH: + return n.(*ir.SelectorExpr).Selection + case ir.OMETHEXPR: + return n.(*ir.MethodExpr).Method case ir.OCALLPART: return callpartMethod(n) } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index ce7de1396b345..3d22c66d901c5 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -3757,7 +3757,7 @@ func usefield(n ir.Node) { if t.IsPtr() { t = t.Elem() } - field := n.Opt().(*types.Field) + field := n.(*ir.SelectorExpr).Selection if field == nil { base.Fatalf("usefield %v %v without paramfld", n.Left().Type(), n.Sym()) } diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 412b7a18f014a..18d85a01df8df 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -205,10 +205,10 @@ type CallPartExpr struct { miniExpr fn *Func X Node - Method *Name + Method *types.Field } -func NewCallPartExpr(pos src.XPos, x Node, method *Name, fn *Func) *CallPartExpr { +func NewCallPartExpr(pos src.XPos, x Node, method *types.Field, fn *Func) *CallPartExpr { n := &CallPartExpr{fn: fn, X: x, Method: method} n.op = OCALLPART n.pos = pos @@ -222,9 +222,8 @@ func (n *CallPartExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *CallPartExpr) rawCopy() Node { c := *n; return &c } func (n *CallPartExpr) Func() *Func { return n.fn } func (n *CallPartExpr) Left() Node { return n.X } -func (n *CallPartExpr) Right() Node { return n.Method } +func (n *CallPartExpr) Sym() *types.Sym { return n.Method.Sym } func (n *CallPartExpr) SetLeft(x Node) { n.X = x } -func (n *CallPartExpr) SetRight(x Node) { n.Method = x.(*Name) } // A ClosureExpr is a function literal expression. type ClosureExpr struct { @@ -499,6 +498,7 @@ type MethodExpr struct { sym *types.Sym offset int64 class Class + Method *types.Field } func NewMethodExpr(pos src.XPos, op Op, x, m Node) *MethodExpr { @@ -596,9 +596,10 @@ func (n *ResultExpr) SetOffset(x int64) { n.offset = x } // A SelectorExpr is a selector expression X.Sym. 
type SelectorExpr struct { miniExpr - X Node - Sel *types.Sym - offset int64 + X Node + Sel *types.Sym + offset int64 + Selection *types.Field } func NewSelectorExpr(pos src.XPos, x Node, sel *types.Sym) *SelectorExpr { diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 9486d8b021d54..45a66a2290925 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -1382,11 +1382,11 @@ func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) { case OCALLPART: exprFmt(n.Left(), s, nprec, mode) - if n.Right() == nil || n.Right().Sym() == nil { + if n.Sym() == nil { fmt.Fprint(s, ".") return } - mode.Fprintf(s, ".%0S", n.Right().Sym()) + mode.Fprintf(s, ".%0S", n.Sym()) case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH: exprFmt(n.Left(), s, nprec, mode) From 84cb51d7d7a936d56d6287ca075dd578097499a9 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Thu, 3 Dec 2020 11:56:29 -0800 Subject: [PATCH 103/474] [dev.regabi] cmd/compile: eliminate more SetOrig This CL consolidates and cleans up fmt.go's logic for skipping past Nodes introduced during typechecking. This allows eliminating SetOrig on ConvExpr and Name. Also changes ConstExpr.SetOrig to a panic for good measure. The only remaining SetOrig uses now are for rewriting multi-value "f(g())" calls and "return g()" statements, and type-checking composite literals. It should be possible to eliminate both of those as well. Passes buildall w/ toolstash -cmp. Change-Id: I478aea1a17dfb7a784293b930bf9081637eb2d7a Reviewed-on: https://go-review.googlesource.com/c/go/+/275179 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/subr.go | 1 - src/cmd/compile/internal/ir/expr.go | 4 +-- src/cmd/compile/internal/ir/fmt.go | 47 +++++++++++++++-------------- src/cmd/compile/internal/ir/name.go | 2 -- 4 files changed, 26 insertions(+), 28 deletions(-) diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 970f78b355ad3..65eb61e6800f5 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -523,7 +523,6 @@ func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node { r.SetType(t) r.SetTypecheck(1) r.SetImplicit(true) - r.(ir.OrigNode).SetOrig(ir.Orig(n)) return r } diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 18d85a01df8df..49543f4286f1e 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -321,7 +321,7 @@ func (n *ConstExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *ConstExpr) rawCopy() Node { c := *n; return &c } func (n *ConstExpr) Sym() *types.Sym { return n.orig.Sym() } func (n *ConstExpr) Orig() Node { return n.orig } -func (n *ConstExpr) SetOrig(orig Node) { n.orig = orig } +func (n *ConstExpr) SetOrig(orig Node) { panic(n.no("SetOrig")) } func (n *ConstExpr) Val() constant.Value { return n.val } // A ConvExpr is a conversion Type(X). 
@@ -344,8 +344,6 @@ func NewConvExpr(pos src.XPos, op Op, typ *types.Type, x Node) *ConvExpr {
 func (n *ConvExpr) String() string { return fmt.Sprint(n) }
 func (n *ConvExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
 func (n *ConvExpr) rawCopy() Node { c := *n; return &c }
-func (n *ConvExpr) Orig() Node { return n.orig }
-func (n *ConvExpr) SetOrig(x Node) { n.orig = x }
 func (n *ConvExpr) Left() Node { return n.X }
 func (n *ConvExpr) SetLeft(x Node) { n.X = x }
diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go
index 45a66a2290925..bc5536241ea1d 100644
--- a/src/cmd/compile/internal/ir/fmt.go
+++ b/src/cmd/compile/internal/ir/fmt.go
@@ -1071,6 +1071,7 @@ var OpPrec = []int{
 	OCALL: 8,
 	OCAP: 8,
 	OCLOSE: 8,
+	OCOMPLIT: 8,
 	OCONVIFACE: 8,
 	OCONVNOP: 8,
 	OCONV: 8,
@@ -1179,13 +1180,28 @@ var OpPrec = []int{
 }
 
 func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) {
-	for n != nil && n.Implicit() && (n.Op() == ODEREF || n.Op() == OADDR) {
-		n = n.Left()
-	}
+	for {
+		if n == nil {
+			fmt.Fprint(s, "<N>")
+			return
+		}
 
-	if n == nil {
-		fmt.Fprint(s, "<N>")
-		return
+		// We always want the original, if any.
+		if o := Orig(n); o != n {
+			n = o
+			continue
+		}
+
+		// Skip implicit operations introduced during typechecking.
+		switch n.Op() {
+		case OADDR, ODEREF, OCONV, OCONVNOP, OCONVIFACE:
+			if n.Implicit() {
+				n = n.Left()
+				continue
+			}
+		}
+
+		break
 	}
 
 	nprec := OpPrec[n.Op()]
@@ -1206,15 +1222,9 @@ func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) {
 		fmt.Fprint(s, "nil")
 
 	case OLITERAL: // this is a bit of a mess
-		if mode == FErr {
-			if orig := Orig(n); orig != nil && orig != n {
-				exprFmt(orig, s, prec, mode)
-				return
-			}
-			if n.Sym() != nil {
-				fmt.Fprint(s, smodeString(n.Sym(), mode))
-				return
-			}
+		if mode == FErr && n.Sym() != nil {
+			fmt.Fprint(s, smodeString(n.Sym(), mode))
+			return
 		}
 
 		needUnparen := false
diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go
index aeeb63d2d65fc..67d4d2b391121 100644
--- a/src/cmd/compile/internal/ir/name.go
+++ b/src/cmd/compile/internal/ir/name.go
@@ -155,8 +155,6 @@ func (n *Name) rawCopy() Node { c := *n; return &c }
 func (n *Name) Name() *Name { return n }
 func (n *Name) Sym() *types.Sym { return n.sym }
 func (n *Name) SetSym(x *types.Sym) { n.sym = x }
-func (n *Name) Orig() Node { return n.orig }
-func (n *Name) SetOrig(x Node) { n.orig = x }
 func (n *Name) SubOp() Op { return n.subOp }
 func (n *Name) SetSubOp(x Op) { n.subOp = x }
 func (n *Name) Class() Class { return n.class }
From 99ecfcae31e52a297195b2c1d1d9326e16d6c775 Mon Sep 17 00:00:00 2001
From: Russ Cox
Date: Thu, 3 Dec 2020 15:40:46 -0500
Subject: [PATCH 104/474] [dev.regabi] cmd/compile: swap inlining order of if
 then vs else blocks

The upcoming general iterators will process nodes in source code order,
meaning that the "then" block comes before the "else" block. But for an
if node, "then" is Body while "else" is Rlist, and the inliner processes
Rlist first. The order of processing changes the order of inlining
decisions, which can affect which functions are inlined, but in general
won't affect much.
(It's not like we know that we should prefer to inline functions in else bodies over then bodies.) Swapping these is not safe for toolstash -cmp. Doing it in a separate CL lets the upcoming CLs all be toolstash-safe. Change-Id: Id16172849239b0564930d2bbff1260ad6d03d5ab Reviewed-on: https://go-review.googlesource.com/c/go/+/275308 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/inl.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 64f1b062be57d..980ba7429ae91 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -638,6 +638,14 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool) ir.Node { } } + inlnodelist(n.Body(), maxCost, inlMap) + s = n.Body().Slice() + for i, n1 := range s { + if n1.Op() == ir.OINLCALL { + s[i] = inlconv2stmt(n1) + } + } + inlnodelist(n.Rlist(), maxCost, inlMap) if n.Op() == ir.OAS2FUNC && n.Rlist().First().Op() == ir.OINLCALL { @@ -658,14 +666,6 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool) ir.Node { } } - inlnodelist(n.Body(), maxCost, inlMap) - s = n.Body().Slice() - for i, n1 := range s { - if n1.Op() == ir.OINLCALL { - s[i] = inlconv2stmt(n1) - } - } - // with all the branches out of the way, it is now time to // transmogrify this node itself unless inhibited by the // switch at the top of this function. From 989a3f5041d2055e165e363d3fb2d27e75e2fa38 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 2 Dec 2020 22:42:24 -0500 Subject: [PATCH 105/474] [dev.regabi] cmd/compile: adjustments to Copy and DeepCopy DeepCopy is not called DeepSepCopy, so it should use Copy, not SepCopy. Also, the old gc.treecopy, which became ir.DeepCopy, only copied the Left, Right, and List fields - not Init, Rlist, Body - and I didn't notice when I moved it over. A general utility function should of course copy the whole node, so do that. Finally, the semantics of Copy should not depend on whether a particular child node is held directly in a field or in a slice, so make Copy duplicate the slice backing arrays as well. (Logically, those backing arrays are part of the node storage.) Passes buildall w/ toolstash -cmp. Change-Id: I18fbe3f2b40078f566ed6370684d5585052b36a1 Reviewed-on: https://go-review.googlesource.com/c/go/+/275309 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/copy.go | 43 +++++++++++++++++++++++++++-- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/src/cmd/compile/internal/ir/copy.go b/src/cmd/compile/internal/ir/copy.go index a356074bb8435..705de0195b5e0 100644 --- a/src/cmd/compile/internal/ir/copy.go +++ b/src/cmd/compile/internal/ir/copy.go @@ -61,9 +61,33 @@ func Copy(n Node) Node { if n, ok := n.(OrigNode); ok && n.Orig() == n { copy.(OrigNode).SetOrig(copy) } + + // Copy lists so that updates to n.List[0] + // don't affect copy.List[0] and vice versa, + // same as updates to Left and Right. + // TODO(rsc): Eventually the Node implementations will need to do this. 
+ if l := copy.List(); l.Len() > 0 { + copy.SetList(copyList(l)) + } + if l := copy.Rlist(); l.Len() > 0 { + copy.SetRlist(copyList(l)) + } + if l := copy.Init(); l.Len() > 0 { + copy.SetInit(copyList(l)) + } + if l := copy.Body(); l.Len() > 0 { + copy.SetBody(copyList(l)) + } + return copy } +func copyList(x Nodes) Nodes { + out := make([]Node, x.Len()) + copy(out, x.Slice()) + return AsNodes(out) +} + // A Node can implement DeepCopyNode to provide a custom implementation // of DeepCopy. If the compiler only needs access to a Node's structure during // DeepCopy, then a Node can implement DeepCopyNode instead of providing @@ -94,10 +118,15 @@ func DeepCopy(pos src.XPos, n Node) Node { switch n.Op() { default: - m := SepCopy(n) + m := Copy(n) m.SetLeft(DeepCopy(pos, n.Left())) m.SetRight(DeepCopy(pos, n.Right())) - m.PtrList().Set(deepCopyList(pos, n.List().Slice())) + // deepCopyList instead of DeepCopyList + // because Copy already copied all these slices. + deepCopyList(pos, m.PtrList().Slice()) + deepCopyList(pos, m.PtrRlist().Slice()) + deepCopyList(pos, m.PtrInit().Slice()) + deepCopyList(pos, m.PtrBody().Slice()) if pos.IsKnown() { m.SetPos(pos) } @@ -118,10 +147,18 @@ func DeepCopy(pos src.XPos, n Node) Node { } } -func deepCopyList(pos src.XPos, list []Node) []Node { +// DeepCopyList returns a list of deep copies (using DeepCopy) of the nodes in list. +func DeepCopyList(pos src.XPos, list []Node) []Node { var out []Node for _, n := range list { out = append(out, DeepCopy(pos, n)) } return out } + +// deepCopyList edits list to point to deep copies of its elements. +func deepCopyList(pos src.XPos, list []Node) { + for i, n := range list { + list[i] = DeepCopy(pos, n) + } +} From 7fcf5b994cf24dc7eda4d65d448e25489dd357f6 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 2 Dec 2020 22:54:33 -0500 Subject: [PATCH 106/474] [dev.regabi] cmd/compile: replace inlcopy with ir.DeepCopy Now inlcopy and ir.DeepCopy are semantically the same, so drop the inlcopy implementation. Passes buildall w/ toolstash -cmp. Change-Id: Id2abb39a412a8e57167a29be5ecf76e990dc9d3d Reviewed-on: https://go-review.googlesource.com/c/go/+/275310 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/inl.go | 37 +----------------------------- 1 file changed, 1 insertion(+), 36 deletions(-) diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 980ba7429ae91..efd6fea844538 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -218,7 +218,7 @@ func caninl(fn *ir.Func) { n.Func().Inl = &ir.Inline{ Cost: inlineMaxBudget - visitor.budget, Dcl: pruneUnusedAutos(n.Defn.Func().Dcl, &visitor), - Body: inlcopylist(fn.Body().Slice()), + Body: ir.DeepCopyList(src.NoXPos, fn.Body().Slice()), } if base.Flag.LowerM > 1 { @@ -447,41 +447,6 @@ func (v *hairyVisitor) visit(n ir.Node) bool { v.visitList(n.Init()) || v.visitList(n.Body()) } -// inlcopylist (together with inlcopy) recursively copies a list of nodes, except -// that it keeps the same ONAME, OTYPE, and OLITERAL nodes. It is used for copying -// the body and dcls of an inlineable function. 
-func inlcopylist(ll []ir.Node) []ir.Node { - s := make([]ir.Node, 0, len(ll)) - for _, n := range ll { - s = append(s, inlcopy(n)) - } - return s -} - -func inlcopy(n ir.Node) ir.Node { - if n == nil { - return nil - } - - switch n.Op() { - case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.ONIL: - return n - } - - m := ir.Copy(n) - if n.Op() != ir.OCALLPART && m.Func() != nil { - base.Fatalf("unexpected Func: %v", m) - } - m.SetLeft(inlcopy(n.Left())) - m.SetRight(inlcopy(n.Right())) - m.PtrList().Set(inlcopylist(n.List().Slice())) - m.PtrRlist().Set(inlcopylist(n.Rlist().Slice())) - m.PtrInit().Set(inlcopylist(n.Init().Slice())) - m.PtrBody().Set(inlcopylist(n.Body().Slice())) - - return m -} - func countNodes(n ir.Node) int { if n == nil { return 0 From 0d1b44c6457bcfad611252175934e82f73440475 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 3 Dec 2020 12:57:38 -0500 Subject: [PATCH 107/474] [dev.regabi] cmd/compile: introduce IR visitors This CL introduces the general visitor functionality that will replace the Left, SetLeft, Right, SetRight, etc methods in the Node interface. For now, the CL defines the functionality in terms of those methods, but eventually the Nodes themselves will implement DoChildren and EditChildren and be relieved of implementing Left, SetLeft, and so on. The CL also updates Inspect (which moved to visit.go) and DeepCopy to use the new functionality. The Find helper is not used in this CL but will be used in a future one. Passes buildall w/ toolstash -cmp. Change-Id: Id0eea654a884ab3ea25f48bd8bdd71712b5dcb44 Reviewed-on: https://go-review.googlesource.com/c/go/+/275311 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/copy.go | 57 ++---- src/cmd/compile/internal/ir/node.go | 20 -- src/cmd/compile/internal/ir/visit.go | 273 +++++++++++++++++++++++++++ 3 files changed, 289 insertions(+), 61 deletions(-) create mode 100644 src/cmd/compile/internal/ir/visit.go diff --git a/src/cmd/compile/internal/ir/copy.go b/src/cmd/compile/internal/ir/copy.go index 705de0195b5e0..2f340df1abae2 100644 --- a/src/cmd/compile/internal/ir/copy.go +++ b/src/cmd/compile/internal/ir/copy.go @@ -107,44 +107,26 @@ type DeepCopyNode interface { // // If a Node wishes to provide an alternate implementation, it can // implement a DeepCopy method: see the DeepCopyNode interface. +// +// TODO(rsc): Once Nodes implement EditChildren, remove the DeepCopyNode interface. func DeepCopy(pos src.XPos, n Node) Node { - if n == nil { - return nil - } - - if n, ok := n.(DeepCopyNode); ok { - return n.DeepCopy(pos) - } - - switch n.Op() { - default: - m := Copy(n) - m.SetLeft(DeepCopy(pos, n.Left())) - m.SetRight(DeepCopy(pos, n.Right())) - // deepCopyList instead of DeepCopyList - // because Copy already copied all these slices. - deepCopyList(pos, m.PtrList().Slice()) - deepCopyList(pos, m.PtrRlist().Slice()) - deepCopyList(pos, m.PtrInit().Slice()) - deepCopyList(pos, m.PtrBody().Slice()) - if pos.IsKnown() { - m.SetPos(pos) + var edit func(Node) Node + edit = func(x Node) Node { + if x, ok := x.(DeepCopyNode); ok { + return x.DeepCopy(pos) } - if m.Name() != nil { - Dump("DeepCopy", n) - base.Fatalf("DeepCopy Name") + switch x.Op() { + case OPACK, ONAME, ONONAME, OLITERAL, ONIL, OTYPE: + return x } - return m - - case OPACK: - // OPACK nodes are never valid in const value declarations, - // but allow them like any other declared symbol to avoid - // crashing (golang.org/issue/11361). 
- fallthrough - - case ONAME, ONONAME, OLITERAL, ONIL, OTYPE: - return n + x = Copy(x) + if pos.IsKnown() { + x.SetPos(pos) + } + EditChildren(x, edit) + return x } + return edit(n) } // DeepCopyList returns a list of deep copies (using DeepCopy) of the nodes in list. @@ -155,10 +137,3 @@ func DeepCopyList(pos src.XPos, list []Node) []Node { } return out } - -// deepCopyList edits list to point to deep copies of its elements. -func deepCopyList(pos src.XPos, list []Node) { - for i, n := range list { - list[i] = DeepCopy(pos, n) - } -} diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 42ba4cb0e98e7..c3184a3a0bd49 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -521,26 +521,6 @@ func (n *Nodes) AppendNodes(n2 *Nodes) { n2.slice = nil } -// inspect invokes f on each node in an AST in depth-first order. -// If f(n) returns false, inspect skips visiting n's children. -func Inspect(n Node, f func(Node) bool) { - if n == nil || !f(n) { - return - } - InspectList(n.Init(), f) - Inspect(n.Left(), f) - Inspect(n.Right(), f) - InspectList(n.List(), f) - InspectList(n.Body(), f) - InspectList(n.Rlist(), f) -} - -func InspectList(l Nodes, f func(Node) bool) { - for _, n := range l.Slice() { - Inspect(n, f) - } -} - // nodeQueue is a FIFO queue of *Node. The zero value of nodeQueue is // a ready-to-use empty queue. type NodeQueue struct { diff --git a/src/cmd/compile/internal/ir/visit.go b/src/cmd/compile/internal/ir/visit.go new file mode 100644 index 0000000000000..a239fd1532123 --- /dev/null +++ b/src/cmd/compile/internal/ir/visit.go @@ -0,0 +1,273 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// IR visitors for walking the IR tree. +// +// The lowest level helpers are DoChildren and EditChildren, +// which nodes help implement (TODO(rsc): eventually) and +// provide control over whether and when recursion happens +// during the walk of the IR. +// +// Although these are both useful directly, two simpler patterns +// are fairly common and also provided: Inspect and Scan. + +package ir + +import "errors" + +// DoChildren calls do(x) on each of n's non-nil child nodes x. +// If any call returns a non-nil error, DoChildren stops and returns that error. +// Otherwise, DoChildren returns nil. +// +// Note that DoChildren(n, do) only calls do(x) for n's immediate children. +// If x's children should be processed, then do(x) must call DoChildren(x, do). +// +// DoChildren allows constructing general traversals of the IR graph +// that can stop early if needed. The most general usage is: +// +// var do func(ir.Node) error +// do = func(x ir.Node) error { +// ... processing BEFORE visting children ... +// if ... should visit children ... { +// ir.DoChildren(x, do) +// ... processing AFTER visting children ... +// } +// if ... should stop parent DoChildren call from visiting siblings ... { +// return non-nil error +// } +// return nil +// } +// do(root) +// +// Since DoChildren does not generate any errors itself, if the do function +// never wants to stop the traversal, it can assume that DoChildren itself +// will always return nil, simplifying to: +// +// var do func(ir.Node) error +// do = func(x ir.Node) error { +// ... processing BEFORE visting children ... +// if ... should visit children ... { +// ir.DoChildren(x, do) +// } +// ... processing AFTER visting children ... 
+// return nil +// } +// do(root) +// +// The Inspect function illustrates a further simplification of the pattern, +// only considering processing before visiting children, and letting +// that processing decide whether children are visited at all: +// +// func Inspect(n ir.Node, inspect func(ir.Node) bool) { +// var do func(ir.Node) error +// do = func(x ir.Node) error { +// if inspect(x) { +// ir.DoChildren(x, do) +// } +// return nil +// } +// if n != nil { +// do(n) +// } +// } +// +// The Find function illustrates a different simplification of the pattern, +// visiting each node and then its children, recursively, until finding +// a node x such that find(x) returns a non-nil result, +// at which point the entire traversal stops: +// +// func Find(n ir.Node, find func(ir.Node) interface{}) interface{} { +// stop := errors.New("stop") +// var found interface{} +// var do func(ir.Node) error +// do = func(x ir.Node) error { +// if v := find(x); v != nil { +// found = v +// return stop +// } +// return DoChildren(x, do) +// } +// do(n) +// return found +// } +// +// Inspect and Find are presented above as examples of how to use +// DoChildren effectively, but of course, usage that fits within the +// simplifications captured by Inspect or Find will be best served +// by directly calling the ones provided by this package. +func DoChildren(n Node, do func(Node) error) error { + if n == nil { + return nil + } + if err := DoList(n.Init(), do); err != nil { + return err + } + if l := n.Left(); l != nil { + if err := do(l); err != nil { + return err + } + } + if r := n.Right(); r != nil { + if err := do(r); err != nil { + return err + } + } + if err := DoList(n.List(), do); err != nil { + return err + } + if err := DoList(n.Body(), do); err != nil { + return err + } + if err := DoList(n.Rlist(), do); err != nil { + return err + } + return nil +} + +// DoList calls f on each non-nil node x in the list, in list order. +// If any call returns a non-nil error, DoList stops and returns that error. +// Otherwise DoList returns nil. +// +// Note that DoList only calls do on the nodes in the list, not their children. +// If x's children should be processed, do(x) must call DoChildren(x, do) itself. +func DoList(list Nodes, do func(Node) error) error { + for _, x := range list.Slice() { + if x != nil { + if err := do(x); err != nil { + return err + } + } + } + return nil +} + +// Inspect visits each node x in the IR tree rooted at n +// in a depth-first preorder traversal, calling inspect on each node visited. +// If inspect(x) returns false, then Inspect skips over x's children. +// +// Note that the meaning of the boolean result in the callback function +// passed to Inspect differs from that of Scan. +// During Scan, if scan(x) returns false, then Scan stops the scan. +// During Inspect, if inspect(x) returns false, then Inspect skips x's children +// but continues with the remainder of the tree (x's siblings and so on). +func Inspect(n Node, inspect func(Node) bool) { + var do func(Node) error + do = func(x Node) error { + if inspect(x) { + DoChildren(x, do) + } + return nil + } + if n != nil { + do(n) + } +} + +// InspectList calls Inspect(x, inspect) for each node x in the list. +func InspectList(list Nodes, inspect func(Node) bool) { + for _, x := range list.Slice() { + Inspect(x, inspect) + } +} + +var stop = errors.New("stop") + +// Find looks for a non-nil node x in the IR tree rooted at n +// for which find(x) returns a non-nil value. 
+// Find considers nodes in a depth-first, preorder traversal. +// When Find finds a node x such that find(x) != nil, +// Find ends the traversal and returns the value of find(x) immediately. +// Otherwise Find returns nil. +func Find(n Node, find func(Node) interface{}) interface{} { + if n == nil { + return nil + } + var found interface{} + var do func(Node) error + do = func(x Node) error { + if v := find(x); v != nil { + found = v + return stop + } + return DoChildren(x, do) + } + do(n) + return found +} + +// FindList calls Find(x, ok) for each node x in the list, in order. +// If any call find(x) returns a non-nil result, FindList stops and +// returns that result, skipping the remainder of the list. +// Otherwise FindList returns nil. +func FindList(list Nodes, find func(Node) interface{}) interface{} { + for _, x := range list.Slice() { + if v := Find(x, find); v != nil { + return v + } + } + return nil +} + +// EditChildren edits the child nodes of n, replacing each child x with edit(x). +// +// Note that EditChildren(n, edit) only calls edit(x) for n's immediate children. +// If x's children should be processed, then edit(x) must call EditChildren(x, edit). +// +// EditChildren allows constructing general editing passes of the IR graph. +// The most general usage is: +// +// var edit func(ir.Node) ir.Node +// edit = func(x ir.Node) ir.Node { +// ... processing BEFORE editing children ... +// if ... should edit children ... { +// EditChildren(x, edit) +// ... processing AFTER editing children ... +// } +// ... return x ... +// } +// n = edit(n) +// +// EditChildren edits the node in place. To edit a copy, call Copy first. +// As an example, a simple deep copy implementation would be: +// +// func deepCopy(n ir.Node) ir.Node { +// var edit func(ir.Node) ir.Node +// edit = func(x ir.Node) ir.Node { +// x = ir.Copy(x) +// ir.EditChildren(x, edit) +// return x +// } +// return edit(n) +// } +// +// Of course, in this case it is better to call ir.DeepCopy than to build one anew. +func EditChildren(n Node, edit func(Node) Node) { + if n == nil { + return + } + editList(n.Init(), edit) + if l := n.Left(); l != nil { + n.SetLeft(edit(l)) + } + if r := n.Right(); r != nil { + n.SetRight(edit(r)) + } + editList(n.List(), edit) + editList(n.Body(), edit) + editList(n.Rlist(), edit) +} + +// editList calls edit on each non-nil node x in the list, +// saving the result of edit back into the list. +// +// Note that editList only calls edit on the nodes in the list, not their children. +// If x's children should be processed, edit(x) must call EditChildren(x, edit) itself. +func editList(list Nodes, edit func(Node) Node) { + s := list.Slice() + for i, x := range list.Slice() { + if x != nil { + s[i] = edit(x) + } + } +} From b9df26d7a86e0b402f4ae5fd0cb44bab46b6331e Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 2 Dec 2020 20:18:47 -0500 Subject: [PATCH 108/474] [dev.regabi] cmd/compile: use ir.Find for "search" traversals This CL converts all the generic searching traversal to use ir.Find instead of relying on direct access to Left, Right, and so on. Passes buildall w/ toolstash -cmp. 
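The conversions all follow one shape: a hand-written recursive walker
over Left, Right, and the node lists becomes a single ir.Find call whose
callback returns a non-nil value to stop the walk. As a sketch (the
predicate here is illustrative, not code from this CL):

	// hasReturn reports whether the tree rooted at n contains a return.
	func hasReturn(n ir.Node) bool {
		return ir.Find(n, func(n ir.Node) interface{} {
			if n.Op() == ir.ORETURN {
				return n
			}
			return nil
		}) != nil
	}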
Change-Id: I4d951aef630c00bf333f24be79565cc564694d04 Reviewed-on: https://go-review.googlesource.com/c/go/+/275372 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/alg.go | 37 +---- src/cmd/compile/internal/gc/const.go | 72 ++++----- src/cmd/compile/internal/gc/inl.go | 192 +++++++++-------------- src/cmd/compile/internal/gc/order.go | 9 +- src/cmd/compile/internal/gc/sinit.go | 6 +- src/cmd/compile/internal/gc/typecheck.go | 78 ++++----- src/cmd/compile/internal/gc/walk.go | 180 ++++++++++----------- 7 files changed, 240 insertions(+), 334 deletions(-) diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index b2716399a5535..c786a27415c38 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -782,37 +782,14 @@ func geneq(t *types.Type) *obj.LSym { return closure } -func hasCall(n ir.Node) bool { - if n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC { - return true - } - if n.Left() != nil && hasCall(n.Left()) { - return true - } - if n.Right() != nil && hasCall(n.Right()) { - return true - } - for _, x := range n.Init().Slice() { - if hasCall(x) { - return true - } - } - for _, x := range n.Body().Slice() { - if hasCall(x) { - return true - } - } - for _, x := range n.List().Slice() { - if hasCall(x) { - return true +func hasCall(fn *ir.Func) bool { + found := ir.Find(fn, func(n ir.Node) interface{} { + if op := n.Op(); op == ir.OCALL || op == ir.OCALLFUNC { + return n } - } - for _, x := range n.Rlist().Slice() { - if hasCall(x) { - return true - } - } - return false + return nil + }) + return found != nil } // eqfield returns the node diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 9aa65f97b6fe7..6cd414a41931a 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -553,7 +553,7 @@ func evalConst(n ir.Node) ir.Node { return origIntConst(n, int64(len(ir.StringVal(nl)))) } case types.TARRAY: - if !hascallchan(nl) { + if !hasCallOrChan(nl) { return origIntConst(n, nl.Type().NumElem()) } } @@ -779,49 +779,35 @@ func isGoConst(n ir.Node) bool { return n.Op() == ir.OLITERAL } -func hascallchan(n ir.Node) bool { - if n == nil { - return false - } - switch n.Op() { - case ir.OAPPEND, - ir.OCALL, - ir.OCALLFUNC, - ir.OCALLINTER, - ir.OCALLMETH, - ir.OCAP, - ir.OCLOSE, - ir.OCOMPLEX, - ir.OCOPY, - ir.ODELETE, - ir.OIMAG, - ir.OLEN, - ir.OMAKE, - ir.ONEW, - ir.OPANIC, - ir.OPRINT, - ir.OPRINTN, - ir.OREAL, - ir.ORECOVER, - ir.ORECV: - return true - } - - if hascallchan(n.Left()) || hascallchan(n.Right()) { - return true - } - for _, n1 := range n.List().Slice() { - if hascallchan(n1) { - return true - } - } - for _, n2 := range n.Rlist().Slice() { - if hascallchan(n2) { - return true +// hasCallOrChan reports whether n contains any calls or channel operations. +func hasCallOrChan(n ir.Node) bool { + found := ir.Find(n, func(n ir.Node) interface{} { + switch n.Op() { + case ir.OAPPEND, + ir.OCALL, + ir.OCALLFUNC, + ir.OCALLINTER, + ir.OCALLMETH, + ir.OCAP, + ir.OCLOSE, + ir.OCOMPLEX, + ir.OCOPY, + ir.ODELETE, + ir.OIMAG, + ir.OLEN, + ir.OMAKE, + ir.ONEW, + ir.OPANIC, + ir.OPRINT, + ir.OPRINTN, + ir.OREAL, + ir.ORECOVER, + ir.ORECV: + return n } - } - - return false + return nil + }) + return found != nil } // A constSet represents a set of Go constant expressions. 
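// The inl.go changes below keep ir.DoChildren rather than ir.Find where
// the visitor must accumulate state and abort with a reason. Distilled
// into a standalone sketch (the names here are illustrative, and the
// "errors" import is the one this CL adds to inl.go):
//
//	var errTooBig = errors.New("too big")
//
//	// tooBig reports whether the tree rooted at n has more than limit nodes.
//	func tooBig(n ir.Node, limit int) bool {
//		count := 0
//		var do func(ir.Node) error
//		do = func(x ir.Node) error {
//			if count++; count > limit {
//				return errTooBig
//			}
//			return ir.DoChildren(x, do)
//		}
//		return do(n) != nil
//	}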
diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index efd6fea844538..09ec0b6f99a11 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -33,6 +33,7 @@ import ( "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/src" + "errors" "fmt" "go/constant" "strings" @@ -206,14 +207,10 @@ func caninl(fn *ir.Func) { extraCallCost: cc, usedLocals: make(map[ir.Node]bool), } - if visitor.visitList(fn.Body()) { + if visitor.tooHairy(fn) { reason = visitor.reason return } - if visitor.budget < 0 { - reason = fmt.Sprintf("function too complex: cost %d exceeds budget %d", inlineMaxBudget-visitor.budget, inlineMaxBudget) - return - } n.Func().Inl = &ir.Inline{ Cost: inlineMaxBudget - visitor.budget, @@ -296,21 +293,29 @@ type hairyVisitor struct { reason string extraCallCost int32 usedLocals map[ir.Node]bool + do func(ir.Node) error } -// Look for anything we want to punt on. -func (v *hairyVisitor) visitList(ll ir.Nodes) bool { - for _, n := range ll.Slice() { - if v.visit(n) { - return true - } +var errBudget = errors.New("too expensive") + +func (v *hairyVisitor) tooHairy(fn *ir.Func) bool { + v.do = v.doNode // cache closure + + err := ir.DoChildren(fn, v.do) + if err != nil { + v.reason = err.Error() + return true + } + if v.budget < 0 { + v.reason = fmt.Sprintf("function too complex: cost %d exceeds budget %d", inlineMaxBudget-v.budget, inlineMaxBudget) + return true } return false } -func (v *hairyVisitor) visit(n ir.Node) bool { +func (v *hairyVisitor) doNode(n ir.Node) error { if n == nil { - return false + return nil } switch n.Op() { @@ -323,8 +328,7 @@ func (v *hairyVisitor) visit(n ir.Node) bool { if n.Left().Op() == ir.ONAME && n.Left().Class() == ir.PFUNC && isRuntimePkg(n.Left().Sym().Pkg) { fn := n.Left().Sym().Name if fn == "getcallerpc" || fn == "getcallersp" { - v.reason = "call to " + fn - return true + return errors.New("call to " + fn) } if fn == "throw" { v.budget -= inlineExtraThrowCost @@ -380,8 +384,7 @@ func (v *hairyVisitor) visit(n ir.Node) bool { case ir.ORECOVER: // recover matches the argument frame pointer to find // the right panic value, so it needs an argument frame. - v.reason = "call to recover" - return true + return errors.New("call to recover") case ir.OCLOSURE, ir.ORANGE, @@ -390,21 +393,19 @@ func (v *hairyVisitor) visit(n ir.Node) bool { ir.ODEFER, ir.ODCLTYPE, // can't print yet ir.ORETJMP: - v.reason = "unhandled op " + n.Op().String() - return true + return errors.New("unhandled op " + n.Op().String()) case ir.OAPPEND: v.budget -= inlineExtraAppendCost case ir.ODCLCONST, ir.OFALL: // These nodes don't produce code; omit from inlining budget. - return false + return nil case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH: // ORANGE, OSELECT in "unhandled" above if n.Sym() != nil { - v.reason = "labeled control" - return true + return errors.New("labeled control") } case ir.OBREAK, ir.OCONTINUE: @@ -416,8 +417,17 @@ func (v *hairyVisitor) visit(n ir.Node) bool { case ir.OIF: if ir.IsConst(n.Left(), constant.Bool) { // This if and the condition cost nothing. - return v.visitList(n.Init()) || v.visitList(n.Body()) || - v.visitList(n.Rlist()) + // TODO(rsc): It seems strange that we visit the dead branch. 
+ if err := ir.DoList(n.Init(), v.do); err != nil { + return err + } + if err := ir.DoList(n.Body(), v.do); err != nil { + return err + } + if err := ir.DoList(n.Rlist(), v.do); err != nil { + return err + } + return nil } case ir.ONAME: @@ -439,34 +449,22 @@ func (v *hairyVisitor) visit(n ir.Node) bool { // When debugging, don't stop early, to get full cost of inlining this function if v.budget < 0 && base.Flag.LowerM < 2 && !logopt.Enabled() { - return true + return errBudget } - return v.visit(n.Left()) || v.visit(n.Right()) || - v.visitList(n.List()) || v.visitList(n.Rlist()) || - v.visitList(n.Init()) || v.visitList(n.Body()) + return ir.DoChildren(n, v.do) } -func countNodes(n ir.Node) int { - if n == nil { - return 0 - } - cnt := 1 - cnt += countNodes(n.Left()) - cnt += countNodes(n.Right()) - for _, n1 := range n.Init().Slice() { - cnt += countNodes(n1) - } - for _, n1 := range n.Body().Slice() { - cnt += countNodes(n1) - } - for _, n1 := range n.List().Slice() { - cnt += countNodes(n1) - } - for _, n1 := range n.Rlist().Slice() { - cnt += countNodes(n1) - } - return cnt +func isBigFunc(fn *ir.Func) bool { + budget := inlineBigFunctionNodes + over := ir.Find(fn, func(n ir.Node) interface{} { + budget-- + if budget <= 0 { + return n + } + return nil + }) + return over != nil } // Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any @@ -475,7 +473,7 @@ func inlcalls(fn *ir.Func) { savefn := Curfn Curfn = fn maxCost := int32(inlineMaxBudget) - if countNodes(fn) >= inlineBigFunctionNodes { + if isBigFunc(fn) { maxCost = inlineBigFunctionMaxCost } // Map to keep track of functions that have been inlined at a particular @@ -742,82 +740,45 @@ FindRHS: base.Fatalf("RHS is nil: %v", defn) } - unsafe, _ := reassigned(n.(*ir.Name)) - if unsafe { + if reassigned(n.(*ir.Name)) { return nil } return rhs } +var errFound = errors.New("found") + // reassigned takes an ONAME node, walks the function in which it is defined, and returns a boolean // indicating whether the name has any assignments other than its declaration. // The second return value is the first such assignment encountered in the walk, if any. It is mostly // useful for -m output documenting the reason for inhibited optimizations. // NB: global variables are always considered to be re-assigned. // TODO: handle initial declaration not including an assignment and followed by a single assignment? 
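// The reassigned rewrite below uses the ir.Find idiom again: the
// callback hands the offending assignment node back to ir.Find, which
// surfaces it as the non-nil result, so the answer reduces to
//
//	ir.Find(name.Curfn, ...) != nil
//
// with the walk ending as soon as the first reassignment is found.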
-func reassigned(n *ir.Name) (bool, ir.Node) { - if n.Op() != ir.ONAME { - base.Fatalf("reassigned %v", n) +func reassigned(name *ir.Name) bool { + if name.Op() != ir.ONAME { + base.Fatalf("reassigned %v", name) } // no way to reliably check for no-reassignment of globals, assume it can be - if n.Curfn == nil { - return true, nil - } - f := n.Curfn - v := reassignVisitor{name: n} - a := v.visitList(f.Body()) - return a != nil, a -} - -type reassignVisitor struct { - name ir.Node -} - -func (v *reassignVisitor) visit(n ir.Node) ir.Node { - if n == nil { - return nil + if name.Curfn == nil { + return true } - switch n.Op() { - case ir.OAS: - if n.Left() == v.name && n != v.name.Name().Defn { - return n - } - case ir.OAS2, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2DOTTYPE: - for _, p := range n.List().Slice() { - if p == v.name && n != v.name.Name().Defn { + a := ir.Find(name.Curfn, func(n ir.Node) interface{} { + switch n.Op() { + case ir.OAS: + if n.Left() == name && n != name.Defn { return n } + case ir.OAS2, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2DOTTYPE: + for _, p := range n.List().Slice() { + if p == name && n != name.Defn { + return n + } + } } - } - if a := v.visit(n.Left()); a != nil { - return a - } - if a := v.visit(n.Right()); a != nil { - return a - } - if a := v.visitList(n.List()); a != nil { - return a - } - if a := v.visitList(n.Rlist()); a != nil { - return a - } - if a := v.visitList(n.Init()); a != nil { - return a - } - if a := v.visitList(n.Body()); a != nil { - return a - } - return nil -} - -func (v *reassignVisitor) visitList(l ir.Nodes) ir.Node { - for _, n := range l.Slice() { - if a := v.visit(n); a != nil { - return a - } - } - return nil + return nil + }) + return a != nil } func inlParam(t *types.Field, as ir.Node, inlvars map[*ir.Name]ir.Node) ir.Node { @@ -1140,6 +1101,7 @@ func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool) bases: make(map[*src.PosBase]*src.PosBase), newInlIndex: newIndex, } + subst.edit = subst.node body := subst.list(ir.AsNodes(fn.Inl.Body)) @@ -1248,6 +1210,8 @@ type inlsubst struct { // newInlIndex is the index of the inlined call frame to // insert for inlined nodes. newInlIndex int + + edit func(ir.Node) ir.Node // cached copy of subst.node method value closure } // list inlines a list of nodes. @@ -1334,21 +1298,13 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { return m } - m := ir.Copy(n) - m.SetPos(subst.updatedPos(m.Pos())) - m.PtrInit().Set(nil) - if n.Op() == ir.OCLOSURE { base.Fatalf("cannot inline function containing closure: %+v", n) } - m.SetLeft(subst.node(n.Left())) - m.SetRight(subst.node(n.Right())) - m.PtrList().Set(subst.list(n.List())) - m.PtrRlist().Set(subst.list(n.Rlist())) - m.PtrInit().Set(append(m.Init().Slice(), subst.list(n.Init())...)) - m.PtrBody().Set(subst.list(n.Body())) - + m := ir.Copy(n) + m.SetPos(subst.updatedPos(m.Pos())) + ir.EditChildren(m, subst.edit) return m } diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 5440806e8ecb7..1680d9d920781 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -1062,6 +1062,10 @@ func (o *Order) exprListInPlace(l ir.Nodes) { // prealloc[x] records the allocation to use for x. var prealloc = map[ir.Node]ir.Node{} +func (o *Order) exprNoLHS(n ir.Node) ir.Node { + return o.expr(n, nil) +} + // expr orders a single expression, appending side // effects to o.out as needed. // If this is part of an assignment lhs = *np, lhs is given. 
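// exprNoLHS above exists to adapt o.expr to the edit signature: it pins
// lhs to nil so the method value can be handed straight to the walker in
// the next hunk:
//
//	ir.EditChildren(n, o.exprNoLHS) // each child c becomes o.expr(c, nil)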
@@ -1079,10 +1083,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { switch n.Op() { default: - n.SetLeft(o.expr(n.Left(), nil)) - n.SetRight(o.expr(n.Right(), nil)) - o.exprList(n.List()) - o.exprList(n.Rlist()) + ir.EditChildren(n, o.exprNoLHS) // Addition of strings turns into a function call. // Allocate a temporary to hold the strings. diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 3ef976d8aa7d3..20abbfef8cf9c 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -60,7 +60,8 @@ func (s *InitSchedule) tryStaticInit(n ir.Node) bool { if n.Op() != ir.OAS { return false } - if ir.IsBlank(n.Left()) && candiscard(n.Right()) { + if ir.IsBlank(n.Left()) && !hasSideEffects(n.Right()) { + // Discard. return true } lno := setlineno(n) @@ -548,7 +549,8 @@ func fixedlit(ctxt initContext, kind initKind, n ir.Node, var_ ir.Node, init *ir for _, r := range n.List().Slice() { a, value := splitnode(r) - if a == ir.BlankNode && candiscard(value) { + if a == ir.BlankNode && !hasSideEffects(value) { + // Discard. continue } diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index e2100481aa6fa..a8acd468c9f6b 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -3669,51 +3669,52 @@ func checkmake(t *types.Type, arg string, np *ir.Node) bool { return true } -func markbreak(labels *map[*types.Sym]ir.Node, n ir.Node, implicit ir.Node) { - if n == nil { - return - } +// markBreak marks control statements containing break statements with SetHasBreak(true). +func markBreak(fn *ir.Func) { + var labels map[*types.Sym]ir.Node + var implicit ir.Node - switch n.Op() { - case ir.OBREAK: - if n.Sym() == nil { - if implicit != nil { - implicit.SetHasBreak(true) + var mark func(ir.Node) error + mark = func(n ir.Node) error { + switch n.Op() { + default: + ir.DoChildren(n, mark) + + case ir.OBREAK: + if n.Sym() == nil { + if implicit != nil { + implicit.SetHasBreak(true) + } + } else { + if lab := labels[n.Sym()]; lab != nil { + lab.SetHasBreak(true) + } } - } else { - if lab := (*labels)[n.Sym()]; lab != nil { - lab.SetHasBreak(true) + + case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OTYPESW, ir.OSELECT, ir.ORANGE: + old := implicit + implicit = n + sym := n.Sym() + if sym != nil { + if labels == nil { + // Map creation delayed until we need it - most functions don't. + labels = make(map[*types.Sym]ir.Node) + } + labels[sym] = n } - } - case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OTYPESW, ir.OSELECT, ir.ORANGE: - implicit = n - if sym := n.Sym(); sym != nil { - if *labels == nil { - // Map creation delayed until we need it - most functions don't. - *labels = make(map[*types.Sym]ir.Node) + ir.DoChildren(n, mark) + if sym != nil { + delete(labels, sym) } - (*labels)[sym] = n - defer delete(*labels, sym) + implicit = old } - fallthrough - default: - markbreak(labels, n.Left(), implicit) - markbreak(labels, n.Right(), implicit) - markbreaklist(labels, n.Init(), implicit) - markbreaklist(labels, n.Body(), implicit) - markbreaklist(labels, n.List(), implicit) - markbreaklist(labels, n.Rlist(), implicit) + return nil } -} -func markbreaklist(labels *map[*types.Sym]ir.Node, l ir.Nodes, implicit ir.Node) { - s := l.Slice() - for i := 0; i < len(s); i++ { - markbreak(labels, s[i], implicit) - } + mark(fn) } -// isterminating reports whether the Nodes list ends with a terminating statement. 
+// isTermNodes reports whether the Nodes list ends with a terminating statement. func isTermNodes(l ir.Nodes) bool { s := l.Slice() c := len(s) @@ -3723,7 +3724,7 @@ func isTermNodes(l ir.Nodes) bool { return isTermNode(s[c-1]) } -// Isterminating reports whether the node n, the last one in a +// isTermNode reports whether the node n, the last one in a // statement list, is a terminating statement. func isTermNode(n ir.Node) bool { switch n.Op() { @@ -3776,8 +3777,7 @@ func isTermNode(n ir.Node) bool { // checkreturn makes sure that fn terminates appropriately. func checkreturn(fn *ir.Func) { if fn.Type().NumResults() != 0 && fn.Body().Len() != 0 { - var labels map[*types.Sym]ir.Node - markbreaklist(&labels, fn.Body(), nil) + markBreak(fn) if !isTermNodes(fn.Body()) { base.ErrorfAt(fn.Endlineno, "missing return at end of function") } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 3d22c66d901c5..bbc08ab953249 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -3786,107 +3786,91 @@ func usefield(n ir.Node) { Curfn.FieldTrack[sym] = struct{}{} } -func candiscardlist(l ir.Nodes) bool { - for _, n := range l.Slice() { - if !candiscard(n) { - return false - } - } - return true -} - -func candiscard(n ir.Node) bool { - if n == nil { - return true - } - - switch n.Op() { - default: - return false - - // Discardable as long as the subpieces are. - case ir.ONAME, - ir.ONONAME, - ir.OTYPE, - ir.OPACK, - ir.OLITERAL, - ir.ONIL, - ir.OADD, - ir.OSUB, - ir.OOR, - ir.OXOR, - ir.OADDSTR, - ir.OADDR, - ir.OANDAND, - ir.OBYTES2STR, - ir.ORUNES2STR, - ir.OSTR2BYTES, - ir.OSTR2RUNES, - ir.OCAP, - ir.OCOMPLIT, - ir.OMAPLIT, - ir.OSTRUCTLIT, - ir.OARRAYLIT, - ir.OSLICELIT, - ir.OPTRLIT, - ir.OCONV, - ir.OCONVIFACE, - ir.OCONVNOP, - ir.ODOT, - ir.OEQ, - ir.ONE, - ir.OLT, - ir.OLE, - ir.OGT, - ir.OGE, - ir.OKEY, - ir.OSTRUCTKEY, - ir.OLEN, - ir.OMUL, - ir.OLSH, - ir.ORSH, - ir.OAND, - ir.OANDNOT, - ir.ONEW, - ir.ONOT, - ir.OBITNOT, - ir.OPLUS, - ir.ONEG, - ir.OOROR, - ir.OPAREN, - ir.ORUNESTR, - ir.OREAL, - ir.OIMAG, - ir.OCOMPLEX: - break +// hasSideEffects reports whether n contains any operations that could have observable side effects. +func hasSideEffects(n ir.Node) bool { + found := ir.Find(n, func(n ir.Node) interface{} { + switch n.Op() { + // Assume side effects unless we know otherwise. + default: + return n + + // No side effects here (arguments are checked separately). + case ir.ONAME, + ir.ONONAME, + ir.OTYPE, + ir.OPACK, + ir.OLITERAL, + ir.ONIL, + ir.OADD, + ir.OSUB, + ir.OOR, + ir.OXOR, + ir.OADDSTR, + ir.OADDR, + ir.OANDAND, + ir.OBYTES2STR, + ir.ORUNES2STR, + ir.OSTR2BYTES, + ir.OSTR2RUNES, + ir.OCAP, + ir.OCOMPLIT, + ir.OMAPLIT, + ir.OSTRUCTLIT, + ir.OARRAYLIT, + ir.OSLICELIT, + ir.OPTRLIT, + ir.OCONV, + ir.OCONVIFACE, + ir.OCONVNOP, + ir.ODOT, + ir.OEQ, + ir.ONE, + ir.OLT, + ir.OLE, + ir.OGT, + ir.OGE, + ir.OKEY, + ir.OSTRUCTKEY, + ir.OLEN, + ir.OMUL, + ir.OLSH, + ir.ORSH, + ir.OAND, + ir.OANDNOT, + ir.ONEW, + ir.ONOT, + ir.OBITNOT, + ir.OPLUS, + ir.ONEG, + ir.OOROR, + ir.OPAREN, + ir.ORUNESTR, + ir.OREAL, + ir.OIMAG, + ir.OCOMPLEX: + return nil + + // Only possible side effect is division by zero. + case ir.ODIV, ir.OMOD: + if n.Right().Op() != ir.OLITERAL || constant.Sign(n.Right().Val()) == 0 { + return n + } - // Discardable as long as we know it's not division by zero. 
- case ir.ODIV, ir.OMOD: - if n.Right().Op() == ir.OLITERAL && constant.Sign(n.Right().Val()) != 0 { - break - } - return false + // Only possible side effect is panic on invalid size, + // but many makechan and makemap use size zero, which is definitely OK. + case ir.OMAKECHAN, ir.OMAKEMAP: + if !ir.IsConst(n.Left(), constant.Int) || constant.Sign(n.Left().Val()) != 0 { + return n + } - // Discardable as long as we know it won't fail because of a bad size. - case ir.OMAKECHAN, ir.OMAKEMAP: - if ir.IsConst(n.Left(), constant.Int) && constant.Sign(n.Left().Val()) == 0 { - break + // Only possible side effect is panic on invalid size. + // TODO(rsc): Merge with previous case (probably breaks toolstash -cmp). + case ir.OMAKESLICE, ir.OMAKESLICECOPY: + return n } - return false - - // Difficult to tell what sizes are okay. - case ir.OMAKESLICE: - return false - - case ir.OMAKESLICECOPY: - return false - } - - if !candiscard(n.Left()) || !candiscard(n.Right()) || !candiscardlist(n.Init()) || !candiscardlist(n.Body()) || !candiscardlist(n.List()) || !candiscardlist(n.Rlist()) { - return false - } - - return true + return nil + }) + return found != nil } // Rewrite From d855b30fe48fe108921733c8d86e42063a5c601f Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 3 Dec 2020 14:06:41 -0500 Subject: [PATCH 109/474] [dev.regabi] cmd/compile: use ir.EditChildren for inline rewriting This CL rephrases the general inlining rewriter in terms of ir.EditChildren. It is the final part of the code that was processing arbitrary nodes using Left, SetLeft, and so on. After this CL, there should be none left except for the implementations of DoChildren and EditChildren, which fall next. Passes buildall w/ toolstash -cmp. Change-Id: I9c36053360cd040710716f0b39397a80114be713 Reviewed-on: https://go-review.googlesource.com/c/go/+/275373 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/escape.go | 1 + src/cmd/compile/internal/gc/inl.go | 103 ++++++----------------- src/cmd/compile/internal/gc/typecheck.go | 5 ++ src/cmd/compile/internal/ir/expr.go | 12 +++ 4 files changed, 46 insertions(+), 75 deletions(-) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 622edb98203c9..32bc7b297b0c4 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -803,6 +803,7 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) { switch call.Op() { default: + ir.Dump("esc", call) base.Fatalf("unexpected call op: %v", call.Op()) case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 09ec0b6f99a11..840285242404c 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -483,10 +483,11 @@ func inlcalls(fn *ir.Func) { // Most likely, the inlining will stop before we even hit the beginning of // the cycle again, but the map catches the unusual case. 
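// The edit closure introduced just below is self-referential: inlnode
// receives edit so that it can hand the same function back to
// ir.EditChildren for the recursive descent:
//
//	var edit func(ir.Node) ir.Node
//	edit = func(n ir.Node) ir.Node {
//		return inlnode(n, maxCost, inlMap, edit)
//	}
//
// Declaring the variable before assigning it is what lets the closure
// body capture itself.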
inlMap := make(map[*ir.Func]bool) - fn = inlnode(fn, maxCost, inlMap).(*ir.Func) - if fn != Curfn { - base.Fatalf("inlnode replaced curfn") + var edit func(ir.Node) ir.Node + edit = func(n ir.Node) ir.Node { + return inlnode(n, maxCost, inlMap, edit) } + ir.EditChildren(fn, edit) Curfn = savefn } @@ -521,13 +522,6 @@ func inlconv2list(n ir.Node) []ir.Node { return s } -func inlnodelist(l ir.Nodes, maxCost int32, inlMap map[*ir.Func]bool) { - s := l.Slice() - for i := range s { - s[i] = inlnode(s[i], maxCost, inlMap) - } -} - // inlnode recurses over the tree to find inlineable calls, which will // be turned into OINLCALLs by mkinlcall. When the recursion comes // back up will examine left, right, list, rlist, ninit, ntest, nincr, @@ -541,7 +535,7 @@ func inlnodelist(l ir.Nodes, maxCost int32, inlMap map[*ir.Func]bool) { // shorter and less complicated. // The result of inlnode MUST be assigned back to n, e.g. // n.Left = inlnode(n.Left) -func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool) ir.Node { +func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.Node) ir.Node) ir.Node { if n == nil { return n } @@ -567,49 +561,7 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool) ir.Node { lno := setlineno(n) - inlnodelist(n.Init(), maxCost, inlMap) - init := n.Init().Slice() - for i, n1 := range init { - if n1.Op() == ir.OINLCALL { - init[i] = inlconv2stmt(n1) - } - } - - n.SetLeft(inlnode(n.Left(), maxCost, inlMap)) - if n.Left() != nil && n.Left().Op() == ir.OINLCALL { - n.SetLeft(inlconv2expr(n.Left())) - } - - n.SetRight(inlnode(n.Right(), maxCost, inlMap)) - if n.Right() != nil && n.Right().Op() == ir.OINLCALL { - if n.Op() == ir.OFOR || n.Op() == ir.OFORUNTIL { - n.SetRight(inlconv2stmt(n.Right())) - } else { - n.SetRight(inlconv2expr(n.Right())) - } - } - - inlnodelist(n.List(), maxCost, inlMap) - s := n.List().Slice() - convert := inlconv2expr - if n.Op() == ir.OBLOCK { - convert = inlconv2stmt - } - for i, n1 := range s { - if n1 != nil && n1.Op() == ir.OINLCALL { - s[i] = convert(n1) - } - } - - inlnodelist(n.Body(), maxCost, inlMap) - s = n.Body().Slice() - for i, n1 := range s { - if n1.Op() == ir.OINLCALL { - s[i] = inlconv2stmt(n1) - } - } - - inlnodelist(n.Rlist(), maxCost, inlMap) + ir.EditChildren(n, edit) if n.Op() == ir.OAS2FUNC && n.Rlist().First().Op() == ir.OINLCALL { n.PtrRlist().Set(inlconv2list(n.Rlist().First())) @@ -618,17 +570,6 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool) ir.Node { n = typecheck(n, ctxStmt) } - s = n.Rlist().Slice() - for i, n1 := range s { - if n1.Op() == ir.OINLCALL { - if n.Op() == ir.OIF { - s[i] = inlconv2stmt(n1) - } else { - s[i] = inlconv2expr(n1) - } - } - } - // with all the branches out of the way, it is now time to // transmogrify this node itself unless inhibited by the // switch at the top of this function. 
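// The per-position inlconv2expr/inlconv2stmt calls deleted above do not
// disappear; they are replaced by a single dispatch on how the call's
// result is used, added near the end of inlnode (abridged):
//
//	switch call.(*ir.CallExpr).Use {
//	case ir.CallUseExpr:
//		n = inlconv2expr(n)
//	case ir.CallUseStmt:
//		n = inlconv2stmt(n)
//	case ir.CallUseList:
//		// leave for caller to convert
//	}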
@@ -639,8 +580,10 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool) ir.Node { } } + var call ir.Node switch n.Op() { case ir.OCALLFUNC: + call = n if base.Flag.LowerM > 3 { fmt.Printf("%v:call to func %+v\n", ir.Line(n), n.Left()) } @@ -648,10 +591,11 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool) ir.Node { break } if fn := inlCallee(n.Left()); fn != nil && fn.Inl != nil { - n = mkinlcall(n, fn, maxCost, inlMap) + n = mkinlcall(n, fn, maxCost, inlMap, edit) } case ir.OCALLMETH: + call = n if base.Flag.LowerM > 3 { fmt.Printf("%v:call to meth %L\n", ir.Line(n), n.Left().Right()) } @@ -661,10 +605,25 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool) ir.Node { base.Fatalf("no function type for [%p] %+v\n", n.Left(), n.Left()) } - n = mkinlcall(n, methodExprName(n.Left()).Func(), maxCost, inlMap) + n = mkinlcall(n, methodExprName(n.Left()).Func(), maxCost, inlMap, edit) } base.Pos = lno + + if n.Op() == ir.OINLCALL { + switch call.(*ir.CallExpr).Use { + default: + ir.Dump("call", call) + base.Fatalf("call missing use") + case ir.CallUseExpr: + n = inlconv2expr(n) + case ir.CallUseStmt: + n = inlconv2stmt(n) + case ir.CallUseList: + // leave for caller to convert + } + } + return n } @@ -805,7 +764,7 @@ var inlgen int // parameters. // The result of mkinlcall MUST be assigned back to n, e.g. // n.Left = mkinlcall(n.Left, fn, isddd) -func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool) ir.Node { +func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.Node) ir.Node) ir.Node { if fn.Inl == nil { if logopt.Enabled() { logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(Curfn), @@ -1131,13 +1090,7 @@ func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool) // instead we emit the things that the body needs // and each use must redo the inlining. // luckily these are small. 
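// Here the explicit loop over call.Body() collapses to
//
//	ir.EditChildren(call, edit)
//
// since the OINLCALL-to-expression/statement conversion now happens
// inside inlnode itself, keyed by CallUse, instead of at every caller
// that walks a list.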
- inlnodelist(call.Body(), maxCost, inlMap) - s := call.Body().Slice() - for i, n1 := range s { - if n1.Op() == ir.OINLCALL { - s[i] = inlconv2stmt(n1) - } - } + ir.EditChildren(call, edit) if base.Flag.LowerM > 2 { fmt.Printf("%v: After inlining %+v\n\n", ir.Line(call), call) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index a8acd468c9f6b..65c5f2abce006 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -1280,6 +1280,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { // call and call like case ir.OCALL: + n.(*ir.CallExpr).Use = ir.CallUseExpr + if top == ctxStmt { + n.(*ir.CallExpr).Use = ir.CallUseStmt + } typecheckslice(n.Init().Slice(), ctxStmt) // imported rewritten f(g()) calls (#30907) n.SetLeft(typecheck(n.Left(), ctxExpr|ctxType|ctxCallee)) if n.Left().Diag() { @@ -3294,6 +3298,7 @@ func typecheckas2(n ir.Node) { if cr != cl { goto mismatch } + r.(*ir.CallExpr).Use = ir.CallUseList n.SetOp(ir.OAS2FUNC) for i, l := range n.List().Slice() { f := r.Type().Field(i) diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 49543f4286f1e..9600d13d8e666 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -148,6 +148,17 @@ func (n *BinaryExpr) SetOp(op Op) { } } +// A CallUse records how the result of the call is used: +type CallUse int + +const ( + _ CallUse = iota + + CallUseExpr // single expression result is used + CallUseList // list of results are used + CallUseStmt // results not used - call is a statement +) + // A CallExpr is a function call X(Args). type CallExpr struct { miniExpr @@ -157,6 +168,7 @@ type CallExpr struct { Rargs Nodes // TODO(rsc): Delete. body Nodes // TODO(rsc): Delete. DDD bool + Use CallUse noInline bool } From 18f2df7e810ac221d05577b746f2bf4e3cd789f4 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 3 Dec 2020 18:43:18 -0500 Subject: [PATCH 110/474] [dev.regabi] cmd/compile: implement copy for nodes Put each node in charge of making copies of its own slices. This removes a generic use of Body, SetBody, and so on in func Copy, heading toward removing those even from being used in package ir. Passes buildall w/ toolstash -cmp. Change-Id: I249b7fe54cf72e9d2f0467b10f3f257abf9b29b9 Reviewed-on: https://go-review.googlesource.com/c/go/+/275374 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/copy.go | 26 +- src/cmd/compile/internal/ir/expr.go | 404 ++++++++++++++++++---------- src/cmd/compile/internal/ir/func.go | 2 +- src/cmd/compile/internal/ir/name.go | 4 +- src/cmd/compile/internal/ir/node.go | 19 +- src/cmd/compile/internal/ir/stmt.go | 310 +++++++++++++-------- src/cmd/compile/internal/ir/type.go | 48 +++- 7 files changed, 532 insertions(+), 281 deletions(-) diff --git a/src/cmd/compile/internal/ir/copy.go b/src/cmd/compile/internal/ir/copy.go index 2f340df1abae2..8d174d6e5386c 100644 --- a/src/cmd/compile/internal/ir/copy.go +++ b/src/cmd/compile/internal/ir/copy.go @@ -43,7 +43,7 @@ func Orig(n Node) Node { // SepCopy returns a separate shallow copy of n, // breaking any Orig link to any other nodes. func SepCopy(n Node) Node { - n = n.rawCopy() + n = n.copy() if n, ok := n.(OrigNode); ok { n.SetOrig(n) } @@ -57,29 +57,11 @@ func SepCopy(n Node) Node { // The specific semantics surrounding Orig are subtle but right for most uses. // See issues #26855 and #27765 for pitfalls. 
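// Every concrete node type now supplies its own copy method; the
// pattern, repeated throughout expr.go below, is (T, init, and list
// stand in for the concrete type and its Nodes fields):
//
//	func (n *T) copy() Node {
//		c := *n                // shallow copy of the struct
//		c.init = c.init.Copy() // fresh backing array per Nodes field
//		c.list = c.list.Copy()
//		return &c
//	}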
func Copy(n Node) Node { - copy := n.rawCopy() + c := n.copy() if n, ok := n.(OrigNode); ok && n.Orig() == n { - copy.(OrigNode).SetOrig(copy) + c.(OrigNode).SetOrig(c) } - - // Copy lists so that updates to n.List[0] - // don't affect copy.List[0] and vice versa, - // same as updates to Left and Right. - // TODO(rsc): Eventually the Node implementations will need to do this. - if l := copy.List(); l.Len() > 0 { - copy.SetList(copyList(l)) - } - if l := copy.Rlist(); l.Len() > 0 { - copy.SetRlist(copyList(l)) - } - if l := copy.Init(); l.Len() > 0 { - copy.SetInit(copyList(l)) - } - if l := copy.Body(); l.Len() > 0 { - copy.SetBody(copyList(l)) - } - - return copy + return c } func copyList(x Nodes) Nodes { diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 9600d13d8e666..7431a56d944d0 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -76,10 +76,16 @@ func NewAddStringExpr(pos src.XPos, list []Node) *AddStringExpr { func (n *AddStringExpr) String() string { return fmt.Sprint(n) } func (n *AddStringExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *AddStringExpr) rawCopy() Node { c := *n; return &c } -func (n *AddStringExpr) List() Nodes { return n.list } -func (n *AddStringExpr) PtrList() *Nodes { return &n.list } -func (n *AddStringExpr) SetList(x Nodes) { n.list = x } +func (n *AddStringExpr) copy() Node { + c := *n + c.init = c.init.Copy() + c.list = c.list.Copy() + return &c +} + +func (n *AddStringExpr) List() Nodes { return n.list } +func (n *AddStringExpr) PtrList() *Nodes { return &n.list } +func (n *AddStringExpr) SetList(x Nodes) { n.list = x } // An AddrExpr is an address-of expression &X. // It may end up being a normal address-of or an allocation of a composite literal. 
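// A few nodes keep the one-line shallow copy; ConstExpr further down,
// for instance, copies no lists at all:
//
//	func (n *ConstExpr) copy() Node { c := *n; return &c }
//
// presumably because a ConstExpr never carries an init list or other
// child slices.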
@@ -98,11 +104,16 @@ func NewAddrExpr(pos src.XPos, x Node) *AddrExpr { func (n *AddrExpr) String() string { return fmt.Sprint(n) } func (n *AddrExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *AddrExpr) rawCopy() Node { c := *n; return &c } -func (n *AddrExpr) Left() Node { return n.X } -func (n *AddrExpr) SetLeft(x Node) { n.X = x } -func (n *AddrExpr) Right() Node { return n.Alloc } -func (n *AddrExpr) SetRight(x Node) { n.Alloc = x } +func (n *AddrExpr) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} + +func (n *AddrExpr) Left() Node { return n.X } +func (n *AddrExpr) SetLeft(x Node) { n.X = x } +func (n *AddrExpr) Right() Node { return n.Alloc } +func (n *AddrExpr) SetRight(x Node) { n.Alloc = x } func (n *AddrExpr) SetOp(op Op) { switch op { @@ -130,11 +141,16 @@ func NewBinaryExpr(pos src.XPos, op Op, x, y Node) *BinaryExpr { func (n *BinaryExpr) String() string { return fmt.Sprint(n) } func (n *BinaryExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *BinaryExpr) rawCopy() Node { c := *n; return &c } -func (n *BinaryExpr) Left() Node { return n.X } -func (n *BinaryExpr) SetLeft(x Node) { n.X = x } -func (n *BinaryExpr) Right() Node { return n.Y } -func (n *BinaryExpr) SetRight(y Node) { n.Y = y } +func (n *BinaryExpr) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} + +func (n *BinaryExpr) Left() Node { return n.X } +func (n *BinaryExpr) SetLeft(x Node) { n.X = x } +func (n *BinaryExpr) Right() Node { return n.Y } +func (n *BinaryExpr) SetRight(y Node) { n.Y = y } func (n *BinaryExpr) SetOp(op Op) { switch op { @@ -183,24 +199,32 @@ func NewCallExpr(pos src.XPos, fun Node, args []Node) *CallExpr { func (n *CallExpr) String() string { return fmt.Sprint(n) } func (n *CallExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *CallExpr) rawCopy() Node { c := *n; return &c } -func (n *CallExpr) Orig() Node { return n.orig } -func (n *CallExpr) SetOrig(x Node) { n.orig = x } -func (n *CallExpr) Left() Node { return n.X } -func (n *CallExpr) SetLeft(x Node) { n.X = x } -func (n *CallExpr) List() Nodes { return n.Args } -func (n *CallExpr) PtrList() *Nodes { return &n.Args } -func (n *CallExpr) SetList(x Nodes) { n.Args = x } -func (n *CallExpr) Rlist() Nodes { return n.Rargs } -func (n *CallExpr) PtrRlist() *Nodes { return &n.Rargs } -func (n *CallExpr) SetRlist(x Nodes) { n.Rargs = x } -func (n *CallExpr) IsDDD() bool { return n.DDD } -func (n *CallExpr) SetIsDDD(x bool) { n.DDD = x } -func (n *CallExpr) NoInline() bool { return n.noInline } -func (n *CallExpr) SetNoInline(x bool) { n.noInline = x } -func (n *CallExpr) Body() Nodes { return n.body } -func (n *CallExpr) PtrBody() *Nodes { return &n.body } -func (n *CallExpr) SetBody(x Nodes) { n.body = x } +func (n *CallExpr) copy() Node { + c := *n + c.init = c.init.Copy() + c.Args = c.Args.Copy() + c.Rargs = c.Rargs.Copy() + c.body = c.body.Copy() + return &c +} + +func (n *CallExpr) Orig() Node { return n.orig } +func (n *CallExpr) SetOrig(x Node) { n.orig = x } +func (n *CallExpr) Left() Node { return n.X } +func (n *CallExpr) SetLeft(x Node) { n.X = x } +func (n *CallExpr) List() Nodes { return n.Args } +func (n *CallExpr) PtrList() *Nodes { return &n.Args } +func (n *CallExpr) SetList(x Nodes) { n.Args = x } +func (n *CallExpr) Rlist() Nodes { return n.Rargs } +func (n *CallExpr) PtrRlist() *Nodes { return &n.Rargs } +func (n *CallExpr) SetRlist(x Nodes) { n.Rargs = x } +func (n *CallExpr) IsDDD() bool { return n.DDD } +func (n *CallExpr) 
SetIsDDD(x bool) { n.DDD = x } +func (n *CallExpr) NoInline() bool { return n.noInline } +func (n *CallExpr) SetNoInline(x bool) { n.noInline = x } +func (n *CallExpr) Body() Nodes { return n.body } +func (n *CallExpr) PtrBody() *Nodes { return &n.body } +func (n *CallExpr) SetBody(x Nodes) { n.body = x } func (n *CallExpr) SetOp(op Op) { switch op { @@ -231,11 +255,16 @@ func NewCallPartExpr(pos src.XPos, x Node, method *types.Field, fn *Func) *CallP func (n *CallPartExpr) String() string { return fmt.Sprint(n) } func (n *CallPartExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *CallPartExpr) rawCopy() Node { c := *n; return &c } -func (n *CallPartExpr) Func() *Func { return n.fn } -func (n *CallPartExpr) Left() Node { return n.X } -func (n *CallPartExpr) Sym() *types.Sym { return n.Method.Sym } -func (n *CallPartExpr) SetLeft(x Node) { n.X = x } +func (n *CallPartExpr) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} + +func (n *CallPartExpr) Func() *Func { return n.fn } +func (n *CallPartExpr) Left() Node { return n.X } +func (n *CallPartExpr) Sym() *types.Sym { return n.Method.Sym } +func (n *CallPartExpr) SetLeft(x Node) { n.X = x } // A ClosureExpr is a function literal expression. type ClosureExpr struct { @@ -252,8 +281,13 @@ func NewClosureExpr(pos src.XPos, fn *Func) *ClosureExpr { func (n *ClosureExpr) String() string { return fmt.Sprint(n) } func (n *ClosureExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ClosureExpr) rawCopy() Node { c := *n; return &c } -func (n *ClosureExpr) Func() *Func { return n.fn } +func (n *ClosureExpr) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} + +func (n *ClosureExpr) Func() *Func { return n.fn } // A ClosureRead denotes reading a variable stored within a closure struct. type ClosureRead struct { @@ -270,9 +304,14 @@ func NewClosureRead(typ *types.Type, offset int64) *ClosureRead { func (n *ClosureRead) String() string { return fmt.Sprint(n) } func (n *ClosureRead) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ClosureRead) rawCopy() Node { c := *n; return &c } -func (n *ClosureRead) Type() *types.Type { return n.typ } -func (n *ClosureRead) Offset() int64 { return n.offset } +func (n *ClosureRead) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} + +func (n *ClosureRead) Type() *types.Type { return n.typ } +func (n *ClosureRead) Offset() int64 { return n.offset } // A CompLitExpr is a composite literal Type{Vals}. // Before type-checking, the type is Ntype. 
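// Callers still go through ir.Copy, which after this CL reduces to the
// per-type copy plus the Orig bookkeeping:
//
//	c := n.copy()
//	if n, ok := n.(OrigNode); ok && n.Orig() == n {
//		c.(OrigNode).SetOrig(c)
//	}
//	return c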
@@ -294,14 +333,20 @@ func NewCompLitExpr(pos src.XPos, typ Ntype, list []Node) *CompLitExpr { func (n *CompLitExpr) String() string { return fmt.Sprint(n) } func (n *CompLitExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *CompLitExpr) rawCopy() Node { c := *n; return &c } -func (n *CompLitExpr) Orig() Node { return n.orig } -func (n *CompLitExpr) SetOrig(x Node) { n.orig = x } -func (n *CompLitExpr) Right() Node { return n.Ntype } -func (n *CompLitExpr) SetRight(x Node) { n.Ntype = toNtype(x) } -func (n *CompLitExpr) List() Nodes { return n.list } -func (n *CompLitExpr) PtrList() *Nodes { return &n.list } -func (n *CompLitExpr) SetList(x Nodes) { n.list = x } +func (n *CompLitExpr) copy() Node { + c := *n + c.init = c.init.Copy() + c.list = c.list.Copy() + return &c +} + +func (n *CompLitExpr) Orig() Node { return n.orig } +func (n *CompLitExpr) SetOrig(x Node) { n.orig = x } +func (n *CompLitExpr) Right() Node { return n.Ntype } +func (n *CompLitExpr) SetRight(x Node) { n.Ntype = toNtype(x) } +func (n *CompLitExpr) List() Nodes { return n.list } +func (n *CompLitExpr) PtrList() *Nodes { return &n.list } +func (n *CompLitExpr) SetList(x Nodes) { n.list = x } func (n *CompLitExpr) SetOp(op Op) { switch op { @@ -330,11 +375,12 @@ func NewConstExpr(val constant.Value, orig Node) Node { func (n *ConstExpr) String() string { return fmt.Sprint(n) } func (n *ConstExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ConstExpr) rawCopy() Node { c := *n; return &c } -func (n *ConstExpr) Sym() *types.Sym { return n.orig.Sym() } -func (n *ConstExpr) Orig() Node { return n.orig } -func (n *ConstExpr) SetOrig(orig Node) { panic(n.no("SetOrig")) } -func (n *ConstExpr) Val() constant.Value { return n.val } +func (n *ConstExpr) copy() Node { c := *n; return &c } + +func (n *ConstExpr) Sym() *types.Sym { return n.orig.Sym() } +func (n *ConstExpr) Orig() Node { return n.orig } +func (n *ConstExpr) SetOrig(orig Node) { panic(n.no("SetOrig")) } +func (n *ConstExpr) Val() constant.Value { return n.val } // A ConvExpr is a conversion Type(X). // It may end up being a value or a type. 
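// A quick way to check the new semantics on a list-bearing node
// (sketch; a, b, x, pos, and typ are placeholders):
//
//	n := ir.NewCompLitExpr(pos, typ, []ir.Node{a, b})
//	c := ir.Copy(n).(*ir.CompLitExpr)
//	c.List().Slice()[0] = x // n.List().First() is still a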
@@ -355,9 +401,15 @@ func NewConvExpr(pos src.XPos, op Op, typ *types.Type, x Node) *ConvExpr { func (n *ConvExpr) String() string { return fmt.Sprint(n) } func (n *ConvExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ConvExpr) rawCopy() Node { c := *n; return &c } -func (n *ConvExpr) Left() Node { return n.X } -func (n *ConvExpr) SetLeft(x Node) { n.X = x } +func (n *ConvExpr) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} + +func (n *ConvExpr) rawCopy() Node { c := *n; return &c } +func (n *ConvExpr) Left() Node { return n.X } +func (n *ConvExpr) SetLeft(x Node) { n.X = x } func (n *ConvExpr) SetOp(op Op) { switch op { @@ -385,13 +437,18 @@ func NewIndexExpr(pos src.XPos, x, index Node) *IndexExpr { func (n *IndexExpr) String() string { return fmt.Sprint(n) } func (n *IndexExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *IndexExpr) rawCopy() Node { c := *n; return &c } -func (n *IndexExpr) Left() Node { return n.X } -func (n *IndexExpr) SetLeft(x Node) { n.X = x } -func (n *IndexExpr) Right() Node { return n.Index } -func (n *IndexExpr) SetRight(y Node) { n.Index = y } -func (n *IndexExpr) IndexMapLValue() bool { return n.Assigned } -func (n *IndexExpr) SetIndexMapLValue(x bool) { n.Assigned = x } +func (n *IndexExpr) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} + +func (n *IndexExpr) Left() Node { return n.X } +func (n *IndexExpr) SetLeft(x Node) { n.X = x } +func (n *IndexExpr) Right() Node { return n.Index } +func (n *IndexExpr) SetRight(y Node) { n.Index = y } +func (n *IndexExpr) IndexMapLValue() bool { return n.Assigned } +func (n *IndexExpr) SetIndexMapLValue(x bool) { n.Assigned = x } func (n *IndexExpr) SetOp(op Op) { switch op { @@ -422,15 +479,20 @@ func NewKeyExpr(pos src.XPos, key, value Node) *KeyExpr { func (n *KeyExpr) String() string { return fmt.Sprint(n) } func (n *KeyExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *KeyExpr) rawCopy() Node { c := *n; return &c } -func (n *KeyExpr) Left() Node { return n.Key } -func (n *KeyExpr) SetLeft(x Node) { n.Key = x } -func (n *KeyExpr) Right() Node { return n.Value } -func (n *KeyExpr) SetRight(y Node) { n.Value = y } -func (n *KeyExpr) Sym() *types.Sym { return n.sym } -func (n *KeyExpr) SetSym(x *types.Sym) { n.sym = x } -func (n *KeyExpr) Offset() int64 { return n.offset } -func (n *KeyExpr) SetOffset(x int64) { n.offset = x } +func (n *KeyExpr) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} + +func (n *KeyExpr) Left() Node { return n.Key } +func (n *KeyExpr) SetLeft(x Node) { n.Key = x } +func (n *KeyExpr) Right() Node { return n.Value } +func (n *KeyExpr) SetRight(y Node) { n.Value = y } +func (n *KeyExpr) Sym() *types.Sym { return n.sym } +func (n *KeyExpr) SetSym(x *types.Sym) { n.sym = x } +func (n *KeyExpr) Offset() int64 { return n.offset } +func (n *KeyExpr) SetOffset(x int64) { n.offset = x } func (n *KeyExpr) SetOp(op Op) { switch op { @@ -459,13 +521,20 @@ func NewInlinedCallExpr(pos src.XPos, body, retvars []Node) *InlinedCallExpr { func (n *InlinedCallExpr) String() string { return fmt.Sprint(n) } func (n *InlinedCallExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *InlinedCallExpr) rawCopy() Node { c := *n; return &c } -func (n *InlinedCallExpr) Body() Nodes { return n.body } -func (n *InlinedCallExpr) PtrBody() *Nodes { return &n.body } -func (n *InlinedCallExpr) SetBody(x Nodes) { n.body = x } -func (n *InlinedCallExpr) Rlist() Nodes { return n.ReturnVars } -func (n 
*InlinedCallExpr) PtrRlist() *Nodes { return &n.ReturnVars } -func (n *InlinedCallExpr) SetRlist(x Nodes) { n.ReturnVars = x } +func (n *InlinedCallExpr) copy() Node { + c := *n + c.init = c.init.Copy() + c.body = c.body.Copy() + c.ReturnVars = c.ReturnVars.Copy() + return &c +} + +func (n *InlinedCallExpr) Body() Nodes { return n.body } +func (n *InlinedCallExpr) PtrBody() *Nodes { return &n.body } +func (n *InlinedCallExpr) SetBody(x Nodes) { n.body = x } +func (n *InlinedCallExpr) Rlist() Nodes { return n.ReturnVars } +func (n *InlinedCallExpr) PtrRlist() *Nodes { return &n.ReturnVars } +func (n *InlinedCallExpr) SetRlist(x Nodes) { n.ReturnVars = x } // A MakeExpr is a make expression: make(Type[, Len[, Cap]]). // Op is OMAKECHAN, OMAKEMAP, OMAKESLICE, or OMAKESLICECOPY, @@ -485,11 +554,16 @@ func NewMakeExpr(pos src.XPos, op Op, len, cap Node) *MakeExpr { func (n *MakeExpr) String() string { return fmt.Sprint(n) } func (n *MakeExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *MakeExpr) rawCopy() Node { c := *n; return &c } -func (n *MakeExpr) Left() Node { return n.Len } -func (n *MakeExpr) SetLeft(x Node) { n.Len = x } -func (n *MakeExpr) Right() Node { return n.Cap } -func (n *MakeExpr) SetRight(x Node) { n.Cap = x } +func (n *MakeExpr) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} + +func (n *MakeExpr) Left() Node { return n.Len } +func (n *MakeExpr) SetLeft(x Node) { n.Len = x } +func (n *MakeExpr) Right() Node { return n.Cap } +func (n *MakeExpr) SetRight(x Node) { n.Cap = x } func (n *MakeExpr) SetOp(op Op) { switch op { @@ -521,17 +595,22 @@ func NewMethodExpr(pos src.XPos, op Op, x, m Node) *MethodExpr { func (n *MethodExpr) String() string { return fmt.Sprint(n) } func (n *MethodExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *MethodExpr) rawCopy() Node { c := *n; return &c } -func (n *MethodExpr) Left() Node { return n.X } -func (n *MethodExpr) SetLeft(x Node) { n.X = x } -func (n *MethodExpr) Right() Node { return n.M } -func (n *MethodExpr) SetRight(y Node) { n.M = y } -func (n *MethodExpr) Sym() *types.Sym { return n.sym } -func (n *MethodExpr) SetSym(x *types.Sym) { n.sym = x } -func (n *MethodExpr) Offset() int64 { return n.offset } -func (n *MethodExpr) SetOffset(x int64) { n.offset = x } -func (n *MethodExpr) Class() Class { return n.class } -func (n *MethodExpr) SetClass(x Class) { n.class = x } +func (n *MethodExpr) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} + +func (n *MethodExpr) Left() Node { return n.X } +func (n *MethodExpr) SetLeft(x Node) { n.X = x } +func (n *MethodExpr) Right() Node { return n.M } +func (n *MethodExpr) SetRight(y Node) { n.M = y } +func (n *MethodExpr) Sym() *types.Sym { return n.sym } +func (n *MethodExpr) SetSym(x *types.Sym) { n.sym = x } +func (n *MethodExpr) Offset() int64 { return n.offset } +func (n *MethodExpr) SetOffset(x int64) { n.offset = x } +func (n *MethodExpr) Class() Class { return n.class } +func (n *MethodExpr) SetClass(x Class) { n.class = x } // A NilExpr represents the predefined untyped constant nil. // (It may be copied and assigned a type, though.) 
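SetOp on each concrete node (CompLitExpr, IndexExpr, MakeExpr, and so on above) validates the Op against the small set that node may legally carry and panics otherwise, so a miswired rewrite fails at the mutation site rather than much later. A sketch of the idiom with stand-in declarations, not the compiler's:

package main

import "fmt"

type Op int

const (
	OMAKECHAN Op = iota // stand-ins for the compiler's Op constants
	OMAKEMAP
	OADD
)

type MakeExpr struct{ op Op }

// SetOp mirrors the validation pattern above: only the Ops this node
// can legally carry are accepted; anything else panics immediately.
func (n *MakeExpr) SetOp(op Op) {
	switch op {
	default:
		panic(fmt.Sprintf("invalid Op %d for MakeExpr", op))
	case OMAKECHAN, OMAKEMAP:
		n.op = op
	}
}

func main() {
	var n MakeExpr
	n.SetOp(OMAKEMAP) // ok
	fmt.Println(n.op)
	n.SetOp(OADD) // panics: MakeExpr never carries OADD
}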
@@ -549,9 +628,14 @@ func NewNilExpr(pos src.XPos) *NilExpr { func (n *NilExpr) String() string { return fmt.Sprint(n) } func (n *NilExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *NilExpr) rawCopy() Node { c := *n; return &c } -func (n *NilExpr) Sym() *types.Sym { return n.sym } -func (n *NilExpr) SetSym(x *types.Sym) { n.sym = x } +func (n *NilExpr) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} + +func (n *NilExpr) Sym() *types.Sym { return n.sym } +func (n *NilExpr) SetSym(x *types.Sym) { n.sym = x } // A ParenExpr is a parenthesized expression (X). // It may end up being a value or a type. @@ -569,9 +653,14 @@ func NewParenExpr(pos src.XPos, x Node) *ParenExpr { func (n *ParenExpr) String() string { return fmt.Sprint(n) } func (n *ParenExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ParenExpr) rawCopy() Node { c := *n; return &c } -func (n *ParenExpr) Left() Node { return n.X } -func (n *ParenExpr) SetLeft(x Node) { n.X = x } +func (n *ParenExpr) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} + +func (n *ParenExpr) Left() Node { return n.X } +func (n *ParenExpr) SetLeft(x Node) { n.X = x } func (*ParenExpr) CanBeNtype() {} @@ -599,9 +688,14 @@ func NewResultExpr(pos src.XPos, typ *types.Type, offset int64) *ResultExpr { func (n *ResultExpr) String() string { return fmt.Sprint(n) } func (n *ResultExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ResultExpr) rawCopy() Node { c := *n; return &c } -func (n *ResultExpr) Offset() int64 { return n.offset } -func (n *ResultExpr) SetOffset(x int64) { n.offset = x } +func (n *ResultExpr) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} + +func (n *ResultExpr) Offset() int64 { return n.offset } +func (n *ResultExpr) SetOffset(x int64) { n.offset = x } // A SelectorExpr is a selector expression X.Sym. type SelectorExpr struct { @@ -631,13 +725,18 @@ func (n *SelectorExpr) SetOp(op Op) { func (n *SelectorExpr) String() string { return fmt.Sprint(n) } func (n *SelectorExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *SelectorExpr) rawCopy() Node { c := *n; return &c } -func (n *SelectorExpr) Left() Node { return n.X } -func (n *SelectorExpr) SetLeft(x Node) { n.X = x } -func (n *SelectorExpr) Sym() *types.Sym { return n.Sel } -func (n *SelectorExpr) SetSym(x *types.Sym) { n.Sel = x } -func (n *SelectorExpr) Offset() int64 { return n.offset } -func (n *SelectorExpr) SetOffset(x int64) { n.offset = x } +func (n *SelectorExpr) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} + +func (n *SelectorExpr) Left() Node { return n.X } +func (n *SelectorExpr) SetLeft(x Node) { n.X = x } +func (n *SelectorExpr) Sym() *types.Sym { return n.Sel } +func (n *SelectorExpr) SetSym(x *types.Sym) { n.Sel = x } +func (n *SelectorExpr) Offset() int64 { return n.offset } +func (n *SelectorExpr) SetOffset(x int64) { n.offset = x } // Before type-checking, bytes.Buffer is a SelectorExpr. // After type-checking it becomes a Name. 
@@ -659,12 +758,18 @@ func NewSliceExpr(pos src.XPos, op Op, x Node) *SliceExpr { func (n *SliceExpr) String() string { return fmt.Sprint(n) } func (n *SliceExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *SliceExpr) rawCopy() Node { c := *n; return &c } -func (n *SliceExpr) Left() Node { return n.X } -func (n *SliceExpr) SetLeft(x Node) { n.X = x } -func (n *SliceExpr) List() Nodes { return n.list } -func (n *SliceExpr) PtrList() *Nodes { return &n.list } -func (n *SliceExpr) SetList(x Nodes) { n.list = x } +func (n *SliceExpr) copy() Node { + c := *n + c.init = c.init.Copy() + c.list = c.list.Copy() + return &c +} + +func (n *SliceExpr) Left() Node { return n.X } +func (n *SliceExpr) SetLeft(x Node) { n.X = x } +func (n *SliceExpr) List() Nodes { return n.list } +func (n *SliceExpr) PtrList() *Nodes { return &n.list } +func (n *SliceExpr) SetList(x Nodes) { n.list = x } func (n *SliceExpr) SetOp(op Op) { switch op { @@ -761,12 +866,17 @@ func NewSliceHeaderExpr(pos src.XPos, typ *types.Type, ptr, len, cap Node) *Slic func (n *SliceHeaderExpr) String() string { return fmt.Sprint(n) } func (n *SliceHeaderExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *SliceHeaderExpr) rawCopy() Node { c := *n; return &c } -func (n *SliceHeaderExpr) Left() Node { return n.Ptr } -func (n *SliceHeaderExpr) SetLeft(x Node) { n.Ptr = x } -func (n *SliceHeaderExpr) List() Nodes { return n.lenCap } -func (n *SliceHeaderExpr) PtrList() *Nodes { return &n.lenCap } -func (n *SliceHeaderExpr) SetList(x Nodes) { n.lenCap = x } +func (n *SliceHeaderExpr) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} + +func (n *SliceHeaderExpr) Left() Node { return n.Ptr } +func (n *SliceHeaderExpr) SetLeft(x Node) { n.Ptr = x } +func (n *SliceHeaderExpr) List() Nodes { return n.lenCap } +func (n *SliceHeaderExpr) PtrList() *Nodes { return &n.lenCap } +func (n *SliceHeaderExpr) SetList(x Nodes) { n.lenCap = x } // A StarExpr is a dereference expression *X. // It may end up being a value or a type. 
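ParenExpr above and StarExpr below keep their CanBeNtype marker: a bodyless method implemented only by the expression forms that may still resolve to a type after parsing. A sketch of the marker-interface idiom, again with stand-in types:

package main

import "fmt"

type Node interface{ node() }

// Ntype is satisfied only by nodes that may still turn out to name a type.
type Ntype interface {
	Node
	CanBeNtype()
}

type StarExpr struct{ X Node }     // *X: may be a deref or a pointer type
type IndexExpr struct{ X, I Node } // X[I]: always a value in pre-generics Go

func (*StarExpr) node()  {}
func (*IndexExpr) node() {}

// Only StarExpr gets the marker.
func (*StarExpr) CanBeNtype() {}

func main() {
	for _, n := range []Node{new(StarExpr), new(IndexExpr)} {
		_, ok := n.(Ntype)
		fmt.Printf("%T can be a type: %v\n", n, ok)
	}
}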
@@ -784,9 +894,14 @@ func NewStarExpr(pos src.XPos, x Node) *StarExpr { func (n *StarExpr) String() string { return fmt.Sprint(n) } func (n *StarExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *StarExpr) rawCopy() Node { c := *n; return &c } -func (n *StarExpr) Left() Node { return n.X } -func (n *StarExpr) SetLeft(x Node) { n.X = x } +func (n *StarExpr) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} + +func (n *StarExpr) Left() Node { return n.X } +func (n *StarExpr) SetLeft(x Node) { n.X = x } func (*StarExpr) CanBeNtype() {} @@ -828,14 +943,20 @@ func NewTypeAssertExpr(pos src.XPos, x Node, typ Ntype) *TypeAssertExpr { func (n *TypeAssertExpr) String() string { return fmt.Sprint(n) } func (n *TypeAssertExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *TypeAssertExpr) rawCopy() Node { c := *n; return &c } -func (n *TypeAssertExpr) Left() Node { return n.X } -func (n *TypeAssertExpr) SetLeft(x Node) { n.X = x } -func (n *TypeAssertExpr) Right() Node { return n.Ntype } -func (n *TypeAssertExpr) SetRight(x Node) { n.Ntype = x } // TODO: toNtype(x) -func (n *TypeAssertExpr) List() Nodes { return n.Itab } -func (n *TypeAssertExpr) PtrList() *Nodes { return &n.Itab } -func (n *TypeAssertExpr) SetList(x Nodes) { n.Itab = x } +func (n *TypeAssertExpr) copy() Node { + c := *n + c.init = c.init.Copy() + c.Itab = c.Itab.Copy() + return &c +} + +func (n *TypeAssertExpr) Left() Node { return n.X } +func (n *TypeAssertExpr) SetLeft(x Node) { n.X = x } +func (n *TypeAssertExpr) Right() Node { return n.Ntype } +func (n *TypeAssertExpr) SetRight(x Node) { n.Ntype = x } // TODO: toNtype(x) +func (n *TypeAssertExpr) List() Nodes { return n.Itab } +func (n *TypeAssertExpr) PtrList() *Nodes { return &n.Itab } +func (n *TypeAssertExpr) SetList(x Nodes) { n.Itab = x } func (n *TypeAssertExpr) SetOp(op Op) { switch op { @@ -862,9 +983,14 @@ func NewUnaryExpr(pos src.XPos, op Op, x Node) *UnaryExpr { func (n *UnaryExpr) String() string { return fmt.Sprint(n) } func (n *UnaryExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *UnaryExpr) rawCopy() Node { c := *n; return &c } -func (n *UnaryExpr) Left() Node { return n.X } -func (n *UnaryExpr) SetLeft(x Node) { n.X = x } +func (n *UnaryExpr) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} + +func (n *UnaryExpr) Left() Node { return n.X } +func (n *UnaryExpr) SetLeft(x Node) { n.X = x } func (n *UnaryExpr) SetOp(op Op) { switch op { diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 98830fb502d64..ae803cd6a557e 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -117,7 +117,7 @@ func NewFunc(pos src.XPos) *Func { func (f *Func) String() string { return fmt.Sprint(f) } func (f *Func) Format(s fmt.State, verb rune) { FmtNode(f, s, verb) } -func (f *Func) rawCopy() Node { panic(f.no("rawCopy")) } +func (f *Func) copy() Node { panic(f.no("copy")) } func (f *Func) Func() *Func { return f } func (f *Func) Body() Nodes { return f.body } func (f *Func) PtrBody() *Nodes { return &f.body } diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 67d4d2b391121..dc8c58e4f4009 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -151,7 +151,7 @@ func newNameAt(pos src.XPos, op Op, sym *types.Sym) *Name { func (n *Name) String() string { return fmt.Sprint(n) } func (n *Name) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } 
-func (n *Name) rawCopy() Node { c := *n; return &c } +func (n *Name) copy() Node { c := *n; return &c } func (n *Name) Name() *Name { return n } func (n *Name) Sym() *types.Sym { return n.sym } func (n *Name) SetSym(x *types.Sym) { n.sym = x } @@ -323,7 +323,7 @@ type PkgName struct { func (p *PkgName) String() string { return fmt.Sprint(p) } func (p *PkgName) Format(s fmt.State, verb rune) { FmtNode(p, s, verb) } -func (p *PkgName) rawCopy() Node { c := *p; return &c } +func (p *PkgName) copy() Node { c := *p; return &c } func (p *PkgName) Sym() *types.Sym { return p.sym } func (*PkgName) CanBeNtype() {} diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index c3184a3a0bd49..705eb9e47ed00 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -27,8 +27,8 @@ type Node interface { Pos() src.XPos SetPos(x src.XPos) - // For making copies. Mainly used by Copy and SepCopy. - rawCopy() Node + // For making copies. For Copy and SepCopy. + copy() Node // Abstract graph structure, for generic traversals. Op() Op @@ -521,6 +521,21 @@ func (n *Nodes) AppendNodes(n2 *Nodes) { n2.slice = nil } +// Copy returns a copy of the content of the slice. +func (n Nodes) Copy() Nodes { + var c Nodes + if n.slice == nil { + return c + } + c.slice = new([]Node) + if *n.slice == nil { + return c + } + *c.slice = make([]Node, n.Len()) + copy(*c.slice, n.Slice()) + return c +} + // nodeQueue is a FIFO queue of *Node. The zero value of nodeQueue is // a ready-to-use empty queue. type NodeQueue struct { diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index a6bbab48896b7..5af6a62cf2b57 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -31,7 +31,7 @@ func NewDecl(pos src.XPos, op Op, x Node) *Decl { func (n *Decl) String() string { return fmt.Sprint(n) } func (n *Decl) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *Decl) rawCopy() Node { c := *n; return &c } +func (n *Decl) copy() Node { c := *n; return &c } func (n *Decl) Left() Node { return n.X } func (n *Decl) SetLeft(x Node) { n.X = x } @@ -70,7 +70,13 @@ func NewAssignListStmt(pos src.XPos, lhs, rhs []Node) *AssignListStmt { func (n *AssignListStmt) String() string { return fmt.Sprint(n) } func (n *AssignListStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *AssignListStmt) rawCopy() Node { c := *n; return &c } +func (n *AssignListStmt) copy() Node { + c := *n + c.init = c.init.Copy() + c.Lhs = c.Lhs.Copy() + c.Rhs = c.Rhs.Copy() + return &c +} func (n *AssignListStmt) List() Nodes { return n.Lhs } func (n *AssignListStmt) PtrList() *Nodes { return &n.Lhs } @@ -112,7 +118,11 @@ func NewAssignStmt(pos src.XPos, x, y Node) *AssignStmt { func (n *AssignStmt) String() string { return fmt.Sprint(n) } func (n *AssignStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *AssignStmt) rawCopy() Node { c := *n; return &c } +func (n *AssignStmt) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} func (n *AssignStmt) Left() Node { return n.X } func (n *AssignStmt) SetLeft(x Node) { n.X = x } @@ -151,7 +161,11 @@ func NewAssignOpStmt(pos src.XPos, op Op, x, y Node) *AssignOpStmt { func (n *AssignOpStmt) String() string { return fmt.Sprint(n) } func (n *AssignOpStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *AssignOpStmt) rawCopy() Node { c := *n; return &c } +func (n *AssignOpStmt) copy() Node { + c := *n + c.init = c.init.Copy() + 
return &c +} func (n *AssignOpStmt) Left() Node { return n.X } func (n *AssignOpStmt) SetLeft(x Node) { n.X = x } @@ -180,10 +194,16 @@ func NewBlockStmt(pos src.XPos, list []Node) *BlockStmt { func (n *BlockStmt) String() string { return fmt.Sprint(n) } func (n *BlockStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *BlockStmt) rawCopy() Node { c := *n; return &c } -func (n *BlockStmt) List() Nodes { return n.list } -func (n *BlockStmt) PtrList() *Nodes { return &n.list } -func (n *BlockStmt) SetList(x Nodes) { n.list = x } +func (n *BlockStmt) copy() Node { + c := *n + c.init = c.init.Copy() + c.list = c.list.Copy() + return &c +} + +func (n *BlockStmt) List() Nodes { return n.list } +func (n *BlockStmt) PtrList() *Nodes { return &n.list } +func (n *BlockStmt) SetList(x Nodes) { n.list = x } // A BranchStmt is a break, continue, fallthrough, or goto statement. // @@ -209,9 +229,14 @@ func NewBranchStmt(pos src.XPos, op Op, label *types.Sym) *BranchStmt { func (n *BranchStmt) String() string { return fmt.Sprint(n) } func (n *BranchStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *BranchStmt) rawCopy() Node { c := *n; return &c } -func (n *BranchStmt) Sym() *types.Sym { return n.Label } -func (n *BranchStmt) SetSym(sym *types.Sym) { n.Label = sym } +func (n *BranchStmt) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} + +func (n *BranchStmt) Sym() *types.Sym { return n.Label } +func (n *BranchStmt) SetSym(sym *types.Sym) { n.Label = sym } // A CaseStmt is a case statement in a switch or select: case List: Body. type CaseStmt struct { @@ -233,18 +258,26 @@ func NewCaseStmt(pos src.XPos, list, body []Node) *CaseStmt { func (n *CaseStmt) String() string { return fmt.Sprint(n) } func (n *CaseStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *CaseStmt) rawCopy() Node { c := *n; return &c } -func (n *CaseStmt) List() Nodes { return n.list } -func (n *CaseStmt) PtrList() *Nodes { return &n.list } -func (n *CaseStmt) SetList(x Nodes) { n.list = x } -func (n *CaseStmt) Body() Nodes { return n.body } -func (n *CaseStmt) PtrBody() *Nodes { return &n.body } -func (n *CaseStmt) SetBody(x Nodes) { n.body = x } -func (n *CaseStmt) Rlist() Nodes { return n.Vars } -func (n *CaseStmt) PtrRlist() *Nodes { return &n.Vars } -func (n *CaseStmt) SetRlist(x Nodes) { n.Vars = x } -func (n *CaseStmt) Left() Node { return n.Comm } -func (n *CaseStmt) SetLeft(x Node) { n.Comm = x } +func (n *CaseStmt) copy() Node { + c := *n + c.init = c.init.Copy() + c.Vars = c.Vars.Copy() + c.list = c.list.Copy() + c.body = c.body.Copy() + return &c +} + +func (n *CaseStmt) List() Nodes { return n.list } +func (n *CaseStmt) PtrList() *Nodes { return &n.list } +func (n *CaseStmt) SetList(x Nodes) { n.list = x } +func (n *CaseStmt) Body() Nodes { return n.body } +func (n *CaseStmt) PtrBody() *Nodes { return &n.body } +func (n *CaseStmt) SetBody(x Nodes) { n.body = x } +func (n *CaseStmt) Rlist() Nodes { return n.Vars } +func (n *CaseStmt) PtrRlist() *Nodes { return &n.Vars } +func (n *CaseStmt) SetRlist(x Nodes) { n.Vars = x } +func (n *CaseStmt) Left() Node { return n.Comm } +func (n *CaseStmt) SetLeft(x Node) { n.Comm = x } // A DeferStmt is a defer statement: defer Call. 
type DeferStmt struct { @@ -261,7 +294,11 @@ func NewDeferStmt(pos src.XPos, call Node) *DeferStmt { func (n *DeferStmt) String() string { return fmt.Sprint(n) } func (n *DeferStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *DeferStmt) rawCopy() Node { c := *n; return &c } +func (n *DeferStmt) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} func (n *DeferStmt) Left() Node { return n.Call } func (n *DeferStmt) SetLeft(x Node) { n.Call = x } @@ -289,21 +326,28 @@ func NewForStmt(pos src.XPos, init []Node, cond, post Node, body []Node) *ForStm func (n *ForStmt) String() string { return fmt.Sprint(n) } func (n *ForStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ForStmt) rawCopy() Node { c := *n; return &c } -func (n *ForStmt) Sym() *types.Sym { return n.Label } -func (n *ForStmt) SetSym(x *types.Sym) { n.Label = x } -func (n *ForStmt) Left() Node { return n.Cond } -func (n *ForStmt) SetLeft(x Node) { n.Cond = x } -func (n *ForStmt) Right() Node { return n.Post } -func (n *ForStmt) SetRight(x Node) { n.Post = x } -func (n *ForStmt) Body() Nodes { return n.body } -func (n *ForStmt) PtrBody() *Nodes { return &n.body } -func (n *ForStmt) SetBody(x Nodes) { n.body = x } -func (n *ForStmt) List() Nodes { return n.Late } -func (n *ForStmt) PtrList() *Nodes { return &n.Late } -func (n *ForStmt) SetList(x Nodes) { n.Late = x } -func (n *ForStmt) HasBreak() bool { return n.hasBreak } -func (n *ForStmt) SetHasBreak(b bool) { n.hasBreak = b } +func (n *ForStmt) copy() Node { + c := *n + c.init = c.init.Copy() + c.Late = c.Late.Copy() + c.body = c.body.Copy() + return &c +} + +func (n *ForStmt) Sym() *types.Sym { return n.Label } +func (n *ForStmt) SetSym(x *types.Sym) { n.Label = x } +func (n *ForStmt) Left() Node { return n.Cond } +func (n *ForStmt) SetLeft(x Node) { n.Cond = x } +func (n *ForStmt) Right() Node { return n.Post } +func (n *ForStmt) SetRight(x Node) { n.Post = x } +func (n *ForStmt) Body() Nodes { return n.body } +func (n *ForStmt) PtrBody() *Nodes { return &n.body } +func (n *ForStmt) SetBody(x Nodes) { n.body = x } +func (n *ForStmt) List() Nodes { return n.Late } +func (n *ForStmt) PtrList() *Nodes { return &n.Late } +func (n *ForStmt) SetList(x Nodes) { n.Late = x } +func (n *ForStmt) HasBreak() bool { return n.hasBreak } +func (n *ForStmt) SetHasBreak(b bool) { n.hasBreak = b } func (n *ForStmt) SetOp(op Op) { if op != OFOR && op != OFORUNTIL { @@ -327,7 +371,11 @@ func NewGoStmt(pos src.XPos, call Node) *GoStmt { func (n *GoStmt) String() string { return fmt.Sprint(n) } func (n *GoStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *GoStmt) rawCopy() Node { c := *n; return &c } +func (n *GoStmt) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} func (n *GoStmt) Left() Node { return n.Call } func (n *GoStmt) SetLeft(x Node) { n.Call = x } @@ -352,17 +400,24 @@ func NewIfStmt(pos src.XPos, cond Node, body, els []Node) *IfStmt { func (n *IfStmt) String() string { return fmt.Sprint(n) } func (n *IfStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *IfStmt) rawCopy() Node { c := *n; return &c } -func (n *IfStmt) Left() Node { return n.Cond } -func (n *IfStmt) SetLeft(x Node) { n.Cond = x } -func (n *IfStmt) Body() Nodes { return n.body } -func (n *IfStmt) PtrBody() *Nodes { return &n.body } -func (n *IfStmt) SetBody(x Nodes) { n.body = x } -func (n *IfStmt) Rlist() Nodes { return n.Else } -func (n *IfStmt) PtrRlist() *Nodes { return &n.Else } -func (n *IfStmt) SetRlist(x Nodes) 
{ n.Else = x } -func (n *IfStmt) Likely() bool { return n.likely } -func (n *IfStmt) SetLikely(x bool) { n.likely = x } +func (n *IfStmt) copy() Node { + c := *n + c.init = c.init.Copy() + c.body = c.body.Copy() + c.Else = c.Else.Copy() + return &c +} + +func (n *IfStmt) Left() Node { return n.Cond } +func (n *IfStmt) SetLeft(x Node) { n.Cond = x } +func (n *IfStmt) Body() Nodes { return n.body } +func (n *IfStmt) PtrBody() *Nodes { return &n.body } +func (n *IfStmt) SetBody(x Nodes) { n.body = x } +func (n *IfStmt) Rlist() Nodes { return n.Else } +func (n *IfStmt) PtrRlist() *Nodes { return &n.Else } +func (n *IfStmt) SetRlist(x Nodes) { n.Else = x } +func (n *IfStmt) Likely() bool { return n.likely } +func (n *IfStmt) SetLikely(x bool) { n.likely = x } // An InlineMarkStmt is a marker placed just before an inlined body. type InlineMarkStmt struct { @@ -379,9 +434,14 @@ func NewInlineMarkStmt(pos src.XPos, index int64) *InlineMarkStmt { func (n *InlineMarkStmt) String() string { return fmt.Sprint(n) } func (n *InlineMarkStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *InlineMarkStmt) rawCopy() Node { c := *n; return &c } -func (n *InlineMarkStmt) Offset() int64 { return n.Index } -func (n *InlineMarkStmt) SetOffset(x int64) { n.Index = x } +func (n *InlineMarkStmt) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} + +func (n *InlineMarkStmt) Offset() int64 { return n.Index } +func (n *InlineMarkStmt) SetOffset(x int64) { n.Index = x } // A LabelStmt is a label statement (just the label, not including the statement it labels). type LabelStmt struct { @@ -398,9 +458,14 @@ func NewLabelStmt(pos src.XPos, label *types.Sym) *LabelStmt { func (n *LabelStmt) String() string { return fmt.Sprint(n) } func (n *LabelStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *LabelStmt) rawCopy() Node { c := *n; return &c } -func (n *LabelStmt) Sym() *types.Sym { return n.Label } -func (n *LabelStmt) SetSym(x *types.Sym) { n.Label = x } +func (n *LabelStmt) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} + +func (n *LabelStmt) Sym() *types.Sym { return n.Label } +func (n *LabelStmt) SetSym(x *types.Sym) { n.Label = x } // A RangeStmt is a range loop: for Vars = range X { Stmts } // Op can be OFOR or OFORUNTIL (!Cond). 
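The Nodes.Copy helper added to node.go above has to distinguish a zero Nodes (nil slice pointer) from a pointer to a nil slice, and each shape round-trips through Copy unchanged. A standalone analogue over strings, assuming nothing beyond what that method itself shows:

package main

import "fmt"

// nodes mimics ir.Nodes: a (possibly nil) pointer to a (possibly nil) slice.
type nodes struct{ slice *[]string }

func (n nodes) Copy() nodes {
	var c nodes
	if n.slice == nil { // zero value: no pointer at all
		return c
	}
	c.slice = new([]string)
	if *n.slice == nil { // pointer to a nil slice: preserve that shape too
		return c
	}
	*c.slice = make([]string, len(*n.slice))
	copy(*c.slice, *n.slice)
	return c
}

func main() {
	var zero nodes
	fmt.Println(zero.Copy().slice == nil) // true

	set := nodes{slice: &[]string{"a", "b"}}
	dup := set.Copy()
	(*dup.slice)[0] = "changed"
	fmt.Println((*set.slice)[0]) // a: the backing arrays are independent
}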
@@ -426,23 +491,30 @@ func NewRangeStmt(pos src.XPos, vars []Node, x Node, body []Node) *RangeStmt { func (n *RangeStmt) String() string { return fmt.Sprint(n) } func (n *RangeStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *RangeStmt) rawCopy() Node { c := *n; return &c } -func (n *RangeStmt) Sym() *types.Sym { return n.Label } -func (n *RangeStmt) SetSym(x *types.Sym) { n.Label = x } -func (n *RangeStmt) Right() Node { return n.X } -func (n *RangeStmt) SetRight(x Node) { n.X = x } -func (n *RangeStmt) Body() Nodes { return n.body } -func (n *RangeStmt) PtrBody() *Nodes { return &n.body } -func (n *RangeStmt) SetBody(x Nodes) { n.body = x } -func (n *RangeStmt) List() Nodes { return n.Vars } -func (n *RangeStmt) PtrList() *Nodes { return &n.Vars } -func (n *RangeStmt) SetList(x Nodes) { n.Vars = x } -func (n *RangeStmt) HasBreak() bool { return n.hasBreak } -func (n *RangeStmt) SetHasBreak(b bool) { n.hasBreak = b } -func (n *RangeStmt) Colas() bool { return n.Def } -func (n *RangeStmt) SetColas(b bool) { n.Def = b } -func (n *RangeStmt) Type() *types.Type { return n.typ } -func (n *RangeStmt) SetType(x *types.Type) { n.typ = x } +func (n *RangeStmt) copy() Node { + c := *n + c.init = c.init.Copy() + c.Vars = c.Vars.Copy() + c.body = c.body.Copy() + return &c +} + +func (n *RangeStmt) Sym() *types.Sym { return n.Label } +func (n *RangeStmt) SetSym(x *types.Sym) { n.Label = x } +func (n *RangeStmt) Right() Node { return n.X } +func (n *RangeStmt) SetRight(x Node) { n.X = x } +func (n *RangeStmt) Body() Nodes { return n.body } +func (n *RangeStmt) PtrBody() *Nodes { return &n.body } +func (n *RangeStmt) SetBody(x Nodes) { n.body = x } +func (n *RangeStmt) List() Nodes { return n.Vars } +func (n *RangeStmt) PtrList() *Nodes { return &n.Vars } +func (n *RangeStmt) SetList(x Nodes) { n.Vars = x } +func (n *RangeStmt) HasBreak() bool { return n.hasBreak } +func (n *RangeStmt) SetHasBreak(b bool) { n.hasBreak = b } +func (n *RangeStmt) Colas() bool { return n.Def } +func (n *RangeStmt) SetColas(b bool) { n.Def = b } +func (n *RangeStmt) Type() *types.Type { return n.typ } +func (n *RangeStmt) SetType(x *types.Type) { n.typ = x } // A ReturnStmt is a return statement. type ReturnStmt struct { @@ -462,13 +534,19 @@ func NewReturnStmt(pos src.XPos, results []Node) *ReturnStmt { func (n *ReturnStmt) String() string { return fmt.Sprint(n) } func (n *ReturnStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ReturnStmt) rawCopy() Node { c := *n; return &c } -func (n *ReturnStmt) Orig() Node { return n.orig } -func (n *ReturnStmt) SetOrig(x Node) { n.orig = x } -func (n *ReturnStmt) List() Nodes { return n.Results } -func (n *ReturnStmt) PtrList() *Nodes { return &n.Results } -func (n *ReturnStmt) SetList(x Nodes) { n.Results = x } -func (n *ReturnStmt) IsDDD() bool { return false } // typecheckargs asks +func (n *ReturnStmt) copy() Node { + c := *n + c.init = c.init.Copy() + c.Results = c.Results.Copy() + return &c +} + +func (n *ReturnStmt) Orig() Node { return n.orig } +func (n *ReturnStmt) SetOrig(x Node) { n.orig = x } +func (n *ReturnStmt) List() Nodes { return n.Results } +func (n *ReturnStmt) PtrList() *Nodes { return &n.Results } +func (n *ReturnStmt) SetList(x Nodes) { n.Results = x } +func (n *ReturnStmt) IsDDD() bool { return false } // typecheckargs asks // A SelectStmt is a block: { Cases }. 
type SelectStmt struct { @@ -491,17 +569,24 @@ func NewSelectStmt(pos src.XPos, cases []Node) *SelectStmt { func (n *SelectStmt) String() string { return fmt.Sprint(n) } func (n *SelectStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *SelectStmt) rawCopy() Node { c := *n; return &c } -func (n *SelectStmt) List() Nodes { return n.Cases } -func (n *SelectStmt) PtrList() *Nodes { return &n.Cases } -func (n *SelectStmt) SetList(x Nodes) { n.Cases = x } -func (n *SelectStmt) Sym() *types.Sym { return n.Label } -func (n *SelectStmt) SetSym(x *types.Sym) { n.Label = x } -func (n *SelectStmt) HasBreak() bool { return n.hasBreak } -func (n *SelectStmt) SetHasBreak(x bool) { n.hasBreak = x } -func (n *SelectStmt) Body() Nodes { return n.Compiled } -func (n *SelectStmt) PtrBody() *Nodes { return &n.Compiled } -func (n *SelectStmt) SetBody(x Nodes) { n.Compiled = x } +func (n *SelectStmt) copy() Node { + c := *n + c.init = c.init.Copy() + c.Cases = c.Cases.Copy() + c.Compiled = c.Compiled.Copy() + return &c +} + +func (n *SelectStmt) List() Nodes { return n.Cases } +func (n *SelectStmt) PtrList() *Nodes { return &n.Cases } +func (n *SelectStmt) SetList(x Nodes) { n.Cases = x } +func (n *SelectStmt) Sym() *types.Sym { return n.Label } +func (n *SelectStmt) SetSym(x *types.Sym) { n.Label = x } +func (n *SelectStmt) HasBreak() bool { return n.hasBreak } +func (n *SelectStmt) SetHasBreak(x bool) { n.hasBreak = x } +func (n *SelectStmt) Body() Nodes { return n.Compiled } +func (n *SelectStmt) PtrBody() *Nodes { return &n.Compiled } +func (n *SelectStmt) SetBody(x Nodes) { n.Compiled = x } // A SendStmt is a send statement: X <- Y. type SendStmt struct { @@ -519,7 +604,11 @@ func NewSendStmt(pos src.XPos, ch, value Node) *SendStmt { func (n *SendStmt) String() string { return fmt.Sprint(n) } func (n *SendStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *SendStmt) rawCopy() Node { c := *n; return &c } +func (n *SendStmt) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} func (n *SendStmt) Left() Node { return n.Chan } func (n *SendStmt) SetLeft(x Node) { n.Chan = x } @@ -548,19 +637,26 @@ func NewSwitchStmt(pos src.XPos, tag Node, cases []Node) *SwitchStmt { func (n *SwitchStmt) String() string { return fmt.Sprint(n) } func (n *SwitchStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *SwitchStmt) rawCopy() Node { c := *n; return &c } -func (n *SwitchStmt) Left() Node { return n.Tag } -func (n *SwitchStmt) SetLeft(x Node) { n.Tag = x } -func (n *SwitchStmt) List() Nodes { return n.Cases } -func (n *SwitchStmt) PtrList() *Nodes { return &n.Cases } -func (n *SwitchStmt) SetList(x Nodes) { n.Cases = x } -func (n *SwitchStmt) Body() Nodes { return n.Compiled } -func (n *SwitchStmt) PtrBody() *Nodes { return &n.Compiled } -func (n *SwitchStmt) SetBody(x Nodes) { n.Compiled = x } -func (n *SwitchStmt) Sym() *types.Sym { return n.Label } -func (n *SwitchStmt) SetSym(x *types.Sym) { n.Label = x } -func (n *SwitchStmt) HasBreak() bool { return n.hasBreak } -func (n *SwitchStmt) SetHasBreak(x bool) { n.hasBreak = x } +func (n *SwitchStmt) copy() Node { + c := *n + c.init = c.init.Copy() + c.Cases = c.Cases.Copy() + c.Compiled = c.Compiled.Copy() + return &c +} + +func (n *SwitchStmt) Left() Node { return n.Tag } +func (n *SwitchStmt) SetLeft(x Node) { n.Tag = x } +func (n *SwitchStmt) List() Nodes { return n.Cases } +func (n *SwitchStmt) PtrList() *Nodes { return &n.Cases } +func (n *SwitchStmt) SetList(x Nodes) { n.Cases = x } +func (n 
*SwitchStmt) Body() Nodes { return n.Compiled } +func (n *SwitchStmt) PtrBody() *Nodes { return &n.Compiled } +func (n *SwitchStmt) SetBody(x Nodes) { n.Compiled = x } +func (n *SwitchStmt) Sym() *types.Sym { return n.Label } +func (n *SwitchStmt) SetSym(x *types.Sym) { n.Label = x } +func (n *SwitchStmt) HasBreak() bool { return n.hasBreak } +func (n *SwitchStmt) SetHasBreak(x bool) { n.hasBreak = x } // A TypeSwitchGuard is the [Name :=] X.(type) in a type switch. type TypeSwitchGuard struct { @@ -581,7 +677,7 @@ func NewTypeSwitchGuard(pos src.XPos, name, x Node) *TypeSwitchGuard { func (n *TypeSwitchGuard) String() string { return fmt.Sprint(n) } func (n *TypeSwitchGuard) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *TypeSwitchGuard) rawCopy() Node { c := *n; return &c } +func (n *TypeSwitchGuard) copy() Node { c := *n; return &c } func (n *TypeSwitchGuard) Left() Node { if n.name == nil { diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go index d2f5bb9239949..a8af99034d223 100644 --- a/src/cmd/compile/internal/ir/type.go +++ b/src/cmd/compile/internal/ir/type.go @@ -74,7 +74,7 @@ func NewChanType(pos src.XPos, elem Node, dir types.ChanDir) *ChanType { func (n *ChanType) String() string { return fmt.Sprint(n) } func (n *ChanType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ChanType) rawCopy() Node { c := *n; return &c } +func (n *ChanType) copy() Node { c := *n; return &c } func (n *ChanType) SetOTYPE(t *types.Type) { n.setOTYPE(t, n) n.Elem = nil @@ -104,7 +104,7 @@ func NewMapType(pos src.XPos, key, elem Node) *MapType { func (n *MapType) String() string { return fmt.Sprint(n) } func (n *MapType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *MapType) rawCopy() Node { c := *n; return &c } +func (n *MapType) copy() Node { c := *n; return &c } func (n *MapType) SetOTYPE(t *types.Type) { n.setOTYPE(t, n) n.Key = nil @@ -134,7 +134,12 @@ func NewStructType(pos src.XPos, fields []*Field) *StructType { func (n *StructType) String() string { return fmt.Sprint(n) } func (n *StructType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *StructType) rawCopy() Node { c := *n; return &c } +func (n *StructType) copy() Node { + c := *n + c.Fields = copyFields(c.Fields) + return &c +} + func (n *StructType) SetOTYPE(t *types.Type) { n.setOTYPE(t, n) n.Fields = nil @@ -171,7 +176,12 @@ func NewInterfaceType(pos src.XPos, methods []*Field) *InterfaceType { func (n *InterfaceType) String() string { return fmt.Sprint(n) } func (n *InterfaceType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *InterfaceType) rawCopy() Node { c := *n; return &c } +func (n *InterfaceType) copy() Node { + c := *n + c.Methods = copyFields(c.Methods) + return &c +} + func (n *InterfaceType) SetOTYPE(t *types.Type) { n.setOTYPE(t, n) n.Methods = nil @@ -202,7 +212,15 @@ func NewFuncType(pos src.XPos, rcvr *Field, args, results []*Field) *FuncType { func (n *FuncType) String() string { return fmt.Sprint(n) } func (n *FuncType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *FuncType) rawCopy() Node { c := *n; return &c } +func (n *FuncType) copy() Node { + c := *n + if c.Recv != nil { + c.Recv = c.Recv.copy() + } + c.Params = copyFields(c.Params) + c.Results = copyFields(c.Results) + return &c +} func (n *FuncType) SetOTYPE(t *types.Type) { n.setOTYPE(t, n) @@ -252,6 +270,20 @@ func (f *Field) String() string { return typ } +func (f *Field) copy() *Field { + c := *f + return &c +} + 
+func copyFields(list []*Field) []*Field { + out := make([]*Field, len(list)) + copy(out, list) + for i, f := range out { + out[i] = f.copy() + } + return out +} + func (f *Field) deepCopy(pos src.XPos) *Field { if f == nil { return nil @@ -289,7 +321,7 @@ func NewSliceType(pos src.XPos, elem Node) *SliceType { func (n *SliceType) String() string { return fmt.Sprint(n) } func (n *SliceType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *SliceType) rawCopy() Node { c := *n; return &c } +func (n *SliceType) copy() Node { c := *n; return &c } func (n *SliceType) SetOTYPE(t *types.Type) { n.setOTYPE(t, n) n.Elem = nil @@ -320,7 +352,7 @@ func NewArrayType(pos src.XPos, size Node, elem Node) *ArrayType { func (n *ArrayType) String() string { return fmt.Sprint(n) } func (n *ArrayType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ArrayType) rawCopy() Node { c := *n; return &c } +func (n *ArrayType) copy() Node { c := *n; return &c } func (n *ArrayType) DeepCopy(pos src.XPos) Node { if n.op == OTYPE { @@ -351,7 +383,7 @@ func newTypeNode(pos src.XPos, typ *types.Type) *typeNode { func (n *typeNode) String() string { return fmt.Sprint(n) } func (n *typeNode) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *typeNode) rawCopy() Node { c := *n; return &c } +func (n *typeNode) copy() Node { c := *n; return &c } func (n *typeNode) Type() *types.Type { return n.typ } func (n *typeNode) Sym() *types.Sym { return n.typ.Sym() } func (n *typeNode) CanBeNtype() {} From 4725c3ffd1b8baf87204936e59bf00c96e3bf4a0 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 3 Dec 2020 21:02:19 -0500 Subject: [PATCH 111/474] [dev.regabi] cmd/compile: implement doChildren for nodes Put each node in charge of its DoChildren implementation. This removes a generic use of Left, Right, and so on in func DoChildren, heading toward removing those even from being used in package ir. Passes buildall w/ toolstash -cmp. 
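Each doChildren threads an error value through maybeDo/maybeDoList, so the first non-nil error from the callback short-circuits the rest of the walk. A minimal sketch of the idiom with stand-in types (not the ir package's):

package main

import (
	"errors"
	"fmt"
)

type node struct {
	name     string
	children []*node
}

// maybeDo mirrors the helper in this CL: call do only while no error
// has been seen, so the first failure stops all later work.
func maybeDo(x *node, err error, do func(*node) error) error {
	if x != nil && err == nil {
		err = do(x)
	}
	return err
}

func (n *node) doChildren(do func(*node) error) error {
	var err error
	for _, c := range n.children {
		err = maybeDo(c, err, do)
	}
	return err
}

var errStop = errors.New("stop")

func main() {
	root := &node{name: "root", children: []*node{{name: "a"}, {name: "b"}}}
	err := root.doChildren(func(c *node) error {
		fmt.Println("visit", c.name)
		if c.name == "a" {
			return errStop // "b" is still passed to maybeDo, but do is skipped
		}
		return nil
	})
	fmt.Println(err) // stop
}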
Change-Id: Ibdf56f36801217cf24549e063da0078c1820a56b Reviewed-on: https://go-review.googlesource.com/c/go/+/275375 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/copy.go | 6 +- src/cmd/compile/internal/ir/expr.go | 171 ++++++++++++++++++++++++++- src/cmd/compile/internal/ir/func.go | 22 ++-- src/cmd/compile/internal/ir/name.go | 44 +++---- src/cmd/compile/internal/ir/node.go | 2 + src/cmd/compile/internal/ir/stmt.go | 137 ++++++++++++++++++++- src/cmd/compile/internal/ir/type.go | 74 +++++++++++- src/cmd/compile/internal/ir/visit.go | 30 +---- 8 files changed, 419 insertions(+), 67 deletions(-) diff --git a/src/cmd/compile/internal/ir/copy.go b/src/cmd/compile/internal/ir/copy.go index 8d174d6e5386c..86e78cfc33e38 100644 --- a/src/cmd/compile/internal/ir/copy.go +++ b/src/cmd/compile/internal/ir/copy.go @@ -65,9 +65,9 @@ func Copy(n Node) Node { } func copyList(x Nodes) Nodes { - out := make([]Node, x.Len()) - copy(out, x.Slice()) - return AsNodes(out) + c := make([]Node, x.Len()) + copy(c, x.Slice()) + return AsNodes(c) } // A Node can implement DeepCopyNode to provide a custom implementation diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 7431a56d944d0..9e5dfaf0f243d 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -12,6 +12,20 @@ import ( "go/constant" ) +func maybeDo(x Node, err error, do func(Node) error) error { + if x != nil && err == nil { + err = do(x) + } + return err +} + +func maybeDoList(x Nodes, err error, do func(Node) error) error { + if err == nil { + err = DoList(x, do) + } + return err +} + // A miniStmt is a miniNode with extra fields common to expressions. // TODO(rsc): Once we are sure about the contents, compact the bools // into a bit field and leave extra bits available for implementations @@ -82,6 +96,12 @@ func (n *AddStringExpr) copy() Node { c.list = c.list.Copy() return &c } +func (n *AddStringExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDoList(n.list, err, do) + return err +} func (n *AddStringExpr) List() Nodes { return n.list } func (n *AddStringExpr) PtrList() *Nodes { return &n.list } @@ -109,6 +129,12 @@ func (n *AddrExpr) copy() Node { c.init = c.init.Copy() return &c } +func (n *AddrExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.X, err, do) + return err +} func (n *AddrExpr) Left() Node { return n.X } func (n *AddrExpr) SetLeft(x Node) { n.X = x } @@ -146,6 +172,13 @@ func (n *BinaryExpr) copy() Node { c.init = c.init.Copy() return &c } +func (n *BinaryExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.X, err, do) + err = maybeDo(n.Y, err, do) + return err +} func (n *BinaryExpr) Left() Node { return n.X } func (n *BinaryExpr) SetLeft(x Node) { n.X = x } @@ -207,6 +240,15 @@ func (n *CallExpr) copy() Node { c.body = c.body.Copy() return &c } +func (n *CallExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.X, err, do) + err = maybeDoList(n.Args, err, do) + err = maybeDoList(n.Rargs, err, do) + err = maybeDoList(n.body, err, do) + return err +} func (n *CallExpr) Orig() Node { return n.orig } func (n *CallExpr) SetOrig(x Node) { n.orig = x } @@ -260,6 +302,12 @@ func (n *CallPartExpr) copy() Node { c.init = c.init.Copy() return &c 
} +func (n *CallPartExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.X, err, do) + return err +} func (n *CallPartExpr) Func() *Func { return n.fn } func (n *CallPartExpr) Left() Node { return n.X } @@ -286,6 +334,11 @@ func (n *ClosureExpr) copy() Node { c.init = c.init.Copy() return &c } +func (n *ClosureExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + return err +} func (n *ClosureExpr) Func() *Func { return n.fn } @@ -312,6 +365,11 @@ func (n *ClosureRead) copy() Node { func (n *ClosureRead) Type() *types.Type { return n.typ } func (n *ClosureRead) Offset() int64 { return n.offset } +func (n *ClosureRead) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + return err +} // A CompLitExpr is a composite literal Type{Vals}. // Before type-checking, the type is Ntype. @@ -339,6 +397,13 @@ func (n *CompLitExpr) copy() Node { c.list = c.list.Copy() return &c } +func (n *CompLitExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.Ntype, err, do) + err = maybeDoList(n.list, err, do) + return err +} func (n *CompLitExpr) Orig() Node { return n.orig } func (n *CompLitExpr) SetOrig(x Node) { n.orig = x } @@ -373,9 +438,10 @@ func NewConstExpr(val constant.Value, orig Node) Node { return n } -func (n *ConstExpr) String() string { return fmt.Sprint(n) } -func (n *ConstExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ConstExpr) copy() Node { c := *n; return &c } +func (n *ConstExpr) String() string { return fmt.Sprint(n) } +func (n *ConstExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *ConstExpr) copy() Node { c := *n; return &c } +func (n *ConstExpr) doChildren(do func(Node) error) error { return nil } func (n *ConstExpr) Sym() *types.Sym { return n.orig.Sym() } func (n *ConstExpr) Orig() Node { return n.orig } @@ -406,6 +472,12 @@ func (n *ConvExpr) copy() Node { c.init = c.init.Copy() return &c } +func (n *ConvExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.X, err, do) + return err +} func (n *ConvExpr) rawCopy() Node { c := *n; return &c } func (n *ConvExpr) Left() Node { return n.X } @@ -442,6 +514,13 @@ func (n *IndexExpr) copy() Node { c.init = c.init.Copy() return &c } +func (n *IndexExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.X, err, do) + err = maybeDo(n.Index, err, do) + return err +} func (n *IndexExpr) Left() Node { return n.X } func (n *IndexExpr) SetLeft(x Node) { n.X = x } @@ -484,6 +563,13 @@ func (n *KeyExpr) copy() Node { c.init = c.init.Copy() return &c } +func (n *KeyExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.Key, err, do) + err = maybeDo(n.Value, err, do) + return err +} func (n *KeyExpr) Left() Node { return n.Key } func (n *KeyExpr) SetLeft(x Node) { n.Key = x } @@ -528,6 +614,13 @@ func (n *InlinedCallExpr) copy() Node { c.ReturnVars = c.ReturnVars.Copy() return &c } +func (n *InlinedCallExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDoList(n.body, err, do) + err = maybeDoList(n.ReturnVars, err, do) + return err +} func (n *InlinedCallExpr) Body() Nodes { return n.body } func (n *InlinedCallExpr) PtrBody() *Nodes { return &n.body } 
@@ -559,6 +652,13 @@ func (n *MakeExpr) copy() Node { c.init = c.init.Copy() return &c } +func (n *MakeExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.Len, err, do) + err = maybeDo(n.Cap, err, do) + return err +} func (n *MakeExpr) Left() Node { return n.Len } func (n *MakeExpr) SetLeft(x Node) { n.Len = x } @@ -574,7 +674,7 @@ func (n *MakeExpr) SetOp(op Op) { } } -// A MethodExpr is a method expression X.M (where X is an expression, not a type). +// A MethodExpr is a method value X.M (where X is an expression, not a type). type MethodExpr struct { miniExpr X Node @@ -600,6 +700,13 @@ func (n *MethodExpr) copy() Node { c.init = c.init.Copy() return &c } +func (n *MethodExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.X, err, do) + err = maybeDo(n.M, err, do) + return err +} func (n *MethodExpr) Left() Node { return n.X } func (n *MethodExpr) SetLeft(x Node) { n.X = x } @@ -633,6 +740,11 @@ func (n *NilExpr) copy() Node { c.init = c.init.Copy() return &c } +func (n *NilExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + return err +} func (n *NilExpr) Sym() *types.Sym { return n.sym } func (n *NilExpr) SetSym(x *types.Sym) { n.sym = x } @@ -658,6 +770,12 @@ func (n *ParenExpr) copy() Node { c.init = c.init.Copy() return &c } +func (n *ParenExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.X, err, do) + return err +} func (n *ParenExpr) Left() Node { return n.X } func (n *ParenExpr) SetLeft(x Node) { n.X = x } @@ -693,6 +811,11 @@ func (n *ResultExpr) copy() Node { c.init = c.init.Copy() return &c } +func (n *ResultExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + return err +} func (n *ResultExpr) Offset() int64 { return n.offset } func (n *ResultExpr) SetOffset(x int64) { n.offset = x } @@ -730,6 +853,12 @@ func (n *SelectorExpr) copy() Node { c.init = c.init.Copy() return &c } +func (n *SelectorExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.X, err, do) + return err +} func (n *SelectorExpr) Left() Node { return n.X } func (n *SelectorExpr) SetLeft(x Node) { n.X = x } @@ -764,6 +893,13 @@ func (n *SliceExpr) copy() Node { c.list = c.list.Copy() return &c } +func (n *SliceExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.X, err, do) + err = maybeDoList(n.list, err, do) + return err +} func (n *SliceExpr) Left() Node { return n.X } func (n *SliceExpr) SetLeft(x Node) { n.X = x } @@ -871,6 +1007,13 @@ func (n *SliceHeaderExpr) copy() Node { c.init = c.init.Copy() return &c } +func (n *SliceHeaderExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.Ptr, err, do) + err = maybeDoList(n.lenCap, err, do) + return err +} func (n *SliceHeaderExpr) Left() Node { return n.Ptr } func (n *SliceHeaderExpr) SetLeft(x Node) { n.Ptr = x } @@ -899,6 +1042,12 @@ func (n *StarExpr) copy() Node { c.init = c.init.Copy() return &c } +func (n *StarExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.X, err, do) + return err +} func (n *StarExpr) Left() Node { return n.X } func (n *StarExpr) SetLeft(x Node) { n.X = x } @@ -949,6 +1098,14 @@ func (n 
*TypeAssertExpr) copy() Node { c.Itab = c.Itab.Copy() return &c } +func (n *TypeAssertExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.X, err, do) + err = maybeDo(n.Ntype, err, do) + err = maybeDoList(n.Itab, err, do) + return err +} func (n *TypeAssertExpr) Left() Node { return n.X } func (n *TypeAssertExpr) SetLeft(x Node) { n.X = x } @@ -988,6 +1145,12 @@ func (n *UnaryExpr) copy() Node { c.init = c.init.Copy() return &c } +func (n *UnaryExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.X, err, do) + return err +} func (n *UnaryExpr) Left() Node { return n.X } func (n *UnaryExpr) SetLeft(x Node) { n.X = x } diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index ae803cd6a557e..342b7a91e754b 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -118,14 +118,20 @@ func NewFunc(pos src.XPos) *Func { func (f *Func) String() string { return fmt.Sprint(f) } func (f *Func) Format(s fmt.State, verb rune) { FmtNode(f, s, verb) } func (f *Func) copy() Node { panic(f.no("copy")) } -func (f *Func) Func() *Func { return f } -func (f *Func) Body() Nodes { return f.body } -func (f *Func) PtrBody() *Nodes { return &f.body } -func (f *Func) SetBody(x Nodes) { f.body = x } -func (f *Func) Type() *types.Type { return f.typ } -func (f *Func) SetType(x *types.Type) { f.typ = x } -func (f *Func) Iota() int64 { return f.iota } -func (f *Func) SetIota(x int64) { f.iota = x } +func (f *Func) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(f.body, err, do) + return err +} + +func (f *Func) Func() *Func { return f } +func (f *Func) Body() Nodes { return f.body } +func (f *Func) PtrBody() *Nodes { return &f.body } +func (f *Func) SetBody(x Nodes) { f.body = x } +func (f *Func) Type() *types.Type { return f.typ } +func (f *Func) SetType(x *types.Type) { f.typ = x } +func (f *Func) Iota() int64 { return f.iota } +func (f *Func) SetIota(x int64) { f.iota = x } func (f *Func) Sym() *types.Sym { if f.Nname != nil { diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index dc8c58e4f4009..2ff1fbc683852 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -149,22 +149,24 @@ func newNameAt(pos src.XPos, op Op, sym *types.Sym) *Name { return n } -func (n *Name) String() string { return fmt.Sprint(n) } -func (n *Name) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *Name) copy() Node { c := *n; return &c } -func (n *Name) Name() *Name { return n } -func (n *Name) Sym() *types.Sym { return n.sym } -func (n *Name) SetSym(x *types.Sym) { n.sym = x } -func (n *Name) SubOp() Op { return n.subOp } -func (n *Name) SetSubOp(x Op) { n.subOp = x } -func (n *Name) Class() Class { return n.class } -func (n *Name) SetClass(x Class) { n.class = x } -func (n *Name) Func() *Func { return n.fn } -func (n *Name) SetFunc(x *Func) { n.fn = x } -func (n *Name) Offset() int64 { return n.offset } -func (n *Name) SetOffset(x int64) { n.offset = x } -func (n *Name) Iota() int64 { return n.offset } -func (n *Name) SetIota(x int64) { n.offset = x } +func (n *Name) String() string { return fmt.Sprint(n) } +func (n *Name) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *Name) copy() Node { c := *n; return &c } +func (n *Name) doChildren(do func(Node) error) error { return nil } + +func (n *Name) Name() *Name { 
return n } +func (n *Name) Sym() *types.Sym { return n.sym } +func (n *Name) SetSym(x *types.Sym) { n.sym = x } +func (n *Name) SubOp() Op { return n.subOp } +func (n *Name) SetSubOp(x Op) { n.subOp = x } +func (n *Name) Class() Class { return n.class } +func (n *Name) SetClass(x Class) { n.class = x } +func (n *Name) Func() *Func { return n.fn } +func (n *Name) SetFunc(x *Func) { n.fn = x } +func (n *Name) Offset() int64 { return n.offset } +func (n *Name) SetOffset(x int64) { n.offset = x } +func (n *Name) Iota() int64 { return n.offset } +func (n *Name) SetIota(x int64) { n.offset = x } func (*Name) CanBeNtype() {} @@ -321,10 +323,12 @@ type PkgName struct { Used bool } -func (p *PkgName) String() string { return fmt.Sprint(p) } -func (p *PkgName) Format(s fmt.State, verb rune) { FmtNode(p, s, verb) } -func (p *PkgName) copy() Node { c := *p; return &c } -func (p *PkgName) Sym() *types.Sym { return p.sym } +func (p *PkgName) String() string { return fmt.Sprint(p) } +func (p *PkgName) Format(s fmt.State, verb rune) { FmtNode(p, s, verb) } +func (p *PkgName) copy() Node { c := *p; return &c } +func (p *PkgName) doChildren(do func(Node) error) error { return nil } + +func (p *PkgName) Sym() *types.Sym { return p.sym } func (*PkgName) CanBeNtype() {} diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 705eb9e47ed00..02ab87846ff3e 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -30,6 +30,8 @@ type Node interface { // For making copies. For Copy and SepCopy. copy() Node + doChildren(func(Node) error) error + // Abstract graph structure, for generic traversals. Op() Op SetOp(x Op) diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index 5af6a62cf2b57..b940c5f59d5d6 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -11,7 +11,6 @@ import ( ) // A Decl is a declaration of a const, type, or var. (A declared func is a Func.) -// (This is not technically a statement but it's not worth its own file.) type Decl struct { miniNode X Node // the thing being declared @@ -32,8 +31,14 @@ func NewDecl(pos src.XPos, op Op, x Node) *Decl { func (n *Decl) String() string { return fmt.Sprint(n) } func (n *Decl) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *Decl) copy() Node { c := *n; return &c } -func (n *Decl) Left() Node { return n.X } -func (n *Decl) SetLeft(x Node) { n.X = x } +func (n *Decl) doChildren(do func(Node) error) error { + var err error + err = maybeDo(n.X, err, do) + return err +} + +func (n *Decl) Left() Node { return n.X } +func (n *Decl) SetLeft(x Node) { n.X = x } // A miniStmt is a miniNode with extra fields common to statements. 
type miniStmt struct { @@ -77,6 +82,13 @@ func (n *AssignListStmt) copy() Node { c.Rhs = c.Rhs.Copy() return &c } +func (n *AssignListStmt) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDoList(n.Lhs, err, do) + err = maybeDoList(n.Rhs, err, do) + return err +} func (n *AssignListStmt) List() Nodes { return n.Lhs } func (n *AssignListStmt) PtrList() *Nodes { return &n.Lhs } @@ -123,6 +135,13 @@ func (n *AssignStmt) copy() Node { c.init = c.init.Copy() return &c } +func (n *AssignStmt) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.X, err, do) + err = maybeDo(n.Y, err, do) + return err +} func (n *AssignStmt) Left() Node { return n.X } func (n *AssignStmt) SetLeft(x Node) { n.X = x } @@ -166,6 +185,13 @@ func (n *AssignOpStmt) copy() Node { c.init = c.init.Copy() return &c } +func (n *AssignOpStmt) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.X, err, do) + err = maybeDo(n.Y, err, do) + return err +} func (n *AssignOpStmt) Left() Node { return n.X } func (n *AssignOpStmt) SetLeft(x Node) { n.X = x } @@ -200,6 +226,12 @@ func (n *BlockStmt) copy() Node { c.list = c.list.Copy() return &c } +func (n *BlockStmt) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDoList(n.list, err, do) + return err +} func (n *BlockStmt) List() Nodes { return n.list } func (n *BlockStmt) PtrList() *Nodes { return &n.list } @@ -234,6 +266,11 @@ func (n *BranchStmt) copy() Node { c.init = c.init.Copy() return &c } +func (n *BranchStmt) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + return err +} func (n *BranchStmt) Sym() *types.Sym { return n.Label } func (n *BranchStmt) SetSym(sym *types.Sym) { n.Label = sym } @@ -266,6 +303,15 @@ func (n *CaseStmt) copy() Node { c.body = c.body.Copy() return &c } +func (n *CaseStmt) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDoList(n.Vars, err, do) + err = maybeDoList(n.list, err, do) + err = maybeDo(n.Comm, err, do) + err = maybeDoList(n.body, err, do) + return err +} func (n *CaseStmt) List() Nodes { return n.list } func (n *CaseStmt) PtrList() *Nodes { return &n.list } @@ -299,6 +345,12 @@ func (n *DeferStmt) copy() Node { c.init = c.init.Copy() return &c } +func (n *DeferStmt) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.Call, err, do) + return err +} func (n *DeferStmt) Left() Node { return n.Call } func (n *DeferStmt) SetLeft(x Node) { n.Call = x } @@ -309,8 +361,8 @@ type ForStmt struct { miniStmt Label *types.Sym Cond Node - Post Node Late Nodes + Post Node body Nodes hasBreak bool } @@ -333,6 +385,15 @@ func (n *ForStmt) copy() Node { c.body = c.body.Copy() return &c } +func (n *ForStmt) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.Cond, err, do) + err = maybeDoList(n.Late, err, do) + err = maybeDo(n.Post, err, do) + err = maybeDoList(n.body, err, do) + return err +} func (n *ForStmt) Sym() *types.Sym { return n.Label } func (n *ForStmt) SetSym(x *types.Sym) { n.Label = x } @@ -376,6 +437,12 @@ func (n *GoStmt) copy() Node { c.init = c.init.Copy() return &c } +func (n *GoStmt) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.Call, 
err, do) + return err +} func (n *GoStmt) Left() Node { return n.Call } func (n *GoStmt) SetLeft(x Node) { n.Call = x } @@ -407,6 +474,14 @@ func (n *IfStmt) copy() Node { c.Else = c.Else.Copy() return &c } +func (n *IfStmt) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.Cond, err, do) + err = maybeDoList(n.body, err, do) + err = maybeDoList(n.Else, err, do) + return err +} func (n *IfStmt) Left() Node { return n.Cond } func (n *IfStmt) SetLeft(x Node) { n.Cond = x } @@ -439,6 +514,11 @@ func (n *InlineMarkStmt) copy() Node { c.init = c.init.Copy() return &c } +func (n *InlineMarkStmt) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + return err +} func (n *InlineMarkStmt) Offset() int64 { return n.Index } func (n *InlineMarkStmt) SetOffset(x int64) { n.Index = x } @@ -463,6 +543,11 @@ func (n *LabelStmt) copy() Node { c.init = c.init.Copy() return &c } +func (n *LabelStmt) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + return err +} func (n *LabelStmt) Sym() *types.Sym { return n.Label } func (n *LabelStmt) SetSym(x *types.Sym) { n.Label = x } @@ -498,6 +583,14 @@ func (n *RangeStmt) copy() Node { c.body = c.body.Copy() return &c } +func (n *RangeStmt) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDoList(n.Vars, err, do) + err = maybeDo(n.X, err, do) + err = maybeDoList(n.body, err, do) + return err +} func (n *RangeStmt) Sym() *types.Sym { return n.Label } func (n *RangeStmt) SetSym(x *types.Sym) { n.Label = x } @@ -540,6 +633,12 @@ func (n *ReturnStmt) copy() Node { c.Results = c.Results.Copy() return &c } +func (n *ReturnStmt) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDoList(n.Results, err, do) + return err +} func (n *ReturnStmt) Orig() Node { return n.orig } func (n *ReturnStmt) SetOrig(x Node) { n.orig = x } @@ -576,6 +675,13 @@ func (n *SelectStmt) copy() Node { c.Compiled = c.Compiled.Copy() return &c } +func (n *SelectStmt) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDoList(n.Cases, err, do) + err = maybeDoList(n.Compiled, err, do) + return err +} func (n *SelectStmt) List() Nodes { return n.Cases } func (n *SelectStmt) PtrList() *Nodes { return &n.Cases } @@ -609,6 +715,13 @@ func (n *SendStmt) copy() Node { c.init = c.init.Copy() return &c } +func (n *SendStmt) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.Chan, err, do) + err = maybeDo(n.Value, err, do) + return err +} func (n *SendStmt) Left() Node { return n.Chan } func (n *SendStmt) SetLeft(x Node) { n.Chan = x } @@ -644,6 +757,14 @@ func (n *SwitchStmt) copy() Node { c.Compiled = c.Compiled.Copy() return &c } +func (n *SwitchStmt) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.Tag, err, do) + err = maybeDoList(n.Cases, err, do) + err = maybeDoList(n.Compiled, err, do) + return err +} func (n *SwitchStmt) Left() Node { return n.Tag } func (n *SwitchStmt) SetLeft(x Node) { n.Tag = x } @@ -678,6 +799,14 @@ func NewTypeSwitchGuard(pos src.XPos, name, x Node) *TypeSwitchGuard { func (n *TypeSwitchGuard) String() string { return fmt.Sprint(n) } func (n *TypeSwitchGuard) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *TypeSwitchGuard) copy() Node { c 
:= *n; return &c } +func (n *TypeSwitchGuard) doChildren(do func(Node) error) error { + var err error + if n.name != nil { + err = maybeDo(n.name, err, do) + } + err = maybeDo(n.X, err, do) + return err +} func (n *TypeSwitchGuard) Left() Node { if n.name == nil { diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go index a8af99034d223..2723c000446a3 100644 --- a/src/cmd/compile/internal/ir/type.go +++ b/src/cmd/compile/internal/ir/type.go @@ -75,6 +75,11 @@ func NewChanType(pos src.XPos, elem Node, dir types.ChanDir) *ChanType { func (n *ChanType) String() string { return fmt.Sprint(n) } func (n *ChanType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *ChanType) copy() Node { c := *n; return &c } +func (n *ChanType) doChildren(do func(Node) error) error { + var err error + err = maybeDo(n.Elem, err, do) + return err +} func (n *ChanType) SetOTYPE(t *types.Type) { n.setOTYPE(t, n) n.Elem = nil @@ -105,6 +110,12 @@ func NewMapType(pos src.XPos, key, elem Node) *MapType { func (n *MapType) String() string { return fmt.Sprint(n) } func (n *MapType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *MapType) copy() Node { c := *n; return &c } +func (n *MapType) doChildren(do func(Node) error) error { + var err error + err = maybeDo(n.Key, err, do) + err = maybeDo(n.Elem, err, do) + return err +} func (n *MapType) SetOTYPE(t *types.Type) { n.setOTYPE(t, n) n.Key = nil @@ -139,6 +150,11 @@ func (n *StructType) copy() Node { c.Fields = copyFields(c.Fields) return &c } +func (n *StructType) doChildren(do func(Node) error) error { + var err error + err = maybeDoFields(n.Fields, err, do) + return err +} func (n *StructType) SetOTYPE(t *types.Type) { n.setOTYPE(t, n) @@ -181,6 +197,11 @@ func (n *InterfaceType) copy() Node { c.Methods = copyFields(c.Methods) return &c } +func (n *InterfaceType) doChildren(do func(Node) error) error { + var err error + err = maybeDoFields(n.Methods, err, do) + return err +} func (n *InterfaceType) SetOTYPE(t *types.Type) { n.setOTYPE(t, n) @@ -221,6 +242,13 @@ func (n *FuncType) copy() Node { c.Results = copyFields(c.Results) return &c } +func (n *FuncType) doChildren(do func(Node) error) error { + var err error + err = maybeDoField(n.Recv, err, do) + err = maybeDoFields(n.Params, err, do) + err = maybeDoFields(n.Results, err, do) + return err +} func (n *FuncType) SetOTYPE(t *types.Type) { n.setOTYPE(t, n) @@ -284,6 +312,31 @@ func copyFields(list []*Field) []*Field { return out } +func maybeDoField(f *Field, err error, do func(Node) error) error { + if f != nil { + if err == nil && f.Decl != nil { + err = do(f.Decl) + } + if err == nil && f.Ntype != nil { + err = do(f.Ntype) + } + } + return err +} + +func maybeDoFields(list []*Field, err error, do func(Node) error) error { + if err != nil { + return err + } + for _, f := range list { + err = maybeDoField(f, err, do) + if err != nil { + return err + } + } + return err +} + func (f *Field) deepCopy(pos src.XPos) *Field { if f == nil { return nil @@ -322,6 +375,11 @@ func NewSliceType(pos src.XPos, elem Node) *SliceType { func (n *SliceType) String() string { return fmt.Sprint(n) } func (n *SliceType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *SliceType) copy() Node { c := *n; return &c } +func (n *SliceType) doChildren(do func(Node) error) error { + var err error + err = maybeDo(n.Elem, err, do) + return err +} func (n *SliceType) SetOTYPE(t *types.Type) { n.setOTYPE(t, n) n.Elem = nil @@ -353,6 +411,12 @@ func NewArrayType(pos 
src.XPos, size Node, elem Node) *ArrayType { func (n *ArrayType) String() string { return fmt.Sprint(n) } func (n *ArrayType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *ArrayType) copy() Node { c := *n; return &c } +func (n *ArrayType) doChildren(do func(Node) error) error { + var err error + err = maybeDo(n.Len, err, do) + err = maybeDo(n.Elem, err, do) + return err +} func (n *ArrayType) DeepCopy(pos src.XPos) Node { if n.op == OTYPE { @@ -384,9 +448,13 @@ func newTypeNode(pos src.XPos, typ *types.Type) *typeNode { func (n *typeNode) String() string { return fmt.Sprint(n) } func (n *typeNode) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *typeNode) copy() Node { c := *n; return &c } -func (n *typeNode) Type() *types.Type { return n.typ } -func (n *typeNode) Sym() *types.Sym { return n.typ.Sym() } -func (n *typeNode) CanBeNtype() {} +func (n *typeNode) doChildren(do func(Node) error) error { + return nil +} + +func (n *typeNode) Type() *types.Type { return n.typ } +func (n *typeNode) Sym() *types.Sym { return n.typ.Sym() } +func (n *typeNode) CanBeNtype() {} // TypeNode returns the Node representing the type t. func TypeNode(t *types.Type) Ntype { diff --git a/src/cmd/compile/internal/ir/visit.go b/src/cmd/compile/internal/ir/visit.go index a239fd1532123..042257c32ad83 100644 --- a/src/cmd/compile/internal/ir/visit.go +++ b/src/cmd/compile/internal/ir/visit.go @@ -14,7 +14,9 @@ package ir -import "errors" +import ( + "errors" +) // DoChildren calls do(x) on each of n's non-nil child nodes x. // If any call returns a non-nil error, DoChildren stops and returns that error. @@ -86,7 +88,7 @@ import "errors" // found = v // return stop // } -// return DoChildren(x, do) +// return ir.DoChildren(x, do) // } // do(n) // return found @@ -100,29 +102,7 @@ func DoChildren(n Node, do func(Node) error) error { if n == nil { return nil } - if err := DoList(n.Init(), do); err != nil { - return err - } - if l := n.Left(); l != nil { - if err := do(l); err != nil { - return err - } - } - if r := n.Right(); r != nil { - if err := do(r); err != nil { - return err - } - } - if err := DoList(n.List(), do); err != nil { - return err - } - if err := DoList(n.Body(), do); err != nil { - return err - } - if err := DoList(n.Rlist(), do); err != nil { - return err - } - return nil + return n.doChildren(do) } // DoList calls f on each non-nil node x in the list, in list order. From bb5aa2b664331087d3230732cb0d11c8e87b9e98 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 3 Dec 2020 21:29:23 -0500 Subject: [PATCH 112/474] [dev.regabi] cmd/compile: implement editChildren for nodes Put each node in charge of its EditChildren implementation. This removes the final generic use of Left, SetLeft, Right, SetRight, and so on in package ir. Passes buildall w/ toolstash -cmp. 
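For reference, the caller-side idiom this enables is a bottom-up
rewrite that no longer needs to know which concrete fields a node
has. A minimal sketch, using only the EditChildren API added here
(rewriteMul is a hypothetical local transformation, not part of
this CL):

	var edit func(ir.Node) ir.Node
	edit = func(n ir.Node) ir.Node {
		ir.EditChildren(n, edit) // each node edits its own children
		if n.Op() == ir.OMUL {
			n = rewriteMul(n) // hypothetical rewrite of this node
		}
		return n
	}
	n = edit(n)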
Change-Id: I9821cc20f5b91cc9b44eb1f386cc82f20cd6770c Reviewed-on: https://go-review.googlesource.com/c/go/+/275376 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/expr.go | 110 +++++++++++++++++++++++++++ src/cmd/compile/internal/ir/func.go | 3 + src/cmd/compile/internal/ir/name.go | 2 + src/cmd/compile/internal/ir/node.go | 1 + src/cmd/compile/internal/ir/stmt.go | 91 ++++++++++++++++++++++ src/cmd/compile/internal/ir/type.go | 44 +++++++++++ src/cmd/compile/internal/ir/visit.go | 11 +-- 7 files changed, 252 insertions(+), 10 deletions(-) diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 9e5dfaf0f243d..312faa8436a33 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -26,6 +26,13 @@ func maybeDoList(x Nodes, err error, do func(Node) error) error { return err } +func maybeEdit(x Node, edit func(Node) Node) Node { + if x == nil { + return x + } + return edit(x) +} + // A miniStmt is a miniNode with extra fields common to expressions. // TODO(rsc): Once we are sure about the contents, compact the bools // into a bit field and leave extra bits available for implementations @@ -102,6 +109,10 @@ func (n *AddStringExpr) doChildren(do func(Node) error) error { err = maybeDoList(n.list, err, do) return err } +func (n *AddStringExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + editList(n.list, edit) +} func (n *AddStringExpr) List() Nodes { return n.list } func (n *AddStringExpr) PtrList() *Nodes { return &n.list } @@ -135,6 +146,10 @@ func (n *AddrExpr) doChildren(do func(Node) error) error { err = maybeDo(n.X, err, do) return err } +func (n *AddrExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.X = maybeEdit(n.X, edit) +} func (n *AddrExpr) Left() Node { return n.X } func (n *AddrExpr) SetLeft(x Node) { n.X = x } @@ -179,6 +194,11 @@ func (n *BinaryExpr) doChildren(do func(Node) error) error { err = maybeDo(n.Y, err, do) return err } +func (n *BinaryExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.X = maybeEdit(n.X, edit) + n.Y = maybeEdit(n.Y, edit) +} func (n *BinaryExpr) Left() Node { return n.X } func (n *BinaryExpr) SetLeft(x Node) { n.X = x } @@ -249,6 +269,13 @@ func (n *CallExpr) doChildren(do func(Node) error) error { err = maybeDoList(n.body, err, do) return err } +func (n *CallExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.X = maybeEdit(n.X, edit) + editList(n.Args, edit) + editList(n.Rargs, edit) + editList(n.body, edit) +} func (n *CallExpr) Orig() Node { return n.orig } func (n *CallExpr) SetOrig(x Node) { n.orig = x } @@ -308,6 +335,10 @@ func (n *CallPartExpr) doChildren(do func(Node) error) error { err = maybeDo(n.X, err, do) return err } +func (n *CallPartExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.X = maybeEdit(n.X, edit) +} func (n *CallPartExpr) Func() *Func { return n.fn } func (n *CallPartExpr) Left() Node { return n.X } @@ -339,6 +370,9 @@ func (n *ClosureExpr) doChildren(do func(Node) error) error { err = maybeDoList(n.init, err, do) return err } +func (n *ClosureExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) +} func (n *ClosureExpr) Func() *Func { return n.fn } @@ -370,6 +404,9 @@ func (n *ClosureRead) doChildren(do func(Node) error) error { err = maybeDoList(n.init, err, do) return err } +func (n *ClosureRead) editChildren(edit func(Node) Node) { + editList(n.init, edit) +} // A CompLitExpr is a 
composite literal Type{Vals}. // Before type-checking, the type is Ntype. @@ -404,6 +441,11 @@ func (n *CompLitExpr) doChildren(do func(Node) error) error { err = maybeDoList(n.list, err, do) return err } +func (n *CompLitExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.Ntype = toNtype(maybeEdit(n.Ntype, edit)) + editList(n.list, edit) +} func (n *CompLitExpr) Orig() Node { return n.orig } func (n *CompLitExpr) SetOrig(x Node) { n.orig = x } @@ -442,6 +484,7 @@ func (n *ConstExpr) String() string { return fmt.Sprint(n) func (n *ConstExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *ConstExpr) copy() Node { c := *n; return &c } func (n *ConstExpr) doChildren(do func(Node) error) error { return nil } +func (n *ConstExpr) editChildren(edit func(Node) Node) {} func (n *ConstExpr) Sym() *types.Sym { return n.orig.Sym() } func (n *ConstExpr) Orig() Node { return n.orig } @@ -478,6 +521,10 @@ func (n *ConvExpr) doChildren(do func(Node) error) error { err = maybeDo(n.X, err, do) return err } +func (n *ConvExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.X = maybeEdit(n.X, edit) +} func (n *ConvExpr) rawCopy() Node { c := *n; return &c } func (n *ConvExpr) Left() Node { return n.X } @@ -521,6 +568,11 @@ func (n *IndexExpr) doChildren(do func(Node) error) error { err = maybeDo(n.Index, err, do) return err } +func (n *IndexExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.X = maybeEdit(n.X, edit) + n.Index = maybeEdit(n.Index, edit) +} func (n *IndexExpr) Left() Node { return n.X } func (n *IndexExpr) SetLeft(x Node) { n.X = x } @@ -570,6 +622,11 @@ func (n *KeyExpr) doChildren(do func(Node) error) error { err = maybeDo(n.Value, err, do) return err } +func (n *KeyExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.Key = maybeEdit(n.Key, edit) + n.Value = maybeEdit(n.Value, edit) +} func (n *KeyExpr) Left() Node { return n.Key } func (n *KeyExpr) SetLeft(x Node) { n.Key = x } @@ -621,6 +678,11 @@ func (n *InlinedCallExpr) doChildren(do func(Node) error) error { err = maybeDoList(n.ReturnVars, err, do) return err } +func (n *InlinedCallExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + editList(n.body, edit) + editList(n.ReturnVars, edit) +} func (n *InlinedCallExpr) Body() Nodes { return n.body } func (n *InlinedCallExpr) PtrBody() *Nodes { return &n.body } @@ -659,6 +721,11 @@ func (n *MakeExpr) doChildren(do func(Node) error) error { err = maybeDo(n.Cap, err, do) return err } +func (n *MakeExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.Len = maybeEdit(n.Len, edit) + n.Cap = maybeEdit(n.Cap, edit) +} func (n *MakeExpr) Left() Node { return n.Len } func (n *MakeExpr) SetLeft(x Node) { n.Len = x } @@ -707,6 +774,11 @@ func (n *MethodExpr) doChildren(do func(Node) error) error { err = maybeDo(n.M, err, do) return err } +func (n *MethodExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.X = maybeEdit(n.X, edit) + n.M = maybeEdit(n.M, edit) +} func (n *MethodExpr) Left() Node { return n.X } func (n *MethodExpr) SetLeft(x Node) { n.X = x } @@ -745,6 +817,9 @@ func (n *NilExpr) doChildren(do func(Node) error) error { err = maybeDoList(n.init, err, do) return err } +func (n *NilExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) +} func (n *NilExpr) Sym() *types.Sym { return n.sym } func (n *NilExpr) SetSym(x *types.Sym) { n.sym = x } @@ -776,6 +851,10 @@ func (n *ParenExpr) doChildren(do func(Node) error) error { err = 
maybeDo(n.X, err, do) return err } +func (n *ParenExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.X = maybeEdit(n.X, edit) +} func (n *ParenExpr) Left() Node { return n.X } func (n *ParenExpr) SetLeft(x Node) { n.X = x } @@ -816,6 +895,9 @@ func (n *ResultExpr) doChildren(do func(Node) error) error { err = maybeDoList(n.init, err, do) return err } +func (n *ResultExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) +} func (n *ResultExpr) Offset() int64 { return n.offset } func (n *ResultExpr) SetOffset(x int64) { n.offset = x } @@ -859,6 +941,10 @@ func (n *SelectorExpr) doChildren(do func(Node) error) error { err = maybeDo(n.X, err, do) return err } +func (n *SelectorExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.X = maybeEdit(n.X, edit) +} func (n *SelectorExpr) Left() Node { return n.X } func (n *SelectorExpr) SetLeft(x Node) { n.X = x } @@ -900,6 +986,11 @@ func (n *SliceExpr) doChildren(do func(Node) error) error { err = maybeDoList(n.list, err, do) return err } +func (n *SliceExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.X = maybeEdit(n.X, edit) + editList(n.list, edit) +} func (n *SliceExpr) Left() Node { return n.X } func (n *SliceExpr) SetLeft(x Node) { n.X = x } @@ -1014,6 +1105,11 @@ func (n *SliceHeaderExpr) doChildren(do func(Node) error) error { err = maybeDoList(n.lenCap, err, do) return err } +func (n *SliceHeaderExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.Ptr = maybeEdit(n.Ptr, edit) + editList(n.lenCap, edit) +} func (n *SliceHeaderExpr) Left() Node { return n.Ptr } func (n *SliceHeaderExpr) SetLeft(x Node) { n.Ptr = x } @@ -1048,6 +1144,10 @@ func (n *StarExpr) doChildren(do func(Node) error) error { err = maybeDo(n.X, err, do) return err } +func (n *StarExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.X = maybeEdit(n.X, edit) +} func (n *StarExpr) Left() Node { return n.X } func (n *StarExpr) SetLeft(x Node) { n.X = x } @@ -1106,6 +1206,12 @@ func (n *TypeAssertExpr) doChildren(do func(Node) error) error { err = maybeDoList(n.Itab, err, do) return err } +func (n *TypeAssertExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.X = maybeEdit(n.X, edit) + n.Ntype = maybeEdit(n.Ntype, edit) + editList(n.Itab, edit) +} func (n *TypeAssertExpr) Left() Node { return n.X } func (n *TypeAssertExpr) SetLeft(x Node) { n.X = x } @@ -1151,6 +1257,10 @@ func (n *UnaryExpr) doChildren(do func(Node) error) error { err = maybeDo(n.X, err, do) return err } +func (n *UnaryExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.X = maybeEdit(n.X, edit) +} func (n *UnaryExpr) Left() Node { return n.X } func (n *UnaryExpr) SetLeft(x Node) { n.X = x } diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 342b7a91e754b..78e98c4d31c25 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -123,6 +123,9 @@ func (f *Func) doChildren(do func(Node) error) error { err = maybeDoList(f.body, err, do) return err } +func (f *Func) editChildren(edit func(Node) Node) { + editList(f.body, edit) +} func (f *Func) Func() *Func { return f } func (f *Func) Body() Nodes { return f.body } diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 2ff1fbc683852..d2c33eab2b590 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -153,6 +153,7 @@ func (n *Name) String() string { return 
fmt.Sprint(n) } func (n *Name) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *Name) copy() Node { c := *n; return &c } func (n *Name) doChildren(do func(Node) error) error { return nil } +func (n *Name) editChildren(edit func(Node) Node) {} func (n *Name) Name() *Name { return n } func (n *Name) Sym() *types.Sym { return n.sym } @@ -327,6 +328,7 @@ func (p *PkgName) String() string { return fmt.Sprint(p) } func (p *PkgName) Format(s fmt.State, verb rune) { FmtNode(p, s, verb) } func (p *PkgName) copy() Node { c := *p; return &c } func (p *PkgName) doChildren(do func(Node) error) error { return nil } +func (p *PkgName) editChildren(edit func(Node) Node) {} func (p *PkgName) Sym() *types.Sym { return p.sym } diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 02ab87846ff3e..f44d22313cabb 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -31,6 +31,7 @@ type Node interface { copy() Node doChildren(func(Node) error) error + editChildren(func(Node) Node) // Abstract graph structure, for generic traversals. Op() Op diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index b940c5f59d5d6..c859fae55b20a 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -36,6 +36,9 @@ func (n *Decl) doChildren(do func(Node) error) error { err = maybeDo(n.X, err, do) return err } +func (n *Decl) editChildren(edit func(Node) Node) { + n.X = maybeEdit(n.X, edit) +} func (n *Decl) Left() Node { return n.X } func (n *Decl) SetLeft(x Node) { n.X = x } @@ -89,6 +92,11 @@ func (n *AssignListStmt) doChildren(do func(Node) error) error { err = maybeDoList(n.Rhs, err, do) return err } +func (n *AssignListStmt) editChildren(edit func(Node) Node) { + editList(n.init, edit) + editList(n.Lhs, edit) + editList(n.Rhs, edit) +} func (n *AssignListStmt) List() Nodes { return n.Lhs } func (n *AssignListStmt) PtrList() *Nodes { return &n.Lhs } @@ -142,6 +150,11 @@ func (n *AssignStmt) doChildren(do func(Node) error) error { err = maybeDo(n.Y, err, do) return err } +func (n *AssignStmt) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.X = maybeEdit(n.X, edit) + n.Y = maybeEdit(n.Y, edit) +} func (n *AssignStmt) Left() Node { return n.X } func (n *AssignStmt) SetLeft(x Node) { n.X = x } @@ -192,6 +205,11 @@ func (n *AssignOpStmt) doChildren(do func(Node) error) error { err = maybeDo(n.Y, err, do) return err } +func (n *AssignOpStmt) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.X = maybeEdit(n.X, edit) + n.Y = maybeEdit(n.Y, edit) +} func (n *AssignOpStmt) Left() Node { return n.X } func (n *AssignOpStmt) SetLeft(x Node) { n.X = x } @@ -232,6 +250,10 @@ func (n *BlockStmt) doChildren(do func(Node) error) error { err = maybeDoList(n.list, err, do) return err } +func (n *BlockStmt) editChildren(edit func(Node) Node) { + editList(n.init, edit) + editList(n.list, edit) +} func (n *BlockStmt) List() Nodes { return n.list } func (n *BlockStmt) PtrList() *Nodes { return &n.list } @@ -271,6 +293,9 @@ func (n *BranchStmt) doChildren(do func(Node) error) error { err = maybeDoList(n.init, err, do) return err } +func (n *BranchStmt) editChildren(edit func(Node) Node) { + editList(n.init, edit) +} func (n *BranchStmt) Sym() *types.Sym { return n.Label } func (n *BranchStmt) SetSym(sym *types.Sym) { n.Label = sym } @@ -312,6 +337,13 @@ func (n *CaseStmt) doChildren(do func(Node) error) error { err = maybeDoList(n.body, err, do) return err 
} +func (n *CaseStmt) editChildren(edit func(Node) Node) { + editList(n.init, edit) + editList(n.Vars, edit) + editList(n.list, edit) + n.Comm = maybeEdit(n.Comm, edit) + editList(n.body, edit) +} func (n *CaseStmt) List() Nodes { return n.list } func (n *CaseStmt) PtrList() *Nodes { return &n.list } @@ -351,6 +383,10 @@ func (n *DeferStmt) doChildren(do func(Node) error) error { err = maybeDo(n.Call, err, do) return err } +func (n *DeferStmt) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.Call = maybeEdit(n.Call, edit) +} func (n *DeferStmt) Left() Node { return n.Call } func (n *DeferStmt) SetLeft(x Node) { n.Call = x } @@ -394,6 +430,13 @@ func (n *ForStmt) doChildren(do func(Node) error) error { err = maybeDoList(n.body, err, do) return err } +func (n *ForStmt) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.Cond = maybeEdit(n.Cond, edit) + editList(n.Late, edit) + n.Post = maybeEdit(n.Post, edit) + editList(n.body, edit) +} func (n *ForStmt) Sym() *types.Sym { return n.Label } func (n *ForStmt) SetSym(x *types.Sym) { n.Label = x } @@ -443,6 +486,10 @@ func (n *GoStmt) doChildren(do func(Node) error) error { err = maybeDo(n.Call, err, do) return err } +func (n *GoStmt) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.Call = maybeEdit(n.Call, edit) +} func (n *GoStmt) Left() Node { return n.Call } func (n *GoStmt) SetLeft(x Node) { n.Call = x } @@ -482,6 +529,12 @@ func (n *IfStmt) doChildren(do func(Node) error) error { err = maybeDoList(n.Else, err, do) return err } +func (n *IfStmt) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.Cond = maybeEdit(n.Cond, edit) + editList(n.body, edit) + editList(n.Else, edit) +} func (n *IfStmt) Left() Node { return n.Cond } func (n *IfStmt) SetLeft(x Node) { n.Cond = x } @@ -519,6 +572,9 @@ func (n *InlineMarkStmt) doChildren(do func(Node) error) error { err = maybeDoList(n.init, err, do) return err } +func (n *InlineMarkStmt) editChildren(edit func(Node) Node) { + editList(n.init, edit) +} func (n *InlineMarkStmt) Offset() int64 { return n.Index } func (n *InlineMarkStmt) SetOffset(x int64) { n.Index = x } @@ -548,6 +604,9 @@ func (n *LabelStmt) doChildren(do func(Node) error) error { err = maybeDoList(n.init, err, do) return err } +func (n *LabelStmt) editChildren(edit func(Node) Node) { + editList(n.init, edit) +} func (n *LabelStmt) Sym() *types.Sym { return n.Label } func (n *LabelStmt) SetSym(x *types.Sym) { n.Label = x } @@ -591,6 +650,12 @@ func (n *RangeStmt) doChildren(do func(Node) error) error { err = maybeDoList(n.body, err, do) return err } +func (n *RangeStmt) editChildren(edit func(Node) Node) { + editList(n.init, edit) + editList(n.Vars, edit) + n.X = maybeEdit(n.X, edit) + editList(n.body, edit) +} func (n *RangeStmt) Sym() *types.Sym { return n.Label } func (n *RangeStmt) SetSym(x *types.Sym) { n.Label = x } @@ -639,6 +704,10 @@ func (n *ReturnStmt) doChildren(do func(Node) error) error { err = maybeDoList(n.Results, err, do) return err } +func (n *ReturnStmt) editChildren(edit func(Node) Node) { + editList(n.init, edit) + editList(n.Results, edit) +} func (n *ReturnStmt) Orig() Node { return n.orig } func (n *ReturnStmt) SetOrig(x Node) { n.orig = x } @@ -682,6 +751,11 @@ func (n *SelectStmt) doChildren(do func(Node) error) error { err = maybeDoList(n.Compiled, err, do) return err } +func (n *SelectStmt) editChildren(edit func(Node) Node) { + editList(n.init, edit) + editList(n.Cases, edit) + editList(n.Compiled, edit) +} func (n *SelectStmt) List() Nodes { 
return n.Cases } func (n *SelectStmt) PtrList() *Nodes { return &n.Cases } @@ -722,6 +796,11 @@ func (n *SendStmt) doChildren(do func(Node) error) error { err = maybeDo(n.Value, err, do) return err } +func (n *SendStmt) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.Chan = maybeEdit(n.Chan, edit) + n.Value = maybeEdit(n.Value, edit) +} func (n *SendStmt) Left() Node { return n.Chan } func (n *SendStmt) SetLeft(x Node) { n.Chan = x } @@ -765,6 +844,12 @@ func (n *SwitchStmt) doChildren(do func(Node) error) error { err = maybeDoList(n.Compiled, err, do) return err } +func (n *SwitchStmt) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.Tag = maybeEdit(n.Tag, edit) + editList(n.Cases, edit) + editList(n.Compiled, edit) +} func (n *SwitchStmt) Left() Node { return n.Tag } func (n *SwitchStmt) SetLeft(x Node) { n.Tag = x } @@ -807,6 +892,12 @@ func (n *TypeSwitchGuard) doChildren(do func(Node) error) error { err = maybeDo(n.X, err, do) return err } +func (n *TypeSwitchGuard) editChildren(edit func(Node) Node) { + if n.name != nil { + n.name = edit(n.name).(*Name) + } + n.X = maybeEdit(n.X, edit) +} func (n *TypeSwitchGuard) Left() Node { if n.name == nil { diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go index 2723c000446a3..d69dc3fd2ae9b 100644 --- a/src/cmd/compile/internal/ir/type.go +++ b/src/cmd/compile/internal/ir/type.go @@ -80,6 +80,9 @@ func (n *ChanType) doChildren(do func(Node) error) error { err = maybeDo(n.Elem, err, do) return err } +func (n *ChanType) editChildren(edit func(Node) Node) { + n.Elem = maybeEdit(n.Elem, edit) +} func (n *ChanType) SetOTYPE(t *types.Type) { n.setOTYPE(t, n) n.Elem = nil @@ -116,6 +119,10 @@ func (n *MapType) doChildren(do func(Node) error) error { err = maybeDo(n.Elem, err, do) return err } +func (n *MapType) editChildren(edit func(Node) Node) { + n.Key = maybeEdit(n.Key, edit) + n.Elem = maybeEdit(n.Elem, edit) +} func (n *MapType) SetOTYPE(t *types.Type) { n.setOTYPE(t, n) n.Key = nil @@ -155,6 +162,9 @@ func (n *StructType) doChildren(do func(Node) error) error { err = maybeDoFields(n.Fields, err, do) return err } +func (n *StructType) editChildren(edit func(Node) Node) { + editFields(n.Fields, edit) +} func (n *StructType) SetOTYPE(t *types.Type) { n.setOTYPE(t, n) @@ -202,6 +212,9 @@ func (n *InterfaceType) doChildren(do func(Node) error) error { err = maybeDoFields(n.Methods, err, do) return err } +func (n *InterfaceType) editChildren(edit func(Node) Node) { + editFields(n.Methods, edit) +} func (n *InterfaceType) SetOTYPE(t *types.Type) { n.setOTYPE(t, n) @@ -249,6 +262,11 @@ func (n *FuncType) doChildren(do func(Node) error) error { err = maybeDoFields(n.Results, err, do) return err } +func (n *FuncType) editChildren(edit func(Node) Node) { + editField(n.Recv, edit) + editFields(n.Params, edit) + editFields(n.Results, edit) +} func (n *FuncType) SetOTYPE(t *types.Type) { n.setOTYPE(t, n) @@ -337,6 +355,24 @@ func maybeDoFields(list []*Field, err error, do func(Node) error) error { return err } +func editField(f *Field, edit func(Node) Node) { + if f == nil { + return + } + if f.Decl != nil { + f.Decl = edit(f.Decl).(*Name) + } + if f.Ntype != nil { + f.Ntype = toNtype(edit(f.Ntype)) + } +} + +func editFields(list []*Field, edit func(Node) Node) { + for _, f := range list { + editField(f, edit) + } +} + func (f *Field) deepCopy(pos src.XPos) *Field { if f == nil { return nil @@ -380,6 +416,9 @@ func (n *SliceType) doChildren(do func(Node) error) error { err = 
maybeDo(n.Elem, err, do) return err } +func (n *SliceType) editChildren(edit func(Node) Node) { + n.Elem = maybeEdit(n.Elem, edit) +} func (n *SliceType) SetOTYPE(t *types.Type) { n.setOTYPE(t, n) n.Elem = nil @@ -417,6 +456,10 @@ func (n *ArrayType) doChildren(do func(Node) error) error { err = maybeDo(n.Elem, err, do) return err } +func (n *ArrayType) editChildren(edit func(Node) Node) { + n.Len = maybeEdit(n.Len, edit) + n.Elem = maybeEdit(n.Elem, edit) +} func (n *ArrayType) DeepCopy(pos src.XPos) Node { if n.op == OTYPE { @@ -451,6 +494,7 @@ func (n *typeNode) copy() Node { c := *n; return &c } func (n *typeNode) doChildren(do func(Node) error) error { return nil } +func (n *typeNode) editChildren(edit func(Node) Node) {} func (n *typeNode) Type() *types.Type { return n.typ } func (n *typeNode) Sym() *types.Sym { return n.typ.Sym() } diff --git a/src/cmd/compile/internal/ir/visit.go b/src/cmd/compile/internal/ir/visit.go index 042257c32ad83..4f3575614df64 100644 --- a/src/cmd/compile/internal/ir/visit.go +++ b/src/cmd/compile/internal/ir/visit.go @@ -226,16 +226,7 @@ func EditChildren(n Node, edit func(Node) Node) { if n == nil { return } - editList(n.Init(), edit) - if l := n.Left(); l != nil { - n.SetLeft(edit(l)) - } - if r := n.Right(); r != nil { - n.SetRight(edit(r)) - } - editList(n.List(), edit) - editList(n.Body(), edit) - editList(n.Rlist(), edit) + n.editChildren(edit) } // editList calls edit on each non-nil node x in the list, From 9ab3d854ad95d06f5dd0874050ee57dd63c5a746 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 3 Dec 2020 23:56:16 -0500 Subject: [PATCH 113/474] [dev.regabi] cmd/compile: avoid general traversal in deadcode deadcode is trying to walk the statements it can find, but it can sweep in other nodes too. Stop doing that: only walk known statements containing statements. Otherwise, if we put panics in expression accessors that shouldn't be used anymore, deadcode can trip them. deadcode would be a good candidate to rewrite using EditChildren, but that would certainly cause toolstash changes, since deadcode is so ad-hoc about exactly which parts of the function it looks at. For now just remove the general traversal and leave as is. Passes buildall w/ toolstash -cmp. 
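For contrast, a future EditChildren-based deadcode might look
roughly like the sketch below (hypothetical only, deliberately not
done in this CL; pruneDead is an assumed helper that cuts
unreachable statements):

	var edit func(ir.Node) ir.Node
	edit = func(n ir.Node) ir.Node {
		n = pruneDead(n)         // assumed helper, not in this CL
		ir.EditChildren(n, edit) // uniform traversal of all children
		return n
	}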
Change-Id: I06481eb87350905597600203c4fa724d55645b46 Reviewed-on: https://go-review.googlesource.com/c/go/+/275377 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/typecheck.go | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 65c5f2abce006..2070297bc0da6 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -3860,9 +3860,24 @@ func deadcodeslice(nn *ir.Nodes) { } deadcodeslice(n.PtrInit()) - deadcodeslice(n.PtrBody()) - deadcodeslice(n.PtrList()) - deadcodeslice(n.PtrRlist()) + switch n.Op() { + case ir.OBLOCK: + deadcodeslice(n.PtrList()) + case ir.OCASE: + deadcodeslice(n.PtrBody()) + case ir.OFOR: + deadcodeslice(n.PtrBody()) + case ir.OIF: + deadcodeslice(n.PtrBody()) + deadcodeslice(n.PtrRlist()) + case ir.ORANGE: + deadcodeslice(n.PtrBody()) + case ir.OSELECT: + deadcodeslice(n.PtrList()) + case ir.OSWITCH: + deadcodeslice(n.PtrList()) + } + if cut { nn.Set(nn.Slice()[:i+1]) break From 5dbd2e8e44d823bfbc3df883c544e23f4a872de1 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Fri, 4 Dec 2020 00:30:53 -0500 Subject: [PATCH 114/474] [dev.regabi] cmd/compile: remove DeepCopyNode interface The only reason for the DeepCopyNode interface was to allow the type syntaxes to avoid being constrained by Left, Right etc. methods. Now those are gone, so the general traversal methods they implement (doChildren, editChildren) do the right thing for DeepCopy. Passes buildall w/ toolstash -cmp. Change-Id: I54672c011114a95efabff32dbcf02e6071f91b9e Reviewed-on: https://go-review.googlesource.com/c/go/+/275379 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/copy.go | 21 ---------- src/cmd/compile/internal/ir/expr.go | 11 ------ src/cmd/compile/internal/ir/type.go | 59 ----------------------------- 3 files changed, 91 deletions(-) diff --git a/src/cmd/compile/internal/ir/copy.go b/src/cmd/compile/internal/ir/copy.go index 86e78cfc33e38..7f5d3135133af 100644 --- a/src/cmd/compile/internal/ir/copy.go +++ b/src/cmd/compile/internal/ir/copy.go @@ -70,33 +70,12 @@ func copyList(x Nodes) Nodes { return AsNodes(c) } -// A Node can implement DeepCopyNode to provide a custom implementation -// of DeepCopy. If the compiler only needs access to a Node's structure during -// DeepCopy, then a Node can implement DeepCopyNode instead of providing -// fine-grained mutable access with Left, SetLeft, Right, SetRight, and so on. -type DeepCopyNode interface { - Node - DeepCopy(pos src.XPos) Node -} - // DeepCopy returns a “deep” copy of n, with its entire structure copied // (except for shared nodes like ONAME, ONONAME, OLITERAL, and OTYPE). // If pos.IsKnown(), it sets the source position of newly allocated Nodes to pos. -// -// The default implementation is to traverse the Node graph, making -// a shallow copy of each node and then updating each field to point -// at shallow copies of children, recursively, using Left, SetLeft, and so on. -// -// If a Node wishes to provide an alternate implementation, it can -// implement a DeepCopy method: see the DeepCopyNode interface. -// -// TODO(rsc): Once Nodes implement EditChildren, remove the DeepCopyNode interface. 
func DeepCopy(pos src.XPos, n Node) Node { var edit func(Node) Node edit = func(x Node) Node { - if x, ok := x.(DeepCopyNode); ok { - return x.DeepCopy(pos) - } switch x.Op() { case OPACK, ONAME, ONONAME, OLITERAL, ONIL, OTYPE: return x diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 312faa8436a33..cfdb86f221249 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -1163,17 +1163,6 @@ func (n *StarExpr) SetOTYPE(t *types.Type) { t.SetNod(n) } -func (n *StarExpr) DeepCopy(pos src.XPos) Node { - if n.op == OTYPE { - // Can't change types and no node references left. - return n - } - c := SepCopy(n).(*StarExpr) - c.pos = n.posOr(pos) - c.X = DeepCopy(pos, n.X) - return c -} - // A TypeAssertionExpr is a selector expression X.(Type). // Before type-checking, the type is Ntype. type TypeAssertExpr struct { diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go index d69dc3fd2ae9b..9f82c9faa2680 100644 --- a/src/cmd/compile/internal/ir/type.go +++ b/src/cmd/compile/internal/ir/type.go @@ -88,14 +88,6 @@ func (n *ChanType) SetOTYPE(t *types.Type) { n.Elem = nil } -func (n *ChanType) DeepCopy(pos src.XPos) Node { - if n.op == OTYPE { - // Can't change types and no node references left. - return n - } - return NewChanType(n.posOr(pos), DeepCopy(pos, n.Elem), n.Dir) -} - // A MapType represents a map[Key]Value type syntax. type MapType struct { miniType @@ -129,14 +121,6 @@ func (n *MapType) SetOTYPE(t *types.Type) { n.Elem = nil } -func (n *MapType) DeepCopy(pos src.XPos) Node { - if n.op == OTYPE { - // Can't change types and no node references left. - return n - } - return NewMapType(n.posOr(pos), DeepCopy(pos, n.Key), DeepCopy(pos, n.Elem)) -} - // A StructType represents a struct { ... } type syntax. type StructType struct { miniType @@ -171,14 +155,6 @@ func (n *StructType) SetOTYPE(t *types.Type) { n.Fields = nil } -func (n *StructType) DeepCopy(pos src.XPos) Node { - if n.op == OTYPE { - // Can't change types and no node references left. - return n - } - return NewStructType(n.posOr(pos), deepCopyFields(pos, n.Fields)) -} - func deepCopyFields(pos src.XPos, fields []*Field) []*Field { var out []*Field for _, f := range fields { @@ -221,14 +197,6 @@ func (n *InterfaceType) SetOTYPE(t *types.Type) { n.Methods = nil } -func (n *InterfaceType) DeepCopy(pos src.XPos) Node { - if n.op == OTYPE { - // Can't change types and no node references left. - return n - } - return NewInterfaceType(n.posOr(pos), deepCopyFields(pos, n.Methods)) -} - // A FuncType represents a func(Args) Results type syntax. type FuncType struct { miniType @@ -275,17 +243,6 @@ func (n *FuncType) SetOTYPE(t *types.Type) { n.Results = nil } -func (n *FuncType) DeepCopy(pos src.XPos) Node { - if n.op == OTYPE { - // Can't change types and no node references left. - return n - } - return NewFuncType(n.posOr(pos), - n.Recv.deepCopy(pos), - deepCopyFields(pos, n.Params), - deepCopyFields(pos, n.Results)) -} - // A Field is a declared struct field, interface method, or function argument. // It is not a Node. type Field struct { @@ -424,14 +381,6 @@ func (n *SliceType) SetOTYPE(t *types.Type) { n.Elem = nil } -func (n *SliceType) DeepCopy(pos src.XPos) Node { - if n.op == OTYPE { - // Can't change types and no node references left. - return n - } - return NewSliceType(n.posOr(pos), DeepCopy(pos, n.Elem)) -} - // An ArrayType represents a [Len]Elem type syntax. 
// If Len is nil, the type is a [...]Elem in an array literal. type ArrayType struct { @@ -461,14 +410,6 @@ func (n *ArrayType) editChildren(edit func(Node) Node) { n.Elem = maybeEdit(n.Elem, edit) } -func (n *ArrayType) DeepCopy(pos src.XPos) Node { - if n.op == OTYPE { - // Can't change types and no node references left. - return n - } - return NewArrayType(n.posOr(pos), DeepCopy(pos, n.Len), DeepCopy(pos, n.Elem)) -} - func (n *ArrayType) SetOTYPE(t *types.Type) { n.setOTYPE(t, n) n.Len = nil From d9cb84c84bb0edc1afb782f99de4cc424ac0d23f Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Thu, 3 Dec 2020 16:52:45 -0800 Subject: [PATCH 115/474] [dev.regabi] cmd/compile: add SameSource, Uses, and DeclaredBy helpers Currently, because we use the same *Name to represent both declaration and uses of an object, it's ambiguous what "n1 == n2" means when comparing two Node values. It can mean any of: Are these the same syntactic element? Is n1 a use of declared variable n2? Are n1 and n2 both uses of the same declared variable? We'd like to introduce a new IdentExpr node to replace use of Name within the AST, but that means those three cases need to be handled differently. The first case needs to stay "n1 == n2", but the other cases need to become "n1.Name() == n2" and "n1.Name() == n2.Name()", respectively. ("n1.Name() == n2.Name()" also currently works for the second case, but eventually we'll want to get rid of the Name.Name method.) This CL introduces helper functions SameSource and Uses to handle these cases. It also introduces DeclaredBy, which is another somewhat common case that the next CL introduces uses of. Passes buildall w/ toolstash -cmp. Updates #42990. Change-Id: Ia816c124446e9067645d5820a8163f295968794f Reviewed-on: https://go-review.googlesource.com/c/go/+/275305 Reviewed-by: Russ Cox Trust: Matthew Dempsky --- src/cmd/compile/internal/ir/name.go | 37 +++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index d2c33eab2b590..030fb82a7d2d1 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -297,6 +297,43 @@ func (n *Name) SetVal(v constant.Value) { n.val = v } +// SameSource reports whether two nodes refer to the same source +// element. +// +// It exists to help incrementally migrate the compiler towards +// allowing the introduction of IdentExpr (#42990). Once we have +// IdentExpr, it will no longer be safe to directly compare Node +// values to tell if they refer to the same Name. Instead, code will +// need to explicitly get references to the underlying Name object(s), +// and compare those instead. +// +// It will still be safe to compare Nodes directly for checking if two +// nodes are syntactically the same. The SameSource function exists to +// indicate code that intentionally compares Nodes for syntactic +// equality as opposed to code that has yet to be updated in +// preparation for IdentExpr. +func SameSource(n1, n2 Node) bool { + return n1 == n2 +} + +// Uses reports whether expression x is a (direct) use of the given +// variable. +func Uses(x Node, v *Name) bool { + if v == nil || v.Op() != ONAME { + base.Fatalf("RefersTo bad Name: %v", v) + } + return x.Op() == ONAME && x.Name() == v +} + +// DeclaredBy reports whether expression x refers (directly) to a +// variable that was declared by the given statement. 
+func DeclaredBy(x, stmt Node) bool { + if stmt == nil { + base.Fatalf("DeclaredBy nil") + } + return x.Op() == ONAME && SameSource(x.Name().Defn, stmt) +} + // The Class of a variable/function describes the "storage class" // of a variable or function. During parsing, storage classes are // called declaration contexts. From 133b03e1c386dc69e46fa36f9053ff6993125ace Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Thu, 3 Dec 2020 16:57:56 -0800 Subject: [PATCH 116/474] [dev.regabi] cmd/compile: rewrite code to use DeclaredBy Passes buildall w/ toolstash -cmp. Updates #42990. [git-generate] cd src/cmd/compile/internal/gc rf ' ex { import "cmd/compile/internal/ir" var x, stmt ir.Node x.Name() != nil && x.Name().Defn == stmt -> ir.DeclaredBy(x, stmt) x.Name() == nil || x.Name().Defn != stmt -> !ir.DeclaredBy(x, stmt) } ' Change-Id: I222a757296dbcb5d0889d617d221a9d7319f2d74 Reviewed-on: https://go-review.googlesource.com/c/go/+/275306 Reviewed-by: Russ Cox Trust: Matthew Dempsky --- src/cmd/compile/internal/gc/range.go | 8 ++++---- src/cmd/compile/internal/gc/typecheck.go | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index e48642a854a5b..8025119c5e250 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -49,7 +49,7 @@ func typecheckrangeExpr(n ir.Node) { // delicate little dance. see typecheckas2 ls := n.List().Slice() for i1, n1 := range ls { - if n1.Name() == nil || n1.Name().Defn != n { + if !ir.DeclaredBy(n1, n) { ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign) } } @@ -115,7 +115,7 @@ func typecheckrangeExpr(n ir.Node) { } if v1 != nil { - if v1.Name() != nil && v1.Name().Defn == n { + if ir.DeclaredBy(v1, n) { v1.SetType(t1) } else if v1.Type() != nil { if op, why := assignop(t1, v1.Type()); op == ir.OXXX { @@ -126,7 +126,7 @@ func typecheckrangeExpr(n ir.Node) { } if v2 != nil { - if v2.Name() != nil && v2.Name().Defn == n { + if ir.DeclaredBy(v2, n) { v2.SetType(t2) } else if v2.Type() != nil { if op, why := assignop(t2, v2.Type()); op == ir.OXXX { @@ -477,7 +477,7 @@ func isMapClear(n ir.Node) bool { } // Require k to be a new variable name. - if k.Name() == nil || k.Name().Defn != n { + if !ir.DeclaredBy(k, n) { return false } diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 2070297bc0da6..c22786f1486cf 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -3083,7 +3083,7 @@ func checklvalue(n ir.Node, verb string) { func checkassign(stmt ir.Node, n ir.Node) { // Variables declared in ORANGE are assigned on every iteration. - if n.Name() == nil || n.Name().Defn != stmt || stmt.Op() == ir.ORANGE { + if !ir.DeclaredBy(n, stmt) || stmt.Op() == ir.ORANGE { r := outervalue(n) if r.Op() == ir.ONAME { r.Name().SetAssigned(true) @@ -3192,7 +3192,7 @@ func typecheckas(n ir.Node) { // so that the conversion below happens). 
n.SetLeft(resolve(n.Left())) - if n.Left().Name() == nil || n.Left().Name().Defn != n || n.Left().Name().Ntype != nil { + if !ir.DeclaredBy(n.Left(), n) || n.Left().Name().Ntype != nil { n.SetLeft(typecheck(n.Left(), ctxExpr|ctxAssign)) } @@ -3211,7 +3211,7 @@ func typecheckas(n ir.Node) { } } - if n.Left().Name() != nil && n.Left().Name().Defn == n && n.Left().Name().Ntype == nil { + if ir.DeclaredBy(n.Left(), n) && n.Left().Name().Ntype == nil { n.SetRight(defaultlit(n.Right(), nil)) n.Left().SetType(n.Right().Type()) } @@ -3247,7 +3247,7 @@ func typecheckas2(n ir.Node) { n1 = resolve(n1) ls[i1] = n1 - if n1.Name() == nil || n1.Name().Defn != n || n1.Name().Ntype != nil { + if !ir.DeclaredBy(n1, n) || n1.Name().Ntype != nil { ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign) } } @@ -3272,7 +3272,7 @@ func typecheckas2(n ir.Node) { if nl.Type() != nil && nr.Type() != nil { rs[il] = assignconv(nr, nl.Type(), "assignment") } - if nl.Name() != nil && nl.Name().Defn == n && nl.Name().Ntype == nil { + if ir.DeclaredBy(nl, n) && nl.Name().Ntype == nil { rs[il] = defaultlit(rs[il], nil) nl.SetType(rs[il].Type()) } @@ -3305,7 +3305,7 @@ func typecheckas2(n ir.Node) { if f.Type != nil && l.Type() != nil { checkassignto(f.Type, l) } - if l.Name() != nil && l.Name().Defn == n && l.Name().Ntype == nil { + if ir.DeclaredBy(l, n) && l.Name().Ntype == nil { l.SetType(f.Type) } } @@ -3332,14 +3332,14 @@ func typecheckas2(n ir.Node) { if l.Type() != nil { checkassignto(r.Type(), l) } - if l.Name() != nil && l.Name().Defn == n { + if ir.DeclaredBy(l, n) { l.SetType(r.Type()) } l := n.List().Second() if l.Type() != nil && !l.Type().IsBoolean() { checkassignto(types.Types[types.TBOOL], l) } - if l.Name() != nil && l.Name().Defn == n && l.Name().Ntype == nil { + if ir.DeclaredBy(l, n) && l.Name().Ntype == nil { l.SetType(types.Types[types.TBOOL]) } goto out From b75f51c6451a00f223ad43ed7069e4136466fdac Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Fri, 4 Dec 2020 11:07:25 +0700 Subject: [PATCH 117/474] [dev.regabi] cmd/compile: replace ir.Node with *ir.Name in Liveness Passes buildall w/ toolstash -cmp. Updates #42982 Change-Id: Iad8df321adfd576da070c13ed16a9651d4e59ad8 Reviewed-on: https://go-review.googlesource.com/c/go/+/275352 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/plive.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index f2555cc94160f..06e423daa1b63 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -103,8 +103,8 @@ type BlockEffects struct { type Liveness struct { fn *ir.Func f *ssa.Func - vars []ir.Node - idx map[ir.Node]int32 + vars []*ir.Name + idx map[*ir.Name]int32 stkptrsize int64 be []BlockEffects @@ -212,14 +212,14 @@ func livenessShouldTrack(n ir.Node) bool { // getvariables returns the list of on-stack variables that we need to track // and a map for looking up indices by *Node. 
-func getvariables(fn *ir.Func) ([]ir.Node, map[ir.Node]int32) { - var vars []ir.Node +func getvariables(fn *ir.Func) ([]*ir.Name, map[*ir.Name]int32) { + var vars []*ir.Name for _, n := range fn.Dcl { if livenessShouldTrack(n) { vars = append(vars, n) } } - idx := make(map[ir.Node]int32, len(vars)) + idx := make(map[*ir.Name]int32, len(vars)) for i, n := range vars { idx[n] = int32(i) } @@ -276,13 +276,14 @@ func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) { return -1, 0 } + nn := n.(*ir.Name) // AllocFrame has dropped unused variables from // lv.fn.Func.Dcl, but they might still be referenced by // OpVarFoo pseudo-ops. Ignore them to prevent "lost track of // variable" ICEs (issue 19632). switch v.Op { case ssa.OpVarDef, ssa.OpVarKill, ssa.OpVarLive, ssa.OpKeepAlive: - if !n.Name().Used() { + if !nn.Name().Used() { return -1, 0 } } @@ -305,7 +306,7 @@ func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) { return -1, 0 } - if pos, ok := lv.idx[n]; ok { + if pos, ok := lv.idx[nn]; ok { return pos, effect } return -1, 0 @@ -356,7 +357,7 @@ type livenessFuncCache struct { // Constructs a new liveness structure used to hold the global state of the // liveness computation. The cfg argument is a slice of *BasicBlocks and the // vars argument is a slice of *Nodes. -func newliveness(fn *ir.Func, f *ssa.Func, vars []ir.Node, idx map[ir.Node]int32, stkptrsize int64) *Liveness { +func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int32, stkptrsize int64) *Liveness { lv := &Liveness{ fn: fn, f: f, @@ -482,7 +483,7 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) { // Generates live pointer value maps for arguments and local variables. The // this argument and the in arguments are always assumed live. The vars // argument is a slice of *Nodes. -func (lv *Liveness) pointerMap(liveout bvec, vars []ir.Node, args, locals bvec) { +func (lv *Liveness) pointerMap(liveout bvec, vars []*ir.Name, args, locals bvec) { for i := int32(0); ; i++ { i = liveout.Next(i) if i < 0 { From 46b6e70e3b9380b5dff6319673e385950b9fb201 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Fri, 4 Dec 2020 11:38:47 +0700 Subject: [PATCH 118/474] [dev.regabi] cmd/compile: replace ir.Node with *ir.Name in Order Passes buildall w/ toolstash -cmp. Updates #42982 Change-Id: I7121c37f72ccbc141a7dd17fba1753f2c6289908 Reviewed-on: https://go-review.googlesource.com/c/go/+/275353 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/order.go | 14 +++++++------- src/cmd/compile/internal/gc/sinit.go | 6 +++--- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 1680d9d920781..39b78c9819517 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -44,9 +44,9 @@ import ( // Order holds state during the ordering process. type Order struct { - out []ir.Node // list of generated statements - temp []ir.Node // stack of temporary variables - free map[string][]ir.Node // free list of unused temporaries, by type.LongString(). + out []ir.Node // list of generated statements + temp []*ir.Name // stack of temporary variables + free map[string][]*ir.Name // free list of unused temporaries, by type.LongString(). 
} // Order rewrites fn.Nbody to apply the ordering constraints @@ -57,14 +57,14 @@ func order(fn *ir.Func) { ir.DumpList(s, fn.Body()) } - orderBlock(fn.PtrBody(), map[string][]ir.Node{}) + orderBlock(fn.PtrBody(), map[string][]*ir.Name{}) } // newTemp allocates a new temporary with the given type, // pushes it onto the temp stack, and returns it. // If clear is true, newTemp emits code to zero the temporary. func (o *Order) newTemp(t *types.Type, clear bool) ir.Node { - var v ir.Node + var v *ir.Name // Note: LongString is close to the type equality we want, // but not exactly. We still need to double-check with types.Identical. key := t.LongString() @@ -415,7 +415,7 @@ func (o *Order) edge() { // orderBlock orders the block of statements in n into a new slice, // and then replaces the old slice in n with the new slice. // free is a map that can be used to obtain temporary variables by type. -func orderBlock(n *ir.Nodes, free map[string][]ir.Node) { +func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) { var order Order order.free = free mark := order.markTemp() @@ -446,7 +446,7 @@ func (o *Order) exprInPlace(n ir.Node) ir.Node { // The result of orderStmtInPlace MUST be assigned back to n, e.g. // n.Left = orderStmtInPlace(n.Left) // free is a map that can be used to obtain temporary variables by type. -func orderStmtInPlace(n ir.Node, free map[string][]ir.Node) ir.Node { +func orderStmtInPlace(n ir.Node, free map[string][]*ir.Name) ir.Node { var order Order order.free = free mark := order.markTemp() diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 20abbfef8cf9c..c446c9d083bcd 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -579,7 +579,7 @@ func fixedlit(ctxt initContext, kind initKind, n ir.Node, var_ ir.Node, init *ir case initKindStatic: genAsStatic(a) case initKindDynamic, initKindLocalCode: - a = orderStmtInPlace(a, map[string][]ir.Node{}) + a = orderStmtInPlace(a, map[string][]*ir.Name{}) a = walkstmt(a) init.Append(a) default: @@ -747,7 +747,7 @@ func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) { a = ir.Nod(ir.OAS, a, value) a = typecheck(a, ctxStmt) - a = orderStmtInPlace(a, map[string][]ir.Node{}) + a = orderStmtInPlace(a, map[string][]*ir.Name{}) a = walkstmt(a) init.Append(a) } @@ -756,7 +756,7 @@ func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) { a = ir.Nod(ir.OAS, var_, ir.Nod(ir.OSLICE, vauto, nil)) a = typecheck(a, ctxStmt) - a = orderStmtInPlace(a, map[string][]ir.Node{}) + a = orderStmtInPlace(a, map[string][]*ir.Name{}) a = walkstmt(a) init.Append(a) } From 6c5967e528f1efc9dfed107c33dccf2d305f2a25 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 5 Dec 2020 15:49:03 -0800 Subject: [PATCH 119/474] [dev.regabi] cmd/compile: change NodeSet to NameSet The only user of NodeSet (computing initialization dependencies) only needs to store *Names in this structure. So change its definition to match that need, and update the code in initorder.go accordingly. Passes buildall w/ toolstash -cmp. Updates #42982. 
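The NameSet API is otherwise unchanged; a typical use, mirroring
the initorder caller below (sketch, assuming n is a *ir.Name
already in hand):

	var seen ir.NameSet
	if !seen.Has(n) {
		seen.Add(n)
	}
	// Deterministic order, e.g. by source position:
	refs := seen.Sorted(func(a, b *ir.Name) bool {
		return a.Pos().Before(b.Pos())
	})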
Change-Id: I181a8aaf9bc71e88f4ac009c4f381a718080e48f Reviewed-on: https://go-review.googlesource.com/c/go/+/275752 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky Reviewed-by: Cuong Manh Le TryBot-Result: Go Bot --- src/cmd/compile/internal/gc/initorder.go | 22 +++++++++++----------- src/cmd/compile/internal/ir/node.go | 14 +++++++------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go index 87a78ae0532f4..7f1f3cba92604 100644 --- a/src/cmd/compile/internal/gc/initorder.go +++ b/src/cmd/compile/internal/gc/initorder.go @@ -110,7 +110,7 @@ func initOrder(l []ir.Node) []ir.Node { // first. base.ExitIfErrors() - findInitLoopAndExit(firstLHS(n), new([]ir.Node)) + findInitLoopAndExit(firstLHS(n), new([]*ir.Name)) base.Fatalf("initialization unfinished, but failed to identify loop") } } @@ -136,7 +136,7 @@ func (o *InitOrder) processAssign(n ir.Node) { // Compute number of variable dependencies and build the // inverse dependency ("blocking") graph. for dep := range collectDeps(n, true) { - defn := dep.Name().Defn + defn := dep.Defn // Skip dependencies on functions (PFUNC) and // variables already initialized (InitDone). if dep.Class() != ir.PEXTERN || defn.Initorder() == InitDone { @@ -183,7 +183,7 @@ func (o *InitOrder) flushReady(initialize func(ir.Node)) { // path points to a slice used for tracking the sequence of // variables/functions visited. Using a pointer to a slice allows the // slice capacity to grow and limit reallocations. -func findInitLoopAndExit(n ir.Node, path *[]ir.Node) { +func findInitLoopAndExit(n *ir.Name, path *[]*ir.Name) { // We implement a simple DFS loop-finding algorithm. This // could be faster, but initialization cycles are rare. @@ -196,14 +196,14 @@ func findInitLoopAndExit(n ir.Node, path *[]ir.Node) { // There might be multiple loops involving n; by sorting // references, we deterministically pick the one reported. - refers := collectDeps(n.Name().Defn, false).Sorted(func(ni, nj ir.Node) bool { + refers := collectDeps(n.Name().Defn, false).Sorted(func(ni, nj *ir.Name) bool { return ni.Pos().Before(nj.Pos()) }) *path = append(*path, n) for _, ref := range refers { // Short-circuit variables that were initialized. - if ref.Class() == ir.PEXTERN && ref.Name().Defn.Initorder() == InitDone { + if ref.Class() == ir.PEXTERN && ref.Defn.Initorder() == InitDone { continue } @@ -215,7 +215,7 @@ func findInitLoopAndExit(n ir.Node, path *[]ir.Node) { // reportInitLoopAndExit reports and initialization loop as an error // and exits. However, if l is not actually an initialization loop, it // simply returns instead. -func reportInitLoopAndExit(l []ir.Node) { +func reportInitLoopAndExit(l []*ir.Name) { // Rotate loop so that the earliest variable declaration is at // the start. i := -1 @@ -250,7 +250,7 @@ func reportInitLoopAndExit(l []ir.Node) { // variables that declaration n depends on. If transitive is true, // then it also includes the transitive dependencies of any depended // upon functions (but not variables). 
-func collectDeps(n ir.Node, transitive bool) ir.NodeSet { +func collectDeps(n ir.Node, transitive bool) ir.NameSet { d := initDeps{transitive: transitive} switch n.Op() { case ir.OAS: @@ -267,7 +267,7 @@ func collectDeps(n ir.Node, transitive bool) ir.NodeSet { type initDeps struct { transitive bool - seen ir.NodeSet + seen ir.NameSet } func (d *initDeps) inspect(n ir.Node) { ir.Inspect(n, d.visit) } @@ -345,12 +345,12 @@ func (s *declOrder) Pop() interface{} { // firstLHS returns the first expression on the left-hand side of // assignment n. -func firstLHS(n ir.Node) ir.Node { +func firstLHS(n ir.Node) *ir.Name { switch n.Op() { case ir.OAS: - return n.Left() + return n.Left().Name() case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2RECV, ir.OAS2MAPR: - return n.List().First() + return n.List().First().Name() } base.Fatalf("unexpected Op: %v", n.Op()) diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index f44d22313cabb..a0ee8aa0fecd1 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -585,26 +585,26 @@ func (q *NodeQueue) PopLeft() Node { return n } -// NodeSet is a set of Nodes. -type NodeSet map[Node]struct{} +// NameSet is a set of Names. +type NameSet map[*Name]struct{} // Has reports whether s contains n. -func (s NodeSet) Has(n Node) bool { +func (s NameSet) Has(n *Name) bool { _, isPresent := s[n] return isPresent } // Add adds n to s. -func (s *NodeSet) Add(n Node) { +func (s *NameSet) Add(n *Name) { if *s == nil { - *s = make(map[Node]struct{}) + *s = make(map[*Name]struct{}) } (*s)[n] = struct{}{} } // Sorted returns s sorted according to less. -func (s NodeSet) Sorted(less func(Node, Node) bool) []Node { - var res []Node +func (s NameSet) Sorted(less func(*Name, *Name) bool) []*Name { + var res []*Name for n := range s { res = append(res, n) } From 1b5eed89828f41e290ae212c596ff301c5db7204 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 6 Dec 2020 12:29:42 -0800 Subject: [PATCH 120/474] [dev.regabi] cmd/compile: replace NodeQueue with NameQueue Similar to the previous CL, the only two users of NodeQueue only needed it for tracking objects, not arbitrary AST nodes. So change its signature to use *Name instead of Node. This does require a tweak to the nowritebarrierrec checker, because previously it was pushing the ODCLFUNC *Func pointers into the queue, whereas now we push the ONAME/PFUNC *Name pointers instead. However, it's trivial and safe to flip between them. Also, this changes a handful of export-related code from Node to *Name, to avoid introducing type assertions within iexport.go. Passes buildall w/ toolstash -cmp. Updates #42982. Change-Id: I867f9752121509fc3da753978c6a41d5015bc0ce Reviewed-on: https://go-review.googlesource.com/c/go/+/275753 Reviewed-by: Cuong Manh Le Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot --- src/cmd/compile/internal/gc/dcl.go | 8 ++++---- src/cmd/compile/internal/gc/export.go | 4 ++-- src/cmd/compile/internal/gc/go.go | 2 +- src/cmd/compile/internal/gc/iexport.go | 6 +++--- src/cmd/compile/internal/ir/node.go | 22 +++++++++++----------- 5 files changed, 21 insertions(+), 21 deletions(-) diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index ce13f0bdfcc67..56f8d1b9bf79d 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -931,7 +931,7 @@ func (c *nowritebarrierrecChecker) check() { // acts as the set of marks for the BFS of the call graph.
funcs := make(map[ir.Node]nowritebarrierrecCall) // q is the queue of ODCLFUNC Nodes to visit in BFS order. - var q ir.NodeQueue + var q ir.NameQueue for _, n := range xtop { if n.Op() != ir.ODCLFUNC { @@ -944,7 +944,7 @@ func (c *nowritebarrierrecChecker) check() { // Make nowritebarrierrec functions BFS roots. if fn.Pragma&ir.Nowritebarrierrec != 0 { funcs[fn] = nowritebarrierrecCall{} - q.PushRight(fn) + q.PushRight(fn.Nname) } // Check go:nowritebarrier functions. if fn.Pragma&ir.Nowritebarrier != 0 && fn.WBPos.IsKnown() { @@ -966,10 +966,10 @@ func (c *nowritebarrierrecChecker) check() { // Record the path. funcs[target] = nowritebarrierrecCall{target: src, lineno: pos} - q.PushRight(target) + q.PushRight(target.Nname) } for !q.Empty() { - fn := q.PopLeft().(*ir.Func) + fn := q.PopLeft().Func() // Check fn. if fn.WBPos.IsKnown() { diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 44fc70be037fe..b632a15865d0a 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -24,7 +24,7 @@ func exportf(bout *bio.Writer, format string, args ...interface{}) { var asmlist []ir.Node // exportsym marks n for export (or reexport). -func exportsym(n ir.Node) { +func exportsym(n *ir.Name) { if n.Sym().OnExportList() { return } @@ -41,7 +41,7 @@ func initname(s string) bool { return s == "init" } -func autoexport(n ir.Node, ctxt ir.Class) { +func autoexport(n *ir.Name, ctxt ir.Class) { if n.Sym().Pkg != ir.LocalPkg { return } diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index c493165c7688a..c4b9c185dcb37 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -130,7 +130,7 @@ var ( var xtop []ir.Node -var exportlist []ir.Node +var exportlist []*ir.Name var importlist []*ir.Func // imported functions and methods with inlinable bodies diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index bb6f2b11e62e4..14614d8ab89ef 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -368,7 +368,7 @@ type iexporter struct { // main index. allPkgs map[*types.Pkg]bool - declTodo ir.NodeQueue + declTodo ir.NameQueue strings intWriter stringIndex map[string]uint64 @@ -394,7 +394,7 @@ func (p *iexporter) stringOff(s string) uint64 { } // pushDecl adds n to the declaration work queue, if not already present. -func (p *iexporter) pushDecl(n ir.Node) { +func (p *iexporter) pushDecl(n *ir.Name) { if n.Sym() == nil || n.Sym().Def != n && n.Op() != ir.OTYPE { base.Fatalf("weird Sym: %v, %v", n, n.Sym()) } @@ -573,7 +573,7 @@ func (w *exportWriter) pkg(pkg *types.Pkg) { func (w *exportWriter) qualifiedIdent(n ir.Node) { // Ensure any referenced declarations are written out too. - w.p.pushDecl(n) + w.p.pushDecl(n.Name()) s := n.Sym() w.string(s.Name) diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index a0ee8aa0fecd1..7fd02925ba34a 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -539,25 +539,25 @@ func (n Nodes) Copy() Nodes { return c } -// nodeQueue is a FIFO queue of *Node. The zero value of nodeQueue is +// NameQueue is a FIFO queue of *Name. The zero value of NameQueue is // a ready-to-use empty queue. -type NodeQueue struct { - ring []Node +type NameQueue struct { + ring []*Name head, tail int } -// empty reports whether q contains no Nodes. 
-func (q *NodeQueue) Empty() bool { +// Empty reports whether q contains no Names. +func (q *NameQueue) Empty() bool { return q.head == q.tail } -// pushRight appends n to the right of the queue. -func (q *NodeQueue) PushRight(n Node) { +// PushRight appends n to the right of the queue. +func (q *NameQueue) PushRight(n *Name) { if len(q.ring) == 0 { - q.ring = make([]Node, 16) + q.ring = make([]*Name, 16) } else if q.head+len(q.ring) == q.tail { // Grow the ring. - nring := make([]Node, len(q.ring)*2) + nring := make([]*Name, len(q.ring)*2) // Copy the old elements. part := q.ring[q.head%len(q.ring):] if q.tail-q.head <= len(part) { @@ -574,9 +574,9 @@ func (q *NodeQueue) PushRight(n Node) { q.tail++ } -// popLeft pops a node from the left of the queue. It panics if q is +// PopLeft pops a Name from the left of the queue. It panics if q is // empty. -func (q *NodeQueue) PopLeft() Node { +func (q *NameQueue) PopLeft() *Name { if q.Empty() { panic("dequeue empty") } From 2d4c95565a770227ef2943b68ebe9fac02f79377 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 5 Dec 2020 16:33:59 -0800 Subject: [PATCH 121/474] [dev.regabi] cmd/compile: change nowritebarrierrec to use map[*ir.Func] All of the uses were already using *ir.Func index operands, so only the map type itself needs updating. Passes buildall w/ toolstash -cmp. Updates #42982. Change-Id: I568d8601f3eb077e07e887f2071aa1a2667d803c Reviewed-on: https://go-review.googlesource.com/c/go/+/275754 Reviewed-by: Cuong Manh Le Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky --- src/cmd/compile/internal/gc/dcl.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 56f8d1b9bf79d..5936aeb950152 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -838,7 +838,7 @@ type nowritebarrierrecChecker struct { // extraCalls contains extra function calls that may not be // visible during later analysis. It maps from the ODCLFUNC of // the caller to a list of callees. - extraCalls map[ir.Node][]nowritebarrierrecCall + extraCalls map[*ir.Func][]nowritebarrierrecCall // curfn is the current function during AST walks. curfn *ir.Func @@ -853,7 +853,7 @@ type nowritebarrierrecCall struct { // must be called before transformclosure and walk. func newNowritebarrierrecChecker() *nowritebarrierrecChecker { c := &nowritebarrierrecChecker{ - extraCalls: make(map[ir.Node][]nowritebarrierrecCall), + extraCalls: make(map[*ir.Func][]nowritebarrierrecCall), } // Find all systemstack calls and record their targets. In @@ -929,7 +929,7 @@ func (c *nowritebarrierrecChecker) check() { // that are directly marked go:nowritebarrierrec are in this // map with a zero-valued nowritebarrierrecCall. This also // acts as the set of marks for the BFS of the call graph. - funcs := make(map[ir.Node]nowritebarrierrecCall) + funcs := make(map[*ir.Func]nowritebarrierrecCall) // q is the queue of ODCLFUNC Nodes to visit in BFS order. var q ir.NameQueue From e885df2731cb36925c9a9de9cf1a34a167461cd7 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 6 Dec 2020 12:14:46 -0800 Subject: [PATCH 122/474] [dev.regabi] cmd/compile: change iexport to avoid map[ir.Node] In the past, we had a lot of trouble with misusing *types.Sym throughout the frontend, so I tried to push us towards always passing around ONAMEs instead. But for constructing and writing out the symbol indexes for the indexed export data, keying by *types.Sym is exactly what we want.
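As a rough standalone illustration of why symbol keys are convenient here (Pkg and Sym below are simplified stand-ins for the compiler's *types.Pkg and *types.Sym, not the real types), grouping and sorting an index keyed by symbols needs no detour through a node's Sym() accessor:

package main

import (
	"fmt"
	"sort"
)

// Pkg and Sym are simplified stand-ins for *types.Pkg and *types.Sym.
type Pkg struct{ Path string }

type Sym struct {
	Pkg  *Pkg
	Name string
}

func main() {
	p := &Pkg{Path: "example.com/p"}
	a := &Sym{Pkg: p, Name: "B"}
	b := &Sym{Pkg: p, Name: "A"}

	// declIndex maps each exported symbol to a data offset,
	// mirroring iexporter.declIndex after this CL.
	declIndex := map[*Sym]uint64{a: 7, b: 3}

	// The two steps writeIndex performs: group symbols by package,
	// then sort each group by name.
	pkgSyms := map[*Pkg][]*Sym{}
	for sym := range declIndex {
		pkgSyms[sym.Pkg] = append(pkgSyms[sym.Pkg], sym)
	}
	for _, syms := range pkgSyms {
		sort.Slice(syms, func(i, j int) bool { return syms[i].Name < syms[j].Name })
		for _, sym := range syms {
			fmt.Println(sym.Pkg.Path, sym.Name, declIndex[sym])
		}
	}
}

Because symbols within a package are sorted by name, the per-package output is deterministic regardless of map iteration order: "example.com/p A 3" then "example.com/p B 7". That is the property writeIndex relies on when emitting the export-data index (it additionally sorts the packages themselves by path, as the diff below shows).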
Passes buildall w/ toolstash -cmp. Updates #42982. Change-Id: Idd8f1fb057d75a52a34ebc7788d9332fb49caf8d Reviewed-on: https://go-review.googlesource.com/c/go/+/275755 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/gc/iexport.go | 62 +++++++++++++------------- 1 file changed, 32 insertions(+), 30 deletions(-) diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 14614d8ab89ef..003cf3b446d18 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -259,8 +259,8 @@ func iexport(out *bufio.Writer) { p := iexporter{ allPkgs: map[*types.Pkg]bool{}, stringIndex: map[string]uint64{}, - declIndex: map[ir.Node]uint64{}, - inlineIndex: map[ir.Node]uint64{}, + declIndex: map[*types.Sym]uint64{}, + inlineIndex: map[*types.Sym]uint64{}, typIndex: map[*types.Type]uint64{}, } @@ -310,37 +310,34 @@ func iexport(out *bufio.Writer) { out.Write(base.Ctxt.Fingerprint[:]) } -// writeIndex writes out an object index. mainIndex indicates whether +// writeIndex writes out a symbol index. mainIndex indicates whether // we're writing out the main index, which is also read by // non-compiler tools and includes a complete package description // (i.e., name and height). -func (w *exportWriter) writeIndex(index map[ir.Node]uint64, mainIndex bool) { - // Build a map from packages to objects from that package. - pkgObjs := map[*types.Pkg][]ir.Node{} +func (w *exportWriter) writeIndex(index map[*types.Sym]uint64, mainIndex bool) { + // Build a map from packages to symbols from that package. + pkgSyms := map[*types.Pkg][]*types.Sym{} // For the main index, make sure to include every package that // we reference, even if we're not exporting (or reexporting) // any symbols from it. if mainIndex { - pkgObjs[ir.LocalPkg] = nil + pkgSyms[ir.LocalPkg] = nil for pkg := range w.p.allPkgs { - pkgObjs[pkg] = nil + pkgSyms[pkg] = nil } } - for n := range index { - pkgObjs[n.Sym().Pkg] = append(pkgObjs[n.Sym().Pkg], n) + // Group symbols by package. + for sym := range index { + pkgSyms[sym.Pkg] = append(pkgSyms[sym.Pkg], sym) } + // Sort packages by path. var pkgs []*types.Pkg - for pkg, objs := range pkgObjs { + for pkg := range pkgSyms { pkgs = append(pkgs, pkg) - - sort.Slice(objs, func(i, j int) bool { - return objs[i].Sym().Name < objs[j].Sym().Name - }) } - sort.Slice(pkgs, func(i, j int) bool { return pkgs[i].Path < pkgs[j].Path }) @@ -353,11 +350,16 @@ func (w *exportWriter) writeIndex(index map[ir.Node]uint64, mainIndex bool) { w.uint64(uint64(pkg.Height)) } - objs := pkgObjs[pkg] - w.uint64(uint64(len(objs))) - for _, n := range objs { - w.string(n.Sym().Name) - w.uint64(index[n]) + // Sort symbols within a package by name. 
+ syms := pkgSyms[pkg] + sort.Slice(syms, func(i, j int) bool { + return syms[i].Name < syms[j].Name + }) + + w.uint64(uint64(len(syms))) + for _, sym := range syms { + w.string(sym.Name) + w.uint64(index[sym]) } } } @@ -374,8 +376,8 @@ type iexporter struct { stringIndex map[string]uint64 data0 intWriter - declIndex map[ir.Node]uint64 - inlineIndex map[ir.Node]uint64 + declIndex map[*types.Sym]uint64 + inlineIndex map[*types.Sym]uint64 typIndex map[*types.Type]uint64 } @@ -404,11 +406,11 @@ func (p *iexporter) pushDecl(n *ir.Name) { return } - if _, ok := p.declIndex[n]; ok { + if _, ok := p.declIndex[n.Sym()]; ok { return } - p.declIndex[n] = ^uint64(0) // mark n present in work queue + p.declIndex[n.Sym()] = ^uint64(0) // mark n present in work queue p.declTodo.PushRight(n) } @@ -423,13 +425,12 @@ type exportWriter struct { prevColumn int64 } -func (p *iexporter) doDecl(n ir.Node) { +func (p *iexporter) doDecl(n *ir.Name) { w := p.newWriter() w.setPkg(n.Sym().Pkg, false) switch n.Op() { case ir.ONAME: - n := n.(*ir.Name) switch n.Class() { case ir.PEXTERN: // Variable. @@ -455,7 +456,8 @@ func (p *iexporter) doDecl(n ir.Node) { case ir.OLITERAL: // Constant. - n = typecheck(n, ctxExpr) + // TODO(mdempsky): Do we still need this typecheck? If so, why? + n = typecheck(n, ctxExpr).(*ir.Name) w.tag('C') w.pos(n.Pos()) w.value(n.Type(), n.Val()) @@ -509,7 +511,7 @@ func (p *iexporter) doDecl(n ir.Node) { base.Fatalf("unexpected node: %v", n) } - p.declIndex[n] = w.flush() + p.declIndex[n.Sym()] = w.flush() } func (w *exportWriter) tag(tag byte) { @@ -522,7 +524,7 @@ func (p *iexporter) doInline(f *ir.Name) { w.stmtList(ir.AsNodes(f.Func().Inl.Body)) - p.inlineIndex[f] = w.flush() + p.inlineIndex[f.Sym()] = w.flush() } func (w *exportWriter) pos(pos src.XPos) { From d90b199e9c3d6673b1951ddb6a78addd7e0dda26 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Thu, 3 Dec 2020 14:00:19 -0800 Subject: [PATCH 123/474] [dev.regabi] cmd/compile: silence errors about missing blank methods If an interface contains a blank method, that's already an error. No need for useless follow-up error messages about not implementing them. Fixes #42964. Change-Id: I5bf53a8f27d75d4c86c61588c5e2e3e95563d320 Reviewed-on: https://go-review.googlesource.com/c/go/+/275294 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Robert Griesemer --- src/cmd/compile/internal/gc/dcl.go | 12 ------------ src/cmd/compile/internal/gc/noder.go | 5 ++++- test/interface/explicit.go | 7 ++++--- 3 files changed, 8 insertions(+), 16 deletions(-) diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 5936aeb950152..a77c1aed45554 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -317,18 +317,6 @@ func colasdefn(left []ir.Node, defn ir.Node) { } } -// declare the arguments in an -// interface field declaration. -func ifacedcl(n *ir.Field) { - if n.Sym == nil { - base.Fatalf("ifacedcl") - } - - if n.Sym.IsBlank() { - base.Errorf("methods must have a unique non-blank name") - } -} - // declare the function proper // and declare the arguments. 
// called in extern-declaration context diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 61320123a8a2c..1cd83756773b7 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -899,10 +899,13 @@ func (p *noder) interfaceType(expr *syntax.InterfaceType) ir.Node { n = ir.NewField(p.pos(method), nil, importName(p.packname(method.Type)).(ir.Ntype), nil) } else { mname := p.name(method.Name) + if mname.IsBlank() { + base.Errorf("methods must have a unique non-blank name") + continue + } sig := p.typeExpr(method.Type).(*ir.FuncType) sig.Recv = fakeRecv() n = ir.NewField(p.pos(method), mname, sig, nil) - ifacedcl(n) } l = append(l, n) } diff --git a/test/interface/explicit.go b/test/interface/explicit.go index 3f9451e8d2faa..b705b97676007 100644 --- a/test/interface/explicit.go +++ b/test/interface/explicit.go @@ -100,6 +100,7 @@ type T2 struct{} func (t *T2) M() {} func (t *T2) _() {} -// Check that nothing satisfies an interface with blank methods. -var b1 B1 = &T2{} // ERROR "incompatible|missing _ method" -var b2 B2 = &T2{} // ERROR "incompatible|missing _ method" +// Already reported about the invalid blank interface method above; +// no need to report about not implementing it. +var b1 B1 = &T2{} +var b2 B2 = &T2{} From 2cec6c4a8cca6106c9939ae1560d614dec656839 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Fri, 4 Dec 2020 10:17:50 -0500 Subject: [PATCH 124/474] [dev.regabi] cmd/compile: generate Node methods using program Add Node method generator by Matthew Dempsky, lightly adapted to account for a few special cases. No more writing these by hand. Change-Id: I6933b895df666928b851bddf81b994799c0c97f7 Reviewed-on: https://go-review.googlesource.com/c/go/+/275434 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/expr.go | 455 +--------- src/cmd/compile/internal/ir/func.go | 13 - src/cmd/compile/internal/ir/mini.go | 8 + src/cmd/compile/internal/ir/mknode.go | 175 ++++ src/cmd/compile/internal/ir/name.go | 14 +- src/cmd/compile/internal/ir/node_gen.go | 1029 +++++++++++++++++++++++ src/cmd/compile/internal/ir/stmt.go | 382 --------- src/cmd/compile/internal/ir/type.go | 113 --- 8 files changed, 1214 insertions(+), 975 deletions(-) create mode 100644 src/cmd/compile/internal/ir/mknode.go create mode 100644 src/cmd/compile/internal/ir/node_gen.go diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index cfdb86f221249..7b1aeedcdf8d4 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -8,7 +8,6 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/src" - "fmt" "go/constant" ) @@ -95,25 +94,6 @@ func NewAddStringExpr(pos src.XPos, list []Node) *AddStringExpr { return n } -func (n *AddStringExpr) String() string { return fmt.Sprint(n) } -func (n *AddStringExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *AddStringExpr) copy() Node { - c := *n - c.init = c.init.Copy() - c.list = c.list.Copy() - return &c -} -func (n *AddStringExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDoList(n.list, err, do) - return err -} -func (n *AddStringExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - editList(n.list, edit) -} - func (n *AddStringExpr) List() Nodes { return n.list } func (n *AddStringExpr) PtrList() *Nodes { return &n.list } func (n *AddStringExpr) SetList(x Nodes) { n.list = x } @@ -133,24 
+113,6 @@ func NewAddrExpr(pos src.XPos, x Node) *AddrExpr { return n } -func (n *AddrExpr) String() string { return fmt.Sprint(n) } -func (n *AddrExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *AddrExpr) copy() Node { - c := *n - c.init = c.init.Copy() - return &c -} -func (n *AddrExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - return err -} -func (n *AddrExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) -} - func (n *AddrExpr) Left() Node { return n.X } func (n *AddrExpr) SetLeft(x Node) { n.X = x } func (n *AddrExpr) Right() Node { return n.Alloc } @@ -180,26 +142,6 @@ func NewBinaryExpr(pos src.XPos, op Op, x, y Node) *BinaryExpr { return n } -func (n *BinaryExpr) String() string { return fmt.Sprint(n) } -func (n *BinaryExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *BinaryExpr) copy() Node { - c := *n - c.init = c.init.Copy() - return &c -} -func (n *BinaryExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - err = maybeDo(n.Y, err, do) - return err -} -func (n *BinaryExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) - n.Y = maybeEdit(n.Y, edit) -} - func (n *BinaryExpr) Left() Node { return n.X } func (n *BinaryExpr) SetLeft(x Node) { n.X = x } func (n *BinaryExpr) Right() Node { return n.Y } @@ -250,33 +192,6 @@ func NewCallExpr(pos src.XPos, fun Node, args []Node) *CallExpr { return n } -func (n *CallExpr) String() string { return fmt.Sprint(n) } -func (n *CallExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *CallExpr) copy() Node { - c := *n - c.init = c.init.Copy() - c.Args = c.Args.Copy() - c.Rargs = c.Rargs.Copy() - c.body = c.body.Copy() - return &c -} -func (n *CallExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - err = maybeDoList(n.Args, err, do) - err = maybeDoList(n.Rargs, err, do) - err = maybeDoList(n.body, err, do) - return err -} -func (n *CallExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) - editList(n.Args, edit) - editList(n.Rargs, edit) - editList(n.body, edit) -} - func (n *CallExpr) Orig() Node { return n.orig } func (n *CallExpr) SetOrig(x Node) { n.orig = x } func (n *CallExpr) Left() Node { return n.X } @@ -322,24 +237,6 @@ func NewCallPartExpr(pos src.XPos, x Node, method *types.Field, fn *Func) *CallP return n } -func (n *CallPartExpr) String() string { return fmt.Sprint(n) } -func (n *CallPartExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *CallPartExpr) copy() Node { - c := *n - c.init = c.init.Copy() - return &c -} -func (n *CallPartExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - return err -} -func (n *CallPartExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) -} - func (n *CallPartExpr) Func() *Func { return n.fn } func (n *CallPartExpr) Left() Node { return n.X } func (n *CallPartExpr) Sym() *types.Sym { return n.Method.Sym } @@ -358,22 +255,6 @@ func NewClosureExpr(pos src.XPos, fn *Func) *ClosureExpr { return n } -func (n *ClosureExpr) String() string { return fmt.Sprint(n) } -func (n *ClosureExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func 
(n *ClosureExpr) copy() Node { - c := *n - c.init = c.init.Copy() - return &c -} -func (n *ClosureExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - return err -} -func (n *ClosureExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) -} - func (n *ClosureExpr) Func() *Func { return n.fn } // A ClosureRead denotes reading a variable stored within a closure struct. @@ -389,24 +270,8 @@ func NewClosureRead(typ *types.Type, offset int64) *ClosureRead { return n } -func (n *ClosureRead) String() string { return fmt.Sprint(n) } -func (n *ClosureRead) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ClosureRead) copy() Node { - c := *n - c.init = c.init.Copy() - return &c -} - func (n *ClosureRead) Type() *types.Type { return n.typ } func (n *ClosureRead) Offset() int64 { return n.offset } -func (n *ClosureRead) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - return err -} -func (n *ClosureRead) editChildren(edit func(Node) Node) { - editList(n.init, edit) -} // A CompLitExpr is a composite literal Type{Vals}. // Before type-checking, the type is Ntype. @@ -426,27 +291,6 @@ func NewCompLitExpr(pos src.XPos, typ Ntype, list []Node) *CompLitExpr { return n } -func (n *CompLitExpr) String() string { return fmt.Sprint(n) } -func (n *CompLitExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *CompLitExpr) copy() Node { - c := *n - c.init = c.init.Copy() - c.list = c.list.Copy() - return &c -} -func (n *CompLitExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.Ntype, err, do) - err = maybeDoList(n.list, err, do) - return err -} -func (n *CompLitExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.Ntype = toNtype(maybeEdit(n.Ntype, edit)) - editList(n.list, edit) -} - func (n *CompLitExpr) Orig() Node { return n.orig } func (n *CompLitExpr) SetOrig(x Node) { n.orig = x } func (n *CompLitExpr) Right() Node { return n.Ntype } @@ -480,12 +324,6 @@ func NewConstExpr(val constant.Value, orig Node) Node { return n } -func (n *ConstExpr) String() string { return fmt.Sprint(n) } -func (n *ConstExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ConstExpr) copy() Node { c := *n; return &c } -func (n *ConstExpr) doChildren(do func(Node) error) error { return nil } -func (n *ConstExpr) editChildren(edit func(Node) Node) {} - func (n *ConstExpr) Sym() *types.Sym { return n.orig.Sym() } func (n *ConstExpr) Orig() Node { return n.orig } func (n *ConstExpr) SetOrig(orig Node) { panic(n.no("SetOrig")) } @@ -495,8 +333,7 @@ func (n *ConstExpr) Val() constant.Value { return n.val } // It may end up being a value or a type. 
type ConvExpr struct { miniExpr - orig Node - X Node + X Node } func NewConvExpr(pos src.XPos, op Op, typ *types.Type, x Node) *ConvExpr { @@ -504,29 +341,9 @@ func NewConvExpr(pos src.XPos, op Op, typ *types.Type, x Node) *ConvExpr { n.pos = pos n.typ = typ n.SetOp(op) - n.orig = n return n } -func (n *ConvExpr) String() string { return fmt.Sprint(n) } -func (n *ConvExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ConvExpr) copy() Node { - c := *n - c.init = c.init.Copy() - return &c -} -func (n *ConvExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - return err -} -func (n *ConvExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) -} - -func (n *ConvExpr) rawCopy() Node { c := *n; return &c } func (n *ConvExpr) Left() Node { return n.X } func (n *ConvExpr) SetLeft(x Node) { n.X = x } @@ -554,26 +371,6 @@ func NewIndexExpr(pos src.XPos, x, index Node) *IndexExpr { return n } -func (n *IndexExpr) String() string { return fmt.Sprint(n) } -func (n *IndexExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *IndexExpr) copy() Node { - c := *n - c.init = c.init.Copy() - return &c -} -func (n *IndexExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - err = maybeDo(n.Index, err, do) - return err -} -func (n *IndexExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) - n.Index = maybeEdit(n.Index, edit) -} - func (n *IndexExpr) Left() Node { return n.X } func (n *IndexExpr) SetLeft(x Node) { n.X = x } func (n *IndexExpr) Right() Node { return n.Index } @@ -608,26 +405,6 @@ func NewKeyExpr(pos src.XPos, key, value Node) *KeyExpr { return n } -func (n *KeyExpr) String() string { return fmt.Sprint(n) } -func (n *KeyExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *KeyExpr) copy() Node { - c := *n - c.init = c.init.Copy() - return &c -} -func (n *KeyExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.Key, err, do) - err = maybeDo(n.Value, err, do) - return err -} -func (n *KeyExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.Key = maybeEdit(n.Key, edit) - n.Value = maybeEdit(n.Value, edit) -} - func (n *KeyExpr) Left() Node { return n.Key } func (n *KeyExpr) SetLeft(x Node) { n.Key = x } func (n *KeyExpr) Right() Node { return n.Value } @@ -662,28 +439,6 @@ func NewInlinedCallExpr(pos src.XPos, body, retvars []Node) *InlinedCallExpr { return n } -func (n *InlinedCallExpr) String() string { return fmt.Sprint(n) } -func (n *InlinedCallExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *InlinedCallExpr) copy() Node { - c := *n - c.init = c.init.Copy() - c.body = c.body.Copy() - c.ReturnVars = c.ReturnVars.Copy() - return &c -} -func (n *InlinedCallExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDoList(n.body, err, do) - err = maybeDoList(n.ReturnVars, err, do) - return err -} -func (n *InlinedCallExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - editList(n.body, edit) - editList(n.ReturnVars, edit) -} - func (n *InlinedCallExpr) Body() Nodes { return n.body } func (n *InlinedCallExpr) PtrBody() *Nodes { return &n.body } func (n *InlinedCallExpr) SetBody(x Nodes) { n.body = x } @@ -707,26 +462,6 @@ func NewMakeExpr(pos 
src.XPos, op Op, len, cap Node) *MakeExpr { return n } -func (n *MakeExpr) String() string { return fmt.Sprint(n) } -func (n *MakeExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *MakeExpr) copy() Node { - c := *n - c.init = c.init.Copy() - return &c -} -func (n *MakeExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.Len, err, do) - err = maybeDo(n.Cap, err, do) - return err -} -func (n *MakeExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.Len = maybeEdit(n.Len, edit) - n.Cap = maybeEdit(n.Cap, edit) -} - func (n *MakeExpr) Left() Node { return n.Len } func (n *MakeExpr) SetLeft(x Node) { n.Len = x } func (n *MakeExpr) Right() Node { return n.Cap } @@ -760,26 +495,6 @@ func NewMethodExpr(pos src.XPos, op Op, x, m Node) *MethodExpr { return n } -func (n *MethodExpr) String() string { return fmt.Sprint(n) } -func (n *MethodExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *MethodExpr) copy() Node { - c := *n - c.init = c.init.Copy() - return &c -} -func (n *MethodExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - err = maybeDo(n.M, err, do) - return err -} -func (n *MethodExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) - n.M = maybeEdit(n.M, edit) -} - func (n *MethodExpr) Left() Node { return n.X } func (n *MethodExpr) SetLeft(x Node) { n.X = x } func (n *MethodExpr) Right() Node { return n.M } @@ -805,22 +520,6 @@ func NewNilExpr(pos src.XPos) *NilExpr { return n } -func (n *NilExpr) String() string { return fmt.Sprint(n) } -func (n *NilExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *NilExpr) copy() Node { - c := *n - c.init = c.init.Copy() - return &c -} -func (n *NilExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - return err -} -func (n *NilExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) -} - func (n *NilExpr) Sym() *types.Sym { return n.sym } func (n *NilExpr) SetSym(x *types.Sym) { n.sym = x } @@ -838,24 +537,6 @@ func NewParenExpr(pos src.XPos, x Node) *ParenExpr { return n } -func (n *ParenExpr) String() string { return fmt.Sprint(n) } -func (n *ParenExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ParenExpr) copy() Node { - c := *n - c.init = c.init.Copy() - return &c -} -func (n *ParenExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - return err -} -func (n *ParenExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) -} - func (n *ParenExpr) Left() Node { return n.X } func (n *ParenExpr) SetLeft(x Node) { n.X = x } @@ -883,22 +564,6 @@ func NewResultExpr(pos src.XPos, typ *types.Type, offset int64) *ResultExpr { return n } -func (n *ResultExpr) String() string { return fmt.Sprint(n) } -func (n *ResultExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ResultExpr) copy() Node { - c := *n - c.init = c.init.Copy() - return &c -} -func (n *ResultExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - return err -} -func (n *ResultExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) -} - func (n *ResultExpr) Offset() int64 { return n.offset } func (n *ResultExpr) SetOffset(x int64) { n.offset = x } @@ -928,24 +593,6 @@ 
func (n *SelectorExpr) SetOp(op Op) { } } -func (n *SelectorExpr) String() string { return fmt.Sprint(n) } -func (n *SelectorExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *SelectorExpr) copy() Node { - c := *n - c.init = c.init.Copy() - return &c -} -func (n *SelectorExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - return err -} -func (n *SelectorExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) -} - func (n *SelectorExpr) Left() Node { return n.X } func (n *SelectorExpr) SetLeft(x Node) { n.X = x } func (n *SelectorExpr) Sym() *types.Sym { return n.Sel } @@ -971,27 +618,6 @@ func NewSliceExpr(pos src.XPos, op Op, x Node) *SliceExpr { return n } -func (n *SliceExpr) String() string { return fmt.Sprint(n) } -func (n *SliceExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *SliceExpr) copy() Node { - c := *n - c.init = c.init.Copy() - c.list = c.list.Copy() - return &c -} -func (n *SliceExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - err = maybeDoList(n.list, err, do) - return err -} -func (n *SliceExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) - editList(n.list, edit) -} - func (n *SliceExpr) Left() Node { return n.X } func (n *SliceExpr) SetLeft(x Node) { n.X = x } func (n *SliceExpr) List() Nodes { return n.list } @@ -1091,26 +717,6 @@ func NewSliceHeaderExpr(pos src.XPos, typ *types.Type, ptr, len, cap Node) *Slic return n } -func (n *SliceHeaderExpr) String() string { return fmt.Sprint(n) } -func (n *SliceHeaderExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *SliceHeaderExpr) copy() Node { - c := *n - c.init = c.init.Copy() - return &c -} -func (n *SliceHeaderExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.Ptr, err, do) - err = maybeDoList(n.lenCap, err, do) - return err -} -func (n *SliceHeaderExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.Ptr = maybeEdit(n.Ptr, edit) - editList(n.lenCap, edit) -} - func (n *SliceHeaderExpr) Left() Node { return n.Ptr } func (n *SliceHeaderExpr) SetLeft(x Node) { n.Ptr = x } func (n *SliceHeaderExpr) List() Nodes { return n.lenCap } @@ -1131,24 +737,6 @@ func NewStarExpr(pos src.XPos, x Node) *StarExpr { return n } -func (n *StarExpr) String() string { return fmt.Sprint(n) } -func (n *StarExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *StarExpr) copy() Node { - c := *n - c.init = c.init.Copy() - return &c -} -func (n *StarExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - return err -} -func (n *StarExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) -} - func (n *StarExpr) Left() Node { return n.X } func (n *StarExpr) SetLeft(x Node) { n.X = x } @@ -1179,29 +767,6 @@ func NewTypeAssertExpr(pos src.XPos, x Node, typ Ntype) *TypeAssertExpr { return n } -func (n *TypeAssertExpr) String() string { return fmt.Sprint(n) } -func (n *TypeAssertExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *TypeAssertExpr) copy() Node { - c := *n - c.init = c.init.Copy() - c.Itab = c.Itab.Copy() - return &c -} -func (n *TypeAssertExpr) doChildren(do func(Node) error) error { - var err error - 
err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - err = maybeDo(n.Ntype, err, do) - err = maybeDoList(n.Itab, err, do) - return err -} -func (n *TypeAssertExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) - n.Ntype = maybeEdit(n.Ntype, edit) - editList(n.Itab, edit) -} - func (n *TypeAssertExpr) Left() Node { return n.X } func (n *TypeAssertExpr) SetLeft(x Node) { n.X = x } func (n *TypeAssertExpr) Right() Node { return n.Ntype } @@ -1233,24 +798,6 @@ func NewUnaryExpr(pos src.XPos, op Op, x Node) *UnaryExpr { return n } -func (n *UnaryExpr) String() string { return fmt.Sprint(n) } -func (n *UnaryExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *UnaryExpr) copy() Node { - c := *n - c.init = c.init.Copy() - return &c -} -func (n *UnaryExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - return err -} -func (n *UnaryExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) -} - func (n *UnaryExpr) Left() Node { return n.X } func (n *UnaryExpr) SetLeft(x Node) { n.X = x } diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 78e98c4d31c25..38e00da7dae30 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -9,7 +9,6 @@ import ( "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/src" - "fmt" ) // A Func corresponds to a single function in a Go program @@ -115,18 +114,6 @@ func NewFunc(pos src.XPos) *Func { return f } -func (f *Func) String() string { return fmt.Sprint(f) } -func (f *Func) Format(s fmt.State, verb rune) { FmtNode(f, s, verb) } -func (f *Func) copy() Node { panic(f.no("copy")) } -func (f *Func) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(f.body, err, do) - return err -} -func (f *Func) editChildren(edit func(Node) Node) { - editList(f.body, edit) -} - func (f *Func) Func() *Func { return f } func (f *Func) Body() Nodes { return f.body } func (f *Func) PtrBody() *Nodes { return &f.body } diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go index 909ca0220d479..612e7d62c3925 100644 --- a/src/cmd/compile/internal/ir/mini.go +++ b/src/cmd/compile/internal/ir/mini.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:generate go run -mod=mod mknode.go + package ir import ( @@ -33,6 +35,12 @@ type miniNode struct { esc uint16 } +func (n *miniNode) String() string { panic(1) } +func (n *miniNode) Format(s fmt.State, verb rune) { panic(1) } +func (n *miniNode) copy() Node { panic(1) } +func (n *miniNode) doChildren(do func(Node) error) error { panic(1) } +func (n *miniNode) editChildren(edit func(Node) Node) { panic(1) } + // posOr returns pos if known, or else n.pos. // For use in DeepCopy. func (n *miniNode) posOr(pos src.XPos) src.XPos { diff --git a/src/cmd/compile/internal/ir/mknode.go b/src/cmd/compile/internal/ir/mknode.go new file mode 100644 index 0000000000000..2c007f93f1f49 --- /dev/null +++ b/src/cmd/compile/internal/ir/mknode.go @@ -0,0 +1,175 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +package main + +import ( + "bytes" + "fmt" + "go/format" + "go/types" + "io/ioutil" + "log" + "strings" + + "golang.org/x/tools/go/packages" +) + +func main() { + cfg := &packages.Config{ + Mode: packages.NeedSyntax | packages.NeedTypes, + } + pkgs, err := packages.Load(cfg, "cmd/compile/internal/ir") + if err != nil { + log.Fatal(err) + } + + pkg := pkgs[0].Types + scope := pkg.Scope() + + lookup := func(name string) *types.Named { + return scope.Lookup(name).(*types.TypeName).Type().(*types.Named) + } + + nodeType := lookup("Node") + ntypeType := lookup("Ntype") + nodesType := lookup("Nodes") + ptrFieldType := types.NewPointer(lookup("Field")) + slicePtrFieldType := types.NewSlice(ptrFieldType) + ptrNameType := types.NewPointer(lookup("Name")) + + var buf bytes.Buffer + fmt.Fprintln(&buf, "// Code generated by mknode.go. DO NOT EDIT.") + fmt.Fprintln(&buf) + fmt.Fprintln(&buf, "package ir") + fmt.Fprintln(&buf) + fmt.Fprintln(&buf, `import "fmt"`) + + for _, name := range scope.Names() { + obj, ok := scope.Lookup(name).(*types.TypeName) + if !ok { + continue + } + + typName := obj.Name() + typ, ok := obj.Type().(*types.Named).Underlying().(*types.Struct) + if !ok { + continue + } + + if strings.HasPrefix(typName, "mini") || !hasMiniNode(typ) { + continue + } + + fmt.Fprintf(&buf, "\n") + fmt.Fprintf(&buf, "func (n *%s) String() string { return fmt.Sprint(n) }\n", name) + fmt.Fprintf(&buf, "func (n *%s) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }\n", name) + + fmt.Fprintf(&buf, "func (n *%s) copy() Node { c := *n\n", name) + forNodeFields(typName, typ, func(name string, is func(types.Type) bool) { + switch { + case is(nodesType): + fmt.Fprintf(&buf, "c.%s = c.%s.Copy()\n", name, name) + case is(ptrFieldType): + fmt.Fprintf(&buf, "if c.%s != nil { c.%s = c.%s.copy() }\n", name, name, name) + case is(slicePtrFieldType): + fmt.Fprintf(&buf, "c.%s = copyFields(c.%s)\n", name, name) + } + }) + fmt.Fprintf(&buf, "return &c }\n") + + fmt.Fprintf(&buf, "func (n *%s) doChildren(do func(Node) error) error { var err error\n", name) + forNodeFields(typName, typ, func(name string, is func(types.Type) bool) { + switch { + case is(ptrNameType): + fmt.Fprintf(&buf, "if n.%s != nil { err = maybeDo(n.%s, err, do) }\n", name, name) + case is(nodeType), is(ntypeType): + fmt.Fprintf(&buf, "err = maybeDo(n.%s, err, do)\n", name) + case is(nodesType): + fmt.Fprintf(&buf, "err = maybeDoList(n.%s, err, do)\n", name) + case is(ptrFieldType): + fmt.Fprintf(&buf, "err = maybeDoField(n.%s, err, do)\n", name) + case is(slicePtrFieldType): + fmt.Fprintf(&buf, "err = maybeDoFields(n.%s, err, do)\n", name) + } + }) + fmt.Fprintf(&buf, "return err }\n") + + fmt.Fprintf(&buf, "func (n *%s) editChildren(edit func(Node) Node) {\n", name) + forNodeFields(typName, typ, func(name string, is func(types.Type) bool) { + switch { + case is(ptrNameType): + fmt.Fprintf(&buf, "if n.%s != nil { n.%s = edit(n.%s).(*Name) }\n", name, name, name) + case is(nodeType): + fmt.Fprintf(&buf, "n.%s = maybeEdit(n.%s, edit)\n", name, name) + case is(ntypeType): + fmt.Fprintf(&buf, "n.%s = toNtype(maybeEdit(n.%s, edit))\n", name, name) + case is(nodesType): + fmt.Fprintf(&buf, "editList(n.%s, edit)\n", name) + case is(ptrFieldType): + fmt.Fprintf(&buf, "editField(n.%s, edit)\n", name) + case is(slicePtrFieldType): + fmt.Fprintf(&buf, "editFields(n.%s, edit)\n", name) + } + }) + fmt.Fprintf(&buf, "}\n") + } + + out, err := format.Source(buf.Bytes()) + if err != nil { + // write out mangled source so we can see the 
bug. + out = buf.Bytes() + } + + err = ioutil.WriteFile("node_gen.go", out, 0666) + if err != nil { + log.Fatal(err) + } +} + +func forNodeFields(typName string, typ *types.Struct, f func(name string, is func(types.Type) bool)) { + for i, n := 0, typ.NumFields(); i < n; i++ { + v := typ.Field(i) + if v.Embedded() { + if typ, ok := v.Type().Underlying().(*types.Struct); ok { + forNodeFields(typName, typ, f) + continue + } + } + switch typName { + case "Func": + if v.Name() != "body" { + continue + } + case "Name", "Pack": + continue + } + switch v.Name() { + case "orig": + continue + } + switch typName + "." + v.Name() { + case "AddStringExpr.Alloc": + continue + } + f(v.Name(), func(t types.Type) bool { return types.Identical(t, v.Type()) }) + } +} + +func hasMiniNode(typ *types.Struct) bool { + for i, n := 0, typ.NumFields(); i < n; i++ { + v := typ.Field(i) + if v.Name() == "miniNode" { + return true + } + if v.Embedded() { + if typ, ok := v.Type().Underlying().(*types.Struct); ok && hasMiniNode(typ) { + return true + } + } + } + return false +} diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 030fb82a7d2d1..06cffe0325855 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -9,7 +9,7 @@ import ( "cmd/compile/internal/types" "cmd/internal/objabi" "cmd/internal/src" - "fmt" + "go/constant" ) @@ -149,12 +149,6 @@ func newNameAt(pos src.XPos, op Op, sym *types.Sym) *Name { return n } -func (n *Name) String() string { return fmt.Sprint(n) } -func (n *Name) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *Name) copy() Node { c := *n; return &c } -func (n *Name) doChildren(do func(Node) error) error { return nil } -func (n *Name) editChildren(edit func(Node) Node) {} - func (n *Name) Name() *Name { return n } func (n *Name) Sym() *types.Sym { return n.sym } func (n *Name) SetSym(x *types.Sym) { n.sym = x } @@ -361,12 +355,6 @@ type PkgName struct { Used bool } -func (p *PkgName) String() string { return fmt.Sprint(p) } -func (p *PkgName) Format(s fmt.State, verb rune) { FmtNode(p, s, verb) } -func (p *PkgName) copy() Node { c := *p; return &c } -func (p *PkgName) doChildren(do func(Node) error) error { return nil } -func (p *PkgName) editChildren(edit func(Node) Node) {} - func (p *PkgName) Sym() *types.Sym { return p.sym } func (*PkgName) CanBeNtype() {} diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go new file mode 100644 index 0000000000000..4c47a4486e19f --- /dev/null +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -0,0 +1,1029 @@ +// Code generated by mknode.go. DO NOT EDIT. 
+ +package ir + +import "fmt" + +func (n *AddStringExpr) String() string { return fmt.Sprint(n) } +func (n *AddStringExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *AddStringExpr) copy() Node { + c := *n + c.init = c.init.Copy() + c.list = c.list.Copy() + return &c +} +func (n *AddStringExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDoList(n.list, err, do) + return err +} +func (n *AddStringExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + editList(n.list, edit) +} + +func (n *AddrExpr) String() string { return fmt.Sprint(n) } +func (n *AddrExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *AddrExpr) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} +func (n *AddrExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.X, err, do) + err = maybeDo(n.Alloc, err, do) + return err +} +func (n *AddrExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.X = maybeEdit(n.X, edit) + n.Alloc = maybeEdit(n.Alloc, edit) +} + +func (n *ArrayType) String() string { return fmt.Sprint(n) } +func (n *ArrayType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *ArrayType) copy() Node { + c := *n + return &c +} +func (n *ArrayType) doChildren(do func(Node) error) error { + var err error + err = maybeDo(n.Len, err, do) + err = maybeDo(n.Elem, err, do) + return err +} +func (n *ArrayType) editChildren(edit func(Node) Node) { + n.Len = maybeEdit(n.Len, edit) + n.Elem = maybeEdit(n.Elem, edit) +} + +func (n *AssignListStmt) String() string { return fmt.Sprint(n) } +func (n *AssignListStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *AssignListStmt) copy() Node { + c := *n + c.init = c.init.Copy() + c.Lhs = c.Lhs.Copy() + c.Rhs = c.Rhs.Copy() + return &c +} +func (n *AssignListStmt) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDoList(n.Lhs, err, do) + err = maybeDoList(n.Rhs, err, do) + return err +} +func (n *AssignListStmt) editChildren(edit func(Node) Node) { + editList(n.init, edit) + editList(n.Lhs, edit) + editList(n.Rhs, edit) +} + +func (n *AssignOpStmt) String() string { return fmt.Sprint(n) } +func (n *AssignOpStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *AssignOpStmt) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} +func (n *AssignOpStmt) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.X, err, do) + err = maybeDo(n.Y, err, do) + return err +} +func (n *AssignOpStmt) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.X = maybeEdit(n.X, edit) + n.Y = maybeEdit(n.Y, edit) +} + +func (n *AssignStmt) String() string { return fmt.Sprint(n) } +func (n *AssignStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *AssignStmt) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} +func (n *AssignStmt) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.X, err, do) + err = maybeDo(n.Y, err, do) + return err +} +func (n *AssignStmt) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.X = maybeEdit(n.X, edit) + n.Y = maybeEdit(n.Y, edit) +} + +func (n *BinaryExpr) String() string { return fmt.Sprint(n) } +func (n *BinaryExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n 
*BinaryExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *BinaryExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.X, err, do)
+	err = maybeDo(n.Y, err, do)
+	return err
+}
+func (n *BinaryExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.X = maybeEdit(n.X, edit)
+	n.Y = maybeEdit(n.Y, edit)
+}
+
+func (n *BlockStmt) String() string { return fmt.Sprint(n) }
+func (n *BlockStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *BlockStmt) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	c.list = c.list.Copy()
+	return &c
+}
+func (n *BlockStmt) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDoList(n.list, err, do)
+	return err
+}
+func (n *BlockStmt) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	editList(n.list, edit)
+}
+
+func (n *BranchStmt) String() string { return fmt.Sprint(n) }
+func (n *BranchStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *BranchStmt) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *BranchStmt) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	return err
+}
+func (n *BranchStmt) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+}
+
+func (n *CallExpr) String() string { return fmt.Sprint(n) }
+func (n *CallExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *CallExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	c.Args = c.Args.Copy()
+	c.Rargs = c.Rargs.Copy()
+	c.body = c.body.Copy()
+	return &c
+}
+func (n *CallExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.X, err, do)
+	err = maybeDoList(n.Args, err, do)
+	err = maybeDoList(n.Rargs, err, do)
+	err = maybeDoList(n.body, err, do)
+	return err
+}
+func (n *CallExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.X = maybeEdit(n.X, edit)
+	editList(n.Args, edit)
+	editList(n.Rargs, edit)
+	editList(n.body, edit)
+}
+
+func (n *CallPartExpr) String() string { return fmt.Sprint(n) }
+func (n *CallPartExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *CallPartExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *CallPartExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.X, err, do)
+	return err
+}
+func (n *CallPartExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.X = maybeEdit(n.X, edit)
+}
+
+func (n *CaseStmt) String() string { return fmt.Sprint(n) }
+func (n *CaseStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *CaseStmt) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	c.Vars = c.Vars.Copy()
+	c.list = c.list.Copy()
+	c.body = c.body.Copy()
+	return &c
+}
+func (n *CaseStmt) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDoList(n.Vars, err, do)
+	err = maybeDoList(n.list, err, do)
+	err = maybeDo(n.Comm, err, do)
+	err = maybeDoList(n.body, err, do)
+	return err
+}
+func (n *CaseStmt) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	editList(n.Vars, edit)
+	editList(n.list, edit)
+	n.Comm = maybeEdit(n.Comm, edit)
+	editList(n.body, edit)
+}
+
+func (n *ChanType) String() string { return fmt.Sprint(n) }
+func (n *ChanType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *ChanType) copy() Node {
+	c := *n
+	return &c
+}
+func (n *ChanType) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDo(n.Elem, err, do)
+	return err
+}
+func (n *ChanType) editChildren(edit func(Node) Node) {
+	n.Elem = maybeEdit(n.Elem, edit)
+}
+
+func (n *ClosureExpr) String() string { return fmt.Sprint(n) }
+func (n *ClosureExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *ClosureExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *ClosureExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	return err
+}
+func (n *ClosureExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+}
+
+func (n *ClosureRead) String() string { return fmt.Sprint(n) }
+func (n *ClosureRead) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *ClosureRead) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *ClosureRead) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	return err
+}
+func (n *ClosureRead) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+}
+
+func (n *CompLitExpr) String() string { return fmt.Sprint(n) }
+func (n *CompLitExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *CompLitExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	c.list = c.list.Copy()
+	return &c
+}
+func (n *CompLitExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.Ntype, err, do)
+	err = maybeDoList(n.list, err, do)
+	return err
+}
+func (n *CompLitExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.Ntype = toNtype(maybeEdit(n.Ntype, edit))
+	editList(n.list, edit)
+}
+
+func (n *ConstExpr) String() string { return fmt.Sprint(n) }
+func (n *ConstExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *ConstExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *ConstExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	return err
+}
+func (n *ConstExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+}
+
+func (n *ConvExpr) String() string { return fmt.Sprint(n) }
+func (n *ConvExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *ConvExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *ConvExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.X, err, do)
+	return err
+}
+func (n *ConvExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.X = maybeEdit(n.X, edit)
+}
+
+func (n *Decl) String() string { return fmt.Sprint(n) }
+func (n *Decl) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *Decl) copy() Node {
+	c := *n
+	return &c
+}
+func (n *Decl) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDo(n.X, err, do)
+	return err
+}
+func (n *Decl) editChildren(edit func(Node) Node) {
+	n.X = maybeEdit(n.X, edit)
+}
+
+func (n *DeferStmt) String() string { return fmt.Sprint(n) }
+func (n *DeferStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *DeferStmt) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *DeferStmt) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.Call, err, do)
+	return err
+}
+func (n *DeferStmt) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.Call = maybeEdit(n.Call, edit)
+}
+
+func (n *ForStmt) String() string { return fmt.Sprint(n) }
+func (n *ForStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *ForStmt) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	c.Late = c.Late.Copy()
+	c.body = c.body.Copy()
+	return &c
+}
+func (n *ForStmt) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.Cond, err, do)
+	err = maybeDoList(n.Late, err, do)
+	err = maybeDo(n.Post, err, do)
+	err = maybeDoList(n.body, err, do)
+	return err
+}
+func (n *ForStmt) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.Cond = maybeEdit(n.Cond, edit)
+	editList(n.Late, edit)
+	n.Post = maybeEdit(n.Post, edit)
+	editList(n.body, edit)
+}
+
+func (n *Func) String() string { return fmt.Sprint(n) }
+func (n *Func) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *Func) copy() Node {
+	c := *n
+	c.body = c.body.Copy()
+	return &c
+}
+func (n *Func) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.body, err, do)
+	return err
+}
+func (n *Func) editChildren(edit func(Node) Node) {
+	editList(n.body, edit)
+}
+
+func (n *FuncType) String() string { return fmt.Sprint(n) }
+func (n *FuncType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *FuncType) copy() Node {
+	c := *n
+	if c.Recv != nil {
+		c.Recv = c.Recv.copy()
+	}
+	c.Params = copyFields(c.Params)
+	c.Results = copyFields(c.Results)
+	return &c
+}
+func (n *FuncType) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoField(n.Recv, err, do)
+	err = maybeDoFields(n.Params, err, do)
+	err = maybeDoFields(n.Results, err, do)
+	return err
+}
+func (n *FuncType) editChildren(edit func(Node) Node) {
+	editField(n.Recv, edit)
+	editFields(n.Params, edit)
+	editFields(n.Results, edit)
+}
+
+func (n *GoStmt) String() string { return fmt.Sprint(n) }
+func (n *GoStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *GoStmt) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *GoStmt) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.Call, err, do)
+	return err
+}
+func (n *GoStmt) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.Call = maybeEdit(n.Call, edit)
+}
+
+func (n *IfStmt) String() string { return fmt.Sprint(n) }
+func (n *IfStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *IfStmt) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	c.body = c.body.Copy()
+	c.Else = c.Else.Copy()
+	return &c
+}
+func (n *IfStmt) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.Cond, err, do)
+	err = maybeDoList(n.body, err, do)
+	err = maybeDoList(n.Else, err, do)
+	return err
+}
+func (n *IfStmt) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.Cond = maybeEdit(n.Cond, edit)
+	editList(n.body, edit)
+	editList(n.Else, edit)
+}
+
+func (n *IndexExpr) String() string { return fmt.Sprint(n) }
+func (n *IndexExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *IndexExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *IndexExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.X, err, do)
+	err = maybeDo(n.Index, err, do)
+	return err
+}
+func (n *IndexExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.X = maybeEdit(n.X, edit)
+	n.Index = maybeEdit(n.Index, edit)
+}
+
+func (n *InlineMarkStmt) String() string { return fmt.Sprint(n) }
+func (n *InlineMarkStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *InlineMarkStmt) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *InlineMarkStmt) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	return err
+}
+func (n *InlineMarkStmt) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+}
+
+func (n *InlinedCallExpr) String() string { return fmt.Sprint(n) }
+func (n *InlinedCallExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *InlinedCallExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	c.body = c.body.Copy()
+	c.ReturnVars = c.ReturnVars.Copy()
+	return &c
+}
+func (n *InlinedCallExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDoList(n.body, err, do)
+	err = maybeDoList(n.ReturnVars, err, do)
+	return err
+}
+func (n *InlinedCallExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	editList(n.body, edit)
+	editList(n.ReturnVars, edit)
+}
+
+func (n *InterfaceType) String() string { return fmt.Sprint(n) }
+func (n *InterfaceType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *InterfaceType) copy() Node {
+	c := *n
+	c.Methods = copyFields(c.Methods)
+	return &c
+}
+func (n *InterfaceType) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoFields(n.Methods, err, do)
+	return err
+}
+func (n *InterfaceType) editChildren(edit func(Node) Node) {
+	editFields(n.Methods, edit)
+}
+
+func (n *KeyExpr) String() string { return fmt.Sprint(n) }
+func (n *KeyExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *KeyExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *KeyExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.Key, err, do)
+	err = maybeDo(n.Value, err, do)
+	return err
+}
+func (n *KeyExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.Key = maybeEdit(n.Key, edit)
+	n.Value = maybeEdit(n.Value, edit)
+}
+
+func (n *LabelStmt) String() string { return fmt.Sprint(n) }
+func (n *LabelStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *LabelStmt) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *LabelStmt) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	return err
+}
+func (n *LabelStmt) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+}
+
+func (n *MakeExpr) String() string { return fmt.Sprint(n) }
+func (n *MakeExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *MakeExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *MakeExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.Len, err, do)
+	err = maybeDo(n.Cap, err, do)
+	return err
+}
+func (n *MakeExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.Len = maybeEdit(n.Len, edit)
+	n.Cap = maybeEdit(n.Cap, edit)
+}
+
+func (n *MapType) String() string { return fmt.Sprint(n) }
+func (n *MapType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *MapType) copy() Node {
+	c := *n
+	return &c
+}
+func (n *MapType) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDo(n.Key, err, do)
+	err = maybeDo(n.Elem, err, do)
+	return err
+}
+func (n *MapType) editChildren(edit func(Node) Node) {
+	n.Key = maybeEdit(n.Key, edit)
+	n.Elem = maybeEdit(n.Elem, edit)
+}
+
+func (n *MethodExpr) String() string { return fmt.Sprint(n) }
+func (n *MethodExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *MethodExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *MethodExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.X, err, do)
+	err = maybeDo(n.M, err, do)
+	return err
+}
+func (n *MethodExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.X = maybeEdit(n.X, edit)
+	n.M = maybeEdit(n.M, edit)
+}
+
+func (n *Name) String() string { return fmt.Sprint(n) }
+func (n *Name) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *Name) copy() Node {
+	c := *n
+	return &c
+}
+func (n *Name) doChildren(do func(Node) error) error {
+	var err error
+	return err
+}
+func (n *Name) editChildren(edit func(Node) Node) {
+}
+
+func (n *NilExpr) String() string { return fmt.Sprint(n) }
+func (n *NilExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *NilExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *NilExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	return err
+}
+func (n *NilExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+}
+
+func (n *ParenExpr) String() string { return fmt.Sprint(n) }
+func (n *ParenExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *ParenExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *ParenExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.X, err, do)
+	return err
+}
+func (n *ParenExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.X = maybeEdit(n.X, edit)
+}
+
+func (n *PkgName) String() string { return fmt.Sprint(n) }
+func (n *PkgName) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *PkgName) copy() Node {
+	c := *n
+	return &c
+}
+func (n *PkgName) doChildren(do func(Node) error) error {
+	var err error
+	return err
+}
+func (n *PkgName) editChildren(edit func(Node) Node) {
+}
+
+func (n *RangeStmt) String() string { return fmt.Sprint(n) }
+func (n *RangeStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *RangeStmt) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	c.Vars = c.Vars.Copy()
+	c.body = c.body.Copy()
+	return &c
+}
+func (n *RangeStmt) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDoList(n.Vars, err, do)
+	err = maybeDo(n.X, err, do)
+	err = maybeDoList(n.body, err, do)
+	return err
+}
+func (n *RangeStmt) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	editList(n.Vars, edit)
+	n.X = maybeEdit(n.X, edit)
+	editList(n.body, edit)
+}
+
+func (n *ResultExpr) String() string { return fmt.Sprint(n) }
+func (n *ResultExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *ResultExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *ResultExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	return err
+}
+func (n *ResultExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+}
+
+func (n *ReturnStmt) String() string { return fmt.Sprint(n) }
+func (n *ReturnStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *ReturnStmt) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	c.Results = c.Results.Copy()
+	return &c
+}
+func (n *ReturnStmt) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDoList(n.Results, err, do)
+	return err
+}
+func (n *ReturnStmt) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	editList(n.Results, edit)
+}
+
+func (n *SelectStmt) String() string { return fmt.Sprint(n) }
+func (n *SelectStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *SelectStmt) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	c.Cases = c.Cases.Copy()
+	c.Compiled = c.Compiled.Copy()
+	return &c
+}
+func (n *SelectStmt) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDoList(n.Cases, err, do)
+	err = maybeDoList(n.Compiled, err, do)
+	return err
+}
+func (n *SelectStmt) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	editList(n.Cases, edit)
+	editList(n.Compiled, edit)
+}
+
+func (n *SelectorExpr) String() string { return fmt.Sprint(n) }
+func (n *SelectorExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *SelectorExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *SelectorExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.X, err, do)
+	return err
+}
+func (n *SelectorExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.X = maybeEdit(n.X, edit)
+}
+
+func (n *SendStmt) String() string { return fmt.Sprint(n) }
+func (n *SendStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *SendStmt) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *SendStmt) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.Chan, err, do)
+	err = maybeDo(n.Value, err, do)
+	return err
+}
+func (n *SendStmt) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.Chan = maybeEdit(n.Chan, edit)
+	n.Value = maybeEdit(n.Value, edit)
+}
+
+func (n *SliceExpr) String() string { return fmt.Sprint(n) }
+func (n *SliceExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *SliceExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	c.list = c.list.Copy()
+	return &c
+}
+func (n *SliceExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.X, err, do)
+	err = maybeDoList(n.list, err, do)
+	return err
+}
+func (n *SliceExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.X = maybeEdit(n.X, edit)
+	editList(n.list, edit)
+}
+
+func (n *SliceHeaderExpr) String() string { return fmt.Sprint(n) }
+func (n *SliceHeaderExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *SliceHeaderExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	c.lenCap = c.lenCap.Copy()
+	return &c
+}
+func (n *SliceHeaderExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.Ptr, err, do)
+	err = maybeDoList(n.lenCap, err, do)
+	return err
+}
+func (n *SliceHeaderExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.Ptr = maybeEdit(n.Ptr, edit)
+	editList(n.lenCap, edit)
+}
+
+func (n *SliceType) String() string { return fmt.Sprint(n) }
+func (n *SliceType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *SliceType) copy() Node {
+	c := *n
+	return &c
+}
+func (n *SliceType) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDo(n.Elem, err, do)
+	return err
+}
+func (n *SliceType) editChildren(edit func(Node) Node) {
+	n.Elem = maybeEdit(n.Elem, edit)
+}
+
+func (n *StarExpr) String() string { return fmt.Sprint(n) }
+func (n *StarExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *StarExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *StarExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.X, err, do)
+	return err
+}
+func (n *StarExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.X = maybeEdit(n.X, edit)
+}
+
+func (n *StructType) String() string { return fmt.Sprint(n) }
+func (n *StructType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *StructType) copy() Node {
+	c := *n
+	c.Fields = copyFields(c.Fields)
+	return &c
+}
+func (n *StructType) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoFields(n.Fields, err, do)
+	return err
+}
+func (n *StructType) editChildren(edit func(Node) Node) {
+	editFields(n.Fields, edit)
+}
+
+func (n *SwitchStmt) String() string { return fmt.Sprint(n) }
+func (n *SwitchStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *SwitchStmt) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	c.Cases = c.Cases.Copy()
+	c.Compiled = c.Compiled.Copy()
+	return &c
+}
+func (n *SwitchStmt) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.Tag, err, do)
+	err = maybeDoList(n.Cases, err, do)
+	err = maybeDoList(n.Compiled, err, do)
+	return err
+}
+func (n *SwitchStmt) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.Tag = maybeEdit(n.Tag, edit)
+	editList(n.Cases, edit)
+	editList(n.Compiled, edit)
+}
+
+func (n *TypeAssertExpr) String() string { return fmt.Sprint(n) }
+func (n *TypeAssertExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *TypeAssertExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	c.Itab = c.Itab.Copy()
+	return &c
+}
+func (n *TypeAssertExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.X, err, do)
+	err = maybeDo(n.Ntype, err, do)
+	err = maybeDoList(n.Itab, err, do)
+	return err
+}
+func (n *TypeAssertExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.X = maybeEdit(n.X, edit)
+	n.Ntype = maybeEdit(n.Ntype, edit)
+	editList(n.Itab, edit)
+}
+
+func (n *TypeSwitchGuard) String() string { return fmt.Sprint(n) }
+func (n *TypeSwitchGuard) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *TypeSwitchGuard) copy() Node {
+	c := *n
+	return &c
+}
+func (n *TypeSwitchGuard) doChildren(do func(Node) error) error {
+	var err error
+	if n.name != nil {
+		err = maybeDo(n.name, err, do)
+	}
+	err = maybeDo(n.X, err, do)
+	return err
+}
+func (n *TypeSwitchGuard) editChildren(edit func(Node) Node) {
+	if n.name != nil {
+		n.name = edit(n.name).(*Name)
+	}
+	n.X = maybeEdit(n.X, edit)
+}
+
+func (n *UnaryExpr) String() string { return fmt.Sprint(n) }
+func (n *UnaryExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *UnaryExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *UnaryExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.X, err, do)
+	return err
+}
+func (n *UnaryExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.X = maybeEdit(n.X, edit)
+}
+
+func (n *typeNode) String() string { return fmt.Sprint(n) }
+func (n *typeNode) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *typeNode) copy() Node {
+	c := *n
+	return &c
+}
+func (n *typeNode) doChildren(do func(Node) error) error {
+	var err error
+	return err
+}
+func (n *typeNode) editChildren(edit func(Node) Node) {
+}
diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go
index c859fae55b20a..19f90ce1fa7b6 100644
--- a/src/cmd/compile/internal/ir/stmt.go
+++ b/src/cmd/compile/internal/ir/stmt.go
@@ -7,7 +7,6 @@ package ir
 import (
 	"cmd/compile/internal/types"
 	"cmd/internal/src"
-	"fmt"
 )
 
 // A Decl is a declaration of a const, type, or var. (A declared func is a Func.)
@@ -28,18 +27,6 @@ func NewDecl(pos src.XPos, op Op, x Node) *Decl {
 	return n
 }
 
-func (n *Decl) String() string { return fmt.Sprint(n) }
-func (n *Decl) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *Decl) copy() Node { c := *n; return &c }
-func (n *Decl) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDo(n.X, err, do)
-	return err
-}
-func (n *Decl) editChildren(edit func(Node) Node) {
-	n.X = maybeEdit(n.X, edit)
-}
-
 func (n *Decl) Left() Node { return n.X }
 func (n *Decl) SetLeft(x Node) { n.X = x }
 
@@ -76,28 +63,6 @@ func NewAssignListStmt(pos src.XPos, lhs, rhs []Node) *AssignListStmt {
 	return n
 }
 
-func (n *AssignListStmt) String() string { return fmt.Sprint(n) }
-func (n *AssignListStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *AssignListStmt) copy() Node {
-	c := *n
-	c.init = c.init.Copy()
-	c.Lhs = c.Lhs.Copy()
-	c.Rhs = c.Rhs.Copy()
-	return &c
-}
-func (n *AssignListStmt) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDoList(n.init, err, do)
-	err = maybeDoList(n.Lhs, err, do)
-	err = maybeDoList(n.Rhs, err, do)
-	return err
-}
-func (n *AssignListStmt) editChildren(edit func(Node) Node) {
-	editList(n.init, edit)
-	editList(n.Lhs, edit)
-	editList(n.Rhs, edit)
-}
-
 func (n *AssignListStmt) List() Nodes { return n.Lhs }
 func (n *AssignListStmt) PtrList() *Nodes { return &n.Lhs }
 func (n *AssignListStmt) SetList(x Nodes) { n.Lhs = x }
@@ -136,26 +101,6 @@ func NewAssignStmt(pos src.XPos, x, y Node) *AssignStmt {
 	return n
 }
 
-func (n *AssignStmt) String() string { return fmt.Sprint(n) }
-func (n *AssignStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *AssignStmt) copy() Node {
-	c := *n
-	c.init = c.init.Copy()
-	return &c
-}
-func (n *AssignStmt) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDoList(n.init, err, do)
-	err = maybeDo(n.X, err, do)
-	err = maybeDo(n.Y, err, do)
-	return err
-}
-func (n *AssignStmt) editChildren(edit func(Node) Node) {
-	editList(n.init, edit)
-	n.X = maybeEdit(n.X, edit)
-	n.Y = maybeEdit(n.Y, edit)
-}
-
 func (n *AssignStmt) Left() Node { return n.X }
 func (n *AssignStmt) SetLeft(x Node) { n.X = x }
 func (n *AssignStmt) Right() Node { return n.Y }
@@ -191,26 +136,6 @@ func NewAssignOpStmt(pos src.XPos, op Op, x, y Node) *AssignOpStmt {
 	return n
 }
 
-func (n *AssignOpStmt) String() string { return fmt.Sprint(n) }
-func (n *AssignOpStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *AssignOpStmt) copy() Node {
-	c := *n
-	c.init = c.init.Copy()
-	return &c
-}
-func (n *AssignOpStmt) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDoList(n.init, err, do)
-	err = maybeDo(n.X, err, do)
-	err = maybeDo(n.Y, err, do)
-	return err
-}
-func (n *AssignOpStmt) editChildren(edit func(Node) Node) {
-	editList(n.init, edit)
-	n.X = maybeEdit(n.X, edit)
-	n.Y = maybeEdit(n.Y, edit)
-}
-
 func (n *AssignOpStmt) Left() Node { return n.X }
 func (n *AssignOpStmt) SetLeft(x Node) { n.X = x }
 func (n *AssignOpStmt) Right() Node { return n.Y }
@@ -236,25 +161,6 @@ func NewBlockStmt(pos src.XPos, list []Node) *BlockStmt {
 	return n
 }
 
-func (n *BlockStmt) String() string { return fmt.Sprint(n) }
-func (n *BlockStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *BlockStmt) copy() Node {
-	c := *n
-	c.init = c.init.Copy()
-	c.list = c.list.Copy()
-	return &c
-}
-func (n *BlockStmt) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDoList(n.init, err, do)
-	err = maybeDoList(n.list, err, do)
-	return err
-}
-func (n *BlockStmt) editChildren(edit func(Node) Node) {
-	editList(n.init, edit)
-	editList(n.list, edit)
-}
-
 func (n *BlockStmt) List() Nodes { return n.list }
 func (n *BlockStmt) PtrList() *Nodes { return &n.list }
 func (n *BlockStmt) SetList(x Nodes) { n.list = x }
@@ -281,22 +187,6 @@ func NewBranchStmt(pos src.XPos, op Op, label *types.Sym) *BranchStmt {
 	return n
 }
 
-func (n *BranchStmt) String() string { return fmt.Sprint(n) }
-func (n *BranchStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *BranchStmt) copy() Node {
-	c := *n
-	c.init = c.init.Copy()
-	return &c
-}
-func (n *BranchStmt) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDoList(n.init, err, do)
-	return err
-}
-func (n *BranchStmt) editChildren(edit func(Node) Node) {
-	editList(n.init, edit)
-}
-
 func (n *BranchStmt) Sym() *types.Sym { return n.Label }
 func (n *BranchStmt) SetSym(sym *types.Sym) { n.Label = sym }
 
@@ -318,33 +208,6 @@ func NewCaseStmt(pos src.XPos, list, body []Node) *CaseStmt {
 	return n
 }
 
-func (n *CaseStmt) String() string { return fmt.Sprint(n) }
-func (n *CaseStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *CaseStmt) copy() Node {
-	c := *n
-	c.init = c.init.Copy()
-	c.Vars = c.Vars.Copy()
-	c.list = c.list.Copy()
-	c.body = c.body.Copy()
-	return &c
-}
-func (n *CaseStmt) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDoList(n.init, err, do)
-	err = maybeDoList(n.Vars, err, do)
-	err = maybeDoList(n.list, err, do)
-	err = maybeDo(n.Comm, err, do)
-	err = maybeDoList(n.body, err, do)
-	return err
-}
-func (n *CaseStmt) editChildren(edit func(Node) Node) {
-	editList(n.init, edit)
-	editList(n.Vars, edit)
-	editList(n.list, edit)
-	n.Comm = maybeEdit(n.Comm, edit)
-	editList(n.body, edit)
-}
-
 func (n *CaseStmt) List() Nodes { return n.list }
 func (n *CaseStmt) PtrList() *Nodes { return &n.list }
 func (n *CaseStmt) SetList(x Nodes) { n.list = x }
@@ -370,24 +233,6 @@ func NewDeferStmt(pos src.XPos, call Node) *DeferStmt {
 	return n
 }
 
-func (n *DeferStmt) String() string { return fmt.Sprint(n) }
-func (n *DeferStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *DeferStmt) copy() Node {
-	c := *n
-	c.init = c.init.Copy()
-	return &c
-}
-func (n *DeferStmt) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDoList(n.init, err, do)
-	err = maybeDo(n.Call, err, do)
-	return err
-}
-func (n *DeferStmt) editChildren(edit func(Node) Node) {
-	editList(n.init, edit)
-	n.Call = maybeEdit(n.Call, edit)
-}
-
 func (n *DeferStmt) Left() Node { return n.Call }
 func (n *DeferStmt) SetLeft(x Node) { n.Call = x }
 
@@ -412,32 +257,6 @@ func NewForStmt(pos src.XPos, init []Node, cond, post Node, body []Node) *ForStm
 	return n
 }
 
-func (n *ForStmt) String() string { return fmt.Sprint(n) }
-func (n *ForStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *ForStmt) copy() Node {
-	c := *n
-	c.init = c.init.Copy()
-	c.Late = c.Late.Copy()
-	c.body = c.body.Copy()
-	return &c
-}
-func (n *ForStmt) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDoList(n.init, err, do)
-	err = maybeDo(n.Cond, err, do)
-	err = maybeDoList(n.Late, err, do)
-	err = maybeDo(n.Post, err, do)
-	err = maybeDoList(n.body, err, do)
-	return err
-}
-func (n *ForStmt) editChildren(edit func(Node) Node) {
-	editList(n.init, edit)
-	n.Cond = maybeEdit(n.Cond, edit)
-	editList(n.Late, edit)
-	n.Post = maybeEdit(n.Post, edit)
-	editList(n.body, edit)
-}
-
 func (n *ForStmt) Sym() *types.Sym { return n.Label }
 func (n *ForStmt) SetSym(x *types.Sym) { n.Label = x }
 func (n *ForStmt) Left() Node { return n.Cond }
@@ -473,24 +292,6 @@ func NewGoStmt(pos src.XPos, call Node) *GoStmt {
 	return n
 }
 
-func (n *GoStmt) String() string { return fmt.Sprint(n) }
-func (n *GoStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *GoStmt) copy() Node {
-	c := *n
-	c.init = c.init.Copy()
-	return &c
-}
-func (n *GoStmt) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDoList(n.init, err, do)
-	err = maybeDo(n.Call, err, do)
-	return err
-}
-func (n *GoStmt) editChildren(edit func(Node) Node) {
-	editList(n.init, edit)
-	n.Call = maybeEdit(n.Call, edit)
-}
-
 func (n *GoStmt) Left() Node { return n.Call }
 func (n *GoStmt) SetLeft(x Node) { n.Call = x }
 
@@ -512,30 +313,6 @@ func NewIfStmt(pos src.XPos, cond Node, body, els []Node) *IfStmt {
 	return n
 }
 
-func (n *IfStmt) String() string { return fmt.Sprint(n) }
-func (n *IfStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *IfStmt) copy() Node {
-	c := *n
-	c.init = c.init.Copy()
-	c.body = c.body.Copy()
-	c.Else = c.Else.Copy()
-	return &c
-}
-func (n *IfStmt) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDoList(n.init, err, do)
-	err = maybeDo(n.Cond, err, do)
-	err = maybeDoList(n.body, err, do)
-	err = maybeDoList(n.Else, err, do)
-	return err
-}
-func (n *IfStmt) editChildren(edit func(Node) Node) {
-	editList(n.init, edit)
-	n.Cond = maybeEdit(n.Cond, edit)
-	editList(n.body, edit)
-	editList(n.Else, edit)
-}
-
 func (n *IfStmt) Left() Node { return n.Cond }
 func (n *IfStmt) SetLeft(x Node) { n.Cond = x }
 func (n *IfStmt) Body() Nodes { return n.body }
@@ -560,22 +337,6 @@ func NewInlineMarkStmt(pos src.XPos, index int64) *InlineMarkStmt {
 	return n
 }
 
-func (n *InlineMarkStmt) String() string { return fmt.Sprint(n) }
-func (n *InlineMarkStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *InlineMarkStmt) copy() Node {
-	c := *n
-	c.init = c.init.Copy()
-	return &c
-}
-func (n *InlineMarkStmt) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDoList(n.init, err, do)
-	return err
-}
-func (n *InlineMarkStmt) editChildren(edit func(Node) Node) {
-	editList(n.init, edit)
-}
-
 func (n *InlineMarkStmt) Offset() int64 { return n.Index }
 func (n *InlineMarkStmt) SetOffset(x int64) { n.Index = x }
 
@@ -592,22 +353,6 @@ func NewLabelStmt(pos src.XPos, label *types.Sym) *LabelStmt {
 	return n
 }
 
-func (n *LabelStmt) String() string { return fmt.Sprint(n) }
-func (n *LabelStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *LabelStmt) copy() Node {
-	c := *n
-	c.init = c.init.Copy()
-	return &c
-}
-func (n *LabelStmt) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDoList(n.init, err, do)
-	return err
-}
-func (n *LabelStmt) editChildren(edit func(Node) Node) {
-	editList(n.init, edit)
-}
-
 func (n *LabelStmt) Sym() *types.Sym { return n.Label }
 func (n *LabelStmt) SetSym(x *types.Sym) { n.Label = x }
 
@@ -633,30 +378,6 @@ func NewRangeStmt(pos src.XPos, vars []Node, x Node, body []Node) *RangeStmt {
 	return n
 }
 
-func (n *RangeStmt) String() string { return fmt.Sprint(n) }
-func (n *RangeStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *RangeStmt) copy() Node {
-	c := *n
-	c.init = c.init.Copy()
-	c.Vars = c.Vars.Copy()
-	c.body = c.body.Copy()
-	return &c
-}
-func (n *RangeStmt) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDoList(n.init, err, do)
-	err = maybeDoList(n.Vars, err, do)
-	err = maybeDo(n.X, err, do)
-	err = maybeDoList(n.body, err, do)
-	return err
-}
-func (n *RangeStmt) editChildren(edit func(Node) Node) {
-	editList(n.init, edit)
-	editList(n.Vars, edit)
-	n.X = maybeEdit(n.X, edit)
-	editList(n.body, edit)
-}
-
 func (n *RangeStmt) Sym() *types.Sym { return n.Label }
 func (n *RangeStmt) SetSym(x *types.Sym) { n.Label = x }
 func (n *RangeStmt) Right() Node { return n.X }
@@ -690,25 +411,6 @@ func NewReturnStmt(pos src.XPos, results []Node) *ReturnStmt {
 	return n
 }
 
-func (n *ReturnStmt) String() string { return fmt.Sprint(n) }
-func (n *ReturnStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *ReturnStmt) copy() Node {
-	c := *n
-	c.init = c.init.Copy()
-	c.Results = c.Results.Copy()
-	return &c
-}
-func (n *ReturnStmt) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDoList(n.init, err, do)
-	err = maybeDoList(n.Results, err, do)
-	return err
-}
-func (n *ReturnStmt) editChildren(edit func(Node) Node) {
-	editList(n.init, edit)
-	editList(n.Results, edit)
-}
-
 func (n *ReturnStmt) Orig() Node { return n.orig }
 func (n *ReturnStmt) SetOrig(x Node) { n.orig = x }
 func (n *ReturnStmt) List() Nodes { return n.Results }
@@ -735,28 +437,6 @@ func NewSelectStmt(pos src.XPos, cases []Node) *SelectStmt {
 	return n
 }
 
-func (n *SelectStmt) String() string { return fmt.Sprint(n) }
-func (n *SelectStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *SelectStmt) copy() Node {
-	c := *n
-	c.init = c.init.Copy()
-	c.Cases = c.Cases.Copy()
-	c.Compiled = c.Compiled.Copy()
-	return &c
-}
-func (n *SelectStmt) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDoList(n.init, err, do)
-	err = maybeDoList(n.Cases, err, do)
-	err = maybeDoList(n.Compiled, err, do)
-	return err
-}
-func (n *SelectStmt) editChildren(edit func(Node) Node) {
-	editList(n.init, edit)
-	editList(n.Cases, edit)
-	editList(n.Compiled, edit)
-}
-
 func (n *SelectStmt) List() Nodes { return n.Cases }
 func (n *SelectStmt) PtrList() *Nodes { return &n.Cases }
 func (n *SelectStmt) SetList(x Nodes) { n.Cases = x }
@@ -782,26 +462,6 @@ func NewSendStmt(pos src.XPos, ch, value Node) *SendStmt {
 	return n
 }
 
-func (n *SendStmt) String() string { return fmt.Sprint(n) }
-func (n *SendStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *SendStmt) copy() Node {
-	c := *n
-	c.init = c.init.Copy()
-	return &c
-}
-func (n *SendStmt) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDoList(n.init, err, do)
-	err = maybeDo(n.Chan, err, do)
-	err = maybeDo(n.Value, err, do)
-	return err
-}
-func (n *SendStmt) editChildren(edit func(Node) Node) {
-	editList(n.init, edit)
-	n.Chan = maybeEdit(n.Chan, edit)
-	n.Value = maybeEdit(n.Value, edit)
-}
-
 func (n *SendStmt) Left() Node { return n.Chan }
 func (n *SendStmt) SetLeft(x Node) { n.Chan = x }
 func (n *SendStmt) Right() Node { return n.Value }
@@ -827,30 +487,6 @@ func NewSwitchStmt(pos src.XPos, tag Node, cases []Node) *SwitchStmt {
 	return n
 }
 
-func (n *SwitchStmt) String() string { return fmt.Sprint(n) }
-func (n *SwitchStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *SwitchStmt) copy() Node {
-	c := *n
-	c.init = c.init.Copy()
-	c.Cases = c.Cases.Copy()
-	c.Compiled = c.Compiled.Copy()
-	return &c
-}
-func (n *SwitchStmt) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDoList(n.init, err, do)
-	err = maybeDo(n.Tag, err, do)
-	err = maybeDoList(n.Cases, err, do)
-	err = maybeDoList(n.Compiled, err, do)
-	return err
-}
-func (n *SwitchStmt) editChildren(edit func(Node) Node) {
-	editList(n.init, edit)
-	n.Tag = maybeEdit(n.Tag, edit)
-	editList(n.Cases, edit)
-	editList(n.Compiled, edit)
-}
-
 func (n *SwitchStmt) Left() Node { return n.Tag }
 func (n *SwitchStmt) SetLeft(x Node) { n.Tag = x }
 func (n *SwitchStmt) List() Nodes { return n.Cases }
@@ -881,24 +517,6 @@ func NewTypeSwitchGuard(pos src.XPos, name, x Node) *TypeSwitchGuard {
 	return n
 }
 
-func (n *TypeSwitchGuard) String() string { return fmt.Sprint(n) }
-func (n *TypeSwitchGuard) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *TypeSwitchGuard) copy() Node { c := *n; return &c }
-func (n *TypeSwitchGuard) doChildren(do func(Node) error) error {
-	var err error
-	if n.name != nil {
-		err = maybeDo(n.name, err, do)
-	}
-	err = maybeDo(n.X, err, do)
-	return err
-}
-func (n *TypeSwitchGuard) editChildren(edit func(Node) Node) {
-	if n.name != nil {
-		n.name = edit(n.name).(*Name)
-	}
-	n.X = maybeEdit(n.X, edit)
-}
-
 func (n *TypeSwitchGuard) Left() Node {
 	if n.name == nil {
 		return nil
diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go
index 9f82c9faa2680..5e6d76229d32e 100644
--- a/src/cmd/compile/internal/ir/type.go
+++ b/src/cmd/compile/internal/ir/type.go
@@ -72,17 +72,6 @@ func NewChanType(pos src.XPos, elem Node, dir types.ChanDir) *ChanType {
 	return n
 }
 
-func (n *ChanType) String() string { return fmt.Sprint(n) }
-func (n *ChanType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *ChanType) copy() Node { c := *n; return &c }
-func (n *ChanType) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDo(n.Elem, err, do)
-	return err
-}
-func (n *ChanType) editChildren(edit func(Node) Node) {
-	n.Elem = maybeEdit(n.Elem, edit)
-}
 func (n *ChanType) SetOTYPE(t *types.Type) {
 	n.setOTYPE(t, n)
 	n.Elem = nil
@@ -102,19 +91,6 @@ func NewMapType(pos src.XPos, key, elem Node) *MapType {
 	return n
 }
 
-func (n *MapType) String() string { return fmt.Sprint(n) }
-func (n *MapType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *MapType) copy() Node { c := *n; return &c }
-func (n *MapType) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDo(n.Key, err, do)
-	err = maybeDo(n.Elem, err, do)
-	return err
-}
-func (n *MapType) editChildren(edit func(Node) Node) {
-	n.Key = maybeEdit(n.Key, edit)
-	n.Elem = maybeEdit(n.Elem, edit)
-}
 func (n *MapType) SetOTYPE(t *types.Type) {
 	n.setOTYPE(t, n)
 	n.Key = nil
@@ -134,22 +110,6 @@ func NewStructType(pos src.XPos, fields []*Field) *StructType {
 	return n
 }
 
-func (n *StructType) String() string { return fmt.Sprint(n) }
-func (n *StructType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *StructType) copy() Node {
-	c := *n
-	c.Fields = copyFields(c.Fields)
-	return &c
-}
-func (n *StructType) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDoFields(n.Fields, err, do)
-	return err
-}
-func (n *StructType) editChildren(edit func(Node) Node) {
-	editFields(n.Fields, edit)
-}
-
 func (n *StructType) SetOTYPE(t *types.Type) {
 	n.setOTYPE(t, n)
 	n.Fields = nil
@@ -176,22 +136,6 @@ func NewInterfaceType(pos src.XPos, methods []*Field) *InterfaceType {
 	return n
 }
 
-func (n *InterfaceType) String() string { return fmt.Sprint(n) }
-func (n *InterfaceType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *InterfaceType) copy() Node {
-	c := *n
-	c.Methods = copyFields(c.Methods)
-	return &c
-}
-func (n *InterfaceType) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDoFields(n.Methods, err, do)
-	return err
-}
-func (n *InterfaceType) editChildren(edit func(Node) Node) {
-	editFields(n.Methods, edit)
-}
-
 func (n *InterfaceType) SetOTYPE(t *types.Type) {
 	n.setOTYPE(t, n)
 	n.Methods = nil
@@ -212,30 +156,6 @@ func NewFuncType(pos src.XPos, rcvr *Field, args, results []*Field) *FuncType {
 	return n
 }
 
-func (n *FuncType) String() string { return fmt.Sprint(n) }
-func (n *FuncType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *FuncType) copy() Node {
-	c := *n
-	if c.Recv != nil {
-		c.Recv = c.Recv.copy()
-	}
-	c.Params = copyFields(c.Params)
-	c.Results = copyFields(c.Results)
-	return &c
-}
-func (n *FuncType) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDoField(n.Recv, err, do)
-	err = maybeDoFields(n.Params, err, do)
-	err = maybeDoFields(n.Results, err, do)
-	return err
-}
-func (n *FuncType) editChildren(edit func(Node) Node) {
-	editField(n.Recv, edit)
-	editFields(n.Params, edit)
-	editFields(n.Results, edit)
-}
-
 func (n *FuncType) SetOTYPE(t *types.Type) {
 	n.setOTYPE(t, n)
 	n.Recv = nil
@@ -365,17 +285,6 @@ func NewSliceType(pos src.XPos, elem Node) *SliceType {
 	return n
 }
 
-func (n *SliceType) String() string { return fmt.Sprint(n) }
-func (n *SliceType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *SliceType) copy() Node { c := *n; return &c }
-func (n *SliceType) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDo(n.Elem, err, do)
-	return err
-}
-func (n *SliceType) editChildren(edit func(Node) Node) {
-	n.Elem = maybeEdit(n.Elem, edit)
-}
 func (n *SliceType) SetOTYPE(t *types.Type) {
 	n.setOTYPE(t, n)
 	n.Elem = nil
@@ -396,20 +305,6 @@ func NewArrayType(pos src.XPos, size Node, elem Node) *ArrayType {
 	return n
 }
 
-func (n *ArrayType) String() string { return fmt.Sprint(n) }
-func (n *ArrayType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *ArrayType) copy() Node { c := *n; return &c }
-func (n *ArrayType) doChildren(do func(Node) error) error {
-	var err error
-	err = maybeDo(n.Len, err, do)
-	err = maybeDo(n.Elem, err, do)
-	return err
-}
-func (n *ArrayType) editChildren(edit func(Node) Node) {
-	n.Len = maybeEdit(n.Len, edit)
-	n.Elem = maybeEdit(n.Elem, edit)
-}
-
 func (n *ArrayType) SetOTYPE(t *types.Type) {
 	n.setOTYPE(t, n)
 	n.Len = nil
@@ -429,14 +324,6 @@ func newTypeNode(pos src.XPos, typ *types.Type) *typeNode {
 	return n
 }
 
-func (n *typeNode) String() string { return fmt.Sprint(n) }
-func (n *typeNode) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
-func (n *typeNode) copy() Node { c := *n; return &c }
-func (n *typeNode) doChildren(do func(Node) error) error {
-	return nil
-}
-func (n *typeNode) editChildren(edit func(Node) Node) {}
-
 func (n *typeNode) Type() *types.Type { return n.typ }
 func (n *typeNode) Sym() *types.Sym { return n.typ.Sym() }
 func (n *typeNode) CanBeNtype() {}

From dcc640e8391d6d022b595a3b53124bbcbd985c76 Mon Sep 17 00:00:00 2001
From: Russ Cox
Date: Fri, 4 Dec 2020 12:40:39 -0500
Subject: [PATCH 125/474] [dev.regabi] test: add exhaustive test of evaluated
 but not used
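
A few representative lines from the new test, as an illustration of the
class of diagnostics being locked down (the file below covers every
expression form; the "ok" glosses here are explanatory only):

	x + x    // ERROR "x \+ x evaluated but not used"
	f0()     // ok: a call with no results is a valid statement
	_ = f1() // ok: the single result is consumed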
\+ s evaluated but not used" + &x // ERROR "&x evaluated but not used" + b && b // ERROR "b && b evaluated but not used" + append(slice, 1) // ERROR "append\(slice, 1\) evaluated but not used" + string(bytes) // ERROR "string\(bytes\) evaluated but not used" + string(runes) // ERROR "string\(runes\) evaluated but not used" + f0() // ok + f1() // ok + f2() // ok + _ = f0() // ERROR "f0\(\) used as value" + _ = f1() // ok + _, _ = f2() // ok + _ = f2() // ERROR "assignment mismatch: 1 variable but f2 returns 2 values" + T.M0 // ERROR "T.M0 evaluated but not used" + t.M0 // ERROR "t.M0 evaluated but not used" + cap // ERROR "use of builtin cap not in function call" + cap(slice) // ERROR "cap\(slice\) evaluated but not used" + close(c) // ok + _ = close(c) // ERROR "close\(c\) used as value" + func() {} // ERROR "func literal evaluated but not used" + X{} // ERROR "undefined: X" + map[string]int{} // ERROR "map\[string\]int{} evaluated but not used" + struct{}{} // ERROR "struct ?{}{} evaluated but not used" + [1]int{} // ERROR "\[1\]int{} evaluated but not used" + []int{} // ERROR "\[\]int{} evaluated but not used" + &struct{}{} // ERROR "&struct ?{}{} evaluated but not used" + float32(x) // ERROR "float32\(x\) evaluated but not used" + I(t) // ERROR "I\(t\) evaluated but not used" + int(x) // ERROR "int\(x\) evaluated but not used" + copy(slice, slice) // ok + _ = copy(slice, slice) // ok + delete(m, 1) // ok + _ = delete(m, 1) // ERROR "delete\(m, 1\) used as value" + t.X // ERROR "t.X evaluated but not used" + tp.X // ERROR "tp.X evaluated but not used" + t.M // ERROR "t.M evaluated but not used" + I.M // ERROR "I.M evaluated but not used" + i.(T) // ERROR "i.\(T\) evaluated but not used" + x == x // ERROR "x == x evaluated but not used" + x != x // ERROR "x != x evaluated but not used" + x != x // ERROR "x != x evaluated but not used" + x < x // ERROR "x < x evaluated but not used" + x >= x // ERROR "x >= x evaluated but not used" + x > x // ERROR "x > x evaluated but not used" + *tp // ERROR "\*tp evaluated but not used" + slice[0] // ERROR "slice\[0\] evaluated but not used" + m[1] // ERROR "m\[1\] evaluated but not used" + len(slice) // ERROR "len\(slice\) evaluated but not used" + make(chan int) // ERROR "make\(chan int\) evaluated but not used" + make(map[int]int) // ERROR "make\(map\[int\]int\) evaluated but not used" + make([]int, 1) // ERROR "make\(\[\]int, 1\) evaluated but not used" + x * x // ERROR "x \* x evaluated but not used" + x / x // ERROR "x / x evaluated but not used" + x % x // ERROR "x % x evaluated but not used" + x << x // ERROR "x << x evaluated but not used" + x >> x // ERROR "x >> x evaluated but not used" + x & x // ERROR "x & x evaluated but not used" + x &^ x // ERROR "x &\^ x evaluated but not used" + new(int) // ERROR "new\(int\) evaluated but not used" + !b // ERROR "!b evaluated but not used" + ^x // ERROR "\^x evaluated but not used" + +x // ERROR "\+x evaluated but not used" + -x // ERROR "-x evaluated but not used" + b || b // ERROR "b \|\| b evaluated but not used" + panic(1) // ok + _ = panic(1) // ERROR "panic\(1\) used as value" + print(1) // ok + _ = print(1) // ERROR "print\(1\) used as value" + println(1) // ok + _ = println(1) // ERROR "println\(1\) used as value" + (x) // ERROR "x evaluated but not used" + c <- 1 // ok + slice[1:1] // ERROR "slice\[1:1\] evaluated but not used" + array[1:1] // ERROR "array\[1:1\] evaluated but not used" + s[1:1] // ERROR "s\[1:1\] evaluated but not used" + slice[1:1:1] // ERROR "slice\[1:1:1\] evaluated but 
not used" + array[1:1:1] // ERROR "array\[1:1:1\] evaluated but not used" + recover() // ok + <-c // ok + string(r) // ERROR "string\(r\) evaluated but not used" + iota // ERROR "undefined: iota" + real(cp) // ERROR "real\(cp\) evaluated but not used" + imag(cp) // ERROR "imag\(cp\) evaluated but not used" + complex(1, 2) // ERROR "complex\(1, 2\) evaluated but not used" + unsafe.Alignof(t.X) // ERROR "unsafe.Alignof\(t.X\) evaluated but not used" + unsafe.Offsetof(t.X) // ERROR "unsafe.Offsetof\(t.X\) evaluated but not used" + unsafe.Sizeof(t) // ERROR "unsafe.Sizeof\(t\) evaluated but not used" + _ = new(x) // ERROR "x is not a type" + _ = int // ERROR "type int is not an expression" +} From ef5964dd6b092f7e0d9bd4332a5d258eb80ecef8 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Fri, 4 Dec 2020 11:37:54 -0500 Subject: [PATCH 126/474] [dev.regabi] cmd/compile: arrange for typecheck1 to end in switch Ending typecheck1 in the switch makes it safe for each case to do an appropriate type assertion. The main change is dropping the computation of "ok" and using the syntax nodes themselves to decide what's OK. Passes buildall w/ toolstash -cmp. Change-Id: I2a1873a51e3f1194d74bb87a6653cb9857a02a1b Reviewed-on: https://go-review.googlesource.com/c/go/+/275444 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/typecheck.go | 336 +++++++++++------------ src/cmd/compile/internal/ir/expr.go | 12 +- src/cmd/compile/internal/ir/func.go | 2 + src/cmd/compile/internal/ir/name.go | 2 + src/cmd/compile/internal/ir/stmt.go | 11 + test/used.go | 7 +- 6 files changed, 195 insertions(+), 175 deletions(-) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index c22786f1486cf..dc9e23069e692 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -307,17 +307,91 @@ func typecheck(n ir.Node, top int) (res ir.Node) { return n } - n.SetTypecheck(2) - typecheck_tcstack = append(typecheck_tcstack, n) - n = typecheck1(n, top) + n.SetTypecheck(2) + n = typecheck1(n, top) n.SetTypecheck(1) last := len(typecheck_tcstack) - 1 typecheck_tcstack[last] = nil typecheck_tcstack = typecheck_tcstack[:last] + _, isExpr := n.(ir.Expr) + _, isStmt := n.(ir.Stmt) + isMulti := false + switch n.Op() { + case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH: + if t := n.Left().Type(); t != nil && t.Kind() == types.TFUNC { + nr := t.NumResults() + isMulti = nr > 1 + if nr == 0 { + isExpr = false + } + } + case ir.OAPPEND: + // Must be used (and not BinaryExpr/UnaryExpr). + isStmt = false + case ir.OCLOSE, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.OVARKILL, ir.OVARLIVE: + // Must not be used. + isExpr = false + isStmt = true + case ir.OCOPY, ir.ORECOVER, ir.ORECV: + // Can be used or not. + isStmt = true + } + + t := n.Type() + if t != nil && !t.IsFuncArgStruct() && n.Op() != ir.OTYPE { + switch t.Kind() { + case types.TFUNC, // might have TANY; wait until it's called + types.TANY, types.TFORW, types.TIDEAL, types.TNIL, types.TBLANK: + break + + default: + checkwidth(t) + } + } + if t != nil { + n = evalConst(n) + t = n.Type() + } + + // TODO(rsc): Lots of the complexity here is because typecheck can + // see OTYPE, ONAME, and OLITERAL nodes multiple times. + // Once we make the IR a proper tree, we should be able to simplify + // this code a bit, especially the final case. 

Change-Id: I2a1873a51e3f1194d74bb87a6653cb9857a02a1b
Reviewed-on: https://go-review.googlesource.com/c/go/+/275444
Trust: Russ Cox
Reviewed-by: Matthew Dempsky
---
 src/cmd/compile/internal/gc/typecheck.go | 336 +++++++++++------------
 src/cmd/compile/internal/ir/expr.go      |  12 +-
 src/cmd/compile/internal/ir/func.go      |   2 +
 src/cmd/compile/internal/ir/name.go      |   2 +
 src/cmd/compile/internal/ir/stmt.go      |  11 +
 test/used.go                             |   7 +-
 6 files changed, 195 insertions(+), 175 deletions(-)

diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go
index c22786f1486cf..dc9e23069e692 100644
--- a/src/cmd/compile/internal/gc/typecheck.go
+++ b/src/cmd/compile/internal/gc/typecheck.go
@@ -307,17 +307,91 @@ func typecheck(n ir.Node, top int) (res ir.Node) {
 		return n
 	}
 
-	n.SetTypecheck(2)
-
 	typecheck_tcstack = append(typecheck_tcstack, n)
-	n = typecheck1(n, top)
 
+	n.SetTypecheck(2)
+	n = typecheck1(n, top)
 	n.SetTypecheck(1)
 
 	last := len(typecheck_tcstack) - 1
 	typecheck_tcstack[last] = nil
 	typecheck_tcstack = typecheck_tcstack[:last]
 
+	_, isExpr := n.(ir.Expr)
+	_, isStmt := n.(ir.Stmt)
+	isMulti := false
+	switch n.Op() {
+	case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
+		if t := n.Left().Type(); t != nil && t.Kind() == types.TFUNC {
+			nr := t.NumResults()
+			isMulti = nr > 1
+			if nr == 0 {
+				isExpr = false
+			}
+		}
+	case ir.OAPPEND:
+		// Must be used (and not BinaryExpr/UnaryExpr).
+		isStmt = false
+	case ir.OCLOSE, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.OVARKILL, ir.OVARLIVE:
+		// Must not be used.
+		isExpr = false
+		isStmt = true
+	case ir.OCOPY, ir.ORECOVER, ir.ORECV:
+		// Can be used or not.
+		isStmt = true
+	}
+
+	t := n.Type()
+	if t != nil && !t.IsFuncArgStruct() && n.Op() != ir.OTYPE {
+		switch t.Kind() {
+		case types.TFUNC, // might have TANY; wait until it's called
+			types.TANY, types.TFORW, types.TIDEAL, types.TNIL, types.TBLANK:
+			break
+
+		default:
+			checkwidth(t)
+		}
+	}
+	if t != nil {
+		n = evalConst(n)
+		t = n.Type()
+	}
+
+	// TODO(rsc): Lots of the complexity here is because typecheck can
+	// see OTYPE, ONAME, and OLITERAL nodes multiple times.
+	// Once we make the IR a proper tree, we should be able to simplify
+	// this code a bit, especially the final case.
+	switch {
+	case top&(ctxStmt|ctxExpr) == ctxExpr && !isExpr && n.Op() != ir.OTYPE && !isMulti:
+		if !n.Diag() {
+			base.Errorf("%v used as value", n)
+			n.SetDiag(true)
+		}
+		if t != nil {
+			n.SetType(nil)
+		}
+
+	case top&ctxType == 0 && n.Op() == ir.OTYPE && t != nil:
+		if !n.Type().Broke() {
+			base.Errorf("type %v is not an expression", n.Type())
+		}
+		n.SetType(nil)
+
+	case top&(ctxStmt|ctxExpr) == ctxStmt && !isStmt && t != nil:
+		if !n.Diag() {
+			base.Errorf("%v evaluated but not used", n)
+			n.SetDiag(true)
+		}
+		n.SetType(nil)
+
+	case top&(ctxType|ctxExpr) == ctxType && n.Op() != ir.OTYPE && n.Op() != ir.ONONAME && (t != nil || n.Op() == ir.ONAME):
+		base.Errorf("%v is not a type", n)
+		if t != nil {
+			n.SetType(nil)
+		}
+
+	}
+
 	base.Pos = lno
 	return n
 }
@@ -335,8 +409,7 @@ func indexlit(n ir.Node) ir.Node {
 	return n
 }
 
-// The result of typecheck1 MUST be assigned back to n, e.g.
-// 	n.Left = typecheck1(n.Left, top)
+// typecheck1 should ONLY be called from typecheck.
 func typecheck1(n ir.Node, top int) (res ir.Node) {
 	if enableTrace && base.Flag.LowerT {
 		defer tracePrint("typecheck1", n)(&res)
@@ -345,7 +418,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 	switch n.Op() {
 	case ir.OLITERAL, ir.ONAME, ir.ONONAME, ir.OTYPE:
 		if n.Sym() == nil {
-			break
+			return n
 		}
 
 		if n.Op() == ir.ONAME && n.SubOp() != 0 && top&ctxCallee == 0 {
@@ -361,34 +434,29 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 		}
 	}
 
-	ok := 0
 	switch n.Op() {
-	// until typecheck is complete, do nothing.
 	default:
 		ir.Dump("typecheck", n)
-		base.Fatalf("typecheck %v", n.Op())
+		panic("unreachable")
 
 	// names
 	case ir.OLITERAL:
-		ok |= ctxExpr
-
 		if n.Type() == nil && n.Val().Kind() == constant.String {
 			base.Fatalf("string literal missing type")
 		}
+		return n
 
 	case ir.ONIL, ir.ONONAME:
-		ok |= ctxExpr
+		return n
 
 	case ir.ONAME:
 		if n.Name().Decldepth == 0 {
 			n.Name().Decldepth = decldepth
 		}
 		if n.SubOp() != 0 {
-			ok |= ctxCallee
-			break
+			return n
 		}
-
 		if top&ctxAssign == 0 {
 			// not a write to the variable
 			if ir.IsBlank(n) {
@@ -396,11 +464,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 				n.SetType(nil)
 				return n
 			}
-
 			n.Name().SetUsed(true)
 		}
-
-		ok |= ctxExpr
+		return n
 
 	case ir.OPACK:
 		base.Errorf("use of package %v without selector", n.Sym())
@@ -409,14 +475,12 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 	// types (ODEREF is with exprs)
 	case ir.OTYPE:
-		ok |= ctxType
-
 		if n.Type() == nil {
 			return n
 		}
+		return n
 
 	case ir.OTSLICE:
-		ok |= ctxType
 		n := n.(*ir.SliceType)
 		n.Elem = typecheck(n.Elem, ctxType)
 		if n.Elem.Type() == nil {
@@ -425,9 +489,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 		t := types.NewSlice(n.Elem.Type())
 		n.SetOTYPE(t)
 		checkwidth(t)
+		return n
 
 	case ir.OTARRAY:
-		ok |= ctxType
 		n := n.(*ir.ArrayType)
 		n.Elem = typecheck(n.Elem, ctxType)
 		if n.Elem.Type() == nil {
@@ -469,9 +533,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 		t := types.NewArray(n.Elem.Type(), bound)
 		n.SetOTYPE(t)
 		checkwidth(t)
+		return n
 
 	case ir.OTMAP:
-		ok |= ctxType
 		n := n.(*ir.MapType)
 		n.Key = typecheck(n.Key, ctxType)
 		n.Elem = typecheck(n.Elem, ctxType)
@@ -488,9 +552,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 		}
 		n.SetOTYPE(types.NewMap(l.Type(), r.Type()))
 		mapqueue = append(mapqueue, n) // check map keys when all types are settled
+		return n
 
 	case ir.OTCHAN:
-		ok |= ctxType
 		n := n.(*ir.ChanType)
 		n.Elem = typecheck(n.Elem, ctxType)
 		l := n.Elem
@@ -501,21 +565,22 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 			base.Errorf("chan of incomplete (or unallocatable) type not allowed")
 		}
 		n.SetOTYPE(types.NewChan(l.Type(), n.Dir))
+		return n
 
 	case ir.OTSTRUCT:
-		ok |= ctxType
 		n := n.(*ir.StructType)
 		n.SetOTYPE(tostruct(n.Fields))
+		return n
 
 	case ir.OTINTER:
-		ok |= ctxType
 		n := n.(*ir.InterfaceType)
 		n.SetOTYPE(tointerface(n.Methods))
+		return n
 
 	case ir.OTFUNC:
-		ok |= ctxType
 		n := n.(*ir.FuncType)
 		n.SetOTYPE(functype(n.Recv, n.Params, n.Results))
+		return n
 
 	// type or expr
 	case ir.ODEREF:
@@ -528,11 +593,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 			return n
 		}
 		if l.Op() == ir.OTYPE {
-			ok |= ctxType
 			n.SetOTYPE(types.NewPtr(l.Type()))
 			// Ensure l.Type gets dowidth'd for the backend. Issue 20174.
 			checkwidth(l.Type())
-			break
+			return n
 		}
 
 		if !t.IsPtr() {
@@ -541,12 +605,12 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 				n.SetType(nil)
 				return n
 			}
-
-			break
+			base.Errorf("%v is not a type", l)
+			return n
 		}
 
-		ok |= ctxExpr
 		n.SetType(t.Elem())
+		return n
 
 	// arithmetic exprs
 	case ir.OASOP,
@@ -573,7 +637,6 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 		var op ir.Op
 		var r ir.Node
 		if n.Op() == ir.OASOP {
-			ok |= ctxStmt
 			n.SetLeft(typecheck(n.Left(), ctxExpr))
 			n.SetRight(typecheck(n.Right(), ctxExpr))
 			l = n.Left()
@@ -591,7 +654,6 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 			// TODO(marvin): Fix Node.EType type union.
 			op = n.SubOp()
 		} else {
-			ok |= ctxExpr
 			n.SetLeft(typecheck(n.Left(), ctxExpr))
 			n.SetRight(typecheck(n.Right(), ctxExpr))
 			l = n.Left()
@@ -629,8 +691,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 			if (l.Type() == types.UntypedFloat || l.Type() == types.UntypedComplex) && r.Op() == ir.OLITERAL {
 				n.SetType(types.UntypedInt)
 			}
-
-			break
+			return n
 		}
 
 		// For "x == x && len(s)", it's better to report that "len(s)" (type int)
@@ -815,9 +876,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 		}
 
 		n.SetType(t)
+		return n
 
 	case ir.OBITNOT, ir.ONEG, ir.ONOT, ir.OPLUS:
-		ok |= ctxExpr
 		n.SetLeft(typecheck(n.Left(), ctxExpr))
 		l := n.Left()
 		t := l.Type()
@@ -832,11 +893,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 		}
 
 		n.SetType(t)
+		return n
 
 	// exprs
 	case ir.OADDR:
-		ok |= ctxExpr
-
 		n.SetLeft(typecheck(n.Left(), ctxExpr))
 		if n.Left().Type() == nil {
 			n.SetType(nil)
@@ -871,13 +931,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 		}
 
 		n.SetType(types.NewPtr(n.Left().Type()))
+		return n
 
 	case ir.OCOMPLIT:
-		ok |= ctxExpr
-		n = typecheckcomplit(n)
-		if n.Type() == nil {
-			return n
-		}
+		return typecheckcomplit(n)
 
 	case ir.OXDOT, ir.ODOT:
 		if n.Op() == ir.OXDOT {
@@ -903,12 +960,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 		s := n.Sym()
 
 		if n.Left().Op() == ir.OTYPE {
-			n = typecheckMethodExpr(n)
-			if n.Type() == nil {
-				return n
-			}
-			ok = ctxExpr
-			break
+			return typecheckMethodExpr(n)
 		}
 
 		if t.IsPtr() && !t.Elem().IsInterface() {
@@ -952,21 +1004,12 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 			return n
 		}
 
-		switch n.Op() {
-		case ir.ODOTINTER, ir.ODOTMETH:
-			if top&ctxCallee != 0 {
-				ok |= ctxCallee
-			} else {
-				n = typecheckpartialcall(n, s)
-				ok |= ctxExpr
-			}
-
-		default:
-			ok |= ctxExpr
+		if (n.Op() == ir.ODOTINTER || n.Op() == ir.ODOTMETH) && top&ctxCallee == 0 {
+			n = typecheckpartialcall(n, s)
 		}
+		return n
 
 	case ir.ODOTTYPE:
-		ok |= ctxExpr
 		n.SetLeft(typecheck(n.Left(), ctxExpr))
 		n.SetLeft(defaultlit(n.Left(), nil))
 		l := n.Left()
@@ -1009,9 +1052,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 				return n
 			}
 		}
+		return n
 
 	case ir.OINDEX:
-		ok |= ctxExpr
 		n.SetLeft(typecheck(n.Left(), ctxExpr))
 		n.SetLeft(defaultlit(n.Left(), nil))
 		n.SetLeft(implicitstar(n.Left()))
@@ -1045,7 +1088,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 
 			if n.Right().Type() != nil && !n.Right().Type().IsInteger() {
 				base.Errorf("non-integer %s index %v", why, n.Right())
-				break
+				return n
 			}
 
 			if !n.Bounded() && ir.IsConst(n.Right(), constant.Int) {
@@ -1067,9 +1110,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 			n.SetOp(ir.OINDEXMAP)
 			n.SetIndexMapLValue(false)
 		}
+		return n
 
 	case ir.ORECV:
-		ok |= ctxStmt | ctxExpr
 		n.SetLeft(typecheck(n.Left(), ctxExpr))
 		n.SetLeft(defaultlit(n.Left(), nil))
 		l := n.Left()
@@ -1091,9 +1134,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 		}
 
 		n.SetType(t.Elem())
+		return n
 
 	case ir.OSEND:
-		ok |= ctxStmt
 		n.SetLeft(typecheck(n.Left(), ctxExpr))
 		n.SetRight(typecheck(n.Right(), ctxExpr))
 		n.SetLeft(defaultlit(n.Left(), nil))
@@ -1115,14 +1158,13 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 		if n.Right().Type() == nil {
 			return n
 		}
+		return n
 
 	case ir.OSLICEHEADER:
 		// Errors here are Fatalf instead of Errorf because only the compiler
 		// can construct an OSLICEHEADER node.
 		// Components used in OSLICEHEADER that are supplied by parsed source code
 		// have already been typechecked in e.g. OMAKESLICE earlier.
-		ok |= ctxExpr
-
 		t := n.Type()
 		if t == nil {
 			base.Fatalf("no type specified for OSLICEHEADER")
@@ -1160,14 +1202,13 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 
 		n.List().SetFirst(l)
 		n.List().SetSecond(c)
+		return n
 
 	case ir.OMAKESLICECOPY:
 		// Errors here are Fatalf instead of Errorf because only the compiler
 		// can construct an OMAKESLICECOPY node.
 		// Components used in OMAKESCLICECOPY that are supplied by parsed source code
 		// have already been typechecked in OMAKE and OCOPY earlier.
-		ok |= ctxExpr
-
 		t := n.Type()
 
 		if t == nil {
@@ -1203,9 +1244,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 				base.Fatalf("len for OMAKESLICECOPY must be non-negative")
 			}
 		}
+		return n
 
 	case ir.OSLICE, ir.OSLICE3:
-		ok |= ctxExpr
 		n.SetLeft(typecheck(n.Left(), ctxExpr))
 		low, high, max := n.SliceBounds()
 		hasmax := n.Op().IsSlice3()
@@ -1277,6 +1318,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 			n.SetType(nil)
 			return n
 		}
+		return n
 
 	// call and call like
 	case ir.OCALL:
@@ -1306,6 +1348,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 		case ir.OAPPEND, ir.ODELETE, ir.OMAKE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
 			n.SetOp(l.SubOp())
 			n.SetLeft(nil)
+			n.SetTypecheck(0) // re-typechecking new op is OK, not a loop
 
 		case ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.OPANIC, ir.OREAL:
 			typecheckargs(n)
@@ -1331,8 +1374,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 				n = ir.NodAt(n.Pos(), l.SubOp(), arg1, arg2)
 				n = initExpr(old.Init().Slice(), n) // typecheckargs can add to old.Init
 			}
-			n = typecheck1(n, top)
-			return n
+			return typecheck(n, top)
 		}
 
 		n.SetLeft(defaultlit(n.Left(), nil))
@@ -1346,8 +1388,6 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 		}
 
 		// pick off before type-checking arguments
-		ok |= ctxExpr
-
 		arg, ok := needOneArg(n, "conversion to %v", l.Type())
 		if !ok {
 			n.SetType(nil)
@@ -1356,8 +1396,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 
 		n = ir.NodAt(n.Pos(), ir.OCONV, arg, nil)
 		n.SetType(l.Type())
-		n = typecheck1(n, top)
-		return n
+		return typecheck1(n, top)
 	}
 
 	typecheckargs(n)
@@ -1403,11 +1442,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 	}
 
 	typecheckaste(ir.OCALL, n.Left(), n.IsDDD(), t.Params(), n.List(), func() string { return fmt.Sprintf("argument to %v", n.Left()) })
-	ok |= ctxStmt
 	if t.NumResults() == 0 {
-		break
+		return n
 	}
-	ok |= ctxExpr
 	if t.NumResults() == 1 {
 		n.SetType(l.Type().Results().Field(0).Type)
@@ -1420,24 +1457,23 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 			// and we want to avoid the temporaries, so we do the rewrite earlier than is typical.
 			n.SetOp(ir.OGETG)
 		}
-
-		break
+		return n
 	}
 
 	// multiple return
 	if top&(ctxMultiOK|ctxStmt) == 0 {
 		base.Errorf("multiple-value %v() in single-value context", l)
-		break
+		return n
 	}
 
 	n.SetType(l.Type().Results())
+	return n
 
 	case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
-		ok |= ctxExpr
 		n.SetType(types.Types[types.TUINTPTR])
+		return n
 
 	case ir.OCAP, ir.OLEN:
-		ok |= ctxExpr
 		n.SetLeft(typecheck(n.Left(), ctxExpr))
 		n.SetLeft(defaultlit(n.Left(), nil))
 		n.SetLeft(implicitstar(n.Left()))
@@ -1461,9 +1497,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 		}
 
 		n.SetType(types.Types[types.TINT])
+		return n
 
 	case ir.OREAL, ir.OIMAG:
-		ok |= ctxExpr
 		n.SetLeft(typecheck(n.Left(), ctxExpr))
 		l := n.Left()
 		t := l.Type()
@@ -1485,9 +1521,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 			n.SetType(nil)
 			return n
 		}
+		return n
 
	case ir.OCOMPLEX:
-		ok |= ctxExpr
 		l := typecheck(n.Left(), ctxExpr)
 		r := typecheck(n.Right(), ctxExpr)
 		if l.Type() == nil || r.Type() == nil {
@@ -1525,6 +1561,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 			t = types.Types[types.TCOMPLEX128]
 		}
 		n.SetType(t)
+		return n
 
 	case ir.OCLOSE:
 		n.SetLeft(typecheck(n.Left(), ctxExpr))
@@ -1546,11 +1583,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 			n.SetType(nil)
 			return n
 		}
-
-		ok |= ctxStmt
+		return n
 
 	case ir.ODELETE:
-		ok |= ctxStmt
 		typecheckargs(n)
 		args := n.List()
 		if args.Len() == 0 {
@@ -1580,9 +1615,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 		}
 
 		args.SetSecond(assignconv(r, l.Type().Key(), "delete"))
+		return n
 
 	case ir.OAPPEND:
-		ok |= ctxExpr
 		typecheckargs(n)
 		args := n.List()
 		if args.Len() == 0 {
@@ -1625,11 +1660,11 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 
 			if t.Elem().IsKind(types.TUINT8) && args.Second().Type().IsString() {
 				args.SetSecond(defaultlit(args.Second(), types.Types[types.TSTRING]))
-				break
+				return n
 			}
 
 			args.SetSecond(assignconv(args.Second(), t.Underlying(), "append"))
-			break
+			return n
 		}
 
 		as := args.Slice()[1:]
@@ -1640,9 +1675,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 			as[i] = assignconv(n, t.Elem(), "append")
 			checkwidth(as[i].Type()) // ensure width is calculated for backend
 		}
+		return n
 
 	case ir.OCOPY:
-		ok |= ctxStmt | ctxExpr
 		n.SetType(types.Types[types.TINT])
 		n.SetLeft(typecheck(n.Left(), ctxExpr))
 		n.SetLeft(defaultlit(n.Left(), nil))
@@ -1656,7 +1691,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 		// copy([]byte, string)
 		if n.Left().Type().IsSlice() && n.Right().Type().IsString() {
 			if types.Identical(n.Left().Type().Elem(), types.ByteType) {
-				break
+				return n
 			}
 			base.Errorf("arguments to copy have different element types: %L and string", n.Left().Type())
 			n.SetType(nil)
@@ -1680,9 +1715,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 			n.SetType(nil)
 			return n
 		}
+		return n
 
 	case ir.OCONV:
-		ok |= ctxExpr
 		checkwidth(n.Type()) // ensure width is calculated for backend
 		n.SetLeft(typecheck(n.Left(), ctxExpr))
 		n.SetLeft(convlit1(n.Left(), n.Type(), true, nil))
@@ -1717,16 +1752,16 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 		// do not convert to []byte literal. See CL 125796.
 		// generated code and compiler memory footprint is better without it.
 		case ir.OSTR2BYTES:
-			break
+			// ok
 
 		case ir.OSTR2RUNES:
 			if n.Left().Op() == ir.OLITERAL {
 				n = stringtoruneslit(n)
 			}
 		}
+		return n
 
 	case ir.OMAKE:
-		ok |= ctxExpr
 		args := n.List().Slice()
 		if len(args) == 0 {
 			base.Errorf("missing argument to make")
@@ -1832,9 +1867,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 
 		nn.SetType(t)
 		n = nn
+		return n
 
 	case ir.ONEW:
-		ok |= ctxExpr
 		if n.Left() == nil {
 			// Fatalf because the OCALL above checked for us,
 			// so this must be an internally-generated mistake.
@@ -1849,9 +1884,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 		}
 		n.SetLeft(l)
 		n.SetType(types.NewPtr(t))
+		return n
 
 	case ir.OPRINT, ir.OPRINTN:
-		ok |= ctxStmt
 		typecheckargs(n)
 		ls := n.List().Slice()
 		for i1, n1 := range ls {
@@ -1862,18 +1897,18 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 				ls[i1] = defaultlit(ls[i1], nil)
 			}
 		}
+		return n
 
 	case ir.OPANIC:
-		ok |= ctxStmt
 		n.SetLeft(typecheck(n.Left(), ctxExpr))
 		n.SetLeft(defaultlit(n.Left(), types.Types[types.TINTER]))
 		if n.Left().Type() == nil {
 			n.SetType(nil)
 			return n
 		}
+		return n
 
 	case ir.ORECOVER:
-		ok |= ctxExpr | ctxStmt
 		if n.List().Len() != 0 {
 			base.Errorf("too many arguments to recover")
 			n.SetType(nil)
@@ -1881,16 +1916,16 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 		}
 
 		n.SetType(types.Types[types.TINTER])
+		return n
 
 	case ir.OCLOSURE:
-		ok |= ctxExpr
 		typecheckclosure(n, top)
 		if n.Type() == nil {
 			return n
 		}
+		return n
 
 	case ir.OITAB:
-		ok |= ctxExpr
 		n.SetLeft(typecheck(n.Left(), ctxExpr))
 		t := n.Left().Type()
 		if t == nil {
@@ -1901,14 +1936,15 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 			base.Fatalf("OITAB of %v", t)
 		}
 		n.SetType(types.NewPtr(types.Types[types.TUINTPTR]))
+		return n
 
 	case ir.OIDATA:
 		// Whoever creates the OIDATA node must know a priori the concrete type at that moment,
 		// usually by just having checked the OITAB.
 		base.Fatalf("cannot typecheck interface data %v", n)
+		panic("unreachable")
 
 	case ir.OSPTR:
-		ok |= ctxExpr
 		n.SetLeft(typecheck(n.Left(), ctxExpr))
 		t := n.Left().Type()
 		if t == nil {
@@ -1923,33 +1959,33 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 		} else {
 			n.SetType(types.NewPtr(t.Elem()))
 		}
+		return n
 
 	case ir.OCLOSUREREAD:
-		ok |= ctxExpr
+		return n
 
 	case ir.OCFUNC:
-		ok |= ctxExpr
 		n.SetLeft(typecheck(n.Left(), ctxExpr))
 		n.SetType(types.Types[types.TUINTPTR])
+		return n
 
 	case ir.OCONVNOP:
-		ok |= ctxExpr
 		n.SetLeft(typecheck(n.Left(), ctxExpr))
+		return n
 
 	// statements
 	case ir.OAS:
-		ok |= ctxStmt
-
 		typecheckas(n)
 
 		// Code that creates temps does not bother to set defn, so do it here.
 		if n.Left().Op() == ir.ONAME && ir.IsAutoTmp(n.Left()) {
 			n.Left().Name().Defn = n
 		}
+		return n
 
 	case ir.OAS2:
-		ok |= ctxStmt
 		typecheckas2(n)
+		return n
 
 	case ir.OBREAK,
 		ir.OCONTINUE,
@@ -1958,14 +1994,13 @@ func typecheck1(n ir.Node, top int) (res ir.Node) {
 		ir.OFALL,
 		ir.OVARKILL,
 		ir.OVARLIVE:
-		ok |= ctxStmt
+		return n
 
 	case ir.OBLOCK:
-		ok |= ctxStmt
 		typecheckslice(n.List().Slice(), ctxStmt)
+		return n
 
 	case ir.OLABEL:
-		ok |= ctxStmt
 		decldepth++
 		if n.Sym().IsBlank() {
 			// Empty identifier is valid but useless.
			// See issues 7538, 11589, 11593.
n = ir.NodAt(n.Pos(), ir.OBLOCK, nil, nil) } + return n case ir.ODEFER: - ok |= ctxStmt n.SetLeft(typecheck(n.Left(), ctxStmt|ctxExpr)) if !n.Left().Diag() { checkdefergo(n) } + return n case ir.OGO: - ok |= ctxStmt n.SetLeft(typecheck(n.Left(), ctxStmt|ctxExpr)) checkdefergo(n) + return n case ir.OFOR, ir.OFORUNTIL: - ok |= ctxStmt typecheckslice(n.Init().Slice(), ctxStmt) decldepth++ n.SetLeft(typecheck(n.Left(), ctxExpr)) @@ -2004,9 +2039,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } typecheckslice(n.Body().Slice(), ctxStmt) decldepth-- + return n case ir.OIF: - ok |= ctxStmt typecheckslice(n.Init().Slice(), ctxStmt) n.SetLeft(typecheck(n.Left(), ctxExpr)) n.SetLeft(defaultlit(n.Left(), nil)) @@ -2018,9 +2053,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } typecheckslice(n.Body().Slice(), ctxStmt) typecheckslice(n.Rlist().Slice(), ctxStmt) + return n case ir.ORETURN: - ok |= ctxStmt typecheckargs(n) if Curfn == nil { base.Errorf("return outside function") @@ -2029,24 +2064,25 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } if hasNamedResults(Curfn) && n.List().Len() == 0 { - break + return n } typecheckaste(ir.ORETURN, nil, false, Curfn.Type().Results(), n.List(), func() string { return "return argument" }) + return n case ir.ORETJMP: - ok |= ctxStmt + return n case ir.OSELECT: - ok |= ctxStmt typecheckselect(n) + return n case ir.OSWITCH: - ok |= ctxStmt typecheckswitch(n) + return n case ir.ORANGE: - ok |= ctxStmt typecheckrange(n) + return n case ir.OTYPESW: base.Errorf("use of .(type) outside type switch") @@ -2054,64 +2090,22 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.ODCLFUNC: - ok |= ctxStmt typecheckfunc(n.(*ir.Func)) + return n case ir.ODCLCONST: - ok |= ctxStmt n.SetLeft(typecheck(n.Left(), ctxExpr)) + return n case ir.ODCLTYPE: - ok |= ctxStmt n.SetLeft(typecheck(n.Left(), ctxType)) checkwidth(n.Left().Type()) - } - - t := n.Type() - if t != nil && !t.IsFuncArgStruct() && n.Op() != ir.OTYPE { - switch t.Kind() { - case types.TFUNC, // might have TANY; wait until it's called - types.TANY, types.TFORW, types.TIDEAL, types.TNIL, types.TBLANK: - break - - default: - checkwidth(t) - } - } - - n = evalConst(n) - if n.Op() == ir.OTYPE && top&ctxType == 0 { - if !n.Type().Broke() { - base.Errorf("type %v is not an expression", n.Type()) - } - n.SetType(nil) return n } - if top&(ctxExpr|ctxType) == ctxType && n.Op() != ir.OTYPE { - base.Errorf("%v is not a type", n) - n.SetType(nil) - return n - } - - // TODO(rsc): simplify - if (top&(ctxCallee|ctxExpr|ctxType) != 0) && top&ctxStmt == 0 && ok&(ctxExpr|ctxType|ctxCallee) == 0 { - base.Errorf("%v used as value", n) - n.SetType(nil) - return n - } - - if (top&ctxStmt != 0) && top&(ctxCallee|ctxExpr|ctxType) == 0 && ok&ctxStmt == 0 { - if !n.Diag() { - base.Errorf("%v evaluated but not used", n) - n.SetDiag(true) - } - - n.SetType(nil) - return n - } - - return n + // No return n here! + // Individual cases can type-assert n, introducing a new one. + // Each must execute its own return n. } func typecheckargs(n ir.Node) { diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 7b1aeedcdf8d4..7165a06b25751 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -32,7 +32,13 @@ func maybeEdit(x Node, edit func(Node) Node) Node { return edit(x) } -// A miniStmt is a miniNode with extra fields common to expressions. +// An Expr is a Node that can appear as an expression. 
+type Expr interface { + Node + isExpr() +} + +// A miniExpr is a miniNode with extra fields common to expressions. // TODO(rsc): Once we are sure about the contents, compact the bools // into a bit field and leave extra bits available for implementations // embedding miniExpr. Right now there are ~60 unused bits sitting here. @@ -52,6 +58,8 @@ const ( miniExprBounded ) +func (*miniExpr) isExpr() {} + func (n *miniExpr) Type() *types.Type { return n.typ } func (n *miniExpr) SetType(x *types.Type) { n.typ = x } func (n *miniExpr) Opt() interface{} { return n.opt } @@ -192,6 +200,8 @@ func NewCallExpr(pos src.XPos, fun Node, args []Node) *CallExpr { return n } +func (*CallExpr) isStmt() {} + func (n *CallExpr) Orig() Node { return n.orig } func (n *CallExpr) SetOrig(x Node) { n.orig = x } func (n *CallExpr) Left() Node { return n.X } diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 38e00da7dae30..3bca25b504b99 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -114,6 +114,8 @@ func NewFunc(pos src.XPos) *Func { return f } +func (f *Func) isStmt() {} + func (f *Func) Func() *Func { return f } func (f *Func) Body() Nodes { return f.body } func (f *Func) PtrBody() *Nodes { return &f.body } diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 06cffe0325855..c527ba281dd3b 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -121,6 +121,8 @@ type Name struct { Outer *Name } +func (n *Name) isExpr() {} + // NewNameAt returns a new ONAME Node associated with symbol s at position pos. // The caller is responsible for setting Curfn. func NewNameAt(pos src.XPos, sym *types.Sym) *Name { diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index 19f90ce1fa7b6..836bbcb45320d 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -27,15 +27,26 @@ func NewDecl(pos src.XPos, op Op, x Node) *Decl { return n } +func (*Decl) isStmt() {} + func (n *Decl) Left() Node { return n.X } func (n *Decl) SetLeft(x Node) { n.X = x } +// A Stmt is a Node that can appear as a statement. +// This includes statement-like expressions such as <-c and f(). +type Stmt interface { + Node + isStmt() +} + // A miniStmt is a miniNode with extra fields common to statements. 
type miniStmt struct { miniNode init Nodes } +func (*miniStmt) isStmt() {} + func (n *miniStmt) Init() Nodes { return n.init } func (n *miniStmt) SetInit(x Nodes) { n.init = x } func (n *miniStmt) PtrInit() *Nodes { return &n.init } diff --git a/test/used.go b/test/used.go index adf2bfcb95b4a..5c7aad24a6ca2 100644 --- a/test/used.go +++ b/test/used.go @@ -10,7 +10,7 @@ import "unsafe" const C = 1 -var x1, x int +var x, x1, x2 int var b bool var s string var c chan int @@ -120,7 +120,6 @@ func _() { _ = print(1) // ERROR "print\(1\) used as value" println(1) // ok _ = println(1) // ERROR "println\(1\) used as value" - (x) // ERROR "x evaluated but not used" c <- 1 // ok slice[1:1] // ERROR "slice\[1:1\] evaluated but not used" array[1:1] // ERROR "array\[1:1\] evaluated but not used" @@ -137,6 +136,8 @@ func _() { unsafe.Alignof(t.X) // ERROR "unsafe.Alignof\(t.X\) evaluated but not used" unsafe.Offsetof(t.X) // ERROR "unsafe.Offsetof\(t.X\) evaluated but not used" unsafe.Sizeof(t) // ERROR "unsafe.Sizeof\(t\) evaluated but not used" - _ = new(x) // ERROR "x is not a type" _ = int // ERROR "type int is not an expression" + (x) // ERROR "x evaluated but not used" + _ = new(x2) // ERROR "x2 is not a type" + _ = new(1 + 1) // ERROR "1 \+ 1 is not a type" } From a79742f39a906a52fce4873895599298c0699743 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Fri, 4 Dec 2020 18:40:24 -0500 Subject: [PATCH 127/474] [dev.regabi] cmd/compile: remove "short" node header mode This is unreachable code - the only way short can be true is if verb == 'S', but jconv is only called when verb == 'j'. Simplify by removing. Passes buildall w/ toolstash -cmp. Change-Id: I27bd38319f72215069e940b320b5c82608e2651a Reviewed-on: https://go-review.googlesource.com/c/go/+/275772 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/escape.go | 6 ++---- src/cmd/compile/internal/ir/fmt.go | 18 ++++++++---------- 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 32bc7b297b0c4..a7458ab733fb2 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -148,7 +148,7 @@ func init() { } // escFmt is called from node printing to print information about escape analysis results. -func escFmt(n ir.Node, short bool) string { +func escFmt(n ir.Node) string { text := "" switch n.Esc() { case EscUnknown: @@ -161,9 +161,7 @@ func escFmt(n ir.Node, short bool) string { text = "esc(no)" case EscNever: - if !short { - text = "esc(N)" - } + text = "esc(N)" default: text = fmt.Sprintf("esc(%d)", n.Esc()) diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index bc5536241ea1d..593e77880d959 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -339,21 +339,19 @@ func nodeFormat(n Node, s fmt.State, verb rune, mode FmtMode) { } // EscFmt is set by the escape analysis code to add escape analysis details to the node print. 
-var EscFmt func(n Node, short bool) string
+var EscFmt func(n Node) string
 
 // *Node details
 func jconvFmt(n Node, s fmt.State, flag FmtFlag) {
-	short := flag&FmtShort != 0
-
 	// Useful to see which nodes in an AST printout are actually identical
 	if base.Debug.DumpPtrs != 0 {
 		fmt.Fprintf(s, " p(%p)", n)
 	}
-	if !short && n.Name() != nil && n.Name().Vargen != 0 {
+	if n.Name() != nil && n.Name().Vargen != 0 {
 		fmt.Fprintf(s, " g(%d)", n.Name().Vargen)
 	}
 
-	if base.Debug.DumpPtrs != 0 && !short && n.Name() != nil && n.Name().Defn != nil {
+	if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Defn != nil {
 		// Useful to see where Defn is set and what node it points to
 		fmt.Fprintf(s, " defn(%p)", n.Name().Defn)
 	}
@@ -369,7 +367,7 @@ func jconvFmt(n Node, s fmt.State, flag FmtFlag) {
 		fmt.Fprintf(s, " l(%s%d)", pfx, n.Pos().Line())
 	}
 
-	if !short && n.Offset() != types.BADWIDTH {
+	if n.Offset() != types.BADWIDTH {
 		fmt.Fprintf(s, " x(%d)", n.Offset())
 	}
 
@@ -382,12 +380,12 @@ func jconvFmt(n Node, s fmt.State, flag FmtFlag) {
 	}
 
 	if EscFmt != nil {
-		if esc := EscFmt(n, short); esc != "" {
+		if esc := EscFmt(n); esc != "" {
 			fmt.Fprintf(s, " %s", esc)
 		}
 	}
 
-	if !short && n.Typecheck() != 0 {
+	if n.Typecheck() != 0 {
 		fmt.Fprintf(s, " tc(%d)", n.Typecheck())
 	}
 
@@ -423,11 +421,11 @@ func jconvFmt(n Node, s fmt.State, flag FmtFlag) {
 		fmt.Fprint(s, " nonnil")
 	}
 
-	if !short && n.HasCall() {
+	if n.HasCall() {
 		fmt.Fprint(s, " hascall")
 	}
 
-	if !short && n.Name() != nil && n.Name().Used() {
+	if n.Name() != nil && n.Name().Used() {
 		fmt.Fprint(s, " used")
 	}
 }

From 158c9dd131db86a381535a902b54bc7f610a8c97 Mon Sep 17 00:00:00 2001
From: Russ Cox
Date: Sat, 5 Dec 2020 00:02:46 -0500
Subject: [PATCH 128/474] [dev.regabi] cmd/compile: reorganize ir/fmt.go

This code is a few layers of abstraction stacked up on top of each
other, and they're hard to see all at the same time because the file
is pretty mixed up. As much as I try to avoid code rearrangement to
keep history, this one is long overdue.

A followup CL will cut out some of the layers, and that diff will
make it much clearer what's going on, now that the code is ordered
with callers near callees.

Passes buildall w/ toolstash -cmp.

Change-Id: Iffc49d43cf4be9fab47e2dd59a5f98930573350f
Reviewed-on: https://go-review.googlesource.com/c/go/+/275773
Trust: Russ Cox
Reviewed-by: Matthew Dempsky
---
 src/cmd/compile/internal/ir/fmt.go  | 1416 +++++++++++++--------------
 src/cmd/compile/internal/ir/node.go |    6 +
 2 files changed, 712 insertions(+), 710 deletions(-)

diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go
index 593e77880d959..ae33dcddd79bd 100644
--- a/src/cmd/compile/internal/ir/fmt.go
+++ b/src/cmd/compile/internal/ir/fmt.go
@@ -19,53 +19,6 @@ import (
 	"cmd/internal/src"
 )
 
-// A FmtFlag value is a set of flags (or 0).
-// They control how the Xconv functions format their values.
-// See the respective function's documentation for details.
-type FmtFlag int
-
-const ( // fmt.Format flag/prec or verb
-	FmtLeft FmtFlag = 1 << iota // '-'
-	FmtSharp // '#'
-	FmtSign // '+'
-	FmtUnsigned // internal use only (historic: u flag)
-	FmtShort // verb == 'S' (historic: h flag)
-	FmtLong // verb == 'L' (historic: l flag)
-	FmtComma // '.' (== hasPrec) (historic: , flag)
-	FmtByte // '0' (historic: hh flag)
-)
-
-// fmtFlag computes the (internal) FmtFlag
-// value given the fmt.State and format verb.
-func fmtFlag(s fmt.State, verb rune) FmtFlag { - var flag FmtFlag - if s.Flag('-') { - flag |= FmtLeft - } - if s.Flag('#') { - flag |= FmtSharp - } - if s.Flag('+') { - flag |= FmtSign - } - if s.Flag(' ') { - base.Fatalf("FmtUnsigned in format string") - } - if _, ok := s.Precision(); ok { - flag |= FmtComma - } - if s.Flag('0') { - flag |= FmtByte - } - switch verb { - case 'S': - flag |= FmtShort - case 'L': - flag |= FmtLong - } - return flag -} - // Format conversions: // TODO(gri) verify these; eliminate those not used anymore // @@ -98,12 +51,6 @@ func fmtFlag(s fmt.State, verb rune) FmtFlag { // .: separate items with ',' instead of ';' // *types.Sym, *types.Type, and *Node types use the flags below to set the format mode -const ( - FErr FmtMode = iota - FDbg - FTypeId - FTypeIdName // same as FTypeId, but use package name instead of prefix -) // The mode flags '+', '-', and '#' are sticky; they persist through // recursions of *Node, *types.Type, and *types.Sym values. The ' ' flag is @@ -131,6 +78,62 @@ const ( // %-S type identifiers without "func" and arg names in type signatures (methodsym) // %- v type identifiers with package name instead of prefix (typesym, dcommontype, typehash) +type FmtMode int + +const ( + FErr FmtMode = iota + FDbg + FTypeId + FTypeIdName // same as FTypeId, but use package name instead of prefix +) + +// A FmtFlag value is a set of flags (or 0). +// They control how the Xconv functions format their values. +// See the respective function's documentation for details. +type FmtFlag int + +const ( // fmt.Format flag/prec or verb + FmtLeft FmtFlag = 1 << iota // '-' + FmtSharp // '#' + FmtSign // '+' + FmtUnsigned // internal use only (historic: u flag) + FmtShort // verb == 'S' (historic: h flag) + FmtLong // verb == 'L' (historic: l flag) + FmtComma // '.' (== hasPrec) (historic: , flag) + FmtByte // '0' (historic: hh flag) +) + +// fmtFlag computes the (internal) FmtFlag +// value given the fmt.State and format verb. +func fmtFlag(s fmt.State, verb rune) FmtFlag { + var flag FmtFlag + if s.Flag('-') { + flag |= FmtLeft + } + if s.Flag('#') { + flag |= FmtSharp + } + if s.Flag('+') { + flag |= FmtSign + } + if s.Flag(' ') { + base.Fatalf("FmtUnsigned in format string") + } + if _, ok := s.Precision(); ok { + flag |= FmtComma + } + if s.Flag('0') { + flag |= FmtByte + } + switch verb { + case 'S': + flag |= FmtShort + case 'L': + flag |= FmtLong + } + return flag +} + // update returns the results of applying f to mode. func (f FmtFlag) update(mode FmtMode) (FmtFlag, FmtMode) { switch { @@ -148,6 +151,46 @@ func (f FmtFlag) update(mode FmtMode) (FmtFlag, FmtMode) { return f, mode } +func (m FmtMode) Fprintf(s fmt.State, format string, args ...interface{}) { + m.prepareArgs(args) + fmt.Fprintf(s, format, args...) +} + +func (m FmtMode) Sprintf(format string, args ...interface{}) string { + m.prepareArgs(args) + return fmt.Sprintf(format, args...) +} + +func (m FmtMode) Sprint(args ...interface{}) string { + m.prepareArgs(args) + return fmt.Sprint(args...) 
+} + +func (m FmtMode) prepareArgs(args []interface{}) { + for i, arg := range args { + switch arg := arg.(type) { + case Op: + args[i] = &fmtOp{arg, m} + case Node: + args[i] = &fmtNode{arg, m} + case nil: + args[i] = &fmtNode{nil, m} // assume this was a node interface + case *types.Type: + args[i] = &fmtType{arg, m} + case *types.Sym: + args[i] = &fmtSym{arg, m} + case Nodes: + args[i] = &fmtNodes{arg, m} + case int32, int64, string, types.Kind, constant.Value: + // OK: printing these types doesn't depend on mode + default: + base.Fatalf("mode.prepareArgs type %T", arg) + } + } +} + +// Op + var OpNames = []string{ OADDR: "&", OADD: "+", @@ -218,6 +261,15 @@ func (o Op) GoString() string { return fmt.Sprintf("%#v", o) } +type fmtOp struct { + x Op + m FmtMode +} + +func (f *fmtOp) Format(s fmt.State, verb rune) { f.x.format(s, verb, f.m) } + +func (o Op) Format(s fmt.State, verb rune) { o.format(s, verb, FErr) } + func (o Op) format(s fmt.State, verb rune, mode FmtMode) { switch verb { case 'v': @@ -240,28 +292,48 @@ func (o Op) oconv(s fmt.State, flag FmtFlag, mode FmtMode) { fmt.Fprint(s, o.String()) } -type FmtMode int +// Val -type fmtNode struct { - x Node - m FmtMode -} +func FmtConst(v constant.Value, flag FmtFlag) string { + if flag&FmtSharp == 0 && v.Kind() == constant.Complex { + real, imag := constant.Real(v), constant.Imag(v) -func (f *fmtNode) Format(s fmt.State, verb rune) { nodeFormat(f.x, s, verb, f.m) } + var re string + sre := constant.Sign(real) + if sre != 0 { + re = real.String() + } -type fmtOp struct { - x Op - m FmtMode -} + var im string + sim := constant.Sign(imag) + if sim != 0 { + im = imag.String() + } -func (f *fmtOp) Format(s fmt.State, verb rune) { f.x.format(s, verb, f.m) } + switch { + case sre == 0 && sim == 0: + return "0" + case sre == 0: + return im + "i" + case sim == 0: + return re + case sim < 0: + return fmt.Sprintf("(%s%si)", re, im) + default: + return fmt.Sprintf("(%s+%si)", re, im) + } + } -type fmtType struct { - x *types.Type - m FmtMode + return v.String() } -func (f *fmtType) Format(s fmt.State, verb rune) { typeFormat(f.x, s, verb, f.m) } +// Sym + +// numImport tracks how often a package with a given name is imported. +// It is used to provide a better error message (by using the package +// path to disambiguate) if a package that appears multiple times with +// the same name appears in an error message. +var NumImport = make(map[string]int) type fmtSym struct { x *types.Sym @@ -270,209 +342,58 @@ type fmtSym struct { func (f *fmtSym) Format(s fmt.State, verb rune) { symFormat(f.x, s, verb, f.m) } -type fmtNodes struct { - x Nodes - m FmtMode -} - -func (f *fmtNodes) Format(s fmt.State, verb rune) { f.x.format(s, verb, f.m) } +// "%S" suppresses qualifying with package +func symFormat(s *types.Sym, f fmt.State, verb rune, mode FmtMode) { + switch verb { + case 'v', 'S': + fmt.Fprint(f, sconv(s, fmtFlag(f, verb), mode)) -func FmtNode(n Node, s fmt.State, verb rune) { - nodeFormat(n, s, verb, FErr) + default: + fmt.Fprintf(f, "%%!%c(*types.Sym=%p)", verb, s) + } } -func (o Op) Format(s fmt.State, verb rune) { o.format(s, verb, FErr) } +func smodeString(s *types.Sym, mode FmtMode) string { return sconv(s, 0, mode) } -// func (t *types.Type) Format(s fmt.State, verb rune) // in package types -// func (y *types.Sym) Format(s fmt.State, verb rune) // in package types { y.format(s, verb, FErr) } -func (n Nodes) Format(s fmt.State, verb rune) { n.format(s, verb, FErr) } +// See #16897 before changing the implementation of sconv. 
+func sconv(s *types.Sym, flag FmtFlag, mode FmtMode) string { + if flag&FmtLong != 0 { + panic("linksymfmt") + } -func (m FmtMode) Fprintf(s fmt.State, format string, args ...interface{}) { - m.prepareArgs(args) - fmt.Fprintf(s, format, args...) -} + if s == nil { + return "" + } -func (m FmtMode) Sprintf(format string, args ...interface{}) string { - m.prepareArgs(args) - return fmt.Sprintf(format, args...) -} + if s.Name == "_" { + return "_" + } + buf := fmtBufferPool.Get().(*bytes.Buffer) + buf.Reset() + defer fmtBufferPool.Put(buf) -func (m FmtMode) Sprint(args ...interface{}) string { - m.prepareArgs(args) - return fmt.Sprint(args...) + flag, mode = flag.update(mode) + symfmt(buf, s, flag, mode) + return types.InternString(buf.Bytes()) } -func (m FmtMode) prepareArgs(args []interface{}) { - for i, arg := range args { - switch arg := arg.(type) { - case Op: - args[i] = &fmtOp{arg, m} - case Node: - args[i] = &fmtNode{arg, m} - case nil: - args[i] = &fmtNode{nil, m} // assume this was a node interface - case *types.Type: - args[i] = &fmtType{arg, m} - case *types.Sym: - args[i] = &fmtSym{arg, m} - case Nodes: - args[i] = &fmtNodes{arg, m} - case int32, int64, string, types.Kind, constant.Value: - // OK: printing these types doesn't depend on mode - default: - base.Fatalf("mode.prepareArgs type %T", arg) - } - } -} - -func nodeFormat(n Node, s fmt.State, verb rune, mode FmtMode) { - switch verb { - case 'v', 'S', 'L': - nconvFmt(n, s, fmtFlag(s, verb), mode) - - case 'j': - jconvFmt(n, s, fmtFlag(s, verb)) - - default: - fmt.Fprintf(s, "%%!%c(*Node=%p)", verb, n) - } -} - -// EscFmt is set by the escape analysis code to add escape analysis details to the node print. -var EscFmt func(n Node) string - -// *Node details -func jconvFmt(n Node, s fmt.State, flag FmtFlag) { - // Useful to see which nodes in an AST printout are actually identical - if base.Debug.DumpPtrs != 0 { - fmt.Fprintf(s, " p(%p)", n) - } - if n.Name() != nil && n.Name().Vargen != 0 { - fmt.Fprintf(s, " g(%d)", n.Name().Vargen) - } - - if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Defn != nil { - // Useful to see where Defn is set and what node it points to - fmt.Fprintf(s, " defn(%p)", n.Name().Defn) - } - - if n.Pos().IsKnown() { - pfx := "" - switch n.Pos().IsStmt() { - case src.PosNotStmt: - pfx = "_" // "-" would be confusing - case src.PosIsStmt: - pfx = "+" - } - fmt.Fprintf(s, " l(%s%d)", pfx, n.Pos().Line()) - } - - if n.Offset() != types.BADWIDTH { - fmt.Fprintf(s, " x(%d)", n.Offset()) - } - - if n.Class() != 0 { - fmt.Fprintf(s, " class(%v)", n.Class()) - } - - if n.Colas() { - fmt.Fprintf(s, " colas(%v)", n.Colas()) - } - - if EscFmt != nil { - if esc := EscFmt(n); esc != "" { - fmt.Fprintf(s, " %s", esc) - } - } - - if n.Typecheck() != 0 { - fmt.Fprintf(s, " tc(%d)", n.Typecheck()) - } - - if n.IsDDD() { - fmt.Fprintf(s, " isddd(%v)", n.IsDDD()) - } - - if n.Implicit() { - fmt.Fprintf(s, " implicit(%v)", n.Implicit()) - } - - if n.Op() == ONAME { - if n.Name().Addrtaken() { - fmt.Fprint(s, " addrtaken") - } - if n.Name().Assigned() { - fmt.Fprint(s, " assigned") - } - if n.Name().IsClosureVar() { - fmt.Fprint(s, " closurevar") - } - if n.Name().Captured() { - fmt.Fprint(s, " captured") - } - if n.Name().IsOutputParamHeapAddr() { - fmt.Fprint(s, " outputparamheapaddr") - } - } - if n.Bounded() { - fmt.Fprint(s, " bounded") - } - if n.NonNil() { - fmt.Fprint(s, " nonnil") - } - - if n.HasCall() { - fmt.Fprint(s, " hascall") +func sconv2(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode FmtMode) 
{ + if flag&FmtLong != 0 { + panic("linksymfmt") } - - if n.Name() != nil && n.Name().Used() { - fmt.Fprint(s, " used") + if s == nil { + b.WriteString("") + return } -} - -func FmtConst(v constant.Value, flag FmtFlag) string { - if flag&FmtSharp == 0 && v.Kind() == constant.Complex { - real, imag := constant.Real(v), constant.Imag(v) - - var re string - sre := constant.Sign(real) - if sre != 0 { - re = real.String() - } - - var im string - sim := constant.Sign(imag) - if sim != 0 { - im = imag.String() - } - - switch { - case sre == 0 && sim == 0: - return "0" - case sre == 0: - return im + "i" - case sim == 0: - return re - case sim < 0: - return fmt.Sprintf("(%s%si)", re, im) - default: - return fmt.Sprintf("(%s+%si)", re, im) - } + if s.Name == "_" { + b.WriteString("_") + return } - return v.String() + flag, mode = flag.update(mode) + symfmt(b, s, flag, mode) } -/* -s%,%,\n%g -s%\n+%\n%g -s%^[ ]*T%%g -s%,.*%%g -s%.+% [T&] = "&",%g -s%^ ........*\]%&~%g -s%~ %%g -*/ - func symfmt(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode FmtMode) { if flag&FmtShort == 0 { switch mode { @@ -534,6 +455,8 @@ func symfmt(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode FmtMode) { b.WriteString(s.Name) } +// Type + var BasicTypeNames = []string{ types.TINT: "int", types.TUINT: "uint", @@ -564,6 +487,39 @@ var fmtBufferPool = sync.Pool{ }, } +func InstallTypeFormats() { + types.Sconv = func(s *types.Sym, flag, mode int) string { + return sconv(s, FmtFlag(flag), FmtMode(mode)) + } + types.Tconv = func(t *types.Type, flag, mode int) string { + return tconv(t, FmtFlag(flag), FmtMode(mode)) + } + types.FormatSym = func(sym *types.Sym, s fmt.State, verb rune, mode int) { + symFormat(sym, s, verb, FmtMode(mode)) + } + types.FormatType = func(t *types.Type, s fmt.State, verb rune, mode int) { + typeFormat(t, s, verb, FmtMode(mode)) + } +} + +type fmtType struct { + x *types.Type + m FmtMode +} + +func (f *fmtType) Format(s fmt.State, verb rune) { typeFormat(f.x, s, verb, f.m) } + +// "%L" print definition, not name +// "%S" omit 'func' and receiver from function types, short type names +func typeFormat(t *types.Type, s fmt.State, verb rune, mode FmtMode) { + switch verb { + case 'v', 'S', 'L': + fmt.Fprint(s, tconv(t, fmtFlag(s, verb), mode)) + default: + fmt.Fprintf(s, "%%!%c(*Type=%p)", verb, t) + } +} + func tconv(t *types.Type, flag FmtFlag, mode FmtMode) string { buf := fmtBufferPool.Get().(*bytes.Buffer) buf.Reset() @@ -874,186 +830,134 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited } } -// Statements which may be rendered with a simplestmt as init. -func StmtWithInit(op Op) bool { - switch op { - case OIF, OFOR, OFORUNTIL, OSWITCH: - return true +func fldconv(b *bytes.Buffer, f *types.Field, flag FmtFlag, mode FmtMode, visited map[*types.Type]int, funarg types.Funarg) { + if f == nil { + b.WriteString("") + return + } + flag, mode = flag.update(mode) + if mode == FTypeIdName { + flag |= FmtUnsigned } - return false -} - -func stmtFmt(n Node, s fmt.State, mode FmtMode) { - // some statements allow for an init, but at most one, - // but we may have an arbitrary number added, eg by typecheck - // and inlining. If it doesn't fit the syntax, emit an enclosing - // block starting with the init statements. - - // if we can just say "for" n->ninit; ... 
then do so - simpleinit := n.Init().Len() == 1 && n.Init().First().Init().Len() == 0 && StmtWithInit(n.Op()) - - // otherwise, print the inits as separate statements - complexinit := n.Init().Len() != 0 && !simpleinit && (mode != FErr) + var name string + if flag&FmtShort == 0 { + s := f.Sym - // but if it was for if/for/switch, put in an extra surrounding block to limit the scope - extrablock := complexinit && StmtWithInit(n.Op()) + // Take the name from the original. + if mode == FErr { + s = OrigSym(s) + } - if extrablock { - fmt.Fprint(s, "{") + if s != nil && f.Embedded == 0 { + if funarg != types.FunargNone { + name = modeString(AsNode(f.Nname), mode) + } else if flag&FmtLong != 0 { + name = mode.Sprintf("%0S", s) + if !types.IsExported(name) && flag&FmtUnsigned == 0 { + name = smodeString(s, mode) // qualify non-exported names (used on structs, not on funarg) + } + } else { + name = smodeString(s, mode) + } + } } - if complexinit { - mode.Fprintf(s, " %v; ", n.Init()) + if name != "" { + b.WriteString(name) + b.WriteString(" ") } - switch n.Op() { - case ODCL: - mode.Fprintf(s, "var %v %v", n.Left().Sym(), n.Left().Type()) - - // Don't export "v = " initializing statements, hope they're always - // preceded by the DCL which will be re-parsed and typechecked to reproduce - // the "v = " again. - case OAS: - if n.Colas() && !complexinit { - mode.Fprintf(s, "%v := %v", n.Left(), n.Right()) - } else { - mode.Fprintf(s, "%v = %v", n.Left(), n.Right()) - } - - case OASOP: - if n.Implicit() { - if n.SubOp() == OADD { - mode.Fprintf(s, "%v++", n.Left()) - } else { - mode.Fprintf(s, "%v--", n.Left()) - } - break - } - - mode.Fprintf(s, "%v %#v= %v", n.Left(), n.SubOp(), n.Right()) - - case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV: - if n.Colas() && !complexinit { - mode.Fprintf(s, "%.v := %.v", n.List(), n.Rlist()) - } else { - mode.Fprintf(s, "%.v = %.v", n.List(), n.Rlist()) - } - - case OBLOCK: - if n.List().Len() != 0 { - mode.Fprintf(s, "%v", n.List()) + if f.IsDDD() { + var et *types.Type + if f.Type != nil { + et = f.Type.Elem() } + b.WriteString("...") + tconv2(b, et, 0, mode, visited) + } else { + tconv2(b, f.Type, 0, mode, visited) + } - case ORETURN: - mode.Fprintf(s, "return %.v", n.List()) - - case ORETJMP: - mode.Fprintf(s, "retjmp %v", n.Sym()) - - case OINLMARK: - mode.Fprintf(s, "inlmark %d", n.Offset()) - - case OGO: - mode.Fprintf(s, "go %v", n.Left()) - - case ODEFER: - mode.Fprintf(s, "defer %v", n.Left()) + if flag&FmtShort == 0 && funarg == types.FunargNone && f.Note != "" { + b.WriteString(" ") + b.WriteString(strconv.Quote(f.Note)) + } +} - case OIF: - if simpleinit { - mode.Fprintf(s, "if %v; %v { %v }", n.Init().First(), n.Left(), n.Body()) - } else { - mode.Fprintf(s, "if %v { %v }", n.Left(), n.Body()) - } - if n.Rlist().Len() != 0 { - mode.Fprintf(s, " else { %v }", n.Rlist()) - } +// Node - case OFOR, OFORUNTIL: - opname := "for" - if n.Op() == OFORUNTIL { - opname = "foruntil" - } - if mode == FErr { // TODO maybe only if FmtShort, same below - fmt.Fprintf(s, "%s loop", opname) - break - } +func modeString(n Node, mode FmtMode) string { return mode.Sprint(n) } - fmt.Fprint(s, opname) - if simpleinit { - mode.Fprintf(s, " %v;", n.Init().First()) - } else if n.Right() != nil { - fmt.Fprint(s, " ;") - } +type fmtNode struct { + x Node + m FmtMode +} - if n.Left() != nil { - mode.Fprintf(s, " %v", n.Left()) - } +func (f *fmtNode) Format(s fmt.State, verb rune) { nodeFormat(f.x, s, verb, f.m) } - if n.Right() != nil { - mode.Fprintf(s, "; %v", n.Right()) - } 
else if simpleinit { - fmt.Fprint(s, ";") - } +func FmtNode(n Node, s fmt.State, verb rune) { + nodeFormat(n, s, verb, FErr) +} - if n.Op() == OFORUNTIL && n.List().Len() != 0 { - mode.Fprintf(s, "; %v", n.List()) - } +func nodeFormat(n Node, s fmt.State, verb rune, mode FmtMode) { + switch verb { + case 'v', 'S', 'L': + nconvFmt(n, s, fmtFlag(s, verb), mode) - mode.Fprintf(s, " { %v }", n.Body()) + case 'j': + jconvFmt(n, s, fmtFlag(s, verb)) - case ORANGE: - if mode == FErr { - fmt.Fprint(s, "for loop") - break - } + default: + fmt.Fprintf(s, "%%!%c(*Node=%p)", verb, n) + } +} - if n.List().Len() == 0 { - mode.Fprintf(s, "for range %v { %v }", n.Right(), n.Body()) - break - } +// "%L" suffix with "(type %T)" where possible +// "%+S" in debug mode, don't recurse, no multiline output +func nconvFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) { + if n == nil { + fmt.Fprint(s, "") + return + } - mode.Fprintf(s, "for %.v = range %v { %v }", n.List(), n.Right(), n.Body()) + flag, mode = flag.update(mode) - case OSELECT, OSWITCH: - if mode == FErr { - mode.Fprintf(s, "%v statement", n.Op()) - break - } + switch mode { + case FErr: + nodeFmt(n, s, flag, mode) - mode.Fprintf(s, "%#v", n.Op()) - if simpleinit { - mode.Fprintf(s, " %v;", n.Init().First()) - } - if n.Left() != nil { - mode.Fprintf(s, " %v ", n.Left()) - } + case FDbg: + dumpdepth++ + nodeDumpFmt(n, s, flag, mode) + dumpdepth-- - mode.Fprintf(s, " { %v }", n.List()) + default: + base.Fatalf("unhandled %%N mode: %d", mode) + } +} - case OCASE: - if n.List().Len() != 0 { - mode.Fprintf(s, "case %.v", n.List()) +func nodeFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) { + t := n.Type() + if flag&FmtLong != 0 && t != nil { + if t.Kind() == types.TNIL { + fmt.Fprint(s, "nil") + } else if n.Op() == ONAME && n.Name().AutoTemp() { + mode.Fprintf(s, "%v value", t) } else { - fmt.Fprint(s, "default") + mode.Fprintf(s, "%v (type %v)", n, t) } - mode.Fprintf(s, ": %v", n.Body()) + return + } - case OBREAK, OCONTINUE, OGOTO, OFALL: - if n.Sym() != nil { - mode.Fprintf(s, "%#v %v", n.Op(), n.Sym()) - } else { - mode.Fprintf(s, "%#v", n.Op()) - } + // TODO inlining produces expressions with ninits. we can't print these yet. - case OLABEL: - mode.Fprintf(s, "%v: ", n.Sym()) + if OpPrec[n.Op()] < 0 { + stmtFmt(n, s, mode) + return } - if extrablock { - fmt.Fprint(s, "}") - } + exprFmt(n, s, 0, mode) } var OpPrec = []int{ @@ -1177,51 +1081,232 @@ var OpPrec = []int{ OEND: 0, } -func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) { - for { - if n == nil { - fmt.Fprint(s, "") - return - } +// Statements which may be rendered with a simplestmt as init. +func StmtWithInit(op Op) bool { + switch op { + case OIF, OFOR, OFORUNTIL, OSWITCH: + return true + } + return false +} - // We always want the original, if any. - if o := Orig(n); o != n { - n = o - continue - } +func stmtFmt(n Node, s fmt.State, mode FmtMode) { + // some statements allow for an init, but at most one, + // but we may have an arbitrary number added, eg by typecheck + // and inlining. If it doesn't fit the syntax, emit an enclosing + // block starting with the init statements. - // Skip implicit operations introduced during typechecking. - switch n.Op() { - case OADDR, ODEREF, OCONV, OCONVNOP, OCONVIFACE: - if n.Implicit() { - n = n.Left() - continue - } - } + // if we can just say "for" n->ninit; ... 
then do so + simpleinit := n.Init().Len() == 1 && n.Init().First().Init().Len() == 0 && StmtWithInit(n.Op()) - break - } + // otherwise, print the inits as separate statements + complexinit := n.Init().Len() != 0 && !simpleinit && (mode != FErr) - nprec := OpPrec[n.Op()] - if n.Op() == OTYPE && n.Sym() != nil { - nprec = 8 + // but if it was for if/for/switch, put in an extra surrounding block to limit the scope + extrablock := complexinit && StmtWithInit(n.Op()) + + if extrablock { + fmt.Fprint(s, "{") } - if prec > nprec { - mode.Fprintf(s, "(%v)", n) - return + if complexinit { + mode.Fprintf(s, " %v; ", n.Init()) } switch n.Op() { - case OPAREN: - mode.Fprintf(s, "(%v)", n.Left()) + case ODCL: + mode.Fprintf(s, "var %v %v", n.Left().Sym(), n.Left().Type()) - case ONIL: - fmt.Fprint(s, "nil") + // Don't export "v = " initializing statements, hope they're always + // preceded by the DCL which will be re-parsed and typechecked to reproduce + // the "v = " again. + case OAS: + if n.Colas() && !complexinit { + mode.Fprintf(s, "%v := %v", n.Left(), n.Right()) + } else { + mode.Fprintf(s, "%v = %v", n.Left(), n.Right()) + } - case OLITERAL: // this is a bit of a mess - if mode == FErr && n.Sym() != nil { - fmt.Fprint(s, smodeString(n.Sym(), mode)) + case OASOP: + if n.Implicit() { + if n.SubOp() == OADD { + mode.Fprintf(s, "%v++", n.Left()) + } else { + mode.Fprintf(s, "%v--", n.Left()) + } + break + } + + mode.Fprintf(s, "%v %#v= %v", n.Left(), n.SubOp(), n.Right()) + + case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV: + if n.Colas() && !complexinit { + mode.Fprintf(s, "%.v := %.v", n.List(), n.Rlist()) + } else { + mode.Fprintf(s, "%.v = %.v", n.List(), n.Rlist()) + } + + case OBLOCK: + if n.List().Len() != 0 { + mode.Fprintf(s, "%v", n.List()) + } + + case ORETURN: + mode.Fprintf(s, "return %.v", n.List()) + + case ORETJMP: + mode.Fprintf(s, "retjmp %v", n.Sym()) + + case OINLMARK: + mode.Fprintf(s, "inlmark %d", n.Offset()) + + case OGO: + mode.Fprintf(s, "go %v", n.Left()) + + case ODEFER: + mode.Fprintf(s, "defer %v", n.Left()) + + case OIF: + if simpleinit { + mode.Fprintf(s, "if %v; %v { %v }", n.Init().First(), n.Left(), n.Body()) + } else { + mode.Fprintf(s, "if %v { %v }", n.Left(), n.Body()) + } + if n.Rlist().Len() != 0 { + mode.Fprintf(s, " else { %v }", n.Rlist()) + } + + case OFOR, OFORUNTIL: + opname := "for" + if n.Op() == OFORUNTIL { + opname = "foruntil" + } + if mode == FErr { // TODO maybe only if FmtShort, same below + fmt.Fprintf(s, "%s loop", opname) + break + } + + fmt.Fprint(s, opname) + if simpleinit { + mode.Fprintf(s, " %v;", n.Init().First()) + } else if n.Right() != nil { + fmt.Fprint(s, " ;") + } + + if n.Left() != nil { + mode.Fprintf(s, " %v", n.Left()) + } + + if n.Right() != nil { + mode.Fprintf(s, "; %v", n.Right()) + } else if simpleinit { + fmt.Fprint(s, ";") + } + + if n.Op() == OFORUNTIL && n.List().Len() != 0 { + mode.Fprintf(s, "; %v", n.List()) + } + + mode.Fprintf(s, " { %v }", n.Body()) + + case ORANGE: + if mode == FErr { + fmt.Fprint(s, "for loop") + break + } + + if n.List().Len() == 0 { + mode.Fprintf(s, "for range %v { %v }", n.Right(), n.Body()) + break + } + + mode.Fprintf(s, "for %.v = range %v { %v }", n.List(), n.Right(), n.Body()) + + case OSELECT, OSWITCH: + if mode == FErr { + mode.Fprintf(s, "%v statement", n.Op()) + break + } + + mode.Fprintf(s, "%#v", n.Op()) + if simpleinit { + mode.Fprintf(s, " %v;", n.Init().First()) + } + if n.Left() != nil { + mode.Fprintf(s, " %v ", n.Left()) + } + + mode.Fprintf(s, " { %v }", n.List()) 
+ + case OCASE: + if n.List().Len() != 0 { + mode.Fprintf(s, "case %.v", n.List()) + } else { + fmt.Fprint(s, "default") + } + mode.Fprintf(s, ": %v", n.Body()) + + case OBREAK, OCONTINUE, OGOTO, OFALL: + if n.Sym() != nil { + mode.Fprintf(s, "%#v %v", n.Op(), n.Sym()) + } else { + mode.Fprintf(s, "%#v", n.Op()) + } + + case OLABEL: + mode.Fprintf(s, "%v: ", n.Sym()) + } + + if extrablock { + fmt.Fprint(s, "}") + } +} + +func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) { + for { + if n == nil { + fmt.Fprint(s, "") + return + } + + // We always want the original, if any. + if o := Orig(n); o != n { + n = o + continue + } + + // Skip implicit operations introduced during typechecking. + switch n.Op() { + case OADDR, ODEREF, OCONV, OCONVNOP, OCONVIFACE: + if n.Implicit() { + n = n.Left() + continue + } + } + + break + } + + nprec := OpPrec[n.Op()] + if n.Op() == OTYPE && n.Sym() != nil { + nprec = 8 + } + + if prec > nprec { + mode.Fprintf(s, "(%v)", n) + return + } + + switch n.Op() { + case OPAREN: + mode.Fprintf(s, "(%v)", n.Left()) + + case ONIL: + fmt.Fprint(s, "nil") + + case OLITERAL: // this is a bit of a mess + if mode == FErr && n.Sym() != nil { + fmt.Fprint(s, smodeString(n.Sym(), mode)) return } @@ -1564,51 +1649,200 @@ func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) { } } -func nodeFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) { - t := n.Type() - if flag&FmtLong != 0 && t != nil { - if t.Kind() == types.TNIL { - fmt.Fprint(s, "nil") - } else if n.Op() == ONAME && n.Name().AutoTemp() { - mode.Fprintf(s, "%v value", t) - } else { - mode.Fprintf(s, "%v (type %v)", n, t) - } - return +func ellipsisIf(b bool) string { + if b { + return "..." } + return "" +} - // TODO inlining produces expressions with ninits. we can't print these yet. +// Nodes - if OpPrec[n.Op()] < 0 { - stmtFmt(n, s, mode) - return +type fmtNodes struct { + x Nodes + m FmtMode +} + +func (f *fmtNodes) Format(s fmt.State, verb rune) { f.x.format(s, verb, f.m) } + +func (l Nodes) Format(s fmt.State, verb rune) { l.format(s, verb, FErr) } + +func (l Nodes) format(s fmt.State, verb rune, mode FmtMode) { + switch verb { + case 'v': + l.hconv(s, fmtFlag(s, verb), mode) + + default: + fmt.Fprintf(s, "%%!%c(Nodes)", verb) } +} - exprFmt(n, s, 0, mode) +func (n Nodes) String() string { + return fmt.Sprint(n) } -func nodeDumpFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) { - recur := flag&FmtShort == 0 +// Flags: all those of %N plus '.': separate with comma's instead of semicolons. +func (l Nodes) hconv(s fmt.State, flag FmtFlag, mode FmtMode) { + if l.Len() == 0 && mode == FDbg { + fmt.Fprint(s, "") + return + } - if recur { - indent(s) - if dumpdepth > 40 { - fmt.Fprint(s, "...") - return - } + flag, mode = flag.update(mode) + sep := "; " + if mode == FDbg { + sep = "\n" + } else if flag&FmtComma != 0 { + sep = ", " + } - if n.Init().Len() != 0 { - mode.Fprintf(s, "%v-init%v", n.Op(), n.Init()) - indent(s) + for i, n := range l.Slice() { + fmt.Fprint(s, modeString(n, mode)) + if i+1 < l.Len() { + fmt.Fprint(s, sep) } } +} - switch n.Op() { - default: - mode.Fprintf(s, "%v%j", n.Op(), n) +// Dump - case OLITERAL: - mode.Fprintf(s, "%v-%v%j", n.Op(), n.Val(), n) +func Dump(s string, n Node) { + fmt.Printf("%s [%p]%+v\n", s, n, n) +} + +func DumpList(s string, l Nodes) { + fmt.Printf("%s%+v\n", s, l) +} + +func FDumpList(w io.Writer, s string, l Nodes) { + fmt.Fprintf(w, "%s%+v\n", s, l) +} + +// TODO(gri) make variable local somehow +var dumpdepth int + +// indent prints indentation to s. 
+func indent(s fmt.State) { + fmt.Fprint(s, "\n") + for i := 0; i < dumpdepth; i++ { + fmt.Fprint(s, ". ") + } +} + +// EscFmt is set by the escape analysis code to add escape analysis details to the node print. +var EscFmt func(n Node) string + +// *Node details +func jconvFmt(n Node, s fmt.State, flag FmtFlag) { + // Useful to see which nodes in an AST printout are actually identical + if base.Debug.DumpPtrs != 0 { + fmt.Fprintf(s, " p(%p)", n) + } + if n.Name() != nil && n.Name().Vargen != 0 { + fmt.Fprintf(s, " g(%d)", n.Name().Vargen) + } + + if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Defn != nil { + // Useful to see where Defn is set and what node it points to + fmt.Fprintf(s, " defn(%p)", n.Name().Defn) + } + + if n.Pos().IsKnown() { + pfx := "" + switch n.Pos().IsStmt() { + case src.PosNotStmt: + pfx = "_" // "-" would be confusing + case src.PosIsStmt: + pfx = "+" + } + fmt.Fprintf(s, " l(%s%d)", pfx, n.Pos().Line()) + } + + if n.Offset() != types.BADWIDTH { + fmt.Fprintf(s, " x(%d)", n.Offset()) + } + + if n.Class() != 0 { + fmt.Fprintf(s, " class(%v)", n.Class()) + } + + if n.Colas() { + fmt.Fprintf(s, " colas(%v)", n.Colas()) + } + + if EscFmt != nil { + if esc := EscFmt(n); esc != "" { + fmt.Fprintf(s, " %s", esc) + } + } + + if n.Typecheck() != 0 { + fmt.Fprintf(s, " tc(%d)", n.Typecheck()) + } + + if n.IsDDD() { + fmt.Fprintf(s, " isddd(%v)", n.IsDDD()) + } + + if n.Implicit() { + fmt.Fprintf(s, " implicit(%v)", n.Implicit()) + } + + if n.Op() == ONAME { + if n.Name().Addrtaken() { + fmt.Fprint(s, " addrtaken") + } + if n.Name().Assigned() { + fmt.Fprint(s, " assigned") + } + if n.Name().IsClosureVar() { + fmt.Fprint(s, " closurevar") + } + if n.Name().Captured() { + fmt.Fprint(s, " captured") + } + if n.Name().IsOutputParamHeapAddr() { + fmt.Fprint(s, " outputparamheapaddr") + } + } + if n.Bounded() { + fmt.Fprint(s, " bounded") + } + if n.NonNil() { + fmt.Fprint(s, " nonnil") + } + + if n.HasCall() { + fmt.Fprint(s, " hascall") + } + + if n.Name() != nil && n.Name().Used() { + fmt.Fprint(s, " used") + } +} + +func nodeDumpFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) { + recur := flag&FmtShort == 0 + + if recur { + indent(s) + if dumpdepth > 40 { + fmt.Fprint(s, "...") + return + } + + if n.Init().Len() != 0 { + mode.Fprintf(s, "%v-init%v", n.Op(), n.Init()) + indent(s) + } + } + + switch n.Op() { + default: + mode.Fprintf(s, "%v%j", n.Op(), n) + + case OLITERAL: + mode.Fprintf(s, "%v-%v%j", n.Op(), n.Val(), n) case ONAME, ONONAME, OMETHEXPR: if n.Sym() != nil { @@ -1686,241 +1920,3 @@ func asNameNodes(list []*Name) Nodes { } return ns } - -// "%S" suppresses qualifying with package -func symFormat(s *types.Sym, f fmt.State, verb rune, mode FmtMode) { - switch verb { - case 'v', 'S': - fmt.Fprint(f, sconv(s, fmtFlag(f, verb), mode)) - - default: - fmt.Fprintf(f, "%%!%c(*types.Sym=%p)", verb, s) - } -} - -func smodeString(s *types.Sym, mode FmtMode) string { return sconv(s, 0, mode) } - -// See #16897 before changing the implementation of sconv. 
-func sconv(s *types.Sym, flag FmtFlag, mode FmtMode) string { - if flag&FmtLong != 0 { - panic("linksymfmt") - } - - if s == nil { - return "" - } - - if s.Name == "_" { - return "_" - } - buf := fmtBufferPool.Get().(*bytes.Buffer) - buf.Reset() - defer fmtBufferPool.Put(buf) - - flag, mode = flag.update(mode) - symfmt(buf, s, flag, mode) - return types.InternString(buf.Bytes()) -} - -func sconv2(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode FmtMode) { - if flag&FmtLong != 0 { - panic("linksymfmt") - } - if s == nil { - b.WriteString("") - return - } - if s.Name == "_" { - b.WriteString("_") - return - } - - flag, mode = flag.update(mode) - symfmt(b, s, flag, mode) -} - -func fldconv(b *bytes.Buffer, f *types.Field, flag FmtFlag, mode FmtMode, visited map[*types.Type]int, funarg types.Funarg) { - if f == nil { - b.WriteString("") - return - } - flag, mode = flag.update(mode) - if mode == FTypeIdName { - flag |= FmtUnsigned - } - - var name string - if flag&FmtShort == 0 { - s := f.Sym - - // Take the name from the original. - if mode == FErr { - s = OrigSym(s) - } - - if s != nil && f.Embedded == 0 { - if funarg != types.FunargNone { - name = modeString(AsNode(f.Nname), mode) - } else if flag&FmtLong != 0 { - name = mode.Sprintf("%0S", s) - if !types.IsExported(name) && flag&FmtUnsigned == 0 { - name = smodeString(s, mode) // qualify non-exported names (used on structs, not on funarg) - } - } else { - name = smodeString(s, mode) - } - } - } - - if name != "" { - b.WriteString(name) - b.WriteString(" ") - } - - if f.IsDDD() { - var et *types.Type - if f.Type != nil { - et = f.Type.Elem() - } - b.WriteString("...") - tconv2(b, et, 0, mode, visited) - } else { - tconv2(b, f.Type, 0, mode, visited) - } - - if flag&FmtShort == 0 && funarg == types.FunargNone && f.Note != "" { - b.WriteString(" ") - b.WriteString(strconv.Quote(f.Note)) - } -} - -// "%L" print definition, not name -// "%S" omit 'func' and receiver from function types, short type names -func typeFormat(t *types.Type, s fmt.State, verb rune, mode FmtMode) { - switch verb { - case 'v', 'S', 'L': - fmt.Fprint(s, tconv(t, fmtFlag(s, verb), mode)) - default: - fmt.Fprintf(s, "%%!%c(*Type=%p)", verb, t) - } -} - -func modeString(n Node, mode FmtMode) string { return mode.Sprint(n) } - -// "%L" suffix with "(type %T)" where possible -// "%+S" in debug mode, don't recurse, no multiline output -func nconvFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) { - if n == nil { - fmt.Fprint(s, "") - return - } - - flag, mode = flag.update(mode) - - switch mode { - case FErr: - nodeFmt(n, s, flag, mode) - - case FDbg: - dumpdepth++ - nodeDumpFmt(n, s, flag, mode) - dumpdepth-- - - default: - base.Fatalf("unhandled %%N mode: %d", mode) - } -} - -func (l Nodes) format(s fmt.State, verb rune, mode FmtMode) { - switch verb { - case 'v': - l.hconv(s, fmtFlag(s, verb), mode) - - default: - fmt.Fprintf(s, "%%!%c(Nodes)", verb) - } -} - -func (n Nodes) String() string { - return fmt.Sprint(n) -} - -// Flags: all those of %N plus '.': separate with comma's instead of semicolons. 
-func (l Nodes) hconv(s fmt.State, flag FmtFlag, mode FmtMode) { - if l.Len() == 0 && mode == FDbg { - fmt.Fprint(s, "") - return - } - - flag, mode = flag.update(mode) - sep := "; " - if mode == FDbg { - sep = "\n" - } else if flag&FmtComma != 0 { - sep = ", " - } - - for i, n := range l.Slice() { - fmt.Fprint(s, modeString(n, mode)) - if i+1 < l.Len() { - fmt.Fprint(s, sep) - } - } -} - -func DumpList(s string, l Nodes) { - fmt.Printf("%s%+v\n", s, l) -} - -func FDumpList(w io.Writer, s string, l Nodes) { - fmt.Fprintf(w, "%s%+v\n", s, l) -} - -func Dump(s string, n Node) { - fmt.Printf("%s [%p]%+v\n", s, n, n) -} - -// TODO(gri) make variable local somehow -var dumpdepth int - -// indent prints indentation to s. -func indent(s fmt.State) { - fmt.Fprint(s, "\n") - for i := 0; i < dumpdepth; i++ { - fmt.Fprint(s, ". ") - } -} - -func ellipsisIf(b bool) string { - if b { - return "..." - } - return "" -} - -// numImport tracks how often a package with a given name is imported. -// It is used to provide a better error message (by using the package -// path to disambiguate) if a package that appears multiple times with -// the same name appears in an error message. -var NumImport = make(map[string]int) - -func InstallTypeFormats() { - types.Sconv = func(s *types.Sym, flag, mode int) string { - return sconv(s, FmtFlag(flag), FmtMode(mode)) - } - types.Tconv = func(t *types.Type, flag, mode int) string { - return tconv(t, FmtFlag(flag), FmtMode(mode)) - } - types.FormatSym = func(sym *types.Sym, s fmt.State, verb rune, mode int) { - symFormat(sym, s, verb, FmtMode(mode)) - } - types.FormatType = func(t *types.Type, s fmt.State, verb rune, mode int) { - typeFormat(t, s, verb, FmtMode(mode)) - } -} - -// Line returns n's position as a string. If n has been inlined, -// it uses the outermost position where n has been inlined. -func Line(n Node) string { - return base.FmtPos(n.Pos()) -} diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 7fd02925ba34a..83f5b0cf78ed5 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -116,6 +116,12 @@ type Node interface { CanBeAnSSASym() } +// Line returns n's position as a string. If n has been inlined, +// it uses the outermost position where n has been inlined. +func Line(n Node) string { + return base.FmtPos(n.Pos()) +} + func IsSynthetic(n Node) bool { name := n.Sym().Name return name[0] == '.' || name[0] == '~' From 8ce2605c5b4bc64432e1711a1273f91eee3a41fc Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sat, 5 Dec 2020 00:02:46 -0500 Subject: [PATCH 129/474] [dev.regabi] cmd/compile: untangle ir.Dump printing The Node printing code is tangled up due to the multiple printing modes. Split out the Dump mode into its own code, which clarifies it considerably. We are going to have to change the code for the new Node representations, so it is nice to have it in an understandable form first. The output of Dump is unchanged except for the removal of spurious mid-Dump blank lines that have been printed for a while but don't really make sense and appear to be a bug. The %+v verb on Op prints the name ("ADD" not "+"), matching %+v on Node and %+v on Nodes to get Dump and DumpList formats. Passes buildall w/ toolstash -cmp. 
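For reference, the pattern in miniature (a self-contained sketch, not the
compiler's code; Op, OADD, and the printed strings below are simplified
stand-ins for ir.Op): implementing fmt.Formatter and testing the '+' flag
lets the single verb 'v' carry both spellings.

	package main

	import (
		"fmt"
		"io"
	)

	// Op mimics ir.Op: an enum with a spelled-out name ("ADD") for
	// debug dumps and a Go-syntax spelling ("+") for error messages.
	type Op int

	const OADD Op = 0

	func (o Op) String() string   { return "ADD" }
	func (o Op) GoString() string { return "+" }

	// Format implements fmt.Formatter: the '+' flag on the 'v' verb
	// selects the Dump spelling; plain %v prints Go syntax.
	func (o Op) Format(s fmt.State, verb rune) {
		switch verb {
		default:
			fmt.Fprintf(s, "%%!%c(Op=%d)", verb, int(o))
		case 'v':
			if s.Flag('+') {
				io.WriteString(s, o.String()) // %+v -> "ADD"
				return
			}
			io.WriteString(s, o.GoString()) // %v -> "+"
		}
	}

	func main() {
		fmt.Printf("%v %+v\n", OADD, OADD) // prints: + ADD
	}

Hanging the mode on the flag rather than on a separate verb is what lets
%+v on Node and %+v on Nodes fall through to the Dump and DumpList forms
with the same format string.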
Change-Id: I07f0f245859f1f785e10bdd671855ca43c51b545 Reviewed-on: https://go-review.googlesource.com/c/go/+/275774 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/fmtmap_test.go | 4 +- src/cmd/compile/internal/ir/fmt.go | 272 +++++++++++++++-------------- 2 files changed, 144 insertions(+), 132 deletions(-) diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go index fde9c51b27bea..bf81cc07dbbee 100644 --- a/src/cmd/compile/fmtmap_test.go +++ b/src/cmd/compile/fmtmap_test.go @@ -42,12 +42,14 @@ var knownFormats = map[string]string{ "*cmd/compile/internal/ssa.sparseTreeMapEntry %v": "", "*cmd/compile/internal/types.Field %p": "", "*cmd/compile/internal/types.Field %v": "", + "*cmd/compile/internal/types.Sym %+v": "", "*cmd/compile/internal/types.Sym %0S": "", "*cmd/compile/internal/types.Sym %S": "", "*cmd/compile/internal/types.Sym %p": "", "*cmd/compile/internal/types.Sym %v": "", "*cmd/compile/internal/types.Type %#L": "", "*cmd/compile/internal/types.Type %#v": "", + "*cmd/compile/internal/types.Type %+v": "", "*cmd/compile/internal/types.Type %-S": "", "*cmd/compile/internal/types.Type %0S": "", "*cmd/compile/internal/types.Type %L": "", @@ -88,7 +90,6 @@ var knownFormats = map[string]string{ "cmd/compile/internal/ir.Node %+v": "", "cmd/compile/internal/ir.Node %L": "", "cmd/compile/internal/ir.Node %S": "", - "cmd/compile/internal/ir.Node %j": "", "cmd/compile/internal/ir.Node %p": "", "cmd/compile/internal/ir.Node %v": "", "cmd/compile/internal/ir.Nodes %#v": "", @@ -97,6 +98,7 @@ var knownFormats = map[string]string{ "cmd/compile/internal/ir.Nodes %v": "", "cmd/compile/internal/ir.Ntype %v": "", "cmd/compile/internal/ir.Op %#v": "", + "cmd/compile/internal/ir.Op %+v": "", "cmd/compile/internal/ir.Op %v": "", "cmd/compile/internal/ssa.BranchPrediction %d": "", "cmd/compile/internal/ssa.Edge %v": "", diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index ae33dcddd79bd..b5bf036d5ecb7 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -9,6 +9,7 @@ import ( "fmt" "go/constant" "io" + "os" "strconv" "strings" "sync" @@ -258,7 +259,10 @@ var OpNames = []string{ } func (o Op) GoString() string { - return fmt.Sprintf("%#v", o) + if int(o) < len(OpNames) && OpNames[o] != "" { + return OpNames[o] + } + return o.String() } type fmtOp struct { @@ -266,30 +270,20 @@ type fmtOp struct { m FmtMode } -func (f *fmtOp) Format(s fmt.State, verb rune) { f.x.format(s, verb, f.m) } - -func (o Op) Format(s fmt.State, verb rune) { o.format(s, verb, FErr) } +func (f *fmtOp) Format(s fmt.State, verb rune) { f.x.Format(s, verb) } -func (o Op) format(s fmt.State, verb rune, mode FmtMode) { +func (o Op) Format(s fmt.State, verb rune) { switch verb { - case 'v': - o.oconv(s, fmtFlag(s, verb), mode) - default: fmt.Fprintf(s, "%%!%c(Op=%d)", verb, int(o)) - } -} - -func (o Op) oconv(s fmt.State, flag FmtFlag, mode FmtMode) { - if flag&FmtSharp != 0 || mode != FDbg { - if int(o) < len(OpNames) && OpNames[o] != "" { - fmt.Fprint(s, OpNames[o]) + case 'v': + if s.Flag('+') { + // %+v is OMUL instead of "*" + io.WriteString(s, o.String()) return } + io.WriteString(s, o.GoString()) } - - // 'o.String()' instead of just 'o' to avoid infinite recursion - fmt.Fprint(s, o.String()) } // Val @@ -346,6 +340,9 @@ func (f *fmtSym) Format(s fmt.State, verb rune) { symFormat(f.x, s, verb, f.m) } func symFormat(s *types.Sym, f fmt.State, verb rune, mode FmtMode) { switch verb { case 'v', 'S': + if verb == 'v' && 
f.Flag('+') { + mode = FDbg + } fmt.Fprint(f, sconv(s, fmtFlag(f, verb), mode)) default: @@ -514,6 +511,9 @@ func (f *fmtType) Format(s fmt.State, verb rune) { typeFormat(f.x, s, verb, f.m) func typeFormat(t *types.Type, s fmt.State, verb rune, mode FmtMode) { switch verb { case 'v', 'S', 'L': + if verb == 'v' && s.Flag('+') { // %+v is debug format + mode = FDbg + } fmt.Fprint(s, tconv(t, fmtFlag(s, verb), mode)) default: fmt.Fprintf(s, "%%!%c(*Type=%p)", verb, t) @@ -897,6 +897,13 @@ type fmtNode struct { func (f *fmtNode) Format(s fmt.State, verb rune) { nodeFormat(f.x, s, verb, f.m) } func FmtNode(n Node, s fmt.State, verb rune) { + // %+v prints Dump. + if s.Flag('+') && verb == 'v' { + dumpNode(s, n, 1) + return + } + + // Otherwise print Go syntax. nodeFormat(n, s, verb, FErr) } @@ -905,9 +912,6 @@ func nodeFormat(n Node, s fmt.State, verb rune, mode FmtMode) { case 'v', 'S', 'L': nconvFmt(n, s, fmtFlag(s, verb), mode) - case 'j': - jconvFmt(n, s, fmtFlag(s, verb)) - default: fmt.Fprintf(s, "%%!%c(*Node=%p)", verb, n) } @@ -927,11 +931,6 @@ func nconvFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) { case FErr: nodeFmt(n, s, flag, mode) - case FDbg: - dumpdepth++ - nodeDumpFmt(n, s, flag, mode) - dumpdepth-- - default: base.Fatalf("unhandled %%N mode: %d", mode) } @@ -1663,11 +1662,17 @@ type fmtNodes struct { m FmtMode } -func (f *fmtNodes) Format(s fmt.State, verb rune) { f.x.format(s, verb, f.m) } +func (f *fmtNodes) Format(s fmt.State, verb rune) { f.x.format(s, verb, FErr) } func (l Nodes) Format(s fmt.State, verb rune) { l.format(s, verb, FErr) } func (l Nodes) format(s fmt.State, verb rune, mode FmtMode) { + if s.Flag('+') && verb == 'v' { + // %+v is DumpList output + dumpNodes(s, l, 1) + return + } + switch verb { case 'v': l.hconv(s, fmtFlag(s, verb), mode) @@ -1683,16 +1688,9 @@ func (n Nodes) String() string { // Flags: all those of %N plus '.': separate with comma's instead of semicolons. func (l Nodes) hconv(s fmt.State, flag FmtFlag, mode FmtMode) { - if l.Len() == 0 && mode == FDbg { - fmt.Fprint(s, "") - return - } - flag, mode = flag.update(mode) sep := "; " - if mode == FDbg { - sep = "\n" - } else if flag&FmtComma != 0 { + if flag&FmtComma != 0 { sep = ", " } @@ -1707,44 +1705,45 @@ func (l Nodes) hconv(s fmt.State, flag FmtFlag, mode FmtMode) { // Dump func Dump(s string, n Node) { - fmt.Printf("%s [%p]%+v\n", s, n, n) + fmt.Printf("%s [%p]%+v", s, n, n) } func DumpList(s string, l Nodes) { - fmt.Printf("%s%+v\n", s, l) + var buf bytes.Buffer + FDumpList(&buf, s, l) + os.Stdout.Write(buf.Bytes()) } func FDumpList(w io.Writer, s string, l Nodes) { - fmt.Fprintf(w, "%s%+v\n", s, l) + io.WriteString(w, s) + dumpNodes(w, l, 1) + io.WriteString(w, "\n") } -// TODO(gri) make variable local somehow -var dumpdepth int - -// indent prints indentation to s. -func indent(s fmt.State) { - fmt.Fprint(s, "\n") - for i := 0; i < dumpdepth; i++ { - fmt.Fprint(s, ". ") +// indent prints indentation to w. +func indent(w io.Writer, depth int) { + fmt.Fprint(w, "\n") + for i := 0; i < depth; i++ { + fmt.Fprint(w, ". ") } } // EscFmt is set by the escape analysis code to add escape analysis details to the node print. var EscFmt func(n Node) string -// *Node details -func jconvFmt(n Node, s fmt.State, flag FmtFlag) { +// dumpNodeHeader prints the debug-format node header line to w. 
+func dumpNodeHeader(w io.Writer, n Node) { // Useful to see which nodes in an AST printout are actually identical if base.Debug.DumpPtrs != 0 { - fmt.Fprintf(s, " p(%p)", n) + fmt.Fprintf(w, " p(%p)", n) } if n.Name() != nil && n.Name().Vargen != 0 { - fmt.Fprintf(s, " g(%d)", n.Name().Vargen) + fmt.Fprintf(w, " g(%d)", n.Name().Vargen) } if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Defn != nil { // Useful to see where Defn is set and what node it points to - fmt.Fprintf(s, " defn(%p)", n.Name().Defn) + fmt.Fprintf(w, " defn(%p)", n.Name().Defn) } if n.Pos().IsKnown() { @@ -1755,168 +1754,179 @@ func jconvFmt(n Node, s fmt.State, flag FmtFlag) { case src.PosIsStmt: pfx = "+" } - fmt.Fprintf(s, " l(%s%d)", pfx, n.Pos().Line()) + fmt.Fprintf(w, " l(%s%d)", pfx, n.Pos().Line()) } if n.Offset() != types.BADWIDTH { - fmt.Fprintf(s, " x(%d)", n.Offset()) + fmt.Fprintf(w, " x(%d)", n.Offset()) } if n.Class() != 0 { - fmt.Fprintf(s, " class(%v)", n.Class()) + fmt.Fprintf(w, " class(%v)", n.Class()) } if n.Colas() { - fmt.Fprintf(s, " colas(%v)", n.Colas()) + fmt.Fprintf(w, " colas(%v)", n.Colas()) } if EscFmt != nil { if esc := EscFmt(n); esc != "" { - fmt.Fprintf(s, " %s", esc) + fmt.Fprintf(w, " %s", esc) } } if n.Typecheck() != 0 { - fmt.Fprintf(s, " tc(%d)", n.Typecheck()) + fmt.Fprintf(w, " tc(%d)", n.Typecheck()) } if n.IsDDD() { - fmt.Fprintf(s, " isddd(%v)", n.IsDDD()) + fmt.Fprintf(w, " isddd(%v)", n.IsDDD()) } if n.Implicit() { - fmt.Fprintf(s, " implicit(%v)", n.Implicit()) + fmt.Fprintf(w, " implicit(%v)", n.Implicit()) } if n.Op() == ONAME { if n.Name().Addrtaken() { - fmt.Fprint(s, " addrtaken") + fmt.Fprint(w, " addrtaken") } if n.Name().Assigned() { - fmt.Fprint(s, " assigned") + fmt.Fprint(w, " assigned") } if n.Name().IsClosureVar() { - fmt.Fprint(s, " closurevar") + fmt.Fprint(w, " closurevar") } if n.Name().Captured() { - fmt.Fprint(s, " captured") + fmt.Fprint(w, " captured") } if n.Name().IsOutputParamHeapAddr() { - fmt.Fprint(s, " outputparamheapaddr") + fmt.Fprint(w, " outputparamheapaddr") } } if n.Bounded() { - fmt.Fprint(s, " bounded") + fmt.Fprint(w, " bounded") } if n.NonNil() { - fmt.Fprint(s, " nonnil") + fmt.Fprint(w, " nonnil") } if n.HasCall() { - fmt.Fprint(s, " hascall") + fmt.Fprint(w, " hascall") } if n.Name() != nil && n.Name().Used() { - fmt.Fprint(s, " used") + fmt.Fprint(w, " used") } } -func nodeDumpFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) { - recur := flag&FmtShort == 0 - - if recur { - indent(s) - if dumpdepth > 40 { - fmt.Fprint(s, "...") - return - } +func dumpNode(w io.Writer, n Node, depth int) { + indent(w, depth) + if depth > 40 { + fmt.Fprint(w, "...") + return + } - if n.Init().Len() != 0 { - mode.Fprintf(s, "%v-init%v", n.Op(), n.Init()) - indent(s) - } + if n.Init().Len() != 0 { + fmt.Fprintf(w, "%+v-init", n.Op()) + dumpNodes(w, n.Init(), depth+1) + indent(w, depth) } switch n.Op() { default: - mode.Fprintf(s, "%v%j", n.Op(), n) + fmt.Fprintf(w, "%+v", n.Op()) + dumpNodeHeader(w, n) case OLITERAL: - mode.Fprintf(s, "%v-%v%j", n.Op(), n.Val(), n) + fmt.Fprintf(w, "%+v-%v", n.Op(), n.Val()) + dumpNodeHeader(w, n) case ONAME, ONONAME, OMETHEXPR: if n.Sym() != nil { - mode.Fprintf(s, "%v-%v%j", n.Op(), n.Sym(), n) + fmt.Fprintf(w, "%+v-%+v", n.Op(), n.Sym()) } else { - mode.Fprintf(s, "%v%j", n.Op(), n) + fmt.Fprintf(w, "%+v", n.Op()) } - if recur && n.Type() == nil && n.Name() != nil && n.Name().Ntype != nil { - indent(s) - mode.Fprintf(s, "%v-ntype%v", n.Op(), n.Name().Ntype) + dumpNodeHeader(w, n) + if n.Type() == 
nil && n.Name() != nil && n.Name().Ntype != nil { + indent(w, depth) + fmt.Fprintf(w, "%+v-ntype", n.Op()) + dumpNode(w, n.Name().Ntype, depth+1) } case OASOP: - mode.Fprintf(s, "%v-%v%j", n.Op(), n.SubOp(), n) + fmt.Fprintf(w, "%+v-%+v", n.Op(), n.SubOp()) + dumpNodeHeader(w, n) case OTYPE: - mode.Fprintf(s, "%v %v%j type=%v", n.Op(), n.Sym(), n, n.Type()) - if recur && n.Type() == nil && n.Name() != nil && n.Name().Ntype != nil { - indent(s) - mode.Fprintf(s, "%v-ntype%v", n.Op(), n.Name().Ntype) + fmt.Fprintf(w, "%+v %+v", n.Op(), n.Sym()) + dumpNodeHeader(w, n) + fmt.Fprintf(w, " type=%+v", n.Type()) + if n.Type() == nil && n.Name() != nil && n.Name().Ntype != nil { + indent(w, depth) + fmt.Fprintf(w, "%+v-ntype", n.Op()) + dumpNode(w, n.Name().Ntype, depth+1) } } if n.Op() == OCLOSURE && n.Func() != nil && n.Func().Nname.Sym() != nil { - mode.Fprintf(s, " fnName %v", n.Func().Nname.Sym()) + fmt.Fprintf(w, " fnName %+v", n.Func().Nname.Sym()) } if n.Sym() != nil && n.Op() != ONAME { - mode.Fprintf(s, " %v", n.Sym()) + fmt.Fprintf(w, " %+v", n.Sym()) } if n.Type() != nil { - mode.Fprintf(s, " %v", n.Type()) + fmt.Fprintf(w, " %+v", n.Type()) } - if recur { - if n.Left() != nil { - mode.Fprintf(s, "%v", n.Left()) - } - if n.Right() != nil { - mode.Fprintf(s, "%v", n.Right()) - } - if n.Op() == OCLOSURE && n.Func() != nil && n.Func().Body().Len() != 0 { - indent(s) - // The function associated with a closure - mode.Fprintf(s, "%v-clofunc%v", n.Op(), n.Func()) - } - if n.Op() == ODCLFUNC && n.Func() != nil && n.Func().Dcl != nil && len(n.Func().Dcl) != 0 { - indent(s) - // The dcls for a func or closure - mode.Fprintf(s, "%v-dcl%v", n.Op(), asNameNodes(n.Func().Dcl)) - } - if n.List().Len() != 0 { - indent(s) - mode.Fprintf(s, "%v-list%v", n.Op(), n.List()) + if n.Left() != nil { + dumpNode(w, n.Left(), depth+1) + } + if n.Right() != nil { + dumpNode(w, n.Right(), depth+1) + } + if n.Op() == OCLOSURE && n.Func() != nil && n.Func().Body().Len() != 0 { + indent(w, depth) + // The function associated with a closure + fmt.Fprintf(w, "%+v-clofunc", n.Op()) + dumpNode(w, n.Func(), depth+1) + } + if n.Op() == ODCLFUNC && n.Func() != nil && n.Func().Dcl != nil && len(n.Func().Dcl) != 0 { + indent(w, depth) + // The dcls for a func or closure + fmt.Fprintf(w, "%+v-dcl", n.Op()) + for _, dcl := range n.Func().Dcl { + dumpNode(w, dcl, depth+1) } + } + if n.List().Len() != 0 { + indent(w, depth) + fmt.Fprintf(w, "%+v-list", n.Op()) + dumpNodes(w, n.List(), depth+1) + } - if n.Rlist().Len() != 0 { - indent(s) - mode.Fprintf(s, "%v-rlist%v", n.Op(), n.Rlist()) - } + if n.Rlist().Len() != 0 { + indent(w, depth) + fmt.Fprintf(w, "%+v-rlist", n.Op()) + dumpNodes(w, n.Rlist(), depth+1) + } - if n.Body().Len() != 0 { - indent(s) - mode.Fprintf(s, "%v-body%v", n.Op(), n.Body()) - } + if n.Body().Len() != 0 { + indent(w, depth) + fmt.Fprintf(w, "%+v-body", n.Op()) + dumpNodes(w, n.Body(), depth+1) } } -// asNameNodes copies list to a new Nodes. -// It should only be called in debug formatting and other low-performance contexts. 
-func asNameNodes(list []*Name) Nodes { - var ns Nodes - for _, n := range list { - ns.Append(n) +func dumpNodes(w io.Writer, list Nodes, depth int) { + if list.Len() == 0 { + fmt.Fprintf(w, " ") + return + } + + for _, n := range list.Slice() { + dumpNode(w, n, depth) } - return ns } From 3b25f3c1504cdc8f2263d68436df42042251b290 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sat, 5 Dec 2020 14:46:19 -0500 Subject: [PATCH 130/474] [dev.regabi] cmd/compile: simplify Op, Node, Nodes printing nconvFmt calls base.Fatalf if mode is anything but FErr, proving that the only formats that matter for nodes are plain %v, %S, and %L. And the nodes formatter can only get to %v. (%S and %v are the same; we'll clean that up separately.) Node and Nodes can therefore ignore mode, and all the mode code can be removed from those implementations, removing quite a few layers of abstraction. Op similarly only runs in one mode and can be simplified. Passes buildall w/ toolstash -cmp. Change-Id: Ibfd845033e9c68181a20fb81c8f3dd428463920a Reviewed-on: https://go-review.googlesource.com/c/go/+/275775 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/fmtmap_test.go | 2 - src/cmd/compile/internal/gc/walk.go | 2 +- src/cmd/compile/internal/ir/fmt.go | 351 ++++++++++++---------------- 3 files changed, 155 insertions(+), 200 deletions(-) diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go index bf81cc07dbbee..60b772e932570 100644 --- a/src/cmd/compile/fmtmap_test.go +++ b/src/cmd/compile/fmtmap_test.go @@ -85,8 +85,6 @@ var knownFormats = map[string]string{ "cmd/compile/internal/gc.itag %v": "", "cmd/compile/internal/ir.Class %d": "", "cmd/compile/internal/ir.Class %v": "", - "cmd/compile/internal/ir.FmtMode %d": "", - "cmd/compile/internal/ir.Node %+S": "", "cmd/compile/internal/ir.Node %+v": "", "cmd/compile/internal/ir.Node %L": "", "cmd/compile/internal/ir.Node %S": "", diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index bbc08ab953249..574c7c470933a 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -481,7 +481,7 @@ opswitch: switch n.Op() { default: ir.Dump("walk", n) - base.Fatalf("walkexpr: switch 1 unknown op %+S", n) + base.Fatalf("walkexpr: switch 1 unknown op %+v", n.Op()) case ir.ONONAME, ir.OGETG, ir.ONEWOBJ, ir.OMETHEXPR: diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index b5bf036d5ecb7..b0c732ae56ce4 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -170,19 +170,13 @@ func (m FmtMode) Sprint(args ...interface{}) string { func (m FmtMode) prepareArgs(args []interface{}) { for i, arg := range args { switch arg := arg.(type) { - case Op: - args[i] = &fmtOp{arg, m} - case Node: - args[i] = &fmtNode{arg, m} case nil: - args[i] = &fmtNode{nil, m} // assume this was a node interface + args[i] = "" // assume this was a node interface case *types.Type: args[i] = &fmtType{arg, m} case *types.Sym: args[i] = &fmtSym{arg, m} - case Nodes: - args[i] = &fmtNodes{arg, m} - case int32, int64, string, types.Kind, constant.Value: + case int32, int64, string, Op, Node, Nodes, types.Kind, constant.Value: // OK: printing these types doesn't depend on mode default: base.Fatalf("mode.prepareArgs type %T", arg) @@ -265,13 +259,6 @@ func (o Op) GoString() string { return o.String() } -type fmtOp struct { - x Op - m FmtMode -} - -func (f *fmtOp) Format(s fmt.State, verb rune) { f.x.Format(s, verb) } - func (o Op) Format(s fmt.State, 
verb rune) { switch verb { default: @@ -851,7 +838,7 @@ func fldconv(b *bytes.Buffer, f *types.Field, flag FmtFlag, mode FmtMode, visite if s != nil && f.Embedded == 0 { if funarg != types.FunargNone { - name = modeString(AsNode(f.Nname), mode) + name = fmt.Sprint(f.Nname) } else if flag&FmtLong != 0 { name = mode.Sprintf("%0S", s) if !types.IsExported(name) && flag&FmtUnsigned == 0 { @@ -887,64 +874,38 @@ func fldconv(b *bytes.Buffer, f *types.Field, flag FmtFlag, mode FmtMode, visite // Node -func modeString(n Node, mode FmtMode) string { return mode.Sprint(n) } - -type fmtNode struct { - x Node - m FmtMode -} - -func (f *fmtNode) Format(s fmt.State, verb rune) { nodeFormat(f.x, s, verb, f.m) } - func FmtNode(n Node, s fmt.State, verb rune) { + // TODO(rsc): Remove uses of %#v, which behaves just like %v. + // TODO(rsc): Remove uses of %S, which behaves just like %v. + if verb == 'S' { + verb = 'v' + } + // %+v prints Dump. + // Otherwise we print Go syntax. if s.Flag('+') && verb == 'v' { dumpNode(s, n, 1) return } - // Otherwise print Go syntax. - nodeFormat(n, s, verb, FErr) -} - -func nodeFormat(n Node, s fmt.State, verb rune, mode FmtMode) { - switch verb { - case 'v', 'S', 'L': - nconvFmt(n, s, fmtFlag(s, verb), mode) - - default: + if verb != 'v' && verb != 'S' && verb != 'L' { fmt.Fprintf(s, "%%!%c(*Node=%p)", verb, n) + return } -} -// "%L" suffix with "(type %T)" where possible -// "%+S" in debug mode, don't recurse, no multiline output -func nconvFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) { if n == nil { fmt.Fprint(s, "") return } - flag, mode = flag.update(mode) - - switch mode { - case FErr: - nodeFmt(n, s, flag, mode) - - default: - base.Fatalf("unhandled %%N mode: %d", mode) - } -} - -func nodeFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) { t := n.Type() - if flag&FmtLong != 0 && t != nil { + if verb == 'L' && t != nil { if t.Kind() == types.TNIL { fmt.Fprint(s, "nil") } else if n.Op() == ONAME && n.Name().AutoTemp() { - mode.Fprintf(s, "%v value", t) + fmt.Fprintf(s, "%v value", t) } else { - mode.Fprintf(s, "%v (type %v)", n, t) + fmt.Fprintf(s, "%v (type %v)", n, t) } return } @@ -952,11 +913,11 @@ func nodeFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) { // TODO inlining produces expressions with ninits. we can't print these yet. if OpPrec[n.Op()] < 0 { - stmtFmt(n, s, mode) + stmtFmt(n, s) return } - exprFmt(n, s, 0, mode) + exprFmt(n, s, 0) } var OpPrec = []int{ @@ -1089,7 +1050,15 @@ func StmtWithInit(op Op) bool { return false } -func stmtFmt(n Node, s fmt.State, mode FmtMode) { +func stmtFmt(n Node, s fmt.State) { + // NOTE(rsc): This code used to support the text-based + // which was more aggressive about printing full Go syntax + // (for example, an actual loop instead of "for loop"). + // The code is preserved for now in case we want to expand + // any of those shortenings later. Or maybe we will delete + // the code. But for now, keep it. + const exportFormat = false + // some statements allow for an init, but at most one, // but we may have an arbitrary number added, eg by typecheck // and inlining. 
If it doesn't fit the syntax, emit an enclosing @@ -1099,7 +1068,7 @@ func stmtFmt(n Node, s fmt.State, mode FmtMode) { simpleinit := n.Init().Len() == 1 && n.Init().First().Init().Len() == 0 && StmtWithInit(n.Op()) // otherwise, print the inits as separate statements - complexinit := n.Init().Len() != 0 && !simpleinit && (mode != FErr) + complexinit := n.Init().Len() != 0 && !simpleinit && exportFormat // but if it was for if/for/switch, put in an extra surrounding block to limit the scope extrablock := complexinit && StmtWithInit(n.Op()) @@ -1109,70 +1078,70 @@ func stmtFmt(n Node, s fmt.State, mode FmtMode) { } if complexinit { - mode.Fprintf(s, " %v; ", n.Init()) + fmt.Fprintf(s, " %v; ", n.Init()) } switch n.Op() { case ODCL: - mode.Fprintf(s, "var %v %v", n.Left().Sym(), n.Left().Type()) + fmt.Fprintf(s, "var %v %v", n.Left().Sym(), n.Left().Type()) // Don't export "v = " initializing statements, hope they're always // preceded by the DCL which will be re-parsed and typechecked to reproduce // the "v = " again. case OAS: if n.Colas() && !complexinit { - mode.Fprintf(s, "%v := %v", n.Left(), n.Right()) + fmt.Fprintf(s, "%v := %v", n.Left(), n.Right()) } else { - mode.Fprintf(s, "%v = %v", n.Left(), n.Right()) + fmt.Fprintf(s, "%v = %v", n.Left(), n.Right()) } case OASOP: if n.Implicit() { if n.SubOp() == OADD { - mode.Fprintf(s, "%v++", n.Left()) + fmt.Fprintf(s, "%v++", n.Left()) } else { - mode.Fprintf(s, "%v--", n.Left()) + fmt.Fprintf(s, "%v--", n.Left()) } break } - mode.Fprintf(s, "%v %#v= %v", n.Left(), n.SubOp(), n.Right()) + fmt.Fprintf(s, "%v %#v= %v", n.Left(), n.SubOp(), n.Right()) case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV: if n.Colas() && !complexinit { - mode.Fprintf(s, "%.v := %.v", n.List(), n.Rlist()) + fmt.Fprintf(s, "%.v := %.v", n.List(), n.Rlist()) } else { - mode.Fprintf(s, "%.v = %.v", n.List(), n.Rlist()) + fmt.Fprintf(s, "%.v = %.v", n.List(), n.Rlist()) } case OBLOCK: if n.List().Len() != 0 { - mode.Fprintf(s, "%v", n.List()) + fmt.Fprintf(s, "%v", n.List()) } case ORETURN: - mode.Fprintf(s, "return %.v", n.List()) + fmt.Fprintf(s, "return %.v", n.List()) case ORETJMP: - mode.Fprintf(s, "retjmp %v", n.Sym()) + fmt.Fprintf(s, "retjmp %v", n.Sym()) case OINLMARK: - mode.Fprintf(s, "inlmark %d", n.Offset()) + fmt.Fprintf(s, "inlmark %d", n.Offset()) case OGO: - mode.Fprintf(s, "go %v", n.Left()) + fmt.Fprintf(s, "go %v", n.Left()) case ODEFER: - mode.Fprintf(s, "defer %v", n.Left()) + fmt.Fprintf(s, "defer %v", n.Left()) case OIF: if simpleinit { - mode.Fprintf(s, "if %v; %v { %v }", n.Init().First(), n.Left(), n.Body()) + fmt.Fprintf(s, "if %v; %v { %v }", n.Init().First(), n.Left(), n.Body()) } else { - mode.Fprintf(s, "if %v { %v }", n.Left(), n.Body()) + fmt.Fprintf(s, "if %v { %v }", n.Left(), n.Body()) } if n.Rlist().Len() != 0 { - mode.Fprintf(s, " else { %v }", n.Rlist()) + fmt.Fprintf(s, " else { %v }", n.Rlist()) } case OFOR, OFORUNTIL: @@ -1180,80 +1149,80 @@ func stmtFmt(n Node, s fmt.State, mode FmtMode) { if n.Op() == OFORUNTIL { opname = "foruntil" } - if mode == FErr { // TODO maybe only if FmtShort, same below + if !exportFormat { // TODO maybe only if FmtShort, same below fmt.Fprintf(s, "%s loop", opname) break } fmt.Fprint(s, opname) if simpleinit { - mode.Fprintf(s, " %v;", n.Init().First()) + fmt.Fprintf(s, " %v;", n.Init().First()) } else if n.Right() != nil { fmt.Fprint(s, " ;") } if n.Left() != nil { - mode.Fprintf(s, " %v", n.Left()) + fmt.Fprintf(s, " %v", n.Left()) } if n.Right() != nil { - mode.Fprintf(s, "; %v", n.Right()) 
+ fmt.Fprintf(s, "; %v", n.Right()) } else if simpleinit { fmt.Fprint(s, ";") } if n.Op() == OFORUNTIL && n.List().Len() != 0 { - mode.Fprintf(s, "; %v", n.List()) + fmt.Fprintf(s, "; %v", n.List()) } - mode.Fprintf(s, " { %v }", n.Body()) + fmt.Fprintf(s, " { %v }", n.Body()) case ORANGE: - if mode == FErr { + if !exportFormat { fmt.Fprint(s, "for loop") break } if n.List().Len() == 0 { - mode.Fprintf(s, "for range %v { %v }", n.Right(), n.Body()) + fmt.Fprintf(s, "for range %v { %v }", n.Right(), n.Body()) break } - mode.Fprintf(s, "for %.v = range %v { %v }", n.List(), n.Right(), n.Body()) + fmt.Fprintf(s, "for %.v = range %v { %v }", n.List(), n.Right(), n.Body()) case OSELECT, OSWITCH: - if mode == FErr { - mode.Fprintf(s, "%v statement", n.Op()) + if !exportFormat { + fmt.Fprintf(s, "%v statement", n.Op()) break } - mode.Fprintf(s, "%#v", n.Op()) + fmt.Fprintf(s, "%#v", n.Op()) if simpleinit { - mode.Fprintf(s, " %v;", n.Init().First()) + fmt.Fprintf(s, " %v;", n.Init().First()) } if n.Left() != nil { - mode.Fprintf(s, " %v ", n.Left()) + fmt.Fprintf(s, " %v ", n.Left()) } - mode.Fprintf(s, " { %v }", n.List()) + fmt.Fprintf(s, " { %v }", n.List()) case OCASE: if n.List().Len() != 0 { - mode.Fprintf(s, "case %.v", n.List()) + fmt.Fprintf(s, "case %.v", n.List()) } else { fmt.Fprint(s, "default") } - mode.Fprintf(s, ": %v", n.Body()) + fmt.Fprintf(s, ": %v", n.Body()) case OBREAK, OCONTINUE, OGOTO, OFALL: if n.Sym() != nil { - mode.Fprintf(s, "%#v %v", n.Op(), n.Sym()) + fmt.Fprintf(s, "%#v %v", n.Op(), n.Sym()) } else { - mode.Fprintf(s, "%#v", n.Op()) + fmt.Fprintf(s, "%#v", n.Op()) } case OLABEL: - mode.Fprintf(s, "%v: ", n.Sym()) + fmt.Fprintf(s, "%v: ", n.Sym()) } if extrablock { @@ -1261,7 +1230,15 @@ func stmtFmt(n Node, s fmt.State, mode FmtMode) { } } -func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) { +func exprFmt(n Node, s fmt.State, prec int) { + // NOTE(rsc): This code used to support the text-based + // which was more aggressive about printing full Go syntax + // (for example, an actual loop instead of "for loop"). + // The code is preserved for now in case we want to expand + // any of those shortenings later. Or maybe we will delete + // the code. But for now, keep it. + const exportFormat = false + for { if n == nil { fmt.Fprint(s, "") @@ -1292,20 +1269,20 @@ func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) { } if prec > nprec { - mode.Fprintf(s, "(%v)", n) + fmt.Fprintf(s, "(%v)", n) return } switch n.Op() { case OPAREN: - mode.Fprintf(s, "(%v)", n.Left()) + fmt.Fprintf(s, "(%v)", n.Left()) case ONIL: fmt.Fprint(s, "nil") case OLITERAL: // this is a bit of a mess - if mode == FErr && n.Sym() != nil { - fmt.Fprint(s, smodeString(n.Sym(), mode)) + if !exportFormat && n.Sym() != nil { + fmt.Fprint(s, smodeString(n.Sym(), FErr)) return } @@ -1314,9 +1291,9 @@ func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) { // Need parens when type begins with what might // be misinterpreted as a unary operator: * or <-. 
if n.Type().IsPtr() || (n.Type().IsChan() && n.Type().ChanDir() == types.Crecv) { - mode.Fprintf(s, "(%v)(", n.Type()) + fmt.Fprintf(s, "(%v)(", n.Type()) } else { - mode.Fprintf(s, "%v(", n.Type()) + fmt.Fprintf(s, "%v(", n.Type()) } needUnparen = true } @@ -1342,68 +1319,68 @@ func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) { } if needUnparen { - mode.Fprintf(s, ")") + fmt.Fprintf(s, ")") } case ODCLFUNC: if sym := n.Sym(); sym != nil { - fmt.Fprint(s, smodeString(sym, mode)) + fmt.Fprint(s, smodeString(sym, FErr)) return } - mode.Fprintf(s, "") + fmt.Fprintf(s, "") case ONAME: // Special case: name used as local variable in export. // _ becomes ~b%d internally; print as _ for export - if mode == FErr && n.Sym() != nil && n.Sym().Name[0] == '~' && n.Sym().Name[1] == 'b' { + if !exportFormat && n.Sym() != nil && n.Sym().Name[0] == '~' && n.Sym().Name[1] == 'b' { fmt.Fprint(s, "_") return } fallthrough case OPACK, ONONAME, OMETHEXPR: - fmt.Fprint(s, smodeString(n.Sym(), mode)) + fmt.Fprint(s, smodeString(n.Sym(), FErr)) case OTYPE: if n.Type() == nil && n.Sym() != nil { - fmt.Fprint(s, smodeString(n.Sym(), mode)) + fmt.Fprint(s, smodeString(n.Sym(), FErr)) return } - mode.Fprintf(s, "%v", n.Type()) + fmt.Fprintf(s, "%v", n.Type()) case OTSLICE: n := n.(*SliceType) if n.DDD { - mode.Fprintf(s, "...%v", n.Elem) + fmt.Fprintf(s, "...%v", n.Elem) } else { - mode.Fprintf(s, "[]%v", n.Elem) // happens before typecheck + fmt.Fprintf(s, "[]%v", n.Elem) // happens before typecheck } case OTARRAY: n := n.(*ArrayType) if n.Len == nil { - mode.Fprintf(s, "[...]%v", n.Elem) + fmt.Fprintf(s, "[...]%v", n.Elem) } else { - mode.Fprintf(s, "[%v]%v", n.Len, n.Elem) + fmt.Fprintf(s, "[%v]%v", n.Len, n.Elem) } case OTMAP: n := n.(*MapType) - mode.Fprintf(s, "map[%v]%v", n.Key, n.Elem) + fmt.Fprintf(s, "map[%v]%v", n.Key, n.Elem) case OTCHAN: n := n.(*ChanType) switch n.Dir { case types.Crecv: - mode.Fprintf(s, "<-chan %v", n.Elem) + fmt.Fprintf(s, "<-chan %v", n.Elem) case types.Csend: - mode.Fprintf(s, "chan<- %v", n.Elem) + fmt.Fprintf(s, "chan<- %v", n.Elem) default: if n.Elem != nil && n.Elem.Op() == OTCHAN && n.Elem.(*ChanType).Dir == types.Crecv { - mode.Fprintf(s, "chan (%v)", n.Elem) + fmt.Fprintf(s, "chan (%v)", n.Elem) } else { - mode.Fprintf(s, "chan %v", n.Elem) + fmt.Fprintf(s, "chan %v", n.Elem) } } @@ -1417,104 +1394,104 @@ func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) { fmt.Fprint(s, "") case OCLOSURE: - if mode == FErr { + if !exportFormat { fmt.Fprint(s, "func literal") return } if n.Body().Len() != 0 { - mode.Fprintf(s, "%v { %v }", n.Type(), n.Body()) + fmt.Fprintf(s, "%v { %v }", n.Type(), n.Body()) return } - mode.Fprintf(s, "%v { %v }", n.Type(), n.Func().Body()) + fmt.Fprintf(s, "%v { %v }", n.Type(), n.Func().Body()) case OCOMPLIT: - if mode == FErr { + if !exportFormat { if n.Implicit() { - mode.Fprintf(s, "... argument") + fmt.Fprintf(s, "... 
argument") return } if n.Right() != nil { - mode.Fprintf(s, "%v{%s}", n.Right(), ellipsisIf(n.List().Len() != 0)) + fmt.Fprintf(s, "%v{%s}", n.Right(), ellipsisIf(n.List().Len() != 0)) return } fmt.Fprint(s, "composite literal") return } - mode.Fprintf(s, "(%v{ %.v })", n.Right(), n.List()) + fmt.Fprintf(s, "(%v{ %.v })", n.Right(), n.List()) case OPTRLIT: - mode.Fprintf(s, "&%v", n.Left()) + fmt.Fprintf(s, "&%v", n.Left()) case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT: - if mode == FErr { - mode.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(n.List().Len() != 0)) + if !exportFormat { + fmt.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(n.List().Len() != 0)) return } - mode.Fprintf(s, "(%v{ %.v })", n.Type(), n.List()) + fmt.Fprintf(s, "(%v{ %.v })", n.Type(), n.List()) case OKEY: if n.Left() != nil && n.Right() != nil { - mode.Fprintf(s, "%v:%v", n.Left(), n.Right()) + fmt.Fprintf(s, "%v:%v", n.Left(), n.Right()) return } if n.Left() == nil && n.Right() != nil { - mode.Fprintf(s, ":%v", n.Right()) + fmt.Fprintf(s, ":%v", n.Right()) return } if n.Left() != nil && n.Right() == nil { - mode.Fprintf(s, "%v:", n.Left()) + fmt.Fprintf(s, "%v:", n.Left()) return } fmt.Fprint(s, ":") case OSTRUCTKEY: - mode.Fprintf(s, "%v:%v", n.Sym(), n.Left()) + fmt.Fprintf(s, "%v:%v", n.Sym(), n.Left()) case OCALLPART: - exprFmt(n.Left(), s, nprec, mode) + exprFmt(n.Left(), s, nprec) if n.Sym() == nil { fmt.Fprint(s, ".") return } - mode.Fprintf(s, ".%0S", n.Sym()) + fmt.Fprintf(s, ".%0S", n.Sym()) case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH: - exprFmt(n.Left(), s, nprec, mode) + exprFmt(n.Left(), s, nprec) if n.Sym() == nil { fmt.Fprint(s, ".") return } - mode.Fprintf(s, ".%0S", n.Sym()) + fmt.Fprintf(s, ".%0S", n.Sym()) case ODOTTYPE, ODOTTYPE2: - exprFmt(n.Left(), s, nprec, mode) + exprFmt(n.Left(), s, nprec) if n.Right() != nil { - mode.Fprintf(s, ".(%v)", n.Right()) + fmt.Fprintf(s, ".(%v)", n.Right()) return } - mode.Fprintf(s, ".(%v)", n.Type()) + fmt.Fprintf(s, ".(%v)", n.Type()) case OINDEX, OINDEXMAP: - exprFmt(n.Left(), s, nprec, mode) - mode.Fprintf(s, "[%v]", n.Right()) + exprFmt(n.Left(), s, nprec) + fmt.Fprintf(s, "[%v]", n.Right()) case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR: - exprFmt(n.Left(), s, nprec, mode) + exprFmt(n.Left(), s, nprec) fmt.Fprint(s, "[") low, high, max := n.SliceBounds() if low != nil { - fmt.Fprint(s, modeString(low, mode)) + fmt.Fprint(s, low) } fmt.Fprint(s, ":") if high != nil { - fmt.Fprint(s, modeString(high, mode)) + fmt.Fprint(s, high) } if n.Op().IsSlice3() { fmt.Fprint(s, ":") if max != nil { - fmt.Fprint(s, modeString(max, mode)) + fmt.Fprint(s, max) } } fmt.Fprint(s, "]") @@ -1523,13 +1500,13 @@ func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) { if n.List().Len() != 2 { base.Fatalf("bad OSLICEHEADER list length %d", n.List().Len()) } - mode.Fprintf(s, "sliceheader{%v,%v,%v}", n.Left(), n.List().First(), n.List().Second()) + fmt.Fprintf(s, "sliceheader{%v,%v,%v}", n.Left(), n.List().First(), n.List().Second()) case OCOMPLEX, OCOPY: if n.Left() != nil { - mode.Fprintf(s, "%#v(%v, %v)", n.Op(), n.Left(), n.Right()) + fmt.Fprintf(s, "%#v(%v, %v)", n.Op(), n.Left(), n.Right()) } else { - mode.Fprintf(s, "%#v(%.v)", n.Op(), n.List()) + fmt.Fprintf(s, "%#v(%.v)", n.Op(), n.List()) } case OCONV, @@ -1541,14 +1518,14 @@ func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) { OSTR2RUNES, ORUNESTR: if n.Type() == nil || n.Type().Sym() == nil { - mode.Fprintf(s, "(%v)", n.Type()) + fmt.Fprintf(s, "(%v)", n.Type()) } else { - mode.Fprintf(s, "%v", 
n.Type()) + fmt.Fprintf(s, "%v", n.Type()) } if n.Left() != nil { - mode.Fprintf(s, "(%v)", n.Left()) + fmt.Fprintf(s, "(%v)", n.Left()) } else { - mode.Fprintf(s, "(%.v)", n.List()) + fmt.Fprintf(s, "(%.v)", n.List()) } case OREAL, @@ -1568,48 +1545,48 @@ func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) { OPRINT, OPRINTN: if n.Left() != nil { - mode.Fprintf(s, "%#v(%v)", n.Op(), n.Left()) + fmt.Fprintf(s, "%#v(%v)", n.Op(), n.Left()) return } if n.IsDDD() { - mode.Fprintf(s, "%#v(%.v...)", n.Op(), n.List()) + fmt.Fprintf(s, "%#v(%.v...)", n.Op(), n.List()) return } - mode.Fprintf(s, "%#v(%.v)", n.Op(), n.List()) + fmt.Fprintf(s, "%#v(%.v)", n.Op(), n.List()) case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG: - exprFmt(n.Left(), s, nprec, mode) + exprFmt(n.Left(), s, nprec) if n.IsDDD() { - mode.Fprintf(s, "(%.v...)", n.List()) + fmt.Fprintf(s, "(%.v...)", n.List()) return } - mode.Fprintf(s, "(%.v)", n.List()) + fmt.Fprintf(s, "(%.v)", n.List()) case OMAKEMAP, OMAKECHAN, OMAKESLICE: if n.List().Len() != 0 { // pre-typecheck - mode.Fprintf(s, "make(%v, %.v)", n.Type(), n.List()) + fmt.Fprintf(s, "make(%v, %.v)", n.Type(), n.List()) return } if n.Right() != nil { - mode.Fprintf(s, "make(%v, %v, %v)", n.Type(), n.Left(), n.Right()) + fmt.Fprintf(s, "make(%v, %v, %v)", n.Type(), n.Left(), n.Right()) return } if n.Left() != nil && (n.Op() == OMAKESLICE || !n.Left().Type().IsUntyped()) { - mode.Fprintf(s, "make(%v, %v)", n.Type(), n.Left()) + fmt.Fprintf(s, "make(%v, %v)", n.Type(), n.Left()) return } - mode.Fprintf(s, "make(%v)", n.Type()) + fmt.Fprintf(s, "make(%v)", n.Type()) case OMAKESLICECOPY: - mode.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type(), n.Left(), n.Right()) + fmt.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type(), n.Left(), n.Right()) case OPLUS, ONEG, OADDR, OBITNOT, ODEREF, ONOT, ORECV: // Unary - mode.Fprintf(s, "%#v", n.Op()) + fmt.Fprintf(s, "%#v", n.Op()) if n.Left() != nil && n.Left().Op() == n.Op() { fmt.Fprint(s, " ") } - exprFmt(n.Left(), s, nprec+1, mode) + exprFmt(n.Left(), s, nprec+1) // Binary case OADD, @@ -1632,19 +1609,19 @@ func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) { OSEND, OSUB, OXOR: - exprFmt(n.Left(), s, nprec, mode) - mode.Fprintf(s, " %#v ", n.Op()) - exprFmt(n.Right(), s, nprec+1, mode) + exprFmt(n.Left(), s, nprec) + fmt.Fprintf(s, " %#v ", n.Op()) + exprFmt(n.Right(), s, nprec+1) case OADDSTR: for i, n1 := range n.List().Slice() { if i != 0 { fmt.Fprint(s, " + ") } - exprFmt(n1, s, nprec, mode) + exprFmt(n1, s, nprec) } default: - mode.Fprintf(s, "", n.Op()) + fmt.Fprintf(s, "", n.Op()) } } @@ -1657,45 +1634,25 @@ func ellipsisIf(b bool) string { // Nodes -type fmtNodes struct { - x Nodes - m FmtMode -} - -func (f *fmtNodes) Format(s fmt.State, verb rune) { f.x.format(s, verb, FErr) } - -func (l Nodes) Format(s fmt.State, verb rune) { l.format(s, verb, FErr) } - -func (l Nodes) format(s fmt.State, verb rune, mode FmtMode) { +func (l Nodes) Format(s fmt.State, verb rune) { if s.Flag('+') && verb == 'v' { // %+v is DumpList output dumpNodes(s, l, 1) return } - switch verb { - case 'v': - l.hconv(s, fmtFlag(s, verb), mode) - - default: + if verb != 'v' { fmt.Fprintf(s, "%%!%c(Nodes)", verb) + return } -} -func (n Nodes) String() string { - return fmt.Sprint(n) -} - -// Flags: all those of %N plus '.': separate with comma's instead of semicolons. 
-func (l Nodes) hconv(s fmt.State, flag FmtFlag, mode FmtMode) { - flag, mode = flag.update(mode) sep := "; " - if flag&FmtComma != 0 { + if _, ok := s.Precision(); ok { // %.v is expr list sep = ", " } for i, n := range l.Slice() { - fmt.Fprint(s, modeString(n, mode)) + fmt.Fprint(s, n) if i+1 < l.Len() { fmt.Fprint(s, sep) } From fb17dfa43d1c8e08d08f380ea082195d1c4f89f4 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sat, 5 Dec 2020 15:20:51 -0500 Subject: [PATCH 131/474] [dev.regabi] cmd/compile: narrow interface between ir and types Narrow the interface between package ir and package types to make it easier to clean up the type formatting code all in one place. Also introduce ir.BlankSym for use by OrigSym, so that later OrigSym can move to package types without needing to reference a variable of type ir.Node. Passes buildall w/ toolstash -cmp. Change-Id: I39fa419a1c8fb3318203e31cacc8d06399deeff9 Reviewed-on: https://go-review.googlesource.com/c/go/+/275776 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/main.go | 5 --- src/cmd/compile/internal/gc/universe.go | 1 + src/cmd/compile/internal/ir/fmt.go | 25 ++++++++------ src/cmd/compile/internal/ir/node.go | 4 ++- src/cmd/compile/internal/ssa/export_test.go | 15 +-------- src/cmd/compile/internal/types/scope.go | 7 ++-- src/cmd/compile/internal/types/sym.go | 5 +-- src/cmd/compile/internal/types/type.go | 28 ++++++++-------- src/cmd/compile/internal/types/utils.go | 36 +++++++++------------ 9 files changed, 59 insertions(+), 67 deletions(-) diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 96031fe511490..a40671bccf818 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -212,15 +212,10 @@ func Main(archInit func(*Arch)) { // would lead to import cycles) types.Widthptr = Widthptr types.Dowidth = dowidth - types.Fatalf = base.Fatalf ir.InstallTypeFormats() types.TypeLinkSym = func(t *types.Type) *obj.LSym { return typenamesym(t).Linksym() } - types.FmtLeft = int(ir.FmtLeft) - types.FmtUnsigned = int(ir.FmtUnsigned) - types.FErr = int(ir.FErr) - types.Ctxt = base.Ctxt initUniverse() diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index f9984cbe945eb..cd68719a9978a 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -182,6 +182,7 @@ func initUniverse() { ir.AsNode(s.Def).SetSym(lookup("false")) s = lookup("_") + ir.BlankSym = s s.Block = -100 s.Def = NewName(s) types.Types[types.TBLANK] = types.New(types.TBLANK) diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index b0c732ae56ce4..88534864a9ec4 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -339,7 +339,8 @@ func symFormat(s *types.Sym, f fmt.State, verb rune, mode FmtMode) { func smodeString(s *types.Sym, mode FmtMode) string { return sconv(s, 0, mode) } -// See #16897 before changing the implementation of sconv. +// See #16897 for details about performance implications +// before changing the implementation of sconv. 
func sconv(s *types.Sym, flag FmtFlag, mode FmtMode) string { if flag&FmtLong != 0 { panic("linksymfmt") @@ -472,17 +473,23 @@ var fmtBufferPool = sync.Pool{ } func InstallTypeFormats() { - types.Sconv = func(s *types.Sym, flag, mode int) string { - return sconv(s, FmtFlag(flag), FmtMode(mode)) + types.SymString = func(s *types.Sym) string { + return sconv(s, 0, FErr) } - types.Tconv = func(t *types.Type, flag, mode int) string { - return tconv(t, FmtFlag(flag), FmtMode(mode)) + types.TypeString = func(t *types.Type) string { + return tconv(t, 0, FErr) } - types.FormatSym = func(sym *types.Sym, s fmt.State, verb rune, mode int) { - symFormat(sym, s, verb, FmtMode(mode)) + types.TypeShortString = func(t *types.Type) string { + return tconv(t, FmtLeft, FErr) } - types.FormatType = func(t *types.Type, s fmt.State, verb rune, mode int) { - typeFormat(t, s, verb, FmtMode(mode)) + types.TypeLongString = func(t *types.Type) string { + return tconv(t, FmtLeft|FmtUnsigned, FErr) + } + types.FormatSym = func(sym *types.Sym, s fmt.State, verb rune) { + symFormat(sym, s, verb, FErr) + } + types.FormatType = func(t *types.Type, s fmt.State, verb rune) { + typeFormat(t, s, verb, FErr) } } diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 83f5b0cf78ed5..56b320e726482 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -654,6 +654,8 @@ func AsNode(n types.Object) Node { var BlankNode Node +var BlankSym *types.Sym + // origSym returns the original symbol written by the user. func OrigSym(s *types.Sym) *types.Sym { if s == nil { @@ -666,7 +668,7 @@ func OrigSym(s *types.Sym) *types.Sym { return nil case 'b': // originally the blank identifier _ // TODO(mdempsky): Does s.Pkg matter here? - return BlankNode.Sym() + return BlankSym } return s } diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go index 5a81f76cebcc3..cb3b9c0e2a0ea 100644 --- a/src/cmd/compile/internal/ssa/export_test.go +++ b/src/cmd/compile/internal/ssa/export_test.go @@ -12,7 +12,6 @@ import ( "cmd/internal/obj/s390x" "cmd/internal/obj/x86" "cmd/internal/src" - "fmt" "testing" ) @@ -138,19 +137,7 @@ func init() { // Initialize just enough of the universe and the types package to make our tests function. // TODO(josharian): move universe initialization to the types package, // so this test setup can share it. - - types.Tconv = func(t *types.Type, flag, mode int) string { - return t.Kind().String() - } - types.Sconv = func(s *types.Sym, flag, mode int) string { - return "sym" - } - types.FormatSym = func(sym *types.Sym, s fmt.State, verb rune, mode int) { - fmt.Fprintf(s, "sym") - } - types.FormatType = func(t *types.Type, s fmt.State, verb rune, mode int) { - fmt.Fprintf(s, "%v", t.Kind()) - } + ir.InstallTypeFormats() types.Dowidth = func(t *types.Type) {} for _, typ := range [...]struct { diff --git a/src/cmd/compile/internal/types/scope.go b/src/cmd/compile/internal/types/scope.go index 37ac90a0250b6..04ea3c325f05d 100644 --- a/src/cmd/compile/internal/types/scope.go +++ b/src/cmd/compile/internal/types/scope.go @@ -4,7 +4,10 @@ package types -import "cmd/internal/src" +import ( + "cmd/compile/internal/base" + "cmd/internal/src" +) // Declaration stack & operations @@ -56,7 +59,7 @@ func Popdcl() { d.sym = nil d.def = nil } - Fatalf("popdcl: no stack mark") + base.Fatalf("popdcl: no stack mark") } // Markdcl records the start of a new block scope for declarations. 
diff --git a/src/cmd/compile/internal/types/sym.go b/src/cmd/compile/internal/types/sym.go index 490222d843522..fcb095c53c985 100644 --- a/src/cmd/compile/internal/types/sym.go +++ b/src/cmd/compile/internal/types/sym.go @@ -5,6 +5,7 @@ package types import ( + "cmd/compile/internal/base" "cmd/internal/obj" "cmd/internal/src" "unicode" @@ -88,9 +89,9 @@ func (sym *Sym) Linksym() *obj.LSym { } if sym.Func() { // This is a function symbol. Mark it as "internal ABI". - return Ctxt.LookupABIInit(sym.LinksymName(), obj.ABIInternal, initPkg) + return base.Ctxt.LookupABIInit(sym.LinksymName(), obj.ABIInternal, initPkg) } - return Ctxt.LookupInit(sym.LinksymName(), initPkg) + return base.Ctxt.LookupInit(sym.LinksymName(), initPkg) } // Less reports whether symbol a is ordered before symbol b. diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index 2c42e5579d1e5..c5807af199ecc 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -231,7 +231,7 @@ func (t *Type) Pkg() *Pkg { case TINTER: return t.Extra.(*Interface).pkg default: - Fatalf("Pkg: unexpected kind: %v", t) + base.Fatalf("Pkg: unexpected kind: %v", t) return nil } } @@ -501,7 +501,7 @@ func New(et Kind) *Type { // NewArray returns a new fixed-length array Type. func NewArray(elem *Type, bound int64) *Type { if bound < 0 { - Fatalf("NewArray: invalid bound %v", bound) + base.Fatalf("NewArray: invalid bound %v", bound) } t := New(TARRAY) t.Extra = &Array{Elem: elem, Bound: bound} @@ -513,7 +513,7 @@ func NewArray(elem *Type, bound int64) *Type { func NewSlice(elem *Type) *Type { if t := elem.cache.slice; t != nil { if t.Elem() != elem { - Fatalf("elem mismatch") + base.Fatalf("elem mismatch") } return t } @@ -569,12 +569,12 @@ var NewPtrCacheEnabled = true // NewPtr returns the pointer type pointing to t. func NewPtr(elem *Type) *Type { if elem == nil { - Fatalf("NewPtr: pointer to elem Type is nil") + base.Fatalf("NewPtr: pointer to elem Type is nil") } if t := elem.cache.ptr; t != nil { if t.Elem() != elem { - Fatalf("NewPtr: elem mismatch") + base.Fatalf("NewPtr: elem mismatch") } return t } @@ -629,7 +629,7 @@ func SubstAny(t *Type, types *[]*Type) *Type { case TANY: if len(*types) == 0 { - Fatalf("substArgTypes: not enough argument types") + base.Fatalf("substArgTypes: not enough argument types") } t = (*types)[0] *types = (*types)[1:] @@ -730,7 +730,7 @@ func (t *Type) copy() *Type { x := *t.Extra.(*Array) nt.Extra = &x case TTUPLE, TSSA, TRESULTS: - Fatalf("ssa types cannot be copied") + base.Fatalf("ssa types cannot be copied") } // TODO(mdempsky): Find out why this is necessary and explain. if t.underlying == t { @@ -746,7 +746,7 @@ func (f *Field) Copy() *Field { func (t *Type) wantEtype(et Kind) { if t.kind != et { - Fatalf("want %v, but have %v", et, t) + base.Fatalf("want %v, but have %v", et, t) } } @@ -811,7 +811,7 @@ func (t *Type) Elem() *Type { case TMAP: return t.Extra.(*Map).Elem } - Fatalf("Type.Elem %s", t.kind) + base.Fatalf("Type.Elem %s", t.kind) return nil } @@ -850,7 +850,7 @@ func (t *Type) Fields() *Fields { Dowidth(t) return &t.Extra.(*Interface).Fields } - Fatalf("Fields: type %v does not have fields", t) + base.Fatalf("Fields: type %v does not have fields", t) return nil } @@ -874,7 +874,7 @@ func (t *Type) SetFields(fields []*Field) { // enforce that SetFields cannot be called once // t's width has been calculated. 
if t.WidthCalculated() { - Fatalf("SetFields of %v: width previously calculated", t) + base.Fatalf("SetFields of %v: width previously calculated", t) } t.wantEtype(TSTRUCT) for _, f := range fields { @@ -1223,7 +1223,7 @@ var unsignedEType = [...]Kind{ // ToUnsigned returns the unsigned equivalent of integer type t. func (t *Type) ToUnsigned() *Type { if !t.IsInteger() { - Fatalf("unsignedType(%v)", t) + base.Fatalf("unsignedType(%v)", t) } return Types[unsignedEType[t.kind]] } @@ -1385,7 +1385,7 @@ func (t *Type) NumComponents(countBlank componentsIncludeBlankFields) int64 { switch t.kind { case TSTRUCT: if t.IsFuncArgStruct() { - Fatalf("NumComponents func arg struct") + base.Fatalf("NumComponents func arg struct") } var n int64 for _, f := range t.FieldSlice() { @@ -1408,7 +1408,7 @@ func (t *Type) SoleComponent() *Type { switch t.kind { case TSTRUCT: if t.IsFuncArgStruct() { - Fatalf("SoleComponent func arg struct") + base.Fatalf("SoleComponent func arg struct") } if t.NumFields() != 1 { return nil diff --git a/src/cmd/compile/internal/types/utils.go b/src/cmd/compile/internal/types/utils.go index e8b1073818054..a1be77eef1040 100644 --- a/src/cmd/compile/internal/types/utils.go +++ b/src/cmd/compile/internal/types/utils.go @@ -15,51 +15,47 @@ const BADWIDTH = -1000000000 // They are here to break import cycles. // TODO(gri) eliminate these dependencies. var ( - Widthptr int - Dowidth func(*Type) - Fatalf func(string, ...interface{}) - Sconv func(*Sym, int, int) string // orig: func sconv(s *Sym, flag FmtFlag, mode fmtMode) string - Tconv func(*Type, int, int) string // orig: func tconv(t *Type, flag FmtFlag, mode fmtMode) string - FormatSym func(*Sym, fmt.State, rune, int) // orig: func symFormat(sym *Sym, s fmt.State, verb rune, mode fmtMode) - FormatType func(*Type, fmt.State, rune, int) // orig: func typeFormat(t *Type, s fmt.State, verb rune, mode fmtMode) - TypeLinkSym func(*Type) *obj.LSym - Ctxt *obj.Link - - FmtLeft int - FmtUnsigned int - FErr int + Widthptr int + Dowidth func(*Type) + SymString func(*Sym) string + TypeString func(*Type) string + TypeShortString func(*Type) string + TypeLongString func(*Type) string + FormatSym func(*Sym, fmt.State, rune) + FormatType func(*Type, fmt.State, rune) + TypeLinkSym func(*Type) *obj.LSym ) func (s *Sym) String() string { - return Sconv(s, 0, FErr) + return SymString(s) } func (sym *Sym) Format(s fmt.State, verb rune) { - FormatSym(sym, s, verb, FErr) + FormatSym(sym, s, verb) } func (t *Type) String() string { - // The implementation of tconv (including typefmt and fldconv) + // The implementation // must handle recursive types correctly. - return Tconv(t, 0, FErr) + return TypeString(t) } // ShortString generates a short description of t. // It is used in autogenerated method names, reflection, // and itab names. func (t *Type) ShortString() string { - return Tconv(t, FmtLeft, FErr) + return TypeShortString(t) } // LongString generates a complete description of t. // It is useful for reflection, // or when a unique fingerprint or hash of a type is required. 
func (t *Type) LongString() string { - return Tconv(t, FmtLeft|FmtUnsigned, FErr) + return TypeLongString(t) } func (t *Type) Format(s fmt.State, verb rune) { - FormatType(t, s, verb, FErr) + FormatType(t, s, verb) } type bitset8 uint8 From 3904a6282945276ec72683920c278b2e3141a1fe Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sun, 6 Dec 2020 12:38:11 -0500 Subject: [PATCH 132/474] [dev.regabi] cmd/compile: remove mode.Sprintf etc in printer This code is now hardly used and not worth the complexity. It also tangles together Nodes and Types in a way that keeps this code in package ir instead of package types. Passes buildall w/ toolstash -cmp. Change-Id: I2e829c1f6b602acbdc8ab4aac3b798f9ded762ef Reviewed-on: https://go-review.googlesource.com/c/go/+/275777 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/fmtmap_test.go | 1 - src/cmd/compile/internal/ir/fmt.go | 111 +++++++++-------------------- 2 files changed, 32 insertions(+), 80 deletions(-) diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go index 60b772e932570..5dd30e619baf6 100644 --- a/src/cmd/compile/fmtmap_test.go +++ b/src/cmd/compile/fmtmap_test.go @@ -43,7 +43,6 @@ var knownFormats = map[string]string{ "*cmd/compile/internal/types.Field %p": "", "*cmd/compile/internal/types.Field %v": "", "*cmd/compile/internal/types.Sym %+v": "", - "*cmd/compile/internal/types.Sym %0S": "", "*cmd/compile/internal/types.Sym %S": "", "*cmd/compile/internal/types.Sym %p": "", "*cmd/compile/internal/types.Sym %v": "", diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 88534864a9ec4..0bd0340af8acf 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -152,38 +152,6 @@ func (f FmtFlag) update(mode FmtMode) (FmtFlag, FmtMode) { return f, mode } -func (m FmtMode) Fprintf(s fmt.State, format string, args ...interface{}) { - m.prepareArgs(args) - fmt.Fprintf(s, format, args...) -} - -func (m FmtMode) Sprintf(format string, args ...interface{}) string { - m.prepareArgs(args) - return fmt.Sprintf(format, args...) -} - -func (m FmtMode) Sprint(args ...interface{}) string { - m.prepareArgs(args) - return fmt.Sprint(args...) -} - -func (m FmtMode) prepareArgs(args []interface{}) { - for i, arg := range args { - switch arg := arg.(type) { - case nil: - args[i] = "" // assume this was a node interface - case *types.Type: - args[i] = &fmtType{arg, m} - case *types.Sym: - args[i] = &fmtSym{arg, m} - case int32, int64, string, Op, Node, Nodes, types.Kind, constant.Value: - // OK: printing these types doesn't depend on mode - default: - base.Fatalf("mode.prepareArgs type %T", arg) - } - } -} - // Op var OpNames = []string{ @@ -316,15 +284,9 @@ func FmtConst(v constant.Value, flag FmtFlag) string { // the same name appears in an error message. var NumImport = make(map[string]int) -type fmtSym struct { - x *types.Sym - m FmtMode -} - -func (f *fmtSym) Format(s fmt.State, verb rune) { symFormat(f.x, s, verb, f.m) } - // "%S" suppresses qualifying with package -func symFormat(s *types.Sym, f fmt.State, verb rune, mode FmtMode) { +func symFormat(s *types.Sym, f fmt.State, verb rune) { + mode := FErr switch verb { case 'v', 'S': if verb == 'v' && f.Flag('+') { @@ -337,8 +299,6 @@ func symFormat(s *types.Sym, f fmt.State, verb rune, mode FmtMode) { } } -func smodeString(s *types.Sym, mode FmtMode) string { return sconv(s, 0, mode) } - // See #16897 for details about performance implications // before changing the implementation of sconv. 
func sconv(s *types.Sym, flag FmtFlag, mode FmtMode) string { @@ -421,25 +381,22 @@ func symfmt(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode FmtMode) { } if flag&FmtByte != 0 { - // FmtByte (hh) implies FmtShort (h) - // skip leading "type." in method name - name := s.Name - if i := strings.LastIndex(name, "."); i >= 0 { - name = name[i+1:] - } - - if mode == FDbg { - fmt.Fprintf(b, "@%q.%s", s.Pkg.Path, name) - return - } - - b.WriteString(name) + b.WriteString(methodSymName(s)) return } b.WriteString(s.Name) } +func methodSymName(s *types.Sym) string { + // Skip leading "type." in method name + name := s.Name + if i := strings.LastIndex(name, "."); i >= 0 { + name = name[i+1:] + } + return name +} + // Type var BasicTypeNames = []string{ @@ -485,24 +442,14 @@ func InstallTypeFormats() { types.TypeLongString = func(t *types.Type) string { return tconv(t, FmtLeft|FmtUnsigned, FErr) } - types.FormatSym = func(sym *types.Sym, s fmt.State, verb rune) { - symFormat(sym, s, verb, FErr) - } - types.FormatType = func(t *types.Type, s fmt.State, verb rune) { - typeFormat(t, s, verb, FErr) - } -} - -type fmtType struct { - x *types.Type - m FmtMode + types.FormatSym = symFormat + types.FormatType = typeFormat } -func (f *fmtType) Format(s fmt.State, verb rune) { typeFormat(f.x, s, verb, f.m) } - // "%L" print definition, not name // "%S" omit 'func' and receiver from function types, short type names -func typeFormat(t *types.Type, s fmt.State, verb rune, mode FmtMode) { +func typeFormat(t *types.Type, s fmt.State, verb rune) { + mode := FErr switch verb { case 'v', 'S', 'L': if verb == 'v' && s.Flag('+') { // %+v is debug format @@ -599,7 +546,8 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited } if t.Sym().Pkg == LocalPkg && t.Vargen != 0 { - b.WriteString(mode.Sprintf("%v·%d", t.Sym(), t.Vargen)) + sconv2(b, t.Sym(), 0, mode) + fmt.Fprintf(b, "·%d", t.Vargen) return } } @@ -818,9 +766,14 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited case types.Txxx: b.WriteString("Txxx") + default: - // Don't know how to handle - fall back to detailed prints. 
- b.WriteString(mode.Sprintf("%v <%v>", t.Kind(), t.Sym())) + // Don't know how to handle - fall back to detailed prints + b.WriteString(t.Kind().String()) + b.WriteString(" <") + sconv2(b, t.Sym(), 0, mode) + b.WriteString(">") + } } @@ -847,12 +800,12 @@ func fldconv(b *bytes.Buffer, f *types.Field, flag FmtFlag, mode FmtMode, visite if funarg != types.FunargNone { name = fmt.Sprint(f.Nname) } else if flag&FmtLong != 0 { - name = mode.Sprintf("%0S", s) + name = methodSymName(s) if !types.IsExported(name) && flag&FmtUnsigned == 0 { - name = smodeString(s, mode) // qualify non-exported names (used on structs, not on funarg) + name = sconv(s, 0, mode) // qualify non-exported names (used on structs, not on funarg) } } else { - name = smodeString(s, mode) + name = sconv(s, 0, mode) } } } @@ -1289,7 +1242,7 @@ func exprFmt(n Node, s fmt.State, prec int) { case OLITERAL: // this is a bit of a mess if !exportFormat && n.Sym() != nil { - fmt.Fprint(s, smodeString(n.Sym(), FErr)) + fmt.Fprint(s, n.Sym()) return } @@ -1331,7 +1284,7 @@ func exprFmt(n Node, s fmt.State, prec int) { case ODCLFUNC: if sym := n.Sym(); sym != nil { - fmt.Fprint(s, smodeString(sym, FErr)) + fmt.Fprint(s, sym) return } fmt.Fprintf(s, "") @@ -1345,11 +1298,11 @@ func exprFmt(n Node, s fmt.State, prec int) { } fallthrough case OPACK, ONONAME, OMETHEXPR: - fmt.Fprint(s, smodeString(n.Sym(), FErr)) + fmt.Fprint(s, n.Sym()) case OTYPE: if n.Type() == nil && n.Sym() != nil { - fmt.Fprint(s, smodeString(n.Sym(), FErr)) + fmt.Fprint(s, n.Sym()) return } fmt.Fprintf(s, "%v", n.Type()) From 70155cca81d061686d4f23b7ad59fe8213e87f9f Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sun, 6 Dec 2020 13:17:03 -0500 Subject: [PATCH 133/474] [dev.regabi] cmd/compile: untangle FmtFlag, FmtMode It turns out that the FmtFlag is really only tracking the FmtLong and FmtShort bits, and the others simply mirror the state of the FmtMode and are copied out and back in repeatedly. Simplify to FmtFlag being the verb itself ('S', 'L', or 'v'). Now there is only one formatting enumeration, making it a bit easier to understand what's going on. Passes buildall w/ toolstash -cmp. Change-Id: I85bde2183eb22228fcf46d19d003401d588d9825 Reviewed-on: https://go-review.googlesource.com/c/go/+/275778 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/const.go | 2 +- src/cmd/compile/internal/ir/fmt.go | 181 ++++++++------------------- 2 files changed, 53 insertions(+), 130 deletions(-) diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 6cd414a41931a..304c9aa2c3174 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -384,7 +384,7 @@ func overflow(v constant.Value, t *types.Type) bool { return true } if doesoverflow(v, t) { - base.Errorf("constant %v overflows %v", ir.FmtConst(v, 0), t) + base.Errorf("constant %v overflows %v", ir.FmtConst(v, false), t) return true } return false diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 0bd0340af8acf..117c7417d2c8e 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -88,70 +88,6 @@ const ( FTypeIdName // same as FTypeId, but use package name instead of prefix ) -// A FmtFlag value is a set of flags (or 0). -// They control how the Xconv functions format their values. -// See the respective function's documentation for details. 
-type FmtFlag int - -const ( // fmt.Format flag/prec or verb - FmtLeft FmtFlag = 1 << iota // '-' - FmtSharp // '#' - FmtSign // '+' - FmtUnsigned // internal use only (historic: u flag) - FmtShort // verb == 'S' (historic: h flag) - FmtLong // verb == 'L' (historic: l flag) - FmtComma // '.' (== hasPrec) (historic: , flag) - FmtByte // '0' (historic: hh flag) -) - -// fmtFlag computes the (internal) FmtFlag -// value given the fmt.State and format verb. -func fmtFlag(s fmt.State, verb rune) FmtFlag { - var flag FmtFlag - if s.Flag('-') { - flag |= FmtLeft - } - if s.Flag('#') { - flag |= FmtSharp - } - if s.Flag('+') { - flag |= FmtSign - } - if s.Flag(' ') { - base.Fatalf("FmtUnsigned in format string") - } - if _, ok := s.Precision(); ok { - flag |= FmtComma - } - if s.Flag('0') { - flag |= FmtByte - } - switch verb { - case 'S': - flag |= FmtShort - case 'L': - flag |= FmtLong - } - return flag -} - -// update returns the results of applying f to mode. -func (f FmtFlag) update(mode FmtMode) (FmtFlag, FmtMode) { - switch { - case f&FmtSign != 0: - mode = FDbg - case f&FmtSharp != 0: - // ignore (textual export format no longer supported) - case f&FmtUnsigned != 0: - mode = FTypeIdName - case f&FmtLeft != 0: - mode = FTypeId - } - - f &^= FmtSharp | FmtLeft | FmtSign - return f, mode -} - // Op var OpNames = []string{ @@ -243,8 +179,8 @@ func (o Op) Format(s fmt.State, verb rune) { // Val -func FmtConst(v constant.Value, flag FmtFlag) string { - if flag&FmtSharp == 0 && v.Kind() == constant.Complex { +func FmtConst(v constant.Value, sharp bool) string { + if !sharp && v.Kind() == constant.Complex { real, imag := constant.Real(v), constant.Imag(v) var re string @@ -292,7 +228,7 @@ func symFormat(s *types.Sym, f fmt.State, verb rune) { if verb == 'v' && f.Flag('+') { mode = FDbg } - fmt.Fprint(f, sconv(s, fmtFlag(f, verb), mode)) + fmt.Fprint(f, sconv(s, verb, mode)) default: fmt.Fprintf(f, "%%!%c(*types.Sym=%p)", verb, s) @@ -301,8 +237,8 @@ func symFormat(s *types.Sym, f fmt.State, verb rune) { // See #16897 for details about performance implications // before changing the implementation of sconv. 
-func sconv(s *types.Sym, flag FmtFlag, mode FmtMode) string { - if flag&FmtLong != 0 { +func sconv(s *types.Sym, verb rune, mode FmtMode) string { + if verb == 'L' { panic("linksymfmt") } @@ -317,13 +253,12 @@ func sconv(s *types.Sym, flag FmtFlag, mode FmtMode) string { buf.Reset() defer fmtBufferPool.Put(buf) - flag, mode = flag.update(mode) - symfmt(buf, s, flag, mode) + symfmt(buf, s, verb, mode) return types.InternString(buf.Bytes()) } -func sconv2(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode FmtMode) { - if flag&FmtLong != 0 { +func sconv2(b *bytes.Buffer, s *types.Sym, verb rune, mode FmtMode) { + if verb == 'L' { panic("linksymfmt") } if s == nil { @@ -335,12 +270,11 @@ func sconv2(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode FmtMode) { return } - flag, mode = flag.update(mode) - symfmt(b, s, flag, mode) + symfmt(b, s, verb, mode) } -func symfmt(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode FmtMode) { - if flag&FmtShort == 0 { +func symfmt(b *bytes.Buffer, s *types.Sym, verb rune, mode FmtMode) { + if verb != 'S' { switch mode { case FErr: // This is for the user if s.Pkg == BuiltinPkg || s.Pkg == LocalPkg { @@ -380,11 +314,6 @@ func symfmt(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode FmtMode) { } } - if flag&FmtByte != 0 { - b.WriteString(methodSymName(s)) - return - } - b.WriteString(s.Name) } @@ -437,10 +366,10 @@ func InstallTypeFormats() { return tconv(t, 0, FErr) } types.TypeShortString = func(t *types.Type) string { - return tconv(t, FmtLeft, FErr) + return tconv(t, 0, FTypeId) } types.TypeLongString = func(t *types.Type) string { - return tconv(t, FmtLeft|FmtUnsigned, FErr) + return tconv(t, 0, FTypeIdName) } types.FormatSym = symFormat types.FormatType = typeFormat @@ -455,18 +384,21 @@ func typeFormat(t *types.Type, s fmt.State, verb rune) { if verb == 'v' && s.Flag('+') { // %+v is debug format mode = FDbg } - fmt.Fprint(s, tconv(t, fmtFlag(s, verb), mode)) + if verb == 'S' && s.Flag('-') { // %-S is special case for receiver - short typeid format + mode = FTypeId + } + fmt.Fprint(s, tconv(t, verb, mode)) default: fmt.Fprintf(s, "%%!%c(*Type=%p)", verb, t) } } -func tconv(t *types.Type, flag FmtFlag, mode FmtMode) string { +func tconv(t *types.Type, verb rune, mode FmtMode) string { buf := fmtBufferPool.Get().(*bytes.Buffer) buf.Reset() defer fmtBufferPool.Put(buf) - tconv2(buf, t, flag, mode, nil) + tconv2(buf, t, verb, mode, nil) return types.InternString(buf.Bytes()) } @@ -474,7 +406,7 @@ func tconv(t *types.Type, flag FmtFlag, mode FmtMode) string { // flag and mode control exactly what is printed. // Any types x that are already in the visited map get printed as @%d where %d=visited[x]. // See #16897 before changing the implementation of tconv. -func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited map[*types.Type]int) { +func tconv2(b *bytes.Buffer, t *types.Type, verb rune, mode FmtMode, visited map[*types.Type]int) { if off, ok := visited[t]; ok { // We've seen this type before, so we're trying to print it recursively. // Print a reference to it instead. @@ -507,17 +439,13 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited return } - flag, mode = flag.update(mode) - if mode == FTypeIdName { - flag |= FmtUnsigned - } if t == types.ByteType || t == types.RuneType { // in %-T mode collapse rune and byte with their originals. 
switch mode { case FTypeIdName, FTypeId: t = types.Types[t.Kind()] default: - sconv2(b, t.Sym(), FmtShort, mode) + sconv2(b, t.Sym(), 'S', mode) return } } @@ -527,32 +455,32 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited } // Unless the 'L' flag was specified, if the type has a name, just print that name. - if flag&FmtLong == 0 && t.Sym() != nil && t != types.Types[t.Kind()] { + if verb != 'L' && t.Sym() != nil && t != types.Types[t.Kind()] { switch mode { case FTypeId, FTypeIdName: - if flag&FmtShort != 0 { + if verb == 'S' { if t.Vargen != 0 { - sconv2(b, t.Sym(), FmtShort, mode) + sconv2(b, t.Sym(), 'S', mode) fmt.Fprintf(b, "·%d", t.Vargen) return } - sconv2(b, t.Sym(), FmtShort, mode) + sconv2(b, t.Sym(), 'S', mode) return } if mode == FTypeIdName { - sconv2(b, t.Sym(), FmtUnsigned, mode) + sconv2(b, t.Sym(), 'v', FTypeIdName) return } if t.Sym().Pkg == LocalPkg && t.Vargen != 0 { - sconv2(b, t.Sym(), 0, mode) + sconv2(b, t.Sym(), 'v', mode) fmt.Fprintf(b, "·%d", t.Vargen) return } } - sconv2(b, t.Sym(), 0, mode) + sconv2(b, t.Sym(), 'v', mode) return } @@ -581,7 +509,7 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited if mode == FDbg { b.WriteString(t.Kind().String()) b.WriteByte('-') - tconv2(b, t, flag, FErr, visited) + tconv2(b, t, 'v', FErr, visited) return } @@ -603,12 +531,12 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited b.WriteByte('*') switch mode { case FTypeId, FTypeIdName: - if flag&FmtShort != 0 { - tconv2(b, t.Elem(), FmtShort, mode, visited) + if verb == 'S' { + tconv2(b, t.Elem(), 'S', mode, visited) return } } - tconv2(b, t.Elem(), 0, mode, visited) + tconv2(b, t.Elem(), 'v', mode, visited) case types.TARRAY: b.WriteByte('[') @@ -662,15 +590,14 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited // Wrong interface definitions may have types lacking a symbol. 
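The exported/unexported split in the interface case above decides whether a method name prints qualified. The standard library exposes the same predicate as go/token.IsExported; a standalone sketch of the rule (the package and method names are made up):

    package main

    import (
        "fmt"
        "go/token"
    )

    // methodLabel prints an interface method the way tconv2 does:
    // bare when exported, package-qualified when not.
    func methodLabel(pkg, name string) string {
        if token.IsExported(name) {
            return name
        }
        return pkg + "." + name
    }

    func main() {
        fmt.Println(methodLabel("io", "Read"))   // Read
        fmt.Println(methodLabel("http", "send")) // http.send
    }
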
break case types.IsExported(f.Sym.Name): - sconv2(b, f.Sym, FmtShort, mode) + sconv2(b, f.Sym, 'S', mode) default: - flag1 := FmtLeft - if flag&FmtUnsigned != 0 { - flag1 = FmtUnsigned + if mode != FTypeIdName { + mode = FTypeId } - sconv2(b, f.Sym, flag1, mode) + sconv2(b, f.Sym, 'v', mode) } - tconv2(b, f.Type, FmtShort, mode, visited) + tconv2(b, f.Type, 'S', mode, visited) } if t.NumFields() != 0 { b.WriteByte(' ') @@ -678,7 +605,7 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited b.WriteByte('}') case types.TFUNC: - if flag&FmtShort != 0 { + if verb == 'S' { // no leading func } else { if t.Recv() != nil { @@ -726,17 +653,17 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited if funarg := t.StructType().Funarg; funarg != types.FunargNone { b.WriteByte('(') - var flag1 FmtFlag + fieldVerb := 'v' switch mode { case FTypeId, FTypeIdName, FErr: // no argument names on function signature, and no "noescape"/"nosplit" tags - flag1 = FmtShort + fieldVerb = 'S' } for i, f := range t.Fields().Slice() { if i != 0 { b.WriteString(", ") } - fldconv(b, f, flag1, mode, visited, funarg) + fldconv(b, f, fieldVerb, mode, visited, funarg) } b.WriteByte(')') } else { @@ -746,7 +673,7 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited b.WriteByte(';') } b.WriteByte(' ') - fldconv(b, f, FmtLong, mode, visited, funarg) + fldconv(b, f, 'L', mode, visited, funarg) } if t.NumFields() != 0 { b.WriteByte(' ') @@ -758,7 +685,7 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited b.WriteString("undefined") if t.Sym() != nil { b.WriteByte(' ') - sconv2(b, t.Sym(), 0, mode) + sconv2(b, t.Sym(), 'v', mode) } case types.TUNSAFEPTR: @@ -771,24 +698,20 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited // Don't know how to handle - fall back to detailed prints b.WriteString(t.Kind().String()) b.WriteString(" <") - sconv2(b, t.Sym(), 0, mode) + sconv2(b, t.Sym(), 'v', mode) b.WriteString(">") } } -func fldconv(b *bytes.Buffer, f *types.Field, flag FmtFlag, mode FmtMode, visited map[*types.Type]int, funarg types.Funarg) { +func fldconv(b *bytes.Buffer, f *types.Field, verb rune, mode FmtMode, visited map[*types.Type]int, funarg types.Funarg) { if f == nil { b.WriteString("") return } - flag, mode = flag.update(mode) - if mode == FTypeIdName { - flag |= FmtUnsigned - } var name string - if flag&FmtShort == 0 { + if verb != 'S' { s := f.Sym // Take the name from the original. 
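The fldconv and exprFmt hunks below replace the old %0S verb with an explicit methodSymName call: the method's short name is simply the text after the last dot of the symbol name. A sketch of that rule (inputs illustrative):

    package main

    import (
        "fmt"
        "strings"
    )

    // trimMethodName mirrors methodSymName: keep only the part of the
    // symbol name after the final dot.
    func trimMethodName(name string) string {
        if i := strings.LastIndex(name, "."); i >= 0 {
            return name[i+1:]
        }
        return name
    }

    func main() {
        fmt.Println(trimMethodName("foo.Bar")) // Bar
        fmt.Println(trimMethodName("Baz"))     // Baz
    }
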
@@ -799,9 +722,9 @@ func fldconv(b *bytes.Buffer, f *types.Field, flag FmtFlag, mode FmtMode, visite if s != nil && f.Embedded == 0 { if funarg != types.FunargNone { name = fmt.Sprint(f.Nname) - } else if flag&FmtLong != 0 { + } else if verb == 'L' { name = methodSymName(s) - if !types.IsExported(name) && flag&FmtUnsigned == 0 { + if !types.IsExported(name) && mode != FTypeIdName { name = sconv(s, 0, mode) // qualify non-exported names (used on structs, not on funarg) } } else { @@ -826,7 +749,7 @@ func fldconv(b *bytes.Buffer, f *types.Field, flag FmtFlag, mode FmtMode, visite tconv2(b, f.Type, 0, mode, visited) } - if flag&FmtShort == 0 && funarg == types.FunargNone && f.Note != "" { + if verb != 'S' && funarg == types.FunargNone && f.Note != "" { b.WriteString(" ") b.WriteString(strconv.Quote(f.Note)) } @@ -1275,7 +1198,7 @@ func exprFmt(n Node, s fmt.State, prec int) { fmt.Fprintf(s, "'\\U%08x'", uint64(x)) } } else { - fmt.Fprint(s, FmtConst(n.Val(), fmtFlag(s, 'v'))) + fmt.Fprint(s, FmtConst(n.Val(), s.Flag('#'))) } if needUnparen { @@ -1415,7 +1338,7 @@ func exprFmt(n Node, s fmt.State, prec int) { fmt.Fprint(s, ".") return } - fmt.Fprintf(s, ".%0S", n.Sym()) + fmt.Fprintf(s, ".%s", methodSymName(n.Sym())) case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH: exprFmt(n.Left(), s, nprec) @@ -1423,7 +1346,7 @@ func exprFmt(n Node, s fmt.State, prec int) { fmt.Fprint(s, ".") return } - fmt.Fprintf(s, ".%0S", n.Sym()) + fmt.Fprintf(s, ".%s", methodSymName(n.Sym())) case ODOTTYPE, ODOTTYPE2: exprFmt(n.Left(), s, nprec) From bb4a37bd9316a04c45845634a721ef44d8b5b787 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sun, 6 Dec 2020 13:54:50 -0500 Subject: [PATCH 134/474] [dev.regabi] cmd/compile: move Type, Sym printing to package types [generated] Move the printing of types.Type and types.Sym out of ir into package types, where it properly belongs. This wasn't done originally (when the code was in gc) because the Type and Sym printing was a bit tangled up with the Node printing. But now they are untangled and can move into the correct package. This CL is automatically generated. A followup CL will clean up a little bit more by hand. Passes buildall w/ toolstash -cmp. 
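In caller terms the move reads as below; both spellings appear verbatim in the diffs that follow, and the mode renames come from the rf script (fmtGo and friends are unexported, so they are visible only inside package types):

    Before (package gc, printing helpers in package ir):
        ir.LocalPkg.Lookup("main")
        ir.BuiltinPkg.Lookup("error")
        ir.OrigSym(f.Sym)
    After (helpers moved to package types):
        types.LocalPkg.Lookup("main")
        types.BuiltinPkg.Lookup("error")
        types.OrigSym(f.Sym)
    Mode renames inside types: FErr -> fmtGo, FDbg -> fmtDebug,
    FTypeId -> fmtTypeID, FTypeIdName -> fmtTypeIDName.
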
[git-generate] cd src/cmd/compile/internal/ir rf ' mv FmtMode fmtMode mv FErr fmtGo mv FDbg fmtDebug mv FTypeId fmtTypeID mv FTypeIdName fmtTypeIDName mv methodSymName SymMethodName mv BuiltinPkg LocalPkg BlankSym OrigSym NumImport \ fmtMode fmtGo symFormat sconv sconv2 symfmt SymMethodName \ BasicTypeNames fmtBufferPool InstallTypeFormats typeFormat tconv tconv2 fldconv FmtConst \ typefmt.go mv typefmt.go cmd/compile/internal/types ' cd ../types mv typefmt.go fmt.go Change-Id: I6f3fd818323733ab8446f00594937c1628760b27 Reviewed-on: https://go-review.googlesource.com/c/go/+/275779 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/align.go | 2 +- src/cmd/compile/internal/gc/bimport.go | 3 +- src/cmd/compile/internal/gc/const.go | 2 +- src/cmd/compile/internal/gc/dcl.go | 14 +- src/cmd/compile/internal/gc/embed.go | 8 +- src/cmd/compile/internal/gc/export.go | 4 +- src/cmd/compile/internal/gc/gen.go | 2 +- src/cmd/compile/internal/gc/go.go | 4 +- src/cmd/compile/internal/gc/iexport.go | 10 +- src/cmd/compile/internal/gc/iimport.go | 4 +- src/cmd/compile/internal/gc/init.go | 2 +- src/cmd/compile/internal/gc/inl.go | 2 +- src/cmd/compile/internal/gc/main.go | 28 +- src/cmd/compile/internal/gc/noder.go | 10 +- src/cmd/compile/internal/gc/obj.go | 10 +- src/cmd/compile/internal/gc/reflect.go | 12 +- src/cmd/compile/internal/gc/sinit.go | 2 +- src/cmd/compile/internal/gc/ssa.go | 4 +- src/cmd/compile/internal/gc/subr.go | 8 +- src/cmd/compile/internal/gc/typecheck.go | 12 +- src/cmd/compile/internal/gc/universe.go | 28 +- src/cmd/compile/internal/gc/walk.go | 2 +- src/cmd/compile/internal/ir/fmt.go | 656 +----------------- src/cmd/compile/internal/ir/ir.go | 7 - src/cmd/compile/internal/ir/node.go | 28 - src/cmd/compile/internal/ssa/export_test.go | 2 +- src/cmd/compile/internal/types/fmt.go | 694 ++++++++++++++++++++ 27 files changed, 786 insertions(+), 774 deletions(-) create mode 100644 src/cmd/compile/internal/types/fmt.go diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go index af426f5b245de..212e4c46aea80 100644 --- a/src/cmd/compile/internal/gc/align.go +++ b/src/cmd/compile/internal/gc/align.go @@ -193,7 +193,7 @@ func findTypeLoop(t *types.Type, path *[]*types.Type) bool { // Type imported from package, so it can't be part of // a type loop (otherwise that package should have // failed to compile). 
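findTypeLoop, whose hunk this is, diagnoses value-recursive types; imported types can be skipped because an imported package has already compiled successfully. In user terms, a sketch of what is rejected versus accepted:

    package main

    // A type loop makes a type's size depend on itself:
    //
    //     type Bad struct{ next Bad } // rejected: invalid recursive type
    //
    // Indirection breaks the loop, since a pointer's size is fixed:
    type Good struct{ next *Good }

    func main() {
        g := Good{next: &Good{}}
        _ = g.next
    }
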
- if t.Sym().Pkg != ir.LocalPkg { + if t.Sym().Pkg != types.LocalPkg { return false } diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go index c0c18e728e631..5a7018d8e6187 100644 --- a/src/cmd/compile/internal/gc/bimport.go +++ b/src/cmd/compile/internal/gc/bimport.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/ir" + "cmd/compile/internal/types" "cmd/internal/src" ) @@ -15,5 +16,5 @@ func npos(pos src.XPos, n ir.Node) ir.Node { } func builtinCall(op ir.Op) ir.Node { - return ir.Nod(ir.OCALL, mkname(ir.BuiltinPkg.Lookup(ir.OpNames[op])), nil) + return ir.Nod(ir.OCALL, mkname(types.BuiltinPkg.Lookup(ir.OpNames[op])), nil) } diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 304c9aa2c3174..80799580c6d1f 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -384,7 +384,7 @@ func overflow(v constant.Value, t *types.Type) bool { return true } if doesoverflow(v, t) { - base.Errorf("constant %v overflows %v", ir.FmtConst(v, false), t) + base.Errorf("constant %v overflows %v", types.FmtConst(v, false), t) return true } return false diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index a77c1aed45554..1c23c5a92f635 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -66,7 +66,7 @@ func declare(n *ir.Name, ctxt ir.Class) { s := n.Sym() // kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later. - if !inimport && !typecheckok && s.Pkg != ir.LocalPkg { + if !inimport && !typecheckok && s.Pkg != types.LocalPkg { base.ErrorfAt(n.Pos(), "cannot declare name %v", s) } @@ -253,7 +253,7 @@ func oldname(s *types.Sym) ir.Node { // but it reports an error if sym is from another package and not exported. 
func importName(sym *types.Sym) ir.Node { n := oldname(sym) - if !types.IsExported(sym.Name) && sym.Pkg != ir.LocalPkg { + if !types.IsExported(sym.Name) && sym.Pkg != types.LocalPkg { n.SetDiag(true) base.Errorf("cannot refer to unexported name %s.%s", sym.Pkg.Name, sym.Name) } @@ -512,7 +512,7 @@ func tostruct(l []*ir.Field) *types.Type { checkdupfields("field", fields) base.Pos = lno - return types.NewStruct(ir.LocalPkg, fields) + return types.NewStruct(types.LocalPkg, fields) } func tointerface(nmethods []*ir.Field) *types.Type { @@ -533,7 +533,7 @@ func tointerface(nmethods []*ir.Field) *types.Type { } base.Pos = lno - return types.NewInterface(ir.LocalPkg, methods) + return types.NewInterface(types.LocalPkg, methods) } func fakeRecv() *ir.Field { @@ -585,14 +585,14 @@ func functype(nrecv *ir.Field, nparams, nresults []*ir.Field) *types.Type { recv = funarg(nrecv) } - t := types.NewSignature(ir.LocalPkg, recv, funargs(nparams), funargs(nresults)) + t := types.NewSignature(types.LocalPkg, recv, funargs(nparams), funargs(nresults)) checkdupfields("argument", t.Recvs().FieldSlice(), t.Params().FieldSlice(), t.Results().FieldSlice()) return t } func hasNamedResults(fn *ir.Func) bool { typ := fn.Type() - return typ.NumResults() > 0 && ir.OrigSym(typ.Results().Field(0).Sym) != nil + return typ.NumResults() > 0 && types.OrigSym(typ.Results().Field(0).Sym) != nil } // methodSym returns the method symbol representing a method name @@ -703,7 +703,7 @@ func addmethod(n *ir.Func, msym *types.Sym, t *types.Type, local, nointerface bo return nil } - if local && mt.Sym().Pkg != ir.LocalPkg { + if local && mt.Sym().Pkg != types.LocalPkg { base.Errorf("cannot define new methods on non-local type %v", mt) return nil } diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go index d6e42e4f03edf..7664bde1c5878 100644 --- a/src/cmd/compile/internal/gc/embed.go +++ b/src/cmd/compile/internal/gc/embed.go @@ -131,18 +131,18 @@ func varEmbed(p *noder, names []ir.Node, typ ir.Ntype, exprs []ir.Node, embeds [ // can't tell whether "string" and "byte" really mean "string" and "byte". // The result must be confirmed later, after type checking, using embedKind. func embedKindApprox(typ ir.Node) int { - if typ.Sym() != nil && typ.Sym().Name == "FS" && (typ.Sym().Pkg.Path == "embed" || (typ.Sym().Pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "embed")) { + if typ.Sym() != nil && typ.Sym().Name == "FS" && (typ.Sym().Pkg.Path == "embed" || (typ.Sym().Pkg == types.LocalPkg && base.Ctxt.Pkgpath == "embed")) { return embedFiles } // These are not guaranteed to match only string and []byte - // maybe the local package has redefined one of those words. // But it's the best we can do now during the noder. // The stricter check happens later, in initEmbed calling embedKind. - if typ.Sym() != nil && typ.Sym().Name == "string" && typ.Sym().Pkg == ir.LocalPkg { + if typ.Sym() != nil && typ.Sym().Name == "string" && typ.Sym().Pkg == types.LocalPkg { return embedString } if typ, ok := typ.(*ir.SliceType); ok { - if sym := typ.Elem.Sym(); sym != nil && sym.Name == "byte" && sym.Pkg == ir.LocalPkg { + if sym := typ.Elem.Sym(); sym != nil && sym.Name == "byte" && sym.Pkg == types.LocalPkg { return embedBytes } } @@ -151,7 +151,7 @@ func embedKindApprox(typ ir.Node) int { // embedKind determines the kind of embedding variable. 
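The three embed kinds named here match the only variable types //go:embed accepts. As ordinary user code (hello.txt is an assumed file sitting next to the source):

    package main

    import (
        "embed"
        "fmt"
    )

    //go:embed hello.txt
    var asString string // embedString

    //go:embed hello.txt
    var asBytes []byte // embedBytes

    //go:embed hello.txt
    var asFS embed.FS // embedFiles

    func main() {
        fmt.Println(asString == string(asBytes)) // true: same contents
        data, err := asFS.ReadFile("hello.txt")
        fmt.Println(string(data), err)
    }
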
func embedKind(typ *types.Type) int { - if typ.Sym() != nil && typ.Sym().Name == "FS" && (typ.Sym().Pkg.Path == "embed" || (typ.Sym().Pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "embed")) { + if typ.Sym() != nil && typ.Sym().Name == "FS" && (typ.Sym().Pkg.Path == "embed" || (typ.Sym().Pkg == types.LocalPkg && base.Ctxt.Pkgpath == "embed")) { return embedFiles } if typ == types.Types[types.TSTRING] { diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index b632a15865d0a..593dd3b2f83c7 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -42,7 +42,7 @@ func initname(s string) bool { } func autoexport(n *ir.Name, ctxt ir.Class) { - if n.Sym().Pkg != ir.LocalPkg { + if n.Sym().Pkg != types.LocalPkg { return } if (ctxt != ir.PEXTERN && ctxt != ir.PFUNC) || dclcontext != ir.PEXTERN { @@ -202,7 +202,7 @@ func dumpasmhdr() { if err != nil { base.Fatalf("%v", err) } - fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", ir.LocalPkg.Name) + fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", types.LocalPkg.Name) for _, n := range asmlist { if n.Sym().IsBlank() { continue diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go index 0d3f9392fbc0e..39e94259786bd 100644 --- a/src/cmd/compile/internal/gc/gen.go +++ b/src/cmd/compile/internal/gc/gen.go @@ -66,7 +66,7 @@ func tempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name { s := &types.Sym{ Name: autotmpname(len(curfn.Dcl)), - Pkg: ir.LocalPkg, + Pkg: types.LocalPkg, } n := ir.NewNameAt(pos, s) s.Def = n diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index c4b9c185dcb37..041073f1173a9 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -37,7 +37,7 @@ var ( // isRuntimePkg reports whether p is package runtime. func isRuntimePkg(p *types.Pkg) bool { - if base.Flag.CompilingRuntime && p == ir.LocalPkg { + if base.Flag.CompilingRuntime && p == types.LocalPkg { return true } return p.Path == "runtime" @@ -45,7 +45,7 @@ func isRuntimePkg(p *types.Pkg) bool { // isReflectPkg reports whether p is package reflect. func isReflectPkg(p *types.Pkg) bool { - if p == ir.LocalPkg { + if p == types.LocalPkg { return base.Ctxt.Pkgpath == "reflect" } return p.Path == "reflect" diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 003cf3b446d18..b1cc9a3dd97e8 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -322,7 +322,7 @@ func (w *exportWriter) writeIndex(index map[*types.Sym]uint64, mainIndex bool) { // we reference, even if we're not exporting (or reexporting) // any symbols from it. if mainIndex { - pkgSyms[ir.LocalPkg] = nil + pkgSyms[types.LocalPkg] = nil for pkg := range w.p.allPkgs { pkgSyms[pkg] = nil } @@ -402,7 +402,7 @@ func (p *iexporter) pushDecl(n *ir.Name) { } // Don't export predeclared declarations. 
- if n.Sym().Pkg == ir.BuiltinPkg || n.Sym().Pkg == unsafepkg { + if n.Sym().Pkg == types.BuiltinPkg || n.Sym().Pkg == unsafepkg { return } @@ -596,7 +596,7 @@ func (w *exportWriter) selector(s *types.Sym) { } else { pkg := w.currPkg if types.IsExported(name) { - pkg = ir.LocalPkg + pkg = types.LocalPkg } if s.Pkg != pkg { base.Fatalf("package mismatch in selector: %v in package %q, but want %q", s, s.Pkg.Path, pkg.Path) @@ -637,7 +637,7 @@ func (w *exportWriter) startType(k itag) { func (w *exportWriter) doTyp(t *types.Type) { if t.Sym() != nil { - if t.Sym().Pkg == ir.BuiltinPkg || t.Sym().Pkg == unsafepkg { + if t.Sym().Pkg == types.BuiltinPkg || t.Sym().Pkg == unsafepkg { base.Fatalf("builtin type missing from typIndex: %v", t) } @@ -748,7 +748,7 @@ func (w *exportWriter) paramList(fs []*types.Field) { func (w *exportWriter) param(f *types.Field) { w.pos(f.Pos) - w.localIdent(ir.OrigSym(f.Sym), 0) + w.localIdent(types.OrigSym(f.Sym), 0) w.typ(f.Type) } diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 1d9baed5ad812..859263c83f337 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -148,7 +148,7 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) if pkg.Name == "" { pkg.Name = pkgName pkg.Height = pkgHeight - ir.NumImport[pkgName]++ + types.NumImport[pkgName]++ // TODO(mdempsky): This belongs somewhere else. pkg.Lookup("_").Def = ir.BlankNode @@ -437,7 +437,7 @@ func (r *importReader) ident() *types.Sym { } pkg := r.currPkg if types.IsExported(name) { - pkg = ir.LocalPkg + pkg = types.LocalPkg } return pkg.Lookup(name) } diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index dc825b24218a3..e0907f952cdf2 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -96,7 +96,7 @@ func fninit(n []ir.Node) { fns = append(fns, s.Linksym()) } - if len(deps) == 0 && len(fns) == 0 && ir.LocalPkg.Name != "main" && ir.LocalPkg.Name != "runtime" { + if len(deps) == 0 && len(fns) == 0 && types.LocalPkg.Name != "main" && types.LocalPkg.Name != "runtime" { return // nothing to initialize } diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 840285242404c..77fbf7c802700 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -85,7 +85,7 @@ func typecheckinl(fn *ir.Func) { // the ->inl of a local function has been typechecked before caninl copied it. pkg := fnpkg(fn.Nname) - if pkg == ir.LocalPkg || pkg == nil { + if pkg == types.LocalPkg || pkg == nil { return // typecheckinl on local function } diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index a40671bccf818..15659dc7fd2f8 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -77,17 +77,17 @@ func Main(archInit func(*Arch)) { // See bugs 31188 and 21945 (CLs 170638, 98075, 72371). base.Ctxt.UseBASEntries = base.Ctxt.Headtype != objabi.Hdarwin - ir.LocalPkg = types.NewPkg("", "") - ir.LocalPkg.Prefix = "\"\"" + types.LocalPkg = types.NewPkg("", "") + types.LocalPkg.Prefix = "\"\"" // We won't know localpkg's height until after import // processing. In the mean time, set to MaxPkgHeight to ensure // height comparisons at least work until then. 
- ir.LocalPkg.Height = types.MaxPkgHeight + types.LocalPkg.Height = types.MaxPkgHeight // pseudo-package, for scoping - ir.BuiltinPkg = types.NewPkg("go.builtin", "") // TODO(gri) name this package go.builtin? - ir.BuiltinPkg.Prefix = "go.builtin" // not go%2ebuiltin + types.BuiltinPkg = types.NewPkg("go.builtin", "") // TODO(gri) name this package go.builtin? + types.BuiltinPkg.Prefix = "go.builtin" // not go%2ebuiltin // pseudo-package, accessed by import "unsafe" unsafepkg = types.NewPkg("unsafe", "unsafe") @@ -212,7 +212,7 @@ func Main(archInit func(*Arch)) { // would lead to import cycles) types.Widthptr = Widthptr types.Dowidth = dowidth - ir.InstallTypeFormats() + types.InstallTypeFormats() types.TypeLinkSym = func(t *types.Type) *obj.LSym { return typenamesym(t).Linksym() } @@ -922,14 +922,14 @@ func pkgnotused(lineno src.XPos, path string, name string) { } func mkpackage(pkgname string) { - if ir.LocalPkg.Name == "" { + if types.LocalPkg.Name == "" { if pkgname == "_" { base.Errorf("invalid package name _") } - ir.LocalPkg.Name = pkgname + types.LocalPkg.Name = pkgname } else { - if pkgname != ir.LocalPkg.Name { - base.Errorf("package %s; expected %s", pkgname, ir.LocalPkg.Name) + if pkgname != types.LocalPkg.Name { + base.Errorf("package %s; expected %s", pkgname, types.LocalPkg.Name) } } } @@ -942,7 +942,7 @@ func clearImports() { } var unused []importedPkg - for _, s := range ir.LocalPkg.Syms { + for _, s := range types.LocalPkg.Syms { n := ir.AsNode(s.Def) if n == nil { continue @@ -1046,7 +1046,7 @@ func recordPackageName() { // together two package main archives. So allow dups. s.Set(obj.AttrDuplicateOK, true) base.Ctxt.Data = append(base.Ctxt.Data, s) - s.P = []byte(ir.LocalPkg.Name) + s.P = []byte(types.LocalPkg.Name) } // currentLang returns the current language version. @@ -1073,9 +1073,9 @@ var langWant lang func langSupported(major, minor int, pkg *types.Pkg) bool { if pkg == nil { // TODO(mdempsky): Set Pkg for local types earlier. - pkg = ir.LocalPkg + pkg = types.LocalPkg } - if pkg != ir.LocalPkg { + if pkg != types.LocalPkg { // Assume imported packages passed type-checking. 
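langSupported's full body is not in this hunk; the check it performs can be sketched as a plain version comparison against the -lang flag. A simplified model, assuming the no-flag default of allowing everything:

    package main

    import "fmt"

    type lang struct{ major, minor int }

    // supported reports whether a feature introduced in want is usable
    // under the flag value have; haveSet is false when -lang was not given.
    func supported(want, have lang, haveSet bool) bool {
        if !haveSet {
            return true
        }
        return have.major > want.major ||
            (have.major == want.major && have.minor >= want.minor)
    }

    func main() {
        fmt.Println(supported(lang{1, 13}, lang{1, 12}, true)) // false
        fmt.Println(supported(lang{1, 13}, lang{1, 15}, true)) // true
    }
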
return true } diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 1cd83756773b7..f39bf2ff3c476 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -79,7 +79,7 @@ func parseFiles(filenames []string) uint { p.processPragmas() } - ir.LocalPkg.Height = myheight + types.LocalPkg.Height = myheight return lines } @@ -501,7 +501,7 @@ func (p *noder) typeDecl(decl *syntax.TypeDecl) ir.Node { } nod := p.nod(decl, ir.ODCLTYPE, n, nil) - if n.Alias() && !langSupported(1, 9, ir.LocalPkg) { + if n.Alias() && !langSupported(1, 9, types.LocalPkg) { base.ErrorfAt(nod.Pos(), "type aliases only supported as of -lang=go1.9") } return nod @@ -532,7 +532,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node { } } - if ir.LocalPkg.Name == "main" && name.Name == "main" { + if types.LocalPkg.Name == "main" && name.Name == "main" { if t.List().Len() > 0 || t.Rlist().Len() > 0 { base.ErrorfAt(f.Pos(), "func main must have no arguments and no return values") } @@ -931,7 +931,7 @@ func (p *noder) packname(expr syntax.Expr) *types.Sym { var pkg *types.Pkg if def.Op() != ir.OPACK { base.Errorf("%v is not a package", name) - pkg = ir.LocalPkg + pkg = types.LocalPkg } else { def := def.(*ir.PkgName) def.Used = true @@ -1387,7 +1387,7 @@ func (p *noder) binOp(op syntax.Operator) ir.Op { // literal is not compatible with the current language version. func checkLangCompat(lit *syntax.BasicLit) { s := lit.Value - if len(s) <= 2 || langSupported(1, 13, ir.LocalPkg) { + if len(s) <= 2 || langSupported(1, 13, types.LocalPkg) { return } // len(s) > 2 diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index b1701b30a1286..c34a86d4ebe9f 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -84,7 +84,7 @@ func printObjHeader(bout *bio.Writer) { if base.Flag.BuildID != "" { fmt.Fprintf(bout, "build id %q\n", base.Flag.BuildID) } - if ir.LocalPkg.Name == "main" { + if types.LocalPkg.Name == "main" { fmt.Fprintf(bout, "main\n") } fmt.Fprintf(bout, "\n") // header ends with blank line @@ -200,7 +200,7 @@ func dumpLinkerObj(bout *bio.Writer) { } func addptabs() { - if !base.Ctxt.Flag_dynlink || ir.LocalPkg.Name != "main" { + if !base.Ctxt.Flag_dynlink || types.LocalPkg.Name != "main" { return } for _, exportn := range exportlist { @@ -235,7 +235,7 @@ func dumpGlobal(n ir.Node) { if n.Class() == ir.PFUNC { return } - if n.Sym().Pkg != ir.LocalPkg { + if n.Sym().Pkg != types.LocalPkg { return } dowidth(n.Type()) @@ -248,7 +248,7 @@ func dumpGlobalConst(n ir.Node) { if t == nil { return } - if n.Sym().Pkg != ir.LocalPkg { + if n.Sym().Pkg != types.LocalPkg { return } // only export integer constants for now @@ -478,7 +478,7 @@ var slicedataGen int func slicedata(pos src.XPos, s string) ir.Node { slicedataGen++ symname := fmt.Sprintf(".gobytes.%d", slicedataGen) - sym := ir.LocalPkg.Lookup(symname) + sym := types.LocalPkg.Lookup(symname) symnode := NewName(sym) sym.Def = symnode diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 42139b7135980..9b8f26a84ba55 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -301,7 +301,7 @@ func deferstruct(stksize int64) *types.Type { // Unlike the global makefield function, this one needs to set Pkg // because these types might be compared (in SSA CSE sorting). // TODO: unify this makefield and the global one above. 
- sym := &types.Sym{Name: name, Pkg: ir.LocalPkg} + sym := &types.Sym{Name: name, Pkg: types.LocalPkg} return types.NewField(src.NoXPos, sym, typ) } argtype := types.NewArray(types.Types[types.TUINT8], stksize) @@ -491,7 +491,7 @@ func dimportpath(p *types.Pkg) { } str := p.Path - if p == ir.LocalPkg { + if p == types.LocalPkg { // Note: myimportpath != "", or else dgopkgpath won't call dimportpath. str = base.Ctxt.Pkgpath } @@ -508,7 +508,7 @@ func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int { return duintptr(s, ot, 0) } - if pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "" { + if pkg == types.LocalPkg && base.Ctxt.Pkgpath == "" { // If we don't know the full import path of the package being compiled // (i.e. -p was not passed on the compiler command line), emit a reference to // type..importpath.""., which the linker will rewrite using the correct import path. @@ -527,7 +527,7 @@ func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int { if pkg == nil { return duint32(s, ot, 0) } - if pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "" { + if pkg == types.LocalPkg && base.Ctxt.Pkgpath == "" { // If we don't know the full import path of the package being compiled // (i.e. -p was not passed on the compiler command line), emit a reference to // type..importpath.""., which the linker will rewrite using the correct import path. @@ -1158,7 +1158,7 @@ func dtypesym(t *types.Type) *obj.LSym { if base.Ctxt.Pkgpath != "runtime" || (tbase != types.Types[tbase.Kind()] && tbase != types.ByteType && tbase != types.RuneType && tbase != types.ErrorType) { // int, float, etc // named types from other files are defined only by those files - if tbase.Sym() != nil && tbase.Sym().Pkg != ir.LocalPkg { + if tbase.Sym() != nil && tbase.Sym().Pkg != types.LocalPkg { if i, ok := typeSymIdx[tbase]; ok { lsym.Pkg = tbase.Sym().Pkg.Prefix if t != tbase { @@ -1568,7 +1568,7 @@ func dumptabs() { } // process ptabs - if ir.LocalPkg.Name == "main" && len(ptabs) > 0 { + if types.LocalPkg.Name == "main" && len(ptabs) > 0 { ot := 0 s := base.Ctxt.Lookup("go.plugin.tabs") for _, p := range ptabs { diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index c446c9d083bcd..3c5f11c5abf1b 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -79,7 +79,7 @@ func (s *InitSchedule) staticcopy(l ir.Node, r ir.Node) bool { pfuncsym(l, r) return true } - if r.Class() != ir.PEXTERN || r.Sym().Pkg != ir.LocalPkg { + if r.Class() != ir.PEXTERN || r.Sym().Pkg != types.LocalPkg { return false } if r.Name().Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 89918e21333cb..add50c35d7495 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -4127,7 +4127,7 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder { return nil } pkg := sym.Pkg.Path - if sym.Pkg == ir.LocalPkg { + if sym.Pkg == types.LocalPkg { pkg = base.Ctxt.Pkgpath } if base.Flag.Race && pkg == "sync/atomic" { @@ -7073,7 +7073,7 @@ func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset} } - s := &types.Sym{Name: node.Sym().Name + suffix, Pkg: ir.LocalPkg} + s := &types.Sym{Name: node.Sym().Name + suffix, Pkg: types.LocalPkg} n := ir.NewNameAt(parent.N.Pos(), s) s.Def = n ir.AsNode(s.Def).Name().SetUsed(true) diff --git 
a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 65eb61e6800f5..dffebc58f2d17 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -69,7 +69,7 @@ func setlineno(n ir.Node) src.XPos { } func lookup(name string) *types.Sym { - return ir.LocalPkg.Lookup(name) + return types.LocalPkg.Lookup(name) } // lookupN looks up the symbol starting with prefix and ending with @@ -78,7 +78,7 @@ func lookupN(prefix string, n int) *types.Sym { var buf [20]byte // plenty long enough for all current users copy(buf[:], prefix) b := strconv.AppendInt(buf[:len(prefix)], int64(n), 10) - return ir.LocalPkg.LookupBytes(b) + return types.LocalPkg.LookupBytes(b) } // autolabel generates a new Name node for use with @@ -1109,13 +1109,13 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { // Only generate (*T).M wrappers for T.M in T's own package. if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && - rcvr.Elem().Sym() != nil && rcvr.Elem().Sym().Pkg != ir.LocalPkg { + rcvr.Elem().Sym() != nil && rcvr.Elem().Sym().Pkg != types.LocalPkg { return } // Only generate I.M wrappers for I in I's own package // but keep doing it for error.Error (was issue #29304). - if rcvr.IsInterface() && rcvr.Sym() != nil && rcvr.Sym().Pkg != ir.LocalPkg && rcvr != types.ErrorType { + if rcvr.IsInterface() && rcvr.Sym() != nil && rcvr.Sym().Pkg != types.LocalPkg && rcvr != types.ErrorType { return } diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index dc9e23069e692..85094dbebcad7 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -90,7 +90,7 @@ func resolve(n ir.Node) (res ir.Node) { defer tracePrint("resolve", n)(&res) } - if n.Sym().Pkg != ir.LocalPkg { + if n.Sym().Pkg != types.LocalPkg { if inimport { base.Fatalf("recursive inimport") } @@ -2386,7 +2386,7 @@ func typecheckMethodExpr(n ir.Node) (res ir.Node) { me.(*ir.MethodExpr).Method = m // Issue 25065. Make sure that we emit the symbol for a local method. - if base.Ctxt.Flag_dynlink && !inimport && (t.Sym() == nil || t.Sym().Pkg == ir.LocalPkg) { + if base.Ctxt.Flag_dynlink && !inimport && (t.Sym() == nil || t.Sym().Pkg == types.LocalPkg) { makefuncsym(me.Sym()) } @@ -2862,7 +2862,7 @@ func typecheckcomplit(n ir.Node) (res ir.Node) { f := t.Field(i) s := f.Sym - if s != nil && !types.IsExported(s.Name) && s.Pkg != ir.LocalPkg { + if s != nil && !types.IsExported(s.Name) && s.Pkg != types.LocalPkg { base.Errorf("implicit assignment of unexported field '%s' in %v literal", s.Name, t) } // No pushtype allowed here. Must name fields for that. @@ -2903,7 +2903,7 @@ func typecheckcomplit(n ir.Node) (res ir.Node) { // package, because of import dot. Redirect to correct sym // before we do the lookup. s := key.Sym() - if s.Pkg != ir.LocalPkg && types.IsExported(s.Name) { + if s.Pkg != types.LocalPkg && types.IsExported(s.Name) { s1 := lookup(s.Name) if s1.Origpkg == s.Pkg { s = s1 @@ -3034,7 +3034,7 @@ func typecheckarraylit(elemType *types.Type, bound int64, elts []ir.Node, ctx st // visible reports whether sym is exported or locally defined. func visible(sym *types.Sym) bool { - return sym != nil && (types.IsExported(sym.Name) || sym.Pkg == ir.LocalPkg) + return sym != nil && (types.IsExported(sym.Name) || sym.Pkg == types.LocalPkg) } // nonexported reports whether sym is an unexported field. 
@@ -3929,7 +3929,7 @@ func curpkg() *types.Pkg { fn := Curfn if fn == nil { // Initialization expressions for package-scope variables. - return ir.LocalPkg + return types.LocalPkg } return fnpkg(fn.Nname) } diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index cd68719a9978a..42b996d88d918 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -104,7 +104,7 @@ func initUniverse() { } types.Types[types.TANY] = types.New(types.TANY) - types.Types[types.TINTER] = types.NewInterface(ir.LocalPkg, nil) + types.Types[types.TINTER] = types.NewInterface(types.LocalPkg, nil) defBasic := func(kind types.Kind, pkg *types.Pkg, name string) *types.Type { sym := pkg.Lookup(name) @@ -120,7 +120,7 @@ func initUniverse() { } for _, s := range &basicTypes { - types.Types[s.etype] = defBasic(s.etype, ir.BuiltinPkg, s.name) + types.Types[s.etype] = defBasic(s.etype, types.BuiltinPkg, s.name) } for _, s := range &typedefs { @@ -130,7 +130,7 @@ func initUniverse() { } simtype[s.etype] = sameas - types.Types[s.etype] = defBasic(s.etype, ir.BuiltinPkg, s.name) + types.Types[s.etype] = defBasic(s.etype, types.BuiltinPkg, s.name) } // We create separate byte and rune types for better error messages @@ -140,11 +140,11 @@ func initUniverse() { // of less informative error messages involving bytes and runes)? // (Alternatively, we could introduce an OTALIAS node representing // type aliases, albeit at the cost of having to deal with it everywhere). - types.ByteType = defBasic(types.TUINT8, ir.BuiltinPkg, "byte") - types.RuneType = defBasic(types.TINT32, ir.BuiltinPkg, "rune") + types.ByteType = defBasic(types.TUINT8, types.BuiltinPkg, "byte") + types.RuneType = defBasic(types.TINT32, types.BuiltinPkg, "rune") // error type - s := ir.BuiltinPkg.Lookup("error") + s := types.BuiltinPkg.Lookup("error") n := ir.NewDeclNameAt(src.NoXPos, s) n.SetOp(ir.OTYPE) types.ErrorType = types.NewNamed(n) @@ -162,7 +162,7 @@ func initUniverse() { simtype[types.TUNSAFEPTR] = types.TPTR for _, s := range &builtinFuncs { - s2 := ir.BuiltinPkg.Lookup(s.name) + s2 := types.BuiltinPkg.Lookup(s.name) s2.Def = NewName(s2) ir.AsNode(s2.Def).SetSubOp(s.op) } @@ -173,16 +173,16 @@ func initUniverse() { ir.AsNode(s2.Def).SetSubOp(s.op) } - s = ir.BuiltinPkg.Lookup("true") + s = types.BuiltinPkg.Lookup("true") s.Def = nodbool(true) ir.AsNode(s.Def).SetSym(lookup("true")) - s = ir.BuiltinPkg.Lookup("false") + s = types.BuiltinPkg.Lookup("false") s.Def = nodbool(false) ir.AsNode(s.Def).SetSym(lookup("false")) s = lookup("_") - ir.BlankSym = s + types.BlankSym = s s.Block = -100 s.Def = NewName(s) types.Types[types.TBLANK] = types.New(types.TBLANK) @@ -190,18 +190,18 @@ func initUniverse() { ir.BlankNode = ir.AsNode(s.Def) ir.BlankNode.SetTypecheck(1) - s = ir.BuiltinPkg.Lookup("_") + s = types.BuiltinPkg.Lookup("_") s.Block = -100 s.Def = NewName(s) types.Types[types.TBLANK] = types.New(types.TBLANK) ir.AsNode(s.Def).SetType(types.Types[types.TBLANK]) types.Types[types.TNIL] = types.New(types.TNIL) - s = ir.BuiltinPkg.Lookup("nil") + s = types.BuiltinPkg.Lookup("nil") s.Def = nodnil() ir.AsNode(s.Def).SetSym(s) - s = ir.BuiltinPkg.Lookup("iota") + s = types.BuiltinPkg.Lookup("iota") s.Def = ir.Nod(ir.OIOTA, nil, nil) ir.AsNode(s.Def).SetSym(s) @@ -339,7 +339,7 @@ func finishUniverse() { // that we silently skip symbols that are already declared in the // package block rather than emitting a redeclared symbol error. 
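The silent skip described here is consistent with builtins being shadowable rather than reserved: when the package block already declares a name, the universe-block definition is simply not installed for that package. Legal user code, for example:

    package main

    import "fmt"

    // Shadowing a builtin in the package block is allowed.
    func len(s string) int { return 42 }

    func main() {
        fmt.Println(len("abc")) // 42, not 3
    }
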
- for _, s := range ir.BuiltinPkg.Syms { + for _, s := range types.BuiltinPkg.Syms { if s.Def == nil { continue } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 574c7c470933a..346817e589fe5 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -983,7 +983,7 @@ opswitch: if param == types.Txxx { break } - fn := ir.BasicTypeNames[param] + "to" + ir.BasicTypeNames[result] + fn := types.BasicTypeNames[param] + "to" + types.BasicTypeNames[result] n = conv(mkcall(fn, types.Types[result], init, conv(n.Left(), types.Types[param])), n.Type()) case ir.ODIV, ir.OMOD: diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 117c7417d2c8e..79d85d1803871 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -10,9 +10,7 @@ import ( "go/constant" "io" "os" - "strconv" - "strings" - "sync" + "unicode/utf8" "cmd/compile/internal/base" @@ -20,74 +18,6 @@ import ( "cmd/internal/src" ) -// Format conversions: -// TODO(gri) verify these; eliminate those not used anymore -// -// %v Op Node opcodes -// Flags: #: print Go syntax (automatic unless mode == FDbg) -// -// %j *Node Node details -// Flags: 0: suppresses things not relevant until walk -// -// %v *Val Constant values -// -// %v *types.Sym Symbols -// %S unqualified identifier in any mode -// Flags: +,- #: mode (see below) -// 0: in export mode: unqualified identifier if exported, qualified if not -// -// %v *types.Type Types -// %S omit "func" and receiver in function types -// %L definition instead of name. -// Flags: +,- #: mode (see below) -// ' ' (only in -/Sym mode) print type identifiers wit package name instead of prefix. -// -// %v *Node Nodes -// %S (only in +/debug mode) suppress recursion -// %L (only in Error mode) print "foo (type Bar)" -// Flags: +,- #: mode (see below) -// -// %v Nodes Node lists -// Flags: those of *Node -// .: separate items with ',' instead of ';' - -// *types.Sym, *types.Type, and *Node types use the flags below to set the format mode - -// The mode flags '+', '-', and '#' are sticky; they persist through -// recursions of *Node, *types.Type, and *types.Sym values. The ' ' flag is -// sticky only on *types.Type recursions and only used in %-/*types.Sym mode. 
-// -// Example: given a *types.Sym: %+v %#v %-v print an identifier properly qualified for debug/export/internal mode - -// Useful format combinations: -// TODO(gri): verify these -// -// *Node, Nodes: -// %+v multiline recursive debug dump of *Node/Nodes -// %+S non-recursive debug dump -// -// *Node: -// %#v Go format -// %L "foo (type Bar)" for error messages -// -// *types.Type: -// %#v Go format -// %#L type definition instead of name -// %#S omit "func" and receiver in function signature -// -// %-v type identifiers -// %-S type identifiers without "func" and arg names in type signatures (methodsym) -// %- v type identifiers with package name instead of prefix (typesym, dcommontype, typehash) - -type FmtMode int - -const ( - FErr FmtMode = iota - FDbg - FTypeId - FTypeIdName // same as FTypeId, but use package name instead of prefix -) - // Op var OpNames = []string{ @@ -177,584 +107,6 @@ func (o Op) Format(s fmt.State, verb rune) { } } -// Val - -func FmtConst(v constant.Value, sharp bool) string { - if !sharp && v.Kind() == constant.Complex { - real, imag := constant.Real(v), constant.Imag(v) - - var re string - sre := constant.Sign(real) - if sre != 0 { - re = real.String() - } - - var im string - sim := constant.Sign(imag) - if sim != 0 { - im = imag.String() - } - - switch { - case sre == 0 && sim == 0: - return "0" - case sre == 0: - return im + "i" - case sim == 0: - return re - case sim < 0: - return fmt.Sprintf("(%s%si)", re, im) - default: - return fmt.Sprintf("(%s+%si)", re, im) - } - } - - return v.String() -} - -// Sym - -// numImport tracks how often a package with a given name is imported. -// It is used to provide a better error message (by using the package -// path to disambiguate) if a package that appears multiple times with -// the same name appears in an error message. -var NumImport = make(map[string]int) - -// "%S" suppresses qualifying with package -func symFormat(s *types.Sym, f fmt.State, verb rune) { - mode := FErr - switch verb { - case 'v', 'S': - if verb == 'v' && f.Flag('+') { - mode = FDbg - } - fmt.Fprint(f, sconv(s, verb, mode)) - - default: - fmt.Fprintf(f, "%%!%c(*types.Sym=%p)", verb, s) - } -} - -// See #16897 for details about performance implications -// before changing the implementation of sconv. 
-func sconv(s *types.Sym, verb rune, mode FmtMode) string { - if verb == 'L' { - panic("linksymfmt") - } - - if s == nil { - return "" - } - - if s.Name == "_" { - return "_" - } - buf := fmtBufferPool.Get().(*bytes.Buffer) - buf.Reset() - defer fmtBufferPool.Put(buf) - - symfmt(buf, s, verb, mode) - return types.InternString(buf.Bytes()) -} - -func sconv2(b *bytes.Buffer, s *types.Sym, verb rune, mode FmtMode) { - if verb == 'L' { - panic("linksymfmt") - } - if s == nil { - b.WriteString("") - return - } - if s.Name == "_" { - b.WriteString("_") - return - } - - symfmt(b, s, verb, mode) -} - -func symfmt(b *bytes.Buffer, s *types.Sym, verb rune, mode FmtMode) { - if verb != 'S' { - switch mode { - case FErr: // This is for the user - if s.Pkg == BuiltinPkg || s.Pkg == LocalPkg { - b.WriteString(s.Name) - return - } - - // If the name was used by multiple packages, display the full path, - if s.Pkg.Name != "" && NumImport[s.Pkg.Name] > 1 { - fmt.Fprintf(b, "%q.%s", s.Pkg.Path, s.Name) - return - } - b.WriteString(s.Pkg.Name) - b.WriteByte('.') - b.WriteString(s.Name) - return - - case FDbg: - b.WriteString(s.Pkg.Name) - b.WriteByte('.') - b.WriteString(s.Name) - return - - case FTypeIdName: - // dcommontype, typehash - b.WriteString(s.Pkg.Name) - b.WriteByte('.') - b.WriteString(s.Name) - return - - case FTypeId: - // (methodsym), typesym, weaksym - b.WriteString(s.Pkg.Prefix) - b.WriteByte('.') - b.WriteString(s.Name) - return - } - } - - b.WriteString(s.Name) -} - -func methodSymName(s *types.Sym) string { - // Skip leading "type." in method name - name := s.Name - if i := strings.LastIndex(name, "."); i >= 0 { - name = name[i+1:] - } - return name -} - -// Type - -var BasicTypeNames = []string{ - types.TINT: "int", - types.TUINT: "uint", - types.TINT8: "int8", - types.TUINT8: "uint8", - types.TINT16: "int16", - types.TUINT16: "uint16", - types.TINT32: "int32", - types.TUINT32: "uint32", - types.TINT64: "int64", - types.TUINT64: "uint64", - types.TUINTPTR: "uintptr", - types.TFLOAT32: "float32", - types.TFLOAT64: "float64", - types.TCOMPLEX64: "complex64", - types.TCOMPLEX128: "complex128", - types.TBOOL: "bool", - types.TANY: "any", - types.TSTRING: "string", - types.TNIL: "nil", - types.TIDEAL: "untyped number", - types.TBLANK: "blank", -} - -var fmtBufferPool = sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, -} - -func InstallTypeFormats() { - types.SymString = func(s *types.Sym) string { - return sconv(s, 0, FErr) - } - types.TypeString = func(t *types.Type) string { - return tconv(t, 0, FErr) - } - types.TypeShortString = func(t *types.Type) string { - return tconv(t, 0, FTypeId) - } - types.TypeLongString = func(t *types.Type) string { - return tconv(t, 0, FTypeIdName) - } - types.FormatSym = symFormat - types.FormatType = typeFormat -} - -// "%L" print definition, not name -// "%S" omit 'func' and receiver from function types, short type names -func typeFormat(t *types.Type, s fmt.State, verb rune) { - mode := FErr - switch verb { - case 'v', 'S', 'L': - if verb == 'v' && s.Flag('+') { // %+v is debug format - mode = FDbg - } - if verb == 'S' && s.Flag('-') { // %-S is special case for receiver - short typeid format - mode = FTypeId - } - fmt.Fprint(s, tconv(t, verb, mode)) - default: - fmt.Fprintf(s, "%%!%c(*Type=%p)", verb, t) - } -} - -func tconv(t *types.Type, verb rune, mode FmtMode) string { - buf := fmtBufferPool.Get().(*bytes.Buffer) - buf.Reset() - defer fmtBufferPool.Put(buf) - - tconv2(buf, t, verb, mode, nil) - return 
types.InternString(buf.Bytes()) -} - -// tconv2 writes a string representation of t to b. -// flag and mode control exactly what is printed. -// Any types x that are already in the visited map get printed as @%d where %d=visited[x]. -// See #16897 before changing the implementation of tconv. -func tconv2(b *bytes.Buffer, t *types.Type, verb rune, mode FmtMode, visited map[*types.Type]int) { - if off, ok := visited[t]; ok { - // We've seen this type before, so we're trying to print it recursively. - // Print a reference to it instead. - fmt.Fprintf(b, "@%d", off) - return - } - if t == nil { - b.WriteString("") - return - } - if t.Kind() == types.TSSA { - b.WriteString(t.Extra.(string)) - return - } - if t.Kind() == types.TTUPLE { - b.WriteString(t.FieldType(0).String()) - b.WriteByte(',') - b.WriteString(t.FieldType(1).String()) - return - } - - if t.Kind() == types.TRESULTS { - tys := t.Extra.(*types.Results).Types - for i, et := range tys { - if i > 0 { - b.WriteByte(',') - } - b.WriteString(et.String()) - } - return - } - - if t == types.ByteType || t == types.RuneType { - // in %-T mode collapse rune and byte with their originals. - switch mode { - case FTypeIdName, FTypeId: - t = types.Types[t.Kind()] - default: - sconv2(b, t.Sym(), 'S', mode) - return - } - } - if t == types.ErrorType { - b.WriteString("error") - return - } - - // Unless the 'L' flag was specified, if the type has a name, just print that name. - if verb != 'L' && t.Sym() != nil && t != types.Types[t.Kind()] { - switch mode { - case FTypeId, FTypeIdName: - if verb == 'S' { - if t.Vargen != 0 { - sconv2(b, t.Sym(), 'S', mode) - fmt.Fprintf(b, "·%d", t.Vargen) - return - } - sconv2(b, t.Sym(), 'S', mode) - return - } - - if mode == FTypeIdName { - sconv2(b, t.Sym(), 'v', FTypeIdName) - return - } - - if t.Sym().Pkg == LocalPkg && t.Vargen != 0 { - sconv2(b, t.Sym(), 'v', mode) - fmt.Fprintf(b, "·%d", t.Vargen) - return - } - } - - sconv2(b, t.Sym(), 'v', mode) - return - } - - if int(t.Kind()) < len(BasicTypeNames) && BasicTypeNames[t.Kind()] != "" { - var name string - switch t { - case types.UntypedBool: - name = "untyped bool" - case types.UntypedString: - name = "untyped string" - case types.UntypedInt: - name = "untyped int" - case types.UntypedRune: - name = "untyped rune" - case types.UntypedFloat: - name = "untyped float" - case types.UntypedComplex: - name = "untyped complex" - default: - name = BasicTypeNames[t.Kind()] - } - b.WriteString(name) - return - } - - if mode == FDbg { - b.WriteString(t.Kind().String()) - b.WriteByte('-') - tconv2(b, t, 'v', FErr, visited) - return - } - - // At this point, we might call tconv2 recursively. Add the current type to the visited list so we don't - // try to print it recursively. - // We record the offset in the result buffer where the type's text starts. This offset serves as a reference - // point for any later references to the same type. - // Note that we remove the type from the visited map as soon as the recursive call is done. - // This prevents encoding types like map[*int]*int as map[*int]@4. (That encoding would work, - // but I'd like to use the @ notation only when strictly necessary.) 
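The visited map deleted here is the whole cycle-breaking mechanism: record the buffer offset where a type's text begins, and print @offset on re-entry instead of recursing forever. A minimal standalone version of the same idea:

    package main

    import (
        "bytes"
        "fmt"
    )

    type node struct {
        name string
        elem *node
    }

    // write records where each node's text starts; a node seen again
    // while still being printed becomes a back-reference "@off".
    func write(b *bytes.Buffer, n *node, visited map[*node]int) {
        if off, ok := visited[n]; ok {
            fmt.Fprintf(b, "@%d", off)
            return
        }
        visited[n] = b.Len()
        defer delete(visited, n)

        b.WriteString(n.name)
        if n.elem != nil {
            b.WriteByte('[')
            write(b, n.elem, visited)
            b.WriteByte(']')
        }
    }

    func main() {
        n := &node{name: "T"}
        n.elem = n // self-referential, like map[*T]*T
        var b bytes.Buffer
        write(&b, n, map[*node]int{})
        fmt.Println(b.String()) // T[@0]
    }
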
- if visited == nil { - visited = map[*types.Type]int{} - } - visited[t] = b.Len() - defer delete(visited, t) - - switch t.Kind() { - case types.TPTR: - b.WriteByte('*') - switch mode { - case FTypeId, FTypeIdName: - if verb == 'S' { - tconv2(b, t.Elem(), 'S', mode, visited) - return - } - } - tconv2(b, t.Elem(), 'v', mode, visited) - - case types.TARRAY: - b.WriteByte('[') - b.WriteString(strconv.FormatInt(t.NumElem(), 10)) - b.WriteByte(']') - tconv2(b, t.Elem(), 0, mode, visited) - - case types.TSLICE: - b.WriteString("[]") - tconv2(b, t.Elem(), 0, mode, visited) - - case types.TCHAN: - switch t.ChanDir() { - case types.Crecv: - b.WriteString("<-chan ") - tconv2(b, t.Elem(), 0, mode, visited) - case types.Csend: - b.WriteString("chan<- ") - tconv2(b, t.Elem(), 0, mode, visited) - default: - b.WriteString("chan ") - if t.Elem() != nil && t.Elem().IsChan() && t.Elem().Sym() == nil && t.Elem().ChanDir() == types.Crecv { - b.WriteByte('(') - tconv2(b, t.Elem(), 0, mode, visited) - b.WriteByte(')') - } else { - tconv2(b, t.Elem(), 0, mode, visited) - } - } - - case types.TMAP: - b.WriteString("map[") - tconv2(b, t.Key(), 0, mode, visited) - b.WriteByte(']') - tconv2(b, t.Elem(), 0, mode, visited) - - case types.TINTER: - if t.IsEmptyInterface() { - b.WriteString("interface {}") - break - } - b.WriteString("interface {") - for i, f := range t.Fields().Slice() { - if i != 0 { - b.WriteByte(';') - } - b.WriteByte(' ') - switch { - case f.Sym == nil: - // Check first that a symbol is defined for this type. - // Wrong interface definitions may have types lacking a symbol. - break - case types.IsExported(f.Sym.Name): - sconv2(b, f.Sym, 'S', mode) - default: - if mode != FTypeIdName { - mode = FTypeId - } - sconv2(b, f.Sym, 'v', mode) - } - tconv2(b, f.Type, 'S', mode, visited) - } - if t.NumFields() != 0 { - b.WriteByte(' ') - } - b.WriteByte('}') - - case types.TFUNC: - if verb == 'S' { - // no leading func - } else { - if t.Recv() != nil { - b.WriteString("method") - tconv2(b, t.Recvs(), 0, mode, visited) - b.WriteByte(' ') - } - b.WriteString("func") - } - tconv2(b, t.Params(), 0, mode, visited) - - switch t.NumResults() { - case 0: - // nothing to do - - case 1: - b.WriteByte(' ') - tconv2(b, t.Results().Field(0).Type, 0, mode, visited) // struct->field->field's type - - default: - b.WriteByte(' ') - tconv2(b, t.Results(), 0, mode, visited) - } - - case types.TSTRUCT: - if m := t.StructType().Map; m != nil { - mt := m.MapType() - // Format the bucket struct for map[x]y as map.bucket[x]y. - // This avoids a recursive print that generates very long names. 
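For reference, the special cases just below yield names like these for a map[string]int (expected output strings, not code):

    map.bucket[string]int   // mt.Bucket
    map.hdr[string]int      // mt.Hmap
    map.iter[string]int     // mt.Hiter
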
- switch t { - case mt.Bucket: - b.WriteString("map.bucket[") - case mt.Hmap: - b.WriteString("map.hdr[") - case mt.Hiter: - b.WriteString("map.iter[") - default: - base.Fatalf("unknown internal map type") - } - tconv2(b, m.Key(), 0, mode, visited) - b.WriteByte(']') - tconv2(b, m.Elem(), 0, mode, visited) - break - } - - if funarg := t.StructType().Funarg; funarg != types.FunargNone { - b.WriteByte('(') - fieldVerb := 'v' - switch mode { - case FTypeId, FTypeIdName, FErr: - // no argument names on function signature, and no "noescape"/"nosplit" tags - fieldVerb = 'S' - } - for i, f := range t.Fields().Slice() { - if i != 0 { - b.WriteString(", ") - } - fldconv(b, f, fieldVerb, mode, visited, funarg) - } - b.WriteByte(')') - } else { - b.WriteString("struct {") - for i, f := range t.Fields().Slice() { - if i != 0 { - b.WriteByte(';') - } - b.WriteByte(' ') - fldconv(b, f, 'L', mode, visited, funarg) - } - if t.NumFields() != 0 { - b.WriteByte(' ') - } - b.WriteByte('}') - } - - case types.TFORW: - b.WriteString("undefined") - if t.Sym() != nil { - b.WriteByte(' ') - sconv2(b, t.Sym(), 'v', mode) - } - - case types.TUNSAFEPTR: - b.WriteString("unsafe.Pointer") - - case types.Txxx: - b.WriteString("Txxx") - - default: - // Don't know how to handle - fall back to detailed prints - b.WriteString(t.Kind().String()) - b.WriteString(" <") - sconv2(b, t.Sym(), 'v', mode) - b.WriteString(">") - - } -} - -func fldconv(b *bytes.Buffer, f *types.Field, verb rune, mode FmtMode, visited map[*types.Type]int, funarg types.Funarg) { - if f == nil { - b.WriteString("") - return - } - - var name string - if verb != 'S' { - s := f.Sym - - // Take the name from the original. - if mode == FErr { - s = OrigSym(s) - } - - if s != nil && f.Embedded == 0 { - if funarg != types.FunargNone { - name = fmt.Sprint(f.Nname) - } else if verb == 'L' { - name = methodSymName(s) - if !types.IsExported(name) && mode != FTypeIdName { - name = sconv(s, 0, mode) // qualify non-exported names (used on structs, not on funarg) - } - } else { - name = sconv(s, 0, mode) - } - } - } - - if name != "" { - b.WriteString(name) - b.WriteString(" ") - } - - if f.IsDDD() { - var et *types.Type - if f.Type != nil { - et = f.Type.Elem() - } - b.WriteString("...") - tconv2(b, et, 0, mode, visited) - } else { - tconv2(b, f.Type, 0, mode, visited) - } - - if verb != 'S' && funarg == types.FunargNone && f.Note != "" { - b.WriteString(" ") - b.WriteString(strconv.Quote(f.Note)) - } -} - // Node func FmtNode(n Node, s fmt.State, verb rune) { @@ -1198,7 +550,7 @@ func exprFmt(n Node, s fmt.State, prec int) { fmt.Fprintf(s, "'\\U%08x'", uint64(x)) } } else { - fmt.Fprint(s, FmtConst(n.Val(), s.Flag('#'))) + fmt.Fprint(s, types.FmtConst(n.Val(), s.Flag('#'))) } if needUnparen { @@ -1338,7 +690,7 @@ func exprFmt(n Node, s fmt.State, prec int) { fmt.Fprint(s, ".") return } - fmt.Fprintf(s, ".%s", methodSymName(n.Sym())) + fmt.Fprintf(s, ".%s", types.SymMethodName(n.Sym())) case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH: exprFmt(n.Left(), s, nprec) @@ -1346,7 +698,7 @@ func exprFmt(n Node, s fmt.State, prec int) { fmt.Fprint(s, ".") return } - fmt.Fprintf(s, ".%s", methodSymName(n.Sym())) + fmt.Fprintf(s, ".%s", types.SymMethodName(n.Sym())) case ODOTTYPE, ODOTTYPE2: exprFmt(n.Left(), s, nprec) diff --git a/src/cmd/compile/internal/ir/ir.go b/src/cmd/compile/internal/ir/ir.go index ad7f692b07782..82224ca2ed835 100644 --- a/src/cmd/compile/internal/ir/ir.go +++ b/src/cmd/compile/internal/ir/ir.go @@ -3,10 +3,3 @@ // license that can be found in the 
LICENSE file. package ir - -import "cmd/compile/internal/types" - -var LocalPkg *types.Pkg // package being compiled - -// builtinpkg is a fake package that declares the universe block. -var BuiltinPkg *types.Pkg diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 56b320e726482..ba7eaae1b9012 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -10,7 +10,6 @@ import ( "fmt" "go/constant" "sort" - "strings" "cmd/compile/internal/base" "cmd/compile/internal/types" @@ -654,33 +653,6 @@ func AsNode(n types.Object) Node { var BlankNode Node -var BlankSym *types.Sym - -// origSym returns the original symbol written by the user. -func OrigSym(s *types.Sym) *types.Sym { - if s == nil { - return nil - } - - if len(s.Name) > 1 && s.Name[0] == '~' { - switch s.Name[1] { - case 'r': // originally an unnamed result - return nil - case 'b': // originally the blank identifier _ - // TODO(mdempsky): Does s.Pkg matter here? - return BlankSym - } - return s - } - - if strings.HasPrefix(s.Name, ".anon") { - // originally an unnamed or _ name (see subr.go: structargs) - return nil - } - - return s -} - func IsConst(n Node, ct constant.Kind) bool { return ConstType(n) == ct } diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go index cb3b9c0e2a0ea..decb843465e54 100644 --- a/src/cmd/compile/internal/ssa/export_test.go +++ b/src/cmd/compile/internal/ssa/export_test.go @@ -137,7 +137,7 @@ func init() { // Initialize just enough of the universe and the types package to make our tests function. // TODO(josharian): move universe initialization to the types package, // so this test setup can share it. - ir.InstallTypeFormats() + types.InstallTypeFormats() types.Dowidth = func(t *types.Type) {} for _, typ := range [...]struct { diff --git a/src/cmd/compile/internal/types/fmt.go b/src/cmd/compile/internal/types/fmt.go new file mode 100644 index 0000000000000..4f36e4c3933b1 --- /dev/null +++ b/src/cmd/compile/internal/types/fmt.go @@ -0,0 +1,694 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types + +import ( + "bytes" + "fmt" + "go/constant" + "strconv" + "strings" + "sync" + + "cmd/compile/internal/base" +) + +// builtinpkg is a fake package that declares the universe block. +var BuiltinPkg *Pkg + +var LocalPkg *Pkg // package being compiled + +var BlankSym *Sym + +// origSym returns the original symbol written by the user. +func OrigSym(s *Sym) *Sym { + if s == nil { + return nil + } + + if len(s.Name) > 1 && s.Name[0] == '~' { + switch s.Name[1] { + case 'r': // originally an unnamed result + return nil + case 'b': // originally the blank identifier _ + // TODO(mdempsky): Does s.Pkg matter here? + return BlankSym + } + return s + } + + if strings.HasPrefix(s.Name, ".anon") { + // originally an unnamed or _ name (see subr.go: structargs) + return nil + } + + return s +} + +// Sym + +// numImport tracks how often a package with a given name is imported. +// It is used to provide a better error message (by using the package +// path to disambiguate) if a package that appears multiple times with +// the same name appears in an error message. 
+var NumImport = make(map[string]int) + +// Format conversions: +// TODO(gri) verify these; eliminate those not used anymore +// +// %v Op Node opcodes +// Flags: #: print Go syntax (automatic unless mode == FDbg) +// +// %j *Node Node details +// Flags: 0: suppresses things not relevant until walk +// +// %v *Val Constant values +// +// %v *types.Sym Symbols +// %S unqualified identifier in any mode +// Flags: +,- #: mode (see below) +// 0: in export mode: unqualified identifier if exported, qualified if not +// +// %v *types.Type Types +// %S omit "func" and receiver in function types +// %L definition instead of name. +// Flags: +,- #: mode (see below) +// ' ' (only in -/Sym mode) print type identifiers wit package name instead of prefix. +// +// %v *Node Nodes +// %S (only in +/debug mode) suppress recursion +// %L (only in Error mode) print "foo (type Bar)" +// Flags: +,- #: mode (see below) +// +// %v Nodes Node lists +// Flags: those of *Node +// .: separate items with ',' instead of ';' + +// *types.Sym, *types.Type, and *Node types use the flags below to set the format mode + +// The mode flags '+', '-', and '#' are sticky; they persist through +// recursions of *Node, *types.Type, and *types.Sym values. The ' ' flag is +// sticky only on *types.Type recursions and only used in %-/*types.Sym mode. +// +// Example: given a *types.Sym: %+v %#v %-v print an identifier properly qualified for debug/export/internal mode + +// Useful format combinations: +// TODO(gri): verify these +// +// *Node, Nodes: +// %+v multiline recursive debug dump of *Node/Nodes +// %+S non-recursive debug dump +// +// *Node: +// %#v Go format +// %L "foo (type Bar)" for error messages +// +// *types.Type: +// %#v Go format +// %#L type definition instead of name +// %#S omit "func" and receiver in function signature +// +// %-v type identifiers +// %-S type identifiers without "func" and arg names in type signatures (methodsym) +// %- v type identifiers with package name instead of prefix (typesym, dcommontype, typehash) + +type fmtMode int + +const ( + fmtGo fmtMode = iota + fmtDebug + fmtTypeID + fmtTypeIDName // same as FTypeId, but use package name instead of prefix +) + +// "%S" suppresses qualifying with package +func symFormat(s *Sym, f fmt.State, verb rune) { + mode := fmtGo + switch verb { + case 'v', 'S': + if verb == 'v' && f.Flag('+') { + mode = fmtDebug + } + fmt.Fprint(f, sconv(s, verb, mode)) + + default: + fmt.Fprintf(f, "%%!%c(*types.Sym=%p)", verb, s) + } +} + +// See #16897 for details about performance implications +// before changing the implementation of sconv. 
+func sconv(s *Sym, verb rune, mode fmtMode) string { + if verb == 'L' { + panic("linksymfmt") + } + + if s == nil { + return "" + } + + if s.Name == "_" { + return "_" + } + buf := fmtBufferPool.Get().(*bytes.Buffer) + buf.Reset() + defer fmtBufferPool.Put(buf) + + symfmt(buf, s, verb, mode) + return InternString(buf.Bytes()) +} + +func sconv2(b *bytes.Buffer, s *Sym, verb rune, mode fmtMode) { + if verb == 'L' { + panic("linksymfmt") + } + if s == nil { + b.WriteString("") + return + } + if s.Name == "_" { + b.WriteString("_") + return + } + + symfmt(b, s, verb, mode) +} + +func symfmt(b *bytes.Buffer, s *Sym, verb rune, mode fmtMode) { + if verb != 'S' { + switch mode { + case fmtGo: // This is for the user + if s.Pkg == BuiltinPkg || s.Pkg == LocalPkg { + b.WriteString(s.Name) + return + } + + // If the name was used by multiple packages, display the full path, + if s.Pkg.Name != "" && NumImport[s.Pkg.Name] > 1 { + fmt.Fprintf(b, "%q.%s", s.Pkg.Path, s.Name) + return + } + b.WriteString(s.Pkg.Name) + b.WriteByte('.') + b.WriteString(s.Name) + return + + case fmtDebug: + b.WriteString(s.Pkg.Name) + b.WriteByte('.') + b.WriteString(s.Name) + return + + case fmtTypeIDName: + // dcommontype, typehash + b.WriteString(s.Pkg.Name) + b.WriteByte('.') + b.WriteString(s.Name) + return + + case fmtTypeID: + // (methodsym), typesym, weaksym + b.WriteString(s.Pkg.Prefix) + b.WriteByte('.') + b.WriteString(s.Name) + return + } + } + + b.WriteString(s.Name) +} + +func SymMethodName(s *Sym) string { + // Skip leading "type." in method name + name := s.Name + if i := strings.LastIndex(name, "."); i >= 0 { + name = name[i+1:] + } + return name +} + +// Type + +var BasicTypeNames = []string{ + TINT: "int", + TUINT: "uint", + TINT8: "int8", + TUINT8: "uint8", + TINT16: "int16", + TUINT16: "uint16", + TINT32: "int32", + TUINT32: "uint32", + TINT64: "int64", + TUINT64: "uint64", + TUINTPTR: "uintptr", + TFLOAT32: "float32", + TFLOAT64: "float64", + TCOMPLEX64: "complex64", + TCOMPLEX128: "complex128", + TBOOL: "bool", + TANY: "any", + TSTRING: "string", + TNIL: "nil", + TIDEAL: "untyped number", + TBLANK: "blank", +} + +var fmtBufferPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +func InstallTypeFormats() { + SymString = func(s *Sym) string { + return sconv(s, 0, fmtGo) + } + TypeString = func(t *Type) string { + return tconv(t, 0, fmtGo) + } + TypeShortString = func(t *Type) string { + return tconv(t, 0, fmtTypeID) + } + TypeLongString = func(t *Type) string { + return tconv(t, 0, fmtTypeIDName) + } + FormatSym = symFormat + FormatType = typeFormat +} + +// "%L" print definition, not name +// "%S" omit 'func' and receiver from function types, short type names +func typeFormat(t *Type, s fmt.State, verb rune) { + mode := fmtGo + switch verb { + case 'v', 'S', 'L': + if verb == 'v' && s.Flag('+') { // %+v is debug format + mode = fmtDebug + } + if verb == 'S' && s.Flag('-') { // %-S is special case for receiver - short typeid format + mode = fmtTypeID + } + fmt.Fprint(s, tconv(t, verb, mode)) + default: + fmt.Fprintf(s, "%%!%c(*Type=%p)", verb, t) + } +} + +func tconv(t *Type, verb rune, mode fmtMode) string { + buf := fmtBufferPool.Get().(*bytes.Buffer) + buf.Reset() + defer fmtBufferPool.Put(buf) + + tconv2(buf, t, verb, mode, nil) + return InternString(buf.Bytes()) +} + +// tconv2 writes a string representation of t to b. +// flag and mode control exactly what is printed. +// Any types x that are already in the visited map get printed as @%d where %d=visited[x]. 
+// See #16897 before changing the implementation of tconv. +func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type]int) { + if off, ok := visited[t]; ok { + // We've seen this type before, so we're trying to print it recursively. + // Print a reference to it instead. + fmt.Fprintf(b, "@%d", off) + return + } + if t == nil { + b.WriteString("") + return + } + if t.Kind() == TSSA { + b.WriteString(t.Extra.(string)) + return + } + if t.Kind() == TTUPLE { + b.WriteString(t.FieldType(0).String()) + b.WriteByte(',') + b.WriteString(t.FieldType(1).String()) + return + } + + if t.Kind() == TRESULTS { + tys := t.Extra.(*Results).Types + for i, et := range tys { + if i > 0 { + b.WriteByte(',') + } + b.WriteString(et.String()) + } + return + } + + if t == ByteType || t == RuneType { + // in %-T mode collapse rune and byte with their originals. + switch mode { + case fmtTypeIDName, fmtTypeID: + t = Types[t.Kind()] + default: + sconv2(b, t.Sym(), 'S', mode) + return + } + } + if t == ErrorType { + b.WriteString("error") + return + } + + // Unless the 'L' flag was specified, if the type has a name, just print that name. + if verb != 'L' && t.Sym() != nil && t != Types[t.Kind()] { + switch mode { + case fmtTypeID, fmtTypeIDName: + if verb == 'S' { + if t.Vargen != 0 { + sconv2(b, t.Sym(), 'S', mode) + fmt.Fprintf(b, "·%d", t.Vargen) + return + } + sconv2(b, t.Sym(), 'S', mode) + return + } + + if mode == fmtTypeIDName { + sconv2(b, t.Sym(), 'v', fmtTypeIDName) + return + } + + if t.Sym().Pkg == LocalPkg && t.Vargen != 0 { + sconv2(b, t.Sym(), 'v', mode) + fmt.Fprintf(b, "·%d", t.Vargen) + return + } + } + + sconv2(b, t.Sym(), 'v', mode) + return + } + + if int(t.Kind()) < len(BasicTypeNames) && BasicTypeNames[t.Kind()] != "" { + var name string + switch t { + case UntypedBool: + name = "untyped bool" + case UntypedString: + name = "untyped string" + case UntypedInt: + name = "untyped int" + case UntypedRune: + name = "untyped rune" + case UntypedFloat: + name = "untyped float" + case UntypedComplex: + name = "untyped complex" + default: + name = BasicTypeNames[t.Kind()] + } + b.WriteString(name) + return + } + + if mode == fmtDebug { + b.WriteString(t.Kind().String()) + b.WriteByte('-') + tconv2(b, t, 'v', fmtGo, visited) + return + } + + // At this point, we might call tconv2 recursively. Add the current type to the visited list so we don't + // try to print it recursively. + // We record the offset in the result buffer where the type's text starts. This offset serves as a reference + // point for any later references to the same type. + // Note that we remove the type from the visited map as soon as the recursive call is done. + // This prevents encoding types like map[*int]*int as map[*int]@4. (That encoding would work, + // but I'd like to use the @ notation only when strictly necessary.) 
+ if visited == nil { + visited = map[*Type]int{} + } + visited[t] = b.Len() + defer delete(visited, t) + + switch t.Kind() { + case TPTR: + b.WriteByte('*') + switch mode { + case fmtTypeID, fmtTypeIDName: + if verb == 'S' { + tconv2(b, t.Elem(), 'S', mode, visited) + return + } + } + tconv2(b, t.Elem(), 'v', mode, visited) + + case TARRAY: + b.WriteByte('[') + b.WriteString(strconv.FormatInt(t.NumElem(), 10)) + b.WriteByte(']') + tconv2(b, t.Elem(), 0, mode, visited) + + case TSLICE: + b.WriteString("[]") + tconv2(b, t.Elem(), 0, mode, visited) + + case TCHAN: + switch t.ChanDir() { + case Crecv: + b.WriteString("<-chan ") + tconv2(b, t.Elem(), 0, mode, visited) + case Csend: + b.WriteString("chan<- ") + tconv2(b, t.Elem(), 0, mode, visited) + default: + b.WriteString("chan ") + if t.Elem() != nil && t.Elem().IsChan() && t.Elem().Sym() == nil && t.Elem().ChanDir() == Crecv { + b.WriteByte('(') + tconv2(b, t.Elem(), 0, mode, visited) + b.WriteByte(')') + } else { + tconv2(b, t.Elem(), 0, mode, visited) + } + } + + case TMAP: + b.WriteString("map[") + tconv2(b, t.Key(), 0, mode, visited) + b.WriteByte(']') + tconv2(b, t.Elem(), 0, mode, visited) + + case TINTER: + if t.IsEmptyInterface() { + b.WriteString("interface {}") + break + } + b.WriteString("interface {") + for i, f := range t.Fields().Slice() { + if i != 0 { + b.WriteByte(';') + } + b.WriteByte(' ') + switch { + case f.Sym == nil: + // Check first that a symbol is defined for this type. + // Wrong interface definitions may have types lacking a symbol. + break + case IsExported(f.Sym.Name): + sconv2(b, f.Sym, 'S', mode) + default: + if mode != fmtTypeIDName { + mode = fmtTypeID + } + sconv2(b, f.Sym, 'v', mode) + } + tconv2(b, f.Type, 'S', mode, visited) + } + if t.NumFields() != 0 { + b.WriteByte(' ') + } + b.WriteByte('}') + + case TFUNC: + if verb == 'S' { + // no leading func + } else { + if t.Recv() != nil { + b.WriteString("method") + tconv2(b, t.Recvs(), 0, mode, visited) + b.WriteByte(' ') + } + b.WriteString("func") + } + tconv2(b, t.Params(), 0, mode, visited) + + switch t.NumResults() { + case 0: + // nothing to do + + case 1: + b.WriteByte(' ') + tconv2(b, t.Results().Field(0).Type, 0, mode, visited) // struct->field->field's type + + default: + b.WriteByte(' ') + tconv2(b, t.Results(), 0, mode, visited) + } + + case TSTRUCT: + if m := t.StructType().Map; m != nil { + mt := m.MapType() + // Format the bucket struct for map[x]y as map.bucket[x]y. + // This avoids a recursive print that generates very long names. 
+ switch t { + case mt.Bucket: + b.WriteString("map.bucket[") + case mt.Hmap: + b.WriteString("map.hdr[") + case mt.Hiter: + b.WriteString("map.iter[") + default: + base.Fatalf("unknown internal map type") + } + tconv2(b, m.Key(), 0, mode, visited) + b.WriteByte(']') + tconv2(b, m.Elem(), 0, mode, visited) + break + } + + if funarg := t.StructType().Funarg; funarg != FunargNone { + b.WriteByte('(') + fieldVerb := 'v' + switch mode { + case fmtTypeID, fmtTypeIDName, fmtGo: + // no argument names on function signature, and no "noescape"/"nosplit" tags + fieldVerb = 'S' + } + for i, f := range t.Fields().Slice() { + if i != 0 { + b.WriteString(", ") + } + fldconv(b, f, fieldVerb, mode, visited, funarg) + } + b.WriteByte(')') + } else { + b.WriteString("struct {") + for i, f := range t.Fields().Slice() { + if i != 0 { + b.WriteByte(';') + } + b.WriteByte(' ') + fldconv(b, f, 'L', mode, visited, funarg) + } + if t.NumFields() != 0 { + b.WriteByte(' ') + } + b.WriteByte('}') + } + + case TFORW: + b.WriteString("undefined") + if t.Sym() != nil { + b.WriteByte(' ') + sconv2(b, t.Sym(), 'v', mode) + } + + case TUNSAFEPTR: + b.WriteString("unsafe.Pointer") + + case Txxx: + b.WriteString("Txxx") + + default: + // Don't know how to handle - fall back to detailed prints + b.WriteString(t.Kind().String()) + b.WriteString(" <") + sconv2(b, t.Sym(), 'v', mode) + b.WriteString(">") + + } +} + +func fldconv(b *bytes.Buffer, f *Field, verb rune, mode fmtMode, visited map[*Type]int, funarg Funarg) { + if f == nil { + b.WriteString("") + return + } + + var name string + if verb != 'S' { + s := f.Sym + + // Take the name from the original. + if mode == fmtGo { + s = OrigSym(s) + } + + if s != nil && f.Embedded == 0 { + if funarg != FunargNone { + name = fmt.Sprint(f.Nname) + } else if verb == 'L' { + name = SymMethodName(s) + if !IsExported(name) && mode != fmtTypeIDName { + name = sconv(s, 0, mode) // qualify non-exported names (used on structs, not on funarg) + } + } else { + name = sconv(s, 0, mode) + } + } + } + + if name != "" { + b.WriteString(name) + b.WriteString(" ") + } + + if f.IsDDD() { + var et *Type + if f.Type != nil { + et = f.Type.Elem() + } + b.WriteString("...") + tconv2(b, et, 0, mode, visited) + } else { + tconv2(b, f.Type, 0, mode, visited) + } + + if verb != 'S' && funarg == FunargNone && f.Note != "" { + b.WriteString(" ") + b.WriteString(strconv.Quote(f.Note)) + } +} + +// Val + +func FmtConst(v constant.Value, sharp bool) string { + if !sharp && v.Kind() == constant.Complex { + real, imag := constant.Real(v), constant.Imag(v) + + var re string + sre := constant.Sign(real) + if sre != 0 { + re = real.String() + } + + var im string + sim := constant.Sign(imag) + if sim != 0 { + im = imag.String() + } + + switch { + case sre == 0 && sim == 0: + return "0" + case sre == 0: + return im + "i" + case sim == 0: + return re + case sim < 0: + return fmt.Sprintf("(%s%si)", re, im) + default: + return fmt.Sprintf("(%s+%si)", re, im) + } + } + + return v.String() +} From 6ea2b8c54cbc2d3a03d5dd174bc7526d33459d37 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sun, 6 Dec 2020 14:15:09 -0500 Subject: [PATCH 135/474] [dev.regabi] cmd/compile: clean up and document formatting Some cleanup left over from moving the Type and Sym formatting to types. And then document what the type formats are, now that it's clear. Passes buildall w/ toolstash -cmp. 
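For readers coming to this code fresh: the verbs and flags documented
here are dispatched through the standard fmt.Formatter protocol, in
which the fmt package hands the verb and the flag state to the value's
Format method. A minimal, self-contained sketch of that pattern (the
Sym type below is a hypothetical stand-in, not compiler code):

    package main

    import "fmt"

    // Sym is a stand-in for a symbol with a package qualifier.
    type Sym struct {
        Pkg, Name string
    }

    // Format implements fmt.Formatter:
    //
    //  %v  qualified name: Pkg.Name
    //  %S  short syntax: Name only
    //
    func (s *Sym) Format(f fmt.State, verb rune) {
        switch verb {
        case 'v':
            fmt.Fprintf(f, "%s.%s", s.Pkg, s.Name)
        case 'S':
            fmt.Fprint(f, s.Name)
        default:
            fmt.Fprintf(f, "%%!%c(*Sym=%p)", verb, s)
        }
    }

    func main() {
        s := &Sym{Pkg: "bytes", Name: "Buffer"}
        fmt.Printf("%v %S\n", s, s) // bytes.Buffer Buffer
    }

Once a type implements fmt.Formatter, fmt calls Format for every verb,
which is why the default case must print its own %!c(...) error form.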
Change-Id: I35cb8978f1627db1056cb8ab343ce6ba6c99afad Reviewed-on: https://go-review.googlesource.com/c/go/+/275780 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/main.go | 1 - src/cmd/compile/internal/ir/fmt.go | 35 ++++- src/cmd/compile/internal/ssa/export_test.go | 1 - src/cmd/compile/internal/types/fmt.go | 141 ++++++++------------ src/cmd/compile/internal/types/utils.go | 45 +------ 5 files changed, 87 insertions(+), 136 deletions(-) diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 15659dc7fd2f8..503dc449d3ea5 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -212,7 +212,6 @@ func Main(archInit func(*Arch)) { // would lead to import cycles) types.Widthptr = Widthptr types.Dowidth = dowidth - types.InstallTypeFormats() types.TypeLinkSym = func(t *types.Type) *obj.LSym { return typenamesym(t).Linksym() } diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 79d85d1803871..85c6b218e2a32 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -86,6 +86,7 @@ var OpNames = []string{ OXOR: "^", } +// GoString returns the Go syntax for the Op, or else its name. func (o Op) GoString() string { if int(o) < len(OpNames) && OpNames[o] != "" { return OpNames[o] @@ -93,6 +94,12 @@ func (o Op) GoString() string { return o.String() } +// Format implements formatting for an Op. +// The valid formats are: +// +// %v Go syntax ("+", "<-", "print") +// %+v Debug syntax ("ADD", "RECV", "PRINT") +// func (o Op) Format(s fmt.State, verb rune) { switch verb { default: @@ -109,6 +116,14 @@ func (o Op) Format(s fmt.State, verb rune) { // Node +// FmtNode implements formatting for a Node n. +// Every Node implementation must define a Format method that calls FmtNode. +// The valid formats are: +// +// %v Go syntax +// %L Go syntax followed by " (type T)" if type is known. +// %+v Debug syntax, as in Dump. +// func FmtNode(n Node, s fmt.State, verb rune) { // TODO(rsc): Remove uses of %#v, which behaves just like %v. // TODO(rsc): Remove uses of %S, which behaves just like %v. @@ -276,7 +291,7 @@ var OpPrec = []int{ OEND: 0, } -// Statements which may be rendered with a simplestmt as init. +// StmtWithInit reports whether op is a statement with an explicit init list. func StmtWithInit(op Op) bool { switch op { case OIF, OFOR, OFORUNTIL, OSWITCH: @@ -869,6 +884,13 @@ func ellipsisIf(b bool) string { // Nodes +// Format implements formatting for a Nodes. +// The valid formats are: +// +// %v Go syntax, semicolon-separated +// %.v Go syntax, comma-separated +// %+v Debug syntax, as in DumpList. +// func (l Nodes) Format(s fmt.State, verb rune) { if s.Flag('+') && verb == 'v' { // %+v is DumpList output @@ -896,19 +918,22 @@ func (l Nodes) Format(s fmt.State, verb rune) { // Dump +// Dump prints the message s followed by a debug dump of n. func Dump(s string, n Node) { fmt.Printf("%s [%p]%+v", s, n, n) } -func DumpList(s string, l Nodes) { +// DumpList prints the message s followed by a debug dump of each node in the list. +func DumpList(s string, list Nodes) { var buf bytes.Buffer - FDumpList(&buf, s, l) + FDumpList(&buf, s, list) os.Stdout.Write(buf.Bytes()) } -func FDumpList(w io.Writer, s string, l Nodes) { +// FDumpList prints to w the message s followed by a debug dump of each node in the list. 
+func FDumpList(w io.Writer, s string, list Nodes) { io.WriteString(w, s) - dumpNodes(w, l, 1) + dumpNodes(w, list, 1) io.WriteString(w, "\n") } diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go index decb843465e54..55fce31088008 100644 --- a/src/cmd/compile/internal/ssa/export_test.go +++ b/src/cmd/compile/internal/ssa/export_test.go @@ -137,7 +137,6 @@ func init() { // Initialize just enough of the universe and the types package to make our tests function. // TODO(josharian): move universe initialization to the types package, // so this test setup can share it. - types.InstallTypeFormats() types.Dowidth = func(t *types.Type) {} for _, typ := range [...]struct { diff --git a/src/cmd/compile/internal/types/fmt.go b/src/cmd/compile/internal/types/fmt.go index 4f36e4c3933b1..d63f7a4f8d4a1 100644 --- a/src/cmd/compile/internal/types/fmt.go +++ b/src/cmd/compile/internal/types/fmt.go @@ -15,14 +15,16 @@ import ( "cmd/compile/internal/base" ) -// builtinpkg is a fake package that declares the universe block. +// BuiltinPkg is a fake package that declares the universe block. var BuiltinPkg *Pkg -var LocalPkg *Pkg // package being compiled +// LocalPkg is the package being compiled. +var LocalPkg *Pkg +// BlankSym is the blank (_) symbol. var BlankSym *Sym -// origSym returns the original symbol written by the user. +// OrigSym returns the original symbol written by the user. func OrigSym(s *Sym) *Sym { if s == nil { return nil @@ -47,84 +49,36 @@ func OrigSym(s *Sym) *Sym { return s } -// Sym - // numImport tracks how often a package with a given name is imported. // It is used to provide a better error message (by using the package // path to disambiguate) if a package that appears multiple times with // the same name appears in an error message. var NumImport = make(map[string]int) -// Format conversions: -// TODO(gri) verify these; eliminate those not used anymore -// -// %v Op Node opcodes -// Flags: #: print Go syntax (automatic unless mode == FDbg) -// -// %j *Node Node details -// Flags: 0: suppresses things not relevant until walk -// -// %v *Val Constant values -// -// %v *types.Sym Symbols -// %S unqualified identifier in any mode -// Flags: +,- #: mode (see below) -// 0: in export mode: unqualified identifier if exported, qualified if not -// -// %v *types.Type Types -// %S omit "func" and receiver in function types -// %L definition instead of name. -// Flags: +,- #: mode (see below) -// ' ' (only in -/Sym mode) print type identifiers wit package name instead of prefix. -// -// %v *Node Nodes -// %S (only in +/debug mode) suppress recursion -// %L (only in Error mode) print "foo (type Bar)" -// Flags: +,- #: mode (see below) -// -// %v Nodes Node lists -// Flags: those of *Node -// .: separate items with ',' instead of ';' - -// *types.Sym, *types.Type, and *Node types use the flags below to set the format mode - -// The mode flags '+', '-', and '#' are sticky; they persist through -// recursions of *Node, *types.Type, and *types.Sym values. The ' ' flag is -// sticky only on *types.Type recursions and only used in %-/*types.Sym mode. 
-// -// Example: given a *types.Sym: %+v %#v %-v print an identifier properly qualified for debug/export/internal mode - -// Useful format combinations: -// TODO(gri): verify these -// -// *Node, Nodes: -// %+v multiline recursive debug dump of *Node/Nodes -// %+S non-recursive debug dump -// -// *Node: -// %#v Go format -// %L "foo (type Bar)" for error messages -// -// *types.Type: -// %#v Go format -// %#L type definition instead of name -// %#S omit "func" and receiver in function signature -// -// %-v type identifiers -// %-S type identifiers without "func" and arg names in type signatures (methodsym) -// %- v type identifiers with package name instead of prefix (typesym, dcommontype, typehash) - +// fmtMode represents the kind of printing being done. +// The default is regular Go syntax (fmtGo). +// fmtDebug is like fmtGo but for debugging dumps and prints the type kind too. +// fmtTypeID and fmtTypeIDName are for generating various unique representations +// of types used in hashes and the linker. type fmtMode int const ( fmtGo fmtMode = iota fmtDebug fmtTypeID - fmtTypeIDName // same as FTypeId, but use package name instead of prefix + fmtTypeIDName ) -// "%S" suppresses qualifying with package -func symFormat(s *Sym, f fmt.State, verb rune) { +// Sym + +// Format implements formatting for a Sym. +// The valid formats are: +// +// %v Go syntax: Name for symbols in the local package, PkgName.Name for imported symbols. +// %+v Debug syntax: always include PkgName. prefix even for local names. +// %S Short syntax: Name only, no matter what. +// +func (s *Sym) Format(f fmt.State, verb rune) { mode := fmtGo switch verb { case 'v', 'S': @@ -138,6 +92,10 @@ func symFormat(s *Sym, f fmt.State, verb rune) { } } +func (s *Sym) String() string { + return sconv(s, 0, fmtGo) +} + // See #16897 for details about performance implications // before changing the implementation of sconv. func sconv(s *Sym, verb rune, mode fmtMode) string { @@ -261,26 +219,16 @@ var fmtBufferPool = sync.Pool{ }, } -func InstallTypeFormats() { - SymString = func(s *Sym) string { - return sconv(s, 0, fmtGo) - } - TypeString = func(t *Type) string { - return tconv(t, 0, fmtGo) - } - TypeShortString = func(t *Type) string { - return tconv(t, 0, fmtTypeID) - } - TypeLongString = func(t *Type) string { - return tconv(t, 0, fmtTypeIDName) - } - FormatSym = symFormat - FormatType = typeFormat -} - -// "%L" print definition, not name -// "%S" omit 'func' and receiver from function types, short type names -func typeFormat(t *Type, s fmt.State, verb rune) { +// Format implements formatting for a Type. +// The valid formats are: +// +// %v Go syntax +// %+v Debug syntax: Go syntax with a KIND- prefix for all but builtins. +// %L Go syntax for underlying type if t is named +// %S short Go syntax: drop leading "func" in function type +// %-S special case for method receiver symbol +// +func (t *Type) Format(s fmt.State, verb rune) { mode := fmtGo switch verb { case 'v', 'S', 'L': @@ -296,6 +244,25 @@ func typeFormat(t *Type, s fmt.State, verb rune) { } } +// String returns the Go syntax for the type t. +func (t *Type) String() string { + return tconv(t, 0, fmtGo) +} + +// ShortString generates a short description of t. +// It is used in autogenerated method names, reflection, +// and itab names. +func (t *Type) ShortString() string { + return tconv(t, 0, fmtTypeID) +} + +// LongString generates a complete description of t. +// It is useful for reflection, +// or when a unique fingerprint or hash of a type is required. 
+func (t *Type) LongString() string { + return tconv(t, 0, fmtTypeIDName) +} + func tconv(t *Type, verb rune, mode fmtMode) string { buf := fmtBufferPool.Get().(*bytes.Buffer) buf.Reset() diff --git a/src/cmd/compile/internal/types/utils.go b/src/cmd/compile/internal/types/utils.go index a1be77eef1040..531f3ea1cab9e 100644 --- a/src/cmd/compile/internal/types/utils.go +++ b/src/cmd/compile/internal/types/utils.go @@ -6,7 +6,6 @@ package types import ( "cmd/internal/obj" - "fmt" ) const BADWIDTH = -1000000000 @@ -15,49 +14,11 @@ const BADWIDTH = -1000000000 // They are here to break import cycles. // TODO(gri) eliminate these dependencies. var ( - Widthptr int - Dowidth func(*Type) - SymString func(*Sym) string - TypeString func(*Type) string - TypeShortString func(*Type) string - TypeLongString func(*Type) string - FormatSym func(*Sym, fmt.State, rune) - FormatType func(*Type, fmt.State, rune) - TypeLinkSym func(*Type) *obj.LSym + Widthptr int + Dowidth func(*Type) + TypeLinkSym func(*Type) *obj.LSym ) -func (s *Sym) String() string { - return SymString(s) -} - -func (sym *Sym) Format(s fmt.State, verb rune) { - FormatSym(sym, s, verb) -} - -func (t *Type) String() string { - // The implementation - // must handle recursive types correctly. - return TypeString(t) -} - -// ShortString generates a short description of t. -// It is used in autogenerated method names, reflection, -// and itab names. -func (t *Type) ShortString() string { - return TypeShortString(t) -} - -// LongString generates a complete description of t. -// It is useful for reflection, -// or when a unique fingerprint or hash of a type is required. -func (t *Type) LongString() string { - return TypeLongString(t) -} - -func (t *Type) Format(s fmt.State, verb rune) { - FormatType(t, s, verb) -} - type bitset8 uint8 func (f *bitset8) set(mask uint8, b bool) { From 61889ba68098fa0e79e0b182f3b8c38b69c9b36c Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sun, 6 Dec 2020 14:33:06 -0500 Subject: [PATCH 136/474] [dev.regabi] cmd/compile: simplify fmtmap The format map is going to keep growing as we add more use of concrete node types. Stop that by reporting all Node implementations as Node. Also, there's little point to reporting uses of %v, %p, %T, nor to reporting formatting of basic types like int and []byte. Remove those too. (Vet takes care of mistakes involving basic types now.) Passes buildall w/ toolstash -cmp. Change-Id: Ia9fb39b401c29bf0c76ffebaa24836c70acd773f Reviewed-on: https://go-review.googlesource.com/c/go/+/275781 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/fmt_test.go | 18 ++- src/cmd/compile/fmtmap_test.go | 242 ++++++++------------------------- 2 files changed, 72 insertions(+), 188 deletions(-) diff --git a/src/cmd/compile/fmt_test.go b/src/cmd/compile/fmt_test.go index 6625ccf5e24a8..6398a84f8f07f 100644 --- a/src/cmd/compile/fmt_test.go +++ b/src/cmd/compile/fmt_test.go @@ -125,6 +125,12 @@ func TestFormats(t *testing.T) { typ := p.types[index] format := typ + " " + in // e.g., "*Node %n" + // Do not bother reporting basic types, nor %v, %T, %p. + // Vet handles basic types, and those three formats apply to all types. + if !strings.Contains(typ, ".") || (in == "%v" || in == "%T" || in == "%p") { + return in + } + // check if format is known out, known := knownFormats[format] @@ -413,7 +419,17 @@ func nodeString(n ast.Node) string { // typeString returns a string representation of n. 
func typeString(typ types.Type) string { - return filepath.ToSlash(typ.String()) + s := filepath.ToSlash(typ.String()) + + // Report all the concrete IR types as Node, to shorten fmtmap. + const ir = "cmd/compile/internal/ir." + if s == "*"+ir+"Name" || s == "*"+ir+"Func" || s == "*"+ir+"Decl" || + s == ir+"Ntype" || s == ir+"Expr" || s == ir+"Stmt" || + strings.HasPrefix(s, "*"+ir) && (strings.HasSuffix(s, "Expr") || strings.HasSuffix(s, "Stmt")) { + return "cmd/compile/internal/ir.Node" + } + + return s } // stringLit returns the unquoted string value and true if diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go index 5dd30e619baf6..ca6f1c302e091 100644 --- a/src/cmd/compile/fmtmap_test.go +++ b/src/cmd/compile/fmtmap_test.go @@ -20,191 +20,59 @@ package main_test // An absent entry means that the format is not recognized as valid. // An empty new format means that the format should remain unchanged. var knownFormats = map[string]string{ - "*bytes.Buffer %s": "", - "*cmd/compile/internal/gc.EscLocation %v": "", - "*cmd/compile/internal/ir.Func %+v": "", - "*cmd/compile/internal/ir.Func %L": "", - "*cmd/compile/internal/ir.Func %v": "", - "*cmd/compile/internal/ir.Name %#v": "", - "*cmd/compile/internal/ir.Name %+v": "", - "*cmd/compile/internal/ir.Name %L": "", - "*cmd/compile/internal/ir.Name %v": "", - "*cmd/compile/internal/ir.SliceExpr %v": "", - "*cmd/compile/internal/ssa.Block %s": "", - "*cmd/compile/internal/ssa.Block %v": "", - "*cmd/compile/internal/ssa.Func %s": "", - "*cmd/compile/internal/ssa.Func %v": "", - "*cmd/compile/internal/ssa.Register %s": "", - "*cmd/compile/internal/ssa.Register %v": "", - "*cmd/compile/internal/ssa.SparseTreeNode %v": "", - "*cmd/compile/internal/ssa.Value %s": "", - "*cmd/compile/internal/ssa.Value %v": "", - "*cmd/compile/internal/ssa.sparseTreeMapEntry %v": "", - "*cmd/compile/internal/types.Field %p": "", - "*cmd/compile/internal/types.Field %v": "", - "*cmd/compile/internal/types.Sym %+v": "", - "*cmd/compile/internal/types.Sym %S": "", - "*cmd/compile/internal/types.Sym %p": "", - "*cmd/compile/internal/types.Sym %v": "", - "*cmd/compile/internal/types.Type %#L": "", - "*cmd/compile/internal/types.Type %#v": "", - "*cmd/compile/internal/types.Type %+v": "", - "*cmd/compile/internal/types.Type %-S": "", - "*cmd/compile/internal/types.Type %0S": "", - "*cmd/compile/internal/types.Type %L": "", - "*cmd/compile/internal/types.Type %S": "", - "*cmd/compile/internal/types.Type %p": "", - "*cmd/compile/internal/types.Type %s": "", - "*cmd/compile/internal/types.Type %v": "", - "*cmd/internal/obj.Addr %v": "", - "*cmd/internal/obj.LSym %v": "", - "*math/big.Float %f": "", - "*math/big.Int %s": "", - "[16]byte %x": "", - "[]*cmd/compile/internal/ir.Name %v": "", - "[]*cmd/compile/internal/ssa.Block %v": "", - "[]*cmd/compile/internal/ssa.Value %v": "", - "[][]string %q": "", - "[]byte %s": "", - "[]byte %x": "", - "[]cmd/compile/internal/ssa.Edge %v": "", - "[]cmd/compile/internal/ssa.ID %v": "", - "[]cmd/compile/internal/ssa.posetNode %v": "", - "[]cmd/compile/internal/ssa.posetUndo %v": "", - "[]cmd/compile/internal/syntax.token %s": "", - "[]string %v": "", - "[]uint32 %v": "", - "bool %v": "", - "byte %08b": "", - "byte %c": "", - "byte %q": "", - "byte %v": "", - "cmd/compile/internal/arm.shift %d": "", - "cmd/compile/internal/gc.initKind %d": "", - "cmd/compile/internal/gc.itag %v": "", - "cmd/compile/internal/ir.Class %d": "", - "cmd/compile/internal/ir.Class %v": "", - "cmd/compile/internal/ir.Node %+v": "", - 
"cmd/compile/internal/ir.Node %L": "", - "cmd/compile/internal/ir.Node %S": "", - "cmd/compile/internal/ir.Node %p": "", - "cmd/compile/internal/ir.Node %v": "", - "cmd/compile/internal/ir.Nodes %#v": "", - "cmd/compile/internal/ir.Nodes %+v": "", - "cmd/compile/internal/ir.Nodes %.v": "", - "cmd/compile/internal/ir.Nodes %v": "", - "cmd/compile/internal/ir.Ntype %v": "", - "cmd/compile/internal/ir.Op %#v": "", - "cmd/compile/internal/ir.Op %+v": "", - "cmd/compile/internal/ir.Op %v": "", - "cmd/compile/internal/ssa.BranchPrediction %d": "", - "cmd/compile/internal/ssa.Edge %v": "", - "cmd/compile/internal/ssa.ID %d": "", - "cmd/compile/internal/ssa.ID %v": "", - "cmd/compile/internal/ssa.LocalSlot %s": "", - "cmd/compile/internal/ssa.LocalSlot %v": "", - "cmd/compile/internal/ssa.Location %s": "", - "cmd/compile/internal/ssa.Op %s": "", - "cmd/compile/internal/ssa.Op %v": "", - "cmd/compile/internal/ssa.Sym %v": "", - "cmd/compile/internal/ssa.ValAndOff %s": "", - "cmd/compile/internal/ssa.domain %v": "", - "cmd/compile/internal/ssa.flagConstant %s": "", - "cmd/compile/internal/ssa.posetNode %v": "", - "cmd/compile/internal/ssa.posetTestOp %v": "", - "cmd/compile/internal/ssa.rbrank %d": "", - "cmd/compile/internal/ssa.regMask %d": "", - "cmd/compile/internal/ssa.register %d": "", - "cmd/compile/internal/ssa.relation %s": "", - "cmd/compile/internal/syntax.Error %q": "", - "cmd/compile/internal/syntax.Expr %#v": "", - "cmd/compile/internal/syntax.LitKind %d": "", - "cmd/compile/internal/syntax.Node %T": "", - "cmd/compile/internal/syntax.Operator %s": "", - "cmd/compile/internal/syntax.Pos %s": "", - "cmd/compile/internal/syntax.Pos %v": "", - "cmd/compile/internal/syntax.position %s": "", - "cmd/compile/internal/syntax.token %q": "", - "cmd/compile/internal/syntax.token %s": "", - "cmd/compile/internal/types.Kind %d": "", - "cmd/compile/internal/types.Kind %s": "", - "cmd/compile/internal/types.Kind %v": "", - "cmd/compile/internal/types.Object %v": "", - "cmd/internal/obj.ABI %v": "", - "error %v": "", - "float64 %.2f": "", - "float64 %.3f": "", - "float64 %g": "", - "go/constant.Kind %v": "", - "go/constant.Value %#v": "", - "go/constant.Value %v": "", - "int %#x": "", - "int %-12d": "", - "int %-6d": "", - "int %-8o": "", - "int %02d": "", - "int %6d": "", - "int %c": "", - "int %d": "", - "int %v": "", - "int %x": "", - "int16 %d": "", - "int16 %x": "", - "int32 %#x": "", - "int32 %d": "", - "int32 %v": "", - "int32 %x": "", - "int64 %#x": "", - "int64 %-10d": "", - "int64 %.5d": "", - "int64 %d": "", - "int64 %v": "", - "int64 %x": "", - "int8 %d": "", - "int8 %v": "", - "int8 %x": "", - "interface{} %#v": "", - "interface{} %T": "", - "interface{} %p": "", - "interface{} %q": "", - "interface{} %s": "", - "interface{} %v": "", - "map[cmd/compile/internal/ir.Node]*cmd/compile/internal/ssa.Value %v": "", - "map[cmd/compile/internal/ir.Node][]cmd/compile/internal/ir.Node %v": "", - "map[cmd/compile/internal/ssa.ID]uint32 %v": "", - "map[int64]uint32 %v": "", - "math/big.Accuracy %s": "", - "reflect.Type %s": "", - "reflect.Type %v": "", - "rune %#U": "", - "rune %c": "", - "rune %q": "", - "string %-*s": "", - "string %-16s": "", - "string %-6s": "", - "string %q": "", - "string %s": "", - "string %v": "", - "time.Duration %d": "", - "time.Duration %v": "", - "uint %04x": "", - "uint %5d": "", - "uint %d": "", - "uint %x": "", - "uint16 %d": "", - "uint16 %x": "", - "uint32 %#U": "", - "uint32 %#x": "", - "uint32 %d": "", - "uint32 %v": "", - "uint32 %x": "", - "uint64 %08x": "", - 
"uint64 %b": "", - "uint64 %d": "", - "uint64 %x": "", - "uint8 %#x": "", - "uint8 %d": "", - "uint8 %v": "", - "uint8 %x": "", - "uintptr %d": "", + "*bytes.Buffer %s": "", + "*cmd/compile/internal/ssa.Block %s": "", + "*cmd/compile/internal/ssa.Func %s": "", + "*cmd/compile/internal/ssa.Register %s": "", + "*cmd/compile/internal/ssa.Value %s": "", + "*cmd/compile/internal/types.Sym %+v": "", + "*cmd/compile/internal/types.Sym %S": "", + "*cmd/compile/internal/types.Type %#L": "", + "*cmd/compile/internal/types.Type %#v": "", + "*cmd/compile/internal/types.Type %+v": "", + "*cmd/compile/internal/types.Type %-S": "", + "*cmd/compile/internal/types.Type %0S": "", + "*cmd/compile/internal/types.Type %L": "", + "*cmd/compile/internal/types.Type %S": "", + "*cmd/compile/internal/types.Type %s": "", + "*math/big.Float %f": "", + "*math/big.Int %s": "", + "[]cmd/compile/internal/syntax.token %s": "", + "cmd/compile/internal/arm.shift %d": "", + "cmd/compile/internal/gc.initKind %d": "", + "cmd/compile/internal/ir.Class %d": "", + "cmd/compile/internal/ir.Node %#v": "", + "cmd/compile/internal/ir.Node %+v": "", + "cmd/compile/internal/ir.Node %L": "", + "cmd/compile/internal/ir.Node %S": "", + "cmd/compile/internal/ir.Nodes %#v": "", + "cmd/compile/internal/ir.Nodes %+v": "", + "cmd/compile/internal/ir.Nodes %.v": "", + "cmd/compile/internal/ir.Op %#v": "", + "cmd/compile/internal/ir.Op %+v": "", + "cmd/compile/internal/ssa.BranchPrediction %d": "", + "cmd/compile/internal/ssa.ID %d": "", + "cmd/compile/internal/ssa.LocalSlot %s": "", + "cmd/compile/internal/ssa.Location %s": "", + "cmd/compile/internal/ssa.Op %s": "", + "cmd/compile/internal/ssa.ValAndOff %s": "", + "cmd/compile/internal/ssa.flagConstant %s": "", + "cmd/compile/internal/ssa.rbrank %d": "", + "cmd/compile/internal/ssa.regMask %d": "", + "cmd/compile/internal/ssa.register %d": "", + "cmd/compile/internal/ssa.relation %s": "", + "cmd/compile/internal/syntax.Error %q": "", + "cmd/compile/internal/syntax.Expr %#v": "", + "cmd/compile/internal/syntax.LitKind %d": "", + "cmd/compile/internal/syntax.Operator %s": "", + "cmd/compile/internal/syntax.Pos %s": "", + "cmd/compile/internal/syntax.position %s": "", + "cmd/compile/internal/syntax.token %q": "", + "cmd/compile/internal/syntax.token %s": "", + "cmd/compile/internal/types.Kind %d": "", + "cmd/compile/internal/types.Kind %s": "", + "go/constant.Value %#v": "", + "math/big.Accuracy %s": "", + "reflect.Type %s": "", + "time.Duration %d": "", } From 724374f85985d6ce5e5a8a32b4b9aea22ead6dc3 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sun, 6 Dec 2020 14:53:38 -0500 Subject: [PATCH 137/474] [dev.regabi] cmd/compile: rewrite stale format strings On ir.Node, ir.Nodes, and ir.Op, # is ignored, so %#v is %v. On ir.Node, %S is the same as %v. On types.Type, # is ignored, so %#L is %L, %#v is %v. On types.Type, 0 is ignored, so %0S is %S. Rewrite all these using go test cmd/compile -r, plus a few multiline formats mentioning %0S on types updated by hand. Now the formats used in the compiler match the documentation for the format methods, a minor miracle. Passes buildall w/ toolstash -cmp. 
Change-Id: I3d4a3fae543145a68da13eede91166632c5b1ceb Reviewed-on: https://go-review.googlesource.com/c/go/+/275782 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/fmtmap_test.go | 7 ------ src/cmd/compile/internal/gc/closure.go | 2 +- src/cmd/compile/internal/gc/escape.go | 6 ++--- src/cmd/compile/internal/gc/iimport.go | 4 ++-- src/cmd/compile/internal/gc/inl.go | 6 ++--- src/cmd/compile/internal/gc/pgen_test.go | 4 ++-- src/cmd/compile/internal/gc/subr.go | 4 ++-- src/cmd/compile/internal/gc/typecheck.go | 6 ++--- src/cmd/compile/internal/gc/unsafe.go | 2 +- src/cmd/compile/internal/gc/walk.go | 2 +- src/cmd/compile/internal/ir/fmt.go | 28 ++++++++++-------------- 11 files changed, 29 insertions(+), 42 deletions(-) diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go index ca6f1c302e091..756320285ca99 100644 --- a/src/cmd/compile/fmtmap_test.go +++ b/src/cmd/compile/fmtmap_test.go @@ -27,11 +27,8 @@ var knownFormats = map[string]string{ "*cmd/compile/internal/ssa.Value %s": "", "*cmd/compile/internal/types.Sym %+v": "", "*cmd/compile/internal/types.Sym %S": "", - "*cmd/compile/internal/types.Type %#L": "", - "*cmd/compile/internal/types.Type %#v": "", "*cmd/compile/internal/types.Type %+v": "", "*cmd/compile/internal/types.Type %-S": "", - "*cmd/compile/internal/types.Type %0S": "", "*cmd/compile/internal/types.Type %L": "", "*cmd/compile/internal/types.Type %S": "", "*cmd/compile/internal/types.Type %s": "", @@ -41,14 +38,10 @@ var knownFormats = map[string]string{ "cmd/compile/internal/arm.shift %d": "", "cmd/compile/internal/gc.initKind %d": "", "cmd/compile/internal/ir.Class %d": "", - "cmd/compile/internal/ir.Node %#v": "", "cmd/compile/internal/ir.Node %+v": "", "cmd/compile/internal/ir.Node %L": "", - "cmd/compile/internal/ir.Node %S": "", - "cmd/compile/internal/ir.Nodes %#v": "", "cmd/compile/internal/ir.Nodes %+v": "", "cmd/compile/internal/ir.Nodes %.v": "", - "cmd/compile/internal/ir.Op %#v": "", "cmd/compile/internal/ir.Op %+v": "", "cmd/compile/internal/ssa.BranchPrediction %d": "", "cmd/compile/internal/ssa.ID %d": "", diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index 01e5a953de8d5..b56e255d10a3f 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -100,7 +100,7 @@ func typecheckclosure(clo ir.Node, top int) { if !n.Name().Captured() { n.Name().SetCaptured(true) if n.Name().Decldepth == 0 { - base.Fatalf("typecheckclosure: var %S does not have decldepth assigned", n) + base.Fatalf("typecheckclosure: var %v does not have decldepth assigned", n) } // Ignore assignments to the variable in straightline code diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index a7458ab733fb2..f317e9999cc53 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -757,7 +757,7 @@ func (e *Escape) assign(dst, src ir.Node, why string, where ir.Node) { // Filter out some no-op assignments for escape analysis. 
ignore := dst != nil && src != nil && isSelfAssign(dst, src) if ignore && base.Flag.LowerM != 0 { - base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %S", funcSym(e.curfn), where) + base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %v", funcSym(e.curfn), where) } k := e.addr(dst) @@ -1454,7 +1454,7 @@ func (e *Escape) finish(fns []*ir.Func) { if loc.escapes { if n.Op() != ir.ONAME { if base.Flag.LowerM != 0 { - base.WarnfAt(n.Pos(), "%S escapes to heap", n) + base.WarnfAt(n.Pos(), "%v escapes to heap", n) } if logopt.Enabled() { logopt.LogOpt(n.Pos(), "escape", "escape", ir.FuncName(e.curfn)) @@ -1464,7 +1464,7 @@ func (e *Escape) finish(fns []*ir.Func) { addrescapes(n) } else { if base.Flag.LowerM != 0 && n.Op() != ir.ONAME { - base.WarnfAt(n.Pos(), "%S does not escape", n) + base.WarnfAt(n.Pos(), "%v does not escape", n) } n.SetEsc(EscNone) if loc.transient { diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 859263c83f337..1f75393b3e1e5 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -713,9 +713,9 @@ func (r *importReader) doInline(fn *ir.Func) { if base.Flag.E > 0 && base.Flag.LowerM > 2 { if base.Flag.LowerM > 3 { - fmt.Printf("inl body for %v %#v: %+v\n", fn, fn.Type(), ir.AsNodes(fn.Inl.Body)) + fmt.Printf("inl body for %v %v: %+v\n", fn, fn.Type(), ir.AsNodes(fn.Inl.Body)) } else { - fmt.Printf("inl body for %v %#v: %v\n", fn, fn.Type(), ir.AsNodes(fn.Inl.Body)) + fmt.Printf("inl body for %v %v: %v\n", fn, fn.Type(), ir.AsNodes(fn.Inl.Body)) } } } diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 77fbf7c802700..f965fa6325a5a 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -90,7 +90,7 @@ func typecheckinl(fn *ir.Func) { } if base.Flag.LowerM > 2 || base.Debug.Export != 0 { - fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym(), fn, ir.AsNodes(fn.Inl.Body)) + fmt.Printf("typecheck import [%v] %L { %v }\n", fn.Sym(), fn, ir.AsNodes(fn.Inl.Body)) } savefn := Curfn @@ -219,7 +219,7 @@ func caninl(fn *ir.Func) { } if base.Flag.LowerM > 1 { - fmt.Printf("%v: can inline %#v with cost %d as: %#v { %#v }\n", ir.Line(fn), n, inlineMaxBudget-visitor.budget, fn.Type(), ir.AsNodes(n.Func().Inl.Body)) + fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, inlineMaxBudget-visitor.budget, fn.Type(), ir.AsNodes(n.Func().Inl.Body)) } else if base.Flag.LowerM != 0 { fmt.Printf("%v: can inline %v\n", ir.Line(fn), n) } @@ -816,7 +816,7 @@ func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool, // We have a function node, and it has an inlineable body. 
if base.Flag.LowerM > 1 { - fmt.Printf("%v: inlining call to %v %#v { %#v }\n", ir.Line(n), fn.Sym(), fn.Type(), ir.AsNodes(fn.Inl.Body)) + fmt.Printf("%v: inlining call to %v %v { %v }\n", ir.Line(n), fn.Sym(), fn.Type(), ir.AsNodes(fn.Inl.Body)) } else if base.Flag.LowerM != 0 { fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn) } diff --git a/src/cmd/compile/internal/gc/pgen_test.go b/src/cmd/compile/internal/gc/pgen_test.go index 473df82a0d709..ad8b87c6f539f 100644 --- a/src/cmd/compile/internal/gc/pgen_test.go +++ b/src/cmd/compile/internal/gc/pgen_test.go @@ -145,11 +145,11 @@ func TestCmpstackvar(t *testing.T) { for _, d := range testdata { got := cmpstackvarlt(d.a, d.b) if got != d.lt { - t.Errorf("want %#v < %#v", d.a, d.b) + t.Errorf("want %v < %v", d.a, d.b) } // If we expect a < b to be true, check that b < a is false. if d.lt && cmpstackvarlt(d.b, d.a) { - t.Errorf("unexpected %#v < %#v", d.b, d.a) + t.Errorf("unexpected %v < %v", d.b, d.a) } } } diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index dffebc58f2d17..e05a124b29cef 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -291,12 +291,12 @@ func assignop(src, dst *types.Type) (ir.Op, string) { why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", src, dst, missing.Sym) } else if have != nil && have.Sym == missing.Sym { why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+ - "\t\thave %v%0S\n\t\twant %v%0S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) + "\t\thave %v%S\n\t\twant %v%S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) } else if ptr != 0 { why = fmt.Sprintf(":\n\t%v does not implement %v (%v method has pointer receiver)", src, dst, missing.Sym) } else if have != nil { why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)\n"+ - "\t\thave %v%0S\n\t\twant %v%0S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) + "\t\thave %v%S\n\t\twant %v%S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) } else { why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)", src, dst, missing.Sym) } diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 85094dbebcad7..a7c05c6c0fa62 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -58,7 +58,7 @@ func tracePrint(title string, n ir.Node) func(np *ir.Node) { skipDowidthForTracing = true defer func() { skipDowidthForTracing = false }() - fmt.Printf("%s: %s=> %p %s %v tc=%d type=%#L\n", pos, indent, n, op, n, tc, typ) + fmt.Printf("%s: %s=> %p %s %v tc=%d type=%L\n", pos, indent, n, op, n, tc, typ) } } @@ -1039,12 +1039,12 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { if !implements(n.Type(), t, &missing, &have, &ptr) { if have != nil && have.Sym == missing.Sym { base.Errorf("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+ - "\t\thave %v%0S\n\t\twant %v%0S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) + "\t\thave %v%S\n\t\twant %v%S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) } else if ptr != 0 { base.Errorf("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", n.Type(), t, missing.Sym) } else if have != nil { base.Errorf("impossible type assertion:\n\t%v does not implement %v 
(missing %v method)\n"+ - "\t\thave %v%0S\n\t\twant %v%0S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) + "\t\thave %v%S\n\t\twant %v%S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) } else { base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)", n.Type(), t, missing.Sym) } diff --git a/src/cmd/compile/internal/gc/unsafe.go b/src/cmd/compile/internal/gc/unsafe.go index 678924b229495..d7ae5d7aaa696 100644 --- a/src/cmd/compile/internal/gc/unsafe.go +++ b/src/cmd/compile/internal/gc/unsafe.go @@ -70,7 +70,7 @@ func evalunsafe(n ir.Node) int64 { v += r.Offset() default: ir.Dump("unsafenmagic", n.Left()) - base.Fatalf("impossible %#v node after dot insertion", r.Op()) + base.Fatalf("impossible %v node after dot insertion", r.Op()) } } return v diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 346817e589fe5..4189d1a721735 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -2154,7 +2154,7 @@ func reorder3(all []ir.Node) []ir.Node { switch l.Op() { default: - base.Fatalf("reorder3 unexpected lvalue %#v", l.Op()) + base.Fatalf("reorder3 unexpected lvalue %v", l.Op()) case ir.ONAME: break diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 85c6b218e2a32..68e425bdaa287 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -125,12 +125,6 @@ func (o Op) Format(s fmt.State, verb rune) { // %+v Debug syntax, as in Dump. // func FmtNode(n Node, s fmt.State, verb rune) { - // TODO(rsc): Remove uses of %#v, which behaves just like %v. - // TODO(rsc): Remove uses of %S, which behaves just like %v. - if verb == 'S' { - verb = 'v' - } - // %+v prints Dump. // Otherwise we print Go syntax. 
if s.Flag('+') && verb == 'v' { @@ -355,7 +349,7 @@ func stmtFmt(n Node, s fmt.State) { break } - fmt.Fprintf(s, "%v %#v= %v", n.Left(), n.SubOp(), n.Right()) + fmt.Fprintf(s, "%v %v= %v", n.Left(), n.SubOp(), n.Right()) case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV: if n.Colas() && !complexinit { @@ -446,7 +440,7 @@ func stmtFmt(n Node, s fmt.State) { break } - fmt.Fprintf(s, "%#v", n.Op()) + fmt.Fprintf(s, "%v", n.Op()) if simpleinit { fmt.Fprintf(s, " %v;", n.Init().First()) } @@ -466,9 +460,9 @@ func stmtFmt(n Node, s fmt.State) { case OBREAK, OCONTINUE, OGOTO, OFALL: if n.Sym() != nil { - fmt.Fprintf(s, "%#v %v", n.Op(), n.Sym()) + fmt.Fprintf(s, "%v %v", n.Op(), n.Sym()) } else { - fmt.Fprintf(s, "%#v", n.Op()) + fmt.Fprintf(s, "%v", n.Op()) } case OLABEL: @@ -754,9 +748,9 @@ func exprFmt(n Node, s fmt.State, prec int) { case OCOMPLEX, OCOPY: if n.Left() != nil { - fmt.Fprintf(s, "%#v(%v, %v)", n.Op(), n.Left(), n.Right()) + fmt.Fprintf(s, "%v(%v, %v)", n.Op(), n.Left(), n.Right()) } else { - fmt.Fprintf(s, "%#v(%.v)", n.Op(), n.List()) + fmt.Fprintf(s, "%v(%.v)", n.Op(), n.List()) } case OCONV, @@ -795,14 +789,14 @@ func exprFmt(n Node, s fmt.State, prec int) { OPRINT, OPRINTN: if n.Left() != nil { - fmt.Fprintf(s, "%#v(%v)", n.Op(), n.Left()) + fmt.Fprintf(s, "%v(%v)", n.Op(), n.Left()) return } if n.IsDDD() { - fmt.Fprintf(s, "%#v(%.v...)", n.Op(), n.List()) + fmt.Fprintf(s, "%v(%.v...)", n.Op(), n.List()) return } - fmt.Fprintf(s, "%#v(%.v)", n.Op(), n.List()) + fmt.Fprintf(s, "%v(%.v)", n.Op(), n.List()) case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG: exprFmt(n.Left(), s, nprec) @@ -832,7 +826,7 @@ func exprFmt(n Node, s fmt.State, prec int) { case OPLUS, ONEG, OADDR, OBITNOT, ODEREF, ONOT, ORECV: // Unary - fmt.Fprintf(s, "%#v", n.Op()) + fmt.Fprintf(s, "%v", n.Op()) if n.Left() != nil && n.Left().Op() == n.Op() { fmt.Fprint(s, " ") } @@ -860,7 +854,7 @@ func exprFmt(n Node, s fmt.State, prec int) { OSUB, OXOR: exprFmt(n.Left(), s, nprec) - fmt.Fprintf(s, " %#v ", n.Op()) + fmt.Fprintf(s, " %v ", n.Op()) exprFmt(n.Right(), s, nprec+1) case OADDSTR: From 2de0af3b1b09e11b71ec4c58bb406be7abf112b0 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sun, 6 Dec 2020 15:49:58 -0500 Subject: [PATCH 138/474] [dev.regabi] cmd/compile: prepare mknode for rename of Func.body The next CL will rename Func.body to Func.Body_. At some point in the future we will rename it to Func.Body. Make the generator not get confused. Passes buildall w/ toolstash -cmp. 
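The updated check normalizes the field name before comparing, so every
spelling the field will have over its lifetime still matches. A
standalone sketch of the normalization (the helper name is made up for
illustration):

    package main

    import (
        "fmt"
        "strings"
    )

    // isBodyField reports whether a struct field name is one of the
    // expected spellings of the body field: "body", "Body_", or "Body".
    func isBodyField(name string) bool {
        return strings.ToLower(strings.TrimSuffix(name, "_")) == "body"
    }

    func main() {
        for _, name := range []string{"body", "Body_", "Body", "Nbody"} {
            fmt.Printf("%-5s %v\n", name, isBodyField(name))
        }
        // body  true
        // Body_ true
        // Body  true
        // Nbody false
    }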
Change-Id: Iee3f4915889a8287377bf3304d5b9250a909477e Reviewed-on: https://go-review.googlesource.com/c/go/+/275783 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/mknode.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/ir/mknode.go b/src/cmd/compile/internal/ir/mknode.go index 2c007f93f1f49..978b2de5a5247 100644 --- a/src/cmd/compile/internal/ir/mknode.go +++ b/src/cmd/compile/internal/ir/mknode.go @@ -141,7 +141,7 @@ func forNodeFields(typName string, typ *types.Struct, f func(name string, is fun } switch typName { case "Func": - if v.Name() != "body" { + if strings.ToLower(strings.TrimSuffix(v.Name(), "_")) != "body" { continue } case "Name", "Pack": From 6d783e7440056ca24b57b52605def43d09d8b2a2 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sun, 6 Dec 2020 15:36:58 -0500 Subject: [PATCH 139/474] [dev.regabi] cmd/compile: export all Node fields [generated] The plan was always to export them once we remove the getters and setters, but do it a bit early, with _ suffixes as needed, so that the reflection-based ir.Dump can access the fields. Passes buildall w/ toolstash -cmp. [git-generate] cd src/cmd/compile/internal/ir rf ' mv AddStringExpr.list AddStringExpr.List_ mv BlockStmt.list BlockStmt.List_ mv CallExpr.body CallExpr.Body_ mv CaseStmt.list CaseStmt.List_ mv CaseStmt.body CaseStmt.Body_ mv ClosureExpr.fn ClosureExpr.Func_ mv CompLitExpr.list CompLitExpr.List_ mv ForStmt.body ForStmt.Body_ mv Func.body Func.Body_ mv IfStmt.body IfStmt.Body_ mv InlinedCallExpr.body InlinedCallExpr.Body_ mv RangeStmt.body RangeStmt.Body_ mv SliceExpr.list SliceExpr.List_ mv SliceHeaderExpr.lenCap SliceHeaderExpr.LenCap_ mv TypeSwitchGuard.name TypeSwitchGuard.Name_ ' go generate Change-Id: I06e65920cecbcc51bea2254f52fcd7d5c5d0dc90 Reviewed-on: https://go-review.googlesource.com/c/go/+/275784 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/expr.go | 80 +++++++++++------------ src/cmd/compile/internal/ir/func.go | 12 ++-- src/cmd/compile/internal/ir/node_gen.go | 86 ++++++++++++------------- src/cmd/compile/internal/ir/stmt.go | 78 +++++++++++----------- 4 files changed, 128 insertions(+), 128 deletions(-) diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 7165a06b25751..a74e0712b91d6 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -91,20 +91,20 @@ func toNtype(x Node) Ntype { // An AddStringExpr is a string concatenation Expr[0] + Exprs[1] + ... + Expr[len(Expr)-1]. type AddStringExpr struct { miniExpr - list Nodes + List_ Nodes } func NewAddStringExpr(pos src.XPos, list []Node) *AddStringExpr { n := &AddStringExpr{} n.pos = pos n.op = OADDSTR - n.list.Set(list) + n.List_.Set(list) return n } -func (n *AddStringExpr) List() Nodes { return n.list } -func (n *AddStringExpr) PtrList() *Nodes { return &n.list } -func (n *AddStringExpr) SetList(x Nodes) { n.list = x } +func (n *AddStringExpr) List() Nodes { return n.List_ } +func (n *AddStringExpr) PtrList() *Nodes { return &n.List_ } +func (n *AddStringExpr) SetList(x Nodes) { n.List_ = x } // An AddrExpr is an address-of expression &X. // It may end up being a normal address-of or an allocation of a composite literal. @@ -185,7 +185,7 @@ type CallExpr struct { X Node Args Nodes Rargs Nodes // TODO(rsc): Delete. - body Nodes // TODO(rsc): Delete. + Body_ Nodes // TODO(rsc): Delete. 
DDD bool Use CallUse noInline bool @@ -216,9 +216,9 @@ func (n *CallExpr) IsDDD() bool { return n.DDD } func (n *CallExpr) SetIsDDD(x bool) { n.DDD = x } func (n *CallExpr) NoInline() bool { return n.noInline } func (n *CallExpr) SetNoInline(x bool) { n.noInline = x } -func (n *CallExpr) Body() Nodes { return n.body } -func (n *CallExpr) PtrBody() *Nodes { return &n.body } -func (n *CallExpr) SetBody(x Nodes) { n.body = x } +func (n *CallExpr) Body() Nodes { return n.Body_ } +func (n *CallExpr) PtrBody() *Nodes { return &n.Body_ } +func (n *CallExpr) SetBody(x Nodes) { n.Body_ = x } func (n *CallExpr) SetOp(op Op) { switch op { @@ -255,17 +255,17 @@ func (n *CallPartExpr) SetLeft(x Node) { n.X = x } // A ClosureExpr is a function literal expression. type ClosureExpr struct { miniExpr - fn *Func + Func_ *Func } func NewClosureExpr(pos src.XPos, fn *Func) *ClosureExpr { - n := &ClosureExpr{fn: fn} + n := &ClosureExpr{Func_: fn} n.op = OCLOSURE n.pos = pos return n } -func (n *ClosureExpr) Func() *Func { return n.fn } +func (n *ClosureExpr) Func() *Func { return n.Func_ } // A ClosureRead denotes reading a variable stored within a closure struct. type ClosureRead struct { @@ -289,14 +289,14 @@ type CompLitExpr struct { miniExpr orig Node Ntype Ntype - list Nodes // initialized values + List_ Nodes // initialized values } func NewCompLitExpr(pos src.XPos, typ Ntype, list []Node) *CompLitExpr { n := &CompLitExpr{Ntype: typ} n.pos = pos n.op = OCOMPLIT - n.list.Set(list) + n.List_.Set(list) n.orig = n return n } @@ -305,9 +305,9 @@ func (n *CompLitExpr) Orig() Node { return n.orig } func (n *CompLitExpr) SetOrig(x Node) { n.orig = x } func (n *CompLitExpr) Right() Node { return n.Ntype } func (n *CompLitExpr) SetRight(x Node) { n.Ntype = toNtype(x) } -func (n *CompLitExpr) List() Nodes { return n.list } -func (n *CompLitExpr) PtrList() *Nodes { return &n.list } -func (n *CompLitExpr) SetList(x Nodes) { n.list = x } +func (n *CompLitExpr) List() Nodes { return n.List_ } +func (n *CompLitExpr) PtrList() *Nodes { return &n.List_ } +func (n *CompLitExpr) SetList(x Nodes) { n.List_ = x } func (n *CompLitExpr) SetOp(op Op) { switch op { @@ -436,7 +436,7 @@ func (n *KeyExpr) SetOp(op Op) { // An InlinedCallExpr is an inlined function call. type InlinedCallExpr struct { miniExpr - body Nodes + Body_ Nodes ReturnVars Nodes } @@ -444,14 +444,14 @@ func NewInlinedCallExpr(pos src.XPos, body, retvars []Node) *InlinedCallExpr { n := &InlinedCallExpr{} n.pos = pos n.op = OINLCALL - n.body.Set(body) + n.Body_.Set(body) n.ReturnVars.Set(retvars) return n } -func (n *InlinedCallExpr) Body() Nodes { return n.body } -func (n *InlinedCallExpr) PtrBody() *Nodes { return &n.body } -func (n *InlinedCallExpr) SetBody(x Nodes) { n.body = x } +func (n *InlinedCallExpr) Body() Nodes { return n.Body_ } +func (n *InlinedCallExpr) PtrBody() *Nodes { return &n.Body_ } +func (n *InlinedCallExpr) SetBody(x Nodes) { n.Body_ = x } func (n *InlinedCallExpr) Rlist() Nodes { return n.ReturnVars } func (n *InlinedCallExpr) PtrRlist() *Nodes { return &n.ReturnVars } func (n *InlinedCallExpr) SetRlist(x Nodes) { n.ReturnVars = x } @@ -617,8 +617,8 @@ func (*SelectorExpr) CanBeNtype() {} // A SliceExpr is a slice expression X[Low:High] or X[Low:High:Max]. 
type SliceExpr struct { miniExpr - X Node - list Nodes // TODO(rsc): Use separate Nodes + X Node + List_ Nodes // TODO(rsc): Use separate Nodes } func NewSliceExpr(pos src.XPos, op Op, x Node) *SliceExpr { @@ -630,9 +630,9 @@ func NewSliceExpr(pos src.XPos, op Op, x Node) *SliceExpr { func (n *SliceExpr) Left() Node { return n.X } func (n *SliceExpr) SetLeft(x Node) { n.X = x } -func (n *SliceExpr) List() Nodes { return n.list } -func (n *SliceExpr) PtrList() *Nodes { return &n.list } -func (n *SliceExpr) SetList(x Nodes) { n.list = x } +func (n *SliceExpr) List() Nodes { return n.List_ } +func (n *SliceExpr) PtrList() *Nodes { return &n.List_ } +func (n *SliceExpr) SetList(x Nodes) { n.List_ = x } func (n *SliceExpr) SetOp(op Op) { switch op { @@ -646,16 +646,16 @@ func (n *SliceExpr) SetOp(op Op) { // SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max]. // n must be a slice expression. max is nil if n is a simple slice expression. func (n *SliceExpr) SliceBounds() (low, high, max Node) { - if n.list.Len() == 0 { + if n.List_.Len() == 0 { return nil, nil, nil } switch n.Op() { case OSLICE, OSLICEARR, OSLICESTR: - s := n.list.Slice() + s := n.List_.Slice() return s[0], s[1], nil case OSLICE3, OSLICE3ARR: - s := n.list.Slice() + s := n.List_.Slice() return s[0], s[1], s[2] } base.Fatalf("SliceBounds op %v: %v", n.Op(), n) @@ -670,24 +670,24 @@ func (n *SliceExpr) SetSliceBounds(low, high, max Node) { if max != nil { base.Fatalf("SetSliceBounds %v given three bounds", n.Op()) } - s := n.list.Slice() + s := n.List_.Slice() if s == nil { if low == nil && high == nil { return } - n.list.Set2(low, high) + n.List_.Set2(low, high) return } s[0] = low s[1] = high return case OSLICE3, OSLICE3ARR: - s := n.list.Slice() + s := n.List_.Slice() if s == nil { if low == nil && high == nil && max == nil { return } - n.list.Set3(low, high, max) + n.List_.Set3(low, high, max) return } s[0] = low @@ -714,8 +714,8 @@ func (o Op) IsSlice3() bool { // A SliceHeader expression constructs a slice header from its parts. type SliceHeaderExpr struct { miniExpr - Ptr Node - lenCap Nodes // TODO(rsc): Split into two Node fields + Ptr Node + LenCap_ Nodes // TODO(rsc): Split into two Node fields } func NewSliceHeaderExpr(pos src.XPos, typ *types.Type, ptr, len, cap Node) *SliceHeaderExpr { @@ -723,15 +723,15 @@ func NewSliceHeaderExpr(pos src.XPos, typ *types.Type, ptr, len, cap Node) *Slic n.pos = pos n.op = OSLICEHEADER n.typ = typ - n.lenCap.Set2(len, cap) + n.LenCap_.Set2(len, cap) return n } func (n *SliceHeaderExpr) Left() Node { return n.Ptr } func (n *SliceHeaderExpr) SetLeft(x Node) { n.Ptr = x } -func (n *SliceHeaderExpr) List() Nodes { return n.lenCap } -func (n *SliceHeaderExpr) PtrList() *Nodes { return &n.lenCap } -func (n *SliceHeaderExpr) SetList(x Nodes) { n.lenCap = x } +func (n *SliceHeaderExpr) List() Nodes { return n.LenCap_ } +func (n *SliceHeaderExpr) PtrList() *Nodes { return &n.LenCap_ } +func (n *SliceHeaderExpr) SetList(x Nodes) { n.LenCap_ = x } // A StarExpr is a dereference expression *X. // It may end up being a value or a type. diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 3bca25b504b99..8aa6daed6fc51 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -49,9 +49,9 @@ import ( // pointer from the Func back to the OCALLPART. 
type Func struct { miniNode - typ *types.Type - body Nodes - iota int64 + typ *types.Type + Body_ Nodes + iota int64 Nname *Name // ONAME node OClosure *ClosureExpr // OCLOSURE node @@ -117,9 +117,9 @@ func NewFunc(pos src.XPos) *Func { func (f *Func) isStmt() {} func (f *Func) Func() *Func { return f } -func (f *Func) Body() Nodes { return f.body } -func (f *Func) PtrBody() *Nodes { return &f.body } -func (f *Func) SetBody(x Nodes) { f.body = x } +func (f *Func) Body() Nodes { return f.Body_ } +func (f *Func) PtrBody() *Nodes { return &f.Body_ } +func (f *Func) SetBody(x Nodes) { f.Body_ = x } func (f *Func) Type() *types.Type { return f.typ } func (f *Func) SetType(x *types.Type) { f.typ = x } func (f *Func) Iota() int64 { return f.iota } diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 4c47a4486e19f..b3fd89c3670b7 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -9,18 +9,18 @@ func (n *AddStringExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *AddStringExpr) copy() Node { c := *n c.init = c.init.Copy() - c.list = c.list.Copy() + c.List_ = c.List_.Copy() return &c } func (n *AddStringExpr) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) - err = maybeDoList(n.list, err, do) + err = maybeDoList(n.List_, err, do) return err } func (n *AddStringExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) - editList(n.list, edit) + editList(n.List_, edit) } func (n *AddrExpr) String() string { return fmt.Sprint(n) } @@ -147,18 +147,18 @@ func (n *BlockStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *BlockStmt) copy() Node { c := *n c.init = c.init.Copy() - c.list = c.list.Copy() + c.List_ = c.List_.Copy() return &c } func (n *BlockStmt) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) - err = maybeDoList(n.list, err, do) + err = maybeDoList(n.List_, err, do) return err } func (n *BlockStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) - editList(n.list, edit) + editList(n.List_, edit) } func (n *BranchStmt) String() string { return fmt.Sprint(n) } @@ -184,7 +184,7 @@ func (n *CallExpr) copy() Node { c.init = c.init.Copy() c.Args = c.Args.Copy() c.Rargs = c.Rargs.Copy() - c.body = c.body.Copy() + c.Body_ = c.Body_.Copy() return &c } func (n *CallExpr) doChildren(do func(Node) error) error { @@ -193,7 +193,7 @@ func (n *CallExpr) doChildren(do func(Node) error) error { err = maybeDo(n.X, err, do) err = maybeDoList(n.Args, err, do) err = maybeDoList(n.Rargs, err, do) - err = maybeDoList(n.body, err, do) + err = maybeDoList(n.Body_, err, do) return err } func (n *CallExpr) editChildren(edit func(Node) Node) { @@ -201,7 +201,7 @@ func (n *CallExpr) editChildren(edit func(Node) Node) { n.X = maybeEdit(n.X, edit) editList(n.Args, edit) editList(n.Rargs, edit) - editList(n.body, edit) + editList(n.Body_, edit) } func (n *CallPartExpr) String() string { return fmt.Sprint(n) } @@ -228,25 +228,25 @@ func (n *CaseStmt) copy() Node { c := *n c.init = c.init.Copy() c.Vars = c.Vars.Copy() - c.list = c.list.Copy() - c.body = c.body.Copy() + c.List_ = c.List_.Copy() + c.Body_ = c.Body_.Copy() return &c } func (n *CaseStmt) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) err = maybeDoList(n.Vars, err, do) - err = maybeDoList(n.list, err, do) + err = maybeDoList(n.List_, err, do) err = maybeDo(n.Comm, err, do) - err = 
maybeDoList(n.body, err, do) + err = maybeDoList(n.Body_, err, do) return err } func (n *CaseStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) editList(n.Vars, edit) - editList(n.list, edit) + editList(n.List_, edit) n.Comm = maybeEdit(n.Comm, edit) - editList(n.body, edit) + editList(n.Body_, edit) } func (n *ChanType) String() string { return fmt.Sprint(n) } @@ -301,20 +301,20 @@ func (n *CompLitExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *CompLitExpr) copy() Node { c := *n c.init = c.init.Copy() - c.list = c.list.Copy() + c.List_ = c.List_.Copy() return &c } func (n *CompLitExpr) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) err = maybeDo(n.Ntype, err, do) - err = maybeDoList(n.list, err, do) + err = maybeDoList(n.List_, err, do) return err } func (n *CompLitExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) n.Ntype = toNtype(maybeEdit(n.Ntype, edit)) - editList(n.list, edit) + editList(n.List_, edit) } func (n *ConstExpr) String() string { return fmt.Sprint(n) } @@ -390,7 +390,7 @@ func (n *ForStmt) copy() Node { c := *n c.init = c.init.Copy() c.Late = c.Late.Copy() - c.body = c.body.Copy() + c.Body_ = c.Body_.Copy() return &c } func (n *ForStmt) doChildren(do func(Node) error) error { @@ -399,7 +399,7 @@ func (n *ForStmt) doChildren(do func(Node) error) error { err = maybeDo(n.Cond, err, do) err = maybeDoList(n.Late, err, do) err = maybeDo(n.Post, err, do) - err = maybeDoList(n.body, err, do) + err = maybeDoList(n.Body_, err, do) return err } func (n *ForStmt) editChildren(edit func(Node) Node) { @@ -407,23 +407,23 @@ func (n *ForStmt) editChildren(edit func(Node) Node) { n.Cond = maybeEdit(n.Cond, edit) editList(n.Late, edit) n.Post = maybeEdit(n.Post, edit) - editList(n.body, edit) + editList(n.Body_, edit) } func (n *Func) String() string { return fmt.Sprint(n) } func (n *Func) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *Func) copy() Node { c := *n - c.body = c.body.Copy() + c.Body_ = c.Body_.Copy() return &c } func (n *Func) doChildren(do func(Node) error) error { var err error - err = maybeDoList(n.body, err, do) + err = maybeDoList(n.Body_, err, do) return err } func (n *Func) editChildren(edit func(Node) Node) { - editList(n.body, edit) + editList(n.Body_, edit) } func (n *FuncType) String() string { return fmt.Sprint(n) } @@ -473,7 +473,7 @@ func (n *IfStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *IfStmt) copy() Node { c := *n c.init = c.init.Copy() - c.body = c.body.Copy() + c.Body_ = c.Body_.Copy() c.Else = c.Else.Copy() return &c } @@ -481,14 +481,14 @@ func (n *IfStmt) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) err = maybeDo(n.Cond, err, do) - err = maybeDoList(n.body, err, do) + err = maybeDoList(n.Body_, err, do) err = maybeDoList(n.Else, err, do) return err } func (n *IfStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) n.Cond = maybeEdit(n.Cond, edit) - editList(n.body, edit) + editList(n.Body_, edit) editList(n.Else, edit) } @@ -533,20 +533,20 @@ func (n *InlinedCallExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *InlinedCallExpr) copy() Node { c := *n c.init = c.init.Copy() - c.body = c.body.Copy() + c.Body_ = c.Body_.Copy() c.ReturnVars = c.ReturnVars.Copy() return &c } func (n *InlinedCallExpr) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) - err = maybeDoList(n.body, err, do) + err = 
maybeDoList(n.Body_, err, do) err = maybeDoList(n.ReturnVars, err, do) return err } func (n *InlinedCallExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) - editList(n.body, edit) + editList(n.Body_, edit) editList(n.ReturnVars, edit) } @@ -725,7 +725,7 @@ func (n *RangeStmt) copy() Node { c := *n c.init = c.init.Copy() c.Vars = c.Vars.Copy() - c.body = c.body.Copy() + c.Body_ = c.Body_.Copy() return &c } func (n *RangeStmt) doChildren(do func(Node) error) error { @@ -733,14 +733,14 @@ func (n *RangeStmt) doChildren(do func(Node) error) error { err = maybeDoList(n.init, err, do) err = maybeDoList(n.Vars, err, do) err = maybeDo(n.X, err, do) - err = maybeDoList(n.body, err, do) + err = maybeDoList(n.Body_, err, do) return err } func (n *RangeStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) editList(n.Vars, edit) n.X = maybeEdit(n.X, edit) - editList(n.body, edit) + editList(n.Body_, edit) } func (n *ResultExpr) String() string { return fmt.Sprint(n) } @@ -843,20 +843,20 @@ func (n *SliceExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *SliceExpr) copy() Node { c := *n c.init = c.init.Copy() - c.list = c.list.Copy() + c.List_ = c.List_.Copy() return &c } func (n *SliceExpr) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) err = maybeDo(n.X, err, do) - err = maybeDoList(n.list, err, do) + err = maybeDoList(n.List_, err, do) return err } func (n *SliceExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) n.X = maybeEdit(n.X, edit) - editList(n.list, edit) + editList(n.List_, edit) } func (n *SliceHeaderExpr) String() string { return fmt.Sprint(n) } @@ -864,20 +864,20 @@ func (n *SliceHeaderExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *SliceHeaderExpr) copy() Node { c := *n c.init = c.init.Copy() - c.lenCap = c.lenCap.Copy() + c.LenCap_ = c.LenCap_.Copy() return &c } func (n *SliceHeaderExpr) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) err = maybeDo(n.Ptr, err, do) - err = maybeDoList(n.lenCap, err, do) + err = maybeDoList(n.LenCap_, err, do) return err } func (n *SliceHeaderExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) n.Ptr = maybeEdit(n.Ptr, edit) - editList(n.lenCap, edit) + editList(n.LenCap_, edit) } func (n *SliceType) String() string { return fmt.Sprint(n) } @@ -984,15 +984,15 @@ func (n *TypeSwitchGuard) copy() Node { } func (n *TypeSwitchGuard) doChildren(do func(Node) error) error { var err error - if n.name != nil { - err = maybeDo(n.name, err, do) + if n.Name_ != nil { + err = maybeDo(n.Name_, err, do) } err = maybeDo(n.X, err, do) return err } func (n *TypeSwitchGuard) editChildren(edit func(Node) Node) { - if n.name != nil { - n.name = edit(n.name).(*Name) + if n.Name_ != nil { + n.Name_ = edit(n.Name_).(*Name) } n.X = maybeEdit(n.X, edit) } diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index 836bbcb45320d..ccf46dfa73a27 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -161,20 +161,20 @@ func (n *AssignOpStmt) SetType(x *types.Type) { n.typ = x } // A BlockStmt is a block: { List }. 
type BlockStmt struct { miniStmt - list Nodes + List_ Nodes } func NewBlockStmt(pos src.XPos, list []Node) *BlockStmt { n := &BlockStmt{} n.pos = pos n.op = OBLOCK - n.list.Set(list) + n.List_.Set(list) return n } -func (n *BlockStmt) List() Nodes { return n.list } -func (n *BlockStmt) PtrList() *Nodes { return &n.list } -func (n *BlockStmt) SetList(x Nodes) { n.list = x } +func (n *BlockStmt) List() Nodes { return n.List_ } +func (n *BlockStmt) PtrList() *Nodes { return &n.List_ } +func (n *BlockStmt) SetList(x Nodes) { n.List_ = x } // A BranchStmt is a break, continue, fallthrough, or goto statement. // @@ -204,27 +204,27 @@ func (n *BranchStmt) SetSym(sym *types.Sym) { n.Label = sym } // A CaseStmt is a case statement in a switch or select: case List: Body. type CaseStmt struct { miniStmt - Vars Nodes // declared variable for this case in type switch - list Nodes // list of expressions for switch, early select - Comm Node // communication case (Exprs[0]) after select is type-checked - body Nodes + Vars Nodes // declared variable for this case in type switch + List_ Nodes // list of expressions for switch, early select + Comm Node // communication case (Exprs[0]) after select is type-checked + Body_ Nodes } func NewCaseStmt(pos src.XPos, list, body []Node) *CaseStmt { n := &CaseStmt{} n.pos = pos n.op = OCASE - n.list.Set(list) - n.body.Set(body) + n.List_.Set(list) + n.Body_.Set(body) return n } -func (n *CaseStmt) List() Nodes { return n.list } -func (n *CaseStmt) PtrList() *Nodes { return &n.list } -func (n *CaseStmt) SetList(x Nodes) { n.list = x } -func (n *CaseStmt) Body() Nodes { return n.body } -func (n *CaseStmt) PtrBody() *Nodes { return &n.body } -func (n *CaseStmt) SetBody(x Nodes) { n.body = x } +func (n *CaseStmt) List() Nodes { return n.List_ } +func (n *CaseStmt) PtrList() *Nodes { return &n.List_ } +func (n *CaseStmt) SetList(x Nodes) { n.List_ = x } +func (n *CaseStmt) Body() Nodes { return n.Body_ } +func (n *CaseStmt) PtrBody() *Nodes { return &n.Body_ } +func (n *CaseStmt) SetBody(x Nodes) { n.Body_ = x } func (n *CaseStmt) Rlist() Nodes { return n.Vars } func (n *CaseStmt) PtrRlist() *Nodes { return &n.Vars } func (n *CaseStmt) SetRlist(x Nodes) { n.Vars = x } @@ -255,7 +255,7 @@ type ForStmt struct { Cond Node Late Nodes Post Node - body Nodes + Body_ Nodes hasBreak bool } @@ -264,7 +264,7 @@ func NewForStmt(pos src.XPos, init []Node, cond, post Node, body []Node) *ForStm n.pos = pos n.op = OFOR n.init.Set(init) - n.body.Set(body) + n.Body_.Set(body) return n } @@ -274,9 +274,9 @@ func (n *ForStmt) Left() Node { return n.Cond } func (n *ForStmt) SetLeft(x Node) { n.Cond = x } func (n *ForStmt) Right() Node { return n.Post } func (n *ForStmt) SetRight(x Node) { n.Post = x } -func (n *ForStmt) Body() Nodes { return n.body } -func (n *ForStmt) PtrBody() *Nodes { return &n.body } -func (n *ForStmt) SetBody(x Nodes) { n.body = x } +func (n *ForStmt) Body() Nodes { return n.Body_ } +func (n *ForStmt) PtrBody() *Nodes { return &n.Body_ } +func (n *ForStmt) SetBody(x Nodes) { n.Body_ = x } func (n *ForStmt) List() Nodes { return n.Late } func (n *ForStmt) PtrList() *Nodes { return &n.Late } func (n *ForStmt) SetList(x Nodes) { n.Late = x } @@ -310,7 +310,7 @@ func (n *GoStmt) SetLeft(x Node) { n.Call = x } type IfStmt struct { miniStmt Cond Node - body Nodes + Body_ Nodes Else Nodes likely bool // code layout hint } @@ -319,16 +319,16 @@ func NewIfStmt(pos src.XPos, cond Node, body, els []Node) *IfStmt { n := &IfStmt{Cond: cond} n.pos = pos n.op = OIF - n.body.Set(body) + 
n.Body_.Set(body) n.Else.Set(els) return n } func (n *IfStmt) Left() Node { return n.Cond } func (n *IfStmt) SetLeft(x Node) { n.Cond = x } -func (n *IfStmt) Body() Nodes { return n.body } -func (n *IfStmt) PtrBody() *Nodes { return &n.body } -func (n *IfStmt) SetBody(x Nodes) { n.body = x } +func (n *IfStmt) Body() Nodes { return n.Body_ } +func (n *IfStmt) PtrBody() *Nodes { return &n.Body_ } +func (n *IfStmt) SetBody(x Nodes) { n.Body_ = x } func (n *IfStmt) Rlist() Nodes { return n.Else } func (n *IfStmt) PtrRlist() *Nodes { return &n.Else } func (n *IfStmt) SetRlist(x Nodes) { n.Else = x } @@ -375,7 +375,7 @@ type RangeStmt struct { Vars Nodes // TODO(rsc): Replace with Key, Value Node Def bool X Node - body Nodes + Body_ Nodes hasBreak bool typ *types.Type // TODO(rsc): Remove - use X.Type() instead } @@ -385,7 +385,7 @@ func NewRangeStmt(pos src.XPos, vars []Node, x Node, body []Node) *RangeStmt { n.pos = pos n.op = ORANGE n.Vars.Set(vars) - n.body.Set(body) + n.Body_.Set(body) return n } @@ -393,9 +393,9 @@ func (n *RangeStmt) Sym() *types.Sym { return n.Label } func (n *RangeStmt) SetSym(x *types.Sym) { n.Label = x } func (n *RangeStmt) Right() Node { return n.X } func (n *RangeStmt) SetRight(x Node) { n.X = x } -func (n *RangeStmt) Body() Nodes { return n.body } -func (n *RangeStmt) PtrBody() *Nodes { return &n.body } -func (n *RangeStmt) SetBody(x Nodes) { n.body = x } +func (n *RangeStmt) Body() Nodes { return n.Body_ } +func (n *RangeStmt) PtrBody() *Nodes { return &n.Body_ } +func (n *RangeStmt) SetBody(x Nodes) { n.Body_ = x } func (n *RangeStmt) List() Nodes { return n.Vars } func (n *RangeStmt) PtrList() *Nodes { return &n.Vars } func (n *RangeStmt) SetList(x Nodes) { n.Vars = x } @@ -514,14 +514,14 @@ func (n *SwitchStmt) SetHasBreak(x bool) { n.hasBreak = x } // A TypeSwitchGuard is the [Name :=] X.(type) in a type switch. type TypeSwitchGuard struct { miniNode - name *Name - X Node + Name_ *Name + X Node } func NewTypeSwitchGuard(pos src.XPos, name, x Node) *TypeSwitchGuard { n := &TypeSwitchGuard{X: x} if name != nil { - n.name = name.(*Name) + n.Name_ = name.(*Name) } n.pos = pos n.op = OTYPESW @@ -529,17 +529,17 @@ func NewTypeSwitchGuard(pos src.XPos, name, x Node) *TypeSwitchGuard { } func (n *TypeSwitchGuard) Left() Node { - if n.name == nil { + if n.Name_ == nil { return nil } - return n.name + return n.Name_ } func (n *TypeSwitchGuard) SetLeft(x Node) { if x == nil { - n.name = nil + n.Name_ = nil return } - n.name = x.(*Name) + n.Name_ = x.(*Name) } func (n *TypeSwitchGuard) Right() Node { return n.X } func (n *TypeSwitchGuard) SetRight(x Node) { n.X = x } From 63722da46bbf320670e8f993490fe1431feeeb04 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Sat, 5 Dec 2020 17:24:48 -0800 Subject: [PATCH 140/474] [dev.regabi] cmd/compile: fix comment Russ, is this what you meant? Change-Id: I27d2847811c6eabd94358e435eb3eb4bc8cfaa9e Reviewed-on: https://go-review.googlesource.com/c/go/+/275712 Trust: Keith Randall Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/typecheck.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index a7c05c6c0fa62..990921189a439 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -3361,7 +3361,7 @@ out: // type check function definition // To be called by typecheck, not directly. -// (Call typecheckfn instead.) +// (Call typecheckFunc instead.) 
func typecheckfunc(n *ir.Func) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckfunc", n)(nil) From 1a98ab0e2dad7029d9db18fc1fae0b7e4fa4970c Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 7 Dec 2020 17:15:44 -0800 Subject: [PATCH 141/474] [dev.regabi] cmd/compile: add ssa.Aux tag interface for Value.Aux It's currently hard to automate refactorings around the Value.Aux field, because we don't have any static typing information for it. Adding a tag interface will make subsequent CLs easier and safer. Passes buildall w/ toolstash -cmp. Updates #42982. Change-Id: I41ae8e411a66bda3195a0957b60c2fe8a8002893 Reviewed-on: https://go-review.googlesource.com/c/go/+/275756 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Keith Randall Trust: Matthew Dempsky --- src/cmd/compile/fmtmap_test.go | 3 ++ src/cmd/compile/internal/gc/ssa.go | 22 +++++----- src/cmd/compile/internal/ir/mini.go | 3 +- src/cmd/compile/internal/ir/node.go | 1 + src/cmd/compile/internal/ssa/block.go | 2 +- src/cmd/compile/internal/ssa/check.go | 2 +- src/cmd/compile/internal/ssa/cse.go | 2 +- src/cmd/compile/internal/ssa/cse_test.go | 2 + src/cmd/compile/internal/ssa/debug.go | 4 +- src/cmd/compile/internal/ssa/func.go | 22 ++++------ src/cmd/compile/internal/ssa/func_test.go | 8 ++-- src/cmd/compile/internal/ssa/nilcheck_test.go | 2 +- src/cmd/compile/internal/ssa/op.go | 3 ++ src/cmd/compile/internal/ssa/rewrite.go | 42 ++++++++++++------- src/cmd/compile/internal/ssa/value.go | 5 ++- src/cmd/compile/internal/ssa/zcse.go | 2 +- src/cmd/compile/internal/types/type.go | 2 + src/cmd/internal/obj/link.go | 4 +- src/cmd/internal/obj/s390x/condition_code.go | 2 + src/cmd/internal/obj/s390x/rotate.go | 2 + 20 files changed, 79 insertions(+), 56 deletions(-) diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go index 756320285ca99..e62b9613e1840 100644 --- a/src/cmd/compile/fmtmap_test.go +++ b/src/cmd/compile/fmtmap_test.go @@ -43,6 +43,9 @@ var knownFormats = map[string]string{ "cmd/compile/internal/ir.Nodes %+v": "", "cmd/compile/internal/ir.Nodes %.v": "", "cmd/compile/internal/ir.Op %+v": "", + "cmd/compile/internal/ssa.Aux %#v": "", + "cmd/compile/internal/ssa.Aux %q": "", + "cmd/compile/internal/ssa.Aux %s": "", "cmd/compile/internal/ssa.BranchPrediction %d": "", "cmd/compile/internal/ssa.ID %d": "", "cmd/compile/internal/ssa.LocalSlot %s": "", diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index add50c35d7495..95650328b1c55 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -772,7 +772,7 @@ func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value { } // newValue0A adds a new value with no arguments and an aux value to the current block. -func (s *state) newValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value { +func (s *state) newValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value { return s.curBlock.NewValue0A(s.peekPos(), op, t, aux) } @@ -787,14 +787,14 @@ func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value { } // newValue1A adds a new value with one argument and an aux value to the current block. -func (s *state) newValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value { +func (s *state) newValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value { return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg) } // newValue1Apos adds a new value with one argument and an aux value to the current block. 
// isStmt determines whether the created values may be a statement or not // (i.e., false means never, yes means maybe). -func (s *state) newValue1Apos(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value, isStmt bool) *ssa.Value { +func (s *state) newValue1Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value, isStmt bool) *ssa.Value { if isStmt { return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg) } @@ -812,14 +812,14 @@ func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa. } // newValue2A adds a new value with two arguments and an aux value to the current block. -func (s *state) newValue2A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1 *ssa.Value) *ssa.Value { +func (s *state) newValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value { return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1) } // newValue2Apos adds a new value with two arguments and an aux value to the current block. // isStmt determines whether the created values may be a statement or not // (i.e., false means never, yes means maybe). -func (s *state) newValue2Apos(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1 *ssa.Value, isStmt bool) *ssa.Value { +func (s *state) newValue2Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value, isStmt bool) *ssa.Value { if isStmt { return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1) } @@ -842,14 +842,14 @@ func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 } // newValue3A adds a new value with three arguments and an aux value to the current block. -func (s *state) newValue3A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value) *ssa.Value { +func (s *state) newValue3A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value) *ssa.Value { return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2) } // newValue3Apos adds a new value with three arguments and an aux value to the current block. // isStmt determines whether the created values may be a statement or not // (i.e., false means never, yes means maybe). -func (s *state) newValue3Apos(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value, isStmt bool) *ssa.Value { +func (s *state) newValue3Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value, isStmt bool) *ssa.Value { if isStmt { return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2) } @@ -872,7 +872,7 @@ func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value { } // entryNewValue0A adds a new value with no arguments and an aux value to the entry block. -func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value { +func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value { return s.f.Entry.NewValue0A(src.NoXPos, op, t, aux) } @@ -887,7 +887,7 @@ func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa } // entryNewValue1A adds a new value with one argument and an aux value to the entry block. -func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value { +func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value { return s.f.Entry.NewValue1A(src.NoXPos, op, t, aux, arg) } @@ -897,7 +897,7 @@ func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) } // entryNewValue2A adds a new value with two arguments and an aux value to the entry block. 
-func (s *state) entryNewValue2A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1 *ssa.Value) *ssa.Value { +func (s *state) entryNewValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value { return s.f.Entry.NewValue2A(src.NoXPos, op, t, aux, arg0, arg1) } @@ -2060,7 +2060,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { if i == "" { return s.constEmptyString(n.Type()) } - return s.entryNewValue0A(ssa.OpConstString, n.Type(), i) + return s.entryNewValue0A(ssa.OpConstString, n.Type(), ssa.StringToAux(i)) case constant.Bool: return s.constBool(constant.BoolVal(u)) case constant.Float: diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go index 612e7d62c3925..edb3b197da1b5 100644 --- a/src/cmd/compile/internal/ir/mini.go +++ b/src/cmd/compile/internal/ir/mini.go @@ -198,5 +198,6 @@ func (n *miniNode) MarkReadonly() { panic(n.no("MarkReadonly")) } func (n *miniNode) TChanDir() types.ChanDir { panic(n.no("TChanDir")) } func (n *miniNode) SetTChanDir(types.ChanDir) { panic(n.no("SetTChanDir")) } -// TODO: Delete when CanBeAnSSASym is removed from Node itself. +// TODO: Delete when these are removed from Node itself. func (*miniNode) CanBeAnSSASym() {} +func (*miniNode) CanBeAnSSAAux() {} diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index ba7eaae1b9012..b878b00546440 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -113,6 +113,7 @@ type Node interface { // Only for SSA and should be removed when SSA starts // using a more specific type than Node. CanBeAnSSASym() + CanBeAnSSAAux() } // Line returns n's position as a string. If n has been inlined, diff --git a/src/cmd/compile/internal/ssa/block.go b/src/cmd/compile/internal/ssa/block.go index 519ac214caef2..937c757b2153b 100644 --- a/src/cmd/compile/internal/ssa/block.go +++ b/src/cmd/compile/internal/ssa/block.go @@ -52,7 +52,7 @@ type Block struct { Controls [2]*Value // Auxiliary info for the block. Its value depends on the Kind. - Aux interface{} + Aux Aux AuxInt int64 // The unordered set of Values that define the operation of this block. diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go index 5f5dfc328a3f8..4d57eef55681d 100644 --- a/src/cmd/compile/internal/ssa/check.go +++ b/src/cmd/compile/internal/ssa/check.go @@ -161,7 +161,7 @@ func checkFunc(f *Func) { f.Fatalf("value %v has an AuxInt that encodes a NaN", v) } case auxString: - if _, ok := v.Aux.(string); !ok { + if _, ok := v.Aux.(stringAux); !ok { f.Fatalf("value %v has Aux type %T, want string", v, v.Aux) } canHaveAux = true diff --git a/src/cmd/compile/internal/ssa/cse.go b/src/cmd/compile/internal/ssa/cse.go index 3b4f2be37e7be..f78527410c8dd 100644 --- a/src/cmd/compile/internal/ssa/cse.go +++ b/src/cmd/compile/internal/ssa/cse.go @@ -275,7 +275,7 @@ func lt2Cmp(isLt bool) types.Cmp { return types.CMPgt } -type auxmap map[interface{}]int32 +type auxmap map[Aux]int32 func cmpVal(v, w *Value, auxIDs auxmap) types.Cmp { // Try to order these comparison by cost (cheaper first) diff --git a/src/cmd/compile/internal/ssa/cse_test.go b/src/cmd/compile/internal/ssa/cse_test.go index 9e76645f54fa5..8052016f3af69 100644 --- a/src/cmd/compile/internal/ssa/cse_test.go +++ b/src/cmd/compile/internal/ssa/cse_test.go @@ -14,6 +14,8 @@ type tstAux struct { s string } +func (*tstAux) CanBeAnSSAAux() {} + // This tests for a bug found when partitioning, but not sorting by the Aux value. 
func TestCSEAuxPartitionBug(t *testing.T) { c := testConfig(t) diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go index 0d660361b1f94..44e91270fad47 100644 --- a/src/cmd/compile/internal/ssa/debug.go +++ b/src/cmd/compile/internal/ssa/debug.go @@ -143,13 +143,13 @@ func (loc VarLoc) absent() bool { var BlockStart = &Value{ ID: -10000, Op: OpInvalid, - Aux: "BlockStart", + Aux: StringToAux("BlockStart"), } var BlockEnd = &Value{ ID: -20000, Op: OpInvalid, - Aux: "BlockEnd", + Aux: StringToAux("BlockEnd"), } // RegisterSet is a bitmap of registers, indexed by Register.num. diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go index e6f899a2c77b1..e6c4798a78882 100644 --- a/src/cmd/compile/internal/ssa/func.go +++ b/src/cmd/compile/internal/ssa/func.go @@ -377,13 +377,7 @@ func (b *Block) NewValue0I(pos src.XPos, op Op, t *types.Type, auxint int64) *Va } // NewValue returns a new value in the block with no arguments and an aux value. -func (b *Block) NewValue0A(pos src.XPos, op Op, t *types.Type, aux interface{}) *Value { - if _, ok := aux.(int64); ok { - // Disallow int64 aux values. They should be in the auxint field instead. - // Maybe we want to allow this at some point, but for now we disallow it - // to prevent errors like using NewValue1A instead of NewValue1I. - b.Fatalf("aux field has int64 type op=%s type=%s aux=%v", op, t, aux) - } +func (b *Block) NewValue0A(pos src.XPos, op Op, t *types.Type, aux Aux) *Value { v := b.Func.newValue(op, t, b, pos) v.AuxInt = 0 v.Aux = aux @@ -392,7 +386,7 @@ func (b *Block) NewValue0A(pos src.XPos, op Op, t *types.Type, aux interface{}) } // NewValue returns a new value in the block with no arguments and both an auxint and aux values. -func (b *Block) NewValue0IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux interface{}) *Value { +func (b *Block) NewValue0IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux Aux) *Value { v := b.Func.newValue(op, t, b, pos) v.AuxInt = auxint v.Aux = aux @@ -421,7 +415,7 @@ func (b *Block) NewValue1I(pos src.XPos, op Op, t *types.Type, auxint int64, arg } // NewValue1A returns a new value in the block with one argument and an aux value. -func (b *Block) NewValue1A(pos src.XPos, op Op, t *types.Type, aux interface{}, arg *Value) *Value { +func (b *Block) NewValue1A(pos src.XPos, op Op, t *types.Type, aux Aux, arg *Value) *Value { v := b.Func.newValue(op, t, b, pos) v.AuxInt = 0 v.Aux = aux @@ -432,7 +426,7 @@ func (b *Block) NewValue1A(pos src.XPos, op Op, t *types.Type, aux interface{}, } // NewValue1IA returns a new value in the block with one argument and both an auxint and aux values. -func (b *Block) NewValue1IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux interface{}, arg *Value) *Value { +func (b *Block) NewValue1IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux Aux, arg *Value) *Value { v := b.Func.newValue(op, t, b, pos) v.AuxInt = auxint v.Aux = aux @@ -455,7 +449,7 @@ func (b *Block) NewValue2(pos src.XPos, op Op, t *types.Type, arg0, arg1 *Value) } // NewValue2A returns a new value in the block with two arguments and one aux values. 
-func (b *Block) NewValue2A(pos src.XPos, op Op, t *types.Type, aux interface{}, arg0, arg1 *Value) *Value { +func (b *Block) NewValue2A(pos src.XPos, op Op, t *types.Type, aux Aux, arg0, arg1 *Value) *Value { v := b.Func.newValue(op, t, b, pos) v.AuxInt = 0 v.Aux = aux @@ -480,7 +474,7 @@ func (b *Block) NewValue2I(pos src.XPos, op Op, t *types.Type, auxint int64, arg } // NewValue2IA returns a new value in the block with two arguments and both an auxint and aux values. -func (b *Block) NewValue2IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux interface{}, arg0, arg1 *Value) *Value { +func (b *Block) NewValue2IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux Aux, arg0, arg1 *Value) *Value { v := b.Func.newValue(op, t, b, pos) v.AuxInt = auxint v.Aux = aux @@ -521,7 +515,7 @@ func (b *Block) NewValue3I(pos src.XPos, op Op, t *types.Type, auxint int64, arg } // NewValue3A returns a new value in the block with three argument and an aux value. -func (b *Block) NewValue3A(pos src.XPos, op Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *Value) *Value { +func (b *Block) NewValue3A(pos src.XPos, op Op, t *types.Type, aux Aux, arg0, arg1, arg2 *Value) *Value { v := b.Func.newValue(op, t, b, pos) v.AuxInt = 0 v.Aux = aux @@ -633,7 +627,7 @@ func (f *Func) ConstNil(t *types.Type) *Value { } func (f *Func) ConstEmptyString(t *types.Type) *Value { v := f.constVal(OpConstString, t, constEmptyStringMagic, false) - v.Aux = "" + v.Aux = StringToAux("") return v } func (f *Func) ConstOffPtrSP(t *types.Type, c int64, sp *Value) *Value { diff --git a/src/cmd/compile/internal/ssa/func_test.go b/src/cmd/compile/internal/ssa/func_test.go index 568c6436f5bb5..276c444b9a88f 100644 --- a/src/cmd/compile/internal/ssa/func_test.go +++ b/src/cmd/compile/internal/ssa/func_test.go @@ -232,7 +232,7 @@ func Bloc(name string, entries ...interface{}) bloc { } // Valu defines a value in a block. 
-func Valu(name string, op Op, t *types.Type, auxint int64, aux interface{}, args ...string) valu { +func Valu(name string, op Op, t *types.Type, auxint int64, aux Aux, args ...string) valu { return valu{name, op, t, auxint, aux, args} } @@ -277,7 +277,7 @@ type valu struct { op Op t *types.Type auxint int64 - aux interface{} + aux Aux args []string } @@ -402,12 +402,12 @@ func TestEquiv(t *testing.T) { cfg.Fun("entry", Bloc("entry", Valu("mem", OpInitMem, types.TypeMem, 0, nil), - Valu("a", OpConst64, cfg.config.Types.Int64, 0, 14), + Valu("a", OpConstString, cfg.config.Types.String, 0, StringToAux("foo")), Exit("mem"))), cfg.Fun("entry", Bloc("entry", Valu("mem", OpInitMem, types.TypeMem, 0, nil), - Valu("a", OpConst64, cfg.config.Types.Int64, 0, 26), + Valu("a", OpConstString, cfg.config.Types.String, 0, StringToAux("bar")), Exit("mem"))), }, // value args different diff --git a/src/cmd/compile/internal/ssa/nilcheck_test.go b/src/cmd/compile/internal/ssa/nilcheck_test.go index 16d94614d815c..2e32afe2a6ba6 100644 --- a/src/cmd/compile/internal/ssa/nilcheck_test.go +++ b/src/cmd/compile/internal/ssa/nilcheck_test.go @@ -212,7 +212,7 @@ func TestNilcheckPhi(t *testing.T) { Valu("mem", OpInitMem, types.TypeMem, 0, nil), Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil), - Valu("baddr", OpLocalAddr, c.config.Types.Bool, 0, "b", "sp", "mem"), + Valu("baddr", OpLocalAddr, c.config.Types.Bool, 0, StringToAux("b"), "sp", "mem"), Valu("bool1", OpLoad, c.config.Types.Bool, 0, nil, "baddr", "mem"), If("bool1", "b1", "b2")), Bloc("b1", diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go index 6f029a421e18a..97726a6f95346 100644 --- a/src/cmd/compile/internal/ssa/op.go +++ b/src/cmd/compile/internal/ssa/op.go @@ -197,6 +197,8 @@ func ClosureAuxCall(args []Param, results []Param) *AuxCall { return &AuxCall{Fn: nil, args: args, results: results} } +func (*AuxCall) CanBeAnSSAAux() {} + const ( auxNone auxType = iota auxBool // auxInt is 0/1 for false/true @@ -248,6 +250,7 @@ const ( type Sym interface { String() string CanBeAnSSASym() + CanBeAnSSAAux() } // A ValAndOff is used by the several opcodes. It holds diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index 24efd38fb7c44..9abfe0938bd07 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -678,43 +678,53 @@ func opToAuxInt(o Op) int64 { return int64(o) } -func auxToString(i interface{}) string { - return i.(string) +// Aux is an interface to hold miscellaneous data in Blocks and Values. +type Aux interface { + CanBeAnSSAAux() } -func auxToSym(i interface{}) Sym { + +// stringAux wraps string values for use in Aux. 
+type stringAux string + +func (stringAux) CanBeAnSSAAux() {} + +func auxToString(i Aux) string { + return string(i.(stringAux)) +} +func auxToSym(i Aux) Sym { // TODO: kind of a hack - allows nil interface through s, _ := i.(Sym) return s } -func auxToType(i interface{}) *types.Type { +func auxToType(i Aux) *types.Type { return i.(*types.Type) } -func auxToCall(i interface{}) *AuxCall { +func auxToCall(i Aux) *AuxCall { return i.(*AuxCall) } -func auxToS390xCCMask(i interface{}) s390x.CCMask { +func auxToS390xCCMask(i Aux) s390x.CCMask { return i.(s390x.CCMask) } -func auxToS390xRotateParams(i interface{}) s390x.RotateParams { +func auxToS390xRotateParams(i Aux) s390x.RotateParams { return i.(s390x.RotateParams) } -func stringToAux(s string) interface{} { - return s +func StringToAux(s string) Aux { + return stringAux(s) } -func symToAux(s Sym) interface{} { +func symToAux(s Sym) Aux { return s } -func callToAux(s *AuxCall) interface{} { +func callToAux(s *AuxCall) Aux { return s } -func typeToAux(t *types.Type) interface{} { +func typeToAux(t *types.Type) Aux { return t } -func s390xCCMaskToAux(c s390x.CCMask) interface{} { +func s390xCCMaskToAux(c s390x.CCMask) Aux { return c } -func s390xRotateParamsToAux(r s390x.RotateParams) interface{} { +func s390xRotateParamsToAux(r s390x.RotateParams) Aux { return r } @@ -725,7 +735,7 @@ func uaddOvf(a, b int64) bool { // de-virtualize an InterCall // 'sym' is the symbol for the itab -func devirt(v *Value, aux interface{}, sym Sym, offset int64) *AuxCall { +func devirt(v *Value, aux Aux, sym Sym, offset int64) *AuxCall { f := v.Block.Func n, ok := sym.(*obj.LSym) if !ok { @@ -748,7 +758,7 @@ func devirt(v *Value, aux interface{}, sym Sym, offset int64) *AuxCall { // de-virtualize an InterLECall // 'sym' is the symbol for the itab -func devirtLESym(v *Value, aux interface{}, sym Sym, offset int64) *obj.LSym { +func devirtLESym(v *Value, aux Aux, sym Sym, offset int64) *obj.LSym { n, ok := sym.(*obj.LSym) if !ok { return nil diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index edc43aaae7218..993c5a580f4dd 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ b/src/cmd/compile/internal/ssa/value.go @@ -36,7 +36,7 @@ type Value struct { // Users of AuxInt which interpret AuxInt as unsigned (e.g. shifts) must be careful. // Use Value.AuxUnsigned to get the zero-extended value of AuxInt. 
AuxInt int64 - Aux interface{} + Aux Aux // Arguments of this value Args []*Value @@ -492,3 +492,6 @@ func (v *Value) removeable() bool { } return true } + +// TODO(mdempsky): Shouldn't be necessary; see discussion at golang.org/cl/275756 +func (*Value) CanBeAnSSAAux() {} diff --git a/src/cmd/compile/internal/ssa/zcse.go b/src/cmd/compile/internal/ssa/zcse.go index ec38b7d1ba4ae..e08272c345801 100644 --- a/src/cmd/compile/internal/ssa/zcse.go +++ b/src/cmd/compile/internal/ssa/zcse.go @@ -57,7 +57,7 @@ func zcse(f *Func) { type vkey struct { op Op ai int64 // aux int - ax interface{} // aux + ax Aux // aux t *types.Type // type } diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index c5807af199ecc..e968a799e3827 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -164,6 +164,8 @@ type Type struct { flags bitset8 } +func (*Type) CanBeAnSSAAux() {} + const ( typeNotInHeap = 1 << iota // type cannot be heap allocated typeBroke // broken type definition diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index 8c8ff587ffce7..eaebfaf4b6314 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -723,8 +723,8 @@ func (s *LSym) String() string { } // The compiler needs *LSym to be assignable to cmd/compile/internal/ssa.Sym. -func (s *LSym) CanBeAnSSASym() { -} +func (*LSym) CanBeAnSSASym() {} +func (*LSym) CanBeAnSSAAux() {} type Pcln struct { // Aux symbols for pcln diff --git a/src/cmd/internal/obj/s390x/condition_code.go b/src/cmd/internal/obj/s390x/condition_code.go index 764fc5bc6a855..f498fd6f77442 100644 --- a/src/cmd/internal/obj/s390x/condition_code.go +++ b/src/cmd/internal/obj/s390x/condition_code.go @@ -124,3 +124,5 @@ func (c CCMask) String() string { // invalid return fmt.Sprintf("Invalid (%#x)", c) } + +func (CCMask) CanBeAnSSAAux() {} diff --git a/src/cmd/internal/obj/s390x/rotate.go b/src/cmd/internal/obj/s390x/rotate.go index 7dbc45e648946..c9998804921f3 100644 --- a/src/cmd/internal/obj/s390x/rotate.go +++ b/src/cmd/internal/obj/s390x/rotate.go @@ -113,3 +113,5 @@ func (r RotateParams) OutMerge(mask uint64) *RotateParams { func (r RotateParams) InMerge(mask uint64) *RotateParams { return r.OutMerge(bits.RotateLeft64(mask, int(r.Amount))) } + +func (RotateParams) CanBeAnSSAAux() {} From dcec658f6c9798b226d2f1e72a7b22b613e95c00 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 6 Dec 2020 12:02:22 -0800 Subject: [PATCH 142/474] [dev.regabi] cmd/compile: change LocalSlot.N to *ir.Name This was already documented as always being an ONAME, so it just needed a few type assertion changes. Passes buildall w/ toolstash -cmp. Updates #42982. 
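A hedged sketch of the narrowing pattern, with toy stand-ins for ir.Node and *ir.Name (all names below are illustrative): moving the "always an ONAME" invariant from a comment into the field's type trades repeated assertions at use sites for one checked conversion at construction.

package main

import "fmt"

// Node and Name are toy stand-ins for ir.Node and ir.Name.
type Node interface{ String() string }

type Name struct{ sym string }

func (n *Name) String() string { return n.sym }

// Before: N Node, documented as "always an ONAME", so every reader
// needed N.(*Name). After: N *Name, and the compiler enforces it.
type LocalSlot struct{ N *Name }

func newSlot(n Node) LocalSlot {
	return LocalSlot{N: n.(*Name)} // single checked conversion at the boundary
}

func main() {
	s := newSlot(&Name{sym: "x"})
	fmt.Println(s.N.String()) // use sites no longer assert
}
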
Change-Id: I61f4b6ebd57c43b41977f4b37b81fe94fb11a723 Reviewed-on: https://go-review.googlesource.com/c/go/+/275757 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le Reviewed-by: Russ Cox Trust: Matthew Dempsky --- src/cmd/compile/internal/gc/ssa.go | 7 +++---- src/cmd/compile/internal/ssa/config.go | 2 +- src/cmd/compile/internal/ssa/debug.go | 2 +- src/cmd/compile/internal/ssa/export_test.go | 2 +- src/cmd/compile/internal/ssa/location.go | 2 +- src/cmd/compile/internal/ssa/sizeof_test.go | 2 +- src/cmd/compile/internal/ssa/stackalloc.go | 2 +- 7 files changed, 9 insertions(+), 10 deletions(-) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 95650328b1c55..2378ea7711b95 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -6078,7 +6078,7 @@ func (s *state) addNamedValue(n ir.Node, v *ssa.Value) { if n.Class() == ir.PAUTO && n.Offset() != 0 { s.Fatalf("AUTO var with offset %v %d", n, n.Offset()) } - loc := ssa.LocalSlot{N: n, Type: n.Type(), Off: 0} + loc := ssa.LocalSlot{N: n.Name(), Type: n.Type(), Off: 0} values, ok := s.f.NamedValues[loc] if !ok { s.f.Names = append(s.f.Names, loc) @@ -6979,9 +6979,8 @@ func (e *ssafn) StringData(s string) *obj.LSym { return data } -func (e *ssafn) Auto(pos src.XPos, t *types.Type) ir.Node { - n := tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list - return n +func (e *ssafn) Auto(pos src.XPos, t *types.Type) *ir.Name { + return tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list } func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go index eeabd81d0391b..8dc2ee8213bda 100644 --- a/src/cmd/compile/internal/ssa/config.go +++ b/src/cmd/compile/internal/ssa/config.go @@ -139,7 +139,7 @@ type Frontend interface { // Auto returns a Node for an auto variable of the given type. // The SSA compiler uses this function to allocate space for spills. - Auto(src.XPos, *types.Type) ir.Node + Auto(src.XPos, *types.Type) *ir.Name // Given the name for a compound type, returns the name we should use // for the parts of that compound type. 
diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go index 44e91270fad47..6123978e55ca9 100644 --- a/src/cmd/compile/internal/ssa/debug.go +++ b/src/cmd/compile/internal/ssa/debug.go @@ -380,7 +380,7 @@ func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset fu for _, b := range f.Blocks { for _, v := range b.Values { if v.Op == OpVarDef || v.Op == OpVarKill { - n := v.Aux.(ir.Node) + n := v.Aux.(*ir.Name) if ir.IsSynthetic(n) { continue } diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go index 55fce31088008..644baa8548e72 100644 --- a/src/cmd/compile/internal/ssa/export_test.go +++ b/src/cmd/compile/internal/ssa/export_test.go @@ -68,7 +68,7 @@ type TestFrontend struct { func (TestFrontend) StringData(s string) *obj.LSym { return nil } -func (TestFrontend) Auto(pos src.XPos, t *types.Type) ir.Node { +func (TestFrontend) Auto(pos src.XPos, t *types.Type) *ir.Name { n := ir.NewNameAt(pos, &types.Sym{Name: "aFakeAuto"}) n.SetClass(ir.PAUTO) return n diff --git a/src/cmd/compile/internal/ssa/location.go b/src/cmd/compile/internal/ssa/location.go index 3dc3a81703984..69f90d9ab4107 100644 --- a/src/cmd/compile/internal/ssa/location.go +++ b/src/cmd/compile/internal/ssa/location.go @@ -60,7 +60,7 @@ func (r *Register) GCNum() int16 { // { N: len, Type: int, Off: 0, SplitOf: parent, SplitOffset: 8} // parent = &{N: s, Type: string} type LocalSlot struct { - N ir.Node // an ONAME *gc.Node representing a stack location. + N *ir.Name // an ONAME *ir.Name representing a stack location. Type *types.Type // type of slot Off int64 // offset of slot in N diff --git a/src/cmd/compile/internal/ssa/sizeof_test.go b/src/cmd/compile/internal/ssa/sizeof_test.go index 60ada011e3e0b..a27002ee3ac3b 100644 --- a/src/cmd/compile/internal/ssa/sizeof_test.go +++ b/src/cmd/compile/internal/ssa/sizeof_test.go @@ -22,7 +22,7 @@ func TestSizeof(t *testing.T) { }{ {Value{}, 72, 112}, {Block{}, 164, 304}, - {LocalSlot{}, 32, 48}, + {LocalSlot{}, 28, 40}, {valState{}, 28, 40}, } diff --git a/src/cmd/compile/internal/ssa/stackalloc.go b/src/cmd/compile/internal/ssa/stackalloc.go index 5257d44cfeaf0..68a6f08a2a9b2 100644 --- a/src/cmd/compile/internal/ssa/stackalloc.go +++ b/src/cmd/compile/internal/ssa/stackalloc.go @@ -157,7 +157,7 @@ func (s *stackAllocState) stackalloc() { if v.Aux == nil { f.Fatalf("%s has nil Aux\n", v.LongString()) } - loc := LocalSlot{N: v.Aux.(ir.Node), Type: v.Type, Off: v.AuxInt} + loc := LocalSlot{N: v.Aux.(*ir.Name), Type: v.Type, Off: v.AuxInt} if f.pass.debug > stackDebug { fmt.Printf("stackalloc %s to %s\n", v, loc) } From 1c8943a6add218f6ffd86c0952372fe54b0672a4 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 6 Dec 2020 18:10:34 -0800 Subject: [PATCH 143/474] [dev.regabi] cmd/compile: introduce FwdRefAux for wrapping ir.Node as ssa.Aux OpFwdRef is the only SSA value that needs the ability to store an arbitrary ir.Node in its Aux field. Every other SSA value always uses an *ir.Name. This CL introduces FwdRefAux, which wraps an ir.Node and implements the ssa.Aux tag interface, so that a subsequent refactoring can change ir.Node to not implement ssa.Aux. Passes buildall w/ toolstash -cmp. Updates #42982. 
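A compact sketch of the wrapper pattern with toy types; only fwdRefAux's shape mirrors the real FwdRefAux, and the rest is invented for illustration. The zero-width func array makes the wrapper non-comparable, so the wrapped node cannot be compared for equality by accident:

package main

import "fmt"

// Aux is a toy version of the ssa.Aux tag interface: a type may be
// stored in Value.Aux only if it deliberately opts in.
type Aux interface{ CanBeAnSSAAux() }

type Node interface{ String() string }

// fwdRefAux wraps an arbitrary Node so it satisfies Aux without
// every Node implementation having to implement Aux itself.
type fwdRefAux struct {
	_ [0]func() // zero-width; forbids == on the wrapper
	N Node
}

func (fwdRefAux) CanBeAnSSAAux() {}

type leaf string

func (l leaf) String() string { return string(l) }

func main() {
	var a Aux = fwdRefAux{N: leaf("x")}
	n := a.(fwdRefAux).N // unwrap where the node is needed
	fmt.Println(n)       // x
}
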
Change-Id: Id1475b28847579573cd376e82f28761d84cd1c23 Reviewed-on: https://go-review.googlesource.com/c/go/+/275788 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/gc/phi.go | 18 +++++++++++++----- src/cmd/compile/internal/gc/ssa.go | 2 +- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/src/cmd/compile/internal/gc/phi.go b/src/cmd/compile/internal/gc/phi.go index 677bfc92df2f1..def11e1be0cb2 100644 --- a/src/cmd/compile/internal/gc/phi.go +++ b/src/cmd/compile/internal/gc/phi.go @@ -23,6 +23,14 @@ const smallBlocks = 500 const debugPhi = false +// FwdRefAux wraps an arbitrary ir.Node as an ssa.Aux for use with OpFwdref. +type FwdRefAux struct { + _ [0]func() // ensure ir.Node isn't compared for equality + N ir.Node +} + +func (FwdRefAux) CanBeAnSSAAux() {} + // insertPhis finds all the places in the function where a phi is // necessary and inserts them. // Uses FwdRef ops to find all uses of variables, and s.defvars to find @@ -79,7 +87,7 @@ func (s *phiState) insertPhis() { if v.Op != ssa.OpFwdRef { continue } - var_ := v.Aux.(ir.Node) + var_ := v.Aux.(FwdRefAux).N // Optimization: look back 1 block for the definition. if len(b.Preds) == 1 { @@ -319,7 +327,7 @@ func (s *phiState) resolveFwdRefs() { if v.Op != ssa.OpFwdRef { continue } - n := s.varnum[v.Aux.(ir.Node)] + n := s.varnum[v.Aux.(FwdRefAux).N] v.Op = ssa.OpCopy v.Aux = nil v.AddArg(values[n]) @@ -450,7 +458,7 @@ func (s *simplePhiState) insertPhis() { continue } s.fwdrefs = append(s.fwdrefs, v) - var_ := v.Aux.(ir.Node) + var_ := v.Aux.(FwdRefAux).N if _, ok := s.defvars[b.ID][var_]; !ok { s.defvars[b.ID][var_] = v // treat FwdDefs as definitions. } @@ -464,7 +472,7 @@ loop: v := s.fwdrefs[len(s.fwdrefs)-1] s.fwdrefs = s.fwdrefs[:len(s.fwdrefs)-1] b := v.Block - var_ := v.Aux.(ir.Node) + var_ := v.Aux.(FwdRefAux).N if b == s.f.Entry { // No variable should be live at entry. s.s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, var_, v) @@ -531,7 +539,7 @@ func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ ir. } } // Generate a FwdRef for the variable and return that. - v := b.NewValue0A(line, ssa.OpFwdRef, t, var_) + v := b.NewValue0A(line, ssa.OpFwdRef, t, FwdRefAux{N: var_}) s.defvars[b.ID][var_] = v s.s.addNamedValue(var_, v) s.fwdrefs = append(s.fwdrefs, v) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 2378ea7711b95..90c754604299c 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -6051,7 +6051,7 @@ func (s *state) variable(name ir.Node, t *types.Type) *ssa.Value { } // Make a FwdRef, which records a value that's live on block input. // We'll find the matching definition as part of insertPhis. - v = s.newValue0A(ssa.OpFwdRef, t, name) + v = s.newValue0A(ssa.OpFwdRef, t, FwdRefAux{N: name}) s.fwdVars[name] = v s.addNamedValue(name, v) return v From 6db970e20acd7caeed268fdff458609570f21c90 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 6 Dec 2020 18:13:43 -0800 Subject: [PATCH 144/474] [dev.regabi] cmd/compile: rewrite Aux uses of ir.Node to *ir.Name [generated] Now that the only remaining ir.Node implementation that is stored (directly) into ssa.Aux, we can rewrite all of the conversions between ir.Node and ssa.Aux to use *ir.Name instead. rf doesn't have a way to rewrite the type switch case clauses, so we just use sed instead. 
There's only a handful, and they're the only times that "case ir.Node" appears anyway. The next CL will move the tag method declarations so that ir.Node no longer implements ssa.Aux. Passes buildall w/ toolstash -cmp. Updates #42982. [git-generate] cd src/cmd/compile/internal sed -i -e 's/case ir.Node/case *ir.Name/' gc/plive.go */ssa.go cd ssa rf ' ex . ../gc { import "cmd/compile/internal/ir" var v *Value v.Aux.(ir.Node) -> v.Aux.(*ir.Name) var n ir.Node var asAux func(Aux) strict n # only match ir.Node-typed expressions; not *ir.Name implicit asAux # match implicit assignments to ssa.Aux asAux(n) -> n.(*ir.Name) } ' Change-Id: I3206ef5f12a7cfa37c5fecc67a1ca02ea4d52b32 Reviewed-on: https://go-review.googlesource.com/c/go/+/275789 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/arm/ssa.go | 2 +- src/cmd/compile/internal/arm64/ssa.go | 2 +- src/cmd/compile/internal/gc/pgen.go | 2 +- src/cmd/compile/internal/gc/plive.go | 6 ++-- src/cmd/compile/internal/gc/ssa.go | 40 +++++++++++------------ src/cmd/compile/internal/mips/ssa.go | 2 +- src/cmd/compile/internal/mips64/ssa.go | 2 +- src/cmd/compile/internal/riscv64/ssa.go | 2 +- src/cmd/compile/internal/ssa/deadstore.go | 10 +++--- src/cmd/compile/internal/ssa/debug.go | 2 +- src/cmd/compile/internal/ssa/nilcheck.go | 2 +- src/cmd/compile/internal/ssa/regalloc.go | 2 +- src/cmd/compile/internal/wasm/ssa.go | 2 +- 13 files changed, 38 insertions(+), 38 deletions(-) diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go index b34e2973b24d8..8b155712aa80a 100644 --- a/src/cmd/compile/internal/arm/ssa.go +++ b/src/cmd/compile/internal/arm/ssa.go @@ -546,7 +546,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case *obj.LSym: wantreg = "SB" gc.AddAux(&p.From, v) - case ir.Node: + case *ir.Name: wantreg = "SP" gc.AddAux(&p.From, v) case nil: diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go index d5bd9687cfa7b..3eb0ae65573b3 100644 --- a/src/cmd/compile/internal/arm64/ssa.go +++ b/src/cmd/compile/internal/arm64/ssa.go @@ -396,7 +396,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case *obj.LSym: wantreg = "SB" gc.AddAux(&p.From, v) - case ir.Node: + case *ir.Name: wantreg = "SP" gc.AddAux(&p.From, v) case nil: diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 1da0929290626..a7b19953ba897 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -128,7 +128,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { scratchUsed := false for _, b := range f.Blocks { for _, v := range b.Values { - if n, ok := v.Aux.(ir.Node); ok { + if n, ok := v.Aux.(*ir.Name); ok { switch n.Class() { case ir.PPARAM, ir.PPARAMOUT: // Don't modify nodfp; it is a global. 
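An illustrative before/after of what the generated rewrite does at a typical use site (an editorial sketch of the rf patterns above, not taken from the diffs below):

// before: n := v.Aux.(ir.Node)   // dynamic check against an interface
// after:  n := v.Aux.(*ir.Name)  // dynamic check against the one concrete type
//
// before: v.Aux = n              // n has static type ir.Node
// after:  v.Aux = n.(*ir.Name)   // the "implicit asAux" match inserts the conversion
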
diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index 06e423daa1b63..9952bfcf36677 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -324,9 +324,9 @@ func affectedNode(v *ssa.Value) (ir.Node, ssa.SymEffect) { return n, ssa.SymWrite case ssa.OpVarLive: - return v.Aux.(ir.Node), ssa.SymRead + return v.Aux.(*ir.Name), ssa.SymRead case ssa.OpVarDef, ssa.OpVarKill: - return v.Aux.(ir.Node), ssa.SymWrite + return v.Aux.(*ir.Name), ssa.SymWrite case ssa.OpKeepAlive: n, _ := AutoVar(v.Args[0]) return n, ssa.SymRead @@ -341,7 +341,7 @@ func affectedNode(v *ssa.Value) (ir.Node, ssa.SymEffect) { case nil, *obj.LSym: // ok, but no node return nil, e - case ir.Node: + case *ir.Name: return a, e default: base.Fatalf("weird aux: %s", v.LongString()) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 90c754604299c..e8f345d8f686c 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -1504,7 +1504,7 @@ func (s *state) stmt(n ir.Node) { case ir.OVARDEF: if !s.canSSA(n.Left()) { - s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left(), s.mem(), false) + s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left().(*ir.Name), s.mem(), false) } case ir.OVARKILL: // Insert a varkill op to record that a variable is no longer live. @@ -1512,7 +1512,7 @@ func (s *state) stmt(n ir.Node) { // varkill in the store chain is enough to keep it correctly ordered // with respect to call ops. if !s.canSSA(n.Left()) { - s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left(), s.mem(), false) + s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left().(*ir.Name), s.mem(), false) } case ir.OVARLIVE: @@ -1525,7 +1525,7 @@ func (s *state) stmt(n ir.Node) { default: s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left()) } - s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left(), s.mem()) + s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left().(*ir.Name), s.mem()) case ir.OCHECKNIL: p := s.expr(n.Left()) @@ -1571,7 +1571,7 @@ func (s *state) exit() *ssa.Block { for _, n := range s.returns { addr := s.decladdrs[n] val := s.variable(n, n.Type()) - s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem()) + s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n.(*ir.Name), s.mem()) s.store(n.Type(), addr, val) // TODO: if val is ever spilled, we'd like to use the // PPARAMOUT slot for spilling it. That won't happen @@ -2866,7 +2866,7 @@ func (s *state) append(n ir.Node, inplace bool) *ssa.Value { if inplace { if sn.Op() == ir.ONAME && sn.Class() != ir.PEXTERN { // Tell liveness we're about to build a new slice - s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem()) + s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn.(*ir.Name), s.mem()) } capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceCapOffset, addr) s.store(types.Types[types.TINT], capaddr, r[2]) @@ -3076,7 +3076,7 @@ func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask // If this assignment clobbers an entire local variable, then emit // OpVarDef so liveness analysis knows the variable is redefined. 
if base := clobberBase(left); base.Op() == ir.ONAME && base.Class() != ir.PEXTERN && skip == 0 { - s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !ir.IsAutoTmp(base)) + s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base.(*ir.Name), s.mem(), !ir.IsAutoTmp(base)) } // Left is not ssa-able. Compute its address. @@ -4236,7 +4236,7 @@ func (s *state) openDeferRecord(n ir.Node) { // call the function directly if it is a static function. closureVal := s.expr(fn) closure := s.openDeferSave(nil, fn.Type(), closureVal) - opendefer.closureNode = closure.Aux.(ir.Node) + opendefer.closureNode = closure.Aux.(*ir.Name) if !(fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC) { opendefer.closure = closure } @@ -4249,7 +4249,7 @@ func (s *state) openDeferRecord(n ir.Node) { // runtime panic code to use. But in the defer exit code, we will // call the method directly. closure := s.openDeferSave(nil, fn.Type(), closureVal) - opendefer.closureNode = closure.Aux.(ir.Node) + opendefer.closureNode = closure.Aux.(*ir.Name) } else { if fn.Op() != ir.ODOTINTER { base.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op()) @@ -4259,8 +4259,8 @@ func (s *state) openDeferRecord(n ir.Node) { // Important to get the receiver type correct, so it is recognized // as a pointer for GC purposes. opendefer.rcvr = s.openDeferSave(nil, fn.Type().Recv().Type, rcvr) - opendefer.closureNode = opendefer.closure.Aux.(ir.Node) - opendefer.rcvrNode = opendefer.rcvr.Aux.(ir.Node) + opendefer.closureNode = opendefer.closure.Aux.(*ir.Name) + opendefer.rcvrNode = opendefer.rcvr.Aux.(*ir.Name) } for _, argn := range n.Rlist().Slice() { var v *ssa.Value @@ -4270,7 +4270,7 @@ func (s *state) openDeferRecord(n ir.Node) { v = s.openDeferSave(argn, argn.Type(), nil) } args = append(args, v) - argNodes = append(argNodes, v.Aux.(ir.Node)) + argNodes = append(argNodes, v.Aux.(*ir.Name)) } opendefer.argVals = args opendefer.argNodes = argNodes @@ -4458,16 +4458,16 @@ func (s *state) openDeferExit() { // use the first call of the last defer exit to compute liveness // for the deferreturn, so we want all stack slots to be live. if r.closureNode != nil { - s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false) + s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode.(*ir.Name), s.mem(), false) } if r.rcvrNode != nil { if r.rcvrNode.Type().HasPointers() { - s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false) + s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode.(*ir.Name), s.mem(), false) } } for _, argNode := range r.argNodes { if argNode.Type().HasPointers() { - s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false) + s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode.(*ir.Name), s.mem(), false) } } @@ -4855,17 +4855,17 @@ func (s *state) addr(n ir.Node) *ssa.Value { } if n == nodfp { // Special arg that points to the frame pointer (Used by ORECOVER). - return s.entryNewValue2A(ssa.OpLocalAddr, t, n, s.sp, s.startmem) + return s.entryNewValue2A(ssa.OpLocalAddr, t, n.(*ir.Name), s.sp, s.startmem) } s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs) return nil case ir.PAUTO: - return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !ir.IsAutoTmp(n)) + return s.newValue2Apos(ssa.OpLocalAddr, t, n.(*ir.Name), s.sp, s.mem(), !ir.IsAutoTmp(n)) case ir.PPARAMOUT: // Same as PAUTO -- cannot generate LEA early. 
// ensure that we reuse symbols for out parameters so // that cse works on their addresses - return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true) + return s.newValue2Apos(ssa.OpLocalAddr, t, n.(*ir.Name), s.sp, s.mem(), true) default: s.Fatalf("variable address class %v not implemented", n.Class()) return nil @@ -5951,7 +5951,7 @@ func (s *state) dottype(n ir.Node, commaok bool) (res, resok *ssa.Value) { // unSSAable type, use temporary. // TODO: get rid of some of these temporaries. tmp = tempAt(n.Pos(), s.curfn, n.Type()) - s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem()) + s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp.(*ir.Name), s.mem()) addr = s.addr(tmp) } @@ -6027,7 +6027,7 @@ func (s *state) dottype(n ir.Node, commaok bool) (res, resok *ssa.Value) { delete(s.vars, valVar) } else { res = s.load(n.Type(), addr) - s.vars[memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem()) + s.vars[memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp.(*ir.Name), s.mem()) } resok = s.variable(okVar, types.Types[types.TBOOL]) delete(s.vars, okVar) @@ -6680,7 +6680,7 @@ func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) { case *obj.LSym: a.Name = obj.NAME_EXTERN a.Sym = n - case ir.Node: + case *ir.Name: if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT { a.Name = obj.NAME_PARAM a.Sym = ir.Orig(n).Sym().Linksym() diff --git a/src/cmd/compile/internal/mips/ssa.go b/src/cmd/compile/internal/mips/ssa.go index bd71b2fcd874f..10453c27d5f83 100644 --- a/src/cmd/compile/internal/mips/ssa.go +++ b/src/cmd/compile/internal/mips/ssa.go @@ -289,7 +289,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case *obj.LSym: wantreg = "SB" gc.AddAux(&p.From, v) - case ir.Node: + case *ir.Name: wantreg = "SP" gc.AddAux(&p.From, v) case nil: diff --git a/src/cmd/compile/internal/mips64/ssa.go b/src/cmd/compile/internal/mips64/ssa.go index bcadebde4e7cb..9aaf8715de3f7 100644 --- a/src/cmd/compile/internal/mips64/ssa.go +++ b/src/cmd/compile/internal/mips64/ssa.go @@ -263,7 +263,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case *obj.LSym: wantreg = "SB" gc.AddAux(&p.From, v) - case ir.Node: + case *ir.Name: wantreg = "SP" gc.AddAux(&p.From, v) case nil: diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go index c81b6897a6bf6..d382304d72e79 100644 --- a/src/cmd/compile/internal/riscv64/ssa.go +++ b/src/cmd/compile/internal/riscv64/ssa.go @@ -324,7 +324,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case *obj.LSym: wantreg = "SB" gc.AddAux(&p.From, v) - case ir.Node: + case *ir.Name: wantreg = "SP" gc.AddAux(&p.From, v) case nil: diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go index f3ef33d67040e..d0446a0311b52 100644 --- a/src/cmd/compile/internal/ssa/deadstore.go +++ b/src/cmd/compile/internal/ssa/deadstore.go @@ -147,7 +147,7 @@ func elimDeadAutosGeneric(f *Func) { switch v.Op { case OpAddr, OpLocalAddr: // Propagate the address if it points to an auto. - n, ok := v.Aux.(ir.Node) + n, ok := v.Aux.(*ir.Name) if !ok || n.Class() != ir.PAUTO { return } @@ -158,7 +158,7 @@ func elimDeadAutosGeneric(f *Func) { return case OpVarDef, OpVarKill: // v should be eliminated if we eliminate the auto. 
- n, ok := v.Aux.(ir.Node) + n, ok := v.Aux.(*ir.Name) if !ok || n.Class() != ir.PAUTO { return } @@ -174,7 +174,7 @@ func elimDeadAutosGeneric(f *Func) { // for open-coded defers from being removed (since they // may not be used by the inline code, but will be used by // panic processing). - n, ok := v.Aux.(ir.Node) + n, ok := v.Aux.(*ir.Name) if !ok || n.Class() != ir.PAUTO { return } @@ -303,7 +303,7 @@ func elimUnreadAutos(f *Func) { var stores []*Value for _, b := range f.Blocks { for _, v := range b.Values { - n, ok := v.Aux.(ir.Node) + n, ok := v.Aux.(*ir.Name) if !ok { continue } @@ -335,7 +335,7 @@ func elimUnreadAutos(f *Func) { // Eliminate stores to unread autos. for _, store := range stores { - n, _ := store.Aux.(ir.Node) + n, _ := store.Aux.(*ir.Name) if seen[n] { continue } diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go index 6123978e55ca9..405817dbe1529 100644 --- a/src/cmd/compile/internal/ssa/debug.go +++ b/src/cmd/compile/internal/ssa/debug.go @@ -718,7 +718,7 @@ func (state *debugState) processValue(v *Value, vSlots []SlotID, vReg *Register) switch { case v.Op == OpVarDef, v.Op == OpVarKill: - n := v.Aux.(ir.Node) + n := v.Aux.(*ir.Name) if ir.IsSynthetic(n) { break } diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go index b36f6b97e18c7..bae50657c9adc 100644 --- a/src/cmd/compile/internal/ssa/nilcheck.go +++ b/src/cmd/compile/internal/ssa/nilcheck.go @@ -236,7 +236,7 @@ func nilcheckelim2(f *Func) { continue } if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() { - if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(ir.Node).Type().HasPointers()) { + if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(*ir.Name).Type().HasPointers()) { // These ops don't really change memory. continue // Note: OpVarDef requires that the defined variable not have pointers. diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index 376ca975123c9..8c25b1c81dc0c 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -1249,7 +1249,7 @@ func (s *regAllocState) regalloc(f *Func) { // This forces later liveness analysis to make the // value live at this point. v.SetArg(0, s.makeSpill(a, b)) - } else if _, ok := a.Aux.(ir.Node); ok && vi.rematerializeable { + } else if _, ok := a.Aux.(*ir.Name); ok && vi.rematerializeable { // Rematerializeable value with a gc.Node. This is the address of // a stack object (e.g. an LEAQ). Keep the object live. // Change it to VarLive, which is what plive expects for locals. diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go index e7451381b4abe..01ba721556d73 100644 --- a/src/cmd/compile/internal/wasm/ssa.go +++ b/src/cmd/compile/internal/wasm/ssa.go @@ -237,7 +237,7 @@ func ssaGenValueOnStack(s *gc.SSAGenState, v *ssa.Value, extend bool) { switch v.Aux.(type) { case *obj.LSym: gc.AddAux(&p.From, v) - case ir.Node: + case *ir.Name: p.From.Reg = v.Args[0].Reg() gc.AddAux(&p.From, v) default: From bb31c75343de2114f541cd66870ace3f33047550 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 6 Dec 2020 18:25:41 -0800 Subject: [PATCH 145/474] [dev.regabi] cmd/compile: ir.Node is no longer an ssa.Aux After the previous rewrite, we can now remove CanBeAnSSASym and CanBeAnSSAAux from the generic Node interface, and declare them just on *ir.Name. Updates #42982. 
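The FwdRefAux wrapper introduced above and the tag methods this CL moves onto *ir.Name rest on two small Go idioms that are easy to miss inside the diffs: a marker ("tag") method that gates which types may be stored in an ssa.Aux, and a zero-width [0]func() field that makes the wrapper non-comparable. The sketch below is a minimal, self-contained illustration, not compiler code; Aux, node, and name are invented stand-ins for ssa.Aux, ir.Node, and *ir.Name, and only the shape of fwdRefAux mirrors the real declaration.

package main

import "fmt"

// Aux is an invented stand-in for ssa.Aux: an interface satisfied only
// by types that opt in through the CanBeAnSSAAux tag method.
type Aux interface {
	CanBeAnSSAAux()
}

// node and name are invented stand-ins for ir.Node and *ir.Name.
type node interface{ Op() string }

type name struct{ op string }

func (n *name) Op() string { return n.op }

// After this series, only *name (really *ir.Name) declares the tag.
func (*name) CanBeAnSSAAux() {}

// fwdRefAux mirrors the shape of gc.FwdRefAux: it wraps an arbitrary
// node so the wrapped value can still be stored as an Aux. The
// zero-width [0]func() field occupies no space but makes the struct
// non-comparable, since func values cannot be compared.
type fwdRefAux struct {
	_ [0]func()
	N node
}

func (fwdRefAux) CanBeAnSSAAux() {}

func main() {
	var a Aux = fwdRefAux{N: &name{op: "ONAME"}}
	fmt.Println(a.(fwdRefAux).N.Op()) // unwrap and use: prints ONAME
}

Direct == on fwdRefAux values is rejected at compile time, and comparing two Aux interface values that hold one panics at runtime rather than silently comparing the wrapped nodes, which is exactly the accidental comparison the [0]func() field rules out.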
Change-Id: I865771fd30c95c009740410844f20ade08648343 Reviewed-on: https://go-review.googlesource.com/c/go/+/275790 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Cuong Manh Le Reviewed-by: Russ Cox --- src/cmd/compile/internal/ir/mini.go | 4 ---- src/cmd/compile/internal/ir/name.go | 4 +++- src/cmd/compile/internal/ir/node.go | 5 ----- 3 files changed, 3 insertions(+), 10 deletions(-) diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go index edb3b197da1b5..7ecdcbf32f0af 100644 --- a/src/cmd/compile/internal/ir/mini.go +++ b/src/cmd/compile/internal/ir/mini.go @@ -197,7 +197,3 @@ func (n *miniNode) SetOpt(interface{}) { panic(n.no("SetOpt")) } func (n *miniNode) MarkReadonly() { panic(n.no("MarkReadonly")) } func (n *miniNode) TChanDir() types.ChanDir { panic(n.no("TChanDir")) } func (n *miniNode) SetTChanDir(types.ChanDir) { panic(n.no("SetTChanDir")) } - -// TODO: Delete when these are removed from Node itself. -func (*miniNode) CanBeAnSSASym() {} -func (*miniNode) CanBeAnSSAAux() {} diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index c527ba281dd3b..319c40e4e9373 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -165,7 +165,9 @@ func (n *Name) SetOffset(x int64) { n.offset = x } func (n *Name) Iota() int64 { return n.offset } func (n *Name) SetIota(x int64) { n.offset = x } -func (*Name) CanBeNtype() {} +func (*Name) CanBeNtype() {} +func (*Name) CanBeAnSSASym() {} +func (*Name) CanBeAnSSAAux() {} func (n *Name) SetOp(op Op) { if n.op != ONONAME { diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index b878b00546440..d6dab0b9e21bd 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -109,11 +109,6 @@ type Node interface { MarkNonNil() HasCall() bool SetHasCall(x bool) - - // Only for SSA and should be removed when SSA starts - // using a more specific type than Node. - CanBeAnSSASym() - CanBeAnSSAAux() } // Line returns n's position as a string. If n has been inlined, From dbf2fc8cff5f7d6a5fcbeea0d4b0349cc7d158e2 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 6 Dec 2020 18:28:49 -0800 Subject: [PATCH 146/474] [dev.regabi] cmd/compile: replace many uses of ir.Node with *ir.Name This commit adds exactly two "n := n.(*ir.Name)" statements, each immediately preceded by a "case ir.ONAME:" clause in an n.Op() switch. The rest of the changes simply replace "ir.Node" with "*ir.Name" and remove now-unnecessary "n.(*ir.Name)" type assertions, exposing the latent typing details. Passes buildall w/ toolstash -cmp. Updates #42982.
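The narrowing pattern this message describes is worth seeing in isolation. Below is a toy sketch with invented Node and Name types, not the compiler's: inside a "case ONAME" arm of an Op switch, re-declaring n with its concrete type lets everything after the assertion use the concrete fields without further casts.

package main

import "fmt"

// Node and Name are invented stand-ins for ir.Node and *ir.Name.
type Node interface{ Op() string }

type Name struct{ ident string }

func (n *Name) Op() string { return "ONAME" }

func walk(n Node) {
	switch n.Op() {
	case "ONAME":
		// Shadow the interface-typed n with its concrete type, as the
		// two added "n := n.(*ir.Name)" statements do.
		n := n.(*Name)
		fmt.Println("name:", n.ident) // fields now directly accessible
	default:
		fmt.Println("op:", n.Op())
	}
}

func main() {
	walk(&Name{ident: "x"})
}

Because the assertion sits immediately under the op check, it can fail only if an ONAME node were ever represented by something other than *Name, which is precisely the invariant this series establishes for ir.ONAME and *ir.Name.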
Change-Id: I8ea3bbb7ddf0c7192245cafa49a19c0e7a556a39 Reviewed-on: https://go-review.googlesource.com/c/go/+/275791 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Cuong Manh Le Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/inl.go | 5 ++-- src/cmd/compile/internal/gc/order.go | 6 ++--- src/cmd/compile/internal/gc/pgen.go | 28 +++++++++++----------- src/cmd/compile/internal/gc/ssa.go | 29 ++++++++++++----------- src/cmd/compile/internal/ssa/deadstore.go | 10 ++++---- src/cmd/compile/internal/ssa/debug.go | 8 +++---- 6 files changed, 44 insertions(+), 42 deletions(-) diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index f965fa6325a5a..37e5167c25db7 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -205,7 +205,7 @@ func caninl(fn *ir.Func) { visitor := hairyVisitor{ budget: inlineMaxBudget, extraCallCost: cc, - usedLocals: make(map[ir.Node]bool), + usedLocals: make(map[*ir.Name]bool), } if visitor.tooHairy(fn) { reason = visitor.reason @@ -292,7 +292,7 @@ type hairyVisitor struct { budget int32 reason string extraCallCost int32 - usedLocals map[ir.Node]bool + usedLocals map[*ir.Name]bool do func(ir.Node) error } @@ -431,6 +431,7 @@ func (v *hairyVisitor) doNode(n ir.Node) error { } case ir.ONAME: + n := n.(*ir.Name) if n.Class() == ir.PAUTO { v.usedLocals[n] = true } diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 39b78c9819517..c3645256a6da1 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -63,7 +63,7 @@ func order(fn *ir.Func) { // newTemp allocates a new temporary with the given type, // pushes it onto the temp stack, and returns it. // If clear is true, newTemp emits code to zero the temporary. -func (o *Order) newTemp(t *types.Type, clear bool) ir.Node { +func (o *Order) newTemp(t *types.Type, clear bool) *ir.Name { var v *ir.Name // Note: LongString is close to the type equality we want, // but not exactly. We still need to double-check with types.Identical. @@ -107,11 +107,11 @@ func (o *Order) copyExpr(n ir.Node) ir.Node { // (The other candidate would be map access, but map access // returns a pointer to the result data instead of taking a pointer // to be filled in.) -func (o *Order) copyExprClear(n ir.Node) ir.Node { +func (o *Order) copyExprClear(n ir.Node) *ir.Name { return o.copyExpr1(n, true) } -func (o *Order) copyExpr1(n ir.Node, clear bool) ir.Node { +func (o *Order) copyExpr1(n ir.Node, clear bool) *ir.Name { t := n.Type() v := o.newTemp(t, clear) a := ir.Nod(ir.OAS, v, n) diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index a7b19953ba897..5b04e106571f6 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -438,7 +438,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S // which used to use the ONAME form. isODCLFUNC := infosym.Name == "" - var apdecls []ir.Node + var apdecls []*ir.Name // Populate decls for fn. 
if isODCLFUNC { for _, n := range fn.Dcl { @@ -495,7 +495,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S return scopes, inlcalls } -func declPos(decl ir.Node) src.XPos { +func declPos(decl *ir.Name) src.XPos { if decl.Name().Defn != nil && (decl.Name().Captured() || decl.Name().Byval()) { // It's not clear which position is correct for captured variables here: // * decl.Pos is the wrong position for captured variables, in the inner @@ -518,10 +518,10 @@ func declPos(decl ir.Node) src.XPos { // createSimpleVars creates a DWARF entry for every variable declared in the // function, claiming that they are permanently on the stack. -func createSimpleVars(fnsym *obj.LSym, apDecls []ir.Node) ([]ir.Node, []*dwarf.Var, map[ir.Node]bool) { +func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) { var vars []*dwarf.Var - var decls []ir.Node - selected := make(map[ir.Node]bool) + var decls []*ir.Name + selected := make(map[*ir.Name]bool) for _, n := range apDecls { if ir.IsAutoTmp(n) { continue @@ -534,7 +534,7 @@ func createSimpleVars(fnsym *obj.LSym, apDecls []ir.Node) ([]ir.Node, []*dwarf.V return decls, vars, selected } -func createSimpleVar(fnsym *obj.LSym, n ir.Node) *dwarf.Var { +func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { var abbrev int offs := n.Offset() @@ -585,13 +585,13 @@ func createSimpleVar(fnsym *obj.LSym, n ir.Node) *dwarf.Var { // createComplexVars creates recomposed DWARF vars with location lists, // suitable for describing optimized code. -func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]ir.Node, []*dwarf.Var, map[ir.Node]bool) { +func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) { debugInfo := fn.DebugInfo.(*ssa.FuncDebug) // Produce a DWARF variable entry for each user variable. - var decls []ir.Node + var decls []*ir.Name var vars []*dwarf.Var - ssaVars := make(map[ir.Node]bool) + ssaVars := make(map[*ir.Name]bool) for varID, dvar := range debugInfo.Vars { n := dvar @@ -611,11 +611,11 @@ func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]ir.Node, []*dwarf.Var, m // createDwarfVars process fn, returning a list of DWARF variables and the // Nodes they represent. -func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []ir.Node) ([]ir.Node, []*dwarf.Var) { +func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var) { // Collect a raw list of DWARF vars. var vars []*dwarf.Var - var decls []ir.Node - var selected map[ir.Node]bool + var decls []*ir.Name + var selected map[*ir.Name]bool if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK { decls, vars, selected = createComplexVars(fnsym, fn) } else { @@ -714,9 +714,9 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []ir. // function that is not local to the package being compiled, then the // names of the variables may have been "versioned" to avoid conflicts // with local vars; disregard this versioning when sorting. 
-func preInliningDcls(fnsym *obj.LSym) []ir.Node { +func preInliningDcls(fnsym *obj.LSym) []*ir.Name { fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*ir.Func) - var rdcl []ir.Node + var rdcl []*ir.Name for _, n := range fn.Inl.Dcl { c := n.Sym().Name[0] // Avoid reporting "_" parameters, since if there are more than diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index e8f345d8f686c..9539e9cc8a4c2 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -410,7 +410,7 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { } // Generate addresses of local declarations - s.decladdrs = map[ir.Node]*ssa.Value{} + s.decladdrs = map[*ir.Name]*ssa.Value{} var args []ssa.Param var results []ssa.Param for _, n := range fn.Dcl { @@ -576,7 +576,7 @@ type openDeferInfo struct { // function call are stored. argVals []*ssa.Value // The nodes representing the argtmps where the args of the defer are stored - argNodes []ir.Node + argNodes []*ir.Name } type state struct { @@ -613,7 +613,7 @@ type state struct { defvars []map[ir.Node]*ssa.Value // addresses of PPARAM and PPARAMOUT variables. - decladdrs map[ir.Node]*ssa.Value + decladdrs map[*ir.Name]*ssa.Value // starting values. Memory, stack pointer, and globals pointer startmem *ssa.Value @@ -633,7 +633,7 @@ type state struct { panics map[funcLine]*ssa.Block // list of PPARAMOUT (return) variables. - returns []ir.Node + returns []*ir.Name cgoUnsafeArgs bool hasdefer bool // whether the function contains a defer statement @@ -685,7 +685,7 @@ func (s *state) Fatalf(msg string, args ...interface{}) { func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) } func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() } -func ssaMarker(name string) ir.Node { +func ssaMarker(name string) *ir.Name { return NewName(&types.Sym{Name: name}) } @@ -1571,7 +1571,7 @@ func (s *state) exit() *ssa.Block { for _, n := range s.returns { addr := s.decladdrs[n] val := s.variable(n, n.Type()) - s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n.(*ir.Name), s.mem()) + s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem()) s.store(n.Type(), addr, val) // TODO: if val is ever spilled, we'd like to use the // PPARAMOUT slot for spilling it. That won't happen @@ -4224,7 +4224,7 @@ func (s *state) openDeferRecord(n ir.Node) { s.stmtList(n.List()) var args []*ssa.Value - var argNodes []ir.Node + var argNodes []*ir.Name opendefer := &openDeferInfo{ n: n, @@ -4467,7 +4467,7 @@ func (s *state) openDeferExit() { } for _, argNode := range r.argNodes { if argNode.Type().HasPointers() { - s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode.(*ir.Name), s.mem(), false) + s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false) } } @@ -4838,6 +4838,7 @@ func (s *state) addr(n ir.Node) *ssa.Value { t := types.NewPtr(n.Type()) switch n.Op() { case ir.ONAME: + n := n.(*ir.Name) switch n.Class() { case ir.PEXTERN: // global variable @@ -4855,17 +4856,17 @@ func (s *state) addr(n ir.Node) *ssa.Value { } if n == nodfp { // Special arg that points to the frame pointer (Used by ORECOVER). - return s.entryNewValue2A(ssa.OpLocalAddr, t, n.(*ir.Name), s.sp, s.startmem) + return s.entryNewValue2A(ssa.OpLocalAddr, t, n, s.sp, s.startmem) } s.Fatalf("addr of undeclared ONAME %v. 
declared: %v", n, s.decladdrs) return nil case ir.PAUTO: - return s.newValue2Apos(ssa.OpLocalAddr, t, n.(*ir.Name), s.sp, s.mem(), !ir.IsAutoTmp(n)) + return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !ir.IsAutoTmp(n)) case ir.PPARAMOUT: // Same as PAUTO -- cannot generate LEA early. // ensure that we reuse symbols for out parameters so // that cse works on their addresses - return s.newValue2Apos(ssa.OpLocalAddr, t, n.(*ir.Name), s.sp, s.mem(), true) + return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true) default: s.Fatalf("variable address class %v not implemented", n.Class()) return nil @@ -6196,15 +6197,15 @@ func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) { } } -// byXoffset implements sort.Interface for []*Node using Xoffset as the ordering. -type byXoffset []ir.Node +// byXoffset implements sort.Interface for []*ir.Name using Xoffset as the ordering. +type byXoffset []*ir.Name func (s byXoffset) Len() int { return len(s) } func (s byXoffset) Less(i, j int) bool { return s[i].Offset() < s[j].Offset() } func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func emitStackObjects(e *ssafn, pp *Progs) { - var vars []ir.Node + var vars []*ir.Name for _, n := range e.curfn.Dcl { if livenessShouldTrack(n) && n.Addrtaken() { vars = append(vars, n) diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go index d0446a0311b52..a68c82ba97905 100644 --- a/src/cmd/compile/internal/ssa/deadstore.go +++ b/src/cmd/compile/internal/ssa/deadstore.go @@ -137,9 +137,9 @@ func dse(f *Func) { // reaches stores then we delete all the stores. The other operations will then // be eliminated by the dead code elimination pass. func elimDeadAutosGeneric(f *Func) { - addr := make(map[*Value]ir.Node) // values that the address of the auto reaches - elim := make(map[*Value]ir.Node) // values that could be eliminated if the auto is - used := make(map[ir.Node]bool) // used autos that must be kept + addr := make(map[*Value]*ir.Name) // values that the address of the auto reaches + elim := make(map[*Value]*ir.Name) // values that could be eliminated if the auto is + used := make(map[*ir.Name]bool) // used autos that must be kept // visit the value and report whether any of the maps are updated visit := func(v *Value) (changed bool) { @@ -222,7 +222,7 @@ func elimDeadAutosGeneric(f *Func) { } // Propagate any auto addresses through v. - var node ir.Node + var node *ir.Name for _, a := range args { if n, ok := addr[a]; ok && !used[n] { if node == nil { @@ -299,7 +299,7 @@ func elimUnreadAutos(f *Func) { // Loop over all ops that affect autos taking note of which // autos we need and also stores that we might be able to // eliminate. - seen := make(map[ir.Node]bool) + seen := make(map[*ir.Name]bool) var stores []*Value for _, b := range f.Blocks { for _, v := range b.Values { diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go index 405817dbe1529..68b6ab5fe9e76 100644 --- a/src/cmd/compile/internal/ssa/debug.go +++ b/src/cmd/compile/internal/ssa/debug.go @@ -25,7 +25,7 @@ type FuncDebug struct { // Slots is all the slots used in the debug info, indexed by their SlotID. Slots []LocalSlot // The user variables, indexed by VarID. - Vars []ir.Node + Vars []*ir.Name // The slots that make up each variable, indexed by VarID. VarSlots [][]SlotID // The location list data, indexed by VarID. Must be processed by PutLocationList. 
@@ -166,7 +166,7 @@ func (s *debugState) logf(msg string, args ...interface{}) { type debugState struct { // See FuncDebug. slots []LocalSlot - vars []ir.Node + vars []*ir.Name varSlots [][]SlotID lists [][]byte @@ -190,7 +190,7 @@ type debugState struct { // The pending location list entry for each user variable, indexed by VarID. pendingEntries []pendingEntry - varParts map[ir.Node][]SlotID + varParts map[*ir.Name][]SlotID blockDebug []BlockDebug pendingSlotLocs []VarLoc liveSlots []liveSlot @@ -347,7 +347,7 @@ func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset fu } if state.varParts == nil { - state.varParts = make(map[ir.Node][]SlotID) + state.varParts = make(map[*ir.Name][]SlotID) } else { for n := range state.varParts { delete(state.varParts, n) From e2d278bfeb2f0f117efc50b3f0f9dcb086a45ed2 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 7 Dec 2020 15:26:24 -0500 Subject: [PATCH 147/474] [dev.regabi] cmd/compile: two small fixes Addressing comments from CL 275434 and CL 275444. I forgot to run "git rw" to rebase the fixup CLs down before running "git submit". Change-Id: Ideaa2340a81511491c096555c6834cd9bdb267d8 Reviewed-on: https://go-review.googlesource.com/c/go/+/275881 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/mknode.go | 2 +- src/cmd/compile/internal/ir/stmt.go | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/cmd/compile/internal/ir/mknode.go b/src/cmd/compile/internal/ir/mknode.go index 978b2de5a5247..72034022cbdf6 100644 --- a/src/cmd/compile/internal/ir/mknode.go +++ b/src/cmd/compile/internal/ir/mknode.go @@ -144,7 +144,7 @@ func forNodeFields(typName string, typ *types.Struct, f func(name string, is fun if strings.ToLower(strings.TrimSuffix(v.Name(), "_")) != "body" { continue } - case "Name", "Pack": + case "Name": continue } switch v.Name() { diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index ccf46dfa73a27..68f9b0bd7c5db 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -33,7 +33,12 @@ func (n *Decl) Left() Node { return n.X } func (n *Decl) SetLeft(x Node) { n.X = x } // A Stmt is a Node that can appear as a statement. -// This includes statement-like expressions such as <-c and f(). +// This includes statement-like expressions such as f(). +// +// (It's possible it should include <-c, but that would require +// splitting ORECV out of UnaryExpr, which hasn't yet been +// necessary. Maybe instead we will introduce ExprStmt at +// some point.) type Stmt interface { Node isStmt() From 4090af83c57c857de600ada68e7a27dffd37d8b1 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sun, 6 Dec 2020 15:17:05 -0500 Subject: [PATCH 148/474] [dev.regabi] cmd/compile: use reflection in ir.Dump ir.Dump is the final (I think!) piece of the compiler that was walking nodes using Left, Right etc without knowing what they meant. This CL uses reflection to walk nodes without knowing what they mean instead. One benefit is that we can print actual meanings (field names). While we are here, I could not resist fixing a long-standing mental TODO: make the line number more clearly a line number. I've forgotten where the line number is in the dumps far too many times in the last decade. As a small example, here is a fragment of go tool compile -W test/235.go: . FOR l(28) tc(1) . . LT-init . . . AS l(28) tc(1) . . . . 
NAME-main..autotmp_4 l(28) x(0) class(PAUTO) esc(N) tc(1) assigned used int . . . . LEN l(28) tc(1) int . . . . . NAME-main.xs g(2) l(26) x(0) class(PPARAM) esc(no) tc(1) used SLICE-[]uint64 . . LT l(28) tc(1) hascall bool . . . NAME-main.i g(4) l(28) x(0) class(PAUTO) esc(no) tc(1) assigned used int . . . NAME-main..autotmp_4 l(28) x(0) class(PAUTO) esc(N) tc(1) assigned used int . . BLOCK l(28) . . BLOCK-list . . . ASOP-ADD l(28) tc(1) implicit(true) int . . . . NAME-main.i g(4) l(28) x(0) class(PAUTO) esc(no) tc(1) assigned used int . . . . LITERAL-1 l(28) tc(1) int . FOR-body . . VARKILL l(28) tc(1) . . . NAME-main..autotmp_4 l(28) x(0) class(PAUTO) esc(N) tc(1) assigned used int . . IF l(29) tc(1) . . . LT l(29) tc(1) bool . . . . INDEX l(29) tc(1) uint64 . . . . . NAME-main.xs g(2) l(26) x(0) class(PPARAM) esc(no) tc(1) used SLICE-[]uint64 . . . . . NAME-main.i g(4) l(28) x(0) class(PAUTO) esc(no) tc(1) assigned used int . . . . NAME-main.m g(3) l(27) x(0) class(PAUTO) esc(no) tc(1) assigned used uint64 . . IF-body . . . AS l(30) tc(1) . . . . NAME-main.m g(3) l(27) x(0) class(PAUTO) esc(no) tc(1) assigned used uint64 . . . . INDEX l(30) tc(1) uint64 . . . . . NAME-main.xs g(2) l(26) x(0) class(PPARAM) esc(no) tc(1) used SLICE-[]uint64 . . . . . NAME-main.i g(4) l(28) x(0) class(PAUTO) esc(no) tc(1) assigned used int and here it is after this CL: . FOR tc(1) # 235.go:28 . FOR-Cond . . LT-init . . . AS tc(1) # 235.go:28 . . . . NAME-main..autotmp_4 x(0) class(PAUTO) esc(N) tc(1) assigned used int # 235.go:28 . . . . LEN tc(1) int # 235.go:28 int . . . . . NAME-main.xs g(2) x(0) class(PPARAM) esc(no) tc(1) used SLICE-[]uint64 # 235.go:26 . . LT tc(1) hascall bool # 235.go:28 bool . . . NAME-main.i g(4) x(0) class(PAUTO) esc(no) tc(1) assigned used int # 235.go:28 . . . NAME-main..autotmp_4 x(0) class(PAUTO) esc(N) tc(1) assigned used int # 235.go:28 . FOR-Post . . BLOCK # 235.go:28 . . BLOCK-List . . . ASOP-ADD tc(1) implicit(true) int # 235.go:28 int . . . . NAME-main.i g(4) x(0) class(PAUTO) esc(no) tc(1) assigned used int # 235.go:28 . . . . LITERAL-1 tc(1) int # 235.go:28 . FOR-Body . . VARKILL tc(1) # 235.go:28 . . . NAME-main..autotmp_4 x(0) class(PAUTO) esc(N) tc(1) assigned used int # 235.go:28 . . IF tc(1) # 235.go:29 . . IF-Cond . . . LT tc(1) bool # 235.go:29 bool . . . . INDEX tc(1) uint64 # 235.go:29 uint64 . . . . . NAME-main.xs g(2) x(0) class(PPARAM) esc(no) tc(1) used SLICE-[]uint64 # 235.go:26 . . . . . NAME-main.i g(4) x(0) class(PAUTO) esc(no) tc(1) assigned used int # 235.go:28 . . . . NAME-main.m g(3) x(0) class(PAUTO) esc(no) tc(1) assigned used uint64 # 235.go:27 . . IF-Body . . . AS tc(1) # 235.go:30 . . . . NAME-main.m g(3) x(0) class(PAUTO) esc(no) tc(1) assigned used uint64 # 235.go:27 . . . . INDEX tc(1) uint64 # 235.go:30 uint64 . . . . . NAME-main.xs g(2) x(0) class(PPARAM) esc(no) tc(1) used SLICE-[]uint64 # 235.go:26 . . . . . NAME-main.i g(4) x(0) class(PAUTO) esc(no) tc(1) assigned used int # 235.go:28 Note in particular the clear marking of FOR-Cond, FOR-Post, FOR-Body compared to the original. The only changes to a few test files are the improved field name lines, and of course the line numbers. Passes buildall w/ toolstash -cmp. 
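The core of the new ir.Dump is a generic struct walk over node fields. Here is a freestanding sketch of that approach; Node, BinExpr, and Lit are invented toy types, and the real code additionally handles Nodes lists, field-name suppression for positional arguments, and the special cases visible in the diff below.

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// Node, BinExpr, and Lit are invented toy types for this sketch.
type Node interface{ Op() string }

type BinExpr struct {
	X, Y Node
	op   string // unexported: the walker must skip it
}

func (b *BinExpr) Op() string { return b.op }

type Lit struct{ Val int }

func (*Lit) Op() string { return "LITERAL" }

// dump walks n's struct fields with reflect, the way the rewritten
// ir.Dump does: skip unexported fields (calling Interface on them
// would panic), skip nil pointers, interfaces, and slices, and print
// a field-name header line before recursing into Node-valued fields.
func dump(n Node, depth int) {
	fmt.Printf("%s%s\n", strings.Repeat(". ", depth), n.Op())
	v := reflect.ValueOf(n).Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		tf := t.Field(i)
		if tf.PkgPath != "" {
			continue // unexported field
		}
		vf := v.Field(i)
		switch vf.Kind() {
		case reflect.Interface, reflect.Ptr, reflect.Slice:
			if vf.IsNil() {
				continue
			}
		}
		if child, ok := vf.Interface().(Node); ok {
			fmt.Printf("%s%s-%s\n", strings.Repeat(". ", depth), n.Op(), tf.Name)
			dump(child, depth+1)
		}
	}
}

func main() {
	dump(&BinExpr{X: &Lit{Val: 1}, Y: &Lit{Val: 2}, op: "ADD"}, 0)
}

Running it prints an indented tree with field-name header lines (ADD-X, ADD-Y) in the style of the FOR-Cond and FOR-Body headers in the sample output above.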
Change-Id: I5b654d9d8ee898976d4c387742ea688a082bac78 Reviewed-on: https://go-review.googlesource.com/c/go/+/275785 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/fmt.go | 149 +++++++++++++++++++---------- 1 file changed, 97 insertions(+), 52 deletions(-) diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 68e425bdaa287..4bea6e2ae0938 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -10,6 +10,9 @@ import ( "go/constant" "io" "os" + "path/filepath" + "reflect" + "strings" "unicode/utf8" @@ -957,17 +960,6 @@ func dumpNodeHeader(w io.Writer, n Node) { fmt.Fprintf(w, " defn(%p)", n.Name().Defn) } - if n.Pos().IsKnown() { - pfx := "" - switch n.Pos().IsStmt() { - case src.PosNotStmt: - pfx = "_" // "-" would be confusing - case src.PosIsStmt: - pfx = "+" - } - fmt.Fprintf(w, " l(%s%d)", pfx, n.Pos().Line()) - } - if n.Offset() != types.BADWIDTH { fmt.Fprintf(w, " x(%d)", n.Offset()) } @@ -1029,6 +1021,32 @@ func dumpNodeHeader(w io.Writer, n Node) { if n.Name() != nil && n.Name().Used() { fmt.Fprint(w, " used") } + + if n.Op() == OCLOSURE { + if fn := n.Func(); fn != nil && fn.Nname.Sym() != nil { + fmt.Fprintf(w, " fnName(%+v)", fn.Nname.Sym()) + } + } + + if n.Type() != nil { + if n.Op() == OTYPE { + fmt.Fprintf(w, " type") + } + fmt.Fprintf(w, " %+v", n.Type()) + } + + if n.Pos().IsKnown() { + pfx := "" + switch n.Pos().IsStmt() { + case src.PosNotStmt: + pfx = "_" // "-" would be confusing + case src.PosIsStmt: + pfx = "+" + } + pos := base.Ctxt.PosTable.Pos(n.Pos()) + file := filepath.Base(pos.Filename()) + fmt.Fprintf(w, " # %s%s:%d", pfx, file, pos.Line()) + } } func dumpNode(w io.Writer, n Node, depth int) { @@ -1052,6 +1070,7 @@ func dumpNode(w io.Writer, n Node, depth int) { case OLITERAL: fmt.Fprintf(w, "%+v-%v", n.Op(), n.Val()) dumpNodeHeader(w, n) + return case ONAME, ONONAME, OMETHEXPR: if n.Sym() != nil { @@ -1065,6 +1084,7 @@ func dumpNode(w io.Writer, n Node, depth int) { fmt.Fprintf(w, "%+v-ntype", n.Op()) dumpNode(w, n.Name().Ntype, depth+1) } + return case OASOP: fmt.Fprintf(w, "%+v-%+v", n.Op(), n.SubOp()) @@ -1073,61 +1093,86 @@ func dumpNode(w io.Writer, n Node, depth int) { case OTYPE: fmt.Fprintf(w, "%+v %+v", n.Op(), n.Sym()) dumpNodeHeader(w, n) - fmt.Fprintf(w, " type=%+v", n.Type()) if n.Type() == nil && n.Name() != nil && n.Name().Ntype != nil { indent(w, depth) fmt.Fprintf(w, "%+v-ntype", n.Op()) dumpNode(w, n.Name().Ntype, depth+1) } - } + return - if n.Op() == OCLOSURE && n.Func() != nil && n.Func().Nname.Sym() != nil { - fmt.Fprintf(w, " fnName %+v", n.Func().Nname.Sym()) + case OCLOSURE: + fmt.Fprintf(w, "%+v", n.Op()) + dumpNodeHeader(w, n) + + case ODCLFUNC: + // Func has many fields we don't want to print. + // Bypass reflection and just print what we want. 
+ fmt.Fprintf(w, "%+v", n.Op()) + dumpNodeHeader(w, n) + fn := n.Func() + if len(fn.Dcl) > 0 { + indent(w, depth) + fmt.Fprintf(w, "%+v-Dcl", n.Op()) + for _, dcl := range n.Func().Dcl { + dumpNode(w, dcl, depth+1) + } + } + if fn.Body().Len() > 0 { + indent(w, depth) + fmt.Fprintf(w, "%+v-body", n.Op()) + dumpNodes(w, n.Body(), depth+1) + } + return } - if n.Sym() != nil && n.Op() != ONAME { + + if n.Sym() != nil { fmt.Fprintf(w, " %+v", n.Sym()) } - if n.Type() != nil { fmt.Fprintf(w, " %+v", n.Type()) } - if n.Left() != nil { - dumpNode(w, n.Left(), depth+1) - } - if n.Right() != nil { - dumpNode(w, n.Right(), depth+1) - } - if n.Op() == OCLOSURE && n.Func() != nil && n.Func().Body().Len() != 0 { - indent(w, depth) - // The function associated with a closure - fmt.Fprintf(w, "%+v-clofunc", n.Op()) - dumpNode(w, n.Func(), depth+1) - } - if n.Op() == ODCLFUNC && n.Func() != nil && n.Func().Dcl != nil && len(n.Func().Dcl) != 0 { - indent(w, depth) - // The dcls for a func or closure - fmt.Fprintf(w, "%+v-dcl", n.Op()) - for _, dcl := range n.Func().Dcl { - dumpNode(w, dcl, depth+1) + v := reflect.ValueOf(n).Elem() + t := reflect.TypeOf(n).Elem() + nf := t.NumField() + for i := 0; i < nf; i++ { + tf := t.Field(i) + vf := v.Field(i) + if tf.PkgPath != "" { + // skip unexported field - Interface will fail + continue + } + switch tf.Type.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Slice: + if vf.IsNil() { + continue + } + } + name := strings.TrimSuffix(tf.Name, "_") + // Do not bother with field name header lines for the + // most common positional arguments: unary, binary expr, + // index expr, send stmt, go and defer call expression. + switch name { + case "X", "Y", "Index", "Chan", "Value", "Call": + name = "" + } + switch val := vf.Interface().(type) { + case Node: + if name != "" { + indent(w, depth) + fmt.Fprintf(w, "%+v-%s", n.Op(), name) + } + dumpNode(w, val, depth+1) + case Nodes: + if val.Len() == 0 { + continue + } + if name != "" { + indent(w, depth) + fmt.Fprintf(w, "%+v-%s", n.Op(), name) + } + dumpNodes(w, val, depth+1) } - } - if n.List().Len() != 0 { - indent(w, depth) - fmt.Fprintf(w, "%+v-list", n.Op()) - dumpNodes(w, n.List(), depth+1) - } - - if n.Rlist().Len() != 0 { - indent(w, depth) - fmt.Fprintf(w, "%+v-rlist", n.Op()) - dumpNodes(w, n.Rlist(), depth+1) - } - - if n.Body().Len() != 0 { - indent(w, depth) - fmt.Fprintf(w, "%+v-body", n.Op()) - dumpNodes(w, n.Body(), depth+1) } } From 0c4944066411c5570ad9e7b66ae414f409d5d826 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 7 Dec 2020 00:04:54 -0500 Subject: [PATCH 149/474] [dev.regabi] cmd/compile: arrange for walkstmt, walkexpr, to return from switch cases Ending them in a returning switch makes it safe for each case to do an appropriate type assertion. Passes buildall w/ toolstash -cmp. 
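A toy version of the converted shape makes the motivation concrete. The types below are invented, not the compiler's: when every case returns, each arm may assert its node to the concrete type implied by the Op without any shared code after the switch ever seeing a value of the wrong type.

package main

import "fmt"

// Toy stand-ins for walkexpr's node types.
type Node interface{ Op() string }

type ConstExpr struct{ V int }

func (*ConstExpr) Op() string { return "OLITERAL" }

type CallExpr struct{ Fn string }

func (*CallExpr) Op() string { return "OCALL" }

// walkexpr in the converted style: every case returns, so each arm may
// safely assert n to the concrete type implied by its Op.
func walkexpr(n Node) Node {
	switch n.Op() {
	case "OLITERAL":
		n := n.(*ConstExpr) // safe: only this arm runs for OLITERAL
		return &ConstExpr{V: n.V * 2}
	case "OCALL":
		n := n.(*CallExpr)
		fmt.Println("walking call to", n.Fn)
		return n
	}
	// No fall-through tail: each case must return (or panic), echoing
	// the "No return!" comment the CL adds after walkstmt's switch.
	panic("walkexpr: unexpected op " + n.Op())
}

func main() {
	fmt.Println(walkexpr(&ConstExpr{V: 21}).(*ConstExpr).V) // prints 42
}

In the old fall-through shape, code after the switch saw whatever the last case left in n, so a per-case type assertion could not be written safely; ending every case with a return removes that shared tail entirely.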
Change-Id: I55d8f0a555006104164d84d27822aa8c5ad68515 Reviewed-on: https://go-review.googlesource.com/c/go/+/275882 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/walk.go | 392 +++++++++++++++------------- 1 file changed, 205 insertions(+), 187 deletions(-) diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 4189d1a721735..f35e9d768bd49 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -123,6 +123,7 @@ func walkstmt(n ir.Node) ir.Node { base.Errorf("%v is not a top level statement", n.Op()) } ir.Dump("nottop", n) + return n case ir.OAS, ir.OASOP, @@ -166,6 +167,7 @@ func walkstmt(n ir.Node) ir.Node { n = ir.NewBlockStmt(n.Pos(), init.Slice()) } } + return n // special case for a receive where we throw away // the value received. @@ -179,8 +181,7 @@ func walkstmt(n ir.Node) ir.Node { n.SetLeft(walkexpr(n.Left(), &init)) n = mkcall1(chanfn("chanrecv1", 2, n.Left().Type()), nil, &init, n.Left(), nodnil()) n = walkexpr(n, &init) - - n = initExpr(init.Slice(), n) + return initExpr(init.Slice(), n) case ir.OBREAK, ir.OCONTINUE, @@ -193,7 +194,7 @@ func walkstmt(n ir.Node) ir.Node { ir.OVARDEF, ir.OVARKILL, ir.OVARLIVE: - break + return n case ir.ODCL: v := n.Left() @@ -209,12 +210,15 @@ func walkstmt(n ir.Node) ir.Node { nn = typecheck(nn, ctxStmt) return walkstmt(nn) } + return n case ir.OBLOCK: walkstmtlist(n.List().Slice()) + return n case ir.OCASE: base.Errorf("case statement out of place") + panic("unreachable") case ir.ODEFER: Curfn.SetHasDefer(true) @@ -261,6 +265,7 @@ func walkstmt(n ir.Node) ir.Node { init.Append(n) n = ir.NewBlockStmt(n.Pos(), init.Slice()) } + return n case ir.OFOR, ir.OFORUNTIL: if n.Left() != nil { @@ -276,16 +281,18 @@ func walkstmt(n ir.Node) ir.Node { walkstmtlist(n.List().Slice()) } walkstmtlist(n.Body().Slice()) + return n case ir.OIF: n.SetLeft(walkexpr(n.Left(), n.PtrInit())) walkstmtlist(n.Body().Slice()) walkstmtlist(n.Rlist().Slice()) + return n case ir.ORETURN: Curfn.NumReturns++ if n.List().Len() == 0 { - break + return n } if (hasNamedResults(Curfn) && n.List().Len() > 1) || paramoutheap(Curfn) { // assign to the function out parameters, @@ -317,7 +324,7 @@ func walkstmt(n ir.Node) ir.Node { ll := ascompatee(n.Op(), rl, n.List().Slice(), n.PtrInit()) n.PtrList().Set(reorder3(ll)) - break + return n } walkexprlist(n.List().Slice(), n.PtrInit()) @@ -334,27 +341,29 @@ func walkstmt(n ir.Node) ir.Node { res[i] = convas(a, n.PtrInit()) } n.PtrList().Set(res) + return n case ir.ORETJMP: - break + return n case ir.OINLMARK: - break + return n case ir.OSELECT: walkselect(n) + return n case ir.OSWITCH: walkswitch(n) + return n case ir.ORANGE: - n = walkrange(n) + return walkrange(n) } - if n.Op() == ir.ONAME { - base.Fatalf("walkstmt ended up with name: %+v", n) - } - return n + // No return! Each case must return (or panic), + // to avoid confusion about what gets returned + // in the presence of type assertions. } // walk the whole tree of the body of an @@ -477,31 +486,68 @@ func walkexpr(n ir.Node, init *ir.Nodes) ir.Node { return nn } -opswitch: + n = walkexpr1(n, init) + + // Expressions that are constant at run time but not + // considered const by the language spec are not turned into + // constants until walk. For example, if n is y%1 == 0, the + // walk of y%1 may have replaced it by 0. + // Check whether n with its updated args is itself now a constant. 
+ t := n.Type() + n = evalConst(n) + if n.Type() != t { + base.Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type()) + } + if n.Op() == ir.OLITERAL { + n = typecheck(n, ctxExpr) + // Emit string symbol now to avoid emitting + // any concurrently during the backend. + if v := n.Val(); v.Kind() == constant.String { + _ = stringsym(n.Pos(), constant.StringVal(v)) + } + } + + updateHasCall(n) + + if base.Flag.LowerW != 0 && n != nil { + ir.Dump("after walk expr", n) + } + + base.Pos = lno + return n +} + +func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { switch n.Op() { default: ir.Dump("walk", n) base.Fatalf("walkexpr: switch 1 unknown op %+v", n.Op()) + panic("unreachable") case ir.ONONAME, ir.OGETG, ir.ONEWOBJ, ir.OMETHEXPR: + return n case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL: // TODO(mdempsky): Just return n; see discussion on CL 38655. // Perhaps refactor to use Node.mayBeShared for these instead. // If these return early, make sure to still call // stringsym for constant strings. + return n case ir.ONOT, ir.ONEG, ir.OPLUS, ir.OBITNOT, ir.OREAL, ir.OIMAG, ir.ODOTMETH, ir.ODOTINTER, ir.ODEREF, ir.OSPTR, ir.OITAB, ir.OIDATA, ir.OADDR: n.SetLeft(walkexpr(n.Left(), init)) + return n case ir.OEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH: n.SetLeft(walkexpr(n.Left(), init)) n.SetRight(walkexpr(n.Right(), init)) + return n case ir.ODOT, ir.ODOTPTR: usefield(n) n.SetLeft(walkexpr(n.Left(), init)) + return n case ir.ODOTTYPE, ir.ODOTTYPE2: n.SetLeft(walkexpr(n.Left(), init)) @@ -513,12 +559,12 @@ opswitch: if !n.Type().IsInterface() && !n.Left().Type().IsEmptyInterface() { n.PtrList().Set1(itabname(n.Type(), n.Left().Type())) } + return n case ir.OLEN, ir.OCAP: if isRuneCount(n) { // Replace len([]rune(string)) with runtime.countrunes(string). - n = mkcall("countrunes", n.Type(), init, conv(n.Left().Left(), types.Types[types.TSTRING])) - break + return mkcall("countrunes", n.Type(), init, conv(n.Left().Left(), types.Types[types.TSTRING])) } n.SetLeft(walkexpr(n.Left(), init)) @@ -535,6 +581,7 @@ opswitch: n = origIntConst(n, t.NumElem()) n.SetTypecheck(1) } + return n case ir.OCOMPLEX: // Use results from call expression as arguments for complex. 
@@ -544,9 +591,10 @@ opswitch: } n.SetLeft(walkexpr(n.Left(), init)) n.SetRight(walkexpr(n.Right(), init)) + return n case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: - n = walkcompare(n, init) + return walkcompare(n, init) case ir.OANDAND, ir.OOROR: n.SetLeft(walkexpr(n.Left(), init)) @@ -558,17 +606,19 @@ opswitch: n.SetRight(walkexpr(n.Right(), &ll)) n.SetRight(initExpr(ll.Slice(), n.Right())) + return n case ir.OPRINT, ir.OPRINTN: - n = walkprint(n, init) + return walkprint(n, init) case ir.OPANIC: - n = mkcall("gopanic", nil, init, n.Left()) + return mkcall("gopanic", nil, init, n.Left()) case ir.ORECOVER: - n = mkcall("gorecover", n.Type(), init, ir.Nod(ir.OADDR, nodfp, nil)) + return mkcall("gorecover", n.Type(), init, ir.Nod(ir.OADDR, nodfp, nil)) case ir.OCLOSUREREAD, ir.OCFUNC: + return n case ir.OCALLINTER, ir.OCALLFUNC, ir.OCALLMETH: if n.Op() == ir.OCALLINTER { @@ -597,6 +647,7 @@ opswitch: } walkCall(n, init) + return n case ir.OAS, ir.OASOP: init.AppendNodes(n.PtrInit()) @@ -622,17 +673,16 @@ opswitch: } if oaslit(n, init) { - n = ir.NodAt(n.Pos(), ir.OBLOCK, nil, nil) - break + return ir.NodAt(n.Pos(), ir.OBLOCK, nil, nil) } if n.Right() == nil { // TODO(austin): Check all "implicit zeroing" - break + return n } if !instrumenting && isZero(n.Right()) { - break + return n } switch n.Right().Op() { @@ -646,9 +696,7 @@ opswitch: n1 := ir.Nod(ir.OADDR, n.Left(), nil) r := n.Right().Left() // the channel - n = mkcall1(chanfn("chanrecv1", 2, r.Type()), nil, init, r, n1) - n = walkexpr(n, init) - break opswitch + return mkcall1(chanfn("chanrecv1", 2, r.Type()), nil, init, r, n1) case ir.OAPPEND: // x = append(...) @@ -671,7 +719,7 @@ opswitch: // Do not add a new write barrier. // Set up address of type for back end. r.SetLeft(typename(r.Type().Elem())) - break opswitch + return n } // Otherwise, lowered for race detector. // Treat as ordinary assignment. @@ -680,6 +728,7 @@ opswitch: if n.Left() != nil && n.Right() != nil { n = convas(n, init) } + return n case ir.OAS2: init.AppendNodes(n.PtrInit()) @@ -687,7 +736,7 @@ opswitch: walkexprlistsafe(n.Rlist().Slice(), init) ll := ascompatee(ir.OAS, n.List().Slice(), n.Rlist().Slice(), init) ll = reorder3(ll) - n = liststmt(ll) + return liststmt(ll) // a,b,... = fn() case ir.OAS2FUNC: @@ -699,12 +748,12 @@ opswitch: if isIntrinsicCall(r) { n.PtrRlist().Set1(r) - break + return n } init.Append(r) ll := ascompatet(n.List(), r.Type()) - n = liststmt(ll) + return liststmt(ll) // x, y = <-c // order.stmt made sure x is addressable or blank. @@ -724,7 +773,7 @@ opswitch: ok := n.List().Second() call := mkcall1(fn, types.Types[types.TBOOL], init, r.Left(), n1) n = ir.Nod(ir.OAS, ok, call) - n = typecheck(n, ctxStmt) + return typecheck(n, ctxStmt) // a,b = m[i] case ir.OAS2MAPR: @@ -784,7 +833,7 @@ opswitch: } n = typecheck(n, ctxStmt) - n = walkexpr(n, init) + return walkexpr(n, init) case ir.ODELETE: init.AppendNodes(n.PtrInit()) @@ -799,11 +848,12 @@ opswitch: // order.stmt made sure key is addressable. 
key = ir.Nod(ir.OADDR, key, nil) } - n = mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key) + return mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key) case ir.OAS2DOTTYPE: walkexprlistsafe(n.List().Slice(), init) n.PtrRlist().SetIndex(0, walkexpr(n.Rlist().First(), init)) + return n case ir.OCONVIFACE: n.SetLeft(walkexpr(n.Left(), init)) @@ -828,8 +878,7 @@ opswitch: l := ir.Nod(ir.OEFACE, typeword(), n.Left()) l.SetType(toType) l.SetTypecheck(n.Typecheck()) - n = l - break + return l } if staticuint64s == nil { @@ -878,8 +927,7 @@ opswitch: l := ir.Nod(ir.OEFACE, typeword(), typecheck(ir.Nod(ir.OADDR, value, nil), ctxExpr)) l.SetType(toType) l.SetTypecheck(n.Typecheck()) - n = l - break + return l } // Implement interface to empty interface conversion. @@ -906,8 +954,7 @@ opswitch: e := ir.Nod(ir.OEFACE, tmp, ifaceData(n.Pos(), c, types.NewPtr(types.Types[types.TUINT8]))) e.SetType(toType) // assign type manually, typecheck doesn't understand OEFACE. e.SetTypecheck(1) - n = e - break + return e } fnname, needsaddr := convFuncName(fromType, toType) @@ -928,8 +975,7 @@ opswitch: e := ir.Nod(ir.OEFACE, typeword(), call) e.SetType(toType) e.SetTypecheck(1) - n = e - break + return e } var tab ir.Node @@ -962,7 +1008,7 @@ opswitch: n = ir.Nod(ir.OCALL, fn, nil) n.PtrList().Set2(tab, v) n = typecheck(n, ctxExpr) - n = walkexpr(n, init) + return walkexpr(n, init) case ir.OCONV, ir.OCONVNOP: n.SetLeft(walkexpr(n.Left(), init)) @@ -971,20 +1017,18 @@ opswitch: } if n.Op() == ir.OCONVNOP && checkPtr(Curfn, 1) { if n.Type().IsPtr() && n.Left().Type().IsUnsafePtr() { // unsafe.Pointer to *T - n = walkCheckPtrAlignment(n, init, nil) - break + return walkCheckPtrAlignment(n, init, nil) } if n.Type().IsUnsafePtr() && n.Left().Type().IsUintptr() { // uintptr to unsafe.Pointer - n = walkCheckPtrArithmetic(n, init) - break + return walkCheckPtrArithmetic(n, init) } } param, result := rtconvfn(n.Left().Type(), n.Type()) if param == types.Txxx { - break + return n } fn := types.BasicTypeNames[param] + "to" + types.BasicTypeNames[result] - n = conv(mkcall(fn, types.Types[result], init, conv(n.Left(), types.Types[param])), n.Type()) + return conv(mkcall(fn, types.Types[result], init, conv(n.Left(), types.Types[param])), n.Type()) case ir.ODIV, ir.OMOD: n.SetLeft(walkexpr(n.Left(), init)) @@ -996,13 +1040,12 @@ opswitch: if isComplex[et] && n.Op() == ir.ODIV { t := n.Type() n = mkcall("complex128div", types.Types[types.TCOMPLEX128], init, conv(n.Left(), types.Types[types.TCOMPLEX128]), conv(n.Right(), types.Types[types.TCOMPLEX128])) - n = conv(n, t) - break + return conv(n, t) } // Nothing to do for float divisions. if isFloat[et] { - break + return n } // rewrite 64-bit div and mod on 32-bit architectures. @@ -1019,15 +1062,15 @@ opswitch: c = -c } if c != 0 && c&(c-1) == 0 { - break opswitch + return n } case types.TUINT64: c := ir.Uint64Val(n.Right()) if c < 1<<16 { - break opswitch + return n } if c != 0 && c&(c-1) == 0 { - break opswitch + return n } } } @@ -1042,8 +1085,9 @@ opswitch: } else { fn += "mod" } - n = mkcall(fn, n.Type(), init, conv(n.Left(), types.Types[et]), conv(n.Right(), types.Types[et])) + return mkcall(fn, n.Type(), init, conv(n.Left(), types.Types[et]), conv(n.Right(), types.Types[et])) } + return n case ir.OINDEX: n.SetLeft(walkexpr(n.Left(), init)) @@ -1057,7 +1101,7 @@ opswitch: // if range of type cannot exceed static array bound, // disable bounds check. 
if n.Bounded() { - break + return n } t := n.Left().Type() if t != nil && t.IsPtr() { @@ -1086,6 +1130,7 @@ opswitch: base.Errorf("index out of bounds") } } + return n case ir.OINDEXMAP: // Replace m[k] with *map{access1,assign}(maptype, m, &k) @@ -1124,14 +1169,17 @@ opswitch: n = ir.Nod(ir.ODEREF, n, nil) n.SetType(t.Elem()) n.SetTypecheck(1) + return n case ir.ORECV: base.Fatalf("walkexpr ORECV") // should see inside OAS only + panic("unreachable") case ir.OSLICEHEADER: n.SetLeft(walkexpr(n.Left(), init)) n.List().SetFirst(walkexpr(n.List().First(), init)) n.List().SetSecond(walkexpr(n.List().Second(), init)) + return n case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR: checkSlice := checkPtr(Curfn, 1) && n.Op() == ir.OSLICE3ARR && n.Left().Op() == ir.OCONVNOP && n.Left().Left().Type().IsUnsafePtr() @@ -1160,11 +1208,11 @@ opswitch: } else { n.SetOp(ir.OSLICEARR) } - n = reduceSlice(n) + return reduceSlice(n) } - } else { - n = reduceSlice(n) + return n } + return reduceSlice(n) case ir.ONEW: if n.Type().Elem().NotInHeap() { @@ -1179,28 +1227,26 @@ opswitch: r = typecheck(r, ctxStmt) init.Append(r) r = ir.Nod(ir.OADDR, r.Left(), nil) - r = typecheck(r, ctxExpr) - n = r - } else { - n = callnew(n.Type().Elem()) + return typecheck(r, ctxExpr) } + return callnew(n.Type().Elem()) case ir.OADDSTR: - n = addstr(n, init) + return addstr(n, init) case ir.OAPPEND: // order should make sure we only see OAS(node, OAPPEND), which we handle above. base.Fatalf("append outside assignment") + panic("unreachable") case ir.OCOPY: - n = copyany(n, init, instrumenting && !base.Flag.CompilingRuntime) + return copyany(n, init, instrumenting && !base.Flag.CompilingRuntime) - // cannot use chanfn - closechan takes any, not chan any case ir.OCLOSE: + // cannot use chanfn - closechan takes any, not chan any fn := syslook("closechan") - fn = substArgTypes(fn, n.Left().Type()) - n = mkcall1(fn, nil, init, n.Left()) + return mkcall1(fn, nil, init, n.Left()) case ir.OMAKECHAN: // When size fits into int, use makechan instead of @@ -1217,7 +1263,7 @@ opswitch: argtype = types.Types[types.TINT] } - n = mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, typename(n.Type()), conv(size, argtype)) + return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, typename(n.Type()), conv(size, argtype)) case ir.OMAKEMAP: t := n.Type() @@ -1294,42 +1340,41 @@ opswitch: a = typecheck(a, ctxStmt) a = walkexpr(a, init) init.Append(a) - n = convnop(h, t) - } else { - // Call runtime.makehmap to allocate an - // hmap on the heap and initialize hmap's hash0 field. - fn := syslook("makemap_small") - fn = substArgTypes(fn, t.Key(), t.Elem()) - n = mkcall1(fn, n.Type(), init) - } - } else { - if n.Esc() != EscNone { - h = nodnil() - } - // Map initialization with a variable or large hint is - // more complicated. We therefore generate a call to - // runtime.makemap to initialize hmap and allocate the - // map buckets. - - // When hint fits into int, use makemap instead of - // makemap64, which is faster and shorter on 32 bit platforms. - fnname := "makemap64" - argtype := types.Types[types.TINT64] - - // Type checking guarantees that TIDEAL hint is positive and fits in an int. - // See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function. - // The case of hint overflow when converting TUINT or TUINTPTR to TINT - // will be handled by the negative range checks in makemap during runtime. 
- if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() { - fnname = "makemap" - argtype = types.Types[types.TINT] + return convnop(h, t) } + // Call runtime.makehmap to allocate an + // hmap on the heap and initialize hmap's hash0 field. + fn := syslook("makemap_small") + fn = substArgTypes(fn, t.Key(), t.Elem()) + return mkcall1(fn, n.Type(), init) + } - fn := syslook(fnname) - fn = substArgTypes(fn, hmapType, t.Key(), t.Elem()) - n = mkcall1(fn, n.Type(), init, typename(n.Type()), conv(hint, argtype), h) + if n.Esc() != EscNone { + h = nodnil() + } + // Map initialization with a variable or large hint is + // more complicated. We therefore generate a call to + // runtime.makemap to initialize hmap and allocate the + // map buckets. + + // When hint fits into int, use makemap instead of + // makemap64, which is faster and shorter on 32 bit platforms. + fnname := "makemap64" + argtype := types.Types[types.TINT64] + + // Type checking guarantees that TIDEAL hint is positive and fits in an int. + // See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function. + // The case of hint overflow when converting TUINT or TUINTPTR to TINT + // will be handled by the negative range checks in makemap during runtime. + if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() { + fnname = "makemap" + argtype = types.Types[types.TINT] } + fn := syslook(fnname) + fn = substArgTypes(fn, hmapType, t.Key(), t.Elem()) + return mkcall1(fn, n.Type(), init, typename(n.Type()), conv(hint, argtype), h) + case ir.OMAKESLICE: l := n.Left() r := n.Right() @@ -1376,39 +1421,39 @@ opswitch: r = conv(r, n.Type()) // in case n.Type is named. r = typecheck(r, ctxExpr) r = walkexpr(r, init) - n = r - } else { - // n escapes; set up a call to makeslice. - // When len and cap can fit into int, use makeslice instead of - // makeslice64, which is faster and shorter on 32 bit platforms. - - len, cap := l, r - - fnname := "makeslice64" - argtype := types.Types[types.TINT64] - - // Type checking guarantees that TIDEAL len/cap are positive and fit in an int. - // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT - // will be handled by the negative range checks in makeslice during runtime. - if (len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size()) && - (cap.Type().IsKind(types.TIDEAL) || cap.Type().Size() <= types.Types[types.TUINT].Size()) { - fnname = "makeslice" - argtype = types.Types[types.TINT] - } + return r + } - m := ir.Nod(ir.OSLICEHEADER, nil, nil) - m.SetType(t) + // n escapes; set up a call to makeslice. + // When len and cap can fit into int, use makeslice instead of + // makeslice64, which is faster and shorter on 32 bit platforms. - fn := syslook(fnname) - m.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype))) - m.Left().MarkNonNil() - m.PtrList().Set2(conv(len, types.Types[types.TINT]), conv(cap, types.Types[types.TINT])) + len, cap := l, r + + fnname := "makeslice64" + argtype := types.Types[types.TINT64] - m = typecheck(m, ctxExpr) - m = walkexpr(m, init) - n = m + // Type checking guarantees that TIDEAL len/cap are positive and fit in an int. + // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT + // will be handled by the negative range checks in makeslice during runtime. 
+ if (len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size()) && + (cap.Type().IsKind(types.TIDEAL) || cap.Type().Size() <= types.Types[types.TUINT].Size()) { + fnname = "makeslice" + argtype = types.Types[types.TINT] } + m := ir.Nod(ir.OSLICEHEADER, nil, nil) + m.SetType(t) + + fn := syslook(fnname) + m.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype))) + m.Left().MarkNonNil() + m.PtrList().Set2(conv(len, types.Types[types.TINT]), conv(cap, types.Types[types.TINT])) + + m = typecheck(m, ctxExpr) + m = walkexpr(m, init) + return m + case ir.OMAKESLICECOPY: if n.Esc() == EscNone { base.Fatalf("OMAKESLICECOPY with EscNone: %v", n) @@ -1453,18 +1498,18 @@ opswitch: ncopy = walkexpr(ncopy, init) init.Append(ncopy) - n = s - } else { // Replace make+copy with runtime.makeslicecopy. - // instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer - fn := syslook("makeslicecopy") - s := ir.Nod(ir.OSLICEHEADER, nil, nil) - s.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), length, copylen, conv(copyptr, types.Types[types.TUNSAFEPTR]))) - s.Left().MarkNonNil() - s.PtrList().Set2(length, length) - s.SetType(t) - n = typecheck(s, ctxExpr) - n = walkexpr(n, init) + return s } + // Replace make+copy with runtime.makeslicecopy. + // instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer + fn := syslook("makeslicecopy") + s := ir.Nod(ir.OSLICEHEADER, nil, nil) + s.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), length, copylen, conv(copyptr, types.Types[types.TUNSAFEPTR]))) + s.Left().MarkNonNil() + s.PtrList().Set2(length, length) + s.SetType(t) + n = typecheck(s, ctxExpr) + return walkexpr(n, init) case ir.ORUNESTR: a := nodnil() @@ -1473,7 +1518,7 @@ opswitch: a = ir.Nod(ir.OADDR, temp(t), nil) } // intstring(*[4]byte, rune) - n = mkcall("intstring", n.Type(), init, a, conv(n.Left(), types.Types[types.TINT64])) + return mkcall("intstring", n.Type(), init, a, conv(n.Left(), types.Types[types.TINT64])) case ir.OBYTES2STR, ir.ORUNES2STR: a := nodnil() @@ -1484,25 +1529,24 @@ opswitch: } if n.Op() == ir.ORUNES2STR { // slicerunetostring(*[32]byte, []rune) string - n = mkcall("slicerunetostring", n.Type(), init, a, n.Left()) - } else { - // slicebytetostring(*[32]byte, ptr *byte, n int) string - n.SetLeft(cheapexpr(n.Left(), init)) - ptr, len := backingArrayPtrLen(n.Left()) - n = mkcall("slicebytetostring", n.Type(), init, a, ptr, len) + return mkcall("slicerunetostring", n.Type(), init, a, n.Left()) } + // slicebytetostring(*[32]byte, ptr *byte, n int) string + n.SetLeft(cheapexpr(n.Left(), init)) + ptr, len := backingArrayPtrLen(n.Left()) + return mkcall("slicebytetostring", n.Type(), init, a, ptr, len) case ir.OBYTES2STRTMP: n.SetLeft(walkexpr(n.Left(), init)) if !instrumenting { // Let the backend handle OBYTES2STRTMP directly // to avoid a function call to slicebytetostringtmp. 
- break + return n } // slicebytetostringtmp(ptr *byte, n int) string n.SetLeft(cheapexpr(n.Left(), init)) ptr, len := backingArrayPtrLen(n.Left()) - n = mkcall("slicebytetostringtmp", n.Type(), init, ptr, len) + return mkcall("slicebytetostringtmp", n.Type(), init, ptr, len) case ir.OSTR2BYTES: s := n.Left() @@ -1534,8 +1578,7 @@ opswitch: slice := ir.NodAt(n.Pos(), ir.OSLICEARR, p, nil) slice.SetType(n.Type()) slice.SetTypecheck(1) - n = walkexpr(slice, init) - break + return walkexpr(slice, init) } a := nodnil() @@ -1545,7 +1588,7 @@ opswitch: a = ir.Nod(ir.OADDR, temp(t), nil) } // stringtoslicebyte(*32[byte], string) []byte - n = mkcall("stringtoslicebyte", n.Type(), init, a, conv(s, types.Types[types.TSTRING])) + return mkcall("stringtoslicebyte", n.Type(), init, a, conv(s, types.Types[types.TSTRING])) case ir.OSTR2BYTESTMP: // []byte(string) conversion that creates a slice @@ -1556,6 +1599,7 @@ opswitch: // The only such case today is: // for i, c := range []byte(string) n.SetLeft(walkexpr(n.Left(), init)) + return n case ir.OSTR2RUNES: a := nodnil() @@ -1565,7 +1609,7 @@ opswitch: a = ir.Nod(ir.OADDR, temp(t), nil) } // stringtoslicerune(*[32]rune, string) []rune - n = mkcall("stringtoslicerune", n.Type(), init, a, conv(n.Left(), types.Types[types.TSTRING])) + return mkcall("stringtoslicerune", n.Type(), init, a, conv(n.Left(), types.Types[types.TSTRING])) case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT, ir.OPTRLIT: if isStaticCompositeLiteral(n) && !canSSAType(n.Type()) { @@ -1573,55 +1617,29 @@ opswitch: // Make direct reference to the static data. See issue 12841. vstat := readonlystaticname(n.Type()) fixedlit(inInitFunction, initKindStatic, n, vstat, init) - n = vstat - n = typecheck(n, ctxExpr) - break + return typecheck(vstat, ctxExpr) } var_ := temp(n.Type()) anylit(n, var_, init) - n = var_ + return var_ case ir.OSEND: n1 := n.Right() n1 = assignconv(n1, n.Left().Type().Elem(), "chan send") n1 = walkexpr(n1, init) n1 = ir.Nod(ir.OADDR, n1, nil) - n = mkcall1(chanfn("chansend1", 2, n.Left().Type()), nil, init, n.Left(), n1) + return mkcall1(chanfn("chansend1", 2, n.Left().Type()), nil, init, n.Left(), n1) case ir.OCLOSURE: - n = walkclosure(n, init) + return walkclosure(n, init) case ir.OCALLPART: - n = walkpartialcall(n.(*ir.CallPartExpr), init) - } - - // Expressions that are constant at run time but not - // considered const by the language spec are not turned into - // constants until walk. For example, if n is y%1 == 0, the - // walk of y%1 may have replaced it by 0. - // Check whether n with its updated args is itself now a constant. - t := n.Type() - n = evalConst(n) - if n.Type() != t { - base.Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type()) - } - if n.Op() == ir.OLITERAL { - n = typecheck(n, ctxExpr) - // Emit string symbol now to avoid emitting - // any concurrently during the backend. - if v := n.Val(); v.Kind() == constant.String { - _ = stringsym(n.Pos(), constant.StringVal(v)) - } - } - - updateHasCall(n) - - if base.Flag.LowerW != 0 && n != nil { - ir.Dump("after walk expr", n) + return walkpartialcall(n.(*ir.CallPartExpr), init) } - base.Pos = lno - return n + // No return! Each case must return (or panic), + // to avoid confusion about what gets returned + // in the presence of type assertions. } // markTypeUsedInInterface marks that type t is converted to an interface. 
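To make the new control-flow convention concrete, here is a minimal, self-contained Go sketch of the shape this rewrite gives walkexpr: the inner switch returns from every case (or panics), and a thin wrapper performs the constant re-evaluation and bookkeeping that previously ran after the switch. The names walk/walk1 and the node type below are illustrative stand-ins, not code from this series.

package main

import "fmt"

type node struct{ op string }

// walk1 mirrors the rewritten switch: every case returns its
// (possibly rewritten) node or panics; nothing falls through
// to shared post-switch code.
func walk1(n node) node {
	switch n.op {
	case "div64":
		return node{op: "call divmod"} // rewritten form returned directly
	case "literal":
		return n // "no change" is still an explicit return
	default:
		panic("walk1: unhandled op " + n.op)
	}
}

// walk wraps walk1 with the fixups that formerly followed the
// switch (constant folding, updateHasCall, debug dumps).
func walk(n node) node {
	n = walk1(n)
	fmt.Println("after walk:", n.op)
	return n
}

func main() {
	walk(node{op: "div64"})
}
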
From 837b35cc55c258bb57ac9fa337ed0783a6fcc617 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 7 Dec 2020 09:14:44 -0500 Subject: [PATCH 150/474] [dev.regabi] cmd/compile: adjust IR representations Based on actually using the IR when prototyping adding type assertions, a few changes to improve it: - Merge DeferStmt and GoStmt, since they are variants of one thing. - Introduce LogicalExpr for && and ||, since they (alone) need an init list before Y. - Add an explicit op to various constructors to make them easier to use. - Add separate StructKeyExpr - it stores Value in a different abstract location (Left) than KeyExpr (Right). - Export all fields for use by rewrites (and later reflection). Passes buildall w/ toolstash -cmp. Change-Id: Iefbff2386d2bb9ef511ce53b7f92ff6c709dc991 Reviewed-on: https://go-review.googlesource.com/c/go/+/275883 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/typecheck.go | 17 +-- src/cmd/compile/internal/gc/universe.go | 3 +- src/cmd/compile/internal/ir/expr.go | 186 ++++++++++++++--------- src/cmd/compile/internal/ir/name.go | 8 + src/cmd/compile/internal/ir/node.go | 51 +++---- src/cmd/compile/internal/ir/node_gen.go | 76 +++++---- src/cmd/compile/internal/ir/stmt.go | 139 ++++++++--------- src/cmd/compile/internal/ir/val.go | 2 +- 8 files changed, 266 insertions(+), 216 deletions(-) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 990921189a439..36526d4c2d6e7 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -2010,18 +2010,13 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } return n - case ir.ODEFER: + case ir.ODEFER, ir.OGO: n.SetLeft(typecheck(n.Left(), ctxStmt|ctxExpr)) if !n.Left().Diag() { checkdefergo(n) } return n - case ir.OGO: - n.SetLeft(typecheck(n.Left(), ctxStmt|ctxExpr)) - checkdefergo(n) - return n - case ir.OFOR, ir.OFORUNTIL: typecheckslice(n.Init().Slice(), ctxStmt) decldepth++ @@ -2885,9 +2880,9 @@ func typecheckcomplit(n ir.Node) (res ir.Node) { if l.Op() == ir.OKEY { key := l.Left() - l.SetOp(ir.OSTRUCTKEY) - l.SetLeft(l.Right()) - l.SetRight(nil) + sk := ir.NewStructKeyExpr(l.Pos(), nil, l.Right()) + ls[i] = sk + l = sk // An OXDOT uses the Sym field to hold // the field to the right of the dot, @@ -2895,7 +2890,7 @@ func typecheckcomplit(n ir.Node) (res ir.Node) { // is never a valid struct literal key. 
if key.Sym() == nil || key.Op() == ir.OXDOT || key.Sym().IsBlank() { base.Errorf("invalid field name %v in struct initializer", key) - l.SetLeft(typecheck(l.Left(), ctxExpr)) + sk.SetLeft(typecheck(sk.Left(), ctxExpr)) continue } @@ -2909,7 +2904,7 @@ func typecheckcomplit(n ir.Node) (res ir.Node) { s = s1 } } - l.SetSym(s) + sk.SetSym(s) } if l.Op() != ir.OSTRUCTKEY { diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index 42b996d88d918..c592e37497ddb 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -202,8 +202,7 @@ func initUniverse() { ir.AsNode(s.Def).SetSym(s) s = types.BuiltinPkg.Lookup("iota") - s.Def = ir.Nod(ir.OIOTA, nil, nil) - ir.AsNode(s.Def).SetSym(s) + s.Def = ir.NewIota(base.Pos, s) for et := types.TINT8; et <= types.TUINT64; et++ { isInt[et] = true diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index a74e0712b91d6..8ea31c1929c89 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -159,8 +159,8 @@ func (n *BinaryExpr) SetOp(op Op) { switch op { default: panic(n.no("SetOp " + op.String())) - case OADD, OADDSTR, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, - OLSH, OLT, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSUB, OXOR, + case OADD, OADDSTR, OAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, + OLSH, OLT, OMOD, OMUL, ONE, OOR, ORSH, OSUB, OXOR, OCOPY, OCOMPLEX, OEFACE: n.op = op @@ -181,21 +181,21 @@ const ( // A CallExpr is a function call X(Args). type CallExpr struct { miniExpr - orig Node - X Node - Args Nodes - Rargs Nodes // TODO(rsc): Delete. - Body_ Nodes // TODO(rsc): Delete. - DDD bool - Use CallUse - noInline bool + orig Node + X Node + Args Nodes + Rargs Nodes // TODO(rsc): Delete. + Body_ Nodes // TODO(rsc): Delete. + DDD bool + Use CallUse + NoInline_ bool } -func NewCallExpr(pos src.XPos, fun Node, args []Node) *CallExpr { +func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr { n := &CallExpr{X: fun} n.pos = pos n.orig = n - n.op = OCALL + n.SetOp(op) n.Args.Set(args) return n } @@ -214,8 +214,8 @@ func (n *CallExpr) PtrRlist() *Nodes { return &n.Rargs } func (n *CallExpr) SetRlist(x Nodes) { n.Rargs = x } func (n *CallExpr) IsDDD() bool { return n.DDD } func (n *CallExpr) SetIsDDD(x bool) { n.DDD = x } -func (n *CallExpr) NoInline() bool { return n.noInline } -func (n *CallExpr) SetNoInline(x bool) { n.noInline = x } +func (n *CallExpr) NoInline() bool { return n.NoInline_ } +func (n *CallExpr) SetNoInline(x bool) { n.NoInline_ = x } func (n *CallExpr) Body() Nodes { return n.Body_ } func (n *CallExpr) PtrBody() *Nodes { return &n.Body_ } func (n *CallExpr) SetBody(x Nodes) { n.Body_ = x } @@ -233,21 +233,21 @@ func (n *CallExpr) SetOp(op Op) { // A CallPartExpr is a method expression X.Method (uncalled). 
 type CallPartExpr struct {
 	miniExpr
-	fn     *Func
+	Func_  *Func
 	X      Node
 	Method *types.Field
 }
 
 func NewCallPartExpr(pos src.XPos, x Node, method *types.Field, fn *Func) *CallPartExpr {
-	n := &CallPartExpr{fn: fn, X: x, Method: method}
+	n := &CallPartExpr{Func_: fn, X: x, Method: method}
 	n.op = OCALLPART
 	n.pos = pos
 	n.typ = fn.Type()
-	n.fn = fn
+	n.Func_ = fn
 	return n
 }
 
-func (n *CallPartExpr) Func() *Func     { return n.fn }
+func (n *CallPartExpr) Func() *Func     { return n.Func_ }
 func (n *CallPartExpr) Left() Node      { return n.X }
 func (n *CallPartExpr) Sym() *types.Sym { return n.Method.Sym }
 func (n *CallPartExpr) SetLeft(x Node)  { n.X = x }
@@ -268,20 +268,20 @@ func NewClosureExpr(pos src.XPos, fn *Func) *ClosureExpr {
 func (n *ClosureExpr) Func() *Func { return n.Func_ }
 
 // A ClosureRead denotes reading a variable stored within a closure struct.
-type ClosureRead struct {
+type ClosureReadExpr struct {
 	miniExpr
-	offset int64
+	Offset_ int64
 }
 
-func NewClosureRead(typ *types.Type, offset int64) *ClosureRead {
-	n := &ClosureRead{offset: offset}
+func NewClosureRead(typ *types.Type, offset int64) *ClosureReadExpr {
+	n := &ClosureReadExpr{Offset_: offset}
 	n.typ = typ
 	n.op = OCLOSUREREAD
 	return n
 }
 
-func (n *ClosureRead) Type() *types.Type { return n.typ }
-func (n *ClosureRead) Offset() int64     { return n.offset }
+func (n *ClosureReadExpr) Type() *types.Type { return n.typ }
+func (n *ClosureReadExpr) Offset() int64     { return n.Offset_ }
 
 // A CompLitExpr is a composite literal Type{Vals}.
 // Before type-checking, the type is Ntype.
@@ -292,10 +292,10 @@ type CompLitExpr struct {
 	List_ Nodes // initialized values
 }
 
-func NewCompLitExpr(pos src.XPos, typ Ntype, list []Node) *CompLitExpr {
+func NewCompLitExpr(pos src.XPos, op Op, typ Ntype, list []Node) *CompLitExpr {
 	n := &CompLitExpr{Ntype: typ}
 	n.pos = pos
-	n.op = OCOMPLIT
+	n.SetOp(op)
 	n.List_.Set(list)
 	n.orig = n
 	return n
@@ -397,42 +397,48 @@ func (n *IndexExpr) SetOp(op Op) {
 	}
 }
 
-// A KeyExpr is an X:Y composite literal key.
-// After type-checking, a key for a struct sets Sym to the field.
+// A KeyExpr is a Key: Value composite literal key.
 type KeyExpr struct {
 	miniExpr
-	Key    Node
-	sym    *types.Sym
-	Value  Node
-	offset int64
+	Key   Node
+	Value Node
 }
 
 func NewKeyExpr(pos src.XPos, key, value Node) *KeyExpr {
 	n := &KeyExpr{Key: key, Value: value}
 	n.pos = pos
 	n.op = OKEY
-	n.offset = types.BADWIDTH
 	return n
 }
 
-func (n *KeyExpr) Left() Node          { return n.Key }
-func (n *KeyExpr) SetLeft(x Node)      { n.Key = x }
-func (n *KeyExpr) Right() Node         { return n.Value }
-func (n *KeyExpr) SetRight(y Node)     { n.Value = y }
-func (n *KeyExpr) Sym() *types.Sym     { return n.sym }
-func (n *KeyExpr) SetSym(x *types.Sym) { n.sym = x }
-func (n *KeyExpr) Offset() int64       { return n.offset }
-func (n *KeyExpr) SetOffset(x int64)   { n.offset = x }
+func (n *KeyExpr) Left() Node      { return n.Key }
+func (n *KeyExpr) SetLeft(x Node)  { n.Key = x }
+func (n *KeyExpr) Right() Node     { return n.Value }
+func (n *KeyExpr) SetRight(y Node) { n.Value = y }
 
-func (n *KeyExpr) SetOp(op Op) {
-	switch op {
-	default:
-		panic(n.no("SetOp " + op.String()))
-	case OKEY, OSTRUCTKEY:
-		n.op = op
-	}
+// A StructKeyExpr is a Field: Value composite literal key.
+type StructKeyExpr struct {
+	miniExpr
+	Field   *types.Sym
+	Value   Node
+	Offset_ int64
+}
+
+func NewStructKeyExpr(pos src.XPos, field *types.Sym, value Node) *StructKeyExpr {
+	n := &StructKeyExpr{Field: field, Value: value}
+	n.pos = pos
+	n.op = OSTRUCTKEY
+	n.Offset_ = types.BADWIDTH
+	return n
 }
 
+func (n *StructKeyExpr) Sym() *types.Sym     { return n.Field }
+func (n *StructKeyExpr) SetSym(x *types.Sym) { n.Field = x }
+func (n *StructKeyExpr) Left() Node          { return n.Value }
+func (n *StructKeyExpr) SetLeft(x Node)      { n.Value = x }
+func (n *StructKeyExpr) Offset() int64       { return n.Offset_ }
+func (n *StructKeyExpr) SetOffset(x int64)   { n.Offset_ = x }
+
 // An InlinedCallExpr is an inlined function call.
 type InlinedCallExpr struct {
 	miniExpr
@@ -456,6 +462,36 @@ func (n *InlinedCallExpr) Rlist() Nodes      { return n.ReturnVars }
 func (n *InlinedCallExpr) PtrRlist() *Nodes  { return &n.ReturnVars }
 func (n *InlinedCallExpr) SetRlist(x Nodes)  { n.ReturnVars = x }
 
+// A LogicalExpr is an expression X Op Y where Op is && or ||.
+// It is separate from BinaryExpr to make room for statements
+// that must be executed before Y but after X.
+type LogicalExpr struct {
+	miniExpr
+	X Node
+	Y Node
+}
+
+func NewLogicalExpr(pos src.XPos, op Op, x, y Node) *LogicalExpr {
+	n := &LogicalExpr{X: x, Y: y}
+	n.pos = pos
+	n.SetOp(op)
+	return n
+}
+
+func (n *LogicalExpr) Left() Node      { return n.X }
+func (n *LogicalExpr) SetLeft(x Node)  { n.X = x }
+func (n *LogicalExpr) Right() Node     { return n.Y }
+func (n *LogicalExpr) SetRight(y Node) { n.Y = y }
+
+func (n *LogicalExpr) SetOp(op Op) {
+	switch op {
+	default:
+		panic(n.no("SetOp " + op.String()))
+	case OANDAND, OOROR:
+		n.op = op
+	}
+}
+
 // A MakeExpr is a make expression: make(Type[, Len[, Cap]]).
 // Op is OMAKECHAN, OMAKEMAP, OMAKESLICE, or OMAKESLICECOPY,
 // but *not* OMAKE (that's a pre-typechecking CallExpr).
@@ -489,19 +525,19 @@ func (n *MakeExpr) SetOp(op Op) {
 // A MethodExpr is a method value X.M (where X is an expression, not a type).
 type MethodExpr struct {
 	miniExpr
-	X      Node
-	M      Node
-	sym    *types.Sym
-	offset int64
-	class  Class
-	Method *types.Field
+	X       Node
+	M       Node
+	Sym_    *types.Sym
+	Offset_ int64
+	Class_  Class
+	Method  *types.Field
 }
 
-func NewMethodExpr(pos src.XPos, op Op, x, m Node) *MethodExpr {
+func NewMethodExpr(pos src.XPos, x, m Node) *MethodExpr {
 	n := &MethodExpr{X: x, M: m}
 	n.pos = pos
 	n.op = OMETHEXPR
-	n.offset = types.BADWIDTH
+	n.Offset_ = types.BADWIDTH
 	return n
 }
 
@@ -509,18 +545,18 @@ func (n *MethodExpr) Left() Node          { return n.X }
 func (n *MethodExpr) SetLeft(x Node)      { n.X = x }
 func (n *MethodExpr) Right() Node         { return n.M }
 func (n *MethodExpr) SetRight(y Node)     { n.M = y }
-func (n *MethodExpr) Sym() *types.Sym     { return n.sym }
-func (n *MethodExpr) SetSym(x *types.Sym) { n.sym = x }
-func (n *MethodExpr) Offset() int64       { return n.offset }
-func (n *MethodExpr) SetOffset(x int64)   { n.offset = x }
-func (n *MethodExpr) Class() Class        { return n.class }
-func (n *MethodExpr) SetClass(x Class)    { n.class = x }
+func (n *MethodExpr) Sym() *types.Sym     { return n.Sym_ }
+func (n *MethodExpr) SetSym(x *types.Sym) { n.Sym_ = x }
+func (n *MethodExpr) Offset() int64       { return n.Offset_ }
+func (n *MethodExpr) SetOffset(x int64)   { n.Offset_ = x }
+func (n *MethodExpr) Class() Class        { return n.Class_ }
+func (n *MethodExpr) SetClass(x Class)    { n.Class_ = x }
 
 // A NilExpr represents the predefined untyped constant nil.
 // (It may be copied and assigned a type, though.)
type NilExpr struct { miniExpr - sym *types.Sym // TODO: Remove + Sym_ *types.Sym // TODO: Remove } func NewNilExpr(pos src.XPos) *NilExpr { @@ -530,8 +566,8 @@ func NewNilExpr(pos src.XPos) *NilExpr { return n } -func (n *NilExpr) Sym() *types.Sym { return n.sym } -func (n *NilExpr) SetSym(x *types.Sym) { n.sym = x } +func (n *NilExpr) Sym() *types.Sym { return n.Sym_ } +func (n *NilExpr) SetSym(x *types.Sym) { n.Sym_ = x } // A ParenExpr is a parenthesized expression (X). // It may end up being a value or a type. @@ -563,34 +599,34 @@ func (n *ParenExpr) SetOTYPE(t *types.Type) { // A ResultExpr represents a direct access to a result slot on the stack frame. type ResultExpr struct { miniExpr - offset int64 + Offset_ int64 } func NewResultExpr(pos src.XPos, typ *types.Type, offset int64) *ResultExpr { - n := &ResultExpr{offset: offset} + n := &ResultExpr{Offset_: offset} n.pos = pos n.op = ORESULT n.typ = typ return n } -func (n *ResultExpr) Offset() int64 { return n.offset } -func (n *ResultExpr) SetOffset(x int64) { n.offset = x } +func (n *ResultExpr) Offset() int64 { return n.Offset_ } +func (n *ResultExpr) SetOffset(x int64) { n.Offset_ = x } // A SelectorExpr is a selector expression X.Sym. type SelectorExpr struct { miniExpr X Node Sel *types.Sym - offset int64 + Offset_ int64 Selection *types.Field } -func NewSelectorExpr(pos src.XPos, x Node, sel *types.Sym) *SelectorExpr { +func NewSelectorExpr(pos src.XPos, op Op, x Node, sel *types.Sym) *SelectorExpr { n := &SelectorExpr{X: x, Sel: sel} n.pos = pos - n.op = OXDOT - n.offset = types.BADWIDTH + n.Offset_ = types.BADWIDTH + n.SetOp(op) return n } @@ -607,8 +643,8 @@ func (n *SelectorExpr) Left() Node { return n.X } func (n *SelectorExpr) SetLeft(x Node) { n.X = x } func (n *SelectorExpr) Sym() *types.Sym { return n.Sel } func (n *SelectorExpr) SetSym(x *types.Sym) { n.Sel = x } -func (n *SelectorExpr) Offset() int64 { return n.offset } -func (n *SelectorExpr) SetOffset(x int64) { n.offset = x } +func (n *SelectorExpr) Offset() int64 { return n.Offset_ } +func (n *SelectorExpr) SetOffset(x int64) { n.Offset_ = x } // Before type-checking, bytes.Buffer is a SelectorExpr. // After type-checking it becomes a Name. diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 319c40e4e9373..4cf12f2c5d7f9 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -132,6 +132,14 @@ func NewNameAt(pos src.XPos, sym *types.Sym) *Name { return newNameAt(pos, ONAME, sym) } +// NewIota returns a new OIOTA Node. +func NewIota(pos src.XPos, sym *types.Sym) *Name { + if sym == nil { + base.Fatalf("NewIota nil") + } + return newNameAt(pos, OIOTA, sym) +} + // NewDeclNameAt returns a new ONONAME Node associated with symbol s at position pos. // The caller is responsible for setting Curfn. 
func NewDeclNameAt(pos src.XPos, sym *types.Sym) *Name { diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index d6dab0b9e21bd..0191014133c2a 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -681,30 +681,31 @@ func NodAt(pos src.XPos, op Op, nleft, nright Node) Node { switch op { default: panic("NodAt " + op.String()) - case OADD, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, - OLSH, OLT, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSUB, OXOR, + case OADD, OAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, + OLSH, OLT, OMOD, OMUL, ONE, OOR, ORSH, OSUB, OXOR, OCOPY, OCOMPLEX, OEFACE: return NewBinaryExpr(pos, op, nleft, nright) - case OADDR, OPTRLIT: + case OADDR: return NewAddrExpr(pos, nleft) case OADDSTR: return NewAddStringExpr(pos, nil) + case OANDAND, OOROR: + return NewLogicalExpr(pos, op, nleft, nright) case OARRAYLIT, OCOMPLIT, OMAPLIT, OSTRUCTLIT, OSLICELIT: var typ Ntype if nright != nil { typ = nright.(Ntype) } - n := NewCompLitExpr(pos, typ, nil) - n.SetOp(op) - return n + return NewCompLitExpr(pos, op, typ, nil) case OAS, OSELRECV: n := NewAssignStmt(pos, nleft, nright) - n.SetOp(op) + if op != OAS { + n.SetOp(op) + } return n case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV, OSELRECV2: - n := NewAssignListStmt(pos, nil, nil) - n.SetOp(op) + n := NewAssignListStmt(pos, op, nil, nil) return n case OASOP: return NewAssignOpStmt(pos, OXXX, nleft, nright) @@ -722,9 +723,7 @@ func NodAt(pos src.XPos, op Op, nleft, nright Node) Node { return NewBranchStmt(pos, op, nil) case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OAPPEND, ODELETE, OGETG, OMAKE, OPRINT, OPRINTN, ORECOVER: - n := NewCallExpr(pos, nleft, nil) - n.SetOp(op) - return n + return NewCallExpr(pos, op, nleft, nil) case OCASE: return NewCaseStmt(pos, nil, nil) case OCONV, OCONVIFACE, OCONVNOP, ORUNESTR: @@ -733,38 +732,38 @@ func NodAt(pos src.XPos, op Op, nleft, nright Node) Node { return NewDecl(pos, op, nleft) case ODCLFUNC: return NewFunc(pos) - case ODEFER: - return NewDeferStmt(pos, nleft) + case ODEFER, OGO: + return NewGoDeferStmt(pos, op, nleft) case ODEREF: return NewStarExpr(pos, nleft) case ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OXDOT: - n := NewSelectorExpr(pos, nleft, nil) - n.SetOp(op) - return n + return NewSelectorExpr(pos, op, nleft, nil) case ODOTTYPE, ODOTTYPE2: var typ Ntype if nright != nil { typ = nright.(Ntype) } n := NewTypeAssertExpr(pos, nleft, typ) - n.SetOp(op) + if op != ODOTTYPE { + n.SetOp(op) + } return n case OFOR: return NewForStmt(pos, nil, nleft, nright, nil) - case OGO: - return NewGoStmt(pos, nleft) case OIF: return NewIfStmt(pos, nleft, nil, nil) case OINDEX, OINDEXMAP: n := NewIndexExpr(pos, nleft, nright) - n.SetOp(op) + if op != OINDEX { + n.SetOp(op) + } return n case OINLMARK: return NewInlineMarkStmt(pos, types.BADWIDTH) - case OKEY, OSTRUCTKEY: - n := NewKeyExpr(pos, nleft, nright) - n.SetOp(op) - return n + case OKEY: + return NewKeyExpr(pos, nleft, nright) + case OSTRUCTKEY: + return NewStructKeyExpr(pos, nil, nleft) case OLABEL: return NewLabelStmt(pos, nil) case OLITERAL, OTYPE, OIOTA: @@ -772,7 +771,7 @@ func NodAt(pos src.XPos, op Op, nleft, nright Node) Node { case OMAKECHAN, OMAKEMAP, OMAKESLICE, OMAKESLICECOPY: return NewMakeExpr(pos, op, nleft, nright) case OMETHEXPR: - return NewMethodExpr(pos, op, nleft, nright) + return NewMethodExpr(pos, nleft, nright) case ONIL: return NewNilExpr(pos) case OPACK: diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 
b3fd89c3670b7..4eedcfdd29bc8 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -280,19 +280,19 @@ func (n *ClosureExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) } -func (n *ClosureRead) String() string { return fmt.Sprint(n) } -func (n *ClosureRead) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *ClosureRead) copy() Node { +func (n *ClosureReadExpr) String() string { return fmt.Sprint(n) } +func (n *ClosureReadExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *ClosureReadExpr) copy() Node { c := *n c.init = c.init.Copy() return &c } -func (n *ClosureRead) doChildren(do func(Node) error) error { +func (n *ClosureReadExpr) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) return err } -func (n *ClosureRead) editChildren(edit func(Node) Node) { +func (n *ClosureReadExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) } @@ -366,24 +366,6 @@ func (n *Decl) editChildren(edit func(Node) Node) { n.X = maybeEdit(n.X, edit) } -func (n *DeferStmt) String() string { return fmt.Sprint(n) } -func (n *DeferStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *DeferStmt) copy() Node { - c := *n - c.init = c.init.Copy() - return &c -} -func (n *DeferStmt) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.Call, err, do) - return err -} -func (n *DeferStmt) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.Call = maybeEdit(n.Call, edit) -} - func (n *ForStmt) String() string { return fmt.Sprint(n) } func (n *ForStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *ForStmt) copy() Node { @@ -450,20 +432,20 @@ func (n *FuncType) editChildren(edit func(Node) Node) { editFields(n.Results, edit) } -func (n *GoStmt) String() string { return fmt.Sprint(n) } -func (n *GoStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *GoStmt) copy() Node { +func (n *GoDeferStmt) String() string { return fmt.Sprint(n) } +func (n *GoDeferStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *GoDeferStmt) copy() Node { c := *n c.init = c.init.Copy() return &c } -func (n *GoStmt) doChildren(do func(Node) error) error { +func (n *GoDeferStmt) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) err = maybeDo(n.Call, err, do) return err } -func (n *GoStmt) editChildren(edit func(Node) Node) { +func (n *GoDeferStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) n.Call = maybeEdit(n.Call, edit) } @@ -602,6 +584,26 @@ func (n *LabelStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) } +func (n *LogicalExpr) String() string { return fmt.Sprint(n) } +func (n *LogicalExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *LogicalExpr) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} +func (n *LogicalExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.X, err, do) + err = maybeDo(n.Y, err, do) + return err +} +func (n *LogicalExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.X = maybeEdit(n.X, edit) + n.Y = maybeEdit(n.Y, edit) +} + func (n *MakeExpr) String() string { return fmt.Sprint(n) } func (n *MakeExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *MakeExpr) copy() Node { @@ -913,6 +915,24 @@ func (n *StarExpr) editChildren(edit func(Node) 
Node) { n.X = maybeEdit(n.X, edit) } +func (n *StructKeyExpr) String() string { return fmt.Sprint(n) } +func (n *StructKeyExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *StructKeyExpr) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} +func (n *StructKeyExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = maybeDo(n.Value, err, do) + return err +} +func (n *StructKeyExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) + n.Value = maybeEdit(n.Value, edit) +} + func (n *StructType) String() string { return fmt.Sprint(n) } func (n *StructType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *StructType) copy() Node { diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index 68f9b0bd7c5db..28c40c0781357 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -63,19 +63,19 @@ func (n *miniStmt) SetHasCall(b bool) { n.bits.set(miniHasCall, b) } // If Def is true, the assignment is a :=. type AssignListStmt struct { miniStmt - Lhs Nodes - Def bool - Rhs Nodes - offset int64 // for initorder + Lhs Nodes + Def bool + Rhs Nodes + Offset_ int64 // for initorder } -func NewAssignListStmt(pos src.XPos, lhs, rhs []Node) *AssignListStmt { +func NewAssignListStmt(pos src.XPos, op Op, lhs, rhs []Node) *AssignListStmt { n := &AssignListStmt{} n.pos = pos - n.op = OAS2 + n.SetOp(op) n.Lhs.Set(lhs) n.Rhs.Set(rhs) - n.offset = types.BADWIDTH + n.Offset_ = types.BADWIDTH return n } @@ -87,8 +87,8 @@ func (n *AssignListStmt) PtrRlist() *Nodes { return &n.Rhs } func (n *AssignListStmt) SetRlist(x Nodes) { n.Rhs = x } func (n *AssignListStmt) Colas() bool { return n.Def } func (n *AssignListStmt) SetColas(x bool) { n.Def = x } -func (n *AssignListStmt) Offset() int64 { return n.offset } -func (n *AssignListStmt) SetOffset(x int64) { n.offset = x } +func (n *AssignListStmt) Offset() int64 { return n.Offset_ } +func (n *AssignListStmt) SetOffset(x int64) { n.Offset_ = x } func (n *AssignListStmt) SetOp(op Op) { switch op { @@ -103,17 +103,17 @@ func (n *AssignListStmt) SetOp(op Op) { // If Def is true, the assignment is a :=. type AssignStmt struct { miniStmt - X Node - Def bool - Y Node - offset int64 // for initorder + X Node + Def bool + Y Node + Offset_ int64 // for initorder } func NewAssignStmt(pos src.XPos, x, y Node) *AssignStmt { n := &AssignStmt{X: x, Y: y} n.pos = pos n.op = OAS - n.offset = types.BADWIDTH + n.Offset_ = types.BADWIDTH return n } @@ -123,8 +123,8 @@ func (n *AssignStmt) Right() Node { return n.Y } func (n *AssignStmt) SetRight(y Node) { n.Y = y } func (n *AssignStmt) Colas() bool { return n.Def } func (n *AssignStmt) SetColas(x bool) { n.Def = x } -func (n *AssignStmt) Offset() int64 { return n.offset } -func (n *AssignStmt) SetOffset(x int64) { n.offset = x } +func (n *AssignStmt) Offset() int64 { return n.Offset_ } +func (n *AssignStmt) SetOffset(x int64) { n.Offset_ = x } func (n *AssignStmt) SetOp(op Op) { switch op { @@ -236,32 +236,16 @@ func (n *CaseStmt) SetRlist(x Nodes) { n.Vars = x } func (n *CaseStmt) Left() Node { return n.Comm } func (n *CaseStmt) SetLeft(x Node) { n.Comm = x } -// A DeferStmt is a defer statement: defer Call. 
-type DeferStmt struct {
-	miniStmt
-	Call Node
-}
-
-func NewDeferStmt(pos src.XPos, call Node) *DeferStmt {
-	n := &DeferStmt{Call: call}
-	n.pos = pos
-	n.op = ODEFER
-	return n
-}
-
-func (n *DeferStmt) Left() Node     { return n.Call }
-func (n *DeferStmt) SetLeft(x Node) { n.Call = x }
-
 // A ForStmt is a non-range for loop: for Init; Cond; Post { Body }
 // Op can be OFOR or OFORUNTIL (!Cond).
 type ForStmt struct {
 	miniStmt
-	Label    *types.Sym
-	Cond     Node
-	Late     Nodes
-	Post     Node
-	Body_    Nodes
-	hasBreak bool
+	Label     *types.Sym
+	Cond      Node
+	Late      Nodes
+	Post      Node
+	Body_     Nodes
+	HasBreak_ bool
 }
 
 func NewForStmt(pos src.XPos, init []Node, cond, post Node, body []Node) *ForStmt {
@@ -285,8 +269,8 @@ func (n *ForStmt) SetBody(x Nodes)   { n.Body_ = x }
 func (n *ForStmt) List() Nodes       { return n.Late }
 func (n *ForStmt) PtrList() *Nodes   { return &n.Late }
 func (n *ForStmt) SetList(x Nodes)   { n.Late = x }
-func (n *ForStmt) HasBreak() bool    { return n.hasBreak }
-func (n *ForStmt) SetHasBreak(b bool) { n.hasBreak = b }
+func (n *ForStmt) HasBreak() bool     { return n.HasBreak_ }
+func (n *ForStmt) SetHasBreak(b bool) { n.HasBreak_ = b }
 
 func (n *ForStmt) SetOp(op Op) {
 	if op != OFOR && op != OFORUNTIL {
@@ -295,29 +279,38 @@ func (n *ForStmt) SetOp(op Op) {
 	n.op = op
 }
 
-// A GoStmt is a go statement: go Call.
-type GoStmt struct {
+// A GoDeferStmt is a go or defer statement: go Call / defer Call.
+//
+// The two opcodes use a single syntax because the implementations
+// are very similar: both are concerned with saving Call and running it
+// in a different context (a separate goroutine or a later time).
+type GoDeferStmt struct {
 	miniStmt
 	Call Node
 }
 
-func NewGoStmt(pos src.XPos, call Node) *GoStmt {
-	n := &GoStmt{Call: call}
+func NewGoDeferStmt(pos src.XPos, op Op, call Node) *GoDeferStmt {
+	n := &GoDeferStmt{Call: call}
 	n.pos = pos
-	n.op = OGO
+	switch op {
+	case ODEFER, OGO:
+		n.op = op
+	default:
+		panic("NewGoDeferStmt " + op.String())
+	}
 	return n
 }
 
-func (n *GoStmt) Left() Node     { return n.Call }
-func (n *GoStmt) SetLeft(x Node) { n.Call = x }
+func (n *GoDeferStmt) Left() Node     { return n.Call }
+func (n *GoDeferStmt) SetLeft(x Node) { n.Call = x }
 
 // An IfStmt is an if statement: if Init; Cond { Then } else { Else }.
 type IfStmt struct {
 	miniStmt
-	Cond   Node
-	Body_  Nodes
-	Else   Nodes
-	likely bool // code layout hint
+	Cond    Node
+	Body_   Nodes
+	Else    Nodes
+	Likely_ bool // code layout hint
 }
 
 func NewIfStmt(pos src.XPos, cond Node, body, els []Node) *IfStmt {
@@ -337,8 +330,8 @@ func (n *IfStmt) SetBody(x Nodes)   { n.Body_ = x }
 func (n *IfStmt) Rlist() Nodes      { return n.Else }
 func (n *IfStmt) PtrRlist() *Nodes  { return &n.Else }
 func (n *IfStmt) SetRlist(x Nodes)  { n.Else = x }
-func (n *IfStmt) Likely() bool      { return n.likely }
-func (n *IfStmt) SetLikely(x bool)  { n.likely = x }
+func (n *IfStmt) Likely() bool      { return n.Likely_ }
+func (n *IfStmt) SetLikely(x bool)  { n.Likely_ = x }
 
 // An InlineMarkStmt is a marker placed just before an inlined body.
 type InlineMarkStmt struct {
@@ -376,13 +369,13 @@ func (n *LabelStmt) SetSym(x *types.Sym) { n.Label = x }
 // Op can be OFOR or OFORUNTIL (!Cond).
type RangeStmt struct { miniStmt - Label *types.Sym - Vars Nodes // TODO(rsc): Replace with Key, Value Node - Def bool - X Node - Body_ Nodes - hasBreak bool - typ *types.Type // TODO(rsc): Remove - use X.Type() instead + Label *types.Sym + Vars Nodes // TODO(rsc): Replace with Key, Value Node + Def bool + X Node + Body_ Nodes + HasBreak_ bool + typ *types.Type // TODO(rsc): Remove - use X.Type() instead } func NewRangeStmt(pos src.XPos, vars []Node, x Node, body []Node) *RangeStmt { @@ -404,8 +397,8 @@ func (n *RangeStmt) SetBody(x Nodes) { n.Body_ = x } func (n *RangeStmt) List() Nodes { return n.Vars } func (n *RangeStmt) PtrList() *Nodes { return &n.Vars } func (n *RangeStmt) SetList(x Nodes) { n.Vars = x } -func (n *RangeStmt) HasBreak() bool { return n.hasBreak } -func (n *RangeStmt) SetHasBreak(b bool) { n.hasBreak = b } +func (n *RangeStmt) HasBreak() bool { return n.HasBreak_ } +func (n *RangeStmt) SetHasBreak(b bool) { n.HasBreak_ = b } func (n *RangeStmt) Colas() bool { return n.Def } func (n *RangeStmt) SetColas(b bool) { n.Def = b } func (n *RangeStmt) Type() *types.Type { return n.typ } @@ -437,9 +430,9 @@ func (n *ReturnStmt) IsDDD() bool { return false } // typecheckargs asks // A SelectStmt is a block: { Cases }. type SelectStmt struct { miniStmt - Label *types.Sym - Cases Nodes - hasBreak bool + Label *types.Sym + Cases Nodes + HasBreak_ bool // TODO(rsc): Instead of recording here, replace with a block? Compiled Nodes // compiled form, after walkswitch @@ -458,8 +451,8 @@ func (n *SelectStmt) PtrList() *Nodes { return &n.Cases } func (n *SelectStmt) SetList(x Nodes) { n.Cases = x } func (n *SelectStmt) Sym() *types.Sym { return n.Label } func (n *SelectStmt) SetSym(x *types.Sym) { n.Label = x } -func (n *SelectStmt) HasBreak() bool { return n.hasBreak } -func (n *SelectStmt) SetHasBreak(x bool) { n.hasBreak = x } +func (n *SelectStmt) HasBreak() bool { return n.HasBreak_ } +func (n *SelectStmt) SetHasBreak(x bool) { n.HasBreak_ = x } func (n *SelectStmt) Body() Nodes { return n.Compiled } func (n *SelectStmt) PtrBody() *Nodes { return &n.Compiled } func (n *SelectStmt) SetBody(x Nodes) { n.Compiled = x } @@ -486,10 +479,10 @@ func (n *SendStmt) SetRight(y Node) { n.Value = y } // A SwitchStmt is a switch statement: switch Init; Expr { Cases }. type SwitchStmt struct { miniStmt - Tag Node - Cases Nodes // list of *CaseStmt - Label *types.Sym - hasBreak bool + Tag Node + Cases Nodes // list of *CaseStmt + Label *types.Sym + HasBreak_ bool // TODO(rsc): Instead of recording here, replace with a block? Compiled Nodes // compiled form, after walkswitch @@ -513,8 +506,8 @@ func (n *SwitchStmt) PtrBody() *Nodes { return &n.Compiled } func (n *SwitchStmt) SetBody(x Nodes) { n.Compiled = x } func (n *SwitchStmt) Sym() *types.Sym { return n.Label } func (n *SwitchStmt) SetSym(x *types.Sym) { n.Label = x } -func (n *SwitchStmt) HasBreak() bool { return n.hasBreak } -func (n *SwitchStmt) SetHasBreak(x bool) { n.hasBreak = x } +func (n *SwitchStmt) HasBreak() bool { return n.HasBreak_ } +func (n *SwitchStmt) SetHasBreak(x bool) { n.HasBreak_ = x } // A TypeSwitchGuard is the [Name :=] X.(type) in a type switch. type TypeSwitchGuard struct { diff --git a/src/cmd/compile/internal/ir/val.go b/src/cmd/compile/internal/ir/val.go index ad0df5508d926..5b0506c0d04fc 100644 --- a/src/cmd/compile/internal/ir/val.go +++ b/src/cmd/compile/internal/ir/val.go @@ -92,7 +92,7 @@ func ValidTypeForConst(t *types.Type, v constant.Value) bool { // nodlit returns a new untyped constant with value v. 
func NewLiteral(v constant.Value) Node { - n := Nod(OLITERAL, nil, nil) + n := newNameAt(base.Pos, OLITERAL, nil) if k := v.Kind(); k != constant.Unknown { n.SetType(idealType(k)) n.SetVal(v) From eae8fd519b2cbfa253f2f9068587e0ce765efced Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 8 Dec 2020 01:28:57 -0800 Subject: [PATCH 151/474] [dev.regabi] cmd/compile: iexport debug crumbs for toolstash Prints offsets for declarations, inline bodies, and strings when -v is used. Still not much, but hopefully useful for narrowing down the cause of export data differences. Change-Id: I9b2e4a3d55b92823fa45a39923e8c4b25303693c Reviewed-on: https://go-review.googlesource.com/c/go/+/276112 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/iexport.go | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index b1cc9a3dd97e8..14356013de8bb 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -290,6 +290,10 @@ func iexport(out *bufio.Writer) { w.writeIndex(p.inlineIndex, false) w.flush() + if *base.Flag.LowerV { + fmt.Printf("export: hdr strings %v, data %v, index %v\n", p.strings.Len(), dataLen, p.data0.Len()) + } + // Assemble header. var hdr intWriter hdr.WriteByte('i') @@ -389,6 +393,10 @@ func (p *iexporter) stringOff(s string) uint64 { off = uint64(p.strings.Len()) p.stringIndex[s] = off + if *base.Flag.LowerV { + fmt.Printf("export: str %v %.40q\n", off, s) + } + p.strings.uint64(uint64(len(s))) p.strings.WriteString(s) } @@ -511,20 +519,28 @@ func (p *iexporter) doDecl(n *ir.Name) { base.Fatalf("unexpected node: %v", n) } - p.declIndex[n.Sym()] = w.flush() + w.finish("dcl", p.declIndex, n.Sym()) } func (w *exportWriter) tag(tag byte) { w.data.WriteByte(tag) } +func (w *exportWriter) finish(what string, index map[*types.Sym]uint64, sym *types.Sym) { + off := w.flush() + if *base.Flag.LowerV { + fmt.Printf("export: %v %v %v\n", what, off, sym) + } + index[sym] = off +} + func (p *iexporter) doInline(f *ir.Name) { w := p.newWriter() w.setPkg(fnpkg(f), false) w.stmtList(ir.AsNodes(f.Func().Inl.Body)) - p.inlineIndex[f.Sym()] = w.flush() + w.finish("inl", p.inlineIndex, f.Sym()) } func (w *exportWriter) pos(pos src.XPos) { @@ -625,7 +641,11 @@ func (p *iexporter) typOff(t *types.Type) uint64 { if !ok { w := p.newWriter() w.doTyp(t) - off = predeclReserved + w.flush() + rawOff := w.flush() + if *base.Flag.LowerV { + fmt.Printf("export: typ %v %v\n", rawOff, t) + } + off = predeclReserved + rawOff p.typIndex[t] = off } return off From 63bc23b5452f6605df3e40ce7ecdd8b0348792af Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 7 Dec 2020 21:56:58 -0800 Subject: [PATCH 152/474] [dev.regabi] cmd/compile: first start towards using Ident This CL adds Ident, which will eventually replace *Name and *PkgName within the AST for representing uses of declared names. (Originally, I intended to call it "IdentExpr", but neither go/ast nor cmd/compile/internal/syntax include the "Expr" suffix for their respective types.) To start, this CL converts two uses of *Name to *Ident: the tag identifier in a TypeSwitchGuard (which doesn't actually declare a variable by itself), and the not-yet-known placeholder ONONAME returned by oldname to stand-in for identifiers that might be declared later in the package. 
The TypeSwitchGuard's Name's Used flag was previously used for detecting whether none of the per-clause variables were used. To avoid bloating all Idents for this rare use, a "Used" bool is added to TypeSwitchGuard instead. Eventually it could maybe be packed into miniNode.bits, but for now this is good enough. Passes buildall w/ toolstash -cmp. Change-Id: I393284d86757cbbebd26e1320c7354e2bdcb30b0 Reviewed-on: https://go-review.googlesource.com/c/go/+/276113 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/dcl.go | 2 +- src/cmd/compile/internal/gc/iimport.go | 10 +++++----- src/cmd/compile/internal/gc/noder.go | 10 +++++----- src/cmd/compile/internal/gc/typecheck.go | 8 ++++++-- src/cmd/compile/internal/gc/walk.go | 10 +++++----- src/cmd/compile/internal/ir/mknode.go | 8 ++++---- src/cmd/compile/internal/ir/name.go | 19 +++++++++++++++++++ src/cmd/compile/internal/ir/node.go | 2 -- src/cmd/compile/internal/ir/node_gen.go | 24 ++++++++++++++++++++---- src/cmd/compile/internal/ir/stmt.go | 20 +++++++++----------- 10 files changed, 74 insertions(+), 39 deletions(-) diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 1c23c5a92f635..1ebadd9213725 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -215,7 +215,7 @@ func oldname(s *types.Sym) ir.Node { // Maybe a top-level declaration will come along later to // define s. resolve will check s.Def again once all input // source has been processed. - return ir.NewDeclNameAt(base.Pos, s) + return ir.NewIdent(base.Pos, s) } if Curfn != nil && n.Op() == ir.ONAME && n.Name().Curfn != nil && n.Name().Curfn != Curfn { diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 1f75393b3e1e5..3c9693e5fce05 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -833,13 +833,13 @@ func (r *importReader) node() ir.Node { return ir.TypeNode(r.typ()) case ir.OTYPESW: - n := ir.NodAt(r.pos(), ir.OTYPESW, nil, nil) + pos := r.pos() + var tag *ir.Ident if s := r.ident(); s != nil { - n.SetLeft(ir.NewDeclNameAt(n.Pos(), s)) + tag = ir.NewIdent(pos, s) } - right, _ := r.exprsOrNil() - n.SetRight(right) - return n + expr, _ := r.exprsOrNil() + return ir.NewTypeSwitchGuard(pos, tag, expr) // case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC: // unreachable - should have been resolved by typechecking diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index f39bf2ff3c476..8c765f9dfc25f 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -751,14 +751,14 @@ func (p *noder) expr(expr syntax.Expr) ir.Node { p.typeExpr(expr.Elem), p.chanDir(expr.Dir)) case *syntax.TypeSwitchGuard: - n := p.nod(expr, ir.OTYPESW, nil, p.expr(expr.X)) + var tag *ir.Ident if expr.Lhs != nil { - n.SetLeft(p.declName(expr.Lhs)) - if ir.IsBlank(n.Left()) { - base.Errorf("invalid variable name %v in type switch", n.Left()) + tag = ir.NewIdent(p.pos(expr.Lhs), p.name(expr.Lhs)) + if ir.IsBlank(tag) { + base.Errorf("invalid variable name %v in type switch", tag) } } - return n + return ir.NewTypeSwitchGuard(p.pos(expr), tag, p.expr(expr.X)) } panic("unhandled Expr") } diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 36526d4c2d6e7..d88989f83c835 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ 
b/src/cmd/compile/internal/gc/typecheck.go @@ -90,12 +90,16 @@ func resolve(n ir.Node) (res ir.Node) { defer tracePrint("resolve", n)(&res) } - if n.Sym().Pkg != types.LocalPkg { + // Stub ir.Name left for us by iimport. + if n, ok := n.(*ir.Name); ok { + if n.Sym().Pkg == types.LocalPkg { + base.Fatalf("unexpected Name: %+v", n) + } if inimport { base.Fatalf("recursive inimport") } inimport = true - expandDecl(n.(*ir.Name)) + expandDecl(n) inimport = false return n } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index f35e9d768bd49..390719e441a84 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -44,7 +44,7 @@ func walk(fn *ir.Func) { // Propagate the used flag for typeswitch variables up to the NONAME in its definition. for _, ln := range fn.Dcl { if ln.Op() == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) && ln.Defn != nil && ln.Defn.Op() == ir.OTYPESW && ln.Used() { - ln.Defn.Left().Name().SetUsed(true) + ln.Defn.(*ir.TypeSwitchGuard).Used = true } } @@ -52,12 +52,12 @@ func walk(fn *ir.Func) { if ln.Op() != ir.ONAME || (ln.Class() != ir.PAUTO && ln.Class() != ir.PAUTOHEAP) || ln.Sym().Name[0] == '&' || ln.Used() { continue } - if defn := ln.Defn; defn != nil && defn.Op() == ir.OTYPESW { - if defn.Left().Name().Used() { + if defn, ok := ln.Defn.(*ir.TypeSwitchGuard); ok { + if defn.Used { continue } - base.ErrorfAt(defn.Left().Pos(), "%v declared but not used", ln.Sym()) - defn.Left().Name().SetUsed(true) // suppress repeats + base.ErrorfAt(defn.Tag.Pos(), "%v declared but not used", ln.Sym()) + defn.Used = true // suppress repeats } else { base.ErrorfAt(ln.Pos(), "%v declared but not used", ln.Sym()) } diff --git a/src/cmd/compile/internal/ir/mknode.go b/src/cmd/compile/internal/ir/mknode.go index 72034022cbdf6..18d768ceb1bac 100644 --- a/src/cmd/compile/internal/ir/mknode.go +++ b/src/cmd/compile/internal/ir/mknode.go @@ -39,7 +39,7 @@ func main() { nodesType := lookup("Nodes") ptrFieldType := types.NewPointer(lookup("Field")) slicePtrFieldType := types.NewSlice(ptrFieldType) - ptrNameType := types.NewPointer(lookup("Name")) + ptrIdentType := types.NewPointer(lookup("Ident")) var buf bytes.Buffer fmt.Fprintln(&buf, "// Code generated by mknode.go. 
DO NOT EDIT.") @@ -84,7 +84,7 @@ func main() { fmt.Fprintf(&buf, "func (n *%s) doChildren(do func(Node) error) error { var err error\n", name) forNodeFields(typName, typ, func(name string, is func(types.Type) bool) { switch { - case is(ptrNameType): + case is(ptrIdentType): fmt.Fprintf(&buf, "if n.%s != nil { err = maybeDo(n.%s, err, do) }\n", name, name) case is(nodeType), is(ntypeType): fmt.Fprintf(&buf, "err = maybeDo(n.%s, err, do)\n", name) @@ -101,8 +101,8 @@ func main() { fmt.Fprintf(&buf, "func (n *%s) editChildren(edit func(Node) Node) {\n", name) forNodeFields(typName, typ, func(name string, is func(types.Type) bool) { switch { - case is(ptrNameType): - fmt.Fprintf(&buf, "if n.%s != nil { n.%s = edit(n.%s).(*Name) }\n", name, name, name) + case is(ptrIdentType): + fmt.Fprintf(&buf, "if n.%s != nil { n.%s = edit(n.%s).(*Ident) }\n", name, name, name) case is(nodeType): fmt.Fprintf(&buf, "n.%s = maybeEdit(n.%s, edit)\n", name, name) case is(ntypeType): diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 4cf12f2c5d7f9..2330838f1c548 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -13,6 +13,25 @@ import ( "go/constant" ) +// An Ident is an identifier, possibly qualified. +type Ident struct { + miniExpr + sym *types.Sym + Used bool +} + +func NewIdent(pos src.XPos, sym *types.Sym) *Ident { + n := new(Ident) + n.op = ONONAME + n.pos = pos + n.sym = sym + return n +} + +func (n *Ident) Sym() *types.Sym { return n.sym } + +func (*Ident) CanBeNtype() {} + // Name holds Node fields used only by named nodes (ONAME, OTYPE, some OLITERAL). type Name struct { miniExpr diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 0191014133c2a..598659a3db345 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -794,8 +794,6 @@ func NodAt(pos src.XPos, op Op, nleft, nright Node) Node { return NewSliceHeaderExpr(pos, nil, nleft, nil, nil) case OSWITCH: return NewSwitchStmt(pos, nleft, nil) - case OTYPESW: - return NewTypeSwitchGuard(pos, nleft, nright) case OINLCALL: return NewInlinedCallExpr(pos, nil, nil) } diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 4eedcfdd29bc8..264171e797cf2 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -450,6 +450,22 @@ func (n *GoDeferStmt) editChildren(edit func(Node) Node) { n.Call = maybeEdit(n.Call, edit) } +func (n *Ident) String() string { return fmt.Sprint(n) } +func (n *Ident) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *Ident) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} +func (n *Ident) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + return err +} +func (n *Ident) editChildren(edit func(Node) Node) { + editList(n.init, edit) +} + func (n *IfStmt) String() string { return fmt.Sprint(n) } func (n *IfStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *IfStmt) copy() Node { @@ -1004,15 +1020,15 @@ func (n *TypeSwitchGuard) copy() Node { } func (n *TypeSwitchGuard) doChildren(do func(Node) error) error { var err error - if n.Name_ != nil { - err = maybeDo(n.Name_, err, do) + if n.Tag != nil { + err = maybeDo(n.Tag, err, do) } err = maybeDo(n.X, err, do) return err } func (n *TypeSwitchGuard) editChildren(edit func(Node) Node) { - if n.Name_ != nil { - n.Name_ = edit(n.Name_).(*Name) + if n.Tag != 
nil { + n.Tag = edit(n.Tag).(*Ident) } n.X = maybeEdit(n.X, edit) } diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index 28c40c0781357..f41c50c92b192 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -512,32 +512,30 @@ func (n *SwitchStmt) SetHasBreak(x bool) { n.HasBreak_ = x } // A TypeSwitchGuard is the [Name :=] X.(type) in a type switch. type TypeSwitchGuard struct { miniNode - Name_ *Name - X Node + Tag *Ident + X Node + Used bool } -func NewTypeSwitchGuard(pos src.XPos, name, x Node) *TypeSwitchGuard { - n := &TypeSwitchGuard{X: x} - if name != nil { - n.Name_ = name.(*Name) - } +func NewTypeSwitchGuard(pos src.XPos, tag *Ident, x Node) *TypeSwitchGuard { + n := &TypeSwitchGuard{Tag: tag, X: x} n.pos = pos n.op = OTYPESW return n } func (n *TypeSwitchGuard) Left() Node { - if n.Name_ == nil { + if n.Tag == nil { return nil } - return n.Name_ + return n.Tag } func (n *TypeSwitchGuard) SetLeft(x Node) { if x == nil { - n.Name_ = nil + n.Tag = nil return } - n.Name_ = x.(*Name) + n.Tag = x.(*Ident) } func (n *TypeSwitchGuard) Right() Node { return n.X } func (n *TypeSwitchGuard) SetRight(x Node) { n.X = x } From 9c5241e52020cf77683cd260a5fa3f3f029ed80c Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 7 Dec 2020 20:05:17 -0800 Subject: [PATCH 153/474] [dev.regabi] cmd/compile: remove unnecessary String methods There were only a few places these were still used, none of which justify generating all this code. Instead rewrite them to use fmt.Sprint or simpler means. Passes buildall w/ toolstash -cmp. Change-Id: Ibd123a1696941a597f0cb4dcc96cda8ced672140 Reviewed-on: https://go-review.googlesource.com/c/go/+/276072 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/const.go | 2 +- src/cmd/compile/internal/gc/typecheck.go | 15 ++++--- src/cmd/compile/internal/gc/universe.go | 11 ----- src/cmd/compile/internal/ir/mini.go | 1 - src/cmd/compile/internal/ir/mknode.go | 1 - src/cmd/compile/internal/ir/node.go | 1 - src/cmd/compile/internal/ir/node_gen.go | 56 ------------------------ src/cmd/compile/internal/ssa/op.go | 1 - test/fixedbugs/issue22822.go | 4 +- 9 files changed, 12 insertions(+), 80 deletions(-) diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 80799580c6d1f..677ed17dd9ee6 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -887,7 +887,7 @@ func (s *constSet) add(pos src.XPos, n ir.Node, what, where string) { // // TODO(mdempsky): This could probably be a fmt.go flag. 
func nodeAndVal(n ir.Node) string { - show := n.String() + show := fmt.Sprint(n) val := ir.ConstValue(n) if s := fmt.Sprintf("%#v", val); show != s { show += " (value " + s + ")" diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index d88989f83c835..f187880e28cb1 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -956,7 +956,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { t := n.Left().Type() if t == nil { - base.UpdateErrorDot(ir.Line(n), n.Left().String(), n.String()) + base.UpdateErrorDot(ir.Line(n), fmt.Sprint(n.Left()), fmt.Sprint(n)) n.SetType(nil) return n } @@ -1431,14 +1431,15 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { default: n.SetOp(ir.OCALLFUNC) if t.Kind() != types.TFUNC { - name := l.String() - if isBuiltinFuncName(name) && l.Name().Defn != nil { - // be more specific when the function + // TODO(mdempsky): Remove "o.Sym() != nil" once we stop + // using ir.Name for numeric literals. + if o := ir.Orig(l); o.Name() != nil && o.Sym() != nil && types.BuiltinPkg.Lookup(o.Sym().Name).Def != nil { + // be more specific when the non-function // name matches a predeclared function - base.Errorf("cannot call non-function %s (type %v), declared at %s", - name, t, base.FmtPos(l.Name().Defn.Pos())) + base.Errorf("cannot call non-function %L, declared at %s", + l, base.FmtPos(o.Name().Pos())) } else { - base.Errorf("cannot call non-function %s (type %v)", name, t) + base.Errorf("cannot call non-function %L", l) } n.SetType(nil) return n diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index c592e37497ddb..66ca0d01b347e 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -65,17 +65,6 @@ var builtinFuncs = [...]struct { {"recover", ir.ORECOVER}, } -// isBuiltinFuncName reports whether name matches a builtin function -// name. -func isBuiltinFuncName(name string) bool { - for _, fn := range &builtinFuncs { - if fn.name == name { - return true - } - } - return false -} - var unsafeFuncs = [...]struct { name string op ir.Op diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go index 7ecdcbf32f0af..bf221f75edc09 100644 --- a/src/cmd/compile/internal/ir/mini.go +++ b/src/cmd/compile/internal/ir/mini.go @@ -35,7 +35,6 @@ type miniNode struct { esc uint16 } -func (n *miniNode) String() string { panic(1) } func (n *miniNode) Format(s fmt.State, verb rune) { panic(1) } func (n *miniNode) copy() Node { panic(1) } func (n *miniNode) doChildren(do func(Node) error) error { panic(1) } diff --git a/src/cmd/compile/internal/ir/mknode.go b/src/cmd/compile/internal/ir/mknode.go index 18d768ceb1bac..f9b398fe28d8f 100644 --- a/src/cmd/compile/internal/ir/mknode.go +++ b/src/cmd/compile/internal/ir/mknode.go @@ -65,7 +65,6 @@ func main() { } fmt.Fprintf(&buf, "\n") - fmt.Fprintf(&buf, "func (n *%s) String() string { return fmt.Sprint(n) }\n", name) fmt.Fprintf(&buf, "func (n *%s) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }\n", name) fmt.Fprintf(&buf, "func (n *%s) copy() Node { c := *n\n", name) diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 598659a3db345..dc86b6c683ebc 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -20,7 +20,6 @@ import ( type Node interface { // Formatting Format(s fmt.State, verb rune) - String() string // Source position. 
Pos() src.XPos diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 264171e797cf2..39d8f03ddc40c 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -4,7 +4,6 @@ package ir import "fmt" -func (n *AddStringExpr) String() string { return fmt.Sprint(n) } func (n *AddStringExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *AddStringExpr) copy() Node { c := *n @@ -23,7 +22,6 @@ func (n *AddStringExpr) editChildren(edit func(Node) Node) { editList(n.List_, edit) } -func (n *AddrExpr) String() string { return fmt.Sprint(n) } func (n *AddrExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *AddrExpr) copy() Node { c := *n @@ -43,7 +41,6 @@ func (n *AddrExpr) editChildren(edit func(Node) Node) { n.Alloc = maybeEdit(n.Alloc, edit) } -func (n *ArrayType) String() string { return fmt.Sprint(n) } func (n *ArrayType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *ArrayType) copy() Node { c := *n @@ -60,7 +57,6 @@ func (n *ArrayType) editChildren(edit func(Node) Node) { n.Elem = maybeEdit(n.Elem, edit) } -func (n *AssignListStmt) String() string { return fmt.Sprint(n) } func (n *AssignListStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *AssignListStmt) copy() Node { c := *n @@ -82,7 +78,6 @@ func (n *AssignListStmt) editChildren(edit func(Node) Node) { editList(n.Rhs, edit) } -func (n *AssignOpStmt) String() string { return fmt.Sprint(n) } func (n *AssignOpStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *AssignOpStmt) copy() Node { c := *n @@ -102,7 +97,6 @@ func (n *AssignOpStmt) editChildren(edit func(Node) Node) { n.Y = maybeEdit(n.Y, edit) } -func (n *AssignStmt) String() string { return fmt.Sprint(n) } func (n *AssignStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *AssignStmt) copy() Node { c := *n @@ -122,7 +116,6 @@ func (n *AssignStmt) editChildren(edit func(Node) Node) { n.Y = maybeEdit(n.Y, edit) } -func (n *BinaryExpr) String() string { return fmt.Sprint(n) } func (n *BinaryExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *BinaryExpr) copy() Node { c := *n @@ -142,7 +135,6 @@ func (n *BinaryExpr) editChildren(edit func(Node) Node) { n.Y = maybeEdit(n.Y, edit) } -func (n *BlockStmt) String() string { return fmt.Sprint(n) } func (n *BlockStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *BlockStmt) copy() Node { c := *n @@ -161,7 +153,6 @@ func (n *BlockStmt) editChildren(edit func(Node) Node) { editList(n.List_, edit) } -func (n *BranchStmt) String() string { return fmt.Sprint(n) } func (n *BranchStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *BranchStmt) copy() Node { c := *n @@ -177,7 +168,6 @@ func (n *BranchStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) } -func (n *CallExpr) String() string { return fmt.Sprint(n) } func (n *CallExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *CallExpr) copy() Node { c := *n @@ -204,7 +194,6 @@ func (n *CallExpr) editChildren(edit func(Node) Node) { editList(n.Body_, edit) } -func (n *CallPartExpr) String() string { return fmt.Sprint(n) } func (n *CallPartExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *CallPartExpr) copy() Node { c := *n @@ -222,7 +211,6 @@ func (n *CallPartExpr) editChildren(edit func(Node) Node) { n.X = maybeEdit(n.X, edit) } -func (n *CaseStmt) String() string { return fmt.Sprint(n) } func (n *CaseStmt) 
Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *CaseStmt) copy() Node { c := *n @@ -249,7 +237,6 @@ func (n *CaseStmt) editChildren(edit func(Node) Node) { editList(n.Body_, edit) } -func (n *ChanType) String() string { return fmt.Sprint(n) } func (n *ChanType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *ChanType) copy() Node { c := *n @@ -264,7 +251,6 @@ func (n *ChanType) editChildren(edit func(Node) Node) { n.Elem = maybeEdit(n.Elem, edit) } -func (n *ClosureExpr) String() string { return fmt.Sprint(n) } func (n *ClosureExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *ClosureExpr) copy() Node { c := *n @@ -280,7 +266,6 @@ func (n *ClosureExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) } -func (n *ClosureReadExpr) String() string { return fmt.Sprint(n) } func (n *ClosureReadExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *ClosureReadExpr) copy() Node { c := *n @@ -296,7 +281,6 @@ func (n *ClosureReadExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) } -func (n *CompLitExpr) String() string { return fmt.Sprint(n) } func (n *CompLitExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *CompLitExpr) copy() Node { c := *n @@ -317,7 +301,6 @@ func (n *CompLitExpr) editChildren(edit func(Node) Node) { editList(n.List_, edit) } -func (n *ConstExpr) String() string { return fmt.Sprint(n) } func (n *ConstExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *ConstExpr) copy() Node { c := *n @@ -333,7 +316,6 @@ func (n *ConstExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) } -func (n *ConvExpr) String() string { return fmt.Sprint(n) } func (n *ConvExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *ConvExpr) copy() Node { c := *n @@ -351,7 +333,6 @@ func (n *ConvExpr) editChildren(edit func(Node) Node) { n.X = maybeEdit(n.X, edit) } -func (n *Decl) String() string { return fmt.Sprint(n) } func (n *Decl) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *Decl) copy() Node { c := *n @@ -366,7 +347,6 @@ func (n *Decl) editChildren(edit func(Node) Node) { n.X = maybeEdit(n.X, edit) } -func (n *ForStmt) String() string { return fmt.Sprint(n) } func (n *ForStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *ForStmt) copy() Node { c := *n @@ -392,7 +372,6 @@ func (n *ForStmt) editChildren(edit func(Node) Node) { editList(n.Body_, edit) } -func (n *Func) String() string { return fmt.Sprint(n) } func (n *Func) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *Func) copy() Node { c := *n @@ -408,7 +387,6 @@ func (n *Func) editChildren(edit func(Node) Node) { editList(n.Body_, edit) } -func (n *FuncType) String() string { return fmt.Sprint(n) } func (n *FuncType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *FuncType) copy() Node { c := *n @@ -432,7 +410,6 @@ func (n *FuncType) editChildren(edit func(Node) Node) { editFields(n.Results, edit) } -func (n *GoDeferStmt) String() string { return fmt.Sprint(n) } func (n *GoDeferStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *GoDeferStmt) copy() Node { c := *n @@ -450,7 +427,6 @@ func (n *GoDeferStmt) editChildren(edit func(Node) Node) { n.Call = maybeEdit(n.Call, edit) } -func (n *Ident) String() string { return fmt.Sprint(n) } func (n *Ident) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *Ident) copy() Node { c := *n @@ -466,7 +442,6 @@ func (n *Ident) editChildren(edit func(Node) Node) { 
editList(n.init, edit) } -func (n *IfStmt) String() string { return fmt.Sprint(n) } func (n *IfStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *IfStmt) copy() Node { c := *n @@ -490,7 +465,6 @@ func (n *IfStmt) editChildren(edit func(Node) Node) { editList(n.Else, edit) } -func (n *IndexExpr) String() string { return fmt.Sprint(n) } func (n *IndexExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *IndexExpr) copy() Node { c := *n @@ -510,7 +484,6 @@ func (n *IndexExpr) editChildren(edit func(Node) Node) { n.Index = maybeEdit(n.Index, edit) } -func (n *InlineMarkStmt) String() string { return fmt.Sprint(n) } func (n *InlineMarkStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *InlineMarkStmt) copy() Node { c := *n @@ -526,7 +499,6 @@ func (n *InlineMarkStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) } -func (n *InlinedCallExpr) String() string { return fmt.Sprint(n) } func (n *InlinedCallExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *InlinedCallExpr) copy() Node { c := *n @@ -548,7 +520,6 @@ func (n *InlinedCallExpr) editChildren(edit func(Node) Node) { editList(n.ReturnVars, edit) } -func (n *InterfaceType) String() string { return fmt.Sprint(n) } func (n *InterfaceType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *InterfaceType) copy() Node { c := *n @@ -564,7 +535,6 @@ func (n *InterfaceType) editChildren(edit func(Node) Node) { editFields(n.Methods, edit) } -func (n *KeyExpr) String() string { return fmt.Sprint(n) } func (n *KeyExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *KeyExpr) copy() Node { c := *n @@ -584,7 +554,6 @@ func (n *KeyExpr) editChildren(edit func(Node) Node) { n.Value = maybeEdit(n.Value, edit) } -func (n *LabelStmt) String() string { return fmt.Sprint(n) } func (n *LabelStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *LabelStmt) copy() Node { c := *n @@ -600,7 +569,6 @@ func (n *LabelStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) } -func (n *LogicalExpr) String() string { return fmt.Sprint(n) } func (n *LogicalExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *LogicalExpr) copy() Node { c := *n @@ -620,7 +588,6 @@ func (n *LogicalExpr) editChildren(edit func(Node) Node) { n.Y = maybeEdit(n.Y, edit) } -func (n *MakeExpr) String() string { return fmt.Sprint(n) } func (n *MakeExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *MakeExpr) copy() Node { c := *n @@ -640,7 +607,6 @@ func (n *MakeExpr) editChildren(edit func(Node) Node) { n.Cap = maybeEdit(n.Cap, edit) } -func (n *MapType) String() string { return fmt.Sprint(n) } func (n *MapType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *MapType) copy() Node { c := *n @@ -657,7 +623,6 @@ func (n *MapType) editChildren(edit func(Node) Node) { n.Elem = maybeEdit(n.Elem, edit) } -func (n *MethodExpr) String() string { return fmt.Sprint(n) } func (n *MethodExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *MethodExpr) copy() Node { c := *n @@ -677,7 +642,6 @@ func (n *MethodExpr) editChildren(edit func(Node) Node) { n.M = maybeEdit(n.M, edit) } -func (n *Name) String() string { return fmt.Sprint(n) } func (n *Name) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *Name) copy() Node { c := *n @@ -690,7 +654,6 @@ func (n *Name) doChildren(do func(Node) error) error { func (n *Name) editChildren(edit func(Node) Node) { } -func (n *NilExpr) String() string { return 
fmt.Sprint(n) } func (n *NilExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *NilExpr) copy() Node { c := *n @@ -706,7 +669,6 @@ func (n *NilExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) } -func (n *ParenExpr) String() string { return fmt.Sprint(n) } func (n *ParenExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *ParenExpr) copy() Node { c := *n @@ -724,7 +686,6 @@ func (n *ParenExpr) editChildren(edit func(Node) Node) { n.X = maybeEdit(n.X, edit) } -func (n *PkgName) String() string { return fmt.Sprint(n) } func (n *PkgName) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *PkgName) copy() Node { c := *n @@ -737,7 +698,6 @@ func (n *PkgName) doChildren(do func(Node) error) error { func (n *PkgName) editChildren(edit func(Node) Node) { } -func (n *RangeStmt) String() string { return fmt.Sprint(n) } func (n *RangeStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *RangeStmt) copy() Node { c := *n @@ -761,7 +721,6 @@ func (n *RangeStmt) editChildren(edit func(Node) Node) { editList(n.Body_, edit) } -func (n *ResultExpr) String() string { return fmt.Sprint(n) } func (n *ResultExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *ResultExpr) copy() Node { c := *n @@ -777,7 +736,6 @@ func (n *ResultExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) } -func (n *ReturnStmt) String() string { return fmt.Sprint(n) } func (n *ReturnStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *ReturnStmt) copy() Node { c := *n @@ -796,7 +754,6 @@ func (n *ReturnStmt) editChildren(edit func(Node) Node) { editList(n.Results, edit) } -func (n *SelectStmt) String() string { return fmt.Sprint(n) } func (n *SelectStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *SelectStmt) copy() Node { c := *n @@ -818,7 +775,6 @@ func (n *SelectStmt) editChildren(edit func(Node) Node) { editList(n.Compiled, edit) } -func (n *SelectorExpr) String() string { return fmt.Sprint(n) } func (n *SelectorExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *SelectorExpr) copy() Node { c := *n @@ -836,7 +792,6 @@ func (n *SelectorExpr) editChildren(edit func(Node) Node) { n.X = maybeEdit(n.X, edit) } -func (n *SendStmt) String() string { return fmt.Sprint(n) } func (n *SendStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *SendStmt) copy() Node { c := *n @@ -856,7 +811,6 @@ func (n *SendStmt) editChildren(edit func(Node) Node) { n.Value = maybeEdit(n.Value, edit) } -func (n *SliceExpr) String() string { return fmt.Sprint(n) } func (n *SliceExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *SliceExpr) copy() Node { c := *n @@ -877,7 +831,6 @@ func (n *SliceExpr) editChildren(edit func(Node) Node) { editList(n.List_, edit) } -func (n *SliceHeaderExpr) String() string { return fmt.Sprint(n) } func (n *SliceHeaderExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *SliceHeaderExpr) copy() Node { c := *n @@ -898,7 +851,6 @@ func (n *SliceHeaderExpr) editChildren(edit func(Node) Node) { editList(n.LenCap_, edit) } -func (n *SliceType) String() string { return fmt.Sprint(n) } func (n *SliceType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *SliceType) copy() Node { c := *n @@ -913,7 +865,6 @@ func (n *SliceType) editChildren(edit func(Node) Node) { n.Elem = maybeEdit(n.Elem, edit) } -func (n *StarExpr) String() string { return fmt.Sprint(n) } func (n *StarExpr) Format(s fmt.State, verb rune) { 
FmtNode(n, s, verb) }
 func (n *StarExpr) copy() Node {
 	c := *n
@@ -931,7 +882,6 @@ func (n *StarExpr) editChildren(edit func(Node) Node) {
 	n.X = maybeEdit(n.X, edit)
 }
 
-func (n *StructKeyExpr) String() string { return fmt.Sprint(n) }
 func (n *StructKeyExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
 func (n *StructKeyExpr) copy() Node {
 	c := *n
@@ -949,7 +899,6 @@ func (n *StructKeyExpr) editChildren(edit func(Node) Node) {
 	n.Value = maybeEdit(n.Value, edit)
 }
 
-func (n *StructType) String() string { return fmt.Sprint(n) }
 func (n *StructType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
 func (n *StructType) copy() Node {
 	c := *n
@@ -965,7 +914,6 @@ func (n *StructType) editChildren(edit func(Node) Node) {
 	editFields(n.Fields, edit)
 }
 
-func (n *SwitchStmt) String() string { return fmt.Sprint(n) }
 func (n *SwitchStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
 func (n *SwitchStmt) copy() Node {
 	c := *n
@@ -989,7 +937,6 @@ func (n *SwitchStmt) editChildren(edit func(Node) Node) {
 	editList(n.Compiled, edit)
 }
 
-func (n *TypeAssertExpr) String() string { return fmt.Sprint(n) }
 func (n *TypeAssertExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
 func (n *TypeAssertExpr) copy() Node {
 	c := *n
@@ -1012,7 +959,6 @@ func (n *TypeAssertExpr) editChildren(edit func(Node) Node) {
 	editList(n.Itab, edit)
 }
 
-func (n *TypeSwitchGuard) String() string { return fmt.Sprint(n) }
 func (n *TypeSwitchGuard) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
 func (n *TypeSwitchGuard) copy() Node {
 	c := *n
@@ -1033,7 +979,6 @@ func (n *TypeSwitchGuard) editChildren(edit func(Node) Node) {
 	n.X = maybeEdit(n.X, edit)
 }
 
-func (n *UnaryExpr) String() string { return fmt.Sprint(n) }
 func (n *UnaryExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
 func (n *UnaryExpr) copy() Node {
 	c := *n
@@ -1051,7 +996,6 @@ func (n *UnaryExpr) editChildren(edit func(Node) Node) {
 	n.X = maybeEdit(n.X, edit)
 }
 
-func (n *typeNode) String() string { return fmt.Sprint(n) }
 func (n *typeNode) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
 func (n *typeNode) copy() Node {
 	c := *n
diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go
index 97726a6f95346..9bc5aaec02014 100644
--- a/src/cmd/compile/internal/ssa/op.go
+++ b/src/cmd/compile/internal/ssa/op.go
@@ -248,7 +248,6 @@ const (
 // - a *obj.LSym, for an offset from SB (the global pointer)
 // - nil, for no offset
 type Sym interface {
-	String() string
 	CanBeAnSSASym()
 	CanBeAnSSAAux()
 }
diff --git a/test/fixedbugs/issue22822.go b/test/fixedbugs/issue22822.go
index e449ddb186e24..0e838cb5975f4 100644
--- a/test/fixedbugs/issue22822.go
+++ b/test/fixedbugs/issue22822.go
@@ -12,5 +12,7 @@ package main
 func F() {
 	slice := []int{1, 2, 3}
 	len := int(2)
-	println(len(slice)) // ERROR "cannot call non-function len .type int., declared at"
+	println(len(slice)) // ERROR "cannot call non-function len .type int., declared at LINE-1"
+	const iota = 1
+	println(iota(slice)) // ERROR "cannot call non-function iota .type int., declared at LINE-1"
 }

From 2b76429eb01ec1752f7622e3011babd7140ab870 Mon Sep 17 00:00:00 2001
From: Than McIntosh
Date: Tue, 24 Nov 2020 18:09:00 -0500
Subject: [PATCH 154/474] [dev.regabi] cmd/compile: refactor type initialization code into helper

Create a helper routine for initializing the types package, so as
to make it easier to use in unit testing (in a follow-on patch).
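A unit test harness can then reduce its compiler setup to the
arch-specific pieces plus one call. Sketch (this mirrors the TestMain
of the abiutils tests added later in this series, with the arch and
linker-context setup abridged):

	func TestMain(m *testing.M) {
		// Arch, Ctxt, and Widthptr/Widthreg setup elided;
		// see abiutils_test.go for the full sequence.
		initializeTypesPackage()
		os.Exit(m.Run())
	}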
Change-Id: I0f937788dfd34ac6641a4f28c16e47008aa08116
Reviewed-on: https://go-review.googlesource.com/c/go/+/273010
Run-TryBot: Than McIntosh
TryBot-Result: Go Bot
Trust: Than McIntosh
Reviewed-by: Matthew Dempsky
---
 src/cmd/compile/internal/gc/main.go | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index 503dc449d3ea5..368fe1fcab233 100644
--- a/src/cmd/compile/internal/gc/main.go
+++ b/src/cmd/compile/internal/gc/main.go
@@ -210,13 +210,7 @@ func Main(archInit func(*Arch)) {
 	// initialize types package
 	// (we need to do this to break dependencies that otherwise
 	// would lead to import cycles)
-	types.Widthptr = Widthptr
-	types.Dowidth = dowidth
-	types.TypeLinkSym = func(t *types.Type) *obj.LSym {
-		return typenamesym(t).Linksym()
-	}
-
-	initUniverse()
+	initializeTypesPackage()
 
 	dclcontext = ir.PEXTERN
 
@@ -1125,3 +1119,13 @@ func parseLang(s string) (lang, error) {
 	}
 	return lang{major: major, minor: minor}, nil
 }
+
+func initializeTypesPackage() {
+	types.Widthptr = Widthptr
+	types.Dowidth = dowidth
+	types.TypeLinkSym = func(t *types.Type) *obj.LSym {
+		return typenamesym(t).Linksym()
+	}
+
+	initUniverse()
+}

From 7e17b46c58cbb0aff2b42490a73e807bb04757d7 Mon Sep 17 00:00:00 2001
From: Than McIntosh
Date: Wed, 2 Dec 2020 16:13:45 -0500
Subject: [PATCH 155/474] [dev.regabi] cmd/compile/internal/types: add IsScalar query method

Add a Type.IsScalar() query method, which returns true for boolean
and numeric types and false for pointer-shaped types and for
composites such as string/array/slice/struct.

Change-Id: Ie53c71c07c5b3fbae11b48addd172343dc6bf3fd
Reviewed-on: https://go-review.googlesource.com/c/go/+/274857
Run-TryBot: Than McIntosh
TryBot-Result: Go Bot
Trust: Than McIntosh
Reviewed-by: Matthew Dempsky
---
 src/cmd/compile/internal/types/type.go | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index e968a799e3827..4d1d30133c8cf 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -1335,6 +1335,20 @@ func (t *Type) IsEmptyInterface() bool {
 	return t.IsInterface() && t.NumFields() == 0
 }
 
+// IsScalar reports whether 't' is a scalar Go type, e.g.
+// bool/int/float/complex. Note that struct and array types consisting
+// of a single scalar element are not considered scalar; likewise,
+// pointer types are not considered scalar.
+func (t *Type) IsScalar() bool {
+	switch t.kind {
+	case TBOOL, TINT8, TUINT8, TINT16, TUINT16, TINT32,
+		TUINT32, TINT64, TUINT64, TINT, TUINT,
+		TUINTPTR, TCOMPLEX64, TCOMPLEX128, TFLOAT32, TFLOAT64:
+		return true
+	}
+	return false
+}
+
 func (t *Type) PtrTo() *Type {
 	return NewPtr(t)
 }

From 8ce37e4110316030159972e17c67152e8f8e9359 Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Thu, 10 Dec 2020 21:04:41 -0800
Subject: [PATCH 156/474] [dev.regabi] cmd/compile: fix noopt builder

The non-simple, phi-insertion algorithm can leave OpFwdRefs in the
SSA graph unresolved if they're in dead blocks. Normally, these
would be harmlessly removed later during SSA dead-code elimination,
but those passes are omitted for -N builds. And so they reach zcse,
where the Value.Aux is used within a hash map.

This became a problem after golang.org/cl/275788, which added
FwdRefAux to wrap OpFwdRef's ir.Node, and to ensure that it's not
compared for equality / used as a map key.
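As a hypothetical illustration (not taken from the CL), a function
with an unreachable tail can leave such a dead block behind when
compiled with -N:

	func f(p *int) int {
		x := 0
		for {
			x = *p
			return x
		}
		// Unreachable: a read of x here may be built as an
		// OpFwdRef, and with -N no dead-code pass deletes
		// this block before zcse runs.
		x++
		return x
	}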
This CL adds a simple fix: if there are any OpFwdRefs remaining
after resolveFwdRef, then they must be dead code and we can simply
replace them with OpUnknown.

Change-Id: I72e4116d52d3f6441ebb0bf6160906617cd59513
Reviewed-on: https://go-review.googlesource.com/c/go/+/277075
Trust: Matthew Dempsky
Reviewed-by: Keith Randall
---
 src/cmd/compile/internal/gc/phi.go | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/cmd/compile/internal/gc/phi.go b/src/cmd/compile/internal/gc/phi.go
index def11e1be0cb2..32c330b584a76 100644
--- a/src/cmd/compile/internal/gc/phi.go
+++ b/src/cmd/compile/internal/gc/phi.go
@@ -188,6 +188,11 @@ levels:
 			if v.Op == ssa.OpPhi {
 				v.AuxInt = 0
 			}
+			// Any remaining FwdRefs are dead code.
+			if v.Op == ssa.OpFwdRef {
+				v.Op = ssa.OpUnknown
+				v.Aux = nil
+			}
 		}
 	}
 }

From 89f38323faa57d3f7475016f778be69fcffbe9fb Mon Sep 17 00:00:00 2001
From: Than McIntosh
Date: Tue, 24 Nov 2020 18:10:11 -0500
Subject: [PATCH 157/474] [dev.regabi] cmd/compile: add register ABI analysis utilities

Introduce a new utility routine for analyzing a given function
signature to determine how its various input and output parameters
will be passed (in registers or on the stack) for a given ABI
description, along with some unit tests.

Change-Id: Id64a98a0a142e42dd9c2dc9f6607c0d827ef84fb
Reviewed-on: https://go-review.googlesource.com/c/go/+/273011
Run-TryBot: Than McIntosh
Reviewed-by: Jeremy Faller
Trust: Than McIntosh
---
 src/cmd/compile/fmtmap_test.go               |   1 +
 src/cmd/compile/internal/gc/abiutils.go      | 351 ++++++++++++++++++
 src/cmd/compile/internal/gc/abiutils_test.go | 270 ++++++++++++++
 .../compile/internal/gc/abiutilsaux_test.go  | 157 ++++++++
 4 files changed, 779 insertions(+)
 create mode 100644 src/cmd/compile/internal/gc/abiutils.go
 create mode 100644 src/cmd/compile/internal/gc/abiutils_test.go
 create mode 100644 src/cmd/compile/internal/gc/abiutilsaux_test.go

diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go
index e62b9613e1840..9bc059c2e44f9 100644
--- a/src/cmd/compile/fmtmap_test.go
+++ b/src/cmd/compile/fmtmap_test.go
@@ -36,6 +36,7 @@ var knownFormats = map[string]string{
 	"*math/big.Int %s":                       "",
 	"[]cmd/compile/internal/syntax.token %s": "",
 	"cmd/compile/internal/arm.shift %d":      "",
+	"cmd/compile/internal/gc.RegIndex %d":    "",
 	"cmd/compile/internal/gc.initKind %d":    "",
 	"cmd/compile/internal/ir.Class %d":       "",
 	"cmd/compile/internal/ir.Node %+v":       "",
diff --git a/src/cmd/compile/internal/gc/abiutils.go b/src/cmd/compile/internal/gc/abiutils.go
new file mode 100644
index 0000000000000..19de14d48c353
--- /dev/null
+++ b/src/cmd/compile/internal/gc/abiutils.go
@@ -0,0 +1,351 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+	"fmt"
+	"sync"
+)
+
+//......................................................................
+//
+// Public/exported bits of the ABI utilities.
+//
+
+// ABIParamResultInfo stores the results of processing a given
+// function type to compute stack layout and register assignments. For
+// each input and output parameter we capture whether the param was
+// register-assigned (and to which register(s)) or the stack offset
+// for the param if it is not going to be passed in registers according
+// to the rules in the Go internal ABI specification (1.17).
+type ABIParamResultInfo struct {
+	inparams          []ABIParamAssignment // Includes receiver for method calls. Does NOT include hidden closure pointer.
+	outparams         []ABIParamAssignment
+	intSpillSlots     int
+	floatSpillSlots   int
+	offsetToSpillArea int64
+	config            ABIConfig // to enable String() method
+}
+
+// RegIndex stores the index into the set of machine registers used by
+// the ABI on a specific architecture for parameter passing. RegIndex
+// values 0 through N-1 (where N is the number of integer registers
+// used for param passing according to the ABI rules) describe integer
+// registers; values N through N+M-1 (where M is the number of floating
+// point registers used) describe floating point registers. Thus if the
+// ABI says there are 5 integer registers and 7 floating point
+// registers, then a RegIndex value of 4 indicates the 5th integer
+// register, and a RegIndex value of 11 indicates the 7th floating
+// point register.
+type RegIndex uint8
+
+// ABIParamAssignment holds information about how a specific param or
+// result will be passed: in registers (in which case 'Registers' is
+// populated) or on the stack (in which case 'Offset' is set to a
+// non-negative stack offset). The values in 'Registers' are indices (as
+// described above), not architected registers.
+type ABIParamAssignment struct {
+	Type      *types.Type
+	Registers []RegIndex
+	Offset    int32
+}
+
+// RegAmounts holds a specified number of integer/float registers.
+type RegAmounts struct {
+	intRegs   int
+	floatRegs int
+}
+
+// ABIConfig captures the number of registers made available
+// by the ABI rules for parameter passing and result returning.
+type ABIConfig struct {
+	// Do we need anything more than this?
+	regAmounts RegAmounts
+}
+
+// ABIAnalyze takes a function type 't' and an ABI rules description
+// 'config' and analyzes the function to determine how its parameters
+// and results will be passed (in registers or on the stack), returning
+// an ABIParamResultInfo object that holds the results of the analysis.
+func ABIAnalyze(t *types.Type, config ABIConfig) ABIParamResultInfo {
+	setup()
+	s := assignState{
+		rTotal: config.regAmounts,
+	}
+	result := ABIParamResultInfo{config: config}
+
+	// Receiver
+	ft := t.FuncType()
+	if t.NumRecvs() != 0 {
+		rfsl := ft.Receiver.FieldSlice()
+		result.inparams = append(result.inparams,
+			s.assignParamOrReturn(rfsl[0].Type))
+	}
+
+	// Inputs
+	ifsl := ft.Params.FieldSlice()
+	for _, f := range ifsl {
+		result.inparams = append(result.inparams,
+			s.assignParamOrReturn(f.Type))
+	}
+	s.stackOffset = Rnd(s.stackOffset, int64(Widthreg))
+
+	// Record number of spill slots needed.
+	result.intSpillSlots = s.rUsed.intRegs
+	result.floatSpillSlots = s.rUsed.floatRegs
+
+	// Outputs
+	s.rUsed = RegAmounts{}
+	ofsl := ft.Results.FieldSlice()
+	for _, f := range ofsl {
+		result.outparams = append(result.outparams, s.assignParamOrReturn(f.Type))
+	}
+	result.offsetToSpillArea = s.stackOffset
+
+	return result
+}
+
+//......................................................................
+//
+// Non-public portions.
+
+// regString produces a human-readable version of a RegIndex.
+func (c *RegAmounts) regString(r RegIndex) string {
+	if int(r) < c.intRegs {
+		return fmt.Sprintf("I%d", int(r))
+	} else if int(r) < c.intRegs+c.floatRegs {
+		return fmt.Sprintf("F%d", int(r)-c.intRegs)
+	}
+	return fmt.Sprintf("%d", r)
+}
+
+// toString method renders an ABIParamAssignment in human-readable
+// form, suitable for debugging or unit testing.
+func (ri *ABIParamAssignment) toString(config ABIConfig) string {
+	regs := "R{"
+	for _, r := range ri.Registers {
+		regs += " " + config.regAmounts.regString(r)
+	}
+	return fmt.Sprintf("%s } offset: %d typ: %v", regs, ri.Offset, ri.Type)
+}
+
+// String method renders an ABIParamResultInfo in human-readable
+// form, suitable for debugging or unit testing.
+func (ri *ABIParamResultInfo) String() string {
+	res := ""
+	for k, p := range ri.inparams {
+		res += fmt.Sprintf("IN %d: %s\n", k, p.toString(ri.config))
+	}
+	for k, r := range ri.outparams {
+		res += fmt.Sprintf("OUT %d: %s\n", k, r.toString(ri.config))
+	}
+	res += fmt.Sprintf("intspill: %d floatspill: %d offsetToSpillArea: %d",
+		ri.intSpillSlots, ri.floatSpillSlots, ri.offsetToSpillArea)
+	return res
+}
+
+// assignState holds intermediate state during the register assigning process
+// for a given function signature.
+type assignState struct {
+	rTotal      RegAmounts // total reg amounts from ABI rules
+	rUsed       RegAmounts // regs used by params completely assigned so far
+	pUsed       RegAmounts // regs used by the current param (or pieces therein)
+	stackOffset int64      // current stack offset
+}
+
+// stackSlot returns a stack offset for a param or result of the
+// specified type.
+func (state *assignState) stackSlot(t *types.Type) int64 {
+	if t.Align > 0 {
+		state.stackOffset = Rnd(state.stackOffset, int64(t.Align))
+	}
+	rv := state.stackOffset
+	state.stackOffset += t.Width
+	return rv
+}
+
+// allocateRegs returns a set of register indices for a parameter or result
+// that we've just determined to be register-assignable. The number of registers
+// needed is assumed to be stored in state.pUsed.
+func (state *assignState) allocateRegs() []RegIndex {
+	regs := []RegIndex{}
+
+	// integer
+	for r := state.rUsed.intRegs; r < state.rUsed.intRegs+state.pUsed.intRegs; r++ {
+		regs = append(regs, RegIndex(r))
+	}
+	state.rUsed.intRegs += state.pUsed.intRegs
+
+	// floating
+	for r := state.rUsed.floatRegs; r < state.rUsed.floatRegs+state.pUsed.floatRegs; r++ {
+		regs = append(regs, RegIndex(r+state.rTotal.intRegs))
+	}
+	state.rUsed.floatRegs += state.pUsed.floatRegs
+
+	return regs
+}
+
+// regAllocate creates a register ABIParamAssignment object for a param
+// or result with the specified type, as a final step (this assumes
+// that all of the safety/suitability analysis is complete).
+func (state *assignState) regAllocate(t *types.Type) ABIParamAssignment {
+	return ABIParamAssignment{
+		Type:      t,
+		Registers: state.allocateRegs(),
+		Offset:    -1,
+	}
+}
+
+// stackAllocate creates a stack memory ABIParamAssignment object for
+// a param or result with the specified type, as a final step (this
+// assumes that all of the safety/suitability analysis is complete).
+func (state *assignState) stackAllocate(t *types.Type) ABIParamAssignment {
+	return ABIParamAssignment{
+		Type:   t,
+		Offset: int32(state.stackSlot(t)),
+	}
+}
+
+// intUsed returns the number of integer registers consumed
+// at a given point within an assignment stage.
+func (state *assignState) intUsed() int {
+	return state.rUsed.intRegs + state.pUsed.intRegs
+}
+
+// floatUsed returns the number of floating point registers consumed at
+// a given point within an assignment stage.
+func (state *assignState) floatUsed() int {
+	return state.rUsed.floatRegs + state.pUsed.floatRegs
+}
+
+// regassignIntegral examines a param/result of integral type 't' to
+// determine whether it can be register-assigned.
Returns TRUE if we +// can register allocate, FALSE otherwise (and updates state +// accordingly). +func (state *assignState) regassignIntegral(t *types.Type) bool { + regsNeeded := int(Rnd(t.Width, int64(Widthptr)) / int64(Widthptr)) + + // Floating point and complex. + if t.IsFloat() || t.IsComplex() { + if regsNeeded+state.floatUsed() > state.rTotal.floatRegs { + // not enough regs + return false + } + state.pUsed.floatRegs += regsNeeded + return true + } + + // Non-floating point + if regsNeeded+state.intUsed() > state.rTotal.intRegs { + // not enough regs + return false + } + state.pUsed.intRegs += regsNeeded + return true +} + +// regassignArray processes an array type (or array component within some +// other enclosing type) to determine if it can be register assigned. +// Returns TRUE if we can register allocate, FALSE otherwise. +func (state *assignState) regassignArray(t *types.Type) bool { + + nel := t.NumElem() + if nel == 0 { + return true + } + if nel > 1 { + // Not an array of length 1: stack assign + return false + } + // Visit element + return state.regassign(t.Elem()) +} + +// regassignStruct processes a struct type (or struct component within +// some other enclosing type) to determine if it can be register +// assigned. Returns TRUE if we can register allocate, FALSE otherwise. +func (state *assignState) regassignStruct(t *types.Type) bool { + for _, field := range t.FieldSlice() { + if !state.regassign(field.Type) { + return false + } + } + return true +} + +// synthOnce ensures that we only create the synth* fake types once. +var synthOnce sync.Once + +// synthSlice, synthString, and syncIface are synthesized struct types +// meant to capture the underlying implementations of string/slice/interface. +var synthSlice *types.Type +var synthString *types.Type +var synthIface *types.Type + +// setup performs setup for the register assignment utilities, manufacturing +// a small set of synthesized types that we'll need along the way. +func setup() { + synthOnce.Do(func() { + fname := types.BuiltinPkg.Lookup + nxp := src.NoXPos + unsp := types.Types[types.TUNSAFEPTR] + ui := types.Types[types.TUINTPTR] + synthSlice = types.NewStruct(types.NoPkg, []*types.Field{ + types.NewField(nxp, fname("ptr"), unsp), + types.NewField(nxp, fname("len"), ui), + types.NewField(nxp, fname("cap"), ui), + }) + synthString = types.NewStruct(types.NoPkg, []*types.Field{ + types.NewField(nxp, fname("data"), unsp), + types.NewField(nxp, fname("len"), ui), + }) + synthIface = types.NewStruct(types.NoPkg, []*types.Field{ + types.NewField(nxp, fname("f1"), unsp), + types.NewField(nxp, fname("f2"), unsp), + }) + }) +} + +// regassign examines a given param type (or component within some +// composite) to determine if it can be register assigned. Returns +// TRUE if we can register allocate, FALSE otherwise. +func (state *assignState) regassign(pt *types.Type) bool { + typ := pt.Kind() + if pt.IsScalar() || pt.IsPtrShaped() { + return state.regassignIntegral(pt) + } + switch typ { + case types.TARRAY: + return state.regassignArray(pt) + case types.TSTRUCT: + return state.regassignStruct(pt) + case types.TSLICE: + return state.regassignStruct(synthSlice) + case types.TSTRING: + return state.regassignStruct(synthString) + case types.TINTER: + return state.regassignStruct(synthIface) + default: + panic("not expected") + } +} + +// assignParamOrReturn processes a given receiver, param, or result +// of type 'pt' to determine whether it can be register assigned. 
+// The result of the analysis is recorded in the result +// ABIParamResultInfo held in 'state'. +func (state *assignState) assignParamOrReturn(pt *types.Type) ABIParamAssignment { + state.pUsed = RegAmounts{} + if pt.Width == types.BADWIDTH { + panic("should never happen") + } else if pt.Width == 0 { + return state.stackAllocate(pt) + } else if state.regassign(pt) { + return state.regAllocate(pt) + } else { + return state.stackAllocate(pt) + } +} diff --git a/src/cmd/compile/internal/gc/abiutils_test.go b/src/cmd/compile/internal/gc/abiutils_test.go new file mode 100644 index 0000000000000..16bd787bea922 --- /dev/null +++ b/src/cmd/compile/internal/gc/abiutils_test.go @@ -0,0 +1,270 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gc + +import ( + "bufio" + "cmd/compile/internal/base" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/x86" + "cmd/internal/src" + "os" + "testing" +) + +// AMD64 registers available: +// - integer: RAX, RBX, RCX, RDI, RSI, R8, R9, r10, R11 +// - floating point: X0 - X14 +var configAMD64 = ABIConfig{ + regAmounts: RegAmounts{ + intRegs: 9, + floatRegs: 15, + }, +} + +func TestMain(m *testing.M) { + thearch.LinkArch = &x86.Linkamd64 + thearch.REGSP = x86.REGSP + thearch.MAXWIDTH = 1 << 50 + base.Ctxt = obj.Linknew(thearch.LinkArch) + base.Ctxt.DiagFunc = base.Errorf + base.Ctxt.DiagFlush = base.FlushErrors + base.Ctxt.Bso = bufio.NewWriter(os.Stdout) + Widthptr = thearch.LinkArch.PtrSize + Widthreg = thearch.LinkArch.RegSize + initializeTypesPackage() + os.Exit(m.Run()) +} + +func TestABIUtilsBasic1(t *testing.T) { + + // func(x int32) int32 + i32 := types.Types[types.TINT32] + ft := mkFuncType(nil, []*types.Type{i32}, []*types.Type{i32}) + + // expected results + exp := makeExpectedDump(` + IN 0: R{ I0 } offset: -1 typ: int32 + OUT 0: R{ I0 } offset: -1 typ: int32 + intspill: 1 floatspill: 0 offsetToSpillArea: 0 +`) + + abitest(t, ft, exp) +} + +func TestABIUtilsBasic2(t *testing.T) { + // func(x int32, y float64) (int32, float64, float64) + i8 := types.Types[types.TINT8] + i16 := types.Types[types.TINT16] + i32 := types.Types[types.TINT32] + i64 := types.Types[types.TINT64] + f32 := types.Types[types.TFLOAT32] + f64 := types.Types[types.TFLOAT64] + c64 := types.Types[types.TCOMPLEX64] + c128 := types.Types[types.TCOMPLEX128] + ft := mkFuncType(nil, + []*types.Type{ + i8, i16, i32, i64, + f32, f32, f64, f64, + i8, i16, i32, i64, + f32, f32, f64, f64, + c128, c128, c128, c128, c64, + i8, i16, i32, i64, + i8, i16, i32, i64}, + []*types.Type{i32, f64, f64}) + exp := makeExpectedDump(` + IN 0: R{ I0 } offset: -1 typ: int8 + IN 1: R{ I1 } offset: -1 typ: int16 + IN 2: R{ I2 } offset: -1 typ: int32 + IN 3: R{ I3 } offset: -1 typ: int64 + IN 4: R{ F0 } offset: -1 typ: float32 + IN 5: R{ F1 } offset: -1 typ: float32 + IN 6: R{ F2 } offset: -1 typ: float64 + IN 7: R{ F3 } offset: -1 typ: float64 + IN 8: R{ I4 } offset: -1 typ: int8 + IN 9: R{ I5 } offset: -1 typ: int16 + IN 10: R{ I6 } offset: -1 typ: int32 + IN 11: R{ I7 } offset: -1 typ: int64 + IN 12: R{ F4 } offset: -1 typ: float32 + IN 13: R{ F5 } offset: -1 typ: float32 + IN 14: R{ F6 } offset: -1 typ: float64 + IN 15: R{ F7 } offset: -1 typ: float64 + IN 16: R{ F8 F9 } offset: -1 typ: complex128 + IN 17: R{ F10 F11 } offset: -1 typ: complex128 + IN 18: R{ F12 F13 } offset: -1 typ: complex128 + IN 19: R{ } offset: 0 typ: complex128 + IN 20: R{ F14 } offset: -1 typ: 
complex64 + IN 21: R{ I8 } offset: -1 typ: int8 + IN 22: R{ } offset: 16 typ: int16 + IN 23: R{ } offset: 20 typ: int32 + IN 24: R{ } offset: 24 typ: int64 + IN 25: R{ } offset: 32 typ: int8 + IN 26: R{ } offset: 34 typ: int16 + IN 27: R{ } offset: 36 typ: int32 + IN 28: R{ } offset: 40 typ: int64 + OUT 0: R{ I0 } offset: -1 typ: int32 + OUT 1: R{ F0 } offset: -1 typ: float64 + OUT 2: R{ F1 } offset: -1 typ: float64 + intspill: 9 floatspill: 15 offsetToSpillArea: 48 +`) + + abitest(t, ft, exp) +} + +func TestABIUtilsArrays(t *testing.T) { + i32 := types.Types[types.TINT32] + ae := types.NewArray(i32, 0) + a1 := types.NewArray(i32, 1) + a2 := types.NewArray(i32, 2) + aa1 := types.NewArray(a1, 1) + ft := mkFuncType(nil, []*types.Type{a1, ae, aa1, a2}, + []*types.Type{a2, a1, ae, aa1}) + + exp := makeExpectedDump(` + IN 0: R{ I0 } offset: -1 typ: [1]int32 + IN 1: R{ } offset: 0 typ: [0]int32 + IN 2: R{ I1 } offset: -1 typ: [1][1]int32 + IN 3: R{ } offset: 0 typ: [2]int32 + OUT 0: R{ } offset: 8 typ: [2]int32 + OUT 1: R{ I0 } offset: -1 typ: [1]int32 + OUT 2: R{ } offset: 16 typ: [0]int32 + OUT 3: R{ I1 } offset: -1 typ: [1][1]int32 + intspill: 2 floatspill: 0 offsetToSpillArea: 16 +`) + + abitest(t, ft, exp) +} + +func TestABIUtilsStruct1(t *testing.T) { + i8 := types.Types[types.TINT8] + i16 := types.Types[types.TINT16] + i32 := types.Types[types.TINT32] + i64 := types.Types[types.TINT64] + s := mkstruct([]*types.Type{i8, i8, mkstruct([]*types.Type{}), i8, i16}) + ft := mkFuncType(nil, []*types.Type{i8, s, i64}, + []*types.Type{s, i8, i32}) + + exp := makeExpectedDump(` + IN 0: R{ I0 } offset: -1 typ: int8 + IN 1: R{ I1 I2 I3 I4 } offset: -1 typ: struct { int8; int8; struct {}; int8; int16 } + IN 2: R{ I5 } offset: -1 typ: int64 + OUT 0: R{ I0 I1 I2 I3 } offset: -1 typ: struct { int8; int8; struct {}; int8; int16 } + OUT 1: R{ I4 } offset: -1 typ: int8 + OUT 2: R{ I5 } offset: -1 typ: int32 + intspill: 6 floatspill: 0 offsetToSpillArea: 0 +`) + + abitest(t, ft, exp) +} + +func TestABIUtilsStruct2(t *testing.T) { + f64 := types.Types[types.TFLOAT64] + i64 := types.Types[types.TINT64] + s := mkstruct([]*types.Type{i64, mkstruct([]*types.Type{})}) + fs := mkstruct([]*types.Type{f64, s, mkstruct([]*types.Type{})}) + ft := mkFuncType(nil, []*types.Type{s, s, fs}, + []*types.Type{fs, fs}) + + exp := makeExpectedDump(` + IN 0: R{ I0 } offset: -1 typ: struct { int64; struct {} } + IN 1: R{ I1 } offset: -1 typ: struct { int64; struct {} } + IN 2: R{ I2 F0 } offset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} } + OUT 0: R{ I0 F0 } offset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} } + OUT 1: R{ I1 F1 } offset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} } + intspill: 3 floatspill: 1 offsetToSpillArea: 0 +`) + + abitest(t, ft, exp) +} + +func TestABIUtilsSliceString(t *testing.T) { + i32 := types.Types[types.TINT32] + sli32 := types.NewSlice(i32) + str := types.New(types.TSTRING) + i8 := types.Types[types.TINT8] + i64 := types.Types[types.TINT64] + ft := mkFuncType(nil, []*types.Type{sli32, i8, sli32, i8, str, i8, i64, sli32}, + []*types.Type{str, i64, str, sli32}) + + exp := makeExpectedDump(` + IN 0: R{ I0 I1 I2 } offset: -1 typ: []int32 + IN 1: R{ I3 } offset: -1 typ: int8 + IN 2: R{ I4 I5 I6 } offset: -1 typ: []int32 + IN 3: R{ I7 } offset: -1 typ: int8 + IN 4: R{ } offset: 0 typ: string + IN 5: R{ I8 } offset: -1 typ: int8 + IN 6: R{ } offset: 16 typ: int64 + IN 7: R{ } offset: 24 typ: []int32 + OUT 0: R{ I0 I1 } offset: -1 typ: 
string + OUT 1: R{ I2 } offset: -1 typ: int64 + OUT 2: R{ I3 I4 } offset: -1 typ: string + OUT 3: R{ I5 I6 I7 } offset: -1 typ: []int32 + intspill: 9 floatspill: 0 offsetToSpillArea: 48 +`) + + abitest(t, ft, exp) +} + +func TestABIUtilsMethod(t *testing.T) { + i16 := types.Types[types.TINT16] + i64 := types.Types[types.TINT64] + f64 := types.Types[types.TFLOAT64] + + s1 := mkstruct([]*types.Type{i16, i16, i16}) + ps1 := types.NewPtr(s1) + a7 := types.NewArray(ps1, 7) + ft := mkFuncType(s1, []*types.Type{ps1, a7, f64, i16, i16, i16}, + []*types.Type{a7, f64, i64}) + + exp := makeExpectedDump(` + IN 0: R{ I0 I1 I2 } offset: -1 typ: struct { int16; int16; int16 } + IN 1: R{ I3 } offset: -1 typ: *struct { int16; int16; int16 } + IN 2: R{ } offset: 0 typ: [7]*struct { int16; int16; int16 } + IN 3: R{ F0 } offset: -1 typ: float64 + IN 4: R{ I4 } offset: -1 typ: int16 + IN 5: R{ I5 } offset: -1 typ: int16 + IN 6: R{ I6 } offset: -1 typ: int16 + OUT 0: R{ } offset: 56 typ: [7]*struct { int16; int16; int16 } + OUT 1: R{ F0 } offset: -1 typ: float64 + OUT 2: R{ I0 } offset: -1 typ: int64 + intspill: 7 floatspill: 1 offsetToSpillArea: 112 +`) + + abitest(t, ft, exp) +} + +func TestABIUtilsInterfaces(t *testing.T) { + ei := types.Types[types.TINTER] // interface{} + pei := types.NewPtr(ei) // *interface{} + fldt := mkFuncType(types.FakeRecvType(), []*types.Type{}, + []*types.Type{types.UntypedString}) + field := types.NewField(src.NoXPos, nil, fldt) + // interface{ ...() string } + nei := types.NewInterface(types.LocalPkg, []*types.Field{field}) + + i16 := types.Types[types.TINT16] + tb := types.Types[types.TBOOL] + s1 := mkstruct([]*types.Type{i16, i16, tb}) + + ft := mkFuncType(nil, []*types.Type{s1, ei, ei, nei, pei, nei, i16}, + []*types.Type{ei, nei, pei}) + + exp := makeExpectedDump(` + IN 0: R{ I0 I1 I2 } offset: -1 typ: struct { int16; int16; bool } + IN 1: R{ I3 I4 } offset: -1 typ: interface {} + IN 2: R{ I5 I6 } offset: -1 typ: interface {} + IN 3: R{ I7 I8 } offset: -1 typ: interface { () untyped string } + IN 4: R{ } offset: 0 typ: *interface {} + IN 5: R{ } offset: 8 typ: interface { () untyped string } + IN 6: R{ } offset: 24 typ: int16 + OUT 0: R{ I0 I1 } offset: -1 typ: interface {} + OUT 1: R{ I2 I3 } offset: -1 typ: interface { () untyped string } + OUT 2: R{ I4 } offset: -1 typ: *interface {} + intspill: 9 floatspill: 0 offsetToSpillArea: 32 +`) + + abitest(t, ft, exp) +} diff --git a/src/cmd/compile/internal/gc/abiutilsaux_test.go b/src/cmd/compile/internal/gc/abiutilsaux_test.go new file mode 100644 index 0000000000000..d90d1d45a0574 --- /dev/null +++ b/src/cmd/compile/internal/gc/abiutilsaux_test.go @@ -0,0 +1,157 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gc + +// This file contains utility routines and harness infrastructure used +// by the ABI tests in "abiutils_test.go". + +import ( + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/src" + "fmt" + "strings" + "testing" + "text/scanner" +) + +func mkParamResultField(t *types.Type, s *types.Sym, which ir.Class) *types.Field { + field := types.NewField(src.NoXPos, s, t) + n := NewName(s) + n.SetClass(which) + field.Nname = n + n.SetType(t) + return field +} + +// mkstruct is a helper routine to create a struct type with fields +// of the types specified in 'fieldtypes'. 
+func mkstruct(fieldtypes []*types.Type) *types.Type {
+	fields := make([]*types.Field, len(fieldtypes))
+	for k, t := range fieldtypes {
+		if t == nil {
+			panic("bad -- field has no type")
+		}
+		f := types.NewField(src.NoXPos, nil, t)
+		fields[k] = f
+	}
+	s := types.NewStruct(types.LocalPkg, fields)
+	return s
+}
+
+func mkFuncType(rcvr *types.Type, ins []*types.Type, outs []*types.Type) *types.Type {
+	q := lookup("?")
+	inf := []*types.Field{}
+	for _, it := range ins {
+		inf = append(inf, mkParamResultField(it, q, ir.PPARAM))
+	}
+	outf := []*types.Field{}
+	for _, ot := range outs {
+		outf = append(outf, mkParamResultField(ot, q, ir.PPARAMOUT))
+	}
+	var rf *types.Field
+	if rcvr != nil {
+		rf = mkParamResultField(rcvr, q, ir.PPARAM)
+	}
+	return types.NewSignature(types.LocalPkg, rf, inf, outf)
+}
+
+type expectedDump struct {
+	dump string
+	file string
+	line int
+}
+
+func tokenize(src string) []string {
+	var s scanner.Scanner
+	s.Init(strings.NewReader(src))
+	res := []string{}
+	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
+		res = append(res, s.TokenText())
+	}
+	return res
+}
+
+func verifyParamResultOffset(t *testing.T, f *types.Field, r ABIParamAssignment, which string, idx int) int {
+	n := ir.AsNode(f.Nname)
+	if n == nil {
+		panic("not expected")
+	}
+	if n.Offset() != int64(r.Offset) {
+		t.Errorf("%s %d: got offset %d wanted %d t=%v",
+			which, idx, r.Offset, n.Offset(), f.Type)
+		return 1
+	}
+	return 0
+}
+
+func makeExpectedDump(e string) expectedDump {
+	return expectedDump{dump: e}
+}
+
+func difftokens(atoks []string, etoks []string) string {
+	if len(atoks) != len(etoks) {
+		return fmt.Sprintf("expected %d tokens got %d",
+			len(etoks), len(atoks))
+	}
+	for i := 0; i < len(etoks); i++ {
+		if etoks[i] == atoks[i] {
+			continue
+		}
+
+		return fmt.Sprintf("diff at token %d: expected %q got %q",
+			i, etoks[i], atoks[i])
+	}
+	return ""
+}
+
+func abitest(t *testing.T, ft *types.Type, exp expectedDump) {
+
+	dowidth(ft)
+
+	// Analyze with full set of registers.
+	regRes := ABIAnalyze(ft, configAMD64)
+	regResString := strings.TrimSpace(regRes.String())
+
+	// Check results.
+	reason := difftokens(tokenize(regResString), tokenize(exp.dump))
+	if reason != "" {
+		t.Errorf("\nexpected:\n%s\ngot:\n%s\nreason: %s",
+			strings.TrimSpace(exp.dump), regResString, reason)
+	}
+
+	// Analyze again with empty register set.
+	empty := ABIConfig{}
+	emptyRes := ABIAnalyze(ft, empty)
+	emptyResString := emptyRes.String()
+
+	// Walk the results and make sure the offsets assigned match
+	// up with those assigned by dowidth. This checks to make sure that
+	// when we have no available registers the ABI assignment degenerates
+	// back to the original ABI0.
+ + // receiver + failed := 0 + rfsl := ft.Recvs().Fields().Slice() + poff := 0 + if len(rfsl) != 0 { + failed |= verifyParamResultOffset(t, rfsl[0], emptyRes.inparams[0], "receiver", 0) + poff = 1 + } + // params + pfsl := ft.Params().Fields().Slice() + for k, f := range pfsl { + verifyParamResultOffset(t, f, emptyRes.inparams[k+poff], "param", k) + } + // results + ofsl := ft.Results().Fields().Slice() + for k, f := range ofsl { + failed |= verifyParamResultOffset(t, f, emptyRes.outparams[k], "result", k) + } + + if failed != 0 { + t.Logf("emptyres:\n%s\n", emptyResString) + } +} From 617383377f0e870a9258230cf29fd11097b9229a Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Sat, 5 Dec 2020 17:25:28 -0800 Subject: [PATCH 158/474] [dev.regabi] cmd/compile: reorg generated array hash loop The ORANGE structure that is being replaced by this CL was causing trouble with another CL (CL 275695). The problem occurs if you typecheck i in the middle of generating the body of the ORANGE loop. If you typecheck i, it ends up typechecking its definition, which secretly typechecks the containing ORANGE. If you then add other items to the ORANGE body, those items will never get typechecked, as the ORANGE is already marked as typechecked. Instead, just steal the loop we use for the equality code. Might as well use the same pattern in both places. Change-Id: Idb1ac77881d2cc9da08c7437a652b50d3ee45e2e Reviewed-on: https://go-review.googlesource.com/c/go/+/275713 Trust: Keith Randall Trust: Dan Scales Run-TryBot: Keith Randall TryBot-Result: Go Bot Reviewed-by: Dan Scales --- src/cmd/compile/internal/gc/alg.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index c786a27415c38..ea57e7398dcdc 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -310,13 +310,13 @@ func genhash(t *types.Type) *obj.LSym { // pure memory. hashel := hashfor(t.Elem()) - n := ir.Nod(ir.ORANGE, nil, ir.Nod(ir.ODEREF, np, nil)) - ni := ir.Node(NewName(lookup("i"))) - ni.SetType(types.Types[types.TINT]) - n.PtrList().Set1(ni) - n.SetColas(true) - colasdefn(n.List().Slice(), n) - ni = n.List().First() + // for i := 0; i < nelem; i++ + ni := temp(types.Types[types.TINT]) + init := ir.Nod(ir.OAS, ni, nodintconst(0)) + cond := ir.Nod(ir.OLT, ni, nodintconst(t.NumElem())) + post := ir.Nod(ir.OAS, ni, ir.Nod(ir.OADD, ni, nodintconst(1))) + loop := ir.Nod(ir.OFOR, cond, post) + loop.PtrInit().Append(init) // h = hashel(&p[i], h) call := ir.Nod(ir.OCALL, hashel, nil) @@ -326,9 +326,9 @@ func genhash(t *types.Type) *obj.LSym { na := ir.Nod(ir.OADDR, nx, nil) call.PtrList().Append(na) call.PtrList().Append(nh) - n.PtrBody().Append(ir.Nod(ir.OAS, nh, call)) + loop.PtrBody().Append(ir.Nod(ir.OAS, nh, call)) - fn.PtrBody().Append(n) + fn.PtrBody().Append(loop) case types.TSTRUCT: // Walk the struct using memhash for runs of AMEM From fea898a4b0f02cee08ea978eb5ce541a85783690 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Sat, 28 Nov 2020 15:53:32 -0800 Subject: [PATCH 159/474] [dev.regabi] cmd/compile: intercept the making of OADDR nodes This is a mechanical change to intercept the construction of all OADDR nodes. We will use the new nodAddr and nodAddrAt functions to compute the Addrtaken bit. 
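The helpers themselves are small wrappers. A sketch of their likely
shape, inferred from the call sites in this patch (the actual
definitions land in subr.go, per the diffstat below):

	// nodAddrAt returns a node representing &n, at position pos.
	func nodAddrAt(pos src.XPos, n ir.Node) ir.Node {
		return ir.NodAt(pos, ir.OADDR, n, nil)
	}

	// nodAddr returns a node representing &n at the current
	// source position.
	func nodAddr(n ir.Node) ir.Node {
		return nodAddrAt(base.Pos, n)
	}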
Change-Id: I90ee3acb8e32540a198a9999284573418729f422 Reviewed-on: https://go-review.googlesource.com/c/go/+/275694 Run-TryBot: Keith Randall TryBot-Result: Go Bot Trust: Keith Randall Trust: Dan Scales Reviewed-by: Dan Scales --- src/cmd/compile/internal/gc/alg.go | 10 +++--- src/cmd/compile/internal/gc/closure.go | 10 +++--- src/cmd/compile/internal/gc/iimport.go | 4 ++- src/cmd/compile/internal/gc/inl.go | 2 +- src/cmd/compile/internal/gc/range.go | 8 ++--- src/cmd/compile/internal/gc/reflect.go | 6 ++-- src/cmd/compile/internal/gc/select.go | 14 ++++---- src/cmd/compile/internal/gc/sinit.go | 6 ++-- src/cmd/compile/internal/gc/subr.go | 10 +++++- src/cmd/compile/internal/gc/typecheck.go | 6 ++-- src/cmd/compile/internal/gc/walk.go | 46 ++++++++++++------------ 11 files changed, 66 insertions(+), 56 deletions(-) diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index ea57e7398dcdc..7540944201723 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -323,7 +323,7 @@ func genhash(t *types.Type) *obj.LSym { nx := ir.Nod(ir.OINDEX, np, ni) nx.SetBounded(true) - na := ir.Nod(ir.OADDR, nx, nil) + na := nodAddr(nx) call.PtrList().Append(na) call.PtrList().Append(nh) loop.PtrBody().Append(ir.Nod(ir.OAS, nh, call)) @@ -347,7 +347,7 @@ func genhash(t *types.Type) *obj.LSym { hashel := hashfor(f.Type) call := ir.Nod(ir.OCALL, hashel, nil) nx := nodSym(ir.OXDOT, np, f.Sym) // TODO: fields from other packages? - na := ir.Nod(ir.OADDR, nx, nil) + na := nodAddr(nx) call.PtrList().Append(na) call.PtrList().Append(nh) fn.PtrBody().Append(ir.Nod(ir.OAS, nh, call)) @@ -362,7 +362,7 @@ func genhash(t *types.Type) *obj.LSym { hashel := hashmem(f.Type) call := ir.Nod(ir.OCALL, hashel, nil) nx := nodSym(ir.OXDOT, np, f.Sym) // TODO: fields from other packages? - na := ir.Nod(ir.OADDR, nx, nil) + na := nodAddr(nx) call.PtrList().Append(na) call.PtrList().Append(nh) call.PtrList().Append(nodintconst(size)) @@ -868,8 +868,8 @@ func eqinterface(s, t ir.Node) (eqtab, eqdata ir.Node) { // eqmem returns the node // memequal(&p.field, &q.field [, size]) func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node { - nx := ir.Nod(ir.OADDR, nodSym(ir.OXDOT, p, field), nil) - ny := ir.Nod(ir.OADDR, nodSym(ir.OXDOT, q, field), nil) + nx := nodAddr(nodSym(ir.OXDOT, p, field)) + ny := nodAddr(nodSym(ir.OXDOT, q, field)) nx = typecheck(nx, ctxExpr) ny = typecheck(ny, ctxExpr) diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index b56e255d10a3f..a3d8a46977cd2 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -199,7 +199,7 @@ func capturevars(fn *ir.Func) { v.SetByval(true) } else { outermost.Name().SetAddrtaken(true) - outer = ir.Nod(ir.OADDR, outer, nil) + outer = nodAddr(outer) } if base.Flag.LowerM > 1 { @@ -309,7 +309,7 @@ func transformclosure(fn *ir.Func) { v.Heapaddr = addr var src ir.Node = cr if v.Byval() { - src = ir.Nod(ir.OADDR, cr, nil) + src = nodAddr(cr) } body = append(body, ir.Nod(ir.OAS, addr, src)) } @@ -396,7 +396,7 @@ func walkclosure(clo ir.Node, init *ir.Nodes) ir.Node { clos.SetEsc(clo.Esc()) clos.PtrList().Set(append([]ir.Node{ir.Nod(ir.OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...)) - clos = ir.Nod(ir.OADDR, clos, nil) + clos = nodAddr(clos) clos.SetEsc(clo.Esc()) // Force type conversion from *struct to the func type. 
@@ -475,7 +475,7 @@ func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) *ir.Func { body = append(body, ir.Nod(ir.OAS, ptr, cr)) } else { ptr.SetType(types.NewPtr(rcvrtype)) - body = append(body, ir.Nod(ir.OAS, ptr, ir.Nod(ir.OADDR, cr, nil))) + body = append(body, ir.Nod(ir.OAS, ptr, nodAddr(cr))) } call := ir.Nod(ir.OCALL, nodSym(ir.OXDOT, ptr, meth), nil) @@ -544,7 +544,7 @@ func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node { clos.SetEsc(n.Esc()) clos.PtrList().Set2(ir.Nod(ir.OCFUNC, n.Func().Nname, nil), n.Left()) - clos = ir.Nod(ir.OADDR, clos, nil) + clos = nodAddr(clos) clos.SetEsc(n.Esc()) // Force type conversion from *struct to the func type. diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 3c9693e5fce05..194c7427f39ce 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -943,8 +943,10 @@ func (r *importReader) node() ir.Node { return n // unary expressions - case ir.OPLUS, ir.ONEG, ir.OADDR, ir.OBITNOT, ir.ODEREF, ir.ONOT, ir.ORECV: + case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ODEREF, ir.ONOT, ir.ORECV: return ir.NodAt(r.pos(), op, r.expr(), nil) + case ir.OADDR: + return nodAddrAt(r.pos(), r.expr()) // binary expressions case ir.OADD, ir.OAND, ir.OANDAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT, diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 37e5167c25db7..3c17f7d87f48b 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -878,7 +878,7 @@ func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool, addr.SetType(types.NewPtr(v.Type())) ia := typecheck(inlvar(addr), ctxExpr) ninit.Append(ir.Nod(ir.ODCL, ia, nil)) - ninit.Append(typecheck(ir.Nod(ir.OAS, ia, ir.Nod(ir.OADDR, o, nil)), ctxStmt)) + ninit.Append(typecheck(ir.Nod(ir.OAS, ia, nodAddr(o)), ctxStmt)) inlvars[addr] = ia // When capturing by reference, all occurrence of the captured var diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index 8025119c5e250..2589da7b5dca5 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -274,7 +274,7 @@ func walkrange(nrange ir.Node) ir.Node { hp := temp(types.NewPtr(nrange.Type().Elem())) tmp := ir.Nod(ir.OINDEX, ha, nodintconst(0)) tmp.SetBounded(true) - init = append(init, ir.Nod(ir.OAS, hp, ir.Nod(ir.OADDR, tmp, nil))) + init = append(init, ir.Nod(ir.OAS, hp, nodAddr(tmp))) // Use OAS2 to correctly handle assignments // of the form "v1, a[v1] := range". 
@@ -305,12 +305,12 @@ func walkrange(nrange ir.Node) ir.Node { fn := syslook("mapiterinit") fn = substArgTypes(fn, t.Key(), t.Elem(), th) - init = append(init, mkcall1(fn, nil, nil, typename(t), ha, ir.Nod(ir.OADDR, hit, nil))) + init = append(init, mkcall1(fn, nil, nil, typename(t), ha, nodAddr(hit))) nfor.SetLeft(ir.Nod(ir.ONE, nodSym(ir.ODOT, hit, keysym), nodnil())) fn = syslook("mapiternext") fn = substArgTypes(fn, th) - nfor.SetRight(mkcall1(fn, nil, nil, ir.Nod(ir.OADDR, hit, nil))) + nfor.SetRight(mkcall1(fn, nil, nil, nodAddr(hit))) key := nodSym(ir.ODOT, hit, keysym) key = ir.Nod(ir.ODEREF, key, nil) @@ -572,7 +572,7 @@ func arrayClear(loop, v1, v2, a ir.Node) ir.Node { tmp := ir.Nod(ir.OINDEX, a, nodintconst(0)) tmp.SetBounded(true) - tmp = ir.Nod(ir.OADDR, tmp, nil) + tmp = nodAddr(tmp) tmp = convnop(tmp, types.Types[types.TUNSAFEPTR]) n.PtrBody().Append(ir.Nod(ir.OAS, hp, tmp)) diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 9b8f26a84ba55..cfff1baad69d0 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -996,7 +996,7 @@ func typename(t *types.Type) ir.Node { s.Def = n } - n := ir.Nod(ir.OADDR, ir.AsNode(s.Def), nil) + n := nodAddr(ir.AsNode(s.Def)) n.SetType(types.NewPtr(s.Def.Type())) n.SetTypecheck(1) return n @@ -1016,7 +1016,7 @@ func itabname(t, itype *types.Type) ir.Node { itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()}) } - n := ir.Nod(ir.OADDR, ir.AsNode(s.Def), nil) + n := nodAddr(ir.AsNode(s.Def)) n.SetType(types.NewPtr(s.Def.Type())) n.SetTypecheck(1) return n @@ -1880,7 +1880,7 @@ func zeroaddr(size int64) ir.Node { x.SetTypecheck(1) s.Def = x } - z := ir.Nod(ir.OADDR, ir.AsNode(s.Def), nil) + z := nodAddr(ir.AsNode(s.Def)) z.SetType(types.NewPtr(types.Types[types.TUINT8])) z.SetTypecheck(1) return z diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go index 3afcef69f87ed..ec59f08638aff 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/gc/select.go @@ -171,18 +171,18 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { switch n.Op() { case ir.OSEND: - n.SetRight(ir.Nod(ir.OADDR, n.Right(), nil)) + n.SetRight(nodAddr(n.Right())) n.SetRight(typecheck(n.Right(), ctxExpr)) case ir.OSELRECV: if !ir.IsBlank(n.Left()) { - n.SetLeft(ir.Nod(ir.OADDR, n.Left(), nil)) + n.SetLeft(nodAddr(n.Left())) n.SetLeft(typecheck(n.Left(), ctxExpr)) } case ir.OSELRECV2: if !ir.IsBlank(n.List().First()) { - n.List().SetIndex(0, ir.Nod(ir.OADDR, n.List().First(), nil)) + n.List().SetIndex(0, nodAddr(n.List().First())) n.List().SetIndex(0, typecheck(n.List().First(), ctxExpr)) } } @@ -225,7 +225,7 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { if ir.IsBlank(elem) { elem = nodnil() } - receivedp := ir.Nod(ir.OADDR, n.List().Second(), nil) + receivedp := nodAddr(n.List().Second()) receivedp = typecheck(receivedp, ctxExpr) call = mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch) } @@ -257,7 +257,7 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { var pc0, pcs ir.Node if base.Flag.Race { pcs = temp(types.NewArray(types.Types[types.TUINTPTR], int64(ncas))) - pc0 = typecheck(ir.Nod(ir.OADDR, ir.Nod(ir.OINDEX, pcs, nodintconst(0)), nil), ctxExpr) + pc0 = typecheck(nodAddr(ir.Nod(ir.OINDEX, pcs, nodintconst(0))), ctxExpr) } else { pc0 = nodnil() } @@ -314,7 +314,7 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { // TODO(mdempsky): There 
should be a cleaner way to // handle this. if base.Flag.Race { - r = mkcall("selectsetpc", nil, nil, ir.Nod(ir.OADDR, ir.Nod(ir.OINDEX, pcs, nodintconst(int64(i))), nil)) + r = mkcall("selectsetpc", nil, nil, nodAddr(ir.Nod(ir.OINDEX, pcs, nodintconst(int64(i))))) init = append(init, r) } } @@ -372,7 +372,7 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { // bytePtrToIndex returns a Node representing "(*byte)(&n[i])". func bytePtrToIndex(n ir.Node, i int64) ir.Node { - s := ir.Nod(ir.OADDR, ir.Nod(ir.OINDEX, n, nodintconst(i)), nil) + s := nodAddr(ir.Nod(ir.OINDEX, n, nodintconst(i))) t := types.NewPtr(types.Types[types.TUINT8]) return convnop(s, t) } diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 3c5f11c5abf1b..646c8dafce68d 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -675,7 +675,7 @@ func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) { init.Append(ir.Nod(ir.OVARDEF, x, nil)) } - a = ir.Nod(ir.OADDR, x, nil) + a = nodAddr(x) } else if n.Esc() == EscNone { a = temp(t) if vstat == nil { @@ -687,7 +687,7 @@ func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) { init.Append(ir.Nod(ir.OVARDEF, a, nil)) } - a = ir.Nod(ir.OADDR, a, nil) + a = nodAddr(a) } else { a = ir.Nod(ir.ONEW, ir.TypeNode(t), nil) } @@ -888,7 +888,7 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { if n.Right() != nil { // n.Right is stack temporary used as backing store. init.Append(ir.Nod(ir.OAS, n.Right(), nil)) // zero backing store, just in case (#18410) - r = ir.Nod(ir.OADDR, n.Right(), nil) + r = nodAddr(n.Right()) r = typecheck(r, ctxExpr) } else { r = ir.Nod(ir.ONEW, ir.TypeNode(n.Left().Type()), nil) diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index e05a124b29cef..42f8982c80600 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -135,6 +135,14 @@ func importdot(opkg *types.Pkg, pack *ir.PkgName) { } } +// nodAddr returns a node representing &n. +func nodAddr(n ir.Node) ir.Node { + return ir.Nod(ir.OADDR, n, nil) +} +func nodAddrAt(pos src.XPos, n ir.Node) ir.Node { + return ir.NodAt(pos, ir.OADDR, n, nil) +} + // newname returns a new ONAME Node associated with symbol s. func NewName(s *types.Sym) *ir.Name { n := ir.NewNameAt(base.Pos, s) @@ -1158,7 +1166,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { dot = dot.Left() // skip final .M // TODO(mdempsky): Remove dependency on dotlist. 
if !dotlist[0].field.Type.IsPtr() { - dot = ir.Nod(ir.OADDR, dot, nil) + dot = nodAddr(dot) } as := ir.Nod(ir.OAS, nthis, convnop(dot, rcvr)) fn.PtrBody().Append(as) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index f187880e28cb1..ad161b59f0a87 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -1274,7 +1274,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } - n.SetLeft(ir.Nod(ir.OADDR, n.Left(), nil)) + n.SetLeft(nodAddr(n.Left())) n.Left().SetImplicit(true) n.SetLeft(typecheck(n.Left(), ctxExpr)) l = n.Left() @@ -2462,7 +2462,7 @@ func lookdot(n ir.Node, t *types.Type, dostrcmp int) *types.Field { if !types.Identical(rcvr, tt) { if rcvr.IsPtr() && types.Identical(rcvr.Elem(), tt) { checklvalue(n.Left(), "call pointer method on") - n.SetLeft(ir.Nod(ir.OADDR, n.Left(), nil)) + n.SetLeft(nodAddr(n.Left())) n.Left().SetImplicit(true) n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr)) } else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) { @@ -2747,7 +2747,7 @@ func pushtype(n ir.Node, t *types.Type) ir.Node { // For *T, return &T{...}. n.SetRight(ir.TypeNode(t.Elem())) - n = ir.NodAt(n.Pos(), ir.OADDR, n, nil) + n = nodAddrAt(n.Pos(), n) n.SetImplicit(true) } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 390719e441a84..bbd81de40e6aa 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -615,7 +615,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return mkcall("gopanic", nil, init, n.Left()) case ir.ORECOVER: - return mkcall("gorecover", n.Type(), init, ir.Nod(ir.OADDR, nodfp, nil)) + return mkcall("gorecover", n.Type(), init, nodAddr(nodfp)) case ir.OCLOSUREREAD, ir.OCFUNC: return n @@ -694,7 +694,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // order.stmt made sure x is addressable. n.Right().SetLeft(walkexpr(n.Right().Left(), init)) - n1 := ir.Nod(ir.OADDR, n.Left(), nil) + n1 := nodAddr(n.Left()) r := n.Right().Left() // the channel return mkcall1(chanfn("chanrecv1", 2, r.Type()), nil, init, r, n1) @@ -767,7 +767,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if ir.IsBlank(n.List().First()) { n1 = nodnil() } else { - n1 = ir.Nod(ir.OADDR, n.List().First(), nil) + n1 = nodAddr(n.List().First()) } fn := chanfn("chanrecv2", 2, r.Left().Type()) ok := n.List().Second() @@ -793,7 +793,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { } else { // standard version takes key by reference // order.expr made sure key is addressable. - key = ir.Nod(ir.OADDR, r.Right(), nil) + key = nodAddr(r.Right()) } // from: @@ -846,7 +846,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { fast := mapfast(t) if fast == mapslow { // order.stmt made sure key is addressable. - key = ir.Nod(ir.OADDR, key, nil) + key = nodAddr(key) } return mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key) @@ -924,7 +924,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if value != nil { // Value is identical to n.Left. // Construct the interface directly: {type/itab, &value}. 
- l := ir.Nod(ir.OEFACE, typeword(), typecheck(ir.Nod(ir.OADDR, value, nil), ctxExpr)) + l := ir.Nod(ir.OEFACE, typeword(), typecheck(nodAddr(value), ctxExpr)) l.SetType(toType) l.SetTypecheck(n.Typecheck()) return l @@ -998,7 +998,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if !islvalue(v) { v = copyexpr(v, v.Type(), init) } - v = ir.Nod(ir.OADDR, v, nil) + v = nodAddr(v) } dowidth(fromType) @@ -1145,7 +1145,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if fast == mapslow { // standard version takes key by reference. // order.expr made sure key is addressable. - key = ir.Nod(ir.OADDR, key, nil) + key = nodAddr(key) } n = mkcall1(mapfn(mapassign[fast], t), nil, init, typename(t), map_, key) } else { @@ -1154,7 +1154,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if fast == mapslow { // standard version takes key by reference. // order.expr made sure key is addressable. - key = ir.Nod(ir.OADDR, key, nil) + key = nodAddr(key) } if w := t.Elem().Width; w <= zeroValSize { @@ -1226,7 +1226,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { r = ir.Nod(ir.OAS, r, nil) // zero temp r = typecheck(r, ctxStmt) init.Append(r) - r = ir.Nod(ir.OADDR, r.Left(), nil) + r = nodAddr(r.Left()) return typecheck(r, ctxExpr) } return callnew(n.Type().Elem()) @@ -1281,7 +1281,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { zero = typecheck(zero, ctxStmt) init.Append(zero) // h = &hv - h = ir.Nod(ir.OADDR, hv, nil) + h = nodAddr(hv) // Allocate one bucket pointed to by hmap.buckets on stack if hint // is not larger than BUCKETSIZE. In case hint is larger than @@ -1309,7 +1309,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { nif.PtrBody().Append(zero) // b = &bv - b := ir.Nod(ir.OADDR, bv, nil) + b := nodAddr(bv) // h.buckets = b bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap @@ -1515,7 +1515,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { a := nodnil() if n.Esc() == EscNone { t := types.NewArray(types.Types[types.TUINT8], 4) - a = ir.Nod(ir.OADDR, temp(t), nil) + a = nodAddr(temp(t)) } // intstring(*[4]byte, rune) return mkcall("intstring", n.Type(), init, a, conv(n.Left(), types.Types[types.TINT64])) @@ -1525,7 +1525,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if n.Esc() == EscNone { // Create temporary buffer for string on stack. t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize) - a = ir.Nod(ir.OADDR, temp(t), nil) + a = nodAddr(temp(t)) } if n.Op() == ir.ORUNES2STR { // slicerunetostring(*[32]byte, []rune) string @@ -1557,7 +1557,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { t := types.NewArray(types.Types[types.TUINT8], int64(len(sc))) var a ir.Node if n.Esc() == EscNone && len(sc) <= int(maxImplicitStackVarSize) { - a = ir.Nod(ir.OADDR, temp(t), nil) + a = nodAddr(temp(t)) } else { a = callnew(t) } @@ -1585,7 +1585,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if n.Esc() == EscNone { // Create temporary buffer for slice on stack. t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize) - a = ir.Nod(ir.OADDR, temp(t), nil) + a = nodAddr(temp(t)) } // stringtoslicebyte(*32[byte], string) []byte return mkcall("stringtoslicebyte", n.Type(), init, a, conv(s, types.Types[types.TSTRING])) @@ -1606,7 +1606,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if n.Esc() == EscNone { // Create temporary buffer for slice on stack. 
t := types.NewArray(types.Types[types.TINT32], tmpstringbufsize) - a = ir.Nod(ir.OADDR, temp(t), nil) + a = nodAddr(temp(t)) } // stringtoslicerune(*[32]rune, string) []rune return mkcall("stringtoslicerune", n.Type(), init, a, conv(n.Left(), types.Types[types.TSTRING])) @@ -1627,7 +1627,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { n1 := n.Right() n1 = assignconv(n1, n.Left().Type().Elem(), "chan send") n1 = walkexpr(n1, init) - n1 = ir.Nod(ir.OADDR, n1, nil) + n1 = nodAddr(n1) return mkcall1(chanfn("chansend1", 2, n.Left().Type()), nil, init, n.Left(), n1) case ir.OCLOSURE: @@ -2699,7 +2699,7 @@ func addstr(n ir.Node, init *ir.Nodes) ir.Node { if sz < tmpstringbufsize { // Create temporary buffer for result string on stack. t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize) - buf = ir.Nod(ir.OADDR, temp(t), nil) + buf = nodAddr(temp(t)) } } @@ -2842,7 +2842,7 @@ func appendslice(n ir.Node, init *ir.Nodes) ir.Node { // memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T)) nptr1 := ir.Nod(ir.OINDEX, s, ir.Nod(ir.OLEN, l1, nil)) nptr1.SetBounded(true) - nptr1 = ir.Nod(ir.OADDR, nptr1, nil) + nptr1 = nodAddr(nptr1) nptr2 := ir.Nod(ir.OSPTR, l2, nil) @@ -2988,7 +2988,7 @@ func extendslice(n ir.Node, init *ir.Nodes) ir.Node { // hp := &s[len(l1)] hp := ir.Nod(ir.OINDEX, s, ir.Nod(ir.OLEN, l1, nil)) hp.SetBounded(true) - hp = ir.Nod(ir.OADDR, hp, nil) + hp = nodAddr(hp) hp = convnop(hp, types.Types[types.TUNSAFEPTR]) // hn := l2 * sizeof(elem(s)) @@ -3372,8 +3372,8 @@ func walkcompare(n ir.Node, init *ir.Nodes) ir.Node { fn, needsize := eqfor(t) call := ir.Nod(ir.OCALL, fn, nil) - call.PtrList().Append(ir.Nod(ir.OADDR, cmpl, nil)) - call.PtrList().Append(ir.Nod(ir.OADDR, cmpr, nil)) + call.PtrList().Append(nodAddr(cmpl)) + call.PtrList().Append(nodAddr(cmpr)) if needsize { call.PtrList().Append(nodintconst(t.Width)) } From 9f16620f46fc51ff1c8182b440bd60f97eb35278 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 13 Dec 2020 20:17:09 -0800 Subject: [PATCH 160/474] [dev.regabi] cmd/compile: fix latent Sym.SetPkgDef issue Sym.pkgDefPtr is supposed to return a pointer to the types.Object variable currently holding the Sym's package-scope definition. However, in the case of identifiers that were shadowed in the current scope, it was incorrectly returning a pointer to a stack copy of the dclstack variable, rather than a pointer into the dclstack itself. This doesn't affect PkgDef, because it only reads from the variable, so it got the same result anyway. It also happens to not affect our usage of SetPkgDef today, because we currently only call SetPkgDef for the builtin/runtime.go symbols, and those are never shadowed. However, it does affect my upcoming CL to lazily create the ir.Names for imported objects, as that depends on the ability to use SetPkgDef to set shadowed identifiers. Passes buildall w/ toolstash -cmp. 
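To see the pitfall in isolation (plain Go, nothing compiler-specific;
decl and stack below are stand-ins for the real dclstack entries):
ranging by value copies each element, so taking the address of a field
of the loop variable yields a pointer to the copy, not into the slice.
Indexing, as the fix does, yields a pointer into the backing array.

    package main

    import "fmt"

    type decl struct{ def int }

    func main() {
        stack := []decl{{1}, {2}, {3}}

        var p *int
        for _, d := range stack { // d is a copy of each element
            if d.def == 2 {
                p = &d.def // address of the copy, not of stack[1].def
            }
        }
        *p = 99
        fmt.Println(stack) // [{1} {2} {3}]: the slice is unchanged

        for i := range stack {
            d := &stack[i] // pointer into the slice, as pkgDefPtr now does
            if d.def == 2 {
                p = &d.def
            }
        }
        *p = 99
        fmt.Println(stack) // [{1} {99} {3}]
    }
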
Change-Id: I54fc48b33da0670d31725faa1df1170a8730750a Reviewed-on: https://go-review.googlesource.com/c/go/+/277712 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Russ Cox --- src/cmd/compile/internal/types/scope.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/types/scope.go b/src/cmd/compile/internal/types/scope.go index 04ea3c325f05d..d46918f73de68 100644 --- a/src/cmd/compile/internal/types/scope.go +++ b/src/cmd/compile/internal/types/scope.go @@ -94,7 +94,8 @@ func (s *Sym) SetPkgDef(n Object) { func (s *Sym) pkgDefPtr() *Object { // Look for outermost saved declaration, which must be the // package scope definition, if present. - for _, d := range dclstack { + for i := range dclstack { + d := &dclstack[i] if s == d.sym { return &d.def } From 305d93ef84aed971145b3aa1bce1f9f389bc90c0 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 13 Dec 2020 23:01:34 -0800 Subject: [PATCH 161/474] [dev.regabi] cmd/compile: type check externdcl earlier The next CL requires externdcl to be type checked earlier, but this causes toolstash -cmp to complain because it causes src.PosBases to get added in a different order. So split out into a separate CL. Change-Id: Icab4eadd3fa8acffbd3e980bd8100924378351b3 Reviewed-on: https://go-review.googlesource.com/c/go/+/277732 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/main.go | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 368fe1fcab233..fa4dba4935c77 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -282,9 +282,18 @@ func Main(archInit func(*Arch)) { fcount++ } } - // With all types checked, it's now safe to verify map keys. One single - // check past phase 9 isn't sufficient, as we may exit with other errors - // before then, thus skipping map key errors. + + // Phase 3.11: Check external declarations. + // TODO(mdempsky): This should be handled when type checking their + // corresponding ODCL nodes. + timings.Start("fe", "typecheck", "externdcls") + for i, n := range externdcl { + if n.Op() == ir.ONAME { + externdcl[i] = typecheck(externdcl[i], ctxExpr) + } + } + + // Phase 3.14: With all user code type-checked, it's now safe to verify map keys. checkMapKeys() base.ExitIfErrors() @@ -418,18 +427,6 @@ func Main(archInit func(*Arch)) { base.Flag.GenDwarfInl = 0 } - // Phase 9: Check external declarations. - timings.Start("be", "externaldcls") - for i, n := range externdcl { - if n.Op() == ir.ONAME { - externdcl[i] = typecheck(externdcl[i], ctxExpr) - } - } - // Check the map keys again, since we typechecked the external - // declarations. - checkMapKeys() - base.ExitIfErrors() - // Write object data to disk. timings.Start("be", "dumpobj") dumpdata() From 4c2d66f642286647b640bced33581e8b1665bfe8 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 13 Dec 2020 10:35:20 -0800 Subject: [PATCH 162/474] [dev.regabi] cmd/compile: use ir.Ident for imported identifiers This CL substantially reworks how imported declarations are handled, and fixes a number of issues with dot imports. In particular: 1. It eliminates the stub ir.Name declarations that are created upfront during import-declaration processing, allowing this to be deferred to when the declarations are actually needed. 
(Eventually, this can be deferred even further so we never have to create ir.Names w/ ONONAME, but this CL is already invasive/subtle enough.) 2. During noding, we now use ir.Idents to represent uses of imported declarations, including of dot-imported declarations. 3. Unused dot imports are now reported after type checking, so that we can correctly distinguish whether composite literal keys are a simple identifier (struct literals) or expressions (array/slice/map literals) and whether it might be a use of a dot-imported declaration. 4. It changes the "redeclared" error messages to report the previous position information in the same style as other compiler error messages that reference other source lines. Passes buildall w/ toolstash -cmp. Fixes #6428. Fixes #43164. Fixes #43167. Updates #42990. Change-Id: I40a0a780ec40daf5700fbc3cfeeb7300e1055981 Reviewed-on: https://go-review.googlesource.com/c/go/+/277713 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Russ Cox Trust: Matthew Dempsky --- src/cmd/compile/internal/gc/dcl.go | 13 ++--- src/cmd/compile/internal/gc/iimport.go | 35 ++++++------- src/cmd/compile/internal/gc/init.go | 4 +- src/cmd/compile/internal/gc/main.go | 9 ++-- src/cmd/compile/internal/gc/noder.go | 2 +- src/cmd/compile/internal/gc/subr.go | 50 +++++++++++++------ src/cmd/compile/internal/gc/typecheck.go | 48 ++++++++++-------- src/cmd/compile/internal/ir/name.go | 3 +- src/cmd/compile/internal/types/sizeof_test.go | 2 +- src/cmd/compile/internal/types/sym.go | 3 +- test/fixedbugs/bug462.go | 4 +- test/fixedbugs/issue20415.go | 6 +-- test/fixedbugs/issue43164.dir/a.go | 13 +++++ test/fixedbugs/issue43164.dir/b.go | 11 ++++ test/fixedbugs/issue43164.go | 7 +++ test/fixedbugs/issue43167.go | 13 +++++ test/fixedbugs/issue6428.go | 15 ++++++ 17 files changed, 159 insertions(+), 79 deletions(-) create mode 100644 test/fixedbugs/issue43164.dir/a.go create mode 100644 test/fixedbugs/issue43164.dir/b.go create mode 100644 test/fixedbugs/issue43164.go create mode 100644 test/fixedbugs/issue43167.go create mode 100644 test/fixedbugs/issue6428.go diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 1ebadd9213725..89873e2facc47 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -28,12 +28,9 @@ func testdclstack() { // redeclare emits a diagnostic about symbol s being redeclared at pos. func redeclare(pos src.XPos, s *types.Sym, where string) { if !s.Lastlineno.IsKnown() { - pkg := s.Origpkg - if pkg == nil { - pkg = s.Pkg - } + pkgName := dotImportRefs[s.Def.(*ir.Ident)] base.ErrorfAt(pos, "%v redeclared %s\n"+ - "\tprevious declaration during import %q", s, where, pkg.Path) + "\t%v: previous declaration during import %q", s, where, base.FmtPos(pkgName.Pos()), pkgName.Pkg.Path) } else { prevPos := s.Lastlineno @@ -46,7 +43,7 @@ func redeclare(pos src.XPos, s *types.Sym, where string) { } base.ErrorfAt(pos, "%v redeclared %s\n"+ - "\tprevious declaration at %v", s, where, base.FmtPos(prevPos)) + "\t%v: previous declaration", s, where, base.FmtPos(prevPos)) } } @@ -210,6 +207,10 @@ func symfield(s *types.Sym, typ *types.Type) *ir.Field { // Automatically creates a new closure variable if the referenced symbol was // declared in a different (containing) function. 
func oldname(s *types.Sym) ir.Node { + if s.Pkg != types.LocalPkg { + return ir.NewIdent(base.Pos, s) + } + n := ir.AsNode(s.Def) if n == nil { // Maybe a top-level declaration will come along later to diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 194c7427f39ce..0e2af562d0333 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -165,17 +165,9 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) s := pkg.Lookup(p.stringAt(ird.uint64())) off := ird.uint64() - if _, ok := declImporter[s]; ok { - continue + if _, ok := declImporter[s]; !ok { + declImporter[s] = iimporterAndOffset{p, off} } - declImporter[s] = iimporterAndOffset{p, off} - - // Create stub declaration. If used, this will - // be overwritten by expandDecl. - if s.Def != nil { - base.Fatalf("unexpected definition for %v: %v", s, ir.AsNode(s.Def)) - } - s.Def = ir.NewDeclNameAt(src.NoXPos, s) } } @@ -187,10 +179,9 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) s := pkg.Lookup(p.stringAt(ird.uint64())) off := ird.uint64() - if _, ok := inlineImporter[s]; ok { - continue + if _, ok := inlineImporter[s]; !ok { + inlineImporter[s] = iimporterAndOffset{p, off} } - inlineImporter[s] = iimporterAndOffset{p, off} } } @@ -442,10 +433,16 @@ func (r *importReader) ident() *types.Sym { return pkg.Lookup(name) } -func (r *importReader) qualifiedIdent() *types.Sym { +func (r *importReader) qualifiedIdent() *ir.Name { name := r.string() pkg := r.pkg() - return pkg.Lookup(name) + sym := pkg.Lookup(name) + n := sym.PkgDef() + if n == nil { + n = ir.NewDeclNameAt(src.NoXPos, sym) + sym.SetPkgDef(n) + } + return n.(*ir.Name) } func (r *importReader) pos() src.XPos { @@ -501,9 +498,9 @@ func (r *importReader) typ1() *types.Type { // support inlining functions with local defined // types. Therefore, this must be a package-scope // type. - n := ir.AsNode(r.qualifiedIdent().PkgDef()) + n := r.qualifiedIdent() if n.Op() == ir.ONONAME { - expandDecl(n.(*ir.Name)) + expandDecl(n) } if n.Op() != ir.OTYPE { base.Fatalf("expected OTYPE, got %v: %v, %v", n.Op(), n.Sym(), n) @@ -821,10 +818,10 @@ func (r *importReader) node() ir.Node { return n case ir.ONONAME: - return mkname(r.qualifiedIdent()) + return r.qualifiedIdent() case ir.ONAME: - return mkname(r.ident()) + return r.ident().Def.(*ir.Name) // case OPACK, ONONAME: // unreachable - should have been resolved by typechecking diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index e0907f952cdf2..2ef9d1ad3532e 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -44,8 +44,8 @@ func fninit(n []ir.Node) { // Find imported packages with init tasks. for _, pkg := range sourceOrderImports { - n := resolve(ir.AsNode(pkg.Lookup(".inittask").Def)) - if n == nil { + n := resolve(oldname(pkg.Lookup(".inittask"))) + if n.Op() == ir.ONONAME { continue } if n.Op() != ir.ONAME || n.Class() != ir.PEXTERN { diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index fa4dba4935c77..77b11c5d5d01d 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -293,8 +293,10 @@ func Main(archInit func(*Arch)) { } } - // Phase 3.14: With all user code type-checked, it's now safe to verify map keys. + // Phase 3.14: With all user code type-checked, it's now safe to verify map keys + // and unused dot imports. 
checkMapKeys() + checkDotImports() base.ExitIfErrors() timings.AddEvent(fcount, "funcs") @@ -953,10 +955,7 @@ func clearImports() { if IsAlias(s) { // throw away top-level name left over // from previous import . "x" - if name := n.Name(); name != nil && name.PkgName != nil && !name.PkgName.Used && base.SyntaxErrors() == 0 { - unused = append(unused, importedPkg{name.PkgName.Pos(), name.PkgName.Pkg.Path, ""}) - name.PkgName.Used = true - } + // We'll report errors after type checking in checkDotImports. s.Def = nil continue } diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 8c765f9dfc25f..55628352bdb91 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -369,7 +369,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) { switch my.Name { case ".": - importdot(ipkg, pack) + importDot(pack) return case "init": base.ErrorfAt(pack.Pos(), "cannot import package as init - init must be a func") diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 42f8982c80600..2082544d086f5 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -100,13 +100,26 @@ func autolabel(prefix string) *types.Sym { return lookupN(prefix, int(n)) } -// find all the exported symbols in package opkg +// dotImports tracks all PkgNames that have been dot-imported. +var dotImports []*ir.PkgName + +// dotImportRefs maps idents introduced by importDot back to the +// ir.PkgName they were dot-imported through. +var dotImportRefs map[*ir.Ident]*ir.PkgName + +// find all the exported symbols in package referenced by PkgName, // and make them available in the current package -func importdot(opkg *types.Pkg, pack *ir.PkgName) { - n := 0 +func importDot(pack *ir.PkgName) { + if dotImportRefs == nil { + dotImportRefs = make(map[*ir.Ident]*ir.PkgName) + } + + opkg := pack.Pkg for _, s := range opkg.Syms { if s.Def == nil { - continue + if _, ok := declImporter[s]; !ok { + continue + } } if !types.IsExported(s.Name) || strings.ContainsRune(s.Name, 0xb7) { // 0xb7 = center dot continue @@ -118,21 +131,26 @@ func importdot(opkg *types.Pkg, pack *ir.PkgName) { continue } - s1.Def = s.Def - s1.Block = s.Block - if ir.AsNode(s1.Def).Name() == nil { - ir.Dump("s1def", ir.AsNode(s1.Def)) - base.Fatalf("missing Name") - } - ir.AsNode(s1.Def).Name().PkgName = pack - s1.Origpkg = opkg - n++ + id := ir.NewIdent(src.NoXPos, s) + dotImportRefs[id] = pack + s1.Def = id + s1.Block = 1 } - if n == 0 { - // can't possibly be used - there were no symbols - base.ErrorfAt(pack.Pos(), "imported and not used: %q", opkg.Path) + dotImports = append(dotImports, pack) +} + +// checkDotImports reports errors for any unused dot imports. +func checkDotImports() { + for _, pack := range dotImports { + if !pack.Used { + base.ErrorfAt(pack.Pos(), "imported and not used: %q", pack.Pkg.Path) + } } + + // No longer needed; release memory. + dotImports = nil + dotImportRefs = nil } // nodAddr returns a node representing &n. 
diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index ad161b59f0a87..49e4289f141ea 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -8,6 +8,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/types" + "cmd/internal/src" "fmt" "go/constant" "go/token" @@ -90,11 +91,24 @@ func resolve(n ir.Node) (res ir.Node) { defer tracePrint("resolve", n)(&res) } - // Stub ir.Name left for us by iimport. - if n, ok := n.(*ir.Name); ok { - if n.Sym().Pkg == types.LocalPkg { - base.Fatalf("unexpected Name: %+v", n) + if sym := n.Sym(); sym.Pkg != types.LocalPkg { + // We might have an ir.Ident from oldname or importDot. + if id, ok := n.(*ir.Ident); ok { + if pkgName := dotImportRefs[id]; pkgName != nil { + pkgName.Used = true + } + + if sym.Def == nil { + if _, ok := declImporter[sym]; !ok { + return n // undeclared name + } + sym.Def = ir.NewDeclNameAt(src.NoXPos, sym) + } + n = ir.AsNode(sym.Def) } + + // Stub ir.Name left for us by iimport. + n := n.(*ir.Name) if inimport { base.Fatalf("recursive inimport") } @@ -2885,31 +2899,25 @@ func typecheckcomplit(n ir.Node) (res ir.Node) { if l.Op() == ir.OKEY { key := l.Left() - sk := ir.NewStructKeyExpr(l.Pos(), nil, l.Right()) - ls[i] = sk - l = sk + // Sym might have resolved to name in other top-level + // package, because of import dot. Redirect to correct sym + // before we do the lookup. + s := key.Sym() + if id, ok := key.(*ir.Ident); ok && dotImportRefs[id] != nil { + s = lookup(s.Name) + } // An OXDOT uses the Sym field to hold // the field to the right of the dot, // so s will be non-nil, but an OXDOT // is never a valid struct literal key. - if key.Sym() == nil || key.Op() == ir.OXDOT || key.Sym().IsBlank() { + if s == nil || s.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || s.IsBlank() { base.Errorf("invalid field name %v in struct initializer", key) - sk.SetLeft(typecheck(sk.Left(), ctxExpr)) continue } - // Sym might have resolved to name in other top-level - // package, because of import dot. Redirect to correct sym - // before we do the lookup. - s := key.Sym() - if s.Pkg != types.LocalPkg && types.IsExported(s.Name) { - s1 := lookup(s.Name) - if s1.Origpkg == s.Pkg { - s = s1 - } - } - sk.SetSym(s) + l = ir.NewStructKeyExpr(l.Pos(), s, l.Right()) + ls[i] = l } if l.Op() != ir.OSTRUCTKEY { diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 2330838f1c548..7f1a47e13cbe9 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -16,8 +16,7 @@ import ( // An Ident is an identifier, possibly qualified. 
type Ident struct { miniExpr - sym *types.Sym - Used bool + sym *types.Sym } func NewIdent(pos src.XPos, sym *types.Sym) *Ident { diff --git a/src/cmd/compile/internal/types/sizeof_test.go b/src/cmd/compile/internal/types/sizeof_test.go index 72a35bc7daadc..1ca07b12c846b 100644 --- a/src/cmd/compile/internal/types/sizeof_test.go +++ b/src/cmd/compile/internal/types/sizeof_test.go @@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {Sym{}, 52, 88}, + {Sym{}, 48, 80}, {Type{}, 56, 96}, {Map{}, 20, 40}, {Forward{}, 20, 32}, diff --git a/src/cmd/compile/internal/types/sym.go b/src/cmd/compile/internal/types/sym.go index fcb095c53c985..19f06fcf5bbe8 100644 --- a/src/cmd/compile/internal/types/sym.go +++ b/src/cmd/compile/internal/types/sym.go @@ -38,8 +38,7 @@ type Sym struct { Block int32 // blocknumber to catch redeclaration Lastlineno src.XPos // last declaration for diagnostic - flags bitset8 - Origpkg *Pkg // original package for . import + flags bitset8 } const ( diff --git a/test/fixedbugs/bug462.go b/test/fixedbugs/bug462.go index 3df63b091ddbb..bae5ee0aeb019 100644 --- a/test/fixedbugs/bug462.go +++ b/test/fixedbugs/bug462.go @@ -13,7 +13,7 @@ type T struct { } func main() { - _ = T { - os.File: 1, // ERROR "unknown T? ?field" + _ = T{ + os.File: 1, // ERROR "invalid field name os.File|unknown field" } } diff --git a/test/fixedbugs/issue20415.go b/test/fixedbugs/issue20415.go index 6f2c342ce4150..5ad085564b5e1 100644 --- a/test/fixedbugs/issue20415.go +++ b/test/fixedbugs/issue20415.go @@ -11,7 +11,7 @@ package p // 1 var f byte -var f interface{} // ERROR "previous declaration at issue20415.go:12" +var f interface{} // ERROR "issue20415.go:12: previous declaration" func _(f int) { } @@ -22,7 +22,7 @@ var g byte func _(g int) { } -var g interface{} // ERROR "previous declaration at issue20415.go:20" +var g interface{} // ERROR "issue20415.go:20: previous declaration" // 3 func _(h int) { @@ -30,4 +30,4 @@ func _(h int) { var h byte -var h interface{} // ERROR "previous declaration at issue20415.go:31" +var h interface{} // ERROR "issue20415.go:31: previous declaration" diff --git a/test/fixedbugs/issue43164.dir/a.go b/test/fixedbugs/issue43164.dir/a.go new file mode 100644 index 0000000000000..fa10e85061225 --- /dev/null +++ b/test/fixedbugs/issue43164.dir/a.go @@ -0,0 +1,13 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +import . "strings" + +var _ = Index // use strings + +type t struct{ Index int } + +var _ = t{Index: 0} diff --git a/test/fixedbugs/issue43164.dir/b.go b/test/fixedbugs/issue43164.dir/b.go new file mode 100644 index 0000000000000..b025927a059a8 --- /dev/null +++ b/test/fixedbugs/issue43164.dir/b.go @@ -0,0 +1,11 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +import . "bytes" + +var _ = Index // use bytes + +var _ = t{Index: 0} diff --git a/test/fixedbugs/issue43164.go b/test/fixedbugs/issue43164.go new file mode 100644 index 0000000000000..f21d1d5c582a7 --- /dev/null +++ b/test/fixedbugs/issue43164.go @@ -0,0 +1,7 @@ +// compiledir + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ignored diff --git a/test/fixedbugs/issue43167.go b/test/fixedbugs/issue43167.go new file mode 100644 index 0000000000000..1d1b69af58087 --- /dev/null +++ b/test/fixedbugs/issue43167.go @@ -0,0 +1,13 @@ +// errorcheck + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +import . "bytes" + +var _ Buffer // use package bytes + +var Index byte // ERROR "Index redeclared.*\n\tLINE-4: previous declaration during import .bytes.|already declared|redefinition" diff --git a/test/fixedbugs/issue6428.go b/test/fixedbugs/issue6428.go new file mode 100644 index 0000000000000..c3f7b20a983bc --- /dev/null +++ b/test/fixedbugs/issue6428.go @@ -0,0 +1,15 @@ +// errorcheck + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +import . "testing" // ERROR "imported and not used" + +type S struct { + T int +} + +var _ = S{T: 0} From fa06894b36054e80e815ee538fb6f72c9e58f14a Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 7 Dec 2020 14:56:03 -0500 Subject: [PATCH 163/474] [dev.regabi] cmd/compile: cleanup preparing for concrete types Avoid using the same variable for two different concrete Node types in walk. This will smooth the introduction of specific constructors, replacing ir.Nod and friends. Passes buildall w/ toolstash -cmp. Replay of CL 275884, lost to the bad-merge history rewrite. Change-Id: I05628e20a19c9559ed7478526ef6cb2613f735e5 Reviewed-on: https://go-review.googlesource.com/c/go/+/277954 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/walk.go | 181 +++++++++++----------------- 1 file changed, 70 insertions(+), 111 deletions(-) diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index bbd81de40e6aa..c9dbf917020eb 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -207,8 +207,7 @@ func walkstmt(n ir.Node) ir.Node { } nn := ir.Nod(ir.OAS, v.Name().Heapaddr, prealloc[v]) nn.SetColas(true) - nn = typecheck(nn, ctxStmt) - return walkstmt(nn) + return walkstmt(typecheck(nn, ctxStmt)) } return n @@ -480,10 +479,8 @@ func walkexpr(n ir.Node, init *ir.Nodes) ir.Node { if n.Op() == ir.ONAME && n.Class() == ir.PAUTOHEAP { nn := ir.Nod(ir.ODEREF, n.Name().Heapaddr, nil) - nn = typecheck(nn, ctxExpr) - nn = walkexpr(nn, init) nn.Left().MarkNonNil() - return nn + return walkexpr(typecheck(nn, ctxExpr), init) } n = walkexpr1(n, init) @@ -969,10 +966,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { dowidth(fn.Type()) call := ir.Nod(ir.OCALL, fn, nil) call.PtrList().Set1(n.Left()) - call = typecheck(call, ctxExpr) - call = walkexpr(call, init) - call = safeexpr(call, init) - e := ir.Nod(ir.OEFACE, typeword(), call) + e := ir.Nod(ir.OEFACE, typeword(), safeexpr(walkexpr(typecheck(call, ctxExpr), init), init)) e.SetType(toType) e.SetTypecheck(1) return e @@ -1277,9 +1271,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // var hv hmap hv := temp(hmapType) - zero := ir.Nod(ir.OAS, hv, nil) - zero = typecheck(zero, ctxStmt) - init.Append(zero) + init.Append(typecheck(ir.Nod(ir.OAS, hv, nil), ctxStmt)) // h = &hv h = nodAddr(hv) @@ -1305,8 +1297,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // var bv bmap bv := temp(bmap(t)) - zero = ir.Nod(ir.OAS, bv, nil) - nif.PtrBody().Append(zero) + nif.PtrBody().Append(ir.Nod(ir.OAS, bv, 
nil)) // b = &bv b := nodAddr(bv) @@ -1316,9 +1307,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { na := ir.Nod(ir.OAS, nodSym(ir.ODOT, h, bsym), b) nif.PtrBody().Append(na) - nif = typecheck(nif, ctxStmt) - nif = walkstmt(nif) - init.Append(nif) + init.Append(walkstmt(typecheck(nif, ctxStmt))) } } @@ -1336,10 +1325,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // h.hash0 = fastrand() rand := mkcall("fastrand", types.Types[types.TUINT32], init) hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap - a := ir.Nod(ir.OAS, nodSym(ir.ODOT, h, hashsym), rand) - a = typecheck(a, ctxStmt) - a = walkexpr(a, init) - init.Append(a) + appendWalk(init, ir.Nod(ir.OAS, nodSym(ir.ODOT, h, hashsym), rand)) return convnop(h, t) } // Call runtime.makehmap to allocate an @@ -1408,20 +1394,15 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { niflen := ir.Nod(ir.OIF, ir.Nod(ir.OLT, l, nodintconst(0)), nil) niflen.PtrBody().Set1(mkcall("panicmakeslicelen", nil, init)) nif.PtrBody().Append(niflen, mkcall("panicmakeslicecap", nil, init)) - nif = typecheck(nif, ctxStmt) - init.Append(nif) + init.Append(typecheck(nif, ctxStmt)) t = types.NewArray(t.Elem(), i) // [r]T var_ := temp(t) - a := ir.Nod(ir.OAS, var_, nil) // zero temp - a = typecheck(a, ctxStmt) - init.Append(a) - r := ir.Nod(ir.OSLICE, var_, nil) // arr[:l] + appendWalk(init, ir.Nod(ir.OAS, var_, nil)) // zero temp + r := ir.Nod(ir.OSLICE, var_, nil) // arr[:l] r.SetSliceBounds(nil, l, nil) - r = conv(r, n.Type()) // in case n.Type is named. - r = typecheck(r, ctxExpr) - r = walkexpr(r, init) - return r + // The conv is necessary in case n.Type is named. + return walkexpr(typecheck(conv(r, n.Type()), ctxExpr), init) } // n escapes; set up a call to makeslice. @@ -1449,10 +1430,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { m.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype))) m.Left().MarkNonNil() m.PtrList().Set2(conv(len, types.Types[types.TINT]), conv(cap, types.Types[types.TINT])) - - m = typecheck(m, ctxExpr) - m = walkexpr(m, init) - return m + return walkexpr(typecheck(m, ctxExpr), init) case ir.OMAKESLICECOPY: if n.Esc() == EscNone { @@ -1569,9 +1547,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { as := ir.Nod(ir.OAS, ir.Nod(ir.ODEREF, p, nil), ir.Nod(ir.ODEREF, convnop(ir.Nod(ir.OSPTR, s, nil), t.PtrTo()), nil)) - as = typecheck(as, ctxStmt) - as = walkstmt(as) - init.Append(as) + appendWalk(init, as) } // Slice the [n]byte to a []byte. 
@@ -1811,8 +1787,7 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { if fncall(l, r.Type) { tmp := ir.Node(temp(r.Type)) tmp = typecheck(tmp, ctxExpr) - a := ir.Nod(ir.OAS, l, tmp) - a = convas(a, &mm) + a := convas(ir.Nod(ir.OAS, l, tmp), &mm) mm.Append(a) l = tmp } @@ -1822,8 +1797,7 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { res.SetType(r.Type) res.SetTypecheck(1) - a := ir.Nod(ir.OAS, l, res) - a = convas(a, &nn) + a := convas(ir.Nod(ir.OAS, l, res), &nn) updateHasCall(a) if a.HasCall() { ir.Dump("ascompatet ucount", a) @@ -1917,8 +1891,7 @@ func walkCall(n ir.Node, init *ir.Nodes) { if instrumenting || fncall(arg, t) { // make assignment of fncall to tempAt tmp := temp(t) - a := ir.Nod(ir.OAS, tmp, arg) - a = convas(a, init) + a := convas(ir.Nod(ir.OAS, tmp, arg), init) tempAssigns = append(tempAssigns, a) // replace arg with temp args[i] = tmp @@ -2067,10 +2040,8 @@ func walkprint(nn ir.Node, init *ir.Nodes) ir.Node { walkexprlist(calls, init) r := ir.Nod(ir.OBLOCK, nil, nil) - r = typecheck(r, ctxStmt) - r = walkstmt(r) r.PtrList().Set(calls) - return r + return walkstmt(typecheck(r, ctxStmt)) } func callnew(t *types.Type) ir.Node { @@ -2527,16 +2498,15 @@ func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) ir.Node { base.Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va)) } - r := ir.Nod(ir.OCALL, fn, nil) - r.PtrList().Set(va) + call := ir.Nod(ir.OCALL, fn, nil) + call.PtrList().Set(va) + ctx := ctxStmt if fn.Type().NumResults() > 0 { - r = typecheck(r, ctxExpr|ctxMultiOK) - } else { - r = typecheck(r, ctxStmt) + ctx = ctxExpr | ctxMultiOK } - r = walkexpr(r, init) - r.SetType(t) - return r + r1 := typecheck(call, ctx) + r1.SetType(t) + return walkexpr(r1, init) } func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) ir.Node { @@ -2731,11 +2701,11 @@ func addstr(n ir.Node, init *ir.Nodes) ir.Node { cat := syslook(fn) r := ir.Nod(ir.OCALL, cat, nil) r.PtrList().Set(args) - r = typecheck(r, ctxExpr) - r = walkexpr(r, init) - r.SetType(n.Type()) + r1 := typecheck(r, ctxExpr) + r1 = walkexpr(r1, init) + r1.SetType(n.Type()) - return r + return r1 } func walkAppendArgs(n ir.Node, init *ir.Nodes) { @@ -2807,44 +2777,39 @@ func appendslice(n ir.Node, init *ir.Nodes) ir.Node { var ncopy ir.Node if elemtype.HasPointers() { // copy(s[len(l1):], l2) - nptr1 := ir.Nod(ir.OSLICE, s, nil) - nptr1.SetType(s.Type()) - nptr1.SetSliceBounds(ir.Nod(ir.OLEN, l1, nil), nil, nil) - nptr1 = cheapexpr(nptr1, &nodes) - - nptr2 := l2 + slice := ir.Nod(ir.OSLICE, s, nil) + slice.SetType(s.Type()) + slice.SetSliceBounds(ir.Nod(ir.OLEN, l1, nil), nil, nil) Curfn.SetWBPos(n.Pos()) // instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int fn := syslook("typedslicecopy") fn = substArgTypes(fn, l1.Type().Elem(), l2.Type().Elem()) - ptr1, len1 := backingArrayPtrLen(nptr1) - ptr2, len2 := backingArrayPtrLen(nptr2) + ptr1, len1 := backingArrayPtrLen(cheapexpr(slice, &nodes)) + ptr2, len2 := backingArrayPtrLen(l2) ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, typename(elemtype), ptr1, len1, ptr2, len2) } else if instrumenting && !base.Flag.CompilingRuntime { // rely on runtime to instrument: // copy(s[len(l1):], l2) // l2 can be a slice or string. 
- nptr1 := ir.Nod(ir.OSLICE, s, nil) - nptr1.SetType(s.Type()) - nptr1.SetSliceBounds(ir.Nod(ir.OLEN, l1, nil), nil, nil) - nptr1 = cheapexpr(nptr1, &nodes) - nptr2 := l2 + slice := ir.Nod(ir.OSLICE, s, nil) + slice.SetType(s.Type()) + slice.SetSliceBounds(ir.Nod(ir.OLEN, l1, nil), nil, nil) - ptr1, len1 := backingArrayPtrLen(nptr1) - ptr2, len2 := backingArrayPtrLen(nptr2) + ptr1, len1 := backingArrayPtrLen(cheapexpr(slice, &nodes)) + ptr2, len2 := backingArrayPtrLen(l2) fn := syslook("slicecopy") fn = substArgTypes(fn, ptr1.Type().Elem(), ptr2.Type().Elem()) ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, nodintconst(elemtype.Width)) } else { // memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T)) - nptr1 := ir.Nod(ir.OINDEX, s, ir.Nod(ir.OLEN, l1, nil)) - nptr1.SetBounded(true) - nptr1 = nodAddr(nptr1) + ix := ir.Nod(ir.OINDEX, s, ir.Nod(ir.OLEN, l1, nil)) + ix.SetBounded(true) + addr := ir.Nod(ir.OADDR, ix, nil) - nptr2 := ir.Nod(ir.OSPTR, l2, nil) + sptr := ir.Nod(ir.OSPTR, l2, nil) nwid := cheapexpr(conv(ir.Nod(ir.OLEN, l2, nil), types.Types[types.TUINTPTR]), &nodes) nwid = ir.Nod(ir.OMUL, nwid, nodintconst(elemtype.Width)) @@ -2852,7 +2817,7 @@ func appendslice(n ir.Node, init *ir.Nodes) ir.Node { // instantiate func memmove(to *any, frm *any, length uintptr) fn := syslook("memmove") fn = substArgTypes(fn, elemtype, elemtype) - ncopy = mkcall1(fn, nil, &nodes, nptr1, nptr2, nwid) + ncopy = mkcall1(fn, nil, &nodes, addr, sptr, nwid) } ln := append(nodes.Slice(), ncopy) @@ -2986,14 +2951,12 @@ func extendslice(n ir.Node, init *ir.Nodes) ir.Node { nodes = append(nodes, ir.Nod(ir.OAS, sptr, tmp)) // hp := &s[len(l1)] - hp := ir.Nod(ir.OINDEX, s, ir.Nod(ir.OLEN, l1, nil)) - hp.SetBounded(true) - hp = nodAddr(hp) - hp = convnop(hp, types.Types[types.TUNSAFEPTR]) + ix := ir.Nod(ir.OINDEX, s, ir.Nod(ir.OLEN, l1, nil)) + ix.SetBounded(true) + hp := convnop(ir.Nod(ir.OADDR, ix, nil), types.Types[types.TUNSAFEPTR]) // hn := l2 * sizeof(elem(s)) - hn := ir.Nod(ir.OMUL, l2, nodintconst(elemtype.Width)) - hn = conv(hn, types.Types[types.TUINTPTR]) + hn := conv(ir.Nod(ir.OMUL, l2, nodintconst(elemtype.Width)), types.Types[types.TUINTPTR]) clrname := "memclrNoHeapPointers" hasPointers := elemtype.HasPointers() @@ -3083,32 +3046,32 @@ func walkappend(n ir.Node, init *ir.Nodes, dst ir.Node) ir.Node { ns := temp(nsrc.Type()) l = append(l, ir.Nod(ir.OAS, ns, nsrc)) // s = src - na := nodintconst(int64(argc)) // const argc - nx := ir.Nod(ir.OIF, nil, nil) // if cap(s) - len(s) < argc - nx.SetLeft(ir.Nod(ir.OLT, ir.Nod(ir.OSUB, ir.Nod(ir.OCAP, ns, nil), ir.Nod(ir.OLEN, ns, nil)), na)) + na := nodintconst(int64(argc)) // const argc + nif := ir.Nod(ir.OIF, nil, nil) // if cap(s) - len(s) < argc + nif.SetLeft(ir.Nod(ir.OLT, ir.Nod(ir.OSUB, ir.Nod(ir.OCAP, ns, nil), ir.Nod(ir.OLEN, ns, nil)), na)) fn := syslook("growslice") // growslice(, old []T, mincap int) (ret []T) fn = substArgTypes(fn, ns.Type().Elem(), ns.Type().Elem()) - nx.PtrBody().Set1(ir.Nod(ir.OAS, ns, - mkcall1(fn, ns.Type(), nx.PtrInit(), typename(ns.Type().Elem()), ns, + nif.PtrBody().Set1(ir.Nod(ir.OAS, ns, + mkcall1(fn, ns.Type(), nif.PtrInit(), typename(ns.Type().Elem()), ns, ir.Nod(ir.OADD, ir.Nod(ir.OLEN, ns, nil), na)))) - l = append(l, nx) + l = append(l, nif) nn := temp(types.Types[types.TINT]) l = append(l, ir.Nod(ir.OAS, nn, ir.Nod(ir.OLEN, ns, nil))) // n = len(s) - nx = ir.Nod(ir.OSLICE, ns, nil) // ...s[:n+argc] - nx.SetSliceBounds(nil, ir.Nod(ir.OADD, nn, na), nil) - nx.SetBounded(true) - l = append(l, 
ir.Nod(ir.OAS, ns, nx)) // s = s[:n+argc] + slice := ir.Nod(ir.OSLICE, ns, nil) // ...s[:n+argc] + slice.SetSliceBounds(nil, ir.Nod(ir.OADD, nn, na), nil) + slice.SetBounded(true) + l = append(l, ir.Nod(ir.OAS, ns, slice)) // s = s[:n+argc] ls = n.List().Slice()[1:] for i, n := range ls { - nx = ir.Nod(ir.OINDEX, ns, nn) // s[n] ... - nx.SetBounded(true) - l = append(l, ir.Nod(ir.OAS, nx, n)) // s[n] = arg + ix := ir.Nod(ir.OINDEX, ns, nn) // s[n] ... + ix.SetBounded(true) + l = append(l, ir.Nod(ir.OAS, ix, n)) // s[n] = arg if i+1 < len(ls) { l = append(l, ir.Nod(ir.OAS, nn, ir.Nod(ir.OADD, nn, nodintconst(1)))) // n = n + 1 } @@ -3377,7 +3340,7 @@ func walkcompare(n ir.Node, init *ir.Nodes) ir.Node { if needsize { call.PtrList().Append(nodintconst(t.Width)) } - res := call + res := ir.Node(call) if n.Op() != ir.OEQ { res = ir.Nod(ir.ONOT, res, nil) } @@ -3442,21 +3405,21 @@ func walkcompare(n ir.Node, init *ir.Nodes) ir.Node { remains -= t.Elem().Width } else { elemType := t.Elem().ToUnsigned() - cmplw := ir.Nod(ir.OINDEX, cmpl, nodintconst(i)) + cmplw := ir.Node(ir.Nod(ir.OINDEX, cmpl, nodintconst(i))) cmplw = conv(cmplw, elemType) // convert to unsigned cmplw = conv(cmplw, convType) // widen - cmprw := ir.Nod(ir.OINDEX, cmpr, nodintconst(i)) + cmprw := ir.Node(ir.Nod(ir.OINDEX, cmpr, nodintconst(i))) cmprw = conv(cmprw, elemType) cmprw = conv(cmprw, convType) // For code like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ... // ssa will generate a single large load. for offset := int64(1); offset < step; offset++ { - lb := ir.Nod(ir.OINDEX, cmpl, nodintconst(i+offset)) + lb := ir.Node(ir.Nod(ir.OINDEX, cmpl, nodintconst(i+offset))) lb = conv(lb, elemType) lb = conv(lb, convType) lb = ir.Nod(ir.OLSH, lb, nodintconst(8*t.Elem().Width*offset)) cmplw = ir.Nod(ir.OOR, cmplw, lb) - rb := ir.Nod(ir.OINDEX, cmpr, nodintconst(i+offset)) + rb := ir.Node(ir.Nod(ir.OINDEX, cmpr, nodintconst(i+offset))) rb = conv(rb, elemType) rb = conv(rb, convType) rb = ir.Nod(ir.OLSH, rb, nodintconst(8*t.Elem().Width*offset)) @@ -3473,10 +3436,8 @@ func walkcompare(n ir.Node, init *ir.Nodes) ir.Node { // We still need to use cmpl and cmpr, in case they contain // an expression which might panic. See issue 23837. t := temp(cmpl.Type()) - a1 := ir.Nod(ir.OAS, t, cmpl) - a1 = typecheck(a1, ctxStmt) - a2 := ir.Nod(ir.OAS, t, cmpr) - a2 = typecheck(a2, ctxStmt) + a1 := typecheck(ir.Nod(ir.OAS, t, cmpl), ctxStmt) + a2 := typecheck(ir.Nod(ir.OAS, t, cmpr), ctxStmt) init.Append(a1, a2) } n = finishcompare(n, expr, init) @@ -3583,15 +3544,13 @@ func walkcompareString(n ir.Node, init *ir.Nodes) ir.Node { convType = types.Types[types.TUINT16] step = 2 } - ncsubstr := ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i))) - ncsubstr = conv(ncsubstr, convType) + ncsubstr := conv(ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i))), convType) csubstr := int64(s[i]) // Calculate large constant from bytes as sequence of shifts and ors. // Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ... // ssa will combine this into a single large load. 
for offset := 1; offset < step; offset++ { - b := ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i+offset))) - b = conv(b, convType) + b := conv(ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i+offset))), convType) b = ir.Nod(ir.OLSH, b, nodintconst(int64(8*offset))) ncsubstr = ir.Nod(ir.OOR, ncsubstr, b) csubstr |= int64(s[i+offset]) << uint8(8*offset) From 5ae70b85c6c40adb4e785bf988799df9c0a57e16 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 7 Dec 2020 14:56:49 -0500 Subject: [PATCH 164/474] [dev.regabi] cmd/compile: cleanup preparing for concrete types, 2 Avoid using the same variable for two different concrete Node types in other files (beyond walk). This will smooth the introduction of specific constructors, replacing ir.Nod and friends. Passes buildall w/ toolstash -cmp. Replay of CL 275885, lost to the bad-merge history rewrite. Change-Id: I0da89502a0bd636b8766f01b6f843c7821b3e9ab Reviewed-on: https://go-review.googlesource.com/c/go/+/277955 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/alg.go | 20 +++--- src/cmd/compile/internal/gc/closure.go | 32 ++++----- src/cmd/compile/internal/gc/inl.go | 19 ++---- src/cmd/compile/internal/gc/order.go | 45 +++++-------- src/cmd/compile/internal/gc/range.go | 30 ++++----- src/cmd/compile/internal/gc/select.go | 20 ++---- src/cmd/compile/internal/gc/sinit.go | 82 ++++++------------------ src/cmd/compile/internal/gc/subr.go | 26 ++++---- src/cmd/compile/internal/gc/swt.go | 4 +- src/cmd/compile/internal/gc/typecheck.go | 8 +-- src/cmd/compile/internal/gc/walk.go | 31 ++++++--- 11 files changed, 126 insertions(+), 191 deletions(-) diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index 7540944201723..8550edb9e0fb5 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -819,12 +819,12 @@ func eqstring(s, t ir.Node) (eqlen, eqmem ir.Node) { fn = substArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8]) call := ir.Nod(ir.OCALL, fn, nil) call.PtrList().Append(sptr, tptr, ir.Copy(slen)) - call = typecheck(call, ctxExpr|ctxMultiOK) + call1 := typecheck(call, ctxExpr|ctxMultiOK) cmp := ir.Nod(ir.OEQ, slen, tlen) - cmp = typecheck(cmp, ctxExpr) + cmp1 := typecheck(cmp, ctxExpr) cmp.SetType(types.Types[types.TBOOL]) - return cmp, call + return cmp1, call1 } // eqinterface returns the nodes @@ -857,21 +857,19 @@ func eqinterface(s, t ir.Node) (eqtab, eqdata ir.Node) { call := ir.Nod(ir.OCALL, fn, nil) call.PtrList().Append(stab, sdata, tdata) - call = typecheck(call, ctxExpr|ctxMultiOK) + call1 := typecheck(call, ctxExpr|ctxMultiOK) cmp := ir.Nod(ir.OEQ, stab, ttab) - cmp = typecheck(cmp, ctxExpr) - cmp.SetType(types.Types[types.TBOOL]) - return cmp, call + cmp1 := typecheck(cmp, ctxExpr) + cmp1.SetType(types.Types[types.TBOOL]) + return cmp1, call1 } // eqmem returns the node // memequal(&p.field, &q.field [, size]) func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node { - nx := nodAddr(nodSym(ir.OXDOT, p, field)) - ny := nodAddr(nodSym(ir.OXDOT, q, field)) - nx = typecheck(nx, ctxExpr) - ny = typecheck(ny, ctxExpr) + nx := typecheck(nodAddr(nodSym(ir.OXDOT, p, field)), ctxExpr) + ny := typecheck(nodAddr(nodSym(ir.OXDOT, q, field)), ctxExpr) fn, needsize := eqmemfunc(size, nx.Type().Elem()) call := ir.Nod(ir.OCALL, fn, nil) diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index a3d8a46977cd2..954fa1a452e8c 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ 
b/src/cmd/compile/internal/gc/closure.go @@ -396,22 +396,22 @@ func walkclosure(clo ir.Node, init *ir.Nodes) ir.Node { clos.SetEsc(clo.Esc()) clos.PtrList().Set(append([]ir.Node{ir.Nod(ir.OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...)) - clos = nodAddr(clos) - clos.SetEsc(clo.Esc()) + addr := nodAddr(clos) + addr.SetEsc(clo.Esc()) // Force type conversion from *struct to the func type. - clos = convnop(clos, clo.Type()) + cfn := convnop(addr, clo.Type()) // non-escaping temp to use, if any. if x := prealloc[clo]; x != nil { if !types.Identical(typ, x.Type()) { panic("closure type does not match order's assigned type") } - clos.Left().SetRight(x) + addr.SetRight(x) delete(prealloc, clo) } - return walkexpr(clos, init) + return walkexpr(cfn, init) } func typecheckpartialcall(dot ir.Node, sym *types.Sym) *ir.CallPartExpr { @@ -482,11 +482,12 @@ func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) *ir.Func { call.PtrList().Set(paramNnames(tfn.Type())) call.SetIsDDD(tfn.Type().IsVariadic()) if t0.NumResults() != 0 { - n := ir.Nod(ir.ORETURN, nil, nil) - n.PtrList().Set1(call) - call = n + ret := ir.Nod(ir.ORETURN, nil, nil) + ret.PtrList().Set1(call) + body = append(body, ret) + } else { + body = append(body, call) } - body = append(body, call) fn.PtrBody().Set(body) funcbody() @@ -530,8 +531,7 @@ func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node { n.SetLeft(cheapexpr(n.Left(), init)) n.SetLeft(walkexpr(n.Left(), nil)) - tab := ir.Nod(ir.OITAB, n.Left(), nil) - tab = typecheck(tab, ctxExpr) + tab := typecheck(ir.Nod(ir.OITAB, n.Left(), nil), ctxExpr) c := ir.Nod(ir.OCHECKNIL, tab, nil) c.SetTypecheck(1) @@ -544,22 +544,22 @@ func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node { clos.SetEsc(n.Esc()) clos.PtrList().Set2(ir.Nod(ir.OCFUNC, n.Func().Nname, nil), n.Left()) - clos = nodAddr(clos) - clos.SetEsc(n.Esc()) + addr := nodAddr(clos) + addr.SetEsc(n.Esc()) // Force type conversion from *struct to the func type. - clos = convnop(clos, n.Type()) + cfn := convnop(addr, n.Type()) // non-escaping temp to use, if any. 
if x := prealloc[n]; x != nil { if !types.Identical(typ, x.Type()) { panic("partial call type does not match order's assigned type") } - clos.Left().SetRight(x) + addr.SetRight(x) delete(prealloc, n) } - return walkexpr(clos, init) + return walkexpr(cfn, init) } // callpartMethod returns the *types.Field representing the method diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 3c17f7d87f48b..04256d5aeb62f 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -1005,13 +1005,11 @@ func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool, } if as.Rlist().Len() != 0 { - as = typecheck(as, ctxStmt) - ninit.Append(as) + ninit.Append(typecheck(as, ctxStmt)) } if vas != nil { - vas = typecheck(vas, ctxStmt) - ninit.Append(vas) + ninit.Append(typecheck(vas, ctxStmt)) } if !delayretvars { @@ -1019,8 +1017,7 @@ func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool, for _, n := range retvars { ninit.Append(ir.Nod(ir.ODCL, n, nil)) ras := ir.Nod(ir.OAS, n, nil) - ras = typecheck(ras, ctxStmt) - ninit.Append(ras) + ninit.Append(typecheck(ras, ctxStmt)) } } @@ -1235,8 +1232,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { } } - as = typecheck(as, ctxStmt) - init = append(init, as) + init = append(init, typecheck(as, ctxStmt)) } init = append(init, nodSym(ir.OGOTO, nil, subst.retlabel)) typecheckslice(init, ctxStmt) @@ -1310,10 +1306,9 @@ func devirtualizeCall(call ir.Node) { return } - x := ir.NodAt(call.Left().Pos(), ir.ODOTTYPE, call.Left().Left(), nil) - x.SetType(typ) - x = nodlSym(call.Left().Pos(), ir.OXDOT, x, call.Left().Sym()) - x = typecheck(x, ctxExpr|ctxCallee) + dt := ir.NodAt(call.Left().Pos(), ir.ODOTTYPE, call.Left().Left(), nil) + dt.SetType(typ) + x := typecheck(nodlSym(call.Left().Pos(), ir.OXDOT, dt, call.Left().Sym()), ctxExpr|ctxCallee) switch x.Op() { case ir.ODOTMETH: if base.Flag.LowerM != 0 { diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index c3645256a6da1..56acdf75286d7 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -60,6 +60,11 @@ func order(fn *ir.Func) { orderBlock(fn.PtrBody(), map[string][]*ir.Name{}) } +// append typechecks stmt and appends it to out. +func (o *Order) append(stmt ir.Node) { + o.out = append(o.out, typecheck(stmt, ctxStmt)) +} + // newTemp allocates a new temporary with the given type, // pushes it onto the temp stack, and returns it. // If clear is true, newTemp emits code to zero the temporary. 
@@ -82,9 +87,7 @@ func (o *Order) newTemp(t *types.Type, clear bool) *ir.Name { v = temp(t) } if clear { - a := ir.Nod(ir.OAS, v, nil) - a = typecheck(a, ctxStmt) - o.out = append(o.out, a) + o.append(ir.Nod(ir.OAS, v, nil)) } o.temp = append(o.temp, v) @@ -114,9 +117,7 @@ func (o *Order) copyExprClear(n ir.Node) *ir.Name { func (o *Order) copyExpr1(n ir.Node, clear bool) *ir.Name { t := n.Type() v := o.newTemp(t, clear) - a := ir.Nod(ir.OAS, v, n) - a = typecheck(a, ctxStmt) - o.out = append(o.out, a) + o.append(ir.Nod(ir.OAS, v, n)) return v } @@ -306,9 +307,7 @@ func (o *Order) cleanTempNoPop(mark ordermarker) []ir.Node { var out []ir.Node for i := len(o.temp) - 1; i >= int(mark); i-- { n := o.temp[i] - kill := ir.Nod(ir.OVARKILL, n, nil) - kill = typecheck(kill, ctxStmt) - out = append(out, kill) + out = append(out, typecheck(ir.Nod(ir.OVARKILL, n, nil), ctxStmt)) } return out } @@ -407,9 +406,7 @@ func (o *Order) edge() { // counter += 1 incr := ir.Nod(ir.OASOP, counter, nodintconst(1)) incr.SetSubOp(ir.OADD) - incr = typecheck(incr, ctxStmt) - - o.out = append(o.out, incr) + o.append(incr) } // orderBlock orders the block of statements in n into a new slice, @@ -570,8 +567,7 @@ func (o *Order) mapAssign(n ir.Node) { t := o.newTemp(m.Type(), false) n.List().SetIndex(i, t) a := ir.Nod(ir.OAS, m, t) - a = typecheck(a, ctxStmt) - post = append(post, a) + post = append(post, typecheck(a, ctxStmt)) } } @@ -918,27 +914,23 @@ func (o *Order) stmt(n ir.Node) { // the conversion happens in the OAS instead. if r.Colas() { dcl := ir.Nod(ir.ODCL, dst, nil) - dcl = typecheck(dcl, ctxStmt) - n2.PtrInit().Append(dcl) + n2.PtrInit().Append(typecheck(dcl, ctxStmt)) } tmp := o.newTemp(recv.Left().Type().Elem(), recv.Left().Type().Elem().HasPointers()) as := ir.Nod(ir.OAS, dst, tmp) - as = typecheck(as, ctxStmt) - n2.PtrInit().Append(as) + n2.PtrInit().Append(typecheck(as, ctxStmt)) dst = tmp } if !ir.IsBlank(ok) { if r.Colas() { dcl := ir.Nod(ir.ODCL, ok, nil) - dcl = typecheck(dcl, ctxStmt) - n2.PtrInit().Append(dcl) + n2.PtrInit().Append(typecheck(dcl, ctxStmt)) } tmp := o.newTemp(types.Types[types.TBOOL], false) as := ir.Nod(ir.OAS, ok, conv(tmp, ok.Type())) - as = typecheck(as, ctxStmt) - n2.PtrInit().Append(as) + n2.PtrInit().Append(typecheck(as, ctxStmt)) ok = tmp } @@ -1408,8 +1400,7 @@ func (o *Order) as2(n ir.Node) { as := ir.Nod(ir.OAS2, nil, nil) as.PtrList().Set(left) as.PtrRlist().Set(tmplist) - as = typecheck(as, ctxStmt) - o.stmt(as) + o.stmt(typecheck(as, ctxStmt)) } // okAs2 orders OAS2XXX with ok. @@ -1429,14 +1420,12 @@ func (o *Order) okAs2(n ir.Node) { if tmp1 != nil { r := ir.Nod(ir.OAS, n.List().First(), tmp1) - r = typecheck(r, ctxStmt) - o.mapAssign(r) + o.mapAssign(typecheck(r, ctxStmt)) n.List().SetFirst(tmp1) } if tmp2 != nil { r := ir.Nod(ir.OAS, n.List().Second(), conv(tmp2, n.List().Second().Type())) - r = typecheck(r, ctxStmt) - o.mapAssign(r) + o.mapAssign(typecheck(r, ctxStmt)) n.List().SetSecond(tmp2) } } diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index 2589da7b5dca5..453f5e2198b6c 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -288,9 +288,8 @@ func walkrange(nrange ir.Node) ir.Node { // This runs *after* the condition check, so we know // advancing the pointer is safe and won't go past the // end of the allocation. 
- a = ir.Nod(ir.OAS, hp, addptr(hp, t.Elem().Width)) - a = typecheck(a, ctxStmt) - nfor.PtrList().Set1(a) + as := ir.Nod(ir.OAS, hp, addptr(hp, t.Elem().Width)) + nfor.PtrList().Set1(typecheck(as, ctxStmt)) case types.TMAP: // order.stmt allocated the iterator for us. @@ -312,15 +311,13 @@ func walkrange(nrange ir.Node) ir.Node { fn = substArgTypes(fn, th) nfor.SetRight(mkcall1(fn, nil, nil, nodAddr(hit))) - key := nodSym(ir.ODOT, hit, keysym) - key = ir.Nod(ir.ODEREF, key, nil) + key := ir.Nod(ir.ODEREF, nodSym(ir.ODOT, hit, keysym), nil) if v1 == nil { body = nil } else if v2 == nil { body = []ir.Node{ir.Nod(ir.OAS, v1, key)} } else { - elem := nodSym(ir.ODOT, hit, elemsym) - elem = ir.Nod(ir.ODEREF, elem, nil) + elem := ir.Nod(ir.ODEREF, nodSym(ir.ODOT, hit, elemsym), nil) a := ir.Nod(ir.OAS2, nil, nil) a.PtrList().Set2(v1, v2) a.PtrRlist().Set2(key, elem) @@ -570,19 +567,15 @@ func arrayClear(loop, v1, v2, a ir.Node) ir.Node { // hp = &a[0] hp := temp(types.Types[types.TUNSAFEPTR]) - tmp := ir.Nod(ir.OINDEX, a, nodintconst(0)) - tmp.SetBounded(true) - tmp = nodAddr(tmp) - tmp = convnop(tmp, types.Types[types.TUNSAFEPTR]) - n.PtrBody().Append(ir.Nod(ir.OAS, hp, tmp)) + ix := ir.Nod(ir.OINDEX, a, nodintconst(0)) + ix.SetBounded(true) + addr := convnop(nodAddr(ix), types.Types[types.TUNSAFEPTR]) + n.PtrBody().Append(ir.Nod(ir.OAS, hp, addr)) // hn = len(a) * sizeof(elem(a)) hn := temp(types.Types[types.TUINTPTR]) - - tmp = ir.Nod(ir.OLEN, a, nil) - tmp = ir.Nod(ir.OMUL, tmp, nodintconst(elemsize)) - tmp = conv(tmp, types.Types[types.TUINTPTR]) - n.PtrBody().Append(ir.Nod(ir.OAS, hn, tmp)) + mul := conv(ir.Nod(ir.OMUL, ir.Nod(ir.OLEN, a, nil), nodintconst(elemsize)), types.Types[types.TUINTPTR]) + n.PtrBody().Append(ir.Nod(ir.OAS, hn, mul)) var fn ir.Node if a.Type().Elem().HasPointers() { @@ -604,8 +597,7 @@ func arrayClear(loop, v1, v2, a ir.Node) ir.Node { n.SetLeft(typecheck(n.Left(), ctxExpr)) n.SetLeft(defaultlit(n.Left(), nil)) typecheckslice(n.Body().Slice(), ctxStmt) - n = walkstmt(n) - return n + return walkstmt(n) } // addptr returns (*T)(uintptr(p) + n). diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go index ec59f08638aff..0c2f2a87a208e 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/gc/select.go @@ -225,8 +225,7 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { if ir.IsBlank(elem) { elem = nodnil() } - receivedp := nodAddr(n.List().Second()) - receivedp = typecheck(receivedp, ctxExpr) + receivedp := typecheck(nodAddr(n.List().Second()), ctxExpr) call = mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch) } @@ -247,9 +246,7 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { // generate sel-struct base.Pos = sellineno selv := temp(types.NewArray(scasetype(), int64(ncas))) - r := ir.Nod(ir.OAS, selv, nil) - r = typecheck(r, ctxStmt) - init = append(init, r) + init = append(init, typecheck(ir.Nod(ir.OAS, selv, nil), ctxStmt)) // No initialization for order; runtime.selectgo is responsible for that. 
order := temp(types.NewArray(types.Types[types.TUINT16], 2*int64(ncas))) @@ -300,8 +297,7 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { setField := func(f string, val ir.Node) { r := ir.Nod(ir.OAS, nodSym(ir.ODOT, ir.Nod(ir.OINDEX, selv, nodintconst(int64(i))), lookup(f)), val) - r = typecheck(r, ctxStmt) - init = append(init, r) + init = append(init, typecheck(r, ctxStmt)) } c = convnop(c, types.Types[types.TUNSAFEPTR]) @@ -314,7 +310,7 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { // TODO(mdempsky): There should be a cleaner way to // handle this. if base.Flag.Race { - r = mkcall("selectsetpc", nil, nil, nodAddr(ir.Nod(ir.OINDEX, pcs, nodintconst(int64(i))))) + r := mkcall("selectsetpc", nil, nil, nodAddr(ir.Nod(ir.OINDEX, pcs, nodintconst(int64(i))))) init = append(init, r) } } @@ -326,12 +322,11 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { base.Pos = sellineno chosen := temp(types.Types[types.TINT]) recvOK := temp(types.Types[types.TBOOL]) - r = ir.Nod(ir.OAS2, nil, nil) + r := ir.Nod(ir.OAS2, nil, nil) r.PtrList().Set2(chosen, recvOK) fn := syslook("selectgo") r.PtrRlist().Set1(mkcall1(fn, fn.Type().Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil))) - r = typecheck(r, ctxStmt) - init = append(init, r) + init = append(init, typecheck(r, ctxStmt)) // selv and order are no longer alive after selectgo. init = append(init, ir.Nod(ir.OVARKILL, selv, nil)) @@ -349,8 +344,7 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { if n := cas.Left(); n != nil && n.Op() == ir.OSELRECV2 { x := ir.Nod(ir.OAS, n.List().Second(), recvOK) - x = typecheck(x, ctxStmt) - r.PtrBody().Append(x) + r.PtrBody().Append(typecheck(x, ctxStmt)) } r.PtrBody().AppendNodes(cas.PtrBody()) diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 646c8dafce68d..14ff853ee5e62 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -391,10 +391,7 @@ func isSimpleName(n ir.Node) bool { } func litas(l ir.Node, r ir.Node, init *ir.Nodes) { - a := ir.Nod(ir.OAS, l, r) - a = typecheck(a, ctxStmt) - a = walkexpr(a, init) - init.Append(a) + appendWalkStmt(init, ir.Nod(ir.OAS, l, r)) } // initGenType is a bitmap indicating the types of generation that will occur for a static value. 
@@ -528,7 +525,7 @@ func fixedlit(ctxt initContext, kind initKind, n ir.Node, var_ ir.Node, init *ir a := ir.Nod(ir.OINDEX, var_, nodintconst(k)) k++ if isBlank { - a = ir.BlankNode + return ir.BlankNode, r } return a, r } @@ -691,20 +688,12 @@ func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) { } else { a = ir.Nod(ir.ONEW, ir.TypeNode(t), nil) } - - a = ir.Nod(ir.OAS, vauto, a) - a = typecheck(a, ctxStmt) - a = walkexpr(a, init) - init.Append(a) + appendWalkStmt(init, ir.Nod(ir.OAS, vauto, a)) if vstat != nil { // copy static to heap (4) a = ir.Nod(ir.ODEREF, vauto, nil) - - a = ir.Nod(ir.OAS, a, vstat) - a = typecheck(a, ctxStmt) - a = walkexpr(a, init) - init.Append(a) + appendWalkStmt(init, ir.Nod(ir.OAS, a, vstat)) } // put dynamics into array (5) @@ -744,12 +733,10 @@ func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) { // build list of vauto[c] = expr setlineno(value) - a = ir.Nod(ir.OAS, a, value) - - a = typecheck(a, ctxStmt) - a = orderStmtInPlace(a, map[string][]*ir.Name{}) - a = walkstmt(a) - init.Append(a) + as := typecheck(ir.Nod(ir.OAS, a, value), ctxStmt) + as = orderStmtInPlace(as, map[string][]*ir.Name{}) + as = walkstmt(as) + init.Append(as) } // make slice out of heap (6) @@ -825,9 +812,7 @@ func maplit(n ir.Node, m ir.Node, init *ir.Nodes) { loop.PtrBody().Set1(body) loop.PtrInit().Set1(zero) - loop = typecheck(loop, ctxStmt) - loop = walkstmt(loop) - init.Append(loop) + appendWalkStmt(init, loop) return } // For a small number of entries, just add them directly. @@ -842,30 +827,17 @@ func maplit(n ir.Node, m ir.Node, init *ir.Nodes) { index, elem := r.Left(), r.Right() setlineno(index) - a := ir.Nod(ir.OAS, tmpkey, index) - a = typecheck(a, ctxStmt) - a = walkstmt(a) - init.Append(a) + appendWalkStmt(init, ir.Nod(ir.OAS, tmpkey, index)) setlineno(elem) - a = ir.Nod(ir.OAS, tmpelem, elem) - a = typecheck(a, ctxStmt) - a = walkstmt(a) - init.Append(a) + appendWalkStmt(init, ir.Nod(ir.OAS, tmpelem, elem)) setlineno(tmpelem) - a = ir.Nod(ir.OAS, ir.Nod(ir.OINDEX, m, tmpkey), tmpelem) - a = typecheck(a, ctxStmt) - a = walkstmt(a) - init.Append(a) + appendWalkStmt(init, ir.Nod(ir.OAS, ir.Nod(ir.OINDEX, m, tmpkey), tmpelem)) } - a = ir.Nod(ir.OVARKILL, tmpkey, nil) - a = typecheck(a, ctxStmt) - init.Append(a) - a = ir.Nod(ir.OVARKILL, tmpelem, nil) - a = typecheck(a, ctxStmt) - init.Append(a) + appendWalkStmt(init, ir.Nod(ir.OVARKILL, tmpkey, nil)) + appendWalkStmt(init, ir.Nod(ir.OVARKILL, tmpelem, nil)) } func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { @@ -875,9 +847,7 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { base.Fatalf("anylit: not lit, op=%v node=%v", n.Op(), n) case ir.ONAME, ir.OMETHEXPR: - a := ir.Nod(ir.OAS, var_, n) - a = typecheck(a, ctxStmt) - init.Append(a) + appendWalkStmt(init, ir.Nod(ir.OAS, var_, n)) case ir.OPTRLIT: if !t.IsPtr() { @@ -887,20 +857,13 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { var r ir.Node if n.Right() != nil { // n.Right is stack temporary used as backing store. 
- init.Append(ir.Nod(ir.OAS, n.Right(), nil)) // zero backing store, just in case (#18410) + appendWalkStmt(init, ir.Nod(ir.OAS, n.Right(), nil)) // zero backing store, just in case (#18410) r = nodAddr(n.Right()) - r = typecheck(r, ctxExpr) } else { r = ir.Nod(ir.ONEW, ir.TypeNode(n.Left().Type()), nil) - r = typecheck(r, ctxExpr) r.SetEsc(n.Esc()) } - - r = walkexpr(r, init) - a := ir.Nod(ir.OAS, var_, r) - - a = typecheck(a, ctxStmt) - init.Append(a) + appendWalkStmt(init, ir.Nod(ir.OAS, var_, r)) var_ = ir.Nod(ir.ODEREF, var_, nil) var_ = typecheck(var_, ctxExpr|ctxAssign) @@ -922,11 +885,7 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { fixedlit(ctxt, initKindStatic, n, vstat, init) // copy static to var - a := ir.Nod(ir.OAS, var_, vstat) - - a = typecheck(a, ctxStmt) - a = walkexpr(a, init) - init.Append(a) + appendWalkStmt(init, ir.Nod(ir.OAS, var_, vstat)) // add expressions to automatic fixedlit(inInitFunction, initKindDynamic, n, var_, init) @@ -941,10 +900,7 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { } // initialization of an array or struct with unspecified components (missing fields or arrays) if isSimpleName(var_) || int64(n.List().Len()) < components { - a := ir.Nod(ir.OAS, var_, nil) - a = typecheck(a, ctxStmt) - a = walkexpr(a, init) - init.Append(a) + appendWalkStmt(init, ir.Nod(ir.OAS, var_, nil)) } fixedlit(inInitFunction, initKindLocalCode, n, var_, init) diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 2082544d086f5..ae100507f6bc1 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -153,12 +153,14 @@ func checkDotImports() { dotImportRefs = nil } -// nodAddr returns a node representing &n. -func nodAddr(n ir.Node) ir.Node { - return ir.Nod(ir.OADDR, n, nil) +// nodAddr returns a node representing &n at base.Pos. +func nodAddr(n ir.Node) *ir.AddrExpr { + return nodAddrAt(base.Pos, n) } -func nodAddrAt(pos src.XPos, n ir.Node) ir.Node { - return ir.NodAt(pos, ir.OADDR, n, nil) + +// nodAddrPos returns a node representing &n at position pos. +func nodAddrAt(pos src.XPos, n ir.Node) *ir.AddrExpr { + return ir.NewAddrExpr(pos, n) } // newname returns a new ONAME Node associated with symbol s. 
@@ -774,10 +776,7 @@ func safeexpr(n ir.Node, init *ir.Nodes) ir.Node { func copyexpr(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node { l := temp(t) - a := ir.Nod(ir.OAS, l, n) - a = typecheck(a, ctxStmt) - a = walkexpr(a, init) - init.Append(a) + appendWalkStmt(init, ir.Nod(ir.OAS, l, n)) return l } @@ -1195,11 +1194,12 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { call.PtrList().Set(paramNnames(tfn.Type())) call.SetIsDDD(tfn.Type().IsVariadic()) if method.Type.NumResults() > 0 { - n := ir.Nod(ir.ORETURN, nil, nil) - n.PtrList().Set1(call) - call = n + ret := ir.Nod(ir.ORETURN, nil, nil) + ret.PtrList().Set1(call) + fn.PtrBody().Append(ret) + } else { + fn.PtrBody().Append(call) } - fn.PtrBody().Append(call) } if false && base.Flag.LowerR != 0 { diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index e241721588819..aa4574d334341 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -654,9 +654,7 @@ func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp ir.Node) { dot := ir.NodAt(pos, ir.ODOTTYPE, s.facename, nil) dot.SetType(typ) // iface.(type) as.PtrRlist().Set1(dot) - as = typecheck(as, ctxStmt) - as = walkexpr(as, &body) - body.Append(as) + appendWalkStmt(&body, as) // if ok { goto label } nif := ir.NodAt(pos, ir.OIF, nil, nil) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 49e4289f141ea..be868afcd8b6b 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -2163,8 +2163,7 @@ func typecheckargs(n ir.Node) { Curfn = nil } - as = typecheck(as, ctxStmt) - n.PtrInit().Append(as) + n.PtrInit().Append(typecheck(as, ctxStmt)) } func checksliceindex(l ir.Node, r ir.Node, tp *types.Type) bool { @@ -2397,7 +2396,7 @@ func typecheckMethodExpr(n ir.Node) (res ir.Node) { me.SetType(methodfunc(m.Type, n.Left().Type())) me.SetOffset(0) me.SetClass(ir.PFUNC) - me.(*ir.MethodExpr).Method = m + ir.Node(me).(*ir.MethodExpr).Method = m // Issue 25065. Make sure that we emit the symbol for a local method. 
if base.Ctxt.Flag_dynlink && !inimport && (t.Sym() == nil || t.Sym().Pkg == types.LocalPkg) { @@ -3419,8 +3418,7 @@ func stringtoruneslit(n ir.Node) ir.Node { nn := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(n.Type())) nn.PtrList().Set(l) - nn = typecheck(nn, ctxExpr) - return nn + return typecheck(nn, ctxExpr) } var mapqueue []*ir.MapType diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index c9dbf917020eb..790e51f1e6b22 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -1306,8 +1306,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap na := ir.Nod(ir.OAS, nodSym(ir.ODOT, h, bsym), b) nif.PtrBody().Append(na) - - init.Append(walkstmt(typecheck(nif, ctxStmt))) + appendWalkStmt(init, nif) } } @@ -1325,7 +1324,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // h.hash0 = fastrand() rand := mkcall("fastrand", types.Types[types.TUINT32], init) hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap - appendWalk(init, ir.Nod(ir.OAS, nodSym(ir.ODOT, h, hashsym), rand)) + appendWalkStmt(init, ir.Nod(ir.OAS, nodSym(ir.ODOT, h, hashsym), rand)) return convnop(h, t) } // Call runtime.makehmap to allocate an @@ -1398,8 +1397,8 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { t = types.NewArray(t.Elem(), i) // [r]T var_ := temp(t) - appendWalk(init, ir.Nod(ir.OAS, var_, nil)) // zero temp - r := ir.Nod(ir.OSLICE, var_, nil) // arr[:l] + appendWalkStmt(init, ir.Nod(ir.OAS, var_, nil)) // zero temp + r := ir.Nod(ir.OSLICE, var_, nil) // arr[:l] r.SetSliceBounds(nil, l, nil) // The conv is necessary in case n.Type is named. return walkexpr(typecheck(conv(r, n.Type()), ctxExpr), init) @@ -1547,7 +1546,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { as := ir.Nod(ir.OAS, ir.Nod(ir.ODEREF, p, nil), ir.Nod(ir.ODEREF, convnop(ir.Nod(ir.OSPTR, s, nil), t.PtrTo()), nil)) - appendWalk(init, as) + appendWalkStmt(init, as) } // Slice the [n]byte to a []byte. @@ -2807,7 +2806,7 @@ func appendslice(n ir.Node, init *ir.Nodes) ir.Node { // memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T)) ix := ir.Nod(ir.OINDEX, s, ir.Nod(ir.OLEN, l1, nil)) ix.SetBounded(true) - addr := ir.Nod(ir.OADDR, ix, nil) + addr := nodAddr(ix) sptr := ir.Nod(ir.OSPTR, l2, nil) @@ -2953,7 +2952,7 @@ func extendslice(n ir.Node, init *ir.Nodes) ir.Node { // hp := &s[len(l1)] ix := ir.Nod(ir.OINDEX, s, ir.Nod(ir.OLEN, l1, nil)) ix.SetBounded(true) - hp := convnop(ir.Nod(ir.OADDR, ix, nil), types.Types[types.TUNSAFEPTR]) + hp := convnop(nodAddr(ix), types.Types[types.TUNSAFEPTR]) // hn := l2 * sizeof(elem(s)) hn := conv(ir.Nod(ir.OMUL, l2, nodintconst(elemtype.Width)), types.Types[types.TUINTPTR]) @@ -4071,3 +4070,19 @@ func walkCheckPtrArithmetic(n ir.Node, init *ir.Nodes) ir.Node { func checkPtr(fn *ir.Func, level int) bool { return base.Debug.Checkptr >= level && fn.Pragma&ir.NoCheckPtr == 0 } + +// appendWalkStmt typechecks and walks stmt and then appends it to init. +func appendWalkStmt(init *ir.Nodes, stmt ir.Node) { + op := stmt.Op() + n := typecheck(stmt, ctxStmt) + if op == ir.OAS || op == ir.OAS2 { + // If the assignment has side effects, walkexpr will append them + // directly to init for us, while walkstmt will wrap it in an OBLOCK. + // We need to append them directly. + // TODO(rsc): Clean this up. 
+ n = walkexpr(n, init) + } else { + n = walkstmt(n) + } + init.Append(n) +} From 578fbbe3aa5cada6e32b686d71a5832d6ca846dc Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 7 Dec 2020 14:58:26 -0500 Subject: [PATCH 165/474] [dev.regabi] cmd/compile: rewrite some generic ir.Nod calls An automated rewrite is going to remove the bulk of the calls to ir.Nod and friends. This CL takes care of the ones that don't have fixed opcodes and so aren't amenable to automatic rewriting. Passes buildall w/ toolstash -cmp. Replay of CL 275886, lost to the bad-merge history rewrite. Change-Id: I5bf8d1d182f847f4ab44b7e278b752913e30e4c8 Reviewed-on: https://go-review.googlesource.com/c/go/+/277956 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/iimport.go | 50 +++++++++++++++++------- src/cmd/compile/internal/gc/noder.go | 27 +++++++++---- src/cmd/compile/internal/gc/order.go | 7 +--- src/cmd/compile/internal/gc/select.go | 6 ++- src/cmd/compile/internal/gc/subr.go | 5 +-- src/cmd/compile/internal/gc/typecheck.go | 20 ++++------ src/cmd/compile/internal/gc/walk.go | 42 ++++++++------------ 7 files changed, 89 insertions(+), 68 deletions(-) diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 0e2af562d0333..1096d7988e37f 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -894,10 +894,10 @@ func (r *importReader) node() ir.Node { // unreachable - mapped to cases below by exporter case ir.OINDEX: - return ir.NodAt(r.pos(), op, r.expr(), r.expr()) + return ir.NodAt(r.pos(), ir.OINDEX, r.expr(), r.expr()) case ir.OSLICE, ir.OSLICE3: - n := ir.NodAt(r.pos(), op, r.expr(), nil) + n := ir.NewSliceExpr(r.pos(), op, r.expr()) low, high := r.exprsOrNil() var max ir.Node if n.Op().IsSlice3() { @@ -940,15 +940,25 @@ func (r *importReader) node() ir.Node { return n // unary expressions - case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ODEREF, ir.ONOT, ir.ORECV: - return ir.NodAt(r.pos(), op, r.expr(), nil) + case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.ORECV: + return ir.NewUnaryExpr(r.pos(), op, r.expr()) + case ir.OADDR: return nodAddrAt(r.pos(), r.expr()) + case ir.ODEREF: + return ir.NewStarExpr(r.pos(), r.expr()) + // binary expressions - case ir.OADD, ir.OAND, ir.OANDAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT, - ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.OOROR, ir.ORSH, ir.OSEND, ir.OSUB, ir.OXOR: - return ir.NodAt(r.pos(), op, r.expr(), r.expr()) + case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT, + ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.ORSH, ir.OSUB, ir.OXOR: + return ir.NewBinaryExpr(r.pos(), op, r.expr(), r.expr()) + + case ir.OANDAND, ir.OOROR: + return ir.NewLogicalExpr(r.pos(), op, r.expr(), r.expr()) + + case ir.OSEND: + return ir.NewSendStmt(r.pos(), r.expr(), r.expr()) case ir.OADDSTR: pos := r.pos() @@ -1003,7 +1013,7 @@ func (r *importReader) node() ir.Node { // unreachable - generated by compiler for trampolin routines (not exported) case ir.OGO, ir.ODEFER: - return ir.NodAt(r.pos(), op, r.expr(), nil) + return ir.NewGoDeferStmt(r.pos(), op, r.expr()) case ir.OIF: n := ir.NodAt(r.pos(), ir.OIF, nil, nil) @@ -1029,8 +1039,16 @@ func (r *importReader) node() ir.Node { n.PtrBody().Set(r.stmtList()) return n - case ir.OSELECT, ir.OSWITCH: - n := ir.NodAt(r.pos(), op, nil, nil) + case ir.OSELECT: + n := ir.NodAt(r.pos(), ir.OSELECT, nil, nil) + n.PtrInit().Set(r.stmtList()) + left, _ := r.exprsOrNil() + n.SetLeft(left) + 
n.PtrList().Set(r.caseList(n)) + return n + + case ir.OSWITCH: + n := ir.NodAt(r.pos(), ir.OSWITCH, nil, nil) n.PtrInit().Set(r.stmtList()) left, _ := r.exprsOrNil() n.SetLeft(left) @@ -1047,12 +1065,16 @@ func (r *importReader) node() ir.Node { // case OEMPTY: // unreachable - not emitted by exporter - case ir.OBREAK, ir.OCONTINUE, ir.OGOTO, ir.OLABEL: - n := ir.NodAt(r.pos(), op, nil, nil) + case ir.OBREAK, ir.OCONTINUE, ir.OGOTO: + var sym *types.Sym + pos := r.pos() if label := r.string(); label != "" { - n.SetSym(lookup(label)) + sym = lookup(label) } - return n + return ir.NewBranchStmt(pos, op, sym) + + case ir.OLABEL: + return ir.NewLabelStmt(r.pos(), lookup(r.string())) case ir.OEND: return nil diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 55628352bdb91..4c8e56731bee2 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -699,7 +699,7 @@ func (p *noder) expr(expr syntax.Expr) ir.Node { if expr.Full { op = ir.OSLICE3 } - n := p.nod(expr, op, p.expr(expr.X), nil) + n := ir.NewSliceExpr(p.pos(expr), op, p.expr(expr.X)) var index [3]ir.Node for i, x := range &expr.Index { if x != nil { @@ -716,9 +716,22 @@ func (p *noder) expr(expr syntax.Expr) ir.Node { } x := p.expr(expr.X) if expr.Y == nil { - return p.nod(expr, p.unOp(expr.Op), x, nil) + pos, op := p.pos(expr), p.unOp(expr.Op) + switch op { + case ir.OADDR: + return nodAddrAt(pos, x) + case ir.ODEREF: + return ir.NewStarExpr(pos, x) + } + return ir.NewUnaryExpr(pos, op, x) + } + + pos, op, y := p.pos(expr), p.binOp(expr.Op), p.expr(expr.Y) + switch op { + case ir.OANDAND, ir.OOROR: + return ir.NewLogicalExpr(pos, op, x, y) } - return p.nod(expr, p.binOp(expr.Op), x, p.expr(expr.Y)) + return ir.NewBinaryExpr(pos, op, x, y) case *syntax.CallExpr: n := p.nod(expr, ir.OCALL, p.expr(expr.Fun), nil) n.PtrList().Set(p.exprs(expr.ArgList)) @@ -1043,11 +1056,11 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node { default: panic("unhandled BranchStmt") } - n := p.nod(stmt, op, nil, nil) + var sym *types.Sym if stmt.Label != nil { - n.SetSym(p.name(stmt.Label)) + sym = p.name(stmt.Label) } - return n + return ir.NewBranchStmt(p.pos(stmt), op, sym) case *syntax.CallStmt: var op ir.Op switch stmt.Tok { @@ -1058,7 +1071,7 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node { default: panic("unhandled CallStmt") } - return p.nod(stmt, op, p.expr(stmt.Call), nil) + return ir.NewGoDeferStmt(p.pos(stmt), op, p.expr(stmt.Call)) case *syntax.ReturnStmt: var results []ir.Node if stmt.Results != nil { diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 56acdf75286d7..fe64738856107 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -619,11 +619,8 @@ func (o *Order) stmt(n ir.Node) { l2.SetIndexMapLValue(false) } l2 = o.copyExpr(l2) - r := ir.NodAt(n.Pos(), n.SubOp(), l2, n.Right()) - r = typecheck(r, ctxExpr) - r = o.expr(r, nil) - n = ir.NodAt(n.Pos(), ir.OAS, l1, r) - n = typecheck(n, ctxStmt) + r := o.expr(typecheck(ir.NewBinaryExpr(n.Pos(), n.SubOp(), l2, n.Right()), ctxExpr), nil) + n = typecheck(ir.NodAt(n.Pos(), ir.OAS, l1, r), ctxStmt) } o.mapAssign(n) diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go index 0c2f2a87a208e..dd08b77b927f2 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/gc/select.go @@ -70,7 +70,8 @@ func typecheckselect(sel ir.Node) { case ir.ORECV: // 
convert <-c into OSELRECV(_, <-c) - n = ir.NodAt(n.Pos(), ir.OSELRECV, ir.BlankNode, n) + n = ir.NodAt(n.Pos(), ir.OAS, ir.BlankNode, n) + n.SetOp(ir.OSELRECV) n.SetTypecheck(1) ncase.SetLeft(n) @@ -164,7 +165,8 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { // Lower x, _ = <-c to x = <-c. if n.Op() == ir.OSELRECV2 && ir.IsBlank(n.List().Second()) { - n = ir.NodAt(n.Pos(), ir.OSELRECV, n.List().First(), n.Rlist().First()) + n = ir.NodAt(n.Pos(), ir.OAS, n.List().First(), n.Rlist().First()) + n.SetOp(ir.OSELRECV) n.SetTypecheck(1) cas.SetLeft(n) } diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index ae100507f6bc1..37e49d0544988 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -547,8 +547,7 @@ func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node { op = ir.OCONV } - r := ir.Nod(op, n, nil) - r.SetType(t) + r := ir.NewConvExpr(base.Pos, op, t, n) r.SetTypecheck(1) r.SetImplicit(true) return r @@ -1169,7 +1168,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { fn.PtrBody().Append(n) } - dot := adddot(nodSym(ir.OXDOT, nthis, method.Sym)) + dot := adddot(ir.NewSelectorExpr(base.Pos, ir.OXDOT, nthis, method.Sym)) // generate call // It's not possible to use a tail call when dynamic linking on ppc64le. The diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index be868afcd8b6b..6dc9c5820d130 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -766,8 +766,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { dowidth(l.Type()) if r.Type().IsInterface() == l.Type().IsInterface() || l.Type().Width >= 1<<16 { - l = ir.Nod(aop, l, nil) - l.SetType(r.Type()) + l = ir.NewConvExpr(base.Pos, aop, r.Type(), l) l.SetTypecheck(1) n.SetLeft(l) } @@ -788,8 +787,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { dowidth(r.Type()) if r.Type().IsInterface() == l.Type().IsInterface() || r.Type().Width >= 1<<16 { - r = ir.Nod(aop, r, nil) - r.SetType(l.Type()) + r = ir.NewConvExpr(base.Pos, aop, l.Type(), r) r.SetTypecheck(1) n.SetRight(r) } @@ -1361,12 +1359,12 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { switch l.SubOp() { default: base.Fatalf("unknown builtin %v", l) - return n case ir.OAPPEND, ir.ODELETE, ir.OMAKE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER: n.SetOp(l.SubOp()) n.SetLeft(nil) n.SetTypecheck(0) // re-typechecking new op is OK, not a loop + return typecheck(n, top) case ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.OPANIC, ir.OREAL: typecheckargs(n) @@ -1377,9 +1375,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetType(nil) return n } - old := n - n = ir.NodAt(n.Pos(), l.SubOp(), arg, nil) - n = initExpr(old.Init().Slice(), n) // typecheckargs can add to old.Init + u := ir.NewUnaryExpr(n.Pos(), l.SubOp(), arg) + return typecheck(initExpr(n.Init().Slice(), u), top) // typecheckargs can add to old.Init case ir.OCOMPLEX, ir.OCOPY: typecheckargs(n) @@ -1388,11 +1385,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetType(nil) return n } - old := n - n = ir.NodAt(n.Pos(), l.SubOp(), arg1, arg2) - n = initExpr(old.Init().Slice(), n) // typecheckargs can add to old.Init + b := ir.NewBinaryExpr(n.Pos(), l.SubOp(), arg1, arg2) + return typecheck(initExpr(n.Init().Slice(), b), top) // typecheckargs can add to old.Init } - return typecheck(n, top) + panic("unreachable") } n.SetLeft(defaultlit(n.Left(), nil)) diff --git 
a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 790e51f1e6b22..ad5103f8514c1 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -666,7 +666,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if n.Op() == ir.OASOP { // Rewrite x op= y into x = x op y. n = ir.Nod(ir.OAS, n.Left(), - typecheck(ir.Nod(n.SubOp(), n.Left(), n.Right()), ctxExpr)) + typecheck(ir.NewBinaryExpr(base.Pos, n.SubOp(), n.Left(), n.Right()), ctxExpr)) } if oaslit(n, init) { @@ -3232,16 +3232,16 @@ func walkcompare(n ir.Node, init *ir.Nodes) ir.Node { if l.Type().IsEmptyInterface() { tab.SetType(types.NewPtr(types.Types[types.TUINT8])) tab.SetTypecheck(1) - eqtype = ir.Nod(eq, tab, rtyp) + eqtype = ir.NewBinaryExpr(base.Pos, eq, tab, rtyp) } else { - nonnil := ir.Nod(brcom(eq), nodnil(), tab) - match := ir.Nod(eq, itabType(tab), rtyp) - eqtype = ir.Nod(andor, nonnil, match) + nonnil := ir.NewBinaryExpr(base.Pos, brcom(eq), nodnil(), tab) + match := ir.NewBinaryExpr(base.Pos, eq, itabType(tab), rtyp) + eqtype = ir.NewLogicalExpr(base.Pos, andor, nonnil, match) } // Check for data equal. - eqdata := ir.Nod(eq, ifaceData(n.Pos(), l, r.Type()), r) + eqdata := ir.NewBinaryExpr(base.Pos, eq, ifaceData(n.Pos(), l, r.Type()), r) // Put it all together. - expr := ir.Nod(andor, eqtype, eqdata) + expr := ir.NewLogicalExpr(base.Pos, andor, eqtype, eqdata) n = finishcompare(n, expr, init) return n } @@ -3354,11 +3354,11 @@ func walkcompare(n ir.Node, init *ir.Nodes) ir.Node { } var expr ir.Node compare := func(el, er ir.Node) { - a := ir.Nod(n.Op(), el, er) + a := ir.NewBinaryExpr(base.Pos, n.Op(), el, er) if expr == nil { expr = a } else { - expr = ir.Nod(andor, expr, a) + expr = ir.NewLogicalExpr(base.Pos, andor, expr, a) } } cmpl = safeexpr(cmpl, init) @@ -3519,13 +3519,13 @@ func walkcompareString(n ir.Node, init *ir.Nodes) ir.Node { if len(s) > 0 { ncs = safeexpr(ncs, init) } - r := ir.Nod(cmp, ir.Nod(ir.OLEN, ncs, nil), nodintconst(int64(len(s)))) + r := ir.Node(ir.NewBinaryExpr(base.Pos, cmp, ir.Nod(ir.OLEN, ncs, nil), nodintconst(int64(len(s))))) remains := len(s) for i := 0; remains > 0; { if remains == 1 || !canCombineLoads { cb := nodintconst(int64(s[i])) ncb := ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i))) - r = ir.Nod(and, r, ir.Nod(cmp, ncb, cb)) + r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, ncb, cb)) remains-- i++ continue @@ -3556,7 +3556,7 @@ func walkcompareString(n ir.Node, init *ir.Nodes) ir.Node { } csubstrPart := nodintconst(csubstr) // Compare "step" bytes as once - r = ir.Nod(and, r, ir.Nod(cmp, csubstrPart, ncsubstr)) + r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, csubstrPart, ncsubstr)) remains -= step i += step } @@ -3583,7 +3583,7 @@ func walkcompareString(n ir.Node, init *ir.Nodes) ir.Node { } else { // sys_cmpstring(s1, s2) :: 0 r = mkcall("cmpstring", types.Types[types.TINT], init, conv(n.Left(), types.Types[types.TSTRING]), conv(n.Right(), types.Types[types.TSTRING])) - r = ir.Nod(n.Op(), r, nodintconst(0)) + r = ir.NewBinaryExpr(base.Pos, n.Op(), r, nodintconst(0)) } return finishcompare(n, r, init) @@ -3909,17 +3909,13 @@ func wrapCall(n ir.Node, init *ir.Nodes) ir.Node { if origArg == nil { continue } - arg := ir.Nod(origArg.Op(), args[i], nil) - arg.SetType(origArg.Type()) - args[i] = arg + args[i] = ir.NewConvExpr(base.Pos, origArg.Op(), origArg.Type(), args[i]) } - call := ir.Nod(n.Op(), nil, nil) + call := ir.NewCallExpr(base.Pos, n.Op(), n.Left(), args) if 
!isBuiltinCall { call.SetOp(ir.OCALL) - call.SetLeft(n.Left()) call.SetIsDDD(n.IsDDD()) } - call.PtrList().Set(args) fn.PtrBody().Set1(call) funcbody() @@ -3928,12 +3924,8 @@ func wrapCall(n ir.Node, init *ir.Nodes) ir.Node { typecheckslice(fn.Body().Slice(), ctxStmt) xtop = append(xtop, fn) - call = ir.Nod(ir.OCALL, nil, nil) - call.SetLeft(fn.Nname) - call.PtrList().Set(n.List().Slice()) - call = typecheck(call, ctxStmt) - call = walkexpr(call, init) - return call + call = ir.NewCallExpr(base.Pos, ir.OCALL, fn.Nname, n.List().Slice()) + return walkexpr(typecheck(call, ctxStmt), init) } // substArgTypes substitutes the given list of types for From a997543292df533f5951cd8fda39692a44077151 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 7 Dec 2020 16:07:38 -0500 Subject: [PATCH 166/474] [dev.regabi] cmd/compile: fix potential closure waste in Order I haven't measured this, but it's the only use of EditChildren where we aren't careful to allocate a closure once and use it for the whole recursion. This one is allocating a closure at every level of the recursion, and it was an oversight that it wasn't cleaned up in the original CL. Passes buildall w/ toolstash -cmp. Change-Id: I5e3f1795c6f64c5867a19c077f797643aa1066a3 Reviewed-on: https://go-review.googlesource.com/c/go/+/277914 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/order.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index fe64738856107..e0c0cabcde3ff 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -47,6 +47,7 @@ type Order struct { out []ir.Node // list of generated statements temp []*ir.Name // stack of temporary variables free map[string][]*ir.Name // free list of unused temporaries, by type.LongString(). + edit func(ir.Node) ir.Node // cached closure of o.exprNoLHS } // Order rewrites fn.Nbody to apply the ordering constraints @@ -1072,7 +1073,10 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { switch n.Op() { default: - ir.EditChildren(n, o.exprNoLHS) + if o.edit == nil { + o.edit = o.exprNoLHS // create closure once + } + ir.EditChildren(n, o.edit) // Addition of strings turns into a function call. // Allocate a temporary to hold the strings. From 4dfc7333f4ebe67e0aa7f429ce73c9d58a2fc309 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 10 Dec 2020 20:55:10 -0500 Subject: [PATCH 167/474] [dev.regabi] cmd/compile: update ir/fmt for concrete types An automated rewrite will add concrete type assertions after a test of n.Op(), when n can be safely type-asserted (meaning, n is not reassigned a different type, n is not reassigned and then used outside the scope of the type assertion, and so on). This sequence of CLs handles the code that the automated rewrite does not: adding specific types to function arguments, adjusting code not to call n.Left() etc when n may have multiple representations, and so on. This CL handles package fmt. There are various type assertions but also some rewriting to lean more heavily on reflection. Passes buildall w/ toolstash -cmp. 
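For reference, the rewrite pattern being prepared here looks roughly
like this (a minimal sketch, not code from this CL; doWalk is a
hypothetical helper):

    switch n.Op() {
    case OAS:
        // Once n.Op() is known to be OAS, n can safely be
        // asserted to its concrete type and shadowed under
        // the same name, so later uses get typed fields.
        n := n.(*AssignStmt)
        doWalk(n.Left(), n.Right())
    }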
Change-Id: I503467468b42ace11bff2ba014b03cfa345e6d03 Reviewed-on: https://go-review.googlesource.com/c/go/+/277915 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/fmt.go | 285 +++++++++++++++++++--------- src/cmd/compile/internal/ir/name.go | 22 +-- test/escape_param.go | 4 +- 3 files changed, 208 insertions(+), 103 deletions(-) diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 4bea6e2ae0938..3cda9c8c38d4b 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -9,6 +9,7 @@ import ( "fmt" "go/constant" "io" + "math" "os" "path/filepath" "reflect" @@ -141,7 +142,7 @@ func FmtNode(n Node, s fmt.State, verb rune) { } if n == nil { - fmt.Fprint(s, "") + fmt.Fprint(s, "") return } @@ -330,12 +331,14 @@ func stmtFmt(n Node, s fmt.State) { switch n.Op() { case ODCL: + n := n.(*Decl) fmt.Fprintf(s, "var %v %v", n.Left().Sym(), n.Left().Type()) // Don't export "v = " initializing statements, hope they're always // preceded by the DCL which will be re-parsed and typechecked to reproduce // the "v = " again. case OAS: + n := n.(*AssignStmt) if n.Colas() && !complexinit { fmt.Fprintf(s, "%v := %v", n.Left(), n.Right()) } else { @@ -343,6 +346,7 @@ func stmtFmt(n Node, s fmt.State) { } case OASOP: + n := n.(*AssignOpStmt) if n.Implicit() { if n.SubOp() == OADD { fmt.Fprintf(s, "%v++", n.Left()) @@ -355,6 +359,7 @@ func stmtFmt(n Node, s fmt.State) { fmt.Fprintf(s, "%v %v= %v", n.Left(), n.SubOp(), n.Right()) case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV: + n := n.(*AssignListStmt) if n.Colas() && !complexinit { fmt.Fprintf(s, "%.v := %.v", n.List(), n.Rlist()) } else { @@ -362,26 +367,33 @@ func stmtFmt(n Node, s fmt.State) { } case OBLOCK: + n := n.(*BlockStmt) if n.List().Len() != 0 { fmt.Fprintf(s, "%v", n.List()) } case ORETURN: + n := n.(*ReturnStmt) fmt.Fprintf(s, "return %.v", n.List()) case ORETJMP: + n := n.(*BranchStmt) fmt.Fprintf(s, "retjmp %v", n.Sym()) case OINLMARK: + n := n.(*InlineMarkStmt) fmt.Fprintf(s, "inlmark %d", n.Offset()) case OGO: + n := n.(*GoDeferStmt) fmt.Fprintf(s, "go %v", n.Left()) case ODEFER: + n := n.(*GoDeferStmt) fmt.Fprintf(s, "defer %v", n.Left()) case OIF: + n := n.(*IfStmt) if simpleinit { fmt.Fprintf(s, "if %v; %v { %v }", n.Init().First(), n.Left(), n.Body()) } else { @@ -392,6 +404,7 @@ func stmtFmt(n Node, s fmt.State) { } case OFOR, OFORUNTIL: + n := n.(*ForStmt) opname := "for" if n.Op() == OFORUNTIL { opname = "foruntil" @@ -425,6 +438,7 @@ func stmtFmt(n Node, s fmt.State) { fmt.Fprintf(s, " { %v }", n.Body()) case ORANGE: + n := n.(*RangeStmt) if !exportFormat { fmt.Fprint(s, "for loop") break @@ -437,23 +451,31 @@ func stmtFmt(n Node, s fmt.State) { fmt.Fprintf(s, "for %.v = range %v { %v }", n.List(), n.Right(), n.Body()) - case OSELECT, OSWITCH: + case OSELECT: + n := n.(*SelectStmt) if !exportFormat { fmt.Fprintf(s, "%v statement", n.Op()) break } + fmt.Fprintf(s, "select { %v }", n.List()) - fmt.Fprintf(s, "%v", n.Op()) + case OSWITCH: + n := n.(*SwitchStmt) + if !exportFormat { + fmt.Fprintf(s, "%v statement", n.Op()) + break + } + fmt.Fprintf(s, "switch") if simpleinit { fmt.Fprintf(s, " %v;", n.Init().First()) } if n.Left() != nil { fmt.Fprintf(s, " %v ", n.Left()) } - fmt.Fprintf(s, " { %v }", n.List()) case OCASE: + n := n.(*CaseStmt) if n.List().Len() != 0 { fmt.Fprintf(s, "case %.v", n.List()) } else { @@ -462,6 +484,7 @@ func stmtFmt(n Node, s fmt.State) { fmt.Fprintf(s, ": %v", n.Body()) case OBREAK, OCONTINUE, OGOTO, OFALL: + 
n := n.(*BranchStmt) if n.Sym() != nil { fmt.Fprintf(s, "%v %v", n.Op(), n.Sym()) } else { @@ -469,6 +492,7 @@ func stmtFmt(n Node, s fmt.State) { } case OLABEL: + n := n.(*LabelStmt) fmt.Fprintf(s, "%v: ", n.Sym()) } @@ -488,7 +512,7 @@ func exprFmt(n Node, s fmt.State, prec int) { for { if n == nil { - fmt.Fprint(s, "") + fmt.Fprint(s, "") return } @@ -499,10 +523,23 @@ func exprFmt(n Node, s fmt.State, prec int) { } // Skip implicit operations introduced during typechecking. - switch n.Op() { - case OADDR, ODEREF, OCONV, OCONVNOP, OCONVIFACE: - if n.Implicit() { - n = n.Left() + switch nn := n; nn.Op() { + case OADDR: + nn := nn.(*AddrExpr) + if nn.Implicit() { + n = nn.Left() + continue + } + case ODEREF: + nn := nn.(*StarExpr) + if nn.Implicit() { + n = nn.Left() + continue + } + case OCONV, OCONVNOP, OCONVIFACE: + nn := nn.(*ConvExpr) + if nn.Implicit() { + n = nn.Left() continue } } @@ -522,6 +559,7 @@ func exprFmt(n Node, s fmt.State, prec int) { switch n.Op() { case OPAREN: + n := n.(*ParenExpr) fmt.Fprintf(s, "(%v)", n.Left()) case ONIL: @@ -570,6 +608,7 @@ func exprFmt(n Node, s fmt.State, prec int) { } case ODCLFUNC: + n := n.(*Func) if sym := n.Sym(); sym != nil { fmt.Fprint(s, sym) return @@ -577,6 +616,7 @@ func exprFmt(n Node, s fmt.State, prec int) { fmt.Fprintf(s, "") case ONAME: + n := n.(*Name) // Special case: name used as local variable in export. // _ becomes ~b%d internally; print as _ for export if !exportFormat && n.Sym() != nil && n.Sym().Name[0] == '~' && n.Sym().Name[1] == 'b' { @@ -641,17 +681,15 @@ func exprFmt(n Node, s fmt.State, prec int) { fmt.Fprint(s, "") case OCLOSURE: + n := n.(*ClosureExpr) if !exportFormat { fmt.Fprint(s, "func literal") return } - if n.Body().Len() != 0 { - fmt.Fprintf(s, "%v { %v }", n.Type(), n.Body()) - return - } fmt.Fprintf(s, "%v { %v }", n.Type(), n.Func().Body()) case OCOMPLIT: + n := n.(*CompLitExpr) if !exportFormat { if n.Implicit() { fmt.Fprintf(s, "... 
argument") @@ -668,9 +706,11 @@ func exprFmt(n Node, s fmt.State, prec int) { fmt.Fprintf(s, "(%v{ %.v })", n.Right(), n.List()) case OPTRLIT: + n := n.(*AddrExpr) fmt.Fprintf(s, "&%v", n.Left()) case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT: + n := n.(*CompLitExpr) if !exportFormat { fmt.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(n.List().Len() != 0)) return @@ -678,6 +718,7 @@ func exprFmt(n Node, s fmt.State, prec int) { fmt.Fprintf(s, "(%v{ %.v })", n.Type(), n.List()) case OKEY: + n := n.(*KeyExpr) if n.Left() != nil && n.Right() != nil { fmt.Fprintf(s, "%v:%v", n.Left(), n.Right()) return @@ -694,9 +735,11 @@ func exprFmt(n Node, s fmt.State, prec int) { fmt.Fprint(s, ":") case OSTRUCTKEY: + n := n.(*StructKeyExpr) fmt.Fprintf(s, "%v:%v", n.Sym(), n.Left()) case OCALLPART: + n := n.(*CallPartExpr) exprFmt(n.Left(), s, nprec) if n.Sym() == nil { fmt.Fprint(s, ".") @@ -705,6 +748,7 @@ func exprFmt(n Node, s fmt.State, prec int) { fmt.Fprintf(s, ".%s", types.SymMethodName(n.Sym())) case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH: + n := n.(*SelectorExpr) exprFmt(n.Left(), s, nprec) if n.Sym() == nil { fmt.Fprint(s, ".") @@ -713,6 +757,7 @@ func exprFmt(n Node, s fmt.State, prec int) { fmt.Fprintf(s, ".%s", types.SymMethodName(n.Sym())) case ODOTTYPE, ODOTTYPE2: + n := n.(*TypeAssertExpr) exprFmt(n.Left(), s, nprec) if n.Right() != nil { fmt.Fprintf(s, ".(%v)", n.Right()) @@ -721,10 +766,12 @@ func exprFmt(n Node, s fmt.State, prec int) { fmt.Fprintf(s, ".(%v)", n.Type()) case OINDEX, OINDEXMAP: + n := n.(*IndexExpr) exprFmt(n.Left(), s, nprec) fmt.Fprintf(s, "[%v]", n.Right()) case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR: + n := n.(*SliceExpr) exprFmt(n.Left(), s, nprec) fmt.Fprint(s, "[") low, high, max := n.SliceBounds() @@ -744,17 +791,15 @@ func exprFmt(n Node, s fmt.State, prec int) { fmt.Fprint(s, "]") case OSLICEHEADER: + n := n.(*SliceHeaderExpr) if n.List().Len() != 2 { base.Fatalf("bad OSLICEHEADER list length %d", n.List().Len()) } fmt.Fprintf(s, "sliceheader{%v,%v,%v}", n.Left(), n.List().First(), n.List().Second()) case OCOMPLEX, OCOPY: - if n.Left() != nil { - fmt.Fprintf(s, "%v(%v, %v)", n.Op(), n.Left(), n.Right()) - } else { - fmt.Fprintf(s, "%v(%.v)", n.Op(), n.List()) - } + n := n.(*BinaryExpr) + fmt.Fprintf(s, "%v(%v, %v)", n.Op(), n.Left(), n.Right()) case OCONV, OCONVIFACE, @@ -764,37 +809,34 @@ func exprFmt(n Node, s fmt.State, prec int) { OSTR2BYTES, OSTR2RUNES, ORUNESTR: + n := n.(*ConvExpr) if n.Type() == nil || n.Type().Sym() == nil { fmt.Fprintf(s, "(%v)", n.Type()) } else { fmt.Fprintf(s, "%v", n.Type()) } - if n.Left() != nil { - fmt.Fprintf(s, "(%v)", n.Left()) - } else { - fmt.Fprintf(s, "(%.v)", n.List()) - } + fmt.Fprintf(s, "(%v)", n.Left()) case OREAL, OIMAG, - OAPPEND, OCAP, OCLOSE, - ODELETE, OLEN, - OMAKE, ONEW, OPANIC, - ORECOVER, OALIGNOF, OOFFSETOF, - OSIZEOF, + OSIZEOF: + n := n.(*UnaryExpr) + fmt.Fprintf(s, "%v(%v)", n.Op(), n.Left()) + + case OAPPEND, + ODELETE, + OMAKE, + ORECOVER, OPRINT, OPRINTN: - if n.Left() != nil { - fmt.Fprintf(s, "%v(%v)", n.Op(), n.Left()) - return - } + n := n.(*CallExpr) if n.IsDDD() { fmt.Fprintf(s, "%v(%.v...)", n.Op(), n.List()) return @@ -802,6 +844,7 @@ func exprFmt(n Node, s fmt.State, prec int) { fmt.Fprintf(s, "%v(%.v)", n.Op(), n.List()) case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG: + n := n.(*CallExpr) exprFmt(n.Left(), s, nprec) if n.IsDDD() { fmt.Fprintf(s, "(%.v...)", n.List()) @@ -810,10 +853,7 @@ func exprFmt(n Node, s fmt.State, prec int) { fmt.Fprintf(s, "(%.v)", n.List()) case 
OMAKEMAP, OMAKECHAN, OMAKESLICE: - if n.List().Len() != 0 { // pre-typecheck - fmt.Fprintf(s, "make(%v, %.v)", n.Type(), n.List()) - return - } + n := n.(*MakeExpr) if n.Right() != nil { fmt.Fprintf(s, "make(%v, %v, %v)", n.Type(), n.Left(), n.Right()) return @@ -825,20 +865,34 @@ func exprFmt(n Node, s fmt.State, prec int) { fmt.Fprintf(s, "make(%v)", n.Type()) case OMAKESLICECOPY: + n := n.(*MakeExpr) fmt.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type(), n.Left(), n.Right()) - case OPLUS, ONEG, OADDR, OBITNOT, ODEREF, ONOT, ORECV: + case OPLUS, ONEG, OBITNOT, ONOT, ORECV: // Unary + n := n.(*UnaryExpr) + fmt.Fprintf(s, "%v", n.Op()) + if n.Left() != nil && n.Left().Op() == n.Op() { + fmt.Fprint(s, " ") + } + exprFmt(n.Left(), s, nprec+1) + + case OADDR: + n := n.(*AddrExpr) fmt.Fprintf(s, "%v", n.Op()) if n.Left() != nil && n.Left().Op() == n.Op() { fmt.Fprint(s, " ") } exprFmt(n.Left(), s, nprec+1) + case ODEREF: + n := n.(*StarExpr) + fmt.Fprintf(s, "%v", n.Op()) + exprFmt(n.Left(), s, nprec+1) + // Binary case OADD, OAND, - OANDAND, OANDNOT, ODIV, OEQ, @@ -851,16 +905,29 @@ func exprFmt(n Node, s fmt.State, prec int) { OMUL, ONE, OOR, - OOROR, ORSH, - OSEND, OSUB, OXOR: + n := n.(*BinaryExpr) + exprFmt(n.Left(), s, nprec) + fmt.Fprintf(s, " %v ", n.Op()) + exprFmt(n.Right(), s, nprec+1) + + case OANDAND, + OOROR: + n := n.(*LogicalExpr) exprFmt(n.Left(), s, nprec) fmt.Fprintf(s, " %v ", n.Op()) exprFmt(n.Right(), s, nprec+1) + case OSEND: + n := n.(*SendStmt) + exprFmt(n.Left(), s, nprec) + fmt.Fprintf(s, " <- ") + exprFmt(n.Right(), s, nprec+1) + case OADDSTR: + n := n.(*AddStringExpr) for i, n1 := range n.List().Slice() { if i != 0 { fmt.Fprint(s, " + ") @@ -951,27 +1018,12 @@ func dumpNodeHeader(w io.Writer, n Node) { if base.Debug.DumpPtrs != 0 { fmt.Fprintf(w, " p(%p)", n) } - if n.Name() != nil && n.Name().Vargen != 0 { - fmt.Fprintf(w, " g(%d)", n.Name().Vargen) - } if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Defn != nil { // Useful to see where Defn is set and what node it points to fmt.Fprintf(w, " defn(%p)", n.Name().Defn) } - if n.Offset() != types.BADWIDTH { - fmt.Fprintf(w, " x(%d)", n.Offset()) - } - - if n.Class() != 0 { - fmt.Fprintf(w, " class(%v)", n.Class()) - } - - if n.Colas() { - fmt.Fprintf(w, " colas(%v)", n.Colas()) - } - if EscFmt != nil { if esc := EscFmt(n); esc != "" { fmt.Fprintf(w, " %s", esc) @@ -982,47 +1034,62 @@ func dumpNodeHeader(w io.Writer, n Node) { fmt.Fprintf(w, " tc(%d)", n.Typecheck()) } - if n.IsDDD() { - fmt.Fprintf(w, " isddd(%v)", n.IsDDD()) - } - - if n.Implicit() { - fmt.Fprintf(w, " implicit(%v)", n.Implicit()) - } - - if n.Op() == ONAME { - if n.Name().Addrtaken() { - fmt.Fprint(w, " addrtaken") - } - if n.Name().Assigned() { - fmt.Fprint(w, " assigned") - } - if n.Name().IsClosureVar() { - fmt.Fprint(w, " closurevar") - } - if n.Name().Captured() { - fmt.Fprint(w, " captured") + // Print Node-specific fields of basic type in header line. 
+ v := reflect.ValueOf(n).Elem() + t := v.Type() + nf := t.NumField() + for i := 0; i < nf; i++ { + tf := t.Field(i) + if tf.PkgPath != "" { + // skip unexported field - Interface will fail + continue } - if n.Name().IsOutputParamHeapAddr() { - fmt.Fprint(w, " outputparamheapaddr") + k := tf.Type.Kind() + if reflect.Bool <= k && k <= reflect.Complex128 { + name := strings.TrimSuffix(tf.Name, "_") + vf := v.Field(i) + vfi := vf.Interface() + if name == "Offset" && vfi == types.BADWIDTH || name != "Offset" && isZero(vf) { + continue + } + if vfi == true { + fmt.Fprintf(w, " %s", name) + } else { + fmt.Fprintf(w, " %s:%+v", name, vf.Interface()) + } } } - if n.Bounded() { - fmt.Fprint(w, " bounded") - } - if n.NonNil() { - fmt.Fprint(w, " nonnil") - } - - if n.HasCall() { - fmt.Fprint(w, " hascall") - } - if n.Name() != nil && n.Name().Used() { - fmt.Fprint(w, " used") + // Print Node-specific booleans by looking for methods. + // Different v, t from above - want *Struct not Struct, for methods. + v = reflect.ValueOf(n) + t = v.Type() + nm := t.NumMethod() + for i := 0; i < nm; i++ { + tm := t.Method(i) + if tm.PkgPath != "" { + // skip unexported method - call will fail + continue + } + m := v.Method(i) + mt := m.Type() + if mt.NumIn() == 0 && mt.NumOut() == 1 && mt.Out(0).Kind() == reflect.Bool { + // TODO(rsc): Remove the func/defer/recover wrapping, + // which is guarding against panics in miniExpr, + // once we get down to the simpler state in which + // nodes have no getter methods that aren't allowed to be called. + func() { + defer func() { recover() }() + if m.Call(nil)[0].Bool() { + name := strings.TrimSuffix(tm.Name, "_") + fmt.Fprintf(w, " %s", name) + } + }() + } } if n.Op() == OCLOSURE { + n := n.(*ClosureExpr) if fn := n.Func(); fn != nil && fn.Nname.Sym() != nil { fmt.Fprintf(w, " fnName(%+v)", fn.Nname.Sym()) } @@ -1087,6 +1154,7 @@ func dumpNode(w io.Writer, n Node, depth int) { return case OASOP: + n := n.(*AssignOpStmt) fmt.Fprintf(w, "%+v-%+v", n.Op(), n.SubOp()) dumpNodeHeader(w, n) @@ -1120,7 +1188,7 @@ func dumpNode(w io.Writer, n Node, depth int) { if fn.Body().Len() > 0 { indent(w, depth) fmt.Fprintf(w, "%+v-body", n.Op()) - dumpNodes(w, n.Body(), depth+1) + dumpNodes(w, fn.Body(), depth+1) } return } @@ -1186,3 +1254,40 @@ func dumpNodes(w io.Writer, list Nodes, depth int) { dumpNode(w, n, depth) } } + +// reflect.IsZero is not available in Go 1.4 (added in Go 1.13), so we use this copy instead. 
+func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(v.Float()) == 0 + case reflect.Complex64, reflect.Complex128: + c := v.Complex() + return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !isZero(v.Index(i)) { + return false + } + } + return true + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + return v.IsNil() + case reflect.String: + return v.Len() == 0 + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !isZero(v.Field(i)) { + return false + } + } + return true + default: + return false + } +} diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 7f1a47e13cbe9..96cb0ee0546cb 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -34,13 +34,13 @@ func (*Ident) CanBeNtype() {} // Name holds Node fields used only by named nodes (ONAME, OTYPE, some OLITERAL). type Name struct { miniExpr - subOp Op // uint8 - class Class // uint8 + BuiltinOp Op // uint8 + Class_ Class // uint8 flags bitset16 pragma PragmaFlag // int16 sym *types.Sym fn *Func - offset int64 + Offset_ int64 val constant.Value orig Node embedFiles *[]string // list of embedded files, for ONAME var @@ -180,16 +180,16 @@ func newNameAt(pos src.XPos, op Op, sym *types.Sym) *Name { func (n *Name) Name() *Name { return n } func (n *Name) Sym() *types.Sym { return n.sym } func (n *Name) SetSym(x *types.Sym) { n.sym = x } -func (n *Name) SubOp() Op { return n.subOp } -func (n *Name) SetSubOp(x Op) { n.subOp = x } -func (n *Name) Class() Class { return n.class } -func (n *Name) SetClass(x Class) { n.class = x } +func (n *Name) SubOp() Op { return n.BuiltinOp } +func (n *Name) SetSubOp(x Op) { n.BuiltinOp = x } +func (n *Name) Class() Class { return n.Class_ } +func (n *Name) SetClass(x Class) { n.Class_ = x } func (n *Name) Func() *Func { return n.fn } func (n *Name) SetFunc(x *Func) { n.fn = x } -func (n *Name) Offset() int64 { return n.offset } -func (n *Name) SetOffset(x int64) { n.offset = x } -func (n *Name) Iota() int64 { return n.offset } -func (n *Name) SetIota(x int64) { n.offset = x } +func (n *Name) Offset() int64 { return n.Offset_ } +func (n *Name) SetOffset(x int64) { n.Offset_ = x } +func (n *Name) Iota() int64 { return n.Offset_ } +func (n *Name) SetIota(x int64) { n.Offset_ = x } func (*Name) CanBeNtype() {} func (*Name) CanBeAnSSASym() {} diff --git a/test/escape_param.go b/test/escape_param.go index 993e914e1d0d4..dc93f689cf9fb 100644 --- a/test/escape_param.go +++ b/test/escape_param.go @@ -212,7 +212,7 @@ func caller7() { // **in -> heap func param8(i **int) { // ERROR "i does not escape$" - sink = **i // ERROR "\* \(\*i\) escapes to heap" + sink = **i // ERROR "\*\(\*i\) escapes to heap" } func caller8() { @@ -402,7 +402,7 @@ func caller13h() { var p *int v := &Val{&p} // ERROR "&Val{...} does not escape$" v.param13(&i) - sink = **v.p // ERROR "\* \(\*v\.p\) escapes to heap" + sink = **v.p // ERROR "\*\(\*v\.p\) escapes to heap" } type Node struct { From 114af2a04408d0480bb3e9253bf15aae6b7ed23e Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Fri, 
11 Dec 2020 21:29:53 -0500 Subject: [PATCH 168/474] [dev.regabi] cmd/compile: change Nodes to be a slice The Nodes type originally served two purposes: (1) It provided a representation optimized for empty slices, allocating only a single word in that case instead of three, at the cost of a non-empty slice being four words instead of three. This was particularly important with the old Node representation, in which most Nodes were full of unused fields. (2) It provided a few useful helper methods beyond what can be done with slices. The downside of Nodes is that the API is a bit overwhelming, with many ways to spell ordinary slice operations. For example, reassigning the first node in the list can be done with: ns.Slice()[0] = n ns.SetIndex(0, n) ns.SetFirst(n) *ns.Addr(0) = n And APIs must decide whether to use Nodes or []ir.Node and then conversions must be inserted when crossing the boundary. Now that Node structs are specialized to opcode and most Nodes lists are actually non-empty, it makes sense to simplify Nodes to make it actually a slice type, so that ordinary slice operations can be used, and assignments can automatically convert between Nodes and []ir.Node. This CL changes the representation to be a slice and adds a new Take method, which returns the old slice and clears the receiver. In a future CL, the Nodes method set will simplify down to: Copy Take Append Prepend Format with the current methods being rewritten: ns.Len() -> len(ns) ns.Slice() -> ns ns.First() -> ns[0] ns.Second() -> ns[1] ns.Index(i) -> ns[i] ns.Addr(i) -> &ns[i] ns.SetIndex(i, n) -> ns[i] = n ns.SetFirst(n) -> ns[0] = n ns.SetSecond(n) -> ns[1] = n ns.Set1(n) -> ns = []Node{n} ns.Set2(n, n2) -> ns = []Node{n, n2} ns.Set3(n, n2, n3) -> ns = []Node{n, n2, n3} AsNodes(slice) -> Nodes(slice) ns.AppendNodes(pns) -> ns.Append(pns.Take()...) ns.MoveNodes(pns) -> ns = pns.Take() and then all those other methods will be deleted. Simplifying the API down to just those five methods will also make it more reasonable to introduce more specialized slices like Exprs and Stmts at some point in the future. But again this CL just changes the representation to a slice, introduces Take, and leaves the rest alone. Passes buildall w/ toolstash -cmp. Change-Id: I309ab8335c69bb582d811c92c17f938dd6e0c4fe Reviewed-on: https://go-review.googlesource.com/c/go/+/277916 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/copy.go | 6 -- src/cmd/compile/internal/ir/dump.go | 11 +-- src/cmd/compile/internal/ir/mini.go | 8 +- src/cmd/compile/internal/ir/node.go | 91 ++++++++-------------- src/cmd/compile/internal/ir/sizeof_test.go | 4 +- 5 files changed, 39 insertions(+), 81 deletions(-) diff --git a/src/cmd/compile/internal/ir/copy.go b/src/cmd/compile/internal/ir/copy.go index 7f5d3135133af..0ab355f76749c 100644 --- a/src/cmd/compile/internal/ir/copy.go +++ b/src/cmd/compile/internal/ir/copy.go @@ -64,12 +64,6 @@ func Copy(n Node) Node { return c } -func copyList(x Nodes) Nodes { - c := make([]Node, x.Len()) - copy(c, x.Slice()) - return AsNodes(c) -} - // DeepCopy returns a “deep” copy of n, with its entire structure copied // (except for shared nodes like ONAME, ONONAME, OLITERAL, and OTYPE). // If pos.IsKnown(), it sets the source position of newly allocated Nodes to pos. 
diff --git a/src/cmd/compile/internal/ir/dump.go b/src/cmd/compile/internal/ir/dump.go index bff3a408550a5..9d6042f78a878 100644 --- a/src/cmd/compile/internal/ir/dump.go +++ b/src/cmd/compile/internal/ir/dump.go @@ -140,15 +140,8 @@ func (p *dumper) dump(x reflect.Value, depth int) { return } - // special cases - switch v := x.Interface().(type) { - case Nodes: - // unpack Nodes since reflect cannot look inside - // due to the unexported field in its struct - x = reflect.ValueOf(v.Slice()) - - case src.XPos: - p.printf("%s", base.FmtPos(v)) + if pos, ok := x.Interface().(src.XPos); ok { + p.printf("%s", base.FmtPos(pos)) return } diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go index bf221f75edc09..d1d2e266edc69 100644 --- a/src/cmd/compile/internal/ir/mini.go +++ b/src/cmd/compile/internal/ir/mini.go @@ -114,22 +114,22 @@ func (n *miniNode) SetRight(x Node) { } } func (n *miniNode) SetInit(x Nodes) { - if x != (Nodes{}) { + if x != nil { panic(n.no("SetInit")) } } func (n *miniNode) SetBody(x Nodes) { - if x != (Nodes{}) { + if x != nil { panic(n.no("SetBody")) } } func (n *miniNode) SetList(x Nodes) { - if x != (Nodes{}) { + if x != nil { panic(n.no("SetList")) } } func (n *miniNode) SetRlist(x Nodes) { - if x != (Nodes{}) { + if x != nil { panic(n.no("SetRlist")) } } diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index dc86b6c683ebc..ccf3671085baf 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -359,7 +359,7 @@ const ( // Nodes is a pointer to a slice of *Node. // For fields that are not used in most nodes, this is used instead of // a slice to save space. -type Nodes struct{ slice *[]Node } +type Nodes []Node // immutableEmptyNodes is an immutable, empty Nodes list. // The methods that would modify it panic instead. @@ -367,43 +367,37 @@ var immutableEmptyNodes = Nodes{} // asNodes returns a slice of *Node as a Nodes value. func AsNodes(s []Node) Nodes { - return Nodes{&s} + return s } // Slice returns the entries in Nodes as a slice. // Changes to the slice entries (as in s[i] = n) will be reflected in // the Nodes. func (n Nodes) Slice() []Node { - if n.slice == nil { - return nil - } - return *n.slice + return n } // Len returns the number of entries in Nodes. func (n Nodes) Len() int { - if n.slice == nil { - return 0 - } - return len(*n.slice) + return len(n) } // Index returns the i'th element of Nodes. // It panics if n does not have at least i+1 elements. func (n Nodes) Index(i int) Node { - return (*n.slice)[i] + return n[i] } // First returns the first element of Nodes (same as n.Index(0)). // It panics if n has no elements. func (n Nodes) First() Node { - return (*n.slice)[0] + return n[0] } // Second returns the second element of Nodes (same as n.Index(1)). // It panics if n has fewer than two elements. func (n Nodes) Second() Node { - return (*n.slice)[1] + return n[1] } func (n *Nodes) mutate() { @@ -422,64 +416,56 @@ func (n *Nodes) Set(s []Node) { } n.mutate() } - if len(s) == 0 { - n.slice = nil - } else { - // Copy s and take address of t rather than s to avoid - // allocation in the case where len(s) == 0 (which is - // over 3x more common, dynamically, for make.bash). - t := s - n.slice = &t - } + *n = s } // Set1 sets n to a slice containing a single node. func (n *Nodes) Set1(n1 Node) { n.mutate() - n.slice = &[]Node{n1} + *n = []Node{n1} } // Set2 sets n to a slice containing two nodes. 
func (n *Nodes) Set2(n1, n2 Node) { n.mutate() - n.slice = &[]Node{n1, n2} + *n = []Node{n1, n2} } // Set3 sets n to a slice containing three nodes. func (n *Nodes) Set3(n1, n2, n3 Node) { n.mutate() - n.slice = &[]Node{n1, n2, n3} + *n = []Node{n1, n2, n3} } // MoveNodes sets n to the contents of n2, then clears n2. func (n *Nodes) MoveNodes(n2 *Nodes) { n.mutate() - n.slice = n2.slice - n2.slice = nil + *n = *n2 + *n2 = nil } // SetIndex sets the i'th element of Nodes to node. // It panics if n does not have at least i+1 elements. func (n Nodes) SetIndex(i int, node Node) { - (*n.slice)[i] = node + n[i] = node } // SetFirst sets the first element of Nodes to node. // It panics if n does not have at least one elements. func (n Nodes) SetFirst(node Node) { - (*n.slice)[0] = node + n[0] = node } // SetSecond sets the second element of Nodes to node. // It panics if n does not have at least two elements. func (n Nodes) SetSecond(node Node) { - (*n.slice)[1] = node + n[1] = node } // Addr returns the address of the i'th element of Nodes. // It panics if n does not have at least i+1 elements. func (n Nodes) Addr(i int) *Node { - return &(*n.slice)[i] + return &n[i] } // Append appends entries to Nodes. @@ -488,13 +474,7 @@ func (n *Nodes) Append(a ...Node) { return } n.mutate() - if n.slice == nil { - s := make([]Node, len(a)) - copy(s, a) - n.slice = &s - return - } - *n.slice = append(*n.slice, a...) + *n = append(*n, a...) } // Prepend prepends entries to Nodes. @@ -504,38 +484,29 @@ func (n *Nodes) Prepend(a ...Node) { return } n.mutate() - if n.slice == nil { - n.slice = &a - } else { - *n.slice = append(a, *n.slice...) - } + *n = append(a, *n...) +} + +// Take clears n, returning its former contents. +func (n *Nodes) Take() []Node { + ret := *n + *n = nil + return ret } // AppendNodes appends the contents of *n2 to n, then clears n2. func (n *Nodes) AppendNodes(n2 *Nodes) { n.mutate() - switch { - case n2.slice == nil: - case n.slice == nil: - n.slice = n2.slice - default: - *n.slice = append(*n.slice, *n2.slice...) - } - n2.slice = nil + *n = append(*n, n2.Take()...) } // Copy returns a copy of the content of the slice. func (n Nodes) Copy() Nodes { - var c Nodes - if n.slice == nil { - return c - } - c.slice = new([]Node) - if *n.slice == nil { - return c + if n == nil { + return nil } - *c.slice = make([]Node, n.Len()) - copy(*c.slice, n.Slice()) + c := make(Nodes, n.Len()) + copy(c, n) return c } diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go index 181f1462fe97c..2a618f85ed4ea 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -20,8 +20,8 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {Func{}, 168, 288}, - {Name{}, 124, 216}, + {Func{}, 200, 352}, + {Name{}, 132, 232}, } for _, tt := range tests { From 7fde0d2b507b989cb9a23d6dbae9acaa13328c53 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Fri, 11 Dec 2020 12:55:14 -0500 Subject: [PATCH 169/474] [dev.regabi] cmd/compile: remove use of Initorder, Offset Node fields for initorder The initorder pass is already making heavy use of maps, and it is concerned with relatively few nodes (only the assignments in package-level variable declarations). The tracking of init order for these nodes can be done with another map instead of storing the bits directly in the Node representations. 
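As a rough sketch of that bookkeeping (o.order and the orderDone sentinel
are the names introduced in the diff below; ndeps stands in for the
computed dependency count):

	o.order[n] = ndeps     // processAssign: deps remaining, was n.SetOffset
	o.order[n]--           // a dependency was initialized
	o.order[n] = orderDone // flushReady: was n.SetInitorder(InitDone)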
This will let us drop Offset_ from AssignStmt and AssignListStmt and drop Initorder from all nodes. Passes buildall w/ toolstash -cmp. Change-Id: I151c64e84670292c2004da4e8e3d0660a88e3df3 Reviewed-on: https://go-review.googlesource.com/c/go/+/277917 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/initorder.go | 43 ++++++++++----------- src/cmd/compile/internal/ir/mini.go | 12 +----- src/cmd/compile/internal/ir/node.go | 2 - src/cmd/compile/internal/ir/stmt.go | 48 ++++++++++-------------- 4 files changed, 44 insertions(+), 61 deletions(-) diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go index 7f1f3cba92604..d39e8189d7a49 100644 --- a/src/cmd/compile/internal/gc/initorder.go +++ b/src/cmd/compile/internal/gc/initorder.go @@ -11,7 +11,6 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" - "cmd/compile/internal/types" ) // Package initialization @@ -69,6 +68,8 @@ type InitOrder struct { // ready is the queue of Pending initialization assignments // that are ready for initialization. ready declOrder + + order map[ir.Node]int } // initOrder computes initialization order for a list l of @@ -82,6 +83,7 @@ func initOrder(l []ir.Node) []ir.Node { } o := InitOrder{ blocking: make(map[ir.Node][]ir.Node), + order: make(map[ir.Node]int), } // Process all package-level assignment in declaration order. @@ -102,7 +104,7 @@ func initOrder(l []ir.Node) []ir.Node { for _, n := range l { switch n.Op() { case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV: - if n.Initorder() != InitDone { + if o.order[n] != orderDone { // If there have already been errors // printed, those errors may have // confused us and there might not be @@ -110,7 +112,7 @@ func initOrder(l []ir.Node) []ir.Node { // first. base.ExitIfErrors() - findInitLoopAndExit(firstLHS(n), new([]*ir.Name)) + o.findInitLoopAndExit(firstLHS(n), new([]*ir.Name)) base.Fatalf("initialization unfinished, but failed to identify loop") } } @@ -126,12 +128,10 @@ func initOrder(l []ir.Node) []ir.Node { } func (o *InitOrder) processAssign(n ir.Node) { - if n.Initorder() != InitNotStarted || n.Offset() != types.BADWIDTH { - base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Offset()) + if _, ok := o.order[n]; ok { + base.Fatalf("unexpected state: %v, %v", n, o.order[n]) } - - n.SetInitorder(InitPending) - n.SetOffset(0) + o.order[n] = 0 // Compute number of variable dependencies and build the // inverse dependency ("blocking") graph. @@ -139,38 +139,38 @@ func (o *InitOrder) processAssign(n ir.Node) { defn := dep.Defn // Skip dependencies on functions (PFUNC) and // variables already initialized (InitDone). - if dep.Class() != ir.PEXTERN || defn.Initorder() == InitDone { + if dep.Class() != ir.PEXTERN || o.order[defn] == orderDone { continue } - n.SetOffset(n.Offset() + 1) + o.order[n]++ o.blocking[defn] = append(o.blocking[defn], n) } - if n.Offset() == 0 { + if o.order[n] == 0 { heap.Push(&o.ready, n) } } +const orderDone = -1000 + // flushReady repeatedly applies initialize to the earliest (in // declaration order) assignment ready for initialization and updates // the inverse dependency ("blocking") graph. 
func (o *InitOrder) flushReady(initialize func(ir.Node)) { for o.ready.Len() != 0 { n := heap.Pop(&o.ready).(ir.Node) - if n.Initorder() != InitPending || n.Offset() != 0 { - base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Offset()) + if order, ok := o.order[n]; !ok || order != 0 { + base.Fatalf("unexpected state: %v, %v, %v", n, ok, order) } initialize(n) - n.SetInitorder(InitDone) - n.SetOffset(types.BADWIDTH) + o.order[n] = orderDone blocked := o.blocking[n] delete(o.blocking, n) for _, m := range blocked { - m.SetOffset(m.Offset() - 1) - if m.Offset() == 0 { + if o.order[m]--; o.order[m] == 0 { heap.Push(&o.ready, m) } } @@ -183,7 +183,7 @@ func (o *InitOrder) flushReady(initialize func(ir.Node)) { // path points to a slice used for tracking the sequence of // variables/functions visited. Using a pointer to a slice allows the // slice capacity to grow and limit reallocations. -func findInitLoopAndExit(n *ir.Name, path *[]*ir.Name) { +func (o *InitOrder) findInitLoopAndExit(n *ir.Name, path *[]*ir.Name) { // We implement a simple DFS loop-finding algorithm. This // could be faster, but initialization cycles are rare. @@ -203,11 +203,11 @@ func findInitLoopAndExit(n *ir.Name, path *[]*ir.Name) { *path = append(*path, n) for _, ref := range refers { // Short-circuit variables that were initialized. - if ref.Class() == ir.PEXTERN && ref.Defn.Initorder() == InitDone { + if ref.Class() == ir.PEXTERN && o.order[ref.Defn] == orderDone { continue } - findInitLoopAndExit(ref, path) + o.findInitLoopAndExit(ref, path) } *path = (*path)[:len(*path)-1] } @@ -282,9 +282,10 @@ func (d *initDeps) visit(n ir.Node) bool { return false case ir.ONAME: + n := n.(*ir.Name) switch n.Class() { case ir.PEXTERN, ir.PFUNC: - d.foundDep(n.(*ir.Name)) + d.foundDep(n) } case ir.OCLOSURE: diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go index d1d2e266edc69..7a945c369029e 100644 --- a/src/cmd/compile/internal/ir/mini.go +++ b/src/cmd/compile/internal/ir/mini.go @@ -61,14 +61,12 @@ func (n *miniNode) SetEsc(x uint16) { n.esc = x } const ( miniWalkdefShift = 0 miniTypecheckShift = 2 - miniInitorderShift = 4 - miniDiag = 1 << 6 - miniHasCall = 1 << 7 // for miniStmt + miniDiag = 1 << 4 + miniHasCall = 1 << 5 // for miniStmt ) func (n *miniNode) Walkdef() uint8 { return n.bits.get2(miniWalkdefShift) } func (n *miniNode) Typecheck() uint8 { return n.bits.get2(miniTypecheckShift) } -func (n *miniNode) Initorder() uint8 { return n.bits.get2(miniInitorderShift) } func (n *miniNode) SetWalkdef(x uint8) { if x > 3 { panic(fmt.Sprintf("cannot SetWalkdef %d", x)) @@ -81,12 +79,6 @@ func (n *miniNode) SetTypecheck(x uint8) { } n.bits.set2(miniTypecheckShift, x) } -func (n *miniNode) SetInitorder(x uint8) { - if x > 3 { - panic(fmt.Sprintf("cannot SetInitorder %d", x)) - } - n.bits.set2(miniInitorderShift, x) -} func (n *miniNode) Diag() bool { return n.bits&miniDiag != 0 } func (n *miniNode) SetDiag(x bool) { n.bits.set(miniDiag, x) } diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index ccf3671085baf..0e73731070f72 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -102,8 +102,6 @@ type Node interface { SetBounded(x bool) Typecheck() uint8 SetTypecheck(x uint8) - Initorder() uint8 - SetInitorder(x uint8) NonNil() bool MarkNonNil() HasCall() bool diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index f41c50c92b192..b7d0c1adc4503 100644 --- 
a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -63,10 +63,9 @@ func (n *miniStmt) SetHasCall(b bool) { n.bits.set(miniHasCall, b) } // If Def is true, the assignment is a :=. type AssignListStmt struct { miniStmt - Lhs Nodes - Def bool - Rhs Nodes - Offset_ int64 // for initorder + Lhs Nodes + Def bool + Rhs Nodes } func NewAssignListStmt(pos src.XPos, op Op, lhs, rhs []Node) *AssignListStmt { @@ -75,20 +74,17 @@ func NewAssignListStmt(pos src.XPos, op Op, lhs, rhs []Node) *AssignListStmt { n.SetOp(op) n.Lhs.Set(lhs) n.Rhs.Set(rhs) - n.Offset_ = types.BADWIDTH return n } -func (n *AssignListStmt) List() Nodes { return n.Lhs } -func (n *AssignListStmt) PtrList() *Nodes { return &n.Lhs } -func (n *AssignListStmt) SetList(x Nodes) { n.Lhs = x } -func (n *AssignListStmt) Rlist() Nodes { return n.Rhs } -func (n *AssignListStmt) PtrRlist() *Nodes { return &n.Rhs } -func (n *AssignListStmt) SetRlist(x Nodes) { n.Rhs = x } -func (n *AssignListStmt) Colas() bool { return n.Def } -func (n *AssignListStmt) SetColas(x bool) { n.Def = x } -func (n *AssignListStmt) Offset() int64 { return n.Offset_ } -func (n *AssignListStmt) SetOffset(x int64) { n.Offset_ = x } +func (n *AssignListStmt) List() Nodes { return n.Lhs } +func (n *AssignListStmt) PtrList() *Nodes { return &n.Lhs } +func (n *AssignListStmt) SetList(x Nodes) { n.Lhs = x } +func (n *AssignListStmt) Rlist() Nodes { return n.Rhs } +func (n *AssignListStmt) PtrRlist() *Nodes { return &n.Rhs } +func (n *AssignListStmt) SetRlist(x Nodes) { n.Rhs = x } +func (n *AssignListStmt) Colas() bool { return n.Def } +func (n *AssignListStmt) SetColas(x bool) { n.Def = x } func (n *AssignListStmt) SetOp(op Op) { switch op { @@ -103,28 +99,24 @@ func (n *AssignListStmt) SetOp(op Op) { // If Def is true, the assignment is a :=. type AssignStmt struct { miniStmt - X Node - Def bool - Y Node - Offset_ int64 // for initorder + X Node + Def bool + Y Node } func NewAssignStmt(pos src.XPos, x, y Node) *AssignStmt { n := &AssignStmt{X: x, Y: y} n.pos = pos n.op = OAS - n.Offset_ = types.BADWIDTH return n } -func (n *AssignStmt) Left() Node { return n.X } -func (n *AssignStmt) SetLeft(x Node) { n.X = x } -func (n *AssignStmt) Right() Node { return n.Y } -func (n *AssignStmt) SetRight(y Node) { n.Y = y } -func (n *AssignStmt) Colas() bool { return n.Def } -func (n *AssignStmt) SetColas(x bool) { n.Def = x } -func (n *AssignStmt) Offset() int64 { return n.Offset_ } -func (n *AssignStmt) SetOffset(x int64) { n.Offset_ = x } +func (n *AssignStmt) Left() Node { return n.X } +func (n *AssignStmt) SetLeft(x Node) { n.X = x } +func (n *AssignStmt) Right() Node { return n.Y } +func (n *AssignStmt) SetRight(y Node) { n.Y = y } +func (n *AssignStmt) Colas() bool { return n.Def } +func (n *AssignStmt) SetColas(x bool) { n.Def = x } func (n *AssignStmt) SetOp(op Op) { switch op { From f6d2834f8f78447a06fdb05f85a2c5690e915892 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Fri, 11 Dec 2020 16:52:21 -0500 Subject: [PATCH 170/474] [dev.regabi] cmd/compile: limit Implicit method to nodes where it is defined The general concept of an "implicit" operation is provided by every expr representation, but it really only makes sense for a few of them, and worse the exact definition of what "implicit" means differs from node to node. This CL moves the method to each node implementation, although they all share the same header bit instead of each defining a bool field that would turn into 8 bytes on 64-bit systems. 
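After the move, a caller must hold a concrete node type before asking
about implicitness; a minimal sketch (assuming a ParenExpr in hand):

	if p, ok := n.(*ir.ParenExpr); ok && p.Implicit() {
		// parentheses inserted by the compiler, not written in the source
	}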
Now we can say precisely which Nodes have a meaningful Implicit method: AddrExpr, CompLitExpr, ConvExpr, ParenExpr, and StarExpr. Passes buildall w/ toolstash -cmp. Change-Id: I7d85cb0507a514cdcb6eed21347f362e5fb57a91 Reviewed-on: https://go-review.googlesource.com/c/go/+/277918 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/expr.go | 50 +++++++++++++++++------------ 1 file changed, 30 insertions(+), 20 deletions(-) diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 8ea31c1929c89..36a11dad9a2b3 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -52,10 +52,10 @@ type miniExpr struct { const ( miniExprHasCall = 1 << iota - miniExprImplicit miniExprNonNil miniExprTransient miniExprBounded + miniExprImplicit // for use by implementations; not supported by every Expr ) func (*miniExpr) isExpr() {} @@ -66,8 +66,6 @@ func (n *miniExpr) Opt() interface{} { return n.opt } func (n *miniExpr) SetOpt(x interface{}) { n.opt = x } func (n *miniExpr) HasCall() bool { return n.flags&miniExprHasCall != 0 } func (n *miniExpr) SetHasCall(b bool) { n.flags.set(miniExprHasCall, b) } -func (n *miniExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } -func (n *miniExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } func (n *miniExpr) NonNil() bool { return n.flags&miniExprNonNil != 0 } func (n *miniExpr) MarkNonNil() { n.flags |= miniExprNonNil } func (n *miniExpr) Transient() bool { return n.flags&miniExprTransient != 0 } @@ -121,10 +119,12 @@ func NewAddrExpr(pos src.XPos, x Node) *AddrExpr { return n } -func (n *AddrExpr) Left() Node { return n.X } -func (n *AddrExpr) SetLeft(x Node) { n.X = x } -func (n *AddrExpr) Right() Node { return n.Alloc } -func (n *AddrExpr) SetRight(x Node) { n.Alloc = x } +func (n *AddrExpr) Left() Node { return n.X } +func (n *AddrExpr) SetLeft(x Node) { n.X = x } +func (n *AddrExpr) Right() Node { return n.Alloc } +func (n *AddrExpr) SetRight(x Node) { n.Alloc = x } +func (n *AddrExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } +func (n *AddrExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } func (n *AddrExpr) SetOp(op Op) { switch op { @@ -301,13 +301,15 @@ func NewCompLitExpr(pos src.XPos, op Op, typ Ntype, list []Node) *CompLitExpr { return n } -func (n *CompLitExpr) Orig() Node { return n.orig } -func (n *CompLitExpr) SetOrig(x Node) { n.orig = x } -func (n *CompLitExpr) Right() Node { return n.Ntype } -func (n *CompLitExpr) SetRight(x Node) { n.Ntype = toNtype(x) } -func (n *CompLitExpr) List() Nodes { return n.List_ } -func (n *CompLitExpr) PtrList() *Nodes { return &n.List_ } -func (n *CompLitExpr) SetList(x Nodes) { n.List_ = x } +func (n *CompLitExpr) Orig() Node { return n.orig } +func (n *CompLitExpr) SetOrig(x Node) { n.orig = x } +func (n *CompLitExpr) Right() Node { return n.Ntype } +func (n *CompLitExpr) SetRight(x Node) { n.Ntype = toNtype(x) } +func (n *CompLitExpr) List() Nodes { return n.List_ } +func (n *CompLitExpr) PtrList() *Nodes { return &n.List_ } +func (n *CompLitExpr) SetList(x Nodes) { n.List_ = x } +func (n *CompLitExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } +func (n *CompLitExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } func (n *CompLitExpr) SetOp(op Op) { switch op { @@ -354,8 +356,10 @@ func NewConvExpr(pos src.XPos, op Op, typ *types.Type, x Node) *ConvExpr { return n } -func (n *ConvExpr) Left() Node { return n.X } -func (n *ConvExpr) SetLeft(x Node) { 
n.X = x } +func (n *ConvExpr) Left() Node { return n.X } +func (n *ConvExpr) SetLeft(x Node) { n.X = x } +func (n *ConvExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } +func (n *ConvExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } func (n *ConvExpr) SetOp(op Op) { switch op { @@ -583,8 +587,10 @@ func NewParenExpr(pos src.XPos, x Node) *ParenExpr { return n } -func (n *ParenExpr) Left() Node { return n.X } -func (n *ParenExpr) SetLeft(x Node) { n.X = x } +func (n *ParenExpr) Left() Node { return n.X } +func (n *ParenExpr) SetLeft(x Node) { n.X = x } +func (n *ParenExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } +func (n *ParenExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } func (*ParenExpr) CanBeNtype() {} @@ -645,6 +651,8 @@ func (n *SelectorExpr) Sym() *types.Sym { return n.Sel } func (n *SelectorExpr) SetSym(x *types.Sym) { n.Sel = x } func (n *SelectorExpr) Offset() int64 { return n.Offset_ } func (n *SelectorExpr) SetOffset(x int64) { n.Offset_ = x } +func (n *SelectorExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } +func (n *SelectorExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } // Before type-checking, bytes.Buffer is a SelectorExpr. // After type-checking it becomes a Name. @@ -783,8 +791,10 @@ func NewStarExpr(pos src.XPos, x Node) *StarExpr { return n } -func (n *StarExpr) Left() Node { return n.X } -func (n *StarExpr) SetLeft(x Node) { n.X = x } +func (n *StarExpr) Left() Node { return n.X } +func (n *StarExpr) SetLeft(x Node) { n.X = x } +func (n *StarExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } +func (n *StarExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } func (*StarExpr) CanBeNtype() {} From f6efa3d4a4a10c28d7bf13f8416022aa5fc4fa1c Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sat, 12 Dec 2020 18:50:21 -0500 Subject: [PATCH 171/474] [dev.regabi] cmd/compile: simplify ir.Find, replace ir.Inspect with ir.Visit It seems clear after using these for a week that Find need not return anything other than a bool saying whether the target was found. The main reason for not using the boolean earlier was to avoid confusion with Inspect: for Find, returning true means "it was found! stop walking" while for Inspect, returning true means "keep walking the children". But it turns out that none of the uses of Inspect need the boolean. This makes sense because types can contain expressions, expressions can contain statements (inside function literals), and so on, so there are essentially no times when you can say based on the current AST node that the children are irrelevant to a particular operation. So this CL makes two changes: 1) Change Find to return a boolean and to take a callback function returning a boolean. This simplifies all existing calls to Find. 2) Rename Inspect to Visit and change it to take a callback with no result at all. This simplifies all existing calls to Inspect. Removing the boolean result from Inspect's callback avoids having two callbacks with contradictory boolean results in different APIs. Renaming Inspect to Visit avoids confusion with ast.Inspect. Passes buildall w/ toolstash -cmp. 
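A typical caller now reads (a sketch of the new shape; the hasCall
rewrite in alg.go below is the real instance):

	found := ir.Find(fn, func(n ir.Node) bool {
		return n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC
	})

	ir.Visit(fn, func(n ir.Node) {
		// examine n; traversal always continues into the children
	})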
Change-Id: I344ebb5e00b6842012be33e779db483c28e5f350 Reviewed-on: https://go-review.googlesource.com/c/go/+/277919 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/alg.go | 10 ++- src/cmd/compile/internal/gc/const.go | 7 +- src/cmd/compile/internal/gc/dcl.go | 11 ++- src/cmd/compile/internal/gc/escape.go | 4 +- src/cmd/compile/internal/gc/initorder.go | 17 +++-- src/cmd/compile/internal/gc/inl.go | 28 +++----- src/cmd/compile/internal/gc/scc.go | 3 +- src/cmd/compile/internal/gc/walk.go | 15 ++-- src/cmd/compile/internal/ir/visit.go | 90 ++++++++++-------------- 9 files changed, 77 insertions(+), 108 deletions(-) diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index 8550edb9e0fb5..3938dce46cebc 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -783,13 +783,11 @@ func geneq(t *types.Type) *obj.LSym { } func hasCall(fn *ir.Func) bool { - found := ir.Find(fn, func(n ir.Node) interface{} { - if op := n.Op(); op == ir.OCALL || op == ir.OCALLFUNC { - return n - } - return nil + return ir.Find(fn, func(n ir.Node) bool { + // TODO(rsc): No methods? + op := n.Op() + return op == ir.OCALL || op == ir.OCALLFUNC }) - return found != nil } // eqfield returns the node diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 677ed17dd9ee6..1ef199c793457 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -781,7 +781,7 @@ func isGoConst(n ir.Node) bool { // hasCallOrChan reports whether n contains any calls or channel operations. func hasCallOrChan(n ir.Node) bool { - found := ir.Find(n, func(n ir.Node) interface{} { + return ir.Find(n, func(n ir.Node) bool { switch n.Op() { case ir.OAPPEND, ir.OCALL, @@ -803,11 +803,10 @@ func hasCallOrChan(n ir.Node) bool { ir.OREAL, ir.ORECOVER, ir.ORECV: - return n + return true } - return nil + return false }) - return found != nil } // A constSet represents a set of Go constant expressions. 
diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 89873e2facc47..ad2dc99f89082 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -855,22 +855,22 @@ func newNowritebarrierrecChecker() *nowritebarrierrecChecker { continue } c.curfn = n.(*ir.Func) - ir.Inspect(n, c.findExtraCalls) + ir.Visit(n, c.findExtraCalls) } c.curfn = nil return c } -func (c *nowritebarrierrecChecker) findExtraCalls(n ir.Node) bool { +func (c *nowritebarrierrecChecker) findExtraCalls(n ir.Node) { if n.Op() != ir.OCALLFUNC { - return true + return } fn := n.Left() if fn == nil || fn.Op() != ir.ONAME || fn.Class() != ir.PFUNC || fn.Name().Defn == nil { - return true + return } if !isRuntimePkg(fn.Sym().Pkg) || fn.Sym().Name != "systemstack" { - return true + return } var callee *ir.Func @@ -887,7 +887,6 @@ func (c *nowritebarrierrecChecker) findExtraCalls(n ir.Node) bool { base.Fatalf("expected ODCLFUNC node, got %+v", callee) } c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos()}) - return true } // recordCall records a call from ODCLFUNC node "from", to function diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index f317e9999cc53..5fce1184481a1 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -225,7 +225,7 @@ func (e *Escape) walkFunc(fn *ir.Func) { fn.SetEsc(EscFuncStarted) // Identify labels that mark the head of an unstructured loop. - ir.InspectList(fn.Body(), func(n ir.Node) bool { + ir.Visit(fn, func(n ir.Node) { switch n.Op() { case ir.OLABEL: if e.labels == nil { @@ -240,8 +240,6 @@ func (e *Escape) walkFunc(fn *ir.Func) { e.labels[n.Sym()] = looping } } - - return true }) e.curfn = fn diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go index d39e8189d7a49..7870e00221aac 100644 --- a/src/cmd/compile/internal/gc/initorder.go +++ b/src/cmd/compile/internal/gc/initorder.go @@ -268,18 +268,25 @@ func collectDeps(n ir.Node, transitive bool) ir.NameSet { type initDeps struct { transitive bool seen ir.NameSet + cvisit func(ir.Node) } -func (d *initDeps) inspect(n ir.Node) { ir.Inspect(n, d.visit) } -func (d *initDeps) inspectList(l ir.Nodes) { ir.InspectList(l, d.visit) } +func (d *initDeps) cachedVisit() func(ir.Node) { + if d.cvisit == nil { + d.cvisit = d.visit // cache closure + } + return d.cvisit +} + +func (d *initDeps) inspect(n ir.Node) { ir.Visit(n, d.cachedVisit()) } +func (d *initDeps) inspectList(l ir.Nodes) { ir.VisitList(l, d.cachedVisit()) } // visit calls foundDep on any package-level functions or variables // referenced by n, if any. -func (d *initDeps) visit(n ir.Node) bool { +func (d *initDeps) visit(n ir.Node) { switch n.Op() { case ir.OMETHEXPR: d.foundDep(methodExprName(n)) - return false case ir.ONAME: n := n.(*ir.Name) @@ -294,8 +301,6 @@ func (d *initDeps) visit(n ir.Node) bool { case ir.ODOTMETH, ir.OCALLPART: d.foundDep(methodExprName(n)) } - - return true } // foundDep records that we've found a dependency on n by adding it to diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 04256d5aeb62f..9342046dcce7b 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -255,7 +255,7 @@ func inlFlood(n *ir.Name) { // Recursively identify all referenced functions for // reexport. 
We want to include even non-called functions, // because after inlining they might be callable. - ir.InspectList(ir.AsNodes(fn.Inl.Body), func(n ir.Node) bool { + ir.VisitList(ir.AsNodes(fn.Inl.Body), func(n ir.Node) { switch n.Op() { case ir.OMETHEXPR, ir.ODOTMETH: inlFlood(methodExprName(n)) @@ -282,7 +282,6 @@ func inlFlood(n *ir.Name) { // inlFlood(n.Func.Closure.Func.Nname) base.Fatalf("unexpected closure in inlinable function") } - return true }) } @@ -458,14 +457,10 @@ func (v *hairyVisitor) doNode(n ir.Node) error { func isBigFunc(fn *ir.Func) bool { budget := inlineBigFunctionNodes - over := ir.Find(fn, func(n ir.Node) interface{} { + return ir.Find(fn, func(n ir.Node) bool { budget-- - if budget <= 0 { - return n - } - return nil + return budget <= 0 }) - return over != nil } // Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any @@ -707,8 +702,6 @@ FindRHS: return rhs } -var errFound = errors.New("found") - // reassigned takes an ONAME node, walks the function in which it is defined, and returns a boolean // indicating whether the name has any assignments other than its declaration. // The second return value is the first such assignment encountered in the walk, if any. It is mostly @@ -723,22 +716,21 @@ func reassigned(name *ir.Name) bool { if name.Curfn == nil { return true } - a := ir.Find(name.Curfn, func(n ir.Node) interface{} { + return ir.Find(name.Curfn, func(n ir.Node) bool { switch n.Op() { case ir.OAS: if n.Left() == name && n != name.Defn { - return n + return true } case ir.OAS2, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2DOTTYPE: for _, p := range n.List().Slice() { if p == name && n != name.Defn { - return n + return true } } } - return nil + return false }) - return a != nil } func inlParam(t *types.Field, as ir.Node, inlvars map[*ir.Name]ir.Node) ir.Node { @@ -916,11 +908,10 @@ func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool, } nreturns := 0 - ir.InspectList(ir.AsNodes(fn.Inl.Body), func(n ir.Node) bool { + ir.VisitList(ir.AsNodes(fn.Inl.Body), func(n ir.Node) { if n != nil && n.Op() == ir.ORETURN { nreturns++ } - return true }) // We can delay declaring+initializing result parameters if: @@ -1287,11 +1278,10 @@ func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name { // concrete-type method calls where applicable. func devirtualize(fn *ir.Func) { Curfn = fn - ir.InspectList(fn.Body(), func(n ir.Node) bool { + ir.VisitList(fn.Body(), func(n ir.Node) { if n.Op() == ir.OCALLINTER { devirtualizeCall(n) } - return true }) } diff --git a/src/cmd/compile/internal/gc/scc.go b/src/cmd/compile/internal/gc/scc.go index 063aaa09bd2bd..fa7af1274b264 100644 --- a/src/cmd/compile/internal/gc/scc.go +++ b/src/cmd/compile/internal/gc/scc.go @@ -75,7 +75,7 @@ func (v *bottomUpVisitor) visit(n *ir.Func) uint32 { min := v.visitgen v.stack = append(v.stack, n) - ir.InspectList(n.Body(), func(n ir.Node) bool { + ir.Visit(n, func(n ir.Node) { switch n.Op() { case ir.ONAME: if n.Class() == ir.PFUNC { @@ -111,7 +111,6 @@ func (v *bottomUpVisitor) visit(n *ir.Func) uint32 { min = m } } - return true }) if (min == id || min == id+1) && !n.IsHiddenClosure() { diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index ad5103f8514c1..041eb900c803a 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -3764,11 +3764,11 @@ func usefield(n ir.Node) { // hasSideEffects reports whether n contains any operations that could have observable side effects. 
func hasSideEffects(n ir.Node) bool { - found := ir.Find(n, func(n ir.Node) interface{} { + return ir.Find(n, func(n ir.Node) bool { switch n.Op() { // Assume side effects unless we know otherwise. default: - return n + return true // No side effects here (arguments are checked separately). case ir.ONAME, @@ -3824,29 +3824,28 @@ func hasSideEffects(n ir.Node) bool { ir.OREAL, ir.OIMAG, ir.OCOMPLEX: - return nil + return false // Only possible side effect is division by zero. case ir.ODIV, ir.OMOD: if n.Right().Op() != ir.OLITERAL || constant.Sign(n.Right().Val()) == 0 { - return n + return true } // Only possible side effect is panic on invalid size, // but many makechan and makemap use size zero, which is definitely OK. case ir.OMAKECHAN, ir.OMAKEMAP: if !ir.IsConst(n.Left(), constant.Int) || constant.Sign(n.Left().Val()) != 0 { - return n + return true } // Only possible side effect is panic on invalid size. // TODO(rsc): Merge with previous case (probably breaks toolstash -cmp). case ir.OMAKESLICE, ir.OMAKESLICECOPY: - return n + return true } - return nil + return false }) - return found != nil } // Rewrite diff --git a/src/cmd/compile/internal/ir/visit.go b/src/cmd/compile/internal/ir/visit.go index 4f3575614df64..bc2b8083ba298 100644 --- a/src/cmd/compile/internal/ir/visit.go +++ b/src/cmd/compile/internal/ir/visit.go @@ -57,46 +57,40 @@ import ( // } // do(root) // -// The Inspect function illustrates a further simplification of the pattern, -// only considering processing before visiting children, and letting -// that processing decide whether children are visited at all: +// The Visit function illustrates a further simplification of the pattern, +// only processing before visiting children and never stopping: // -// func Inspect(n ir.Node, inspect func(ir.Node) bool) { +// func Visit(n ir.Node, visit func(ir.Node)) { // var do func(ir.Node) error // do = func(x ir.Node) error { -// if inspect(x) { -// ir.DoChildren(x, do) -// } -// return nil +// visit(x) +// return ir.DoChildren(x, do) // } // if n != nil { -// do(n) +// visit(n) // } // } // // The Find function illustrates a different simplification of the pattern, // visiting each node and then its children, recursively, until finding -// a node x such that find(x) returns a non-nil result, -// at which point the entire traversal stops: +// a node x for which find(x) returns true, at which point the entire +// traversal stops and returns true. // -// func Find(n ir.Node, find func(ir.Node) interface{}) interface{} { +// func Find(n ir.Node, find func(ir.Node)) bool { // stop := errors.New("stop") -// var found interface{} // var do func(ir.Node) error // do = func(x ir.Node) error { -// if v := find(x); v != nil { -// found = v +// if find(x) { // return stop // } // return ir.DoChildren(x, do) // } -// do(n) -// return found +// return do(n) == stop // } // -// Inspect and Find are presented above as examples of how to use +// Visit and Find are presented above as examples of how to use // DoChildren effectively, but of course, usage that fits within the -// simplifications captured by Inspect or Find will be best served +// simplifications captured by Visit or Find will be best served // by directly calling the ones provided by this package. 
func DoChildren(n Node, do func(Node) error) error { if n == nil { @@ -122,71 +116,59 @@ func DoList(list Nodes, do func(Node) error) error { return nil } -// Inspect visits each node x in the IR tree rooted at n -// in a depth-first preorder traversal, calling inspect on each node visited. -// If inspect(x) returns false, then Inspect skips over x's children. -// -// Note that the meaning of the boolean result in the callback function -// passed to Inspect differs from that of Scan. -// During Scan, if scan(x) returns false, then Scan stops the scan. -// During Inspect, if inspect(x) returns false, then Inspect skips x's children -// but continues with the remainder of the tree (x's siblings and so on). -func Inspect(n Node, inspect func(Node) bool) { +// Visit visits each non-nil node x in the IR tree rooted at n +// in a depth-first preorder traversal, calling visit on each node visited. +func Visit(n Node, visit func(Node)) { var do func(Node) error do = func(x Node) error { - if inspect(x) { - DoChildren(x, do) - } - return nil + visit(x) + return DoChildren(x, do) } if n != nil { do(n) } } -// InspectList calls Inspect(x, inspect) for each node x in the list. -func InspectList(list Nodes, inspect func(Node) bool) { +// VisitList calls Visit(x, visit) for each node x in the list. +func VisitList(list Nodes, visit func(Node)) { for _, x := range list.Slice() { - Inspect(x, inspect) + Visit(x, visit) } } var stop = errors.New("stop") // Find looks for a non-nil node x in the IR tree rooted at n -// for which find(x) returns a non-nil value. +// for which find(x) returns true. // Find considers nodes in a depth-first, preorder traversal. -// When Find finds a node x such that find(x) != nil, -// Find ends the traversal and returns the value of find(x) immediately. -// Otherwise Find returns nil. -func Find(n Node, find func(Node) interface{}) interface{} { +// When Find finds a node x such that find(x) is true, +// Find ends the traversal and returns true immediately. +// Otherwise Find returns false after completing the entire traversal. +func Find(n Node, find func(Node) bool) bool { if n == nil { - return nil + return false } - var found interface{} var do func(Node) error do = func(x Node) error { - if v := find(x); v != nil { - found = v + if find(x) { return stop } return DoChildren(x, do) } - do(n) - return found + return do(n) == stop } -// FindList calls Find(x, ok) for each node x in the list, in order. -// If any call find(x) returns a non-nil result, FindList stops and +// FindList calls Find(x, find) for each node x in the list, in order. +// If any call Find(x, find) returns true, FindList stops and // returns that result, skipping the remainder of the list. -// Otherwise FindList returns nil. -func FindList(list Nodes, find func(Node) interface{}) interface{} { +// Otherwise FindList returns false. +func FindList(list Nodes, find func(Node) bool) bool { for _, x := range list.Slice() { - if v := Find(x, find); v != nil { - return v + if Find(x, find) { + return true } } - return nil + return false } // EditChildren edits the child nodes of n, replacing each child x with edit(x). 
From 4ac6a6317b0e4ecbcc789ba606708ff08871a1df Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 10 Dec 2020 18:42:42 -0500 Subject: [PATCH 172/474] [dev.regabi] cmd/compile: cleanup for concrete types - typecheck An automated rewrite will add concrete type assertions after a test of n.Op(), when n can be safely type-asserted (meaning, n is not reassigned a different type, n is not reassigned and then used outside the scope of the type assertion, and so on). This sequence of CLs handles the code that the automated rewrite does not: adding specific types to function arguments, adjusting code not to call n.Left() etc when n may have multiple representations, and so on. This CL focuses on typecheck.go. Passes buildall w/ toolstash -cmp. Change-Id: I32d1d3b813b0a088b1750c9fd28cd858ed813f1d Reviewed-on: https://go-review.googlesource.com/c/go/+/277920 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/typecheck.go | 388 +++++++++++++++-------- 1 file changed, 248 insertions(+), 140 deletions(-) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 6dc9c5820d130..ef1955e88b80e 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -250,7 +250,7 @@ func typecheck(n ir.Node, top int) (res ir.Node) { // Skip over parens. for n.Op() == ir.OPAREN { - n = n.Left() + n = n.(*ir.ParenExpr).Left() } // Resolve definition of name and value of iota lazily. @@ -439,10 +439,12 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } - if n.Op() == ir.ONAME && n.SubOp() != 0 && top&ctxCallee == 0 { - base.Errorf("use of builtin %v not in function call", n.Sym()) - n.SetType(nil) - return n + if n.Op() == ir.ONAME { + if n.SubOp() != 0 && top&ctxCallee == 0 { + base.Errorf("use of builtin %v not in function call", n.Sym()) + n.SetType(nil) + return n + } } typecheckdef(n) @@ -651,19 +653,29 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { ir.OOROR, ir.OSUB, ir.OXOR: - var l ir.Node - var op ir.Op - var r ir.Node + var l, r ir.Node + var setLR func() + switch n := n.(type) { + case *ir.AssignOpStmt: + l, r = n.Left(), n.Right() + setLR = func() { n.SetLeft(l); n.SetRight(r) } + case *ir.BinaryExpr: + l, r = n.Left(), n.Right() + setLR = func() { n.SetLeft(l); n.SetRight(r) } + case *ir.LogicalExpr: + l, r = n.Left(), n.Right() + setLR = func() { n.SetLeft(l); n.SetRight(r) } + } + l = typecheck(l, ctxExpr) + r = typecheck(r, ctxExpr) + setLR() + if l.Type() == nil || r.Type() == nil { + n.SetType(nil) + return n + } + op := n.Op() if n.Op() == ir.OASOP { - n.SetLeft(typecheck(n.Left(), ctxExpr)) - n.SetRight(typecheck(n.Right(), ctxExpr)) - l = n.Left() - r = n.Right() - checkassign(n, n.Left()) - if l.Type() == nil || r.Type() == nil { - n.SetType(nil) - return n - } + checkassign(n, l) if n.Implicit() && !okforarith[l.Type().Kind()] { base.Errorf("invalid operation: %v (non-numeric type %v)", n, l.Type()) n.SetType(nil) @@ -671,20 +683,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } // TODO(marvin): Fix Node.EType type union. 
op = n.SubOp() - } else { - n.SetLeft(typecheck(n.Left(), ctxExpr)) - n.SetRight(typecheck(n.Right(), ctxExpr)) - l = n.Left() - r = n.Right() - if l.Type() == nil || r.Type() == nil { - n.SetType(nil) - return n - } - op = n.Op() } if op == ir.OLSH || op == ir.ORSH { r = defaultlit(r, types.Types[types.TUINT]) - n.SetRight(r) + setLR() t := r.Type() if !t.IsInteger() { base.Errorf("invalid operation: %v (shift count type %v, must be integer)", n, r.Type()) @@ -730,9 +732,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { // ideal mixed with non-ideal l, r = defaultlit2(l, r, false) + setLR() - n.SetLeft(l) - n.SetRight(r) if l.Type() == nil || r.Type() == nil { n.SetType(nil) return n @@ -768,7 +769,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { if r.Type().IsInterface() == l.Type().IsInterface() || l.Type().Width >= 1<<16 { l = ir.NewConvExpr(base.Pos, aop, r.Type(), l) l.SetTypecheck(1) - n.SetLeft(l) + setLR() } t = r.Type() @@ -789,7 +790,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { if r.Type().IsInterface() == l.Type().IsInterface() || r.Type().Width >= 1<<16 { r = ir.NewConvExpr(base.Pos, aop, l.Type(), r) r.SetTypecheck(1) - n.SetRight(r) + setLR() } t = l.Type() @@ -858,29 +859,30 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { if iscmp[n.Op()] { t = types.UntypedBool n.SetType(t) - n = evalConst(n) - if n.Op() != ir.OLITERAL { - l, r = defaultlit2(l, r, true) - n.SetLeft(l) - n.SetRight(r) + if con := evalConst(n); con.Op() == ir.OLITERAL { + return con } + l, r = defaultlit2(l, r, true) + setLR() + return n } if et == types.TSTRING && n.Op() == ir.OADD { // create or update OADDSTR node with list of strings in x + y + z + (w + v) + ... + var add *ir.AddStringExpr if l.Op() == ir.OADDSTR { - orig := n - n = l - n.SetPos(orig.Pos()) + add = l.(*ir.AddStringExpr) + add.SetPos(n.Pos()) } else { - n = ir.NodAt(n.Pos(), ir.OADDSTR, nil, nil) - n.PtrList().Set1(l) + add = ir.NewAddStringExpr(n.Pos(), []ir.Node{l}) } if r.Op() == ir.OADDSTR { - n.PtrList().AppendNodes(r.PtrList()) + add.PtrList().AppendNodes(r.PtrList()) } else { - n.PtrList().Append(r) + add.PtrList().Append(r) } + add.SetType(t) + return add } if (op == ir.ODIV || op == ir.OMOD) && ir.IsConst(r, constant.Int) { @@ -950,11 +952,12 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OCOMPLIT: - return typecheckcomplit(n) + return typecheckcomplit(n.(*ir.CompLitExpr)) case ir.OXDOT, ir.ODOT: + n := n.(*ir.SelectorExpr) if n.Op() == ir.OXDOT { - n = adddot(n) + n = adddot(n).(*ir.SelectorExpr) n.SetOp(ir.ODOT) if n.Left() == nil { n.SetType(nil) @@ -1021,7 +1024,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } if (n.Op() == ir.ODOTINTER || n.Op() == ir.ODOTMETH) && top&ctxCallee == 0 { - n = typecheckpartialcall(n, s) + return typecheckpartialcall(n, s) } return n @@ -1286,9 +1289,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } - n.SetLeft(nodAddr(n.Left())) - n.Left().SetImplicit(true) - n.SetLeft(typecheck(n.Left(), ctxExpr)) + addr := nodAddr(n.Left()) + addr.SetImplicit(true) + n.SetLeft(typecheck(addr, ctxExpr)) l = n.Left() } t := l.Type() @@ -1338,9 +1341,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { // call and call like case ir.OCALL: - n.(*ir.CallExpr).Use = ir.CallUseExpr + n := n.(*ir.CallExpr) + n.Use = ir.CallUseExpr if top == ctxStmt { - n.(*ir.CallExpr).Use = ir.CallUseStmt + n.Use = ir.CallUseStmt } typecheckslice(n.Init().Slice(), ctxStmt) // imported rewritten f(g()) calls (#30907) 
n.SetLeft(typecheck(n.Left(), ctxExpr|ctxType|ctxCallee)) @@ -1350,7 +1354,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { l := n.Left() - if l.Op() == ir.ONAME && l.SubOp() != 0 { + if l.Op() == ir.ONAME && l.(*ir.Name).SubOp() != 0 { if n.IsDDD() && l.SubOp() != ir.OAPPEND { base.Errorf("invalid use of ... with builtin %v", l) } @@ -1408,7 +1412,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } - n = ir.NodAt(n.Pos(), ir.OCONV, arg, nil) + n := ir.NodAt(n.Pos(), ir.OCONV, arg, nil) n.SetType(l.Type()) return typecheck1(n, top) } @@ -1463,14 +1467,16 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { if t.NumResults() == 1 { n.SetType(l.Type().Results().Field(0).Type) - if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.ONAME && isRuntimePkg(n.Left().Sym().Pkg) && n.Left().Sym().Name == "getg" { - // Emit code for runtime.getg() directly instead of calling function. - // Most such rewrites (for example the similar one for math.Sqrt) should be done in walk, - // so that the ordering pass can make sure to preserve the semantics of the original code - // (in particular, the exact time of the function call) by introducing temporaries. - // In this case, we know getg() always returns the same result within a given function - // and we want to avoid the temporaries, so we do the rewrite earlier than is typical. - n.SetOp(ir.OGETG) + if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.ONAME { + if sym := n.Left().(*ir.Name).Sym(); isRuntimePkg(sym.Pkg) && sym.Name == "getg" { + // Emit code for runtime.getg() directly instead of calling function. + // Most such rewrites (for example the similar one for math.Sqrt) should be done in walk, + // so that the ordering pass can make sure to preserve the semantics of the original code + // (in particular, the exact time of the function call) by introducing temporaries. + // In this case, we know getg() always returns the same result within a given function + // and we want to avoid the temporaries, so we do the rewrite earlier than is typical. + n.SetOp(ir.OGETG) + } } return n } @@ -1733,6 +1739,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OCONV: + n := n.(*ir.ConvExpr) checkwidth(n.Type()) // ensure width is calculated for backend n.SetLeft(typecheck(n.Left(), ctxExpr)) n.SetLeft(convlit1(n.Left(), n.Type(), true, nil)) @@ -1771,7 +1778,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OSTR2RUNES: if n.Left().Op() == ir.OLITERAL { - n = stringtoruneslit(n) + return stringtoruneslit(n) } } return n @@ -1881,8 +1888,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } nn.SetType(t) - n = nn - return n + return nn case ir.ONEW: if n.Left() == nil { @@ -1990,6 +1996,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { // statements case ir.OAS: + n := n.(*ir.AssignStmt) typecheckas(n) // Code that creates temps does not bother to set defn, so do it here. 
@@ -1999,7 +2006,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OAS2: - typecheckas2(n) + typecheckas2(n.(*ir.AssignListStmt)) return n case ir.OBREAK, @@ -2026,6 +2033,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.ODEFER, ir.OGO: + n := n.(*ir.GoDeferStmt) n.SetLeft(typecheck(n.Left(), ctxStmt|ctxExpr)) if !n.Left().Diag() { checkdefergo(n) @@ -2083,15 +2091,15 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OSELECT: - typecheckselect(n) + typecheckselect(n.(*ir.SelectStmt)) return n case ir.OSWITCH: - typecheckswitch(n) + typecheckswitch(n.(*ir.SwitchStmt)) return n case ir.ORANGE: - typecheckrange(n) + typecheckrange(n.(*ir.RangeStmt)) return n case ir.OTYPESW: @@ -2119,13 +2127,26 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } func typecheckargs(n ir.Node) { - if n.List().Len() != 1 || n.IsDDD() { - typecheckslice(n.List().Slice(), ctxExpr) + var list []ir.Node + switch n := n.(type) { + default: + base.Fatalf("typecheckargs %+v", n.Op()) + case *ir.CallExpr: + list = n.List().Slice() + if n.IsDDD() { + typecheckslice(list, ctxExpr) + return + } + case *ir.ReturnStmt: + list = n.List().Slice() + } + if len(list) != 1 { + typecheckslice(list, ctxExpr) return } - typecheckslice(n.List().Slice(), ctxExpr|ctxMultiOK) - t := n.List().First().Type() + typecheckslice(list, ctxExpr|ctxMultiOK) + t := list[0].Type() if t == nil || !t.IsFuncArgStruct() { return } @@ -2138,7 +2159,7 @@ func typecheckargs(n ir.Node) { } as := ir.Nod(ir.OAS2, nil, nil) - as.PtrRlist().AppendNodes(n.PtrList()) + as.PtrRlist().Append(list...) // If we're outside of function context, then this call will // be executed during the generated init function. However, @@ -2149,16 +2170,24 @@ func typecheckargs(n ir.Node) { if static { Curfn = initTodo } + list = nil for _, f := range t.FieldSlice() { t := temp(f.Type) as.PtrInit().Append(ir.Nod(ir.ODCL, t, nil)) as.PtrList().Append(t) - n.PtrList().Append(t) + list = append(list, t) } if static { Curfn = nil } + switch n := n.(type) { + case *ir.CallExpr: + n.PtrList().Set(list) + case *ir.ReturnStmt: + n.PtrList().Set(list) + } + n.PtrInit().Append(typecheck(as, ctxStmt)) } @@ -2201,7 +2230,7 @@ func checksliceconst(lo ir.Node, hi ir.Node) bool { return true } -func checkdefergo(n ir.Node) { +func checkdefergo(n *ir.GoDeferStmt) { what := "defer" if n.Op() == ir.OGO { what = "go" @@ -2269,13 +2298,12 @@ func implicitstar(n ir.Node) ir.Node { if !t.IsArray() { return n } - n = ir.Nod(ir.ODEREF, n, nil) - n.SetImplicit(true) - n = typecheck(n, ctxExpr) - return n + star := ir.Nod(ir.ODEREF, n, nil) + star.SetImplicit(true) + return typecheck(star, ctxExpr) } -func needOneArg(n ir.Node, f string, args ...interface{}) (ir.Node, bool) { +func needOneArg(n *ir.CallExpr, f string, args ...interface{}) (ir.Node, bool) { if n.List().Len() == 0 { p := fmt.Sprintf(f, args...) base.Errorf("missing argument to %s: %v", p, n) @@ -2291,7 +2319,7 @@ func needOneArg(n ir.Node, f string, args ...interface{}) (ir.Node, bool) { return n.List().First(), true } -func needTwoArgs(n ir.Node) (ir.Node, ir.Node, bool) { +func needTwoArgs(n *ir.CallExpr) (ir.Node, ir.Node, bool) { if n.List().Len() != 2 { if n.List().Len() < 2 { base.Errorf("not enough arguments in call to %v", n) @@ -2334,7 +2362,7 @@ func lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, do // typecheckMethodExpr checks selector expressions (ODOT) where the // base expression is a type expression (OTYPE). 
-func typecheckMethodExpr(n ir.Node) (res ir.Node) { +func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckMethodExpr", n)(&res) } @@ -2417,7 +2445,7 @@ func derefall(t *types.Type) *types.Type { return t } -func lookdot(n ir.Node, t *types.Type, dostrcmp int) *types.Field { +func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { s := n.Sym() dowidth(t) @@ -2449,14 +2477,14 @@ func lookdot(n ir.Node, t *types.Type, dostrcmp int) *types.Field { n.SetType(f1.Type) if t.IsInterface() { if n.Left().Type().IsPtr() { - n.SetLeft(ir.Nod(ir.ODEREF, n.Left(), nil)) // implicitstar - n.Left().SetImplicit(true) - n.SetLeft(typecheck(n.Left(), ctxExpr)) + star := ir.Nod(ir.ODEREF, n.Left(), nil) + star.SetImplicit(true) + n.SetLeft(typecheck(star, ctxExpr)) } n.SetOp(ir.ODOTINTER) } - n.(*ir.SelectorExpr).Selection = f1 + n.Selection = f1 return f1 } @@ -2471,13 +2499,13 @@ func lookdot(n ir.Node, t *types.Type, dostrcmp int) *types.Field { if !types.Identical(rcvr, tt) { if rcvr.IsPtr() && types.Identical(rcvr.Elem(), tt) { checklvalue(n.Left(), "call pointer method on") - n.SetLeft(nodAddr(n.Left())) - n.Left().SetImplicit(true) - n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr)) + addr := nodAddr(n.Left()) + addr.SetImplicit(true) + n.SetLeft(typecheck(addr, ctxType|ctxExpr)) } else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) { - n.SetLeft(ir.Nod(ir.ODEREF, n.Left(), nil)) - n.Left().SetImplicit(true) - n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr)) + star := ir.Nod(ir.ODEREF, n.Left(), nil) + star.SetImplicit(true) + n.SetLeft(typecheck(star, ctxType|ctxExpr)) } else if tt.IsPtr() && tt.Elem().IsPtr() && types.Identical(derefall(tt), derefall(rcvr)) { base.Errorf("calling method %v with receiver %L requires explicit dereference", n.Sym(), n.Left()) for tt.IsPtr() { @@ -2485,9 +2513,9 @@ func lookdot(n ir.Node, t *types.Type, dostrcmp int) *types.Field { if rcvr.IsPtr() && !tt.Elem().IsPtr() { break } - n.SetLeft(ir.Nod(ir.ODEREF, n.Left(), nil)) - n.Left().SetImplicit(true) - n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr)) + star := ir.Nod(ir.ODEREF, n.Left(), nil) + star.SetImplicit(true) + n.SetLeft(typecheck(star, ctxType|ctxExpr)) tt = tt.Elem() } } else { @@ -2495,13 +2523,16 @@ func lookdot(n ir.Node, t *types.Type, dostrcmp int) *types.Field { } } - pll := n - ll := n.Left() - for ll.Left() != nil && (ll.Op() == ir.ODOT || ll.Op() == ir.ODOTPTR || ll.Op() == ir.ODEREF) { - pll = ll - ll = ll.Left() + implicit, ll := n.Implicit(), n.Left() + for ll != nil && (ll.Op() == ir.ODOT || ll.Op() == ir.ODOTPTR || ll.Op() == ir.ODEREF) { + switch l := ll.(type) { + case *ir.SelectorExpr: + implicit, ll = l.Implicit(), l.Left() + case *ir.StarExpr: + implicit, ll = l.Implicit(), l.Left() + } } - if pll.Implicit() && ll.Type().IsPtr() && ll.Type().Sym() != nil && ll.Type().Sym().Def != nil && ir.AsNode(ll.Type().Sym().Def).Op() == ir.OTYPE { + if implicit && ll.Type().IsPtr() && ll.Type().Sym() != nil && ll.Type().Sym().Def != nil && ir.AsNode(ll.Type().Sym().Def).Op() == ir.OTYPE { // It is invalid to automatically dereference a named pointer type when selecting a method. // Make n.Left == ll to clarify error message. 
n.SetLeft(ll) @@ -2512,7 +2543,7 @@ func lookdot(n ir.Node, t *types.Type, dostrcmp int) *types.Field { n.SetOffset(f2.Offset) n.SetType(f2.Type) n.SetOp(ir.ODOTMETH) - n.(*ir.SelectorExpr).Selection = f2 + n.Selection = f2 return f2 } @@ -2742,8 +2773,12 @@ func iscomptype(t *types.Type) bool { // pushtype adds elided type information for composite literals if // appropriate, and returns the resulting expression. -func pushtype(n ir.Node, t *types.Type) ir.Node { - if n == nil || n.Op() != ir.OCOMPLIT || n.Right() != nil { +func pushtype(nn ir.Node, t *types.Type) ir.Node { + if nn == nil || nn.Op() != ir.OCOMPLIT { + return nn + } + n := nn.(*ir.CompLitExpr) + if n.Right() != nil { return n } @@ -2756,16 +2791,16 @@ func pushtype(n ir.Node, t *types.Type) ir.Node { // For *T, return &T{...}. n.SetRight(ir.TypeNode(t.Elem())) - n = nodAddrAt(n.Pos(), n) - n.SetImplicit(true) + addr := ir.NodAt(n.Pos(), ir.OADDR, n, nil) + addr.SetImplicit(true) + return addr } - return n } // The result of typecheckcomplit MUST be assigned back to n, e.g. // n.Left = typecheckcomplit(n.Left) -func typecheckcomplit(n ir.Node) (res ir.Node) { +func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckcomplit", n)(&res) } @@ -2782,7 +2817,7 @@ func typecheckcomplit(n ir.Node) (res ir.Node) { } // Save original node (including n.Right) - n.(ir.OrigNode).SetOrig(ir.Copy(n)) + n.SetOrig(ir.Copy(n)) setlineno(n.Right()) @@ -2833,6 +2868,7 @@ func typecheckcomplit(n ir.Node) (res ir.Node) { base.Errorf("missing key in map literal") continue } + l := l.(*ir.KeyExpr) r := l.Left() r = pushtype(r, t.Key()) @@ -2876,9 +2912,9 @@ func typecheckcomplit(n ir.Node) (res ir.Node) { } // No pushtype allowed here. Must name fields for that. n1 = assignconv(n1, f.Type, "field value") - n1 = nodSym(ir.OSTRUCTKEY, n1, f.Sym) - n1.SetOffset(f.Offset) - ls[i] = n1 + sk := nodSym(ir.OSTRUCTKEY, n1, f.Sym) + sk.SetOffset(f.Offset) + ls[i] = sk } if len(ls) < t.NumFields() { base.Errorf("too few values in %v", n) @@ -2892,7 +2928,8 @@ func typecheckcomplit(n ir.Node) (res ir.Node) { setlineno(l) if l.Op() == ir.OKEY { - key := l.Left() + kv := l.(*ir.KeyExpr) + key := kv.Left() // Sym might have resolved to name in other top-level // package, because of import dot. 
Redirect to correct sym @@ -2911,7 +2948,7 @@ func typecheckcomplit(n ir.Node) (res ir.Node) { continue } - l = ir.NewStructKeyExpr(l.Pos(), s, l.Right()) + l = ir.NewStructKeyExpr(l.Pos(), s, kv.Right()) ls[i] = l } @@ -2923,6 +2960,7 @@ func typecheckcomplit(n ir.Node) (res ir.Node) { ls[i] = typecheck(ls[i], ctxExpr) continue } + l := l.(*ir.StructKeyExpr) f := lookdot1(nil, l.Sym(), t, t.Fields(), 0) if f == nil { @@ -2983,8 +3021,9 @@ func typecheckarraylit(elemType *types.Type, bound int64, elts []ir.Node, ctx st for i, elt := range elts { setlineno(elt) r := elts[i] - var kv ir.Node + var kv *ir.KeyExpr if elt.Op() == ir.OKEY { + elt := elt.(*ir.KeyExpr) elt.SetLeft(typecheck(elt.Left(), ctxExpr)) key = indexconst(elt.Left()) if key < 0 { @@ -3104,9 +3143,9 @@ func checkassign(stmt ir.Node, n ir.Node) { } switch { - case n.Op() == ir.ODOT && n.Left().Op() == ir.OINDEXMAP: + case n.Op() == ir.ODOT && n.(*ir.SelectorExpr).Left().Op() == ir.OINDEXMAP: base.Errorf("cannot assign to struct field %v in map", n) - case (n.Op() == ir.OINDEX && n.Left().Type().IsString()) || n.Op() == ir.OSLICESTR: + case (n.Op() == ir.OINDEX && n.(*ir.IndexExpr).Left().Type().IsString()) || n.Op() == ir.OSLICESTR: base.Errorf("cannot assign to %v (strings are immutable)", n) case n.Op() == ir.OLITERAL && n.Sym() != nil && isGoConst(n): base.Errorf("cannot assign to %v (declared const)", n) @@ -3147,19 +3186,40 @@ func samesafeexpr(l ir.Node, r ir.Node) bool { return l == r case ir.ODOT, ir.ODOTPTR: + l := l.(*ir.SelectorExpr) + r := r.(*ir.SelectorExpr) return l.Sym() != nil && r.Sym() != nil && l.Sym() == r.Sym() && samesafeexpr(l.Left(), r.Left()) - case ir.ODEREF, ir.OCONVNOP, - ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG: + case ir.ODEREF: + l := l.(*ir.StarExpr) + r := r.(*ir.StarExpr) + return samesafeexpr(l.Left(), r.Left()) + + case ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG: + l := l.(*ir.UnaryExpr) + r := r.(*ir.UnaryExpr) + return samesafeexpr(l.Left(), r.Left()) + + case ir.OCONVNOP: + l := l.(*ir.ConvExpr) + r := r.(*ir.ConvExpr) return samesafeexpr(l.Left(), r.Left()) case ir.OCONV: + l := l.(*ir.ConvExpr) + r := r.(*ir.ConvExpr) // Some conversions can't be reused, such as []byte(str). // Allow only numeric-ish types. This is a bit conservative. return issimple[l.Type().Kind()] && samesafeexpr(l.Left(), r.Left()) - case ir.OINDEX, ir.OINDEXMAP, - ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD: + case ir.OINDEX, ir.OINDEXMAP: + l := l.(*ir.IndexExpr) + r := r.(*ir.IndexExpr) + return samesafeexpr(l.Left(), r.Left()) && samesafeexpr(l.Right(), r.Right()) + + case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD: + l := l.(*ir.BinaryExpr) + r := r.(*ir.BinaryExpr) return samesafeexpr(l.Left(), r.Left()) && samesafeexpr(l.Right(), r.Right()) case ir.OLITERAL: @@ -3175,7 +3235,7 @@ func samesafeexpr(l ir.Node, r ir.Node) bool { // type check assignment. // if this assignment is the definition of a var on the left side, // fill in the var's type. 
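// Aside: a minimal sketch of the samesafeexpr shape used above — test
// that both ops match, then assert both sides to that op's concrete
// node type and compare children. Expr, Name and Index are toy
// stand-ins, not the real ir types:
package main

import "fmt"

type Expr interface{ op() string }

type Name struct{ ident string }
type Index struct{ x, i Expr }

func (*Name) op() string  { return "ONAME" }
func (*Index) op() string { return "OINDEX" }

func same(l, r Expr) bool {
	if l.op() != r.op() {
		return false
	}
	switch l := l.(type) {
	case *Name:
		return l.ident == r.(*Name).ident
	case *Index:
		r := r.(*Index) // safe: ops already known to match
		return same(l.x, r.x) && same(l.i, r.i)
	}
	return false
}

func main() {
	a, i := &Name{"a"}, &Name{"i"}
	fmt.Println(same(&Index{a, i}, &Index{a, i})) // true
	fmt.Println(same(a, i))                       // false
}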
-func typecheckas(n ir.Node) { +func typecheckas(n *ir.AssignStmt) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckas", n)(nil) } @@ -3199,7 +3259,7 @@ func typecheckas(n ir.Node) { checkassign(n, n.Left()) if n.Right() != nil && n.Right().Type() != nil { if n.Right().Type().IsFuncArgStruct() { - base.Errorf("assignment mismatch: 1 variable but %v returns %d values", n.Right().Left(), n.Right().Type().NumFields()) + base.Errorf("assignment mismatch: 1 variable but %v returns %d values", n.Right().(*ir.CallExpr).Left(), n.Right().Type().NumFields()) // Multi-value RHS isn't actually valid for OAS; nil out // to indicate failed typechecking. n.Right().SetType(nil) @@ -3233,7 +3293,7 @@ func checkassignto(src *types.Type, dst ir.Node) { } } -func typecheckas2(n ir.Node) { +func typecheckas2(n *ir.AssignListStmt) { if enableTrace && base.Flag.LowerT { defer tracePrint("typecheckas2", n)(nil) } @@ -3400,7 +3460,7 @@ func typecheckfunc(n *ir.Func) { // The result of stringtoruneslit MUST be assigned back to n, e.g. // n.Left = stringtoruneslit(n.Left) -func stringtoruneslit(n ir.Node) ir.Node { +func stringtoruneslit(n *ir.ConvExpr) ir.Node { if n.Left().Op() != ir.OLITERAL || n.Left().Val().Kind() != constant.String { base.Fatalf("stringtoarraylit %v", n) } @@ -3683,19 +3743,25 @@ func markBreak(fn *ir.Func) { case ir.OBREAK: if n.Sym() == nil { - if implicit != nil { - implicit.SetHasBreak(true) - } + setHasBreak(implicit) } else { - if lab := labels[n.Sym()]; lab != nil { - lab.SetHasBreak(true) - } + setHasBreak(labels[n.Sym()]) } - case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OTYPESW, ir.OSELECT, ir.ORANGE: + case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OSELECT, ir.ORANGE: old := implicit implicit = n - sym := n.Sym() + var sym *types.Sym + switch n := n.(type) { + case *ir.ForStmt: + sym = n.Sym() + case *ir.RangeStmt: + sym = n.Sym() + case *ir.SelectStmt: + sym = n.Sym() + case *ir.SwitchStmt: + sym = n.Sym() + } if sym != nil { if labels == nil { // Map creation delayed until we need it - most functions don't. @@ -3715,6 +3781,39 @@ func markBreak(fn *ir.Func) { mark(fn) } +func controlLabel(n ir.Node) *types.Sym { + switch n := n.(type) { + default: + base.Fatalf("controlLabel %+v", n.Op()) + return nil + case *ir.ForStmt: + return n.Sym() + case *ir.RangeStmt: + return n.Sym() + case *ir.SelectStmt: + return n.Sym() + case *ir.SwitchStmt: + return n.Sym() + } +} + +func setHasBreak(n ir.Node) { + switch n := n.(type) { + default: + base.Fatalf("setHasBreak %+v", n.Op()) + case nil: + // ignore + case *ir.ForStmt: + n.SetHasBreak(true) + case *ir.RangeStmt: + n.SetHasBreak(true) + case *ir.SelectStmt: + n.SetHasBreak(true) + case *ir.SwitchStmt: + n.SetHasBreak(true) + } +} + // isTermNodes reports whether the Nodes list ends with a terminating statement. 
func isTermNodes(l ir.Nodes) bool { s := l.Slice() @@ -3752,23 +3851,32 @@ func isTermNode(n ir.Node) bool { case ir.OIF: return isTermNodes(n.Body()) && isTermNodes(n.Rlist()) - case ir.OSWITCH, ir.OTYPESW, ir.OSELECT: + case ir.OSWITCH: if n.HasBreak() { return false } def := false - for _, n1 := range n.List().Slice() { - if !isTermNodes(n1.Body()) { + for _, cas := range n.List().Slice() { + cas := cas.(*ir.CaseStmt) + if !isTermNodes(cas.Body()) { return false } - if n1.List().Len() == 0 { // default + if cas.List().Len() == 0 { // default def = true } } + return def - if n.Op() != ir.OSELECT && !def { + case ir.OSELECT: + if n.HasBreak() { return false } + for _, cas := range n.List().Slice() { + cas := cas.(*ir.CaseStmt) + if !isTermNodes(cas.Body()) { + return false + } + } return true } From bf9bbbd6ed1d58433019c145c10082f4d5c062c9 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 10 Dec 2020 18:45:35 -0500 Subject: [PATCH 173/474] [dev.regabi] cmd/compile: cleanup for concrete types - order An automated rewrite will add concrete type assertions after a test of n.Op(), when n can be safely type-asserted (meaning, n is not reassigned a different type, n is not reassigned and then used outside the scope of the type assertion, and so on). This sequence of CLs handles the code that the automated rewrite does not: adding specific types to function arguments, adjusting code not to call n.Left() etc when n may have multiple representations, and so on. This CL focuses on order.go. Passes buildall w/ toolstash -cmp. Change-Id: Ib5731905a620175a6fe978f512da593e0dae9d87 Reviewed-on: https://go-review.googlesource.com/c/go/+/277922 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/order.go | 369 +++++++++++++++------------ src/cmd/compile/internal/ir/stmt.go | 4 +- 2 files changed, 210 insertions(+), 163 deletions(-) diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index e0c0cabcde3ff..b0a9c9be3e133 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -139,7 +139,7 @@ func (o *Order) cheapExpr(n ir.Node) ir.Node { if l == n.Left() { return n } - a := ir.SepCopy(n) + a := ir.SepCopy(n).(*ir.UnaryExpr) a.SetLeft(l) return typecheck(a, ctxExpr) } @@ -159,21 +159,39 @@ func (o *Order) safeExpr(n ir.Node) ir.Node { case ir.ONAME, ir.OLITERAL, ir.ONIL: return n - case ir.ODOT, ir.OLEN, ir.OCAP: + case ir.OLEN, ir.OCAP: + l := o.safeExpr(n.Left()) + if l == n.Left() { + return n + } + a := ir.SepCopy(n).(*ir.UnaryExpr) + a.SetLeft(l) + return typecheck(a, ctxExpr) + + case ir.ODOT: l := o.safeExpr(n.Left()) if l == n.Left() { return n } - a := ir.SepCopy(n) + a := ir.SepCopy(n).(*ir.SelectorExpr) + a.SetLeft(l) + return typecheck(a, ctxExpr) + + case ir.ODOTPTR: + l := o.cheapExpr(n.Left()) + if l == n.Left() { + return n + } + a := ir.SepCopy(n).(*ir.SelectorExpr) a.SetLeft(l) return typecheck(a, ctxExpr) - case ir.ODOTPTR, ir.ODEREF: + case ir.ODEREF: l := o.cheapExpr(n.Left()) if l == n.Left() { return n } - a := ir.SepCopy(n) + a := ir.SepCopy(n).(*ir.StarExpr) a.SetLeft(l) return typecheck(a, ctxExpr) @@ -188,7 +206,7 @@ func (o *Order) safeExpr(n ir.Node) ir.Node { if l == n.Left() && r == n.Right() { return n } - a := ir.SepCopy(n) + a := ir.SepCopy(n).(*ir.IndexExpr) a.SetLeft(l) a.SetRight(r) return typecheck(a, ctxExpr) @@ -206,7 +224,7 @@ func (o *Order) safeExpr(n ir.Node) ir.Node { // because we emit explicit VARKILL instructions marking the end of those // temporaries' 
lifetimes. func isaddrokay(n ir.Node) bool { - return islvalue(n) && (n.Op() != ir.ONAME || n.Class() == ir.PEXTERN || ir.IsAutoTmp(n)) + return islvalue(n) && (n.Op() != ir.ONAME || n.(*ir.Name).Class() == ir.PEXTERN || ir.IsAutoTmp(n)) } // addrTemp ensures that n is okay to pass by address to runtime routines. @@ -225,7 +243,7 @@ func (o *Order) addrTemp(n ir.Node) ir.Node { if s.out != nil { base.Fatalf("staticassign of const generated code: %+v", n) } - vstat = typecheck(vstat, ctxExpr) + vstat = typecheck(vstat, ctxExpr).(*ir.Name) return vstat } if isaddrokay(n) { @@ -267,6 +285,7 @@ func mapKeyReplaceStrConv(n ir.Node) bool { replaced = true case ir.OSTRUCTLIT: for _, elem := range n.List().Slice() { + elem := elem.(*ir.StructKeyExpr) if mapKeyReplaceStrConv(elem.Left()) { replaced = true } @@ -274,7 +293,7 @@ func mapKeyReplaceStrConv(n ir.Node) bool { case ir.OARRAYLIT: for _, elem := range n.List().Slice() { if elem.Op() == ir.OKEY { - elem = elem.Right() + elem = elem.(*ir.KeyExpr).Right() } if mapKeyReplaceStrConv(elem) { replaced = true @@ -337,60 +356,31 @@ func orderMakeSliceCopy(s []ir.Node) { if base.Flag.N != 0 || instrumenting { return } - - if len(s) < 2 { + if len(s) < 2 || s[0] == nil || s[0].Op() != ir.OAS || s[1] == nil || s[1].Op() != ir.OCOPY { return } - asn := s[0] - copyn := s[1] - - if asn == nil || asn.Op() != ir.OAS { - return - } - if asn.Left().Op() != ir.ONAME { - return - } - if ir.IsBlank(asn.Left()) { - return - } - maken := asn.Right() - if maken == nil || maken.Op() != ir.OMAKESLICE { - return - } - if maken.Esc() == EscNone { - return - } - if maken.Left() == nil || maken.Right() != nil { - return - } - if copyn.Op() != ir.OCOPY { - return - } - if copyn.Left().Op() != ir.ONAME { - return - } - if asn.Left().Sym() != copyn.Left().Sym() { - return - } - if copyn.Right().Op() != ir.ONAME { + as := s[0].(*ir.AssignStmt) + cp := s[1].(*ir.BinaryExpr) + if as.Right() == nil || as.Right().Op() != ir.OMAKESLICE || ir.IsBlank(as.Left()) || + as.Left().Op() != ir.ONAME || cp.Left().Op() != ir.ONAME || cp.Right().Op() != ir.ONAME || + as.Left().Name() != cp.Left().Name() || cp.Left().Name() == cp.Right().Name() { + // The line above this one is correct with the differing equality operators: + // we want as.X and cp.X to be the same name, + // but we want the initial data to be coming from a different name. return } - if copyn.Left().Sym() == copyn.Right().Sym() { + mk := as.Right().(*ir.MakeExpr) + if mk.Esc() == EscNone || mk.Left() == nil || mk.Right() != nil { return } - - maken.SetOp(ir.OMAKESLICECOPY) - maken.SetRight(copyn.Right()) + mk.SetOp(ir.OMAKESLICECOPY) + mk.SetRight(cp.Right()) // Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s) - maken.SetBounded(maken.Left().Op() == ir.OLEN && samesafeexpr(maken.Left().Left(), copyn.Right())) - - maken = typecheck(maken, ctxExpr) - + mk.SetBounded(mk.Left().Op() == ir.OLEN && samesafeexpr(mk.Left().(*ir.UnaryExpr).Left(), cp.Right())) + as.SetRight(typecheck(mk, ctxExpr)) s[1] = nil // remove separate copy call - - return } // edge inserts coverage instrumentation for libfuzzer. @@ -405,8 +395,7 @@ func (o *Order) edge() { counter.Name().SetLibfuzzerExtraCounter(true) // counter += 1 - incr := ir.Nod(ir.OASOP, counter, nodintconst(1)) - incr.SetSubOp(ir.OADD) + incr := ir.NewAssignOpStmt(base.Pos, ir.OADD, counter, nodintconst(1)) o.append(incr) } @@ -469,20 +458,34 @@ func (o *Order) init(n ir.Node) { // call orders the call expression n. 
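// Aside: the source-level pattern that orderMakeSliceCopy above fuses
// into a single OMAKESLICECOPY (a sketch; s and m are just example
// names). The fused op can skip zeroing the part of m that the copy
// overwrites:
package main

import "fmt"

func main() {
	s := []int{1, 2, 3}

	m := make([]int, len(s)) // OAS of an OMAKESLICE result ...
	copy(m, s)               // ... immediately copied into from a name

	fmt.Println(m) // [1 2 3]
}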
// n.Op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY. -func (o *Order) call(n ir.Node) { - if n.Init().Len() > 0 { - // Caller should have already called o.init(n). - base.Fatalf("%v with unexpected ninit", n.Op()) +func (o *Order) call(nn ir.Node) { + if nn.Init().Len() > 0 { + // Caller should have already called o.init(nn). + base.Fatalf("%v with unexpected ninit", nn.Op()) } // Builtin functions. - if n.Op() != ir.OCALLFUNC && n.Op() != ir.OCALLMETH && n.Op() != ir.OCALLINTER { - n.SetLeft(o.expr(n.Left(), nil)) - n.SetRight(o.expr(n.Right(), nil)) - o.exprList(n.List()) + if nn.Op() != ir.OCALLFUNC && nn.Op() != ir.OCALLMETH && nn.Op() != ir.OCALLINTER { + switch n := nn.(type) { + default: + base.Fatalf("unexpected call: %+v", n) + case *ir.UnaryExpr: + n.SetLeft(o.expr(n.Left(), nil)) + case *ir.ConvExpr: + n.SetLeft(o.expr(n.Left(), nil)) + case *ir.BinaryExpr: + n.SetLeft(o.expr(n.Left(), nil)) + n.SetRight(o.expr(n.Right(), nil)) + case *ir.MakeExpr: + n.SetLeft(o.expr(n.Left(), nil)) + n.SetRight(o.expr(n.Right(), nil)) + case *ir.CallExpr: + o.exprList(n.List()) + } return } + n := nn.(*ir.CallExpr) fixVariadicCall(n) n.SetLeft(o.expr(n.Left(), nil)) o.exprList(n.List()) @@ -495,11 +498,13 @@ func (o *Order) call(n ir.Node) { // arrange for the pointer to be kept alive until the call returns, // by copying it into a temp and marking that temp // still alive when we pop the temp stack. - if arg.Op() == ir.OCONVNOP && arg.Left().Type().IsUnsafePtr() { - x := o.copyExpr(arg.Left()) - arg.SetLeft(x) - x.Name().SetAddrtaken(true) // ensure SSA keeps the x variable - n.PtrBody().Append(typecheck(ir.Nod(ir.OVARLIVE, x, nil), ctxStmt)) + if arg.Op() == ir.OCONVNOP { + if arg.Left().Type().IsUnsafePtr() { + x := o.copyExpr(arg.Left()) + arg.SetLeft(x) + x.Name().SetAddrtaken(true) // ensure SSA keeps the x variable + n.PtrBody().Append(typecheck(ir.Nod(ir.OVARLIVE, x, nil), ctxStmt)) + } } } @@ -537,18 +542,14 @@ func (o *Order) mapAssign(n ir.Node) { default: base.Fatalf("order.mapAssign %v", n.Op()) - case ir.OAS, ir.OASOP: + case ir.OAS: if n.Left().Op() == ir.OINDEXMAP { - // Make sure we evaluate the RHS before starting the map insert. - // We need to make sure the RHS won't panic. See issue 22881. - if n.Right().Op() == ir.OAPPEND { - s := n.Right().List().Slice()[1:] - for i, n := range s { - s[i] = o.cheapExpr(n) - } - } else { - n.SetRight(o.cheapExpr(n.Right())) - } + n.SetRight(o.safeMapRHS(n.Right())) + } + o.out = append(o.out, n) + case ir.OASOP: + if n.Left().Op() == ir.OINDEXMAP { + n.SetRight(o.safeMapRHS(n.Right())) } o.out = append(o.out, n) @@ -557,6 +558,7 @@ func (o *Order) mapAssign(n ir.Node) { for i, m := range n.List().Slice() { switch { case m.Op() == ir.OINDEXMAP: + m := m.(*ir.IndexExpr) if !ir.IsAutoTmp(m.Left()) { m.SetLeft(o.copyExpr(m.Left())) } @@ -577,6 +579,19 @@ func (o *Order) mapAssign(n ir.Node) { } } +func (o *Order) safeMapRHS(r ir.Node) ir.Node { + // Make sure we evaluate the RHS before starting the map insert. + // We need to make sure the RHS won't panic. See issue 22881. + if r.Op() == ir.OAPPEND { + s := r.List().Slice()[1:] + for i, n := range s { + s[i] = o.cheapExpr(n) + } + return r + } + return o.cheapExpr(r) +} + // stmt orders the statement n, appending to o.out. // Temporaries created during the statement are cleaned // up using VARKILL instructions as possible. @@ -616,12 +631,15 @@ func (o *Order) stmt(n ir.Node) { // makes sure there is nothing too deep being copied. 
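// Aside: a hedged sketch of the OASOP-on-map case handled next. The
// rewrite splits m[k()] += v into one read through a safe copy of the
// LHS and one write-back, so the key expression runs exactly once:
package main

import "fmt"

var calls int

func k() int { calls++; return 7 }

func main() {
	m := map[int]int{7: 1}
	m[k()] += 2              // read m[7], add 2, write m[7] back
	fmt.Println(m[7], calls) // 3 1: k was evaluated a single time
}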
l1 := o.safeExpr(n.Left()) l2 := ir.DeepCopy(src.NoXPos, l1) - if l1.Op() == ir.OINDEXMAP { + if l2.Op() == ir.OINDEXMAP { l2.SetIndexMapLValue(false) } l2 = o.copyExpr(l2) r := o.expr(typecheck(ir.NewBinaryExpr(n.Pos(), n.SubOp(), l2, n.Right()), ctxExpr), nil) - n = typecheck(ir.NodAt(n.Pos(), ir.OAS, l1, r), ctxStmt) + as := typecheck(ir.NodAt(n.Pos(), ir.OAS, l1, r), ctxStmt) + o.mapAssign(as) + o.cleanTemp(t) + return } o.mapAssign(n) @@ -636,6 +654,7 @@ func (o *Order) stmt(n ir.Node) { // Special: avoid copy of func call n.Right case ir.OAS2FUNC: + n := n.(*ir.AssignListStmt) t := o.markTemp() o.exprList(n.List()) o.init(n.Rlist().First()) @@ -650,11 +669,14 @@ func (o *Order) stmt(n ir.Node) { // OAS2MAPR: make sure key is addressable if needed, // and make sure OINDEXMAP is not copied out. case ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OAS2MAPR: + n := n.(*ir.AssignListStmt) t := o.markTemp() o.exprList(n.List()) switch r := n.Rlist().First(); r.Op() { - case ir.ODOTTYPE2, ir.ORECV: + case ir.ODOTTYPE2: + r.SetLeft(o.expr(r.Left(), nil)) + case ir.ORECV: r.SetLeft(o.expr(r.Left(), nil)) case ir.OINDEXMAP: r.SetLeft(o.expr(r.Left(), nil)) @@ -692,17 +714,22 @@ func (o *Order) stmt(n ir.Node) { o.out = append(o.out, n) o.cleanTemp(t) - case ir.OCLOSE, - ir.OCOPY, - ir.OPRINT, - ir.OPRINTN, - ir.ORECOVER, - ir.ORECV: + case ir.OCLOSE, ir.ORECV: + t := o.markTemp() + n.SetLeft(o.expr(n.Left(), nil)) + o.out = append(o.out, n) + o.cleanTemp(t) + + case ir.OCOPY: t := o.markTemp() n.SetLeft(o.expr(n.Left(), nil)) n.SetRight(o.expr(n.Right(), nil)) + o.out = append(o.out, n) + o.cleanTemp(t) + + case ir.OPRINT, ir.OPRINTN, ir.ORECOVER: + t := o.markTemp() o.exprList(n.List()) - o.exprList(n.Rlist()) o.out = append(o.out, n) o.cleanTemp(t) @@ -770,8 +797,9 @@ func (o *Order) stmt(n ir.Node) { // Mark []byte(str) range expression to reuse string backing storage. // It is safe because the storage cannot be mutated. + n := n.(*ir.RangeStmt) if n.Right().Op() == ir.OSTR2BYTES { - n.Right().SetOp(ir.OSTR2BYTESTMP) + n.Right().(*ir.ConvExpr).SetOp(ir.OSTR2BYTESTMP) } t := o.markTemp() @@ -845,16 +873,14 @@ func (o *Order) stmt(n ir.Node) { case ir.OSELECT: t := o.markTemp() - for _, n2 := range n.List().Slice() { - if n2.Op() != ir.OCASE { - base.Fatalf("order select case %v", n2.Op()) - } - r := n2.Left() - setlineno(n2) + for _, cas := range n.List().Slice() { + cas := cas.(*ir.CaseStmt) + r := cas.Left() + setlineno(cas) // Append any new body prologue to ninit. // The next loop will insert ninit into nbody. - if n2.Init().Len() != 0 { + if cas.Init().Len() != 0 { base.Fatalf("order select ninit") } if r == nil { @@ -866,26 +892,29 @@ func (o *Order) stmt(n ir.Node) { base.Fatalf("unknown op in select %v", r.Op()) case ir.OSELRECV, ir.OSELRECV2: - var dst, ok, recv ir.Node + var dst, ok ir.Node + var recv *ir.UnaryExpr + var def bool if r.Op() == ir.OSELRECV { // case x = <-c // case <-c (dst is ir.BlankNode) - dst, ok, recv = r.Left(), ir.BlankNode, r.Right() + def, dst, ok, recv = r.Colas(), r.Left(), ir.BlankNode, r.Right().(*ir.UnaryExpr) } else { + r := r.(*ir.AssignListStmt) // case x, ok = <-c - dst, ok, recv = r.List().First(), r.List().Second(), r.Rlist().First() + def, dst, ok, recv = r.Colas(), r.List().First(), r.List().Second(), r.Rlist().First().(*ir.UnaryExpr) } // If this is case x := <-ch or case x, y := <-ch, the case has // the ODCL nodes to declare x and y. We want to delay that // declaration (and possible allocation) until inside the case body. 
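// Aside: the source form whose declarations are being deferred here.
// In `case x, ok := <-ch`, x and ok are scoped to the case body, so
// their ODCLs are deleted at this point and recreated inside the body:
package main

import "fmt"

func main() {
	ch := make(chan int, 1)
	ch <- 42
	select {
	case x, ok := <-ch: // per-case := declaration
		fmt.Println(x, ok) // 42 true; x, ok visible only in this body
	}
}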
// Delete the ODCL nodes here and recreate them inside the body below. - if r.Colas() { + if def { init := r.Init().Slice() - if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].Left() == dst { + if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).Left() == dst { init = init[1:] } - if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].Left() == ok { + if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).Left() == ok { init = init[1:] } r.PtrInit().Set(init) @@ -910,35 +939,36 @@ func (o *Order) stmt(n ir.Node) { // use channel element type for temporary to avoid conversions, // such as in case interfacevalue = <-intchan. // the conversion happens in the OAS instead. - if r.Colas() { + if def { dcl := ir.Nod(ir.ODCL, dst, nil) - n2.PtrInit().Append(typecheck(dcl, ctxStmt)) + cas.PtrInit().Append(typecheck(dcl, ctxStmt)) } tmp := o.newTemp(recv.Left().Type().Elem(), recv.Left().Type().Elem().HasPointers()) as := ir.Nod(ir.OAS, dst, tmp) - n2.PtrInit().Append(typecheck(as, ctxStmt)) + cas.PtrInit().Append(typecheck(as, ctxStmt)) dst = tmp } if !ir.IsBlank(ok) { - if r.Colas() { + if def { dcl := ir.Nod(ir.ODCL, ok, nil) - n2.PtrInit().Append(typecheck(dcl, ctxStmt)) + cas.PtrInit().Append(typecheck(dcl, ctxStmt)) } tmp := o.newTemp(types.Types[types.TBOOL], false) as := ir.Nod(ir.OAS, ok, conv(tmp, ok.Type())) - n2.PtrInit().Append(typecheck(as, ctxStmt)) + cas.PtrInit().Append(typecheck(as, ctxStmt)) ok = tmp } if r.Op() == ir.OSELRECV { r.SetLeft(dst) } else { + r := r.(*ir.AssignListStmt) r.List().SetIndex(0, dst) r.List().SetIndex(1, ok) } - orderBlock(n2.PtrInit(), o.free) + orderBlock(cas.PtrInit(), o.free) case ir.OSEND: if r.Init().Len() != 0 { @@ -962,14 +992,15 @@ func (o *Order) stmt(n ir.Node) { // Now that we have accumulated all the temporaries, clean them. // Also insert any ninit queued during the previous loop. // (The temporary cleaning must follow that ninit work.) - for _, n3 := range n.List().Slice() { - orderBlock(n3.PtrBody(), o.free) - n3.PtrBody().Prepend(o.cleanTempNoPop(t)...) + for _, cas := range n.List().Slice() { + cas := cas.(*ir.CaseStmt) + orderBlock(cas.PtrBody(), o.free) + cas.PtrBody().Prepend(o.cleanTempNoPop(t)...) // TODO(mdempsky): Is this actually necessary? // walkselect appears to walk Ninit. - n3.PtrBody().Prepend(n3.Init().Slice()...) - n3.PtrInit().Set(nil) + cas.PtrBody().Prepend(cas.Init().Slice()...) + cas.PtrInit().Set(nil) } o.out = append(o.out, n) @@ -998,6 +1029,7 @@ func (o *Order) stmt(n ir.Node) { // For now just clean all the temporaries at the end. // In practice that's fine. case ir.OSWITCH: + n := n.(*ir.SwitchStmt) if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) { // Add empty "default:" case for instrumentation. 
n.PtrList().Append(ir.Nod(ir.OCASE, nil, nil)) @@ -1006,9 +1038,7 @@ func (o *Order) stmt(n ir.Node) { t := o.markTemp() n.SetLeft(o.expr(n.Left(), nil)) for _, ncas := range n.List().Slice() { - if ncas.Op() != ir.OCASE { - base.Fatalf("order switch case %v", ncas.Op()) - } + ncas := ncas.(*ir.CaseStmt) o.exprListInPlace(ncas.List()) orderBlock(ncas.PtrBody(), o.free) } @@ -1020,11 +1050,9 @@ func (o *Order) stmt(n ir.Node) { base.Pos = lno } -func hasDefaultCase(n ir.Node) bool { +func hasDefaultCase(n *ir.SwitchStmt) bool { for _, ncas := range n.List().Slice() { - if ncas.Op() != ir.OCASE { - base.Fatalf("expected case, found %v", ncas.Op()) - } + ncas := ncas.(*ir.CaseStmt) if ncas.List().Len() == 0 { return true } @@ -1067,8 +1095,13 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { if n == nil { return n } - lno := setlineno(n) + n = o.expr1(n, lhs) + base.Pos = lno + return n +} + +func (o *Order) expr1(n, lhs ir.Node) ir.Node { o.init(n) switch n.Op() { @@ -1077,6 +1110,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { o.edit = o.exprNoLHS // create closure once } ir.EditChildren(n, o.edit) + return n // Addition of strings turns into a function call. // Allocate a temporary to hold the strings. @@ -1111,6 +1145,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { } } } + return n case ir.OINDEXMAP: n.SetLeft(o.expr(n.Left(), nil)) @@ -1133,15 +1168,16 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { // key must be addressable n.SetRight(o.mapKeyTemp(n.Left().Type(), n.Right())) if needCopy { - n = o.copyExpr(n) + return o.copyExpr(n) } + return n // concrete type (not interface) argument might need an addressable // temporary to pass to the runtime conversion routine. case ir.OCONVIFACE: n.SetLeft(o.expr(n.Left(), nil)) if n.Left().Type().IsInterface() { - break + return n } if _, needsaddr := convFuncName(n.Left().Type(), n.Type()); needsaddr || isStaticCompositeLiteral(n.Left()) { // Need a temp if we need to pass the address to the conversion function. @@ -1149,20 +1185,23 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { // whose address we can put directly in an interface (see OCONVIFACE case in walk). n.SetLeft(o.addrTemp(n.Left())) } + return n case ir.OCONVNOP: if n.Type().IsKind(types.TUNSAFEPTR) && n.Left().Type().IsKind(types.TUINTPTR) && (n.Left().Op() == ir.OCALLFUNC || n.Left().Op() == ir.OCALLINTER || n.Left().Op() == ir.OCALLMETH) { + call := n.Left().(*ir.CallExpr) // When reordering unsafe.Pointer(f()) into a separate // statement, the conversion and function call must stay // together. See golang.org/issue/15329. - o.init(n.Left()) - o.call(n.Left()) + o.init(call) + o.call(call) if lhs == nil || lhs.Op() != ir.ONAME || instrumenting { - n = o.copyExpr(n) + return o.copyExpr(n) } } else { n.SetLeft(o.expr(n.Left(), nil)) } + return n case ir.OANDAND, ir.OOROR: // ... = LHS && RHS @@ -1199,7 +1238,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { nif.PtrRlist().Set(gen) } o.out = append(o.out, nif) - n = r + return r case ir.OCALLFUNC, ir.OCALLINTER, @@ -1222,27 +1261,31 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { if isRuneCount(n) { // len([]rune(s)) is rewritten to runtime.countrunes(s) later. - n.Left().SetLeft(o.expr(n.Left().Left(), nil)) + conv := n.(*ir.UnaryExpr).Left().(*ir.ConvExpr) + conv.SetLeft(o.expr(conv.Left(), nil)) } else { o.call(n) } if lhs == nil || lhs.Op() != ir.ONAME || instrumenting { - n = o.copyExpr(n) + return o.copyExpr(n) } + return n case ir.OAPPEND: // Check for append(x, make([]T, y)...) . 
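// Aside: the append-of-make form matched just below, shown at the
// source level (a sketch; x and y are example names). The compiler can
// later lower this to growing x in place by y zeroed elements instead
// of materializing the make result:
package main

import "fmt"

func main() {
	x := []int{1, 2}
	y := 3
	x = append(x, make([]int, y)...) // isAppendOfMake pattern
	fmt.Println(x)                   // [1 2 0 0 0]
}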
if isAppendOfMake(n) { - n.List().SetFirst(o.expr(n.List().First(), nil)) // order x - n.List().Second().SetLeft(o.expr(n.List().Second().Left(), nil)) // order y + n.List().SetFirst(o.expr(n.List().First(), nil)) // order x + mk := n.List().Second().(*ir.MakeExpr) + mk.SetLeft(o.expr(mk.Left(), nil)) // order y } else { o.exprList(n.List()) } if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.List().First()) { - n = o.copyExpr(n) + return o.copyExpr(n) } + return n case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR: n.SetLeft(o.expr(n.Left(), nil)) @@ -1255,39 +1298,44 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { max = o.cheapExpr(max) n.SetSliceBounds(low, high, max) if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.Left()) { - n = o.copyExpr(n) + return o.copyExpr(n) } + return n case ir.OCLOSURE: + n := n.(*ir.ClosureExpr) if n.Transient() && len(n.Func().ClosureVars) > 0 { prealloc[n] = o.newTemp(closureType(n), false) } + return n - case ir.OSLICELIT, ir.OCALLPART: + case ir.OCALLPART: + n := n.(*ir.CallPartExpr) n.SetLeft(o.expr(n.Left(), nil)) - n.SetRight(o.expr(n.Right(), nil)) + if n.Transient() { + t := partialCallType(n) + prealloc[n] = o.newTemp(t, false) + } + return n + + case ir.OSLICELIT: o.exprList(n.List()) - o.exprList(n.Rlist()) if n.Transient() { - var t *types.Type - switch n.Op() { - case ir.OSLICELIT: - t = types.NewArray(n.Type().Elem(), ir.Int64Val(n.Right())) - case ir.OCALLPART: - t = partialCallType(n) - } + t := types.NewArray(n.Type().Elem(), ir.Int64Val(n.Right())) prealloc[n] = o.newTemp(t, false) } + return n case ir.ODOTTYPE, ir.ODOTTYPE2: n.SetLeft(o.expr(n.Left(), nil)) if !isdirectiface(n.Type()) || instrumenting { - n = o.copyExprClear(n) + return o.copyExprClear(n) } + return n case ir.ORECV: n.SetLeft(o.expr(n.Left(), nil)) - n = o.copyExprClear(n) + return o.copyExprClear(n) case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: n.SetLeft(o.expr(n.Left(), nil)) @@ -1300,10 +1348,10 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { // buffer during conversion. String comparison does not // memorize the strings for later use, so it is safe. if n.Left().Op() == ir.OBYTES2STR { - n.Left().SetOp(ir.OBYTES2STRTMP) + n.Left().(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP) } if n.Right().Op() == ir.OBYTES2STR { - n.Right().SetOp(ir.OBYTES2STRTMP) + n.Right().(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP) } case t.IsStruct() || t.IsArray(): @@ -1312,6 +1360,8 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { n.SetLeft(o.addrTemp(n.Left())) n.SetRight(o.addrTemp(n.Right())) } + return n + case ir.OMAPLIT: // Order map by converting: // map[int]int{ @@ -1330,11 +1380,9 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { // See issue 26552. entries := n.List().Slice() statics := entries[:0] - var dynamics []ir.Node + var dynamics []*ir.KeyExpr for _, r := range entries { - if r.Op() != ir.OKEY { - base.Fatalf("OMAPLIT entry not OKEY: %v\n", r) - } + r := r.(*ir.KeyExpr) if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) { dynamics = append(dynamics, r) @@ -1343,7 +1391,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { // Recursively ordering some static entries can change them to dynamic; // e.g., OCONVIFACE nodes. See #31777. 
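// Aside: a hedged sketch of the static/dynamic split performed by this
// OMAPLIT hunk. Static entries stay in the literal; dynamic ones are
// emitted afterwards as individual indexed assignments (k and v are
// example names):
package main

import "fmt"

func k() int { return 3 }
func v() int { return 4 }

func main() {
	// In effect: m := map[int]int{1: 2} followed by m[k()] = v().
	m := map[int]int{1: 2, k(): v()}
	fmt.Println(m[1], m[3]) // 2 4
}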
- r = o.expr(r, nil) + r = o.expr(r, nil).(*ir.KeyExpr) if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) { dynamics = append(dynamics, r) continue @@ -1354,7 +1402,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { n.PtrList().Set(statics) if len(dynamics) == 0 { - break + return n } // Emit the creation of the map (with all its static entries). @@ -1362,18 +1410,17 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { as := ir.Nod(ir.OAS, m, n) typecheck(as, ctxStmt) o.stmt(as) - n = m // Emit eval+insert of dynamic entries, one at a time. for _, r := range dynamics { - as := ir.Nod(ir.OAS, ir.Nod(ir.OINDEX, n, r.Left()), r.Right()) + as := ir.Nod(ir.OAS, ir.Nod(ir.OINDEX, m, r.Left()), r.Right()) typecheck(as, ctxStmt) // Note: this converts the OINDEX to an OINDEXMAP o.stmt(as) } + return m } - base.Pos = lno - return n + // No return - type-assertions above. Each case must return for itself. } // as2 orders OAS2XXXX nodes. It creates temporaries to ensure left-to-right assignment. @@ -1384,7 +1431,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { // tmp1, tmp2, tmp3 = ... // a, b, a = tmp1, tmp2, tmp3 // This is necessary to ensure left to right assignment order. -func (o *Order) as2(n ir.Node) { +func (o *Order) as2(n *ir.AssignListStmt) { tmplist := []ir.Node{} left := []ir.Node{} for ni, l := range n.List().Slice() { @@ -1406,7 +1453,7 @@ func (o *Order) as2(n ir.Node) { // okAs2 orders OAS2XXX with ok. // Just like as2, this also adds temporaries to ensure left-to-right assignment. -func (o *Order) okAs2(n ir.Node) { +func (o *Order) okAs2(n *ir.AssignListStmt) { var tmp1, tmp2 ir.Node if !ir.IsBlank(n.List().First()) { typ := n.Rlist().First().Type() diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index b7d0c1adc4503..0302ffcc949ec 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -137,8 +137,8 @@ type AssignOpStmt struct { IncDec bool // actually ++ or -- } -func NewAssignOpStmt(pos src.XPos, op Op, x, y Node) *AssignOpStmt { - n := &AssignOpStmt{AsOp: op, X: x, Y: y} +func NewAssignOpStmt(pos src.XPos, asOp Op, x, y Node) *AssignOpStmt { + n := &AssignOpStmt{AsOp: asOp, X: x, Y: y} n.pos = pos n.op = OASOP return n From 846740c17fe3f65fe4c004e07a7550cba7c028fb Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 10 Dec 2020 18:45:58 -0500 Subject: [PATCH 174/474] [dev.regabi] cmd/compile: cleanup for concrete types - ssa An automated rewrite will add concrete type assertions after a test of n.Op(), when n can be safely type-asserted (meaning, n is not reassigned a different type, n is not reassigned and then used outside the scope of the type assertion, and so on). This sequence of CLs handles the code that the automated rewrite does not: adding specific types to function arguments, adjusting code not to call n.Left() etc when n may have multiple representations, and so on. This CL focuses on ssa.go. Passes buildall w/ toolstash -cmp. 
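As a hedged illustration of the rewrite pattern described above (the types below are toy stand-ins, not the real ir package): once code has tested n.Op(), the node can be asserted to the matching concrete struct type, replacing generic Left()/Right() accessors with direct, typed field access.

package main

import "fmt"

type Op int

const (
	ONAME Op = iota
	OCALL
)

type Node interface{ Op() Op }

type Name struct{ Ident string }
type Call struct{ Fun Node }

func (*Name) Op() Op { return ONAME }
func (*Call) Op() Op { return OCALL }

func describe(n Node) string {
	if n.Op() == OCALL {
		n := n.(*Call) // safe: the Op test guards the assertion
		return "call of " + describe(n.Fun)
	}
	return n.(*Name).Ident
}

func main() {
	fmt.Println(describe(&Call{Fun: &Name{Ident: "f"}})) // call of f
}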
Change-Id: Iefacc7104dd9469e3c574149791ab0bff29f7fee Reviewed-on: https://go-review.googlesource.com/c/go/+/277923 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/inl.go | 4 +- src/cmd/compile/internal/gc/phi.go | 8 +- src/cmd/compile/internal/gc/plive.go | 10 +- src/cmd/compile/internal/gc/ssa.go | 374 +++++++++++++++------------ src/cmd/compile/internal/gc/walk.go | 2 +- 5 files changed, 229 insertions(+), 169 deletions(-) diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 9342046dcce7b..3a19efd325241 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -335,7 +335,7 @@ func (v *hairyVisitor) doNode(n ir.Node) error { } } - if isIntrinsicCall(n) { + if isIntrinsicCall(n.(*ir.CallExpr)) { // Treat like any other node. break } @@ -583,7 +583,7 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No if base.Flag.LowerM > 3 { fmt.Printf("%v:call to func %+v\n", ir.Line(n), n.Left()) } - if isIntrinsicCall(n) { + if isIntrinsicCall(n.(*ir.CallExpr)) { break } if fn := inlCallee(n.Left()); fn != nil && fn.Inl != nil { diff --git a/src/cmd/compile/internal/gc/phi.go b/src/cmd/compile/internal/gc/phi.go index 32c330b584a76..75ce18ff841e0 100644 --- a/src/cmd/compile/internal/gc/phi.go +++ b/src/cmd/compile/internal/gc/phi.go @@ -254,7 +254,9 @@ func (s *phiState) insertVarPhis(n int, var_ ir.Node, defs []*ssa.Block, typ *ty hasPhi.add(c.ID) v := c.NewValue0I(currentRoot.Pos, ssa.OpPhi, typ, int64(n)) // TODO: line number right? // Note: we store the variable number in the phi's AuxInt field. Used temporarily by phi building. - s.s.addNamedValue(var_, v) + if var_.Op() == ir.ONAME { + s.s.addNamedValue(var_.(*ir.Name), v) + } for range c.Preds { v.AddArg(s.placeholder) // Actual args will be filled in by resolveFwdRefs. } @@ -546,7 +548,9 @@ func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ ir. // Generate a FwdRef for the variable and return that. v := b.NewValue0A(line, ssa.OpFwdRef, t, FwdRefAux{N: var_}) s.defvars[b.ID][var_] = v - s.s.addNamedValue(var_, v) + if var_.Op() == ir.ONAME { + s.s.addNamedValue(var_.(*ir.Name), v) + } s.fwdrefs = append(s.fwdrefs, v) return v } diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index 9952bfcf36677..6deb3ecc7a94b 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -206,8 +206,12 @@ type progeffectscache struct { // nor do we care about non-local variables, // nor do we care about empty structs (handled by the pointer check), // nor do we care about the fake PAUTOHEAP variables. -func livenessShouldTrack(n ir.Node) bool { - return n.Op() == ir.ONAME && (n.Class() == ir.PAUTO || n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Type().HasPointers() +func livenessShouldTrack(nn ir.Node) bool { + if nn.Op() != ir.ONAME { + return false + } + n := nn.(*ir.Name) + return (n.Class() == ir.PAUTO || n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Type().HasPointers() } // getvariables returns the list of on-stack variables that we need to track @@ -1165,7 +1169,7 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) { // Size args bitmaps to be just large enough to hold the largest pointer. // First, find the largest Xoffset node we care about. // (Nodes without pointers aren't in lv.vars; see livenessShouldTrack.) 
- var maxArgNode ir.Node + var maxArgNode *ir.Name for _, n := range lv.vars { switch n.Class() { case ir.PPARAM, ir.PPARAMOUT: diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index f13c45c2a618b..4d9073a4b6bde 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -187,7 +187,7 @@ func initssaconfig() { // function/method/interface call), where the receiver of a method call is // considered as the 0th parameter. This does not include the receiver of an // interface call. -func getParam(n ir.Node, i int) *types.Field { +func getParam(n *ir.CallExpr, i int) *types.Field { t := n.Left().Type() if n.Op() == ir.OCALLMETH { if i == 0 { @@ -559,20 +559,20 @@ func (s *state) updateUnsetPredPos(b *ssa.Block) { // Information about each open-coded defer. type openDeferInfo struct { - // The ODEFER node representing the function call of the defer - n ir.Node + // The node representing the call of the defer + n *ir.CallExpr // If defer call is closure call, the address of the argtmp where the // closure is stored. closure *ssa.Value // The node representing the argtmp where the closure is stored - used for // function, method, or interface call, to store a closure that panic // processing can use for this defer. - closureNode ir.Node + closureNode *ir.Name // If defer call is interface call, the address of the argtmp where the // receiver is stored rcvr *ssa.Value // The node representing the argtmp where the receiver is stored - rcvrNode ir.Node + rcvrNode *ir.Name // The addresses of the argtmps where the evaluated arguments of the defer // function call are stored. argVals []*ssa.Value @@ -622,7 +622,7 @@ type state struct { sb *ssa.Value // value representing address of where deferBits autotmp is stored deferBitsAddr *ssa.Value - deferBitsTemp ir.Node + deferBitsTemp *ir.Name // line number stack. The current line number is top of stack line []src.XPos @@ -1134,6 +1134,7 @@ func (s *state) stmt(n ir.Node) { // Expression statements case ir.OCALLFUNC: + n := n.(*ir.CallExpr) if isIntrinsicCall(n) { s.intrinsicCall(n) return @@ -1141,8 +1142,9 @@ func (s *state) stmt(n ir.Node) { fallthrough case ir.OCALLMETH, ir.OCALLINTER: + n := n.(*ir.CallExpr) s.callResult(n, callNormal) - if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.ONAME && n.Left().Class() == ir.PFUNC { + if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.ONAME && n.Left().(*ir.Name).Class() == ir.PFUNC { if fn := n.Left().Sym().Name; base.Flag.CompilingRuntime && fn == "throw" || n.Left().Sym().Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") { m := s.mem() @@ -1167,19 +1169,19 @@ func (s *state) stmt(n ir.Node) { base.WarnfAt(n.Pos(), "%s defer", defertype) } if s.hasOpenDefers { - s.openDeferRecord(n.Left()) + s.openDeferRecord(n.Left().(*ir.CallExpr)) } else { d := callDefer if n.Esc() == EscNever { d = callDeferStack } - s.callResult(n.Left(), d) + s.callResult(n.Left().(*ir.CallExpr), d) } case ir.OGO: - s.callResult(n.Left(), callGo) + s.callResult(n.Left().(*ir.CallExpr), callGo) case ir.OAS2DOTTYPE: - res, resok := s.dottype(n.Rlist().First(), true) + res, resok := s.dottype(n.Rlist().First().(*ir.TypeAssertExpr), true) deref := false if !canSSAType(n.Rlist().First().Type()) { if res.Op != ssa.OpLoad { @@ -1201,10 +1203,11 @@ func (s *state) stmt(n ir.Node) { case ir.OAS2FUNC: // We come here only when it is an intrinsic call returning two values. 
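// Aside: an example of the two-result intrinsic calls this path
// handles. On architectures where it is intrinsified, a call like
// bits.Add64 becomes one SSA op whose results are projected out with
// Select0 and Select1, matching the code below (hedged: whether it is
// intrinsified depends on the target):
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	sum, carry := bits.Add64(1<<63, 1<<63, 0)
	fmt.Println(sum, carry) // 0 1
}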
- if !isIntrinsicCall(n.Rlist().First()) { - s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Rlist().First()) + call := n.Rlist().First().(*ir.CallExpr) + if !isIntrinsicCall(call) { + s.Fatalf("non-intrinsic AS2FUNC not expanded %v", call) } - v := s.intrinsicCall(n.Rlist().First()) + v := s.intrinsicCall(call) v1 := s.newValue1(ssa.OpSelect0, n.List().First().Type(), v) v2 := s.newValue1(ssa.OpSelect1, n.List().Second().Type(), v) s.assign(n.List().First(), v1, false, 0) @@ -1212,7 +1215,7 @@ func (s *state) stmt(n ir.Node) { return case ir.ODCL: - if n.Left().Class() == ir.PAUTOHEAP { + if n.Left().(*ir.Name).Class() == ir.PAUTOHEAP { s.Fatalf("DCL %v", n) } @@ -1270,6 +1273,7 @@ func (s *state) stmt(n ir.Node) { } rhs = nil case ir.OAPPEND: + rhs := rhs.(*ir.CallExpr) // Check whether we're writing the result of an append back to the same slice. // If so, we handle it specially to avoid write barriers on the fast // (non-growth) path. @@ -1326,7 +1330,7 @@ func (s *state) stmt(n ir.Node) { } var skip skipMask - if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && samesafeexpr(rhs.Left(), n.Left()) { + if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && samesafeexpr(rhs.(*ir.SliceExpr).Left(), n.Left()) { // We're assigning a slicing operation back to its source. // Don't write back fields we aren't changing. See issue #14855. i, j, k := rhs.SliceBounds() @@ -1409,7 +1413,6 @@ func (s *state) stmt(n ir.Node) { b.Pos = s.lastPos.WithIsStmt() case ir.ORETJMP: - s.stmtList(n.List()) b := s.exit() b.Kind = ssa.BlockRetJmp // override BlockRet b.Aux = n.Sym().Linksym() @@ -1536,15 +1539,27 @@ func (s *state) stmt(n ir.Node) { prevBreak := s.breakTo s.breakTo = bEnd + var sym *types.Sym + var body ir.Nodes + if n.Op() == ir.OSWITCH { + n := n.(*ir.SwitchStmt) + sym = n.Sym() + body = n.Body() + } else { + n := n.(*ir.SelectStmt) + sym = n.Sym() + body = n.Body() + } + var lab *ssaLabel - if sym := n.Sym(); sym != nil { + if sym != nil { // labeled lab = s.label(sym) lab.breakTarget = bEnd } // generate body code - s.stmtList(n.Body()) + s.stmtList(body) s.breakTo = prevBreak if lab != nil { @@ -1576,15 +1591,16 @@ func (s *state) stmt(n ir.Node) { case ir.OVARLIVE: // Insert a varlive op to record that a variable is still live. - if !n.Left().Name().Addrtaken() { - s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left()) + v := n.Left().(*ir.Name) + if !v.Addrtaken() { + s.Fatalf("VARLIVE variable %v must have Addrtaken set", v) } - switch n.Left().Class() { + switch v.Class() { case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT: default: - s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left()) + s.Fatalf("VARLIVE variable %v must be Auto or Arg", v) } - s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left().(*ir.Name), s.mem()) + s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, v, s.mem()) case ir.OCHECKNIL: p := s.expr(n.Left()) @@ -2395,6 +2411,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { return nil case ir.ODOTTYPE: + n := n.(*ir.TypeAssertExpr) res, _ := s.dottype(n, false) return res @@ -2651,6 +2668,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { return s.load(n.Type(), p) case ir.ODOT: + n := n.(*ir.SelectorExpr) if n.Left().Op() == ir.OSTRUCTLIT { // All literals with nonzero fields have already been // rewritten during walk. 
Any that remain are just T{} @@ -2726,6 +2744,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { } case ir.OLEN, ir.OCAP: + n := n.(*ir.UnaryExpr) switch { case n.Left().Type().IsSlice(): op := ssa.OpSliceLen @@ -2798,19 +2817,21 @@ func (s *state) expr(n ir.Node) *ssa.Value { return s.newValue2(ssa.OpStringMake, n.Type(), p, l) case ir.OCALLFUNC: + n := n.(*ir.CallExpr) if isIntrinsicCall(n) { return s.intrinsicCall(n) } fallthrough case ir.OCALLINTER, ir.OCALLMETH: + n := n.(*ir.CallExpr) return s.callResult(n, callNormal) case ir.OGETG: return s.newValue1(ssa.OpGetG, n.Type(), s.mem()) case ir.OAPPEND: - return s.append(n, false) + return s.append(n.(*ir.CallExpr), false) case ir.OSTRUCTLIT, ir.OARRAYLIT: // All literals with nonzero fields have already been @@ -2841,7 +2862,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { // If inplace is true, it writes the result of the OAPPEND expression n // back to the slice being appended to, and returns nil. // inplace MUST be set to false if the slice can be SSA'd. -func (s *state) append(n ir.Node, inplace bool) *ssa.Value { +func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value { // If inplace is false, process as expression "append(s, e1, e2, e3)": // // ptr, len, cap := s @@ -2923,9 +2944,12 @@ func (s *state) append(n ir.Node, inplace bool) *ssa.Value { r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[types.TINT], types.Types[types.TINT]}, taddr, p, l, c, nl) if inplace { - if sn.Op() == ir.ONAME && sn.Class() != ir.PEXTERN { - // Tell liveness we're about to build a new slice - s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn.(*ir.Name), s.mem()) + if sn.Op() == ir.ONAME { + sn := sn.(*ir.Name) + if sn.Class() != ir.PEXTERN { + // Tell liveness we're about to build a new slice + s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem()) + } } capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceCapOffset, addr) s.store(types.Types[types.TINT], capaddr, r[2]) @@ -3076,6 +3100,7 @@ func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask // For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c} // Grab information about the structure type. + left := left.(*ir.SelectorExpr) t := left.Left().Type() nf := t.NumFields() idx := fieldIdx(left) @@ -3100,7 +3125,7 @@ func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask // TODO: do we need to update named values here? return } - if left.Op() == ir.OINDEX && left.Left().Type().IsArray() { + if left.Op() == ir.OINDEX && left.(*ir.IndexExpr).Left().Type().IsArray() { s.pushLine(left.Pos()) defer s.popLine() // We're assigning to an element of an ssa-able array. @@ -3126,6 +3151,7 @@ func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask s.assign(left.Left(), v, false, 0) return } + left := left.(*ir.Name) // Update variable assignment. s.vars[left] = right s.addNamedValue(left, right) @@ -3134,7 +3160,7 @@ func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask // If this assignment clobbers an entire local variable, then emit // OpVarDef so liveness analysis knows the variable is redefined. 
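// Aside: the in-place form handled by the append code earlier in this
// hunk, s = append(s, e), written back to the slice being appended to.
// On the non-growth path the backing array is reused and only the
// slice header is updated:
package main

import "fmt"

func main() {
	s := make([]int, 0, 4)
	p := &s[:1][0] // address of the backing array's first slot

	s = append(s, 42)           // capacity suffices: no growslice call
	fmt.Println(*p, &s[0] == p) // 42 true: same backing array
}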
- if base := clobberBase(left); base.Op() == ir.ONAME && base.Class() != ir.PEXTERN && skip == 0 { + if base := clobberBase(left); base.Op() == ir.ONAME && base.(*ir.Name).Class() != ir.PEXTERN && skip == 0 { s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base.(*ir.Name), s.mem(), !ir.IsAutoTmp(base)) } @@ -3309,7 +3335,7 @@ var intrinsics map[intrinsicKey]intrinsicBuilder // An intrinsicBuilder converts a call node n into an ssa value that // implements that call as an intrinsic. args is a list of arguments to the func. -type intrinsicBuilder func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value +type intrinsicBuilder func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value type intrinsicKey struct { arch *sys.Arch @@ -3374,7 +3400,7 @@ func init() { /******** runtime ********/ if !instrumenting { add("runtime", "slicebytetostringtmp", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { // Compiler frontend optimizations emit OBYTES2STRTMP nodes // for the backend instead of slicebytetostringtmp calls // when not instrumenting. @@ -3383,7 +3409,7 @@ func init() { all...) } addF("runtime/internal/math", "MulUintptr", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { if s.config.PtrSize == 4 { return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1]) } @@ -3391,90 +3417,90 @@ func init() { }, sys.AMD64, sys.I386, sys.MIPS64) add("runtime", "KeepAlive", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0]) s.vars[memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem()) return nil }, all...) add("runtime", "getclosureptr", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr) }, all...) add("runtime", "getcallerpc", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr) }, all...) add("runtime", "getcallersp", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr) }, all...) 
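// Aside: a toy sketch of the registry these addF calls populate — a
// map keyed by (arch, package, function) whose values build SSA for a
// call. Arch, Value, CallExpr and builder below are illustrative
// stand-ins, not the compiler's types:
package main

import "fmt"

type (
	Arch     string
	CallExpr struct{ Fn string }
	Value    struct{ Op string }
)

type key struct {
	arch    Arch
	pkg, fn string
}

type builder func(n *CallExpr, args []*Value) *Value

var table = map[key]builder{}

func addF(pkg, fn string, b builder, archs ...Arch) {
	for _, a := range archs {
		table[key{a, pkg, fn}] = b
	}
}

func main() {
	addF("runtime/internal/sys", "Ctz64",
		func(n *CallExpr, args []*Value) *Value { return &Value{Op: "Ctz64"} },
		"amd64", "arm64")

	b := table[key{"amd64", "runtime/internal/sys", "Ctz64"}]
	fmt.Println(b(&CallExpr{Fn: "Ctz64"}, nil).Op) // Ctz64
}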
/******** runtime/internal/sys ********/ addF("runtime/internal/sys", "Ctz32", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) addF("runtime/internal/sys", "Ctz64", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) addF("runtime/internal/sys", "Bswap32", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpBswap32, types.Types[types.TUINT32], args[0]) }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X) addF("runtime/internal/sys", "Bswap64", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpBswap64, types.Types[types.TUINT64], args[0]) }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X) /******** runtime/internal/atomic ********/ addF("runtime/internal/atomic", "Load", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) }, sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Load8", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[types.TUINT8], types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v) }, sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Load64", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) }, sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "LoadAcq", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) }, sys.PPC64, sys.S390X) addF("runtime/internal/atomic", "LoadAcq64", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) }, 
sys.PPC64) addF("runtime/internal/atomic", "Loadp", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v) @@ -3482,62 +3508,62 @@ func init() { sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Store", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Store8", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Store64", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "StorepNoWB", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "StoreRel", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.PPC64, sys.S390X) addF("runtime/internal/atomic", "StoreRel64", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.PPC64) addF("runtime/internal/atomic", "Xchg", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) }, sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Xchg64", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) }, sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) - type atomicOpEmitter func(s *state, n ir.Node, args 
[]*ssa.Value, op ssa.Op, typ types.Kind) + type atomicOpEmitter func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.Kind, emit atomicOpEmitter) intrinsicBuilder { - return func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { // Target Atomic feature is identified by dynamic detection addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), arm64HasATOMICS, s.sb) v := s.load(types.Types[types.TBOOL], addr) @@ -3571,7 +3597,7 @@ func init() { } } - atomicXchgXaddEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.Kind) { + atomicXchgXaddEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) { v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v) @@ -3584,14 +3610,14 @@ func init() { sys.ARM64) addF("runtime/internal/atomic", "Xadd", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) }, sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Xadd64", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) @@ -3606,28 +3632,28 @@ func init() { sys.ARM64) addF("runtime/internal/atomic", "Cas", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v) }, sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Cas64", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v) }, sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "CasRel", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v) }, sys.PPC64) - atomicCasEmitterARM64 := func(s *state, 
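makeAtomicGuardedIntrinsicARM64 above emits both op variants into the generated function and selects between them at run time on the arm64HasATOMICS flag. At the source level, what gets intrinsified here are ordinary CAS loops; a minimal runnable one, assuming sync/atomic's usual aliasing onto runtime/internal/atomic:

package main

import (
	"fmt"
	"sync/atomic"
)

// incr is the classic CAS retry loop; the CompareAndSwapUint32 call compiles
// to LOCK CMPXCHG on amd64, and on arm64 to either a CASAL instruction or an
// LL/SC sequence, chosen by the feature branch emitted above.
func incr(p *uint32) {
	for {
		old := atomic.LoadUint32(p)
		if atomic.CompareAndSwapUint32(p, old, old+1) {
			return
		}
	}
}

func main() {
	var n uint32
	for i := 0; i < 5; i++ {
		incr(&n)
	}
	fmt.Println(n) // 5
}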
n ir.Node, args []*ssa.Value, op ssa.Op, typ types.Kind) { + atomicCasEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) { v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v) @@ -3641,31 +3667,31 @@ func init() { sys.ARM64) addF("runtime/internal/atomic", "And8", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X) addF("runtime/internal/atomic", "And", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X) addF("runtime/internal/atomic", "Or8", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X) addF("runtime/internal/atomic", "Or", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X) - atomicAndOrEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.Kind) { + atomicAndOrEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) { s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem()) } @@ -3714,52 +3740,52 @@ func init() { /******** math ********/ addF("math", "Sqrt", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpSqrt, types.Types[types.TFLOAT64], args[0]) }, sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm) addF("math", "Trunc", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpTrunc, types.Types[types.TFLOAT64], args[0]) }, sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm) addF("math", "Ceil", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpCeil, types.Types[types.TFLOAT64], args[0]) }, sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm) addF("math", "Floor", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpFloor, types.Types[types.TFLOAT64], args[0]) }, sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm) addF("math", "Round", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpRound, types.Types[types.TFLOAT64], args[0]) }, sys.ARM64, sys.PPC64, sys.S390X) addF("math", "RoundToEven", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args 
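The math builders in this stretch change only code generation, not results, and Round and RoundToEven get distinct SSA ops because they break ties differently. A quick sanity check from user code:

package main

import (
	"fmt"
	"math"
)

func main() {
	fmt.Println(math.Sqrt(2))          // 1.4142135623730951
	fmt.Println(math.Ceil(-0.5))       // -0: ceiling keeps the sign of zero
	fmt.Println(math.Round(0.5))       // 1: Round breaks ties away from zero
	fmt.Println(math.RoundToEven(0.5)) // 0: RoundToEven breaks ties to even
}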
[]*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpRoundToEven, types.Types[types.TFLOAT64], args[0]) }, sys.ARM64, sys.S390X, sys.Wasm) addF("math", "Abs", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpAbs, types.Types[types.TFLOAT64], args[0]) }, sys.ARM64, sys.ARM, sys.PPC64, sys.Wasm) addF("math", "Copysign", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue2(ssa.OpCopysign, types.Types[types.TFLOAT64], args[0], args[1]) }, sys.PPC64, sys.Wasm) addF("math", "FMA", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2]) }, sys.ARM64, sys.PPC64, sys.S390X) addF("math", "FMA", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { if !s.config.UseFMA { s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64] return s.variable(n, types.Types[types.TFLOAT64]) @@ -3791,7 +3817,7 @@ func init() { }, sys.AMD64) addF("math", "FMA", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { if !s.config.UseFMA { s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64] return s.variable(n, types.Types[types.TFLOAT64]) @@ -3824,8 +3850,8 @@ func init() { }, sys.ARM) - makeRoundAMD64 := func(op ssa.Op) func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { - return func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + makeRoundAMD64 := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasSSE41) b := s.endBlock() b.Kind = ssa.BlockIf @@ -3867,17 +3893,17 @@ func init() { /******** math/bits ********/ addF("math/bits", "TrailingZeros64", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) addF("math/bits", "TrailingZeros32", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) addF("math/bits", "TrailingZeros16", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0]) c := s.constInt32(types.Types[types.TUINT32], 1<<16) y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c) @@ -3885,12 +3911,12 @@ func init() { }, sys.MIPS) addF("math/bits", "TrailingZeros16", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpCtz16, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm) addF("math/bits", "TrailingZeros16", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) 
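FMA needs both the UseFMA fallback and, on amd64 and arm, a CPU-feature branch because math.FMA rounds once and is therefore not equivalent to a multiply followed by an add. A runnable illustration; the explicit float64 conversion keeps the compiler from fusing the two-step form on architectures where it otherwise may:

package main

import (
	"fmt"
	"math"
)

func main() {
	x := 1 + 0x1p-27 // both factors exactly representable
	y := 1 - 0x1p-27
	z := -1.0
	// x*y is exactly 1 - 2^-54, which rounds to 1 in float64, so the
	// two-rounding form loses the residual entirely.
	fmt.Println(float64(x*y) + z)  // 0
	fmt.Println(math.FMA(x, y, z)) // -5.551115123125783e-17, i.e. -2^-54
}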
*ssa.Value { x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0]) c := s.constInt64(types.Types[types.TUINT64], 1<<16) y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c) @@ -3898,7 +3924,7 @@ func init() { }, sys.S390X, sys.PPC64) addF("math/bits", "TrailingZeros8", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0]) c := s.constInt32(types.Types[types.TUINT32], 1<<8) y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c) @@ -3906,12 +3932,12 @@ func init() { }, sys.MIPS) addF("math/bits", "TrailingZeros8", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpCtz8, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.ARM, sys.ARM64, sys.Wasm) addF("math/bits", "TrailingZeros8", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0]) c := s.constInt64(types.Types[types.TUINT64], 1<<8) y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c) @@ -3923,17 +3949,17 @@ func init() { // ReverseBytes inlines correctly, no need to intrinsify it. // ReverseBytes16 lowers to a rotate, no need for anything special here. addF("math/bits", "Len64", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) addF("math/bits", "Len32", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0]) }, sys.AMD64, sys.ARM64) addF("math/bits", "Len32", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { if s.config.PtrSize == 4 { return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0]) } @@ -3942,7 +3968,7 @@ func init() { }, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) addF("math/bits", "Len16", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { if s.config.PtrSize == 4 { x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0]) return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x) @@ -3952,12 +3978,12 @@ func init() { }, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) addF("math/bits", "Len16", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpBitLen16, types.Types[types.TINT], args[0]) }, sys.AMD64) addF("math/bits", "Len8", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { if s.config.PtrSize == 4 { x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0]) return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x) @@ -3967,12 +3993,12 @@ func init() { }, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) addF("math/bits", "Len8", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) 
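The widening trick in the TrailingZeros16 and TrailingZeros8 lowerings deserves a note: OR-ing in a constant bit just past the narrow width caps the wide count at exactly the right zero-input answer. A pure-Go check of the 16-bit case:

package main

import (
	"fmt"
	"math/bits"
)

// tz16 mirrors the lowering: the OR guarantees a set bit at position 16, so
// the 32-bit count never exceeds 16, which is exactly the answer for x == 0.
func tz16(x uint16) int {
	return bits.TrailingZeros32(uint32(x) | 1<<16)
}

func main() {
	for _, x := range []uint16{0, 1, 0x8000, 0xffff} {
		fmt.Println(x, tz16(x), tz16(x) == bits.TrailingZeros16(x)) // always true
	}
}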
*ssa.Value { return s.newValue1(ssa.OpBitLen8, types.Types[types.TINT], args[0]) }, sys.AMD64) addF("math/bits", "Len", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { if s.config.PtrSize == 4 { return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0]) } @@ -3981,27 +4007,27 @@ func init() { sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) // LeadingZeros is handled because it trivially calls Len. addF("math/bits", "Reverse64", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0]) }, sys.ARM64) addF("math/bits", "Reverse32", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0]) }, sys.ARM64) addF("math/bits", "Reverse16", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpBitRev16, types.Types[types.TINT], args[0]) }, sys.ARM64) addF("math/bits", "Reverse8", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpBitRev8, types.Types[types.TINT], args[0]) }, sys.ARM64) addF("math/bits", "Reverse", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { if s.config.PtrSize == 4 { return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0]) } @@ -4009,29 +4035,29 @@ func init() { }, sys.ARM64) addF("math/bits", "RotateLeft8", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue2(ssa.OpRotateLeft8, types.Types[types.TUINT8], args[0], args[1]) }, sys.AMD64) addF("math/bits", "RotateLeft16", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue2(ssa.OpRotateLeft16, types.Types[types.TUINT16], args[0], args[1]) }, sys.AMD64) addF("math/bits", "RotateLeft32", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue2(ssa.OpRotateLeft32, types.Types[types.TUINT32], args[0], args[1]) }, sys.AMD64, sys.ARM, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm) addF("math/bits", "RotateLeft64", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue2(ssa.OpRotateLeft64, types.Types[types.TUINT64], args[0], args[1]) }, sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm) alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...) 
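The Len16 and Len8 builders similarly rely on zero-extension preserving bit length, and the rotate intrinsics accept negative counts (rotate right). For instance:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	for _, x := range []uint16{0, 1, 0x00ff, 0xffff} {
		fmt.Println(bits.Len16(x) == bits.Len32(uint32(x))) // zero-extension preserves bit length
	}
	fmt.Printf("%#08x\n", bits.RotateLeft32(0x000000ff, 8))  // 0x0000ff00
	fmt.Printf("%#08x\n", bits.RotateLeft32(0x000000ff, -8)) // 0xff000000: rotate right
}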
- makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { - return func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasPOPCNT) b := s.endBlock() b.Kind = ssa.BlockIf @@ -4066,7 +4092,7 @@ func init() { makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64), sys.AMD64) addF("math/bits", "OnesCount64", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpPopCount64, types.Types[types.TINT], args[0]) }, sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm) @@ -4074,7 +4100,7 @@ func init() { makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32), sys.AMD64) addF("math/bits", "OnesCount32", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpPopCount32, types.Types[types.TINT], args[0]) }, sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm) @@ -4082,12 +4108,12 @@ func init() { makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16), sys.AMD64) addF("math/bits", "OnesCount16", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpPopCount16, types.Types[types.TINT], args[0]) }, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm) addF("math/bits", "OnesCount8", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpPopCount8, types.Types[types.TINT], args[0]) }, sys.S390X, sys.PPC64, sys.Wasm) @@ -4095,25 +4121,25 @@ func init() { makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32), sys.AMD64) addF("math/bits", "Mul64", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1]) }, sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64) alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE) addF("math/bits", "Add64", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2]) }, sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X) alias("math/bits", "Add", "math/bits", "Add64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X) addF("math/bits", "Sub64", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2]) }, sys.AMD64, sys.ARM64, sys.S390X) alias("math/bits", "Sub", "math/bits", "Sub64", sys.ArchAMD64, sys.ArchARM64, sys.ArchS390X) addF("math/bits", "Div64", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { // check for divide-by-zero/overflow and panic with appropriate 
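What Mul64, Add64, and Sub64 buy at the source level is multiword arithmetic in a couple of instructions on the architectures listed. A sketch of a 128-bit multiply-accumulate built from them:

package main

import (
	"fmt"
	"math/bits"
)

type u128 struct{ hi, lo uint64 }

// mulAdd computes acc + a*b over 128 bits; Mul64 becomes one widening
// multiply and each Add64 an add-with-carry on the listed architectures.
func mulAdd(a, b uint64, acc u128) u128 {
	hi, lo := bits.Mul64(a, b)
	lo, carry := bits.Add64(acc.lo, lo, 0)
	hi, _ = bits.Add64(acc.hi, hi, carry)
	return u128{hi, lo}
}

func main() {
	fmt.Println(mulAdd(1<<63, 2, u128{})) // {1 0}, i.e. 2^63 * 2 = 2^64
}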
message cmpZero := s.newValue2(s.ssaOp(ir.ONE, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[2], s.zeroVal(types.Types[types.TUINT64])) s.check(cmpZero, panicdivide) @@ -4173,7 +4199,7 @@ func init() { /******** math/big ********/ add("math/big", "mulWW", - func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1]) }, sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64LE, sys.ArchPPC64, sys.ArchS390X) @@ -4211,7 +4237,7 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder { return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}] } -func isIntrinsicCall(n ir.Node) bool { +func isIntrinsicCall(n *ir.CallExpr) bool { if n == nil { return false } @@ -4223,7 +4249,7 @@ func isIntrinsicCall(n ir.Node) bool { } // intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation. -func (s *state) intrinsicCall(n ir.Node) *ssa.Value { +func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value { v := findIntrinsic(n.Left().Sym())(s, n, s.intrinsicArgs(n)) if ssa.IntrinsicsDebug > 0 { x := v @@ -4239,13 +4265,14 @@ func (s *state) intrinsicCall(n ir.Node) *ssa.Value { } // intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them. -func (s *state) intrinsicArgs(n ir.Node) []*ssa.Value { +func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value { // Construct map of temps; see comments in s.call about the structure of n. temps := map[ir.Node]*ssa.Value{} for _, a := range n.List().Slice() { if a.Op() != ir.OAS { s.Fatalf("non-assignment as a temp function argument %v", a.Op()) } + a := a.(*ir.AssignStmt) l, r := a.Left(), a.Right() if l.Op() != ir.ONAME { s.Fatalf("non-ONAME temp function argument %v", a.Op()) @@ -4274,7 +4301,7 @@ func (s *state) intrinsicArgs(n ir.Node) []*ssa.Value { // call. We will also record funcdata information on where the args are stored // (as well as the deferBits variable), and this will enable us to run the proper // defer calls during panics. -func (s *state) openDeferRecord(n ir.Node) { +func (s *state) openDeferRecord(n *ir.CallExpr) { // Do any needed expression evaluation for the args (including the // receiver, if any). This may be evaluating something like 'autotmp_3 = // once.mutex'. Such a statement will create a mapping in s.vars[] from @@ -4296,13 +4323,14 @@ func (s *state) openDeferRecord(n ir.Node) { closureVal := s.expr(fn) closure := s.openDeferSave(nil, fn.Type(), closureVal) opendefer.closureNode = closure.Aux.(*ir.Name) - if !(fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC) { + if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class() == ir.PFUNC) { opendefer.closure = closure } } else if n.Op() == ir.OCALLMETH { if fn.Op() != ir.ODOTMETH { base.Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn) } + fn := fn.(*ir.SelectorExpr) closureVal := s.getMethodClosure(fn) // We must always store the function value in a stack slot for the // runtime panic code to use. 
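The divide-by-zero and overflow checks emitted for Div64 match math/bits.Div64's documented panics: y == 0 divides by zero, and y <= hi would overflow the 64-bit quotient. Both are observable from user code:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	q, r := bits.Div64(0, 100, 7) // (hi, lo) / y with a zero high word
	fmt.Println(q, r)             // 14 2

	defer func() { fmt.Println("recovered:", recover()) }()
	bits.Div64(1, 0, 1) // y <= hi: the quotient cannot fit in 64 bits
}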
But in the defer exit code, we will @@ -4313,6 +4341,7 @@ func (s *state) openDeferRecord(n ir.Node) { if fn.Op() != ir.ODOTINTER { base.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op()) } + fn := fn.(*ir.SelectorExpr) closure, rcvr := s.getClosureAndRcvr(fn) opendefer.closure = s.openDeferSave(nil, closure.Type, closure) // Important to get the receiver type correct, so it is recognized @@ -4517,11 +4546,11 @@ func (s *state) openDeferExit() { // use the first call of the last defer exit to compute liveness // for the deferreturn, so we want all stack slots to be live. if r.closureNode != nil { - s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode.(*ir.Name), s.mem(), false) + s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false) } if r.rcvrNode != nil { if r.rcvrNode.Type().HasPointers() { - s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode.(*ir.Name), s.mem(), false) + s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false) } } for _, argNode := range r.argNodes { @@ -4535,17 +4564,17 @@ func (s *state) openDeferExit() { } } -func (s *state) callResult(n ir.Node, k callKind) *ssa.Value { +func (s *state) callResult(n *ir.CallExpr, k callKind) *ssa.Value { return s.call(n, k, false) } -func (s *state) callAddr(n ir.Node, k callKind) *ssa.Value { +func (s *state) callAddr(n *ir.CallExpr, k callKind) *ssa.Value { return s.call(n, k, true) } // Calls the function n using the specified call type. // Returns the address of the return value (or nil if none). -func (s *state) call(n ir.Node, k callKind, returnResultAddr bool) *ssa.Value { +func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Value { s.prevCall = nil var sym *types.Sym // target symbol (if static) var closure *ssa.Value // ptr to closure to run (if dynamic) @@ -4569,7 +4598,7 @@ func (s *state) call(n ir.Node, k callKind, returnResultAddr bool) *ssa.Value { switch n.Op() { case ir.OCALLFUNC: testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f) - if k == callNormal && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC { + if k == callNormal && fn.Op() == ir.ONAME && fn.(*ir.Name).Class() == ir.PFUNC { sym = fn.Sym() break } @@ -4583,6 +4612,7 @@ func (s *state) call(n ir.Node, k callKind, returnResultAddr bool) *ssa.Value { if fn.Op() != ir.ODOTMETH { s.Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn) } + fn := fn.(*ir.SelectorExpr) testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f) if k == callNormal { sym = fn.Sym() @@ -4595,6 +4625,7 @@ func (s *state) call(n ir.Node, k callKind, returnResultAddr bool) *ssa.Value { if fn.Op() != ir.ODOTINTER { s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op()) } + fn := fn.(*ir.SelectorExpr) testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f) var iclosure *ssa.Value iclosure, rcvr = s.getClosureAndRcvr(fn) @@ -4847,7 +4878,7 @@ func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) { } // getMethodClosure returns a value representing the closure for a method call -func (s *state) getMethodClosure(fn ir.Node) *ssa.Value { +func (s *state) getMethodClosure(fn *ir.SelectorExpr) *ssa.Value { // Make a name n2 for the function. // fn.Sym might be sync.(*Mutex).Unlock. // Make a PFUNC node out of that, then evaluate it. 
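The recurring idiom in these hunks is: test n.Op() (or fn.Op()), then shadow the variable with a type assertion so the rest of the block works on the concrete struct. The same idiom over toy node types; Op, Node, Name, and AssignStmt below are illustrative stand-ins, not the ir package's definitions:

package main

import "fmt"

type Op int

const (
	ONAME Op = iota
	OAS
)

type Node interface{ Op() Op }

type Name struct{ Ident string }

func (*Name) Op() Op { return ONAME }

type AssignStmt struct{ LHS, RHS Node }

func (*AssignStmt) Op() Op { return OAS }

func describe(n Node) string {
	switch n.Op() {
	case ONAME:
		n := n.(*Name) // safe: Op() == ONAME only for *Name in this toy IR
		return "name " + n.Ident
	case OAS:
		n := n.(*AssignStmt)
		return describe(n.LHS) + " = " + describe(n.RHS)
	}
	return "?"
}

func main() {
	fmt.Println(describe(&AssignStmt{LHS: &Name{Ident: "x"}, RHS: &Name{Ident: "y"}}))
	// name x = name y
}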
@@ -4864,7 +4895,7 @@ func (s *state) getMethodClosure(fn ir.Node) *ssa.Value { // getClosureAndRcvr returns values for the appropriate closure and receiver of an // interface call -func (s *state) getClosureAndRcvr(fn ir.Node) (*ssa.Value, *ssa.Value) { +func (s *state) getClosureAndRcvr(fn *ir.SelectorExpr) (*ssa.Value, *ssa.Value) { i := s.expr(fn.Left()) itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i) s.nilCheck(itab) @@ -4967,6 +4998,7 @@ func (s *state) addr(n ir.Node) *ssa.Value { p := s.exprPtr(n.Left(), n.Bounded(), n.Pos()) return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p) case ir.OCLOSUREREAD: + n := n.(*ir.ClosureReadExpr) return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)) case ir.OCONVNOP: @@ -4976,8 +5008,10 @@ func (s *state) addr(n ir.Node) *ssa.Value { addr := s.addr(n.Left()) return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH: + n := n.(*ir.CallExpr) return s.callAddr(n, callNormal) case ir.ODOTTYPE: + n := n.(*ir.TypeAssertExpr) v, _ := s.dottype(n, false) if v.Op != ssa.OpLoad { s.Fatalf("dottype of non-load") @@ -4998,22 +5032,34 @@ func (s *state) canSSA(n ir.Node) bool { if base.Flag.N != 0 { return false } - for n.Op() == ir.ODOT || (n.Op() == ir.OINDEX && n.Left().Type().IsArray()) { - n = n.Left() + for { + nn := n + if nn.Op() == ir.ODOT { + n = nn.Left() + continue + } + if nn.Op() == ir.OINDEX { + if nn.Left().Type().IsArray() { + n = nn.Left() + continue + } + } + break } if n.Op() != ir.ONAME { return false } - if n.Name().Addrtaken() { + name := n.(*ir.Name) + if name.Addrtaken() { return false } - if isParamHeapCopy(n) { + if isParamHeapCopy(name) { return false } - if n.Class() == ir.PAUTOHEAP { - s.Fatalf("canSSA of PAUTOHEAP %v", n) + if name.Class() == ir.PAUTOHEAP { + s.Fatalf("canSSA of PAUTOHEAP %v", name) } - switch n.Class() { + switch name.Class() { case ir.PEXTERN: return false case ir.PPARAMOUT: @@ -5031,13 +5077,13 @@ func (s *state) canSSA(n ir.Node) bool { return false } } - if n.Class() == ir.PPARAM && n.Sym() != nil && n.Sym().Name == ".this" { + if name.Class() == ir.PPARAM && name.Sym() != nil && name.Sym().Name == ".this" { // wrappers generated by genwrapper need to update // the .this pointer in place. // TODO: treat as a PPARAMOUT? return false } - return canSSAType(n.Type()) + return canSSAType(name.Type()) // TODO: try to make more variables SSAable? } @@ -5736,7 +5782,7 @@ func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n ir.Node, x *ssa.Value, ft, } // referenceTypeBuiltin generates code for the len/cap builtins for maps and channels. -func (s *state) referenceTypeBuiltin(n ir.Node, x *ssa.Value) *ssa.Value { +func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value { if !n.Left().Type().IsMap() && !n.Left().Type().IsChan() { s.Fatalf("node must be a map or a channel") } @@ -5893,7 +5939,7 @@ func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt * // dottype generates SSA for a type assertion node. // commaok indicates whether to panic or return a bool. // If commaok is false, resok will be nil. 
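The rewritten canSSA prologue replaces a compound loop condition with explicit peeling: strip field selects and array indexes down to the base name, but stop at a slice index, since a slice's elements live outside the named variable. The shape of that loop over assumed, simplified node types:

package main

import "fmt"

// Toy stand-ins: the real loop walks ir nodes, not these.
type name struct{ id string }
type dot struct{ x interface{} } // field select x.f
type index struct {
	x       interface{}
	isArray bool // true for an array index, false for a slice index
}

// peel strips field selects and array indexing down to the base, but stops at
// a slice index: a slice's backing array is not part of the named variable.
func peel(e interface{}) interface{} {
	for {
		switch v := e.(type) {
		case dot:
			e = v.x
		case index:
			if !v.isArray {
				return e
			}
			e = v.x
		default:
			return e
		}
	}
}

func main() {
	fmt.Printf("%T\n", peel(dot{index{name{"a"}, true}}))  // main.name
	fmt.Printf("%T\n", peel(dot{index{name{"a"}, false}})) // main.index: stopped at the slice
}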
-func (s *state) dottype(n ir.Node, commaok bool) (res, resok *ssa.Value) { +func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Value) { iface := s.expr(n.Left()) // input interface target := s.expr(n.Right()) // target type byteptr := s.f.Config.Types.BytePtr @@ -6029,7 +6075,7 @@ func (s *state) dottype(n ir.Node, commaok bool) (res, resok *ssa.Value) { if !commaok { // on failure, panic by calling panicdottype s.startBlock(bFail) - taddr := s.expr(n.Right().Right()) + taddr := s.expr(n.Right().(*ir.AddrExpr).Right()) if n.Left().Type().IsEmptyInterface() { s.rtcall(panicdottypeE, false, nil, itab, target, taddr) } else { @@ -6095,25 +6141,27 @@ func (s *state) dottype(n ir.Node, commaok bool) (res, resok *ssa.Value) { } // variable returns the value of a variable at the current location. -func (s *state) variable(name ir.Node, t *types.Type) *ssa.Value { - v := s.vars[name] +func (s *state) variable(n ir.Node, t *types.Type) *ssa.Value { + v := s.vars[n] if v != nil { return v } - v = s.fwdVars[name] + v = s.fwdVars[n] if v != nil { return v } if s.curBlock == s.f.Entry { // No variable should be live at entry. - s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, name, v) + s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, n, v) } // Make a FwdRef, which records a value that's live on block input. // We'll find the matching definition as part of insertPhis. - v = s.newValue0A(ssa.OpFwdRef, t, FwdRefAux{N: name}) - s.fwdVars[name] = v - s.addNamedValue(name, v) + v = s.newValue0A(ssa.OpFwdRef, t, FwdRefAux{N: n}) + s.fwdVars[n] = v + if n.Op() == ir.ONAME { + s.addNamedValue(n.(*ir.Name), v) + } return v } @@ -6121,7 +6169,7 @@ func (s *state) mem() *ssa.Value { return s.variable(memVar, types.TypeMem) } -func (s *state) addNamedValue(n ir.Node, v *ssa.Value) { +func (s *state) addNamedValue(n *ir.Name, v *ssa.Value) { if n.Class() == ir.Pxxx { // Don't track our marker nodes (memVar etc.). return @@ -6174,7 +6222,7 @@ type SSAGenState struct { bstart []*obj.Prog // Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include PPC and Sparc V8. - ScratchFpMem ir.Node + ScratchFpMem *ir.Name maxarg int64 // largest frame size for arguments to calls made by the function @@ -6877,9 +6925,9 @@ func CheckLoweredGetClosurePtr(v *ssa.Value) { } } -// AutoVar returns a *Node and int64 representing the auto variable and offset within it +// AutoVar returns a *Name and int64 representing the auto variable and offset within it // where v should be spilled. -func AutoVar(v *ssa.Value) (ir.Node, int64) { +func AutoVar(v *ssa.Value) (*ir.Name, int64) { loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot) if v.Type.Size() > loc.Type.Size() { v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type) @@ -6990,7 +7038,7 @@ func (s *SSAGenState) UseArgs(n int64) { } // fieldIdx finds the index of the field referred to by the ODOT node n. 
-func fieldIdx(n ir.Node) int { +func fieldIdx(n *ir.SelectorExpr) int { t := n.Left().Type() f := n.Sym() if !t.IsStruct() { @@ -7019,7 +7067,7 @@ func fieldIdx(n ir.Node) int { type ssafn struct { curfn *ir.Func strings map[string]*obj.LSym // map from constant string to data symbols - scratchFpMem ir.Node // temp for floating point register / memory moves on some architectures + scratchFpMem *ir.Name // temp for floating point register / memory moves on some architectures stksize int64 // stack size for current frame stkptrsize int64 // prefix of stack containing pointers log bool // print ssa debug to the stdout @@ -7211,11 +7259,15 @@ func (e *ssafn) MyImportPath() string { } func clobberBase(n ir.Node) ir.Node { - if n.Op() == ir.ODOT && n.Left().Type().NumFields() == 1 { - return clobberBase(n.Left()) + if n.Op() == ir.ODOT { + if n.Left().Type().NumFields() == 1 { + return clobberBase(n.Left()) + } } - if n.Op() == ir.OINDEX && n.Left().Type().IsArray() && n.Left().Type().NumElem() == 1 { - return clobberBase(n.Left()) + if n.Op() == ir.OINDEX { + if n.Left().Type().IsArray() && n.Left().Type().NumElem() == 1 { + return clobberBase(n.Left()) + } } return n } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 041eb900c803a..cc0b3d847d1ad 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -743,7 +743,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { walkexprlistsafe(n.List().Slice(), init) r = walkexpr(r, init) - if isIntrinsicCall(r) { + if isIntrinsicCall(r.(*ir.CallExpr)) { n.PtrRlist().Set1(r) return n } From aa55d4e54bec7a3e3781c682f9948e9bf0c1df81 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 10 Dec 2020 18:46:13 -0500 Subject: [PATCH 175/474] [dev.regabi] cmd/compile: cleanup for concrete types - escape An automated rewrite will add concrete type assertions after a test of n.Op(), when n can be safely type-asserted (meaning, n is not reassigned a different type, n is not reassigned and then used outside the scope of the type assertion, and so on). This sequence of CLs handles the code that the automated rewrite does not: adding specific types to function arguments, adjusting code not to call n.Left() etc when n may have multiple representations, and so on. This CL focuses on escape.go. Passes buildall w/ toolstash -cmp. Change-Id: I3e76e1ef9b72f28e3adad9633929699635d852dd Reviewed-on: https://go-review.googlesource.com/c/go/+/277924 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/escape.go | 204 ++++++++++++++++++++++---- 1 file changed, 173 insertions(+), 31 deletions(-) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 5fce1184481a1..d009a55a96b0d 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -228,6 +228,7 @@ func (e *Escape) walkFunc(fn *ir.Func) { ir.Visit(fn, func(n ir.Node) { switch n.Op() { case ir.OLABEL: + n := n.(*ir.LabelStmt) if e.labels == nil { e.labels = make(map[*types.Sym]labelState) } @@ -236,6 +237,7 @@ func (e *Escape) walkFunc(fn *ir.Func) { case ir.OGOTO: // If we visited the label before the goto, // then this is a looping label. + n := n.(*ir.BranchStmt) if e.labels[n.Sym()] == nonlooping { e.labels[n.Sym()] = looping } @@ -305,15 +307,18 @@ func (e *Escape) stmt(n ir.Node) { // TODO(mdempsky): Handle dead code? case ir.OBLOCK: + n := n.(*ir.BlockStmt) e.stmts(n.List()) case ir.ODCL: // Record loop depth at declaration. 
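The labels pre-pass at the start of the escape.go changes classifies a label as looping when a goto targeting it appears after the label, meaning the jump goes backward. A sketch of that classification over a flat statement list; the string encoding is purely illustrative:

package main

import (
	"fmt"
	"strings"
)

// classify mimics the walkFunc pre-pass: a label starts out nonlooping, and a
// goto seen after its label upgrades it to looping (a backward jump).
func classify(stmts []string) map[string]string {
	state := map[string]string{}
	for _, s := range stmts {
		switch {
		case strings.HasPrefix(s, "label "):
			state[strings.TrimPrefix(s, "label ")] = "nonlooping"
		case strings.HasPrefix(s, "goto "):
			if l := strings.TrimPrefix(s, "goto "); state[l] == "nonlooping" {
				state[l] = "looping"
			}
		}
	}
	return state
}

func main() {
	fmt.Println(classify([]string{"label top", "goto top", "goto done", "label done"}))
	// map[done:nonlooping top:looping]
}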
+ n := n.(*ir.Decl) if !ir.IsBlank(n.Left()) { e.dcl(n.Left()) } case ir.OLABEL: + n := n.(*ir.LabelStmt) switch e.labels[n.Sym()] { case nonlooping: if base.Flag.LowerM > 2 { @@ -330,11 +335,13 @@ func (e *Escape) stmt(n ir.Node) { delete(e.labels, n.Sym()) case ir.OIF: + n := n.(*ir.IfStmt) e.discard(n.Left()) e.block(n.Body()) e.block(n.Rlist()) case ir.OFOR, ir.OFORUNTIL: + n := n.(*ir.ForStmt) e.loopDepth++ e.discard(n.Left()) e.stmt(n.Right()) @@ -343,6 +350,7 @@ func (e *Escape) stmt(n ir.Node) { case ir.ORANGE: // for List = range Right { Nbody } + n := n.(*ir.RangeStmt) e.loopDepth++ ks := e.addrs(n.List()) e.block(n.Body()) @@ -360,11 +368,13 @@ func (e *Escape) stmt(n ir.Node) { e.expr(e.later(k), n.Right()) case ir.OSWITCH: + n := n.(*ir.SwitchStmt) typesw := n.Left() != nil && n.Left().Op() == ir.OTYPESW var ks []EscHole for _, cas := range n.List().Slice() { // cases - if typesw && n.Left().Left() != nil { + cas := cas.(*ir.CaseStmt) + if typesw && n.Left().(*ir.TypeSwitchGuard).Left() != nil { cv := cas.Rlist().First() k := e.dcl(cv) // type switch variables have no ODCL. if cv.Type().HasPointers() { @@ -377,50 +387,65 @@ func (e *Escape) stmt(n ir.Node) { } if typesw { - e.expr(e.teeHole(ks...), n.Left().Right()) + e.expr(e.teeHole(ks...), n.Left().(*ir.TypeSwitchGuard).Right()) } else { e.discard(n.Left()) } case ir.OSELECT: + n := n.(*ir.SelectStmt) for _, cas := range n.List().Slice() { + cas := cas.(*ir.CaseStmt) e.stmt(cas.Left()) e.block(cas.Body()) } case ir.OSELRECV: + n := n.(*ir.AssignStmt) e.assign(n.Left(), n.Right(), "selrecv", n) case ir.OSELRECV2: + n := n.(*ir.AssignListStmt) e.assign(n.List().First(), n.Rlist().First(), "selrecv", n) e.assign(n.List().Second(), nil, "selrecv", n) case ir.ORECV: // TODO(mdempsky): Consider e.discard(n.Left). 
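loopDepth, bumped for OFOR, OFORUNTIL, and ORANGE above, is what lets the analysis tell a pointer that dies within one iteration from one that outlives the loop. Both cases in user code; try go build -gcflags='-m -l' (exact messages vary by Go version):

package main

import "fmt"

func perIteration(xs []int) int {
	sum := 0
	for _, x := range xs {
		p := &x // p dies with the iteration, so x can stay on the stack
		sum += *p
	}
	return sum
}

func outlives(xs []int) []*int {
	var ps []*int
	for _, x := range xs {
		x := x
		ps = append(ps, &x) // the stored pointer outlives the loop: "moved to heap: x"
	}
	return ps
}

func main() {
	fmt.Println(perIteration([]int{1, 2, 3}), len(outlives([]int{1, 2, 3}))) // 6 3
}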
+ n := n.(*ir.UnaryExpr) e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit case ir.OSEND: + n := n.(*ir.SendStmt) e.discard(n.Left()) e.assignHeap(n.Right(), "send", n) - case ir.OAS, ir.OASOP: + case ir.OAS: + n := n.(*ir.AssignStmt) + e.assign(n.Left(), n.Right(), "assign", n) + case ir.OASOP: + n := n.(*ir.AssignOpStmt) e.assign(n.Left(), n.Right(), "assign", n) - case ir.OAS2: + n := n.(*ir.AssignListStmt) for i, nl := range n.List().Slice() { e.assign(nl, n.Rlist().Index(i), "assign-pair", n) } case ir.OAS2DOTTYPE: // v, ok = x.(type) + n := n.(*ir.AssignListStmt) e.assign(n.List().First(), n.Rlist().First(), "assign-pair-dot-type", n) e.assign(n.List().Second(), nil, "assign-pair-dot-type", n) case ir.OAS2MAPR: // v, ok = m[k] + n := n.(*ir.AssignListStmt) e.assign(n.List().First(), n.Rlist().First(), "assign-pair-mapr", n) e.assign(n.List().Second(), nil, "assign-pair-mapr", n) case ir.OAS2RECV: // v, ok = <-ch + n := n.(*ir.AssignListStmt) e.assign(n.List().First(), n.Rlist().First(), "assign-pair-receive", n) e.assign(n.List().Second(), nil, "assign-pair-receive", n) case ir.OAS2FUNC: + n := n.(*ir.AssignListStmt) e.stmts(n.Rlist().First().Init()) e.call(e.addrs(n.List()), n.Rlist().First(), nil) case ir.ORETURN: + n := n.(*ir.ReturnStmt) results := e.curfn.Type().Results().FieldSlice() for i, v := range n.List().Slice() { e.assign(ir.AsNode(results[i].Nname), v, "return", n) @@ -428,6 +453,7 @@ func (e *Escape) stmt(n ir.Node) { case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER: e.call(nil, n, nil) case ir.OGO, ir.ODEFER: + n := n.(*ir.GoDeferStmt) e.stmts(n.Left().Init()) e.call(nil, n.Left(), n) @@ -472,7 +498,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { uintptrEscapesHack := k.uintptrEscapesHack k.uintptrEscapesHack = false - if uintptrEscapesHack && n.Op() == ir.OCONVNOP && n.Left().Type().IsUnsafePtr() { + if uintptrEscapesHack && n.Op() == ir.OCONVNOP && n.(*ir.ConvExpr).Left().Type().IsUnsafePtr() { // nop } else if k.derefs >= 0 && !n.Type().HasPointers() { k = e.discardHole() @@ -486,28 +512,40 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { // nop case ir.ONAME: + n := n.(*ir.Name) if n.Class() == ir.PFUNC || n.Class() == ir.PEXTERN { return } e.flow(k, e.oldLoc(n)) case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT: + n := n.(*ir.UnaryExpr) e.discard(n.Left()) - case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE, ir.OANDAND, ir.OOROR: + case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: + n := n.(*ir.BinaryExpr) + e.discard(n.Left()) + e.discard(n.Right()) + case ir.OANDAND, ir.OOROR: + n := n.(*ir.LogicalExpr) e.discard(n.Left()) e.discard(n.Right()) - case ir.OADDR: + n := n.(*ir.AddrExpr) e.expr(k.addr(n, "address-of"), n.Left()) // "address-of" case ir.ODEREF: + n := n.(*ir.StarExpr) e.expr(k.deref(n, "indirection"), n.Left()) // "indirection" case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER: + n := n.(*ir.SelectorExpr) e.expr(k.note(n, "dot"), n.Left()) case ir.ODOTPTR: + n := n.(*ir.SelectorExpr) e.expr(k.deref(n, "dot of pointer"), n.Left()) // "dot of pointer" case ir.ODOTTYPE, ir.ODOTTYPE2: + n := n.(*ir.TypeAssertExpr) e.expr(k.dotType(n.Type(), n, "dot"), n.Left()) case ir.OINDEX: + n := n.(*ir.IndexExpr) if n.Left().Type().IsArray() { e.expr(k.note(n, 
"fixed-array-index-of"), n.Left()) } else { @@ -516,9 +554,11 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { } e.discard(n.Right()) case ir.OINDEXMAP: + n := n.(*ir.IndexExpr) e.discard(n.Left()) e.discard(n.Right()) case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR: + n := n.(*ir.SliceExpr) e.expr(k.note(n, "slice"), n.Left()) low, high, max := n.SliceBounds() e.discard(low) @@ -526,6 +566,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { e.discard(max) case ir.OCONV, ir.OCONVNOP: + n := n.(*ir.ConvExpr) if checkPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.Left().Type().IsPtr() { // When -d=checkptr=2 is enabled, treat // conversions to unsafe.Pointer as an @@ -540,27 +581,33 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { e.expr(k, n.Left()) } case ir.OCONVIFACE: + n := n.(*ir.ConvExpr) if !n.Left().Type().IsInterface() && !isdirectiface(n.Left().Type()) { k = e.spill(k, n) } e.expr(k.note(n, "interface-converted"), n.Left()) case ir.ORECV: + n := n.(*ir.UnaryExpr) e.discard(n.Left()) case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY: e.call([]EscHole{k}, n, nil) case ir.ONEW: + n := n.(*ir.UnaryExpr) e.spill(k, n) case ir.OMAKESLICE: + n := n.(*ir.MakeExpr) e.spill(k, n) e.discard(n.Left()) e.discard(n.Right()) case ir.OMAKECHAN: + n := n.(*ir.MakeExpr) e.discard(n.Left()) case ir.OMAKEMAP: + n := n.(*ir.MakeExpr) e.spill(k, n) e.discard(n.Left()) @@ -571,6 +618,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { // Flow the receiver argument to both the closure and // to the receiver parameter. + n := n.(*ir.CallPartExpr) closureK := e.spill(k, n) m := callpartMethod(n) @@ -591,37 +639,43 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { e.expr(e.teeHole(paramK, closureK), n.Left()) case ir.OPTRLIT: + n := n.(*ir.AddrExpr) e.expr(e.spill(k, n), n.Left()) case ir.OARRAYLIT: + n := n.(*ir.CompLitExpr) for _, elt := range n.List().Slice() { if elt.Op() == ir.OKEY { - elt = elt.Right() + elt = elt.(*ir.KeyExpr).Right() } e.expr(k.note(n, "array literal element"), elt) } case ir.OSLICELIT: + n := n.(*ir.CompLitExpr) k = e.spill(k, n) k.uintptrEscapesHack = uintptrEscapesHack // for ...uintptr parameters for _, elt := range n.List().Slice() { if elt.Op() == ir.OKEY { - elt = elt.Right() + elt = elt.(*ir.KeyExpr).Right() } e.expr(k.note(n, "slice-literal-element"), elt) } case ir.OSTRUCTLIT: + n := n.(*ir.CompLitExpr) for _, elt := range n.List().Slice() { - e.expr(k.note(n, "struct literal element"), elt.Left()) + e.expr(k.note(n, "struct literal element"), elt.(*ir.StructKeyExpr).Left()) } case ir.OMAPLIT: + n := n.(*ir.CompLitExpr) e.spill(k, n) // Map keys and values are always stored in the heap. 
for _, elt := range n.List().Slice() { + elt := elt.(*ir.KeyExpr) e.assignHeap(elt.Left(), "map literal key", n) e.assignHeap(elt.Right(), "map literal value", n) } @@ -640,10 +694,12 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { } case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR: + n := n.(*ir.ConvExpr) e.spill(k, n) e.discard(n.Left()) case ir.OADDSTR: + n := n.(*ir.AddStringExpr) e.spill(k, n) // Arguments of OADDSTR never escape; @@ -663,23 +719,28 @@ func (e *Escape) unsafeValue(k EscHole, n ir.Node) { switch n.Op() { case ir.OCONV, ir.OCONVNOP: + n := n.(*ir.ConvExpr) if n.Left().Type().IsUnsafePtr() { e.expr(k, n.Left()) } else { e.discard(n.Left()) } case ir.ODOTPTR: + n := n.(*ir.SelectorExpr) if isReflectHeaderDataField(n) { e.expr(k.deref(n, "reflect.Header.Data"), n.Left()) } else { e.discard(n.Left()) } case ir.OPLUS, ir.ONEG, ir.OBITNOT: + n := n.(*ir.UnaryExpr) e.unsafeValue(k, n.Left()) case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OAND, ir.OANDNOT: + n := n.(*ir.BinaryExpr) e.unsafeValue(k, n.Left()) e.unsafeValue(k, n.Right()) case ir.OLSH, ir.ORSH: + n := n.(*ir.BinaryExpr) e.unsafeValue(k, n.Left()) // RHS need not be uintptr-typed (#32959) and can't meaningfully // flow pointers anyway. @@ -715,13 +776,16 @@ func (e *Escape) addr(n ir.Node) EscHole { default: base.Fatalf("unexpected addr: %v", n) case ir.ONAME: + n := n.(*ir.Name) if n.Class() == ir.PEXTERN { break } k = e.oldLoc(n).asHole() case ir.ODOT: + n := n.(*ir.SelectorExpr) k = e.addr(n.Left()) case ir.OINDEX: + n := n.(*ir.IndexExpr) e.discard(n.Right()) if n.Left().Type().IsArray() { k = e.addr(n.Left()) @@ -731,6 +795,7 @@ func (e *Escape) addr(n ir.Node) EscHole { case ir.ODEREF, ir.ODOTPTR: e.discard(n) case ir.OINDEXMAP: + n := n.(*ir.IndexExpr) e.discard(n.Left()) e.assignHeap(n.Right(), "key of map put", n) } @@ -803,6 +868,7 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) { base.Fatalf("unexpected call op: %v", call.Op()) case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: + call := call.(*ir.CallExpr) fixVariadicCall(call) // Pick out the function callee, if statically known. @@ -810,7 +876,7 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) { switch call.Op() { case ir.OCALLFUNC: switch v := staticValue(call.Left()); { - case v.Op() == ir.ONAME && v.Class() == ir.PFUNC: + case v.Op() == ir.ONAME && v.(*ir.Name).Class() == ir.PFUNC: fn = v.(*ir.Name) case v.Op() == ir.OCLOSURE: fn = v.Func().Nname @@ -831,7 +897,7 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) { } if r := fntype.Recv(); r != nil { - argument(e.tagHole(ks, fn, r), call.Left().Left()) + argument(e.tagHole(ks, fn, r), call.Left().(*ir.SelectorExpr).Left()) } else { // Evaluate callee function expression. 
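The map-literal handling at the top of this chunk sends keys and values to assignHeap unconditionally, which matches a language-level fact: map elements live in heap-managed buckets that may move during growth, so they are not even addressable:

package main

import "fmt"

func main() {
	big := [4]int{1, 2, 3, 4}
	m := map[string][4]int{"k": big} // the value array is copied into heap-managed buckets
	// p := &m["k"]                  // does not compile: map elements are not addressable
	v := m["k"] // reads must copy the element out
	v[0] = 99
	fmt.Println(m["k"][0], v[0]) // 1 99: the map's copy is untouched
}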
argument(e.discardHole(), call.Left()) @@ -843,6 +909,7 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) { } case ir.OAPPEND: + call := call.(*ir.CallExpr) args := call.List().Slice() // Appendee slice may flow directly to the result, if @@ -868,6 +935,7 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) { } case ir.OCOPY: + call := call.(*ir.BinaryExpr) argument(e.discardHole(), call.Left()) copiedK := e.discardHole() @@ -877,16 +945,20 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) { argument(copiedK, call.Right()) case ir.OPANIC: + call := call.(*ir.UnaryExpr) argument(e.heapHole(), call.Left()) case ir.OCOMPLEX: + call := call.(*ir.BinaryExpr) argument(e.discardHole(), call.Left()) argument(e.discardHole(), call.Right()) case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER: + call := call.(*ir.CallExpr) for _, arg := range call.List().Slice() { argument(e.discardHole(), arg) } case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE: + call := call.(*ir.UnaryExpr) argument(e.discardHole(), call.Left()) } } @@ -1082,6 +1154,7 @@ func (e *Escape) newLoc(n ir.Node, transient bool) *EscLocation { e.allLocs = append(e.allLocs, loc) if n != nil { if n.Op() == ir.ONAME && n.Name().Curfn != e.curfn { + n := n.(*ir.Name) base.Fatalf("curfn mismatch: %v != %v", n.Name().Curfn, e.curfn) } @@ -1466,14 +1539,24 @@ func (e *Escape) finish(fns []*ir.Func) { } n.SetEsc(EscNone) if loc.transient { - n.SetTransient(true) + switch n.Op() { + case ir.OCLOSURE: + n := n.(*ir.ClosureExpr) + n.SetTransient(true) + case ir.OCALLPART: + n := n.(*ir.CallPartExpr) + n.SetTransient(true) + case ir.OSLICELIT: + n := n.(*ir.CompLitExpr) + n.SetTransient(true) + } } } } } func (l *EscLocation) isName(c ir.Class) bool { - return l.n != nil && l.n.Op() == ir.ONAME && l.n.Class() == c + return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class() == c } const numEscResults = 7 @@ -1636,7 +1719,18 @@ func isSliceSelfAssign(dst, src ir.Node) bool { // when we evaluate it for dst and for src. // dst is ONAME dereference. - if dst.Op() != ir.ODEREF && dst.Op() != ir.ODOTPTR || dst.Left().Op() != ir.ONAME { + var dstX ir.Node + switch dst.Op() { + default: + return false + case ir.ODEREF: + dst := dst.(*ir.StarExpr) + dstX = dst.Left() + case ir.ODOTPTR: + dst := dst.(*ir.SelectorExpr) + dstX = dst.Left() + } + if dstX.Op() != ir.ONAME { return false } // src is a slice operation. @@ -1653,6 +1747,7 @@ func isSliceSelfAssign(dst, src ir.Node) bool { // Pointer to an array is OK since it's not stored inside b directly. // For slicing an array (not pointer to array), there is an implicit OADDR. // We check that to determine non-pointer array slicing. + src := src.(*ir.SliceExpr) if src.Left().Op() == ir.OADDR { return false } @@ -1660,11 +1755,22 @@ func isSliceSelfAssign(dst, src ir.Node) bool { return false } // slice is applied to ONAME dereference. - if src.Left().Op() != ir.ODEREF && src.Left().Op() != ir.ODOTPTR || src.Left().Left().Op() != ir.ONAME { + var baseX ir.Node + switch base := src.(*ir.SliceExpr).Left(); base.Op() { + default: + return false + case ir.ODEREF: + base := base.(*ir.StarExpr) + baseX = base.Left() + case ir.ODOTPTR: + base := base.(*ir.SelectorExpr) + baseX = base.Left() + } + if baseX.Op() != ir.ONAME { return false } // dst and src reference the same base ONAME. 
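The whitelist isSliceSelfAssign implements exists for bytes.Buffer-style re-slicing, where destination and source share a base variable and no new value escapes:

package main

import "fmt"

type buffer struct {
	buf []byte
	off int
}

// consume re-slices a field through the receiver; dst and src share the base
// variable b, so the analysis records no new escape edge for the assignment.
func (b *buffer) consume(n int) {
	b.buf = b.buf[n:]
	b.off += n
}

func main() {
	b := &buffer{buf: make([]byte, 8)}
	b.consume(3)
	fmt.Println(len(b.buf), b.off) // 5 3
}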
- return dst.Left() == src.Left().Left() + return dstX.(*ir.Name) == baseX.(*ir.Name) } // isSelfAssign reports whether assignment from src to dst can @@ -1688,19 +1794,23 @@ func isSelfAssign(dst, src ir.Node) bool { return false } + // The expression prefix must be both "safe" and identical. switch dst.Op() { case ir.ODOT, ir.ODOTPTR: // Safe trailing accessors that are permitted to differ. + dst := dst.(*ir.SelectorExpr) + src := src.(*ir.SelectorExpr) + return samesafeexpr(dst.Left(), src.Left()) case ir.OINDEX: + dst := dst.(*ir.IndexExpr) + src := src.(*ir.IndexExpr) if mayAffectMemory(dst.Right()) || mayAffectMemory(src.Right()) { return false } + return samesafeexpr(dst.Left(), src.Left()) default: return false } - - // The expression prefix must be both "safe" and identical. - return samesafeexpr(dst.Left(), src.Left()) } // mayAffectMemory reports whether evaluation of n may affect the program's @@ -1713,17 +1823,36 @@ func mayAffectMemory(n ir.Node) bool { // // We're ignoring things like division by zero, index out of range, // and nil pointer dereference here. + + // TODO(rsc): It seems like it should be possible to replace this with + // an ir.Any looking for any op that's not the ones in the case statement. + // But that produces changes in the compiled output detected by buildall. switch n.Op() { case ir.ONAME, ir.OCLOSUREREAD, ir.OLITERAL, ir.ONIL: return false - // Left+Right group. - case ir.OINDEX, ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD: + case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD: + n := n.(*ir.BinaryExpr) + return mayAffectMemory(n.Left()) || mayAffectMemory(n.Right()) + + case ir.OINDEX: + n := n.(*ir.IndexExpr) return mayAffectMemory(n.Left()) || mayAffectMemory(n.Right()) - // Left group. - case ir.ODOT, ir.ODOTPTR, ir.ODEREF, ir.OCONVNOP, ir.OCONV, ir.OLEN, ir.OCAP, - ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF: + case ir.OCONVNOP, ir.OCONV: + n := n.(*ir.ConvExpr) + return mayAffectMemory(n.Left()) + + case ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF: + n := n.(*ir.UnaryExpr) + return mayAffectMemory(n.Left()) + + case ir.ODOT, ir.ODOTPTR: + n := n.(*ir.SelectorExpr) + return mayAffectMemory(n.Left()) + + case ir.ODEREF: + n := n.(*ir.StarExpr) return mayAffectMemory(n.Left()) default: @@ -1739,8 +1868,11 @@ func heapAllocReason(n ir.Node) string { } // Parameters are always passed via the stack. - if n.Op() == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) { - return "" + if n.Op() == ir.ONAME { + n := n.(*ir.Name) + if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT { + return "" + } } if n.Type().Width > maxStackVarSize { @@ -1754,11 +1886,12 @@ func heapAllocReason(n ir.Node) string { if n.Op() == ir.OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize { return "too large for stack" } - if n.Op() == ir.OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize { + if n.Op() == ir.OCALLPART && partialCallType(n.(*ir.CallPartExpr)).Size() >= maxImplicitStackVarSize { return "too large for stack" } if n.Op() == ir.OMAKESLICE { + n := n.(*ir.MakeExpr) r := n.Right() if r == nil { r = n.Left() @@ -1833,10 +1966,20 @@ func addrescapes(n ir.Node) { // In &x[0], if x is a slice, then x does not // escape--the pointer inside x does, but that // is always a heap pointer anyway. 
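heapAllocReason's size thresholds can be observed directly. Assuming the default limit (maxStackVarSize is 10 MB unless lowered by debug flags), the local below is forced to the heap and reported under -gcflags=-m as moved:

package main

import "fmt"

func big() int64 {
	var a [2 << 20]int64 // 16 MiB of int64s, over the stack-variable limit: "moved to heap: a"
	a[0] = 1
	return a[0]
}

func main() { fmt.Println(big()) } // 1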
- case ir.ODOT, ir.OINDEX, ir.OPAREN, ir.OCONVNOP: + case ir.ODOT: + n := n.(*ir.SelectorExpr) + addrescapes(n.Left()) + case ir.OINDEX: + n := n.(*ir.IndexExpr) if !n.Left().Type().IsSlice() { addrescapes(n.Left()) } + case ir.OPAREN: + n := n.(*ir.ParenExpr) + addrescapes(n.Left()) + case ir.OCONVNOP: + n := n.(*ir.ConvExpr) + addrescapes(n.Left()) } } @@ -1857,7 +2000,6 @@ func moveToHeap(n *ir.Name) { // temp will add it to the function declaration list automatically. heapaddr := temp(types.NewPtr(n.Type())) heapaddr.SetSym(lookup("&" + n.Sym().Name)) - ir.Orig(heapaddr).SetSym(heapaddr.Sym()) heapaddr.SetPos(n.Pos()) // Unset AutoTemp to persist the &foo variable name through SSA to @@ -1933,7 +2075,7 @@ const unsafeUintptrTag = "unsafe-uintptr" // marked go:uintptrescapes. const uintptrEscapesTag = "uintptr-escapes" -func (e *Escape) paramTag(fn ir.Node, narg int, f *types.Field) string { +func (e *Escape) paramTag(fn *ir.Func, narg int, f *types.Field) string { name := func() string { if f.Sym != nil { return f.Sym.Name From 5fe64298a4a00a7fa1655e9ebffbec7a704eb554 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 10 Dec 2020 18:46:29 -0500 Subject: [PATCH 176/474] [dev.regabi] cmd/compile: cleanup for concrete types - import/export An automated rewrite will add concrete type assertions after a test of n.Op(), when n can be safely type-asserted (meaning, n is not reassigned a different type, n is not reassigned and then used outside the scope of the type assertion, and so on). This sequence of CLs handles the code that the automated rewrite does not: adding specific types to function arguments, adjusting code not to call n.Left() etc when n may have multiple representations, and so on. This CL focuses on iimport.go and iexport.go. Passes buildall w/ toolstash -cmp. Change-Id: I63edee54991ae5d982e99efa7a2894478d511910 Reviewed-on: https://go-review.googlesource.com/c/go/+/277925 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/bexport.go | 7 +- src/cmd/compile/internal/gc/bimport.go | 20 ---- src/cmd/compile/internal/gc/iexport.go | 151 +++++++++++++++++-------- src/cmd/compile/internal/gc/iimport.go | 22 ++-- 4 files changed, 122 insertions(+), 78 deletions(-) delete mode 100644 src/cmd/compile/internal/gc/bimport.go diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 43c4ce7150fe4..31fd251c5e0e1 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -15,8 +15,11 @@ type exporter struct { // markObject visits a reachable object. func (p *exporter) markObject(n ir.Node) { - if n.Op() == ir.ONAME && n.Class() == ir.PFUNC { - inlFlood(n.(*ir.Name)) + if n.Op() == ir.ONAME { + n := n.(*ir.Name) + if n.Class() == ir.PFUNC { + inlFlood(n) + } } p.markType(n.Type()) diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go deleted file mode 100644 index 5a7018d8e6187..0000000000000 --- a/src/cmd/compile/internal/gc/bimport.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
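Back in the escape changes, paramTag (its fn parameter now concretely *ir.Func) is what produces the per-parameter escape tags that -gcflags=-m surfaces as "leaking param" notes. A small example; exact wording varies by version, and -l keeps inlining from muddying the output:

package main

import "fmt"

var global *int

func sink(p *int) { global = p }  // tagged so callers see "leaking param: p"
func id(p *int) *int { return p } // leaks only to its result

func main() {
	x, y := 1, 2
	sink(&x) // x is moved to the heap: its address outlives the call
	fmt.Println(*id(&y), x) // 2 1
}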
- -package gc - -import ( - "cmd/compile/internal/ir" - "cmd/compile/internal/types" - "cmd/internal/src" -) - -func npos(pos src.XPos, n ir.Node) ir.Node { - n.SetPos(pos) - return n -} - -func builtinCall(op ir.Op) ir.Node { - return ir.Nod(ir.OCALL, mkname(types.BuiltinPkg.Lookup(ir.OpNames[op])), nil) -} diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 14356013de8bb..eac9f29e6550a 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -1069,7 +1069,7 @@ func (w *exportWriter) stmt(n ir.Node) { } } - switch op := n.Op(); op { + switch n.Op() { case ir.OBLOCK: // No OBLOCK in export data. // Inline content into this statement list, @@ -1084,7 +1084,7 @@ func (w *exportWriter) stmt(n ir.Node) { case ir.ODCL: w.op(ir.ODCL) w.pos(n.Left().Pos()) - w.localName(n.Left()) + w.localName(n.Left().(*ir.Name)) w.typ(n.Left().Type()) case ir.OAS: @@ -1099,9 +1099,10 @@ func (w *exportWriter) stmt(n ir.Node) { } case ir.OASOP: + n := n.(*ir.AssignOpStmt) w.op(ir.OASOP) w.pos(n.Pos()) - w.op(n.SubOp()) + w.op(n.AsOp) w.expr(n.Left()) if w.bool(!n.Implicit()) { w.expr(n.Right()) @@ -1122,7 +1123,7 @@ func (w *exportWriter) stmt(n ir.Node) { // unreachable - generated by compiler for trampolin routines case ir.OGO, ir.ODEFER: - w.op(op) + w.op(n.Op()) w.pos(n.Pos()) w.expr(n.Left()) @@ -1148,8 +1149,15 @@ func (w *exportWriter) stmt(n ir.Node) { w.expr(n.Right()) w.stmtList(n.Body()) - case ir.OSELECT, ir.OSWITCH: - w.op(op) + case ir.OSELECT: + w.op(n.Op()) + w.pos(n.Pos()) + w.stmtList(n.Init()) + w.exprsOrNil(nil, nil) // TODO(rsc): Delete (and fix importer). + w.caseList(n) + + case ir.OSWITCH: + w.op(n.Op()) w.pos(n.Pos()) w.stmtList(n.Init()) w.exprsOrNil(n.Left(), nil) @@ -1163,7 +1171,7 @@ func (w *exportWriter) stmt(n ir.Node) { w.pos(n.Pos()) case ir.OBREAK, ir.OCONTINUE, ir.OGOTO, ir.OLABEL: - w.op(op) + w.op(n.Op()) w.pos(n.Pos()) label := "" if sym := n.Sym(); sym != nil { @@ -1176,19 +1184,34 @@ func (w *exportWriter) stmt(n ir.Node) { } } +func isNamedTypeSwitch(n ir.Node) bool { + if n.Op() != ir.OSWITCH { + return false + } + sw := n.(*ir.SwitchStmt) + if sw.Left() == nil || sw.Left().Op() != ir.OTYPESW { + return false + } + guard := sw.Left().(*ir.TypeSwitchGuard) + return guard.Left() != nil +} + func (w *exportWriter) caseList(sw ir.Node) { - namedTypeSwitch := sw.Op() == ir.OSWITCH && sw.Left() != nil && sw.Left().Op() == ir.OTYPESW && sw.Left().Left() != nil + namedTypeSwitch := isNamedTypeSwitch(sw) - cases := sw.List().Slice() + var cases []ir.Node + if sw.Op() == ir.OSWITCH { + cases = sw.(*ir.SwitchStmt).List().Slice() + } else { + cases = sw.(*ir.SelectStmt).List().Slice() + } w.uint64(uint64(len(cases))) for _, cas := range cases { - if cas.Op() != ir.OCASE { - base.Fatalf("expected OCASE, got %v", cas) - } + cas := cas.(*ir.CaseStmt) w.pos(cas.Pos()) w.stmtList(cas.List()) if namedTypeSwitch { - w.localName(cas.Rlist().First()) + w.localName(cas.Rlist().First().(*ir.Name)) } w.stmtList(cas.Body()) } @@ -1201,22 +1224,29 @@ func (w *exportWriter) exprList(list ir.Nodes) { w.op(ir.OEND) } -func (w *exportWriter) expr(n ir.Node) { - // from nodefmt (fmt.go) - // - // nodefmt reverts nodes back to their original - we don't need to do - // it because we are not bound to produce valid Go syntax when exporting - // - // if (fmtmode != FExp || n.Op != OLITERAL) && n.Orig != nil { - // n = n.Orig - // } - - // from exprfmt (fmt.go) - for n.Op() == ir.OPAREN || n.Implicit() && (n.Op() == 
ir.ODEREF || n.Op() == ir.OADDR || n.Op() == ir.ODOT || n.Op() == ir.ODOTPTR) { - n = n.Left() +func simplifyForExport(n ir.Node) ir.Node { + switch n.Op() { + case ir.OPAREN: + return simplifyForExport(n.Left()) + case ir.ODEREF: + if n.Implicit() { + return simplifyForExport(n.Left()) + } + case ir.OADDR: + if n.Implicit() { + return simplifyForExport(n.Left()) + } + case ir.ODOT, ir.ODOTPTR: + if n.Implicit() { + return simplifyForExport(n.Left()) + } } + return n +} - switch op := n.Op(); op { +func (w *exportWriter) expr(n ir.Node) { + n = simplifyForExport(n) + switch n.Op() { // expressions // (somewhat closely following the structure of exprfmt in fmt.go) case ir.ONIL: @@ -1243,6 +1273,7 @@ func (w *exportWriter) expr(n ir.Node) { case ir.ONAME: // Package scope name. + n := n.(*ir.Name) if (n.Class() == ir.PEXTERN || n.Class() == ir.PFUNC) && !ir.IsBlank(n) { w.op(ir.ONONAME) w.qualifiedIdent(n) @@ -1291,7 +1322,7 @@ func (w *exportWriter) expr(n ir.Node) { w.op(ir.OSTRUCTLIT) w.pos(n.Pos()) w.typ(n.Type()) - w.elemList(n.List()) // special handling of field names + w.fieldList(n.List()) // special handling of field names case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT: w.op(ir.OCOMPLIT) @@ -1349,7 +1380,7 @@ func (w *exportWriter) expr(n ir.Node) { case ir.OCOPY, ir.OCOMPLEX: // treated like other builtin calls (see e.g., OREAL) - w.op(op) + w.op(n.Op()) w.pos(n.Pos()) w.expr(n.Left()) w.expr(n.Right()) @@ -1361,20 +1392,21 @@ func (w *exportWriter) expr(n ir.Node) { w.expr(n.Left()) w.typ(n.Type()) - case ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN: - w.op(op) + case ir.OREAL, ir.OIMAG, ir.OCAP, ir.OCLOSE, ir.OLEN, ir.ONEW, ir.OPANIC: + w.op(n.Op()) w.pos(n.Pos()) - if n.Left() != nil { - w.expr(n.Left()) - w.op(ir.OEND) - } else { - w.exprList(n.List()) // emits terminating OEND - } + w.expr(n.Left()) + w.op(ir.OEND) + + case ir.OAPPEND, ir.ODELETE, ir.ORECOVER, ir.OPRINT, ir.OPRINTN: + w.op(n.Op()) + w.pos(n.Pos()) + w.exprList(n.List()) // emits terminating OEND // only append() calls may contain '...' arguments - if op == ir.OAPPEND { + if n.Op() == ir.OAPPEND { w.bool(n.IsDDD()) } else if n.IsDDD() { - base.Fatalf("exporter: unexpected '...' with %v call", op) + base.Fatalf("exporter: unexpected '...' 
with %v call", n.Op()) } case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OGETG: @@ -1386,15 +1418,13 @@ func (w *exportWriter) expr(n ir.Node) { w.bool(n.IsDDD()) case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE: - w.op(op) // must keep separate from OMAKE for importer + w.op(n.Op()) // must keep separate from OMAKE for importer w.pos(n.Pos()) w.typ(n.Type()) switch { default: // empty list w.op(ir.OEND) - case n.List().Len() != 0: // pre-typecheck - w.exprList(n.List()) // emits terminating OEND case n.Right() != nil: w.expr(n.Left()) w.expr(n.Right()) @@ -1405,15 +1435,37 @@ func (w *exportWriter) expr(n ir.Node) { } // unary expressions - case ir.OPLUS, ir.ONEG, ir.OADDR, ir.OBITNOT, ir.ODEREF, ir.ONOT, ir.ORECV: - w.op(op) + case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.ORECV: + w.op(n.Op()) + w.pos(n.Pos()) + w.expr(n.Left()) + + case ir.OADDR: + w.op(n.Op()) w.pos(n.Pos()) w.expr(n.Left()) + case ir.ODEREF: + w.op(n.Op()) + w.pos(n.Pos()) + w.expr(n.Left()) + + case ir.OSEND: + w.op(n.Op()) + w.pos(n.Pos()) + w.expr(n.Left()) + w.expr(n.Right()) + // binary expressions - case ir.OADD, ir.OAND, ir.OANDAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT, - ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.OOROR, ir.ORSH, ir.OSEND, ir.OSUB, ir.OXOR: - w.op(op) + case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT, + ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.ORSH, ir.OSUB, ir.OXOR: + w.op(n.Op()) + w.pos(n.Pos()) + w.expr(n.Left()) + w.expr(n.Right()) + + case ir.OANDAND, ir.OOROR: + w.op(n.Op()) w.pos(n.Pos()) w.expr(n.Left()) w.expr(n.Right()) @@ -1454,15 +1506,16 @@ func (w *exportWriter) exprsOrNil(a, b ir.Node) { } } -func (w *exportWriter) elemList(list ir.Nodes) { +func (w *exportWriter) fieldList(list ir.Nodes) { w.uint64(uint64(list.Len())) for _, n := range list.Slice() { + n := n.(*ir.StructKeyExpr) w.selector(n.Sym()) w.expr(n.Left()) } } -func (w *exportWriter) localName(n ir.Node) { +func (w *exportWriter) localName(n *ir.Name) { // Escape analysis happens after inline bodies are saved, but // we're using the same ONAME nodes, so we might still see // PAUTOHEAP here. 
diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 1096d7988e37f..154c4e3a84339 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -753,7 +753,7 @@ func (r *importReader) stmtList() []ir.Node { } func (r *importReader) caseList(sw ir.Node) []ir.Node { - namedTypeSwitch := sw.Op() == ir.OSWITCH && sw.Left() != nil && sw.Left().Op() == ir.OTYPESW && sw.Left().Left() != nil + namedTypeSwitch := isNamedTypeSwitch(sw) cases := make([]ir.Node, r.uint64()) for i := range cases { @@ -766,7 +766,7 @@ func (r *importReader) caseList(sw ir.Node) []ir.Node { caseVar := ir.NewNameAt(cas.Pos(), r.ident()) declare(caseVar, dclcontext) cas.PtrRlist().Set1(caseVar) - caseVar.Defn = sw.Left() + caseVar.Defn = sw.(*ir.SwitchStmt).Left() } cas.PtrBody().Set(r.stmtList()) cases[i] = cas @@ -915,14 +915,14 @@ func (r *importReader) node() ir.Node { return n case ir.OCOPY, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN: - n := npos(r.pos(), builtinCall(op)) + n := builtinCall(r.pos(), op) n.PtrList().Set(r.exprList()) if op == ir.OAPPEND { n.SetIsDDD(r.bool()) } return n - // case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OGETG: + // case OCALLFUNC, OCALLMETH, OCALLINTER, OGETG: // unreachable - mapped to OCALL case below by exporter case ir.OCALL: @@ -934,7 +934,7 @@ func (r *importReader) node() ir.Node { return n case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE: - n := npos(r.pos(), builtinCall(ir.OMAKE)) + n := builtinCall(r.pos(), ir.OMAKE) n.PtrList().Append(ir.TypeNode(r.typ())) n.PtrList().Append(r.exprList()...) return n @@ -1042,8 +1042,7 @@ func (r *importReader) node() ir.Node { case ir.OSELECT: n := ir.NodAt(r.pos(), ir.OSELECT, nil, nil) n.PtrInit().Set(r.stmtList()) - left, _ := r.exprsOrNil() - n.SetLeft(left) + r.exprsOrNil() // TODO(rsc): Delete (and fix exporter). These are always nil. n.PtrList().Set(r.caseList(n)) return n @@ -1110,3 +1109,12 @@ func (r *importReader) exprsOrNil() (a, b ir.Node) { } return } + +func builtinCall(pos src.XPos, op ir.Op) *ir.CallExpr { + return ir.NewCallExpr(pos, ir.OCALL, mkname(types.BuiltinPkg.Lookup(ir.OpNames[op])), nil) +} + +func npos(pos src.XPos, n ir.Node) ir.Node { + n.SetPos(pos) + return n +} From 389ae3d5ba24ffec3df63e7e6704d813efc3d719 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 10 Dec 2020 18:46:45 -0500 Subject: [PATCH 177/474] [dev.regabi] cmd/compile: cleanup for concrete types - inl An automated rewrite will add concrete type assertions after a test of n.Op(), when n can be safely type-asserted (meaning, n is not reassigned a different type, n is not reassigned and then used outside the scope of the type assertion, and so on). This sequence of CLs handles the code that the automated rewrite does not: adding specific types to function arguments, adjusting code not to call n.Left() etc when n may have multiple representations, and so on. This CL focuses on inl.go. Passes buildall w/ toolstash -cmp. 
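For illustration, the shape of the pattern as a self-contained toy
(Op, Node, CallExpr, Name, and callTarget below are hypothetical
stand-ins for exposition, not the compiler's ir package or code from
this CL):

    package main

    import "fmt"

    type Op int

    const (
        OCALL Op = iota
        ONAME
    )

    type Node interface{ Op() Op }

    type CallExpr struct{ Fn Node }

    func (*CallExpr) Op() Op { return OCALL }

    type Name struct{ Sym string }

    func (*Name) Op() Op { return ONAME }

    // Helpers take the concrete type, so callers assert once,
    // immediately after testing Op.
    func callTarget(call *CallExpr) Node { return call.Fn }

    func target(n Node) Node {
        if n.Op() == OCALL {
            n := n.(*CallExpr) // safe: only *CallExpr reports OCALL
            return callTarget(n)
        }
        return nil
    }

    func main() {
        fmt.Println(target(&CallExpr{Fn: &Name{Sym: "f"}}))
    }

The automated rewrite supplies the one-line assertion after the Op
test; the manual part is changing signatures like callTarget's to the
concrete type and fixing up callers.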
Change-Id: Iaaee7664cd43e264d9e49d252e3afa7cf719939b Reviewed-on: https://go-review.googlesource.com/c/go/+/277926 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/inl.go | 155 +++++++++++++++++------------ 1 file changed, 92 insertions(+), 63 deletions(-) diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 3a19efd325241..e940e416fd3eb 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -320,22 +320,26 @@ func (v *hairyVisitor) doNode(n ir.Node) error { switch n.Op() { // Call is okay if inlinable and we have the budget for the body. case ir.OCALLFUNC: + n := n.(*ir.CallExpr) // Functions that call runtime.getcaller{pc,sp} can not be inlined // because getcaller{pc,sp} expect a pointer to the caller's first argument. // // runtime.throw is a "cheap call" like panic in normal code. - if n.Left().Op() == ir.ONAME && n.Left().Class() == ir.PFUNC && isRuntimePkg(n.Left().Sym().Pkg) { - fn := n.Left().Sym().Name - if fn == "getcallerpc" || fn == "getcallersp" { - return errors.New("call to " + fn) - } - if fn == "throw" { - v.budget -= inlineExtraThrowCost - break + if n.Left().Op() == ir.ONAME { + name := n.Left().(*ir.Name) + if name.Class() == ir.PFUNC && isRuntimePkg(name.Sym().Pkg) { + fn := name.Sym().Name + if fn == "getcallerpc" || fn == "getcallersp" { + return errors.New("call to " + fn) + } + if fn == "throw" { + v.budget -= inlineExtraThrowCost + break + } } } - if isIntrinsicCall(n.(*ir.CallExpr)) { + if isIntrinsicCall(n) { // Treat like any other node. break } @@ -401,11 +405,15 @@ func (v *hairyVisitor) doNode(n ir.Node) error { // These nodes don't produce code; omit from inlining budget. return nil - case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH: - // ORANGE, OSELECT in "unhandled" above + case ir.OFOR, ir.OFORUNTIL: if n.Sym() != nil { return errors.New("labeled control") } + case ir.OSWITCH: + if n.Sym() != nil { + return errors.New("labeled control") + } + // case ir.ORANGE, ir.OSELECT in "unhandled" above case ir.OBREAK, ir.OCONTINUE: if n.Sym() != nil { @@ -488,7 +496,7 @@ func inlcalls(fn *ir.Func) { } // Turn an OINLCALL into a statement. -func inlconv2stmt(inlcall ir.Node) ir.Node { +func inlconv2stmt(inlcall *ir.InlinedCallExpr) ir.Node { n := ir.NodAt(inlcall.Pos(), ir.OBLOCK, nil, nil) n.SetList(inlcall.Init()) n.PtrList().AppendNodes(inlcall.PtrBody()) @@ -498,7 +506,7 @@ func inlconv2stmt(inlcall ir.Node) ir.Node { // Turn an OINLCALL into a single valued expression. // The result of inlconv2expr MUST be assigned back to n, e.g. // n.Left = inlconv2expr(n.Left) -func inlconv2expr(n ir.Node) ir.Node { +func inlconv2expr(n *ir.InlinedCallExpr) ir.Node { r := n.Rlist().First() return initExpr(append(n.Init().Slice(), n.Body().Slice()...), r) } @@ -508,7 +516,7 @@ func inlconv2expr(n ir.Node) ir.Node { // containing the inlined statements on the first list element so // order will be preserved. Used in return, oas2func and call // statements. 
-func inlconv2list(n ir.Node) []ir.Node { +func inlconv2list(n *ir.InlinedCallExpr) []ir.Node { if n.Op() != ir.OINLCALL || n.Rlist().Len() == 0 { base.Fatalf("inlconv2list %+v\n", n) } @@ -538,9 +546,9 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No switch n.Op() { case ir.ODEFER, ir.OGO: - switch n.Left().Op() { + switch call := n.Left(); call.Op() { case ir.OCALLFUNC, ir.OCALLMETH: - n.Left().SetNoInline(true) + call.SetNoInline(true) } // TODO do them here (or earlier), @@ -559,11 +567,13 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No ir.EditChildren(n, edit) - if n.Op() == ir.OAS2FUNC && n.Rlist().First().Op() == ir.OINLCALL { - n.PtrRlist().Set(inlconv2list(n.Rlist().First())) - n.SetOp(ir.OAS2) - n.SetTypecheck(0) - n = typecheck(n, ctxStmt) + if as := n; as.Op() == ir.OAS2FUNC { + if as.Rlist().First().Op() == ir.OINLCALL { + as.PtrRlist().Set(inlconv2list(as.Rlist().First().(*ir.InlinedCallExpr))) + as.SetOp(ir.OAS2) + as.SetTypecheck(0) + n = typecheck(as, ctxStmt) + } } // with all the branches out of the way, it is now time to @@ -576,45 +586,46 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No } } - var call ir.Node + var call *ir.CallExpr switch n.Op() { case ir.OCALLFUNC: - call = n + call = n.(*ir.CallExpr) if base.Flag.LowerM > 3 { - fmt.Printf("%v:call to func %+v\n", ir.Line(n), n.Left()) + fmt.Printf("%v:call to func %+v\n", ir.Line(n), call.Left()) } - if isIntrinsicCall(n.(*ir.CallExpr)) { + if isIntrinsicCall(call) { break } - if fn := inlCallee(n.Left()); fn != nil && fn.Inl != nil { - n = mkinlcall(n, fn, maxCost, inlMap, edit) + if fn := inlCallee(call.Left()); fn != nil && fn.Inl != nil { + n = mkinlcall(call, fn, maxCost, inlMap, edit) } case ir.OCALLMETH: - call = n + call = n.(*ir.CallExpr) if base.Flag.LowerM > 3 { - fmt.Printf("%v:call to meth %L\n", ir.Line(n), n.Left().Right()) + fmt.Printf("%v:call to meth %v\n", ir.Line(n), call.Left().(*ir.SelectorExpr).Sel) } // typecheck should have resolved ODOTMETH->type, whose nname points to the actual function. - if n.Left().Type() == nil { - base.Fatalf("no function type for [%p] %+v\n", n.Left(), n.Left()) + if call.Left().Type() == nil { + base.Fatalf("no function type for [%p] %+v\n", call.Left(), call.Left()) } - n = mkinlcall(n, methodExprName(n.Left()).Func(), maxCost, inlMap, edit) + n = mkinlcall(call, methodExprName(call.Left()).Func(), maxCost, inlMap, edit) } base.Pos = lno if n.Op() == ir.OINLCALL { - switch call.(*ir.CallExpr).Use { + ic := n.(*ir.InlinedCallExpr) + switch call.Use { default: ir.Dump("call", call) base.Fatalf("call missing use") case ir.CallUseExpr: - n = inlconv2expr(n) + n = inlconv2expr(ic) case ir.CallUseStmt: - n = inlconv2stmt(n) + n = inlconv2stmt(ic) case ir.CallUseList: // leave for caller to convert } @@ -627,8 +638,8 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No // that it refers to if statically known. Otherwise, it returns nil. func inlCallee(fn ir.Node) *ir.Func { fn = staticValue(fn) - switch { - case fn.Op() == ir.OMETHEXPR: + switch fn.Op() { + case ir.OMETHEXPR: n := methodExprName(fn) // Check that receiver type matches fn.Left. 
// TODO(mdempsky): Handle implicit dereference @@ -637,9 +648,11 @@ func inlCallee(fn ir.Node) *ir.Func { return nil } return n.Func() - case fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC: - return fn.Func() - case fn.Op() == ir.OCLOSURE: + case ir.ONAME: + if fn.Class() == ir.PFUNC { + return fn.Func() + } + case ir.OCLOSURE: c := fn.Func() caninl(c) return c @@ -650,7 +663,7 @@ func inlCallee(fn ir.Node) *ir.Func { func staticValue(n ir.Node) ir.Node { for { if n.Op() == ir.OCONVNOP { - n = n.Left() + n = n.(*ir.ConvExpr).Left() continue } @@ -665,8 +678,12 @@ func staticValue(n ir.Node) ir.Node { // staticValue1 implements a simple SSA-like optimization. If n is a local variable // that is initialized and never reassigned, staticValue1 returns the initializer // expression. Otherwise, it returns nil. -func staticValue1(n ir.Node) ir.Node { - if n.Op() != ir.ONAME || n.Class() != ir.PAUTO || n.Name().Addrtaken() { +func staticValue1(nn ir.Node) ir.Node { + if nn.Op() != ir.ONAME { + return nil + } + n := nn.(*ir.Name) + if n.Class() != ir.PAUTO || n.Name().Addrtaken() { return nil } @@ -695,7 +712,7 @@ FindRHS: base.Fatalf("RHS is nil: %v", defn) } - if reassigned(n.(*ir.Name)) { + if reassigned(n) { return nil } @@ -757,7 +774,7 @@ var inlgen int // parameters. // The result of mkinlcall MUST be assigned back to n, e.g. // n.Left = mkinlcall(n.Left, fn, isddd) -func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.Node) ir.Node) ir.Node { +func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.Node) ir.Node) ir.Node { if fn.Inl == nil { if logopt.Enabled() { logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(Curfn), @@ -830,8 +847,9 @@ func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool, if n.Op() == ir.OCALLFUNC { callee := n.Left() for callee.Op() == ir.OCONVNOP { - ninit.AppendNodes(callee.PtrInit()) - callee = callee.Left() + conv := callee.(*ir.ConvExpr) + ninit.AppendNodes(conv.PtrInit()) + callee = conv.Left() } if callee.Op() != ir.ONAME && callee.Op() != ir.OCLOSURE && callee.Op() != ir.OMETHEXPR { base.Fatalf("unexpected callee expression: %v", callee) @@ -952,16 +970,17 @@ func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool, as := ir.Nod(ir.OAS2, nil, nil) as.SetColas(true) if n.Op() == ir.OCALLMETH { - if n.Left().Left() == nil { + sel := n.Left().(*ir.SelectorExpr) + if sel.Left() == nil { base.Fatalf("method call without receiver: %+v", n) } - as.PtrRlist().Append(n.Left().Left()) + as.PtrRlist().Append(sel.Left()) } as.PtrRlist().Append(n.List().Slice()...) // For non-dotted calls to variadic functions, we assign the // variadic parameter's temp name separately. 
- var vas ir.Node + var vas *ir.AssignStmt if recv := fn.Type().Recv(); recv != nil { as.PtrList().Append(inlParam(recv, as, inlvars)) @@ -984,14 +1003,15 @@ func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool, } varargs := as.List().Slice()[x:] - vas = ir.Nod(ir.OAS, nil, nil) + vas = ir.NewAssignStmt(base.Pos, nil, nil) vas.SetLeft(inlParam(param, vas, inlvars)) if len(varargs) == 0 { vas.SetRight(nodnil()) vas.Right().SetType(param.Type) } else { - vas.SetRight(ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(param.Type))) - vas.Right().PtrList().Set(varargs) + lit := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(param.Type)) + lit.PtrList().Set(varargs) + vas.SetRight(lit) } } @@ -1229,13 +1249,20 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { typecheckslice(init, ctxStmt) return ir.NewBlockStmt(base.Pos, init) - case ir.OGOTO, ir.OLABEL: - m := ir.Copy(n) + case ir.OGOTO: + m := ir.Copy(n).(*ir.BranchStmt) m.SetPos(subst.updatedPos(m.Pos())) m.PtrInit().Set(nil) p := fmt.Sprintf("%s·%d", n.Sym().Name, inlgen) m.SetSym(lookup(p)) + return m + case ir.OLABEL: + m := ir.Copy(n).(*ir.LabelStmt) + m.SetPos(subst.updatedPos(m.Pos())) + m.PtrInit().Set(nil) + p := fmt.Sprintf("%s·%d", n.Sym().Name, inlgen) + m.SetSym(lookup(p)) return m } @@ -1280,36 +1307,38 @@ func devirtualize(fn *ir.Func) { Curfn = fn ir.VisitList(fn.Body(), func(n ir.Node) { if n.Op() == ir.OCALLINTER { - devirtualizeCall(n) + devirtualizeCall(n.(*ir.CallExpr)) } }) } -func devirtualizeCall(call ir.Node) { - recv := staticValue(call.Left().Left()) - if recv.Op() != ir.OCONVIFACE { +func devirtualizeCall(call *ir.CallExpr) { + sel := call.Left().(*ir.SelectorExpr) + r := staticValue(sel.Left()) + if r.Op() != ir.OCONVIFACE { return } + recv := r.(*ir.ConvExpr) typ := recv.Left().Type() if typ.IsInterface() { return } - dt := ir.NodAt(call.Left().Pos(), ir.ODOTTYPE, call.Left().Left(), nil) + dt := ir.NodAt(sel.Pos(), ir.ODOTTYPE, sel.Left(), nil) dt.SetType(typ) - x := typecheck(nodlSym(call.Left().Pos(), ir.OXDOT, dt, call.Left().Sym()), ctxExpr|ctxCallee) + x := typecheck(nodlSym(sel.Pos(), ir.OXDOT, dt, sel.Sym()), ctxExpr|ctxCallee) switch x.Op() { case ir.ODOTMETH: if base.Flag.LowerM != 0 { - base.WarnfAt(call.Pos(), "devirtualizing %v to %v", call.Left(), typ) + base.WarnfAt(call.Pos(), "devirtualizing %v to %v", sel, typ) } call.SetOp(ir.OCALLMETH) call.SetLeft(x) case ir.ODOTINTER: // Promoted method from embedded interface-typed field (#42279). if base.Flag.LowerM != 0 { - base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", call.Left(), typ) + base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", sel, typ) } call.SetOp(ir.OCALLINTER) call.SetLeft(x) From 42fec2ded44a1bedf739dbc2b33f1b144616ec4c Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 10 Dec 2020 18:46:56 -0500 Subject: [PATCH 178/474] [dev.regabi] cmd/compile: cleanup for concrete types - const An automated rewrite will add concrete type assertions after a test of n.Op(), when n can be safely type-asserted (meaning, n is not reassigned a different type, n is not reassigned and then used outside the scope of the type assertion, and so on). This sequence of CLs handles the code that the automated rewrite does not: adding specific types to function arguments, adjusting code not to call n.Left() etc when n may have multiple representations, and so on. This CL focuses on const.go. Passes buildall w/ toolstash -cmp. 
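A sketch of why shared cases split here: after the rewrite, the logical
ops carry a different concrete type than the arithmetic ops, so a
common nl, nr := n.Left(), n.Right() prologue before the switch no
longer type-checks uniformly. Toy stand-ins below (Op, Node,
BinaryExpr, LogicalExpr, operands are hypothetical, mirroring but not
copying the ir package's split):

    package main

    import "fmt"

    type Op int

    const (
        OADD Op = iota
        OOROR
    )

    type Node interface{ Op() Op }

    type BinaryExpr struct{ X, Y Node }

    func (*BinaryExpr) Op() Op { return OADD }

    type LogicalExpr struct{ X, Y Node }

    func (*LogicalExpr) Op() Op { return OOROR }

    // operands fetches X and Y only after the Op test, asserting the
    // matching concrete type in each case.
    func operands(n Node) (x, y Node) {
        switch n.Op() {
        case OADD:
            n := n.(*BinaryExpr)
            return n.X, n.Y
        case OOROR:
            n := n.(*LogicalExpr)
            return n.X, n.Y
        }
        return nil, nil
    }

    func main() {
        x, y := operands(&LogicalExpr{X: &BinaryExpr{}, Y: &BinaryExpr{}})
        fmt.Println(x != nil, y != nil)
    }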
Change-Id: I824f18fa0344ddde56df0522f9fa5e237114bbe2 Reviewed-on: https://go-review.googlesource.com/c/go/+/277927 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/const.go | 74 +++++++++++++++++++--------- 1 file changed, 50 insertions(+), 24 deletions(-) diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 1ef199c793457..358eefd9bba21 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -162,6 +162,7 @@ func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir break } + n := n.(*ir.UnaryExpr) n.SetLeft(convlit(n.Left(), ot)) if n.Left().Type() == nil { n.SetType(nil) @@ -177,14 +178,24 @@ func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir break } - n.SetLeft(convlit(n.Left(), ot)) - n.SetRight(convlit(n.Right(), ot)) - if n.Left().Type() == nil || n.Right().Type() == nil { + var l, r ir.Node + switch n := n.(type) { + case *ir.BinaryExpr: + n.SetLeft(convlit(n.Left(), ot)) + n.SetRight(convlit(n.Right(), ot)) + l, r = n.Left(), n.Right() + case *ir.LogicalExpr: + n.SetLeft(convlit(n.Left(), ot)) + n.SetRight(convlit(n.Right(), ot)) + l, r = n.Left(), n.Right() + } + + if l.Type() == nil || r.Type() == nil { n.SetType(nil) return n } - if !types.Identical(n.Left().Type(), n.Right().Type()) { - base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, n.Left().Type(), n.Right().Type()) + if !types.Identical(l.Type(), r.Type()) { + base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type()) n.SetType(nil) return n } @@ -435,48 +446,56 @@ var tokenForOp = [...]token.Token{ // Otherwise, evalConst returns a new OLITERAL with the same value as n, // and with .Orig pointing back to n. func evalConst(n ir.Node) ir.Node { - nl, nr := n.Left(), n.Right() - // Pick off just the opcodes that can be constant evaluated. 
- switch op := n.Op(); op { + switch n.Op() { case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT: + nl := n.Left() if nl.Op() == ir.OLITERAL { var prec uint if n.Type().IsUnsigned() { prec = uint(n.Type().Size() * 8) } - return origConst(n, constant.UnaryOp(tokenForOp[op], nl.Val(), prec)) + return origConst(n, constant.UnaryOp(tokenForOp[n.Op()], nl.Val(), prec)) } - case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT, ir.OOROR, ir.OANDAND: + case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT: + nl, nr := n.Left(), n.Right() if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL { rval := nr.Val() // check for divisor underflow in complex division (see issue 20227) - if op == ir.ODIV && n.Type().IsComplex() && constant.Sign(square(constant.Real(rval))) == 0 && constant.Sign(square(constant.Imag(rval))) == 0 { + if n.Op() == ir.ODIV && n.Type().IsComplex() && constant.Sign(square(constant.Real(rval))) == 0 && constant.Sign(square(constant.Imag(rval))) == 0 { base.Errorf("complex division by zero") n.SetType(nil) return n } - if (op == ir.ODIV || op == ir.OMOD) && constant.Sign(rval) == 0 { + if (n.Op() == ir.ODIV || n.Op() == ir.OMOD) && constant.Sign(rval) == 0 { base.Errorf("division by zero") n.SetType(nil) return n } - tok := tokenForOp[op] - if op == ir.ODIV && n.Type().IsInteger() { + tok := tokenForOp[n.Op()] + if n.Op() == ir.ODIV && n.Type().IsInteger() { tok = token.QUO_ASSIGN // integer division } return origConst(n, constant.BinaryOp(nl.Val(), tok, rval)) } + case ir.OOROR, ir.OANDAND: + nl, nr := n.Left(), n.Right() + if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL { + return origConst(n, constant.BinaryOp(nl.Val(), tokenForOp[n.Op()], nr.Val())) + } + case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: + nl, nr := n.Left(), n.Right() if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL { - return origBoolConst(n, constant.Compare(nl.Val(), tokenForOp[op], nr.Val())) + return origBoolConst(n, constant.Compare(nl.Val(), tokenForOp[n.Op()], nr.Val())) } case ir.OLSH, ir.ORSH: + nl, nr := n.Left(), n.Right() if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL { // shiftBound from go/types; "so we can express smallestFloat64" const shiftBound = 1023 - 1 + 52 @@ -486,15 +505,17 @@ func evalConst(n ir.Node) ir.Node { n.SetType(nil) break } - return origConst(n, constant.Shift(toint(nl.Val()), tokenForOp[op], uint(s))) + return origConst(n, constant.Shift(toint(nl.Val()), tokenForOp[n.Op()], uint(s))) } case ir.OCONV, ir.ORUNESTR: + nl := n.Left() if ir.OKForConst[n.Type().Kind()] && nl.Op() == ir.OLITERAL { return origConst(n, convertVal(nl.Val(), n.Type(), true)) } case ir.OCONVNOP: + nl := n.Left() if ir.OKForConst[n.Type().Kind()] && nl.Op() == ir.OLITERAL { // set so n.Orig gets OCONV instead of OCONVNOP n.SetOp(ir.OCONV) @@ -532,21 +553,21 @@ func evalConst(n ir.Node) ir.Node { i2++ } - nl := ir.Copy(n) + nl := ir.Copy(n).(*ir.AddStringExpr) nl.PtrList().Set(s[i:i2]) - nl = origConst(nl, constant.MakeString(strings.Join(strs, ""))) - newList = append(newList, nl) + newList = append(newList, origConst(nl, constant.MakeString(strings.Join(strs, "")))) i = i2 - 1 } else { newList = append(newList, s[i]) } } - n = ir.Copy(n) - n.PtrList().Set(newList) - return n + nn := ir.Copy(n).(*ir.AddStringExpr) + nn.PtrList().Set(newList) + return nn case ir.OCAP, ir.OLEN: + nl := n.Left() switch nl.Type().Kind() { case types.TSTRING: if ir.IsConst(nl, constant.String) { @@ -562,16 +583,19 @@ func evalConst(n ir.Node) ir.Node { 
return origIntConst(n, evalunsafe(n)) case ir.OREAL: + nl := n.Left() if nl.Op() == ir.OLITERAL { return origConst(n, constant.Real(nl.Val())) } case ir.OIMAG: + nl := n.Left() if nl.Op() == ir.OLITERAL { return origConst(n, constant.Imag(nl.Val())) } case ir.OCOMPLEX: + nl, nr := n.Left(), n.Right() if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL { return origConst(n, makeComplex(nl.Val(), nr.Val())) } @@ -829,8 +853,10 @@ type constSetKey struct { // // n must not be an untyped constant. func (s *constSet) add(pos src.XPos, n ir.Node, what, where string) { - if n.Op() == ir.OCONVIFACE && n.Implicit() { - n = n.Left() + if conv := n; conv.Op() == ir.OCONVIFACE { + if conv.Implicit() { + n = conv.Left() + } } if !isGoConst(n) { From dd67b13d07e6324c2b6d3330515c1f1e49fe5a9b Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 10 Dec 2020 18:47:32 -0500 Subject: [PATCH 179/474] [dev.regabi] cmd/compile: cleanup for concrete types - range, select, swt An automated rewrite will add concrete type assertions after a test of n.Op(), when n can be safely type-asserted (meaning, n is not reassigned a different type, n is not reassigned and then used outside the scope of the type assertion, and so on). This sequence of CLs handles the code that the automated rewrite does not: adding specific types to function arguments, adjusting code not to call n.Left() etc when n may have multiple representations, and so on. This CL focuses on range.go, select.go, and swt.go: the big control structures. Passes buildall w/ toolstash -cmp. Change-Id: I033fe056a7b815edb6e8a06f45c12ffd990f4d45 Reviewed-on: https://go-review.googlesource.com/c/go/+/277929 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/range.go | 35 ++++++------ src/cmd/compile/internal/gc/select.go | 81 +++++++++++++++------------ src/cmd/compile/internal/gc/swt.go | 65 +++++++++++---------- src/cmd/compile/internal/gc/walk.go | 3 + 4 files changed, 102 insertions(+), 82 deletions(-) diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index 453f5e2198b6c..90bee4fc74fdf 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -13,7 +13,7 @@ import ( ) // range -func typecheckrange(n ir.Node) { +func typecheckrange(n *ir.RangeStmt) { // Typechecking order is important here: // 0. first typecheck range expression (slice/map/chan), // it is evaluated only once and so logically it is not part of the loop. @@ -39,7 +39,7 @@ func typecheckrange(n ir.Node) { decldepth-- } -func typecheckrangeExpr(n ir.Node) { +func typecheckrangeExpr(n *ir.RangeStmt) { n.SetRight(typecheck(n.Right(), ctxExpr)) t := n.Right().Type() @@ -157,7 +157,7 @@ func cheapComputableIndex(width int64) bool { // simpler forms. The result must be assigned back to n. // Node n may also be modified in place, and may also be // the returned node. -func walkrange(nrange ir.Node) ir.Node { +func walkrange(nrange *ir.RangeStmt) ir.Node { if isMapClear(nrange) { m := nrange.Right() lno := setlineno(m) @@ -204,7 +204,7 @@ func walkrange(nrange ir.Node) ir.Node { base.Fatalf("walkrange: v2 != nil while v1 == nil") } - var ifGuard ir.Node + var ifGuard *ir.IfStmt var body []ir.Node var init []ir.Node @@ -267,7 +267,7 @@ func walkrange(nrange ir.Node) ir.Node { // TODO(austin): OFORUNTIL inhibits bounds-check // elimination on the index variable (see #20711). // Enhance the prove pass to understand this. 
- ifGuard = ir.Nod(ir.OIF, nil, nil) + ifGuard = ir.NewIfStmt(base.Pos, nil, nil, nil) ifGuard.SetLeft(ir.Nod(ir.OLT, hv1, hn)) nfor.SetOp(ir.OFORUNTIL) @@ -426,7 +426,7 @@ func walkrange(nrange ir.Node) ir.Node { if ifGuard != nil { ifGuard.PtrInit().Append(init...) - ifGuard = typecheck(ifGuard, ctxStmt) + ifGuard = typecheck(ifGuard, ctxStmt).(*ir.IfStmt) } else { nfor.PtrInit().Append(init...) } @@ -459,7 +459,7 @@ func walkrange(nrange ir.Node) ir.Node { // } // // where == for keys of map m is reflexive. -func isMapClear(n ir.Node) bool { +func isMapClear(n *ir.RangeStmt) bool { if base.Flag.N != 0 || instrumenting { return false } @@ -488,7 +488,7 @@ func isMapClear(n ir.Node) bool { } m := n.Right() - if !samesafeexpr(stmt.List().First(), m) || !samesafeexpr(stmt.List().Second(), k) { + if delete := stmt.(*ir.CallExpr); !samesafeexpr(delete.List().First(), m) || !samesafeexpr(delete.List().Second(), k) { return false } @@ -508,11 +508,7 @@ func mapClear(m ir.Node) ir.Node { fn := syslook("mapclear") fn = substArgTypes(fn, t.Key(), t.Elem()) n := mkcall1(fn, nil, nil, typename(t), m) - - n = typecheck(n, ctxStmt) - n = walkstmt(n) - - return n + return walkstmt(typecheck(n, ctxStmt)) } // Lower n into runtime·memclr if possible, for @@ -526,7 +522,7 @@ func mapClear(m ir.Node) ir.Node { // in which the evaluation of a is side-effect-free. // // Parameters are as in walkrange: "for v1, v2 = range a". -func arrayClear(loop, v1, v2, a ir.Node) ir.Node { +func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node { if base.Flag.N != 0 || instrumenting { return nil } @@ -539,12 +535,17 @@ func arrayClear(loop, v1, v2, a ir.Node) ir.Node { return nil } - stmt := loop.Body().First() // only stmt in body - if stmt.Op() != ir.OAS || stmt.Left().Op() != ir.OINDEX { + stmt1 := loop.Body().First() // only stmt in body + if stmt1.Op() != ir.OAS { + return nil + } + stmt := stmt1.(*ir.AssignStmt) + if stmt.Left().Op() != ir.OINDEX { return nil } + lhs := stmt.Left().(*ir.IndexExpr) - if !samesafeexpr(stmt.Left().Left(), a) || !samesafeexpr(stmt.Left().Right(), v1) { + if !samesafeexpr(lhs.Left(), a) || !samesafeexpr(lhs.Right(), v1) { return nil } diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go index dd08b77b927f2..a3ce14128cb80 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/gc/select.go @@ -11,15 +11,12 @@ import ( ) // select -func typecheckselect(sel ir.Node) { +func typecheckselect(sel *ir.SelectStmt) { var def ir.Node lno := setlineno(sel) typecheckslice(sel.Init().Slice(), ctxStmt) for _, ncase := range sel.List().Slice() { - if ncase.Op() != ir.OCASE { - setlineno(ncase) - base.Fatalf("typecheckselect %v", ncase.Op()) - } + ncase := ncase.(*ir.CaseStmt) if ncase.List().Len() == 0 { // default @@ -51,8 +48,10 @@ func typecheckselect(sel ir.Node) { // convert x = <-c into OSELRECV(x, <-c). // remove implicit conversions; the eventual assignment // will reintroduce them. 
- if (n.Right().Op() == ir.OCONVNOP || n.Right().Op() == ir.OCONVIFACE) && n.Right().Implicit() { - n.SetRight(n.Right().Left()) + if r := n.Right(); r.Op() == ir.OCONVNOP || r.Op() == ir.OCONVIFACE { + if r.Implicit() { + n.SetRight(r.Left()) + } } if n.Right().Op() != ir.ORECV { base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side") @@ -70,9 +69,10 @@ func typecheckselect(sel ir.Node) { case ir.ORECV: // convert <-c into OSELRECV(_, <-c) - n = ir.NodAt(n.Pos(), ir.OAS, ir.BlankNode, n) - n.SetOp(ir.OSELRECV) - n.SetTypecheck(1) + as := ir.NewAssignStmt(n.Pos(), ir.BlankNode, n) + as.SetOp(ir.OSELRECV) + as.SetTypecheck(1) + n = as ncase.SetLeft(n) case ir.OSEND: @@ -86,7 +86,7 @@ func typecheckselect(sel ir.Node) { base.Pos = lno } -func walkselect(sel ir.Node) { +func walkselect(sel *ir.SelectStmt) { lno := setlineno(sel) if sel.Body().Len() != 0 { base.Fatalf("double walkselect") @@ -95,8 +95,8 @@ func walkselect(sel ir.Node) { init := sel.Init().Slice() sel.PtrInit().Set(nil) - init = append(init, walkselectcases(sel.PtrList())...) - sel.PtrList().Set(nil) + init = append(init, walkselectcases(sel.List())...) + sel.SetList(ir.Nodes{}) sel.PtrBody().Set(init) walkstmtlist(sel.Body().Slice()) @@ -104,7 +104,7 @@ func walkselect(sel ir.Node) { base.Pos = lno } -func walkselectcases(cases *ir.Nodes) []ir.Node { +func walkselectcases(cases ir.Nodes) []ir.Node { ncas := cases.Len() sellineno := base.Pos @@ -115,7 +115,7 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { // optimization: one-case select: single op. if ncas == 1 { - cas := cases.First() + cas := cases.First().(*ir.CaseStmt) setlineno(cas) l := cas.Init().Slice() if cas.Left() != nil { // not default: @@ -130,18 +130,20 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { // already ok case ir.OSELRECV: - if ir.IsBlank(n.Left()) { - n = n.Right() + r := n.(*ir.AssignStmt) + if ir.IsBlank(r.Left()) { + n = r.Right() break } - n.SetOp(ir.OAS) + r.SetOp(ir.OAS) case ir.OSELRECV2: - if ir.IsBlank(n.List().First()) && ir.IsBlank(n.List().Second()) { - n = n.Rlist().First() + r := n.(*ir.AssignListStmt) + if ir.IsBlank(r.List().First()) && ir.IsBlank(r.List().Second()) { + n = r.Rlist().First() break } - n.SetOp(ir.OAS2RECV) + r.SetOp(ir.OAS2RECV) } l = append(l, n) @@ -154,8 +156,9 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { // convert case value arguments to addresses. // this rewrite is used by both the general code and the next optimization. - var dflt ir.Node + var dflt *ir.CaseStmt for _, cas := range cases.Slice() { + cas := cas.(*ir.CaseStmt) setlineno(cas) n := cas.Left() if n == nil { @@ -164,11 +167,14 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { } // Lower x, _ = <-c to x = <-c. - if n.Op() == ir.OSELRECV2 && ir.IsBlank(n.List().Second()) { - n = ir.NodAt(n.Pos(), ir.OAS, n.List().First(), n.Rlist().First()) - n.SetOp(ir.OSELRECV) - n.SetTypecheck(1) - cas.SetLeft(n) + if sel := n; sel.Op() == ir.OSELRECV2 { + if ir.IsBlank(sel.List().Second()) { + as := ir.NewAssignStmt(sel.Pos(), sel.List().First(), sel.Rlist().First()) + as.SetOp(ir.OSELRECV) + as.SetTypecheck(1) + n = as + cas.SetLeft(n) + } } switch n.Op() { @@ -192,9 +198,9 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { // optimization: two-case select but one is default: single non-blocking op. 
if ncas == 2 && dflt != nil { - cas := cases.First() + cas := cases.First().(*ir.CaseStmt) if cas == dflt { - cas = cases.Second() + cas = cases.Second().(*ir.CaseStmt) } n := cas.Left() @@ -213,7 +219,8 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { case ir.OSELRECV: // if selectnbrecv(&v, c) { body } else { default body } - ch := n.Right().Left() + recv := n.Right().(*ir.UnaryExpr) + ch := recv.Left() elem := n.Left() if ir.IsBlank(elem) { elem = nodnil() @@ -222,7 +229,8 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { case ir.OSELRECV2: // if selectnbrecv2(&v, &received, c) { body } else { default body } - ch := n.Rlist().First().Left() + recv := n.Rlist().First().(*ir.UnaryExpr) + ch := recv.Left() elem := n.List().First() if ir.IsBlank(elem) { elem = nodnil() @@ -240,7 +248,7 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { if dflt != nil { ncas-- } - casorder := make([]ir.Node, ncas) + casorder := make([]*ir.CaseStmt, ncas) nsends, nrecvs := 0, 0 var init []ir.Node @@ -263,6 +271,7 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { // register cases for _, cas := range cases.Slice() { + cas := cas.(*ir.CaseStmt) setlineno(cas) init = append(init, cas.Init().Slice()...) @@ -286,12 +295,14 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { case ir.OSELRECV: nrecvs++ i = ncas - nrecvs - c = n.Right().Left() + recv := n.Right().(*ir.UnaryExpr) + c = recv.Left() elem = n.Left() case ir.OSELRECV2: nrecvs++ i = ncas - nrecvs - c = n.Rlist().First().Left() + recv := n.Rlist().First().(*ir.UnaryExpr) + c = recv.Left() elem = n.List().First() } @@ -338,7 +349,7 @@ func walkselectcases(cases *ir.Nodes) []ir.Node { } // dispatch cases - dispatch := func(cond, cas ir.Node) { + dispatch := func(cond ir.Node, cas *ir.CaseStmt) { cond = typecheck(cond, ctxExpr) cond = defaultlit(cond, nil) diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index aa4574d334341..fd76a0a60a9d5 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -15,7 +15,7 @@ import ( ) // typecheckswitch typechecks a switch statement. -func typecheckswitch(n ir.Node) { +func typecheckswitch(n *ir.SwitchStmt) { typecheckslice(n.Init().Slice(), ctxStmt) if n.Left() != nil && n.Left().Op() == ir.OTYPESW { typecheckTypeSwitch(n) @@ -24,24 +24,26 @@ func typecheckswitch(n ir.Node) { } } -func typecheckTypeSwitch(n ir.Node) { - n.Left().SetRight(typecheck(n.Left().Right(), ctxExpr)) - t := n.Left().Right().Type() +func typecheckTypeSwitch(n *ir.SwitchStmt) { + guard := n.Left().(*ir.TypeSwitchGuard) + guard.SetRight(typecheck(guard.Right(), ctxExpr)) + t := guard.Right().Type() if t != nil && !t.IsInterface() { - base.ErrorfAt(n.Pos(), "cannot type switch on non-interface value %L", n.Left().Right()) + base.ErrorfAt(n.Pos(), "cannot type switch on non-interface value %L", guard.Right()) t = nil } // We don't actually declare the type switch's guarded // declaration itself. So if there are no cases, we won't // notice that it went unused. 
- if v := n.Left().Left(); v != nil && !ir.IsBlank(v) && n.List().Len() == 0 { + if v := guard.Left(); v != nil && !ir.IsBlank(v) && n.List().Len() == 0 { base.ErrorfAt(v.Pos(), "%v declared but not used", v.Sym()) } var defCase, nilCase ir.Node var ts typeSet for _, ncase := range n.List().Slice() { + ncase := ncase.(*ir.CaseStmt) ls := ncase.List().Slice() if len(ls) == 0 { // default: if defCase != nil { @@ -60,31 +62,33 @@ func typecheckTypeSwitch(n ir.Node) { var missing, have *types.Field var ptr int - switch { - case ir.IsNil(n1): // case nil: + if ir.IsNil(n1) { // case nil: if nilCase != nil { base.ErrorfAt(ncase.Pos(), "multiple nil cases in type switch (first at %v)", ir.Line(nilCase)) } else { nilCase = ncase } - case n1.Op() != ir.OTYPE: + continue + } + if n1.Op() != ir.OTYPE { base.ErrorfAt(ncase.Pos(), "%L is not a type", n1) - case !n1.Type().IsInterface() && !implements(n1.Type(), t, &missing, &have, &ptr) && !missing.Broke(): + continue + } + if !n1.Type().IsInterface() && !implements(n1.Type(), t, &missing, &have, &ptr) && !missing.Broke() { if have != nil && !have.Broke() { base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+ - " (wrong type for %v method)\n\thave %v%S\n\twant %v%S", n.Left().Right(), n1.Type(), missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) + " (wrong type for %v method)\n\thave %v%S\n\twant %v%S", guard.Right(), n1.Type(), missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) } else if ptr != 0 { base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+ - " (%v method has pointer receiver)", n.Left().Right(), n1.Type(), missing.Sym) + " (%v method has pointer receiver)", guard.Right(), n1.Type(), missing.Sym) } else { base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+ - " (missing %v method)", n.Left().Right(), n1.Type(), missing.Sym) + " (missing %v method)", guard.Right(), n1.Type(), missing.Sym) } + continue } - if n1.Op() == ir.OTYPE { - ts.add(ncase.Pos(), n1.Type()) - } + ts.add(ncase.Pos(), n1.Type()) } if ncase.Rlist().Len() != 0 { @@ -144,7 +148,7 @@ func (s *typeSet) add(pos src.XPos, typ *types.Type) { s.m[ls] = append(prevs, typeSetEntry{pos, typ}) } -func typecheckExprSwitch(n ir.Node) { +func typecheckExprSwitch(n *ir.SwitchStmt) { t := types.Types[types.TBOOL] if n.Left() != nil { n.SetLeft(typecheck(n.Left(), ctxExpr)) @@ -175,6 +179,7 @@ func typecheckExprSwitch(n ir.Node) { var defCase ir.Node var cs constSet for _, ncase := range n.List().Slice() { + ncase := ncase.(*ir.CaseStmt) ls := ncase.List().Slice() if len(ls) == 0 { // default: if defCase != nil { @@ -225,7 +230,7 @@ func typecheckExprSwitch(n ir.Node) { } // walkswitch walks a switch statement. -func walkswitch(sw ir.Node) { +func walkswitch(sw *ir.SwitchStmt) { // Guard against double walk, see #25776. if sw.List().Len() == 0 && sw.Body().Len() > 0 { return // Was fatal, but eliminating every possible source of double-walking is hard @@ -240,7 +245,7 @@ func walkswitch(sw ir.Node) { // walkExprSwitch generates an AST implementing sw. sw is an // expression switch. 
-func walkExprSwitch(sw ir.Node) { +func walkExprSwitch(sw *ir.SwitchStmt) { lno := setlineno(sw) cond := sw.Left() @@ -278,6 +283,7 @@ func walkExprSwitch(sw ir.Node) { var defaultGoto ir.Node var body ir.Nodes for _, ncase := range sw.List().Slice() { + ncase := ncase.(*ir.CaseStmt) label := autolabel(".s") jmp := npos(ncase.Pos(), nodSym(ir.OGOTO, nil, label)) @@ -393,7 +399,7 @@ func (s *exprSwitch) flush() { func(i int) ir.Node { return ir.Nod(ir.OLE, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(runs[i-1]))) }, - func(i int, nif ir.Node) { + func(i int, nif *ir.IfStmt) { run := runs[i] nif.SetLeft(ir.Nod(ir.OEQ, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(run)))) s.search(run, nif.PtrBody()) @@ -428,7 +434,7 @@ func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) { func(i int) ir.Node { return ir.Nod(ir.OLE, s.exprname, cc[i-1].hi) }, - func(i int, nif ir.Node) { + func(i int, nif *ir.IfStmt) { c := &cc[i] nif.SetLeft(c.test(s.exprname)) nif.PtrBody().Set1(c.jmp) @@ -456,7 +462,7 @@ func (c *exprClause) test(exprname ir.Node) ir.Node { return ir.NodAt(c.pos, ir.OEQ, exprname, c.lo) } -func allCaseExprsAreSideEffectFree(sw ir.Node) bool { +func allCaseExprsAreSideEffectFree(sw *ir.SwitchStmt) bool { // In theory, we could be more aggressive, allowing any // side-effect-free expressions in cases, but it's a bit // tricky because some of that information is unavailable due @@ -465,9 +471,7 @@ func allCaseExprsAreSideEffectFree(sw ir.Node) bool { // enough. for _, ncase := range sw.List().Slice() { - if ncase.Op() != ir.OCASE { - base.Fatalf("switch string(byteslice) bad op: %v", ncase.Op()) - } + ncase := ncase.(*ir.CaseStmt) for _, v := range ncase.List().Slice() { if v.Op() != ir.OLITERAL { return false @@ -497,9 +501,9 @@ func hasFall(stmts []ir.Node) (bool, src.XPos) { // walkTypeSwitch generates an AST that implements sw, where sw is a // type switch. -func walkTypeSwitch(sw ir.Node) { +func walkTypeSwitch(sw *ir.SwitchStmt) { var s typeSwitch - s.facename = sw.Left().Right() + s.facename = sw.Left().(*ir.TypeSwitchGuard).Right() sw.SetLeft(nil) s.facename = walkexpr(s.facename, sw.PtrInit()) @@ -541,6 +545,7 @@ func walkTypeSwitch(sw ir.Node) { var defaultGoto, nilGoto ir.Node var body ir.Nodes for _, ncase := range sw.List().Slice() { + ncase := ncase.(*ir.CaseStmt) var caseVar ir.Node if ncase.Rlist().Len() != 0 { caseVar = ncase.Rlist().First() @@ -704,7 +709,7 @@ func (s *typeSwitch) flush() { func(i int) ir.Node { return ir.Nod(ir.OLE, s.hashname, nodintconst(int64(cc[i-1].hash))) }, - func(i int, nif ir.Node) { + func(i int, nif *ir.IfStmt) { // TODO(mdempsky): Omit hash equality check if // there's only one type. c := cc[i] @@ -723,7 +728,7 @@ func (s *typeSwitch) flush() { // // leaf(i, nif) should setup nif (an OIF node) to test case i. In // particular, it should set nif.Left and nif.Nbody. 
-func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i int, nif ir.Node)) { +func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i int, nif *ir.IfStmt)) { const binarySearchMin = 4 // minimum number of cases for binary search var do func(lo, hi int, out *ir.Nodes) @@ -731,7 +736,7 @@ func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i in n := hi - lo if n < binarySearchMin { for i := lo; i < hi; i++ { - nif := ir.Nod(ir.OIF, nil, nil) + nif := ir.NewIfStmt(base.Pos, nil, nil, nil) leaf(i, nif) base.Pos = base.Pos.WithNotStmt() nif.SetLeft(typecheck(nif.Left(), ctxExpr)) diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index cc0b3d847d1ad..f2d93df988314 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -349,14 +349,17 @@ func walkstmt(n ir.Node) ir.Node { return n case ir.OSELECT: + n := n.(*ir.SelectStmt) walkselect(n) return n case ir.OSWITCH: + n := n.(*ir.SwitchStmt) walkswitch(n) return n case ir.ORANGE: + n := n.(*ir.RangeStmt) return walkrange(n) } From 5024396563f9f544a3c6413026cf9b302fd83709 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 10 Dec 2020 18:47:58 -0500 Subject: [PATCH 180/474] [dev.regabi] cmd/compile: cleanup for concrete types - subr An automated rewrite will add concrete type assertions after a test of n.Op(), when n can be safely type-asserted (meaning, n is not reassigned a different type, n is not reassigned and then used outside the scope of the type assertion, and so on). This sequence of CLs handles the code that the automated rewrite does not: adding specific types to function arguments, adjusting code not to call n.Left() etc when n may have multiple representations, and so on. This CL focuses on subr.go. Passes buildall w/ toolstash -cmp. Change-Id: I435082167c91e20a4d490aa5d5945c7454f71d61 Reviewed-on: https://go-review.googlesource.com/c/go/+/277930 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/subr.go | 119 ++++++++++++++++------- src/cmd/compile/internal/gc/typecheck.go | 2 +- 2 files changed, 87 insertions(+), 34 deletions(-) diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 37e49d0544988..e519c57273711 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -555,7 +555,7 @@ func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node { // backingArrayPtrLen extracts the pointer and length from a slice or string. // This constructs two nodes referring to n, so n must be a cheapexpr. 
-func backingArrayPtrLen(n ir.Node) (ptr, len ir.Node) { +func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) { var init ir.Nodes c := cheapexpr(n, &init) if c != n || init.Len() != 0 { @@ -567,9 +567,9 @@ func backingArrayPtrLen(n ir.Node) (ptr, len ir.Node) { } else { ptr.SetType(n.Type().Elem().PtrTo()) } - len = ir.Nod(ir.OLEN, n, nil) - len.SetType(types.Types[types.TINT]) - return ptr, len + length = ir.Nod(ir.OLEN, n, nil) + length.SetType(types.Types[types.TINT]) + return ptr, length } func syslook(name string) ir.Node { @@ -605,6 +605,10 @@ func calcHasCall(n ir.Node) bool { } switch n.Op() { + default: + base.Fatalf("calcHasCall %+v", n) + panic("unreachable") + case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OTYPE: if n.HasCall() { base.Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n) @@ -617,6 +621,7 @@ func calcHasCall(n ir.Node) bool { if instrumenting { return true } + return n.Left().HasCall() || n.Right().HasCall() case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR, ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODIV, ir.OMOD: // These ops might panic, make sure they are done @@ -625,27 +630,68 @@ func calcHasCall(n ir.Node) bool { // When using soft-float, these ops might be rewritten to function calls // so we ensure they are evaluated first. - case ir.OADD, ir.OSUB, ir.ONEG, ir.OMUL: + case ir.OADD, ir.OSUB, ir.OMUL: + if thearch.SoftFloat && (isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) { + return true + } + return n.Left().HasCall() || n.Right().HasCall() + case ir.ONEG: if thearch.SoftFloat && (isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) { return true } + return n.Left().HasCall() case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT: if thearch.SoftFloat && (isFloat[n.Left().Type().Kind()] || isComplex[n.Left().Type().Kind()]) { return true } + return n.Left().HasCall() || n.Right().HasCall() case ir.OCONV: if thearch.SoftFloat && ((isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) || (isFloat[n.Left().Type().Kind()] || isComplex[n.Left().Type().Kind()])) { return true } - } + return n.Left().HasCall() - if n.Left() != nil && n.Left().HasCall() { - return true - } - if n.Right() != nil && n.Right().HasCall() { - return true + case ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOPY, ir.OCOMPLEX, ir.OEFACE: + return n.Left().HasCall() || n.Right().HasCall() + + case ir.OAS: + return n.Left().HasCall() || n.Right() != nil && n.Right().HasCall() + + case ir.OADDR: + return n.Left().HasCall() + case ir.OPAREN: + return n.Left().HasCall() + case ir.OBITNOT, ir.ONOT, ir.OPLUS, ir.ORECV, + ir.OALIGNOF, ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.ONEW, + ir.OOFFSETOF, ir.OPANIC, ir.OREAL, ir.OSIZEOF, + ir.OCHECKNIL, ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.ONEWOBJ, ir.OSPTR, ir.OVARDEF, ir.OVARKILL, ir.OVARLIVE: + return n.Left().HasCall() + case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER: + return n.Left().HasCall() + + case ir.OGETG, ir.OCLOSUREREAD, ir.OMETHEXPR: + return false + + // TODO(rsc): These look wrong in various ways but are what calcHasCall has always done. + case ir.OADDSTR: + // TODO(rsc): This used to check left and right, which are not part of OADDSTR. + return false + case ir.OBLOCK: + // TODO(rsc): Surely the block's statements matter. + return false + case ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.OBYTES2STRTMP, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2BYTESTMP, ir.OSTR2RUNES, ir.ORUNESTR: + // TODO(rsc): Some conversions are themselves calls, no? 
+ return n.Left().HasCall() + case ir.ODOTTYPE2: + // TODO(rsc): Shouldn't this be up with ODOTTYPE above? + return n.Left().HasCall() + case ir.OSLICEHEADER: + // TODO(rsc): What about len and cap? + return n.Left().HasCall() + case ir.OAS2DOTTYPE, ir.OAS2FUNC: + // TODO(rsc): Surely we need to check List and Rlist. + return false } - return false } func badtype(op ir.Op, tl, tr *types.Type) { @@ -727,26 +773,32 @@ func safeexpr(n ir.Node, init *ir.Nodes) ir.Node { case ir.ONAME, ir.OLITERAL, ir.ONIL: return n - case ir.ODOT, ir.OLEN, ir.OCAP: + case ir.OLEN, ir.OCAP: + l := safeexpr(n.Left(), init) + if l == n.Left() { + return n + } + a := ir.Copy(n).(*ir.UnaryExpr) + a.SetLeft(l) + return walkexpr(typecheck(a, ctxExpr), init) + + case ir.ODOT, ir.ODOTPTR: l := safeexpr(n.Left(), init) if l == n.Left() { return n } - r := ir.Copy(n) - r.SetLeft(l) - r = typecheck(r, ctxExpr) - r = walkexpr(r, init) - return r + a := ir.Copy(n).(*ir.SelectorExpr) + a.SetLeft(l) + return walkexpr(typecheck(a, ctxExpr), init) - case ir.ODOTPTR, ir.ODEREF: + case ir.ODEREF: l := safeexpr(n.Left(), init) if l == n.Left() { return n } - a := ir.Copy(n) + a := ir.Copy(n).(*ir.StarExpr) a.SetLeft(l) - a = walkexpr(a, init) - return a + return walkexpr(typecheck(a, ctxExpr), init) case ir.OINDEX, ir.OINDEXMAP: l := safeexpr(n.Left(), init) @@ -754,11 +806,10 @@ func safeexpr(n ir.Node, init *ir.Nodes) ir.Node { if l == n.Left() && r == n.Right() { return n } - a := ir.Copy(n) + a := ir.Copy(n).(*ir.IndexExpr) a.SetLeft(l) a.SetRight(r) - a = walkexpr(a, init) - return a + return walkexpr(typecheck(a, ctxExpr), init) case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT: if isStaticCompositeLiteral(n) { @@ -927,7 +978,7 @@ func dotpath(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) ( // find missing fields that // will give shortest unique addressing. // modify the tree with missing type names. -func adddot(n ir.Node) ir.Node { +func adddot(n *ir.SelectorExpr) *ir.SelectorExpr { n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr)) if n.Left().Diag() { n.SetDiag(true) @@ -950,8 +1001,9 @@ func adddot(n ir.Node) ir.Node { case path != nil: // rebuild elided dots for c := len(path) - 1; c >= 0; c-- { - n.SetLeft(nodSym(ir.ODOT, n.Left(), path[c].field.Sym)) - n.Left().SetImplicit(true) + dot := nodSym(ir.ODOT, n.Left(), path[c].field.Sym) + dot.SetImplicit(true) + n.SetLeft(dot) } case ambig: base.Errorf("ambiguous selector %v", n) @@ -1179,12 +1231,12 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { // value for that function. if !instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !isifacemethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) { // generate tail call: adjust pointer receiver and jump to embedded method. - dot = dot.Left() // skip final .M + left := dot.Left() // skip final .M // TODO(mdempsky): Remove dependency on dotlist. if !dotlist[0].field.Type.IsPtr() { - dot = nodAddr(dot) + left = ir.Nod(ir.OADDR, left, nil) } - as := ir.Nod(ir.OAS, nthis, convnop(dot, rcvr)) + as := ir.Nod(ir.OAS, nthis, convnop(left, rcvr)) fn.PtrBody().Append(as) fn.PtrBody().Append(nodSym(ir.ORETJMP, nil, methodSym(methodrcvr, method.Sym))) } else { @@ -1387,8 +1439,9 @@ func initExpr(init []ir.Node, n ir.Node) ir.Node { } if ir.MayBeShared(n) { // Introduce OCONVNOP to hold init list. 
- n = ir.Nod(ir.OCONVNOP, n, nil) - n.SetType(n.Left().Type()) + old := n + n = ir.Nod(ir.OCONVNOP, old, nil) + n.SetType(old.Type()) n.SetTypecheck(1) } diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index ef1955e88b80e..70f05236c0f6a 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -957,7 +957,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OXDOT, ir.ODOT: n := n.(*ir.SelectorExpr) if n.Op() == ir.OXDOT { - n = adddot(n).(*ir.SelectorExpr) + n = adddot(n) n.SetOp(ir.ODOT) if n.Left() == nil { n.SetType(nil) From be64c8becebace2304e6c16408f6988d1da55900 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 10 Dec 2020 18:48:18 -0500 Subject: [PATCH 181/474] [dev.regabi] cmd/compile: cleanup for concrete types - noder An automated rewrite will add concrete type assertions after a test of n.Op(), when n can be safely type-asserted (meaning, n is not reassigned a different type, n is not reassigned and then used outside the scope of the type assertion, and so on). This sequence of CLs handles the code that the automated rewrite does not: adding specific types to function arguments, adjusting code not to call n.Left() etc when n may have multiple representations, and so on. This CL focuses on noder.go. Passes buildall w/ toolstash -cmp. Change-Id: Ie870126b51558e83c738add8e91a2804ed6d7f92 Reviewed-on: https://go-review.googlesource.com/c/go/+/277931 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/noder.go | 79 ++++++++++++++++------------ test/mainsig.go | 13 +++++ 2 files changed, 58 insertions(+), 34 deletions(-) create mode 100644 test/mainsig.go diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 4c8e56731bee2..43ec2ce35008e 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -527,13 +527,13 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node { if fun.Recv == nil { if name.Name == "init" { name = renameinit() - if t.List().Len() > 0 || t.Rlist().Len() > 0 { + if len(t.Params) > 0 || len(t.Results) > 0 { base.ErrorfAt(f.Pos(), "func init must have no arguments and no return values") } } if types.LocalPkg.Name == "main" && name.Name == "main" { - if t.List().Len() > 0 || t.Rlist().Len() > 0 { + if len(t.Params) > 0 || len(t.Results) > 0 { base.ErrorfAt(f.Pos(), "func main must have no arguments and no return values") } } @@ -983,10 +983,10 @@ func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []ir.Node { for i, stmt := range stmts { s := p.stmtFall(stmt, fallOK && i+1 == len(stmts)) if s == nil { - } else if s.Op() == ir.OBLOCK && s.List().Len() > 0 { + } else if s.Op() == ir.OBLOCK && s.(*ir.BlockStmt).List().Len() > 0 { // Inline non-empty block. // Empty blocks must be preserved for checkreturn. - nodes = append(nodes, s.List().Slice()...) + nodes = append(nodes, s.(*ir.BlockStmt).List().Slice()...) 
} else { nodes = append(nodes, s) } @@ -1020,22 +1020,23 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node { return liststmt(p.decls(stmt.DeclList)) case *syntax.AssignStmt: if stmt.Op != 0 && stmt.Op != syntax.Def { - n := p.nod(stmt, ir.OASOP, p.expr(stmt.Lhs), p.expr(stmt.Rhs)) + n := ir.NewAssignOpStmt(p.pos(stmt), p.binOp(stmt.Op), p.expr(stmt.Lhs), p.expr(stmt.Rhs)) n.SetImplicit(stmt.Rhs == syntax.ImplicitOne) - n.SetSubOp(p.binOp(stmt.Op)) return n } rhs := p.exprList(stmt.Rhs) if list, ok := stmt.Lhs.(*syntax.ListExpr); ok && len(list.ElemList) != 1 || len(rhs) != 1 { n := p.nod(stmt, ir.OAS2, nil, nil) - n.PtrList().Set(p.assignList(stmt.Lhs, n, stmt.Op == syntax.Def)) + n.SetColas(stmt.Op == syntax.Def) + n.PtrList().Set(p.assignList(stmt.Lhs, n, n.Colas())) n.PtrRlist().Set(rhs) return n } n := p.nod(stmt, ir.OAS, nil, nil) - n.SetLeft(p.assignList(stmt.Lhs, n, stmt.Op == syntax.Def)[0]) + n.SetColas(stmt.Op == syntax.Def) + n.SetLeft(p.assignList(stmt.Lhs, n, n.Colas())[0]) n.SetRight(rhs[0]) return n @@ -1110,8 +1111,6 @@ func (p *noder) assignList(expr syntax.Expr, defn ir.Node, colas bool) []ir.Node return p.exprList(expr) } - defn.SetColas(true) - var exprs []syntax.Expr if list, ok := expr.(*syntax.ListExpr); ok { exprs = list.ElemList @@ -1196,27 +1195,30 @@ func (p *noder) ifStmt(stmt *syntax.IfStmt) ir.Node { func (p *noder) forStmt(stmt *syntax.ForStmt) ir.Node { p.openScope(stmt.Pos()) - var n ir.Node if r, ok := stmt.Init.(*syntax.RangeClause); ok { if stmt.Cond != nil || stmt.Post != nil { panic("unexpected RangeClause") } - n = p.nod(r, ir.ORANGE, nil, p.expr(r.X)) + n := p.nod(r, ir.ORANGE, nil, p.expr(r.X)) if r.Lhs != nil { - n.PtrList().Set(p.assignList(r.Lhs, n, r.Def)) - } - } else { - n = p.nod(stmt, ir.OFOR, nil, nil) - if stmt.Init != nil { - n.PtrInit().Set1(p.stmt(stmt.Init)) - } - if stmt.Cond != nil { - n.SetLeft(p.expr(stmt.Cond)) - } - if stmt.Post != nil { - n.SetRight(p.stmt(stmt.Post)) + n.SetColas(r.Def) + n.PtrList().Set(p.assignList(r.Lhs, n, n.Colas())) } + n.PtrBody().Set(p.blockStmt(stmt.Body)) + p.closeAnotherScope() + return n + } + + n := p.nod(stmt, ir.OFOR, nil, nil) + if stmt.Init != nil { + n.PtrInit().Set1(p.stmt(stmt.Init)) + } + if stmt.Cond != nil { + n.SetLeft(p.expr(stmt.Cond)) + } + if stmt.Post != nil { + n.SetRight(p.stmt(stmt.Post)) } n.PtrBody().Set(p.blockStmt(stmt.Body)) p.closeAnotherScope() @@ -1233,9 +1235,9 @@ func (p *noder) switchStmt(stmt *syntax.SwitchStmt) ir.Node { n.SetLeft(p.expr(stmt.Tag)) } - tswitch := n.Left() - if tswitch != nil && tswitch.Op() != ir.OTYPESW { - tswitch = nil + var tswitch *ir.TypeSwitchGuard + if l := n.Left(); l != nil && l.Op() == ir.OTYPESW { + tswitch = l.(*ir.TypeSwitchGuard) } n.PtrList().Set(p.caseClauses(stmt.Body, tswitch, stmt.Rbrace)) @@ -1243,7 +1245,7 @@ func (p *noder) switchStmt(stmt *syntax.SwitchStmt) ir.Node { return n } -func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch ir.Node, rbrace syntax.Pos) []ir.Node { +func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitchGuard, rbrace syntax.Pos) []ir.Node { nodes := make([]ir.Node, 0, len(clauses)) for i, clause := range clauses { p.setlineno(clause) @@ -1328,10 +1330,18 @@ func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node { var ls ir.Node if label.Stmt != nil { // TODO(mdempsky): Should always be present. 
ls = p.stmtFall(label.Stmt, fallOK) - switch label.Stmt.(type) { - case *syntax.ForStmt, *syntax.SwitchStmt, *syntax.SelectStmt: - // Attach label directly to control statement too. - ls.SetSym(sym) + // Attach label directly to control statement too. + if ls != nil { + switch ls.Op() { + case ir.OFOR: + ls.SetSym(sym) + case ir.ORANGE: + ls.SetSym(sym) + case ir.OSWITCH: + ls.SetSym(sym) + case ir.OSELECT: + ls.SetSym(sym) + } } } @@ -1483,8 +1493,9 @@ func (p *noder) wrapname(n syntax.Node, x ir.Node) ir.Node { } fallthrough case ir.ONAME, ir.ONONAME, ir.OPACK: - x = p.nod(n, ir.OPAREN, x, nil) - x.SetImplicit(true) + p := p.nod(n, ir.OPAREN, x, nil) + p.SetImplicit(true) + return p } return x } diff --git a/test/mainsig.go b/test/mainsig.go new file mode 100644 index 0000000000000..d006d9cda3142 --- /dev/null +++ b/test/mainsig.go @@ -0,0 +1,13 @@ +// errorcheck + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func main(int) {} // ERROR "func main must have no arguments and no return values" +func main() int { return 1 } // ERROR "func main must have no arguments and no return values" "main redeclared in this block" + +func init(int) {} // ERROR "func init must have no arguments and no return values" +func init() int { return 1 } // ERROR "func init must have no arguments and no return values" From 9c384e881e28d322b854ac702ce8f052868f5f41 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 10 Dec 2020 18:48:33 -0500 Subject: [PATCH 182/474] [dev.regabi] cmd/compile: cleanup for concrete types - mop-up An automated rewrite will add concrete type assertions after a test of n.Op(), when n can be safely type-asserted (meaning, n is not reassigned a different type, n is not reassigned and then used outside the scope of the type assertion, and so on). This sequence of CLs handles the code that the automated rewrite does not: adding specific types to function arguments, adjusting code not to call n.Left() etc when n may have multiple representations, and so on. This CL handles all the little files that are left. Passes buildall w/ toolstash -cmp. 
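For illustration only (this sketch is not taken from the CL, and
use() is a made-up placeholder), the shape of both the automated
and the manual rewrites is roughly:

    // Before: n is used as a generic ir.Node even after the Op
    // test has pinned down which concrete node it must be.
    if n.Op() == ir.ONAME && n.Class() == ir.PFUNC {
        use(n.Name().Defn)
    }

    // After: assert to the concrete type once the Op is known,
    // so the rest of the code works with *ir.Name directly.
    if n.Op() == ir.ONAME {
        n := n.(*ir.Name)
        if n.Class() == ir.PFUNC {
            use(n.Name().Defn)
        }
    }

The gen.go and scc.go hunks below follow this pattern.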
Change-Id: I6588c92dbbdd37342a77b365d70e02134a033d2a Reviewed-on: https://go-review.googlesource.com/c/go/+/277932 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/align.go | 1 + src/cmd/compile/internal/gc/closure.go | 13 ++++++------ src/cmd/compile/internal/gc/dcl.go | 22 ++++++++++++--------- src/cmd/compile/internal/gc/embed.go | 2 +- src/cmd/compile/internal/gc/export.go | 7 +++---- src/cmd/compile/internal/gc/gen.go | 12 ++++++++++-- src/cmd/compile/internal/gc/init.go | 8 ++++---- src/cmd/compile/internal/gc/initorder.go | 2 +- src/cmd/compile/internal/gc/main.go | 4 ++-- src/cmd/compile/internal/gc/reflect.go | 4 ++-- src/cmd/compile/internal/gc/scc.go | 8 +++++--- src/cmd/compile/internal/gc/subr.go | 2 +- src/cmd/compile/internal/gc/typecheck.go | 2 +- src/cmd/compile/internal/gc/universe.go | 25 ++++++++++++++---------- src/cmd/compile/internal/gc/unsafe.go | 18 ++++++++++------- 15 files changed, 77 insertions(+), 53 deletions(-) diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go index 212e4c46aea80..9944a3a38ae21 100644 --- a/src/cmd/compile/internal/gc/align.go +++ b/src/cmd/compile/internal/gc/align.go @@ -119,6 +119,7 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 { } f.Offset = o if n := ir.AsNode(f.Nname); n != nil { + n := n.Name() // addrescapes has similar code to update these offsets. // Usually addrescapes runs after widstruct, // in which case we could drop this, diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index 954fa1a452e8c..6a3ee45a12353 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -192,7 +192,7 @@ func capturevars(fn *ir.Func) { var outer ir.Node outer = v.Outer - outermost := v.Defn + outermost := v.Defn.(*ir.Name) // out parameters will be assigned to implicitly upon return. if outermost.Class() != ir.PPARAMOUT && !outermost.Name().Addrtaken() && !outermost.Name().Assigned() && v.Type().Width <= 128 { @@ -414,25 +414,26 @@ func walkclosure(clo ir.Node, init *ir.Nodes) ir.Node { return walkexpr(cfn, init) } -func typecheckpartialcall(dot ir.Node, sym *types.Sym) *ir.CallPartExpr { - switch dot.Op() { +func typecheckpartialcall(n ir.Node, sym *types.Sym) *ir.CallPartExpr { + switch n.Op() { case ir.ODOTINTER, ir.ODOTMETH: break default: base.Fatalf("invalid typecheckpartialcall") } + dot := n.(*ir.SelectorExpr) // Create top-level function. fn := makepartialcall(dot, dot.Type(), sym) fn.SetWrapper(true) - return ir.NewCallPartExpr(dot.Pos(), dot.Left(), dot.(*ir.SelectorExpr).Selection, fn) + return ir.NewCallPartExpr(dot.Pos(), dot.Left(), dot.Selection, fn) } // makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed // for partial calls. -func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) *ir.Func { +func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir.Func { rcvrtype := dot.Left().Type() sym := methodSymSuffix(rcvrtype, meth, "-fm") @@ -508,7 +509,7 @@ func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) *ir.Func { // partialCallType returns the struct type used to hold all the information // needed in the closure for n (n must be a OCALLPART node). // The address of a variable of the returned type can be cast to a func. 
-func partialCallType(n ir.Node) *types.Type { +func partialCallType(n *ir.CallPartExpr) *types.Type { t := tostruct([]*ir.Field{ namedfield("F", types.Types[types.TUINTPTR]), namedfield("R", n.Left().Type()), diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index ad2dc99f89082..a2c9edb481044 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -165,10 +165,10 @@ func variter(vl []ir.Node, t ir.Ntype, el []ir.Node) []ir.Node { if Curfn != nil { init = append(init, ir.Nod(ir.ODCL, v, nil)) } - e = ir.Nod(ir.OAS, v, e) - init = append(init, e) - if e.Right() != nil { - v.Defn = e + as := ir.Nod(ir.OAS, v, e) + init = append(init, as) + if e != nil { + v.Defn = as } } } @@ -799,7 +799,7 @@ func makefuncsym(s *types.Sym) { } // setNodeNameFunc marks a node as a function. -func setNodeNameFunc(n ir.Node) { +func setNodeNameFunc(n *ir.Name) { if n.Op() != ir.ONAME || n.Class() != ir.Pxxx { base.Fatalf("expected ONAME/Pxxx node, got %v", n) } @@ -861,12 +861,16 @@ func newNowritebarrierrecChecker() *nowritebarrierrecChecker { return c } -func (c *nowritebarrierrecChecker) findExtraCalls(n ir.Node) { - if n.Op() != ir.OCALLFUNC { +func (c *nowritebarrierrecChecker) findExtraCalls(nn ir.Node) { + if nn.Op() != ir.OCALLFUNC { return } - fn := n.Left() - if fn == nil || fn.Op() != ir.ONAME || fn.Class() != ir.PFUNC || fn.Name().Defn == nil { + n := nn.(*ir.CallExpr) + if n.Left() == nil || n.Left().Op() != ir.ONAME { + return + } + fn := n.Left().(*ir.Name) + if fn.Class() != ir.PFUNC || fn.Name().Defn == nil { return } if !isRuntimePkg(fn.Sym().Pkg) || fn.Sym().Name != "systemstack" { diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go index 7664bde1c5878..b9c88c0d5b24f 100644 --- a/src/cmd/compile/internal/gc/embed.go +++ b/src/cmd/compile/internal/gc/embed.go @@ -110,7 +110,7 @@ func varEmbed(p *noder, names []ir.Node, typ ir.Ntype, exprs []ir.Node, embeds [ } } - v := names[0] + v := names[0].(*ir.Name) if dclcontext != ir.PEXTERN { numLocalEmbed++ v = ir.NewNameAt(v.Pos(), lookupN("embed.", numLocalEmbed)) diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 593dd3b2f83c7..16d45a00aa040 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -74,7 +74,7 @@ func dumpexport(bout *bio.Writer) { } } -func importsym(ipkg *types.Pkg, s *types.Sym, op ir.Op) ir.Node { +func importsym(ipkg *types.Pkg, s *types.Sym, op ir.Op) *ir.Name { n := ir.AsNode(s.PkgDef()) if n == nil { // iimport should have created a stub ONONAME @@ -92,7 +92,7 @@ func importsym(ipkg *types.Pkg, s *types.Sym, op ir.Op) ir.Node { if n.Op() != ir.ONONAME && n.Op() != op { redeclare(base.Pos, s, fmt.Sprintf("during import %q", ipkg.Path)) } - return n + return n.(*ir.Name) } // importtype returns the named type declared by symbol s. 
@@ -102,7 +102,6 @@ func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type { n := importsym(ipkg, s, ir.OTYPE) if n.Op() != ir.OTYPE { t := types.NewNamed(n) - n.SetOp(ir.OTYPE) n.SetPos(pos) n.SetType(t) @@ -121,7 +120,7 @@ func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type { func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) ir.Node { n := importsym(ipkg, s, op) if n.Op() != ir.ONONAME { - if n.Op() == op && (n.Class() != ctxt || !types.Identical(n.Type(), t)) { + if n.Op() == op && (op == ir.ONAME && n.Class() != ctxt || !types.Identical(n.Type(), t)) { redeclare(base.Pos, s, fmt.Sprintf("during import %q", ipkg.Path)) } return nil diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go index 39e94259786bd..25b241e23688a 100644 --- a/src/cmd/compile/internal/gc/gen.go +++ b/src/cmd/compile/internal/gc/gen.go @@ -31,13 +31,21 @@ func sysvar(name string) *obj.LSym { // isParamStackCopy reports whether this is the on-stack copy of a // function parameter that moved to the heap. func isParamStackCopy(n ir.Node) bool { - return n.Op() == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Name().Heapaddr != nil + if n.Op() != ir.ONAME { + return false + } + name := n.(*ir.Name) + return (name.Class() == ir.PPARAM || name.Class() == ir.PPARAMOUT) && name.Heapaddr != nil } // isParamHeapCopy reports whether this is the on-heap copy of // a function parameter that moved to the heap. func isParamHeapCopy(n ir.Node) bool { - return n.Op() == ir.ONAME && n.Class() == ir.PAUTOHEAP && n.Name().Stackcopy != nil + if n.Op() != ir.ONAME { + return false + } + name := n.(*ir.Name) + return name.Class() == ir.PAUTOHEAP && name.Name().Stackcopy != nil } // autotmpname returns the name for an autotmp variable numbered n. diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index 2ef9d1ad3532e..8de4d84f2d6b0 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -48,10 +48,10 @@ func fninit(n []ir.Node) { if n.Op() == ir.ONONAME { continue } - if n.Op() != ir.ONAME || n.Class() != ir.PEXTERN { + if n.Op() != ir.ONAME || n.(*ir.Name).Class() != ir.PEXTERN { base.Fatalf("bad inittask: %v", n) } - deps = append(deps, n.Sym().Linksym()) + deps = append(deps, n.(*ir.Name).Sym().Linksym()) } // Make a function that contains all the initialization statements. @@ -86,10 +86,10 @@ func fninit(n []ir.Node) { // Record user init functions. for i := 0; i < renameinitgen; i++ { s := lookupN("init.", i) - fn := ir.AsNode(s.Def).Name().Defn + fn := ir.AsNode(s.Def).Name().Defn.(*ir.Func) // Skip init functions with empty bodies. 
if fn.Body().Len() == 1 { - if stmt := fn.Body().First(); stmt.Op() == ir.OBLOCK && stmt.List().Len() == 0 { + if stmt := fn.Body().First(); stmt.Op() == ir.OBLOCK && stmt.(*ir.BlockStmt).List().Len() == 0 { continue } } diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go index 7870e00221aac..9a07ca71bd693 100644 --- a/src/cmd/compile/internal/gc/initorder.go +++ b/src/cmd/compile/internal/gc/initorder.go @@ -323,7 +323,7 @@ func (d *initDeps) foundDep(n *ir.Name) { } d.seen.Add(n) if d.transitive && n.Class() == ir.PFUNC { - d.inspectList(n.Defn.Body()) + d.inspectList(n.Defn.(*ir.Func).Body()) } } diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 77b11c5d5d01d..03e787f7180be 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -244,7 +244,7 @@ func Main(archInit func(*Arch)) { timings.Start("fe", "typecheck", "top1") for i := 0; i < len(xtop); i++ { n := xtop[i] - if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.Left().Name().Alias()) { + if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.(*ir.Decl).Left().Name().Alias()) { xtop[i] = typecheck(n, ctxStmt) } } @@ -256,7 +256,7 @@ func Main(archInit func(*Arch)) { timings.Start("fe", "typecheck", "top2") for i := 0; i < len(xtop); i++ { n := xtop[i] - if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.Left().Name().Alias() { + if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).Left().Name().Alias() { xtop[i] = typecheck(n, ctxStmt) } } diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index cfff1baad69d0..615b8bdbf1edf 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -986,7 +986,7 @@ func typenamesym(t *types.Type) *types.Sym { return s } -func typename(t *types.Type) ir.Node { +func typename(t *types.Type) *ir.AddrExpr { s := typenamesym(t) if s.Def == nil { n := ir.NewNameAt(src.NoXPos, s) @@ -1002,7 +1002,7 @@ func typename(t *types.Type) ir.Node { return n } -func itabname(t, itype *types.Type) ir.Node { +func itabname(t, itype *types.Type) *ir.AddrExpr { if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() { base.Fatalf("itabname(%v, %v)", t, itype) } diff --git a/src/cmd/compile/internal/gc/scc.go b/src/cmd/compile/internal/gc/scc.go index fa7af1274b264..6e63d5287a277 100644 --- a/src/cmd/compile/internal/gc/scc.go +++ b/src/cmd/compile/internal/gc/scc.go @@ -101,9 +101,11 @@ func (v *bottomUpVisitor) visit(n *ir.Func) uint32 { } case ir.OCALLPART: fn := ir.AsNode(callpartMethod(n).Nname) - if fn != nil && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC && fn.Name().Defn != nil { - if m := v.visit(fn.Name().Defn.(*ir.Func)); m < min { - min = m + if fn != nil && fn.Op() == ir.ONAME { + if fn := fn.(*ir.Name); fn.Class() == ir.PFUNC && fn.Name().Defn != nil { + if m := v.visit(fn.Name().Defn.(*ir.Func)); m < min { + min = m + } } } case ir.OCLOSURE: diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index e519c57273711..03998b99bee48 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -1234,7 +1234,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { left := dot.Left() // skip final 
.M // TODO(mdempsky): Remove dependency on dotlist. if !dotlist[0].field.Type.IsPtr() { - left = ir.Nod(ir.OADDR, left, nil) + left = nodAddr(left) } as := ir.Nod(ir.OAS, nthis, convnop(left, rcvr)) fn.PtrBody().Append(as) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 70f05236c0f6a..2f3c876c77554 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -2791,7 +2791,7 @@ func pushtype(nn ir.Node, t *types.Type) ir.Node { // For *T, return &T{...}. n.SetRight(ir.TypeNode(t.Elem())) - addr := ir.NodAt(n.Pos(), ir.OADDR, n, nil) + addr := nodAddrAt(n.Pos(), n) addr.SetImplicit(true) return addr } diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index 66ca0d01b347e..21ddc78089de7 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -152,23 +152,27 @@ func initUniverse() { for _, s := range &builtinFuncs { s2 := types.BuiltinPkg.Lookup(s.name) - s2.Def = NewName(s2) - ir.AsNode(s2.Def).SetSubOp(s.op) + def := NewName(s2) + def.SetSubOp(s.op) + s2.Def = def } for _, s := range &unsafeFuncs { s2 := unsafepkg.Lookup(s.name) - s2.Def = NewName(s2) - ir.AsNode(s2.Def).SetSubOp(s.op) + def := NewName(s2) + def.SetSubOp(s.op) + s2.Def = def } s = types.BuiltinPkg.Lookup("true") - s.Def = nodbool(true) - ir.AsNode(s.Def).SetSym(lookup("true")) + b := nodbool(true) + b.(*ir.Name).SetSym(lookup("true")) + s.Def = b s = types.BuiltinPkg.Lookup("false") - s.Def = nodbool(false) - ir.AsNode(s.Def).SetSym(lookup("false")) + b = nodbool(false) + b.(*ir.Name).SetSym(lookup("false")) + s.Def = b s = lookup("_") types.BlankSym = s @@ -187,8 +191,9 @@ func initUniverse() { types.Types[types.TNIL] = types.New(types.TNIL) s = types.BuiltinPkg.Lookup("nil") - s.Def = nodnil() - ir.AsNode(s.Def).SetSym(s) + nnil := nodnil() + nnil.(*ir.NilExpr).SetSym(s) + s.Def = nnil s = types.BuiltinPkg.Lookup("iota") s.Def = ir.NewIota(base.Pos, s) diff --git a/src/cmd/compile/internal/gc/unsafe.go b/src/cmd/compile/internal/gc/unsafe.go index d7ae5d7aaa696..02dd30297554d 100644 --- a/src/cmd/compile/internal/gc/unsafe.go +++ b/src/cmd/compile/internal/gc/unsafe.go @@ -31,18 +31,20 @@ func evalunsafe(n ir.Node) int64 { base.Errorf("invalid expression %v", n) return 0 } + sel := n.Left().(*ir.SelectorExpr) // Remember base of selector to find it back after dot insertion. // Since r->left may be mutated by typechecking, check it explicitly // first to track it correctly. - n.Left().SetLeft(typecheck(n.Left().Left(), ctxExpr)) - sbase := n.Left().Left() + sel.SetLeft(typecheck(sel.Left(), ctxExpr)) + sbase := sel.Left() - n.SetLeft(typecheck(n.Left(), ctxExpr)) - if n.Left().Type() == nil { + tsel := typecheck(sel, ctxExpr) + n.SetLeft(tsel) + if tsel.Type() == nil { return 0 } - switch n.Left().Op() { + switch tsel.Op() { case ir.ODOT, ir.ODOTPTR: break case ir.OCALLPART: @@ -55,7 +57,8 @@ func evalunsafe(n ir.Node) int64 { // Sum offsets for dots until we reach sbase. 
var v int64 - for r := n.Left(); r != sbase; r = r.Left() { + var next ir.Node + for r := tsel; r != sbase; r = next { switch r.Op() { case ir.ODOTPTR: // For Offsetof(s.f), s may itself be a pointer, @@ -68,8 +71,9 @@ func evalunsafe(n ir.Node) int64 { fallthrough case ir.ODOT: v += r.Offset() + next = r.Left() default: - ir.Dump("unsafenmagic", n.Left()) + ir.Dump("unsafenmagic", tsel) base.Fatalf("impossible %v node after dot insertion", r.Op()) } } From 88e1415d0896824e275fd39bd80cca47275358cc Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 14 Dec 2020 13:20:06 -0500 Subject: [PATCH 183/474] [dev.regabi] cmd/compile: add type assertion in regabi test Change-Id: I7da5165f3679736040be5bfbcea3d4a85deaff2e Reviewed-on: https://go-review.googlesource.com/c/go/+/277957 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/abiutilsaux_test.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/cmd/compile/internal/gc/abiutilsaux_test.go b/src/cmd/compile/internal/gc/abiutilsaux_test.go index d90d1d45a0574..5489a512d260c 100644 --- a/src/cmd/compile/internal/gc/abiutilsaux_test.go +++ b/src/cmd/compile/internal/gc/abiutilsaux_test.go @@ -75,10 +75,7 @@ func tokenize(src string) []string { } func verifyParamResultOffset(t *testing.T, f *types.Field, r ABIParamAssignment, which string, idx int) int { - n := ir.AsNode(f.Nname) - if n == nil { - panic("not expected") - } + n := ir.AsNode(f.Nname).(*ir.Name) if n.Offset() != int64(r.Offset) { t.Errorf("%s %d: got offset %d wanted %d t=%v", which, idx, r.Offset, n.Offset(), f.Type) From 0328c3b660bda2c4e72d0bc0f7b8058b780c9e19 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Mon, 7 Dec 2020 03:24:04 +0700 Subject: [PATCH 184/474] [dev.regabi] cmd/compile: use OSELRECV2 for all <-c variants OSELRECV2 can represent all possible receive clauses that can appear in a select statement, and it simplifies later code, so use it instead. Follow up CL will remove OSELRECV. Passes buildall w/ toolstash -cmp. Change-Id: Ibbdae45287ffd888acd8dc89ca8d99e454277cd1 Reviewed-on: https://go-review.googlesource.com/c/go/+/275458 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/escape.go | 3 - src/cmd/compile/internal/gc/order.go | 115 +++++++++----------------- src/cmd/compile/internal/gc/select.go | 78 ++++++----------- 3 files changed, 62 insertions(+), 134 deletions(-) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index d009a55a96b0d..5124af945efea 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -399,9 +399,6 @@ func (e *Escape) stmt(n ir.Node) { e.stmt(cas.Left()) e.block(cas.Body()) } - case ir.OSELRECV: - n := n.(*ir.AssignStmt) - e.assign(n.Left(), n.Right(), "selrecv", n) case ir.OSELRECV2: n := n.(*ir.AssignListStmt) e.assign(n.List().First(), n.Rlist().First(), "selrecv", n) diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index b0a9c9be3e133..0034556995b3b 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -872,15 +872,14 @@ func (o *Order) stmt(n ir.Node) { // give this away). 
case ir.OSELECT: t := o.markTemp() - - for _, cas := range n.List().Slice() { - cas := cas.(*ir.CaseStmt) - r := cas.Left() - setlineno(cas) + for _, ncas := range n.List().Slice() { + ncas := ncas.(*ir.CaseStmt) + r := ncas.Left() + setlineno(ncas) // Append any new body prologue to ninit. // The next loop will insert ninit into nbody. - if cas.Init().Len() != 0 { + if ncas.Init().Len() != 0 { base.Fatalf("order select ninit") } if r == nil { @@ -891,84 +890,48 @@ func (o *Order) stmt(n ir.Node) { ir.Dump("select case", r) base.Fatalf("unknown op in select %v", r.Op()) - case ir.OSELRECV, ir.OSELRECV2: - var dst, ok ir.Node - var recv *ir.UnaryExpr - var def bool - if r.Op() == ir.OSELRECV { - // case x = <-c - // case <-c (dst is ir.BlankNode) - def, dst, ok, recv = r.Colas(), r.Left(), ir.BlankNode, r.Right().(*ir.UnaryExpr) - } else { - r := r.(*ir.AssignListStmt) - // case x, ok = <-c - def, dst, ok, recv = r.Colas(), r.List().First(), r.List().Second(), r.Rlist().First().(*ir.UnaryExpr) - } - - // If this is case x := <-ch or case x, y := <-ch, the case has - // the ODCL nodes to declare x and y. We want to delay that - // declaration (and possible allocation) until inside the case body. - // Delete the ODCL nodes here and recreate them inside the body below. - if def { - init := r.Init().Slice() - if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).Left() == dst { - init = init[1:] - } - if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).Left() == ok { - init = init[1:] - } - r.PtrInit().Set(init) - } - if r.Init().Len() != 0 { - ir.DumpList("ninit", r.Init()) - base.Fatalf("ninit on select recv") - } - + case ir.OSELRECV2: + // case x, ok = <-c + recv := r.Rlist().First().(*ir.UnaryExpr) recv.SetLeft(o.expr(recv.Left(), nil)) if recv.Left().Op() != ir.ONAME { recv.SetLeft(o.copyExpr(recv.Left())) } - - // Introduce temporary for receive and move actual copy into case body. - // avoids problems with target being addressed, as usual. - // NOTE: If we wanted to be clever, we could arrange for just one - // temporary per distinct type, sharing the temp among all receives - // with that temp. Similarly one ok bool could be shared among all - // the x,ok receives. Not worth doing until there's a clear need. - if !ir.IsBlank(dst) { - // use channel element type for temporary to avoid conversions, - // such as in case interfacevalue = <-intchan. - // the conversion happens in the OAS instead. - if def { - dcl := ir.Nod(ir.ODCL, dst, nil) - cas.PtrInit().Append(typecheck(dcl, ctxStmt)) + r := r.(*ir.AssignListStmt) + init := r.PtrInit().Slice() + r.PtrInit().Set(nil) + + colas := r.Colas() + do := func(i int, t *types.Type) { + n := r.List().Index(i) + if ir.IsBlank(n) { + return } - - tmp := o.newTemp(recv.Left().Type().Elem(), recv.Left().Type().Elem().HasPointers()) - as := ir.Nod(ir.OAS, dst, tmp) - cas.PtrInit().Append(typecheck(as, ctxStmt)) - dst = tmp - } - if !ir.IsBlank(ok) { - if def { - dcl := ir.Nod(ir.ODCL, ok, nil) - cas.PtrInit().Append(typecheck(dcl, ctxStmt)) + // If this is case x := <-ch or case x, y := <-ch, the case has + // the ODCL nodes to declare x and y. We want to delay that + // declaration (and possible allocation) until inside the case body. + // Delete the ODCL nodes here and recreate them inside the body below. 
+ if colas { + if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).Left() == n { + init = init[1:] + } + dcl := ir.Nod(ir.ODCL, n, nil) + dcl = typecheck(dcl, ctxStmt) + ncas.PtrInit().Append(dcl) } - - tmp := o.newTemp(types.Types[types.TBOOL], false) - as := ir.Nod(ir.OAS, ok, conv(tmp, ok.Type())) - cas.PtrInit().Append(typecheck(as, ctxStmt)) - ok = tmp + tmp := o.newTemp(t, t.HasPointers()) + as := ir.Nod(ir.OAS, n, conv(tmp, n.Type())) + as = typecheck(as, ctxStmt) + ncas.PtrInit().Append(as) + r.PtrList().SetIndex(i, tmp) } - - if r.Op() == ir.OSELRECV { - r.SetLeft(dst) - } else { - r := r.(*ir.AssignListStmt) - r.List().SetIndex(0, dst) - r.List().SetIndex(1, ok) + do(0, recv.Left().Type().Elem()) + do(1, types.Types[types.TBOOL]) + if len(init) != 0 { + ir.DumpList("ninit", r.Init()) + base.Fatalf("ninit on select recv") } - orderBlock(cas.PtrInit(), o.free) + orderBlock(ncas.PtrInit(), o.free) case ir.OSEND: if r.Init().Len() != 0 { diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go index a3ce14128cb80..c017b8e29aa31 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/gc/select.go @@ -32,6 +32,14 @@ func typecheckselect(sel *ir.SelectStmt) { n := ncase.List().First() ncase.SetLeft(n) ncase.PtrList().Set(nil) + oselrecv2 := func(dst, recv ir.Node, colas bool) { + n := ir.NodAt(n.Pos(), ir.OSELRECV2, nil, nil) + n.PtrList().Set2(dst, ir.BlankNode) + n.PtrRlist().Set1(recv) + n.SetColas(colas) + n.SetTypecheck(1) + ncase.SetLeft(n) + } switch n.Op() { default: pos := n.Pos() @@ -45,7 +53,7 @@ func typecheckselect(sel *ir.SelectStmt) { base.ErrorfAt(pos, "select case must be receive, send or assign recv") case ir.OAS: - // convert x = <-c into OSELRECV(x, <-c). + // convert x = <-c into x, _ = <-c // remove implicit conversions; the eventual assignment // will reintroduce them. if r := n.Right(); r.Op() == ir.OCONVNOP || r.Op() == ir.OCONVIFACE { @@ -57,10 +65,9 @@ func typecheckselect(sel *ir.SelectStmt) { base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side") break } - n.SetOp(ir.OSELRECV) + oselrecv2(n.Left(), n.Right(), n.Colas()) case ir.OAS2RECV: - // convert x, ok = <-c into OSELRECV2(x, <-c) with ntest=ok if n.Rlist().First().Op() != ir.ORECV { base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side") break @@ -68,12 +75,8 @@ func typecheckselect(sel *ir.SelectStmt) { n.SetOp(ir.OSELRECV2) case ir.ORECV: - // convert <-c into OSELRECV(_, <-c) - as := ir.NewAssignStmt(n.Pos(), ir.BlankNode, n) - as.SetOp(ir.OSELRECV) - as.SetTypecheck(1) - n = as - ncase.SetLeft(n) + // convert <-c into _, _ = <-c + oselrecv2(ir.BlankNode, n, false) case ir.OSEND: break @@ -129,14 +132,6 @@ func walkselectcases(cases ir.Nodes) []ir.Node { case ir.OSEND: // already ok - case ir.OSELRECV: - r := n.(*ir.AssignStmt) - if ir.IsBlank(r.Left()) { - n = r.Right() - break - } - r.SetOp(ir.OAS) - case ir.OSELRECV2: r := n.(*ir.AssignListStmt) if ir.IsBlank(r.List().First()) && ir.IsBlank(r.List().Second()) { @@ -165,29 +160,11 @@ func walkselectcases(cases ir.Nodes) []ir.Node { dflt = cas continue } - - // Lower x, _ = <-c to x = <-c. 
- if sel := n; sel.Op() == ir.OSELRECV2 { - if ir.IsBlank(sel.List().Second()) { - as := ir.NewAssignStmt(sel.Pos(), sel.List().First(), sel.Rlist().First()) - as.SetOp(ir.OSELRECV) - as.SetTypecheck(1) - n = as - cas.SetLeft(n) - } - } - switch n.Op() { case ir.OSEND: n.SetRight(nodAddr(n.Right())) n.SetRight(typecheck(n.Right(), ctxExpr)) - case ir.OSELRECV: - if !ir.IsBlank(n.Left()) { - n.SetLeft(nodAddr(n.Left())) - n.SetLeft(typecheck(n.Left(), ctxExpr)) - } - case ir.OSELRECV2: if !ir.IsBlank(n.List().First()) { n.List().SetIndex(0, nodAddr(n.List().First())) @@ -217,26 +194,23 @@ func walkselectcases(cases ir.Nodes) []ir.Node { ch := n.Left() call = mkcall1(chanfn("selectnbsend", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), ch, n.Right()) - case ir.OSELRECV: - // if selectnbrecv(&v, c) { body } else { default body } - recv := n.Right().(*ir.UnaryExpr) - ch := recv.Left() - elem := n.Left() - if ir.IsBlank(elem) { - elem = nodnil() - } - call = mkcall1(chanfn("selectnbrecv", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, ch) - case ir.OSELRECV2: - // if selectnbrecv2(&v, &received, c) { body } else { default body } recv := n.Rlist().First().(*ir.UnaryExpr) ch := recv.Left() elem := n.List().First() if ir.IsBlank(elem) { elem = nodnil() } - receivedp := typecheck(nodAddr(n.List().Second()), ctxExpr) - call = mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch) + if ir.IsBlank(n.List().Second()) { + // if selectnbrecv(&v, c) { body } else { default body } + call = mkcall1(chanfn("selectnbrecv", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, ch) + } else { + // TODO(cuonglm): make this use selectnbrecv() + // if selectnbrecv2(&v, &received, c) { body } else { default body } + receivedp := ir.Nod(ir.OADDR, n.List().Second(), nil) + receivedp = typecheck(receivedp, ctxExpr) + call = mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch) + } } r.SetLeft(typecheck(call, ctxExpr)) @@ -292,12 +266,6 @@ func walkselectcases(cases ir.Nodes) []ir.Node { nsends++ c = n.Left() elem = n.Right() - case ir.OSELRECV: - nrecvs++ - i = ncas - nrecvs - recv := n.Right().(*ir.UnaryExpr) - c = recv.Left() - elem = n.Left() case ir.OSELRECV2: nrecvs++ i = ncas - nrecvs @@ -355,7 +323,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node { r := ir.Nod(ir.OIF, cond, nil) - if n := cas.Left(); n != nil && n.Op() == ir.OSELRECV2 { + if n := cas.Left(); n != nil && n.Op() == ir.OSELRECV2 && !ir.IsBlank(n.List().Second()) { x := ir.Nod(ir.OAS, n.List().Second(), recvOK) r.PtrBody().Append(typecheck(x, ctxStmt)) } From aeedc9f804e929a8a1c4340f3306b5ef6df8d850 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Mon, 7 Dec 2020 10:51:44 +0700 Subject: [PATCH 185/474] [dev.regabi] cmd/compile: remove OSELRECV Previous CL uses OSELRECV2 instead of OSELRECV, this CL removes it. Make this a separated CL as it's not safe for toolstash. 
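As a reference for this deletion, here is a sketch (a summary in
source form, not text from either CL) of the normalization that the
previous CL performs in typecheckselect, which leaves OSELRECV unused:

    select {
    case <-c: // was OSELRECV with a blank destination; now OSELRECV2: _, _ = <-c
    case x = <-c: // was OSELRECV; now OSELRECV2: x, _ = <-c
    case x, ok = <-c: // already OSELRECV2
    }

With every receive clause represented as OSELRECV2, the OSELRECV
constant and its case in AssignStmt.SetOp can be removed, and
op_string.go is regenerated accordingly.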
Change-Id: I530ba33fd9311904545e40fe147829af629cf4a8 Reviewed-on: https://go-review.googlesource.com/c/go/+/275459 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Russ Cox --- src/cmd/compile/internal/ir/node.go | 9 +- src/cmd/compile/internal/ir/op_string.go | 105 +++++++++++------------ src/cmd/compile/internal/ir/stmt.go | 2 +- 3 files changed, 55 insertions(+), 61 deletions(-) diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 0e73731070f72..fe6dafe8592c9 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -274,7 +274,6 @@ const ( ORECOVER // recover() ORECV // <-Left ORUNESTR // Type(Left) (Type is string, Left is rune) - OSELRECV // like OAS: Left = Right where Right.Op = ORECV (appears as .Left of OCASE) OSELRECV2 // like OAS2: List = Rlist where len(List)=2, len(Rlist)=1, Rlist[0].Op = ORECV (appears as .Left of OCASE) OIOTA // iota OREAL // real(Left) @@ -666,12 +665,8 @@ func NodAt(pos src.XPos, op Op, nleft, nright Node) Node { typ = nright.(Ntype) } return NewCompLitExpr(pos, op, typ, nil) - case OAS, OSELRECV: - n := NewAssignStmt(pos, nleft, nright) - if op != OAS { - n.SetOp(op) - } - return n + case OAS: + return NewAssignStmt(pos, nleft, nright) case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV, OSELRECV2: n := NewAssignListStmt(pos, op, nil, nil) return n diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go index bb5e16fbbc9e0..33b177d64fb83 100644 --- a/src/cmd/compile/internal/ir/op_string.go +++ b/src/cmd/compile/internal/ir/op_string.go @@ -111,62 +111,61 @@ func _() { _ = x[ORECOVER-100] _ = x[ORECV-101] _ = x[ORUNESTR-102] - _ = x[OSELRECV-103] - _ = x[OSELRECV2-104] - _ = x[OIOTA-105] - _ = x[OREAL-106] - _ = x[OIMAG-107] - _ = x[OCOMPLEX-108] - _ = x[OALIGNOF-109] - _ = x[OOFFSETOF-110] - _ = x[OSIZEOF-111] - _ = x[OMETHEXPR-112] - _ = x[OSTMTEXPR-113] - _ = x[OBLOCK-114] - _ = x[OBREAK-115] - _ = x[OCASE-116] - _ = x[OCONTINUE-117] - _ = x[ODEFER-118] - _ = x[OFALL-119] - _ = x[OFOR-120] - _ = x[OFORUNTIL-121] - _ = x[OGOTO-122] - _ = x[OIF-123] - _ = x[OLABEL-124] - _ = x[OGO-125] - _ = x[ORANGE-126] - _ = x[ORETURN-127] - _ = x[OSELECT-128] - _ = x[OSWITCH-129] - _ = x[OTYPESW-130] - _ = x[OTCHAN-131] - _ = x[OTMAP-132] - _ = x[OTSTRUCT-133] - _ = x[OTINTER-134] - _ = x[OTFUNC-135] - _ = x[OTARRAY-136] - _ = x[OTSLICE-137] - _ = x[OINLCALL-138] - _ = x[OEFACE-139] - _ = x[OITAB-140] - _ = x[OIDATA-141] - _ = x[OSPTR-142] - _ = x[OCLOSUREREAD-143] - _ = x[OCFUNC-144] - _ = x[OCHECKNIL-145] - _ = x[OVARDEF-146] - _ = x[OVARKILL-147] - _ = x[OVARLIVE-148] - _ = x[ORESULT-149] - _ = x[OINLMARK-150] - _ = x[ORETJMP-151] - _ = x[OGETG-152] - _ = x[OEND-153] + _ = x[OSELRECV2-103] + _ = x[OIOTA-104] + _ = x[OREAL-105] + _ = x[OIMAG-106] + _ = x[OCOMPLEX-107] + _ = x[OALIGNOF-108] + _ = x[OOFFSETOF-109] + _ = x[OSIZEOF-110] + _ = x[OMETHEXPR-111] + _ = x[OSTMTEXPR-112] + _ = x[OBLOCK-113] + _ = x[OBREAK-114] + _ = x[OCASE-115] + _ = x[OCONTINUE-116] + _ = x[ODEFER-117] + _ = x[OFALL-118] + _ = x[OFOR-119] + _ = x[OFORUNTIL-120] + _ = x[OGOTO-121] + _ = x[OIF-122] + _ = x[OLABEL-123] + _ = x[OGO-124] + _ = x[ORANGE-125] + _ = x[ORETURN-126] + _ = x[OSELECT-127] + _ = x[OSWITCH-128] + _ = x[OTYPESW-129] + _ = x[OTCHAN-130] + _ = x[OTMAP-131] + _ = x[OTSTRUCT-132] + _ = x[OTINTER-133] + _ = x[OTFUNC-134] + _ = x[OTARRAY-135] + _ = x[OTSLICE-136] + _ = x[OINLCALL-137] + _ = x[OEFACE-138] + _ = x[OITAB-139] + _ 
= x[OIDATA-140] + _ = x[OSPTR-141] + _ = x[OCLOSUREREAD-142] + _ = x[OCFUNC-143] + _ = x[OCHECKNIL-144] + _ = x[OVARDEF-145] + _ = x[OVARKILL-146] + _ = x[OVARLIVE-147] + _ = x[ORESULT-148] + _ = x[OINLMARK-149] + _ = x[ORETJMP-150] + _ = x[OGETG-151] + _ = x[OEND-152] } -const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCLOSUREREADCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND" +const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCLOSUREREADCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND" -var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 477, 480, 486, 490, 493, 497, 502, 507, 513, 518, 522, 527, 535, 543, 549, 558, 569, 576, 580, 587, 594, 602, 606, 610, 614, 621, 628, 636, 642, 650, 658, 663, 668, 672, 680, 685, 689, 692, 700, 704, 706, 711, 713, 718, 724, 730, 736, 742, 747, 751, 758, 764, 769, 775, 781, 788, 793, 797, 802, 806, 817, 822, 830, 836, 843, 850, 856, 863, 869, 873, 876} +var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 477, 480, 486, 490, 493, 497, 502, 507, 513, 518, 522, 527, 535, 543, 549, 558, 569, 576, 580, 587, 595, 599, 603, 607, 614, 621, 629, 635, 643, 651, 656, 661, 665, 673, 678, 682, 685, 693, 697, 699, 704, 706, 711, 717, 723, 729, 735, 740, 744, 751, 757, 762, 768, 774, 781, 786, 790, 
795, 799, 810, 815, 823, 829, 836, 843, 849, 856, 862, 866, 869} func (i Op) String() string { if i >= Op(len(_Op_index)-1) { diff --git a/src/cmd/compile/internal/ir/stmt.go index 0302ffcc949ec..4dd1733074868 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -122,7 +122,7 @@ func (n *AssignStmt) SetOp(op Op) { switch op { default: panic(n.no("SetOp " + op.String())) - case OAS, OSELRECV: + case OAS: n.op = op } } From 0b9cb63b8df352e2cb34b32452d9645ae621f9a1 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 16 Dec 2020 10:53:20 -0500 Subject: [PATCH 186/474] [dev.regabi] cmd/compile: rename ir.Find to ir.Any and update uses ir.Find is called "any" in C#, Dart, Haskell, Python, R, Ruby, and Rust, and "any_of" in C++, "anyMatch" in Java, "some" in JavaScript, "exists" in OCaml, and "existsb" in Coq. (Thanks to Matthew Dempsky for the research.) This CL changes Find to Any to use the mostly standard name. It also updates wrapper helpers to use the any terminology: hasCall -> anyCall hasCallOrChan -> anyCallOrChan hasSideEffects -> anySideEffects Unchanged are "hasNamedResults", "hasUniquePos", and "hasDefaultCase", which are all about a single node, not any node in the IR tree. I also renamed hasFall to endsInFallthrough, since its semantics are neither that of "any" nor that of the remaining "has" functions. So the new terminology helps separate different kinds of predicates nicely. Change-Id: I9bb3c9ebf060a30447224be09a5c34ad5244ea0d Reviewed-on: https://go-review.googlesource.com/c/go/+/278912 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/alg.go | 6 ++--- src/cmd/compile/internal/gc/const.go | 8 +++--- src/cmd/compile/internal/gc/inl.go | 4 +-- src/cmd/compile/internal/gc/sinit.go | 4 +-- src/cmd/compile/internal/gc/swt.go | 6 ++--- src/cmd/compile/internal/gc/walk.go | 6 ++--- src/cmd/compile/internal/ir/visit.go | 40 ++++++++++++++-------------- 7 files changed, 37 insertions(+), 37 deletions(-) diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index 3938dce46cebc..3ada2581f711b 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -741,7 +741,7 @@ func geneq(t *types.Type) *obj.LSym { // return (or goto ret) fn.PtrBody().Append(nodSym(ir.OLABEL, nil, neq)) fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(false))) - if EqCanPanic(t) || hasCall(fn) { + if EqCanPanic(t) || anyCall(fn) { // Epilogue is large, so share it with the equal case. fn.PtrBody().Append(nodSym(ir.OGOTO, nil, ret)) } else { @@ -782,8 +782,8 @@ func geneq(t *types.Type) *obj.LSym { return closure } -func hasCall(fn *ir.Func) bool { - return ir.Find(fn, func(n ir.Node) bool { +func anyCall(fn *ir.Func) bool { + return ir.Any(fn, func(n ir.Node) bool { // TODO(rsc): No methods?
op := n.Op() return op == ir.OCALL || op == ir.OCALLFUNC diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 358eefd9bba21..f8e60ea0a3250 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -574,7 +574,7 @@ func evalConst(n ir.Node) ir.Node { return origIntConst(n, int64(len(ir.StringVal(nl)))) } case types.TARRAY: - if !hasCallOrChan(nl) { + if !anyCallOrChan(nl) { return origIntConst(n, nl.Type().NumElem()) } } @@ -803,9 +803,9 @@ func isGoConst(n ir.Node) bool { return n.Op() == ir.OLITERAL } -// hasCallOrChan reports whether n contains any calls or channel operations. -func hasCallOrChan(n ir.Node) bool { - return ir.Find(n, func(n ir.Node) bool { +// anyCallOrChan reports whether n contains any calls or channel operations. +func anyCallOrChan(n ir.Node) bool { + return ir.Any(n, func(n ir.Node) bool { switch n.Op() { case ir.OAPPEND, ir.OCALL, diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index e940e416fd3eb..8467c20833589 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -465,7 +465,7 @@ func (v *hairyVisitor) doNode(n ir.Node) error { func isBigFunc(fn *ir.Func) bool { budget := inlineBigFunctionNodes - return ir.Find(fn, func(n ir.Node) bool { + return ir.Any(fn, func(n ir.Node) bool { budget-- return budget <= 0 }) @@ -733,7 +733,7 @@ func reassigned(name *ir.Name) bool { if name.Curfn == nil { return true } - return ir.Find(name.Curfn, func(n ir.Node) bool { + return ir.Any(name.Curfn, func(n ir.Node) bool { switch n.Op() { case ir.OAS: if n.Left() == name && n != name.Defn { diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 14ff853ee5e62..6d7a8bc5c9118 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -60,7 +60,7 @@ func (s *InitSchedule) tryStaticInit(n ir.Node) bool { if n.Op() != ir.OAS { return false } - if ir.IsBlank(n.Left()) && !hasSideEffects(n.Right()) { + if ir.IsBlank(n.Left()) && !anySideEffects(n.Right()) { // Discard. return true } @@ -546,7 +546,7 @@ func fixedlit(ctxt initContext, kind initKind, n ir.Node, var_ ir.Node, init *ir for _, r := range n.List().Slice() { a, value := splitnode(r) - if a == ir.BlankNode && !hasSideEffects(value) { + if a == ir.BlankNode && !anySideEffects(value) { // Discard. continue } diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index fd76a0a60a9d5..882feb47cc61f 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -302,7 +302,7 @@ func walkExprSwitch(sw *ir.SwitchStmt) { // Process body. body.Append(npos(ncase.Pos(), nodSym(ir.OLABEL, nil, label))) body.Append(ncase.Body().Slice()...) - if fall, pos := hasFall(ncase.Body().Slice()); !fall { + if fall, pos := endsInFallthrough(ncase.Body().Slice()); !fall { br := ir.Nod(ir.OBREAK, nil, nil) br.SetPos(pos) body.Append(br) @@ -481,8 +481,8 @@ func allCaseExprsAreSideEffectFree(sw *ir.SwitchStmt) bool { return true } -// hasFall reports whether stmts ends with a "fallthrough" statement. -func hasFall(stmts []ir.Node) (bool, src.XPos) { +// endsInFallthrough reports whether stmts ends with a "fallthrough" statement. +func endsInFallthrough(stmts []ir.Node) (bool, src.XPos) { // Search backwards for the index of the fallthrough // statement. Do not assume it'll be in the last // position, since in some cases (e.g. 
when the statement diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index f2d93df988314..420edd5694968 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -3765,9 +3765,9 @@ func usefield(n ir.Node) { Curfn.FieldTrack[sym] = struct{}{} } -// hasSideEffects reports whether n contains any operations that could have observable side effects. -func hasSideEffects(n ir.Node) bool { - return ir.Find(n, func(n ir.Node) bool { +// anySideEffects reports whether n contains any operations that could have observable side effects. +func anySideEffects(n ir.Node) bool { + return ir.Any(n, func(n ir.Node) bool { switch n.Op() { // Assume side effects unless we know otherwise. default: diff --git a/src/cmd/compile/internal/ir/visit.go b/src/cmd/compile/internal/ir/visit.go index bc2b8083ba298..3f5af4ea0ecb8 100644 --- a/src/cmd/compile/internal/ir/visit.go +++ b/src/cmd/compile/internal/ir/visit.go @@ -71,16 +71,16 @@ import ( // } // } // -// The Find function illustrates a different simplification of the pattern, +// The Any function illustrates a different simplification of the pattern, // visiting each node and then its children, recursively, until finding -// a node x for which find(x) returns true, at which point the entire +// a node x for which cond(x) returns true, at which point the entire // traversal stops and returns true. // -// func Find(n ir.Node, find func(ir.Node)) bool { +// func Any(n ir.Node, find cond(ir.Node)) bool { // stop := errors.New("stop") // var do func(ir.Node) error // do = func(x ir.Node) error { -// if find(x) { +// if cond(x) { // return stop // } // return ir.DoChildren(x, do) @@ -88,9 +88,9 @@ import ( // return do(n) == stop // } // -// Visit and Find are presented above as examples of how to use +// Visit and Any are presented above as examples of how to use // DoChildren effectively, but of course, usage that fits within the -// simplifications captured by Visit or Find will be best served +// simplifications captured by Visit or Any will be best served // by directly calling the ones provided by this package. func DoChildren(n Node, do func(Node) error) error { if n == nil { @@ -138,19 +138,19 @@ func VisitList(list Nodes, visit func(Node)) { var stop = errors.New("stop") -// Find looks for a non-nil node x in the IR tree rooted at n -// for which find(x) returns true. -// Find considers nodes in a depth-first, preorder traversal. -// When Find finds a node x such that find(x) is true, -// Find ends the traversal and returns true immediately. -// Otherwise Find returns false after completing the entire traversal. -func Find(n Node, find func(Node) bool) bool { +// Any looks for a non-nil node x in the IR tree rooted at n +// for which cond(x) returns true. +// Any considers nodes in a depth-first, preorder traversal. +// When Any finds a node x such that cond(x) is true, +// Any ends the traversal and returns true immediately. +// Otherwise Any returns false after completing the entire traversal. +func Any(n Node, cond func(Node) bool) bool { if n == nil { return false } var do func(Node) error do = func(x Node) error { - if find(x) { + if cond(x) { return stop } return DoChildren(x, do) @@ -158,13 +158,13 @@ func Find(n Node, find func(Node) bool) bool { return do(n) == stop } -// FindList calls Find(x, find) for each node x in the list, in order. -// If any call Find(x, find) returns true, FindList stops and -// returns that result, skipping the remainder of the list. 
-// Otherwise FindList returns false. -func FindList(list Nodes, find func(Node) bool) bool { +// AnyList calls Any(x, cond) for each node x in the list, in order. +// If any call returns true, AnyList stops and returns true. +// Otherwise, AnyList returns false after calling Any(x, cond) +// for every x in the list. +func AnyList(list Nodes, cond func(Node) bool) bool { for _, x := range list.Slice() { - if Find(x, find) { + if Any(x, cond) { return true } } From 27aba226518fd126f6dd3413298c919a1eeb9040 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 10 Dec 2020 18:45:14 -0500 Subject: [PATCH 187/474] [dev.regabi] cmd/compile: cleanup for concrete types - walk An automated rewrite will add concrete type assertions after a test of n.Op(), when n can be safely type-asserted (meaning, n is not reassigned a different type, n is not reassigned and then used outside the scope of the type assertion, and so on). This sequence of CLs handles the code that the automated rewrite does not: adding specific types to function arguments, adjusting code not to call n.Left() etc when n may have multiple representations, and so on. This CL focuses on walk.go. Passes buildall w/ toolstash -cmp. Change-Id: I7aab57e4077cf10da1994625575c5e42ad114a9c Reviewed-on: https://go-review.googlesource.com/c/go/+/277921 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/alg.go | 28 +- src/cmd/compile/internal/gc/go.go | 4 +- src/cmd/compile/internal/gc/sinit.go | 2 +- src/cmd/compile/internal/gc/walk.go | 645 ++++++++++++++------------- 4 files changed, 350 insertions(+), 329 deletions(-) diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index 3ada2581f711b..25dadffc24ed1 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -805,7 +805,7 @@ func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node { // memequal(s.ptr, t.ptr, len(s)) // which can be used to construct string equality comparison. // eqlen must be evaluated before eqmem, and shortcircuiting is required. -func eqstring(s, t ir.Node) (eqlen, eqmem ir.Node) { +func eqstring(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) { s = conv(s, types.Types[types.TSTRING]) t = conv(t, types.Types[types.TSTRING]) sptr := ir.Nod(ir.OSPTR, s, nil) @@ -815,14 +815,13 @@ func eqstring(s, t ir.Node) (eqlen, eqmem ir.Node) { fn := syslook("memequal") fn = substArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8]) - call := ir.Nod(ir.OCALL, fn, nil) - call.PtrList().Append(sptr, tptr, ir.Copy(slen)) - call1 := typecheck(call, ctxExpr|ctxMultiOK) + call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, []ir.Node{sptr, tptr, ir.Copy(slen)}) + call = typecheck(call, ctxExpr|ctxMultiOK).(*ir.CallExpr) - cmp := ir.Nod(ir.OEQ, slen, tlen) - cmp1 := typecheck(cmp, ctxExpr) + cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, slen, tlen) + cmp = typecheck(cmp, ctxExpr).(*ir.BinaryExpr) cmp.SetType(types.Types[types.TBOOL]) - return cmp1, call1 + return cmp, call } // eqinterface returns the nodes @@ -831,7 +830,7 @@ func eqstring(s, t ir.Node) (eqlen, eqmem ir.Node) { // ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate) // which can be used to construct interface equality comparison. // eqtab must be evaluated before eqdata, and shortcircuiting is required. 
-func eqinterface(s, t ir.Node) (eqtab, eqdata ir.Node) { +func eqinterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) { if !types.Identical(s.Type(), t.Type()) { base.Fatalf("eqinterface %v %v", s.Type(), t.Type()) } @@ -853,14 +852,13 @@ func eqinterface(s, t ir.Node) (eqtab, eqdata ir.Node) { sdata.SetTypecheck(1) tdata.SetTypecheck(1) - call := ir.Nod(ir.OCALL, fn, nil) - call.PtrList().Append(stab, sdata, tdata) - call1 := typecheck(call, ctxExpr|ctxMultiOK) + call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, []ir.Node{stab, sdata, tdata}) + call = typecheck(call, ctxExpr|ctxMultiOK).(*ir.CallExpr) - cmp := ir.Nod(ir.OEQ, stab, ttab) - cmp1 := typecheck(cmp, ctxExpr) - cmp1.SetType(types.Types[types.TBOOL]) - return cmp1, call1 + cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, stab, ttab) + cmp = typecheck(cmp, ctxExpr).(*ir.BinaryExpr) + cmp.SetType(types.Types[types.TBOOL]) + return cmp, call } // eqmem returns the node diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index 5d4e8807421d2..b00a7ca14c1db 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -192,8 +192,8 @@ type Arch struct { var thearch Arch var ( - staticuint64s, - zerobase ir.Node + staticuint64s *ir.Name + zerobase *ir.Name assertE2I, assertE2I2, diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 6d7a8bc5c9118..b3f211ff75854 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -943,7 +943,7 @@ func oaslit(n ir.Node, init *ir.Nodes) bool { return false case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT: - if vmatch1(n.Left(), n.Right()) { + if refersToCommonName(n.Left(), n.Right()) { // not a special composite literal assignment return false } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 420edd5694968..91d3ad215ef61 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -12,6 +12,7 @@ import ( "cmd/internal/objabi" "cmd/internal/sys" "encoding/binary" + "errors" "fmt" "go/constant" "go/token" @@ -179,9 +180,8 @@ func walkstmt(n ir.Node) ir.Node { n.PtrInit().Set(nil) n.SetLeft(walkexpr(n.Left(), &init)) - n = mkcall1(chanfn("chanrecv1", 2, n.Left().Type()), nil, &init, n.Left(), nodnil()) - n = walkexpr(n, &init) - return initExpr(init.Slice(), n) + call := walkexpr(mkcall1(chanfn("chanrecv1", 2, n.Left().Type()), nil, &init, n.Left(), nodnil()), &init) + return initExpr(init.Slice(), call) case ir.OBREAK, ir.OCONTINUE, @@ -197,7 +197,7 @@ func walkstmt(n ir.Node) ir.Node { return n case ir.ODCL: - v := n.Left() + v := n.Left().(*ir.Name) if v.Class() == ir.PAUTOHEAP { if base.Flag.CompilingRuntime { base.Errorf("%v escapes to heap, not allowed in runtime", v) @@ -236,33 +236,37 @@ func walkstmt(n ir.Node) ir.Node { fallthrough case ir.OGO: var init ir.Nodes - switch n.Left().Op() { + switch call := n.Left(); call.Op() { case ir.OPRINT, ir.OPRINTN: - n.SetLeft(wrapCall(n.Left(), &init)) + call := call.(*ir.CallExpr) + n.SetLeft(wrapCall(call, &init)) case ir.ODELETE: - if mapfast(n.Left().List().First().Type()) == mapslow { - n.SetLeft(wrapCall(n.Left(), &init)) + call := call.(*ir.CallExpr) + if mapfast(call.List().First().Type()) == mapslow { + n.SetLeft(wrapCall(call, &init)) } else { - n.SetLeft(walkexpr(n.Left(), &init)) + n.SetLeft(walkexpr(call, &init)) } case ir.OCOPY: - n.SetLeft(copyany(n.Left(), &init, true)) + call := call.(*ir.BinaryExpr) + 
n.SetLeft(copyany(call, &init, true)) case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: - if n.Left().Body().Len() > 0 { - n.SetLeft(wrapCall(n.Left(), &init)) + call := call.(*ir.CallExpr) + if call.Body().Len() > 0 { + n.SetLeft(wrapCall(call, &init)) } else { - n.SetLeft(walkexpr(n.Left(), &init)) + n.SetLeft(walkexpr(call, &init)) } default: - n.SetLeft(walkexpr(n.Left(), &init)) + n.SetLeft(walkexpr(call, &init)) } if init.Len() > 0 { init.Append(n) - n = ir.NewBlockStmt(n.Pos(), init.Slice()) + return ir.NewBlockStmt(n.Pos(), init.Slice()) } return n @@ -295,7 +299,7 @@ func walkstmt(n ir.Node) ir.Node { } if (hasNamedResults(Curfn) && n.List().Len() > 1) || paramoutheap(Curfn) { // assign to the function out parameters, - // so that reorder3 can fix up conflicts + // so that ascompatee can fix up conflicts var rl []ir.Node for _, ln := range Curfn.Dcl { @@ -318,11 +322,10 @@ func walkstmt(n ir.Node) ir.Node { base.Fatalf("expected %v return arguments, have %v", want, got) } - // move function calls out, to make reorder3's job easier. + // move function calls out, to make ascompatee's job easier. walkexprlistsafe(n.List().Slice(), n.PtrInit()) - ll := ascompatee(n.Op(), rl, n.List().Slice(), n.PtrInit()) - n.PtrList().Set(reorder3(ll)) + n.PtrList().Set(ascompatee(n.Op(), rl, n.List().Slice(), n.PtrInit())) return n } walkexprlist(n.List().Slice(), n.PtrInit()) @@ -336,7 +339,7 @@ func walkstmt(n ir.Node) ir.Node { if isParamHeapCopy(nname) { nname = nname.Name().Stackcopy } - a := ir.Nod(ir.OAS, nname, rhs[i]) + a := ir.NewAssignStmt(base.Pos, nname, rhs[i]) res[i] = convas(a, n.PtrInit()) } n.PtrList().Set(res) @@ -480,7 +483,7 @@ func walkexpr(n ir.Node, init *ir.Nodes) ir.Node { base.Fatalf("expression has untyped type: %+v", n) } - if n.Op() == ir.ONAME && n.Class() == ir.PAUTOHEAP { + if n.Op() == ir.ONAME && n.(*ir.Name).Class() == ir.PAUTOHEAP { nn := ir.Nod(ir.ODEREF, n.Name().Heapaddr, nil) nn.Left().MarkNonNil() return walkexpr(typecheck(nn, ctxExpr), init) @@ -534,8 +537,19 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // stringsym for constant strings. return n - case ir.ONOT, ir.ONEG, ir.OPLUS, ir.OBITNOT, ir.OREAL, ir.OIMAG, ir.ODOTMETH, ir.ODOTINTER, - ir.ODEREF, ir.OSPTR, ir.OITAB, ir.OIDATA, ir.OADDR: + case ir.ONOT, ir.ONEG, ir.OPLUS, ir.OBITNOT, ir.OREAL, ir.OIMAG, ir.OSPTR, ir.OITAB, ir.OIDATA: + n.SetLeft(walkexpr(n.Left(), init)) + return n + + case ir.ODOTMETH, ir.ODOTINTER: + n.SetLeft(walkexpr(n.Left(), init)) + return n + + case ir.OADDR: + n.SetLeft(walkexpr(n.Left(), init)) + return n + + case ir.ODEREF: n.SetLeft(walkexpr(n.Left(), init)) return n @@ -545,6 +559,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return n case ir.ODOT, ir.ODOTPTR: + n := n.(*ir.SelectorExpr) usefield(n) n.SetLeft(walkexpr(n.Left(), init)) return n @@ -554,7 +569,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // Set up interface type addresses for back end. n.SetRight(typename(n.Type())) if n.Op() == ir.ODOTTYPE { - n.Right().SetRight(typename(n.Left().Type())) + n.Right().(*ir.AddrExpr).SetRight(typename(n.Left().Type())) } if !n.Type().IsInterface() && !n.Left().Type().IsEmptyInterface() { n.PtrList().Set1(itabname(n.Type(), n.Left().Type())) @@ -564,7 +579,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OLEN, ir.OCAP: if isRuneCount(n) { // Replace len([]rune(string)) with runtime.countrunes(string). 
- return mkcall("countrunes", n.Type(), init, conv(n.Left().Left(), types.Types[types.TSTRING])) + return mkcall("countrunes", n.Type(), init, conv(n.Left().(*ir.ConvExpr).Left(), types.Types[types.TSTRING])) } n.SetLeft(walkexpr(n.Left(), init)) @@ -578,22 +593,19 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { } if t.IsArray() { safeexpr(n.Left(), init) - n = origIntConst(n, t.NumElem()) - n.SetTypecheck(1) + con := origIntConst(n, t.NumElem()) + con.SetTypecheck(1) + return con } return n case ir.OCOMPLEX: - // Use results from call expression as arguments for complex. - if n.Left() == nil && n.Right() == nil { - n.SetLeft(n.List().First()) - n.SetRight(n.List().Second()) - } n.SetLeft(walkexpr(n.Left(), init)) n.SetRight(walkexpr(n.Right(), init)) return n case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: + n := n.(*ir.BinaryExpr) return walkcompare(n, init) case ir.OANDAND, ir.OOROR: @@ -609,7 +621,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return n case ir.OPRINT, ir.OPRINTN: - return walkprint(n, init) + return walkprint(n.(*ir.CallExpr), init) case ir.OPANIC: return mkcall("gopanic", nil, init, n.Left()) @@ -621,6 +633,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return n case ir.OCALLINTER, ir.OCALLFUNC, ir.OCALLMETH: + n := n.(*ir.CallExpr) if n.Op() == ir.OCALLINTER { usemethod(n) markUsedIfaceMethod(n) @@ -652,25 +665,38 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OAS, ir.OASOP: init.AppendNodes(n.PtrInit()) + var left, right ir.Node + switch n.Op() { + case ir.OAS: + left, right = n.Left(), n.Right() + case ir.OASOP: + left, right = n.Left(), n.Right() + } + // Recognize m[k] = append(m[k], ...) so we can reuse // the mapassign call. - mapAppend := n.Left().Op() == ir.OINDEXMAP && n.Right().Op() == ir.OAPPEND - if mapAppend && !samesafeexpr(n.Left(), n.Right().List().First()) { - base.Fatalf("not same expressions: %v != %v", n.Left(), n.Right().List().First()) + var mapAppend *ir.CallExpr + if left.Op() == ir.OINDEXMAP && right.Op() == ir.OAPPEND { + mapAppend = right.(*ir.CallExpr) + if !samesafeexpr(left, mapAppend.List().First()) { + base.Fatalf("not same expressions: %v != %v", left, mapAppend.List().First()) + } } - n.SetLeft(walkexpr(n.Left(), init)) - n.SetLeft(safeexpr(n.Left(), init)) - - if mapAppend { - n.Right().List().SetFirst(n.Left()) + left = walkexpr(left, init) + left = safeexpr(left, init) + if mapAppend != nil { + mapAppend.List().SetFirst(left) } if n.Op() == ir.OASOP { // Rewrite x op= y into x = x op y. - n = ir.Nod(ir.OAS, n.Left(), - typecheck(ir.NewBinaryExpr(base.Pos, n.SubOp(), n.Left(), n.Right()), ctxExpr)) + n = ir.Nod(ir.OAS, left, + typecheck(ir.NewBinaryExpr(base.Pos, n.(*ir.AssignOpStmt).SubOp(), left, right), ctxExpr)) + } else { + n.(*ir.AssignStmt).SetLeft(left) } + n := n.(*ir.AssignStmt) if oaslit(n, init) { return ir.NodAt(n.Pos(), ir.OBLOCK, nil, nil) @@ -692,33 +718,35 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.ORECV: // x = <-c; n.Left is x, n.Right.Left is c. // order.stmt made sure x is addressable. - n.Right().SetLeft(walkexpr(n.Right().Left(), init)) + recv := n.Right().(*ir.UnaryExpr) + recv.SetLeft(walkexpr(recv.Left(), init)) n1 := nodAddr(n.Left()) - r := n.Right().Left() // the channel + r := recv.Left() // the channel return mkcall1(chanfn("chanrecv1", 2, r.Type()), nil, init, r, n1) case ir.OAPPEND: // x = append(...) 
- r := n.Right() - if r.Type().Elem().NotInHeap() { - base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", r.Type().Elem()) + call := n.Right().(*ir.CallExpr) + if call.Type().Elem().NotInHeap() { + base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", call.Type().Elem()) } + var r ir.Node switch { - case isAppendOfMake(r): + case isAppendOfMake(call): // x = append(y, make([]T, y)...) - r = extendslice(r, init) - case r.IsDDD(): - r = appendslice(r, init) // also works for append(slice, string). + r = extendslice(call, init) + case call.IsDDD(): + r = appendslice(call, init) // also works for append(slice, string). default: - r = walkappend(r, init, n) + r = walkappend(call, init, n) } n.SetRight(r) if r.Op() == ir.OAPPEND { // Left in place for back end. // Do not add a new write barrier. // Set up address of type for back end. - r.SetLeft(typename(r.Type().Elem())) + r.(*ir.CallExpr).SetLeft(typename(r.Type().Elem())) return n } // Otherwise, lowered for race detector. @@ -726,7 +754,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { } if n.Left() != nil && n.Right() != nil { - n = convas(n, init) + return convas(n, init) } return n @@ -734,9 +762,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { init.AppendNodes(n.PtrInit()) walkexprlistsafe(n.List().Slice(), init) walkexprlistsafe(n.Rlist().Slice(), init) - ll := ascompatee(ir.OAS, n.List().Slice(), n.Rlist().Slice(), init) - ll = reorder3(ll) - return liststmt(ll) + return liststmt(ascompatee(ir.OAS, n.List().Slice(), n.Rlist().Slice(), init)) // a,b,... = fn() case ir.OAS2FUNC: @@ -760,7 +786,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OAS2RECV: init.AppendNodes(n.PtrInit()) - r := n.Rlist().First() + r := n.Rlist().First().(*ir.UnaryExpr) // recv walkexprlistsafe(n.List().Slice(), init) r.SetLeft(walkexpr(r.Left(), init)) var n1 ir.Node @@ -772,14 +798,13 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { fn := chanfn("chanrecv2", 2, r.Left().Type()) ok := n.List().Second() call := mkcall1(fn, types.Types[types.TBOOL], init, r.Left(), n1) - n = ir.Nod(ir.OAS, ok, call) - return typecheck(n, ctxStmt) + return typecheck(ir.Nod(ir.OAS, ok, call), ctxStmt) // a,b = m[i] case ir.OAS2MAPR: init.AppendNodes(n.PtrInit()) - r := n.Rlist().First() + r := n.Rlist().First().(*ir.IndexExpr) walkexprlistsafe(n.List().Slice(), init) r.SetLeft(walkexpr(r.Left(), init)) r.SetRight(walkexpr(r.Right(), init)) @@ -803,37 +828,39 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // a = *var a := n.List().First() + var call *ir.CallExpr if w := t.Elem().Width; w <= zeroValSize { fn := mapfn(mapaccess2[fast], t) - r = mkcall1(fn, fn.Type().Results(), init, typename(t), r.Left(), key) + call = mkcall1(fn, fn.Type().Results(), init, typename(t), r.Left(), key) } else { fn := mapfn("mapaccess2_fat", t) z := zeroaddr(w) - r = mkcall1(fn, fn.Type().Results(), init, typename(t), r.Left(), key, z) + call = mkcall1(fn, fn.Type().Results(), init, typename(t), r.Left(), key, z) } // mapaccess2* returns a typed bool, but due to spec changes, // the boolean result of i.(T) is now untyped so we make it the // same type as the variable on the lhs. 
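
At the source level, the OAS2MAPR shape is the comma-ok map read. It is lowered to a mapaccess2* runtime call returning an element pointer and a bool (large elements take the *_fat variant that also passes a zero value), and the bool is retyped to match the left-hand side as described above. For example:

    package main

    import "fmt"

    func main() {
        m := map[string]int{"a": 1}
        v, ok := m["b"] // becomes, roughly, a runtime mapaccess2 call
        fmt.Println(v, ok) // 0 false
    }
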
if ok := n.List().Second(); !ir.IsBlank(ok) && ok.Type().IsBoolean() { - r.Type().Field(1).Type = ok.Type() + call.Type().Field(1).Type = ok.Type() } - n.PtrRlist().Set1(r) + n.PtrRlist().Set1(call) n.SetOp(ir.OAS2FUNC) // don't generate a = *var if a is _ - if !ir.IsBlank(a) { - var_ := temp(types.NewPtr(t.Elem())) - var_.SetTypecheck(1) - var_.MarkNonNil() // mapaccess always returns a non-nil pointer - n.List().SetFirst(var_) - n = walkexpr(n, init) - init.Append(n) - n = ir.Nod(ir.OAS, a, ir.Nod(ir.ODEREF, var_, nil)) + if ir.IsBlank(a) { + return walkexpr(typecheck(n, ctxStmt), init) } - n = typecheck(n, ctxStmt) - return walkexpr(n, init) + var_ := temp(types.NewPtr(t.Elem())) + var_.SetTypecheck(1) + var_.MarkNonNil() // mapaccess always returns a non-nil pointer + + n.List().SetFirst(var_) + init.Append(walkexpr(n, init)) + + as := ir.Nod(ir.OAS, a, ir.Nod(ir.ODEREF, var_, nil)) + return walkexpr(typecheck(as, ctxStmt), init) case ir.ODELETE: init.AppendNodes(n.PtrInit()) @@ -910,9 +937,10 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if thearch.LinkArch.ByteOrder == binary.BigEndian { index = ir.Nod(ir.OADD, index, nodintconst(7)) } - value = ir.Nod(ir.OINDEX, staticuint64s, index) - value.SetBounded(true) - case n.Left().Name() != nil && n.Left().Class() == ir.PEXTERN && n.Left().Name().Readonly(): + xe := ir.Nod(ir.OINDEX, staticuint64s, index) + xe.SetBounded(true) + value = xe + case n.Left().Op() == ir.ONAME && n.Left().(*ir.Name).Class() == ir.PEXTERN && n.Left().(*ir.Name).Readonly(): // n.Left is a readonly global; use it directly. value = n.Left() case !fromType.IsInterface() && n.Esc() == EscNone && fromType.Width <= 1024: @@ -1002,12 +1030,12 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { fn := syslook(fnname) fn = substArgTypes(fn, fromType, toType) dowidth(fn.Type()) - n = ir.Nod(ir.OCALL, fn, nil) - n.PtrList().Set2(tab, v) - n = typecheck(n, ctxExpr) - return walkexpr(n, init) + call := ir.Nod(ir.OCALL, fn, nil) + call.PtrList().Set2(tab, v) + return walkexpr(typecheck(call, ctxExpr), init) case ir.OCONV, ir.OCONVNOP: + n := n.(*ir.ConvExpr) n.SetLeft(walkexpr(n.Left(), init)) if n.Op() == ir.OCONVNOP && n.Type() == n.Left().Type() { return n.Left() @@ -1036,8 +1064,8 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if isComplex[et] && n.Op() == ir.ODIV { t := n.Type() - n = mkcall("complex128div", types.Types[types.TCOMPLEX128], init, conv(n.Left(), types.Types[types.TCOMPLEX128]), conv(n.Right(), types.Types[types.TCOMPLEX128])) - return conv(n, t) + call := mkcall("complex128div", types.Types[types.TCOMPLEX128], init, conv(n.Left(), types.Types[types.TCOMPLEX128]), conv(n.Right(), types.Types[types.TCOMPLEX128])) + return conv(call, t) } // Nothing to do for float divisions. @@ -1136,6 +1164,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { map_ := n.Left() key := n.Right() t := map_.Type() + var call *ir.CallExpr if n.IndexMapLValue() { // This m[k] expression is on the left-hand side of an assignment. fast := mapfast(t) @@ -1144,7 +1173,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // order.expr made sure key is addressable. key = nodAddr(key) } - n = mkcall1(mapfn(mapassign[fast], t), nil, init, typename(t), map_, key) + call = mkcall1(mapfn(mapassign[fast], t), nil, init, typename(t), map_, key) } else { // m[k] is not the target of an assignment. 
fast := mapfast(t) @@ -1155,18 +1184,18 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { } if w := t.Elem().Width; w <= zeroValSize { - n = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Elem()), init, typename(t), map_, key) + call = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Elem()), init, typename(t), map_, key) } else { z := zeroaddr(w) - n = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, typename(t), map_, key, z) + call = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, typename(t), map_, key, z) } } - n.SetType(types.NewPtr(t.Elem())) - n.MarkNonNil() // mapaccess1* and mapassign always return non-nil pointers. - n = ir.Nod(ir.ODEREF, n, nil) - n.SetType(t.Elem()) - n.SetTypecheck(1) - return n + call.SetType(types.NewPtr(t.Elem())) + call.MarkNonNil() // mapaccess1* and mapassign always return non-nil pointers. + star := ir.Nod(ir.ODEREF, call, nil) + star.SetType(t.Elem()) + star.SetTypecheck(1) + return star case ir.ORECV: base.Fatalf("walkexpr ORECV") // should see inside OAS only @@ -1179,12 +1208,16 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return n case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR: - checkSlice := checkPtr(Curfn, 1) && n.Op() == ir.OSLICE3ARR && n.Left().Op() == ir.OCONVNOP && n.Left().Left().Type().IsUnsafePtr() + n := n.(*ir.SliceExpr) + + checkSlice := checkPtr(Curfn, 1) && n.Op() == ir.OSLICE3ARR && n.Left().Op() == ir.OCONVNOP && n.Left().(*ir.ConvExpr).Left().Type().IsUnsafePtr() if checkSlice { - n.Left().SetLeft(walkexpr(n.Left().Left(), init)) + conv := n.Left().(*ir.ConvExpr) + conv.SetLeft(walkexpr(conv.Left(), init)) } else { n.SetLeft(walkexpr(n.Left(), init)) } + low, high, max := n.SliceBounds() low = walkexpr(low, init) if low != nil && isZero(low) { @@ -1195,10 +1228,11 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { max = walkexpr(max, init) n.SetSliceBounds(low, high, max) if checkSlice { - n.SetLeft(walkCheckPtrAlignment(n.Left(), init, max)) + n.SetLeft(walkCheckPtrAlignment(n.Left().(*ir.ConvExpr), init, max)) } + if n.Op().IsSlice3() { - if max != nil && max.Op() == ir.OCAP && samesafeexpr(n.Left(), max.Left()) { + if max != nil && max.Op() == ir.OCAP && samesafeexpr(n.Left(), max.(*ir.UnaryExpr).Left()) { // Reduce x[i:j:cap(x)] to x[i:j]. if n.Op() == ir.OSLICE3 { n.SetOp(ir.OSLICE) @@ -1219,17 +1253,14 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if n.Type().Elem().Width >= maxImplicitStackVarSize { base.Fatalf("large ONEW with EscNone: %v", n) } - r := ir.Node(temp(n.Type().Elem())) - r = ir.Nod(ir.OAS, r, nil) // zero temp - r = typecheck(r, ctxStmt) - init.Append(r) - r = nodAddr(r.Left()) - return typecheck(r, ctxExpr) + r := temp(n.Type().Elem()) + init.Append(typecheck(ir.Nod(ir.OAS, r, nil), ctxStmt)) // zero temp + return typecheck(nodAddr(r), ctxExpr) } return callnew(n.Type().Elem()) case ir.OADDSTR: - return addstr(n, init) + return addstr(n.(*ir.AddStringExpr), init) case ir.OAPPEND: // order should make sure we only see OAS(node, OAPPEND), which we handle above. 
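
A source-level gloss on the ONEW case above: when escape analysis marks the allocation EscNone, new(T) is rewritten into a zeroed stack temporary plus an address-of, avoiding a heap allocation. A sketch:

    package main

    import "fmt"

    func main() {
        // Non-escaping, so conceptually rewritten to:
        //     var tmp int // zeroed temp
        //     p := &tmp
        p := new(int)
        *p = 42
        fmt.Println(*p) // 42
    }
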
@@ -1237,7 +1268,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { panic("unreachable") case ir.OCOPY: - return copyany(n, init, instrumenting && !base.Flag.CompilingRuntime) + return copyany(n.(*ir.BinaryExpr), init, instrumenting && !base.Flag.CompilingRuntime) case ir.OCLOSE: // cannot use chanfn - closechan takes any, not chan any @@ -1474,9 +1505,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { fn = syslook("memmove") fn = substArgTypes(fn, t.Elem(), t.Elem()) ncopy := mkcall1(fn, nil, init, ir.Nod(ir.OSPTR, s, nil), copyptr, size) - ncopy = typecheck(ncopy, ctxStmt) - ncopy = walkexpr(ncopy, init) - init.Append(ncopy) + init.Append(walkexpr(typecheck(ncopy, ctxStmt), init)) return s } @@ -1488,8 +1517,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { s.Left().MarkNonNil() s.PtrList().Set2(length, length) s.SetType(t) - n = typecheck(s, ctxExpr) - return walkexpr(n, init) + return walkexpr(typecheck(s, ctxExpr), init) case ir.ORUNESTR: a := nodnil() @@ -1591,6 +1619,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT, ir.OPTRLIT: if isStaticCompositeLiteral(n) && !canSSAType(n.Type()) { + n := n.(*ir.CompLitExpr) // not OPTRLIT // n can be directly represented in the read-only data section. // Make direct reference to the static data. See issue 12841. vstat := readonlystaticname(n.Type()) @@ -1633,14 +1662,15 @@ func markTypeUsedInInterface(t *types.Type, from *obj.LSym) { // markUsedIfaceMethod marks that an interface method is used in the current // function. n is OCALLINTER node. -func markUsedIfaceMethod(n ir.Node) { - ityp := n.Left().Left().Type() +func markUsedIfaceMethod(n *ir.CallExpr) { + dot := n.Left().(*ir.SelectorExpr) + ityp := dot.Left().Type() tsym := typenamesym(ityp).Linksym() r := obj.Addrel(Curfn.LSym) r.Sym = tsym - // n.Left.Xoffset is the method index * Widthptr (the offset of code pointer + // dot.Xoffset is the method index * Widthptr (the offset of code pointer // in itab). - midx := n.Left().Offset() / int64(Widthptr) + midx := dot.Offset() / int64(Widthptr) r.Add = ifaceMethodOffset(ityp, midx) r.Type = objabi.R_USEIFACEMETHOD } @@ -1692,9 +1722,9 @@ func rtconvfn(src, dst *types.Type) (param, result types.Kind) { } // TODO(josharian): combine this with its caller and simplify -func reduceSlice(n ir.Node) ir.Node { +func reduceSlice(n *ir.SliceExpr) ir.Node { low, high, max := n.SliceBounds() - if high != nil && high.Op() == ir.OLEN && samesafeexpr(n.Left(), high.Left()) { + if high != nil && high.Op() == ir.OLEN && samesafeexpr(n.Left(), high.(*ir.UnaryExpr).Left()) { // Reduce x[i:len(x)] to x[i:]. high = nil } @@ -1709,10 +1739,10 @@ func reduceSlice(n ir.Node) ir.Node { return n } -func ascompatee1(l ir.Node, r ir.Node, init *ir.Nodes) ir.Node { +func ascompatee1(l ir.Node, r ir.Node, init *ir.Nodes) *ir.AssignStmt { // convas will turn map assigns into function calls, // making it impossible for reorder3 to work. 
- n := ir.Nod(ir.OAS, l, r) + n := ir.NewAssignStmt(base.Pos, l, r) if l.Op() == ir.OINDEXMAP { return n @@ -1734,7 +1764,7 @@ func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node { nr[i1] = safeexpr(nr[i1], init) } - var nn []ir.Node + var nn []*ir.AssignStmt i := 0 for ; i < len(nl); i++ { if i >= len(nr) { @@ -1754,7 +1784,7 @@ func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node { nrn.Set(nr) base.Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), ir.FuncName(Curfn)) } - return nn + return reorder3(nn) } // fncall reports whether assigning an rvalue of type rt to an lvalue l might involve a function call. @@ -1789,7 +1819,7 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { if fncall(l, r.Type) { tmp := ir.Node(temp(r.Type)) tmp = typecheck(tmp, ctxExpr) - a := convas(ir.Nod(ir.OAS, l, tmp), &mm) + a := convas(ir.NewAssignStmt(base.Pos, l, tmp), &mm) mm.Append(a) l = tmp } @@ -1799,7 +1829,7 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { res.SetType(r.Type) res.SetTypecheck(1) - a := convas(ir.Nod(ir.OAS, l, res), &nn) + a := convas(ir.NewAssignStmt(base.Pos, l, res), &nn) updateHasCall(a) if a.HasCall() { ir.Dump("ascompatet ucount", a) @@ -1818,9 +1848,10 @@ func mkdotargslice(typ *types.Type, args []ir.Node) ir.Node { n = nodnil() n.SetType(typ) } else { - n = ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(typ)) - n.PtrList().Append(args...) - n.SetImplicit(true) + lit := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(typ)) + lit.PtrList().Append(args...) + lit.SetImplicit(true) + n = lit } n = typecheck(n, ctxExpr) @@ -1832,7 +1863,7 @@ func mkdotargslice(typ *types.Type, args []ir.Node) ir.Node { // fixVariadicCall rewrites calls to variadic functions to use an // explicit ... argument if one is not already present. -func fixVariadicCall(call ir.Node) { +func fixVariadicCall(call *ir.CallExpr) { fntype := call.Left().Type() if !fntype.IsVariadic() || call.IsDDD() { return @@ -1852,7 +1883,7 @@ func fixVariadicCall(call ir.Node) { call.SetIsDDD(true) } -func walkCall(n ir.Node, init *ir.Nodes) { +func walkCall(n *ir.CallExpr, init *ir.Nodes) { if n.Rlist().Len() != 0 { return // already walked } @@ -1866,8 +1897,9 @@ func walkCall(n ir.Node, init *ir.Nodes) { // If this is a method call, add the receiver at the beginning of the args. if n.Op() == ir.OCALLMETH { withRecv := make([]ir.Node, len(args)+1) - withRecv[0] = n.Left().Left() - n.Left().SetLeft(nil) + dot := n.Left().(*ir.SelectorExpr) + withRecv[0] = dot.Left() + dot.SetLeft(nil) copy(withRecv[1:], args) args = withRecv } @@ -1893,7 +1925,7 @@ func walkCall(n ir.Node, init *ir.Nodes) { if instrumenting || fncall(arg, t) { // make assignment of fncall to tempAt tmp := temp(t) - a := convas(ir.Nod(ir.OAS, tmp, arg), init) + a := convas(ir.NewAssignStmt(base.Pos, tmp, arg), init) tempAssigns = append(tempAssigns, a) // replace arg with temp args[i] = tmp @@ -1905,7 +1937,7 @@ func walkCall(n ir.Node, init *ir.Nodes) { } // generate code for print -func walkprint(nn ir.Node, init *ir.Nodes) ir.Node { +func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { // Hoist all the argument evaluation up before the lock. 
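
walkCall's temporaries pin evaluation order: an argument that itself contains a call is first assigned to a temp, so later arguments and the call itself cannot observe it half-evaluated. That matches Go's left-to-right evaluation rule, which a runnable sketch makes visible (the counter and next are invented names):

    package main

    import "fmt"

    var i int

    func next() int { i++; return i }

    func main() {
        // Each next() result is captured (conceptually into a temp)
        // before fmt.Println itself runs.
        fmt.Println(next(), next()) // 1 2
    }
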
walkexprlistcheap(nn.List().Slice(), init) @@ -2078,7 +2110,7 @@ func isReflectHeaderDataField(l ir.Node) bool { return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader" } -func convas(n ir.Node, init *ir.Nodes) ir.Node { +func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt { if n.Op() != ir.OAS { base.Fatalf("convas: not OAS %v", n.Op()) } @@ -2110,13 +2142,14 @@ func convas(n ir.Node, init *ir.Nodes) ir.Node { return n } -// from ascompat[ee] +// reorder3 +// from ascompatee // a,b = c,d // simultaneous assignment. there cannot // be later use of an earlier lvalue. // // function calls have been removed. -func reorder3(all []ir.Node) []ir.Node { +func reorder3(all []*ir.AssignStmt) []ir.Node { // If a needed expression may be affected by an // earlier assignment, make an early copy of that // expression and use the copy instead. @@ -2129,17 +2162,20 @@ func reorder3(all []ir.Node) []ir.Node { // Save subexpressions needed on left side. // Drill through non-dereferences. for { - if l.Op() == ir.ODOT || l.Op() == ir.OPAREN { - l = l.Left() + switch ll := l; ll.Op() { + case ir.ODOT: + l = ll.Left() continue - } - - if l.Op() == ir.OINDEX && l.Left().Type().IsArray() { - l.SetRight(reorder3save(l.Right(), all, i, &early)) - l = l.Left() + case ir.OPAREN: + l = ll.Left() continue + case ir.OINDEX: + if ll.Left().Type().IsArray() { + ll.SetRight(reorder3save(ll.Right(), all, i, &early)) + l = ll.Left() + continue + } } - break } @@ -2157,7 +2193,9 @@ func reorder3(all []ir.Node) []ir.Node { all[i] = convas(all[i], &mapinit) } - case ir.ODEREF, ir.ODOTPTR: + case ir.ODEREF: + l.SetLeft(reorder3save(l.Left(), all, i, &early)) + case ir.ODOTPTR: l.SetLeft(reorder3save(l.Left(), all, i, &early)) } @@ -2166,7 +2204,10 @@ func reorder3(all []ir.Node) []ir.Node { } early = append(mapinit.Slice(), early...) - return append(early, all...) + for _, as := range all { + early = append(early, as) + } + return early } // if the evaluation of *np would be affected by the @@ -2175,31 +2216,36 @@ func reorder3(all []ir.Node) []ir.Node { // replace *np with that temp. // The result of reorder3save MUST be assigned back to n, e.g. // n.Left = reorder3save(n.Left, all, i, early) -func reorder3save(n ir.Node, all []ir.Node, i int, early *[]ir.Node) ir.Node { +func reorder3save(n ir.Node, all []*ir.AssignStmt, i int, early *[]ir.Node) ir.Node { if !aliased(n, all[:i]) { return n } q := ir.Node(temp(n.Type())) - q = ir.Nod(ir.OAS, q, n) - q = typecheck(q, ctxStmt) - *early = append(*early, q) - return q.Left() + as := typecheck(ir.Nod(ir.OAS, q, n), ctxStmt) + *early = append(*early, as) + return q } // what's the outer value that a write to n affects? // outer value means containing struct or array. func outervalue(n ir.Node) ir.Node { for { - switch n.Op() { + switch nn := n; nn.Op() { case ir.OXDOT: base.Fatalf("OXDOT in walk") - case ir.ODOT, ir.OPAREN, ir.OCONVNOP: - n = n.Left() + case ir.ODOT: + n = nn.Left() + continue + case ir.OPAREN: + n = nn.Left() + continue + case ir.OCONVNOP: + n = nn.Left() continue case ir.OINDEX: - if n.Left().Type() != nil && n.Left().Type().IsArray() { - n = n.Left() + if nn.Left().Type() != nil && nn.Left().Type().IsArray() { + n = nn.Left() continue } } @@ -2210,7 +2256,7 @@ func outervalue(n ir.Node) ir.Node { // Is it possible that the computation of r might be // affected by assignments in all? 
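
The early copies that reorder3 inserts implement the usual tuple-assignment rule: all right-hand sides (and any lvalue subexpressions that could be clobbered) are evaluated before any store happens. At the source level:

    package main

    import "fmt"

    func main() {
        x, y := 1, 2
        x, y = y, x // both RHS values are saved before either store
        fmt.Println(x, y) // 2 1
    }
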
-func aliased(r ir.Node, all []ir.Node) bool { +func aliased(r ir.Node, all []*ir.AssignStmt) bool { if r == nil { return false } @@ -2218,7 +2264,7 @@ func aliased(r ir.Node, all []ir.Node) bool { // Treat all fields of a struct as referring to the whole struct. // We could do better but we would have to keep track of the fields. for r.Op() == ir.ODOT { - r = r.Left() + r = r.(*ir.SelectorExpr).Left() } // Look for obvious aliasing: a variable being assigned @@ -2233,11 +2279,12 @@ func aliased(r ir.Node, all []ir.Node) bool { continue } - l := outervalue(as.Left()) - if l.Op() != ir.ONAME { + lv := outervalue(as.Left()) + if lv.Op() != ir.ONAME { memwrite = true continue } + l := lv.(*ir.Name) switch l.Class() { default: @@ -2253,7 +2300,7 @@ func aliased(r ir.Node, all []ir.Node) bool { continue } - if vmatch2(l, r) { + if refersToName(l, r) { // Direct hit: l appears in r. return true } @@ -2269,10 +2316,10 @@ func aliased(r ir.Node, all []ir.Node) bool { return false } - // If r does not refer to computed addresses - // (that is, if r only refers to variables whose addresses - // have not been taken), no aliasing. - if varexpr(r) { + // If r does not refer to any variables whose addresses have been taken, + // then the only possible writes to r would be directly to the variables, + // and we checked those above, so no aliasing problems. + if !anyAddrTaken(r) { return false } @@ -2281,127 +2328,103 @@ func aliased(r ir.Node, all []ir.Node) bool { return true } -// does the evaluation of n only refer to variables -// whose addresses have not been taken? -// (and no other memory) -func varexpr(n ir.Node) bool { - if n == nil { - return true - } +// anyAddrTaken reports whether the evaluation n, +// which appears on the left side of an assignment, +// may refer to variables whose addresses have been taken. +func anyAddrTaken(n ir.Node) bool { + return ir.Any(n, func(n ir.Node) bool { + switch n.Op() { + case ir.ONAME: + return n.Class() == ir.PEXTERN || n.Class() == ir.PAUTOHEAP || n.Name().Addrtaken() - switch n.Op() { - case ir.OLITERAL, ir.ONIL: - return true + case ir.ODOT: // but not ODOTPTR - should have been handled in aliased. + base.Fatalf("anyAddrTaken unexpected ODOT") - case ir.ONAME: - switch n.Class() { - case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT: - if !n.Name().Addrtaken() { - return true - } + case ir.OADD, + ir.OAND, + ir.OANDAND, + ir.OANDNOT, + ir.OBITNOT, + ir.OCONV, + ir.OCONVIFACE, + ir.OCONVNOP, + ir.ODIV, + ir.ODOTTYPE, + ir.OLITERAL, + ir.OLSH, + ir.OMOD, + ir.OMUL, + ir.ONEG, + ir.ONIL, + ir.OOR, + ir.OOROR, + ir.OPAREN, + ir.OPLUS, + ir.ORSH, + ir.OSUB, + ir.OXOR: + return false } - - return false - - case ir.OADD, - ir.OSUB, - ir.OOR, - ir.OXOR, - ir.OMUL, - ir.ODIV, - ir.OMOD, - ir.OLSH, - ir.ORSH, - ir.OAND, - ir.OANDNOT, - ir.OPLUS, - ir.ONEG, - ir.OBITNOT, - ir.OPAREN, - ir.OANDAND, - ir.OOROR, - ir.OCONV, - ir.OCONVNOP, - ir.OCONVIFACE, - ir.ODOTTYPE: - return varexpr(n.Left()) && varexpr(n.Right()) - - case ir.ODOT: // but not ODOTPTR - // Should have been handled in aliased. - base.Fatalf("varexpr unexpected ODOT") - } - - // Be conservative. - return false + // Be conservative. + return true + }) } -// is the name l mentioned in r? 
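
The reason aliased must fall back to anyAddrTaken: once a variable's address has been taken, a store through any pointer may write it without naming it, so only address-free expressions are safe to skip. A small demonstration:

    package main

    import "fmt"

    func main() {
        x := 1
        p := &x // x is now address-taken
        *p = 2  // writes x without mentioning x
        fmt.Println(x) // 2
    }
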
-func vmatch2(l ir.Node, r ir.Node) bool { - if r == nil { - return false - } - switch r.Op() { - // match each right given left - case ir.ONAME: - return l == r - - case ir.OLITERAL, ir.ONIL: - return false - } - - if vmatch2(l, r.Left()) { - return true - } - if vmatch2(l, r.Right()) { - return true - } - for _, n := range r.List().Slice() { - if vmatch2(l, n) { - return true - } - } - return false +// refersToName reports whether r refers to name. +func refersToName(name *ir.Name, r ir.Node) bool { + return ir.Any(r, func(r ir.Node) bool { + return r.Op() == ir.ONAME && r == name + }) } -// is any name mentioned in l also mentioned in r? -// called by sinit.go -func vmatch1(l ir.Node, r ir.Node) bool { - // isolate all left sides +var stop = errors.New("stop") + +// refersToCommonName reports whether any name +// appears in common between l and r. +// This is called from sinit.go. +func refersToCommonName(l ir.Node, r ir.Node) bool { if l == nil || r == nil { return false } - switch l.Op() { - case ir.ONAME: - switch l.Class() { - case ir.PPARAM, ir.PAUTO: - break - default: - // assignment to non-stack variable must be - // delayed if right has function calls. - if r.HasCall() { - return true + // This could be written elegantly as a Find nested inside a Find: + // + // found := ir.Find(l, func(l ir.Node) interface{} { + // if l.Op() == ir.ONAME { + // return ir.Find(r, func(r ir.Node) interface{} { + // if r.Op() == ir.ONAME && l.Name() == r.Name() { + // return r + // } + // return nil + // }) + // } + // return nil + // }) + // return found != nil + // + // But that would allocate a new closure for the inner Find + // for each name found on the left side. + // It may not matter at all, but the below way of writing it + // only allocates two closures, not O(|L|) closures. 
+ + var doL, doR func(ir.Node) error + var targetL *ir.Name + doR = func(r ir.Node) error { + if r.Op() == ir.ONAME && r.Name() == targetL { + return stop + } + return ir.DoChildren(r, doR) + } + doL = func(l ir.Node) error { + if l.Op() == ir.ONAME { + targetL = l.Name() + if doR(r) == stop { + return stop } } - - return vmatch2(l, r) - - case ir.OLITERAL, ir.ONIL: - return false - } - - if vmatch1(l.Left(), r) { - return true - } - if vmatch1(l.Right(), r) { - return true - } - for _, n := range l.List().Slice() { - if vmatch1(n, r) { - return true - } + return ir.DoChildren(l, doL) } - return false + return doL(l) == stop } // paramstoheap returns code to allocate memory for heap-escaped parameters @@ -2490,7 +2513,7 @@ func heapmoves() { base.Pos = lno } -func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) ir.Node { +func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) *ir.CallExpr { if fn.Type() == nil || fn.Type().Kind() != types.TFUNC { base.Fatalf("mkcall %v %v", fn, fn.Type()) } @@ -2508,14 +2531,14 @@ func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) ir.Node { } r1 := typecheck(call, ctx) r1.SetType(t) - return walkexpr(r1, init) + return walkexpr(r1, init).(*ir.CallExpr) } -func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) ir.Node { +func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr { return vmkcall(syslook(name), t, init, args) } -func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) ir.Node { +func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr { return vmkcall(fn, t, init, args) } @@ -2650,8 +2673,7 @@ func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node { return fn } -func addstr(n ir.Node, init *ir.Nodes) ir.Node { - // order.expr rewrote OADDSTR to have a list of strings. +func addstr(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { c := n.List().Len() if c < 2 { @@ -2710,7 +2732,7 @@ func addstr(n ir.Node, init *ir.Nodes) ir.Node { return r1 } -func walkAppendArgs(n ir.Node, init *ir.Nodes) { +func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) { walkexprlistsafe(n.List().Slice(), init) // walkexprlistsafe will leave OINDEX (s[n]) alone if both s @@ -2736,7 +2758,7 @@ func walkAppendArgs(n ir.Node, init *ir.Nodes) { // s // // l2 is allowed to be a string. -func appendslice(n ir.Node, init *ir.Nodes) ir.Node { +func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { walkAppendArgs(n, init) l1 := n.List().First() @@ -2840,12 +2862,16 @@ func isAppendOfMake(n ir.Node) bool { base.Fatalf("missing typecheck: %+v", n) } - if n.Op() != ir.OAPPEND || !n.IsDDD() || n.List().Len() != 2 { + if n.Op() != ir.OAPPEND { + return false + } + call := n.(*ir.CallExpr) + if !call.IsDDD() || call.List().Len() != 2 || call.List().Second().Op() != ir.OMAKESLICE { return false } - second := n.List().Second() - if second.Op() != ir.OMAKESLICE || second.Right() != nil { + mk := call.List().Second().(*ir.MakeExpr) + if mk.Right() != nil { return false } @@ -2855,7 +2881,7 @@ func isAppendOfMake(n ir.Node) bool { // typecheck made sure that constant arguments to make are not negative and fit into an int. // The care of overflow of the len argument to make will be handled by an explicit check of int(len) < 0 during runtime. 
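
The shape isAppendOfMake matches, at the source level. extendslice then grows the destination in place, so the slice produced by make is never materialized, and the int(len) < 0 check mentioned above guards the length conversion at run time:

    package main

    import "fmt"

    func main() {
        s := []byte{1, 2}
        n := 3 // any non-constant integer expression
        s = append(s, make([]byte, n)...)
        fmt.Println(len(s)) // 5
    }
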
- y := second.Left() + y := mk.Left() if !ir.IsConst(y, constant.Int) && y.Type().Size() > types.Types[types.TUINT].Size() { return false } @@ -2890,11 +2916,11 @@ func isAppendOfMake(n ir.Node) bool { // } // } // s -func extendslice(n ir.Node, init *ir.Nodes) ir.Node { +func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { // isAppendOfMake made sure all possible positive values of l2 fit into an uint. // The case of l2 overflow when converting from e.g. uint to int is handled by an explicit // check of l2 < 0 at runtime which is generated below. - l2 := conv(n.List().Second().Left(), types.Types[types.TINT]) + l2 := conv(n.List().Second().(*ir.MakeExpr).Left(), types.Types[types.TINT]) l2 = typecheck(l2, ctxExpr) n.List().SetSecond(l2) // walkAppendArgs expects l2 in n.List.Second(). @@ -3007,7 +3033,7 @@ func extendslice(n ir.Node, init *ir.Nodes) ir.Node { // ... // } // s -func walkappend(n ir.Node, init *ir.Nodes, dst ir.Node) ir.Node { +func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node { if !samesafeexpr(dst, n.List().First()) { n.List().SetFirst(safeexpr(n.List().First(), init)) n.List().SetFirst(walkexpr(n.List().First(), init)) @@ -3096,7 +3122,7 @@ func walkappend(n ir.Node, init *ir.Nodes, dst ir.Node) ir.Node { // // Also works if b is a string. // -func copyany(n ir.Node, init *ir.Nodes, runtimecall bool) ir.Node { +func copyany(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node { if n.Left().Type().Elem().HasPointers() { Curfn.SetWBPos(n.Pos()) fn := writebarrierfn("typedslicecopy", n.Left().Type().Elem(), n.Right().Type().Elem()) @@ -3194,7 +3220,7 @@ func eqfor(t *types.Type) (n ir.Node, needsize bool) { // The result of walkcompare MUST be assigned back to n, e.g. // n.Left = walkcompare(n.Left, init) -func walkcompare(n ir.Node, init *ir.Nodes) ir.Node { +func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { if n.Left().Type().IsInterface() && n.Right().Type().IsInterface() && n.Left().Op() != ir.ONIL && n.Right().Op() != ir.ONIL { return walkcompareInterface(n, init) } @@ -3245,8 +3271,7 @@ func walkcompare(n ir.Node, init *ir.Nodes) ir.Node { eqdata := ir.NewBinaryExpr(base.Pos, eq, ifaceData(n.Pos(), l, r.Type()), r) // Put it all together. expr := ir.NewLogicalExpr(base.Pos, andor, eqtype, eqdata) - n = finishcompare(n, expr, init) - return n + return finishcompare(n, expr, init) } // Must be comparison of array or struct. @@ -3321,11 +3346,11 @@ func walkcompare(n ir.Node, init *ir.Nodes) ir.Node { cmpl := n.Left() for cmpl != nil && cmpl.Op() == ir.OCONVNOP { - cmpl = cmpl.Left() + cmpl = cmpl.(*ir.ConvExpr).Left() } cmpr := n.Right() for cmpr != nil && cmpr.Op() == ir.OCONVNOP { - cmpr = cmpr.Left() + cmpr = cmpr.(*ir.ConvExpr).Left() } // Chose not to inline. Call equality function directly. 
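
When walkcompare does choose to inline, the result is an element-by-element &&-chain rather than a call to the generated equality function. Roughly, at the source level:

    package main

    import "fmt"

    func main() {
        a := [4]int{1, 2, 3, 4}
        b := [4]int{1, 2, 3, 5}
        // Compiled approximately as
        //     a[0] == b[0] && a[1] == b[1] && a[2] == b[2] && a[3] == b[3]
        fmt.Println(a == b) // false
    }
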
@@ -3346,8 +3371,7 @@ func walkcompare(n ir.Node, init *ir.Nodes) ir.Node { if n.Op() != ir.OEQ { res = ir.Nod(ir.ONOT, res, nil) } - n = finishcompare(n, res, init) - return n + return finishcompare(n, res, init) } // inline: build boolean expression comparing element by element @@ -3442,8 +3466,7 @@ func walkcompare(n ir.Node, init *ir.Nodes) ir.Node { a2 := typecheck(ir.Nod(ir.OAS, t, cmpr), ctxStmt) init.Append(a1, a2) } - n = finishcompare(n, expr, init) - return n + return finishcompare(n, expr, init) } func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node { @@ -3455,7 +3478,7 @@ func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node { return conv(n, t) } -func walkcompareInterface(n ir.Node, init *ir.Nodes) ir.Node { +func walkcompareInterface(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { n.SetRight(cheapexpr(n.Right(), init)) n.SetLeft(cheapexpr(n.Left(), init)) eqtab, eqdata := eqinterface(n.Left(), n.Right()) @@ -3469,7 +3492,7 @@ func walkcompareInterface(n ir.Node, init *ir.Nodes) ir.Node { return finishcompare(n, cmp, init) } -func walkcompareString(n ir.Node, init *ir.Nodes) ir.Node { +func walkcompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { // Rewrite comparisons to short constant strings as length+byte-wise comparisons. var cs, ncs ir.Node // const string, non-const string switch { @@ -3594,7 +3617,7 @@ func walkcompareString(n ir.Node, init *ir.Nodes) ir.Node { // The result of finishcompare MUST be assigned back to n, e.g. // n.Left = finishcompare(n.Left, x, r, init) -func finishcompare(n, r ir.Node, init *ir.Nodes) ir.Node { +func finishcompare(n *ir.BinaryExpr, r ir.Node, init *ir.Nodes) ir.Node { r = typecheck(r, ctxExpr) r = conv(r, n.Type()) r = walkexpr(r, init) @@ -3669,7 +3692,7 @@ func bounded(n ir.Node, max int64) bool { } // usemethod checks interface method calls for uses of reflect.Type.Method. -func usemethod(n ir.Node) { +func usemethod(n *ir.CallExpr) { t := n.Left().Type() // Looking for either of: @@ -3714,7 +3737,7 @@ func usemethod(n ir.Node) { } } -func usefield(n ir.Node) { +func usefield(n *ir.SelectorExpr) { if objabi.Fieldtrack_enabled == 0 { return } @@ -3736,7 +3759,7 @@ func usefield(n ir.Node) { if t.IsPtr() { t = t.Elem() } - field := n.(*ir.SelectorExpr).Selection + field := n.Selection if field == nil { base.Fatalf("usefield %v %v without paramfld", n.Left().Type(), n.Sym()) } @@ -3871,7 +3894,7 @@ var wrapCall_prgen int // The result of wrapCall MUST be assigned back to n, e.g. // n.Left = wrapCall(n.Left, init) -func wrapCall(n ir.Node, init *ir.Nodes) ir.Node { +func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node { if n.Init().Len() != 0 { walkstmtlist(n.Init().Slice()) init.AppendNodes(n.PtrInit()) @@ -3893,9 +3916,9 @@ func wrapCall(n ir.Node, init *ir.Nodes) ir.Node { var funcArgs []*ir.Field for i, arg := range n.List().Slice() { s := lookupN("a", i) - if !isBuiltinCall && arg.Op() == ir.OCONVNOP && arg.Type().IsUintptr() && arg.Left().Type().IsUnsafePtr() { + if !isBuiltinCall && arg.Op() == ir.OCONVNOP && arg.Type().IsUintptr() && arg.(*ir.ConvExpr).Left().Type().IsUnsafePtr() { origArgs[i] = arg - arg = arg.Left() + arg = arg.(*ir.ConvExpr).Left() n.List().SetIndex(i, arg) } funcArgs = append(funcArgs, symfield(s, arg.Type())) @@ -3966,10 +3989,10 @@ func canMergeLoads() bool { // isRuneCount reports whether n is of the form len([]rune(string)). // These are optimized into a call to runtime.countrunes. 
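
The pattern isRuneCount recognizes, with its observable equivalence; once the rewrite fires, the []rune slice is never allocated:

    package main

    import (
        "fmt"
        "unicode/utf8"
    )

    func main() {
        s := "héllo"
        // len([]rune(s)) is compiled to a runtime countrunes call.
        fmt.Println(len([]rune(s)), utf8.RuneCountInString(s)) // 5 5
    }
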
func isRuneCount(n ir.Node) bool { - return base.Flag.N == 0 && !instrumenting && n.Op() == ir.OLEN && n.Left().Op() == ir.OSTR2RUNES + return base.Flag.N == 0 && !instrumenting && n.Op() == ir.OLEN && n.(*ir.UnaryExpr).Left().Op() == ir.OSTR2RUNES } -func walkCheckPtrAlignment(n ir.Node, init *ir.Nodes, count ir.Node) ir.Node { +func walkCheckPtrAlignment(n *ir.ConvExpr, init *ir.Nodes, count ir.Node) ir.Node { if !n.Type().IsPtr() { base.Fatalf("expected pointer type: %v", n.Type()) } @@ -3997,7 +4020,7 @@ func walkCheckPtrAlignment(n ir.Node, init *ir.Nodes, count ir.Node) ir.Node { var walkCheckPtrArithmeticMarker byte -func walkCheckPtrArithmetic(n ir.Node, init *ir.Nodes) ir.Node { +func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node { // Calling cheapexpr(n, init) below leads to a recursive call // to walkexpr, which leads us back here again. Use n.Opt to // prevent infinite loops. @@ -4046,16 +4069,16 @@ func walkCheckPtrArithmetic(n ir.Node, init *ir.Nodes) ir.Node { } walk(n.Left()) - n = cheapexpr(n, init) + cheap := cheapexpr(n, init) slice := mkdotargslice(types.NewSlice(types.Types[types.TUNSAFEPTR]), originals) slice.SetEsc(EscNone) - init.Append(mkcall("checkptrArithmetic", nil, init, convnop(n, types.Types[types.TUNSAFEPTR]), slice)) + init.Append(mkcall("checkptrArithmetic", nil, init, convnop(cheap, types.Types[types.TUNSAFEPTR]), slice)) // TODO(khr): Mark backing store of slice as dead. This will allow us to reuse // the backing store for multiple calls to checkptrArithmetic. - return n + return cheap } // checkPtr reports whether pointer checking should be enabled for From 4e8f1e139f5c69a1d596a54b035d6fc4fb08b94d Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 10 Dec 2020 18:47:09 -0500 Subject: [PATCH 188/474] [dev.regabi] cmd/compile: cleanup for concrete types - sinit An automated rewrite will add concrete type assertions after a test of n.Op(), when n can be safely type-asserted (meaning, n is not reassigned a different type, n is not reassigned and then used outside the scope of the type assertion, and so on). This sequence of CLs handles the code that the automated rewrite does not: adding specific types to function arguments, adjusting code not to call n.Left() etc when n may have multiple representations, and so on. This CL focuses on sinit.go. Passes buildall w/ toolstash -cmp. Change-Id: I3e9458e69a7a9b3f2fe139382bf961bc4473cc42 Reviewed-on: https://go-review.googlesource.com/c/go/+/277928 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/iexport.go | 6 +- src/cmd/compile/internal/gc/initorder.go | 2 +- src/cmd/compile/internal/gc/inl.go | 3 +- src/cmd/compile/internal/gc/obj.go | 25 +-- src/cmd/compile/internal/gc/sinit.go | 214 ++++++++++++++--------- src/cmd/compile/internal/gc/ssa.go | 3 +- src/cmd/compile/internal/gc/typecheck.go | 14 +- src/cmd/compile/internal/ir/expr.go | 40 ++--- src/cmd/compile/internal/ir/fmt.go | 14 +- src/cmd/compile/internal/ir/node.go | 2 - src/cmd/compile/internal/ir/node_gen.go | 8 +- 11 files changed, 194 insertions(+), 137 deletions(-) diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index eac9f29e6550a..b54eeca7cb75a 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -1266,10 +1266,12 @@ func (w *exportWriter) expr(n ir.Node) { // Special case: explicit name of func (*T) method(...) 
is turned into pkg.(*T).method, // but for export, this should be rendered as (*pkg.T).meth. // These nodes have the special property that they are names with a left OTYPE and a right ONAME. + n := n.(*ir.MethodExpr) w.op(ir.OXDOT) w.pos(n.Pos()) - w.expr(n.Left()) // n.Left.Op == OTYPE - w.selector(n.Right().Sym()) + w.op(ir.OTYPE) + w.typ(n.T) // n.Left.Op == OTYPE + w.selector(n.Method.Sym) case ir.ONAME: // Package scope name. diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go index 9a07ca71bd693..1b21d92f4b7b3 100644 --- a/src/cmd/compile/internal/gc/initorder.go +++ b/src/cmd/compile/internal/gc/initorder.go @@ -79,7 +79,7 @@ type InitOrder struct { func initOrder(l []ir.Node) []ir.Node { s := InitSchedule{ initplans: make(map[ir.Node]*InitPlan), - inittemps: make(map[ir.Node]ir.Node), + inittemps: make(map[ir.Node]*ir.Name), } o := InitOrder{ blocking: make(map[ir.Node][]ir.Node), diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 8467c20833589..e1308718aa630 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -640,11 +640,12 @@ func inlCallee(fn ir.Node) *ir.Func { fn = staticValue(fn) switch fn.Op() { case ir.OMETHEXPR: + fn := fn.(*ir.MethodExpr) n := methodExprName(fn) // Check that receiver type matches fn.Left. // TODO(mdempsky): Handle implicit dereference // of pointer receiver argument? - if n == nil || !types.Identical(n.Type().Recv().Type, fn.Left().Type()) { + if n == nil || !types.Identical(n.Type().Recv().Type, fn.T) { return nil } return n.Func() diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index c34a86d4ebe9f..042b625fc96d8 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -205,13 +205,14 @@ func addptabs() { } for _, exportn := range exportlist { s := exportn.Sym() - n := ir.AsNode(s.Def) - if n == nil { + nn := ir.AsNode(s.Def) + if nn == nil { continue } - if n.Op() != ir.ONAME { + if nn.Op() != ir.ONAME { continue } + n := nn.(*ir.Name) if !types.IsExported(s.Name) { continue } @@ -228,7 +229,7 @@ func addptabs() { } } -func dumpGlobal(n ir.Node) { +func dumpGlobal(n *ir.Name) { if n.Type() == nil { base.Fatalf("external %v nil type\n", n) } @@ -271,7 +272,7 @@ func dumpglobls() { for _, n := range externdcl { switch n.Op() { case ir.ONAME: - dumpGlobal(n) + dumpGlobal(n.(*ir.Name)) case ir.OLITERAL: dumpGlobalConst(n) } @@ -475,7 +476,7 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj. var slicedataGen int -func slicedata(pos src.XPos, s string) ir.Node { +func slicedata(pos src.XPos, s string) *ir.Name { slicedataGen++ symname := fmt.Sprintf(".gobytes.%d", slicedataGen) sym := types.LocalPkg.Lookup(symname) @@ -489,7 +490,7 @@ func slicedata(pos src.XPos, s string) ir.Node { return symnode } -func slicebytes(nam ir.Node, s string) { +func slicebytes(nam *ir.Name, s string) { if nam.Op() != ir.ONAME { base.Fatalf("slicebytes %v", nam) } @@ -529,8 +530,8 @@ func dsymptrWeakOff(s *obj.LSym, off int, x *obj.LSym) int { } // slicesym writes a static slice symbol {&arr, lencap, lencap} to n. -// arr must be an ONAME. slicesym does not modify n. -func slicesym(n, arr ir.Node, lencap int64) { +// slicesym does not modify n. 
+func slicesym(n, arr *ir.Name, lencap int64) { s := n.Sym().Linksym() off := n.Offset() if arr.Op() != ir.ONAME { @@ -543,7 +544,7 @@ func slicesym(n, arr ir.Node, lencap int64) { // addrsym writes the static address of a to n. a must be an ONAME. // Neither n nor a is modified. -func addrsym(n, a ir.Node) { +func addrsym(n, a *ir.Name) { if n.Op() != ir.ONAME { base.Fatalf("addrsym n op %v", n.Op()) } @@ -559,7 +560,7 @@ func addrsym(n, a ir.Node) { // pfuncsym writes the static address of f to n. f must be a global function. // Neither n nor f is modified. -func pfuncsym(n, f ir.Node) { +func pfuncsym(n, f *ir.Name) { if n.Op() != ir.ONAME { base.Fatalf("pfuncsym n op %v", n.Op()) } @@ -575,7 +576,7 @@ func pfuncsym(n, f ir.Node) { // litsym writes the static literal c to n. // Neither n nor c is modified. -func litsym(n, c ir.Node, wid int) { +func litsym(n *ir.Name, c ir.Node, wid int) { if n.Op() != ir.ONAME { base.Fatalf("litsym n op %v", n.Op()) } diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index b3f211ff75854..cfda4afcd850c 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -32,7 +32,7 @@ type InitSchedule struct { out []ir.Node initplans map[ir.Node]*InitPlan - inittemps map[ir.Node]ir.Node + inittemps map[ir.Node]*ir.Name } func (s *InitSchedule) append(n ir.Node) { @@ -51,55 +51,57 @@ func (s *InitSchedule) staticInit(n ir.Node) { // tryStaticInit attempts to statically execute an initialization // statement and reports whether it succeeded. -func (s *InitSchedule) tryStaticInit(n ir.Node) bool { +func (s *InitSchedule) tryStaticInit(nn ir.Node) bool { // Only worry about simple "l = r" assignments. Multiple // variable/expression OAS2 assignments have already been // replaced by multiple simple OAS assignments, and the other // OAS2* assignments mostly necessitate dynamic execution // anyway. - if n.Op() != ir.OAS { + if nn.Op() != ir.OAS { return false } + n := nn.(*ir.AssignStmt) if ir.IsBlank(n.Left()) && !anySideEffects(n.Right()) { // Discard. return true } lno := setlineno(n) defer func() { base.Pos = lno }() - return s.staticassign(n.Left(), n.Right()) + return s.staticassign(n.Left().(*ir.Name), n.Right()) } // like staticassign but we are copying an already // initialized value r. 
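
The situation staticcopy handles, at the source level: a global initialized from another global whose value is already known statically, so its data can be emitted at compile time with no init code (a sketch):

    package main

    import "fmt"

    var a = [2]int{1, 2}
    var b = a // staticcopy can replay a's init plan into b's data

    func main() { fmt.Println(b) } // [1 2]
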
-func (s *InitSchedule) staticcopy(l ir.Node, r ir.Node) bool { - if r.Op() != ir.ONAME && r.Op() != ir.OMETHEXPR { - return false - } - if r.Class() == ir.PFUNC { - pfuncsym(l, r) +func (s *InitSchedule) staticcopy(l *ir.Name, rn *ir.Name) bool { + if rn.Class() == ir.PFUNC { + pfuncsym(l, rn) return true } - if r.Class() != ir.PEXTERN || r.Sym().Pkg != types.LocalPkg { + if rn.Class() != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg { return false } - if r.Name().Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value + if rn.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value return false } - if r.Name().Defn.Op() != ir.OAS { + if rn.Defn.Op() != ir.OAS { return false } - if r.Type().IsString() { // perhaps overwritten by cmd/link -X (#34675) + if rn.Type().IsString() { // perhaps overwritten by cmd/link -X (#34675) return false } - orig := r - r = r.Name().Defn.Right() + orig := rn + r := rn.Defn.(*ir.AssignStmt).Right() for r.Op() == ir.OCONVNOP && !types.Identical(r.Type(), l.Type()) { - r = r.Left() + r = r.(*ir.ConvExpr).Left() } switch r.Op() { - case ir.ONAME, ir.OMETHEXPR: + case ir.OMETHEXPR: + r = r.(*ir.MethodExpr).FuncName() + fallthrough + case ir.ONAME: + r := r.(*ir.Name) if s.staticcopy(l, r) { return true } @@ -120,6 +122,7 @@ func (s *InitSchedule) staticcopy(l ir.Node, r ir.Node) bool { case ir.OADDR: if a := r.Left(); a.Op() == ir.ONAME { + a := a.(*ir.Name) addrsym(l, a) return true } @@ -141,7 +144,7 @@ func (s *InitSchedule) staticcopy(l ir.Node, r ir.Node) bool { case ir.OARRAYLIT, ir.OSTRUCTLIT: p := s.initplans[r] - n := ir.Copy(l) + n := ir.Copy(l).(*ir.Name) for i := range p.E { e := &p.E[i] n.SetOffset(l.Offset() + e.Xoffset) @@ -150,13 +153,17 @@ func (s *InitSchedule) staticcopy(l ir.Node, r ir.Node) bool { litsym(n, e.Expr, int(n.Type().Width)) continue } - ll := ir.SepCopy(n) - if s.staticcopy(ll, e.Expr) { + ll := ir.SepCopy(n).(*ir.Name) + x := e.Expr + if x.Op() == ir.OMETHEXPR { + x = x.(*ir.MethodExpr).FuncName() + } + if x.Op() == ir.ONAME && s.staticcopy(ll, x.(*ir.Name)) { continue } // Requires computation, but we're // copying someone else's computation. - rr := ir.SepCopy(orig) + rr := ir.SepCopy(orig).(*ir.Name) rr.SetType(ll.Type()) rr.SetOffset(rr.Offset() + e.Xoffset) setlineno(rr) @@ -169,15 +176,20 @@ func (s *InitSchedule) staticcopy(l ir.Node, r ir.Node) bool { return false } -func (s *InitSchedule) staticassign(l ir.Node, r ir.Node) bool { +func (s *InitSchedule) staticassign(l *ir.Name, r ir.Node) bool { for r.Op() == ir.OCONVNOP { - r = r.Left() + r = r.(*ir.ConvExpr).Left() } switch r.Op() { - case ir.ONAME, ir.OMETHEXPR: + case ir.ONAME: + r := r.(*ir.Name) return s.staticcopy(l, r) + case ir.OMETHEXPR: + r := r.(*ir.MethodExpr) + return s.staticcopy(l, r.FuncName()) + case ir.ONIL: return true @@ -236,7 +248,7 @@ func (s *InitSchedule) staticassign(l ir.Node, r ir.Node) bool { s.initplan(r) p := s.initplans[r] - n := ir.Copy(l) + n := ir.Copy(l).(*ir.Name) for i := range p.E { e := &p.E[i] n.SetOffset(l.Offset() + e.Xoffset) @@ -246,7 +258,7 @@ func (s *InitSchedule) staticassign(l ir.Node, r ir.Node) bool { continue } setlineno(e.Expr) - a := ir.SepCopy(n) + a := ir.SepCopy(n).(*ir.Name) if !s.staticassign(a, e.Expr) { s.append(ir.Nod(ir.OAS, a, e.Expr)) } @@ -274,9 +286,9 @@ func (s *InitSchedule) staticassign(l ir.Node, r ir.Node) bool { // If you change something here, change it there, and vice versa. 
// Determine the underlying concrete type and value we are converting from. - val := r + val := ir.Node(r) for val.Op() == ir.OCONVIFACE { - val = val.Left() + val = val.(*ir.ConvExpr).Left() } if val.Type().IsInterface() { @@ -290,7 +302,7 @@ func (s *InitSchedule) staticassign(l ir.Node, r ir.Node) bool { markTypeUsedInInterface(val.Type(), l.Sym().Linksym()) - var itab ir.Node + var itab *ir.AddrExpr if l.Type().IsEmptyInterface() { itab = typename(val.Type()) } else { @@ -298,10 +310,10 @@ func (s *InitSchedule) staticassign(l ir.Node, r ir.Node) bool { } // Create a copy of l to modify while we emit data. - n := ir.Copy(l) + n := ir.Copy(l).(*ir.Name) // Emit itab, advance offset. - addrsym(n, itab.Left()) // itab is an OADDR node + addrsym(n, itab.Left().(*ir.Name)) n.SetOffset(n.Offset() + int64(Widthptr)) // Emit data. @@ -313,7 +325,7 @@ func (s *InitSchedule) staticassign(l ir.Node, r ir.Node) bool { // Copy val directly into n. n.SetType(val.Type()) setlineno(val) - a := ir.SepCopy(n) + a := ir.SepCopy(n).(*ir.Name) if !s.staticassign(a, val) { s.append(ir.Nod(ir.OAS, a, val)) } @@ -368,7 +380,7 @@ var statuniqgen int // name generator for static temps // staticname returns a name backed by a (writable) static data symbol. // Use readonlystaticname for read-only node. -func staticname(t *types.Type) ir.Node { +func staticname(t *types.Type) *ir.Name { // Don't use lookupN; it interns the resulting string, but these are all unique. n := NewName(lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen))) statuniqgen++ @@ -379,15 +391,19 @@ func staticname(t *types.Type) ir.Node { } // readonlystaticname returns a name backed by a (writable) static data symbol. -func readonlystaticname(t *types.Type) ir.Node { +func readonlystaticname(t *types.Type) *ir.Name { n := staticname(t) n.MarkReadonly() n.Sym().Linksym().Set(obj.AttrContentAddressable, true) return n } -func isSimpleName(n ir.Node) bool { - return (n.Op() == ir.ONAME || n.Op() == ir.OMETHEXPR) && n.Class() != ir.PAUTOHEAP && n.Class() != ir.PEXTERN +func isSimpleName(nn ir.Node) bool { + if nn.Op() != ir.ONAME { + return false + } + n := nn.(*ir.Name) + return n.Class() != ir.PAUTOHEAP && n.Class() != ir.PEXTERN } func litas(l ir.Node, r ir.Node, init *ir.Nodes) { @@ -428,14 +444,15 @@ func getdyn(n ir.Node, top bool) initGenType { case ir.OARRAYLIT, ir.OSTRUCTLIT: } + lit := n.(*ir.CompLitExpr) var mode initGenType - for _, n1 := range n.List().Slice() { + for _, n1 := range lit.List().Slice() { switch n1.Op() { case ir.OKEY: - n1 = n1.Right() + n1 = n1.(*ir.KeyExpr).Right() case ir.OSTRUCTKEY: - n1 = n1.Left() + n1 = n1.(*ir.StructKeyExpr).Left() } mode |= getdyn(n1, false) if mode == initDynamic|initConst { @@ -453,7 +470,7 @@ func isStaticCompositeLiteral(n ir.Node) bool { case ir.OARRAYLIT: for _, r := range n.List().Slice() { if r.Op() == ir.OKEY { - r = r.Right() + r = r.(*ir.KeyExpr).Right() } if !isStaticCompositeLiteral(r) { return false @@ -462,9 +479,7 @@ func isStaticCompositeLiteral(n ir.Node) bool { return true case ir.OSTRUCTLIT: for _, r := range n.List().Slice() { - if r.Op() != ir.OSTRUCTKEY { - base.Fatalf("isStaticCompositeLiteral: rhs not OSTRUCTKEY: %v", r) - } + r := r.(*ir.StructKeyExpr) if !isStaticCompositeLiteral(r.Left()) { return false } @@ -474,9 +489,9 @@ func isStaticCompositeLiteral(n ir.Node) bool { return true case ir.OCONVIFACE: // See staticassign's OCONVIFACE case for comments. 
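[Editorial aside: the OCONVIFACE cases above and below depend on the gc toolchain's two-word interface layout — a type/itab word followed by a data word, the same two words staticassign fills with addrsym. A sketch that peeks at that layout with unsafe; the layout is an internal detail assumed here, not an API guarantee.]

    package main

    import (
        "fmt"
        "unsafe"
    )

    // iface mirrors the assumed two-word layout: itab (or *_type for
    // an empty interface) first, then the data pointer.
    type iface struct {
        tab  unsafe.Pointer
        data unsafe.Pointer
    }

    func main() {
        var x interface{} = 42
        w := (*iface)(unsafe.Pointer(&x))
        fmt.Println(w.tab != nil, w.data != nil) // true true
    }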
- val := n + val := ir.Node(n) for val.Op() == ir.OCONVIFACE { - val = val.Left() + val = val.(*ir.ConvExpr).Left() } if val.Type().IsInterface() { return val.Op() == ir.ONIL @@ -508,7 +523,7 @@ const ( // fixedlit handles struct, array, and slice literals. // TODO: expand documentation. -func fixedlit(ctxt initContext, kind initKind, n ir.Node, var_ ir.Node, init *ir.Nodes) { +func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) { isBlank := var_ == ir.BlankNode var splitnode func(ir.Node) (a ir.Node, value ir.Node) switch n.Op() { @@ -516,11 +531,12 @@ func fixedlit(ctxt initContext, kind initKind, n ir.Node, var_ ir.Node, init *ir var k int64 splitnode = func(r ir.Node) (ir.Node, ir.Node) { if r.Op() == ir.OKEY { - k = indexconst(r.Left()) + kv := r.(*ir.KeyExpr) + k = indexconst(kv.Left()) if k < 0 { - base.Fatalf("fixedlit: invalid index %v", r.Left()) + base.Fatalf("fixedlit: invalid index %v", kv.Left()) } - r = r.Right() + r = kv.Right() } a := ir.Nod(ir.OINDEX, var_, nodintconst(k)) k++ @@ -530,10 +546,8 @@ func fixedlit(ctxt initContext, kind initKind, n ir.Node, var_ ir.Node, init *ir return a, r } case ir.OSTRUCTLIT: - splitnode = func(r ir.Node) (ir.Node, ir.Node) { - if r.Op() != ir.OSTRUCTKEY { - base.Fatalf("fixedlit: rhs not OSTRUCTKEY: %v", r) - } + splitnode = func(rn ir.Node) (ir.Node, ir.Node) { + r := rn.(*ir.StructKeyExpr) if r.Sym().IsBlank() || isBlank { return ir.BlankNode, r.Left() } @@ -553,12 +567,14 @@ func fixedlit(ctxt initContext, kind initKind, n ir.Node, var_ ir.Node, init *ir switch value.Op() { case ir.OSLICELIT: + value := value.(*ir.CompLitExpr) if (kind == initKindStatic && ctxt == inNonInitFunction) || (kind == initKindDynamic && ctxt == inInitFunction) { slicelit(ctxt, value, a, init) continue } case ir.OARRAYLIT, ir.OSTRUCTLIT: + value := value.(*ir.CompLitExpr) fixedlit(ctxt, kind, value, a, init) continue } @@ -570,13 +586,13 @@ func fixedlit(ctxt initContext, kind initKind, n ir.Node, var_ ir.Node, init *ir // build list of assignments: var[index] = expr setlineno(a) - a = ir.Nod(ir.OAS, a, value) - a = typecheck(a, ctxStmt) + as := ir.NewAssignStmt(base.Pos, a, value) + as = typecheck(as, ctxStmt).(*ir.AssignStmt) switch kind { case initKindStatic: - genAsStatic(a) + genAsStatic(as) case initKindDynamic, initKindLocalCode: - a = orderStmtInPlace(a, map[string][]*ir.Name{}) + a = orderStmtInPlace(as, map[string][]*ir.Name{}) a = walkstmt(a) init.Append(a) default: @@ -586,7 +602,7 @@ func fixedlit(ctxt initContext, kind initKind, n ir.Node, var_ ir.Node, init *ir } } -func isSmallSliceLit(n ir.Node) bool { +func isSmallSliceLit(n *ir.CompLitExpr) bool { if n.Op() != ir.OSLICELIT { return false } @@ -596,7 +612,7 @@ func isSmallSliceLit(n ir.Node) bool { return smallintconst(r) && (n.Type().Elem().Width == 0 || ir.Int64Val(r) <= smallArrayBytes/n.Type().Elem().Width) } -func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) { +func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) { // make an array type corresponding the number of elements we have t := types.NewArray(n.Type().Elem(), ir.Int64Val(n.Right())) dowidth(t) @@ -679,7 +695,7 @@ func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) { a = ir.Nod(ir.OAS, temp(t), nil) a = typecheck(a, ctxStmt) init.Append(a) // zero new temp - a = a.Left() + a = a.(*ir.AssignStmt).Left() } else { init.Append(ir.Nod(ir.OVARDEF, a, nil)) } @@ -700,11 +716,12 @@ func slicelit(ctxt initContext, n 
ir.Node, var_ ir.Node, init *ir.Nodes) { var index int64 for _, value := range n.List().Slice() { if value.Op() == ir.OKEY { - index = indexconst(value.Left()) + kv := value.(*ir.KeyExpr) + index = indexconst(kv.Left()) if index < 0 { - base.Fatalf("slicelit: invalid index %v", value.Left()) + base.Fatalf("slicelit: invalid index %v", kv.Left()) } - value = value.Right() + value = kv.Right() } a := ir.Nod(ir.OINDEX, vauto, nodintconst(index)) a.SetBounded(true) @@ -717,6 +734,7 @@ func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) { break case ir.OARRAYLIT, ir.OSTRUCTLIT: + value := value.(*ir.CompLitExpr) k := initKindDynamic if vstat == nil { // Generate both static and dynamic initializations. @@ -748,7 +766,7 @@ func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) { init.Append(a) } -func maplit(n ir.Node, m ir.Node, init *ir.Nodes) { +func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) { // make the map var a := ir.Nod(ir.OMAKE, nil, nil) a.SetEsc(n.Esc()) @@ -760,6 +778,7 @@ func maplit(n ir.Node, m ir.Node, init *ir.Nodes) { // The order pass already removed any dynamic (runtime-computed) entries. // All remaining entries are static. Double-check that. for _, r := range entries { + r := r.(*ir.KeyExpr) if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) { base.Fatalf("maplit: entry is not a literal: %v", r) } @@ -782,9 +801,10 @@ func maplit(n ir.Node, m ir.Node, init *ir.Nodes) { vstatk := readonlystaticname(tk) vstate := readonlystaticname(te) - datak := ir.Nod(ir.OARRAYLIT, nil, nil) - datae := ir.Nod(ir.OARRAYLIT, nil, nil) + datak := ir.NewCompLitExpr(base.Pos, ir.OARRAYLIT, nil, nil) + datae := ir.NewCompLitExpr(base.Pos, ir.OARRAYLIT, nil, nil) for _, r := range entries { + r := r.(*ir.KeyExpr) datak.PtrList().Append(r.Left()) datae.PtrList().Append(r.Right()) } @@ -824,6 +844,7 @@ func maplit(n ir.Node, m ir.Node, init *ir.Nodes) { tmpelem := temp(m.Type().Elem()) for _, r := range entries { + r := r.(*ir.KeyExpr) index, elem := r.Left(), r.Right() setlineno(index) @@ -846,8 +867,12 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { default: base.Fatalf("anylit: not lit, op=%v node=%v", n.Op(), n) - case ir.ONAME, ir.OMETHEXPR: - appendWalkStmt(init, ir.Nod(ir.OAS, var_, n)) + case ir.ONAME: + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, n)) + + case ir.OMETHEXPR: + n := n.(*ir.MethodExpr) + anylit(n.FuncName(), var_, init) case ir.OPTRLIT: if !t.IsPtr() { @@ -870,6 +895,7 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { anylit(n.Left(), var_, init) case ir.OSTRUCTLIT, ir.OARRAYLIT: + n := n.(*ir.CompLitExpr) if !t.IsStruct() && !t.IsArray() { base.Fatalf("anylit: not struct/array") } @@ -906,9 +932,11 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { fixedlit(inInitFunction, initKindLocalCode, n, var_, init) case ir.OSLICELIT: + n := n.(*ir.CompLitExpr) slicelit(inInitFunction, n, var_, init) case ir.OMAPLIT: + n := n.(*ir.CompLitExpr) if !t.IsMap() { base.Fatalf("anylit: not map") } @@ -919,7 +947,7 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { // oaslit handles special composite literal assignments. // It returns true if n's effects have been added to init, // in which case n should be dropped from the program by the caller. 
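[Editorial aside: for map literals whose entries are all static, maplit emits two parallel read-only arrays plus a loop rather than one assignment per entry. Hand-written Go with roughly the shape of that generated code; names are illustrative.]

    package main

    import "fmt"

    // Read-only backing arrays, analogous to the vstatk/vstate
    // symbols maplit creates with readonlystaticname.
    var statk = [3]string{"a", "b", "c"}
    var state = [3]int{1, 2, 3}

    func main() {
        // Shape of the generated code: one make, then a loop,
        // instead of len(statk) separate stores.
        m := make(map[string]int, len(statk))
        for i := range statk {
            m[statk[i]] = state[i]
        }
        fmt.Println(len(m), m["b"]) // 3 2
    }

The payoff is code size: the per-entry data lives in the read-only data segment, and only the small loop is emitted as instructions.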
-func oaslit(n ir.Node, init *ir.Nodes) bool { +func oaslit(n *ir.AssignStmt, init *ir.Nodes) bool { if n.Left() == nil || n.Right() == nil { // not a special composite literal assignment return false @@ -961,14 +989,18 @@ func getlit(lit ir.Node) int { } // stataddr returns the static address of n, if n has one, or else nil. -func stataddr(n ir.Node) ir.Node { +func stataddr(n ir.Node) *ir.Name { if n == nil { return nil } switch n.Op() { - case ir.ONAME, ir.OMETHEXPR: - return ir.SepCopy(n) + case ir.ONAME: + return ir.SepCopy(n).(*ir.Name) + + case ir.OMETHEXPR: + n := n.(*ir.MethodExpr) + return stataddr(n.FuncName()) case ir.ODOT: nam := stataddr(n.Left()) @@ -1018,11 +1050,12 @@ func (s *InitSchedule) initplan(n ir.Node) { var k int64 for _, a := range n.List().Slice() { if a.Op() == ir.OKEY { - k = indexconst(a.Left()) + kv := a.(*ir.KeyExpr) + k = indexconst(kv.Left()) if k < 0 { - base.Fatalf("initplan arraylit: invalid index %v", a.Left()) + base.Fatalf("initplan arraylit: invalid index %v", kv.Left()) } - a = a.Right() + a = kv.Right() } s.addvalue(p, k*n.Type().Elem().Width, a) k++ @@ -1033,6 +1066,7 @@ func (s *InitSchedule) initplan(n ir.Node) { if a.Op() != ir.OSTRUCTKEY { base.Fatalf("initplan structlit") } + a := a.(*ir.StructKeyExpr) if a.Sym().IsBlank() { continue } @@ -1044,6 +1078,7 @@ func (s *InitSchedule) initplan(n ir.Node) { if a.Op() != ir.OKEY { base.Fatalf("initplan maplit") } + a := a.(*ir.KeyExpr) s.addvalue(p, -1, a.Right()) } } @@ -1089,7 +1124,7 @@ func isZero(n ir.Node) bool { case ir.OARRAYLIT: for _, n1 := range n.List().Slice() { if n1.Op() == ir.OKEY { - n1 = n1.Right() + n1 = n1.(*ir.KeyExpr).Right() } if !isZero(n1) { return false @@ -1099,6 +1134,7 @@ func isZero(n ir.Node) bool { case ir.OSTRUCTLIT: for _, n1 := range n.List().Slice() { + n1 := n1.(*ir.StructKeyExpr) if !isZero(n1.Left()) { return false } @@ -1113,7 +1149,7 @@ func isvaluelit(n ir.Node) bool { return n.Op() == ir.OARRAYLIT || n.Op() == ir.OSTRUCTLIT } -func genAsStatic(as ir.Node) { +func genAsStatic(as *ir.AssignStmt) { if as.Left().Type() == nil { base.Fatalf("genAsStatic as.Left not typechecked") } @@ -1123,12 +1159,20 @@ func genAsStatic(as ir.Node) { base.Fatalf("genAsStatic: lhs %v", as.Left()) } - switch { - case as.Right().Op() == ir.OLITERAL: - litsym(nam, as.Right(), int(as.Right().Type().Width)) - case (as.Right().Op() == ir.ONAME || as.Right().Op() == ir.OMETHEXPR) && as.Right().Class() == ir.PFUNC: - pfuncsym(nam, as.Right()) - default: - base.Fatalf("genAsStatic: rhs %v", as.Right()) + switch r := as.Right(); r.Op() { + case ir.OLITERAL: + litsym(nam, r, int(r.Type().Width)) + return + case ir.OMETHEXPR: + r := r.(*ir.MethodExpr) + pfuncsym(nam, r.FuncName()) + return + case ir.ONAME: + r := r.(*ir.Name) + if r.Class() == ir.PFUNC { + pfuncsym(nam, r) + return + } } + base.Fatalf("genAsStatic: rhs %v", as.Right()) } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 4d9073a4b6bde..2a0134703c68d 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -2087,7 +2087,8 @@ func (s *state) expr(n ir.Node) *ssa.Value { aux := n.Left().Sym().Linksym() return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb) case ir.OMETHEXPR: - sym := funcsym(n.Sym()).Linksym() + n := n.(*ir.MethodExpr) + sym := funcsym(n.FuncName().Sym()).Linksym() return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb) case ir.ONAME: if n.Class() == ir.PFUNC { diff --git a/src/cmd/compile/internal/gc/typecheck.go 
b/src/cmd/compile/internal/gc/typecheck.go index 2f3c876c77554..5e56ace7c7157 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -2415,16 +2415,16 @@ func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) { return n } - me := ir.NodAt(n.Pos(), ir.OMETHEXPR, n.Left(), NewName(n.Sym())) - me.SetSym(methodSym(t, n.Sym())) + me := ir.NewMethodExpr(n.Pos(), n.Left().Type(), m) me.SetType(methodfunc(m.Type, n.Left().Type())) - me.SetOffset(0) - me.SetClass(ir.PFUNC) - ir.Node(me).(*ir.MethodExpr).Method = m + f := NewName(methodSym(t, m.Sym)) + f.SetClass(ir.PFUNC) + f.SetType(me.Type()) + me.FuncName_ = f // Issue 25065. Make sure that we emit the symbol for a local method. if base.Ctxt.Flag_dynlink && !inimport && (t.Sym() == nil || t.Sym().Pkg == types.LocalPkg) { - makefuncsym(me.Sym()) + makefuncsym(me.FuncName_.Sym()) } return me @@ -4023,7 +4023,7 @@ func deadcodeexpr(n ir.Node) ir.Node { func getIotaValue() int64 { if i := len(typecheckdefstack); i > 0 { if x := typecheckdefstack[i-1]; x.Op() == ir.OLITERAL { - return x.Iota() + return x.(*ir.Name).Iota() } } diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 36a11dad9a2b3..51262d1e072a4 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -526,35 +526,35 @@ func (n *MakeExpr) SetOp(op Op) { } } -// A MethodExpr is a method value X.M (where X is an expression, not a type). +// A MethodExpr is a method expression T.M (where T is a type). type MethodExpr struct { miniExpr - X Node - M Node - Sym_ *types.Sym - Offset_ int64 - Class_ Class - Method *types.Field + T *types.Type + X_Delete Node + M_Delete Node // TODO(rsc): Delete (breaks toolstash b/c inlining costs go down) + Method *types.Field + FuncName_ *Name } -func NewMethodExpr(pos src.XPos, x, m Node) *MethodExpr { - n := &MethodExpr{X: x, M: m} +func NewMethodExpr(pos src.XPos, t *types.Type, method *types.Field) *MethodExpr { + n := &MethodExpr{T: t, Method: method} n.pos = pos n.op = OMETHEXPR - n.Offset_ = types.BADWIDTH + n.X_Delete = TypeNode(t) // TODO(rsc): Delete. + n.M_Delete = NewNameAt(pos, method.Sym) // TODO(rsc): Delete. return n } -func (n *MethodExpr) Left() Node { return n.X } -func (n *MethodExpr) SetLeft(x Node) { n.X = x } -func (n *MethodExpr) Right() Node { return n.M } -func (n *MethodExpr) SetRight(y Node) { n.M = y } -func (n *MethodExpr) Sym() *types.Sym { return n.Sym_ } -func (n *MethodExpr) SetSym(x *types.Sym) { n.Sym_ = x } -func (n *MethodExpr) Offset() int64 { return n.Offset_ } -func (n *MethodExpr) SetOffset(x int64) { n.Offset_ = x } -func (n *MethodExpr) Class() Class { return n.Class_ } -func (n *MethodExpr) SetClass(x Class) { n.Class_ = x } +func (n *MethodExpr) FuncName() *Name { return n.FuncName_ } +func (n *MethodExpr) Left() Node { panic("MethodExpr.Left") } +func (n *MethodExpr) SetLeft(x Node) { panic("MethodExpr.SetLeft") } +func (n *MethodExpr) Right() Node { panic("MethodExpr.Right") } +func (n *MethodExpr) SetRight(x Node) { panic("MethodExpr.SetRight") } +func (n *MethodExpr) Sym() *types.Sym { panic("MethodExpr.Sym") } +func (n *MethodExpr) Offset() int64 { panic("MethodExpr.Offset") } +func (n *MethodExpr) SetOffset(x int64) { panic("MethodExpr.SetOffset") } +func (n *MethodExpr) Class() Class { panic("MethodExpr.Class") } +func (n *MethodExpr) SetClass(x Class) { panic("MethodExpr.SetClass") } // A NilExpr represents the predefined untyped constant nil. 
// (It may be copied and assigned a type, though.) diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 3cda9c8c38d4b..a6e90a899e8e9 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -624,9 +624,13 @@ func exprFmt(n Node, s fmt.State, prec int) { return } fallthrough - case OPACK, ONONAME, OMETHEXPR: + case OPACK, ONONAME: fmt.Fprint(s, n.Sym()) + case OMETHEXPR: + n := n.(*MethodExpr) + fmt.Fprint(s, n.FuncName().Sym()) + case OTYPE: if n.Type() == nil && n.Sym() != nil { fmt.Fprint(s, n.Sym()) @@ -1139,7 +1143,7 @@ func dumpNode(w io.Writer, n Node, depth int) { dumpNodeHeader(w, n) return - case ONAME, ONONAME, OMETHEXPR: + case ONAME, ONONAME: if n.Sym() != nil { fmt.Fprintf(w, "%+v-%+v", n.Op(), n.Sym()) } else { @@ -1153,6 +1157,12 @@ func dumpNode(w io.Writer, n Node, depth int) { } return + case OMETHEXPR: + n := n.(*MethodExpr) + fmt.Fprintf(w, "%+v-%+v", n.Op(), n.FuncName().Sym()) + dumpNodeHeader(w, n) + return + case OASOP: n := n.(*AssignOpStmt) fmt.Fprintf(w, "%+v-%+v", n.Op(), n.SubOp()) diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index fe6dafe8592c9..bbe53d821eb7a 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -733,8 +733,6 @@ func NodAt(pos src.XPos, op Op, nleft, nright Node) Node { return newNameAt(pos, op, nil) case OMAKECHAN, OMAKEMAP, OMAKESLICE, OMAKESLICECOPY: return NewMakeExpr(pos, op, nleft, nright) - case OMETHEXPR: - return NewMethodExpr(pos, nleft, nright) case ONIL: return NewNilExpr(pos) case OPACK: diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 39d8f03ddc40c..80cc755d1a1b3 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -632,14 +632,14 @@ func (n *MethodExpr) copy() Node { func (n *MethodExpr) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - err = maybeDo(n.M, err, do) + err = maybeDo(n.X_Delete, err, do) + err = maybeDo(n.M_Delete, err, do) return err } func (n *MethodExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) - n.X = maybeEdit(n.X, edit) - n.M = maybeEdit(n.M, edit) + n.X_Delete = maybeEdit(n.X_Delete, edit) + n.M_Delete = maybeEdit(n.M_Delete, edit) } func (n *Name) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } From c76be2a24eb1a07cf731c4a75652e2d5db61aa77 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 17 Dec 2020 00:59:35 -0500 Subject: [PATCH 189/474] [dev.regabi] cmd/compile: add ONAMEOFFSET, delete to-be-deleted fields Breaks toolstash but clearly no effect. 
Change-Id: Ic05bb7f74db170f140cf3b3cd7d629f159e3aae1 Reviewed-on: https://go-review.googlesource.com/c/go/+/278913 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/expr.go | 19 +++++++++++++++---- src/cmd/compile/internal/ir/node.go | 1 + src/cmd/compile/internal/ir/node_gen.go | 19 +++++++++++++++---- src/cmd/compile/internal/ir/op_string.go | 11 ++++++----- 4 files changed, 37 insertions(+), 13 deletions(-) diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 51262d1e072a4..b18975d063378 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -530,8 +530,6 @@ func (n *MakeExpr) SetOp(op Op) { type MethodExpr struct { miniExpr T *types.Type - X_Delete Node - M_Delete Node // TODO(rsc): Delete (breaks toolstash b/c inlining costs go down) Method *types.Field FuncName_ *Name } @@ -540,8 +538,6 @@ func NewMethodExpr(pos src.XPos, t *types.Type, method *types.Field) *MethodExpr n := &MethodExpr{T: t, Method: method} n.pos = pos n.op = OMETHEXPR - n.X_Delete = TypeNode(t) // TODO(rsc): Delete. - n.M_Delete = NewNameAt(pos, method.Sym) // TODO(rsc): Delete. return n } @@ -619,6 +615,21 @@ func NewResultExpr(pos src.XPos, typ *types.Type, offset int64) *ResultExpr { func (n *ResultExpr) Offset() int64 { return n.Offset_ } func (n *ResultExpr) SetOffset(x int64) { n.Offset_ = x } +// A NameOffsetExpr refers to an offset within a variable. +// It is like a SelectorExpr but without the field name. +type NameOffsetExpr struct { + miniExpr + Name_ *Name + Offset_ int64 +} + +func NewNameOffsetExpr(pos src.XPos, name *Name, offset int64, typ *types.Type) *NameOffsetExpr { + n := &NameOffsetExpr{Name_: name, Offset_: offset} + n.typ = typ + n.op = ONAMEOFFSET + return n +} + // A SelectorExpr is a selector expression X.Sym. type SelectorExpr struct { miniExpr diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index bbe53d821eb7a..ca894cd5f1167 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -345,6 +345,7 @@ const ( OVARLIVE // variable is alive ORESULT // result of a function call; Xoffset is stack offset OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree. 
+ ONAMEOFFSET // offset within a name // arch-specific opcodes ORETJMP // return to other function diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 80cc755d1a1b3..10dfe3c927137 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -632,14 +632,10 @@ func (n *MethodExpr) copy() Node { func (n *MethodExpr) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) - err = maybeDo(n.X_Delete, err, do) - err = maybeDo(n.M_Delete, err, do) return err } func (n *MethodExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) - n.X_Delete = maybeEdit(n.X_Delete, edit) - n.M_Delete = maybeEdit(n.M_Delete, edit) } func (n *Name) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } @@ -654,6 +650,21 @@ func (n *Name) doChildren(do func(Node) error) error { func (n *Name) editChildren(edit func(Node) Node) { } +func (n *NameOffsetExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *NameOffsetExpr) copy() Node { + c := *n + c.init = c.init.Copy() + return &c +} +func (n *NameOffsetExpr) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + return err +} +func (n *NameOffsetExpr) editChildren(edit func(Node) Node) { + editList(n.init, edit) +} + func (n *NilExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *NilExpr) copy() Node { c := *n diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go index 33b177d64fb83..f23e08c47cbf2 100644 --- a/src/cmd/compile/internal/ir/op_string.go +++ b/src/cmd/compile/internal/ir/op_string.go @@ -158,14 +158,15 @@ func _() { _ = x[OVARLIVE-147] _ = x[ORESULT-148] _ = x[OINLMARK-149] - _ = x[ORETJMP-150] - _ = x[OGETG-151] - _ = x[OEND-152] + _ = x[ONAMEOFFSET-150] + _ = x[ORETJMP-151] + _ = x[OGETG-152] + _ = x[OEND-153] } -const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCLOSUREREADCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND" +const _Op_name = 
"XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCLOSUREREADCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKNAMEOFFSETRETJMPGETGEND" -var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 477, 480, 486, 490, 493, 497, 502, 507, 513, 518, 522, 527, 535, 543, 549, 558, 569, 576, 580, 587, 595, 599, 603, 607, 614, 621, 629, 635, 643, 651, 656, 661, 665, 673, 678, 682, 685, 693, 697, 699, 704, 706, 711, 717, 723, 729, 735, 740, 744, 751, 757, 762, 768, 774, 781, 786, 790, 795, 799, 810, 815, 823, 829, 836, 843, 849, 856, 862, 866, 869} +var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 477, 480, 486, 490, 493, 497, 502, 507, 513, 518, 522, 527, 535, 543, 549, 558, 569, 576, 580, 587, 595, 599, 603, 607, 614, 621, 629, 635, 643, 651, 656, 661, 665, 673, 678, 682, 685, 693, 697, 699, 704, 706, 711, 717, 723, 729, 735, 740, 744, 751, 757, 762, 768, 774, 781, 786, 790, 795, 799, 810, 815, 823, 829, 836, 843, 849, 856, 866, 872, 876, 879} func (i Op) String() string { if i >= Op(len(_Op_index)-1) { From ffb0cb7044cb412ce8c2f88740d8c7ea2af05837 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 17 Dec 2020 02:56:26 -0500 Subject: [PATCH 190/474] [dev.regabi] cmd/compile: remove uses of Name.Offset, Name.copy For globals, Name.Offset is used as a way to address a field within a global during static initialization. This CL replaces that use with a separate NameOffsetExpr (ONAMEOFFSET) node. For locals, Name.Offset is the stack frame offset. This CL calls it that (FrameOffset, SetFrameOffset). Now there is no longer any use of Name.Offset or Name.SetOffset. And now that copies of Names are not being made to change their offsets, we can lock down use of ir.Copy on Names. The only remaining uses are during inlining and in handling generic system functions. At both those times you do want to create a new name and that can be made explicit by calling the new CloneName method instead. ir.Copy on a name now panics. Passes buildall w/ toolstash -cmp. 
Change-Id: I0b0a25b9d93aeff7cf4e4025ac53faec7dc8603b Reviewed-on: https://go-review.googlesource.com/c/go/+/278914 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- .../compile/internal/gc/abiutilsaux_test.go | 2 +- src/cmd/compile/internal/gc/alg.go | 2 +- src/cmd/compile/internal/gc/align.go | 6 +- src/cmd/compile/internal/gc/dcl.go | 2 +- src/cmd/compile/internal/gc/escape.go | 13 +- src/cmd/compile/internal/gc/inl.go | 14 +- src/cmd/compile/internal/gc/obj.go | 52 ++++--- src/cmd/compile/internal/gc/order.go | 2 +- src/cmd/compile/internal/gc/pgen.go | 16 ++- src/cmd/compile/internal/gc/pgen_test.go | 4 +- src/cmd/compile/internal/gc/plive.go | 8 +- src/cmd/compile/internal/gc/racewalk.go | 4 +- src/cmd/compile/internal/gc/sinit.go | 136 +++++++++--------- src/cmd/compile/internal/gc/ssa.go | 57 +++++--- src/cmd/compile/internal/gc/subr.go | 8 +- src/cmd/compile/internal/gc/typecheck.go | 7 + src/cmd/compile/internal/gc/walk.go | 8 +- src/cmd/compile/internal/ir/fmt.go | 4 + src/cmd/compile/internal/ir/mknode.go | 29 ++-- src/cmd/compile/internal/ir/name.go | 20 ++- src/cmd/compile/internal/ir/node_gen.go | 5 +- 21 files changed, 223 insertions(+), 176 deletions(-) diff --git a/src/cmd/compile/internal/gc/abiutilsaux_test.go b/src/cmd/compile/internal/gc/abiutilsaux_test.go index 5489a512d260c..fd0b197207be0 100644 --- a/src/cmd/compile/internal/gc/abiutilsaux_test.go +++ b/src/cmd/compile/internal/gc/abiutilsaux_test.go @@ -76,7 +76,7 @@ func tokenize(src string) []string { func verifyParamResultOffset(t *testing.T, f *types.Field, r ABIParamAssignment, which string, idx int) int { n := ir.AsNode(f.Nname).(*ir.Name) - if n.Offset() != int64(r.Offset) { + if n.FrameOffset() != int64(r.Offset) { t.Errorf("%s %d: got offset %d wanted %d t=%v", which, idx, r.Offset, n.Offset(), f.Type) return 1 diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index 25dadffc24ed1..f03aec3237542 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -878,7 +878,7 @@ func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node { return call } -func eqmemfunc(size int64, t *types.Type) (fn ir.Node, needsize bool) { +func eqmemfunc(size int64, t *types.Type) (fn *ir.Name, needsize bool) { switch size { default: fn = syslook("memequal") diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go index 9944a3a38ae21..95a5dbef29f24 100644 --- a/src/cmd/compile/internal/gc/align.go +++ b/src/cmd/compile/internal/gc/align.go @@ -128,10 +128,10 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 { // It's possible the ordering has changed and this is // now the common case. I'm not sure. 
if n.Name().Stackcopy != nil { - n.Name().Stackcopy.SetOffset(o) - n.SetOffset(0) + n.Name().Stackcopy.SetFrameOffset(o) + n.SetFrameOffset(0) } else { - n.SetOffset(o) + n.SetFrameOffset(o) } } diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index a2c9edb481044..34ba3728439df 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -96,7 +96,7 @@ func declare(n *ir.Name, ctxt ir.Class) { } if ctxt == ir.PAUTO { - n.SetOffset(0) + n.SetFrameOffset(0) } if s.Block == types.Block { diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 5124af945efea..235cef47eaaa9 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -515,6 +515,10 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { } e.flow(k, e.oldLoc(n)) + case ir.ONAMEOFFSET: + n := n.(*ir.NameOffsetExpr) + e.expr(k, n.Name_) + case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT: n := n.(*ir.UnaryExpr) e.discard(n.Left()) @@ -778,6 +782,9 @@ func (e *Escape) addr(n ir.Node) EscHole { break } k = e.oldLoc(n).asHole() + case ir.ONAMEOFFSET: + n := n.(*ir.NameOffsetExpr) + e.addr(n.Name_) case ir.ODOT: n := n.(*ir.SelectorExpr) k = e.addr(n.Left()) @@ -2008,7 +2015,7 @@ func moveToHeap(n *ir.Name) { // in addition to the copy in the heap that may live longer than // the function. if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT { - if n.Offset() == types.BADWIDTH { + if n.FrameOffset() == types.BADWIDTH { base.Fatalf("addrescapes before param assignment") } @@ -2018,7 +2025,7 @@ func moveToHeap(n *ir.Name) { // so that analyses of the local (on-stack) variables use it. stackcopy := NewName(n.Sym()) stackcopy.SetType(n.Type()) - stackcopy.SetOffset(n.Offset()) + stackcopy.SetFrameOffset(n.FrameOffset()) stackcopy.SetClass(n.Class()) stackcopy.Heapaddr = heapaddr if n.Class() == ir.PPARAMOUT { @@ -2055,7 +2062,7 @@ func moveToHeap(n *ir.Name) { // Modify n in place so that uses of n now mean indirection of the heapaddr. n.SetClass(ir.PAUTOHEAP) - n.SetOffset(0) + n.SetFrameOffset(0) n.Heapaddr = heapaddr n.SetEsc(EscHeap) if base.Flag.LowerM != 0 { diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index e1308718aa630..b571c2b9148b5 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -1220,11 +1220,19 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { if n.Sym() != nil { return n } + if n, ok := n.(*ir.Name); ok && n.Op() == ir.OLITERAL { + // This happens for unnamed OLITERAL. + // which should really not be a *Name, but for now it is. + // ir.Copy(n) is not allowed generally and would panic below, + // but it's OK in this situation. + n = n.CloneName() + n.SetPos(subst.updatedPos(n.Pos())) + return n + } - // Since we don't handle bodies with closures, this return is guaranteed to belong to the current inlined function. - - // dump("Return before substitution", n); case ir.ORETURN: + // Since we don't handle bodies with closures, + // this return is guaranteed to belong to the current inlined function. 
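[Editorial aside: the lock-down described in the commit message can be pictured with a tiny analogue — a generic Copy that panics on names, plus an explicit CloneName for the few callers that genuinely want a fresh variable. Illustrative only; not the real ir.Copy.]

    package main

    import "fmt"

    type Node interface{ isNode() }

    type Name struct{ Sym string }

    func (*Name) isNode() {}

    type BinExpr struct{ X, Y Node }

    func (*BinExpr) isNode() {}

    // Copy duplicates a node but panics on *Name: a Name identifies a
    // variable, so silent duplication is almost always a bug.
    func Copy(n Node) Node {
        switch n := n.(type) {
        case *Name:
            panic("Copy of Name " + n.Sym)
        case *BinExpr:
            c := *n
            return &c
        }
        panic("unknown node")
    }

    // CloneName is the explicit opt-in for the few callers (inlining,
    // race instrumentation) that really do want a fresh variable.
    func (n *Name) CloneName() *Name {
        c := *n
        return &c
    }

    func main() {
        n := &Name{Sym: "x"}
        fmt.Println(Copy(&BinExpr{}) != nil, n.CloneName().Sym) // true x
    }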
init := subst.list(n.Init()) if len(subst.retvars) != 0 && n.List().Len() != 0 { as := ir.Nod(ir.OAS2, nil, nil) diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 042b625fc96d8..cd1500d1edffb 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -490,11 +490,11 @@ func slicedata(pos src.XPos, s string) *ir.Name { return symnode } -func slicebytes(nam *ir.Name, s string) { +func slicebytes(nam *ir.Name, off int64, s string) { if nam.Op() != ir.ONAME { base.Fatalf("slicebytes %v", nam) } - slicesym(nam, slicedata(nam.Pos(), s), int64(len(s))) + slicesym(nam, off, slicedata(nam.Pos(), s), int64(len(s))) } func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int { @@ -529,22 +529,21 @@ func dsymptrWeakOff(s *obj.LSym, off int, x *obj.LSym) int { return off } -// slicesym writes a static slice symbol {&arr, lencap, lencap} to n. +// slicesym writes a static slice symbol {&arr, lencap, lencap} to n+noff. // slicesym does not modify n. -func slicesym(n, arr *ir.Name, lencap int64) { +func slicesym(n *ir.Name, noff int64, arr *ir.Name, lencap int64) { s := n.Sym().Linksym() - off := n.Offset() if arr.Op() != ir.ONAME { base.Fatalf("slicesym non-name arr %v", arr) } - s.WriteAddr(base.Ctxt, off, Widthptr, arr.Sym().Linksym(), arr.Offset()) - s.WriteInt(base.Ctxt, off+sliceLenOffset, Widthptr, lencap) - s.WriteInt(base.Ctxt, off+sliceCapOffset, Widthptr, lencap) + s.WriteAddr(base.Ctxt, noff, Widthptr, arr.Sym().Linksym(), 0) + s.WriteInt(base.Ctxt, noff+sliceLenOffset, Widthptr, lencap) + s.WriteInt(base.Ctxt, noff+sliceCapOffset, Widthptr, lencap) } // addrsym writes the static address of a to n. a must be an ONAME. // Neither n nor a is modified. -func addrsym(n, a *ir.Name) { +func addrsym(n *ir.Name, noff int64, a *ir.Name, aoff int64) { if n.Op() != ir.ONAME { base.Fatalf("addrsym n op %v", n.Op()) } @@ -555,12 +554,12 @@ func addrsym(n, a *ir.Name) { base.Fatalf("addrsym a op %v", a.Op()) } s := n.Sym().Linksym() - s.WriteAddr(base.Ctxt, n.Offset(), Widthptr, a.Sym().Linksym(), a.Offset()) + s.WriteAddr(base.Ctxt, noff, Widthptr, a.Sym().Linksym(), aoff) } // pfuncsym writes the static address of f to n. f must be a global function. // Neither n nor f is modified. -func pfuncsym(n, f *ir.Name) { +func pfuncsym(n *ir.Name, noff int64, f *ir.Name) { if n.Op() != ir.ONAME { base.Fatalf("pfuncsym n op %v", n.Op()) } @@ -571,21 +570,18 @@ func pfuncsym(n, f *ir.Name) { base.Fatalf("pfuncsym class not PFUNC %d", f.Class()) } s := n.Sym().Linksym() - s.WriteAddr(base.Ctxt, n.Offset(), Widthptr, funcsym(f.Sym()).Linksym(), f.Offset()) + s.WriteAddr(base.Ctxt, noff, Widthptr, funcsym(f.Sym()).Linksym(), 0) } // litsym writes the static literal c to n. // Neither n nor c is modified. 
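[Editorial aside: litsym, whose updated body follows, lowers a go/constant value into raw bytes at an offset within a symbol. The same dispatch on constant kinds, sketched against a plain byte slice instead of an obj.LSym; offsets and widths are assumed little-endian 64-bit for simplicity.]

    package main

    import (
        "encoding/binary"
        "fmt"
        "go/constant"
        "math"
    )

    // writeConst lowers a constant.Value into buf at off, the way
    // litsym writes into a linker symbol at noff. Three kinds shown.
    func writeConst(buf []byte, off int, v constant.Value) {
        switch v.Kind() {
        case constant.Bool:
            if constant.BoolVal(v) {
                buf[off] = 1
            }
        case constant.Int:
            i, _ := constant.Int64Val(v)
            binary.LittleEndian.PutUint64(buf[off:], uint64(i))
        case constant.Float:
            f, _ := constant.Float64Val(v)
            binary.LittleEndian.PutUint64(buf[off:], math.Float64bits(f))
        }
    }

    func main() {
        buf := make([]byte, 16)
        writeConst(buf, 0, constant.MakeInt64(7))
        writeConst(buf, 8, constant.MakeFloat64(1.5))
        fmt.Println(buf[:8], buf[8:])
    }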
-func litsym(n *ir.Name, c ir.Node, wid int) { +func litsym(n *ir.Name, noff int64, c ir.Node, wid int) { if n.Op() != ir.ONAME { base.Fatalf("litsym n op %v", n.Op()) } if n.Sym() == nil { base.Fatalf("litsym nil n sym") } - if !types.Identical(n.Type(), c.Type()) { - base.Fatalf("litsym: type mismatch: %v has type %v, but %v has type %v", n, n.Type(), c, c.Type()) - } if c.Op() == ir.ONIL { return } @@ -596,37 +592,37 @@ func litsym(n *ir.Name, c ir.Node, wid int) { switch u := c.Val(); u.Kind() { case constant.Bool: i := int64(obj.Bool2int(constant.BoolVal(u))) - s.WriteInt(base.Ctxt, n.Offset(), wid, i) + s.WriteInt(base.Ctxt, noff, wid, i) case constant.Int: - s.WriteInt(base.Ctxt, n.Offset(), wid, ir.IntVal(n.Type(), u)) + s.WriteInt(base.Ctxt, noff, wid, ir.IntVal(c.Type(), u)) case constant.Float: f, _ := constant.Float64Val(u) - switch n.Type().Kind() { + switch c.Type().Kind() { case types.TFLOAT32: - s.WriteFloat32(base.Ctxt, n.Offset(), float32(f)) + s.WriteFloat32(base.Ctxt, noff, float32(f)) case types.TFLOAT64: - s.WriteFloat64(base.Ctxt, n.Offset(), f) + s.WriteFloat64(base.Ctxt, noff, f) } case constant.Complex: re, _ := constant.Float64Val(constant.Real(u)) im, _ := constant.Float64Val(constant.Imag(u)) - switch n.Type().Kind() { + switch c.Type().Kind() { case types.TCOMPLEX64: - s.WriteFloat32(base.Ctxt, n.Offset(), float32(re)) - s.WriteFloat32(base.Ctxt, n.Offset()+4, float32(im)) + s.WriteFloat32(base.Ctxt, noff, float32(re)) + s.WriteFloat32(base.Ctxt, noff+4, float32(im)) case types.TCOMPLEX128: - s.WriteFloat64(base.Ctxt, n.Offset(), re) - s.WriteFloat64(base.Ctxt, n.Offset()+8, im) + s.WriteFloat64(base.Ctxt, noff, re) + s.WriteFloat64(base.Ctxt, noff+8, im) } case constant.String: i := constant.StringVal(u) symdata := stringsym(n.Pos(), i) - s.WriteAddr(base.Ctxt, n.Offset(), Widthptr, symdata, 0) - s.WriteInt(base.Ctxt, n.Offset()+int64(Widthptr), Widthptr, int64(len(i))) + s.WriteAddr(base.Ctxt, noff, Widthptr, symdata, 0) + s.WriteInt(base.Ctxt, noff+int64(Widthptr), Widthptr, int64(len(i))) default: base.Fatalf("litsym unhandled OLITERAL %v", c) diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 0034556995b3b..174037e30a4a7 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -239,7 +239,7 @@ func (o *Order) addrTemp(n ir.Node) ir.Node { dowidth(n.Type()) vstat := readonlystaticname(n.Type()) var s InitSchedule - s.staticassign(vstat, n) + s.staticassign(vstat, 0, n, n.Type()) if s.out != nil { base.Fatalf("staticassign of const generated code: %+v", n) } diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 5b04e106571f6..901af567fab36 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -74,7 +74,7 @@ func cmpstackvarlt(a, b *ir.Name) bool { } if a.Class() != ir.PAUTO { - return a.Offset() < b.Offset() + return a.FrameOffset() < b.FrameOffset() } if a.Used() != b.Used() { @@ -186,7 +186,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) { s.stksize = Rnd(s.stksize, int64(Widthptr)) } - n.SetOffset(-s.stksize) + n.SetFrameOffset(-s.stksize) } s.stksize = Rnd(s.stksize, int64(Widthreg)) @@ -536,10 +536,11 @@ func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { var abbrev int - offs := n.Offset() + var offs int64 
switch n.Class() { case ir.PAUTO: + offs = n.FrameOffset() abbrev = dwarf.DW_ABRV_AUTO if base.Ctxt.FixedFrameSize() == 0 { offs -= int64(Widthptr) @@ -551,7 +552,7 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { case ir.PPARAM, ir.PPARAMOUT: abbrev = dwarf.DW_ABRV_PARAM - offs += base.Ctxt.FixedFrameSize() + offs = n.FrameOffset() + base.Ctxt.FixedFrameSize() default: base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n) } @@ -693,7 +694,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir Name: n.Sym().Name, IsReturnValue: isReturnValue, Abbrev: abbrev, - StackOffset: int32(n.Offset()), + StackOffset: int32(n.FrameOffset()), Type: base.Ctxt.Lookup(typename), DeclFile: declpos.RelFilename(), DeclLine: declpos.RelLine(), @@ -737,6 +738,7 @@ func stackOffset(slot ssa.LocalSlot) int32 { var off int64 switch n.Class() { case ir.PAUTO: + off = n.FrameOffset() if base.Ctxt.FixedFrameSize() == 0 { off -= int64(Widthptr) } @@ -745,9 +747,9 @@ func stackOffset(slot ssa.LocalSlot) int32 { off -= int64(Widthptr) } case ir.PPARAM, ir.PPARAMOUT: - off += base.Ctxt.FixedFrameSize() + off = n.FrameOffset() + base.Ctxt.FixedFrameSize() } - return int32(off + n.Offset() + slot.Off) + return int32(off + slot.Off) } // createComplexVar builds a single DWARF variable entry and location list. diff --git a/src/cmd/compile/internal/gc/pgen_test.go b/src/cmd/compile/internal/gc/pgen_test.go index ad8b87c6f539f..3875fb7223461 100644 --- a/src/cmd/compile/internal/gc/pgen_test.go +++ b/src/cmd/compile/internal/gc/pgen_test.go @@ -43,7 +43,7 @@ func TestCmpstackvar(t *testing.T) { } n := NewName(s) n.SetType(t) - n.SetOffset(xoffset) + n.SetFrameOffset(xoffset) n.SetClass(cl) return n } @@ -158,7 +158,7 @@ func TestStackvarSort(t *testing.T) { nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Name { n := NewName(s) n.SetType(t) - n.SetOffset(xoffset) + n.SetFrameOffset(xoffset) n.SetClass(cl) return n } diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index 6deb3ecc7a94b..8e266d6599eca 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -496,10 +496,10 @@ func (lv *Liveness) pointerMap(liveout bvec, vars []*ir.Name, args, locals bvec) node := vars[i] switch node.Class() { case ir.PAUTO: - onebitwalktype1(node.Type(), node.Offset()+lv.stkptrsize, locals) + onebitwalktype1(node.Type(), node.FrameOffset()+lv.stkptrsize, locals) case ir.PPARAM, ir.PPARAMOUT: - onebitwalktype1(node.Type(), node.Offset(), args) + onebitwalktype1(node.Type(), node.FrameOffset(), args) } } } @@ -1173,7 +1173,7 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) { for _, n := range lv.vars { switch n.Class() { case ir.PPARAM, ir.PPARAMOUT: - if maxArgNode == nil || n.Offset() > maxArgNode.Offset() { + if maxArgNode == nil || n.FrameOffset() > maxArgNode.FrameOffset() { maxArgNode = n } } @@ -1181,7 +1181,7 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) { // Next, find the offset of the largest pointer in the largest node. var maxArgs int64 if maxArgNode != nil { - maxArgs = maxArgNode.Offset() + typeptrdata(maxArgNode.Type()) + maxArgs = maxArgNode.FrameOffset() + typeptrdata(maxArgNode.Type()) } // Size locals bitmaps to be stkptrsize sized. 
diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go index 6b5d53e80647e..472deb16e3722 100644 --- a/src/cmd/compile/internal/gc/racewalk.go +++ b/src/cmd/compile/internal/gc/racewalk.go @@ -83,9 +83,9 @@ func instrument(fn *ir.Func) { // This only works for amd64. This will not // work on arm or others that might support // race in the future. - nodpc := ir.Copy(nodfp).(*ir.Name) + nodpc := nodfp.CloneName() nodpc.SetType(types.Types[types.TUINTPTR]) - nodpc.SetOffset(int64(-Widthptr)) + nodpc.SetFrameOffset(int64(-Widthptr)) fn.Dcl = append(fn.Dcl, nodpc) fn.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc)) fn.Exit.Append(mkcall("racefuncexit", nil, nil)) diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index cfda4afcd850c..e2c31e4dd7337 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -67,14 +67,16 @@ func (s *InitSchedule) tryStaticInit(nn ir.Node) bool { } lno := setlineno(n) defer func() { base.Pos = lno }() - return s.staticassign(n.Left().(*ir.Name), n.Right()) + nam := n.Left().(*ir.Name) + return s.staticassign(nam, 0, n.Right(), nam.Type()) } // like staticassign but we are copying an already // initialized value r. -func (s *InitSchedule) staticcopy(l *ir.Name, rn *ir.Name) bool { +func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Type) bool { if rn.Class() == ir.PFUNC { - pfuncsym(l, rn) + // TODO if roff != 0 { panic } + pfuncsym(l, loff, rn) return true } if rn.Class() != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg { @@ -92,7 +94,7 @@ func (s *InitSchedule) staticcopy(l *ir.Name, rn *ir.Name) bool { orig := rn r := rn.Defn.(*ir.AssignStmt).Right() - for r.Op() == ir.OCONVNOP && !types.Identical(r.Type(), l.Type()) { + for r.Op() == ir.OCONVNOP && !types.Identical(r.Type(), typ) { r = r.(*ir.ConvExpr).Left() } @@ -102,12 +104,16 @@ func (s *InitSchedule) staticcopy(l *ir.Name, rn *ir.Name) bool { fallthrough case ir.ONAME: r := r.(*ir.Name) - if s.staticcopy(l, r) { + if s.staticcopy(l, loff, r, typ) { return true } // We may have skipped past one or more OCONVNOPs, so // use conv to ensure r is assignable to l (#13263). 
- s.append(ir.Nod(ir.OAS, l, conv(r, l.Type()))) + dst := ir.Node(l) + if loff != 0 || !types.Identical(typ, l.Type()) { + dst = ir.NewNameOffsetExpr(base.Pos, l, loff, typ) + } + s.append(ir.Nod(ir.OAS, dst, conv(r, typ))) return true case ir.ONIL: @@ -117,13 +123,13 @@ func (s *InitSchedule) staticcopy(l *ir.Name, rn *ir.Name) bool { if isZero(r) { return true } - litsym(l, r, int(l.Type().Width)) + litsym(l, loff, r, int(typ.Width)) return true case ir.OADDR: if a := r.Left(); a.Op() == ir.ONAME { a := a.(*ir.Name) - addrsym(l, a) + addrsym(l, loff, a, 0) return true } @@ -131,41 +137,35 @@ func (s *InitSchedule) staticcopy(l *ir.Name, rn *ir.Name) bool { switch r.Left().Op() { case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT: // copy pointer - addrsym(l, s.inittemps[r]) + addrsym(l, loff, s.inittemps[r], 0) return true } case ir.OSLICELIT: // copy slice - a := s.inittemps[r] - slicesym(l, a, ir.Int64Val(r.Right())) + slicesym(l, loff, s.inittemps[r], ir.Int64Val(r.Right())) return true case ir.OARRAYLIT, ir.OSTRUCTLIT: p := s.initplans[r] - - n := ir.Copy(l).(*ir.Name) for i := range p.E { e := &p.E[i] - n.SetOffset(l.Offset() + e.Xoffset) - n.SetType(e.Expr.Type()) + typ := e.Expr.Type() if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL { - litsym(n, e.Expr, int(n.Type().Width)) + litsym(l, loff+e.Xoffset, e.Expr, int(typ.Width)) continue } - ll := ir.SepCopy(n).(*ir.Name) x := e.Expr if x.Op() == ir.OMETHEXPR { x = x.(*ir.MethodExpr).FuncName() } - if x.Op() == ir.ONAME && s.staticcopy(ll, x.(*ir.Name)) { + if x.Op() == ir.ONAME && s.staticcopy(l, loff+e.Xoffset, x.(*ir.Name), typ) { continue } // Requires computation, but we're // copying someone else's computation. - rr := ir.SepCopy(orig).(*ir.Name) - rr.SetType(ll.Type()) - rr.SetOffset(rr.Offset() + e.Xoffset) + ll := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, typ) + rr := ir.NewNameOffsetExpr(base.Pos, orig, e.Xoffset, typ) setlineno(rr) s.append(ir.Nod(ir.OAS, ll, rr)) } @@ -176,7 +176,7 @@ func (s *InitSchedule) staticcopy(l *ir.Name, rn *ir.Name) bool { return false } -func (s *InitSchedule) staticassign(l *ir.Name, r ir.Node) bool { +func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *types.Type) bool { for r.Op() == ir.OCONVNOP { r = r.(*ir.ConvExpr).Left() } @@ -184,11 +184,11 @@ func (s *InitSchedule) staticassign(l *ir.Name, r ir.Node) bool { switch r.Op() { case ir.ONAME: r := r.(*ir.Name) - return s.staticcopy(l, r) + return s.staticcopy(l, loff, r, typ) case ir.OMETHEXPR: r := r.(*ir.MethodExpr) - return s.staticcopy(l, r.FuncName()) + return s.staticcopy(l, loff, r.FuncName(), typ) case ir.ONIL: return true @@ -197,12 +197,12 @@ func (s *InitSchedule) staticassign(l *ir.Name, r ir.Node) bool { if isZero(r) { return true } - litsym(l, r, int(l.Type().Width)) + litsym(l, loff, r, int(typ.Width)) return true case ir.OADDR: - if nam := stataddr(r.Left()); nam != nil { - addrsym(l, nam) + if name, offset, ok := stataddr(r.Left()); ok { + addrsym(l, loff, name, offset) return true } fallthrough @@ -214,10 +214,10 @@ func (s *InitSchedule) staticassign(l *ir.Name, r ir.Node) bool { a := staticname(r.Left().Type()) s.inittemps[r] = a - addrsym(l, a) + addrsym(l, loff, a, 0) // Init underlying literal. 
- if !s.staticassign(a, r.Left()) { + if !s.staticassign(a, 0, r.Left(), a.Type()) { s.append(ir.Nod(ir.OAS, a, r.Left())) } return true @@ -227,7 +227,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, r ir.Node) bool { case ir.OSTR2BYTES: if l.Class() == ir.PEXTERN && r.Left().Op() == ir.OLITERAL { sval := ir.StringVal(r.Left()) - slicebytes(l, sval) + slicebytes(l, loff, sval) return true } @@ -239,27 +239,25 @@ func (s *InitSchedule) staticassign(l *ir.Name, r ir.Node) bool { ta.SetNoalg(true) a := staticname(ta) s.inittemps[r] = a - slicesym(l, a, bound) + slicesym(l, loff, a, bound) // Fall through to init underlying array. l = a + loff = 0 fallthrough case ir.OARRAYLIT, ir.OSTRUCTLIT: s.initplan(r) p := s.initplans[r] - n := ir.Copy(l).(*ir.Name) for i := range p.E { e := &p.E[i] - n.SetOffset(l.Offset() + e.Xoffset) - n.SetType(e.Expr.Type()) if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL { - litsym(n, e.Expr, int(n.Type().Width)) + litsym(l, loff+e.Xoffset, e.Expr, int(e.Expr.Type().Width)) continue } setlineno(e.Expr) - a := ir.SepCopy(n).(*ir.Name) - if !s.staticassign(a, e.Expr) { + if !s.staticassign(l, loff+e.Xoffset, e.Expr, e.Expr.Type()) { + a := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, e.Expr.Type()) s.append(ir.Nod(ir.OAS, a, e.Expr)) } } @@ -276,7 +274,8 @@ func (s *InitSchedule) staticassign(l *ir.Name, r ir.Node) bool { } // Closures with no captured variables are globals, // so the assignment can be done at link time. - pfuncsym(l, r.Func().Nname) + // TODO if roff != 0 { panic } + pfuncsym(l, loff, r.Func().Nname) return true } closuredebugruntimecheck(r) @@ -303,18 +302,16 @@ func (s *InitSchedule) staticassign(l *ir.Name, r ir.Node) bool { markTypeUsedInInterface(val.Type(), l.Sym().Linksym()) var itab *ir.AddrExpr - if l.Type().IsEmptyInterface() { + if typ.IsEmptyInterface() { itab = typename(val.Type()) } else { - itab = itabname(val.Type(), l.Type()) + itab = itabname(val.Type(), typ) } // Create a copy of l to modify while we emit data. - n := ir.Copy(l).(*ir.Name) // Emit itab, advance offset. - addrsym(n, itab.Left().(*ir.Name)) - n.SetOffset(n.Offset() + int64(Widthptr)) + addrsym(l, loff, itab.Left().(*ir.Name), 0) // Emit data. if isdirectiface(val.Type()) { @@ -323,20 +320,19 @@ func (s *InitSchedule) staticassign(l *ir.Name, r ir.Node) bool { return true } // Copy val directly into n. - n.SetType(val.Type()) setlineno(val) - a := ir.SepCopy(n).(*ir.Name) - if !s.staticassign(a, val) { + if !s.staticassign(l, loff+int64(Widthptr), val, val.Type()) { + a := ir.NewNameOffsetExpr(base.Pos, l, loff+int64(Widthptr), val.Type()) s.append(ir.Nod(ir.OAS, a, val)) } } else { // Construct temp to hold val, write pointer to temp into n. a := staticname(val.Type()) s.inittemps[val] = a - if !s.staticassign(a, val) { + if !s.staticassign(a, 0, val, val.Type()) { s.append(ir.Nod(ir.OAS, a, val)) } - addrsym(n, a) + addrsym(l, loff+int64(Widthptr), a, 0) } return true @@ -626,11 +622,11 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) // copy static to slice var_ = typecheck(var_, ctxExpr|ctxAssign) - nam := stataddr(var_) - if nam == nil || nam.Class() != ir.PEXTERN { + name, offset, ok := stataddr(var_) + if !ok || name.Class() != ir.PEXTERN { base.Fatalf("slicelit: %v", var_) } - slicesym(nam, vstat, t.NumElem()) + slicesym(name, offset, vstat, t.NumElem()) return } @@ -989,34 +985,32 @@ func getlit(lit ir.Node) int { } // stataddr returns the static address of n, if n has one, or else nil. 
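[Editorial aside: the reworked stataddr that follows returns (name, offset, ok), so each ODOT or OINDEX step only accumulates an offset while the base Name stays shared. A toy version over a simplified expression tree shows why the recursion composes.]

    package main

    import "fmt"

    type Expr interface{}

    type Name struct{ Sym string }

    // Dot is x.f where the field lives at byte offset Off within x.
    type Dot struct {
        X   Expr
        Off int64
    }

    // Index is x[i] over an array whose element width is Width bytes.
    type Index struct {
        X     Expr
        I     int64
        Width int64
    }

    // stataddr mirrors the compiler's accumulate-an-offset walk:
    // return the base name plus total byte offset, or ok=false.
    func stataddr(e Expr) (name *Name, offset int64, ok bool) {
        switch e := e.(type) {
        case *Name:
            return e, 0, true
        case *Dot:
            if name, offset, ok = stataddr(e.X); !ok {
                return nil, 0, false
            }
            return name, offset + e.Off, true
        case *Index:
            if name, offset, ok = stataddr(e.X); !ok {
                return nil, 0, false
            }
            return name, offset + e.I*e.Width, true
        }
        return nil, 0, false
    }

    func main() {
        // g.f[2] where f is at offset 8 and elements are 4 bytes wide.
        e := &Index{X: &Dot{X: &Name{Sym: "g"}, Off: 8}, I: 2, Width: 4}
        n, off, ok := stataddr(e)
        fmt.Println(n.Sym, off, ok) // g 16 true
    }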
-func stataddr(n ir.Node) *ir.Name { +func stataddr(n ir.Node) (name *ir.Name, offset int64, ok bool) { if n == nil { - return nil + return nil, 0, false } switch n.Op() { case ir.ONAME: - return ir.SepCopy(n).(*ir.Name) + n := n.(*ir.Name) + return n, 0, true case ir.OMETHEXPR: n := n.(*ir.MethodExpr) return stataddr(n.FuncName()) case ir.ODOT: - nam := stataddr(n.Left()) - if nam == nil { + if name, offset, ok = stataddr(n.Left()); !ok { break } - nam.SetOffset(nam.Offset() + n.Offset()) - nam.SetType(n.Type()) - return nam + offset += n.Offset() + return name, offset, true case ir.OINDEX: if n.Left().Type().IsSlice() { break } - nam := stataddr(n.Left()) - if nam == nil { + if name, offset, ok = stataddr(n.Left()); !ok { break } l := getlit(n.Right()) @@ -1028,12 +1022,11 @@ func stataddr(n ir.Node) *ir.Name { if n.Type().Width != 0 && thearch.MAXWIDTH/n.Type().Width <= int64(l) { break } - nam.SetOffset(nam.Offset() + int64(l)*n.Type().Width) - nam.SetType(n.Type()) - return nam + offset += int64(l) * n.Type().Width + return name, offset, true } - return nil + return nil, 0, false } func (s *InitSchedule) initplan(n ir.Node) { @@ -1154,23 +1147,26 @@ func genAsStatic(as *ir.AssignStmt) { base.Fatalf("genAsStatic as.Left not typechecked") } - nam := stataddr(as.Left()) - if nam == nil || (nam.Class() != ir.PEXTERN && as.Left() != ir.BlankNode) { + name, offset, ok := stataddr(as.Left()) + if !ok || (name.Class() != ir.PEXTERN && as.Left() != ir.BlankNode) { base.Fatalf("genAsStatic: lhs %v", as.Left()) } switch r := as.Right(); r.Op() { case ir.OLITERAL: - litsym(nam, r, int(r.Type().Width)) + litsym(name, offset, r, int(r.Type().Width)) return case ir.OMETHEXPR: r := r.(*ir.MethodExpr) - pfuncsym(nam, r.FuncName()) + pfuncsym(name, offset, r.FuncName()) return case ir.ONAME: r := r.(*ir.Name) + if r.Offset() != 0 { + base.Fatalf("genAsStatic %+v", as) + } if r.Class() == ir.PFUNC { - pfuncsym(nam, r) + pfuncsym(name, offset, r) return } } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 2a0134703c68d..fbfed0640d844 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -258,14 +258,14 @@ func (s *state) emitOpenDeferInfo() { } } off = dvarint(x, off, maxargsize) - off = dvarint(x, off, -s.deferBitsTemp.Offset()) + off = dvarint(x, off, -s.deferBitsTemp.FrameOffset()) off = dvarint(x, off, int64(len(s.openDefers))) // Write in reverse-order, for ease of running in that order at runtime for i := len(s.openDefers) - 1; i >= 0; i-- { r := s.openDefers[i] off = dvarint(x, off, r.n.Left().Type().ArgWidth()) - off = dvarint(x, off, -r.closureNode.Offset()) + off = dvarint(x, off, -r.closureNode.FrameOffset()) numArgs := len(r.argNodes) if r.rcvrNode != nil { // If there's an interface receiver, treat/place it as the first @@ -275,13 +275,13 @@ func (s *state) emitOpenDeferInfo() { } off = dvarint(x, off, int64(numArgs)) if r.rcvrNode != nil { - off = dvarint(x, off, -r.rcvrNode.Offset()) + off = dvarint(x, off, -r.rcvrNode.FrameOffset()) off = dvarint(x, off, s.config.PtrSize) off = dvarint(x, off, 0) } for j, arg := range r.argNodes { f := getParam(r.n, j) - off = dvarint(x, off, -arg.Offset()) + off = dvarint(x, off, -arg.FrameOffset()) off = dvarint(x, off, f.Type.Size()) off = dvarint(x, off, f.Offset) } @@ -418,10 +418,10 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { switch n.Class() { case ir.PPARAM: s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem) - 
args = append(args, ssa.Param{Type: n.Type(), Offset: int32(n.Offset())}) + args = append(args, ssa.Param{Type: n.Type(), Offset: int32(n.FrameOffset())}) case ir.PPARAMOUT: s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem) - results = append(results, ssa.Param{Type: n.Type(), Offset: int32(n.Offset())}) + results = append(results, ssa.Param{Type: n.Type(), Offset: int32(n.FrameOffset())}) if s.canSSA(n) { // Save ssa-able PPARAMOUT variables so we can // store them back to the stack at the end of @@ -2101,6 +2101,13 @@ func (s *state) expr(n ir.Node) *ssa.Value { } addr := s.addr(n) return s.load(n.Type(), addr) + case ir.ONAMEOFFSET: + n := n.(*ir.NameOffsetExpr) + if s.canSSAName(n.Name_) && canSSAType(n.Type()) { + return s.variable(n, n.Type()) + } + addr := s.addr(n) + return s.load(n.Type(), addr) case ir.OCLOSUREREAD: addr := s.addr(n) return s.load(n.Type(), addr) @@ -4927,7 +4934,13 @@ func (s *state) addr(n ir.Node) *ssa.Value { } t := types.NewPtr(n.Type()) + var offset int64 switch n.Op() { + case ir.ONAMEOFFSET: + no := n.(*ir.NameOffsetExpr) + offset = no.Offset_ + n = no.Name_ + fallthrough case ir.ONAME: n := n.(*ir.Name) switch n.Class() { @@ -4935,8 +4948,8 @@ func (s *state) addr(n ir.Node) *ssa.Value { // global variable v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym().Linksym(), s.sb) // TODO: Make OpAddr use AuxInt as well as Aux. - if n.Offset() != 0 { - v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Offset(), v) + if offset != 0 { + v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, offset, v) } return v case ir.PPARAM: @@ -5050,7 +5063,10 @@ func (s *state) canSSA(n ir.Node) bool { if n.Op() != ir.ONAME { return false } - name := n.(*ir.Name) + return s.canSSAName(n.(*ir.Name)) && canSSAType(n.Type()) +} + +func (s *state) canSSAName(name *ir.Name) bool { if name.Addrtaken() { return false } @@ -5084,7 +5100,7 @@ func (s *state) canSSA(n ir.Node) bool { // TODO: treat as a PPARAMOUT? return false } - return canSSAType(name.Type()) + return true // TODO: try to make more variables SSAable? } @@ -6184,9 +6200,6 @@ func (s *state) addNamedValue(n *ir.Name, v *ssa.Value) { // from being assigned too early. See #14591 and #14762. TODO: allow this. return } - if n.Class() == ir.PAUTO && n.Offset() != 0 { - s.Fatalf("AUTO var with offset %v %d", n, n.Offset()) - } loc := ssa.LocalSlot{N: n.Name(), Type: n.Type(), Off: 0} values, ok := s.f.NamedValues[loc] if !ok { @@ -6309,7 +6322,7 @@ func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) { type byXoffset []*ir.Name func (s byXoffset) Len() int { return len(s) } -func (s byXoffset) Less(i, j int) bool { return s[i].Offset() < s[j].Offset() } +func (s byXoffset) Less(i, j int) bool { return s[i].FrameOffset() < s[j].FrameOffset() } func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func emitStackObjects(e *ssafn, pp *Progs) { @@ -6335,7 +6348,7 @@ func emitStackObjects(e *ssafn, pp *Progs) { // Note: arguments and return values have non-negative Xoffset, // in which case the offset is relative to argp. // Locals have a negative Xoffset, in which case the offset is relative to varp. 
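// Editor's note, not part of the patch: the Offset -> FrameOffset renames in
// this file make the convention described above explicit in the accessor
// name, and keep frame offsets from mixing with other offsets (plain
// Name.Offset/SetOffset now panic, per the ir/name.go hunk earlier). A
// hypothetical helper restating the sign convention:
//
//	func frameBase(n *ir.Name) string {
//		if n.FrameOffset() >= 0 {
//			return "argp" // parameters and results
//		}
//		return "varp" // locals
//	}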
- off = duintptr(x, off, uint64(v.Offset())) + off = duintptr(x, off, uint64(v.FrameOffset())) if !typesym(v.Type()).Siggen() { e.Fatalf(v.Pos(), "stack object's type symbol not generated for type %s", v.Type()) } @@ -6708,13 +6721,13 @@ func defframe(s *SSAGenState, e *ssafn) { if n.Class() != ir.PAUTO { e.Fatalf(n.Pos(), "needzero class %d", n.Class()) } - if n.Type().Size()%int64(Widthptr) != 0 || n.Offset()%int64(Widthptr) != 0 || n.Type().Size() == 0 { + if n.Type().Size()%int64(Widthptr) != 0 || n.FrameOffset()%int64(Widthptr) != 0 || n.Type().Size() == 0 { e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset()) } - if lo != hi && n.Offset()+n.Type().Size() >= lo-int64(2*Widthreg) { + if lo != hi && n.FrameOffset()+n.Type().Size() >= lo-int64(2*Widthreg) { // Merge with range we already have. - lo = n.Offset() + lo = n.FrameOffset() continue } @@ -6722,7 +6735,7 @@ func defframe(s *SSAGenState, e *ssafn) { p = thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state) // Set new range. - lo = n.Offset() + lo = n.FrameOffset() hi = lo + n.Type().Size() } @@ -6793,12 +6806,12 @@ func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) { if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT { a.Name = obj.NAME_PARAM a.Sym = ir.Orig(n).Sym().Linksym() - a.Offset += n.Offset() + a.Offset += n.FrameOffset() break } a.Name = obj.NAME_AUTO a.Sym = n.Sym().Linksym() - a.Offset += n.Offset() + a.Offset += n.FrameOffset() default: v.Fatalf("aux in %s not implemented %#v", v, v.Aux) } @@ -6941,7 +6954,7 @@ func AddrAuto(a *obj.Addr, v *ssa.Value) { a.Type = obj.TYPE_MEM a.Sym = n.Sym().Linksym() a.Reg = int16(thearch.REGSP) - a.Offset = n.Offset() + off + a.Offset = n.FrameOffset() + off if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT { a.Name = obj.NAME_PARAM } else { diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 03998b99bee48..9c26edf136ab9 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -572,12 +572,12 @@ func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) { return ptr, length } -func syslook(name string) ir.Node { +func syslook(name string) *ir.Name { s := Runtimepkg.Lookup(name) if s == nil || s.Def == nil { base.Fatalf("syslook: can't find runtime.%s", name) } - return ir.AsNode(s.Def) + return ir.AsNode(s.Def).(*ir.Name) } // typehash computes a hash value for type t to use in type switch statements. 
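// Editor's note, not part of the patch: narrowing syslook's result to the
// concrete *ir.Name is part of this series' move away from interface-typed
// values; callers needing Name-specific API no longer have to assert. A
// caller-side sketch only (runtimeLookup is a hypothetical wrapper):
func runtimeLookup(name string) *ir.Name {
	fn := syslook(name)
	// Before this hunk, such sites read: fn := syslook(name).(*ir.Name);
	// compare walkprint's "var on *ir.Name" later in this patch.
	return fn
}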
@@ -609,7 +609,7 @@ func calcHasCall(n ir.Node) bool { base.Fatalf("calcHasCall %+v", n) panic("unreachable") - case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OTYPE: + case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OTYPE, ir.ONAMEOFFSET: if n.HasCall() { base.Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n) } @@ -770,7 +770,7 @@ func safeexpr(n ir.Node, init *ir.Nodes) ir.Node { } switch n.Op() { - case ir.ONAME, ir.OLITERAL, ir.ONIL: + case ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET: return n case ir.OLEN, ir.OCAP: diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 5e56ace7c7157..83939fd6bff85 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -488,6 +488,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } return n + case ir.ONAMEOFFSET: + // type already set + return n + case ir.OPACK: base.Errorf("use of package %v without selector", n.Sym()) n.SetType(nil) @@ -3106,6 +3110,9 @@ func islvalue(n ir.Node) bool { return false } return true + + case ir.ONAMEOFFSET: + return true } return false diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 91d3ad215ef61..23d1ce6003615 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -530,7 +530,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.ONONAME, ir.OGETG, ir.ONEWOBJ, ir.OMETHEXPR: return n - case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL: + case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET: // TODO(mdempsky): Just return n; see discussion on CL 38655. // Perhaps refactor to use Node.mayBeShared for these instead. // If these return early, make sure to still call @@ -1999,7 +1999,7 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { continue } - var on ir.Node + var on *ir.Name switch n.Type().Kind() { case types.TINTER: if n.Type().IsEmptyInterface() { @@ -3958,8 +3958,8 @@ func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node { // type syntax expression n.Type. // The result of substArgTypes MUST be assigned back to old, e.g. 
// n.Left = substArgTypes(n.Left, t1, t2) -func substArgTypes(old ir.Node, types_ ...*types.Type) ir.Node { - n := ir.Copy(old) +func substArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name { + n := old.CloneName() for _, t := range types_ { dowidth(t) diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index a6e90a899e8e9..6f15645813e51 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -631,6 +631,10 @@ func exprFmt(n Node, s fmt.State, prec int) { n := n.(*MethodExpr) fmt.Fprint(s, n.FuncName().Sym()) + case ONAMEOFFSET: + n := n.(*NameOffsetExpr) + fmt.Fprintf(s, "(%v)(%v@%d)", n.Type(), n.Name_, n.Offset_) + case OTYPE: if n.Type() == nil && n.Sym() != nil { fmt.Fprint(s, n.Sym()) diff --git a/src/cmd/compile/internal/ir/mknode.go b/src/cmd/compile/internal/ir/mknode.go index f9b398fe28d8f..f5dacee622f9d 100644 --- a/src/cmd/compile/internal/ir/mknode.go +++ b/src/cmd/compile/internal/ir/mknode.go @@ -67,18 +67,23 @@ func main() { fmt.Fprintf(&buf, "\n") fmt.Fprintf(&buf, "func (n *%s) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }\n", name) - fmt.Fprintf(&buf, "func (n *%s) copy() Node { c := *n\n", name) - forNodeFields(typName, typ, func(name string, is func(types.Type) bool) { - switch { - case is(nodesType): - fmt.Fprintf(&buf, "c.%s = c.%s.Copy()\n", name, name) - case is(ptrFieldType): - fmt.Fprintf(&buf, "if c.%s != nil { c.%s = c.%s.copy() }\n", name, name, name) - case is(slicePtrFieldType): - fmt.Fprintf(&buf, "c.%s = copyFields(c.%s)\n", name, name) - } - }) - fmt.Fprintf(&buf, "return &c }\n") + switch name { + case "Name": + fmt.Fprintf(&buf, "func (n *%s) copy() Node {panic(\"%s.copy\")}\n", name, name) + default: + fmt.Fprintf(&buf, "func (n *%s) copy() Node { c := *n\n", name) + forNodeFields(typName, typ, func(name string, is func(types.Type) bool) { + switch { + case is(nodesType): + fmt.Fprintf(&buf, "c.%s = c.%s.Copy()\n", name, name) + case is(ptrFieldType): + fmt.Fprintf(&buf, "if c.%s != nil { c.%s = c.%s.copy() }\n", name, name, name) + case is(slicePtrFieldType): + fmt.Fprintf(&buf, "c.%s = copyFields(c.%s)\n", name, name) + } + }) + fmt.Fprintf(&buf, "return &c }\n") + } fmt.Fprintf(&buf, "func (n *%s) doChildren(do func(Node) error) error { var err error\n", name) forNodeFields(typName, typ, func(name string, is func(types.Type) bool) { diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 96cb0ee0546cb..0c36ffdf7a387 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -139,6 +139,12 @@ type Name struct { Outer *Name } +// CloneName makes a cloned copy of the name. +// It's not ir.Copy(n) because in general that operation is a mistake on names, +// which uniquely identify variables. +// Callers must use n.CloneName to make clear they intend to create a separate name. +func (n *Name) CloneName() *Name { c := *n; return &c } + func (n *Name) isExpr() {} // NewNameAt returns a new ONAME Node associated with symbol s at position pos. 
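// Editor's note, not part of the patch: CloneName is now the one sanctioned
// way to duplicate a Name; the generated Name.copy panics (see the mknode.go
// hunk above), so an accidental copy of a variable's identity fails fast.
// Sketch of the intended contrast (cloneForSpecialization is hypothetical):
func cloneForSpecialization(old *ir.Name) *ir.Name {
	n := old.CloneName() // deliberate new identity, as substArgTypes does
	// ir.Copy(old) would panic here: Names uniquely identify variables,
	// so an implicit copy is almost always a bug.
	return n
}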
@@ -186,10 +192,16 @@ func (n *Name) Class() Class { return n.Class_ } func (n *Name) SetClass(x Class) { n.Class_ = x } func (n *Name) Func() *Func { return n.fn } func (n *Name) SetFunc(x *Func) { n.fn = x } -func (n *Name) Offset() int64 { return n.Offset_ } -func (n *Name) SetOffset(x int64) { n.Offset_ = x } -func (n *Name) Iota() int64 { return n.Offset_ } -func (n *Name) SetIota(x int64) { n.Offset_ = x } +func (n *Name) Offset() int64 { panic("Name.Offset") } +func (n *Name) SetOffset(x int64) { + if x != 0 { + panic("Name.SetOffset") + } +} +func (n *Name) FrameOffset() int64 { return n.Offset_ } +func (n *Name) SetFrameOffset(x int64) { n.Offset_ = x } +func (n *Name) Iota() int64 { return n.Offset_ } +func (n *Name) SetIota(x int64) { n.Offset_ = x } func (*Name) CanBeNtype() {} func (*Name) CanBeAnSSASym() {} diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 10dfe3c927137..a0fae2b949604 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -639,10 +639,7 @@ func (n *MethodExpr) editChildren(edit func(Node) Node) { } func (n *Name) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *Name) copy() Node { - c := *n - return &c -} +func (n *Name) copy() Node { panic("Name.copy") } func (n *Name) doChildren(do func(Node) error) error { var err error return err From c45313bf451591ab2f7a3ffbbd724bb36d51cba0 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 17 Dec 2020 08:49:22 -0500 Subject: [PATCH 191/474] [dev.regabi] cmd/compile: remove prealloc map The prealloc map seems to exist to avoid adding a field to all nodes. Now we can add a field to just the nodes that need the field, so let's do that and avoid having a magic global with extra node state that isn't preserved by operations like Copy nor printed by Dump. This also makes clear which nodes can be prealloc'ed. In particular, the code in walkstmt looked up an entry in prealloc using an ONAME node, but there's no code that ever stores such an entry, so the lookup never succeeded. Having fields makes that kind of thing easier to see and fix. Passes buildall w/ toolstash -cmp. Change-Id: I418ad0e2847615c08868120c13ee719dc0b2eacb Reviewed-on: https://go-review.googlesource.com/c/go/+/278915 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/closure.go | 10 +++++----- src/cmd/compile/internal/gc/order.go | 17 ++++++++--------- src/cmd/compile/internal/gc/range.go | 2 +- src/cmd/compile/internal/gc/sinit.go | 2 +- src/cmd/compile/internal/gc/walk.go | 15 +++++---------- src/cmd/compile/internal/ir/expr.go | 20 ++++++++++++-------- src/cmd/compile/internal/ir/stmt.go | 1 + 7 files changed, 33 insertions(+), 34 deletions(-) diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index 6a3ee45a12353..85c594787b3f6 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -378,7 +378,7 @@ func closureType(clo ir.Node) *types.Type { return typ } -func walkclosure(clo ir.Node, init *ir.Nodes) ir.Node { +func walkclosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node { fn := clo.Func() // If no closure vars, don't bother wrapping. @@ -403,12 +403,12 @@ func walkclosure(clo ir.Node, init *ir.Nodes) ir.Node { cfn := convnop(addr, clo.Type()) // non-escaping temp to use, if any. 
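// Editor's note, not part of the patch: every former prealloc[x] lookup in
// this commit becomes a typed Prealloc field on exactly the node kinds that
// can carry an order-phase temp (AddStringExpr, CallPartExpr, ClosureExpr,
// CompLitExpr, RangeStmt), so the state survives Copy and shows up in Dump.
// The consume-once shape, condensed from the hunk below (takePrealloc is a
// hypothetical name):
func takePrealloc(clo *ir.ClosureExpr, typ *types.Type) *ir.Name {
	x := clo.Prealloc
	if x == nil {
		return nil // order assigned no temp
	}
	if !types.Identical(typ, x.Type()) {
		panic("closure type does not match order's assigned type")
	}
	clo.Prealloc = nil // consume once; the analogue of delete(prealloc, clo)
	return x
}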
- if x := prealloc[clo]; x != nil { + if x := clo.Prealloc; x != nil { if !types.Identical(typ, x.Type()) { panic("closure type does not match order's assigned type") } addr.SetRight(x) - delete(prealloc, clo) + clo.Prealloc = nil } return walkexpr(cfn, init) @@ -552,12 +552,12 @@ func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node { cfn := convnop(addr, n.Type()) // non-escaping temp to use, if any. - if x := prealloc[n]; x != nil { + if x := n.Prealloc; x != nil { if !types.Identical(typ, x.Type()) { panic("partial call type does not match order's assigned type") } addr.SetRight(x) - delete(prealloc, n) + n.Prealloc = nil } return walkexpr(cfn, init) diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 174037e30a4a7..87d7cf3aa93d1 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -846,9 +846,9 @@ func (o *Order) stmt(n ir.Node) { r := n.Right() n.SetRight(o.copyExpr(r)) - // prealloc[n] is the temp for the iterator. + // n.Prealloc is the temp for the iterator. // hiter contains pointers and needs to be zeroed. - prealloc[n] = o.newTemp(hiter(n.Type()), true) + n.Prealloc = o.newTemp(hiter(n.Type()), true) } o.exprListInPlace(n.List()) if orderBody { @@ -1040,9 +1040,6 @@ func (o *Order) exprListInPlace(l ir.Nodes) { } } -// prealloc[x] records the allocation to use for x. -var prealloc = map[ir.Node]ir.Node{} - func (o *Order) exprNoLHS(n ir.Node) ir.Node { return o.expr(n, nil) } @@ -1079,11 +1076,12 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { // Allocate a temporary to hold the strings. // Fewer than 5 strings use direct runtime helpers. case ir.OADDSTR: + n := n.(*ir.AddStringExpr) o.exprList(n.List()) if n.List().Len() > 5 { t := types.NewArray(types.Types[types.TSTRING], int64(n.List().Len())) - prealloc[n] = o.newTemp(t, false) + n.Prealloc = o.newTemp(t, false) } // Mark string(byteSlice) arguments to reuse byteSlice backing @@ -1268,7 +1266,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { case ir.OCLOSURE: n := n.(*ir.ClosureExpr) if n.Transient() && len(n.Func().ClosureVars) > 0 { - prealloc[n] = o.newTemp(closureType(n), false) + n.Prealloc = o.newTemp(closureType(n), false) } return n @@ -1277,15 +1275,16 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { n.SetLeft(o.expr(n.Left(), nil)) if n.Transient() { t := partialCallType(n) - prealloc[n] = o.newTemp(t, false) + n.Prealloc = o.newTemp(t, false) } return n case ir.OSLICELIT: + n := n.(*ir.CompLitExpr) o.exprList(n.List()) if n.Transient() { t := types.NewArray(n.Type().Elem(), ir.Int64Val(n.Right())) - prealloc[n] = o.newTemp(t, false) + n.Prealloc = o.newTemp(t, false) } return n diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index 90bee4fc74fdf..aa4f0358c99fb 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -296,7 +296,7 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { // we only use a once, so no copy needed. ha := a - hit := prealloc[nrange] + hit := nrange.Prealloc th := hit.Type() keysym := th.Field(0).Sym // depends on layout of iterator struct. 
See reflect.go:hiter elemsym := th.Field(1).Sym // ditto diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index e2c31e4dd7337..7b710fd511eaa 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -668,7 +668,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) // set auto to point at new temp or heap (3 assign) var a ir.Node - if x := prealloc[n]; x != nil { + if x := n.Prealloc; x != nil { // temp allocated during order.go for dddarg if !types.Identical(t, x.Type()) { panic("dotdotdot base type does not match order's assigned type") diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 23d1ce6003615..a4ecc0c44dc09 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -202,10 +202,7 @@ func walkstmt(n ir.Node) ir.Node { if base.Flag.CompilingRuntime { base.Errorf("%v escapes to heap, not allowed in runtime", v) } - if prealloc[v] == nil { - prealloc[v] = callnew(v.Type()) - } - nn := ir.Nod(ir.OAS, v.Name().Heapaddr, prealloc[v]) + nn := ir.Nod(ir.OAS, v.Name().Heapaddr, callnew(v.Type())) nn.SetColas(true) return walkstmt(typecheck(nn, ctxStmt)) } @@ -1638,7 +1635,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return mkcall1(chanfn("chansend1", 2, n.Left().Type()), nil, init, n.Left(), n1) case ir.OCLOSURE: - return walkclosure(n, init) + return walkclosure(n.(*ir.ClosureExpr), init) case ir.OCALLPART: return walkpartialcall(n.(*ir.CallPartExpr), init) @@ -2713,11 +2710,9 @@ func addstr(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { fn = "concatstrings" t := types.NewSlice(types.Types[types.TSTRING]) - slice := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(t)) - if prealloc[n] != nil { - prealloc[slice] = prealloc[n] - } - slice.PtrList().Set(args[1:]) // skip buf arg + // args[1:] to skip buf arg + slice := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(t), args[1:]) + slice.Prealloc = n.Prealloc args = []ir.Node{buf, slice} slice.SetEsc(EscNone) } diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index b18975d063378..8f43eb0fb21cd 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -89,7 +89,8 @@ func toNtype(x Node) Ntype { // An AddStringExpr is a string concatenation Expr[0] + Exprs[1] + ... + Expr[len(Expr)-1]. type AddStringExpr struct { miniExpr - List_ Nodes + List_ Nodes + Prealloc *Name } func NewAddStringExpr(pos src.XPos, list []Node) *AddStringExpr { @@ -233,9 +234,10 @@ func (n *CallExpr) SetOp(op Op) { // A CallPartExpr is a method expression X.Method (uncalled). type CallPartExpr struct { miniExpr - Func_ *Func - X Node - Method *types.Field + Func_ *Func + X Node + Method *types.Field + Prealloc *Name } func NewCallPartExpr(pos src.XPos, x Node, method *types.Field, fn *Func) *CallPartExpr { @@ -255,7 +257,8 @@ func (n *CallPartExpr) SetLeft(x Node) { n.X = x } // A ClosureExpr is a function literal expression. type ClosureExpr struct { miniExpr - Func_ *Func + Func_ *Func + Prealloc *Name } func NewClosureExpr(pos src.XPos, fn *Func) *ClosureExpr { @@ -287,9 +290,10 @@ func (n *ClosureReadExpr) Offset() int64 { return n.Offset_ } // Before type-checking, the type is Ntype. 
type CompLitExpr struct { miniExpr - orig Node - Ntype Ntype - List_ Nodes // initialized values + orig Node + Ntype Ntype + List_ Nodes // initialized values + Prealloc *Name } func NewCompLitExpr(pos src.XPos, op Op, typ Ntype, list []Node) *CompLitExpr { diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index 4dd1733074868..12811821ad9ee 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -368,6 +368,7 @@ type RangeStmt struct { Body_ Nodes HasBreak_ bool typ *types.Type // TODO(rsc): Remove - use X.Type() instead + Prealloc *Name } func NewRangeStmt(pos src.XPos, vars []Node, x Node, body []Node) *RangeStmt { From 0bb0baf68338496ded6837294866c8ace3a14e44 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Fri, 18 Dec 2020 11:29:49 -0500 Subject: [PATCH 192/474] [dev.regabi] cmd/compile: cleanup for concrete types - more Accumulated fixes to recent changes, to make the code safe for automated deinterfacing. Change-Id: I200737046cea88f3356b2402f09e2ca477fb8456 Reviewed-on: https://go-review.googlesource.com/c/go/+/279232 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/order.go | 8 +++----- src/cmd/compile/internal/gc/select.go | 11 ++++++----- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 87d7cf3aa93d1..7915e4b2f7cfb 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -892,12 +892,12 @@ func (o *Order) stmt(n ir.Node) { case ir.OSELRECV2: // case x, ok = <-c + r := r.(*ir.AssignListStmt) recv := r.Rlist().First().(*ir.UnaryExpr) recv.SetLeft(o.expr(recv.Left(), nil)) if recv.Left().Op() != ir.ONAME { recv.SetLeft(o.copyExpr(recv.Left())) } - r := r.(*ir.AssignListStmt) init := r.PtrInit().Slice() r.PtrInit().Set(nil) @@ -915,13 +915,11 @@ func (o *Order) stmt(n ir.Node) { if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).Left() == n { init = init[1:] } - dcl := ir.Nod(ir.ODCL, n, nil) - dcl = typecheck(dcl, ctxStmt) + dcl := typecheck(ir.Nod(ir.ODCL, n, nil), ctxStmt) ncas.PtrInit().Append(dcl) } tmp := o.newTemp(t, t.HasPointers()) - as := ir.Nod(ir.OAS, n, conv(tmp, n.Type())) - as = typecheck(as, ctxStmt) + as := typecheck(ir.Nod(ir.OAS, n, conv(tmp, n.Type())), ctxStmt) ncas.PtrInit().Append(as) r.PtrList().SetIndex(i, tmp) } diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go index c017b8e29aa31..974c4b254e7ff 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/gc/select.go @@ -207,8 +207,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node { } else { // TODO(cuonglm): make this use selectnbrecv() // if selectnbrecv2(&v, &received, c) { body } else { default body } - receivedp := ir.Nod(ir.OADDR, n.List().Second(), nil) - receivedp = typecheck(receivedp, ctxExpr) + receivedp := typecheck(nodAddr(n.List().Second()), ctxExpr) call = mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch) } } @@ -323,9 +322,11 @@ func walkselectcases(cases ir.Nodes) []ir.Node { r := ir.Nod(ir.OIF, cond, nil) - if n := cas.Left(); n != nil && n.Op() == ir.OSELRECV2 && !ir.IsBlank(n.List().Second()) { - x := ir.Nod(ir.OAS, n.List().Second(), recvOK) - r.PtrBody().Append(typecheck(x, ctxStmt)) + if n := cas.Left(); n != nil && n.Op() == ir.OSELRECV2 { + if !ir.IsBlank(n.List().Second()) { + 
x := ir.Nod(ir.OAS, n.List().Second(), recvOK) + r.PtrBody().Append(typecheck(x, ctxStmt)) + } } r.PtrBody().AppendNodes(cas.PtrBody()) From 2153a99914c3c24b98cd4cfccd1d2f670273a4ac Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 21 Dec 2020 01:14:36 -0500 Subject: [PATCH 193/474] [dev.regabi] cmd/compile: setup to move Addrconst, Patch into cmd/internal/obj Deleting the Pc assignment from Patch is safe because the actual PCs are not assigned until well after the compiler is done patching jumps. And it proves that replacing uses of Patch with SetTarget will be safe later. Change-Id: Iffcbe03f0b5949ccd4c91e79c1272cd06be0f434 Reviewed-on: https://go-review.googlesource.com/c/go/+/279296 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/gsubr.go | 8 +------- src/cmd/internal/obj/link.go | 6 ++++++ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index 79ca669dfbba9..ddb431d5abf68 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -321,15 +321,9 @@ func ggloblsym(s *obj.LSym, width int32, flags int16) { } func Addrconst(a *obj.Addr, v int64) { - a.Sym = nil - a.Type = obj.TYPE_CONST - a.Offset = v + a.SetConst(v) } func Patch(p *obj.Prog, to *obj.Prog) { - if p.To.Type != obj.TYPE_BRANCH { - base.Fatalf("patch: not a branch") - } p.To.SetTarget(to) - p.To.Offset = to.Pc } diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index eaebfaf4b6314..7b5c990a5de7d 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -250,6 +250,12 @@ func (a *Addr) SetTarget(t *Prog) { a.Val = t } +func (a *Addr) SetConst(v int64) { + a.Sym = nil + a.Type = TYPE_CONST + a.Offset = v +} + // Prog describes a single machine instruction. // // The general instruction form is: From 1a3b036b836d5b41871515ec350b203377e087a6 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 21 Dec 2020 01:29:02 -0500 Subject: [PATCH 194/474] [dev.regabi] cmd/compile: collect global compilation state There are various global variables tracking the state of the compilation. Collect them in a single global struct instead. The struct definition is in package ir, but the struct itself is still in package gc. It may eventually be threaded through the code, but in the short term will end up in package typecheck. 
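Editor's note, not part of the original message: the struct gives one field
per former global, so the conversion below is mechanical, roughly

	before: xtop = append(xtop, fn)
	after:  Target.Decls = append(Target.Decls, fn)

with Target the single *ir.Package for the compilation; the full definition
(Imports, Inits, Decls, Externs, Asms, CgoPragmas, Embeds, Exports) is in the
new file ir/package.go at the end of this diff.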
Change-Id: I019db07aaedaed2c9b67dd45a4e138dc6028e54c Reviewed-on: https://go-review.googlesource.com/c/go/+/279297 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/alg.go | 4 +-- src/cmd/compile/internal/gc/bexport.go | 2 +- src/cmd/compile/internal/gc/closure.go | 8 ++--- src/cmd/compile/internal/gc/dcl.go | 8 ++--- src/cmd/compile/internal/gc/embed.go | 8 ++--- src/cmd/compile/internal/gc/export.go | 8 ++--- src/cmd/compile/internal/gc/go.go | 4 --- src/cmd/compile/internal/gc/iexport.go | 4 +-- src/cmd/compile/internal/gc/iimport.go | 2 +- src/cmd/compile/internal/gc/init.go | 13 +++---- src/cmd/compile/internal/gc/inl.go | 6 ++-- src/cmd/compile/internal/gc/main.go | 49 ++++++++++++++------------ src/cmd/compile/internal/gc/noder.go | 9 ++--- src/cmd/compile/internal/gc/obj.go | 46 +++++++++++------------- src/cmd/compile/internal/gc/pgen.go | 2 +- src/cmd/compile/internal/gc/subr.go | 2 +- src/cmd/compile/internal/gc/walk.go | 2 +- src/cmd/compile/internal/ir/package.go | 35 ++++++++++++++++++ 18 files changed, 116 insertions(+), 96 deletions(-) create mode 100644 src/cmd/compile/internal/ir/package.go diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index f03aec3237542..036a1e7491c82 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -394,7 +394,7 @@ func genhash(t *types.Type) *obj.LSym { } fn.SetNilCheckDisabled(true) - xtop = append(xtop, fn) + Target.Decls = append(Target.Decls, fn) // Build closure. It doesn't close over any variables, so // it contains just the function pointer. @@ -774,7 +774,7 @@ func geneq(t *types.Type) *obj.LSym { // neither of which can be nil, and our comparisons // are shallow. fn.SetNilCheckDisabled(true) - xtop = append(xtop, fn) + Target.Decls = append(Target.Decls, fn) // Generate a closure which points at the function we just generated. dsymptr(closure, 0, sym.Linksym(), 0) diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 31fd251c5e0e1..2347971fc2c38 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -18,7 +18,7 @@ func (p *exporter) markObject(n ir.Node) { if n.Op() == ir.ONAME { n := n.(*ir.Name) if n.Class() == ir.PFUNC { - inlFlood(n) + inlFlood(n, exportsym) } } diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index 85c594787b3f6..e07ed4cd24830 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -89,7 +89,7 @@ func typecheckclosure(clo ir.Node, top int) { fn.SetClosureCalled(top&ctxCallee != 0) // Do not typecheck fn twice, otherwise, we will end up pushing - // fn to xtop multiple times, causing initLSym called twice. + // fn to Target.Decls multiple times, causing initLSym called twice. // See #30709 if fn.Typecheck() == 1 { return @@ -118,7 +118,7 @@ func typecheckclosure(clo ir.Node, top int) { // Type check the body now, but only if we're inside a function. // At top level (in a variable initialization: curfn==nil) we're not // ready to type check code yet; we'll check it later, because the - // underlying closure function we create is added to xtop. + // underlying closure function we create is added to Target.Decls. 
if Curfn != nil && clo.Type() != nil { oldfn := Curfn Curfn = fn @@ -129,7 +129,7 @@ func typecheckclosure(clo ir.Node, top int) { Curfn = oldfn } - xtop = append(xtop, fn) + Target.Decls = append(Target.Decls, fn) } // globClosgen is like Func.Closgen, but for the global scope. @@ -499,7 +499,7 @@ func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir. Curfn = fn typecheckslice(fn.Body().Slice(), ctxStmt) sym.Def = fn - xtop = append(xtop, fn) + Target.Decls = append(Target.Decls, fn) Curfn = savecurfn base.Pos = saveLineNo diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 34ba3728439df..20e5edc4cb4ae 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -17,8 +17,6 @@ import ( // Declaration stack & operations -var externdcl []ir.Node - func testdclstack() { if !types.IsDclstackValid() { base.Fatalf("mark left on the dclstack") @@ -75,7 +73,7 @@ func declare(n *ir.Name, ctxt ir.Class) { if s.Name == "main" && s.Pkg.Name == "main" { base.ErrorfAt(n.Pos(), "cannot declare main - must be func") } - externdcl = append(externdcl, n) + Target.Externs = append(Target.Externs, n) } else { if Curfn == nil && ctxt == ir.PAUTO { base.Pos = n.Pos() @@ -850,7 +848,7 @@ func newNowritebarrierrecChecker() *nowritebarrierrecChecker { // important to handle it for this check, so we model it // directly. This has to happen before transformclosure since // it's a lot harder to work out the argument after. - for _, n := range xtop { + for _, n := range Target.Decls { if n.Op() != ir.ODCLFUNC { continue } @@ -925,7 +923,7 @@ func (c *nowritebarrierrecChecker) check() { // q is the queue of ODCLFUNC Nodes to visit in BFS order. var q ir.NameQueue - for _, n := range xtop { + for _, n := range Target.Decls { if n.Op() != ir.ODCLFUNC { continue } diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go index b9c88c0d5b24f..7d67d2dfd01ba 100644 --- a/src/cmd/compile/internal/gc/embed.go +++ b/src/cmd/compile/internal/gc/embed.go @@ -17,8 +17,6 @@ import ( "strings" ) -var embedlist []ir.Node - const ( embedUnknown = iota embedBytes @@ -117,12 +115,12 @@ func varEmbed(p *noder, names []ir.Node, typ ir.Ntype, exprs []ir.Node, embeds [ v.Sym().Def = v v.Name().Ntype = typ v.SetClass(ir.PEXTERN) - externdcl = append(externdcl, v) + Target.Externs = append(Target.Externs, v) exprs = []ir.Node{v} } v.Name().SetEmbedFiles(list) - embedlist = append(embedlist, v) + Target.Embeds = append(Target.Embeds, v) return exprs } @@ -187,7 +185,7 @@ func embedFileLess(x, y string) bool { } func dumpembeds() { - for _, v := range embedlist { + for _, v := range Target.Embeds { initEmbed(v) } } diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 16d45a00aa040..42e0db2b20683 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -21,8 +21,6 @@ func exportf(bout *bio.Writer, format string, args ...interface{}) { } } -var asmlist []ir.Node - // exportsym marks n for export (or reexport). 
func exportsym(n *ir.Name) { if n.Sym().OnExportList() { @@ -34,7 +32,7 @@ func exportsym(n *ir.Name) { fmt.Printf("export symbol %v\n", n.Sym()) } - exportlist = append(exportlist, n) + Target.Exports = append(Target.Exports, n) } func initname(s string) bool { @@ -57,7 +55,7 @@ func autoexport(n *ir.Name, ctxt ir.Class) { } if base.Flag.AsmHdr != "" && !n.Sym().Asm() { n.Sym().SetAsm(true) - asmlist = append(asmlist, n) + Target.Asms = append(Target.Asms, n) } } @@ -202,7 +200,7 @@ func dumpasmhdr() { base.Fatalf("%v", err) } fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", types.LocalPkg.Name) - for _, n := range asmlist { + for _, n := range Target.Asms { if n.Sym().IsBlank() { continue } diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index b00a7ca14c1db..b092e6933c825 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -128,10 +128,6 @@ var ( iscmp [ir.OEND]bool ) -var xtop []ir.Node - -var exportlist []*ir.Name - var importlist []*ir.Func // imported functions and methods with inlinable bodies var ( diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index b54eeca7cb75a..969f6bc3b294f 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -251,7 +251,7 @@ func iexport(out *bufio.Writer) { { // TODO(mdempsky): Separate from bexport logic. p := &exporter{marked: make(map[*types.Type]bool)} - for _, n := range exportlist { + for _, n := range Target.Exports { p.markObject(n) } } @@ -272,7 +272,7 @@ func iexport(out *bufio.Writer) { } // Initialize work queue with exported declarations. - for _, n := range exportlist { + for _, n := range Target.Exports { p.pushDecl(n) } diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 154c4e3a84339..549751335e4d2 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -1111,7 +1111,7 @@ func (r *importReader) exprsOrNil() (a, b ir.Node) { } func builtinCall(pos src.XPos, op ir.Op) *ir.CallExpr { - return ir.NewCallExpr(pos, ir.OCALL, mkname(types.BuiltinPkg.Lookup(ir.OpNames[op])), nil) + return ir.NewCallExpr(pos, ir.OCALL, ir.NewIdent(base.Pos, types.BuiltinPkg.Lookup(ir.OpNames[op])), nil) } func npos(pos src.XPos, n ir.Node) ir.Node { diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index 8de4d84f2d6b0..f1398f8644fa2 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -27,9 +27,6 @@ func renameinit() *types.Sym { return s } -// List of imported packages, in source code order. See #31636. -var sourceOrderImports []*types.Pkg - // fninit makes an initialization record for the package. // See runtime/proc.go:initTask for its layout. // The 3 tasks for initialization are: @@ -43,7 +40,7 @@ func fninit(n []ir.Node) { var fns []*obj.LSym // functions to call for package initialization // Find imported packages with init tasks. - for _, pkg := range sourceOrderImports { + for _, pkg := range Target.Imports { n := resolve(oldname(pkg.Lookup(".inittask"))) if n.Op() == ir.ONONAME { continue @@ -72,7 +69,7 @@ func fninit(n []ir.Node) { Curfn = fn typecheckslice(nf, ctxStmt) Curfn = nil - xtop = append(xtop, fn) + Target.Decls = append(Target.Decls, fn) fns = append(fns, initializers.Linksym()) } if initTodo.Dcl != nil { @@ -84,16 +81,14 @@ func fninit(n []ir.Node) { initTodo = nil // Record user init functions. 
- for i := 0; i < renameinitgen; i++ { - s := lookupN("init.", i) - fn := ir.AsNode(s.Def).Name().Defn.(*ir.Func) + for _, fn := range Target.Inits { // Skip init functions with empty bodies. if fn.Body().Len() == 1 { if stmt := fn.Body().First(); stmt.Op() == ir.OBLOCK && stmt.(*ir.BlockStmt).List().Len() == 0 { continue } } - fns = append(fns, s.Linksym()) + fns = append(fns, fn.Nname.Sym().Linksym()) } if len(deps) == 0 && len(fns) == 0 && types.LocalPkg.Name != "main" && types.LocalPkg.Name != "runtime" { diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index b571c2b9148b5..6c8f380d87318 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -230,7 +230,7 @@ func caninl(fn *ir.Func) { // inlFlood marks n's inline body for export and recursively ensures // all called functions are marked too. -func inlFlood(n *ir.Name) { +func inlFlood(n *ir.Name, exportsym func(*ir.Name)) { if n == nil { return } @@ -258,13 +258,13 @@ func inlFlood(n *ir.Name) { ir.VisitList(ir.AsNodes(fn.Inl.Body), func(n ir.Node) { switch n.Op() { case ir.OMETHEXPR, ir.ODOTMETH: - inlFlood(methodExprName(n)) + inlFlood(methodExprName(n), exportsym) case ir.ONAME: n := n.(*ir.Name) switch n.Class() { case ir.PFUNC: - inlFlood(n) + inlFlood(n, exportsym) exportsym(n) case ir.PEXTERN: exportsym(n) diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 03e787f7180be..2c598a2329bf0 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -51,6 +51,9 @@ func hidePanic() { } } +// Target is the package being compiled. +var Target *ir.Package + // timing data for compiler phases var timings Timings @@ -207,6 +210,8 @@ func Main(archInit func(*Arch)) { Widthptr = thearch.LinkArch.PtrSize Widthreg = thearch.LinkArch.RegSize + Target = new(ir.Package) + // initialize types package // (we need to do this to break dependencies that otherwise // would lead to import cycles) @@ -240,33 +245,33 @@ func Main(archInit func(*Arch)) { // to avoid cycles like #18640. // TODO(gri) Remove this again once we have a fix for #25838. - // Don't use range--typecheck can add closures to xtop. + // Don't use range--typecheck can add closures to Target.Decls. timings.Start("fe", "typecheck", "top1") - for i := 0; i < len(xtop); i++ { - n := xtop[i] + for i := 0; i < len(Target.Decls); i++ { + n := Target.Decls[i] if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.(*ir.Decl).Left().Name().Alias()) { - xtop[i] = typecheck(n, ctxStmt) + Target.Decls[i] = typecheck(n, ctxStmt) } } // Phase 2: Variable assignments. // To check interface assignments, depends on phase 1. - // Don't use range--typecheck can add closures to xtop. + // Don't use range--typecheck can add closures to Target.Decls. timings.Start("fe", "typecheck", "top2") - for i := 0; i < len(xtop); i++ { - n := xtop[i] + for i := 0; i < len(Target.Decls); i++ { + n := Target.Decls[i] if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).Left().Name().Alias() { - xtop[i] = typecheck(n, ctxStmt) + Target.Decls[i] = typecheck(n, ctxStmt) } } // Phase 3: Type check function bodies. - // Don't use range--typecheck can add closures to xtop. + // Don't use range--typecheck can add closures to Target.Decls. 
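// Editor's note, not part of the patch: the "don't use range" comments exist
// because a range loop evaluates the slice's length once and so would skip
// elements appended during iteration, while the index form re-reads len on
// every step. Self-contained illustration:
func typecheckAll(decls []ir.Node, check func(ir.Node) []ir.Node) {
	for i := 0; i < len(decls); i++ { // not range: check may grow decls
		decls = append(decls, check(decls[i])...)
	}
}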
timings.Start("fe", "typecheck", "func") var fcount int64 - for i := 0; i < len(xtop); i++ { - n := xtop[i] + for i := 0; i < len(Target.Decls); i++ { + n := Target.Decls[i] if n.Op() == ir.ODCLFUNC { Curfn = n.(*ir.Func) decldepth = 1 @@ -287,9 +292,9 @@ func Main(archInit func(*Arch)) { // TODO(mdempsky): This should be handled when type checking their // corresponding ODCL nodes. timings.Start("fe", "typecheck", "externdcls") - for i, n := range externdcl { + for i, n := range Target.Externs { if n.Op() == ir.ONAME { - externdcl[i] = typecheck(externdcl[i], ctxExpr) + Target.Externs[i] = typecheck(Target.Externs[i], ctxExpr) } } @@ -301,13 +306,13 @@ func Main(archInit func(*Arch)) { timings.AddEvent(fcount, "funcs") - fninit(xtop) + fninit(Target.Decls) // Phase 4: Decide how to capture closed variables. // This needs to run before escape analysis, // because variables captured by value do not escape. timings.Start("fe", "capturevars") - for _, n := range xtop { + for _, n := range Target.Decls { if n.Op() == ir.ODCLFUNC && n.Func().OClosure != nil { Curfn = n.(*ir.Func) capturevars(Curfn) @@ -332,7 +337,7 @@ func Main(archInit func(*Arch)) { if base.Flag.LowerL != 0 { // Find functions that can be inlined and clone them before walk expands them. - visitBottomUp(xtop, func(list []*ir.Func, recursive bool) { + visitBottomUp(Target.Decls, func(list []*ir.Func, recursive bool) { numfns := numNonClosures(list) for _, n := range list { if !recursive || numfns > 1 { @@ -350,7 +355,7 @@ func Main(archInit func(*Arch)) { }) } - for _, n := range xtop { + for _, n := range Target.Decls { if n.Op() == ir.ODCLFUNC { devirtualize(n.(*ir.Func)) } @@ -366,7 +371,7 @@ func Main(archInit func(*Arch)) { // Large values are also moved off stack in escape analysis; // because large values may contain pointers, it must happen early. timings.Start("fe", "escapes") - escapes(xtop) + escapes(Target.Decls) // Collect information for go:nowritebarrierrec // checking. This must happen before transformclosure. @@ -380,7 +385,7 @@ func Main(archInit func(*Arch)) { // This needs to happen before walk, because closures must be transformed // before walk reaches a call of a closure. timings.Start("fe", "xclosures") - for _, n := range xtop { + for _, n := range Target.Decls { if n.Op() == ir.ODCLFUNC && n.Func().OClosure != nil { Curfn = n.(*ir.Func) transformclosure(Curfn) @@ -399,11 +404,11 @@ func Main(archInit func(*Arch)) { peekitabs() // Phase 8: Compile top level functions. - // Don't use range--walk can add functions to xtop. + // Don't use range--walk can add functions to Target.Decls. timings.Start("be", "compilefuncs") fcount = 0 - for i := 0; i < len(xtop); i++ { - n := xtop[i] + for i := 0; i < len(Target.Decls); i++ { + n := Target.Decls[i] if n.Op() == ir.ODCLFUNC { funccompile(n.(*ir.Func)) fcount++ diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 43ec2ce35008e..10eac6e815b58 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -27,7 +27,7 @@ import ( // parseFiles concurrently parses files into *syntax.File structures. // Each declaration in every *syntax.File is converted to a syntax tree -// and its root represented by *Node is appended to xtop. +// and its root represented by *Node is appended to Target.Decls. // Returns the total count of parsed lines. 
func parseFiles(filenames []string) uint { noders := make([]*noder, 0, len(filenames)) @@ -260,7 +260,7 @@ func (p *noder) node() { p.checkUnused(pragma) } - xtop = append(xtop, p.decls(p.file.DeclList)...) + Target.Decls = append(Target.Decls, p.decls(p.file.DeclList)...) base.Pos = src.NoXPos clearImports() @@ -297,7 +297,7 @@ func (p *noder) processPragmas() { } } - pragcgobuf = append(pragcgobuf, p.pragcgobuf...) + Target.CgoPragmas = append(Target.CgoPragmas, p.pragcgobuf...) } func (p *noder) decls(decls []syntax.Decl) (l []ir.Node) { @@ -354,7 +354,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) { } if !ipkg.Direct { - sourceOrderImports = append(sourceOrderImports, ipkg) + Target.Imports = append(Target.Imports, ipkg) } ipkg.Direct = true @@ -530,6 +530,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node { if len(t.Params) > 0 || len(t.Results) > 0 { base.ErrorfAt(f.Pos(), "func init must have no arguments and no return values") } + Target.Inits = append(Target.Inits, f) } if types.LocalPkg.Name == "main" && name.Name == "main" { diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index cd1500d1edffb..094c386218896 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -117,13 +117,14 @@ func dumpCompilerObj(bout *bio.Writer) { } func dumpdata() { - externs := len(externdcl) - xtops := len(xtop) + numExterns := len(Target.Externs) + numDecls := len(Target.Decls) - dumpglobls() + dumpglobls(Target.Externs) + dumpfuncsyms() addptabs() - exportlistLen := len(exportlist) - addsignats(externdcl) + numExports := len(Target.Exports) + addsignats(Target.Externs) dumpsignats() dumptabs() ptabsLen := len(ptabs) @@ -140,28 +141,22 @@ func dumpdata() { // In the typical case, we loop 0 or 1 times. // It was not until issue 24761 that we found any code that required a loop at all. for { - for i := xtops; i < len(xtop); i++ { - n := xtop[i] + for i := numDecls; i < len(Target.Decls); i++ { + n := Target.Decls[i] if n.Op() == ir.ODCLFUNC { funccompile(n.(*ir.Func)) } } - xtops = len(xtop) + numDecls = len(Target.Decls) compileFunctions() dumpsignats() - if xtops == len(xtop) { + if numDecls == len(Target.Decls) { break } } // Dump extra globals. 
- tmp := externdcl - - if externdcl != nil { - externdcl = externdcl[externs:] - } - dumpglobls() - externdcl = tmp + dumpglobls(Target.Externs[numExterns:]) if zerosize > 0 { zero := mappkg.Lookup("zero") @@ -170,8 +165,8 @@ func dumpdata() { addGCLocals() - if exportlistLen != len(exportlist) { - base.Fatalf("exportlist changed after compile functions loop") + if numExports != len(Target.Exports) { + base.Fatalf("Target.Exports changed after compile functions loop") } if ptabsLen != len(ptabs) { base.Fatalf("ptabs changed after compile functions loop") @@ -184,11 +179,11 @@ func dumpdata() { func dumpLinkerObj(bout *bio.Writer) { printObjHeader(bout) - if len(pragcgobuf) != 0 { + if len(Target.CgoPragmas) != 0 { // write empty export section; must be before cgo section fmt.Fprintf(bout, "\n$$\n\n$$\n\n") fmt.Fprintf(bout, "\n$$ // cgo\n") - if err := json.NewEncoder(bout).Encode(pragcgobuf); err != nil { + if err := json.NewEncoder(bout).Encode(Target.CgoPragmas); err != nil { base.Fatalf("serializing pragcgobuf: %v", err) } fmt.Fprintf(bout, "\n$$\n\n") @@ -203,7 +198,7 @@ func addptabs() { if !base.Ctxt.Flag_dynlink || types.LocalPkg.Name != "main" { return } - for _, exportn := range exportlist { + for _, exportn := range Target.Exports { s := exportn.Sym() nn := ir.AsNode(s.Def) if nn == nil { @@ -267,9 +262,9 @@ func dumpGlobalConst(n ir.Node) { base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym().Name, typesymname(t), ir.IntVal(t, v)) } -func dumpglobls() { +func dumpglobls(externs []ir.Node) { // add globals - for _, n := range externdcl { + for _, n := range externs { switch n.Op() { case ir.ONAME: dumpGlobal(n.(*ir.Name)) @@ -277,7 +272,9 @@ func dumpglobls() { dumpGlobalConst(n) } } +} +func dumpfuncsyms() { sort.Slice(funcsyms, func(i, j int) bool { return funcsyms[i].LinksymName() < funcsyms[j].LinksymName() }) @@ -286,9 +283,6 @@ func dumpglobls() { dsymptr(sf, 0, s.Linksym(), 0) ggloblsym(sf, int32(Widthptr), obj.DUPOK|obj.RODATA) } - - // Do not reprocess funcsyms on next dumpglobls call. - funcsyms = nil } // addGCLocals adds gcargs, gclocals, gcregs, and stack object symbols to Ctxt.Data. diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 901af567fab36..5b5288c389fa7 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -287,7 +287,7 @@ func compilenow(fn *ir.Func) bool { // candidate AND was not inlined (yet), put it onto the compile // queue instead of compiling it immediately. This is in case we // wind up inlining it into a method wrapper that is generated by - // compiling a function later on in the xtop list. + // compiling a function later on in the Target.Decls list. 
if ir.IsMethod(fn) && isInlinableButNotInlined(fn) { return false } diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 9c26edf136ab9..2b0047e1503df 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -1275,7 +1275,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { escapeFuncs([]*ir.Func{fn}, false) Curfn = nil - xtop = append(xtop, fn) + Target.Decls = append(Target.Decls, fn) } func paramNnames(ft *types.Type) []ir.Node { diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index a4ecc0c44dc09..657a744e68d46 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -3942,7 +3942,7 @@ func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node { typecheckFunc(fn) typecheckslice(fn.Body().Slice(), ctxStmt) - xtop = append(xtop, fn) + Target.Decls = append(Target.Decls, fn) call = ir.NewCallExpr(base.Pos, ir.OCALL, fn.Nname, n.List().Slice()) return walkexpr(typecheck(call, ctxStmt), init) diff --git a/src/cmd/compile/internal/ir/package.go b/src/cmd/compile/internal/ir/package.go new file mode 100644 index 0000000000000..3896e2b91b117 --- /dev/null +++ b/src/cmd/compile/internal/ir/package.go @@ -0,0 +1,35 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import "cmd/compile/internal/types" + +// A Package holds information about the package being compiled. +type Package struct { + // Imports, listed in source order. + // See golang.org/issue/31636. + Imports []*types.Pkg + + // Init functions, listed in source order. + Inits []*Func + + // Top-level declarations. + Decls []Node + + // Extern (package global) declarations. + Externs []Node + + // Assembly function declarations. + Asms []*Name + + // Cgo directives. + CgoPragmas [][]string + + // Variables with //go:embed lines. + Embeds []*Name + + // Exported (or re-exported) symbols. + Exports []*Name +} From 85ce6ecfe3c54075c7bc53538940f0319b57068b Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 21 Dec 2020 09:11:12 -0500 Subject: [PATCH 195/474] [dev.regabi] cmd/compile: separate exportsym more cleanly Clean up a TODO (and make the package gc split easier) by moving the exportsym walk out of iexport proper. Also move exportsym call out of fninit. Change-Id: Ie5887a68d325f7154201f4a35b9b4be4bf4b48dd Reviewed-on: https://go-review.googlesource.com/c/go/+/279298 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/export.go | 5 +++++ src/cmd/compile/internal/gc/iexport.go | 10 ---------- src/cmd/compile/internal/gc/init.go | 20 ++++++++++---------- src/cmd/compile/internal/gc/main.go | 4 +++- 4 files changed, 18 insertions(+), 21 deletions(-) diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 42e0db2b20683..d26dd9af5d2b8 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -60,6 +60,11 @@ func autoexport(n *ir.Name, ctxt ir.Class) { } func dumpexport(bout *bio.Writer) { + p := &exporter{marked: make(map[*types.Type]bool)} + for _, n := range Target.Exports { + p.markObject(n) + } + // The linker also looks for the $$ marker - use char after $$ to distinguish format. 
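// Editor's note, not part of the patch: with the marking walk hoisted out of
// iexport, the export pipeline reads as two explicit phases at this call
// site. Condensed from the hunk above (the trailing iexport(bout.Writer)
// call is assumed from surrounding context, not shown in this diff):
//
//	p := &exporter{marked: make(map[*types.Type]bool)}
//	for _, n := range Target.Exports {
//		p.markObject(n) // phase 0: mark inline bodies reachable from exports
//	}
//	iexport(bout.Writer) // phase 1: serialize the marked declarations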
exportf(bout, "\n$$B\n") // indicate binary export format off := bout.Offset() diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 969f6bc3b294f..c03445044df42 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -246,16 +246,6 @@ const ( ) func iexport(out *bufio.Writer) { - // Mark inline bodies that are reachable through exported objects. - // (Phase 0 of bexport.go.) - { - // TODO(mdempsky): Separate from bexport logic. - p := &exporter{marked: make(map[*types.Type]bool)} - for _, n := range Target.Exports { - p.markObject(n) - } - } - p := iexporter{ allPkgs: map[*types.Pkg]bool{}, stringIndex: map[string]uint64{}, diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index f1398f8644fa2..1c15ce131838d 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -27,21 +27,21 @@ func renameinit() *types.Sym { return s } -// fninit makes an initialization record for the package. +// fninit makes and returns an initialization record for the package. // See runtime/proc.go:initTask for its layout. // The 3 tasks for initialization are: // 1) Initialize all of the packages the current package depends on. // 2) Initialize all the variables that have initializers. // 3) Run any init functions. -func fninit(n []ir.Node) { - nf := initOrder(n) +func fninit() *ir.Name { + nf := initOrder(Target.Decls) var deps []*obj.LSym // initTask records for packages the current package depends on var fns []*obj.LSym // functions to call for package initialization // Find imported packages with init tasks. for _, pkg := range Target.Imports { - n := resolve(oldname(pkg.Lookup(".inittask"))) + n := resolve(ir.NewIdent(base.Pos, pkg.Lookup(".inittask"))) if n.Op() == ir.ONONAME { continue } @@ -92,16 +92,15 @@ func fninit(n []ir.Node) { } if len(deps) == 0 && len(fns) == 0 && types.LocalPkg.Name != "main" && types.LocalPkg.Name != "runtime" { - return // nothing to initialize + return nil // nothing to initialize } // Make an .inittask structure. sym := lookup(".inittask") - nn := NewName(sym) - nn.SetType(types.Types[types.TUINT8]) // fake type - nn.SetClass(ir.PEXTERN) - sym.Def = nn - exportsym(nn) + task := NewName(sym) + task.SetType(types.Types[types.TUINT8]) // fake type + task.SetClass(ir.PEXTERN) + sym.Def = task lsym := sym.Linksym() ot := 0 ot = duintptr(lsym, ot, 0) // state: not initialized yet @@ -116,4 +115,5 @@ func fninit(n []ir.Node) { // An initTask has pointers, but none into the Go heap. // It's not quite read only, the state field must be modifiable. ggloblsym(lsym, int32(ot), obj.NOPTR) + return task } diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 2c598a2329bf0..545491daa1a67 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -306,7 +306,9 @@ func Main(archInit func(*Arch)) { timings.AddEvent(fcount, "funcs") - fninit(Target.Decls) + if initTask := fninit(); initTask != nil { + exportsym(initTask) + } // Phase 4: Decide how to capture closed variables. // This needs to run before escape analysis, From 4836e28ac0482183a3a6af88ee4295ffdbc94f62 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 21 Dec 2020 01:36:15 -0500 Subject: [PATCH 196/474] [dev.regabi] cmd/compile: separate noder more cleanly Separate embed, cgo pragmas, and Main trackScopes variable from noder more cleanly. This lets us split embed and noder into new packages. 
It also assumes that the local embedded variables will be removed and deletes them now for simplicity. Change-Id: I9638bcc2c5f0e76440de056c6285b6aa2f73a00d Reviewed-on: https://go-review.googlesource.com/c/go/+/279299 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/embed.go | 51 ++++++++++----------- src/cmd/compile/internal/gc/go.go | 6 +-- src/cmd/compile/internal/gc/main.go | 17 ++++++- src/cmd/compile/internal/gc/noder.go | 26 +++-------- src/cmd/compile/internal/ir/name.go | 50 +++++++------------- src/embed/internal/embedtest/embed_test.go | 28 +++-------- src/embed/internal/embedtest/embedx_test.go | 14 ------ 7 files changed, 72 insertions(+), 120 deletions(-) diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go index 7d67d2dfd01ba..0d4ce83716458 100644 --- a/src/cmd/compile/internal/gc/embed.go +++ b/src/cmd/compile/internal/gc/embed.go @@ -24,8 +24,6 @@ const ( embedFiles ) -var numLocalEmbed int - func varEmbed(p *noder, names []ir.Node, typ ir.Ntype, exprs []ir.Node, embeds []PragmaEmbed) (newExprs []ir.Node) { haveEmbed := false for _, decl := range p.file.DeclList { @@ -63,25 +61,39 @@ func varEmbed(p *noder, names []ir.Node, typ ir.Ntype, exprs []ir.Node, embeds [ p.errorAt(pos, "go:embed cannot apply to var without type") return exprs } + if dclcontext != ir.PEXTERN { + p.errorAt(pos, "go:embed cannot apply to var inside func") + return exprs + } + + v := names[0].(*ir.Name) + Target.Embeds = append(Target.Embeds, v) + v.Embed = new([]ir.Embed) + for _, e := range embeds { + *v.Embed = append(*v.Embed, ir.Embed{Pos: p.makeXPos(e.Pos), Patterns: e.Patterns}) + } + return exprs +} - kind := embedKindApprox(typ) +func embedFileList(v *ir.Name) []string { + kind := embedKind(v.Type()) if kind == embedUnknown { - p.errorAt(pos, "go:embed cannot apply to var of type %v", typ) - return exprs + base.ErrorfAt(v.Pos(), "go:embed cannot apply to var of type %v", v.Type()) + return nil } // Build list of files to store. have := make(map[string]bool) var list []string - for _, e := range embeds { + for _, e := range *v.Embed { for _, pattern := range e.Patterns { files, ok := base.Flag.Cfg.Embed.Patterns[pattern] if !ok { - p.errorAt(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern) + base.ErrorfAt(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern) } for _, file := range files { if base.Flag.Cfg.Embed.Files[file] == "" { - p.errorAt(e.Pos, "invalid go:embed: build system did not map file: %s", file) + base.ErrorfAt(e.Pos, "invalid go:embed: build system did not map file: %s", file) continue } if !have[file] { @@ -103,25 +115,12 @@ func varEmbed(p *noder, names []ir.Node, typ ir.Ntype, exprs []ir.Node, embeds [ if kind == embedString || kind == embedBytes { if len(list) > 1 { - p.errorAt(pos, "invalid go:embed: multiple files for type %v", typ) - return exprs + base.ErrorfAt(v.Pos(), "invalid go:embed: multiple files for type %v", v.Type()) + return nil } } - v := names[0].(*ir.Name) - if dclcontext != ir.PEXTERN { - numLocalEmbed++ - v = ir.NewNameAt(v.Pos(), lookupN("embed.", numLocalEmbed)) - v.Sym().Def = v - v.Name().Ntype = typ - v.SetClass(ir.PEXTERN) - Target.Externs = append(Target.Externs, v) - exprs = []ir.Node{v} - } - - v.Name().SetEmbedFiles(list) - Target.Embeds = append(Target.Embeds, v) - return exprs + return list } // embedKindApprox determines the kind of embedding variable, approximately. 
@@ -192,8 +191,8 @@ func dumpembeds() { // initEmbed emits the init data for a //go:embed variable, // which is either a string, a []byte, or an embed.FS. -func initEmbed(v ir.Node) { - files := v.Name().EmbedFiles() +func initEmbed(v *ir.Name) { + files := embedFileList(v) switch kind := embedKind(v.Type()); kind { case embedUnknown: base.ErrorfAt(v.Pos(), "go:embed cannot apply to var of type %v", v.Type()) diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index b092e6933c825..1707e6a11b862 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -116,13 +116,14 @@ var ( okforadd [types.NTYPE]bool okforand [types.NTYPE]bool okfornone [types.NTYPE]bool - okforcmp [types.NTYPE]bool okforbool [types.NTYPE]bool okforcap [types.NTYPE]bool okforlen [types.NTYPE]bool okforarith [types.NTYPE]bool ) +var okforcmp [types.NTYPE]bool + var ( okfor [ir.OEND][]bool iscmp [ir.OEND]bool @@ -149,9 +150,6 @@ var typecheckok bool // when the race detector is enabled. var instrumenting bool -// Whether we are tracking lexical scopes for DWARF. -var trackScopes bool - var nodfp *ir.Name var autogeneratedPos src.XPos diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 545491daa1a67..45880c5cde5dd 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -205,8 +205,6 @@ func Main(archInit func(*Arch)) { } } - trackScopes = base.Flag.Dwarf - Widthptr = thearch.LinkArch.PtrSize Widthreg = thearch.LinkArch.RegSize @@ -226,6 +224,7 @@ func Main(archInit func(*Arch)) { timings.Start("fe", "parse") lines := parseFiles(flag.Args()) + cgoSymABIs() timings.Stop() timings.AddEvent(int64(lines), "lines") @@ -477,6 +476,20 @@ func Main(archInit func(*Arch)) { } } +func cgoSymABIs() { + // The linker expects an ABI0 wrapper for all cgo-exported + // functions. + for _, prag := range Target.CgoPragmas { + switch prag[0] { + case "cgo_export_static", "cgo_export_dynamic": + if symabiRefs == nil { + symabiRefs = make(map[string]obj.ABI) + } + symabiRefs[prag[1]] = obj.ABI0 + } + } +} + // numNonClosures returns the number of functions in list which are not closures. func numNonClosures(list []*ir.Func) int { count := 0 diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 10eac6e815b58..ee01423833cb5 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -20,7 +20,6 @@ import ( "cmd/compile/internal/ir" "cmd/compile/internal/syntax" "cmd/compile/internal/types" - "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/src" ) @@ -36,8 +35,9 @@ func parseFiles(filenames []string) uint { for _, filename := range filenames { p := &noder{ - basemap: make(map[*syntax.PosBase]*src.PosBase), - err: make(chan syntax.Error), + basemap: make(map[*syntax.PosBase]*src.PosBase), + err: make(chan syntax.Error), + trackScopes: base.Flag.Dwarf, } noders = append(noders, p) @@ -151,7 +151,8 @@ type noder struct { // scopeVars is a stack tracking the number of variables declared in the // current function at the moment each open scope was opened. 
- scopeVars []int + trackScopes bool + scopeVars []int lastCloseScopePos syntax.Pos } @@ -179,7 +180,7 @@ func (p *noder) funcBody(fn *ir.Func, block *syntax.BlockStmt) { func (p *noder) openScope(pos syntax.Pos) { types.Markdcl() - if trackScopes { + if p.trackScopes { Curfn.Parents = append(Curfn.Parents, p.scope) p.scopeVars = append(p.scopeVars, len(Curfn.Dcl)) p.scope = ir.ScopeID(len(Curfn.Parents)) @@ -192,7 +193,7 @@ func (p *noder) closeScope(pos syntax.Pos) { p.lastCloseScopePos = pos types.Popdcl() - if trackScopes { + if p.trackScopes { scopeVars := p.scopeVars[len(p.scopeVars)-1] p.scopeVars = p.scopeVars[:len(p.scopeVars)-1] if scopeVars == len(Curfn.Dcl) { @@ -284,19 +285,6 @@ func (p *noder) processPragmas() { } n.Sym().Linkname = l.remote } - - // The linker expects an ABI0 wrapper for all cgo-exported - // functions. - for _, prag := range p.pragcgobuf { - switch prag[0] { - case "cgo_export_static", "cgo_export_dynamic": - if symabiRefs == nil { - symabiRefs = make(map[string]obj.ABI) - } - symabiRefs[prag[1]] = obj.ABI0 - } - } - Target.CgoPragmas = append(Target.CgoPragmas, p.pragcgobuf...) } diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 0c36ffdf7a387..f5f4280fd079e 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -34,16 +34,16 @@ func (*Ident) CanBeNtype() {} // Name holds Node fields used only by named nodes (ONAME, OTYPE, some OLITERAL). type Name struct { miniExpr - BuiltinOp Op // uint8 - Class_ Class // uint8 - flags bitset16 - pragma PragmaFlag // int16 - sym *types.Sym - fn *Func - Offset_ int64 - val constant.Value - orig Node - embedFiles *[]string // list of embedded files, for ONAME var + BuiltinOp Op // uint8 + Class_ Class // uint8 + flags bitset16 + pragma PragmaFlag // int16 + sym *types.Sym + fn *Func + Offset_ int64 + val constant.Value + orig Node + Embed *[]Embed // list of embedded files, for ONAME var PkgName *PkgName // real package for import . names // For a local variable (not param) or extern, the initializing assignment (OAS or OAS2). @@ -139,14 +139,14 @@ type Name struct { Outer *Name } +func (n *Name) isExpr() {} + // CloneName makes a cloned copy of the name. // It's not ir.Copy(n) because in general that operation is a mistake on names, // which uniquely identify variables. // Callers must use n.CloneName to make clear they intend to create a separate name. func (n *Name) CloneName() *Name { c := *n; return &c } -func (n *Name) isExpr() {} - // NewNameAt returns a new ONAME Node associated with symbol s at position pos. // The caller is responsible for setting Curfn. func NewNameAt(pos src.XPos, sym *types.Sym) *Name { @@ -231,27 +231,6 @@ func (n *Name) Alias() bool { return n.flags&nameAlias != 0 } // SetAlias sets whether p, which must be for an OTYPE, is a type alias. func (n *Name) SetAlias(alias bool) { n.flags.set(nameAlias, alias) } -// EmbedFiles returns the list of embedded files for p, -// which must be for an ONAME var. -func (n *Name) EmbedFiles() []string { - if n.embedFiles == nil { - return nil - } - return *n.embedFiles -} - -// SetEmbedFiles sets the list of embedded files for p, -// which must be for an ONAME var. 
-func (n *Name) SetEmbedFiles(list []string) { - if n.embedFiles == nil && list == nil { - return - } - if n.embedFiles == nil { - n.embedFiles = new([]string) - } - *n.embedFiles = list -} - const ( nameCaptured = 1 << iota // is the variable captured by a closure nameReadonly @@ -389,6 +368,11 @@ const ( _ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3) ) +type Embed struct { + Pos src.XPos + Patterns []string +} + // A Pack is an identifier referring to an imported package. type PkgName struct { miniNode diff --git a/src/embed/internal/embedtest/embed_test.go b/src/embed/internal/embedtest/embed_test.go index c6a7bea7a3312..04c23172c2840 100644 --- a/src/embed/internal/embedtest/embed_test.go +++ b/src/embed/internal/embedtest/embed_test.go @@ -73,24 +73,14 @@ func TestGlobal(t *testing.T) { testString(t, string(glass), "glass", "I can eat glass and it doesn't hurt me.\n") } -func TestLocal(t *testing.T) { - //go:embed testdata/k*.txt - var local embed.FS - testFiles(t, local, "testdata/ken.txt", "If a program is too slow, it must have a loop.\n") - - //go:embed testdata/k*.txt - var s string - testString(t, s, "local variable s", "If a program is too slow, it must have a loop.\n") - - //go:embed testdata/h*.txt - var b []byte - testString(t, string(b), "local variable b", "hello, world\n") -} +//go:embed testdata +var dir embed.FS -func TestDir(t *testing.T) { - //go:embed testdata - var all embed.FS +//go:embed testdata/* +var star embed.FS +func TestDir(t *testing.T) { + all := dir testFiles(t, all, "testdata/hello.txt", "hello, world\n") testFiles(t, all, "testdata/i/i18n.txt", "internationalization\n") testFiles(t, all, "testdata/i/j/k/k8s.txt", "kubernetes\n") @@ -103,12 +93,6 @@ func TestDir(t *testing.T) { } func TestHidden(t *testing.T) { - //go:embed testdata - var dir embed.FS - - //go:embed testdata/* - var star embed.FS - t.Logf("//go:embed testdata") testDir(t, dir, "testdata", diff --git a/src/embed/internal/embedtest/embedx_test.go b/src/embed/internal/embedtest/embedx_test.go index 20d5a28c11df8..27fa11614e926 100644 --- a/src/embed/internal/embedtest/embedx_test.go +++ b/src/embed/internal/embedtest/embedx_test.go @@ -90,17 +90,3 @@ func TestXGlobal(t *testing.T) { } bbig[0] = old } - -func TestXLocal(t *testing.T) { - //go:embed testdata/*o.txt - var local embed.FS - testFiles(t, local, "testdata/hello.txt", "hello, world\n") - - //go:embed testdata/k*.txt - var s string - testString(t, s, "local variable s", "If a program is too slow, it must have a loop.\n") - - //go:embed testdata/h*.txt - var b []byte - testString(t, string(b), "local variable b", "hello, world\n") -} From e999c1702250222b069691491d24dd5d020744de Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 21 Dec 2020 01:44:49 -0500 Subject: [PATCH 197/474] [dev.regabi] cmd/compile: separate ssa from other phases isIntrinsicCall and ssaDumpInline are the only two "forward references" to ssa by earlier phases. Make them a bit more explicit so that the uses and the definitions can end up in different packages. 
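To make the pattern concrete, here is a minimal, self-contained sketch of
breaking a forward reference with a package-level function variable (the
type and function names are illustrative, not the compiler's own):

    package main

    type CallExpr struct{ fn string }

    // Early phases call through this hook; the default conservatively
    // answers "not an intrinsic" until the back end installs itself.
    var IsIntrinsicCall = func(*CallExpr) bool { return false }

    func main() {
        // The back end overrides the hook once at startup
        // (Main does the equivalent in this CL).
        IsIntrinsicCall = func(c *CallExpr) bool { return c.fn == "len" }
        println(IsIntrinsicCall(&CallExpr{fn: "len"})) // prints true
    }

This keeps the early phases compilable without importing the back end, at
the cost of one indirect call per use.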
Change-Id: I02c7a27464fbedef9fee43c0e4094fa08b4d7a5c Reviewed-on: https://go-review.googlesource.com/c/go/+/279300 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/inl.go | 15 ++++++++++----- src/cmd/compile/internal/gc/main.go | 3 +++ src/cmd/compile/internal/gc/plive.go | 8 ++++---- src/cmd/compile/internal/gc/ssa.go | 14 ++++++++++---- src/cmd/compile/internal/gc/walk.go | 2 +- 5 files changed, 28 insertions(+), 14 deletions(-) diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 6c8f380d87318..15df2584f0db5 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -39,6 +39,9 @@ import ( "strings" ) +// IsIntrinsicCall reports whether the compiler back end will treat the call as an intrinsic operation. +var IsIntrinsicCall = func(*ir.CallExpr) bool { return false } + // Inlining budget parameters, gathered in one place const ( inlineMaxBudget = 80 @@ -339,7 +342,7 @@ func (v *hairyVisitor) doNode(n ir.Node) error { } } - if isIntrinsicCall(n) { + if IsIntrinsicCall(n) { // Treat like any other node. break } @@ -593,7 +596,7 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No if base.Flag.LowerM > 3 { fmt.Printf("%v:call to func %+v\n", ir.Line(n), call.Left()) } - if isIntrinsicCall(call) { + if IsIntrinsicCall(call) { break } if fn := inlCallee(call.Left()); fn != nil && fn.Inl != nil { @@ -768,6 +771,10 @@ func inlParam(t *types.Field, as ir.Node, inlvars map[*ir.Name]ir.Node) ir.Node var inlgen int +// SSADumpInline gives the SSA back end a chance to dump the function +// when producing output for debugging the compiler itself. +var SSADumpInline = func(*ir.Func) {} + // If n is a call node (OCALLFUNC or OCALLMETH), and fn is an ONAME node for a // function with an inlinable body, return an OINLCALL node that can replace n. // The returned node's Ninit has the parameter assignments, the Nbody is the @@ -835,9 +842,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b fmt.Printf("%v: Before inlining: %+v\n", ir.Line(n), n) } - if ssaDump != "" && ssaDump == ir.FuncName(Curfn) { - ssaDumpInlined = append(ssaDumpInlined, fn) - } + SSADumpInline(fn) ninit := n.Init() diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 45880c5cde5dd..afb47cf15de9d 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -191,6 +191,9 @@ func Main(archInit func(*Arch)) { logopt.LogJsonOption(base.Flag.JSON) } + IsIntrinsicCall = isIntrinsicCall + SSADumpInline = ssaDumpInline + ssaDump = os.Getenv("GOSSAFUNC") ssaDir = os.Getenv("GOSSADIR") if ssaDump != "" { diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index 8e266d6599eca..77cd9c5b19758 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -1233,10 +1233,10 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) { // pointer variables in the function and emits a runtime data // structure read by the garbage collector. // Returns a map from GC safe points to their corresponding stack map index. -func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap { +func liveness(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *Progs) LivenessMap { // Construct the global liveness state. 
- vars, idx := getvariables(e.curfn) - lv := newliveness(e.curfn, f, vars, idx, e.stkptrsize) + vars, idx := getvariables(curfn) + lv := newliveness(curfn, f, vars, idx, stkptrsize) // Run the dataflow framework. lv.prologue() @@ -1271,7 +1271,7 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap { } // Emit the live pointer map data structures - ls := e.curfn.LSym + ls := curfn.LSym fninfo := ls.Func() fninfo.GCArgs, fninfo.GCLocals = lv.emit() diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index fbfed0640d844..4f4860869caa3 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -42,6 +42,12 @@ const maxOpenDefers = 8 // ssaDumpInlined holds all inlined functions when ssaDump contains a function name. var ssaDumpInlined []*ir.Func +func ssaDumpInline(fn *ir.Func) { + if ssaDump != "" && ssaDump == ir.FuncName(fn) { + ssaDumpInlined = append(ssaDumpInlined, fn) + } +} + func initssaconfig() { types_ := ssa.NewTypes() @@ -1135,7 +1141,7 @@ func (s *state) stmt(n ir.Node) { // Expression statements case ir.OCALLFUNC: n := n.(*ir.CallExpr) - if isIntrinsicCall(n) { + if IsIntrinsicCall(n) { s.intrinsicCall(n) return } @@ -1204,7 +1210,7 @@ func (s *state) stmt(n ir.Node) { case ir.OAS2FUNC: // We come here only when it is an intrinsic call returning two values. call := n.Rlist().First().(*ir.CallExpr) - if !isIntrinsicCall(call) { + if !IsIntrinsicCall(call) { s.Fatalf("non-intrinsic AS2FUNC not expanded %v", call) } v := s.intrinsicCall(call) @@ -2826,7 +2832,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { case ir.OCALLFUNC: n := n.(*ir.CallExpr) - if isIntrinsicCall(n) { + if IsIntrinsicCall(n) { return s.intrinsicCall(n) } fallthrough @@ -6375,7 +6381,7 @@ func genssa(f *ssa.Func, pp *Progs) { e := f.Frontend().(*ssafn) - s.livenessMap = liveness(e, f, pp) + s.livenessMap = liveness(e.curfn, f, e.stkptrsize, pp) emitStackObjects(e, pp) openDeferInfo := e.curfn.LSym.Func().OpenCodedDeferInfo diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 657a744e68d46..7651bbca10524 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -769,7 +769,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { walkexprlistsafe(n.List().Slice(), init) r = walkexpr(r, init) - if isIntrinsicCall(r.(*ir.CallExpr)) { + if IsIntrinsicCall(r.(*ir.CallExpr)) { n.PtrRlist().Set1(r) return n } From 1a523c8ab08e95ddfb7c50e19ddd6c73bb45daf5 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 21 Dec 2020 01:56:46 -0500 Subject: [PATCH 198/474] [dev.regabi] cmd/compile: separate nowritebarrierrec from main Main knows a bit too much about nowritebarrierrec. Abstract the API a little bit to make the package split easier. 
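Condensed from the main.go diff below, Main's involvement reduces to a pair
of exported calls, keeping the checker's state private to dcl.go:

    if base.Flag.CompilingRuntime {
        EnableNoWriteBarrierRecCheck()
    }
    // ... typecheck, inline, escape-analyze, compile functions ...
    if base.Flag.CompilingRuntime {
        // Write barriers are now known. Check the call graph.
        NoWriteBarrierRecCheck()
    }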
Change-Id: I4b76bdb1fed73dfb0d44e1a6c86de8c2d29a9488 Reviewed-on: https://go-review.googlesource.com/c/go/+/279301 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/dcl.go | 13 ++++++++++++- src/cmd/compile/internal/gc/main.go | 12 ++++-------- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 20e5edc4cb4ae..64b15077cdfaa 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -15,7 +15,18 @@ import ( "strings" ) -// Declaration stack & operations +func EnableNoWriteBarrierRecCheck() { + nowritebarrierrecCheck = newNowritebarrierrecChecker() +} + +func NoWriteBarrierRecCheck() { + // Write barriers are now known. Check the + // call graph. + nowritebarrierrecCheck.check() + nowritebarrierrecCheck = nil +} + +var nowritebarrierrecCheck *nowritebarrierrecChecker func testdclstack() { if !types.IsDclstackValid() { diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index afb47cf15de9d..7f7cd63cdfdde 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -57,8 +57,6 @@ var Target *ir.Package // timing data for compiler phases var timings Timings -var nowritebarrierrecCheck *nowritebarrierrecChecker - // Main parses flags and Go source files specified in the command-line // arguments, type-checks the parsed Go package, compiles functions to machine // code, and finally writes the compiled package definition to disk. @@ -382,7 +380,7 @@ func Main(archInit func(*Arch)) { // We'll do the final check after write barriers are // inserted. if base.Flag.CompilingRuntime { - nowritebarrierrecCheck = newNowritebarrierrecChecker() + EnableNoWriteBarrierRecCheck() } // Phase 7: Transform closure bodies to properly reference captured variables. @@ -422,11 +420,9 @@ func Main(archInit func(*Arch)) { compileFunctions() - if nowritebarrierrecCheck != nil { - // Write barriers are now known. Check the - // call graph. - nowritebarrierrecCheck.check() - nowritebarrierrecCheck = nil + if base.Flag.CompilingRuntime { + // Write barriers are now known. Check the call graph. + NoWriteBarrierRecCheck() } // Finalize DWARF inline routine DIEs, then explicitly turn off From 06915ac14dfb7c80f384e3446bc6fa474e6bfa94 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 19 Dec 2020 19:26:06 -0800 Subject: [PATCH 199/474] [dev.regabi] cmd/compile: move itabname call out of implements We only need to call itabname when actually creating the OCONVIFACE ops, not any time we test whether a type implements an interface. Additionally, by moving this call out of implements, we make it purely based on types, which makes it safe to move to package types. Does not pass toolstash -cmp, because it shuffles symbol creation order. 
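In sketch form (condensed from the subr.go diff below), the itab
registration now rides along with the conversion decision in assignop
instead of living inside implements:

    var missing, have *types.Field
    var ptr int
    if implements(src, dst, &missing, &have, &ptr) {
        // Add (src, dst) to itabs early so calls through this
        // type/interface pair can be de-virtualized later;
        // see peekitabs in reflect.go.
        if isdirectiface(src) && !dst.IsEmptyInterface() {
            itabname(src, dst)
        }
        return ir.OCONVIFACE, ""
    }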
Change-Id: Iea8e0c9374218f4d97b4339020ebd758d051bd03 Reviewed-on: https://go-review.googlesource.com/c/go/+/279333 Reviewed-by: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Trust: Matthew Dempsky --- src/cmd/compile/internal/gc/subr.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 2b0047e1503df..48cbd2505eaaa 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -304,6 +304,14 @@ func assignop(src, dst *types.Type) (ir.Op, string) { var missing, have *types.Field var ptr int if implements(src, dst, &missing, &have, &ptr) { + // Call itabname so that (src, dst) + // gets added to itabs early, which allows + // us to de-virtualize calls through this + // type/interface pair later. See peekitabs in reflect.go + if isdirectiface(src) && !dst.IsEmptyInterface() { + itabname(src, dst) + } + return ir.OCONVIFACE, "" } @@ -1404,14 +1412,6 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool } } - // We're going to emit an OCONVIFACE. - // Call itabname so that (t, iface) - // gets added to itabs early, which allows - // us to de-virtualize calls through this - // type/interface pair later. See peekitabs in reflect.go - if isdirectiface(t0) && !iface.IsEmptyInterface() { - itabname(t0, iface) - } return true } From cb4898a77d79f457d75f601fad6908dd85bdc772 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Fri, 18 Dec 2020 19:38:13 -0800 Subject: [PATCH 200/474] [dev.regabi] cmd/compile: simplify declaration importing Rather than creating Names w/ ONONAME earlier and later adding in the details, this CL changes the import logic to create and add details at the same time. Passes buildall w/ toolstash -cmp. Change-Id: Ifaabade3cef8cd80ddd6644bff79393b934255d9 Reviewed-on: https://go-review.googlesource.com/c/go/+/279313 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/export.go | 110 ++++++----------------- src/cmd/compile/internal/gc/iimport.go | 62 +++++++------ src/cmd/compile/internal/gc/typecheck.go | 13 +-- 3 files changed, 58 insertions(+), 127 deletions(-) diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index d26dd9af5d2b8..6ed4327a8fcc8 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -77,126 +77,70 @@ func dumpexport(bout *bio.Writer) { } } -func importsym(ipkg *types.Pkg, s *types.Sym, op ir.Op) *ir.Name { - n := ir.AsNode(s.PkgDef()) - if n == nil { - // iimport should have created a stub ONONAME - // declaration for all imported symbols. The exception - // is declarations for Runtimepkg, which are populated - // by loadsys instead. - if s.Pkg != Runtimepkg { - base.Fatalf("missing ONONAME for %v\n", s) - } - - n = ir.NewDeclNameAt(src.NoXPos, s) - s.SetPkgDef(n) - s.Importdef = ipkg - } - if n.Op() != ir.ONONAME && n.Op() != op { - redeclare(base.Pos, s, fmt.Sprintf("during import %q", ipkg.Path)) +func importsym(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class) *ir.Name { + if n := s.PkgDef(); n != nil { + base.Fatalf("importsym of symbol that already exists: %v", n) } - return n.(*ir.Name) + + n := ir.NewDeclNameAt(pos, s) + n.SetOp(op) // TODO(mdempsky): Add as argument to NewDeclNameAt. 
+ n.SetClass(ctxt) + s.SetPkgDef(n) + s.Importdef = ipkg + return n } // importtype returns the named type declared by symbol s. // If no such type has been declared yet, a forward declaration is returned. // ipkg is the package being imported -func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type { - n := importsym(ipkg, s, ir.OTYPE) - if n.Op() != ir.OTYPE { - t := types.NewNamed(n) - n.SetOp(ir.OTYPE) - n.SetPos(pos) - n.SetType(t) - n.SetClass(ir.PEXTERN) - } - - t := n.Type() - if t == nil { - base.Fatalf("importtype %v", s) - } - return t +func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *ir.Name { + n := importsym(ipkg, pos, s, ir.OTYPE, ir.PEXTERN) + n.SetType(types.NewNamed(n)) + return n } // importobj declares symbol s as an imported object representable by op. // ipkg is the package being imported -func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) ir.Node { - n := importsym(ipkg, s, op) - if n.Op() != ir.ONONAME { - if n.Op() == op && (op == ir.ONAME && n.Class() != ctxt || !types.Identical(n.Type(), t)) { - redeclare(base.Pos, s, fmt.Sprintf("during import %q", ipkg.Path)) - } - return nil - } - - n.SetOp(op) - n.SetPos(pos) - n.SetClass(ctxt) +func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) *ir.Name { + n := importsym(ipkg, pos, s, op, ctxt) + n.SetType(t) if ctxt == ir.PFUNC { n.Sym().SetFunc(true) } - n.SetType(t) return n } // importconst declares symbol s as an imported constant with type t and value val. // ipkg is the package being imported -func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val constant.Value) { +func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val constant.Value) *ir.Name { n := importobj(ipkg, pos, s, ir.OLITERAL, ir.PEXTERN, t) - if n == nil { // TODO: Check that value matches. - return - } - n.SetVal(val) - - if base.Flag.E != 0 { - fmt.Printf("import const %v %L = %v\n", s, t, val) - } + return n } // importfunc declares symbol s as an imported function with type t. // ipkg is the package being imported -func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) { +func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name { n := importobj(ipkg, pos, s, ir.ONAME, ir.PFUNC, t) - if n == nil { - return - } - name := n.(*ir.Name) fn := ir.NewFunc(pos) fn.SetType(t) - name.SetFunc(fn) - fn.Nname = name + n.SetFunc(fn) + fn.Nname = n - if base.Flag.E != 0 { - fmt.Printf("import func %v%S\n", s, t) - } + return n } // importvar declares symbol s as an imported variable with type t. // ipkg is the package being imported -func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) { - n := importobj(ipkg, pos, s, ir.ONAME, ir.PEXTERN, t) - if n == nil { - return - } - - if base.Flag.E != 0 { - fmt.Printf("import var %v %L\n", s, t) - } +func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name { + return importobj(ipkg, pos, s, ir.ONAME, ir.PEXTERN, t) } // importalias declares symbol s as an imported type alias with type t. 
// ipkg is the package being imported -func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) { - n := importobj(ipkg, pos, s, ir.OTYPE, ir.PEXTERN, t) - if n == nil { - return - } - - if base.Flag.E != 0 { - fmt.Printf("import type %v = %L\n", s, t) - } +func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name { + return importobj(ipkg, pos, s, ir.OTYPE, ir.PEXTERN, t) } func dumpasmhdr() { diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 549751335e4d2..76f55a44e54ee 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -41,18 +41,23 @@ var ( inlineImporter = map[*types.Sym]iimporterAndOffset{} ) -func expandDecl(n *ir.Name) { - if n.Op() != ir.ONONAME { - return +func expandDecl(n ir.Node) ir.Node { + if n, ok := n.(*ir.Name); ok { + return n + } + + id := n.(*ir.Ident) + if n := id.Sym().PkgDef(); n != nil { + return n.(*ir.Name) } - r := importReaderFor(n, declImporter) + r := importReaderFor(id.Sym(), declImporter) if r == nil { // Can happen if user tries to reference an undeclared name. - return + return n } - r.doDecl(n) + return r.doDecl(n.Sym()) } func expandInline(fn *ir.Func) { @@ -60,7 +65,7 @@ func expandInline(fn *ir.Func) { return } - r := importReaderFor(fn.Nname, inlineImporter) + r := importReaderFor(fn.Nname.Sym(), inlineImporter) if r == nil { base.Fatalf("missing import reader for %v", fn) } @@ -68,13 +73,13 @@ func expandInline(fn *ir.Func) { r.doInline(fn) } -func importReaderFor(n *ir.Name, importers map[*types.Sym]iimporterAndOffset) *importReader { - x, ok := importers[n.Sym()] +func importReaderFor(sym *types.Sym, importers map[*types.Sym]iimporterAndOffset) *importReader { + x, ok := importers[sym] if !ok { return nil } - return x.p.newReader(x.off, n.Sym().Pkg) + return x.p.newReader(x.off, sym.Pkg) } type intReader struct { @@ -272,11 +277,7 @@ func (r *importReader) setPkg() { r.currPkg = r.pkg() } -func (r *importReader) doDecl(n ir.Node) { - if n.Op() != ir.ONONAME { - base.Fatalf("doDecl: unexpected Op for %v: %v", n.Sym(), n.Op()) - } - +func (r *importReader) doDecl(sym *types.Sym) *ir.Name { tag := r.byte() pos := r.pos() @@ -284,24 +285,26 @@ func (r *importReader) doDecl(n ir.Node) { case 'A': typ := r.typ() - importalias(r.p.ipkg, pos, n.Sym(), typ) + return importalias(r.p.ipkg, pos, sym, typ) case 'C': typ := r.typ() val := r.value(typ) - importconst(r.p.ipkg, pos, n.Sym(), typ, val) + return importconst(r.p.ipkg, pos, sym, typ, val) case 'F': typ := r.signature(nil) - importfunc(r.p.ipkg, pos, n.Sym(), typ) + n := importfunc(r.p.ipkg, pos, sym, typ) r.funcExt(n) + return n case 'T': // Types can be recursive. We need to setup a stub // declaration before recursing. - t := importtype(r.p.ipkg, pos, n.Sym()) + n := importtype(r.p.ipkg, pos, sym) + t := n.Type() // We also need to defer width calculations until // after the underlying type has been assigned. 
@@ -312,7 +315,7 @@ func (r *importReader) doDecl(n ir.Node) { if underlying.IsInterface() { r.typeExt(t) - break + return n } ms := make([]*types.Field, r.uint64()) @@ -339,15 +342,18 @@ func (r *importReader) doDecl(n ir.Node) { for _, m := range ms { r.methExt(m) } + return n case 'V': typ := r.typ() - importvar(r.p.ipkg, pos, n.Sym(), typ) + n := importvar(r.p.ipkg, pos, sym, typ) r.varExt(n) + return n default: base.Fatalf("unexpected tag: %v", tag) + panic("unreachable") } } @@ -433,16 +439,11 @@ func (r *importReader) ident() *types.Sym { return pkg.Lookup(name) } -func (r *importReader) qualifiedIdent() *ir.Name { +func (r *importReader) qualifiedIdent() *ir.Ident { name := r.string() pkg := r.pkg() sym := pkg.Lookup(name) - n := sym.PkgDef() - if n == nil { - n = ir.NewDeclNameAt(src.NoXPos, sym) - sym.SetPkgDef(n) - } - return n.(*ir.Name) + return ir.NewIdent(src.NoXPos, sym) } func (r *importReader) pos() src.XPos { @@ -498,10 +499,7 @@ func (r *importReader) typ1() *types.Type { // support inlining functions with local defined // types. Therefore, this must be a package-scope // type. - n := r.qualifiedIdent() - if n.Op() == ir.ONONAME { - expandDecl(n) - } + n := expandDecl(r.qualifiedIdent()) if n.Op() != ir.OTYPE { base.Fatalf("expected OTYPE, got %v: %v, %v", n.Op(), n.Sym(), n) } diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 83939fd6bff85..4fae4a0819f5c 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -8,7 +8,6 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/types" - "cmd/internal/src" "fmt" "go/constant" "go/token" @@ -97,23 +96,13 @@ func resolve(n ir.Node) (res ir.Node) { if pkgName := dotImportRefs[id]; pkgName != nil { pkgName.Used = true } - - if sym.Def == nil { - if _, ok := declImporter[sym]; !ok { - return n // undeclared name - } - sym.Def = ir.NewDeclNameAt(src.NoXPos, sym) - } - n = ir.AsNode(sym.Def) } - // Stub ir.Name left for us by iimport. - n := n.(*ir.Name) if inimport { base.Fatalf("recursive inimport") } inimport = true - expandDecl(n) + n = expandDecl(n) inimport = false return n } From 94cfeca0a5b36a70a8bdd1a0015eb78c7e9a3311 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Fri, 18 Dec 2020 20:14:45 -0800 Subject: [PATCH 201/474] [dev.regabi] cmd/compile: stop using ONONAME with Name This CL changes NewDeclNameAt to take an Op argument to set the Op up front, and updates all callers to provide the appropriate Op. This allows dropping the Name.SetOp method. Passes buildall w/ toolstash -cmp. 
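The change at call sites is mechanical; condensed from the diffs below:

    // Before: create a stub, then fix up its Op.
    n := ir.NewDeclNameAt(pos, sym)
    n.SetOp(ir.OTYPE)

    // After: the declaration kind is part of construction.
    n := ir.NewDeclNameAt(pos, ir.OTYPE, sym)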
Change-Id: I20e580f62d3c8a81223d1c162327c11b37bbf3f0 Reviewed-on: https://go-review.googlesource.com/c/go/+/279314 Trust: Matthew Dempsky Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/dcl.go | 2 -- src/cmd/compile/internal/gc/export.go | 5 ++--- src/cmd/compile/internal/gc/iimport.go | 2 +- src/cmd/compile/internal/gc/noder.go | 17 +++++++---------- src/cmd/compile/internal/gc/universe.go | 6 ++---- src/cmd/compile/internal/ir/name.go | 24 +++++++++--------------- 6 files changed, 21 insertions(+), 35 deletions(-) diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 64b15077cdfaa..04e3506dbacb6 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -141,7 +141,6 @@ func variter(vl []ir.Node, t ir.Ntype, el []ir.Node) []ir.Node { as2.PtrRlist().Set1(e) for _, v := range vl { v := v.(*ir.Name) - v.SetOp(ir.ONAME) declare(v, dclcontext) v.Ntype = t v.Defn = as2 @@ -166,7 +165,6 @@ func variter(vl []ir.Node, t ir.Ntype, el []ir.Node) []ir.Node { el = el[1:] } - v.SetOp(ir.ONAME) declare(v, dclcontext) v.Ntype = t diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 6ed4327a8fcc8..8a8295537c397 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -82,9 +82,8 @@ func importsym(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Cl base.Fatalf("importsym of symbol that already exists: %v", n) } - n := ir.NewDeclNameAt(pos, s) - n.SetOp(op) // TODO(mdempsky): Add as argument to NewDeclNameAt. - n.SetClass(ctxt) + n := ir.NewDeclNameAt(pos, op, s) + n.SetClass(ctxt) // TODO(mdempsky): Move this into NewDeclNameAt too? s.SetPkgDef(n) s.Importdef = ipkg return n diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 76f55a44e54ee..219ce4bdef271 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -971,7 +971,7 @@ func (r *importReader) node() ir.Node { // statements case ir.ODCL: pos := r.pos() - lhs := ir.NewDeclNameAt(pos, r.ident()) + lhs := ir.NewDeclNameAt(pos, ir.ONAME, r.ident()) typ := ir.TypeNode(r.typ()) return npos(pos, liststmt(variter([]ir.Node{lhs}, typ, nil))) // TODO(gri) avoid list creation diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index ee01423833cb5..b61f19ae2e9e1 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -374,7 +374,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) { } func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node { - names := p.declNames(decl.NameList) + names := p.declNames(ir.ONAME, decl.NameList) typ := p.typeExprOrNil(decl.Type) var exprs []ir.Node @@ -425,7 +425,7 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node { p.checkUnused(pragma) } - names := p.declNames(decl.NameList) + names := p.declNames(ir.OLITERAL, decl.NameList) typ := p.typeExprOrNil(decl.Type) var values []ir.Node @@ -450,8 +450,6 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node { if decl.Values == nil { v = ir.DeepCopy(n.Pos(), v) } - - n.SetOp(ir.OLITERAL) declare(n, dclcontext) n.Ntype = typ @@ -471,8 +469,7 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node { } func (p *noder) typeDecl(decl *syntax.TypeDecl) ir.Node { - n := p.declName(decl.Name) - n.SetOp(ir.OTYPE) + n := p.declName(ir.OTYPE, decl.Name) declare(n, dclcontext) // 
decl.Type may be nil but in that case we got a syntax error during parsing @@ -495,16 +492,16 @@ func (p *noder) typeDecl(decl *syntax.TypeDecl) ir.Node { return nod } -func (p *noder) declNames(names []*syntax.Name) []ir.Node { +func (p *noder) declNames(op ir.Op, names []*syntax.Name) []ir.Node { nodes := make([]ir.Node, 0, len(names)) for _, name := range names { - nodes = append(nodes, p.declName(name)) + nodes = append(nodes, p.declName(op, name)) } return nodes } -func (p *noder) declName(name *syntax.Name) *ir.Name { - return ir.NewDeclNameAt(p.pos(name), p.name(name)) +func (p *noder) declName(op ir.Op, name *syntax.Name) *ir.Name { + return ir.NewDeclNameAt(p.pos(name), op, p.name(name)) } func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node { diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index 21ddc78089de7..c988c575dcd6e 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -97,8 +97,7 @@ func initUniverse() { defBasic := func(kind types.Kind, pkg *types.Pkg, name string) *types.Type { sym := pkg.Lookup(name) - n := ir.NewDeclNameAt(src.NoXPos, sym) - n.SetOp(ir.OTYPE) + n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, sym) t := types.NewBasic(kind, n) n.SetType(t) sym.Def = n @@ -134,8 +133,7 @@ func initUniverse() { // error type s := types.BuiltinPkg.Lookup("error") - n := ir.NewDeclNameAt(src.NoXPos, s) - n.SetOp(ir.OTYPE) + n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, s) types.ErrorType = types.NewNamed(n) types.ErrorType.SetUnderlying(makeErrorInterface()) n.SetType(types.ErrorType) diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index f5f4280fd079e..9cf959b23d792 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -164,13 +164,19 @@ func NewIota(pos src.XPos, sym *types.Sym) *Name { return newNameAt(pos, OIOTA, sym) } -// NewDeclNameAt returns a new ONONAME Node associated with symbol s at position pos. +// NewDeclNameAt returns a new Name associated with symbol s at position pos. // The caller is responsible for setting Curfn. -func NewDeclNameAt(pos src.XPos, sym *types.Sym) *Name { +func NewDeclNameAt(pos src.XPos, op Op, sym *types.Sym) *Name { if sym == nil { base.Fatalf("NewDeclNameAt nil") } - return newNameAt(pos, ONONAME, sym) + switch op { + case ONAME, OTYPE, OLITERAL: + // ok + default: + base.Fatalf("NewDeclNameAt op %v", op) + } + return newNameAt(pos, op, sym) } // newNameAt is like NewNameAt but allows sym == nil. @@ -207,18 +213,6 @@ func (*Name) CanBeNtype() {} func (*Name) CanBeAnSSASym() {} func (*Name) CanBeAnSSAAux() {} -func (n *Name) SetOp(op Op) { - if n.op != ONONAME { - base.Fatalf("%v already has Op %v", n, n.op) - } - switch op { - default: - panic(n.no("SetOp " + op.String())) - case OLITERAL, ONAME, OTYPE, OIOTA: - n.op = op - } -} - // Pragma returns the PragmaFlag for p, which must be for an OTYPE. func (n *Name) Pragma() PragmaFlag { return n.pragma } From 306b2451c849c9a5835069f317dfea851e526a00 Mon Sep 17 00:00:00 2001 From: Than McIntosh Date: Mon, 14 Dec 2020 10:03:37 -0500 Subject: [PATCH 202/474] [dev.regabi] runtime: fix ABI targets in runtime.panic{Index,Slice} shims MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix up the assembly shim routines runtime.panic{Index,Slice} and friends so that their tail calls target ABIInternal and not ABI0 functions. 
This is so as to ensure that these calls don't go through an ABI0->ABIInternal wrapper (which would throw off the machinery in the called routines designed to detect whether the violation happened in the runtime). Note that when the compiler starts emitting real register calls to these routines, we'll need to rewrite them to update the arg size and ensure that args are in the correct registers. For example, the current shim TEXT runtime·panicIndex(SB),NOSPLIT,$0-16 MOVQ AX, x+0(FP) MOVQ CX, y+8(FP) JMP runtime·goPanicIndex(SB) will need to change to TEXT runtime·panicIndex(SB),NOSPLIT,$0 // AX already set up properly MOVQ CX, BX // second argument expected in BX JMP runtime·goPanicIndex(SB) Change-Id: I48d1b5138fb4d229380ad12735cfaca5c50e6cc3 Reviewed-on: https://go-review.googlesource.com/c/go/+/278755 Reviewed-by: Cherry Zhang Trust: Than McIntosh --- src/runtime/asm_amd64.s | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s index 196252e1dd993..53d1f8e358714 100644 --- a/src/runtime/asm_amd64.s +++ b/src/runtime/asm_amd64.s @@ -1728,67 +1728,67 @@ TEXT runtime·debugCallPanicked(SB),NOSPLIT,$16-16 TEXT runtime·panicIndex(SB),NOSPLIT,$0-16 MOVQ AX, x+0(FP) MOVQ CX, y+8(FP) - JMP runtime·goPanicIndex(SB) + JMP runtime·goPanicIndex(SB) TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16 MOVQ AX, x+0(FP) MOVQ CX, y+8(FP) - JMP runtime·goPanicIndexU(SB) + JMP runtime·goPanicIndexU(SB) TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16 MOVQ CX, x+0(FP) MOVQ DX, y+8(FP) - JMP runtime·goPanicSliceAlen(SB) + JMP runtime·goPanicSliceAlen(SB) TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16 MOVQ CX, x+0(FP) MOVQ DX, y+8(FP) - JMP runtime·goPanicSliceAlenU(SB) + JMP runtime·goPanicSliceAlenU(SB) TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16 MOVQ CX, x+0(FP) MOVQ DX, y+8(FP) - JMP runtime·goPanicSliceAcap(SB) + JMP runtime·goPanicSliceAcap(SB) TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16 MOVQ CX, x+0(FP) MOVQ DX, y+8(FP) - JMP runtime·goPanicSliceAcapU(SB) + JMP runtime·goPanicSliceAcapU(SB) TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16 MOVQ AX, x+0(FP) MOVQ CX, y+8(FP) - JMP runtime·goPanicSliceB(SB) + JMP runtime·goPanicSliceB(SB) TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16 MOVQ AX, x+0(FP) MOVQ CX, y+8(FP) - JMP runtime·goPanicSliceBU(SB) + JMP runtime·goPanicSliceBU(SB) TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16 MOVQ DX, x+0(FP) MOVQ BX, y+8(FP) - JMP runtime·goPanicSlice3Alen(SB) + JMP runtime·goPanicSlice3Alen(SB) TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16 MOVQ DX, x+0(FP) MOVQ BX, y+8(FP) - JMP runtime·goPanicSlice3AlenU(SB) + JMP runtime·goPanicSlice3AlenU(SB) TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16 MOVQ DX, x+0(FP) MOVQ BX, y+8(FP) - JMP runtime·goPanicSlice3Acap(SB) + JMP runtime·goPanicSlice3Acap(SB) TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16 MOVQ DX, x+0(FP) MOVQ BX, y+8(FP) - JMP runtime·goPanicSlice3AcapU(SB) + JMP runtime·goPanicSlice3AcapU(SB) TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16 MOVQ CX, x+0(FP) MOVQ DX, y+8(FP) - JMP runtime·goPanicSlice3B(SB) + JMP runtime·goPanicSlice3B(SB) TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16 MOVQ CX, x+0(FP) MOVQ DX, y+8(FP) - JMP runtime·goPanicSlice3BU(SB) + JMP runtime·goPanicSlice3BU(SB) TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16 MOVQ AX, x+0(FP) MOVQ CX, y+8(FP) - JMP runtime·goPanicSlice3C(SB) + JMP runtime·goPanicSlice3C(SB) TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16 MOVQ AX, x+0(FP) MOVQ CX, y+8(FP) - JMP runtime·goPanicSlice3CU(SB) + 
JMP runtime·goPanicSlice3CU(SB) #ifdef GOOS_android // Use the free TLS_SLOT_APP slot #2 on Android Q. From 301af2cb71d2731baa55653df67850ce85032e16 Mon Sep 17 00:00:00 2001 From: Than McIntosh Date: Wed, 16 Dec 2020 13:45:48 -0500 Subject: [PATCH 203/474] [dev.regabi] runtime/race: adjust test pattern match for ABI wrapper Adjust the pattern matching in one of the race output test to allow for the possible introduction of an ABI wrapper. Normally for tests that match traceback output wrappers are not an issue since they are screened out by Go's traceback mechanism, but in this case the race runtime is doing the unwinding, so the wrapper may be visible. Change-Id: I45413b5c4701d4c28cc760fccc8203493dbe2874 Reviewed-on: https://go-review.googlesource.com/c/go/+/278756 Run-TryBot: Than McIntosh TryBot-Result: Go Bot Reviewed-by: Cherry Zhang Trust: Than McIntosh --- src/runtime/race/output_test.go | 82 +++++++++++++++++++++++---------- 1 file changed, 58 insertions(+), 24 deletions(-) diff --git a/src/runtime/race/output_test.go b/src/runtime/race/output_test.go index 69496874c64aa..17dc32013f3ee 100644 --- a/src/runtime/race/output_test.go +++ b/src/runtime/race/output_test.go @@ -7,6 +7,7 @@ package race_test import ( + "fmt" "internal/testenv" "os" "os/exec" @@ -71,9 +72,24 @@ func TestOutput(t *testing.T) { "GORACE="+test.gorace, ) got, _ := cmd.CombinedOutput() - if !regexp.MustCompile(test.re).MatchString(string(got)) { - t.Fatalf("failed test case %v, expect:\n%v\ngot:\n%s", - test.name, test.re, got) + matched := false + for _, re := range test.re { + if regexp.MustCompile(re).MatchString(string(got)) { + matched = true + break + } + } + if !matched { + exp := fmt.Sprintf("expect:\n%v\n", test.re[0]) + if len(test.re) > 1 { + exp = fmt.Sprintf("expected one of %d patterns:\n", + len(test.re)) + for k, re := range test.re { + exp += fmt.Sprintf("pattern %d:\n%v\n", k, re) + } + } + t.Fatalf("failed test case %v, %sgot:\n%s", + test.name, exp, got) } } } @@ -84,7 +100,7 @@ var tests = []struct { goos string gorace string source string - re string + re []string }{ {"simple", "run", "", "atexit_sleep_ms=0", ` package main @@ -107,7 +123,7 @@ func racer(x *int, done chan bool) { store(x, 42) done <- true } -`, `================== +`, []string{`================== WARNING: DATA RACE Write at 0x[0-9,a-f]+ by goroutine [0-9]: main\.store\(\) @@ -129,7 +145,7 @@ Goroutine [0-9] \(running\) created at: ================== Found 1 data race\(s\) exit status 66 -`}, +`}}, {"exitcode", "run", "", "atexit_sleep_ms=0 exitcode=13", ` package main @@ -143,7 +159,7 @@ func main() { x = 43 <-done } -`, `exit status 13`}, +`, []string{`exit status 13`}}, {"strip_path_prefix", "run", "", "atexit_sleep_ms=0 strip_path_prefix=/main.", ` package main @@ -157,9 +173,9 @@ func main() { x = 43 <-done } -`, ` +`, []string{` go:7 \+0x[0-9,a-f]+ -`}, +`}}, {"halt_on_error", "run", "", "atexit_sleep_ms=0 halt_on_error=1", ` package main @@ -173,10 +189,10 @@ func main() { x = 43 <-done } -`, ` +`, []string{` ================== exit status 66 -`}, +`}}, {"test_fails_on_race", "test", "", "atexit_sleep_ms=0", ` package main_test @@ -193,12 +209,12 @@ func TestFail(t *testing.T) { <-done t.Log(t.Failed()) } -`, ` +`, []string{` ================== --- FAIL: TestFail \(0...s\) .*main_test.go:14: true .*testing.go:.*: race detected during execution of test -FAIL`}, +FAIL`}}, {"slicebytetostring_pc", "run", "", "atexit_sleep_ms=0", ` package main @@ -211,11 +227,11 @@ func main() { data[0] = 1 <-done } -`, ` +`, []string{` 
runtime\.slicebytetostring\(\) .*/runtime/string\.go:.* main\.main\.func1\(\) - .*/main.go:7`}, + .*/main.go:7`}}, // Test for https://golang.org/issue/33309 {"midstack_inlining_traceback", "run", "linux", "atexit_sleep_ms=0", ` @@ -241,7 +257,7 @@ func g(c chan int) { func h(c chan int) { c <- x } -`, `================== +`, []string{`================== WARNING: DATA RACE Read at 0x[0-9,a-f]+ by goroutine [0-9]: main\.h\(\) @@ -261,7 +277,7 @@ Goroutine [0-9] \(running\) created at: ================== Found 1 data race\(s\) exit status 66 -`}, +`}}, // Test for https://golang.org/issue/17190 {"external_cgo_thread", "run", "linux", "atexit_sleep_ms=0", ` @@ -300,7 +316,25 @@ func main() { racy++ <- done } -`, `================== +`, []string{`================== +WARNING: DATA RACE +Read at 0x[0-9,a-f]+ by main goroutine: + main\.main\(\) + .*/main\.go:34 \+0x[0-9,a-f]+ + +Previous write at 0x[0-9,a-f]+ by goroutine [0-9]: + main\.goCallback\(\) + .*/main\.go:27 \+0x[0-9,a-f]+ + _cgoexp_[0-9a-z]+_goCallback\(\) + .*_cgo_gotypes\.go:[0-9]+ \+0x[0-9,a-f]+ + _cgoexp_[0-9a-z]+_goCallback\(\) + :1 \+0x[0-9,a-f]+ + +Goroutine [0-9] \(running\) created at: + runtime\.newextram\(\) + .*/runtime/proc.go:[0-9]+ \+0x[0-9,a-f]+ +==================`, + `================== WARNING: DATA RACE Read at 0x[0-9,a-f]+ by .*: main\..* @@ -313,7 +347,7 @@ Previous write at 0x[0-9,a-f]+ by .*: Goroutine [0-9] \(running\) created at: runtime\.newextram\(\) .*/runtime/proc.go:[0-9]+ \+0x[0-9,a-f]+ -==================`}, +==================`}}, {"second_test_passes", "test", "", "atexit_sleep_ms=0", ` package main_test import "testing" @@ -331,11 +365,11 @@ func TestFail(t *testing.T) { func TestPass(t *testing.T) { } -`, ` +`, []string{` ================== --- FAIL: TestFail \(0...s\) .*testing.go:.*: race detected during execution of test -FAIL`}, +FAIL`}}, {"mutex", "run", "", "atexit_sleep_ms=0", ` package main import ( @@ -366,7 +400,7 @@ func main() { } wg.Wait() if (data == iterations*(threads+1)) { fmt.Println("pass") } -}`, `pass`}, +}`, []string{`pass`}}, // Test for https://github.com/golang/go/issues/37355 {"chanmm", "run", "", "atexit_sleep_ms=0", ` package main @@ -395,7 +429,7 @@ func main() { wg.Wait() _ = data } -`, `================== +`, []string{`================== WARNING: DATA RACE Write at 0x[0-9,a-f]+ by goroutine [0-9]: main\.main\.func2\(\) @@ -408,5 +442,5 @@ Previous write at 0x[0-9,a-f]+ by main goroutine: Goroutine [0-9] \(running\) created at: main\.main\(\) .*/main.go:[0-9]+ \+0x[0-9,a-f]+ -==================`}, +==================`}}, } From 2755361e6abfd3a58acd5f7ebbcd05c23bc8261a Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Fri, 18 Dec 2020 20:49:50 -0800 Subject: [PATCH 204/474] [dev.regabi] cmd/compile: change noder.declNames to returns ir.Names declNames always returns a slice of *ir.Names, so return that directly rather than as []ir.Node. While here, also change iimport to directly create ir.ODCL/ir.OAS statements, rather than calling variter. Allows eliminating a use of ir.TypeNode. Passes buildall w/ toolstash -cmp. 
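Condensed from the iimport.go diff below, the importer now emits the
declaration and its zeroing assignment directly instead of routing
through variter:

    lhs := ir.NewDeclNameAt(pos, ir.ONAME, r.ident())
    lhs.SetType(r.typ())
    declare(lhs, ir.PAUTO)

    var stmts ir.Nodes
    stmts.Append(ir.Nod(ir.ODCL, lhs, nil))
    stmts.Append(ir.Nod(ir.OAS, lhs, nil))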
Change-Id: Icb75e993c4957b6050c797ba64ee71cfb7a19644 Reviewed-on: https://go-review.googlesource.com/c/go/+/279315 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/dcl.go | 20 ++++++++------------ src/cmd/compile/internal/gc/embed.go | 4 ++-- src/cmd/compile/internal/gc/iimport.go | 10 ++++++++-- src/cmd/compile/internal/gc/noder.go | 5 ++--- 4 files changed, 20 insertions(+), 19 deletions(-) diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 04e3506dbacb6..09d2e7d8b78c7 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -130,17 +130,16 @@ func declare(n *ir.Name, ctxt ir.Class) { // declare variables from grammar // new_name_list (type | [type] = expr_list) -func variter(vl []ir.Node, t ir.Ntype, el []ir.Node) []ir.Node { +func variter(vl []*ir.Name, t ir.Ntype, el []ir.Node) []ir.Node { var init []ir.Node doexpr := len(el) > 0 if len(el) == 1 && len(vl) > 1 { e := el[0] as2 := ir.Nod(ir.OAS2, nil, nil) - as2.PtrList().Set(vl) as2.PtrRlist().Set1(e) for _, v := range vl { - v := v.(*ir.Name) + as2.PtrList().Append(v) declare(v, dclcontext) v.Ntype = t v.Defn = as2 @@ -152,17 +151,14 @@ func variter(vl []ir.Node, t ir.Ntype, el []ir.Node) []ir.Node { return append(init, as2) } - nel := len(el) - for _, v := range vl { - v := v.(*ir.Name) + for i, v := range vl { var e ir.Node if doexpr { - if len(el) == 0 { - base.Errorf("assignment mismatch: %d variables but %d values", len(vl), nel) + if i >= len(el) { + base.Errorf("assignment mismatch: %d variables but %d values", len(vl), len(el)) break } - e = el[0] - el = el[1:] + e = el[i] } declare(v, dclcontext) @@ -180,8 +176,8 @@ func variter(vl []ir.Node, t ir.Ntype, el []ir.Node) []ir.Node { } } - if len(el) != 0 { - base.Errorf("assignment mismatch: %d variables but %d values", len(vl), nel) + if len(el) > len(vl) { + base.Errorf("assignment mismatch: %d variables but %d values", len(vl), len(el)) } return init } diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go index 0d4ce83716458..ea23e26069444 100644 --- a/src/cmd/compile/internal/gc/embed.go +++ b/src/cmd/compile/internal/gc/embed.go @@ -24,7 +24,7 @@ const ( embedFiles ) -func varEmbed(p *noder, names []ir.Node, typ ir.Ntype, exprs []ir.Node, embeds []PragmaEmbed) (newExprs []ir.Node) { +func varEmbed(p *noder, names []*ir.Name, typ ir.Ntype, exprs []ir.Node, embeds []PragmaEmbed) (newExprs []ir.Node) { haveEmbed := false for _, decl := range p.file.DeclList { imp, ok := decl.(*syntax.ImportDecl) @@ -66,7 +66,7 @@ func varEmbed(p *noder, names []ir.Node, typ ir.Ntype, exprs []ir.Node, embeds [ return exprs } - v := names[0].(*ir.Name) + v := names[0] Target.Embeds = append(Target.Embeds, v) v.Embed = new([]ir.Embed) for _, e := range embeds { diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 219ce4bdef271..cd66d39b66792 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -972,8 +972,14 @@ func (r *importReader) node() ir.Node { case ir.ODCL: pos := r.pos() lhs := ir.NewDeclNameAt(pos, ir.ONAME, r.ident()) - typ := ir.TypeNode(r.typ()) - return npos(pos, liststmt(variter([]ir.Node{lhs}, typ, nil))) // TODO(gri) avoid list creation + lhs.SetType(r.typ()) + + declare(lhs, ir.PAUTO) + + var stmts ir.Nodes + stmts.Append(ir.Nod(ir.ODCL, lhs, nil)) + stmts.Append(ir.Nod(ir.OAS, lhs, nil)) + return 
npos(pos, liststmt(stmts.Slice())) // case OAS, OASWB: // unreachable - mapped to OAS case below by exporter diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index b61f19ae2e9e1..97a9ac4396194 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -441,7 +441,6 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node { nn := make([]ir.Node, 0, len(names)) for i, n := range names { - n := n.(*ir.Name) if i >= len(values) { base.Errorf("missing value in const declaration") break @@ -492,8 +491,8 @@ func (p *noder) typeDecl(decl *syntax.TypeDecl) ir.Node { return nod } -func (p *noder) declNames(op ir.Op, names []*syntax.Name) []ir.Node { - nodes := make([]ir.Node, 0, len(names)) +func (p *noder) declNames(op ir.Op, names []*syntax.Name) []*ir.Name { + nodes := make([]*ir.Name, 0, len(names)) for _, name := range names { nodes = append(nodes, p.declName(op, name)) } From 3512cde10ac5e466527d69313b8250b2ea0146b1 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Thu, 17 Dec 2020 18:47:26 -0800 Subject: [PATCH 205/474] [dev.regabi] cmd/compile: stop reusing Ntype for OSLICELIT length For OSLICELITs, we were reusing the Ntype field after type checking to hold the length of the OSLICELIT's backing array. However, Ntype is only meant for nodes that can represent types. Today, this works only because we currently use Name for all OLITERAL constants (whether declared or not), whereas we should be able to represent them more compactly with a dedicated type that doesn't implement Ntype. Passes buildall w/ toolstash -cmp. Change-Id: I385f1d787c41b016f507a5bad9489d59ccfde7f2 Reviewed-on: https://go-review.googlesource.com/c/go/+/279152 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/inl.go | 2 +- src/cmd/compile/internal/gc/order.go | 2 +- src/cmd/compile/internal/gc/sinit.go | 18 +++++++++--------- src/cmd/compile/internal/gc/typecheck.go | 3 ++- src/cmd/compile/internal/ir/expr.go | 1 + 5 files changed, 14 insertions(+), 12 deletions(-) diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 33a309db87bd8..5ada83b715b88 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -452,7 +452,7 @@ func (v *hairyVisitor) doNode(n ir.Node) error { // and don't charge for the OBLOCK itself. The ++ undoes the -- below. v.budget++ - case ir.OCALLPART: + case ir.OCALLPART, ir.OSLICELIT: v.budget-- // Hack for toolstash -cmp. 
} diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 9c03a5843c402..1a0f0066d02e4 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -1281,7 +1281,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { n := n.(*ir.CompLitExpr) o.exprList(n.List()) if n.Transient() { - t := types.NewArray(n.Type().Elem(), ir.Int64Val(n.Right())) + t := types.NewArray(n.Type().Elem(), n.Len) n.Prealloc = o.newTemp(t, false) } return n diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 7b710fd511eaa..a845bc5d75a2b 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -142,8 +142,9 @@ func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *type } case ir.OSLICELIT: + r := r.(*ir.CompLitExpr) // copy slice - slicesym(l, loff, s.inittemps[r], ir.Int64Val(r.Right())) + slicesym(l, loff, s.inittemps[r], r.Len) return true case ir.OARRAYLIT, ir.OSTRUCTLIT: @@ -232,14 +233,14 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type } case ir.OSLICELIT: + r := r.(*ir.CompLitExpr) s.initplan(r) // Init slice. - bound := ir.Int64Val(r.Right()) - ta := types.NewArray(r.Type().Elem(), bound) + ta := types.NewArray(r.Type().Elem(), r.Len) ta.SetNoalg(true) a := staticname(ta) s.inittemps[r] = a - slicesym(l, loff, a, bound) + slicesym(l, loff, a, r.Len) // Fall through to init underlying array. l = a loff = 0 @@ -425,10 +426,11 @@ func getdyn(n ir.Node, top bool) initGenType { return initDynamic case ir.OSLICELIT: + n := n.(*ir.CompLitExpr) if !top { return initDynamic } - if ir.Int64Val(n.Right())/4 > int64(n.List().Len()) { + if n.Len/4 > int64(n.List().Len()) { // <25% of entries have explicit values. // Very rough estimation, it takes 4 bytes of instructions // to initialize 1 byte of result. 
So don't use a static
@@ -603,14 +605,12 @@ func isSmallSliceLit(n *ir.CompLitExpr) bool {
 		return false
 	}
 
-	r := n.Right()
-
-	return smallintconst(r) && (n.Type().Elem().Width == 0 || ir.Int64Val(r) <= smallArrayBytes/n.Type().Elem().Width)
+	return n.Type().Elem().Width == 0 || n.Len <= smallArrayBytes/n.Type().Elem().Width
 }
 
 func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) {
 	// make an array type corresponding the number of elements we have
-	t := types.NewArray(n.Type().Elem(), ir.Int64Val(n.Right()))
+	t := types.NewArray(n.Type().Elem(), n.Len)
 	dowidth(t)
 
 	if ctxt == inNonInitFunction {
diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go
index 4fae4a0819f5c..2d383ab49e974 100644
--- a/src/cmd/compile/internal/gc/typecheck.go
+++ b/src/cmd/compile/internal/gc/typecheck.go
@@ -2850,7 +2850,8 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) {
 	case types.TSLICE:
 		length := typecheckarraylit(t.Elem(), -1, n.List().Slice(), "slice literal")
 		n.SetOp(ir.OSLICELIT)
-		n.SetRight(nodintconst(length))
+		n.SetRight(nil)
+		n.Len = length
 
 	case types.TMAP:
 		var cs constSet
diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
index 8f43eb0fb21cd..d74e7f8763d14 100644
--- a/src/cmd/compile/internal/ir/expr.go
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -294,6 +294,7 @@ type CompLitExpr struct {
 	Ntype    Ntype
 	List_    Nodes // initialized values
 	Prealloc *Name
+	Len      int64 // backing array length for OSLICELIT
 }
 
 func NewCompLitExpr(pos src.XPos, op Op, typ Ntype, list []Node) *CompLitExpr {

From c8610e4700bee51898197987de5335b8527079e8 Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Thu, 17 Dec 2020 20:17:04 -0800
Subject: [PATCH 206/474] [dev.regabi] cmd/compile: add ir.BasicLit to represent literals

This CL changes the compiler so that all literals are represented with
a new, smaller ir.BasicLit type, so that ir.Name is only used to
represent declared constants.

Passes buildall w/ toolstash -cmp.
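Condensed from the diffs below, the split now looks like this: untyped
literals get the small node, while declared constants such as the
universe's true/false keep a Name:

    // An untyped literal carries only its value.
    func NewLiteral(v constant.Value) Node {
        return NewBasicLit(base.Pos, v)
    }

    // A declared constant is still a Name.
    s.Def = ir.NewConstAt(src.NoXPos, s, types.UntypedBool, constant.MakeBool(true))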
Change-Id: I4702b8e3fa945617bd05881d7a2be1205f229633 Reviewed-on: https://go-review.googlesource.com/c/go/+/279153 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Russ Cox --- src/cmd/compile/internal/gc/universe.go | 9 +++------ src/cmd/compile/internal/ir/expr.go | 19 +++++++++++++++++++ src/cmd/compile/internal/ir/name.go | 11 +++++++++++ src/cmd/compile/internal/ir/node_gen.go | 15 +++++++++++++++ src/cmd/compile/internal/ir/val.go | 7 +------ 5 files changed, 49 insertions(+), 12 deletions(-) diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index c988c575dcd6e..e11c0eb92c773 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -11,6 +11,7 @@ import ( "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/src" + "go/constant" ) var basicTypes = [...]struct { @@ -163,14 +164,10 @@ func initUniverse() { } s = types.BuiltinPkg.Lookup("true") - b := nodbool(true) - b.(*ir.Name).SetSym(lookup("true")) - s.Def = b + s.Def = ir.NewConstAt(src.NoXPos, s, types.UntypedBool, constant.MakeBool(true)) s = types.BuiltinPkg.Lookup("false") - b = nodbool(false) - b.(*ir.Name).SetSym(lookup("false")) - s.Def = b + s.Def = ir.NewConstAt(src.NoXPos, s, types.UntypedBool, constant.MakeBool(false)) s = lookup("_") types.BlankSym = s diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index d74e7f8763d14..5937798bd4be2 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -136,6 +136,25 @@ func (n *AddrExpr) SetOp(op Op) { } } +// A BasicLit is a literal of basic type. +type BasicLit struct { + miniExpr + val constant.Value +} + +func NewBasicLit(pos src.XPos, val constant.Value) Node { + n := &BasicLit{val: val} + n.op = OLITERAL + n.pos = pos + if k := val.Kind(); k != constant.Unknown { + n.SetType(idealType(k)) + } + return n +} + +func (n *BasicLit) Val() constant.Value { return n.val } +func (n *BasicLit) SetVal(val constant.Value) { n.val = val } + // A BinaryExpr is a binary expression X Op Y, // or Op(X, Y) for builtin functions that do not become calls. type BinaryExpr struct { diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 9cf959b23d792..b0b33cccfaefb 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -179,6 +179,17 @@ func NewDeclNameAt(pos src.XPos, op Op, sym *types.Sym) *Name { return newNameAt(pos, op, sym) } +// NewConstAt returns a new OLITERAL Node associated with symbol s at position pos. +func NewConstAt(pos src.XPos, sym *types.Sym, typ *types.Type, val constant.Value) *Name { + if sym == nil { + base.Fatalf("NewConstAt nil") + } + n := newNameAt(pos, OLITERAL, sym) + n.SetType(typ) + n.SetVal(val) + return n +} + // newNameAt is like NewNameAt but allows sym == nil. 
func newNameAt(pos src.XPos, op Op, sym *types.Sym) *Name {
 	n := new(Name)
diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go
index a0fae2b949604..a5959ea26f39e 100644
--- a/src/cmd/compile/internal/ir/node_gen.go
+++ b/src/cmd/compile/internal/ir/node_gen.go
@@ -116,6 +116,21 @@ func (n *AssignStmt) editChildren(edit func(Node) Node) {
 	n.Y = maybeEdit(n.Y, edit)
 }
 
+func (n *BasicLit) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *BasicLit) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *BasicLit) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	return err
+}
+func (n *BasicLit) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+}
+
 func (n *BinaryExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
 func (n *BinaryExpr) copy() Node {
 	c := *n
diff --git a/src/cmd/compile/internal/ir/val.go b/src/cmd/compile/internal/ir/val.go
index 5b0506c0d04fc..ff45f31074e1c 100644
--- a/src/cmd/compile/internal/ir/val.go
+++ b/src/cmd/compile/internal/ir/val.go
@@ -92,12 +92,7 @@ func ValidTypeForConst(t *types.Type, v constant.Value) bool {
 
 // NewLiteral returns a new untyped constant with value v.
 func NewLiteral(v constant.Value) Node {
-	n := newNameAt(base.Pos, OLITERAL, nil)
-	if k := v.Kind(); k != constant.Unknown {
-		n.SetType(idealType(k))
-		n.SetVal(v)
-	}
-	return n
+	return NewBasicLit(base.Pos, v)
 }
 
 func idealType(ct constant.Kind) *types.Type {
From cb28c96be8b8010dd979e0723bf5a94b11962a93 Mon Sep 17 00:00:00 2001
From: Than McIntosh
Date: Thu, 24 Sep 2020 13:14:46 -0400
Subject: [PATCH 207/474] [dev.regabi] cmd/compile,cmd/link: initial support for ABI wrappers

Add compiler support for emitting ABI wrappers by creating real IR as
opposed to introducing ABI aliases. At the moment these are "no-op"
wrappers in the sense that they make a simple call (using the existing
ABI) to their target. The assumption here is that once late call
expansion can handle both ABI0 and the "new" ABIInternal (register
version), it can expand the call to do the right thing.

Note that the runtime contains functions that do not strictly follow
the rules of the current Go ABI0; this has been handled in most cases
by treating these as ABIInternal instead (these changes have been made
in previous patches).

Generation of ABI wrappers (as opposed to ABI aliases) is currently
gated by GOEXPERIMENT=regabi -- wrapper generation is on by default if
GOEXPERIMENT=regabi is set and off otherwise (but can be turned on
using "-gcflags=all=-abiwrap -ldflags=-abiwrap"). Wrapper generation
currently only works on AMD64; explicitly enabling wrappers for other
architectures (via the command line) is not supported.

Also in this patch are a few other command line options for debugging
(tracing and/or limiting wrapper creation). These will presumably go
away at some point.

Updates #27539, #40724.
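[Editorial note, not part of this CL: the "limiting wrapper creation"
option works as a simple countdown, which is what makes it usable for
bisecting to a miscompiled wrapper. A standalone sketch of the same
countdown logic, with hypothetical names mirroring useABIWrapGen below:

	package main

	import "fmt"

	var limit = 3 // stands in for base.Flag.ABIWrapLimit; <1 means no limit

	func useWrapper() bool {
		if limit == 1 {
			return false // limit exhausted
		}
		if limit < 1 {
			return true // no limit set
		}
		limit--
		return true
	}

	func main() {
		for i := 0; i < 5; i++ {
			fmt.Println("wrapper", i, "generated:", useWrapper())
		}
	}

With limit = 3, the first two wrappers are generated and the rest are
refused, so a binary search over the limit value isolates a bad wrapper.]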
Change-Id: I1ee3226fc15a3c32ca2087b8ef8e41dbe6df4a75 Reviewed-on: https://go-review.googlesource.com/c/go/+/270863 Run-TryBot: Than McIntosh TryBot-Result: Go Bot Reviewed-by: Cherry Zhang Trust: Than McIntosh --- src/cmd/compile/internal/base/debug.go | 1 + src/cmd/compile/internal/base/flag.go | 3 + src/cmd/compile/internal/gc/gsubr.go | 191 ++++++++++++++++++++---- src/cmd/compile/internal/gc/main.go | 23 +++ src/cmd/compile/internal/gc/pgen.go | 7 +- src/cmd/compile/internal/gc/racewalk.go | 2 +- src/cmd/compile/internal/gc/ssa.go | 49 +++++- src/cmd/compile/internal/types/sym.go | 17 +++ src/cmd/internal/obj/link.go | 6 + src/cmd/internal/obj/plist.go | 6 + src/cmd/internal/obj/textflag.go | 3 + src/cmd/internal/obj/x86/obj6.go | 4 +- src/cmd/link/internal/ld/main.go | 12 +- src/cmd/link/internal/ld/symtab.go | 37 ++++- src/runtime/textflag.h | 2 + test/nosplit.go | 9 +- 16 files changed, 328 insertions(+), 44 deletions(-) diff --git a/src/cmd/compile/internal/base/debug.go b/src/cmd/compile/internal/base/debug.go index 45a552a4d95e3..3acdcea8463c0 100644 --- a/src/cmd/compile/internal/base/debug.go +++ b/src/cmd/compile/internal/base/debug.go @@ -51,6 +51,7 @@ type DebugFlags struct { TypeAssert int `help:"print information about type assertion inlining"` TypecheckInl int `help:"eager typechecking of inline function bodies"` WB int `help:"print information about write barriers"` + ABIWrap int `help:"print information about ABI wrapper generation"` any bool // set when any of the values have been set } diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go index aadc70f49645f..ce87ff730eaa7 100644 --- a/src/cmd/compile/internal/base/flag.go +++ b/src/cmd/compile/internal/base/flag.go @@ -81,6 +81,8 @@ type CmdFlags struct { CompilingRuntime bool "flag:\"+\" help:\"compiling runtime\"" // Longer names + ABIWrap bool "help:\"enable generation of ABI wrappers\"" + ABIWrapLimit int "help:\"emit at most N ABI wrappers (for debugging)\"" AsmHdr string "help:\"write assembly header to `file`\"" Bench string "help:\"append benchmark times to `file`\"" BlockProfile string "help:\"write block profile to `file`\"" @@ -140,6 +142,7 @@ func ParseFlags() { Flag.LowerP = &Ctxt.Pkgpath Flag.LowerV = &Ctxt.Debugvlog + Flag.ABIWrap = objabi.Regabi_enabled != 0 Flag.Dwarf = objabi.GOARCH != "wasm" Flag.DwarfBASEntries = &Ctxt.UseBASEntries Flag.DwarfLocationLists = &Ctxt.Flag_locationlists diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index ddb431d5abf68..f3ef14c99b771 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -34,9 +34,12 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/ssa" + "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/src" + "fmt" + "os" ) var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839 @@ -187,32 +190,154 @@ func (pp *Progs) settext(fn *ir.Func) { ptxt.From.Sym = fn.LSym } +// makeABIWrapper creates a new function that wraps a cross-ABI call +// to "f". The wrapper is marked as an ABIWRAPPER. +func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { + + // Q: is this needed? + savepos := base.Pos + savedclcontext := dclcontext + savedcurfn := Curfn + + base.Pos = autogeneratedPos + dclcontext = ir.PEXTERN + + // At the moment we don't support wrapping a method, we'd need machinery + // below to handle the receiver. Panic if we see this scenario. 
+	ft := f.Nname.Ntype.Type()
+	if ft.NumRecvs() != 0 {
+		panic("makeABIWrapper support for wrapping methods not implemented")
+	}
+
+	// Manufacture a new func type to use for the wrapper.
+	var noReceiver *ir.Field
+	tfn := ir.NewFuncType(base.Pos,
+		noReceiver,
+		structargs(ft.Params(), true),
+		structargs(ft.Results(), false))
+
+	// Reuse f's types.Sym to create a new ODCLFUNC/function.
+	fn := dclfunc(f.Nname.Sym(), tfn)
+	fn.SetDupok(true)
+	fn.SetWrapper(true) // ignore frame for panic+recover matching
+
+	// Select LSYM now.
+	asym := base.Ctxt.LookupABI(f.LSym.Name, wrapperABI)
+	asym.Type = objabi.STEXT
+	if fn.LSym != nil {
+		panic("unexpected")
+	}
+	fn.LSym = asym
+
+	// ABI0-to-ABIInternal wrappers will be mainly loading params from
+	// stack into registers (and/or storing stack locations back to
+	// registers after the wrapped call); in most cases they won't
+	// need to allocate stack space, so it should be OK to mark them
+	// as NOSPLIT in these cases. In addition, my assumption is that
+	// functions written in assembly are NOSPLIT in most (but not all)
+	// cases. In the case of an ABIInternal target that has too many
+	// parameters to fit into registers, the wrapper would need to
+	// allocate stack space, but this seems like an unlikely scenario.
+	// Hence: mark these wrappers NOSPLIT.
+	//
+	// ABIInternal-to-ABI0 wrappers on the other hand will be taking
+	// things in registers and pushing them onto the stack prior to
+	// the ABI0 call, meaning that they will always need to allocate
+	// stack space. If the compiler marks them as NOSPLIT this seems
+	// as though it could lead to situations where the linker's
+	// nosplit-overflow analysis would trigger a link failure. On the
+	// other hand, if they are not tagged NOSPLIT then this could cause
+	// problems when building the runtime (since there may be calls to
+	// asm routines in cases where it's not safe to grow the stack). In
+	// most cases the wrapper would be (in effect) inlined, but there
+	// may be indirect calls from the runtime that could run
+	// into trouble here.
+	// FIXME: at the moment all.bash does not pass when I leave out
+	// NOSPLIT for these wrappers, so all are currently tagged with NOSPLIT.
+	setupTextLSym(fn, obj.NOSPLIT|obj.ABIWRAPPER)
+
+	// Generate call. Use tail call if no params and no returns,
+	// but a regular call otherwise.
+	//
+	// Note: ideally we would be using a tail call in cases where
+	// there are params but no returns for ABI0->ABIInternal wrappers,
+	// provided that all params fit into registers (e.g. we don't have
+	// to allocate any stack space). Doing this will require some
+	// extra work in typecheck/walk/ssa; we might want to add a new node
+	// OTAILCALL or something to this effect.
+	var call ir.Node
+	if tfn.Type().NumResults() == 0 && tfn.Type().NumParams() == 0 && tfn.Type().NumRecvs() == 0 {
+		call = nodSym(ir.ORETJMP, nil, f.Nname.Sym())
+	} else {
+		call = ir.Nod(ir.OCALL, f.Nname, nil)
+		call.PtrList().Set(paramNnames(tfn.Type()))
+		call.SetIsDDD(tfn.Type().IsVariadic())
+		if tfn.Type().NumResults() > 0 {
+			n := ir.Nod(ir.ORETURN, nil, nil)
+			n.PtrList().Set1(call)
+			call = n
+		}
+	}
+	fn.PtrBody().Append(call)
+
+	funcbody()
+	if base.Debug.DclStack != 0 {
+		testdclstack()
+	}
+
+	typecheckFunc(fn)
+	Curfn = fn
+	typecheckslice(fn.Body().Slice(), ctxStmt)
+
+	escapeFuncs([]*ir.Func{fn}, false)
+
+	Target.Decls = append(Target.Decls, fn)
+
+	// Restore previous context.
+	base.Pos = savepos
+	dclcontext = savedclcontext
+	Curfn = savedcurfn
+}
+
 // initLSym defines f's obj.LSym and initializes it based on the
 // properties of f. This includes setting the symbol flags and ABI and
 // creating and initializing related DWARF symbols.
 //
 // initLSym must be called exactly once per function and must be
 // called for both functions with bodies and functions without bodies.
+// For body-less functions, we only create the LSym; for functions
+// with bodies, we call a helper to set up / populate the LSym.
 func initLSym(f *ir.Func, hasBody bool) {
+	// FIXME: for new-style ABI wrappers, we set up the lsym at the
+	// point the wrapper is created.
+	if f.LSym != nil && base.Flag.ABIWrap {
+		return
+	}
+	selectLSym(f, hasBody)
+	if hasBody {
+		setupTextLSym(f, 0)
+	}
+}
+
+// selectLSym sets up the LSym for a given function, and
+// makes calls to helpers to create ABI wrappers if needed.
+func selectLSym(f *ir.Func, hasBody bool) {
 	if f.LSym != nil {
 		base.Fatalf("Func.initLSym called twice")
 	}
 
 	if nam := f.Nname; !ir.IsBlank(nam) {
-		f.LSym = nam.Sym().Linksym()
-		if f.Pragma&ir.Systemstack != 0 {
-			f.LSym.Set(obj.AttrCFunc, true)
-		}
-		var aliasABI obj.ABI
-		needABIAlias := false
-		defABI, hasDefABI := symabiDefs[f.LSym.Name]
+		var wrapperABI obj.ABI
+		needABIWrapper := false
+		defABI, hasDefABI := symabiDefs[nam.Sym().LinksymName()]
 		if hasDefABI && defABI == obj.ABI0 {
 			// Symbol is defined as ABI0. Create an
 			// Internal -> ABI0 wrapper.
-			f.LSym.SetABI(obj.ABI0)
-			needABIAlias, aliasABI = true, obj.ABIInternal
+			f.LSym = nam.Sym().LinksymABI0()
+			needABIWrapper, wrapperABI = true, obj.ABIInternal
 		} else {
+			f.LSym = nam.Sym().Linksym()
 			// No ABI override. Check that the symbol is
 			// using the expected ABI.
 			want := obj.ABIInternal
@@ -220,6 +345,9 @@ func initLSym(f *ir.Func, hasBody bool) {
 			base.Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.LSym.Name, f.LSym.ABI(), want)
 		}
 	}
+	if f.Pragma&ir.Systemstack != 0 {
+		f.LSym.Set(obj.AttrCFunc, true)
+	}
 
 	isLinknameExported := nam.Sym().Linkname != "" && (hasBody || hasDefABI)
 	if abi, ok := symabiRefs[f.LSym.Name]; (ok && abi == obj.ABI0) || isLinknameExported {
@@ -235,32 +363,39 @@ func initLSym(f *ir.Func, hasBody bool) {
 		// using linkname and we don't want to create
 		// duplicate ABI wrappers.
 		if f.LSym.ABI() != obj.ABI0 {
-			needABIAlias, aliasABI = true, obj.ABI0
+			needABIWrapper, wrapperABI = true, obj.ABI0
 		}
 	}
 
-	if needABIAlias {
-		// These LSyms have the same name as the
-		// native function, so we create them directly
-		// rather than looking them up. The uniqueness
-		// of f.lsym ensures uniqueness of asym.
+	if needABIWrapper {
+		if !useABIWrapGen(f) {
+			// Fallback: use alias instead. FIXME.
+
+			// These LSyms have the same name as the
+			// native function, so we create them directly
+			// rather than looking them up. The uniqueness
+			// of f.lsym ensures uniqueness of asym.
+			asym := &obj.LSym{
+				Name: f.LSym.Name,
+				Type: objabi.SABIALIAS,
+				R:    []obj.Reloc{{Sym: f.LSym}}, // 0 size, so "informational"
+			}
+			asym.SetABI(wrapperABI)
+			asym.Set(obj.AttrDuplicateOK, true)
+			base.Ctxt.ABIAliases = append(base.Ctxt.ABIAliases, asym)
+		} else {
+			if base.Debug.ABIWrap != 0 {
+				fmt.Fprintf(os.Stderr, "=-= %v to %v wrapper for %s.%s\n",
+					wrapperABI, 1-wrapperABI, types.LocalPkg.Path, f.LSym.Name)
+			}
+			makeABIWrapper(f, wrapperABI)
 		}
-		asym.SetABI(aliasABI)
-		asym.Set(obj.AttrDuplicateOK, true)
-		base.Ctxt.ABIAliases = append(base.Ctxt.ABIAliases, asym)
 	}
 }
+}
 
-	if !hasBody {
-		// For body-less functions, we only create the LSym.
-		return
-	}
-
-	var flag int
+// setupTextLSym initializes the LSym for a with-body text symbol.
+func setupTextLSym(f *ir.Func, flag int) {
 	if f.Dupok() {
 		flag |= obj.DUPOK
 	}
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index 7f7cd63cdfdde..de2b3db36aa7f 100644
--- a/src/cmd/compile/internal/gc/main.go
+++ b/src/cmd/compile/internal/gc/main.go
@@ -1144,3 +1144,26 @@ func initializeTypesPackage() {
 
 	initUniverse()
 }
+
+// useABIWrapGen returns true if the compiler should generate an
+// ABI wrapper for the function 'f'.
+func useABIWrapGen(f *ir.Func) bool {
+	if !base.Flag.ABIWrap {
+		return false
+	}
+
+	// Support limit option for bisecting.
+	if base.Flag.ABIWrapLimit == 1 {
+		return false
+	}
+	if base.Flag.ABIWrapLimit < 1 {
+		return true
+	}
+	base.Flag.ABIWrapLimit--
+	if base.Debug.ABIWrap != 0 && base.Flag.ABIWrapLimit == 1 {
+		fmt.Fprintf(os.Stderr, "=-= limit reached after new wrapper for %s\n",
+			f.LSym.Name)
+	}
+
+	return true
+}
diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go
index 5b5288c389fa7..dae9d79147599 100644
--- a/src/cmd/compile/internal/gc/pgen.go
+++ b/src/cmd/compile/internal/gc/pgen.go
@@ -32,7 +32,6 @@ func emitptrargsmap(fn *ir.Func) {
 		return
 	}
 	lsym := base.Ctxt.Lookup(fn.LSym.Name + ".args_stackmap")
-
 	nptr := int(fn.Type().ArgWidth() / int64(Widthptr))
 	bv := bvalloc(int32(nptr) * 2)
 	nbitmap := 1
@@ -399,7 +398,11 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S
 	fn := curfn.(*ir.Func)
 
 	if fn.Nname != nil {
-		if expect := fn.Sym().Linksym(); fnsym != expect {
+		expect := fn.Sym().Linksym()
+		if fnsym.ABI() == obj.ABI0 {
+			expect = fn.Sym().LinksymABI0()
+		}
+		if fnsym != expect {
 			base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
 		}
 	}
diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go
index 472deb16e3722..61a65368aff89 100644
--- a/src/cmd/compile/internal/gc/racewalk.go
+++ b/src/cmd/compile/internal/gc/racewalk.go
@@ -61,7 +61,7 @@ func ispkgin(pkgs []string) bool {
 }
 
 func instrument(fn *ir.Func) {
-	if fn.Pragma&ir.Norace != 0 {
+	if fn.Pragma&ir.Norace != 0 || (fn.Sym().Linksym() != nil && fn.Sym().Linksym().ABIWrapper()) {
 		return
 	}
 
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index a5340e7f11d11..b4cf8b6dc7994 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -1421,7 +1421,7 @@ func (s *state) stmt(n ir.Node) {
 	case ir.ORETJMP:
 		b := s.exit()
 		b.Kind = ssa.BlockRetJmp // override BlockRet
-		b.Aux = n.Sym().Linksym()
+		b.Aux = callTargetLSym(n.Sym(), s.curfn.LSym)
 
 	case ir.OCONTINUE, ir.OBREAK:
 		var to *ssa.Block
@@ -4826,11 +4826,11 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
 		}
 	case sym != nil:
 		if testLateExpansion {
-			aux := ssa.StaticAuxCall(sym.Linksym(), ACArgs, ACResults)
+			aux := ssa.StaticAuxCall(callTargetLSym(sym, s.curfn.LSym), ACArgs, ACResults)
 			call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
 			call.AddArgs(callArgs...)
 		} else {
-			call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(sym.Linksym(), ACArgs, ACResults), s.mem())
+			call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(callTargetLSym(sym, s.curfn.LSym), ACArgs, ACResults), s.mem())
 		}
 	default:
 		s.Fatalf("bad call type %v %v", n.Op(), n)
@@ -7291,3 +7291,46 @@ func clobberBase(n ir.Node) ir.Node {
 	}
 	return n
 }
+
+// callTargetLSym determines the correct LSym for 'callee' when called
+// from function 'caller'. There are a couple of different scenarios
+// to contend with here:
+//
+// 1. if 'caller' is an ABI wrapper, then we always want to use the
+// LSym from the Func for the callee.
+//
+// 2. if 'caller' is not an ABI wrapper, then we look at the callee
+// to see if it corresponds to a "known" ABI0 symbol (e.g. assembly
+// routine defined in the current package); if so, we want the call to
+// directly target the ABI0 symbol (effectively bypassing the
+// ABIInternal->ABI0 wrapper for 'callee').
+//
+// 3. in all other cases, we want the regular ABIInternal linksym
+//
+func callTargetLSym(callee *types.Sym, callerLSym *obj.LSym) *obj.LSym {
+	lsym := callee.Linksym()
+	if !base.Flag.ABIWrap {
+		return lsym
+	}
+	if ir.AsNode(callee.Def) == nil {
+		return lsym
+	}
+	ndclfunc := ir.AsNode(callee.Def).Name().Defn
+	if ndclfunc == nil {
+		return lsym
+	}
+	// check for case 1 above
+	if callerLSym.ABIWrapper() {
+		if nlsym := ndclfunc.Func().LSym; nlsym != nil {
+			lsym = nlsym
+		}
+	} else {
+		// check for case 2 above
+		nam := ndclfunc.Func().Nname
+		defABI, hasDefABI := symabiDefs[nam.Sym().LinksymName()]
+		if hasDefABI && defABI == obj.ABI0 {
+			lsym = nam.Sym().LinksymABI0()
+		}
+	}
+	return lsym
+}
diff --git a/src/cmd/compile/internal/types/sym.go b/src/cmd/compile/internal/types/sym.go
index 19f06fcf5bbe8..c512e3a003768 100644
--- a/src/cmd/compile/internal/types/sym.go
+++ b/src/cmd/compile/internal/types/sym.go
@@ -93,6 +93,23 @@ func (sym *Sym) Linksym() *obj.LSym {
 	return base.Ctxt.LookupInit(sym.LinksymName(), initPkg)
 }
 
+// LinksymABI0 looks up or creates an ABI0 linker symbol for "sym",
+// in cases where we want to specifically select the ABI0 version of
+// a symbol (typically used only for ABI wrappers).
+func (sym *Sym) LinksymABI0() *obj.LSym {
+	if sym == nil {
+		return nil
+	}
+	initPkg := func(r *obj.LSym) {
+		if sym.Linkname != "" {
+			r.Pkg = "_"
+		} else {
+			r.Pkg = sym.Pkg.Prefix
+		}
+	}
+	return base.Ctxt.LookupABIInit(sym.LinksymName(), obj.ABI0, initPkg)
+}
+
 // Less reports whether symbol a is ordered before symbol b.
 //
 // Symbols are ordered exported before non-exported, then by name, and
diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go
index 7b5c990a5de7d..977c5c3303321 100644
--- a/src/cmd/internal/obj/link.go
+++ b/src/cmd/internal/obj/link.go
@@ -635,6 +635,10 @@ const (
 	// ContentAddressable indicates this is a content-addressable symbol.
 	AttrContentAddressable
 
+	// ABI wrapper is set for compiler-generated text symbols that
+	// convert between ABI0 and ABIInternal calling conventions.
+	AttrABIWrapper
+
 	// attrABIBase is the value at which the ABI is encoded in
 	// Attribute. This must be last; all bits after this are
 	// assumed to be an ABI value.
@@ -660,6 +664,7 @@ func (a Attribute) TopFrame() bool { return a&AttrTopFrame != 0 } func (a Attribute) Indexed() bool { return a&AttrIndexed != 0 } func (a Attribute) UsedInIface() bool { return a&AttrUsedInIface != 0 } func (a Attribute) ContentAddressable() bool { return a&AttrContentAddressable != 0 } +func (a Attribute) ABIWrapper() bool { return a&AttrABIWrapper != 0 } func (a *Attribute) Set(flag Attribute, value bool) { if value { @@ -695,6 +700,7 @@ var textAttrStrings = [...]struct { {bit: AttrTopFrame, s: "TOPFRAME"}, {bit: AttrIndexed, s: ""}, {bit: AttrContentAddressable, s: ""}, + {bit: AttrABIWrapper, s: "ABIWRAPPER"}, } // TextAttrString formats a for printing in as part of a TEXT prog. diff --git a/src/cmd/internal/obj/plist.go b/src/cmd/internal/obj/plist.go index 2b096996f7a1b..679ce7eb8f61d 100644 --- a/src/cmd/internal/obj/plist.go +++ b/src/cmd/internal/obj/plist.go @@ -80,6 +80,11 @@ func Flushplist(ctxt *Link, plist *Plist, newprog ProgAlloc, myimportpath string if !strings.HasPrefix(s.Name, "\"\".") { continue } + if s.ABIWrapper() { + // Don't create an args_stackmap symbol reference for an ABI + // wrapper function + continue + } found := false for p := s.Func().Text; p != nil; p = p.Link { if p.As == AFUNCDATA && p.From.Type == TYPE_CONST && p.From.Offset == objabi.FUNCDATA_ArgsPointerMaps { @@ -134,6 +139,7 @@ func (ctxt *Link) InitTextSym(s *LSym, flag int) { s.Set(AttrNoSplit, flag&NOSPLIT != 0) s.Set(AttrReflectMethod, flag&REFLECTMETHOD != 0) s.Set(AttrWrapper, flag&WRAPPER != 0) + s.Set(AttrABIWrapper, flag&ABIWRAPPER != 0) s.Set(AttrNeedCtxt, flag&NEEDCTXT != 0) s.Set(AttrNoFrame, flag&NOFRAME != 0) s.Set(AttrTopFrame, flag&TOPFRAME != 0) diff --git a/src/cmd/internal/obj/textflag.go b/src/cmd/internal/obj/textflag.go index d2cec734b1c1b..fcc4014aa26f4 100644 --- a/src/cmd/internal/obj/textflag.go +++ b/src/cmd/internal/obj/textflag.go @@ -51,4 +51,7 @@ const ( // Function is the top of the call stack. Call stack unwinders should stop // at this function. TOPFRAME = 2048 + + // Function is an ABI wrapper. 
+	ABIWRAPPER = 4096
 )
diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go
index 184fb4308bd01..839aeb8fe3dfd 100644
--- a/src/cmd/internal/obj/x86/obj6.go
+++ b/src/cmd/internal/obj/x86/obj6.go
@@ -637,7 +637,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
 		}
 	}
 
-	if !p.From.Sym.NoSplit() || p.From.Sym.Wrapper() {
+	if !p.From.Sym.NoSplit() || (p.From.Sym.Wrapper() && !p.From.Sym.ABIWrapper()) {
 		p = obj.Appendp(p, newprog)
 		p = load_g_cx(ctxt, p, newprog) // load g into CX
 	}
@@ -690,7 +690,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
 		p.To.Reg = REG_BP
 	}
 
-	if cursym.Func().Text.From.Sym.Wrapper() {
+	if cursym.Func().Text.From.Sym.Wrapper() && !cursym.Func().Text.From.Sym.ABIWrapper() {
 		// if g._panic != nil && g._panic.argp == FP {
 		//   g._panic.argp = bottom-of-frame
 		// }
diff --git a/src/cmd/link/internal/ld/main.go b/src/cmd/link/internal/ld/main.go
index 5c8293810f99f..1420030eec41e 100644
--- a/src/cmd/link/internal/ld/main.go
+++ b/src/cmd/link/internal/ld/main.go
@@ -92,11 +92,10 @@ var (
 	FlagRound         = flag.Int("R", -1, "set address rounding `quantum`")
 	FlagTextAddr      = flag.Int64("T", -1, "set text segment `address`")
 	flagEntrySymbol   = flag.String("E", "", "set `entry` symbol name")
-
-	cpuprofile     = flag.String("cpuprofile", "", "write cpu profile to `file`")
-	memprofile     = flag.String("memprofile", "", "write memory profile to `file`")
-	memprofilerate = flag.Int64("memprofilerate", 0, "set runtime.MemProfileRate to `rate`")
-
+	cpuprofile        = flag.String("cpuprofile", "", "write cpu profile to `file`")
+	memprofile        = flag.String("memprofile", "", "write memory profile to `file`")
+	memprofilerate    = flag.Int64("memprofilerate", 0, "set runtime.MemProfileRate to `rate`")
+	flagAbiWrap       = false
 	benchmarkFlag     = flag.String("benchmark", "", "set to 'mem' or 'cpu' to enable phase benchmarking")
 	benchmarkFileFlag = flag.String("benchmarkprofile", "", "emit phase profiles to `base`_phase.{cpu,mem}prof")
 )
@@ -135,6 +134,9 @@ func Main(arch *sys.Arch, theArch Arch) {
 	objabi.Flagfn1("X", "add string value `definition` of the form importpath.name=value", func(s string) { addstrdata1(ctxt, s) })
 	objabi.Flagcount("v", "print link trace", &ctxt.Debugvlog)
 	objabi.Flagfn1("importcfg", "read import configuration from `file`", ctxt.readImportCfg)
+	if objabi.Regabi_enabled != 0 {
+		flag.BoolVar(&flagAbiWrap, "abiwrap", true, "support ABI wrapper functions")
+	}
 
 	objabi.Flagparse(usage)
 
diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go
index c98e4de03f8bf..3b709baf758bd 100644
--- a/src/cmd/link/internal/ld/symtab.go
+++ b/src/cmd/link/internal/ld/symtab.go
@@ -102,6 +102,41 @@ func putelfsym(ctxt *Link, x loader.Sym, typ elf.SymType, curbind elf.SymBind) {
 		elfshnum = xosect.Elfsect.(*ElfShdr).shnum
 	}
 
+	sname := ldr.SymExtname(x)
+
+	// For functions with ABI wrappers, we have to make sure that we
+	// don't wind up with two elf symbol table entries with the same
+	// name (since this will generate an error from the external
+	// linker). In the CgoExportStatic case, we want the ABI0 symbol
+	// to have the primary symbol table entry (since it's going to be
+	// called from C), so we rename the ABIInternal symbol. In all
+	// other cases, we rename the ABI0 symbol, since we want
+	// cross-load-module calls to target ABIInternal.
+	//
+	// TODO: generalize this for non-ELF (put the rename code in the
+	// loader, and store the rename result in SymExtname).
+ // + // TODO: avoid the ldr.Lookup calls below by instead using an aux + // sym or marker relocation to associate the wrapper with the + // wrapped function. + // + if flagAbiWrap { + if !ldr.IsExternal(x) && ldr.SymType(x) == sym.STEXT { + // First case + if ldr.SymVersion(x) == sym.SymVerABIInternal { + if s2 := ldr.Lookup(sname, sym.SymVerABI0); s2 != 0 && ldr.AttrCgoExportStatic(s2) && ldr.SymType(s2) == sym.STEXT { + sname = sname + ".abiinternal" + } + } + // Second case + if ldr.SymVersion(x) == sym.SymVerABI0 && !ldr.AttrCgoExportStatic(x) { + if s2 := ldr.Lookup(sname, sym.SymVerABIInternal); s2 != 0 && ldr.SymType(s2) == sym.STEXT { + sname = sname + ".abi0" + } + } + } + } + // One pass for each binding: elf.STB_LOCAL, elf.STB_GLOBAL, // maybe one day elf.STB_WEAK. bind := elf.STB_GLOBAL @@ -140,8 +175,6 @@ func putelfsym(ctxt *Link, x loader.Sym, typ elf.SymType, curbind elf.SymBind) { other |= 3 << 5 } - sname := ldr.SymExtname(x) - // When dynamically linking, we create Symbols by reading the names from // the symbol tables of the shared libraries and so the names need to // match exactly. Tools like DTrace will have to wait for now. diff --git a/src/runtime/textflag.h b/src/runtime/textflag.h index daca36d948330..e727208cd03a0 100644 --- a/src/runtime/textflag.h +++ b/src/runtime/textflag.h @@ -35,3 +35,5 @@ // Function is the top of the call stack. Call stack unwinders should stop // at this function. #define TOPFRAME 2048 +// Function is an ABI wrapper. +#define ABIWRAPPER 4096 diff --git a/test/nosplit.go b/test/nosplit.go index faa7b8c2d8583..8a3fa9bf3538e 100644 --- a/test/nosplit.go +++ b/test/nosplit.go @@ -353,7 +353,14 @@ TestCases: log.Fatal(err) } - cmd := exec.Command("go", "build") + // Turn off ABI0 wrapper generation for now. The problem here is + // that in these test cases main.main is an assembly routine, + // thus calls to it will have to go through an ABI wrapper. The + // ABI wrapper will consume some stack space, which throws off + // the numbers. + workaround := "-gcflags=-abiwrap=0" + + cmd := exec.Command("go", "build", workaround) cmd.Dir = dir output, err := cmd.CombinedOutput() if err == nil { From 7c8f5356abd7aadf32b028ce76a8a76cd5438258 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 21 Dec 2020 01:55:44 -0500 Subject: [PATCH 208/474] [dev.regabi] cmd/compile: separate dowidth better Having a global MaxWidth lets us avoid needing to refer to thearch from split-out packages when all they need is thearch.MAXWIDTH. And make a couple interface changes to let dowidth avoid importing package ir directly. Then it can move into package types. 
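[Editorial note, not part of this CL: the core of what dowidth/widstruct
compute is "round each field offset up to its alignment, then round the
struct size up to the struct's alignment". A standalone sketch of that
rule with hypothetical names (not the compiler's code):

	package main

	import "fmt"

	// rnd rounds o up to a multiple of r (r must be a power of two),
	// mirroring the Rnd helper used during layout.
	func rnd(o, r int64) int64 { return (o + r - 1) &^ (r - 1) }

	func main() {
		// Lay out struct{ a byte; b int32; c byte }.
		type field struct {
			name        string
			size, align int64
		}
		fields := []field{{"a", 1, 1}, {"b", 4, 4}, {"c", 1, 1}}
		var o, maxAlign int64 = 0, 1
		for _, f := range fields {
			o = rnd(o, f.align)
			fmt.Printf("%s at offset %d\n", f.name, o)
			o += f.size
			if f.align > maxAlign {
				maxAlign = f.align
			}
		}
		fmt.Println("size:", rnd(o, maxAlign)) // size: 12
	}
]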
Change-Id: I2c12e8e22252597530e648848320e19bdd490a01 Reviewed-on: https://go-review.googlesource.com/c/go/+/279302 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/abiutils_test.go | 1 + src/cmd/compile/internal/gc/align.go | 29 +++++++++++--------- src/cmd/compile/internal/gc/main.go | 1 + src/cmd/compile/internal/gc/pgen.go | 2 +- src/cmd/compile/internal/gc/reflect.go | 3 +- src/cmd/compile/internal/gc/sinit.go | 2 +- src/cmd/compile/internal/ir/name.go | 18 ++++++++++++ src/cmd/compile/internal/types/type.go | 12 ++++++++ 8 files changed, 51 insertions(+), 17 deletions(-) diff --git a/src/cmd/compile/internal/gc/abiutils_test.go b/src/cmd/compile/internal/gc/abiutils_test.go index 16bd787bea922..14bd7ff0970b9 100644 --- a/src/cmd/compile/internal/gc/abiutils_test.go +++ b/src/cmd/compile/internal/gc/abiutils_test.go @@ -29,6 +29,7 @@ func TestMain(m *testing.M) { thearch.LinkArch = &x86.Linkamd64 thearch.REGSP = x86.REGSP thearch.MAXWIDTH = 1 << 50 + MaxWidth = thearch.MAXWIDTH base.Ctxt = obj.Linknew(thearch.LinkArch) base.Ctxt.DiagFunc = base.Errorf base.Ctxt.DiagFlush = base.FlushErrors diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go index 95a5dbef29f24..a9cf7fb50aa05 100644 --- a/src/cmd/compile/internal/gc/align.go +++ b/src/cmd/compile/internal/gc/align.go @@ -7,12 +7,14 @@ package gc import ( "bytes" "cmd/compile/internal/base" - "cmd/compile/internal/ir" "cmd/compile/internal/types" "fmt" "sort" ) +// MaxWidth is the maximum size of a value on the target architecture. +var MaxWidth int64 + // sizeCalculationDisabled indicates whether it is safe // to calculate Types' widths and alignments. See dowidth. var sizeCalculationDisabled bool @@ -84,7 +86,7 @@ func expandiface(t *types.Type) { sort.Sort(methcmp(methods)) - if int64(len(methods)) >= thearch.MAXWIDTH/int64(Widthptr) { + if int64(len(methods)) >= MaxWidth/int64(Widthptr) { base.ErrorfAt(typePos(t), "interface too large") } for i, m := range methods { @@ -118,8 +120,7 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 { o = Rnd(o, int64(f.Type.Align)) } f.Offset = o - if n := ir.AsNode(f.Nname); n != nil { - n := n.Name() + if f.Nname != nil { // addrescapes has similar code to update these offsets. // Usually addrescapes runs after widstruct, // in which case we could drop this, @@ -127,12 +128,7 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 { // NOTE(rsc): This comment may be stale. // It's possible the ordering has changed and this is // now the common case. I'm not sure. - if n.Name().Stackcopy != nil { - n.Name().Stackcopy.SetFrameOffset(o) - n.SetFrameOffset(0) - } else { - n.SetFrameOffset(o) - } + f.Nname.(types.VarObject).RecordFrameOffset(o) } w := f.Type.Width @@ -143,7 +139,7 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 { lastzero = o } o += w - maxwidth := thearch.MAXWIDTH + maxwidth := MaxWidth // On 32-bit systems, reflect tables impose an additional constraint // that each field start offset must fit in 31 bits. 
if maxwidth < 1<<32 { @@ -206,7 +202,7 @@ func findTypeLoop(t *types.Type, path *[]*types.Type) bool { } *path = append(*path, t) - if findTypeLoop(t.Obj().(*ir.Name).Ntype.Type(), path) { + if findTypeLoop(t.Obj().(types.TypeObject).TypeDefn(), path) { return true } *path = (*path)[:len(*path)-1] @@ -419,7 +415,7 @@ func dowidth(t *types.Type) { dowidth(t.Elem()) if t.Elem().Width != 0 { - cap := (uint64(thearch.MAXWIDTH) - 1) / uint64(t.Elem().Width) + cap := (uint64(MaxWidth) - 1) / uint64(t.Elem().Width) if uint64(t.NumElem()) > cap { base.ErrorfAt(typePos(t), "type %L larger than address space", t) } @@ -479,6 +475,13 @@ func dowidth(t *types.Type) { resumecheckwidth() } +// CalcStructSize calculates the size of s, +// filling in s.Width and s.Align, +// even if size calculation is otherwise disabled. +func CalcStructSize(s *types.Type) { + s.Width = widstruct(s, s, 0, 1) // sets align +} + // when a type's width should be known, we call checkwidth // to compute it. during a declaration like // diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index de2b3db36aa7f..343ad9d1d9b32 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -208,6 +208,7 @@ func Main(archInit func(*Arch)) { Widthptr = thearch.LinkArch.PtrSize Widthreg = thearch.LinkArch.RegSize + MaxWidth = thearch.MAXWIDTH Target = new(ir.Package) diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index dae9d79147599..8f7aa8e4e74d6 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -164,7 +164,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { dowidth(n.Type()) w := n.Type().Width - if w >= thearch.MAXWIDTH || w < 0 { + if w >= MaxWidth || w < 0 { base.Fatalf("bad width") } if w == 0 && lastHasPtr { diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 615b8bdbf1edf..8e2c6f62e11dc 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -331,8 +331,7 @@ func deferstruct(stksize int64) *types.Type { // build struct holding the above fields s := types.NewStruct(types.NoPkg, fields) s.SetNoalg(true) - s.Width = widstruct(s, s, 0, 1) - s.Align = uint8(Widthptr) + CalcStructSize(s) return s } diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index a845bc5d75a2b..9ef2bd56ebabe 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -1019,7 +1019,7 @@ func stataddr(n ir.Node) (name *ir.Name, offset int64, ok bool) { } // Check for overflow. - if n.Type().Width != 0 && thearch.MAXWIDTH/n.Type().Width <= int64(l) { + if n.Type().Width != 0 && MaxWidth/n.Type().Width <= int64(l) { break } offset += int64(l) * n.Type().Width diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index b0b33cccfaefb..64c60b93d8f57 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -147,6 +147,24 @@ func (n *Name) isExpr() {} // Callers must use n.CloneName to make clear they intend to create a separate name. func (n *Name) CloneName() *Name { c := *n; return &c } +// TypeDefn returns the type definition for a named OTYPE. +// That is, given "type T Defn", it returns Defn. +// It is used by package types. +func (n *Name) TypeDefn() *types.Type { + return n.Ntype.Type() +} + +// RecordFrameOffset records the frame offset for the name. 
+// It is used by package types when laying out function arguments. +func (n *Name) RecordFrameOffset(offset int64) { + if n.Stackcopy != nil { + n.Stackcopy.SetFrameOffset(offset) + n.SetFrameOffset(0) + } else { + n.SetFrameOffset(offset) + } +} + // NewNameAt returns a new ONAME Node associated with symbol s at position pos. // The caller is responsible for setting Curfn. func NewNameAt(pos src.XPos, sym *types.Sym) *Name { diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index 4d1d30133c8cf..752c268fa2ad2 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -20,6 +20,18 @@ type Object interface { Type() *Type } +// A TypeObject is an Object representing a named type. +type TypeObject interface { + Object + TypeDefn() *Type // for "type T Defn", returns Defn +} + +// A VarObject is an Object representing a function argument, variable, or struct field. +type VarObject interface { + Object + RecordFrameOffset(int64) // save frame offset +} + //go:generate stringer -type EType -trimprefix T // EType describes a kind of type. From 3b12c6dc089f63d0fe2eeda27e65feb51c5e36d4 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 21 Dec 2020 02:22:42 -0500 Subject: [PATCH 209/474] [dev.regabi] cmd/compile: separate typecheck more cleanly Abstract the typecheck API a bit more so that it is easier to move into a new package. Change-Id: Ia0a0146151fa7f6073113e68a2c3f6e42a5d0ad8 Reviewed-on: https://go-review.googlesource.com/c/go/+/279303 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/alg.go | 4 +-- src/cmd/compile/internal/gc/main.go | 4 +++ src/cmd/compile/internal/gc/subr.go | 6 ++-- src/cmd/compile/internal/gc/typecheck.go | 37 ++++++++++++++++++++++-- src/cmd/compile/internal/gc/walk.go | 13 +++------ 5 files changed, 47 insertions(+), 17 deletions(-) diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index 036a1e7491c82..46ae76d58d8f7 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -816,7 +816,7 @@ func eqstring(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) { fn := syslook("memequal") fn = substArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8]) call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, []ir.Node{sptr, tptr, ir.Copy(slen)}) - call = typecheck(call, ctxExpr|ctxMultiOK).(*ir.CallExpr) + TypecheckCall(call) cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, slen, tlen) cmp = typecheck(cmp, ctxExpr).(*ir.BinaryExpr) @@ -853,7 +853,7 @@ func eqinterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) { tdata.SetTypecheck(1) call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, []ir.Node{stab, sdata, tdata}) - call = typecheck(call, ctxExpr|ctxMultiOK).(*ir.CallExpr) + TypecheckCall(call) cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, stab, ttab) cmp = typecheck(cmp, ctxExpr).(*ir.BinaryExpr) diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 343ad9d1d9b32..2a5ff3f5fda2d 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -212,6 +212,10 @@ func Main(archInit func(*Arch)) { Target = new(ir.Package) + NeedFuncSym = makefuncsym + NeedITab = func(t, iface *types.Type) { itabname(t, iface) } + NeedRuntimeType = addsignat // TODO(rsc): typenamesym for lock? 
+ // initialize types package // (we need to do this to break dependencies that otherwise // would lead to import cycles) diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 48cbd2505eaaa..0f6c7023f2855 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -309,7 +309,7 @@ func assignop(src, dst *types.Type) (ir.Op, string) { // us to de-virtualize calls through this // type/interface pair later. See peekitabs in reflect.go if isdirectiface(src) && !dst.IsEmptyInterface() { - itabname(src, dst) + NeedITab(src, dst) } return ir.OCONVIFACE, "" @@ -1011,6 +1011,7 @@ func adddot(n *ir.SelectorExpr) *ir.SelectorExpr { for c := len(path) - 1; c >= 0; c-- { dot := nodSym(ir.ODOT, n.Left(), path[c].field.Sym) dot.SetImplicit(true) + dot.SetType(path[c].field.Type) n.SetLeft(dot) } case ambig: @@ -1240,8 +1241,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { if !instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !isifacemethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) { // generate tail call: adjust pointer receiver and jump to embedded method. left := dot.Left() // skip final .M - // TODO(mdempsky): Remove dependency on dotlist. - if !dotlist[0].field.Type.IsPtr() { + if !left.Type().IsPtr() { left = nodAddr(left) } as := ir.Nod(ir.OAS, nthis, convnop(left, rcvr)) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 2d383ab49e974..1aaa93fc3d454 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -14,6 +14,37 @@ import ( "strings" ) +var ( + NeedFuncSym = func(*types.Sym) {} + NeedITab = func(t, itype *types.Type) {} + NeedRuntimeType = func(*types.Type) {} +) + +func TypecheckAssignExpr(n ir.Node) ir.Node { return typecheck(n, ctxExpr|ctxAssign) } +func TypecheckExpr(n ir.Node) ir.Node { return typecheck(n, ctxExpr) } +func TypecheckStmt(n ir.Node) ir.Node { return typecheck(n, ctxStmt) } + +func TypecheckExprs(exprs []ir.Node) { typecheckslice(exprs, ctxExpr) } +func TypecheckStmts(stmts []ir.Node) { typecheckslice(stmts, ctxStmt) } + +func TypecheckCall(call *ir.CallExpr) { + t := call.X.Type() + if t == nil { + panic("misuse of Call") + } + ctx := ctxStmt + if t.NumResults() > 0 { + ctx = ctxExpr | ctxMultiOK + } + if typecheck(call, ctx) != call { + panic("bad typecheck") + } +} + +func TypecheckCallee(n ir.Node) ir.Node { + return typecheck(n, ctxExpr|ctxCallee) +} + // To enable tracing support (-t flag), set enableTrace to true. const enableTrace = false @@ -2384,7 +2415,7 @@ func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) { // to make sure to generate wrappers for anonymous // receiver types too. if mt.Sym() == nil { - addsignat(t) + NeedRuntimeType(t) } } @@ -2417,7 +2448,7 @@ func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) { // Issue 25065. Make sure that we emit the symbol for a local method. 
if base.Ctxt.Flag_dynlink && !inimport && (t.Sym() == nil || t.Sym().Pkg == types.LocalPkg) { - makefuncsym(me.FuncName_.Sym()) + NeedFuncSym(me.FuncName_.Sym()) } return me @@ -3451,7 +3482,7 @@ func typecheckfunc(n *ir.Func) { } if base.Ctxt.Flag_dynlink && !inimport && n.Nname != nil { - makefuncsym(n.Sym()) + NeedFuncSym(n.Sym()) } } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 7651bbca10524..410155b3ea9d3 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -2520,15 +2520,10 @@ func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) *ir.CallEx base.Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va)) } - call := ir.Nod(ir.OCALL, fn, nil) - call.PtrList().Set(va) - ctx := ctxStmt - if fn.Type().NumResults() > 0 { - ctx = ctxExpr | ctxMultiOK - } - r1 := typecheck(call, ctx) - r1.SetType(t) - return walkexpr(r1, init).(*ir.CallExpr) + call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, va) + TypecheckCall(call) + call.SetType(t) + return walkexpr(call, init).(*ir.CallExpr) } func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr { From 572f168ed26bb32e83562cffb336f2df3a651d9c Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 21 Dec 2020 02:08:34 -0500 Subject: [PATCH 210/474] [dev.regabi] cmd/compile: separate various from Main Move various code out of Main itself and into helper functions that can be moved into other packages as package gc splits up. Similarly, move order and instrument inside walk to reduce the amount of API surface needed from the eventual package walk. Change-Id: I7849258038c6e39625a0385af9c0edd6a3b654a1 Reviewed-on: https://go-review.googlesource.com/c/go/+/279304 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/abiutils_test.go | 8 +- src/cmd/compile/internal/gc/dcl.go | 6 + src/cmd/compile/internal/gc/go.go | 2 - src/cmd/compile/internal/gc/inl.go | 20 ++ src/cmd/compile/internal/gc/main.go | 203 ++++--------------- src/cmd/compile/internal/gc/pgen.go | 10 +- src/cmd/compile/internal/gc/ssa.go | 19 +- src/cmd/compile/internal/gc/typecheck.go | 114 +++++++++++ src/cmd/compile/internal/gc/walk.go | 8 + 9 files changed, 211 insertions(+), 179 deletions(-) diff --git a/src/cmd/compile/internal/gc/abiutils_test.go b/src/cmd/compile/internal/gc/abiutils_test.go index 14bd7ff0970b9..6ed27d794f743 100644 --- a/src/cmd/compile/internal/gc/abiutils_test.go +++ b/src/cmd/compile/internal/gc/abiutils_test.go @@ -36,7 +36,13 @@ func TestMain(m *testing.M) { base.Ctxt.Bso = bufio.NewWriter(os.Stdout) Widthptr = thearch.LinkArch.PtrSize Widthreg = thearch.LinkArch.RegSize - initializeTypesPackage() + types.TypeLinkSym = func(t *types.Type) *obj.LSym { + return typenamesym(t).Linksym() + } + types.TypeLinkSym = func(t *types.Type) *obj.LSym { + return typenamesym(t).Linksym() + } + TypecheckInit() os.Exit(m.Run()) } diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 09d2e7d8b78c7..bcd127b5f17c9 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -442,6 +442,12 @@ type funcStackEnt struct { dclcontext ir.Class } +func CheckFuncStack() { + if len(funcStack) != 0 { + base.Fatalf("funcStack is non-empty: %v", len(funcStack)) + } +} + // finish the body. // called in auto-declaration context. // returns in extern-declaration context. 
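[Editorial note, not part of this CL: CheckFuncStack, added above,
asserts a balance invariant -- every funcstart must have been matched
by a funcbody by the time compilation finishes. A toy standalone
version of that push/pop discipline, with hypothetical names:

	package main

	import "fmt"

	var funcStack []string // pushed on function start, popped at body end

	func funcStart(name string) { funcStack = append(funcStack, name) }

	func funcBody() string {
		fn := funcStack[len(funcStack)-1]
		funcStack = funcStack[:len(funcStack)-1]
		return fn
	}

	func main() {
		funcStart("f")
		funcStart("f.closure") // nested function
		fmt.Println("finished:", funcBody())
		fmt.Println("finished:", funcBody())
		if n := len(funcStack); n != 0 {
			panic(fmt.Sprintf("funcStack is non-empty: %v", n))
		}
	}
]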
diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index 1707e6a11b862..df91f6f530aa4 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -129,8 +129,6 @@ var ( iscmp [ir.OEND]bool ) -var importlist []*ir.Func // imported functions and methods with inlinable bodies - var ( funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym) funcsyms []*types.Sym diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 5ada83b715b88..fde4d6910adab 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -55,6 +55,26 @@ const ( inlineBigFunctionMaxCost = 20 // Max cost of inlinee when inlining into a "big" function. ) +func InlinePackage() { + // Find functions that can be inlined and clone them before walk expands them. + visitBottomUp(Target.Decls, func(list []*ir.Func, recursive bool) { + numfns := numNonClosures(list) + for _, n := range list { + if !recursive || numfns > 1 { + // We allow inlining if there is no + // recursion, or the recursion cycle is + // across more than one function. + caninl(n) + } else { + if base.Flag.LowerM > 1 { + fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Nname) + } + } + inlcalls(n) + } + }) +} + // Get the function's package. For ordinary functions it's on the ->sym, but for imported methods // the ->sym can be re-used in the local package, so peel it off the receiver's type. func fnpkg(fn *ir.Name) *types.Pkg { diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 2a5ff3f5fda2d..4aa2a2ca47f4c 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -191,24 +191,15 @@ func Main(archInit func(*Arch)) { IsIntrinsicCall = isIntrinsicCall SSADumpInline = ssaDumpInline - - ssaDump = os.Getenv("GOSSAFUNC") - ssaDir = os.Getenv("GOSSADIR") - if ssaDump != "" { - if strings.HasSuffix(ssaDump, "+") { - ssaDump = ssaDump[:len(ssaDump)-1] - ssaDumpStdout = true - } - spl := strings.Split(ssaDump, ":") - if len(spl) > 1 { - ssaDump = spl[0] - ssaDumpCFG = spl[1] - } - } + initSSAEnv() + initSSATables() Widthptr = thearch.LinkArch.PtrSize Widthreg = thearch.LinkArch.RegSize MaxWidth = thearch.MAXWIDTH + types.TypeLinkSym = func(t *types.Type) *obj.LSym { + return typenamesym(t).Linksym() + } Target = new(ir.Package) @@ -216,152 +207,40 @@ func Main(archInit func(*Arch)) { NeedITab = func(t, iface *types.Type) { itabname(t, iface) } NeedRuntimeType = addsignat // TODO(rsc): typenamesym for lock? - // initialize types package - // (we need to do this to break dependencies that otherwise - // would lead to import cycles) - initializeTypesPackage() - - dclcontext = ir.PEXTERN - autogeneratedPos = makePos(src.NewFileBase("", ""), 1, 0) - timings.Start("fe", "loadsys") - loadsys() + types.TypeLinkSym = func(t *types.Type) *obj.LSym { + return typenamesym(t).Linksym() + } + TypecheckInit() + // Parse input. timings.Start("fe", "parse") lines := parseFiles(flag.Args()) cgoSymABIs() timings.Stop() timings.AddEvent(int64(lines), "lines") - - finishUniverse() - recordPackageName() - typecheckok = true - - // Process top-level declarations in phases. - - // Phase 1: const, type, and names and types of funcs. - // This will gather all the information about types - // and methods but doesn't depend on any of it. - // - // We also defer type alias declarations until phase 2 - // to avoid cycles like #18640. 
- // TODO(gri) Remove this again once we have a fix for #25838. - - // Don't use range--typecheck can add closures to Target.Decls. - timings.Start("fe", "typecheck", "top1") - for i := 0; i < len(Target.Decls); i++ { - n := Target.Decls[i] - if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.(*ir.Decl).Left().Name().Alias()) { - Target.Decls[i] = typecheck(n, ctxStmt) - } - } + // Typecheck. + TypecheckPackage() - // Phase 2: Variable assignments. - // To check interface assignments, depends on phase 1. - - // Don't use range--typecheck can add closures to Target.Decls. - timings.Start("fe", "typecheck", "top2") - for i := 0; i < len(Target.Decls); i++ { - n := Target.Decls[i] - if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).Left().Name().Alias() { - Target.Decls[i] = typecheck(n, ctxStmt) - } - } - - // Phase 3: Type check function bodies. - // Don't use range--typecheck can add closures to Target.Decls. - timings.Start("fe", "typecheck", "func") - var fcount int64 - for i := 0; i < len(Target.Decls); i++ { - n := Target.Decls[i] - if n.Op() == ir.ODCLFUNC { - Curfn = n.(*ir.Func) - decldepth = 1 - errorsBefore := base.Errors() - typecheckslice(Curfn.Body().Slice(), ctxStmt) - checkreturn(Curfn) - if base.Errors() > errorsBefore { - Curfn.PtrBody().Set(nil) // type errors; do not compile - } - // Now that we've checked whether n terminates, - // we can eliminate some obviously dead code. - deadcode(Curfn) - fcount++ - } - } - - // Phase 3.11: Check external declarations. - // TODO(mdempsky): This should be handled when type checking their - // corresponding ODCL nodes. - timings.Start("fe", "typecheck", "externdcls") - for i, n := range Target.Externs { - if n.Op() == ir.ONAME { - Target.Externs[i] = typecheck(Target.Externs[i], ctxExpr) - } - } - - // Phase 3.14: With all user code type-checked, it's now safe to verify map keys - // and unused dot imports. - checkMapKeys() + // With all user code typechecked, it's now safe to verify unused dot imports. checkDotImports() base.ExitIfErrors() - timings.AddEvent(fcount, "funcs") - + // Build init task. if initTask := fninit(); initTask != nil { exportsym(initTask) } - // Phase 4: Decide how to capture closed variables. - // This needs to run before escape analysis, - // because variables captured by value do not escape. - timings.Start("fe", "capturevars") - for _, n := range Target.Decls { - if n.Op() == ir.ODCLFUNC && n.Func().OClosure != nil { - Curfn = n.(*ir.Func) - capturevars(Curfn) - } - } - capturevarscomplete = true - Curfn = nil - base.ExitIfErrors() - - // Phase 5: Inlining + // Inlining timings.Start("fe", "inlining") - if base.Debug.TypecheckInl != 0 { - // Typecheck imported function bodies if Debug.l > 1, - // otherwise lazily when used or re-exported. - for _, n := range importlist { - if n.Inl != nil { - typecheckinl(n) - } - } - base.ExitIfErrors() - } - if base.Flag.LowerL != 0 { - // Find functions that can be inlined and clone them before walk expands them. - visitBottomUp(Target.Decls, func(list []*ir.Func, recursive bool) { - numfns := numNonClosures(list) - for _, n := range list { - if !recursive || numfns > 1 { - // We allow inlining if there is no - // recursion, or the recursion cycle is - // across more than one function. - caninl(n) - } else { - if base.Flag.LowerM > 1 { - fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Nname) - } - } - inlcalls(n) - } - }) + InlinePackage() } + // Devirtualize. 
 	for _, n := range Target.Decls {
 		if n.Op() == ir.ODCLFUNC {
 			devirtualize(n.(*ir.Func))
@@ -369,7 +248,7 @@ func Main(archInit func(*Arch)) {
 	}
 	Curfn = nil
 
-	// Phase 6: Escape analysis.
+	// Escape analysis.
 	// Required for moving heap allocations onto stack,
 	// which in turn is required by the closure implementation,
 	// which stores the addresses of stack variables into the closure.
@@ -388,7 +267,7 @@ func Main(archInit func(*Arch)) {
 		EnableNoWriteBarrierRecCheck()
 	}
 
-	// Phase 7: Transform closure bodies to properly reference captured variables.
+	// Transform closure bodies to properly reference captured variables.
 	// This needs to happen before walk, because closures must be transformed
 	// before walk reaches a call of a closure.
 	timings.Start("fe", "xclosures")
@@ -410,10 +289,10 @@ func Main(archInit func(*Arch)) {
 	Curfn = nil
 	peekitabs()
 
-	// Phase 8: Compile top level functions.
+	// Compile top level functions.
 	// Don't use range--walk can add functions to Target.Decls.
 	timings.Start("be", "compilefuncs")
-	fcount = 0
+	fcount := int64(0)
 	for i := 0; i < len(Target.Decls); i++ {
 		n := Target.Decls[i]
 		if n.Op() == ir.ODCLFUNC {
@@ -448,21 +327,9 @@ func Main(archInit func(*Arch)) {
 		dumpasmhdr()
 	}
 
-	// Check whether any of the functions we have compiled have gigantic stack frames.
-	sort.Slice(largeStackFrames, func(i, j int) bool {
-		return largeStackFrames[i].pos.Before(largeStackFrames[j].pos)
-	})
-	for _, large := range largeStackFrames {
-		if large.callee != 0 {
-			base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20)
-		} else {
-			base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20)
-		}
-	}
+	CheckLargeStacks()
+	CheckFuncStack()
 
-	if len(funcStack) != 0 {
-		base.Fatalf("funcStack is non-empty: %v", len(funcStack))
-	}
 	if len(compilequeue) != 0 {
 		base.Fatalf("%d uncompiled functions", len(compilequeue))
 	}
@@ -480,6 +347,20 @@ func Main(archInit func(*Arch)) {
 	}
 }
 
+func CheckLargeStacks() {
+	// Check whether any of the functions we have compiled have gigantic stack frames.
+	sort.Slice(largeStackFrames, func(i, j int) bool {
+		return largeStackFrames[i].pos.Before(largeStackFrames[j].pos)
+	})
+	for _, large := range largeStackFrames {
+		if large.callee != 0 {
+			base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20)
+		} else {
+			base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20)
+		}
+	}
+}
+
 func cgoSymABIs() {
 	// The linker expects an ABI0 wrapper for all cgo-exported
 	// functions.
@@ -1140,16 +1021,6 @@ func parseLang(s string) (lang, error) {
 	return lang{major: major, minor: minor}, nil
 }
 
-func initializeTypesPackage() {
-	types.Widthptr = Widthptr
-	types.Dowidth = dowidth
-	types.TypeLinkSym = func(t *types.Type) *obj.LSym {
-		return typenamesym(t).Linksym()
-	}
-
-	initUniverse()
-}
-
 // useABIWrapGen returns true if the compiler should generate an
 // ABI wrapper for the function 'f'.
func useABIWrapGen(f *ir.Func) bool { diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 8f7aa8e4e74d6..e43471dbcaafb 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -222,24 +222,16 @@ func funccompile(fn *ir.Func) { } func compile(fn *ir.Func) { - errorsBefore := base.Errors() - order(fn) - if base.Errors() > errorsBefore { - return - } - // Set up the function's LSym early to avoid data races with the assemblers. // Do this before walk, as walk needs the LSym to set attributes/relocations // (e.g. in markTypeUsedInInterface). initLSym(fn, true) + errorsBefore := base.Errors() walk(fn) if base.Errors() > errorsBefore { return } - if instrumenting { - instrument(fn) - } // From this point, there should be no uses of Curfn. Enforce that. Curfn = nil diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index b4cf8b6dc7994..1fc1feae67509 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -12,6 +12,7 @@ import ( "os" "path/filepath" "sort" + "strings" "bufio" "bytes" @@ -48,6 +49,22 @@ func ssaDumpInline(fn *ir.Func) { } } +func initSSAEnv() { + ssaDump = os.Getenv("GOSSAFUNC") + ssaDir = os.Getenv("GOSSADIR") + if ssaDump != "" { + if strings.HasSuffix(ssaDump, "+") { + ssaDump = ssaDump[:len(ssaDump)-1] + ssaDumpStdout = true + } + spl := strings.Split(ssaDump, ":") + if len(spl) > 1 { + ssaDump = spl[0] + ssaDumpCFG = spl[1] + } + } +} + func initssaconfig() { types_ := ssa.NewTypes() @@ -3357,7 +3374,7 @@ type intrinsicKey struct { fn string } -func init() { +func initSSATables() { intrinsics = map[intrinsicKey]intrinsicBuilder{} var all []*sys.Arch diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 1aaa93fc3d454..cc5df3ebaef3e 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -20,6 +20,96 @@ var ( NeedRuntimeType = func(*types.Type) {} ) +func TypecheckInit() { + types.Widthptr = Widthptr + types.Dowidth = dowidth + initUniverse() + dclcontext = ir.PEXTERN + timings.Start("fe", "loadsys") + loadsys() +} + +func TypecheckPackage() { + finishUniverse() + + typecheckok = true + + // Process top-level declarations in phases. + + // Phase 1: const, type, and names and types of funcs. + // This will gather all the information about types + // and methods but doesn't depend on any of it. + // + // We also defer type alias declarations until phase 2 + // to avoid cycles like #18640. + // TODO(gri) Remove this again once we have a fix for #25838. + + // Don't use range--typecheck can add closures to Target.Decls. + timings.Start("fe", "typecheck", "top1") + for i := 0; i < len(Target.Decls); i++ { + n := Target.Decls[i] + if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.(*ir.Decl).Left().Name().Alias()) { + Target.Decls[i] = typecheck(n, ctxStmt) + } + } + + // Phase 2: Variable assignments. + // To check interface assignments, depends on phase 1. + + // Don't use range--typecheck can add closures to Target.Decls. + timings.Start("fe", "typecheck", "top2") + for i := 0; i < len(Target.Decls); i++ { + n := Target.Decls[i] + if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).Left().Name().Alias() { + Target.Decls[i] = typecheck(n, ctxStmt) + } + } + + // Phase 3: Type check function bodies. 
+ // Don't use range--typecheck can add closures to Target.Decls. + timings.Start("fe", "typecheck", "func") + var fcount int64 + for i := 0; i < len(Target.Decls); i++ { + n := Target.Decls[i] + if n.Op() == ir.ODCLFUNC { + TypecheckFuncBody(n.(*ir.Func)) + fcount++ + } + } + + // Phase 4: Check external declarations. + // TODO(mdempsky): This should be handled when type checking their + // corresponding ODCL nodes. + timings.Start("fe", "typecheck", "externdcls") + for i, n := range Target.Externs { + if n.Op() == ir.ONAME { + Target.Externs[i] = typecheck(Target.Externs[i], ctxExpr) + } + } + + // Phase 5: With all user code type-checked, it's now safe to verify map keys. + checkMapKeys() + + // Phase 6: Decide how to capture closed variables. + // This needs to run before escape analysis, + // because variables captured by value do not escape. + timings.Start("fe", "capturevars") + for _, n := range Target.Decls { + if n.Op() == ir.ODCLFUNC && n.Func().OClosure != nil { + Curfn = n.(*ir.Func) + capturevars(Curfn) + } + } + capturevarscomplete = true + Curfn = nil + + if base.Debug.TypecheckInl != 0 { + // Typecheck imported function bodies if Debug.l > 1, + // otherwise lazily when used or re-exported. + TypecheckImports() + } +} + func TypecheckAssignExpr(n ir.Node) ir.Node { return typecheck(n, ctxExpr|ctxAssign) } func TypecheckExpr(n ir.Node) ir.Node { return typecheck(n, ctxExpr) } func TypecheckStmt(n ir.Node) ir.Node { return typecheck(n, ctxStmt) } @@ -45,6 +135,30 @@ func TypecheckCallee(n ir.Node) ir.Node { return typecheck(n, ctxExpr|ctxCallee) } +func TypecheckFuncBody(n *ir.Func) { + Curfn = n + decldepth = 1 + errorsBefore := base.Errors() + typecheckslice(n.Body(), ctxStmt) + checkreturn(n) + if base.Errors() > errorsBefore { + n.PtrBody().Set(nil) // type errors; do not compile + } + // Now that we've checked whether n terminates, + // we can eliminate some obviously dead code. + deadcode(n) +} + +var importlist []*ir.Func + +func TypecheckImports() { + for _, n := range importlist { + if n.Inl != nil { + typecheckinl(n) + } + } +} + // To enable tracing support (-t flag), set enableTrace to true. const enableTrace = false diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 410155b3ea9d3..5545dcb3453df 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -26,6 +26,10 @@ const zeroValSize = 1024 // must match value of runtime/map.go:maxZero func walk(fn *ir.Func) { Curfn = fn errorsBefore := base.Errors() + order(fn) + if base.Errors() > errorsBefore { + return + } if base.Flag.W != 0 { s := fmt.Sprintf("\nbefore walk %v", Curfn.Sym()) @@ -80,6 +84,10 @@ func walk(fn *ir.Func) { s := fmt.Sprintf("enter %v", Curfn.Sym()) ir.DumpList(s, Curfn.Enter) } + + if instrumenting { + instrument(fn) + } } func walkstmtlist(s []ir.Node) { From 51ba53f5c2d58dd0c02b5ee1f4ef1db2577c4d3a Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 21 Dec 2020 01:20:20 -0500 Subject: [PATCH 211/474] [dev.regabi] cmd/compile: separate misc for gc split Misc cleanup for splitting package gc: API tweaks and boundary adjustments. The change in ir.NewBlockStmt makes it a drop-in replacement for liststmt. 
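To make the liststmt point concrete, here is a minimal sketch of the
position fallback that the ir/stmt.go hunk below adds to NewBlockStmt.
blockPos is a hypothetical helper written only for this illustration
(the real change inlines the logic in the constructor), and it assumes
the usual cmd/compile/internal/ir, cmd/compile/internal/base, and
cmd/internal/src imports:

    // An unknown pos falls back to the first statement's position,
    // or to base.Pos when the list is empty -- the same inference
    // liststmt used to do, which is what makes NewBlockStmt a
    // drop-in replacement.
    func blockPos(pos src.XPos, list []ir.Node) src.XPos {
        if pos.IsKnown() {
            return pos
        }
        if len(list) > 0 {
            return list[0].Pos()
        }
        return base.Pos
    }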
Change-Id: I9455fe8ccae7d71fe8ccf390ac96672389bf4f3d Reviewed-on: https://go-review.googlesource.com/c/go/+/279305 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/escape.go | 4 ---- src/cmd/compile/internal/gc/iimport.go | 15 +++++++++++++++ src/cmd/compile/internal/gc/main.go | 17 ++++++++++------- src/cmd/compile/internal/gc/obj.go | 8 ++++---- src/cmd/compile/internal/gc/reflect.go | 12 ++++++------ src/cmd/compile/internal/gc/timings.go | 2 ++ src/cmd/compile/internal/ir/stmt.go | 7 +++++++ 7 files changed, 44 insertions(+), 21 deletions(-) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 235cef47eaaa9..3351cfe968dad 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -143,10 +143,6 @@ type EscEdge struct { notes *EscNote } -func init() { - ir.EscFmt = escFmt -} - // escFmt is called from node printing to print information about escape analysis results. func escFmt(n ir.Node) string { text := "" diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index cd66d39b66792..358fdef294d3f 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -685,6 +685,21 @@ func (r *importReader) typeExt(t *types.Type) { // so we can use index to reference the symbol. var typeSymIdx = make(map[*types.Type][2]int64) +func BaseTypeIndex(t *types.Type) int64 { + tbase := t + if t.IsPtr() && t.Sym() == nil && t.Elem().Sym() != nil { + tbase = t.Elem() + } + i, ok := typeSymIdx[tbase] + if !ok { + return -1 + } + if t != tbase { + return i[1] + } + return i[0] +} + func (r *importReader) doInline(fn *ir.Func) { if len(fn.Inl.Body) != 0 { base.Fatalf("%v already has inline body", fn) diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 4aa2a2ca47f4c..80b17ebbf80d3 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -54,9 +54,6 @@ func hidePanic() { // Target is the package being compiled. var Target *ir.Package -// timing data for compiler phases -var timings Timings - // Main parses flags and Go source files specified in the command-line // arguments, type-checks the parsed Go package, compiles functions to machine // code, and finally writes the compiled package definition to disk. @@ -189,6 +186,7 @@ func Main(archInit func(*Arch)) { logopt.LogJsonOption(base.Flag.JSON) } + ir.EscFmt = escFmt IsIntrinsicCall = isIntrinsicCall SSADumpInline = ssaDumpInline initSSAEnv() @@ -962,9 +960,11 @@ type lang struct { // any language version is supported. var langWant lang -// langSupported reports whether language version major.minor is -// supported in a particular package. -func langSupported(major, minor int, pkg *types.Pkg) bool { +// AllowsGoVersion reports whether a particular package +// is allowed to use Go version major.minor. +// We assume the imported packages have all been checked, +// so we only have to check the local package against the -lang flag. +func AllowsGoVersion(pkg *types.Pkg, major, minor int) bool { if pkg == nil { // TODO(mdempsky): Set Pkg for local types earlier. pkg = types.LocalPkg @@ -973,13 +973,16 @@ func langSupported(major, minor int, pkg *types.Pkg) bool { // Assume imported packages passed type-checking. 
return true } - if langWant.major == 0 && langWant.minor == 0 { return true } return langWant.major > major || (langWant.major == major && langWant.minor >= minor) } +func langSupported(major, minor int, pkg *types.Pkg) bool { + return AllowsGoVersion(pkg, major, minor) +} + // checkLang verifies that the -lang flag holds a valid value, and // exits if not. It initializes data used by langSupported. func checkLang() { diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 094c386218896..c6625da1daff4 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -127,8 +127,7 @@ func dumpdata() { addsignats(Target.Externs) dumpsignats() dumptabs() - ptabsLen := len(ptabs) - itabsLen := len(itabs) + numPTabs, numITabs := CountTabs() dumpimportstrings() dumpbasictypes() dumpembeds() @@ -168,10 +167,11 @@ func dumpdata() { if numExports != len(Target.Exports) { base.Fatalf("Target.Exports changed after compile functions loop") } - if ptabsLen != len(ptabs) { + newNumPTabs, newNumITabs := CountTabs() + if newNumPTabs != numPTabs { base.Fatalf("ptabs changed after compile functions loop") } - if itabsLen != len(itabs) { + if newNumITabs != numITabs { base.Fatalf("itabs changed after compile functions loop") } } diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 8e2c6f62e11dc..92b04f20d5039 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -34,6 +34,10 @@ type ptabEntry struct { t *types.Type } +func CountTabs() (numPTabs, numITabs int) { + return len(ptabs), len(itabs) +} + // runtime interface and reflection data structures var ( signatmu sync.Mutex // protects signatset and signatslice @@ -1158,13 +1162,9 @@ func dtypesym(t *types.Type) *obj.LSym { if base.Ctxt.Pkgpath != "runtime" || (tbase != types.Types[tbase.Kind()] && tbase != types.ByteType && tbase != types.RuneType && tbase != types.ErrorType) { // int, float, etc // named types from other files are defined only by those files if tbase.Sym() != nil && tbase.Sym().Pkg != types.LocalPkg { - if i, ok := typeSymIdx[tbase]; ok { + if i := BaseTypeIndex(t); i >= 0 { lsym.Pkg = tbase.Sym().Pkg.Prefix - if t != tbase { - lsym.SymIdx = int32(i[1]) - } else { - lsym.SymIdx = int32(i[0]) - } + lsym.SymIdx = int32(i) lsym.Set(obj.AttrIndexed, true) } return lsym diff --git a/src/cmd/compile/internal/gc/timings.go b/src/cmd/compile/internal/gc/timings.go index 56b3899e2f6ae..ac12d78d1e088 100644 --- a/src/cmd/compile/internal/gc/timings.go +++ b/src/cmd/compile/internal/gc/timings.go @@ -11,6 +11,8 @@ import ( "time" ) +var timings Timings + // Timings collects the execution times of labeled phases // which are added trough a sequence of Start/Stop calls. // Events may be associated with each phase via AddEvent. 
diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index 12811821ad9ee..e2543a554149f 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -5,6 +5,7 @@ package ir import ( + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/src" ) @@ -164,6 +165,12 @@ type BlockStmt struct { func NewBlockStmt(pos src.XPos, list []Node) *BlockStmt { n := &BlockStmt{} n.pos = pos + if !pos.IsKnown() { + n.pos = base.Pos + if len(list) > 0 { + n.pos = list[0].Pos() + } + } n.op = OBLOCK n.List_.Set(list) return n From 280e7fd1ee47ad92b0031bbc0fa103ac25552950 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Mon, 21 Dec 2020 15:10:26 -0500 Subject: [PATCH 212/474] [dev.regabi] cmd/compile: only access Func method on concrete types Sets up for removing Func from Node interface. That means that once the Name reorg is done, which will let us remove Name, Sym, and Val, Node will be basically a minimal interface. Passes buildall w/ toolstash -cmp. Change-Id: I6e87897572debd7f8e29b4f5167763dc2792b408 Reviewed-on: https://go-review.googlesource.com/c/go/+/279484 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/closure.go | 8 +++---- src/cmd/compile/internal/gc/dcl.go | 1 + src/cmd/compile/internal/gc/escape.go | 5 +++-- src/cmd/compile/internal/gc/iimport.go | 4 ++-- src/cmd/compile/internal/gc/initorder.go | 1 + src/cmd/compile/internal/gc/inl.go | 3 ++- src/cmd/compile/internal/gc/main.go | 9 +++++--- src/cmd/compile/internal/gc/scc.go | 8 +++++-- src/cmd/compile/internal/gc/scope.go | 2 +- src/cmd/compile/internal/gc/sinit.go | 1 + src/cmd/compile/internal/gc/typecheck.go | 10 ++++++--- src/cmd/compile/internal/gc/walk.go | 7 +++--- src/cmd/compile/internal/ir/fmt.go | 1 + src/cmd/compile/internal/ir/func.go | 28 ++++++++++++++++++++---- 14 files changed, 63 insertions(+), 25 deletions(-) diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index e07ed4cd24830..1f4bf969adad6 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -76,7 +76,7 @@ func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node { // function associated with the closure. // TODO: This creation of the named function should probably really be done in a // separate pass from type-checking. -func typecheckclosure(clo ir.Node, top int) { +func typecheckclosure(clo *ir.ClosureExpr, top int) { fn := clo.Func() // Set current associated iota value, so iota can be used inside // function in ConstSpec, see issue #22344 @@ -327,13 +327,13 @@ func transformclosure(fn *ir.Func) { // hasemptycvars reports whether closure clo has an // empty list of captured vars. -func hasemptycvars(clo ir.Node) bool { +func hasemptycvars(clo *ir.ClosureExpr) bool { return len(clo.Func().ClosureVars) == 0 } // closuredebugruntimecheck applies boilerplate checks for debug flags // and compiling runtime -func closuredebugruntimecheck(clo ir.Node) { +func closuredebugruntimecheck(clo *ir.ClosureExpr) { if base.Debug.Closure > 0 { if clo.Esc() == EscHeap { base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func().ClosureVars) @@ -349,7 +349,7 @@ func closuredebugruntimecheck(clo ir.Node) { // closureType returns the struct type used to hold all the information // needed in the closure for clo (clo must be a OCLOSURE node). // The address of a variable of the returned type can be cast to a func. 
-func closureType(clo ir.Node) *types.Type { +func closureType(clo *ir.ClosureExpr) *types.Type { // Create closure in the form of a composite literal. // supposing the closure captures an int i and a string s // and has one float64 argument and no results, diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index bcd127b5f17c9..558bdbef9241d 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -892,6 +892,7 @@ func (c *nowritebarrierrecChecker) findExtraCalls(nn ir.Node) { case ir.ONAME: callee = arg.Name().Defn.(*ir.Func) case ir.OCLOSURE: + arg := arg.(*ir.ClosureExpr) callee = arg.Func() default: base.Fatalf("expected ONAME or OCLOSURE node, got %+v", arg) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 3351cfe968dad..6510dfc4b3d3c 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -678,6 +678,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { } case ir.OCLOSURE: + n := n.(*ir.ClosureExpr) k = e.spill(k, n) // Link addresses of captured variables to closure. @@ -879,7 +880,7 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) { case v.Op() == ir.ONAME && v.(*ir.Name).Class() == ir.PFUNC: fn = v.(*ir.Name) case v.Op() == ir.OCLOSURE: - fn = v.Func().Nname + fn = v.(*ir.ClosureExpr).Func().Nname } case ir.OCALLMETH: fn = methodExprName(call.Left()) @@ -1883,7 +1884,7 @@ func heapAllocReason(n ir.Node) string { return "too large for stack" } - if n.Op() == ir.OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize { + if n.Op() == ir.OCLOSURE && closureType(n.(*ir.ClosureExpr)).Size() >= maxImplicitStackVarSize { return "too large for stack" } if n.Op() == ir.OCALLPART && partialCallType(n.(*ir.CallPartExpr)).Size() >= maxImplicitStackVarSize { diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 358fdef294d3f..5f72cedb669d7 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -630,7 +630,7 @@ func (r *importReader) varExt(n ir.Node) { r.symIdx(n.Sym()) } -func (r *importReader) funcExt(n ir.Node) { +func (r *importReader) funcExt(n *ir.Name) { r.linkname(n.Sym()) r.symIdx(n.Sym()) @@ -654,7 +654,7 @@ func (r *importReader) methExt(m *types.Field) { if r.bool() { m.SetNointerface(true) } - r.funcExt(ir.AsNode(m.Nname)) + r.funcExt(m.Nname.(*ir.Name)) } func (r *importReader) linkname(s *types.Sym) { diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go index 1b21d92f4b7b3..c9c3361d3cf81 100644 --- a/src/cmd/compile/internal/gc/initorder.go +++ b/src/cmd/compile/internal/gc/initorder.go @@ -296,6 +296,7 @@ func (d *initDeps) visit(n ir.Node) { } case ir.OCLOSURE: + n := n.(*ir.ClosureExpr) d.inspectList(n.Func().Body()) case ir.ODOTMETH, ir.OCALLPART: diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index fde4d6910adab..fc020000c774a 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -237,7 +237,7 @@ func caninl(fn *ir.Func) { n.Func().Inl = &ir.Inline{ Cost: inlineMaxBudget - visitor.budget, - Dcl: pruneUnusedAutos(n.Defn.Func().Dcl, &visitor), + Dcl: pruneUnusedAutos(n.Defn.(*ir.Func).Func().Dcl, &visitor), Body: ir.DeepCopyList(src.NoXPos, fn.Body().Slice()), } @@ -677,6 +677,7 @@ func inlCallee(fn ir.Node) *ir.Func { return fn.Func() } case ir.OCLOSURE: + fn := fn.(*ir.ClosureExpr) c := 
fn.Func() caninl(c) return c diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 80b17ebbf80d3..94b4e0e674e82 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -270,9 +270,12 @@ func Main(archInit func(*Arch)) { // before walk reaches a call of a closure. timings.Start("fe", "xclosures") for _, n := range Target.Decls { - if n.Op() == ir.ODCLFUNC && n.Func().OClosure != nil { - Curfn = n.(*ir.Func) - transformclosure(Curfn) + if n.Op() == ir.ODCLFUNC { + n := n.(*ir.Func) + if n.Func().OClosure != nil { + Curfn = n + transformclosure(n) + } } } diff --git a/src/cmd/compile/internal/gc/scc.go b/src/cmd/compile/internal/gc/scc.go index 6e63d5287a277..8fe20a80fd992 100644 --- a/src/cmd/compile/internal/gc/scc.go +++ b/src/cmd/compile/internal/gc/scc.go @@ -56,8 +56,11 @@ func visitBottomUp(list []ir.Node, analyze func(list []*ir.Func, recursive bool) v.analyze = analyze v.nodeID = make(map[*ir.Func]uint32) for _, n := range list { - if n.Op() == ir.ODCLFUNC && !n.Func().IsHiddenClosure() { - v.visit(n.(*ir.Func)) + if n.Op() == ir.ODCLFUNC { + n := n.(*ir.Func) + if !n.Func().IsHiddenClosure() { + v.visit(n) + } } } } @@ -109,6 +112,7 @@ func (v *bottomUpVisitor) visit(n *ir.Func) uint32 { } } case ir.OCLOSURE: + n := n.(*ir.ClosureExpr) if m := v.visit(n.Func()); m < min { min = m } diff --git a/src/cmd/compile/internal/gc/scope.go b/src/cmd/compile/internal/gc/scope.go index fe4e1d185ad14..8dd44b1dd4513 100644 --- a/src/cmd/compile/internal/gc/scope.go +++ b/src/cmd/compile/internal/gc/scope.go @@ -28,7 +28,7 @@ func findScope(marks []ir.Mark, pos src.XPos) ir.ScopeID { return marks[i-1].Scope } -func assembleScopes(fnsym *obj.LSym, fn ir.Node, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope { +func assembleScopes(fnsym *obj.LSym, fn *ir.Func, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope { // Initialize the DWARF scope tree based on lexical scopes. dwarfScopes := make([]dwarf.Scope, 1+len(fn.Func().Parents)) for i, parent := range fn.Func().Parents { diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 9ef2bd56ebabe..79c7215d4d438 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -269,6 +269,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type break case ir.OCLOSURE: + r := r.(*ir.ClosureExpr) if hasemptycvars(r) { if base.Debug.Closure > 0 { base.WarnfAt(r.Pos(), "closure converted to global") diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index cc5df3ebaef3e..bb658999e58a5 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -95,9 +95,12 @@ func TypecheckPackage() { // because variables captured by value do not escape. 
timings.Start("fe", "capturevars") for _, n := range Target.Decls { - if n.Op() == ir.ODCLFUNC && n.Func().OClosure != nil { - Curfn = n.(*ir.Func) - capturevars(Curfn) + if n.Op() == ir.ODCLFUNC { + n := n.(*ir.Func) + if n.Func().OClosure != nil { + Curfn = n + capturevars(n) + } } } capturevarscomplete = true @@ -2078,6 +2081,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OCLOSURE: + n := n.(*ir.ClosureExpr) typecheckclosure(n, top) if n.Type() == nil { return n diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 5545dcb3453df..87f08f41c363a 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -649,11 +649,12 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // transformclosure already did all preparation work. // Prepend captured variables to argument list. - n.PtrList().Prepend(n.Left().Func().ClosureEnter.Slice()...) - n.Left().Func().ClosureEnter.Set(nil) + clo := n.Left().(*ir.ClosureExpr) + n.PtrList().Prepend(clo.Func().ClosureEnter.Slice()...) + clo.Func().ClosureEnter.Set(nil) // Replace OCLOSURE with ONAME/PFUNC. - n.SetLeft(n.Left().Func().Nname) + n.SetLeft(clo.Func().Nname) // Update type of OCALLFUNC node. // Output arguments had not changed, but their offsets could. diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 6f15645813e51..76bb35f971f65 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -1189,6 +1189,7 @@ func dumpNode(w io.Writer, n Node, depth int) { case ODCLFUNC: // Func has many fields we don't want to print. // Bypass reflection and just print what we want. + n := n.(*Func) fmt.Fprintf(w, "%+v", n.Op()) dumpNodeHeader(w, n) fn := n.Func() diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 8aa6daed6fc51..62ac5791d1a2a 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -213,10 +213,21 @@ func (f *Func) SetWBPos(pos src.XPos) { // funcname returns the name (without the package) of the function n. func FuncName(n Node) string { - if n == nil || n.Func() == nil || n.Func().Nname == nil { + var f *Func + switch n := n.(type) { + case *Func: + f = n + case *Name: + f = n.Func() + case *CallPartExpr: + f = n.Func() + case *ClosureExpr: + f = n.Func() + } + if f == nil || f.Nname == nil { return "" } - return n.Func().Nname.Sym().Name + return f.Nname.Sym().Name } // pkgFuncName returns the name of the function referenced by n, with package prepended. @@ -231,10 +242,19 @@ func PkgFuncName(n Node) string { if n.Op() == ONAME { s = n.Sym() } else { - if n.Func() == nil || n.Func().Nname == nil { + var f *Func + switch n := n.(type) { + case *CallPartExpr: + f = n.Func() + case *ClosureExpr: + f = n.Func() + case *Func: + f = n + } + if f == nil || f.Nname == nil { return "" } - s = n.Func().Nname.Sym() + s = f.Nname.Sym() } pkg := s.Pkg From c40934b33d4d9f85ef5e891f8d26c3035ccce5bb Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Tue, 22 Dec 2020 00:07:40 -0500 Subject: [PATCH 213/474] [dev.regabi] cmd/compile: adjust one case in walkexpr The mid-case n := n.(*ir.AssignExpr) does not lend itself well to pulling the code into a new function, because n will be a function argument and will not be redeclarable. 
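A standalone illustration of that constraint, using toy types rather
than the compiler's (node, assign, and walkAssign are invented for this
example only):

    package main

    import "fmt"

    type node interface{ Pos() int }

    type assign struct{ pos int }

    func (a *assign) Pos() int { return a.pos }

    // Once the case body becomes a function, n is a parameter, so the
    // old mid-case idiom
    //     n := n.(*assign)
    // is rejected with "no new variables on left side of :=".
    // Binding the asserted value to a new name sidesteps that:
    func walkAssign(n node) {
        as := n.(*assign)
        fmt.Println(as.Pos())
    }

    func main() { walkAssign(&assign{pos: 1}) }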
Change-Id: I673f2aa37eea64b083725326ed3fa36447bcc7af Reviewed-on: https://go-review.googlesource.com/c/go/+/279426 Trust: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/walk.go | 38 ++++++++++++++--------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 87f08f41c363a..d5d12453a714d 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -702,38 +702,38 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { } else { n.(*ir.AssignStmt).SetLeft(left) } - n := n.(*ir.AssignStmt) + as := n.(*ir.AssignStmt) - if oaslit(n, init) { - return ir.NodAt(n.Pos(), ir.OBLOCK, nil, nil) + if oaslit(as, init) { + return ir.NodAt(as.Pos(), ir.OBLOCK, nil, nil) } - if n.Right() == nil { + if as.Right() == nil { // TODO(austin): Check all "implicit zeroing" - return n + return as } - if !instrumenting && isZero(n.Right()) { - return n + if !instrumenting && isZero(as.Right()) { + return as } - switch n.Right().Op() { + switch as.Right().Op() { default: - n.SetRight(walkexpr(n.Right(), init)) + as.SetRight(walkexpr(as.Right(), init)) case ir.ORECV: - // x = <-c; n.Left is x, n.Right.Left is c. + // x = <-c; as.Left is x, as.Right.Left is c. // order.stmt made sure x is addressable. - recv := n.Right().(*ir.UnaryExpr) + recv := as.Right().(*ir.UnaryExpr) recv.SetLeft(walkexpr(recv.Left(), init)) - n1 := nodAddr(n.Left()) + n1 := nodAddr(as.Left()) r := recv.Left() // the channel return mkcall1(chanfn("chanrecv1", 2, r.Type()), nil, init, r, n1) case ir.OAPPEND: // x = append(...) - call := n.Right().(*ir.CallExpr) + call := as.Right().(*ir.CallExpr) if call.Type().Elem().NotInHeap() { base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", call.Type().Elem()) } @@ -745,24 +745,24 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case call.IsDDD(): r = appendslice(call, init) // also works for append(slice, string). default: - r = walkappend(call, init, n) + r = walkappend(call, init, as) } - n.SetRight(r) + as.SetRight(r) if r.Op() == ir.OAPPEND { // Left in place for back end. // Do not add a new write barrier. // Set up address of type for back end. r.(*ir.CallExpr).SetLeft(typename(r.Type().Elem())) - return n + return as } // Otherwise, lowered for race detector. // Treat as ordinary assignment. 
} - if n.Left() != nil && n.Right() != nil { - return convas(n, init) + if as.Left() != nil && as.Right() != nil { + return convas(as, init) } - return n + return as case ir.OAS2: init.AppendNodes(n.PtrInit()) From acc32ea124957ad4b097186fb2f6da8122a9a5d1 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Tue, 22 Dec 2020 15:59:09 -0500 Subject: [PATCH 214/474] [dev.regabi] codereview.cfg: add config for dev.regabi Change-Id: Ida5cae7475bc19388fa46ceca25d983f560fa4e8 Reviewed-on: https://go-review.googlesource.com/c/go/+/279524 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Ian Lance Taylor --- codereview.cfg | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 codereview.cfg diff --git a/codereview.cfg b/codereview.cfg new file mode 100644 index 0000000000000..a23b0a00d1a1c --- /dev/null +++ b/codereview.cfg @@ -0,0 +1,2 @@ +branch: dev.regabi +parent-branch: master From d1d1099c917de7387db9c9435e35ff14c4a63a91 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Tue, 22 Dec 2020 17:22:28 -0500 Subject: [PATCH 215/474] [dev.regabi] cmd/compile: fixes for big rewrite Adjust the new regabi code a bit to make the rewrites apply cleanly. Change-Id: Ice5378e94d94ab45ca0572f44ab8c94b847271b8 Reviewed-on: https://go-review.googlesource.com/c/go/+/279530 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/gsubr.go | 11 ++++++----- src/cmd/compile/internal/gc/ssa.go | 10 ++++++---- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index f3ef14c99b771..aa498a009705a 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -265,20 +265,21 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { // to allocate any stack space). Doing this will require some // extra work in typecheck/walk/ssa, might want to add a new node // OTAILCALL or something to this effect. 
- var call ir.Node + var tail ir.Node if tfn.Type().NumResults() == 0 && tfn.Type().NumParams() == 0 && tfn.Type().NumRecvs() == 0 { - call = nodSym(ir.ORETJMP, nil, f.Nname.Sym()) + tail = nodSym(ir.ORETJMP, nil, f.Nname.Sym()) } else { - call = ir.Nod(ir.OCALL, f.Nname, nil) + call := ir.Nod(ir.OCALL, f.Nname, nil) call.PtrList().Set(paramNnames(tfn.Type())) call.SetIsDDD(tfn.Type().IsVariadic()) + tail = call if tfn.Type().NumResults() > 0 { n := ir.Nod(ir.ORETURN, nil, nil) n.PtrList().Set1(call) - call = n + tail = n } } - fn.PtrBody().Append(call) + fn.PtrBody().Append(tail) funcbody() if base.Debug.DclStack != 0 { diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 1fc1feae67509..cc5f9eeea62ab 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -7332,18 +7332,20 @@ func callTargetLSym(callee *types.Sym, callerLSym *obj.LSym) *obj.LSym { if ir.AsNode(callee.Def) == nil { return lsym } - ndclfunc := ir.AsNode(callee.Def).Name().Defn - if ndclfunc == nil { + defn := ir.AsNode(callee.Def).Name().Defn + if defn == nil { return lsym } + ndclfunc := defn.(*ir.Func) + // check for case 1 above if callerLSym.ABIWrapper() { - if nlsym := ndclfunc.Func().LSym; nlsym != nil { + if nlsym := ndclfunc.LSym; nlsym != nil { lsym = nlsym } } else { // check for case 2 above - nam := ndclfunc.Func().Nname + nam := ndclfunc.Nname defABI, hasDefABI := symabiDefs[nam.Sym().LinksymName()] if hasDefABI && defABI == obj.ABI0 { lsym = nam.Sym().LinksymABI0() From 6d03cde88a0599bd0a8d6cb1e5b08c5d0a06020a Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 22 Dec 2020 19:32:57 -0800 Subject: [PATCH 216/474] [dev.regabi] cmd/dist: automatically bootstrap cmd subdirs We want almost all cmd subdirectories anyway, and relative to the cost of the rest of toolchain bootstrapping, copying/rewriting a few extra source files is way cheaper than the engineering cost of forgetting to maintain these lists as we split out new packages. While here, also add cmd/internal/archive (and make it compile with Go 1.4) because it'll be needed in subsequent refactorings anyway; and skip files starting with # (emacs temporary files) and test files ending with _test.go. Change-Id: Ic86e680a5fdfaecd617c36d5d04413293b2d6f52 Reviewed-on: https://go-review.googlesource.com/c/go/+/279832 Run-TryBot: Matthew Dempsky Reviewed-by: Russ Cox Trust: Matthew Dempsky --- src/cmd/dist/buildtool.go | 113 +++++++++++----------------- src/cmd/internal/archive/archive.go | 10 +-- 2 files changed, 49 insertions(+), 74 deletions(-) diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go index 5e1647cbf063c..eb8729149cd93 100644 --- a/src/cmd/dist/buildtool.go +++ b/src/cmd/dist/buildtool.go @@ -23,78 +23,35 @@ import ( // compiled with a Go 1.4 toolchain to produce the bootstrapTargets. // All directories in this list are relative to and must be below $GOROOT/src. // -// The list has have two kinds of entries: names beginning with cmd/ with +// The list has two kinds of entries: names beginning with cmd/ with // no other slashes, which are commands, and other paths, which are packages // supporting the commands. Packages in the standard library can be listed // if a newer copy needs to be substituted for the Go 1.4 copy when used -// by the command packages. +// by the command packages. Paths ending with /... automatically +// include all packages within subdirectories as well. 
// These will be imported during bootstrap as bootstrap/name, like bootstrap/math/big. var bootstrapDirs = []string{ "cmd/asm", - "cmd/asm/internal/arch", - "cmd/asm/internal/asm", - "cmd/asm/internal/flags", - "cmd/asm/internal/lex", + "cmd/asm/internal/...", "cmd/cgo", "cmd/compile", - "cmd/compile/internal/amd64", - "cmd/compile/internal/base", - "cmd/compile/internal/arm", - "cmd/compile/internal/arm64", - "cmd/compile/internal/gc", - "cmd/compile/internal/ir", - "cmd/compile/internal/logopt", - "cmd/compile/internal/mips", - "cmd/compile/internal/mips64", - "cmd/compile/internal/ppc64", - "cmd/compile/internal/riscv64", - "cmd/compile/internal/s390x", - "cmd/compile/internal/ssa", - "cmd/compile/internal/syntax", - "cmd/compile/internal/types", - "cmd/compile/internal/x86", - "cmd/compile/internal/wasm", + "cmd/compile/internal/...", + "cmd/internal/archive", "cmd/internal/bio", "cmd/internal/codesign", - "cmd/internal/gcprog", "cmd/internal/dwarf", "cmd/internal/edit", + "cmd/internal/gcprog", "cmd/internal/goobj", + "cmd/internal/obj/...", "cmd/internal/objabi", - "cmd/internal/obj", - "cmd/internal/obj/arm", - "cmd/internal/obj/arm64", - "cmd/internal/obj/mips", - "cmd/internal/obj/ppc64", - "cmd/internal/obj/riscv", - "cmd/internal/obj/s390x", - "cmd/internal/obj/x86", - "cmd/internal/obj/wasm", "cmd/internal/pkgpath", "cmd/internal/src", "cmd/internal/sys", "cmd/link", - "cmd/link/internal/amd64", - "cmd/compile/internal/base", - "cmd/link/internal/arm", - "cmd/link/internal/arm64", - "cmd/link/internal/benchmark", - "cmd/link/internal/ld", - "cmd/link/internal/loadelf", - "cmd/link/internal/loader", - "cmd/link/internal/loadmacho", - "cmd/link/internal/loadpe", - "cmd/link/internal/loadxcoff", - "cmd/link/internal/mips", - "cmd/link/internal/mips64", - "cmd/link/internal/ppc64", - "cmd/link/internal/riscv64", - "cmd/link/internal/s390x", - "cmd/link/internal/sym", - "cmd/link/internal/x86", + "cmd/link/internal/...", "compress/flate", "compress/zlib", - "cmd/link/internal/wasm", "container/heap", "debug/dwarf", "debug/elf", @@ -116,6 +73,7 @@ var bootstrapDirs = []string{ var ignorePrefixes = []string{ ".", "_", + "#", } // File suffixes that use build tags introduced since Go 1.4. @@ -129,6 +87,7 @@ var ignoreSuffixes = []string{ "_wasm.s", "_wasm.go", "_test.s", + "_test.go", } func bootstrapBuildTools() { @@ -154,31 +113,47 @@ func bootstrapBuildTools() { // Copy source code into $GOROOT/pkg/bootstrap and rewrite import paths. writefile("module bootstrap\n", pathf("%s/%s", base, "go.mod"), 0) for _, dir := range bootstrapDirs { - src := pathf("%s/src/%s", goroot, dir) - dst := pathf("%s/%s", base, dir) - xmkdirall(dst) - if dir == "cmd/cgo" { - // Write to src because we need the file both for bootstrap - // and for later in the main build. - mkzdefaultcc("", pathf("%s/zdefaultcc.go", src)) - } - Dir: - for _, name := range xreaddirfiles(src) { + recurse := strings.HasSuffix(dir, "/...") + dir = strings.TrimSuffix(dir, "/...") + filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + fatalf("walking bootstrap dirs failed: %v: %v", path, err) + } + + name := filepath.Base(path) + src := pathf("%s/src/%s", goroot, path) + dst := pathf("%s/%s", base, path) + + if info.IsDir() { + if !recurse && path != dir || name == "testdata" { + return filepath.SkipDir + } + + xmkdirall(dst) + if path == "cmd/cgo" { + // Write to src because we need the file both for bootstrap + // and for later in the main build. 
+ mkzdefaultcc("", pathf("%s/zdefaultcc.go", src)) + mkzdefaultcc("", pathf("%s/zdefaultcc.go", dst)) + } + return nil + } + for _, pre := range ignorePrefixes { if strings.HasPrefix(name, pre) { - continue Dir + return nil } } for _, suf := range ignoreSuffixes { if strings.HasSuffix(name, suf) { - continue Dir + return nil } } - srcFile := pathf("%s/%s", src, name) - dstFile := pathf("%s/%s", dst, name) - text := bootstrapRewriteFile(srcFile) - writefile(text, dstFile, 0) - } + + text := bootstrapRewriteFile(src) + writefile(text, dst, 0) + return nil + }) } // Set up environment for invoking Go 1.4 go command. diff --git a/src/cmd/internal/archive/archive.go b/src/cmd/internal/archive/archive.go index c1661d7711ac9..762e888a04d11 100644 --- a/src/cmd/internal/archive/archive.go +++ b/src/cmd/internal/archive/archive.go @@ -118,9 +118,9 @@ type objReader struct { func (r *objReader) init(f *os.File) { r.a = &Archive{f, nil} - r.offset, _ = f.Seek(0, io.SeekCurrent) - r.limit, _ = f.Seek(0, io.SeekEnd) - f.Seek(r.offset, io.SeekStart) + r.offset, _ = f.Seek(0, os.SEEK_CUR) + r.limit, _ = f.Seek(0, os.SEEK_END) + f.Seek(r.offset, os.SEEK_SET) r.b = bio.NewReader(f) } @@ -221,7 +221,7 @@ func (r *objReader) skip(n int64) { r.readFull(r.tmp[:n]) } else { // Seek, giving up buffered data. - r.b.MustSeek(r.offset+n, io.SeekStart) + r.b.MustSeek(r.offset+n, os.SEEK_SET) r.offset += n } } @@ -426,7 +426,7 @@ func (r *objReader) parseObject(o *GoObj, size int64) error { // AddEntry adds an entry to the end of a, with the content from r. func (a *Archive) AddEntry(typ EntryType, name string, mtime int64, uid, gid int, mode os.FileMode, size int64, r io.Reader) { - off, err := a.f.Seek(0, io.SeekEnd) + off, err := a.f.Seek(0, os.SEEK_END) if err != nil { log.Fatal(err) } From 69cf39089f3e5e6e5356c90c1bd8f30f76658bd0 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Tue, 22 Dec 2020 23:46:07 -0500 Subject: [PATCH 217/474] [dev.regabi] cmd/compile: do not die in early base.FlushErrors Change-Id: I72bac8a85db14494298059f8efddc5cbbf45f7ca Reviewed-on: https://go-review.googlesource.com/c/go/+/279214 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/base/print.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/base/print.go b/src/cmd/compile/internal/base/print.go index 6831b3ada314f..ac7333ca4e093 100644 --- a/src/cmd/compile/internal/base/print.go +++ b/src/cmd/compile/internal/base/print.go @@ -73,7 +73,9 @@ func (x byPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] } // FlushErrors sorts errors seen so far by line number, prints them to stdout, // and empties the errors array. func FlushErrors() { - Ctxt.Bso.Flush() + if Ctxt != nil && Ctxt.Bso != nil { + Ctxt.Bso.Flush() + } if len(errorMsgs) == 0 { return } From 6f27d29be0b22e0e5e77972d00d24ef3d6d5fd49 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Tue, 22 Dec 2020 23:55:29 -0500 Subject: [PATCH 218/474] [dev.regabi] cmd/compile: remove ir.Nod [generated] Rewrite all uses of ir.Nod and friends to call the IR constructors directly. This gives the results a more specific type and will play nicely with introduction of more specific types throughout the code in a followup CL. Passes buildall w/ toolstash -cmp. [git-generate] cd src/cmd/compile/internal/gc rf ' ex . 
../ir { import "cmd/compile/internal/ir" import "cmd/compile/internal/types" import "cmd/compile/internal/syntax" import "cmd/internal/src" var p *noder var orig syntax.Node var op ir.Op var l, r ir.Node var sym *types.Sym p.nod(orig, op, l, r) -> ir.NodAt(p.pos(orig), op, l, r) p.nodSym(orig, op, l, sym) -> nodlSym(p.pos(orig), op, l, sym) var xpos src.XPos var ns ir.Nodes npos(xpos, nodSym(op, l, sym)) -> nodlSym(xpos, op, l, sym) npos(xpos, liststmt(ns)) -> ir.NewBlockStmt(xpos, ns) } ex . ../ir { import "cmd/compile/internal/base" import "cmd/compile/internal/ir" import "cmd/compile/internal/types" var op ir.Op var l, r ir.Node ir.Nod(op, l, r) -> ir.NodAt(base.Pos, op, l, r) var sym *types.Sym nodSym(op, l, sym) -> nodlSym(base.Pos, op, l, sym) } ex . ../ir { import "cmd/compile/internal/ir" import "cmd/internal/src" # rf overlapping match handling is not quite good enough # for certain nested rewrites, so handle these two - which often contain other ir.NodAt calls - early. var l, r ir.Node var xpos src.XPos ir.NodAt(xpos, ir.OAS, l, r) -> ir.NewAssignStmt(xpos, l, r) ir.NodAt(xpos, ir.OIF, l, nil) -> ir.NewIfStmt(xpos, l, nil, nil) } ex . ../ir { import "cmd/compile/internal/ir" import "cmd/compile/internal/types" import "cmd/internal/src" var l, r ir.Node var sym *types.Sym var xpos src.XPos nodlSym(xpos, ir.ODOT, l, sym) -> ir.NewSelectorExpr(xpos, ir.ODOT, l, sym) nodlSym(xpos, ir.OXDOT, l, sym) -> ir.NewSelectorExpr(xpos, ir.OXDOT, l, sym) nodlSym(xpos, ir.ODOTPTR, l, sym) -> ir.NewSelectorExpr(xpos, ir.ODOTPTR, l, sym) nodlSym(xpos, ir.OGOTO, nil, sym) -> ir.NewBranchStmt(xpos, ir.OGOTO, sym) nodlSym(xpos, ir.ORETJMP, nil, sym) -> ir.NewBranchStmt(xpos, ir.ORETJMP, sym) nodlSym(xpos, ir.OLABEL, nil, sym) -> ir.NewLabelStmt(xpos, sym) nodlSym(xpos, ir.OSTRUCTKEY, l, sym) -> ir.NewStructKeyExpr(xpos, sym, l) ir.NodAt(xpos, ir.OADD, l, r) -> ir.NewBinaryExpr(xpos, ir.OADD, l, r) ir.NodAt(xpos, ir.OAND, l, r) -> ir.NewBinaryExpr(xpos, ir.OAND, l, r) ir.NodAt(xpos, ir.OANDNOT, l, r) -> ir.NewBinaryExpr(xpos, ir.OANDNOT, l, r) ir.NodAt(xpos, ir.ODIV, l, r) -> ir.NewBinaryExpr(xpos, ir.ODIV, l, r) ir.NodAt(xpos, ir.OEQ, l, r) -> ir.NewBinaryExpr(xpos, ir.OEQ, l, r) ir.NodAt(xpos, ir.OGE, l, r) -> ir.NewBinaryExpr(xpos, ir.OGE, l, r) ir.NodAt(xpos, ir.OGT, l, r) -> ir.NewBinaryExpr(xpos, ir.OGT, l, r) ir.NodAt(xpos, ir.OLE, l, r) -> ir.NewBinaryExpr(xpos, ir.OLE, l, r) ir.NodAt(xpos, ir.OLSH, l, r) -> ir.NewBinaryExpr(xpos, ir.OLSH, l, r) ir.NodAt(xpos, ir.OLT, l, r) -> ir.NewBinaryExpr(xpos, ir.OLT, l, r) ir.NodAt(xpos, ir.OMOD, l, r) -> ir.NewBinaryExpr(xpos, ir.OMOD, l, r) ir.NodAt(xpos, ir.OMUL, l, r) -> ir.NewBinaryExpr(xpos, ir.OMUL, l, r) ir.NodAt(xpos, ir.ONE, l, r) -> ir.NewBinaryExpr(xpos, ir.ONE, l, r) ir.NodAt(xpos, ir.OOR, l, r) -> ir.NewBinaryExpr(xpos, ir.OOR, l, r) ir.NodAt(xpos, ir.ORSH, l, r) -> ir.NewBinaryExpr(xpos, ir.ORSH, l, r) ir.NodAt(xpos, ir.OSUB, l, r) -> ir.NewBinaryExpr(xpos, ir.OSUB, l, r) ir.NodAt(xpos, ir.OXOR, l, r) -> ir.NewBinaryExpr(xpos, ir.OXOR, l, r) ir.NodAt(xpos, ir.OCOPY, l, r) -> ir.NewBinaryExpr(xpos, ir.OCOPY, l, r) ir.NodAt(xpos, ir.OCOMPLEX, l, r) -> ir.NewBinaryExpr(xpos, ir.OCOMPLEX, l, r) ir.NodAt(xpos, ir.OEFACE, l, r) -> ir.NewBinaryExpr(xpos, ir.OEFACE, l, r) ir.NodAt(xpos, ir.OADDR, l, nil) -> ir.NewAddrExpr(xpos, l) ir.NodAt(xpos, ir.OADDSTR, nil, nil) -> ir.NewAddStringExpr(xpos, nil) ir.NodAt(xpos, ir.OANDAND, l, r) -> ir.NewLogicalExpr(xpos, ir.OANDAND, l, r) ir.NodAt(xpos, ir.OOROR, l, r) -> ir.NewLogicalExpr(xpos, ir.OOROR, l, 
r) ir.NodAt(xpos, ir.OARRAYLIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OARRAYLIT, nil, nil) ir.NodAt(xpos, ir.OCOMPLIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OCOMPLIT, nil, nil) ir.NodAt(xpos, ir.OMAPLIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OMAPLIT, nil, nil) ir.NodAt(xpos, ir.OSTRUCTLIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OSTRUCTLIT, nil, nil) ir.NodAt(xpos, ir.OSLICELIT, nil, nil) -> ir.NewCompLitExpr(xpos, ir.OSLICELIT, nil, nil) ir.NodAt(xpos, ir.OARRAYLIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OARRAYLIT, r.(ir.Ntype), nil) ir.NodAt(xpos, ir.OCOMPLIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OCOMPLIT, r.(ir.Ntype), nil) ir.NodAt(xpos, ir.OMAPLIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OMAPLIT, r.(ir.Ntype), nil) ir.NodAt(xpos, ir.OSTRUCTLIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OSTRUCTLIT, r.(ir.Ntype), nil) ir.NodAt(xpos, ir.OSLICELIT, nil, r) -> ir.NewCompLitExpr(xpos, ir.OSLICELIT, r.(ir.Ntype), nil) ir.NodAt(xpos, ir.OAS2, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2, nil, nil) ir.NodAt(xpos, ir.OAS2DOTTYPE, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2DOTTYPE, nil, nil) ir.NodAt(xpos, ir.OAS2FUNC, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2FUNC, nil, nil) ir.NodAt(xpos, ir.OAS2MAPR, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2MAPR, nil, nil) ir.NodAt(xpos, ir.OAS2RECV, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OAS2RECV, nil, nil) ir.NodAt(xpos, ir.OSELRECV2, nil, nil) -> ir.NewAssignListStmt(xpos, ir.OSELRECV2, nil, nil) ir.NodAt(xpos, ir.OASOP, l, r) -> ir.NewAssignOpStmt(xpos, ir.OXXX, l, r) ir.NodAt(xpos, ir.OBITNOT, l, nil) -> ir.NewUnaryExpr(xpos, ir.OBITNOT, l) ir.NodAt(xpos, ir.ONEG, l, nil) -> ir.NewUnaryExpr(xpos, ir.ONEG, l) ir.NodAt(xpos, ir.ONOT, l, nil) -> ir.NewUnaryExpr(xpos, ir.ONOT, l) ir.NodAt(xpos, ir.OPLUS, l, nil) -> ir.NewUnaryExpr(xpos, ir.OPLUS, l) ir.NodAt(xpos, ir.ORECV, l, nil) -> ir.NewUnaryExpr(xpos, ir.ORECV, l) ir.NodAt(xpos, ir.OALIGNOF, l, nil) -> ir.NewUnaryExpr(xpos, ir.OALIGNOF, l) ir.NodAt(xpos, ir.OCAP, l, nil) -> ir.NewUnaryExpr(xpos, ir.OCAP, l) ir.NodAt(xpos, ir.OCLOSE, l, nil) -> ir.NewUnaryExpr(xpos, ir.OCLOSE, l) ir.NodAt(xpos, ir.OIMAG, l, nil) -> ir.NewUnaryExpr(xpos, ir.OIMAG, l) ir.NodAt(xpos, ir.OLEN, l, nil) -> ir.NewUnaryExpr(xpos, ir.OLEN, l) ir.NodAt(xpos, ir.ONEW, l, nil) -> ir.NewUnaryExpr(xpos, ir.ONEW, l) ir.NodAt(xpos, ir.ONEWOBJ, l, nil) -> ir.NewUnaryExpr(xpos, ir.ONEWOBJ, l) ir.NodAt(xpos, ir.OOFFSETOF, l, nil) -> ir.NewUnaryExpr(xpos, ir.OOFFSETOF, l) ir.NodAt(xpos, ir.OPANIC, l, nil) -> ir.NewUnaryExpr(xpos, ir.OPANIC, l) ir.NodAt(xpos, ir.OREAL, l, nil) -> ir.NewUnaryExpr(xpos, ir.OREAL, l) ir.NodAt(xpos, ir.OSIZEOF, l, nil) -> ir.NewUnaryExpr(xpos, ir.OSIZEOF, l) ir.NodAt(xpos, ir.OCHECKNIL, l, nil) -> ir.NewUnaryExpr(xpos, ir.OCHECKNIL, l) ir.NodAt(xpos, ir.OCFUNC, l, nil) -> ir.NewUnaryExpr(xpos, ir.OCFUNC, l) ir.NodAt(xpos, ir.OIDATA, l, nil) -> ir.NewUnaryExpr(xpos, ir.OIDATA, l) ir.NodAt(xpos, ir.OITAB, l, nil) -> ir.NewUnaryExpr(xpos, ir.OITAB, l) ir.NodAt(xpos, ir.OSPTR, l, nil) -> ir.NewUnaryExpr(xpos, ir.OSPTR, l) ir.NodAt(xpos, ir.OVARDEF, l, nil) -> ir.NewUnaryExpr(xpos, ir.OVARDEF, l) ir.NodAt(xpos, ir.OVARKILL, l, nil) -> ir.NewUnaryExpr(xpos, ir.OVARKILL, l) ir.NodAt(xpos, ir.OVARLIVE, l, nil) -> ir.NewUnaryExpr(xpos, ir.OVARLIVE, l) ir.NodAt(xpos, ir.OBLOCK, nil, nil) -> ir.NewBlockStmt(xpos, nil) ir.NodAt(xpos, ir.OBREAK, nil, nil) -> ir.NewBranchStmt(xpos, ir.OBREAK, nil) ir.NodAt(xpos, ir.OCONTINUE, nil, nil) -> ir.NewBranchStmt(xpos, ir.OCONTINUE, nil) ir.NodAt(xpos, ir.OFALL, 
nil, nil) -> ir.NewBranchStmt(xpos, ir.OFALL, nil) ir.NodAt(xpos, ir.OGOTO, nil, nil) -> ir.NewBranchStmt(xpos, ir.OGOTO, nil) ir.NodAt(xpos, ir.ORETJMP, nil, nil) -> ir.NewBranchStmt(xpos, ir.ORETJMP, nil) ir.NodAt(xpos, ir.OCALL, l, nil) -> ir.NewCallExpr(xpos, ir.OCALL, l, nil) ir.NodAt(xpos, ir.OCALLFUNC, l, nil) -> ir.NewCallExpr(xpos, ir.OCALLFUNC, l, nil) ir.NodAt(xpos, ir.OCALLINTER, l, nil) -> ir.NewCallExpr(xpos, ir.OCALLINTER, l, nil) ir.NodAt(xpos, ir.OCALLMETH, l, nil) -> ir.NewCallExpr(xpos, ir.OCALLMETH, l, nil) ir.NodAt(xpos, ir.OAPPEND, l, nil) -> ir.NewCallExpr(xpos, ir.OAPPEND, l, nil) ir.NodAt(xpos, ir.ODELETE, l, nil) -> ir.NewCallExpr(xpos, ir.ODELETE, l, nil) ir.NodAt(xpos, ir.OGETG, l, nil) -> ir.NewCallExpr(xpos, ir.OGETG, l, nil) ir.NodAt(xpos, ir.OMAKE, l, nil) -> ir.NewCallExpr(xpos, ir.OMAKE, l, nil) ir.NodAt(xpos, ir.OPRINT, l, nil) -> ir.NewCallExpr(xpos, ir.OPRINT, l, nil) ir.NodAt(xpos, ir.OPRINTN, l, nil) -> ir.NewCallExpr(xpos, ir.OPRINTN, l, nil) ir.NodAt(xpos, ir.ORECOVER, l, nil) -> ir.NewCallExpr(xpos, ir.ORECOVER, l, nil) ir.NodAt(xpos, ir.OCASE, nil, nil) -> ir.NewCaseStmt(xpos, nil, nil) ir.NodAt(xpos, ir.OCONV, l, nil) -> ir.NewConvExpr(xpos, ir.OCONV, nil, l) ir.NodAt(xpos, ir.OCONVIFACE, l, nil) -> ir.NewConvExpr(xpos, ir.OCONVIFACE, nil, l) ir.NodAt(xpos, ir.OCONVNOP, l, nil) -> ir.NewConvExpr(xpos, ir.OCONVNOP, nil, l) ir.NodAt(xpos, ir.ORUNESTR, l, nil) -> ir.NewConvExpr(xpos, ir.ORUNESTR, nil, l) ir.NodAt(xpos, ir.ODCL, l, nil) -> ir.NewDecl(xpos, ir.ODCL, l) ir.NodAt(xpos, ir.ODCLCONST, l, nil) -> ir.NewDecl(xpos, ir.ODCLCONST, l) ir.NodAt(xpos, ir.ODCLTYPE, l, nil) -> ir.NewDecl(xpos, ir.ODCLTYPE, l) ir.NodAt(xpos, ir.ODCLFUNC, nil, nil) -> ir.NewFunc(xpos) ir.NodAt(xpos, ir.ODEFER, l, nil) -> ir.NewGoDeferStmt(xpos, ir.ODEFER, l) ir.NodAt(xpos, ir.OGO, l, nil) -> ir.NewGoDeferStmt(xpos, ir.OGO, l) ir.NodAt(xpos, ir.ODEREF, l, nil) -> ir.NewStarExpr(xpos, l) ir.NodAt(xpos, ir.ODOT, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOT, l, nil) ir.NodAt(xpos, ir.ODOTPTR, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOTPTR, l, nil) ir.NodAt(xpos, ir.ODOTMETH, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOTMETH, l, nil) ir.NodAt(xpos, ir.ODOTINTER, l, nil) -> ir.NewSelectorExpr(xpos, ir.ODOTINTER, l, nil) ir.NodAt(xpos, ir.OXDOT, l, nil) -> ir.NewSelectorExpr(xpos, ir.OXDOT, l, nil) ir.NodAt(xpos, ir.ODOTTYPE, l, nil) -> ir.NewTypeAssertExpr(xpos, l, nil) ir.NodAt(xpos, ir.ODOTTYPE, l, r) -> ir.NewTypeAssertExpr(xpos, l, r.(ir.Ntype)) ir.NodAt(xpos, ir.OFOR, l, r) -> ir.NewForStmt(xpos, nil, l, r, nil) ir.NodAt(xpos, ir.OINDEX, l, r) -> ir.NewIndexExpr(xpos, l, r) ir.NodAt(xpos, ir.OINLMARK, nil, nil) -> ir.NewInlineMarkStmt(xpos, types.BADWIDTH) ir.NodAt(xpos, ir.OKEY, l, r) -> ir.NewKeyExpr(xpos, l, r) ir.NodAt(xpos, ir.OLABEL, nil, nil) -> ir.NewLabelStmt(xpos, nil) ir.NodAt(xpos, ir.OMAKECHAN, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKECHAN, l, r) ir.NodAt(xpos, ir.OMAKEMAP, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKEMAP, l, r) ir.NodAt(xpos, ir.OMAKESLICE, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKESLICE, l, r) ir.NodAt(xpos, ir.OMAKESLICECOPY, l, r) -> ir.NewMakeExpr(xpos, ir.OMAKESLICECOPY, l, r) ir.NodAt(xpos, ir.ONIL, nil, nil) -> ir.NewNilExpr(xpos) ir.NodAt(xpos, ir.OPACK, nil, nil) -> ir.NewPkgName(xpos, nil, nil) ir.NodAt(xpos, ir.OPAREN, l, nil) -> ir.NewParenExpr(xpos, l) ir.NodAt(xpos, ir.ORANGE, nil, r) -> ir.NewRangeStmt(xpos, nil, r, nil) ir.NodAt(xpos, ir.ORESULT, nil, nil) -> ir.NewResultExpr(xpos, nil, types.BADWIDTH) ir.NodAt(xpos, ir.ORETURN, nil, nil) 
-> ir.NewReturnStmt(xpos, nil) ir.NodAt(xpos, ir.OSELECT, nil, nil) -> ir.NewSelectStmt(xpos, nil) ir.NodAt(xpos, ir.OSEND, l, r) -> ir.NewSendStmt(xpos, l, r) ir.NodAt(xpos, ir.OSLICE, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICE, l) ir.NodAt(xpos, ir.OSLICEARR, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICEARR, l) ir.NodAt(xpos, ir.OSLICESTR, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICESTR, l) ir.NodAt(xpos, ir.OSLICE3, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICE3, l) ir.NodAt(xpos, ir.OSLICE3ARR, l, nil) -> ir.NewSliceExpr(xpos, ir.OSLICE3ARR, l) ir.NodAt(xpos, ir.OSLICEHEADER, l, nil) -> ir.NewSliceHeaderExpr(xpos, nil, l, nil, nil) ir.NodAt(xpos, ir.OSWITCH, l, nil) -> ir.NewSwitchStmt(xpos, l, nil) ir.NodAt(xpos, ir.OINLCALL, nil, nil) -> ir.NewInlinedCallExpr(xpos, nil, nil) } rm noder.nod noder.nodSym nodSym nodlSym ir.NodAt ir.Nod ' Change-Id: Ibf1eb708de8463ae74ccc47d7966cc263a18295e Reviewed-on: https://go-review.googlesource.com/c/go/+/277933 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/alg.go | 110 ++++----- src/cmd/compile/internal/gc/closure.go | 24 +- src/cmd/compile/internal/gc/dcl.go | 10 +- src/cmd/compile/internal/gc/gsubr.go | 6 +- src/cmd/compile/internal/gc/iimport.go | 48 ++-- src/cmd/compile/internal/gc/inl.go | 38 +-- src/cmd/compile/internal/gc/noder.go | 62 ++--- src/cmd/compile/internal/gc/order.go | 36 +-- src/cmd/compile/internal/gc/range.go | 92 +++---- src/cmd/compile/internal/gc/select.go | 36 +-- src/cmd/compile/internal/gc/sinit.go | 80 +++---- src/cmd/compile/internal/gc/subr.go | 50 ++-- src/cmd/compile/internal/gc/swt.go | 58 ++--- src/cmd/compile/internal/gc/typecheck.go | 30 +-- src/cmd/compile/internal/gc/walk.go | 290 +++++++++++------------ src/cmd/compile/internal/ir/node.go | 120 ---------- 16 files changed, 471 insertions(+), 619 deletions(-) diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index 46ae76d58d8f7..730db9c1c94f4 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -312,21 +312,21 @@ func genhash(t *types.Type) *obj.LSym { // for i := 0; i < nelem; i++ ni := temp(types.Types[types.TINT]) - init := ir.Nod(ir.OAS, ni, nodintconst(0)) - cond := ir.Nod(ir.OLT, ni, nodintconst(t.NumElem())) - post := ir.Nod(ir.OAS, ni, ir.Nod(ir.OADD, ni, nodintconst(1))) - loop := ir.Nod(ir.OFOR, cond, post) + init := ir.NewAssignStmt(base.Pos, ni, nodintconst(0)) + cond := ir.NewBinaryExpr(base.Pos, ir.OLT, ni, nodintconst(t.NumElem())) + post := ir.NewAssignStmt(base.Pos, ni, ir.NewBinaryExpr(base.Pos, ir.OADD, ni, nodintconst(1))) + loop := ir.NewForStmt(base.Pos, nil, cond, post, nil) loop.PtrInit().Append(init) // h = hashel(&p[i], h) - call := ir.Nod(ir.OCALL, hashel, nil) + call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil) - nx := ir.Nod(ir.OINDEX, np, ni) + nx := ir.NewIndexExpr(base.Pos, np, ni) nx.SetBounded(true) na := nodAddr(nx) call.PtrList().Append(na) call.PtrList().Append(nh) - loop.PtrBody().Append(ir.Nod(ir.OAS, nh, call)) + loop.PtrBody().Append(ir.NewAssignStmt(base.Pos, nh, call)) fn.PtrBody().Append(loop) @@ -345,12 +345,12 @@ func genhash(t *types.Type) *obj.LSym { // Hash non-memory fields with appropriate hash function. if !IsRegularMemory(f.Type) { hashel := hashfor(f.Type) - call := ir.Nod(ir.OCALL, hashel, nil) - nx := nodSym(ir.OXDOT, np, f.Sym) // TODO: fields from other packages? 
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil) + nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym) // TODO: fields from other packages? na := nodAddr(nx) call.PtrList().Append(na) call.PtrList().Append(nh) - fn.PtrBody().Append(ir.Nod(ir.OAS, nh, call)) + fn.PtrBody().Append(ir.NewAssignStmt(base.Pos, nh, call)) i++ continue } @@ -360,19 +360,19 @@ func genhash(t *types.Type) *obj.LSym { // h = hashel(&p.first, size, h) hashel := hashmem(f.Type) - call := ir.Nod(ir.OCALL, hashel, nil) - nx := nodSym(ir.OXDOT, np, f.Sym) // TODO: fields from other packages? + call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil) + nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym) // TODO: fields from other packages? na := nodAddr(nx) call.PtrList().Append(na) call.PtrList().Append(nh) call.PtrList().Append(nodintconst(size)) - fn.PtrBody().Append(ir.Nod(ir.OAS, nh, call)) + fn.PtrBody().Append(ir.NewAssignStmt(base.Pos, nh, call)) i = next } } - r := ir.Nod(ir.ORETURN, nil, nil) + r := ir.NewReturnStmt(base.Pos, nil) r.PtrList().Append(nh) fn.PtrBody().Append(r) @@ -568,11 +568,11 @@ func geneq(t *types.Type) *obj.LSym { // checkIdx generates a node to check for equality at index i. checkIdx := func(i ir.Node) ir.Node { // pi := p[i] - pi := ir.Nod(ir.OINDEX, np, i) + pi := ir.NewIndexExpr(base.Pos, np, i) pi.SetBounded(true) pi.SetType(t.Elem()) // qi := q[i] - qi := ir.Nod(ir.OINDEX, nq, i) + qi := ir.NewIndexExpr(base.Pos, nq, i) qi.SetBounded(true) qi.SetType(t.Elem()) return eq(pi, qi) @@ -586,29 +586,29 @@ func geneq(t *types.Type) *obj.LSym { // Generate a series of checks. for i := int64(0); i < nelem; i++ { // if check {} else { goto neq } - nif := ir.Nod(ir.OIF, checkIdx(nodintconst(i)), nil) - nif.PtrRlist().Append(nodSym(ir.OGOTO, nil, neq)) + nif := ir.NewIfStmt(base.Pos, checkIdx(nodintconst(i)), nil, nil) + nif.PtrRlist().Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq)) fn.PtrBody().Append(nif) } if last { - fn.PtrBody().Append(ir.Nod(ir.OAS, nr, checkIdx(nodintconst(nelem)))) + fn.PtrBody().Append(ir.NewAssignStmt(base.Pos, nr, checkIdx(nodintconst(nelem)))) } } else { // Generate a for loop. 
 			// for i := 0; i < nelem; i++
 			i := temp(types.Types[types.TINT])
-			init := ir.Nod(ir.OAS, i, nodintconst(0))
-			cond := ir.Nod(ir.OLT, i, nodintconst(nelem))
-			post := ir.Nod(ir.OAS, i, ir.Nod(ir.OADD, i, nodintconst(1)))
-			loop := ir.Nod(ir.OFOR, cond, post)
+			init := ir.NewAssignStmt(base.Pos, i, nodintconst(0))
+			cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, nodintconst(nelem))
+			post := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, nodintconst(1)))
+			loop := ir.NewForStmt(base.Pos, nil, cond, post, nil)
 			loop.PtrInit().Append(init)
 			// if eq(pi, qi) {} else { goto neq }
-			nif := ir.Nod(ir.OIF, checkIdx(i), nil)
-			nif.PtrRlist().Append(nodSym(ir.OGOTO, nil, neq))
+			nif := ir.NewIfStmt(base.Pos, checkIdx(i), nil, nil)
+			nif.PtrRlist().Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
 			loop.PtrBody().Append(nif)
 			fn.PtrBody().Append(loop)
 			if last {
-				fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(true)))
+				fn.PtrBody().Append(ir.NewAssignStmt(base.Pos, nr, nodbool(true)))
 			}
 		}
 	}
@@ -631,13 +631,13 @@ func geneq(t *types.Type) *obj.LSym {
 	case types.TFLOAT32, types.TFLOAT64:
 		checkAll(2, true, func(pi, qi ir.Node) ir.Node {
 			// p[i] == q[i]
-			return ir.Nod(ir.OEQ, pi, qi)
+			return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi)
 		})
 		// TODO: pick apart structs, do them piecemeal too
 	default:
 		checkAll(1, true, func(pi, qi ir.Node) ir.Node {
 			// p[i] == q[i]
-			return ir.Nod(ir.OEQ, pi, qi)
+			return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi)
 		})
 	}
 
@@ -669,15 +669,15 @@ func geneq(t *types.Type) *obj.LSym {
 				// Enforce ordering by starting a new set of reorderable conditions.
 				conds = append(conds, []ir.Node{})
 			}
-			p := nodSym(ir.OXDOT, np, f.Sym)
-			q := nodSym(ir.OXDOT, nq, f.Sym)
+			p := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym)
+			q := ir.NewSelectorExpr(base.Pos, ir.OXDOT, nq, f.Sym)
 			switch {
 			case f.Type.IsString():
 				eqlen, eqmem := eqstring(p, q)
 				and(eqlen)
 				and(eqmem)
 			default:
-				and(ir.Nod(ir.OEQ, p, q))
+				and(ir.NewBinaryExpr(base.Pos, ir.OEQ, p, q))
 			}
 			if EqCanPanic(f.Type) {
 				// Also enforce ordering after something that can panic.
@@ -718,35 +718,35 @@ func geneq(t *types.Type) *obj.LSym {
 	}
 
 	if len(flatConds) == 0 {
-		fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(true)))
+		fn.PtrBody().Append(ir.NewAssignStmt(base.Pos, nr, nodbool(true)))
 	} else {
 		for _, c := range flatConds[:len(flatConds)-1] {
 			// if cond {} else { goto neq }
-			n := ir.Nod(ir.OIF, c, nil)
-			n.PtrRlist().Append(nodSym(ir.OGOTO, nil, neq))
+			n := ir.NewIfStmt(base.Pos, c, nil, nil)
+			n.PtrRlist().Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
 			fn.PtrBody().Append(n)
 		}
-		fn.PtrBody().Append(ir.Nod(ir.OAS, nr, flatConds[len(flatConds)-1]))
+		fn.PtrBody().Append(ir.NewAssignStmt(base.Pos, nr, flatConds[len(flatConds)-1]))
 	}
 
 	// ret:
 	// return
 	ret := autolabel(".ret")
-	fn.PtrBody().Append(nodSym(ir.OLABEL, nil, ret))
-	fn.PtrBody().Append(ir.Nod(ir.ORETURN, nil, nil))
+	fn.PtrBody().Append(ir.NewLabelStmt(base.Pos, ret))
+	fn.PtrBody().Append(ir.NewReturnStmt(base.Pos, nil))
 
 	// neq:
 	// r = false
 	// return (or goto ret)
-	fn.PtrBody().Append(nodSym(ir.OLABEL, nil, neq))
-	fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(false)))
+	fn.PtrBody().Append(ir.NewLabelStmt(base.Pos, neq))
+	fn.PtrBody().Append(ir.NewAssignStmt(base.Pos, nr, nodbool(false)))
 	if EqCanPanic(t) || anyCall(fn) {
 		// Epilogue is large, so share it with the equal case.
-		fn.PtrBody().Append(nodSym(ir.OGOTO, nil, ret))
+		fn.PtrBody().Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, ret))
 	} else {
 		// Epilogue is small, so don't bother sharing.
-		fn.PtrBody().Append(ir.Nod(ir.ORETURN, nil, nil))
+		fn.PtrBody().Append(ir.NewReturnStmt(base.Pos, nil))
 	}
 	// TODO(khr): the epilogue size detection condition above isn't perfect.
 	// We should really do a generic CL that shares epilogues across
@@ -793,9 +793,9 @@ func anyCall(fn *ir.Func) bool {
 // eqfield returns the node
 // p.field == q.field
 func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node {
-	nx := nodSym(ir.OXDOT, p, field)
-	ny := nodSym(ir.OXDOT, q, field)
-	ne := ir.Nod(ir.OEQ, nx, ny)
+	nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)
+	ny := ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)
+	ne := ir.NewBinaryExpr(base.Pos, ir.OEQ, nx, ny)
 	return ne
 }
 
@@ -808,10 +808,10 @@ func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node {
 func eqstring(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
 	s = conv(s, types.Types[types.TSTRING])
 	t = conv(t, types.Types[types.TSTRING])
-	sptr := ir.Nod(ir.OSPTR, s, nil)
-	tptr := ir.Nod(ir.OSPTR, t, nil)
-	slen := conv(ir.Nod(ir.OLEN, s, nil), types.Types[types.TUINTPTR])
-	tlen := conv(ir.Nod(ir.OLEN, t, nil), types.Types[types.TUINTPTR])
+	sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, s)
+	tptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, t)
+	slen := conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, s), types.Types[types.TUINTPTR])
+	tlen := conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, t), types.Types[types.TUINTPTR])
 
 	fn := syslook("memequal")
 	fn = substArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8])
@@ -843,10 +843,10 @@ func eqinterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
 		fn = syslook("ifaceeq")
 	}
 
-	stab := ir.Nod(ir.OITAB, s, nil)
-	ttab := ir.Nod(ir.OITAB, t, nil)
-	sdata := ir.Nod(ir.OIDATA, s, nil)
-	tdata := ir.Nod(ir.OIDATA, t, nil)
+	stab := ir.NewUnaryExpr(base.Pos, ir.OITAB, s)
+	ttab := ir.NewUnaryExpr(base.Pos, ir.OITAB, t)
+	sdata := ir.NewUnaryExpr(base.Pos, ir.OIDATA, s)
+	tdata := ir.NewUnaryExpr(base.Pos, ir.OIDATA, t)
 	sdata.SetType(types.Types[types.TUNSAFEPTR])
 	tdata.SetType(types.Types[types.TUNSAFEPTR])
 	sdata.SetTypecheck(1)
@@ -864,11 +864,11 @@ func eqinterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
 // eqmem returns the node
 // memequal(&p.field, &q.field [, size])
 func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node {
-	nx := typecheck(nodAddr(nodSym(ir.OXDOT, p, field)), ctxExpr)
-	ny := typecheck(nodAddr(nodSym(ir.OXDOT, q, field)), ctxExpr)
+	nx := typecheck(nodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)), ctxExpr)
+	ny := typecheck(nodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)), ctxExpr)
 
 	fn, needsize := eqmemfunc(size, nx.Type().Elem())
-	call := ir.Nod(ir.OCALL, fn, nil)
+	call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
 	call.PtrList().Append(nx)
 	call.PtrList().Append(ny)
 	if needsize {
diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go
index 1f4bf969adad6..f47b2e2b075cc 100644
--- a/src/cmd/compile/internal/gc/closure.go
+++ b/src/cmd/compile/internal/gc/closure.go
@@ -296,7 +296,7 @@ func transformclosure(fn *ir.Func) {
 			// If it is a small variable captured by value, downgrade it to PAUTO.
 			v.SetClass(ir.PAUTO)
 			fn.Dcl = append(fn.Dcl, v)
-			body = append(body, ir.Nod(ir.OAS, v, cr))
+			body = append(body, ir.NewAssignStmt(base.Pos, v, cr))
 		} else {
 			// Declare variable holding addresses taken from closure
 			// and initialize in entry prologue.
@@ -311,7 +311,7 @@ func transformclosure(fn *ir.Func) {
 			if v.Byval() {
 				src = nodAddr(cr)
 			}
-			body = append(body, ir.Nod(ir.OAS, addr, src))
+			body = append(body, ir.NewAssignStmt(base.Pos, addr, src))
 		}
 	}
 
@@ -392,9 +392,9 @@ func walkclosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node {
 
 	typ := closureType(clo)
 
-	clos := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(typ))
+	clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ).(ir.Ntype), nil)
 	clos.SetEsc(clo.Esc())
-	clos.PtrList().Set(append([]ir.Node{ir.Nod(ir.OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...))
+	clos.PtrList().Set(append([]ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, fn.Nname)}, fn.ClosureEnter.Slice()...))
 
 	addr := nodAddr(clos)
 	addr.SetEsc(clo.Esc())
@@ -473,17 +473,17 @@ func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir.
 		var body []ir.Node
 		if rcvrtype.IsPtr() || rcvrtype.IsInterface() {
 			ptr.SetType(rcvrtype)
-			body = append(body, ir.Nod(ir.OAS, ptr, cr))
+			body = append(body, ir.NewAssignStmt(base.Pos, ptr, cr))
 		} else {
 			ptr.SetType(types.NewPtr(rcvrtype))
-			body = append(body, ir.Nod(ir.OAS, ptr, nodAddr(cr)))
+			body = append(body, ir.NewAssignStmt(base.Pos, ptr, nodAddr(cr)))
 		}
 
-		call := ir.Nod(ir.OCALL, nodSym(ir.OXDOT, ptr, meth), nil)
+		call := ir.NewCallExpr(base.Pos, ir.OCALL, ir.NewSelectorExpr(base.Pos, ir.OXDOT, ptr, meth), nil)
 		call.PtrList().Set(paramNnames(tfn.Type()))
 		call.SetIsDDD(tfn.Type().IsVariadic())
 		if t0.NumResults() != 0 {
-			ret := ir.Nod(ir.ORETURN, nil, nil)
+			ret := ir.NewReturnStmt(base.Pos, nil)
 			ret.PtrList().Set1(call)
 			body = append(body, ret)
 		} else {
@@ -532,18 +532,18 @@ func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node {
 		n.SetLeft(cheapexpr(n.Left(), init))
 		n.SetLeft(walkexpr(n.Left(), nil))
 
-		tab := typecheck(ir.Nod(ir.OITAB, n.Left(), nil), ctxExpr)
+		tab := typecheck(ir.NewUnaryExpr(base.Pos, ir.OITAB, n.Left()), ctxExpr)
 
-		c := ir.Nod(ir.OCHECKNIL, tab, nil)
+		c := ir.NewUnaryExpr(base.Pos, ir.OCHECKNIL, tab)
 		c.SetTypecheck(1)
 		init.Append(c)
 	}
 
 	typ := partialCallType(n)
 
-	clos := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(typ))
+	clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ).(ir.Ntype), nil)
 	clos.SetEsc(n.Esc())
-	clos.PtrList().Set2(ir.Nod(ir.OCFUNC, n.Func().Nname, nil), n.Left())
+	clos.PtrList().Set2(ir.NewUnaryExpr(base.Pos, ir.OCFUNC, n.Func().Nname), n.Left())
 
 	addr := nodAddr(clos)
 	addr.SetEsc(n.Esc())
diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go
index 558bdbef9241d..3cfb24f2fc4b8 100644
--- a/src/cmd/compile/internal/gc/dcl.go
+++ b/src/cmd/compile/internal/gc/dcl.go
@@ -136,7 +136,7 @@ func variter(vl []*ir.Name, t ir.Ntype, el []ir.Node) []ir.Node {
 	if len(el) == 1 && len(vl) > 1 {
 		e := el[0]
-		as2 := ir.Nod(ir.OAS2, nil, nil)
+		as2 := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
 		as2.PtrRlist().Set1(e)
 		for _, v := range vl {
 			as2.PtrList().Append(v)
@@ -144,7 +144,7 @@ func variter(vl []*ir.Name, t ir.Ntype, el []ir.Node) []ir.Node {
 			v.Ntype = t
 			v.Defn = as2
 			if Curfn != nil {
-				init = append(init, ir.Nod(ir.ODCL, v, nil))
+				init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v))
 			}
 		}
 
@@ -166,9 +166,9 @@ func variter(vl []*ir.Name, t ir.Ntype, el []ir.Node) []ir.Node {
 
 		if e != nil || Curfn != nil || ir.IsBlank(v) {
 			if Curfn != nil {
-				init = append(init, ir.Nod(ir.ODCL, v, nil))
+				init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v))
 			}
-			as := ir.Nod(ir.OAS, v, e)
+			as := ir.NewAssignStmt(base.Pos, v, e)
 			init = append(init, as)
 			if e != nil {
 				v.Defn = as
@@ -312,7 +312,7 @@ func colasdefn(left []ir.Node, defn ir.Node) {
 		n := NewName(n.Sym())
 		declare(n, dclcontext)
 		n.Defn = defn
-		defn.PtrInit().Append(ir.Nod(ir.ODCL, n, nil))
+		defn.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n))
 		left[i] = n
 	}
diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go
index aa498a009705a..b0ad01bc5d262 100644
--- a/src/cmd/compile/internal/gc/gsubr.go
+++ b/src/cmd/compile/internal/gc/gsubr.go
@@ -267,14 +267,14 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) {
 	// OTAILCALL or something to this effect.
 	var tail ir.Node
 	if tfn.Type().NumResults() == 0 && tfn.Type().NumParams() == 0 && tfn.Type().NumRecvs() == 0 {
-		tail = nodSym(ir.ORETJMP, nil, f.Nname.Sym())
+		tail = ir.NewBranchStmt(base.Pos, ir.ORETJMP, f.Nname.Sym())
 	} else {
-		call := ir.Nod(ir.OCALL, f.Nname, nil)
+		call := ir.NewCallExpr(base.Pos, ir.OCALL, f.Nname, nil)
 		call.PtrList().Set(paramNnames(tfn.Type()))
 		call.SetIsDDD(tfn.Type().IsVariadic())
 		tail = call
 		if tfn.Type().NumResults() > 0 {
-			n := ir.Nod(ir.ORETURN, nil, nil)
+			n := ir.NewReturnStmt(base.Pos, nil)
 			n.PtrList().Set1(call)
 			tail = n
 		}
diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go
index 5f72cedb669d7..1148d329a3cb2 100644
--- a/src/cmd/compile/internal/gc/iimport.go
+++ b/src/cmd/compile/internal/gc/iimport.go
@@ -770,7 +770,7 @@ func (r *importReader) caseList(sw ir.Node) []ir.Node {
 
 	cases := make([]ir.Node, r.uint64())
 	for i := range cases {
-		cas := ir.NodAt(r.pos(), ir.OCASE, nil, nil)
+		cas := ir.NewCaseStmt(r.pos(), nil, nil)
 		cas.PtrList().Set(r.stmtList())
 		if namedTypeSwitch {
 			// Note: per-case variables will have distinct, dotted
@@ -864,7 +864,7 @@ func (r *importReader) node() ir.Node {
 		// TODO(mdempsky): Export position information for OSTRUCTKEY nodes.
 		savedlineno := base.Pos
 		base.Pos = r.pos()
-		n := ir.NodAt(base.Pos, ir.OCOMPLIT, nil, ir.TypeNode(r.typ()))
+		n := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(r.typ()).(ir.Ntype), nil)
 		n.PtrList().Set(r.elemList()) // special handling of field names
 		base.Pos = savedlineno
 		return n
@@ -873,14 +873,14 @@ func (r *importReader) node() ir.Node {
 	// 	unreachable - mapped to case OCOMPLIT below by exporter
 
 	case ir.OCOMPLIT:
-		n := ir.NodAt(r.pos(), ir.OCOMPLIT, nil, ir.TypeNode(r.typ()))
+		n := ir.NewCompLitExpr(r.pos(), ir.OCOMPLIT, ir.TypeNode(r.typ()).(ir.Ntype), nil)
 		n.PtrList().Set(r.exprList())
 		return n
 
 	case ir.OKEY:
 		pos := r.pos()
 		left, right := r.exprsOrNil()
-		return ir.NodAt(pos, ir.OKEY, left, right)
+		return ir.NewKeyExpr(pos, left, right)
 
 	// case OSTRUCTKEY:
 	// 	unreachable - handled in case OSTRUCTLIT by elemList
@@ -893,13 +893,13 @@ func (r *importReader) node() ir.Node {
 
 	case ir.OXDOT:
 		// see parser.new_dotname
-		return npos(r.pos(), nodSym(ir.OXDOT, r.expr(), r.ident()))
+		return ir.NewSelectorExpr(r.pos(), ir.OXDOT, r.expr(), r.ident())
 
 	// case ODOTTYPE, ODOTTYPE2:
 	// 	unreachable - mapped to case ODOTTYPE below by exporter
 
 	case ir.ODOTTYPE:
-		n := ir.NodAt(r.pos(), ir.ODOTTYPE, r.expr(), nil)
+		n := ir.NewTypeAssertExpr(r.pos(), r.expr(), nil)
 		n.SetType(r.typ())
 		return n
 
@@ -907,7 +907,7 @@ func (r *importReader) node() ir.Node {
 	// 	unreachable - mapped to cases below by exporter
 
 	case ir.OINDEX:
-		return ir.NodAt(r.pos(), ir.OINDEX, r.expr(), r.expr())
+		return ir.NewIndexExpr(r.pos(), r.expr(), r.expr())
 
 	case ir.OSLICE, ir.OSLICE3:
 		n := ir.NewSliceExpr(r.pos(), op, r.expr())
@@ -923,7 +923,7 @@ func (r *importReader) node() ir.Node {
 	// 	unreachable - mapped to OCONV case below by exporter
 
 	case ir.OCONV:
-		n := ir.NodAt(r.pos(), ir.OCONV, r.expr(), nil)
+		n := ir.NewConvExpr(r.pos(), ir.OCONV, nil, r.expr())
 		n.SetType(r.typ())
 		return n
 
@@ -939,7 +939,7 @@ func (r *importReader) node() ir.Node {
 	// 	unreachable - mapped to OCALL case below by exporter
 
 	case ir.OCALL:
-		n := ir.NodAt(r.pos(), ir.OCALL, nil, nil)
+		n := ir.NewCallExpr(r.pos(), ir.OCALL, nil, nil)
 		n.PtrInit().Set(r.stmtList())
 		n.SetLeft(r.expr())
 		n.PtrList().Set(r.exprList())
@@ -978,7 +978,7 @@ func (r *importReader) node() ir.Node {
 		list := r.exprList()
 		x := npos(pos, list[0])
 		for _, y := range list[1:] {
-			x = ir.NodAt(pos, ir.OADD, x, y)
+			x = ir.NewBinaryExpr(pos, ir.OADD, x, y)
 		}
 		return x
 
@@ -992,18 +992,18 @@ func (r *importReader) node() ir.Node {
 		declare(lhs, ir.PAUTO)
 
 		var stmts ir.Nodes
-		stmts.Append(ir.Nod(ir.ODCL, lhs, nil))
-		stmts.Append(ir.Nod(ir.OAS, lhs, nil))
-		return npos(pos, liststmt(stmts.Slice()))
+		stmts.Append(ir.NewDecl(base.Pos, ir.ODCL, lhs))
+		stmts.Append(ir.NewAssignStmt(base.Pos, lhs, nil))
+		return ir.NewBlockStmt(pos, stmts.Slice())
 
 	// case OAS, OASWB:
 	// 	unreachable - mapped to OAS case below by exporter
 
 	case ir.OAS:
-		return ir.NodAt(r.pos(), ir.OAS, r.expr(), r.expr())
+		return ir.NewAssignStmt(r.pos(), r.expr(), r.expr())
 
 	case ir.OASOP:
-		n := ir.NodAt(r.pos(), ir.OASOP, nil, nil)
+		n := ir.NewAssignOpStmt(r.pos(), ir.OXXX, nil, nil)
 		n.SetSubOp(r.op())
 		n.SetLeft(r.expr())
 		if !r.bool() {
@@ -1018,13 +1018,13 @@ func (r *importReader) node() ir.Node {
 	// 	unreachable - mapped to OAS2 case below by exporter
 
 	case ir.OAS2:
-		n := ir.NodAt(r.pos(), ir.OAS2, nil, nil)
+		n := ir.NewAssignListStmt(r.pos(), ir.OAS2, nil, nil)
 		n.PtrList().Set(r.exprList())
 		n.PtrRlist().Set(r.exprList())
 		return n
 
 	case ir.ORETURN:
-		n := ir.NodAt(r.pos(), ir.ORETURN, nil, nil)
+		n := ir.NewReturnStmt(r.pos(), nil)
 		n.PtrList().Set(r.exprList())
 		return n
 
@@ -1035,7 +1035,7 @@ func (r *importReader) node() ir.Node {
 		return ir.NewGoDeferStmt(r.pos(), op, r.expr())
 
 	case ir.OIF:
-		n := ir.NodAt(r.pos(), ir.OIF, nil, nil)
+		n := ir.NewIfStmt(r.pos(), nil, nil, nil)
 		n.PtrInit().Set(r.stmtList())
 		n.SetLeft(r.expr())
 		n.PtrBody().Set(r.stmtList())
@@ -1043,7 +1043,7 @@ func (r *importReader) node() ir.Node {
 		return n
 
 	case ir.OFOR:
-		n := ir.NodAt(r.pos(), ir.OFOR, nil, nil)
+		n := ir.NewForStmt(r.pos(), nil, nil, nil, nil)
 		n.PtrInit().Set(r.stmtList())
 		left, right := r.exprsOrNil()
 		n.SetLeft(left)
@@ -1052,21 +1052,21 @@ func (r *importReader) node() ir.Node {
 		return n
 
 	case ir.ORANGE:
-		n := ir.NodAt(r.pos(), ir.ORANGE, nil, nil)
+		n := ir.NewRangeStmt(r.pos(), nil, nil, nil)
 		n.PtrList().Set(r.stmtList())
 		n.SetRight(r.expr())
 		n.PtrBody().Set(r.stmtList())
 		return n
 
 	case ir.OSELECT:
-		n := ir.NodAt(r.pos(), ir.OSELECT, nil, nil)
+		n := ir.NewSelectStmt(r.pos(), nil)
 		n.PtrInit().Set(r.stmtList())
 		r.exprsOrNil() // TODO(rsc): Delete (and fix exporter). These are always nil.
 		n.PtrList().Set(r.caseList(n))
 		return n
 
 	case ir.OSWITCH:
-		n := ir.NodAt(r.pos(), ir.OSWITCH, nil, nil)
+		n := ir.NewSwitchStmt(r.pos(), nil, nil)
 		n.PtrInit().Set(r.stmtList())
 		left, _ := r.exprsOrNil()
 		n.SetLeft(left)
@@ -1077,7 +1077,7 @@ func (r *importReader) node() ir.Node {
 	// 	handled by caseList
 
 	case ir.OFALL:
-		n := ir.NodAt(r.pos(), ir.OFALL, nil, nil)
+		n := ir.NewBranchStmt(r.pos(), ir.OFALL, nil)
 		return n
 
 	// case OEMPTY:
@@ -1113,7 +1113,7 @@ func (r *importReader) elemList() []ir.Node {
 	list := make([]ir.Node, c)
 	for i := range list {
 		s := r.ident()
-		list[i] = nodSym(ir.OSTRUCTKEY, r.expr(), s)
+		list[i] = ir.NewStructKeyExpr(base.Pos, s, r.expr())
 	}
 	return list
 }
diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go
index fc020000c774a..122c19f6df18e 100644
--- a/src/cmd/compile/internal/gc/inl.go
+++ b/src/cmd/compile/internal/gc/inl.go
@@ -520,7 +520,7 @@ func inlcalls(fn *ir.Func) {
 
 // Turn an OINLCALL into a statement.
 func inlconv2stmt(inlcall *ir.InlinedCallExpr) ir.Node {
-	n := ir.NodAt(inlcall.Pos(), ir.OBLOCK, nil, nil)
+	n := ir.NewBlockStmt(inlcall.Pos(), nil)
 	n.SetList(inlcall.Init())
 	n.PtrList().AppendNodes(inlcall.PtrBody())
 	return n
@@ -785,7 +785,7 @@ func inlParam(t *types.Field, as ir.Node, inlvars map[*ir.Name]ir.Node) ir.Node
 	if inlvar == nil {
 		base.Fatalf("missing inlvar for %v", n)
 	}
-	as.PtrInit().Append(ir.Nod(ir.ODCL, inlvar, nil))
+	as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, inlvar))
 	inlvar.Name().Defn = as
 	return inlvar
 }
@@ -907,20 +907,20 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
 
 			if v.Byval() {
 				iv := typecheck(inlvar(v), ctxExpr)
-				ninit.Append(ir.Nod(ir.ODCL, iv, nil))
-				ninit.Append(typecheck(ir.Nod(ir.OAS, iv, o), ctxStmt))
+				ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, iv))
+				ninit.Append(typecheck(ir.NewAssignStmt(base.Pos, iv, o), ctxStmt))
 				inlvars[v] = iv
 			} else {
 				addr := NewName(lookup("&" + v.Sym().Name))
 				addr.SetType(types.NewPtr(v.Type()))
 				ia := typecheck(inlvar(addr), ctxExpr)
-				ninit.Append(ir.Nod(ir.ODCL, ia, nil))
-				ninit.Append(typecheck(ir.Nod(ir.OAS, ia, nodAddr(o)), ctxStmt))
+				ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, ia))
+				ninit.Append(typecheck(ir.NewAssignStmt(base.Pos, ia, nodAddr(o)), ctxStmt))
 				inlvars[addr] = ia
 
 				// When capturing by reference, all occurrence of the captured var
 				// must be substituted with dereference of the temporary address
-				inlvars[v] = typecheck(ir.Nod(ir.ODEREF, ia, nil), ctxExpr)
+				inlvars[v] = typecheck(ir.NewStarExpr(base.Pos, ia), ctxExpr)
 			}
 		}
 	}
@@ -994,7 +994,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
 	}
 
 	// Assign arguments to the parameters' temp names.
-	as := ir.Nod(ir.OAS2, nil, nil)
+	as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
 	as.SetColas(true)
 	if n.Op() == ir.OCALLMETH {
 		sel := n.Left().(*ir.SelectorExpr)
@@ -1036,7 +1036,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
 			vas.SetRight(nodnil())
 			vas.Right().SetType(param.Type)
 		} else {
-			lit := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(param.Type))
+			lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(param.Type).(ir.Ntype), nil)
 			lit.PtrList().Set(varargs)
 			vas.SetRight(lit)
 		}
@@ -1053,8 +1053,8 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
 	if !delayretvars {
 		// Zero the return parameters.
 		for _, n := range retvars {
-			ninit.Append(ir.Nod(ir.ODCL, n, nil))
-			ras := ir.Nod(ir.OAS, n, nil)
+			ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, n))
+			ras := ir.NewAssignStmt(base.Pos, n, nil)
 			ninit.Append(typecheck(ras, ctxStmt))
 		}
 	}
@@ -1076,7 +1076,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
 	// to put a breakpoint. Not sure if that's really necessary or not
 	// (in which case it could go at the end of the function instead).
 	// Note issue 28603.
-	inlMark := ir.Nod(ir.OINLMARK, nil, nil)
+	inlMark := ir.NewInlineMarkStmt(base.Pos, types.BADWIDTH)
 	inlMark.SetPos(n.Pos().WithIsStmt())
 	inlMark.SetOffset(int64(newIndex))
 	ninit.Append(inlMark)
@@ -1100,7 +1100,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
 
 	body := subst.list(ir.AsNodes(fn.Inl.Body))
 
-	lab := nodSym(ir.OLABEL, nil, retlabel)
+	lab := ir.NewLabelStmt(base.Pos, retlabel)
 	body = append(body, lab)
 
 	typecheckslice(body, ctxStmt)
@@ -1113,7 +1113,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
 
 	//dumplist("ninit post", ninit);
 
-	call := ir.Nod(ir.OINLCALL, nil, nil)
+	call := ir.NewInlinedCallExpr(base.Pos, nil, nil)
 	call.PtrInit().Set(ninit.Slice())
 	call.PtrBody().Set(body)
 	call.PtrRlist().Set(retvars)
@@ -1261,7 +1261,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
 		// this return is guaranteed to belong to the current inlined function.
 		init := subst.list(n.Init())
 		if len(subst.retvars) != 0 && n.List().Len() != 0 {
-			as := ir.Nod(ir.OAS2, nil, nil)
+			as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
 
 			// Make a shallow copy of retvars.
 			// Otherwise OINLCALL.Rlist will be the same list,
@@ -1273,14 +1273,14 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
 
 			if subst.delayretvars {
 				for _, n := range as.List().Slice() {
-					as.PtrInit().Append(ir.Nod(ir.ODCL, n, nil))
+					as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n))
 					n.Name().Defn = as
 				}
 			}
 
 			init = append(init, typecheck(as, ctxStmt))
 		}
-		init = append(init, nodSym(ir.OGOTO, nil, subst.retlabel))
+		init = append(init, ir.NewBranchStmt(base.Pos, ir.OGOTO, subst.retlabel))
 		typecheckslice(init, ctxStmt)
 		return ir.NewBlockStmt(base.Pos, init)
 
@@ -1360,9 +1360,9 @@ func devirtualizeCall(call *ir.CallExpr) {
 		return
 	}
 
-	dt := ir.NodAt(sel.Pos(), ir.ODOTTYPE, sel.Left(), nil)
+	dt := ir.NewTypeAssertExpr(sel.Pos(), sel.Left(), nil)
 	dt.SetType(typ)
-	x := typecheck(nodlSym(sel.Pos(), ir.OXDOT, dt, sel.Sym()), ctxExpr|ctxCallee)
+	x := typecheck(ir.NewSelectorExpr(sel.Pos(), ir.OXDOT, dt, sel.Sym()), ctxExpr|ctxCallee)
 	switch x.Op() {
 	case ir.ODOTMETH:
 		if base.Flag.LowerM != 0 {
diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go
index 97a9ac4396194..d2d908bf9f6f3 100644
--- a/src/cmd/compile/internal/gc/noder.go
+++ b/src/cmd/compile/internal/gc/noder.go
@@ -165,7 +165,7 @@ func (p *noder) funcBody(fn *ir.Func, block *syntax.BlockStmt) {
 	if block != nil {
 		body := p.stmts(block.List)
 		if body == nil {
-			body = []ir.Node{ir.Nod(ir.OBLOCK, nil, nil)}
+			body = []ir.Node{ir.NewBlockStmt(base.Pos, nil)}
 		}
 		fn.PtrBody().Set(body)
 
@@ -455,7 +455,7 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node {
 		n.Defn = v
 		n.SetIota(cs.iota)
 
-		nn = append(nn, p.nod(decl, ir.ODCLCONST, n, nil))
+		nn = append(nn, ir.NewDecl(p.pos(decl), ir.ODCLCONST, n))
 	}
 
 	if len(values) > len(names) {
@@ -484,7 +484,7 @@ func (p *noder) typeDecl(decl *syntax.TypeDecl) ir.Node {
 		p.checkUnused(pragma)
 	}
 
-	nod := p.nod(decl, ir.ODCLTYPE, n, nil)
+	nod := ir.NewDecl(p.pos(decl), ir.ODCLTYPE, n)
 	if n.Alias() && !langSupported(1, 9, types.LocalPkg) {
 		base.ErrorfAt(nod.Pos(), "type aliases only supported as of -lang=go1.9")
 	}
@@ -648,7 +648,7 @@ func (p *noder) expr(expr syntax.Expr) ir.Node {
 		n.SetDiag(expr.Bad) // avoid follow-on errors if there was a syntax error
 		return n
 	case *syntax.CompositeLit:
-		n := p.nod(expr, ir.OCOMPLIT, nil, nil)
+		n := ir.NewCompLitExpr(p.pos(expr), ir.OCOMPLIT, nil, nil)
 		if expr.Type != nil {
 			n.SetRight(p.expr(expr.Type))
 		}
@@ -661,11 +661,11 @@ func (p *noder) expr(expr syntax.Expr) ir.Node {
 		return n
 	case *syntax.KeyValueExpr:
 		// use position of expr.Key rather than of expr (which has position of ':')
-		return p.nod(expr.Key, ir.OKEY, p.expr(expr.Key), p.wrapname(expr.Value, p.expr(expr.Value)))
+		return ir.NewKeyExpr(p.pos(expr.Key), p.expr(expr.Key), p.wrapname(expr.Value, p.expr(expr.Value)))
	case *syntax.FuncLit:
 		return p.funcLit(expr)
 	case *syntax.ParenExpr:
-		return p.nod(expr, ir.OPAREN, p.expr(expr.X), nil)
+		return ir.NewParenExpr(p.pos(expr), p.expr(expr.X))
 	case *syntax.SelectorExpr:
 		// parser.new_dotname
 		obj := p.expr(expr.X)
@@ -674,11 +674,11 @@ func (p *noder) expr(expr syntax.Expr) ir.Node {
 			pack.Used = true
 			return importName(pack.Pkg.Lookup(expr.Sel.Value))
 		}
-		n := nodSym(ir.OXDOT, obj, p.name(expr.Sel))
+		n := ir.NewSelectorExpr(base.Pos, ir.OXDOT, obj, p.name(expr.Sel))
 		n.SetPos(p.pos(expr)) // lineno may have been changed by p.expr(expr.X)
 		return n
 	case *syntax.IndexExpr:
-		return p.nod(expr, ir.OINDEX, p.expr(expr.X), p.expr(expr.Index))
+		return ir.NewIndexExpr(p.pos(expr), p.expr(expr.X), p.expr(expr.Index))
 	case *syntax.SliceExpr:
 		op := ir.OSLICE
 		if expr.Full {
@@ -694,7 +694,7 @@ func (p *noder) expr(expr syntax.Expr) ir.Node {
 		n.SetSliceBounds(index[0], index[1], index[2])
 		return n
 	case *syntax.AssertExpr:
-		return p.nod(expr, ir.ODOTTYPE, p.expr(expr.X), p.typeExpr(expr.Type))
+		return ir.NewTypeAssertExpr(p.pos(expr), p.expr(expr.X), p.typeExpr(expr.Type).(ir.Ntype))
 	case *syntax.Operation:
 		if expr.Op == syntax.Add && expr.Y != nil {
 			return p.sum(expr)
@@ -718,7 +718,7 @@ func (p *noder) expr(expr syntax.Expr) ir.Node {
 		}
 		return ir.NewBinaryExpr(pos, op, x, y)
 	case *syntax.CallExpr:
-		n := p.nod(expr, ir.OCALL, p.expr(expr.Fun), nil)
+		n := ir.NewCallExpr(p.pos(expr), ir.OCALL, p.expr(expr.Fun), nil)
 		n.PtrList().Set(p.exprs(expr.ArgList))
 		n.SetIsDDD(expr.HasDots)
 		return n
@@ -828,7 +828,7 @@ func (p *noder) sum(x syntax.Expr) ir.Node {
 			nstr = nil
 			chunks = chunks[:0]
 		}
-		n = p.nod(add, ir.OADD, n, r)
+		n = ir.NewBinaryExpr(p.pos(add), ir.OADD, n, r)
 	}
 	if len(chunks) > 1 {
 		nstr.SetVal(constant.MakeString(strings.Join(chunks, "")))
@@ -994,13 +994,13 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node {
 		l := p.blockStmt(stmt)
 		if len(l) == 0 {
 			// TODO(mdempsky): Line number?
-			return ir.Nod(ir.OBLOCK, nil, nil)
+			return ir.NewBlockStmt(base.Pos, nil)
 		}
 		return liststmt(l)
 	case *syntax.ExprStmt:
 		return p.wrapname(stmt, p.expr(stmt.X))
 	case *syntax.SendStmt:
-		return p.nod(stmt, ir.OSEND, p.expr(stmt.Chan), p.expr(stmt.Value))
+		return ir.NewSendStmt(p.pos(stmt), p.expr(stmt.Chan), p.expr(stmt.Value))
 	case *syntax.DeclStmt:
 		return liststmt(p.decls(stmt.DeclList))
 	case *syntax.AssignStmt:
@@ -1012,14 +1012,14 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node {
 
 		rhs := p.exprList(stmt.Rhs)
 		if list, ok := stmt.Lhs.(*syntax.ListExpr); ok && len(list.ElemList) != 1 || len(rhs) != 1 {
-			n := p.nod(stmt, ir.OAS2, nil, nil)
+			n := ir.NewAssignListStmt(p.pos(stmt), ir.OAS2, nil, nil)
 			n.SetColas(stmt.Op == syntax.Def)
 			n.PtrList().Set(p.assignList(stmt.Lhs, n, n.Colas()))
 			n.PtrRlist().Set(rhs)
 			return n
 		}
 
-		n := p.nod(stmt, ir.OAS, nil, nil)
+		n := ir.NewAssignStmt(p.pos(stmt), nil, nil)
 		n.SetColas(stmt.Op == syntax.Def)
 		n.SetLeft(p.assignList(stmt.Lhs, n, n.Colas())[0])
 		n.SetRight(rhs[0])
@@ -1063,7 +1063,7 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node {
 		if stmt.Results != nil {
 			results = p.exprList(stmt.Results)
 		}
-		n := p.nod(stmt, ir.ORETURN, nil, nil)
+		n := ir.NewReturnStmt(p.pos(stmt), nil)
 		n.PtrList().Set(results)
 		if n.List().Len() == 0 && Curfn != nil {
 			for _, ln := range Curfn.Dcl {
@@ -1139,7 +1139,7 @@ func (p *noder) assignList(expr syntax.Expr, defn ir.Node, colas bool) []ir.Node
 		n := NewName(sym)
 		declare(n, dclcontext)
 		n.Defn = defn
-		defn.PtrInit().Append(ir.Nod(ir.ODCL, n, nil))
+		defn.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n))
 		res[i] = n
 	}
@@ -1158,7 +1158,7 @@ func (p *noder) blockStmt(stmt *syntax.BlockStmt) []ir.Node {
 
 func (p *noder) ifStmt(stmt *syntax.IfStmt) ir.Node {
 	p.openScope(stmt.Pos())
-	n := p.nod(stmt, ir.OIF, nil, nil)
+	n := ir.NewIfStmt(p.pos(stmt), nil, nil, nil)
 	if stmt.Init != nil {
 		n.PtrInit().Set1(p.stmt(stmt.Init))
 	}
@@ -1185,7 +1185,7 @@ func (p *noder) forStmt(stmt *syntax.ForStmt) ir.Node {
 			panic("unexpected RangeClause")
 		}
 
-		n := p.nod(r, ir.ORANGE, nil, p.expr(r.X))
+		n := ir.NewRangeStmt(p.pos(r), nil, p.expr(r.X), nil)
 		if r.Lhs != nil {
 			n.SetColas(r.Def)
 			n.PtrList().Set(p.assignList(r.Lhs, n, n.Colas()))
@@ -1195,7 +1195,7 @@ func (p *noder) forStmt(stmt *syntax.ForStmt) ir.Node {
 		return n
 	}
 
-	n := p.nod(stmt, ir.OFOR, nil, nil)
+	n := ir.NewForStmt(p.pos(stmt), nil, nil, nil, nil)
 	if stmt.Init != nil {
 		n.PtrInit().Set1(p.stmt(stmt.Init))
 	}
@@ -1212,7 +1212,7 @@ func (p *noder) forStmt(stmt *syntax.ForStmt) ir.Node {
 
 func (p *noder) switchStmt(stmt *syntax.SwitchStmt) ir.Node {
 	p.openScope(stmt.Pos())
-	n := p.nod(stmt, ir.OSWITCH, nil, nil)
+	n := ir.NewSwitchStmt(p.pos(stmt), nil, nil)
 	if stmt.Init != nil {
 		n.PtrInit().Set1(p.stmt(stmt.Init))
 	}
@@ -1239,7 +1239,7 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitch
 		}
 		p.openScope(clause.Pos())
 
-		n := p.nod(clause, ir.OCASE, nil, nil)
+		n := ir.NewCaseStmt(p.pos(clause), nil, nil)
 		if clause.Cases != nil {
 			n.PtrList().Set(p.exprList(clause.Cases))
 		}
@@ -1281,7 +1281,7 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitch
 }
 
 func (p *noder) selectStmt(stmt *syntax.SelectStmt) ir.Node {
-	n := p.nod(stmt, ir.OSELECT, nil, nil)
+	n := ir.NewSelectStmt(p.pos(stmt), nil)
 	n.PtrList().Set(p.commClauses(stmt.Body, stmt.Rbrace))
 	return n
 }
@@ -1295,7 +1295,7 @@ func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []i
 		}
 		p.openScope(clause.Pos())
 
-		n := p.nod(clause, ir.OCASE, nil, nil)
+		n := ir.NewCaseStmt(p.pos(clause), nil, nil)
 		if clause.Comm != nil {
 			n.PtrList().Set1(p.stmt(clause.Comm))
 		}
@@ -1310,7 +1310,7 @@ func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []i
 
 func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node {
 	sym := p.name(label.Label)
-	lhs := p.nodSym(label, ir.OLABEL, nil, sym)
+	lhs := ir.NewLabelStmt(p.pos(label), sym)
 
 	var ls ir.Node
 	if label.Stmt != nil { // TODO(mdempsky): Should always be present.
@@ -1478,23 +1478,13 @@ func (p *noder) wrapname(n syntax.Node, x ir.Node) ir.Node {
 		}
 		fallthrough
 	case ir.ONAME, ir.ONONAME, ir.OPACK:
-		p := p.nod(n, ir.OPAREN, x, nil)
+		p := ir.NewParenExpr(p.pos(n), x)
 		p.SetImplicit(true)
 		return p
 	}
 	return x
 }
 
-func (p *noder) nod(orig syntax.Node, op ir.Op, left, right ir.Node) ir.Node {
-	return ir.NodAt(p.pos(orig), op, left, right)
-}
-
-func (p *noder) nodSym(orig syntax.Node, op ir.Op, left ir.Node, sym *types.Sym) ir.Node {
-	n := nodSym(op, left, sym)
-	n.SetPos(p.pos(orig))
-	return n
-}
-
 func (p *noder) pos(n syntax.Node) src.XPos {
 	// TODO(gri): orig.Pos() should always be known - fix package syntax
 	xpos := base.Pos
diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go
index 1a0f0066d02e4..2e7838c2527a1 100644
--- a/src/cmd/compile/internal/gc/order.go
+++ b/src/cmd/compile/internal/gc/order.go
@@ -88,7 +88,7 @@ func (o *Order) newTemp(t *types.Type, clear bool) *ir.Name {
 		v = temp(t)
 	}
 	if clear {
-		o.append(ir.Nod(ir.OAS, v, nil))
+		o.append(ir.NewAssignStmt(base.Pos, v, nil))
 	}
 
 	o.temp = append(o.temp, v)
@@ -118,7 +118,7 @@ func (o *Order) copyExprClear(n ir.Node) *ir.Name {
 func (o *Order) copyExpr1(n ir.Node, clear bool) *ir.Name {
 	t := n.Type()
 	v := o.newTemp(t, clear)
-	o.append(ir.Nod(ir.OAS, v, n))
+	o.append(ir.NewAssignStmt(base.Pos, v, n))
 	return v
 }
 
@@ -327,7 +327,7 @@ func (o *Order) cleanTempNoPop(mark ordermarker) []ir.Node {
 	var out []ir.Node
 	for i := len(o.temp) - 1; i >= int(mark); i-- {
 		n := o.temp[i]
-		out = append(out, typecheck(ir.Nod(ir.OVARKILL, n, nil), ctxStmt))
+		out = append(out, typecheck(ir.NewUnaryExpr(base.Pos, ir.OVARKILL, n), ctxStmt))
 	}
 	return out
 }
@@ -503,7 +503,7 @@ func (o *Order) call(nn ir.Node) {
 			x := o.copyExpr(arg.Left())
 			arg.SetLeft(x)
 			x.Name().SetAddrtaken(true) // ensure SSA keeps the x variable
-			n.PtrBody().Append(typecheck(ir.Nod(ir.OVARLIVE, x, nil), ctxStmt))
+			n.PtrBody().Append(typecheck(ir.NewUnaryExpr(base.Pos, ir.OVARLIVE, x), ctxStmt))
 		}
 	}
 }
@@ -569,7 +569,7 @@ func (o *Order) mapAssign(n ir.Node) {
 		case instrumenting && n.Op() == ir.OAS2FUNC && !ir.IsBlank(m):
 			t := o.newTemp(m.Type(), false)
 			n.List().SetIndex(i, t)
-			a := ir.Nod(ir.OAS, m, t)
+			a := ir.NewAssignStmt(base.Pos, m, t)
 			post = append(post, typecheck(a, ctxStmt))
 		}
 	}
@@ -636,7 +636,7 @@ func (o *Order) stmt(n ir.Node) {
 		}
 		l2 = o.copyExpr(l2)
 		r := o.expr(typecheck(ir.NewBinaryExpr(n.Pos(), n.SubOp(), l2, n.Right()), ctxExpr), nil)
-		as := typecheck(ir.NodAt(n.Pos(), ir.OAS, l1, r), ctxStmt)
+		as := typecheck(ir.NewAssignStmt(n.Pos(), l1, r), ctxStmt)
 		o.mapAssign(as)
 		o.cleanTemp(t)
 		return
@@ -824,7 +824,7 @@ func (o *Order) stmt(n ir.Node) {
 
 		r := n.Right()
 		if r.Type().IsString() && r.Type() != types.Types[types.TSTRING] {
-			r = ir.Nod(ir.OCONV, r, nil)
+			r = ir.NewConvExpr(base.Pos, ir.OCONV, nil, r)
 			r.SetType(types.Types[types.TSTRING])
 			r = typecheck(r, ctxExpr)
 		}
@@ -915,11 +915,11 @@ func (o *Order) stmt(n ir.Node) {
 				if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).Left() == n {
 					init = init[1:]
 				}
-				dcl := typecheck(ir.Nod(ir.ODCL, n, nil), ctxStmt)
+				dcl := typecheck(ir.NewDecl(base.Pos, ir.ODCL, n), ctxStmt)
 				ncas.PtrInit().Append(dcl)
 			}
 			tmp := o.newTemp(t, t.HasPointers())
-			as := typecheck(ir.Nod(ir.OAS, n, conv(tmp, n.Type())), ctxStmt)
+			as := typecheck(ir.NewAssignStmt(base.Pos, n, conv(tmp, n.Type())), ctxStmt)
 			ncas.PtrInit().Append(as)
 			r.PtrList().SetIndex(i, tmp)
 		}
@@ -993,7 +993,7 @@ func (o *Order) stmt(n ir.Node) {
 		n := n.(*ir.SwitchStmt)
 		if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) {
 			// Add empty "default:" case for instrumentation.
-			n.PtrList().Append(ir.Nod(ir.OCASE, nil, nil))
+			n.PtrList().Append(ir.NewCaseStmt(base.Pos, nil, nil))
 		}
 
 		t := o.markTemp()
@@ -1176,7 +1176,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node {
 
 		// Evaluate left-hand side.
 		lhs := o.expr(n.Left(), nil)
-		o.out = append(o.out, typecheck(ir.Nod(ir.OAS, r, lhs), ctxStmt))
+		o.out = append(o.out, typecheck(ir.NewAssignStmt(base.Pos, r, lhs), ctxStmt))
 
 		// Evaluate right-hand side, save generated code.
 		saveout := o.out
@@ -1184,13 +1184,13 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node {
 		t := o.markTemp()
 		o.edge()
 		rhs := o.expr(n.Right(), nil)
-		o.out = append(o.out, typecheck(ir.Nod(ir.OAS, r, rhs), ctxStmt))
+		o.out = append(o.out, typecheck(ir.NewAssignStmt(base.Pos, r, rhs), ctxStmt))
 		o.cleanTemp(t)
 		gen := o.out
 		o.out = saveout
 
 		// If left-hand side doesn't cause a short-circuit, issue right-hand side.
-		nif := ir.Nod(ir.OIF, r, nil)
+		nif := ir.NewIfStmt(base.Pos, r, nil, nil)
 		if n.Op() == ir.OANDAND {
 			nif.PtrBody().Set(gen)
 		} else {
@@ -1367,13 +1367,13 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node {
 
 		// Emit the creation of the map (with all its static entries).
 		m := o.newTemp(n.Type(), false)
-		as := ir.Nod(ir.OAS, m, n)
+		as := ir.NewAssignStmt(base.Pos, m, n)
 		typecheck(as, ctxStmt)
 		o.stmt(as)
 
 		// Emit eval+insert of dynamic entries, one at a time.
 		for _, r := range dynamics {
-			as := ir.Nod(ir.OAS, ir.Nod(ir.OINDEX, m, r.Left()), r.Right())
+			as := ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, r.Left()), r.Right())
 			typecheck(as, ctxStmt) // Note: this converts the OINDEX to an OINDEXMAP
 			o.stmt(as)
 		}
@@ -1405,7 +1405,7 @@ func (o *Order) as2(n *ir.AssignListStmt) {
 
 	o.out = append(o.out, n)
 
-	as := ir.Nod(ir.OAS2, nil, nil)
+	as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
 	as.PtrList().Set(left)
 	as.PtrRlist().Set(tmplist)
 	o.stmt(typecheck(as, ctxStmt))
@@ -1427,12 +1427,12 @@ func (o *Order) okAs2(n *ir.AssignListStmt) {
 	o.out = append(o.out, n)
 
 	if tmp1 != nil {
-		r := ir.Nod(ir.OAS, n.List().First(), tmp1)
+		r := ir.NewAssignStmt(base.Pos, n.List().First(), tmp1)
 		o.mapAssign(typecheck(r, ctxStmt))
 		n.List().SetFirst(tmp1)
 	}
 	if tmp2 != nil {
-		r := ir.Nod(ir.OAS, n.List().Second(), conv(tmp2, n.List().Second().Type()))
+		r := ir.NewAssignStmt(base.Pos, n.List().Second(), conv(tmp2, n.List().Second().Type()))
 		o.mapAssign(typecheck(r, ctxStmt))
 		n.List().SetSecond(tmp2)
 	}
diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go
index aa4f0358c99fb..4a753328f2470 100644
--- a/src/cmd/compile/internal/gc/range.go
+++ b/src/cmd/compile/internal/gc/range.go
@@ -166,7 +166,7 @@ func walkrange(nrange *ir.RangeStmt) ir.Node {
 		return n
 	}
 
-	nfor := ir.NodAt(nrange.Pos(), ir.OFOR, nil, nil)
+	nfor := ir.NewForStmt(nrange.Pos(), nil, nil, nil, nil)
 	nfor.SetInit(nrange.Init())
 	nfor.SetSym(nrange.Sym())
 
@@ -224,11 +224,11 @@ func walkrange(nrange *ir.RangeStmt) ir.Node {
 		hv1 := temp(types.Types[types.TINT])
 		hn := temp(types.Types[types.TINT])
 
-		init = append(init, ir.Nod(ir.OAS, hv1, nil))
-		init = append(init, ir.Nod(ir.OAS, hn, ir.Nod(ir.OLEN, ha, nil)))
+		init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
+		init = append(init, ir.NewAssignStmt(base.Pos, hn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha)))
 
-		nfor.SetLeft(ir.Nod(ir.OLT, hv1, hn))
-		nfor.SetRight(ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1))))
+		nfor.SetLeft(ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn))
+		nfor.SetRight(ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, nodintconst(1))))
 
 		// for range ha { body }
 		if v1 == nil {
@@ -237,18 +237,18 @@ func walkrange(nrange *ir.RangeStmt) ir.Node {
 
 		// for v1 := range ha { body }
 		if v2 == nil {
-			body = []ir.Node{ir.Nod(ir.OAS, v1, hv1)}
+			body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, hv1)}
 			break
 		}
 
 		// for v1, v2 := range ha { body }
 		if cheapComputableIndex(nrange.Type().Elem().Width) {
 			// v1, v2 = hv1, ha[hv1]
-			tmp := ir.Nod(ir.OINDEX, ha, hv1)
+			tmp := ir.NewIndexExpr(base.Pos, ha, hv1)
 			tmp.SetBounded(true)
 			// Use OAS2 to correctly handle assignments
 			// of the form "v1, a[v1] := range".
-			a := ir.Nod(ir.OAS2, nil, nil)
+			a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
 			a.PtrList().Set2(v1, v2)
 			a.PtrRlist().Set2(hv1, tmp)
 			body = []ir.Node{a}
@@ -268,19 +268,19 @@ func walkrange(nrange *ir.RangeStmt) ir.Node {
 		// elimination on the index variable (see #20711).
 		// Enhance the prove pass to understand this.
 		ifGuard = ir.NewIfStmt(base.Pos, nil, nil, nil)
-		ifGuard.SetLeft(ir.Nod(ir.OLT, hv1, hn))
+		ifGuard.SetLeft(ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn))
 		nfor.SetOp(ir.OFORUNTIL)
 
 		hp := temp(types.NewPtr(nrange.Type().Elem()))
-		tmp := ir.Nod(ir.OINDEX, ha, nodintconst(0))
+		tmp := ir.NewIndexExpr(base.Pos, ha, nodintconst(0))
 		tmp.SetBounded(true)
-		init = append(init, ir.Nod(ir.OAS, hp, nodAddr(tmp)))
+		init = append(init, ir.NewAssignStmt(base.Pos, hp, nodAddr(tmp)))
 
 		// Use OAS2 to correctly handle assignments
 		// of the form "v1, a[v1] := range".
-		a := ir.Nod(ir.OAS2, nil, nil)
+		a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
 		a.PtrList().Set2(v1, v2)
-		a.PtrRlist().Set2(hv1, ir.Nod(ir.ODEREF, hp, nil))
+		a.PtrRlist().Set2(hv1, ir.NewStarExpr(base.Pos, hp))
 		body = append(body, a)
 
 		// Advance pointer as part of the late increment.
@@ -288,7 +288,7 @@ func walkrange(nrange *ir.RangeStmt) ir.Node {
 		// This runs *after* the condition check, so we know
 		// advancing the pointer is safe and won't go past the
 		// end of the allocation.
-		as := ir.Nod(ir.OAS, hp, addptr(hp, t.Elem().Width))
+		as := ir.NewAssignStmt(base.Pos, hp, addptr(hp, t.Elem().Width))
 		nfor.PtrList().Set1(typecheck(as, ctxStmt))
 
 	case types.TMAP:
@@ -305,20 +305,20 @@ func walkrange(nrange *ir.RangeStmt) ir.Node {
 		fn = substArgTypes(fn, t.Key(), t.Elem(), th)
 		init = append(init, mkcall1(fn, nil, nil, typename(t), ha, nodAddr(hit)))
-		nfor.SetLeft(ir.Nod(ir.ONE, nodSym(ir.ODOT, hit, keysym), nodnil()))
+		nfor.SetLeft(ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), nodnil()))
 
 		fn = syslook("mapiternext")
 		fn = substArgTypes(fn, th)
 		nfor.SetRight(mkcall1(fn, nil, nil, nodAddr(hit)))
 
-		key := ir.Nod(ir.ODEREF, nodSym(ir.ODOT, hit, keysym), nil)
+		key := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym))
 		if v1 == nil {
 			body = nil
 		} else if v2 == nil {
-			body = []ir.Node{ir.Nod(ir.OAS, v1, key)}
+			body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, key)}
 		} else {
-			elem := ir.Nod(ir.ODEREF, nodSym(ir.ODOT, hit, elemsym), nil)
-			a := ir.Nod(ir.OAS2, nil, nil)
+			elem := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, elemsym))
+			a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
 			a.PtrList().Set2(v1, v2)
 			a.PtrRlist().Set2(key, elem)
 			body = []ir.Node{a}
@@ -331,25 +331,25 @@ func walkrange(nrange *ir.RangeStmt) ir.Node {
 		hv1 := temp(t.Elem())
 		hv1.SetTypecheck(1)
 		if t.Elem().HasPointers() {
-			init = append(init, ir.Nod(ir.OAS, hv1, nil))
+			init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
 		}
 		hb := temp(types.Types[types.TBOOL])
 
-		nfor.SetLeft(ir.Nod(ir.ONE, hb, nodbool(false)))
-		a := ir.Nod(ir.OAS2RECV, nil, nil)
+		nfor.SetLeft(ir.NewBinaryExpr(base.Pos, ir.ONE, hb, nodbool(false)))
+		a := ir.NewAssignListStmt(base.Pos, ir.OAS2RECV, nil, nil)
 		a.SetTypecheck(1)
 		a.PtrList().Set2(hv1, hb)
-		a.PtrRlist().Set1(ir.Nod(ir.ORECV, ha, nil))
+		a.PtrRlist().Set1(ir.NewUnaryExpr(base.Pos, ir.ORECV, ha))
 		nfor.Left().PtrInit().Set1(a)
 		if v1 == nil {
 			body = nil
 		} else {
-			body = []ir.Node{ir.Nod(ir.OAS, v1, hv1)}
+			body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, hv1)}
 		}
 		// Zero hv1. This prevents hv1 from being the sole, inaccessible
 		// reference to an otherwise GC-able value during the next channel receive.
 		// See issue 15281.
-		body = append(body, ir.Nod(ir.OAS, hv1, nil))
+		body = append(body, ir.NewAssignStmt(base.Pos, hv1, nil))
 
 	case types.TSTRING:
 		// Transform string range statements like "for v1, v2 = range a" into
@@ -375,30 +375,30 @@ func walkrange(nrange *ir.RangeStmt) ir.Node {
 		hv2 := temp(types.RuneType)
 
 		// hv1 := 0
-		init = append(init, ir.Nod(ir.OAS, hv1, nil))
+		init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
 
 		// hv1 < len(ha)
-		nfor.SetLeft(ir.Nod(ir.OLT, hv1, ir.Nod(ir.OLEN, ha, nil)))
+		nfor.SetLeft(ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha)))
 
 		if v1 != nil {
 			// hv1t = hv1
-			body = append(body, ir.Nod(ir.OAS, hv1t, hv1))
+			body = append(body, ir.NewAssignStmt(base.Pos, hv1t, hv1))
 		}
 
 		// hv2 := rune(ha[hv1])
-		nind := ir.Nod(ir.OINDEX, ha, hv1)
+		nind := ir.NewIndexExpr(base.Pos, ha, hv1)
 		nind.SetBounded(true)
-		body = append(body, ir.Nod(ir.OAS, hv2, conv(nind, types.RuneType)))
+		body = append(body, ir.NewAssignStmt(base.Pos, hv2, conv(nind, types.RuneType)))
 
 		// if hv2 < utf8.RuneSelf
-		nif := ir.Nod(ir.OIF, nil, nil)
-		nif.SetLeft(ir.Nod(ir.OLT, hv2, nodintconst(utf8.RuneSelf)))
+		nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+		nif.SetLeft(ir.NewBinaryExpr(base.Pos, ir.OLT, hv2, nodintconst(utf8.RuneSelf)))
 
 		// hv1++
-		nif.PtrBody().Set1(ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1))))
+		nif.PtrBody().Set1(ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, nodintconst(1))))
 
 		// } else {
-		eif := ir.Nod(ir.OAS2, nil, nil)
+		eif := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
 		nif.PtrRlist().Set1(eif)
 
 		// hv2, hv1 = decoderune(ha, hv1)
@@ -411,13 +411,13 @@ func walkrange(nrange *ir.RangeStmt) ir.Node {
 		if v1 != nil {
 			if v2 != nil {
 				// v1, v2 = hv1t, hv2
-				a := ir.Nod(ir.OAS2, nil, nil)
+				a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
 				a.PtrList().Set2(v1, v2)
 				a.PtrRlist().Set2(hv1t, hv2)
 				body = append(body, a)
 			} else {
 				// v1 = hv1t
-				body = append(body, ir.Nod(ir.OAS, v1, hv1t))
+				body = append(body, ir.NewAssignStmt(base.Pos, v1, hv1t))
 			}
 		}
 	}
@@ -561,22 +561,22 @@ func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node {
 	// 	memclr{NoHeap,Has}Pointers(hp, hn)
 	// 	i = len(a) - 1
 	// }
-	n := ir.Nod(ir.OIF, nil, nil)
+	n := ir.NewIfStmt(base.Pos, nil, nil, nil)
 	n.PtrBody().Set(nil)
-	n.SetLeft(ir.Nod(ir.ONE, ir.Nod(ir.OLEN, a, nil), nodintconst(0)))
+	n.SetLeft(ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), nodintconst(0)))
 
 	// hp = &a[0]
 	hp := temp(types.Types[types.TUNSAFEPTR])
 
-	ix := ir.Nod(ir.OINDEX, a, nodintconst(0))
+	ix := ir.NewIndexExpr(base.Pos, a, nodintconst(0))
 	ix.SetBounded(true)
 	addr := convnop(nodAddr(ix), types.Types[types.TUNSAFEPTR])
-	n.PtrBody().Append(ir.Nod(ir.OAS, hp, addr))
+	n.PtrBody().Append(ir.NewAssignStmt(base.Pos, hp, addr))
 
 	// hn = len(a) * sizeof(elem(a))
 	hn := temp(types.Types[types.TUINTPTR])
 
-	mul := conv(ir.Nod(ir.OMUL, ir.Nod(ir.OLEN, a, nil), nodintconst(elemsize)), types.Types[types.TUINTPTR])
-	n.PtrBody().Append(ir.Nod(ir.OAS, hn, mul))
+	mul := conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), nodintconst(elemsize)), types.Types[types.TUINTPTR])
+	n.PtrBody().Append(ir.NewAssignStmt(base.Pos, hn, mul))
 
 	var fn ir.Node
 	if a.Type().Elem().HasPointers() {
@@ -591,7 +591,7 @@ func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node {
 	n.PtrBody().Append(fn)
 
 	// i = len(a) - 1
-	v1 = ir.Nod(ir.OAS, v1, ir.Nod(ir.OSUB, ir.Nod(ir.OLEN, a, nil), nodintconst(1)))
+	v1 = ir.NewAssignStmt(base.Pos, v1, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), nodintconst(1)))
 
 	n.PtrBody().Append(v1)
 
@@ -605,12 +605,12 @@ func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node {
 func addptr(p ir.Node, n int64) ir.Node {
 	t := p.Type()
 
-	p = ir.Nod(ir.OCONVNOP, p, nil)
+	p = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, p)
 	p.SetType(types.Types[types.TUINTPTR])
 
-	p = ir.Nod(ir.OADD, p, nodintconst(n))
+	p = ir.NewBinaryExpr(base.Pos, ir.OADD, p, nodintconst(n))
 
-	p = ir.Nod(ir.OCONVNOP, p, nil)
+	p = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, p)
 	p.SetType(t)
 
 	return p
diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go
index 974c4b254e7ff..be2f688eb9c07 100644
--- a/src/cmd/compile/internal/gc/select.go
+++ b/src/cmd/compile/internal/gc/select.go
@@ -33,7 +33,7 @@ func typecheckselect(sel *ir.SelectStmt) {
 			ncase.SetLeft(n)
 			ncase.PtrList().Set(nil)
 			oselrecv2 := func(dst, recv ir.Node, colas bool) {
-				n := ir.NodAt(n.Pos(), ir.OSELRECV2, nil, nil)
+				n := ir.NewAssignListStmt(n.Pos(), ir.OSELRECV2, nil, nil)
 				n.PtrList().Set2(dst, ir.BlankNode)
 				n.PtrRlist().Set1(recv)
 				n.SetColas(colas)
@@ -145,7 +145,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node {
 		}
 
 		l = append(l, cas.Body().Slice()...)
-		l = append(l, ir.Nod(ir.OBREAK, nil, nil))
+		l = append(l, ir.NewBranchStmt(base.Pos, ir.OBREAK, nil))
 		return l
 	}
 
@@ -182,7 +182,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node {
 		n := cas.Left()
 		setlineno(n)
-		r := ir.Nod(ir.OIF, nil, nil)
+		r := ir.NewIfStmt(base.Pos, nil, nil, nil)
 		r.PtrInit().Set(cas.Init().Slice())
 		var call ir.Node
 		switch n.Op() {
@@ -215,7 +215,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node {
 		r.SetLeft(typecheck(call, ctxExpr))
 		r.PtrBody().Set(cas.Body().Slice())
 		r.PtrRlist().Set(append(dflt.Init().Slice(), dflt.Body().Slice()...))
-		return []ir.Node{r, ir.Nod(ir.OBREAK, nil, nil)}
+		return []ir.Node{r, ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)}
 	}
 
 	if dflt != nil {
@@ -229,7 +229,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node {
 	// generate sel-struct
 	base.Pos = sellineno
 	selv := temp(types.NewArray(scasetype(), int64(ncas)))
-	init = append(init, typecheck(ir.Nod(ir.OAS, selv, nil), ctxStmt))
+	init = append(init, typecheck(ir.NewAssignStmt(base.Pos, selv, nil), ctxStmt))
 
 	// No initialization for order; runtime.selectgo is responsible for that.
 	order := temp(types.NewArray(types.Types[types.TUINT16], 2*int64(ncas)))
@@ -237,7 +237,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node {
 	var pc0, pcs ir.Node
 	if base.Flag.Race {
 		pcs = temp(types.NewArray(types.Types[types.TUINTPTR], int64(ncas)))
-		pc0 = typecheck(nodAddr(ir.Nod(ir.OINDEX, pcs, nodintconst(0))), ctxExpr)
+		pc0 = typecheck(nodAddr(ir.NewIndexExpr(base.Pos, pcs, nodintconst(0))), ctxExpr)
 	} else {
 		pc0 = nodnil()
 	}
@@ -276,7 +276,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node {
 		casorder[i] = cas
 
 		setField := func(f string, val ir.Node) {
-			r := ir.Nod(ir.OAS, nodSym(ir.ODOT, ir.Nod(ir.OINDEX, selv, nodintconst(int64(i))), lookup(f)), val)
+			r := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, ir.NewIndexExpr(base.Pos, selv, nodintconst(int64(i))), lookup(f)), val)
 			init = append(init, typecheck(r, ctxStmt))
 		}
 
@@ -290,7 +290,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node {
 		// TODO(mdempsky): There should be a cleaner way to
 		// handle this.
 		if base.Flag.Race {
-			r := mkcall("selectsetpc", nil, nil, nodAddr(ir.Nod(ir.OINDEX, pcs, nodintconst(int64(i)))))
+			r := mkcall("selectsetpc", nil, nil, nodAddr(ir.NewIndexExpr(base.Pos, pcs, nodintconst(int64(i)))))
 			init = append(init, r)
 		}
 	}
@@ -302,17 +302,17 @@ func walkselectcases(cases ir.Nodes) []ir.Node {
 	base.Pos = sellineno
 	chosen := temp(types.Types[types.TINT])
 	recvOK := temp(types.Types[types.TBOOL])
-	r := ir.Nod(ir.OAS2, nil, nil)
+	r := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
 	r.PtrList().Set2(chosen, recvOK)
 	fn := syslook("selectgo")
 	r.PtrRlist().Set1(mkcall1(fn, fn.Type().Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil)))
 	init = append(init, typecheck(r, ctxStmt))
 
 	// selv and order are no longer alive after selectgo.
-	init = append(init, ir.Nod(ir.OVARKILL, selv, nil))
-	init = append(init, ir.Nod(ir.OVARKILL, order, nil))
+	init = append(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, selv))
+	init = append(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, order))
 	if base.Flag.Race {
-		init = append(init, ir.Nod(ir.OVARKILL, pcs, nil))
+		init = append(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, pcs))
 	}
 
 	// dispatch cases
@@ -320,27 +320,27 @@ func walkselectcases(cases ir.Nodes) []ir.Node {
 		cond = typecheck(cond, ctxExpr)
 		cond = defaultlit(cond, nil)
 
-		r := ir.Nod(ir.OIF, cond, nil)
+		r := ir.NewIfStmt(base.Pos, cond, nil, nil)
 
 		if n := cas.Left(); n != nil && n.Op() == ir.OSELRECV2 {
 			if !ir.IsBlank(n.List().Second()) {
-				x := ir.Nod(ir.OAS, n.List().Second(), recvOK)
+				x := ir.NewAssignStmt(base.Pos, n.List().Second(), recvOK)
 				r.PtrBody().Append(typecheck(x, ctxStmt))
 			}
 		}
 
 		r.PtrBody().AppendNodes(cas.PtrBody())
-		r.PtrBody().Append(ir.Nod(ir.OBREAK, nil, nil))
+		r.PtrBody().Append(ir.NewBranchStmt(base.Pos, ir.OBREAK, nil))
 		init = append(init, r)
 	}
 
 	if dflt != nil {
 		setlineno(dflt)
-		dispatch(ir.Nod(ir.OLT, chosen, nodintconst(0)), dflt)
+		dispatch(ir.NewBinaryExpr(base.Pos, ir.OLT, chosen, nodintconst(0)), dflt)
 	}
 	for i, cas := range casorder {
 		setlineno(cas)
-		dispatch(ir.Nod(ir.OEQ, chosen, nodintconst(int64(i))), cas)
+		dispatch(ir.NewBinaryExpr(base.Pos, ir.OEQ, chosen, nodintconst(int64(i))), cas)
 	}
 
 	return init
@@ -348,7 +348,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node {
 
 // bytePtrToIndex returns a Node representing "(*byte)(&n[i])".
 func bytePtrToIndex(n ir.Node, i int64) ir.Node {
-	s := nodAddr(ir.Nod(ir.OINDEX, n, nodintconst(i)))
+	s := nodAddr(ir.NewIndexExpr(base.Pos, n, nodintconst(i)))
 	t := types.NewPtr(types.Types[types.TUINT8])
 	return convnop(s, t)
 }
diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go
index 79c7215d4d438..5a96d4c320f51 100644
--- a/src/cmd/compile/internal/gc/sinit.go
+++ b/src/cmd/compile/internal/gc/sinit.go
@@ -113,7 +113,7 @@ func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *type
 		if loff != 0 || !types.Identical(typ, l.Type()) {
 			dst = ir.NewNameOffsetExpr(base.Pos, l, loff, typ)
 		}
-		s.append(ir.Nod(ir.OAS, dst, conv(r, typ)))
+		s.append(ir.NewAssignStmt(base.Pos, dst, conv(r, typ)))
 		return true
 
 	case ir.ONIL:
@@ -168,7 +168,7 @@ func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *type
 			ll := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, typ)
 			rr := ir.NewNameOffsetExpr(base.Pos, orig, e.Xoffset, typ)
 			setlineno(rr)
-			s.append(ir.Nod(ir.OAS, ll, rr))
+			s.append(ir.NewAssignStmt(base.Pos, ll, rr))
 		}
 
 		return true
@@ -219,7 +219,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type
 
 		// Init underlying literal.
 		if !s.staticassign(a, 0, r.Left(), a.Type()) {
-			s.append(ir.Nod(ir.OAS, a, r.Left()))
+			s.append(ir.NewAssignStmt(base.Pos, a, r.Left()))
 		}
 		return true
 	}
@@ -259,7 +259,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type
 			setlineno(e.Expr)
 			if !s.staticassign(l, loff+e.Xoffset, e.Expr, e.Expr.Type()) {
 				a := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, e.Expr.Type())
-				s.append(ir.Nod(ir.OAS, a, e.Expr))
+				s.append(ir.NewAssignStmt(base.Pos, a, e.Expr))
 			}
 		}
 
@@ -325,14 +325,14 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type
 			setlineno(val)
 			if !s.staticassign(l, loff+int64(Widthptr), val, val.Type()) {
 				a := ir.NewNameOffsetExpr(base.Pos, l, loff+int64(Widthptr), val.Type())
-				s.append(ir.Nod(ir.OAS, a, val))
+				s.append(ir.NewAssignStmt(base.Pos, a, val))
 			}
 		} else {
 			// Construct temp to hold val, write pointer to temp into n.
 			a := staticname(val.Type())
 			s.inittemps[val] = a
 			if !s.staticassign(a, 0, val, val.Type()) {
-				s.append(ir.Nod(ir.OAS, a, val))
+				s.append(ir.NewAssignStmt(base.Pos, a, val))
 			}
 			addrsym(l, loff+int64(Widthptr), a, 0)
 		}
@@ -405,7 +405,7 @@ func isSimpleName(nn ir.Node) bool {
 }
 
 func litas(l ir.Node, r ir.Node, init *ir.Nodes) {
-	appendWalkStmt(init, ir.Nod(ir.OAS, l, r))
+	appendWalkStmt(init, ir.NewAssignStmt(base.Pos, l, r))
 }
 
 // initGenType is a bitmap indicating the types of generation that will occur for a static value.
@@ -537,7 +537,7 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node,
 				}
 				r = kv.Right()
 			}
-			a := ir.Nod(ir.OINDEX, var_, nodintconst(k))
+			a := ir.NewIndexExpr(base.Pos, var_, nodintconst(k))
 			k++
 			if isBlank {
 				return ir.BlankNode, r
@@ -551,7 +551,7 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node,
 				return ir.BlankNode, r.Left()
 			}
 			setlineno(r)
-			return nodSym(ir.ODOT, var_, r.Sym()), r.Left()
+			return ir.NewSelectorExpr(base.Pos, ir.ODOT, var_, r.Sym()), r.Left()
 		}
 	default:
 		base.Fatalf("fixedlit bad op: %v", n.Op())
@@ -676,37 +676,37 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes)
 		}
 
 		if vstat == nil {
-			a = ir.Nod(ir.OAS, x, nil)
+			a = ir.NewAssignStmt(base.Pos, x, nil)
 			a = typecheck(a, ctxStmt)
 			init.Append(a) // zero new temp
 		} else {
 			// Declare that we're about to initialize all of x.
 			// (Which happens at the *vauto = vstat below.)
-			init.Append(ir.Nod(ir.OVARDEF, x, nil))
+			init.Append(ir.NewUnaryExpr(base.Pos, ir.OVARDEF, x))
 		}
 
 		a = nodAddr(x)
 	} else if n.Esc() == EscNone {
 		a = temp(t)
 		if vstat == nil {
-			a = ir.Nod(ir.OAS, temp(t), nil)
+			a = ir.NewAssignStmt(base.Pos, temp(t), nil)
 			a = typecheck(a, ctxStmt)
 			init.Append(a) // zero new temp
 			a = a.(*ir.AssignStmt).Left()
 		} else {
-			init.Append(ir.Nod(ir.OVARDEF, a, nil))
+			init.Append(ir.NewUnaryExpr(base.Pos, ir.OVARDEF, a))
 		}
 
 		a = nodAddr(a)
 	} else {
-		a = ir.Nod(ir.ONEW, ir.TypeNode(t), nil)
+		a = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(t))
 	}
-	appendWalkStmt(init, ir.Nod(ir.OAS, vauto, a))
+	appendWalkStmt(init, ir.NewAssignStmt(base.Pos, vauto, a))
 
 	if vstat != nil {
 		// copy static to heap (4)
-		a = ir.Nod(ir.ODEREF, vauto, nil)
-		appendWalkStmt(init, ir.Nod(ir.OAS, a, vstat))
+		a = ir.NewStarExpr(base.Pos, vauto)
+		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, a, vstat))
 	}
 
 	// put dynamics into array (5)
@@ -720,7 +720,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes)
 			}
 			value = kv.Right()
 		}
-		a := ir.Nod(ir.OINDEX, vauto, nodintconst(index))
+		a := ir.NewIndexExpr(base.Pos, vauto, nodintconst(index))
 		a.SetBounded(true)
 		index++
 
@@ -748,14 +748,14 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes)
 
 		// build list of vauto[c] = expr
 		setlineno(value)
-		as := typecheck(ir.Nod(ir.OAS, a, value), ctxStmt)
+		as := typecheck(ir.NewAssignStmt(base.Pos, a, value), ctxStmt)
 		as = orderStmtInPlace(as, map[string][]*ir.Name{})
 		as = walkstmt(as)
 		init.Append(as)
 	}
 
 	// make slice out of heap (6)
-	a = ir.Nod(ir.OAS, var_, ir.Nod(ir.OSLICE, vauto, nil))
+	a = ir.NewAssignStmt(base.Pos, var_, ir.NewSliceExpr(base.Pos, ir.OSLICE, vauto))
 
 	a = typecheck(a, ctxStmt)
 	a = orderStmtInPlace(a, map[string][]*ir.Name{})
@@ -765,7 +765,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes)
 
 func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) {
 	// make the map var
-	a := ir.Nod(ir.OMAKE, nil, nil)
+	a := ir.NewCallExpr(base.Pos, ir.OMAKE, nil, nil)
 	a.SetEsc(n.Esc())
 	a.PtrList().Set2(ir.TypeNode(n.Type()), nodintconst(int64(n.List().Len())))
 	litas(m, a, init)
@@ -813,19 +813,19 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) {
 		// 	map[vstatk[i]] = vstate[i]
 		// }
 		i := temp(types.Types[types.TINT])
-		rhs := ir.Nod(ir.OINDEX, vstate, i)
+		rhs := ir.NewIndexExpr(base.Pos, vstate, i)
 		rhs.SetBounded(true)
 
-		kidx := ir.Nod(ir.OINDEX, vstatk, i)
+		kidx := ir.NewIndexExpr(base.Pos, vstatk, i)
 		kidx.SetBounded(true)
-		lhs := ir.Nod(ir.OINDEX, m, kidx)
+		lhs := ir.NewIndexExpr(base.Pos, m, kidx)
 
-		zero := ir.Nod(ir.OAS, i, nodintconst(0))
-		cond := ir.Nod(ir.OLT, i, nodintconst(tk.NumElem()))
-		incr := ir.Nod(ir.OAS, i, ir.Nod(ir.OADD, i, nodintconst(1)))
-		body := ir.Nod(ir.OAS, lhs, rhs)
+		zero := ir.NewAssignStmt(base.Pos, i, nodintconst(0))
+		cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, nodintconst(tk.NumElem()))
+		incr := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, nodintconst(1)))
+		body := ir.NewAssignStmt(base.Pos, lhs, rhs)
 
-		loop := ir.Nod(ir.OFOR, cond, incr)
+		loop := ir.NewForStmt(base.Pos, nil, cond, incr, nil)
 		loop.PtrBody().Set1(body)
 		loop.PtrInit().Set1(zero)
 
@@ -845,17 +845,17 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) {
 		index, elem := r.Left(), r.Right()
 
 		setlineno(index)
-		appendWalkStmt(init, ir.Nod(ir.OAS, tmpkey, index))
+		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmpkey, index))
 
 		setlineno(elem)
-		appendWalkStmt(init, ir.Nod(ir.OAS, tmpelem, elem))
+		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmpelem, elem))
 
 		setlineno(tmpelem)
-		appendWalkStmt(init, ir.Nod(ir.OAS, ir.Nod(ir.OINDEX, m, tmpkey), tmpelem))
+		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, tmpkey), tmpelem))
 	}
 
-	appendWalkStmt(init, ir.Nod(ir.OVARKILL, tmpkey, nil))
-	appendWalkStmt(init, ir.Nod(ir.OVARKILL, tmpelem, nil))
+	appendWalkStmt(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, tmpkey))
+	appendWalkStmt(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, tmpelem))
 }
 
 func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
@@ -879,15 +879,15 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
 		var r ir.Node
 		if n.Right() != nil {
 			// n.Right is stack temporary used as backing store.
-			appendWalkStmt(init, ir.Nod(ir.OAS, n.Right(), nil)) // zero backing store, just in case (#18410)
+			appendWalkStmt(init, ir.NewAssignStmt(base.Pos, n.Right(), nil)) // zero backing store, just in case (#18410)
 			r = nodAddr(n.Right())
 		} else {
-			r = ir.Nod(ir.ONEW, ir.TypeNode(n.Left().Type()), nil)
+			r = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(n.Left().Type()))
 			r.SetEsc(n.Esc())
 		}
-		appendWalkStmt(init, ir.Nod(ir.OAS, var_, r))
+		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, r))
 
-		var_ = ir.Nod(ir.ODEREF, var_, nil)
+		var_ = ir.NewStarExpr(base.Pos, var_)
 		var_ = typecheck(var_, ctxExpr|ctxAssign)
 		anylit(n.Left(), var_, init)
 
@@ -908,7 +908,7 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
 			fixedlit(ctxt, initKindStatic, n, vstat, init)
 
 			// copy static to var
-			appendWalkStmt(init, ir.Nod(ir.OAS, var_, vstat))
+			appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, vstat))
 
 			// add expressions to automatic
 			fixedlit(inInitFunction, initKindDynamic, n, var_, init)
@@ -923,7 +923,7 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
 		}
 		// initialization of an array or struct with unspecified components (missing fields or arrays)
 		if isSimpleName(var_) || int64(n.List().Len()) < components {
-			appendWalkStmt(init, ir.Nod(ir.OAS, var_, nil))
+			appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil))
 		}
 
 		fixedlit(inInitFunction, initKindLocalCode, n, var_, init)
diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go
index 0f6c7023f2855..174452def2308 100644
--- a/src/cmd/compile/internal/gc/subr.go
+++ b/src/cmd/compile/internal/gc/subr.go
@@ -170,20 +170,6 @@ func NewName(s *types.Sym) *ir.Name {
 	return n
 }
 
-// nodSym makes a Node with Op op and with the Left field set to left
-// and the Sym field set to sym. This is for ODOT and friends.
-func nodSym(op ir.Op, left ir.Node, sym *types.Sym) ir.Node { - return nodlSym(base.Pos, op, left, sym) -} - -// nodlSym makes a Node with position Pos, with Op op, and with the Left field set to left -// and the Sym field set to sym. This is for ODOT and friends. -func nodlSym(pos src.XPos, op ir.Op, left ir.Node, sym *types.Sym) ir.Node { - n := ir.NodAt(pos, op, left, nil) - n.SetSym(sym) - return n -} - // methcmp sorts methods by symbol. type methcmp []*types.Field @@ -196,7 +182,7 @@ func nodintconst(v int64) ir.Node { } func nodnil() ir.Node { - n := ir.Nod(ir.ONIL, nil, nil) + n := ir.NewNilExpr(base.Pos) n.SetType(types.Types[types.TNIL]) return n } @@ -537,7 +523,7 @@ func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node { // if the next step is non-bool (like interface{}). if n.Type() == types.UntypedBool && !t.IsBoolean() { if n.Op() == ir.ONAME || n.Op() == ir.OLITERAL { - r := ir.Nod(ir.OCONVNOP, n, nil) + r := ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, n) r.SetType(types.Types[types.TBOOL]) r.SetTypecheck(1) r.SetImplicit(true) @@ -569,13 +555,13 @@ func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) { if c != n || init.Len() != 0 { base.Fatalf("backingArrayPtrLen not cheap: %v", n) } - ptr = ir.Nod(ir.OSPTR, n, nil) + ptr = ir.NewUnaryExpr(base.Pos, ir.OSPTR, n) if n.Type().IsString() { ptr.SetType(types.Types[types.TUINT8].PtrTo()) } else { ptr.SetType(n.Type().Elem().PtrTo()) } - length = ir.Nod(ir.OLEN, n, nil) + length = ir.NewUnaryExpr(base.Pos, ir.OLEN, n) length.SetType(types.Types[types.TINT]) return ptr, length } @@ -834,7 +820,7 @@ func safeexpr(n ir.Node, init *ir.Nodes) ir.Node { func copyexpr(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node { l := temp(t) - appendWalkStmt(init, ir.Nod(ir.OAS, l, n)) + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, l, n)) return l } @@ -1009,7 +995,7 @@ func adddot(n *ir.SelectorExpr) *ir.SelectorExpr { case path != nil: // rebuild elided dots for c := len(path) - 1; c >= 0; c-- { - dot := nodSym(ir.ODOT, n.Left(), path[c].field.Sym) + dot := ir.NewSelectorExpr(base.Pos, ir.ODOT, n.Left(), path[c].field.Sym) dot.SetImplicit(true) dot.SetType(path[c].field.Type) n.SetLeft(dot) @@ -1222,9 +1208,9 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { // generate nil pointer check for better error if rcvr.IsPtr() && rcvr.Elem() == methodrcvr { // generating wrapper from *T to T. 
- n := ir.Nod(ir.OIF, nil, nil) - n.SetLeft(ir.Nod(ir.OEQ, nthis, nodnil())) - call := ir.Nod(ir.OCALL, syslook("panicwrap"), nil) + n := ir.NewIfStmt(base.Pos, nil, nil, nil) + n.SetLeft(ir.NewBinaryExpr(base.Pos, ir.OEQ, nthis, nodnil())) + call := ir.NewCallExpr(base.Pos, ir.OCALL, syslook("panicwrap"), nil) n.PtrBody().Set1(call) fn.PtrBody().Append(n) } @@ -1244,16 +1230,16 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { if !left.Type().IsPtr() { left = nodAddr(left) } - as := ir.Nod(ir.OAS, nthis, convnop(left, rcvr)) + as := ir.NewAssignStmt(base.Pos, nthis, convnop(left, rcvr)) fn.PtrBody().Append(as) - fn.PtrBody().Append(nodSym(ir.ORETJMP, nil, methodSym(methodrcvr, method.Sym))) + fn.PtrBody().Append(ir.NewBranchStmt(base.Pos, ir.ORETJMP, methodSym(methodrcvr, method.Sym))) } else { fn.SetWrapper(true) // ignore frame for panic+recover matching - call := ir.Nod(ir.OCALL, dot, nil) + call := ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil) call.PtrList().Set(paramNnames(tfn.Type())) call.SetIsDDD(tfn.Type().IsVariadic()) if method.Type.NumResults() > 0 { - ret := ir.Nod(ir.ORETURN, nil, nil) + ret := ir.NewReturnStmt(base.Pos, nil) ret.PtrList().Set1(call) fn.PtrBody().Append(ret) } else { @@ -1416,7 +1402,7 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool } func liststmt(l []ir.Node) ir.Node { - n := ir.Nod(ir.OBLOCK, nil, nil) + n := ir.NewBlockStmt(base.Pos, nil) n.PtrList().Set(l) if len(l) != 0 { n.SetPos(l[0].Pos()) @@ -1440,7 +1426,7 @@ func initExpr(init []ir.Node, n ir.Node) ir.Node { if ir.MayBeShared(n) { // Introduce OCONVNOP to hold init list. old := n - n = ir.Nod(ir.OCONVNOP, old, nil) + n = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, old) n.SetType(old.Type()) n.SetTypecheck(1) } @@ -1534,7 +1520,7 @@ func isdirectiface(t *types.Type) bool { // itabType loads the _type field from a runtime.itab struct. func itabType(itab ir.Node) ir.Node { - typ := nodSym(ir.ODOTPTR, itab, nil) + typ := ir.NewSelectorExpr(base.Pos, ir.ODOTPTR, itab, nil) typ.SetType(types.NewPtr(types.Types[types.TUINT8])) typ.SetTypecheck(1) typ.SetOffset(int64(Widthptr)) // offset of _type in runtime.itab @@ -1549,7 +1535,7 @@ func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node { if t.IsInterface() { base.Fatalf("ifaceData interface: %v", t) } - ptr := ir.NodAt(pos, ir.OIDATA, n, nil) + ptr := ir.NewUnaryExpr(pos, ir.OIDATA, n) if isdirectiface(t) { ptr.SetType(t) ptr.SetTypecheck(1) @@ -1557,7 +1543,7 @@ func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node { } ptr.SetType(types.NewPtr(t)) ptr.SetTypecheck(1) - ind := ir.NodAt(pos, ir.ODEREF, ptr, nil) + ind := ir.NewStarExpr(pos, ptr) ind.SetType(t) ind.SetTypecheck(1) ind.SetBounded(true) diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index 882feb47cc61f..1866a6a784a6b 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -285,7 +285,7 @@ func walkExprSwitch(sw *ir.SwitchStmt) { for _, ncase := range sw.List().Slice() { ncase := ncase.(*ir.CaseStmt) label := autolabel(".s") - jmp := npos(ncase.Pos(), nodSym(ir.OGOTO, nil, label)) + jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, label) // Process case dispatch. if ncase.List().Len() == 0 { @@ -300,10 +300,10 @@ func walkExprSwitch(sw *ir.SwitchStmt) { } // Process body. - body.Append(npos(ncase.Pos(), nodSym(ir.OLABEL, nil, label))) + body.Append(ir.NewLabelStmt(ncase.Pos(), label)) body.Append(ncase.Body().Slice()...) 
if fall, pos := endsInFallthrough(ncase.Body().Slice()); !fall { - br := ir.Nod(ir.OBREAK, nil, nil) + br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil) br.SetPos(pos) body.Append(br) } @@ -311,7 +311,7 @@ func walkExprSwitch(sw *ir.SwitchStmt) { sw.PtrList().Set(nil) if defaultGoto == nil { - br := ir.Nod(ir.OBREAK, nil, nil) + br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil) br.SetPos(br.Pos().WithNotStmt()) defaultGoto = br } @@ -397,11 +397,11 @@ func (s *exprSwitch) flush() { // Perform two-level binary search. binarySearch(len(runs), &s.done, func(i int) ir.Node { - return ir.Nod(ir.OLE, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(runs[i-1]))) + return ir.NewBinaryExpr(base.Pos, ir.OLE, ir.NewUnaryExpr(base.Pos, ir.OLEN, s.exprname), nodintconst(runLen(runs[i-1]))) }, func(i int, nif *ir.IfStmt) { run := runs[i] - nif.SetLeft(ir.Nod(ir.OEQ, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(run)))) + nif.SetLeft(ir.NewBinaryExpr(base.Pos, ir.OEQ, ir.NewUnaryExpr(base.Pos, ir.OLEN, s.exprname), nodintconst(runLen(run)))) s.search(run, nif.PtrBody()) }, ) @@ -432,7 +432,7 @@ func (s *exprSwitch) flush() { func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) { binarySearch(len(cc), out, func(i int) ir.Node { - return ir.Nod(ir.OLE, s.exprname, cc[i-1].hi) + return ir.NewBinaryExpr(base.Pos, ir.OLE, s.exprname, cc[i-1].hi) }, func(i int, nif *ir.IfStmt) { c := &cc[i] @@ -445,9 +445,9 @@ func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) { func (c *exprClause) test(exprname ir.Node) ir.Node { // Integer range. if c.hi != c.lo { - low := ir.NodAt(c.pos, ir.OGE, exprname, c.lo) - high := ir.NodAt(c.pos, ir.OLE, exprname, c.hi) - return ir.NodAt(c.pos, ir.OANDAND, low, high) + low := ir.NewBinaryExpr(c.pos, ir.OGE, exprname, c.lo) + high := ir.NewBinaryExpr(c.pos, ir.OLE, exprname, c.hi) + return ir.NewLogicalExpr(c.pos, ir.OANDAND, low, high) } // Optimize "switch true { ...}" and "switch false { ... }". @@ -455,11 +455,11 @@ func (c *exprClause) test(exprname ir.Node) ir.Node { if ir.BoolVal(exprname) { return c.lo } else { - return ir.NodAt(c.pos, ir.ONOT, c.lo, nil) + return ir.NewUnaryExpr(c.pos, ir.ONOT, c.lo) } } - return ir.NodAt(c.pos, ir.OEQ, exprname, c.lo) + return ir.NewBinaryExpr(c.pos, ir.OEQ, exprname, c.lo) } func allCaseExprsAreSideEffectFree(sw *ir.SwitchStmt) bool { @@ -513,7 +513,7 @@ func walkTypeSwitch(sw *ir.SwitchStmt) { // Get interface descriptor word. // For empty interfaces this will be the type. // For non-empty interfaces this will be the itab. - itab := ir.Nod(ir.OITAB, s.facename, nil) + itab := ir.NewUnaryExpr(base.Pos, ir.OITAB, s.facename) // For empty interfaces, do: // if e._type == nil { @@ -521,8 +521,8 @@ func walkTypeSwitch(sw *ir.SwitchStmt) { // } // h := e._type.hash // Use a similar strategy for non-empty interfaces. - ifNil := ir.Nod(ir.OIF, nil, nil) - ifNil.SetLeft(ir.Nod(ir.OEQ, itab, nodnil())) + ifNil := ir.NewIfStmt(base.Pos, nil, nil, nil) + ifNil.SetLeft(ir.NewBinaryExpr(base.Pos, ir.OEQ, itab, nodnil())) base.Pos = base.Pos.WithNotStmt() // disable statement marks after the first check. ifNil.SetLeft(typecheck(ifNil.Left(), ctxExpr)) ifNil.SetLeft(defaultlit(ifNil.Left(), nil)) @@ -530,7 +530,7 @@ func walkTypeSwitch(sw *ir.SwitchStmt) { sw.PtrBody().Append(ifNil) // Load hash from type or itab. 
- dotHash := nodSym(ir.ODOTPTR, itab, nil) + dotHash := ir.NewSelectorExpr(base.Pos, ir.ODOTPTR, itab, nil) dotHash.SetType(types.Types[types.TUINT32]) dotHash.SetTypecheck(1) if s.facename.Type().IsEmptyInterface() { @@ -541,7 +541,7 @@ func walkTypeSwitch(sw *ir.SwitchStmt) { dotHash.SetBounded(true) // guaranteed not to fault s.hashname = copyexpr(dotHash, dotHash.Type(), sw.PtrBody()) - br := ir.Nod(ir.OBREAK, nil, nil) + br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil) var defaultGoto, nilGoto ir.Node var body ir.Nodes for _, ncase := range sw.List().Slice() { @@ -561,7 +561,7 @@ func walkTypeSwitch(sw *ir.SwitchStmt) { caseVarInitialized := false label := autolabel(".s") - jmp := npos(ncase.Pos(), nodSym(ir.OGOTO, nil, label)) + jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, label) if ncase.List().Len() == 0 { // default: if defaultGoto != nil { @@ -587,7 +587,7 @@ func walkTypeSwitch(sw *ir.SwitchStmt) { } } - body.Append(npos(ncase.Pos(), nodSym(ir.OLABEL, nil, label))) + body.Append(ir.NewLabelStmt(ncase.Pos(), label)) if caseVar != nil && !caseVarInitialized { val := s.facename if singleType != nil { @@ -598,8 +598,8 @@ func walkTypeSwitch(sw *ir.SwitchStmt) { val = ifaceData(ncase.Pos(), s.facename, singleType) } l := []ir.Node{ - ir.NodAt(ncase.Pos(), ir.ODCL, caseVar, nil), - ir.NodAt(ncase.Pos(), ir.OAS, caseVar, val), + ir.NewDecl(ncase.Pos(), ir.ODCL, caseVar), + ir.NewAssignStmt(ncase.Pos(), caseVar, val), } typecheckslice(l, ctxStmt) body.Append(l...) @@ -644,8 +644,8 @@ func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp ir.Node) { var body ir.Nodes if caseVar != nil { l := []ir.Node{ - ir.NodAt(pos, ir.ODCL, caseVar, nil), - ir.NodAt(pos, ir.OAS, caseVar, nil), + ir.NewDecl(pos, ir.ODCL, caseVar), + ir.NewAssignStmt(pos, caseVar, nil), } typecheckslice(l, ctxStmt) body.Append(l...) @@ -654,15 +654,15 @@ func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp ir.Node) { } // cv, ok = iface.(type) - as := ir.NodAt(pos, ir.OAS2, nil, nil) + as := ir.NewAssignListStmt(pos, ir.OAS2, nil, nil) as.PtrList().Set2(caseVar, s.okname) // cv, ok = - dot := ir.NodAt(pos, ir.ODOTTYPE, s.facename, nil) + dot := ir.NewTypeAssertExpr(pos, s.facename, nil) dot.SetType(typ) // iface.(type) as.PtrRlist().Set1(dot) appendWalkStmt(&body, as) // if ok { goto label } - nif := ir.NodAt(pos, ir.OIF, nil, nil) + nif := ir.NewIfStmt(pos, nil, nil, nil) nif.SetLeft(s.okname) nif.PtrBody().Set1(jmp) body.Append(nif) @@ -707,13 +707,13 @@ func (s *typeSwitch) flush() { binarySearch(len(cc), &s.done, func(i int) ir.Node { - return ir.Nod(ir.OLE, s.hashname, nodintconst(int64(cc[i-1].hash))) + return ir.NewBinaryExpr(base.Pos, ir.OLE, s.hashname, nodintconst(int64(cc[i-1].hash))) }, func(i int, nif *ir.IfStmt) { // TODO(mdempsky): Omit hash equality check if // there's only one type. 
c := cc[i] - nif.SetLeft(ir.Nod(ir.OEQ, s.hashname, nodintconst(int64(c.hash)))) + nif.SetLeft(ir.NewBinaryExpr(base.Pos, ir.OEQ, s.hashname, nodintconst(int64(c.hash)))) nif.PtrBody().AppendNodes(&c.body) }, ) @@ -748,7 +748,7 @@ func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i in } half := lo + n/2 - nif := ir.Nod(ir.OIF, nil, nil) + nif := ir.NewIfStmt(base.Pos, nil, nil, nil) nif.SetLeft(less(half)) base.Pos = base.Pos.WithNotStmt() nif.SetLeft(typecheck(nif.Left(), ctxExpr)) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index bb658999e58a5..db03fd9e75340 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -1553,7 +1553,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } - n := ir.NodAt(n.Pos(), ir.OCONV, arg, nil) + n := ir.NewConvExpr(n.Pos(), ir.OCONV, nil, arg) n.SetType(l.Type()) return typecheck1(n, top) } @@ -1979,7 +1979,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetType(nil) return n } - nn = ir.NodAt(n.Pos(), ir.OMAKESLICE, l, r) + nn = ir.NewMakeExpr(n.Pos(), ir.OMAKESLICE, l, r) case types.TMAP: if i < len(args) { @@ -1998,7 +1998,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } else { l = nodintconst(0) } - nn = ir.NodAt(n.Pos(), ir.OMAKEMAP, l, nil) + nn = ir.NewMakeExpr(n.Pos(), ir.OMAKEMAP, l, nil) nn.SetEsc(n.Esc()) case types.TCHAN: @@ -2019,7 +2019,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } else { l = nodintconst(0) } - nn = ir.NodAt(n.Pos(), ir.OMAKECHAN, l, nil) + nn = ir.NewMakeExpr(n.Pos(), ir.OMAKECHAN, l, nil) } if i < len(args) { @@ -2170,7 +2170,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { // Empty identifier is valid but useless. // Eliminate now to simplify life later. // See issues 7538, 11589, 11593. - n = ir.NodAt(n.Pos(), ir.OBLOCK, nil, nil) + n = ir.NewBlockStmt(n.Pos(), nil) } return n @@ -2300,7 +2300,7 @@ func typecheckargs(n ir.Node) { n.(ir.OrigNode).SetOrig(ir.SepCopy(n)) } - as := ir.Nod(ir.OAS2, nil, nil) + as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) as.PtrRlist().Append(list...) 
// If we're outside of function context, then this call will @@ -2315,7 +2315,7 @@ func typecheckargs(n ir.Node) { list = nil for _, f := range t.FieldSlice() { t := temp(f.Type) - as.PtrInit().Append(ir.Nod(ir.ODCL, t, nil)) + as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, t)) as.PtrList().Append(t) list = append(list, t) } @@ -2440,7 +2440,7 @@ func implicitstar(n ir.Node) ir.Node { if !t.IsArray() { return n } - star := ir.Nod(ir.ODEREF, n, nil) + star := ir.NewStarExpr(base.Pos, n) star.SetImplicit(true) return typecheck(star, ctxExpr) } @@ -2619,7 +2619,7 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { n.SetType(f1.Type) if t.IsInterface() { if n.Left().Type().IsPtr() { - star := ir.Nod(ir.ODEREF, n.Left(), nil) + star := ir.NewStarExpr(base.Pos, n.Left()) star.SetImplicit(true) n.SetLeft(typecheck(star, ctxExpr)) } @@ -2645,7 +2645,7 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { addr.SetImplicit(true) n.SetLeft(typecheck(addr, ctxType|ctxExpr)) } else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) { - star := ir.Nod(ir.ODEREF, n.Left(), nil) + star := ir.NewStarExpr(base.Pos, n.Left()) star.SetImplicit(true) n.SetLeft(typecheck(star, ctxType|ctxExpr)) } else if tt.IsPtr() && tt.Elem().IsPtr() && types.Identical(derefall(tt), derefall(rcvr)) { @@ -2655,7 +2655,7 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { if rcvr.IsPtr() && !tt.Elem().IsPtr() { break } - star := ir.Nod(ir.ODEREF, n.Left(), nil) + star := ir.NewStarExpr(base.Pos, n.Left()) star.SetImplicit(true) n.SetLeft(typecheck(star, ctxType|ctxExpr)) tt = tt.Elem() @@ -3055,7 +3055,7 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { } // No pushtype allowed here. Must name fields for that. 
n1 = assignconv(n1, f.Type, "field value") - sk := nodSym(ir.OSTRUCTKEY, n1, f.Sym) + sk := ir.NewStructKeyExpr(base.Pos, f.Sym, n1) sk.SetOffset(f.Offset) ls[i] = sk } @@ -3614,11 +3614,11 @@ func stringtoruneslit(n *ir.ConvExpr) ir.Node { var l []ir.Node i := 0 for _, r := range ir.StringVal(n.Left()) { - l = append(l, ir.Nod(ir.OKEY, nodintconst(int64(i)), nodintconst(int64(r)))) + l = append(l, ir.NewKeyExpr(base.Pos, nodintconst(int64(i)), nodintconst(int64(r)))) i++ } - nn := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(n.Type())) + nn := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(n.Type()).(ir.Ntype), nil) nn.PtrList().Set(l) return typecheck(nn, ctxExpr) } @@ -4064,7 +4064,7 @@ func deadcode(fn *ir.Func) { } } - fn.PtrBody().Set([]ir.Node{ir.Nod(ir.OBLOCK, nil, nil)}) + fn.PtrBody().Set([]ir.Node{ir.NewBlockStmt(base.Pos, nil)}) } func deadcodeslice(nn *ir.Nodes) { diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index d5d12453a714d..17269746e64ab 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -210,7 +210,7 @@ func walkstmt(n ir.Node) ir.Node { if base.Flag.CompilingRuntime { base.Errorf("%v escapes to heap, not allowed in runtime", v) } - nn := ir.Nod(ir.OAS, v.Name().Heapaddr, callnew(v.Type())) + nn := ir.NewAssignStmt(base.Pos, v.Name().Heapaddr, callnew(v.Type())) nn.SetColas(true) return walkstmt(typecheck(nn, ctxStmt)) } @@ -315,7 +315,7 @@ func walkstmt(n ir.Node) ir.Node { if cl == ir.PPARAMOUT { var ln ir.Node = ln if isParamStackCopy(ln) { - ln = walkexpr(typecheck(ir.Nod(ir.ODEREF, ln.Name().Heapaddr, nil), ctxExpr), nil) + ln = walkexpr(typecheck(ir.NewStarExpr(base.Pos, ln.Name().Heapaddr), ctxExpr), nil) } rl = append(rl, ln) } @@ -489,7 +489,7 @@ func walkexpr(n ir.Node, init *ir.Nodes) ir.Node { } if n.Op() == ir.ONAME && n.(*ir.Name).Class() == ir.PAUTOHEAP { - nn := ir.Nod(ir.ODEREF, n.Name().Heapaddr, nil) + nn := ir.NewStarExpr(base.Pos, n.Name().Heapaddr) nn.Left().MarkNonNil() return walkexpr(typecheck(nn, ctxExpr), init) } @@ -697,15 +697,14 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if n.Op() == ir.OASOP { // Rewrite x op= y into x = x op y. - n = ir.Nod(ir.OAS, left, - typecheck(ir.NewBinaryExpr(base.Pos, n.(*ir.AssignOpStmt).SubOp(), left, right), ctxExpr)) + n = ir.NewAssignStmt(base.Pos, left, typecheck(ir.NewBinaryExpr(base.Pos, n.(*ir.AssignOpStmt).SubOp(), left, right), ctxExpr)) } else { n.(*ir.AssignStmt).SetLeft(left) } as := n.(*ir.AssignStmt) if oaslit(as, init) { - return ir.NodAt(as.Pos(), ir.OBLOCK, nil, nil) + return ir.NewBlockStmt(as.Pos(), nil) } if as.Right() == nil { @@ -804,7 +803,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { fn := chanfn("chanrecv2", 2, r.Left().Type()) ok := n.List().Second() call := mkcall1(fn, types.Types[types.TBOOL], init, r.Left(), n1) - return typecheck(ir.Nod(ir.OAS, ok, call), ctxStmt) + return typecheck(ir.NewAssignStmt(base.Pos, ok, call), ctxStmt) // a,b = m[i] case ir.OAS2MAPR: @@ -865,7 +864,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { n.List().SetFirst(var_) init.Append(walkexpr(n, init)) - as := ir.Nod(ir.OAS, a, ir.Nod(ir.ODEREF, var_, nil)) + as := ir.NewAssignStmt(base.Pos, a, ir.NewStarExpr(base.Pos, var_)) return walkexpr(typecheck(as, ctxStmt), init) case ir.ODELETE: @@ -908,7 +907,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped. 
if isdirectiface(fromType) { - l := ir.Nod(ir.OEFACE, typeword(), n.Left()) + l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), n.Left()) l.SetType(toType) l.SetTypecheck(n.Typecheck()) return l @@ -939,11 +938,11 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // and staticuint64s[n.Left * 8 + 7] on big-endian. n.SetLeft(cheapexpr(n.Left(), init)) // byteindex widens n.Left so that the multiplication doesn't overflow. - index := ir.Nod(ir.OLSH, byteindex(n.Left()), nodintconst(3)) + index := ir.NewBinaryExpr(base.Pos, ir.OLSH, byteindex(n.Left()), nodintconst(3)) if thearch.LinkArch.ByteOrder == binary.BigEndian { - index = ir.Nod(ir.OADD, index, nodintconst(7)) + index = ir.NewBinaryExpr(base.Pos, ir.OADD, index, nodintconst(7)) } - xe := ir.Nod(ir.OINDEX, staticuint64s, index) + xe := ir.NewIndexExpr(base.Pos, staticuint64s, index) xe.SetBounded(true) value = xe case n.Left().Op() == ir.ONAME && n.Left().(*ir.Name).Class() == ir.PEXTERN && n.Left().(*ir.Name).Readonly(): @@ -952,13 +951,13 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case !fromType.IsInterface() && n.Esc() == EscNone && fromType.Width <= 1024: // n.Left does not escape. Use a stack temporary initialized to n.Left. value = temp(fromType) - init.Append(typecheck(ir.Nod(ir.OAS, value, n.Left()), ctxStmt)) + init.Append(typecheck(ir.NewAssignStmt(base.Pos, value, n.Left()), ctxStmt)) } if value != nil { // Value is identical to n.Left. // Construct the interface directly: {type/itab, &value}. - l := ir.Nod(ir.OEFACE, typeword(), typecheck(nodAddr(value), ctxExpr)) + l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), typecheck(nodAddr(value), ctxExpr)) l.SetType(toType) l.SetTypecheck(n.Typecheck()) return l @@ -973,19 +972,19 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if toType.IsEmptyInterface() && fromType.IsInterface() && !fromType.IsEmptyInterface() { // Evaluate the input interface. c := temp(fromType) - init.Append(ir.Nod(ir.OAS, c, n.Left())) + init.Append(ir.NewAssignStmt(base.Pos, c, n.Left())) // Get the itab out of the interface. tmp := temp(types.NewPtr(types.Types[types.TUINT8])) - init.Append(ir.Nod(ir.OAS, tmp, typecheck(ir.Nod(ir.OITAB, c, nil), ctxExpr))) + init.Append(ir.NewAssignStmt(base.Pos, tmp, typecheck(ir.NewUnaryExpr(base.Pos, ir.OITAB, c), ctxExpr))) // Get the type out of the itab. - nif := ir.Nod(ir.OIF, typecheck(ir.Nod(ir.ONE, tmp, nodnil()), ctxExpr), nil) - nif.PtrBody().Set1(ir.Nod(ir.OAS, tmp, itabType(tmp))) + nif := ir.NewIfStmt(base.Pos, typecheck(ir.NewBinaryExpr(base.Pos, ir.ONE, tmp, nodnil()), ctxExpr), nil, nil) + nif.PtrBody().Set1(ir.NewAssignStmt(base.Pos, tmp, itabType(tmp))) init.Append(nif) // Build the result. - e := ir.Nod(ir.OEFACE, tmp, ifaceData(n.Pos(), c, types.NewPtr(types.Types[types.TUINT8]))) + e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, tmp, ifaceData(n.Pos(), c, types.NewPtr(types.Types[types.TUINT8]))) e.SetType(toType) // assign type manually, typecheck doesn't understand OEFACE. 
e.SetTypecheck(1) return e @@ -1001,9 +1000,9 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { dowidth(fromType) fn = substArgTypes(fn, fromType) dowidth(fn.Type()) - call := ir.Nod(ir.OCALL, fn, nil) + call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) call.PtrList().Set1(n.Left()) - e := ir.Nod(ir.OEFACE, typeword(), safeexpr(walkexpr(typecheck(call, ctxExpr), init), init)) + e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), safeexpr(walkexpr(typecheck(call, ctxExpr), init), init)) e.SetType(toType) e.SetTypecheck(1) return e @@ -1036,7 +1035,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { fn := syslook(fnname) fn = substArgTypes(fn, fromType, toType) dowidth(fn.Type()) - call := ir.Nod(ir.OCALL, fn, nil) + call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) call.PtrList().Set2(tab, v) return walkexpr(typecheck(call, ctxExpr), init) @@ -1198,7 +1197,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { } call.SetType(types.NewPtr(t.Elem())) call.MarkNonNil() // mapaccess1* and mapassign always return non-nil pointers. - star := ir.Nod(ir.ODEREF, call, nil) + star := ir.NewStarExpr(base.Pos, call) star.SetType(t.Elem()) star.SetTypecheck(1) return star @@ -1260,7 +1259,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { base.Fatalf("large ONEW with EscNone: %v", n) } r := temp(n.Type().Elem()) - init.Append(typecheck(ir.Nod(ir.OAS, r, nil), ctxStmt)) // zero temp + init.Append(typecheck(ir.NewAssignStmt(base.Pos, r, nil), ctxStmt)) // zero temp return typecheck(nodAddr(r), ctxExpr) } return callnew(n.Type().Elem()) @@ -1311,7 +1310,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // var hv hmap hv := temp(hmapType) - init.Append(typecheck(ir.Nod(ir.OAS, hv, nil), ctxStmt)) + init.Append(typecheck(ir.NewAssignStmt(base.Pos, hv, nil), ctxStmt)) // h = &hv h = nodAddr(hv) @@ -1332,19 +1331,19 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // h.buckets = b // } - nif := ir.Nod(ir.OIF, ir.Nod(ir.OLE, hint, nodintconst(BUCKETSIZE)), nil) + nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, nodintconst(BUCKETSIZE)), nil, nil) nif.SetLikely(true) // var bv bmap bv := temp(bmap(t)) - nif.PtrBody().Append(ir.Nod(ir.OAS, bv, nil)) + nif.PtrBody().Append(ir.NewAssignStmt(base.Pos, bv, nil)) // b = &bv b := nodAddr(bv) // h.buckets = b bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap - na := ir.Nod(ir.OAS, nodSym(ir.ODOT, h, bsym), b) + na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), b) nif.PtrBody().Append(na) appendWalkStmt(init, nif) } @@ -1364,7 +1363,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // h.hash0 = fastrand() rand := mkcall("fastrand", types.Types[types.TUINT32], init) hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap - appendWalkStmt(init, ir.Nod(ir.OAS, nodSym(ir.ODOT, h, hashsym), rand)) + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand)) return convnop(h, t) } // Call runtime.makehmap to allocate an @@ -1429,16 +1428,16 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // if len < 0 { panicmakeslicelen() } // panicmakeslicecap() // } - nif := ir.Nod(ir.OIF, ir.Nod(ir.OGT, conv(l, types.Types[types.TUINT64]), nodintconst(i)), nil) - niflen := ir.Nod(ir.OIF, ir.Nod(ir.OLT, l, nodintconst(0)), nil) + nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, conv(l, types.Types[types.TUINT64]), nodintconst(i)), nil, nil) + niflen := ir.NewIfStmt(base.Pos, 
ir.NewBinaryExpr(base.Pos, ir.OLT, l, nodintconst(0)), nil, nil) niflen.PtrBody().Set1(mkcall("panicmakeslicelen", nil, init)) nif.PtrBody().Append(niflen, mkcall("panicmakeslicecap", nil, init)) init.Append(typecheck(nif, ctxStmt)) t = types.NewArray(t.Elem(), i) // [r]T var_ := temp(t) - appendWalkStmt(init, ir.Nod(ir.OAS, var_, nil)) // zero temp - r := ir.Nod(ir.OSLICE, var_, nil) // arr[:l] + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil)) // zero temp + r := ir.NewSliceExpr(base.Pos, ir.OSLICE, var_) // arr[:l] r.SetSliceBounds(nil, l, nil) // The conv is necessary in case n.Type is named. return walkexpr(typecheck(conv(r, n.Type()), ctxExpr), init) @@ -1462,7 +1461,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { argtype = types.Types[types.TINT] } - m := ir.Nod(ir.OSLICEHEADER, nil, nil) + m := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil) m.SetType(t) fn := syslook(fnname) @@ -1482,8 +1481,8 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { } length := conv(n.Left(), types.Types[types.TINT]) - copylen := ir.Nod(ir.OLEN, n.Right(), nil) - copyptr := ir.Nod(ir.OSPTR, n.Right(), nil) + copylen := ir.NewUnaryExpr(base.Pos, ir.OLEN, n.Right()) + copyptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, n.Right()) if !t.Elem().HasPointers() && n.Bounded() { // When len(to)==len(from) and elements have no pointers: @@ -1492,25 +1491,25 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // We do not check for overflow of len(to)*elem.Width here // since len(from) is an existing checked slice capacity // with same elem.Width for the from slice. - size := ir.Nod(ir.OMUL, conv(length, types.Types[types.TUINTPTR]), conv(nodintconst(t.Elem().Width), types.Types[types.TUINTPTR])) + size := ir.NewBinaryExpr(base.Pos, ir.OMUL, conv(length, types.Types[types.TUINTPTR]), conv(nodintconst(t.Elem().Width), types.Types[types.TUINTPTR])) // instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer fn := syslook("mallocgc") - sh := ir.Nod(ir.OSLICEHEADER, nil, nil) + sh := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil) sh.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, nodnil(), nodbool(false))) sh.Left().MarkNonNil() sh.PtrList().Set2(length, length) sh.SetType(t) s := temp(t) - r := typecheck(ir.Nod(ir.OAS, s, sh), ctxStmt) + r := typecheck(ir.NewAssignStmt(base.Pos, s, sh), ctxStmt) r = walkexpr(r, init) init.Append(r) // instantiate memmove(to *any, frm *any, size uintptr) fn = syslook("memmove") fn = substArgTypes(fn, t.Elem(), t.Elem()) - ncopy := mkcall1(fn, nil, init, ir.Nod(ir.OSPTR, s, nil), copyptr, size) + ncopy := mkcall1(fn, nil, init, ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), copyptr, size) init.Append(walkexpr(typecheck(ncopy, ctxStmt), init)) return s @@ -1518,7 +1517,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // Replace make+copy with runtime.makeslicecopy. 
// instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer fn := syslook("makeslicecopy") - s := ir.Nod(ir.OSLICEHEADER, nil, nil) + s := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil) s.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), length, copylen, conv(copyptr, types.Types[types.TUNSAFEPTR]))) s.Left().MarkNonNil() s.PtrList().Set2(length, length) @@ -1576,18 +1575,16 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { a = callnew(t) } p := temp(t.PtrTo()) // *[n]byte - init.Append(typecheck(ir.Nod(ir.OAS, p, a), ctxStmt)) + init.Append(typecheck(ir.NewAssignStmt(base.Pos, p, a), ctxStmt)) // Copy from the static string data to the [n]byte. if len(sc) > 0 { - as := ir.Nod(ir.OAS, - ir.Nod(ir.ODEREF, p, nil), - ir.Nod(ir.ODEREF, convnop(ir.Nod(ir.OSPTR, s, nil), t.PtrTo()), nil)) + as := ir.NewAssignStmt(base.Pos, ir.NewStarExpr(base.Pos, p), ir.NewStarExpr(base.Pos, convnop(ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), t.PtrTo()))) appendWalkStmt(init, as) } // Slice the [n]byte to a []byte. - slice := ir.NodAt(n.Pos(), ir.OSLICEARR, p, nil) + slice := ir.NewSliceExpr(n.Pos(), ir.OSLICEARR, p) slice.SetType(n.Type()) slice.SetTypecheck(1) return walkexpr(slice, init) @@ -1830,7 +1827,7 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { l = tmp } - res := ir.Nod(ir.ORESULT, nil, nil) + res := ir.NewResultExpr(base.Pos, nil, types.BADWIDTH) res.SetOffset(base.Ctxt.FixedFrameSize() + r.Offset) res.SetType(r.Type) res.SetTypecheck(1) @@ -1854,7 +1851,7 @@ func mkdotargslice(typ *types.Type, args []ir.Node) ir.Node { n = nodnil() n.SetType(typ) } else { - lit := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(typ)) + lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ).(ir.Ntype), nil) lit.PtrList().Append(args...) 
lit.SetImplicit(true) n = lit @@ -2017,9 +2014,9 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { case types.TPTR: if n.Type().Elem().NotInHeap() { on = syslook("printuintptr") - n = ir.Nod(ir.OCONV, n, nil) + n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n) n.SetType(types.Types[types.TUNSAFEPTR]) - n = ir.Nod(ir.OCONV, n, nil) + n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n) n.SetType(types.Types[types.TUINTPTR]) break } @@ -2062,11 +2059,11 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { continue } - r := ir.Nod(ir.OCALL, on, nil) + r := ir.NewCallExpr(base.Pos, ir.OCALL, on, nil) if params := on.Type().Params().FieldSlice(); len(params) > 0 { t := params[0].Type if !types.Identical(t, n.Type()) { - n = ir.Nod(ir.OCONV, n, nil) + n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n) n.SetType(t) } r.PtrList().Append(n) @@ -2079,14 +2076,14 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { typecheckslice(calls, ctxStmt) walkexprlist(calls, init) - r := ir.Nod(ir.OBLOCK, nil, nil) + r := ir.NewBlockStmt(base.Pos, nil) r.PtrList().Set(calls) return walkstmt(typecheck(r, ctxStmt)) } func callnew(t *types.Type) ir.Node { dowidth(t) - n := ir.Nod(ir.ONEWOBJ, typename(t), nil) + n := ir.NewUnaryExpr(base.Pos, ir.ONEWOBJ, typename(t)) n.SetType(types.NewPtr(t)) n.SetTypecheck(1) n.MarkNonNil() @@ -2228,7 +2225,7 @@ func reorder3save(n ir.Node, all []*ir.AssignStmt, i int, early *[]ir.Node) ir.N } q := ir.Node(temp(n.Type())) - as := typecheck(ir.Nod(ir.OAS, q, n), ctxStmt) + as := typecheck(ir.NewAssignStmt(base.Pos, q, n), ctxStmt) *early = append(*early, as) return q } @@ -2447,9 +2444,9 @@ func paramstoheap(params *types.Type) []ir.Node { } if stackcopy := v.Name().Stackcopy; stackcopy != nil { - nn = append(nn, walkstmt(ir.Nod(ir.ODCL, v, nil))) + nn = append(nn, walkstmt(ir.NewDecl(base.Pos, ir.ODCL, v))) if stackcopy.Class() == ir.PPARAM { - nn = append(nn, walkstmt(typecheck(ir.Nod(ir.OAS, v, stackcopy), ctxStmt))) + nn = append(nn, walkstmt(typecheck(ir.NewAssignStmt(base.Pos, v, stackcopy), ctxStmt))) } } } @@ -2483,7 +2480,7 @@ func zeroResults() { v = v.Name().Stackcopy } // Zero the stack location containing f. - Curfn.Enter.Append(ir.NodAt(Curfn.Pos(), ir.OAS, v, nil)) + Curfn.Enter.Append(ir.NewAssignStmt(Curfn.Pos(), v, nil)) } } @@ -2497,7 +2494,7 @@ func returnsfromheap(params *types.Type) []ir.Node { continue } if stackcopy := v.Name().Stackcopy; stackcopy != nil && stackcopy.Class() == ir.PPARAMOUT { - nn = append(nn, walkstmt(typecheck(ir.Nod(ir.OAS, stackcopy, v), ctxStmt))) + nn = append(nn, walkstmt(typecheck(ir.NewAssignStmt(base.Pos, stackcopy, v), ctxStmt))) } } @@ -2547,7 +2544,7 @@ func conv(n ir.Node, t *types.Type) ir.Node { if types.Identical(n.Type(), t) { return n } - n = ir.Nod(ir.OCONV, n, nil) + n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n) n.SetType(t) n = typecheck(n, ctxExpr) return n @@ -2559,7 +2556,7 @@ func convnop(n ir.Node, t *types.Type) ir.Node { if types.Identical(n.Type(), t) { return n } - n = ir.Nod(ir.OCONVNOP, n, nil) + n = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, n) n.SetType(t) n = typecheck(n, ctxExpr) return n @@ -2574,11 +2571,11 @@ func byteindex(n ir.Node) ir.Node { // the wrong result for negative values. // Reinterpreting the value as an unsigned byte solves both cases. 
if !types.Identical(n.Type(), types.Types[types.TUINT8]) { - n = ir.Nod(ir.OCONV, n, nil) + n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n) n.SetType(types.Types[types.TUINT8]) n.SetTypecheck(1) } - n = ir.Nod(ir.OCONV, n, nil) + n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n) n.SetType(types.Types[types.TINT]) n.SetTypecheck(1) return n @@ -2722,7 +2719,7 @@ func addstr(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { } cat := syslook(fn) - r := ir.Nod(ir.OCALL, cat, nil) + r := ir.NewCallExpr(base.Pos, ir.OCALL, cat, nil) r.PtrList().Set(args) r1 := typecheck(r, ctxExpr) r1 = walkexpr(r1, init) @@ -2769,40 +2766,40 @@ func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { // var s []T s := temp(l1.Type()) - nodes.Append(ir.Nod(ir.OAS, s, l1)) // s = l1 + nodes.Append(ir.NewAssignStmt(base.Pos, s, l1)) // s = l1 elemtype := s.Type().Elem() // n := len(s) + len(l2) nn := temp(types.Types[types.TINT]) - nodes.Append(ir.Nod(ir.OAS, nn, ir.Nod(ir.OADD, ir.Nod(ir.OLEN, s, nil), ir.Nod(ir.OLEN, l2, nil)))) + nodes.Append(ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), ir.NewUnaryExpr(base.Pos, ir.OLEN, l2)))) // if uint(n) > uint(cap(s)) - nif := ir.Nod(ir.OIF, nil, nil) + nif := ir.NewIfStmt(base.Pos, nil, nil, nil) nuint := conv(nn, types.Types[types.TUINT]) - scapuint := conv(ir.Nod(ir.OCAP, s, nil), types.Types[types.TUINT]) - nif.SetLeft(ir.Nod(ir.OGT, nuint, scapuint)) + scapuint := conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT]) + nif.SetLeft(ir.NewBinaryExpr(base.Pos, ir.OGT, nuint, scapuint)) // instantiate growslice(typ *type, []any, int) []any fn := syslook("growslice") fn = substArgTypes(fn, elemtype, elemtype) // s = growslice(T, s, n) - nif.PtrBody().Set1(ir.Nod(ir.OAS, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn))) + nif.PtrBody().Set1(ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn))) nodes.Append(nif) // s = s[:n] - nt := ir.Nod(ir.OSLICE, s, nil) + nt := ir.NewSliceExpr(base.Pos, ir.OSLICE, s) nt.SetSliceBounds(nil, nn, nil) nt.SetBounded(true) - nodes.Append(ir.Nod(ir.OAS, s, nt)) + nodes.Append(ir.NewAssignStmt(base.Pos, s, nt)) var ncopy ir.Node if elemtype.HasPointers() { // copy(s[len(l1):], l2) - slice := ir.Nod(ir.OSLICE, s, nil) + slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s) slice.SetType(s.Type()) - slice.SetSliceBounds(ir.Nod(ir.OLEN, l1, nil), nil, nil) + slice.SetSliceBounds(ir.NewUnaryExpr(base.Pos, ir.OLEN, l1), nil, nil) Curfn.SetWBPos(n.Pos()) @@ -2816,9 +2813,9 @@ func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { // rely on runtime to instrument: // copy(s[len(l1):], l2) // l2 can be a slice or string. 
- slice := ir.Nod(ir.OSLICE, s, nil) + slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s) slice.SetType(s.Type()) - slice.SetSliceBounds(ir.Nod(ir.OLEN, l1, nil), nil, nil) + slice.SetSliceBounds(ir.NewUnaryExpr(base.Pos, ir.OLEN, l1), nil, nil) ptr1, len1 := backingArrayPtrLen(cheapexpr(slice, &nodes)) ptr2, len2 := backingArrayPtrLen(l2) @@ -2828,14 +2825,14 @@ func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, nodintconst(elemtype.Width)) } else { // memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T)) - ix := ir.Nod(ir.OINDEX, s, ir.Nod(ir.OLEN, l1, nil)) + ix := ir.NewIndexExpr(base.Pos, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1)) ix.SetBounded(true) addr := nodAddr(ix) - sptr := ir.Nod(ir.OSPTR, l2, nil) + sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, l2) - nwid := cheapexpr(conv(ir.Nod(ir.OLEN, l2, nil), types.Types[types.TUINTPTR]), &nodes) - nwid = ir.Nod(ir.OMUL, nwid, nodintconst(elemtype.Width)) + nwid := cheapexpr(conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, l2), types.Types[types.TUINTPTR]), &nodes) + nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, nodintconst(elemtype.Width)) // instantiate func memmove(to *any, frm *any, length uintptr) fn := syslook("memmove") @@ -2931,7 +2928,7 @@ func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { var nodes []ir.Node // if l2 >= 0 (likely happens), do nothing - nifneg := ir.Nod(ir.OIF, ir.Nod(ir.OGE, l2, nodintconst(0)), nil) + nifneg := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGE, l2, nodintconst(0)), nil, nil) nifneg.SetLikely(true) // else panicmakeslicelen() @@ -2940,50 +2937,50 @@ func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { // s := l1 s := temp(l1.Type()) - nodes = append(nodes, ir.Nod(ir.OAS, s, l1)) + nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, l1)) elemtype := s.Type().Elem() // n := len(s) + l2 nn := temp(types.Types[types.TINT]) - nodes = append(nodes, ir.Nod(ir.OAS, nn, ir.Nod(ir.OADD, ir.Nod(ir.OLEN, s, nil), l2))) + nodes = append(nodes, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), l2))) // if uint(n) > uint(cap(s)) nuint := conv(nn, types.Types[types.TUINT]) - capuint := conv(ir.Nod(ir.OCAP, s, nil), types.Types[types.TUINT]) - nif := ir.Nod(ir.OIF, ir.Nod(ir.OGT, nuint, capuint), nil) + capuint := conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT]) + nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, nuint, capuint), nil, nil) // instantiate growslice(typ *type, old []any, newcap int) []any fn := syslook("growslice") fn = substArgTypes(fn, elemtype, elemtype) // s = growslice(T, s, n) - nif.PtrBody().Set1(ir.Nod(ir.OAS, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn))) + nif.PtrBody().Set1(ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn))) nodes = append(nodes, nif) // s = s[:n] - nt := ir.Nod(ir.OSLICE, s, nil) + nt := ir.NewSliceExpr(base.Pos, ir.OSLICE, s) nt.SetSliceBounds(nil, nn, nil) nt.SetBounded(true) - nodes = append(nodes, ir.Nod(ir.OAS, s, nt)) + nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, nt)) // lptr := &l1[0] l1ptr := temp(l1.Type().Elem().PtrTo()) - tmp := ir.Nod(ir.OSPTR, l1, nil) - nodes = append(nodes, ir.Nod(ir.OAS, l1ptr, tmp)) + tmp := ir.NewUnaryExpr(base.Pos, ir.OSPTR, l1) + nodes = append(nodes, ir.NewAssignStmt(base.Pos, l1ptr, tmp)) // sptr := &s[0] sptr := temp(elemtype.PtrTo()) - tmp = ir.Nod(ir.OSPTR, s, nil) - nodes = 
append(nodes, ir.Nod(ir.OAS, sptr, tmp)) + tmp = ir.NewUnaryExpr(base.Pos, ir.OSPTR, s) + nodes = append(nodes, ir.NewAssignStmt(base.Pos, sptr, tmp)) // hp := &s[len(l1)] - ix := ir.Nod(ir.OINDEX, s, ir.Nod(ir.OLEN, l1, nil)) + ix := ir.NewIndexExpr(base.Pos, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1)) ix.SetBounded(true) hp := convnop(nodAddr(ix), types.Types[types.TUNSAFEPTR]) // hn := l2 * sizeof(elem(s)) - hn := conv(ir.Nod(ir.OMUL, l2, nodintconst(elemtype.Width)), types.Types[types.TUINTPTR]) + hn := conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, l2, nodintconst(elemtype.Width)), types.Types[types.TUINTPTR]) clrname := "memclrNoHeapPointers" hasPointers := elemtype.HasPointers() @@ -2998,7 +2995,7 @@ func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { if hasPointers { // if l1ptr == sptr - nifclr := ir.Nod(ir.OIF, ir.Nod(ir.OEQ, l1ptr, sptr), nil) + nifclr := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OEQ, l1ptr, sptr), nil, nil) nifclr.SetBody(clr) nodes = append(nodes, nifclr) } else { @@ -3071,36 +3068,35 @@ func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node { var l []ir.Node ns := temp(nsrc.Type()) - l = append(l, ir.Nod(ir.OAS, ns, nsrc)) // s = src + l = append(l, ir.NewAssignStmt(base.Pos, ns, nsrc)) // s = src - na := nodintconst(int64(argc)) // const argc - nif := ir.Nod(ir.OIF, nil, nil) // if cap(s) - len(s) < argc - nif.SetLeft(ir.Nod(ir.OLT, ir.Nod(ir.OSUB, ir.Nod(ir.OCAP, ns, nil), ir.Nod(ir.OLEN, ns, nil)), na)) + na := nodintconst(int64(argc)) // const argc + nif := ir.NewIfStmt(base.Pos, nil, nil, nil) // if cap(s) - len(s) < argc + nif.SetLeft(ir.NewBinaryExpr(base.Pos, ir.OLT, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OCAP, ns), ir.NewUnaryExpr(base.Pos, ir.OLEN, ns)), na)) fn := syslook("growslice") // growslice(, old []T, mincap int) (ret []T) fn = substArgTypes(fn, ns.Type().Elem(), ns.Type().Elem()) - nif.PtrBody().Set1(ir.Nod(ir.OAS, ns, - mkcall1(fn, ns.Type(), nif.PtrInit(), typename(ns.Type().Elem()), ns, - ir.Nod(ir.OADD, ir.Nod(ir.OLEN, ns, nil), na)))) + nif.PtrBody().Set1(ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), typename(ns.Type().Elem()), ns, + ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns), na)))) l = append(l, nif) nn := temp(types.Types[types.TINT]) - l = append(l, ir.Nod(ir.OAS, nn, ir.Nod(ir.OLEN, ns, nil))) // n = len(s) + l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns))) // n = len(s) - slice := ir.Nod(ir.OSLICE, ns, nil) // ...s[:n+argc] - slice.SetSliceBounds(nil, ir.Nod(ir.OADD, nn, na), nil) + slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, ns) // ...s[:n+argc] + slice.SetSliceBounds(nil, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, na), nil) slice.SetBounded(true) - l = append(l, ir.Nod(ir.OAS, ns, slice)) // s = s[:n+argc] + l = append(l, ir.NewAssignStmt(base.Pos, ns, slice)) // s = s[:n+argc] ls = n.List().Slice()[1:] for i, n := range ls { - ix := ir.Nod(ir.OINDEX, ns, nn) // s[n] ... + ix := ir.NewIndexExpr(base.Pos, ns, nn) // s[n] ... 
ix.SetBounded(true) - l = append(l, ir.Nod(ir.OAS, ix, n)) // s[n] = arg + l = append(l, ir.NewAssignStmt(base.Pos, ix, n)) // s[n] = arg if i+1 < len(ls) { - l = append(l, ir.Nod(ir.OAS, nn, ir.Nod(ir.OADD, nn, nodintconst(1)))) // n = n + 1 + l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, nodintconst(1)))) // n = n + 1 } } @@ -3153,35 +3149,35 @@ func copyany(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node { nl := temp(n.Left().Type()) nr := temp(n.Right().Type()) var l []ir.Node - l = append(l, ir.Nod(ir.OAS, nl, n.Left())) - l = append(l, ir.Nod(ir.OAS, nr, n.Right())) + l = append(l, ir.NewAssignStmt(base.Pos, nl, n.Left())) + l = append(l, ir.NewAssignStmt(base.Pos, nr, n.Right())) - nfrm := ir.Nod(ir.OSPTR, nr, nil) - nto := ir.Nod(ir.OSPTR, nl, nil) + nfrm := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nr) + nto := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nl) nlen := temp(types.Types[types.TINT]) // n = len(to) - l = append(l, ir.Nod(ir.OAS, nlen, ir.Nod(ir.OLEN, nl, nil))) + l = append(l, ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nl))) // if n > len(frm) { n = len(frm) } - nif := ir.Nod(ir.OIF, nil, nil) + nif := ir.NewIfStmt(base.Pos, nil, nil, nil) - nif.SetLeft(ir.Nod(ir.OGT, nlen, ir.Nod(ir.OLEN, nr, nil))) - nif.PtrBody().Append(ir.Nod(ir.OAS, nlen, ir.Nod(ir.OLEN, nr, nil))) + nif.SetLeft(ir.NewBinaryExpr(base.Pos, ir.OGT, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr))) + nif.PtrBody().Append(ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr))) l = append(l, nif) // if to.ptr != frm.ptr { memmove( ... ) } - ne := ir.Nod(ir.OIF, ir.Nod(ir.ONE, nto, nfrm), nil) + ne := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.ONE, nto, nfrm), nil, nil) ne.SetLikely(true) l = append(l, ne) fn := syslook("memmove") fn = substArgTypes(fn, nl.Type().Elem(), nl.Type().Elem()) nwid := ir.Node(temp(types.Types[types.TUINTPTR])) - setwid := ir.Nod(ir.OAS, nwid, conv(nlen, types.Types[types.TUINTPTR])) + setwid := ir.NewAssignStmt(base.Pos, nwid, conv(nlen, types.Types[types.TUINTPTR])) ne.PtrBody().Append(setwid) - nwid = ir.Nod(ir.OMUL, nwid, nodintconst(nl.Type().Elem().Width)) + nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, nodintconst(nl.Type().Elem().Width)) call := mkcall1(fn, nil, init, nto, nfrm, nwid) ne.PtrBody().Append(call) @@ -3255,7 +3251,7 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { // For non-empty interface, this is: // l.tab != nil && l.tab._type == type(r) var eqtype ir.Node - tab := ir.Nod(ir.OITAB, l, nil) + tab := ir.NewUnaryExpr(base.Pos, ir.OITAB, l) rtyp := typename(r.Type()) if l.Type().IsEmptyInterface() { tab.SetType(types.NewPtr(types.Types[types.TUINT8])) @@ -3360,7 +3356,7 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { } fn, needsize := eqfor(t) - call := ir.Nod(ir.OCALL, fn, nil) + call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) call.PtrList().Append(nodAddr(cmpl)) call.PtrList().Append(nodAddr(cmpr)) if needsize { @@ -3368,7 +3364,7 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { } res := ir.Node(call) if n.Op() != ir.OEQ { - res = ir.Nod(ir.ONOT, res, nil) + res = ir.NewUnaryExpr(base.Pos, ir.ONOT, res) } return finishcompare(n, res, init) } @@ -3396,8 +3392,8 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { continue } compare( - nodSym(ir.OXDOT, cmpl, sym), - nodSym(ir.OXDOT, cmpr, sym), + ir.NewSelectorExpr(base.Pos, ir.OXDOT, cmpl, sym), + ir.NewSelectorExpr(base.Pos, ir.OXDOT, cmpr, sym), ) } } 
else { @@ -3423,32 +3419,32 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { } if step == 1 { compare( - ir.Nod(ir.OINDEX, cmpl, nodintconst(i)), - ir.Nod(ir.OINDEX, cmpr, nodintconst(i)), + ir.NewIndexExpr(base.Pos, cmpl, nodintconst(i)), + ir.NewIndexExpr(base.Pos, cmpr, nodintconst(i)), ) i++ remains -= t.Elem().Width } else { elemType := t.Elem().ToUnsigned() - cmplw := ir.Node(ir.Nod(ir.OINDEX, cmpl, nodintconst(i))) + cmplw := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, nodintconst(i))) cmplw = conv(cmplw, elemType) // convert to unsigned cmplw = conv(cmplw, convType) // widen - cmprw := ir.Node(ir.Nod(ir.OINDEX, cmpr, nodintconst(i))) + cmprw := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, nodintconst(i))) cmprw = conv(cmprw, elemType) cmprw = conv(cmprw, convType) // For code like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ... // ssa will generate a single large load. for offset := int64(1); offset < step; offset++ { - lb := ir.Node(ir.Nod(ir.OINDEX, cmpl, nodintconst(i+offset))) + lb := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, nodintconst(i+offset))) lb = conv(lb, elemType) lb = conv(lb, convType) - lb = ir.Nod(ir.OLSH, lb, nodintconst(8*t.Elem().Width*offset)) - cmplw = ir.Nod(ir.OOR, cmplw, lb) - rb := ir.Node(ir.Nod(ir.OINDEX, cmpr, nodintconst(i+offset))) + lb = ir.NewBinaryExpr(base.Pos, ir.OLSH, lb, nodintconst(8*t.Elem().Width*offset)) + cmplw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmplw, lb) + rb := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, nodintconst(i+offset))) rb = conv(rb, elemType) rb = conv(rb, convType) - rb = ir.Nod(ir.OLSH, rb, nodintconst(8*t.Elem().Width*offset)) - cmprw = ir.Nod(ir.OOR, cmprw, rb) + rb = ir.NewBinaryExpr(base.Pos, ir.OLSH, rb, nodintconst(8*t.Elem().Width*offset)) + cmprw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmprw, rb) } compare(cmplw, cmprw) i += step @@ -3461,8 +3457,8 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { // We still need to use cmpl and cmpr, in case they contain // an expression which might panic. See issue 23837. 
t := temp(cmpl.Type()) - a1 := typecheck(ir.Nod(ir.OAS, t, cmpl), ctxStmt) - a2 := typecheck(ir.Nod(ir.OAS, t, cmpr), ctxStmt) + a1 := typecheck(ir.NewAssignStmt(base.Pos, t, cmpl), ctxStmt) + a2 := typecheck(ir.NewAssignStmt(base.Pos, t, cmpr), ctxStmt) init.Append(a1, a2) } return finishcompare(n, expr, init) @@ -3483,10 +3479,10 @@ func walkcompareInterface(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { eqtab, eqdata := eqinterface(n.Left(), n.Right()) var cmp ir.Node if n.Op() == ir.OEQ { - cmp = ir.Nod(ir.OANDAND, eqtab, eqdata) + cmp = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqtab, eqdata) } else { eqtab.SetOp(ir.ONE) - cmp = ir.Nod(ir.OOROR, eqtab, ir.Nod(ir.ONOT, eqdata, nil)) + cmp = ir.NewLogicalExpr(base.Pos, ir.OOROR, eqtab, ir.NewUnaryExpr(base.Pos, ir.ONOT, eqdata)) } return finishcompare(n, cmp, init) } @@ -3544,12 +3540,12 @@ func walkcompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { if len(s) > 0 { ncs = safeexpr(ncs, init) } - r := ir.Node(ir.NewBinaryExpr(base.Pos, cmp, ir.Nod(ir.OLEN, ncs, nil), nodintconst(int64(len(s))))) + r := ir.Node(ir.NewBinaryExpr(base.Pos, cmp, ir.NewUnaryExpr(base.Pos, ir.OLEN, ncs), nodintconst(int64(len(s))))) remains := len(s) for i := 0; remains > 0; { if remains == 1 || !canCombineLoads { cb := nodintconst(int64(s[i])) - ncb := ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i))) + ncb := ir.NewIndexExpr(base.Pos, ncs, nodintconst(int64(i))) r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, ncb, cb)) remains-- i++ @@ -3568,15 +3564,15 @@ func walkcompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { convType = types.Types[types.TUINT16] step = 2 } - ncsubstr := conv(ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i))), convType) + ncsubstr := conv(ir.NewIndexExpr(base.Pos, ncs, nodintconst(int64(i))), convType) csubstr := int64(s[i]) // Calculate large constant from bytes as sequence of shifts and ors. // Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ... // ssa will combine this into a single large load. for offset := 1; offset < step; offset++ { - b := conv(ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i+offset))), convType) - b = ir.Nod(ir.OLSH, b, nodintconst(int64(8*offset))) - ncsubstr = ir.Nod(ir.OOR, ncsubstr, b) + b := conv(ir.NewIndexExpr(base.Pos, ncs, nodintconst(int64(i+offset))), convType) + b = ir.NewBinaryExpr(base.Pos, ir.OLSH, b, nodintconst(int64(8*offset))) + ncsubstr = ir.NewBinaryExpr(base.Pos, ir.OOR, ncsubstr, b) csubstr |= int64(s[i+offset]) << uint8(8*offset) } csubstrPart := nodintconst(csubstr) @@ -3599,11 +3595,11 @@ func walkcompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { // memequal then tests equality up to length len. 
if n.Op() == ir.OEQ { // len(left) == len(right) && memequal(left, right, len) - r = ir.Nod(ir.OANDAND, eqlen, eqmem) + r = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqlen, eqmem) } else { // len(left) != len(right) || !memequal(left, right, len) eqlen.SetOp(ir.ONE) - r = ir.Nod(ir.OOROR, eqlen, ir.Nod(ir.ONOT, eqmem, nil)) + r = ir.NewLogicalExpr(base.Pos, ir.OOROR, eqlen, ir.NewUnaryExpr(base.Pos, ir.ONOT, eqmem)) } } else { // sys_cmpstring(s1, s2) :: 0 diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index ca894cd5f1167..1679313c86290 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -640,123 +640,3 @@ func IsBlank(n Node) bool { func IsMethod(n Node) bool { return n.Type().Recv() != nil } - -func Nod(op Op, nleft, nright Node) Node { - return NodAt(base.Pos, op, nleft, nright) -} - -func NodAt(pos src.XPos, op Op, nleft, nright Node) Node { - switch op { - default: - panic("NodAt " + op.String()) - case OADD, OAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, - OLSH, OLT, OMOD, OMUL, ONE, OOR, ORSH, OSUB, OXOR, - OCOPY, OCOMPLEX, - OEFACE: - return NewBinaryExpr(pos, op, nleft, nright) - case OADDR: - return NewAddrExpr(pos, nleft) - case OADDSTR: - return NewAddStringExpr(pos, nil) - case OANDAND, OOROR: - return NewLogicalExpr(pos, op, nleft, nright) - case OARRAYLIT, OCOMPLIT, OMAPLIT, OSTRUCTLIT, OSLICELIT: - var typ Ntype - if nright != nil { - typ = nright.(Ntype) - } - return NewCompLitExpr(pos, op, typ, nil) - case OAS: - return NewAssignStmt(pos, nleft, nright) - case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV, OSELRECV2: - n := NewAssignListStmt(pos, op, nil, nil) - return n - case OASOP: - return NewAssignOpStmt(pos, OXXX, nleft, nright) - case OBITNOT, ONEG, ONOT, OPLUS, ORECV, - OALIGNOF, OCAP, OCLOSE, OIMAG, OLEN, ONEW, ONEWOBJ, - OOFFSETOF, OPANIC, OREAL, OSIZEOF, - OCHECKNIL, OCFUNC, OIDATA, OITAB, OSPTR, OVARDEF, OVARKILL, OVARLIVE: - if nright != nil { - panic("unary nright") - } - return NewUnaryExpr(pos, op, nleft) - case OBLOCK: - return NewBlockStmt(pos, nil) - case OBREAK, OCONTINUE, OFALL, OGOTO, ORETJMP: - return NewBranchStmt(pos, op, nil) - case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, - OAPPEND, ODELETE, OGETG, OMAKE, OPRINT, OPRINTN, ORECOVER: - return NewCallExpr(pos, op, nleft, nil) - case OCASE: - return NewCaseStmt(pos, nil, nil) - case OCONV, OCONVIFACE, OCONVNOP, ORUNESTR: - return NewConvExpr(pos, op, nil, nleft) - case ODCL, ODCLCONST, ODCLTYPE: - return NewDecl(pos, op, nleft) - case ODCLFUNC: - return NewFunc(pos) - case ODEFER, OGO: - return NewGoDeferStmt(pos, op, nleft) - case ODEREF: - return NewStarExpr(pos, nleft) - case ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OXDOT: - return NewSelectorExpr(pos, op, nleft, nil) - case ODOTTYPE, ODOTTYPE2: - var typ Ntype - if nright != nil { - typ = nright.(Ntype) - } - n := NewTypeAssertExpr(pos, nleft, typ) - if op != ODOTTYPE { - n.SetOp(op) - } - return n - case OFOR: - return NewForStmt(pos, nil, nleft, nright, nil) - case OIF: - return NewIfStmt(pos, nleft, nil, nil) - case OINDEX, OINDEXMAP: - n := NewIndexExpr(pos, nleft, nright) - if op != OINDEX { - n.SetOp(op) - } - return n - case OINLMARK: - return NewInlineMarkStmt(pos, types.BADWIDTH) - case OKEY: - return NewKeyExpr(pos, nleft, nright) - case OSTRUCTKEY: - return NewStructKeyExpr(pos, nil, nleft) - case OLABEL: - return NewLabelStmt(pos, nil) - case OLITERAL, OTYPE, OIOTA: - return newNameAt(pos, op, nil) - case OMAKECHAN, OMAKEMAP, OMAKESLICE, OMAKESLICECOPY: - return 
NewMakeExpr(pos, op, nleft, nright) - case ONIL: - return NewNilExpr(pos) - case OPACK: - return NewPkgName(pos, nil, nil) - case OPAREN: - return NewParenExpr(pos, nleft) - case ORANGE: - return NewRangeStmt(pos, nil, nright, nil) - case ORESULT: - return NewResultExpr(pos, nil, types.BADWIDTH) - case ORETURN: - return NewReturnStmt(pos, nil) - case OSELECT: - return NewSelectStmt(pos, nil) - case OSEND: - return NewSendStmt(pos, nleft, nright) - case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR: - return NewSliceExpr(pos, op, nleft) - case OSLICEHEADER: - return NewSliceHeaderExpr(pos, nil, nleft, nil, nil) - case OSWITCH: - return NewSwitchStmt(pos, nleft, nil) - case OINLCALL: - return NewInlinedCallExpr(pos, nil, nil) - } -} From 14d667341f9c8c58a9fb38d4954766a230eacf3b Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Tue, 22 Dec 2020 23:56:32 -0500 Subject: [PATCH 219/474] [dev.regabi] cmd/compile: remove Node.Left etc [generated] This automated CL adds type assertions on the true branches of n.Op() equality tests, to redeclare n with a more specific type, when it is safe to do so. (That is, when n is not reassigned with a more general type, when n is not reassigned and then used outside the scope, and so on.) All the "unsafe" times that the automated tool would avoid have been removed or rewritten in earlier CLs, so that after this CL and the next one, which removes the use of ir.Nod, every use of the Left, Right, and so on methods is done using concrete types, never the Node interface. Having done that, the CL locks in the progress by deleting many of the access methods, including Left, SetLeft and so on, from the Node interface. There are still uses of Name, Func, Sym, some of the tracking bits, and a few other miscellaneous fields, but all the main access methods are gone from the Node interface. The others will be cleaned up in smaller CLs. Passes buildall w/ toolstash -cmp. 
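For readers skimming the generated diff, the shape of the rewrite is
worth seeing in isolation. A minimal, self-contained sketch follows;
the Node, Lit, and BinaryExpr declarations here are illustrative
stand-ins, not the compiler's real ir types. The point is the
redeclaration "n := n.(*T)" on the branch where the n.Op() test has
pinned down n's dynamic type, after which only the concrete type's
fields and methods are needed:

    package main

    import "fmt"

    type Op int

    const (
        OLIT Op = iota
        OADD
    )

    // Stand-in for ir.Node after this CL: only Op remains in the
    // interface; the data lives on the concrete node types.
    type Node interface{ Op() Op }

    type Lit struct{ Val int }

    func (*Lit) Op() Op { return OLIT }

    type BinaryExpr struct{ X, Y Node }

    func (*BinaryExpr) Op() Op { return OADD }

    func eval(n Node) int {
        switch n.Op() {
        case OLIT:
            // The generated rewrite: redeclare n with its concrete
            // type on the branch that pinned it down.
            n := n.(*Lit)
            return n.Val
        case OADD:
            n := n.(*BinaryExpr)
            return eval(n.X) + eval(n.Y)
        }
        panic("unreachable")
    }

    func main() {
        fmt.Println(eval(&BinaryExpr{&Lit{1}, &Lit{2}})) // prints 3
    }

The rf 'typeassert' script below records, for every Op value, the
concrete type that such a redeclaration may safely assume.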
[git-generate] cd src/cmd/compile/internal/gc rf 'typeassert { import "cmd/compile/internal/ir" var n ir.Node n.Op() == ir.OADD -> n.(*ir.BinaryExpr) n.Op() == ir.OADDR -> n.(*ir.AddrExpr) n.Op() == ir.OADDSTR -> n.(*ir.AddStringExpr) n.Op() == ir.OALIGNOF -> n.(*ir.UnaryExpr) n.Op() == ir.OAND -> n.(*ir.BinaryExpr) n.Op() == ir.OANDAND -> n.(*ir.LogicalExpr) n.Op() == ir.OANDNOT -> n.(*ir.BinaryExpr) n.Op() == ir.OAPPEND -> n.(*ir.CallExpr) n.Op() == ir.OARRAYLIT -> n.(*ir.CompLitExpr) n.Op() == ir.OAS -> n.(*ir.AssignStmt) n.Op() == ir.OAS2 -> n.(*ir.AssignListStmt) n.Op() == ir.OAS2DOTTYPE -> n.(*ir.AssignListStmt) n.Op() == ir.OAS2FUNC -> n.(*ir.AssignListStmt) n.Op() == ir.OAS2MAPR -> n.(*ir.AssignListStmt) n.Op() == ir.OAS2RECV -> n.(*ir.AssignListStmt) n.Op() == ir.OASOP -> n.(*ir.AssignOpStmt) n.Op() == ir.OBITNOT -> n.(*ir.UnaryExpr) n.Op() == ir.OBLOCK -> n.(*ir.BlockStmt) n.Op() == ir.OBREAK -> n.(*ir.BranchStmt) n.Op() == ir.OBYTES2STR -> n.(*ir.ConvExpr) n.Op() == ir.OBYTES2STRTMP -> n.(*ir.ConvExpr) n.Op() == ir.OCALL -> n.(*ir.CallExpr) n.Op() == ir.OCALLFUNC -> n.(*ir.CallExpr) n.Op() == ir.OCALLINTER -> n.(*ir.CallExpr) n.Op() == ir.OCALLMETH -> n.(*ir.CallExpr) n.Op() == ir.OCALLPART -> n.(*ir.CallPartExpr) n.Op() == ir.OCAP -> n.(*ir.UnaryExpr) n.Op() == ir.OCASE -> n.(*ir.CaseStmt) n.Op() == ir.OCFUNC -> n.(*ir.UnaryExpr) n.Op() == ir.OCHECKNIL -> n.(*ir.UnaryExpr) n.Op() == ir.OCLOSE -> n.(*ir.UnaryExpr) n.Op() == ir.OCOMPLEX -> n.(*ir.BinaryExpr) n.Op() == ir.OCOMPLIT -> n.(*ir.CompLitExpr) n.Op() == ir.OCONTINUE -> n.(*ir.BranchStmt) n.Op() == ir.OCONV -> n.(*ir.ConvExpr) n.Op() == ir.OCONVIFACE -> n.(*ir.ConvExpr) n.Op() == ir.OCONVNOP -> n.(*ir.ConvExpr) n.Op() == ir.OCOPY -> n.(*ir.BinaryExpr) n.Op() == ir.ODCL -> n.(*ir.Decl) n.Op() == ir.ODCLCONST -> n.(*ir.Decl) n.Op() == ir.ODCLFUNC -> n.(*ir.Func) n.Op() == ir.ODCLTYPE -> n.(*ir.Decl) n.Op() == ir.ODEFER -> n.(*ir.GoDeferStmt) n.Op() == ir.ODELETE -> n.(*ir.CallExpr) n.Op() == ir.ODEREF -> n.(*ir.StarExpr) n.Op() == ir.ODIV -> n.(*ir.BinaryExpr) n.Op() == ir.ODOT -> n.(*ir.SelectorExpr) n.Op() == ir.ODOTINTER -> n.(*ir.SelectorExpr) n.Op() == ir.ODOTMETH -> n.(*ir.SelectorExpr) n.Op() == ir.ODOTPTR -> n.(*ir.SelectorExpr) n.Op() == ir.ODOTTYPE -> n.(*ir.TypeAssertExpr) n.Op() == ir.ODOTTYPE2 -> n.(*ir.TypeAssertExpr) n.Op() == ir.OEFACE -> n.(*ir.BinaryExpr) n.Op() == ir.OEQ -> n.(*ir.BinaryExpr) n.Op() == ir.OFALL -> n.(*ir.BranchStmt) n.Op() == ir.OFOR -> n.(*ir.ForStmt) n.Op() == ir.OFORUNTIL -> n.(*ir.ForStmt) n.Op() == ir.OGE -> n.(*ir.BinaryExpr) n.Op() == ir.OGETG -> n.(*ir.CallExpr) n.Op() == ir.OGO -> n.(*ir.GoDeferStmt) n.Op() == ir.OGOTO -> n.(*ir.BranchStmt) n.Op() == ir.OGT -> n.(*ir.BinaryExpr) n.Op() == ir.OIDATA -> n.(*ir.UnaryExpr) n.Op() == ir.OIF -> n.(*ir.IfStmt) n.Op() == ir.OIMAG -> n.(*ir.UnaryExpr) n.Op() == ir.OINDEX -> n.(*ir.IndexExpr) n.Op() == ir.OINDEXMAP -> n.(*ir.IndexExpr) n.Op() == ir.OINLCALL -> n.(*ir.InlinedCallExpr) n.Op() == ir.OINLMARK -> n.(*ir.InlineMarkStmt) n.Op() == ir.OITAB -> n.(*ir.UnaryExpr) n.Op() == ir.OKEY -> n.(*ir.KeyExpr) n.Op() == ir.OLABEL -> n.(*ir.LabelStmt) n.Op() == ir.OLE -> n.(*ir.BinaryExpr) n.Op() == ir.OLEN -> n.(*ir.UnaryExpr) n.Op() == ir.OLSH -> n.(*ir.BinaryExpr) n.Op() == ir.OLT -> n.(*ir.BinaryExpr) n.Op() == ir.OMAKE -> n.(*ir.CallExpr) n.Op() == ir.OMAKECHAN -> n.(*ir.MakeExpr) n.Op() == ir.OMAKEMAP -> n.(*ir.MakeExpr) n.Op() == ir.OMAKESLICE -> n.(*ir.MakeExpr) n.Op() == ir.OMAKESLICECOPY -> n.(*ir.MakeExpr) n.Op() == ir.OMAPLIT -> 
n.(*ir.CompLitExpr) n.Op() == ir.OMETHEXPR -> n.(*ir.MethodExpr) n.Op() == ir.OMOD -> n.(*ir.BinaryExpr) n.Op() == ir.OMUL -> n.(*ir.BinaryExpr) n.Op() == ir.ONAME -> n.(*ir.Name) n.Op() == ir.ONE -> n.(*ir.BinaryExpr) n.Op() == ir.ONEG -> n.(*ir.UnaryExpr) n.Op() == ir.ONEW -> n.(*ir.UnaryExpr) n.Op() == ir.ONEWOBJ -> n.(*ir.UnaryExpr) n.Op() == ir.ONIL -> n.(*ir.NilExpr) n.Op() == ir.ONOT -> n.(*ir.UnaryExpr) n.Op() == ir.OOFFSETOF -> n.(*ir.UnaryExpr) n.Op() == ir.OOR -> n.(*ir.BinaryExpr) n.Op() == ir.OOROR -> n.(*ir.LogicalExpr) n.Op() == ir.OPACK -> n.(*ir.PkgName) n.Op() == ir.OPANIC -> n.(*ir.UnaryExpr) n.Op() == ir.OPAREN -> n.(*ir.ParenExpr) n.Op() == ir.OPLUS -> n.(*ir.UnaryExpr) n.Op() == ir.OPRINT -> n.(*ir.CallExpr) n.Op() == ir.OPRINTN -> n.(*ir.CallExpr) n.Op() == ir.OPTRLIT -> n.(*ir.AddrExpr) n.Op() == ir.ORANGE -> n.(*ir.RangeStmt) n.Op() == ir.OREAL -> n.(*ir.UnaryExpr) n.Op() == ir.ORECOVER -> n.(*ir.CallExpr) n.Op() == ir.ORECV -> n.(*ir.UnaryExpr) n.Op() == ir.ORESULT -> n.(*ir.ResultExpr) n.Op() == ir.ORETJMP -> n.(*ir.BranchStmt) n.Op() == ir.ORETURN -> n.(*ir.ReturnStmt) n.Op() == ir.ORSH -> n.(*ir.BinaryExpr) n.Op() == ir.ORUNES2STR -> n.(*ir.ConvExpr) n.Op() == ir.ORUNESTR -> n.(*ir.ConvExpr) n.Op() == ir.OSELECT -> n.(*ir.SelectStmt) n.Op() == ir.OSELRECV2 -> n.(*ir.AssignListStmt) n.Op() == ir.OSEND -> n.(*ir.SendStmt) n.Op() == ir.OSIZEOF -> n.(*ir.UnaryExpr) n.Op() == ir.OSLICE -> n.(*ir.SliceExpr) n.Op() == ir.OSLICE3 -> n.(*ir.SliceExpr) n.Op() == ir.OSLICE3ARR -> n.(*ir.SliceExpr) n.Op() == ir.OSLICEARR -> n.(*ir.SliceExpr) n.Op() == ir.OSLICEHEADER -> n.(*ir.SliceHeaderExpr) n.Op() == ir.OSLICELIT -> n.(*ir.CompLitExpr) n.Op() == ir.OSLICESTR -> n.(*ir.SliceExpr) n.Op() == ir.OSPTR -> n.(*ir.UnaryExpr) n.Op() == ir.OSTR2BYTES -> n.(*ir.ConvExpr) n.Op() == ir.OSTR2BYTESTMP -> n.(*ir.ConvExpr) n.Op() == ir.OSTR2RUNES -> n.(*ir.ConvExpr) n.Op() == ir.OSTRUCTLIT -> n.(*ir.CompLitExpr) n.Op() == ir.OSUB -> n.(*ir.BinaryExpr) n.Op() == ir.OSWITCH -> n.(*ir.SwitchStmt) n.Op() == ir.OTYPESW -> n.(*ir.TypeSwitchGuard) n.Op() == ir.OVARDEF -> n.(*ir.UnaryExpr) n.Op() == ir.OVARKILL -> n.(*ir.UnaryExpr) n.Op() == ir.OVARLIVE -> n.(*ir.UnaryExpr) n.Op() == ir.OXDOT -> n.(*ir.SelectorExpr) n.Op() == ir.OXOR -> n.(*ir.BinaryExpr) } ' cd ../ir rf ' rm \ Node.SetOp \ miniNode.SetOp \ Node.Func \ miniNode.Func \ Node.Left Node.SetLeft \ miniNode.Left miniNode.SetLeft \ Node.Right Node.SetRight \ miniNode.Right miniNode.SetRight \ Node.List Node.PtrList Node.SetList \ miniNode.List miniNode.PtrList miniNode.SetList \ Node.Rlist Node.PtrRlist Node.SetRlist \ miniNode.Rlist miniNode.PtrRlist miniNode.SetRlist \ Node.Body Node.PtrBody Node.SetBody \ miniNode.Body miniNode.PtrBody miniNode.SetBody \ Node.SubOp Node.SetSubOp \ miniNode.SubOp miniNode.SetSubOp \ Node.SetSym \ miniNode.SetSym \ Node.Offset Node.SetOffset \ miniNode.Offset miniNode.SetOffset \ Node.Class Node.SetClass \ miniNode.Class miniNode.SetClass \ Node.Iota Node.SetIota \ miniNode.Iota miniNode.SetIota \ Node.Colas Node.SetColas \ miniNode.Colas miniNode.SetColas \ Node.Transient Node.SetTransient \ miniNode.Transient miniNode.SetTransient \ Node.Implicit Node.SetImplicit \ miniNode.Implicit miniNode.SetImplicit \ Node.IsDDD Node.SetIsDDD \ miniNode.IsDDD miniNode.SetIsDDD \ Node.MarkReadonly \ miniNode.MarkReadonly \ Node.Likely Node.SetLikely \ miniNode.Likely miniNode.SetLikely \ Node.SliceBounds Node.SetSliceBounds \ miniNode.SliceBounds miniNode.SetSliceBounds \ Node.NoInline Node.SetNoInline \ 
miniNode.NoInline miniNode.SetNoInline \ Node.IndexMapLValue Node.SetIndexMapLValue \ miniNode.IndexMapLValue miniNode.SetIndexMapLValue \ Node.ResetAux \ miniNode.ResetAux \ Node.HasBreak Node.SetHasBreak \ miniNode.HasBreak miniNode.SetHasBreak \ Node.Bounded Node.SetBounded \ miniNode.Bounded miniNode.SetBounded \ miniNode.Embedded miniNode.SetEmbedded \ miniNode.Int64Val miniNode.Uint64Val miniNode.CanInt64 \ miniNode.BoolVal miniNode.StringVal \ miniNode.TChanDir miniNode.SetTChanDir \ miniNode.Format \ miniNode.copy miniNode.doChildren miniNode.editChildren \ ' Change-Id: I2a05b535963b43f83b1849fcf653f82b99af6035 Reviewed-on: https://go-review.googlesource.com/c/go/+/277934 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/const.go | 16 ++++ src/cmd/compile/internal/gc/dcl.go | 2 + src/cmd/compile/internal/gc/iexport.go | 41 +++++++++ src/cmd/compile/internal/gc/iimport.go | 2 + src/cmd/compile/internal/gc/initorder.go | 6 ++ src/cmd/compile/internal/gc/inl.go | 21 +++++ src/cmd/compile/internal/gc/noder.go | 6 ++ src/cmd/compile/internal/gc/order.go | 47 ++++++++++ src/cmd/compile/internal/gc/scc.go | 4 + src/cmd/compile/internal/gc/select.go | 11 +++ src/cmd/compile/internal/gc/sinit.go | 20 ++++ src/cmd/compile/internal/gc/ssa.go | 72 +++++++++++++++ src/cmd/compile/internal/gc/subr.go | 19 ++++ src/cmd/compile/internal/gc/swt.go | 1 + src/cmd/compile/internal/gc/typecheck.go | 75 +++++++++++++++ src/cmd/compile/internal/gc/unsafe.go | 4 + src/cmd/compile/internal/gc/walk.go | 74 +++++++++++++++ src/cmd/compile/internal/ir/mini.go | 112 +++-------------------- src/cmd/compile/internal/ir/node.go | 46 ---------- 19 files changed, 435 insertions(+), 144 deletions(-) diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index f8e60ea0a3250..e54cd0a1028bb 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -204,6 +204,7 @@ func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir return n case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: + n := n.(*ir.BinaryExpr) if !t.IsBoolean() { break } @@ -211,6 +212,7 @@ func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir return n case ir.OLSH, ir.ORSH: + n := n.(*ir.BinaryExpr) n.SetLeft(convlit1(n.Left(), t, explicit, nil)) n.SetType(n.Left().Type()) if n.Type() != nil && !n.Type().IsInteger() { @@ -449,6 +451,7 @@ func evalConst(n ir.Node) ir.Node { // Pick off just the opcodes that can be constant evaluated. 
switch n.Op() { case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT: + n := n.(*ir.UnaryExpr) nl := n.Left() if nl.Op() == ir.OLITERAL { var prec uint @@ -459,6 +462,7 @@ func evalConst(n ir.Node) ir.Node { } case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT: + n := n.(*ir.BinaryExpr) nl, nr := n.Left(), n.Right() if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL { rval := nr.Val() @@ -483,18 +487,21 @@ func evalConst(n ir.Node) ir.Node { } case ir.OOROR, ir.OANDAND: + n := n.(*ir.LogicalExpr) nl, nr := n.Left(), n.Right() if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL { return origConst(n, constant.BinaryOp(nl.Val(), tokenForOp[n.Op()], nr.Val())) } case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: + n := n.(*ir.BinaryExpr) nl, nr := n.Left(), n.Right() if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL { return origBoolConst(n, constant.Compare(nl.Val(), tokenForOp[n.Op()], nr.Val())) } case ir.OLSH, ir.ORSH: + n := n.(*ir.BinaryExpr) nl, nr := n.Left(), n.Right() if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL { // shiftBound from go/types; "so we can express smallestFloat64" @@ -509,12 +516,14 @@ func evalConst(n ir.Node) ir.Node { } case ir.OCONV, ir.ORUNESTR: + n := n.(*ir.ConvExpr) nl := n.Left() if ir.OKForConst[n.Type().Kind()] && nl.Op() == ir.OLITERAL { return origConst(n, convertVal(nl.Val(), n.Type(), true)) } case ir.OCONVNOP: + n := n.(*ir.ConvExpr) nl := n.Left() if ir.OKForConst[n.Type().Kind()] && nl.Op() == ir.OLITERAL { // set so n.Orig gets OCONV instead of OCONVNOP @@ -524,6 +533,7 @@ func evalConst(n ir.Node) ir.Node { case ir.OADDSTR: // Merge adjacent constants in the argument list. + n := n.(*ir.AddStringExpr) s := n.List().Slice() need := 0 for i := 0; i < len(s); i++ { @@ -567,6 +577,7 @@ func evalConst(n ir.Node) ir.Node { return nn case ir.OCAP, ir.OLEN: + n := n.(*ir.UnaryExpr) nl := n.Left() switch nl.Type().Kind() { case types.TSTRING: @@ -580,21 +591,25 @@ func evalConst(n ir.Node) ir.Node { } case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF: + n := n.(*ir.UnaryExpr) return origIntConst(n, evalunsafe(n)) case ir.OREAL: + n := n.(*ir.UnaryExpr) nl := n.Left() if nl.Op() == ir.OLITERAL { return origConst(n, constant.Real(nl.Val())) } case ir.OIMAG: + n := n.(*ir.UnaryExpr) nl := n.Left() if nl.Op() == ir.OLITERAL { return origConst(n, constant.Imag(nl.Val())) } case ir.OCOMPLEX: + n := n.(*ir.BinaryExpr) nl, nr := n.Left(), n.Right() if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL { return origConst(n, makeComplex(nl.Val(), nr.Val())) @@ -854,6 +869,7 @@ type constSetKey struct { // n must not be an untyped constant. func (s *constSet) add(pos src.XPos, n ir.Node, what, where string) { if conv := n; conv.Op() == ir.OCONVIFACE { + conv := conv.(*ir.ConvExpr) if conv.Implicit() { n = conv.Left() } diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 3cfb24f2fc4b8..d85f10faf3c43 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -229,6 +229,7 @@ func oldname(s *types.Sym) ir.Node { // are parsing x := 5 inside the closure, until we get to // the := it looks like a reference to the outer x so we'll // make x a closure variable unnecessarily. + n := n.(*ir.Name) c := n.Name().Innermost if c == nil || c.Curfn != Curfn { // Do not have a closure var for the active closure yet; make one. 
@@ -890,6 +891,7 @@ func (c *nowritebarrierrecChecker) findExtraCalls(nn ir.Node) { arg := n.List().First() switch arg.Op() { case ir.ONAME: + arg := arg.(*ir.Name) callee = arg.Name().Defn.(*ir.Func) case ir.OCLOSURE: arg := arg.(*ir.ClosureExpr) diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index c03445044df42..0f7d62c5bfe39 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -1067,11 +1067,13 @@ func (w *exportWriter) stmt(n ir.Node) { // (At the moment neither the parser nor the typechecker // generate OBLOCK nodes except to denote an empty // function body, although that may change.) + n := n.(*ir.BlockStmt) for _, n := range n.List().Slice() { w.stmt(n) } case ir.ODCL: + n := n.(*ir.Decl) w.op(ir.ODCL) w.pos(n.Left().Pos()) w.localName(n.Left().(*ir.Name)) @@ -1081,6 +1083,7 @@ func (w *exportWriter) stmt(n ir.Node) { // Don't export "v = " initializing statements, hope they're always // preceded by the DCL which will be re-parsed and typecheck to reproduce // the "v = " again. + n := n.(*ir.AssignStmt) if n.Right() != nil { w.op(ir.OAS) w.pos(n.Pos()) @@ -1099,12 +1102,14 @@ func (w *exportWriter) stmt(n ir.Node) { } case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV: + n := n.(*ir.AssignListStmt) w.op(ir.OAS2) w.pos(n.Pos()) w.exprList(n.List()) w.exprList(n.Rlist()) case ir.ORETURN: + n := n.(*ir.ReturnStmt) w.op(ir.ORETURN) w.pos(n.Pos()) w.exprList(n.List()) @@ -1113,11 +1118,13 @@ func (w *exportWriter) stmt(n ir.Node) { // unreachable - generated by compiler for trampolin routines case ir.OGO, ir.ODEFER: + n := n.(*ir.GoDeferStmt) w.op(n.Op()) w.pos(n.Pos()) w.expr(n.Left()) case ir.OIF: + n := n.(*ir.IfStmt) w.op(ir.OIF) w.pos(n.Pos()) w.stmtList(n.Init()) @@ -1126,6 +1133,7 @@ func (w *exportWriter) stmt(n ir.Node) { w.stmtList(n.Rlist()) case ir.OFOR: + n := n.(*ir.ForStmt) w.op(ir.OFOR) w.pos(n.Pos()) w.stmtList(n.Init()) @@ -1133,6 +1141,7 @@ func (w *exportWriter) stmt(n ir.Node) { w.stmtList(n.Body()) case ir.ORANGE: + n := n.(*ir.RangeStmt) w.op(ir.ORANGE) w.pos(n.Pos()) w.stmtList(n.List()) @@ -1140,6 +1149,7 @@ func (w *exportWriter) stmt(n ir.Node) { w.stmtList(n.Body()) case ir.OSELECT: + n := n.(*ir.SelectStmt) w.op(n.Op()) w.pos(n.Pos()) w.stmtList(n.Init()) @@ -1147,6 +1157,7 @@ func (w *exportWriter) stmt(n ir.Node) { w.caseList(n) case ir.OSWITCH: + n := n.(*ir.SwitchStmt) w.op(n.Op()) w.pos(n.Pos()) w.stmtList(n.Init()) @@ -1157,6 +1168,7 @@ func (w *exportWriter) stmt(n ir.Node) { // handled by caseList case ir.OFALL: + n := n.(*ir.BranchStmt) w.op(ir.OFALL) w.pos(n.Pos()) @@ -1217,16 +1229,20 @@ func (w *exportWriter) exprList(list ir.Nodes) { func simplifyForExport(n ir.Node) ir.Node { switch n.Op() { case ir.OPAREN: + n := n.(*ir.ParenExpr) return simplifyForExport(n.Left()) case ir.ODEREF: + n := n.(*ir.StarExpr) if n.Implicit() { return simplifyForExport(n.Left()) } case ir.OADDR: + n := n.(*ir.AddrExpr) if n.Implicit() { return simplifyForExport(n.Left()) } case ir.ODOT, ir.ODOTPTR: + n := n.(*ir.SelectorExpr) if n.Implicit() { return simplifyForExport(n.Left()) } @@ -1240,6 +1256,7 @@ func (w *exportWriter) expr(n ir.Node) { // expressions // (somewhat closely following the structure of exprfmt in fmt.go) case ir.ONIL: + n := n.(*ir.NilExpr) if !n.Type().HasNil() { base.Fatalf("unexpected type for nil: %v", n.Type()) } @@ -1284,6 +1301,7 @@ func (w *exportWriter) expr(n ir.Node) { w.typ(n.Type()) case ir.OTYPESW: + n := 
n.(*ir.TypeSwitchGuard) w.op(ir.OTYPESW) w.pos(n.Pos()) var s *types.Sym @@ -1306,23 +1324,27 @@ func (w *exportWriter) expr(n ir.Node) { // should have been resolved by typechecking - handled by default case case ir.OPTRLIT: + n := n.(*ir.AddrExpr) w.op(ir.OADDR) w.pos(n.Pos()) w.expr(n.Left()) case ir.OSTRUCTLIT: + n := n.(*ir.CompLitExpr) w.op(ir.OSTRUCTLIT) w.pos(n.Pos()) w.typ(n.Type()) w.fieldList(n.List()) // special handling of field names case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT: + n := n.(*ir.CompLitExpr) w.op(ir.OCOMPLIT) w.pos(n.Pos()) w.typ(n.Type()) w.exprList(n.List()) case ir.OKEY: + n := n.(*ir.KeyExpr) w.op(ir.OKEY) w.pos(n.Pos()) w.exprsOrNil(n.Left(), n.Right()) @@ -1332,30 +1354,35 @@ func (w *exportWriter) expr(n ir.Node) { case ir.OCALLPART: // An OCALLPART is an OXDOT before type checking. + n := n.(*ir.CallPartExpr) w.op(ir.OXDOT) w.pos(n.Pos()) w.expr(n.Left()) w.selector(n.Sym()) case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH: + n := n.(*ir.SelectorExpr) w.op(ir.OXDOT) w.pos(n.Pos()) w.expr(n.Left()) w.selector(n.Sym()) case ir.ODOTTYPE, ir.ODOTTYPE2: + n := n.(*ir.TypeAssertExpr) w.op(ir.ODOTTYPE) w.pos(n.Pos()) w.expr(n.Left()) w.typ(n.Type()) case ir.OINDEX, ir.OINDEXMAP: + n := n.(*ir.IndexExpr) w.op(ir.OINDEX) w.pos(n.Pos()) w.expr(n.Left()) w.expr(n.Right()) case ir.OSLICE, ir.OSLICESTR, ir.OSLICEARR: + n := n.(*ir.SliceExpr) w.op(ir.OSLICE) w.pos(n.Pos()) w.expr(n.Left()) @@ -1363,6 +1390,7 @@ func (w *exportWriter) expr(n ir.Node) { w.exprsOrNil(low, high) case ir.OSLICE3, ir.OSLICE3ARR: + n := n.(*ir.SliceExpr) w.op(ir.OSLICE3) w.pos(n.Pos()) w.expr(n.Left()) @@ -1372,6 +1400,7 @@ func (w *exportWriter) expr(n ir.Node) { case ir.OCOPY, ir.OCOMPLEX: // treated like other builtin calls (see e.g., OREAL) + n := n.(*ir.BinaryExpr) w.op(n.Op()) w.pos(n.Pos()) w.expr(n.Left()) @@ -1379,18 +1408,21 @@ func (w *exportWriter) expr(n ir.Node) { w.op(ir.OEND) case ir.OCONV, ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2RUNES, ir.ORUNESTR: + n := n.(*ir.ConvExpr) w.op(ir.OCONV) w.pos(n.Pos()) w.expr(n.Left()) w.typ(n.Type()) case ir.OREAL, ir.OIMAG, ir.OCAP, ir.OCLOSE, ir.OLEN, ir.ONEW, ir.OPANIC: + n := n.(*ir.UnaryExpr) w.op(n.Op()) w.pos(n.Pos()) w.expr(n.Left()) w.op(ir.OEND) case ir.OAPPEND, ir.ODELETE, ir.ORECOVER, ir.OPRINT, ir.OPRINTN: + n := n.(*ir.CallExpr) w.op(n.Op()) w.pos(n.Pos()) w.exprList(n.List()) // emits terminating OEND @@ -1402,6 +1434,7 @@ func (w *exportWriter) expr(n ir.Node) { } case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OGETG: + n := n.(*ir.CallExpr) w.op(ir.OCALL) w.pos(n.Pos()) w.stmtList(n.Init()) @@ -1410,6 +1443,7 @@ func (w *exportWriter) expr(n ir.Node) { w.bool(n.IsDDD()) case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE: + n := n.(*ir.MakeExpr) w.op(n.Op()) // must keep separate from OMAKE for importer w.pos(n.Pos()) w.typ(n.Type()) @@ -1428,21 +1462,25 @@ func (w *exportWriter) expr(n ir.Node) { // unary expressions case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.ORECV: + n := n.(*ir.UnaryExpr) w.op(n.Op()) w.pos(n.Pos()) w.expr(n.Left()) case ir.OADDR: + n := n.(*ir.AddrExpr) w.op(n.Op()) w.pos(n.Pos()) w.expr(n.Left()) case ir.ODEREF: + n := n.(*ir.StarExpr) w.op(n.Op()) w.pos(n.Pos()) w.expr(n.Left()) case ir.OSEND: + n := n.(*ir.SendStmt) w.op(n.Op()) w.pos(n.Pos()) w.expr(n.Left()) @@ -1451,18 +1489,21 @@ func (w *exportWriter) expr(n ir.Node) { // binary expressions case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT, ir.OLSH, 
ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.ORSH, ir.OSUB, ir.OXOR: + n := n.(*ir.BinaryExpr) w.op(n.Op()) w.pos(n.Pos()) w.expr(n.Left()) w.expr(n.Right()) case ir.OANDAND, ir.OOROR: + n := n.(*ir.LogicalExpr) w.op(n.Op()) w.pos(n.Pos()) w.expr(n.Left()) w.expr(n.Right()) case ir.OADDSTR: + n := n.(*ir.AddStringExpr) w.op(ir.OADDSTR) w.pos(n.Pos()) w.exprList(n.List()) diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 1148d329a3cb2..40f76cae7bb60 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -756,6 +756,7 @@ func (r *importReader) stmtList() []ir.Node { // but the handling of ODCL calls liststmt, which creates one. // Inline them into the statement list. if n.Op() == ir.OBLOCK { + n := n.(*ir.BlockStmt) list = append(list, n.List().Slice()...) } else { list = append(list, n) @@ -802,6 +803,7 @@ func (r *importReader) exprList() []ir.Node { func (r *importReader) expr() ir.Node { n := r.node() if n != nil && n.Op() == ir.OBLOCK { + n := n.(*ir.BlockStmt) base.Fatalf("unexpected block node: %v", n) } return n diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go index c9c3361d3cf81..f99c6dd72c991 100644 --- a/src/cmd/compile/internal/gc/initorder.go +++ b/src/cmd/compile/internal/gc/initorder.go @@ -254,10 +254,13 @@ func collectDeps(n ir.Node, transitive bool) ir.NameSet { d := initDeps{transitive: transitive} switch n.Op() { case ir.OAS: + n := n.(*ir.AssignStmt) d.inspect(n.Right()) case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV: + n := n.(*ir.AssignListStmt) d.inspect(n.Rlist().First()) case ir.ODCLFUNC: + n := n.(*ir.Func) d.inspectList(n.Body()) default: base.Fatalf("unexpected Op: %v", n.Op()) @@ -286,6 +289,7 @@ func (d *initDeps) inspectList(l ir.Nodes) { ir.VisitList(l, d.cachedVisit()) } func (d *initDeps) visit(n ir.Node) { switch n.Op() { case ir.OMETHEXPR: + n := n.(*ir.MethodExpr) d.foundDep(methodExprName(n)) case ir.ONAME: @@ -355,8 +359,10 @@ func (s *declOrder) Pop() interface{} { func firstLHS(n ir.Node) *ir.Name { switch n.Op() { case ir.OAS: + n := n.(*ir.AssignStmt) return n.Left().Name() case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2RECV, ir.OAS2MAPR: + n := n.(*ir.AssignListStmt) return n.List().First().Name() } diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 122c19f6df18e..7cb79468065a9 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -377,6 +377,7 @@ func (v *hairyVisitor) doNode(n ir.Node) error { // Call is okay if inlinable and we have the budget for the body. case ir.OCALLMETH: + n := n.(*ir.CallExpr) t := n.Left().Type() if t == nil { base.Fatalf("no function type for [%p] %+v\n", n.Left(), n.Left()) @@ -429,22 +430,26 @@ func (v *hairyVisitor) doNode(n ir.Node) error { return nil case ir.OFOR, ir.OFORUNTIL: + n := n.(*ir.ForStmt) if n.Sym() != nil { return errors.New("labeled control") } case ir.OSWITCH: + n := n.(*ir.SwitchStmt) if n.Sym() != nil { return errors.New("labeled control") } // case ir.ORANGE, ir.OSELECT in "unhandled" above case ir.OBREAK, ir.OCONTINUE: + n := n.(*ir.BranchStmt) if n.Sym() != nil { // Should have short-circuited due to labeled control error above. base.Fatalf("unexpected labeled break/continue: %v", n) } case ir.OIF: + n := n.(*ir.IfStmt) if ir.IsConst(n.Left(), constant.Bool) { // This if and the condition cost nothing. // TODO(rsc): It seems strange that we visit the dead branch. 
@@ -569,8 +574,10 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No switch n.Op() { case ir.ODEFER, ir.OGO: + n := n.(*ir.GoDeferStmt) switch call := n.Left(); call.Op() { case ir.OCALLFUNC, ir.OCALLMETH: + call := call.(*ir.CallExpr) call.SetNoInline(true) } @@ -581,6 +588,7 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No case ir.OCALLMETH: // Prevent inlining some reflect.Value methods when using checkptr, // even when package reflect was compiled without it (#35073). + n := n.(*ir.CallExpr) if s := n.Left().Sym(); base.Debug.Checkptr != 0 && isReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") { return n } @@ -591,6 +599,7 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No ir.EditChildren(n, edit) if as := n; as.Op() == ir.OAS2FUNC { + as := as.(*ir.AssignListStmt) if as.Rlist().First().Op() == ir.OINLCALL { as.PtrRlist().Set(inlconv2list(as.Rlist().First().(*ir.InlinedCallExpr))) as.SetOp(ir.OAS2) @@ -604,6 +613,7 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No // switch at the top of this function. switch n.Op() { case ir.OCALLFUNC, ir.OCALLMETH: + n := n.(*ir.CallExpr) if n.NoInline() { return n } @@ -673,6 +683,7 @@ func inlCallee(fn ir.Node) *ir.Func { } return n.Func() case ir.ONAME: + fn := fn.(*ir.Name) if fn.Class() == ir.PFUNC { return fn.Func() } @@ -721,8 +732,10 @@ func staticValue1(nn ir.Node) ir.Node { FindRHS: switch defn.Op() { case ir.OAS: + defn := defn.(*ir.AssignStmt) rhs = defn.Right() case ir.OAS2: + defn := defn.(*ir.AssignListStmt) for i, lhs := range defn.List().Slice() { if lhs == n { rhs = defn.Rlist().Index(i) @@ -761,10 +774,12 @@ func reassigned(name *ir.Name) bool { return ir.Any(name.Curfn, func(n ir.Node) bool { switch n.Op() { case ir.OAS: + n := n.(*ir.AssignStmt) if n.Left() == name && n != name.Defn { return true } case ir.OAS2, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OSELRECV2: + n := n.(*ir.AssignListStmt) for _, p := range n.List().Slice() { if p == name && n != name.Defn { return true @@ -1237,6 +1252,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { return n case ir.OMETHEXPR: + n := n.(*ir.MethodExpr) return n case ir.OLITERAL, ir.ONIL, ir.OTYPE: @@ -1259,6 +1275,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { case ir.ORETURN: // Since we don't handle bodies with closures, // this return is guaranteed to belong to the current inlined function. 
+ n := n.(*ir.ReturnStmt) init := subst.list(n.Init()) if len(subst.retvars) != 0 && n.List().Len() != 0 { as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) @@ -1285,6 +1302,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { return ir.NewBlockStmt(base.Pos, init) case ir.OGOTO: + n := n.(*ir.BranchStmt) m := ir.Copy(n).(*ir.BranchStmt) m.SetPos(subst.updatedPos(m.Pos())) m.PtrInit().Set(nil) @@ -1293,6 +1311,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { return m case ir.OLABEL: + n := n.(*ir.LabelStmt) m := ir.Copy(n).(*ir.LabelStmt) m.SetPos(subst.updatedPos(m.Pos())) m.PtrInit().Set(nil) @@ -1365,6 +1384,7 @@ func devirtualizeCall(call *ir.CallExpr) { x := typecheck(ir.NewSelectorExpr(sel.Pos(), ir.OXDOT, dt, sel.Sym()), ctxExpr|ctxCallee) switch x.Op() { case ir.ODOTMETH: + x := x.(*ir.SelectorExpr) if base.Flag.LowerM != 0 { base.WarnfAt(call.Pos(), "devirtualizing %v to %v", sel, typ) } @@ -1372,6 +1392,7 @@ func devirtualizeCall(call *ir.CallExpr) { call.SetLeft(x) case ir.ODOTINTER: // Promoted method from embedded interface-typed field (#42279). + x := x.(*ir.SelectorExpr) if base.Flag.LowerM != 0 { base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", sel, typ) } diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index d2d908bf9f6f3..4b7a22e6548f0 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -1169,6 +1169,7 @@ func (p *noder) ifStmt(stmt *syntax.IfStmt) ir.Node { if stmt.Else != nil { e := p.stmt(stmt.Else) if e.Op() == ir.OBLOCK { + e := e.(*ir.BlockStmt) n.PtrRlist().Set(e.List().Slice()) } else { n.PtrRlist().Set1(e) @@ -1319,12 +1320,16 @@ func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node { if ls != nil { switch ls.Op() { case ir.OFOR: + ls := ls.(*ir.ForStmt) ls.SetSym(sym) case ir.ORANGE: + ls := ls.(*ir.RangeStmt) ls.SetSym(sym) case ir.OSWITCH: + ls := ls.(*ir.SwitchStmt) ls.SetSym(sym) case ir.OSELECT: + ls := ls.(*ir.SelectStmt) ls.SetSym(sym) } } @@ -1333,6 +1338,7 @@ func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node { l := []ir.Node{lhs} if ls != nil { if ls.Op() == ir.OBLOCK { + ls := ls.(*ir.BlockStmt) l = append(l, ls.List().Slice()...) 
} else { l = append(l, ls) diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 2e7838c2527a1..96164d09fd5ef 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -135,6 +135,7 @@ func (o *Order) cheapExpr(n ir.Node) ir.Node { case ir.ONAME, ir.OLITERAL, ir.ONIL: return n case ir.OLEN, ir.OCAP: + n := n.(*ir.UnaryExpr) l := o.cheapExpr(n.Left()) if l == n.Left() { return n @@ -160,6 +161,7 @@ func (o *Order) safeExpr(n ir.Node) ir.Node { return n case ir.OLEN, ir.OCAP: + n := n.(*ir.UnaryExpr) l := o.safeExpr(n.Left()) if l == n.Left() { return n @@ -169,6 +171,7 @@ func (o *Order) safeExpr(n ir.Node) ir.Node { return typecheck(a, ctxExpr) case ir.ODOT: + n := n.(*ir.SelectorExpr) l := o.safeExpr(n.Left()) if l == n.Left() { return n @@ -178,6 +181,7 @@ func (o *Order) safeExpr(n ir.Node) ir.Node { return typecheck(a, ctxExpr) case ir.ODOTPTR: + n := n.(*ir.SelectorExpr) l := o.cheapExpr(n.Left()) if l == n.Left() { return n @@ -187,6 +191,7 @@ func (o *Order) safeExpr(n ir.Node) ir.Node { return typecheck(a, ctxExpr) case ir.ODEREF: + n := n.(*ir.StarExpr) l := o.cheapExpr(n.Left()) if l == n.Left() { return n @@ -196,6 +201,7 @@ func (o *Order) safeExpr(n ir.Node) ir.Node { return typecheck(a, ctxExpr) case ir.OINDEX, ir.OINDEXMAP: + n := n.(*ir.IndexExpr) var l ir.Node if n.Left().Type().IsArray() { l = o.safeExpr(n.Left()) @@ -281,9 +287,11 @@ func mapKeyReplaceStrConv(n ir.Node) bool { var replaced bool switch n.Op() { case ir.OBYTES2STR: + n := n.(*ir.ConvExpr) n.SetOp(ir.OBYTES2STRTMP) replaced = true case ir.OSTRUCTLIT: + n := n.(*ir.CompLitExpr) for _, elem := range n.List().Slice() { elem := elem.(*ir.StructKeyExpr) if mapKeyReplaceStrConv(elem.Left()) { @@ -291,6 +299,7 @@ func mapKeyReplaceStrConv(n ir.Node) bool { } } case ir.OARRAYLIT: + n := n.(*ir.CompLitExpr) for _, elem := range n.List().Slice() { if elem.Op() == ir.OKEY { elem = elem.(*ir.KeyExpr).Right() @@ -499,6 +508,7 @@ func (o *Order) call(nn ir.Node) { // by copying it into a temp and marking that temp // still alive when we pop the temp stack. if arg.Op() == ir.OCONVNOP { + arg := arg.(*ir.ConvExpr) if arg.Left().Type().IsUnsafePtr() { x := o.copyExpr(arg.Left()) arg.SetLeft(x) @@ -512,6 +522,7 @@ func (o *Order) call(nn ir.Node) { for i, param := range n.Left().Type().Params().FieldSlice() { if param.Note == unsafeUintptrTag || param.Note == uintptrEscapesTag { if arg := n.List().Index(i); arg.Op() == ir.OSLICELIT { + arg := arg.(*ir.CompLitExpr) for _, elt := range arg.List().Slice() { keepAlive(elt) } @@ -543,17 +554,20 @@ func (o *Order) mapAssign(n ir.Node) { base.Fatalf("order.mapAssign %v", n.Op()) case ir.OAS: + n := n.(*ir.AssignStmt) if n.Left().Op() == ir.OINDEXMAP { n.SetRight(o.safeMapRHS(n.Right())) } o.out = append(o.out, n) case ir.OASOP: + n := n.(*ir.AssignOpStmt) if n.Left().Op() == ir.OINDEXMAP { n.SetRight(o.safeMapRHS(n.Right())) } o.out = append(o.out, n) case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2MAPR, ir.OAS2FUNC: + n := n.(*ir.AssignListStmt) var post []ir.Node for i, m := range n.List().Slice() { switch { @@ -583,6 +597,7 @@ func (o *Order) safeMapRHS(r ir.Node) ir.Node { // Make sure we evaluate the RHS before starting the map insert. // We need to make sure the RHS won't panic. See issue 22881. 
if r.Op() == ir.OAPPEND { + r := r.(*ir.CallExpr) s := r.List().Slice()[1:] for i, n := range s { s[i] = o.cheapExpr(n) @@ -611,6 +626,7 @@ func (o *Order) stmt(n ir.Node) { o.out = append(o.out, n) case ir.OAS: + n := n.(*ir.AssignStmt) t := o.markTemp() n.SetLeft(o.expr(n.Left(), nil)) n.SetRight(o.expr(n.Right(), n.Left())) @@ -618,6 +634,7 @@ func (o *Order) stmt(n ir.Node) { o.cleanTemp(t) case ir.OASOP: + n := n.(*ir.AssignOpStmt) t := o.markTemp() n.SetLeft(o.expr(n.Left(), nil)) n.SetRight(o.expr(n.Right(), nil)) @@ -632,6 +649,7 @@ func (o *Order) stmt(n ir.Node) { l1 := o.safeExpr(n.Left()) l2 := ir.DeepCopy(src.NoXPos, l1) if l2.Op() == ir.OINDEXMAP { + l2 := l2.(*ir.IndexExpr) l2.SetIndexMapLValue(false) } l2 = o.copyExpr(l2) @@ -646,6 +664,7 @@ func (o *Order) stmt(n ir.Node) { o.cleanTemp(t) case ir.OAS2: + n := n.(*ir.AssignListStmt) t := o.markTemp() o.exprList(n.List()) o.exprList(n.Rlist()) @@ -675,10 +694,13 @@ func (o *Order) stmt(n ir.Node) { switch r := n.Rlist().First(); r.Op() { case ir.ODOTTYPE2: + r := r.(*ir.TypeAssertExpr) r.SetLeft(o.expr(r.Left(), nil)) case ir.ORECV: + r := r.(*ir.UnaryExpr) r.SetLeft(o.expr(r.Left(), nil)) case ir.OINDEXMAP: + r := r.(*ir.IndexExpr) r.SetLeft(o.expr(r.Left(), nil)) r.SetRight(o.expr(r.Right(), nil)) // See similar conversion for OINDEXMAP below. @@ -693,6 +715,7 @@ func (o *Order) stmt(n ir.Node) { // Special: does not save n onto out. case ir.OBLOCK: + n := n.(*ir.BlockStmt) o.stmtList(n.List()) // Special: n->left is not an expression; save as is. @@ -709,18 +732,21 @@ func (o *Order) stmt(n ir.Node) { // Special: handle call arguments. case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH: + n := n.(*ir.CallExpr) t := o.markTemp() o.call(n) o.out = append(o.out, n) o.cleanTemp(t) case ir.OCLOSE, ir.ORECV: + n := n.(*ir.UnaryExpr) t := o.markTemp() n.SetLeft(o.expr(n.Left(), nil)) o.out = append(o.out, n) o.cleanTemp(t) case ir.OCOPY: + n := n.(*ir.BinaryExpr) t := o.markTemp() n.SetLeft(o.expr(n.Left(), nil)) n.SetRight(o.expr(n.Right(), nil)) @@ -728,6 +754,7 @@ func (o *Order) stmt(n ir.Node) { o.cleanTemp(t) case ir.OPRINT, ir.OPRINTN, ir.ORECOVER: + n := n.(*ir.CallExpr) t := o.markTemp() o.exprList(n.List()) o.out = append(o.out, n) @@ -735,6 +762,7 @@ func (o *Order) stmt(n ir.Node) { // Special: order arguments to inner call but not call itself. case ir.ODEFER, ir.OGO: + n := n.(*ir.GoDeferStmt) t := o.markTemp() o.init(n.Left()) o.call(n.Left()) @@ -742,6 +770,7 @@ func (o *Order) stmt(n ir.Node) { o.cleanTemp(t) case ir.ODELETE: + n := n.(*ir.CallExpr) t := o.markTemp() n.List().SetFirst(o.expr(n.List().First(), nil)) n.List().SetSecond(o.expr(n.List().Second(), nil)) @@ -752,6 +781,7 @@ func (o *Order) stmt(n ir.Node) { // Clean temporaries from condition evaluation at // beginning of loop body and after for statement. case ir.OFOR: + n := n.(*ir.ForStmt) t := o.markTemp() n.SetLeft(o.exprInPlace(n.Left())) n.PtrBody().Prepend(o.cleanTempNoPop(t)...) @@ -763,6 +793,7 @@ func (o *Order) stmt(n ir.Node) { // Clean temporaries from condition at // beginning of both branches. case ir.OIF: + n := n.(*ir.IfStmt) t := o.markTemp() n.SetLeft(o.exprInPlace(n.Left())) n.PtrBody().Prepend(o.cleanTempNoPop(t)...) @@ -775,6 +806,7 @@ func (o *Order) stmt(n ir.Node) { // Special: argument will be converted to interface using convT2E // so make sure it is an addressable temporary. 
case ir.OPANIC: + n := n.(*ir.UnaryExpr) t := o.markTemp() n.SetLeft(o.expr(n.Left(), nil)) if !n.Left().Type().IsInterface() { @@ -858,6 +890,7 @@ func (o *Order) stmt(n ir.Node) { o.cleanTemp(t) case ir.ORETURN: + n := n.(*ir.ReturnStmt) o.exprList(n.List()) o.out = append(o.out, n) @@ -871,6 +904,7 @@ func (o *Order) stmt(n ir.Node) { // case (if p were nil, then the timing of the fault would // give this away). case ir.OSELECT: + n := n.(*ir.SelectStmt) t := o.markTemp() for _, ncas := range n.List().Slice() { ncas := ncas.(*ir.CaseStmt) @@ -932,6 +966,7 @@ func (o *Order) stmt(n ir.Node) { orderBlock(ncas.PtrInit(), o.free) case ir.OSEND: + r := r.(*ir.SendStmt) if r.Init().Len() != 0 { ir.DumpList("ninit", r.Init()) base.Fatalf("ninit on select send") @@ -969,6 +1004,7 @@ func (o *Order) stmt(n ir.Node) { // Special: value being sent is passed as a pointer; make it addressable. case ir.OSEND: + n := n.(*ir.SendStmt) t := o.markTemp() n.SetLeft(o.expr(n.Left(), nil)) n.SetRight(o.expr(n.Right(), nil)) @@ -1100,6 +1136,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { if haslit && hasbyte { for _, n2 := range n.List().Slice() { if n2.Op() == ir.OBYTES2STR { + n2 := n2.(*ir.ConvExpr) n2.SetOp(ir.OBYTES2STRTMP) } } @@ -1107,6 +1144,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { return n case ir.OINDEXMAP: + n := n.(*ir.IndexExpr) n.SetLeft(o.expr(n.Left(), nil)) n.SetRight(o.expr(n.Right(), nil)) needCopy := false @@ -1134,6 +1172,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { // concrete type (not interface) argument might need an addressable // temporary to pass to the runtime conversion routine. case ir.OCONVIFACE: + n := n.(*ir.ConvExpr) n.SetLeft(o.expr(n.Left(), nil)) if n.Left().Type().IsInterface() { return n @@ -1147,6 +1186,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { return n case ir.OCONVNOP: + n := n.(*ir.ConvExpr) if n.Type().IsKind(types.TUNSAFEPTR) && n.Left().Type().IsKind(types.TUINTPTR) && (n.Left().Op() == ir.OCALLFUNC || n.Left().Op() == ir.OCALLINTER || n.Left().Op() == ir.OCALLMETH) { call := n.Left().(*ir.CallExpr) // When reordering unsafe.Pointer(f()) into a separate @@ -1172,6 +1212,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { // } // ... = r + n := n.(*ir.LogicalExpr) r := o.newTemp(n.Type(), false) // Evaluate left-hand side. @@ -1233,6 +1274,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { case ir.OAPPEND: // Check for append(x, make([]T, y)...) . 
+ n := n.(*ir.CallExpr) if isAppendOfMake(n) { n.List().SetFirst(o.expr(n.List().First(), nil)) // order x mk := n.List().Second().(*ir.MakeExpr) @@ -1247,6 +1289,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { return n case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR: + n := n.(*ir.SliceExpr) n.SetLeft(o.expr(n.Left(), nil)) low, high, max := n.SliceBounds() low = o.expr(low, nil) @@ -1287,6 +1330,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { return n case ir.ODOTTYPE, ir.ODOTTYPE2: + n := n.(*ir.TypeAssertExpr) n.SetLeft(o.expr(n.Left(), nil)) if !isdirectiface(n.Type()) || instrumenting { return o.copyExprClear(n) @@ -1294,10 +1338,12 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { return n case ir.ORECV: + n := n.(*ir.UnaryExpr) n.SetLeft(o.expr(n.Left(), nil)) return o.copyExprClear(n) case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: + n := n.(*ir.BinaryExpr) n.SetLeft(o.expr(n.Left(), nil)) n.SetRight(o.expr(n.Right(), nil)) @@ -1338,6 +1384,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { // Without this special case, order would otherwise compute all // the keys and values before storing any of them to the map. // See issue 26552. + n := n.(*ir.CompLitExpr) entries := n.List().Slice() statics := entries[:0] var dynamics []*ir.KeyExpr diff --git a/src/cmd/compile/internal/gc/scc.go b/src/cmd/compile/internal/gc/scc.go index 8fe20a80fd992..f2d089fa4c7a4 100644 --- a/src/cmd/compile/internal/gc/scc.go +++ b/src/cmd/compile/internal/gc/scc.go @@ -81,6 +81,7 @@ func (v *bottomUpVisitor) visit(n *ir.Func) uint32 { ir.Visit(n, func(n ir.Node) { switch n.Op() { case ir.ONAME: + n := n.(*ir.Name) if n.Class() == ir.PFUNC { if n != nil && n.Name().Defn != nil { if m := v.visit(n.Name().Defn.(*ir.Func)); m < min { @@ -89,6 +90,7 @@ func (v *bottomUpVisitor) visit(n *ir.Func) uint32 { } } case ir.OMETHEXPR: + n := n.(*ir.MethodExpr) fn := methodExprName(n) if fn != nil && fn.Defn != nil { if m := v.visit(fn.Defn.(*ir.Func)); m < min { @@ -96,6 +98,7 @@ func (v *bottomUpVisitor) visit(n *ir.Func) uint32 { } } case ir.ODOTMETH: + n := n.(*ir.SelectorExpr) fn := methodExprName(n) if fn != nil && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC && fn.Defn != nil { if m := v.visit(fn.Defn.(*ir.Func)); m < min { @@ -103,6 +106,7 @@ func (v *bottomUpVisitor) visit(n *ir.Func) uint32 { } } case ir.OCALLPART: + n := n.(*ir.CallPartExpr) fn := ir.AsNode(callpartMethod(n).Nname) if fn != nil && fn.Op() == ir.ONAME { if fn := fn.(*ir.Name); fn.Class() == ir.PFUNC && fn.Name().Defn != nil { diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go index be2f688eb9c07..64d3461dca4ae 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/gc/select.go @@ -56,7 +56,9 @@ func typecheckselect(sel *ir.SelectStmt) { // convert x = <-c into x, _ = <-c // remove implicit conversions; the eventual assignment // will reintroduce them. 
+ n := n.(*ir.AssignStmt) if r := n.Right(); r.Op() == ir.OCONVNOP || r.Op() == ir.OCONVIFACE { + r := r.(*ir.ConvExpr) if r.Implicit() { n.SetRight(r.Left()) } @@ -68,6 +70,7 @@ func typecheckselect(sel *ir.SelectStmt) { oselrecv2(n.Left(), n.Right(), n.Colas()) case ir.OAS2RECV: + n := n.(*ir.AssignListStmt) if n.Rlist().First().Op() != ir.ORECV { base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side") break @@ -76,6 +79,7 @@ func typecheckselect(sel *ir.SelectStmt) { case ir.ORECV: // convert <-c into _, _ = <-c + n := n.(*ir.UnaryExpr) oselrecv2(ir.BlankNode, n, false) case ir.OSEND: @@ -162,10 +166,12 @@ func walkselectcases(cases ir.Nodes) []ir.Node { } switch n.Op() { case ir.OSEND: + n := n.(*ir.SendStmt) n.SetRight(nodAddr(n.Right())) n.SetRight(typecheck(n.Right(), ctxExpr)) case ir.OSELRECV2: + n := n.(*ir.AssignListStmt) if !ir.IsBlank(n.List().First()) { n.List().SetIndex(0, nodAddr(n.List().First())) n.List().SetIndex(0, typecheck(n.List().First(), ctxExpr)) @@ -191,10 +197,12 @@ func walkselectcases(cases ir.Nodes) []ir.Node { case ir.OSEND: // if selectnbsend(c, v) { body } else { default body } + n := n.(*ir.SendStmt) ch := n.Left() call = mkcall1(chanfn("selectnbsend", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), ch, n.Right()) case ir.OSELRECV2: + n := n.(*ir.AssignListStmt) recv := n.Rlist().First().(*ir.UnaryExpr) ch := recv.Left() elem := n.List().First() @@ -261,11 +269,13 @@ func walkselectcases(cases ir.Nodes) []ir.Node { default: base.Fatalf("select %v", n.Op()) case ir.OSEND: + n := n.(*ir.SendStmt) i = nsends nsends++ c = n.Left() elem = n.Right() case ir.OSELRECV2: + n := n.(*ir.AssignListStmt) nrecvs++ i = ncas - nrecvs recv := n.Rlist().First().(*ir.UnaryExpr) @@ -323,6 +333,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node { r := ir.NewIfStmt(base.Pos, cond, nil, nil) if n := cas.Left(); n != nil && n.Op() == ir.OSELRECV2 { + n := n.(*ir.AssignListStmt) if !ir.IsBlank(n.List().Second()) { x := ir.NewAssignStmt(base.Pos, n.List().Second(), recvOK) r.PtrBody().Append(typecheck(x, ctxStmt)) diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 5a96d4c320f51..f4988df9ac48a 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -127,6 +127,7 @@ func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *type return true case ir.OADDR: + r := r.(*ir.AddrExpr) if a := r.Left(); a.Op() == ir.ONAME { a := a.(*ir.Name) addrsym(l, loff, a, 0) @@ -134,6 +135,7 @@ func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *type } case ir.OPTRLIT: + r := r.(*ir.AddrExpr) switch r.Left().Op() { case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT: // copy pointer @@ -148,6 +150,7 @@ func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *type return true case ir.OARRAYLIT, ir.OSTRUCTLIT: + r := r.(*ir.CompLitExpr) p := s.initplans[r] for i := range p.E { e := &p.E[i] @@ -202,6 +205,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type return true case ir.OADDR: + r := r.(*ir.AddrExpr) if name, offset, ok := stataddr(r.Left()); ok { addrsym(l, loff, name, offset) return true @@ -209,6 +213,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type fallthrough case ir.OPTRLIT: + r := r.(*ir.AddrExpr) switch r.Left().Op() { case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT: // Init pointer. 
@@ -226,6 +231,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type //dump("not static ptrlit", r); case ir.OSTR2BYTES: + r := r.(*ir.ConvExpr) if l.Class() == ir.PEXTERN && r.Left().Op() == ir.OLITERAL { sval := ir.StringVal(r.Left()) slicebytes(l, loff, sval) @@ -247,6 +253,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type fallthrough case ir.OARRAYLIT, ir.OSTRUCTLIT: + r := r.(*ir.CompLitExpr) s.initplan(r) p := s.initplans[r] @@ -287,6 +294,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type // If you change something here, change it there, and vice versa. // Determine the underlying concrete type and value we are converting from. + r := r.(*ir.ConvExpr) val := ir.Node(r) for val.Op() == ir.OCONVIFACE { val = val.(*ir.ConvExpr).Left() @@ -467,6 +475,7 @@ func isStaticCompositeLiteral(n ir.Node) bool { case ir.OSLICELIT: return false case ir.OARRAYLIT: + n := n.(*ir.CompLitExpr) for _, r := range n.List().Slice() { if r.Op() == ir.OKEY { r = r.(*ir.KeyExpr).Right() @@ -477,6 +486,7 @@ func isStaticCompositeLiteral(n ir.Node) bool { } return true case ir.OSTRUCTLIT: + n := n.(*ir.CompLitExpr) for _, r := range n.List().Slice() { r := r.(*ir.StructKeyExpr) if !isStaticCompositeLiteral(r.Left()) { @@ -488,6 +498,7 @@ func isStaticCompositeLiteral(n ir.Node) bool { return true case ir.OCONVIFACE: // See staticassign's OCONVIFACE case for comments. + n := n.(*ir.ConvExpr) val := ir.Node(n) for val.Op() == ir.OCONVIFACE { val = val.(*ir.ConvExpr).Left() @@ -865,6 +876,7 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { base.Fatalf("anylit: not lit, op=%v node=%v", n.Op(), n) case ir.ONAME: + n := n.(*ir.Name) appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, n)) case ir.OMETHEXPR: @@ -872,6 +884,7 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { anylit(n.FuncName(), var_, init) case ir.OPTRLIT: + n := n.(*ir.AddrExpr) if !t.IsPtr() { base.Fatalf("anylit: not ptr") } @@ -1001,6 +1014,7 @@ func stataddr(n ir.Node) (name *ir.Name, offset int64, ok bool) { return stataddr(n.FuncName()) case ir.ODOT: + n := n.(*ir.SelectorExpr) if name, offset, ok = stataddr(n.Left()); !ok { break } @@ -1008,6 +1022,7 @@ func stataddr(n ir.Node) (name *ir.Name, offset int64, ok bool) { return name, offset, true case ir.OINDEX: + n := n.(*ir.IndexExpr) if n.Left().Type().IsSlice() { break } @@ -1041,6 +1056,7 @@ func (s *InitSchedule) initplan(n ir.Node) { base.Fatalf("initplan") case ir.OARRAYLIT, ir.OSLICELIT: + n := n.(*ir.CompLitExpr) var k int64 for _, a := range n.List().Slice() { if a.Op() == ir.OKEY { @@ -1056,6 +1072,7 @@ func (s *InitSchedule) initplan(n ir.Node) { } case ir.OSTRUCTLIT: + n := n.(*ir.CompLitExpr) for _, a := range n.List().Slice() { if a.Op() != ir.OSTRUCTKEY { base.Fatalf("initplan structlit") @@ -1068,6 +1085,7 @@ func (s *InitSchedule) initplan(n ir.Node) { } case ir.OMAPLIT: + n := n.(*ir.CompLitExpr) for _, a := range n.List().Slice() { if a.Op() != ir.OKEY { base.Fatalf("initplan maplit") @@ -1116,6 +1134,7 @@ func isZero(n ir.Node) bool { } case ir.OARRAYLIT: + n := n.(*ir.CompLitExpr) for _, n1 := range n.List().Slice() { if n1.Op() == ir.OKEY { n1 = n1.(*ir.KeyExpr).Right() @@ -1127,6 +1146,7 @@ func isZero(n ir.Node) bool { return true case ir.OSTRUCTLIT: + n := n.(*ir.CompLitExpr) for _, n1 := range n.List().Slice() { n1 := n1.(*ir.StructKeyExpr) if !isZero(n1.Left()) { diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go 
index cc5f9eeea62ab..dc3ea4be9eb3a 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -1150,6 +1150,7 @@ func (s *state) stmt(n ir.Node) { switch n.Op() { case ir.OBLOCK: + n := n.(*ir.BlockStmt) s.stmtList(n.List()) // No-ops @@ -1180,6 +1181,7 @@ func (s *state) stmt(n ir.Node) { } } case ir.ODEFER: + n := n.(*ir.GoDeferStmt) if base.Debug.Defer > 0 { var defertype string if s.hasOpenDefers { @@ -1201,9 +1203,11 @@ func (s *state) stmt(n ir.Node) { s.callResult(n.Left().(*ir.CallExpr), d) } case ir.OGO: + n := n.(*ir.GoDeferStmt) s.callResult(n.Left().(*ir.CallExpr), callGo) case ir.OAS2DOTTYPE: + n := n.(*ir.AssignListStmt) res, resok := s.dottype(n.Rlist().First().(*ir.TypeAssertExpr), true) deref := false if !canSSAType(n.Rlist().First().Type()) { @@ -1226,6 +1230,7 @@ func (s *state) stmt(n ir.Node) { case ir.OAS2FUNC: // We come here only when it is an intrinsic call returning two values. + n := n.(*ir.AssignListStmt) call := n.Rlist().First().(*ir.CallExpr) if !IsIntrinsicCall(call) { s.Fatalf("non-intrinsic AS2FUNC not expanded %v", call) @@ -1238,11 +1243,13 @@ func (s *state) stmt(n ir.Node) { return case ir.ODCL: + n := n.(*ir.Decl) if n.Left().(*ir.Name).Class() == ir.PAUTOHEAP { s.Fatalf("DCL %v", n) } case ir.OLABEL: + n := n.(*ir.LabelStmt) sym := n.Sym() lab := s.label(sym) @@ -1260,6 +1267,7 @@ func (s *state) stmt(n ir.Node) { s.startBlock(lab.target) case ir.OGOTO: + n := n.(*ir.BranchStmt) sym := n.Sym() lab := s.label(sym) @@ -1272,6 +1280,7 @@ func (s *state) stmt(n ir.Node) { b.AddEdgeTo(lab.target) case ir.OAS: + n := n.(*ir.AssignStmt) if n.Left() == n.Right() && n.Left().Op() == ir.ONAME { // An x=x assignment. No point in doing anything // here. In addition, skipping this assignment @@ -1356,6 +1365,7 @@ func (s *state) stmt(n ir.Node) { if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && samesafeexpr(rhs.(*ir.SliceExpr).Left(), n.Left()) { // We're assigning a slicing operation back to its source. // Don't write back fields we aren't changing. See issue #14855. + rhs := rhs.(*ir.SliceExpr) i, j, k := rhs.SliceBounds() if i != nil && (i.Op() == ir.OLITERAL && i.Val().Kind() == constant.Int && ir.Int64Val(i) == 0) { // [0:...] is the same as [:...] 
@@ -1385,6 +1395,7 @@ func (s *state) stmt(n ir.Node) { s.assign(n.Left(), r, deref, skip) case ir.OIF: + n := n.(*ir.IfStmt) if ir.IsConst(n.Left(), constant.Bool) { s.stmtList(n.Left().Init()) if ir.BoolVal(n.Left()) { @@ -1431,16 +1442,19 @@ func (s *state) stmt(n ir.Node) { s.startBlock(bEnd) case ir.ORETURN: + n := n.(*ir.ReturnStmt) s.stmtList(n.List()) b := s.exit() b.Pos = s.lastPos.WithIsStmt() case ir.ORETJMP: + n := n.(*ir.BranchStmt) b := s.exit() b.Kind = ssa.BlockRetJmp // override BlockRet b.Aux = callTargetLSym(n.Sym(), s.curfn.LSym) case ir.OCONTINUE, ir.OBREAK: + n := n.(*ir.BranchStmt) var to *ssa.Block if n.Sym() == nil { // plain break/continue @@ -1472,6 +1486,7 @@ func (s *state) stmt(n ir.Node) { // // OFORUNTIL: for Ninit; Left; Right; List { Nbody } // => body: { Nbody }; incr: Right; if Left { lateincr: List; goto body }; end: + n := n.(*ir.ForStmt) bCond := s.f.NewBlock(ssa.BlockPlain) bBody := s.f.NewBlock(ssa.BlockPlain) bIncr := s.f.NewBlock(ssa.BlockPlain) @@ -1600,6 +1615,7 @@ func (s *state) stmt(n ir.Node) { s.startBlock(bEnd) case ir.OVARDEF: + n := n.(*ir.UnaryExpr) if !s.canSSA(n.Left()) { s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left().(*ir.Name), s.mem(), false) } @@ -1608,12 +1624,14 @@ func (s *state) stmt(n ir.Node) { // We only care about liveness info at call sites, so putting the // varkill in the store chain is enough to keep it correctly ordered // with respect to call ops. + n := n.(*ir.UnaryExpr) if !s.canSSA(n.Left()) { s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left().(*ir.Name), s.mem(), false) } case ir.OVARLIVE: // Insert a varlive op to record that a variable is still live. + n := n.(*ir.UnaryExpr) v := n.Left().(*ir.Name) if !v.Addrtaken() { s.Fatalf("VARLIVE variable %v must have Addrtaken set", v) @@ -1626,10 +1644,12 @@ func (s *state) stmt(n ir.Node) { s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, v, s.mem()) case ir.OCHECKNIL: + n := n.(*ir.UnaryExpr) p := s.expr(n.Left()) s.nilCheck(p) case ir.OINLMARK: + n := n.(*ir.InlineMarkStmt) s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Offset(), s.mem()) default: @@ -2097,16 +2117,19 @@ func (s *state) expr(n ir.Node) *ssa.Value { s.stmtList(n.Init()) switch n.Op() { case ir.OBYTES2STRTMP: + n := n.(*ir.ConvExpr) slice := s.expr(n.Left()) ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice) len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) return s.newValue2(ssa.OpStringMake, n.Type(), ptr, len) case ir.OSTR2BYTESTMP: + n := n.(*ir.ConvExpr) str := s.expr(n.Left()) ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str) len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], str) return s.newValue3(ssa.OpSliceMake, n.Type(), ptr, len, len) case ir.OCFUNC: + n := n.(*ir.UnaryExpr) aux := n.Left().Sym().Linksym() return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb) case ir.OMETHEXPR: @@ -2114,6 +2137,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { sym := funcsym(n.FuncName().Sym()).Linksym() return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb) case ir.ONAME: + n := n.(*ir.Name) if n.Class() == ir.PFUNC { // "value" of a function is the address of the function's closure sym := funcsym(n.Sym()).Linksym() @@ -2135,6 +2159,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { addr := s.addr(n) return s.load(n.Type(), addr) case ir.ONIL: + n := n.(*ir.NilExpr) t := n.Type() switch { case t.IsSlice(): @@ -2203,6 +2228,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { 
return nil } case ir.OCONVNOP: + n := n.(*ir.ConvExpr) to := n.Type() from := n.Left().Type() @@ -2271,6 +2297,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { return v case ir.OCONV: + n := n.(*ir.ConvExpr) x := s.expr(n.Left()) ft := n.Left().Type() // from type tt := n.Type() // to type @@ -2448,6 +2475,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { // binary ops case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT: + n := n.(*ir.BinaryExpr) a := s.expr(n.Left()) b := s.expr(n.Right()) if n.Left().Type().IsComplex() { @@ -2481,6 +2509,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { // integer comparison return s.newValue2(s.ssaOp(op, n.Left().Type()), types.Types[types.TBOOL], a, b) case ir.OMUL: + n := n.(*ir.BinaryExpr) a := s.expr(n.Left()) b := s.expr(n.Right()) if n.Type().IsComplex() { @@ -2520,6 +2549,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b) case ir.ODIV: + n := n.(*ir.BinaryExpr) a := s.expr(n.Left()) b := s.expr(n.Right()) if n.Type().IsComplex() { @@ -2567,10 +2597,12 @@ func (s *state) expr(n ir.Node) *ssa.Value { } return s.intDivide(n, a, b) case ir.OMOD: + n := n.(*ir.BinaryExpr) a := s.expr(n.Left()) b := s.expr(n.Right()) return s.intDivide(n, a, b) case ir.OADD, ir.OSUB: + n := n.(*ir.BinaryExpr) a := s.expr(n.Left()) b := s.expr(n.Right()) if n.Type().IsComplex() { @@ -2585,15 +2617,18 @@ func (s *state) expr(n ir.Node) *ssa.Value { } return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b) case ir.OAND, ir.OOR, ir.OXOR: + n := n.(*ir.BinaryExpr) a := s.expr(n.Left()) b := s.expr(n.Right()) return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b) case ir.OANDNOT: + n := n.(*ir.BinaryExpr) a := s.expr(n.Left()) b := s.expr(n.Right()) b = s.newValue1(s.ssaOp(ir.OBITNOT, b.Type), b.Type, b) return s.newValue2(s.ssaOp(ir.OAND, n.Type()), a.Type, a, b) case ir.OLSH, ir.ORSH: + n := n.(*ir.BinaryExpr) a := s.expr(n.Left()) b := s.expr(n.Right()) bt := b.Type @@ -2617,6 +2652,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { // } // Using var in the subsequent block introduces the // necessary phi variable. 
+ n := n.(*ir.LogicalExpr) el := s.expr(n.Left()) s.vars[n] = el @@ -2648,12 +2684,14 @@ func (s *state) expr(n ir.Node) *ssa.Value { s.startBlock(bResult) return s.variable(n, types.Types[types.TBOOL]) case ir.OCOMPLEX: + n := n.(*ir.BinaryExpr) r := s.expr(n.Left()) i := s.expr(n.Right()) return s.newValue2(ssa.OpComplexMake, n.Type(), r, i) // unary ops case ir.ONEG: + n := n.(*ir.UnaryExpr) a := s.expr(n.Left()) if n.Type().IsComplex() { tp := floatForComplex(n.Type()) @@ -2664,18 +2702,23 @@ func (s *state) expr(n ir.Node) *ssa.Value { } return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a) case ir.ONOT, ir.OBITNOT: + n := n.(*ir.UnaryExpr) a := s.expr(n.Left()) return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a) case ir.OIMAG, ir.OREAL: + n := n.(*ir.UnaryExpr) a := s.expr(n.Left()) return s.newValue1(s.ssaOp(n.Op(), n.Left().Type()), n.Type(), a) case ir.OPLUS: + n := n.(*ir.UnaryExpr) return s.expr(n.Left()) case ir.OADDR: + n := n.(*ir.AddrExpr) return s.addr(n.Left()) case ir.ORESULT: + n := n.(*ir.ResultExpr) if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall { // Do the old thing addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset()) @@ -2695,6 +2738,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { } case ir.ODEREF: + n := n.(*ir.StarExpr) p := s.exprPtr(n.Left(), n.Bounded(), n.Pos()) return s.load(n.Type(), p) @@ -2721,11 +2765,13 @@ func (s *state) expr(n ir.Node) *ssa.Value { return s.newValue1I(ssa.OpStructSelect, n.Type(), int64(fieldIdx(n)), v) case ir.ODOTPTR: + n := n.(*ir.SelectorExpr) p := s.exprPtr(n.Left(), n.Bounded(), n.Pos()) p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset(), p) return s.load(n.Type(), p) case ir.OINDEX: + n := n.(*ir.IndexExpr) switch { case n.Left().Type().IsString(): if n.Bounded() && ir.IsConst(n.Left(), constant.String) && ir.IsConst(n.Right(), constant.Int) { @@ -2792,6 +2838,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { } case ir.OSPTR: + n := n.(*ir.UnaryExpr) a := s.expr(n.Left()) if n.Left().Type().IsSlice() { return s.newValue1(ssa.OpSlicePtr, n.Type(), a) @@ -2800,25 +2847,30 @@ func (s *state) expr(n ir.Node) *ssa.Value { } case ir.OITAB: + n := n.(*ir.UnaryExpr) a := s.expr(n.Left()) return s.newValue1(ssa.OpITab, n.Type(), a) case ir.OIDATA: + n := n.(*ir.UnaryExpr) a := s.expr(n.Left()) return s.newValue1(ssa.OpIData, n.Type(), a) case ir.OEFACE: + n := n.(*ir.BinaryExpr) tab := s.expr(n.Left()) data := s.expr(n.Right()) return s.newValue2(ssa.OpIMake, n.Type(), tab, data) case ir.OSLICEHEADER: + n := n.(*ir.SliceHeaderExpr) p := s.expr(n.Left()) l := s.expr(n.List().First()) c := s.expr(n.List().Second()) return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c) case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR: + n := n.(*ir.SliceExpr) v := s.expr(n.Left()) var i, j, k *ssa.Value low, high, max := n.SliceBounds() @@ -2835,6 +2887,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c) case ir.OSLICESTR: + n := n.(*ir.SliceExpr) v := s.expr(n.Left()) var i, j *ssa.Value low, high, _ := n.SliceBounds() @@ -2859,6 +2912,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { return s.callResult(n, callNormal) case ir.OGETG: + n := n.(*ir.CallExpr) return s.newValue1(ssa.OpGetG, n.Type(), s.mem()) case ir.OAPPEND: @@ -2868,12 +2922,14 @@ func (s *state) expr(n ir.Node) *ssa.Value { // All literals with nonzero fields have already been // rewritten during walk. 
Any that remain are just T{} // or equivalents. Use the zero value. + n := n.(*ir.CompLitExpr) if !isZero(n) { s.Fatalf("literal with nonzero value in SSA: %v", n) } return s.zeroVal(n.Type()) case ir.ONEWOBJ: + n := n.(*ir.UnaryExpr) if n.Type().Elem().Size() == 0 { return s.newValue1A(ssa.OpAddr, n.Type(), zerobaseSym, s.sb) } @@ -3057,6 +3113,7 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value { func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) { switch cond.Op() { case ir.OANDAND: + cond := cond.(*ir.LogicalExpr) mid := s.f.NewBlock(ssa.BlockPlain) s.stmtList(cond.Init()) s.condBranch(cond.Left(), mid, no, max8(likely, 0)) @@ -3070,6 +3127,7 @@ func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) { // TODO: have the frontend give us branch prediction hints for // OANDAND and OOROR nodes (if it ever has such info). case ir.OOROR: + cond := cond.(*ir.LogicalExpr) mid := s.f.NewBlock(ssa.BlockPlain) s.stmtList(cond.Init()) s.condBranch(cond.Left(), yes, mid, min8(likely, 0)) @@ -3080,10 +3138,12 @@ func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) { // If likely==1, then we don't have enough info to decide // the likelihood of the first branch. case ir.ONOT: + cond := cond.(*ir.UnaryExpr) s.stmtList(cond.Init()) s.condBranch(cond.Left(), no, yes, -likely) return case ir.OCONVNOP: + cond := cond.(*ir.ConvExpr) s.stmtList(cond.Init()) s.condBranch(cond.Left(), yes, no, likely) return @@ -3157,6 +3217,7 @@ func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask return } if left.Op() == ir.OINDEX && left.(*ir.IndexExpr).Left().Type().IsArray() { + left := left.(*ir.IndexExpr) s.pushLine(left.Pos()) defer s.popLine() // We're assigning to an element of an ssa-able array. 
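The hunks above all apply one mechanical change: inside a switch on n.Op(), re-declare n with a type assertion to the matching concrete node type, so the case body uses direct struct access instead of the generic Node interface. A minimal, self-contained sketch of that shadowing pattern (Op, Node, IfStmt, and ReturnStmt here are illustrative stand-ins, not the real cmd/compile/internal/ir types):

package main

import "fmt"

type Op int

const (
	OIF Op = iota
	ORETURN
)

type Node interface{ Op() Op }

type IfStmt struct{ Cond string }

func (*IfStmt) Op() Op { return OIF }

type ReturnStmt struct{ Results []string }

func (*ReturnStmt) Op() Op { return ORETURN }

func stmt(n Node) {
	switch n.Op() {
	case OIF:
		// Shadow n with the concrete type; fields are now directly accessible.
		n := n.(*IfStmt)
		fmt.Println("if", n.Cond)
	case ORETURN:
		n := n.(*ReturnStmt)
		fmt.Println("return", n.Results)
	}
}

func main() {
	stmt(&IfStmt{Cond: "x > 0"})
	stmt(&ReturnStmt{Results: []string{"x"}})
}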
@@ -4630,6 +4691,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val case ir.OCALLFUNC: testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f) if k == callNormal && fn.Op() == ir.ONAME && fn.(*ir.Name).Class() == ir.PFUNC { + fn := fn.(*ir.Name) sym = fn.Sym() break } @@ -5000,6 +5062,7 @@ func (s *state) addr(n ir.Node) *ssa.Value { } case ir.ORESULT: // load return from callee + n := n.(*ir.ResultExpr) if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall { return s.constOffPtrSP(t, n.Offset()) } @@ -5012,6 +5075,7 @@ func (s *state) addr(n ir.Node) *ssa.Value { return x case ir.OINDEX: + n := n.(*ir.IndexExpr) if n.Left().Type().IsSlice() { a := s.expr(n.Left()) i := s.expr(n.Right()) @@ -5027,11 +5091,14 @@ func (s *state) addr(n ir.Node) *ssa.Value { return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left().Type().Elem()), a, i) } case ir.ODEREF: + n := n.(*ir.StarExpr) return s.exprPtr(n.Left(), n.Bounded(), n.Pos()) case ir.ODOT: + n := n.(*ir.SelectorExpr) p := s.addr(n.Left()) return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p) case ir.ODOTPTR: + n := n.(*ir.SelectorExpr) p := s.exprPtr(n.Left(), n.Bounded(), n.Pos()) return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p) case ir.OCLOSUREREAD: @@ -5039,6 +5106,7 @@ func (s *state) addr(n ir.Node) *ssa.Value { return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)) case ir.OCONVNOP: + n := n.(*ir.ConvExpr) if n.Type() == n.Left().Type() { return s.addr(n.Left()) } @@ -5072,10 +5140,12 @@ func (s *state) canSSA(n ir.Node) bool { for { nn := n if nn.Op() == ir.ODOT { + nn := nn.(*ir.SelectorExpr) n = nn.Left() continue } if nn.Op() == ir.OINDEX { + nn := nn.(*ir.IndexExpr) if nn.Left().Type().IsArray() { n = nn.Left() continue @@ -7297,11 +7367,13 @@ func (e *ssafn) MyImportPath() string { func clobberBase(n ir.Node) ir.Node { if n.Op() == ir.ODOT { + n := n.(*ir.SelectorExpr) if n.Left().Type().NumFields() == 1 { return clobberBase(n.Left()) } } if n.Op() == ir.OINDEX { + n := n.(*ir.IndexExpr) if n.Left().Type().IsArray() && n.Left().Type().NumElem() == 1 { return clobberBase(n.Left()) } diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 174452def2308..5aebae0b18bcd 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -612,6 +612,7 @@ func calcHasCall(n ir.Node) bool { return true case ir.OANDAND, ir.OOROR: // hard with instrumented code + n := n.(*ir.LogicalExpr) if instrumenting { return true } @@ -625,42 +626,52 @@ func calcHasCall(n ir.Node) bool { // When using soft-float, these ops might be rewritten to function calls // so we ensure they are evaluated first. 
case ir.OADD, ir.OSUB, ir.OMUL: + n := n.(*ir.BinaryExpr) if thearch.SoftFloat && (isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) { return true } return n.Left().HasCall() || n.Right().HasCall() case ir.ONEG: + n := n.(*ir.UnaryExpr) if thearch.SoftFloat && (isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) { return true } return n.Left().HasCall() case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT: + n := n.(*ir.BinaryExpr) if thearch.SoftFloat && (isFloat[n.Left().Type().Kind()] || isComplex[n.Left().Type().Kind()]) { return true } return n.Left().HasCall() || n.Right().HasCall() case ir.OCONV: + n := n.(*ir.ConvExpr) if thearch.SoftFloat && ((isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) || (isFloat[n.Left().Type().Kind()] || isComplex[n.Left().Type().Kind()])) { return true } return n.Left().HasCall() case ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOPY, ir.OCOMPLEX, ir.OEFACE: + n := n.(*ir.BinaryExpr) return n.Left().HasCall() || n.Right().HasCall() case ir.OAS: + n := n.(*ir.AssignStmt) return n.Left().HasCall() || n.Right() != nil && n.Right().HasCall() case ir.OADDR: + n := n.(*ir.AddrExpr) return n.Left().HasCall() case ir.OPAREN: + n := n.(*ir.ParenExpr) return n.Left().HasCall() case ir.OBITNOT, ir.ONOT, ir.OPLUS, ir.ORECV, ir.OALIGNOF, ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.ONEW, ir.OOFFSETOF, ir.OPANIC, ir.OREAL, ir.OSIZEOF, ir.OCHECKNIL, ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.ONEWOBJ, ir.OSPTR, ir.OVARDEF, ir.OVARKILL, ir.OVARLIVE: + n := n.(*ir.UnaryExpr) return n.Left().HasCall() case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER: + n := n.(*ir.SelectorExpr) return n.Left().HasCall() case ir.OGETG, ir.OCLOSUREREAD, ir.OMETHEXPR: @@ -675,12 +686,15 @@ func calcHasCall(n ir.Node) bool { return false case ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.OBYTES2STRTMP, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2BYTESTMP, ir.OSTR2RUNES, ir.ORUNESTR: // TODO(rsc): Some conversions are themselves calls, no? + n := n.(*ir.ConvExpr) return n.Left().HasCall() case ir.ODOTTYPE2: // TODO(rsc): Shouldn't this be up with ODOTTYPE above? + n := n.(*ir.TypeAssertExpr) return n.Left().HasCall() case ir.OSLICEHEADER: // TODO(rsc): What about len and cap? + n := n.(*ir.SliceHeaderExpr) return n.Left().HasCall() case ir.OAS2DOTTYPE, ir.OAS2FUNC: // TODO(rsc): Surely we need to check List and Rlist. 
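In the calcHasCall hunk above, several ops share one case (ir.OADD, ir.OSUB, ir.OMUL all assert to *ir.BinaryExpr). That grouping is only safe because every op listed is represented by the same concrete struct; the assertion doubles as a runtime consistency check and panics if an op with a different representation is ever added to the list. A sketch of that property, again with stand-in types rather than the real ir package:

package main

import "fmt"

type Op int

const (
	OADD Op = iota
	OSUB
	ONEG
	OCALL
)

type Node interface{ Op() Op }

type BinaryExpr struct {
	op   Op
	X, Y Node
}

func (n *BinaryExpr) Op() Op { return n.op }

type UnaryExpr struct{ X Node }

func (*UnaryExpr) Op() Op { return ONEG }

type CallExpr struct{}

func (*CallExpr) Op() Op { return OCALL }

func hasCall(n Node) bool {
	switch n.Op() {
	case OADD, OSUB:
		// Grouping ops in one case is safe only because OADD and OSUB
		// are both represented by *BinaryExpr; otherwise this panics.
		n := n.(*BinaryExpr)
		return hasCall(n.X) || hasCall(n.Y)
	case ONEG:
		n := n.(*UnaryExpr)
		return hasCall(n.X)
	case OCALL:
		return true
	}
	return false
}

func main() {
	e := &BinaryExpr{op: OADD, X: &UnaryExpr{X: &CallExpr{}}, Y: &CallExpr{}}
	fmt.Println(hasCall(e)) // true
}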
@@ -768,6 +782,7 @@ func safeexpr(n ir.Node, init *ir.Nodes) ir.Node { return n case ir.OLEN, ir.OCAP: + n := n.(*ir.UnaryExpr) l := safeexpr(n.Left(), init) if l == n.Left() { return n @@ -777,6 +792,7 @@ func safeexpr(n ir.Node, init *ir.Nodes) ir.Node { return walkexpr(typecheck(a, ctxExpr), init) case ir.ODOT, ir.ODOTPTR: + n := n.(*ir.SelectorExpr) l := safeexpr(n.Left(), init) if l == n.Left() { return n @@ -786,6 +802,7 @@ func safeexpr(n ir.Node, init *ir.Nodes) ir.Node { return walkexpr(typecheck(a, ctxExpr), init) case ir.ODEREF: + n := n.(*ir.StarExpr) l := safeexpr(n.Left(), init) if l == n.Left() { return n @@ -795,6 +812,7 @@ func safeexpr(n ir.Node, init *ir.Nodes) ir.Node { return walkexpr(typecheck(a, ctxExpr), init) case ir.OINDEX, ir.OINDEXMAP: + n := n.(*ir.IndexExpr) l := safeexpr(n.Left(), init) r := safeexpr(n.Right(), init) if l == n.Left() && r == n.Right() { @@ -806,6 +824,7 @@ func safeexpr(n ir.Node, init *ir.Nodes) ir.Node { return walkexpr(typecheck(a, ctxExpr), init) case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT: + n := n.(*ir.CompLitExpr) if isStaticCompositeLiteral(n) { return n } diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index 1866a6a784a6b..7cd1c16e00f1d 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -266,6 +266,7 @@ func walkExprSwitch(sw *ir.SwitchStmt) { // conversion into a runtime call. // See issue 24937 for more discussion. if cond.Op() == ir.OBYTES2STR && allCaseExprsAreSideEffectFree(sw) { + cond := cond.(*ir.ConvExpr) cond.SetOp(ir.OBYTES2STRTMP) } diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index db03fd9e75340..bb5e9fad1e007 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -412,6 +412,7 @@ func typecheck(n ir.Node, top int) (res ir.Node) { switch n.Op() { // We can already diagnose variables used as types. case ir.ONAME: + n := n.(*ir.Name) if top&(ctxExpr|ctxType) == ctxType { base.Errorf("%v is not a type", n) } @@ -477,6 +478,7 @@ func typecheck(n ir.Node, top int) (res ir.Node) { isMulti := false switch n.Op() { case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH: + n := n.(*ir.CallExpr) if t := n.Left().Type(); t != nil && t.Kind() == types.TFUNC { nr := t.NumResults() isMulti = nr > 1 @@ -577,6 +579,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } if n.Op() == ir.ONAME { + n := n.(*ir.Name) if n.SubOp() != 0 && top&ctxCallee == 0 { base.Errorf("use of builtin %v not in function call", n.Sym()) n.SetType(nil) @@ -608,6 +611,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.ONAME: + n := n.(*ir.Name) if n.Name().Decldepth == 0 { n.Name().Decldepth = decldepth } @@ -630,6 +634,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OPACK: + n := n.(*ir.PkgName) base.Errorf("use of package %v without selector", n.Sym()) n.SetType(nil) return n @@ -816,6 +821,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } op := n.Op() if n.Op() == ir.OASOP { + n := n.(*ir.AssignOpStmt) checkassign(n, l) if n.Implicit() && !okforarith[l.Type().Kind()] { base.Errorf("invalid operation: %v (non-numeric type %v)", n, l.Type()) @@ -859,6 +865,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { // can't be used with "&&" than to report that "x == x" (type untyped bool) // can't be converted to int (see issue #41500). 
if n.Op() == ir.OANDAND || n.Op() == ir.OOROR { + n := n.(*ir.LogicalExpr) if !n.Left().Type().IsBoolean() { base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Left().Type())) n.SetType(nil) @@ -1010,6 +1017,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { if et == types.TSTRING && n.Op() == ir.OADD { // create or update OADDSTR node with list of strings in x + y + z + (w + v) + ... + n := n.(*ir.BinaryExpr) var add *ir.AddStringExpr if l.Op() == ir.OADDSTR { add = l.(*ir.AddStringExpr) @@ -1018,6 +1026,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { add = ir.NewAddStringExpr(n.Pos(), []ir.Node{l}) } if r.Op() == ir.OADDSTR { + r := r.(*ir.AddStringExpr) add.PtrList().AppendNodes(r.PtrList()) } else { add.PtrList().Append(r) @@ -1038,6 +1047,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OBITNOT, ir.ONEG, ir.ONOT, ir.OPLUS: + n := n.(*ir.UnaryExpr) n.SetLeft(typecheck(n.Left(), ctxExpr)) l := n.Left() t := l.Type() @@ -1056,6 +1066,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { // exprs case ir.OADDR: + n := n.(*ir.AddrExpr) n.SetLeft(typecheck(n.Left(), ctxExpr)) if n.Left().Type() == nil { n.SetType(nil) @@ -1070,6 +1081,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { checklvalue(n.Left(), "take the address of") r := outervalue(n.Left()) if r.Op() == ir.ONAME { + r := r.(*ir.Name) if ir.Orig(r) != r { base.Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean? } @@ -1170,6 +1182,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.ODOTTYPE: + n := n.(*ir.TypeAssertExpr) n.SetLeft(typecheck(n.Left(), ctxExpr)) n.SetLeft(defaultlit(n.Left(), nil)) l := n.Left() @@ -1215,6 +1228,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OINDEX: + n := n.(*ir.IndexExpr) n.SetLeft(typecheck(n.Left(), ctxExpr)) n.SetLeft(defaultlit(n.Left(), nil)) n.SetLeft(implicitstar(n.Left())) @@ -1273,6 +1287,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.ORECV: + n := n.(*ir.UnaryExpr) n.SetLeft(typecheck(n.Left(), ctxExpr)) n.SetLeft(defaultlit(n.Left(), nil)) l := n.Left() @@ -1297,6 +1312,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OSEND: + n := n.(*ir.SendStmt) n.SetLeft(typecheck(n.Left(), ctxExpr)) n.SetRight(typecheck(n.Right(), ctxExpr)) n.SetLeft(defaultlit(n.Left(), nil)) @@ -1325,6 +1341,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { // can construct an OSLICEHEADER node. // Components used in OSLICEHEADER that are supplied by parsed source code // have already been typechecked in e.g. OMAKESLICE earlier. + n := n.(*ir.SliceHeaderExpr) t := n.Type() if t == nil { base.Fatalf("no type specified for OSLICEHEADER") @@ -1369,6 +1386,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { // can construct an OMAKESLICECOPY node. // Components used in OMAKESCLICECOPY that are supplied by parsed source code // have already been typechecked in OMAKE and OCOPY earlier. 
+ n := n.(*ir.MakeExpr) t := n.Type() if t == nil { @@ -1407,6 +1425,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OSLICE, ir.OSLICE3: + n := n.(*ir.SliceExpr) n.SetLeft(typecheck(n.Left(), ctxExpr)) low, high, max := n.SliceBounds() hasmax := n.Op().IsSlice3() @@ -1496,6 +1515,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { l := n.Left() if l.Op() == ir.ONAME && l.(*ir.Name).SubOp() != 0 { + l := l.(*ir.Name) if n.IsDDD() && l.SubOp() != ir.OAPPEND { base.Errorf("invalid use of ... with builtin %v", l) } @@ -1571,6 +1591,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetOp(ir.OCALLINTER) case ir.ODOTMETH: + l := l.(*ir.SelectorExpr) n.SetOp(ir.OCALLMETH) // typecheckaste was used here but there wasn't enough @@ -1632,10 +1653,12 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF: + n := n.(*ir.UnaryExpr) n.SetType(types.Types[types.TUINTPTR]) return n case ir.OCAP, ir.OLEN: + n := n.(*ir.UnaryExpr) n.SetLeft(typecheck(n.Left(), ctxExpr)) n.SetLeft(defaultlit(n.Left(), nil)) n.SetLeft(implicitstar(n.Left())) @@ -1662,6 +1685,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OREAL, ir.OIMAG: + n := n.(*ir.UnaryExpr) n.SetLeft(typecheck(n.Left(), ctxExpr)) l := n.Left() t := l.Type() @@ -1686,6 +1710,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OCOMPLEX: + n := n.(*ir.BinaryExpr) l := typecheck(n.Left(), ctxExpr) r := typecheck(n.Right(), ctxExpr) if l.Type() == nil || r.Type() == nil { @@ -1726,6 +1751,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OCLOSE: + n := n.(*ir.UnaryExpr) n.SetLeft(typecheck(n.Left(), ctxExpr)) n.SetLeft(defaultlit(n.Left(), nil)) l := n.Left() @@ -1748,6 +1774,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.ODELETE: + n := n.(*ir.CallExpr) typecheckargs(n) args := n.List() if args.Len() == 0 { @@ -1780,6 +1807,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OAPPEND: + n := n.(*ir.CallExpr) typecheckargs(n) args := n.List() if args.Len() == 0 { @@ -1840,6 +1868,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OCOPY: + n := n.(*ir.BinaryExpr) n.SetType(types.Types[types.TINT]) n.SetLeft(typecheck(n.Left(), ctxExpr)) n.SetLeft(defaultlit(n.Left(), nil)) @@ -1925,6 +1954,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OMAKE: + n := n.(*ir.CallExpr) args := n.List().Slice() if len(args) == 0 { base.Errorf("missing argument to make") @@ -2032,6 +2062,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return nn case ir.ONEW: + n := n.(*ir.UnaryExpr) if n.Left() == nil { // Fatalf because the OCALL above checked for us, // so this must be an internally-generated mistake. 
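The typecheck1 hunks keep the existing dispatch on n.Op() and then assert, rather than switching to a Go type switch, because the op-to-struct mapping is many-to-one: ir.OLEN and ir.OCAP, for example, are both *ir.UnaryExpr, and the behavior depends on the op, not the struct. A sketch contrasting the two dispatch styles, with stand-in types:

package main

import "fmt"

type Op int

const (
	OLEN Op = iota
	OCAP
)

type Node interface{ Op() Op }

type UnaryExpr struct {
	op Op
	X  Node
}

func (n *UnaryExpr) Op() Op { return n.op }

// Dispatch on the op, then assert: distinguishes OLEN from OCAP
// even though both use the same concrete struct.
func describeByOp(n Node) string {
	switch n.Op() {
	case OLEN:
		n := n.(*UnaryExpr) // both ops assert to *UnaryExpr...
		return fmt.Sprintf("len(%v)", n.X)
	case OCAP:
		n := n.(*UnaryExpr) // ...so only the op distinguishes them
		return fmt.Sprintf("cap(%v)", n.X)
	}
	return "?"
}

// A type switch alone cannot tell the two ops apart.
func describeByType(n Node) string {
	switch n := n.(type) {
	case *UnaryExpr:
		return fmt.Sprintf("some unary op %d", n.op)
	}
	return "?"
}

func main() {
	fmt.Println(describeByOp(&UnaryExpr{op: OCAP}))   // cap(<nil>)
	fmt.Println(describeByType(&UnaryExpr{op: OLEN})) // some unary op 0
}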
@@ -2049,6 +2080,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OPRINT, ir.OPRINTN: + n := n.(*ir.CallExpr) typecheckargs(n) ls := n.List().Slice() for i1, n1 := range ls { @@ -2062,6 +2094,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OPANIC: + n := n.(*ir.UnaryExpr) n.SetLeft(typecheck(n.Left(), ctxExpr)) n.SetLeft(defaultlit(n.Left(), types.Types[types.TINTER])) if n.Left().Type() == nil { @@ -2071,6 +2104,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.ORECOVER: + n := n.(*ir.CallExpr) if n.List().Len() != 0 { base.Errorf("too many arguments to recover") n.SetType(nil) @@ -2089,6 +2123,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OITAB: + n := n.(*ir.UnaryExpr) n.SetLeft(typecheck(n.Left(), ctxExpr)) t := n.Left().Type() if t == nil { @@ -2104,10 +2139,12 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OIDATA: // Whoever creates the OIDATA node must know a priori the concrete type at that moment, // usually by just having checked the OITAB. + n := n.(*ir.UnaryExpr) base.Fatalf("cannot typecheck interface data %v", n) panic("unreachable") case ir.OSPTR: + n := n.(*ir.UnaryExpr) n.SetLeft(typecheck(n.Left(), ctxExpr)) t := n.Left().Type() if t == nil { @@ -2128,11 +2165,13 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OCFUNC: + n := n.(*ir.UnaryExpr) n.SetLeft(typecheck(n.Left(), ctxExpr)) n.SetType(types.Types[types.TUINTPTR]) return n case ir.OCONVNOP: + n := n.(*ir.ConvExpr) n.SetLeft(typecheck(n.Left(), ctxExpr)) return n @@ -2161,6 +2200,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OBLOCK: + n := n.(*ir.BlockStmt) typecheckslice(n.List().Slice(), ctxStmt) return n @@ -2183,6 +2223,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OFOR, ir.OFORUNTIL: + n := n.(*ir.ForStmt) typecheckslice(n.Init().Slice(), ctxStmt) decldepth++ n.SetLeft(typecheck(n.Left(), ctxExpr)) @@ -2202,6 +2243,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OIF: + n := n.(*ir.IfStmt) typecheckslice(n.Init().Slice(), ctxStmt) n.SetLeft(typecheck(n.Left(), ctxExpr)) n.SetLeft(defaultlit(n.Left(), nil)) @@ -2216,6 +2258,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.ORETURN: + n := n.(*ir.ReturnStmt) typecheckargs(n) if Curfn == nil { base.Errorf("return outside function") @@ -2230,6 +2273,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.ORETJMP: + n := n.(*ir.BranchStmt) return n case ir.OSELECT: @@ -2245,6 +2289,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OTYPESW: + n := n.(*ir.TypeSwitchGuard) base.Errorf("use of .(type) outside type switch") n.SetType(nil) return n @@ -2254,10 +2299,12 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.ODCLCONST: + n := n.(*ir.Decl) n.SetLeft(typecheck(n.Left(), ctxExpr)) return n case ir.ODCLTYPE: + n := n.(*ir.Decl) n.SetLeft(typecheck(n.Left(), ctxType)) checkwidth(n.Left().Type()) return n @@ -2814,6 +2861,7 @@ notenough: // Method expressions have the form T.M, and the compiler has // rewritten those to ONAME nodes but left T in Left. 
if call.Op() == ir.OMETHEXPR { + call := call.(*ir.MethodExpr) base.Errorf("not enough arguments in call to method expression %v%s", call, details) } else { base.Errorf("not enough arguments in call to %v%s", call, details) @@ -3231,6 +3279,7 @@ func nonexported(sym *types.Sym) bool { func islvalue(n ir.Node) bool { switch n.Op() { case ir.OINDEX: + n := n.(*ir.IndexExpr) if n.Left().Type() != nil && n.Left().Type().IsArray() { return islvalue(n.Left()) } @@ -3242,9 +3291,11 @@ func islvalue(n ir.Node) bool { return true case ir.ODOT: + n := n.(*ir.SelectorExpr) return islvalue(n.Left()) case ir.ONAME: + n := n.(*ir.Name) if n.Class() == ir.PFUNC { return false } @@ -3268,6 +3319,7 @@ func checkassign(stmt ir.Node, n ir.Node) { if !ir.DeclaredBy(n, stmt) || stmt.Op() == ir.ORANGE { r := outervalue(n) if r.Op() == ir.ONAME { + r := r.(*ir.Name) r.Name().SetAssigned(true) if r.Name().IsClosureVar() { r.Name().Defn.Name().SetAssigned(true) @@ -3279,6 +3331,7 @@ func checkassign(stmt ir.Node, n ir.Node) { return } if n.Op() == ir.OINDEXMAP { + n := n.(*ir.IndexExpr) n.SetIndexMapLValue(true) return } @@ -3529,6 +3582,7 @@ func typecheckas2(n *ir.AssignListStmt) { case ir.ORECV: n.SetOp(ir.OAS2RECV) case ir.ODOTTYPE: + r := r.(*ir.TypeAssertExpr) n.SetOp(ir.OAS2DOTTYPE) r.SetOp(ir.ODOTTYPE2) } @@ -3554,6 +3608,7 @@ mismatch: default: base.Errorf("assignment mismatch: %d variables but %d values", cl, cr) case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: + r := r.(*ir.CallExpr) base.Errorf("assignment mismatch: %d variables but %v returns %d values", cl, r.Left(), cr) } @@ -3768,6 +3823,7 @@ func typecheckdef(n ir.Node) { } case ir.ONAME: + n := n.(*ir.Name) if n.Name().Ntype != nil { n.Name().Ntype = typecheckNtype(n.Name().Ntype) n.SetType(n.Name().Ntype.Type()) @@ -3888,6 +3944,7 @@ func markBreak(fn *ir.Func) { ir.DoChildren(n, mark) case ir.OBREAK: + n := n.(*ir.BranchStmt) if n.Sym() == nil { setHasBreak(implicit) } else { @@ -3980,12 +4037,14 @@ func isTermNode(n ir.Node) bool { // skipping over the label. No case OLABEL here. 
case ir.OBLOCK: + n := n.(*ir.BlockStmt) return isTermNodes(n.List()) case ir.OGOTO, ir.ORETURN, ir.ORETJMP, ir.OPANIC, ir.OFALL: return true case ir.OFOR, ir.OFORUNTIL: + n := n.(*ir.ForStmt) if n.Left() != nil { return false } @@ -3995,9 +4054,11 @@ func isTermNode(n ir.Node) bool { return true case ir.OIF: + n := n.(*ir.IfStmt) return isTermNodes(n.Body()) && isTermNodes(n.Rlist()) case ir.OSWITCH: + n := n.(*ir.SwitchStmt) if n.HasBreak() { return false } @@ -4014,6 +4075,7 @@ func isTermNode(n ir.Node) bool { return def case ir.OSELECT: + n := n.(*ir.SelectStmt) if n.HasBreak() { return false } @@ -4052,10 +4114,12 @@ func deadcode(fn *ir.Func) { } switch n.Op() { case ir.OIF: + n := n.(*ir.IfStmt) if !ir.IsConst(n.Left(), constant.Bool) || n.Body().Len() > 0 || n.Rlist().Len() > 0 { return } case ir.OFOR: + n := n.(*ir.ForStmt) if !ir.IsConst(n.Left(), constant.Bool) || ir.BoolVal(n.Left()) { return } @@ -4083,6 +4147,7 @@ func deadcodeslice(nn *ir.Nodes) { continue } if n.Op() == ir.OIF { + n := n.(*ir.IfStmt) n.SetLeft(deadcodeexpr(n.Left())) if ir.IsConst(n.Left(), constant.Bool) { var body ir.Nodes @@ -4112,19 +4177,26 @@ func deadcodeslice(nn *ir.Nodes) { deadcodeslice(n.PtrInit()) switch n.Op() { case ir.OBLOCK: + n := n.(*ir.BlockStmt) deadcodeslice(n.PtrList()) case ir.OCASE: + n := n.(*ir.CaseStmt) deadcodeslice(n.PtrBody()) case ir.OFOR: + n := n.(*ir.ForStmt) deadcodeslice(n.PtrBody()) case ir.OIF: + n := n.(*ir.IfStmt) deadcodeslice(n.PtrBody()) deadcodeslice(n.PtrRlist()) case ir.ORANGE: + n := n.(*ir.RangeStmt) deadcodeslice(n.PtrBody()) case ir.OSELECT: + n := n.(*ir.SelectStmt) deadcodeslice(n.PtrList()) case ir.OSWITCH: + n := n.(*ir.SwitchStmt) deadcodeslice(n.PtrList()) } @@ -4141,6 +4213,7 @@ func deadcodeexpr(n ir.Node) ir.Node { // producing a constant 'if' condition. switch n.Op() { case ir.OANDAND: + n := n.(*ir.LogicalExpr) n.SetLeft(deadcodeexpr(n.Left())) n.SetRight(deadcodeexpr(n.Right())) if ir.IsConst(n.Left(), constant.Bool) { @@ -4151,6 +4224,7 @@ func deadcodeexpr(n ir.Node) ir.Node { } } case ir.OOROR: + n := n.(*ir.LogicalExpr) n.SetLeft(deadcodeexpr(n.Left())) n.SetRight(deadcodeexpr(n.Right())) if ir.IsConst(n.Left(), constant.Bool) { @@ -4206,6 +4280,7 @@ func methodExprFunc(n ir.Node) *types.Field { case ir.OMETHEXPR: return n.(*ir.MethodExpr).Method case ir.OCALLPART: + n := n.(*ir.CallPartExpr) return callpartMethod(n) } base.Fatalf("unexpected node: %v (%v)", n, n.Op()) diff --git a/src/cmd/compile/internal/gc/unsafe.go b/src/cmd/compile/internal/gc/unsafe.go index 02dd30297554d..eeedea396e0c0 100644 --- a/src/cmd/compile/internal/gc/unsafe.go +++ b/src/cmd/compile/internal/gc/unsafe.go @@ -13,6 +13,7 @@ import ( func evalunsafe(n ir.Node) int64 { switch n.Op() { case ir.OALIGNOF, ir.OSIZEOF: + n := n.(*ir.UnaryExpr) n.SetLeft(typecheck(n.Left(), ctxExpr)) n.SetLeft(defaultlit(n.Left(), nil)) tr := n.Left().Type() @@ -27,6 +28,7 @@ func evalunsafe(n ir.Node) int64 { case ir.OOFFSETOF: // must be a selector. + n := n.(*ir.UnaryExpr) if n.Left().Op() != ir.OXDOT { base.Errorf("invalid expression %v", n) return 0 @@ -64,12 +66,14 @@ func evalunsafe(n ir.Node) int64 { // For Offsetof(s.f), s may itself be a pointer, // but accessing f must not otherwise involve // indirection via embedded pointer types. 
+ r := r.(*ir.SelectorExpr) if r.Left() != sbase { base.Errorf("invalid expression %v: selector implies indirection of embedded %v", n, r.Left()) return 0 } fallthrough case ir.ODOT: + r := r.(*ir.SelectorExpr) v += r.Offset() next = r.Left() default: diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 17269746e64ab..91b7a184cf8dd 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -127,6 +127,7 @@ func walkstmt(n ir.Node) ir.Node { switch n.Op() { default: if n.Op() == ir.ONAME { + n := n.(*ir.Name) base.Errorf("%v is not a top level statement", n.Sym()) } else { base.Errorf("%v is not a top level statement", n.Op()) @@ -181,6 +182,7 @@ func walkstmt(n ir.Node) ir.Node { // special case for a receive where we throw away // the value received. case ir.ORECV: + n := n.(*ir.UnaryExpr) if n.Typecheck() == 0 { base.Fatalf("missing typecheck: %+v", n) } @@ -205,6 +207,7 @@ func walkstmt(n ir.Node) ir.Node { return n case ir.ODCL: + n := n.(*ir.Decl) v := n.Left().(*ir.Name) if v.Class() == ir.PAUTOHEAP { if base.Flag.CompilingRuntime { @@ -217,6 +220,7 @@ func walkstmt(n ir.Node) ir.Node { return n case ir.OBLOCK: + n := n.(*ir.BlockStmt) walkstmtlist(n.List().Slice()) return n @@ -225,6 +229,7 @@ func walkstmt(n ir.Node) ir.Node { panic("unreachable") case ir.ODEFER: + n := n.(*ir.GoDeferStmt) Curfn.SetHasDefer(true) Curfn.NumDefers++ if Curfn.NumDefers > maxOpenDefers { @@ -240,6 +245,7 @@ func walkstmt(n ir.Node) ir.Node { } fallthrough case ir.OGO: + n := n.(*ir.GoDeferStmt) var init ir.Nodes switch call := n.Left(); call.Op() { case ir.OPRINT, ir.OPRINTN: @@ -276,6 +282,7 @@ func walkstmt(n ir.Node) ir.Node { return n case ir.OFOR, ir.OFORUNTIL: + n := n.(*ir.ForStmt) if n.Left() != nil { walkstmtlist(n.Left().Init().Slice()) init := n.Left().Init() @@ -292,12 +299,14 @@ func walkstmt(n ir.Node) ir.Node { return n case ir.OIF: + n := n.(*ir.IfStmt) n.SetLeft(walkexpr(n.Left(), n.PtrInit())) walkstmtlist(n.Body().Slice()) walkstmtlist(n.Rlist().Slice()) return n case ir.ORETURN: + n := n.(*ir.ReturnStmt) Curfn.NumReturns++ if n.List().Len() == 0 { return n @@ -351,9 +360,11 @@ func walkstmt(n ir.Node) ir.Node { return n case ir.ORETJMP: + n := n.(*ir.BranchStmt) return n case ir.OINLMARK: + n := n.(*ir.InlineMarkStmt) return n case ir.OSELECT: @@ -489,6 +500,7 @@ func walkexpr(n ir.Node, init *ir.Nodes) ir.Node { } if n.Op() == ir.ONAME && n.(*ir.Name).Class() == ir.PAUTOHEAP { + n := n.(*ir.Name) nn := ir.NewStarExpr(base.Pos, n.Name().Heapaddr) nn.Left().MarkNonNil() return walkexpr(typecheck(nn, ctxExpr), init) @@ -543,22 +555,27 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return n case ir.ONOT, ir.ONEG, ir.OPLUS, ir.OBITNOT, ir.OREAL, ir.OIMAG, ir.OSPTR, ir.OITAB, ir.OIDATA: + n := n.(*ir.UnaryExpr) n.SetLeft(walkexpr(n.Left(), init)) return n case ir.ODOTMETH, ir.ODOTINTER: + n := n.(*ir.SelectorExpr) n.SetLeft(walkexpr(n.Left(), init)) return n case ir.OADDR: + n := n.(*ir.AddrExpr) n.SetLeft(walkexpr(n.Left(), init)) return n case ir.ODEREF: + n := n.(*ir.StarExpr) n.SetLeft(walkexpr(n.Left(), init)) return n case ir.OEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH: + n := n.(*ir.BinaryExpr) n.SetLeft(walkexpr(n.Left(), init)) n.SetRight(walkexpr(n.Right(), init)) return n @@ -570,6 +587,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return n case ir.ODOTTYPE, ir.ODOTTYPE2: + n := n.(*ir.TypeAssertExpr) n.SetLeft(walkexpr(n.Left(), init)) // 
Set up interface type addresses for back end. n.SetRight(typename(n.Type())) @@ -582,6 +600,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return n case ir.OLEN, ir.OCAP: + n := n.(*ir.UnaryExpr) if isRuneCount(n) { // Replace len([]rune(string)) with runtime.countrunes(string). return mkcall("countrunes", n.Type(), init, conv(n.Left().(*ir.ConvExpr).Left(), types.Types[types.TSTRING])) @@ -605,6 +624,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return n case ir.OCOMPLEX: + n := n.(*ir.BinaryExpr) n.SetLeft(walkexpr(n.Left(), init)) n.SetRight(walkexpr(n.Right(), init)) return n @@ -614,6 +634,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return walkcompare(n, init) case ir.OANDAND, ir.OOROR: + n := n.(*ir.LogicalExpr) n.SetLeft(walkexpr(n.Left(), init)) // cannot put side effects from n.Right on init, @@ -629,9 +650,11 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return walkprint(n.(*ir.CallExpr), init) case ir.OPANIC: + n := n.(*ir.UnaryExpr) return mkcall("gopanic", nil, init, n.Left()) case ir.ORECOVER: + n := n.(*ir.CallExpr) return mkcall("gorecover", n.Type(), init, nodAddr(nodfp)) case ir.OCLOSUREREAD, ir.OCFUNC: @@ -674,8 +697,10 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { var left, right ir.Node switch n.Op() { case ir.OAS: + n := n.(*ir.AssignStmt) left, right = n.Left(), n.Right() case ir.OASOP: + n := n.(*ir.AssignOpStmt) left, right = n.Left(), n.Right() } @@ -683,6 +708,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // the mapassign call. var mapAppend *ir.CallExpr if left.Op() == ir.OINDEXMAP && right.Op() == ir.OAPPEND { + left := left.(*ir.IndexExpr) mapAppend = right.(*ir.CallExpr) if !samesafeexpr(left, mapAppend.List().First()) { base.Fatalf("not same expressions: %v != %v", left, mapAppend.List().First()) @@ -764,6 +790,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return as case ir.OAS2: + n := n.(*ir.AssignListStmt) init.AppendNodes(n.PtrInit()) walkexprlistsafe(n.List().Slice(), init) walkexprlistsafe(n.Rlist().Slice(), init) @@ -771,6 +798,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // a,b,... = fn() case ir.OAS2FUNC: + n := n.(*ir.AssignListStmt) init.AppendNodes(n.PtrInit()) r := n.Rlist().First() @@ -789,6 +817,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // x, y = <-c // order.stmt made sure x is addressable or blank. 
case ir.OAS2RECV: + n := n.(*ir.AssignListStmt) init.AppendNodes(n.PtrInit()) r := n.Rlist().First().(*ir.UnaryExpr) // recv @@ -807,6 +836,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // a,b = m[i] case ir.OAS2MAPR: + n := n.(*ir.AssignListStmt) init.AppendNodes(n.PtrInit()) r := n.Rlist().First().(*ir.IndexExpr) @@ -868,6 +898,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return walkexpr(typecheck(as, ctxStmt), init) case ir.ODELETE: + n := n.(*ir.CallExpr) init.AppendNodes(n.PtrInit()) map_ := n.List().First() key := n.List().Second() @@ -883,11 +914,13 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key) case ir.OAS2DOTTYPE: + n := n.(*ir.AssignListStmt) walkexprlistsafe(n.List().Slice(), init) n.PtrRlist().SetIndex(0, walkexpr(n.Rlist().First(), init)) return n case ir.OCONVIFACE: + n := n.(*ir.ConvExpr) n.SetLeft(walkexpr(n.Left(), init)) fromType := n.Left().Type() @@ -1061,6 +1094,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return conv(mkcall(fn, types.Types[result], init, conv(n.Left(), types.Types[param])), n.Type()) case ir.ODIV, ir.OMOD: + n := n.(*ir.BinaryExpr) n.SetLeft(walkexpr(n.Left(), init)) n.SetRight(walkexpr(n.Right(), init)) @@ -1120,6 +1154,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return n case ir.OINDEX: + n := n.(*ir.IndexExpr) n.SetLeft(walkexpr(n.Left(), init)) // save the original node for bounds checking elision. @@ -1164,6 +1199,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OINDEXMAP: // Replace m[k] with *map{access1,assign}(maptype, m, &k) + n := n.(*ir.IndexExpr) n.SetLeft(walkexpr(n.Left(), init)) n.SetRight(walkexpr(n.Right(), init)) map_ := n.Left() @@ -1207,6 +1243,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { panic("unreachable") case ir.OSLICEHEADER: + n := n.(*ir.SliceHeaderExpr) n.SetLeft(walkexpr(n.Left(), init)) n.List().SetFirst(walkexpr(n.List().First(), init)) n.List().SetSecond(walkexpr(n.List().Second(), init)) @@ -1251,6 +1288,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return reduceSlice(n) case ir.ONEW: + n := n.(*ir.UnaryExpr) if n.Type().Elem().NotInHeap() { base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type().Elem()) } @@ -1277,6 +1315,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OCLOSE: // cannot use chanfn - closechan takes any, not chan any + n := n.(*ir.UnaryExpr) fn := syslook("closechan") fn = substArgTypes(fn, n.Left().Type()) return mkcall1(fn, nil, init, n.Left()) @@ -1284,6 +1323,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OMAKECHAN: // When size fits into int, use makechan instead of // makechan64, which is faster and shorter on 32 bit platforms. 
+ n := n.(*ir.MakeExpr) size := n.Left() fnname := "makechan64" argtype := types.Types[types.TINT64] @@ -1299,6 +1339,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, typename(n.Type()), conv(size, argtype)) case ir.OMAKEMAP: + n := n.(*ir.MakeExpr) t := n.Type() hmapType := hmap(t) hint := n.Left() @@ -1400,6 +1441,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return mkcall1(fn, n.Type(), init, typename(n.Type()), conv(hint, argtype), h) case ir.OMAKESLICE: + n := n.(*ir.MakeExpr) l := n.Left() r := n.Right() if r == nil { @@ -1471,6 +1513,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return walkexpr(typecheck(m, ctxExpr), init) case ir.OMAKESLICECOPY: + n := n.(*ir.MakeExpr) if n.Esc() == EscNone { base.Fatalf("OMAKESLICECOPY with EscNone: %v", n) } @@ -1525,6 +1568,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return walkexpr(typecheck(s, ctxExpr), init) case ir.ORUNESTR: + n := n.(*ir.ConvExpr) a := nodnil() if n.Esc() == EscNone { t := types.NewArray(types.Types[types.TUINT8], 4) @@ -1534,6 +1578,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return mkcall("intstring", n.Type(), init, a, conv(n.Left(), types.Types[types.TINT64])) case ir.OBYTES2STR, ir.ORUNES2STR: + n := n.(*ir.ConvExpr) a := nodnil() if n.Esc() == EscNone { // Create temporary buffer for string on stack. @@ -1550,6 +1595,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return mkcall("slicebytetostring", n.Type(), init, a, ptr, len) case ir.OBYTES2STRTMP: + n := n.(*ir.ConvExpr) n.SetLeft(walkexpr(n.Left(), init)) if !instrumenting { // Let the backend handle OBYTES2STRTMP directly @@ -1562,6 +1608,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return mkcall("slicebytetostringtmp", n.Type(), init, ptr, len) case ir.OSTR2BYTES: + n := n.(*ir.ConvExpr) s := n.Left() if ir.IsConst(s, constant.String) { sc := ir.StringVal(s) @@ -1607,10 +1654,12 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // that know that the slice won't be mutated. // The only such case today is: // for i, c := range []byte(string) + n := n.(*ir.ConvExpr) n.SetLeft(walkexpr(n.Left(), init)) return n case ir.OSTR2RUNES: + n := n.(*ir.ConvExpr) a := nodnil() if n.Esc() == EscNone { // Create temporary buffer for slice on stack. 
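In the walkexpr1 hunks above, the plain assertion n.(*ir.ConvExpr) cannot fail because the enclosing case has already pinned down n.Op(); where a node's shape is not guaranteed by such a guard, the comma-ok form is the defensive alternative. A sketch of both forms, with stand-in types:

package main

import "fmt"

type Op int

const (
	OCONV Op = iota
	ONAME
)

type Node interface{ Op() Op }

type ConvExpr struct{ X Node }

func (*ConvExpr) Op() Op { return OCONV }

type Name struct{ Sym string }

func (*Name) Op() Op { return ONAME }

func walk(n Node) {
	// Guarded by the Op check, the plain assertion cannot fail.
	if n.Op() == OCONV {
		n := n.(*ConvExpr)
		fmt.Println("conversion of", n.X)
		return
	}
	// Without such a guard, comma-ok avoids a panic on the wrong type.
	if name, ok := n.(*Name); ok {
		fmt.Println("name", name.Sym)
	}
}

func main() {
	walk(&ConvExpr{X: &Name{Sym: "x"}})
	walk(&Name{Sym: "y"})
}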
@@ -1634,6 +1683,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return var_ case ir.OSEND: + n := n.(*ir.SendStmt) n1 := n.Right() n1 = assignconv(n1, n.Left().Type().Elem(), "chan send") n1 = walkexpr(n1, init) @@ -2100,8 +2150,10 @@ func isReflectHeaderDataField(l ir.Node) bool { var tsym *types.Sym switch l.Op() { case ir.ODOT: + l := l.(*ir.SelectorExpr) tsym = l.Left().Type().Sym() case ir.ODOTPTR: + l := l.(*ir.SelectorExpr) tsym = l.Left().Type().Elem().Sym() default: return false @@ -2167,12 +2219,15 @@ func reorder3(all []*ir.AssignStmt) []ir.Node { for { switch ll := l; ll.Op() { case ir.ODOT: + ll := ll.(*ir.SelectorExpr) l = ll.Left() continue case ir.OPAREN: + ll := ll.(*ir.ParenExpr) l = ll.Left() continue case ir.OINDEX: + ll := ll.(*ir.IndexExpr) if ll.Left().Type().IsArray() { ll.SetRight(reorder3save(ll.Right(), all, i, &early)) l = ll.Left() @@ -2190,6 +2245,7 @@ func reorder3(all []*ir.AssignStmt) []ir.Node { break case ir.OINDEX, ir.OINDEXMAP: + l := l.(*ir.IndexExpr) l.SetLeft(reorder3save(l.Left(), all, i, &early)) l.SetRight(reorder3save(l.Right(), all, i, &early)) if l.Op() == ir.OINDEXMAP { @@ -2197,8 +2253,10 @@ func reorder3(all []*ir.AssignStmt) []ir.Node { } case ir.ODEREF: + l := l.(*ir.StarExpr) l.SetLeft(reorder3save(l.Left(), all, i, &early)) case ir.ODOTPTR: + l := l.(*ir.SelectorExpr) l.SetLeft(reorder3save(l.Left(), all, i, &early)) } @@ -2238,15 +2296,19 @@ func outervalue(n ir.Node) ir.Node { case ir.OXDOT: base.Fatalf("OXDOT in walk") case ir.ODOT: + nn := nn.(*ir.SelectorExpr) n = nn.Left() continue case ir.OPAREN: + nn := nn.(*ir.ParenExpr) n = nn.Left() continue case ir.OCONVNOP: + nn := nn.(*ir.ConvExpr) n = nn.Left() continue case ir.OINDEX: + nn := nn.(*ir.IndexExpr) if nn.Left().Type() != nil && nn.Left().Type().IsArray() { n = nn.Left() continue @@ -2338,6 +2400,7 @@ func anyAddrTaken(n ir.Node) bool { return ir.Any(n, func(n ir.Node) bool { switch n.Op() { case ir.ONAME: + n := n.(*ir.Name) return n.Class() == ir.PEXTERN || n.Class() == ir.PAUTOHEAP || n.Name().Addrtaken() case ir.ODOT: // but not ODOTPTR - should have been handled in aliased. @@ -2420,6 +2483,7 @@ func refersToCommonName(l ir.Node, r ir.Node) bool { } doL = func(l ir.Node) error { if l.Op() == ir.ONAME { + l := l.(*ir.Name) targetL = l.Name() if doR(r) == stop { return stop @@ -3635,6 +3699,7 @@ func bounded(n ir.Node, max int64) bool { switch n.Op() { case ir.OAND, ir.OANDNOT: + n := n.(*ir.BinaryExpr) v := int64(-1) switch { case smallintconst(n.Left()): @@ -3653,6 +3718,7 @@ func bounded(n ir.Node, max int64) bool { } case ir.OMOD: + n := n.(*ir.BinaryExpr) if !sign && smallintconst(n.Right()) { v := ir.Int64Val(n.Right()) if 0 <= v && v <= max { @@ -3661,6 +3727,7 @@ func bounded(n ir.Node, max int64) bool { } case ir.ODIV: + n := n.(*ir.BinaryExpr) if !sign && smallintconst(n.Right()) { v := ir.Int64Val(n.Right()) for bits > 0 && v >= 2 { @@ -3670,6 +3737,7 @@ func bounded(n ir.Node, max int64) bool { } case ir.ORSH: + n := n.(*ir.BinaryExpr) if !sign && smallintconst(n.Right()) { v := ir.Int64Val(n.Right()) if v > int64(bits) { @@ -3849,6 +3917,7 @@ func anySideEffects(n ir.Node) bool { // Only possible side effect is division by zero. case ir.ODIV, ir.OMOD: + n := n.(*ir.BinaryExpr) if n.Right().Op() != ir.OLITERAL || constant.Sign(n.Right().Val()) == 0 { return true } @@ -3856,6 +3925,7 @@ func anySideEffects(n ir.Node) bool { // Only possible side effect is panic on invalid size, // but many makechan and makemap use size zero, which is definitely OK. 
case ir.OMAKECHAN, ir.OMAKEMAP: + n := n.(*ir.MakeExpr) if !ir.IsConst(n.Left(), constant.Int) || constant.Sign(n.Left().Val()) != 0 { return true } @@ -3901,6 +3971,7 @@ func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node { if !isBuiltinCall && n.IsDDD() { last := n.List().Len() - 1 if va := n.List().Index(last); va.Op() == ir.OSLICELIT { + va := va.(*ir.CompLitExpr) n.PtrList().Set(append(n.List().Slice()[:last], va.List().Slice()...)) n.SetIsDDD(false) } @@ -4051,11 +4122,14 @@ func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node { walk = func(n ir.Node) { switch n.Op() { case ir.OADD: + n := n.(*ir.BinaryExpr) walk(n.Left()) walk(n.Right()) case ir.OSUB, ir.OANDNOT: + n := n.(*ir.BinaryExpr) walk(n.Left()) case ir.OCONVNOP: + n := n.(*ir.ConvExpr) if n.Left().Type().IsUnsafePtr() { n.SetLeft(cheapexpr(n.Left(), init)) originals = append(originals, convnop(n.Left(), types.Types[types.TUNSAFEPTR])) diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go index 7a945c369029e..53a63afe9b6d9 100644 --- a/src/cmd/compile/internal/ir/mini.go +++ b/src/cmd/compile/internal/ir/mini.go @@ -35,11 +35,6 @@ type miniNode struct { esc uint16 } -func (n *miniNode) Format(s fmt.State, verb rune) { panic(1) } -func (n *miniNode) copy() Node { panic(1) } -func (n *miniNode) doChildren(do func(Node) error) error { panic(1) } -func (n *miniNode) editChildren(edit func(Node) Node) { panic(1) } - // posOr returns pos if known, or else n.pos. // For use in DeepCopy. func (n *miniNode) posOr(pos src.XPos) src.XPos { @@ -85,106 +80,27 @@ func (n *miniNode) SetDiag(x bool) { n.bits.set(miniDiag, x) } // Empty, immutable graph structure. -func (n *miniNode) Left() Node { return nil } -func (n *miniNode) Right() Node { return nil } -func (n *miniNode) Init() Nodes { return Nodes{} } -func (n *miniNode) PtrInit() *Nodes { return &immutableEmptyNodes } -func (n *miniNode) Body() Nodes { return Nodes{} } -func (n *miniNode) PtrBody() *Nodes { return &immutableEmptyNodes } -func (n *miniNode) List() Nodes { return Nodes{} } -func (n *miniNode) PtrList() *Nodes { return &immutableEmptyNodes } -func (n *miniNode) Rlist() Nodes { return Nodes{} } -func (n *miniNode) PtrRlist() *Nodes { return &immutableEmptyNodes } -func (n *miniNode) SetLeft(x Node) { - if x != nil { - panic(n.no("SetLeft")) - } -} -func (n *miniNode) SetRight(x Node) { - if x != nil { - panic(n.no("SetRight")) - } -} +func (n *miniNode) Init() Nodes { return Nodes{} } +func (n *miniNode) PtrInit() *Nodes { return &immutableEmptyNodes } func (n *miniNode) SetInit(x Nodes) { if x != nil { panic(n.no("SetInit")) } } -func (n *miniNode) SetBody(x Nodes) { - if x != nil { - panic(n.no("SetBody")) - } -} -func (n *miniNode) SetList(x Nodes) { - if x != nil { - panic(n.no("SetList")) - } -} -func (n *miniNode) SetRlist(x Nodes) { - if x != nil { - panic(n.no("SetRlist")) - } -} // Additional functionality unavailable. 
func (n *miniNode) no(name string) string { return "cannot " + name + " on " + n.op.String() } -func (n *miniNode) SetOp(Op) { panic(n.no("SetOp")) } -func (n *miniNode) SubOp() Op { panic(n.no("SubOp")) } -func (n *miniNode) SetSubOp(Op) { panic(n.no("SetSubOp")) } -func (n *miniNode) Type() *types.Type { return nil } -func (n *miniNode) SetType(*types.Type) { panic(n.no("SetType")) } -func (n *miniNode) Func() *Func { return nil } -func (n *miniNode) Name() *Name { return nil } -func (n *miniNode) Sym() *types.Sym { return nil } -func (n *miniNode) SetSym(*types.Sym) { panic(n.no("SetSym")) } -func (n *miniNode) Offset() int64 { return types.BADWIDTH } -func (n *miniNode) SetOffset(x int64) { panic(n.no("SetOffset")) } -func (n *miniNode) Class() Class { return Pxxx } -func (n *miniNode) SetClass(Class) { panic(n.no("SetClass")) } -func (n *miniNode) Likely() bool { panic(n.no("Likely")) } -func (n *miniNode) SetLikely(bool) { panic(n.no("SetLikely")) } -func (n *miniNode) SliceBounds() (low, high, max Node) { - panic(n.no("SliceBounds")) -} -func (n *miniNode) SetSliceBounds(low, high, max Node) { - panic(n.no("SetSliceBounds")) -} -func (n *miniNode) Iota() int64 { panic(n.no("Iota")) } -func (n *miniNode) SetIota(int64) { panic(n.no("SetIota")) } -func (n *miniNode) Colas() bool { return false } -func (n *miniNode) SetColas(bool) { panic(n.no("SetColas")) } -func (n *miniNode) NoInline() bool { panic(n.no("NoInline")) } -func (n *miniNode) SetNoInline(bool) { panic(n.no("SetNoInline")) } -func (n *miniNode) Transient() bool { panic(n.no("Transient")) } -func (n *miniNode) SetTransient(bool) { panic(n.no("SetTransient")) } -func (n *miniNode) Implicit() bool { return false } -func (n *miniNode) SetImplicit(bool) { panic(n.no("SetImplicit")) } -func (n *miniNode) IsDDD() bool { return false } -func (n *miniNode) SetIsDDD(bool) { panic(n.no("SetIsDDD")) } -func (n *miniNode) Embedded() bool { return false } -func (n *miniNode) SetEmbedded(bool) { panic(n.no("SetEmbedded")) } -func (n *miniNode) IndexMapLValue() bool { panic(n.no("IndexMapLValue")) } -func (n *miniNode) SetIndexMapLValue(bool) { panic(n.no("SetIndexMapLValue")) } -func (n *miniNode) ResetAux() { panic(n.no("ResetAux")) } -func (n *miniNode) HasBreak() bool { panic(n.no("HasBreak")) } -func (n *miniNode) SetHasBreak(bool) { panic(n.no("SetHasBreak")) } -func (n *miniNode) Val() constant.Value { panic(n.no("Val")) } -func (n *miniNode) SetVal(v constant.Value) { panic(n.no("SetVal")) } -func (n *miniNode) Int64Val() int64 { panic(n.no("Int64Val")) } -func (n *miniNode) Uint64Val() uint64 { panic(n.no("Uint64Val")) } -func (n *miniNode) CanInt64() bool { panic(n.no("CanInt64")) } -func (n *miniNode) BoolVal() bool { panic(n.no("BoolVal")) } -func (n *miniNode) StringVal() string { panic(n.no("StringVal")) } -func (n *miniNode) HasCall() bool { return false } -func (n *miniNode) SetHasCall(bool) { panic(n.no("SetHasCall")) } -func (n *miniNode) NonNil() bool { return false } -func (n *miniNode) MarkNonNil() { panic(n.no("MarkNonNil")) } -func (n *miniNode) Bounded() bool { return false } -func (n *miniNode) SetBounded(bool) { panic(n.no("SetBounded")) } -func (n *miniNode) Opt() interface{} { return nil } -func (n *miniNode) SetOpt(interface{}) { panic(n.no("SetOpt")) } -func (n *miniNode) MarkReadonly() { panic(n.no("MarkReadonly")) } -func (n *miniNode) TChanDir() types.ChanDir { panic(n.no("TChanDir")) } -func (n *miniNode) SetTChanDir(types.ChanDir) { panic(n.no("SetTChanDir")) } +func (n *miniNode) Type() *types.Type { 
return nil } +func (n *miniNode) SetType(*types.Type) { panic(n.no("SetType")) } +func (n *miniNode) Name() *Name { return nil } +func (n *miniNode) Sym() *types.Sym { return nil } +func (n *miniNode) Val() constant.Value { panic(n.no("Val")) } +func (n *miniNode) SetVal(v constant.Value) { panic(n.no("SetVal")) } +func (n *miniNode) HasCall() bool { return false } +func (n *miniNode) SetHasCall(bool) { panic(n.no("SetHasCall")) } +func (n *miniNode) NonNil() bool { return false } +func (n *miniNode) MarkNonNil() { panic(n.no("MarkNonNil")) } +func (n *miniNode) Opt() interface{} { return nil } +func (n *miniNode) SetOpt(interface{}) { panic(n.no("SetOpt")) } diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 1679313c86290..86ef600f266ad 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -33,59 +33,15 @@ type Node interface { // Abstract graph structure, for generic traversals. Op() Op - SetOp(x Op) - SubOp() Op - SetSubOp(x Op) - Left() Node - SetLeft(x Node) - Right() Node - SetRight(x Node) Init() Nodes PtrInit() *Nodes SetInit(x Nodes) - Body() Nodes - PtrBody() *Nodes - SetBody(x Nodes) - List() Nodes - SetList(x Nodes) - PtrList() *Nodes - Rlist() Nodes - SetRlist(x Nodes) - PtrRlist() *Nodes // Fields specific to certain Ops only. Type() *types.Type SetType(t *types.Type) - Func() *Func Name() *Name Sym() *types.Sym - SetSym(x *types.Sym) - Offset() int64 - SetOffset(x int64) - Class() Class - SetClass(x Class) - Likely() bool - SetLikely(x bool) - SliceBounds() (low, high, max Node) - SetSliceBounds(low, high, max Node) - Iota() int64 - SetIota(x int64) - Colas() bool - SetColas(x bool) - NoInline() bool - SetNoInline(x bool) - Transient() bool - SetTransient(x bool) - Implicit() bool - SetImplicit(x bool) - IsDDD() bool - SetIsDDD(x bool) - IndexMapLValue() bool - SetIndexMapLValue(x bool) - ResetAux() - HasBreak() bool - SetHasBreak(x bool) - MarkReadonly() Val() constant.Value SetVal(v constant.Value) @@ -98,8 +54,6 @@ type Node interface { SetOpt(x interface{}) Diag() bool SetDiag(x bool) - Bounded() bool - SetBounded(x bool) Typecheck() uint8 SetTypecheck(x uint8) NonNil() bool From f9d373720e76a45cf2d0cb4507fe49dae33afd25 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 23 Dec 2020 00:02:08 -0500 Subject: [PATCH 220/474] [dev.regabi] cmd/compile: remove Left, Right etc methods [generated] Now that the generic graph structure methods - Left, Right, and so on - have been removed from the Node interface, each implementation's uses can be replaced with direct field access, using more specific names, and the methods themselves can be deleted. Passes buildall w/ toolstash -cmp. [git-generate] cd src/cmd/compile/internal/ir rf ' mv Func.iota Func.Iota_ mv Name.fn Name.Func_ ' cd ../gc rf ' ex . 
../ir {
import "cmd/compile/internal/ir"
import "cmd/compile/internal/types"

var ns ir.Nodes
var b bool
var i64 int64
var n ir.Node
var op ir.Op
var sym *types.Sym
var class ir.Class

var decl *ir.Decl
decl.Left() -> decl.X
decl.SetLeft(n) -> decl.X = n

var asl *ir.AssignListStmt
asl.List() -> asl.Lhs
asl.PtrList() -> &asl.Lhs
asl.SetList(ns) -> asl.Lhs = ns
asl.Rlist() -> asl.Rhs
asl.PtrRlist() -> &asl.Rhs
asl.SetRlist(ns) -> asl.Rhs = ns
asl.Colas() -> asl.Def
asl.SetColas(b) -> asl.Def = b

var as *ir.AssignStmt
as.Left() -> as.X
as.SetLeft(n) -> as.X = n
as.Right() -> as.Y
as.SetRight(n) -> as.Y = n
as.Colas() -> as.Def
as.SetColas(b) -> as.Def = b

var ao *ir.AssignOpStmt
ao.Left() -> ao.X
ao.SetLeft(n) -> ao.X = n
ao.Right() -> ao.Y
ao.SetRight(n) -> ao.Y = n
ao.SubOp() -> ao.AsOp
ao.SetSubOp(op) -> ao.AsOp = op
ao.Implicit() -> ao.IncDec
ao.SetImplicit(b) -> ao.IncDec = b

var bl *ir.BlockStmt
bl.List() -> bl.List_
bl.PtrList() -> &bl.List_
bl.SetList(ns) -> bl.List_ = ns

var br *ir.BranchStmt
br.Sym() -> br.Label
br.SetSym(sym) -> br.Label = sym

var cas *ir.CaseStmt
cas.List() -> cas.List_
cas.PtrList() -> &cas.List_
cas.SetList(ns) -> cas.List_ = ns
cas.Body() -> cas.Body_
cas.PtrBody() -> &cas.Body_
cas.SetBody(ns) -> cas.Body_ = ns
cas.Rlist() -> cas.Vars
cas.PtrRlist() -> &cas.Vars
cas.SetRlist(ns) -> cas.Vars = ns
cas.Left() -> cas.Comm
cas.SetLeft(n) -> cas.Comm = n

var fr *ir.ForStmt
fr.Sym() -> fr.Label
fr.SetSym(sym) -> fr.Label = sym
fr.Left() -> fr.Cond
fr.SetLeft(n) -> fr.Cond = n
fr.Right() -> fr.Post
fr.SetRight(n) -> fr.Post = n
fr.Body() -> fr.Body_
fr.PtrBody() -> &fr.Body_
fr.SetBody(ns) -> fr.Body_ = ns
fr.List() -> fr.Late
fr.PtrList() -> &fr.Late
fr.SetList(ns) -> fr.Late = ns
fr.HasBreak() -> fr.HasBreak_
fr.SetHasBreak(b) -> fr.HasBreak_ = b

var gs *ir.GoDeferStmt
gs.Left() -> gs.Call
gs.SetLeft(n) -> gs.Call = n

var ifs *ir.IfStmt
ifs.Left() -> ifs.Cond
ifs.SetLeft(n) -> ifs.Cond = n
ifs.Body() -> ifs.Body_
ifs.PtrBody() -> &ifs.Body_
ifs.SetBody(ns) -> ifs.Body_ = ns
ifs.Rlist() -> ifs.Else
ifs.PtrRlist() -> &ifs.Else
ifs.SetRlist(ns) -> ifs.Else = ns
ifs.Likely() -> ifs.Likely_
ifs.SetLikely(b) -> ifs.Likely_ = b

var im *ir.InlineMarkStmt
im.Offset() -> im.Index
im.SetOffset(i64) -> im.Index = i64

var lab *ir.LabelStmt
lab.Sym() -> lab.Label
lab.SetSym(sym) -> lab.Label = sym

var rng *ir.RangeStmt
rng.Sym() -> rng.Label
rng.SetSym(sym) -> rng.Label = sym
rng.Right() -> rng.X
rng.SetRight(n) -> rng.X = n
rng.Body() -> rng.Body_
rng.PtrBody() -> &rng.Body_
rng.SetBody(ns) -> rng.Body_ = ns
rng.List() -> rng.Vars
rng.PtrList() -> &rng.Vars
rng.SetList(ns) -> rng.Vars = ns
rng.HasBreak() -> rng.HasBreak_
rng.SetHasBreak(b) -> rng.HasBreak_ = b
rng.Colas() -> rng.Def
rng.SetColas(b) -> rng.Def = b

var ret *ir.ReturnStmt
ret.List() -> ret.Results
ret.PtrList() -> &ret.Results
ret.SetList(ns) -> ret.Results = ns

var sel *ir.SelectStmt
sel.List() -> sel.Cases
sel.PtrList() -> &sel.Cases
sel.SetList(ns) -> sel.Cases = ns
sel.Sym() -> sel.Label
sel.SetSym(sym) -> sel.Label = sym
sel.HasBreak() -> sel.HasBreak_
sel.SetHasBreak(b) -> sel.HasBreak_ = b
sel.Body() -> sel.Compiled
sel.PtrBody() -> &sel.Compiled
sel.SetBody(ns) -> sel.Compiled = ns

var send *ir.SendStmt
send.Left() -> send.Chan
send.SetLeft(n) -> send.Chan = n
send.Right() -> send.Value
send.SetRight(n) -> send.Value = n

var sw *ir.SwitchStmt
sw.Left() -> sw.Tag
sw.SetLeft(n) -> sw.Tag = n
sw.List() -> sw.Cases
sw.PtrList() -> &sw.Cases
sw.SetList(ns) -> sw.Cases = ns
sw.Body() -> sw.Compiled
sw.PtrBody() -> &sw.Compiled
sw.SetBody(ns) -> sw.Compiled = ns
sw.Sym() -> sw.Label
sw.SetSym(sym) -> sw.Label = sym
sw.HasBreak() -> sw.HasBreak_
sw.SetHasBreak(b) -> sw.HasBreak_ = b

var tg *ir.TypeSwitchGuard
tg.Left() -> tg.Tag
tg.SetLeft(nil) -> tg.Tag = nil
tg.SetLeft(n) -> tg.Tag = n.(*ir.Ident)
tg.Right() -> tg.X
tg.SetRight(n) -> tg.X = n

var adds *ir.AddStringExpr
adds.List() -> adds.List_
adds.PtrList() -> &adds.List_
adds.SetList(ns) -> adds.List_ = ns

var addr *ir.AddrExpr
addr.Left() -> addr.X
addr.SetLeft(n) -> addr.X = n
addr.Right() -> addr.Alloc
addr.SetRight(n) -> addr.Alloc = n

var bin *ir.BinaryExpr
bin.Left() -> bin.X
bin.SetLeft(n) -> bin.X = n
bin.Right() -> bin.Y
bin.SetRight(n) -> bin.Y = n

var log *ir.LogicalExpr
log.Left() -> log.X
log.SetLeft(n) -> log.X = n
log.Right() -> log.Y
log.SetRight(n) -> log.Y = n

var call *ir.CallExpr
call.Left() -> call.X
call.SetLeft(n) -> call.X = n
call.List() -> call.Args
call.PtrList() -> &call.Args
call.SetList(ns) -> call.Args = ns
call.Rlist() -> call.Rargs
call.PtrRlist() -> &call.Rargs
call.SetRlist(ns) -> call.Rargs = ns
call.IsDDD() -> call.DDD
call.SetIsDDD(b) -> call.DDD = b
call.NoInline() -> call.NoInline_
call.SetNoInline(b) -> call.NoInline_ = b
call.Body() -> call.Body_
call.PtrBody() -> &call.Body_
call.SetBody(ns) -> call.Body_ = ns

var cp *ir.CallPartExpr
cp.Func() -> cp.Func_
cp.Left() -> cp.X
cp.SetLeft(n) -> cp.X = n
cp.Sym() -> cp.Method.Sym

var clo *ir.ClosureExpr
clo.Func() -> clo.Func_

var cr *ir.ClosureReadExpr
cr.Offset() -> cr.Offset_

var cl *ir.CompLitExpr
cl.Right() -> cl.Ntype
cl.SetRight(nil) -> cl.Ntype = nil
cl.SetRight(n) -> cl.Ntype = ir.Node(n).(ir.Ntype)
cl.List() -> cl.List_
cl.PtrList() -> &cl.List_
cl.SetList(ns) -> cl.List_ = ns

var conv *ir.ConvExpr
conv.Left() -> conv.X
conv.SetLeft(n) -> conv.X = n

var ix *ir.IndexExpr
ix.Left() -> ix.X
ix.SetLeft(n) -> ix.X = n
ix.Right() -> ix.Index
ix.SetRight(n) -> ix.Index = n
ix.IndexMapLValue() -> ix.Assigned
ix.SetIndexMapLValue(b) -> ix.Assigned = b

var kv *ir.KeyExpr
kv.Left() -> kv.Key
kv.SetLeft(n) -> kv.Key = n
kv.Right() -> kv.Value
kv.SetRight(n) -> kv.Value = n

var sk *ir.StructKeyExpr
sk.Sym() -> sk.Field
sk.SetSym(sym) -> sk.Field = sym
sk.Left() -> sk.Value
sk.SetLeft(n) -> sk.Value = n
sk.Offset() -> sk.Offset_
sk.SetOffset(i64) -> sk.Offset_ = i64

var ic *ir.InlinedCallExpr
ic.Body() -> ic.Body_
ic.PtrBody() -> &ic.Body_
ic.SetBody(ns) -> ic.Body_ = ns
ic.Rlist() -> ic.ReturnVars
ic.PtrRlist() -> &ic.ReturnVars
ic.SetRlist(ns) -> ic.ReturnVars = ns

var mak *ir.MakeExpr
mak.Left() -> mak.Len
mak.SetLeft(n) -> mak.Len = n
mak.Right() -> mak.Cap
mak.SetRight(n) -> mak.Cap = n

var par *ir.ParenExpr
par.Left() -> par.X
par.SetLeft(n) -> par.X = n

var res *ir.ResultExpr
res.Offset() -> res.Offset_
res.SetOffset(i64) -> res.Offset_ = i64

var dot *ir.SelectorExpr
dot.Left() -> dot.X
dot.SetLeft(n) -> dot.X = n
dot.Sym() -> dot.Sel
dot.SetSym(sym) -> dot.Sel = sym
dot.Offset() -> dot.Offset_
dot.SetOffset(i64) -> dot.Offset_ = i64

var sl *ir.SliceExpr
sl.Left() -> sl.X
sl.SetLeft(n) -> sl.X = n
sl.List() -> sl.List_
sl.PtrList() -> &sl.List_
sl.SetList(ns) -> sl.List_ = ns

var sh *ir.SliceHeaderExpr
sh.Left() -> sh.Ptr
sh.SetLeft(n) -> sh.Ptr = n
sh.List() -> sh.LenCap_
sh.PtrList() -> &sh.LenCap_
sh.SetList(ns) -> sh.LenCap_ = ns

var st *ir.StarExpr
st.Left() -> st.X
st.SetLeft(n) -> st.X = n

var ta *ir.TypeAssertExpr
ta.Left() -> ta.X
ta.SetLeft(n) -> ta.X = n
ta.Right() -> ta.Ntype
ta.SetRight(n) -> ta.Ntype = n
ta.List() -> ta.Itab
ta.PtrList() -> &ta.Itab
ta.SetList(ns) -> ta.Itab = ns

var u *ir.UnaryExpr
u.Left() -> u.X
u.SetLeft(n) -> u.X = n

var fn *ir.Func
fn.Body() -> fn.Body_
fn.PtrBody() -> &fn.Body_
fn.SetBody(ns) -> fn.Body_ = ns
fn.Iota() -> fn.Iota_
fn.SetIota(i64) -> fn.Iota_ = i64
fn.Func() -> fn

var nam *ir.Name
nam.SubOp() -> nam.BuiltinOp
nam.SetSubOp(op) -> nam.BuiltinOp = op
nam.Class() -> nam.Class_
nam.SetClass(class) -> nam.Class_ = class
nam.Func() -> nam.Func_
nam.Offset() -> nam.Offset_
nam.SetOffset(i64) -> nam.Offset_ = i64
}

ex . ../ir {
import "cmd/compile/internal/ir"

var n ir.Nodes
(&n).Append -> n.Append
(&n).AppendNodes -> n.AppendNodes
(&n).MoveNodes -> n.MoveNodes
(&n).Prepend -> n.Prepend
(&n).Set -> n.Set
(&n).Set1 -> n.Set1
(&n).Set2 -> n.Set2
(&n).Set3 -> n.Set3

var ntype ir.Ntype
ir.Node(ntype).(ir.Ntype) -> ntype
}
'

cd ../ir
rf '
rm \
	Decl.Left Decl.SetLeft \
	AssignListStmt.List AssignListStmt.PtrList AssignListStmt.SetList \
	AssignListStmt.Rlist AssignListStmt.PtrRlist AssignListStmt.SetRlist \
	AssignListStmt.Colas AssignListStmt.SetColas \
	AssignStmt.Left AssignStmt.SetLeft \
	AssignStmt.Right AssignStmt.SetRight \
	AssignStmt.Colas AssignStmt.SetColas \
	AssignOpStmt.Left AssignOpStmt.SetLeft \
	AssignOpStmt.Right AssignOpStmt.SetRight \
	AssignOpStmt.SubOp AssignOpStmt.SetSubOp \
	AssignOpStmt.Implicit AssignOpStmt.SetImplicit \
	BlockStmt.List BlockStmt.PtrList BlockStmt.SetList \
	BranchStmt.SetSym \
	CaseStmt.List CaseStmt.PtrList CaseStmt.SetList \
	CaseStmt.Body CaseStmt.PtrBody CaseStmt.SetBody \
	CaseStmt.Rlist CaseStmt.PtrRlist CaseStmt.SetRlist \
	CaseStmt.Left CaseStmt.SetLeft \
	ForStmt.Left ForStmt.SetLeft \
	ForStmt.Right ForStmt.SetRight \
	ForStmt.Body ForStmt.PtrBody ForStmt.SetBody \
	ForStmt.List ForStmt.PtrList ForStmt.SetList \
	ForStmt.HasBreak ForStmt.SetHasBreak \
	ForStmt.Sym ForStmt.SetSym \
	GoDeferStmt.Left GoDeferStmt.SetLeft \
	IfStmt.Left IfStmt.SetLeft \
	IfStmt.Body IfStmt.PtrBody IfStmt.SetBody \
	IfStmt.Rlist IfStmt.PtrRlist IfStmt.SetRlist \
	IfStmt.Likely IfStmt.SetLikely \
	LabelStmt.SetSym \
	RangeStmt.Right RangeStmt.SetRight \
	RangeStmt.Body RangeStmt.PtrBody RangeStmt.SetBody \
	RangeStmt.List RangeStmt.PtrList RangeStmt.SetList \
	RangeStmt.HasBreak RangeStmt.SetHasBreak \
	RangeStmt.Colas RangeStmt.SetColas \
	RangeStmt.Sym RangeStmt.SetSym \
	ReturnStmt.List ReturnStmt.PtrList ReturnStmt.SetList \
	SelectStmt.List SelectStmt.PtrList SelectStmt.SetList \
	SelectStmt.HasBreak SelectStmt.SetHasBreak \
	SelectStmt.Body SelectStmt.PtrBody SelectStmt.SetBody \
	SelectStmt.Sym SelectStmt.SetSym \
	SendStmt.Left SendStmt.SetLeft \
	SendStmt.Right SendStmt.SetRight \
	SwitchStmt.Left SwitchStmt.SetLeft \
	SwitchStmt.List SwitchStmt.PtrList SwitchStmt.SetList \
	SwitchStmt.Body SwitchStmt.PtrBody SwitchStmt.SetBody \
	SwitchStmt.HasBreak SwitchStmt.SetHasBreak \
	SwitchStmt.Sym SwitchStmt.SetSym \
	TypeSwitchGuard.Left TypeSwitchGuard.SetLeft \
	TypeSwitchGuard.Right TypeSwitchGuard.SetRight \
	AddStringExpr.List AddStringExpr.PtrList AddStringExpr.SetList \
	AddrExpr.Left AddrExpr.SetLeft \
	AddrExpr.Right AddrExpr.SetRight \
	BinaryExpr.Left BinaryExpr.SetLeft \
	BinaryExpr.Right BinaryExpr.SetRight \
	LogicalExpr.Left LogicalExpr.SetLeft \
	LogicalExpr.Right LogicalExpr.SetRight \
	CallExpr.Left CallExpr.SetLeft \
	CallExpr.List CallExpr.PtrList CallExpr.SetList \
	CallExpr.Rlist CallExpr.PtrRlist CallExpr.SetRlist \
	CallExpr.NoInline CallExpr.SetNoInline \
	CallExpr.Body CallExpr.PtrBody CallExpr.SetBody \
	CallExpr.IsDDD CallExpr.SetIsDDD \
	CallPartExpr.Left CallPartExpr.SetLeft \
	ClosureReadExpr.Offset \
	ClosureReadExpr.Type \ # provided by miniExpr already
	CompLitExpr.Right CompLitExpr.SetRight \
	CompLitExpr.List CompLitExpr.PtrList CompLitExpr.SetList \
	ConvExpr.Left ConvExpr.SetLeft \
	IndexExpr.Left IndexExpr.SetLeft \
	IndexExpr.Right IndexExpr.SetRight \
	IndexExpr.IndexMapLValue IndexExpr.SetIndexMapLValue \
	KeyExpr.Left KeyExpr.SetLeft \
	KeyExpr.Right KeyExpr.SetRight \
	StructKeyExpr.Left StructKeyExpr.SetLeft \
	StructKeyExpr.Offset StructKeyExpr.SetOffset \
	StructKeyExpr.SetSym \
	InlinedCallExpr.Body InlinedCallExpr.PtrBody InlinedCallExpr.SetBody \
	InlinedCallExpr.Rlist InlinedCallExpr.PtrRlist InlinedCallExpr.SetRlist \
	MakeExpr.Left MakeExpr.SetLeft \
	MakeExpr.Right MakeExpr.SetRight \
	MethodExpr.Left MethodExpr.SetLeft \
	MethodExpr.Right MethodExpr.SetRight \
	MethodExpr.Offset MethodExpr.SetOffset \
	MethodExpr.Class MethodExpr.SetClass \
	ParenExpr.Left ParenExpr.SetLeft \
	ResultExpr.Offset ResultExpr.SetOffset \
	ReturnStmt.IsDDD \
	SelectorExpr.Left SelectorExpr.SetLeft \
	SelectorExpr.Offset SelectorExpr.SetOffset \
	SelectorExpr.SetSym \
	SliceExpr.Left SliceExpr.SetLeft \
	SliceExpr.List SliceExpr.PtrList SliceExpr.SetList \
	SliceHeaderExpr.Left SliceHeaderExpr.SetLeft \
	SliceHeaderExpr.List SliceHeaderExpr.PtrList SliceHeaderExpr.SetList \
	StarExpr.Left StarExpr.SetLeft \
	TypeAssertExpr.Left TypeAssertExpr.SetLeft \
	TypeAssertExpr.Right TypeAssertExpr.SetRight \
	TypeAssertExpr.List TypeAssertExpr.PtrList TypeAssertExpr.SetList \
	UnaryExpr.Left UnaryExpr.SetLeft \
	Func.Body Func.PtrBody Func.SetBody \
	Func.Iota Func.SetIota \
	CallPartExpr.Func ClosureExpr.Func Func.Func Name.Func \

mv BlockStmt.List_ BlockStmt.List
mv CaseStmt.List_ CaseStmt.List
mv CaseStmt.Body_ CaseStmt.Body
mv ForStmt.Body_ ForStmt.Body
mv ForStmt.HasBreak_ ForStmt.HasBreak
mv Func.Iota_ Func.Iota
mv IfStmt.Body_ IfStmt.Body
mv IfStmt.Likely_ IfStmt.Likely
mv RangeStmt.Body_ RangeStmt.Body
mv RangeStmt.HasBreak_ RangeStmt.HasBreak
mv SelectStmt.HasBreak_ SelectStmt.HasBreak
mv SwitchStmt.HasBreak_ SwitchStmt.HasBreak
mv AddStringExpr.List_ AddStringExpr.List
mv CallExpr.NoInline_ CallExpr.NoInline
mv CallExpr.Body_ CallExpr.Body # TODO what is this?
mv CallExpr.DDD CallExpr.IsDDD
mv ClosureReadExpr.Offset_ ClosureReadExpr.Offset
mv CompLitExpr.List_ CompLitExpr.List
mv StructKeyExpr.Offset_ StructKeyExpr.Offset
mv InlinedCallExpr.Body_ InlinedCallExpr.Body
mv ResultExpr.Offset_ ResultExpr.Offset
mv SelectorExpr.Offset_ SelectorExpr.Offset
mv SliceExpr.List_ SliceExpr.List
mv SliceHeaderExpr.LenCap_ SliceHeaderExpr.LenCap
mv Func.Body_ Func.Body
mv CallPartExpr.Func_ CallPartExpr.Func
mv ClosureExpr.Func_ ClosureExpr.Func
mv Name.Func_ Name.Func
'

Change-Id: Ia2ee59649674f83eb123e63fda7a7781cf91cc56
Reviewed-on: https://go-review.googlesource.com/c/go/+/277935
Trust: Russ Cox
Run-TryBot: Russ Cox
TryBot-Result: Go Bot
Reviewed-by: Matthew Dempsky
---
 .../compile/internal/gc/abiutilsaux_test.go   |   4 +-
 src/cmd/compile/internal/gc/alg.go            |  74 +-
 src/cmd/compile/internal/gc/bexport.go        |   2 +-
 src/cmd/compile/internal/gc/closure.go        |  56 +-
 src/cmd/compile/internal/gc/const.go          |  50 +-
 src/cmd/compile/internal/gc/dcl.go            |  24 +-
 src/cmd/compile/internal/gc/escape.go         | 330 +++----
 src/cmd/compile/internal/gc/export.go         |   2 +-
 src/cmd/compile/internal/gc/gen.go            |   6 +-
 src/cmd/compile/internal/gc/gsubr.go          |  10 +-
 src/cmd/compile/internal/gc/iexport.go        | 178 ++--
 src/cmd/compile/internal/gc/iimport.go        |  74 +-
 src/cmd/compile/internal/gc/init.go           |  10 +-
 src/cmd/compile/internal/gc/initorder.go      |  24 +-
 src/cmd/compile/internal/gc/inl.go            | 204 ++---
 src/cmd/compile/internal/gc/main.go           |   2 +-
 src/cmd/compile/internal/gc/noder.go          |  90 +-
 src/cmd/compile/internal/gc/obj.go            |   8 +-
 src/cmd/compile/internal/gc/order.go          | 420 ++++-----
 src/cmd/compile/internal/gc/pgen.go           |  44 +-
 src/cmd/compile/internal/gc/pgen_test.go      |   4 +-
 src/cmd/compile/internal/gc/plive.go          |  14 +-
 src/cmd/compile/internal/gc/range.go          | 144 +--
 src/cmd/compile/internal/gc/reflect.go        |   6 +-
 src/cmd/compile/internal/gc/scc.go            |  10 +-
 src/cmd/compile/internal/gc/scope.go          |   6 +-
 src/cmd/compile/internal/gc/select.go         | 116 +--
 src/cmd/compile/internal/gc/sinit.go          | 180 ++--
 src/cmd/compile/internal/gc/ssa.go            | 518 +++++------
 src/cmd/compile/internal/gc/subr.go           | 108 +--
 src/cmd/compile/internal/gc/swt.go            | 172 ++--
 src/cmd/compile/internal/gc/typecheck.go      | 816 ++++++++---------
 src/cmd/compile/internal/gc/universe.go       |   6 +-
 src/cmd/compile/internal/gc/unsafe.go         |  24 +-
 src/cmd/compile/internal/gc/walk.go           | 844 +++++++++---------
 src/cmd/compile/internal/ir/expr.go           | 218 +----
 src/cmd/compile/internal/ir/fmt.go            | 220 ++---
 src/cmd/compile/internal/ir/func.go           |  24 +-
 src/cmd/compile/internal/ir/name.go           |   5 +-
 src/cmd/compile/internal/ir/node_gen.go       |  78 +-
 src/cmd/compile/internal/ir/stmt.go           | 217 +----
 41 files changed, 2539 insertions(+), 2803 deletions(-)

diff --git a/src/cmd/compile/internal/gc/abiutilsaux_test.go b/src/cmd/compile/internal/gc/abiutilsaux_test.go
index fd0b197207be0..de35e8edd658b 100644
--- a/src/cmd/compile/internal/gc/abiutilsaux_test.go
+++ b/src/cmd/compile/internal/gc/abiutilsaux_test.go
@@ -20,7 +20,7 @@ import (
 func mkParamResultField(t *types.Type, s *types.Sym, which ir.Class) *types.Field {
 	field := types.NewField(src.NoXPos, s, t)
 	n := NewName(s)
-	n.SetClass(which)
+	n.Class_ = which
 	field.Nname = n
 	n.SetType(t)
 	return field
@@ -78,7 +78,7 @@ func verifyParamResultOffset(t *testing.T, f *types.Field, r ABIParamAssignment,
 	n := ir.AsNode(f.Nname).(*ir.Name)
 	if n.FrameOffset() != int64(r.Offset) {
 		t.Errorf("%s %d: got offset %d wanted %d t=%v",
-			which, idx, r.Offset, n.Offset(), f.Type)
+			which, idx, r.Offset, n.Offset_, f.Type)
 		return 1
 	}
 	return 0
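[Editorial example — a minimal sketch of the before/after shape of this CL, using hypothetical stand-in types rather than the real ir package, to make the script above concrete:]

	// field_migration.go
	package main

	import "fmt"

	type Node interface{ String() string }

	type Ident struct{ Name string }

	func (i *Ident) String() string { return i.Name }

	// Nodes stands in for the slice-backed ir.Nodes helper; after the
	// rewrite its methods hang off the exported field itself rather
	// than being reached through a PtrList()/PtrBody() accessor.
	type Nodes []Node

	func (ns *Nodes) Append(more ...Node) { *ns = append(*ns, more...) }

	// IfStmt exposes typed fields directly; under the old API these were
	// reached as Left()/SetLeft(), Body()/PtrBody(), Rlist()/PtrRlist().
	type IfStmt struct {
		Cond Node
		Body Nodes
		Else Nodes
	}

	func main() {
		n := &IfStmt{}
		n.Cond = &Ident{"ok"}         // was n.SetLeft(...)
		n.Body.Append(&Ident{"work"}) // was n.PtrBody().Append(...)
		n.Else.Append(&Ident{"bail"}) // was n.PtrRlist().Append(...)
		fmt.Println(n.Cond, len(n.Body), len(n.Else))
	}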
diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go
index 730db9c1c94f4..bb2717a8b5cef 100644
--- a/src/cmd/compile/internal/gc/alg.go
+++ b/src/cmd/compile/internal/gc/alg.go
@@ -324,11 +324,11 @@ func genhash(t *types.Type) *obj.LSym {
 		nx := ir.NewIndexExpr(base.Pos, np, ni)
 		nx.SetBounded(true)
 		na := nodAddr(nx)
-		call.PtrList().Append(na)
-		call.PtrList().Append(nh)
-		loop.PtrBody().Append(ir.NewAssignStmt(base.Pos, nh, call))
+		call.Args.Append(na)
+		call.Args.Append(nh)
+		loop.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))
-		fn.PtrBody().Append(loop)
+		fn.Body.Append(loop)
 	case types.TSTRUCT:
 		// Walk the struct using memhash for runs of AMEM
@@ -348,9 +348,9 @@ func genhash(t *types.Type) *obj.LSym {
 				call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
 				nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
 				na := nodAddr(nx)
-				call.PtrList().Append(na)
-				call.PtrList().Append(nh)
-				fn.PtrBody().Append(ir.NewAssignStmt(base.Pos, nh, call))
+				call.Args.Append(na)
+				call.Args.Append(nh)
+				fn.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))
 				i++
 				continue
 			}
@@ -363,21 +363,21 @@ func genhash(t *types.Type) *obj.LSym {
 			call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
 			nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
 			na := nodAddr(nx)
-			call.PtrList().Append(na)
-			call.PtrList().Append(nh)
-			call.PtrList().Append(nodintconst(size))
-			fn.PtrBody().Append(ir.NewAssignStmt(base.Pos, nh, call))
+			call.Args.Append(na)
+			call.Args.Append(nh)
+			call.Args.Append(nodintconst(size))
+			fn.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))
 			i = next
 		}
 	}
 	r := ir.NewReturnStmt(base.Pos, nil)
-	r.PtrList().Append(nh)
-	fn.PtrBody().Append(r)
+	r.Results.Append(nh)
+	fn.Body.Append(r)
 	if base.Flag.LowerR != 0 {
-		ir.DumpList("genhash body", fn.Body())
+		ir.DumpList("genhash body", fn.Body)
 	}
 	funcbody()
@@ -386,7 +386,7 @@
 	typecheckFunc(fn)
 	Curfn = fn
-	typecheckslice(fn.Body().Slice(), ctxStmt)
+	typecheckslice(fn.Body.Slice(), ctxStmt)
 	Curfn = nil
 	if base.Debug.DclStack != 0 {
@@ -587,11 +587,11 @@ func geneq(t *types.Type) *obj.LSym {
 			for i := int64(0); i < nelem; i++ {
 				// if check {} else { goto neq }
 				nif := ir.NewIfStmt(base.Pos, checkIdx(nodintconst(i)), nil, nil)
-				nif.PtrRlist().Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
-				fn.PtrBody().Append(nif)
+				nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
+				fn.Body.Append(nif)
 			}
 			if last {
-				fn.PtrBody().Append(ir.NewAssignStmt(base.Pos, nr, checkIdx(nodintconst(nelem))))
+				fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, checkIdx(nodintconst(nelem))))
 			}
 		} else {
 			// Generate a for loop.
@@ -604,11 +604,11 @@ func geneq(t *types.Type) *obj.LSym {
 			loop.PtrInit().Append(init)
 			// if eq(pi, qi) {} else { goto neq }
 			nif := ir.NewIfStmt(base.Pos, checkIdx(i), nil, nil)
-			nif.PtrRlist().Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
-			loop.PtrBody().Append(nif)
-			fn.PtrBody().Append(loop)
+			nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
+			loop.Body.Append(nif)
+			fn.Body.Append(loop)
 			if last {
-				fn.PtrBody().Append(ir.NewAssignStmt(base.Pos, nr, nodbool(true)))
+				fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, nodbool(true)))
 			}
 		}
 	}
@@ -718,42 +718,42 @@ func geneq(t *types.Type) *obj.LSym {
 	}
 	if len(flatConds) == 0 {
-		fn.PtrBody().Append(ir.NewAssignStmt(base.Pos, nr, nodbool(true)))
+		fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, nodbool(true)))
 	} else {
 		for _, c := range flatConds[:len(flatConds)-1] {
 			// if cond {} else { goto neq }
 			n := ir.NewIfStmt(base.Pos, c, nil, nil)
-			n.PtrRlist().Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
-			fn.PtrBody().Append(n)
+			n.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
+			fn.Body.Append(n)
 		}
-		fn.PtrBody().Append(ir.NewAssignStmt(base.Pos, nr, flatConds[len(flatConds)-1]))
+		fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, flatConds[len(flatConds)-1]))
 	}
 	}
 	// ret:
 	//   return
 	ret := autolabel(".ret")
-	fn.PtrBody().Append(ir.NewLabelStmt(base.Pos, ret))
-	fn.PtrBody().Append(ir.NewReturnStmt(base.Pos, nil))
+	fn.Body.Append(ir.NewLabelStmt(base.Pos, ret))
+	fn.Body.Append(ir.NewReturnStmt(base.Pos, nil))
 	// neq:
 	//   r = false
 	//   return (or goto ret)
-	fn.PtrBody().Append(ir.NewLabelStmt(base.Pos, neq))
-	fn.PtrBody().Append(ir.NewAssignStmt(base.Pos, nr, nodbool(false)))
+	fn.Body.Append(ir.NewLabelStmt(base.Pos, neq))
+	fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, nodbool(false)))
 	if EqCanPanic(t) || anyCall(fn) {
 		// Epilogue is large, so share it with the equal case.
-		fn.PtrBody().Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, ret))
+		fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, ret))
 	} else {
 		// Epilogue is small, so don't bother sharing.
-		fn.PtrBody().Append(ir.NewReturnStmt(base.Pos, nil))
+		fn.Body.Append(ir.NewReturnStmt(base.Pos, nil))
 	}
 	// TODO(khr): the epilogue size detection condition above isn't perfect.
 	// We should really do a generic CL that shares epilogues across
 	// the board. See #24936.
 	if base.Flag.LowerR != 0 {
-		ir.DumpList("geneq body", fn.Body())
+		ir.DumpList("geneq body", fn.Body)
 	}
 	funcbody()
@@ -762,7 +762,7 @@
 	typecheckFunc(fn)
 	Curfn = fn
-	typecheckslice(fn.Body().Slice(), ctxStmt)
+	typecheckslice(fn.Body.Slice(), ctxStmt)
 	Curfn = nil
 	if base.Debug.DclStack != 0 {
@@ -869,10 +869,10 @@ func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node {
 	fn, needsize := eqmemfunc(size, nx.Type().Elem())
 	call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
-	call.PtrList().Append(nx)
-	call.PtrList().Append(ny)
+	call.Args.Append(nx)
+	call.Args.Append(ny)
 	if needsize {
-		call.PtrList().Append(nodintconst(size))
+		call.Args.Append(nodintconst(size))
 	}
 	return call
diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go
index 2347971fc2c38..3c377d8ba35e5 100644
--- a/src/cmd/compile/internal/gc/bexport.go
+++ b/src/cmd/compile/internal/gc/bexport.go
@@ -17,7 +17,7 @@ type exporter struct {
 func (p *exporter) markObject(n ir.Node) {
 	if n.Op() == ir.ONAME {
 		n := n.(*ir.Name)
-		if n.Class() == ir.PFUNC {
+		if n.Class_ == ir.PFUNC {
 			inlFlood(n, exportsym)
 		}
 	}
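[Editorial example — the ex rule "(&n).Append -> n.Append" in the script above is sound because Go inserts the address-of automatically when the receiver is addressable; a minimal standalone sketch:]

	// addressable.go
	package main

	import "fmt"

	type Nodes []string

	// Append has a pointer receiver, as ir.Nodes methods do after this CL.
	func (ns *Nodes) Append(more ...string) { *ns = append(*ns, more...) }

	func main() {
		var n Nodes
		(&n).Append("a") // explicit form
		n.Append("b")    // shorthand; Go takes &n implicitly since n is addressable
		fmt.Println(n)   // [a b]
	}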
diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go
index f47b2e2b075cc..1019cff331533 100644
--- a/src/cmd/compile/internal/gc/closure.go
+++ b/src/cmd/compile/internal/gc/closure.go
@@ -77,11 +77,11 @@ func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node {
 // TODO: This creation of the named function should probably really be done in a
 // separate pass from type-checking.
 func typecheckclosure(clo *ir.ClosureExpr, top int) {
-	fn := clo.Func()
+	fn := clo.Func
 	// Set current associated iota value, so iota can be used inside
 	// function in ConstSpec, see issue #22344
 	if x := getIotaValue(); x >= 0 {
-		fn.SetIota(x)
+		fn.Iota = x
 	}
 	fn.ClosureType = typecheck(fn.ClosureType, ctxType)
@@ -124,7 +124,7 @@ func typecheckclosure(clo *ir.ClosureExpr, top int) {
 		Curfn = fn
 		olddd := decldepth
 		decldepth = 1
-		typecheckslice(fn.Body().Slice(), ctxStmt)
+		typecheckslice(fn.Body.Slice(), ctxStmt)
 		decldepth = olddd
 		Curfn = oldfn
 	}
@@ -195,7 +195,7 @@ func capturevars(fn *ir.Func) {
 		outermost := v.Defn.(*ir.Name)
 		// out parameters will be assigned to implicitly upon return.
-		if outermost.Class() != ir.PPARAMOUT && !outermost.Name().Addrtaken() && !outermost.Name().Assigned() && v.Type().Width <= 128 {
+		if outermost.Class_ != ir.PPARAMOUT && !outermost.Name().Addrtaken() && !outermost.Name().Assigned() && v.Type().Width <= 128 {
 			v.SetByval(true)
 		} else {
 			outermost.Name().SetAddrtaken(true)
@@ -262,7 +262,7 @@ func transformclosure(fn *ir.Func) {
 				v = addr
 			}
-			v.SetClass(ir.PPARAM)
+			v.Class_ = ir.PPARAM
 			decls = append(decls, v)
 			fld := types.NewField(src.NoXPos, v.Sym(), v.Type())
@@ -294,7 +294,7 @@ func transformclosure(fn *ir.Func) {
 			if v.Byval() && v.Type().Width <= int64(2*Widthptr) {
 				// If it is a small variable captured by value, downgrade it to PAUTO.
-				v.SetClass(ir.PAUTO)
+				v.Class_ = ir.PAUTO
 				fn.Dcl = append(fn.Dcl, v)
 				body = append(body, ir.NewAssignStmt(base.Pos, v, cr))
 			} else {
@@ -302,7 +302,7 @@
 				// and initialize in entry prologue.
 				addr := NewName(lookup("&" + v.Sym().Name))
 				addr.SetType(types.NewPtr(v.Type()))
-				addr.SetClass(ir.PAUTO)
+				addr.Class_ = ir.PAUTO
 				addr.SetUsed(true)
 				addr.Curfn = fn
 				fn.Dcl = append(fn.Dcl, addr)
@@ -328,7 +328,7 @@
 // hasemptycvars reports whether closure clo has an
 // empty list of captured vars.
 func hasemptycvars(clo *ir.ClosureExpr) bool {
-	return len(clo.Func().ClosureVars) == 0
+	return len(clo.Func.ClosureVars) == 0
 }
 // closuredebugruntimecheck applies boilerplate checks for debug flags
@@ -336,9 +336,9 @@ func hasemptycvars(clo *ir.ClosureExpr) bool {
 func closuredebugruntimecheck(clo *ir.ClosureExpr) {
 	if base.Debug.Closure > 0 {
 		if clo.Esc() == EscHeap {
-			base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func().ClosureVars)
+			base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func.ClosureVars)
 		} else {
-			base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func().ClosureVars)
+			base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func.ClosureVars)
 		}
 	}
 	if base.Flag.CompilingRuntime && clo.Esc() == EscHeap {
@@ -366,7 +366,7 @@ func closureType(clo *ir.ClosureExpr) *types.Type {
 	fields := []*ir.Field{
 		namedfield(".F", types.Types[types.TUINTPTR]),
 	}
-	for _, v := range clo.Func().ClosureVars {
+	for _, v := range clo.Func.ClosureVars {
 		typ := v.Type()
 		if !v.Byval() {
 			typ = types.NewPtr(typ)
@@ -379,7 +379,7 @@ func closureType(clo *ir.ClosureExpr) *types.Type {
 }
 func walkclosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node {
-	fn := clo.Func()
+	fn := clo.Func
 	// If no closure vars, don't bother wrapping.
 	if hasemptycvars(clo) {
@@ -394,7 +394,7 @@ func walkclosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node {
 	clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ).(ir.Ntype), nil)
 	clos.SetEsc(clo.Esc())
-	clos.PtrList().Set(append([]ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, fn.Nname)}, fn.ClosureEnter.Slice()...))
+	clos.List.Set(append([]ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, fn.Nname)}, fn.ClosureEnter.Slice()...))
 	addr := nodAddr(clos)
 	addr.SetEsc(clo.Esc())
@@ -407,7 +407,7 @@ func walkclosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node {
 		if !types.Identical(typ, x.Type()) {
 			panic("closure type does not match order's assigned type")
 		}
-		addr.SetRight(x)
+		addr.Alloc = x
 		clo.Prealloc = nil
 	}
@@ -428,13 +428,13 @@ func typecheckpartialcall(n ir.Node, sym *types.Sym) *ir.CallPartExpr {
 	fn := makepartialcall(dot, dot.Type(), sym)
 	fn.SetWrapper(true)
-	return ir.NewCallPartExpr(dot.Pos(), dot.Left(), dot.Selection, fn)
+	return ir.NewCallPartExpr(dot.Pos(), dot.X, dot.Selection, fn)
 }
 // makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed
 // for partial calls.
 func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir.Func {
-	rcvrtype := dot.Left().Type()
+	rcvrtype := dot.X.Type()
 	sym := methodSymSuffix(rcvrtype, meth, "-fm")
 	if sym.Uniq() {
@@ -480,24 +480,24 @@ func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir.
 	}
 	call := ir.NewCallExpr(base.Pos, ir.OCALL, ir.NewSelectorExpr(base.Pos, ir.OXDOT, ptr, meth), nil)
-	call.PtrList().Set(paramNnames(tfn.Type()))
-	call.SetIsDDD(tfn.Type().IsVariadic())
+	call.Args.Set(paramNnames(tfn.Type()))
+	call.IsDDD = tfn.Type().IsVariadic()
 	if t0.NumResults() != 0 {
 		ret := ir.NewReturnStmt(base.Pos, nil)
-		ret.PtrList().Set1(call)
+		ret.Results.Set1(call)
 		body = append(body, ret)
 	} else {
 		body = append(body, call)
 	}
-	fn.PtrBody().Set(body)
+	fn.Body.Set(body)
 	funcbody()
 	typecheckFunc(fn)
 	// Need to typecheck the body of the just-generated wrapper.
 	// typecheckslice() requires that Curfn is set when processing an ORETURN.
 	Curfn = fn
-	typecheckslice(fn.Body().Slice(), ctxStmt)
+	typecheckslice(fn.Body.Slice(), ctxStmt)
 	sym.Def = fn
 	Target.Decls = append(Target.Decls, fn)
 	Curfn = savecurfn
@@ -512,7 +512,7 @@ func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir.
 func partialCallType(n *ir.CallPartExpr) *types.Type {
 	t := tostruct([]*ir.Field{
 		namedfield("F", types.Types[types.TUINTPTR]),
-		namedfield("R", n.Left().Type()),
+		namedfield("R", n.X.Type()),
 	})
 	t.SetNoalg(true)
 	return t
@@ -526,13 +526,13 @@ func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node {
 	//
 	// Like walkclosure above.
-	if n.Left().Type().IsInterface() {
+	if n.X.Type().IsInterface() {
 		// Trigger panic for method on nil interface now.
 		// Otherwise it happens in the wrapper and is confusing.
-		n.SetLeft(cheapexpr(n.Left(), init))
-		n.SetLeft(walkexpr(n.Left(), nil))
+		n.X = cheapexpr(n.X, init)
+		n.X = walkexpr(n.X, nil)
-		tab := typecheck(ir.NewUnaryExpr(base.Pos, ir.OITAB, n.Left()), ctxExpr)
+		tab := typecheck(ir.NewUnaryExpr(base.Pos, ir.OITAB, n.X), ctxExpr)
 		c := ir.NewUnaryExpr(base.Pos, ir.OCHECKNIL, tab)
 		c.SetTypecheck(1)
@@ -543,7 +543,7 @@ func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node {
 	clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ).(ir.Ntype), nil)
 	clos.SetEsc(n.Esc())
-	clos.PtrList().Set2(ir.NewUnaryExpr(base.Pos, ir.OCFUNC, n.Func().Nname), n.Left())
+	clos.List.Set2(ir.NewUnaryExpr(base.Pos, ir.OCFUNC, n.Func.Nname), n.X)
 	addr := nodAddr(clos)
 	addr.SetEsc(n.Esc())
@@ -556,7 +556,7 @@ func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node {
 		if !types.Identical(typ, x.Type()) {
 			panic("partial call type does not match order's assigned type")
 		}
-		addr.SetRight(x)
+		addr.Alloc = x
 		n.Prealloc = nil
 	}
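[Editorial example — the Byval test in capturevars above must preserve ordinary Go closure semantics: a variable reassigned after capture behaves as shared storage, so only never-reassigned, never-address-taken, small variables may be captured by value. A standalone illustration of the semantics being preserved:]

	// capture.go
	package main

	import "fmt"

	func main() {
		x, y := 1, 1
		f := func() int { return x + y }
		x = 10           // x is assigned after capture: must act by-reference
		fmt.Println(f()) // 11 — the closure observes the update to x
		_ = y            // y is never reassigned: eligible for by-value capture
	}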
diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go
index e54cd0a1028bb..19eb8bc537fe3 100644
--- a/src/cmd/compile/internal/gc/const.go
+++ b/src/cmd/compile/internal/gc/const.go
@@ -163,8 +163,8 @@ func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir
 		}
 		n := n.(*ir.UnaryExpr)
-		n.SetLeft(convlit(n.Left(), ot))
-		if n.Left().Type() == nil {
+		n.X = convlit(n.X, ot)
+		if n.X.Type() == nil {
 			n.SetType(nil)
 			return n
 		}
@@ -181,13 +181,13 @@ func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir
 		var l, r ir.Node
 		switch n := n.(type) {
 		case *ir.BinaryExpr:
-			n.SetLeft(convlit(n.Left(), ot))
-			n.SetRight(convlit(n.Right(), ot))
-			l, r = n.Left(), n.Right()
+			n.X = convlit(n.X, ot)
+			n.Y = convlit(n.Y, ot)
+			l, r = n.X, n.Y
 		case *ir.LogicalExpr:
-			n.SetLeft(convlit(n.Left(), ot))
-			n.SetRight(convlit(n.Right(), ot))
-			l, r = n.Left(), n.Right()
+			n.X = convlit(n.X, ot)
+			n.Y = convlit(n.Y, ot)
+			l, r = n.X, n.Y
 		}
 		if l.Type() == nil || r.Type() == nil {
@@ -213,8 +213,8 @@ func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir
 	case ir.OLSH, ir.ORSH:
 		n := n.(*ir.BinaryExpr)
-		n.SetLeft(convlit1(n.Left(), t, explicit, nil))
-		n.SetType(n.Left().Type())
+		n.X = convlit1(n.X, t, explicit, nil)
+		n.SetType(n.X.Type())
 		if n.Type() != nil && !n.Type().IsInteger() {
 			base.Errorf("invalid operation: %v (shift of type %v)", n, n.Type())
 			n.SetType(nil)
@@ -452,7 +452,7 @@ func evalConst(n ir.Node) ir.Node {
 	switch n.Op() {
 	case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
 		n := n.(*ir.UnaryExpr)
-		nl := n.Left()
+		nl := n.X
 		if nl.Op() == ir.OLITERAL {
 			var prec uint
 			if n.Type().IsUnsigned() {
@@ -463,7 +463,7 @@ func evalConst(n ir.Node) ir.Node {
 	case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT:
 		n := n.(*ir.BinaryExpr)
-		nl, nr := n.Left(), n.Right()
+		nl, nr := n.X, n.Y
 		if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
 			rval := nr.Val()
@@ -488,21 +488,21 @@ func evalConst(n ir.Node) ir.Node {
 	case ir.OOROR, ir.OANDAND:
 		n := n.(*ir.LogicalExpr)
-		nl, nr := n.Left(), n.Right()
+		nl, nr := n.X, n.Y
 		if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
 			return origConst(n, constant.BinaryOp(nl.Val(), tokenForOp[n.Op()], nr.Val()))
 		}
 	case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
 		n := n.(*ir.BinaryExpr)
-		nl, nr := n.Left(), n.Right()
+		nl, nr := n.X, n.Y
 		if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
 			return origBoolConst(n, constant.Compare(nl.Val(), tokenForOp[n.Op()], nr.Val()))
 		}
 	case ir.OLSH, ir.ORSH:
 		n := n.(*ir.BinaryExpr)
-		nl, nr := n.Left(), n.Right()
+		nl, nr := n.X, n.Y
 		if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
 			// shiftBound from go/types; "so we can express smallestFloat64"
 			const shiftBound = 1023 - 1 + 52
@@ -517,14 +517,14 @@ func evalConst(n ir.Node) ir.Node {
 	case ir.OCONV, ir.ORUNESTR:
 		n := n.(*ir.ConvExpr)
-		nl := n.Left()
+		nl := n.X
 		if ir.OKForConst[n.Type().Kind()] && nl.Op() == ir.OLITERAL {
 			return origConst(n, convertVal(nl.Val(), n.Type(), true))
 		}
 	case ir.OCONVNOP:
 		n := n.(*ir.ConvExpr)
-		nl := n.Left()
+		nl := n.X
 		if ir.OKForConst[n.Type().Kind()] && nl.Op() == ir.OLITERAL {
 			// set so n.Orig gets OCONV instead of OCONVNOP
 			n.SetOp(ir.OCONV)
@@ -534,7 +534,7 @@ func evalConst(n ir.Node) ir.Node {
 	case ir.OADDSTR:
 		// Merge adjacent constants in the argument list.
 		n := n.(*ir.AddStringExpr)
-		s := n.List().Slice()
+		s := n.List.Slice()
 		need := 0
 		for i := 0; i < len(s); i++ {
 			if i == 0 || !ir.IsConst(s[i-1], constant.String) || !ir.IsConst(s[i], constant.String) {
@@ -564,7 +564,7 @@ func evalConst(n ir.Node) ir.Node {
 				}
 				nl := ir.Copy(n).(*ir.AddStringExpr)
-				nl.PtrList().Set(s[i:i2])
+				nl.List.Set(s[i:i2])
 				newList = append(newList, origConst(nl, constant.MakeString(strings.Join(strs, ""))))
 				i = i2 - 1
 			} else {
@@ -573,12 +573,12 @@ func evalConst(n ir.Node) ir.Node {
 		}
 		nn := ir.Copy(n).(*ir.AddStringExpr)
-		nn.PtrList().Set(newList)
+		nn.List.Set(newList)
 		return nn
 	case ir.OCAP, ir.OLEN:
 		n := n.(*ir.UnaryExpr)
-		nl := n.Left()
+		nl := n.X
 		switch nl.Type().Kind() {
 		case types.TSTRING:
 			if ir.IsConst(nl, constant.String) {
@@ -596,21 +596,21 @@ func evalConst(n ir.Node) ir.Node {
 	case ir.OREAL:
 		n := n.(*ir.UnaryExpr)
-		nl := n.Left()
+		nl := n.X
 		if nl.Op() == ir.OLITERAL {
 			return origConst(n, constant.Real(nl.Val()))
 		}
 	case ir.OIMAG:
 		n := n.(*ir.UnaryExpr)
-		nl := n.Left()
+		nl := n.X
 		if nl.Op() == ir.OLITERAL {
 			return origConst(n, constant.Imag(nl.Val()))
 		}
 	case ir.OCOMPLEX:
 		n := n.(*ir.BinaryExpr)
-		nl, nr := n.Left(), n.Right()
+		nl, nr := n.X, n.Y
 		if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
 			return origConst(n, makeComplex(nl.Val(), nr.Val()))
 		}
@@ -871,7 +871,7 @@ func (s *constSet) add(pos src.XPos, n ir.Node, what, where string) {
 	if conv := n; conv.Op() == ir.OCONVIFACE {
 		conv := conv.(*ir.ConvExpr)
 		if conv.Implicit() {
-			n = conv.Left()
+			n = conv.X
 		}
 	}
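[Editorial example — evalConst above folds literal operands through the standard go/constant package (constant.BinaryOp, constant.Compare, constant.MakeString); a minimal standalone taste of that API:]

	// fold.go
	package main

	import (
		"fmt"
		"go/constant"
		"go/token"
	)

	func main() {
		a := constant.MakeInt64(7)
		b := constant.MakeInt64(5)
		sum := constant.BinaryOp(a, token.ADD, b) // arithmetic fold: 12
		lt := constant.Compare(a, token.LSS, b)   // comparison fold: false
		// token.ADD on string constants concatenates, as in the OADDSTR case.
		s := constant.BinaryOp(constant.MakeString("go"), token.ADD, constant.MakeString("pher"))
		fmt.Println(sum, lt, constant.StringVal(s)) // 12 false gopher
	}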
diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go
index d85f10faf3c43..9bd044c3686fc 100644
--- a/src/cmd/compile/internal/gc/dcl.go
+++ b/src/cmd/compile/internal/gc/dcl.go
@@ -120,7 +120,7 @@ func declare(n *ir.Name, ctxt ir.Class) {
 	s.Lastlineno = base.Pos
 	s.Def = n
 	n.Vargen = int32(gen)
-	n.SetClass(ctxt)
+	n.Class_ = ctxt
 	if ctxt == ir.PFUNC {
 		n.Sym().SetFunc(true)
 	}
@@ -137,9 +137,9 @@ func variter(vl []*ir.Name, t ir.Ntype, el []ir.Node) []ir.Node {
 	if len(el) == 1 && len(vl) > 1 {
 		e := el[0]
 		as2 := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
-		as2.PtrRlist().Set1(e)
+		as2.Rhs.Set1(e)
 		for _, v := range vl {
-			as2.PtrList().Append(v)
+			as2.Lhs.Append(v)
 			declare(v, dclcontext)
 			v.Ntype = t
 			v.Defn = as2
@@ -234,7 +234,7 @@ func oldname(s *types.Sym) ir.Node {
 		if c == nil || c.Curfn != Curfn {
 			// Do not have a closure var for the active closure yet; make one.
 			c = NewName(s)
-			c.SetClass(ir.PAUTOHEAP)
+			c.Class_ = ir.PAUTOHEAP
 			c.SetIsClosureVar(true)
 			c.SetIsDDD(n.IsDDD())
 			c.Defn = n
@@ -810,11 +810,11 @@ func makefuncsym(s *types.Sym) {
 // setNodeNameFunc marks a node as a function.
 func setNodeNameFunc(n *ir.Name) {
-	if n.Op() != ir.ONAME || n.Class() != ir.Pxxx {
+	if n.Op() != ir.ONAME || n.Class_ != ir.Pxxx {
 		base.Fatalf("expected ONAME/Pxxx node, got %v", n)
 	}
-	n.SetClass(ir.PFUNC)
+	n.Class_ = ir.PFUNC
 	n.Sym().SetFunc(true)
 }
@@ -876,11 +876,11 @@ func (c *nowritebarrierrecChecker) findExtraCalls(nn ir.Node) {
 		return
 	}
 	n := nn.(*ir.CallExpr)
-	if n.Left() == nil || n.Left().Op() != ir.ONAME {
+	if n.X == nil || n.X.Op() != ir.ONAME {
 		return
 	}
-	fn := n.Left().(*ir.Name)
-	if fn.Class() != ir.PFUNC || fn.Name().Defn == nil {
+	fn := n.X.(*ir.Name)
+	if fn.Class_ != ir.PFUNC || fn.Name().Defn == nil {
 		return
 	}
 	if !isRuntimePkg(fn.Sym().Pkg) || fn.Sym().Name != "systemstack" {
@@ -888,14 +888,14 @@ func (c *nowritebarrierrecChecker) findExtraCalls(nn ir.Node) {
 	}
 	var callee *ir.Func
-	arg := n.List().First()
+	arg := n.Args.First()
 	switch arg.Op() {
 	case ir.ONAME:
 		arg := arg.(*ir.Name)
 		callee = arg.Name().Defn.(*ir.Func)
 	case ir.OCLOSURE:
 		arg := arg.(*ir.ClosureExpr)
-		callee = arg.Func()
+		callee = arg.Func
 	default:
 		base.Fatalf("expected ONAME or OCLOSURE node, got %+v", arg)
 	}
@@ -973,7 +973,7 @@ func (c *nowritebarrierrecChecker) check() {
 		q.PushRight(target.Nname)
 	}
 	for !q.Empty() {
-		fn := q.PopLeft().Func()
+		fn := q.PopLeft().Func
 		// Check fn.
 		if fn.WBPos.IsKnown() {
diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go
index 6510dfc4b3d3c..21f02e9471755 100644
--- a/src/cmd/compile/internal/gc/escape.go
+++ b/src/cmd/compile/internal/gc/escape.go
@@ -228,21 +228,21 @@ func (e *Escape) walkFunc(fn *ir.Func) {
 			if e.labels == nil {
 				e.labels = make(map[*types.Sym]labelState)
 			}
-			e.labels[n.Sym()] = nonlooping
+			e.labels[n.Label] = nonlooping
 		case ir.OGOTO:
 			// If we visited the label before the goto,
 			// then this is a looping label.
 			n := n.(*ir.BranchStmt)
-			if e.labels[n.Sym()] == nonlooping {
-				e.labels[n.Sym()] = looping
+			if e.labels[n.Label] == nonlooping {
+				e.labels[n.Label] = looping
 			}
 		}
 	})
 	e.curfn = fn
 	e.loopDepth = 1
-	e.block(fn.Body())
+	e.block(fn.Body)
 	if len(e.labels) != 0 {
 		base.FatalfAt(fn.Pos(), "leftover labels after walkFunc")
@@ -304,18 +304,18 @@ func (e *Escape) stmt(n ir.Node) {
 	case ir.OBLOCK:
 		n := n.(*ir.BlockStmt)
-		e.stmts(n.List())
+		e.stmts(n.List)
 	case ir.ODCL:
 		// Record loop depth at declaration.
 		n := n.(*ir.Decl)
-		if !ir.IsBlank(n.Left()) {
-			e.dcl(n.Left())
+		if !ir.IsBlank(n.X) {
+			e.dcl(n.X)
 		}
 	case ir.OLABEL:
 		n := n.(*ir.LabelStmt)
-		switch e.labels[n.Sym()] {
+		switch e.labels[n.Label] {
 		case nonlooping:
 			if base.Flag.LowerM > 2 {
 				fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n)
@@ -328,127 +328,127 @@ func (e *Escape) stmt(n ir.Node) {
 		default:
 			base.Fatalf("label missing tag")
 		}
-		delete(e.labels, n.Sym())
+		delete(e.labels, n.Label)
 	case ir.OIF:
 		n := n.(*ir.IfStmt)
-		e.discard(n.Left())
-		e.block(n.Body())
-		e.block(n.Rlist())
+		e.discard(n.Cond)
+		e.block(n.Body)
+		e.block(n.Else)
 	case ir.OFOR, ir.OFORUNTIL:
 		n := n.(*ir.ForStmt)
 		e.loopDepth++
-		e.discard(n.Left())
-		e.stmt(n.Right())
-		e.block(n.Body())
+		e.discard(n.Cond)
+		e.stmt(n.Post)
+		e.block(n.Body)
 		e.loopDepth--
 	case ir.ORANGE:
 		// for List = range Right { Nbody }
 		n := n.(*ir.RangeStmt)
 		e.loopDepth++
-		ks := e.addrs(n.List())
-		e.block(n.Body())
+		ks := e.addrs(n.Vars)
+		e.block(n.Body)
 		e.loopDepth--
 		// Right is evaluated outside the loop.
 		k := e.discardHole()
 		if len(ks) >= 2 {
-			if n.Right().Type().IsArray() {
+			if n.X.Type().IsArray() {
 				k = ks[1].note(n, "range")
 			} else {
 				k = ks[1].deref(n, "range-deref")
 			}
 		}
-		e.expr(e.later(k), n.Right())
+		e.expr(e.later(k), n.X)
 	case ir.OSWITCH:
 		n := n.(*ir.SwitchStmt)
-		typesw := n.Left() != nil && n.Left().Op() == ir.OTYPESW
+		typesw := n.Tag != nil && n.Tag.Op() == ir.OTYPESW
 		var ks []EscHole
-		for _, cas := range n.List().Slice() { // cases
+		for _, cas := range n.Cases.Slice() { // cases
 			cas := cas.(*ir.CaseStmt)
-			if typesw && n.Left().(*ir.TypeSwitchGuard).Left() != nil {
-				cv := cas.Rlist().First()
+			if typesw && n.Tag.(*ir.TypeSwitchGuard).Tag != nil {
+				cv := cas.Vars.First()
 				k := e.dcl(cv) // type switch variables have no ODCL.
 				if cv.Type().HasPointers() {
 					ks = append(ks, k.dotType(cv.Type(), cas, "switch case"))
 				}
 			}
-			e.discards(cas.List())
-			e.block(cas.Body())
+			e.discards(cas.List)
+			e.block(cas.Body)
 		}
 		if typesw {
-			e.expr(e.teeHole(ks...), n.Left().(*ir.TypeSwitchGuard).Right())
+			e.expr(e.teeHole(ks...), n.Tag.(*ir.TypeSwitchGuard).X)
 		} else {
-			e.discard(n.Left())
+			e.discard(n.Tag)
 		}
 	case ir.OSELECT:
 		n := n.(*ir.SelectStmt)
-		for _, cas := range n.List().Slice() {
+		for _, cas := range n.Cases.Slice() {
 			cas := cas.(*ir.CaseStmt)
-			e.stmt(cas.Left())
-			e.block(cas.Body())
+			e.stmt(cas.Comm)
+			e.block(cas.Body)
 		}
 	case ir.OSELRECV2:
 		n := n.(*ir.AssignListStmt)
-		e.assign(n.List().First(), n.Rlist().First(), "selrecv", n)
-		e.assign(n.List().Second(), nil, "selrecv", n)
+		e.assign(n.Lhs.First(), n.Rhs.First(), "selrecv", n)
+		e.assign(n.Lhs.Second(), nil, "selrecv", n)
 	case ir.ORECV:
 		// TODO(mdempsky): Consider e.discard(n.Left).
 		n := n.(*ir.UnaryExpr)
 		e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
 	case ir.OSEND:
 		n := n.(*ir.SendStmt)
-		e.discard(n.Left())
-		e.assignHeap(n.Right(), "send", n)
+		e.discard(n.Chan)
+		e.assignHeap(n.Value, "send", n)
 	case ir.OAS:
 		n := n.(*ir.AssignStmt)
-		e.assign(n.Left(), n.Right(), "assign", n)
+		e.assign(n.X, n.Y, "assign", n)
 	case ir.OASOP:
 		n := n.(*ir.AssignOpStmt)
-		e.assign(n.Left(), n.Right(), "assign", n)
+		e.assign(n.X, n.Y, "assign", n)
 	case ir.OAS2:
 		n := n.(*ir.AssignListStmt)
-		for i, nl := range n.List().Slice() {
-			e.assign(nl, n.Rlist().Index(i), "assign-pair", n)
+		for i, nl := range n.Lhs.Slice() {
+			e.assign(nl, n.Rhs.Index(i), "assign-pair", n)
 		}
 	case ir.OAS2DOTTYPE: // v, ok = x.(type)
 		n := n.(*ir.AssignListStmt)
-		e.assign(n.List().First(), n.Rlist().First(), "assign-pair-dot-type", n)
-		e.assign(n.List().Second(), nil, "assign-pair-dot-type", n)
+		e.assign(n.Lhs.First(), n.Rhs.First(), "assign-pair-dot-type", n)
+		e.assign(n.Lhs.Second(), nil, "assign-pair-dot-type", n)
 	case ir.OAS2MAPR: // v, ok = m[k]
 		n := n.(*ir.AssignListStmt)
-		e.assign(n.List().First(), n.Rlist().First(), "assign-pair-mapr", n)
-		e.assign(n.List().Second(), nil, "assign-pair-mapr", n)
+		e.assign(n.Lhs.First(), n.Rhs.First(), "assign-pair-mapr", n)
+		e.assign(n.Lhs.Second(), nil, "assign-pair-mapr", n)
 	case ir.OAS2RECV: // v, ok = <-ch
 		n := n.(*ir.AssignListStmt)
-		e.assign(n.List().First(), n.Rlist().First(), "assign-pair-receive", n)
-		e.assign(n.List().Second(), nil, "assign-pair-receive", n)
+		e.assign(n.Lhs.First(), n.Rhs.First(), "assign-pair-receive", n)
+		e.assign(n.Lhs.Second(), nil, "assign-pair-receive", n)
 	case ir.OAS2FUNC:
 		n := n.(*ir.AssignListStmt)
-		e.stmts(n.Rlist().First().Init())
-		e.call(e.addrs(n.List()), n.Rlist().First(), nil)
+		e.stmts(n.Rhs.First().Init())
+		e.call(e.addrs(n.Lhs), n.Rhs.First(), nil)
 	case ir.ORETURN:
 		n := n.(*ir.ReturnStmt)
 		results := e.curfn.Type().Results().FieldSlice()
-		for i, v := range n.List().Slice() {
+		for i, v := range n.Results.Slice() {
 			e.assign(ir.AsNode(results[i].Nname), v, "return", n)
 		}
 	case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
 		e.call(nil, n, nil)
 	case ir.OGO, ir.ODEFER:
 		n := n.(*ir.GoDeferStmt)
-		e.stmts(n.Left().Init())
-		e.call(nil, n.Left(), n)
+		e.stmts(n.Call.Init())
+		e.call(nil, n.Call, n)
 	case ir.ORETJMP:
 		// TODO(mdempsky): What do? esc.go just ignores it.
@@ -491,7 +491,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
 	uintptrEscapesHack := k.uintptrEscapesHack
 	k.uintptrEscapesHack = false
-	if uintptrEscapesHack && n.Op() == ir.OCONVNOP && n.(*ir.ConvExpr).Left().Type().IsUnsafePtr() {
+	if uintptrEscapesHack && n.Op() == ir.OCONVNOP && n.(*ir.ConvExpr).X.Type().IsUnsafePtr() {
 		// nop
 	} else if k.derefs >= 0 && !n.Type().HasPointers() {
 		k = e.discardHole()
@@ -506,7 +506,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
 	case ir.ONAME:
 		n := n.(*ir.Name)
-		if n.Class() == ir.PFUNC || n.Class() == ir.PEXTERN {
+		if n.Class_ == ir.PFUNC || n.Class_ == ir.PEXTERN {
 			return
 		}
 		e.flow(k, e.oldLoc(n))
@@ -517,46 +517,46 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
 	case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
 		n := n.(*ir.UnaryExpr)
-		e.discard(n.Left())
+		e.discard(n.X)
 	case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
 		n := n.(*ir.BinaryExpr)
-		e.discard(n.Left())
-		e.discard(n.Right())
+		e.discard(n.X)
+		e.discard(n.Y)
	case ir.OANDAND, ir.OOROR:
 		n := n.(*ir.LogicalExpr)
-		e.discard(n.Left())
-		e.discard(n.Right())
+		e.discard(n.X)
+		e.discard(n.Y)
 	case ir.OADDR:
 		n := n.(*ir.AddrExpr)
-		e.expr(k.addr(n, "address-of"), n.Left()) // "address-of"
+		e.expr(k.addr(n, "address-of"), n.X) // "address-of"
 	case ir.ODEREF:
 		n := n.(*ir.StarExpr)
-		e.expr(k.deref(n, "indirection"), n.Left()) // "indirection"
+		e.expr(k.deref(n, "indirection"), n.X) // "indirection"
 	case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
 		n := n.(*ir.SelectorExpr)
-		e.expr(k.note(n, "dot"), n.Left())
+		e.expr(k.note(n, "dot"), n.X)
 	case ir.ODOTPTR:
 		n := n.(*ir.SelectorExpr)
-		e.expr(k.deref(n, "dot of pointer"), n.Left()) // "dot of pointer"
+		e.expr(k.deref(n, "dot of pointer"), n.X) // "dot of pointer"
 	case ir.ODOTTYPE, ir.ODOTTYPE2:
 		n := n.(*ir.TypeAssertExpr)
-		e.expr(k.dotType(n.Type(), n, "dot"), n.Left())
+		e.expr(k.dotType(n.Type(), n, "dot"), n.X)
 	case ir.OINDEX:
 		n := n.(*ir.IndexExpr)
-		if n.Left().Type().IsArray() {
-			e.expr(k.note(n, "fixed-array-index-of"), n.Left())
+		if n.X.Type().IsArray() {
+			e.expr(k.note(n, "fixed-array-index-of"), n.X)
 		} else {
 			// TODO(mdempsky): Fix why reason text.
-			e.expr(k.deref(n, "dot of pointer"), n.Left())
+			e.expr(k.deref(n, "dot of pointer"), n.X)
 		}
-		e.discard(n.Right())
+		e.discard(n.Index)
 	case ir.OINDEXMAP:
 		n := n.(*ir.IndexExpr)
-		e.discard(n.Left())
-		e.discard(n.Right())
+		e.discard(n.X)
+		e.discard(n.Index)
 	case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR:
 		n := n.(*ir.SliceExpr)
-		e.expr(k.note(n, "slice"), n.Left())
+		e.expr(k.note(n, "slice"), n.X)
 		low, high, max := n.SliceBounds()
 		e.discard(low)
 		e.discard(high)
@@ -564,29 +564,29 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
 	case ir.OCONV, ir.OCONVNOP:
 		n := n.(*ir.ConvExpr)
-		if checkPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.Left().Type().IsPtr() {
+		if checkPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.X.Type().IsPtr() {
 			// When -d=checkptr=2 is enabled, treat
 			// conversions to unsafe.Pointer as an
 			// escaping operation. This allows better
 			// runtime instrumentation, since we can more
 			// easily detect object boundaries on the heap
 			// than the stack.
-			e.assignHeap(n.Left(), "conversion to unsafe.Pointer", n)
-		} else if n.Type().IsUnsafePtr() && n.Left().Type().IsUintptr() {
-			e.unsafeValue(k, n.Left())
+			e.assignHeap(n.X, "conversion to unsafe.Pointer", n)
+		} else if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() {
+			e.unsafeValue(k, n.X)
 		} else {
-			e.expr(k, n.Left())
+			e.expr(k, n.X)
 		}
 	case ir.OCONVIFACE:
 		n := n.(*ir.ConvExpr)
-		if !n.Left().Type().IsInterface() && !isdirectiface(n.Left().Type()) {
+		if !n.X.Type().IsInterface() && !isdirectiface(n.X.Type()) {
 			k = e.spill(k, n)
 		}
-		e.expr(k.note(n, "interface-converted"), n.Left())
+		e.expr(k.note(n, "interface-converted"), n.X)
 	case ir.ORECV:
 		n := n.(*ir.UnaryExpr)
-		e.discard(n.Left())
+		e.discard(n.X)
 	case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY:
 		e.call([]EscHole{k}, n, nil)
@@ -598,15 +598,15 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
 	case ir.OMAKESLICE:
 		n := n.(*ir.MakeExpr)
 		e.spill(k, n)
-		e.discard(n.Left())
-		e.discard(n.Right())
+		e.discard(n.Len)
+		e.discard(n.Cap)
 	case ir.OMAKECHAN:
 		n := n.(*ir.MakeExpr)
-		e.discard(n.Left())
+		e.discard(n.Len)
 	case ir.OMAKEMAP:
 		n := n.(*ir.MakeExpr)
 		e.spill(k, n)
-		e.discard(n.Left())
+		e.discard(n.Len)
 	case ir.ORECOVER:
 		// nop
@@ -633,17 +633,17 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
 		name, _ := m.Nname.(*ir.Name)
 		paramK := e.tagHole(ks, name, m.Type.Recv())
-		e.expr(e.teeHole(paramK, closureK), n.Left())
+		e.expr(e.teeHole(paramK, closureK), n.X)
 	case ir.OPTRLIT:
 		n := n.(*ir.AddrExpr)
-		e.expr(e.spill(k, n), n.Left())
+		e.expr(e.spill(k, n), n.X)
 	case ir.OARRAYLIT:
 		n := n.(*ir.CompLitExpr)
-		for _, elt := range n.List().Slice() {
+		for _, elt := range n.List.Slice() {
 			if elt.Op() == ir.OKEY {
-				elt = elt.(*ir.KeyExpr).Right()
+				elt = elt.(*ir.KeyExpr).Value
 			}
 			e.expr(k.note(n, "array literal element"), elt)
 		}
@@ -653,17 +653,17 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
 		k = e.spill(k, n)
 		k.uintptrEscapesHack = uintptrEscapesHack // for ...uintptr parameters
-		for _, elt := range n.List().Slice() {
+		for _, elt := range n.List.Slice() {
 			if elt.Op() == ir.OKEY {
-				elt = elt.(*ir.KeyExpr).Right()
+				elt = elt.(*ir.KeyExpr).Value
 			}
 			e.expr(k.note(n, "slice-literal-element"), elt)
 		}
 	case ir.OSTRUCTLIT:
 		n := n.(*ir.CompLitExpr)
-		for _, elt := range n.List().Slice() {
-			e.expr(k.note(n, "struct literal element"), elt.(*ir.StructKeyExpr).Left())
+		for _, elt := range n.List.Slice() {
+			e.expr(k.note(n, "struct literal element"), elt.(*ir.StructKeyExpr).Value)
 		}
 	case ir.OMAPLIT:
@@ -671,10 +671,10 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
 		e.spill(k, n)
 		// Map keys and values are always stored in the heap.
-		for _, elt := range n.List().Slice() {
+		for _, elt := range n.List.Slice() {
 			elt := elt.(*ir.KeyExpr)
-			e.assignHeap(elt.Left(), "map literal key", n)
-			e.assignHeap(elt.Right(), "map literal value", n)
+			e.assignHeap(elt.Key, "map literal key", n)
+			e.assignHeap(elt.Value, "map literal value", n)
 		}
 	case ir.OCLOSURE:
@@ -682,7 +682,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
 		k = e.spill(k, n)
 		// Link addresses of captured variables to closure.
-		for _, v := range n.Func().ClosureVars {
+		for _, v := range n.Func.ClosureVars {
 			k := k
 			if !v.Byval() {
 				k = k.addr(v, "reference")
@@ -694,7 +694,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
 	case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR:
 		n := n.(*ir.ConvExpr)
 		e.spill(k, n)
-		e.discard(n.Left())
+		e.discard(n.X)
 	case ir.OADDSTR:
 		n := n.(*ir.AddStringExpr)
@@ -702,7 +702,7 @@
 		// Arguments of OADDSTR never escape;
 		// runtime.concatstrings makes sure of that.
-		e.discards(n.List())
+		e.discards(n.List)
 	}
 }
@@ -718,31 +718,31 @@ func (e *Escape) unsafeValue(k EscHole, n ir.Node) {
 	switch n.Op() {
 	case ir.OCONV, ir.OCONVNOP:
 		n := n.(*ir.ConvExpr)
-		if n.Left().Type().IsUnsafePtr() {
-			e.expr(k, n.Left())
+		if n.X.Type().IsUnsafePtr() {
+			e.expr(k, n.X)
 		} else {
-			e.discard(n.Left())
+			e.discard(n.X)
 		}
 	case ir.ODOTPTR:
 		n := n.(*ir.SelectorExpr)
 		if isReflectHeaderDataField(n) {
-			e.expr(k.deref(n, "reflect.Header.Data"), n.Left())
+			e.expr(k.deref(n, "reflect.Header.Data"), n.X)
 		} else {
-			e.discard(n.Left())
+			e.discard(n.X)
 		}
 	case ir.OPLUS, ir.ONEG, ir.OBITNOT:
 		n := n.(*ir.UnaryExpr)
-		e.unsafeValue(k, n.Left())
+		e.unsafeValue(k, n.X)
 	case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OAND, ir.OANDNOT:
 		n := n.(*ir.BinaryExpr)
-		e.unsafeValue(k, n.Left())
-		e.unsafeValue(k, n.Right())
+		e.unsafeValue(k, n.X)
+		e.unsafeValue(k, n.Y)
 	case ir.OLSH, ir.ORSH:
 		n := n.(*ir.BinaryExpr)
-		e.unsafeValue(k, n.Left())
+		e.unsafeValue(k, n.X)
 		// RHS need not be uintptr-typed (#32959) and can't meaningfully
 		// flow pointers anyway.
-		e.discard(n.Right())
+		e.discard(n.Y)
 	default:
 		e.exprSkipInit(e.discardHole(), n)
 	}
 }
@@ -775,7 +775,7 @@ func (e *Escape) addr(n ir.Node) EscHole {
 		base.Fatalf("unexpected addr: %v", n)
 	case ir.ONAME:
 		n := n.(*ir.Name)
-		if n.Class() == ir.PEXTERN {
+		if n.Class_ == ir.PEXTERN {
 			break
 		}
 		k = e.oldLoc(n).asHole()
@@ -784,21 +784,21 @@ func (e *Escape) addr(n ir.Node) EscHole {
 		e.addr(n.Name_)
 	case ir.ODOT:
 		n := n.(*ir.SelectorExpr)
-		k = e.addr(n.Left())
+		k = e.addr(n.X)
 	case ir.OINDEX:
 		n := n.(*ir.IndexExpr)
-		e.discard(n.Right())
-		if n.Left().Type().IsArray() {
-			k = e.addr(n.Left())
+		e.discard(n.Index)
+		if n.X.Type().IsArray() {
+			k = e.addr(n.X)
 		} else {
-			e.discard(n.Left())
+			e.discard(n.X)
 		}
 	case ir.ODEREF, ir.ODOTPTR:
 		e.discard(n)
 	case ir.OINDEXMAP:
 		n := n.(*ir.IndexExpr)
-		e.discard(n.Left())
-		e.assignHeap(n.Right(), "key of map put", n)
+		e.discard(n.X)
+		e.assignHeap(n.Index, "key of map put", n)
 	}
 	if !n.Type().HasPointers() {
@@ -876,17 +876,17 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) {
 		var fn *ir.Name
 		switch call.Op() {
 		case ir.OCALLFUNC:
-			switch v := staticValue(call.Left()); {
-			case v.Op() == ir.ONAME && v.(*ir.Name).Class() == ir.PFUNC:
+			switch v := staticValue(call.X); {
+			case v.Op() == ir.ONAME && v.(*ir.Name).Class_ == ir.PFUNC:
 				fn = v.(*ir.Name)
 			case v.Op() == ir.OCLOSURE:
-				fn = v.(*ir.ClosureExpr).Func().Nname
+				fn = v.(*ir.ClosureExpr).Func.Nname
 			}
 		case ir.OCALLMETH:
-			fn = methodExprName(call.Left())
+			fn = methodExprName(call.X)
 		}
-		fntype := call.Left().Type()
+		fntype := call.X.Type()
 		if fn != nil {
 			fntype = fn.Type()
 		}
@@ -898,20 +898,20 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) {
 		}
 		if r := fntype.Recv(); r != nil {
-			argument(e.tagHole(ks, fn, r), call.Left().(*ir.SelectorExpr).Left())
+			argument(e.tagHole(ks, fn, r), call.X.(*ir.SelectorExpr).X)
 		} else {
 			// Evaluate callee function expression.
-			argument(e.discardHole(), call.Left())
+			argument(e.discardHole(), call.X)
 		}
-		args := call.List().Slice()
+		args := call.Args.Slice()
 		for i, param := range fntype.Params().FieldSlice() {
 			argument(e.tagHole(ks, fn, param), args[i])
 		}
 	case ir.OAPPEND:
 		call := call.(*ir.CallExpr)
-		args := call.List().Slice()
+		args := call.Args.Slice()
 		// Appendee slice may flow directly to the result, if
 		// it has enough capacity. Alternatively, a new heap
@@ -923,7 +923,7 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) {
 		}
 		argument(appendeeK, args[0])
-		if call.IsDDD() {
+		if call.IsDDD {
 			appendedK := e.discardHole()
 			if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() {
 				appendedK = e.heapHole().deref(call, "appended slice...")
@@ -937,30 +937,30 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) {
 	case ir.OCOPY:
 		call := call.(*ir.BinaryExpr)
-		argument(e.discardHole(), call.Left())
+		argument(e.discardHole(), call.X)
 		copiedK := e.discardHole()
-		if call.Right().Type().IsSlice() && call.Right().Type().Elem().HasPointers() {
+		if call.Y.Type().IsSlice() && call.Y.Type().Elem().HasPointers() {
 			copiedK = e.heapHole().deref(call, "copied slice")
 		}
-		argument(copiedK, call.Right())
+		argument(copiedK, call.Y)
 	case ir.OPANIC:
 		call := call.(*ir.UnaryExpr)
-		argument(e.heapHole(), call.Left())
+		argument(e.heapHole(), call.X)
 	case ir.OCOMPLEX:
 		call := call.(*ir.BinaryExpr)
-		argument(e.discardHole(), call.Left())
-		argument(e.discardHole(), call.Right())
+		argument(e.discardHole(), call.X)
+		argument(e.discardHole(), call.Y)
 	case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
 		call := call.(*ir.CallExpr)
-		for _, arg := range call.List().Slice() {
+		for _, arg := range call.Args.Slice() {
 			argument(e.discardHole(), arg)
 		}
 	case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE:
 		call := call.(*ir.UnaryExpr)
-		argument(e.discardHole(), call.Left())
+		argument(e.discardHole(), call.X)
 	}
 }
@@ -1557,7 +1557,7 @@ func (e *Escape) finish(fns []*ir.Func) {
 }
 func (l *EscLocation) isName(c ir.Class) bool {
-	return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class() == c
+	return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class_ == c
 }
 const numEscResults = 7
@@ -1726,10 +1726,10 @@ func isSliceSelfAssign(dst, src ir.Node) bool {
 		return false
 	case ir.ODEREF:
 		dst := dst.(*ir.StarExpr)
-		dstX = dst.Left()
+		dstX = dst.X
 	case ir.ODOTPTR:
 		dst := dst.(*ir.SelectorExpr)
-		dstX = dst.Left()
+		dstX = dst.X
 	}
 	if dstX.Op() != ir.ONAME {
 		return false
@@ -1749,7 +1749,7 @@ func isSliceSelfAssign(dst, src ir.Node) bool {
 		// For slicing an array (not pointer to array), there is an implicit OADDR.
 		// We check that to determine non-pointer array slicing.
 		src := src.(*ir.SliceExpr)
-		if src.Left().Op() == ir.OADDR {
+		if src.X.Op() == ir.OADDR {
 			return false
 		}
 	default:
@@ -1757,15 +1757,15 @@ func isSliceSelfAssign(dst, src ir.Node) bool {
 	}
 	// slice is applied to ONAME dereference.
 	var baseX ir.Node
-	switch base := src.(*ir.SliceExpr).Left(); base.Op() {
+	switch base := src.(*ir.SliceExpr).X; base.Op() {
 	default:
 		return false
 	case ir.ODEREF:
 		base := base.(*ir.StarExpr)
-		baseX = base.Left()
+		baseX = base.X
 	case ir.ODOTPTR:
 		base := base.(*ir.SelectorExpr)
-		baseX = base.Left()
+		baseX = base.X
 	}
 	if baseX.Op() != ir.ONAME {
 		return false
@@ -1801,14 +1801,14 @@ func isSelfAssign(dst, src ir.Node) bool {
 		// Safe trailing accessors that are permitted to differ.
 		dst := dst.(*ir.SelectorExpr)
 		src := src.(*ir.SelectorExpr)
-		return samesafeexpr(dst.Left(), src.Left())
+		return samesafeexpr(dst.X, src.X)
 	case ir.OINDEX:
 		dst := dst.(*ir.IndexExpr)
 		src := src.(*ir.IndexExpr)
-		if mayAffectMemory(dst.Right()) || mayAffectMemory(src.Right()) {
+		if mayAffectMemory(dst.Index) || mayAffectMemory(src.Index) {
 			return false
 		}
-		return samesafeexpr(dst.Left(), src.Left())
+		return samesafeexpr(dst.X, src.X)
 	default:
 		return false
 	}
@@ -1834,27 +1834,27 @@ func mayAffectMemory(n ir.Node) bool {
 	case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
 		n := n.(*ir.BinaryExpr)
-		return mayAffectMemory(n.Left()) || mayAffectMemory(n.Right())
+		return mayAffectMemory(n.X) || mayAffectMemory(n.Y)
 	case ir.OINDEX:
 		n := n.(*ir.IndexExpr)
-		return mayAffectMemory(n.Left()) || mayAffectMemory(n.Right())
+		return mayAffectMemory(n.X) || mayAffectMemory(n.Index)
 	case ir.OCONVNOP, ir.OCONV:
 		n := n.(*ir.ConvExpr)
-		return mayAffectMemory(n.Left())
+		return mayAffectMemory(n.X)
 	case ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
 		n := n.(*ir.UnaryExpr)
-		return mayAffectMemory(n.Left())
+		return mayAffectMemory(n.X)
 	case ir.ODOT, ir.ODOTPTR:
 		n := n.(*ir.SelectorExpr)
-		return mayAffectMemory(n.Left())
+		return mayAffectMemory(n.X)
 	case ir.ODEREF:
 		n := n.(*ir.StarExpr)
-		return mayAffectMemory(n.Left())
+		return mayAffectMemory(n.X)
 	default:
 		return true
@@ -1871,7 +1871,7 @@ func heapAllocReason(n ir.Node) string {
 	// Parameters are always passed via the stack.
 	if n.Op() == ir.ONAME {
 		n := n.(*ir.Name)
-		if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
+		if n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT {
 			return ""
 		}
 	}
@@ -1893,9 +1893,9 @@ func heapAllocReason(n ir.Node) string {
 	if n.Op() == ir.OMAKESLICE {
 		n := n.(*ir.MakeExpr)
-		r := n.Right()
+		r := n.Cap
 		if r == nil {
-			r = n.Left()
+			r = n.Len
 		}
 		if !smallintconst(r) {
 			return "non-constant size"
@@ -1928,7 +1928,7 @@ func addrescapes(n ir.Node) {
 		// if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
 		// on PPARAM it means something different.
-		if n.Class() == ir.PAUTO && n.Esc() == EscNever {
+		if n.Class_ == ir.PAUTO && n.Esc() == EscNever {
 			break
 		}
@@ -1938,7 +1938,7 @@ func addrescapes(n ir.Node) {
 			break
 		}
-		if n.Class() != ir.PPARAM && n.Class() != ir.PPARAMOUT && n.Class() != ir.PAUTO {
+		if n.Class_ != ir.PPARAM && n.Class_ != ir.PPARAMOUT && n.Class_ != ir.PAUTO {
 			break
 		}
@@ -1969,18 +1969,18 @@ func addrescapes(n ir.Node) {
 	// is always a heap pointer anyway.
 	case ir.ODOT:
 		n := n.(*ir.SelectorExpr)
-		addrescapes(n.Left())
+		addrescapes(n.X)
 	case ir.OINDEX:
 		n := n.(*ir.IndexExpr)
-		if !n.Left().Type().IsSlice() {
-			addrescapes(n.Left())
+		if !n.X.Type().IsSlice() {
+			addrescapes(n.X)
 		}
 	case ir.OPAREN:
 		n := n.(*ir.ParenExpr)
-		addrescapes(n.Left())
+		addrescapes(n.X)
 	case ir.OCONVNOP:
 		n := n.(*ir.ConvExpr)
-		addrescapes(n.Left())
+		addrescapes(n.X)
 	}
 }
@@ -1992,7 +1992,7 @@ func moveToHeap(n *ir.Name) {
 	if base.Flag.CompilingRuntime {
 		base.Errorf("%v escapes to heap, not allowed in runtime", n)
 	}
-	if n.Class() == ir.PAUTOHEAP {
+	if n.Class_ == ir.PAUTOHEAP {
 		ir.Dump("n", n)
 		base.Fatalf("double move to heap")
 	}
@@ -2011,7 +2011,7 @@ func moveToHeap(n *ir.Name) {
 	// Parameters have a local stack copy used at function start/end
 	// in addition to the copy in the heap that may live longer than
 	// the function.
-	if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
+	if n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT {
 		if n.FrameOffset() == types.BADWIDTH {
 			base.Fatalf("addrescapes before param assignment")
 		}
@@ -2023,9 +2023,9 @@ func moveToHeap(n *ir.Name) {
 		stackcopy := NewName(n.Sym())
 		stackcopy.SetType(n.Type())
 		stackcopy.SetFrameOffset(n.FrameOffset())
-		stackcopy.SetClass(n.Class())
+		stackcopy.Class_ = n.Class_
 		stackcopy.Heapaddr = heapaddr
-		if n.Class() == ir.PPARAMOUT {
+		if n.Class_ == ir.PPARAMOUT {
 			// Make sure the pointer to the heap copy is kept live throughout the function.
 			// The function could panic at any point, and then a defer could recover.
 			// Thus, we need the pointer to the heap copy always available so the
@@ -2047,7 +2047,7 @@ func moveToHeap(n *ir.Name) {
 			}
 			// Parameters are before locals, so can stop early.
 			// This limits the search even in functions with many local variables.
-			if d.Class() == ir.PAUTO {
+			if d.Class_ == ir.PAUTO {
 				break
 			}
 		}
@@ -2058,7 +2058,7 @@ func moveToHeap(n *ir.Name) {
 	}
 	// Modify n in place so that uses of n now mean indirection of the heapaddr.
-	n.SetClass(ir.PAUTOHEAP)
+	n.Class_ = ir.PAUTOHEAP
 	n.SetFrameOffset(0)
 	n.Heapaddr = heapaddr
 	n.SetEsc(EscHeap)
@@ -2084,7 +2084,7 @@ func (e *Escape) paramTag(fn *ir.Func, narg int, f *types.Field) string {
 		return fmt.Sprintf("arg#%d", narg)
 	}
-	if fn.Body().Len() == 0 {
+	if fn.Body.Len() == 0 {
 		// Assume that uintptr arguments must be held live across the call.
 		// This is most important for syscall.Syscall.
 		// See golang.org/issue/13372.
@@ -2106,7 +2106,7 @@ func (e *Escape) paramTag(fn *ir.Func, narg int, f *types.Field) string {
 		// External functions are assumed unsafe, unless
 		// //go:noescape is given before the declaration.
-		if fn.Func().Pragma&ir.Noescape != 0 {
+		if fn.Pragma&ir.Noescape != 0 {
 			if base.Flag.LowerM != 0 && f.Sym != nil {
 				base.WarnfAt(f.Pos, "%v does not escape", name())
 			}
@@ -2120,7 +2120,7 @@ func (e *Escape) paramTag(fn *ir.Func, narg int, f *types.Field) string {
 		return esc.Encode()
 	}
-	if fn.Func().Pragma&ir.UintptrEscapes != 0 {
+	if fn.Pragma&ir.UintptrEscapes != 0 {
 		if f.Type.IsUintptr() {
 			if base.Flag.LowerM != 0 {
 				base.WarnfAt(f.Pos, "marking %v as escaping uintptr", name())
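[Editorial example — the escape.go machinery above (addrescapes/moveToHeap) is observable from the command line via the compiler's -m diagnostics; a minimal demo, with the file name chosen arbitrarily:]

	// escape_demo.go — build with: go build -gcflags=-m escape_demo.go
	// The compiler reports something along the lines of "moved to heap: v".
	package main

	func leak() *int {
		v := 42   // v's address escapes via the returned pointer,
		return &v // so escape analysis moves v to the heap
	}

	func main() { println(*leak()) }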
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go
index 8a8295537c397..2855f815bed62 100644
--- a/src/cmd/compile/internal/gc/export.go
+++ b/src/cmd/compile/internal/gc/export.go
@@ -83,7 +83,7 @@ func importsym(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Cl
 	}
 	n := ir.NewDeclNameAt(pos, op, s)
-	n.SetClass(ctxt) // TODO(mdempsky): Move this into NewDeclNameAt too?
+	n.Class_ = ctxt // TODO(mdempsky): Move this into NewDeclNameAt too?
 	s.SetPkgDef(n)
 	s.Importdef = ipkg
 	return n
diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go
index 25b241e23688a..f83c636472ae4 100644
--- a/src/cmd/compile/internal/gc/gen.go
+++ b/src/cmd/compile/internal/gc/gen.go
@@ -35,7 +35,7 @@ func isParamStackCopy(n ir.Node) bool {
 		return false
 	}
 	name := n.(*ir.Name)
-	return (name.Class() == ir.PPARAM || name.Class() == ir.PPARAMOUT) && name.Heapaddr != nil
+	return (name.Class_ == ir.PPARAM || name.Class_ == ir.PPARAMOUT) && name.Heapaddr != nil
 }
 // isParamHeapCopy reports whether this is the on-heap copy of
@@ -45,7 +45,7 @@ func isParamHeapCopy(n ir.Node) bool {
 		return false
 	}
 	name := n.(*ir.Name)
-	return name.Class() == ir.PAUTOHEAP && name.Name().Stackcopy != nil
+	return name.Class_ == ir.PAUTOHEAP && name.Name().Stackcopy != nil
 }
 // autotmpname returns the name for an autotmp variable numbered n.
@@ -79,7 +79,7 @@ func tempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name {
 	n := ir.NewNameAt(pos, s)
 	s.Def = n
 	n.SetType(t)
-	n.SetClass(ir.PAUTO)
+	n.Class_ = ir.PAUTO
 	n.SetEsc(EscNever)
 	n.Curfn = curfn
 	n.SetUsed(true)
diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go
index b0ad01bc5d262..6008abeff8f52 100644
--- a/src/cmd/compile/internal/gc/gsubr.go
+++ b/src/cmd/compile/internal/gc/gsubr.go
@@ -270,16 +270,16 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) {
 		tail = ir.NewBranchStmt(base.Pos, ir.ORETJMP, f.Nname.Sym())
 	} else {
 		call := ir.NewCallExpr(base.Pos, ir.OCALL, f.Nname, nil)
-		call.PtrList().Set(paramNnames(tfn.Type()))
-		call.SetIsDDD(tfn.Type().IsVariadic())
+		call.Args.Set(paramNnames(tfn.Type()))
+		call.IsDDD = tfn.Type().IsVariadic()
 		tail = call
 		if tfn.Type().NumResults() > 0 {
 			n := ir.NewReturnStmt(base.Pos, nil)
-			n.PtrList().Set1(call)
+			n.Results.Set1(call)
 			tail = n
 		}
 	}
-	fn.PtrBody().Append(tail)
+	fn.Body.Append(tail)
 	funcbody()
 	if base.Debug.DclStack != 0 {
@@ -288,7 +288,7 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) {
 	typecheckFunc(fn)
 	Curfn = fn
-	typecheckslice(fn.Body().Slice(), ctxStmt)
+	typecheckslice(fn.Body.Slice(), ctxStmt)
 	escapeFuncs([]*ir.Func{fn}, false)
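[Editorial example — the iexport.go changes below all follow the exporter's traversal pattern: write an op tag and a position, then recurse into the node's operands (w.op, w.pos, w.expr). A toy version of that walk over field-based nodes, with all names hypothetical:]

	// export_sketch.go
	package main

	import "fmt"

	type Node interface{}

	type Ident struct{ Name string }

	type BinaryExpr struct {
		Op   string
		X, Y Node
	}

	// write emits a prefix encoding of the tree: tag first, then operands,
	// mirroring how the exporter serializes each expression.
	func write(n Node, out *[]string) {
		switch n := n.(type) {
		case *Ident:
			*out = append(*out, "NAME:"+n.Name)
		case *BinaryExpr:
			*out = append(*out, "OP:"+n.Op)
			write(n.X, out) // was w.expr(n.Left()); now w.expr(n.X)
			write(n.Y, out) // was w.expr(n.Right()); now w.expr(n.Y)
		}
	}

	func main() {
		e := &BinaryExpr{"+", &Ident{"a"}, &Ident{"b"}}
		var out []string
		write(e, &out)
		fmt.Println(out) // [OP:+ NAME:a NAME:b]
	}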
- w.pos(n.Func().Endlineno) + w.pos(n.Func.Endlineno) } else { w.uint64(0) } @@ -1068,27 +1068,27 @@ func (w *exportWriter) stmt(n ir.Node) { // generate OBLOCK nodes except to denote an empty // function body, although that may change.) n := n.(*ir.BlockStmt) - for _, n := range n.List().Slice() { + for _, n := range n.List.Slice() { w.stmt(n) } case ir.ODCL: n := n.(*ir.Decl) w.op(ir.ODCL) - w.pos(n.Left().Pos()) - w.localName(n.Left().(*ir.Name)) - w.typ(n.Left().Type()) + w.pos(n.X.Pos()) + w.localName(n.X.(*ir.Name)) + w.typ(n.X.Type()) case ir.OAS: // Don't export "v = " initializing statements, hope they're always // preceded by the DCL which will be re-parsed and typecheck to reproduce // the "v = " again. n := n.(*ir.AssignStmt) - if n.Right() != nil { + if n.Y != nil { w.op(ir.OAS) w.pos(n.Pos()) - w.expr(n.Left()) - w.expr(n.Right()) + w.expr(n.X) + w.expr(n.Y) } case ir.OASOP: @@ -1096,23 +1096,23 @@ func (w *exportWriter) stmt(n ir.Node) { w.op(ir.OASOP) w.pos(n.Pos()) w.op(n.AsOp) - w.expr(n.Left()) - if w.bool(!n.Implicit()) { - w.expr(n.Right()) + w.expr(n.X) + if w.bool(!n.IncDec) { + w.expr(n.Y) } case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV: n := n.(*ir.AssignListStmt) w.op(ir.OAS2) w.pos(n.Pos()) - w.exprList(n.List()) - w.exprList(n.Rlist()) + w.exprList(n.Lhs) + w.exprList(n.Rhs) case ir.ORETURN: n := n.(*ir.ReturnStmt) w.op(ir.ORETURN) w.pos(n.Pos()) - w.exprList(n.List()) + w.exprList(n.Results) // case ORETJMP: // unreachable - generated by compiler for trampolin routines @@ -1121,32 +1121,32 @@ func (w *exportWriter) stmt(n ir.Node) { n := n.(*ir.GoDeferStmt) w.op(n.Op()) w.pos(n.Pos()) - w.expr(n.Left()) + w.expr(n.Call) case ir.OIF: n := n.(*ir.IfStmt) w.op(ir.OIF) w.pos(n.Pos()) w.stmtList(n.Init()) - w.expr(n.Left()) - w.stmtList(n.Body()) - w.stmtList(n.Rlist()) + w.expr(n.Cond) + w.stmtList(n.Body) + w.stmtList(n.Else) case ir.OFOR: n := n.(*ir.ForStmt) w.op(ir.OFOR) w.pos(n.Pos()) w.stmtList(n.Init()) - w.exprsOrNil(n.Left(), n.Right()) - w.stmtList(n.Body()) + w.exprsOrNil(n.Cond, n.Post) + w.stmtList(n.Body) case ir.ORANGE: n := n.(*ir.RangeStmt) w.op(ir.ORANGE) w.pos(n.Pos()) - w.stmtList(n.List()) - w.expr(n.Right()) - w.stmtList(n.Body()) + w.stmtList(n.Vars) + w.expr(n.X) + w.stmtList(n.Body) case ir.OSELECT: n := n.(*ir.SelectStmt) @@ -1161,7 +1161,7 @@ func (w *exportWriter) stmt(n ir.Node) { w.op(n.Op()) w.pos(n.Pos()) w.stmtList(n.Init()) - w.exprsOrNil(n.Left(), nil) + w.exprsOrNil(n.Tag, nil) w.caseList(n) // case OCASE: @@ -1191,11 +1191,11 @@ func isNamedTypeSwitch(n ir.Node) bool { return false } sw := n.(*ir.SwitchStmt) - if sw.Left() == nil || sw.Left().Op() != ir.OTYPESW { + if sw.Tag == nil || sw.Tag.Op() != ir.OTYPESW { return false } - guard := sw.Left().(*ir.TypeSwitchGuard) - return guard.Left() != nil + guard := sw.Tag.(*ir.TypeSwitchGuard) + return guard.Tag != nil } func (w *exportWriter) caseList(sw ir.Node) { @@ -1203,19 +1203,19 @@ func (w *exportWriter) caseList(sw ir.Node) { var cases []ir.Node if sw.Op() == ir.OSWITCH { - cases = sw.(*ir.SwitchStmt).List().Slice() + cases = sw.(*ir.SwitchStmt).Cases.Slice() } else { - cases = sw.(*ir.SelectStmt).List().Slice() + cases = sw.(*ir.SelectStmt).Cases.Slice() } w.uint64(uint64(len(cases))) for _, cas := range cases { cas := cas.(*ir.CaseStmt) w.pos(cas.Pos()) - w.stmtList(cas.List()) + w.stmtList(cas.List) if namedTypeSwitch { - w.localName(cas.Rlist().First().(*ir.Name)) + w.localName(cas.Vars.First().(*ir.Name)) } - w.stmtList(cas.Body()) + 
w.stmtList(cas.Body) } } @@ -1230,21 +1230,21 @@ func simplifyForExport(n ir.Node) ir.Node { switch n.Op() { case ir.OPAREN: n := n.(*ir.ParenExpr) - return simplifyForExport(n.Left()) + return simplifyForExport(n.X) case ir.ODEREF: n := n.(*ir.StarExpr) if n.Implicit() { - return simplifyForExport(n.Left()) + return simplifyForExport(n.X) } case ir.OADDR: n := n.(*ir.AddrExpr) if n.Implicit() { - return simplifyForExport(n.Left()) + return simplifyForExport(n.X) } case ir.ODOT, ir.ODOTPTR: n := n.(*ir.SelectorExpr) if n.Implicit() { - return simplifyForExport(n.Left()) + return simplifyForExport(n.X) } } return n @@ -1283,7 +1283,7 @@ func (w *exportWriter) expr(n ir.Node) { case ir.ONAME: // Package scope name. n := n.(*ir.Name) - if (n.Class() == ir.PEXTERN || n.Class() == ir.PFUNC) && !ir.IsBlank(n) { + if (n.Class_ == ir.PEXTERN || n.Class_ == ir.PFUNC) && !ir.IsBlank(n) { w.op(ir.ONONAME) w.qualifiedIdent(n) break @@ -1305,14 +1305,14 @@ func (w *exportWriter) expr(n ir.Node) { w.op(ir.OTYPESW) w.pos(n.Pos()) var s *types.Sym - if n.Left() != nil { - if n.Left().Op() != ir.ONONAME { - base.Fatalf("expected ONONAME, got %v", n.Left()) + if n.Tag != nil { + if n.Tag.Op() != ir.ONONAME { + base.Fatalf("expected ONONAME, got %v", n.Tag) } - s = n.Left().Sym() + s = n.Tag.Sym() } w.localIdent(s, 0) // declared pseudo-variable, if any - w.exprsOrNil(n.Right(), nil) + w.exprsOrNil(n.X, nil) // case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC: // should have been resolved by typechecking - handled by default case @@ -1327,27 +1327,27 @@ func (w *exportWriter) expr(n ir.Node) { n := n.(*ir.AddrExpr) w.op(ir.OADDR) w.pos(n.Pos()) - w.expr(n.Left()) + w.expr(n.X) case ir.OSTRUCTLIT: n := n.(*ir.CompLitExpr) w.op(ir.OSTRUCTLIT) w.pos(n.Pos()) w.typ(n.Type()) - w.fieldList(n.List()) // special handling of field names + w.fieldList(n.List) // special handling of field names case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT: n := n.(*ir.CompLitExpr) w.op(ir.OCOMPLIT) w.pos(n.Pos()) w.typ(n.Type()) - w.exprList(n.List()) + w.exprList(n.List) case ir.OKEY: n := n.(*ir.KeyExpr) w.op(ir.OKEY) w.pos(n.Pos()) - w.exprsOrNil(n.Left(), n.Right()) + w.exprsOrNil(n.Key, n.Value) // case OSTRUCTKEY: // unreachable - handled in case OSTRUCTLIT by elemList @@ -1357,35 +1357,35 @@ func (w *exportWriter) expr(n ir.Node) { n := n.(*ir.CallPartExpr) w.op(ir.OXDOT) w.pos(n.Pos()) - w.expr(n.Left()) - w.selector(n.Sym()) + w.expr(n.X) + w.selector(n.Method.Sym) case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH: n := n.(*ir.SelectorExpr) w.op(ir.OXDOT) w.pos(n.Pos()) - w.expr(n.Left()) - w.selector(n.Sym()) + w.expr(n.X) + w.selector(n.Sel) case ir.ODOTTYPE, ir.ODOTTYPE2: n := n.(*ir.TypeAssertExpr) w.op(ir.ODOTTYPE) w.pos(n.Pos()) - w.expr(n.Left()) + w.expr(n.X) w.typ(n.Type()) case ir.OINDEX, ir.OINDEXMAP: n := n.(*ir.IndexExpr) w.op(ir.OINDEX) w.pos(n.Pos()) - w.expr(n.Left()) - w.expr(n.Right()) + w.expr(n.X) + w.expr(n.Index) case ir.OSLICE, ir.OSLICESTR, ir.OSLICEARR: n := n.(*ir.SliceExpr) w.op(ir.OSLICE) w.pos(n.Pos()) - w.expr(n.Left()) + w.expr(n.X) low, high, _ := n.SliceBounds() w.exprsOrNil(low, high) @@ -1393,7 +1393,7 @@ func (w *exportWriter) expr(n ir.Node) { n := n.(*ir.SliceExpr) w.op(ir.OSLICE3) w.pos(n.Pos()) - w.expr(n.Left()) + w.expr(n.X) low, high, max := n.SliceBounds() w.exprsOrNil(low, high) w.expr(max) @@ -1403,33 +1403,33 @@ func (w *exportWriter) expr(n ir.Node) { n := n.(*ir.BinaryExpr) w.op(n.Op()) w.pos(n.Pos()) - w.expr(n.Left()) - w.expr(n.Right()) + w.expr(n.X) + 
w.expr(n.Y) w.op(ir.OEND) case ir.OCONV, ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2RUNES, ir.ORUNESTR: n := n.(*ir.ConvExpr) w.op(ir.OCONV) w.pos(n.Pos()) - w.expr(n.Left()) + w.expr(n.X) w.typ(n.Type()) case ir.OREAL, ir.OIMAG, ir.OCAP, ir.OCLOSE, ir.OLEN, ir.ONEW, ir.OPANIC: n := n.(*ir.UnaryExpr) w.op(n.Op()) w.pos(n.Pos()) - w.expr(n.Left()) + w.expr(n.X) w.op(ir.OEND) case ir.OAPPEND, ir.ODELETE, ir.ORECOVER, ir.OPRINT, ir.OPRINTN: n := n.(*ir.CallExpr) w.op(n.Op()) w.pos(n.Pos()) - w.exprList(n.List()) // emits terminating OEND + w.exprList(n.Args) // emits terminating OEND // only append() calls may contain '...' arguments if n.Op() == ir.OAPPEND { - w.bool(n.IsDDD()) - } else if n.IsDDD() { + w.bool(n.IsDDD) + } else if n.IsDDD { base.Fatalf("exporter: unexpected '...' with %v call", n.Op()) } @@ -1438,9 +1438,9 @@ func (w *exportWriter) expr(n ir.Node) { w.op(ir.OCALL) w.pos(n.Pos()) w.stmtList(n.Init()) - w.expr(n.Left()) - w.exprList(n.List()) - w.bool(n.IsDDD()) + w.expr(n.X) + w.exprList(n.Args) + w.bool(n.IsDDD) case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE: n := n.(*ir.MakeExpr) @@ -1451,12 +1451,12 @@ func (w *exportWriter) expr(n ir.Node) { default: // empty list w.op(ir.OEND) - case n.Right() != nil: - w.expr(n.Left()) - w.expr(n.Right()) + case n.Cap != nil: + w.expr(n.Len) + w.expr(n.Cap) w.op(ir.OEND) - case n.Left() != nil && (n.Op() == ir.OMAKESLICE || !n.Left().Type().IsUntyped()): - w.expr(n.Left()) + case n.Len != nil && (n.Op() == ir.OMAKESLICE || !n.Len.Type().IsUntyped()): + w.expr(n.Len) w.op(ir.OEND) } @@ -1465,26 +1465,26 @@ func (w *exportWriter) expr(n ir.Node) { n := n.(*ir.UnaryExpr) w.op(n.Op()) w.pos(n.Pos()) - w.expr(n.Left()) + w.expr(n.X) case ir.OADDR: n := n.(*ir.AddrExpr) w.op(n.Op()) w.pos(n.Pos()) - w.expr(n.Left()) + w.expr(n.X) case ir.ODEREF: n := n.(*ir.StarExpr) w.op(n.Op()) w.pos(n.Pos()) - w.expr(n.Left()) + w.expr(n.X) case ir.OSEND: n := n.(*ir.SendStmt) w.op(n.Op()) w.pos(n.Pos()) - w.expr(n.Left()) - w.expr(n.Right()) + w.expr(n.Chan) + w.expr(n.Value) // binary expressions case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT, @@ -1492,21 +1492,21 @@ func (w *exportWriter) expr(n ir.Node) { n := n.(*ir.BinaryExpr) w.op(n.Op()) w.pos(n.Pos()) - w.expr(n.Left()) - w.expr(n.Right()) + w.expr(n.X) + w.expr(n.Y) case ir.OANDAND, ir.OOROR: n := n.(*ir.LogicalExpr) w.op(n.Op()) w.pos(n.Pos()) - w.expr(n.Left()) - w.expr(n.Right()) + w.expr(n.X) + w.expr(n.Y) case ir.OADDSTR: n := n.(*ir.AddStringExpr) w.op(ir.OADDSTR) w.pos(n.Pos()) - w.exprList(n.List()) + w.exprList(n.List) case ir.ODCLCONST: // if exporting, DCLCONST should just be removed as its usage @@ -1543,8 +1543,8 @@ func (w *exportWriter) fieldList(list ir.Nodes) { w.uint64(uint64(list.Len())) for _, n := range list.Slice() { n := n.(*ir.StructKeyExpr) - w.selector(n.Sym()) - w.expr(n.Left()) + w.selector(n.Field) + w.expr(n.Value) } } @@ -1557,7 +1557,7 @@ func (w *exportWriter) localName(n *ir.Name) { // PPARAM/PPARAMOUT, because we only want to include vargen in // non-param names. 
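// For example (illustrative sketch): in
//
//	func f() { x := 1; { x := 2; _ = x }; _ = x }
//
// the two distinct locals named x are told apart on export by their
// Vargen, encoded as a name suffix (roughly "x·1", "x·2"), while
// parameter names are exported without the suffix.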
var v int32 - if n.Class() == ir.PAUTO || (n.Class() == ir.PAUTOHEAP && n.Name().Stackcopy == nil) { + if n.Class_ == ir.PAUTO || (n.Class_ == ir.PAUTOHEAP && n.Name().Stackcopy == nil) { v = n.Name().Vargen } diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 40f76cae7bb60..4f460d54a200a 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -329,7 +329,7 @@ func (r *importReader) doDecl(sym *types.Sym) *ir.Name { fn.SetType(mtyp) m := newFuncNameAt(mpos, methodSym(recv.Type, msym), fn) m.SetType(mtyp) - m.SetClass(ir.PFUNC) + m.Class_ = ir.PFUNC // methodSym already marked m.Sym as a function. f := types.NewField(mpos, msym, mtyp) @@ -643,10 +643,10 @@ func (r *importReader) funcExt(n *ir.Name) { // Inline body. if u := r.uint64(); u > 0 { - n.Func().Inl = &ir.Inline{ + n.Func.Inl = &ir.Inline{ Cost: int32(u - 1), } - n.Func().Endlineno = r.pos() + n.Func.Endlineno = r.pos() } } @@ -757,7 +757,7 @@ func (r *importReader) stmtList() []ir.Node { // Inline them into the statement list. if n.Op() == ir.OBLOCK { n := n.(*ir.BlockStmt) - list = append(list, n.List().Slice()...) + list = append(list, n.List.Slice()...) } else { list = append(list, n) } @@ -772,17 +772,17 @@ func (r *importReader) caseList(sw ir.Node) []ir.Node { cases := make([]ir.Node, r.uint64()) for i := range cases { cas := ir.NewCaseStmt(r.pos(), nil, nil) - cas.PtrList().Set(r.stmtList()) + cas.List.Set(r.stmtList()) if namedTypeSwitch { // Note: per-case variables will have distinct, dotted // names after import. That's okay: swt.go only needs // Sym for diagnostics anyway. caseVar := ir.NewNameAt(cas.Pos(), r.ident()) declare(caseVar, dclcontext) - cas.PtrRlist().Set1(caseVar) - caseVar.Defn = sw.(*ir.SwitchStmt).Left() + cas.Vars.Set1(caseVar) + caseVar.Defn = sw.(*ir.SwitchStmt).Tag } - cas.PtrBody().Set(r.stmtList()) + cas.Body.Set(r.stmtList()) cases[i] = cas } return cases @@ -867,7 +867,7 @@ func (r *importReader) node() ir.Node { savedlineno := base.Pos base.Pos = r.pos() n := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(r.typ()).(ir.Ntype), nil) - n.PtrList().Set(r.elemList()) // special handling of field names + n.List.Set(r.elemList()) // special handling of field names base.Pos = savedlineno return n @@ -876,7 +876,7 @@ func (r *importReader) node() ir.Node { case ir.OCOMPLIT: n := ir.NewCompLitExpr(r.pos(), ir.OCOMPLIT, ir.TypeNode(r.typ()).(ir.Ntype), nil) - n.PtrList().Set(r.exprList()) + n.List.Set(r.exprList()) return n case ir.OKEY: @@ -931,9 +931,9 @@ func (r *importReader) node() ir.Node { case ir.OCOPY, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN: n := builtinCall(r.pos(), op) - n.PtrList().Set(r.exprList()) + n.Args.Set(r.exprList()) if op == ir.OAPPEND { - n.SetIsDDD(r.bool()) + n.IsDDD = r.bool() } return n @@ -943,15 +943,15 @@ func (r *importReader) node() ir.Node { case ir.OCALL: n := ir.NewCallExpr(r.pos(), ir.OCALL, nil, nil) n.PtrInit().Set(r.stmtList()) - n.SetLeft(r.expr()) - n.PtrList().Set(r.exprList()) - n.SetIsDDD(r.bool()) + n.X = r.expr() + n.Args.Set(r.exprList()) + n.IsDDD = r.bool() return n case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE: n := builtinCall(r.pos(), ir.OMAKE) - n.PtrList().Append(ir.TypeNode(r.typ())) - n.PtrList().Append(r.exprList()...) + n.Args.Append(ir.TypeNode(r.typ())) + n.Args.Append(r.exprList()...) 
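// (Illustrative: a make([]T, n, m) expression re-imports here as an
// OMAKE builtin call whose Args are the type node followed by the
// length and capacity expressions, mirroring the exporter's OMAKE
// cases above.)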
return n // unary expressions @@ -1006,13 +1006,13 @@ func (r *importReader) node() ir.Node { case ir.OASOP: n := ir.NewAssignOpStmt(r.pos(), ir.OXXX, nil, nil) - n.SetSubOp(r.op()) - n.SetLeft(r.expr()) + n.AsOp = r.op() + n.X = r.expr() if !r.bool() { - n.SetRight(nodintconst(1)) - n.SetImplicit(true) + n.Y = nodintconst(1) + n.IncDec = true } else { - n.SetRight(r.expr()) + n.Y = r.expr() } return n @@ -1021,13 +1021,13 @@ func (r *importReader) node() ir.Node { case ir.OAS2: n := ir.NewAssignListStmt(r.pos(), ir.OAS2, nil, nil) - n.PtrList().Set(r.exprList()) - n.PtrRlist().Set(r.exprList()) + n.Lhs.Set(r.exprList()) + n.Rhs.Set(r.exprList()) return n case ir.ORETURN: n := ir.NewReturnStmt(r.pos(), nil) - n.PtrList().Set(r.exprList()) + n.Results.Set(r.exprList()) return n // case ORETJMP: @@ -1039,40 +1039,40 @@ func (r *importReader) node() ir.Node { case ir.OIF: n := ir.NewIfStmt(r.pos(), nil, nil, nil) n.PtrInit().Set(r.stmtList()) - n.SetLeft(r.expr()) - n.PtrBody().Set(r.stmtList()) - n.PtrRlist().Set(r.stmtList()) + n.Cond = r.expr() + n.Body.Set(r.stmtList()) + n.Else.Set(r.stmtList()) return n case ir.OFOR: n := ir.NewForStmt(r.pos(), nil, nil, nil, nil) n.PtrInit().Set(r.stmtList()) left, right := r.exprsOrNil() - n.SetLeft(left) - n.SetRight(right) - n.PtrBody().Set(r.stmtList()) + n.Cond = left + n.Post = right + n.Body.Set(r.stmtList()) return n case ir.ORANGE: n := ir.NewRangeStmt(r.pos(), nil, nil, nil) - n.PtrList().Set(r.stmtList()) - n.SetRight(r.expr()) - n.PtrBody().Set(r.stmtList()) + n.Vars.Set(r.stmtList()) + n.X = r.expr() + n.Body.Set(r.stmtList()) return n case ir.OSELECT: n := ir.NewSelectStmt(r.pos(), nil) n.PtrInit().Set(r.stmtList()) r.exprsOrNil() // TODO(rsc): Delete (and fix exporter). These are always nil. - n.PtrList().Set(r.caseList(n)) + n.Cases.Set(r.caseList(n)) return n case ir.OSWITCH: n := ir.NewSwitchStmt(r.pos(), nil, nil) n.PtrInit().Set(r.stmtList()) left, _ := r.exprsOrNil() - n.SetLeft(left) - n.PtrList().Set(r.caseList(n)) + n.Tag = left + n.Cases.Set(r.caseList(n)) return n // case OCASE: diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index 1c15ce131838d..fbc88411cc90c 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -45,7 +45,7 @@ func fninit() *ir.Name { if n.Op() == ir.ONONAME { continue } - if n.Op() != ir.ONAME || n.(*ir.Name).Class() != ir.PEXTERN { + if n.Op() != ir.ONAME || n.(*ir.Name).Class_ != ir.PEXTERN { base.Fatalf("bad inittask: %v", n) } deps = append(deps, n.(*ir.Name).Sym().Linksym()) @@ -62,7 +62,7 @@ func fninit() *ir.Name { fn.Dcl = append(fn.Dcl, initTodo.Dcl...) initTodo.Dcl = nil - fn.PtrBody().Set(nf) + fn.Body.Set(nf) funcbody() typecheckFunc(fn) @@ -83,8 +83,8 @@ func fninit() *ir.Name { // Record user init functions. for _, fn := range Target.Inits { // Skip init functions with empty bodies. 
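// For example (illustrative): a user-written
//
//	func init() {}
//
// arrives here as a body holding a single empty OBLOCK rather than no
// statements at all (noder preserves empty blocks), which is why the
// check below looks for Len() == 1 with an empty block instead of
// Len() == 0.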
- if fn.Body().Len() == 1 { - if stmt := fn.Body().First(); stmt.Op() == ir.OBLOCK && stmt.(*ir.BlockStmt).List().Len() == 0 { + if fn.Body.Len() == 1 { + if stmt := fn.Body.First(); stmt.Op() == ir.OBLOCK && stmt.(*ir.BlockStmt).List.Len() == 0 { continue } } @@ -99,7 +99,7 @@ func fninit() *ir.Name { sym := lookup(".inittask") task := NewName(sym) task.SetType(types.Types[types.TUINT8]) // fake type - task.SetClass(ir.PEXTERN) + task.Class_ = ir.PEXTERN sym.Def = task lsym := sym.Linksym() ot := 0 diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go index f99c6dd72c991..ec3d7be45fd4d 100644 --- a/src/cmd/compile/internal/gc/initorder.go +++ b/src/cmd/compile/internal/gc/initorder.go @@ -139,7 +139,7 @@ func (o *InitOrder) processAssign(n ir.Node) { defn := dep.Defn // Skip dependencies on functions (PFUNC) and // variables already initialized (InitDone). - if dep.Class() != ir.PEXTERN || o.order[defn] == orderDone { + if dep.Class_ != ir.PEXTERN || o.order[defn] == orderDone { continue } o.order[n]++ @@ -203,7 +203,7 @@ func (o *InitOrder) findInitLoopAndExit(n *ir.Name, path *[]*ir.Name) { *path = append(*path, n) for _, ref := range refers { // Short-circuit variables that were initialized. - if ref.Class() == ir.PEXTERN && o.order[ref.Defn] == orderDone { + if ref.Class_ == ir.PEXTERN && o.order[ref.Defn] == orderDone { continue } @@ -220,7 +220,7 @@ func reportInitLoopAndExit(l []*ir.Name) { // the start. i := -1 for j, n := range l { - if n.Class() == ir.PEXTERN && (i == -1 || n.Pos().Before(l[i].Pos())) { + if n.Class_ == ir.PEXTERN && (i == -1 || n.Pos().Before(l[i].Pos())) { i = j } } @@ -255,13 +255,13 @@ func collectDeps(n ir.Node, transitive bool) ir.NameSet { switch n.Op() { case ir.OAS: n := n.(*ir.AssignStmt) - d.inspect(n.Right()) + d.inspect(n.Y) case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV: n := n.(*ir.AssignListStmt) - d.inspect(n.Rlist().First()) + d.inspect(n.Rhs.First()) case ir.ODCLFUNC: n := n.(*ir.Func) - d.inspectList(n.Body()) + d.inspectList(n.Body) default: base.Fatalf("unexpected Op: %v", n.Op()) } @@ -294,14 +294,14 @@ func (d *initDeps) visit(n ir.Node) { case ir.ONAME: n := n.(*ir.Name) - switch n.Class() { + switch n.Class_ { case ir.PEXTERN, ir.PFUNC: d.foundDep(n) } case ir.OCLOSURE: n := n.(*ir.ClosureExpr) - d.inspectList(n.Func().Body()) + d.inspectList(n.Func.Body) case ir.ODOTMETH, ir.OCALLPART: d.foundDep(methodExprName(n)) @@ -327,8 +327,8 @@ func (d *initDeps) foundDep(n *ir.Name) { return } d.seen.Add(n) - if d.transitive && n.Class() == ir.PFUNC { - d.inspectList(n.Defn.(*ir.Func).Body()) + if d.transitive && n.Class_ == ir.PFUNC { + d.inspectList(n.Defn.(*ir.Func).Body) } } @@ -360,10 +360,10 @@ func firstLHS(n ir.Node) *ir.Name { switch n.Op() { case ir.OAS: n := n.(*ir.AssignStmt) - return n.Left().Name() + return n.X.Name() case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2RECV, ir.OAS2MAPR: n := n.(*ir.AssignListStmt) - return n.List().First().Name() + return n.Lhs.First().Name() } base.Fatalf("unexpected Op: %v", n.Op()) diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 7cb79468065a9..edb2c5bb4207b 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -196,7 +196,7 @@ func caninl(fn *ir.Func) { } // If fn has no body (is defined outside of Go), cannot inline it. 
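// A typical instance (illustrative) is an assembly-backed declaration:
//
//	//go:noescape
//	func memequal(a, b unsafe.Pointer, size uintptr) bool
//
// Its Go body is empty — the implementation lives in a .s file — so
// there is nothing for the inliner to copy.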
- if fn.Body().Len() == 0 { + if fn.Body.Len() == 0 { reason = "no function body" return } @@ -206,10 +206,10 @@ func caninl(fn *ir.Func) { } n := fn.Nname - if n.Func().InlinabilityChecked() { + if n.Func.InlinabilityChecked() { return } - defer n.Func().SetInlinabilityChecked(true) + defer n.Func.SetInlinabilityChecked(true) cc := int32(inlineExtraCallCost) if base.Flag.LowerL == 4 { @@ -235,14 +235,14 @@ func caninl(fn *ir.Func) { return } - n.Func().Inl = &ir.Inline{ + n.Func.Inl = &ir.Inline{ Cost: inlineMaxBudget - visitor.budget, - Dcl: pruneUnusedAutos(n.Defn.(*ir.Func).Func().Dcl, &visitor), - Body: ir.DeepCopyList(src.NoXPos, fn.Body().Slice()), + Dcl: pruneUnusedAutos(n.Defn.(*ir.Func).Dcl, &visitor), + Body: ir.DeepCopyList(src.NoXPos, fn.Body.Slice()), } if base.Flag.LowerM > 1 { - fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, inlineMaxBudget-visitor.budget, fn.Type(), ir.AsNodes(n.Func().Inl.Body)) + fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, inlineMaxBudget-visitor.budget, fn.Type(), ir.AsNodes(n.Func.Inl.Body)) } else if base.Flag.LowerM != 0 { fmt.Printf("%v: can inline %v\n", ir.Line(fn), n) } @@ -257,10 +257,10 @@ func inlFlood(n *ir.Name, exportsym func(*ir.Name)) { if n == nil { return } - if n.Op() != ir.ONAME || n.Class() != ir.PFUNC { - base.Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op(), n.Class()) + if n.Op() != ir.ONAME || n.Class_ != ir.PFUNC { + base.Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op(), n.Class_) } - fn := n.Func() + fn := n.Func if fn == nil { base.Fatalf("inlFlood: missing Func on %v", n) } @@ -285,7 +285,7 @@ func inlFlood(n *ir.Name, exportsym func(*ir.Name)) { case ir.ONAME: n := n.(*ir.Name) - switch n.Class() { + switch n.Class_ { case ir.PFUNC: inlFlood(n, exportsym) exportsym(n) @@ -348,9 +348,9 @@ func (v *hairyVisitor) doNode(n ir.Node) error { // because getcaller{pc,sp} expect a pointer to the caller's first argument. // // runtime.throw is a "cheap call" like panic in normal code. - if n.Left().Op() == ir.ONAME { - name := n.Left().(*ir.Name) - if name.Class() == ir.PFUNC && isRuntimePkg(name.Sym().Pkg) { + if n.X.Op() == ir.ONAME { + name := n.X.(*ir.Name) + if name.Class_ == ir.PFUNC && isRuntimePkg(name.Sym().Pkg) { fn := name.Sym().Name if fn == "getcallerpc" || fn == "getcallersp" { return errors.New("call to " + fn) @@ -367,7 +367,7 @@ func (v *hairyVisitor) doNode(n ir.Node) error { break } - if fn := inlCallee(n.Left()); fn != nil && fn.Inl != nil { + if fn := inlCallee(n.X); fn != nil && fn.Inl != nil { v.budget -= fn.Inl.Cost break } @@ -378,12 +378,12 @@ func (v *hairyVisitor) doNode(n ir.Node) error { // Call is okay if inlinable and we have the budget for the body. 
case ir.OCALLMETH: n := n.(*ir.CallExpr) - t := n.Left().Type() + t := n.X.Type() if t == nil { - base.Fatalf("no function type for [%p] %+v\n", n.Left(), n.Left()) + base.Fatalf("no function type for [%p] %+v\n", n.X, n.X) } - if isRuntimePkg(n.Left().Sym().Pkg) { - fn := n.Left().Sym().Name + if isRuntimePkg(n.X.Sym().Pkg) { + fn := n.X.Sym().Name if fn == "heapBits.nextArena" { // Special case: explicitly allow // mid-stack inlining of @@ -393,7 +393,7 @@ func (v *hairyVisitor) doNode(n ir.Node) error { break } } - if inlfn := methodExprName(n.Left()).Func(); inlfn.Inl != nil { + if inlfn := methodExprName(n.X).Func; inlfn.Inl != nil { v.budget -= inlfn.Inl.Cost break } @@ -431,35 +431,35 @@ func (v *hairyVisitor) doNode(n ir.Node) error { case ir.OFOR, ir.OFORUNTIL: n := n.(*ir.ForStmt) - if n.Sym() != nil { + if n.Label != nil { return errors.New("labeled control") } case ir.OSWITCH: n := n.(*ir.SwitchStmt) - if n.Sym() != nil { + if n.Label != nil { return errors.New("labeled control") } // case ir.ORANGE, ir.OSELECT in "unhandled" above case ir.OBREAK, ir.OCONTINUE: n := n.(*ir.BranchStmt) - if n.Sym() != nil { + if n.Label != nil { // Should have short-circuited due to labeled control error above. base.Fatalf("unexpected labeled break/continue: %v", n) } case ir.OIF: n := n.(*ir.IfStmt) - if ir.IsConst(n.Left(), constant.Bool) { + if ir.IsConst(n.Cond, constant.Bool) { // This if and the condition cost nothing. // TODO(rsc): It seems strange that we visit the dead branch. if err := ir.DoList(n.Init(), v.do); err != nil { return err } - if err := ir.DoList(n.Body(), v.do); err != nil { + if err := ir.DoList(n.Body, v.do); err != nil { return err } - if err := ir.DoList(n.Rlist(), v.do); err != nil { + if err := ir.DoList(n.Else, v.do); err != nil { return err } return nil @@ -467,7 +467,7 @@ func (v *hairyVisitor) doNode(n ir.Node) error { case ir.ONAME: n := n.(*ir.Name) - if n.Class() == ir.PAUTO { + if n.Class_ == ir.PAUTO { v.usedLocals[n] = true } @@ -526,8 +526,8 @@ func inlcalls(fn *ir.Func) { // Turn an OINLCALL into a statement. func inlconv2stmt(inlcall *ir.InlinedCallExpr) ir.Node { n := ir.NewBlockStmt(inlcall.Pos(), nil) - n.SetList(inlcall.Init()) - n.PtrList().AppendNodes(inlcall.PtrBody()) + n.List = inlcall.Init() + n.List.AppendNodes(&inlcall.Body) return n } @@ -535,8 +535,8 @@ func inlconv2stmt(inlcall *ir.InlinedCallExpr) ir.Node { // The result of inlconv2expr MUST be assigned back to n, e.g. // n.Left = inlconv2expr(n.Left) func inlconv2expr(n *ir.InlinedCallExpr) ir.Node { - r := n.Rlist().First() - return initExpr(append(n.Init().Slice(), n.Body().Slice()...), r) + r := n.ReturnVars.First() + return initExpr(append(n.Init().Slice(), n.Body.Slice()...), r) } // Turn the rlist (with the return values) of the OINLCALL in @@ -545,12 +545,12 @@ func inlconv2expr(n *ir.InlinedCallExpr) ir.Node { // order will be preserved. Used in return, oas2func and call // statements. 
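// For example (schematic): after inlining f in
//
//	x, y := f(a)
//
// the OINLCALL's return variables ~R0, ~R1 become the expression list,
// and the inlined init and body are hoisted onto the first element, so
// the assignment proceeds roughly as x, y = ~R0, ~R1 with f's inlined
// body evaluated first.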
func inlconv2list(n *ir.InlinedCallExpr) []ir.Node { - if n.Op() != ir.OINLCALL || n.Rlist().Len() == 0 { + if n.Op() != ir.OINLCALL || n.ReturnVars.Len() == 0 { base.Fatalf("inlconv2list %+v\n", n) } - s := n.Rlist().Slice() - s[0] = initExpr(append(n.Init().Slice(), n.Body().Slice()...), s[0]) + s := n.ReturnVars.Slice() + s[0] = initExpr(append(n.Init().Slice(), n.Body.Slice()...), s[0]) return s } @@ -575,10 +575,10 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No switch n.Op() { case ir.ODEFER, ir.OGO: n := n.(*ir.GoDeferStmt) - switch call := n.Left(); call.Op() { + switch call := n.Call; call.Op() { case ir.OCALLFUNC, ir.OCALLMETH: call := call.(*ir.CallExpr) - call.SetNoInline(true) + call.NoInline = true } // TODO do them here (or earlier), @@ -589,7 +589,7 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No // Prevent inlining some reflect.Value methods when using checkptr, // even when package reflect was compiled without it (#35073). n := n.(*ir.CallExpr) - if s := n.Left().Sym(); base.Debug.Checkptr != 0 && isReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") { + if s := n.X.Sym(); base.Debug.Checkptr != 0 && isReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") { return n } } @@ -600,8 +600,8 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No if as := n; as.Op() == ir.OAS2FUNC { as := as.(*ir.AssignListStmt) - if as.Rlist().First().Op() == ir.OINLCALL { - as.PtrRlist().Set(inlconv2list(as.Rlist().First().(*ir.InlinedCallExpr))) + if as.Rhs.First().Op() == ir.OINLCALL { + as.Rhs.Set(inlconv2list(as.Rhs.First().(*ir.InlinedCallExpr))) as.SetOp(ir.OAS2) as.SetTypecheck(0) n = typecheck(as, ctxStmt) @@ -614,7 +614,7 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No switch n.Op() { case ir.OCALLFUNC, ir.OCALLMETH: n := n.(*ir.CallExpr) - if n.NoInline() { + if n.NoInline { return n } } @@ -624,27 +624,27 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No case ir.OCALLFUNC: call = n.(*ir.CallExpr) if base.Flag.LowerM > 3 { - fmt.Printf("%v:call to func %+v\n", ir.Line(n), call.Left()) + fmt.Printf("%v:call to func %+v\n", ir.Line(n), call.X) } if IsIntrinsicCall(call) { break } - if fn := inlCallee(call.Left()); fn != nil && fn.Inl != nil { + if fn := inlCallee(call.X); fn != nil && fn.Inl != nil { n = mkinlcall(call, fn, maxCost, inlMap, edit) } case ir.OCALLMETH: call = n.(*ir.CallExpr) if base.Flag.LowerM > 3 { - fmt.Printf("%v:call to meth %v\n", ir.Line(n), call.Left().(*ir.SelectorExpr).Sel) + fmt.Printf("%v:call to meth %v\n", ir.Line(n), call.X.(*ir.SelectorExpr).Sel) } // typecheck should have resolved ODOTMETH->type, whose nname points to the actual function. 
- if call.Left().Type() == nil { - base.Fatalf("no function type for [%p] %+v\n", call.Left(), call.Left()) + if call.X.Type() == nil { + base.Fatalf("no function type for [%p] %+v\n", call.X, call.X) } - n = mkinlcall(call, methodExprName(call.Left()).Func(), maxCost, inlMap, edit) + n = mkinlcall(call, methodExprName(call.X).Func, maxCost, inlMap, edit) } base.Pos = lno @@ -681,15 +681,15 @@ func inlCallee(fn ir.Node) *ir.Func { if n == nil || !types.Identical(n.Type().Recv().Type, fn.T) { return nil } - return n.Func() + return n.Func case ir.ONAME: fn := fn.(*ir.Name) - if fn.Class() == ir.PFUNC { - return fn.Func() + if fn.Class_ == ir.PFUNC { + return fn.Func } case ir.OCLOSURE: fn := fn.(*ir.ClosureExpr) - c := fn.Func() + c := fn.Func caninl(c) return c } @@ -699,7 +699,7 @@ func inlCallee(fn ir.Node) *ir.Func { func staticValue(n ir.Node) ir.Node { for { if n.Op() == ir.OCONVNOP { - n = n.(*ir.ConvExpr).Left() + n = n.(*ir.ConvExpr).X continue } @@ -719,7 +719,7 @@ func staticValue1(nn ir.Node) ir.Node { return nil } n := nn.(*ir.Name) - if n.Class() != ir.PAUTO || n.Name().Addrtaken() { + if n.Class_ != ir.PAUTO || n.Name().Addrtaken() { return nil } @@ -733,12 +733,12 @@ FindRHS: switch defn.Op() { case ir.OAS: defn := defn.(*ir.AssignStmt) - rhs = defn.Right() + rhs = defn.Y case ir.OAS2: defn := defn.(*ir.AssignListStmt) - for i, lhs := range defn.List().Slice() { + for i, lhs := range defn.Lhs.Slice() { if lhs == n { - rhs = defn.Rlist().Index(i) + rhs = defn.Rhs.Index(i) break FindRHS } } @@ -775,12 +775,12 @@ func reassigned(name *ir.Name) bool { switch n.Op() { case ir.OAS: n := n.(*ir.AssignStmt) - if n.Left() == name && n != name.Defn { + if n.X == name && n != name.Defn { return true } case ir.OAS2, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OSELRECV2: n := n.(*ir.AssignListStmt) - for _, p := range n.List().Slice() { + for _, p := range n.Lhs.Slice() { if p == name && n != name.Defn { return true } @@ -887,11 +887,11 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b // inlconv2expr or inlconv2list). Make sure to preserve these, // if necessary (#42703). if n.Op() == ir.OCALLFUNC { - callee := n.Left() + callee := n.X for callee.Op() == ir.OCONVNOP { conv := callee.(*ir.ConvExpr) ninit.AppendNodes(conv.PtrInit()) - callee = conv.Left() + callee = conv.X } if callee.Op() != ir.ONAME && callee.Op() != ir.OCLOSURE && callee.Op() != ir.OMETHEXPR { base.Fatalf("unexpected callee expression: %v", callee) @@ -944,7 +944,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b if ln.Op() != ir.ONAME { continue } - if ln.Class() == ir.PPARAMOUT { // return values handled below. + if ln.Class_ == ir.PPARAMOUT { // return values handled below. continue } if isParamStackCopy(ln) { // ignore the on-stack copy of a parameter that moved to the heap @@ -957,7 +957,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b inlf := typecheck(inlvar(ln), ctxExpr) inlvars[ln] = inlf if base.Flag.GenDwarfInl > 0 { - if ln.Class() == ir.PPARAM { + if ln.Class_ == ir.PPARAM { inlf.Name().SetInlFormal(true) } else { inlf.Name().SetInlLocal(true) @@ -1010,54 +1010,54 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b // Assign arguments to the parameters' temp names. 
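// Schematically (illustrative), an inlined call f(a, b, c) to
//
//	func f(x int, rest ...int)
//
// first becomes the single assignment x, ~arg0, ~arg1 := a, b, c using
// the parameters' temp names, and the variadic tail is then collected
// separately as roughly rest := []int{~arg0, ~arg1} (vas below).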
as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - as.SetColas(true) + as.Def = true if n.Op() == ir.OCALLMETH { - sel := n.Left().(*ir.SelectorExpr) - if sel.Left() == nil { + sel := n.X.(*ir.SelectorExpr) + if sel.X == nil { base.Fatalf("method call without receiver: %+v", n) } - as.PtrRlist().Append(sel.Left()) + as.Rhs.Append(sel.X) } - as.PtrRlist().Append(n.List().Slice()...) + as.Rhs.Append(n.Args.Slice()...) // For non-dotted calls to variadic functions, we assign the // variadic parameter's temp name separately. var vas *ir.AssignStmt if recv := fn.Type().Recv(); recv != nil { - as.PtrList().Append(inlParam(recv, as, inlvars)) + as.Lhs.Append(inlParam(recv, as, inlvars)) } for _, param := range fn.Type().Params().Fields().Slice() { // For ordinary parameters or variadic parameters in // dotted calls, just add the variable to the // assignment list, and we're done. - if !param.IsDDD() || n.IsDDD() { - as.PtrList().Append(inlParam(param, as, inlvars)) + if !param.IsDDD() || n.IsDDD { + as.Lhs.Append(inlParam(param, as, inlvars)) continue } // Otherwise, we need to collect the remaining values // to pass as a slice. - x := as.List().Len() - for as.List().Len() < as.Rlist().Len() { - as.PtrList().Append(argvar(param.Type, as.List().Len())) + x := as.Lhs.Len() + for as.Lhs.Len() < as.Rhs.Len() { + as.Lhs.Append(argvar(param.Type, as.Lhs.Len())) } - varargs := as.List().Slice()[x:] + varargs := as.Lhs.Slice()[x:] vas = ir.NewAssignStmt(base.Pos, nil, nil) - vas.SetLeft(inlParam(param, vas, inlvars)) + vas.X = inlParam(param, vas, inlvars) if len(varargs) == 0 { - vas.SetRight(nodnil()) - vas.Right().SetType(param.Type) + vas.Y = nodnil() + vas.Y.SetType(param.Type) } else { lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(param.Type).(ir.Ntype), nil) - lit.PtrList().Set(varargs) - vas.SetRight(lit) + lit.List.Set(varargs) + vas.Y = lit } } - if as.Rlist().Len() != 0 { + if as.Rhs.Len() != 0 { ninit.Append(typecheck(as, ctxStmt)) } @@ -1093,7 +1093,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b // Note issue 28603. 
inlMark := ir.NewInlineMarkStmt(base.Pos, types.BADWIDTH) inlMark.SetPos(n.Pos().WithIsStmt()) - inlMark.SetOffset(int64(newIndex)) + inlMark.Index = int64(newIndex) ninit.Append(inlMark) if base.Flag.GenDwarfInl > 0 { @@ -1130,8 +1130,8 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b call := ir.NewInlinedCallExpr(base.Pos, nil, nil) call.PtrInit().Set(ninit.Slice()) - call.PtrBody().Set(body) - call.PtrRlist().Set(retvars) + call.Body.Set(body) + call.ReturnVars.Set(retvars) call.SetType(n.Type()) call.SetTypecheck(1) @@ -1160,7 +1160,7 @@ func inlvar(var_ ir.Node) ir.Node { n := NewName(var_.Sym()) n.SetType(var_.Type()) - n.SetClass(ir.PAUTO) + n.Class_ = ir.PAUTO n.SetUsed(true) n.Curfn = Curfn // the calling function, not the called one n.SetAddrtaken(var_.Name().Addrtaken()) @@ -1173,7 +1173,7 @@ func inlvar(var_ ir.Node) ir.Node { func retvar(t *types.Field, i int) ir.Node { n := NewName(lookupN("~R", i)) n.SetType(t.Type) - n.SetClass(ir.PAUTO) + n.Class_ = ir.PAUTO n.SetUsed(true) n.Curfn = Curfn // the calling function, not the called one Curfn.Dcl = append(Curfn.Dcl, n) @@ -1185,7 +1185,7 @@ func retvar(t *types.Field, i int) ir.Node { func argvar(t *types.Type, i int) ir.Node { n := NewName(lookupN("~arg", i)) n.SetType(t.Elem()) - n.SetClass(ir.PAUTO) + n.Class_ = ir.PAUTO n.SetUsed(true) n.Curfn = Curfn // the calling function, not the called one Curfn.Dcl = append(Curfn.Dcl, n) @@ -1277,19 +1277,19 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { // this return is guaranteed to belong to the current inlined function. n := n.(*ir.ReturnStmt) init := subst.list(n.Init()) - if len(subst.retvars) != 0 && n.List().Len() != 0 { + if len(subst.retvars) != 0 && n.Results.Len() != 0 { as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) // Make a shallow copy of retvars. // Otherwise OINLCALL.Rlist will be the same list, // and later walk and typecheck may clobber it. for _, n := range subst.retvars { - as.PtrList().Append(n) + as.Lhs.Append(n) } - as.PtrRlist().Set(subst.list(n.List())) + as.Rhs.Set(subst.list(n.Results)) if subst.delayretvars { - for _, n := range as.List().Slice() { + for _, n := range as.Lhs.Slice() { as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n)) n.Name().Defn = as } @@ -1306,8 +1306,8 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { m := ir.Copy(n).(*ir.BranchStmt) m.SetPos(subst.updatedPos(m.Pos())) m.PtrInit().Set(nil) - p := fmt.Sprintf("%s·%d", n.Sym().Name, inlgen) - m.SetSym(lookup(p)) + p := fmt.Sprintf("%s·%d", n.Label.Name, inlgen) + m.Label = lookup(p) return m case ir.OLABEL: @@ -1315,8 +1315,8 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { m := ir.Copy(n).(*ir.LabelStmt) m.SetPos(subst.updatedPos(m.Pos())) m.PtrInit().Set(nil) - p := fmt.Sprintf("%s·%d", n.Sym().Name, inlgen) - m.SetSym(lookup(p)) + p := fmt.Sprintf("%s·%d", n.Label.Name, inlgen) + m.Label = lookup(p) return m } @@ -1345,7 +1345,7 @@ func (subst *inlsubst) updatedPos(xpos src.XPos) src.XPos { func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name { s := make([]*ir.Name, 0, len(ll)) for _, n := range ll { - if n.Class() == ir.PAUTO { + if n.Class_ == ir.PAUTO { if _, found := vis.usedLocals[n]; !found { continue } @@ -1359,7 +1359,7 @@ func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name { // concrete-type method calls where applicable. 
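// For example (illustrative): given
//
//	var r io.Reader = new(bytes.Buffer)
//	r.Read(p)
//
// staticValue can prove r's dynamic type, so the interface call is
// rewritten into the equivalent of r.(*bytes.Buffer).Read(p), which
// later phases may then inline.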
func devirtualize(fn *ir.Func) { Curfn = fn - ir.VisitList(fn.Body(), func(n ir.Node) { + ir.VisitList(fn.Body, func(n ir.Node) { if n.Op() == ir.OCALLINTER { devirtualizeCall(n.(*ir.CallExpr)) } @@ -1367,21 +1367,21 @@ func devirtualize(fn *ir.Func) { } func devirtualizeCall(call *ir.CallExpr) { - sel := call.Left().(*ir.SelectorExpr) - r := staticValue(sel.Left()) + sel := call.X.(*ir.SelectorExpr) + r := staticValue(sel.X) if r.Op() != ir.OCONVIFACE { return } recv := r.(*ir.ConvExpr) - typ := recv.Left().Type() + typ := recv.X.Type() if typ.IsInterface() { return } - dt := ir.NewTypeAssertExpr(sel.Pos(), sel.Left(), nil) + dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, nil) dt.SetType(typ) - x := typecheck(ir.NewSelectorExpr(sel.Pos(), ir.OXDOT, dt, sel.Sym()), ctxExpr|ctxCallee) + x := typecheck(ir.NewSelectorExpr(sel.Pos(), ir.OXDOT, dt, sel.Sel), ctxExpr|ctxCallee) switch x.Op() { case ir.ODOTMETH: x := x.(*ir.SelectorExpr) @@ -1389,7 +1389,7 @@ func devirtualizeCall(call *ir.CallExpr) { base.WarnfAt(call.Pos(), "devirtualizing %v to %v", sel, typ) } call.SetOp(ir.OCALLMETH) - call.SetLeft(x) + call.X = x case ir.ODOTINTER: // Promoted method from embedded interface-typed field (#42279). x := x.(*ir.SelectorExpr) @@ -1397,7 +1397,7 @@ func devirtualizeCall(call *ir.CallExpr) { base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", sel, typ) } call.SetOp(ir.OCALLINTER) - call.SetLeft(x) + call.X = x default: // TODO(mdempsky): Turn back into Fatalf after more testing. if base.Flag.LowerM != 0 { diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 94b4e0e674e82..c1cc7ed377d33 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -272,7 +272,7 @@ func Main(archInit func(*Arch)) { for _, n := range Target.Decls { if n.Op() == ir.ODCLFUNC { n := n.(*ir.Func) - if n.Func().OClosure != nil { + if n.OClosure != nil { Curfn = n transformclosure(n) } diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 4b7a22e6548f0..728c4b13167e1 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -167,7 +167,7 @@ func (p *noder) funcBody(fn *ir.Func, block *syntax.BlockStmt) { if body == nil { body = []ir.Node{ir.NewBlockStmt(base.Pos, nil)} } - fn.PtrBody().Set(body) + fn.Body.Set(body) base.Pos = p.makeXPos(block.Rbrace) fn.Endlineno = base.Pos @@ -650,13 +650,13 @@ func (p *noder) expr(expr syntax.Expr) ir.Node { case *syntax.CompositeLit: n := ir.NewCompLitExpr(p.pos(expr), ir.OCOMPLIT, nil, nil) if expr.Type != nil { - n.SetRight(p.expr(expr.Type)) + n.Ntype = ir.Node(p.expr(expr.Type)).(ir.Ntype) } l := p.exprs(expr.ElemList) for i, e := range l { l[i] = p.wrapname(expr.ElemList[i], e) } - n.PtrList().Set(l) + n.List.Set(l) base.Pos = p.makeXPos(expr.Rbrace) return n case *syntax.KeyValueExpr: @@ -719,8 +719,8 @@ func (p *noder) expr(expr syntax.Expr) ir.Node { return ir.NewBinaryExpr(pos, op, x, y) case *syntax.CallExpr: n := ir.NewCallExpr(p.pos(expr), ir.OCALL, p.expr(expr.Fun), nil) - n.PtrList().Set(p.exprs(expr.ArgList)) - n.SetIsDDD(expr.HasDots) + n.Args.Set(p.exprs(expr.ArgList)) + n.IsDDD = expr.HasDots return n case *syntax.ArrayType: @@ -968,10 +968,10 @@ func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []ir.Node { for i, stmt := range stmts { s := p.stmtFall(stmt, fallOK && i+1 == len(stmts)) if s == nil { - } else if s.Op() == ir.OBLOCK && s.(*ir.BlockStmt).List().Len() > 0 { + } else if s.Op() == ir.OBLOCK 
&& s.(*ir.BlockStmt).List.Len() > 0 { // Inline non-empty block. // Empty blocks must be preserved for checkreturn. - nodes = append(nodes, s.(*ir.BlockStmt).List().Slice()...) + nodes = append(nodes, s.(*ir.BlockStmt).List.Slice()...) } else { nodes = append(nodes, s) } @@ -1006,23 +1006,23 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node { case *syntax.AssignStmt: if stmt.Op != 0 && stmt.Op != syntax.Def { n := ir.NewAssignOpStmt(p.pos(stmt), p.binOp(stmt.Op), p.expr(stmt.Lhs), p.expr(stmt.Rhs)) - n.SetImplicit(stmt.Rhs == syntax.ImplicitOne) + n.IncDec = stmt.Rhs == syntax.ImplicitOne return n } rhs := p.exprList(stmt.Rhs) if list, ok := stmt.Lhs.(*syntax.ListExpr); ok && len(list.ElemList) != 1 || len(rhs) != 1 { n := ir.NewAssignListStmt(p.pos(stmt), ir.OAS2, nil, nil) - n.SetColas(stmt.Op == syntax.Def) - n.PtrList().Set(p.assignList(stmt.Lhs, n, n.Colas())) - n.PtrRlist().Set(rhs) + n.Def = stmt.Op == syntax.Def + n.Lhs.Set(p.assignList(stmt.Lhs, n, n.Def)) + n.Rhs.Set(rhs) return n } n := ir.NewAssignStmt(p.pos(stmt), nil, nil) - n.SetColas(stmt.Op == syntax.Def) - n.SetLeft(p.assignList(stmt.Lhs, n, n.Colas())[0]) - n.SetRight(rhs[0]) + n.Def = stmt.Op == syntax.Def + n.X = p.assignList(stmt.Lhs, n, n.Def)[0] + n.Y = rhs[0] return n case *syntax.BranchStmt: @@ -1064,13 +1064,13 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node { results = p.exprList(stmt.Results) } n := ir.NewReturnStmt(p.pos(stmt), nil) - n.PtrList().Set(results) - if n.List().Len() == 0 && Curfn != nil { + n.Results.Set(results) + if n.Results.Len() == 0 && Curfn != nil { for _, ln := range Curfn.Dcl { - if ln.Class() == ir.PPARAM { + if ln.Class_ == ir.PPARAM { continue } - if ln.Class() != ir.PPARAMOUT { + if ln.Class_ != ir.PPARAMOUT { break } if ln.Sym().Def != ln { @@ -1163,16 +1163,16 @@ func (p *noder) ifStmt(stmt *syntax.IfStmt) ir.Node { n.PtrInit().Set1(p.stmt(stmt.Init)) } if stmt.Cond != nil { - n.SetLeft(p.expr(stmt.Cond)) + n.Cond = p.expr(stmt.Cond) } - n.PtrBody().Set(p.blockStmt(stmt.Then)) + n.Body.Set(p.blockStmt(stmt.Then)) if stmt.Else != nil { e := p.stmt(stmt.Else) if e.Op() == ir.OBLOCK { e := e.(*ir.BlockStmt) - n.PtrRlist().Set(e.List().Slice()) + n.Else.Set(e.List.Slice()) } else { - n.PtrRlist().Set1(e) + n.Else.Set1(e) } } p.closeAnotherScope() @@ -1188,10 +1188,10 @@ func (p *noder) forStmt(stmt *syntax.ForStmt) ir.Node { n := ir.NewRangeStmt(p.pos(r), nil, p.expr(r.X), nil) if r.Lhs != nil { - n.SetColas(r.Def) - n.PtrList().Set(p.assignList(r.Lhs, n, n.Colas())) + n.Def = r.Def + n.Vars.Set(p.assignList(r.Lhs, n, n.Def)) } - n.PtrBody().Set(p.blockStmt(stmt.Body)) + n.Body.Set(p.blockStmt(stmt.Body)) p.closeAnotherScope() return n } @@ -1201,12 +1201,12 @@ func (p *noder) forStmt(stmt *syntax.ForStmt) ir.Node { n.PtrInit().Set1(p.stmt(stmt.Init)) } if stmt.Cond != nil { - n.SetLeft(p.expr(stmt.Cond)) + n.Cond = p.expr(stmt.Cond) } if stmt.Post != nil { - n.SetRight(p.stmt(stmt.Post)) + n.Post = p.stmt(stmt.Post) } - n.PtrBody().Set(p.blockStmt(stmt.Body)) + n.Body.Set(p.blockStmt(stmt.Body)) p.closeAnotherScope() return n } @@ -1218,14 +1218,14 @@ func (p *noder) switchStmt(stmt *syntax.SwitchStmt) ir.Node { n.PtrInit().Set1(p.stmt(stmt.Init)) } if stmt.Tag != nil { - n.SetLeft(p.expr(stmt.Tag)) + n.Tag = p.expr(stmt.Tag) } var tswitch *ir.TypeSwitchGuard - if l := n.Left(); l != nil && l.Op() == ir.OTYPESW { + if l := n.Tag; l != nil && l.Op() == ir.OTYPESW { tswitch = l.(*ir.TypeSwitchGuard) } - n.PtrList().Set(p.caseClauses(stmt.Body, tswitch, 
stmt.Rbrace)) + n.Cases.Set(p.caseClauses(stmt.Body, tswitch, stmt.Rbrace)) p.closeScope(stmt.Rbrace) return n @@ -1242,12 +1242,12 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitch n := ir.NewCaseStmt(p.pos(clause), nil, nil) if clause.Cases != nil { - n.PtrList().Set(p.exprList(clause.Cases)) + n.List.Set(p.exprList(clause.Cases)) } - if tswitch != nil && tswitch.Left() != nil { - nn := NewName(tswitch.Left().Sym()) + if tswitch != nil && tswitch.Tag != nil { + nn := NewName(tswitch.Tag.Sym()) declare(nn, dclcontext) - n.PtrRlist().Set1(nn) + n.Vars.Set1(nn) // keep track of the instances for reporting unused nn.Defn = tswitch } @@ -1263,8 +1263,8 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitch body = body[:len(body)-1] } - n.PtrBody().Set(p.stmtsFall(body, true)) - if l := n.Body().Len(); l > 0 && n.Body().Index(l-1).Op() == ir.OFALL { + n.Body.Set(p.stmtsFall(body, true)) + if l := n.Body.Len(); l > 0 && n.Body.Index(l-1).Op() == ir.OFALL { if tswitch != nil { base.Errorf("cannot fallthrough in type switch") } @@ -1283,7 +1283,7 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitch func (p *noder) selectStmt(stmt *syntax.SelectStmt) ir.Node { n := ir.NewSelectStmt(p.pos(stmt), nil) - n.PtrList().Set(p.commClauses(stmt.Body, stmt.Rbrace)) + n.Cases.Set(p.commClauses(stmt.Body, stmt.Rbrace)) return n } @@ -1298,9 +1298,9 @@ func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []i n := ir.NewCaseStmt(p.pos(clause), nil, nil) if clause.Comm != nil { - n.PtrList().Set1(p.stmt(clause.Comm)) + n.List.Set1(p.stmt(clause.Comm)) } - n.PtrBody().Set(p.stmts(clause.Body)) + n.Body.Set(p.stmts(clause.Body)) nodes = append(nodes, n) } if len(clauses) > 0 { @@ -1321,16 +1321,16 @@ func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node { switch ls.Op() { case ir.OFOR: ls := ls.(*ir.ForStmt) - ls.SetSym(sym) + ls.Label = sym case ir.ORANGE: ls := ls.(*ir.RangeStmt) - ls.SetSym(sym) + ls.Label = sym case ir.OSWITCH: ls := ls.(*ir.SwitchStmt) - ls.SetSym(sym) + ls.Label = sym case ir.OSELECT: ls := ls.(*ir.SelectStmt) - ls.SetSym(sym) + ls.Label = sym } } } @@ -1339,7 +1339,7 @@ func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node { if ls != nil { if ls.Op() == ir.OBLOCK { ls := ls.(*ir.BlockStmt) - l = append(l, ls.List().Slice()...) + l = append(l, ls.List.Slice()...) 
} else { l = append(l, ls) } diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index c6625da1daff4..9634cd51ae961 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -214,7 +214,7 @@ func addptabs() { if s.Pkg.Name != "main" { continue } - if n.Type().Kind() == types.TFUNC && n.Class() == ir.PFUNC { + if n.Type().Kind() == types.TFUNC && n.Class_ == ir.PFUNC { // function ptabs = append(ptabs, ptabEntry{s: s, t: s.Def.Type()}) } else { @@ -228,7 +228,7 @@ func dumpGlobal(n *ir.Name) { if n.Type() == nil { base.Fatalf("external %v nil type\n", n) } - if n.Class() == ir.PFUNC { + if n.Class_ == ir.PFUNC { return } if n.Sym().Pkg != types.LocalPkg { @@ -560,8 +560,8 @@ func pfuncsym(n *ir.Name, noff int64, f *ir.Name) { if n.Sym() == nil { base.Fatalf("pfuncsym nil n sym") } - if f.Class() != ir.PFUNC { - base.Fatalf("pfuncsym class not PFUNC %d", f.Class()) + if f.Class_ != ir.PFUNC { + base.Fatalf("pfuncsym class not PFUNC %d", f.Class_) } s := n.Sym().Linksym() s.WriteAddr(base.Ctxt, noff, Widthptr, funcsym(f.Sym()).Linksym(), 0) diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 96164d09fd5ef..53d83c0ac874b 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -55,10 +55,10 @@ type Order struct { func order(fn *ir.Func) { if base.Flag.W > 1 { s := fmt.Sprintf("\nbefore order %v", fn.Sym()) - ir.DumpList(s, fn.Body()) + ir.DumpList(s, fn.Body) } - orderBlock(fn.PtrBody(), map[string][]*ir.Name{}) + orderBlock(&fn.Body, map[string][]*ir.Name{}) } // append typechecks stmt and appends it to out. @@ -136,12 +136,12 @@ func (o *Order) cheapExpr(n ir.Node) ir.Node { return n case ir.OLEN, ir.OCAP: n := n.(*ir.UnaryExpr) - l := o.cheapExpr(n.Left()) - if l == n.Left() { + l := o.cheapExpr(n.X) + if l == n.X { return n } a := ir.SepCopy(n).(*ir.UnaryExpr) - a.SetLeft(l) + a.X = l return typecheck(a, ctxExpr) } @@ -162,59 +162,59 @@ func (o *Order) safeExpr(n ir.Node) ir.Node { case ir.OLEN, ir.OCAP: n := n.(*ir.UnaryExpr) - l := o.safeExpr(n.Left()) - if l == n.Left() { + l := o.safeExpr(n.X) + if l == n.X { return n } a := ir.SepCopy(n).(*ir.UnaryExpr) - a.SetLeft(l) + a.X = l return typecheck(a, ctxExpr) case ir.ODOT: n := n.(*ir.SelectorExpr) - l := o.safeExpr(n.Left()) - if l == n.Left() { + l := o.safeExpr(n.X) + if l == n.X { return n } a := ir.SepCopy(n).(*ir.SelectorExpr) - a.SetLeft(l) + a.X = l return typecheck(a, ctxExpr) case ir.ODOTPTR: n := n.(*ir.SelectorExpr) - l := o.cheapExpr(n.Left()) - if l == n.Left() { + l := o.cheapExpr(n.X) + if l == n.X { return n } a := ir.SepCopy(n).(*ir.SelectorExpr) - a.SetLeft(l) + a.X = l return typecheck(a, ctxExpr) case ir.ODEREF: n := n.(*ir.StarExpr) - l := o.cheapExpr(n.Left()) - if l == n.Left() { + l := o.cheapExpr(n.X) + if l == n.X { return n } a := ir.SepCopy(n).(*ir.StarExpr) - a.SetLeft(l) + a.X = l return typecheck(a, ctxExpr) case ir.OINDEX, ir.OINDEXMAP: n := n.(*ir.IndexExpr) var l ir.Node - if n.Left().Type().IsArray() { - l = o.safeExpr(n.Left()) + if n.X.Type().IsArray() { + l = o.safeExpr(n.X) } else { - l = o.cheapExpr(n.Left()) + l = o.cheapExpr(n.X) } - r := o.cheapExpr(n.Right()) - if l == n.Left() && r == n.Right() { + r := o.cheapExpr(n.Index) + if l == n.X && r == n.Index { return n } a := ir.SepCopy(n).(*ir.IndexExpr) - a.SetLeft(l) - a.SetRight(r) + a.X = l + a.Index = r return typecheck(a, ctxExpr) default: @@ -230,7 +230,7 @@ func (o *Order) safeExpr(n ir.Node) 
ir.Node { // because we emit explicit VARKILL instructions marking the end of those // temporaries' lifetimes. func isaddrokay(n ir.Node) bool { - return islvalue(n) && (n.Op() != ir.ONAME || n.(*ir.Name).Class() == ir.PEXTERN || ir.IsAutoTmp(n)) + return islvalue(n) && (n.Op() != ir.ONAME || n.(*ir.Name).Class_ == ir.PEXTERN || ir.IsAutoTmp(n)) } // addrTemp ensures that n is okay to pass by address to runtime routines. @@ -292,17 +292,17 @@ func mapKeyReplaceStrConv(n ir.Node) bool { replaced = true case ir.OSTRUCTLIT: n := n.(*ir.CompLitExpr) - for _, elem := range n.List().Slice() { + for _, elem := range n.List.Slice() { elem := elem.(*ir.StructKeyExpr) - if mapKeyReplaceStrConv(elem.Left()) { + if mapKeyReplaceStrConv(elem.Value) { replaced = true } } case ir.OARRAYLIT: n := n.(*ir.CompLitExpr) - for _, elem := range n.List().Slice() { + for _, elem := range n.List.Slice() { if elem.Op() == ir.OKEY { - elem = elem.(*ir.KeyExpr).Right() + elem = elem.(*ir.KeyExpr).Value } if mapKeyReplaceStrConv(elem) { replaced = true @@ -371,24 +371,24 @@ func orderMakeSliceCopy(s []ir.Node) { as := s[0].(*ir.AssignStmt) cp := s[1].(*ir.BinaryExpr) - if as.Right() == nil || as.Right().Op() != ir.OMAKESLICE || ir.IsBlank(as.Left()) || - as.Left().Op() != ir.ONAME || cp.Left().Op() != ir.ONAME || cp.Right().Op() != ir.ONAME || - as.Left().Name() != cp.Left().Name() || cp.Left().Name() == cp.Right().Name() { + if as.Y == nil || as.Y.Op() != ir.OMAKESLICE || ir.IsBlank(as.X) || + as.X.Op() != ir.ONAME || cp.X.Op() != ir.ONAME || cp.Y.Op() != ir.ONAME || + as.X.Name() != cp.X.Name() || cp.X.Name() == cp.Y.Name() { // The line above this one is correct with the differing equality operators: // we want as.X and cp.X to be the same name, // but we want the initial data to be coming from a different name. return } - mk := as.Right().(*ir.MakeExpr) - if mk.Esc() == EscNone || mk.Left() == nil || mk.Right() != nil { + mk := as.Y.(*ir.MakeExpr) + if mk.Esc() == EscNone || mk.Len == nil || mk.Cap != nil { return } mk.SetOp(ir.OMAKESLICECOPY) - mk.SetRight(cp.Right()) + mk.Cap = cp.Y // Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s) - mk.SetBounded(mk.Left().Op() == ir.OLEN && samesafeexpr(mk.Left().(*ir.UnaryExpr).Left(), cp.Right())) - as.SetRight(typecheck(mk, ctxExpr)) + mk.SetBounded(mk.Len.Op() == ir.OLEN && samesafeexpr(mk.Len.(*ir.UnaryExpr).X, cp.Y)) + as.Y = typecheck(mk, ctxExpr) s[1] = nil // remove separate copy call } @@ -479,25 +479,25 @@ func (o *Order) call(nn ir.Node) { default: base.Fatalf("unexpected call: %+v", n) case *ir.UnaryExpr: - n.SetLeft(o.expr(n.Left(), nil)) + n.X = o.expr(n.X, nil) case *ir.ConvExpr: - n.SetLeft(o.expr(n.Left(), nil)) + n.X = o.expr(n.X, nil) case *ir.BinaryExpr: - n.SetLeft(o.expr(n.Left(), nil)) - n.SetRight(o.expr(n.Right(), nil)) + n.X = o.expr(n.X, nil) + n.Y = o.expr(n.Y, nil) case *ir.MakeExpr: - n.SetLeft(o.expr(n.Left(), nil)) - n.SetRight(o.expr(n.Right(), nil)) + n.Len = o.expr(n.Len, nil) + n.Cap = o.expr(n.Cap, nil) case *ir.CallExpr: - o.exprList(n.List()) + o.exprList(n.Args) } return } n := nn.(*ir.CallExpr) fixVariadicCall(n) - n.SetLeft(o.expr(n.Left(), nil)) - o.exprList(n.List()) + n.X = o.expr(n.X, nil) + o.exprList(n.Args) if n.Op() == ir.OCALLINTER { return @@ -509,21 +509,21 @@ func (o *Order) call(nn ir.Node) { // still alive when we pop the temp stack. 
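// Illustrative case: for a call such as
//
//	syscall.Syscall(fn, uintptr(unsafe.Pointer(p)), 0, 0)
//
// the unsafe.Pointer operand is copied into an addrtaken temp x, the
// argument becomes uintptr(x), and an OVARLIVE for x is appended so
// the pointed-to object stays live until the call returns.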
if arg.Op() == ir.OCONVNOP { arg := arg.(*ir.ConvExpr) - if arg.Left().Type().IsUnsafePtr() { - x := o.copyExpr(arg.Left()) - arg.SetLeft(x) + if arg.X.Type().IsUnsafePtr() { + x := o.copyExpr(arg.X) + arg.X = x x.Name().SetAddrtaken(true) // ensure SSA keeps the x variable - n.PtrBody().Append(typecheck(ir.NewUnaryExpr(base.Pos, ir.OVARLIVE, x), ctxStmt)) + n.Body.Append(typecheck(ir.NewUnaryExpr(base.Pos, ir.OVARLIVE, x), ctxStmt)) } } } // Check for "unsafe-uintptr" tag provided by escape analysis. - for i, param := range n.Left().Type().Params().FieldSlice() { + for i, param := range n.X.Type().Params().FieldSlice() { if param.Note == unsafeUintptrTag || param.Note == uintptrEscapesTag { - if arg := n.List().Index(i); arg.Op() == ir.OSLICELIT { + if arg := n.Args.Index(i); arg.Op() == ir.OSLICELIT { arg := arg.(*ir.CompLitExpr) - for _, elt := range arg.List().Slice() { + for _, elt := range arg.List.Slice() { keepAlive(elt) } } else { @@ -555,34 +555,34 @@ func (o *Order) mapAssign(n ir.Node) { case ir.OAS: n := n.(*ir.AssignStmt) - if n.Left().Op() == ir.OINDEXMAP { - n.SetRight(o.safeMapRHS(n.Right())) + if n.X.Op() == ir.OINDEXMAP { + n.Y = o.safeMapRHS(n.Y) } o.out = append(o.out, n) case ir.OASOP: n := n.(*ir.AssignOpStmt) - if n.Left().Op() == ir.OINDEXMAP { - n.SetRight(o.safeMapRHS(n.Right())) + if n.X.Op() == ir.OINDEXMAP { + n.Y = o.safeMapRHS(n.Y) } o.out = append(o.out, n) case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2MAPR, ir.OAS2FUNC: n := n.(*ir.AssignListStmt) var post []ir.Node - for i, m := range n.List().Slice() { + for i, m := range n.Lhs.Slice() { switch { case m.Op() == ir.OINDEXMAP: m := m.(*ir.IndexExpr) - if !ir.IsAutoTmp(m.Left()) { - m.SetLeft(o.copyExpr(m.Left())) + if !ir.IsAutoTmp(m.X) { + m.X = o.copyExpr(m.X) } - if !ir.IsAutoTmp(m.Right()) { - m.SetRight(o.copyExpr(m.Right())) + if !ir.IsAutoTmp(m.Index) { + m.Index = o.copyExpr(m.Index) } fallthrough case instrumenting && n.Op() == ir.OAS2FUNC && !ir.IsBlank(m): t := o.newTemp(m.Type(), false) - n.List().SetIndex(i, t) + n.Lhs.SetIndex(i, t) a := ir.NewAssignStmt(base.Pos, m, t) post = append(post, typecheck(a, ctxStmt)) } @@ -598,7 +598,7 @@ func (o *Order) safeMapRHS(r ir.Node) ir.Node { // We need to make sure the RHS won't panic. See issue 22881. if r.Op() == ir.OAPPEND { r := r.(*ir.CallExpr) - s := r.List().Slice()[1:] + s := r.Args.Slice()[1:] for i, n := range s { s[i] = o.cheapExpr(n) } @@ -628,32 +628,32 @@ func (o *Order) stmt(n ir.Node) { case ir.OAS: n := n.(*ir.AssignStmt) t := o.markTemp() - n.SetLeft(o.expr(n.Left(), nil)) - n.SetRight(o.expr(n.Right(), n.Left())) + n.X = o.expr(n.X, nil) + n.Y = o.expr(n.Y, n.X) o.mapAssign(n) o.cleanTemp(t) case ir.OASOP: n := n.(*ir.AssignOpStmt) t := o.markTemp() - n.SetLeft(o.expr(n.Left(), nil)) - n.SetRight(o.expr(n.Right(), nil)) + n.X = o.expr(n.X, nil) + n.Y = o.expr(n.Y, nil) - if instrumenting || n.Left().Op() == ir.OINDEXMAP && (n.SubOp() == ir.ODIV || n.SubOp() == ir.OMOD) { + if instrumenting || n.X.Op() == ir.OINDEXMAP && (n.AsOp == ir.ODIV || n.AsOp == ir.OMOD) { // Rewrite m[k] op= r into m[k] = m[k] op r so // that we can ensure that if op panics // because r is zero, the panic happens before // the map assignment. // DeepCopy is a big hammer here, but safeExpr // makes sure there is nothing too deep being copied. 
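// Concretely (illustrative): m[k] /= r is evaluated as if written
//
//	t := m[k] / r // a zero r panics here...
//	m[k] = t      // ...before any map write happens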
- l1 := o.safeExpr(n.Left()) + l1 := o.safeExpr(n.X) l2 := ir.DeepCopy(src.NoXPos, l1) if l2.Op() == ir.OINDEXMAP { l2 := l2.(*ir.IndexExpr) - l2.SetIndexMapLValue(false) + l2.Assigned = false } l2 = o.copyExpr(l2) - r := o.expr(typecheck(ir.NewBinaryExpr(n.Pos(), n.SubOp(), l2, n.Right()), ctxExpr), nil) + r := o.expr(typecheck(ir.NewBinaryExpr(n.Pos(), n.AsOp, l2, n.Y), ctxExpr), nil) as := typecheck(ir.NewAssignStmt(n.Pos(), l1, r), ctxStmt) o.mapAssign(as) o.cleanTemp(t) @@ -666,8 +666,8 @@ func (o *Order) stmt(n ir.Node) { case ir.OAS2: n := n.(*ir.AssignListStmt) t := o.markTemp() - o.exprList(n.List()) - o.exprList(n.Rlist()) + o.exprList(n.Lhs) + o.exprList(n.Rhs) o.mapAssign(n) o.cleanTemp(t) @@ -675,9 +675,9 @@ func (o *Order) stmt(n ir.Node) { case ir.OAS2FUNC: n := n.(*ir.AssignListStmt) t := o.markTemp() - o.exprList(n.List()) - o.init(n.Rlist().First()) - o.call(n.Rlist().First()) + o.exprList(n.Lhs) + o.init(n.Rhs.First()) + o.call(n.Rhs.First()) o.as2(n) o.cleanTemp(t) @@ -690,22 +690,22 @@ func (o *Order) stmt(n ir.Node) { case ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OAS2MAPR: n := n.(*ir.AssignListStmt) t := o.markTemp() - o.exprList(n.List()) + o.exprList(n.Lhs) - switch r := n.Rlist().First(); r.Op() { + switch r := n.Rhs.First(); r.Op() { case ir.ODOTTYPE2: r := r.(*ir.TypeAssertExpr) - r.SetLeft(o.expr(r.Left(), nil)) + r.X = o.expr(r.X, nil) case ir.ORECV: r := r.(*ir.UnaryExpr) - r.SetLeft(o.expr(r.Left(), nil)) + r.X = o.expr(r.X, nil) case ir.OINDEXMAP: r := r.(*ir.IndexExpr) - r.SetLeft(o.expr(r.Left(), nil)) - r.SetRight(o.expr(r.Right(), nil)) + r.X = o.expr(r.X, nil) + r.Index = o.expr(r.Index, nil) // See similar conversion for OINDEXMAP below. - _ = mapKeyReplaceStrConv(r.Right()) - r.SetRight(o.mapKeyTemp(r.Left().Type(), r.Right())) + _ = mapKeyReplaceStrConv(r.Index) + r.Index = o.mapKeyTemp(r.X.Type(), r.Index) default: base.Fatalf("order.stmt: %v", r.Op()) } @@ -716,7 +716,7 @@ func (o *Order) stmt(n ir.Node) { // Special: does not save n onto out. case ir.OBLOCK: n := n.(*ir.BlockStmt) - o.stmtList(n.List()) + o.stmtList(n.List) // Special: n->left is not an expression; save as is. 
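
// Source-level view of the OASOP-on-map rewrite completed above
// (illustrative):
func divInto(m map[string]int, k string, r int) {
	// Compiled as m[k] = m[k] / r, so a division panic from r == 0
	// happens before the map assignment, leaving m unchanged.
	m[k] /= r
}
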
case ir.OBREAK, @@ -741,22 +741,22 @@ func (o *Order) stmt(n ir.Node) { case ir.OCLOSE, ir.ORECV: n := n.(*ir.UnaryExpr) t := o.markTemp() - n.SetLeft(o.expr(n.Left(), nil)) + n.X = o.expr(n.X, nil) o.out = append(o.out, n) o.cleanTemp(t) case ir.OCOPY: n := n.(*ir.BinaryExpr) t := o.markTemp() - n.SetLeft(o.expr(n.Left(), nil)) - n.SetRight(o.expr(n.Right(), nil)) + n.X = o.expr(n.X, nil) + n.Y = o.expr(n.Y, nil) o.out = append(o.out, n) o.cleanTemp(t) case ir.OPRINT, ir.OPRINTN, ir.ORECOVER: n := n.(*ir.CallExpr) t := o.markTemp() - o.exprList(n.List()) + o.exprList(n.Args) o.out = append(o.out, n) o.cleanTemp(t) @@ -764,17 +764,17 @@ func (o *Order) stmt(n ir.Node) { case ir.ODEFER, ir.OGO: n := n.(*ir.GoDeferStmt) t := o.markTemp() - o.init(n.Left()) - o.call(n.Left()) + o.init(n.Call) + o.call(n.Call) o.out = append(o.out, n) o.cleanTemp(t) case ir.ODELETE: n := n.(*ir.CallExpr) t := o.markTemp() - n.List().SetFirst(o.expr(n.List().First(), nil)) - n.List().SetSecond(o.expr(n.List().Second(), nil)) - n.List().SetSecond(o.mapKeyTemp(n.List().First().Type(), n.List().Second())) + n.Args.SetFirst(o.expr(n.Args.First(), nil)) + n.Args.SetSecond(o.expr(n.Args.Second(), nil)) + n.Args.SetSecond(o.mapKeyTemp(n.Args.First().Type(), n.Args.Second())) o.out = append(o.out, n) o.cleanTemp(t) @@ -783,10 +783,10 @@ func (o *Order) stmt(n ir.Node) { case ir.OFOR: n := n.(*ir.ForStmt) t := o.markTemp() - n.SetLeft(o.exprInPlace(n.Left())) - n.PtrBody().Prepend(o.cleanTempNoPop(t)...) - orderBlock(n.PtrBody(), o.free) - n.SetRight(orderStmtInPlace(n.Right(), o.free)) + n.Cond = o.exprInPlace(n.Cond) + n.Body.Prepend(o.cleanTempNoPop(t)...) + orderBlock(&n.Body, o.free) + n.Post = orderStmtInPlace(n.Post, o.free) o.out = append(o.out, n) o.cleanTemp(t) @@ -795,12 +795,12 @@ func (o *Order) stmt(n ir.Node) { case ir.OIF: n := n.(*ir.IfStmt) t := o.markTemp() - n.SetLeft(o.exprInPlace(n.Left())) - n.PtrBody().Prepend(o.cleanTempNoPop(t)...) - n.PtrRlist().Prepend(o.cleanTempNoPop(t)...) + n.Cond = o.exprInPlace(n.Cond) + n.Body.Prepend(o.cleanTempNoPop(t)...) + n.Else.Prepend(o.cleanTempNoPop(t)...) o.popTemp(t) - orderBlock(n.PtrBody(), o.free) - orderBlock(n.PtrRlist(), o.free) + orderBlock(&n.Body, o.free) + orderBlock(&n.Else, o.free) o.out = append(o.out, n) // Special: argument will be converted to interface using convT2E @@ -808,9 +808,9 @@ func (o *Order) stmt(n ir.Node) { case ir.OPANIC: n := n.(*ir.UnaryExpr) t := o.markTemp() - n.SetLeft(o.expr(n.Left(), nil)) - if !n.Left().Type().IsInterface() { - n.SetLeft(o.addrTemp(n.Left())) + n.X = o.expr(n.X, nil) + if !n.X.Type().IsInterface() { + n.X = o.addrTemp(n.X) } o.out = append(o.out, n) o.cleanTemp(t) @@ -830,12 +830,12 @@ func (o *Order) stmt(n ir.Node) { // Mark []byte(str) range expression to reuse string backing storage. // It is safe because the storage cannot be mutated. n := n.(*ir.RangeStmt) - if n.Right().Op() == ir.OSTR2BYTES { - n.Right().(*ir.ConvExpr).SetOp(ir.OSTR2BYTESTMP) + if n.X.Op() == ir.OSTR2BYTES { + n.X.(*ir.ConvExpr).SetOp(ir.OSTR2BYTESTMP) } t := o.markTemp() - n.SetRight(o.expr(n.Right(), nil)) + n.X = o.expr(n.X, nil) orderBody := true switch n.Type().Kind() { @@ -843,7 +843,7 @@ func (o *Order) stmt(n ir.Node) { base.Fatalf("order.stmt range %v", n.Type()) case types.TARRAY, types.TSLICE: - if n.List().Len() < 2 || ir.IsBlank(n.List().Second()) { + if n.Vars.Len() < 2 || ir.IsBlank(n.Vars.Second()) { // for i := range x will only use x once, to compute len(x). // No need to copy it. 
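
// The two range-ordering facts noted above, in source form
// (illustrative):
func rangeFacts(str string, x []int) {
	for _, b := range []byte(str) { // OSTR2BYTES becomes OSTR2BYTESTMP:
		_ = b // the conversion may reuse str's backing storage
	}
	for i := range x { // only len(x) is needed, so x is not copied
		_ = i
	}
}
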
break @@ -853,7 +853,7 @@ func (o *Order) stmt(n ir.Node) { case types.TCHAN, types.TSTRING: // chan, string, slice, array ranges use value multiple times. // make copy. - r := n.Right() + r := n.X if r.Type().IsString() && r.Type() != types.Types[types.TSTRING] { r = ir.NewConvExpr(base.Pos, ir.OCONV, nil, r) @@ -861,7 +861,7 @@ func (o *Order) stmt(n ir.Node) { r = typecheck(r, ctxExpr) } - n.SetRight(o.copyExpr(r)) + n.X = o.copyExpr(r) case types.TMAP: if isMapClear(n) { @@ -875,23 +875,23 @@ func (o *Order) stmt(n ir.Node) { // copy the map value in case it is a map literal. // TODO(rsc): Make tmp = literal expressions reuse tmp. // For maps tmp is just one word so it hardly matters. - r := n.Right() - n.SetRight(o.copyExpr(r)) + r := n.X + n.X = o.copyExpr(r) // n.Prealloc is the temp for the iterator. // hiter contains pointers and needs to be zeroed. n.Prealloc = o.newTemp(hiter(n.Type()), true) } - o.exprListInPlace(n.List()) + o.exprListInPlace(n.Vars) if orderBody { - orderBlock(n.PtrBody(), o.free) + orderBlock(&n.Body, o.free) } o.out = append(o.out, n) o.cleanTemp(t) case ir.ORETURN: n := n.(*ir.ReturnStmt) - o.exprList(n.List()) + o.exprList(n.Results) o.out = append(o.out, n) // Special: clean case temporaries in each block entry. @@ -906,9 +906,9 @@ func (o *Order) stmt(n ir.Node) { case ir.OSELECT: n := n.(*ir.SelectStmt) t := o.markTemp() - for _, ncas := range n.List().Slice() { + for _, ncas := range n.Cases.Slice() { ncas := ncas.(*ir.CaseStmt) - r := ncas.Left() + r := ncas.Comm setlineno(ncas) // Append any new body prologue to ninit. @@ -927,17 +927,17 @@ func (o *Order) stmt(n ir.Node) { case ir.OSELRECV2: // case x, ok = <-c r := r.(*ir.AssignListStmt) - recv := r.Rlist().First().(*ir.UnaryExpr) - recv.SetLeft(o.expr(recv.Left(), nil)) - if !ir.IsAutoTmp(recv.Left()) { - recv.SetLeft(o.copyExpr(recv.Left())) + recv := r.Rhs.First().(*ir.UnaryExpr) + recv.X = o.expr(recv.X, nil) + if !ir.IsAutoTmp(recv.X) { + recv.X = o.copyExpr(recv.X) } init := r.PtrInit().Slice() r.PtrInit().Set(nil) - colas := r.Colas() + colas := r.Def do := func(i int, t *types.Type) { - n := r.List().Index(i) + n := r.Lhs.Index(i) if ir.IsBlank(n) { return } @@ -946,7 +946,7 @@ func (o *Order) stmt(n ir.Node) { // declaration (and possible allocation) until inside the case body. // Delete the ODCL nodes here and recreate them inside the body below. if colas { - if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).Left() == n { + if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).X == n { init = init[1:] } dcl := typecheck(ir.NewDecl(base.Pos, ir.ODCL, n), ctxStmt) @@ -955,9 +955,9 @@ func (o *Order) stmt(n ir.Node) { tmp := o.newTemp(t, t.HasPointers()) as := typecheck(ir.NewAssignStmt(base.Pos, n, conv(tmp, n.Type())), ctxStmt) ncas.PtrInit().Append(as) - r.PtrList().SetIndex(i, tmp) + (&r.Lhs).SetIndex(i, tmp) } - do(0, recv.Left().Type().Elem()) + do(0, recv.X.Type().Elem()) do(1, types.Types[types.TBOOL]) if len(init) != 0 { ir.DumpList("ninit", r.Init()) @@ -974,28 +974,28 @@ func (o *Order) stmt(n ir.Node) { // case c <- x // r->left is c, r->right is x, both are always evaluated. 
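
// Sketch of the OSELRECV2 ":=" handling above (illustrative): the ODCL
// is deleted here and recreated inside the case body, with the receive
// landing in temporaries first.
func recvCase(c chan int) {
	select {
	case x, ok := <-c:
		// roughly: tmp1, tmp2 = <-c; var x = tmp1; var ok = tmp2
		_, _ = x, ok
	}
}
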
- r.SetLeft(o.expr(r.Left(), nil)) + r.Chan = o.expr(r.Chan, nil) - if !ir.IsAutoTmp(r.Left()) { - r.SetLeft(o.copyExpr(r.Left())) + if !ir.IsAutoTmp(r.Chan) { + r.Chan = o.copyExpr(r.Chan) } - r.SetRight(o.expr(r.Right(), nil)) - if !ir.IsAutoTmp(r.Right()) { - r.SetRight(o.copyExpr(r.Right())) + r.Value = o.expr(r.Value, nil) + if !ir.IsAutoTmp(r.Value) { + r.Value = o.copyExpr(r.Value) } } } // Now that we have accumulated all the temporaries, clean them. // Also insert any ninit queued during the previous loop. // (The temporary cleaning must follow that ninit work.) - for _, cas := range n.List().Slice() { + for _, cas := range n.Cases.Slice() { cas := cas.(*ir.CaseStmt) - orderBlock(cas.PtrBody(), o.free) - cas.PtrBody().Prepend(o.cleanTempNoPop(t)...) + orderBlock(&cas.Body, o.free) + cas.Body.Prepend(o.cleanTempNoPop(t)...) // TODO(mdempsky): Is this actually necessary? // walkselect appears to walk Ninit. - cas.PtrBody().Prepend(cas.Init().Slice()...) + cas.Body.Prepend(cas.Init().Slice()...) cas.PtrInit().Set(nil) } @@ -1006,14 +1006,14 @@ func (o *Order) stmt(n ir.Node) { case ir.OSEND: n := n.(*ir.SendStmt) t := o.markTemp() - n.SetLeft(o.expr(n.Left(), nil)) - n.SetRight(o.expr(n.Right(), nil)) + n.Chan = o.expr(n.Chan, nil) + n.Value = o.expr(n.Value, nil) if instrumenting { // Force copying to the stack so that (chan T)(nil) <- x // is still instrumented as a read of x. - n.SetRight(o.copyExpr(n.Right())) + n.Value = o.copyExpr(n.Value) } else { - n.SetRight(o.addrTemp(n.Right())) + n.Value = o.addrTemp(n.Value) } o.out = append(o.out, n) o.cleanTemp(t) @@ -1029,15 +1029,15 @@ func (o *Order) stmt(n ir.Node) { n := n.(*ir.SwitchStmt) if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) { // Add empty "default:" case for instrumentation. - n.PtrList().Append(ir.NewCaseStmt(base.Pos, nil, nil)) + n.Cases.Append(ir.NewCaseStmt(base.Pos, nil, nil)) } t := o.markTemp() - n.SetLeft(o.expr(n.Left(), nil)) - for _, ncas := range n.List().Slice() { + n.Tag = o.expr(n.Tag, nil) + for _, ncas := range n.Cases.Slice() { ncas := ncas.(*ir.CaseStmt) - o.exprListInPlace(ncas.List()) - orderBlock(ncas.PtrBody(), o.free) + o.exprListInPlace(ncas.List) + orderBlock(&ncas.Body, o.free) } o.out = append(o.out, n) @@ -1048,9 +1048,9 @@ func (o *Order) stmt(n ir.Node) { } func hasDefaultCase(n *ir.SwitchStmt) bool { - for _, ncas := range n.List().Slice() { + for _, ncas := range n.Cases.Slice() { ncas := ncas.(*ir.CaseStmt) - if ncas.List().Len() == 0 { + if ncas.List.Len() == 0 { return true } } @@ -1111,10 +1111,10 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { // Fewer than 5 strings use direct runtime helpers. 
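
// The OADDSTR threshold noted above, in source form (illustrative):
func concat(a, b, c, d, e, f string) string {
	_ = a + b + c // 3 operands: one direct helper call (concatstring3)
	// 6 operands: order preallocates a [6]string temp backing the
	// variadic runtime concatstrings call.
	return a + b + c + d + e + f
}
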
case ir.OADDSTR: n := n.(*ir.AddStringExpr) - o.exprList(n.List()) + o.exprList(n.List) - if n.List().Len() > 5 { - t := types.NewArray(types.Types[types.TSTRING], int64(n.List().Len())) + if n.List.Len() > 5 { + t := types.NewArray(types.Types[types.TSTRING], int64(n.List.Len())) n.Prealloc = o.newTemp(t, false) } @@ -1128,13 +1128,13 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { hasbyte := false haslit := false - for _, n1 := range n.List().Slice() { + for _, n1 := range n.List.Slice() { hasbyte = hasbyte || n1.Op() == ir.OBYTES2STR haslit = haslit || n1.Op() == ir.OLITERAL && len(ir.StringVal(n1)) != 0 } if haslit && hasbyte { - for _, n2 := range n.List().Slice() { + for _, n2 := range n.List.Slice() { if n2.Op() == ir.OBYTES2STR { n2 := n2.(*ir.ConvExpr) n2.SetOp(ir.OBYTES2STRTMP) @@ -1145,16 +1145,16 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { case ir.OINDEXMAP: n := n.(*ir.IndexExpr) - n.SetLeft(o.expr(n.Left(), nil)) - n.SetRight(o.expr(n.Right(), nil)) + n.X = o.expr(n.X, nil) + n.Index = o.expr(n.Index, nil) needCopy := false - if !n.IndexMapLValue() { + if !n.Assigned { // Enforce that any []byte slices we are not copying // can not be changed before the map index by forcing // the map index to happen immediately following the // conversions. See copyExpr a few lines below. - needCopy = mapKeyReplaceStrConv(n.Right()) + needCopy = mapKeyReplaceStrConv(n.Index) if instrumenting { // Race detector needs the copy. @@ -1163,7 +1163,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { } // key must be addressable - n.SetRight(o.mapKeyTemp(n.Left().Type(), n.Right())) + n.Index = o.mapKeyTemp(n.X.Type(), n.Index) if needCopy { return o.copyExpr(n) } @@ -1173,22 +1173,22 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { // temporary to pass to the runtime conversion routine. case ir.OCONVIFACE: n := n.(*ir.ConvExpr) - n.SetLeft(o.expr(n.Left(), nil)) - if n.Left().Type().IsInterface() { + n.X = o.expr(n.X, nil) + if n.X.Type().IsInterface() { return n } - if _, needsaddr := convFuncName(n.Left().Type(), n.Type()); needsaddr || isStaticCompositeLiteral(n.Left()) { + if _, needsaddr := convFuncName(n.X.Type(), n.Type()); needsaddr || isStaticCompositeLiteral(n.X) { // Need a temp if we need to pass the address to the conversion function. // We also process static composite literal node here, making a named static global // whose address we can put directly in an interface (see OCONVIFACE case in walk). - n.SetLeft(o.addrTemp(n.Left())) + n.X = o.addrTemp(n.X) } return n case ir.OCONVNOP: n := n.(*ir.ConvExpr) - if n.Type().IsKind(types.TUNSAFEPTR) && n.Left().Type().IsKind(types.TUINTPTR) && (n.Left().Op() == ir.OCALLFUNC || n.Left().Op() == ir.OCALLINTER || n.Left().Op() == ir.OCALLMETH) { - call := n.Left().(*ir.CallExpr) + if n.Type().IsKind(types.TUNSAFEPTR) && n.X.Type().IsKind(types.TUINTPTR) && (n.X.Op() == ir.OCALLFUNC || n.X.Op() == ir.OCALLINTER || n.X.Op() == ir.OCALLMETH) { + call := n.X.(*ir.CallExpr) // When reordering unsafe.Pointer(f()) into a separate // statement, the conversion and function call must stay // together. See golang.org/issue/15329. @@ -1198,7 +1198,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { return o.copyExpr(n) } } else { - n.SetLeft(o.expr(n.Left(), nil)) + n.X = o.expr(n.X, nil) } return n @@ -1216,7 +1216,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { r := o.newTemp(n.Type(), false) // Evaluate left-hand side. 
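
// The OCONVNOP case above keeps a uintptr-returning call and its
// unsafe.Pointer conversion together when introducing temporaries, per
// golang.org/issue/15329 (f is illustrative):
import "unsafe" // needed by this sketch

func toPointer(f func() uintptr) unsafe.Pointer {
	return unsafe.Pointer(f()) // reordered as one unit, never split
}
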
- lhs := o.expr(n.Left(), nil) + lhs := o.expr(n.X, nil) o.out = append(o.out, typecheck(ir.NewAssignStmt(base.Pos, r, lhs), ctxStmt)) // Evaluate right-hand side, save generated code. @@ -1224,7 +1224,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { o.out = nil t := o.markTemp() o.edge() - rhs := o.expr(n.Right(), nil) + rhs := o.expr(n.Y, nil) o.out = append(o.out, typecheck(ir.NewAssignStmt(base.Pos, r, rhs), ctxStmt)) o.cleanTemp(t) gen := o.out @@ -1233,9 +1233,9 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { // If left-hand side doesn't cause a short-circuit, issue right-hand side. nif := ir.NewIfStmt(base.Pos, r, nil, nil) if n.Op() == ir.OANDAND { - nif.PtrBody().Set(gen) + nif.Body.Set(gen) } else { - nif.PtrRlist().Set(gen) + nif.Else.Set(gen) } o.out = append(o.out, nif) return r @@ -1261,8 +1261,8 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { if isRuneCount(n) { // len([]rune(s)) is rewritten to runtime.countrunes(s) later. - conv := n.(*ir.UnaryExpr).Left().(*ir.ConvExpr) - conv.SetLeft(o.expr(conv.Left(), nil)) + conv := n.(*ir.UnaryExpr).X.(*ir.ConvExpr) + conv.X = o.expr(conv.X, nil) } else { o.call(n) } @@ -1276,21 +1276,21 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { // Check for append(x, make([]T, y)...) . n := n.(*ir.CallExpr) if isAppendOfMake(n) { - n.List().SetFirst(o.expr(n.List().First(), nil)) // order x - mk := n.List().Second().(*ir.MakeExpr) - mk.SetLeft(o.expr(mk.Left(), nil)) // order y + n.Args.SetFirst(o.expr(n.Args.First(), nil)) // order x + mk := n.Args.Second().(*ir.MakeExpr) + mk.Len = o.expr(mk.Len, nil) // order y } else { - o.exprList(n.List()) + o.exprList(n.Args) } - if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.List().First()) { + if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.Args.First()) { return o.copyExpr(n) } return n case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR: n := n.(*ir.SliceExpr) - n.SetLeft(o.expr(n.Left(), nil)) + n.X = o.expr(n.X, nil) low, high, max := n.SliceBounds() low = o.expr(low, nil) low = o.cheapExpr(low) @@ -1299,21 +1299,21 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { max = o.expr(max, nil) max = o.cheapExpr(max) n.SetSliceBounds(low, high, max) - if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.Left()) { + if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.X) { return o.copyExpr(n) } return n case ir.OCLOSURE: n := n.(*ir.ClosureExpr) - if n.Transient() && len(n.Func().ClosureVars) > 0 { + if n.Transient() && len(n.Func.ClosureVars) > 0 { n.Prealloc = o.newTemp(closureType(n), false) } return n case ir.OCALLPART: n := n.(*ir.CallPartExpr) - n.SetLeft(o.expr(n.Left(), nil)) + n.X = o.expr(n.X, nil) if n.Transient() { t := partialCallType(n) n.Prealloc = o.newTemp(t, false) @@ -1322,7 +1322,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { case ir.OSLICELIT: n := n.(*ir.CompLitExpr) - o.exprList(n.List()) + o.exprList(n.List) if n.Transient() { t := types.NewArray(n.Type().Elem(), n.Len) n.Prealloc = o.newTemp(t, false) @@ -1331,7 +1331,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { case ir.ODOTTYPE, ir.ODOTTYPE2: n := n.(*ir.TypeAssertExpr) - n.SetLeft(o.expr(n.Left(), nil)) + n.X = o.expr(n.X, nil) if !isdirectiface(n.Type()) || instrumenting { return o.copyExprClear(n) } @@ -1339,32 +1339,32 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { case ir.ORECV: n := n.(*ir.UnaryExpr) - n.SetLeft(o.expr(n.Left(), nil)) + n.X = o.expr(n.X, nil) return o.copyExprClear(n) case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, 
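
// The OANDAND/OROR lowering above, as a source rewrite (illustrative):
func andand(a, b func() bool) bool {
	r := a()
	if r { // for OROR the generated code lands in the else branch
		r = b()
	}
	return r
}
// isRuneCount in the same switch preserves len([]rune(s)) intact so
// walk can lower it to runtime.countrunes(s) without building the slice.
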
ir.OGT, ir.OGE: n := n.(*ir.BinaryExpr) - n.SetLeft(o.expr(n.Left(), nil)) - n.SetRight(o.expr(n.Right(), nil)) + n.X = o.expr(n.X, nil) + n.Y = o.expr(n.Y, nil) - t := n.Left().Type() + t := n.X.Type() switch { case t.IsString(): // Mark string(byteSlice) arguments to reuse byteSlice backing // buffer during conversion. String comparison does not // memorize the strings for later use, so it is safe. - if n.Left().Op() == ir.OBYTES2STR { - n.Left().(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP) + if n.X.Op() == ir.OBYTES2STR { + n.X.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP) } - if n.Right().Op() == ir.OBYTES2STR { - n.Right().(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP) + if n.Y.Op() == ir.OBYTES2STR { + n.Y.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP) } case t.IsStruct() || t.IsArray(): // for complex comparisons, we need both args to be // addressable so we can pass them to the runtime. - n.SetLeft(o.addrTemp(n.Left())) - n.SetRight(o.addrTemp(n.Right())) + n.X = o.addrTemp(n.X) + n.Y = o.addrTemp(n.Y) } return n @@ -1385,13 +1385,13 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { // the keys and values before storing any of them to the map. // See issue 26552. n := n.(*ir.CompLitExpr) - entries := n.List().Slice() + entries := n.List.Slice() statics := entries[:0] var dynamics []*ir.KeyExpr for _, r := range entries { r := r.(*ir.KeyExpr) - if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) { + if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) { dynamics = append(dynamics, r) continue } @@ -1399,14 +1399,14 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { // Recursively ordering some static entries can change them to dynamic; // e.g., OCONVIFACE nodes. See #31777. r = o.expr(r, nil).(*ir.KeyExpr) - if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) { + if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) { dynamics = append(dynamics, r) continue } statics = append(statics, r) } - n.PtrList().Set(statics) + n.List.Set(statics) if len(dynamics) == 0 { return n @@ -1420,7 +1420,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { // Emit eval+insert of dynamic entries, one at a time. for _, r := range dynamics { - as := ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, r.Left()), r.Right()) + as := ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, r.Key), r.Value) typecheck(as, ctxStmt) // Note: this converts the OINDEX to an OINDEXMAP o.stmt(as) } @@ -1441,10 +1441,10 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { func (o *Order) as2(n *ir.AssignListStmt) { tmplist := []ir.Node{} left := []ir.Node{} - for ni, l := range n.List().Slice() { + for ni, l := range n.Lhs.Slice() { if !ir.IsBlank(l) { tmp := o.newTemp(l.Type(), l.Type().HasPointers()) - n.List().SetIndex(ni, tmp) + n.Lhs.SetIndex(ni, tmp) tmplist = append(tmplist, tmp) left = append(left, l) } @@ -1453,8 +1453,8 @@ func (o *Order) as2(n *ir.AssignListStmt) { o.out = append(o.out, n) as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - as.PtrList().Set(left) - as.PtrRlist().Set(tmplist) + as.Lhs.Set(left) + as.Rhs.Set(tmplist) o.stmt(typecheck(as, ctxStmt)) } @@ -1462,25 +1462,25 @@ func (o *Order) as2(n *ir.AssignListStmt) { // Just like as2, this also adds temporaries to ensure left-to-right assignment. 
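
// Source-level effect of the OMAPLIT ordering above (illustrative):
func lit(f func() int) map[string]int {
	m := map[string]int{
		"a": 1,   // static entry: kept in the composite literal
		"b": f(), // dynamic entry: split out as m["b"] = f()
	}
	return m // all keys and values are evaluated before any store (#26552)
}
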
func (o *Order) okAs2(n *ir.AssignListStmt) { var tmp1, tmp2 ir.Node - if !ir.IsBlank(n.List().First()) { - typ := n.Rlist().First().Type() + if !ir.IsBlank(n.Lhs.First()) { + typ := n.Rhs.First().Type() tmp1 = o.newTemp(typ, typ.HasPointers()) } - if !ir.IsBlank(n.List().Second()) { + if !ir.IsBlank(n.Lhs.Second()) { tmp2 = o.newTemp(types.Types[types.TBOOL], false) } o.out = append(o.out, n) if tmp1 != nil { - r := ir.NewAssignStmt(base.Pos, n.List().First(), tmp1) + r := ir.NewAssignStmt(base.Pos, n.Lhs.First(), tmp1) o.mapAssign(typecheck(r, ctxStmt)) - n.List().SetFirst(tmp1) + n.Lhs.SetFirst(tmp1) } if tmp2 != nil { - r := ir.NewAssignStmt(base.Pos, n.List().Second(), conv(tmp2, n.List().Second().Type())) + r := ir.NewAssignStmt(base.Pos, n.Lhs.Second(), conv(tmp2, n.Lhs.Second().Type())) o.mapAssign(typecheck(r, ctxStmt)) - n.List().SetSecond(tmp2) + n.Lhs.SetSecond(tmp2) } } diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index e43471dbcaafb..32550c8bd429b 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -68,11 +68,11 @@ func emitptrargsmap(fn *ir.Func) { // the top of the stack and increasing in size. // Non-autos sort on offset. func cmpstackvarlt(a, b *ir.Name) bool { - if (a.Class() == ir.PAUTO) != (b.Class() == ir.PAUTO) { - return b.Class() == ir.PAUTO + if (a.Class_ == ir.PAUTO) != (b.Class_ == ir.PAUTO) { + return b.Class_ == ir.PAUTO } - if a.Class() != ir.PAUTO { + if a.Class_ != ir.PAUTO { return a.FrameOffset() < b.FrameOffset() } @@ -113,7 +113,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { // Mark the PAUTO's unused. for _, ln := range fn.Dcl { - if ln.Class() == ir.PAUTO { + if ln.Class_ == ir.PAUTO { ln.SetUsed(false) } } @@ -128,7 +128,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { for _, b := range f.Blocks { for _, v := range b.Values { if n, ok := v.Aux.(*ir.Name); ok { - switch n.Class() { + switch n.Class_ { case ir.PPARAM, ir.PPARAMOUT: // Don't modify nodfp; it is a global. if n != nodfp { @@ -154,7 +154,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { // Reassign stack offsets of the locals that are used. lastHasPtr := false for i, n := range fn.Dcl { - if n.Op() != ir.ONAME || n.Class() != ir.PAUTO { + if n.Op() != ir.ONAME || n.Class_ != ir.PAUTO { continue } if !n.Used() { @@ -207,7 +207,7 @@ func funccompile(fn *ir.Func) { // assign parameter offsets dowidth(fn.Type()) - if fn.Body().Len() == 0 { + if fn.Body.Len() == 0 { // Initialize ABI wrappers if necessary. initLSym(fn, false) emitptrargsmap(fn) @@ -249,7 +249,7 @@ func compile(fn *ir.Func) { // because symbols must be allocated before the parallel // phase of the compiler. for _, n := range fn.Dcl { - switch n.Class() { + switch n.Class_ { case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO: if livenessShouldTrack(n) && n.Addrtaken() { dtypesym(n.Type()) @@ -360,7 +360,7 @@ func compileFunctions() { // since they're most likely to be the slowest. // This helps avoid stragglers. 
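
// okAs2 above, in source terms (illustrative): both results of a
// comma-ok form pass through temporaries before moving to the real
// destinations via mapAssign.
func assertOK(x interface{}) (int, bool) {
	v, ok := x.(int) // roughly: tmp1, tmp2 := x.(int); v, ok = tmp1, tmp2
	return v, ok
}
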
sort.Slice(compilequeue, func(i, j int) bool { - return compilequeue[i].Body().Len() > compilequeue[j].Body().Len() + return compilequeue[i].Body.Len() > compilequeue[j].Body.Len() }) } var wg sync.WaitGroup @@ -440,7 +440,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S if n.Op() != ir.ONAME { // might be OTYPE or OLITERAL continue } - switch n.Class() { + switch n.Class_ { case ir.PAUTO: if !n.Used() { // Text == nil -> generating abstract function @@ -533,7 +533,7 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { var abbrev int var offs int64 - switch n.Class() { + switch n.Class_ { case ir.PAUTO: offs = n.FrameOffset() abbrev = dwarf.DW_ABRV_AUTO @@ -549,7 +549,7 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { abbrev = dwarf.DW_ABRV_PARAM offs = n.FrameOffset() + base.Ctxt.FixedFrameSize() default: - base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n) + base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class_, n) } typename := dwarf.InfoPrefix + typesymname(n.Type()) @@ -566,7 +566,7 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { declpos := base.Ctxt.InnermostPos(declPos(n)) return &dwarf.Var{ Name: n.Sym().Name, - IsReturnValue: n.Class() == ir.PPARAMOUT, + IsReturnValue: n.Class_ == ir.PPARAMOUT, IsInlFormal: n.Name().InlFormal(), Abbrev: abbrev, StackOffset: int32(offs), @@ -643,7 +643,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir if c == '.' || n.Type().IsUntyped() { continue } - if n.Class() == ir.PPARAM && !canSSAType(n.Type()) { + if n.Class_ == ir.PPARAM && !canSSAType(n.Type()) { // SSA-able args get location lists, and may move in and // out of registers, so those are handled elsewhere. // Autos and named output params seem to get handled @@ -658,10 +658,10 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir typename := dwarf.InfoPrefix + typesymname(n.Type()) decls = append(decls, n) abbrev := dwarf.DW_ABRV_AUTO_LOCLIST - isReturnValue := (n.Class() == ir.PPARAMOUT) - if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT { + isReturnValue := (n.Class_ == ir.PPARAMOUT) + if n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT { abbrev = dwarf.DW_ABRV_PARAM_LOCLIST - } else if n.Class() == ir.PAUTOHEAP { + } else if n.Class_ == ir.PAUTOHEAP { // If dcl in question has been promoted to heap, do a bit // of extra work to recover original class (auto or param); // see issue 30908. This insures that we get the proper @@ -670,9 +670,9 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir // and not stack). 
// TODO(thanm): generate a better location expression stackcopy := n.Name().Stackcopy - if stackcopy != nil && (stackcopy.Class() == ir.PPARAM || stackcopy.Class() == ir.PPARAMOUT) { + if stackcopy != nil && (stackcopy.Class_ == ir.PPARAM || stackcopy.Class_ == ir.PPARAMOUT) { abbrev = dwarf.DW_ABRV_PARAM_LOCLIST - isReturnValue = (stackcopy.Class() == ir.PPARAMOUT) + isReturnValue = (stackcopy.Class_ == ir.PPARAMOUT) } } inlIndex := 0 @@ -731,7 +731,7 @@ func preInliningDcls(fnsym *obj.LSym) []*ir.Name { func stackOffset(slot ssa.LocalSlot) int32 { n := slot.N var off int64 - switch n.Class() { + switch n.Class_ { case ir.PAUTO: off = n.FrameOffset() if base.Ctxt.FixedFrameSize() == 0 { @@ -753,7 +753,7 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var n := debug.Vars[varID] var abbrev int - switch n.Class() { + switch n.Class_ { case ir.PAUTO: abbrev = dwarf.DW_ABRV_AUTO_LOCLIST case ir.PPARAM, ir.PPARAMOUT: @@ -777,7 +777,7 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var declpos := base.Ctxt.InnermostPos(n.Pos()) dvar := &dwarf.Var{ Name: n.Sym().Name, - IsReturnValue: n.Class() == ir.PPARAMOUT, + IsReturnValue: n.Class_ == ir.PPARAMOUT, IsInlFormal: n.Name().InlFormal(), Abbrev: abbrev, Type: base.Ctxt.Lookup(typename), diff --git a/src/cmd/compile/internal/gc/pgen_test.go b/src/cmd/compile/internal/gc/pgen_test.go index 3875fb7223461..1170db26818b6 100644 --- a/src/cmd/compile/internal/gc/pgen_test.go +++ b/src/cmd/compile/internal/gc/pgen_test.go @@ -44,7 +44,7 @@ func TestCmpstackvar(t *testing.T) { n := NewName(s) n.SetType(t) n.SetFrameOffset(xoffset) - n.SetClass(cl) + n.Class_ = cl return n } testdata := []struct { @@ -159,7 +159,7 @@ func TestStackvarSort(t *testing.T) { n := NewName(s) n.SetType(t) n.SetFrameOffset(xoffset) - n.SetClass(cl) + n.Class_ = cl return n } inp := []*ir.Name{ diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index 77cd9c5b19758..0b796ae7fa1b5 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -211,7 +211,7 @@ func livenessShouldTrack(nn ir.Node) bool { return false } n := nn.(*ir.Name) - return (n.Class() == ir.PAUTO || n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Type().HasPointers() + return (n.Class_ == ir.PAUTO || n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT) && n.Type().HasPointers() } // getvariables returns the list of on-stack variables that we need to track @@ -238,7 +238,7 @@ func (lv *Liveness) initcache() { lv.cache.initialized = true for i, node := range lv.vars { - switch node.Class() { + switch node.Class_ { case ir.PPARAM: // A return instruction with a p.to is a tail return, which brings // the stack pointer back up (if it ever went down) and then jumps @@ -494,7 +494,7 @@ func (lv *Liveness) pointerMap(liveout bvec, vars []*ir.Name, args, locals bvec) break } node := vars[i] - switch node.Class() { + switch node.Class_ { case ir.PAUTO: onebitwalktype1(node.Type(), node.FrameOffset()+lv.stkptrsize, locals) @@ -795,7 +795,7 @@ func (lv *Liveness) epilogue() { // don't need to keep the stack copy live? if lv.fn.HasDefer() { for i, n := range lv.vars { - if n.Class() == ir.PPARAMOUT { + if n.Class_ == ir.PPARAMOUT { if n.Name().IsOutputParamHeapAddr() { // Just to be paranoid. Heap addresses are PAUTOs. 
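
// livenessShouldTrack above limits liveness maps to pointer-bearing
// stack variables (illustrative):
func track(p *int, n int) *int {
	q := new(int) // PAUTO holding a pointer: tracked
	n++           // no pointers: not tracked
	_ = n
	_ = p // PPARAM with a pointer: tracked
	return q
}
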
base.Fatalf("variable %v both output param and heap output param", n) @@ -893,7 +893,7 @@ func (lv *Liveness) epilogue() { if !liveout.Get(int32(i)) { continue } - if n.Class() == ir.PPARAM { + if n.Class_ == ir.PPARAM { continue // ok } base.Fatalf("bad live variable at entry of %v: %L", lv.fn.Nname, n) @@ -926,7 +926,7 @@ func (lv *Liveness) epilogue() { // the only things that can possibly be live are the // input parameters. for j, n := range lv.vars { - if n.Class() != ir.PPARAM && lv.stackMaps[0].Get(int32(j)) { + if n.Class_ != ir.PPARAM && lv.stackMaps[0].Get(int32(j)) { lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Nname, n) } } @@ -1171,7 +1171,7 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) { // (Nodes without pointers aren't in lv.vars; see livenessShouldTrack.) var maxArgNode *ir.Name for _, n := range lv.vars { - switch n.Class() { + switch n.Class_ { case ir.PPARAM, ir.PPARAMOUT: if maxArgNode == nil || n.FrameOffset() > maxArgNode.FrameOffset() { maxArgNode = n diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index 4a753328f2470..3aa4ff71fa14e 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -27,7 +27,7 @@ func typecheckrange(n *ir.RangeStmt) { // second half of dance, the first half being typecheckrangeExpr n.SetTypecheck(1) - ls := n.List().Slice() + ls := n.Vars.Slice() for i1, n1 := range ls { if n1.Typecheck() == 0 { ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign) @@ -35,19 +35,19 @@ func typecheckrange(n *ir.RangeStmt) { } decldepth++ - typecheckslice(n.Body().Slice(), ctxStmt) + typecheckslice(n.Body.Slice(), ctxStmt) decldepth-- } func typecheckrangeExpr(n *ir.RangeStmt) { - n.SetRight(typecheck(n.Right(), ctxExpr)) + n.X = typecheck(n.X, ctxExpr) - t := n.Right().Type() + t := n.X.Type() if t == nil { return } // delicate little dance. see typecheckas2 - ls := n.List().Slice() + ls := n.Vars.Slice() for i1, n1 := range ls { if !ir.DeclaredBy(n1, n) { ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign) @@ -63,7 +63,7 @@ func typecheckrangeExpr(n *ir.RangeStmt) { toomany := false switch t.Kind() { default: - base.ErrorfAt(n.Pos(), "cannot range over %L", n.Right()) + base.ErrorfAt(n.Pos(), "cannot range over %L", n.X) return case types.TARRAY, types.TSLICE: @@ -76,13 +76,13 @@ func typecheckrangeExpr(n *ir.RangeStmt) { case types.TCHAN: if !t.ChanDir().CanRecv() { - base.ErrorfAt(n.Pos(), "invalid operation: range %v (receive from send-only type %v)", n.Right(), n.Right().Type()) + base.ErrorfAt(n.Pos(), "invalid operation: range %v (receive from send-only type %v)", n.X, n.X.Type()) return } t1 = t.Elem() t2 = nil - if n.List().Len() == 2 { + if n.Vars.Len() == 2 { toomany = true } @@ -91,16 +91,16 @@ func typecheckrangeExpr(n *ir.RangeStmt) { t2 = types.RuneType } - if n.List().Len() > 2 || toomany { + if n.Vars.Len() > 2 || toomany { base.ErrorfAt(n.Pos(), "too many variables in range") } var v1, v2 ir.Node - if n.List().Len() != 0 { - v1 = n.List().First() + if n.Vars.Len() != 0 { + v1 = n.Vars.First() } - if n.List().Len() > 1 { - v2 = n.List().Second() + if n.Vars.Len() > 1 { + v2 = n.Vars.Second() } // this is not only an optimization but also a requirement in the spec. @@ -109,7 +109,7 @@ func typecheckrangeExpr(n *ir.RangeStmt) { // present." if ir.IsBlank(v2) { if v1 != nil { - n.PtrList().Set1(v1) + n.Vars.Set1(v1) } v2 = nil } @@ -159,7 +159,7 @@ func cheapComputableIndex(width int64) bool { // the returned node. 
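
// The range-variable rules enforced above, in source form
// (illustrative):
func rangeVars(ch chan int, s []int) {
	for v := range ch { // channels allow at most one variable
		_ = v
	}
	for i, _ := range s { // the blank second variable is dropped,
		_ = i // leaving the equivalent of "for i := range s"
	}
}
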
func walkrange(nrange *ir.RangeStmt) ir.Node { if isMapClear(nrange) { - m := nrange.Right() + m := nrange.X lno := setlineno(m) n := mapClear(m) base.Pos = lno @@ -168,7 +168,7 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { nfor := ir.NewForStmt(nrange.Pos(), nil, nil, nil, nil) nfor.SetInit(nrange.Init()) - nfor.SetSym(nrange.Sym()) + nfor.Label = nrange.Label // variable name conventions: // ohv1, hv1, hv2: hidden (old) val 1, 2 @@ -179,17 +179,17 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { t := nrange.Type() - a := nrange.Right() + a := nrange.X lno := setlineno(a) var v1, v2 ir.Node - l := nrange.List().Len() + l := nrange.Vars.Len() if l > 0 { - v1 = nrange.List().First() + v1 = nrange.Vars.First() } if l > 1 { - v2 = nrange.List().Second() + v2 = nrange.Vars.Second() } if ir.IsBlank(v2) { @@ -227,8 +227,8 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil)) init = append(init, ir.NewAssignStmt(base.Pos, hn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha))) - nfor.SetLeft(ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)) - nfor.SetRight(ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, nodintconst(1)))) + nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn) + nfor.Post = ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, nodintconst(1))) // for range ha { body } if v1 == nil { @@ -249,8 +249,8 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { // Use OAS2 to correctly handle assignments // of the form "v1, a[v1] := range". a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - a.PtrList().Set2(v1, v2) - a.PtrRlist().Set2(hv1, tmp) + a.Lhs.Set2(v1, v2) + a.Rhs.Set2(hv1, tmp) body = []ir.Node{a} break } @@ -268,7 +268,7 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { // elimination on the index variable (see #20711). // Enhance the prove pass to understand this. ifGuard = ir.NewIfStmt(base.Pos, nil, nil, nil) - ifGuard.SetLeft(ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)) + ifGuard.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn) nfor.SetOp(ir.OFORUNTIL) hp := temp(types.NewPtr(nrange.Type().Elem())) @@ -279,8 +279,8 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { // Use OAS2 to correctly handle assignments // of the form "v1, a[v1] := range". a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - a.PtrList().Set2(v1, v2) - a.PtrRlist().Set2(hv1, ir.NewStarExpr(base.Pos, hp)) + a.Lhs.Set2(v1, v2) + a.Rhs.Set2(hv1, ir.NewStarExpr(base.Pos, hp)) body = append(body, a) // Advance pointer as part of the late increment. @@ -289,7 +289,7 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { // advancing the pointer is safe and won't go past the // end of the allocation. as := ir.NewAssignStmt(base.Pos, hp, addptr(hp, t.Elem().Width)) - nfor.PtrList().Set1(typecheck(as, ctxStmt)) + nfor.Late.Set1(typecheck(as, ctxStmt)) case types.TMAP: // order.stmt allocated the iterator for us. 
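
// Shape of the slice/array lowering assembled above for
// "for i, v := range a" (illustrative; when the element access is not
// cheap it instead becomes an OFORUNTIL loop advancing a pointer, see
// #20711):
//
//	ha := a
//	for hv1 := 0; hv1 < len(ha); hv1++ {
//		i, v = hv1, ha[hv1]
//		// body
//	}
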
@@ -305,11 +305,11 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { fn = substArgTypes(fn, t.Key(), t.Elem(), th) init = append(init, mkcall1(fn, nil, nil, typename(t), ha, nodAddr(hit))) - nfor.SetLeft(ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), nodnil())) + nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), nodnil()) fn = syslook("mapiternext") fn = substArgTypes(fn, th) - nfor.SetRight(mkcall1(fn, nil, nil, nodAddr(hit))) + nfor.Post = mkcall1(fn, nil, nil, nodAddr(hit)) key := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym)) if v1 == nil { @@ -319,8 +319,8 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { } else { elem := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, elemsym)) a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - a.PtrList().Set2(v1, v2) - a.PtrRlist().Set2(key, elem) + a.Lhs.Set2(v1, v2) + a.Rhs.Set2(key, elem) body = []ir.Node{a} } @@ -335,12 +335,12 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { } hb := temp(types.Types[types.TBOOL]) - nfor.SetLeft(ir.NewBinaryExpr(base.Pos, ir.ONE, hb, nodbool(false))) + nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, hb, nodbool(false)) a := ir.NewAssignListStmt(base.Pos, ir.OAS2RECV, nil, nil) a.SetTypecheck(1) - a.PtrList().Set2(hv1, hb) - a.PtrRlist().Set1(ir.NewUnaryExpr(base.Pos, ir.ORECV, ha)) - nfor.Left().PtrInit().Set1(a) + a.Lhs.Set2(hv1, hb) + a.Rhs.Set1(ir.NewUnaryExpr(base.Pos, ir.ORECV, ha)) + nfor.Cond.PtrInit().Set1(a) if v1 == nil { body = nil } else { @@ -378,7 +378,7 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil)) // hv1 < len(ha) - nfor.SetLeft(ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha))) + nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha)) if v1 != nil { // hv1t = hv1 @@ -392,19 +392,19 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { // if hv2 < utf8.RuneSelf nif := ir.NewIfStmt(base.Pos, nil, nil, nil) - nif.SetLeft(ir.NewBinaryExpr(base.Pos, ir.OLT, hv2, nodintconst(utf8.RuneSelf))) + nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv2, nodintconst(utf8.RuneSelf)) // hv1++ - nif.PtrBody().Set1(ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, nodintconst(1)))) + nif.Body.Set1(ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, nodintconst(1)))) // } else { eif := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - nif.PtrRlist().Set1(eif) + nif.Else.Set1(eif) // hv2, hv1 = decoderune(ha, hv1) - eif.PtrList().Set2(hv2, hv1) + eif.Lhs.Set2(hv2, hv1) fn := syslook("decoderune") - eif.PtrRlist().Set1(mkcall1(fn, fn.Type().Results(), nil, ha, hv1)) + eif.Rhs.Set1(mkcall1(fn, fn.Type().Results(), nil, ha, hv1)) body = append(body, nif) @@ -412,8 +412,8 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { if v2 != nil { // v1, v2 = hv1t, hv2 a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - a.PtrList().Set2(v1, v2) - a.PtrRlist().Set2(hv1t, hv2) + a.Lhs.Set2(v1, v2) + a.Rhs.Set2(hv1t, hv2) body = append(body, a) } else { // v1 = hv1t @@ -431,18 +431,18 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { nfor.PtrInit().Append(init...) 
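
// Shape of the map and string lowerings assembled above (illustrative):
//
//	// for k, v := range m
//	mapiterinit(typename(m), m, &hit)
//	for ; hit.key != nil; mapiternext(&hit) {
//		k, v = *hit.key, *hit.elem
//	}
//
//	// for i, r := range s
//	for hv1 := 0; hv1 < len(s); {
//		hv1t, hv2 := hv1, rune(s[hv1])
//		if hv2 < utf8.RuneSelf {
//			hv1++
//		} else {
//			hv2, hv1 = decoderune(s, hv1)
//		}
//		i, r = hv1t, hv2
//	}
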
} - typecheckslice(nfor.Left().Init().Slice(), ctxStmt) + typecheckslice(nfor.Cond.Init().Slice(), ctxStmt) - nfor.SetLeft(typecheck(nfor.Left(), ctxExpr)) - nfor.SetLeft(defaultlit(nfor.Left(), nil)) - nfor.SetRight(typecheck(nfor.Right(), ctxStmt)) + nfor.Cond = typecheck(nfor.Cond, ctxExpr) + nfor.Cond = defaultlit(nfor.Cond, nil) + nfor.Post = typecheck(nfor.Post, ctxStmt) typecheckslice(body, ctxStmt) - nfor.PtrBody().Append(body...) - nfor.PtrBody().Append(nrange.Body().Slice()...) + nfor.Body.Append(body...) + nfor.Body.Append(nrange.Body.Slice()...) var n ir.Node = nfor if ifGuard != nil { - ifGuard.PtrBody().Set1(n) + ifGuard.Body.Set1(n) n = ifGuard } @@ -464,11 +464,11 @@ func isMapClear(n *ir.RangeStmt) bool { return false } - if n.Op() != ir.ORANGE || n.Type().Kind() != types.TMAP || n.List().Len() != 1 { + if n.Op() != ir.ORANGE || n.Type().Kind() != types.TMAP || n.Vars.Len() != 1 { return false } - k := n.List().First() + k := n.Vars.First() if k == nil || ir.IsBlank(k) { return false } @@ -478,17 +478,17 @@ func isMapClear(n *ir.RangeStmt) bool { return false } - if n.Body().Len() != 1 { + if n.Body.Len() != 1 { return false } - stmt := n.Body().First() // only stmt in body + stmt := n.Body.First() // only stmt in body if stmt == nil || stmt.Op() != ir.ODELETE { return false } - m := n.Right() - if delete := stmt.(*ir.CallExpr); !samesafeexpr(delete.List().First(), m) || !samesafeexpr(delete.List().Second(), k) { + m := n.X + if delete := stmt.(*ir.CallExpr); !samesafeexpr(delete.Args.First(), m) || !samesafeexpr(delete.Args.Second(), k) { return false } @@ -531,26 +531,26 @@ func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node { return nil } - if loop.Body().Len() != 1 || loop.Body().First() == nil { + if loop.Body.Len() != 1 || loop.Body.First() == nil { return nil } - stmt1 := loop.Body().First() // only stmt in body + stmt1 := loop.Body.First() // only stmt in body if stmt1.Op() != ir.OAS { return nil } stmt := stmt1.(*ir.AssignStmt) - if stmt.Left().Op() != ir.OINDEX { + if stmt.X.Op() != ir.OINDEX { return nil } - lhs := stmt.Left().(*ir.IndexExpr) + lhs := stmt.X.(*ir.IndexExpr) - if !samesafeexpr(lhs.Left(), a) || !samesafeexpr(lhs.Right(), v1) { + if !samesafeexpr(lhs.X, a) || !samesafeexpr(lhs.Index, v1) { return nil } elemsize := loop.Type().Elem().Width - if elemsize <= 0 || !isZero(stmt.Right()) { + if elemsize <= 0 || !isZero(stmt.Y) { return nil } @@ -562,8 +562,8 @@ func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node { // i = len(a) - 1 // } n := ir.NewIfStmt(base.Pos, nil, nil, nil) - n.PtrBody().Set(nil) - n.SetLeft(ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), nodintconst(0))) + n.Body.Set(nil) + n.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), nodintconst(0)) // hp = &a[0] hp := temp(types.Types[types.TUNSAFEPTR]) @@ -571,12 +571,12 @@ func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node { ix := ir.NewIndexExpr(base.Pos, a, nodintconst(0)) ix.SetBounded(true) addr := convnop(nodAddr(ix), types.Types[types.TUNSAFEPTR]) - n.PtrBody().Append(ir.NewAssignStmt(base.Pos, hp, addr)) + n.Body.Append(ir.NewAssignStmt(base.Pos, hp, addr)) // hn = len(a) * sizeof(elem(a)) hn := temp(types.Types[types.TUINTPTR]) mul := conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), nodintconst(elemsize)), types.Types[types.TUINTPTR]) - n.PtrBody().Append(ir.NewAssignStmt(base.Pos, hn, mul)) + n.Body.Append(ir.NewAssignStmt(base.Pos, hn, mul)) var fn ir.Node 
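
// The two loop idioms recognized above, in source form (illustrative):
func clears(m map[string]int, a []int) {
	for k := range m { // isMapClear: replaced by one runtime mapclear call
		delete(m, k)
	}
	for i := range a { // arrayClear: one memclr of the backing store,
		a[i] = 0 // then i = len(a) - 1
	}
}
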
if a.Type().Elem().HasPointers() { @@ -588,16 +588,16 @@ func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node { fn = mkcall("memclrNoHeapPointers", nil, nil, hp, hn) } - n.PtrBody().Append(fn) + n.Body.Append(fn) // i = len(a) - 1 v1 = ir.NewAssignStmt(base.Pos, v1, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), nodintconst(1))) - n.PtrBody().Append(v1) + n.Body.Append(v1) - n.SetLeft(typecheck(n.Left(), ctxExpr)) - n.SetLeft(defaultlit(n.Left(), nil)) - typecheckslice(n.Body().Slice(), ctxStmt) + n.Cond = typecheck(n.Cond, ctxExpr) + n.Cond = defaultlit(n.Cond, nil) + typecheckslice(n.Body.Slice(), ctxStmt) return walkstmt(n) } diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 92b04f20d5039..07552e64b4623 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -994,7 +994,7 @@ func typename(t *types.Type) *ir.AddrExpr { if s.Def == nil { n := ir.NewNameAt(src.NoXPos, s) n.SetType(types.Types[types.TUINT8]) - n.SetClass(ir.PEXTERN) + n.Class_ = ir.PEXTERN n.SetTypecheck(1) s.Def = n } @@ -1013,7 +1013,7 @@ func itabname(t, itype *types.Type) *ir.AddrExpr { if s.Def == nil { n := NewName(s) n.SetType(types.Types[types.TUINT8]) - n.SetClass(ir.PEXTERN) + n.Class_ = ir.PEXTERN n.SetTypecheck(1) s.Def = n itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()}) @@ -1875,7 +1875,7 @@ func zeroaddr(size int64) ir.Node { if s.Def == nil { x := NewName(s) x.SetType(types.Types[types.TUINT8]) - x.SetClass(ir.PEXTERN) + x.Class_ = ir.PEXTERN x.SetTypecheck(1) s.Def = x } diff --git a/src/cmd/compile/internal/gc/scc.go b/src/cmd/compile/internal/gc/scc.go index f2d089fa4c7a4..a5a64809582e8 100644 --- a/src/cmd/compile/internal/gc/scc.go +++ b/src/cmd/compile/internal/gc/scc.go @@ -58,7 +58,7 @@ func visitBottomUp(list []ir.Node, analyze func(list []*ir.Func, recursive bool) for _, n := range list { if n.Op() == ir.ODCLFUNC { n := n.(*ir.Func) - if !n.Func().IsHiddenClosure() { + if !n.IsHiddenClosure() { v.visit(n) } } @@ -82,7 +82,7 @@ func (v *bottomUpVisitor) visit(n *ir.Func) uint32 { switch n.Op() { case ir.ONAME: n := n.(*ir.Name) - if n.Class() == ir.PFUNC { + if n.Class_ == ir.PFUNC { if n != nil && n.Name().Defn != nil { if m := v.visit(n.Name().Defn.(*ir.Func)); m < min { min = m @@ -100,7 +100,7 @@ func (v *bottomUpVisitor) visit(n *ir.Func) uint32 { case ir.ODOTMETH: n := n.(*ir.SelectorExpr) fn := methodExprName(n) - if fn != nil && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC && fn.Defn != nil { + if fn != nil && fn.Op() == ir.ONAME && fn.Class_ == ir.PFUNC && fn.Defn != nil { if m := v.visit(fn.Defn.(*ir.Func)); m < min { min = m } @@ -109,7 +109,7 @@ func (v *bottomUpVisitor) visit(n *ir.Func) uint32 { n := n.(*ir.CallPartExpr) fn := ir.AsNode(callpartMethod(n).Nname) if fn != nil && fn.Op() == ir.ONAME { - if fn := fn.(*ir.Name); fn.Class() == ir.PFUNC && fn.Name().Defn != nil { + if fn := fn.(*ir.Name); fn.Class_ == ir.PFUNC && fn.Name().Defn != nil { if m := v.visit(fn.Name().Defn.(*ir.Func)); m < min { min = m } @@ -117,7 +117,7 @@ func (v *bottomUpVisitor) visit(n *ir.Func) uint32 { } case ir.OCLOSURE: n := n.(*ir.ClosureExpr) - if m := v.visit(n.Func()); m < min { + if m := v.visit(n.Func); m < min { min = m } } diff --git a/src/cmd/compile/internal/gc/scope.go b/src/cmd/compile/internal/gc/scope.go index 8dd44b1dd4513..9ab33583c89ce 100644 --- a/src/cmd/compile/internal/gc/scope.go +++ b/src/cmd/compile/internal/gc/scope.go @@ -30,13 
+30,13 @@ func findScope(marks []ir.Mark, pos src.XPos) ir.ScopeID { func assembleScopes(fnsym *obj.LSym, fn *ir.Func, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope { // Initialize the DWARF scope tree based on lexical scopes. - dwarfScopes := make([]dwarf.Scope, 1+len(fn.Func().Parents)) - for i, parent := range fn.Func().Parents { + dwarfScopes := make([]dwarf.Scope, 1+len(fn.Parents)) + for i, parent := range fn.Parents { dwarfScopes[i+1].Parent = int32(parent) } scopeVariables(dwarfVars, varScopes, dwarfScopes) - scopePCs(fnsym, fn.Func().Marks, dwarfScopes) + scopePCs(fnsym, fn.Marks, dwarfScopes) return compactScopes(dwarfScopes) } diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go index 64d3461dca4ae..5c69be7e06ecd 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/gc/select.go @@ -15,30 +15,30 @@ func typecheckselect(sel *ir.SelectStmt) { var def ir.Node lno := setlineno(sel) typecheckslice(sel.Init().Slice(), ctxStmt) - for _, ncase := range sel.List().Slice() { + for _, ncase := range sel.Cases.Slice() { ncase := ncase.(*ir.CaseStmt) - if ncase.List().Len() == 0 { + if ncase.List.Len() == 0 { // default if def != nil { base.ErrorfAt(ncase.Pos(), "multiple defaults in select (first at %v)", ir.Line(def)) } else { def = ncase } - } else if ncase.List().Len() > 1 { + } else if ncase.List.Len() > 1 { base.ErrorfAt(ncase.Pos(), "select cases cannot be lists") } else { - ncase.List().SetFirst(typecheck(ncase.List().First(), ctxStmt)) - n := ncase.List().First() - ncase.SetLeft(n) - ncase.PtrList().Set(nil) + ncase.List.SetFirst(typecheck(ncase.List.First(), ctxStmt)) + n := ncase.List.First() + ncase.Comm = n + ncase.List.Set(nil) oselrecv2 := func(dst, recv ir.Node, colas bool) { n := ir.NewAssignListStmt(n.Pos(), ir.OSELRECV2, nil, nil) - n.PtrList().Set2(dst, ir.BlankNode) - n.PtrRlist().Set1(recv) - n.SetColas(colas) + n.Lhs.Set2(dst, ir.BlankNode) + n.Rhs.Set1(recv) + n.Def = colas n.SetTypecheck(1) - ncase.SetLeft(n) + ncase.Comm = n } switch n.Op() { default: @@ -57,21 +57,21 @@ func typecheckselect(sel *ir.SelectStmt) { // remove implicit conversions; the eventual assignment // will reintroduce them. n := n.(*ir.AssignStmt) - if r := n.Right(); r.Op() == ir.OCONVNOP || r.Op() == ir.OCONVIFACE { + if r := n.Y; r.Op() == ir.OCONVNOP || r.Op() == ir.OCONVIFACE { r := r.(*ir.ConvExpr) if r.Implicit() { - n.SetRight(r.Left()) + n.Y = r.X } } - if n.Right().Op() != ir.ORECV { + if n.Y.Op() != ir.ORECV { base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side") break } - oselrecv2(n.Left(), n.Right(), n.Colas()) + oselrecv2(n.X, n.Y, n.Def) case ir.OAS2RECV: n := n.(*ir.AssignListStmt) - if n.Rlist().First().Op() != ir.ORECV { + if n.Rhs.First().Op() != ir.ORECV { base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side") break } @@ -87,7 +87,7 @@ func typecheckselect(sel *ir.SelectStmt) { } } - typecheckslice(ncase.Body().Slice(), ctxStmt) + typecheckslice(ncase.Body.Slice(), ctxStmt) } base.Pos = lno @@ -95,18 +95,18 @@ func typecheckselect(sel *ir.SelectStmt) { func walkselect(sel *ir.SelectStmt) { lno := setlineno(sel) - if sel.Body().Len() != 0 { + if sel.Compiled.Len() != 0 { base.Fatalf("double walkselect") } init := sel.Init().Slice() sel.PtrInit().Set(nil) - init = append(init, walkselectcases(sel.List())...) - sel.SetList(ir.Nodes{}) + init = append(init, walkselectcases(sel.Cases)...) 
+ sel.Cases = ir.Nodes{} - sel.PtrBody().Set(init) - walkstmtlist(sel.Body().Slice()) + sel.Compiled.Set(init) + walkstmtlist(sel.Compiled.Slice()) base.Pos = lno } @@ -125,8 +125,8 @@ func walkselectcases(cases ir.Nodes) []ir.Node { cas := cases.First().(*ir.CaseStmt) setlineno(cas) l := cas.Init().Slice() - if cas.Left() != nil { // not default: - n := cas.Left() + if cas.Comm != nil { // not default: + n := cas.Comm l = append(l, n.Init().Slice()...) n.PtrInit().Set(nil) switch n.Op() { @@ -138,8 +138,8 @@ func walkselectcases(cases ir.Nodes) []ir.Node { case ir.OSELRECV2: r := n.(*ir.AssignListStmt) - if ir.IsBlank(r.List().First()) && ir.IsBlank(r.List().Second()) { - n = r.Rlist().First() + if ir.IsBlank(r.Lhs.First()) && ir.IsBlank(r.Lhs.Second()) { + n = r.Rhs.First() break } r.SetOp(ir.OAS2RECV) @@ -148,7 +148,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node { l = append(l, n) } - l = append(l, cas.Body().Slice()...) + l = append(l, cas.Body.Slice()...) l = append(l, ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)) return l } @@ -159,7 +159,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node { for _, cas := range cases.Slice() { cas := cas.(*ir.CaseStmt) setlineno(cas) - n := cas.Left() + n := cas.Comm if n == nil { dflt = cas continue @@ -167,14 +167,14 @@ func walkselectcases(cases ir.Nodes) []ir.Node { switch n.Op() { case ir.OSEND: n := n.(*ir.SendStmt) - n.SetRight(nodAddr(n.Right())) - n.SetRight(typecheck(n.Right(), ctxExpr)) + n.Value = nodAddr(n.Value) + n.Value = typecheck(n.Value, ctxExpr) case ir.OSELRECV2: n := n.(*ir.AssignListStmt) - if !ir.IsBlank(n.List().First()) { - n.List().SetIndex(0, nodAddr(n.List().First())) - n.List().SetIndex(0, typecheck(n.List().First(), ctxExpr)) + if !ir.IsBlank(n.Lhs.First()) { + n.Lhs.SetIndex(0, nodAddr(n.Lhs.First())) + n.Lhs.SetIndex(0, typecheck(n.Lhs.First(), ctxExpr)) } } } @@ -186,7 +186,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node { cas = cases.Second().(*ir.CaseStmt) } - n := cas.Left() + n := cas.Comm setlineno(n) r := ir.NewIfStmt(base.Pos, nil, nil, nil) r.PtrInit().Set(cas.Init().Slice()) @@ -198,31 +198,31 @@ func walkselectcases(cases ir.Nodes) []ir.Node { case ir.OSEND: // if selectnbsend(c, v) { body } else { default body } n := n.(*ir.SendStmt) - ch := n.Left() - call = mkcall1(chanfn("selectnbsend", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), ch, n.Right()) + ch := n.Chan + call = mkcall1(chanfn("selectnbsend", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), ch, n.Value) case ir.OSELRECV2: n := n.(*ir.AssignListStmt) - recv := n.Rlist().First().(*ir.UnaryExpr) - ch := recv.Left() - elem := n.List().First() + recv := n.Rhs.First().(*ir.UnaryExpr) + ch := recv.X + elem := n.Lhs.First() if ir.IsBlank(elem) { elem = nodnil() } - if ir.IsBlank(n.List().Second()) { + if ir.IsBlank(n.Lhs.Second()) { // if selectnbrecv(&v, c) { body } else { default body } call = mkcall1(chanfn("selectnbrecv", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, ch) } else { // TODO(cuonglm): make this use selectnbrecv() // if selectnbrecv2(&v, &received, c) { body } else { default body } - receivedp := typecheck(nodAddr(n.List().Second()), ctxExpr) + receivedp := typecheck(nodAddr(n.Lhs.Second()), ctxExpr) call = mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch) } } - r.SetLeft(typecheck(call, ctxExpr)) - r.PtrBody().Set(cas.Body().Slice()) - r.PtrRlist().Set(append(dflt.Init().Slice(), dflt.Body().Slice()...)) + r.Cond = typecheck(call, ctxExpr) + 
r.Body.Set(cas.Body.Slice()) + r.Else.Set(append(dflt.Init().Slice(), dflt.Body.Slice()...)) return []ir.Node{r, ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)} } @@ -258,7 +258,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node { init = append(init, cas.Init().Slice()...) cas.PtrInit().Set(nil) - n := cas.Left() + n := cas.Comm if n == nil { // default: continue } @@ -272,15 +272,15 @@ func walkselectcases(cases ir.Nodes) []ir.Node { n := n.(*ir.SendStmt) i = nsends nsends++ - c = n.Left() - elem = n.Right() + c = n.Chan + elem = n.Value case ir.OSELRECV2: n := n.(*ir.AssignListStmt) nrecvs++ i = ncas - nrecvs - recv := n.Rlist().First().(*ir.UnaryExpr) - c = recv.Left() - elem = n.List().First() + recv := n.Rhs.First().(*ir.UnaryExpr) + c = recv.X + elem = n.Lhs.First() } casorder[i] = cas @@ -313,9 +313,9 @@ func walkselectcases(cases ir.Nodes) []ir.Node { chosen := temp(types.Types[types.TINT]) recvOK := temp(types.Types[types.TBOOL]) r := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - r.PtrList().Set2(chosen, recvOK) + r.Lhs.Set2(chosen, recvOK) fn := syslook("selectgo") - r.PtrRlist().Set1(mkcall1(fn, fn.Type().Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil))) + r.Rhs.Set1(mkcall1(fn, fn.Type().Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil))) init = append(init, typecheck(r, ctxStmt)) // selv and order are no longer alive after selectgo. @@ -332,16 +332,16 @@ func walkselectcases(cases ir.Nodes) []ir.Node { r := ir.NewIfStmt(base.Pos, cond, nil, nil) - if n := cas.Left(); n != nil && n.Op() == ir.OSELRECV2 { + if n := cas.Comm; n != nil && n.Op() == ir.OSELRECV2 { n := n.(*ir.AssignListStmt) - if !ir.IsBlank(n.List().Second()) { - x := ir.NewAssignStmt(base.Pos, n.List().Second(), recvOK) - r.PtrBody().Append(typecheck(x, ctxStmt)) + if !ir.IsBlank(n.Lhs.Second()) { + x := ir.NewAssignStmt(base.Pos, n.Lhs.Second(), recvOK) + r.Body.Append(typecheck(x, ctxStmt)) } } - r.PtrBody().AppendNodes(cas.PtrBody()) - r.PtrBody().Append(ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)) + r.Body.AppendNodes(&cas.Body) + r.Body.Append(ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)) init = append(init, r) } diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index f4988df9ac48a..0fc19a69894b5 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -61,25 +61,25 @@ func (s *InitSchedule) tryStaticInit(nn ir.Node) bool { return false } n := nn.(*ir.AssignStmt) - if ir.IsBlank(n.Left()) && !anySideEffects(n.Right()) { + if ir.IsBlank(n.X) && !anySideEffects(n.Y) { // Discard. return true } lno := setlineno(n) defer func() { base.Pos = lno }() - nam := n.Left().(*ir.Name) - return s.staticassign(nam, 0, n.Right(), nam.Type()) + nam := n.X.(*ir.Name) + return s.staticassign(nam, 0, n.Y, nam.Type()) } // like staticassign but we are copying an already // initialized value r. 
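
// Lowerings produced above for a select with a default (illustrative):
//
//	select { case c <- v: S; default: D }
//	  => if selectnbsend(c, v) { S } else { D }
//
//	select { case x = <-c: S; default: D }
//	  => if selectnbrecv(&x, c) { S } else { D }
//
// Selects with multiple communicating cases instead pack each case into
// the selv array and make one runtime selectgo call, whose chosen index
// and recvOK flag drive the per-case dispatch above.
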
func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Type) bool { - if rn.Class() == ir.PFUNC { + if rn.Class_ == ir.PFUNC { // TODO if roff != 0 { panic } pfuncsym(l, loff, rn) return true } - if rn.Class() != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg { + if rn.Class_ != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg { return false } if rn.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value @@ -92,10 +92,10 @@ func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *type return false } orig := rn - r := rn.Defn.(*ir.AssignStmt).Right() + r := rn.Defn.(*ir.AssignStmt).Y for r.Op() == ir.OCONVNOP && !types.Identical(r.Type(), typ) { - r = r.(*ir.ConvExpr).Left() + r = r.(*ir.ConvExpr).X } switch r.Op() { @@ -128,7 +128,7 @@ func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *type case ir.OADDR: r := r.(*ir.AddrExpr) - if a := r.Left(); a.Op() == ir.ONAME { + if a := r.X; a.Op() == ir.ONAME { a := a.(*ir.Name) addrsym(l, loff, a, 0) return true @@ -136,7 +136,7 @@ func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *type case ir.OPTRLIT: r := r.(*ir.AddrExpr) - switch r.Left().Op() { + switch r.X.Op() { case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT: // copy pointer addrsym(l, loff, s.inittemps[r], 0) @@ -182,7 +182,7 @@ func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *type func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *types.Type) bool { for r.Op() == ir.OCONVNOP { - r = r.(*ir.ConvExpr).Left() + r = r.(*ir.ConvExpr).X } switch r.Op() { @@ -206,7 +206,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type case ir.OADDR: r := r.(*ir.AddrExpr) - if name, offset, ok := stataddr(r.Left()); ok { + if name, offset, ok := stataddr(r.X); ok { addrsym(l, loff, name, offset) return true } @@ -214,17 +214,17 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type case ir.OPTRLIT: r := r.(*ir.AddrExpr) - switch r.Left().Op() { + switch r.X.Op() { case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT: // Init pointer. - a := staticname(r.Left().Type()) + a := staticname(r.X.Type()) s.inittemps[r] = a addrsym(l, loff, a, 0) // Init underlying literal. - if !s.staticassign(a, 0, r.Left(), a.Type()) { - s.append(ir.NewAssignStmt(base.Pos, a, r.Left())) + if !s.staticassign(a, 0, r.X, a.Type()) { + s.append(ir.NewAssignStmt(base.Pos, a, r.X)) } return true } @@ -232,8 +232,8 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type case ir.OSTR2BYTES: r := r.(*ir.ConvExpr) - if l.Class() == ir.PEXTERN && r.Left().Op() == ir.OLITERAL { - sval := ir.StringVal(r.Left()) + if l.Class_ == ir.PEXTERN && r.X.Op() == ir.OLITERAL { + sval := ir.StringVal(r.X) slicebytes(l, loff, sval) return true } @@ -284,7 +284,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type // Closures with no captured variables are globals, // so the assignment can be done at link time. 
// TODO if roff != 0 { panic }
- pfuncsym(l, loff, r.Func().Nname)
+ pfuncsym(l, loff, r.Func.Nname)
 return true
 }
 closuredebugruntimecheck(r)
@@ -297,7 +297,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type
 r := r.(*ir.ConvExpr)
 val := ir.Node(r)
 for val.Op() == ir.OCONVIFACE {
- val = val.(*ir.ConvExpr).Left()
+ val = val.(*ir.ConvExpr).X
 }

 if val.Type().IsInterface() {
@@ -321,7 +321,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type
 // Create a copy of l to modify while we emit data.

 // Emit itab, advance offset.
- addrsym(l, loff, itab.Left().(*ir.Name), 0)
+ addrsym(l, loff, itab.X.(*ir.Name), 0)

 // Emit data.
 if isdirectiface(val.Type()) {
@@ -409,7 +409,7 @@ func isSimpleName(nn ir.Node) bool {
 return false
 }
 n := nn.(*ir.Name)
- return n.Class() != ir.PAUTOHEAP && n.Class() != ir.PEXTERN
+ return n.Class_ != ir.PAUTOHEAP && n.Class_ != ir.PEXTERN
 }

 func litas(l ir.Node, r ir.Node, init *ir.Nodes) {
@@ -439,7 +439,7 @@ func getdyn(n ir.Node, top bool) initGenType {
 if !top {
 return initDynamic
 }
- if n.Len/4 > int64(n.List().Len()) {
+ if n.Len/4 > int64(n.List.Len()) {
 // <25% of entries have explicit values.
 // Very rough estimation, it takes 4 bytes of instructions
 // to initialize 1 byte of result. So don't use a static
@@ -454,12 +454,12 @@ func getdyn(n ir.Node, top bool) initGenType {
 lit := n.(*ir.CompLitExpr)
 var mode initGenType
- for _, n1 := range lit.List().Slice() {
+ for _, n1 := range lit.List.Slice() {
 switch n1.Op() {
 case ir.OKEY:
- n1 = n1.(*ir.KeyExpr).Right()
+ n1 = n1.(*ir.KeyExpr).Value
 case ir.OSTRUCTKEY:
- n1 = n1.(*ir.StructKeyExpr).Left()
+ n1 = n1.(*ir.StructKeyExpr).Value
 }
 mode |= getdyn(n1, false)
 if mode == initDynamic|initConst {
@@ -476,9 +476,9 @@ func isStaticCompositeLiteral(n ir.Node) bool {
 return false
 case ir.OARRAYLIT:
 n := n.(*ir.CompLitExpr)
- for _, r := range n.List().Slice() {
+ for _, r := range n.List.Slice() {
 if r.Op() == ir.OKEY {
- r = r.(*ir.KeyExpr).Right()
+ r = r.(*ir.KeyExpr).Value
 }
 if !isStaticCompositeLiteral(r) {
 return false
@@ -487,9 +487,9 @@ func isStaticCompositeLiteral(n ir.Node) bool {
 return true
 case ir.OSTRUCTLIT:
 n := n.(*ir.CompLitExpr)
- for _, r := range n.List().Slice() {
+ for _, r := range n.List.Slice() {
 r := r.(*ir.StructKeyExpr)
- if !isStaticCompositeLiteral(r.Left()) {
+ if !isStaticCompositeLiteral(r.Value) {
 return false
 }
 }
@@ -501,7 +501,7 @@ func isStaticCompositeLiteral(n ir.Node) bool {
 n := n.(*ir.ConvExpr)
 val := ir.Node(n)
 for val.Op() == ir.OCONVIFACE {
- val = val.(*ir.ConvExpr).Left()
+ val = val.(*ir.ConvExpr).X
 }
 if val.Type().IsInterface() {
 return val.Op() == ir.ONIL
@@ -542,11 +542,11 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node,
 splitnode = func(r ir.Node) (ir.Node, ir.Node) {
 if r.Op() == ir.OKEY {
 kv := r.(*ir.KeyExpr)
- k = indexconst(kv.Left())
+ k = indexconst(kv.Key)
 if k < 0 {
- base.Fatalf("fixedlit: invalid index %v", kv.Left())
+ base.Fatalf("fixedlit: invalid index %v", kv.Key)
 }
- r = kv.Right()
+ r = kv.Value
 }
 a := ir.NewIndexExpr(base.Pos, var_, nodintconst(k))
 k++
@@ -558,17 +558,17 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node,
 case ir.OSTRUCTLIT:
 splitnode = func(rn ir.Node) (ir.Node, ir.Node) {
 r := rn.(*ir.StructKeyExpr)
- if r.Sym().IsBlank() || isBlank {
- return ir.BlankNode, r.Left()
+ if r.Field.IsBlank() || isBlank {
+ return ir.BlankNode, r.Value
 }
 setlineno(r)
- return ir.NewSelectorExpr(base.Pos, ir.ODOT, var_, r.Sym()), r.Left()
+ return ir.NewSelectorExpr(base.Pos, ir.ODOT, var_, r.Field), r.Value
 }
 default:
 base.Fatalf("fixedlit bad op: %v", n.Op())
 }

- for _, r := range n.List().Slice() {
+ for _, r := range n.List.Slice() {
 a, value := splitnode(r)
 if a == ir.BlankNode && !anySideEffects(value) {
 // Discard.
@@ -635,7 +635,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes)
 // copy static to slice
 var_ = typecheck(var_, ctxExpr|ctxAssign)
 name, offset, ok := stataddr(var_)
- if !ok || name.Class() != ir.PEXTERN {
+ if !ok || name.Class_ != ir.PEXTERN {
 base.Fatalf("slicelit: %v", var_)
 }
 slicesym(name, offset, vstat, t.NumElem())
@@ -703,7 +703,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes)
 a = ir.NewAssignStmt(base.Pos, temp(t), nil)
 a = typecheck(a, ctxStmt)
 init.Append(a) // zero new temp
- a = a.(*ir.AssignStmt).Left()
+ a = a.(*ir.AssignStmt).X
 } else {
 init.Append(ir.NewUnaryExpr(base.Pos, ir.OVARDEF, a))
 }
@@ -722,14 +722,14 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes)

 // put dynamics into array (5)
 var index int64
- for _, value := range n.List().Slice() {
+ for _, value := range n.List.Slice() {
 if value.Op() == ir.OKEY {
 kv := value.(*ir.KeyExpr)
- index = indexconst(kv.Left())
+ index = indexconst(kv.Key)
 if index < 0 {
- base.Fatalf("slicelit: invalid index %v", kv.Left())
+ base.Fatalf("slicelit: invalid index %v", kv.Key)
 }
- value = kv.Right()
+ value = kv.Value
 }
 a := ir.NewIndexExpr(base.Pos, vauto, nodintconst(index))
 a.SetBounded(true)
@@ -778,16 +778,16 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) {
 // make the map var
 a := ir.NewCallExpr(base.Pos, ir.OMAKE, nil, nil)
 a.SetEsc(n.Esc())
- a.PtrList().Set2(ir.TypeNode(n.Type()), nodintconst(int64(n.List().Len())))
+ a.Args.Set2(ir.TypeNode(n.Type()), nodintconst(int64(n.List.Len())))
 litas(m, a, init)

- entries := n.List().Slice()
+ entries := n.List.Slice()

 // The order pass already removed any dynamic (runtime-computed) entries.
 // All remaining entries are static. Double-check that.
for _, r := range entries { r := r.(*ir.KeyExpr) - if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) { + if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) { base.Fatalf("maplit: entry is not a literal: %v", r) } } @@ -813,8 +813,8 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) { datae := ir.NewCompLitExpr(base.Pos, ir.OARRAYLIT, nil, nil) for _, r := range entries { r := r.(*ir.KeyExpr) - datak.PtrList().Append(r.Left()) - datae.PtrList().Append(r.Right()) + datak.List.Append(r.Key) + datae.List.Append(r.Value) } fixedlit(inInitFunction, initKindStatic, datak, vstatk, init) fixedlit(inInitFunction, initKindStatic, datae, vstate, init) @@ -837,7 +837,7 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) { body := ir.NewAssignStmt(base.Pos, lhs, rhs) loop := ir.NewForStmt(base.Pos, nil, cond, incr, nil) - loop.PtrBody().Set1(body) + loop.Body.Set1(body) loop.PtrInit().Set1(zero) appendWalkStmt(init, loop) @@ -853,7 +853,7 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) { for _, r := range entries { r := r.(*ir.KeyExpr) - index, elem := r.Left(), r.Right() + index, elem := r.Key, r.Value setlineno(index) appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmpkey, index)) @@ -890,19 +890,19 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { } var r ir.Node - if n.Right() != nil { + if n.Alloc != nil { // n.Right is stack temporary used as backing store. - appendWalkStmt(init, ir.NewAssignStmt(base.Pos, n.Right(), nil)) // zero backing store, just in case (#18410) - r = nodAddr(n.Right()) + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, n.Alloc, nil)) // zero backing store, just in case (#18410) + r = nodAddr(n.Alloc) } else { - r = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(n.Left().Type())) + r = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(n.X.Type())) r.SetEsc(n.Esc()) } appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, r)) var_ = ir.NewStarExpr(base.Pos, var_) var_ = typecheck(var_, ctxExpr|ctxAssign) - anylit(n.Left(), var_, init) + anylit(n.X, var_, init) case ir.OSTRUCTLIT, ir.OARRAYLIT: n := n.(*ir.CompLitExpr) @@ -910,7 +910,7 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { base.Fatalf("anylit: not struct/array") } - if isSimpleName(var_) && n.List().Len() > 4 { + if isSimpleName(var_) && n.List.Len() > 4 { // lay out static data vstat := readonlystaticname(t) @@ -935,7 +935,7 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { components = int64(t.NumFields()) } // initialization of an array or struct with unspecified components (missing fields or arrays) - if isSimpleName(var_) || int64(n.List().Len()) < components { + if isSimpleName(var_) || int64(n.List.Len()) < components { appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil)) } @@ -958,34 +958,34 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { // It returns true if n's effects have been added to init, // in which case n should be dropped from the program by the caller. 
func oaslit(n *ir.AssignStmt, init *ir.Nodes) bool { - if n.Left() == nil || n.Right() == nil { + if n.X == nil || n.Y == nil { // not a special composite literal assignment return false } - if n.Left().Type() == nil || n.Right().Type() == nil { + if n.X.Type() == nil || n.Y.Type() == nil { // not a special composite literal assignment return false } - if !isSimpleName(n.Left()) { + if !isSimpleName(n.X) { // not a special composite literal assignment return false } - if !types.Identical(n.Left().Type(), n.Right().Type()) { + if !types.Identical(n.X.Type(), n.Y.Type()) { // not a special composite literal assignment return false } - switch n.Right().Op() { + switch n.Y.Op() { default: // not a special composite literal assignment return false case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT: - if refersToCommonName(n.Left(), n.Right()) { + if refersToCommonName(n.X, n.Y) { // not a special composite literal assignment return false } - anylit(n.Right(), n.Left(), init) + anylit(n.Y, n.X, init) } return true @@ -1015,21 +1015,21 @@ func stataddr(n ir.Node) (name *ir.Name, offset int64, ok bool) { case ir.ODOT: n := n.(*ir.SelectorExpr) - if name, offset, ok = stataddr(n.Left()); !ok { + if name, offset, ok = stataddr(n.X); !ok { break } - offset += n.Offset() + offset += n.Offset return name, offset, true case ir.OINDEX: n := n.(*ir.IndexExpr) - if n.Left().Type().IsSlice() { + if n.X.Type().IsSlice() { break } - if name, offset, ok = stataddr(n.Left()); !ok { + if name, offset, ok = stataddr(n.X); !ok { break } - l := getlit(n.Right()) + l := getlit(n.Index) if l < 0 { break } @@ -1058,14 +1058,14 @@ func (s *InitSchedule) initplan(n ir.Node) { case ir.OARRAYLIT, ir.OSLICELIT: n := n.(*ir.CompLitExpr) var k int64 - for _, a := range n.List().Slice() { + for _, a := range n.List.Slice() { if a.Op() == ir.OKEY { kv := a.(*ir.KeyExpr) - k = indexconst(kv.Left()) + k = indexconst(kv.Key) if k < 0 { - base.Fatalf("initplan arraylit: invalid index %v", kv.Left()) + base.Fatalf("initplan arraylit: invalid index %v", kv.Key) } - a = kv.Right() + a = kv.Value } s.addvalue(p, k*n.Type().Elem().Width, a) k++ @@ -1073,25 +1073,25 @@ func (s *InitSchedule) initplan(n ir.Node) { case ir.OSTRUCTLIT: n := n.(*ir.CompLitExpr) - for _, a := range n.List().Slice() { + for _, a := range n.List.Slice() { if a.Op() != ir.OSTRUCTKEY { base.Fatalf("initplan structlit") } a := a.(*ir.StructKeyExpr) - if a.Sym().IsBlank() { + if a.Field.IsBlank() { continue } - s.addvalue(p, a.Offset(), a.Left()) + s.addvalue(p, a.Offset, a.Value) } case ir.OMAPLIT: n := n.(*ir.CompLitExpr) - for _, a := range n.List().Slice() { + for _, a := range n.List.Slice() { if a.Op() != ir.OKEY { base.Fatalf("initplan maplit") } a := a.(*ir.KeyExpr) - s.addvalue(p, -1, a.Right()) + s.addvalue(p, -1, a.Value) } } } @@ -1135,9 +1135,9 @@ func isZero(n ir.Node) bool { case ir.OARRAYLIT: n := n.(*ir.CompLitExpr) - for _, n1 := range n.List().Slice() { + for _, n1 := range n.List.Slice() { if n1.Op() == ir.OKEY { - n1 = n1.(*ir.KeyExpr).Right() + n1 = n1.(*ir.KeyExpr).Value } if !isZero(n1) { return false @@ -1147,9 +1147,9 @@ func isZero(n ir.Node) bool { case ir.OSTRUCTLIT: n := n.(*ir.CompLitExpr) - for _, n1 := range n.List().Slice() { + for _, n1 := range n.List.Slice() { n1 := n1.(*ir.StructKeyExpr) - if !isZero(n1.Left()) { + if !isZero(n1.Value) { return false } } @@ -1164,16 +1164,16 @@ func isvaluelit(n ir.Node) bool { } func genAsStatic(as *ir.AssignStmt) { - if as.Left().Type() == nil { + if as.X.Type() == nil { 
base.Fatalf("genAsStatic as.Left not typechecked") } - name, offset, ok := stataddr(as.Left()) - if !ok || (name.Class() != ir.PEXTERN && as.Left() != ir.BlankNode) { - base.Fatalf("genAsStatic: lhs %v", as.Left()) + name, offset, ok := stataddr(as.X) + if !ok || (name.Class_ != ir.PEXTERN && as.X != ir.BlankNode) { + base.Fatalf("genAsStatic: lhs %v", as.X) } - switch r := as.Right(); r.Op() { + switch r := as.Y; r.Op() { case ir.OLITERAL: litsym(name, offset, r, int(r.Type().Width)) return @@ -1183,13 +1183,13 @@ func genAsStatic(as *ir.AssignStmt) { return case ir.ONAME: r := r.(*ir.Name) - if r.Offset() != 0 { + if r.Offset_ != 0 { base.Fatalf("genAsStatic %+v", as) } - if r.Class() == ir.PFUNC { + if r.Class_ == ir.PFUNC { pfuncsym(name, offset, r) return } } - base.Fatalf("genAsStatic: rhs %v", as.Right()) + base.Fatalf("genAsStatic: rhs %v", as.Y) } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index dc3ea4be9eb3a..4660da0456c39 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -211,7 +211,7 @@ func initssaconfig() { // considered as the 0th parameter. This does not include the receiver of an // interface call. func getParam(n *ir.CallExpr, i int) *types.Field { - t := n.Left().Type() + t := n.X.Type() if n.Op() == ir.OCALLMETH { if i == 0 { return t.Recv() @@ -275,7 +275,7 @@ func (s *state) emitOpenDeferInfo() { var maxargsize int64 for i := len(s.openDefers) - 1; i >= 0; i-- { r := s.openDefers[i] - argsize := r.n.Left().Type().ArgWidth() + argsize := r.n.X.Type().ArgWidth() if argsize > maxargsize { maxargsize = argsize } @@ -287,7 +287,7 @@ func (s *state) emitOpenDeferInfo() { // Write in reverse-order, for ease of running in that order at runtime for i := len(s.openDefers) - 1; i >= 0; i-- { r := s.openDefers[i] - off = dvarint(x, off, r.n.Left().Type().ArgWidth()) + off = dvarint(x, off, r.n.X.Type().ArgWidth()) off = dvarint(x, off, -r.closureNode.FrameOffset()) numArgs := len(r.argNodes) if r.rcvrNode != nil { @@ -323,7 +323,7 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { if printssa { astBuf = &bytes.Buffer{} ir.FDumpList(astBuf, "buildssa-enter", fn.Enter) - ir.FDumpList(astBuf, "buildssa-body", fn.Body()) + ir.FDumpList(astBuf, "buildssa-body", fn.Body) ir.FDumpList(astBuf, "buildssa-exit", fn.Exit) if ssaDumpStdout { fmt.Println("generating SSA for", name) @@ -438,7 +438,7 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { var args []ssa.Param var results []ssa.Param for _, n := range fn.Dcl { - switch n.Class() { + switch n.Class_ { case ir.PPARAM: s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem) args = append(args, ssa.Param{Type: n.Type(), Offset: int32(n.FrameOffset())}) @@ -459,13 +459,13 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { case ir.PFUNC: // local function - already handled by frontend default: - s.Fatalf("local variable with class %v unimplemented", n.Class()) + s.Fatalf("local variable with class %v unimplemented", n.Class_) } } // Populate SSAable arguments. for _, n := range fn.Dcl { - if n.Class() == ir.PPARAM && s.canSSA(n) { + if n.Class_ == ir.PPARAM && s.canSSA(n) { v := s.newValue0A(ssa.OpArg, n.Type(), n) s.vars[n] = v s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself. 
@@ -474,7 +474,7 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { // Convert the AST-based IR to the SSA-based IR s.stmtList(fn.Enter) - s.stmtList(fn.Body()) + s.stmtList(fn.Body) // fallthrough to exit if s.curBlock != nil { @@ -1028,7 +1028,7 @@ func (s *state) instrumentMove(t *types.Type, dst, src *ssa.Value) { } func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrumentKind) { - if !s.curfn.Func().InstrumentBody() { + if !s.curfn.InstrumentBody() { return } @@ -1151,7 +1151,7 @@ func (s *state) stmt(n ir.Node) { case ir.OBLOCK: n := n.(*ir.BlockStmt) - s.stmtList(n.List()) + s.stmtList(n.List) // No-ops case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL: @@ -1168,9 +1168,9 @@ func (s *state) stmt(n ir.Node) { case ir.OCALLMETH, ir.OCALLINTER: n := n.(*ir.CallExpr) s.callResult(n, callNormal) - if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.ONAME && n.Left().(*ir.Name).Class() == ir.PFUNC { - if fn := n.Left().Sym().Name; base.Flag.CompilingRuntime && fn == "throw" || - n.Left().Sym().Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") { + if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class_ == ir.PFUNC { + if fn := n.X.Sym().Name; base.Flag.CompilingRuntime && fn == "throw" || + n.X.Sym().Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") { m := s.mem() b := s.endBlock() b.Kind = ssa.BlockExit @@ -1194,23 +1194,23 @@ func (s *state) stmt(n ir.Node) { base.WarnfAt(n.Pos(), "%s defer", defertype) } if s.hasOpenDefers { - s.openDeferRecord(n.Left().(*ir.CallExpr)) + s.openDeferRecord(n.Call.(*ir.CallExpr)) } else { d := callDefer if n.Esc() == EscNever { d = callDeferStack } - s.callResult(n.Left().(*ir.CallExpr), d) + s.callResult(n.Call.(*ir.CallExpr), d) } case ir.OGO: n := n.(*ir.GoDeferStmt) - s.callResult(n.Left().(*ir.CallExpr), callGo) + s.callResult(n.Call.(*ir.CallExpr), callGo) case ir.OAS2DOTTYPE: n := n.(*ir.AssignListStmt) - res, resok := s.dottype(n.Rlist().First().(*ir.TypeAssertExpr), true) + res, resok := s.dottype(n.Rhs.First().(*ir.TypeAssertExpr), true) deref := false - if !canSSAType(n.Rlist().First().Type()) { + if !canSSAType(n.Rhs.First().Type()) { if res.Op != ssa.OpLoad { s.Fatalf("dottype of non-load") } @@ -1224,33 +1224,33 @@ func (s *state) stmt(n ir.Node) { deref = true res = res.Args[0] } - s.assign(n.List().First(), res, deref, 0) - s.assign(n.List().Second(), resok, false, 0) + s.assign(n.Lhs.First(), res, deref, 0) + s.assign(n.Lhs.Second(), resok, false, 0) return case ir.OAS2FUNC: // We come here only when it is an intrinsic call returning two values. 
n := n.(*ir.AssignListStmt) - call := n.Rlist().First().(*ir.CallExpr) + call := n.Rhs.First().(*ir.CallExpr) if !IsIntrinsicCall(call) { s.Fatalf("non-intrinsic AS2FUNC not expanded %v", call) } v := s.intrinsicCall(call) - v1 := s.newValue1(ssa.OpSelect0, n.List().First().Type(), v) - v2 := s.newValue1(ssa.OpSelect1, n.List().Second().Type(), v) - s.assign(n.List().First(), v1, false, 0) - s.assign(n.List().Second(), v2, false, 0) + v1 := s.newValue1(ssa.OpSelect0, n.Lhs.First().Type(), v) + v2 := s.newValue1(ssa.OpSelect1, n.Lhs.Second().Type(), v) + s.assign(n.Lhs.First(), v1, false, 0) + s.assign(n.Lhs.Second(), v2, false, 0) return case ir.ODCL: n := n.(*ir.Decl) - if n.Left().(*ir.Name).Class() == ir.PAUTOHEAP { + if n.X.(*ir.Name).Class_ == ir.PAUTOHEAP { s.Fatalf("DCL %v", n) } case ir.OLABEL: n := n.(*ir.LabelStmt) - sym := n.Sym() + sym := n.Label lab := s.label(sym) // The label might already have a target block via a goto. @@ -1268,7 +1268,7 @@ func (s *state) stmt(n ir.Node) { case ir.OGOTO: n := n.(*ir.BranchStmt) - sym := n.Sym() + sym := n.Label lab := s.label(sym) if lab.target == nil { @@ -1281,7 +1281,7 @@ func (s *state) stmt(n ir.Node) { case ir.OAS: n := n.(*ir.AssignStmt) - if n.Left() == n.Right() && n.Left().Op() == ir.ONAME { + if n.X == n.Y && n.X.Op() == ir.ONAME { // An x=x assignment. No point in doing anything // here. In addition, skipping this assignment // prevents generating: @@ -1293,7 +1293,7 @@ func (s *state) stmt(n ir.Node) { } // Evaluate RHS. - rhs := n.Right() + rhs := n.Y if rhs != nil { switch rhs.Op() { case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT: @@ -1309,13 +1309,13 @@ func (s *state) stmt(n ir.Node) { // Check whether we're writing the result of an append back to the same slice. // If so, we handle it specially to avoid write barriers on the fast // (non-growth) path. - if !samesafeexpr(n.Left(), rhs.List().First()) || base.Flag.N != 0 { + if !samesafeexpr(n.X, rhs.Args.First()) || base.Flag.N != 0 { break } // If the slice can be SSA'd, it'll be on the stack, // so there will be no write barriers, // so there's no need to attempt to prevent them. - if s.canSSA(n.Left()) { + if s.canSSA(n.X) { if base.Debug.Append > 0 { // replicating old diagnostic message base.WarnfAt(n.Pos(), "append: len-only update (in local slice)") } @@ -1329,7 +1329,7 @@ func (s *state) stmt(n ir.Node) { } } - if ir.IsBlank(n.Left()) { + if ir.IsBlank(n.X) { // _ = rhs // Just evaluate rhs for side-effects. if rhs != nil { @@ -1339,10 +1339,10 @@ func (s *state) stmt(n ir.Node) { } var t *types.Type - if n.Right() != nil { - t = n.Right().Type() + if n.Y != nil { + t = n.Y.Type() } else { - t = n.Left().Type() + t = n.X.Type() } var r *ssa.Value @@ -1362,7 +1362,7 @@ func (s *state) stmt(n ir.Node) { } var skip skipMask - if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && samesafeexpr(rhs.(*ir.SliceExpr).Left(), n.Left()) { + if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && samesafeexpr(rhs.(*ir.SliceExpr).X, n.X) { // We're assigning a slicing operation back to its source. // Don't write back fields we aren't changing. See issue #14855. 
rhs := rhs.(*ir.SliceExpr) @@ -1392,49 +1392,49 @@ func (s *state) stmt(n ir.Node) { } } - s.assign(n.Left(), r, deref, skip) + s.assign(n.X, r, deref, skip) case ir.OIF: n := n.(*ir.IfStmt) - if ir.IsConst(n.Left(), constant.Bool) { - s.stmtList(n.Left().Init()) - if ir.BoolVal(n.Left()) { - s.stmtList(n.Body()) + if ir.IsConst(n.Cond, constant.Bool) { + s.stmtList(n.Cond.Init()) + if ir.BoolVal(n.Cond) { + s.stmtList(n.Body) } else { - s.stmtList(n.Rlist()) + s.stmtList(n.Else) } break } bEnd := s.f.NewBlock(ssa.BlockPlain) var likely int8 - if n.Likely() { + if n.Likely { likely = 1 } var bThen *ssa.Block - if n.Body().Len() != 0 { + if n.Body.Len() != 0 { bThen = s.f.NewBlock(ssa.BlockPlain) } else { bThen = bEnd } var bElse *ssa.Block - if n.Rlist().Len() != 0 { + if n.Else.Len() != 0 { bElse = s.f.NewBlock(ssa.BlockPlain) } else { bElse = bEnd } - s.condBranch(n.Left(), bThen, bElse, likely) + s.condBranch(n.Cond, bThen, bElse, likely) - if n.Body().Len() != 0 { + if n.Body.Len() != 0 { s.startBlock(bThen) - s.stmtList(n.Body()) + s.stmtList(n.Body) if b := s.endBlock(); b != nil { b.AddEdgeTo(bEnd) } } - if n.Rlist().Len() != 0 { + if n.Else.Len() != 0 { s.startBlock(bElse) - s.stmtList(n.Rlist()) + s.stmtList(n.Else) if b := s.endBlock(); b != nil { b.AddEdgeTo(bEnd) } @@ -1443,7 +1443,7 @@ func (s *state) stmt(n ir.Node) { case ir.ORETURN: n := n.(*ir.ReturnStmt) - s.stmtList(n.List()) + s.stmtList(n.Results) b := s.exit() b.Pos = s.lastPos.WithIsStmt() @@ -1451,12 +1451,12 @@ func (s *state) stmt(n ir.Node) { n := n.(*ir.BranchStmt) b := s.exit() b.Kind = ssa.BlockRetJmp // override BlockRet - b.Aux = callTargetLSym(n.Sym(), s.curfn.LSym) + b.Aux = callTargetLSym(n.Label, s.curfn.LSym) case ir.OCONTINUE, ir.OBREAK: n := n.(*ir.BranchStmt) var to *ssa.Block - if n.Sym() == nil { + if n.Label == nil { // plain break/continue switch n.Op() { case ir.OCONTINUE: @@ -1466,7 +1466,7 @@ func (s *state) stmt(n ir.Node) { } } else { // labeled break/continue; look up the target - sym := n.Sym() + sym := n.Label lab := s.label(sym) switch n.Op() { case ir.OCONTINUE: @@ -1501,8 +1501,8 @@ func (s *state) stmt(n ir.Node) { b.AddEdgeTo(bCond) // generate code to test condition s.startBlock(bCond) - if n.Left() != nil { - s.condBranch(n.Left(), bBody, bEnd, 1) + if n.Cond != nil { + s.condBranch(n.Cond, bBody, bEnd, 1) } else { b := s.endBlock() b.Kind = ssa.BlockPlain @@ -1519,7 +1519,7 @@ func (s *state) stmt(n ir.Node) { s.continueTo = bIncr s.breakTo = bEnd var lab *ssaLabel - if sym := n.Sym(); sym != nil { + if sym := n.Label; sym != nil { // labeled for loop lab = s.label(sym) lab.continueTarget = bIncr @@ -1528,7 +1528,7 @@ func (s *state) stmt(n ir.Node) { // generate body s.startBlock(bBody) - s.stmtList(n.Body()) + s.stmtList(n.Body) // tear down continue/break s.continueTo = prevContinue @@ -1545,8 +1545,8 @@ func (s *state) stmt(n ir.Node) { // generate incr (and, for OFORUNTIL, condition) s.startBlock(bIncr) - if n.Right() != nil { - s.stmt(n.Right()) + if n.Post != nil { + s.stmt(n.Post) } if n.Op() == ir.OFOR { if b := s.endBlock(); b != nil { @@ -1561,10 +1561,10 @@ func (s *state) stmt(n ir.Node) { // bCond is unused in OFORUNTIL, so repurpose it. 
bLateIncr := bCond // test condition - s.condBranch(n.Left(), bLateIncr, bEnd, 1) + s.condBranch(n.Cond, bLateIncr, bEnd, 1) // generate late increment s.startBlock(bLateIncr) - s.stmtList(n.List()) + s.stmtList(n.Late) s.endBlock().AddEdgeTo(bBody) } @@ -1581,12 +1581,12 @@ func (s *state) stmt(n ir.Node) { var body ir.Nodes if n.Op() == ir.OSWITCH { n := n.(*ir.SwitchStmt) - sym = n.Sym() - body = n.Body() + sym = n.Label + body = n.Compiled } else { n := n.(*ir.SelectStmt) - sym = n.Sym() - body = n.Body() + sym = n.Label + body = n.Compiled } var lab *ssaLabel @@ -1616,8 +1616,8 @@ func (s *state) stmt(n ir.Node) { case ir.OVARDEF: n := n.(*ir.UnaryExpr) - if !s.canSSA(n.Left()) { - s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left().(*ir.Name), s.mem(), false) + if !s.canSSA(n.X) { + s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.X.(*ir.Name), s.mem(), false) } case ir.OVARKILL: // Insert a varkill op to record that a variable is no longer live. @@ -1625,18 +1625,18 @@ func (s *state) stmt(n ir.Node) { // varkill in the store chain is enough to keep it correctly ordered // with respect to call ops. n := n.(*ir.UnaryExpr) - if !s.canSSA(n.Left()) { - s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left().(*ir.Name), s.mem(), false) + if !s.canSSA(n.X) { + s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.X.(*ir.Name), s.mem(), false) } case ir.OVARLIVE: // Insert a varlive op to record that a variable is still live. n := n.(*ir.UnaryExpr) - v := n.Left().(*ir.Name) + v := n.X.(*ir.Name) if !v.Addrtaken() { s.Fatalf("VARLIVE variable %v must have Addrtaken set", v) } - switch v.Class() { + switch v.Class_ { case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT: default: s.Fatalf("VARLIVE variable %v must be Auto or Arg", v) @@ -1645,12 +1645,12 @@ func (s *state) stmt(n ir.Node) { case ir.OCHECKNIL: n := n.(*ir.UnaryExpr) - p := s.expr(n.Left()) + p := s.expr(n.X) s.nilCheck(p) case ir.OINLMARK: n := n.(*ir.InlineMarkStmt) - s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Offset(), s.mem()) + s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Index, s.mem()) default: s.Fatalf("unhandled stmt %v", n.Op()) @@ -2118,19 +2118,19 @@ func (s *state) expr(n ir.Node) *ssa.Value { switch n.Op() { case ir.OBYTES2STRTMP: n := n.(*ir.ConvExpr) - slice := s.expr(n.Left()) + slice := s.expr(n.X) ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice) len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) return s.newValue2(ssa.OpStringMake, n.Type(), ptr, len) case ir.OSTR2BYTESTMP: n := n.(*ir.ConvExpr) - str := s.expr(n.Left()) + str := s.expr(n.X) ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str) len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], str) return s.newValue3(ssa.OpSliceMake, n.Type(), ptr, len, len) case ir.OCFUNC: n := n.(*ir.UnaryExpr) - aux := n.Left().Sym().Linksym() + aux := n.X.Sym().Linksym() return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb) case ir.OMETHEXPR: n := n.(*ir.MethodExpr) @@ -2138,7 +2138,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb) case ir.ONAME: n := n.(*ir.Name) - if n.Class() == ir.PFUNC { + if n.Class_ == ir.PFUNC { // "value" of a function is the address of the function's closure sym := funcsym(n.Sym()).Linksym() return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb) @@ -2230,11 +2230,11 @@ func (s *state) expr(n ir.Node) *ssa.Value { case ir.OCONVNOP: n := 
n.(*ir.ConvExpr) to := n.Type() - from := n.Left().Type() + from := n.X.Type() // Assume everything will work out, so set up our return value. // Anything interesting that happens from here is a fatal. - x := s.expr(n.Left()) + x := s.expr(n.X) if to == from { return x } @@ -2298,9 +2298,9 @@ func (s *state) expr(n ir.Node) *ssa.Value { case ir.OCONV: n := n.(*ir.ConvExpr) - x := s.expr(n.Left()) - ft := n.Left().Type() // from type - tt := n.Type() // to type + x := s.expr(n.X) + ft := n.X.Type() // from type + tt := n.Type() // to type if ft.IsBoolean() && tt.IsKind(types.TUINT8) { // Bool -> uint8 is generated internally when indexing into runtime.staticbyte. return s.newValue1(ssa.OpCopy, n.Type(), x) @@ -2465,7 +2465,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x))) } - s.Fatalf("unhandled OCONV %s -> %s", n.Left().Type().Kind(), n.Type().Kind()) + s.Fatalf("unhandled OCONV %s -> %s", n.X.Type().Kind(), n.Type().Kind()) return nil case ir.ODOTTYPE: @@ -2476,10 +2476,10 @@ func (s *state) expr(n ir.Node) *ssa.Value { // binary ops case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT: n := n.(*ir.BinaryExpr) - a := s.expr(n.Left()) - b := s.expr(n.Right()) - if n.Left().Type().IsComplex() { - pt := floatForComplex(n.Left().Type()) + a := s.expr(n.X) + b := s.expr(n.Y) + if n.X.Type().IsComplex() { + pt := floatForComplex(n.X.Type()) op := s.ssaOp(ir.OEQ, pt) r := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)) i := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)) @@ -2502,16 +2502,16 @@ func (s *state) expr(n ir.Node) *ssa.Value { case ir.OGT: op, a, b = ir.OLT, b, a } - if n.Left().Type().IsFloat() { + if n.X.Type().IsFloat() { // float comparison - return s.newValueOrSfCall2(s.ssaOp(op, n.Left().Type()), types.Types[types.TBOOL], a, b) + return s.newValueOrSfCall2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b) } // integer comparison - return s.newValue2(s.ssaOp(op, n.Left().Type()), types.Types[types.TBOOL], a, b) + return s.newValue2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b) case ir.OMUL: n := n.(*ir.BinaryExpr) - a := s.expr(n.Left()) - b := s.expr(n.Right()) + a := s.expr(n.X) + b := s.expr(n.Y) if n.Type().IsComplex() { mulop := ssa.OpMul64F addop := ssa.OpAdd64F @@ -2550,8 +2550,8 @@ func (s *state) expr(n ir.Node) *ssa.Value { case ir.ODIV: n := n.(*ir.BinaryExpr) - a := s.expr(n.Left()) - b := s.expr(n.Right()) + a := s.expr(n.X) + b := s.expr(n.Y) if n.Type().IsComplex() { // TODO this is not executed because the front-end substitutes a runtime call. 
// That probably ought to change; with modest optimization the widen/narrow @@ -2598,13 +2598,13 @@ func (s *state) expr(n ir.Node) *ssa.Value { return s.intDivide(n, a, b) case ir.OMOD: n := n.(*ir.BinaryExpr) - a := s.expr(n.Left()) - b := s.expr(n.Right()) + a := s.expr(n.X) + b := s.expr(n.Y) return s.intDivide(n, a, b) case ir.OADD, ir.OSUB: n := n.(*ir.BinaryExpr) - a := s.expr(n.Left()) - b := s.expr(n.Right()) + a := s.expr(n.X) + b := s.expr(n.Y) if n.Type().IsComplex() { pt := floatForComplex(n.Type()) op := s.ssaOp(n.Op(), pt) @@ -2618,19 +2618,19 @@ func (s *state) expr(n ir.Node) *ssa.Value { return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b) case ir.OAND, ir.OOR, ir.OXOR: n := n.(*ir.BinaryExpr) - a := s.expr(n.Left()) - b := s.expr(n.Right()) + a := s.expr(n.X) + b := s.expr(n.Y) return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b) case ir.OANDNOT: n := n.(*ir.BinaryExpr) - a := s.expr(n.Left()) - b := s.expr(n.Right()) + a := s.expr(n.X) + b := s.expr(n.Y) b = s.newValue1(s.ssaOp(ir.OBITNOT, b.Type), b.Type, b) return s.newValue2(s.ssaOp(ir.OAND, n.Type()), a.Type, a, b) case ir.OLSH, ir.ORSH: n := n.(*ir.BinaryExpr) - a := s.expr(n.Left()) - b := s.expr(n.Right()) + a := s.expr(n.X) + b := s.expr(n.Y) bt := b.Type if bt.IsSigned() { cmp := s.newValue2(s.ssaOp(ir.OLE, bt), types.Types[types.TBOOL], s.zeroVal(bt), b) @@ -2653,7 +2653,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { // Using var in the subsequent block introduces the // necessary phi variable. n := n.(*ir.LogicalExpr) - el := s.expr(n.Left()) + el := s.expr(n.X) s.vars[n] = el b := s.endBlock() @@ -2675,7 +2675,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { } s.startBlock(bRight) - er := s.expr(n.Right()) + er := s.expr(n.Y) s.vars[n] = er b = s.endBlock() @@ -2685,14 +2685,14 @@ func (s *state) expr(n ir.Node) *ssa.Value { return s.variable(n, types.Types[types.TBOOL]) case ir.OCOMPLEX: n := n.(*ir.BinaryExpr) - r := s.expr(n.Left()) - i := s.expr(n.Right()) + r := s.expr(n.X) + i := s.expr(n.Y) return s.newValue2(ssa.OpComplexMake, n.Type(), r, i) // unary ops case ir.ONEG: n := n.(*ir.UnaryExpr) - a := s.expr(n.Left()) + a := s.expr(n.X) if n.Type().IsComplex() { tp := floatForComplex(n.Type()) negop := s.ssaOp(n.Op(), tp) @@ -2703,31 +2703,31 @@ func (s *state) expr(n ir.Node) *ssa.Value { return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a) case ir.ONOT, ir.OBITNOT: n := n.(*ir.UnaryExpr) - a := s.expr(n.Left()) + a := s.expr(n.X) return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a) case ir.OIMAG, ir.OREAL: n := n.(*ir.UnaryExpr) - a := s.expr(n.Left()) - return s.newValue1(s.ssaOp(n.Op(), n.Left().Type()), n.Type(), a) + a := s.expr(n.X) + return s.newValue1(s.ssaOp(n.Op(), n.X.Type()), n.Type(), a) case ir.OPLUS: n := n.(*ir.UnaryExpr) - return s.expr(n.Left()) + return s.expr(n.X) case ir.OADDR: n := n.(*ir.AddrExpr) - return s.addr(n.Left()) + return s.addr(n.X) case ir.ORESULT: n := n.(*ir.ResultExpr) if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall { // Do the old thing - addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset()) + addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset) return s.rawLoad(n.Type(), addr) } - which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Offset()) + which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Offset) if which == -1 { // Do the old thing // TODO: Panic instead. 
- addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset()) + addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset) return s.rawLoad(n.Type(), addr) } if canSSAType(n.Type()) { @@ -2739,17 +2739,17 @@ func (s *state) expr(n ir.Node) *ssa.Value { case ir.ODEREF: n := n.(*ir.StarExpr) - p := s.exprPtr(n.Left(), n.Bounded(), n.Pos()) + p := s.exprPtr(n.X, n.Bounded(), n.Pos()) return s.load(n.Type(), p) case ir.ODOT: n := n.(*ir.SelectorExpr) - if n.Left().Op() == ir.OSTRUCTLIT { + if n.X.Op() == ir.OSTRUCTLIT { // All literals with nonzero fields have already been // rewritten during walk. Any that remain are just T{} // or equivalents. Use the zero value. - if !isZero(n.Left()) { - s.Fatalf("literal with nonzero value in SSA: %v", n.Left()) + if !isZero(n.X) { + s.Fatalf("literal with nonzero value in SSA: %v", n.X) } return s.zeroVal(n.Type()) } @@ -2761,46 +2761,46 @@ func (s *state) expr(n ir.Node) *ssa.Value { p := s.addr(n) return s.load(n.Type(), p) } - v := s.expr(n.Left()) + v := s.expr(n.X) return s.newValue1I(ssa.OpStructSelect, n.Type(), int64(fieldIdx(n)), v) case ir.ODOTPTR: n := n.(*ir.SelectorExpr) - p := s.exprPtr(n.Left(), n.Bounded(), n.Pos()) - p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset(), p) + p := s.exprPtr(n.X, n.Bounded(), n.Pos()) + p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset, p) return s.load(n.Type(), p) case ir.OINDEX: n := n.(*ir.IndexExpr) switch { - case n.Left().Type().IsString(): - if n.Bounded() && ir.IsConst(n.Left(), constant.String) && ir.IsConst(n.Right(), constant.Int) { + case n.X.Type().IsString(): + if n.Bounded() && ir.IsConst(n.X, constant.String) && ir.IsConst(n.Index, constant.Int) { // Replace "abc"[1] with 'b'. // Delayed until now because "abc"[1] is not an ideal constant. // See test/fixedbugs/issue11370.go. - return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(ir.StringVal(n.Left())[ir.Int64Val(n.Right())]))) + return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(ir.StringVal(n.X)[ir.Int64Val(n.Index)]))) } - a := s.expr(n.Left()) - i := s.expr(n.Right()) + a := s.expr(n.X) + i := s.expr(n.Index) len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], a) i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) ptrtyp := s.f.Config.Types.BytePtr ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a) - if ir.IsConst(n.Right(), constant.Int) { - ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, ir.Int64Val(n.Right()), ptr) + if ir.IsConst(n.Index, constant.Int) { + ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, ir.Int64Val(n.Index), ptr) } else { ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i) } return s.load(types.Types[types.TUINT8], ptr) - case n.Left().Type().IsSlice(): + case n.X.Type().IsSlice(): p := s.addr(n) - return s.load(n.Left().Type().Elem(), p) - case n.Left().Type().IsArray(): - if canSSAType(n.Left().Type()) { + return s.load(n.X.Type().Elem(), p) + case n.X.Type().IsArray(): + if canSSAType(n.X.Type()) { // SSA can handle arrays of length at most 1. - bound := n.Left().Type().NumElem() - a := s.expr(n.Left()) - i := s.expr(n.Right()) + bound := n.X.Type().NumElem() + a := s.expr(n.X) + i := s.expr(n.Index) if bound == 0 { // Bounds check will never succeed. Might as well // use constants for the bounds check. 
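(Annotation, not part of the patch: in the OINDEX string case of the preceding hunk, the SSA builder replaces an index into a constant string with the byte itself, so "abc"[1] becomes 'b'. As the hunk's comment notes, the fold is delayed until SSA construction because "abc"[1] is not an ideal constant in Go; indexing is not a constant expression (see test/fixedbugs/issue11370.go, cited in the hunk). A small runnable illustration of that language rule:

package main

import "fmt"

func main() {
	// Indexing is not a constant expression, so this would not compile:
	//
	//	const c = "abc"[1] // error: "abc"[1] is not constant
	//
	// The byte is still fully determined at compile time, which is why
	// the compiler can fold it once both the string and the index are
	// known, as the hunk above does during SSA construction.
	b := "abc"[1]
	fmt.Printf("%c\n", b) // prints: b
}

End of annotation.)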
@@ -2814,33 +2814,33 @@ func (s *state) expr(n ir.Node) *ssa.Value { return s.newValue1I(ssa.OpArraySelect, n.Type(), 0, a) } p := s.addr(n) - return s.load(n.Left().Type().Elem(), p) + return s.load(n.X.Type().Elem(), p) default: - s.Fatalf("bad type for index %v", n.Left().Type()) + s.Fatalf("bad type for index %v", n.X.Type()) return nil } case ir.OLEN, ir.OCAP: n := n.(*ir.UnaryExpr) switch { - case n.Left().Type().IsSlice(): + case n.X.Type().IsSlice(): op := ssa.OpSliceLen if n.Op() == ir.OCAP { op = ssa.OpSliceCap } - return s.newValue1(op, types.Types[types.TINT], s.expr(n.Left())) - case n.Left().Type().IsString(): // string; not reachable for OCAP - return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.Left())) - case n.Left().Type().IsMap(), n.Left().Type().IsChan(): - return s.referenceTypeBuiltin(n, s.expr(n.Left())) + return s.newValue1(op, types.Types[types.TINT], s.expr(n.X)) + case n.X.Type().IsString(): // string; not reachable for OCAP + return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.X)) + case n.X.Type().IsMap(), n.X.Type().IsChan(): + return s.referenceTypeBuiltin(n, s.expr(n.X)) default: // array - return s.constInt(types.Types[types.TINT], n.Left().Type().NumElem()) + return s.constInt(types.Types[types.TINT], n.X.Type().NumElem()) } case ir.OSPTR: n := n.(*ir.UnaryExpr) - a := s.expr(n.Left()) - if n.Left().Type().IsSlice() { + a := s.expr(n.X) + if n.X.Type().IsSlice() { return s.newValue1(ssa.OpSlicePtr, n.Type(), a) } else { return s.newValue1(ssa.OpStringPtr, n.Type(), a) @@ -2848,30 +2848,30 @@ func (s *state) expr(n ir.Node) *ssa.Value { case ir.OITAB: n := n.(*ir.UnaryExpr) - a := s.expr(n.Left()) + a := s.expr(n.X) return s.newValue1(ssa.OpITab, n.Type(), a) case ir.OIDATA: n := n.(*ir.UnaryExpr) - a := s.expr(n.Left()) + a := s.expr(n.X) return s.newValue1(ssa.OpIData, n.Type(), a) case ir.OEFACE: n := n.(*ir.BinaryExpr) - tab := s.expr(n.Left()) - data := s.expr(n.Right()) + tab := s.expr(n.X) + data := s.expr(n.Y) return s.newValue2(ssa.OpIMake, n.Type(), tab, data) case ir.OSLICEHEADER: n := n.(*ir.SliceHeaderExpr) - p := s.expr(n.Left()) - l := s.expr(n.List().First()) - c := s.expr(n.List().Second()) + p := s.expr(n.Ptr) + l := s.expr(n.LenCap.First()) + c := s.expr(n.LenCap.Second()) return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c) case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR: n := n.(*ir.SliceExpr) - v := s.expr(n.Left()) + v := s.expr(n.X) var i, j, k *ssa.Value low, high, max := n.SliceBounds() if low != nil { @@ -2888,7 +2888,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { case ir.OSLICESTR: n := n.(*ir.SliceExpr) - v := s.expr(n.Left()) + v := s.expr(n.X) var i, j *ssa.Value low, high, _ := n.SliceBounds() if low != nil { @@ -2933,7 +2933,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { if n.Type().Elem().Size() == 0 { return s.newValue1A(ssa.OpAddr, n.Type(), zerobaseSym, s.sb) } - typ := s.expr(n.Left()) + typ := s.expr(n.X) vv := s.rtcall(newobject, true, []*types.Type{n.Type()}, typ) return vv[0] @@ -2987,7 +2987,7 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value { pt := types.NewPtr(et) // Evaluate slice - sn := n.List().First() // the slice node is the first in the list + sn := n.Args.First() // the slice node is the first in the list var slice, addr *ssa.Value if inplace { @@ -3002,7 +3002,7 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value { assign := s.f.NewBlock(ssa.BlockPlain) // Decide if we need to grow - nargs := int64(n.List().Len() - 1) + 
nargs := int64(n.Args.Len() - 1) p := s.newValue1(ssa.OpSlicePtr, pt, slice) l := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) c := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], slice) @@ -3027,13 +3027,13 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value { // Call growslice s.startBlock(grow) - taddr := s.expr(n.Left()) + taddr := s.expr(n.X) r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[types.TINT], types.Types[types.TINT]}, taddr, p, l, c, nl) if inplace { if sn.Op() == ir.ONAME { sn := sn.(*ir.Name) - if sn.Class() != ir.PEXTERN { + if sn.Class_ != ir.PEXTERN { // Tell liveness we're about to build a new slice s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem()) } @@ -3071,7 +3071,7 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value { store bool } args := make([]argRec, 0, nargs) - for _, n := range n.List().Slice()[1:] { + for _, n := range n.Args.Slice()[1:] { if canSSAType(n.Type()) { args = append(args, argRec{v: s.expr(n), store: true}) } else { @@ -3116,9 +3116,9 @@ func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) { cond := cond.(*ir.LogicalExpr) mid := s.f.NewBlock(ssa.BlockPlain) s.stmtList(cond.Init()) - s.condBranch(cond.Left(), mid, no, max8(likely, 0)) + s.condBranch(cond.X, mid, no, max8(likely, 0)) s.startBlock(mid) - s.condBranch(cond.Right(), yes, no, likely) + s.condBranch(cond.Y, yes, no, likely) return // Note: if likely==1, then both recursive calls pass 1. // If likely==-1, then we don't have enough information to decide @@ -3130,9 +3130,9 @@ func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) { cond := cond.(*ir.LogicalExpr) mid := s.f.NewBlock(ssa.BlockPlain) s.stmtList(cond.Init()) - s.condBranch(cond.Left(), yes, mid, min8(likely, 0)) + s.condBranch(cond.X, yes, mid, min8(likely, 0)) s.startBlock(mid) - s.condBranch(cond.Right(), yes, no, likely) + s.condBranch(cond.Y, yes, no, likely) return // Note: if likely==-1, then both recursive calls pass -1. // If likely==1, then we don't have enough info to decide @@ -3140,12 +3140,12 @@ func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) { case ir.ONOT: cond := cond.(*ir.UnaryExpr) s.stmtList(cond.Init()) - s.condBranch(cond.Left(), no, yes, -likely) + s.condBranch(cond.X, no, yes, -likely) return case ir.OCONVNOP: cond := cond.(*ir.ConvExpr) s.stmtList(cond.Init()) - s.condBranch(cond.Left(), yes, no, likely) + s.condBranch(cond.X, yes, no, likely) return } c := s.expr(cond) @@ -3192,12 +3192,12 @@ func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask // Grab information about the structure type. left := left.(*ir.SelectorExpr) - t := left.Left().Type() + t := left.X.Type() nf := t.NumFields() idx := fieldIdx(left) // Grab old value of structure. - old := s.expr(left.Left()) + old := s.expr(left.X) // Make new structure. new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t) @@ -3212,20 +3212,20 @@ func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask } // Recursively assign the new value we've made to the base of the dot op. - s.assign(left.Left(), new, false, 0) + s.assign(left.X, new, false, 0) // TODO: do we need to update named values here? 
return } - if left.Op() == ir.OINDEX && left.(*ir.IndexExpr).Left().Type().IsArray() { + if left.Op() == ir.OINDEX && left.(*ir.IndexExpr).X.Type().IsArray() { left := left.(*ir.IndexExpr) s.pushLine(left.Pos()) defer s.popLine() // We're assigning to an element of an ssa-able array. // a[i] = v - t := left.Left().Type() + t := left.X.Type() n := t.NumElem() - i := s.expr(left.Right()) // index + i := s.expr(left.Index) // index if n == 0 { // The bounds check must fail. Might as well // ignore the actual index and just use zeros. @@ -3240,7 +3240,7 @@ func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask len := s.constInt(types.Types[types.TINT], 1) s.boundsCheck(i, len, ssa.BoundsIndex, false) // checks i == 0 v := s.newValue1(ssa.OpArrayMake1, t, right) - s.assign(left.Left(), v, false, 0) + s.assign(left.X, v, false, 0) return } left := left.(*ir.Name) @@ -3252,7 +3252,7 @@ func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask // If this assignment clobbers an entire local variable, then emit // OpVarDef so liveness analysis knows the variable is redefined. - if base := clobberBase(left); base.Op() == ir.ONAME && base.(*ir.Name).Class() != ir.PEXTERN && skip == 0 { + if base := clobberBase(left); base.Op() == ir.ONAME && base.(*ir.Name).Class_ != ir.PEXTERN && skip == 0 { s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base.(*ir.Name), s.mem(), !ir.IsAutoTmp(base)) } @@ -4333,7 +4333,7 @@ func isIntrinsicCall(n *ir.CallExpr) bool { if n == nil { return false } - name, ok := n.Left().(*ir.Name) + name, ok := n.X.(*ir.Name) if !ok { return false } @@ -4342,7 +4342,7 @@ func isIntrinsicCall(n *ir.CallExpr) bool { // intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation. func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value { - v := findIntrinsic(n.Left().Sym())(s, n, s.intrinsicArgs(n)) + v := findIntrinsic(n.X.Sym())(s, n, s.intrinsicArgs(n)) if ssa.IntrinsicsDebug > 0 { x := v if x == nil { @@ -4351,7 +4351,7 @@ func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value { if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 { x = x.Args[0] } - base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.Left().Sym().Name, x.LongString()) + base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.X.Sym().Name, x.LongString()) } return v } @@ -4360,12 +4360,12 @@ func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value { func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value { // Construct map of temps; see comments in s.call about the structure of n. temps := map[ir.Node]*ssa.Value{} - for _, a := range n.List().Slice() { + for _, a := range n.Args.Slice() { if a.Op() != ir.OAS { s.Fatalf("non-assignment as a temp function argument %v", a.Op()) } a := a.(*ir.AssignStmt) - l, r := a.Left(), a.Right() + l, r := a.X, a.Y if l.Op() != ir.ONAME { s.Fatalf("non-ONAME temp function argument %v", a.Op()) } @@ -4373,8 +4373,8 @@ func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value { // Walk ensures these temporaries are dead outside of n. temps[l] = s.expr(r) } - args := make([]*ssa.Value, n.Rlist().Len()) - for i, n := range n.Rlist().Slice() { + args := make([]*ssa.Value, n.Rargs.Len()) + for i, n := range n.Rargs.Slice() { // Store a value to an argument slot. if x, ok := temps[n]; ok { // This is a previously computed temporary. @@ -4399,7 +4399,7 @@ func (s *state) openDeferRecord(n *ir.CallExpr) { // once.mutex'. 
Such a statement will create a mapping in s.vars[] from // the autotmp name to the evaluated SSA arg value, but won't do any // stores to the stack. - s.stmtList(n.List()) + s.stmtList(n.Args) var args []*ssa.Value var argNodes []*ir.Name @@ -4407,7 +4407,7 @@ func (s *state) openDeferRecord(n *ir.CallExpr) { opendefer := &openDeferInfo{ n: n, } - fn := n.Left() + fn := n.X if n.Op() == ir.OCALLFUNC { // We must always store the function value in a stack slot for the // runtime panic code to use. But in the defer exit code, we will @@ -4415,7 +4415,7 @@ func (s *state) openDeferRecord(n *ir.CallExpr) { closureVal := s.expr(fn) closure := s.openDeferSave(nil, fn.Type(), closureVal) opendefer.closureNode = closure.Aux.(*ir.Name) - if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class() == ir.PFUNC) { + if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class_ == ir.PFUNC) { opendefer.closure = closure } } else if n.Op() == ir.OCALLMETH { @@ -4442,7 +4442,7 @@ func (s *state) openDeferRecord(n *ir.CallExpr) { opendefer.closureNode = opendefer.closure.Aux.(*ir.Name) opendefer.rcvrNode = opendefer.rcvr.Aux.(*ir.Name) } - for _, argn := range n.Rlist().Slice() { + for _, argn := range n.Rargs.Slice() { var v *ssa.Value if canSSAType(argn.Type()) { v = s.openDeferSave(nil, argn.Type(), s.expr(argn)) @@ -4565,7 +4565,7 @@ func (s *state) openDeferExit() { // closure/receiver/args that were stored in argtmps at the point // of the defer statement. argStart := base.Ctxt.FixedFrameSize() - fn := r.n.Left() + fn := r.n.X stksize := fn.Type().ArgWidth() var ACArgs []ssa.Param var ACResults []ssa.Param @@ -4672,11 +4672,11 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val var closure *ssa.Value // ptr to closure to run (if dynamic) var codeptr *ssa.Value // ptr to target code (if dynamic) var rcvr *ssa.Value // receiver to set - fn := n.Left() + fn := n.X var ACArgs []ssa.Param var ACResults []ssa.Param var callArgs []*ssa.Value - res := n.Left().Type().Results() + res := n.X.Type().Results() if k == callNormal { nf := res.NumFields() for i := 0; i < nf; i++ { @@ -4690,7 +4690,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val switch n.Op() { case ir.OCALLFUNC: testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f) - if k == callNormal && fn.Op() == ir.ONAME && fn.(*ir.Name).Class() == ir.PFUNC { + if k == callNormal && fn.Op() == ir.ONAME && fn.(*ir.Name).Class_ == ir.PFUNC { fn := fn.(*ir.Name) sym = fn.Sym() break @@ -4708,7 +4708,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val fn := fn.(*ir.SelectorExpr) testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f) if k == callNormal { - sym = fn.Sym() + sym = fn.Sel break } closure = s.getMethodClosure(fn) @@ -4734,7 +4734,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val // Run all assignments of temps. // The temps are introduced to avoid overwriting argument // slots when arguments themselves require function calls. - s.stmtList(n.List()) + s.stmtList(n.Args) var call *ssa.Value if k == callDeferStack { @@ -4769,7 +4769,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val // Then, store all the arguments of the defer call. ft := fn.Type() off := t.FieldOff(12) - args := n.Rlist().Slice() + args := n.Rargs.Slice() // Set receiver (for interface calls). Always a pointer. 
if rcvr != nil { @@ -4845,8 +4845,8 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val } // Write args. - t := n.Left().Type() - args := n.Rlist().Slice() + t := n.X.Type() + args := n.Rargs.Slice() if n.Op() == ir.OCALLMETH { f := t.Recv() ACArg, arg := s.putArg(args[0], f.Type, argStart+f.Offset, testLateExpansion) @@ -4923,7 +4923,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val s.vars[memVar] = call } // Insert OVARLIVE nodes - s.stmtList(n.Body()) + s.stmtList(n.Body) // Finish block for defers if k == callDefer || k == callDeferStack { @@ -4977,9 +4977,9 @@ func (s *state) getMethodClosure(fn *ir.SelectorExpr) *ssa.Value { // Make a PFUNC node out of that, then evaluate it. // We get back an SSA value representing &sync.(*Mutex).Unlock·f. // We can then pass that to defer or go. - n2 := ir.NewNameAt(fn.Pos(), fn.Sym()) + n2 := ir.NewNameAt(fn.Pos(), fn.Sel) n2.Curfn = s.curfn - n2.SetClass(ir.PFUNC) + n2.Class_ = ir.PFUNC // n2.Sym already existed, so it's already marked as a function. n2.SetPos(fn.Pos()) n2.SetType(types.Types[types.TUINT8]) // fake type for a static closure. Could use runtime.funcval if we had it. @@ -4989,10 +4989,10 @@ func (s *state) getMethodClosure(fn *ir.SelectorExpr) *ssa.Value { // getClosureAndRcvr returns values for the appropriate closure and receiver of an // interface call func (s *state) getClosureAndRcvr(fn *ir.SelectorExpr) (*ssa.Value, *ssa.Value) { - i := s.expr(fn.Left()) + i := s.expr(fn.X) itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i) s.nilCheck(itab) - itabidx := fn.Offset() + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab + itabidx := fn.Offset + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab) rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i) return closure, rcvr @@ -5028,7 +5028,7 @@ func (s *state) addr(n ir.Node) *ssa.Value { fallthrough case ir.ONAME: n := n.(*ir.Name) - switch n.Class() { + switch n.Class_ { case ir.PEXTERN: // global variable v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym().Linksym(), s.sb) @@ -5057,60 +5057,60 @@ func (s *state) addr(n ir.Node) *ssa.Value { // that cse works on their addresses return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true) default: - s.Fatalf("variable address class %v not implemented", n.Class()) + s.Fatalf("variable address class %v not implemented", n.Class_) return nil } case ir.ORESULT: // load return from callee n := n.(*ir.ResultExpr) if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall { - return s.constOffPtrSP(t, n.Offset()) + return s.constOffPtrSP(t, n.Offset) } - which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Offset()) + which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Offset) if which == -1 { // Do the old thing // TODO: Panic instead. 
- return s.constOffPtrSP(t, n.Offset()) + return s.constOffPtrSP(t, n.Offset) } x := s.newValue1I(ssa.OpSelectNAddr, t, which, s.prevCall) return x case ir.OINDEX: n := n.(*ir.IndexExpr) - if n.Left().Type().IsSlice() { - a := s.expr(n.Left()) - i := s.expr(n.Right()) + if n.X.Type().IsSlice() { + a := s.expr(n.X) + i := s.expr(n.Index) len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], a) i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) p := s.newValue1(ssa.OpSlicePtr, t, a) return s.newValue2(ssa.OpPtrIndex, t, p, i) } else { // array - a := s.addr(n.Left()) - i := s.expr(n.Right()) - len := s.constInt(types.Types[types.TINT], n.Left().Type().NumElem()) + a := s.addr(n.X) + i := s.expr(n.Index) + len := s.constInt(types.Types[types.TINT], n.X.Type().NumElem()) i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) - return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left().Type().Elem()), a, i) + return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.X.Type().Elem()), a, i) } case ir.ODEREF: n := n.(*ir.StarExpr) - return s.exprPtr(n.Left(), n.Bounded(), n.Pos()) + return s.exprPtr(n.X, n.Bounded(), n.Pos()) case ir.ODOT: n := n.(*ir.SelectorExpr) - p := s.addr(n.Left()) - return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p) + p := s.addr(n.X) + return s.newValue1I(ssa.OpOffPtr, t, n.Offset, p) case ir.ODOTPTR: n := n.(*ir.SelectorExpr) - p := s.exprPtr(n.Left(), n.Bounded(), n.Pos()) - return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p) + p := s.exprPtr(n.X, n.Bounded(), n.Pos()) + return s.newValue1I(ssa.OpOffPtr, t, n.Offset, p) case ir.OCLOSUREREAD: n := n.(*ir.ClosureReadExpr) - return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), + return s.newValue1I(ssa.OpOffPtr, t, n.Offset, s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)) case ir.OCONVNOP: n := n.(*ir.ConvExpr) - if n.Type() == n.Left().Type() { - return s.addr(n.Left()) + if n.Type() == n.X.Type() { + return s.addr(n.X) } - addr := s.addr(n.Left()) + addr := s.addr(n.X) return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH: n := n.(*ir.CallExpr) @@ -5141,13 +5141,13 @@ func (s *state) canSSA(n ir.Node) bool { nn := n if nn.Op() == ir.ODOT { nn := nn.(*ir.SelectorExpr) - n = nn.Left() + n = nn.X continue } if nn.Op() == ir.OINDEX { nn := nn.(*ir.IndexExpr) - if nn.Left().Type().IsArray() { - n = nn.Left() + if nn.X.Type().IsArray() { + n = nn.X continue } } @@ -5166,10 +5166,10 @@ func (s *state) canSSAName(name *ir.Name) bool { if isParamHeapCopy(name) { return false } - if name.Class() == ir.PAUTOHEAP { + if name.Class_ == ir.PAUTOHEAP { s.Fatalf("canSSA of PAUTOHEAP %v", name) } - switch name.Class() { + switch name.Class_ { case ir.PEXTERN: return false case ir.PPARAMOUT: @@ -5187,7 +5187,7 @@ func (s *state) canSSAName(name *ir.Name) bool { return false } } - if name.Class() == ir.PPARAM && name.Sym() != nil && name.Sym().Name == ".this" { + if name.Class_ == ir.PPARAM && name.Sym() != nil && name.Sym().Name == ".this" { // wrappers generated by genwrapper need to update // the .this pointer in place. // TODO: treat as a PPARAMOUT? @@ -5893,7 +5893,7 @@ func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n ir.Node, x *ssa.Value, ft, // referenceTypeBuiltin generates code for the len/cap builtins for maps and channels. 
func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value { - if !n.Left().Type().IsMap() && !n.Left().Type().IsChan() { + if !n.X.Type().IsMap() && !n.X.Type().IsChan() { s.Fatalf("node must be a map or a channel") } // if n == nil { @@ -6050,8 +6050,8 @@ func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt * // commaok indicates whether to panic or return a bool. // If commaok is false, resok will be nil. func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Value) { - iface := s.expr(n.Left()) // input interface - target := s.expr(n.Right()) // target type + iface := s.expr(n.X) // input interface + target := s.expr(n.Ntype) // target type byteptr := s.f.Config.Types.BytePtr if n.Type().IsInterface() { @@ -6067,7 +6067,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val // Conversion succeeds iff that field is not nil. cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr)) - if n.Left().Type().IsEmptyInterface() && commaok { + if n.X.Type().IsEmptyInterface() && commaok { // Converting empty interface to empty interface with ,ok is just a nil check. return iface, cond } @@ -6089,7 +6089,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val // On success, return (perhaps modified) input interface. s.startBlock(bOk) - if n.Left().Type().IsEmptyInterface() { + if n.X.Type().IsEmptyInterface() { res = iface // Use input interface unchanged. return } @@ -6128,7 +6128,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val if base.Debug.TypeAssert > 0 { base.WarnfAt(n.Pos(), "type assertion not inlined") } - if n.Left().Type().IsEmptyInterface() { + if n.X.Type().IsEmptyInterface() { if commaok { call := s.rtcall(assertE2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface) return call[0], call[1] @@ -6153,12 +6153,12 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val base.WarnfAt(n.Pos(), "type assertion inlined") } var targetITab *ssa.Value - if n.Left().Type().IsEmptyInterface() { + if n.X.Type().IsEmptyInterface() { // Looking for pointer to target type. targetITab = target } else { // Looking for pointer to itab for target type and source interface. - targetITab = s.expr(n.List().First()) + targetITab = s.expr(n.Itab.First()) } var tmp ir.Node // temporary for use with large types @@ -6185,8 +6185,8 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val if !commaok { // on failure, panic by calling panicdottype s.startBlock(bFail) - taddr := s.expr(n.Right().(*ir.AddrExpr).Right()) - if n.Left().Type().IsEmptyInterface() { + taddr := s.expr(n.Ntype.(*ir.AddrExpr).Alloc) + if n.X.Type().IsEmptyInterface() { s.rtcall(panicdottypeE, false, nil, itab, target, taddr) } else { s.rtcall(panicdottypeI, false, nil, itab, target, taddr) @@ -6280,7 +6280,7 @@ func (s *state) mem() *ssa.Value { } func (s *state) addNamedValue(n *ir.Name, v *ssa.Value) { - if n.Class() == ir.Pxxx { + if n.Class_ == ir.Pxxx { // Don't track our marker nodes (memVar etc.). return } @@ -6288,7 +6288,7 @@ func (s *state) addNamedValue(n *ir.Name, v *ssa.Value) { // Don't track temporary variables. return } - if n.Class() == ir.PPARAMOUT { + if n.Class_ == ir.PPARAMOUT { // Don't track named output values. This prevents return values // from being assigned too early. See #14591 and #14762. TODO: allow this. 
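As a reminder of what the inlined dottype path buys: a comma-ok assertion against a concrete type compiles down to one word comparison (the itab pointer for non-empty source interfaces, the _type pointer for empty ones) plus a load of the data word, with no runtime call. A sketch in source form; the names in the comment are illustrative, not compiler APIs.

	func assertDemo(i interface{}) (int, bool) {
		// Lowered roughly as: cond := typeWordOf(i) == &type_for_int;
		// if cond { v = load(dataWordOf(i)) }.
		v, ok := i.(int)
		return v, ok
	}
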
return @@ -6811,11 +6811,11 @@ func defframe(s *SSAGenState, e *ssafn) { if !n.Needzero() { continue } - if n.Class() != ir.PAUTO { - e.Fatalf(n.Pos(), "needzero class %d", n.Class()) + if n.Class_ != ir.PAUTO { + e.Fatalf(n.Pos(), "needzero class %d", n.Class_) } if n.Type().Size()%int64(Widthptr) != 0 || n.FrameOffset()%int64(Widthptr) != 0 || n.Type().Size() == 0 { - e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset()) + e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset_) } if lo != hi && n.FrameOffset()+n.Type().Size() >= lo-int64(2*Widthreg) { @@ -6896,7 +6896,7 @@ func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) { a.Name = obj.NAME_EXTERN a.Sym = n case *ir.Name: - if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT { + if n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT { a.Name = obj.NAME_PARAM a.Sym = ir.Orig(n).Sym().Linksym() a.Offset += n.FrameOffset() @@ -7048,7 +7048,7 @@ func AddrAuto(a *obj.Addr, v *ssa.Value) { a.Sym = n.Sym().Linksym() a.Reg = int16(thearch.REGSP) a.Offset = n.FrameOffset() + off - if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT { + if n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT { a.Name = obj.NAME_PARAM } else { a.Name = obj.NAME_AUTO @@ -7063,7 +7063,7 @@ func (s *SSAGenState) AddrScratch(a *obj.Addr) { a.Name = obj.NAME_AUTO a.Sym = s.ScratchFpMem.Sym().Linksym() a.Reg = int16(thearch.REGSP) - a.Offset = s.ScratchFpMem.Offset() + a.Offset = s.ScratchFpMem.Offset_ } // Call returns a new CALL instruction for the SSA value v. @@ -7146,8 +7146,8 @@ func (s *SSAGenState) UseArgs(n int64) { // fieldIdx finds the index of the field referred to by the ODOT node n. func fieldIdx(n *ir.SelectorExpr) int { - t := n.Left().Type() - f := n.Sym() + t := n.X.Type() + f := n.Sel if !t.IsStruct() { panic("ODOT's LHS is not a struct") } @@ -7158,7 +7158,7 @@ func fieldIdx(n *ir.SelectorExpr) int { i++ continue } - if t1.Offset != n.Offset() { + if t1.Offset != n.Offset { panic("field offset doesn't match") } return i @@ -7282,7 +7282,7 @@ func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym { func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot { node := parent.N - if node.Class() != ir.PAUTO || node.Name().Addrtaken() { + if node.Class_ != ir.PAUTO || node.Name().Addrtaken() { // addressed things and non-autos retain their parents (i.e., cannot truly be split) return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset} } @@ -7292,7 +7292,7 @@ func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t s.Def = n ir.AsNode(s.Def).Name().SetUsed(true) n.SetType(t) - n.SetClass(ir.PAUTO) + n.Class_ = ir.PAUTO n.SetEsc(EscNever) n.Curfn = e.curfn e.curfn.Dcl = append(e.curfn.Dcl, n) @@ -7368,14 +7368,14 @@ func (e *ssafn) MyImportPath() string { func clobberBase(n ir.Node) ir.Node { if n.Op() == ir.ODOT { n := n.(*ir.SelectorExpr) - if n.Left().Type().NumFields() == 1 { - return clobberBase(n.Left()) + if n.X.Type().NumFields() == 1 { + return clobberBase(n.X) } } if n.Op() == ir.OINDEX { n := n.(*ir.IndexExpr) - if n.Left().Type().IsArray() && n.Left().Type().NumElem() == 1 { - return clobberBase(n.Left()) + if n.X.Type().IsArray() && n.X.Type().NumElem() == 1 { + return clobberBase(n.X) } } return n diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 5aebae0b18bcd..450b20e000173 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ 
b/src/cmd/compile/internal/gc/subr.go @@ -616,7 +616,7 @@ func calcHasCall(n ir.Node) bool { if instrumenting { return true } - return n.Left().HasCall() || n.Right().HasCall() + return n.X.HasCall() || n.Y.HasCall() case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR, ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODIV, ir.OMOD: // These ops might panic, make sure they are done @@ -630,49 +630,49 @@ func calcHasCall(n ir.Node) bool { if thearch.SoftFloat && (isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) { return true } - return n.Left().HasCall() || n.Right().HasCall() + return n.X.HasCall() || n.Y.HasCall() case ir.ONEG: n := n.(*ir.UnaryExpr) if thearch.SoftFloat && (isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) { return true } - return n.Left().HasCall() + return n.X.HasCall() case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT: n := n.(*ir.BinaryExpr) - if thearch.SoftFloat && (isFloat[n.Left().Type().Kind()] || isComplex[n.Left().Type().Kind()]) { + if thearch.SoftFloat && (isFloat[n.X.Type().Kind()] || isComplex[n.X.Type().Kind()]) { return true } - return n.Left().HasCall() || n.Right().HasCall() + return n.X.HasCall() || n.Y.HasCall() case ir.OCONV: n := n.(*ir.ConvExpr) - if thearch.SoftFloat && ((isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) || (isFloat[n.Left().Type().Kind()] || isComplex[n.Left().Type().Kind()])) { + if thearch.SoftFloat && ((isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) || (isFloat[n.X.Type().Kind()] || isComplex[n.X.Type().Kind()])) { return true } - return n.Left().HasCall() + return n.X.HasCall() case ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOPY, ir.OCOMPLEX, ir.OEFACE: n := n.(*ir.BinaryExpr) - return n.Left().HasCall() || n.Right().HasCall() + return n.X.HasCall() || n.Y.HasCall() case ir.OAS: n := n.(*ir.AssignStmt) - return n.Left().HasCall() || n.Right() != nil && n.Right().HasCall() + return n.X.HasCall() || n.Y != nil && n.Y.HasCall() case ir.OADDR: n := n.(*ir.AddrExpr) - return n.Left().HasCall() + return n.X.HasCall() case ir.OPAREN: n := n.(*ir.ParenExpr) - return n.Left().HasCall() + return n.X.HasCall() case ir.OBITNOT, ir.ONOT, ir.OPLUS, ir.ORECV, ir.OALIGNOF, ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.ONEW, ir.OOFFSETOF, ir.OPANIC, ir.OREAL, ir.OSIZEOF, ir.OCHECKNIL, ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.ONEWOBJ, ir.OSPTR, ir.OVARDEF, ir.OVARKILL, ir.OVARLIVE: n := n.(*ir.UnaryExpr) - return n.Left().HasCall() + return n.X.HasCall() case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER: n := n.(*ir.SelectorExpr) - return n.Left().HasCall() + return n.X.HasCall() case ir.OGETG, ir.OCLOSUREREAD, ir.OMETHEXPR: return false @@ -687,15 +687,15 @@ func calcHasCall(n ir.Node) bool { case ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.OBYTES2STRTMP, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2BYTESTMP, ir.OSTR2RUNES, ir.ORUNESTR: // TODO(rsc): Some conversions are themselves calls, no? n := n.(*ir.ConvExpr) - return n.Left().HasCall() + return n.X.HasCall() case ir.ODOTTYPE2: // TODO(rsc): Shouldn't this be up with ODOTTYPE above? n := n.(*ir.TypeAssertExpr) - return n.Left().HasCall() + return n.X.HasCall() case ir.OSLICEHEADER: // TODO(rsc): What about len and cap? n := n.(*ir.SliceHeaderExpr) - return n.Left().HasCall() + return n.Ptr.HasCall() case ir.OAS2DOTTYPE, ir.OAS2FUNC: // TODO(rsc): Surely we need to check List and Rlist. 
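A toy model of the invariant calcHasCall maintains, not compiler code: a node has a call if it is one itself, if softfloat turns its floating-point arithmetic into a runtime routine (fadd64 and friends), or if any operand has one.

	type exprNode struct {
		op      string // "call", "add", "name", ...
		isFloat bool
		kids    []*exprNode
	}

	var arith = map[string]bool{"add": true, "mul": true, "div": true}

	func hasCall(n *exprNode, softFloat bool) bool {
		if n.op == "call" {
			return true
		}
		if softFloat && n.isFloat && arith[n.op] {
			return true // lowered to a runtime call under softfloat
		}
		for _, k := range n.kids {
			if hasCall(k, softFloat) {
				return true
			}
		}
		return false
	}
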
return false @@ -783,44 +783,44 @@ func safeexpr(n ir.Node, init *ir.Nodes) ir.Node { case ir.OLEN, ir.OCAP: n := n.(*ir.UnaryExpr) - l := safeexpr(n.Left(), init) - if l == n.Left() { + l := safeexpr(n.X, init) + if l == n.X { return n } a := ir.Copy(n).(*ir.UnaryExpr) - a.SetLeft(l) + a.X = l return walkexpr(typecheck(a, ctxExpr), init) case ir.ODOT, ir.ODOTPTR: n := n.(*ir.SelectorExpr) - l := safeexpr(n.Left(), init) - if l == n.Left() { + l := safeexpr(n.X, init) + if l == n.X { return n } a := ir.Copy(n).(*ir.SelectorExpr) - a.SetLeft(l) + a.X = l return walkexpr(typecheck(a, ctxExpr), init) case ir.ODEREF: n := n.(*ir.StarExpr) - l := safeexpr(n.Left(), init) - if l == n.Left() { + l := safeexpr(n.X, init) + if l == n.X { return n } a := ir.Copy(n).(*ir.StarExpr) - a.SetLeft(l) + a.X = l return walkexpr(typecheck(a, ctxExpr), init) case ir.OINDEX, ir.OINDEXMAP: n := n.(*ir.IndexExpr) - l := safeexpr(n.Left(), init) - r := safeexpr(n.Right(), init) - if l == n.Left() && r == n.Right() { + l := safeexpr(n.X, init) + r := safeexpr(n.Index, init) + if l == n.X && r == n.Index { return n } a := ir.Copy(n).(*ir.IndexExpr) - a.SetLeft(l) - a.SetRight(r) + a.X = l + a.Index = r return walkexpr(typecheck(a, ctxExpr), init) case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT: @@ -992,20 +992,20 @@ func dotpath(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) ( // will give shortest unique addressing. // modify the tree with missing type names. func adddot(n *ir.SelectorExpr) *ir.SelectorExpr { - n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr)) - if n.Left().Diag() { + n.X = typecheck(n.X, ctxType|ctxExpr) + if n.X.Diag() { n.SetDiag(true) } - t := n.Left().Type() + t := n.X.Type() if t == nil { return n } - if n.Left().Op() == ir.OTYPE { + if n.X.Op() == ir.OTYPE { return n } - s := n.Sym() + s := n.Sel if s == nil { return n } @@ -1014,14 +1014,14 @@ func adddot(n *ir.SelectorExpr) *ir.SelectorExpr { case path != nil: // rebuild elided dots for c := len(path) - 1; c >= 0; c-- { - dot := ir.NewSelectorExpr(base.Pos, ir.ODOT, n.Left(), path[c].field.Sym) + dot := ir.NewSelectorExpr(base.Pos, ir.ODOT, n.X, path[c].field.Sym) dot.SetImplicit(true) dot.SetType(path[c].field.Type) - n.SetLeft(dot) + n.X = dot } case ambig: base.Errorf("ambiguous selector %v", n) - n.SetLeft(nil) + n.X = nil } return n @@ -1228,10 +1228,10 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { if rcvr.IsPtr() && rcvr.Elem() == methodrcvr { // generating wrapper from *T to T. n := ir.NewIfStmt(base.Pos, nil, nil, nil) - n.SetLeft(ir.NewBinaryExpr(base.Pos, ir.OEQ, nthis, nodnil())) + n.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, nthis, nodnil()) call := ir.NewCallExpr(base.Pos, ir.OCALL, syslook("panicwrap"), nil) - n.PtrBody().Set1(call) - fn.PtrBody().Append(n) + n.Body.Set1(call) + fn.Body.Append(n) } dot := adddot(ir.NewSelectorExpr(base.Pos, ir.OXDOT, nthis, method.Sym)) @@ -1245,29 +1245,29 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { // value for that function. if !instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !isifacemethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) { // generate tail call: adjust pointer receiver and jump to embedded method. 
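Written out as source, the wrapper genwrapper builds for a promoted method looks roughly like the following; the nil check (panicwrap) applies only to *T-to-T wrappers, and the tail-call branch above replaces the call with a direct jump (ORETJMP) when that is safe.

	type inner struct{}

	func (inner) M() int { return 1 }

	type outer struct{ inner }

	// Generated (*outer).M, morally:
	//
	//	func (this *outer) M() int {
	//		if this == nil {
	//			panic(...) // panicwrap
	//		}
	//		return this.inner.M() // or ORETJMP straight to inner.M
	//	}
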
- left := dot.Left() // skip final .M + left := dot.X // skip final .M if !left.Type().IsPtr() { left = nodAddr(left) } as := ir.NewAssignStmt(base.Pos, nthis, convnop(left, rcvr)) - fn.PtrBody().Append(as) - fn.PtrBody().Append(ir.NewBranchStmt(base.Pos, ir.ORETJMP, methodSym(methodrcvr, method.Sym))) + fn.Body.Append(as) + fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.ORETJMP, methodSym(methodrcvr, method.Sym))) } else { fn.SetWrapper(true) // ignore frame for panic+recover matching call := ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil) - call.PtrList().Set(paramNnames(tfn.Type())) - call.SetIsDDD(tfn.Type().IsVariadic()) + call.Args.Set(paramNnames(tfn.Type())) + call.IsDDD = tfn.Type().IsVariadic() if method.Type.NumResults() > 0 { ret := ir.NewReturnStmt(base.Pos, nil) - ret.PtrList().Set1(call) - fn.PtrBody().Append(ret) + ret.Results.Set1(call) + fn.Body.Append(ret) } else { - fn.PtrBody().Append(call) + fn.Body.Append(call) } } if false && base.Flag.LowerR != 0 { - ir.DumpList("genwrapper body", fn.Body()) + ir.DumpList("genwrapper body", fn.Body) } funcbody() @@ -1277,7 +1277,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { typecheckFunc(fn) Curfn = fn - typecheckslice(fn.Body().Slice(), ctxStmt) + typecheckslice(fn.Body.Slice(), ctxStmt) // Inline calls within (*T).M wrappers. This is safe because we only // generate those wrappers within the same compilation unit as (T).M. @@ -1422,7 +1422,7 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool func liststmt(l []ir.Node) ir.Node { n := ir.NewBlockStmt(base.Pos, nil) - n.PtrList().Set(l) + n.List.Set(l) if len(l) != 0 { n.SetPos(l[0].Pos()) } @@ -1542,8 +1542,8 @@ func itabType(itab ir.Node) ir.Node { typ := ir.NewSelectorExpr(base.Pos, ir.ODOTPTR, itab, nil) typ.SetType(types.NewPtr(types.Types[types.TUINT8])) typ.SetTypecheck(1) - typ.SetOffset(int64(Widthptr)) // offset of _type in runtime.itab - typ.SetBounded(true) // guaranteed not to fault + typ.Offset = int64(Widthptr) // offset of _type in runtime.itab + typ.SetBounded(true) // guaranteed not to fault return typ } diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index 7cd1c16e00f1d..da781e6f45cdc 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -17,7 +17,7 @@ import ( // typecheckswitch typechecks a switch statement. func typecheckswitch(n *ir.SwitchStmt) { typecheckslice(n.Init().Slice(), ctxStmt) - if n.Left() != nil && n.Left().Op() == ir.OTYPESW { + if n.Tag != nil && n.Tag.Op() == ir.OTYPESW { typecheckTypeSwitch(n) } else { typecheckExprSwitch(n) @@ -25,26 +25,26 @@ func typecheckswitch(n *ir.SwitchStmt) { } func typecheckTypeSwitch(n *ir.SwitchStmt) { - guard := n.Left().(*ir.TypeSwitchGuard) - guard.SetRight(typecheck(guard.Right(), ctxExpr)) - t := guard.Right().Type() + guard := n.Tag.(*ir.TypeSwitchGuard) + guard.X = typecheck(guard.X, ctxExpr) + t := guard.X.Type() if t != nil && !t.IsInterface() { - base.ErrorfAt(n.Pos(), "cannot type switch on non-interface value %L", guard.Right()) + base.ErrorfAt(n.Pos(), "cannot type switch on non-interface value %L", guard.X) t = nil } // We don't actually declare the type switch's guarded // declaration itself. So if there are no cases, we won't // notice that it went unused. 
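The zero-case check above is what rejects a guard variable that can never be used: with no clauses there is nothing to redeclare it in, so a program like this is refused at that point.

	func emptySwitch(x interface{}) {
		switch v := x.(type) { // error: v declared but not used
		}
	}
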
- if v := guard.Left(); v != nil && !ir.IsBlank(v) && n.List().Len() == 0 { + if v := guard.Tag; v != nil && !ir.IsBlank(v) && n.Cases.Len() == 0 { base.ErrorfAt(v.Pos(), "%v declared but not used", v.Sym()) } var defCase, nilCase ir.Node var ts typeSet - for _, ncase := range n.List().Slice() { + for _, ncase := range n.Cases.Slice() { ncase := ncase.(*ir.CaseStmt) - ls := ncase.List().Slice() + ls := ncase.List.Slice() if len(ls) == 0 { // default: if defCase != nil { base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase)) @@ -77,13 +77,13 @@ func typecheckTypeSwitch(n *ir.SwitchStmt) { if !n1.Type().IsInterface() && !implements(n1.Type(), t, &missing, &have, &ptr) && !missing.Broke() { if have != nil && !have.Broke() { base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+ - " (wrong type for %v method)\n\thave %v%S\n\twant %v%S", guard.Right(), n1.Type(), missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) + " (wrong type for %v method)\n\thave %v%S\n\twant %v%S", guard.X, n1.Type(), missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) } else if ptr != 0 { base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+ - " (%v method has pointer receiver)", guard.Right(), n1.Type(), missing.Sym) + " (%v method has pointer receiver)", guard.X, n1.Type(), missing.Sym) } else { base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+ - " (missing %v method)", guard.Right(), n1.Type(), missing.Sym) + " (missing %v method)", guard.X, n1.Type(), missing.Sym) } continue } @@ -91,7 +91,7 @@ func typecheckTypeSwitch(n *ir.SwitchStmt) { ts.add(ncase.Pos(), n1.Type()) } - if ncase.Rlist().Len() != 0 { + if ncase.Vars.Len() != 0 { // Assign the clause variable's type. 
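Concretely, the vt logic above gives the clause variable the single case type when a clause lists exactly one type, and the switched operand's interface type otherwise.

	func clauseTypes(x interface{}) {
		switch v := x.(type) {
		case int:
			_ = v + 1 // one type in the clause: v is int
		case string, error:
			_ = v // several types: v keeps x's type, interface{}
		}
	}
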
vt := t if len(ls) == 1 { @@ -104,7 +104,7 @@ func typecheckTypeSwitch(n *ir.SwitchStmt) { } } - nvar := ncase.Rlist().First() + nvar := ncase.Vars.First() nvar.SetType(vt) if vt != nil { nvar = typecheck(nvar, ctxExpr|ctxAssign) @@ -113,10 +113,10 @@ func typecheckTypeSwitch(n *ir.SwitchStmt) { nvar.SetTypecheck(1) nvar.SetWalkdef(1) } - ncase.Rlist().SetFirst(nvar) + ncase.Vars.SetFirst(nvar) } - typecheckslice(ncase.Body().Slice(), ctxStmt) + typecheckslice(ncase.Body.Slice(), ctxStmt) } } @@ -150,10 +150,10 @@ func (s *typeSet) add(pos src.XPos, typ *types.Type) { func typecheckExprSwitch(n *ir.SwitchStmt) { t := types.Types[types.TBOOL] - if n.Left() != nil { - n.SetLeft(typecheck(n.Left(), ctxExpr)) - n.SetLeft(defaultlit(n.Left(), nil)) - t = n.Left().Type() + if n.Tag != nil { + n.Tag = typecheck(n.Tag, ctxExpr) + n.Tag = defaultlit(n.Tag, nil) + t = n.Tag.Type() } var nilonly string @@ -168,9 +168,9 @@ func typecheckExprSwitch(n *ir.SwitchStmt) { case !IsComparable(t): if t.IsStruct() { - base.ErrorfAt(n.Pos(), "cannot switch on %L (struct containing %v cannot be compared)", n.Left(), IncomparableField(t).Type) + base.ErrorfAt(n.Pos(), "cannot switch on %L (struct containing %v cannot be compared)", n.Tag, IncomparableField(t).Type) } else { - base.ErrorfAt(n.Pos(), "cannot switch on %L", n.Left()) + base.ErrorfAt(n.Pos(), "cannot switch on %L", n.Tag) } t = nil } @@ -178,9 +178,9 @@ func typecheckExprSwitch(n *ir.SwitchStmt) { var defCase ir.Node var cs constSet - for _, ncase := range n.List().Slice() { + for _, ncase := range n.Cases.Slice() { ncase := ncase.(*ir.CaseStmt) - ls := ncase.List().Slice() + ls := ncase.List.Slice() if len(ls) == 0 { // default: if defCase != nil { base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase)) @@ -199,15 +199,15 @@ func typecheckExprSwitch(n *ir.SwitchStmt) { } if nilonly != "" && !ir.IsNil(n1) { - base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left()) + base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Tag) } else if t.IsInterface() && !n1.Type().IsInterface() && !IsComparable(n1.Type()) { base.ErrorfAt(ncase.Pos(), "invalid case %L in switch (incomparable type)", n1) } else { op1, _ := assignop(n1.Type(), t) op2, _ := assignop(t, n1.Type()) if op1 == ir.OXXX && op2 == ir.OXXX { - if n.Left() != nil { - base.ErrorfAt(ncase.Pos(), "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left(), n1.Type(), t) + if n.Tag != nil { + base.ErrorfAt(ncase.Pos(), "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Tag, n1.Type(), t) } else { base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type()) } @@ -225,18 +225,18 @@ func typecheckExprSwitch(n *ir.SwitchStmt) { } } - typecheckslice(ncase.Body().Slice(), ctxStmt) + typecheckslice(ncase.Body.Slice(), ctxStmt) } } // walkswitch walks a switch statement. func walkswitch(sw *ir.SwitchStmt) { // Guard against double walk, see #25776. 
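The nilonly restriction enforced above, in source terms: map, func, and slice tags are not comparable, so nil is the only case value they accept.

	func nilOnly(f func()) {
		switch f {
		case nil: // ok: the one legal comparison for a func tag
		default:
		}
		// A clause like `case g:` for some func g would be rejected;
		// a func value can only be compared to nil.
	}
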
- if sw.List().Len() == 0 && sw.Body().Len() > 0 { + if sw.Cases.Len() == 0 && sw.Compiled.Len() > 0 { return // Was fatal, but eliminating every possible source of double-walking is hard } - if sw.Left() != nil && sw.Left().Op() == ir.OTYPESW { + if sw.Tag != nil && sw.Tag.Op() == ir.OTYPESW { walkTypeSwitch(sw) } else { walkExprSwitch(sw) @@ -248,8 +248,8 @@ func walkswitch(sw *ir.SwitchStmt) { func walkExprSwitch(sw *ir.SwitchStmt) { lno := setlineno(sw) - cond := sw.Left() - sw.SetLeft(nil) + cond := sw.Tag + sw.Tag = nil // convert switch {...} to switch true {...} if cond == nil { @@ -272,7 +272,7 @@ func walkExprSwitch(sw *ir.SwitchStmt) { cond = walkexpr(cond, sw.PtrInit()) if cond.Op() != ir.OLITERAL && cond.Op() != ir.ONIL { - cond = copyexpr(cond, cond.Type(), sw.PtrBody()) + cond = copyexpr(cond, cond.Type(), &sw.Compiled) } base.Pos = lno @@ -283,33 +283,33 @@ func walkExprSwitch(sw *ir.SwitchStmt) { var defaultGoto ir.Node var body ir.Nodes - for _, ncase := range sw.List().Slice() { + for _, ncase := range sw.Cases.Slice() { ncase := ncase.(*ir.CaseStmt) label := autolabel(".s") jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, label) // Process case dispatch. - if ncase.List().Len() == 0 { + if ncase.List.Len() == 0 { if defaultGoto != nil { base.Fatalf("duplicate default case not detected during typechecking") } defaultGoto = jmp } - for _, n1 := range ncase.List().Slice() { + for _, n1 := range ncase.List.Slice() { s.Add(ncase.Pos(), n1, jmp) } // Process body. body.Append(ir.NewLabelStmt(ncase.Pos(), label)) - body.Append(ncase.Body().Slice()...) - if fall, pos := endsInFallthrough(ncase.Body().Slice()); !fall { + body.Append(ncase.Body.Slice()...) + if fall, pos := endsInFallthrough(ncase.Body.Slice()); !fall { br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil) br.SetPos(pos) body.Append(br) } } - sw.PtrList().Set(nil) + sw.Cases.Set(nil) if defaultGoto == nil { br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil) @@ -317,10 +317,10 @@ func walkExprSwitch(sw *ir.SwitchStmt) { defaultGoto = br } - s.Emit(sw.PtrBody()) - sw.PtrBody().Append(defaultGoto) - sw.PtrBody().AppendNodes(&body) - walkstmtlist(sw.Body().Slice()) + s.Emit(&sw.Compiled) + sw.Compiled.Append(defaultGoto) + sw.Compiled.AppendNodes(&body) + walkstmtlist(sw.Compiled.Slice()) } // An exprSwitch walks an expression switch. @@ -402,8 +402,8 @@ func (s *exprSwitch) flush() { }, func(i int, nif *ir.IfStmt) { run := runs[i] - nif.SetLeft(ir.NewBinaryExpr(base.Pos, ir.OEQ, ir.NewUnaryExpr(base.Pos, ir.OLEN, s.exprname), nodintconst(runLen(run)))) - s.search(run, nif.PtrBody()) + nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, ir.NewUnaryExpr(base.Pos, ir.OLEN, s.exprname), nodintconst(runLen(run))) + s.search(run, &nif.Body) }, ) return @@ -437,8 +437,8 @@ func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) { }, func(i int, nif *ir.IfStmt) { c := &cc[i] - nif.SetLeft(c.test(s.exprname)) - nif.PtrBody().Set1(c.jmp) + nif.Cond = c.test(s.exprname) + nif.Body.Set1(c.jmp) }, ) } @@ -471,9 +471,9 @@ func allCaseExprsAreSideEffectFree(sw *ir.SwitchStmt) bool { // Restricting to constants is simple and probably powerful // enough. - for _, ncase := range sw.List().Slice() { + for _, ncase := range sw.Cases.Slice() { ncase := ncase.(*ir.CaseStmt) - for _, v := range ncase.List().Slice() { + for _, v := range ncase.List.Slice() { if v.Op() != ir.OLITERAL { return false } @@ -504,11 +504,11 @@ func endsInFallthrough(stmts []ir.Node) (bool, src.XPos) { // type switch. 
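Taken together, walkExprSwitch and exprSwitch.flush/search reduce a switch to a copied tag, an if/goto dispatch tree (binary search over sorted case runs, with an extra split on length for strings), and labeled bodies. A hand-written equivalent of the output shape:

	func loweredSwitch(x int) {
		tmp := x // tag evaluated once (copyexpr)
		if tmp == 1 {
			goto l1
		}
		if tmp == 2 {
			goto l2
		}
		goto ldef
	l1:
		println("one") // case 1 body
		goto end
	l2:
		println("two") // case 2 body
		goto end
	ldef:
		println("default")
	end:
		return
	}
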
func walkTypeSwitch(sw *ir.SwitchStmt) { var s typeSwitch - s.facename = sw.Left().(*ir.TypeSwitchGuard).Right() - sw.SetLeft(nil) + s.facename = sw.Tag.(*ir.TypeSwitchGuard).X + sw.Tag = nil s.facename = walkexpr(s.facename, sw.PtrInit()) - s.facename = copyexpr(s.facename, s.facename.Type(), sw.PtrBody()) + s.facename = copyexpr(s.facename, s.facename.Type(), &sw.Compiled) s.okname = temp(types.Types[types.TBOOL]) // Get interface descriptor word. @@ -523,55 +523,55 @@ func walkTypeSwitch(sw *ir.SwitchStmt) { // h := e._type.hash // Use a similar strategy for non-empty interfaces. ifNil := ir.NewIfStmt(base.Pos, nil, nil, nil) - ifNil.SetLeft(ir.NewBinaryExpr(base.Pos, ir.OEQ, itab, nodnil())) + ifNil.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, itab, nodnil()) base.Pos = base.Pos.WithNotStmt() // disable statement marks after the first check. - ifNil.SetLeft(typecheck(ifNil.Left(), ctxExpr)) - ifNil.SetLeft(defaultlit(ifNil.Left(), nil)) + ifNil.Cond = typecheck(ifNil.Cond, ctxExpr) + ifNil.Cond = defaultlit(ifNil.Cond, nil) // ifNil.Nbody assigned at end. - sw.PtrBody().Append(ifNil) + sw.Compiled.Append(ifNil) // Load hash from type or itab. dotHash := ir.NewSelectorExpr(base.Pos, ir.ODOTPTR, itab, nil) dotHash.SetType(types.Types[types.TUINT32]) dotHash.SetTypecheck(1) if s.facename.Type().IsEmptyInterface() { - dotHash.SetOffset(int64(2 * Widthptr)) // offset of hash in runtime._type + dotHash.Offset = int64(2 * Widthptr) // offset of hash in runtime._type } else { - dotHash.SetOffset(int64(2 * Widthptr)) // offset of hash in runtime.itab + dotHash.Offset = int64(2 * Widthptr) // offset of hash in runtime.itab } dotHash.SetBounded(true) // guaranteed not to fault - s.hashname = copyexpr(dotHash, dotHash.Type(), sw.PtrBody()) + s.hashname = copyexpr(dotHash, dotHash.Type(), &sw.Compiled) br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil) var defaultGoto, nilGoto ir.Node var body ir.Nodes - for _, ncase := range sw.List().Slice() { + for _, ncase := range sw.Cases.Slice() { ncase := ncase.(*ir.CaseStmt) var caseVar ir.Node - if ncase.Rlist().Len() != 0 { - caseVar = ncase.Rlist().First() + if ncase.Vars.Len() != 0 { + caseVar = ncase.Vars.First() } // For single-type cases with an interface type, // we initialize the case variable as part of the type assertion. // In other cases, we initialize it in the body. var singleType *types.Type - if ncase.List().Len() == 1 && ncase.List().First().Op() == ir.OTYPE { - singleType = ncase.List().First().Type() + if ncase.List.Len() == 1 && ncase.List.First().Op() == ir.OTYPE { + singleType = ncase.List.First().Type() } caseVarInitialized := false label := autolabel(".s") jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, label) - if ncase.List().Len() == 0 { // default: + if ncase.List.Len() == 0 { // default: if defaultGoto != nil { base.Fatalf("duplicate default case not detected during typechecking") } defaultGoto = jmp } - for _, n1 := range ncase.List().Slice() { + for _, n1 := range ncase.List.Slice() { if ir.IsNil(n1) { // case nil: if nilGoto != nil { base.Fatalf("duplicate nil case not detected during typechecking") @@ -605,10 +605,10 @@ func walkTypeSwitch(sw *ir.SwitchStmt) { typecheckslice(l, ctxStmt) body.Append(l...) } - body.Append(ncase.Body().Slice()...) + body.Append(ncase.Body.Slice()...) 
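walkTypeSwitch's output has the same shape with a hash prefilter: nil-check the itab/type word, load the 32-bit type hash (the hash field sketched earlier), binary-search the case hashes, and confirm each candidate with an ordinary comma-ok assertion, which is what typeSwitch.Add emits. Minus the hash search, the result behaves like:

	func typeDispatch(e interface{}) string {
		if _, ok := e.(int); ok { // reached only on a hash match
			return "int"
		}
		if _, ok := e.(string); ok {
			return "string"
		}
		return "default"
	}
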
body.Append(br) } - sw.PtrList().Set(nil) + sw.Cases.Set(nil) if defaultGoto == nil { defaultGoto = br @@ -616,13 +616,13 @@ func walkTypeSwitch(sw *ir.SwitchStmt) { if nilGoto == nil { nilGoto = defaultGoto } - ifNil.PtrBody().Set1(nilGoto) + ifNil.Body.Set1(nilGoto) - s.Emit(sw.PtrBody()) - sw.PtrBody().Append(defaultGoto) - sw.PtrBody().AppendNodes(&body) + s.Emit(&sw.Compiled) + sw.Compiled.Append(defaultGoto) + sw.Compiled.AppendNodes(&body) - walkstmtlist(sw.Body().Slice()) + walkstmtlist(sw.Compiled.Slice()) } // A typeSwitch walks a type switch. @@ -656,16 +656,16 @@ func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp ir.Node) { // cv, ok = iface.(type) as := ir.NewAssignListStmt(pos, ir.OAS2, nil, nil) - as.PtrList().Set2(caseVar, s.okname) // cv, ok = + as.Lhs.Set2(caseVar, s.okname) // cv, ok = dot := ir.NewTypeAssertExpr(pos, s.facename, nil) dot.SetType(typ) // iface.(type) - as.PtrRlist().Set1(dot) + as.Rhs.Set1(dot) appendWalkStmt(&body, as) // if ok { goto label } nif := ir.NewIfStmt(pos, nil, nil, nil) - nif.SetLeft(s.okname) - nif.PtrBody().Set1(jmp) + nif.Cond = s.okname + nif.Body.Set1(jmp) body.Append(nif) if !typ.IsInterface() { @@ -714,8 +714,8 @@ func (s *typeSwitch) flush() { // TODO(mdempsky): Omit hash equality check if // there's only one type. c := cc[i] - nif.SetLeft(ir.NewBinaryExpr(base.Pos, ir.OEQ, s.hashname, nodintconst(int64(c.hash)))) - nif.PtrBody().AppendNodes(&c.body) + nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, s.hashname, nodintconst(int64(c.hash))) + nif.Body.AppendNodes(&c.body) }, ) } @@ -740,22 +740,22 @@ func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i in nif := ir.NewIfStmt(base.Pos, nil, nil, nil) leaf(i, nif) base.Pos = base.Pos.WithNotStmt() - nif.SetLeft(typecheck(nif.Left(), ctxExpr)) - nif.SetLeft(defaultlit(nif.Left(), nil)) + nif.Cond = typecheck(nif.Cond, ctxExpr) + nif.Cond = defaultlit(nif.Cond, nil) out.Append(nif) - out = nif.PtrRlist() + out = &nif.Else } return } half := lo + n/2 nif := ir.NewIfStmt(base.Pos, nil, nil, nil) - nif.SetLeft(less(half)) + nif.Cond = less(half) base.Pos = base.Pos.WithNotStmt() - nif.SetLeft(typecheck(nif.Left(), ctxExpr)) - nif.SetLeft(defaultlit(nif.Left(), nil)) - do(lo, half, nif.PtrBody()) - do(half, hi, nif.PtrRlist()) + nif.Cond = typecheck(nif.Cond, ctxExpr) + nif.Cond = defaultlit(nif.Cond, nil) + do(lo, half, &nif.Body) + do(half, hi, &nif.Else) out.Append(nif) } diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index bb5e9fad1e007..73fb6bb1c1f92 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -48,7 +48,7 @@ func TypecheckPackage() { timings.Start("fe", "typecheck", "top1") for i := 0; i < len(Target.Decls); i++ { n := Target.Decls[i] - if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.(*ir.Decl).Left().Name().Alias()) { + if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.(*ir.Decl).X.Name().Alias()) { Target.Decls[i] = typecheck(n, ctxStmt) } } @@ -60,7 +60,7 @@ func TypecheckPackage() { timings.Start("fe", "typecheck", "top2") for i := 0; i < len(Target.Decls); i++ { n := Target.Decls[i] - if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).Left().Name().Alias() { + if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).X.Name().Alias() { Target.Decls[i] = 
typecheck(n, ctxStmt) } } @@ -97,7 +97,7 @@ func TypecheckPackage() { for _, n := range Target.Decls { if n.Op() == ir.ODCLFUNC { n := n.(*ir.Func) - if n.Func().OClosure != nil { + if n.OClosure != nil { Curfn = n capturevars(n) } @@ -142,10 +142,10 @@ func TypecheckFuncBody(n *ir.Func) { Curfn = n decldepth = 1 errorsBefore := base.Errors() - typecheckslice(n.Body(), ctxStmt) + typecheckslice(n.Body, ctxStmt) checkreturn(n) if base.Errors() > errorsBefore { - n.PtrBody().Set(nil) // type errors; do not compile + n.Body.Set(nil) // type errors; do not compile } // Now that we've checked whether n terminates, // we can eliminate some obviously dead code. @@ -387,7 +387,7 @@ func typecheck(n ir.Node, top int) (res ir.Node) { // Skip over parens. for n.Op() == ir.OPAREN { - n = n.(*ir.ParenExpr).Left() + n = n.(*ir.ParenExpr).X } // Resolve definition of name and value of iota lazily. @@ -479,7 +479,7 @@ func typecheck(n ir.Node, top int) (res ir.Node) { switch n.Op() { case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH: n := n.(*ir.CallExpr) - if t := n.Left().Type(); t != nil && t.Kind() == types.TFUNC { + if t := n.X.Type(); t != nil && t.Kind() == types.TFUNC { nr := t.NumResults() isMulti = nr > 1 if nr == 0 { @@ -580,7 +580,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { if n.Op() == ir.ONAME { n := n.(*ir.Name) - if n.SubOp() != 0 && top&ctxCallee == 0 { + if n.BuiltinOp != 0 && top&ctxCallee == 0 { base.Errorf("use of builtin %v not in function call", n.Sym()) n.SetType(nil) return n @@ -615,7 +615,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { if n.Name().Decldepth == 0 { n.Name().Decldepth = decldepth } - if n.SubOp() != 0 { + if n.BuiltinOp != 0 { return n } if top&ctxAssign == 0 { @@ -767,7 +767,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { if !t.IsPtr() { if top&(ctxExpr|ctxStmt) != 0 { - base.Errorf("invalid indirect of %L", n.Left()) + base.Errorf("invalid indirect of %L", n.X) n.SetType(nil) return n } @@ -803,14 +803,14 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { var setLR func() switch n := n.(type) { case *ir.AssignOpStmt: - l, r = n.Left(), n.Right() - setLR = func() { n.SetLeft(l); n.SetRight(r) } + l, r = n.X, n.Y + setLR = func() { n.X = l; n.Y = r } case *ir.BinaryExpr: - l, r = n.Left(), n.Right() - setLR = func() { n.SetLeft(l); n.SetRight(r) } + l, r = n.X, n.Y + setLR = func() { n.X = l; n.Y = r } case *ir.LogicalExpr: - l, r = n.Left(), n.Right() - setLR = func() { n.SetLeft(l); n.SetRight(r) } + l, r = n.X, n.Y + setLR = func() { n.X = l; n.Y = r } } l = typecheck(l, ctxExpr) r = typecheck(r, ctxExpr) @@ -823,13 +823,13 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { if n.Op() == ir.OASOP { n := n.(*ir.AssignOpStmt) checkassign(n, l) - if n.Implicit() && !okforarith[l.Type().Kind()] { + if n.IncDec && !okforarith[l.Type().Kind()] { base.Errorf("invalid operation: %v (non-numeric type %v)", n, l.Type()) n.SetType(nil) return n } // TODO(marvin): Fix Node.EType type union. - op = n.SubOp() + op = n.AsOp } if op == ir.OLSH || op == ir.ORSH { r = defaultlit(r, types.Types[types.TUINT]) @@ -866,13 +866,13 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { // can't be converted to int (see issue #41500). 
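The issue 41500 note above is about error placement: the operands of && and || are each required to be boolean on their own, so the diagnostic lands on the offending side instead of reporting a bogus conversion for the whole expression.

	func boolOps(b bool, n int) {
		_ = b && (n > 0) // ok: both operands boolean
		// _ = b && n    // rejected: operator && not defined on int
	}
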
if n.Op() == ir.OANDAND || n.Op() == ir.OOROR { n := n.(*ir.LogicalExpr) - if !n.Left().Type().IsBoolean() { - base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Left().Type())) + if !n.X.Type().IsBoolean() { + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.X.Type())) n.SetType(nil) return n } - if !n.Right().Type().IsBoolean() { - base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Right().Type())) + if !n.Y.Type().IsBoolean() { + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Y.Type())) n.SetType(nil) return n } @@ -1027,9 +1027,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } if r.Op() == ir.OADDSTR { r := r.(*ir.AddStringExpr) - add.PtrList().AppendNodes(r.PtrList()) + add.List.AppendNodes(&r.List) } else { - add.PtrList().Append(r) + add.List.Append(r) } add.SetType(t) return add @@ -1048,8 +1048,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OBITNOT, ir.ONEG, ir.ONOT, ir.OPLUS: n := n.(*ir.UnaryExpr) - n.SetLeft(typecheck(n.Left(), ctxExpr)) - l := n.Left() + n.X = typecheck(n.X, ctxExpr) + l := n.X t := l.Type() if t == nil { n.SetType(nil) @@ -1067,19 +1067,19 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { // exprs case ir.OADDR: n := n.(*ir.AddrExpr) - n.SetLeft(typecheck(n.Left(), ctxExpr)) - if n.Left().Type() == nil { + n.X = typecheck(n.X, ctxExpr) + if n.X.Type() == nil { n.SetType(nil) return n } - switch n.Left().Op() { + switch n.X.Op() { case ir.OARRAYLIT, ir.OMAPLIT, ir.OSLICELIT, ir.OSTRUCTLIT: n.SetOp(ir.OPTRLIT) default: - checklvalue(n.Left(), "take the address of") - r := outervalue(n.Left()) + checklvalue(n.X, "take the address of") + r := outervalue(n.X) if r.Op() == ir.ONAME { r := r.(*ir.Name) if ir.Orig(r) != r { @@ -1094,14 +1094,14 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { r.Name().Defn.Name().SetAddrtaken(true) } } - n.SetLeft(defaultlit(n.Left(), nil)) - if n.Left().Type() == nil { + n.X = defaultlit(n.X, nil) + if n.X.Type() == nil { n.SetType(nil) return n } } - n.SetType(types.NewPtr(n.Left().Type())) + n.SetType(types.NewPtr(n.X.Type())) return n case ir.OCOMPLIT: @@ -1112,26 +1112,26 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { if n.Op() == ir.OXDOT { n = adddot(n) n.SetOp(ir.ODOT) - if n.Left() == nil { + if n.X == nil { n.SetType(nil) return n } } - n.SetLeft(typecheck(n.Left(), ctxExpr|ctxType)) + n.X = typecheck(n.X, ctxExpr|ctxType) - n.SetLeft(defaultlit(n.Left(), nil)) + n.X = defaultlit(n.X, nil) - t := n.Left().Type() + t := n.X.Type() if t == nil { - base.UpdateErrorDot(ir.Line(n), fmt.Sprint(n.Left()), fmt.Sprint(n)) + base.UpdateErrorDot(ir.Line(n), fmt.Sprint(n.X), fmt.Sprint(n)) n.SetType(nil) return n } - s := n.Sym() + s := n.Sel - if n.Left().Op() == ir.OTYPE { + if n.X.Op() == ir.OTYPE { return typecheckMethodExpr(n) } @@ -1145,7 +1145,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { checkwidth(t) } - if n.Sym().IsBlank() { + if n.Sel.IsBlank() { base.Errorf("cannot refer to blank field or method") n.SetType(nil) return n @@ -1155,21 +1155,21 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { // Legitimate field or method lookup failed, try to explain the error switch { case t.IsEmptyInterface(): - base.Errorf("%v undefined (type %v is interface with no methods)", n, n.Left().Type()) + base.Errorf("%v undefined (type %v is interface with no methods)", n, n.X.Type()) case t.IsPtr() && 
t.Elem().IsInterface(): // Pointer to interface is almost always a mistake. - base.Errorf("%v undefined (type %v is pointer to interface, not interface)", n, n.Left().Type()) + base.Errorf("%v undefined (type %v is pointer to interface, not interface)", n, n.X.Type()) case lookdot(n, t, 1) != nil: // Field or method matches by name, but it is not exported. - base.Errorf("%v undefined (cannot refer to unexported field or method %v)", n, n.Sym()) + base.Errorf("%v undefined (cannot refer to unexported field or method %v)", n, n.Sel) default: if mt := lookdot(n, t, 2); mt != nil && visible(mt.Sym) { // Case-insensitive lookup. - base.Errorf("%v undefined (type %v has no field or method %v, but does have %v)", n, n.Left().Type(), n.Sym(), mt.Sym) + base.Errorf("%v undefined (type %v has no field or method %v, but does have %v)", n, n.X.Type(), n.Sel, mt.Sym) } else { - base.Errorf("%v undefined (type %v has no field or method %v)", n, n.Left().Type(), n.Sym()) + base.Errorf("%v undefined (type %v has no field or method %v)", n, n.X.Type(), n.Sel) } } n.SetType(nil) @@ -1183,9 +1183,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.ODOTTYPE: n := n.(*ir.TypeAssertExpr) - n.SetLeft(typecheck(n.Left(), ctxExpr)) - n.SetLeft(defaultlit(n.Left(), nil)) - l := n.Left() + n.X = typecheck(n.X, ctxExpr) + n.X = defaultlit(n.X, nil) + l := n.X t := l.Type() if t == nil { n.SetType(nil) @@ -1197,10 +1197,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } - if n.Right() != nil { - n.SetRight(typecheck(n.Right(), ctxType)) - n.SetType(n.Right().Type()) - n.SetRight(nil) + if n.Ntype != nil { + n.Ntype = typecheck(n.Ntype, ctxType) + n.SetType(n.Ntype.Type()) + n.Ntype = nil if n.Type() == nil { return n } @@ -1229,12 +1229,12 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OINDEX: n := n.(*ir.IndexExpr) - n.SetLeft(typecheck(n.Left(), ctxExpr)) - n.SetLeft(defaultlit(n.Left(), nil)) - n.SetLeft(implicitstar(n.Left())) - l := n.Left() - n.SetRight(typecheck(n.Right(), ctxExpr)) - r := n.Right() + n.X = typecheck(n.X, ctxExpr) + n.X = defaultlit(n.X, nil) + n.X = implicitstar(n.X) + l := n.X + n.Index = typecheck(n.Index, ctxExpr) + r := n.Index t := l.Type() if t == nil || r.Type() == nil { n.SetType(nil) @@ -1247,7 +1247,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case types.TSTRING, types.TARRAY, types.TSLICE: - n.SetRight(indexlit(n.Right())) + n.Index = indexlit(n.Index) if t.IsString() { n.SetType(types.ByteType) } else { @@ -1260,37 +1260,37 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { why = "slice" } - if n.Right().Type() != nil && !n.Right().Type().IsInteger() { - base.Errorf("non-integer %s index %v", why, n.Right()) + if n.Index.Type() != nil && !n.Index.Type().IsInteger() { + base.Errorf("non-integer %s index %v", why, n.Index) return n } - if !n.Bounded() && ir.IsConst(n.Right(), constant.Int) { - x := n.Right().Val() + if !n.Bounded() && ir.IsConst(n.Index, constant.Int) { + x := n.Index.Val() if constant.Sign(x) < 0 { - base.Errorf("invalid %s index %v (index must be non-negative)", why, n.Right()) + base.Errorf("invalid %s index %v (index must be non-negative)", why, n.Index) } else if t.IsArray() && constant.Compare(x, token.GEQ, constant.MakeInt64(t.NumElem())) { - base.Errorf("invalid array index %v (out of bounds for %d-element array)", n.Right(), t.NumElem()) - } else if ir.IsConst(n.Left(), constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(ir.StringVal(n.Left()))))) { - 
base.Errorf("invalid string index %v (out of bounds for %d-byte string)", n.Right(), len(ir.StringVal(n.Left()))) + base.Errorf("invalid array index %v (out of bounds for %d-element array)", n.Index, t.NumElem()) + } else if ir.IsConst(n.X, constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(ir.StringVal(n.X))))) { + base.Errorf("invalid string index %v (out of bounds for %d-byte string)", n.Index, len(ir.StringVal(n.X))) } else if doesoverflow(x, types.Types[types.TINT]) { - base.Errorf("invalid %s index %v (index too large)", why, n.Right()) + base.Errorf("invalid %s index %v (index too large)", why, n.Index) } } case types.TMAP: - n.SetRight(assignconv(n.Right(), t.Key(), "map index")) + n.Index = assignconv(n.Index, t.Key(), "map index") n.SetType(t.Elem()) n.SetOp(ir.OINDEXMAP) - n.SetIndexMapLValue(false) + n.Assigned = false } return n case ir.ORECV: n := n.(*ir.UnaryExpr) - n.SetLeft(typecheck(n.Left(), ctxExpr)) - n.SetLeft(defaultlit(n.Left(), nil)) - l := n.Left() + n.X = typecheck(n.X, ctxExpr) + n.X = defaultlit(n.X, nil) + l := n.X t := l.Type() if t == nil { n.SetType(nil) @@ -1313,10 +1313,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OSEND: n := n.(*ir.SendStmt) - n.SetLeft(typecheck(n.Left(), ctxExpr)) - n.SetRight(typecheck(n.Right(), ctxExpr)) - n.SetLeft(defaultlit(n.Left(), nil)) - t := n.Left().Type() + n.Chan = typecheck(n.Chan, ctxExpr) + n.Value = typecheck(n.Value, ctxExpr) + n.Chan = defaultlit(n.Chan, nil) + t := n.Chan.Type() if t == nil { return n } @@ -1330,8 +1330,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } - n.SetRight(assignconv(n.Right(), t.Elem(), "send")) - if n.Right().Type() == nil { + n.Value = assignconv(n.Value, t.Elem(), "send") + if n.Value.Type() == nil { return n } return n @@ -1351,17 +1351,17 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { base.Fatalf("invalid type %v for OSLICEHEADER", n.Type()) } - if n.Left() == nil || n.Left().Type() == nil || !n.Left().Type().IsUnsafePtr() { + if n.Ptr == nil || n.Ptr.Type() == nil || !n.Ptr.Type().IsUnsafePtr() { base.Fatalf("need unsafe.Pointer for OSLICEHEADER") } - if x := n.List().Len(); x != 2 { + if x := n.LenCap.Len(); x != 2 { base.Fatalf("expected 2 params (len, cap) for OSLICEHEADER, got %d", x) } - n.SetLeft(typecheck(n.Left(), ctxExpr)) - l := typecheck(n.List().First(), ctxExpr) - c := typecheck(n.List().Second(), ctxExpr) + n.Ptr = typecheck(n.Ptr, ctxExpr) + l := typecheck(n.LenCap.First(), ctxExpr) + c := typecheck(n.LenCap.Second(), ctxExpr) l = defaultlit(l, types.Types[types.TINT]) c = defaultlit(c, types.Types[types.TINT]) @@ -1377,8 +1377,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { base.Fatalf("len larger than cap for OSLICEHEADER") } - n.List().SetFirst(l) - n.List().SetSecond(c) + n.LenCap.SetFirst(l) + n.LenCap.SetSecond(c) return n case ir.OMAKESLICECOPY: @@ -1397,28 +1397,28 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { base.Fatalf("invalid type %v for OMAKESLICECOPY", n.Type()) } - if n.Left() == nil { + if n.Len == nil { base.Fatalf("missing len argument for OMAKESLICECOPY") } - if n.Right() == nil { + if n.Cap == nil { base.Fatalf("missing slice argument to copy for OMAKESLICECOPY") } - n.SetLeft(typecheck(n.Left(), ctxExpr)) - n.SetRight(typecheck(n.Right(), ctxExpr)) + n.Len = typecheck(n.Len, ctxExpr) + n.Cap = typecheck(n.Cap, ctxExpr) - n.SetLeft(defaultlit(n.Left(), types.Types[types.TINT])) + n.Len = defaultlit(n.Len, types.Types[types.TINT]) - if 
!n.Left().Type().IsInteger() && n.Type().Kind() != types.TIDEAL { + if !n.Len.Type().IsInteger() && n.Type().Kind() != types.TIDEAL { base.Errorf("non-integer len argument in OMAKESLICECOPY") } - if ir.IsConst(n.Left(), constant.Int) { - if doesoverflow(n.Left().Val(), types.Types[types.TINT]) { + if ir.IsConst(n.Len, constant.Int) { + if doesoverflow(n.Len.Val(), types.Types[types.TINT]) { base.Fatalf("len for OMAKESLICECOPY too large") } - if constant.Sign(n.Left().Val()) < 0 { + if constant.Sign(n.Len.Val()) < 0 { base.Fatalf("len for OMAKESLICECOPY must be non-negative") } } @@ -1426,33 +1426,33 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OSLICE, ir.OSLICE3: n := n.(*ir.SliceExpr) - n.SetLeft(typecheck(n.Left(), ctxExpr)) + n.X = typecheck(n.X, ctxExpr) low, high, max := n.SliceBounds() hasmax := n.Op().IsSlice3() low = typecheck(low, ctxExpr) high = typecheck(high, ctxExpr) max = typecheck(max, ctxExpr) - n.SetLeft(defaultlit(n.Left(), nil)) + n.X = defaultlit(n.X, nil) low = indexlit(low) high = indexlit(high) max = indexlit(max) n.SetSliceBounds(low, high, max) - l := n.Left() + l := n.X if l.Type() == nil { n.SetType(nil) return n } if l.Type().IsArray() { - if !islvalue(n.Left()) { + if !islvalue(n.X) { base.Errorf("invalid operation %v (slice of unaddressable value)", n) n.SetType(nil) return n } - addr := nodAddr(n.Left()) + addr := nodAddr(n.X) addr.SetImplicit(true) - n.SetLeft(typecheck(addr, ctxExpr)) - l = n.Left() + n.X = typecheck(addr, ctxExpr) + l = n.X } t := l.Type() var tp *types.Type @@ -1507,27 +1507,27 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.Use = ir.CallUseStmt } typecheckslice(n.Init().Slice(), ctxStmt) // imported rewritten f(g()) calls (#30907) - n.SetLeft(typecheck(n.Left(), ctxExpr|ctxType|ctxCallee)) - if n.Left().Diag() { + n.X = typecheck(n.X, ctxExpr|ctxType|ctxCallee) + if n.X.Diag() { n.SetDiag(true) } - l := n.Left() + l := n.X - if l.Op() == ir.ONAME && l.(*ir.Name).SubOp() != 0 { + if l.Op() == ir.ONAME && l.(*ir.Name).BuiltinOp != 0 { l := l.(*ir.Name) - if n.IsDDD() && l.SubOp() != ir.OAPPEND { + if n.IsDDD && l.BuiltinOp != ir.OAPPEND { base.Errorf("invalid use of ... with builtin %v", l) } // builtin: OLEN, OCAP, etc. - switch l.SubOp() { + switch l.BuiltinOp { default: base.Fatalf("unknown builtin %v", l) case ir.OAPPEND, ir.ODELETE, ir.OMAKE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER: - n.SetOp(l.SubOp()) - n.SetLeft(nil) + n.SetOp(l.BuiltinOp) + n.X = nil n.SetTypecheck(0) // re-typechecking new op is OK, not a loop return typecheck(n, top) @@ -1540,7 +1540,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetType(nil) return n } - u := ir.NewUnaryExpr(n.Pos(), l.SubOp(), arg) + u := ir.NewUnaryExpr(n.Pos(), l.BuiltinOp, arg) return typecheck(initExpr(n.Init().Slice(), u), top) // typecheckargs can add to old.Init case ir.OCOMPLEX, ir.OCOPY: @@ -1550,16 +1550,16 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetType(nil) return n } - b := ir.NewBinaryExpr(n.Pos(), l.SubOp(), arg1, arg2) + b := ir.NewBinaryExpr(n.Pos(), l.BuiltinOp, arg1, arg2) return typecheck(initExpr(n.Init().Slice(), b), top) // typecheckargs can add to old.Init } panic("unreachable") } - n.SetLeft(defaultlit(n.Left(), nil)) - l = n.Left() + n.X = defaultlit(n.X, nil) + l = n.X if l.Op() == ir.OTYPE { - if n.IsDDD() { + if n.IsDDD { if !l.Type().Broke() { base.Errorf("invalid use of ... 
in type conversion to %v", l.Type()) } @@ -1600,7 +1600,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { // It isn't necessary, so just do a sanity check. tp := t.Recv().Type - if l.Left() == nil || !types.Identical(l.Left().Type(), tp) { + if l.X == nil || !types.Identical(l.X.Type(), tp) { base.Fatalf("method receiver") } @@ -1622,15 +1622,15 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } } - typecheckaste(ir.OCALL, n.Left(), n.IsDDD(), t.Params(), n.List(), func() string { return fmt.Sprintf("argument to %v", n.Left()) }) + typecheckaste(ir.OCALL, n.X, n.IsDDD, t.Params(), n.Args, func() string { return fmt.Sprintf("argument to %v", n.X) }) if t.NumResults() == 0 { return n } if t.NumResults() == 1 { n.SetType(l.Type().Results().Field(0).Type) - if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.ONAME { - if sym := n.Left().(*ir.Name).Sym(); isRuntimePkg(sym.Pkg) && sym.Name == "getg" { + if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME { + if sym := n.X.(*ir.Name).Sym(); isRuntimePkg(sym.Pkg) && sym.Name == "getg" { // Emit code for runtime.getg() directly instead of calling function. // Most such rewrites (for example the similar one for math.Sqrt) should be done in walk, // so that the ordering pass can make sure to preserve the semantics of the original code @@ -1659,10 +1659,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OCAP, ir.OLEN: n := n.(*ir.UnaryExpr) - n.SetLeft(typecheck(n.Left(), ctxExpr)) - n.SetLeft(defaultlit(n.Left(), nil)) - n.SetLeft(implicitstar(n.Left())) - l := n.Left() + n.X = typecheck(n.X, ctxExpr) + n.X = defaultlit(n.X, nil) + n.X = implicitstar(n.X) + l := n.X t := l.Type() if t == nil { n.SetType(nil) @@ -1686,8 +1686,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OREAL, ir.OIMAG: n := n.(*ir.UnaryExpr) - n.SetLeft(typecheck(n.Left(), ctxExpr)) - l := n.Left() + n.X = typecheck(n.X, ctxExpr) + l := n.X t := l.Type() if t == nil { n.SetType(nil) @@ -1711,8 +1711,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OCOMPLEX: n := n.(*ir.BinaryExpr) - l := typecheck(n.Left(), ctxExpr) - r := typecheck(n.Right(), ctxExpr) + l := typecheck(n.X, ctxExpr) + r := typecheck(n.Y, ctxExpr) if l.Type() == nil || r.Type() == nil { n.SetType(nil) return n @@ -1722,8 +1722,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetType(nil) return n } - n.SetLeft(l) - n.SetRight(r) + n.X = l + n.Y = r if !types.Identical(l.Type(), r.Type()) { base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type()) @@ -1752,9 +1752,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OCLOSE: n := n.(*ir.UnaryExpr) - n.SetLeft(typecheck(n.Left(), ctxExpr)) - n.SetLeft(defaultlit(n.Left(), nil)) - l := n.Left() + n.X = typecheck(n.X, ctxExpr) + n.X = defaultlit(n.X, nil) + l := n.X t := l.Type() if t == nil { n.SetType(nil) @@ -1776,7 +1776,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.ODELETE: n := n.(*ir.CallExpr) typecheckargs(n) - args := n.List() + args := n.Args if args.Len() == 0 { base.Errorf("missing arguments to delete") n.SetType(nil) @@ -1809,7 +1809,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OAPPEND: n := n.(*ir.CallExpr) typecheckargs(n) - args := n.List() + args := n.Args if args.Len() == 0 { base.Errorf("missing arguments to append") n.SetType(nil) @@ -1835,7 +1835,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } - if n.IsDDD() { + if n.IsDDD { if args.Len() == 1 { base.Errorf("cannot use ... 
on first argument to append") n.SetType(nil) @@ -1870,39 +1870,39 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OCOPY: n := n.(*ir.BinaryExpr) n.SetType(types.Types[types.TINT]) - n.SetLeft(typecheck(n.Left(), ctxExpr)) - n.SetLeft(defaultlit(n.Left(), nil)) - n.SetRight(typecheck(n.Right(), ctxExpr)) - n.SetRight(defaultlit(n.Right(), nil)) - if n.Left().Type() == nil || n.Right().Type() == nil { + n.X = typecheck(n.X, ctxExpr) + n.X = defaultlit(n.X, nil) + n.Y = typecheck(n.Y, ctxExpr) + n.Y = defaultlit(n.Y, nil) + if n.X.Type() == nil || n.Y.Type() == nil { n.SetType(nil) return n } // copy([]byte, string) - if n.Left().Type().IsSlice() && n.Right().Type().IsString() { - if types.Identical(n.Left().Type().Elem(), types.ByteType) { + if n.X.Type().IsSlice() && n.Y.Type().IsString() { + if types.Identical(n.X.Type().Elem(), types.ByteType) { return n } - base.Errorf("arguments to copy have different element types: %L and string", n.Left().Type()) + base.Errorf("arguments to copy have different element types: %L and string", n.X.Type()) n.SetType(nil) return n } - if !n.Left().Type().IsSlice() || !n.Right().Type().IsSlice() { - if !n.Left().Type().IsSlice() && !n.Right().Type().IsSlice() { - base.Errorf("arguments to copy must be slices; have %L, %L", n.Left().Type(), n.Right().Type()) - } else if !n.Left().Type().IsSlice() { - base.Errorf("first argument to copy should be slice; have %L", n.Left().Type()) + if !n.X.Type().IsSlice() || !n.Y.Type().IsSlice() { + if !n.X.Type().IsSlice() && !n.Y.Type().IsSlice() { + base.Errorf("arguments to copy must be slices; have %L, %L", n.X.Type(), n.Y.Type()) + } else if !n.X.Type().IsSlice() { + base.Errorf("first argument to copy should be slice; have %L", n.X.Type()) } else { - base.Errorf("second argument to copy should be slice or string; have %L", n.Right().Type()) + base.Errorf("second argument to copy should be slice or string; have %L", n.Y.Type()) } n.SetType(nil) return n } - if !types.Identical(n.Left().Type().Elem(), n.Right().Type().Elem()) { - base.Errorf("arguments to copy have different element types: %L and %L", n.Left().Type(), n.Right().Type()) + if !types.Identical(n.X.Type().Elem(), n.Y.Type().Elem()) { + base.Errorf("arguments to copy have different element types: %L and %L", n.X.Type(), n.Y.Type()) n.SetType(nil) return n } @@ -1911,17 +1911,17 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OCONV: n := n.(*ir.ConvExpr) checkwidth(n.Type()) // ensure width is calculated for backend - n.SetLeft(typecheck(n.Left(), ctxExpr)) - n.SetLeft(convlit1(n.Left(), n.Type(), true, nil)) - t := n.Left().Type() + n.X = typecheck(n.X, ctxExpr) + n.X = convlit1(n.X, n.Type(), true, nil) + t := n.X.Type() if t == nil || n.Type() == nil { n.SetType(nil) return n } - op, why := convertop(n.Left().Op() == ir.OLITERAL, t, n.Type()) + op, why := convertop(n.X.Op() == ir.OLITERAL, t, n.Type()) if op == ir.OXXX { - if !n.Diag() && !n.Type().Broke() && !n.Left().Diag() { - base.Errorf("cannot convert %L to type %v%s", n.Left(), n.Type(), why) + if !n.Diag() && !n.Type().Broke() && !n.X.Diag() { + base.Errorf("cannot convert %L to type %v%s", n.X, n.Type(), why) n.SetDiag(true) } n.SetOp(ir.OCONV) @@ -1947,7 +1947,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { // ok case ir.OSTR2RUNES: - if n.Left().Op() == ir.OLITERAL { + if n.X.Op() == ir.OLITERAL { return stringtoruneslit(n) } } @@ -1955,14 +1955,14 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OMAKE: n := n.(*ir.CallExpr) - args := 
n.List().Slice() + args := n.Args.Slice() if len(args) == 0 { base.Errorf("missing argument to make") n.SetType(nil) return n } - n.PtrList().Set(nil) + n.Args.Set(nil) l := args[0] l = typecheck(l, ctxType) t := l.Type() @@ -2063,26 +2063,26 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.ONEW: n := n.(*ir.UnaryExpr) - if n.Left() == nil { + if n.X == nil { // Fatalf because the OCALL above checked for us, // so this must be an internally-generated mistake. base.Fatalf("missing argument to new") } - l := n.Left() + l := n.X l = typecheck(l, ctxType) t := l.Type() if t == nil { n.SetType(nil) return n } - n.SetLeft(l) + n.X = l n.SetType(types.NewPtr(t)) return n case ir.OPRINT, ir.OPRINTN: n := n.(*ir.CallExpr) typecheckargs(n) - ls := n.List().Slice() + ls := n.Args.Slice() for i1, n1 := range ls { // Special case for print: int constant is int64, not int. if ir.IsConst(n1, constant.Int) { @@ -2095,9 +2095,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OPANIC: n := n.(*ir.UnaryExpr) - n.SetLeft(typecheck(n.Left(), ctxExpr)) - n.SetLeft(defaultlit(n.Left(), types.Types[types.TINTER])) - if n.Left().Type() == nil { + n.X = typecheck(n.X, ctxExpr) + n.X = defaultlit(n.X, types.Types[types.TINTER]) + if n.X.Type() == nil { n.SetType(nil) return n } @@ -2105,7 +2105,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.ORECOVER: n := n.(*ir.CallExpr) - if n.List().Len() != 0 { + if n.Args.Len() != 0 { base.Errorf("too many arguments to recover") n.SetType(nil) return n @@ -2124,8 +2124,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OITAB: n := n.(*ir.UnaryExpr) - n.SetLeft(typecheck(n.Left(), ctxExpr)) - t := n.Left().Type() + n.X = typecheck(n.X, ctxExpr) + t := n.X.Type() if t == nil { n.SetType(nil) return n @@ -2145,8 +2145,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OSPTR: n := n.(*ir.UnaryExpr) - n.SetLeft(typecheck(n.Left(), ctxExpr)) - t := n.Left().Type() + n.X = typecheck(n.X, ctxExpr) + t := n.X.Type() if t == nil { n.SetType(nil) return n @@ -2166,13 +2166,13 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OCFUNC: n := n.(*ir.UnaryExpr) - n.SetLeft(typecheck(n.Left(), ctxExpr)) + n.X = typecheck(n.X, ctxExpr) n.SetType(types.Types[types.TUINTPTR]) return n case ir.OCONVNOP: n := n.(*ir.ConvExpr) - n.SetLeft(typecheck(n.Left(), ctxExpr)) + n.X = typecheck(n.X, ctxExpr) return n // statements @@ -2181,8 +2181,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { typecheckas(n) // Code that creates temps does not bother to set defn, so do it here. 
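An aside on the builtin-call cases just above (OCOPY and OMAKE in particular): copy admits exactly one mixed form, string into []byte, and make typechecks its first argument as a type and then dispatches on its kind to validate the length and capacity arguments. A standalone, source-level illustration (all names invented for the example):

    package main

    import "fmt"

    func main() {
        dst := make([]byte, 5)      // OMAKE on a slice: length, optional capacity
        n := copy(dst, "hello")     // OCOPY: []byte <- string is the one permitted mixed form
        fmt.Println(n, string(dst)) // 5 hello

        b := make([]int, 2, 8)
        fmt.Println(copy(b, []int{1, 2, 3}), b, cap(b)) // 2 [1 2] 8
    }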
- if n.Left().Op() == ir.ONAME && ir.IsAutoTmp(n.Left()) { - n.Left().Name().Defn = n + if n.X.Op() == ir.ONAME && ir.IsAutoTmp(n.X) { + n.X.Name().Defn = n } return n @@ -2201,7 +2201,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OBLOCK: n := n.(*ir.BlockStmt) - typecheckslice(n.List().Slice(), ctxStmt) + typecheckslice(n.List.Slice(), ctxStmt) return n case ir.OLABEL: @@ -2216,8 +2216,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.ODEFER, ir.OGO: n := n.(*ir.GoDeferStmt) - n.SetLeft(typecheck(n.Left(), ctxStmt|ctxExpr)) - if !n.Left().Diag() { + n.Call = typecheck(n.Call, ctxStmt|ctxExpr) + if !n.Call.Diag() { checkdefergo(n) } return n @@ -2226,35 +2226,35 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n := n.(*ir.ForStmt) typecheckslice(n.Init().Slice(), ctxStmt) decldepth++ - n.SetLeft(typecheck(n.Left(), ctxExpr)) - n.SetLeft(defaultlit(n.Left(), nil)) - if n.Left() != nil { - t := n.Left().Type() + n.Cond = typecheck(n.Cond, ctxExpr) + n.Cond = defaultlit(n.Cond, nil) + if n.Cond != nil { + t := n.Cond.Type() if t != nil && !t.IsBoolean() { - base.Errorf("non-bool %L used as for condition", n.Left()) + base.Errorf("non-bool %L used as for condition", n.Cond) } } - n.SetRight(typecheck(n.Right(), ctxStmt)) + n.Post = typecheck(n.Post, ctxStmt) if n.Op() == ir.OFORUNTIL { - typecheckslice(n.List().Slice(), ctxStmt) + typecheckslice(n.Late.Slice(), ctxStmt) } - typecheckslice(n.Body().Slice(), ctxStmt) + typecheckslice(n.Body.Slice(), ctxStmt) decldepth-- return n case ir.OIF: n := n.(*ir.IfStmt) typecheckslice(n.Init().Slice(), ctxStmt) - n.SetLeft(typecheck(n.Left(), ctxExpr)) - n.SetLeft(defaultlit(n.Left(), nil)) - if n.Left() != nil { - t := n.Left().Type() + n.Cond = typecheck(n.Cond, ctxExpr) + n.Cond = defaultlit(n.Cond, nil) + if n.Cond != nil { + t := n.Cond.Type() if t != nil && !t.IsBoolean() { - base.Errorf("non-bool %L used as if condition", n.Left()) + base.Errorf("non-bool %L used as if condition", n.Cond) } } - typecheckslice(n.Body().Slice(), ctxStmt) - typecheckslice(n.Rlist().Slice(), ctxStmt) + typecheckslice(n.Body.Slice(), ctxStmt) + typecheckslice(n.Else.Slice(), ctxStmt) return n case ir.ORETURN: @@ -2266,10 +2266,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } - if hasNamedResults(Curfn) && n.List().Len() == 0 { + if hasNamedResults(Curfn) && n.Results.Len() == 0 { return n } - typecheckaste(ir.ORETURN, nil, false, Curfn.Type().Results(), n.List(), func() string { return "return argument" }) + typecheckaste(ir.ORETURN, nil, false, Curfn.Type().Results(), n.Results, func() string { return "return argument" }) return n case ir.ORETJMP: @@ -2300,13 +2300,13 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.ODCLCONST: n := n.(*ir.Decl) - n.SetLeft(typecheck(n.Left(), ctxExpr)) + n.X = typecheck(n.X, ctxExpr) return n case ir.ODCLTYPE: n := n.(*ir.Decl) - n.SetLeft(typecheck(n.Left(), ctxType)) - checkwidth(n.Left().Type()) + n.X = typecheck(n.X, ctxType) + checkwidth(n.X.Type()) return n } @@ -2321,13 +2321,13 @@ func typecheckargs(n ir.Node) { default: base.Fatalf("typecheckargs %+v", n.Op()) case *ir.CallExpr: - list = n.List().Slice() - if n.IsDDD() { + list = n.Args.Slice() + if n.IsDDD { typecheckslice(list, ctxExpr) return } case *ir.ReturnStmt: - list = n.List().Slice() + list = n.Results.Slice() } if len(list) != 1 { typecheckslice(list, ctxExpr) @@ -2348,7 +2348,7 @@ func typecheckargs(n ir.Node) { } as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - 
as.PtrRlist().Append(list...) + as.Rhs.Append(list...) // If we're outside of function context, then this call will // be executed during the generated init function. However, @@ -2363,7 +2363,7 @@ func typecheckargs(n ir.Node) { for _, f := range t.FieldSlice() { t := temp(f.Type) as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, t)) - as.PtrList().Append(t) + as.Lhs.Append(t) list = append(list, t) } if static { @@ -2372,9 +2372,9 @@ func typecheckargs(n ir.Node) { switch n := n.(type) { case *ir.CallExpr: - n.PtrList().Set(list) + n.Args.Set(list) case *ir.ReturnStmt: - n.PtrList().Set(list) + n.Results.Set(list) } n.PtrInit().Append(typecheck(as, ctxStmt)) @@ -2425,7 +2425,7 @@ func checkdefergo(n *ir.GoDeferStmt) { what = "go" } - switch n.Left().Op() { + switch n.Call.Op() { // ok case ir.OCALLINTER, ir.OCALLMETH, @@ -2451,16 +2451,16 @@ func checkdefergo(n *ir.GoDeferStmt) { ir.ONEW, ir.OREAL, ir.OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof - if orig := ir.Orig(n.Left()); orig.Op() == ir.OCONV { + if orig := ir.Orig(n.Call); orig.Op() == ir.OCONV { break } - base.ErrorfAt(n.Pos(), "%s discards result of %v", what, n.Left()) + base.ErrorfAt(n.Pos(), "%s discards result of %v", what, n.Call) return } // type is broken or missing, most likely a method call on a broken type // we will warn about the broken type elsewhere. no need to emit a potentially confusing error - if n.Left().Type() == nil || n.Left().Type().Broke() { + if n.Call.Type() == nil || n.Call.Type().Broke() { return } @@ -2493,31 +2493,31 @@ func implicitstar(n ir.Node) ir.Node { } func needOneArg(n *ir.CallExpr, f string, args ...interface{}) (ir.Node, bool) { - if n.List().Len() == 0 { + if n.Args.Len() == 0 { p := fmt.Sprintf(f, args...) base.Errorf("missing argument to %s: %v", p, n) return nil, false } - if n.List().Len() > 1 { + if n.Args.Len() > 1 { p := fmt.Sprintf(f, args...) base.Errorf("too many arguments to %s: %v", p, n) - return n.List().First(), false + return n.Args.First(), false } - return n.List().First(), true + return n.Args.First(), true } func needTwoArgs(n *ir.CallExpr) (ir.Node, ir.Node, bool) { - if n.List().Len() != 2 { - if n.List().Len() < 2 { + if n.Args.Len() != 2 { + if n.Args.Len() < 2 { base.Errorf("not enough arguments in call to %v", n) } else { base.Errorf("too many arguments in call to %v", n) } return nil, nil, false } - return n.List().First(), n.List().Second(), true + return n.Args.First(), n.Args.Second(), true } func lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field { @@ -2556,7 +2556,7 @@ func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) { defer tracePrint("typecheckMethodExpr", n)(&res) } - t := n.Left().Type() + t := n.X.Type() // Compute the method set for t. 
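typecheckargs, which ends above, rewrites a call f(g()) whose sole argument is a multi-value call: g's results are assigned to fresh temporaries through an OAS2 statement, and those temporaries become the call's arguments. A rough source-level equivalent (t1 and t2 stand in for compiler temporaries):

    package main

    import "fmt"

    func g() (int, string)  { return 42, "hi" }
    func f(n int, s string) { fmt.Println(n, s) }

    func main() {
        f(g()) // compiled roughly as: t1, t2 := g(); f(t1, t2)

        t1, t2 := g() // the same rewrite spelled out by hand
        f(t1, t2)
    }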
var ms *types.Fields @@ -2565,7 +2565,7 @@ func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) { } else { mt := methtype(t) if mt == nil { - base.Errorf("%v undefined (type %v has no method %v)", n, t, n.Sym()) + base.Errorf("%v undefined (type %v has no method %v)", n, t, n.Sel) n.SetType(nil) return n } @@ -2584,7 +2584,7 @@ func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) { } } - s := n.Sym() + s := n.Sel m := lookdot1(n, s, t, ms, 0) if m == nil { if lookdot1(n, s, t, ms, 1) != nil { @@ -2604,10 +2604,10 @@ func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) { return n } - me := ir.NewMethodExpr(n.Pos(), n.Left().Type(), m) - me.SetType(methodfunc(m.Type, n.Left().Type())) + me := ir.NewMethodExpr(n.Pos(), n.X.Type(), m) + me.SetType(methodfunc(m.Type, n.X.Type())) f := NewName(methodSym(t, m.Sym)) - f.SetClass(ir.PFUNC) + f.Class_ = ir.PFUNC f.SetType(me.Type()) me.FuncName_ = f @@ -2635,7 +2635,7 @@ func derefall(t *types.Type) *types.Type { } func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { - s := n.Sym() + s := n.Sel dowidth(t) var f1 *types.Field @@ -2644,7 +2644,7 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { } var f2 *types.Field - if n.Left().Type() == t || n.Left().Type().Sym() == nil { + if n.X.Type() == t || n.X.Type().Sym() == nil { mt := methtype(t) if mt != nil { f2 = lookdot1(n, s, mt, mt.Methods(), dostrcmp) @@ -2657,18 +2657,18 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { return f1 } if f2 != nil { - base.Errorf("%v is both field and method", n.Sym()) + base.Errorf("%v is both field and method", n.Sel) } if f1.Offset == types.BADWIDTH { base.Fatalf("lookdot badwidth %v %p", f1, f1) } - n.SetOffset(f1.Offset) + n.Offset = f1.Offset n.SetType(f1.Type) if t.IsInterface() { - if n.Left().Type().IsPtr() { - star := ir.NewStarExpr(base.Pos, n.Left()) + if n.X.Type().IsPtr() { + star := ir.NewStarExpr(base.Pos, n.X) star.SetImplicit(true) - n.SetLeft(typecheck(star, ctxExpr)) + n.X = typecheck(star, ctxExpr) } n.SetOp(ir.ODOTINTER) @@ -2682,29 +2682,29 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { // Already in the process of diagnosing an error. return f2 } - tt := n.Left().Type() + tt := n.X.Type() dowidth(tt) rcvr := f2.Type.Recv().Type if !types.Identical(rcvr, tt) { if rcvr.IsPtr() && types.Identical(rcvr.Elem(), tt) { - checklvalue(n.Left(), "call pointer method on") - addr := nodAddr(n.Left()) + checklvalue(n.X, "call pointer method on") + addr := nodAddr(n.X) addr.SetImplicit(true) - n.SetLeft(typecheck(addr, ctxType|ctxExpr)) + n.X = typecheck(addr, ctxType|ctxExpr) } else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) { - star := ir.NewStarExpr(base.Pos, n.Left()) + star := ir.NewStarExpr(base.Pos, n.X) star.SetImplicit(true) - n.SetLeft(typecheck(star, ctxType|ctxExpr)) + n.X = typecheck(star, ctxType|ctxExpr) } else if tt.IsPtr() && tt.Elem().IsPtr() && types.Identical(derefall(tt), derefall(rcvr)) { - base.Errorf("calling method %v with receiver %L requires explicit dereference", n.Sym(), n.Left()) + base.Errorf("calling method %v with receiver %L requires explicit dereference", n.Sel, n.X) for tt.IsPtr() { // Stop one level early for method with pointer receiver. 
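The receiver-adjustment logic in lookdot, continuing below, is what inserts the implicit & and * around method calls; only a doubly-indirect receiver is rejected and requires an explicit dereference. In ordinary source terms (Counter is an invented example type):

    package main

    import "fmt"

    type Counter struct{ n int }

    func (c *Counter) Inc()    { c.n++ }      // pointer receiver
    func (c Counter) Get() int { return c.n } // value receiver

    func main() {
        var c Counter
        c.Inc() // rewritten to (&c).Inc(): c is addressable

        p := &c
        fmt.Println(p.Get()) // rewritten to (*p).Get(): prints 1

        pp := &p
        // pp.Inc() // error: calling method Inc with receiver pp (type **Counter)
        //          //        requires explicit dereference
        (*pp).Inc()
        fmt.Println(c.n) // 2
    }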
if rcvr.IsPtr() && !tt.Elem().IsPtr() { break } - star := ir.NewStarExpr(base.Pos, n.Left()) + star := ir.NewStarExpr(base.Pos, n.X) star.SetImplicit(true) - n.SetLeft(typecheck(star, ctxType|ctxExpr)) + n.X = typecheck(star, ctxType|ctxExpr) tt = tt.Elem() } } else { @@ -2712,24 +2712,24 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { } } - implicit, ll := n.Implicit(), n.Left() + implicit, ll := n.Implicit(), n.X for ll != nil && (ll.Op() == ir.ODOT || ll.Op() == ir.ODOTPTR || ll.Op() == ir.ODEREF) { switch l := ll.(type) { case *ir.SelectorExpr: - implicit, ll = l.Implicit(), l.Left() + implicit, ll = l.Implicit(), l.X case *ir.StarExpr: - implicit, ll = l.Implicit(), l.Left() + implicit, ll = l.Implicit(), l.X } } if implicit && ll.Type().IsPtr() && ll.Type().Sym() != nil && ll.Type().Sym().Def != nil && ir.AsNode(ll.Type().Sym().Def).Op() == ir.OTYPE { // It is invalid to automatically dereference a named pointer type when selecting a method. // Make n.Left == ll to clarify error message. - n.SetLeft(ll) + n.X = ll return nil } - n.SetSym(methodSym(n.Left().Type(), f2.Sym)) - n.SetOffset(f2.Offset) + n.Sel = methodSym(n.X.Type(), f2.Sym) + n.Offset = f2.Offset n.SetType(f2.Type) n.SetOp(ir.ODOTMETH) n.Selection = f2 @@ -2968,18 +2968,18 @@ func pushtype(nn ir.Node, t *types.Type) ir.Node { return nn } n := nn.(*ir.CompLitExpr) - if n.Right() != nil { + if n.Ntype != nil { return n } switch { case iscomptype(t): // For T, return T{...}. - n.SetRight(ir.TypeNode(t)) + n.Ntype = ir.TypeNode(t) case t.IsPtr() && iscomptype(t.Elem()): // For *T, return &T{...}. - n.SetRight(ir.TypeNode(t.Elem())) + n.Ntype = ir.TypeNode(t.Elem()) addr := nodAddrAt(n.Pos(), n) addr.SetImplicit(true) @@ -3000,7 +3000,7 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { base.Pos = lno }() - if n.Right() == nil { + if n.Ntype == nil { base.ErrorfAt(n.Pos(), "missing type in composite literal") n.SetType(nil) return n @@ -3009,25 +3009,25 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { // Save original node (including n.Right) n.SetOrig(ir.Copy(n)) - setlineno(n.Right()) + setlineno(n.Ntype) // Need to handle [...]T arrays specially. 
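pushtype, shown above, supplies the elided type of a composite literal from context: an expected T yields T{...}, and an expected *T becomes &T{...} with an implicit address-of. The [...]N form handled next infers the array length from the literal. For example:

    package main

    import "fmt"

    type Point struct{ X, Y int }

    func main() {
        pts := []Point{{1, 2}, {3, 4}}      // inner literals get Point pushed onto them
        ptrs := []*Point{{5, 6}}            // *Point context: literal becomes &Point{5, 6}
        grid := [...]Point{{7, 8}, {9, 10}} // [...]Point: length 2 inferred from the literal
        fmt.Println(pts, *ptrs[0], len(grid))
    }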
- if array, ok := n.Right().(*ir.ArrayType); ok && array.Elem != nil && array.Len == nil { + if array, ok := n.Ntype.(*ir.ArrayType); ok && array.Elem != nil && array.Len == nil { array.Elem = typecheck(array.Elem, ctxType) elemType := array.Elem.Type() if elemType == nil { n.SetType(nil) return n } - length := typecheckarraylit(elemType, -1, n.List().Slice(), "array literal") + length := typecheckarraylit(elemType, -1, n.List.Slice(), "array literal") n.SetOp(ir.OARRAYLIT) n.SetType(types.NewArray(elemType, length)) - n.SetRight(nil) + n.Ntype = nil return n } - n.SetRight(typecheck(n.Right(), ctxType)) - t := n.Right().Type() + n.Ntype = ir.Node(typecheck(n.Ntype, ctxType)).(ir.Ntype) + t := n.Ntype.Type() if t == nil { n.SetType(nil) return n @@ -3040,50 +3040,50 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { n.SetType(nil) case types.TARRAY: - typecheckarraylit(t.Elem(), t.NumElem(), n.List().Slice(), "array literal") + typecheckarraylit(t.Elem(), t.NumElem(), n.List.Slice(), "array literal") n.SetOp(ir.OARRAYLIT) - n.SetRight(nil) + n.Ntype = nil case types.TSLICE: - length := typecheckarraylit(t.Elem(), -1, n.List().Slice(), "slice literal") + length := typecheckarraylit(t.Elem(), -1, n.List.Slice(), "slice literal") n.SetOp(ir.OSLICELIT) - n.SetRight(nil) + n.Ntype = nil n.Len = length case types.TMAP: var cs constSet - for i3, l := range n.List().Slice() { + for i3, l := range n.List.Slice() { setlineno(l) if l.Op() != ir.OKEY { - n.List().SetIndex(i3, typecheck(l, ctxExpr)) + n.List.SetIndex(i3, typecheck(l, ctxExpr)) base.Errorf("missing key in map literal") continue } l := l.(*ir.KeyExpr) - r := l.Left() + r := l.Key r = pushtype(r, t.Key()) r = typecheck(r, ctxExpr) - l.SetLeft(assignconv(r, t.Key(), "map key")) - cs.add(base.Pos, l.Left(), "key", "map literal") + l.Key = assignconv(r, t.Key(), "map key") + cs.add(base.Pos, l.Key, "key", "map literal") - r = l.Right() + r = l.Value r = pushtype(r, t.Elem()) r = typecheck(r, ctxExpr) - l.SetRight(assignconv(r, t.Elem(), "map value")) + l.Value = assignconv(r, t.Elem(), "map value") } n.SetOp(ir.OMAPLIT) - n.SetRight(nil) + n.Ntype = nil case types.TSTRUCT: // Need valid field offsets for Xoffset below. dowidth(t) errored := false - if n.List().Len() != 0 && nokeys(n.List()) { + if n.List.Len() != 0 && nokeys(n.List) { // simple list of variables - ls := n.List().Slice() + ls := n.List.Slice() for i, n1 := range ls { setlineno(n1) n1 = typecheck(n1, ctxExpr) @@ -3104,7 +3104,7 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { // No pushtype allowed here. Must name fields for that. n1 = assignconv(n1, f.Type, "field value") sk := ir.NewStructKeyExpr(base.Pos, f.Sym, n1) - sk.SetOffset(f.Offset) + sk.Offset = f.Offset ls[i] = sk } if len(ls) < t.NumFields() { @@ -3114,13 +3114,13 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { hash := make(map[string]bool) // keyed list - ls := n.List().Slice() + ls := n.List.Slice() for i, l := range ls { setlineno(l) if l.Op() == ir.OKEY { kv := l.(*ir.KeyExpr) - key := kv.Left() + key := kv.Key // Sym might have resolved to name in other top-level // package, because of import dot. 
Redirect to correct sym @@ -3139,7 +3139,7 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { continue } - l = ir.NewStructKeyExpr(l.Pos(), s, kv.Right()) + l = ir.NewStructKeyExpr(l.Pos(), s, kv.Value) ls[i] = l } @@ -3153,22 +3153,22 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { } l := l.(*ir.StructKeyExpr) - f := lookdot1(nil, l.Sym(), t, t.Fields(), 0) + f := lookdot1(nil, l.Field, t, t.Fields(), 0) if f == nil { - if ci := lookdot1(nil, l.Sym(), t, t.Fields(), 2); ci != nil { // Case-insensitive lookup. + if ci := lookdot1(nil, l.Field, t, t.Fields(), 2); ci != nil { // Case-insensitive lookup. if visible(ci.Sym) { - base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", l.Sym(), t, ci.Sym) - } else if nonexported(l.Sym()) && l.Sym().Name == ci.Sym.Name { // Ensure exactness before the suggestion. - base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", l.Sym(), t) + base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", l.Field, t, ci.Sym) + } else if nonexported(l.Field) && l.Field.Name == ci.Sym.Name { // Ensure exactness before the suggestion. + base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", l.Field, t) } else { - base.Errorf("unknown field '%v' in struct literal of type %v", l.Sym(), t) + base.Errorf("unknown field '%v' in struct literal of type %v", l.Field, t) } continue } var f *types.Field - p, _ := dotpath(l.Sym(), t, &f, true) + p, _ := dotpath(l.Field, t, &f, true) if p == nil || f.IsMethod() { - base.Errorf("unknown field '%v' in struct literal of type %v", l.Sym(), t) + base.Errorf("unknown field '%v' in struct literal of type %v", l.Field, t) continue } // dotpath returns the parent embedded types in reverse order. @@ -3176,21 +3176,21 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { for ei := len(p) - 1; ei >= 0; ei-- { ep = append(ep, p[ei].field.Sym.Name) } - ep = append(ep, l.Sym().Name) + ep = append(ep, l.Field.Name) base.Errorf("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), t) continue } fielddup(f.Sym.Name, hash) - l.SetOffset(f.Offset) + l.Offset = f.Offset // No pushtype allowed here. Tried and rejected. 
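The struct-literal cases above implement the familiar source rules: an unkeyed literal must supply every field in order, a keyed one may name any subset, unknown or unexported fields are diagnosed (with a case-insensitive suggestion when one fits), and a promoted field cannot be initialized directly. Illustrated with invented types:

    package main

    type Base struct{ ID int }

    type Config struct {
        Base
        Host string
        Port int
    }

    func main() {
        _ = Config{Base{1}, "localhost", 8080} // unkeyed: every field, in order
        _ = Config{Port: 9090}                 // keyed: any subset
        // _ = Config{Hosts: "x"} // error: unknown field 'Hosts' in struct literal of type Config
        // _ = Config{ID: 7}      // error: cannot use promoted field Base.ID in struct literal
        _ = Config{Base: Base{ID: 7}} // the embedded struct must be spelled out
    }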
- l.SetLeft(typecheck(l.Left(), ctxExpr)) - l.SetLeft(assignconv(l.Left(), f.Type, "field value")) + l.Value = typecheck(l.Value, ctxExpr) + l.Value = assignconv(l.Value, f.Type, "field value") } } n.SetOp(ir.OSTRUCTLIT) - n.SetRight(nil) + n.Ntype = nil } return n @@ -3215,28 +3215,28 @@ func typecheckarraylit(elemType *types.Type, bound int64, elts []ir.Node, ctx st var kv *ir.KeyExpr if elt.Op() == ir.OKEY { elt := elt.(*ir.KeyExpr) - elt.SetLeft(typecheck(elt.Left(), ctxExpr)) - key = indexconst(elt.Left()) + elt.Key = typecheck(elt.Key, ctxExpr) + key = indexconst(elt.Key) if key < 0 { - if !elt.Left().Diag() { + if !elt.Key.Diag() { if key == -2 { base.Errorf("index too large") } else { base.Errorf("index must be non-negative integer constant") } - elt.Left().SetDiag(true) + elt.Key.SetDiag(true) } key = -(1 << 30) // stay negative for a while } kv = elt - r = elt.Right() + r = elt.Value } r = pushtype(r, elemType) r = typecheck(r, ctxExpr) r = assignconv(r, elemType, ctx) if kv != nil { - kv.SetRight(r) + kv.Value = r } else { elts[i] = r } @@ -3280,10 +3280,10 @@ func islvalue(n ir.Node) bool { switch n.Op() { case ir.OINDEX: n := n.(*ir.IndexExpr) - if n.Left().Type() != nil && n.Left().Type().IsArray() { - return islvalue(n.Left()) + if n.X.Type() != nil && n.X.Type().IsArray() { + return islvalue(n.X) } - if n.Left().Type() != nil && n.Left().Type().IsString() { + if n.X.Type() != nil && n.X.Type().IsString() { return false } fallthrough @@ -3292,11 +3292,11 @@ func islvalue(n ir.Node) bool { case ir.ODOT: n := n.(*ir.SelectorExpr) - return islvalue(n.Left()) + return islvalue(n.X) case ir.ONAME: n := n.(*ir.Name) - if n.Class() == ir.PFUNC { + if n.Class_ == ir.PFUNC { return false } return true @@ -3332,7 +3332,7 @@ func checkassign(stmt ir.Node, n ir.Node) { } if n.Op() == ir.OINDEXMAP { n := n.(*ir.IndexExpr) - n.SetIndexMapLValue(true) + n.Assigned = true return } @@ -3342,9 +3342,9 @@ func checkassign(stmt ir.Node, n ir.Node) { } switch { - case n.Op() == ir.ODOT && n.(*ir.SelectorExpr).Left().Op() == ir.OINDEXMAP: + case n.Op() == ir.ODOT && n.(*ir.SelectorExpr).X.Op() == ir.OINDEXMAP: base.Errorf("cannot assign to struct field %v in map", n) - case (n.Op() == ir.OINDEX && n.(*ir.IndexExpr).Left().Type().IsString()) || n.Op() == ir.OSLICESTR: + case (n.Op() == ir.OINDEX && n.(*ir.IndexExpr).X.Type().IsString()) || n.Op() == ir.OSLICESTR: base.Errorf("cannot assign to %v (strings are immutable)", n) case n.Op() == ir.OLITERAL && n.Sym() != nil && isGoConst(n): base.Errorf("cannot assign to %v (declared const)", n) @@ -3387,39 +3387,39 @@ func samesafeexpr(l ir.Node, r ir.Node) bool { case ir.ODOT, ir.ODOTPTR: l := l.(*ir.SelectorExpr) r := r.(*ir.SelectorExpr) - return l.Sym() != nil && r.Sym() != nil && l.Sym() == r.Sym() && samesafeexpr(l.Left(), r.Left()) + return l.Sel != nil && r.Sel != nil && l.Sel == r.Sel && samesafeexpr(l.X, r.X) case ir.ODEREF: l := l.(*ir.StarExpr) r := r.(*ir.StarExpr) - return samesafeexpr(l.Left(), r.Left()) + return samesafeexpr(l.X, r.X) case ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG: l := l.(*ir.UnaryExpr) r := r.(*ir.UnaryExpr) - return samesafeexpr(l.Left(), r.Left()) + return samesafeexpr(l.X, r.X) case ir.OCONVNOP: l := l.(*ir.ConvExpr) r := r.(*ir.ConvExpr) - return samesafeexpr(l.Left(), r.Left()) + return samesafeexpr(l.X, r.X) case ir.OCONV: l := l.(*ir.ConvExpr) r := r.(*ir.ConvExpr) // Some conversions can't be reused, such as []byte(str). // Allow only numeric-ish types. This is a bit conservative. 
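islvalue and checkassign, just above, are the source of the targeted "cannot assign" errors: string contents are immutable, and a map element is not addressable, so a struct field cannot be updated through the index expression. For instance:

    package main

    type Pt struct{ X int }

    func main() {
        s := "hello"
        _ = s
        // s[0] = 'H' // error: cannot assign to s[0] (strings are immutable)

        m := map[string]Pt{}
        // m["a"].X = 1 // error: cannot assign to struct field m["a"].X in map
        v := m["a"] // the standard workaround: copy out, mutate, store back
        v.X = 1
        m["a"] = v
    }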
- return issimple[l.Type().Kind()] && samesafeexpr(l.Left(), r.Left()) + return issimple[l.Type().Kind()] && samesafeexpr(l.X, r.X) case ir.OINDEX, ir.OINDEXMAP: l := l.(*ir.IndexExpr) r := r.(*ir.IndexExpr) - return samesafeexpr(l.Left(), r.Left()) && samesafeexpr(l.Right(), r.Right()) + return samesafeexpr(l.X, r.X) && samesafeexpr(l.Index, r.Index) case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD: l := l.(*ir.BinaryExpr) r := r.(*ir.BinaryExpr) - return samesafeexpr(l.Left(), r.Left()) && samesafeexpr(l.Right(), r.Right()) + return samesafeexpr(l.X, r.X) && samesafeexpr(l.Y, r.Y) case ir.OLITERAL: return constant.Compare(l.Val(), token.EQL, r.Val()) @@ -3446,30 +3446,30 @@ func typecheckas(n *ir.AssignStmt) { // if the variable has a type (ntype) then typechecking // will not look at defn, so it is okay (and desirable, // so that the conversion below happens). - n.SetLeft(resolve(n.Left())) + n.X = resolve(n.X) - if !ir.DeclaredBy(n.Left(), n) || n.Left().Name().Ntype != nil { - n.SetLeft(typecheck(n.Left(), ctxExpr|ctxAssign)) + if !ir.DeclaredBy(n.X, n) || n.X.Name().Ntype != nil { + n.X = typecheck(n.X, ctxExpr|ctxAssign) } // Use ctxMultiOK so we can emit an "N variables but M values" error // to be consistent with typecheckas2 (#26616). - n.SetRight(typecheck(n.Right(), ctxExpr|ctxMultiOK)) - checkassign(n, n.Left()) - if n.Right() != nil && n.Right().Type() != nil { - if n.Right().Type().IsFuncArgStruct() { - base.Errorf("assignment mismatch: 1 variable but %v returns %d values", n.Right().(*ir.CallExpr).Left(), n.Right().Type().NumFields()) + n.Y = typecheck(n.Y, ctxExpr|ctxMultiOK) + checkassign(n, n.X) + if n.Y != nil && n.Y.Type() != nil { + if n.Y.Type().IsFuncArgStruct() { + base.Errorf("assignment mismatch: 1 variable but %v returns %d values", n.Y.(*ir.CallExpr).X, n.Y.Type().NumFields()) // Multi-value RHS isn't actually valid for OAS; nil out // to indicate failed typechecking. - n.Right().SetType(nil) - } else if n.Left().Type() != nil { - n.SetRight(assignconv(n.Right(), n.Left().Type(), "assignment")) + n.Y.SetType(nil) + } else if n.X.Type() != nil { + n.Y = assignconv(n.Y, n.X.Type(), "assignment") } } - if ir.DeclaredBy(n.Left(), n) && n.Left().Name().Ntype == nil { - n.SetRight(defaultlit(n.Right(), nil)) - n.Left().SetType(n.Right().Type()) + if ir.DeclaredBy(n.X, n) && n.X.Name().Ntype == nil { + n.Y = defaultlit(n.Y, nil) + n.X.SetType(n.Y.Type()) } // second half of dance. @@ -3477,11 +3477,11 @@ func typecheckas(n *ir.AssignStmt) { // just to get it over with. see dance above. n.SetTypecheck(1) - if n.Left().Typecheck() == 0 { - n.SetLeft(typecheck(n.Left(), ctxExpr|ctxAssign)) + if n.X.Typecheck() == 0 { + n.X = typecheck(n.X, ctxExpr|ctxAssign) } - if !ir.IsBlank(n.Left()) { - checkwidth(n.Left().Type()) // ensure width is calculated for backend + if !ir.IsBlank(n.X) { + checkwidth(n.X.Type()) // ensure width is calculated for backend } } @@ -3497,7 +3497,7 @@ func typecheckas2(n *ir.AssignListStmt) { defer tracePrint("typecheckas2", n)(nil) } - ls := n.List().Slice() + ls := n.Lhs.Slice() for i1, n1 := range ls { // delicate little dance. 
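typecheckas, above, is where x := expr settles its type: a multi-value right-hand side against a single variable is reported as an assignment mismatch, and an untyped constant is defaulted (defaultlit) before the new variable copies its type. Concretely:

    package main

    import "fmt"

    func pair() (int, error) { return 1, nil }

    func main() {
        // x := pair() // error: assignment mismatch: 1 variable but pair returns 2 values
        x, err := pair() // matching counts go through typecheckas2 instead
        fmt.Println(x, err)

        y := 1 << 10             // untyped constant: defaultlit gives y type int
        fmt.Printf("%T %v\n", y, y) // int 1024
    }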
n1 = resolve(n1) @@ -3508,21 +3508,21 @@ func typecheckas2(n *ir.AssignListStmt) { } } - cl := n.List().Len() - cr := n.Rlist().Len() + cl := n.Lhs.Len() + cr := n.Rhs.Len() if cl > 1 && cr == 1 { - n.Rlist().SetFirst(typecheck(n.Rlist().First(), ctxExpr|ctxMultiOK)) + n.Rhs.SetFirst(typecheck(n.Rhs.First(), ctxExpr|ctxMultiOK)) } else { - typecheckslice(n.Rlist().Slice(), ctxExpr) + typecheckslice(n.Rhs.Slice(), ctxExpr) } - checkassignlist(n, n.List()) + checkassignlist(n, n.Lhs) var l ir.Node var r ir.Node if cl == cr { // easy - ls := n.List().Slice() - rs := n.Rlist().Slice() + ls := n.Lhs.Slice() + rs := n.Rhs.Slice() for il, nl := range ls { nr := rs[il] if nl.Type() != nil && nr.Type() != nil { @@ -3537,8 +3537,8 @@ func typecheckas2(n *ir.AssignListStmt) { goto out } - l = n.List().First() - r = n.Rlist().First() + l = n.Lhs.First() + r = n.Rhs.First() // x,y,z = f() if cr == 1 { @@ -3556,7 +3556,7 @@ func typecheckas2(n *ir.AssignListStmt) { } r.(*ir.CallExpr).Use = ir.CallUseList n.SetOp(ir.OAS2FUNC) - for i, l := range n.List().Slice() { + for i, l := range n.Lhs.Slice() { f := r.Type().Field(i) if f.Type != nil && l.Type() != nil { checkassignto(f.Type, l) @@ -3592,7 +3592,7 @@ func typecheckas2(n *ir.AssignListStmt) { if ir.DeclaredBy(l, n) { l.SetType(r.Type()) } - l := n.List().Second() + l := n.Lhs.Second() if l.Type() != nil && !l.Type().IsBoolean() { checkassignto(types.Types[types.TBOOL], l) } @@ -3609,13 +3609,13 @@ mismatch: base.Errorf("assignment mismatch: %d variables but %d values", cl, cr) case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: r := r.(*ir.CallExpr) - base.Errorf("assignment mismatch: %d variables but %v returns %d values", cl, r.Left(), cr) + base.Errorf("assignment mismatch: %d variables but %v returns %d values", cl, r.X, cr) } // second half of dance out: n.SetTypecheck(1) - ls = n.List().Slice() + ls = n.Lhs.Slice() for i1, n1 := range ls { if n1.Typecheck() == 0 { ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign) @@ -3632,7 +3632,7 @@ func typecheckfunc(n *ir.Func) { } for _, ln := range n.Dcl { - if ln.Op() == ir.ONAME && (ln.Class() == ir.PPARAM || ln.Class() == ir.PPARAMOUT) { + if ln.Op() == ir.ONAME && (ln.Class_ == ir.PPARAM || ln.Class_ == ir.PPARAMOUT) { ln.Decldepth = 1 } } @@ -3662,19 +3662,19 @@ func typecheckfunc(n *ir.Func) { // The result of stringtoruneslit MUST be assigned back to n, e.g. 
// n.Left = stringtoruneslit(n.Left) func stringtoruneslit(n *ir.ConvExpr) ir.Node { - if n.Left().Op() != ir.OLITERAL || n.Left().Val().Kind() != constant.String { + if n.X.Op() != ir.OLITERAL || n.X.Val().Kind() != constant.String { base.Fatalf("stringtoarraylit %v", n) } var l []ir.Node i := 0 - for _, r := range ir.StringVal(n.Left()) { + for _, r := range ir.StringVal(n.X) { l = append(l, ir.NewKeyExpr(base.Pos, nodintconst(int64(i)), nodintconst(int64(r)))) i++ } nn := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(n.Type()).(ir.Ntype), nil) - nn.PtrList().Set(l) + nn.List.Set(l) return typecheck(nn, ctxExpr) } @@ -3837,7 +3837,7 @@ func typecheckdef(n ir.Node) { break } if n.Name().Defn == nil { - if n.SubOp() != 0 { // like OPRINTN + if n.BuiltinOp != 0 { // like OPRINTN break } if base.Errors() > 0 { @@ -3945,10 +3945,10 @@ func markBreak(fn *ir.Func) { case ir.OBREAK: n := n.(*ir.BranchStmt) - if n.Sym() == nil { + if n.Label == nil { setHasBreak(implicit) } else { - setHasBreak(labels[n.Sym()]) + setHasBreak(labels[n.Label]) } case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OSELECT, ir.ORANGE: @@ -3957,13 +3957,13 @@ func markBreak(fn *ir.Func) { var sym *types.Sym switch n := n.(type) { case *ir.ForStmt: - sym = n.Sym() + sym = n.Label case *ir.RangeStmt: - sym = n.Sym() + sym = n.Label case *ir.SelectStmt: - sym = n.Sym() + sym = n.Label case *ir.SwitchStmt: - sym = n.Sym() + sym = n.Label } if sym != nil { if labels == nil { @@ -3990,13 +3990,13 @@ func controlLabel(n ir.Node) *types.Sym { base.Fatalf("controlLabel %+v", n.Op()) return nil case *ir.ForStmt: - return n.Sym() + return n.Label case *ir.RangeStmt: - return n.Sym() + return n.Label case *ir.SelectStmt: - return n.Sym() + return n.Label case *ir.SwitchStmt: - return n.Sym() + return n.Label } } @@ -4007,13 +4007,13 @@ func setHasBreak(n ir.Node) { case nil: // ignore case *ir.ForStmt: - n.SetHasBreak(true) + n.HasBreak = true case *ir.RangeStmt: - n.SetHasBreak(true) + n.HasBreak = true case *ir.SelectStmt: - n.SetHasBreak(true) + n.HasBreak = true case *ir.SwitchStmt: - n.SetHasBreak(true) + n.HasBreak = true } } @@ -4038,37 +4038,37 @@ func isTermNode(n ir.Node) bool { case ir.OBLOCK: n := n.(*ir.BlockStmt) - return isTermNodes(n.List()) + return isTermNodes(n.List) case ir.OGOTO, ir.ORETURN, ir.ORETJMP, ir.OPANIC, ir.OFALL: return true case ir.OFOR, ir.OFORUNTIL: n := n.(*ir.ForStmt) - if n.Left() != nil { + if n.Cond != nil { return false } - if n.HasBreak() { + if n.HasBreak { return false } return true case ir.OIF: n := n.(*ir.IfStmt) - return isTermNodes(n.Body()) && isTermNodes(n.Rlist()) + return isTermNodes(n.Body) && isTermNodes(n.Else) case ir.OSWITCH: n := n.(*ir.SwitchStmt) - if n.HasBreak() { + if n.HasBreak { return false } def := false - for _, cas := range n.List().Slice() { + for _, cas := range n.Cases.Slice() { cas := cas.(*ir.CaseStmt) - if !isTermNodes(cas.Body()) { + if !isTermNodes(cas.Body) { return false } - if cas.List().Len() == 0 { // default + if cas.List.Len() == 0 { // default def = true } } @@ -4076,12 +4076,12 @@ func isTermNode(n ir.Node) bool { case ir.OSELECT: n := n.(*ir.SelectStmt) - if n.HasBreak() { + if n.HasBreak { return false } - for _, cas := range n.List().Slice() { + for _, cas := range n.Cases.Slice() { cas := cas.(*ir.CaseStmt) - if !isTermNodes(cas.Body()) { + if !isTermNodes(cas.Body) { return false } } @@ -4093,34 +4093,34 @@ func isTermNode(n ir.Node) bool { // checkreturn makes sure that fn terminates appropriately. 
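isTermNode, above, encodes the spec's terminating-statement rules that checkreturn (below) relies on: a for with no condition and no break terminates, an if terminates only when both branches do, and a switch needs a default case plus all-terminating, break-free bodies. Two compiling examples and one rejected one:

    package main

    func abs(x int) int {
        if x < 0 {
            return -x
        } else {
            return x // both branches terminate, so the if does too
        }
    }

    func spin() int {
        for {
        } // for with no condition and no break is terminating: no missing-return error
    }

    // func bad(x int) int {
    //     for x > 0 { x-- } // conditional loop: not terminating
    // }                     // error: missing return at end of function

    func main() { _ = abs(-1); _ = spin }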
func checkreturn(fn *ir.Func) { - if fn.Type().NumResults() != 0 && fn.Body().Len() != 0 { + if fn.Type().NumResults() != 0 && fn.Body.Len() != 0 { markBreak(fn) - if !isTermNodes(fn.Body()) { + if !isTermNodes(fn.Body) { base.ErrorfAt(fn.Endlineno, "missing return at end of function") } } } func deadcode(fn *ir.Func) { - deadcodeslice(fn.PtrBody()) + deadcodeslice(&fn.Body) - if fn.Body().Len() == 0 { + if fn.Body.Len() == 0 { return } - for _, n := range fn.Body().Slice() { + for _, n := range fn.Body.Slice() { if n.Init().Len() > 0 { return } switch n.Op() { case ir.OIF: n := n.(*ir.IfStmt) - if !ir.IsConst(n.Left(), constant.Bool) || n.Body().Len() > 0 || n.Rlist().Len() > 0 { + if !ir.IsConst(n.Cond, constant.Bool) || n.Body.Len() > 0 || n.Else.Len() > 0 { return } case ir.OFOR: n := n.(*ir.ForStmt) - if !ir.IsConst(n.Left(), constant.Bool) || ir.BoolVal(n.Left()) { + if !ir.IsConst(n.Cond, constant.Bool) || ir.BoolVal(n.Cond) { return } default: @@ -4128,7 +4128,7 @@ func deadcode(fn *ir.Func) { } } - fn.PtrBody().Set([]ir.Node{ir.NewBlockStmt(base.Pos, nil)}) + fn.Body.Set([]ir.Node{ir.NewBlockStmt(base.Pos, nil)}) } func deadcodeslice(nn *ir.Nodes) { @@ -4148,15 +4148,15 @@ func deadcodeslice(nn *ir.Nodes) { } if n.Op() == ir.OIF { n := n.(*ir.IfStmt) - n.SetLeft(deadcodeexpr(n.Left())) - if ir.IsConst(n.Left(), constant.Bool) { + n.Cond = deadcodeexpr(n.Cond) + if ir.IsConst(n.Cond, constant.Bool) { var body ir.Nodes - if ir.BoolVal(n.Left()) { - n.SetRlist(ir.Nodes{}) - body = n.Body() + if ir.BoolVal(n.Cond) { + n.Else = ir.Nodes{} + body = n.Body } else { - n.SetBody(ir.Nodes{}) - body = n.Rlist() + n.Body = ir.Nodes{} + body = n.Else } // If "then" or "else" branch ends with panic or return statement, // it is safe to remove all statements after this node. 
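deadcodeslice, above, folds an if whose condition is a boolean constant by dropping the untaken branch; deadcodeexpr, a little further down, performs the matching rewrites for && and || (sound because short-circuit evaluation already guarantees the discarded operand would not run). At the source level:

    package main

    import "fmt"

    const debug = false

    func expensive() bool { fmt.Println("side effect"); return true }

    func main() {
        if debug { // constant false: deadcodeslice deletes the body
            fmt.Println("never compiled in")
        }

        // deadcodeexpr's rewrites, spelled out:
        //   true && x  => x      false && x => false
        //   true || x  => true   false || x => x
        ok := debug && expensive() // folds to false; expensive is never called
        fmt.Println(ok)            // false
    }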
@@ -4178,26 +4178,26 @@ func deadcodeslice(nn *ir.Nodes) { switch n.Op() { case ir.OBLOCK: n := n.(*ir.BlockStmt) - deadcodeslice(n.PtrList()) + deadcodeslice(&n.List) case ir.OCASE: n := n.(*ir.CaseStmt) - deadcodeslice(n.PtrBody()) + deadcodeslice(&n.Body) case ir.OFOR: n := n.(*ir.ForStmt) - deadcodeslice(n.PtrBody()) + deadcodeslice(&n.Body) case ir.OIF: n := n.(*ir.IfStmt) - deadcodeslice(n.PtrBody()) - deadcodeslice(n.PtrRlist()) + deadcodeslice(&n.Body) + deadcodeslice(&n.Else) case ir.ORANGE: n := n.(*ir.RangeStmt) - deadcodeslice(n.PtrBody()) + deadcodeslice(&n.Body) case ir.OSELECT: n := n.(*ir.SelectStmt) - deadcodeslice(n.PtrList()) + deadcodeslice(&n.Cases) case ir.OSWITCH: n := n.(*ir.SwitchStmt) - deadcodeslice(n.PtrList()) + deadcodeslice(&n.Cases) } if cut { @@ -4214,24 +4214,24 @@ func deadcodeexpr(n ir.Node) ir.Node { switch n.Op() { case ir.OANDAND: n := n.(*ir.LogicalExpr) - n.SetLeft(deadcodeexpr(n.Left())) - n.SetRight(deadcodeexpr(n.Right())) - if ir.IsConst(n.Left(), constant.Bool) { - if ir.BoolVal(n.Left()) { - return n.Right() // true && x => x + n.X = deadcodeexpr(n.X) + n.Y = deadcodeexpr(n.Y) + if ir.IsConst(n.X, constant.Bool) { + if ir.BoolVal(n.X) { + return n.Y // true && x => x } else { - return n.Left() // false && x => false + return n.X // false && x => false } } case ir.OOROR: n := n.(*ir.LogicalExpr) - n.SetLeft(deadcodeexpr(n.Left())) - n.SetRight(deadcodeexpr(n.Right())) - if ir.IsConst(n.Left(), constant.Bool) { - if ir.BoolVal(n.Left()) { - return n.Left() // true || x => true + n.X = deadcodeexpr(n.X) + n.Y = deadcodeexpr(n.Y) + if ir.IsConst(n.X, constant.Bool) { + if ir.BoolVal(n.X) { + return n.X // true || x => true } else { - return n.Right() // false || x => x + return n.Y // false || x => x } } } @@ -4247,8 +4247,8 @@ func getIotaValue() int64 { } } - if Curfn != nil && Curfn.Iota() >= 0 { - return Curfn.Iota() + if Curfn != nil && Curfn.Iota >= 0 { + return Curfn.Iota } return -1 diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index e11c0eb92c773..cf20583042db4 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -152,14 +152,14 @@ func initUniverse() { for _, s := range &builtinFuncs { s2 := types.BuiltinPkg.Lookup(s.name) def := NewName(s2) - def.SetSubOp(s.op) + def.BuiltinOp = s.op s2.Def = def } for _, s := range &unsafeFuncs { s2 := unsafepkg.Lookup(s.name) def := NewName(s2) - def.SetSubOp(s.op) + def.BuiltinOp = s.op s2.Def = def } @@ -342,6 +342,6 @@ func finishUniverse() { nodfp = NewName(lookup(".fp")) nodfp.SetType(types.Types[types.TINT32]) - nodfp.SetClass(ir.PPARAM) + nodfp.Class_ = ir.PPARAM nodfp.SetUsed(true) } diff --git a/src/cmd/compile/internal/gc/unsafe.go b/src/cmd/compile/internal/gc/unsafe.go index eeedea396e0c0..cecc8720a9e9c 100644 --- a/src/cmd/compile/internal/gc/unsafe.go +++ b/src/cmd/compile/internal/gc/unsafe.go @@ -14,9 +14,9 @@ func evalunsafe(n ir.Node) int64 { switch n.Op() { case ir.OALIGNOF, ir.OSIZEOF: n := n.(*ir.UnaryExpr) - n.SetLeft(typecheck(n.Left(), ctxExpr)) - n.SetLeft(defaultlit(n.Left(), nil)) - tr := n.Left().Type() + n.X = typecheck(n.X, ctxExpr) + n.X = defaultlit(n.X, nil) + tr := n.X.Type() if tr == nil { return 0 } @@ -29,20 +29,20 @@ func evalunsafe(n ir.Node) int64 { case ir.OOFFSETOF: // must be a selector. 
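evalunsafe, continuing below, folds unsafe.Alignof, Sizeof, and Offsetof into constants, summing field offsets along the selector after dot insertion and rejecting any path that passes through an embedded pointer. A small illustration (the printed values assume a typical 64-bit target):

    package main

    import (
        "fmt"
        "unsafe"
    )

    type Header struct {
        Tag byte  // offset 0
        N   int64 // pushed to offset 8 by alignment padding
    }

    func main() {
        var h Header
        fmt.Println(unsafe.Sizeof(h), unsafe.Alignof(h), unsafe.Offsetof(h.N)) // 16 8 8

        var o struct{ *Header }
        _ = o
        // unsafe.Offsetof(o.N) // error: selector implies indirection of embedded *Header
    }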
n := n.(*ir.UnaryExpr) - if n.Left().Op() != ir.OXDOT { + if n.X.Op() != ir.OXDOT { base.Errorf("invalid expression %v", n) return 0 } - sel := n.Left().(*ir.SelectorExpr) + sel := n.X.(*ir.SelectorExpr) // Remember base of selector to find it back after dot insertion. // Since r->left may be mutated by typechecking, check it explicitly // first to track it correctly. - sel.SetLeft(typecheck(sel.Left(), ctxExpr)) - sbase := sel.Left() + sel.X = typecheck(sel.X, ctxExpr) + sbase := sel.X tsel := typecheck(sel, ctxExpr) - n.SetLeft(tsel) + n.X = tsel if tsel.Type() == nil { return 0 } @@ -67,15 +67,15 @@ func evalunsafe(n ir.Node) int64 { // but accessing f must not otherwise involve // indirection via embedded pointer types. r := r.(*ir.SelectorExpr) - if r.Left() != sbase { - base.Errorf("invalid expression %v: selector implies indirection of embedded %v", n, r.Left()) + if r.X != sbase { + base.Errorf("invalid expression %v: selector implies indirection of embedded %v", n, r.X) return 0 } fallthrough case ir.ODOT: r := r.(*ir.SelectorExpr) - v += r.Offset() - next = r.Left() + v += r.Offset + next = r.X default: ir.Dump("unsafenmagic", tsel) base.Fatalf("impossible %v node after dot insertion", r.Op()) diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 91b7a184cf8dd..3fd6c97d68308 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -33,14 +33,14 @@ func walk(fn *ir.Func) { if base.Flag.W != 0 { s := fmt.Sprintf("\nbefore walk %v", Curfn.Sym()) - ir.DumpList(s, Curfn.Body()) + ir.DumpList(s, Curfn.Body) } lno := base.Pos // Final typecheck for any unused variables. for i, ln := range fn.Dcl { - if ln.Op() == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) { + if ln.Op() == ir.ONAME && (ln.Class_ == ir.PAUTO || ln.Class_ == ir.PAUTOHEAP) { ln = typecheck(ln, ctxExpr|ctxAssign).(*ir.Name) fn.Dcl[i] = ln } @@ -48,13 +48,13 @@ func walk(fn *ir.Func) { // Propagate the used flag for typeswitch variables up to the NONAME in its definition. 
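The used-flag propagation above reflects how `switch v := x.(type)` is checked: each case clause gets its own version of v, and a use in any single case marks the guard variable as used; otherwise the usual diagnostic fires. For example:

    package main

    import "fmt"

    func describe(i interface{}) {
        switch v := i.(type) { // using v in one case is enough
        case int:
            fmt.Println("int:", v)
        case string: // v is unused in this case, which is fine
        }
    }

    // func bad(i interface{}) {
    //     switch v := i.(type) { // error: v declared but not used
    //     case int:
    //     }
    // }

    func main() { describe(42) }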
for _, ln := range fn.Dcl { - if ln.Op() == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) && ln.Defn != nil && ln.Defn.Op() == ir.OTYPESW && ln.Used() { + if ln.Op() == ir.ONAME && (ln.Class_ == ir.PAUTO || ln.Class_ == ir.PAUTOHEAP) && ln.Defn != nil && ln.Defn.Op() == ir.OTYPESW && ln.Used() { ln.Defn.(*ir.TypeSwitchGuard).Used = true } } for _, ln := range fn.Dcl { - if ln.Op() != ir.ONAME || (ln.Class() != ir.PAUTO && ln.Class() != ir.PAUTOHEAP) || ln.Sym().Name[0] == '&' || ln.Used() { + if ln.Op() != ir.ONAME || (ln.Class_ != ir.PAUTO && ln.Class_ != ir.PAUTOHEAP) || ln.Sym().Name[0] == '&' || ln.Used() { continue } if defn, ok := ln.Defn.(*ir.TypeSwitchGuard); ok { @@ -72,10 +72,10 @@ func walk(fn *ir.Func) { if base.Errors() > errorsBefore { return } - walkstmtlist(Curfn.Body().Slice()) + walkstmtlist(Curfn.Body.Slice()) if base.Flag.W != 0 { s := fmt.Sprintf("after walk %v", Curfn.Sym()) - ir.DumpList(s, Curfn.Body()) + ir.DumpList(s, Curfn.Body) } zeroResults() @@ -98,7 +98,7 @@ func walkstmtlist(s []ir.Node) { func paramoutheap(fn *ir.Func) bool { for _, ln := range fn.Dcl { - switch ln.Class() { + switch ln.Class_ { case ir.PPARAMOUT: if isParamStackCopy(ln) || ln.Addrtaken() { return true @@ -189,8 +189,8 @@ func walkstmt(n ir.Node) ir.Node { init := n.Init() n.PtrInit().Set(nil) - n.SetLeft(walkexpr(n.Left(), &init)) - call := walkexpr(mkcall1(chanfn("chanrecv1", 2, n.Left().Type()), nil, &init, n.Left(), nodnil()), &init) + n.X = walkexpr(n.X, &init) + call := walkexpr(mkcall1(chanfn("chanrecv1", 2, n.X.Type()), nil, &init, n.X, nodnil()), &init) return initExpr(init.Slice(), call) case ir.OBREAK, @@ -208,20 +208,20 @@ func walkstmt(n ir.Node) ir.Node { case ir.ODCL: n := n.(*ir.Decl) - v := n.Left().(*ir.Name) - if v.Class() == ir.PAUTOHEAP { + v := n.X.(*ir.Name) + if v.Class_ == ir.PAUTOHEAP { if base.Flag.CompilingRuntime { base.Errorf("%v escapes to heap, not allowed in runtime", v) } nn := ir.NewAssignStmt(base.Pos, v.Name().Heapaddr, callnew(v.Type())) - nn.SetColas(true) + nn.Def = true return walkstmt(typecheck(nn, ctxStmt)) } return n case ir.OBLOCK: n := n.(*ir.BlockStmt) - walkstmtlist(n.List().Slice()) + walkstmtlist(n.List.Slice()) return n case ir.OCASE: @@ -247,33 +247,33 @@ func walkstmt(n ir.Node) ir.Node { case ir.OGO: n := n.(*ir.GoDeferStmt) var init ir.Nodes - switch call := n.Left(); call.Op() { + switch call := n.Call; call.Op() { case ir.OPRINT, ir.OPRINTN: call := call.(*ir.CallExpr) - n.SetLeft(wrapCall(call, &init)) + n.Call = wrapCall(call, &init) case ir.ODELETE: call := call.(*ir.CallExpr) - if mapfast(call.List().First().Type()) == mapslow { - n.SetLeft(wrapCall(call, &init)) + if mapfast(call.Args.First().Type()) == mapslow { + n.Call = wrapCall(call, &init) } else { - n.SetLeft(walkexpr(call, &init)) + n.Call = walkexpr(call, &init) } case ir.OCOPY: call := call.(*ir.BinaryExpr) - n.SetLeft(copyany(call, &init, true)) + n.Call = copyany(call, &init, true) case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: call := call.(*ir.CallExpr) - if call.Body().Len() > 0 { - n.SetLeft(wrapCall(call, &init)) + if call.Body.Len() > 0 { + n.Call = wrapCall(call, &init) } else { - n.SetLeft(walkexpr(call, &init)) + n.Call = walkexpr(call, &init) } default: - n.SetLeft(walkexpr(call, &init)) + n.Call = walkexpr(call, &init) } if init.Len() > 0 { init.Append(n) @@ -283,41 +283,41 @@ func walkstmt(n ir.Node) ir.Node { case ir.OFOR, ir.OFORUNTIL: n := n.(*ir.ForStmt) - if n.Left() != nil { - walkstmtlist(n.Left().Init().Slice()) - init := 
n.Left().Init() - n.Left().PtrInit().Set(nil) - n.SetLeft(walkexpr(n.Left(), &init)) - n.SetLeft(initExpr(init.Slice(), n.Left())) + if n.Cond != nil { + walkstmtlist(n.Cond.Init().Slice()) + init := n.Cond.Init() + n.Cond.PtrInit().Set(nil) + n.Cond = walkexpr(n.Cond, &init) + n.Cond = initExpr(init.Slice(), n.Cond) } - n.SetRight(walkstmt(n.Right())) + n.Post = walkstmt(n.Post) if n.Op() == ir.OFORUNTIL { - walkstmtlist(n.List().Slice()) + walkstmtlist(n.Late.Slice()) } - walkstmtlist(n.Body().Slice()) + walkstmtlist(n.Body.Slice()) return n case ir.OIF: n := n.(*ir.IfStmt) - n.SetLeft(walkexpr(n.Left(), n.PtrInit())) - walkstmtlist(n.Body().Slice()) - walkstmtlist(n.Rlist().Slice()) + n.Cond = walkexpr(n.Cond, n.PtrInit()) + walkstmtlist(n.Body.Slice()) + walkstmtlist(n.Else.Slice()) return n case ir.ORETURN: n := n.(*ir.ReturnStmt) Curfn.NumReturns++ - if n.List().Len() == 0 { + if n.Results.Len() == 0 { return n } - if (hasNamedResults(Curfn) && n.List().Len() > 1) || paramoutheap(Curfn) { + if (hasNamedResults(Curfn) && n.Results.Len() > 1) || paramoutheap(Curfn) { // assign to the function out parameters, // so that ascompatee can fix up conflicts var rl []ir.Node for _, ln := range Curfn.Dcl { - cl := ln.Class() + cl := ln.Class_ if cl == ir.PAUTO || cl == ir.PAUTOHEAP { break } @@ -330,23 +330,23 @@ func walkstmt(n ir.Node) ir.Node { } } - if got, want := n.List().Len(), len(rl); got != want { + if got, want := n.Results.Len(), len(rl); got != want { // order should have rewritten multi-value function calls // with explicit OAS2FUNC nodes. base.Fatalf("expected %v return arguments, have %v", want, got) } // move function calls out, to make ascompatee's job easier. - walkexprlistsafe(n.List().Slice(), n.PtrInit()) + walkexprlistsafe(n.Results.Slice(), n.PtrInit()) - n.PtrList().Set(ascompatee(n.Op(), rl, n.List().Slice(), n.PtrInit())) + n.Results.Set(ascompatee(n.Op(), rl, n.Results.Slice(), n.PtrInit())) return n } - walkexprlist(n.List().Slice(), n.PtrInit()) + walkexprlist(n.Results.Slice(), n.PtrInit()) // For each return parameter (lhs), assign the corresponding result (rhs). 
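The ORETURN case above rewrites `return e1, e2` into explicit assignments to the function's result parameters followed by a bare return, so later phases only ever see stores. Viewed from the source side (divmod is an invented example):

    package main

    import "fmt"

    func divmod(a, b int) (q, r int) {
        // `return a / b, a % b` is lowered to roughly:
        //     q = a / b
        //     r = a % b
        //     return
        return a / b, a % b
    }

    func main() {
        fmt.Println(divmod(7, 3)) // 2 1
    }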
lhs := Curfn.Type().Results() - rhs := n.List().Slice() + rhs := n.Results.Slice() res := make([]ir.Node, lhs.NumFields()) for i, nl := range lhs.FieldSlice() { nname := ir.AsNode(nl.Nname) @@ -356,7 +356,7 @@ func walkstmt(n ir.Node) ir.Node { a := ir.NewAssignStmt(base.Pos, nname, rhs[i]) res[i] = convas(a, n.PtrInit()) } - n.PtrList().Set(res) + n.Results.Set(res) return n case ir.ORETJMP: @@ -499,10 +499,10 @@ func walkexpr(n ir.Node, init *ir.Nodes) ir.Node { base.Fatalf("expression has untyped type: %+v", n) } - if n.Op() == ir.ONAME && n.(*ir.Name).Class() == ir.PAUTOHEAP { + if n.Op() == ir.ONAME && n.(*ir.Name).Class_ == ir.PAUTOHEAP { n := n.(*ir.Name) nn := ir.NewStarExpr(base.Pos, n.Name().Heapaddr) - nn.Left().MarkNonNil() + nn.X.MarkNonNil() return walkexpr(typecheck(nn, ctxExpr), init) } @@ -556,46 +556,46 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.ONOT, ir.ONEG, ir.OPLUS, ir.OBITNOT, ir.OREAL, ir.OIMAG, ir.OSPTR, ir.OITAB, ir.OIDATA: n := n.(*ir.UnaryExpr) - n.SetLeft(walkexpr(n.Left(), init)) + n.X = walkexpr(n.X, init) return n case ir.ODOTMETH, ir.ODOTINTER: n := n.(*ir.SelectorExpr) - n.SetLeft(walkexpr(n.Left(), init)) + n.X = walkexpr(n.X, init) return n case ir.OADDR: n := n.(*ir.AddrExpr) - n.SetLeft(walkexpr(n.Left(), init)) + n.X = walkexpr(n.X, init) return n case ir.ODEREF: n := n.(*ir.StarExpr) - n.SetLeft(walkexpr(n.Left(), init)) + n.X = walkexpr(n.X, init) return n case ir.OEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH: n := n.(*ir.BinaryExpr) - n.SetLeft(walkexpr(n.Left(), init)) - n.SetRight(walkexpr(n.Right(), init)) + n.X = walkexpr(n.X, init) + n.Y = walkexpr(n.Y, init) return n case ir.ODOT, ir.ODOTPTR: n := n.(*ir.SelectorExpr) usefield(n) - n.SetLeft(walkexpr(n.Left(), init)) + n.X = walkexpr(n.X, init) return n case ir.ODOTTYPE, ir.ODOTTYPE2: n := n.(*ir.TypeAssertExpr) - n.SetLeft(walkexpr(n.Left(), init)) + n.X = walkexpr(n.X, init) // Set up interface type addresses for back end. - n.SetRight(typename(n.Type())) + n.Ntype = typename(n.Type()) if n.Op() == ir.ODOTTYPE { - n.Right().(*ir.AddrExpr).SetRight(typename(n.Left().Type())) + n.Ntype.(*ir.AddrExpr).Alloc = typename(n.X.Type()) } - if !n.Type().IsInterface() && !n.Left().Type().IsEmptyInterface() { - n.PtrList().Set1(itabname(n.Type(), n.Left().Type())) + if !n.Type().IsInterface() && !n.X.Type().IsEmptyInterface() { + n.Itab.Set1(itabname(n.Type(), n.X.Type())) } return n @@ -603,20 +603,20 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { n := n.(*ir.UnaryExpr) if isRuneCount(n) { // Replace len([]rune(string)) with runtime.countrunes(string). - return mkcall("countrunes", n.Type(), init, conv(n.Left().(*ir.ConvExpr).Left(), types.Types[types.TSTRING])) + return mkcall("countrunes", n.Type(), init, conv(n.X.(*ir.ConvExpr).X, types.Types[types.TSTRING])) } - n.SetLeft(walkexpr(n.Left(), init)) + n.X = walkexpr(n.X, init) // replace len(*[10]int) with 10. // delayed until now to preserve side effects. 
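The OLEN case above carries two rewrites: len([]rune(s)) becomes a runtime.countrunes call that never materializes the slice, and len of a (pointer to) fixed-size array folds to the array length while the operand is still evaluated for its side effects, as the comment notes. For example:

    package main

    import "fmt"

    func grab() *[10]int {
        fmt.Println("grab called") // still runs: only the len is folded
        return new([10]int)
    }

    func main() {
        s := "héllo"
        fmt.Println(len(s), len([]rune(s))) // 6 5; the latter is countrunes(s)
        fmt.Println(len(grab()))            // 10, folded at compile time
    }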
- t := n.Left().Type() + t := n.X.Type() if t.IsPtr() { t = t.Elem() } if t.IsArray() { - safeexpr(n.Left(), init) + safeexpr(n.X, init) con := origIntConst(n, t.NumElem()) con.SetTypecheck(1) return con @@ -625,8 +625,8 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OCOMPLEX: n := n.(*ir.BinaryExpr) - n.SetLeft(walkexpr(n.Left(), init)) - n.SetRight(walkexpr(n.Right(), init)) + n.X = walkexpr(n.X, init) + n.Y = walkexpr(n.Y, init) return n case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: @@ -635,15 +635,15 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OANDAND, ir.OOROR: n := n.(*ir.LogicalExpr) - n.SetLeft(walkexpr(n.Left(), init)) + n.X = walkexpr(n.X, init) // cannot put side effects from n.Right on init, // because they cannot run before n.Left is checked. // save elsewhere and store on the eventual n.Right. var ll ir.Nodes - n.SetRight(walkexpr(n.Right(), &ll)) - n.SetRight(initExpr(ll.Slice(), n.Right())) + n.Y = walkexpr(n.Y, &ll) + n.Y = initExpr(ll.Slice(), n.Y) return n case ir.OPRINT, ir.OPRINTN: @@ -651,7 +651,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OPANIC: n := n.(*ir.UnaryExpr) - return mkcall("gopanic", nil, init, n.Left()) + return mkcall("gopanic", nil, init, n.X) case ir.ORECOVER: n := n.(*ir.CallExpr) @@ -667,24 +667,24 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { markUsedIfaceMethod(n) } - if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.OCLOSURE { + if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.OCLOSURE { // Transform direct call of a closure to call of a normal function. // transformclosure already did all preparation work. // Prepend captured variables to argument list. - clo := n.Left().(*ir.ClosureExpr) - n.PtrList().Prepend(clo.Func().ClosureEnter.Slice()...) - clo.Func().ClosureEnter.Set(nil) + clo := n.X.(*ir.ClosureExpr) + n.Args.Prepend(clo.Func.ClosureEnter.Slice()...) + clo.Func.ClosureEnter.Set(nil) // Replace OCLOSURE with ONAME/PFUNC. - n.SetLeft(clo.Func().Nname) + n.X = clo.Func.Nname // Update type of OCALLFUNC node. // Output arguments had not changed, but their offsets could. - if n.Left().Type().NumResults() == 1 { - n.SetType(n.Left().Type().Results().Field(0).Type) + if n.X.Type().NumResults() == 1 { + n.SetType(n.X.Type().Results().Field(0).Type) } else { - n.SetType(n.Left().Type().Results()) + n.SetType(n.X.Type().Results()) } } @@ -698,10 +698,10 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { switch n.Op() { case ir.OAS: n := n.(*ir.AssignStmt) - left, right = n.Left(), n.Right() + left, right = n.X, n.Y case ir.OASOP: n := n.(*ir.AssignOpStmt) - left, right = n.Left(), n.Right() + left, right = n.X, n.Y } // Recognize m[k] = append(m[k], ...) so we can reuse @@ -710,22 +710,22 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if left.Op() == ir.OINDEXMAP && right.Op() == ir.OAPPEND { left := left.(*ir.IndexExpr) mapAppend = right.(*ir.CallExpr) - if !samesafeexpr(left, mapAppend.List().First()) { - base.Fatalf("not same expressions: %v != %v", left, mapAppend.List().First()) + if !samesafeexpr(left, mapAppend.Args.First()) { + base.Fatalf("not same expressions: %v != %v", left, mapAppend.Args.First()) } } left = walkexpr(left, init) left = safeexpr(left, init) if mapAppend != nil { - mapAppend.List().SetFirst(left) + mapAppend.Args.SetFirst(left) } if n.Op() == ir.OASOP { // Rewrite x op= y into x = x op y. 
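The OASOP handling below rewrites x op= y into x = x op y, after walking the left-hand side once and wrapping it with safeexpr so its side effects are not duplicated. Observable from source:

    package main

    import "fmt"

    func idx() int {
        fmt.Println("idx evaluated") // printed once: the lhs is made safe, not re-walked
        return 0
    }

    func main() {
        xs := []int{10}
        xs[idx()] += 5     // rewritten to xs[i] = xs[i] + 5, with the index evaluated once
        fmt.Println(xs[0]) // 15
    }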
- n = ir.NewAssignStmt(base.Pos, left, typecheck(ir.NewBinaryExpr(base.Pos, n.(*ir.AssignOpStmt).SubOp(), left, right), ctxExpr)) + n = ir.NewAssignStmt(base.Pos, left, typecheck(ir.NewBinaryExpr(base.Pos, n.(*ir.AssignOpStmt).AsOp, left, right), ctxExpr)) } else { - n.(*ir.AssignStmt).SetLeft(left) + n.(*ir.AssignStmt).X = left } as := n.(*ir.AssignStmt) @@ -733,32 +733,32 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return ir.NewBlockStmt(as.Pos(), nil) } - if as.Right() == nil { + if as.Y == nil { // TODO(austin): Check all "implicit zeroing" return as } - if !instrumenting && isZero(as.Right()) { + if !instrumenting && isZero(as.Y) { return as } - switch as.Right().Op() { + switch as.Y.Op() { default: - as.SetRight(walkexpr(as.Right(), init)) + as.Y = walkexpr(as.Y, init) case ir.ORECV: // x = <-c; as.Left is x, as.Right.Left is c. // order.stmt made sure x is addressable. - recv := as.Right().(*ir.UnaryExpr) - recv.SetLeft(walkexpr(recv.Left(), init)) + recv := as.Y.(*ir.UnaryExpr) + recv.X = walkexpr(recv.X, init) - n1 := nodAddr(as.Left()) - r := recv.Left() // the channel + n1 := nodAddr(as.X) + r := recv.X // the channel return mkcall1(chanfn("chanrecv1", 2, r.Type()), nil, init, r, n1) case ir.OAPPEND: // x = append(...) - call := as.Right().(*ir.CallExpr) + call := as.Y.(*ir.CallExpr) if call.Type().Elem().NotInHeap() { base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", call.Type().Elem()) } @@ -767,24 +767,24 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case isAppendOfMake(call): // x = append(y, make([]T, y)...) r = extendslice(call, init) - case call.IsDDD(): + case call.IsDDD: r = appendslice(call, init) // also works for append(slice, string). default: r = walkappend(call, init, as) } - as.SetRight(r) + as.Y = r if r.Op() == ir.OAPPEND { // Left in place for back end. // Do not add a new write barrier. // Set up address of type for back end. - r.(*ir.CallExpr).SetLeft(typename(r.Type().Elem())) + r.(*ir.CallExpr).X = typename(r.Type().Elem()) return as } // Otherwise, lowered for race detector. // Treat as ordinary assignment. } - if as.Left() != nil && as.Right() != nil { + if as.X != nil && as.Y != nil { return convas(as, init) } return as @@ -792,26 +792,26 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OAS2: n := n.(*ir.AssignListStmt) init.AppendNodes(n.PtrInit()) - walkexprlistsafe(n.List().Slice(), init) - walkexprlistsafe(n.Rlist().Slice(), init) - return liststmt(ascompatee(ir.OAS, n.List().Slice(), n.Rlist().Slice(), init)) + walkexprlistsafe(n.Lhs.Slice(), init) + walkexprlistsafe(n.Rhs.Slice(), init) + return liststmt(ascompatee(ir.OAS, n.Lhs.Slice(), n.Rhs.Slice(), init)) // a,b,... 
= fn() case ir.OAS2FUNC: n := n.(*ir.AssignListStmt) init.AppendNodes(n.PtrInit()) - r := n.Rlist().First() - walkexprlistsafe(n.List().Slice(), init) + r := n.Rhs.First() + walkexprlistsafe(n.Lhs.Slice(), init) r = walkexpr(r, init) if IsIntrinsicCall(r.(*ir.CallExpr)) { - n.PtrRlist().Set1(r) + n.Rhs.Set1(r) return n } init.Append(r) - ll := ascompatet(n.List(), r.Type()) + ll := ascompatet(n.Lhs, r.Type()) return liststmt(ll) // x, y = <-c @@ -820,18 +820,18 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { n := n.(*ir.AssignListStmt) init.AppendNodes(n.PtrInit()) - r := n.Rlist().First().(*ir.UnaryExpr) // recv - walkexprlistsafe(n.List().Slice(), init) - r.SetLeft(walkexpr(r.Left(), init)) + r := n.Rhs.First().(*ir.UnaryExpr) // recv + walkexprlistsafe(n.Lhs.Slice(), init) + r.X = walkexpr(r.X, init) var n1 ir.Node - if ir.IsBlank(n.List().First()) { + if ir.IsBlank(n.Lhs.First()) { n1 = nodnil() } else { - n1 = nodAddr(n.List().First()) + n1 = nodAddr(n.Lhs.First()) } - fn := chanfn("chanrecv2", 2, r.Left().Type()) - ok := n.List().Second() - call := mkcall1(fn, types.Types[types.TBOOL], init, r.Left(), n1) + fn := chanfn("chanrecv2", 2, r.X.Type()) + ok := n.Lhs.Second() + call := mkcall1(fn, types.Types[types.TBOOL], init, r.X, n1) return typecheck(ir.NewAssignStmt(base.Pos, ok, call), ctxStmt) // a,b = m[i] @@ -839,21 +839,21 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { n := n.(*ir.AssignListStmt) init.AppendNodes(n.PtrInit()) - r := n.Rlist().First().(*ir.IndexExpr) - walkexprlistsafe(n.List().Slice(), init) - r.SetLeft(walkexpr(r.Left(), init)) - r.SetRight(walkexpr(r.Right(), init)) - t := r.Left().Type() + r := n.Rhs.First().(*ir.IndexExpr) + walkexprlistsafe(n.Lhs.Slice(), init) + r.X = walkexpr(r.X, init) + r.Index = walkexpr(r.Index, init) + t := r.X.Type() fast := mapfast(t) var key ir.Node if fast != mapslow { // fast versions take key by value - key = r.Right() + key = r.Index } else { // standard version takes key by reference // order.expr made sure key is addressable. - key = nodAddr(r.Right()) + key = nodAddr(r.Index) } // from: @@ -861,25 +861,25 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // to: // var,b = mapaccess2*(t, m, i) // a = *var - a := n.List().First() + a := n.Lhs.First() var call *ir.CallExpr if w := t.Elem().Width; w <= zeroValSize { fn := mapfn(mapaccess2[fast], t) - call = mkcall1(fn, fn.Type().Results(), init, typename(t), r.Left(), key) + call = mkcall1(fn, fn.Type().Results(), init, typename(t), r.X, key) } else { fn := mapfn("mapaccess2_fat", t) z := zeroaddr(w) - call = mkcall1(fn, fn.Type().Results(), init, typename(t), r.Left(), key, z) + call = mkcall1(fn, fn.Type().Results(), init, typename(t), r.X, key, z) } // mapaccess2* returns a typed bool, but due to spec changes, // the boolean result of i.(T) is now untyped so we make it the // same type as the variable on the lhs. 
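The OAS2RECV and OAS2MAPR cases above lower the two comma-ok forms onto the runtime helpers named in the diff: v, ok = <-c becomes a chanrecv2 call, and v, ok = m[k] becomes mapaccess2, whose returned pointer is then dereferenced into v (or skipped when v is the blank identifier). In source terms:

    package main

    import "fmt"

    func main() {
        m := map[string]int{"a": 1}
        v, ok := m["a"]    // lowered to: ptr, ok := mapaccess2(...); v = *ptr
        fmt.Println(v, ok) // 1 true

        c := make(chan int, 1)
        c <- 7
        x, open := <-c        // lowered to a chanrecv2 call writing through &x
        fmt.Println(x, open) // 7 true
    }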
- if ok := n.List().Second(); !ir.IsBlank(ok) && ok.Type().IsBoolean() { + if ok := n.Lhs.Second(); !ir.IsBlank(ok) && ok.Type().IsBoolean() { call.Type().Field(1).Type = ok.Type() } - n.PtrRlist().Set1(call) + n.Rhs.Set1(call) n.SetOp(ir.OAS2FUNC) // don't generate a = *var if a is _ @@ -891,7 +891,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { var_.SetTypecheck(1) var_.MarkNonNil() // mapaccess always returns a non-nil pointer - n.List().SetFirst(var_) + n.Lhs.SetFirst(var_) init.Append(walkexpr(n, init)) as := ir.NewAssignStmt(base.Pos, a, ir.NewStarExpr(base.Pos, var_)) @@ -900,8 +900,8 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.ODELETE: n := n.(*ir.CallExpr) init.AppendNodes(n.PtrInit()) - map_ := n.List().First() - key := n.List().Second() + map_ := n.Args.First() + key := n.Args.Second() map_ = walkexpr(map_, init) key = walkexpr(key, init) @@ -915,15 +915,15 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OAS2DOTTYPE: n := n.(*ir.AssignListStmt) - walkexprlistsafe(n.List().Slice(), init) - n.PtrRlist().SetIndex(0, walkexpr(n.Rlist().First(), init)) + walkexprlistsafe(n.Lhs.Slice(), init) + (&n.Rhs).SetIndex(0, walkexpr(n.Rhs.First(), init)) return n case ir.OCONVIFACE: n := n.(*ir.ConvExpr) - n.SetLeft(walkexpr(n.Left(), init)) + n.X = walkexpr(n.X, init) - fromType := n.Left().Type() + fromType := n.X.Type() toType := n.Type() if !fromType.IsInterface() && !ir.IsBlank(Curfn.Nname) { // skip unnamed functions (func _()) @@ -940,7 +940,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped. if isdirectiface(fromType) { - l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), n.Left()) + l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), n.X) l.SetType(toType) l.SetTypecheck(n.Typecheck()) return l @@ -948,12 +948,12 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if staticuint64s == nil { staticuint64s = NewName(Runtimepkg.Lookup("staticuint64s")) - staticuint64s.SetClass(ir.PEXTERN) + staticuint64s.Class_ = ir.PEXTERN // The actual type is [256]uint64, but we use [256*8]uint8 so we can address // individual bytes. staticuint64s.SetType(types.NewArray(types.Types[types.TUINT8], 256*8)) zerobase = NewName(Runtimepkg.Lookup("zerobase")) - zerobase.SetClass(ir.PEXTERN) + zerobase.Class_ = ir.PEXTERN zerobase.SetType(types.Types[types.TUINTPTR]) } @@ -964,27 +964,27 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { switch { case fromType.Size() == 0: // n.Left is zero-sized. Use zerobase. - cheapexpr(n.Left(), init) // Evaluate n.Left for side-effects. See issue 19246. + cheapexpr(n.X, init) // Evaluate n.Left for side-effects. See issue 19246. value = zerobase case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()): // n.Left is a bool/byte. Use staticuint64s[n.Left * 8] on little-endian // and staticuint64s[n.Left * 8 + 7] on big-endian. - n.SetLeft(cheapexpr(n.Left(), init)) + n.X = cheapexpr(n.X, init) // byteindex widens n.Left so that the multiplication doesn't overflow. 
- index := ir.NewBinaryExpr(base.Pos, ir.OLSH, byteindex(n.Left()), nodintconst(3)) + index := ir.NewBinaryExpr(base.Pos, ir.OLSH, byteindex(n.X), nodintconst(3)) if thearch.LinkArch.ByteOrder == binary.BigEndian { index = ir.NewBinaryExpr(base.Pos, ir.OADD, index, nodintconst(7)) } xe := ir.NewIndexExpr(base.Pos, staticuint64s, index) xe.SetBounded(true) value = xe - case n.Left().Op() == ir.ONAME && n.Left().(*ir.Name).Class() == ir.PEXTERN && n.Left().(*ir.Name).Readonly(): + case n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class_ == ir.PEXTERN && n.X.(*ir.Name).Readonly(): // n.Left is a readonly global; use it directly. - value = n.Left() + value = n.X case !fromType.IsInterface() && n.Esc() == EscNone && fromType.Width <= 1024: // n.Left does not escape. Use a stack temporary initialized to n.Left. value = temp(fromType) - init.Append(typecheck(ir.NewAssignStmt(base.Pos, value, n.Left()), ctxStmt)) + init.Append(typecheck(ir.NewAssignStmt(base.Pos, value, n.X), ctxStmt)) } if value != nil { @@ -1005,7 +1005,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if toType.IsEmptyInterface() && fromType.IsInterface() && !fromType.IsEmptyInterface() { // Evaluate the input interface. c := temp(fromType) - init.Append(ir.NewAssignStmt(base.Pos, c, n.Left())) + init.Append(ir.NewAssignStmt(base.Pos, c, n.X)) // Get the itab out of the interface. tmp := temp(types.NewPtr(types.Types[types.TUINT8])) @@ -1013,7 +1013,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // Get the type out of the itab. nif := ir.NewIfStmt(base.Pos, typecheck(ir.NewBinaryExpr(base.Pos, ir.ONE, tmp, nodnil()), ctxExpr), nil, nil) - nif.PtrBody().Set1(ir.NewAssignStmt(base.Pos, tmp, itabType(tmp))) + nif.Body.Set1(ir.NewAssignStmt(base.Pos, tmp, itabType(tmp))) init.Append(nif) // Build the result. @@ -1034,7 +1034,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { fn = substArgTypes(fn, fromType) dowidth(fn.Type()) call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) - call.PtrList().Set1(n.Left()) + call.Args.Set1(n.X) e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), safeexpr(walkexpr(typecheck(call, ctxExpr), init), init)) e.SetType(toType) e.SetTypecheck(1) @@ -1050,7 +1050,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { tab = typeword() } - v := n.Left() + v := n.X if needsaddr { // Types of large or unknown size are passed by reference. 
// Orderexpr arranged for n.Left to be a temporary for all @@ -1069,41 +1069,41 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { fn = substArgTypes(fn, fromType, toType) dowidth(fn.Type()) call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) - call.PtrList().Set2(tab, v) + call.Args.Set2(tab, v) return walkexpr(typecheck(call, ctxExpr), init) case ir.OCONV, ir.OCONVNOP: n := n.(*ir.ConvExpr) - n.SetLeft(walkexpr(n.Left(), init)) - if n.Op() == ir.OCONVNOP && n.Type() == n.Left().Type() { - return n.Left() + n.X = walkexpr(n.X, init) + if n.Op() == ir.OCONVNOP && n.Type() == n.X.Type() { + return n.X } if n.Op() == ir.OCONVNOP && checkPtr(Curfn, 1) { - if n.Type().IsPtr() && n.Left().Type().IsUnsafePtr() { // unsafe.Pointer to *T + if n.Type().IsPtr() && n.X.Type().IsUnsafePtr() { // unsafe.Pointer to *T return walkCheckPtrAlignment(n, init, nil) } - if n.Type().IsUnsafePtr() && n.Left().Type().IsUintptr() { // uintptr to unsafe.Pointer + if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() { // uintptr to unsafe.Pointer return walkCheckPtrArithmetic(n, init) } } - param, result := rtconvfn(n.Left().Type(), n.Type()) + param, result := rtconvfn(n.X.Type(), n.Type()) if param == types.Txxx { return n } fn := types.BasicTypeNames[param] + "to" + types.BasicTypeNames[result] - return conv(mkcall(fn, types.Types[result], init, conv(n.Left(), types.Types[param])), n.Type()) + return conv(mkcall(fn, types.Types[result], init, conv(n.X, types.Types[param])), n.Type()) case ir.ODIV, ir.OMOD: n := n.(*ir.BinaryExpr) - n.SetLeft(walkexpr(n.Left(), init)) - n.SetRight(walkexpr(n.Right(), init)) + n.X = walkexpr(n.X, init) + n.Y = walkexpr(n.Y, init) // rewrite complex div into function call. - et := n.Left().Type().Kind() + et := n.X.Type().Kind() if isComplex[et] && n.Op() == ir.ODIV { t := n.Type() - call := mkcall("complex128div", types.Types[types.TCOMPLEX128], init, conv(n.Left(), types.Types[types.TCOMPLEX128]), conv(n.Right(), types.Types[types.TCOMPLEX128])) + call := mkcall("complex128div", types.Types[types.TCOMPLEX128], init, conv(n.X, types.Types[types.TCOMPLEX128]), conv(n.Y, types.Types[types.TCOMPLEX128])) return conv(call, t) } @@ -1116,12 +1116,12 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // TODO: Remove this code once we can introduce // runtime calls late in SSA processing. if Widthreg < 8 && (et == types.TINT64 || et == types.TUINT64) { - if n.Right().Op() == ir.OLITERAL { + if n.Y.Op() == ir.OLITERAL { // Leave div/mod by constant powers of 2 or small 16-bit constants. // The SSA backend will handle those. switch et { case types.TINT64: - c := ir.Int64Val(n.Right()) + c := ir.Int64Val(n.Y) if c < 0 { c = -c } @@ -1129,7 +1129,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return n } case types.TUINT64: - c := ir.Uint64Val(n.Right()) + c := ir.Uint64Val(n.Y) if c < 1<<16 { return n } @@ -1149,49 +1149,49 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { } else { fn += "mod" } - return mkcall(fn, n.Type(), init, conv(n.Left(), types.Types[et]), conv(n.Right(), types.Types[et])) + return mkcall(fn, n.Type(), init, conv(n.X, types.Types[et]), conv(n.Y, types.Types[et])) } return n case ir.OINDEX: n := n.(*ir.IndexExpr) - n.SetLeft(walkexpr(n.Left(), init)) + n.X = walkexpr(n.X, init) // save the original node for bounds checking elision. // If it was a ODIV/OMOD walk might rewrite it. 
- r := n.Right() + r := n.Index - n.SetRight(walkexpr(n.Right(), init)) + n.Index = walkexpr(n.Index, init) // if range of type cannot exceed static array bound, // disable bounds check. if n.Bounded() { return n } - t := n.Left().Type() + t := n.X.Type() if t != nil && t.IsPtr() { t = t.Elem() } if t.IsArray() { n.SetBounded(bounded(r, t.NumElem())) - if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Right(), constant.Int) { + if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Index, constant.Int) { base.Warn("index bounds check elided") } - if smallintconst(n.Right()) && !n.Bounded() { + if smallintconst(n.Index) && !n.Bounded() { base.Errorf("index out of bounds") } - } else if ir.IsConst(n.Left(), constant.String) { - n.SetBounded(bounded(r, int64(len(ir.StringVal(n.Left()))))) - if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Right(), constant.Int) { + } else if ir.IsConst(n.X, constant.String) { + n.SetBounded(bounded(r, int64(len(ir.StringVal(n.X))))) + if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Index, constant.Int) { base.Warn("index bounds check elided") } - if smallintconst(n.Right()) && !n.Bounded() { + if smallintconst(n.Index) && !n.Bounded() { base.Errorf("index out of bounds") } } - if ir.IsConst(n.Right(), constant.Int) { - if v := n.Right().Val(); constant.Sign(v) < 0 || doesoverflow(v, types.Types[types.TINT]) { + if ir.IsConst(n.Index, constant.Int) { + if v := n.Index.Val(); constant.Sign(v) < 0 || doesoverflow(v, types.Types[types.TINT]) { base.Errorf("index out of bounds") } } @@ -1200,13 +1200,13 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OINDEXMAP: // Replace m[k] with *map{access1,assign}(maptype, m, &k) n := n.(*ir.IndexExpr) - n.SetLeft(walkexpr(n.Left(), init)) - n.SetRight(walkexpr(n.Right(), init)) - map_ := n.Left() - key := n.Right() + n.X = walkexpr(n.X, init) + n.Index = walkexpr(n.Index, init) + map_ := n.X + key := n.Index t := map_.Type() var call *ir.CallExpr - if n.IndexMapLValue() { + if n.Assigned { // This m[k] expression is on the left-hand side of an assignment. 
fast := mapfast(t) if fast == mapslow { @@ -1244,20 +1244,20 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OSLICEHEADER: n := n.(*ir.SliceHeaderExpr) - n.SetLeft(walkexpr(n.Left(), init)) - n.List().SetFirst(walkexpr(n.List().First(), init)) - n.List().SetSecond(walkexpr(n.List().Second(), init)) + n.Ptr = walkexpr(n.Ptr, init) + n.LenCap.SetFirst(walkexpr(n.LenCap.First(), init)) + n.LenCap.SetSecond(walkexpr(n.LenCap.Second(), init)) return n case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR: n := n.(*ir.SliceExpr) - checkSlice := checkPtr(Curfn, 1) && n.Op() == ir.OSLICE3ARR && n.Left().Op() == ir.OCONVNOP && n.Left().(*ir.ConvExpr).Left().Type().IsUnsafePtr() + checkSlice := checkPtr(Curfn, 1) && n.Op() == ir.OSLICE3ARR && n.X.Op() == ir.OCONVNOP && n.X.(*ir.ConvExpr).X.Type().IsUnsafePtr() if checkSlice { - conv := n.Left().(*ir.ConvExpr) - conv.SetLeft(walkexpr(conv.Left(), init)) + conv := n.X.(*ir.ConvExpr) + conv.X = walkexpr(conv.X, init) } else { - n.SetLeft(walkexpr(n.Left(), init)) + n.X = walkexpr(n.X, init) } low, high, max := n.SliceBounds() @@ -1270,11 +1270,11 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { max = walkexpr(max, init) n.SetSliceBounds(low, high, max) if checkSlice { - n.SetLeft(walkCheckPtrAlignment(n.Left().(*ir.ConvExpr), init, max)) + n.X = walkCheckPtrAlignment(n.X.(*ir.ConvExpr), init, max) } if n.Op().IsSlice3() { - if max != nil && max.Op() == ir.OCAP && samesafeexpr(n.Left(), max.(*ir.UnaryExpr).Left()) { + if max != nil && max.Op() == ir.OCAP && samesafeexpr(n.X, max.(*ir.UnaryExpr).X) { // Reduce x[i:j:cap(x)] to x[i:j]. if n.Op() == ir.OSLICE3 { n.SetOp(ir.OSLICE) @@ -1317,14 +1317,14 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // cannot use chanfn - closechan takes any, not chan any n := n.(*ir.UnaryExpr) fn := syslook("closechan") - fn = substArgTypes(fn, n.Left().Type()) - return mkcall1(fn, nil, init, n.Left()) + fn = substArgTypes(fn, n.X.Type()) + return mkcall1(fn, nil, init, n.X) case ir.OMAKECHAN: // When size fits into int, use makechan instead of // makechan64, which is faster and shorter on 32 bit platforms. 
n := n.(*ir.MakeExpr)
- size := n.Left()
+ size := n.Len
 fnname := "makechan64"
 argtype := types.Types[types.TINT64]
 
@@ -1342,7 +1342,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 n := n.(*ir.MakeExpr)
 t := n.Type()
 hmapType := hmap(t)
- hint := n.Left()
+ hint := n.Len
 
 // var h *hmap
 var h ir.Node
@@ -1373,11 +1373,11 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 // }
 nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, nodintconst(BUCKETSIZE)), nil, nil)
- nif.SetLikely(true)
+ nif.Likely = true
 
 // var bv bmap
 bv := temp(bmap(t))
- nif.PtrBody().Append(ir.NewAssignStmt(base.Pos, bv, nil))
+ nif.Body.Append(ir.NewAssignStmt(base.Pos, bv, nil))
 
 // b = &bv
 b := nodAddr(bv)
@@ -1385,7 +1385,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 // h.buckets = b
 bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
 na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), b)
- nif.PtrBody().Append(na)
+ nif.Body.Append(na)
 appendWalkStmt(init, nif)
 }
 }
@@ -1442,8 +1442,8 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 
 case ir.OMAKESLICE:
 n := n.(*ir.MakeExpr)
- l := n.Left()
- r := n.Right()
+ l := n.Len
+ r := n.Cap
 if r == nil {
 r = safeexpr(l, init)
 l = r
@@ -1472,8 +1472,8 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 // }
 nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, conv(l, types.Types[types.TUINT64]), nodintconst(i)), nil, nil)
 niflen := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLT, l, nodintconst(0)), nil, nil)
- niflen.PtrBody().Set1(mkcall("panicmakeslicelen", nil, init))
- nif.PtrBody().Append(niflen, mkcall("panicmakeslicecap", nil, init))
+ niflen.Body.Set1(mkcall("panicmakeslicelen", nil, init))
+ nif.Body.Append(niflen, mkcall("panicmakeslicecap", nil, init))
 init.Append(typecheck(nif, ctxStmt))
 
 t = types.NewArray(t.Elem(), i) // [r]T
@@ -1507,9 +1507,9 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 m.SetType(t)
 
 fn := syslook(fnname)
- m.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype)))
- m.Left().MarkNonNil()
- m.PtrList().Set2(conv(len, types.Types[types.TINT]), conv(cap, types.Types[types.TINT]))
+ m.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype))
+ m.Ptr.MarkNonNil()
+ m.LenCap.Set2(conv(len, types.Types[types.TINT]), conv(cap, types.Types[types.TINT]))
 return walkexpr(typecheck(m, ctxExpr), init)
 
 case ir.OMAKESLICECOPY:
@@ -1523,9 +1523,9 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
 }
 
- length := conv(n.Left(), types.Types[types.TINT])
- copylen := ir.NewUnaryExpr(base.Pos, ir.OLEN, n.Right())
- copyptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, n.Right())
+ length := conv(n.Len, types.Types[types.TINT])
+ copylen := ir.NewUnaryExpr(base.Pos, ir.OLEN, n.Cap)
+ copyptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, n.Cap)
 
 if !t.Elem().HasPointers() && n.Bounded() {
 // When len(to)==len(from) and elements have no pointers:
@@ -1539,9 +1539,9 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 // instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer
 fn := syslook("mallocgc")
 sh := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil)
- sh.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, nodnil(), nodbool(false)))
- sh.Left().MarkNonNil()
- sh.PtrList().Set2(length, length)
+ sh.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, nodnil(), nodbool(false))
+ sh.Ptr.MarkNonNil()
+ sh.LenCap.Set2(length, length)
 sh.SetType(t)
 
 s := temp(t)
@@ -1561,9 +1561,9 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 // instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
 fn := syslook("makeslicecopy")
 s := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil)
- s.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), length, copylen, conv(copyptr, types.Types[types.TUNSAFEPTR])))
- s.Left().MarkNonNil()
- s.PtrList().Set2(length, length)
+ s.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), length, copylen, conv(copyptr, types.Types[types.TUNSAFEPTR]))
+ s.Ptr.MarkNonNil()
+ s.LenCap.Set2(length, length)
 s.SetType(t)
 return walkexpr(typecheck(s, ctxExpr), init)
 
@@ -1575,7 +1575,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 a = nodAddr(temp(t))
 }
 // intstring(*[4]byte, rune)
- return mkcall("intstring", n.Type(), init, a, conv(n.Left(), types.Types[types.TINT64]))
+ return mkcall("intstring", n.Type(), init, a, conv(n.X, types.Types[types.TINT64]))
 
 case ir.OBYTES2STR, ir.ORUNES2STR:
 n := n.(*ir.ConvExpr)
@@ -1587,29 +1587,29 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 }
 if n.Op() == ir.ORUNES2STR {
 // slicerunetostring(*[32]byte, []rune) string
- return mkcall("slicerunetostring", n.Type(), init, a, n.Left())
+ return mkcall("slicerunetostring", n.Type(), init, a, n.X)
 }
 // slicebytetostring(*[32]byte, ptr *byte, n int) string
- n.SetLeft(cheapexpr(n.Left(), init))
- ptr, len := backingArrayPtrLen(n.Left())
+ n.X = cheapexpr(n.X, init)
+ ptr, len := backingArrayPtrLen(n.X)
 return mkcall("slicebytetostring", n.Type(), init, a, ptr, len)
 
 case ir.OBYTES2STRTMP:
 n := n.(*ir.ConvExpr)
- n.SetLeft(walkexpr(n.Left(), init))
+ n.X = walkexpr(n.X, init)
 if !instrumenting {
 // Let the backend handle OBYTES2STRTMP directly
 // to avoid a function call to slicebytetostringtmp.
return n } // slicebytetostringtmp(ptr *byte, n int) string - n.SetLeft(cheapexpr(n.Left(), init)) - ptr, len := backingArrayPtrLen(n.Left()) + n.X = cheapexpr(n.X, init) + ptr, len := backingArrayPtrLen(n.X) return mkcall("slicebytetostringtmp", n.Type(), init, ptr, len) case ir.OSTR2BYTES: n := n.(*ir.ConvExpr) - s := n.Left() + s := n.X if ir.IsConst(s, constant.String) { sc := ir.StringVal(s) @@ -1655,7 +1655,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // The only such case today is: // for i, c := range []byte(string) n := n.(*ir.ConvExpr) - n.SetLeft(walkexpr(n.Left(), init)) + n.X = walkexpr(n.X, init) return n case ir.OSTR2RUNES: @@ -1667,7 +1667,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { a = nodAddr(temp(t)) } // stringtoslicerune(*[32]rune, string) []rune - return mkcall("stringtoslicerune", n.Type(), init, a, conv(n.Left(), types.Types[types.TSTRING])) + return mkcall("stringtoslicerune", n.Type(), init, a, conv(n.X, types.Types[types.TSTRING])) case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT, ir.OPTRLIT: if isStaticCompositeLiteral(n) && !canSSAType(n.Type()) { @@ -1684,11 +1684,11 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OSEND: n := n.(*ir.SendStmt) - n1 := n.Right() - n1 = assignconv(n1, n.Left().Type().Elem(), "chan send") + n1 := n.Value + n1 = assignconv(n1, n.Chan.Type().Elem(), "chan send") n1 = walkexpr(n1, init) n1 = nodAddr(n1) - return mkcall1(chanfn("chansend1", 2, n.Left().Type()), nil, init, n.Left(), n1) + return mkcall1(chanfn("chansend1", 2, n.Chan.Type()), nil, init, n.Chan, n1) case ir.OCLOSURE: return walkclosure(n.(*ir.ClosureExpr), init) @@ -1716,14 +1716,14 @@ func markTypeUsedInInterface(t *types.Type, from *obj.LSym) { // markUsedIfaceMethod marks that an interface method is used in the current // function. n is OCALLINTER node. func markUsedIfaceMethod(n *ir.CallExpr) { - dot := n.Left().(*ir.SelectorExpr) - ityp := dot.Left().Type() + dot := n.X.(*ir.SelectorExpr) + ityp := dot.X.Type() tsym := typenamesym(ityp).Linksym() r := obj.Addrel(Curfn.LSym) r.Sym = tsym // dot.Xoffset is the method index * Widthptr (the offset of code pointer // in itab). - midx := dot.Offset() / int64(Widthptr) + midx := dot.Offset / int64(Widthptr) r.Add = ifaceMethodOffset(ityp, midx) r.Type = objabi.R_USEIFACEMETHOD } @@ -1777,7 +1777,7 @@ func rtconvfn(src, dst *types.Type) (param, result types.Kind) { // TODO(josharian): combine this with its caller and simplify func reduceSlice(n *ir.SliceExpr) ir.Node { low, high, max := n.SliceBounds() - if high != nil && high.Op() == ir.OLEN && samesafeexpr(n.Left(), high.(*ir.UnaryExpr).Left()) { + if high != nil && high.Op() == ir.OLEN && samesafeexpr(n.X, high.(*ir.UnaryExpr).X) { // Reduce x[i:len(x)] to x[i:]. high = nil } @@ -1787,7 +1787,7 @@ func reduceSlice(n *ir.SliceExpr) ir.Node { if base.Debug.Slice > 0 { base.Warn("slice: omit slice operation") } - return n.Left() + return n.X } return n } @@ -1878,7 +1878,7 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { } res := ir.NewResultExpr(base.Pos, nil, types.BADWIDTH) - res.SetOffset(base.Ctxt.FixedFrameSize() + r.Offset) + res.Offset = base.Ctxt.FixedFrameSize() + r.Offset res.SetType(r.Type) res.SetTypecheck(1) @@ -1902,7 +1902,7 @@ func mkdotargslice(typ *types.Type, args []ir.Node) ir.Node { n.SetType(typ) } else { lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ).(ir.Ntype), nil) - lit.PtrList().Append(args...) + lit.List.Append(args...) 
lit.SetImplicit(true) n = lit } @@ -1917,42 +1917,42 @@ func mkdotargslice(typ *types.Type, args []ir.Node) ir.Node { // fixVariadicCall rewrites calls to variadic functions to use an // explicit ... argument if one is not already present. func fixVariadicCall(call *ir.CallExpr) { - fntype := call.Left().Type() - if !fntype.IsVariadic() || call.IsDDD() { + fntype := call.X.Type() + if !fntype.IsVariadic() || call.IsDDD { return } vi := fntype.NumParams() - 1 vt := fntype.Params().Field(vi).Type - args := call.List().Slice() + args := call.Args.Slice() extra := args[vi:] slice := mkdotargslice(vt, extra) for i := range extra { extra[i] = nil // allow GC } - call.PtrList().Set(append(args[:vi], slice)) - call.SetIsDDD(true) + call.Args.Set(append(args[:vi], slice)) + call.IsDDD = true } func walkCall(n *ir.CallExpr, init *ir.Nodes) { - if n.Rlist().Len() != 0 { + if n.Rargs.Len() != 0 { return // already walked } - params := n.Left().Type().Params() - args := n.List().Slice() + params := n.X.Type().Params() + args := n.Args.Slice() - n.SetLeft(walkexpr(n.Left(), init)) + n.X = walkexpr(n.X, init) walkexprlist(args, init) // If this is a method call, add the receiver at the beginning of the args. if n.Op() == ir.OCALLMETH { withRecv := make([]ir.Node, len(args)+1) - dot := n.Left().(*ir.SelectorExpr) - withRecv[0] = dot.Left() - dot.SetLeft(nil) + dot := n.X.(*ir.SelectorExpr) + withRecv[0] = dot.X + dot.X = nil copy(withRecv[1:], args) args = withRecv } @@ -1968,7 +1968,7 @@ func walkCall(n *ir.CallExpr, init *ir.Nodes) { var t *types.Type if n.Op() == ir.OCALLMETH { if i == 0 { - t = n.Left().Type().Recv().Type + t = n.X.Type().Recv().Type } else { t = params.Field(i - 1).Type } @@ -1985,18 +1985,18 @@ func walkCall(n *ir.CallExpr, init *ir.Nodes) { } } - n.PtrList().Set(tempAssigns) - n.PtrRlist().Set(args) + n.Args.Set(tempAssigns) + n.Rargs.Set(args) } // generate code for print func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { // Hoist all the argument evaluation up before the lock. - walkexprlistcheap(nn.List().Slice(), init) + walkexprlistcheap(nn.Args.Slice(), init) // For println, add " " between elements and "\n" at the end. if nn.Op() == ir.OPRINTN { - s := nn.List().Slice() + s := nn.Args.Slice() t := make([]ir.Node, 0, len(s)*2) for i, n := range s { if i != 0 { @@ -2005,11 +2005,11 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { t = append(t, n) } t = append(t, nodstr("\n")) - nn.PtrList().Set(t) + nn.Args.Set(t) } // Collapse runs of constant strings. 
- s := nn.List().Slice() + s := nn.Args.Slice() t := make([]ir.Node, 0, len(s)) for i := 0; i < len(s); { var strs []string @@ -2025,10 +2025,10 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { i++ } } - nn.PtrList().Set(t) + nn.Args.Set(t) calls := []ir.Node{mkcall("printlock", nil, init)} - for i, n := range nn.List().Slice() { + for i, n := range nn.Args.Slice() { if n.Op() == ir.OLITERAL { if n.Type() == types.UntypedRune { n = defaultlit(n, types.RuneType) @@ -2047,7 +2047,7 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { n = defaultlit(n, types.Types[types.TINT64]) } n = defaultlit(n, nil) - nn.List().SetIndex(i, n) + nn.Args.SetIndex(i, n) if n.Type() == nil || n.Type().Kind() == types.TFORW { continue } @@ -2116,7 +2116,7 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n) n.SetType(t) } - r.PtrList().Append(n) + r.Args.Append(n) } calls = append(calls, r) } @@ -2127,7 +2127,7 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { walkexprlist(calls, init) r := ir.NewBlockStmt(base.Pos, nil) - r.PtrList().Set(calls) + r.List.Set(calls) return walkstmt(typecheck(r, ctxStmt)) } @@ -2151,10 +2151,10 @@ func isReflectHeaderDataField(l ir.Node) bool { switch l.Op() { case ir.ODOT: l := l.(*ir.SelectorExpr) - tsym = l.Left().Type().Sym() + tsym = l.X.Type().Sym() case ir.ODOTPTR: l := l.(*ir.SelectorExpr) - tsym = l.Left().Type().Elem().Sym() + tsym = l.X.Type().Elem().Sym() default: return false } @@ -2173,26 +2173,26 @@ func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt { n.SetTypecheck(1) - if n.Left() == nil || n.Right() == nil { + if n.X == nil || n.Y == nil { return n } - lt := n.Left().Type() - rt := n.Right().Type() + lt := n.X.Type() + rt := n.Y.Type() if lt == nil || rt == nil { return n } - if ir.IsBlank(n.Left()) { - n.SetRight(defaultlit(n.Right(), nil)) + if ir.IsBlank(n.X) { + n.Y = defaultlit(n.Y, nil) return n } if !types.Identical(lt, rt) { - n.SetRight(assignconv(n.Right(), lt, "assignment")) - n.SetRight(walkexpr(n.Right(), init)) + n.Y = assignconv(n.Y, lt, "assignment") + n.Y = walkexpr(n.Y, init) } - dowidth(n.Right().Type()) + dowidth(n.Y.Type()) return n } @@ -2212,7 +2212,7 @@ func reorder3(all []*ir.AssignStmt) []ir.Node { var mapinit ir.Nodes for i, n := range all { - l := n.Left() + l := n.X // Save subexpressions needed on left side. // Drill through non-dereferences. 
@@ -2220,17 +2220,17 @@ func reorder3(all []*ir.AssignStmt) []ir.Node { switch ll := l; ll.Op() { case ir.ODOT: ll := ll.(*ir.SelectorExpr) - l = ll.Left() + l = ll.X continue case ir.OPAREN: ll := ll.(*ir.ParenExpr) - l = ll.Left() + l = ll.X continue case ir.OINDEX: ll := ll.(*ir.IndexExpr) - if ll.Left().Type().IsArray() { - ll.SetRight(reorder3save(ll.Right(), all, i, &early)) - l = ll.Left() + if ll.X.Type().IsArray() { + ll.Index = reorder3save(ll.Index, all, i, &early) + l = ll.X continue } } @@ -2246,22 +2246,22 @@ func reorder3(all []*ir.AssignStmt) []ir.Node { case ir.OINDEX, ir.OINDEXMAP: l := l.(*ir.IndexExpr) - l.SetLeft(reorder3save(l.Left(), all, i, &early)) - l.SetRight(reorder3save(l.Right(), all, i, &early)) + l.X = reorder3save(l.X, all, i, &early) + l.Index = reorder3save(l.Index, all, i, &early) if l.Op() == ir.OINDEXMAP { all[i] = convas(all[i], &mapinit) } case ir.ODEREF: l := l.(*ir.StarExpr) - l.SetLeft(reorder3save(l.Left(), all, i, &early)) + l.X = reorder3save(l.X, all, i, &early) case ir.ODOTPTR: l := l.(*ir.SelectorExpr) - l.SetLeft(reorder3save(l.Left(), all, i, &early)) + l.X = reorder3save(l.X, all, i, &early) } // Save expression on right side. - all[i].SetRight(reorder3save(all[i].Right(), all, i, &early)) + all[i].Y = reorder3save(all[i].Y, all, i, &early) } early = append(mapinit.Slice(), early...) @@ -2297,20 +2297,20 @@ func outervalue(n ir.Node) ir.Node { base.Fatalf("OXDOT in walk") case ir.ODOT: nn := nn.(*ir.SelectorExpr) - n = nn.Left() + n = nn.X continue case ir.OPAREN: nn := nn.(*ir.ParenExpr) - n = nn.Left() + n = nn.X continue case ir.OCONVNOP: nn := nn.(*ir.ConvExpr) - n = nn.Left() + n = nn.X continue case ir.OINDEX: nn := nn.(*ir.IndexExpr) - if nn.Left().Type() != nil && nn.Left().Type().IsArray() { - n = nn.Left() + if nn.X.Type() != nil && nn.X.Type().IsArray() { + n = nn.X continue } } @@ -2329,7 +2329,7 @@ func aliased(r ir.Node, all []*ir.AssignStmt) bool { // Treat all fields of a struct as referring to the whole struct. // We could do better but we would have to keep track of the fields. for r.Op() == ir.ODOT { - r = r.(*ir.SelectorExpr).Left() + r = r.(*ir.SelectorExpr).X } // Look for obvious aliasing: a variable being assigned @@ -2340,20 +2340,20 @@ func aliased(r ir.Node, all []*ir.AssignStmt) bool { memwrite := false for _, as := range all { // We can ignore assignments to blank. - if ir.IsBlank(as.Left()) { + if ir.IsBlank(as.X) { continue } - lv := outervalue(as.Left()) + lv := outervalue(as.X) if lv.Op() != ir.ONAME { memwrite = true continue } l := lv.(*ir.Name) - switch l.Class() { + switch l.Class_ { default: - base.Fatalf("unexpected class: %v, %v", l, l.Class()) + base.Fatalf("unexpected class: %v, %v", l, l.Class_) case ir.PAUTOHEAP, ir.PEXTERN: memwrite = true @@ -2401,7 +2401,7 @@ func anyAddrTaken(n ir.Node) bool { switch n.Op() { case ir.ONAME: n := n.(*ir.Name) - return n.Class() == ir.PEXTERN || n.Class() == ir.PAUTOHEAP || n.Name().Addrtaken() + return n.Class_ == ir.PEXTERN || n.Class_ == ir.PAUTOHEAP || n.Name().Addrtaken() case ir.ODOT: // but not ODOTPTR - should have been handled in aliased. 
base.Fatalf("anyAddrTaken unexpected ODOT") @@ -2509,7 +2509,7 @@ func paramstoheap(params *types.Type) []ir.Node { if stackcopy := v.Name().Stackcopy; stackcopy != nil { nn = append(nn, walkstmt(ir.NewDecl(base.Pos, ir.ODCL, v))) - if stackcopy.Class() == ir.PPARAM { + if stackcopy.Class_ == ir.PPARAM { nn = append(nn, walkstmt(typecheck(ir.NewAssignStmt(base.Pos, v, stackcopy), ctxStmt))) } } @@ -2557,7 +2557,7 @@ func returnsfromheap(params *types.Type) []ir.Node { if v == nil { continue } - if stackcopy := v.Name().Stackcopy; stackcopy != nil && stackcopy.Class() == ir.PPARAMOUT { + if stackcopy := v.Name().Stackcopy; stackcopy != nil && stackcopy.Class_ == ir.PPARAMOUT { nn = append(nn, walkstmt(typecheck(ir.NewAssignStmt(base.Pos, stackcopy, v), ctxStmt))) } } @@ -2736,7 +2736,7 @@ func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node { } func addstr(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { - c := n.List().Len() + c := n.List.Len() if c < 2 { base.Fatalf("addstr count %d too small", c) @@ -2745,7 +2745,7 @@ func addstr(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { buf := nodnil() if n.Esc() == EscNone { sz := int64(0) - for _, n1 := range n.List().Slice() { + for _, n1 := range n.List.Slice() { if n1.Op() == ir.OLITERAL { sz += int64(len(ir.StringVal(n1))) } @@ -2761,7 +2761,7 @@ func addstr(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { // build list of string arguments args := []ir.Node{buf} - for _, n2 := range n.List().Slice() { + for _, n2 := range n.List.Slice() { args = append(args, conv(n2, types.Types[types.TSTRING])) } @@ -2784,7 +2784,7 @@ func addstr(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { cat := syslook(fn) r := ir.NewCallExpr(base.Pos, ir.OCALL, cat, nil) - r.PtrList().Set(args) + r.Args.Set(args) r1 := typecheck(r, ctxExpr) r1 = walkexpr(r1, init) r1.SetType(n.Type()) @@ -2793,12 +2793,12 @@ func addstr(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { } func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) { - walkexprlistsafe(n.List().Slice(), init) + walkexprlistsafe(n.Args.Slice(), init) // walkexprlistsafe will leave OINDEX (s[n]) alone if both s // and n are name or literal, but those may index the slice we're // modifying here. Fix explicitly. 
- ls := n.List().Slice() + ls := n.Args.Slice() for i1, n1 := range ls { ls[i1] = cheapexpr(n1, init) } @@ -2821,10 +2821,10 @@ func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) { func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { walkAppendArgs(n, init) - l1 := n.List().First() - l2 := n.List().Second() + l1 := n.Args.First() + l2 := n.Args.Second() l2 = cheapexpr(l2, init) - n.List().SetSecond(l2) + n.Args.SetSecond(l2) var nodes ir.Nodes @@ -2842,14 +2842,14 @@ func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { nif := ir.NewIfStmt(base.Pos, nil, nil, nil) nuint := conv(nn, types.Types[types.TUINT]) scapuint := conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT]) - nif.SetLeft(ir.NewBinaryExpr(base.Pos, ir.OGT, nuint, scapuint)) + nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, nuint, scapuint) // instantiate growslice(typ *type, []any, int) []any fn := syslook("growslice") fn = substArgTypes(fn, elemtype, elemtype) // s = growslice(T, s, n) - nif.PtrBody().Set1(ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn))) + nif.Body.Set1(ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn))) nodes.Append(nif) // s = s[:n] @@ -2926,12 +2926,12 @@ func isAppendOfMake(n ir.Node) bool { return false } call := n.(*ir.CallExpr) - if !call.IsDDD() || call.List().Len() != 2 || call.List().Second().Op() != ir.OMAKESLICE { + if !call.IsDDD || call.Args.Len() != 2 || call.Args.Second().Op() != ir.OMAKESLICE { return false } - mk := call.List().Second().(*ir.MakeExpr) - if mk.Right() != nil { + mk := call.Args.Second().(*ir.MakeExpr) + if mk.Cap != nil { return false } @@ -2941,7 +2941,7 @@ func isAppendOfMake(n ir.Node) bool { // typecheck made sure that constant arguments to make are not negative and fit into an int. // The care of overflow of the len argument to make will be handled by an explicit check of int(len) < 0 during runtime. - y := mk.Left() + y := mk.Len if !ir.IsConst(y, constant.Int) && y.Type().Size() > types.Types[types.TUINT].Size() { return false } @@ -2980,23 +2980,23 @@ func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { // isAppendOfMake made sure all possible positive values of l2 fit into an uint. // The case of l2 overflow when converting from e.g. uint to int is handled by an explicit // check of l2 < 0 at runtime which is generated below. - l2 := conv(n.List().Second().(*ir.MakeExpr).Left(), types.Types[types.TINT]) + l2 := conv(n.Args.Second().(*ir.MakeExpr).Len, types.Types[types.TINT]) l2 = typecheck(l2, ctxExpr) - n.List().SetSecond(l2) // walkAppendArgs expects l2 in n.List.Second(). + n.Args.SetSecond(l2) // walkAppendArgs expects l2 in n.List.Second(). 
walkAppendArgs(n, init) - l1 := n.List().First() - l2 = n.List().Second() // re-read l2, as it may have been updated by walkAppendArgs + l1 := n.Args.First() + l2 = n.Args.Second() // re-read l2, as it may have been updated by walkAppendArgs var nodes []ir.Node // if l2 >= 0 (likely happens), do nothing nifneg := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGE, l2, nodintconst(0)), nil, nil) - nifneg.SetLikely(true) + nifneg.Likely = true // else panicmakeslicelen() - nifneg.PtrRlist().Set1(mkcall("panicmakeslicelen", nil, init)) + nifneg.Else.Set1(mkcall("panicmakeslicelen", nil, init)) nodes = append(nodes, nifneg) // s := l1 @@ -3019,7 +3019,7 @@ func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { fn = substArgTypes(fn, elemtype, elemtype) // s = growslice(T, s, n) - nif.PtrBody().Set1(ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn))) + nif.Body.Set1(ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn))) nodes = append(nodes, nif) // s = s[:n] @@ -3060,7 +3060,7 @@ func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { if hasPointers { // if l1ptr == sptr nifclr := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OEQ, l1ptr, sptr), nil, nil) - nifclr.SetBody(clr) + nifclr.Body = clr nodes = append(nodes, nifclr) } else { nodes = append(nodes, clr.Slice()...) @@ -3094,13 +3094,13 @@ func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { // } // s func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node { - if !samesafeexpr(dst, n.List().First()) { - n.List().SetFirst(safeexpr(n.List().First(), init)) - n.List().SetFirst(walkexpr(n.List().First(), init)) + if !samesafeexpr(dst, n.Args.First()) { + n.Args.SetFirst(safeexpr(n.Args.First(), init)) + n.Args.SetFirst(walkexpr(n.Args.First(), init)) } - walkexprlistsafe(n.List().Slice()[1:], init) + walkexprlistsafe(n.Args.Slice()[1:], init) - nsrc := n.List().First() + nsrc := n.Args.First() // walkexprlistsafe will leave OINDEX (s[n]) alone if both s // and n are name or literal, but those may index the slice we're @@ -3108,7 +3108,7 @@ func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node { // Using cheapexpr also makes sure that the evaluation // of all arguments (and especially any panics) happen // before we begin to modify the slice in a visible way. 
- ls := n.List().Slice()[1:]
+ ls := n.Args.Slice()[1:]
 for i, n := range ls {
 n = cheapexpr(n, init)
 if !types.Identical(n.Type(), nsrc.Type().Elem()) {
@@ -3118,7 +3118,7 @@ func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
 ls[i] = n
 }
 
- argc := n.List().Len() - 1
+ argc := n.Args.Len() - 1
 if argc < 1 {
 return nsrc
 }
@@ -3136,12 +3136,12 @@ func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
 
 na := nodintconst(int64(argc)) // const argc
 nif := ir.NewIfStmt(base.Pos, nil, nil, nil) // if cap(s) - len(s) < argc
- nif.SetLeft(ir.NewBinaryExpr(base.Pos, ir.OLT, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OCAP, ns), ir.NewUnaryExpr(base.Pos, ir.OLEN, ns)), na))
+ nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OCAP, ns), ir.NewUnaryExpr(base.Pos, ir.OLEN, ns)), na)
 
 fn := syslook("growslice") // growslice(<type>, old []T, mincap int) (ret []T)
 fn = substArgTypes(fn, ns.Type().Elem(), ns.Type().Elem())
 
- nif.PtrBody().Set1(ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), typename(ns.Type().Elem()), ns,
+ nif.Body.Set1(ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), typename(ns.Type().Elem()), ns,
 ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns), na))))
 
 l = append(l, nif)
@@ -3154,7 +3154,7 @@ func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
 slice.SetBounded(true)
 l = append(l, ir.NewAssignStmt(base.Pos, ns, slice)) // s = s[:n+argc]
 
- ls = n.List().Slice()[1:]
+ ls = n.Args.Slice()[1:]
 for i, n := range ls {
 ix := ir.NewIndexExpr(base.Pos, ns, nn) // s[n] ...
 ix.SetBounded(true)
@@ -3182,14 +3182,14 @@ func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
 // Also works if b is a string.
 //
 func copyany(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node {
- if n.Left().Type().Elem().HasPointers() {
+ if n.X.Type().Elem().HasPointers() {
 Curfn.SetWBPos(n.Pos())
- fn := writebarrierfn("typedslicecopy", n.Left().Type().Elem(), n.Right().Type().Elem())
- n.SetLeft(cheapexpr(n.Left(), init))
- ptrL, lenL := backingArrayPtrLen(n.Left())
- n.SetRight(cheapexpr(n.Right(), init))
- ptrR, lenR := backingArrayPtrLen(n.Right())
- return mkcall1(fn, n.Type(), init, typename(n.Left().Type().Elem()), ptrL, lenL, ptrR, lenR)
+ fn := writebarrierfn("typedslicecopy", n.X.Type().Elem(), n.Y.Type().Elem())
+ n.X = cheapexpr(n.X, init)
+ ptrL, lenL := backingArrayPtrLen(n.X)
+ n.Y = cheapexpr(n.Y, init)
+ ptrR, lenR := backingArrayPtrLen(n.Y)
+ return mkcall1(fn, n.Type(), init, typename(n.X.Type().Elem()), ptrL, lenL, ptrR, lenR)
 }
 
 if runtimecall {
@@ -3197,24 +3197,24 @@ func copyany(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node {
 // Use runtime.slicecopy for copying from a string or for copy between slices of the same element type.
 // copy(n.Left, n.Right)
 // n.Right can be a slice or string. 
- n.SetLeft(cheapexpr(n.Left(), init)) - ptrL, lenL := backingArrayPtrLen(n.Left()) - n.SetRight(cheapexpr(n.Right(), init)) - ptrR, lenR := backingArrayPtrLen(n.Right()) + n.X = cheapexpr(n.X, init) + ptrL, lenL := backingArrayPtrLen(n.X) + n.Y = cheapexpr(n.Y, init) + ptrR, lenR := backingArrayPtrLen(n.Y) fn := syslook("slicecopy") fn = substArgTypes(fn, ptrL.Type().Elem(), ptrR.Type().Elem()) - return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, nodintconst(n.Left().Type().Elem().Width)) + return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, nodintconst(n.X.Type().Elem().Width)) } - n.SetLeft(walkexpr(n.Left(), init)) - n.SetRight(walkexpr(n.Right(), init)) - nl := temp(n.Left().Type()) - nr := temp(n.Right().Type()) + n.X = walkexpr(n.X, init) + n.Y = walkexpr(n.Y, init) + nl := temp(n.X.Type()) + nr := temp(n.Y.Type()) var l []ir.Node - l = append(l, ir.NewAssignStmt(base.Pos, nl, n.Left())) - l = append(l, ir.NewAssignStmt(base.Pos, nr, n.Right())) + l = append(l, ir.NewAssignStmt(base.Pos, nl, n.X)) + l = append(l, ir.NewAssignStmt(base.Pos, nr, n.Y)) nfrm := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nr) nto := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nl) @@ -3227,23 +3227,23 @@ func copyany(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node { // if n > len(frm) { n = len(frm) } nif := ir.NewIfStmt(base.Pos, nil, nil, nil) - nif.SetLeft(ir.NewBinaryExpr(base.Pos, ir.OGT, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr))) - nif.PtrBody().Append(ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr))) + nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr)) + nif.Body.Append(ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr))) l = append(l, nif) // if to.ptr != frm.ptr { memmove( ... ) } ne := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.ONE, nto, nfrm), nil, nil) - ne.SetLikely(true) + ne.Likely = true l = append(l, ne) fn := syslook("memmove") fn = substArgTypes(fn, nl.Type().Elem(), nl.Type().Elem()) nwid := ir.Node(temp(types.Types[types.TUINTPTR])) setwid := ir.NewAssignStmt(base.Pos, nwid, conv(nlen, types.Types[types.TUINTPTR])) - ne.PtrBody().Append(setwid) + ne.Body.Append(setwid) nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, nodintconst(nl.Type().Elem().Width)) call := mkcall1(fn, nil, init, nto, nfrm, nwid) - ne.PtrBody().Append(call) + ne.Body.Append(call) typecheckslice(l, ctxStmt) walkstmtlist(l) @@ -3280,26 +3280,26 @@ func eqfor(t *types.Type) (n ir.Node, needsize bool) { // The result of walkcompare MUST be assigned back to n, e.g. // n.Left = walkcompare(n.Left, init) func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { - if n.Left().Type().IsInterface() && n.Right().Type().IsInterface() && n.Left().Op() != ir.ONIL && n.Right().Op() != ir.ONIL { + if n.X.Type().IsInterface() && n.Y.Type().IsInterface() && n.X.Op() != ir.ONIL && n.Y.Op() != ir.ONIL { return walkcompareInterface(n, init) } - if n.Left().Type().IsString() && n.Right().Type().IsString() { + if n.X.Type().IsString() && n.Y.Type().IsString() { return walkcompareString(n, init) } - n.SetLeft(walkexpr(n.Left(), init)) - n.SetRight(walkexpr(n.Right(), init)) + n.X = walkexpr(n.X, init) + n.Y = walkexpr(n.Y, init) // Given mixed interface/concrete comparison, // rewrite into types-equal && data-equal. // This is efficient, avoids allocations, and avoids runtime calls. 
- if n.Left().Type().IsInterface() != n.Right().Type().IsInterface() { + if n.X.Type().IsInterface() != n.Y.Type().IsInterface() { // Preserve side-effects in case of short-circuiting; see #32187. - l := cheapexpr(n.Left(), init) - r := cheapexpr(n.Right(), init) + l := cheapexpr(n.X, init) + r := cheapexpr(n.Y, init) // Swap so that l is the interface value and r is the concrete value. - if n.Right().Type().IsInterface() { + if n.Y.Type().IsInterface() { l, r = r, l } @@ -3337,7 +3337,7 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { // Otherwise back end handles it. // While we're here, decide whether to // inline or call an eq alg. - t := n.Left().Type() + t := n.X.Type() var inline bool maxcmpsize := int64(4) @@ -3350,14 +3350,14 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { switch t.Kind() { default: if base.Debug.Libfuzzer != 0 && t.IsInteger() { - n.SetLeft(cheapexpr(n.Left(), init)) - n.SetRight(cheapexpr(n.Right(), init)) + n.X = cheapexpr(n.X, init) + n.Y = cheapexpr(n.Y, init) // If exactly one comparison operand is // constant, invoke the constcmp functions // instead, and arrange for the constant // operand to be the first argument. - l, r := n.Left(), n.Right() + l, r := n.X, n.Y if r.Op() == ir.OLITERAL { l, r = r, l } @@ -3403,13 +3403,13 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { inline = t.NumComponents(types.IgnoreBlankFields) <= 4 } - cmpl := n.Left() + cmpl := n.X for cmpl != nil && cmpl.Op() == ir.OCONVNOP { - cmpl = cmpl.(*ir.ConvExpr).Left() + cmpl = cmpl.(*ir.ConvExpr).X } - cmpr := n.Right() + cmpr := n.Y for cmpr != nil && cmpr.Op() == ir.OCONVNOP { - cmpr = cmpr.(*ir.ConvExpr).Left() + cmpr = cmpr.(*ir.ConvExpr).X } // Chose not to inline. Call equality function directly. @@ -3421,10 +3421,10 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { fn, needsize := eqfor(t) call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) - call.PtrList().Append(nodAddr(cmpl)) - call.PtrList().Append(nodAddr(cmpr)) + call.Args.Append(nodAddr(cmpl)) + call.Args.Append(nodAddr(cmpr)) if needsize { - call.PtrList().Append(nodintconst(t.Width)) + call.Args.Append(nodintconst(t.Width)) } res := ir.Node(call) if n.Op() != ir.OEQ { @@ -3538,9 +3538,9 @@ func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node { } func walkcompareInterface(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { - n.SetRight(cheapexpr(n.Right(), init)) - n.SetLeft(cheapexpr(n.Left(), init)) - eqtab, eqdata := eqinterface(n.Left(), n.Right()) + n.Y = cheapexpr(n.Y, init) + n.X = cheapexpr(n.X, init) + eqtab, eqdata := eqinterface(n.X, n.Y) var cmp ir.Node if n.Op() == ir.OEQ { cmp = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqtab, eqdata) @@ -3555,21 +3555,21 @@ func walkcompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { // Rewrite comparisons to short constant strings as length+byte-wise comparisons. 
var cs, ncs ir.Node // const string, non-const string switch { - case ir.IsConst(n.Left(), constant.String) && ir.IsConst(n.Right(), constant.String): + case ir.IsConst(n.X, constant.String) && ir.IsConst(n.Y, constant.String): // ignore; will be constant evaluated - case ir.IsConst(n.Left(), constant.String): - cs = n.Left() - ncs = n.Right() - case ir.IsConst(n.Right(), constant.String): - cs = n.Right() - ncs = n.Left() + case ir.IsConst(n.X, constant.String): + cs = n.X + ncs = n.Y + case ir.IsConst(n.Y, constant.String): + cs = n.Y + ncs = n.X } if cs != nil { cmp := n.Op() // Our comparison below assumes that the non-constant string // is on the left hand side, so rewrite "" cmp x to x cmp "". // See issue 24817. - if ir.IsConst(n.Left(), constant.String) { + if ir.IsConst(n.X, constant.String) { cmp = brrev(cmp) } @@ -3652,9 +3652,9 @@ func walkcompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { var r ir.Node if n.Op() == ir.OEQ || n.Op() == ir.ONE { // prepare for rewrite below - n.SetLeft(cheapexpr(n.Left(), init)) - n.SetRight(cheapexpr(n.Right(), init)) - eqlen, eqmem := eqstring(n.Left(), n.Right()) + n.X = cheapexpr(n.X, init) + n.Y = cheapexpr(n.Y, init) + eqlen, eqmem := eqstring(n.X, n.Y) // quick check of len before full compare for == or !=. // memequal then tests equality up to length len. if n.Op() == ir.OEQ { @@ -3667,7 +3667,7 @@ func walkcompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { } } else { // sys_cmpstring(s1, s2) :: 0 - r = mkcall("cmpstring", types.Types[types.TINT], init, conv(n.Left(), types.Types[types.TSTRING]), conv(n.Right(), types.Types[types.TSTRING])) + r = mkcall("cmpstring", types.Types[types.TINT], init, conv(n.X, types.Types[types.TSTRING]), conv(n.Y, types.Types[types.TSTRING])) r = ir.NewBinaryExpr(base.Pos, n.Op(), r, nodintconst(0)) } @@ -3702,10 +3702,10 @@ func bounded(n ir.Node, max int64) bool { n := n.(*ir.BinaryExpr) v := int64(-1) switch { - case smallintconst(n.Left()): - v = ir.Int64Val(n.Left()) - case smallintconst(n.Right()): - v = ir.Int64Val(n.Right()) + case smallintconst(n.X): + v = ir.Int64Val(n.X) + case smallintconst(n.Y): + v = ir.Int64Val(n.Y) if n.Op() == ir.OANDNOT { v = ^v if !sign { @@ -3719,8 +3719,8 @@ func bounded(n ir.Node, max int64) bool { case ir.OMOD: n := n.(*ir.BinaryExpr) - if !sign && smallintconst(n.Right()) { - v := ir.Int64Val(n.Right()) + if !sign && smallintconst(n.Y) { + v := ir.Int64Val(n.Y) if 0 <= v && v <= max { return true } @@ -3728,8 +3728,8 @@ func bounded(n ir.Node, max int64) bool { case ir.ODIV: n := n.(*ir.BinaryExpr) - if !sign && smallintconst(n.Right()) { - v := ir.Int64Val(n.Right()) + if !sign && smallintconst(n.Y) { + v := ir.Int64Val(n.Y) for bits > 0 && v >= 2 { bits-- v >>= 1 @@ -3738,8 +3738,8 @@ func bounded(n ir.Node, max int64) bool { case ir.ORSH: n := n.(*ir.BinaryExpr) - if !sign && smallintconst(n.Right()) { - v := ir.Int64Val(n.Right()) + if !sign && smallintconst(n.Y) { + v := ir.Int64Val(n.Y) if v > int64(bits) { return true } @@ -3756,7 +3756,7 @@ func bounded(n ir.Node, max int64) bool { // usemethod checks interface method calls for uses of reflect.Type.Method. func usemethod(n *ir.CallExpr) { - t := n.Left().Type() + t := n.X.Type() // Looking for either of: // Method(int) reflect.Method @@ -3812,28 +3812,28 @@ func usefield(n *ir.SelectorExpr) { case ir.ODOT, ir.ODOTPTR: break } - if n.Sym() == nil { + if n.Sel == nil { // No field name. This DOTPTR was built by the compiler for access // to runtime data structures. Ignore. 
return } - t := n.Left().Type() + t := n.X.Type() if t.IsPtr() { t = t.Elem() } field := n.Selection if field == nil { - base.Fatalf("usefield %v %v without paramfld", n.Left().Type(), n.Sym()) + base.Fatalf("usefield %v %v without paramfld", n.X.Type(), n.Sel) } - if field.Sym != n.Sym() || field.Offset != n.Offset() { - base.Fatalf("field inconsistency: %v,%v != %v,%v", field.Sym, field.Offset, n.Sym(), n.Offset()) + if field.Sym != n.Sel || field.Offset != n.Offset { + base.Fatalf("field inconsistency: %v,%v != %v,%v", field.Sym, field.Offset, n.Sel, n.Offset) } if !strings.Contains(field.Note, "go:\"track\"") { return } - outer := n.Left().Type() + outer := n.X.Type() if outer.IsPtr() { outer = outer.Elem() } @@ -3918,7 +3918,7 @@ func anySideEffects(n ir.Node) bool { // Only possible side effect is division by zero. case ir.ODIV, ir.OMOD: n := n.(*ir.BinaryExpr) - if n.Right().Op() != ir.OLITERAL || constant.Sign(n.Right().Val()) == 0 { + if n.Y.Op() != ir.OLITERAL || constant.Sign(n.Y.Val()) == 0 { return true } @@ -3926,7 +3926,7 @@ func anySideEffects(n ir.Node) bool { // but many makechan and makemap use size zero, which is definitely OK. case ir.OMAKECHAN, ir.OMAKEMAP: n := n.(*ir.MakeExpr) - if !ir.IsConst(n.Left(), constant.Int) || constant.Sign(n.Left().Val()) != 0 { + if !ir.IsConst(n.Len, constant.Int) || constant.Sign(n.Len.Val()) != 0 { return true } @@ -3968,24 +3968,24 @@ func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node { isBuiltinCall := n.Op() != ir.OCALLFUNC && n.Op() != ir.OCALLMETH && n.Op() != ir.OCALLINTER // Turn f(a, b, []T{c, d, e}...) back into f(a, b, c, d, e). - if !isBuiltinCall && n.IsDDD() { - last := n.List().Len() - 1 - if va := n.List().Index(last); va.Op() == ir.OSLICELIT { + if !isBuiltinCall && n.IsDDD { + last := n.Args.Len() - 1 + if va := n.Args.Index(last); va.Op() == ir.OSLICELIT { va := va.(*ir.CompLitExpr) - n.PtrList().Set(append(n.List().Slice()[:last], va.List().Slice()...)) - n.SetIsDDD(false) + n.Args.Set(append(n.Args.Slice()[:last], va.List.Slice()...)) + n.IsDDD = false } } // origArgs keeps track of what argument is uintptr-unsafe/unsafe-uintptr conversion. 
- origArgs := make([]ir.Node, n.List().Len()) + origArgs := make([]ir.Node, n.Args.Len()) var funcArgs []*ir.Field - for i, arg := range n.List().Slice() { + for i, arg := range n.Args.Slice() { s := lookupN("a", i) - if !isBuiltinCall && arg.Op() == ir.OCONVNOP && arg.Type().IsUintptr() && arg.(*ir.ConvExpr).Left().Type().IsUnsafePtr() { + if !isBuiltinCall && arg.Op() == ir.OCONVNOP && arg.Type().IsUintptr() && arg.(*ir.ConvExpr).X.Type().IsUnsafePtr() { origArgs[i] = arg - arg = arg.(*ir.ConvExpr).Left() - n.List().SetIndex(i, arg) + arg = arg.(*ir.ConvExpr).X + n.Args.SetIndex(i, arg) } funcArgs = append(funcArgs, symfield(s, arg.Type())) } @@ -4002,20 +4002,20 @@ func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node { } args[i] = ir.NewConvExpr(base.Pos, origArg.Op(), origArg.Type(), args[i]) } - call := ir.NewCallExpr(base.Pos, n.Op(), n.Left(), args) + call := ir.NewCallExpr(base.Pos, n.Op(), n.X, args) if !isBuiltinCall { call.SetOp(ir.OCALL) - call.SetIsDDD(n.IsDDD()) + call.IsDDD = n.IsDDD } - fn.PtrBody().Set1(call) + fn.Body.Set1(call) funcbody() typecheckFunc(fn) - typecheckslice(fn.Body().Slice(), ctxStmt) + typecheckslice(fn.Body.Slice(), ctxStmt) Target.Decls = append(Target.Decls, fn) - call = ir.NewCallExpr(base.Pos, ir.OCALL, fn.Nname, n.List().Slice()) + call = ir.NewCallExpr(base.Pos, ir.OCALL, fn.Nname, n.Args.Slice()) return walkexpr(typecheck(call, ctxStmt), init) } @@ -4055,7 +4055,7 @@ func canMergeLoads() bool { // isRuneCount reports whether n is of the form len([]rune(string)). // These are optimized into a call to runtime.countrunes. func isRuneCount(n ir.Node) bool { - return base.Flag.N == 0 && !instrumenting && n.Op() == ir.OLEN && n.(*ir.UnaryExpr).Left().Op() == ir.OSTR2RUNES + return base.Flag.N == 0 && !instrumenting && n.Op() == ir.OLEN && n.(*ir.UnaryExpr).X.Op() == ir.OSTR2RUNES } func walkCheckPtrAlignment(n *ir.ConvExpr, init *ir.Nodes, count ir.Node) ir.Node { @@ -4079,8 +4079,8 @@ func walkCheckPtrAlignment(n *ir.ConvExpr, init *ir.Nodes, count ir.Node) ir.Nod count = nodintconst(1) } - n.SetLeft(cheapexpr(n.Left(), init)) - init.Append(mkcall("checkptrAlignment", nil, init, convnop(n.Left(), types.Types[types.TUNSAFEPTR]), typename(elem), conv(count, types.Types[types.TUINTPTR]))) + n.X = cheapexpr(n.X, init) + init.Append(mkcall("checkptrAlignment", nil, init, convnop(n.X, types.Types[types.TUNSAFEPTR]), typename(elem), conv(count, types.Types[types.TUINTPTR]))) return n } @@ -4102,12 +4102,12 @@ func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node { // TODO(mdempsky): Make stricter. We only need to exempt // reflect.Value.Pointer and reflect.Value.UnsafeAddr. 
- switch n.Left().Op() { + switch n.X.Op() { case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: return n } - if n.Left().Op() == ir.ODOTPTR && isReflectHeaderDataField(n.Left()) { + if n.X.Op() == ir.ODOTPTR && isReflectHeaderDataField(n.X) { return n } @@ -4123,20 +4123,20 @@ func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node { switch n.Op() { case ir.OADD: n := n.(*ir.BinaryExpr) - walk(n.Left()) - walk(n.Right()) + walk(n.X) + walk(n.Y) case ir.OSUB, ir.OANDNOT: n := n.(*ir.BinaryExpr) - walk(n.Left()) + walk(n.X) case ir.OCONVNOP: n := n.(*ir.ConvExpr) - if n.Left().Type().IsUnsafePtr() { - n.SetLeft(cheapexpr(n.Left(), init)) - originals = append(originals, convnop(n.Left(), types.Types[types.TUNSAFEPTR])) + if n.X.Type().IsUnsafePtr() { + n.X = cheapexpr(n.X, init) + originals = append(originals, convnop(n.X, types.Types[types.TUNSAFEPTR])) } } } - walk(n.Left()) + walk(n.X) cheap := cheapexpr(n, init) diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 5937798bd4be2..63ccaa6550859 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -89,7 +89,7 @@ func toNtype(x Node) Ntype { // An AddStringExpr is a string concatenation Expr[0] + Exprs[1] + ... + Expr[len(Expr)-1]. type AddStringExpr struct { miniExpr - List_ Nodes + List Nodes Prealloc *Name } @@ -97,14 +97,10 @@ func NewAddStringExpr(pos src.XPos, list []Node) *AddStringExpr { n := &AddStringExpr{} n.pos = pos n.op = OADDSTR - n.List_.Set(list) + n.List.Set(list) return n } -func (n *AddStringExpr) List() Nodes { return n.List_ } -func (n *AddStringExpr) PtrList() *Nodes { return &n.List_ } -func (n *AddStringExpr) SetList(x Nodes) { n.List_ = x } - // An AddrExpr is an address-of expression &X. // It may end up being a normal address-of or an allocation of a composite literal. type AddrExpr struct { @@ -120,10 +116,6 @@ func NewAddrExpr(pos src.XPos, x Node) *AddrExpr { return n } -func (n *AddrExpr) Left() Node { return n.X } -func (n *AddrExpr) SetLeft(x Node) { n.X = x } -func (n *AddrExpr) Right() Node { return n.Alloc } -func (n *AddrExpr) SetRight(x Node) { n.Alloc = x } func (n *AddrExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } func (n *AddrExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } @@ -170,11 +162,6 @@ func NewBinaryExpr(pos src.XPos, op Op, x, y Node) *BinaryExpr { return n } -func (n *BinaryExpr) Left() Node { return n.X } -func (n *BinaryExpr) SetLeft(x Node) { n.X = x } -func (n *BinaryExpr) Right() Node { return n.Y } -func (n *BinaryExpr) SetRight(y Node) { n.Y = y } - func (n *BinaryExpr) SetOp(op Op) { switch op { default: @@ -201,14 +188,14 @@ const ( // A CallExpr is a function call X(Args). type CallExpr struct { miniExpr - orig Node - X Node - Args Nodes - Rargs Nodes // TODO(rsc): Delete. - Body_ Nodes // TODO(rsc): Delete. - DDD bool - Use CallUse - NoInline_ bool + orig Node + X Node + Args Nodes + Rargs Nodes // TODO(rsc): Delete. + Body Nodes // TODO(rsc): Delete. 
+ IsDDD bool + Use CallUse + NoInline bool } func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr { @@ -222,23 +209,8 @@ func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr { func (*CallExpr) isStmt() {} -func (n *CallExpr) Orig() Node { return n.orig } -func (n *CallExpr) SetOrig(x Node) { n.orig = x } -func (n *CallExpr) Left() Node { return n.X } -func (n *CallExpr) SetLeft(x Node) { n.X = x } -func (n *CallExpr) List() Nodes { return n.Args } -func (n *CallExpr) PtrList() *Nodes { return &n.Args } -func (n *CallExpr) SetList(x Nodes) { n.Args = x } -func (n *CallExpr) Rlist() Nodes { return n.Rargs } -func (n *CallExpr) PtrRlist() *Nodes { return &n.Rargs } -func (n *CallExpr) SetRlist(x Nodes) { n.Rargs = x } -func (n *CallExpr) IsDDD() bool { return n.DDD } -func (n *CallExpr) SetIsDDD(x bool) { n.DDD = x } -func (n *CallExpr) NoInline() bool { return n.NoInline_ } -func (n *CallExpr) SetNoInline(x bool) { n.NoInline_ = x } -func (n *CallExpr) Body() Nodes { return n.Body_ } -func (n *CallExpr) PtrBody() *Nodes { return &n.Body_ } -func (n *CallExpr) SetBody(x Nodes) { n.Body_ = x } +func (n *CallExpr) Orig() Node { return n.orig } +func (n *CallExpr) SetOrig(x Node) { n.orig = x } func (n *CallExpr) SetOp(op Op) { switch op { @@ -253,65 +225,57 @@ func (n *CallExpr) SetOp(op Op) { // A CallPartExpr is a method expression X.Method (uncalled). type CallPartExpr struct { miniExpr - Func_ *Func + Func *Func X Node Method *types.Field Prealloc *Name } func NewCallPartExpr(pos src.XPos, x Node, method *types.Field, fn *Func) *CallPartExpr { - n := &CallPartExpr{Func_: fn, X: x, Method: method} + n := &CallPartExpr{Func: fn, X: x, Method: method} n.op = OCALLPART n.pos = pos n.typ = fn.Type() - n.Func_ = fn + n.Func = fn return n } -func (n *CallPartExpr) Func() *Func { return n.Func_ } -func (n *CallPartExpr) Left() Node { return n.X } func (n *CallPartExpr) Sym() *types.Sym { return n.Method.Sym } -func (n *CallPartExpr) SetLeft(x Node) { n.X = x } // A ClosureExpr is a function literal expression. type ClosureExpr struct { miniExpr - Func_ *Func + Func *Func Prealloc *Name } func NewClosureExpr(pos src.XPos, fn *Func) *ClosureExpr { - n := &ClosureExpr{Func_: fn} + n := &ClosureExpr{Func: fn} n.op = OCLOSURE n.pos = pos return n } -func (n *ClosureExpr) Func() *Func { return n.Func_ } - // A ClosureRead denotes reading a variable stored within a closure struct. type ClosureReadExpr struct { miniExpr - Offset_ int64 + Offset int64 } func NewClosureRead(typ *types.Type, offset int64) *ClosureReadExpr { - n := &ClosureReadExpr{Offset_: offset} + n := &ClosureReadExpr{Offset: offset} n.typ = typ n.op = OCLOSUREREAD return n } -func (n *ClosureReadExpr) Type() *types.Type { return n.typ } -func (n *ClosureReadExpr) Offset() int64 { return n.Offset_ } - // A CompLitExpr is a composite literal Type{Vals}. // Before type-checking, the type is Ntype. 
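// For example, []int{1, 2, 3} starts out as an OCOMPLIT and is rewritten
// by type-checking into an OSLICELIT whose List holds the three elements
// (compare the OSLICELIT case in fmt.go further down).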
type CompLitExpr struct { miniExpr orig Node Ntype Ntype - List_ Nodes // initialized values + List Nodes // initialized values Prealloc *Name Len int64 // backing array length for OSLICELIT } @@ -320,18 +284,13 @@ func NewCompLitExpr(pos src.XPos, op Op, typ Ntype, list []Node) *CompLitExpr { n := &CompLitExpr{Ntype: typ} n.pos = pos n.SetOp(op) - n.List_.Set(list) + n.List.Set(list) n.orig = n return n } func (n *CompLitExpr) Orig() Node { return n.orig } func (n *CompLitExpr) SetOrig(x Node) { n.orig = x } -func (n *CompLitExpr) Right() Node { return n.Ntype } -func (n *CompLitExpr) SetRight(x Node) { n.Ntype = toNtype(x) } -func (n *CompLitExpr) List() Nodes { return n.List_ } -func (n *CompLitExpr) PtrList() *Nodes { return &n.List_ } -func (n *CompLitExpr) SetList(x Nodes) { n.List_ = x } func (n *CompLitExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } func (n *CompLitExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } @@ -380,8 +339,6 @@ func NewConvExpr(pos src.XPos, op Op, typ *types.Type, x Node) *ConvExpr { return n } -func (n *ConvExpr) Left() Node { return n.X } -func (n *ConvExpr) SetLeft(x Node) { n.X = x } func (n *ConvExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } func (n *ConvExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } @@ -409,13 +366,6 @@ func NewIndexExpr(pos src.XPos, x, index Node) *IndexExpr { return n } -func (n *IndexExpr) Left() Node { return n.X } -func (n *IndexExpr) SetLeft(x Node) { n.X = x } -func (n *IndexExpr) Right() Node { return n.Index } -func (n *IndexExpr) SetRight(y Node) { n.Index = y } -func (n *IndexExpr) IndexMapLValue() bool { return n.Assigned } -func (n *IndexExpr) SetIndexMapLValue(x bool) { n.Assigned = x } - func (n *IndexExpr) SetOp(op Op) { switch op { default: @@ -439,38 +389,28 @@ func NewKeyExpr(pos src.XPos, key, value Node) *KeyExpr { return n } -func (n *KeyExpr) Left() Node { return n.Key } -func (n *KeyExpr) SetLeft(x Node) { n.Key = x } -func (n *KeyExpr) Right() Node { return n.Value } -func (n *KeyExpr) SetRight(y Node) { n.Value = y } - // A StructKeyExpr is an Field: Value composite literal key. type StructKeyExpr struct { miniExpr - Field *types.Sym - Value Node - Offset_ int64 + Field *types.Sym + Value Node + Offset int64 } func NewStructKeyExpr(pos src.XPos, field *types.Sym, value Node) *StructKeyExpr { n := &StructKeyExpr{Field: field, Value: value} n.pos = pos n.op = OSTRUCTKEY - n.Offset_ = types.BADWIDTH + n.Offset = types.BADWIDTH return n } -func (n *StructKeyExpr) Sym() *types.Sym { return n.Field } -func (n *StructKeyExpr) SetSym(x *types.Sym) { n.Field = x } -func (n *StructKeyExpr) Left() Node { return n.Value } -func (n *StructKeyExpr) SetLeft(x Node) { n.Value = x } -func (n *StructKeyExpr) Offset() int64 { return n.Offset_ } -func (n *StructKeyExpr) SetOffset(x int64) { n.Offset_ = x } +func (n *StructKeyExpr) Sym() *types.Sym { return n.Field } // An InlinedCallExpr is an inlined function call. 
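// As an illustration: once a call f(x) has been inlined, Body holds the
// inlined statements and ReturnVars the variables that carry f's results
// back out to the caller.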
 type InlinedCallExpr struct {
 	miniExpr
-	Body_      Nodes
+	Body       Nodes
 	ReturnVars Nodes
 }
 
@@ -478,18 +418,11 @@ func NewInlinedCallExpr(pos src.XPos, body, retvars []Node) *InlinedCallExpr {
 	n := &InlinedCallExpr{}
 	n.pos = pos
 	n.op = OINLCALL
-	n.Body_.Set(body)
+	n.Body.Set(body)
 	n.ReturnVars.Set(retvars)
 	return n
 }
 
-func (n *InlinedCallExpr) Body() Nodes      { return n.Body_ }
-func (n *InlinedCallExpr) PtrBody() *Nodes  { return &n.Body_ }
-func (n *InlinedCallExpr) SetBody(x Nodes)  { n.Body_ = x }
-func (n *InlinedCallExpr) Rlist() Nodes     { return n.ReturnVars }
-func (n *InlinedCallExpr) PtrRlist() *Nodes { return &n.ReturnVars }
-func (n *InlinedCallExpr) SetRlist(x Nodes) { n.ReturnVars = x }
-
 // A LogicalExpr is an expression X Op Y where Op is && or ||.
 // It is separate from BinaryExpr to make room for statements
 // that must be executed before Y but after X.
@@ -506,11 +439,6 @@ func NewLogicalExpr(pos src.XPos, op Op, x, y Node) *LogicalExpr {
 	return n
 }
 
-func (n *LogicalExpr) Left() Node      { return n.X }
-func (n *LogicalExpr) SetLeft(x Node)  { n.X = x }
-func (n *LogicalExpr) Right() Node     { return n.Y }
-func (n *LogicalExpr) SetRight(y Node) { n.Y = y }
-
 func (n *LogicalExpr) SetOp(op Op) {
 	switch op {
 	default:
@@ -536,11 +464,6 @@ func NewMakeExpr(pos src.XPos, op Op, len, cap Node) *MakeExpr {
 	return n
 }
 
-func (n *MakeExpr) Left() Node      { return n.Len }
-func (n *MakeExpr) SetLeft(x Node)  { n.Len = x }
-func (n *MakeExpr) Right() Node     { return n.Cap }
-func (n *MakeExpr) SetRight(x Node) { n.Cap = x }
-
 func (n *MakeExpr) SetOp(op Op) {
 	switch op {
 	default:
@@ -565,16 +488,8 @@ func NewMethodExpr(pos src.XPos, t *types.Type, method *types.Field) *MethodExpr
 	return n
 }
 
-func (n *MethodExpr) FuncName() *Name   { return n.FuncName_ }
-func (n *MethodExpr) Left() Node        { panic("MethodExpr.Left") }
-func (n *MethodExpr) SetLeft(x Node)    { panic("MethodExpr.SetLeft") }
-func (n *MethodExpr) Right() Node       { panic("MethodExpr.Right") }
-func (n *MethodExpr) SetRight(x Node)   { panic("MethodExpr.SetRight") }
-func (n *MethodExpr) Sym() *types.Sym   { panic("MethodExpr.Sym") }
-func (n *MethodExpr) Offset() int64     { panic("MethodExpr.Offset") }
-func (n *MethodExpr) SetOffset(x int64) { panic("MethodExpr.SetOffset") }
-func (n *MethodExpr) Class() Class      { panic("MethodExpr.Class") }
-func (n *MethodExpr) SetClass(x Class)  { panic("MethodExpr.SetClass") }
+func (n *MethodExpr) FuncName() *Name { return n.FuncName_ }
+func (n *MethodExpr) Sym() *types.Sym { panic("MethodExpr.Sym") }
 
 // A NilExpr represents the predefined untyped constant nil.
 // (It may be copied and assigned a type, though.)
@@ -607,8 +522,6 @@ func NewParenExpr(pos src.XPos, x Node) *ParenExpr {
 	return n
 }
 
-func (n *ParenExpr) Left() Node     { return n.X }
-func (n *ParenExpr) SetLeft(x Node) { n.X = x }
 func (n *ParenExpr) Implicit() bool     { return n.flags&miniExprImplicit != 0 }
 func (n *ParenExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
 
@@ -625,20 +538,17 @@ func (n *ParenExpr) SetOTYPE(t *types.Type) {
 // A ResultExpr represents a direct access to a result slot on the stack frame.
 type ResultExpr struct {
 	miniExpr
-	Offset_ int64
+	Offset int64
 }
 
 func NewResultExpr(pos src.XPos, typ *types.Type, offset int64) *ResultExpr {
-	n := &ResultExpr{Offset_: offset}
+	n := &ResultExpr{Offset: offset}
 	n.pos = pos
 	n.op = ORESULT
 	n.typ = typ
 	return n
 }
 
-func (n *ResultExpr) Offset() int64     { return n.Offset_ }
-func (n *ResultExpr) SetOffset(x int64) { n.Offset_ = x }
-
 // A NameOffsetExpr refers to an offset within a variable.
// It is like a SelectorExpr but without the field name. type NameOffsetExpr struct { @@ -659,14 +569,14 @@ type SelectorExpr struct { miniExpr X Node Sel *types.Sym - Offset_ int64 + Offset int64 Selection *types.Field } func NewSelectorExpr(pos src.XPos, op Op, x Node, sel *types.Sym) *SelectorExpr { n := &SelectorExpr{X: x, Sel: sel} n.pos = pos - n.Offset_ = types.BADWIDTH + n.Offset = types.BADWIDTH n.SetOp(op) return n } @@ -680,14 +590,9 @@ func (n *SelectorExpr) SetOp(op Op) { } } -func (n *SelectorExpr) Left() Node { return n.X } -func (n *SelectorExpr) SetLeft(x Node) { n.X = x } -func (n *SelectorExpr) Sym() *types.Sym { return n.Sel } -func (n *SelectorExpr) SetSym(x *types.Sym) { n.Sel = x } -func (n *SelectorExpr) Offset() int64 { return n.Offset_ } -func (n *SelectorExpr) SetOffset(x int64) { n.Offset_ = x } -func (n *SelectorExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } -func (n *SelectorExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } +func (n *SelectorExpr) Sym() *types.Sym { return n.Sel } +func (n *SelectorExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } +func (n *SelectorExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } // Before type-checking, bytes.Buffer is a SelectorExpr. // After type-checking it becomes a Name. @@ -696,8 +601,8 @@ func (*SelectorExpr) CanBeNtype() {} // A SliceExpr is a slice expression X[Low:High] or X[Low:High:Max]. type SliceExpr struct { miniExpr - X Node - List_ Nodes // TODO(rsc): Use separate Nodes + X Node + List Nodes // TODO(rsc): Use separate Nodes } func NewSliceExpr(pos src.XPos, op Op, x Node) *SliceExpr { @@ -707,12 +612,6 @@ func NewSliceExpr(pos src.XPos, op Op, x Node) *SliceExpr { return n } -func (n *SliceExpr) Left() Node { return n.X } -func (n *SliceExpr) SetLeft(x Node) { n.X = x } -func (n *SliceExpr) List() Nodes { return n.List_ } -func (n *SliceExpr) PtrList() *Nodes { return &n.List_ } -func (n *SliceExpr) SetList(x Nodes) { n.List_ = x } - func (n *SliceExpr) SetOp(op Op) { switch op { default: @@ -725,16 +624,16 @@ func (n *SliceExpr) SetOp(op Op) { // SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max]. // n must be a slice expression. max is nil if n is a simple slice expression. func (n *SliceExpr) SliceBounds() (low, high, max Node) { - if n.List_.Len() == 0 { + if n.List.Len() == 0 { return nil, nil, nil } switch n.Op() { case OSLICE, OSLICEARR, OSLICESTR: - s := n.List_.Slice() + s := n.List.Slice() return s[0], s[1], nil case OSLICE3, OSLICE3ARR: - s := n.List_.Slice() + s := n.List.Slice() return s[0], s[1], s[2] } base.Fatalf("SliceBounds op %v: %v", n.Op(), n) @@ -749,24 +648,24 @@ func (n *SliceExpr) SetSliceBounds(low, high, max Node) { if max != nil { base.Fatalf("SetSliceBounds %v given three bounds", n.Op()) } - s := n.List_.Slice() + s := n.List.Slice() if s == nil { if low == nil && high == nil { return } - n.List_.Set2(low, high) + n.List.Set2(low, high) return } s[0] = low s[1] = high return case OSLICE3, OSLICE3ARR: - s := n.List_.Slice() + s := n.List.Slice() if s == nil { if low == nil && high == nil && max == nil { return } - n.List_.Set3(low, high, max) + n.List.Set3(low, high, max) return } s[0] = low @@ -793,8 +692,8 @@ func (o Op) IsSlice3() bool { // A SliceHeader expression constructs a slice header from its parts. 
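// exprFmt further down prints these as sliceheader{ptr,len,cap}, which is
// the right mental model: Ptr is the data pointer and LenCap holds the
// length and capacity.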
type SliceHeaderExpr struct { miniExpr - Ptr Node - LenCap_ Nodes // TODO(rsc): Split into two Node fields + Ptr Node + LenCap Nodes // TODO(rsc): Split into two Node fields } func NewSliceHeaderExpr(pos src.XPos, typ *types.Type, ptr, len, cap Node) *SliceHeaderExpr { @@ -802,16 +701,10 @@ func NewSliceHeaderExpr(pos src.XPos, typ *types.Type, ptr, len, cap Node) *Slic n.pos = pos n.op = OSLICEHEADER n.typ = typ - n.LenCap_.Set2(len, cap) + n.LenCap.Set2(len, cap) return n } -func (n *SliceHeaderExpr) Left() Node { return n.Ptr } -func (n *SliceHeaderExpr) SetLeft(x Node) { n.Ptr = x } -func (n *SliceHeaderExpr) List() Nodes { return n.LenCap_ } -func (n *SliceHeaderExpr) PtrList() *Nodes { return &n.LenCap_ } -func (n *SliceHeaderExpr) SetList(x Nodes) { n.LenCap_ = x } - // A StarExpr is a dereference expression *X. // It may end up being a value or a type. type StarExpr struct { @@ -826,8 +719,6 @@ func NewStarExpr(pos src.XPos, x Node) *StarExpr { return n } -func (n *StarExpr) Left() Node { return n.X } -func (n *StarExpr) SetLeft(x Node) { n.X = x } func (n *StarExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } func (n *StarExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } @@ -858,14 +749,6 @@ func NewTypeAssertExpr(pos src.XPos, x Node, typ Ntype) *TypeAssertExpr { return n } -func (n *TypeAssertExpr) Left() Node { return n.X } -func (n *TypeAssertExpr) SetLeft(x Node) { n.X = x } -func (n *TypeAssertExpr) Right() Node { return n.Ntype } -func (n *TypeAssertExpr) SetRight(x Node) { n.Ntype = x } // TODO: toNtype(x) -func (n *TypeAssertExpr) List() Nodes { return n.Itab } -func (n *TypeAssertExpr) PtrList() *Nodes { return &n.Itab } -func (n *TypeAssertExpr) SetList(x Nodes) { n.Itab = x } - func (n *TypeAssertExpr) SetOp(op Op) { switch op { default: @@ -889,9 +772,6 @@ func NewUnaryExpr(pos src.XPos, op Op, x Node) *UnaryExpr { return n } -func (n *UnaryExpr) Left() Node { return n.X } -func (n *UnaryExpr) SetLeft(x Node) { n.X = x } - func (n *UnaryExpr) SetOp(op Op) { switch op { default: diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 76bb35f971f65..49c4ac9a8d372 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -332,75 +332,75 @@ func stmtFmt(n Node, s fmt.State) { switch n.Op() { case ODCL: n := n.(*Decl) - fmt.Fprintf(s, "var %v %v", n.Left().Sym(), n.Left().Type()) + fmt.Fprintf(s, "var %v %v", n.X.Sym(), n.X.Type()) // Don't export "v = " initializing statements, hope they're always // preceded by the DCL which will be re-parsed and typechecked to reproduce // the "v = " again. 
case OAS: n := n.(*AssignStmt) - if n.Colas() && !complexinit { - fmt.Fprintf(s, "%v := %v", n.Left(), n.Right()) + if n.Def && !complexinit { + fmt.Fprintf(s, "%v := %v", n.X, n.Y) } else { - fmt.Fprintf(s, "%v = %v", n.Left(), n.Right()) + fmt.Fprintf(s, "%v = %v", n.X, n.Y) } case OASOP: n := n.(*AssignOpStmt) - if n.Implicit() { - if n.SubOp() == OADD { - fmt.Fprintf(s, "%v++", n.Left()) + if n.IncDec { + if n.AsOp == OADD { + fmt.Fprintf(s, "%v++", n.X) } else { - fmt.Fprintf(s, "%v--", n.Left()) + fmt.Fprintf(s, "%v--", n.X) } break } - fmt.Fprintf(s, "%v %v= %v", n.Left(), n.SubOp(), n.Right()) + fmt.Fprintf(s, "%v %v= %v", n.X, n.AsOp, n.Y) case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV: n := n.(*AssignListStmt) - if n.Colas() && !complexinit { - fmt.Fprintf(s, "%.v := %.v", n.List(), n.Rlist()) + if n.Def && !complexinit { + fmt.Fprintf(s, "%.v := %.v", n.Lhs, n.Rhs) } else { - fmt.Fprintf(s, "%.v = %.v", n.List(), n.Rlist()) + fmt.Fprintf(s, "%.v = %.v", n.Lhs, n.Rhs) } case OBLOCK: n := n.(*BlockStmt) - if n.List().Len() != 0 { - fmt.Fprintf(s, "%v", n.List()) + if n.List.Len() != 0 { + fmt.Fprintf(s, "%v", n.List) } case ORETURN: n := n.(*ReturnStmt) - fmt.Fprintf(s, "return %.v", n.List()) + fmt.Fprintf(s, "return %.v", n.Results) case ORETJMP: n := n.(*BranchStmt) - fmt.Fprintf(s, "retjmp %v", n.Sym()) + fmt.Fprintf(s, "retjmp %v", n.Label) case OINLMARK: n := n.(*InlineMarkStmt) - fmt.Fprintf(s, "inlmark %d", n.Offset()) + fmt.Fprintf(s, "inlmark %d", n.Index) case OGO: n := n.(*GoDeferStmt) - fmt.Fprintf(s, "go %v", n.Left()) + fmt.Fprintf(s, "go %v", n.Call) case ODEFER: n := n.(*GoDeferStmt) - fmt.Fprintf(s, "defer %v", n.Left()) + fmt.Fprintf(s, "defer %v", n.Call) case OIF: n := n.(*IfStmt) if simpleinit { - fmt.Fprintf(s, "if %v; %v { %v }", n.Init().First(), n.Left(), n.Body()) + fmt.Fprintf(s, "if %v; %v { %v }", n.Init().First(), n.Cond, n.Body) } else { - fmt.Fprintf(s, "if %v { %v }", n.Left(), n.Body()) + fmt.Fprintf(s, "if %v { %v }", n.Cond, n.Body) } - if n.Rlist().Len() != 0 { - fmt.Fprintf(s, " else { %v }", n.Rlist()) + if n.Else.Len() != 0 { + fmt.Fprintf(s, " else { %v }", n.Else) } case OFOR, OFORUNTIL: @@ -417,25 +417,25 @@ func stmtFmt(n Node, s fmt.State) { fmt.Fprint(s, opname) if simpleinit { fmt.Fprintf(s, " %v;", n.Init().First()) - } else if n.Right() != nil { + } else if n.Post != nil { fmt.Fprint(s, " ;") } - if n.Left() != nil { - fmt.Fprintf(s, " %v", n.Left()) + if n.Cond != nil { + fmt.Fprintf(s, " %v", n.Cond) } - if n.Right() != nil { - fmt.Fprintf(s, "; %v", n.Right()) + if n.Post != nil { + fmt.Fprintf(s, "; %v", n.Post) } else if simpleinit { fmt.Fprint(s, ";") } - if n.Op() == OFORUNTIL && n.List().Len() != 0 { - fmt.Fprintf(s, "; %v", n.List()) + if n.Op() == OFORUNTIL && n.Late.Len() != 0 { + fmt.Fprintf(s, "; %v", n.Late) } - fmt.Fprintf(s, " { %v }", n.Body()) + fmt.Fprintf(s, " { %v }", n.Body) case ORANGE: n := n.(*RangeStmt) @@ -444,12 +444,12 @@ func stmtFmt(n Node, s fmt.State) { break } - if n.List().Len() == 0 { - fmt.Fprintf(s, "for range %v { %v }", n.Right(), n.Body()) + if n.Vars.Len() == 0 { + fmt.Fprintf(s, "for range %v { %v }", n.X, n.Body) break } - fmt.Fprintf(s, "for %.v = range %v { %v }", n.List(), n.Right(), n.Body()) + fmt.Fprintf(s, "for %.v = range %v { %v }", n.Vars, n.X, n.Body) case OSELECT: n := n.(*SelectStmt) @@ -457,7 +457,7 @@ func stmtFmt(n Node, s fmt.State) { fmt.Fprintf(s, "%v statement", n.Op()) break } - fmt.Fprintf(s, "select { %v }", n.List()) + fmt.Fprintf(s, "select { %v }", n.Cases) 
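The OAS and OASOP cases just above show the pattern this series applies
everywhere: accessor pairs such as Colas/SetColas and SubOp/SetSubOp become
exported fields (Def, AsOp, IncDec) that are read and written directly. A
minimal standalone sketch of the resulting style — Node, Name, and
AssignStmt here are toy stand-ins, not the compiler's ir types:

package main

import "fmt"

// Toy stand-ins for the compiler's ir.Node and ir.AssignStmt, for
// illustration only; the real types live in cmd/compile/internal/ir.
type Node interface{ String() string }

type Name struct{ Ident string }

func (n *Name) String() string { return n.Ident }

// After this series, data lives in exported fields (X, Y, Def) that are
// accessed directly, instead of Left()/Right()/Colas() accessors.
type AssignStmt struct {
	X   Node // left-hand side
	Y   Node // right-hand side
	Def bool // declaration (:=) rather than assignment (=)
}

func (s *AssignStmt) String() string {
	op := "="
	if s.Def {
		op = ":="
	}
	return fmt.Sprintf("%v %s %v", s.X, op, s.Y)
}

func main() {
	as := &AssignStmt{X: &Name{Ident: "x"}, Y: &Name{Ident: "y"}, Def: true}
	fmt.Println(as) // prints: x := y
}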
case OSWITCH: n := n.(*SwitchStmt) @@ -469,31 +469,31 @@ func stmtFmt(n Node, s fmt.State) { if simpleinit { fmt.Fprintf(s, " %v;", n.Init().First()) } - if n.Left() != nil { - fmt.Fprintf(s, " %v ", n.Left()) + if n.Tag != nil { + fmt.Fprintf(s, " %v ", n.Tag) } - fmt.Fprintf(s, " { %v }", n.List()) + fmt.Fprintf(s, " { %v }", n.Cases) case OCASE: n := n.(*CaseStmt) - if n.List().Len() != 0 { - fmt.Fprintf(s, "case %.v", n.List()) + if n.List.Len() != 0 { + fmt.Fprintf(s, "case %.v", n.List) } else { fmt.Fprint(s, "default") } - fmt.Fprintf(s, ": %v", n.Body()) + fmt.Fprintf(s, ": %v", n.Body) case OBREAK, OCONTINUE, OGOTO, OFALL: n := n.(*BranchStmt) - if n.Sym() != nil { - fmt.Fprintf(s, "%v %v", n.Op(), n.Sym()) + if n.Label != nil { + fmt.Fprintf(s, "%v %v", n.Op(), n.Label) } else { fmt.Fprintf(s, "%v", n.Op()) } case OLABEL: n := n.(*LabelStmt) - fmt.Fprintf(s, "%v: ", n.Sym()) + fmt.Fprintf(s, "%v: ", n.Label) } if extrablock { @@ -527,19 +527,19 @@ func exprFmt(n Node, s fmt.State, prec int) { case OADDR: nn := nn.(*AddrExpr) if nn.Implicit() { - n = nn.Left() + n = nn.X continue } case ODEREF: nn := nn.(*StarExpr) if nn.Implicit() { - n = nn.Left() + n = nn.X continue } case OCONV, OCONVNOP, OCONVIFACE: nn := nn.(*ConvExpr) if nn.Implicit() { - n = nn.Left() + n = nn.X continue } } @@ -560,7 +560,7 @@ func exprFmt(n Node, s fmt.State, prec int) { switch n.Op() { case OPAREN: n := n.(*ParenExpr) - fmt.Fprintf(s, "(%v)", n.Left()) + fmt.Fprintf(s, "(%v)", n.X) case ONIL: fmt.Fprint(s, "nil") @@ -694,7 +694,7 @@ func exprFmt(n Node, s fmt.State, prec int) { fmt.Fprint(s, "func literal") return } - fmt.Fprintf(s, "%v { %v }", n.Type(), n.Func().Body()) + fmt.Fprintf(s, "%v { %v }", n.Type(), n.Func.Body) case OCOMPLIT: n := n.(*CompLitExpr) @@ -703,84 +703,84 @@ func exprFmt(n Node, s fmt.State, prec int) { fmt.Fprintf(s, "... 
argument") return } - if n.Right() != nil { - fmt.Fprintf(s, "%v{%s}", n.Right(), ellipsisIf(n.List().Len() != 0)) + if n.Ntype != nil { + fmt.Fprintf(s, "%v{%s}", n.Ntype, ellipsisIf(n.List.Len() != 0)) return } fmt.Fprint(s, "composite literal") return } - fmt.Fprintf(s, "(%v{ %.v })", n.Right(), n.List()) + fmt.Fprintf(s, "(%v{ %.v })", n.Ntype, n.List) case OPTRLIT: n := n.(*AddrExpr) - fmt.Fprintf(s, "&%v", n.Left()) + fmt.Fprintf(s, "&%v", n.X) case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT: n := n.(*CompLitExpr) if !exportFormat { - fmt.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(n.List().Len() != 0)) + fmt.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(n.List.Len() != 0)) return } - fmt.Fprintf(s, "(%v{ %.v })", n.Type(), n.List()) + fmt.Fprintf(s, "(%v{ %.v })", n.Type(), n.List) case OKEY: n := n.(*KeyExpr) - if n.Left() != nil && n.Right() != nil { - fmt.Fprintf(s, "%v:%v", n.Left(), n.Right()) + if n.Key != nil && n.Value != nil { + fmt.Fprintf(s, "%v:%v", n.Key, n.Value) return } - if n.Left() == nil && n.Right() != nil { - fmt.Fprintf(s, ":%v", n.Right()) + if n.Key == nil && n.Value != nil { + fmt.Fprintf(s, ":%v", n.Value) return } - if n.Left() != nil && n.Right() == nil { - fmt.Fprintf(s, "%v:", n.Left()) + if n.Key != nil && n.Value == nil { + fmt.Fprintf(s, "%v:", n.Key) return } fmt.Fprint(s, ":") case OSTRUCTKEY: n := n.(*StructKeyExpr) - fmt.Fprintf(s, "%v:%v", n.Sym(), n.Left()) + fmt.Fprintf(s, "%v:%v", n.Field, n.Value) case OCALLPART: n := n.(*CallPartExpr) - exprFmt(n.Left(), s, nprec) - if n.Sym() == nil { + exprFmt(n.X, s, nprec) + if n.Method.Sym == nil { fmt.Fprint(s, ".") return } - fmt.Fprintf(s, ".%s", types.SymMethodName(n.Sym())) + fmt.Fprintf(s, ".%s", types.SymMethodName(n.Method.Sym)) case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH: n := n.(*SelectorExpr) - exprFmt(n.Left(), s, nprec) - if n.Sym() == nil { + exprFmt(n.X, s, nprec) + if n.Sel == nil { fmt.Fprint(s, ".") return } - fmt.Fprintf(s, ".%s", types.SymMethodName(n.Sym())) + fmt.Fprintf(s, ".%s", types.SymMethodName(n.Sel)) case ODOTTYPE, ODOTTYPE2: n := n.(*TypeAssertExpr) - exprFmt(n.Left(), s, nprec) - if n.Right() != nil { - fmt.Fprintf(s, ".(%v)", n.Right()) + exprFmt(n.X, s, nprec) + if n.Ntype != nil { + fmt.Fprintf(s, ".(%v)", n.Ntype) return } fmt.Fprintf(s, ".(%v)", n.Type()) case OINDEX, OINDEXMAP: n := n.(*IndexExpr) - exprFmt(n.Left(), s, nprec) - fmt.Fprintf(s, "[%v]", n.Right()) + exprFmt(n.X, s, nprec) + fmt.Fprintf(s, "[%v]", n.Index) case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR: n := n.(*SliceExpr) - exprFmt(n.Left(), s, nprec) + exprFmt(n.X, s, nprec) fmt.Fprint(s, "[") low, high, max := n.SliceBounds() if low != nil { @@ -800,14 +800,14 @@ func exprFmt(n Node, s fmt.State, prec int) { case OSLICEHEADER: n := n.(*SliceHeaderExpr) - if n.List().Len() != 2 { - base.Fatalf("bad OSLICEHEADER list length %d", n.List().Len()) + if n.LenCap.Len() != 2 { + base.Fatalf("bad OSLICEHEADER list length %d", n.LenCap.Len()) } - fmt.Fprintf(s, "sliceheader{%v,%v,%v}", n.Left(), n.List().First(), n.List().Second()) + fmt.Fprintf(s, "sliceheader{%v,%v,%v}", n.Ptr, n.LenCap.First(), n.LenCap.Second()) case OCOMPLEX, OCOPY: n := n.(*BinaryExpr) - fmt.Fprintf(s, "%v(%v, %v)", n.Op(), n.Left(), n.Right()) + fmt.Fprintf(s, "%v(%v, %v)", n.Op(), n.X, n.Y) case OCONV, OCONVIFACE, @@ -823,7 +823,7 @@ func exprFmt(n Node, s fmt.State, prec int) { } else { fmt.Fprintf(s, "%v", n.Type()) } - fmt.Fprintf(s, "(%v)", n.Left()) + fmt.Fprintf(s, "(%v)", n.X) case OREAL, OIMAG, @@ -836,7 +836,7 @@ func 
exprFmt(n Node, s fmt.State, prec int) { OOFFSETOF, OSIZEOF: n := n.(*UnaryExpr) - fmt.Fprintf(s, "%v(%v)", n.Op(), n.Left()) + fmt.Fprintf(s, "%v(%v)", n.Op(), n.X) case OAPPEND, ODELETE, @@ -845,58 +845,58 @@ func exprFmt(n Node, s fmt.State, prec int) { OPRINT, OPRINTN: n := n.(*CallExpr) - if n.IsDDD() { - fmt.Fprintf(s, "%v(%.v...)", n.Op(), n.List()) + if n.IsDDD { + fmt.Fprintf(s, "%v(%.v...)", n.Op(), n.Args) return } - fmt.Fprintf(s, "%v(%.v)", n.Op(), n.List()) + fmt.Fprintf(s, "%v(%.v)", n.Op(), n.Args) case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG: n := n.(*CallExpr) - exprFmt(n.Left(), s, nprec) - if n.IsDDD() { - fmt.Fprintf(s, "(%.v...)", n.List()) + exprFmt(n.X, s, nprec) + if n.IsDDD { + fmt.Fprintf(s, "(%.v...)", n.Args) return } - fmt.Fprintf(s, "(%.v)", n.List()) + fmt.Fprintf(s, "(%.v)", n.Args) case OMAKEMAP, OMAKECHAN, OMAKESLICE: n := n.(*MakeExpr) - if n.Right() != nil { - fmt.Fprintf(s, "make(%v, %v, %v)", n.Type(), n.Left(), n.Right()) + if n.Cap != nil { + fmt.Fprintf(s, "make(%v, %v, %v)", n.Type(), n.Len, n.Cap) return } - if n.Left() != nil && (n.Op() == OMAKESLICE || !n.Left().Type().IsUntyped()) { - fmt.Fprintf(s, "make(%v, %v)", n.Type(), n.Left()) + if n.Len != nil && (n.Op() == OMAKESLICE || !n.Len.Type().IsUntyped()) { + fmt.Fprintf(s, "make(%v, %v)", n.Type(), n.Len) return } fmt.Fprintf(s, "make(%v)", n.Type()) case OMAKESLICECOPY: n := n.(*MakeExpr) - fmt.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type(), n.Left(), n.Right()) + fmt.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type(), n.Len, n.Cap) case OPLUS, ONEG, OBITNOT, ONOT, ORECV: // Unary n := n.(*UnaryExpr) fmt.Fprintf(s, "%v", n.Op()) - if n.Left() != nil && n.Left().Op() == n.Op() { + if n.X != nil && n.X.Op() == n.Op() { fmt.Fprint(s, " ") } - exprFmt(n.Left(), s, nprec+1) + exprFmt(n.X, s, nprec+1) case OADDR: n := n.(*AddrExpr) fmt.Fprintf(s, "%v", n.Op()) - if n.Left() != nil && n.Left().Op() == n.Op() { + if n.X != nil && n.X.Op() == n.Op() { fmt.Fprint(s, " ") } - exprFmt(n.Left(), s, nprec+1) + exprFmt(n.X, s, nprec+1) case ODEREF: n := n.(*StarExpr) fmt.Fprintf(s, "%v", n.Op()) - exprFmt(n.Left(), s, nprec+1) + exprFmt(n.X, s, nprec+1) // Binary case OADD, @@ -917,26 +917,26 @@ func exprFmt(n Node, s fmt.State, prec int) { OSUB, OXOR: n := n.(*BinaryExpr) - exprFmt(n.Left(), s, nprec) + exprFmt(n.X, s, nprec) fmt.Fprintf(s, " %v ", n.Op()) - exprFmt(n.Right(), s, nprec+1) + exprFmt(n.Y, s, nprec+1) case OANDAND, OOROR: n := n.(*LogicalExpr) - exprFmt(n.Left(), s, nprec) + exprFmt(n.X, s, nprec) fmt.Fprintf(s, " %v ", n.Op()) - exprFmt(n.Right(), s, nprec+1) + exprFmt(n.Y, s, nprec+1) case OSEND: n := n.(*SendStmt) - exprFmt(n.Left(), s, nprec) + exprFmt(n.Chan, s, nprec) fmt.Fprintf(s, " <- ") - exprFmt(n.Right(), s, nprec+1) + exprFmt(n.Value, s, nprec+1) case OADDSTR: n := n.(*AddStringExpr) - for i, n1 := range n.List().Slice() { + for i, n1 := range n.List.Slice() { if i != 0 { fmt.Fprint(s, " + ") } @@ -1098,7 +1098,7 @@ func dumpNodeHeader(w io.Writer, n Node) { if n.Op() == OCLOSURE { n := n.(*ClosureExpr) - if fn := n.Func(); fn != nil && fn.Nname.Sym() != nil { + if fn := n.Func; fn != nil && fn.Nname.Sym() != nil { fmt.Fprintf(w, " fnName(%+v)", fn.Nname.Sym()) } } @@ -1169,7 +1169,7 @@ func dumpNode(w io.Writer, n Node, depth int) { case OASOP: n := n.(*AssignOpStmt) - fmt.Fprintf(w, "%+v-%+v", n.Op(), n.SubOp()) + fmt.Fprintf(w, "%+v-%+v", n.Op(), n.AsOp) dumpNodeHeader(w, n) case OTYPE: @@ -1192,18 +1192,18 @@ func dumpNode(w io.Writer, n Node, depth int) { n := 
n.(*Func) fmt.Fprintf(w, "%+v", n.Op()) dumpNodeHeader(w, n) - fn := n.Func() + fn := n if len(fn.Dcl) > 0 { indent(w, depth) fmt.Fprintf(w, "%+v-Dcl", n.Op()) - for _, dcl := range n.Func().Dcl { + for _, dcl := range n.Dcl { dumpNode(w, dcl, depth+1) } } - if fn.Body().Len() > 0 { + if fn.Body.Len() > 0 { indent(w, depth) fmt.Fprintf(w, "%+v-body", n.Op()) - dumpNodes(w, fn.Body(), depth+1) + dumpNodes(w, fn.Body, depth+1) } return } diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 62ac5791d1a2a..57837e9e6b84d 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -49,9 +49,9 @@ import ( // pointer from the Func back to the OCALLPART. type Func struct { miniNode - typ *types.Type - Body_ Nodes - iota int64 + typ *types.Type + Body Nodes + Iota int64 Nname *Name // ONAME node OClosure *ClosureExpr // OCLOSURE node @@ -110,20 +110,14 @@ func NewFunc(pos src.XPos) *Func { f := new(Func) f.pos = pos f.op = ODCLFUNC - f.iota = -1 + f.Iota = -1 return f } func (f *Func) isStmt() {} -func (f *Func) Func() *Func { return f } -func (f *Func) Body() Nodes { return f.Body_ } -func (f *Func) PtrBody() *Nodes { return &f.Body_ } -func (f *Func) SetBody(x Nodes) { f.Body_ = x } func (f *Func) Type() *types.Type { return f.typ } func (f *Func) SetType(x *types.Type) { f.typ = x } -func (f *Func) Iota() int64 { return f.iota } -func (f *Func) SetIota(x int64) { f.iota = x } func (f *Func) Sym() *types.Sym { if f.Nname != nil { @@ -218,11 +212,11 @@ func FuncName(n Node) string { case *Func: f = n case *Name: - f = n.Func() + f = n.Func case *CallPartExpr: - f = n.Func() + f = n.Func case *ClosureExpr: - f = n.Func() + f = n.Func } if f == nil || f.Nname == nil { return "" @@ -245,9 +239,9 @@ func PkgFuncName(n Node) string { var f *Func switch n := n.(type) { case *CallPartExpr: - f = n.Func() + f = n.Func case *ClosureExpr: - f = n.Func() + f = n.Func case *Func: f = n } diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 64c60b93d8f57..770f8119e0eab 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -39,7 +39,7 @@ type Name struct { flags bitset16 pragma PragmaFlag // int16 sym *types.Sym - fn *Func + Func *Func Offset_ int64 val constant.Value orig Node @@ -225,8 +225,7 @@ func (n *Name) SubOp() Op { return n.BuiltinOp } func (n *Name) SetSubOp(x Op) { n.BuiltinOp = x } func (n *Name) Class() Class { return n.Class_ } func (n *Name) SetClass(x Class) { n.Class_ = x } -func (n *Name) Func() *Func { return n.fn } -func (n *Name) SetFunc(x *Func) { n.fn = x } +func (n *Name) SetFunc(x *Func) { n.Func = x } func (n *Name) Offset() int64 { panic("Name.Offset") } func (n *Name) SetOffset(x int64) { if x != 0 { diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index a5959ea26f39e..89b1c0ba23a0f 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -8,18 +8,18 @@ func (n *AddStringExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *AddStringExpr) copy() Node { c := *n c.init = c.init.Copy() - c.List_ = c.List_.Copy() + c.List = c.List.Copy() return &c } func (n *AddStringExpr) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) - err = maybeDoList(n.List_, err, do) + err = maybeDoList(n.List, err, do) return err } func (n *AddStringExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) - 
editList(n.List_, edit) + editList(n.List, edit) } func (n *AddrExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } @@ -154,18 +154,18 @@ func (n *BlockStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *BlockStmt) copy() Node { c := *n c.init = c.init.Copy() - c.List_ = c.List_.Copy() + c.List = c.List.Copy() return &c } func (n *BlockStmt) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) - err = maybeDoList(n.List_, err, do) + err = maybeDoList(n.List, err, do) return err } func (n *BlockStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) - editList(n.List_, edit) + editList(n.List, edit) } func (n *BranchStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } @@ -189,7 +189,7 @@ func (n *CallExpr) copy() Node { c.init = c.init.Copy() c.Args = c.Args.Copy() c.Rargs = c.Rargs.Copy() - c.Body_ = c.Body_.Copy() + c.Body = c.Body.Copy() return &c } func (n *CallExpr) doChildren(do func(Node) error) error { @@ -198,7 +198,7 @@ func (n *CallExpr) doChildren(do func(Node) error) error { err = maybeDo(n.X, err, do) err = maybeDoList(n.Args, err, do) err = maybeDoList(n.Rargs, err, do) - err = maybeDoList(n.Body_, err, do) + err = maybeDoList(n.Body, err, do) return err } func (n *CallExpr) editChildren(edit func(Node) Node) { @@ -206,7 +206,7 @@ func (n *CallExpr) editChildren(edit func(Node) Node) { n.X = maybeEdit(n.X, edit) editList(n.Args, edit) editList(n.Rargs, edit) - editList(n.Body_, edit) + editList(n.Body, edit) } func (n *CallPartExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } @@ -231,25 +231,25 @@ func (n *CaseStmt) copy() Node { c := *n c.init = c.init.Copy() c.Vars = c.Vars.Copy() - c.List_ = c.List_.Copy() - c.Body_ = c.Body_.Copy() + c.List = c.List.Copy() + c.Body = c.Body.Copy() return &c } func (n *CaseStmt) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) err = maybeDoList(n.Vars, err, do) - err = maybeDoList(n.List_, err, do) + err = maybeDoList(n.List, err, do) err = maybeDo(n.Comm, err, do) - err = maybeDoList(n.Body_, err, do) + err = maybeDoList(n.Body, err, do) return err } func (n *CaseStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) editList(n.Vars, edit) - editList(n.List_, edit) + editList(n.List, edit) n.Comm = maybeEdit(n.Comm, edit) - editList(n.Body_, edit) + editList(n.Body, edit) } func (n *ChanType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } @@ -300,20 +300,20 @@ func (n *CompLitExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *CompLitExpr) copy() Node { c := *n c.init = c.init.Copy() - c.List_ = c.List_.Copy() + c.List = c.List.Copy() return &c } func (n *CompLitExpr) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) err = maybeDo(n.Ntype, err, do) - err = maybeDoList(n.List_, err, do) + err = maybeDoList(n.List, err, do) return err } func (n *CompLitExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) n.Ntype = toNtype(maybeEdit(n.Ntype, edit)) - editList(n.List_, edit) + editList(n.List, edit) } func (n *ConstExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } @@ -367,7 +367,7 @@ func (n *ForStmt) copy() Node { c := *n c.init = c.init.Copy() c.Late = c.Late.Copy() - c.Body_ = c.Body_.Copy() + c.Body = c.Body.Copy() return &c } func (n *ForStmt) doChildren(do func(Node) error) error { @@ -376,7 +376,7 @@ func (n *ForStmt) doChildren(do func(Node) error) error { err = maybeDo(n.Cond, err, do) err = 
maybeDoList(n.Late, err, do) err = maybeDo(n.Post, err, do) - err = maybeDoList(n.Body_, err, do) + err = maybeDoList(n.Body, err, do) return err } func (n *ForStmt) editChildren(edit func(Node) Node) { @@ -384,22 +384,22 @@ func (n *ForStmt) editChildren(edit func(Node) Node) { n.Cond = maybeEdit(n.Cond, edit) editList(n.Late, edit) n.Post = maybeEdit(n.Post, edit) - editList(n.Body_, edit) + editList(n.Body, edit) } func (n *Func) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *Func) copy() Node { c := *n - c.Body_ = c.Body_.Copy() + c.Body = c.Body.Copy() return &c } func (n *Func) doChildren(do func(Node) error) error { var err error - err = maybeDoList(n.Body_, err, do) + err = maybeDoList(n.Body, err, do) return err } func (n *Func) editChildren(edit func(Node) Node) { - editList(n.Body_, edit) + editList(n.Body, edit) } func (n *FuncType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } @@ -461,7 +461,7 @@ func (n *IfStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *IfStmt) copy() Node { c := *n c.init = c.init.Copy() - c.Body_ = c.Body_.Copy() + c.Body = c.Body.Copy() c.Else = c.Else.Copy() return &c } @@ -469,14 +469,14 @@ func (n *IfStmt) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) err = maybeDo(n.Cond, err, do) - err = maybeDoList(n.Body_, err, do) + err = maybeDoList(n.Body, err, do) err = maybeDoList(n.Else, err, do) return err } func (n *IfStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) n.Cond = maybeEdit(n.Cond, edit) - editList(n.Body_, edit) + editList(n.Body, edit) editList(n.Else, edit) } @@ -518,20 +518,20 @@ func (n *InlinedCallExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *InlinedCallExpr) copy() Node { c := *n c.init = c.init.Copy() - c.Body_ = c.Body_.Copy() + c.Body = c.Body.Copy() c.ReturnVars = c.ReturnVars.Copy() return &c } func (n *InlinedCallExpr) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) - err = maybeDoList(n.Body_, err, do) + err = maybeDoList(n.Body, err, do) err = maybeDoList(n.ReturnVars, err, do) return err } func (n *InlinedCallExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) - editList(n.Body_, edit) + editList(n.Body, edit) editList(n.ReturnVars, edit) } @@ -726,7 +726,7 @@ func (n *RangeStmt) copy() Node { c := *n c.init = c.init.Copy() c.Vars = c.Vars.Copy() - c.Body_ = c.Body_.Copy() + c.Body = c.Body.Copy() return &c } func (n *RangeStmt) doChildren(do func(Node) error) error { @@ -734,14 +734,14 @@ func (n *RangeStmt) doChildren(do func(Node) error) error { err = maybeDoList(n.init, err, do) err = maybeDoList(n.Vars, err, do) err = maybeDo(n.X, err, do) - err = maybeDoList(n.Body_, err, do) + err = maybeDoList(n.Body, err, do) return err } func (n *RangeStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) editList(n.Vars, edit) n.X = maybeEdit(n.X, edit) - editList(n.Body_, edit) + editList(n.Body, edit) } func (n *ResultExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } @@ -838,40 +838,40 @@ func (n *SliceExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *SliceExpr) copy() Node { c := *n c.init = c.init.Copy() - c.List_ = c.List_.Copy() + c.List = c.List.Copy() return &c } func (n *SliceExpr) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) err = maybeDo(n.X, err, do) - err = maybeDoList(n.List_, err, do) + err = maybeDoList(n.List, err, do) return err } func (n *SliceExpr) 
editChildren(edit func(Node) Node) { editList(n.init, edit) n.X = maybeEdit(n.X, edit) - editList(n.List_, edit) + editList(n.List, edit) } func (n *SliceHeaderExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *SliceHeaderExpr) copy() Node { c := *n c.init = c.init.Copy() - c.LenCap_ = c.LenCap_.Copy() + c.LenCap = c.LenCap.Copy() return &c } func (n *SliceHeaderExpr) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) err = maybeDo(n.Ptr, err, do) - err = maybeDoList(n.LenCap_, err, do) + err = maybeDoList(n.LenCap, err, do) return err } func (n *SliceHeaderExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) n.Ptr = maybeEdit(n.Ptr, edit) - editList(n.LenCap_, edit) + editList(n.LenCap, edit) } func (n *SliceType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index e2543a554149f..ad6db436a7f6d 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -30,9 +30,6 @@ func NewDecl(pos src.XPos, op Op, x Node) *Decl { func (*Decl) isStmt() {} -func (n *Decl) Left() Node { return n.X } -func (n *Decl) SetLeft(x Node) { n.X = x } - // A Stmt is a Node that can appear as a statement. // This includes statement-like expressions such as f(). // @@ -78,15 +75,6 @@ func NewAssignListStmt(pos src.XPos, op Op, lhs, rhs []Node) *AssignListStmt { return n } -func (n *AssignListStmt) List() Nodes { return n.Lhs } -func (n *AssignListStmt) PtrList() *Nodes { return &n.Lhs } -func (n *AssignListStmt) SetList(x Nodes) { n.Lhs = x } -func (n *AssignListStmt) Rlist() Nodes { return n.Rhs } -func (n *AssignListStmt) PtrRlist() *Nodes { return &n.Rhs } -func (n *AssignListStmt) SetRlist(x Nodes) { n.Rhs = x } -func (n *AssignListStmt) Colas() bool { return n.Def } -func (n *AssignListStmt) SetColas(x bool) { n.Def = x } - func (n *AssignListStmt) SetOp(op Op) { switch op { default: @@ -112,13 +100,6 @@ func NewAssignStmt(pos src.XPos, x, y Node) *AssignStmt { return n } -func (n *AssignStmt) Left() Node { return n.X } -func (n *AssignStmt) SetLeft(x Node) { n.X = x } -func (n *AssignStmt) Right() Node { return n.Y } -func (n *AssignStmt) SetRight(y Node) { n.Y = y } -func (n *AssignStmt) Colas() bool { return n.Def } -func (n *AssignStmt) SetColas(x bool) { n.Def = x } - func (n *AssignStmt) SetOp(op Op) { switch op { default: @@ -145,21 +126,13 @@ func NewAssignOpStmt(pos src.XPos, asOp Op, x, y Node) *AssignOpStmt { return n } -func (n *AssignOpStmt) Left() Node { return n.X } -func (n *AssignOpStmt) SetLeft(x Node) { n.X = x } -func (n *AssignOpStmt) Right() Node { return n.Y } -func (n *AssignOpStmt) SetRight(y Node) { n.Y = y } -func (n *AssignOpStmt) SubOp() Op { return n.AsOp } -func (n *AssignOpStmt) SetSubOp(x Op) { n.AsOp = x } -func (n *AssignOpStmt) Implicit() bool { return n.IncDec } -func (n *AssignOpStmt) SetImplicit(b bool) { n.IncDec = b } func (n *AssignOpStmt) Type() *types.Type { return n.typ } func (n *AssignOpStmt) SetType(x *types.Type) { n.typ = x } // A BlockStmt is a block: { List }. 
type BlockStmt struct { miniStmt - List_ Nodes + List Nodes } func NewBlockStmt(pos src.XPos, list []Node) *BlockStmt { @@ -172,14 +145,10 @@ func NewBlockStmt(pos src.XPos, list []Node) *BlockStmt { } } n.op = OBLOCK - n.List_.Set(list) + n.List.Set(list) return n } -func (n *BlockStmt) List() Nodes { return n.List_ } -func (n *BlockStmt) PtrList() *Nodes { return &n.List_ } -func (n *BlockStmt) SetList(x Nodes) { n.List_ = x } - // A BranchStmt is a break, continue, fallthrough, or goto statement. // // For back-end code generation, Op may also be RETJMP (return+jump), @@ -202,49 +171,36 @@ func NewBranchStmt(pos src.XPos, op Op, label *types.Sym) *BranchStmt { return n } -func (n *BranchStmt) Sym() *types.Sym { return n.Label } -func (n *BranchStmt) SetSym(sym *types.Sym) { n.Label = sym } +func (n *BranchStmt) Sym() *types.Sym { return n.Label } // A CaseStmt is a case statement in a switch or select: case List: Body. type CaseStmt struct { miniStmt - Vars Nodes // declared variable for this case in type switch - List_ Nodes // list of expressions for switch, early select - Comm Node // communication case (Exprs[0]) after select is type-checked - Body_ Nodes + Vars Nodes // declared variable for this case in type switch + List Nodes // list of expressions for switch, early select + Comm Node // communication case (Exprs[0]) after select is type-checked + Body Nodes } func NewCaseStmt(pos src.XPos, list, body []Node) *CaseStmt { n := &CaseStmt{} n.pos = pos n.op = OCASE - n.List_.Set(list) - n.Body_.Set(body) + n.List.Set(list) + n.Body.Set(body) return n } -func (n *CaseStmt) List() Nodes { return n.List_ } -func (n *CaseStmt) PtrList() *Nodes { return &n.List_ } -func (n *CaseStmt) SetList(x Nodes) { n.List_ = x } -func (n *CaseStmt) Body() Nodes { return n.Body_ } -func (n *CaseStmt) PtrBody() *Nodes { return &n.Body_ } -func (n *CaseStmt) SetBody(x Nodes) { n.Body_ = x } -func (n *CaseStmt) Rlist() Nodes { return n.Vars } -func (n *CaseStmt) PtrRlist() *Nodes { return &n.Vars } -func (n *CaseStmt) SetRlist(x Nodes) { n.Vars = x } -func (n *CaseStmt) Left() Node { return n.Comm } -func (n *CaseStmt) SetLeft(x Node) { n.Comm = x } - // A ForStmt is a non-range for loop: for Init; Cond; Post { Body } // Op can be OFOR or OFORUNTIL (!Cond). 
 type ForStmt struct {
 	miniStmt
-	Label     *types.Sym
-	Cond      Node
-	Late      Nodes
-	Post      Node
-	Body_     Nodes
-	HasBreak_ bool
+	Label    *types.Sym
+	Cond     Node
+	Late     Nodes
+	Post     Node
+	Body     Nodes
+	HasBreak bool
 }
 
 func NewForStmt(pos src.XPos, init []Node, cond, post Node, body []Node) *ForStmt {
@@ -252,25 +208,10 @@ func NewForStmt(pos src.XPos, init []Node, cond, post Node, body []Node) *ForStm
 	n.pos = pos
 	n.op = OFOR
 	n.init.Set(init)
-	n.Body_.Set(body)
+	n.Body.Set(body)
 	return n
 }
 
-func (n *ForStmt) Sym() *types.Sym      { return n.Label }
-func (n *ForStmt) SetSym(x *types.Sym)  { n.Label = x }
-func (n *ForStmt) Left() Node           { return n.Cond }
-func (n *ForStmt) SetLeft(x Node)       { n.Cond = x }
-func (n *ForStmt) Right() Node          { return n.Post }
-func (n *ForStmt) SetRight(x Node)      { n.Post = x }
-func (n *ForStmt) Body() Nodes          { return n.Body_ }
-func (n *ForStmt) PtrBody() *Nodes      { return &n.Body_ }
-func (n *ForStmt) SetBody(x Nodes)      { n.Body_ = x }
-func (n *ForStmt) List() Nodes          { return n.Late }
-func (n *ForStmt) PtrList() *Nodes      { return &n.Late }
-func (n *ForStmt) SetList(x Nodes)      { n.Late = x }
-func (n *ForStmt) HasBreak() bool       { return n.HasBreak_ }
-func (n *ForStmt) SetHasBreak(b bool)   { n.HasBreak_ = b }
-
 func (n *ForStmt) SetOp(op Op) {
 	if op != OFOR && op != OFORUNTIL {
 		panic(n.no("SetOp " + op.String()))
@@ -300,38 +241,24 @@ func NewGoDeferStmt(pos src.XPos, op Op, call Node) *GoDeferStmt {
 	return n
 }
 
-func (n *GoDeferStmt) Left() Node     { return n.Call }
-func (n *GoDeferStmt) SetLeft(x Node) { n.Call = x }
-
 // An IfStmt is an if statement: if Init; Cond { Body } else { Else }.
 type IfStmt struct {
 	miniStmt
-	Cond    Node
-	Body_   Nodes
-	Else    Nodes
-	Likely_ bool // code layout hint
+	Cond   Node
+	Body   Nodes
+	Else   Nodes
+	Likely bool // code layout hint
 }
 
 func NewIfStmt(pos src.XPos, cond Node, body, els []Node) *IfStmt {
 	n := &IfStmt{Cond: cond}
 	n.pos = pos
 	n.op = OIF
-	n.Body_.Set(body)
+	n.Body.Set(body)
 	n.Else.Set(els)
 	return n
 }
 
-func (n *IfStmt) Left() Node       { return n.Cond }
-func (n *IfStmt) SetLeft(x Node)   { n.Cond = x }
-func (n *IfStmt) Body() Nodes      { return n.Body_ }
-func (n *IfStmt) PtrBody() *Nodes  { return &n.Body_ }
-func (n *IfStmt) SetBody(x Nodes)  { n.Body_ = x }
-func (n *IfStmt) Rlist() Nodes     { return n.Else }
-func (n *IfStmt) PtrRlist() *Nodes { return &n.Else }
-func (n *IfStmt) SetRlist(x Nodes) { n.Else = x }
-func (n *IfStmt) Likely() bool     { return n.Likely_ }
-func (n *IfStmt) SetLikely(x bool) { n.Likely_ = x }
-
 // An InlineMarkStmt is a marker placed just before an inlined body.
 type InlineMarkStmt struct {
 	miniStmt
@@ -361,21 +288,20 @@ func NewLabelStmt(pos src.XPos, label *types.Sym) *LabelStmt {
 	return n
 }
 
-func (n *LabelStmt) Sym() *types.Sym     { return n.Label }
-func (n *LabelStmt) SetSym(x *types.Sym) { n.Label = x }
+func (n *LabelStmt) Sym() *types.Sym { return n.Label }
 
 // A RangeStmt is a range loop: for Vars = range X { Stmts }
 // Op is ORANGE.
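// For example, in "for k, v := range m { ... }", Vars holds k and v and X
// is m; the TODO on Vars below anticipates splitting it into Key and Value.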
type RangeStmt struct { miniStmt - Label *types.Sym - Vars Nodes // TODO(rsc): Replace with Key, Value Node - Def bool - X Node - Body_ Nodes - HasBreak_ bool - typ *types.Type // TODO(rsc): Remove - use X.Type() instead - Prealloc *Name + Label *types.Sym + Vars Nodes // TODO(rsc): Replace with Key, Value Node + Def bool + X Node + Body Nodes + HasBreak bool + typ *types.Type // TODO(rsc): Remove - use X.Type() instead + Prealloc *Name } func NewRangeStmt(pos src.XPos, vars []Node, x Node, body []Node) *RangeStmt { @@ -383,24 +309,10 @@ func NewRangeStmt(pos src.XPos, vars []Node, x Node, body []Node) *RangeStmt { n.pos = pos n.op = ORANGE n.Vars.Set(vars) - n.Body_.Set(body) + n.Body.Set(body) return n } -func (n *RangeStmt) Sym() *types.Sym { return n.Label } -func (n *RangeStmt) SetSym(x *types.Sym) { n.Label = x } -func (n *RangeStmt) Right() Node { return n.X } -func (n *RangeStmt) SetRight(x Node) { n.X = x } -func (n *RangeStmt) Body() Nodes { return n.Body_ } -func (n *RangeStmt) PtrBody() *Nodes { return &n.Body_ } -func (n *RangeStmt) SetBody(x Nodes) { n.Body_ = x } -func (n *RangeStmt) List() Nodes { return n.Vars } -func (n *RangeStmt) PtrList() *Nodes { return &n.Vars } -func (n *RangeStmt) SetList(x Nodes) { n.Vars = x } -func (n *RangeStmt) HasBreak() bool { return n.HasBreak_ } -func (n *RangeStmt) SetHasBreak(b bool) { n.HasBreak_ = b } -func (n *RangeStmt) Colas() bool { return n.Def } -func (n *RangeStmt) SetColas(b bool) { n.Def = b } func (n *RangeStmt) Type() *types.Type { return n.typ } func (n *RangeStmt) SetType(x *types.Type) { n.typ = x } @@ -420,19 +332,15 @@ func NewReturnStmt(pos src.XPos, results []Node) *ReturnStmt { return n } -func (n *ReturnStmt) Orig() Node { return n.orig } -func (n *ReturnStmt) SetOrig(x Node) { n.orig = x } -func (n *ReturnStmt) List() Nodes { return n.Results } -func (n *ReturnStmt) PtrList() *Nodes { return &n.Results } -func (n *ReturnStmt) SetList(x Nodes) { n.Results = x } -func (n *ReturnStmt) IsDDD() bool { return false } // typecheckargs asks +func (n *ReturnStmt) Orig() Node { return n.orig } +func (n *ReturnStmt) SetOrig(x Node) { n.orig = x } // A SelectStmt is a block: { Cases }. type SelectStmt struct { miniStmt - Label *types.Sym - Cases Nodes - HasBreak_ bool + Label *types.Sym + Cases Nodes + HasBreak bool // TODO(rsc): Instead of recording here, replace with a block? Compiled Nodes // compiled form, after walkswitch @@ -446,17 +354,6 @@ func NewSelectStmt(pos src.XPos, cases []Node) *SelectStmt { return n } -func (n *SelectStmt) List() Nodes { return n.Cases } -func (n *SelectStmt) PtrList() *Nodes { return &n.Cases } -func (n *SelectStmt) SetList(x Nodes) { n.Cases = x } -func (n *SelectStmt) Sym() *types.Sym { return n.Label } -func (n *SelectStmt) SetSym(x *types.Sym) { n.Label = x } -func (n *SelectStmt) HasBreak() bool { return n.HasBreak_ } -func (n *SelectStmt) SetHasBreak(x bool) { n.HasBreak_ = x } -func (n *SelectStmt) Body() Nodes { return n.Compiled } -func (n *SelectStmt) PtrBody() *Nodes { return &n.Compiled } -func (n *SelectStmt) SetBody(x Nodes) { n.Compiled = x } - // A SendStmt is a send statement: X <- Y. 
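// The operands live in the Chan and Value fields below; the formatter in
// fmt.go prints them as "Chan <- Value".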
type SendStmt struct { miniStmt @@ -471,18 +368,13 @@ func NewSendStmt(pos src.XPos, ch, value Node) *SendStmt { return n } -func (n *SendStmt) Left() Node { return n.Chan } -func (n *SendStmt) SetLeft(x Node) { n.Chan = x } -func (n *SendStmt) Right() Node { return n.Value } -func (n *SendStmt) SetRight(y Node) { n.Value = y } - // A SwitchStmt is a switch statement: switch Init; Expr { Cases }. type SwitchStmt struct { miniStmt - Tag Node - Cases Nodes // list of *CaseStmt - Label *types.Sym - HasBreak_ bool + Tag Node + Cases Nodes // list of *CaseStmt + Label *types.Sym + HasBreak bool // TODO(rsc): Instead of recording here, replace with a block? Compiled Nodes // compiled form, after walkswitch @@ -496,19 +388,6 @@ func NewSwitchStmt(pos src.XPos, tag Node, cases []Node) *SwitchStmt { return n } -func (n *SwitchStmt) Left() Node { return n.Tag } -func (n *SwitchStmt) SetLeft(x Node) { n.Tag = x } -func (n *SwitchStmt) List() Nodes { return n.Cases } -func (n *SwitchStmt) PtrList() *Nodes { return &n.Cases } -func (n *SwitchStmt) SetList(x Nodes) { n.Cases = x } -func (n *SwitchStmt) Body() Nodes { return n.Compiled } -func (n *SwitchStmt) PtrBody() *Nodes { return &n.Compiled } -func (n *SwitchStmt) SetBody(x Nodes) { n.Compiled = x } -func (n *SwitchStmt) Sym() *types.Sym { return n.Label } -func (n *SwitchStmt) SetSym(x *types.Sym) { n.Label = x } -func (n *SwitchStmt) HasBreak() bool { return n.HasBreak_ } -func (n *SwitchStmt) SetHasBreak(x bool) { n.HasBreak_ = x } - // A TypeSwitchGuard is the [Name :=] X.(type) in a type switch. type TypeSwitchGuard struct { miniNode @@ -523,19 +402,3 @@ func NewTypeSwitchGuard(pos src.XPos, tag *Ident, x Node) *TypeSwitchGuard { n.op = OTYPESW return n } - -func (n *TypeSwitchGuard) Left() Node { - if n.Tag == nil { - return nil - } - return n.Tag -} -func (n *TypeSwitchGuard) SetLeft(x Node) { - if x == nil { - n.Tag = nil - return - } - n.Tag = x.(*Ident) -} -func (n *TypeSwitchGuard) Right() Node { return n.X } -func (n *TypeSwitchGuard) SetRight(x Node) { n.X = x } From 440308ffd7061e0eb386a9a8469575528b41dcd4 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 23 Dec 2020 00:03:33 -0500 Subject: [PATCH 221/474] [dev.regabi] cmd/compile: simplify Nodes usage [generated] Now that Nodes is a slice, most of the methods can be removed in favor of direct slice operations, reducing the new API that must be understood to: Copy Take Append Prepend Format Passes buildall w/ toolstash -cmp. [git-generate] cd src/cmd/compile/internal/ir rf ' ex . ../gc { var ns Nodes var pns *Nodes var n, n2, n3 Node var i int var slice []Node ns.Len() -> len(ns) ns.Slice() -> ns ns.First() -> ns[0] ns.Second() -> ns[1] ns.Index(i) -> ns[i] ns.Addr(i) -> &ns[i] ns.SetIndex(i, n) -> ns[i] = n ns.SetFirst(n) -> ns[0] = n ns.SetSecond(n) -> ns[1] = n ns.Set1(n) -> ns = []Node{n} ns.Set2(n, n2) -> ns = []Node{n, n2} ns.Set3(n, n2, n3) -> ns = []Node{n, n2, n3} ns.Set1(n) -> ns = []Node{n} ns.Set2(n, n2) -> ns = []Node{n, n2} ns.Set3(n, n2, n3) -> ns = []Node{n, n2, n3} AsNodes(slice) -> Nodes(slice) ns.AppendNodes(pns) -> ns.Append(pns.Take()...) 
ns.MoveNodes(pns) -> ns = pns.Take() } rm \ Nodes.Len Nodes.Slice \ Nodes.First Nodes.Second Nodes.Index Nodes.Addr \ Nodes.SetIndex Nodes.SetFirst Nodes.SetSecond \ Nodes.Set1 Nodes.Set2 Nodes.Set3 \ AsNodes \ Nodes.AppendNodes Nodes.MoveNodes ' Change-Id: Iee86434ced52e67861c3fa71bdd6d994a8cba735 Reviewed-on: https://go-review.googlesource.com/c/go/+/277936 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/alg.go | 4 +- src/cmd/compile/internal/gc/closure.go | 10 +- src/cmd/compile/internal/gc/const.go | 2 +- src/cmd/compile/internal/gc/dcl.go | 4 +- src/cmd/compile/internal/gc/escape.go | 54 ++--- src/cmd/compile/internal/gc/gsubr.go | 4 +- src/cmd/compile/internal/gc/iexport.go | 22 +- src/cmd/compile/internal/gc/iimport.go | 10 +- src/cmd/compile/internal/gc/init.go | 4 +- src/cmd/compile/internal/gc/initorder.go | 4 +- src/cmd/compile/internal/gc/inl.go | 62 +++--- src/cmd/compile/internal/gc/noder.go | 24 +-- src/cmd/compile/internal/gc/order.go | 96 ++++----- src/cmd/compile/internal/gc/pgen.go | 4 +- src/cmd/compile/internal/gc/range.go | 80 ++++---- src/cmd/compile/internal/gc/select.go | 84 ++++---- src/cmd/compile/internal/gc/sinit.go | 34 +-- src/cmd/compile/internal/gc/ssa.go | 56 ++--- src/cmd/compile/internal/gc/subr.go | 16 +- src/cmd/compile/internal/gc/swt.go | 82 ++++---- src/cmd/compile/internal/gc/typecheck.go | 194 +++++++++--------- src/cmd/compile/internal/gc/walk.go | 250 +++++++++++------------ src/cmd/compile/internal/ir/dump.go | 2 +- src/cmd/compile/internal/ir/expr.go | 16 +- src/cmd/compile/internal/ir/fmt.go | 46 ++--- src/cmd/compile/internal/ir/node.go | 92 +-------- src/cmd/compile/internal/ir/visit.go | 10 +- 27 files changed, 588 insertions(+), 678 deletions(-) diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index bb2717a8b5cef..49ce14b0261e9 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -386,7 +386,7 @@ func genhash(t *types.Type) *obj.LSym { typecheckFunc(fn) Curfn = fn - typecheckslice(fn.Body.Slice(), ctxStmt) + typecheckslice(fn.Body, ctxStmt) Curfn = nil if base.Debug.DclStack != 0 { @@ -762,7 +762,7 @@ func geneq(t *types.Type) *obj.LSym { typecheckFunc(fn) Curfn = fn - typecheckslice(fn.Body.Slice(), ctxStmt) + typecheckslice(fn.Body, ctxStmt) Curfn = nil if base.Debug.DclStack != 0 { diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index 1019cff331533..27a9bc7cf8902 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -124,7 +124,7 @@ func typecheckclosure(clo *ir.ClosureExpr, top int) { Curfn = fn olddd := decldepth decldepth = 1 - typecheckslice(fn.Body.Slice(), ctxStmt) + typecheckslice(fn.Body, ctxStmt) decldepth = olddd Curfn = oldfn } @@ -394,7 +394,7 @@ func walkclosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node { clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ).(ir.Ntype), nil) clos.SetEsc(clo.Esc()) - clos.List.Set(append([]ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, fn.Nname)}, fn.ClosureEnter.Slice()...)) + clos.List.Set(append([]ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, fn.Nname)}, fn.ClosureEnter...)) addr := nodAddr(clos) addr.SetEsc(clo.Esc()) @@ -484,7 +484,7 @@ func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir. 
call.IsDDD = tfn.Type().IsVariadic() if t0.NumResults() != 0 { ret := ir.NewReturnStmt(base.Pos, nil) - ret.Results.Set1(call) + ret.Results = []ir.Node{call} body = append(body, ret) } else { body = append(body, call) @@ -497,7 +497,7 @@ func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir. // Need to typecheck the body of the just-generated wrapper. // typecheckslice() requires that Curfn is set when processing an ORETURN. Curfn = fn - typecheckslice(fn.Body.Slice(), ctxStmt) + typecheckslice(fn.Body, ctxStmt) sym.Def = fn Target.Decls = append(Target.Decls, fn) Curfn = savecurfn @@ -543,7 +543,7 @@ func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node { clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ).(ir.Ntype), nil) clos.SetEsc(n.Esc()) - clos.List.Set2(ir.NewUnaryExpr(base.Pos, ir.OCFUNC, n.Func.Nname), n.X) + clos.List = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, n.Func.Nname), n.X} addr := nodAddr(clos) addr.SetEsc(n.Esc()) diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 19eb8bc537fe3..94bcf63263c64 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -534,7 +534,7 @@ func evalConst(n ir.Node) ir.Node { case ir.OADDSTR: // Merge adjacent constants in the argument list. n := n.(*ir.AddStringExpr) - s := n.List.Slice() + s := n.List need := 0 for i := 0; i < len(s); i++ { if i == 0 || !ir.IsConst(s[i-1], constant.String) || !ir.IsConst(s[i], constant.String) { diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 9bd044c3686fc..62cdff6b8e1dd 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -137,7 +137,7 @@ func variter(vl []*ir.Name, t ir.Ntype, el []ir.Node) []ir.Node { if len(el) == 1 && len(vl) > 1 { e := el[0] as2 := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - as2.Rhs.Set1(e) + as2.Rhs = []ir.Node{e} for _, v := range vl { as2.Lhs.Append(v) declare(v, dclcontext) @@ -888,7 +888,7 @@ func (c *nowritebarrierrecChecker) findExtraCalls(nn ir.Node) { } var callee *ir.Func - arg := n.Args.First() + arg := n.Args[0] switch arg.Op() { case ir.ONAME: arg := arg.(*ir.Name) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 21f02e9471755..fb9cbf2d5107e 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -368,10 +368,10 @@ func (e *Escape) stmt(n ir.Node) { typesw := n.Tag != nil && n.Tag.Op() == ir.OTYPESW var ks []EscHole - for _, cas := range n.Cases.Slice() { // cases + for _, cas := range n.Cases { // cases cas := cas.(*ir.CaseStmt) if typesw && n.Tag.(*ir.TypeSwitchGuard).Tag != nil { - cv := cas.Vars.First() + cv := cas.Vars[0] k := e.dcl(cv) // type switch variables have no ODCL. if cv.Type().HasPointers() { ks = append(ks, k.dotType(cv.Type(), cas, "switch case")) @@ -390,15 +390,15 @@ func (e *Escape) stmt(n ir.Node) { case ir.OSELECT: n := n.(*ir.SelectStmt) - for _, cas := range n.Cases.Slice() { + for _, cas := range n.Cases { cas := cas.(*ir.CaseStmt) e.stmt(cas.Comm) e.block(cas.Body) } case ir.OSELRECV2: n := n.(*ir.AssignListStmt) - e.assign(n.Lhs.First(), n.Rhs.First(), "selrecv", n) - e.assign(n.Lhs.Second(), nil, "selrecv", n) + e.assign(n.Lhs[0], n.Rhs[0], "selrecv", n) + e.assign(n.Lhs[1], nil, "selrecv", n) case ir.ORECV: // TODO(mdempsky): Consider e.discard(n.Left). 
n := n.(*ir.UnaryExpr) @@ -416,31 +416,31 @@ func (e *Escape) stmt(n ir.Node) { e.assign(n.X, n.Y, "assign", n) case ir.OAS2: n := n.(*ir.AssignListStmt) - for i, nl := range n.Lhs.Slice() { - e.assign(nl, n.Rhs.Index(i), "assign-pair", n) + for i, nl := range n.Lhs { + e.assign(nl, n.Rhs[i], "assign-pair", n) } case ir.OAS2DOTTYPE: // v, ok = x.(type) n := n.(*ir.AssignListStmt) - e.assign(n.Lhs.First(), n.Rhs.First(), "assign-pair-dot-type", n) - e.assign(n.Lhs.Second(), nil, "assign-pair-dot-type", n) + e.assign(n.Lhs[0], n.Rhs[0], "assign-pair-dot-type", n) + e.assign(n.Lhs[1], nil, "assign-pair-dot-type", n) case ir.OAS2MAPR: // v, ok = m[k] n := n.(*ir.AssignListStmt) - e.assign(n.Lhs.First(), n.Rhs.First(), "assign-pair-mapr", n) - e.assign(n.Lhs.Second(), nil, "assign-pair-mapr", n) + e.assign(n.Lhs[0], n.Rhs[0], "assign-pair-mapr", n) + e.assign(n.Lhs[1], nil, "assign-pair-mapr", n) case ir.OAS2RECV: // v, ok = <-ch n := n.(*ir.AssignListStmt) - e.assign(n.Lhs.First(), n.Rhs.First(), "assign-pair-receive", n) - e.assign(n.Lhs.Second(), nil, "assign-pair-receive", n) + e.assign(n.Lhs[0], n.Rhs[0], "assign-pair-receive", n) + e.assign(n.Lhs[1], nil, "assign-pair-receive", n) case ir.OAS2FUNC: n := n.(*ir.AssignListStmt) - e.stmts(n.Rhs.First().Init()) - e.call(e.addrs(n.Lhs), n.Rhs.First(), nil) + e.stmts(n.Rhs[0].Init()) + e.call(e.addrs(n.Lhs), n.Rhs[0], nil) case ir.ORETURN: n := n.(*ir.ReturnStmt) results := e.curfn.Type().Results().FieldSlice() - for i, v := range n.Results.Slice() { + for i, v := range n.Results { e.assign(ir.AsNode(results[i].Nname), v, "return", n) } case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER: @@ -456,7 +456,7 @@ func (e *Escape) stmt(n ir.Node) { } func (e *Escape) stmts(l ir.Nodes) { - for _, n := range l.Slice() { + for _, n := range l { e.stmt(n) } } @@ -641,7 +641,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { case ir.OARRAYLIT: n := n.(*ir.CompLitExpr) - for _, elt := range n.List.Slice() { + for _, elt := range n.List { if elt.Op() == ir.OKEY { elt = elt.(*ir.KeyExpr).Value } @@ -653,7 +653,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { k = e.spill(k, n) k.uintptrEscapesHack = uintptrEscapesHack // for ...uintptr parameters - for _, elt := range n.List.Slice() { + for _, elt := range n.List { if elt.Op() == ir.OKEY { elt = elt.(*ir.KeyExpr).Value } @@ -662,7 +662,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { case ir.OSTRUCTLIT: n := n.(*ir.CompLitExpr) - for _, elt := range n.List.Slice() { + for _, elt := range n.List { e.expr(k.note(n, "struct literal element"), elt.(*ir.StructKeyExpr).Value) } @@ -671,7 +671,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { e.spill(k, n) // Map keys and values are always stored in the heap. 
- for _, elt := range n.List.Slice() { + for _, elt := range n.List { elt := elt.(*ir.KeyExpr) e.assignHeap(elt.Key, "map literal key", n) e.assignHeap(elt.Value, "map literal value", n) @@ -755,7 +755,7 @@ func (e *Escape) discard(n ir.Node) { } func (e *Escape) discards(l ir.Nodes) { - for _, n := range l.Slice() { + for _, n := range l { e.discard(n) } } @@ -810,7 +810,7 @@ func (e *Escape) addr(n ir.Node) EscHole { func (e *Escape) addrs(l ir.Nodes) []EscHole { var ks []EscHole - for _, n := range l.Slice() { + for _, n := range l { ks = append(ks, e.addr(n)) } return ks @@ -904,14 +904,14 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) { argument(e.discardHole(), call.X) } - args := call.Args.Slice() + args := call.Args for i, param := range fntype.Params().FieldSlice() { argument(e.tagHole(ks, fn, param), args[i]) } case ir.OAPPEND: call := call.(*ir.CallExpr) - args := call.Args.Slice() + args := call.Args // Appendee slice may flow directly to the result, if // it has enough capacity. Alternatively, a new heap @@ -955,7 +955,7 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) { argument(e.discardHole(), call.Y) case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER: call := call.(*ir.CallExpr) - for _, arg := range call.Args.Slice() { + for _, arg := range call.Args { argument(e.discardHole(), arg) } case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE: @@ -2084,7 +2084,7 @@ func (e *Escape) paramTag(fn *ir.Func, narg int, f *types.Field) string { return fmt.Sprintf("arg#%d", narg) } - if fn.Body.Len() == 0 { + if len(fn.Body) == 0 { // Assume that uintptr arguments must be held live across the call. // This is most important for syscall.Syscall. // See golang.org/issue/13372. diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index 6008abeff8f52..f4178db477146 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -275,7 +275,7 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { tail = call if tfn.Type().NumResults() > 0 { n := ir.NewReturnStmt(base.Pos, nil) - n.Results.Set1(call) + n.Results = []ir.Node{call} tail = n } } @@ -288,7 +288,7 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { typecheckFunc(fn) Curfn = fn - typecheckslice(fn.Body.Slice(), ctxStmt) + typecheckslice(fn.Body, ctxStmt) escapeFuncs([]*ir.Func{fn}, false) diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 60aa2eae8b4da..d601331ee4d7e 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -528,7 +528,7 @@ func (p *iexporter) doInline(f *ir.Name) { w := p.newWriter() w.setPkg(fnpkg(f), false) - w.stmtList(ir.AsNodes(f.Func.Inl.Body)) + w.stmtList(ir.Nodes(f.Func.Inl.Body)) w.finish("inl", p.inlineIndex, f.Sym()) } @@ -1035,7 +1035,7 @@ func (w *exportWriter) typeExt(t *types.Type) { // Inline bodies. func (w *exportWriter) stmtList(list ir.Nodes) { - for _, n := range list.Slice() { + for _, n := range list { w.node(n) } w.op(ir.OEND) @@ -1052,9 +1052,9 @@ func (w *exportWriter) node(n ir.Node) { // Caution: stmt will emit more than one node for statement nodes n that have a non-empty // n.Ninit and where n cannot have a natural init section (such as in "if", "for", etc.). 
func (w *exportWriter) stmt(n ir.Node) { - if n.Init().Len() > 0 && !ir.StmtWithInit(n.Op()) { + if len(n.Init()) > 0 && !ir.StmtWithInit(n.Op()) { // can't use stmtList here since we don't want the final OEND - for _, n := range n.Init().Slice() { + for _, n := range n.Init() { w.stmt(n) } } @@ -1068,7 +1068,7 @@ func (w *exportWriter) stmt(n ir.Node) { // generate OBLOCK nodes except to denote an empty // function body, although that may change.) n := n.(*ir.BlockStmt) - for _, n := range n.List.Slice() { + for _, n := range n.List { w.stmt(n) } @@ -1203,9 +1203,9 @@ func (w *exportWriter) caseList(sw ir.Node) { var cases []ir.Node if sw.Op() == ir.OSWITCH { - cases = sw.(*ir.SwitchStmt).Cases.Slice() + cases = sw.(*ir.SwitchStmt).Cases } else { - cases = sw.(*ir.SelectStmt).Cases.Slice() + cases = sw.(*ir.SelectStmt).Cases } w.uint64(uint64(len(cases))) for _, cas := range cases { @@ -1213,14 +1213,14 @@ func (w *exportWriter) caseList(sw ir.Node) { w.pos(cas.Pos()) w.stmtList(cas.List) if namedTypeSwitch { - w.localName(cas.Vars.First().(*ir.Name)) + w.localName(cas.Vars[0].(*ir.Name)) } w.stmtList(cas.Body) } } func (w *exportWriter) exprList(list ir.Nodes) { - for _, n := range list.Slice() { + for _, n := range list { w.expr(n) } w.op(ir.OEND) @@ -1540,8 +1540,8 @@ func (w *exportWriter) exprsOrNil(a, b ir.Node) { } func (w *exportWriter) fieldList(list ir.Nodes) { - w.uint64(uint64(list.Len())) - for _, n := range list.Slice() { + w.uint64(uint64(len(list))) + for _, n := range list { n := n.(*ir.StructKeyExpr) w.selector(n.Field) w.expr(n.Value) diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 4f460d54a200a..90a909d2a3c9c 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -723,9 +723,9 @@ func (r *importReader) doInline(fn *ir.Func) { if base.Flag.E > 0 && base.Flag.LowerM > 2 { if base.Flag.LowerM > 3 { - fmt.Printf("inl body for %v %v: %+v\n", fn, fn.Type(), ir.AsNodes(fn.Inl.Body)) + fmt.Printf("inl body for %v %v: %+v\n", fn, fn.Type(), ir.Nodes(fn.Inl.Body)) } else { - fmt.Printf("inl body for %v %v: %v\n", fn, fn.Type(), ir.AsNodes(fn.Inl.Body)) + fmt.Printf("inl body for %v %v: %v\n", fn, fn.Type(), ir.Nodes(fn.Inl.Body)) } } } @@ -757,7 +757,7 @@ func (r *importReader) stmtList() []ir.Node { // Inline them into the statement list. if n.Op() == ir.OBLOCK { n := n.(*ir.BlockStmt) - list = append(list, n.List.Slice()...) + list = append(list, n.List...) } else { list = append(list, n) } @@ -779,7 +779,7 @@ func (r *importReader) caseList(sw ir.Node) []ir.Node { // Sym for diagnostics anyway. caseVar := ir.NewNameAt(cas.Pos(), r.ident()) declare(caseVar, dclcontext) - cas.Vars.Set1(caseVar) + cas.Vars = []ir.Node{caseVar} caseVar.Defn = sw.(*ir.SwitchStmt).Tag } cas.Body.Set(r.stmtList()) @@ -996,7 +996,7 @@ func (r *importReader) node() ir.Node { var stmts ir.Nodes stmts.Append(ir.NewDecl(base.Pos, ir.ODCL, lhs)) stmts.Append(ir.NewAssignStmt(base.Pos, lhs, nil)) - return ir.NewBlockStmt(pos, stmts.Slice()) + return ir.NewBlockStmt(pos, stmts) // case OAS, OASWB: // unreachable - mapped to OAS case below by exporter diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index fbc88411cc90c..4495284a07444 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -83,8 +83,8 @@ func fninit() *ir.Name { // Record user init functions. for _, fn := range Target.Inits { // Skip init functions with empty bodies. 
- if fn.Body.Len() == 1 { - if stmt := fn.Body.First(); stmt.Op() == ir.OBLOCK && stmt.(*ir.BlockStmt).List.Len() == 0 { + if len(fn.Body) == 1 { + if stmt := fn.Body[0]; stmt.Op() == ir.OBLOCK && len(stmt.(*ir.BlockStmt).List) == 0 { continue } } diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go index ec3d7be45fd4d..fe131c32a65a9 100644 --- a/src/cmd/compile/internal/gc/initorder.go +++ b/src/cmd/compile/internal/gc/initorder.go @@ -258,7 +258,7 @@ func collectDeps(n ir.Node, transitive bool) ir.NameSet { d.inspect(n.Y) case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV: n := n.(*ir.AssignListStmt) - d.inspect(n.Rhs.First()) + d.inspect(n.Rhs[0]) case ir.ODCLFUNC: n := n.(*ir.Func) d.inspectList(n.Body) @@ -363,7 +363,7 @@ func firstLHS(n ir.Node) *ir.Name { return n.X.Name() case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2RECV, ir.OAS2MAPR: n := n.(*ir.AssignListStmt) - return n.Lhs.First().Name() + return n.Lhs[0].Name() } base.Fatalf("unexpected Op: %v", n.Op()) diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index edb2c5bb4207b..2fb23f1a3f252 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -113,7 +113,7 @@ func typecheckinl(fn *ir.Func) { } if base.Flag.LowerM > 2 || base.Debug.Export != 0 { - fmt.Printf("typecheck import [%v] %L { %v }\n", fn.Sym(), fn, ir.AsNodes(fn.Inl.Body)) + fmt.Printf("typecheck import [%v] %L { %v }\n", fn.Sym(), fn, ir.Nodes(fn.Inl.Body)) } savefn := Curfn @@ -196,7 +196,7 @@ func caninl(fn *ir.Func) { } // If fn has no body (is defined outside of Go), cannot inline it. - if fn.Body.Len() == 0 { + if len(fn.Body) == 0 { reason = "no function body" return } @@ -238,11 +238,11 @@ func caninl(fn *ir.Func) { n.Func.Inl = &ir.Inline{ Cost: inlineMaxBudget - visitor.budget, Dcl: pruneUnusedAutos(n.Defn.(*ir.Func).Dcl, &visitor), - Body: ir.DeepCopyList(src.NoXPos, fn.Body.Slice()), + Body: ir.DeepCopyList(src.NoXPos, fn.Body), } if base.Flag.LowerM > 1 { - fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, inlineMaxBudget-visitor.budget, fn.Type(), ir.AsNodes(n.Func.Inl.Body)) + fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, inlineMaxBudget-visitor.budget, fn.Type(), ir.Nodes(n.Func.Inl.Body)) } else if base.Flag.LowerM != 0 { fmt.Printf("%v: can inline %v\n", ir.Line(fn), n) } @@ -278,7 +278,7 @@ func inlFlood(n *ir.Name, exportsym func(*ir.Name)) { // Recursively identify all referenced functions for // reexport. We want to include even non-called functions, // because after inlining they might be callable. - ir.VisitList(ir.AsNodes(fn.Inl.Body), func(n ir.Node) { + ir.VisitList(ir.Nodes(fn.Inl.Body), func(n ir.Node) { switch n.Op() { case ir.OMETHEXPR, ir.ODOTMETH: inlFlood(methodExprName(n), exportsym) @@ -527,7 +527,7 @@ func inlcalls(fn *ir.Func) { func inlconv2stmt(inlcall *ir.InlinedCallExpr) ir.Node { n := ir.NewBlockStmt(inlcall.Pos(), nil) n.List = inlcall.Init() - n.List.AppendNodes(&inlcall.Body) + n.List.Append(inlcall.Body.Take()...) return n } @@ -535,8 +535,8 @@ func inlconv2stmt(inlcall *ir.InlinedCallExpr) ir.Node { // The result of inlconv2expr MUST be assigned back to n, e.g. 
// n.Left = inlconv2expr(n.Left) func inlconv2expr(n *ir.InlinedCallExpr) ir.Node { - r := n.ReturnVars.First() - return initExpr(append(n.Init().Slice(), n.Body.Slice()...), r) + r := n.ReturnVars[0] + return initExpr(append(n.Init(), n.Body...), r) } // Turn the rlist (with the return values) of the OINLCALL in @@ -545,12 +545,12 @@ func inlconv2expr(n *ir.InlinedCallExpr) ir.Node { // order will be preserved. Used in return, oas2func and call // statements. func inlconv2list(n *ir.InlinedCallExpr) []ir.Node { - if n.Op() != ir.OINLCALL || n.ReturnVars.Len() == 0 { + if n.Op() != ir.OINLCALL || len(n.ReturnVars) == 0 { base.Fatalf("inlconv2list %+v\n", n) } - s := n.ReturnVars.Slice() - s[0] = initExpr(append(n.Init().Slice(), n.Body.Slice()...), s[0]) + s := n.ReturnVars + s[0] = initExpr(append(n.Init(), n.Body...), s[0]) return s } @@ -600,8 +600,8 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No if as := n; as.Op() == ir.OAS2FUNC { as := as.(*ir.AssignListStmt) - if as.Rhs.First().Op() == ir.OINLCALL { - as.Rhs.Set(inlconv2list(as.Rhs.First().(*ir.InlinedCallExpr))) + if as.Rhs[0].Op() == ir.OINLCALL { + as.Rhs.Set(inlconv2list(as.Rhs[0].(*ir.InlinedCallExpr))) as.SetOp(ir.OAS2) as.SetTypecheck(0) n = typecheck(as, ctxStmt) @@ -736,9 +736,9 @@ FindRHS: rhs = defn.Y case ir.OAS2: defn := defn.(*ir.AssignListStmt) - for i, lhs := range defn.Lhs.Slice() { + for i, lhs := range defn.Lhs { if lhs == n { - rhs = defn.Rhs.Index(i) + rhs = defn.Rhs[i] break FindRHS } } @@ -780,7 +780,7 @@ func reassigned(name *ir.Name) bool { } case ir.OAS2, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OSELRECV2: n := n.(*ir.AssignListStmt) - for _, p := range n.Lhs.Slice() { + for _, p := range n.Lhs { if p == name && n != name.Defn { return true } @@ -870,7 +870,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b // We have a function node, and it has an inlineable body. if base.Flag.LowerM > 1 { - fmt.Printf("%v: inlining call to %v %v { %v }\n", ir.Line(n), fn.Sym(), fn.Type(), ir.AsNodes(fn.Inl.Body)) + fmt.Printf("%v: inlining call to %v %v { %v }\n", ir.Line(n), fn.Sym(), fn.Type(), ir.Nodes(fn.Inl.Body)) } else if base.Flag.LowerM != 0 { fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn) } @@ -890,7 +890,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b callee := n.X for callee.Op() == ir.OCONVNOP { conv := callee.(*ir.ConvExpr) - ninit.AppendNodes(conv.PtrInit()) + ninit.Append(conv.PtrInit().Take()...) callee = conv.X } if callee.Op() != ir.ONAME && callee.Op() != ir.OCLOSURE && callee.Op() != ir.OMETHEXPR { @@ -968,7 +968,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b } nreturns := 0 - ir.VisitList(ir.AsNodes(fn.Inl.Body), func(n ir.Node) { + ir.VisitList(ir.Nodes(fn.Inl.Body), func(n ir.Node) { if n != nil && n.Op() == ir.ORETURN { nreturns++ } @@ -1018,7 +1018,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b } as.Rhs.Append(sel.X) } - as.Rhs.Append(n.Args.Slice()...) + as.Rhs.Append(n.Args...) // For non-dotted calls to variadic functions, we assign the // variadic parameter's temp name separately. @@ -1039,11 +1039,11 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b // Otherwise, we need to collect the remaining values // to pass as a slice. 
- x := as.Lhs.Len() - for as.Lhs.Len() < as.Rhs.Len() { - as.Lhs.Append(argvar(param.Type, as.Lhs.Len())) + x := len(as.Lhs) + for len(as.Lhs) < len(as.Rhs) { + as.Lhs.Append(argvar(param.Type, len(as.Lhs))) } - varargs := as.Lhs.Slice()[x:] + varargs := as.Lhs[x:] vas = ir.NewAssignStmt(base.Pos, nil, nil) vas.X = inlParam(param, vas, inlvars) @@ -1057,7 +1057,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b } } - if as.Rhs.Len() != 0 { + if len(as.Rhs) != 0 { ninit.Append(typecheck(as, ctxStmt)) } @@ -1113,7 +1113,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b } subst.edit = subst.node - body := subst.list(ir.AsNodes(fn.Inl.Body)) + body := subst.list(ir.Nodes(fn.Inl.Body)) lab := ir.NewLabelStmt(base.Pos, retlabel) body = append(body, lab) @@ -1129,7 +1129,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b //dumplist("ninit post", ninit); call := ir.NewInlinedCallExpr(base.Pos, nil, nil) - call.PtrInit().Set(ninit.Slice()) + call.PtrInit().Set(ninit) call.Body.Set(body) call.ReturnVars.Set(retvars) call.SetType(n.Type()) @@ -1220,8 +1220,8 @@ type inlsubst struct { // list inlines a list of nodes. func (subst *inlsubst) list(ll ir.Nodes) []ir.Node { - s := make([]ir.Node, 0, ll.Len()) - for _, n := range ll.Slice() { + s := make([]ir.Node, 0, len(ll)) + for _, n := range ll { s = append(s, subst.node(n)) } return s @@ -1277,7 +1277,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { // this return is guaranteed to belong to the current inlined function. n := n.(*ir.ReturnStmt) init := subst.list(n.Init()) - if len(subst.retvars) != 0 && n.Results.Len() != 0 { + if len(subst.retvars) != 0 && len(n.Results) != 0 { as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) // Make a shallow copy of retvars. @@ -1289,7 +1289,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { as.Rhs.Set(subst.list(n.Results)) if subst.delayretvars { - for _, n := range as.Lhs.Slice() { + for _, n := range as.Lhs { as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n)) n.Name().Defn = as } diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 728c4b13167e1..bed37efb87930 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -968,10 +968,10 @@ func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []ir.Node { for i, stmt := range stmts { s := p.stmtFall(stmt, fallOK && i+1 == len(stmts)) if s == nil { - } else if s.Op() == ir.OBLOCK && s.(*ir.BlockStmt).List.Len() > 0 { + } else if s.Op() == ir.OBLOCK && len(s.(*ir.BlockStmt).List) > 0 { // Inline non-empty block. // Empty blocks must be preserved for checkreturn. - nodes = append(nodes, s.(*ir.BlockStmt).List.Slice()...) + nodes = append(nodes, s.(*ir.BlockStmt).List...) 
} else { nodes = append(nodes, s) } @@ -1065,7 +1065,7 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node { } n := ir.NewReturnStmt(p.pos(stmt), nil) n.Results.Set(results) - if n.Results.Len() == 0 && Curfn != nil { + if len(n.Results) == 0 && Curfn != nil { for _, ln := range Curfn.Dcl { if ln.Class_ == ir.PPARAM { continue @@ -1160,7 +1160,7 @@ func (p *noder) ifStmt(stmt *syntax.IfStmt) ir.Node { p.openScope(stmt.Pos()) n := ir.NewIfStmt(p.pos(stmt), nil, nil, nil) if stmt.Init != nil { - n.PtrInit().Set1(p.stmt(stmt.Init)) + *n.PtrInit() = []ir.Node{p.stmt(stmt.Init)} } if stmt.Cond != nil { n.Cond = p.expr(stmt.Cond) @@ -1170,9 +1170,9 @@ func (p *noder) ifStmt(stmt *syntax.IfStmt) ir.Node { e := p.stmt(stmt.Else) if e.Op() == ir.OBLOCK { e := e.(*ir.BlockStmt) - n.Else.Set(e.List.Slice()) + n.Else.Set(e.List) } else { - n.Else.Set1(e) + n.Else = []ir.Node{e} } } p.closeAnotherScope() @@ -1198,7 +1198,7 @@ func (p *noder) forStmt(stmt *syntax.ForStmt) ir.Node { n := ir.NewForStmt(p.pos(stmt), nil, nil, nil, nil) if stmt.Init != nil { - n.PtrInit().Set1(p.stmt(stmt.Init)) + *n.PtrInit() = []ir.Node{p.stmt(stmt.Init)} } if stmt.Cond != nil { n.Cond = p.expr(stmt.Cond) @@ -1215,7 +1215,7 @@ func (p *noder) switchStmt(stmt *syntax.SwitchStmt) ir.Node { p.openScope(stmt.Pos()) n := ir.NewSwitchStmt(p.pos(stmt), nil, nil) if stmt.Init != nil { - n.PtrInit().Set1(p.stmt(stmt.Init)) + *n.PtrInit() = []ir.Node{p.stmt(stmt.Init)} } if stmt.Tag != nil { n.Tag = p.expr(stmt.Tag) @@ -1247,7 +1247,7 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitch if tswitch != nil && tswitch.Tag != nil { nn := NewName(tswitch.Tag.Sym()) declare(nn, dclcontext) - n.Vars.Set1(nn) + n.Vars = []ir.Node{nn} // keep track of the instances for reporting unused nn.Defn = tswitch } @@ -1264,7 +1264,7 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitch } n.Body.Set(p.stmtsFall(body, true)) - if l := n.Body.Len(); l > 0 && n.Body.Index(l-1).Op() == ir.OFALL { + if l := len(n.Body); l > 0 && n.Body[l-1].Op() == ir.OFALL { if tswitch != nil { base.Errorf("cannot fallthrough in type switch") } @@ -1298,7 +1298,7 @@ func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []i n := ir.NewCaseStmt(p.pos(clause), nil, nil) if clause.Comm != nil { - n.List.Set1(p.stmt(clause.Comm)) + n.List = []ir.Node{p.stmt(clause.Comm)} } n.Body.Set(p.stmts(clause.Body)) nodes = append(nodes, n) @@ -1339,7 +1339,7 @@ func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node { if ls != nil { if ls.Op() == ir.OBLOCK { ls := ls.(*ir.BlockStmt) - l = append(l, ls.List.Slice()...) + l = append(l, ls.List...) 
} else { l = append(l, ls) } diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 53d83c0ac874b..45a2e2a43ecae 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -292,7 +292,7 @@ func mapKeyReplaceStrConv(n ir.Node) bool { replaced = true case ir.OSTRUCTLIT: n := n.(*ir.CompLitExpr) - for _, elem := range n.List.Slice() { + for _, elem := range n.List { elem := elem.(*ir.StructKeyExpr) if mapKeyReplaceStrConv(elem.Value) { replaced = true @@ -300,7 +300,7 @@ func mapKeyReplaceStrConv(n ir.Node) bool { } case ir.OARRAYLIT: n := n.(*ir.CompLitExpr) - for _, elem := range n.List.Slice() { + for _, elem := range n.List { if elem.Op() == ir.OKEY { elem = elem.(*ir.KeyExpr).Value } @@ -350,7 +350,7 @@ func (o *Order) cleanTemp(top ordermarker) { // stmtList orders each of the statements in the list. func (o *Order) stmtList(l ir.Nodes) { - s := l.Slice() + s := l for i := range s { orderMakeSliceCopy(s[i:]) o.stmt(s[i]) @@ -456,7 +456,7 @@ func (o *Order) init(n ir.Node) { if ir.MayBeShared(n) { // For concurrency safety, don't mutate potentially shared nodes. // First, ensure that no work is required here. - if n.Init().Len() > 0 { + if len(n.Init()) > 0 { base.Fatalf("order.init shared node with ninit") } return @@ -468,7 +468,7 @@ func (o *Order) init(n ir.Node) { // call orders the call expression n. // n.Op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY. func (o *Order) call(nn ir.Node) { - if nn.Init().Len() > 0 { + if len(nn.Init()) > 0 { // Caller should have already called o.init(nn). base.Fatalf("%v with unexpected ninit", nn.Op()) } @@ -521,9 +521,9 @@ func (o *Order) call(nn ir.Node) { // Check for "unsafe-uintptr" tag provided by escape analysis. for i, param := range n.X.Type().Params().FieldSlice() { if param.Note == unsafeUintptrTag || param.Note == uintptrEscapesTag { - if arg := n.Args.Index(i); arg.Op() == ir.OSLICELIT { + if arg := n.Args[i]; arg.Op() == ir.OSLICELIT { arg := arg.(*ir.CompLitExpr) - for _, elt := range arg.List.Slice() { + for _, elt := range arg.List { keepAlive(elt) } } else { @@ -569,7 +569,7 @@ func (o *Order) mapAssign(n ir.Node) { case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2MAPR, ir.OAS2FUNC: n := n.(*ir.AssignListStmt) var post []ir.Node - for i, m := range n.Lhs.Slice() { + for i, m := range n.Lhs { switch { case m.Op() == ir.OINDEXMAP: m := m.(*ir.IndexExpr) @@ -582,7 +582,7 @@ func (o *Order) mapAssign(n ir.Node) { fallthrough case instrumenting && n.Op() == ir.OAS2FUNC && !ir.IsBlank(m): t := o.newTemp(m.Type(), false) - n.Lhs.SetIndex(i, t) + n.Lhs[i] = t a := ir.NewAssignStmt(base.Pos, m, t) post = append(post, typecheck(a, ctxStmt)) } @@ -598,7 +598,7 @@ func (o *Order) safeMapRHS(r ir.Node) ir.Node { // We need to make sure the RHS won't panic. See issue 22881. 
if r.Op() == ir.OAPPEND { r := r.(*ir.CallExpr) - s := r.Args.Slice()[1:] + s := r.Args[1:] for i, n := range s { s[i] = o.cheapExpr(n) } @@ -676,8 +676,8 @@ func (o *Order) stmt(n ir.Node) { n := n.(*ir.AssignListStmt) t := o.markTemp() o.exprList(n.Lhs) - o.init(n.Rhs.First()) - o.call(n.Rhs.First()) + o.init(n.Rhs[0]) + o.call(n.Rhs[0]) o.as2(n) o.cleanTemp(t) @@ -692,7 +692,7 @@ func (o *Order) stmt(n ir.Node) { t := o.markTemp() o.exprList(n.Lhs) - switch r := n.Rhs.First(); r.Op() { + switch r := n.Rhs[0]; r.Op() { case ir.ODOTTYPE2: r := r.(*ir.TypeAssertExpr) r.X = o.expr(r.X, nil) @@ -772,9 +772,9 @@ func (o *Order) stmt(n ir.Node) { case ir.ODELETE: n := n.(*ir.CallExpr) t := o.markTemp() - n.Args.SetFirst(o.expr(n.Args.First(), nil)) - n.Args.SetSecond(o.expr(n.Args.Second(), nil)) - n.Args.SetSecond(o.mapKeyTemp(n.Args.First().Type(), n.Args.Second())) + n.Args[0] = o.expr(n.Args[0], nil) + n.Args[1] = o.expr(n.Args[1], nil) + n.Args[1] = o.mapKeyTemp(n.Args[0].Type(), n.Args[1]) o.out = append(o.out, n) o.cleanTemp(t) @@ -843,7 +843,7 @@ func (o *Order) stmt(n ir.Node) { base.Fatalf("order.stmt range %v", n.Type()) case types.TARRAY, types.TSLICE: - if n.Vars.Len() < 2 || ir.IsBlank(n.Vars.Second()) { + if len(n.Vars) < 2 || ir.IsBlank(n.Vars[1]) { // for i := range x will only use x once, to compute len(x). // No need to copy it. break @@ -906,14 +906,14 @@ func (o *Order) stmt(n ir.Node) { case ir.OSELECT: n := n.(*ir.SelectStmt) t := o.markTemp() - for _, ncas := range n.Cases.Slice() { + for _, ncas := range n.Cases { ncas := ncas.(*ir.CaseStmt) r := ncas.Comm setlineno(ncas) // Append any new body prologue to ninit. // The next loop will insert ninit into nbody. - if ncas.Init().Len() != 0 { + if len(ncas.Init()) != 0 { base.Fatalf("order select ninit") } if r == nil { @@ -927,17 +927,17 @@ func (o *Order) stmt(n ir.Node) { case ir.OSELRECV2: // case x, ok = <-c r := r.(*ir.AssignListStmt) - recv := r.Rhs.First().(*ir.UnaryExpr) + recv := r.Rhs[0].(*ir.UnaryExpr) recv.X = o.expr(recv.X, nil) if !ir.IsAutoTmp(recv.X) { recv.X = o.copyExpr(recv.X) } - init := r.PtrInit().Slice() + init := *r.PtrInit() r.PtrInit().Set(nil) colas := r.Def do := func(i int, t *types.Type) { - n := r.Lhs.Index(i) + n := r.Lhs[i] if ir.IsBlank(n) { return } @@ -955,7 +955,7 @@ func (o *Order) stmt(n ir.Node) { tmp := o.newTemp(t, t.HasPointers()) as := typecheck(ir.NewAssignStmt(base.Pos, n, conv(tmp, n.Type())), ctxStmt) ncas.PtrInit().Append(as) - (&r.Lhs).SetIndex(i, tmp) + r.Lhs[i] = tmp } do(0, recv.X.Type().Elem()) do(1, types.Types[types.TBOOL]) @@ -967,7 +967,7 @@ func (o *Order) stmt(n ir.Node) { case ir.OSEND: r := r.(*ir.SendStmt) - if r.Init().Len() != 0 { + if len(r.Init()) != 0 { ir.DumpList("ninit", r.Init()) base.Fatalf("ninit on select send") } @@ -988,14 +988,14 @@ func (o *Order) stmt(n ir.Node) { // Now that we have accumulated all the temporaries, clean them. // Also insert any ninit queued during the previous loop. // (The temporary cleaning must follow that ninit work.) - for _, cas := range n.Cases.Slice() { + for _, cas := range n.Cases { cas := cas.(*ir.CaseStmt) orderBlock(&cas.Body, o.free) cas.Body.Prepend(o.cleanTempNoPop(t)...) // TODO(mdempsky): Is this actually necessary? // walkselect appears to walk Ninit. - cas.Body.Prepend(cas.Init().Slice()...) + cas.Body.Prepend(cas.Init()...) 
cas.PtrInit().Set(nil) } @@ -1034,7 +1034,7 @@ func (o *Order) stmt(n ir.Node) { t := o.markTemp() n.Tag = o.expr(n.Tag, nil) - for _, ncas := range n.Cases.Slice() { + for _, ncas := range n.Cases { ncas := ncas.(*ir.CaseStmt) o.exprListInPlace(ncas.List) orderBlock(&ncas.Body, o.free) @@ -1048,9 +1048,9 @@ func (o *Order) stmt(n ir.Node) { } func hasDefaultCase(n *ir.SwitchStmt) bool { - for _, ncas := range n.Cases.Slice() { + for _, ncas := range n.Cases { ncas := ncas.(*ir.CaseStmt) - if ncas.List.Len() == 0 { + if len(ncas.List) == 0 { return true } } @@ -1059,7 +1059,7 @@ func hasDefaultCase(n *ir.SwitchStmt) bool { // exprList orders the expression list l into o. func (o *Order) exprList(l ir.Nodes) { - s := l.Slice() + s := l for i := range s { s[i] = o.expr(s[i], nil) } @@ -1068,7 +1068,7 @@ func (o *Order) exprList(l ir.Nodes) { // exprListInPlace orders the expression list l but saves // the side effects on the individual expression ninit lists. func (o *Order) exprListInPlace(l ir.Nodes) { - s := l.Slice() + s := l for i := range s { s[i] = o.exprInPlace(s[i]) } @@ -1113,8 +1113,8 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { n := n.(*ir.AddStringExpr) o.exprList(n.List) - if n.List.Len() > 5 { - t := types.NewArray(types.Types[types.TSTRING], int64(n.List.Len())) + if len(n.List) > 5 { + t := types.NewArray(types.Types[types.TSTRING], int64(len(n.List))) n.Prealloc = o.newTemp(t, false) } @@ -1128,13 +1128,13 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { hasbyte := false haslit := false - for _, n1 := range n.List.Slice() { + for _, n1 := range n.List { hasbyte = hasbyte || n1.Op() == ir.OBYTES2STR haslit = haslit || n1.Op() == ir.OLITERAL && len(ir.StringVal(n1)) != 0 } if haslit && hasbyte { - for _, n2 := range n.List.Slice() { + for _, n2 := range n.List { if n2.Op() == ir.OBYTES2STR { n2 := n2.(*ir.ConvExpr) n2.SetOp(ir.OBYTES2STRTMP) @@ -1276,14 +1276,14 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { // Check for append(x, make([]T, y)...) . n := n.(*ir.CallExpr) if isAppendOfMake(n) { - n.Args.SetFirst(o.expr(n.Args.First(), nil)) // order x - mk := n.Args.Second().(*ir.MakeExpr) + n.Args[0] = o.expr(n.Args[0], nil) // order x + mk := n.Args[1].(*ir.MakeExpr) mk.Len = o.expr(mk.Len, nil) // order y } else { o.exprList(n.Args) } - if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.Args.First()) { + if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.Args[0]) { return o.copyExpr(n) } return n @@ -1385,7 +1385,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { // the keys and values before storing any of them to the map. // See issue 26552. n := n.(*ir.CompLitExpr) - entries := n.List.Slice() + entries := n.List statics := entries[:0] var dynamics []*ir.KeyExpr for _, r := range entries { @@ -1441,10 +1441,10 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { func (o *Order) as2(n *ir.AssignListStmt) { tmplist := []ir.Node{} left := []ir.Node{} - for ni, l := range n.Lhs.Slice() { + for ni, l := range n.Lhs { if !ir.IsBlank(l) { tmp := o.newTemp(l.Type(), l.Type().HasPointers()) - n.Lhs.SetIndex(ni, tmp) + n.Lhs[ni] = tmp tmplist = append(tmplist, tmp) left = append(left, l) } @@ -1462,25 +1462,25 @@ func (o *Order) as2(n *ir.AssignListStmt) { // Just like as2, this also adds temporaries to ensure left-to-right assignment. 
func (o *Order) okAs2(n *ir.AssignListStmt) { var tmp1, tmp2 ir.Node - if !ir.IsBlank(n.Lhs.First()) { - typ := n.Rhs.First().Type() + if !ir.IsBlank(n.Lhs[0]) { + typ := n.Rhs[0].Type() tmp1 = o.newTemp(typ, typ.HasPointers()) } - if !ir.IsBlank(n.Lhs.Second()) { + if !ir.IsBlank(n.Lhs[1]) { tmp2 = o.newTemp(types.Types[types.TBOOL], false) } o.out = append(o.out, n) if tmp1 != nil { - r := ir.NewAssignStmt(base.Pos, n.Lhs.First(), tmp1) + r := ir.NewAssignStmt(base.Pos, n.Lhs[0], tmp1) o.mapAssign(typecheck(r, ctxStmt)) - n.Lhs.SetFirst(tmp1) + n.Lhs[0] = tmp1 } if tmp2 != nil { - r := ir.NewAssignStmt(base.Pos, n.Lhs.Second(), conv(tmp2, n.Lhs.Second().Type())) + r := ir.NewAssignStmt(base.Pos, n.Lhs[1], conv(tmp2, n.Lhs[1].Type())) o.mapAssign(typecheck(r, ctxStmt)) - n.Lhs.SetSecond(tmp2) + n.Lhs[1] = tmp2 } } diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 32550c8bd429b..785e01663f2f1 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -207,7 +207,7 @@ func funccompile(fn *ir.Func) { // assign parameter offsets dowidth(fn.Type()) - if fn.Body.Len() == 0 { + if len(fn.Body) == 0 { // Initialize ABI wrappers if necessary. initLSym(fn, false) emitptrargsmap(fn) @@ -360,7 +360,7 @@ func compileFunctions() { // since they're most likely to be the slowest. // This helps avoid stragglers. sort.Slice(compilequeue, func(i, j int) bool { - return compilequeue[i].Body.Len() > compilequeue[j].Body.Len() + return len(compilequeue[i].Body) > len(compilequeue[j].Body) }) } var wg sync.WaitGroup diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index 3aa4ff71fa14e..4d2964591b88f 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -27,7 +27,7 @@ func typecheckrange(n *ir.RangeStmt) { // second half of dance, the first half being typecheckrangeExpr n.SetTypecheck(1) - ls := n.Vars.Slice() + ls := n.Vars for i1, n1 := range ls { if n1.Typecheck() == 0 { ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign) @@ -35,7 +35,7 @@ func typecheckrange(n *ir.RangeStmt) { } decldepth++ - typecheckslice(n.Body.Slice(), ctxStmt) + typecheckslice(n.Body, ctxStmt) decldepth-- } @@ -47,7 +47,7 @@ func typecheckrangeExpr(n *ir.RangeStmt) { return } // delicate little dance. see typecheckas2 - ls := n.Vars.Slice() + ls := n.Vars for i1, n1 := range ls { if !ir.DeclaredBy(n1, n) { ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign) @@ -82,7 +82,7 @@ func typecheckrangeExpr(n *ir.RangeStmt) { t1 = t.Elem() t2 = nil - if n.Vars.Len() == 2 { + if len(n.Vars) == 2 { toomany = true } @@ -91,16 +91,16 @@ func typecheckrangeExpr(n *ir.RangeStmt) { t2 = types.RuneType } - if n.Vars.Len() > 2 || toomany { + if len(n.Vars) > 2 || toomany { base.ErrorfAt(n.Pos(), "too many variables in range") } var v1, v2 ir.Node - if n.Vars.Len() != 0 { - v1 = n.Vars.First() + if len(n.Vars) != 0 { + v1 = n.Vars[0] } - if n.Vars.Len() > 1 { - v2 = n.Vars.Second() + if len(n.Vars) > 1 { + v2 = n.Vars[1] } // this is not only an optimization but also a requirement in the spec. @@ -109,7 +109,7 @@ func typecheckrangeExpr(n *ir.RangeStmt) { // present." 
if ir.IsBlank(v2) { if v1 != nil { - n.Vars.Set1(v1) + n.Vars = []ir.Node{v1} } v2 = nil } @@ -183,13 +183,13 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { lno := setlineno(a) var v1, v2 ir.Node - l := nrange.Vars.Len() + l := len(nrange.Vars) if l > 0 { - v1 = nrange.Vars.First() + v1 = nrange.Vars[0] } if l > 1 { - v2 = nrange.Vars.Second() + v2 = nrange.Vars[1] } if ir.IsBlank(v2) { @@ -249,8 +249,8 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { // Use OAS2 to correctly handle assignments // of the form "v1, a[v1] := range". a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - a.Lhs.Set2(v1, v2) - a.Rhs.Set2(hv1, tmp) + a.Lhs = []ir.Node{v1, v2} + a.Rhs = []ir.Node{hv1, tmp} body = []ir.Node{a} break } @@ -279,8 +279,8 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { // Use OAS2 to correctly handle assignments // of the form "v1, a[v1] := range". a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - a.Lhs.Set2(v1, v2) - a.Rhs.Set2(hv1, ir.NewStarExpr(base.Pos, hp)) + a.Lhs = []ir.Node{v1, v2} + a.Rhs = []ir.Node{hv1, ir.NewStarExpr(base.Pos, hp)} body = append(body, a) // Advance pointer as part of the late increment. @@ -289,7 +289,7 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { // advancing the pointer is safe and won't go past the // end of the allocation. as := ir.NewAssignStmt(base.Pos, hp, addptr(hp, t.Elem().Width)) - nfor.Late.Set1(typecheck(as, ctxStmt)) + nfor.Late = []ir.Node{typecheck(as, ctxStmt)} case types.TMAP: // order.stmt allocated the iterator for us. @@ -319,8 +319,8 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { } else { elem := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, elemsym)) a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - a.Lhs.Set2(v1, v2) - a.Rhs.Set2(key, elem) + a.Lhs = []ir.Node{v1, v2} + a.Rhs = []ir.Node{key, elem} body = []ir.Node{a} } @@ -338,9 +338,9 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, hb, nodbool(false)) a := ir.NewAssignListStmt(base.Pos, ir.OAS2RECV, nil, nil) a.SetTypecheck(1) - a.Lhs.Set2(hv1, hb) - a.Rhs.Set1(ir.NewUnaryExpr(base.Pos, ir.ORECV, ha)) - nfor.Cond.PtrInit().Set1(a) + a.Lhs = []ir.Node{hv1, hb} + a.Rhs = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.ORECV, ha)} + *nfor.Cond.PtrInit() = []ir.Node{a} if v1 == nil { body = nil } else { @@ -395,16 +395,16 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv2, nodintconst(utf8.RuneSelf)) // hv1++ - nif.Body.Set1(ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, nodintconst(1)))) + nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, nodintconst(1)))} // } else { eif := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - nif.Else.Set1(eif) + nif.Else = []ir.Node{eif} // hv2, hv1 = decoderune(ha, hv1) - eif.Lhs.Set2(hv2, hv1) + eif.Lhs = []ir.Node{hv2, hv1} fn := syslook("decoderune") - eif.Rhs.Set1(mkcall1(fn, fn.Type().Results(), nil, ha, hv1)) + eif.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), nil, ha, hv1)} body = append(body, nif) @@ -412,8 +412,8 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { if v2 != nil { // v1, v2 = hv1t, hv2 a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - a.Lhs.Set2(v1, v2) - a.Rhs.Set2(hv1t, hv2) + a.Lhs = []ir.Node{v1, v2} + a.Rhs = []ir.Node{hv1t, hv2} body = append(body, a) } else { // v1 = hv1t @@ -431,18 +431,18 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { nfor.PtrInit().Append(init...) 
} - typecheckslice(nfor.Cond.Init().Slice(), ctxStmt) + typecheckslice(nfor.Cond.Init(), ctxStmt) nfor.Cond = typecheck(nfor.Cond, ctxExpr) nfor.Cond = defaultlit(nfor.Cond, nil) nfor.Post = typecheck(nfor.Post, ctxStmt) typecheckslice(body, ctxStmt) nfor.Body.Append(body...) - nfor.Body.Append(nrange.Body.Slice()...) + nfor.Body.Append(nrange.Body...) var n ir.Node = nfor if ifGuard != nil { - ifGuard.Body.Set1(n) + ifGuard.Body = []ir.Node{n} n = ifGuard } @@ -464,11 +464,11 @@ func isMapClear(n *ir.RangeStmt) bool { return false } - if n.Op() != ir.ORANGE || n.Type().Kind() != types.TMAP || n.Vars.Len() != 1 { + if n.Op() != ir.ORANGE || n.Type().Kind() != types.TMAP || len(n.Vars) != 1 { return false } - k := n.Vars.First() + k := n.Vars[0] if k == nil || ir.IsBlank(k) { return false } @@ -478,17 +478,17 @@ func isMapClear(n *ir.RangeStmt) bool { return false } - if n.Body.Len() != 1 { + if len(n.Body) != 1 { return false } - stmt := n.Body.First() // only stmt in body + stmt := n.Body[0] // only stmt in body if stmt == nil || stmt.Op() != ir.ODELETE { return false } m := n.X - if delete := stmt.(*ir.CallExpr); !samesafeexpr(delete.Args.First(), m) || !samesafeexpr(delete.Args.Second(), k) { + if delete := stmt.(*ir.CallExpr); !samesafeexpr(delete.Args[0], m) || !samesafeexpr(delete.Args[1], k) { return false } @@ -531,11 +531,11 @@ func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node { return nil } - if loop.Body.Len() != 1 || loop.Body.First() == nil { + if len(loop.Body) != 1 || loop.Body[0] == nil { return nil } - stmt1 := loop.Body.First() // only stmt in body + stmt1 := loop.Body[0] // only stmt in body if stmt1.Op() != ir.OAS { return nil } @@ -597,7 +597,7 @@ func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node { n.Cond = typecheck(n.Cond, ctxExpr) n.Cond = defaultlit(n.Cond, nil) - typecheckslice(n.Body.Slice(), ctxStmt) + typecheckslice(n.Body, ctxStmt) return walkstmt(n) } diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go index 5c69be7e06ecd..0bf070aa874e1 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/gc/select.go @@ -14,28 +14,28 @@ import ( func typecheckselect(sel *ir.SelectStmt) { var def ir.Node lno := setlineno(sel) - typecheckslice(sel.Init().Slice(), ctxStmt) - for _, ncase := range sel.Cases.Slice() { + typecheckslice(sel.Init(), ctxStmt) + for _, ncase := range sel.Cases { ncase := ncase.(*ir.CaseStmt) - if ncase.List.Len() == 0 { + if len(ncase.List) == 0 { // default if def != nil { base.ErrorfAt(ncase.Pos(), "multiple defaults in select (first at %v)", ir.Line(def)) } else { def = ncase } - } else if ncase.List.Len() > 1 { + } else if len(ncase.List) > 1 { base.ErrorfAt(ncase.Pos(), "select cases cannot be lists") } else { - ncase.List.SetFirst(typecheck(ncase.List.First(), ctxStmt)) - n := ncase.List.First() + ncase.List[0] = typecheck(ncase.List[0], ctxStmt) + n := ncase.List[0] ncase.Comm = n ncase.List.Set(nil) oselrecv2 := func(dst, recv ir.Node, colas bool) { n := ir.NewAssignListStmt(n.Pos(), ir.OSELRECV2, nil, nil) - n.Lhs.Set2(dst, ir.BlankNode) - n.Rhs.Set1(recv) + n.Lhs = []ir.Node{dst, ir.BlankNode} + n.Rhs = []ir.Node{recv} n.Def = colas n.SetTypecheck(1) ncase.Comm = n @@ -71,7 +71,7 @@ func typecheckselect(sel *ir.SelectStmt) { case ir.OAS2RECV: n := n.(*ir.AssignListStmt) - if n.Rhs.First().Op() != ir.ORECV { + if n.Rhs[0].Op() != ir.ORECV { base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side") break } @@ -87,7 +87,7 @@ func 
typecheckselect(sel *ir.SelectStmt) { } } - typecheckslice(ncase.Body.Slice(), ctxStmt) + typecheckslice(ncase.Body, ctxStmt) } base.Pos = lno @@ -95,24 +95,24 @@ func typecheckselect(sel *ir.SelectStmt) { func walkselect(sel *ir.SelectStmt) { lno := setlineno(sel) - if sel.Compiled.Len() != 0 { + if len(sel.Compiled) != 0 { base.Fatalf("double walkselect") } - init := sel.Init().Slice() + init := sel.Init() sel.PtrInit().Set(nil) init = append(init, walkselectcases(sel.Cases)...) sel.Cases = ir.Nodes{} sel.Compiled.Set(init) - walkstmtlist(sel.Compiled.Slice()) + walkstmtlist(sel.Compiled) base.Pos = lno } func walkselectcases(cases ir.Nodes) []ir.Node { - ncas := cases.Len() + ncas := len(cases) sellineno := base.Pos // optimization: zero-case select @@ -122,12 +122,12 @@ func walkselectcases(cases ir.Nodes) []ir.Node { // optimization: one-case select: single op. if ncas == 1 { - cas := cases.First().(*ir.CaseStmt) + cas := cases[0].(*ir.CaseStmt) setlineno(cas) - l := cas.Init().Slice() + l := cas.Init() if cas.Comm != nil { // not default: n := cas.Comm - l = append(l, n.Init().Slice()...) + l = append(l, n.Init()...) n.PtrInit().Set(nil) switch n.Op() { default: @@ -138,8 +138,8 @@ func walkselectcases(cases ir.Nodes) []ir.Node { case ir.OSELRECV2: r := n.(*ir.AssignListStmt) - if ir.IsBlank(r.Lhs.First()) && ir.IsBlank(r.Lhs.Second()) { - n = r.Rhs.First() + if ir.IsBlank(r.Lhs[0]) && ir.IsBlank(r.Lhs[1]) { + n = r.Rhs[0] break } r.SetOp(ir.OAS2RECV) @@ -148,7 +148,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node { l = append(l, n) } - l = append(l, cas.Body.Slice()...) + l = append(l, cas.Body...) l = append(l, ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)) return l } @@ -156,7 +156,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node { // convert case value arguments to addresses. // this rewrite is used by both the general code and the next optimization. var dflt *ir.CaseStmt - for _, cas := range cases.Slice() { + for _, cas := range cases { cas := cas.(*ir.CaseStmt) setlineno(cas) n := cas.Comm @@ -172,24 +172,24 @@ func walkselectcases(cases ir.Nodes) []ir.Node { case ir.OSELRECV2: n := n.(*ir.AssignListStmt) - if !ir.IsBlank(n.Lhs.First()) { - n.Lhs.SetIndex(0, nodAddr(n.Lhs.First())) - n.Lhs.SetIndex(0, typecheck(n.Lhs.First(), ctxExpr)) + if !ir.IsBlank(n.Lhs[0]) { + n.Lhs[0] = nodAddr(n.Lhs[0]) + n.Lhs[0] = typecheck(n.Lhs[0], ctxExpr) } } } // optimization: two-case select but one is default: single non-blocking op. 
if ncas == 2 && dflt != nil { - cas := cases.First().(*ir.CaseStmt) + cas := cases[0].(*ir.CaseStmt) if cas == dflt { - cas = cases.Second().(*ir.CaseStmt) + cas = cases[1].(*ir.CaseStmt) } n := cas.Comm setlineno(n) r := ir.NewIfStmt(base.Pos, nil, nil, nil) - r.PtrInit().Set(cas.Init().Slice()) + r.PtrInit().Set(cas.Init()) var call ir.Node switch n.Op() { default: @@ -203,26 +203,26 @@ func walkselectcases(cases ir.Nodes) []ir.Node { case ir.OSELRECV2: n := n.(*ir.AssignListStmt) - recv := n.Rhs.First().(*ir.UnaryExpr) + recv := n.Rhs[0].(*ir.UnaryExpr) ch := recv.X - elem := n.Lhs.First() + elem := n.Lhs[0] if ir.IsBlank(elem) { elem = nodnil() } - if ir.IsBlank(n.Lhs.Second()) { + if ir.IsBlank(n.Lhs[1]) { // if selectnbrecv(&v, c) { body } else { default body } call = mkcall1(chanfn("selectnbrecv", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, ch) } else { // TODO(cuonglm): make this use selectnbrecv() // if selectnbrecv2(&v, &received, c) { body } else { default body } - receivedp := typecheck(nodAddr(n.Lhs.Second()), ctxExpr) + receivedp := typecheck(nodAddr(n.Lhs[1]), ctxExpr) call = mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch) } } r.Cond = typecheck(call, ctxExpr) - r.Body.Set(cas.Body.Slice()) - r.Else.Set(append(dflt.Init().Slice(), dflt.Body.Slice()...)) + r.Body.Set(cas.Body) + r.Else.Set(append(dflt.Init(), dflt.Body...)) return []ir.Node{r, ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)} } @@ -251,11 +251,11 @@ func walkselectcases(cases ir.Nodes) []ir.Node { } // register cases - for _, cas := range cases.Slice() { + for _, cas := range cases { cas := cas.(*ir.CaseStmt) setlineno(cas) - init = append(init, cas.Init().Slice()...) + init = append(init, cas.Init()...) cas.PtrInit().Set(nil) n := cas.Comm @@ -278,9 +278,9 @@ func walkselectcases(cases ir.Nodes) []ir.Node { n := n.(*ir.AssignListStmt) nrecvs++ i = ncas - nrecvs - recv := n.Rhs.First().(*ir.UnaryExpr) + recv := n.Rhs[0].(*ir.UnaryExpr) c = recv.X - elem = n.Lhs.First() + elem = n.Lhs[0] } casorder[i] = cas @@ -313,9 +313,9 @@ func walkselectcases(cases ir.Nodes) []ir.Node { chosen := temp(types.Types[types.TINT]) recvOK := temp(types.Types[types.TBOOL]) r := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - r.Lhs.Set2(chosen, recvOK) + r.Lhs = []ir.Node{chosen, recvOK} fn := syslook("selectgo") - r.Rhs.Set1(mkcall1(fn, fn.Type().Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil))) + r.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil))} init = append(init, typecheck(r, ctxStmt)) // selv and order are no longer alive after selectgo. @@ -334,13 +334,13 @@ func walkselectcases(cases ir.Nodes) []ir.Node { if n := cas.Comm; n != nil && n.Op() == ir.OSELRECV2 { n := n.(*ir.AssignListStmt) - if !ir.IsBlank(n.Lhs.Second()) { - x := ir.NewAssignStmt(base.Pos, n.Lhs.Second(), recvOK) + if !ir.IsBlank(n.Lhs[1]) { + x := ir.NewAssignStmt(base.Pos, n.Lhs[1], recvOK) r.Body.Append(typecheck(x, ctxStmt)) } } - r.Body.AppendNodes(&cas.Body) + r.Body.Append(cas.Body.Take()...) 
r.Body.Append(ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)) init = append(init, r) } diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 0fc19a69894b5..9445627b41a7c 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -439,7 +439,7 @@ func getdyn(n ir.Node, top bool) initGenType { if !top { return initDynamic } - if n.Len/4 > int64(n.List.Len()) { + if n.Len/4 > int64(len(n.List)) { // <25% of entries have explicit values. // Very rough estimation, it takes 4 bytes of instructions // to initialize 1 byte of result. So don't use a static @@ -454,7 +454,7 @@ func getdyn(n ir.Node, top bool) initGenType { lit := n.(*ir.CompLitExpr) var mode initGenType - for _, n1 := range lit.List.Slice() { + for _, n1 := range lit.List { switch n1.Op() { case ir.OKEY: n1 = n1.(*ir.KeyExpr).Value @@ -476,7 +476,7 @@ func isStaticCompositeLiteral(n ir.Node) bool { return false case ir.OARRAYLIT: n := n.(*ir.CompLitExpr) - for _, r := range n.List.Slice() { + for _, r := range n.List { if r.Op() == ir.OKEY { r = r.(*ir.KeyExpr).Value } @@ -487,7 +487,7 @@ func isStaticCompositeLiteral(n ir.Node) bool { return true case ir.OSTRUCTLIT: n := n.(*ir.CompLitExpr) - for _, r := range n.List.Slice() { + for _, r := range n.List { r := r.(*ir.StructKeyExpr) if !isStaticCompositeLiteral(r.Value) { return false @@ -568,7 +568,7 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node, base.Fatalf("fixedlit bad op: %v", n.Op()) } - for _, r := range n.List.Slice() { + for _, r := range n.List { a, value := splitnode(r) if a == ir.BlankNode && !anySideEffects(value) { // Discard. @@ -722,7 +722,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) // put dynamics into array (5) var index int64 - for _, value := range n.List.Slice() { + for _, value := range n.List { if value.Op() == ir.OKEY { kv := value.(*ir.KeyExpr) index = indexconst(kv.Key) @@ -778,10 +778,10 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) { // make the map var a := ir.NewCallExpr(base.Pos, ir.OMAKE, nil, nil) a.SetEsc(n.Esc()) - a.Args.Set2(ir.TypeNode(n.Type()), nodintconst(int64(n.List.Len()))) + a.Args = []ir.Node{ir.TypeNode(n.Type()), nodintconst(int64(len(n.List)))} litas(m, a, init) - entries := n.List.Slice() + entries := n.List // The order pass already removed any dynamic (runtime-computed) entries. // All remaining entries are static. Double-check that. 
@@ -837,8 +837,8 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) { body := ir.NewAssignStmt(base.Pos, lhs, rhs) loop := ir.NewForStmt(base.Pos, nil, cond, incr, nil) - loop.Body.Set1(body) - loop.PtrInit().Set1(zero) + loop.Body = []ir.Node{body} + *loop.PtrInit() = []ir.Node{zero} appendWalkStmt(init, loop) return @@ -910,7 +910,7 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { base.Fatalf("anylit: not struct/array") } - if isSimpleName(var_) && n.List.Len() > 4 { + if isSimpleName(var_) && len(n.List) > 4 { // lay out static data vstat := readonlystaticname(t) @@ -935,7 +935,7 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { components = int64(t.NumFields()) } // initialization of an array or struct with unspecified components (missing fields or arrays) - if isSimpleName(var_) || int64(n.List.Len()) < components { + if isSimpleName(var_) || int64(len(n.List)) < components { appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil)) } @@ -1058,7 +1058,7 @@ func (s *InitSchedule) initplan(n ir.Node) { case ir.OARRAYLIT, ir.OSLICELIT: n := n.(*ir.CompLitExpr) var k int64 - for _, a := range n.List.Slice() { + for _, a := range n.List { if a.Op() == ir.OKEY { kv := a.(*ir.KeyExpr) k = indexconst(kv.Key) @@ -1073,7 +1073,7 @@ func (s *InitSchedule) initplan(n ir.Node) { case ir.OSTRUCTLIT: n := n.(*ir.CompLitExpr) - for _, a := range n.List.Slice() { + for _, a := range n.List { if a.Op() != ir.OSTRUCTKEY { base.Fatalf("initplan structlit") } @@ -1086,7 +1086,7 @@ func (s *InitSchedule) initplan(n ir.Node) { case ir.OMAPLIT: n := n.(*ir.CompLitExpr) - for _, a := range n.List.Slice() { + for _, a := range n.List { if a.Op() != ir.OKEY { base.Fatalf("initplan maplit") } @@ -1135,7 +1135,7 @@ func isZero(n ir.Node) bool { case ir.OARRAYLIT: n := n.(*ir.CompLitExpr) - for _, n1 := range n.List.Slice() { + for _, n1 := range n.List { if n1.Op() == ir.OKEY { n1 = n1.(*ir.KeyExpr).Value } @@ -1147,7 +1147,7 @@ func isZero(n ir.Node) bool { case ir.OSTRUCTLIT: n := n.(*ir.CompLitExpr) - for _, n1 := range n.List.Slice() { + for _, n1 := range n.List { n1 := n1.(*ir.StructKeyExpr) if !isZero(n1.Value) { return false diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 4660da0456c39..6993b4b1c74af 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -392,7 +392,7 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { // that we don't track correctly. s.hasOpenDefers = false } - if s.hasOpenDefers && s.curfn.Exit.Len() > 0 { + if s.hasOpenDefers && len(s.curfn.Exit) > 0 { // Skip doing open defers if there is any extra exit code (likely // copying heap-allocated return values or race detection), since // we will not generate that code in the case of the extra @@ -1127,7 +1127,7 @@ func (s *state) move(t *types.Type, dst, src *ssa.Value) { // stmtList converts the statement list n to SSA and adds it to s. 
func (s *state) stmtList(l ir.Nodes) { - for _, n := range l.Slice() { + for _, n := range l { s.stmt(n) } } @@ -1208,9 +1208,9 @@ func (s *state) stmt(n ir.Node) { case ir.OAS2DOTTYPE: n := n.(*ir.AssignListStmt) - res, resok := s.dottype(n.Rhs.First().(*ir.TypeAssertExpr), true) + res, resok := s.dottype(n.Rhs[0].(*ir.TypeAssertExpr), true) deref := false - if !canSSAType(n.Rhs.First().Type()) { + if !canSSAType(n.Rhs[0].Type()) { if res.Op != ssa.OpLoad { s.Fatalf("dottype of non-load") } @@ -1224,22 +1224,22 @@ func (s *state) stmt(n ir.Node) { deref = true res = res.Args[0] } - s.assign(n.Lhs.First(), res, deref, 0) - s.assign(n.Lhs.Second(), resok, false, 0) + s.assign(n.Lhs[0], res, deref, 0) + s.assign(n.Lhs[1], resok, false, 0) return case ir.OAS2FUNC: // We come here only when it is an intrinsic call returning two values. n := n.(*ir.AssignListStmt) - call := n.Rhs.First().(*ir.CallExpr) + call := n.Rhs[0].(*ir.CallExpr) if !IsIntrinsicCall(call) { s.Fatalf("non-intrinsic AS2FUNC not expanded %v", call) } v := s.intrinsicCall(call) - v1 := s.newValue1(ssa.OpSelect0, n.Lhs.First().Type(), v) - v2 := s.newValue1(ssa.OpSelect1, n.Lhs.Second().Type(), v) - s.assign(n.Lhs.First(), v1, false, 0) - s.assign(n.Lhs.Second(), v2, false, 0) + v1 := s.newValue1(ssa.OpSelect0, n.Lhs[0].Type(), v) + v2 := s.newValue1(ssa.OpSelect1, n.Lhs[1].Type(), v) + s.assign(n.Lhs[0], v1, false, 0) + s.assign(n.Lhs[1], v2, false, 0) return case ir.ODCL: @@ -1309,7 +1309,7 @@ func (s *state) stmt(n ir.Node) { // Check whether we're writing the result of an append back to the same slice. // If so, we handle it specially to avoid write barriers on the fast // (non-growth) path. - if !samesafeexpr(n.X, rhs.Args.First()) || base.Flag.N != 0 { + if !samesafeexpr(n.X, rhs.Args[0]) || base.Flag.N != 0 { break } // If the slice can be SSA'd, it'll be on the stack, @@ -1412,27 +1412,27 @@ func (s *state) stmt(n ir.Node) { likely = 1 } var bThen *ssa.Block - if n.Body.Len() != 0 { + if len(n.Body) != 0 { bThen = s.f.NewBlock(ssa.BlockPlain) } else { bThen = bEnd } var bElse *ssa.Block - if n.Else.Len() != 0 { + if len(n.Else) != 0 { bElse = s.f.NewBlock(ssa.BlockPlain) } else { bElse = bEnd } s.condBranch(n.Cond, bThen, bElse, likely) - if n.Body.Len() != 0 { + if len(n.Body) != 0 { s.startBlock(bThen) s.stmtList(n.Body) if b := s.endBlock(); b != nil { b.AddEdgeTo(bEnd) } } - if n.Else.Len() != 0 { + if len(n.Else) != 0 { s.startBlock(bElse) s.stmtList(n.Else) if b := s.endBlock(); b != nil { @@ -2865,8 +2865,8 @@ func (s *state) expr(n ir.Node) *ssa.Value { case ir.OSLICEHEADER: n := n.(*ir.SliceHeaderExpr) p := s.expr(n.Ptr) - l := s.expr(n.LenCap.First()) - c := s.expr(n.LenCap.Second()) + l := s.expr(n.LenCap[0]) + c := s.expr(n.LenCap[1]) return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c) case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR: @@ -2987,7 +2987,7 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value { pt := types.NewPtr(et) // Evaluate slice - sn := n.Args.First() // the slice node is the first in the list + sn := n.Args[0] // the slice node is the first in the list var slice, addr *ssa.Value if inplace { @@ -3002,7 +3002,7 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value { assign := s.f.NewBlock(ssa.BlockPlain) // Decide if we need to grow - nargs := int64(n.Args.Len() - 1) + nargs := int64(len(n.Args) - 1) p := s.newValue1(ssa.OpSlicePtr, pt, slice) l := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) c := s.newValue1(ssa.OpSliceCap, 
types.Types[types.TINT], slice) @@ -3071,7 +3071,7 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value { store bool } args := make([]argRec, 0, nargs) - for _, n := range n.Args.Slice()[1:] { + for _, n := range n.Args[1:] { if canSSAType(n.Type()) { args = append(args, argRec{v: s.expr(n), store: true}) } else { @@ -4360,7 +4360,7 @@ func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value { func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value { // Construct map of temps; see comments in s.call about the structure of n. temps := map[ir.Node]*ssa.Value{} - for _, a := range n.Args.Slice() { + for _, a := range n.Args { if a.Op() != ir.OAS { s.Fatalf("non-assignment as a temp function argument %v", a.Op()) } @@ -4373,8 +4373,8 @@ func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value { // Walk ensures these temporaries are dead outside of n. temps[l] = s.expr(r) } - args := make([]*ssa.Value, n.Rargs.Len()) - for i, n := range n.Rargs.Slice() { + args := make([]*ssa.Value, len(n.Rargs)) + for i, n := range n.Rargs { // Store a value to an argument slot. if x, ok := temps[n]; ok { // This is a previously computed temporary. @@ -4442,7 +4442,7 @@ func (s *state) openDeferRecord(n *ir.CallExpr) { opendefer.closureNode = opendefer.closure.Aux.(*ir.Name) opendefer.rcvrNode = opendefer.rcvr.Aux.(*ir.Name) } - for _, argn := range n.Rargs.Slice() { + for _, argn := range n.Rargs { var v *ssa.Value if canSSAType(argn.Type()) { v = s.openDeferSave(nil, argn.Type(), s.expr(argn)) @@ -4769,7 +4769,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val // Then, store all the arguments of the defer call. ft := fn.Type() off := t.FieldOff(12) - args := n.Rargs.Slice() + args := n.Rargs // Set receiver (for interface calls). Always a pointer. if rcvr != nil { @@ -4846,7 +4846,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val // Write args. t := n.X.Type() - args := n.Rargs.Slice() + args := n.Rargs if n.Op() == ir.OCALLMETH { f := t.Recv() ACArg, arg := s.putArg(args[0], f.Type, argStart+f.Offset, testLateExpansion) @@ -6158,7 +6158,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val targetITab = target } else { // Looking for pointer to itab for target type and source interface. - targetITab = s.expr(n.Itab.First()) + targetITab = s.expr(n.Itab[0]) } var tmp ir.Node // temporary for use with large types diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 450b20e000173..59763824fbe0c 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -552,7 +552,7 @@ func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node { func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) { var init ir.Nodes c := cheapexpr(n, &init) - if c != n || init.Len() != 0 { + if c != n || len(init) != 0 { base.Fatalf("backingArrayPtrLen not cheap: %v", n) } ptr = ir.NewUnaryExpr(base.Pos, ir.OSPTR, n) @@ -593,7 +593,7 @@ func updateHasCall(n ir.Node) { } func calcHasCall(n ir.Node) bool { - if n.Init().Len() != 0 { + if len(n.Init()) != 0 { // TODO(mdempsky): This seems overly conservative. return true } @@ -772,9 +772,9 @@ func safeexpr(n ir.Node, init *ir.Nodes) ir.Node { return nil } - if n.Init().Len() != 0 { - walkstmtlist(n.Init().Slice()) - init.AppendNodes(n.PtrInit()) + if len(n.Init()) != 0 { + walkstmtlist(n.Init()) + init.Append(n.PtrInit().Take()...) 
} switch n.Op() { @@ -1230,7 +1230,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { n := ir.NewIfStmt(base.Pos, nil, nil, nil) n.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, nthis, nodnil()) call := ir.NewCallExpr(base.Pos, ir.OCALL, syslook("panicwrap"), nil) - n.Body.Set1(call) + n.Body = []ir.Node{call} fn.Body.Append(n) } @@ -1259,7 +1259,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { call.IsDDD = tfn.Type().IsVariadic() if method.Type.NumResults() > 0 { ret := ir.NewReturnStmt(base.Pos, nil) - ret.Results.Set1(call) + ret.Results = []ir.Node{call} fn.Body.Append(ret) } else { fn.Body.Append(call) @@ -1277,7 +1277,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { typecheckFunc(fn) Curfn = fn - typecheckslice(fn.Body.Slice(), ctxStmt) + typecheckslice(fn.Body, ctxStmt) // Inline calls within (*T).M wrappers. This is safe because we only // generate those wrappers within the same compilation unit as (T).M. diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index da781e6f45cdc..ab241a38138b8 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -16,7 +16,7 @@ import ( // typecheckswitch typechecks a switch statement. func typecheckswitch(n *ir.SwitchStmt) { - typecheckslice(n.Init().Slice(), ctxStmt) + typecheckslice(n.Init(), ctxStmt) if n.Tag != nil && n.Tag.Op() == ir.OTYPESW { typecheckTypeSwitch(n) } else { @@ -36,15 +36,15 @@ func typecheckTypeSwitch(n *ir.SwitchStmt) { // We don't actually declare the type switch's guarded // declaration itself. So if there are no cases, we won't // notice that it went unused. - if v := guard.Tag; v != nil && !ir.IsBlank(v) && n.Cases.Len() == 0 { + if v := guard.Tag; v != nil && !ir.IsBlank(v) && len(n.Cases) == 0 { base.ErrorfAt(v.Pos(), "%v declared but not used", v.Sym()) } var defCase, nilCase ir.Node var ts typeSet - for _, ncase := range n.Cases.Slice() { + for _, ncase := range n.Cases { ncase := ncase.(*ir.CaseStmt) - ls := ncase.List.Slice() + ls := ncase.List if len(ls) == 0 { // default: if defCase != nil { base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase)) @@ -91,7 +91,7 @@ func typecheckTypeSwitch(n *ir.SwitchStmt) { ts.add(ncase.Pos(), n1.Type()) } - if ncase.Vars.Len() != 0 { + if len(ncase.Vars) != 0 { // Assign the clause variable's type. vt := t if len(ls) == 1 { @@ -104,7 +104,7 @@ func typecheckTypeSwitch(n *ir.SwitchStmt) { } } - nvar := ncase.Vars.First() + nvar := ncase.Vars[0] nvar.SetType(vt) if vt != nil { nvar = typecheck(nvar, ctxExpr|ctxAssign) @@ -113,10 +113,10 @@ func typecheckTypeSwitch(n *ir.SwitchStmt) { nvar.SetTypecheck(1) nvar.SetWalkdef(1) } - ncase.Vars.SetFirst(nvar) + ncase.Vars[0] = nvar } - typecheckslice(ncase.Body.Slice(), ctxStmt) + typecheckslice(ncase.Body, ctxStmt) } } @@ -178,9 +178,9 @@ func typecheckExprSwitch(n *ir.SwitchStmt) { var defCase ir.Node var cs constSet - for _, ncase := range n.Cases.Slice() { + for _, ncase := range n.Cases { ncase := ncase.(*ir.CaseStmt) - ls := ncase.List.Slice() + ls := ncase.List if len(ls) == 0 { // default: if defCase != nil { base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase)) @@ -225,14 +225,14 @@ func typecheckExprSwitch(n *ir.SwitchStmt) { } } - typecheckslice(ncase.Body.Slice(), ctxStmt) + typecheckslice(ncase.Body, ctxStmt) } } // walkswitch walks a switch statement. 
func walkswitch(sw *ir.SwitchStmt) { // Guard against double walk, see #25776. - if sw.Cases.Len() == 0 && sw.Compiled.Len() > 0 { + if len(sw.Cases) == 0 && len(sw.Compiled) > 0 { return // Was fatal, but eliminating every possible source of double-walking is hard } @@ -283,27 +283,27 @@ func walkExprSwitch(sw *ir.SwitchStmt) { var defaultGoto ir.Node var body ir.Nodes - for _, ncase := range sw.Cases.Slice() { + for _, ncase := range sw.Cases { ncase := ncase.(*ir.CaseStmt) label := autolabel(".s") jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, label) // Process case dispatch. - if ncase.List.Len() == 0 { + if len(ncase.List) == 0 { if defaultGoto != nil { base.Fatalf("duplicate default case not detected during typechecking") } defaultGoto = jmp } - for _, n1 := range ncase.List.Slice() { + for _, n1 := range ncase.List { s.Add(ncase.Pos(), n1, jmp) } // Process body. body.Append(ir.NewLabelStmt(ncase.Pos(), label)) - body.Append(ncase.Body.Slice()...) - if fall, pos := endsInFallthrough(ncase.Body.Slice()); !fall { + body.Append(ncase.Body...) + if fall, pos := endsInFallthrough(ncase.Body); !fall { br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil) br.SetPos(pos) body.Append(br) @@ -319,8 +319,8 @@ func walkExprSwitch(sw *ir.SwitchStmt) { s.Emit(&sw.Compiled) sw.Compiled.Append(defaultGoto) - sw.Compiled.AppendNodes(&body) - walkstmtlist(sw.Compiled.Slice()) + sw.Compiled.Append(body.Take()...) + walkstmtlist(sw.Compiled) } // An exprSwitch walks an expression switch. @@ -351,7 +351,7 @@ func (s *exprSwitch) Add(pos src.XPos, expr, jmp ir.Node) { func (s *exprSwitch) Emit(out *ir.Nodes) { s.flush() - out.AppendNodes(&s.done) + out.Append(s.done.Take()...) } func (s *exprSwitch) flush() { @@ -438,7 +438,7 @@ func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) { func(i int, nif *ir.IfStmt) { c := &cc[i] nif.Cond = c.test(s.exprname) - nif.Body.Set1(c.jmp) + nif.Body = []ir.Node{c.jmp} }, ) } @@ -471,9 +471,9 @@ func allCaseExprsAreSideEffectFree(sw *ir.SwitchStmt) bool { // Restricting to constants is simple and probably powerful // enough. - for _, ncase := range sw.Cases.Slice() { + for _, ncase := range sw.Cases { ncase := ncase.(*ir.CaseStmt) - for _, v := range ncase.List.Slice() { + for _, v := range ncase.List { if v.Op() != ir.OLITERAL { return false } @@ -545,33 +545,33 @@ func walkTypeSwitch(sw *ir.SwitchStmt) { br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil) var defaultGoto, nilGoto ir.Node var body ir.Nodes - for _, ncase := range sw.Cases.Slice() { + for _, ncase := range sw.Cases { ncase := ncase.(*ir.CaseStmt) var caseVar ir.Node - if ncase.Vars.Len() != 0 { - caseVar = ncase.Vars.First() + if len(ncase.Vars) != 0 { + caseVar = ncase.Vars[0] } // For single-type cases with an interface type, // we initialize the case variable as part of the type assertion. // In other cases, we initialize it in the body. 
var singleType *types.Type - if ncase.List.Len() == 1 && ncase.List.First().Op() == ir.OTYPE { - singleType = ncase.List.First().Type() + if len(ncase.List) == 1 && ncase.List[0].Op() == ir.OTYPE { + singleType = ncase.List[0].Type() } caseVarInitialized := false label := autolabel(".s") jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, label) - if ncase.List.Len() == 0 { // default: + if len(ncase.List) == 0 { // default: if defaultGoto != nil { base.Fatalf("duplicate default case not detected during typechecking") } defaultGoto = jmp } - for _, n1 := range ncase.List.Slice() { + for _, n1 := range ncase.List { if ir.IsNil(n1) { // case nil: if nilGoto != nil { base.Fatalf("duplicate nil case not detected during typechecking") @@ -605,7 +605,7 @@ func walkTypeSwitch(sw *ir.SwitchStmt) { typecheckslice(l, ctxStmt) body.Append(l...) } - body.Append(ncase.Body.Slice()...) + body.Append(ncase.Body...) body.Append(br) } sw.Cases.Set(nil) @@ -616,13 +616,13 @@ func walkTypeSwitch(sw *ir.SwitchStmt) { if nilGoto == nil { nilGoto = defaultGoto } - ifNil.Body.Set1(nilGoto) + ifNil.Body = []ir.Node{nilGoto} s.Emit(&sw.Compiled) sw.Compiled.Append(defaultGoto) - sw.Compiled.AppendNodes(&body) + sw.Compiled.Append(body.Take()...) - walkstmtlist(sw.Compiled.Slice()) + walkstmtlist(sw.Compiled) } // A typeSwitch walks a type switch. @@ -656,16 +656,16 @@ func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp ir.Node) { // cv, ok = iface.(type) as := ir.NewAssignListStmt(pos, ir.OAS2, nil, nil) - as.Lhs.Set2(caseVar, s.okname) // cv, ok = + as.Lhs = []ir.Node{caseVar, s.okname} // cv, ok = dot := ir.NewTypeAssertExpr(pos, s.facename, nil) dot.SetType(typ) // iface.(type) - as.Rhs.Set1(dot) + as.Rhs = []ir.Node{dot} appendWalkStmt(&body, as) // if ok { goto label } nif := ir.NewIfStmt(pos, nil, nil, nil) nif.Cond = s.okname - nif.Body.Set1(jmp) + nif.Body = []ir.Node{jmp} body.Append(nif) if !typ.IsInterface() { @@ -677,12 +677,12 @@ func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp ir.Node) { } s.flush() - s.done.AppendNodes(&body) + s.done.Append(body.Take()...) } func (s *typeSwitch) Emit(out *ir.Nodes) { s.flush() - out.AppendNodes(&s.done) + out.Append(s.done.Take()...) } func (s *typeSwitch) flush() { @@ -699,7 +699,7 @@ func (s *typeSwitch) flush() { for _, c := range cc[1:] { last := &merged[len(merged)-1] if last.hash == c.hash { - last.body.AppendNodes(&c.body) + last.body.Append(c.body.Take()...) } else { merged = append(merged, c) } @@ -715,7 +715,7 @@ func (s *typeSwitch) flush() { // there's only one type. c := cc[i] nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, s.hashname, nodintconst(int64(c.hash))) - nif.Body.AppendNodes(&c.body) + nif.Body.Append(c.body.Take()...) }, ) } diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 73fb6bb1c1f92..f2e5728d80ab2 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -1027,7 +1027,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } if r.Op() == ir.OADDSTR { r := r.(*ir.AddStringExpr) - add.List.AppendNodes(&r.List) + add.List.Append(r.List.Take()...) 
} else { add.List.Append(r) } @@ -1355,13 +1355,13 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { base.Fatalf("need unsafe.Pointer for OSLICEHEADER") } - if x := n.LenCap.Len(); x != 2 { + if x := len(n.LenCap); x != 2 { base.Fatalf("expected 2 params (len, cap) for OSLICEHEADER, got %d", x) } n.Ptr = typecheck(n.Ptr, ctxExpr) - l := typecheck(n.LenCap.First(), ctxExpr) - c := typecheck(n.LenCap.Second(), ctxExpr) + l := typecheck(n.LenCap[0], ctxExpr) + c := typecheck(n.LenCap[1], ctxExpr) l = defaultlit(l, types.Types[types.TINT]) c = defaultlit(c, types.Types[types.TINT]) @@ -1377,8 +1377,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { base.Fatalf("len larger than cap for OSLICEHEADER") } - n.LenCap.SetFirst(l) - n.LenCap.SetSecond(c) + n.LenCap[0] = l + n.LenCap[1] = c return n case ir.OMAKESLICECOPY: @@ -1506,7 +1506,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { if top == ctxStmt { n.Use = ir.CallUseStmt } - typecheckslice(n.Init().Slice(), ctxStmt) // imported rewritten f(g()) calls (#30907) + typecheckslice(n.Init(), ctxStmt) // imported rewritten f(g()) calls (#30907) n.X = typecheck(n.X, ctxExpr|ctxType|ctxCallee) if n.X.Diag() { n.SetDiag(true) @@ -1541,7 +1541,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } u := ir.NewUnaryExpr(n.Pos(), l.BuiltinOp, arg) - return typecheck(initExpr(n.Init().Slice(), u), top) // typecheckargs can add to old.Init + return typecheck(initExpr(n.Init(), u), top) // typecheckargs can add to old.Init case ir.OCOMPLEX, ir.OCOPY: typecheckargs(n) @@ -1551,7 +1551,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } b := ir.NewBinaryExpr(n.Pos(), l.BuiltinOp, arg1, arg2) - return typecheck(initExpr(n.Init().Slice(), b), top) // typecheckargs can add to old.Init + return typecheck(initExpr(n.Init(), b), top) // typecheckargs can add to old.Init } panic("unreachable") } @@ -1777,46 +1777,46 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n := n.(*ir.CallExpr) typecheckargs(n) args := n.Args - if args.Len() == 0 { + if len(args) == 0 { base.Errorf("missing arguments to delete") n.SetType(nil) return n } - if args.Len() == 1 { + if len(args) == 1 { base.Errorf("missing second (key) argument to delete") n.SetType(nil) return n } - if args.Len() != 2 { + if len(args) != 2 { base.Errorf("too many arguments to delete") n.SetType(nil) return n } - l := args.First() - r := args.Second() + l := args[0] + r := args[1] if l.Type() != nil && !l.Type().IsMap() { base.Errorf("first argument to delete must be map; have %L", l.Type()) n.SetType(nil) return n } - args.SetSecond(assignconv(r, l.Type().Key(), "delete")) + args[1] = assignconv(r, l.Type().Key(), "delete") return n case ir.OAPPEND: n := n.(*ir.CallExpr) typecheckargs(n) args := n.Args - if args.Len() == 0 { + if len(args) == 0 { base.Errorf("missing arguments to append") n.SetType(nil) return n } - t := args.First().Type() + t := args[0].Type() if t == nil { n.SetType(nil) return n @@ -1824,7 +1824,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetType(t) if !t.IsSlice() { - if ir.IsNil(args.First()) { + if ir.IsNil(args[0]) { base.Errorf("first argument to append must be typed slice; have untyped nil") n.SetType(nil) return n @@ -1836,28 +1836,28 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } if n.IsDDD { - if args.Len() == 1 { + if len(args) == 1 { base.Errorf("cannot use ... 
on first argument to append") n.SetType(nil) return n } - if args.Len() != 2 { + if len(args) != 2 { base.Errorf("too many arguments to append") n.SetType(nil) return n } - if t.Elem().IsKind(types.TUINT8) && args.Second().Type().IsString() { - args.SetSecond(defaultlit(args.Second(), types.Types[types.TSTRING])) + if t.Elem().IsKind(types.TUINT8) && args[1].Type().IsString() { + args[1] = defaultlit(args[1], types.Types[types.TSTRING]) return n } - args.SetSecond(assignconv(args.Second(), t.Underlying(), "append")) + args[1] = assignconv(args[1], t.Underlying(), "append") return n } - as := args.Slice()[1:] + as := args[1:] for i, n := range as { if n.Type() == nil { continue @@ -1955,7 +1955,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OMAKE: n := n.(*ir.CallExpr) - args := n.Args.Slice() + args := n.Args if len(args) == 0 { base.Errorf("missing argument to make") n.SetType(nil) @@ -2082,7 +2082,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OPRINT, ir.OPRINTN: n := n.(*ir.CallExpr) typecheckargs(n) - ls := n.Args.Slice() + ls := n.Args for i1, n1 := range ls { // Special case for print: int constant is int64, not int. if ir.IsConst(n1, constant.Int) { @@ -2105,7 +2105,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.ORECOVER: n := n.(*ir.CallExpr) - if n.Args.Len() != 0 { + if len(n.Args) != 0 { base.Errorf("too many arguments to recover") n.SetType(nil) return n @@ -2201,7 +2201,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OBLOCK: n := n.(*ir.BlockStmt) - typecheckslice(n.List.Slice(), ctxStmt) + typecheckslice(n.List, ctxStmt) return n case ir.OLABEL: @@ -2224,7 +2224,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OFOR, ir.OFORUNTIL: n := n.(*ir.ForStmt) - typecheckslice(n.Init().Slice(), ctxStmt) + typecheckslice(n.Init(), ctxStmt) decldepth++ n.Cond = typecheck(n.Cond, ctxExpr) n.Cond = defaultlit(n.Cond, nil) @@ -2236,15 +2236,15 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } n.Post = typecheck(n.Post, ctxStmt) if n.Op() == ir.OFORUNTIL { - typecheckslice(n.Late.Slice(), ctxStmt) + typecheckslice(n.Late, ctxStmt) } - typecheckslice(n.Body.Slice(), ctxStmt) + typecheckslice(n.Body, ctxStmt) decldepth-- return n case ir.OIF: n := n.(*ir.IfStmt) - typecheckslice(n.Init().Slice(), ctxStmt) + typecheckslice(n.Init(), ctxStmt) n.Cond = typecheck(n.Cond, ctxExpr) n.Cond = defaultlit(n.Cond, nil) if n.Cond != nil { @@ -2253,8 +2253,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { base.Errorf("non-bool %L used as if condition", n.Cond) } } - typecheckslice(n.Body.Slice(), ctxStmt) - typecheckslice(n.Else.Slice(), ctxStmt) + typecheckslice(n.Body, ctxStmt) + typecheckslice(n.Else, ctxStmt) return n case ir.ORETURN: @@ -2266,7 +2266,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } - if hasNamedResults(Curfn) && n.Results.Len() == 0 { + if hasNamedResults(Curfn) && len(n.Results) == 0 { return n } typecheckaste(ir.ORETURN, nil, false, Curfn.Type().Results(), n.Results, func() string { return "return argument" }) @@ -2321,13 +2321,13 @@ func typecheckargs(n ir.Node) { default: base.Fatalf("typecheckargs %+v", n.Op()) case *ir.CallExpr: - list = n.Args.Slice() + list = n.Args if n.IsDDD { typecheckslice(list, ctxExpr) return } case *ir.ReturnStmt: - list = n.Results.Slice() + list = n.Results } if len(list) != 1 { typecheckslice(list, ctxExpr) @@ -2493,31 +2493,31 @@ func implicitstar(n ir.Node) ir.Node { } func needOneArg(n *ir.CallExpr, f string, args ...interface{}) 
(ir.Node, bool) { - if n.Args.Len() == 0 { + if len(n.Args) == 0 { p := fmt.Sprintf(f, args...) base.Errorf("missing argument to %s: %v", p, n) return nil, false } - if n.Args.Len() > 1 { + if len(n.Args) > 1 { p := fmt.Sprintf(f, args...) base.Errorf("too many arguments to %s: %v", p, n) - return n.Args.First(), false + return n.Args[0], false } - return n.Args.First(), true + return n.Args[0], true } func needTwoArgs(n *ir.CallExpr) (ir.Node, ir.Node, bool) { - if n.Args.Len() != 2 { - if n.Args.Len() < 2 { + if len(n.Args) != 2 { + if len(n.Args) < 2 { base.Errorf("not enough arguments in call to %v", n) } else { base.Errorf("too many arguments in call to %v", n) } return nil, nil, false } - return n.Args.First(), n.Args.Second(), true + return n.Args[0], n.Args[1], true } func lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field { @@ -2741,7 +2741,7 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { } func nokeys(l ir.Nodes) bool { - for _, n := range l.Slice() { + for _, n := range l { if n.Op() == ir.OKEY || n.Op() == ir.OSTRUCTKEY { return false } @@ -2772,12 +2772,12 @@ func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl i } var n ir.Node - if nl.Len() == 1 { - n = nl.First() + if len(nl) == 1 { + n = nl[0] } n1 := tstruct.NumFields() - n2 := nl.Len() + n2 := len(nl) if !hasddd(tstruct) { if n2 > n1 { goto toomany @@ -2805,43 +2805,43 @@ func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl i t = tl.Type if tl.IsDDD() { if isddd { - if i >= nl.Len() { + if i >= len(nl) { goto notenough } - if nl.Len()-i > 1 { + if len(nl)-i > 1 { goto toomany } - n = nl.Index(i) + n = nl[i] setlineno(n) if n.Type() != nil { - nl.SetIndex(i, assignconvfn(n, t, desc)) + nl[i] = assignconvfn(n, t, desc) } return } // TODO(mdempsky): Make into ... call with implicit slice. - for ; i < nl.Len(); i++ { - n = nl.Index(i) + for ; i < len(nl); i++ { + n = nl[i] setlineno(n) if n.Type() != nil { - nl.SetIndex(i, assignconvfn(n, t.Elem(), desc)) + nl[i] = assignconvfn(n, t.Elem(), desc) } } return } - if i >= nl.Len() { + if i >= len(nl) { goto notenough } - n = nl.Index(i) + n = nl[i] setlineno(n) if n.Type() != nil { - nl.SetIndex(i, assignconvfn(n, t, desc)) + nl[i] = assignconvfn(n, t, desc) } i++ } - if i < nl.Len() { + if i < len(nl) { goto toomany } if isddd { @@ -2891,7 +2891,7 @@ func errorDetails(nl ir.Nodes, tstruct *types.Type, isddd bool) string { return "" } // If any node has an unknown type, suppress it as well - for _, n := range nl.Slice() { + for _, n := range nl { if n.Type() == nil { return "" } @@ -2929,13 +2929,13 @@ func sigrepr(t *types.Type, isddd bool) string { // sigerr returns the signature of the types at the call or return. 
func fmtSignature(nl ir.Nodes, isddd bool) string { - if nl.Len() < 1 { + if len(nl) < 1 { return "()" } var typeStrings []string - for i, n := range nl.Slice() { - isdddArg := isddd && i == nl.Len()-1 + for i, n := range nl { + isdddArg := isddd && i == len(nl)-1 typeStrings = append(typeStrings, sigrepr(n.Type(), isdddArg)) } @@ -3019,7 +3019,7 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { n.SetType(nil) return n } - length := typecheckarraylit(elemType, -1, n.List.Slice(), "array literal") + length := typecheckarraylit(elemType, -1, n.List, "array literal") n.SetOp(ir.OARRAYLIT) n.SetType(types.NewArray(elemType, length)) n.Ntype = nil @@ -3040,22 +3040,22 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { n.SetType(nil) case types.TARRAY: - typecheckarraylit(t.Elem(), t.NumElem(), n.List.Slice(), "array literal") + typecheckarraylit(t.Elem(), t.NumElem(), n.List, "array literal") n.SetOp(ir.OARRAYLIT) n.Ntype = nil case types.TSLICE: - length := typecheckarraylit(t.Elem(), -1, n.List.Slice(), "slice literal") + length := typecheckarraylit(t.Elem(), -1, n.List, "slice literal") n.SetOp(ir.OSLICELIT) n.Ntype = nil n.Len = length case types.TMAP: var cs constSet - for i3, l := range n.List.Slice() { + for i3, l := range n.List { setlineno(l) if l.Op() != ir.OKEY { - n.List.SetIndex(i3, typecheck(l, ctxExpr)) + n.List[i3] = typecheck(l, ctxExpr) base.Errorf("missing key in map literal") continue } @@ -3081,9 +3081,9 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { dowidth(t) errored := false - if n.List.Len() != 0 && nokeys(n.List) { + if len(n.List) != 0 && nokeys(n.List) { // simple list of variables - ls := n.List.Slice() + ls := n.List for i, n1 := range ls { setlineno(n1) n1 = typecheck(n1, ctxExpr) @@ -3114,7 +3114,7 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { hash := make(map[string]bool) // keyed list - ls := n.List.Slice() + ls := n.List for i, l := range ls { setlineno(l) @@ -3355,7 +3355,7 @@ func checkassign(stmt ir.Node, n ir.Node) { } func checkassignlist(stmt ir.Node, l ir.Nodes) { - for _, n := range l.Slice() { + for _, n := range l { checkassign(stmt, n) } } @@ -3497,7 +3497,7 @@ func typecheckas2(n *ir.AssignListStmt) { defer tracePrint("typecheckas2", n)(nil) } - ls := n.Lhs.Slice() + ls := n.Lhs for i1, n1 := range ls { // delicate little dance. 
n1 = resolve(n1) @@ -3508,12 +3508,12 @@ func typecheckas2(n *ir.AssignListStmt) { } } - cl := n.Lhs.Len() - cr := n.Rhs.Len() + cl := len(n.Lhs) + cr := len(n.Rhs) if cl > 1 && cr == 1 { - n.Rhs.SetFirst(typecheck(n.Rhs.First(), ctxExpr|ctxMultiOK)) + n.Rhs[0] = typecheck(n.Rhs[0], ctxExpr|ctxMultiOK) } else { - typecheckslice(n.Rhs.Slice(), ctxExpr) + typecheckslice(n.Rhs, ctxExpr) } checkassignlist(n, n.Lhs) @@ -3521,8 +3521,8 @@ func typecheckas2(n *ir.AssignListStmt) { var r ir.Node if cl == cr { // easy - ls := n.Lhs.Slice() - rs := n.Rhs.Slice() + ls := n.Lhs + rs := n.Rhs for il, nl := range ls { nr := rs[il] if nl.Type() != nil && nr.Type() != nil { @@ -3537,8 +3537,8 @@ func typecheckas2(n *ir.AssignListStmt) { goto out } - l = n.Lhs.First() - r = n.Rhs.First() + l = n.Lhs[0] + r = n.Rhs[0] // x,y,z = f() if cr == 1 { @@ -3556,7 +3556,7 @@ func typecheckas2(n *ir.AssignListStmt) { } r.(*ir.CallExpr).Use = ir.CallUseList n.SetOp(ir.OAS2FUNC) - for i, l := range n.Lhs.Slice() { + for i, l := range n.Lhs { f := r.Type().Field(i) if f.Type != nil && l.Type() != nil { checkassignto(f.Type, l) @@ -3592,7 +3592,7 @@ func typecheckas2(n *ir.AssignListStmt) { if ir.DeclaredBy(l, n) { l.SetType(r.Type()) } - l := n.Lhs.Second() + l := n.Lhs[1] if l.Type() != nil && !l.Type().IsBoolean() { checkassignto(types.Types[types.TBOOL], l) } @@ -3615,7 +3615,7 @@ mismatch: // second half of dance out: n.SetTypecheck(1) - ls = n.Lhs.Slice() + ls = n.Lhs for i1, n1 := range ls { if n1.Typecheck() == 0 { ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign) @@ -4019,7 +4019,7 @@ func setHasBreak(n ir.Node) { // isTermNodes reports whether the Nodes list ends with a terminating statement. func isTermNodes(l ir.Nodes) bool { - s := l.Slice() + s := l c := len(s) if c == 0 { return false @@ -4063,12 +4063,12 @@ func isTermNode(n ir.Node) bool { return false } def := false - for _, cas := range n.Cases.Slice() { + for _, cas := range n.Cases { cas := cas.(*ir.CaseStmt) if !isTermNodes(cas.Body) { return false } - if cas.List.Len() == 0 { // default + if len(cas.List) == 0 { // default def = true } } @@ -4079,7 +4079,7 @@ func isTermNode(n ir.Node) bool { if n.HasBreak { return false } - for _, cas := range n.Cases.Slice() { + for _, cas := range n.Cases { cas := cas.(*ir.CaseStmt) if !isTermNodes(cas.Body) { return false @@ -4093,7 +4093,7 @@ func isTermNode(n ir.Node) bool { // checkreturn makes sure that fn terminates appropriately. 
func checkreturn(fn *ir.Func) { - if fn.Type().NumResults() != 0 && fn.Body.Len() != 0 { + if fn.Type().NumResults() != 0 && len(fn.Body) != 0 { markBreak(fn) if !isTermNodes(fn.Body) { base.ErrorfAt(fn.Endlineno, "missing return at end of function") @@ -4104,18 +4104,18 @@ func checkreturn(fn *ir.Func) { func deadcode(fn *ir.Func) { deadcodeslice(&fn.Body) - if fn.Body.Len() == 0 { + if len(fn.Body) == 0 { return } - for _, n := range fn.Body.Slice() { - if n.Init().Len() > 0 { + for _, n := range fn.Body { + if len(n.Init()) > 0 { return } switch n.Op() { case ir.OIF: n := n.(*ir.IfStmt) - if !ir.IsConst(n.Cond, constant.Bool) || n.Body.Len() > 0 || n.Else.Len() > 0 { + if !ir.IsConst(n.Cond, constant.Bool) || len(n.Body) > 0 || len(n.Else) > 0 { return } case ir.OFOR: @@ -4133,12 +4133,12 @@ func deadcode(fn *ir.Func) { func deadcodeslice(nn *ir.Nodes) { var lastLabel = -1 - for i, n := range nn.Slice() { + for i, n := range *nn { if n != nil && n.Op() == ir.OLABEL { lastLabel = i } } - for i, n := range nn.Slice() { + for i, n := range *nn { // Cut is set to true when all nodes after i'th position // should be removed. // In other words, it marks whole slice "tail" as dead. @@ -4163,7 +4163,7 @@ func deadcodeslice(nn *ir.Nodes) { // isterminating is not used to avoid goto-related complications. // We must be careful not to deadcode-remove labels, as they // might be the target of a goto. See issue 28616. - if body := body.Slice(); len(body) != 0 { + if body := body; len(body) != 0 { switch body[(len(body) - 1)].Op() { case ir.ORETURN, ir.ORETJMP, ir.OPANIC: if i > lastLabel { @@ -4201,7 +4201,7 @@ func deadcodeslice(nn *ir.Nodes) { } if cut { - nn.Set(nn.Slice()[:i+1]) + nn.Set((*nn)[:i+1]) break } } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 3fd6c97d68308..610c6b6539c82 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -72,7 +72,7 @@ func walk(fn *ir.Func) { if base.Errors() > errorsBefore { return } - walkstmtlist(Curfn.Body.Slice()) + walkstmtlist(Curfn.Body) if base.Flag.W != 0 { s := fmt.Sprintf("after walk %v", Curfn.Sym()) ir.DumpList(s, Curfn.Body) @@ -80,7 +80,7 @@ func walk(fn *ir.Func) { zeroResults() heapmoves() - if base.Flag.W != 0 && Curfn.Enter.Len() > 0 { + if base.Flag.W != 0 && len(Curfn.Enter) > 0 { s := fmt.Sprintf("enter %v", Curfn.Sym()) ir.DumpList(s, Curfn.Enter) } @@ -122,7 +122,7 @@ func walkstmt(n ir.Node) ir.Node { setlineno(n) - walkstmtlist(n.Init().Slice()) + walkstmtlist(n.Init()) switch n.Op() { default: @@ -164,17 +164,17 @@ func walkstmt(n ir.Node) ir.Node { if n.Op() == ir.ONAME { // copy rewrote to a statement list and a temp for the length. // Throw away the temp to avoid plain values as statements. - n = ir.NewBlockStmt(n.Pos(), init.Slice()) + n = ir.NewBlockStmt(n.Pos(), init) init.Set(nil) } - if init.Len() > 0 { + if len(init) > 0 { switch n.Op() { case ir.OAS, ir.OAS2, ir.OBLOCK: - n.PtrInit().Prepend(init.Slice()...) + n.PtrInit().Prepend(init...) 
default: init.Append(n) - n = ir.NewBlockStmt(n.Pos(), init.Slice()) + n = ir.NewBlockStmt(n.Pos(), init) } } return n @@ -191,7 +191,7 @@ func walkstmt(n ir.Node) ir.Node { n.X = walkexpr(n.X, &init) call := walkexpr(mkcall1(chanfn("chanrecv1", 2, n.X.Type()), nil, &init, n.X, nodnil()), &init) - return initExpr(init.Slice(), call) + return initExpr(init, call) case ir.OBREAK, ir.OCONTINUE, @@ -221,7 +221,7 @@ func walkstmt(n ir.Node) ir.Node { case ir.OBLOCK: n := n.(*ir.BlockStmt) - walkstmtlist(n.List.Slice()) + walkstmtlist(n.List) return n case ir.OCASE: @@ -254,7 +254,7 @@ func walkstmt(n ir.Node) ir.Node { case ir.ODELETE: call := call.(*ir.CallExpr) - if mapfast(call.Args.First().Type()) == mapslow { + if mapfast(call.Args[0].Type()) == mapslow { n.Call = wrapCall(call, &init) } else { n.Call = walkexpr(call, &init) @@ -266,7 +266,7 @@ func walkstmt(n ir.Node) ir.Node { case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: call := call.(*ir.CallExpr) - if call.Body.Len() > 0 { + if len(call.Body) > 0 { n.Call = wrapCall(call, &init) } else { n.Call = walkexpr(call, &init) @@ -275,43 +275,43 @@ func walkstmt(n ir.Node) ir.Node { default: n.Call = walkexpr(call, &init) } - if init.Len() > 0 { + if len(init) > 0 { init.Append(n) - return ir.NewBlockStmt(n.Pos(), init.Slice()) + return ir.NewBlockStmt(n.Pos(), init) } return n case ir.OFOR, ir.OFORUNTIL: n := n.(*ir.ForStmt) if n.Cond != nil { - walkstmtlist(n.Cond.Init().Slice()) + walkstmtlist(n.Cond.Init()) init := n.Cond.Init() n.Cond.PtrInit().Set(nil) n.Cond = walkexpr(n.Cond, &init) - n.Cond = initExpr(init.Slice(), n.Cond) + n.Cond = initExpr(init, n.Cond) } n.Post = walkstmt(n.Post) if n.Op() == ir.OFORUNTIL { - walkstmtlist(n.Late.Slice()) + walkstmtlist(n.Late) } - walkstmtlist(n.Body.Slice()) + walkstmtlist(n.Body) return n case ir.OIF: n := n.(*ir.IfStmt) n.Cond = walkexpr(n.Cond, n.PtrInit()) - walkstmtlist(n.Body.Slice()) - walkstmtlist(n.Else.Slice()) + walkstmtlist(n.Body) + walkstmtlist(n.Else) return n case ir.ORETURN: n := n.(*ir.ReturnStmt) Curfn.NumReturns++ - if n.Results.Len() == 0 { + if len(n.Results) == 0 { return n } - if (hasNamedResults(Curfn) && n.Results.Len() > 1) || paramoutheap(Curfn) { + if (hasNamedResults(Curfn) && len(n.Results) > 1) || paramoutheap(Curfn) { // assign to the function out parameters, // so that ascompatee can fix up conflicts var rl []ir.Node @@ -330,23 +330,23 @@ func walkstmt(n ir.Node) ir.Node { } } - if got, want := n.Results.Len(), len(rl); got != want { + if got, want := len(n.Results), len(rl); got != want { // order should have rewritten multi-value function calls // with explicit OAS2FUNC nodes. base.Fatalf("expected %v return arguments, have %v", want, got) } // move function calls out, to make ascompatee's job easier. - walkexprlistsafe(n.Results.Slice(), n.PtrInit()) + walkexprlistsafe(n.Results, n.PtrInit()) - n.Results.Set(ascompatee(n.Op(), rl, n.Results.Slice(), n.PtrInit())) + n.Results.Set(ascompatee(n.Op(), rl, n.Results, n.PtrInit())) return n } - walkexprlist(n.Results.Slice(), n.PtrInit()) + walkexprlist(n.Results, n.PtrInit()) // For each return parameter (lhs), assign the corresponding result (rhs). 
lhs := Curfn.Type().Results() - rhs := n.Results.Slice() + rhs := n.Results res := make([]ir.Node, lhs.NumFields()) for i, nl := range lhs.FieldSlice() { nname := ir.AsNode(nl.Nname) @@ -480,9 +480,9 @@ func walkexpr(n ir.Node, init *ir.Nodes) ir.Node { base.Fatalf("walkexpr init == &n->ninit") } - if n.Init().Len() != 0 { - walkstmtlist(n.Init().Slice()) - init.AppendNodes(n.PtrInit()) + if len(n.Init()) != 0 { + walkstmtlist(n.Init()) + init.Append(n.PtrInit().Take()...) } lno := setlineno(n) @@ -595,7 +595,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { n.Ntype.(*ir.AddrExpr).Alloc = typename(n.X.Type()) } if !n.Type().IsInterface() && !n.X.Type().IsEmptyInterface() { - n.Itab.Set1(itabname(n.Type(), n.X.Type())) + n.Itab = []ir.Node{itabname(n.Type(), n.X.Type())} } return n @@ -643,7 +643,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { var ll ir.Nodes n.Y = walkexpr(n.Y, &ll) - n.Y = initExpr(ll.Slice(), n.Y) + n.Y = initExpr(ll, n.Y) return n case ir.OPRINT, ir.OPRINTN: @@ -673,7 +673,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // Prepend captured variables to argument list. clo := n.X.(*ir.ClosureExpr) - n.Args.Prepend(clo.Func.ClosureEnter.Slice()...) + n.Args.Prepend(clo.Func.ClosureEnter...) clo.Func.ClosureEnter.Set(nil) // Replace OCLOSURE with ONAME/PFUNC. @@ -692,7 +692,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return n case ir.OAS, ir.OASOP: - init.AppendNodes(n.PtrInit()) + init.Append(n.PtrInit().Take()...) var left, right ir.Node switch n.Op() { @@ -710,15 +710,15 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if left.Op() == ir.OINDEXMAP && right.Op() == ir.OAPPEND { left := left.(*ir.IndexExpr) mapAppend = right.(*ir.CallExpr) - if !samesafeexpr(left, mapAppend.Args.First()) { - base.Fatalf("not same expressions: %v != %v", left, mapAppend.Args.First()) + if !samesafeexpr(left, mapAppend.Args[0]) { + base.Fatalf("not same expressions: %v != %v", left, mapAppend.Args[0]) } } left = walkexpr(left, init) left = safeexpr(left, init) if mapAppend != nil { - mapAppend.Args.SetFirst(left) + mapAppend.Args[0] = left } if n.Op() == ir.OASOP { @@ -791,22 +791,22 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OAS2: n := n.(*ir.AssignListStmt) - init.AppendNodes(n.PtrInit()) - walkexprlistsafe(n.Lhs.Slice(), init) - walkexprlistsafe(n.Rhs.Slice(), init) - return liststmt(ascompatee(ir.OAS, n.Lhs.Slice(), n.Rhs.Slice(), init)) + init.Append(n.PtrInit().Take()...) + walkexprlistsafe(n.Lhs, init) + walkexprlistsafe(n.Rhs, init) + return liststmt(ascompatee(ir.OAS, n.Lhs, n.Rhs, init)) // a,b,... = fn() case ir.OAS2FUNC: n := n.(*ir.AssignListStmt) - init.AppendNodes(n.PtrInit()) + init.Append(n.PtrInit().Take()...) - r := n.Rhs.First() - walkexprlistsafe(n.Lhs.Slice(), init) + r := n.Rhs[0] + walkexprlistsafe(n.Lhs, init) r = walkexpr(r, init) if IsIntrinsicCall(r.(*ir.CallExpr)) { - n.Rhs.Set1(r) + n.Rhs = []ir.Node{r} return n } init.Append(r) @@ -818,29 +818,29 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // order.stmt made sure x is addressable or blank. case ir.OAS2RECV: n := n.(*ir.AssignListStmt) - init.AppendNodes(n.PtrInit()) + init.Append(n.PtrInit().Take()...) 
- r := n.Rhs.First().(*ir.UnaryExpr) // recv - walkexprlistsafe(n.Lhs.Slice(), init) + r := n.Rhs[0].(*ir.UnaryExpr) // recv + walkexprlistsafe(n.Lhs, init) r.X = walkexpr(r.X, init) var n1 ir.Node - if ir.IsBlank(n.Lhs.First()) { + if ir.IsBlank(n.Lhs[0]) { n1 = nodnil() } else { - n1 = nodAddr(n.Lhs.First()) + n1 = nodAddr(n.Lhs[0]) } fn := chanfn("chanrecv2", 2, r.X.Type()) - ok := n.Lhs.Second() + ok := n.Lhs[1] call := mkcall1(fn, types.Types[types.TBOOL], init, r.X, n1) return typecheck(ir.NewAssignStmt(base.Pos, ok, call), ctxStmt) // a,b = m[i] case ir.OAS2MAPR: n := n.(*ir.AssignListStmt) - init.AppendNodes(n.PtrInit()) + init.Append(n.PtrInit().Take()...) - r := n.Rhs.First().(*ir.IndexExpr) - walkexprlistsafe(n.Lhs.Slice(), init) + r := n.Rhs[0].(*ir.IndexExpr) + walkexprlistsafe(n.Lhs, init) r.X = walkexpr(r.X, init) r.Index = walkexpr(r.Index, init) t := r.X.Type() @@ -861,7 +861,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // to: // var,b = mapaccess2*(t, m, i) // a = *var - a := n.Lhs.First() + a := n.Lhs[0] var call *ir.CallExpr if w := t.Elem().Width; w <= zeroValSize { @@ -876,10 +876,10 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // mapaccess2* returns a typed bool, but due to spec changes, // the boolean result of i.(T) is now untyped so we make it the // same type as the variable on the lhs. - if ok := n.Lhs.Second(); !ir.IsBlank(ok) && ok.Type().IsBoolean() { + if ok := n.Lhs[1]; !ir.IsBlank(ok) && ok.Type().IsBoolean() { call.Type().Field(1).Type = ok.Type() } - n.Rhs.Set1(call) + n.Rhs = []ir.Node{call} n.SetOp(ir.OAS2FUNC) // don't generate a = *var if a is _ @@ -891,7 +891,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { var_.SetTypecheck(1) var_.MarkNonNil() // mapaccess always returns a non-nil pointer - n.Lhs.SetFirst(var_) + n.Lhs[0] = var_ init.Append(walkexpr(n, init)) as := ir.NewAssignStmt(base.Pos, a, ir.NewStarExpr(base.Pos, var_)) @@ -899,9 +899,9 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.ODELETE: n := n.(*ir.CallExpr) - init.AppendNodes(n.PtrInit()) - map_ := n.Args.First() - key := n.Args.Second() + init.Append(n.PtrInit().Take()...) + map_ := n.Args[0] + key := n.Args[1] map_ = walkexpr(map_, init) key = walkexpr(key, init) @@ -915,8 +915,8 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OAS2DOTTYPE: n := n.(*ir.AssignListStmt) - walkexprlistsafe(n.Lhs.Slice(), init) - (&n.Rhs).SetIndex(0, walkexpr(n.Rhs.First(), init)) + walkexprlistsafe(n.Lhs, init) + n.Rhs[0] = walkexpr(n.Rhs[0], init) return n case ir.OCONVIFACE: @@ -1013,7 +1013,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // Get the type out of the itab. nif := ir.NewIfStmt(base.Pos, typecheck(ir.NewBinaryExpr(base.Pos, ir.ONE, tmp, nodnil()), ctxExpr), nil, nil) - nif.Body.Set1(ir.NewAssignStmt(base.Pos, tmp, itabType(tmp))) + nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, tmp, itabType(tmp))} init.Append(nif) // Build the result. 
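The hunks above and below follow one mechanical pattern: once ir.Nodes is a plain []Node, l.Len() becomes len(l), l.First() becomes l[0], l.Set1(x) becomes l = []ir.Node{x}, and dst.AppendNodes(&src) becomes dst.Append(src.Take()...). A runnable sketch of the pattern with stand-in types (Node and Nodes below are illustrative stand-ins, not the real cmd/compile/internal/ir declarations):

package main

import "fmt"

type Node interface{}

type Nodes []Node // post-CL: Nodes is an ordinary slice

// Take returns the contents of n and clears n, as in the patched ir package.
func (n *Nodes) Take() []Node {
	ret := *n
	*n = nil
	return ret
}

// Append appends entries to n.
func (n *Nodes) Append(a ...Node) { *n = append(*n, a...) }

func main() {
	var init, body Nodes
	body = []Node{"stmt1", "stmt2"}   // old: body.Set2(stmt1, stmt2)
	fmt.Println(len(body), body[0])   // old: body.Len(), body.First()
	init.Append(body.Take()...)       // old: init.AppendNodes(&body)
	fmt.Println(len(init), len(body)) // prints: 2 0
}
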
@@ -1034,7 +1034,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { fn = substArgTypes(fn, fromType) dowidth(fn.Type()) call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) - call.Args.Set1(n.X) + call.Args = []ir.Node{n.X} e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), safeexpr(walkexpr(typecheck(call, ctxExpr), init), init)) e.SetType(toType) e.SetTypecheck(1) @@ -1069,7 +1069,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { fn = substArgTypes(fn, fromType, toType) dowidth(fn.Type()) call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) - call.Args.Set2(tab, v) + call.Args = []ir.Node{tab, v} return walkexpr(typecheck(call, ctxExpr), init) case ir.OCONV, ir.OCONVNOP: @@ -1245,8 +1245,8 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OSLICEHEADER: n := n.(*ir.SliceHeaderExpr) n.Ptr = walkexpr(n.Ptr, init) - n.LenCap.SetFirst(walkexpr(n.LenCap.First(), init)) - n.LenCap.SetSecond(walkexpr(n.LenCap.Second(), init)) + n.LenCap[0] = walkexpr(n.LenCap[0], init) + n.LenCap[1] = walkexpr(n.LenCap[1], init) return n case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR: @@ -1472,7 +1472,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // } nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, conv(l, types.Types[types.TUINT64]), nodintconst(i)), nil, nil) niflen := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLT, l, nodintconst(0)), nil, nil) - niflen.Body.Set1(mkcall("panicmakeslicelen", nil, init)) + niflen.Body = []ir.Node{mkcall("panicmakeslicelen", nil, init)} nif.Body.Append(niflen, mkcall("panicmakeslicecap", nil, init)) init.Append(typecheck(nif, ctxStmt)) @@ -1509,7 +1509,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { fn := syslook(fnname) m.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype)) m.Ptr.MarkNonNil() - m.LenCap.Set2(conv(len, types.Types[types.TINT]), conv(cap, types.Types[types.TINT])) + m.LenCap = []ir.Node{conv(len, types.Types[types.TINT]), conv(cap, types.Types[types.TINT])} return walkexpr(typecheck(m, ctxExpr), init) case ir.OMAKESLICECOPY: @@ -1541,7 +1541,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { sh := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil) sh.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, nodnil(), nodbool(false)) sh.Ptr.MarkNonNil() - sh.LenCap.Set2(length, length) + sh.LenCap = []ir.Node{length, length} sh.SetType(t) s := temp(t) @@ -1563,7 +1563,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { s := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil) s.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), length, copylen, conv(copyptr, types.Types[types.TUNSAFEPTR])) s.Ptr.MarkNonNil() - s.LenCap.Set2(length, length) + s.LenCap = []ir.Node{length, length} s.SetType(t) return walkexpr(typecheck(s, ctxExpr), init) @@ -1856,12 +1856,12 @@ func fncall(l ir.Node, rt *types.Type) bool { // an expression list. 
called in // expr-list = func() func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { - if nl.Len() != nr.NumFields() { - base.Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields()) + if len(nl) != nr.NumFields() { + base.Fatalf("ascompatet: assignment count mismatch: %d = %d", len(nl), nr.NumFields()) } var nn, mm ir.Nodes - for i, l := range nl.Slice() { + for i, l := range nl { if ir.IsBlank(l) { continue } @@ -1891,7 +1891,7 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { nn.Append(a) } - return append(nn.Slice(), mm.Slice()...) + return append(nn, mm...) } // package all the arguments that match a ... T parameter into a []T. @@ -1925,7 +1925,7 @@ func fixVariadicCall(call *ir.CallExpr) { vi := fntype.NumParams() - 1 vt := fntype.Params().Field(vi).Type - args := call.Args.Slice() + args := call.Args extra := args[vi:] slice := mkdotargslice(vt, extra) for i := range extra { @@ -1937,12 +1937,12 @@ func fixVariadicCall(call *ir.CallExpr) { } func walkCall(n *ir.CallExpr, init *ir.Nodes) { - if n.Rargs.Len() != 0 { + if len(n.Rargs) != 0 { return // already walked } params := n.X.Type().Params() - args := n.Args.Slice() + args := n.Args n.X = walkexpr(n.X, init) walkexprlist(args, init) @@ -1992,11 +1992,11 @@ func walkCall(n *ir.CallExpr, init *ir.Nodes) { // generate code for print func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { // Hoist all the argument evaluation up before the lock. - walkexprlistcheap(nn.Args.Slice(), init) + walkexprlistcheap(nn.Args, init) // For println, add " " between elements and "\n" at the end. if nn.Op() == ir.OPRINTN { - s := nn.Args.Slice() + s := nn.Args t := make([]ir.Node, 0, len(s)*2) for i, n := range s { if i != 0 { @@ -2009,7 +2009,7 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { } // Collapse runs of constant strings. - s := nn.Args.Slice() + s := nn.Args t := make([]ir.Node, 0, len(s)) for i := 0; i < len(s); { var strs []string @@ -2028,7 +2028,7 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { nn.Args.Set(t) calls := []ir.Node{mkcall("printlock", nil, init)} - for i, n := range nn.Args.Slice() { + for i, n := range nn.Args { if n.Op() == ir.OLITERAL { if n.Type() == types.UntypedRune { n = defaultlit(n, types.RuneType) @@ -2047,7 +2047,7 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { n = defaultlit(n, types.Types[types.TINT64]) } n = defaultlit(n, nil) - nn.Args.SetIndex(i, n) + nn.Args[i] = n if n.Type() == nil || n.Type().Kind() == types.TFORW { continue } @@ -2264,7 +2264,7 @@ func reorder3(all []*ir.AssignStmt) []ir.Node { all[i].Y = reorder3save(all[i].Y, all, i, &early) } - early = append(mapinit.Slice(), early...) + early = append(mapinit, early...) 
for _, as := range all { early = append(early, as) } @@ -2736,7 +2736,7 @@ func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node { } func addstr(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { - c := n.List.Len() + c := len(n.List) if c < 2 { base.Fatalf("addstr count %d too small", c) @@ -2745,7 +2745,7 @@ func addstr(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { buf := nodnil() if n.Esc() == EscNone { sz := int64(0) - for _, n1 := range n.List.Slice() { + for _, n1 := range n.List { if n1.Op() == ir.OLITERAL { sz += int64(len(ir.StringVal(n1))) } @@ -2761,7 +2761,7 @@ func addstr(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { // build list of string arguments args := []ir.Node{buf} - for _, n2 := range n.List.Slice() { + for _, n2 := range n.List { args = append(args, conv(n2, types.Types[types.TSTRING])) } @@ -2793,12 +2793,12 @@ func addstr(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { } func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) { - walkexprlistsafe(n.Args.Slice(), init) + walkexprlistsafe(n.Args, init) // walkexprlistsafe will leave OINDEX (s[n]) alone if both s // and n are name or literal, but those may index the slice we're // modifying here. Fix explicitly. - ls := n.Args.Slice() + ls := n.Args for i1, n1 := range ls { ls[i1] = cheapexpr(n1, init) } @@ -2821,10 +2821,10 @@ func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) { func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { walkAppendArgs(n, init) - l1 := n.Args.First() - l2 := n.Args.Second() + l1 := n.Args[0] + l2 := n.Args[1] l2 = cheapexpr(l2, init) - n.Args.SetSecond(l2) + n.Args[1] = l2 var nodes ir.Nodes @@ -2849,7 +2849,7 @@ func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { fn = substArgTypes(fn, elemtype, elemtype) // s = growslice(T, s, n) - nif.Body.Set1(ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn))) + nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn))} nodes.Append(nif) // s = s[:n] @@ -2903,7 +2903,7 @@ func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { fn = substArgTypes(fn, elemtype, elemtype) ncopy = mkcall1(fn, nil, &nodes, addr, sptr, nwid) } - ln := append(nodes.Slice(), ncopy) + ln := append(nodes, ncopy) typecheckslice(ln, ctxStmt) walkstmtlist(ln) @@ -2926,11 +2926,11 @@ func isAppendOfMake(n ir.Node) bool { return false } call := n.(*ir.CallExpr) - if !call.IsDDD || call.Args.Len() != 2 || call.Args.Second().Op() != ir.OMAKESLICE { + if !call.IsDDD || len(call.Args) != 2 || call.Args[1].Op() != ir.OMAKESLICE { return false } - mk := call.Args.Second().(*ir.MakeExpr) + mk := call.Args[1].(*ir.MakeExpr) if mk.Cap != nil { return false } @@ -2980,14 +2980,14 @@ func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { // isAppendOfMake made sure all possible positive values of l2 fit into an uint. // The case of l2 overflow when converting from e.g. uint to int is handled by an explicit // check of l2 < 0 at runtime which is generated below. - l2 := conv(n.Args.Second().(*ir.MakeExpr).Len, types.Types[types.TINT]) + l2 := conv(n.Args[1].(*ir.MakeExpr).Len, types.Types[types.TINT]) l2 = typecheck(l2, ctxExpr) - n.Args.SetSecond(l2) // walkAppendArgs expects l2 in n.List.Second(). + n.Args[1] = l2 // walkAppendArgs expects l2 in n.List.Second(). 
walkAppendArgs(n, init) - l1 := n.Args.First() - l2 = n.Args.Second() // re-read l2, as it may have been updated by walkAppendArgs + l1 := n.Args[0] + l2 = n.Args[1] // re-read l2, as it may have been updated by walkAppendArgs var nodes []ir.Node @@ -2996,7 +2996,7 @@ func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { nifneg.Likely = true // else panicmakeslicelen() - nifneg.Else.Set1(mkcall("panicmakeslicelen", nil, init)) + nifneg.Else = []ir.Node{mkcall("panicmakeslicelen", nil, init)} nodes = append(nodes, nifneg) // s := l1 @@ -3019,7 +3019,7 @@ func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { fn = substArgTypes(fn, elemtype, elemtype) // s = growslice(T, s, n) - nif.Body.Set1(ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn))) + nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn))} nodes = append(nodes, nif) // s = s[:n] @@ -3063,7 +3063,7 @@ func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { nifclr.Body = clr nodes = append(nodes, nifclr) } else { - nodes = append(nodes, clr.Slice()...) + nodes = append(nodes, clr...) } typecheckslice(nodes, ctxStmt) @@ -3094,13 +3094,13 @@ func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { // } // s func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node { - if !samesafeexpr(dst, n.Args.First()) { - n.Args.SetFirst(safeexpr(n.Args.First(), init)) - n.Args.SetFirst(walkexpr(n.Args.First(), init)) + if !samesafeexpr(dst, n.Args[0]) { + n.Args[0] = safeexpr(n.Args[0], init) + n.Args[0] = walkexpr(n.Args[0], init) } - walkexprlistsafe(n.Args.Slice()[1:], init) + walkexprlistsafe(n.Args[1:], init) - nsrc := n.Args.First() + nsrc := n.Args[0] // walkexprlistsafe will leave OINDEX (s[n]) alone if both s // and n are name or literal, but those may index the slice we're @@ -3108,7 +3108,7 @@ func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node { // Using cheapexpr also makes sure that the evaluation // of all arguments (and especially any panics) happen // before we begin to modify the slice in a visible way. - ls := n.Args.Slice()[1:] + ls := n.Args[1:] for i, n := range ls { n = cheapexpr(n, init) if !types.Identical(n.Type(), nsrc.Type().Elem()) { @@ -3118,7 +3118,7 @@ func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node { ls[i] = n } - argc := n.Args.Len() - 1 + argc := len(n.Args) - 1 if argc < 1 { return nsrc } @@ -3141,8 +3141,8 @@ func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node { fn := syslook("growslice") // growslice(, old []T, mincap int) (ret []T) fn = substArgTypes(fn, ns.Type().Elem(), ns.Type().Elem()) - nif.Body.Set1(ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), typename(ns.Type().Elem()), ns, - ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns), na)))) + nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), typename(ns.Type().Elem()), ns, + ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns), na)))} l = append(l, nif) @@ -3154,7 +3154,7 @@ func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node { slice.SetBounded(true) l = append(l, ir.NewAssignStmt(base.Pos, ns, slice)) // s = s[:n+argc] - ls = n.Args.Slice()[1:] + ls = n.Args[1:] for i, n := range ls { ix := ir.NewIndexExpr(base.Pos, ns, nn) // s[n] ... 
ix.SetBounded(true) @@ -3960,32 +3960,32 @@ var wrapCall_prgen int // The result of wrapCall MUST be assigned back to n, e.g. // n.Left = wrapCall(n.Left, init) func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node { - if n.Init().Len() != 0 { - walkstmtlist(n.Init().Slice()) - init.AppendNodes(n.PtrInit()) + if len(n.Init()) != 0 { + walkstmtlist(n.Init()) + init.Append(n.PtrInit().Take()...) } isBuiltinCall := n.Op() != ir.OCALLFUNC && n.Op() != ir.OCALLMETH && n.Op() != ir.OCALLINTER // Turn f(a, b, []T{c, d, e}...) back into f(a, b, c, d, e). if !isBuiltinCall && n.IsDDD { - last := n.Args.Len() - 1 - if va := n.Args.Index(last); va.Op() == ir.OSLICELIT { + last := len(n.Args) - 1 + if va := n.Args[last]; va.Op() == ir.OSLICELIT { va := va.(*ir.CompLitExpr) - n.Args.Set(append(n.Args.Slice()[:last], va.List.Slice()...)) + n.Args.Set(append(n.Args[:last], va.List...)) n.IsDDD = false } } // origArgs keeps track of what argument is uintptr-unsafe/unsafe-uintptr conversion. - origArgs := make([]ir.Node, n.Args.Len()) + origArgs := make([]ir.Node, len(n.Args)) var funcArgs []*ir.Field - for i, arg := range n.Args.Slice() { + for i, arg := range n.Args { s := lookupN("a", i) if !isBuiltinCall && arg.Op() == ir.OCONVNOP && arg.Type().IsUintptr() && arg.(*ir.ConvExpr).X.Type().IsUnsafePtr() { origArgs[i] = arg arg = arg.(*ir.ConvExpr).X - n.Args.SetIndex(i, arg) + n.Args[i] = arg } funcArgs = append(funcArgs, symfield(s, arg.Type())) } @@ -4007,15 +4007,15 @@ func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node { call.SetOp(ir.OCALL) call.IsDDD = n.IsDDD } - fn.Body.Set1(call) + fn.Body = []ir.Node{call} funcbody() typecheckFunc(fn) - typecheckslice(fn.Body.Slice(), ctxStmt) + typecheckslice(fn.Body, ctxStmt) Target.Decls = append(Target.Decls, fn) - call = ir.NewCallExpr(base.Pos, ir.OCALL, fn.Nname, n.Args.Slice()) + call = ir.NewCallExpr(base.Pos, ir.OCALL, fn.Nname, n.Args) return walkexpr(typecheck(call, ctxStmt), init) } diff --git a/src/cmd/compile/internal/ir/dump.go b/src/cmd/compile/internal/ir/dump.go index 9d6042f78a878..fc995cee6202f 100644 --- a/src/cmd/compile/internal/ir/dump.go +++ b/src/cmd/compile/internal/ir/dump.go @@ -222,7 +222,7 @@ func (p *dumper) dump(x reflect.Value, depth int) { omitted = true continue // exclude zero-valued fields } - if n, ok := x.Interface().(Nodes); ok && n.Len() == 0 { + if n, ok := x.Interface().(Nodes); ok && len(n) == 0 { omitted = true continue // exclude empty Nodes slices } diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 63ccaa6550859..39a408fdc79b3 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -624,16 +624,16 @@ func (n *SliceExpr) SetOp(op Op) { // SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max]. // n must be a slice expression. max is nil if n is a simple slice expression. 
func (n *SliceExpr) SliceBounds() (low, high, max Node) { - if n.List.Len() == 0 { + if len(n.List) == 0 { return nil, nil, nil } switch n.Op() { case OSLICE, OSLICEARR, OSLICESTR: - s := n.List.Slice() + s := n.List return s[0], s[1], nil case OSLICE3, OSLICE3ARR: - s := n.List.Slice() + s := n.List return s[0], s[1], s[2] } base.Fatalf("SliceBounds op %v: %v", n.Op(), n) @@ -648,24 +648,24 @@ func (n *SliceExpr) SetSliceBounds(low, high, max Node) { if max != nil { base.Fatalf("SetSliceBounds %v given three bounds", n.Op()) } - s := n.List.Slice() + s := n.List if s == nil { if low == nil && high == nil { return } - n.List.Set2(low, high) + n.List = []Node{low, high} return } s[0] = low s[1] = high return case OSLICE3, OSLICE3ARR: - s := n.List.Slice() + s := n.List if s == nil { if low == nil && high == nil && max == nil { return } - n.List.Set3(low, high, max) + n.List = []Node{low, high, max} return } s[0] = low @@ -701,7 +701,7 @@ func NewSliceHeaderExpr(pos src.XPos, typ *types.Type, ptr, len, cap Node) *Slic n.pos = pos n.op = OSLICEHEADER n.typ = typ - n.LenCap.Set2(len, cap) + n.LenCap = []Node{len, cap} return n } diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 49c4ac9a8d372..268290853910f 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -313,10 +313,10 @@ func stmtFmt(n Node, s fmt.State) { // block starting with the init statements. // if we can just say "for" n->ninit; ... then do so - simpleinit := n.Init().Len() == 1 && n.Init().First().Init().Len() == 0 && StmtWithInit(n.Op()) + simpleinit := len(n.Init()) == 1 && len(n.Init()[0].Init()) == 0 && StmtWithInit(n.Op()) // otherwise, print the inits as separate statements - complexinit := n.Init().Len() != 0 && !simpleinit && exportFormat + complexinit := len(n.Init()) != 0 && !simpleinit && exportFormat // but if it was for if/for/switch, put in an extra surrounding block to limit the scope extrablock := complexinit && StmtWithInit(n.Op()) @@ -368,7 +368,7 @@ func stmtFmt(n Node, s fmt.State) { case OBLOCK: n := n.(*BlockStmt) - if n.List.Len() != 0 { + if len(n.List) != 0 { fmt.Fprintf(s, "%v", n.List) } @@ -395,11 +395,11 @@ func stmtFmt(n Node, s fmt.State) { case OIF: n := n.(*IfStmt) if simpleinit { - fmt.Fprintf(s, "if %v; %v { %v }", n.Init().First(), n.Cond, n.Body) + fmt.Fprintf(s, "if %v; %v { %v }", n.Init()[0], n.Cond, n.Body) } else { fmt.Fprintf(s, "if %v { %v }", n.Cond, n.Body) } - if n.Else.Len() != 0 { + if len(n.Else) != 0 { fmt.Fprintf(s, " else { %v }", n.Else) } @@ -416,7 +416,7 @@ func stmtFmt(n Node, s fmt.State) { fmt.Fprint(s, opname) if simpleinit { - fmt.Fprintf(s, " %v;", n.Init().First()) + fmt.Fprintf(s, " %v;", n.Init()[0]) } else if n.Post != nil { fmt.Fprint(s, " ;") } @@ -431,7 +431,7 @@ func stmtFmt(n Node, s fmt.State) { fmt.Fprint(s, ";") } - if n.Op() == OFORUNTIL && n.Late.Len() != 0 { + if n.Op() == OFORUNTIL && len(n.Late) != 0 { fmt.Fprintf(s, "; %v", n.Late) } @@ -444,7 +444,7 @@ func stmtFmt(n Node, s fmt.State) { break } - if n.Vars.Len() == 0 { + if len(n.Vars) == 0 { fmt.Fprintf(s, "for range %v { %v }", n.X, n.Body) break } @@ -467,7 +467,7 @@ func stmtFmt(n Node, s fmt.State) { } fmt.Fprintf(s, "switch") if simpleinit { - fmt.Fprintf(s, " %v;", n.Init().First()) + fmt.Fprintf(s, " %v;", n.Init()[0]) } if n.Tag != nil { fmt.Fprintf(s, " %v ", n.Tag) @@ -476,7 +476,7 @@ func stmtFmt(n Node, s fmt.State) { case OCASE: n := n.(*CaseStmt) - if n.List.Len() != 0 { + if len(n.List) != 0 { 
fmt.Fprintf(s, "case %.v", n.List) } else { fmt.Fprint(s, "default") @@ -704,7 +704,7 @@ func exprFmt(n Node, s fmt.State, prec int) { return } if n.Ntype != nil { - fmt.Fprintf(s, "%v{%s}", n.Ntype, ellipsisIf(n.List.Len() != 0)) + fmt.Fprintf(s, "%v{%s}", n.Ntype, ellipsisIf(len(n.List) != 0)) return } @@ -720,7 +720,7 @@ func exprFmt(n Node, s fmt.State, prec int) { case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT: n := n.(*CompLitExpr) if !exportFormat { - fmt.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(n.List.Len() != 0)) + fmt.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(len(n.List) != 0)) return } fmt.Fprintf(s, "(%v{ %.v })", n.Type(), n.List) @@ -800,10 +800,10 @@ func exprFmt(n Node, s fmt.State, prec int) { case OSLICEHEADER: n := n.(*SliceHeaderExpr) - if n.LenCap.Len() != 2 { - base.Fatalf("bad OSLICEHEADER list length %d", n.LenCap.Len()) + if len(n.LenCap) != 2 { + base.Fatalf("bad OSLICEHEADER list length %d", len(n.LenCap)) } - fmt.Fprintf(s, "sliceheader{%v,%v,%v}", n.Ptr, n.LenCap.First(), n.LenCap.Second()) + fmt.Fprintf(s, "sliceheader{%v,%v,%v}", n.Ptr, n.LenCap[0], n.LenCap[1]) case OCOMPLEX, OCOPY: n := n.(*BinaryExpr) @@ -936,7 +936,7 @@ func exprFmt(n Node, s fmt.State, prec int) { case OADDSTR: n := n.(*AddStringExpr) - for i, n1 := range n.List.Slice() { + for i, n1 := range n.List { if i != 0 { fmt.Fprint(s, " + ") } @@ -980,9 +980,9 @@ func (l Nodes) Format(s fmt.State, verb rune) { sep = ", " } - for i, n := range l.Slice() { + for i, n := range l { fmt.Fprint(s, n) - if i+1 < l.Len() { + if i+1 < len(l) { fmt.Fprint(s, sep) } } @@ -1131,7 +1131,7 @@ func dumpNode(w io.Writer, n Node, depth int) { return } - if n.Init().Len() != 0 { + if len(n.Init()) != 0 { fmt.Fprintf(w, "%+v-init", n.Op()) dumpNodes(w, n.Init(), depth+1) indent(w, depth) @@ -1200,7 +1200,7 @@ func dumpNode(w io.Writer, n Node, depth int) { dumpNode(w, dcl, depth+1) } } - if fn.Body.Len() > 0 { + if len(fn.Body) > 0 { indent(w, depth) fmt.Fprintf(w, "%+v-body", n.Op()) dumpNodes(w, fn.Body, depth+1) @@ -1247,7 +1247,7 @@ func dumpNode(w io.Writer, n Node, depth int) { } dumpNode(w, val, depth+1) case Nodes: - if val.Len() == 0 { + if len(val) == 0 { continue } if name != "" { @@ -1260,12 +1260,12 @@ func dumpNode(w io.Writer, n Node, depth int) { } func dumpNodes(w io.Writer, list Nodes, depth int) { - if list.Len() == 0 { + if len(list) == 0 { fmt.Fprintf(w, " ") return } - for _, n := range list.Slice() { + for _, n := range list { dumpNode(w, n, depth) } } diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 86ef600f266ad..34b89752adc59 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -317,41 +317,6 @@ type Nodes []Node // The methods that would modify it panic instead. var immutableEmptyNodes = Nodes{} -// asNodes returns a slice of *Node as a Nodes value. -func AsNodes(s []Node) Nodes { - return s -} - -// Slice returns the entries in Nodes as a slice. -// Changes to the slice entries (as in s[i] = n) will be reflected in -// the Nodes. -func (n Nodes) Slice() []Node { - return n -} - -// Len returns the number of entries in Nodes. -func (n Nodes) Len() int { - return len(n) -} - -// Index returns the i'th element of Nodes. -// It panics if n does not have at least i+1 elements. -func (n Nodes) Index(i int) Node { - return n[i] -} - -// First returns the first element of Nodes (same as n.Index(0)). -// It panics if n has no elements. 
-func (n Nodes) First() Node { - return n[0] -} - -// Second returns the second element of Nodes (same as n.Index(1)). -// It panics if n has fewer than two elements. -func (n Nodes) Second() Node { - return n[1] -} - func (n *Nodes) mutate() { if n == &immutableEmptyNodes { panic("immutable Nodes.Set") @@ -371,55 +336,6 @@ func (n *Nodes) Set(s []Node) { *n = s } -// Set1 sets n to a slice containing a single node. -func (n *Nodes) Set1(n1 Node) { - n.mutate() - *n = []Node{n1} -} - -// Set2 sets n to a slice containing two nodes. -func (n *Nodes) Set2(n1, n2 Node) { - n.mutate() - *n = []Node{n1, n2} -} - -// Set3 sets n to a slice containing three nodes. -func (n *Nodes) Set3(n1, n2, n3 Node) { - n.mutate() - *n = []Node{n1, n2, n3} -} - -// MoveNodes sets n to the contents of n2, then clears n2. -func (n *Nodes) MoveNodes(n2 *Nodes) { - n.mutate() - *n = *n2 - *n2 = nil -} - -// SetIndex sets the i'th element of Nodes to node. -// It panics if n does not have at least i+1 elements. -func (n Nodes) SetIndex(i int, node Node) { - n[i] = node -} - -// SetFirst sets the first element of Nodes to node. -// It panics if n does not have at least one elements. -func (n Nodes) SetFirst(node Node) { - n[0] = node -} - -// SetSecond sets the second element of Nodes to node. -// It panics if n does not have at least two elements. -func (n Nodes) SetSecond(node Node) { - n[1] = node -} - -// Addr returns the address of the i'th element of Nodes. -// It panics if n does not have at least i+1 elements. -func (n Nodes) Addr(i int) *Node { - return &n[i] -} - // Append appends entries to Nodes. func (n *Nodes) Append(a ...Node) { if len(a) == 0 { @@ -446,18 +362,12 @@ func (n *Nodes) Take() []Node { return ret } -// AppendNodes appends the contents of *n2 to n, then clears n2. -func (n *Nodes) AppendNodes(n2 *Nodes) { - n.mutate() - *n = append(*n, n2.Take()...) -} - // Copy returns a copy of the content of the slice. func (n Nodes) Copy() Nodes { if n == nil { return nil } - c := make(Nodes, n.Len()) + c := make(Nodes, len(n)) copy(c, n) return c } diff --git a/src/cmd/compile/internal/ir/visit.go b/src/cmd/compile/internal/ir/visit.go index 3f5af4ea0ecb8..a1c345968f703 100644 --- a/src/cmd/compile/internal/ir/visit.go +++ b/src/cmd/compile/internal/ir/visit.go @@ -106,7 +106,7 @@ func DoChildren(n Node, do func(Node) error) error { // Note that DoList only calls do on the nodes in the list, not their children. // If x's children should be processed, do(x) must call DoChildren(x, do) itself. func DoList(list Nodes, do func(Node) error) error { - for _, x := range list.Slice() { + for _, x := range list { if x != nil { if err := do(x); err != nil { return err @@ -131,7 +131,7 @@ func Visit(n Node, visit func(Node)) { // VisitList calls Visit(x, visit) for each node x in the list. func VisitList(list Nodes, visit func(Node)) { - for _, x := range list.Slice() { + for _, x := range list { Visit(x, visit) } } @@ -163,7 +163,7 @@ func Any(n Node, cond func(Node) bool) bool { // Otherwise, AnyList returns false after calling Any(x, cond) // for every x in the list. func AnyList(list Nodes, cond func(Node) bool) bool { - for _, x := range list.Slice() { + for _, x := range list { if Any(x, cond) { return true } @@ -217,8 +217,8 @@ func EditChildren(n Node, edit func(Node) Node) { // Note that editList only calls edit on the nodes in the list, not their children. // If x's children should be processed, edit(x) must call EditChildren(x, edit) itself. 
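// With Nodes now a plain []Node, the helper methods deleted above all
// reduce to ordinary slice syntax. A minimal sketch of the mapping
// (hypothetical list variable):
//
//	list.Len()      -> len(list)
//	list.First()    -> list[0]
//	list.Second()   -> list[1]
//	list.Slice()    -> list
//	list.Set2(a, b) -> list = []Node{a, b}
//
// editList below makes the same change, ranging over the slice directly.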
func editList(list Nodes, edit func(Node) Node) { - s := list.Slice() - for i, x := range list.Slice() { + s := list + for i, x := range list { if x != nil { s[i] = edit(x) } From ead4957892bc1975d9cc9c32777733c67e5a885e Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 23 Dec 2020 00:05:23 -0500 Subject: [PATCH 222/474] [dev.regabi] cmd/compile: move helpers into package base [generated] [git-generate] cd src/cmd/compile/internal/gc rf ' # Move EnableTrace constant into base, with the other flags. mv enableTrace EnableTrace mv EnableTrace base.go # Move compilation checks to base. mv instrumenting Instrumenting mv ispkgin Compiling mv omit_pkgs NoInstrumentPkgs mv norace_inst_pkgs NoRacePkgs mv Instrumenting Compiling NoInstrumentPkgs NoRacePkgs base.go # Move AutogeneratedPos to package base, next to Pos. mv autogeneratedPos AutogeneratedPos mv AutogeneratedPos print.go mv timings Timer mv base.go print.go timings.go cmd/compile/internal/base ' cd ../base rf ' mv Instrumenting Flag.Cfg.Instrumenting ' Change-Id: I534437fa75857d31531fc499d833c9930c0a06d0 Reviewed-on: https://go-review.googlesource.com/c/go/+/279420 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/base/base.go | 51 +++++++++++++++++++ src/cmd/compile/internal/base/flag.go | 3 ++ src/cmd/compile/internal/base/print.go | 2 + .../compile/internal/{gc => base}/timings.go | 4 +- src/cmd/compile/internal/gc/alg.go | 4 +- src/cmd/compile/internal/gc/align.go | 2 +- src/cmd/compile/internal/gc/go.go | 7 --- src/cmd/compile/internal/gc/gsubr.go | 2 +- src/cmd/compile/internal/gc/inl.go | 2 +- src/cmd/compile/internal/gc/main.go | 32 ++++++------ src/cmd/compile/internal/gc/order.go | 16 +++--- src/cmd/compile/internal/gc/racewalk.go | 50 +----------------- src/cmd/compile/internal/gc/range.go | 4 +- src/cmd/compile/internal/gc/ssa.go | 4 +- src/cmd/compile/internal/gc/subr.go | 6 +-- src/cmd/compile/internal/gc/typecheck.go | 35 ++++++------- src/cmd/compile/internal/gc/walk.go | 18 +++---- 17 files changed, 120 insertions(+), 122 deletions(-) rename src/cmd/compile/internal/{gc => base}/timings.go (99%) diff --git a/src/cmd/compile/internal/base/base.go b/src/cmd/compile/internal/base/base.go index e26b378472da9..5a30fa6a334e6 100644 --- a/src/cmd/compile/internal/base/base.go +++ b/src/cmd/compile/internal/base/base.go @@ -26,3 +26,54 @@ func Exit(code int) { } os.Exit(code) } + +// To enable tracing support (-t flag), set EnableTrace to true. +const EnableTrace = false + +func Compiling(pkgs []string) bool { + if Ctxt.Pkgpath != "" { + for _, p := range pkgs { + if Ctxt.Pkgpath == p { + return true + } + } + } + + return false +} + +// The racewalk pass is currently handled in three parts. +// +// First, for flag_race, it inserts calls to racefuncenter and +// racefuncexit at the start and end (respectively) of each +// function. This is handled below. +// +// Second, during buildssa, it inserts appropriate instrumentation +// calls immediately before each memory load or store. This is handled +// by the (*state).instrument method in ssa.go, so here we just set +// the Func.InstrumentBody flag as needed. For background on why this +// is done during SSA construction rather than a separate SSA pass, +// see issue #19054. +// +// Third we remove calls to racefuncenter and racefuncexit, for leaf +// functions without instrumented operations. This is done as part of +// ssa opt pass via special rule. 
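// A minimal sketch of how part one is driven once the flag moves into
// this package, mirroring the walk.go call site updated later in this
// patch:
//
//	if base.Flag.Cfg.Instrumenting {
//		instrument(fn) // sets fn.SetInstrumentBody for buildssa
//	}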
+ +// TODO(dvyukov): do not instrument initialization as writes: +// a := make([]int, 10) + +// Do not instrument the following packages at all, +// at best instrumentation would cause infinite recursion. +var NoInstrumentPkgs = []string{ + "runtime/internal/atomic", + "runtime/internal/sys", + "runtime/internal/math", + "runtime", + "runtime/race", + "runtime/msan", + "internal/cpu", +} + +// Don't insert racefuncenterfp/racefuncexit into the following packages. +// Memory accesses in the packages are either uninteresting or will cause false positives. +var NoRacePkgs = []string{"sync", "sync/atomic"} diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go index ce87ff730eaa7..d35b8452f938a 100644 --- a/src/cmd/compile/internal/base/flag.go +++ b/src/cmd/compile/internal/base/flag.go @@ -130,6 +130,9 @@ type CmdFlags struct { ImportMap map[string]string // set by -importmap OR -importcfg PackageFile map[string]string // set by -importcfg; nil means not in use SpectreIndex bool // set by -spectre=index or -spectre=all + // Whether we are adding any sort of code instrumentation, such as + // when the race detector is enabled. + Instrumenting bool } } diff --git a/src/cmd/compile/internal/base/print.go b/src/cmd/compile/internal/base/print.go index ac7333ca4e093..9855dfdad0627 100644 --- a/src/cmd/compile/internal/base/print.go +++ b/src/cmd/compile/internal/base/print.go @@ -260,3 +260,5 @@ func ExitIfErrors() { ErrorExit() } } + +var AutogeneratedPos src.XPos diff --git a/src/cmd/compile/internal/gc/timings.go b/src/cmd/compile/internal/base/timings.go similarity index 99% rename from src/cmd/compile/internal/gc/timings.go rename to src/cmd/compile/internal/base/timings.go index ac12d78d1e088..f599f4e05f633 100644 --- a/src/cmd/compile/internal/gc/timings.go +++ b/src/cmd/compile/internal/base/timings.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package base import ( "fmt" @@ -11,7 +11,7 @@ import ( "time" ) -var timings Timings +var Timer Timings // Timings collects the execution times of labeled phases // which are added trough a sequence of Start/Stop calls. diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index 49ce14b0261e9..8733c6198c7a6 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -288,7 +288,7 @@ func genhash(t *types.Type) *obj.LSym { fmt.Printf("genhash %v %v %v\n", closure, sym, t) } - base.Pos = autogeneratedPos // less confusing than end of input + base.Pos = base.AutogeneratedPos // less confusing than end of input dclcontext = ir.PEXTERN // func sym(p *T, h uintptr) uintptr @@ -517,7 +517,7 @@ func geneq(t *types.Type) *obj.LSym { // Autogenerate code for equality of structs and arrays. - base.Pos = autogeneratedPos // less confusing than end of input + base.Pos = base.AutogeneratedPos // less confusing than end of input dclcontext = ir.PEXTERN // func sym(p, q *T) bool diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go index a9cf7fb50aa05..f2f98bd51f3c2 100644 --- a/src/cmd/compile/internal/gc/align.go +++ b/src/cmd/compile/internal/gc/align.go @@ -270,7 +270,7 @@ func reportTypeLoop(t *types.Type) { func dowidth(t *types.Type) { // Calling dowidth when typecheck tracing enabled is not safe. // See issue #33658. 
- if enableTrace && skipDowidthForTracing { + if base.EnableTrace && skipDowidthForTracing { return } if Widthptr == 0 { diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index df91f6f530aa4..46ddda0ba76eb 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -10,7 +10,6 @@ import ( "cmd/compile/internal/ssa" "cmd/compile/internal/types" "cmd/internal/obj" - "cmd/internal/src" "sync" ) @@ -144,14 +143,8 @@ var Widthreg int var typecheckok bool -// Whether we are adding any sort of code instrumentation, such as -// when the race detector is enabled. -var instrumenting bool - var nodfp *ir.Name -var autogeneratedPos src.XPos - // interface to back end type Arch struct { diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index f4178db477146..db55b1035cd0e 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -199,7 +199,7 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { savedclcontext := dclcontext savedcurfn := Curfn - base.Pos = autogeneratedPos + base.Pos = base.AutogeneratedPos dclcontext = ir.PEXTERN // At the moment we don't support wrapping a method, we'd need machinery diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 2fb23f1a3f252..49e0bcc470c6a 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -844,7 +844,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b return n } - if instrumenting && isRuntimePkg(fn.Sym().Pkg) { + if base.Flag.Cfg.Instrumenting && isRuntimePkg(fn.Sym().Pkg) { // Runtime package must not be instrumented. // Instrument skips runtime package. However, some runtime code can be // inlined into other packages and instrumented there. To avoid this, diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index c1cc7ed377d33..feded3f9b2bcb 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -58,7 +58,7 @@ var Target *ir.Package // arguments, type-checks the parsed Go package, compiles functions to machine // code, and finally writes the compiled package definition to disk. func Main(archInit func(*Arch)) { - timings.Start("fe", "init") + base.Timer.Start("fe", "init") defer hidePanic() @@ -123,7 +123,7 @@ func Main(archInit func(*Arch)) { // changes in the binary.) recordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre") - if !enableTrace && base.Flag.LowerT { + if !base.EnableTrace && base.Flag.LowerT { log.Fatalf("compiler not built with support for -t") } @@ -159,7 +159,7 @@ func Main(archInit func(*Arch)) { readSymABIs(base.Flag.SymABIs, base.Ctxt.Pkgpath) } - if ispkgin(omit_pkgs) { + if base.Compiling(base.NoInstrumentPkgs) { base.Flag.Race = false base.Flag.MSan = false } @@ -173,7 +173,7 @@ func Main(archInit func(*Arch)) { msanpkg = types.NewPkg("runtime/msan", "") } if base.Flag.Race || base.Flag.MSan { - instrumenting = true + base.Flag.Cfg.Instrumenting = true } if base.Flag.Dwarf { dwarf.EnableLogging(base.Debug.DwarfInl != 0) @@ -205,7 +205,7 @@ func Main(archInit func(*Arch)) { NeedITab = func(t, iface *types.Type) { itabname(t, iface) } NeedRuntimeType = addsignat // TODO(rsc): typenamesym for lock? 
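// base.AutogeneratedPos gives compiler-generated code a stable,
// clearly synthetic source position. Producers point base.Pos at it
// before emitting declarations, as in genhash above:
//
//	base.Pos = base.AutogeneratedPos // less confusing than end of input
//	dclcontext = ir.PEXTERN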
- autogeneratedPos = makePos(src.NewFileBase("", ""), 1, 0) + base.AutogeneratedPos = makePos(src.NewFileBase("", ""), 1, 0) types.TypeLinkSym = func(t *types.Type) *obj.LSym { return typenamesym(t).Linksym() @@ -213,11 +213,11 @@ func Main(archInit func(*Arch)) { TypecheckInit() // Parse input. - timings.Start("fe", "parse") + base.Timer.Start("fe", "parse") lines := parseFiles(flag.Args()) cgoSymABIs() - timings.Stop() - timings.AddEvent(int64(lines), "lines") + base.Timer.Stop() + base.Timer.AddEvent(int64(lines), "lines") recordPackageName() // Typecheck. @@ -233,7 +233,7 @@ func Main(archInit func(*Arch)) { } // Inlining - timings.Start("fe", "inlining") + base.Timer.Start("fe", "inlining") if base.Flag.LowerL != 0 { InlinePackage() } @@ -254,7 +254,7 @@ func Main(archInit func(*Arch)) { // or else the stack copier will not update it. // Large values are also moved off stack in escape analysis; // because large values may contain pointers, it must happen early. - timings.Start("fe", "escapes") + base.Timer.Start("fe", "escapes") escapes(Target.Decls) // Collect information for go:nowritebarrierrec @@ -268,7 +268,7 @@ func Main(archInit func(*Arch)) { // Transform closure bodies to properly reference captured variables. // This needs to happen before walk, because closures must be transformed // before walk reaches a call of a closure. - timings.Start("fe", "xclosures") + base.Timer.Start("fe", "xclosures") for _, n := range Target.Decls { if n.Op() == ir.ODCLFUNC { n := n.(*ir.Func) @@ -292,7 +292,7 @@ func Main(archInit func(*Arch)) { // Compile top level functions. // Don't use range--walk can add functions to Target.Decls. - timings.Start("be", "compilefuncs") + base.Timer.Start("be", "compilefuncs") fcount := int64(0) for i := 0; i < len(Target.Decls); i++ { n := Target.Decls[i] @@ -301,7 +301,7 @@ func Main(archInit func(*Arch)) { fcount++ } } - timings.AddEvent(fcount, "funcs") + base.Timer.AddEvent(fcount, "funcs") compileFunctions() @@ -320,7 +320,7 @@ func Main(archInit func(*Arch)) { } // Write object data to disk. 
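// The Timer value moved into base keeps the same bracketing convention
// throughout Main: phases are labeled via Start, closed by Stop, and
// counters attach with AddEvent, as in the compilefuncs phase above:
//
//	base.Timer.Start("be", "compilefuncs")
//	// ... compile each function ...
//	base.Timer.AddEvent(fcount, "funcs")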
- timings.Start("be", "dumpobj") + base.Timer.Start("be", "dumpobj") dumpdata() base.Ctxt.NumberSyms() dumpobj() @@ -339,7 +339,7 @@ func Main(archInit func(*Arch)) { base.ExitIfErrors() base.FlushErrors() - timings.Stop() + base.Timer.Stop() if base.Flag.Bench != "" { if err := writebench(base.Flag.Bench); err != nil { @@ -397,7 +397,7 @@ func writebench(filename string) error { fmt.Fprintln(&buf, "commit:", objabi.Version) fmt.Fprintln(&buf, "goos:", runtime.GOOS) fmt.Fprintln(&buf, "goarch:", runtime.GOARCH) - timings.Write(&buf, "BenchmarkCompile:"+base.Ctxt.Pkgpath+":") + base.Timer.Write(&buf, "BenchmarkCompile:"+base.Ctxt.Pkgpath+":") n, err := f.Write(buf.Bytes()) if err != nil { diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 45a2e2a43ecae..738b403b99983 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -362,7 +362,7 @@ func (o *Order) stmtList(l ir.Nodes) { // and rewrites it to: // m = OMAKESLICECOPY([]T, x, s); nil func orderMakeSliceCopy(s []ir.Node) { - if base.Flag.N != 0 || instrumenting { + if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting { return } if len(s) < 2 || s[0] == nil || s[0].Op() != ir.OAS || s[1] == nil || s[1].Op() != ir.OCOPY { @@ -580,7 +580,7 @@ func (o *Order) mapAssign(n ir.Node) { m.Index = o.copyExpr(m.Index) } fallthrough - case instrumenting && n.Op() == ir.OAS2FUNC && !ir.IsBlank(m): + case base.Flag.Cfg.Instrumenting && n.Op() == ir.OAS2FUNC && !ir.IsBlank(m): t := o.newTemp(m.Type(), false) n.Lhs[i] = t a := ir.NewAssignStmt(base.Pos, m, t) @@ -639,7 +639,7 @@ func (o *Order) stmt(n ir.Node) { n.X = o.expr(n.X, nil) n.Y = o.expr(n.Y, nil) - if instrumenting || n.X.Op() == ir.OINDEXMAP && (n.AsOp == ir.ODIV || n.AsOp == ir.OMOD) { + if base.Flag.Cfg.Instrumenting || n.X.Op() == ir.OINDEXMAP && (n.AsOp == ir.ODIV || n.AsOp == ir.OMOD) { // Rewrite m[k] op= r into m[k] = m[k] op r so // that we can ensure that if op panics // because r is zero, the panic happens before @@ -1008,7 +1008,7 @@ func (o *Order) stmt(n ir.Node) { t := o.markTemp() n.Chan = o.expr(n.Chan, nil) n.Value = o.expr(n.Value, nil) - if instrumenting { + if base.Flag.Cfg.Instrumenting { // Force copying to the stack so that (chan T)(nil) <- x // is still instrumented as a read of x. n.Value = o.copyExpr(n.Value) @@ -1156,7 +1156,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { // conversions. See copyExpr a few lines below. needCopy = mapKeyReplaceStrConv(n.Index) - if instrumenting { + if base.Flag.Cfg.Instrumenting { // Race detector needs the copy. needCopy = true } @@ -1194,7 +1194,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { // together. See golang.org/issue/15329. 
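// The guard just below recurs throughout expr1: a call result is
// reused directly only when it feeds a plain ONAME and no
// instrumentation is active; otherwise it is copied to a temporary so
// the race detector observes an ordinary read/write pair.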
o.init(call) o.call(call) - if lhs == nil || lhs.Op() != ir.ONAME || instrumenting { + if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting { return o.copyExpr(n) } } else { @@ -1267,7 +1267,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { o.call(n) } - if lhs == nil || lhs.Op() != ir.ONAME || instrumenting { + if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting { return o.copyExpr(n) } return n @@ -1332,7 +1332,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { case ir.ODOTTYPE, ir.ODOTTYPE2: n := n.(*ir.TypeAssertExpr) n.X = o.expr(n.X, nil) - if !isdirectiface(n.Type()) || instrumenting { + if !isdirectiface(n.Type()) || base.Flag.Cfg.Instrumenting { return o.copyExprClear(n) } return n diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go index 61a65368aff89..67802fe917b64 100644 --- a/src/cmd/compile/internal/gc/racewalk.go +++ b/src/cmd/compile/internal/gc/racewalk.go @@ -12,60 +12,12 @@ import ( "cmd/internal/sys" ) -// The racewalk pass is currently handled in three parts. -// -// First, for flag_race, it inserts calls to racefuncenter and -// racefuncexit at the start and end (respectively) of each -// function. This is handled below. -// -// Second, during buildssa, it inserts appropriate instrumentation -// calls immediately before each memory load or store. This is handled -// by the (*state).instrument method in ssa.go, so here we just set -// the Func.InstrumentBody flag as needed. For background on why this -// is done during SSA construction rather than a separate SSA pass, -// see issue #19054. -// -// Third we remove calls to racefuncenter and racefuncexit, for leaf -// functions without instrumented operations. This is done as part of -// ssa opt pass via special rule. - -// TODO(dvyukov): do not instrument initialization as writes: -// a := make([]int, 10) - -// Do not instrument the following packages at all, -// at best instrumentation would cause infinite recursion. -var omit_pkgs = []string{ - "runtime/internal/atomic", - "runtime/internal/sys", - "runtime/internal/math", - "runtime", - "runtime/race", - "runtime/msan", - "internal/cpu", -} - -// Don't insert racefuncenterfp/racefuncexit into the following packages. -// Memory accesses in the packages are either uninteresting or will cause false positives. -var norace_inst_pkgs = []string{"sync", "sync/atomic"} - -func ispkgin(pkgs []string) bool { - if base.Ctxt.Pkgpath != "" { - for _, p := range pkgs { - if base.Ctxt.Pkgpath == p { - return true - } - } - } - - return false -} - func instrument(fn *ir.Func) { if fn.Pragma&ir.Norace != 0 || (fn.Sym().Linksym() != nil && fn.Sym().Linksym().ABIWrapper()) { return } - if !base.Flag.Race || !ispkgin(norace_inst_pkgs) { + if !base.Flag.Race || !base.Compiling(base.NoRacePkgs) { fn.SetInstrumentBody(true) } diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index 4d2964591b88f..078f03bc68790 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -460,7 +460,7 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { // // where == for keys of map m is reflexive. func isMapClear(n *ir.RangeStmt) bool { - if base.Flag.N != 0 || instrumenting { + if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting { return false } @@ -523,7 +523,7 @@ func mapClear(m ir.Node) ir.Node { // // Parameters are as in walkrange: "for v1, v2 = range a". 
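// The shape it optimizes is the canonical clearing loop, e.g. (a
// sketch; zero stands for the zero value of a's element type):
//
//	for i := range a {
//		a[i] = zero
//	}
//
// which can be replaced by a single memclr-style runtime call.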
func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node { - if base.Flag.N != 0 || instrumenting { + if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting { return nil } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 6993b4b1c74af..0bca2baa17b41 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -2281,7 +2281,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { return nil } - if instrumenting { + if base.Flag.Cfg.Instrumenting { // These appear to be fine, but they fail the // integer constraint below, so okay them here. // Sample non-integer conversion: map[string]string -> *uint8 @@ -3490,7 +3490,7 @@ func initSSATables() { } /******** runtime ********/ - if !instrumenting { + if !base.Flag.Cfg.Instrumenting { add("runtime", "slicebytetostringtmp", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { // Compiler frontend optimizations emit OBYTES2STRTMP nodes diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 59763824fbe0c..6e130d4889664 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -613,7 +613,7 @@ func calcHasCall(n ir.Node) bool { case ir.OANDAND, ir.OOROR: // hard with instrumented code n := n.(*ir.LogicalExpr) - if instrumenting { + if base.Flag.Cfg.Instrumenting { return true } return n.X.HasCall() || n.Y.HasCall() @@ -1209,7 +1209,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { return } - base.Pos = autogeneratedPos + base.Pos = base.AutogeneratedPos dclcontext = ir.PEXTERN tfn := ir.NewFuncType(base.Pos, @@ -1243,7 +1243,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { // the TOC to the appropriate value for that module. But if it returns // directly to the wrapper's caller, nothing will reset it to the correct // value for that function. - if !instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !isifacemethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) { + if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !isifacemethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) { // generate tail call: adjust pointer receiver and jump to embedded method. left := dot.X // skip final .M if !left.Type().IsPtr() { diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index f2e5728d80ab2..4f1fe240ec951 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -25,7 +25,7 @@ func TypecheckInit() { types.Dowidth = dowidth initUniverse() dclcontext = ir.PEXTERN - timings.Start("fe", "loadsys") + base.Timer.Start("fe", "loadsys") loadsys() } @@ -45,7 +45,7 @@ func TypecheckPackage() { // TODO(gri) Remove this again once we have a fix for #25838. // Don't use range--typecheck can add closures to Target.Decls. - timings.Start("fe", "typecheck", "top1") + base.Timer.Start("fe", "typecheck", "top1") for i := 0; i < len(Target.Decls); i++ { n := Target.Decls[i] if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.(*ir.Decl).X.Name().Alias()) { @@ -57,7 +57,7 @@ func TypecheckPackage() { // To check interface assignments, depends on phase 1. // Don't use range--typecheck can add closures to Target.Decls. 
- timings.Start("fe", "typecheck", "top2") + base.Timer.Start("fe", "typecheck", "top2") for i := 0; i < len(Target.Decls); i++ { n := Target.Decls[i] if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).X.Name().Alias() { @@ -67,7 +67,7 @@ func TypecheckPackage() { // Phase 3: Type check function bodies. // Don't use range--typecheck can add closures to Target.Decls. - timings.Start("fe", "typecheck", "func") + base.Timer.Start("fe", "typecheck", "func") var fcount int64 for i := 0; i < len(Target.Decls); i++ { n := Target.Decls[i] @@ -80,7 +80,7 @@ func TypecheckPackage() { // Phase 4: Check external declarations. // TODO(mdempsky): This should be handled when type checking their // corresponding ODCL nodes. - timings.Start("fe", "typecheck", "externdcls") + base.Timer.Start("fe", "typecheck", "externdcls") for i, n := range Target.Externs { if n.Op() == ir.ONAME { Target.Externs[i] = typecheck(Target.Externs[i], ctxExpr) @@ -93,7 +93,7 @@ func TypecheckPackage() { // Phase 6: Decide how to capture closed variables. // This needs to run before escape analysis, // because variables captured by value do not escape. - timings.Start("fe", "capturevars") + base.Timer.Start("fe", "capturevars") for _, n := range Target.Decls { if n.Op() == ir.ODCLFUNC { n := n.(*ir.Func) @@ -162,9 +162,6 @@ func TypecheckImports() { } } -// To enable tracing support (-t flag), set enableTrace to true. -const enableTrace = false - var traceIndent []byte var skipDowidthForTracing bool @@ -234,7 +231,7 @@ func resolve(n ir.Node) (res ir.Node) { } // only trace if there's work to do - if enableTrace && base.Flag.LowerT { + if base.EnableTrace && base.Flag.LowerT { defer tracePrint("resolve", n)(&res) } @@ -379,7 +376,7 @@ func typecheck(n ir.Node, top int) (res ir.Node) { } // only trace if there's work to do - if enableTrace && base.Flag.LowerT { + if base.EnableTrace && base.Flag.LowerT { defer tracePrint("typecheck", n)(&res) } @@ -568,7 +565,7 @@ func indexlit(n ir.Node) ir.Node { // typecheck1 should ONLY be called from typecheck. func typecheck1(n ir.Node, top int) (res ir.Node) { - if enableTrace && base.Flag.LowerT { + if base.EnableTrace && base.Flag.LowerT { defer tracePrint("typecheck1", n)(&res) } @@ -2552,7 +2549,7 @@ func lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, do // typecheckMethodExpr checks selector expressions (ODOT) where the // base expression is a type expression (OTYPE). func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) { - if enableTrace && base.Flag.LowerT { + if base.EnableTrace && base.Flag.LowerT { defer tracePrint("typecheckMethodExpr", n)(&res) } @@ -2991,7 +2988,7 @@ func pushtype(nn ir.Node, t *types.Type) ir.Node { // The result of typecheckcomplit MUST be assigned back to n, e.g. // n.Left = typecheckcomplit(n.Left) func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { - if enableTrace && base.Flag.LowerT { + if base.EnableTrace && base.Flag.LowerT { defer tracePrint("typecheckcomplit", n)(&res) } @@ -3435,7 +3432,7 @@ func samesafeexpr(l ir.Node, r ir.Node) bool { // if this assignment is the definition of a var on the left side, // fill in the var's type. 
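// typecheckas and the remaining entry points below all guard their
// tracing identically; because base.EnableTrace is a constant, the
// deferred tracePrint vanishes from builds without -t support:
//
//	if base.EnableTrace && base.Flag.LowerT {
//		defer tracePrint("typecheckas", n)(nil)
//	}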
func typecheckas(n *ir.AssignStmt) { - if enableTrace && base.Flag.LowerT { + if base.EnableTrace && base.Flag.LowerT { defer tracePrint("typecheckas", n)(nil) } @@ -3493,7 +3490,7 @@ func checkassignto(src *types.Type, dst ir.Node) { } func typecheckas2(n *ir.AssignListStmt) { - if enableTrace && base.Flag.LowerT { + if base.EnableTrace && base.Flag.LowerT { defer tracePrint("typecheckas2", n)(nil) } @@ -3627,7 +3624,7 @@ out: // To be called by typecheck, not directly. // (Call typecheckFunc instead.) func typecheckfunc(n *ir.Func) { - if enableTrace && base.Flag.LowerT { + if base.EnableTrace && base.Flag.LowerT { defer tracePrint("typecheckfunc", n)(nil) } @@ -3691,7 +3688,7 @@ func checkMapKeys() { } func typecheckdeftype(n *ir.Name) { - if enableTrace && base.Flag.LowerT { + if base.EnableTrace && base.Flag.LowerT { defer tracePrint("typecheckdeftype", n)(nil) } @@ -3723,7 +3720,7 @@ func typecheckdeftype(n *ir.Name) { } func typecheckdef(n ir.Node) { - if enableTrace && base.Flag.LowerT { + if base.EnableTrace && base.Flag.LowerT { defer tracePrint("typecheckdef", n)(nil) } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 610c6b6539c82..57edc4328007e 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -85,7 +85,7 @@ func walk(fn *ir.Func) { ir.DumpList(s, Curfn.Enter) } - if instrumenting { + if base.Flag.Cfg.Instrumenting { instrument(fn) } } @@ -738,7 +738,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return as } - if !instrumenting && isZero(as.Y) { + if !base.Flag.Cfg.Instrumenting && isZero(as.Y) { return as } @@ -1311,7 +1311,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { panic("unreachable") case ir.OCOPY: - return copyany(n.(*ir.BinaryExpr), init, instrumenting && !base.Flag.CompilingRuntime) + return copyany(n.(*ir.BinaryExpr), init, base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime) case ir.OCLOSE: // cannot use chanfn - closechan takes any, not chan any @@ -1597,7 +1597,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OBYTES2STRTMP: n := n.(*ir.ConvExpr) n.X = walkexpr(n.X, init) - if !instrumenting { + if !base.Flag.Cfg.Instrumenting { // Let the backend handle OBYTES2STRTMP directly // to avoid a function call to slicebytetostringtmp. return n @@ -1975,7 +1975,7 @@ func walkCall(n *ir.CallExpr, init *ir.Nodes) { } else { t = params.Field(i).Type } - if instrumenting || fncall(arg, t) { + if base.Flag.Cfg.Instrumenting || fncall(arg, t) { // make assignment of fncall to tempAt tmp := temp(t) a := convas(ir.NewAssignStmt(base.Pos, tmp, arg), init) @@ -2873,7 +2873,7 @@ func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { ptr1, len1 := backingArrayPtrLen(cheapexpr(slice, &nodes)) ptr2, len2 := backingArrayPtrLen(l2) ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, typename(elemtype), ptr1, len1, ptr2, len2) - } else if instrumenting && !base.Flag.CompilingRuntime { + } else if base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime { // rely on runtime to instrument: // copy(s[len(l1):], l2) // l2 can be a slice or string. @@ -2914,7 +2914,7 @@ func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { // isAppendOfMake reports whether n is of the form append(x , make([]T, y)...). // isAppendOfMake assumes n has already been typechecked. 
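// Spelled out, the form in question is (hypothetical s and y):
//
//	s = append(s, make([]T, y)...)
//
// which later lowering can turn into growing s and zeroing the new
// tail, without materializing the temporary slice.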
func isAppendOfMake(n ir.Node) bool { - if base.Flag.N != 0 || instrumenting { + if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting { return false } @@ -3125,7 +3125,7 @@ func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node { // General case, with no function calls left as arguments. // Leave for gen, except that instrumentation requires old form. - if !instrumenting || base.Flag.CompilingRuntime { + if !base.Flag.Cfg.Instrumenting || base.Flag.CompilingRuntime { return n } @@ -4055,7 +4055,7 @@ func canMergeLoads() bool { // isRuneCount reports whether n is of the form len([]rune(string)). // These are optimized into a call to runtime.countrunes. func isRuneCount(n ir.Node) bool { - return base.Flag.N == 0 && !instrumenting && n.Op() == ir.OLEN && n.(*ir.UnaryExpr).X.Op() == ir.OSTR2RUNES + return base.Flag.N == 0 && !base.Flag.Cfg.Instrumenting && n.Op() == ir.OLEN && n.(*ir.UnaryExpr).X.Op() == ir.OSTR2RUNES } func walkCheckPtrAlignment(n *ir.ConvExpr, init *ir.Nodes, count ir.Node) ir.Node { From 9ee309255a94499c6f4e6d3ac7653b5eeb4ae7b7 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 23 Dec 2020 00:08:03 -0500 Subject: [PATCH 223/474] [dev.regabi] cmd/compile: move helpers into package types [generated] [git-generate] cd src/cmd/compile/internal/gc rf ' # Type hash (formatting). mv typehash TypeHash mv TypeHash fmt.go # Method sorting. mv methcmp MethodsByName mv MethodsByName MethodsByName.Len MethodsByName.Swap \ MethodsByName.Less sort.go # Move version check into types. # A little surprising, but its keyed off the types.Pkg. ex { import "cmd/compile/internal/types" var p *types.Pkg var major, minor int langSupported(major, minor, p) -> AllowsGoVersion(p, major, minor) } rm langSupported mv checkLang ParseLangFlag mv lang langWant AllowsGoVersion ParseLangFlag \ parseLang currentLang goVersionRE goversion.go mv testdclstack CheckDclstack mv CheckDclstack scope.go mv algtype1 AlgType mv isComplex IsComplex mv isFloat IsFloat mv isInt IsInt mv issimple IsSimple mv okforcmp IsOrdered mv floatForComplex FloatForComplex mv complexForFloat ComplexForFloat mv isdirectiface IsDirectIface mv isifacemethod IsInterfaceMethod mv isMethodApplicable IsMethodApplicable mv ispaddedfield IsPaddedField mv isRuntimePkg IsRuntimePkg mv isReflectPkg IsReflectPkg mv methtype ReceiverBaseType mv typesymname TypeSymName mv typesym TypeSym mv typeLookup TypeSymLookup mv IsAlias IsDotAlias mv isreflexive IsReflexive mv simtype SimType # The type1.go here is to avoid an undiagnosed bug in rf # that does not get the follow-up typechecking right if we # move directly to type.go during the mv into package types below. mv \ IsInt IsOrdered IsReflexive \ IsDirectIface IsInterfaceMethod IsMethodApplicable IsPaddedField \ IsRuntimePkg IsReflectPkg ReceiverBaseType \ FloatForComplex ComplexForFloat \ TypeSym TypeSymLookup TypeSymName \ typepkg SimType \ type1.go # The alg1.go here is because we are only moving part of alg.go. 
mv typeHasNoAlg TypeHasNoAlg mv AlgKind ANOEQ AlgType TypeHasNoAlg IsComparable IncomparableField IsPaddedField alg1.go mv IsDotAlias pkg.go mv alg1.go algkind_string.go fmt.go goversion.go pkg.go \ CheckDclstack \ # scope.go sort.go type1.go \ cmd/compile/internal/types ' cd ../types rf ' mv IsDclstackValid isDclstackValid mv alg1.go alg.go mv type1.go type.go ' Change-Id: I8bd53b21c7bdd1770e1b525de32f136833e84c9d Reviewed-on: https://go-review.googlesource.com/c/go/+/279307 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/alg.go | 271 ++++-------------- src/cmd/compile/internal/gc/align.go | 8 +- src/cmd/compile/internal/gc/const.go | 6 +- src/cmd/compile/internal/gc/dcl.go | 17 +- src/cmd/compile/internal/gc/escape.go | 4 +- src/cmd/compile/internal/gc/go.go | 28 -- src/cmd/compile/internal/gc/gsubr.go | 2 +- src/cmd/compile/internal/gc/iexport.go | 6 +- src/cmd/compile/internal/gc/inl.go | 8 +- src/cmd/compile/internal/gc/main.go | 93 +----- src/cmd/compile/internal/gc/noder.go | 6 +- src/cmd/compile/internal/gc/obj.go | 2 +- src/cmd/compile/internal/gc/order.go | 2 +- src/cmd/compile/internal/gc/pgen.go | 4 +- src/cmd/compile/internal/gc/range.go | 2 +- src/cmd/compile/internal/gc/reflect.go | 108 +------ src/cmd/compile/internal/gc/sinit.go | 4 +- src/cmd/compile/internal/gc/ssa.go | 44 +-- src/cmd/compile/internal/gc/subr.go | 112 +------- src/cmd/compile/internal/gc/swt.go | 10 +- src/cmd/compile/internal/gc/typecheck.go | 30 +- src/cmd/compile/internal/gc/universe.go | 56 ++-- src/cmd/compile/internal/gc/walk.go | 24 +- src/cmd/compile/internal/types/alg.go | 173 +++++++++++ .../internal/{gc => types}/algkind_string.go | 2 +- src/cmd/compile/internal/types/fmt.go | 11 + src/cmd/compile/internal/types/goversion.go | 96 +++++++ src/cmd/compile/internal/types/pkg.go | 4 + src/cmd/compile/internal/types/scope.go | 8 +- src/cmd/compile/internal/types/sort.go | 14 + src/cmd/compile/internal/types/type.go | 202 +++++++++++++ 31 files changed, 691 insertions(+), 666 deletions(-) create mode 100644 src/cmd/compile/internal/types/alg.go rename src/cmd/compile/internal/{gc => types}/algkind_string.go (98%) create mode 100644 src/cmd/compile/internal/types/goversion.go create mode 100644 src/cmd/compile/internal/types/sort.go diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index 8733c6198c7a6..08237d40556a5 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -13,56 +13,10 @@ import ( "sort" ) -// AlgKind describes the kind of algorithms used for comparing and -// hashing a Type. -type AlgKind int - -//go:generate stringer -type AlgKind -trimprefix A - -const ( - // These values are known by runtime. - ANOEQ AlgKind = iota - AMEM0 - AMEM8 - AMEM16 - AMEM32 - AMEM64 - AMEM128 - ASTRING - AINTER - ANILINTER - AFLOAT32 - AFLOAT64 - ACPLX64 - ACPLX128 - - // Type can be compared/hashed as regular memory. - AMEM AlgKind = 100 - - // Type needs special comparison/hashing functions. - ASPECIAL AlgKind = -1 -) - -// IsComparable reports whether t is a comparable type. -func IsComparable(t *types.Type) bool { - a, _ := algtype1(t) - return a != ANOEQ -} - // IsRegularMemory reports whether t can be compared/hashed as regular memory. func IsRegularMemory(t *types.Type) bool { - a, _ := algtype1(t) - return a == AMEM -} - -// IncomparableField returns an incomparable Field of struct Type t, if any. 
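// After the move, the same query goes through types.AlgType; a minimal
// equivalent of the removed IsComparable:
//
//	a, _ := types.AlgType(t)
//	comparable := a != types.ANOEQ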
-func IncomparableField(t *types.Type) *types.Field { - for _, f := range t.FieldSlice() { - if !IsComparable(f.Type) { - return f - } - } - return nil + a, _ := types.AlgType(t) + return a == types.AMEM } // EqCanPanic reports whether == on type t could panic (has an interface somewhere). @@ -87,128 +41,28 @@ func EqCanPanic(t *types.Type) bool { // algtype is like algtype1, except it returns the fixed-width AMEMxx variants // instead of the general AMEM kind when possible. -func algtype(t *types.Type) AlgKind { - a, _ := algtype1(t) - if a == AMEM { +func algtype(t *types.Type) types.AlgKind { + a, _ := types.AlgType(t) + if a == types.AMEM { switch t.Width { case 0: - return AMEM0 + return types.AMEM0 case 1: - return AMEM8 + return types.AMEM8 case 2: - return AMEM16 + return types.AMEM16 case 4: - return AMEM32 + return types.AMEM32 case 8: - return AMEM64 + return types.AMEM64 case 16: - return AMEM128 + return types.AMEM128 } } return a } -// algtype1 returns the AlgKind used for comparing and hashing Type t. -// If it returns ANOEQ, it also returns the component type of t that -// makes it incomparable. -func algtype1(t *types.Type) (AlgKind, *types.Type) { - if t.Broke() { - return AMEM, nil - } - if t.Noalg() { - return ANOEQ, t - } - - switch t.Kind() { - case types.TANY, types.TFORW: - // will be defined later. - return ANOEQ, t - - case types.TINT8, types.TUINT8, types.TINT16, types.TUINT16, - types.TINT32, types.TUINT32, types.TINT64, types.TUINT64, - types.TINT, types.TUINT, types.TUINTPTR, - types.TBOOL, types.TPTR, - types.TCHAN, types.TUNSAFEPTR: - return AMEM, nil - - case types.TFUNC, types.TMAP: - return ANOEQ, t - - case types.TFLOAT32: - return AFLOAT32, nil - - case types.TFLOAT64: - return AFLOAT64, nil - - case types.TCOMPLEX64: - return ACPLX64, nil - - case types.TCOMPLEX128: - return ACPLX128, nil - - case types.TSTRING: - return ASTRING, nil - - case types.TINTER: - if t.IsEmptyInterface() { - return ANILINTER, nil - } - return AINTER, nil - - case types.TSLICE: - return ANOEQ, t - - case types.TARRAY: - a, bad := algtype1(t.Elem()) - switch a { - case AMEM: - return AMEM, nil - case ANOEQ: - return ANOEQ, bad - } - - switch t.NumElem() { - case 0: - // We checked above that the element type is comparable. - return AMEM, nil - case 1: - // Single-element array is same as its lone element. - return a, nil - } - - return ASPECIAL, nil - - case types.TSTRUCT: - fields := t.FieldSlice() - - // One-field struct is same as that one field alone. - if len(fields) == 1 && !fields[0].Sym.IsBlank() { - return algtype1(fields[0].Type) - } - - ret := AMEM - for i, f := range fields { - // All fields must be comparable. - a, bad := algtype1(f.Type) - if a == ANOEQ { - return ANOEQ, bad - } - - // Blank fields, padded fields, fields with non-memory - // equality need special compare. - if a != AMEM || f.Sym.IsBlank() || ispaddedfield(t, i) { - ret = ASPECIAL - } - } - - return ret, nil - } - - base.Fatalf("algtype1: unexpected type %v", t) - return 0, nil -} - // genhash returns a symbol which is the closure used to compute // the hash of a value of type t. // Note: the generated function must match runtime.typehash exactly. 
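// For orientation: algtype above collapses AMEM into the fixed-width
// variants keyed by t.Width, so an 8-byte comparable type hashes
// through the memhash64 closure below, while other memory sizes fall
// back to a memhash_varlen closure that encodes the size.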
@@ -217,37 +71,37 @@ func genhash(t *types.Type) *obj.LSym { default: // genhash is only called for types that have equality base.Fatalf("genhash %v", t) - case AMEM0: + case types.AMEM0: return sysClosure("memhash0") - case AMEM8: + case types.AMEM8: return sysClosure("memhash8") - case AMEM16: + case types.AMEM16: return sysClosure("memhash16") - case AMEM32: + case types.AMEM32: return sysClosure("memhash32") - case AMEM64: + case types.AMEM64: return sysClosure("memhash64") - case AMEM128: + case types.AMEM128: return sysClosure("memhash128") - case ASTRING: + case types.ASTRING: return sysClosure("strhash") - case AINTER: + case types.AINTER: return sysClosure("interhash") - case ANILINTER: + case types.ANILINTER: return sysClosure("nilinterhash") - case AFLOAT32: + case types.AFLOAT32: return sysClosure("f32hash") - case AFLOAT64: + case types.AFLOAT64: return sysClosure("f64hash") - case ACPLX64: + case types.ACPLX64: return sysClosure("c64hash") - case ACPLX128: + case types.ACPLX128: return sysClosure("c128hash") - case AMEM: + case types.AMEM: // For other sizes of plain memory, we build a closure // that calls memhash_varlen. The size of the memory is // encoded in the first slot of the closure. - closure := typeLookup(fmt.Sprintf(".hashfunc%d", t.Width)).Linksym() + closure := types.TypeSymLookup(fmt.Sprintf(".hashfunc%d", t.Width)).Linksym() if len(closure.P) > 0 { // already generated return closure } @@ -259,7 +113,7 @@ func genhash(t *types.Type) *obj.LSym { ot = duintptr(closure, ot, uint64(t.Width)) // size encoded in closure ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA) return closure - case ASPECIAL: + case types.ASPECIAL: break } @@ -390,7 +244,7 @@ func genhash(t *types.Type) *obj.LSym { Curfn = nil if base.Debug.DclStack != 0 { - testdclstack() + types.CheckDclstack() } fn.SetNilCheckDisabled(true) @@ -407,22 +261,22 @@ func genhash(t *types.Type) *obj.LSym { func hashfor(t *types.Type) ir.Node { var sym *types.Sym - switch a, _ := algtype1(t); a { - case AMEM: + switch a, _ := types.AlgType(t); a { + case types.AMEM: base.Fatalf("hashfor with AMEM type") - case AINTER: + case types.AINTER: sym = Runtimepkg.Lookup("interhash") - case ANILINTER: + case types.ANILINTER: sym = Runtimepkg.Lookup("nilinterhash") - case ASTRING: + case types.ASTRING: sym = Runtimepkg.Lookup("strhash") - case AFLOAT32: + case types.AFLOAT32: sym = Runtimepkg.Lookup("f32hash") - case AFLOAT64: + case types.AFLOAT64: sym = Runtimepkg.Lookup("f64hash") - case ACPLX64: + case types.ACPLX64: sym = Runtimepkg.Lookup("c64hash") - case ACPLX128: + case types.ACPLX128: sym = Runtimepkg.Lookup("c128hash") default: // Note: the caller of hashfor ensured that this symbol @@ -457,40 +311,40 @@ func sysClosure(name string) *obj.LSym { // equality for two objects of type t. func geneq(t *types.Type) *obj.LSym { switch algtype(t) { - case ANOEQ: + case types.ANOEQ: // The runtime will panic if it tries to compare // a type with a nil equality function. 
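// That panic is reachable only through interfaces; a minimal example
// (hypothetical, using an uncomparable dynamic type):
//
//	var f, g interface{} = func() {}, func() {}
//	_ = f == g // runtime error: comparing uncomparable type func()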
return nil - case AMEM0: + case types.AMEM0: return sysClosure("memequal0") - case AMEM8: + case types.AMEM8: return sysClosure("memequal8") - case AMEM16: + case types.AMEM16: return sysClosure("memequal16") - case AMEM32: + case types.AMEM32: return sysClosure("memequal32") - case AMEM64: + case types.AMEM64: return sysClosure("memequal64") - case AMEM128: + case types.AMEM128: return sysClosure("memequal128") - case ASTRING: + case types.ASTRING: return sysClosure("strequal") - case AINTER: + case types.AINTER: return sysClosure("interequal") - case ANILINTER: + case types.ANILINTER: return sysClosure("nilinterequal") - case AFLOAT32: + case types.AFLOAT32: return sysClosure("f32equal") - case AFLOAT64: + case types.AFLOAT64: return sysClosure("f64equal") - case ACPLX64: + case types.ACPLX64: return sysClosure("c64equal") - case ACPLX128: + case types.ACPLX128: return sysClosure("c128equal") - case AMEM: + case types.AMEM: // make equality closure. The size of the type // is encoded in the closure. - closure := typeLookup(fmt.Sprintf(".eqfunc%d", t.Width)).Linksym() + closure := types.TypeSymLookup(fmt.Sprintf(".eqfunc%d", t.Width)).Linksym() if len(closure.P) != 0 { return closure } @@ -502,7 +356,7 @@ func geneq(t *types.Type) *obj.LSym { ot = duintptr(closure, ot, uint64(t.Width)) ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA) return closure - case ASPECIAL: + case types.ASPECIAL: break } @@ -766,7 +620,7 @@ func geneq(t *types.Type) *obj.LSym { Curfn = nil if base.Debug.DclStack != 0 { - testdclstack() + types.CheckDclstack() } // Disable checknils while compiling this code. @@ -904,7 +758,7 @@ func memrun(t *types.Type, start int) (size int64, next int) { break } // Stop run after a padded field. - if ispaddedfield(t, next-1) { + if types.IsPaddedField(t, next-1) { break } // Also, stop before a blank or non-memory field. @@ -914,16 +768,3 @@ func memrun(t *types.Type, start int) (size int64, next int) { } return t.Field(next-1).End() - t.Field(start).Offset, next } - -// ispaddedfield reports whether the i'th field of struct type t is followed -// by padding. 
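// Padding in this sense, for example (assuming 64-bit alignment):
//
//	struct{ a int8; b int64 } // a ends at offset 1; b starts at 8
//
// so field a is "padded", and plain memory equality over the whole
// struct would compare the seven junk bytes in between.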
-func ispaddedfield(t *types.Type, i int) bool { - if !t.IsStruct() { - base.Fatalf("ispaddedfield called non-struct %v", t) - } - end := t.Width - if i+1 < t.NumFields() { - end = t.Field(i + 1).Offset - } - return t.Field(i).End() != end -} diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go index f2f98bd51f3c2..92826d003bf25 100644 --- a/src/cmd/compile/internal/gc/align.go +++ b/src/cmd/compile/internal/gc/align.go @@ -40,7 +40,7 @@ func expandiface(t *types.Type) { switch prev := seen[m.Sym]; { case prev == nil: seen[m.Sym] = m - case langSupported(1, 14, t.Pkg()) && !explicit && types.Identical(m.Type, prev.Type): + case types.AllowsGoVersion(t.Pkg(), 1, 14) && !explicit && types.Identical(m.Type, prev.Type): return default: base.ErrorfAt(m.Pos, "duplicate method %s", m.Sym.Name) @@ -84,7 +84,7 @@ func expandiface(t *types.Type) { } } - sort.Sort(methcmp(methods)) + sort.Sort(types.MethodsByName(methods)) if int64(len(methods)) >= MaxWidth/int64(Widthptr) { base.ErrorfAt(typePos(t), "interface too large") @@ -325,8 +325,8 @@ func dowidth(t *types.Type) { // simtype == 0 during bootstrap default: - if simtype[t.Kind()] != 0 { - et = simtype[t.Kind()] + if types.SimType[t.Kind()] != 0 { + et = types.SimType[t.Kind()] } } diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 94bcf63263c64..553f06757f0c1 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -242,11 +242,11 @@ func operandType(op ir.Op, t *types.Type) *types.Type { switch op { case ir.OCOMPLEX: if t.IsComplex() { - return floatForComplex(t) + return types.FloatForComplex(t) } case ir.OREAL, ir.OIMAG: if t.IsFloat() { - return complexForFloat(t) + return types.ComplexForFloat(t) } default: if okfor[op][t.Kind()] { @@ -377,7 +377,7 @@ func doesoverflow(v constant.Value, t *types.Type) bool { return math.IsInf(f, 0) } case t.IsComplex(): - ft := floatForComplex(t) + ft := types.FloatForComplex(t) return doesoverflow(constant.Real(v), ft) || doesoverflow(constant.Imag(v), ft) } base.Fatalf("doesoverflow: %v, %v", v, t) diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 62cdff6b8e1dd..5a5f670a08616 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -28,12 +28,6 @@ func NoWriteBarrierRecCheck() { var nowritebarrierrecCheck *nowritebarrierrecChecker -func testdclstack() { - if !types.IsDclstackValid() { - base.Fatalf("mark left on the dclstack") - } -} - // redeclare emits a diagnostic about symbol s being redeclared at pos. func redeclare(pos src.XPos, s *types.Sym, where string) { if !s.Lastlineno.IsKnown() { @@ -555,13 +549,6 @@ func fakeRecvField() *types.Field { return types.NewField(src.NoXPos, nil, types.FakeRecvType()) } -// isifacemethod reports whether (field) m is -// an interface method. Such methods have the -// special receiver type types.FakeRecvType(). 
-func isifacemethod(f *types.Type) bool { - return f.Recv().Type == types.FakeRecvType() -} - // turn a parsed function declaration into a type func functype(nrecv *ir.Field, nparams, nresults []*ir.Field) *types.Type { funarg := func(n *ir.Field) *types.Field { @@ -685,7 +672,7 @@ func addmethod(n *ir.Func, msym *types.Sym, t *types.Type, local, nointerface bo return nil } - mt := methtype(rf.Type) + mt := types.ReceiverBaseType(rf.Type) if mt == nil || mt.Sym() == nil { pa := rf.Type t := pa @@ -883,7 +870,7 @@ func (c *nowritebarrierrecChecker) findExtraCalls(nn ir.Node) { if fn.Class_ != ir.PFUNC || fn.Name().Defn == nil { return } - if !isRuntimePkg(fn.Sym().Pkg) || fn.Sym().Name != "systemstack" { + if !types.IsRuntimePkg(fn.Sym().Pkg) || fn.Sym().Name != "systemstack" { return } diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index fb9cbf2d5107e..4366a5cc2c179 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -579,7 +579,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { } case ir.OCONVIFACE: n := n.(*ir.ConvExpr) - if !n.X.Type().IsInterface() && !isdirectiface(n.X.Type()) { + if !n.X.Type().IsInterface() && !types.IsDirectIface(n.X.Type()) { k = e.spill(k, n) } e.expr(k.note(n, "interface-converted"), n.X) @@ -1064,7 +1064,7 @@ func (k EscHole) deref(where ir.Node, why string) EscHole { return k.shift(1).no func (k EscHole) addr(where ir.Node, why string) EscHole { return k.shift(-1).note(where, why) } func (k EscHole) dotType(t *types.Type, where ir.Node, why string) EscHole { - if !t.IsInterface() && !isdirectiface(t) { + if !t.IsInterface() && !types.IsDirectIface(t) { k = k.shift(1) } return k.note(where, why) diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index 46ddda0ba76eb..7ec59852eecbf 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -5,7 +5,6 @@ package gc import ( - "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/ssa" "cmd/compile/internal/types" @@ -34,22 +33,6 @@ var ( smallArrayBytes = int64(256) ) -// isRuntimePkg reports whether p is package runtime. -func isRuntimePkg(p *types.Pkg) bool { - if base.Flag.CompilingRuntime && p == types.LocalPkg { - return true - } - return p.Path == "runtime" -} - -// isReflectPkg reports whether p is package reflect. 
-func isReflectPkg(p *types.Pkg) bool { - if p == types.LocalPkg { - return base.Ctxt.Pkgpath == "reflect" - } - return p.Path == "reflect" -} - // Slices in the runtime are represented by three components: // // type slice struct { @@ -101,15 +84,6 @@ var gopkg *types.Pkg // pseudo-package for method symbols on anonymous receiver var zerosize int64 -var simtype [types.NTYPE]types.Kind - -var ( - isInt [types.NTYPE]bool - isFloat [types.NTYPE]bool - isComplex [types.NTYPE]bool - issimple [types.NTYPE]bool -) - var ( okforeq [types.NTYPE]bool okforadd [types.NTYPE]bool @@ -121,8 +95,6 @@ var ( okforarith [types.NTYPE]bool ) -var okforcmp [types.NTYPE]bool - var ( okfor [ir.OEND][]bool iscmp [ir.OEND]bool diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index db55b1035cd0e..da2345c289bb5 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -283,7 +283,7 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { funcbody() if base.Debug.DclStack != 0 { - testdclstack() + types.CheckDclstack() } typecheckFunc(fn) diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index d601331ee4d7e..87db08e0d14e6 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -461,7 +461,7 @@ func (p *iexporter) doDecl(n *ir.Name) { w.value(n.Type(), n.Val()) case ir.OTYPE: - if IsAlias(n.Sym()) { + if types.IsDotAlias(n.Sym()) { // Alias. w.tag('A') w.pos(n.Pos()) @@ -1028,8 +1028,8 @@ func (w *exportWriter) typeExt(t *types.Type) { w.int64(i[1]) return } - w.symIdx(typesym(t)) - w.symIdx(typesym(t.PtrTo())) + w.symIdx(types.TypeSym(t)) + w.symIdx(types.TypeSym(t.PtrTo())) } // Inline bodies. diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 49e0bcc470c6a..47fdc7b9b70ca 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -350,7 +350,7 @@ func (v *hairyVisitor) doNode(n ir.Node) error { // runtime.throw is a "cheap call" like panic in normal code. if n.X.Op() == ir.ONAME { name := n.X.(*ir.Name) - if name.Class_ == ir.PFUNC && isRuntimePkg(name.Sym().Pkg) { + if name.Class_ == ir.PFUNC && types.IsRuntimePkg(name.Sym().Pkg) { fn := name.Sym().Name if fn == "getcallerpc" || fn == "getcallersp" { return errors.New("call to " + fn) @@ -382,7 +382,7 @@ func (v *hairyVisitor) doNode(n ir.Node) error { if t == nil { base.Fatalf("no function type for [%p] %+v\n", n.X, n.X) } - if isRuntimePkg(n.X.Sym().Pkg) { + if types.IsRuntimePkg(n.X.Sym().Pkg) { fn := n.X.Sym().Name if fn == "heapBits.nextArena" { // Special case: explicitly allow @@ -589,7 +589,7 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No // Prevent inlining some reflect.Value methods when using checkptr, // even when package reflect was compiled without it (#35073). n := n.(*ir.CallExpr) - if s := n.X.Sym(); base.Debug.Checkptr != 0 && isReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") { + if s := n.X.Sym(); base.Debug.Checkptr != 0 && types.IsReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") { return n } } @@ -844,7 +844,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b return n } - if base.Flag.Cfg.Instrumenting && isRuntimePkg(fn.Sym().Pkg) { + if base.Flag.Cfg.Instrumenting && types.IsRuntimePkg(fn.Sym().Pkg) { // Runtime package must not be instrumented. 
// Instrument skips runtime package. However, some runtime code can be // inlined into other packages and instrumented there. To avoid this, diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index feded3f9b2bcb..15646ff8c759a 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -23,13 +23,11 @@ import ( "flag" "fmt" "go/constant" - "internal/goversion" "io" "io/ioutil" "log" "os" "path" - "regexp" "runtime" "sort" "strconv" @@ -153,7 +151,7 @@ func Main(archInit func(*Arch)) { log.Fatalf("location lists requested but register mapping not available on %v", base.Ctxt.Arch.Name) } - checkLang() + types.ParseLangFlag() if base.Flag.SymABIs != "" { readSymABIs(base.Flag.SymABIs, base.Ctxt.Pkgpath) @@ -858,7 +856,7 @@ func clearImports() { s.Def = nil continue } - if IsAlias(s) { + if types.IsDotAlias(s) { // throw away top-level name left over // from previous import . "x" // We'll report errors after type checking in checkDotImports. @@ -873,10 +871,6 @@ func clearImports() { } } -func IsAlias(sym *types.Sym) bool { - return sym.Def != nil && sym.Def.Sym() != sym -} - // recordFlags records the specified command-line flags to be placed // in the DWARF info. func recordFlags(flags ...string) { @@ -944,89 +938,6 @@ func recordPackageName() { s.P = []byte(types.LocalPkg.Name) } -// currentLang returns the current language version. -func currentLang() string { - return fmt.Sprintf("go1.%d", goversion.Version) -} - -// goVersionRE is a regular expression that matches the valid -// arguments to the -lang flag. -var goVersionRE = regexp.MustCompile(`^go([1-9][0-9]*)\.(0|[1-9][0-9]*)$`) - -// A lang is a language version broken into major and minor numbers. -type lang struct { - major, minor int -} - -// langWant is the desired language version set by the -lang flag. -// If the -lang flag is not set, this is the zero value, meaning that -// any language version is supported. -var langWant lang - -// AllowsGoVersion reports whether a particular package -// is allowed to use Go version major.minor. -// We assume the imported packages have all been checked, -// so we only have to check the local package against the -lang flag. -func AllowsGoVersion(pkg *types.Pkg, major, minor int) bool { - if pkg == nil { - // TODO(mdempsky): Set Pkg for local types earlier. - pkg = types.LocalPkg - } - if pkg != types.LocalPkg { - // Assume imported packages passed type-checking. - return true - } - if langWant.major == 0 && langWant.minor == 0 { - return true - } - return langWant.major > major || (langWant.major == major && langWant.minor >= minor) -} - -func langSupported(major, minor int, pkg *types.Pkg) bool { - return AllowsGoVersion(pkg, major, minor) -} - -// checkLang verifies that the -lang flag holds a valid value, and -// exits if not. It initializes data used by langSupported. -func checkLang() { - if base.Flag.Lang == "" { - return - } - - var err error - langWant, err = parseLang(base.Flag.Lang) - if err != nil { - log.Fatalf("invalid value %q for -lang: %v", base.Flag.Lang, err) - } - - if def := currentLang(); base.Flag.Lang != def { - defVers, err := parseLang(def) - if err != nil { - log.Fatalf("internal error parsing default lang %q: %v", def, err) - } - if langWant.major > defVers.major || (langWant.major == defVers.major && langWant.minor > defVers.minor) { - log.Fatalf("invalid value %q for -lang: max known version is %q", base.Flag.Lang, def) - } - } -} - -// parseLang parses a -lang option into a langVer. 
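// Call sites now reach the version check through types directly, e.g.
// (from the noder.go hunk below):
//
//	if n.Alias() && !types.AllowsGoVersion(types.LocalPkg, 1, 9) {
//		base.ErrorfAt(nod.Pos(), "type aliases only supported as of -lang=go1.9")
//	}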
-func parseLang(s string) (lang, error) { - matches := goVersionRE.FindStringSubmatch(s) - if matches == nil { - return lang{}, fmt.Errorf(`should be something like "go1.12"`) - } - major, err := strconv.Atoi(matches[1]) - if err != nil { - return lang{}, err - } - minor, err := strconv.Atoi(matches[2]) - if err != nil { - return lang{}, err - } - return lang{major: major, minor: minor}, nil -} - // useNewABIWrapGen returns TRUE if the compiler should generate an // ABI wrapper for the function 'f'. func useABIWrapGen(f *ir.Func) bool { diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index bed37efb87930..77a45f002309d 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -72,7 +72,7 @@ func parseFiles(filenames []string) uint { base.ErrorExit() } // Always run testdclstack here, even when debug_dclstack is not set, as a sanity measure. - testdclstack() + types.CheckDclstack() } for _, p := range noders { @@ -485,7 +485,7 @@ func (p *noder) typeDecl(decl *syntax.TypeDecl) ir.Node { } nod := ir.NewDecl(p.pos(decl), ir.ODCLTYPE, n) - if n.Alias() && !langSupported(1, 9, types.LocalPkg) { + if n.Alias() && !types.AllowsGoVersion(types.LocalPkg, 1, 9) { base.ErrorfAt(nod.Pos(), "type aliases only supported as of -lang=go1.9") } return nod @@ -1401,7 +1401,7 @@ func (p *noder) binOp(op syntax.Operator) ir.Op { // literal is not compatible with the current language version. func checkLangCompat(lit *syntax.BasicLit) { s := lit.Value - if len(s) <= 2 || langSupported(1, 13, types.LocalPkg) { + if len(s) <= 2 || types.AllowsGoVersion(types.LocalPkg, 1, 13) { return } // len(s) > 2 diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 9634cd51ae961..883033e0c26be 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -259,7 +259,7 @@ func dumpGlobalConst(n ir.Node) { return } } - base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym().Name, typesymname(t), ir.IntVal(t, v)) + base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym().Name, types.TypeSymName(t), ir.IntVal(t, v)) } func dumpglobls(externs []ir.Node) { diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 738b403b99983..9e792d153c8bf 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -1332,7 +1332,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { case ir.ODOTTYPE, ir.ODOTTYPE2: n := n.(*ir.TypeAssertExpr) n.X = o.expr(n.X, nil) - if !isdirectiface(n.Type()) || base.Flag.Cfg.Instrumenting { + if !types.IsDirectIface(n.Type()) || base.Flag.Cfg.Instrumenting { return o.copyExprClear(n) } return n diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 785e01663f2f1..d6c15f113b987 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -552,7 +552,7 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class_, n) } - typename := dwarf.InfoPrefix + typesymname(n.Type()) + typename := dwarf.InfoPrefix + types.TypeSymName(n.Type()) delete(fnsym.Func().Autot, ngotype(n).Linksym()) inlIndex := 0 if base.Flag.GenDwarfInl > 1 { @@ -655,7 +655,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir decls = append(decls, n) continue } - typename := dwarf.InfoPrefix + typesymname(n.Type()) + typename := dwarf.InfoPrefix + 
types.TypeSymName(n.Type()) decls = append(decls, n) abbrev := dwarf.DW_ABRV_AUTO_LOCLIST isReturnValue := (n.Class_ == ir.PPARAMOUT) diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index 078f03bc68790..463d0c55bdda0 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -493,7 +493,7 @@ func isMapClear(n *ir.RangeStmt) bool { } // Keys where equality is not reflexive can not be deleted from maps. - if !isreflexive(m.Type().Key()) { + if !types.IsReflexive(m.Type().Key()) { return false } diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 07552e64b4623..12fc6b7fa72b3 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -135,7 +135,7 @@ func bmap(t *types.Type) *types.Type { dowidth(bucket) // Check invariants that map code depends on. - if !IsComparable(t.Key()) { + if !types.IsComparable(t.Key()) { base.Fatalf("unsupported map key type for %v", t) } if BUCKETSIZE < 8 { @@ -373,7 +373,7 @@ func methodfunc(f *types.Type, receiver *types.Type) *types.Type { // Generates stub functions as needed. func methods(t *types.Type) []*Sig { // method type - mt := methtype(t) + mt := types.ReceiverBaseType(t) if mt == nil { return nil @@ -383,7 +383,7 @@ func methods(t *types.Type) []*Sig { // type stored in interface word it := t - if !isdirectiface(it) { + if !types.IsDirectIface(it) { it = types.NewPtr(t) } @@ -410,7 +410,7 @@ func methods(t *types.Type) []*Sig { // if pointer receiver but non-pointer t and // this is not an embedded pointer inside a struct, // method does not apply. - if !isMethodApplicable(t, f) { + if !types.IsMethodApplicable(t, f) { continue } @@ -848,7 +848,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { ot := 0 ot = duintptr(lsym, ot, uint64(t.Width)) ot = duintptr(lsym, ot, uint64(ptrdata)) - ot = duint32(lsym, ot, typehash(t)) + ot = duint32(lsym, ot, types.TypeHash(t)) var tflag uint8 if uncommonSize(t) != 0 { @@ -895,7 +895,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { ot = duint8(lsym, ot, t.Align) // fieldAlign i = kinds[t.Kind()] - if isdirectiface(t) { + if types.IsDirectIface(t) { i |= objabi.KindDirectIface } if useGCProg { @@ -923,40 +923,6 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { return ot } -// typeHasNoAlg reports whether t does not have any associated hash/eq -// algorithms because t, or some component of t, is marked Noalg. -func typeHasNoAlg(t *types.Type) bool { - a, bad := algtype1(t) - return a == ANOEQ && bad.Noalg() -} - -func typesymname(t *types.Type) string { - name := t.ShortString() - // Use a separate symbol name for Noalg types for #17752. - if typeHasNoAlg(t) { - name = "noalg." + name - } - return name -} - -// Fake package for runtime type info (headers) -// Don't access directly, use typeLookup below. -var ( - typepkgmu sync.Mutex // protects typepkg lookups - typepkg = types.NewPkg("type", "type") -) - -func typeLookup(name string) *types.Sym { - typepkgmu.Lock() - s := typepkg.Lookup(name) - typepkgmu.Unlock() - return s -} - -func typesym(t *types.Type) *types.Sym { - return typeLookup(typesymname(t)) -} - // tracksym returns the symbol for tracking use of field/method f, assumed // to be a member of struct/interface type t. 
func tracksym(t *types.Type, f *types.Field) *types.Sym { @@ -965,7 +931,7 @@ func tracksym(t *types.Type, f *types.Field) *types.Sym { func typesymprefix(prefix string, t *types.Type) *types.Sym { p := prefix + "." + t.ShortString() - s := typeLookup(p) + s := types.TypeSymLookup(p) // This function is for looking up type-related generated functions // (e.g. eq and hash). Make sure they are indeed generated. @@ -982,7 +948,7 @@ func typenamesym(t *types.Type) *types.Sym { if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() { base.Fatalf("typenamesym %v", t) } - s := typesym(t) + s := types.TypeSym(t) signatmu.Lock() addsignat(t) signatmu.Unlock() @@ -1025,52 +991,6 @@ func itabname(t, itype *types.Type) *ir.AddrExpr { return n } -// isreflexive reports whether t has a reflexive equality operator. -// That is, if x==x for all x of type t. -func isreflexive(t *types.Type) bool { - switch t.Kind() { - case types.TBOOL, - types.TINT, - types.TUINT, - types.TINT8, - types.TUINT8, - types.TINT16, - types.TUINT16, - types.TINT32, - types.TUINT32, - types.TINT64, - types.TUINT64, - types.TUINTPTR, - types.TPTR, - types.TUNSAFEPTR, - types.TSTRING, - types.TCHAN: - return true - - case types.TFLOAT32, - types.TFLOAT64, - types.TCOMPLEX64, - types.TCOMPLEX128, - types.TINTER: - return false - - case types.TARRAY: - return isreflexive(t.Elem()) - - case types.TSTRUCT: - for _, t1 := range t.Fields().Slice() { - if !isreflexive(t1.Type) { - return false - } - } - return true - - default: - base.Fatalf("bad type for map key: %v", t) - return false - } -} - // needkeyupdate reports whether map updates with t as a key // need the key to be updated. func needkeyupdate(t *types.Type) bool { @@ -1139,7 +1059,7 @@ func dtypesym(t *types.Type) *obj.LSym { base.Fatalf("dtypesym %v", t) } - s := typesym(t) + s := types.TypeSym(t) lsym := s.Linksym() if s.Siggen() { return lsym @@ -1310,7 +1230,7 @@ func dtypesym(t *types.Type) *obj.LSym { ot = duint8(lsym, ot, uint8(t.Elem().Width)) } ot = duint16(lsym, ot, uint16(bmap(t).Width)) - if isreflexive(t.Key()) { + if types.IsReflexive(t.Key()) { flags |= 4 // reflexive key } if needkeyupdate(t.Key()) { @@ -1404,7 +1324,7 @@ func dtypesym(t *types.Type) *obj.LSym { } } // Do not put Noalg types in typelinks. See issue #22605. - if typeHasNoAlg(t) { + if types.TypeHasNoAlg(t) { keep = false } lsym.Set(obj.AttrMakeTypelink, keep) @@ -1528,7 +1448,7 @@ func dumpsignats() { signats = signats[:0] // Transfer entries to a slice and sort, for reproducible builds. 
for _, t := range signatslice { - signats = append(signats, typeAndStr{t: t, short: typesymname(t), regular: t.String()}) + signats = append(signats, typeAndStr{t: t, short: types.TypeSymName(t), regular: t.String()}) delete(signatset, t) } signatslice = signatslice[:0] @@ -1556,8 +1476,8 @@ func dumptabs() { // } o := dsymptr(i.lsym, 0, dtypesym(i.itype), 0) o = dsymptr(i.lsym, o, dtypesym(i.t), 0) - o = duint32(i.lsym, o, typehash(i.t)) // copy of type hash - o += 4 // skip unused field + o = duint32(i.lsym, o, types.TypeHash(i.t)) // copy of type hash + o += 4 // skip unused field for _, fn := range genfun(i.t, i.itype) { o = dsymptr(i.lsym, o, fn, 0) // method pointer for each method } diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 9445627b41a7c..c9a554079d68a 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -324,7 +324,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type addrsym(l, loff, itab.X.(*ir.Name), 0) // Emit data. - if isdirectiface(val.Type()) { + if types.IsDirectIface(val.Type()) { if val.Op() == ir.ONIL { // Nil is zero, nothing to do. return true @@ -506,7 +506,7 @@ func isStaticCompositeLiteral(n ir.Node) bool { if val.Type().IsInterface() { return val.Op() == ir.ONIL } - if isdirectiface(val.Type()) && val.Op() == ir.ONIL { + if types.IsDirectIface(val.Type()) && val.Op() == ir.ONIL { return true } return isStaticCompositeLiteral(val) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 0bca2baa17b41..722a3257da87f 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -1916,28 +1916,6 @@ func (s *state) ssaOp(op ir.Op, t *types.Type) ssa.Op { return x } -func floatForComplex(t *types.Type) *types.Type { - switch t.Kind() { - case types.TCOMPLEX64: - return types.Types[types.TFLOAT32] - case types.TCOMPLEX128: - return types.Types[types.TFLOAT64] - } - base.Fatalf("unexpected type: %v", t) - return nil -} - -func complexForFloat(t *types.Type) *types.Type { - switch t.Kind() { - case types.TFLOAT32: - return types.Types[types.TCOMPLEX64] - case types.TFLOAT64: - return types.Types[types.TCOMPLEX128] - } - base.Fatalf("unexpected type: %v", t) - return nil -} - type opAndTwoTypes struct { op ir.Op etype1 types.Kind @@ -2458,8 +2436,8 @@ func (s *state) expr(n ir.Node) *ssa.Value { } else { s.Fatalf("weird complex conversion %v -> %v", ft, tt) } - ftp := floatForComplex(ft) - ttp := floatForComplex(tt) + ftp := types.FloatForComplex(ft) + ttp := types.FloatForComplex(tt) return s.newValue2(ssa.OpComplexMake, tt, s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)), s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x))) @@ -2479,7 +2457,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { a := s.expr(n.X) b := s.expr(n.Y) if n.X.Type().IsComplex() { - pt := floatForComplex(n.X.Type()) + pt := types.FloatForComplex(n.X.Type()) op := s.ssaOp(ir.OEQ, pt) r := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)) i := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)) @@ -2516,8 +2494,8 @@ func (s *state) expr(n ir.Node) *ssa.Value { mulop := ssa.OpMul64F addop := ssa.OpAdd64F subop := ssa.OpSub64F - pt := floatForComplex(n.Type()) // Could be Float32 or Float64 - wt := types.Types[types.TFLOAT64] // 
Compute in Float64 to minimize cancellation error + pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64 + wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error areal := s.newValue1(ssa.OpComplexReal, pt, a) breal := s.newValue1(ssa.OpComplexReal, pt, b) @@ -2560,8 +2538,8 @@ func (s *state) expr(n ir.Node) *ssa.Value { addop := ssa.OpAdd64F subop := ssa.OpSub64F divop := ssa.OpDiv64F - pt := floatForComplex(n.Type()) // Could be Float32 or Float64 - wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error + pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64 + wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error areal := s.newValue1(ssa.OpComplexReal, pt, a) breal := s.newValue1(ssa.OpComplexReal, pt, b) @@ -2606,7 +2584,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { a := s.expr(n.X) b := s.expr(n.Y) if n.Type().IsComplex() { - pt := floatForComplex(n.Type()) + pt := types.FloatForComplex(n.Type()) op := s.ssaOp(n.Op(), pt) return s.newValue2(ssa.OpComplexMake, n.Type(), s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)), @@ -2694,7 +2672,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { n := n.(*ir.UnaryExpr) a := s.expr(n.X) if n.Type().IsComplex() { - tp := floatForComplex(n.Type()) + tp := types.FloatForComplex(n.Type()) negop := s.ssaOp(n.Op(), tp) return s.newValue2(ssa.OpComplexMake, n.Type(), s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)), @@ -6147,7 +6125,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val } // Converting to a concrete type. - direct := isdirectiface(n.Type()) + direct := types.IsDirectIface(n.Type()) itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface if base.Debug.TypeAssert > 0 { base.WarnfAt(n.Pos(), "type assertion inlined") @@ -6442,7 +6420,7 @@ func emitStackObjects(e *ssafn, pp *Progs) { // in which case the offset is relative to argp. // Locals have a negative Xoffset, in which case the offset is relative to varp. off = duintptr(x, off, uint64(v.FrameOffset())) - if !typesym(v.Type()).Siggen() { + if !types.TypeSym(v.Type()).Siggen() { e.Fatalf(v.Pos(), "stack object's type symbol not generated for type %s", v.Type()) } off = dsymptr(x, off, dtypesym(v.Type()), 0) diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 6e130d4889664..d8956633b27b8 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -9,8 +9,6 @@ import ( "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/src" - "crypto/md5" - "encoding/binary" "fmt" "go/constant" "sort" @@ -170,13 +168,6 @@ func NewName(s *types.Sym) *ir.Name { return n } -// methcmp sorts methods by symbol. -type methcmp []*types.Field - -func (x methcmp) Len() int { return len(x) } -func (x methcmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x methcmp) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) } - func nodintconst(v int64) ir.Node { return ir.NewLiteral(constant.MakeInt64(v)) } @@ -212,41 +203,6 @@ func isptrto(t *types.Type, et types.Kind) bool { return true } -// methtype returns the underlying type, if any, -// that owns methods with receiver parameter t. -// The result is either a named type or an anonymous struct. -func methtype(t *types.Type) *types.Type { - if t == nil { - return nil - } - - // Strip away pointer if it's there. 
- if t.IsPtr() { - if t.Sym() != nil { - return nil - } - t = t.Elem() - if t == nil { - return nil - } - } - - // Must be a named type or anonymous struct. - if t.Sym() == nil && !t.IsStruct() { - return nil - } - - // Check types. - if issimple[t.Kind()] { - return t - } - switch t.Kind() { - case types.TARRAY, types.TCHAN, types.TFUNC, types.TMAP, types.TSLICE, types.TSTRING, types.TSTRUCT: - return t - } - return nil -} - // Is type src assignment compatible to type dst? // If so, return op code to use in conversion. // If not, return OXXX. In this case, the string return parameter may @@ -294,7 +250,7 @@ func assignop(src, dst *types.Type) (ir.Op, string) { // gets added to itabs early, which allows // us to de-virtualize calls through this // type/interface pair later. See peekitabs in reflect.go - if isdirectiface(src) && !dst.IsEmptyInterface() { + if types.IsDirectIface(src) && !dst.IsEmptyInterface() { NeedITab(src, dst) } @@ -429,7 +385,7 @@ func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) { // 4. src and dst are both integer or floating point types. if (src.IsInteger() || src.IsFloat()) && (dst.IsInteger() || dst.IsFloat()) { - if simtype[src.Kind()] == simtype[dst.Kind()] { + if types.SimType[src.Kind()] == types.SimType[dst.Kind()] { return ir.OCONVNOP, "" } return ir.OCONV, "" @@ -437,7 +393,7 @@ func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) { // 5. src and dst are both complex types. if src.IsComplex() && dst.IsComplex() { - if simtype[src.Kind()] == simtype[dst.Kind()] { + if types.SimType[src.Kind()] == types.SimType[dst.Kind()] { return ir.OCONVNOP, "" } return ir.OCONV, "" @@ -574,15 +530,6 @@ func syslook(name string) *ir.Name { return ir.AsNode(s.Def).(*ir.Name) } -// typehash computes a hash value for type t to use in type switch statements. -func typehash(t *types.Type) uint32 { - p := t.LongString() - - // Using MD5 is overkill, but reduces accidental collisions. - h := md5.Sum([]byte(p)) - return binary.LittleEndian.Uint32(h[:4]) -} - // updateHasCall checks whether expression n contains any function // calls and sets the n.HasCall flag if so. func updateHasCall(n ir.Node) { @@ -627,25 +574,25 @@ func calcHasCall(n ir.Node) bool { // so we ensure they are evaluated first. 
case ir.OADD, ir.OSUB, ir.OMUL: n := n.(*ir.BinaryExpr) - if thearch.SoftFloat && (isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) { + if thearch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) { return true } return n.X.HasCall() || n.Y.HasCall() case ir.ONEG: n := n.(*ir.UnaryExpr) - if thearch.SoftFloat && (isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) { + if thearch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) { return true } return n.X.HasCall() case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT: n := n.(*ir.BinaryExpr) - if thearch.SoftFloat && (isFloat[n.X.Type().Kind()] || isComplex[n.X.Type().Kind()]) { + if thearch.SoftFloat && (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()]) { return true } return n.X.HasCall() || n.Y.HasCall() case ir.OCONV: n := n.(*ir.ConvExpr) - if thearch.SoftFloat && ((isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) || (isFloat[n.X.Type().Kind()] || isComplex[n.X.Type().Kind()])) { + if thearch.SoftFloat && ((types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) || (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()])) { return true } return n.X.HasCall() @@ -893,7 +840,7 @@ func lookdot0(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) // If t is a defined pointer type, then x.m is shorthand for (*x).m. u = t.Elem() } - u = methtype(u) + u = types.ReceiverBaseType(u) if u != nil { for _, f := range u.Methods().Slice() { if f.Embedded == 0 && (f.Sym == s || (ignorecase && strings.EqualFold(f.Sym.Name, s.Name))) { @@ -1056,7 +1003,7 @@ func expand0(t *types.Type) { return } - u = methtype(t) + u = types.ReceiverBaseType(t) if u != nil { for _, f := range u.Methods().Slice() { if f.Sym.Uniq() { @@ -1147,7 +1094,7 @@ func expandmeth(t *types.Type) { } ms = append(ms, t.Methods().Slice()...) - sort.Sort(methcmp(ms)) + sort.Sort(types.MethodsByName(ms)) t.AllMethods().Set(ms) } @@ -1243,7 +1190,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { // the TOC to the appropriate value for that module. But if it returns // directly to the wrapper's caller, nothing will reset it to the correct // value for that function. - if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !isifacemethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) { + if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) { // generate tail call: adjust pointer receiver and jump to embedded method. left := dot.X // skip final .M if !left.Type().IsPtr() { @@ -1272,7 +1219,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { funcbody() if base.Debug.DclStack != 0 { - testdclstack() + types.CheckDclstack() } typecheckFunc(fn) @@ -1373,7 +1320,7 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool return true } - t = methtype(t) + t = types.ReceiverBaseType(t) var tms []*types.Field if t != nil { expandmeth(t) @@ -1405,7 +1352,7 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool // if pointer receiver in method, // the method does not exist for value types. 
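	// (A method with a pointer receiver is in the method set of *T, but not of T.)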
rcvr := tm.Type.Recv().Type - if rcvr.IsPtr() && !t0.IsPtr() && !followptr && !isifacemethod(tm.Type) { + if rcvr.IsPtr() && !t0.IsPtr() && !followptr && !types.IsInterfaceMethod(tm.Type) { if false && base.Flag.LowerR != 0 { base.Errorf("interface pointer mismatch") } @@ -1508,35 +1455,6 @@ func isbadimport(path string, allowSpace bool) bool { return false } -// Can this type be stored directly in an interface word? -// Yes, if the representation is a single pointer. -func isdirectiface(t *types.Type) bool { - if t.Broke() { - return false - } - - switch t.Kind() { - case types.TPTR: - // Pointers to notinheap types must be stored indirectly. See issue 42076. - return !t.Elem().NotInHeap() - case types.TCHAN, - types.TMAP, - types.TFUNC, - types.TUNSAFEPTR: - return true - - case types.TARRAY: - // Array of 1 direct iface type can be direct. - return t.NumElem() == 1 && isdirectiface(t.Elem()) - - case types.TSTRUCT: - // Struct with 1 field of direct iface type can be direct. - return t.NumFields() == 1 && isdirectiface(t.Field(0).Type) - } - - return false -} - // itabType loads the _type field from a runtime.itab struct. func itabType(itab ir.Node) ir.Node { typ := ir.NewSelectorExpr(base.Pos, ir.ODOTPTR, itab, nil) @@ -1555,7 +1473,7 @@ func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node { base.Fatalf("ifaceData interface: %v", t) } ptr := ir.NewUnaryExpr(pos, ir.OIDATA, n) - if isdirectiface(t) { + if types.IsDirectIface(t) { ptr.SetType(t) ptr.SetTypecheck(1) return ptr diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index ab241a38138b8..513b890355ec3 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -166,9 +166,9 @@ func typecheckExprSwitch(n *ir.SwitchStmt) { case t.IsSlice(): nilonly = "slice" - case !IsComparable(t): + case !types.IsComparable(t): if t.IsStruct() { - base.ErrorfAt(n.Pos(), "cannot switch on %L (struct containing %v cannot be compared)", n.Tag, IncomparableField(t).Type) + base.ErrorfAt(n.Pos(), "cannot switch on %L (struct containing %v cannot be compared)", n.Tag, types.IncomparableField(t).Type) } else { base.ErrorfAt(n.Pos(), "cannot switch on %L", n.Tag) } @@ -200,7 +200,7 @@ func typecheckExprSwitch(n *ir.SwitchStmt) { if nilonly != "" && !ir.IsNil(n1) { base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Tag) - } else if t.IsInterface() && !n1.Type().IsInterface() && !IsComparable(n1.Type()) { + } else if t.IsInterface() && !n1.Type().IsInterface() && !types.IsComparable(n1.Type()) { base.ErrorfAt(ncase.Pos(), "invalid case %L in switch (incomparable type)", n1) } else { op1, _ := assignop(n1.Type(), t) @@ -339,7 +339,7 @@ type exprClause struct { func (s *exprSwitch) Add(pos src.XPos, expr, jmp ir.Node) { c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp} - if okforcmp[s.exprname.Type().Kind()] && expr.Op() == ir.OLITERAL { + if types.IsOrdered[s.exprname.Type().Kind()] && expr.Op() == ir.OLITERAL { s.clauses = append(s.clauses, c) return } @@ -670,7 +670,7 @@ func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp ir.Node) { if !typ.IsInterface() { s.clauses = append(s.clauses, typeClause{ - hash: typehash(typ), + hash: types.TypeHash(typ), body: body, }) return diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 4f1fe240ec951..5e13facc4f9fa 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ 
-837,7 +837,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetType(nil) return n } - if t.IsSigned() && !langSupported(1, 13, curpkg()) { + if t.IsSigned() && !types.AllowsGoVersion(curpkg(), 1, 13) { base.ErrorfVers("go1.13", "invalid operation: %v (signed shift count type %v)", n, r.Type()) n.SetType(nil) return n @@ -904,7 +904,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { if r.Type().Kind() != types.TBLANK { aop, _ = assignop(l.Type(), r.Type()) if aop != ir.OXXX { - if r.Type().IsInterface() && !l.Type().IsInterface() && !IsComparable(l.Type()) { + if r.Type().IsInterface() && !l.Type().IsInterface() && !types.IsComparable(l.Type()) { base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type())) n.SetType(nil) return n @@ -925,7 +925,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { if !converted && l.Type().Kind() != types.TBLANK { aop, _ = assignop(r.Type(), l.Type()) if aop != ir.OXXX { - if l.Type().IsInterface() && !r.Type().IsInterface() && !IsComparable(r.Type()) { + if l.Type().IsInterface() && !r.Type().IsInterface() && !types.IsComparable(r.Type()) { base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type())) n.SetType(nil) return n @@ -969,7 +969,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { // okfor allows any array == array, map == map, func == func. // restrict to slice/map/func == nil and nil == slice/map/func. - if l.Type().IsArray() && !IsComparable(l.Type()) { + if l.Type().IsArray() && !types.IsComparable(l.Type()) { base.Errorf("invalid operation: %v (%v cannot be compared)", n, l.Type()) n.SetType(nil) return n @@ -994,7 +994,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } if l.Type().IsStruct() { - if f := IncomparableField(l.Type()); f != nil { + if f := types.IncomparableField(l.Type()); f != nil { base.Errorf("invalid operation: %v (struct containing %v cannot be compared)", n, f.Type) n.SetType(nil) return n @@ -1627,7 +1627,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetType(l.Type().Results().Field(0).Type) if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME { - if sym := n.X.(*ir.Name).Sym(); isRuntimePkg(sym.Pkg) && sym.Name == "getg" { + if sym := n.X.(*ir.Name).Sym(); types.IsRuntimePkg(sym.Pkg) && sym.Name == "getg" { // Emit code for runtime.getg() directly instead of calling function. // Most such rewrites (for example the similar one for math.Sqrt) should be done in walk, // so that the ordering pass can make sure to preserve the semantics of the original code @@ -2560,7 +2560,7 @@ func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) { if t.IsInterface() { ms = t.Fields() } else { - mt := methtype(t) + mt := types.ReceiverBaseType(t) if mt == nil { base.Errorf("%v undefined (type %v has no method %v)", n, t, n.Sel) n.SetType(nil) @@ -2595,7 +2595,7 @@ func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) { return n } - if !isMethodApplicable(t, m) { + if !types.IsMethodApplicable(t, m) { base.Errorf("invalid method expression %v (needs pointer receiver: (*%v).%S)", n, t, s) n.SetType(nil) return n @@ -2616,14 +2616,6 @@ func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) { return me } -// isMethodApplicable reports whether method m can be called on a -// value of type t. This is necessary because we compute a single -// method set for both T and *T, but some *T methods are not -// applicable to T receivers. 
-func isMethodApplicable(t *types.Type, m *types.Field) bool { - return t.IsPtr() || !m.Type.Recv().Type.IsPtr() || isifacemethod(m.Type) || m.Embedded == 2 -} - func derefall(t *types.Type) *types.Type { for t != nil && t.IsPtr() { t = t.Elem() @@ -2642,7 +2634,7 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { var f2 *types.Field if n.X.Type() == t || n.X.Type().Sym() == nil { - mt := methtype(t) + mt := types.ReceiverBaseType(t) if mt != nil { f2 = lookdot1(n, s, mt, mt.Methods(), dostrcmp) } @@ -3406,7 +3398,7 @@ func samesafeexpr(l ir.Node, r ir.Node) bool { r := r.(*ir.ConvExpr) // Some conversions can't be reused, such as []byte(str). // Allow only numeric-ish types. This is a bit conservative. - return issimple[l.Type().Kind()] && samesafeexpr(l.X, r.X) + return types.IsSimple[l.Type().Kind()] && samesafeexpr(l.X, r.X) case ir.OINDEX, ir.OINDEXMAP: l := l.(*ir.IndexExpr) @@ -3680,7 +3672,7 @@ var mapqueue []*ir.MapType func checkMapKeys() { for _, n := range mapqueue { k := n.Type().MapType().Key - if !k.Broke() && !IsComparable(k) { + if !k.Broke() && !types.IsComparable(k) { base.ErrorfAt(n.Pos(), "invalid map key type %v", k) } } diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index cf20583042db4..f2c719db38eef 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -90,7 +90,7 @@ func initUniverse() { sizeofString = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr)) for et := types.Kind(0); et < types.NTYPE; et++ { - simtype[et] = et + types.SimType[et] = et } types.Types[types.TANY] = types.New(types.TANY) @@ -117,7 +117,7 @@ func initUniverse() { if Widthptr == 8 { sameas = s.sameas64 } - simtype[s.etype] = sameas + types.SimType[s.etype] = sameas types.Types[s.etype] = defBasic(s.etype, types.BuiltinPkg, s.name) } @@ -144,10 +144,10 @@ func initUniverse() { types.Types[types.TUNSAFEPTR] = defBasic(types.TUNSAFEPTR, unsafepkg, "Pointer") // simple aliases - simtype[types.TMAP] = types.TPTR - simtype[types.TCHAN] = types.TPTR - simtype[types.TFUNC] = types.TPTR - simtype[types.TUNSAFEPTR] = types.TPTR + types.SimType[types.TMAP] = types.TPTR + types.SimType[types.TCHAN] = types.TPTR + types.SimType[types.TFUNC] = types.TPTR + types.SimType[types.TUNSAFEPTR] = types.TPTR for _, s := range &builtinFuncs { s2 := types.BuiltinPkg.Lookup(s.name) @@ -194,49 +194,49 @@ func initUniverse() { s.Def = ir.NewIota(base.Pos, s) for et := types.TINT8; et <= types.TUINT64; et++ { - isInt[et] = true + types.IsInt[et] = true } - isInt[types.TINT] = true - isInt[types.TUINT] = true - isInt[types.TUINTPTR] = true + types.IsInt[types.TINT] = true + types.IsInt[types.TUINT] = true + types.IsInt[types.TUINTPTR] = true - isFloat[types.TFLOAT32] = true - isFloat[types.TFLOAT64] = true + types.IsFloat[types.TFLOAT32] = true + types.IsFloat[types.TFLOAT64] = true - isComplex[types.TCOMPLEX64] = true - isComplex[types.TCOMPLEX128] = true + types.IsComplex[types.TCOMPLEX64] = true + types.IsComplex[types.TCOMPLEX128] = true // initialize okfor for et := types.Kind(0); et < types.NTYPE; et++ { - if isInt[et] || et == types.TIDEAL { + if types.IsInt[et] || et == types.TIDEAL { okforeq[et] = true - okforcmp[et] = true + types.IsOrdered[et] = true okforarith[et] = true okforadd[et] = true okforand[et] = true ir.OKForConst[et] = true - issimple[et] = true + types.IsSimple[et] = true } - if isFloat[et] { + if types.IsFloat[et] { okforeq[et] = true - okforcmp[et] = true + 
types.IsOrdered[et] = true okforadd[et] = true okforarith[et] = true ir.OKForConst[et] = true - issimple[et] = true + types.IsSimple[et] = true } - if isComplex[et] { + if types.IsComplex[et] { okforeq[et] = true okforadd[et] = true okforarith[et] = true ir.OKForConst[et] = true - issimple[et] = true + types.IsSimple[et] = true } } - issimple[types.TBOOL] = true + types.IsSimple[types.TBOOL] = true okforadd[types.TSTRING] = true @@ -267,7 +267,7 @@ func initUniverse() { okforeq[types.TARRAY] = true // only if element type is comparable; refined in typecheck okforeq[types.TSTRUCT] = true // only if all struct fields are comparable; refined in typecheck - okforcmp[types.TSTRING] = true + types.IsOrdered[types.TSTRING] = true for i := range okfor { okfor[i] = okfornone[:] @@ -280,10 +280,10 @@ func initUniverse() { okfor[ir.OANDNOT] = okforand[:] okfor[ir.ODIV] = okforarith[:] okfor[ir.OEQ] = okforeq[:] - okfor[ir.OGE] = okforcmp[:] - okfor[ir.OGT] = okforcmp[:] - okfor[ir.OLE] = okforcmp[:] - okfor[ir.OLT] = okforcmp[:] + okfor[ir.OGE] = types.IsOrdered[:] + okfor[ir.OGT] = types.IsOrdered[:] + okfor[ir.OLE] = types.IsOrdered[:] + okfor[ir.OLT] = types.IsOrdered[:] okfor[ir.OMOD] = okforand[:] okfor[ir.OMUL] = okforarith[:] okfor[ir.ONE] = okforeq[:] diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 57edc4328007e..7f68efeed1d78 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -939,7 +939,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { } // Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped. - if isdirectiface(fromType) { + if types.IsDirectIface(fromType) { l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), n.X) l.SetType(toType) l.SetTypecheck(n.Typecheck()) @@ -1101,14 +1101,14 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // rewrite complex div into function call. et := n.X.Type().Kind() - if isComplex[et] && n.Op() == ir.ODIV { + if types.IsComplex[et] && n.Op() == ir.ODIV { t := n.Type() call := mkcall("complex128div", types.Types[types.TCOMPLEX128], init, conv(n.X, types.Types[types.TCOMPLEX128]), conv(n.Y, types.Types[types.TCOMPLEX128])) return conv(call, t) } // Nothing to do for float divisions. - if isFloat[et] { + if types.IsFloat[et] { return n } @@ -2078,7 +2078,7 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { on = syslook("printslice") on = substArgTypes(on, n.Type()) // any-1 case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR: - if isRuntimePkg(n.Type().Sym().Pkg) && n.Type().Sym().Name == "hex" { + if types.IsRuntimePkg(n.Type().Sym().Pkg) && n.Type().Sym().Name == "hex" { on = syslook("printhex") } else { on = syslook("printuint") @@ -2706,7 +2706,7 @@ func mapfast(t *types.Type) int { return mapslow } switch algtype(t.Key()) { - case AMEM32: + case types.AMEM32: if !t.Key().HasPointers() { return mapfast32 } @@ -2714,7 +2714,7 @@ func mapfast(t *types.Type) int { return mapfast32ptr } base.Fatalf("small pointer %v", t.Key()) - case AMEM64: + case types.AMEM64: if !t.Key().HasPointers() { return mapfast64 } @@ -2723,7 +2723,7 @@ func mapfast(t *types.Type) int { } // Two-word object, at least one of which is a pointer. // Use the slow path. - case ASTRING: + case types.ASTRING: return mapfaststr } return mapslow @@ -3256,12 +3256,12 @@ func eqfor(t *types.Type) (n ir.Node, needsize bool) { // a struct/array containing a non-memory field/element. 
// Small memory is handled inline, and single non-memory // is handled by walkcompare. - switch a, _ := algtype1(t); a { - case AMEM: + switch a, _ := types.AlgType(t); a { + case types.AMEM: n := syslook("memequal") n = substArgTypes(n, t, t) return n, true - case ASPECIAL: + case types.ASPECIAL: sym := typesymprefix(".eq", t) n := NewName(sym) setNodeNameFunc(n) @@ -3398,7 +3398,7 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { return n case types.TARRAY: // We can compare several elements at once with 2/4/8 byte integer compares - inline = t.NumElem() <= 1 || (issimple[t.Elem().Kind()] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize)) + inline = t.NumElem() <= 1 || (types.IsSimple[t.Elem().Kind()] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize)) case types.TSTRUCT: inline = t.NumComponents(types.IgnoreBlankFields) <= 4 } @@ -3793,7 +3793,7 @@ func usemethod(n *ir.CallExpr) { // Note: Don't rely on res0.Type.String() since its formatting depends on multiple factors // (including global variables such as numImports - was issue #19028). // Also need to check for reflect package itself (see Issue #38515). - if s := res0.Type.Sym(); s != nil && s.Name == "Method" && isReflectPkg(s.Pkg) { + if s := res0.Type.Sym(); s != nil && s.Name == "Method" && types.IsReflectPkg(s.Pkg) { Curfn.SetReflectMethod(true) // The LSym is initialized at this point. We need to set the attribute on the LSym. Curfn.LSym.Set(obj.AttrReflectMethod, true) diff --git a/src/cmd/compile/internal/types/alg.go b/src/cmd/compile/internal/types/alg.go new file mode 100644 index 0000000000000..14200e0d162a4 --- /dev/null +++ b/src/cmd/compile/internal/types/alg.go @@ -0,0 +1,173 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types + +import "cmd/compile/internal/base" + +// AlgKind describes the kind of algorithms used for comparing and +// hashing a Type. +type AlgKind int + +//go:generate stringer -type AlgKind -trimprefix A + +const ( + // These values are known by runtime. + ANOEQ AlgKind = iota + AMEM0 + AMEM8 + AMEM16 + AMEM32 + AMEM64 + AMEM128 + ASTRING + AINTER + ANILINTER + AFLOAT32 + AFLOAT64 + ACPLX64 + ACPLX128 + + // Type can be compared/hashed as regular memory. + AMEM AlgKind = 100 + + // Type needs special comparison/hashing functions. + ASPECIAL AlgKind = -1 +) + +// AlgType returns the AlgKind used for comparing and hashing Type t. +// If it returns ANOEQ, it also returns the component type of t that +// makes it incomparable. +func AlgType(t *Type) (AlgKind, *Type) { + if t.Broke() { + return AMEM, nil + } + if t.Noalg() { + return ANOEQ, t + } + + switch t.Kind() { + case TANY, TFORW: + // will be defined later. 
+ return ANOEQ, t + + case TINT8, TUINT8, TINT16, TUINT16, + TINT32, TUINT32, TINT64, TUINT64, + TINT, TUINT, TUINTPTR, + TBOOL, TPTR, + TCHAN, TUNSAFEPTR: + return AMEM, nil + + case TFUNC, TMAP: + return ANOEQ, t + + case TFLOAT32: + return AFLOAT32, nil + + case TFLOAT64: + return AFLOAT64, nil + + case TCOMPLEX64: + return ACPLX64, nil + + case TCOMPLEX128: + return ACPLX128, nil + + case TSTRING: + return ASTRING, nil + + case TINTER: + if t.IsEmptyInterface() { + return ANILINTER, nil + } + return AINTER, nil + + case TSLICE: + return ANOEQ, t + + case TARRAY: + a, bad := AlgType(t.Elem()) + switch a { + case AMEM: + return AMEM, nil + case ANOEQ: + return ANOEQ, bad + } + + switch t.NumElem() { + case 0: + // We checked above that the element type is comparable. + return AMEM, nil + case 1: + // Single-element array is same as its lone element. + return a, nil + } + + return ASPECIAL, nil + + case TSTRUCT: + fields := t.FieldSlice() + + // One-field struct is same as that one field alone. + if len(fields) == 1 && !fields[0].Sym.IsBlank() { + return AlgType(fields[0].Type) + } + + ret := AMEM + for i, f := range fields { + // All fields must be comparable. + a, bad := AlgType(f.Type) + if a == ANOEQ { + return ANOEQ, bad + } + + // Blank fields, padded fields, fields with non-memory + // equality need special compare. + if a != AMEM || f.Sym.IsBlank() || IsPaddedField(t, i) { + ret = ASPECIAL + } + } + + return ret, nil + } + + base.Fatalf("algtype1: unexpected type %v", t) + return 0, nil +} + +// TypeHasNoAlg reports whether t does not have any associated hash/eq +// algorithms because t, or some component of t, is marked Noalg. +func TypeHasNoAlg(t *Type) bool { + a, bad := AlgType(t) + return a == ANOEQ && bad.Noalg() +} + +// IsComparable reports whether t is a comparable type. +func IsComparable(t *Type) bool { + a, _ := AlgType(t) + return a != ANOEQ +} + +// IncomparableField returns an incomparable Field of struct Type t, if any. +func IncomparableField(t *Type) *Field { + for _, f := range t.FieldSlice() { + if !IsComparable(f.Type) { + return f + } + } + return nil +} + +// IsPaddedField reports whether the i'th field of struct type t is followed +// by padding. +func IsPaddedField(t *Type, i int) bool { + if !t.IsStruct() { + base.Fatalf("ispaddedfield called non-struct %v", t) + } + end := t.Width + if i+1 < t.NumFields() { + end = t.Field(i + 1).Offset + } + return t.Field(i).End() != end +} diff --git a/src/cmd/compile/internal/gc/algkind_string.go b/src/cmd/compile/internal/types/algkind_string.go similarity index 98% rename from src/cmd/compile/internal/gc/algkind_string.go rename to src/cmd/compile/internal/types/algkind_string.go index 52b5399956170..8c5a0bc287ee5 100644 --- a/src/cmd/compile/internal/gc/algkind_string.go +++ b/src/cmd/compile/internal/types/algkind_string.go @@ -1,6 +1,6 @@ // Code generated by "stringer -type AlgKind -trimprefix A"; DO NOT EDIT. -package gc +package types import "strconv" diff --git a/src/cmd/compile/internal/types/fmt.go b/src/cmd/compile/internal/types/fmt.go index d63f7a4f8d4a1..bf37f01922a43 100644 --- a/src/cmd/compile/internal/types/fmt.go +++ b/src/cmd/compile/internal/types/fmt.go @@ -6,6 +6,8 @@ package types import ( "bytes" + "crypto/md5" + "encoding/binary" "fmt" "go/constant" "strconv" @@ -659,3 +661,12 @@ func FmtConst(v constant.Value, sharp bool) string { return v.String() } + +// TypeHash computes a hash value for type t to use in type switch statements. 
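+// It hashes t's LongString form with MD5 and keeps the first four bytes,
+// so the result is deterministic for a given type description.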
+func TypeHash(t *Type) uint32 {
+	p := t.LongString()
+
+	// Using MD5 is overkill, but reduces accidental collisions.
+	h := md5.Sum([]byte(p))
+	return binary.LittleEndian.Uint32(h[:4])
+}
diff --git a/src/cmd/compile/internal/types/goversion.go b/src/cmd/compile/internal/types/goversion.go
new file mode 100644
index 0000000000000..2265f472cf673
--- /dev/null
+++ b/src/cmd/compile/internal/types/goversion.go
@@ -0,0 +1,96 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run mkbuiltin.go
+
+package types
+
+import (
+	"fmt"
+	"internal/goversion"
+	"log"
+	"regexp"
+	"strconv"
+
+	"cmd/compile/internal/base"
+)
+
+// A lang is a language version broken into major and minor numbers.
+type lang struct {
+	major, minor int
+}
+
+// langWant is the desired language version set by the -lang flag.
+// If the -lang flag is not set, this is the zero value, meaning that
+// any language version is supported.
+var langWant lang
+
+// AllowsGoVersion reports whether a particular package
+// is allowed to use Go version major.minor.
+// We assume the imported packages have all been checked,
+// so we only have to check the local package against the -lang flag.
+func AllowsGoVersion(pkg *Pkg, major, minor int) bool {
+	if pkg == nil {
+		// TODO(mdempsky): Set Pkg for local types earlier.
+		pkg = LocalPkg
+	}
+	if pkg != LocalPkg {
+		// Assume imported packages passed type-checking.
+		return true
+	}
+	if langWant.major == 0 && langWant.minor == 0 {
+		return true
+	}
+	return langWant.major > major || (langWant.major == major && langWant.minor >= minor)
+}
+
+// ParseLangFlag verifies that the -lang flag holds a valid value, and
+// exits if not. It initializes data used by AllowsGoVersion.
+func ParseLangFlag() {
+	if base.Flag.Lang == "" {
+		return
+	}
+
+	var err error
+	langWant, err = parseLang(base.Flag.Lang)
+	if err != nil {
+		log.Fatalf("invalid value %q for -lang: %v", base.Flag.Lang, err)
+	}
+
+	if def := currentLang(); base.Flag.Lang != def {
+		defVers, err := parseLang(def)
+		if err != nil {
+			log.Fatalf("internal error parsing default lang %q: %v", def, err)
+		}
+		if langWant.major > defVers.major || (langWant.major == defVers.major && langWant.minor > defVers.minor) {
+			log.Fatalf("invalid value %q for -lang: max known version is %q", base.Flag.Lang, def)
+		}
+	}
+}
+
+// parseLang parses a -lang option into a lang.
+func parseLang(s string) (lang, error) {
+	matches := goVersionRE.FindStringSubmatch(s)
+	if matches == nil {
+		return lang{}, fmt.Errorf(`should be something like "go1.12"`)
+	}
+	major, err := strconv.Atoi(matches[1])
+	if err != nil {
+		return lang{}, err
+	}
+	minor, err := strconv.Atoi(matches[2])
+	if err != nil {
+		return lang{}, err
+	}
+	return lang{major: major, minor: minor}, nil
+}
+
+// currentLang returns the current language version.
+func currentLang() string {
+	return fmt.Sprintf("go1.%d", goversion.Version)
+}
+
+// goVersionRE is a regular expression that matches the valid
+// arguments to the -lang flag.
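+// Accepted values look like "go1.12"; a major version of 0 or a component
+// with a leading zero (e.g. "go1.012") is rejected.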
+var goVersionRE = regexp.MustCompile(`^go([1-9][0-9]*)\.(0|[1-9][0-9]*)$`) diff --git a/src/cmd/compile/internal/types/pkg.go b/src/cmd/compile/internal/types/pkg.go index bf90570b537b3..de45d32bfa30e 100644 --- a/src/cmd/compile/internal/types/pkg.go +++ b/src/cmd/compile/internal/types/pkg.go @@ -138,3 +138,7 @@ func CleanroomDo(f func()) { f() pkgMap = saved } + +func IsDotAlias(sym *Sym) bool { + return sym.Def != nil && sym.Def.Sym() != sym +} diff --git a/src/cmd/compile/internal/types/scope.go b/src/cmd/compile/internal/types/scope.go index d46918f73de68..a9669ffafcbc6 100644 --- a/src/cmd/compile/internal/types/scope.go +++ b/src/cmd/compile/internal/types/scope.go @@ -72,7 +72,7 @@ func Markdcl() { Block = blockgen } -func IsDclstackValid() bool { +func isDclstackValid() bool { for _, d := range dclstack { if d.sym == nil { return false @@ -105,3 +105,9 @@ func (s *Sym) pkgDefPtr() *Object { // function scope. return &s.Def } + +func CheckDclstack() { + if !isDclstackValid() { + base.Fatalf("mark left on the dclstack") + } +} diff --git a/src/cmd/compile/internal/types/sort.go b/src/cmd/compile/internal/types/sort.go new file mode 100644 index 0000000000000..dc59b06415328 --- /dev/null +++ b/src/cmd/compile/internal/types/sort.go @@ -0,0 +1,14 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types + +// MethodsByName sorts methods by symbol. +type MethodsByName []*Field + +func (x MethodsByName) Len() int { return len(x) } + +func (x MethodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x MethodsByName) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) } diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index 752c268fa2ad2..21d96c430a749 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -9,6 +9,7 @@ import ( "cmd/internal/obj" "cmd/internal/src" "fmt" + "sync" ) // IRNode represents an ir.Node, but without needing to import cmd/compile/internal/ir, @@ -1695,3 +1696,204 @@ func anyBroke(fields []*Field) bool { } return false } + +var ( + IsInt [NTYPE]bool + IsFloat [NTYPE]bool + IsComplex [NTYPE]bool + IsSimple [NTYPE]bool +) + +var IsOrdered [NTYPE]bool + +// IsReflexive reports whether t has a reflexive equality operator. +// That is, if x==x for all x of type t. +func IsReflexive(t *Type) bool { + switch t.Kind() { + case TBOOL, + TINT, + TUINT, + TINT8, + TUINT8, + TINT16, + TUINT16, + TINT32, + TUINT32, + TINT64, + TUINT64, + TUINTPTR, + TPTR, + TUNSAFEPTR, + TSTRING, + TCHAN: + return true + + case TFLOAT32, + TFLOAT64, + TCOMPLEX64, + TCOMPLEX128, + TINTER: + return false + + case TARRAY: + return IsReflexive(t.Elem()) + + case TSTRUCT: + for _, t1 := range t.Fields().Slice() { + if !IsReflexive(t1.Type) { + return false + } + } + return true + + default: + base.Fatalf("bad type for map key: %v", t) + return false + } +} + +// Can this type be stored directly in an interface word? +// Yes, if the representation is a single pointer. +func IsDirectIface(t *Type) bool { + if t.Broke() { + return false + } + + switch t.Kind() { + case TPTR: + // Pointers to notinheap types must be stored indirectly. See issue 42076. + return !t.Elem().NotInHeap() + case TCHAN, + TMAP, + TFUNC, + TUNSAFEPTR: + return true + + case TARRAY: + // Array of 1 direct iface type can be direct. 
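+		// (A [1]T has the same in-memory representation as T itself.)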
+ return t.NumElem() == 1 && IsDirectIface(t.Elem()) + + case TSTRUCT: + // Struct with 1 field of direct iface type can be direct. + return t.NumFields() == 1 && IsDirectIface(t.Field(0).Type) + } + + return false +} + +// IsInterfaceMethod reports whether (field) m is +// an interface method. Such methods have the +// special receiver type types.FakeRecvType(). +func IsInterfaceMethod(f *Type) bool { + return f.Recv().Type == FakeRecvType() +} + +// IsMethodApplicable reports whether method m can be called on a +// value of type t. This is necessary because we compute a single +// method set for both T and *T, but some *T methods are not +// applicable to T receivers. +func IsMethodApplicable(t *Type, m *Field) bool { + return t.IsPtr() || !m.Type.Recv().Type.IsPtr() || IsInterfaceMethod(m.Type) || m.Embedded == 2 +} + +// IsRuntimePkg reports whether p is package runtime. +func IsRuntimePkg(p *Pkg) bool { + if base.Flag.CompilingRuntime && p == LocalPkg { + return true + } + return p.Path == "runtime" +} + +// IsReflectPkg reports whether p is package reflect. +func IsReflectPkg(p *Pkg) bool { + if p == LocalPkg { + return base.Ctxt.Pkgpath == "reflect" + } + return p.Path == "reflect" +} + +// ReceiverBaseType returns the underlying type, if any, +// that owns methods with receiver parameter t. +// The result is either a named type or an anonymous struct. +func ReceiverBaseType(t *Type) *Type { + if t == nil { + return nil + } + + // Strip away pointer if it's there. + if t.IsPtr() { + if t.Sym() != nil { + return nil + } + t = t.Elem() + if t == nil { + return nil + } + } + + // Must be a named type or anonymous struct. + if t.Sym() == nil && !t.IsStruct() { + return nil + } + + // Check types. + if IsSimple[t.Kind()] { + return t + } + switch t.Kind() { + case TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRING, TSTRUCT: + return t + } + return nil +} + +func FloatForComplex(t *Type) *Type { + switch t.Kind() { + case TCOMPLEX64: + return Types[TFLOAT32] + case TCOMPLEX128: + return Types[TFLOAT64] + } + base.Fatalf("unexpected type: %v", t) + return nil +} + +func ComplexForFloat(t *Type) *Type { + switch t.Kind() { + case TFLOAT32: + return Types[TCOMPLEX64] + case TFLOAT64: + return Types[TCOMPLEX128] + } + base.Fatalf("unexpected type: %v", t) + return nil +} + +func TypeSym(t *Type) *Sym { + return TypeSymLookup(TypeSymName(t)) +} + +func TypeSymLookup(name string) *Sym { + typepkgmu.Lock() + s := typepkg.Lookup(name) + typepkgmu.Unlock() + return s +} + +func TypeSymName(t *Type) string { + name := t.ShortString() + // Use a separate symbol name for Noalg types for #17752. + if TypeHasNoAlg(t) { + name = "noalg." + name + } + return name +} + +// Fake package for runtime type info (headers) +// Don't access directly, use typeLookup below. +var ( + typepkgmu sync.Mutex // protects typepkg lookups + typepkg = NewPkg("type", "type") +) + +var SimType [NTYPE]Kind From 65c4c6dfb22c344415e27b72ccdc58d95ca8f6c2 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 23 Dec 2020 00:10:25 -0500 Subject: [PATCH 224/474] [dev.regabi] cmd/compile: group known symbols, packages, names [generated] There are a handful of pre-computed magic symbols known by package gc, and we need a place to store them. If we keep them together, the need for type *ir.Name means that package ir is the lowest package in the import hierarchy that they can go in. And package ir needs gopkg for methodSymSuffix (in a later CL), so they can't go any higher either, at least not all together. So package ir it is. 
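For orientation, the grouping this CL settles on looks roughly like the
sketch below. It is abbreviated from the generated ir/symtab.go: the field
lists are truncated here, the rf script below gives the complete set, and
the field types follow the declarations removed from gc/go.go in this diff.

package ir

import (
	"cmd/compile/internal/types"
	"cmd/internal/obj"
)

// Syms holds known symbols.
var Syms struct {
	Deferproc   *obj.LSym
	Deferreturn *obj.LSym
	Duffcopy    *obj.LSym
	Duffzero    *obj.LSym
	Newobject   *obj.LSym
	SigPanic    *obj.LSym
	// ... one field per symbol moved by the rf script below.
}

// Pkgs holds known packages.
var Pkgs struct {
	Go      *types.Pkg
	Itab    *types.Pkg
	Runtime *types.Pkg
	Unsafe  *types.Pkg
	// ...
}

// Names holds known names. *Name is ir's own node type, which is
// why this table cannot live in a package below ir.
var Names struct {
	Staticuint64s *Name
	Zerobase      *Name
}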
Rather than dump them all into the top-level package ir namespace, however, we introduce global structs, Syms, Pkgs, and Names, and make the known symbols, packages, and names fields of those. [git-generate] cd src/cmd/compile/internal/gc rf ' add go.go:$ \ // Names holds known names. \ var Names struct{} \ \ // Syms holds known symbols. \ var Syms struct {} \ \ // Pkgs holds known packages. \ var Pkgs struct {} \ mv staticuint64s Names.Staticuint64s mv zerobase Names.Zerobase mv assertE2I Syms.AssertE2I mv assertE2I2 Syms.AssertE2I2 mv assertI2I Syms.AssertI2I mv assertI2I2 Syms.AssertI2I2 mv deferproc Syms.Deferproc mv deferprocStack Syms.DeferprocStack mv Deferreturn Syms.Deferreturn mv Duffcopy Syms.Duffcopy mv Duffzero Syms.Duffzero mv gcWriteBarrier Syms.GCWriteBarrier mv goschedguarded Syms.Goschedguarded mv growslice Syms.Growslice mv msanread Syms.Msanread mv msanwrite Syms.Msanwrite mv msanmove Syms.Msanmove mv newobject Syms.Newobject mv newproc Syms.Newproc mv panicdivide Syms.Panicdivide mv panicshift Syms.Panicshift mv panicdottypeE Syms.PanicdottypeE mv panicdottypeI Syms.PanicdottypeI mv panicnildottype Syms.Panicnildottype mv panicoverflow Syms.Panicoverflow mv raceread Syms.Raceread mv racereadrange Syms.Racereadrange mv racewrite Syms.Racewrite mv racewriterange Syms.Racewriterange mv SigPanic Syms.SigPanic mv typedmemclr Syms.Typedmemclr mv typedmemmove Syms.Typedmemmove mv Udiv Syms.Udiv mv writeBarrier Syms.WriteBarrier mv zerobaseSym Syms.Zerobase mv arm64HasATOMICS Syms.ARM64HasATOMICS mv armHasVFPv4 Syms.ARMHasVFPv4 mv x86HasFMA Syms.X86HasFMA mv x86HasPOPCNT Syms.X86HasPOPCNT mv x86HasSSE41 Syms.X86HasSSE41 mv WasmDiv Syms.WasmDiv mv WasmMove Syms.WasmMove mv WasmZero Syms.WasmZero mv WasmTruncS Syms.WasmTruncS mv WasmTruncU Syms.WasmTruncU mv gopkg Pkgs.Go mv itabpkg Pkgs.Itab mv itablinkpkg Pkgs.Itablink mv mappkg Pkgs.Map mv msanpkg Pkgs.Msan mv racepkg Pkgs.Race mv Runtimepkg Pkgs.Runtime mv trackpkg Pkgs.Track mv unsafepkg Pkgs.Unsafe mv Names Syms Pkgs symtab.go mv symtab.go cmd/compile/internal/ir ' Change-Id: Ic143862148569a3bcde8e70b26d75421aa2d00f3 Reviewed-on: https://go-review.googlesource.com/c/go/+/279235 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/amd64/ggen.go | 3 +- src/cmd/compile/internal/amd64/ssa.go | 5 +- src/cmd/compile/internal/arm/ggen.go | 3 +- src/cmd/compile/internal/arm/ssa.go | 6 +- src/cmd/compile/internal/arm64/ggen.go | 3 +- src/cmd/compile/internal/arm64/ssa.go | 4 +- src/cmd/compile/internal/gc/alg.go | 14 +- src/cmd/compile/internal/gc/dcl.go | 2 +- src/cmd/compile/internal/gc/gen.go | 4 +- src/cmd/compile/internal/gc/go.go | 67 --------- src/cmd/compile/internal/gc/iexport.go | 4 +- src/cmd/compile/internal/gc/main.go | 36 ++--- src/cmd/compile/internal/gc/noder.go | 2 +- src/cmd/compile/internal/gc/obj.go | 2 +- src/cmd/compile/internal/gc/plive.go | 4 +- src/cmd/compile/internal/gc/reflect.go | 16 +-- src/cmd/compile/internal/gc/ssa.go | 164 +++++++++++------------ src/cmd/compile/internal/gc/subr.go | 4 +- src/cmd/compile/internal/gc/universe.go | 4 +- src/cmd/compile/internal/gc/walk.go | 18 +-- src/cmd/compile/internal/ir/symtab.go | 82 ++++++++++++ src/cmd/compile/internal/mips64/ggen.go | 3 +- src/cmd/compile/internal/mips64/ssa.go | 4 +- src/cmd/compile/internal/ppc64/ggen.go | 3 +- src/cmd/compile/internal/riscv64/ggen.go | 3 +- src/cmd/compile/internal/riscv64/ssa.go | 4 +- src/cmd/compile/internal/wasm/ssa.go | 14 +- 
src/cmd/compile/internal/x86/ggen.go | 3 +- src/cmd/compile/internal/x86/ssa.go | 5 +- 29 files changed, 255 insertions(+), 231 deletions(-) create mode 100644 src/cmd/compile/internal/ir/symtab.go diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go index ec98b8cca126c..0bb0627f92e3e 100644 --- a/src/cmd/compile/internal/amd64/ggen.go +++ b/src/cmd/compile/internal/amd64/ggen.go @@ -7,6 +7,7 @@ package amd64 import ( "cmd/compile/internal/base" "cmd/compile/internal/gc" + "cmd/compile/internal/ir" "cmd/internal/obj" "cmd/internal/obj/x86" "cmd/internal/objabi" @@ -102,7 +103,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Pr } p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0) p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt)) - p.To.Sym = gc.Duffzero + p.To.Sym = ir.Syms.Duffzero if cnt%16 != 0 { p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8)) diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 5e3b962076b0d..055d1894d406b 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -10,6 +10,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/gc" + "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" "cmd/compile/internal/types" @@ -912,7 +913,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { } p = s.Prog(obj.ADUFFZERO) p.To.Type = obj.TYPE_ADDR - p.To.Sym = gc.Duffzero + p.To.Sym = ir.Syms.Duffzero p.To.Offset = off case ssa.OpAMD64MOVOconst: if v.AuxInt != 0 { @@ -923,7 +924,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case ssa.OpAMD64DUFFCOPY: p := s.Prog(obj.ADUFFCOPY) p.To.Type = obj.TYPE_ADDR - p.To.Sym = gc.Duffcopy + p.To.Sym = ir.Syms.Duffcopy if v.AuxInt%16 != 0 { v.Fatalf("bad DUFFCOPY AuxInt %v", v.AuxInt) } diff --git a/src/cmd/compile/internal/arm/ggen.go b/src/cmd/compile/internal/arm/ggen.go index bd8d7ff40b8f8..2e4de9893bced 100644 --- a/src/cmd/compile/internal/arm/ggen.go +++ b/src/cmd/compile/internal/arm/ggen.go @@ -6,6 +6,7 @@ package arm import ( "cmd/compile/internal/gc" + "cmd/compile/internal/ir" "cmd/internal/obj" "cmd/internal/obj/arm" ) @@ -28,7 +29,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog p.Reg = arm.REGSP p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.Duffzero + p.To.Sym = ir.Syms.Duffzero p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr)) } else { p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0) diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go index 8b155712aa80a..ab7ec6176b388 100644 --- a/src/cmd/compile/internal/arm/ssa.go +++ b/src/cmd/compile/internal/arm/ssa.go @@ -702,7 +702,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.Udiv + p.To.Sym = ir.Syms.Udiv case ssa.OpARMLoweredWB: p := s.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM @@ -724,13 +724,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(obj.ADUFFZERO) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.Duffzero + p.To.Sym = ir.Syms.Duffzero p.To.Offset = v.AuxInt case ssa.OpARMDUFFCOPY: p := s.Prog(obj.ADUFFCOPY) p.To.Type = 
obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.Duffcopy + p.To.Sym = ir.Syms.Duffcopy p.To.Offset = v.AuxInt case ssa.OpARMLoweredNilCheck: // Issue a load which will fault if arg is nil. diff --git a/src/cmd/compile/internal/arm64/ggen.go b/src/cmd/compile/internal/arm64/ggen.go index f3fec03854f16..6c280267b6e20 100644 --- a/src/cmd/compile/internal/arm64/ggen.go +++ b/src/cmd/compile/internal/arm64/ggen.go @@ -6,6 +6,7 @@ package arm64 import ( "cmd/compile/internal/gc" + "cmd/compile/internal/ir" "cmd/internal/obj" "cmd/internal/obj/arm64" "cmd/internal/objabi" @@ -41,7 +42,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { p.Reg = arm64.REG_R20 p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.Duffzero + p.To.Sym = ir.Syms.Duffzero p.To.Offset = 4 * (64 - cnt/(2*int64(gc.Widthptr))) } else { // Not using REGTMP, so this is async preemptible (async preemption clobbers REGTMP). diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go index 3eb0ae65573b3..bb634cc38c83a 100644 --- a/src/cmd/compile/internal/arm64/ssa.go +++ b/src/cmd/compile/internal/arm64/ssa.go @@ -961,7 +961,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(obj.ADUFFZERO) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.Duffzero + p.To.Sym = ir.Syms.Duffzero p.To.Offset = v.AuxInt case ssa.OpARM64LoweredZero: // STP.P (ZR,ZR), 16(R16) @@ -987,7 +987,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(obj.ADUFFCOPY) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.Duffcopy + p.To.Sym = ir.Syms.Duffcopy p.To.Offset = v.AuxInt case ssa.OpARM64LoweredMove: // MOVD.P 8(R16), Rtmp diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index 08237d40556a5..bcf992ba4b9b7 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -265,19 +265,19 @@ func hashfor(t *types.Type) ir.Node { case types.AMEM: base.Fatalf("hashfor with AMEM type") case types.AINTER: - sym = Runtimepkg.Lookup("interhash") + sym = ir.Pkgs.Runtime.Lookup("interhash") case types.ANILINTER: - sym = Runtimepkg.Lookup("nilinterhash") + sym = ir.Pkgs.Runtime.Lookup("nilinterhash") case types.ASTRING: - sym = Runtimepkg.Lookup("strhash") + sym = ir.Pkgs.Runtime.Lookup("strhash") case types.AFLOAT32: - sym = Runtimepkg.Lookup("f32hash") + sym = ir.Pkgs.Runtime.Lookup("f32hash") case types.AFLOAT64: - sym = Runtimepkg.Lookup("f64hash") + sym = ir.Pkgs.Runtime.Lookup("f64hash") case types.ACPLX64: - sym = Runtimepkg.Lookup("c64hash") + sym = ir.Pkgs.Runtime.Lookup("c64hash") case types.ACPLX128: - sym = Runtimepkg.Lookup("c128hash") + sym = ir.Pkgs.Runtime.Lookup("c128hash") default: // Note: the caller of hashfor ensured that this symbol // exists and has a body by calling genhash for t. diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 5a5f670a08616..c084565f3dafb 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -626,7 +626,7 @@ func methodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sy // Find the package the receiver type appeared in. For // anonymous receiver types (i.e., anonymous structs with // embedded fields), use the "go" pseudo-package instead. 
- rpkg := gopkg + rpkg := ir.Pkgs.Go if rsym != nil { rpkg = rsym.Pkg } diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go index f83c636472ae4..bcd58fd2c5b2c 100644 --- a/src/cmd/compile/internal/gc/gen.go +++ b/src/cmd/compile/internal/gc/gen.go @@ -16,7 +16,7 @@ import ( // sysfunc looks up Go function name in package runtime. This function // must follow the internal calling convention. func sysfunc(name string) *obj.LSym { - s := Runtimepkg.Lookup(name) + s := ir.Pkgs.Runtime.Lookup(name) s.SetFunc(true) return s.Linksym() } @@ -25,7 +25,7 @@ func sysfunc(name string) *obj.LSym { // runtime. If this is a function, it may have a special calling // convention. func sysvar(name string) *obj.LSym { - return Runtimepkg.Lookup(name).Linksym() + return ir.Pkgs.Runtime.Lookup(name).Linksym() } // isParamStackCopy reports whether this is the on-stack copy of a diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index 7ec59852eecbf..4b6ffe58d1de9 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -64,24 +64,6 @@ var decldepth int32 var inimport bool // set during import -var itabpkg *types.Pkg // fake pkg for itab entries - -var itablinkpkg *types.Pkg // fake package for runtime itab entries - -var Runtimepkg *types.Pkg // fake package runtime - -var racepkg *types.Pkg // package runtime/race - -var msanpkg *types.Pkg // package runtime/msan - -var unsafepkg *types.Pkg // package unsafe - -var trackpkg *types.Pkg // fake package for field tracking - -var mappkg *types.Pkg // fake package for map zero value - -var gopkg *types.Pkg // pseudo-package for method symbols on anonymous receiver types - var zerosize int64 var ( @@ -149,57 +131,8 @@ type Arch struct { var thearch Arch var ( - staticuint64s *ir.Name - zerobase *ir.Name - - assertE2I, - assertE2I2, - assertI2I, - assertI2I2, - deferproc, - deferprocStack, - Deferreturn, - Duffcopy, - Duffzero, - gcWriteBarrier, - goschedguarded, - growslice, - msanread, - msanwrite, - msanmove, - newobject, - newproc, - panicdivide, - panicshift, - panicdottypeE, - panicdottypeI, - panicnildottype, - panicoverflow, - raceread, - racereadrange, - racewrite, - racewriterange, - x86HasPOPCNT, - x86HasSSE41, - x86HasFMA, - armHasVFPv4, - arm64HasATOMICS, - typedmemclr, - typedmemmove, - Udiv, - writeBarrier, - zerobaseSym *obj.LSym - BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym - - // Wasm - WasmMove, - WasmZero, - WasmDiv, - WasmTruncS, - WasmTruncU, - SigPanic *obj.LSym ) // GCWriteBarrierReg maps from registers to gcWriteBarrier implementation LSyms. diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 87db08e0d14e6..56d2e81df1ecf 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -400,7 +400,7 @@ func (p *iexporter) pushDecl(n *ir.Name) { } // Don't export predeclared declarations. 
- if n.Sym().Pkg == types.BuiltinPkg || n.Sym().Pkg == unsafepkg { + if n.Sym().Pkg == types.BuiltinPkg || n.Sym().Pkg == ir.Pkgs.Unsafe { return } @@ -647,7 +647,7 @@ func (w *exportWriter) startType(k itag) { func (w *exportWriter) doTyp(t *types.Type) { if t.Sym() != nil { - if t.Sym().Pkg == types.BuiltinPkg || t.Sym().Pkg == unsafepkg { + if t.Sym().Pkg == types.BuiltinPkg || t.Sym().Pkg == ir.Pkgs.Unsafe { base.Fatalf("builtin type missing from typIndex: %v", t) } diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 15646ff8c759a..1c52426802735 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -86,32 +86,32 @@ func Main(archInit func(*Arch)) { types.BuiltinPkg.Prefix = "go.builtin" // not go%2ebuiltin // pseudo-package, accessed by import "unsafe" - unsafepkg = types.NewPkg("unsafe", "unsafe") + ir.Pkgs.Unsafe = types.NewPkg("unsafe", "unsafe") // Pseudo-package that contains the compiler's builtin // declarations for package runtime. These are declared in a // separate package to avoid conflicts with package runtime's // actual declarations, which may differ intentionally but // insignificantly. - Runtimepkg = types.NewPkg("go.runtime", "runtime") - Runtimepkg.Prefix = "runtime" + ir.Pkgs.Runtime = types.NewPkg("go.runtime", "runtime") + ir.Pkgs.Runtime.Prefix = "runtime" // pseudo-packages used in symbol tables - itabpkg = types.NewPkg("go.itab", "go.itab") - itabpkg.Prefix = "go.itab" // not go%2eitab + ir.Pkgs.Itab = types.NewPkg("go.itab", "go.itab") + ir.Pkgs.Itab.Prefix = "go.itab" // not go%2eitab - itablinkpkg = types.NewPkg("go.itablink", "go.itablink") - itablinkpkg.Prefix = "go.itablink" // not go%2eitablink + ir.Pkgs.Itablink = types.NewPkg("go.itablink", "go.itablink") + ir.Pkgs.Itablink.Prefix = "go.itablink" // not go%2eitablink - trackpkg = types.NewPkg("go.track", "go.track") - trackpkg.Prefix = "go.track" // not go%2etrack + ir.Pkgs.Track = types.NewPkg("go.track", "go.track") + ir.Pkgs.Track.Prefix = "go.track" // not go%2etrack // pseudo-package used for map zero values - mappkg = types.NewPkg("go.map", "go.map") - mappkg.Prefix = "go.map" + ir.Pkgs.Map = types.NewPkg("go.map", "go.map") + ir.Pkgs.Map.Prefix = "go.map" // pseudo-package used for methods with anonymous receivers - gopkg = types.NewPkg("go", "") + ir.Pkgs.Go = types.NewPkg("go", "") base.DebugSSA = ssa.PhaseOption base.ParseFlags() @@ -165,10 +165,10 @@ func Main(archInit func(*Arch)) { thearch.LinkArch.Init(base.Ctxt) startProfile() if base.Flag.Race { - racepkg = types.NewPkg("runtime/race", "") + ir.Pkgs.Race = types.NewPkg("runtime/race", "") } if base.Flag.MSan { - msanpkg = types.NewPkg("runtime/msan", "") + ir.Pkgs.Msan = types.NewPkg("runtime/msan", "") } if base.Flag.Race || base.Flag.MSan { base.Flag.Cfg.Instrumenting = true @@ -592,13 +592,13 @@ func loadsys() { typs := runtimeTypes() for _, d := range &runtimeDecls { - sym := Runtimepkg.Lookup(d.name) + sym := ir.Pkgs.Runtime.Lookup(d.name) typ := typs[d.typ] switch d.tag { case funcTag: - importfunc(Runtimepkg, src.NoXPos, sym, typ) + importfunc(ir.Pkgs.Runtime, src.NoXPos, sym, typ) case varTag: - importvar(Runtimepkg, src.NoXPos, sym, typ) + importvar(ir.Pkgs.Runtime, src.NoXPos, sym, typ) default: base.Fatalf("unhandled declaration tag %v", d.tag) } @@ -647,7 +647,7 @@ func importfile(f constant.Value) *types.Pkg { } if path_ == "unsafe" { - return unsafepkg + return ir.Pkgs.Unsafe } if islocalname(path_) { diff --git 
a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 77a45f002309d..799887d6b85a4 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -334,7 +334,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) { return } - if ipkg == unsafepkg { + if ipkg == ir.Pkgs.Unsafe { p.importedUnsafe = true } if ipkg.Path == "embed" { diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 883033e0c26be..897bcce36f74b 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -158,7 +158,7 @@ func dumpdata() { dumpglobls(Target.Externs[numExterns:]) if zerosize > 0 { - zero := mappkg.Lookup("zero") + zero := ir.Pkgs.Map.Lookup("zero") ggloblsym(zero.Linksym(), int32(zerosize), obj.DUPOK|obj.RODATA) } diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index 0b796ae7fa1b5..f13889efda5bd 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -571,7 +571,7 @@ func (lv *Liveness) markUnsafePoints() { var load *ssa.Value v := wbBlock.Controls[0] for { - if sym, ok := v.Aux.(*obj.LSym); ok && sym == writeBarrier { + if sym, ok := v.Aux.(*obj.LSym); ok && sym == ir.Syms.WriteBarrier { load = v break } @@ -690,7 +690,7 @@ func (lv *Liveness) hasStackMap(v *ssa.Value) bool { // typedmemclr and typedmemmove are write barriers and // deeply non-preemptible. They are unsafe points and // hence should not have liveness maps. - if sym, ok := v.Aux.(*ssa.AuxCall); ok && (sym.Fn == typedmemclr || sym.Fn == typedmemmove) { + if sym, ok := v.Aux.(*ssa.AuxCall); ok && (sym.Fn == ir.Syms.Typedmemclr || sym.Fn == ir.Syms.Typedmemmove) { return false } return true diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 12fc6b7fa72b3..41c9f93bf0d77 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -489,7 +489,7 @@ func dimportpath(p *types.Pkg) { // If we are compiling the runtime package, there are two runtime packages around // -- localpkg and Runtimepkg. We don't want to produce import path symbols for // both of them, so just produce one for localpkg. - if base.Ctxt.Pkgpath == "runtime" && p == Runtimepkg { + if base.Ctxt.Pkgpath == "runtime" && p == ir.Pkgs.Runtime { return } @@ -926,7 +926,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { // tracksym returns the symbol for tracking use of field/method f, assumed // to be a member of struct/interface type t. func tracksym(t *types.Type, f *types.Field) *types.Sym { - return trackpkg.Lookup(t.ShortString() + "." + f.Sym.Name) + return ir.Pkgs.Track.Lookup(t.ShortString() + "." + f.Sym.Name) } func typesymprefix(prefix string, t *types.Type) *types.Sym { @@ -975,7 +975,7 @@ func itabname(t, itype *types.Type) *ir.AddrExpr { if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() { base.Fatalf("itabname(%v, %v)", t, itype) } - s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString()) + s := ir.Pkgs.Itab.Lookup(t.ShortString() + "," + itype.ShortString()) if s.Def == nil { n := NewName(s) n.SetType(types.Types[types.TUINT8]) @@ -1544,13 +1544,13 @@ func dumpbasictypes() { dtypesym(functype(nil, []*ir.Field{anonfield(types.ErrorType)}, []*ir.Field{anonfield(types.Types[types.TSTRING])})) // add paths for runtime and main, which 6l imports implicitly. 
- dimportpath(Runtimepkg) + dimportpath(ir.Pkgs.Runtime) if base.Flag.Race { - dimportpath(racepkg) + dimportpath(ir.Pkgs.Race) } if base.Flag.MSan { - dimportpath(msanpkg) + dimportpath(ir.Pkgs.Msan) } dimportpath(types.NewPkg("main", "")) } @@ -1642,7 +1642,7 @@ func dgcptrmask(t *types.Type) *obj.LSym { fillptrmask(t, ptrmask) p := fmt.Sprintf("gcbits.%x", ptrmask) - sym := Runtimepkg.Lookup(p) + sym := ir.Pkgs.Runtime.Lookup(p) lsym := sym.Linksym() if !sym.Uniq() { sym.SetUniq(true) @@ -1791,7 +1791,7 @@ func zeroaddr(size int64) ir.Node { if zerosize < size { zerosize = size } - s := mappkg.Lookup("zero") + s := ir.Pkgs.Map.Lookup("zero") if s.Def == nil { x := NewName(s) x.SetType(types.Types[types.TUINT8]) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 722a3257da87f..22cc868f36383 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -91,43 +91,43 @@ func initssaconfig() { ssaCaches = make([]ssa.Cache, base.Flag.LowerC) // Set up some runtime functions we'll need to call. - assertE2I = sysfunc("assertE2I") - assertE2I2 = sysfunc("assertE2I2") - assertI2I = sysfunc("assertI2I") - assertI2I2 = sysfunc("assertI2I2") - deferproc = sysfunc("deferproc") - deferprocStack = sysfunc("deferprocStack") - Deferreturn = sysfunc("deferreturn") - Duffcopy = sysfunc("duffcopy") - Duffzero = sysfunc("duffzero") - gcWriteBarrier = sysfunc("gcWriteBarrier") - goschedguarded = sysfunc("goschedguarded") - growslice = sysfunc("growslice") - msanread = sysfunc("msanread") - msanwrite = sysfunc("msanwrite") - msanmove = sysfunc("msanmove") - newobject = sysfunc("newobject") - newproc = sysfunc("newproc") - panicdivide = sysfunc("panicdivide") - panicdottypeE = sysfunc("panicdottypeE") - panicdottypeI = sysfunc("panicdottypeI") - panicnildottype = sysfunc("panicnildottype") - panicoverflow = sysfunc("panicoverflow") - panicshift = sysfunc("panicshift") - raceread = sysfunc("raceread") - racereadrange = sysfunc("racereadrange") - racewrite = sysfunc("racewrite") - racewriterange = sysfunc("racewriterange") - x86HasPOPCNT = sysvar("x86HasPOPCNT") // bool - x86HasSSE41 = sysvar("x86HasSSE41") // bool - x86HasFMA = sysvar("x86HasFMA") // bool - armHasVFPv4 = sysvar("armHasVFPv4") // bool - arm64HasATOMICS = sysvar("arm64HasATOMICS") // bool - typedmemclr = sysfunc("typedmemclr") - typedmemmove = sysfunc("typedmemmove") - Udiv = sysvar("udiv") // asm func with special ABI - writeBarrier = sysvar("writeBarrier") // struct { bool; ... 
} - zerobaseSym = sysvar("zerobase") + ir.Syms.AssertE2I = sysfunc("assertE2I") + ir.Syms.AssertE2I2 = sysfunc("assertE2I2") + ir.Syms.AssertI2I = sysfunc("assertI2I") + ir.Syms.AssertI2I2 = sysfunc("assertI2I2") + ir.Syms.Deferproc = sysfunc("deferproc") + ir.Syms.DeferprocStack = sysfunc("deferprocStack") + ir.Syms.Deferreturn = sysfunc("deferreturn") + ir.Syms.Duffcopy = sysfunc("duffcopy") + ir.Syms.Duffzero = sysfunc("duffzero") + ir.Syms.GCWriteBarrier = sysfunc("gcWriteBarrier") + ir.Syms.Goschedguarded = sysfunc("goschedguarded") + ir.Syms.Growslice = sysfunc("growslice") + ir.Syms.Msanread = sysfunc("msanread") + ir.Syms.Msanwrite = sysfunc("msanwrite") + ir.Syms.Msanmove = sysfunc("msanmove") + ir.Syms.Newobject = sysfunc("newobject") + ir.Syms.Newproc = sysfunc("newproc") + ir.Syms.Panicdivide = sysfunc("panicdivide") + ir.Syms.PanicdottypeE = sysfunc("panicdottypeE") + ir.Syms.PanicdottypeI = sysfunc("panicdottypeI") + ir.Syms.Panicnildottype = sysfunc("panicnildottype") + ir.Syms.Panicoverflow = sysfunc("panicoverflow") + ir.Syms.Panicshift = sysfunc("panicshift") + ir.Syms.Raceread = sysfunc("raceread") + ir.Syms.Racereadrange = sysfunc("racereadrange") + ir.Syms.Racewrite = sysfunc("racewrite") + ir.Syms.Racewriterange = sysfunc("racewriterange") + ir.Syms.X86HasPOPCNT = sysvar("x86HasPOPCNT") // bool + ir.Syms.X86HasSSE41 = sysvar("x86HasSSE41") // bool + ir.Syms.X86HasFMA = sysvar("x86HasFMA") // bool + ir.Syms.ARMHasVFPv4 = sysvar("armHasVFPv4") // bool + ir.Syms.ARM64HasATOMICS = sysvar("arm64HasATOMICS") // bool + ir.Syms.Typedmemclr = sysfunc("typedmemclr") + ir.Syms.Typedmemmove = sysfunc("typedmemmove") + ir.Syms.Udiv = sysvar("udiv") // asm func with special ABI + ir.Syms.WriteBarrier = sysvar("writeBarrier") // struct { bool; ... } + ir.Syms.Zerobase = sysvar("zerobase") // asm funcs with special ABI if thearch.LinkArch.Name == "amd64" { @@ -198,12 +198,12 @@ func initssaconfig() { } // Wasm (all asm funcs with special ABIs) - WasmMove = sysvar("wasmMove") - WasmZero = sysvar("wasmZero") - WasmDiv = sysvar("wasmDiv") - WasmTruncS = sysvar("wasmTruncS") - WasmTruncU = sysvar("wasmTruncU") - SigPanic = sysfunc("sigpanic") + ir.Syms.WasmMove = sysvar("wasmMove") + ir.Syms.WasmZero = sysvar("wasmZero") + ir.Syms.WasmDiv = sysvar("wasmDiv") + ir.Syms.WasmTruncS = sysvar("wasmTruncS") + ir.Syms.WasmTruncU = sysvar("wasmTruncU") + ir.Syms.SigPanic = sysfunc("sigpanic") } // getParam returns the Field of ith param of node n (which is a @@ -1051,11 +1051,11 @@ func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrume if base.Flag.MSan { switch kind { case instrumentRead: - fn = msanread + fn = ir.Syms.Msanread case instrumentWrite: - fn = msanwrite + fn = ir.Syms.Msanwrite case instrumentMove: - fn = msanmove + fn = ir.Syms.Msanmove default: panic("unreachable") } @@ -1066,9 +1066,9 @@ func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrume // composites with only one element don't have subobjects, though. switch kind { case instrumentRead: - fn = racereadrange + fn = ir.Syms.Racereadrange case instrumentWrite: - fn = racewriterange + fn = ir.Syms.Racewriterange default: panic("unreachable") } @@ -1078,9 +1078,9 @@ func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrume // address, as any write must write the first byte. 
switch kind { case instrumentRead: - fn = raceread + fn = ir.Syms.Raceread case instrumentWrite: - fn = racewrite + fn = ir.Syms.Racewrite default: panic("unreachable") } @@ -1170,7 +1170,7 @@ func (s *state) stmt(n ir.Node) { s.callResult(n, callNormal) if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class_ == ir.PFUNC { if fn := n.X.Sym().Name; base.Flag.CompilingRuntime && fn == "throw" || - n.X.Sym().Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") { + n.X.Sym().Pkg == ir.Pkgs.Runtime && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") { m := s.mem() b := s.endBlock() b.Kind = ssa.BlockExit @@ -1677,7 +1677,7 @@ func (s *state) exit() *ssa.Block { } s.openDeferExit() } else { - s.rtcall(Deferreturn, true, nil) + s.rtcall(ir.Syms.Deferreturn, true, nil) } } @@ -2612,7 +2612,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { bt := b.Type if bt.IsSigned() { cmp := s.newValue2(s.ssaOp(ir.OLE, bt), types.Types[types.TBOOL], s.zeroVal(bt), b) - s.check(cmp, panicshift) + s.check(cmp, ir.Syms.Panicshift) bt = bt.ToUnsigned() } return s.newValue2(s.ssaShiftOp(n.Op(), n.Type(), bt), a.Type, a, b) @@ -2909,10 +2909,10 @@ func (s *state) expr(n ir.Node) *ssa.Value { case ir.ONEWOBJ: n := n.(*ir.UnaryExpr) if n.Type().Elem().Size() == 0 { - return s.newValue1A(ssa.OpAddr, n.Type(), zerobaseSym, s.sb) + return s.newValue1A(ssa.OpAddr, n.Type(), ir.Syms.Zerobase, s.sb) } typ := s.expr(n.X) - vv := s.rtcall(newobject, true, []*types.Type{n.Type()}, typ) + vv := s.rtcall(ir.Syms.Newobject, true, []*types.Type{n.Type()}, typ) return vv[0] default: @@ -3006,7 +3006,7 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value { // Call growslice s.startBlock(grow) taddr := s.expr(n.X) - r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[types.TINT], types.Types[types.TINT]}, taddr, p, l, c, nl) + r := s.rtcall(ir.Syms.Growslice, true, []*types.Type{pt, types.Types[types.TINT], types.Types[types.TINT]}, taddr, p, l, c, nl) if inplace { if sn.Op() == ir.ONAME { @@ -3635,7 +3635,7 @@ func initSSATables() { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { // Target Atomic feature is identified by dynamic detection - addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), arm64HasATOMICS, s.sb) + addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARM64HasATOMICS, s.sb) v := s.load(types.Types[types.TBOOL], addr) b := s.endBlock() b.Kind = ssa.BlockIf @@ -3860,7 +3860,7 @@ func initSSATables() { s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64] return s.variable(n, types.Types[types.TFLOAT64]) } - v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasFMA) + v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasFMA) b := s.endBlock() b.Kind = ssa.BlockIf b.SetControl(v) @@ -3892,7 +3892,7 @@ func initSSATables() { s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64] return s.variable(n, types.Types[types.TFLOAT64]) } - addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), armHasVFPv4, s.sb) + addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARMHasVFPv4, s.sb) v := s.load(types.Types[types.TBOOL], addr) b := s.endBlock() b.Kind = ssa.BlockIf @@ -3922,7 +3922,7 @@ func initSSATables() { 
makeRoundAMD64 := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasSSE41) + v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasSSE41) b := s.endBlock() b.Kind = ssa.BlockIf b.SetControl(v) @@ -4128,7 +4128,7 @@ func initSSATables() { makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasPOPCNT) + v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasPOPCNT) b := s.endBlock() b.Kind = ssa.BlockIf b.SetControl(v) @@ -4212,9 +4212,9 @@ func initSSATables() { func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { // check for divide-by-zero/overflow and panic with appropriate message cmpZero := s.newValue2(s.ssaOp(ir.ONE, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[2], s.zeroVal(types.Types[types.TUINT64])) - s.check(cmpZero, panicdivide) + s.check(cmpZero, ir.Syms.Panicdivide) cmpOverflow := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[0], args[2]) - s.check(cmpOverflow, panicoverflow) + s.check(cmpOverflow, ir.Syms.Panicoverflow) return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2]) }, sys.AMD64) @@ -4768,7 +4768,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val // Call runtime.deferprocStack with pointer to _defer record. ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(base.Ctxt.FixedFrameSize())}) - aux := ssa.StaticAuxCall(deferprocStack, ACArgs, ACResults) + aux := ssa.StaticAuxCall(ir.Syms.DeferprocStack, ACArgs, ACResults) if testLateExpansion { callArgs = append(callArgs, addr, s.mem()) call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) @@ -4844,7 +4844,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val // call target switch { case k == callDefer: - aux := ssa.StaticAuxCall(deferproc, ACArgs, ACResults) + aux := ssa.StaticAuxCall(ir.Syms.Deferproc, ACArgs, ACResults) if testLateExpansion { call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) call.AddArgs(callArgs...) @@ -4852,7 +4852,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem()) } case k == callGo: - aux := ssa.StaticAuxCall(newproc, ACArgs, ACResults) + aux := ssa.StaticAuxCall(ir.Syms.Newproc, ACArgs, ACResults) if testLateExpansion { call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) call.AddArgs(callArgs...) @@ -5359,7 +5359,7 @@ func (s *state) intDivide(n ir.Node, a, b *ssa.Value) *ssa.Value { if needcheck { // do a size-appropriate check for zero cmp := s.newValue2(s.ssaOp(ir.ONE, n.Type()), types.Types[types.TBOOL], b, s.zeroVal(n.Type())) - s.check(cmp, panicdivide) + s.check(cmp, ir.Syms.Panicdivide) } return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b) } @@ -6063,7 +6063,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val if !commaok { // On failure, panic by calling panicnildottype. 
s.startBlock(bFail) - s.rtcall(panicnildottype, false, nil, target) + s.rtcall(ir.Syms.Panicnildottype, false, nil, target) // On success, return (perhaps modified) input interface. s.startBlock(bOk) @@ -6108,16 +6108,16 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val } if n.X.Type().IsEmptyInterface() { if commaok { - call := s.rtcall(assertE2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface) + call := s.rtcall(ir.Syms.AssertE2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface) return call[0], call[1] } - return s.rtcall(assertE2I, true, []*types.Type{n.Type()}, target, iface)[0], nil + return s.rtcall(ir.Syms.AssertE2I, true, []*types.Type{n.Type()}, target, iface)[0], nil } if commaok { - call := s.rtcall(assertI2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface) + call := s.rtcall(ir.Syms.AssertI2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface) return call[0], call[1] } - return s.rtcall(assertI2I, true, []*types.Type{n.Type()}, target, iface)[0], nil + return s.rtcall(ir.Syms.AssertI2I, true, []*types.Type{n.Type()}, target, iface)[0], nil } if base.Debug.TypeAssert > 0 { @@ -6165,9 +6165,9 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val s.startBlock(bFail) taddr := s.expr(n.Ntype.(*ir.AddrExpr).Alloc) if n.X.Type().IsEmptyInterface() { - s.rtcall(panicdottypeE, false, nil, itab, target, taddr) + s.rtcall(ir.Syms.PanicdottypeE, false, nil, itab, target, taddr) } else { - s.rtcall(panicdottypeI, false, nil, itab, target, taddr) + s.rtcall(ir.Syms.PanicdottypeI, false, nil, itab, target, taddr) } // on success, return data from interface @@ -6623,7 +6623,7 @@ func genssa(f *ssa.Func, pp *Progs) { // deferreturn and a return. This will be used to during panic // recovery to unwind the stack and return back to the runtime. s.pp.nextLive = s.livenessMap.deferreturn - gencallret(pp, Deferreturn) + gencallret(pp, ir.Syms.Deferreturn) } if inlMarks != nil { @@ -7082,14 +7082,14 @@ func (s *SSAGenState) PrepareCall(v *ssa.Value) { idx := s.livenessMap.Get(v) if !idx.StackMapValid() { // See Liveness.hasStackMap. - if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == typedmemclr || sym.Fn == typedmemmove) { + if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == ir.Syms.Typedmemclr || sym.Fn == ir.Syms.Typedmemmove) { base.Fatalf("missing stack map index for %v", v.LongString()) } } call, ok := v.Aux.(*ssa.AuxCall) - if ok && call.Fn == Deferreturn { + if ok && call.Fn == ir.Syms.Deferreturn { // Deferred calls will appear to be returning to // the CALL deferreturn(SB) that we are about to emit. 
// However, the stack trace code will show the line @@ -7321,15 +7321,15 @@ func (e *ssafn) UseWriteBarrier() bool { func (e *ssafn) Syslook(name string) *obj.LSym { switch name { case "goschedguarded": - return goschedguarded + return ir.Syms.Goschedguarded case "writeBarrier": - return writeBarrier + return ir.Syms.WriteBarrier case "gcWriteBarrier": - return gcWriteBarrier + return ir.Syms.GCWriteBarrier case "typedmemmove": - return typedmemmove + return ir.Syms.Typedmemmove case "typedmemclr": - return typedmemclr + return ir.Syms.Typedmemclr } e.Fatalf(src.NoXPos, "unknown Syslook func %v", name) return nil diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index d8956633b27b8..a845abeb3a79d 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -523,7 +523,7 @@ func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) { } func syslook(name string) *ir.Name { - s := Runtimepkg.Lookup(name) + s := ir.Pkgs.Runtime.Lookup(name) if s == nil || s.Def == nil { base.Fatalf("syslook: can't find runtime.%s", name) } @@ -1247,7 +1247,7 @@ func paramNnames(ft *types.Type) []ir.Node { } func hashmem(t *types.Type) ir.Node { - sym := Runtimepkg.Lookup("memhash") + sym := ir.Pkgs.Runtime.Lookup("memhash") n := NewName(sym) setNodeNameFunc(n) diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index f2c719db38eef..c9cce4b4884b4 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -141,7 +141,7 @@ func initUniverse() { s.Def = n dowidth(types.ErrorType) - types.Types[types.TUNSAFEPTR] = defBasic(types.TUNSAFEPTR, unsafepkg, "Pointer") + types.Types[types.TUNSAFEPTR] = defBasic(types.TUNSAFEPTR, ir.Pkgs.Unsafe, "Pointer") // simple aliases types.SimType[types.TMAP] = types.TPTR @@ -157,7 +157,7 @@ func initUniverse() { } for _, s := range &unsafeFuncs { - s2 := unsafepkg.Lookup(s.name) + s2 := ir.Pkgs.Unsafe.Lookup(s.name) def := NewName(s2) def.BuiltinOp = s.op s2.Def = def diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 7f68efeed1d78..5d812064b6b3c 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -946,15 +946,15 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return l } - if staticuint64s == nil { - staticuint64s = NewName(Runtimepkg.Lookup("staticuint64s")) - staticuint64s.Class_ = ir.PEXTERN + if ir.Names.Staticuint64s == nil { + ir.Names.Staticuint64s = NewName(ir.Pkgs.Runtime.Lookup("staticuint64s")) + ir.Names.Staticuint64s.Class_ = ir.PEXTERN // The actual type is [256]uint64, but we use [256*8]uint8 so we can address // individual bytes. - staticuint64s.SetType(types.NewArray(types.Types[types.TUINT8], 256*8)) - zerobase = NewName(Runtimepkg.Lookup("zerobase")) - zerobase.Class_ = ir.PEXTERN - zerobase.SetType(types.Types[types.TUINTPTR]) + ir.Names.Staticuint64s.SetType(types.NewArray(types.Types[types.TUINT8], 256*8)) + ir.Names.Zerobase = NewName(ir.Pkgs.Runtime.Lookup("zerobase")) + ir.Names.Zerobase.Class_ = ir.PEXTERN + ir.Names.Zerobase.SetType(types.Types[types.TUINTPTR]) } // Optimize convT2{E,I} for many cases in which T is not pointer-shaped, @@ -965,7 +965,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case fromType.Size() == 0: // n.Left is zero-sized. Use zerobase. cheapexpr(n.X, init) // Evaluate n.Left for side-effects. See issue 19246. 
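A short note on the fast path this hunk rewrites: converting a zero-sized value or a bool/byte to an interface never needs a heap allocation. Zero-sized values all share the address of runtime.zerobase, and bool/byte values point into a shared 256-entry read-only table (the staticuint64s name initialized above, indexed as described just below). A minimal runnable sketch of the byte-table trick, using hypothetical names (staticBytes, toIfacePtr) in place of the runtime's real table:

package main

import "fmt"

// staticBytes mimics runtime.staticuint64s: 256 eight-byte slots,
// declared as bytes so that individual bytes are addressable.
var staticBytes [256 * 8]byte

func init() {
	for i := 0; i < 256; i++ {
		staticBytes[i*8] = byte(i) // value lives in byte 0 on little-endian
	}
}

// toIfacePtr returns a pointer that could serve as an interface data word.
// On a big-endian machine the index would be b*8+7 instead of b*8.
func toIfacePtr(b byte) *byte {
	return &staticBytes[int(b)*8]
}

func main() {
	fmt.Println(*toIfacePtr(42)) // 42, with no per-conversion allocation
}

The compiler's version additionally creates the ir.Name for the table lazily on first use, as the hunk above shows.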
- value = zerobase + value = ir.Names.Zerobase case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()): // n.Left is a bool/byte. Use staticuint64s[n.Left * 8] on little-endian // and staticuint64s[n.Left * 8 + 7] on big-endian. @@ -975,7 +975,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if thearch.LinkArch.ByteOrder == binary.BigEndian { index = ir.NewBinaryExpr(base.Pos, ir.OADD, index, nodintconst(7)) } - xe := ir.NewIndexExpr(base.Pos, staticuint64s, index) + xe := ir.NewIndexExpr(base.Pos, ir.Names.Staticuint64s, index) xe.SetBounded(true) value = xe case n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class_ == ir.PEXTERN && n.X.(*ir.Name).Readonly(): diff --git a/src/cmd/compile/internal/ir/symtab.go b/src/cmd/compile/internal/ir/symtab.go new file mode 100644 index 0000000000000..df694f6c848ea --- /dev/null +++ b/src/cmd/compile/internal/ir/symtab.go @@ -0,0 +1,82 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import ( + "cmd/compile/internal/types" + "cmd/internal/obj" +) + +// Names holds known names. +var Names struct { + Staticuint64s *Name + Zerobase *Name +} + +// Syms holds known symbols. +var Syms struct { + AssertE2I *obj.LSym + AssertE2I2 *obj.LSym + AssertI2I *obj.LSym + AssertI2I2 *obj.LSym + Deferproc *obj.LSym + DeferprocStack *obj.LSym + Deferreturn *obj.LSym + Duffcopy *obj.LSym + Duffzero *obj.LSym + GCWriteBarrier *obj.LSym + Goschedguarded *obj.LSym + Growslice *obj.LSym + Msanread *obj.LSym + Msanwrite *obj.LSym + Msanmove *obj.LSym + Newobject *obj.LSym + Newproc *obj.LSym + Panicdivide *obj.LSym + Panicshift *obj.LSym + PanicdottypeE *obj.LSym + PanicdottypeI *obj.LSym + Panicnildottype *obj.LSym + Panicoverflow *obj.LSym + Raceread *obj.LSym + Racereadrange *obj.LSym + Racewrite *obj.LSym + Racewriterange *obj.LSym + // Wasm + SigPanic *obj.LSym + Typedmemclr *obj.LSym + Typedmemmove *obj.LSym + Udiv *obj.LSym + WriteBarrier *obj.LSym + Zerobase *obj.LSym + ARM64HasATOMICS *obj.LSym + ARMHasVFPv4 *obj.LSym + X86HasFMA *obj.LSym + X86HasPOPCNT *obj.LSym + X86HasSSE41 *obj.LSym + // Wasm + WasmDiv *obj.LSym + // Wasm + WasmMove *obj.LSym + // Wasm + WasmZero *obj.LSym + // Wasm + WasmTruncS *obj.LSym + // Wasm + WasmTruncU *obj.LSym +} + +// Pkgs holds known packages. 
+var Pkgs struct { + Go *types.Pkg + Itab *types.Pkg + Itablink *types.Pkg + Map *types.Pkg + Msan *types.Pkg + Race *types.Pkg + Runtime *types.Pkg + Track *types.Pkg + Unsafe *types.Pkg +} diff --git a/src/cmd/compile/internal/mips64/ggen.go b/src/cmd/compile/internal/mips64/ggen.go index 04e7a66e417b9..4be5bc6f6e9ff 100644 --- a/src/cmd/compile/internal/mips64/ggen.go +++ b/src/cmd/compile/internal/mips64/ggen.go @@ -6,6 +6,7 @@ package mips64 import ( "cmd/compile/internal/gc" + "cmd/compile/internal/ir" "cmd/internal/obj" "cmd/internal/obj/mips" ) @@ -23,7 +24,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { p.Reg = mips.REGSP p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.Duffzero + p.To.Sym = ir.Syms.Duffzero p.To.Offset = 8 * (128 - cnt/int64(gc.Widthptr)) } else { // ADDV $(8+frame+lo-8), SP, r1 diff --git a/src/cmd/compile/internal/mips64/ssa.go b/src/cmd/compile/internal/mips64/ssa.go index 9aaf8715de3f7..0da5eebe8d17a 100644 --- a/src/cmd/compile/internal/mips64/ssa.go +++ b/src/cmd/compile/internal/mips64/ssa.go @@ -383,7 +383,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p = s.Prog(obj.ADUFFZERO) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.Duffzero + p.To.Sym = ir.Syms.Duffzero p.To.Offset = v.AuxInt case ssa.OpMIPS64LoweredZero: // SUBV $8, R1 @@ -433,7 +433,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(obj.ADUFFCOPY) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.Duffcopy + p.To.Sym = ir.Syms.Duffcopy p.To.Offset = v.AuxInt case ssa.OpMIPS64LoweredMove: // SUBV $8, R1 diff --git a/src/cmd/compile/internal/ppc64/ggen.go b/src/cmd/compile/internal/ppc64/ggen.go index 8f5caf5f991b9..29376badf9c99 100644 --- a/src/cmd/compile/internal/ppc64/ggen.go +++ b/src/cmd/compile/internal/ppc64/ggen.go @@ -7,6 +7,7 @@ package ppc64 import ( "cmd/compile/internal/base" "cmd/compile/internal/gc" + "cmd/compile/internal/ir" "cmd/internal/obj" "cmd/internal/obj/ppc64" ) @@ -24,7 +25,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { p.Reg = ppc64.REGSP p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.Duffzero + p.To.Sym = ir.Syms.Duffzero p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr)) } else { p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0) diff --git a/src/cmd/compile/internal/riscv64/ggen.go b/src/cmd/compile/internal/riscv64/ggen.go index 18905a4aea361..c77640765ffff 100644 --- a/src/cmd/compile/internal/riscv64/ggen.go +++ b/src/cmd/compile/internal/riscv64/ggen.go @@ -7,6 +7,7 @@ package riscv64 import ( "cmd/compile/internal/base" "cmd/compile/internal/gc" + "cmd/compile/internal/ir" "cmd/internal/obj" "cmd/internal/obj/riscv" ) @@ -31,7 +32,7 @@ func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { p.Reg = riscv.REG_SP p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.Duffzero + p.To.Sym = ir.Syms.Duffzero p.To.Offset = 8 * (128 - cnt/int64(gc.Widthptr)) return p } diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go index d382304d72e79..616b76e5f6174 100644 --- a/src/cmd/compile/internal/riscv64/ssa.go +++ b/src/cmd/compile/internal/riscv64/ssa.go @@ -614,14 +614,14 @@ func ssaGenValue(s 
*gc.SSAGenState, v *ssa.Value) { p := s.Prog(obj.ADUFFZERO) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.Duffzero + p.To.Sym = ir.Syms.Duffzero p.To.Offset = v.AuxInt case ssa.OpRISCV64DUFFCOPY: p := s.Prog(obj.ADUFFCOPY) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.Duffcopy + p.To.Sym = ir.Syms.Duffcopy p.To.Offset = v.AuxInt default: diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go index c9a52a5f736c6..4e5aa433d970f 100644 --- a/src/cmd/compile/internal/wasm/ssa.go +++ b/src/cmd/compile/internal/wasm/ssa.go @@ -124,7 +124,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { switch v.Op { case ssa.OpWasmLoweredStaticCall, ssa.OpWasmLoweredClosureCall, ssa.OpWasmLoweredInterCall: s.PrepareCall(v) - if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn == gc.Deferreturn { + if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn == ir.Syms.Deferreturn { // add a resume point before call to deferreturn so it can be called again via jmpdefer s.Prog(wasm.ARESUMEPOINT) } @@ -149,20 +149,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { getValue32(s, v.Args[1]) i32Const(s, int32(v.AuxInt)) p := s.Prog(wasm.ACall) - p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmMove} + p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmMove} case ssa.OpWasmLoweredZero: getValue32(s, v.Args[0]) i32Const(s, int32(v.AuxInt)) p := s.Prog(wasm.ACall) - p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmZero} + p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmZero} case ssa.OpWasmLoweredNilCheck: getValue64(s, v.Args[0]) s.Prog(wasm.AI64Eqz) s.Prog(wasm.AIf) p := s.Prog(wasm.ACALLNORESUME) - p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.SigPanic} + p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.SigPanic} s.Prog(wasm.AEnd) if logopt.Enabled() { logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) @@ -314,7 +314,7 @@ func ssaGenValueOnStack(s *gc.SSAGenState, v *ssa.Value, extend bool) { if v.Type.Size() == 8 { // Division of int64 needs helper function wasmDiv to handle the MinInt64 / -1 case. 
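Why that helper is needed: the Go spec defines the most negative int64 divided by -1 to wrap around to itself, while wasm's i64.div_s instruction traps on exactly those operands, so the wasm backend cannot emit the instruction directly for 8-byte division. A small program showing the result Go guarantees:

package main

import (
	"fmt"
	"math"
)

func main() {
	x := int64(math.MinInt64)
	y := int64(-1)
	// The spec defines x / y here as x itself (two's-complement overflow);
	// wasm's i64.div_s would trap on these operands, hence the helper call
	// emitted just below.
	fmt.Println(x / y) // -9223372036854775808
}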
p := s.Prog(wasm.ACall) - p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmDiv} + p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmDiv} break } s.Prog(wasm.AI64DivS) @@ -328,7 +328,7 @@ func ssaGenValueOnStack(s *gc.SSAGenState, v *ssa.Value, extend bool) { s.Prog(wasm.AF64PromoteF32) } p := s.Prog(wasm.ACall) - p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmTruncS} + p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmTruncS} } case ssa.OpWasmI64TruncSatF32U, ssa.OpWasmI64TruncSatF64U: @@ -340,7 +340,7 @@ func ssaGenValueOnStack(s *gc.SSAGenState, v *ssa.Value, extend bool) { s.Prog(wasm.AF64PromoteF32) } p := s.Prog(wasm.ACall) - p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmTruncU} + p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmTruncU} } case ssa.OpWasmF32DemoteF64: diff --git a/src/cmd/compile/internal/x86/ggen.go b/src/cmd/compile/internal/x86/ggen.go index a33ddc81e3dc0..f5d08a68ed272 100644 --- a/src/cmd/compile/internal/x86/ggen.go +++ b/src/cmd/compile/internal/x86/ggen.go @@ -6,6 +6,7 @@ package x86 import ( "cmd/compile/internal/gc" + "cmd/compile/internal/ir" "cmd/internal/obj" "cmd/internal/obj/x86" ) @@ -26,7 +27,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, ax *uint32) *obj.Prog } else if cnt <= int64(128*gc.Widthreg) { p = pp.Appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0) p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(gc.Widthreg))) - p.To.Sym = gc.Duffzero + p.To.Sym = ir.Syms.Duffzero } else { p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0) p = pp.Appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0) diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go index a3aaf03c95845..d3d60591ccc4a 100644 --- a/src/cmd/compile/internal/x86/ssa.go +++ b/src/cmd/compile/internal/x86/ssa.go @@ -10,6 +10,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/gc" + "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" "cmd/compile/internal/types" @@ -671,12 +672,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case ssa.Op386DUFFZERO: p := s.Prog(obj.ADUFFZERO) p.To.Type = obj.TYPE_ADDR - p.To.Sym = gc.Duffzero + p.To.Sym = ir.Syms.Duffzero p.To.Offset = v.AuxInt case ssa.Op386DUFFCOPY: p := s.Prog(obj.ADUFFCOPY) p.To.Type = obj.TYPE_ADDR - p.To.Sym = gc.Duffcopy + p.To.Sym = ir.Syms.Duffcopy p.To.Offset = v.AuxInt case ssa.OpCopy: // TODO: use MOVLreg for reg->reg copies instead of OpCopy? From 527a1895d675ec0384f564dd76e56b3631948dd4 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 23 Dec 2020 00:38:15 -0500 Subject: [PATCH 225/474] [dev.regabi] cmd/compile: move helpers into package ir [generated] [git-generate] cd src/cmd/compile/internal/gc sed -i '' 's/TestBuiltin.*/& t.Skip("mkbuiltin needs fixing")/' builtin_test.go gofmt -w builtin_test.go rf ' # Inline a few little-used constructors to avoid bringing them. 
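# (rf's "ex" command is example-based rewriting: the var declarations at the
# top of the block introduce typed pattern variables, each "old -> new" pair
# is type-checked, and every call site matching the left-hand side is
# rewritten to the right-hand side, e.g. anonfield(typ) becomes
# ir.NewField(base.Pos, nil, nil, typ). The rm command afterwards deletes
# the then-unused helpers.)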
ex { import "cmd/compile/internal/base" import "cmd/compile/internal/ir" import "cmd/compile/internal/types" import "cmd/internal/src" var typ *types.Type var sym *types.Sym var str string symfield(sym, typ) -> ir.NewField(base.Pos, sym, nil, typ) anonfield(typ) -> ir.NewField(base.Pos, nil, nil, typ) namedfield(str, typ) -> ir.NewField(base.Pos, lookup(str), nil, typ) var cp *ir.CallPartExpr callpartMethod(cp) -> cp.Method var n ir.Node callpartMethod(n) -> n.(*ir.CallPartExpr).Method var ns []ir.Node liststmt(ns) -> ir.NewBlockStmt(src.NoXPos, ns) } rm symfield anonfield namedfield liststmt callpartMethod mv maxStackVarSize MaxStackVarSize mv maxImplicitStackVarSize MaxImplicitStackVarSize mv smallArrayBytes MaxSmallArraySize mv MaxStackVarSize cfg.go mv nodbool NewBool mv nodintconst NewInt mv nodstr NewString mv NewBool NewInt NewString const.go mv Mpprec ConstPrec mv bigFloatVal BigFloat mv doesoverflow ConstOverflow mv isGoConst IsConstNode mv smallintconst IsSmallIntConst mv isZero IsZero mv islvalue IsAssignable mv staticValue StaticValue mv samesafeexpr SameSafeExpr mv checkPtr ShouldCheckPtr mv isReflectHeaderDataField IsReflectHeaderDataField mv paramNnames ParamNames mv methodSym MethodSym mv methodSymSuffix MethodSymSuffix mv methodExprFunc MethodExprFunc mv methodExprName MethodExprName mv IsZero IsAssignable StaticValue staticValue1 reassigned \ IsIntrinsicCall \ SameSafeExpr ShouldCheckPtr IsReflectHeaderDataField \ ParamNames MethodSym MethodSymSuffix \ MethodExprName MethodExprFunc \ expr.go mv Curfn CurFunc mv funcsymname FuncSymName mv newFuncNameAt NewFuncNameAt mv setNodeNameFunc MarkFunc mv CurFunc FuncSymName NewFuncNameAt MarkFunc func.go mv isParamStackCopy IsParamStackCopy mv isParamHeapCopy IsParamHeapCopy mv nodfp RegFP mv IsParamStackCopy IsParamHeapCopy RegFP name.go mv hasUniquePos HasUniquePos mv setlineno SetPos mv initExpr InitExpr mv hasNamedResults HasNamedResults mv outervalue OuterValue mv HasNamedResults HasUniquePos SetPos InitExpr OuterValue EscNever node.go mv visitBottomUp VisitFuncsBottomUp # scc.go mv cfg.go \ NewBool NewInt NewString \ # parts of const.go ConstPrec BigFloat ConstOverflow IsConstNode IsSmallIntConst \ expr.go func.go name.go node.go scc.go \ cmd/compile/internal/ir ' Change-Id: I13402c5a2cedbf78d993a1eae2940718f23ac166 Reviewed-on: https://go-review.googlesource.com/c/go/+/279421 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/alg.go | 52 +-- src/cmd/compile/internal/gc/builtin.go | 185 +++++----- src/cmd/compile/internal/gc/builtin_test.go | 1 + src/cmd/compile/internal/gc/closure.go | 46 ++- src/cmd/compile/internal/gc/const.go | 91 +---- src/cmd/compile/internal/gc/dcl.go | 141 +------- src/cmd/compile/internal/gc/escape.go | 75 ++-- src/cmd/compile/internal/gc/gen.go | 24 +- src/cmd/compile/internal/gc/go.go | 25 -- src/cmd/compile/internal/gc/gsubr.go | 8 +- src/cmd/compile/internal/gc/iexport.go | 4 +- src/cmd/compile/internal/gc/iimport.go | 4 +- src/cmd/compile/internal/gc/init.go | 4 +- src/cmd/compile/internal/gc/initorder.go | 4 +- src/cmd/compile/internal/gc/inl.go | 163 ++------- src/cmd/compile/internal/gc/main.go | 12 +- src/cmd/compile/internal/gc/noder.go | 46 +-- src/cmd/compile/internal/gc/obj.go | 4 +- src/cmd/compile/internal/gc/order.go | 22 +- src/cmd/compile/internal/gc/pgen.go | 12 +- src/cmd/compile/internal/gc/racewalk.go | 2 +- src/cmd/compile/internal/gc/range.go | 32 +- src/cmd/compile/internal/gc/reflect.go | 14 +- 
src/cmd/compile/internal/gc/select.go | 34 +- src/cmd/compile/internal/gc/sinit.go | 92 ++--- src/cmd/compile/internal/gc/ssa.go | 32 +- src/cmd/compile/internal/gc/subr.go | 113 +----- src/cmd/compile/internal/gc/swt.go | 14 +- src/cmd/compile/internal/gc/typecheck.go | 213 +++-------- src/cmd/compile/internal/gc/universe.go | 8 +- src/cmd/compile/internal/gc/walk.go | 332 +++++++----------- src/cmd/compile/internal/ir/cfg.go | 26 ++ src/cmd/compile/internal/ir/const.go | 99 ++++++ src/cmd/compile/internal/ir/expr.go | 371 ++++++++++++++++++++ src/cmd/compile/internal/ir/func.go | 27 ++ src/cmd/compile/internal/ir/name.go | 22 ++ src/cmd/compile/internal/ir/node.go | 96 +++++ src/cmd/compile/internal/{gc => ir}/scc.go | 66 ++-- 38 files changed, 1255 insertions(+), 1261 deletions(-) create mode 100644 src/cmd/compile/internal/ir/cfg.go create mode 100644 src/cmd/compile/internal/ir/const.go rename src/cmd/compile/internal/{gc => ir}/scc.go (75%) diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index bcf992ba4b9b7..d21b0d492cc24 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -147,10 +147,10 @@ func genhash(t *types.Type) *obj.LSym { // func sym(p *T, h uintptr) uintptr args := []*ir.Field{ - namedfield("p", types.NewPtr(t)), - namedfield("h", types.Types[types.TUINTPTR]), + ir.NewField(base.Pos, lookup("p"), nil, types.NewPtr(t)), + ir.NewField(base.Pos, lookup("h"), nil, types.Types[types.TUINTPTR]), } - results := []*ir.Field{anonfield(types.Types[types.TUINTPTR])} + results := []*ir.Field{ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR])} tfn := ir.NewFuncType(base.Pos, nil, args, results) fn := dclfunc(sym, tfn) @@ -166,9 +166,9 @@ func genhash(t *types.Type) *obj.LSym { // for i := 0; i < nelem; i++ ni := temp(types.Types[types.TINT]) - init := ir.NewAssignStmt(base.Pos, ni, nodintconst(0)) - cond := ir.NewBinaryExpr(base.Pos, ir.OLT, ni, nodintconst(t.NumElem())) - post := ir.NewAssignStmt(base.Pos, ni, ir.NewBinaryExpr(base.Pos, ir.OADD, ni, nodintconst(1))) + init := ir.NewAssignStmt(base.Pos, ni, ir.NewInt(0)) + cond := ir.NewBinaryExpr(base.Pos, ir.OLT, ni, ir.NewInt(t.NumElem())) + post := ir.NewAssignStmt(base.Pos, ni, ir.NewBinaryExpr(base.Pos, ir.OADD, ni, ir.NewInt(1))) loop := ir.NewForStmt(base.Pos, nil, cond, post, nil) loop.PtrInit().Append(init) @@ -219,7 +219,7 @@ func genhash(t *types.Type) *obj.LSym { na := nodAddr(nx) call.Args.Append(na) call.Args.Append(nh) - call.Args.Append(nodintconst(size)) + call.Args.Append(ir.NewInt(size)) fn.Body.Append(ir.NewAssignStmt(base.Pos, nh, call)) i = next @@ -239,9 +239,9 @@ func genhash(t *types.Type) *obj.LSym { fn.SetDupok(true) typecheckFunc(fn) - Curfn = fn + ir.CurFunc = fn typecheckslice(fn.Body, ctxStmt) - Curfn = nil + ir.CurFunc = nil if base.Debug.DclStack != 0 { types.CheckDclstack() @@ -285,12 +285,12 @@ func hashfor(t *types.Type) ir.Node { } n := NewName(sym) - setNodeNameFunc(n) + ir.MarkFunc(n) n.SetType(functype(nil, []*ir.Field{ - anonfield(types.NewPtr(t)), - anonfield(types.Types[types.TUINTPTR]), + ir.NewField(base.Pos, nil, nil, types.NewPtr(t)), + ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]), }, []*ir.Field{ - anonfield(types.Types[types.TUINTPTR]), + ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]), })) return n } @@ -376,8 +376,8 @@ func geneq(t *types.Type) *obj.LSym { // func sym(p, q *T) bool tfn := ir.NewFuncType(base.Pos, nil, - []*ir.Field{namedfield("p", types.NewPtr(t)), 
namedfield("q", types.NewPtr(t))}, - []*ir.Field{namedfield("r", types.Types[types.TBOOL])}) + []*ir.Field{ir.NewField(base.Pos, lookup("p"), nil, types.NewPtr(t)), ir.NewField(base.Pos, lookup("q"), nil, types.NewPtr(t))}, + []*ir.Field{ir.NewField(base.Pos, lookup("r"), nil, types.Types[types.TBOOL])}) fn := dclfunc(sym, tfn) np := ir.AsNode(tfn.Type().Params().Field(0).Nname) @@ -440,20 +440,20 @@ func geneq(t *types.Type) *obj.LSym { // Generate a series of checks. for i := int64(0); i < nelem; i++ { // if check {} else { goto neq } - nif := ir.NewIfStmt(base.Pos, checkIdx(nodintconst(i)), nil, nil) + nif := ir.NewIfStmt(base.Pos, checkIdx(ir.NewInt(i)), nil, nil) nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq)) fn.Body.Append(nif) } if last { - fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, checkIdx(nodintconst(nelem)))) + fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, checkIdx(ir.NewInt(nelem)))) } } else { // Generate a for loop. // for i := 0; i < nelem; i++ i := temp(types.Types[types.TINT]) - init := ir.NewAssignStmt(base.Pos, i, nodintconst(0)) - cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, nodintconst(nelem)) - post := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, nodintconst(1))) + init := ir.NewAssignStmt(base.Pos, i, ir.NewInt(0)) + cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(nelem)) + post := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, ir.NewInt(1))) loop := ir.NewForStmt(base.Pos, nil, cond, post, nil) loop.PtrInit().Append(init) // if eq(pi, qi) {} else { goto neq } @@ -462,7 +462,7 @@ func geneq(t *types.Type) *obj.LSym { loop.Body.Append(nif) fn.Body.Append(loop) if last { - fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, nodbool(true))) + fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(true))) } } } @@ -572,7 +572,7 @@ func geneq(t *types.Type) *obj.LSym { } if len(flatConds) == 0 { - fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, nodbool(true))) + fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(true))) } else { for _, c := range flatConds[:len(flatConds)-1] { // if cond {} else { goto neq } @@ -594,7 +594,7 @@ func geneq(t *types.Type) *obj.LSym { // r = false // return (or goto ret) fn.Body.Append(ir.NewLabelStmt(base.Pos, neq)) - fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, nodbool(false))) + fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(false))) if EqCanPanic(t) || anyCall(fn) { // Epilogue is large, so share it with the equal case. 
fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, ret)) @@ -615,9 +615,9 @@ func geneq(t *types.Type) *obj.LSym { fn.SetDupok(true) typecheckFunc(fn) - Curfn = fn + ir.CurFunc = fn typecheckslice(fn.Body, ctxStmt) - Curfn = nil + ir.CurFunc = nil if base.Debug.DclStack != 0 { types.CheckDclstack() @@ -726,7 +726,7 @@ func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node { call.Args.Append(nx) call.Args.Append(ny) if needsize { - call.Args.Append(nodintconst(size)) + call.Args.Append(ir.NewInt(size)) } return call diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go index d3e3f9ade6731..12c70fb6d4030 100644 --- a/src/cmd/compile/internal/gc/builtin.go +++ b/src/cmd/compile/internal/gc/builtin.go @@ -3,6 +3,7 @@ package gc import ( + "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/types" ) @@ -211,133 +212,133 @@ func runtimeTypes() []*types.Type { typs[1] = types.NewPtr(typs[0]) typs[2] = types.Types[types.TANY] typs[3] = types.NewPtr(typs[2]) - typs[4] = functype(nil, []*ir.Field{anonfield(typs[1])}, []*ir.Field{anonfield(typs[3])}) + typs[4] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])}) typs[5] = types.Types[types.TUINTPTR] typs[6] = types.Types[types.TBOOL] typs[7] = types.Types[types.TUNSAFEPTR] - typs[8] = functype(nil, []*ir.Field{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []*ir.Field{anonfield(typs[7])}) + typs[8] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[6])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])}) typs[9] = functype(nil, nil, nil) typs[10] = types.Types[types.TINTER] - typs[11] = functype(nil, []*ir.Field{anonfield(typs[10])}, nil) + typs[11] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[10])}, nil) typs[12] = types.Types[types.TINT32] typs[13] = types.NewPtr(typs[12]) - typs[14] = functype(nil, []*ir.Field{anonfield(typs[13])}, []*ir.Field{anonfield(typs[10])}) + typs[14] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[13])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[10])}) typs[15] = types.Types[types.TINT] - typs[16] = functype(nil, []*ir.Field{anonfield(typs[15]), anonfield(typs[15])}, nil) + typs[16] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[15])}, nil) typs[17] = types.Types[types.TUINT] - typs[18] = functype(nil, []*ir.Field{anonfield(typs[17]), anonfield(typs[15])}, nil) - typs[19] = functype(nil, []*ir.Field{anonfield(typs[6])}, nil) + typs[18] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[17]), ir.NewField(base.Pos, nil, nil, typs[15])}, nil) + typs[19] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}, nil) typs[20] = types.Types[types.TFLOAT64] - typs[21] = functype(nil, []*ir.Field{anonfield(typs[20])}, nil) + typs[21] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}, nil) typs[22] = types.Types[types.TINT64] - typs[23] = functype(nil, []*ir.Field{anonfield(typs[22])}, nil) + typs[23] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22])}, nil) typs[24] = types.Types[types.TUINT64] - typs[25] = functype(nil, []*ir.Field{anonfield(typs[24])}, nil) + typs[25] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24])}, nil) typs[26] = types.Types[types.TCOMPLEX128] - typs[27] 
= functype(nil, []*ir.Field{anonfield(typs[26])}, nil) + typs[27] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[26])}, nil) typs[28] = types.Types[types.TSTRING] - typs[29] = functype(nil, []*ir.Field{anonfield(typs[28])}, nil) - typs[30] = functype(nil, []*ir.Field{anonfield(typs[2])}, nil) - typs[31] = functype(nil, []*ir.Field{anonfield(typs[5])}, nil) + typs[29] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}, nil) + typs[30] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2])}, nil) + typs[31] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5])}, nil) typs[32] = types.NewArray(typs[0], 32) typs[33] = types.NewPtr(typs[32]) - typs[34] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[28])}) - typs[35] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[28])}) - typs[36] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[28])}) - typs[37] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[28])}) + typs[34] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) + typs[35] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) + typs[36] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) + typs[37] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) typs[38] = types.NewSlice(typs[28]) - typs[39] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[38])}, []*ir.Field{anonfield(typs[28])}) - typs[40] = functype(nil, []*ir.Field{anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[15])}) + typs[39] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[38])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) + typs[40] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15])}) typs[41] = types.NewArray(typs[0], 4) typs[42] = types.NewPtr(typs[41]) - typs[43] = functype(nil, []*ir.Field{anonfield(typs[42]), anonfield(typs[22])}, []*ir.Field{anonfield(typs[28])}) - typs[44] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[28])}) - typs[45] = functype(nil, 
[]*ir.Field{anonfield(typs[1]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[28])}) + typs[43] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[42]), ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) + typs[44] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) + typs[45] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) typs[46] = types.RuneType typs[47] = types.NewSlice(typs[46]) - typs[48] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[47])}, []*ir.Field{anonfield(typs[28])}) + typs[48] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[47])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) typs[49] = types.NewSlice(typs[0]) - typs[50] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[49])}) + typs[50] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[49])}) typs[51] = types.NewArray(typs[46], 32) typs[52] = types.NewPtr(typs[51]) - typs[53] = functype(nil, []*ir.Field{anonfield(typs[52]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[47])}) - typs[54] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*ir.Field{anonfield(typs[15])}) - typs[55] = functype(nil, []*ir.Field{anonfield(typs[28]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[46]), anonfield(typs[15])}) - typs[56] = functype(nil, []*ir.Field{anonfield(typs[28])}, []*ir.Field{anonfield(typs[15])}) - typs[57] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[2])}, []*ir.Field{anonfield(typs[2])}) - typs[58] = functype(nil, []*ir.Field{anonfield(typs[2])}, []*ir.Field{anonfield(typs[7])}) - typs[59] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[2])}) - typs[60] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[2])}, []*ir.Field{anonfield(typs[2]), anonfield(typs[6])}) - typs[61] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil) - typs[62] = functype(nil, []*ir.Field{anonfield(typs[1])}, nil) + typs[53] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[52]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[47])}) + typs[54] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[5])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15])}) + typs[55] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[46]), ir.NewField(base.Pos, nil, nil, typs[15])}) + typs[56] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15])}) + typs[57] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, 
typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2])}) + typs[58] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])}) + typs[59] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2])}) + typs[60] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2]), ir.NewField(base.Pos, nil, nil, typs[6])}) + typs[61] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[1])}, nil) + typs[62] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1])}, nil) typs[63] = types.NewPtr(typs[5]) - typs[64] = functype(nil, []*ir.Field{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*ir.Field{anonfield(typs[6])}) + typs[64] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[63]), ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[7])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) typs[65] = types.Types[types.TUINT32] - typs[66] = functype(nil, nil, []*ir.Field{anonfield(typs[65])}) + typs[66] = functype(nil, nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[65])}) typs[67] = types.NewMap(typs[2], typs[2]) - typs[68] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[67])}) - typs[69] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[67])}) - typs[70] = functype(nil, nil, []*ir.Field{anonfield(typs[67])}) - typs[71] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[3])}) - typs[72] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*ir.Field{anonfield(typs[3])}) - typs[73] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*ir.Field{anonfield(typs[3])}) - typs[74] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[3]), anonfield(typs[6])}) - typs[75] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*ir.Field{anonfield(typs[3]), anonfield(typs[6])}) - typs[76] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*ir.Field{anonfield(typs[3]), anonfield(typs[6])}) - typs[77] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil) - typs[78] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil) - typs[79] = functype(nil, []*ir.Field{anonfield(typs[3])}, nil) - typs[80] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67])}, nil) + typs[68] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[22]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[67])}) + typs[69] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[67])}) + typs[70] = 
functype(nil, nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[67])}) + typs[71] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])}) + typs[72] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])}) + typs[73] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[1])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])}) + typs[74] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[6])}) + typs[75] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[6])}) + typs[76] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[1])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[6])}) + typs[77] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil) + typs[78] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[2])}, nil) + typs[79] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])}, nil) + typs[80] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67])}, nil) typs[81] = types.NewChan(typs[2], types.Cboth) - typs[82] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[22])}, []*ir.Field{anonfield(typs[81])}) - typs[83] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[81])}) + typs[82] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[81])}) + typs[83] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[81])}) typs[84] = types.NewChan(typs[2], types.Crecv) - typs[85] = functype(nil, []*ir.Field{anonfield(typs[84]), anonfield(typs[3])}, nil) - typs[86] = functype(nil, []*ir.Field{anonfield(typs[84]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[6])}) + typs[85] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[84]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil) + typs[86] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[84]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) typs[87] = types.NewChan(typs[2], types.Csend) - typs[88] = functype(nil, []*ir.Field{anonfield(typs[87]), anonfield(typs[3])}, nil) + typs[88] = functype(nil, 
[]*ir.Field{ir.NewField(base.Pos, nil, nil, typs[87]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil) typs[89] = types.NewArray(typs[0], 3) - typs[90] = tostruct([]*ir.Field{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])}) - typs[91] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil) - typs[92] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[3])}, nil) - typs[93] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[15])}) - typs[94] = functype(nil, []*ir.Field{anonfield(typs[87]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[6])}) - typs[95] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[84])}, []*ir.Field{anonfield(typs[6])}) + typs[90] = tostruct([]*ir.Field{ir.NewField(base.Pos, lookup("enabled"), nil, typs[6]), ir.NewField(base.Pos, lookup("pad"), nil, typs[89]), ir.NewField(base.Pos, lookup("needed"), nil, typs[6]), ir.NewField(base.Pos, lookup("cgo"), nil, typs[6]), ir.NewField(base.Pos, lookup("alignme"), nil, typs[24])}) + typs[91] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil) + typs[92] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil) + typs[93] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15])}) + typs[94] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[87]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) + typs[95] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[84])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) typs[96] = types.NewPtr(typs[6]) - typs[97] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*ir.Field{anonfield(typs[6])}) - typs[98] = functype(nil, []*ir.Field{anonfield(typs[63])}, nil) - typs[99] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*ir.Field{anonfield(typs[15]), anonfield(typs[6])}) - typs[100] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[7])}) - typs[101] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*ir.Field{anonfield(typs[7])}) - typs[102] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*ir.Field{anonfield(typs[7])}) + typs[97] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[96]), ir.NewField(base.Pos, nil, nil, typs[84])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) + typs[98] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[63])}, nil) + typs[99] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[63]), 
ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[6])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[6])}) + typs[100] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])}) + typs[101] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[22]), ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])}) + typs[102] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[7])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])}) typs[103] = types.NewSlice(typs[2]) - typs[104] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[103])}) - typs[105] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil) - typs[106] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[5])}, nil) - typs[107] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*ir.Field{anonfield(typs[6])}) - typs[108] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[6])}) - typs[109] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[7])}, []*ir.Field{anonfield(typs[6])}) - typs[110] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*ir.Field{anonfield(typs[5])}) - typs[111] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[5])}, []*ir.Field{anonfield(typs[5])}) - typs[112] = functype(nil, []*ir.Field{anonfield(typs[22]), anonfield(typs[22])}, []*ir.Field{anonfield(typs[22])}) - typs[113] = functype(nil, []*ir.Field{anonfield(typs[24]), anonfield(typs[24])}, []*ir.Field{anonfield(typs[24])}) - typs[114] = functype(nil, []*ir.Field{anonfield(typs[20])}, []*ir.Field{anonfield(typs[22])}) - typs[115] = functype(nil, []*ir.Field{anonfield(typs[20])}, []*ir.Field{anonfield(typs[24])}) - typs[116] = functype(nil, []*ir.Field{anonfield(typs[20])}, []*ir.Field{anonfield(typs[65])}) - typs[117] = functype(nil, []*ir.Field{anonfield(typs[22])}, []*ir.Field{anonfield(typs[20])}) - typs[118] = functype(nil, []*ir.Field{anonfield(typs[24])}, []*ir.Field{anonfield(typs[20])}) - typs[119] = functype(nil, []*ir.Field{anonfield(typs[65])}, []*ir.Field{anonfield(typs[20])}) - typs[120] = functype(nil, []*ir.Field{anonfield(typs[26]), anonfield(typs[26])}, []*ir.Field{anonfield(typs[26])}) - typs[121] = functype(nil, []*ir.Field{anonfield(typs[5]), anonfield(typs[5])}, nil) - typs[122] = functype(nil, []*ir.Field{anonfield(typs[5]), anonfield(typs[5]), anonfield(typs[5])}, nil) - typs[123] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil) + typs[104] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[103]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[103])}) + typs[105] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil) + typs[106] 
= functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil) + typs[107] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[5])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) + typs[108] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) + typs[109] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[7])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) + typs[110] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[5])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5])}) + typs[111] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[5])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5])}) + typs[112] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22]), ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22])}) + typs[113] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24]), ir.NewField(base.Pos, nil, nil, typs[24])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24])}) + typs[114] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22])}) + typs[115] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24])}) + typs[116] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[65])}) + typs[117] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}) + typs[118] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}) + typs[119] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[65])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}) + typs[120] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[26]), ir.NewField(base.Pos, nil, nil, typs[26])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[26])}) + typs[121] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil) + typs[122] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil) + typs[123] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil) typs[124] = types.NewSlice(typs[7]) - typs[125] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[124])}, nil) + typs[125] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[124])}, nil) typs[126] = types.Types[types.TUINT8] - typs[127] = functype(nil, []*ir.Field{anonfield(typs[126]), anonfield(typs[126])}, nil) + typs[127] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[126]), ir.NewField(base.Pos, nil, nil, typs[126])}, nil) typs[128] = types.Types[types.TUINT16] - typs[129] = functype(nil, 
[]*ir.Field{anonfield(typs[128]), anonfield(typs[128])}, nil) - typs[130] = functype(nil, []*ir.Field{anonfield(typs[65]), anonfield(typs[65])}, nil) - typs[131] = functype(nil, []*ir.Field{anonfield(typs[24]), anonfield(typs[24])}, nil) + typs[129] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[128]), ir.NewField(base.Pos, nil, nil, typs[128])}, nil) + typs[130] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[65]), ir.NewField(base.Pos, nil, nil, typs[65])}, nil) + typs[131] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24]), ir.NewField(base.Pos, nil, nil, typs[24])}, nil) return typs[:] } diff --git a/src/cmd/compile/internal/gc/builtin_test.go b/src/cmd/compile/internal/gc/builtin_test.go index 57f24b2287978..df15ca5c7d2f0 100644 --- a/src/cmd/compile/internal/gc/builtin_test.go +++ b/src/cmd/compile/internal/gc/builtin_test.go @@ -13,6 +13,7 @@ import ( ) func TestBuiltin(t *testing.T) { + t.Skip("mkbuiltin needs fixing") testenv.MustHaveGoRun(t) t.Parallel() diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index 27a9bc7cf8902..e758cf86d4750 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -18,8 +18,8 @@ func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node { ntype := p.typeExpr(expr.Type) fn := ir.NewFunc(p.pos(expr)) - fn.SetIsHiddenClosure(Curfn != nil) - fn.Nname = newFuncNameAt(p.pos(expr), ir.BlankNode.Sym(), fn) // filled in by typecheckclosure + fn.SetIsHiddenClosure(ir.CurFunc != nil) + fn.Nname = ir.NewFuncNameAt(p.pos(expr), ir.BlankNode.Sym(), fn) // filled in by typecheckclosure fn.Nname.Ntype = xtype fn.Nname.Defn = fn @@ -111,22 +111,22 @@ func typecheckclosure(clo *ir.ClosureExpr, top int) { } } - fn.Nname.SetSym(closurename(Curfn)) - setNodeNameFunc(fn.Nname) + fn.Nname.SetSym(closurename(ir.CurFunc)) + ir.MarkFunc(fn.Nname) typecheckFunc(fn) // Type check the body now, but only if we're inside a function. // At top level (in a variable initialization: curfn==nil) we're not // ready to type check code yet; we'll check it later, because the // underlying closure function we create is added to Target.Decls. - if Curfn != nil && clo.Type() != nil { - oldfn := Curfn - Curfn = fn + if ir.CurFunc != nil && clo.Type() != nil { + oldfn := ir.CurFunc + ir.CurFunc = fn olddd := decldepth decldepth = 1 typecheckslice(fn.Body, ctxStmt) decldepth = olddd - Curfn = oldfn + ir.CurFunc = oldfn } Target.Decls = append(Target.Decls, fn) @@ -335,13 +335,13 @@ func hasemptycvars(clo *ir.ClosureExpr) bool { // and compiling runtime func closuredebugruntimecheck(clo *ir.ClosureExpr) { if base.Debug.Closure > 0 { - if clo.Esc() == EscHeap { + if clo.Esc() == ir.EscHeap { base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func.ClosureVars) } else { base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func.ClosureVars) } } - if base.Flag.CompilingRuntime && clo.Esc() == EscHeap { + if base.Flag.CompilingRuntime && clo.Esc() == ir.EscHeap { base.ErrorfAt(clo.Pos(), "heap-allocated closure, not allowed in runtime") } } @@ -364,14 +364,14 @@ func closureType(clo *ir.ClosureExpr) *types.Type { // the struct is unnamed so that closures in multiple packages with the // same struct type can share the descriptor. 
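	// A sketch of the header struct assembled below, for a closure that
	// captures x by value and y by reference (field names and types here
	// are illustrative, not fixed):
	//
	//	struct {
	//		.F uintptr // code pointer
	//		x  int     // byval capture, stored directly
	//		y  *int    // byref capture, stored as a pointer
	//	}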
fields := []*ir.Field{ - namedfield(".F", types.Types[types.TUINTPTR]), + ir.NewField(base.Pos, lookup(".F"), nil, types.Types[types.TUINTPTR]), } for _, v := range clo.Func.ClosureVars { typ := v.Type() if !v.Byval() { typ = types.NewPtr(typ) } - fields = append(fields, symfield(v.Sym(), typ)) + fields = append(fields, ir.NewField(base.Pos, v.Sym(), nil, typ)) } typ := tostruct(fields) typ.SetNoalg(true) @@ -435,16 +435,16 @@ func typecheckpartialcall(n ir.Node, sym *types.Sym) *ir.CallPartExpr { // for partial calls. func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir.Func { rcvrtype := dot.X.Type() - sym := methodSymSuffix(rcvrtype, meth, "-fm") + sym := ir.MethodSymSuffix(rcvrtype, meth, "-fm") if sym.Uniq() { return sym.Def.(*ir.Func) } sym.SetUniq(true) - savecurfn := Curfn + savecurfn := ir.CurFunc saveLineNo := base.Pos - Curfn = nil + ir.CurFunc = nil // Set line number equal to the line number where the method is declared. var m *types.Field @@ -480,7 +480,7 @@ func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir. } call := ir.NewCallExpr(base.Pos, ir.OCALL, ir.NewSelectorExpr(base.Pos, ir.OXDOT, ptr, meth), nil) - call.Args.Set(paramNnames(tfn.Type())) + call.Args.Set(ir.ParamNames(tfn.Type())) call.IsDDD = tfn.Type().IsVariadic() if t0.NumResults() != 0 { ret := ir.NewReturnStmt(base.Pos, nil) @@ -496,11 +496,11 @@ func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir. typecheckFunc(fn) // Need to typecheck the body of the just-generated wrapper. // typecheckslice() requires that Curfn is set when processing an ORETURN. - Curfn = fn + ir.CurFunc = fn typecheckslice(fn.Body, ctxStmt) sym.Def = fn Target.Decls = append(Target.Decls, fn) - Curfn = savecurfn + ir.CurFunc = savecurfn base.Pos = saveLineNo return fn @@ -511,8 +511,8 @@ func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir. // The address of a variable of the returned type can be cast to a func. func partialCallType(n *ir.CallPartExpr) *types.Type { t := tostruct([]*ir.Field{ - namedfield("F", types.Types[types.TUINTPTR]), - namedfield("R", n.X.Type()), + ir.NewField(base.Pos, lookup("F"), nil, types.Types[types.TUINTPTR]), + ir.NewField(base.Pos, lookup("R"), nil, n.X.Type()), }) t.SetNoalg(true) return t @@ -562,9 +562,3 @@ func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node { return walkexpr(cfn, init) } - -// callpartMethod returns the *types.Field representing the method -// referenced by method value n. -func callpartMethod(n ir.Node) *types.Field { - return n.(*ir.CallPartExpr).Method -} diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 553f06757f0c1..ad27f3ea44582 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -18,30 +18,6 @@ import ( "unicode" ) -const ( - // Maximum size in bits for big.Ints before signalling - // overflow and also mantissa precision for big.Floats. 
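// (Re-homed as ir.ConstPrec in this series, value unchanged; see the
// ir.ConstPrec uses below. Roughly, the compiler's untyped constants are
// backed by big.Int/big.Float values capped at this precision, e.g.
//
//	f := new(big.Float).SetPrec(Mpprec) // as bigFloatVal did
//
// and the overflow checks compare BitLen/MantExp against it.)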
- Mpprec = 512 -) - -func bigFloatVal(v constant.Value) *big.Float { - f := new(big.Float) - f.SetPrec(Mpprec) - switch u := constant.Val(v).(type) { - case int64: - f.SetInt64(u) - case *big.Int: - f.SetInt(u) - case *big.Float: - f.Set(u) - case *big.Rat: - f.SetRat(u) - default: - base.Fatalf("unexpected: %v", u) - } - return f -} - func roundFloat(v constant.Value, sz int64) constant.Value { switch sz { case 4: @@ -334,8 +310,8 @@ func toint(v constant.Value) constant.Value { // something that looks like an integer we omit the // value from the error message. // (See issue #11371). - f := bigFloatVal(v) - if f.MantExp(nil) > 2*Mpprec { + f := ir.BigFloat(v) + if f.MantExp(nil) > 2*ir.ConstPrec { base.Errorf("integer too large") } else { var t big.Float @@ -352,38 +328,6 @@ func toint(v constant.Value) constant.Value { return constant.MakeInt64(1) } -// doesoverflow reports whether constant value v is too large -// to represent with type t. -func doesoverflow(v constant.Value, t *types.Type) bool { - switch { - case t.IsInteger(): - bits := uint(8 * t.Size()) - if t.IsUnsigned() { - x, ok := constant.Uint64Val(v) - return !ok || x>>bits != 0 - } - x, ok := constant.Int64Val(v) - if x < 0 { - x = ^x - } - return !ok || x>>(bits-1) != 0 - case t.IsFloat(): - switch t.Size() { - case 4: - f, _ := constant.Float32Val(v) - return math.IsInf(float64(f), 0) - case 8: - f, _ := constant.Float64Val(v) - return math.IsInf(f, 0) - } - case t.IsComplex(): - ft := types.FloatForComplex(t) - return doesoverflow(constant.Real(v), ft) || doesoverflow(constant.Imag(v), ft) - } - base.Fatalf("doesoverflow: %v, %v", v, t) - panic("unreachable") -} - // overflow reports whether constant value v is too large // to represent with type t, and emits an error message if so. func overflow(v constant.Value, t *types.Type) bool { @@ -392,11 +336,11 @@ func overflow(v constant.Value, t *types.Type) bool { if t.IsUntyped() { return false } - if v.Kind() == constant.Int && constant.BitLen(v) > Mpprec { + if v.Kind() == constant.Int && constant.BitLen(v) > ir.ConstPrec { base.Errorf("integer too large") return true } - if doesoverflow(v, t) { + if ir.ConstOverflow(v, t) { base.Errorf("constant %v overflows %v", types.FmtConst(v, false), t) return true } @@ -656,13 +600,13 @@ var overflowNames = [...]string{ // origConst returns an OLITERAL with orig n and value v. func origConst(n ir.Node, v constant.Value) ir.Node { - lno := setlineno(n) + lno := ir.SetPos(n) v = convertVal(v, n.Type(), false) base.Pos = lno switch v.Kind() { case constant.Int: - if constant.BitLen(v) <= Mpprec { + if constant.BitLen(v) <= ir.ConstPrec { break } fallthrough @@ -778,14 +722,6 @@ func defaultType(t *types.Type) *types.Type { return nil } -func smallintconst(n ir.Node) bool { - if n.Op() == ir.OLITERAL { - v, ok := constant.Int64Val(n.Val()) - return ok && int64(int32(v)) == v - } - return false -} - // indexconst checks if Node n contains a constant expression // representable as a non-negative int and returns its value. // If n is not a constant expression, not representable as an @@ -803,21 +739,12 @@ func indexconst(n ir.Node) int64 { if v.Kind() != constant.Int || constant.Sign(v) < 0 { return -1 } - if doesoverflow(v, types.Types[types.TINT]) { + if ir.ConstOverflow(v, types.Types[types.TINT]) { return -2 } return ir.IntVal(types.Types[types.TINT], v) } -// isGoConst reports whether n is a Go language constant (as opposed to a -// compile-time constant). 
-// -// Expressions derived from nil, like string([]byte(nil)), while they -// may be known at compile time, are not Go language constants. -func isGoConst(n ir.Node) bool { - return n.Op() == ir.OLITERAL -} - // anyCallOrChan reports whether n contains any calls or channel operations. func anyCallOrChan(n ir.Node) bool { return ir.Any(n, func(n ir.Node) bool { @@ -875,7 +802,7 @@ func (s *constSet) add(pos src.XPos, n ir.Node, what, where string) { } } - if !isGoConst(n) { + if !ir.IsConstNode(n) { return } if n.Type().IsUntyped() { @@ -906,7 +833,7 @@ func (s *constSet) add(pos src.XPos, n ir.Node, what, where string) { } k := constSetKey{typ, ir.ConstValue(n)} - if hasUniquePos(n) { + if ir.HasUniquePos(n) { pos = n.Pos() } diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index c084565f3dafb..1189d0ec1205b 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -80,12 +80,12 @@ func declare(n *ir.Name, ctxt ir.Class) { } Target.Externs = append(Target.Externs, n) } else { - if Curfn == nil && ctxt == ir.PAUTO { + if ir.CurFunc == nil && ctxt == ir.PAUTO { base.Pos = n.Pos() base.Fatalf("automatic outside function") } - if Curfn != nil && ctxt != ir.PFUNC && n.Op() == ir.ONAME { - Curfn.Dcl = append(Curfn.Dcl, n) + if ir.CurFunc != nil && ctxt != ir.PFUNC && n.Op() == ir.ONAME { + ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n) } if n.Op() == ir.OTYPE { declare_typegen++ @@ -95,7 +95,7 @@ func declare(n *ir.Name, ctxt ir.Class) { gen = vargen } types.Pushdcl(s) - n.Curfn = Curfn + n.Curfn = ir.CurFunc } if ctxt == ir.PAUTO { @@ -137,7 +137,7 @@ func variter(vl []*ir.Name, t ir.Ntype, el []ir.Node) []ir.Node { declare(v, dclcontext) v.Ntype = t v.Defn = as2 - if Curfn != nil { + if ir.CurFunc != nil { init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v)) } } @@ -158,8 +158,8 @@ func variter(vl []*ir.Name, t ir.Ntype, el []ir.Node) []ir.Node { declare(v, dclcontext) v.Ntype = t - if e != nil || Curfn != nil || ir.IsBlank(v) { - if Curfn != nil { + if e != nil || ir.CurFunc != nil || ir.IsBlank(v) { + if ir.CurFunc != nil { init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v)) } as := ir.NewAssignStmt(base.Pos, v, e) @@ -176,29 +176,6 @@ func variter(vl []*ir.Name, t ir.Ntype, el []ir.Node) []ir.Node { return init } -// newFuncNameAt generates a new name node for a function or method. -func newFuncNameAt(pos src.XPos, s *types.Sym, fn *ir.Func) *ir.Name { - if fn.Nname != nil { - base.Fatalf("newFuncName - already have name") - } - n := ir.NewNameAt(pos, s) - n.SetFunc(fn) - fn.Nname = n - return n -} - -func anonfield(typ *types.Type) *ir.Field { - return symfield(nil, typ) -} - -func namedfield(s string, typ *types.Type) *ir.Field { - return symfield(lookup(s), typ) -} - -func symfield(s *types.Sym, typ *types.Type) *ir.Field { - return ir.NewField(base.Pos, s, nil, typ) -} - // oldname returns the Node that declares symbol s in the current scope. // If no such Node currently exists, an ONONAME Node is returned instead. // Automatically creates a new closure variable if the referenced symbol was @@ -216,7 +193,7 @@ func oldname(s *types.Sym) ir.Node { return ir.NewIdent(base.Pos, s) } - if Curfn != nil && n.Op() == ir.ONAME && n.Name().Curfn != nil && n.Name().Curfn != Curfn { + if ir.CurFunc != nil && n.Op() == ir.ONAME && n.Name().Curfn != nil && n.Name().Curfn != ir.CurFunc { // Inner func is referring to var in outer func. 
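	// For example (a sketch):
	//
	//	func outer() {
	//		x := 0
	//		_ = func() { x++ } // x resolves through this path, with
	//		                   // ir.CurFunc set to the inner function
	//	}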
// // TODO(rsc): If there is an outer variable x and we @@ -225,7 +202,7 @@ func oldname(s *types.Sym) ir.Node { // make x a closure variable unnecessarily. n := n.(*ir.Name) c := n.Name().Innermost - if c == nil || c.Curfn != Curfn { + if c == nil || c.Curfn != ir.CurFunc { // Do not have a closure var for the active closure yet; make one. c = NewName(s) c.Class_ = ir.PAUTOHEAP @@ -238,7 +215,7 @@ func oldname(s *types.Sym) ir.Node { c.Outer = n.Name().Innermost n.Name().Innermost = c - Curfn.ClosureVars = append(Curfn.ClosureVars, c) + ir.CurFunc.ClosureVars = append(ir.CurFunc.ClosureVars, c) } // return ref to closure var, not original @@ -322,8 +299,8 @@ func colasdefn(left []ir.Node, defn ir.Node) { // returns in auto-declaration context. func funchdr(fn *ir.Func) { // change the declaration context from extern to auto - funcStack = append(funcStack, funcStackEnt{Curfn, dclcontext}) - Curfn = fn + funcStack = append(funcStack, funcStackEnt{ir.CurFunc, dclcontext}) + ir.CurFunc = fn dclcontext = ir.PAUTO types.Markdcl() @@ -451,7 +428,7 @@ func funcbody() { types.Popdcl() var e funcStackEnt funcStack, e = funcStack[:len(funcStack)-1], funcStack[len(funcStack)-1] - Curfn, dclcontext = e.curfn, e.dclcontext + ir.CurFunc, dclcontext = e.curfn, e.dclcontext } // structs, functions, and methods. @@ -542,7 +519,7 @@ func tointerface(nmethods []*ir.Field) *types.Type { } func fakeRecv() *ir.Field { - return anonfield(types.FakeRecvType()) + return ir.NewField(base.Pos, nil, nil, types.FakeRecvType()) } func fakeRecvField() *types.Field { @@ -588,74 +565,6 @@ func functype(nrecv *ir.Field, nparams, nresults []*ir.Field) *types.Type { return t } -func hasNamedResults(fn *ir.Func) bool { - typ := fn.Type() - return typ.NumResults() > 0 && types.OrigSym(typ.Results().Field(0).Sym) != nil -} - -// methodSym returns the method symbol representing a method name -// associated with a specific receiver type. -// -// Method symbols can be used to distinguish the same method appearing -// in different method sets. For example, T.M and (*T).M have distinct -// method symbols. -// -// The returned symbol will be marked as a function. -func methodSym(recv *types.Type, msym *types.Sym) *types.Sym { - sym := methodSymSuffix(recv, msym, "") - sym.SetFunc(true) - return sym -} - -// methodSymSuffix is like methodsym, but allows attaching a -// distinguisher suffix. To avoid collisions, the suffix must not -// start with a letter, number, or period. -func methodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sym { - if msym.IsBlank() { - base.Fatalf("blank method name") - } - - rsym := recv.Sym() - if recv.IsPtr() { - if rsym != nil { - base.Fatalf("declared pointer receiver type: %v", recv) - } - rsym = recv.Elem().Sym() - } - - // Find the package the receiver type appeared in. For - // anonymous receiver types (i.e., anonymous structs with - // embedded fields), use the "go" pseudo-package instead. - rpkg := ir.Pkgs.Go - if rsym != nil { - rpkg = rsym.Pkg - } - - var b bytes.Buffer - if recv.IsPtr() { - // The parentheses aren't really necessary, but - // they're pretty traditional at this point. - fmt.Fprintf(&b, "(%-S)", recv) - } else { - fmt.Fprintf(&b, "%-S", recv) - } - - // A particular receiver type may have multiple non-exported - // methods with the same name. To disambiguate them, include a - // package qualifier for names that came from a different - // package than the receiver type. 
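// Illustrative results (receiver type T, method M; a sketch):
//
//	methodSymSuffix(T, M, "")    -> "T.M"
//	methodSymSuffix(*T, M, "")   -> "(*T).M"
//	methodSymSuffix(T, M, "-fm") -> "T.M-fm" (method-value wrapper)
//
// all looked up in the receiver type's package; the check below adds the
// qualifier for the cross-package unexported case.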
- if !types.IsExported(msym.Name) && msym.Pkg != rpkg { - b.WriteString(".") - b.WriteString(msym.Pkg.Prefix) - } - - b.WriteString(".") - b.WriteString(msym.Name) - b.WriteString(suffix) - - return rpkg.LookupBytes(b.Bytes()) -} - // Add a method, declared as a function. // - msym is the method symbol // - t is function type (with receiver) @@ -740,10 +649,6 @@ func addmethod(n *ir.Func, msym *types.Sym, t *types.Type, local, nointerface bo return f } -func funcsymname(s *types.Sym) string { - return s.Name + "·f" -} - // funcsym returns s·f. func funcsym(s *types.Sym) *types.Sym { // funcsymsmu here serves to protect not just mutations of funcsyms (below), @@ -756,7 +661,7 @@ func funcsym(s *types.Sym) *types.Sym { // Note makefuncsym also does package look-up of func sym names, // but that it is only called serially, from the front end. funcsymsmu.Lock() - sf, existed := s.Pkg.LookupOK(funcsymname(s)) + sf, existed := s.Pkg.LookupOK(ir.FuncSymName(s)) // Don't export s·f when compiling for dynamic linking. // When dynamically linking, the necessary function // symbols will be created explicitly with makefuncsym. @@ -790,31 +695,21 @@ func makefuncsym(s *types.Sym) { // get funcsyms. return } - if _, existed := s.Pkg.LookupOK(funcsymname(s)); !existed { + if _, existed := s.Pkg.LookupOK(ir.FuncSymName(s)); !existed { funcsyms = append(funcsyms, s) } } -// setNodeNameFunc marks a node as a function. -func setNodeNameFunc(n *ir.Name) { - if n.Op() != ir.ONAME || n.Class_ != ir.Pxxx { - base.Fatalf("expected ONAME/Pxxx node, got %v", n) - } - - n.Class_ = ir.PFUNC - n.Sym().SetFunc(true) -} - func dclfunc(sym *types.Sym, tfn ir.Ntype) *ir.Func { if tfn.Op() != ir.OTFUNC { base.Fatalf("expected OTFUNC node, got %v", tfn) } fn := ir.NewFunc(base.Pos) - fn.Nname = newFuncNameAt(base.Pos, sym, fn) + fn.Nname = ir.NewFuncNameAt(base.Pos, sym, fn) fn.Nname.Defn = fn fn.Nname.Ntype = tfn - setNodeNameFunc(fn.Nname) + ir.MarkFunc(fn.Nname) funchdr(fn) fn.Nname.Ntype = typecheckNtype(fn.Nname.Ntype) return fn diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 4366a5cc2c179..6843d8b00e45b 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -147,16 +147,16 @@ type EscEdge struct { func escFmt(n ir.Node) string { text := "" switch n.Esc() { - case EscUnknown: + case ir.EscUnknown: break - case EscHeap: + case ir.EscHeap: text = "esc(h)" - case EscNone: + case ir.EscNone: text = "esc(no)" - case EscNever: + case ir.EscNever: text = "esc(N)" default: @@ -281,7 +281,7 @@ func (e *Escape) stmt(n ir.Node) { return } - lno := setlineno(n) + lno := ir.SetPos(n) defer func() { base.Pos = lno }() @@ -483,7 +483,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { return } - lno := setlineno(n) + lno := ir.SetPos(n) defer func() { base.Pos = lno }() @@ -564,7 +564,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { case ir.OCONV, ir.OCONVNOP: n := n.(*ir.ConvExpr) - if checkPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.X.Type().IsPtr() { + if ir.ShouldCheckPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.X.Type().IsPtr() { // When -d=checkptr=2 is enabled, treat // conversions to unsafe.Pointer as an // escaping operation. 
This allows better @@ -618,7 +618,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { n := n.(*ir.CallPartExpr) closureK := e.spill(k, n) - m := callpartMethod(n) + m := n.Method // We don't know how the method value will be called // later, so conservatively assume the result @@ -725,7 +725,7 @@ func (e *Escape) unsafeValue(k EscHole, n ir.Node) { } case ir.ODOTPTR: n := n.(*ir.SelectorExpr) - if isReflectHeaderDataField(n) { + if ir.IsReflectHeaderDataField(n) { e.expr(k.deref(n, "reflect.Header.Data"), n.X) } else { e.discard(n.X) @@ -825,7 +825,7 @@ func (e *Escape) assign(dst, src ir.Node, why string, where ir.Node) { } k := e.addr(dst) - if dst != nil && dst.Op() == ir.ODOTPTR && isReflectHeaderDataField(dst) { + if dst != nil && dst.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(dst) { e.unsafeValue(e.heapHole().note(where, why), src) } else { if ignore { @@ -847,7 +847,7 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) { if topLevelDefer { // force stack allocation of defer record, unless // open-coded defers are used (see ssa.go) - where.SetEsc(EscNever) + where.SetEsc(ir.EscNever) } argument := func(k EscHole, arg ir.Node) { @@ -876,14 +876,14 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) { var fn *ir.Name switch call.Op() { case ir.OCALLFUNC: - switch v := staticValue(call.X); { + switch v := ir.StaticValue(call.X); { case v.Op() == ir.ONAME && v.(*ir.Name).Class_ == ir.PFUNC: fn = v.(*ir.Name) case v.Op() == ir.OCLOSURE: fn = v.(*ir.ClosureExpr).Func.Nname } case ir.OCALLMETH: - fn = methodExprName(call.X) + fn = ir.MethodExprName(call.X) } fntype := call.X.Type() @@ -1532,13 +1532,13 @@ func (e *Escape) finish(fns []*ir.Func) { logopt.LogOpt(n.Pos(), "escape", "escape", ir.FuncName(e.curfn)) } } - n.SetEsc(EscHeap) + n.SetEsc(ir.EscHeap) addrescapes(n) } else { if base.Flag.LowerM != 0 && n.Op() != ir.ONAME { base.WarnfAt(n.Pos(), "%v does not escape", n) } - n.SetEsc(EscNone) + n.SetEsc(ir.EscNone) if loc.transient { switch n.Op() { case ir.OCLOSURE: @@ -1656,7 +1656,7 @@ func ParseLeaks(s string) EscLeaks { } func escapes(all []ir.Node) { - visitBottomUp(all, escapeFuncs) + ir.VisitFuncsBottomUp(all, escapeFuncs) } const ( @@ -1680,13 +1680,6 @@ func max8(a, b int8) int8 { return b } -const ( - EscUnknown = iota - EscNone // Does not escape to heap, result, or parameters. - EscHeap // Reachable from the heap - EscNever // By construction will not escape. -) - // funcSym returns fn.Nname.Sym if no nils are encountered along the way. func funcSym(fn *ir.Func) *types.Sym { if fn == nil || fn.Nname == nil { @@ -1801,14 +1794,14 @@ func isSelfAssign(dst, src ir.Node) bool { // Safe trailing accessors that are permitted to differ. 
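		// For example (sketch), both of
		//
		//	val.x = val.y
		//	val.a[i] = val.a[j]
		//
		// assign within the same base object, so they are treated
		// as self-assignments and do not cause val to escape.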
dst := dst.(*ir.SelectorExpr) src := src.(*ir.SelectorExpr) - return samesafeexpr(dst.X, src.X) + return ir.SameSafeExpr(dst.X, src.X) case ir.OINDEX: dst := dst.(*ir.IndexExpr) src := src.(*ir.IndexExpr) if mayAffectMemory(dst.Index) || mayAffectMemory(src.Index) { return false } - return samesafeexpr(dst.X, src.X) + return ir.SameSafeExpr(dst.X, src.X) default: return false } @@ -1876,18 +1869,18 @@ func heapAllocReason(n ir.Node) string { } } - if n.Type().Width > maxStackVarSize { + if n.Type().Width > ir.MaxStackVarSize { return "too large for stack" } - if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Width >= maxImplicitStackVarSize { + if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Width >= ir.MaxImplicitStackVarSize { return "too large for stack" } - if n.Op() == ir.OCLOSURE && closureType(n.(*ir.ClosureExpr)).Size() >= maxImplicitStackVarSize { + if n.Op() == ir.OCLOSURE && closureType(n.(*ir.ClosureExpr)).Size() >= ir.MaxImplicitStackVarSize { return "too large for stack" } - if n.Op() == ir.OCALLPART && partialCallType(n.(*ir.CallPartExpr)).Size() >= maxImplicitStackVarSize { + if n.Op() == ir.OCALLPART && partialCallType(n.(*ir.CallPartExpr)).Size() >= ir.MaxImplicitStackVarSize { return "too large for stack" } @@ -1897,10 +1890,10 @@ func heapAllocReason(n ir.Node) string { if r == nil { r = n.Len } - if !smallintconst(r) { + if !ir.IsSmallIntConst(r) { return "non-constant size" } - if t := n.Type(); t.Elem().Width != 0 && ir.Int64Val(r) >= maxImplicitStackVarSize/t.Elem().Width { + if t := n.Type(); t.Elem().Width != 0 && ir.Int64Val(r) >= ir.MaxImplicitStackVarSize/t.Elem().Width { return "too large for stack" } } @@ -1922,13 +1915,13 @@ func addrescapes(n ir.Node) { case ir.ONAME: n := n.(*ir.Name) - if n == nodfp { + if n == ir.RegFP { break } // if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping. // on PPARAM it means something different. - if n.Class_ == ir.PAUTO && n.Esc() == EscNever { + if n.Class_ == ir.PAUTO && n.Esc() == ir.EscNever { break } @@ -1954,12 +1947,12 @@ func addrescapes(n ir.Node) { // // then we're analyzing the inner closure but we need to move x to the // heap in f, not in the inner closure. Flip over to f before calling moveToHeap. - oldfn := Curfn - Curfn = n.Curfn + oldfn := ir.CurFunc + ir.CurFunc = n.Curfn ln := base.Pos - base.Pos = Curfn.Pos() + base.Pos = ir.CurFunc.Pos() moveToHeap(n) - Curfn = oldfn + ir.CurFunc = oldfn base.Pos = ln // ODOTPTR has already been introduced, @@ -2039,9 +2032,9 @@ func moveToHeap(n *ir.Name) { // liveness and other analyses use the underlying stack slot // and not the now-pseudo-variable n. found := false - for i, d := range Curfn.Dcl { + for i, d := range ir.CurFunc.Dcl { if d == n { - Curfn.Dcl[i] = stackcopy + ir.CurFunc.Dcl[i] = stackcopy found = true break } @@ -2054,14 +2047,14 @@ func moveToHeap(n *ir.Name) { if !found { base.Fatalf("cannot find %v in local variable list", n) } - Curfn.Dcl = append(Curfn.Dcl, n) + ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n) } // Modify n in place so that uses of n now mean indirection of the heapaddr. 
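	// The effect, as a sketch: from here on a use of n compiles as
	//
	//	*n.Heapaddr // load/store through the heap copy
	//
	// with n.Heapaddr pointing at the allocation created above.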
n.Class_ = ir.PAUTOHEAP n.SetFrameOffset(0) n.Heapaddr = heapaddr - n.SetEsc(EscHeap) + n.SetEsc(ir.EscHeap) if base.Flag.LowerM != 0 { base.WarnfAt(n.Pos(), "moved to heap: %v", n) } diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go index bcd58fd2c5b2c..53298c878de9e 100644 --- a/src/cmd/compile/internal/gc/gen.go +++ b/src/cmd/compile/internal/gc/gen.go @@ -28,26 +28,6 @@ func sysvar(name string) *obj.LSym { return ir.Pkgs.Runtime.Lookup(name).Linksym() } -// isParamStackCopy reports whether this is the on-stack copy of a -// function parameter that moved to the heap. -func isParamStackCopy(n ir.Node) bool { - if n.Op() != ir.ONAME { - return false - } - name := n.(*ir.Name) - return (name.Class_ == ir.PPARAM || name.Class_ == ir.PPARAMOUT) && name.Heapaddr != nil -} - -// isParamHeapCopy reports whether this is the on-heap copy of -// a function parameter that moved to the heap. -func isParamHeapCopy(n ir.Node) bool { - if n.Op() != ir.ONAME { - return false - } - name := n.(*ir.Name) - return name.Class_ == ir.PAUTOHEAP && name.Name().Stackcopy != nil -} - // autotmpname returns the name for an autotmp variable numbered n. func autotmpname(n int) string { // Give each tmp a different name so that they can be registerized. @@ -80,7 +60,7 @@ func tempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name { s.Def = n n.SetType(t) n.Class_ = ir.PAUTO - n.SetEsc(EscNever) + n.SetEsc(ir.EscNever) n.Curfn = curfn n.SetUsed(true) n.SetAutoTemp(true) @@ -92,5 +72,5 @@ func tempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name { } func temp(t *types.Type) *ir.Name { - return tempAt(base.Pos, Curfn, t) + return tempAt(base.Pos, ir.CurFunc, t) } diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index 4b6ffe58d1de9..4370a06839142 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -12,27 +12,6 @@ import ( "sync" ) -var ( - // maximum size variable which we will allocate on the stack. - // This limit is for explicit variable declarations like "var x T" or "x := ...". - // Note: the flag smallframes can update this value. - maxStackVarSize = int64(10 * 1024 * 1024) - - // maximum size of implicit variables that we will allocate on the stack. - // p := new(T) allocating T on the stack - // p := &T{} allocating T on the stack - // s := make([]T, n) allocating [n]T on the stack - // s := []byte("...") allocating [n]byte on the stack - // Note: the flag smallframes can update this value. - maxImplicitStackVarSize = int64(64 * 1024) - - // smallArrayBytes is the maximum size of an array which is considered small. - // Small arrays will be initialized directly with a sequence of constant stores. - // Large arrays will be initialized by copying from a static temp. - // 256 bytes was chosen to minimize generated code + statictmp size. - smallArrayBytes = int64(256) -) - // Slices in the runtime are represented by three components: // // type slice struct { @@ -89,16 +68,12 @@ var ( var dclcontext ir.Class // PEXTERN/PAUTO -var Curfn *ir.Func - var Widthptr int var Widthreg int var typecheckok bool -var nodfp *ir.Name - // interface to back end type Arch struct { diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index da2345c289bb5..6ea9b354ab1e7 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -197,7 +197,7 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { // Q: is this needed? 
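	// (Presumably yes, at least for base.Pos: this is the usual idiom of
	// saving the global typechecking context before synthesizing a new
	// function, roughly
	//
	//	saved := ir.CurFunc
	//	ir.CurFunc = nil
	//	// ... build and typecheck the wrapper ...
	//	ir.CurFunc = saved
	//
	// which the code below and the epilogue do explicitly.)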
savepos := base.Pos savedclcontext := dclcontext - savedcurfn := Curfn + savedcurfn := ir.CurFunc base.Pos = base.AutogeneratedPos dclcontext = ir.PEXTERN @@ -270,7 +270,7 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { tail = ir.NewBranchStmt(base.Pos, ir.ORETJMP, f.Nname.Sym()) } else { call := ir.NewCallExpr(base.Pos, ir.OCALL, f.Nname, nil) - call.Args.Set(paramNnames(tfn.Type())) + call.Args.Set(ir.ParamNames(tfn.Type())) call.IsDDD = tfn.Type().IsVariadic() tail = call if tfn.Type().NumResults() > 0 { @@ -287,7 +287,7 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { } typecheckFunc(fn) - Curfn = fn + ir.CurFunc = fn typecheckslice(fn.Body, ctxStmt) escapeFuncs([]*ir.Func{fn}, false) @@ -297,7 +297,7 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { // Restore previous context. base.Pos = savepos dclcontext = savedclcontext - Curfn = savedcurfn + ir.CurFunc = savedcurfn } // initLSym defines f's obj.LSym and initializes it based on the diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 56d2e81df1ecf..fd64b690774d5 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -816,7 +816,7 @@ func (w *exportWriter) value(typ *types.Type, v constant.Value) { func intSize(typ *types.Type) (signed bool, maxBytes uint) { if typ.IsUntyped() { - return true, Mpprec / 8 + return true, ir.ConstPrec / 8 } switch typ.Kind() { @@ -927,7 +927,7 @@ func (w *exportWriter) mpint(x constant.Value, typ *types.Type) { // multi-precision integer) and then the exponent, except exponent is // omitted if mantissa is zero. func (w *exportWriter) mpfloat(v constant.Value, typ *types.Type) { - f := bigFloatVal(v) + f := ir.BigFloat(v) if f.IsInf() { base.Fatalf("infinite constant") } diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 90a909d2a3c9c..d04c432e5e26f 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -327,7 +327,7 @@ func (r *importReader) doDecl(sym *types.Sym) *ir.Name { fn := ir.NewFunc(mpos) fn.SetType(mtyp) - m := newFuncNameAt(mpos, methodSym(recv.Type, msym), fn) + m := ir.NewFuncNameAt(mpos, ir.MethodSym(recv.Type, msym), fn) m.SetType(mtyp) m.Class_ = ir.PFUNC // methodSym already marked m.Sym as a function. 
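(An aside on the next hunk: the small constructor helpers also move into
package ir in this series. A minimal sketch of the correspondences, using
only names that appear in these diffs:

	one := ir.NewInt(1)              // was nodintconst(1)
	f := ir.BigFloat(v)              // was bigFloatVal(v)
	over := ir.ConstOverflow(v, typ) // was doesoverflow(v, typ)

so the importer below can build the literal without gc-local helpers.)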
@@ -1009,7 +1009,7 @@ func (r *importReader) node() ir.Node { n.AsOp = r.op() n.X = r.expr() if !r.bool() { - n.Y = nodintconst(1) + n.Y = ir.NewInt(1) n.IncDec = true } else { n.Y = r.expr() diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index 4495284a07444..f22e49efba6b1 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -66,9 +66,9 @@ func fninit() *ir.Name { funcbody() typecheckFunc(fn) - Curfn = fn + ir.CurFunc = fn typecheckslice(nf, ctxStmt) - Curfn = nil + ir.CurFunc = nil Target.Decls = append(Target.Decls, fn) fns = append(fns, initializers.Linksym()) } diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go index fe131c32a65a9..5caa2e769f134 100644 --- a/src/cmd/compile/internal/gc/initorder.go +++ b/src/cmd/compile/internal/gc/initorder.go @@ -290,7 +290,7 @@ func (d *initDeps) visit(n ir.Node) { switch n.Op() { case ir.OMETHEXPR: n := n.(*ir.MethodExpr) - d.foundDep(methodExprName(n)) + d.foundDep(ir.MethodExprName(n)) case ir.ONAME: n := n.(*ir.Name) @@ -304,7 +304,7 @@ func (d *initDeps) visit(n ir.Node) { d.inspectList(n.Func.Body) case ir.ODOTMETH, ir.OCALLPART: - d.foundDep(methodExprName(n)) + d.foundDep(ir.MethodExprName(n)) } } diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index 47fdc7b9b70ca..f21494b29183f 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -39,9 +39,6 @@ import ( "strings" ) -// IsIntrinsicCall reports whether the compiler back end will treat the call as an intrinsic operation. -var IsIntrinsicCall = func(*ir.CallExpr) bool { return false } - // Inlining budget parameters, gathered in one place const ( inlineMaxBudget = 80 @@ -57,7 +54,7 @@ const ( func InlinePackage() { // Find functions that can be inlined and clone them before walk expands them. - visitBottomUp(Target.Decls, func(list []*ir.Func, recursive bool) { + ir.VisitFuncsBottomUp(Target.Decls, func(list []*ir.Func, recursive bool) { numfns := numNonClosures(list) for _, n := range list { if !recursive || numfns > 1 { @@ -98,7 +95,7 @@ func fnpkg(fn *ir.Name) *types.Pkg { // Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck // because they're a copy of an already checked body. func typecheckinl(fn *ir.Func) { - lno := setlineno(fn.Nname) + lno := ir.SetPos(fn.Nname) expandInline(fn) @@ -116,10 +113,10 @@ func typecheckinl(fn *ir.Func) { fmt.Printf("typecheck import [%v] %L { %v }\n", fn.Sym(), fn, ir.Nodes(fn.Inl.Body)) } - savefn := Curfn - Curfn = fn + savefn := ir.CurFunc + ir.CurFunc = fn typecheckslice(fn.Inl.Body, ctxStmt) - Curfn = savefn + ir.CurFunc = savefn // During expandInline (which imports fn.Func.Inl.Body), // declarations are added to fn.Func.Dcl by funcHdr(). Move them @@ -281,7 +278,7 @@ func inlFlood(n *ir.Name, exportsym func(*ir.Name)) { ir.VisitList(ir.Nodes(fn.Inl.Body), func(n ir.Node) { switch n.Op() { case ir.OMETHEXPR, ir.ODOTMETH: - inlFlood(methodExprName(n), exportsym) + inlFlood(ir.MethodExprName(n), exportsym) case ir.ONAME: n := n.(*ir.Name) @@ -362,7 +359,7 @@ func (v *hairyVisitor) doNode(n ir.Node) error { } } - if IsIntrinsicCall(n) { + if ir.IsIntrinsicCall(n) { // Treat like any other node. 
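			// (An intrinsic here is a call the SSA backend lowers
			// directly, so it is charged the ordinary per-node
			// budget rather than the extra call cost.)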
break } @@ -393,7 +390,7 @@ func (v *hairyVisitor) doNode(n ir.Node) error { break } } - if inlfn := methodExprName(n.X).Func; inlfn.Inl != nil { + if inlfn := ir.MethodExprName(n.X).Func; inlfn.Inl != nil { v.budget -= inlfn.Inl.Cost break } @@ -502,8 +499,8 @@ func isBigFunc(fn *ir.Func) bool { // Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any // calls made to inlineable functions. This is the external entry point. func inlcalls(fn *ir.Func) { - savefn := Curfn - Curfn = fn + savefn := ir.CurFunc + ir.CurFunc = fn maxCost := int32(inlineMaxBudget) if isBigFunc(fn) { maxCost = inlineBigFunctionMaxCost @@ -520,7 +517,7 @@ func inlcalls(fn *ir.Func) { return inlnode(n, maxCost, inlMap, edit) } ir.EditChildren(fn, edit) - Curfn = savefn + ir.CurFunc = savefn } // Turn an OINLCALL into a statement. @@ -536,7 +533,7 @@ func inlconv2stmt(inlcall *ir.InlinedCallExpr) ir.Node { // n.Left = inlconv2expr(n.Left) func inlconv2expr(n *ir.InlinedCallExpr) ir.Node { r := n.ReturnVars[0] - return initExpr(append(n.Init(), n.Body...), r) + return ir.InitExpr(append(n.Init(), n.Body...), r) } // Turn the rlist (with the return values) of the OINLCALL in @@ -550,7 +547,7 @@ func inlconv2list(n *ir.InlinedCallExpr) []ir.Node { } s := n.ReturnVars - s[0] = initExpr(append(n.Init(), n.Body...), s[0]) + s[0] = ir.InitExpr(append(n.Init(), n.Body...), s[0]) return s } @@ -594,7 +591,7 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No } } - lno := setlineno(n) + lno := ir.SetPos(n) ir.EditChildren(n, edit) @@ -626,7 +623,7 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No if base.Flag.LowerM > 3 { fmt.Printf("%v:call to func %+v\n", ir.Line(n), call.X) } - if IsIntrinsicCall(call) { + if ir.IsIntrinsicCall(call) { break } if fn := inlCallee(call.X); fn != nil && fn.Inl != nil { @@ -644,7 +641,7 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No base.Fatalf("no function type for [%p] %+v\n", call.X, call.X) } - n = mkinlcall(call, methodExprName(call.X).Func, maxCost, inlMap, edit) + n = mkinlcall(call, ir.MethodExprName(call.X).Func, maxCost, inlMap, edit) } base.Pos = lno @@ -670,11 +667,11 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No // inlCallee takes a function-typed expression and returns the underlying function ONAME // that it refers to if statically known. Otherwise, it returns nil. func inlCallee(fn ir.Node) *ir.Func { - fn = staticValue(fn) + fn = ir.StaticValue(fn) switch fn.Op() { case ir.OMETHEXPR: fn := fn.(*ir.MethodExpr) - n := methodExprName(fn) + n := ir.MethodExprName(fn) // Check that receiver type matches fn.Left. // TODO(mdempsky): Handle implicit dereference // of pointer receiver argument? @@ -696,100 +693,6 @@ func inlCallee(fn ir.Node) *ir.Func { return nil } -func staticValue(n ir.Node) ir.Node { - for { - if n.Op() == ir.OCONVNOP { - n = n.(*ir.ConvExpr).X - continue - } - - n1 := staticValue1(n) - if n1 == nil { - return n - } - n = n1 - } -} - -// staticValue1 implements a simple SSA-like optimization. If n is a local variable -// that is initialized and never reassigned, staticValue1 returns the initializer -// expression. Otherwise, it returns nil. 
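staticValue, removed above and re-homed in package ir as ir.StaticValue, repeatedly unwraps OCONVNOP and chases a name back to its unique initializer; staticValue1, whose removed body continues below, supplies one step of that chase and bails out if the name was ever reassigned. A toy model of the forwarding loop, with invented types:

package main

import "fmt"

type expr interface{}

// name is a toy local variable carrying its unique initializer, if any.
type name struct {
	id   string
	defn expr
}

type funcLit struct{} // stands in for a non-name RHS such as a func literal

// staticValue chases a name to its initializer, but only while the
// name has exactly one assignment (the toy stand-in for reassigned).
func staticValue(e expr, assigns map[string]int) expr {
	for {
		n, ok := e.(*name)
		if !ok || n.defn == nil || assigns[n.id] != 1 {
			return e
		}
		e = n.defn
	}
}

func main() {
	f := &name{id: "f", defn: &funcLit{}} // f := func() {...}
	g := &name{id: "g", defn: f}          // g := f
	assigns := map[string]int{"f": 1, "g": 1}
	fmt.Printf("%T\n", staticValue(g, assigns)) // *main.funcLit
}

The real helper additionally refuses address-taken and non-local names, which the toy omits.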
-func staticValue1(nn ir.Node) ir.Node { - if nn.Op() != ir.ONAME { - return nil - } - n := nn.(*ir.Name) - if n.Class_ != ir.PAUTO || n.Name().Addrtaken() { - return nil - } - - defn := n.Name().Defn - if defn == nil { - return nil - } - - var rhs ir.Node -FindRHS: - switch defn.Op() { - case ir.OAS: - defn := defn.(*ir.AssignStmt) - rhs = defn.Y - case ir.OAS2: - defn := defn.(*ir.AssignListStmt) - for i, lhs := range defn.Lhs { - if lhs == n { - rhs = defn.Rhs[i] - break FindRHS - } - } - base.Fatalf("%v missing from LHS of %v", n, defn) - default: - return nil - } - if rhs == nil { - base.Fatalf("RHS is nil: %v", defn) - } - - if reassigned(n) { - return nil - } - - return rhs -} - -// reassigned takes an ONAME node, walks the function in which it is defined, and returns a boolean -// indicating whether the name has any assignments other than its declaration. -// The second return value is the first such assignment encountered in the walk, if any. It is mostly -// useful for -m output documenting the reason for inhibited optimizations. -// NB: global variables are always considered to be re-assigned. -// TODO: handle initial declaration not including an assignment and followed by a single assignment? -func reassigned(name *ir.Name) bool { - if name.Op() != ir.ONAME { - base.Fatalf("reassigned %v", name) - } - // no way to reliably check for no-reassignment of globals, assume it can be - if name.Curfn == nil { - return true - } - return ir.Any(name.Curfn, func(n ir.Node) bool { - switch n.Op() { - case ir.OAS: - n := n.(*ir.AssignStmt) - if n.X == name && n != name.Defn { - return true - } - case ir.OAS2, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OSELRECV2: - n := n.(*ir.AssignListStmt) - for _, p := range n.Lhs { - if p == name && n != name.Defn { - return true - } - } - } - return false - }) -} - func inlParam(t *types.Field, as ir.Node, inlvars map[*ir.Name]ir.Node) ir.Node { n := ir.AsNode(t.Nname) if n == nil || ir.IsBlank(n) { @@ -821,7 +724,7 @@ var SSADumpInline = func(*ir.Func) {} func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.Node) ir.Node) ir.Node { if fn.Inl == nil { if logopt.Enabled() { - logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(Curfn), + logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(ir.CurFunc), fmt.Sprintf("%s cannot be inlined", ir.PkgFuncName(fn))) } return n @@ -830,16 +733,16 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b // The inlined function body is too big. Typically we use this check to restrict // inlining into very big functions. See issue 26546 and 17566. if logopt.Enabled() { - logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(Curfn), + logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(ir.CurFunc), fmt.Sprintf("cost %d of %s exceeds max large caller cost %d", fn.Inl.Cost, ir.PkgFuncName(fn), maxCost)) } return n } - if fn == Curfn { + if fn == ir.CurFunc { // Can't recursively inline a function into itself. 
if logopt.Enabled() { - logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(Curfn))) + logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(ir.CurFunc))) } return n } @@ -856,7 +759,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b if inlMap[fn] { if base.Flag.LowerM > 1 { - fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", ir.Line(n), fn, ir.FuncName(Curfn)) + fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", ir.Line(n), fn, ir.FuncName(ir.CurFunc)) } return n } @@ -916,7 +819,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b // NB: if we enabled inlining of functions containing OCLOSURE or refined // the reassigned check via some sort of copy propagation this would most // likely need to be changed to a loop to walk up to the correct Param - if o == nil || o.Curfn != Curfn { + if o == nil || o.Curfn != ir.CurFunc { base.Fatalf("%v: unresolvable capture %v %v\n", ir.Line(n), fn, v) } @@ -947,7 +850,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b if ln.Class_ == ir.PPARAMOUT { // return values handled below. continue } - if isParamStackCopy(ln) { // ignore the on-stack copy of a parameter that moved to the heap + if ir.IsParamStackCopy(ln) { // ignore the on-stack copy of a parameter that moved to the heap // TODO(mdempsky): Remove once I'm confident // this never actually happens. We currently // perform inlining before escape analysis, so @@ -1162,10 +1065,10 @@ func inlvar(var_ ir.Node) ir.Node { n.SetType(var_.Type()) n.Class_ = ir.PAUTO n.SetUsed(true) - n.Curfn = Curfn // the calling function, not the called one + n.Curfn = ir.CurFunc // the calling function, not the called one n.SetAddrtaken(var_.Name().Addrtaken()) - Curfn.Dcl = append(Curfn.Dcl, n) + ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n) return n } @@ -1175,8 +1078,8 @@ func retvar(t *types.Field, i int) ir.Node { n.SetType(t.Type) n.Class_ = ir.PAUTO n.SetUsed(true) - n.Curfn = Curfn // the calling function, not the called one - Curfn.Dcl = append(Curfn.Dcl, n) + n.Curfn = ir.CurFunc // the calling function, not the called one + ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n) return n } @@ -1187,8 +1090,8 @@ func argvar(t *types.Type, i int) ir.Node { n.SetType(t.Elem()) n.Class_ = ir.PAUTO n.SetUsed(true) - n.Curfn = Curfn // the calling function, not the called one - Curfn.Dcl = append(Curfn.Dcl, n) + n.Curfn = ir.CurFunc // the calling function, not the called one + ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n) return n } @@ -1358,7 +1261,7 @@ func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name { // devirtualize replaces interface method calls within fn with direct // concrete-type method calls where applicable. 
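The devirtualize pass that follows uses ir.StaticValue on the receiver of an OCALLINTER: when the receiver traces back to an OCONVIFACE with a known concrete operand, the interface call can become a direct method call. A minimal illustration of the transformation's effect, with invented names:

package main

import "fmt"

type speaker interface{ speak() string }

type dog struct{}

func (dog) speak() string { return "woof" }

func main() {
	// What the compiler sees, roughly:
	//   s := speaker(dog{})  // OCONVIFACE with a known concrete operand
	//   s.speak()            // OCALLINTER through the itab
	s := speaker(dog{})
	fmt.Println(s.speak())

	// What devirtualization rewrites the call to, skipping the itab lookup:
	fmt.Println(dog{}.speak())
}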
func devirtualize(fn *ir.Func) { - Curfn = fn + ir.CurFunc = fn ir.VisitList(fn.Body, func(n ir.Node) { if n.Op() == ir.OCALLINTER { devirtualizeCall(n.(*ir.CallExpr)) @@ -1368,7 +1271,7 @@ func devirtualize(fn *ir.Func) { func devirtualizeCall(call *ir.CallExpr) { sel := call.X.(*ir.SelectorExpr) - r := staticValue(sel.X) + r := ir.StaticValue(sel.X) if r.Op() != ir.OCONVIFACE { return } diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 1c52426802735..d55a8b0a7cac9 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -134,8 +134,8 @@ func Main(archInit func(*Arch)) { } if base.Flag.SmallFrames { - maxStackVarSize = 128 * 1024 - maxImplicitStackVarSize = 16 * 1024 + ir.MaxStackVarSize = 128 * 1024 + ir.MaxImplicitStackVarSize = 16 * 1024 } if base.Flag.Dwarf { @@ -185,7 +185,7 @@ func Main(archInit func(*Arch)) { } ir.EscFmt = escFmt - IsIntrinsicCall = isIntrinsicCall + ir.IsIntrinsicCall = isIntrinsicCall SSADumpInline = ssaDumpInline initSSAEnv() initSSATables() @@ -242,7 +242,7 @@ func Main(archInit func(*Arch)) { devirtualize(n.(*ir.Func)) } } - Curfn = nil + ir.CurFunc = nil // Escape analysis. // Required for moving heap allocations onto stack, @@ -271,7 +271,7 @@ func Main(archInit func(*Arch)) { if n.Op() == ir.ODCLFUNC { n := n.(*ir.Func) if n.OClosure != nil { - Curfn = n + ir.CurFunc = n transformclosure(n) } } @@ -285,7 +285,7 @@ func Main(archInit func(*Arch)) { // Just before compilation, compile itabs found on // the right side of OCONVIFACE so that methods // can be de-virtualized during compilation. - Curfn = nil + ir.CurFunc = nil peekitabs() // Compile top level functions. diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 799887d6b85a4..c83b60dcd4c51 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -181,9 +181,9 @@ func (p *noder) openScope(pos syntax.Pos) { types.Markdcl() if p.trackScopes { - Curfn.Parents = append(Curfn.Parents, p.scope) - p.scopeVars = append(p.scopeVars, len(Curfn.Dcl)) - p.scope = ir.ScopeID(len(Curfn.Parents)) + ir.CurFunc.Parents = append(ir.CurFunc.Parents, p.scope) + p.scopeVars = append(p.scopeVars, len(ir.CurFunc.Dcl)) + p.scope = ir.ScopeID(len(ir.CurFunc.Parents)) p.markScope(pos) } @@ -196,29 +196,29 @@ func (p *noder) closeScope(pos syntax.Pos) { if p.trackScopes { scopeVars := p.scopeVars[len(p.scopeVars)-1] p.scopeVars = p.scopeVars[:len(p.scopeVars)-1] - if scopeVars == len(Curfn.Dcl) { + if scopeVars == len(ir.CurFunc.Dcl) { // no variables were declared in this scope, so we can retract it. 
- if int(p.scope) != len(Curfn.Parents) { + if int(p.scope) != len(ir.CurFunc.Parents) { base.Fatalf("scope tracking inconsistency, no variables declared but scopes were not retracted") } - p.scope = Curfn.Parents[p.scope-1] - Curfn.Parents = Curfn.Parents[:len(Curfn.Parents)-1] + p.scope = ir.CurFunc.Parents[p.scope-1] + ir.CurFunc.Parents = ir.CurFunc.Parents[:len(ir.CurFunc.Parents)-1] - nmarks := len(Curfn.Marks) - Curfn.Marks[nmarks-1].Scope = p.scope + nmarks := len(ir.CurFunc.Marks) + ir.CurFunc.Marks[nmarks-1].Scope = p.scope prevScope := ir.ScopeID(0) if nmarks >= 2 { - prevScope = Curfn.Marks[nmarks-2].Scope + prevScope = ir.CurFunc.Marks[nmarks-2].Scope } - if Curfn.Marks[nmarks-1].Scope == prevScope { - Curfn.Marks = Curfn.Marks[:nmarks-1] + if ir.CurFunc.Marks[nmarks-1].Scope == prevScope { + ir.CurFunc.Marks = ir.CurFunc.Marks[:nmarks-1] } return } - p.scope = Curfn.Parents[p.scope-1] + p.scope = ir.CurFunc.Parents[p.scope-1] p.markScope(pos) } @@ -226,10 +226,10 @@ func (p *noder) closeScope(pos syntax.Pos) { func (p *noder) markScope(pos syntax.Pos) { xpos := p.makeXPos(pos) - if i := len(Curfn.Marks); i > 0 && Curfn.Marks[i-1].Pos == xpos { - Curfn.Marks[i-1].Scope = p.scope + if i := len(ir.CurFunc.Marks); i > 0 && ir.CurFunc.Marks[i-1].Pos == xpos { + ir.CurFunc.Marks[i-1].Scope = p.scope } else { - Curfn.Marks = append(Curfn.Marks, ir.Mark{Pos: xpos, Scope: p.scope}) + ir.CurFunc.Marks = append(ir.CurFunc.Marks, ir.Mark{Pos: xpos, Scope: p.scope}) } } @@ -527,7 +527,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node { name = ir.BlankNode.Sym() // filled in by typecheckfunc } - f.Nname = newFuncNameAt(p.pos(fun.Name), name, f) + f.Nname = ir.NewFuncNameAt(p.pos(fun.Name), name, f) f.Nname.Defn = f f.Nname.Ntype = t @@ -996,13 +996,13 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node { // TODO(mdempsky): Line number? return ir.NewBlockStmt(base.Pos, nil) } - return liststmt(l) + return ir.NewBlockStmt(src.NoXPos, l) case *syntax.ExprStmt: return p.wrapname(stmt, p.expr(stmt.X)) case *syntax.SendStmt: return ir.NewSendStmt(p.pos(stmt), p.expr(stmt.Chan), p.expr(stmt.Value)) case *syntax.DeclStmt: - return liststmt(p.decls(stmt.DeclList)) + return ir.NewBlockStmt(src.NoXPos, p.decls(stmt.DeclList)) case *syntax.AssignStmt: if stmt.Op != 0 && stmt.Op != syntax.Def { n := ir.NewAssignOpStmt(p.pos(stmt), p.binOp(stmt.Op), p.expr(stmt.Lhs), p.expr(stmt.Rhs)) @@ -1065,8 +1065,8 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node { } n := ir.NewReturnStmt(p.pos(stmt), nil) n.Results.Set(results) - if len(n.Results) == 0 && Curfn != nil { - for _, ln := range Curfn.Dcl { + if len(n.Results) == 0 && ir.CurFunc != nil { + for _, ln := range ir.CurFunc.Dcl { if ln.Class_ == ir.PPARAM { continue } @@ -1344,7 +1344,7 @@ func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node { l = append(l, ls) } } - return liststmt(l) + return ir.NewBlockStmt(src.NoXPos, l) } var unOps = [...]ir.Op{ @@ -1451,7 +1451,7 @@ func (p *noder) basicLit(lit *syntax.BasicLit) constant.Value { // to big.Float to match cmd/compile's historical precision. // TODO(mdempsky): Remove. 
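The basicLit handling continuing below, like the nodintconst and nodbool helpers replaced throughout this patch by ir.NewInt and ir.NewBool, is built on the standard go/constant package. A small runnable sketch of those constant values (only the go/constant calls are real; the ir constructors are taken on the patch's word):

package main

import (
	"fmt"
	"go/constant"
	"math/big"
)

func main() {
	i := constant.MakeInt64(1)    // the value behind ir.NewInt(1)
	b := constant.MakeBool(false) // the value behind ir.NewBool(false)
	f := constant.Make(big.NewFloat(0.5))

	fmt.Println(i.Kind() == constant.Int, b.Kind() == constant.Bool) // true true
	fmt.Println(f.Kind() == constant.Float, constant.Sign(i))        // true 1
}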
if v.Kind() == constant.Float { - v = constant.Make(bigFloatVal(v)) + v = constant.Make(ir.BigFloat(v)) } return v diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 897bcce36f74b..e56e34a7a198b 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -255,7 +255,7 @@ func dumpGlobalConst(n ir.Node) { if t.IsUntyped() { // Export untyped integers as int (if they fit). t = types.Types[types.TINT] - if doesoverflow(v, t) { + if ir.ConstOverflow(v, t) { return } } @@ -279,7 +279,7 @@ func dumpfuncsyms() { return funcsyms[i].LinksymName() < funcsyms[j].LinksymName() }) for _, s := range funcsyms { - sf := s.Pkg.Lookup(funcsymname(s)).Linksym() + sf := s.Pkg.Lookup(ir.FuncSymName(s)).Linksym() dsymptr(sf, 0, s.Linksym(), 0) ggloblsym(sf, int32(Widthptr), obj.DUPOK|obj.RODATA) } diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 9e792d153c8bf..1cd33b2cb5dae 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -230,7 +230,7 @@ func (o *Order) safeExpr(n ir.Node) ir.Node { // because we emit explicit VARKILL instructions marking the end of those // temporaries' lifetimes. func isaddrokay(n ir.Node) bool { - return islvalue(n) && (n.Op() != ir.ONAME || n.(*ir.Name).Class_ == ir.PEXTERN || ir.IsAutoTmp(n)) + return ir.IsAssignable(n) && (n.Op() != ir.ONAME || n.(*ir.Name).Class_ == ir.PEXTERN || ir.IsAutoTmp(n)) } // addrTemp ensures that n is okay to pass by address to runtime routines. @@ -381,13 +381,13 @@ func orderMakeSliceCopy(s []ir.Node) { } mk := as.Y.(*ir.MakeExpr) - if mk.Esc() == EscNone || mk.Len == nil || mk.Cap != nil { + if mk.Esc() == ir.EscNone || mk.Len == nil || mk.Cap != nil { return } mk.SetOp(ir.OMAKESLICECOPY) mk.Cap = cp.Y // Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s) - mk.SetBounded(mk.Len.Op() == ir.OLEN && samesafeexpr(mk.Len.(*ir.UnaryExpr).X, cp.Y)) + mk.SetBounded(mk.Len.Op() == ir.OLEN && ir.SameSafeExpr(mk.Len.(*ir.UnaryExpr).X, cp.Y)) as.Y = typecheck(mk, ctxExpr) s[1] = nil // remove separate copy call } @@ -404,7 +404,7 @@ func (o *Order) edge() { counter.Name().SetLibfuzzerExtraCounter(true) // counter += 1 - incr := ir.NewAssignOpStmt(base.Pos, ir.OADD, counter, nodintconst(1)) + incr := ir.NewAssignOpStmt(base.Pos, ir.OADD, counter, ir.NewInt(1)) o.append(incr) } @@ -429,7 +429,7 @@ func (o *Order) exprInPlace(n ir.Node) ir.Node { var order Order order.free = o.free n = order.expr(n, nil) - n = initExpr(order.out, n) + n = ir.InitExpr(order.out, n) // insert new temporaries from order // at head of outer list. @@ -448,7 +448,7 @@ func orderStmtInPlace(n ir.Node, free map[string][]*ir.Name) ir.Node { mark := order.markTemp() order.stmt(n) order.cleanTemp(mark) - return liststmt(order.out) + return ir.NewBlockStmt(src.NoXPos, order.out) } // init moves n's init list to o.out. @@ -615,7 +615,7 @@ func (o *Order) stmt(n ir.Node) { return } - lno := setlineno(n) + lno := ir.SetPos(n) o.init(n) switch n.Op() { @@ -909,7 +909,7 @@ func (o *Order) stmt(n ir.Node) { for _, ncas := range n.Cases { ncas := ncas.(*ir.CaseStmt) r := ncas.Comm - setlineno(ncas) + ir.SetPos(ncas) // Append any new body prologue to ninit. // The next loop will insert ninit into nbody. 
@@ -1089,7 +1089,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { if n == nil { return n } - lno := setlineno(n) + lno := ir.SetPos(n) n = o.expr1(n, lhs) base.Pos = lno return n @@ -1283,7 +1283,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { o.exprList(n.Args) } - if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.Args[0]) { + if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.Args[0]) { return o.copyExpr(n) } return n @@ -1299,7 +1299,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { max = o.expr(max, nil) max = o.cheapExpr(max) n.SetSliceBounds(low, high, max) - if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.X) { + if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.X) { return o.copyExpr(n) } return n diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index d6c15f113b987..44b614ba7016c 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -131,7 +131,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { switch n.Class_ { case ir.PPARAM, ir.PPARAMOUT: // Don't modify nodfp; it is a global. - if n != nodfp { + if n != ir.RegFP { n.Name().SetUsed(true) } case ir.PAUTO: @@ -193,8 +193,8 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { } func funccompile(fn *ir.Func) { - if Curfn != nil { - base.Fatalf("funccompile %v inside %v", fn.Sym(), Curfn.Sym()) + if ir.CurFunc != nil { + base.Fatalf("funccompile %v inside %v", fn.Sym(), ir.CurFunc.Sym()) } if fn.Type() == nil { @@ -215,9 +215,9 @@ func funccompile(fn *ir.Func) { } dclcontext = ir.PAUTO - Curfn = fn + ir.CurFunc = fn compile(fn) - Curfn = nil + ir.CurFunc = nil dclcontext = ir.PEXTERN } @@ -234,7 +234,7 @@ func compile(fn *ir.Func) { } // From this point, there should be no uses of Curfn. Enforce that. - Curfn = nil + ir.CurFunc = nil if ir.FuncName(fn) == "_" { // We don't need to generate code for this function, just report errors in its body. diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go index 67802fe917b64..e73e7fbbe1711 100644 --- a/src/cmd/compile/internal/gc/racewalk.go +++ b/src/cmd/compile/internal/gc/racewalk.go @@ -35,7 +35,7 @@ func instrument(fn *ir.Func) { // This only works for amd64. This will not // work on arm or others that might support // race in the future. 
- nodpc := nodfp.CloneName() + nodpc := ir.RegFP.CloneName() nodpc.SetType(types.Types[types.TUINTPTR]) nodpc.SetFrameOffset(int64(-Widthptr)) fn.Dcl = append(fn.Dcl, nodpc) diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index 463d0c55bdda0..a9447189c21e2 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -160,7 +160,7 @@ func cheapComputableIndex(width int64) bool { func walkrange(nrange *ir.RangeStmt) ir.Node { if isMapClear(nrange) { m := nrange.X - lno := setlineno(m) + lno := ir.SetPos(m) n := mapClear(m) base.Pos = lno return n @@ -180,7 +180,7 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { t := nrange.Type() a := nrange.X - lno := setlineno(a) + lno := ir.SetPos(a) var v1, v2 ir.Node l := len(nrange.Vars) @@ -228,7 +228,7 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { init = append(init, ir.NewAssignStmt(base.Pos, hn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha))) nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn) - nfor.Post = ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, nodintconst(1))) + nfor.Post = ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(1))) // for range ha { body } if v1 == nil { @@ -272,7 +272,7 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { nfor.SetOp(ir.OFORUNTIL) hp := temp(types.NewPtr(nrange.Type().Elem())) - tmp := ir.NewIndexExpr(base.Pos, ha, nodintconst(0)) + tmp := ir.NewIndexExpr(base.Pos, ha, ir.NewInt(0)) tmp.SetBounded(true) init = append(init, ir.NewAssignStmt(base.Pos, hp, nodAddr(tmp))) @@ -335,7 +335,7 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { } hb := temp(types.Types[types.TBOOL]) - nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, hb, nodbool(false)) + nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, hb, ir.NewBool(false)) a := ir.NewAssignListStmt(base.Pos, ir.OAS2RECV, nil, nil) a.SetTypecheck(1) a.Lhs = []ir.Node{hv1, hb} @@ -392,10 +392,10 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { // if hv2 < utf8.RuneSelf nif := ir.NewIfStmt(base.Pos, nil, nil, nil) - nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv2, nodintconst(utf8.RuneSelf)) + nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv2, ir.NewInt(utf8.RuneSelf)) // hv1++ - nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, nodintconst(1)))} + nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(1)))} // } else { eif := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) @@ -488,7 +488,7 @@ func isMapClear(n *ir.RangeStmt) bool { } m := n.X - if delete := stmt.(*ir.CallExpr); !samesafeexpr(delete.Args[0], m) || !samesafeexpr(delete.Args[1], k) { + if delete := stmt.(*ir.CallExpr); !ir.SameSafeExpr(delete.Args[0], m) || !ir.SameSafeExpr(delete.Args[1], k) { return false } @@ -545,12 +545,12 @@ func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node { } lhs := stmt.X.(*ir.IndexExpr) - if !samesafeexpr(lhs.X, a) || !samesafeexpr(lhs.Index, v1) { + if !ir.SameSafeExpr(lhs.X, a) || !ir.SameSafeExpr(lhs.Index, v1) { return nil } elemsize := loop.Type().Elem().Width - if elemsize <= 0 || !isZero(stmt.Y) { + if elemsize <= 0 || !ir.IsZero(stmt.Y) { return nil } @@ -563,25 +563,25 @@ func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node { // } n := ir.NewIfStmt(base.Pos, nil, nil, nil) n.Body.Set(nil) - n.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), nodintconst(0)) + n.Cond = ir.NewBinaryExpr(base.Pos, 
ir.ONE, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(0)) // hp = &a[0] hp := temp(types.Types[types.TUNSAFEPTR]) - ix := ir.NewIndexExpr(base.Pos, a, nodintconst(0)) + ix := ir.NewIndexExpr(base.Pos, a, ir.NewInt(0)) ix.SetBounded(true) addr := convnop(nodAddr(ix), types.Types[types.TUNSAFEPTR]) n.Body.Append(ir.NewAssignStmt(base.Pos, hp, addr)) // hn = len(a) * sizeof(elem(a)) hn := temp(types.Types[types.TUINTPTR]) - mul := conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), nodintconst(elemsize)), types.Types[types.TUINTPTR]) + mul := conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(elemsize)), types.Types[types.TUINTPTR]) n.Body.Append(ir.NewAssignStmt(base.Pos, hn, mul)) var fn ir.Node if a.Type().Elem().HasPointers() { // memclrHasPointers(hp, hn) - Curfn.SetWBPos(stmt.Pos()) + ir.CurFunc.SetWBPos(stmt.Pos()) fn = mkcall("memclrHasPointers", nil, nil, hp, hn) } else { // memclrNoHeapPointers(hp, hn) @@ -591,7 +591,7 @@ func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node { n.Body.Append(fn) // i = len(a) - 1 - v1 = ir.NewAssignStmt(base.Pos, v1, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), nodintconst(1))) + v1 = ir.NewAssignStmt(base.Pos, v1, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(1))) n.Body.Append(v1) @@ -608,7 +608,7 @@ func addptr(p ir.Node, n int64) ir.Node { p = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, p) p.SetType(types.Types[types.TUINTPTR]) - p = ir.NewBinaryExpr(base.Pos, ir.OADD, p, nodintconst(n)) + p = ir.NewBinaryExpr(base.Pos, ir.OADD, p, ir.NewInt(n)) p = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, p) p.SetType(t) diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 41c9f93bf0d77..8b393a8979aa4 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -349,12 +349,12 @@ func methodfunc(f *types.Type, receiver *types.Type) *types.Type { in := make([]*ir.Field, 0, inLen) if receiver != nil { - d := anonfield(receiver) + d := ir.NewField(base.Pos, nil, nil, receiver) in = append(in, d) } for _, t := range f.Params().Fields().Slice() { - d := anonfield(t.Type) + d := ir.NewField(base.Pos, nil, nil, t.Type) d.IsDDD = t.IsDDD() in = append(in, d) } @@ -362,7 +362,7 @@ func methodfunc(f *types.Type, receiver *types.Type) *types.Type { outLen := f.Results().Fields().Len() out := make([]*ir.Field, 0, outLen) for _, t := range f.Results().Fields().Slice() { - d := anonfield(t.Type) + d := ir.NewField(base.Pos, nil, nil, t.Type) out = append(out, d) } @@ -416,8 +416,8 @@ func methods(t *types.Type) []*Sig { sig := &Sig{ name: method, - isym: methodSym(it, method), - tsym: methodSym(t, method), + isym: ir.MethodSym(it, method), + tsym: ir.MethodSym(t, method), type_: methodfunc(f.Type, t), mtype: methodfunc(f.Type, nil), } @@ -471,7 +471,7 @@ func imethods(t *types.Type) []*Sig { // IfaceType.Method is not in the reflect data. // Generate the method body, so that compiled // code can refer to it. - isym := methodSym(t, f.Sym) + isym := ir.MethodSym(t, f.Sym) if !isym.Siggen() { isym.SetSiggen(true) genwrapper(t, f, isym) @@ -1541,7 +1541,7 @@ func dumpbasictypes() { // The latter is the type of an auto-generated wrapper. 
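In the reflect.go and select.go hunks here, the gc helpers anonfield(t) and namedfield(name, t) collapse into direct ir.NewField calls that spell out the position and an optional symbol (nil for anonymous fields). A toy constructor showing the consolidation, with invented types:

package main

import "fmt"

type field struct {
	pos int
	sym string // "" plays the role of a nil *types.Sym
	typ string
}

// newField subsumes both old helpers: anonfield is the sym == "" case,
// namedfield the sym != "" case.
func newField(pos int, sym, typ string) *field {
	return &field{pos: pos, sym: sym, typ: typ}
}

func main() {
	anon := newField(1, "", "error")            // was anonfield(types.ErrorType)
	named := newField(2, "c", "unsafe.Pointer") // was namedfield("c", ...)
	fmt.Printf("%+v %+v\n", *anon, *named)
}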
dtypesym(types.NewPtr(types.ErrorType)) - dtypesym(functype(nil, []*ir.Field{anonfield(types.ErrorType)}, []*ir.Field{anonfield(types.Types[types.TSTRING])})) + dtypesym(functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, types.ErrorType)}, []*ir.Field{ir.NewField(base.Pos, nil, nil, types.Types[types.TSTRING])})) // add paths for runtime and main, which 6l imports implicitly. dimportpath(ir.Pkgs.Runtime) diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go index 0bf070aa874e1..67a2cfd312d99 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/gc/select.go @@ -13,7 +13,7 @@ import ( // select func typecheckselect(sel *ir.SelectStmt) { var def ir.Node - lno := setlineno(sel) + lno := ir.SetPos(sel) typecheckslice(sel.Init(), ctxStmt) for _, ncase := range sel.Cases { ncase := ncase.(*ir.CaseStmt) @@ -94,7 +94,7 @@ func typecheckselect(sel *ir.SelectStmt) { } func walkselect(sel *ir.SelectStmt) { - lno := setlineno(sel) + lno := ir.SetPos(sel) if len(sel.Compiled) != 0 { base.Fatalf("double walkselect") } @@ -123,7 +123,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node { // optimization: one-case select: single op. if ncas == 1 { cas := cases[0].(*ir.CaseStmt) - setlineno(cas) + ir.SetPos(cas) l := cas.Init() if cas.Comm != nil { // not default: n := cas.Comm @@ -158,7 +158,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node { var dflt *ir.CaseStmt for _, cas := range cases { cas := cas.(*ir.CaseStmt) - setlineno(cas) + ir.SetPos(cas) n := cas.Comm if n == nil { dflt = cas @@ -187,7 +187,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node { } n := cas.Comm - setlineno(n) + ir.SetPos(n) r := ir.NewIfStmt(base.Pos, nil, nil, nil) r.PtrInit().Set(cas.Init()) var call ir.Node @@ -245,7 +245,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node { var pc0, pcs ir.Node if base.Flag.Race { pcs = temp(types.NewArray(types.Types[types.TUINTPTR], int64(ncas))) - pc0 = typecheck(nodAddr(ir.NewIndexExpr(base.Pos, pcs, nodintconst(0))), ctxExpr) + pc0 = typecheck(nodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(0))), ctxExpr) } else { pc0 = nodnil() } @@ -253,7 +253,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node { // register cases for _, cas := range cases { cas := cas.(*ir.CaseStmt) - setlineno(cas) + ir.SetPos(cas) init = append(init, cas.Init()...) cas.PtrInit().Set(nil) @@ -286,7 +286,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node { casorder[i] = cas setField := func(f string, val ir.Node) { - r := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, ir.NewIndexExpr(base.Pos, selv, nodintconst(int64(i))), lookup(f)), val) + r := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, ir.NewIndexExpr(base.Pos, selv, ir.NewInt(int64(i))), lookup(f)), val) init = append(init, typecheck(r, ctxStmt)) } @@ -300,7 +300,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node { // TODO(mdempsky): There should be a cleaner way to // handle this. 
if base.Flag.Race { - r := mkcall("selectsetpc", nil, nil, nodAddr(ir.NewIndexExpr(base.Pos, pcs, nodintconst(int64(i))))) + r := mkcall("selectsetpc", nil, nil, nodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(int64(i))))) init = append(init, r) } } @@ -315,7 +315,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node { r := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) r.Lhs = []ir.Node{chosen, recvOK} fn := syslook("selectgo") - r.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil))} + r.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, ir.NewInt(int64(nsends)), ir.NewInt(int64(nrecvs)), ir.NewBool(dflt == nil))} init = append(init, typecheck(r, ctxStmt)) // selv and order are no longer alive after selectgo. @@ -346,12 +346,12 @@ func walkselectcases(cases ir.Nodes) []ir.Node { } if dflt != nil { - setlineno(dflt) - dispatch(ir.NewBinaryExpr(base.Pos, ir.OLT, chosen, nodintconst(0)), dflt) + ir.SetPos(dflt) + dispatch(ir.NewBinaryExpr(base.Pos, ir.OLT, chosen, ir.NewInt(0)), dflt) } for i, cas := range casorder { - setlineno(cas) - dispatch(ir.NewBinaryExpr(base.Pos, ir.OEQ, chosen, nodintconst(int64(i))), cas) + ir.SetPos(cas) + dispatch(ir.NewBinaryExpr(base.Pos, ir.OEQ, chosen, ir.NewInt(int64(i))), cas) } return init @@ -359,7 +359,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node { // bytePtrToIndex returns a Node representing "(*byte)(&n[i])". func bytePtrToIndex(n ir.Node, i int64) ir.Node { - s := nodAddr(ir.NewIndexExpr(base.Pos, n, nodintconst(i))) + s := nodAddr(ir.NewIndexExpr(base.Pos, n, ir.NewInt(i))) t := types.NewPtr(types.Types[types.TUINT8]) return convnop(s, t) } @@ -370,8 +370,8 @@ var scase *types.Type func scasetype() *types.Type { if scase == nil { scase = tostruct([]*ir.Field{ - namedfield("c", types.Types[types.TUNSAFEPTR]), - namedfield("elem", types.Types[types.TUNSAFEPTR]), + ir.NewField(base.Pos, lookup("c"), nil, types.Types[types.TUNSAFEPTR]), + ir.NewField(base.Pos, lookup("elem"), nil, types.Types[types.TUNSAFEPTR]), }) scase.SetNoalg(true) } diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index c9a554079d68a..936edb3d70331 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -10,7 +10,6 @@ import ( "cmd/compile/internal/types" "cmd/internal/obj" "fmt" - "go/constant" ) type InitEntry struct { @@ -65,7 +64,7 @@ func (s *InitSchedule) tryStaticInit(nn ir.Node) bool { // Discard. return true } - lno := setlineno(n) + lno := ir.SetPos(n) defer func() { base.Pos = lno }() nam := n.X.(*ir.Name) return s.staticassign(nam, 0, n.Y, nam.Type()) @@ -120,7 +119,7 @@ func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *type return true case ir.OLITERAL: - if isZero(r) { + if ir.IsZero(r) { return true } litsym(l, loff, r, int(typ.Width)) @@ -170,7 +169,7 @@ func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *type // copying someone else's computation. 
ll := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, typ) rr := ir.NewNameOffsetExpr(base.Pos, orig, e.Xoffset, typ) - setlineno(rr) + ir.SetPos(rr) s.append(ir.NewAssignStmt(base.Pos, ll, rr)) } @@ -198,7 +197,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type return true case ir.OLITERAL: - if isZero(r) { + if ir.IsZero(r) { return true } litsym(l, loff, r, int(typ.Width)) @@ -263,7 +262,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type litsym(l, loff+e.Xoffset, e.Expr, int(e.Expr.Type().Width)) continue } - setlineno(e.Expr) + ir.SetPos(e.Expr) if !s.staticassign(l, loff+e.Xoffset, e.Expr, e.Expr.Type()) { a := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, e.Expr.Type()) s.append(ir.NewAssignStmt(base.Pos, a, e.Expr)) @@ -330,7 +329,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type return true } // Copy val directly into n. - setlineno(val) + ir.SetPos(val) if !s.staticassign(l, loff+int64(Widthptr), val, val.Type()) { a := ir.NewNameOffsetExpr(base.Pos, l, loff+int64(Widthptr), val.Type()) s.append(ir.NewAssignStmt(base.Pos, a, val)) @@ -429,7 +428,7 @@ const ( func getdyn(n ir.Node, top bool) initGenType { switch n.Op() { default: - if isGoConst(n) { + if ir.IsConstNode(n) { return initConst } return initDynamic @@ -548,7 +547,7 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node, } r = kv.Value } - a := ir.NewIndexExpr(base.Pos, var_, nodintconst(k)) + a := ir.NewIndexExpr(base.Pos, var_, ir.NewInt(k)) k++ if isBlank { return ir.BlankNode, r @@ -561,7 +560,7 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node, if r.Field.IsBlank() || isBlank { return ir.BlankNode, r.Value } - setlineno(r) + ir.SetPos(r) return ir.NewSelectorExpr(base.Pos, ir.ODOT, var_, r.Field), r.Value } default: @@ -589,13 +588,13 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node, continue } - islit := isGoConst(value) + islit := ir.IsConstNode(value) if (kind == initKindStatic && !islit) || (kind == initKindDynamic && islit) { continue } // build list of assignments: var[index] = expr - setlineno(a) + ir.SetPos(a) as := ir.NewAssignStmt(base.Pos, a, value) as = typecheck(as, ctxStmt).(*ir.AssignStmt) switch kind { @@ -617,7 +616,7 @@ func isSmallSliceLit(n *ir.CompLitExpr) bool { return false } - return n.Type().Elem().Width == 0 || n.Len <= smallArrayBytes/n.Type().Elem().Width + return n.Type().Elem().Width == 0 || n.Len <= ir.MaxSmallArraySize/n.Type().Elem().Width } func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) { @@ -697,7 +696,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) } a = nodAddr(x) - } else if n.Esc() == EscNone { + } else if n.Esc() == ir.EscNone { a = temp(t) if vstat == nil { a = ir.NewAssignStmt(base.Pos, temp(t), nil) @@ -731,7 +730,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) } value = kv.Value } - a := ir.NewIndexExpr(base.Pos, vauto, nodintconst(index)) + a := ir.NewIndexExpr(base.Pos, vauto, ir.NewInt(index)) a.SetBounded(true) index++ @@ -753,12 +752,12 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) continue } - if vstat != nil && isGoConst(value) { // already set by copy from static value + if vstat != nil && ir.IsConstNode(value) { // already set by copy from static value continue } // build list of vauto[c] = expr - setlineno(value) + 
ir.SetPos(value) as := typecheck(ir.NewAssignStmt(base.Pos, a, value), ctxStmt) as = orderStmtInPlace(as, map[string][]*ir.Name{}) as = walkstmt(as) @@ -778,7 +777,7 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) { // make the map var a := ir.NewCallExpr(base.Pos, ir.OMAKE, nil, nil) a.SetEsc(n.Esc()) - a.Args = []ir.Node{ir.TypeNode(n.Type()), nodintconst(int64(len(n.List)))} + a.Args = []ir.Node{ir.TypeNode(n.Type()), ir.NewInt(int64(len(n.List)))} litas(m, a, init) entries := n.List @@ -831,9 +830,9 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) { kidx.SetBounded(true) lhs := ir.NewIndexExpr(base.Pos, m, kidx) - zero := ir.NewAssignStmt(base.Pos, i, nodintconst(0)) - cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, nodintconst(tk.NumElem())) - incr := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, nodintconst(1))) + zero := ir.NewAssignStmt(base.Pos, i, ir.NewInt(0)) + cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(tk.NumElem())) + incr := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, ir.NewInt(1))) body := ir.NewAssignStmt(base.Pos, lhs, rhs) loop := ir.NewForStmt(base.Pos, nil, cond, incr, nil) @@ -855,13 +854,13 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) { r := r.(*ir.KeyExpr) index, elem := r.Key, r.Value - setlineno(index) + ir.SetPos(index) appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmpkey, index)) - setlineno(elem) + ir.SetPos(elem) appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmpelem, elem)) - setlineno(tmpelem) + ir.SetPos(tmpelem) appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, tmpkey), tmpelem)) } @@ -992,7 +991,7 @@ func oaslit(n *ir.AssignStmt, init *ir.Nodes) bool { } func getlit(lit ir.Node) int { - if smallintconst(lit) { + if ir.IsSmallIntConst(lit) { return int(ir.Int64Val(lit)) } return -1 @@ -1098,7 +1097,7 @@ func (s *InitSchedule) initplan(n ir.Node) { func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n ir.Node) { // special case: zero can be dropped entirely - if isZero(n) { + if ir.IsZero(n) { return } @@ -1118,47 +1117,6 @@ func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n ir.Node) { p.E = append(p.E, InitEntry{Xoffset: xoffset, Expr: n}) } -func isZero(n ir.Node) bool { - switch n.Op() { - case ir.ONIL: - return true - - case ir.OLITERAL: - switch u := n.Val(); u.Kind() { - case constant.String: - return constant.StringVal(u) == "" - case constant.Bool: - return !constant.BoolVal(u) - default: - return constant.Sign(u) == 0 - } - - case ir.OARRAYLIT: - n := n.(*ir.CompLitExpr) - for _, n1 := range n.List { - if n1.Op() == ir.OKEY { - n1 = n1.(*ir.KeyExpr).Value - } - if !isZero(n1) { - return false - } - } - return true - - case ir.OSTRUCTLIT: - n := n.(*ir.CompLitExpr) - for _, n1 := range n.List { - n1 := n1.(*ir.StructKeyExpr) - if !isZero(n1.Value) { - return false - } - } - return true - } - - return false -} - func isvaluelit(n ir.Node) bool { return n.Op() == ir.OARRAYLIT || n.Op() == ir.OSTRUCTLIT } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 22cc868f36383..f879d8b86d621 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -1159,7 +1159,7 @@ func (s *state) stmt(n ir.Node) { // Expression statements case ir.OCALLFUNC: n := n.(*ir.CallExpr) - if IsIntrinsicCall(n) { + if ir.IsIntrinsicCall(n) { s.intrinsicCall(n) return } @@ -1186,7 +1186,7 @@ func (s *state) stmt(n ir.Node) { var defertype string if s.hasOpenDefers 
{ defertype = "open-coded" - } else if n.Esc() == EscNever { + } else if n.Esc() == ir.EscNever { defertype = "stack-allocated" } else { defertype = "heap-allocated" @@ -1197,7 +1197,7 @@ func (s *state) stmt(n ir.Node) { s.openDeferRecord(n.Call.(*ir.CallExpr)) } else { d := callDefer - if n.Esc() == EscNever { + if n.Esc() == ir.EscNever { d = callDeferStack } s.callResult(n.Call.(*ir.CallExpr), d) @@ -1232,7 +1232,7 @@ func (s *state) stmt(n ir.Node) { // We come here only when it is an intrinsic call returning two values. n := n.(*ir.AssignListStmt) call := n.Rhs[0].(*ir.CallExpr) - if !IsIntrinsicCall(call) { + if !ir.IsIntrinsicCall(call) { s.Fatalf("non-intrinsic AS2FUNC not expanded %v", call) } v := s.intrinsicCall(call) @@ -1300,7 +1300,7 @@ func (s *state) stmt(n ir.Node) { // All literals with nonzero fields have already been // rewritten during walk. Any that remain are just T{} // or equivalents. Use the zero value. - if !isZero(rhs) { + if !ir.IsZero(rhs) { s.Fatalf("literal with nonzero value in SSA: %v", rhs) } rhs = nil @@ -1309,7 +1309,7 @@ func (s *state) stmt(n ir.Node) { // Check whether we're writing the result of an append back to the same slice. // If so, we handle it specially to avoid write barriers on the fast // (non-growth) path. - if !samesafeexpr(n.X, rhs.Args[0]) || base.Flag.N != 0 { + if !ir.SameSafeExpr(n.X, rhs.Args[0]) || base.Flag.N != 0 { break } // If the slice can be SSA'd, it'll be on the stack, @@ -1362,7 +1362,7 @@ func (s *state) stmt(n ir.Node) { } var skip skipMask - if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && samesafeexpr(rhs.(*ir.SliceExpr).X, n.X) { + if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && ir.SameSafeExpr(rhs.(*ir.SliceExpr).X, n.X) { // We're assigning a slicing operation back to its source. // Don't write back fields we aren't changing. See issue #14855. rhs := rhs.(*ir.SliceExpr) @@ -2085,7 +2085,7 @@ func (s *state) ssaShiftOp(op ir.Op, t *types.Type, u *types.Type) ssa.Op { // expr converts the expression n to ssa, adds it to s and returns the ssa result. func (s *state) expr(n ir.Node) *ssa.Value { - if hasUniquePos(n) { + if ir.HasUniquePos(n) { // ONAMEs and named OLITERALs have the line number // of the decl, not the use. See issue 14742. s.pushLine(n.Pos()) @@ -2726,7 +2726,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { // All literals with nonzero fields have already been // rewritten during walk. Any that remain are just T{} // or equivalents. Use the zero value. - if !isZero(n.X) { + if !ir.IsZero(n.X) { s.Fatalf("literal with nonzero value in SSA: %v", n.X) } return s.zeroVal(n.Type()) @@ -2735,7 +2735,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { // SSA, then load just the selected field. This // prevents false memory dependencies in race/msan // instrumentation. - if islvalue(n) && !s.canSSA(n) { + if ir.IsAssignable(n) && !s.canSSA(n) { p := s.addr(n) return s.load(n.Type(), p) } @@ -2880,7 +2880,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { case ir.OCALLFUNC: n := n.(*ir.CallExpr) - if IsIntrinsicCall(n) { + if ir.IsIntrinsicCall(n) { return s.intrinsicCall(n) } fallthrough @@ -2901,7 +2901,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { // rewritten during walk. Any that remain are just T{} // or equivalents. Use the zero value. 
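ir.IsZero, substituted just below (and earlier in sinit.go) for the removed isZero, treats a constant as zero when it is the empty string, false, or numerically zero, and recurses through array and struct literals. The scalar arms, written against the real go/constant package (the composite-literal recursion is omitted here):

package main

import (
	"fmt"
	"go/constant"
)

// isZeroConst mirrors the scalar cases of the removed isZero.
func isZeroConst(v constant.Value) bool {
	switch v.Kind() {
	case constant.String:
		return constant.StringVal(v) == ""
	case constant.Bool:
		return !constant.BoolVal(v)
	default:
		return constant.Sign(v) == 0
	}
}

func main() {
	fmt.Println(isZeroConst(constant.MakeString(""))) // true
	fmt.Println(isZeroConst(constant.MakeBool(true))) // false
	fmt.Println(isZeroConst(constant.MakeInt64(0)))   // true
}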
n := n.(*ir.CompLitExpr) - if !isZero(n) { + if !ir.IsZero(n) { s.Fatalf("literal with nonzero value in SSA: %v", n) } return s.zeroVal(n.Type()) @@ -3236,7 +3236,7 @@ func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask // Left is not ssa-able. Compute its address. addr := s.addr(left) - if isReflectHeaderDataField(left) { + if ir.IsReflectHeaderDataField(left) { // Package unsafe's documentation says storing pointers into // reflect.SliceHeader and reflect.StringHeader's Data fields // is valid, even though they have type uintptr (#19168). @@ -5021,7 +5021,7 @@ func (s *state) addr(n ir.Node) *ssa.Value { if v != nil { return v } - if n == nodfp { + if n == ir.RegFP { // Special arg that points to the frame pointer (Used by ORECOVER). return s.entryNewValue2A(ssa.OpLocalAddr, t, n, s.sp, s.startmem) } @@ -5141,7 +5141,7 @@ func (s *state) canSSAName(name *ir.Name) bool { if name.Addrtaken() { return false } - if isParamHeapCopy(name) { + if ir.IsParamHeapCopy(name) { return false } if name.Class_ == ir.PAUTOHEAP { @@ -7271,7 +7271,7 @@ func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t ir.AsNode(s.Def).Name().SetUsed(true) n.SetType(t) n.Class_ = ir.PAUTO - n.SetEsc(EscNever) + n.SetEsc(ir.EscNever) n.Curfn = e.curfn e.curfn.Dcl = append(e.curfn.Dcl, n) dowidth(t) diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index a845abeb3a79d..bcf17e42d6ad9 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -10,7 +10,6 @@ import ( "cmd/compile/internal/types" "cmd/internal/src" "fmt" - "go/constant" "sort" "strconv" "strings" @@ -32,40 +31,6 @@ var ( largeStackFrames []largeStack ) -// hasUniquePos reports whether n has a unique position that can be -// used for reporting error messages. -// -// It's primarily used to distinguish references to named objects, -// whose Pos will point back to their declaration position rather than -// their usage position. -func hasUniquePos(n ir.Node) bool { - switch n.Op() { - case ir.ONAME, ir.OPACK: - return false - case ir.OLITERAL, ir.ONIL, ir.OTYPE: - if n.Sym() != nil { - return false - } - } - - if !n.Pos().IsKnown() { - if base.Flag.K != 0 { - base.Warn("setlineno: unknown position (line 0)") - } - return false - } - - return true -} - -func setlineno(n ir.Node) src.XPos { - lno := base.Pos - if n != nil && hasUniquePos(n) { - base.Pos = n.Pos() - } - return lno -} - func lookup(name string) *types.Sym { return types.LocalPkg.Lookup(name) } @@ -89,8 +54,8 @@ func autolabel(prefix string) *types.Sym { if prefix[0] != '.' { base.Fatalf("autolabel prefix must start with '.', have %q", prefix) } - fn := Curfn - if Curfn == nil { + fn := ir.CurFunc + if ir.CurFunc == nil { base.Fatalf("autolabel outside function") } n := fn.Label @@ -164,28 +129,16 @@ func nodAddrAt(pos src.XPos, n ir.Node) *ir.AddrExpr { // newname returns a new ONAME Node associated with symbol s. 
func NewName(s *types.Sym) *ir.Name { n := ir.NewNameAt(base.Pos, s) - n.Curfn = Curfn + n.Curfn = ir.CurFunc return n } -func nodintconst(v int64) ir.Node { - return ir.NewLiteral(constant.MakeInt64(v)) -} - func nodnil() ir.Node { n := ir.NewNilExpr(base.Pos) n.SetType(types.Types[types.TNIL]) return n } -func nodbool(b bool) ir.Node { - return ir.NewLiteral(constant.MakeBool(b)) -} - -func nodstr(s string) ir.Node { - return ir.NewLiteral(constant.MakeString(s)) -} - func isptrto(t *types.Type, et types.Kind) bool { if t == nil { return false @@ -778,7 +731,7 @@ func safeexpr(n ir.Node, init *ir.Nodes) ir.Node { } // make a copy; must not be used as an lvalue - if islvalue(n) { + if ir.IsAssignable(n) { base.Fatalf("missing lvalue case in safeexpr: %v", n) } return cheapexpr(n, init) @@ -1109,7 +1062,7 @@ func structargs(tl *types.Type, mustname bool) []*ir.Field { s = lookupN(".anon", gen) gen++ } - a := symfield(s, t.Type) + a := ir.NewField(base.Pos, s, nil, t.Type) a.Pos = t.Pos a.IsDDD = t.IsDDD() args = append(args, a) @@ -1160,7 +1113,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { dclcontext = ir.PEXTERN tfn := ir.NewFuncType(base.Pos, - namedfield(".this", rcvr), + ir.NewField(base.Pos, lookup(".this"), nil, rcvr), structargs(method.Type.Params(), true), structargs(method.Type.Results(), false)) @@ -1198,11 +1151,11 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { } as := ir.NewAssignStmt(base.Pos, nthis, convnop(left, rcvr)) fn.Body.Append(as) - fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.ORETJMP, methodSym(methodrcvr, method.Sym))) + fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.ORETJMP, ir.MethodSym(methodrcvr, method.Sym))) } else { fn.SetWrapper(true) // ignore frame for panic+recover matching call := ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil) - call.Args.Set(paramNnames(tfn.Type())) + call.Args.Set(ir.ParamNames(tfn.Type())) call.IsDDD = tfn.Type().IsVariadic() if method.Type.NumResults() > 0 { ret := ir.NewReturnStmt(base.Pos, nil) @@ -1223,7 +1176,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { } typecheckFunc(fn) - Curfn = fn + ir.CurFunc = fn typecheckslice(fn.Body, ctxStmt) // Inline calls within (*T).M wrappers. 
This is safe because we only @@ -1234,29 +1187,21 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { } escapeFuncs([]*ir.Func{fn}, false) - Curfn = nil + ir.CurFunc = nil Target.Decls = append(Target.Decls, fn) } -func paramNnames(ft *types.Type) []ir.Node { - args := make([]ir.Node, ft.NumParams()) - for i, f := range ft.Params().FieldSlice() { - args[i] = ir.AsNode(f.Nname) - } - return args -} - func hashmem(t *types.Type) ir.Node { sym := ir.Pkgs.Runtime.Lookup("memhash") n := NewName(sym) - setNodeNameFunc(n) + ir.MarkFunc(n) n.SetType(functype(nil, []*ir.Field{ - anonfield(types.NewPtr(t)), - anonfield(types.Types[types.TUINTPTR]), - anonfield(types.Types[types.TUINTPTR]), + ir.NewField(base.Pos, nil, nil, types.NewPtr(t)), + ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]), + ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]), }, []*ir.Field{ - anonfield(types.Types[types.TUINTPTR]), + ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]), })) return n } @@ -1367,15 +1312,6 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool return true } -func liststmt(l []ir.Node) ir.Node { - n := ir.NewBlockStmt(base.Pos, nil) - n.List.Set(l) - if len(l) != 0 { - n.SetPos(l[0].Pos()) - } - return n -} - func ngotype(n ir.Node) *types.Sym { if n.Type() != nil { return typenamesym(n.Type()) @@ -1383,25 +1319,6 @@ func ngotype(n ir.Node) *types.Sym { return nil } -// The result of initExpr MUST be assigned back to n, e.g. -// n.Left = initExpr(init, n.Left) -func initExpr(init []ir.Node, n ir.Node) ir.Node { - if len(init) == 0 { - return n - } - if ir.MayBeShared(n) { - // Introduce OCONVNOP to hold init list. - old := n - n = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, old) - n.SetType(old.Type()) - n.SetTypecheck(1) - } - - n.PtrInit().Prepend(init...) - n.SetHasCall(true) - return n -} - // The linker uses the magic symbol prefixes "go." and "type." // Avoid potential confusion between import paths and symbols // by rejecting these reserved imports for now. Also, people diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index 513b890355ec3..5bbc91fcc13ca 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -190,7 +190,7 @@ func typecheckExprSwitch(n *ir.SwitchStmt) { } for i := range ls { - setlineno(ncase) + ir.SetPos(ncase) ls[i] = typecheck(ls[i], ctxExpr) ls[i] = defaultlit(ls[i], t) n1 := ls[i] @@ -246,14 +246,14 @@ func walkswitch(sw *ir.SwitchStmt) { // walkExprSwitch generates an AST implementing sw. sw is an // expression switch. func walkExprSwitch(sw *ir.SwitchStmt) { - lno := setlineno(sw) + lno := ir.SetPos(sw) cond := sw.Tag sw.Tag = nil // convert switch {...} to switch true {...} if cond == nil { - cond = nodbool(true) + cond = ir.NewBool(true) cond = typecheck(cond, ctxExpr) cond = defaultlit(cond, nil) } @@ -398,11 +398,11 @@ func (s *exprSwitch) flush() { // Perform two-level binary search. 
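The two-level search mentioned in the comment above first bisects on string length (runLen), then on the values within a run; the typeSwitch version further below bisects on type hashes the same way. A single-level toy of the generated if-tree's shape, with invented names:

package main

import "fmt"

// build mirrors the shape of the generated dispatch: an OLE-style probe
// picks a half, and an OEQ-style test fires at the chosen pivot.
func build(vals []int) func(x int) int {
	if len(vals) == 0 {
		return func(int) int { return -1 }
	}
	mid := len(vals) / 2
	left, right := build(vals[:mid]), build(vals[mid+1:])
	v := vals[mid]
	return func(x int) int {
		if x < v {
			return left(x)
		}
		if x == v {
			return mid
		}
		if r := right(x); r >= 0 {
			return mid + 1 + r
		}
		return -1
	}
}

func main() {
	match := build([]int{2, 3, 5, 8, 13}) // sorted case values
	fmt.Println(match(8), match(4))       // 3 -1
}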
binarySearch(len(runs), &s.done, func(i int) ir.Node { - return ir.NewBinaryExpr(base.Pos, ir.OLE, ir.NewUnaryExpr(base.Pos, ir.OLEN, s.exprname), nodintconst(runLen(runs[i-1]))) + return ir.NewBinaryExpr(base.Pos, ir.OLE, ir.NewUnaryExpr(base.Pos, ir.OLEN, s.exprname), ir.NewInt(runLen(runs[i-1]))) }, func(i int, nif *ir.IfStmt) { run := runs[i] - nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, ir.NewUnaryExpr(base.Pos, ir.OLEN, s.exprname), nodintconst(runLen(run))) + nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, ir.NewUnaryExpr(base.Pos, ir.OLEN, s.exprname), ir.NewInt(runLen(run))) s.search(run, &nif.Body) }, ) @@ -708,13 +708,13 @@ func (s *typeSwitch) flush() { binarySearch(len(cc), &s.done, func(i int) ir.Node { - return ir.NewBinaryExpr(base.Pos, ir.OLE, s.hashname, nodintconst(int64(cc[i-1].hash))) + return ir.NewBinaryExpr(base.Pos, ir.OLE, s.hashname, ir.NewInt(int64(cc[i-1].hash))) }, func(i int, nif *ir.IfStmt) { // TODO(mdempsky): Omit hash equality check if // there's only one type. c := cc[i] - nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, s.hashname, nodintconst(int64(c.hash))) + nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, s.hashname, ir.NewInt(int64(c.hash))) nif.Body.Append(c.body.Take()...) }, ) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 5e13facc4f9fa..0beb5712d4821 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -98,13 +98,13 @@ func TypecheckPackage() { if n.Op() == ir.ODCLFUNC { n := n.(*ir.Func) if n.OClosure != nil { - Curfn = n + ir.CurFunc = n capturevars(n) } } } capturevarscomplete = true - Curfn = nil + ir.CurFunc = nil if base.Debug.TypecheckInl != 0 { // Typecheck imported function bodies if Debug.l > 1, @@ -139,7 +139,7 @@ func TypecheckCallee(n ir.Node) ir.Node { } func TypecheckFuncBody(n *ir.Func) { - Curfn = n + ir.CurFunc = n decldepth = 1 errorsBefore := base.Errors() typecheckslice(n.Body, ctxStmt) @@ -259,7 +259,7 @@ func resolve(n ir.Node) (res ir.Node) { if r.Op() == ir.OIOTA { if x := getIotaValue(); x >= 0 { - return nodintconst(x) + return ir.NewInt(x) } return n } @@ -380,7 +380,7 @@ func typecheck(n ir.Node, top int) (res ir.Node) { defer tracePrint("typecheck", n)(&res) } - lno := setlineno(n) + lno := ir.SetPos(n) // Skip over parens. 
for n.Op() == ir.OPAREN { @@ -682,7 +682,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } v := size.Val() - if doesoverflow(v, types.Types[types.TINT]) { + if ir.ConstOverflow(v, types.Types[types.TINT]) { base.Errorf("array bound is too large") return n } @@ -1076,7 +1076,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { default: checklvalue(n.X, "take the address of") - r := outervalue(n.X) + r := ir.OuterValue(n.X) if r.Op() == ir.ONAME { r := r.(*ir.Name) if ir.Orig(r) != r { @@ -1270,7 +1270,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { base.Errorf("invalid array index %v (out of bounds for %d-element array)", n.Index, t.NumElem()) } else if ir.IsConst(n.X, constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(ir.StringVal(n.X))))) { base.Errorf("invalid string index %v (out of bounds for %d-byte string)", n.Index, len(ir.StringVal(n.X))) - } else if doesoverflow(x, types.Types[types.TINT]) { + } else if ir.ConstOverflow(x, types.Types[types.TINT]) { base.Errorf("invalid %s index %v (index too large)", why, n.Index) } } @@ -1412,7 +1412,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } if ir.IsConst(n.Len, constant.Int) { - if doesoverflow(n.Len.Val(), types.Types[types.TINT]) { + if ir.ConstOverflow(n.Len.Val(), types.Types[types.TINT]) { base.Fatalf("len for OMAKESLICECOPY too large") } if constant.Sign(n.Len.Val()) < 0 { @@ -1440,7 +1440,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } if l.Type().IsArray() { - if !islvalue(n.X) { + if !ir.IsAssignable(n.X) { base.Errorf("invalid operation %v (slice of unaddressable value)", n) n.SetType(nil) return n @@ -1538,7 +1538,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } u := ir.NewUnaryExpr(n.Pos(), l.BuiltinOp, arg) - return typecheck(initExpr(n.Init(), u), top) // typecheckargs can add to old.Init + return typecheck(ir.InitExpr(n.Init(), u), top) // typecheckargs can add to old.Init case ir.OCOMPLEX, ir.OCOPY: typecheckargs(n) @@ -1548,7 +1548,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } b := ir.NewBinaryExpr(n.Pos(), l.BuiltinOp, arg1, arg2) - return typecheck(initExpr(n.Init(), b), top) // typecheckargs can add to old.Init + return typecheck(ir.InitExpr(n.Init(), b), top) // typecheckargs can add to old.Init } panic("unreachable") } @@ -2023,7 +2023,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } } else { - l = nodintconst(0) + l = ir.NewInt(0) } nn = ir.NewMakeExpr(n.Pos(), ir.OMAKEMAP, l, nil) nn.SetEsc(n.Esc()) @@ -2044,7 +2044,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } } else { - l = nodintconst(0) + l = ir.NewInt(0) } nn = ir.NewMakeExpr(n.Pos(), ir.OMAKECHAN, l, nil) } @@ -2257,16 +2257,16 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.ORETURN: n := n.(*ir.ReturnStmt) typecheckargs(n) - if Curfn == nil { + if ir.CurFunc == nil { base.Errorf("return outside function") n.SetType(nil) return n } - if hasNamedResults(Curfn) && len(n.Results) == 0 { + if ir.HasNamedResults(ir.CurFunc) && len(n.Results) == 0 { return n } - typecheckaste(ir.ORETURN, nil, false, Curfn.Type().Results(), n.Results, func() string { return "return argument" }) + typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), n.Results, func() string { return "return argument" }) return n case ir.ORETJMP: @@ -2352,9 +2352,9 @@ func typecheckargs(n ir.Node) { // init.go hasn't yet created it. 
Instead, associate the // temporary variables with initTodo for now, and init.go // will reassociate them later when it's appropriate. - static := Curfn == nil + static := ir.CurFunc == nil if static { - Curfn = initTodo + ir.CurFunc = initTodo } list = nil for _, f := range t.FieldSlice() { @@ -2364,7 +2364,7 @@ func typecheckargs(n ir.Node) { list = append(list, t) } if static { - Curfn = nil + ir.CurFunc = nil } switch n := n.(type) { @@ -2398,7 +2398,7 @@ func checksliceindex(l ir.Node, r ir.Node, tp *types.Type) bool { } else if ir.IsConst(l, constant.String) && constant.Compare(x, token.GTR, constant.MakeInt64(int64(len(ir.StringVal(l))))) { base.Errorf("invalid slice index %v (out of bounds for %d-byte string)", r, len(ir.StringVal(l))) return false - } else if doesoverflow(x, types.Types[types.TINT]) { + } else if ir.ConstOverflow(x, types.Types[types.TINT]) { base.Errorf("invalid slice index %v (index too large)", r) return false } @@ -2603,7 +2603,7 @@ func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) { me := ir.NewMethodExpr(n.Pos(), n.X.Type(), m) me.SetType(methodfunc(m.Type, n.X.Type())) - f := NewName(methodSym(t, m.Sym)) + f := NewName(ir.MethodSym(t, m.Sym)) f.Class_ = ir.PFUNC f.SetType(me.Type()) me.FuncName_ = f @@ -2717,7 +2717,7 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { return nil } - n.Sel = methodSym(n.X.Type(), f2.Sym) + n.Sel = ir.MethodSym(n.X.Type(), f2.Sym) n.Offset = f2.Offset n.SetType(f2.Type) n.SetOp(ir.ODOTMETH) @@ -2801,7 +2801,7 @@ func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl i goto toomany } n = nl[i] - setlineno(n) + ir.SetPos(n) if n.Type() != nil { nl[i] = assignconvfn(n, t, desc) } @@ -2811,7 +2811,7 @@ func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl i // TODO(mdempsky): Make into ... call with implicit slice. for ; i < len(nl); i++ { n = nl[i] - setlineno(n) + ir.SetPos(n) if n.Type() != nil { nl[i] = assignconvfn(n, t.Elem(), desc) } @@ -2823,7 +2823,7 @@ func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl i goto notenough } n = nl[i] - setlineno(n) + ir.SetPos(n) if n.Type() != nil { nl[i] = assignconvfn(n, t, desc) } @@ -2998,7 +2998,7 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { // Save original node (including n.Right) n.SetOrig(ir.Copy(n)) - setlineno(n.Ntype) + ir.SetPos(n.Ntype) // Need to handle [...]T arrays specially. 
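
The temporary-introduction step in typecheckargs reads most clearly as the source-level rewrite it performs. A hypothetical sketch (the names f, g, t1, t2 are ours); at package scope there is no enclosing function yet, so the temporaries are parked on initTodo and reassociated later, as the comment above describes:

package main

import "fmt"

func f() (int, string) { return 1, "a" }

func g(i int, s string) { fmt.Println(i, s) }

func main() {
	// g(f()) is typechecked as if written with compiler temporaries:
	t1, t2 := f()
	g(t1, t2)
}
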
if array, ok := n.Ntype.(*ir.ArrayType); ok && array.Elem != nil && array.Len == nil { @@ -3042,7 +3042,7 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { case types.TMAP: var cs constSet for i3, l := range n.List { - setlineno(l) + ir.SetPos(l) if l.Op() != ir.OKEY { n.List[i3] = typecheck(l, ctxExpr) base.Errorf("missing key in map literal") @@ -3074,7 +3074,7 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { // simple list of variables ls := n.List for i, n1 := range ls { - setlineno(n1) + ir.SetPos(n1) n1 = typecheck(n1, ctxExpr) ls[i] = n1 if i >= t.NumFields() { @@ -3105,7 +3105,7 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { // keyed list ls := n.List for i, l := range ls { - setlineno(l) + ir.SetPos(l) if l.Op() == ir.OKEY { kv := l.(*ir.KeyExpr) @@ -3199,7 +3199,7 @@ func typecheckarraylit(elemType *types.Type, bound int64, elts []ir.Node, ctx st var key, length int64 for i, elt := range elts { - setlineno(elt) + ir.SetPos(elt) r := elts[i] var kv *ir.KeyExpr if elt.Op() == ir.OKEY { @@ -3264,41 +3264,8 @@ func nonexported(sym *types.Sym) bool { return sym != nil && !types.IsExported(sym.Name) } -// lvalue etc -func islvalue(n ir.Node) bool { - switch n.Op() { - case ir.OINDEX: - n := n.(*ir.IndexExpr) - if n.X.Type() != nil && n.X.Type().IsArray() { - return islvalue(n.X) - } - if n.X.Type() != nil && n.X.Type().IsString() { - return false - } - fallthrough - case ir.ODEREF, ir.ODOTPTR, ir.OCLOSUREREAD: - return true - - case ir.ODOT: - n := n.(*ir.SelectorExpr) - return islvalue(n.X) - - case ir.ONAME: - n := n.(*ir.Name) - if n.Class_ == ir.PFUNC { - return false - } - return true - - case ir.ONAMEOFFSET: - return true - } - - return false -} - func checklvalue(n ir.Node, verb string) { - if !islvalue(n) { + if !ir.IsAssignable(n) { base.Errorf("cannot %s %v", verb, n) } } @@ -3306,7 +3273,7 @@ func checklvalue(n ir.Node, verb string) { func checkassign(stmt ir.Node, n ir.Node) { // Variables declared in ORANGE are assigned on every iteration. if !ir.DeclaredBy(n, stmt) || stmt.Op() == ir.ORANGE { - r := outervalue(n) + r := ir.OuterValue(n) if r.Op() == ir.ONAME { r := r.(*ir.Name) r.Name().SetAssigned(true) @@ -3316,7 +3283,7 @@ func checkassign(stmt ir.Node, n ir.Node) { } } - if islvalue(n) { + if ir.IsAssignable(n) { return } if n.Op() == ir.OINDEXMAP { @@ -3335,7 +3302,7 @@ func checkassign(stmt ir.Node, n ir.Node) { base.Errorf("cannot assign to struct field %v in map", n) case (n.Op() == ir.OINDEX && n.(*ir.IndexExpr).X.Type().IsString()) || n.Op() == ir.OSLICESTR: base.Errorf("cannot assign to %v (strings are immutable)", n) - case n.Op() == ir.OLITERAL && n.Sym() != nil && isGoConst(n): + case n.Op() == ir.OLITERAL && n.Sym() != nil && ir.IsConstNode(n): base.Errorf("cannot assign to %v (declared const)", n) default: base.Errorf("cannot assign to %v", n) @@ -3349,77 +3316,6 @@ func checkassignlist(stmt ir.Node, l ir.Nodes) { } } -// samesafeexpr checks whether it is safe to reuse one of l and r -// instead of computing both. samesafeexpr assumes that l and r are -// used in the same statement or expression. In order for it to be -// safe to reuse l or r, they must: -// * be the same expression -// * not have side-effects (no function calls, no channel ops); -// however, panics are ok -// * not cause inappropriate aliasing; e.g. two string to []byte -// conversions, must result in two distinct slices -// -// The handling of OINDEXMAP is subtle. OINDEXMAP can occur both -// as an lvalue (map assignment) and an rvalue (map access). 
This is -// currently OK, since the only place samesafeexpr gets used on an -// lvalue expression is for OSLICE and OAPPEND optimizations, and it -// is correct in those settings. -func samesafeexpr(l ir.Node, r ir.Node) bool { - if l.Op() != r.Op() || !types.Identical(l.Type(), r.Type()) { - return false - } - - switch l.Op() { - case ir.ONAME, ir.OCLOSUREREAD: - return l == r - - case ir.ODOT, ir.ODOTPTR: - l := l.(*ir.SelectorExpr) - r := r.(*ir.SelectorExpr) - return l.Sel != nil && r.Sel != nil && l.Sel == r.Sel && samesafeexpr(l.X, r.X) - - case ir.ODEREF: - l := l.(*ir.StarExpr) - r := r.(*ir.StarExpr) - return samesafeexpr(l.X, r.X) - - case ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG: - l := l.(*ir.UnaryExpr) - r := r.(*ir.UnaryExpr) - return samesafeexpr(l.X, r.X) - - case ir.OCONVNOP: - l := l.(*ir.ConvExpr) - r := r.(*ir.ConvExpr) - return samesafeexpr(l.X, r.X) - - case ir.OCONV: - l := l.(*ir.ConvExpr) - r := r.(*ir.ConvExpr) - // Some conversions can't be reused, such as []byte(str). - // Allow only numeric-ish types. This is a bit conservative. - return types.IsSimple[l.Type().Kind()] && samesafeexpr(l.X, r.X) - - case ir.OINDEX, ir.OINDEXMAP: - l := l.(*ir.IndexExpr) - r := r.(*ir.IndexExpr) - return samesafeexpr(l.X, r.X) && samesafeexpr(l.Index, r.Index) - - case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD: - l := l.(*ir.BinaryExpr) - r := r.(*ir.BinaryExpr) - return samesafeexpr(l.X, r.X) && samesafeexpr(l.Y, r.Y) - - case ir.OLITERAL: - return constant.Compare(l.Val(), token.EQL, r.Val()) - - case ir.ONIL: - return true - } - - return false -} - // type check assignment. // if this assignment is the definition of a var on the left side, // fill in the var's type. @@ -3639,7 +3535,7 @@ func typecheckfunc(n *ir.Func) { return } - n.Nname.SetSym(methodSym(rcvr.Type, n.Shortname)) + n.Nname.SetSym(ir.MethodSym(rcvr.Type, n.Shortname)) declare(n.Nname, ir.PFUNC) } @@ -3658,7 +3554,7 @@ func stringtoruneslit(n *ir.ConvExpr) ir.Node { var l []ir.Node i := 0 for _, r := range ir.StringVal(n.X) { - l = append(l, ir.NewKeyExpr(base.Pos, nodintconst(int64(i)), nodintconst(int64(r)))) + l = append(l, ir.NewKeyExpr(base.Pos, ir.NewInt(int64(i)), ir.NewInt(int64(r)))) i++ } @@ -3716,7 +3612,7 @@ func typecheckdef(n ir.Node) { defer tracePrint("typecheckdef", n)(nil) } - lno := setlineno(n) + lno := ir.SetPos(n) if n.Op() == ir.ONONAME { if !n.Diag() { @@ -3779,7 +3675,7 @@ func typecheckdef(n ir.Node) { if e.Type() == nil { goto ret } - if !isGoConst(e) { + if !ir.IsConstNode(e) { if !e.Diag() { if e.Op() == ir.ONIL { base.ErrorfAt(n.Pos(), "const initializer cannot be nil") @@ -3904,7 +3800,7 @@ func checkmake(t *types.Type, arg string, np *ir.Node) bool { base.Errorf("negative %s argument in make(%v)", arg, t) return false } - if doesoverflow(v, types.Types[types.TINT]) { + if ir.ConstOverflow(v, types.Types[types.TINT]) { base.Errorf("%s argument too large in make(%v)", arg, t) return false } @@ -4236,8 +4132,8 @@ func getIotaValue() int64 { } } - if Curfn != nil && Curfn.Iota >= 0 { - return Curfn.Iota + if ir.CurFunc != nil && ir.CurFunc.Iota >= 0 { + return ir.CurFunc.Iota } return -1 @@ -4245,33 +4141,10 @@ func getIotaValue() int64 { // curpkg returns the current package, based on Curfn. func curpkg() *types.Pkg { - fn := Curfn + fn := ir.CurFunc if fn == nil { // Initialization expressions for package-scope variables. 
return types.LocalPkg } return fnpkg(fn.Nname) } - -// MethodName returns the ONAME representing the method -// referenced by expression n, which must be a method selector, -// method expression, or method value. -func methodExprName(n ir.Node) *ir.Name { - name, _ := methodExprFunc(n).Nname.(*ir.Name) - return name -} - -// MethodFunc is like MethodName, but returns the types.Field instead. -func methodExprFunc(n ir.Node) *types.Field { - switch n.Op() { - case ir.ODOTMETH: - return n.(*ir.SelectorExpr).Selection - case ir.OMETHEXPR: - return n.(*ir.MethodExpr).Method - case ir.OCALLPART: - n := n.(*ir.CallPartExpr) - return callpartMethod(n) - } - base.Fatalf("unexpected node: %v (%v)", n, n.Op()) - panic("unreachable") -} diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index c9cce4b4884b4..b7472ede0f9e9 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -340,8 +340,8 @@ func finishUniverse() { s1.Block = s.Block } - nodfp = NewName(lookup(".fp")) - nodfp.SetType(types.Types[types.TINT32]) - nodfp.Class_ = ir.PPARAM - nodfp.SetUsed(true) + ir.RegFP = NewName(lookup(".fp")) + ir.RegFP.SetType(types.Types[types.TINT32]) + ir.RegFP.Class_ = ir.PPARAM + ir.RegFP.SetUsed(true) } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 5d812064b6b3c..dd376a8835a02 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -10,6 +10,7 @@ import ( "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/objabi" + "cmd/internal/src" "cmd/internal/sys" "encoding/binary" "errors" @@ -24,7 +25,7 @@ const tmpstringbufsize = 32 const zeroValSize = 1024 // must match value of runtime/map.go:maxZero func walk(fn *ir.Func) { - Curfn = fn + ir.CurFunc = fn errorsBefore := base.Errors() order(fn) if base.Errors() > errorsBefore { @@ -32,8 +33,8 @@ func walk(fn *ir.Func) { } if base.Flag.W != 0 { - s := fmt.Sprintf("\nbefore walk %v", Curfn.Sym()) - ir.DumpList(s, Curfn.Body) + s := fmt.Sprintf("\nbefore walk %v", ir.CurFunc.Sym()) + ir.DumpList(s, ir.CurFunc.Body) } lno := base.Pos @@ -72,17 +73,17 @@ func walk(fn *ir.Func) { if base.Errors() > errorsBefore { return } - walkstmtlist(Curfn.Body) + walkstmtlist(ir.CurFunc.Body) if base.Flag.W != 0 { - s := fmt.Sprintf("after walk %v", Curfn.Sym()) - ir.DumpList(s, Curfn.Body) + s := fmt.Sprintf("after walk %v", ir.CurFunc.Sym()) + ir.DumpList(s, ir.CurFunc.Body) } zeroResults() heapmoves() - if base.Flag.W != 0 && len(Curfn.Enter) > 0 { - s := fmt.Sprintf("enter %v", Curfn.Sym()) - ir.DumpList(s, Curfn.Enter) + if base.Flag.W != 0 && len(ir.CurFunc.Enter) > 0 { + s := fmt.Sprintf("enter %v", ir.CurFunc.Sym()) + ir.DumpList(s, ir.CurFunc.Enter) } if base.Flag.Cfg.Instrumenting { @@ -100,7 +101,7 @@ func paramoutheap(fn *ir.Func) bool { for _, ln := range fn.Dcl { switch ln.Class_ { case ir.PPARAMOUT: - if isParamStackCopy(ln) || ln.Addrtaken() { + if ir.IsParamStackCopy(ln) || ln.Addrtaken() { return true } @@ -120,7 +121,7 @@ func walkstmt(n ir.Node) ir.Node { return n } - setlineno(n) + ir.SetPos(n) walkstmtlist(n.Init()) @@ -191,7 +192,7 @@ func walkstmt(n ir.Node) ir.Node { n.X = walkexpr(n.X, &init) call := walkexpr(mkcall1(chanfn("chanrecv1", 2, n.X.Type()), nil, &init, n.X, nodnil()), &init) - return initExpr(init, call) + return ir.InitExpr(init, call) case ir.OBREAK, ir.OCONTINUE, @@ -230,18 +231,18 @@ func walkstmt(n ir.Node) ir.Node { case ir.ODEFER: n := n.(*ir.GoDeferStmt) - 
Curfn.SetHasDefer(true) - Curfn.NumDefers++ - if Curfn.NumDefers > maxOpenDefers { + ir.CurFunc.SetHasDefer(true) + ir.CurFunc.NumDefers++ + if ir.CurFunc.NumDefers > maxOpenDefers { // Don't allow open-coded defers if there are more than // 8 defers in the function, since we use a single // byte to record active defers. - Curfn.SetOpenCodedDeferDisallowed(true) + ir.CurFunc.SetOpenCodedDeferDisallowed(true) } - if n.Esc() != EscNever { + if n.Esc() != ir.EscNever { // If n.Esc is not EscNever, then this defer occurs in a loop, // so open-coded defers cannot be used in this function. - Curfn.SetOpenCodedDeferDisallowed(true) + ir.CurFunc.SetOpenCodedDeferDisallowed(true) } fallthrough case ir.OGO: @@ -288,7 +289,7 @@ func walkstmt(n ir.Node) ir.Node { init := n.Cond.Init() n.Cond.PtrInit().Set(nil) n.Cond = walkexpr(n.Cond, &init) - n.Cond = initExpr(init, n.Cond) + n.Cond = ir.InitExpr(init, n.Cond) } n.Post = walkstmt(n.Post) @@ -307,23 +308,23 @@ func walkstmt(n ir.Node) ir.Node { case ir.ORETURN: n := n.(*ir.ReturnStmt) - Curfn.NumReturns++ + ir.CurFunc.NumReturns++ if len(n.Results) == 0 { return n } - if (hasNamedResults(Curfn) && len(n.Results) > 1) || paramoutheap(Curfn) { + if (ir.HasNamedResults(ir.CurFunc) && len(n.Results) > 1) || paramoutheap(ir.CurFunc) { // assign to the function out parameters, // so that ascompatee can fix up conflicts var rl []ir.Node - for _, ln := range Curfn.Dcl { + for _, ln := range ir.CurFunc.Dcl { cl := ln.Class_ if cl == ir.PAUTO || cl == ir.PAUTOHEAP { break } if cl == ir.PPARAMOUT { var ln ir.Node = ln - if isParamStackCopy(ln) { + if ir.IsParamStackCopy(ln) { ln = walkexpr(typecheck(ir.NewStarExpr(base.Pos, ln.Name().Heapaddr), ctxExpr), nil) } rl = append(rl, ln) @@ -345,12 +346,12 @@ func walkstmt(n ir.Node) ir.Node { walkexprlist(n.Results, n.PtrInit()) // For each return parameter (lhs), assign the corresponding result (rhs). - lhs := Curfn.Type().Results() + lhs := ir.CurFunc.Type().Results() rhs := n.Results res := make([]ir.Node, lhs.NumFields()) for i, nl := range lhs.FieldSlice() { nname := ir.AsNode(nl.Nname) - if isParamHeapCopy(nname) { + if ir.IsParamHeapCopy(nname) { nname = nname.Name().Stackcopy } a := ir.NewAssignStmt(base.Pos, nname, rhs[i]) @@ -485,7 +486,7 @@ func walkexpr(n ir.Node, init *ir.Nodes) ir.Node { init.Append(n.PtrInit().Take()...) 
} - lno := setlineno(n) + lno := ir.SetPos(n) if base.Flag.LowerW > 1 { ir.Dump("before walk expr", n) @@ -643,7 +644,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { var ll ir.Nodes n.Y = walkexpr(n.Y, &ll) - n.Y = initExpr(ll, n.Y) + n.Y = ir.InitExpr(ll, n.Y) return n case ir.OPRINT, ir.OPRINTN: @@ -655,7 +656,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.ORECOVER: n := n.(*ir.CallExpr) - return mkcall("gorecover", n.Type(), init, nodAddr(nodfp)) + return mkcall("gorecover", n.Type(), init, nodAddr(ir.RegFP)) case ir.OCLOSUREREAD, ir.OCFUNC: return n @@ -710,7 +711,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if left.Op() == ir.OINDEXMAP && right.Op() == ir.OAPPEND { left := left.(*ir.IndexExpr) mapAppend = right.(*ir.CallExpr) - if !samesafeexpr(left, mapAppend.Args[0]) { + if !ir.SameSafeExpr(left, mapAppend.Args[0]) { base.Fatalf("not same expressions: %v != %v", left, mapAppend.Args[0]) } } @@ -738,7 +739,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return as } - if !base.Flag.Cfg.Instrumenting && isZero(as.Y) { + if !base.Flag.Cfg.Instrumenting && ir.IsZero(as.Y) { return as } @@ -794,7 +795,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { init.Append(n.PtrInit().Take()...) walkexprlistsafe(n.Lhs, init) walkexprlistsafe(n.Rhs, init) - return liststmt(ascompatee(ir.OAS, n.Lhs, n.Rhs, init)) + return ir.NewBlockStmt(src.NoXPos, ascompatee(ir.OAS, n.Lhs, n.Rhs, init)) // a,b,... = fn() case ir.OAS2FUNC: @@ -805,14 +806,14 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { walkexprlistsafe(n.Lhs, init) r = walkexpr(r, init) - if IsIntrinsicCall(r.(*ir.CallExpr)) { + if ir.IsIntrinsicCall(r.(*ir.CallExpr)) { n.Rhs = []ir.Node{r} return n } init.Append(r) ll := ascompatet(n.Lhs, r.Type()) - return liststmt(ll) + return ir.NewBlockStmt(src.NoXPos, ll) // x, y = <-c // order.stmt made sure x is addressable or blank. @@ -926,8 +927,8 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { fromType := n.X.Type() toType := n.Type() - if !fromType.IsInterface() && !ir.IsBlank(Curfn.Nname) { // skip unnamed functions (func _()) - markTypeUsedInInterface(fromType, Curfn.LSym) + if !fromType.IsInterface() && !ir.IsBlank(ir.CurFunc.Nname) { // skip unnamed functions (func _()) + markTypeUsedInInterface(fromType, ir.CurFunc.LSym) } // typeword generates the type word of the interface value. @@ -971,9 +972,9 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // and staticuint64s[n.Left * 8 + 7] on big-endian. n.X = cheapexpr(n.X, init) // byteindex widens n.Left so that the multiplication doesn't overflow. - index := ir.NewBinaryExpr(base.Pos, ir.OLSH, byteindex(n.X), nodintconst(3)) + index := ir.NewBinaryExpr(base.Pos, ir.OLSH, byteindex(n.X), ir.NewInt(3)) if thearch.LinkArch.ByteOrder == binary.BigEndian { - index = ir.NewBinaryExpr(base.Pos, ir.OADD, index, nodintconst(7)) + index = ir.NewBinaryExpr(base.Pos, ir.OADD, index, ir.NewInt(7)) } xe := ir.NewIndexExpr(base.Pos, ir.Names.Staticuint64s, index) xe.SetBounded(true) @@ -981,7 +982,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class_ == ir.PEXTERN && n.X.(*ir.Name).Readonly(): // n.Left is a readonly global; use it directly. value = n.X - case !fromType.IsInterface() && n.Esc() == EscNone && fromType.Width <= 1024: + case !fromType.IsInterface() && n.Esc() == ir.EscNone && fromType.Width <= 1024: // n.Left does not escape. Use a stack temporary initialized to n.Left. 
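
The staticuint64s indexing a few lines up is subtle: assuming a table whose entry i is uint64(i), as in the runtime, the interface data pointer must address the byte within that word that actually holds the value, which depends on byte order. A standalone sketch (our names) of the offset arithmetic, b*8 on little-endian and b*8+7 on big-endian:

package main

import (
	"fmt"
	"unsafe"
)

var staticuint64s [256]uint64

func init() {
	for i := range staticuint64s {
		staticuint64s[i] = uint64(i)
	}
}

func main() {
	b := byte(42)
	one := uint16(1)
	littleEndian := *(*byte)(unsafe.Pointer(&one)) == 1

	off := uintptr(b) << 3 // b * 8: byte offset of entry b
	if !littleEndian {
		off += 7 // the low-order byte sits at the end of the word
	}
	p := (*byte)(unsafe.Add(unsafe.Pointer(&staticuint64s[0]), off))
	fmt.Println(*p == b) // true on either byte order
}
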
value = temp(fromType) init.Append(typecheck(ir.NewAssignStmt(base.Pos, value, n.X), ctxStmt)) @@ -1058,7 +1059,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // with a non-interface, especially in a switch on interface value // with non-interface cases, is not visible to order.stmt, so we // have to fall back on allocating a temp here. - if !islvalue(v) { + if !ir.IsAssignable(v) { v = copyexpr(v, v.Type(), init) } v = nodAddr(v) @@ -1078,7 +1079,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if n.Op() == ir.OCONVNOP && n.Type() == n.X.Type() { return n.X } - if n.Op() == ir.OCONVNOP && checkPtr(Curfn, 1) { + if n.Op() == ir.OCONVNOP && ir.ShouldCheckPtr(ir.CurFunc, 1) { if n.Type().IsPtr() && n.X.Type().IsUnsafePtr() { // unsafe.Pointer to *T return walkCheckPtrAlignment(n, init, nil) } @@ -1177,7 +1178,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Index, constant.Int) { base.Warn("index bounds check elided") } - if smallintconst(n.Index) && !n.Bounded() { + if ir.IsSmallIntConst(n.Index) && !n.Bounded() { base.Errorf("index out of bounds") } } else if ir.IsConst(n.X, constant.String) { @@ -1185,13 +1186,13 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Index, constant.Int) { base.Warn("index bounds check elided") } - if smallintconst(n.Index) && !n.Bounded() { + if ir.IsSmallIntConst(n.Index) && !n.Bounded() { base.Errorf("index out of bounds") } } if ir.IsConst(n.Index, constant.Int) { - if v := n.Index.Val(); constant.Sign(v) < 0 || doesoverflow(v, types.Types[types.TINT]) { + if v := n.Index.Val(); constant.Sign(v) < 0 || ir.ConstOverflow(v, types.Types[types.TINT]) { base.Errorf("index out of bounds") } } @@ -1252,7 +1253,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR: n := n.(*ir.SliceExpr) - checkSlice := checkPtr(Curfn, 1) && n.Op() == ir.OSLICE3ARR && n.X.Op() == ir.OCONVNOP && n.X.(*ir.ConvExpr).X.Type().IsUnsafePtr() + checkSlice := ir.ShouldCheckPtr(ir.CurFunc, 1) && n.Op() == ir.OSLICE3ARR && n.X.Op() == ir.OCONVNOP && n.X.(*ir.ConvExpr).X.Type().IsUnsafePtr() if checkSlice { conv := n.X.(*ir.ConvExpr) conv.X = walkexpr(conv.X, init) @@ -1262,7 +1263,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { low, high, max := n.SliceBounds() low = walkexpr(low, init) - if low != nil && isZero(low) { + if low != nil && ir.IsZero(low) { // Reduce x[0:j] to x[:j] and x[0:j:k] to x[:j:k]. low = nil } @@ -1274,7 +1275,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { } if n.Op().IsSlice3() { - if max != nil && max.Op() == ir.OCAP && samesafeexpr(n.X, max.(*ir.UnaryExpr).X) { + if max != nil && max.Op() == ir.OCAP && ir.SameSafeExpr(n.X, max.(*ir.UnaryExpr).X) { // Reduce x[i:j:cap(x)] to x[i:j]. 
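
The two reductions noted here, x[0:j] to x[:j] and x[i:j:cap(x)] to x[i:j], are pure simplifications: the resulting length and capacity are identical, so the extra bound can be dropped whenever SameSafeExpr confirms the cap operand is the sliced expression itself. A quick check in ordinary Go:

package main

import "fmt"

func main() {
	x := make([]int, 4, 8)
	i, j := 1, 3

	a, b := x[0:j], x[:j]
	fmt.Println(len(a) == len(b), cap(a) == cap(b)) // true true

	c, d := x[i:j:cap(x)], x[i:j]
	fmt.Println(len(c) == len(d), cap(c) == cap(d)) // true true
}
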
if n.Op() == ir.OSLICE3 { n.SetOp(ir.OSLICE) @@ -1292,8 +1293,8 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if n.Type().Elem().NotInHeap() { base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type().Elem()) } - if n.Esc() == EscNone { - if n.Type().Elem().Width >= maxImplicitStackVarSize { + if n.Esc() == ir.EscNone { + if n.Type().Elem().Width >= ir.MaxImplicitStackVarSize { base.Fatalf("large ONEW with EscNone: %v", n) } r := temp(n.Type().Elem()) @@ -1346,7 +1347,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // var h *hmap var h ir.Node - if n.Esc() == EscNone { + if n.Esc() == ir.EscNone { // Allocate hmap on stack. // var hv hmap @@ -1372,7 +1373,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // h.buckets = b // } - nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, nodintconst(BUCKETSIZE)), nil, nil) + nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(BUCKETSIZE)), nil, nil) nif.Likely = true // var bv bmap @@ -1398,7 +1399,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false // and no buckets will be allocated by makemap. Therefore, // no buckets need to be allocated in this code path. - if n.Esc() == EscNone { + if n.Esc() == ir.EscNone { // Only need to initialize h.hash0 since // hmap h has been allocated on the stack already. // h.hash0 = fastrand() @@ -1414,7 +1415,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return mkcall1(fn, n.Type(), init) } - if n.Esc() != EscNone { + if n.Esc() != ir.EscNone { h = nodnil() } // Map initialization with a variable or large hint is @@ -1452,7 +1453,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if t.Elem().NotInHeap() { base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem()) } - if n.Esc() == EscNone { + if n.Esc() == ir.EscNone { if why := heapAllocReason(n); why != "" { base.Fatalf("%v has EscNone, but %v", n, why) } @@ -1470,8 +1471,8 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // if len < 0 { panicmakeslicelen() } // panicmakeslicecap() // } - nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, conv(l, types.Types[types.TUINT64]), nodintconst(i)), nil, nil) - niflen := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLT, l, nodintconst(0)), nil, nil) + nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, conv(l, types.Types[types.TUINT64]), ir.NewInt(i)), nil, nil) + niflen := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLT, l, ir.NewInt(0)), nil, nil) niflen.Body = []ir.Node{mkcall("panicmakeslicelen", nil, init)} nif.Body.Append(niflen, mkcall("panicmakeslicecap", nil, init)) init.Append(typecheck(nif, ctxStmt)) @@ -1514,7 +1515,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OMAKESLICECOPY: n := n.(*ir.MakeExpr) - if n.Esc() == EscNone { + if n.Esc() == ir.EscNone { base.Fatalf("OMAKESLICECOPY with EscNone: %v", n) } @@ -1534,12 +1535,12 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // We do not check for overflow of len(to)*elem.Width here // since len(from) is an existing checked slice capacity // with same elem.Width for the from slice. 
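
The single OGT guard above leans on an unsigned-compare idiom: converting the length to uint64 makes a negative value look enormous, so one branch catches both failure modes, and the inner len < 0 test only selects which panic to report. Roughly, in ordinary Go (a sketch; the error strings here are ours):

package main

import "fmt"

func checkMakeLen(length int64, max uint64) error {
	if uint64(length) > max { // negative lengths wrap to huge values
		if length < 0 {
			return fmt.Errorf("makeslice: len out of range") // panicmakeslicelen
		}
		return fmt.Errorf("makeslice: cap out of range") // panicmakeslicecap
	}
	return nil
}

func main() {
	fmt.Println(checkMakeLen(10, 1<<20))    // <nil>
	fmt.Println(checkMakeLen(-1, 1<<20))    // len out of range
	fmt.Println(checkMakeLen(1<<30, 1<<20)) // cap out of range
}
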
- size := ir.NewBinaryExpr(base.Pos, ir.OMUL, conv(length, types.Types[types.TUINTPTR]), conv(nodintconst(t.Elem().Width), types.Types[types.TUINTPTR])) + size := ir.NewBinaryExpr(base.Pos, ir.OMUL, conv(length, types.Types[types.TUINTPTR]), conv(ir.NewInt(t.Elem().Width), types.Types[types.TUINTPTR])) // instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer fn := syslook("mallocgc") sh := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil) - sh.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, nodnil(), nodbool(false)) + sh.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, nodnil(), ir.NewBool(false)) sh.Ptr.MarkNonNil() sh.LenCap = []ir.Node{length, length} sh.SetType(t) @@ -1570,7 +1571,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.ORUNESTR: n := n.(*ir.ConvExpr) a := nodnil() - if n.Esc() == EscNone { + if n.Esc() == ir.EscNone { t := types.NewArray(types.Types[types.TUINT8], 4) a = nodAddr(temp(t)) } @@ -1580,7 +1581,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OBYTES2STR, ir.ORUNES2STR: n := n.(*ir.ConvExpr) a := nodnil() - if n.Esc() == EscNone { + if n.Esc() == ir.EscNone { // Create temporary buffer for string on stack. t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize) a = nodAddr(temp(t)) @@ -1616,7 +1617,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // Allocate a [n]byte of the right size. t := types.NewArray(types.Types[types.TUINT8], int64(len(sc))) var a ir.Node - if n.Esc() == EscNone && len(sc) <= int(maxImplicitStackVarSize) { + if n.Esc() == ir.EscNone && len(sc) <= int(ir.MaxImplicitStackVarSize) { a = nodAddr(temp(t)) } else { a = callnew(t) @@ -1638,7 +1639,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { } a := nodnil() - if n.Esc() == EscNone { + if n.Esc() == ir.EscNone { // Create temporary buffer for slice on stack. t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize) a = nodAddr(temp(t)) @@ -1661,7 +1662,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OSTR2RUNES: n := n.(*ir.ConvExpr) a := nodnil() - if n.Esc() == EscNone { + if n.Esc() == ir.EscNone { // Create temporary buffer for slice on stack. t := types.NewArray(types.Types[types.TINT32], tmpstringbufsize) a = nodAddr(temp(t)) @@ -1719,7 +1720,7 @@ func markUsedIfaceMethod(n *ir.CallExpr) { dot := n.X.(*ir.SelectorExpr) ityp := dot.X.Type() tsym := typenamesym(ityp).Linksym() - r := obj.Addrel(Curfn.LSym) + r := obj.Addrel(ir.CurFunc.LSym) r.Sym = tsym // dot.Xoffset is the method index * Widthptr (the offset of code pointer // in itab). @@ -1777,7 +1778,7 @@ func rtconvfn(src, dst *types.Type) (param, result types.Kind) { // TODO(josharian): combine this with its caller and simplify func reduceSlice(n *ir.SliceExpr) ir.Node { low, high, max := n.SliceBounds() - if high != nil && high.Op() == ir.OLEN && samesafeexpr(n.X, high.(*ir.UnaryExpr).X) { + if high != nil && high.Op() == ir.OLEN && ir.SameSafeExpr(n.X, high.(*ir.UnaryExpr).X) { // Reduce x[i:len(x)] to x[i:]. high = nil } @@ -1824,7 +1825,7 @@ func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node { break } // Do not generate 'x = x' during return. See issue 4014. 
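
Stripped of the IR plumbing, the appendslice lowering in this hunk computes the new length, grows the destination if needed, and bulk-copies the suffix; the choice between typedslicecopy and slicecopy/memmove only selects a pointer-aware copier. A source-level sketch (ours):

package main

import "fmt"

func appendSlice(l1, l2 []int) []int {
	s := l1
	newLen := len(l1) + len(l2)
	if newLen > cap(s) {
		grown := make([]int, len(s), newLen) // stand-in for growslice
		copy(grown, s)
		s = grown
	}
	s = s[:newLen]
	copy(s[len(l1):], l2) // the memmove/slicecopy/typedslicecopy step
	return s
}

func main() {
	fmt.Println(appendSlice([]int{1, 2}, []int{3, 4})) // [1 2 3 4]
}
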
- if op == ir.ORETURN && samesafeexpr(nl[i], nr[i]) { + if op == ir.ORETURN && ir.SameSafeExpr(nl[i], nr[i]) { continue } nn = append(nn, ascompatee1(nl[i], nr[i], init)) @@ -1835,7 +1836,7 @@ func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node { var nln, nrn ir.Nodes nln.Set(nl) nrn.Set(nr) - base.Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), ir.FuncName(Curfn)) + base.Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), ir.FuncName(ir.CurFunc)) } return reorder3(nn) } @@ -2000,11 +2001,11 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { t := make([]ir.Node, 0, len(s)*2) for i, n := range s { if i != 0 { - t = append(t, nodstr(" ")) + t = append(t, ir.NewString(" ")) } t = append(t, n) } - t = append(t, nodstr("\n")) + t = append(t, ir.NewString("\n")) nn.Args.Set(t) } @@ -2018,7 +2019,7 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { i++ } if len(strs) > 0 { - t = append(t, nodstr(strings.Join(strs, ""))) + t = append(t, ir.NewString(strings.Join(strs, ""))) } if i < len(s) { t = append(t, s[i]) @@ -2140,31 +2141,6 @@ func callnew(t *types.Type) ir.Node { return n } -// isReflectHeaderDataField reports whether l is an expression p.Data -// where p has type reflect.SliceHeader or reflect.StringHeader. -func isReflectHeaderDataField(l ir.Node) bool { - if l.Type() != types.Types[types.TUINTPTR] { - return false - } - - var tsym *types.Sym - switch l.Op() { - case ir.ODOT: - l := l.(*ir.SelectorExpr) - tsym = l.X.Type().Sym() - case ir.ODOTPTR: - l := l.(*ir.SelectorExpr) - tsym = l.X.Type().Elem().Sym() - default: - return false - } - - if tsym == nil || l.Sym().Name != "Data" || tsym.Pkg.Path != "reflect" { - return false - } - return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader" -} - func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt { if n.Op() != ir.OAS { base.Fatalf("convas: not OAS %v", n.Op()) @@ -2288,37 +2264,6 @@ func reorder3save(n ir.Node, all []*ir.AssignStmt, i int, early *[]ir.Node) ir.N return q } -// what's the outer value that a write to n affects? -// outer value means containing struct or array. -func outervalue(n ir.Node) ir.Node { - for { - switch nn := n; nn.Op() { - case ir.OXDOT: - base.Fatalf("OXDOT in walk") - case ir.ODOT: - nn := nn.(*ir.SelectorExpr) - n = nn.X - continue - case ir.OPAREN: - nn := nn.(*ir.ParenExpr) - n = nn.X - continue - case ir.OCONVNOP: - nn := nn.(*ir.ConvExpr) - n = nn.X - continue - case ir.OINDEX: - nn := nn.(*ir.IndexExpr) - if nn.X.Type() != nil && nn.X.Type().IsArray() { - n = nn.X - continue - } - } - - return n - } -} - // Is it possible that the computation of r might be // affected by assignments in all? func aliased(r ir.Node, all []*ir.AssignStmt) bool { @@ -2344,7 +2289,7 @@ func aliased(r ir.Node, all []*ir.AssignStmt) bool { continue } - lv := outervalue(as.X) + lv := ir.OuterValue(as.X) if lv.Op() != ir.ONAME { memwrite = true continue @@ -2526,7 +2471,7 @@ func paramstoheap(params *types.Type) []ir.Node { // even allocations to move params/results to the heap. // The generated code is added to Curfn's Enter list. func zeroResults() { - for _, f := range Curfn.Type().Results().Fields().Slice() { + for _, f := range ir.CurFunc.Type().Results().Fields().Slice() { v := ir.AsNode(f.Nname) if v != nil && v.Name().Heapaddr != nil { // The local which points to the return value is the @@ -2534,7 +2479,7 @@ func zeroResults() { // by a Needzero annotation in plive.go:livenessepilogue. 
continue } - if isParamHeapCopy(v) { + if ir.IsParamHeapCopy(v) { // TODO(josharian/khr): Investigate whether we can switch to "continue" here, // and document more in either case. // In the review of CL 114797, Keith wrote (roughly): @@ -2544,7 +2489,7 @@ func zeroResults() { v = v.Name().Stackcopy } // Zero the stack location containing f. - Curfn.Enter.Append(ir.NewAssignStmt(Curfn.Pos(), v, nil)) + ir.CurFunc.Enter.Append(ir.NewAssignStmt(ir.CurFunc.Pos(), v, nil)) } } @@ -2570,13 +2515,13 @@ func returnsfromheap(params *types.Type) []ir.Node { // Enter and Exit lists. func heapmoves() { lno := base.Pos - base.Pos = Curfn.Pos() - nn := paramstoheap(Curfn.Type().Recvs()) - nn = append(nn, paramstoheap(Curfn.Type().Params())...) - nn = append(nn, paramstoheap(Curfn.Type().Results())...) - Curfn.Enter.Append(nn...) - base.Pos = Curfn.Endlineno - Curfn.Exit.Append(returnsfromheap(Curfn.Type().Results())...) + base.Pos = ir.CurFunc.Pos() + nn := paramstoheap(ir.CurFunc.Type().Recvs()) + nn = append(nn, paramstoheap(ir.CurFunc.Type().Params())...) + nn = append(nn, paramstoheap(ir.CurFunc.Type().Results())...) + ir.CurFunc.Enter.Append(nn...) + base.Pos = ir.CurFunc.Endlineno + ir.CurFunc.Exit.Append(returnsfromheap(ir.CurFunc.Type().Results())...) base.Pos = lno } @@ -2743,7 +2688,7 @@ func addstr(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { } buf := nodnil() - if n.Esc() == EscNone { + if n.Esc() == ir.EscNone { sz := int64(0) for _, n1 := range n.List { if n1.Op() == ir.OLITERAL { @@ -2779,7 +2724,7 @@ func addstr(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { slice := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(t), args[1:]) slice.Prealloc = n.Prealloc args = []ir.Node{buf, slice} - slice.SetEsc(EscNone) + slice.SetEsc(ir.EscNone) } cat := syslook(fn) @@ -2865,7 +2810,7 @@ func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { slice.SetType(s.Type()) slice.SetSliceBounds(ir.NewUnaryExpr(base.Pos, ir.OLEN, l1), nil, nil) - Curfn.SetWBPos(n.Pos()) + ir.CurFunc.SetWBPos(n.Pos()) // instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int fn := syslook("typedslicecopy") @@ -2886,7 +2831,7 @@ func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { fn := syslook("slicecopy") fn = substArgTypes(fn, ptr1.Type().Elem(), ptr2.Type().Elem()) - ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, nodintconst(elemtype.Width)) + ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, ir.NewInt(elemtype.Width)) } else { // memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T)) ix := ir.NewIndexExpr(base.Pos, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1)) @@ -2896,7 +2841,7 @@ func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, l2) nwid := cheapexpr(conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, l2), types.Types[types.TUINTPTR]), &nodes) - nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, nodintconst(elemtype.Width)) + nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(elemtype.Width)) // instantiate func memmove(to *any, frm *any, length uintptr) fn := syslook("memmove") @@ -2992,7 +2937,7 @@ func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { var nodes []ir.Node // if l2 >= 0 (likely happens), do nothing - nifneg := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGE, l2, nodintconst(0)), nil, nil) + nifneg := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGE, l2, ir.NewInt(0)), nil, nil) nifneg.Likely = true // else panicmakeslicelen() @@ 
-3044,13 +2989,13 @@ func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { hp := convnop(nodAddr(ix), types.Types[types.TUNSAFEPTR]) // hn := l2 * sizeof(elem(s)) - hn := conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, l2, nodintconst(elemtype.Width)), types.Types[types.TUINTPTR]) + hn := conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, l2, ir.NewInt(elemtype.Width)), types.Types[types.TUINTPTR]) clrname := "memclrNoHeapPointers" hasPointers := elemtype.HasPointers() if hasPointers { clrname = "memclrHasPointers" - Curfn.SetWBPos(n.Pos()) + ir.CurFunc.SetWBPos(n.Pos()) } var clr ir.Nodes @@ -3094,7 +3039,7 @@ func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { // } // s func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node { - if !samesafeexpr(dst, n.Args[0]) { + if !ir.SameSafeExpr(dst, n.Args[0]) { n.Args[0] = safeexpr(n.Args[0], init) n.Args[0] = walkexpr(n.Args[0], init) } @@ -3134,7 +3079,7 @@ func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node { ns := temp(nsrc.Type()) l = append(l, ir.NewAssignStmt(base.Pos, ns, nsrc)) // s = src - na := nodintconst(int64(argc)) // const argc + na := ir.NewInt(int64(argc)) // const argc nif := ir.NewIfStmt(base.Pos, nil, nil, nil) // if cap(s) - len(s) < argc nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OCAP, ns), ir.NewUnaryExpr(base.Pos, ir.OLEN, ns)), na) @@ -3160,7 +3105,7 @@ func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node { ix.SetBounded(true) l = append(l, ir.NewAssignStmt(base.Pos, ix, n)) // s[n] = arg if i+1 < len(ls) { - l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, nodintconst(1)))) // n = n + 1 + l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, ir.NewInt(1)))) // n = n + 1 } } @@ -3183,7 +3128,7 @@ func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node { // func copyany(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node { if n.X.Type().Elem().HasPointers() { - Curfn.SetWBPos(n.Pos()) + ir.CurFunc.SetWBPos(n.Pos()) fn := writebarrierfn("typedslicecopy", n.X.Type().Elem(), n.Y.Type().Elem()) n.X = cheapexpr(n.X, init) ptrL, lenL := backingArrayPtrLen(n.X) @@ -3205,7 +3150,7 @@ func copyany(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node { fn := syslook("slicecopy") fn = substArgTypes(fn, ptrL.Type().Elem(), ptrR.Type().Elem()) - return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, nodintconst(n.X.Type().Elem().Width)) + return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, ir.NewInt(n.X.Type().Elem().Width)) } n.X = walkexpr(n.X, init) @@ -3241,7 +3186,7 @@ func copyany(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node { nwid := ir.Node(temp(types.Types[types.TUINTPTR])) setwid := ir.NewAssignStmt(base.Pos, nwid, conv(nlen, types.Types[types.TUINTPTR])) ne.Body.Append(setwid) - nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, nodintconst(nl.Type().Elem().Width)) + nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(nl.Type().Elem().Width)) call := mkcall1(fn, nil, init, nto, nfrm, nwid) ne.Body.Append(call) @@ -3264,12 +3209,12 @@ func eqfor(t *types.Type) (n ir.Node, needsize bool) { case types.ASPECIAL: sym := typesymprefix(".eq", t) n := NewName(sym) - setNodeNameFunc(n) + ir.MarkFunc(n) n.SetType(functype(nil, []*ir.Field{ - anonfield(types.NewPtr(t)), - anonfield(types.NewPtr(t)), + ir.NewField(base.Pos, nil, nil, types.NewPtr(t)), + ir.NewField(base.Pos, nil, nil, types.NewPtr(t)), }, 
[]*ir.Field{ - anonfield(types.Types[types.TBOOL]), + ir.NewField(base.Pos, nil, nil, types.Types[types.TBOOL]), })) return n, false } @@ -3415,7 +3360,7 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { // Chose not to inline. Call equality function directly. if !inline { // eq algs take pointers; cmpl and cmpr must be addressable - if !islvalue(cmpl) || !islvalue(cmpr) { + if !ir.IsAssignable(cmpl) || !ir.IsAssignable(cmpr) { base.Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr) } @@ -3424,7 +3369,7 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { call.Args.Append(nodAddr(cmpl)) call.Args.Append(nodAddr(cmpr)) if needsize { - call.Args.Append(nodintconst(t.Width)) + call.Args.Append(ir.NewInt(t.Width)) } res := ir.Node(call) if n.Op() != ir.OEQ { @@ -3483,31 +3428,31 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { } if step == 1 { compare( - ir.NewIndexExpr(base.Pos, cmpl, nodintconst(i)), - ir.NewIndexExpr(base.Pos, cmpr, nodintconst(i)), + ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i)), + ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i)), ) i++ remains -= t.Elem().Width } else { elemType := t.Elem().ToUnsigned() - cmplw := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, nodintconst(i))) + cmplw := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i))) cmplw = conv(cmplw, elemType) // convert to unsigned cmplw = conv(cmplw, convType) // widen - cmprw := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, nodintconst(i))) + cmprw := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i))) cmprw = conv(cmprw, elemType) cmprw = conv(cmprw, convType) // For code like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ... // ssa will generate a single large load. for offset := int64(1); offset < step; offset++ { - lb := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, nodintconst(i+offset))) + lb := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i+offset))) lb = conv(lb, elemType) lb = conv(lb, convType) - lb = ir.NewBinaryExpr(base.Pos, ir.OLSH, lb, nodintconst(8*t.Elem().Width*offset)) + lb = ir.NewBinaryExpr(base.Pos, ir.OLSH, lb, ir.NewInt(8*t.Elem().Width*offset)) cmplw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmplw, lb) - rb := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, nodintconst(i+offset))) + rb := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i+offset))) rb = conv(rb, elemType) rb = conv(rb, convType) - rb = ir.NewBinaryExpr(base.Pos, ir.OLSH, rb, nodintconst(8*t.Elem().Width*offset)) + rb = ir.NewBinaryExpr(base.Pos, ir.OLSH, rb, ir.NewInt(8*t.Elem().Width*offset)) cmprw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmprw, rb) } compare(cmplw, cmprw) @@ -3517,7 +3462,7 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { } } if expr == nil { - expr = nodbool(n.Op() == ir.OEQ) + expr = ir.NewBool(n.Op() == ir.OEQ) // We still need to use cmpl and cmpr, in case they contain // an expression which might panic. See issue 23837. 
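
The shift-and-or chains assembled in walkcompare above are exactly a little-endian multi-byte load, which is why SSA can collapse each chain into one wide load instruction. Checking the identity directly (standalone, ours):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	s := []byte{0x11, 0x22, 0x33, 0x44}
	w := uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 | uint32(s[3])<<24
	fmt.Printf("%#x\n", w)                          // 0x44332211
	fmt.Println(w == binary.LittleEndian.Uint32(s)) // true
}
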
t := temp(cmpl.Type()) @@ -3604,12 +3549,12 @@ func walkcompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { if len(s) > 0 { ncs = safeexpr(ncs, init) } - r := ir.Node(ir.NewBinaryExpr(base.Pos, cmp, ir.NewUnaryExpr(base.Pos, ir.OLEN, ncs), nodintconst(int64(len(s))))) + r := ir.Node(ir.NewBinaryExpr(base.Pos, cmp, ir.NewUnaryExpr(base.Pos, ir.OLEN, ncs), ir.NewInt(int64(len(s))))) remains := len(s) for i := 0; remains > 0; { if remains == 1 || !canCombineLoads { - cb := nodintconst(int64(s[i])) - ncb := ir.NewIndexExpr(base.Pos, ncs, nodintconst(int64(i))) + cb := ir.NewInt(int64(s[i])) + ncb := ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i))) r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, ncb, cb)) remains-- i++ @@ -3628,18 +3573,18 @@ func walkcompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { convType = types.Types[types.TUINT16] step = 2 } - ncsubstr := conv(ir.NewIndexExpr(base.Pos, ncs, nodintconst(int64(i))), convType) + ncsubstr := conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i))), convType) csubstr := int64(s[i]) // Calculate large constant from bytes as sequence of shifts and ors. // Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ... // ssa will combine this into a single large load. for offset := 1; offset < step; offset++ { - b := conv(ir.NewIndexExpr(base.Pos, ncs, nodintconst(int64(i+offset))), convType) - b = ir.NewBinaryExpr(base.Pos, ir.OLSH, b, nodintconst(int64(8*offset))) + b := conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i+offset))), convType) + b = ir.NewBinaryExpr(base.Pos, ir.OLSH, b, ir.NewInt(int64(8*offset))) ncsubstr = ir.NewBinaryExpr(base.Pos, ir.OOR, ncsubstr, b) csubstr |= int64(s[i+offset]) << uint8(8*offset) } - csubstrPart := nodintconst(csubstr) + csubstrPart := ir.NewInt(csubstr) // Compare "step" bytes as once r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, csubstrPart, ncsubstr)) remains -= step @@ -3668,7 +3613,7 @@ func walkcompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { } else { // sys_cmpstring(s1, s2) :: 0 r = mkcall("cmpstring", types.Types[types.TINT], init, conv(n.X, types.Types[types.TSTRING]), conv(n.Y, types.Types[types.TSTRING])) - r = ir.NewBinaryExpr(base.Pos, n.Op(), r, nodintconst(0)) + r = ir.NewBinaryExpr(base.Pos, n.Op(), r, ir.NewInt(0)) } return finishcompare(n, r, init) @@ -3692,7 +3637,7 @@ func bounded(n ir.Node, max int64) bool { sign := n.Type().IsSigned() bits := int32(8 * n.Type().Width) - if smallintconst(n) { + if ir.IsSmallIntConst(n) { v := ir.Int64Val(n) return 0 <= v && v < max } @@ -3702,9 +3647,9 @@ func bounded(n ir.Node, max int64) bool { n := n.(*ir.BinaryExpr) v := int64(-1) switch { - case smallintconst(n.X): + case ir.IsSmallIntConst(n.X): v = ir.Int64Val(n.X) - case smallintconst(n.Y): + case ir.IsSmallIntConst(n.Y): v = ir.Int64Val(n.Y) if n.Op() == ir.OANDNOT { v = ^v @@ -3719,7 +3664,7 @@ func bounded(n ir.Node, max int64) bool { case ir.OMOD: n := n.(*ir.BinaryExpr) - if !sign && smallintconst(n.Y) { + if !sign && ir.IsSmallIntConst(n.Y) { v := ir.Int64Val(n.Y) if 0 <= v && v <= max { return true @@ -3728,7 +3673,7 @@ func bounded(n ir.Node, max int64) bool { case ir.ODIV: n := n.(*ir.BinaryExpr) - if !sign && smallintconst(n.Y) { + if !sign && ir.IsSmallIntConst(n.Y) { v := ir.Int64Val(n.Y) for bits > 0 && v >= 2 { bits-- @@ -3738,7 +3683,7 @@ func bounded(n ir.Node, max int64) bool { case ir.ORSH: n := n.(*ir.BinaryExpr) - if !sign && smallintconst(n.Y) { + if !sign && ir.IsSmallIntConst(n.Y) { v := 
ir.Int64Val(n.Y) if v > int64(bits) { return true @@ -3794,9 +3739,9 @@ func usemethod(n *ir.CallExpr) { // (including global variables such as numImports - was issue #19028). // Also need to check for reflect package itself (see Issue #38515). if s := res0.Type.Sym(); s != nil && s.Name == "Method" && types.IsReflectPkg(s.Pkg) { - Curfn.SetReflectMethod(true) + ir.CurFunc.SetReflectMethod(true) // The LSym is initialized at this point. We need to set the attribute on the LSym. - Curfn.LSym.Set(obj.AttrReflectMethod, true) + ir.CurFunc.LSym.Set(obj.AttrReflectMethod, true) } } @@ -3845,10 +3790,10 @@ func usefield(n *ir.SelectorExpr) { } sym := tracksym(outer, field) - if Curfn.FieldTrack == nil { - Curfn.FieldTrack = make(map[*types.Sym]struct{}) + if ir.CurFunc.FieldTrack == nil { + ir.CurFunc.FieldTrack = make(map[*types.Sym]struct{}) } - Curfn.FieldTrack[sym] = struct{}{} + ir.CurFunc.FieldTrack[sym] = struct{}{} } // anySideEffects reports whether n contains any operations that could have observable side effects. @@ -3987,7 +3932,7 @@ func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node { arg = arg.(*ir.ConvExpr).X n.Args[i] = arg } - funcArgs = append(funcArgs, symfield(s, arg.Type())) + funcArgs = append(funcArgs, ir.NewField(base.Pos, s, nil, arg.Type())) } t := ir.NewFuncType(base.Pos, nil, funcArgs, nil) @@ -3995,7 +3940,7 @@ func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node { sym := lookupN("wrap·", wrapCall_prgen) fn := dclfunc(sym, t) - args := paramNnames(t.Type()) + args := ir.ParamNames(t.Type()) for i, origArg := range origArgs { if origArg == nil { continue @@ -4076,7 +4021,7 @@ func walkCheckPtrAlignment(n *ir.ConvExpr, init *ir.Nodes, count ir.Node) ir.Nod } if count == nil { - count = nodintconst(1) + count = ir.NewInt(1) } n.X = cheapexpr(n.X, init) @@ -4107,7 +4052,7 @@ func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node { return n } - if n.X.Op() == ir.ODOTPTR && isReflectHeaderDataField(n.X) { + if n.X.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(n.X) { return n } @@ -4141,7 +4086,7 @@ func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node { cheap := cheapexpr(n, init) slice := mkdotargslice(types.NewSlice(types.Types[types.TUNSAFEPTR]), originals) - slice.SetEsc(EscNone) + slice.SetEsc(ir.EscNone) init.Append(mkcall("checkptrArithmetic", nil, init, convnop(cheap, types.Types[types.TUNSAFEPTR]), slice)) // TODO(khr): Mark backing store of slice as dead. This will allow us to reuse @@ -4150,13 +4095,6 @@ func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node { return cheap } -// checkPtr reports whether pointer checking should be enabled for -// function fn at a given level. See debugHelpFooter for defined -// levels. -func checkPtr(fn *ir.Func, level int) bool { - return base.Debug.Checkptr >= level && fn.Pragma&ir.NoCheckPtr == 0 -} - // appendWalkStmt typechecks and walks stmt and then appends it to init. func appendWalkStmt(init *ir.Nodes, stmt ir.Node) { op := stmt.Op() diff --git a/src/cmd/compile/internal/ir/cfg.go b/src/cmd/compile/internal/ir/cfg.go new file mode 100644 index 0000000000000..d986ac3a1e974 --- /dev/null +++ b/src/cmd/compile/internal/ir/cfg.go @@ -0,0 +1,26 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +var ( + // maximum size variable which we will allocate on the stack. + // This limit is for explicit variable declarations like "var x T" or "x := ...". 
+ // Note: the flag smallframes can update this value. + MaxStackVarSize = int64(10 * 1024 * 1024) + + // maximum size of implicit variables that we will allocate on the stack. + // p := new(T) allocating T on the stack + // p := &T{} allocating T on the stack + // s := make([]T, n) allocating [n]T on the stack + // s := []byte("...") allocating [n]byte on the stack + // Note: the flag smallframes can update this value. + MaxImplicitStackVarSize = int64(64 * 1024) + + // MaxSmallArraySize is the maximum size of an array which is considered small. + // Small arrays will be initialized directly with a sequence of constant stores. + // Large arrays will be initialized by copying from a static temp. + // 256 bytes was chosen to minimize generated code + statictmp size. + MaxSmallArraySize = int64(256) +) diff --git a/src/cmd/compile/internal/ir/const.go b/src/cmd/compile/internal/ir/const.go new file mode 100644 index 0000000000000..bfa013623255c --- /dev/null +++ b/src/cmd/compile/internal/ir/const.go @@ -0,0 +1,99 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import ( + "go/constant" + "math" + "math/big" + + "cmd/compile/internal/base" + "cmd/compile/internal/types" +) + +func NewBool(b bool) Node { + return NewLiteral(constant.MakeBool(b)) +} + +func NewInt(v int64) Node { + return NewLiteral(constant.MakeInt64(v)) +} + +func NewString(s string) Node { + return NewLiteral(constant.MakeString(s)) +} + +const ( + // Maximum size in bits for big.Ints before signalling + // overflow and also mantissa precision for big.Floats. + ConstPrec = 512 +) + +func BigFloat(v constant.Value) *big.Float { + f := new(big.Float) + f.SetPrec(ConstPrec) + switch u := constant.Val(v).(type) { + case int64: + f.SetInt64(u) + case *big.Int: + f.SetInt(u) + case *big.Float: + f.Set(u) + case *big.Rat: + f.SetRat(u) + default: + base.Fatalf("unexpected: %v", u) + } + return f +} + +// ConstOverflow reports whether constant value v is too large +// to represent with type t. +func ConstOverflow(v constant.Value, t *types.Type) bool { + switch { + case t.IsInteger(): + bits := uint(8 * t.Size()) + if t.IsUnsigned() { + x, ok := constant.Uint64Val(v) + return !ok || x>>bits != 0 + } + x, ok := constant.Int64Val(v) + if x < 0 { + x = ^x + } + return !ok || x>>(bits-1) != 0 + case t.IsFloat(): + switch t.Size() { + case 4: + f, _ := constant.Float32Val(v) + return math.IsInf(float64(f), 0) + case 8: + f, _ := constant.Float64Val(v) + return math.IsInf(f, 0) + } + case t.IsComplex(): + ft := types.FloatForComplex(t) + return ConstOverflow(constant.Real(v), ft) || ConstOverflow(constant.Imag(v), ft) + } + base.Fatalf("doesoverflow: %v, %v", v, t) + panic("unreachable") +} + +// IsConstNode reports whether n is a Go language constant (as opposed to a +// compile-time constant). +// +// Expressions derived from nil, like string([]byte(nil)), while they +// may be known at compile time, are not Go language constants. 
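
ConstOverflow's signed-integer branch relies on a complement identity: for negative x, ^x needs exactly as many value bits as x, so a single unsigned shift test covers both signs. A standalone mirror of that branch (ours), runnable against the same go/constant package the compiler uses; the IsConstNode helper documented above continues below:

package main

import (
	"fmt"
	"go/constant"
)

// intOverflows mirrors the signed-integer case of ConstOverflow:
// does v fit in a signed integer of the given bit width?
func intOverflows(v constant.Value, bits uint) bool {
	x, ok := constant.Int64Val(v)
	if x < 0 {
		x = ^x // ^(-128) == 127: both need 8 bits as int8
	}
	return !ok || x>>(bits-1) != 0
}

func main() {
	fmt.Println(intOverflows(constant.MakeInt64(127), 8))  // false
	fmt.Println(intOverflows(constant.MakeInt64(128), 8))  // true
	fmt.Println(intOverflows(constant.MakeInt64(-128), 8)) // false
	fmt.Println(intOverflows(constant.MakeInt64(-129), 8)) // true
}
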
+func IsConstNode(n Node) bool {
+	return n.Op() == OLITERAL
+}
+
+// IsSmallIntConst reports whether n is an integer constant that fits in an int32.
+func IsSmallIntConst(n Node) bool {
+	if n.Op() == OLITERAL {
+		v, ok := constant.Int64Val(n.Val())
+		return ok && int64(int32(v)) == v
+	}
+	return false
+}
diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
index 39a408fdc79b3..640cc039546ec 100644
--- a/src/cmd/compile/internal/ir/expr.go
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -5,10 +5,13 @@
 package ir
 
 import (
+	"bytes"
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/types"
 	"cmd/internal/src"
+	"fmt"
 	"go/constant"
+	"go/token"
 )
 
 func maybeDo(x Node, err error, do func(Node) error) error {
@@ -783,3 +786,371 @@ func (n *UnaryExpr) SetOp(op Op) {
 		n.op = op
 	}
 }
+
+func IsZero(n Node) bool {
+	switch n.Op() {
+	case ONIL:
+		return true
+
+	case OLITERAL:
+		switch u := n.Val(); u.Kind() {
+		case constant.String:
+			return constant.StringVal(u) == ""
+		case constant.Bool:
+			return !constant.BoolVal(u)
+		default:
+			return constant.Sign(u) == 0
+		}
+
+	case OARRAYLIT:
+		n := n.(*CompLitExpr)
+		for _, n1 := range n.List {
+			if n1.Op() == OKEY {
+				n1 = n1.(*KeyExpr).Value
+			}
+			if !IsZero(n1) {
+				return false
+			}
+		}
+		return true
+
+	case OSTRUCTLIT:
+		n := n.(*CompLitExpr)
+		for _, n1 := range n.List {
+			n1 := n1.(*StructKeyExpr)
+			if !IsZero(n1.Value) {
+				return false
+			}
+		}
+		return true
+	}
+
+	return false
+}
+
+// IsAssignable reports whether n is an lvalue: an expression that can
+// appear on the left-hand side of an assignment.
+func IsAssignable(n Node) bool {
+	switch n.Op() {
+	case OINDEX:
+		n := n.(*IndexExpr)
+		if n.X.Type() != nil && n.X.Type().IsArray() {
+			return IsAssignable(n.X)
+		}
+		if n.X.Type() != nil && n.X.Type().IsString() {
+			return false
+		}
+		fallthrough
+	case ODEREF, ODOTPTR, OCLOSUREREAD:
+		return true
+
+	case ODOT:
+		n := n.(*SelectorExpr)
+		return IsAssignable(n.X)
+
+	case ONAME:
+		n := n.(*Name)
+		if n.Class_ == PFUNC {
+			return false
+		}
+		return true
+
+	case ONAMEOFFSET:
+		return true
+	}
+
+	return false
+}
+
+// StaticValue follows chains of OCONVNOPs and single static assignments
+// to find the earliest expression that n always evaluates to.
+func StaticValue(n Node) Node {
+	for {
+		if n.Op() == OCONVNOP {
+			n = n.(*ConvExpr).X
+			continue
+		}
+
+		n1 := staticValue1(n)
+		if n1 == nil {
+			return n
+		}
+		n = n1
+	}
+}
+
+// staticValue1 implements a simple SSA-like optimization. If n is a local variable
+// that is initialized and never reassigned, staticValue1 returns the initializer
+// expression. Otherwise, it returns nil.
+func staticValue1(nn Node) Node {
+	if nn.Op() != ONAME {
+		return nil
+	}
+	n := nn.(*Name)
+	if n.Class_ != PAUTO || n.Name().Addrtaken() {
+		return nil
+	}
+
+	defn := n.Name().Defn
+	if defn == nil {
+		return nil
+	}
+
+	var rhs Node
+FindRHS:
+	switch defn.Op() {
+	case OAS:
+		defn := defn.(*AssignStmt)
+		rhs = defn.Y
+	case OAS2:
+		defn := defn.(*AssignListStmt)
+		for i, lhs := range defn.Lhs {
+			if lhs == n {
+				rhs = defn.Rhs[i]
+				break FindRHS
+			}
+		}
+		base.Fatalf("%v missing from LHS of %v", n, defn)
+	default:
+		return nil
+	}
+	if rhs == nil {
+		base.Fatalf("RHS is nil: %v", defn)
+	}
+
+	if reassigned(n) {
+		return nil
+	}
+
+	return rhs
+}
+
+// reassigned takes an ONAME node, walks the function in which it is
+// defined, and reports whether the name has any assignments other than
+// its declaration.
+// NB: global variables are always considered to be re-assigned.
+// TODO: handle initial declaration not including an assignment and followed by a single assignment?
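
Taken together, StaticValue, staticValue1, and the reassigned walker defined directly below amount to a tiny def-use analysis: follow a name to its unique initializer unless anything else ever writes it. A toy model (ours) of that contract:

package main

import "fmt"

type node struct {
	init       *node // sole initializer, if this node is a name
	reassigned bool  // any write besides the declaration?
	lit        string
}

func staticValue(n *node) *node {
	for n.init != nil && !n.reassigned {
		n = n.init // substitute the initializer, as staticValue1 does
	}
	return n
}

func main() {
	lit := &node{lit: `"hello"`}
	x := &node{init: lit}                   // x := "hello"; never written again
	y := &node{init: lit, reassigned: true} // y := "hello"; y = ... later
	fmt.Println(staticValue(x).lit)         // "hello"
	fmt.Println(staticValue(y).lit)         // "": substitution is inhibited
}
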
+func reassigned(name *Name) bool { + if name.Op() != ONAME { + base.Fatalf("reassigned %v", name) + } + // no way to reliably check for no-reassignment of globals, assume it can be + if name.Curfn == nil { + return true + } + return Any(name.Curfn, func(n Node) bool { + switch n.Op() { + case OAS: + n := n.(*AssignStmt) + if n.X == name && n != name.Defn { + return true + } + case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE, OAS2RECV, OSELRECV2: + n := n.(*AssignListStmt) + for _, p := range n.Lhs { + if p == name && n != name.Defn { + return true + } + } + } + return false + }) +} + +// IsIntrinsicCall reports whether the compiler back end will treat the call as an intrinsic operation. +var IsIntrinsicCall = func(*CallExpr) bool { return false } + +// SameSafeExpr checks whether it is safe to reuse one of l and r +// instead of computing both. SameSafeExpr assumes that l and r are +// used in the same statement or expression. In order for it to be +// safe to reuse l or r, they must: +// * be the same expression +// * not have side-effects (no function calls, no channel ops); +// however, panics are ok +// * not cause inappropriate aliasing; e.g. two string to []byte +// conversions, must result in two distinct slices +// +// The handling of OINDEXMAP is subtle. OINDEXMAP can occur both +// as an lvalue (map assignment) and an rvalue (map access). This is +// currently OK, since the only place SameSafeExpr gets used on an +// lvalue expression is for OSLICE and OAPPEND optimizations, and it +// is correct in those settings. +func SameSafeExpr(l Node, r Node) bool { + if l.Op() != r.Op() || !types.Identical(l.Type(), r.Type()) { + return false + } + + switch l.Op() { + case ONAME, OCLOSUREREAD: + return l == r + + case ODOT, ODOTPTR: + l := l.(*SelectorExpr) + r := r.(*SelectorExpr) + return l.Sel != nil && r.Sel != nil && l.Sel == r.Sel && SameSafeExpr(l.X, r.X) + + case ODEREF: + l := l.(*StarExpr) + r := r.(*StarExpr) + return SameSafeExpr(l.X, r.X) + + case ONOT, OBITNOT, OPLUS, ONEG: + l := l.(*UnaryExpr) + r := r.(*UnaryExpr) + return SameSafeExpr(l.X, r.X) + + case OCONVNOP: + l := l.(*ConvExpr) + r := r.(*ConvExpr) + return SameSafeExpr(l.X, r.X) + + case OCONV: + l := l.(*ConvExpr) + r := r.(*ConvExpr) + // Some conversions can't be reused, such as []byte(str). + // Allow only numeric-ish types. This is a bit conservative. + return types.IsSimple[l.Type().Kind()] && SameSafeExpr(l.X, r.X) + + case OINDEX, OINDEXMAP: + l := l.(*IndexExpr) + r := r.(*IndexExpr) + return SameSafeExpr(l.X, r.X) && SameSafeExpr(l.Index, r.Index) + + case OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD: + l := l.(*BinaryExpr) + r := r.(*BinaryExpr) + return SameSafeExpr(l.X, r.X) && SameSafeExpr(l.Y, r.Y) + + case OLITERAL: + return constant.Compare(l.Val(), token.EQL, r.Val()) + + case ONIL: + return true + } + + return false +} + +// ShouldCheckPtr reports whether pointer checking should be enabled for +// function fn at a given level. See debugHelpFooter for defined +// levels. +func ShouldCheckPtr(fn *Func, level int) bool { + return base.Debug.Checkptr >= level && fn.Pragma&NoCheckPtr == 0 +} + +// IsReflectHeaderDataField reports whether l is an expression p.Data +// where p has type reflect.SliceHeader or reflect.StringHeader. 
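
SameSafeExpr's "no side effects" requirement is the clause that does the real work: two textually identical expressions are interchangeable only if evaluating them twice cannot differ. Function calls break that immediately, which is why OCALLFUNC never appears in the switch above. A runnable illustration (ours):

package main

import "fmt"

var calls int

func idx() int { calls++; return calls }

func main() {
	s := []int{0, 10, 20, 30}
	// s[idx()] and s[idx()] are syntactically identical, but the two
	// evaluations index different elements, so such expressions can
	// never be treated as "same safe".
	fmt.Println(s[idx()], s[idx()]) // 10 20
}
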
+func IsReflectHeaderDataField(l Node) bool {
+ if l.Type() != types.Types[types.TUINTPTR] {
+ return false
+ }
+
+ var tsym *types.Sym
+ switch l.Op() {
+ case ODOT:
+ l := l.(*SelectorExpr)
+ tsym = l.X.Type().Sym()
+ case ODOTPTR:
+ l := l.(*SelectorExpr)
+ tsym = l.X.Type().Elem().Sym()
+ default:
+ return false
+ }
+
+ if tsym == nil || l.Sym().Name != "Data" || tsym.Pkg.Path != "reflect" {
+ return false
+ }
+ return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader"
+}
+
+func ParamNames(ft *types.Type) []Node {
+ args := make([]Node, ft.NumParams())
+ for i, f := range ft.Params().FieldSlice() {
+ args[i] = AsNode(f.Nname)
+ }
+ return args
+}
+
+// MethodSym returns the method symbol representing a method name
+// associated with a specific receiver type.
+//
+// Method symbols can be used to distinguish the same method appearing
+// in different method sets. For example, T.M and (*T).M have distinct
+// method symbols.
+//
+// The returned symbol will be marked as a function.
+func MethodSym(recv *types.Type, msym *types.Sym) *types.Sym {
+ sym := MethodSymSuffix(recv, msym, "")
+ sym.SetFunc(true)
+ return sym
+}
+
+// MethodSymSuffix is like MethodSym, but allows attaching a
+// distinguisher suffix. To avoid collisions, the suffix must not
+// start with a letter, number, or period.
+func MethodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sym {
+ if msym.IsBlank() {
+ base.Fatalf("blank method name")
+ }
+
+ rsym := recv.Sym()
+ if recv.IsPtr() {
+ if rsym != nil {
+ base.Fatalf("declared pointer receiver type: %v", recv)
+ }
+ rsym = recv.Elem().Sym()
+ }
+
+ // Find the package the receiver type appeared in. For
+ // anonymous receiver types (i.e., anonymous structs with
+ // embedded fields), use the "go" pseudo-package instead.
+ rpkg := Pkgs.Go
+ if rsym != nil {
+ rpkg = rsym.Pkg
+ }
+
+ var b bytes.Buffer
+ if recv.IsPtr() {
+ // The parentheses aren't really necessary, but
+ // they're pretty traditional at this point.
+ fmt.Fprintf(&b, "(%-S)", recv)
+ } else {
+ fmt.Fprintf(&b, "%-S", recv)
+ }
+
+ // A particular receiver type may have multiple non-exported
+ // methods with the same name. To disambiguate them, include a
+ // package qualifier for names that came from a different
+ // package than the receiver type.
+ if !types.IsExported(msym.Name) && msym.Pkg != rpkg {
+ b.WriteString(".")
+ b.WriteString(msym.Pkg.Prefix)
+ }
+
+ b.WriteString(".")
+ b.WriteString(msym.Name)
+ b.WriteString(suffix)
+
+ return rpkg.LookupBytes(b.Bytes())
+}
+
+// MethodExprName returns the ONAME representing the method
+// referenced by expression n, which must be a method selector,
+// method expression, or method value.
+func MethodExprName(n Node) *Name {
+ name, _ := MethodExprFunc(n).Nname.(*Name)
+ return name
+}
+
+// MethodExprFunc is like MethodExprName, but returns the types.Field instead.
+func MethodExprFunc(n Node) *types.Field {
+ switch n.Op() {
+ case ODOTMETH:
+ return n.(*SelectorExpr).Selection
+ case OMETHEXPR:
+ return n.(*MethodExpr).Method
+ case OCALLPART:
+ n := n.(*CallPartExpr)
+ return n.Method
+ }
+ base.Fatalf("unexpected node: %v (%v)", n, n.Op())
+ panic("unreachable")
+}
diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go
index 57837e9e6b84d..a93516d716240 100644
--- a/src/cmd/compile/internal/ir/func.go
+++ b/src/cmd/compile/internal/ir/func.go
@@ -261,3 +261,30 @@ func PkgFuncName(n Node) string {
 }
 return p + "." 
+ s.Name } + +var CurFunc *Func + +func FuncSymName(s *types.Sym) string { + return s.Name + "·f" +} + +// NewFuncNameAt generates a new name node for a function or method. +func NewFuncNameAt(pos src.XPos, s *types.Sym, fn *Func) *Name { + if fn.Nname != nil { + base.Fatalf("newFuncName - already have name") + } + n := NewNameAt(pos, s) + n.SetFunc(fn) + fn.Nname = n + return n +} + +// MarkFunc marks a node as a function. +func MarkFunc(n *Name) { + if n.Op() != ONAME || n.Class_ != Pxxx { + base.Fatalf("expected ONAME/Pxxx node, got %v", n) + } + + n.Class_ = PFUNC + n.Sym().SetFunc(true) +} diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 770f8119e0eab..93535f4ceecc4 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -413,3 +413,25 @@ func NewPkgName(pos src.XPos, sym *types.Sym, pkg *types.Pkg) *PkgName { p.pos = pos return p } + +// IsParamStackCopy reports whether this is the on-stack copy of a +// function parameter that moved to the heap. +func IsParamStackCopy(n Node) bool { + if n.Op() != ONAME { + return false + } + name := n.(*Name) + return (name.Class_ == PPARAM || name.Class_ == PPARAMOUT) && name.Heapaddr != nil +} + +// IsParamHeapCopy reports whether this is the on-heap copy of +// a function parameter that moved to the heap. +func IsParamHeapCopy(n Node) bool { + if n.Op() != ONAME { + return false + } + name := n.(*Name) + return name.Class_ == PAUTOHEAP && name.Name().Stackcopy != nil +} + +var RegFP *Name diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 34b89752adc59..b4a557f290d79 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -504,3 +504,99 @@ func IsBlank(n Node) bool { func IsMethod(n Node) bool { return n.Type().Recv() != nil } + +func HasNamedResults(fn *Func) bool { + typ := fn.Type() + return typ.NumResults() > 0 && types.OrigSym(typ.Results().Field(0).Sym) != nil +} + +// HasUniquePos reports whether n has a unique position that can be +// used for reporting error messages. +// +// It's primarily used to distinguish references to named objects, +// whose Pos will point back to their declaration position rather than +// their usage position. +func HasUniquePos(n Node) bool { + switch n.Op() { + case ONAME, OPACK: + return false + case OLITERAL, ONIL, OTYPE: + if n.Sym() != nil { + return false + } + } + + if !n.Pos().IsKnown() { + if base.Flag.K != 0 { + base.Warn("setlineno: unknown position (line 0)") + } + return false + } + + return true +} + +func SetPos(n Node) src.XPos { + lno := base.Pos + if n != nil && HasUniquePos(n) { + base.Pos = n.Pos() + } + return lno +} + +// The result of InitExpr MUST be assigned back to n, e.g. +// n.Left = InitExpr(init, n.Left) +func InitExpr(init []Node, n Node) Node { + if len(init) == 0 { + return n + } + if MayBeShared(n) { + // Introduce OCONVNOP to hold init list. + old := n + n = NewConvExpr(base.Pos, OCONVNOP, nil, old) + n.SetType(old.Type()) + n.SetTypecheck(1) + } + + n.PtrInit().Prepend(init...) + n.SetHasCall(true) + return n +} + +// what's the outer value that a write to n affects? +// outer value means containing struct or array. 
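// Editor's aside (illustrative sketch, not part of this patch): for an
// assignment such as
//
//	p.x[3].s = "hi" // p a local struct, x a fixed-size array field
//
// OuterValue, below, peels the ODOT and array OINDEX nodes and returns the
// node for p itself: for aliasing and ordering purposes the write affects
// all of p. An OINDEX into a slice stops the walk, because a slice element
// lives in a separately allocated backing array.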
+func OuterValue(n Node) Node { + for { + switch nn := n; nn.Op() { + case OXDOT: + base.Fatalf("OXDOT in walk") + case ODOT: + nn := nn.(*SelectorExpr) + n = nn.X + continue + case OPAREN: + nn := nn.(*ParenExpr) + n = nn.X + continue + case OCONVNOP: + nn := nn.(*ConvExpr) + n = nn.X + continue + case OINDEX: + nn := nn.(*IndexExpr) + if nn.X.Type() != nil && nn.X.Type().IsArray() { + n = nn.X + continue + } + } + + return n + } +} + +const ( + EscUnknown = iota + EscNone // Does not escape to heap, result, or parameters. + EscHeap // Reachable from the heap + EscNever // By construction will not escape. +) diff --git a/src/cmd/compile/internal/gc/scc.go b/src/cmd/compile/internal/ir/scc.go similarity index 75% rename from src/cmd/compile/internal/gc/scc.go rename to src/cmd/compile/internal/ir/scc.go index a5a64809582e8..4f646e22b5ab1 100644 --- a/src/cmd/compile/internal/gc/scc.go +++ b/src/cmd/compile/internal/ir/scc.go @@ -2,9 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc - -import "cmd/compile/internal/ir" +package ir // Strongly connected components. // @@ -32,13 +30,13 @@ import "cmd/compile/internal/ir" // when analyzing a set of mutually recursive functions. type bottomUpVisitor struct { - analyze func([]*ir.Func, bool) + analyze func([]*Func, bool) visitgen uint32 - nodeID map[*ir.Func]uint32 - stack []*ir.Func + nodeID map[*Func]uint32 + stack []*Func } -// visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list. +// VisitFuncsBottomUp invokes analyze on the ODCLFUNC nodes listed in list. // It calls analyze with successive groups of functions, working from // the bottom of the call graph upward. Each time analyze is called with // a list of functions, every function on that list only calls other functions @@ -51,13 +49,13 @@ type bottomUpVisitor struct { // If recursive is false, the list consists of only a single function and its closures. // If recursive is true, the list may still contain only a single function, // if that function is itself recursive. 
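// Editor's aside (illustrative sketch, not part of this patch): a typical
// caller after this rename. xtop is gc's list of top-level declarations;
// analyzeOneFunc is a hypothetical per-function analysis.
//
//	ir.VisitFuncsBottomUp(xtop, func(list []*ir.Func, recursive bool) {
//		// Everything the members of list call, outside list itself,
//		// has already been handed to an earlier analyze call.
//		for _, fn := range list {
//			analyzeOneFunc(fn, recursive)
//		}
//	})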
-func visitBottomUp(list []ir.Node, analyze func(list []*ir.Func, recursive bool)) { +func VisitFuncsBottomUp(list []Node, analyze func(list []*Func, recursive bool)) { var v bottomUpVisitor v.analyze = analyze - v.nodeID = make(map[*ir.Func]uint32) + v.nodeID = make(map[*Func]uint32) for _, n := range list { - if n.Op() == ir.ODCLFUNC { - n := n.(*ir.Func) + if n.Op() == ODCLFUNC { + n := n.(*Func) if !n.IsHiddenClosure() { v.visit(n) } @@ -65,7 +63,7 @@ func visitBottomUp(list []ir.Node, analyze func(list []*ir.Func, recursive bool) } } -func (v *bottomUpVisitor) visit(n *ir.Func) uint32 { +func (v *bottomUpVisitor) visit(n *Func) uint32 { if id := v.nodeID[n]; id > 0 { // already visited return id @@ -78,45 +76,45 @@ func (v *bottomUpVisitor) visit(n *ir.Func) uint32 { min := v.visitgen v.stack = append(v.stack, n) - ir.Visit(n, func(n ir.Node) { + Visit(n, func(n Node) { switch n.Op() { - case ir.ONAME: - n := n.(*ir.Name) - if n.Class_ == ir.PFUNC { + case ONAME: + n := n.(*Name) + if n.Class_ == PFUNC { if n != nil && n.Name().Defn != nil { - if m := v.visit(n.Name().Defn.(*ir.Func)); m < min { + if m := v.visit(n.Name().Defn.(*Func)); m < min { min = m } } } - case ir.OMETHEXPR: - n := n.(*ir.MethodExpr) - fn := methodExprName(n) + case OMETHEXPR: + n := n.(*MethodExpr) + fn := MethodExprName(n) if fn != nil && fn.Defn != nil { - if m := v.visit(fn.Defn.(*ir.Func)); m < min { + if m := v.visit(fn.Defn.(*Func)); m < min { min = m } } - case ir.ODOTMETH: - n := n.(*ir.SelectorExpr) - fn := methodExprName(n) - if fn != nil && fn.Op() == ir.ONAME && fn.Class_ == ir.PFUNC && fn.Defn != nil { - if m := v.visit(fn.Defn.(*ir.Func)); m < min { + case ODOTMETH: + n := n.(*SelectorExpr) + fn := MethodExprName(n) + if fn != nil && fn.Op() == ONAME && fn.Class_ == PFUNC && fn.Defn != nil { + if m := v.visit(fn.Defn.(*Func)); m < min { min = m } } - case ir.OCALLPART: - n := n.(*ir.CallPartExpr) - fn := ir.AsNode(callpartMethod(n).Nname) - if fn != nil && fn.Op() == ir.ONAME { - if fn := fn.(*ir.Name); fn.Class_ == ir.PFUNC && fn.Name().Defn != nil { - if m := v.visit(fn.Name().Defn.(*ir.Func)); m < min { + case OCALLPART: + n := n.(*CallPartExpr) + fn := AsNode(n.Method.Nname) + if fn != nil && fn.Op() == ONAME { + if fn := fn.(*Name); fn.Class_ == PFUNC && fn.Name().Defn != nil { + if m := v.visit(fn.Name().Defn.(*Func)); m < min { min = m } } } - case ir.OCLOSURE: - n := n.(*ir.ClosureExpr) + case OCLOSURE: + n := n.(*ClosureExpr) if m := v.visit(n.Func); m < min { min = m } From dac0de3748cc816352da56f516506f80c33db4a5 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 23 Dec 2020 00:39:45 -0500 Subject: [PATCH 226/474] [dev.regabi] cmd/compile: move type size calculations into package types [generated] To break up package gc, we need to put these calculations somewhere lower in the import graph, either an existing or new package. Package types already needs this code and is using hacks to get it without an import cycle. We can remove the hacks and set up for the new package gc by moving the code into package types itself. [git-generate] cd src/cmd/compile/internal/gc rf ' # Remove old import cycle hacks in gc. rm TypecheckInit:/types.Widthptr =/-0,/types.Dowidth =/+0 \ ../ssa/export_test.go:/types.Dowidth =/-+ ex { import "cmd/compile/internal/types" types.Widthptr -> Widthptr types.Dowidth -> dowidth } # Disable CalcSize in tests instead of base.Fatalf sub dowidth:/base.Fatalf\("dowidth without betypeinit"\)/ \ // Assume this is a test. 
\ return # Move size calculation into cmd/compile/internal/types mv Widthptr PtrSize mv Widthreg RegSize mv slicePtrOffset SlicePtrOffset mv sliceLenOffset SliceLenOffset mv sliceCapOffset SliceCapOffset mv sizeofSlice SliceSize mv sizeofString StringSize mv skipDowidthForTracing SkipSizeForTracing mv dowidth CalcSize mv checkwidth CheckSize mv widstruct calcStructOffset mv sizeCalculationDisabled CalcSizeDisabled mv defercheckwidth DeferCheckSize mv resumecheckwidth ResumeCheckSize mv typeptrdata PtrDataSize mv \ PtrSize RegSize SlicePtrOffset SkipSizeForTracing typePos align.go PtrDataSize \ size.go mv size.go cmd/compile/internal/types ' : # Remove old import cycle hacks in types. cd ../types rf ' ex { Widthptr -> PtrSize Dowidth -> CalcSize } rm Widthptr Dowidth ' Change-Id: Ib96cdc6bda2617235480c29392ea5cfb20f60cd8 Reviewed-on: https://go-review.googlesource.com/c/go/+/279234 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/amd64/ggen.go | 15 +- src/cmd/compile/internal/amd64/ssa.go | 4 +- src/cmd/compile/internal/arm/ggen.go | 9 +- src/cmd/compile/internal/arm64/ggen.go | 17 +- src/cmd/compile/internal/gc/abiutils.go | 6 +- src/cmd/compile/internal/gc/abiutils_test.go | 6 +- .../compile/internal/gc/abiutilsaux_test.go | 2 +- src/cmd/compile/internal/gc/alg.go | 6 +- src/cmd/compile/internal/gc/closure.go | 12 +- src/cmd/compile/internal/gc/embed.go | 2 +- src/cmd/compile/internal/gc/gen.go | 2 +- src/cmd/compile/internal/gc/go.go | 29 -- src/cmd/compile/internal/gc/iimport.go | 6 +- src/cmd/compile/internal/gc/inl.go | 2 +- src/cmd/compile/internal/gc/main.go | 6 +- src/cmd/compile/internal/gc/obj.go | 26 +- src/cmd/compile/internal/gc/order.go | 2 +- src/cmd/compile/internal/gc/pgen.go | 28 +- src/cmd/compile/internal/gc/plive.go | 22 +- src/cmd/compile/internal/gc/racewalk.go | 2 +- src/cmd/compile/internal/gc/reflect.go | 127 +++----- src/cmd/compile/internal/gc/sinit.go | 14 +- src/cmd/compile/internal/gc/ssa.go | 56 ++-- src/cmd/compile/internal/gc/subr.go | 14 +- src/cmd/compile/internal/gc/swt.go | 4 +- src/cmd/compile/internal/gc/typecheck.go | 47 ++- src/cmd/compile/internal/gc/universe.go | 18 +- src/cmd/compile/internal/gc/unsafe.go | 3 +- src/cmd/compile/internal/gc/walk.go | 26 +- src/cmd/compile/internal/mips/ggen.go | 9 +- src/cmd/compile/internal/mips64/ggen.go | 13 +- src/cmd/compile/internal/ppc64/ggen.go | 11 +- src/cmd/compile/internal/riscv64/ggen.go | 11 +- src/cmd/compile/internal/ssa/export_test.go | 1 - .../internal/{gc/align.go => types/size.go} | 289 ++++++++++++------ src/cmd/compile/internal/types/type.go | 10 +- src/cmd/compile/internal/types/utils.go | 2 - src/cmd/compile/internal/x86/ggen.go | 11 +- 38 files changed, 439 insertions(+), 431 deletions(-) rename src/cmd/compile/internal/{gc/align.go => types/size.go} (66%) diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go index 0bb0627f92e3e..48b00b3da9e32 100644 --- a/src/cmd/compile/internal/amd64/ggen.go +++ b/src/cmd/compile/internal/amd64/ggen.go @@ -8,6 +8,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/gc" "cmd/compile/internal/ir" + "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/x86" "cmd/internal/objabi" @@ -63,9 +64,9 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Pr return p } - if cnt%int64(gc.Widthreg) != 0 { + if cnt%int64(types.RegSize) != 0 { // should only happen with nacl - if cnt%int64(gc.Widthptr) != 0 { + if 
cnt%int64(types.PtrSize) != 0 { base.Fatalf("zerorange count not a multiple of widthptr %d", cnt) } if *state&ax == 0 { @@ -73,8 +74,8 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Pr *state |= ax } p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off) - off += int64(gc.Widthptr) - cnt -= int64(gc.Widthptr) + off += int64(types.PtrSize) + cnt -= int64(types.PtrSize) } if cnt == 8 { @@ -83,7 +84,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Pr *state |= ax } p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off) - } else if !isPlan9 && cnt <= int64(8*gc.Widthreg) { + } else if !isPlan9 && cnt <= int64(8*types.RegSize) { if *state&x0 == 0 { p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0) *state |= x0 @@ -96,7 +97,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Pr if cnt%16 != 0 { p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16)) } - } else if !isPlan9 && (cnt <= int64(128*gc.Widthreg)) { + } else if !isPlan9 && (cnt <= int64(128*types.RegSize)) { if *state&x0 == 0 { p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0) *state |= x0 @@ -114,7 +115,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Pr *state |= ax } - p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0) + p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0) p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0) p = pp.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0) p = pp.Appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0) diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 055d1894d406b..0150bd296ae1e 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1014,7 +1014,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case ssa.OpAMD64LoweredGetCallerSP: // caller's SP is the address of the first arg mov := x86.AMOVQ - if gc.Widthptr == 4 { + if types.PtrSize == 4 { mov = x86.AMOVL } p := s.Prog(mov) @@ -1036,7 +1036,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = gc.BoundsCheckFunc[v.AuxInt] - s.UseArgs(int64(2 * gc.Widthptr)) // space used in callee args area by assembly stubs + s.UseArgs(int64(2 * types.PtrSize)) // space used in callee args area by assembly stubs case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL, ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL, diff --git a/src/cmd/compile/internal/arm/ggen.go b/src/cmd/compile/internal/arm/ggen.go index 2e4de9893bced..2363d76346e29 100644 --- a/src/cmd/compile/internal/arm/ggen.go +++ b/src/cmd/compile/internal/arm/ggen.go @@ -7,6 +7,7 @@ package arm import ( "cmd/compile/internal/gc" "cmd/compile/internal/ir" + "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/arm" ) @@ -20,17 +21,17 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog *r0 = 1 } - if cnt < int64(4*gc.Widthptr) { - for i := int64(0); i < cnt; i += int64(gc.Widthptr) { + if cnt < int64(4*types.PtrSize) { + for i := int64(0); i < cnt; i += int64(types.PtrSize) { p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, 
obj.TYPE_MEM, arm.REGSP, 4+off+i) } - } else if cnt <= int64(128*gc.Widthptr) { + } else if cnt <= int64(128*types.PtrSize) { p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0) p.Reg = arm.REGSP p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) p.To.Name = obj.NAME_EXTERN p.To.Sym = ir.Syms.Duffzero - p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr)) + p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize)) } else { p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0) p.Reg = arm.REGSP diff --git a/src/cmd/compile/internal/arm64/ggen.go b/src/cmd/compile/internal/arm64/ggen.go index 6c280267b6e20..37f11e0ff64cf 100644 --- a/src/cmd/compile/internal/arm64/ggen.go +++ b/src/cmd/compile/internal/arm64/ggen.go @@ -7,6 +7,7 @@ package arm64 import ( "cmd/compile/internal/gc" "cmd/compile/internal/ir" + "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/arm64" "cmd/internal/objabi" @@ -27,15 +28,15 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { if cnt == 0 { return p } - if cnt < int64(4*gc.Widthptr) { - for i := int64(0); i < cnt; i += int64(gc.Widthptr) { + if cnt < int64(4*types.PtrSize) { + for i := int64(0); i < cnt; i += int64(types.PtrSize) { p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i) } - } else if cnt <= int64(128*gc.Widthptr) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend - if cnt%(2*int64(gc.Widthptr)) != 0 { + } else if cnt <= int64(128*types.PtrSize) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend + if cnt%(2*int64(types.PtrSize)) != 0 { p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off) - off += int64(gc.Widthptr) - cnt -= int64(gc.Widthptr) + off += int64(types.PtrSize) + cnt -= int64(types.PtrSize) } p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0) p = pp.Appendpp(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0) @@ -43,7 +44,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) p.To.Name = obj.NAME_EXTERN p.To.Sym = ir.Syms.Duffzero - p.To.Offset = 4 * (64 - cnt/(2*int64(gc.Widthptr))) + p.To.Offset = 4 * (64 - cnt/(2*int64(types.PtrSize))) } else { // Not using REGTMP, so this is async preemptible (async preemption clobbers REGTMP). 
// We are at the function entry, where no register is live, so it is okay to clobber @@ -56,7 +57,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0) p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0) p.Reg = arm64.REGRT1 - p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(gc.Widthptr)) + p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(types.PtrSize)) p.Scond = arm64.C_XPRE p1 := p p = pp.Appendpp(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0) diff --git a/src/cmd/compile/internal/gc/abiutils.go b/src/cmd/compile/internal/gc/abiutils.go index 19de14d48c353..5822c088f969e 100644 --- a/src/cmd/compile/internal/gc/abiutils.go +++ b/src/cmd/compile/internal/gc/abiutils.go @@ -91,7 +91,7 @@ func ABIAnalyze(t *types.Type, config ABIConfig) ABIParamResultInfo { result.inparams = append(result.inparams, s.assignParamOrReturn(f.Type)) } - s.stackOffset = Rnd(s.stackOffset, int64(Widthreg)) + s.stackOffset = types.Rnd(s.stackOffset, int64(types.RegSize)) // Record number of spill slots needed. result.intSpillSlots = s.rUsed.intRegs @@ -160,7 +160,7 @@ type assignState struct { // specified type. func (state *assignState) stackSlot(t *types.Type) int64 { if t.Align > 0 { - state.stackOffset = Rnd(state.stackOffset, int64(t.Align)) + state.stackOffset = types.Rnd(state.stackOffset, int64(t.Align)) } rv := state.stackOffset state.stackOffset += t.Width @@ -226,7 +226,7 @@ func (state *assignState) floatUsed() int { // can register allocate, FALSE otherwise (and updates state // accordingly). func (state *assignState) regassignIntegral(t *types.Type) bool { - regsNeeded := int(Rnd(t.Width, int64(Widthptr)) / int64(Widthptr)) + regsNeeded := int(types.Rnd(t.Width, int64(types.PtrSize)) / int64(types.PtrSize)) // Floating point and complex. if t.IsFloat() || t.IsComplex() { diff --git a/src/cmd/compile/internal/gc/abiutils_test.go b/src/cmd/compile/internal/gc/abiutils_test.go index 6ed27d794f743..5a88332de80be 100644 --- a/src/cmd/compile/internal/gc/abiutils_test.go +++ b/src/cmd/compile/internal/gc/abiutils_test.go @@ -29,13 +29,13 @@ func TestMain(m *testing.M) { thearch.LinkArch = &x86.Linkamd64 thearch.REGSP = x86.REGSP thearch.MAXWIDTH = 1 << 50 - MaxWidth = thearch.MAXWIDTH + types.MaxWidth = thearch.MAXWIDTH base.Ctxt = obj.Linknew(thearch.LinkArch) base.Ctxt.DiagFunc = base.Errorf base.Ctxt.DiagFlush = base.FlushErrors base.Ctxt.Bso = bufio.NewWriter(os.Stdout) - Widthptr = thearch.LinkArch.PtrSize - Widthreg = thearch.LinkArch.RegSize + types.PtrSize = thearch.LinkArch.PtrSize + types.RegSize = thearch.LinkArch.RegSize types.TypeLinkSym = func(t *types.Type) *obj.LSym { return typenamesym(t).Linksym() } diff --git a/src/cmd/compile/internal/gc/abiutilsaux_test.go b/src/cmd/compile/internal/gc/abiutilsaux_test.go index de35e8edd658b..8585ab9a30446 100644 --- a/src/cmd/compile/internal/gc/abiutilsaux_test.go +++ b/src/cmd/compile/internal/gc/abiutilsaux_test.go @@ -106,7 +106,7 @@ func difftokens(atoks []string, etoks []string) string { func abitest(t *testing.T, ft *types.Type, exp expectedDump) { - dowidth(ft) + types.CalcSize(ft) // Analyze with full set of registers. 
regRes := ABIAnalyze(ft, configAMD64) diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index d21b0d492cc24..dab27b49292a7 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -253,7 +253,7 @@ func genhash(t *types.Type) *obj.LSym { // Build closure. It doesn't close over any variables, so // it contains just the function pointer. dsymptr(closure, 0, sym.Linksym(), 0) - ggloblsym(closure, int32(Widthptr), obj.DUPOK|obj.RODATA) + ggloblsym(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA) return closure } @@ -302,7 +302,7 @@ func sysClosure(name string) *obj.LSym { if len(s.P) == 0 { f := sysfunc(name) dsymptr(s, 0, f, 0) - ggloblsym(s, int32(Widthptr), obj.DUPOK|obj.RODATA) + ggloblsym(s, int32(types.PtrSize), obj.DUPOK|obj.RODATA) } return s } @@ -632,7 +632,7 @@ func geneq(t *types.Type) *obj.LSym { // Generate a closure which points at the function we just generated. dsymptr(closure, 0, sym.Linksym(), 0) - ggloblsym(closure, int32(Widthptr), obj.DUPOK|obj.RODATA) + ggloblsym(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA) return closure } diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index e758cf86d4750..454d97e17f0b1 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -188,7 +188,7 @@ func capturevars(fn *ir.Func) { // type check the & of closed variables outside the closure, // so that the outer frame also grabs them and knows they escape. - dowidth(v.Type()) + types.CalcSize(v.Type()) var outer ir.Node outer = v.Outer @@ -276,23 +276,23 @@ func transformclosure(fn *ir.Func) { fn.Dcl = append(decls, fn.Dcl...) } - dowidth(f.Type()) + types.CalcSize(f.Type()) fn.SetType(f.Type()) // update type of ODCLFUNC } else { // The closure is not called, so it is going to stay as closure. var body []ir.Node - offset := int64(Widthptr) + offset := int64(types.PtrSize) for _, v := range fn.ClosureVars { // cv refers to the field inside of closure OSTRUCTLIT. typ := v.Type() if !v.Byval() { typ = types.NewPtr(typ) } - offset = Rnd(offset, int64(typ.Align)) + offset = types.Rnd(offset, int64(typ.Align)) cr := ir.NewClosureRead(typ, offset) offset += typ.Width - if v.Byval() && v.Type().Width <= int64(2*Widthptr) { + if v.Byval() && v.Type().Width <= int64(2*types.PtrSize) { // If it is a small variable captured by value, downgrade it to PAUTO. v.Class_ = ir.PAUTO fn.Dcl = append(fn.Dcl, v) @@ -466,7 +466,7 @@ func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir. fn.SetNeedctxt(true) // Declare and initialize variable holding receiver. 
- cr := ir.NewClosureRead(rcvrtype, Rnd(int64(Widthptr), int64(rcvrtype.Align))) + cr := ir.NewClosureRead(rcvrtype, types.Rnd(int64(types.PtrSize), int64(rcvrtype.Align))) ptr := NewName(lookup(".this")) declare(ptr, ir.PAUTO) ptr.SetUsed(true) diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go index ea23e26069444..70c5c2a25a048 100644 --- a/src/cmd/compile/internal/gc/embed.go +++ b/src/cmd/compile/internal/gc/embed.go @@ -215,7 +215,7 @@ func initEmbed(v *ir.Name) { slicedata := base.Ctxt.Lookup(`"".` + v.Sym().Name + `.files`) off := 0 // []files pointed at by Files - off = dsymptr(slicedata, off, slicedata, 3*Widthptr) // []file, pointing just past slice + off = dsymptr(slicedata, off, slicedata, 3*types.PtrSize) // []file, pointing just past slice off = duintptr(slicedata, off, uint64(len(files))) off = duintptr(slicedata, off, uint64(len(files))) diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go index 53298c878de9e..1084ff883f370 100644 --- a/src/cmd/compile/internal/gc/gen.go +++ b/src/cmd/compile/internal/gc/gen.go @@ -66,7 +66,7 @@ func tempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name { n.SetAutoTemp(true) curfn.Dcl = append(curfn.Dcl, n) - dowidth(t) + types.CalcSize(t) return n } diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index 4370a06839142..a2587b33616e4 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -12,31 +12,6 @@ import ( "sync" ) -// Slices in the runtime are represented by three components: -// -// type slice struct { -// ptr unsafe.Pointer -// len int -// cap int -// } -// -// Strings in the runtime are represented by two components: -// -// type string struct { -// ptr unsafe.Pointer -// len int -// } -// -// These variables are the offsets of fields and sizes of these structs. -var ( - slicePtrOffset int64 - sliceLenOffset int64 - sliceCapOffset int64 - - sizeofSlice int64 - sizeofString int64 -) - var pragcgobuf [][]string var decldepth int32 @@ -68,10 +43,6 @@ var ( var dclcontext ir.Class // PEXTERN/PAUTO -var Widthptr int - -var Widthreg int - var typecheckok bool // interface to back end diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index d04c432e5e26f..e9dc2a3248724 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -308,10 +308,10 @@ func (r *importReader) doDecl(sym *types.Sym) *ir.Name { // We also need to defer width calculations until // after the underlying type has been assigned. - defercheckwidth() + types.DeferCheckSize() underlying := r.typ() t.SetUnderlying(underlying) - resumecheckwidth() + types.ResumeCheckSize() if underlying.IsInterface() { r.typeExt(t) @@ -565,7 +565,7 @@ func (r *importReader) typ1() *types.Type { t := types.NewInterface(r.currPkg, append(embeddeds, methods...)) // Ensure we expand the interface in the frontend (#25055). - checkwidth(t) + types.CheckSize(t) return t } } diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index f21494b29183f..b9e19da43f4a1 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -1315,7 +1315,7 @@ func devirtualizeCall(call *ir.CallExpr) { // Receiver parameter size may have changed; need to update // call.Type to get correct stack offsets for result // parameters. 
- checkwidth(x.Type()) + types.CheckSize(x.Type()) switch ft := x.Type(); ft.NumResults() { case 0: case 1: diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index d55a8b0a7cac9..69ec5c8f2f2ca 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -190,9 +190,9 @@ func Main(archInit func(*Arch)) { initSSAEnv() initSSATables() - Widthptr = thearch.LinkArch.PtrSize - Widthreg = thearch.LinkArch.RegSize - MaxWidth = thearch.MAXWIDTH + types.PtrSize = thearch.LinkArch.PtrSize + types.RegSize = thearch.LinkArch.RegSize + types.MaxWidth = thearch.MAXWIDTH types.TypeLinkSym = func(t *types.Type) *obj.LSym { return typenamesym(t).Linksym() } diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index e56e34a7a198b..372277552f841 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -234,7 +234,7 @@ func dumpGlobal(n *ir.Name) { if n.Sym().Pkg != types.LocalPkg { return } - dowidth(n.Type()) + types.CalcSize(n.Type()) ggloblnod(n) } @@ -281,7 +281,7 @@ func dumpfuncsyms() { for _, s := range funcsyms { sf := s.Pkg.Lookup(ir.FuncSymName(s)).Linksym() dsymptr(sf, 0, s.Linksym(), 0) - ggloblsym(sf, int32(Widthptr), obj.DUPOK|obj.RODATA) + ggloblsym(sf, int32(types.PtrSize), obj.DUPOK|obj.RODATA) } } @@ -332,7 +332,7 @@ func duint32(s *obj.LSym, off int, v uint32) int { } func duintptr(s *obj.LSym, off int, v uint64) int { - return duintxx(s, off, v, Widthptr) + return duintxx(s, off, v, types.PtrSize) } func dbvec(s *obj.LSym, off int, bv bvec) int { @@ -505,9 +505,9 @@ func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int } func dsymptr(s *obj.LSym, off int, x *obj.LSym, xoff int) int { - off = int(Rnd(int64(off), int64(Widthptr))) - s.WriteAddr(base.Ctxt, int64(off), Widthptr, x, int64(xoff)) - off += Widthptr + off = int(types.Rnd(int64(off), int64(types.PtrSize))) + s.WriteAddr(base.Ctxt, int64(off), types.PtrSize, x, int64(xoff)) + off += types.PtrSize return off } @@ -530,9 +530,9 @@ func slicesym(n *ir.Name, noff int64, arr *ir.Name, lencap int64) { if arr.Op() != ir.ONAME { base.Fatalf("slicesym non-name arr %v", arr) } - s.WriteAddr(base.Ctxt, noff, Widthptr, arr.Sym().Linksym(), 0) - s.WriteInt(base.Ctxt, noff+sliceLenOffset, Widthptr, lencap) - s.WriteInt(base.Ctxt, noff+sliceCapOffset, Widthptr, lencap) + s.WriteAddr(base.Ctxt, noff, types.PtrSize, arr.Sym().Linksym(), 0) + s.WriteInt(base.Ctxt, noff+types.SliceLenOffset, types.PtrSize, lencap) + s.WriteInt(base.Ctxt, noff+types.SliceCapOffset, types.PtrSize, lencap) } // addrsym writes the static address of a to n. a must be an ONAME. @@ -548,7 +548,7 @@ func addrsym(n *ir.Name, noff int64, a *ir.Name, aoff int64) { base.Fatalf("addrsym a op %v", a.Op()) } s := n.Sym().Linksym() - s.WriteAddr(base.Ctxt, noff, Widthptr, a.Sym().Linksym(), aoff) + s.WriteAddr(base.Ctxt, noff, types.PtrSize, a.Sym().Linksym(), aoff) } // pfuncsym writes the static address of f to n. f must be a global function. @@ -564,7 +564,7 @@ func pfuncsym(n *ir.Name, noff int64, f *ir.Name) { base.Fatalf("pfuncsym class not PFUNC %d", f.Class_) } s := n.Sym().Linksym() - s.WriteAddr(base.Ctxt, noff, Widthptr, funcsym(f.Sym()).Linksym(), 0) + s.WriteAddr(base.Ctxt, noff, types.PtrSize, funcsym(f.Sym()).Linksym(), 0) } // litsym writes the static literal c to n. 
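// Editor's aside (worked example, not part of this patch): the dsymptr
// arithmetic above on amd64, where types.PtrSize == 8. A call with off == 5
// first rounds up, types.Rnd(5, 8) == 8, then WriteAddr emits an 8-byte
// address at offset 8, and dsymptr returns 8 + types.PtrSize == 16.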
@@ -615,8 +615,8 @@ func litsym(n *ir.Name, noff int64, c ir.Node, wid int) { case constant.String: i := constant.StringVal(u) symdata := stringsym(n.Pos(), i) - s.WriteAddr(base.Ctxt, noff, Widthptr, symdata, 0) - s.WriteInt(base.Ctxt, noff+int64(Widthptr), Widthptr, int64(len(i))) + s.WriteAddr(base.Ctxt, noff, types.PtrSize, symdata, 0) + s.WriteInt(base.Ctxt, noff+int64(types.PtrSize), types.PtrSize, int64(len(i))) default: base.Fatalf("litsym unhandled OLITERAL %v", c) diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 1cd33b2cb5dae..3d35094a5892a 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -242,7 +242,7 @@ func (o *Order) addrTemp(n ir.Node) ir.Node { if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL { // TODO: expand this to all static composite literal nodes? n = defaultlit(n, nil) - dowidth(n.Type()) + types.CalcSize(n.Type()) vstat := readonlystaticname(n.Type()) var s InitSchedule s.staticassign(vstat, 0, n, n.Type()) diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 44b614ba7016c..337556ea41502 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -32,7 +32,7 @@ func emitptrargsmap(fn *ir.Func) { return } lsym := base.Ctxt.Lookup(fn.LSym.Name + ".args_stackmap") - nptr := int(fn.Type().ArgWidth() / int64(Widthptr)) + nptr := int(fn.Type().ArgWidth() / int64(types.PtrSize)) bv := bvalloc(int32(nptr) * 2) nbitmap := 1 if fn.Type().NumResults() > 0 { @@ -162,9 +162,9 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { break } - dowidth(n.Type()) + types.CalcSize(n.Type()) w := n.Type().Width - if w >= MaxWidth || w < 0 { + if w >= types.MaxWidth || w < 0 { base.Fatalf("bad width") } if w == 0 && lastHasPtr { @@ -175,7 +175,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { w = 1 } s.stksize += w - s.stksize = Rnd(s.stksize, int64(n.Type().Align)) + s.stksize = types.Rnd(s.stksize, int64(n.Type().Align)) if n.Type().HasPointers() { s.stkptrsize = s.stksize lastHasPtr = true @@ -183,13 +183,13 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { lastHasPtr = false } if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) { - s.stksize = Rnd(s.stksize, int64(Widthptr)) + s.stksize = types.Rnd(s.stksize, int64(types.PtrSize)) } n.SetFrameOffset(-s.stksize) } - s.stksize = Rnd(s.stksize, int64(Widthreg)) - s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg)) + s.stksize = types.Rnd(s.stksize, int64(types.RegSize)) + s.stkptrsize = types.Rnd(s.stkptrsize, int64(types.RegSize)) } func funccompile(fn *ir.Func) { @@ -205,7 +205,7 @@ func funccompile(fn *ir.Func) { } // assign parameter offsets - dowidth(fn.Type()) + types.CalcSize(fn.Type()) if len(fn.Body) == 0 { // Initialize ABI wrappers if necessary. @@ -346,7 +346,7 @@ func init() { // and waits for them to complete. func compileFunctions() { if len(compilequeue) != 0 { - sizeCalculationDisabled = true // not safe to calculate sizes concurrently + types.CalcSizeDisabled = true // not safe to calculate sizes concurrently if race.Enabled { // Randomize compilation order to try to shake out races. 
tmp := make([]*ir.Func, len(compilequeue)) @@ -382,7 +382,7 @@ func compileFunctions() { compilequeue = nil wg.Wait() base.Ctxt.InParallel = false - sizeCalculationDisabled = false + types.CalcSizeDisabled = false } } @@ -538,11 +538,11 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { offs = n.FrameOffset() abbrev = dwarf.DW_ABRV_AUTO if base.Ctxt.FixedFrameSize() == 0 { - offs -= int64(Widthptr) + offs -= int64(types.PtrSize) } if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" { // There is a word space for FP on ARM64 even if the frame pointer is disabled - offs -= int64(Widthptr) + offs -= int64(types.PtrSize) } case ir.PPARAM, ir.PPARAMOUT: @@ -735,11 +735,11 @@ func stackOffset(slot ssa.LocalSlot) int32 { case ir.PAUTO: off = n.FrameOffset() if base.Ctxt.FixedFrameSize() == 0 { - off -= int64(Widthptr) + off -= int64(types.PtrSize) } if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" { // There is a word space for FP on ARM64 even if the frame pointer is disabled - off -= int64(Widthptr) + off -= int64(types.PtrSize) } case ir.PPARAM, ir.PPARAMOUT: off = n.FrameOffset() + base.Ctxt.FixedFrameSize() diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index f13889efda5bd..ac3b4bcd31c0c 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -423,23 +423,23 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) { switch t.Kind() { case types.TPTR, types.TUNSAFEPTR, types.TFUNC, types.TCHAN, types.TMAP: - if off&int64(Widthptr-1) != 0 { + if off&int64(types.PtrSize-1) != 0 { base.Fatalf("onebitwalktype1: invalid alignment, %v", t) } - bv.Set(int32(off / int64(Widthptr))) // pointer + bv.Set(int32(off / int64(types.PtrSize))) // pointer case types.TSTRING: // struct { byte *str; intgo len; } - if off&int64(Widthptr-1) != 0 { + if off&int64(types.PtrSize-1) != 0 { base.Fatalf("onebitwalktype1: invalid alignment, %v", t) } - bv.Set(int32(off / int64(Widthptr))) //pointer in first slot + bv.Set(int32(off / int64(types.PtrSize))) //pointer in first slot case types.TINTER: // struct { Itab *tab; void *data; } // or, when isnilinter(t)==true: // struct { Type *type; void *data; } - if off&int64(Widthptr-1) != 0 { + if off&int64(types.PtrSize-1) != 0 { base.Fatalf("onebitwalktype1: invalid alignment, %v", t) } // The first word of an interface is a pointer, but we don't @@ -454,14 +454,14 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) { // the underlying type so it won't be GCd. // If we ever have a moving GC, we need to change this for 2b (as // well as scan itabs to update their itab._type fields). - bv.Set(int32(off/int64(Widthptr) + 1)) // pointer in second slot + bv.Set(int32(off/int64(types.PtrSize) + 1)) // pointer in second slot case types.TSLICE: // struct { byte *array; uintgo len; uintgo cap; } - if off&int64(Widthptr-1) != 0 { + if off&int64(types.PtrSize-1) != 0 { base.Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t) } - bv.Set(int32(off / int64(Widthptr))) // pointer in first slot (BitsPointer) + bv.Set(int32(off / int64(types.PtrSize))) // pointer in first slot (BitsPointer) case types.TARRAY: elt := t.Elem() @@ -1181,7 +1181,7 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) { // Next, find the offset of the largest pointer in the largest node. 
var maxArgs int64 if maxArgNode != nil { - maxArgs = maxArgNode.FrameOffset() + typeptrdata(maxArgNode.Type()) + maxArgs = maxArgNode.FrameOffset() + types.PtrDataSize(maxArgNode.Type()) } // Size locals bitmaps to be stkptrsize sized. @@ -1196,11 +1196,11 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) { // Temporary symbols for encoding bitmaps. var argsSymTmp, liveSymTmp obj.LSym - args := bvalloc(int32(maxArgs / int64(Widthptr))) + args := bvalloc(int32(maxArgs / int64(types.PtrSize))) aoff := duint32(&argsSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps aoff = duint32(&argsSymTmp, aoff, uint32(args.n)) // number of bits in each bitmap - locals := bvalloc(int32(maxLocals / int64(Widthptr))) + locals := bvalloc(int32(maxLocals / int64(types.PtrSize))) loff := duint32(&liveSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps loff = duint32(&liveSymTmp, loff, uint32(locals.n)) // number of bits in each bitmap diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go index e73e7fbbe1711..1ad3b9b4226e0 100644 --- a/src/cmd/compile/internal/gc/racewalk.go +++ b/src/cmd/compile/internal/gc/racewalk.go @@ -37,7 +37,7 @@ func instrument(fn *ir.Func) { // race in the future. nodpc := ir.RegFP.CloneName() nodpc.SetType(types.Types[types.TUINTPTR]) - nodpc.SetFrameOffset(int64(-Widthptr)) + nodpc.SetFrameOffset(int64(-types.PtrSize)) fn.Dcl = append(fn.Dcl, nodpc) fn.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc)) fn.Exit.Append(mkcall("racefuncexit", nil, nil)) diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 8b393a8979aa4..987b2d6ee2852 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -67,9 +67,9 @@ const ( MAXELEMSIZE = 128 ) -func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{}) -func imethodSize() int { return 4 + 4 } // Sizeof(runtime.imethod{}) -func commonSize() int { return 4*Widthptr + 8 + 8 } // Sizeof(runtime._type{}) +func structfieldSize() int { return 3 * types.PtrSize } // Sizeof(runtime.structfield{}) +func imethodSize() int { return 4 + 4 } // Sizeof(runtime.imethod{}) +func commonSize() int { return 4*types.PtrSize + 8 + 8 } // Sizeof(runtime._type{}) func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{}) if t.Sym() == nil && len(methods(t)) == 0 { @@ -91,8 +91,8 @@ func bmap(t *types.Type) *types.Type { keytype := t.Key() elemtype := t.Elem() - dowidth(keytype) - dowidth(elemtype) + types.CalcSize(keytype) + types.CalcSize(elemtype) if keytype.Width > MAXKEYSIZE { keytype = types.NewPtr(keytype) } @@ -132,7 +132,7 @@ func bmap(t *types.Type) *types.Type { // link up fields bucket := types.NewStruct(types.NoPkg, field[:]) bucket.SetNoalg(true) - dowidth(bucket) + types.CalcSize(bucket) // Check invariants that map code depends on. if !types.IsComparable(t.Key()) { @@ -180,7 +180,7 @@ func bmap(t *types.Type) *types.Type { // Double-check that overflow field is final memory in struct, // with no padding at end. - if overflow.Offset != bucket.Width-int64(Widthptr) { + if overflow.Offset != bucket.Width-int64(types.PtrSize) { base.Fatalf("bad offset of overflow in bmap for %v", t) } @@ -226,11 +226,11 @@ func hmap(t *types.Type) *types.Type { hmap := types.NewStruct(types.NoPkg, fields) hmap.SetNoalg(true) - dowidth(hmap) + types.CalcSize(hmap) // The size of hmap should be 48 bytes on 64 bit // and 28 bytes on 32 bit platforms. 
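// Editor's aside (worked check, not part of this patch): the size test
// below, evaluated for both pointer sizes.
//
//	8 + 5*types.PtrSize = 8 + 5*8 = 48 bytes // 64-bit platforms
//	8 + 5*types.PtrSize = 8 + 5*4 = 28 bytes // 32-bit platforms
//
// This matches the runtime.hmap layout that the comment above describes.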
- if size := int64(8 + 5*Widthptr); hmap.Width != size { + if size := int64(8 + 5*types.PtrSize); hmap.Width != size { base.Fatalf("hmap size not correct: got %d, want %d", hmap.Width, size) } @@ -289,9 +289,9 @@ func hiter(t *types.Type) *types.Type { // build iterator struct holding the above fields hiter := types.NewStruct(types.NoPkg, fields) hiter.SetNoalg(true) - dowidth(hiter) - if hiter.Width != int64(12*Widthptr) { - base.Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*Widthptr) + types.CalcSize(hiter) + if hiter.Width != int64(12*types.PtrSize) { + base.Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*types.PtrSize) } t.MapType().Hiter = hiter hiter.StructType().Map = t @@ -335,7 +335,7 @@ func deferstruct(stksize int64) *types.Type { // build struct holding the above fields s := types.NewStruct(types.NoPkg, fields) s.SetNoalg(true) - CalcStructSize(s) + types.CalcStructSize(s) return s } @@ -642,7 +642,7 @@ func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int { if t.Sym() == nil && len(m) == 0 { return ot } - noff := int(Rnd(int64(ot), int64(Widthptr))) + noff := int(types.Rnd(int64(ot), int64(types.PtrSize))) if noff != ot { base.Fatalf("unexpected alignment in dextratype for %v", t) } @@ -745,55 +745,6 @@ var kinds = []int{ types.TUNSAFEPTR: objabi.KindUnsafePointer, } -// typeptrdata returns the length in bytes of the prefix of t -// containing pointer data. Anything after this offset is scalar data. -func typeptrdata(t *types.Type) int64 { - if !t.HasPointers() { - return 0 - } - - switch t.Kind() { - case types.TPTR, - types.TUNSAFEPTR, - types.TFUNC, - types.TCHAN, - types.TMAP: - return int64(Widthptr) - - case types.TSTRING: - // struct { byte *str; intgo len; } - return int64(Widthptr) - - case types.TINTER: - // struct { Itab *tab; void *data; } or - // struct { Type *type; void *data; } - // Note: see comment in plive.go:onebitwalktype1. - return 2 * int64(Widthptr) - - case types.TSLICE: - // struct { byte *array; uintgo len; uintgo cap; } - return int64(Widthptr) - - case types.TARRAY: - // haspointers already eliminated t.NumElem() == 0. - return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem()) - - case types.TSTRUCT: - // Find the last field that has pointers. - var lastPtrField *types.Field - for _, t1 := range t.Fields().Slice() { - if t1.Type.HasPointers() { - lastPtrField = t1 - } - } - return lastPtrField.Offset + typeptrdata(lastPtrField.Type) - - default: - base.Fatalf("typeptrdata: unexpected type, %v", t) - return 0 - } -} - // tflag is documented in reflect/type.go. // // tflag values must be kept in sync with copies in: @@ -815,7 +766,7 @@ var ( // dcommontype dumps the contents of a reflect.rtype (runtime._type). func dcommontype(lsym *obj.LSym, t *types.Type) int { - dowidth(t) + types.CalcSize(t) eqfunc := geneq(t) sptrWeak := true @@ -1148,11 +1099,11 @@ func dtypesym(t *types.Type) *obj.LSym { } ot = duint16(lsym, ot, uint16(inCount)) ot = duint16(lsym, ot, uint16(outCount)) - if Widthptr == 8 { + if types.PtrSize == 8 { ot += 4 // align for *rtype } - dataAdd := (inCount + t.NumResults()) * Widthptr + dataAdd := (inCount + t.NumResults()) * types.PtrSize ot = dextratype(lsym, ot, t, dataAdd) // Array of rtype pointers follows funcType. 
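// Editor's aside (worked example, not part of this patch): the dataAdd
// computation above, for a signature like func(int, string) bool.
// inCount == 2 and t.NumResults() == 1, so dataAdd == 3*types.PtrSize,
// which is 24 bytes on amd64: space for the three *rtype words that
// follow the funcType header.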
@@ -1182,7 +1133,7 @@ func dtypesym(t *types.Type) *obj.LSym { } ot = dgopkgpath(lsym, ot, tpkg) - ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t)) + ot = dsymptr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t)) ot = duintptr(lsym, ot, uint64(n)) ot = duintptr(lsym, ot, uint64(n)) dataAdd := imethodSize() * n @@ -1217,14 +1168,14 @@ func dtypesym(t *types.Type) *obj.LSym { // Note: flags must match maptype accessors in ../../../../runtime/type.go // and maptype builder in ../../../../reflect/type.go:MapOf. if t.Key().Width > MAXKEYSIZE { - ot = duint8(lsym, ot, uint8(Widthptr)) + ot = duint8(lsym, ot, uint8(types.PtrSize)) flags |= 1 // indirect key } else { ot = duint8(lsym, ot, uint8(t.Key().Width)) } if t.Elem().Width > MAXELEMSIZE { - ot = duint8(lsym, ot, uint8(Widthptr)) + ot = duint8(lsym, ot, uint8(types.PtrSize)) flags |= 2 // indirect value } else { ot = duint8(lsym, ot, uint8(t.Elem().Width)) @@ -1281,7 +1232,7 @@ func dtypesym(t *types.Type) *obj.LSym { ot = dcommontype(lsym, t) ot = dgopkgpath(lsym, ot, spkg) - ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t)) + ot = dsymptr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t)) ot = duintptr(lsym, ot, uint64(len(fields))) ot = duintptr(lsym, ot, uint64(len(fields))) @@ -1343,7 +1294,7 @@ func ifaceMethodOffset(ityp *types.Type, i int64) int64 { // [...]imethod // } // The size of imethod is 8. - return int64(commonSize()+4*Widthptr+uncommonSize(ityp)) + i*8 + return int64(commonSize()+4*types.PtrSize+uncommonSize(ityp)) + i*8 } // for each itabEntry, gather the methods on @@ -1416,7 +1367,7 @@ func itabsym(it *obj.LSym, offset int64) *obj.LSym { } // keep this arithmetic in sync with *itab layout - methodnum := int((offset - 2*int64(Widthptr) - 8) / int64(Widthptr)) + methodnum := int((offset - 2*int64(types.PtrSize) - 8) / int64(types.PtrSize)) if methodnum >= len(syms) { return nil } @@ -1625,8 +1576,8 @@ const maxPtrmaskBytes = 2048 // along with a boolean reporting whether the UseGCProg bit should be set in // the type kind, and the ptrdata field to record in the reflect type information. func dgcsym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) { - ptrdata = typeptrdata(t) - if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 { + ptrdata = types.PtrDataSize(t) + if ptrdata/int64(types.PtrSize) <= maxPtrmaskBytes*8 { lsym = dgcptrmask(t) return } @@ -1638,7 +1589,7 @@ func dgcsym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) { // dgcptrmask emits and returns the symbol containing a pointer mask for type t. func dgcptrmask(t *types.Type) *obj.LSym { - ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8) + ptrmask := make([]byte, (types.PtrDataSize(t)/int64(types.PtrSize)+7)/8) fillptrmask(t, ptrmask) p := fmt.Sprintf("gcbits.%x", ptrmask) @@ -1669,7 +1620,7 @@ func fillptrmask(t *types.Type, ptrmask []byte) { vec := bvalloc(8 * int32(len(ptrmask))) onebitwalktype1(t, 0, vec) - nptr := typeptrdata(t) / int64(Widthptr) + nptr := types.PtrDataSize(t) / int64(types.PtrSize) for i := int64(0); i < nptr; i++ { if vec.Get(int32(i)) { ptrmask[i/8] |= 1 << (uint(i) % 8) @@ -1682,7 +1633,7 @@ func fillptrmask(t *types.Type, ptrmask []byte) { // In practice, the size is typeptrdata(t) except for non-trivial arrays. // For non-trivial arrays, the program describes the full t.Width size. 
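// Editor's aside (worked example, not part of this patch): the
// maxPtrmaskBytes threshold used by dgcsym above, on amd64.
//
//	maxPtrmaskBytes*8 = 2048*8 = 16384 pointer-sized words
//	16384 * types.PtrSize = 128 KiB of pointer data
//
// Types whose pointer-data prefix fits in that budget get a literal
// pointer mask from dgcptrmask; larger types fall back to the encoded
// GC program that dgcprog, below, emits.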
func dgcprog(t *types.Type) (*obj.LSym, int64) { - dowidth(t) + types.CalcSize(t) if t.Width == types.BADWIDTH { base.Fatalf("dgcprog: %v badwidth", t) } @@ -1690,9 +1641,9 @@ func dgcprog(t *types.Type) (*obj.LSym, int64) { var p GCProg p.init(lsym) p.emit(t, 0) - offset := p.w.BitIndex() * int64(Widthptr) + offset := p.w.BitIndex() * int64(types.PtrSize) p.end() - if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width { + if ptrdata := types.PtrDataSize(t); offset < ptrdata || offset > t.Width { base.Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width) } return lsym, offset @@ -1728,12 +1679,12 @@ func (p *GCProg) end() { } func (p *GCProg) emit(t *types.Type, offset int64) { - dowidth(t) + types.CalcSize(t) if !t.HasPointers() { return } - if t.Width == int64(Widthptr) { - p.w.Ptr(offset / int64(Widthptr)) + if t.Width == int64(types.PtrSize) { + p.w.Ptr(offset / int64(types.PtrSize)) return } switch t.Kind() { @@ -1741,14 +1692,14 @@ func (p *GCProg) emit(t *types.Type, offset int64) { base.Fatalf("GCProg.emit: unexpected type %v", t) case types.TSTRING: - p.w.Ptr(offset / int64(Widthptr)) + p.w.Ptr(offset / int64(types.PtrSize)) case types.TINTER: // Note: the first word isn't a pointer. See comment in plive.go:onebitwalktype1. - p.w.Ptr(offset/int64(Widthptr) + 1) + p.w.Ptr(offset/int64(types.PtrSize) + 1) case types.TSLICE: - p.w.Ptr(offset / int64(Widthptr)) + p.w.Ptr(offset / int64(types.PtrSize)) case types.TARRAY: if t.NumElem() == 0 { @@ -1764,7 +1715,7 @@ func (p *GCProg) emit(t *types.Type, offset int64) { elem = elem.Elem() } - if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) { + if !p.w.ShouldRepeat(elem.Width/int64(types.PtrSize), count) { // Cheaper to just emit the bits. for i := int64(0); i < count; i++ { p.emit(elem, offset+i*elem.Width) @@ -1772,8 +1723,8 @@ func (p *GCProg) emit(t *types.Type, offset int64) { return } p.emit(elem, offset) - p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr)) - p.w.Repeat(elem.Width/int64(Widthptr), count-1) + p.w.ZeroUntil((offset + elem.Width) / int64(types.PtrSize)) + p.w.Repeat(elem.Width/int64(types.PtrSize), count-1) case types.TSTRUCT: for _, t1 := range t.Fields().Slice() { diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 936edb3d70331..e9a4590043021 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -330,8 +330,8 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type } // Copy val directly into n. 
ir.SetPos(val) - if !s.staticassign(l, loff+int64(Widthptr), val, val.Type()) { - a := ir.NewNameOffsetExpr(base.Pos, l, loff+int64(Widthptr), val.Type()) + if !s.staticassign(l, loff+int64(types.PtrSize), val, val.Type()) { + a := ir.NewNameOffsetExpr(base.Pos, l, loff+int64(types.PtrSize), val.Type()) s.append(ir.NewAssignStmt(base.Pos, a, val)) } } else { @@ -341,7 +341,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type if !s.staticassign(a, 0, val, val.Type()) { s.append(ir.NewAssignStmt(base.Pos, a, val)) } - addrsym(l, loff+int64(Widthptr), a, 0) + addrsym(l, loff+int64(types.PtrSize), a, 0) } return true @@ -622,7 +622,7 @@ func isSmallSliceLit(n *ir.CompLitExpr) bool { func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) { // make an array type corresponding the number of elements we have t := types.NewArray(n.Type().Elem(), n.Len) - dowidth(t) + types.CalcSize(t) if ctxt == inNonInitFunction { // put everything into static array @@ -801,8 +801,8 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) { tk.SetNoalg(true) te.SetNoalg(true) - dowidth(tk) - dowidth(te) + types.CalcSize(tk) + types.CalcSize(te) // make and initialize static arrays vstatk := readonlystaticname(tk) @@ -1034,7 +1034,7 @@ func stataddr(n ir.Node) (name *ir.Name, offset int64, ok bool) { } // Check for overflow. - if n.Type().Width != 0 && MaxWidth/n.Type().Width <= int64(l) { + if n.Type().Width != 0 && types.MaxWidth/n.Type().Width <= int64(l) { break } offset += int64(l) * n.Type().Width diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index f879d8b86d621..21925a0d651fe 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -2248,8 +2248,8 @@ func (s *state) expr(n ir.Node) *ssa.Value { return v } - dowidth(from) - dowidth(to) + types.CalcSize(from) + types.CalcSize(to) if from.Width != to.Width { s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width) return nil @@ -3016,7 +3016,7 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value { s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem()) } } - capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceCapOffset, addr) + capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceCapOffset, addr) s.store(types.Types[types.TINT], capaddr, r[2]) s.store(pt, addr, r[0]) // load the value we just stored to avoid having to spill it @@ -3037,7 +3037,7 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value { if inplace { l = s.variable(lenVar, types.Types[types.TINT]) // generates phi for len nl = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs)) - lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceLenOffset, addr) + lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceLenOffset, addr) s.store(types.Types[types.TINT], lenaddr, nl) } @@ -3153,7 +3153,7 @@ func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask return } t := left.Type() - dowidth(t) + types.CalcSize(t) if s.canSSA(left) { if deref { s.Fatalf("can SSA LHS %v but not RHS %s", left, right) @@ -4706,7 +4706,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val closure = iclosure } } - dowidth(fn.Type()) + types.CalcSize(fn.Type()) stksize := fn.Type().ArgWidth() // includes receiver, args, and 
results // Run all assignments of temps. @@ -4778,11 +4778,11 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val s.store(types.Types[types.TUINTPTR], arg0, addr) call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem()) } - if stksize < int64(Widthptr) { + if stksize < int64(types.PtrSize) { // We need room for both the call to deferprocStack and the call to // the deferred function. // TODO Revisit this if/when we pass args in registers. - stksize = int64(Widthptr) + stksize = int64(types.PtrSize) } call.AuxInt = stksize } else { @@ -4800,15 +4800,15 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart) s.store(types.Types[types.TUINT32], addr, argsize) } - ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart) + int32(Widthptr)}) + ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart) + int32(types.PtrSize)}) if testLateExpansion { callArgs = append(callArgs, closure) } else { - addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr)) + addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(types.PtrSize)) s.store(types.Types[types.TUINTPTR], addr, closure) } - stksize += 2 * int64(Widthptr) - argStart += 2 * int64(Widthptr) + stksize += 2 * int64(types.PtrSize) + argStart += 2 * int64(types.PtrSize) } // Set receiver (for interface calls). @@ -4970,7 +4970,7 @@ func (s *state) getClosureAndRcvr(fn *ir.SelectorExpr) (*ssa.Value, *ssa.Value) i := s.expr(fn.X) itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i) s.nilCheck(itab) - itabidx := fn.Offset + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab + itabidx := fn.Offset + 2*int64(types.PtrSize) + 8 // offset of fun field in runtime.itab closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab) rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i) return closure, rcvr @@ -5177,8 +5177,8 @@ func (s *state) canSSAName(name *ir.Name) bool { // canSSA reports whether variables of type t are SSA-able. func canSSAType(t *types.Type) bool { - dowidth(t) - if t.Width > int64(4*Widthptr) { + types.CalcSize(t) + if t.Width > int64(4*types.PtrSize) { // 4*Widthptr is an arbitrary constant. We want it // to be at least 3*Widthptr so slices can be registerized. // Too big and we'll introduce too much register pressure. @@ -5379,7 +5379,7 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args . for _, arg := range args { t := arg.Type - off = Rnd(off, t.Alignment()) + off = types.Rnd(off, t.Alignment()) size := t.Size() ACArgs = append(ACArgs, ssa.Param{Type: t, Offset: int32(off)}) if testLateExpansion { @@ -5390,12 +5390,12 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args . } off += size } - off = Rnd(off, int64(Widthreg)) + off = types.Rnd(off, int64(types.RegSize)) // Accumulate results types and offsets offR := off for _, t := range results { - offR = Rnd(offR, t.Alignment()) + offR = types.Rnd(offR, t.Alignment()) ACResults = append(ACResults, ssa.Param{Type: t, Offset: int32(offR)}) offR += t.Size() } @@ -5429,7 +5429,7 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args . 
res := make([]*ssa.Value, len(results)) if testLateExpansion { for i, t := range results { - off = Rnd(off, t.Alignment()) + off = types.Rnd(off, t.Alignment()) if canSSAType(t) { res[i] = s.newValue1I(ssa.OpSelectN, t, int64(i), call) } else { @@ -5440,13 +5440,13 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args . } } else { for i, t := range results { - off = Rnd(off, t.Alignment()) + off = types.Rnd(off, t.Alignment()) ptr := s.constOffPtrSP(types.NewPtr(t), off) res[i] = s.load(t, ptr) off += t.Size() } } - off = Rnd(off, int64(Widthptr)) + off = types.Rnd(off, int64(types.PtrSize)) // Remember how much callee stack space we needed. call.AuxInt = off @@ -6072,7 +6072,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val return } // Load type out of itab, build interface with existing idata. - off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab) + off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab) typ := s.load(byteptr, off) idata := s.newValue1(ssa.OpIData, byteptr, iface) res = s.newValue2(ssa.OpIMake, n.Type(), typ, idata) @@ -6082,7 +6082,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val s.startBlock(bOk) // nonempty -> empty // Need to load type from itab - off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab) + off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab) s.vars[typVar] = s.load(byteptr, off) s.endBlock() @@ -6764,14 +6764,14 @@ func genssa(f *ssa.Func, pp *Progs) { func defframe(s *SSAGenState, e *ssafn) { pp := s.pp - frame := Rnd(s.maxarg+e.stksize, int64(Widthreg)) + frame := types.Rnd(s.maxarg+e.stksize, int64(types.RegSize)) if thearch.PadFrame != nil { frame = thearch.PadFrame(frame) } // Fill in argument and frame size. pp.Text.To.Type = obj.TYPE_TEXTSIZE - pp.Text.To.Val = int32(Rnd(e.curfn.Type().ArgWidth(), int64(Widthreg))) + pp.Text.To.Val = int32(types.Rnd(e.curfn.Type().ArgWidth(), int64(types.RegSize))) pp.Text.To.Offset = frame // Insert code to zero ambiguously live variables so that the @@ -6792,11 +6792,11 @@ func defframe(s *SSAGenState, e *ssafn) { if n.Class_ != ir.PAUTO { e.Fatalf(n.Pos(), "needzero class %d", n.Class_) } - if n.Type().Size()%int64(Widthptr) != 0 || n.FrameOffset()%int64(Widthptr) != 0 || n.Type().Size() == 0 { + if n.Type().Size()%int64(types.PtrSize) != 0 || n.FrameOffset()%int64(types.PtrSize) != 0 || n.Type().Size() == 0 { e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset_) } - if lo != hi && n.FrameOffset()+n.Type().Size() >= lo-int64(2*Widthreg) { + if lo != hi && n.FrameOffset()+n.Type().Size() >= lo-int64(2*types.RegSize) { // Merge with range we already have. 
lo = n.FrameOffset() continue @@ -7274,7 +7274,7 @@ func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t n.SetEsc(ir.EscNever) n.Curfn = e.curfn e.curfn.Dcl = append(e.curfn.Dcl, n) - dowidth(t) + types.CalcSize(t) return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset} } diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index bcf17e42d6ad9..d4c7c6db1a6c3 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -1377,8 +1377,8 @@ func itabType(itab ir.Node) ir.Node { typ := ir.NewSelectorExpr(base.Pos, ir.ODOTPTR, itab, nil) typ.SetType(types.NewPtr(types.Types[types.TUINT8])) typ.SetTypecheck(1) - typ.Offset = int64(Widthptr) // offset of _type in runtime.itab - typ.SetBounded(true) // guaranteed not to fault + typ.Offset = int64(types.PtrSize) // offset of _type in runtime.itab + typ.SetBounded(true) // guaranteed not to fault return typ } @@ -1403,13 +1403,3 @@ func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node { ind.SetBounded(true) return ind } - -// typePos returns the position associated with t. -// This is where t was declared or where it appeared as a type expression. -func typePos(t *types.Type) src.XPos { - if pos := t.Pos(); pos.IsKnown() { - return pos - } - base.Fatalf("bad type: %v", t) - panic("unreachable") -} diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index 5bbc91fcc13ca..4e7ff00434119 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -535,9 +535,9 @@ func walkTypeSwitch(sw *ir.SwitchStmt) { dotHash.SetType(types.Types[types.TUINT32]) dotHash.SetTypecheck(1) if s.facename.Type().IsEmptyInterface() { - dotHash.Offset = int64(2 * Widthptr) // offset of hash in runtime._type + dotHash.Offset = int64(2 * types.PtrSize) // offset of hash in runtime._type } else { - dotHash.Offset = int64(2 * Widthptr) // offset of hash in runtime.itab + dotHash.Offset = int64(2 * types.PtrSize) // offset of hash in runtime.itab } dotHash.SetBounded(true) // guaranteed not to fault s.hashname = copyexpr(dotHash, dotHash.Type(), &sw.Compiled) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 0beb5712d4821..0552dd180f220 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -21,8 +21,6 @@ var ( ) func TypecheckInit() { - types.Widthptr = Widthptr - types.Dowidth = dowidth initUniverse() dclcontext = ir.PEXTERN base.Timer.Start("fe", "loadsys") @@ -163,7 +161,6 @@ func TypecheckImports() { } var traceIndent []byte -var skipDowidthForTracing bool func tracePrint(title string, n ir.Node) func(np *ir.Node) { indent := traceIndent @@ -177,8 +174,8 @@ func tracePrint(title string, n ir.Node) func(np *ir.Node) { tc = n.Typecheck() } - skipDowidthForTracing = true - defer func() { skipDowidthForTracing = false }() + types.SkipSizeForTracing = true + defer func() { types.SkipSizeForTracing = false }() fmt.Printf("%s: %s%s %p %s %v tc=%d\n", pos, indent, title, n, op, n, tc) traceIndent = append(traceIndent, ". "...) 
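The constants threaded through getClosureAndRcvr, itabType, and walkTypeSwitch above all fall out of one layout. A sketch of the itab these offsets assume, on a 64-bit target (field types simplified; the authoritative definition is runtime.itab in runtime/runtime2.go):

    package sketch

    // itab mirrors the runtime's layout; offsets assume PtrSize == 8.
    type itab struct {
        inter uintptr    // offset 0
        _type uintptr    // offset PtrSize: what itabType loads
        hash  uint32     // offset 2*PtrSize: what walkTypeSwitch loads
        _     [4]byte    // padding
        fun   [1]uintptr // offset 2*PtrSize + 8: first method pointer, per getClosureAndRcvr
    }

Both arms of the walkTypeSwitch branch compute the same 2*PtrSize because hash happens to sit at that offset in runtime._type and runtime.itab alike, which is why the if/else there looks redundant.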
@@ -201,8 +198,8 @@ func tracePrint(title string, n ir.Node) func(np *ir.Node) { typ = n.Type() } - skipDowidthForTracing = true - defer func() { skipDowidthForTracing = false }() + types.SkipSizeForTracing = true + defer func() { types.SkipSizeForTracing = false }() fmt.Printf("%s: %s=> %p %s %v tc=%d type=%L\n", pos, indent, n, op, n, tc, typ) } } @@ -503,7 +500,7 @@ func typecheck(n ir.Node, top int) (res ir.Node) { break default: - checkwidth(t) + types.CheckSize(t) } } if t != nil { @@ -651,7 +648,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } t := types.NewSlice(n.Elem.Type()) n.SetOTYPE(t) - checkwidth(t) + types.CheckSize(t) return n case ir.OTARRAY: @@ -695,7 +692,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { bound, _ := constant.Int64Val(v) t := types.NewArray(n.Elem.Type(), bound) n.SetOTYPE(t) - checkwidth(t) + types.CheckSize(t) return n case ir.OTMAP: @@ -758,7 +755,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { if l.Op() == ir.OTYPE { n.SetOTYPE(types.NewPtr(l.Type())) // Ensure l.Type gets dowidth'd for the backend. Issue 20174. - checkwidth(l.Type()) + types.CheckSize(l.Type()) return n } @@ -910,7 +907,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } - dowidth(l.Type()) + types.CalcSize(l.Type()) if r.Type().IsInterface() == l.Type().IsInterface() || l.Type().Width >= 1<<16 { l = ir.NewConvExpr(base.Pos, aop, r.Type(), l) l.SetTypecheck(1) @@ -931,7 +928,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } - dowidth(r.Type()) + types.CalcSize(r.Type()) if r.Type().IsInterface() == l.Type().IsInterface() || r.Type().Width >= 1<<16 { r = ir.NewConvExpr(base.Pos, aop, l.Type(), r) r.SetTypecheck(1) @@ -1139,7 +1136,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } n.SetOp(ir.ODOTPTR) - checkwidth(t) + types.CheckSize(t) } if n.Sel.IsBlank() { @@ -1464,7 +1461,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } else if t.IsPtr() && t.Elem().IsArray() { tp = t.Elem() n.SetType(types.NewSlice(tp.Elem())) - dowidth(n.Type()) + types.CalcSize(n.Type()) if hasmax { n.SetOp(ir.OSLICE3ARR) } else { @@ -1581,7 +1578,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetType(nil) return n } - checkwidth(t) + types.CheckSize(t) switch l.Op() { case ir.ODOTINTER: @@ -1860,7 +1857,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { continue } as[i] = assignconv(n, t.Elem(), "append") - checkwidth(as[i].Type()) // ensure width is calculated for backend + types.CheckSize(as[i].Type()) // ensure width is calculated for backend } return n @@ -1907,7 +1904,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OCONV: n := n.(*ir.ConvExpr) - checkwidth(n.Type()) // ensure width is calculated for backend + types.CheckSize(n.Type()) // ensure width is calculated for backend n.X = typecheck(n.X, ctxExpr) n.X = convlit1(n.X, n.Type(), true, nil) t := n.X.Type() @@ -2303,7 +2300,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.ODCLTYPE: n := n.(*ir.Decl) n.X = typecheck(n.X, ctxType) - checkwidth(n.X.Type()) + types.CheckSize(n.X.Type()) return n } @@ -2626,7 +2623,7 @@ func derefall(t *types.Type) *types.Type { func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { s := n.Sel - dowidth(t) + types.CalcSize(t) var f1 *types.Field if t.IsStruct() || t.IsInterface() { f1 = lookdot1(n, s, t, t.Fields(), dostrcmp) @@ -2672,7 +2669,7 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { return f2 } tt := n.X.Type() - dowidth(tt) 
+ types.CalcSize(tt) rcvr := f2.Type.Recv().Type if !types.Identical(rcvr, tt) { if rcvr.IsPtr() && types.Identical(rcvr.Elem(), tt) { @@ -3067,7 +3064,7 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { case types.TSTRUCT: // Need valid field offsets for Xoffset below. - dowidth(t) + types.CalcSize(t) errored := false if len(n.List) != 0 && nokeys(n.List) { @@ -3366,7 +3363,7 @@ func typecheckas(n *ir.AssignStmt) { n.X = typecheck(n.X, ctxExpr|ctxAssign) } if !ir.IsBlank(n.X) { - checkwidth(n.X.Type()) // ensure width is calculated for backend + types.CheckSize(n.X.Type()) // ensure width is calculated for backend } } @@ -3590,7 +3587,7 @@ func typecheckdeftype(n *ir.Name) { n.SetTypecheck(1) n.SetWalkdef(1) - defercheckwidth() + types.DeferCheckSize() errorsBefore := base.Errors() n.Ntype = typecheckNtype(n.Ntype) if underlying := n.Ntype.Type(); underlying != nil { @@ -3604,7 +3601,7 @@ func typecheckdeftype(n *ir.Name) { // but it was reported. Silence future errors. t.SetBroke(true) } - resumecheckwidth() + types.ResumeCheckSize() } func typecheckdef(n ir.Node) { diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index b7472ede0f9e9..5d59fdbbc534f 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -77,17 +77,17 @@ var unsafeFuncs = [...]struct { // initUniverse initializes the universe block. func initUniverse() { - if Widthptr == 0 { + if types.PtrSize == 0 { base.Fatalf("typeinit before betypeinit") } - slicePtrOffset = 0 - sliceLenOffset = Rnd(slicePtrOffset+int64(Widthptr), int64(Widthptr)) - sliceCapOffset = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr)) - sizeofSlice = Rnd(sliceCapOffset+int64(Widthptr), int64(Widthptr)) + types.SlicePtrOffset = 0 + types.SliceLenOffset = types.Rnd(types.SlicePtrOffset+int64(types.PtrSize), int64(types.PtrSize)) + types.SliceCapOffset = types.Rnd(types.SliceLenOffset+int64(types.PtrSize), int64(types.PtrSize)) + types.SliceSize = types.Rnd(types.SliceCapOffset+int64(types.PtrSize), int64(types.PtrSize)) // string is same as slice wo the cap - sizeofString = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr)) + types.StringSize = types.Rnd(types.SliceLenOffset+int64(types.PtrSize), int64(types.PtrSize)) for et := types.Kind(0); et < types.NTYPE; et++ { types.SimType[et] = et @@ -103,7 +103,7 @@ func initUniverse() { n.SetType(t) sym.Def = n if kind != types.TANY { - dowidth(t) + types.CalcSize(t) } return t } @@ -114,7 +114,7 @@ func initUniverse() { for _, s := range &typedefs { sameas := s.sameas32 - if Widthptr == 8 { + if types.PtrSize == 8 { sameas = s.sameas64 } types.SimType[s.etype] = sameas @@ -139,7 +139,7 @@ func initUniverse() { types.ErrorType.SetUnderlying(makeErrorInterface()) n.SetType(types.ErrorType) s.Def = n - dowidth(types.ErrorType) + types.CalcSize(types.ErrorType) types.Types[types.TUNSAFEPTR] = defBasic(types.TUNSAFEPTR, ir.Pkgs.Unsafe, "Pointer") diff --git a/src/cmd/compile/internal/gc/unsafe.go b/src/cmd/compile/internal/gc/unsafe.go index cecc8720a9e9c..d37ebfff31ea1 100644 --- a/src/cmd/compile/internal/gc/unsafe.go +++ b/src/cmd/compile/internal/gc/unsafe.go @@ -7,6 +7,7 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/types" ) // evalunsafe evaluates a package unsafe operation and returns the result. 
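The slice and string header sizes that initUniverse computes above are easy to reproduce by hand. A standalone sketch, assuming a 64-bit target; rnd reimplements the rounding contract of types.Rnd without its sanity checks (at pointer alignment every call is a no-op, so the offsets are simply consecutive words):

    package main

    import "fmt"

    // rnd rounds o up to a multiple of r, as types.Rnd does.
    func rnd(o, r int64) int64 { return (o + r - 1) &^ (r - 1) }

    func main() {
        const ptrSize = 8
        slicePtrOffset := int64(0)
        sliceLenOffset := rnd(slicePtrOffset+ptrSize, ptrSize) // 8
        sliceCapOffset := rnd(sliceLenOffset+ptrSize, ptrSize) // 16
        sliceSize := rnd(sliceCapOffset+ptrSize, ptrSize)      // 24
        stringSize := rnd(sliceLenOffset+ptrSize, ptrSize)     // 16: a string is a slice without the cap
        fmt.Println(sliceLenOffset, sliceCapOffset, sliceSize, stringSize)
    }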
@@ -20,7 +21,7 @@ func evalunsafe(n ir.Node) int64 { if tr == nil { return 0 } - dowidth(tr) + types.CalcSize(tr) if n.Op() == ir.OALIGNOF { return int64(tr.Align) } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index dd376a8835a02..764c5c41b051f 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -470,7 +470,7 @@ func walkexpr(n ir.Node, init *ir.Nodes) ir.Node { switch n.Type().Kind() { case types.TBLANK, types.TNIL, types.TIDEAL: default: - checkwidth(n.Type()) + types.CheckSize(n.Type()) } } @@ -1031,9 +1031,9 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // ptr = convT2X(val) // e = iface{typ/tab, ptr} fn := syslook(fnname) - dowidth(fromType) + types.CalcSize(fromType) fn = substArgTypes(fn, fromType) - dowidth(fn.Type()) + types.CalcSize(fn.Type()) call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) call.Args = []ir.Node{n.X} e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), safeexpr(walkexpr(typecheck(call, ctxExpr), init), init)) @@ -1065,10 +1065,10 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { v = nodAddr(v) } - dowidth(fromType) + types.CalcSize(fromType) fn := syslook(fnname) fn = substArgTypes(fn, fromType, toType) - dowidth(fn.Type()) + types.CalcSize(fn.Type()) call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) call.Args = []ir.Node{tab, v} return walkexpr(typecheck(call, ctxExpr), init) @@ -1116,7 +1116,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // rewrite 64-bit div and mod on 32-bit architectures. // TODO: Remove this code once we can introduce // runtime calls late in SSA processing. - if Widthreg < 8 && (et == types.TINT64 || et == types.TUINT64) { + if types.RegSize < 8 && (et == types.TINT64 || et == types.TUINT64) { if n.Y.Op() == ir.OLITERAL { // Leave div/mod by constant powers of 2 or small 16-bit constants. // The SSA backend will handle those. @@ -1724,7 +1724,7 @@ func markUsedIfaceMethod(n *ir.CallExpr) { r.Sym = tsym // dot.Xoffset is the method index * Widthptr (the offset of code pointer // in itab). - midx := dot.Offset / int64(Widthptr) + midx := dot.Offset / int64(types.PtrSize) r.Add = ifaceMethodOffset(ityp, midx) r.Type = objabi.R_USEIFACEMETHOD } @@ -2133,7 +2133,7 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { } func callnew(t *types.Type) ir.Node { - dowidth(t) + types.CalcSize(t) n := ir.NewUnaryExpr(base.Pos, ir.ONEWOBJ, typename(t)) n.SetType(types.NewPtr(t)) n.SetTypecheck(1) @@ -2168,7 +2168,7 @@ func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt { n.Y = assignconv(n.Y, lt, "assignment") n.Y = walkexpr(n.Y, init) } - dowidth(n.Y.Type()) + types.CalcSize(n.Y.Type()) return n } @@ -2655,7 +2655,7 @@ func mapfast(t *types.Type) int { if !t.Key().HasPointers() { return mapfast32 } - if Widthptr == 4 { + if types.PtrSize == 4 { return mapfast32ptr } base.Fatalf("small pointer %v", t.Key()) @@ -2663,7 +2663,7 @@ func mapfast(t *types.Type) int { if !t.Key().HasPointers() { return mapfast64 } - if Widthptr == 8 { + if types.PtrSize == 8 { return mapfast64ptr } // Two-word object, at least one of which is a pointer. 
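The mapfast hunks above only swap Widthptr for types.PtrSize, but the selection they sit inside is worth spelling out. A simplified model of it (the real code also checks that the map element fits in 128 bytes and routes pointer-bearing keys to the *ptr variants guarded above):

    package main

    import "fmt"

    // fastVariant sketches how walk picks a specialized runtime map
    // routine from the key; simplified from mapfast.
    func fastVariant(keyWidth int64, keyIsString bool) string {
        switch {
        case keyIsString:
            return "mapfaststr"
        case keyWidth == 4:
            return "mapfast32"
        case keyWidth == 8:
            return "mapfast64"
        }
        return "generic mapaccess/mapassign"
    }

    func main() {
        fmt.Println(fastVariant(8, false)) // e.g. int64 keys -> mapfast64
    }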
@@ -3408,7 +3408,7 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { } else { step := int64(1) remains := t.NumElem() * t.Elem().Width - combine64bit := unalignedLoad && Widthreg == 8 && t.Elem().Width <= 4 && t.Elem().IsInteger() + combine64bit := unalignedLoad && types.RegSize == 8 && t.Elem().Width <= 4 && t.Elem().IsInteger() combine32bit := unalignedLoad && t.Elem().Width <= 2 && t.Elem().IsInteger() combine16bit := unalignedLoad && t.Elem().Width == 1 && t.Elem().IsInteger() for i := int64(0); remains > 0; { @@ -3973,7 +3973,7 @@ func substArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name { n := old.CloneName() for _, t := range types_ { - dowidth(t) + types.CalcSize(t) } n.SetType(types.SubstAny(n.Type(), &types_)) if len(types_) > 0 { diff --git a/src/cmd/compile/internal/mips/ggen.go b/src/cmd/compile/internal/mips/ggen.go index 2356267df7267..9cce68821b973 100644 --- a/src/cmd/compile/internal/mips/ggen.go +++ b/src/cmd/compile/internal/mips/ggen.go @@ -7,6 +7,7 @@ package mips import ( "cmd/compile/internal/base" "cmd/compile/internal/gc" + "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/mips" ) @@ -17,8 +18,8 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { if cnt == 0 { return p } - if cnt < int64(4*gc.Widthptr) { - for i := int64(0); i < cnt; i += int64(gc.Widthptr) { + if cnt < int64(4*types.PtrSize) { + for i := int64(0); i < cnt; i += int64(types.PtrSize) { p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.FixedFrameSize()+off+i) } } else { @@ -33,9 +34,9 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { p.Reg = mips.REGSP p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0) p.Reg = mips.REGRT1 - p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(gc.Widthptr)) + p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize)) p1 := p - p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, mips.REGRT1, 0) + p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0) p = pp.Appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0) p.Reg = mips.REGRT2 gc.Patch(p, p1) diff --git a/src/cmd/compile/internal/mips64/ggen.go b/src/cmd/compile/internal/mips64/ggen.go index 4be5bc6f6e9ff..dc5f95960d1ec 100644 --- a/src/cmd/compile/internal/mips64/ggen.go +++ b/src/cmd/compile/internal/mips64/ggen.go @@ -7,6 +7,7 @@ package mips64 import ( "cmd/compile/internal/gc" "cmd/compile/internal/ir" + "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/mips" ) @@ -15,17 +16,17 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { if cnt == 0 { return p } - if cnt < int64(4*gc.Widthptr) { - for i := int64(0); i < cnt; i += int64(gc.Widthptr) { + if cnt < int64(4*types.PtrSize) { + for i := int64(0); i < cnt; i += int64(types.PtrSize) { p = pp.Appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+off+i) } - } else if cnt <= int64(128*gc.Widthptr) { + } else if cnt <= int64(128*types.PtrSize) { p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0) p.Reg = mips.REGSP p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) p.To.Name = obj.NAME_EXTERN p.To.Sym = ir.Syms.Duffzero - p.To.Offset = 8 * (128 - 
cnt/int64(gc.Widthptr)) + p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize)) } else { // ADDV $(8+frame+lo-8), SP, r1 // ADDV $cnt, r1, r2 @@ -37,9 +38,9 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { p.Reg = mips.REGSP p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0) p.Reg = mips.REGRT1 - p = pp.Appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(gc.Widthptr)) + p = pp.Appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize)) p1 := p - p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, mips.REGRT1, 0) + p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0) p = pp.Appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0) p.Reg = mips.REGRT2 gc.Patch(p, p1) diff --git a/src/cmd/compile/internal/ppc64/ggen.go b/src/cmd/compile/internal/ppc64/ggen.go index 29376badf9c99..9e5723186329d 100644 --- a/src/cmd/compile/internal/ppc64/ggen.go +++ b/src/cmd/compile/internal/ppc64/ggen.go @@ -8,6 +8,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/gc" "cmd/compile/internal/ir" + "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/ppc64" ) @@ -16,17 +17,17 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { if cnt == 0 { return p } - if cnt < int64(4*gc.Widthptr) { - for i := int64(0); i < cnt; i += int64(gc.Widthptr) { + if cnt < int64(4*types.PtrSize) { + for i := int64(0); i < cnt; i += int64(types.PtrSize) { p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, base.Ctxt.FixedFrameSize()+off+i) } - } else if cnt <= int64(128*gc.Widthptr) { + } else if cnt <= int64(128*types.PtrSize) { p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0) p.Reg = ppc64.REGSP p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) p.To.Name = obj.NAME_EXTERN p.To.Sym = ir.Syms.Duffzero - p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr)) + p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize)) } else { p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0) p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0) @@ -34,7 +35,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0) p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0) p.Reg = ppc64.REGRT1 - p = pp.Appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr)) + p = pp.Appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(types.PtrSize)) p1 := p p = pp.Appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0) p = pp.Appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0) diff --git a/src/cmd/compile/internal/riscv64/ggen.go b/src/cmd/compile/internal/riscv64/ggen.go index c77640765ffff..d18644bb1b2dd 100644 --- a/src/cmd/compile/internal/riscv64/ggen.go +++ b/src/cmd/compile/internal/riscv64/ggen.go @@ -8,6 +8,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/gc" "cmd/compile/internal/ir" + "cmd/compile/internal/types" "cmd/internal/obj" 
"cmd/internal/obj/riscv" ) @@ -20,20 +21,20 @@ func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { // Adjust the frame to account for LR. off += base.Ctxt.FixedFrameSize() - if cnt < int64(4*gc.Widthptr) { - for i := int64(0); i < cnt; i += int64(gc.Widthptr) { + if cnt < int64(4*types.PtrSize) { + for i := int64(0); i < cnt; i += int64(types.PtrSize) { p = pp.Appendpp(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off+i) } return p } - if cnt <= int64(128*gc.Widthptr) { + if cnt <= int64(128*types.PtrSize) { p = pp.Appendpp(p, riscv.AADDI, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_A0, 0) p.Reg = riscv.REG_SP p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) p.To.Name = obj.NAME_EXTERN p.To.Sym = ir.Syms.Duffzero - p.To.Offset = 8 * (128 - cnt/int64(gc.Widthptr)) + p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize)) return p } @@ -50,7 +51,7 @@ func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { p.Reg = riscv.REG_T0 p = pp.Appendpp(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_T0, 0) loop := p - p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, riscv.REG_T0, 0) + p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, riscv.REG_T0, 0) p = pp.Appendpp(p, riscv.ABNE, obj.TYPE_REG, riscv.REG_T0, 0, obj.TYPE_BRANCH, 0, 0) p.Reg = riscv.REG_T1 gc.Patch(p, loop) diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go index 644baa8548e72..8712ff78c15da 100644 --- a/src/cmd/compile/internal/ssa/export_test.go +++ b/src/cmd/compile/internal/ssa/export_test.go @@ -137,7 +137,6 @@ func init() { // Initialize just enough of the universe and the types package to make our tests function. // TODO(josharian): move universe initialization to the types package, // so this test setup can share it. - types.Dowidth = func(t *types.Type) {} for _, typ := range [...]struct { width int64 diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/types/size.go similarity index 66% rename from src/cmd/compile/internal/gc/align.go rename to src/cmd/compile/internal/types/size.go index 92826d003bf25..a54c086ded00f 100644 --- a/src/cmd/compile/internal/gc/align.go +++ b/src/cmd/compile/internal/types/size.go @@ -2,22 +2,64 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package types import ( "bytes" - "cmd/compile/internal/base" - "cmd/compile/internal/types" "fmt" "sort" + + "cmd/compile/internal/base" + "cmd/internal/src" ) +var PtrSize int + +var RegSize int + +// Slices in the runtime are represented by three components: +// +// type slice struct { +// ptr unsafe.Pointer +// len int +// cap int +// } +// +// Strings in the runtime are represented by two components: +// +// type string struct { +// ptr unsafe.Pointer +// len int +// } +// +// These variables are the offsets of fields and sizes of these structs. +var ( + SlicePtrOffset int64 + SliceLenOffset int64 + SliceCapOffset int64 + + SliceSize int64 + StringSize int64 +) + +var SkipSizeForTracing bool + +// typePos returns the position associated with t. +// This is where t was declared or where it appeared as a type expression. 
+func typePos(t *Type) src.XPos { + if pos := t.Pos(); pos.IsKnown() { + return pos + } + base.Fatalf("bad type: %v", t) + panic("unreachable") +} + // MaxWidth is the maximum size of a value on the target architecture. var MaxWidth int64 -// sizeCalculationDisabled indicates whether it is safe +// CalcSizeDisabled indicates whether it is safe // to calculate Types' widths and alignments. See dowidth. -var sizeCalculationDisabled bool +var CalcSizeDisabled bool // machine size and rounding alignment is dictated around // the size of a pointer, set in betypeinit (see ../amd64/galign.go). @@ -32,15 +74,15 @@ func Rnd(o int64, r int64) int64 { // expandiface computes the method set for interface type t by // expanding embedded interfaces. -func expandiface(t *types.Type) { - seen := make(map[*types.Sym]*types.Field) - var methods []*types.Field +func expandiface(t *Type) { + seen := make(map[*Sym]*Field) + var methods []*Field - addMethod := func(m *types.Field, explicit bool) { + addMethod := func(m *Field, explicit bool) { switch prev := seen[m.Sym]; { case prev == nil: seen[m.Sym] = m - case types.AllowsGoVersion(t.Pkg(), 1, 14) && !explicit && types.Identical(m.Type, prev.Type): + case AllowsGoVersion(t.Pkg(), 1, 14) && !explicit && Identical(m.Type, prev.Type): return default: base.ErrorfAt(m.Pos, "duplicate method %s", m.Sym.Name) @@ -53,7 +95,7 @@ func expandiface(t *types.Type) { continue } - checkwidth(m.Type) + CheckSize(m.Type) addMethod(m, true) } @@ -79,26 +121,26 @@ func expandiface(t *types.Type) { // method set. for _, t1 := range m.Type.Fields().Slice() { // Use m.Pos rather than t1.Pos to preserve embedding position. - f := types.NewField(m.Pos, t1.Sym, t1.Type) + f := NewField(m.Pos, t1.Sym, t1.Type) addMethod(f, false) } } - sort.Sort(types.MethodsByName(methods)) + sort.Sort(MethodsByName(methods)) - if int64(len(methods)) >= MaxWidth/int64(Widthptr) { + if int64(len(methods)) >= MaxWidth/int64(PtrSize) { base.ErrorfAt(typePos(t), "interface too large") } for i, m := range methods { - m.Offset = int64(i) * int64(Widthptr) + m.Offset = int64(i) * int64(PtrSize) } // Access fields directly to avoid recursively calling dowidth // within Type.Fields(). - t.Extra.(*types.Interface).Fields.Set(methods) + t.Extra.(*Interface).Fields.Set(methods) } -func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 { +func calcStructOffset(errtype *Type, t *Type, o int64, flag int) int64 { starto := o maxalign := int32(flag) if maxalign < 1 { @@ -112,7 +154,7 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 { continue } - dowidth(f.Type) + CalcSize(f.Type) if int32(f.Type.Align) > maxalign { maxalign = int32(f.Type.Align) } @@ -128,7 +170,7 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 { // NOTE(rsc): This comment may be stale. // It's possible the ordering has changed and this is // now the common case. I'm not sure. - f.Nname.(types.VarObject).RecordFrameOffset(o) + f.Nname.(VarObject).RecordFrameOffset(o) } w := f.Type.Width @@ -178,7 +220,7 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 { // path points to a slice used for tracking the sequence of types // visited. Using a pointer to a slice allows the slice capacity to // grow and limit reallocations. -func findTypeLoop(t *types.Type, path *[]*types.Type) bool { +func findTypeLoop(t *Type, path *[]*Type) bool { // We implement a simple DFS loop-finding algorithm. This // could be faster, but type cycles are rare. 
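calcStructOffset (née widstruct) is where Go structs get their padding: each field's offset is rounded up to that field's alignment, and the struct's own alignment is the maximum over its fields. The resulting layout is observable from ordinary Go, assuming a 64-bit target:

    package main

    import (
        "fmt"
        "unsafe"
    )

    type S struct {
        A byte  // offset 0
        B int64 // offset 8: rounded up from 1 to B's alignment of 8
        C int32 // offset 16
    }

    func main() {
        var s S
        fmt.Println(unsafe.Offsetof(s.B), unsafe.Offsetof(s.C)) // 8 16
        fmt.Println(unsafe.Sizeof(s), unsafe.Alignof(s))        // 24 8: size rounds up to the struct's alignment
    }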
@@ -190,7 +232,7 @@ func findTypeLoop(t *types.Type, path *[]*types.Type) bool { // Type imported from package, so it can't be part of // a type loop (otherwise that package should have // failed to compile). - if t.Sym().Pkg != types.LocalPkg { + if t.Sym().Pkg != LocalPkg { return false } @@ -202,7 +244,7 @@ func findTypeLoop(t *types.Type, path *[]*types.Type) bool { } *path = append(*path, t) - if findTypeLoop(t.Obj().(types.TypeObject).TypeDefn(), path) { + if findTypeLoop(t.Obj().(TypeObject).TypeDefn(), path) { return true } *path = (*path)[:len(*path)-1] @@ -210,17 +252,17 @@ func findTypeLoop(t *types.Type, path *[]*types.Type) bool { // Anonymous type. Recurse on contained types. switch t.Kind() { - case types.TARRAY: + case TARRAY: if findTypeLoop(t.Elem(), path) { return true } - case types.TSTRUCT: + case TSTRUCT: for _, f := range t.Fields().Slice() { if findTypeLoop(f.Type, path) { return true } } - case types.TINTER: + case TINTER: for _, m := range t.Methods().Slice() { if m.Type.IsInterface() { // embedded interface if findTypeLoop(m.Type, path) { @@ -234,12 +276,12 @@ func findTypeLoop(t *types.Type, path *[]*types.Type) bool { return false } -func reportTypeLoop(t *types.Type) { +func reportTypeLoop(t *Type) { if t.Broke() { return } - var l []*types.Type + var l []*Type if !findTypeLoop(t, &l) { base.Fatalf("failed to find type loop for: %v", t) } @@ -263,18 +305,20 @@ func reportTypeLoop(t *types.Type) { base.ErrorfAt(typePos(l[0]), msg.String()) } -// dowidth calculates and stores the size and alignment for t. +// CalcSize calculates and stores the size and alignment for t. // If sizeCalculationDisabled is set, and the size/alignment // have not already been calculated, it calls Fatal. // This is used to prevent data races in the back end. -func dowidth(t *types.Type) { +func CalcSize(t *Type) { // Calling dowidth when typecheck tracing enabled is not safe. // See issue #33658. - if base.EnableTrace && skipDowidthForTracing { + if base.EnableTrace && SkipSizeForTracing { return } - if Widthptr == 0 { - base.Fatalf("dowidth without betypeinit") + if PtrSize == 0 { + + // Assume this is a test. 
+ return } if t == nil { @@ -292,7 +336,7 @@ func dowidth(t *types.Type) { return } - if sizeCalculationDisabled { + if CalcSizeDisabled { if t.Broke() { // break infinite recursion from Fatal call below return @@ -308,7 +352,7 @@ func dowidth(t *types.Type) { } // defer checkwidth calls until after we're done - defercheckwidth() + DeferCheckSize() lno := base.Pos if pos := t.Pos(); pos.IsKnown() { @@ -320,13 +364,13 @@ func dowidth(t *types.Type) { et := t.Kind() switch et { - case types.TFUNC, types.TCHAN, types.TMAP, types.TSTRING: + case TFUNC, TCHAN, TMAP, TSTRING: break // simtype == 0 during bootstrap default: - if types.SimType[t.Kind()] != 0 { - et = types.SimType[t.Kind()] + if SimType[t.Kind()] != 0 { + et = SimType[t.Kind()] } } @@ -336,84 +380,84 @@ func dowidth(t *types.Type) { base.Fatalf("dowidth: unknown type: %v", t) // compiler-specific stuff - case types.TINT8, types.TUINT8, types.TBOOL: + case TINT8, TUINT8, TBOOL: // bool is int8 w = 1 - case types.TINT16, types.TUINT16: + case TINT16, TUINT16: w = 2 - case types.TINT32, types.TUINT32, types.TFLOAT32: + case TINT32, TUINT32, TFLOAT32: w = 4 - case types.TINT64, types.TUINT64, types.TFLOAT64: + case TINT64, TUINT64, TFLOAT64: w = 8 - t.Align = uint8(Widthreg) + t.Align = uint8(RegSize) - case types.TCOMPLEX64: + case TCOMPLEX64: w = 8 t.Align = 4 - case types.TCOMPLEX128: + case TCOMPLEX128: w = 16 - t.Align = uint8(Widthreg) + t.Align = uint8(RegSize) - case types.TPTR: - w = int64(Widthptr) - checkwidth(t.Elem()) + case TPTR: + w = int64(PtrSize) + CheckSize(t.Elem()) - case types.TUNSAFEPTR: - w = int64(Widthptr) + case TUNSAFEPTR: + w = int64(PtrSize) - case types.TINTER: // implemented as 2 pointers - w = 2 * int64(Widthptr) - t.Align = uint8(Widthptr) + case TINTER: // implemented as 2 pointers + w = 2 * int64(PtrSize) + t.Align = uint8(PtrSize) expandiface(t) - case types.TCHAN: // implemented as pointer - w = int64(Widthptr) + case TCHAN: // implemented as pointer + w = int64(PtrSize) - checkwidth(t.Elem()) + CheckSize(t.Elem()) // make fake type to check later to // trigger channel argument check. - t1 := types.NewChanArgs(t) - checkwidth(t1) + t1 := NewChanArgs(t) + CheckSize(t1) - case types.TCHANARGS: + case TCHANARGS: t1 := t.ChanArgs() - dowidth(t1) // just in case + CalcSize(t1) // just in case if t1.Elem().Width >= 1<<16 { base.ErrorfAt(typePos(t1), "channel element type too large (>64kB)") } w = 1 // anything will do - case types.TMAP: // implemented as pointer - w = int64(Widthptr) - checkwidth(t.Elem()) - checkwidth(t.Key()) + case TMAP: // implemented as pointer + w = int64(PtrSize) + CheckSize(t.Elem()) + CheckSize(t.Key()) - case types.TFORW: // should have been filled in + case TFORW: // should have been filled in reportTypeLoop(t) w = 1 // anything will do - case types.TANY: + case TANY: // not a real type; should be replaced before use. 
base.Fatalf("dowidth any") - case types.TSTRING: - if sizeofString == 0 { + case TSTRING: + if StringSize == 0 { base.Fatalf("early dowidth string") } - w = sizeofString - t.Align = uint8(Widthptr) + w = StringSize + t.Align = uint8(PtrSize) - case types.TARRAY: + case TARRAY: if t.Elem() == nil { break } - dowidth(t.Elem()) + CalcSize(t.Elem()) if t.Elem().Width != 0 { cap := (uint64(MaxWidth) - 1) / uint64(t.Elem().Width) if uint64(t.NumElem()) > cap { @@ -423,42 +467,42 @@ func dowidth(t *types.Type) { w = t.NumElem() * t.Elem().Width t.Align = t.Elem().Align - case types.TSLICE: + case TSLICE: if t.Elem() == nil { break } - w = sizeofSlice - checkwidth(t.Elem()) - t.Align = uint8(Widthptr) + w = SliceSize + CheckSize(t.Elem()) + t.Align = uint8(PtrSize) - case types.TSTRUCT: + case TSTRUCT: if t.IsFuncArgStruct() { base.Fatalf("dowidth fn struct %v", t) } - w = widstruct(t, t, 0, 1) + w = calcStructOffset(t, t, 0, 1) // make fake type to check later to // trigger function argument computation. - case types.TFUNC: - t1 := types.NewFuncArgs(t) - checkwidth(t1) - w = int64(Widthptr) // width of func type is pointer + case TFUNC: + t1 := NewFuncArgs(t) + CheckSize(t1) + w = int64(PtrSize) // width of func type is pointer // function is 3 cated structures; // compute their widths as side-effect. - case types.TFUNCARGS: + case TFUNCARGS: t1 := t.FuncArgs() - w = widstruct(t1, t1.Recvs(), 0, 0) - w = widstruct(t1, t1.Params(), w, Widthreg) - w = widstruct(t1, t1.Results(), w, Widthreg) - t1.Extra.(*types.Func).Argwid = w - if w%int64(Widthreg) != 0 { + w = calcStructOffset(t1, t1.Recvs(), 0, 0) + w = calcStructOffset(t1, t1.Params(), w, RegSize) + w = calcStructOffset(t1, t1.Results(), w, RegSize) + t1.Extra.(*Func).Argwid = w + if w%int64(RegSize) != 0 { base.Warn("bad type %v %d\n", t1, w) } t.Align = 1 } - if Widthptr == 4 && w != int64(int32(w)) { + if PtrSize == 4 && w != int64(int32(w)) { base.ErrorfAt(typePos(t), "type %v too large", t) } @@ -472,14 +516,14 @@ func dowidth(t *types.Type) { base.Pos = lno - resumecheckwidth() + ResumeCheckSize() } // CalcStructSize calculates the size of s, // filling in s.Width and s.Align, // even if size calculation is otherwise disabled. -func CalcStructSize(s *types.Type) { - s.Width = widstruct(s, s, 0, 1) // sets align +func CalcStructSize(s *Type) { + s.Width = calcStructOffset(s, s, 0, 1) // sets align } // when a type's width should be known, we call checkwidth @@ -498,9 +542,9 @@ func CalcStructSize(s *types.Type) { // is needed immediately. checkwidth makes sure the // size is evaluated eventually. -var deferredTypeStack []*types.Type +var deferredTypeStack []*Type -func checkwidth(t *types.Type) { +func CheckSize(t *Type) { if t == nil { return } @@ -512,7 +556,7 @@ func checkwidth(t *types.Type) { } if defercalc == 0 { - dowidth(t) + CalcSize(t) return } @@ -523,19 +567,68 @@ func checkwidth(t *types.Type) { } } -func defercheckwidth() { +func DeferCheckSize() { defercalc++ } -func resumecheckwidth() { +func ResumeCheckSize() { if defercalc == 1 { for len(deferredTypeStack) > 0 { t := deferredTypeStack[len(deferredTypeStack)-1] deferredTypeStack = deferredTypeStack[:len(deferredTypeStack)-1] t.SetDeferwidth(false) - dowidth(t) + CalcSize(t) } } defercalc-- } + +// PtrDataSize returns the length in bytes of the prefix of t +// containing pointer data. Anything after this offset is scalar data. 
+func PtrDataSize(t *Type) int64 { + if !t.HasPointers() { + return 0 + } + + switch t.Kind() { + case TPTR, + TUNSAFEPTR, + TFUNC, + TCHAN, + TMAP: + return int64(PtrSize) + + case TSTRING: + // struct { byte *str; intgo len; } + return int64(PtrSize) + + case TINTER: + // struct { Itab *tab; void *data; } or + // struct { Type *type; void *data; } + // Note: see comment in plive.go:onebitwalktype1. + return 2 * int64(PtrSize) + + case TSLICE: + // struct { byte *array; uintgo len; uintgo cap; } + return int64(PtrSize) + + case TARRAY: + // haspointers already eliminated t.NumElem() == 0. + return (t.NumElem()-1)*t.Elem().Width + PtrDataSize(t.Elem()) + + case TSTRUCT: + // Find the last field that has pointers. + var lastPtrField *Field + for _, t1 := range t.Fields().Slice() { + if t1.Type.HasPointers() { + lastPtrField = t1 + } + } + return lastPtrField.Offset + PtrDataSize(lastPtrField.Type) + + default: + base.Fatalf("typeptrdata: unexpected type, %v", t) + return 0 + } +} diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index 21d96c430a749..b5557b492eb7a 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -596,8 +596,8 @@ func NewPtr(elem *Type) *Type { t := New(TPTR) t.Extra = Ptr{Elem: elem} - t.Width = int64(Widthptr) - t.Align = uint8(Widthptr) + t.Width = int64(PtrSize) + t.Align = uint8(PtrSize) if NewPtrCacheEnabled { elem.cache.ptr = t } @@ -862,7 +862,7 @@ func (t *Type) Fields() *Fields { case TSTRUCT: return &t.Extra.(*Struct).fields case TINTER: - Dowidth(t) + CalcSize(t) return &t.Extra.(*Interface).Fields } base.Fatalf("Fields: type %v does not have fields", t) @@ -929,12 +929,12 @@ func (t *Type) Size() int64 { } return 0 } - Dowidth(t) + CalcSize(t) return t.Width } func (t *Type) Alignment() int64 { - Dowidth(t) + CalcSize(t) return int64(t.Align) } diff --git a/src/cmd/compile/internal/types/utils.go b/src/cmd/compile/internal/types/utils.go index 531f3ea1cab9e..2477f1da668a6 100644 --- a/src/cmd/compile/internal/types/utils.go +++ b/src/cmd/compile/internal/types/utils.go @@ -14,8 +14,6 @@ const BADWIDTH = -1000000000 // They are here to break import cycles. // TODO(gri) eliminate these dependencies. 
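PtrDataSize, exported above, bounds how far the GC scans into an object. Two worked cases, assuming PtrSize == 8 (plain arithmetic mirroring the switch, not calls into the compiler):

    package main

    import "fmt"

    func main() {
        const ptrSize = 8

        // string { ptr, len }: only the leading pointer word needs scanning.
        strPtrData := int64(ptrSize) // 8 of its 16 bytes

        // [3]string, per the TARRAY case: (NumElem-1)*Elem.Width + PtrDataSize(Elem).
        arrPtrData := 2*16 + strPtrData // 40 of its 48 bytes
        fmt.Println(strPtrData, arrPtrData)
    }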
var ( - Widthptr int - Dowidth func(*Type) TypeLinkSym func(*Type) *obj.LSym ) diff --git a/src/cmd/compile/internal/x86/ggen.go b/src/cmd/compile/internal/x86/ggen.go index f5d08a68ed272..de43594e88d3a 100644 --- a/src/cmd/compile/internal/x86/ggen.go +++ b/src/cmd/compile/internal/x86/ggen.go @@ -7,6 +7,7 @@ package x86 import ( "cmd/compile/internal/gc" "cmd/compile/internal/ir" + "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/x86" ) @@ -20,16 +21,16 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, ax *uint32) *obj.Prog *ax = 1 } - if cnt <= int64(4*gc.Widthreg) { - for i := int64(0); i < cnt; i += int64(gc.Widthreg) { + if cnt <= int64(4*types.RegSize) { + for i := int64(0); i < cnt; i += int64(types.RegSize) { p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off+i) } - } else if cnt <= int64(128*gc.Widthreg) { + } else if cnt <= int64(128*types.RegSize) { p = pp.Appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0) - p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(gc.Widthreg))) + p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(types.RegSize))) p.To.Sym = ir.Syms.Duffzero } else { - p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0) + p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0) p = pp.Appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0) p = pp.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0) p = pp.Appendpp(p, x86.ASTOSL, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0) From b9693d7627089204e6c2448f543c3512d86dae70 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 23 Dec 2020 00:41:49 -0500 Subject: [PATCH 227/474] [dev.regabi] cmd/compile: split out package typecheck [generated] This commit splits the typechecking logic into its own package, the first of a sequence of CLs to break package gc into more manageable units. [git-generate] cd src/cmd/compile/internal/gc rf ' # The binary import/export has to be part of typechecking, # because we load inlined function bodies lazily, but "exporter" # should not be. Move that out of bexport.go. mv exporter exporter.markObject exporter.markType export.go # Use the typechecking helpers, so that the calls left behind # in package gc do not need access to ctxExpr etc. ex { import "cmd/compile/internal/ir" # TODO(rsc): Should not be necessary. avoid TypecheckExpr avoid TypecheckStmt avoid TypecheckExprs avoid TypecheckStmts avoid TypecheckAssignExpr avoid TypecheckCallee var n ir.Node var ns []ir.Node typecheck(n, ctxExpr) -> TypecheckExpr(n) typecheck(n, ctxStmt) -> TypecheckStmt(n) typecheckslice(ns, ctxExpr) -> TypecheckExprs(ns) typecheckslice(ns, ctxStmt) -> TypecheckStmts(ns) typecheck(n, ctxExpr|ctxAssign) -> TypecheckAssignExpr(n) typecheck(n, ctxExpr|ctxCallee) -> TypecheckCallee(n) } # Move some typechecking API to typecheck. mv syslook LookupRuntime mv substArgTypes SubstArgTypes mv LookupRuntime SubstArgTypes syms.go mv conv Conv mv convnop ConvNop mv Conv ConvNop typecheck.go mv colasdefn AssignDefn mv colasname assignableName mv Target target.go mv initname autoexport exportsym dcl.go mv exportsym Export # Export API to be called from outside typecheck. # The ones with "Typecheck" prefixes will be renamed later to drop the prefix. 
mv adddot AddImplicitDots mv assignconv AssignConv mv expandmeth CalcMethods mv capturevarscomplete CaptureVarsComplete mv checkMapKeys CheckMapKeys mv checkreturn CheckReturn mv dclcontext DeclContext mv dclfunc DeclFunc mv declare Declare mv dotImportRefs DotImportRefs mv declImporter DeclImporter mv variter DeclVars mv defaultlit DefaultLit mv evalConst EvalConst mv expandInline ImportBody mv finishUniverse declareUniverse mv funcbody FinishFuncBody mv funchdr StartFuncBody mv indexconst IndexConst mv initTodo InitTodoFunc mv lookup Lookup mv resolve Resolve mv lookupN LookupNum mv nodAddr NodAddr mv nodAddrAt NodAddrAt mv nodnil NodNil mv origBoolConst OrigBool mv origConst OrigConst mv origIntConst OrigInt mv redeclare Redeclared mv tostruct NewStructType mv functype NewFuncType mv methodfunc NewMethodType mv structargs NewFuncParams mv temp Temp mv tempAt TempAt mv typecheckok TypecheckAllowed mv typecheck _typecheck # make room for typecheck pkg mv typecheckinl TypecheckImportedBody mv typecheckFunc TypecheckFunc mv iimport ReadImports mv iexport WriteExports mv sysfunc LookupRuntimeFunc mv sysvar LookupRuntimeVar # Move function constructors to typecheck. mv mkdotargslice MakeDotArgs mv fixVariadicCall FixVariadicCall mv closureType ClosureType mv partialCallType PartialCallType mv capturevars CaptureVars mv MakeDotArgs FixVariadicCall ClosureType PartialCallType CaptureVars typecheckclosure func.go mv autolabel AutoLabel mv AutoLabel syms.go mv Dlist dlist mv Symlink symlink mv \ AssignDefn assignableName \ AssignConv \ CaptureVarsComplete \ DeclContext \ DeclFunc \ DeclImporter \ DeclVars \ Declare \ DotImportRefs \ Export \ InitTodoFunc \ Lookup \ LookupNum \ LookupRuntimeFunc \ LookupRuntimeVar \ NewFuncParams \ NewName \ NodAddr \ NodAddrAt \ NodNil \ Redeclared \ StartFuncBody \ FinishFuncBody \ TypecheckImportedBody \ AddImplicitDots \ CalcMethods \ CheckFuncStack \ NewFuncType \ NewMethodType \ NewStructType \ TypecheckAllowed \ Temp \ TempAt \ adddot1 \ dotlist \ addmethod \ assignconvfn \ assignop \ autotmpname \ autoexport \ bexport.go \ checkdupfields \ checkembeddedtype \ closurename \ convertop \ declare_typegen \ decldepth \ dlist \ dotpath \ expand0 \ expand1 \ expandDecl \ fakeRecvField \ fnpkg \ funcStack \ funcStackEnt \ funcarg \ funcarg2 \ funcargs \ funcargs2 \ globClosgen \ ifacelookdot \ implements \ importalias \ importconst \ importfunc \ importobj \ importsym \ importtype \ importvar \ inimport \ initname \ isptrto \ loadsys \ lookdot0 \ lookdot1 \ makepartialcall \ okfor \ okforlen \ operandType \ slist \ symlink \ tointerface \ typeSet \ typeSet.add \ typeSetEntry \ typecheckExprSwitch \ typecheckTypeSwitch \ typecheckpartialcall \ typecheckrange \ typecheckrangeExpr \ typecheckselect \ typecheckswitch \ vargen \ builtin.go \ builtin_test.go \ const.go \ func.go \ iexport.go \ iimport.go \ mapfile_mmap.go \ syms.go \ target.go \ typecheck.go \ unsafe.go \ universe.go \ cmd/compile/internal/typecheck ' rm gen.go types.go types_acc.go sed -i '' 's/package gc/package typecheck/' mapfile_read.go mkbuiltin.go mv mapfile_read.go ../typecheck # not part of default build mv mkbuiltin.go ../typecheck # package main helper mv builtin ../typecheck cd ../typecheck mv dcl.go dcl1.go mv typecheck.go typecheck1.go mv universe.go universe1.go rf ' # Sweep some small files into larger ones. # "mv sym... file1.go file.go" (after the mv file1.go file.go above) # lets us insert sym... at the top of file.go. 
mv okfor okforeq universe1.go universe.go mv DeclContext vargen dcl1.go Temp TempAt autotmpname NewMethodType dcl.go mv InitTodoFunc inimport decldepth TypecheckAllowed typecheck1.go typecheck.go mv inl.go closure.go func.go mv range.go select.go swt.go stmt.go mv Lookup loadsys LookupRuntimeFunc LookupRuntimeVar syms.go mv unsafe.go const.go mv TypecheckAssignExpr AssignExpr mv TypecheckExpr Expr mv TypecheckStmt Stmt mv TypecheckExprs Exprs mv TypecheckStmts Stmts mv TypecheckCall Call mv TypecheckCallee Callee mv _typecheck check mv TypecheckFunc Func mv TypecheckFuncBody FuncBody mv TypecheckImports AllImportedBodies mv TypecheckImportedBody ImportedBody mv TypecheckInit Init mv TypecheckPackage Package ' rm gen.go go.go init.go main.go reflect.go Change-Id: Iea6a7aaf6407d690670ec58aeb36cc0b280f80b0 Reviewed-on: https://go-review.googlesource.com/c/go/+/279236 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/abiutils_test.go | 3 +- .../compile/internal/gc/abiutilsaux_test.go | 5 +- src/cmd/compile/internal/gc/alg.go | 93 +- src/cmd/compile/internal/gc/bexport.go | 185 ---- src/cmd/compile/internal/gc/builtin.go | 344 ------- src/cmd/compile/internal/gc/closure.go | 310 +------ src/cmd/compile/internal/gc/dcl.go | 580 +----------- src/cmd/compile/internal/gc/embed.go | 7 +- src/cmd/compile/internal/gc/escape.go | 13 +- src/cmd/compile/internal/gc/export.go | 191 ++-- src/cmd/compile/internal/gc/gen.go | 76 -- src/cmd/compile/internal/gc/go.go | 25 - src/cmd/compile/internal/gc/gsubr.go | 21 +- src/cmd/compile/internal/gc/init.go | 41 +- src/cmd/compile/internal/gc/inl.go | 110 +-- src/cmd/compile/internal/gc/main.go | 61 +- src/cmd/compile/internal/gc/noder.go | 41 +- src/cmd/compile/internal/gc/obj.go | 31 +- src/cmd/compile/internal/gc/order.go | 61 +- src/cmd/compile/internal/gc/pgen.go | 7 +- src/cmd/compile/internal/gc/pgen_test.go | 5 +- src/cmd/compile/internal/gc/range.go | 198 +--- src/cmd/compile/internal/gc/reflect.go | 53 +- src/cmd/compile/internal/gc/select.go | 144 +-- src/cmd/compile/internal/gc/sinit.go | 45 +- src/cmd/compile/internal/gc/ssa.go | 275 +++--- src/cmd/compile/internal/gc/subr.go | 866 +----------------- src/cmd/compile/internal/gc/swt.go | 244 +---- src/cmd/compile/internal/gc/types_acc.go | 8 - src/cmd/compile/internal/gc/unsafe.go | 90 -- src/cmd/compile/internal/gc/walk.go | 543 +++++------ src/cmd/compile/internal/typecheck/bexport.go | 102 +++ src/cmd/compile/internal/typecheck/builtin.go | 344 +++++++ .../{gc => typecheck}/builtin/runtime.go | 0 .../{gc => typecheck}/builtin_test.go | 2 +- .../internal/{gc => typecheck}/const.go | 150 ++- src/cmd/compile/internal/typecheck/dcl.go | 705 ++++++++++++++ src/cmd/compile/internal/typecheck/export.go | 79 ++ src/cmd/compile/internal/typecheck/func.go | 398 ++++++++ .../internal/{gc => typecheck}/iexport.go | 17 +- .../internal/{gc => typecheck}/iimport.go | 47 +- .../{gc => typecheck}/mapfile_mmap.go | 2 +- .../{gc => typecheck}/mapfile_read.go | 2 +- .../internal/{gc => typecheck}/mkbuiltin.go | 2 +- src/cmd/compile/internal/typecheck/stmt.go | 435 +++++++++ src/cmd/compile/internal/typecheck/subr.go | 793 ++++++++++++++++ src/cmd/compile/internal/typecheck/syms.go | 104 +++ .../{gc/types.go => typecheck/target.go} | 9 +- .../internal/{gc => typecheck}/typecheck.go | 437 +++++---- .../internal/{gc => typecheck}/universe.go | 37 +- 50 files changed, 4208 insertions(+), 4133 deletions(-) delete mode 100644 
src/cmd/compile/internal/gc/bexport.go delete mode 100644 src/cmd/compile/internal/gc/builtin.go delete mode 100644 src/cmd/compile/internal/gc/gen.go delete mode 100644 src/cmd/compile/internal/gc/types_acc.go delete mode 100644 src/cmd/compile/internal/gc/unsafe.go create mode 100644 src/cmd/compile/internal/typecheck/bexport.go create mode 100644 src/cmd/compile/internal/typecheck/builtin.go rename src/cmd/compile/internal/{gc => typecheck}/builtin/runtime.go (100%) rename src/cmd/compile/internal/{gc => typecheck}/builtin_test.go (97%) rename src/cmd/compile/internal/{gc => typecheck}/const.go (85%) create mode 100644 src/cmd/compile/internal/typecheck/dcl.go create mode 100644 src/cmd/compile/internal/typecheck/export.go create mode 100644 src/cmd/compile/internal/typecheck/func.go rename src/cmd/compile/internal/{gc => typecheck}/iexport.go (99%) rename src/cmd/compile/internal/{gc => typecheck}/iimport.go (97%) rename src/cmd/compile/internal/{gc => typecheck}/mapfile_mmap.go (98%) rename src/cmd/compile/internal/{gc => typecheck}/mapfile_read.go (96%) rename src/cmd/compile/internal/{gc => typecheck}/mkbuiltin.go (99%) create mode 100644 src/cmd/compile/internal/typecheck/stmt.go create mode 100644 src/cmd/compile/internal/typecheck/subr.go create mode 100644 src/cmd/compile/internal/typecheck/syms.go rename src/cmd/compile/internal/{gc/types.go => typecheck/target.go} (51%) rename src/cmd/compile/internal/{gc => typecheck}/typecheck.go (92%) rename src/cmd/compile/internal/{gc => typecheck}/universe.go (93%) diff --git a/src/cmd/compile/internal/gc/abiutils_test.go b/src/cmd/compile/internal/gc/abiutils_test.go index 5a88332de80be..fe9a838688196 100644 --- a/src/cmd/compile/internal/gc/abiutils_test.go +++ b/src/cmd/compile/internal/gc/abiutils_test.go @@ -7,6 +7,7 @@ package gc import ( "bufio" "cmd/compile/internal/base" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/x86" @@ -42,7 +43,7 @@ func TestMain(m *testing.M) { types.TypeLinkSym = func(t *types.Type) *obj.LSym { return typenamesym(t).Linksym() } - TypecheckInit() + typecheck.Init() os.Exit(m.Run()) } diff --git a/src/cmd/compile/internal/gc/abiutilsaux_test.go b/src/cmd/compile/internal/gc/abiutilsaux_test.go index 8585ab9a30446..e6590beac0544 100644 --- a/src/cmd/compile/internal/gc/abiutilsaux_test.go +++ b/src/cmd/compile/internal/gc/abiutilsaux_test.go @@ -9,6 +9,7 @@ package gc import ( "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/src" "fmt" @@ -19,7 +20,7 @@ import ( func mkParamResultField(t *types.Type, s *types.Sym, which ir.Class) *types.Field { field := types.NewField(src.NoXPos, s, t) - n := NewName(s) + n := typecheck.NewName(s) n.Class_ = which field.Nname = n n.SetType(t) @@ -42,7 +43,7 @@ func mkstruct(fieldtypes []*types.Type) *types.Type { } func mkFuncType(rcvr *types.Type, ins []*types.Type, outs []*types.Type) *types.Type { - q := lookup("?") + q := typecheck.Lookup("?") inf := []*types.Field{} for _, it := range ins { inf = append(inf, mkParamResultField(it, q, ir.PPARAM)) diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index dab27b49292a7..b0d46eab2feee 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -7,6 +7,7 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" "fmt" @@ -106,7 +107,7 @@ func genhash(t 
*types.Type) *obj.LSym { return closure } if memhashvarlen == nil { - memhashvarlen = sysfunc("memhash_varlen") + memhashvarlen = typecheck.LookupRuntimeFunc("memhash_varlen") } ot := 0 ot = dsymptr(closure, ot, memhashvarlen, 0) @@ -143,17 +144,17 @@ func genhash(t *types.Type) *obj.LSym { } base.Pos = base.AutogeneratedPos // less confusing than end of input - dclcontext = ir.PEXTERN + typecheck.DeclContext = ir.PEXTERN // func sym(p *T, h uintptr) uintptr args := []*ir.Field{ - ir.NewField(base.Pos, lookup("p"), nil, types.NewPtr(t)), - ir.NewField(base.Pos, lookup("h"), nil, types.Types[types.TUINTPTR]), + ir.NewField(base.Pos, typecheck.Lookup("p"), nil, types.NewPtr(t)), + ir.NewField(base.Pos, typecheck.Lookup("h"), nil, types.Types[types.TUINTPTR]), } results := []*ir.Field{ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR])} tfn := ir.NewFuncType(base.Pos, nil, args, results) - fn := dclfunc(sym, tfn) + fn := typecheck.DeclFunc(sym, tfn) np := ir.AsNode(tfn.Type().Params().Field(0).Nname) nh := ir.AsNode(tfn.Type().Params().Field(1).Nname) @@ -165,7 +166,7 @@ func genhash(t *types.Type) *obj.LSym { hashel := hashfor(t.Elem()) // for i := 0; i < nelem; i++ - ni := temp(types.Types[types.TINT]) + ni := typecheck.Temp(types.Types[types.TINT]) init := ir.NewAssignStmt(base.Pos, ni, ir.NewInt(0)) cond := ir.NewBinaryExpr(base.Pos, ir.OLT, ni, ir.NewInt(t.NumElem())) post := ir.NewAssignStmt(base.Pos, ni, ir.NewBinaryExpr(base.Pos, ir.OADD, ni, ir.NewInt(1))) @@ -177,7 +178,7 @@ func genhash(t *types.Type) *obj.LSym { nx := ir.NewIndexExpr(base.Pos, np, ni) nx.SetBounded(true) - na := nodAddr(nx) + na := typecheck.NodAddr(nx) call.Args.Append(na) call.Args.Append(nh) loop.Body.Append(ir.NewAssignStmt(base.Pos, nh, call)) @@ -201,7 +202,7 @@ func genhash(t *types.Type) *obj.LSym { hashel := hashfor(f.Type) call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil) nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym) // TODO: fields from other packages? - na := nodAddr(nx) + na := typecheck.NodAddr(nx) call.Args.Append(na) call.Args.Append(nh) fn.Body.Append(ir.NewAssignStmt(base.Pos, nh, call)) @@ -216,7 +217,7 @@ func genhash(t *types.Type) *obj.LSym { hashel := hashmem(f.Type) call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil) nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym) // TODO: fields from other packages? - na := nodAddr(nx) + na := typecheck.NodAddr(nx) call.Args.Append(na) call.Args.Append(nh) call.Args.Append(ir.NewInt(size)) @@ -234,13 +235,13 @@ func genhash(t *types.Type) *obj.LSym { ir.DumpList("genhash body", fn.Body) } - funcbody() + typecheck.FinishFuncBody() fn.SetDupok(true) - typecheckFunc(fn) + typecheck.Func(fn) ir.CurFunc = fn - typecheckslice(fn.Body, ctxStmt) + typecheck.Stmts(fn.Body) ir.CurFunc = nil if base.Debug.DclStack != 0 { @@ -248,7 +249,7 @@ func genhash(t *types.Type) *obj.LSym { } fn.SetNilCheckDisabled(true) - Target.Decls = append(Target.Decls, fn) + typecheck.Target.Decls = append(typecheck.Target.Decls, fn) // Build closure. It doesn't close over any variables, so // it contains just the function pointer. 
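Taken together, the genhash hunks above follow one declare-then-typecheck skeleton. A condensed sketch of that pattern (compiler-internal code, not standalone-runnable; every call below is taken from the hunks in this patch):

    // Declare an autogenerated package-scope function, fill in its body,
    // then typecheck header and body with the new typecheck package.
    typecheck.DeclContext = ir.PEXTERN

    // func sym(p *T, h uintptr) uintptr
    args := []*ir.Field{
        ir.NewField(base.Pos, typecheck.Lookup("p"), nil, types.NewPtr(t)),
        ir.NewField(base.Pos, typecheck.Lookup("h"), nil, types.Types[types.TUINTPTR]),
    }
    results := []*ir.Field{ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR])}
    fn := typecheck.DeclFunc(sym, ir.NewFuncType(base.Pos, nil, args, results))

    // ... append the hashing statements to fn.Body ...

    typecheck.FinishFuncBody()            // restore the enclosing declaration context
    fn.SetDupok(true)
    typecheck.Func(fn)                    // typecheck the function header
    ir.CurFunc = fn
    typecheck.Stmts(fn.Body)              // typecheck the generated body
    ir.CurFunc = nil
    typecheck.Target.Decls = append(typecheck.Target.Decls, fn)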
@@ -284,9 +285,9 @@ func hashfor(t *types.Type) ir.Node { sym = typesymprefix(".hash", t) } - n := NewName(sym) + n := typecheck.NewName(sym) ir.MarkFunc(n) - n.SetType(functype(nil, []*ir.Field{ + n.SetType(typecheck.NewFuncType(nil, []*ir.Field{ ir.NewField(base.Pos, nil, nil, types.NewPtr(t)), ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]), }, []*ir.Field{ @@ -298,9 +299,9 @@ func hashfor(t *types.Type) ir.Node { // sysClosure returns a closure which will call the // given runtime function (with no closed-over variables). func sysClosure(name string) *obj.LSym { - s := sysvar(name + "·f") + s := typecheck.LookupRuntimeVar(name + "·f") if len(s.P) == 0 { - f := sysfunc(name) + f := typecheck.LookupRuntimeFunc(name) dsymptr(s, 0, f, 0) ggloblsym(s, int32(types.PtrSize), obj.DUPOK|obj.RODATA) } @@ -349,7 +350,7 @@ func geneq(t *types.Type) *obj.LSym { return closure } if memequalvarlen == nil { - memequalvarlen = sysvar("memequal_varlen") // asm func + memequalvarlen = typecheck.LookupRuntimeVar("memequal_varlen") // asm func } ot := 0 ot = dsymptr(closure, ot, memequalvarlen, 0) @@ -372,20 +373,20 @@ func geneq(t *types.Type) *obj.LSym { // Autogenerate code for equality of structs and arrays. base.Pos = base.AutogeneratedPos // less confusing than end of input - dclcontext = ir.PEXTERN + typecheck.DeclContext = ir.PEXTERN // func sym(p, q *T) bool tfn := ir.NewFuncType(base.Pos, nil, - []*ir.Field{ir.NewField(base.Pos, lookup("p"), nil, types.NewPtr(t)), ir.NewField(base.Pos, lookup("q"), nil, types.NewPtr(t))}, - []*ir.Field{ir.NewField(base.Pos, lookup("r"), nil, types.Types[types.TBOOL])}) + []*ir.Field{ir.NewField(base.Pos, typecheck.Lookup("p"), nil, types.NewPtr(t)), ir.NewField(base.Pos, typecheck.Lookup("q"), nil, types.NewPtr(t))}, + []*ir.Field{ir.NewField(base.Pos, typecheck.Lookup("r"), nil, types.Types[types.TBOOL])}) - fn := dclfunc(sym, tfn) + fn := typecheck.DeclFunc(sym, tfn) np := ir.AsNode(tfn.Type().Params().Field(0).Nname) nq := ir.AsNode(tfn.Type().Params().Field(1).Nname) nr := ir.AsNode(tfn.Type().Results().Field(0).Nname) // Label to jump to if an equality test fails. - neq := autolabel(".neq") + neq := typecheck.AutoLabel(".neq") // We reach here only for types that have equality but // cannot be handled by the standard algorithms, @@ -450,7 +451,7 @@ func geneq(t *types.Type) *obj.LSym { } else { // Generate a for loop. // for i := 0; i < nelem; i++ - i := temp(types.Types[types.TINT]) + i := typecheck.Temp(types.Types[types.TINT]) init := ir.NewAssignStmt(base.Pos, i, ir.NewInt(0)) cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(nelem)) post := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, ir.NewInt(1))) @@ -586,7 +587,7 @@ func geneq(t *types.Type) *obj.LSym { // ret: // return - ret := autolabel(".ret") + ret := typecheck.AutoLabel(".ret") fn.Body.Append(ir.NewLabelStmt(base.Pos, ret)) fn.Body.Append(ir.NewReturnStmt(base.Pos, nil)) @@ -610,13 +611,13 @@ func geneq(t *types.Type) *obj.LSym { ir.DumpList("geneq body", fn.Body) } - funcbody() + typecheck.FinishFuncBody() fn.SetDupok(true) - typecheckFunc(fn) + typecheck.Func(fn) ir.CurFunc = fn - typecheckslice(fn.Body, ctxStmt) + typecheck.Stmts(fn.Body) ir.CurFunc = nil if base.Debug.DclStack != 0 { @@ -628,7 +629,7 @@ func geneq(t *types.Type) *obj.LSym { // neither of which can be nil, and our comparisons // are shallow. 
fn.SetNilCheckDisabled(true) - Target.Decls = append(Target.Decls, fn) + typecheck.Target.Decls = append(typecheck.Target.Decls, fn) // Generate a closure which points at the function we just generated. dsymptr(closure, 0, sym.Linksym(), 0) @@ -660,20 +661,20 @@ func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node { // which can be used to construct string equality comparison. // eqlen must be evaluated before eqmem, and shortcircuiting is required. func eqstring(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) { - s = conv(s, types.Types[types.TSTRING]) - t = conv(t, types.Types[types.TSTRING]) + s = typecheck.Conv(s, types.Types[types.TSTRING]) + t = typecheck.Conv(t, types.Types[types.TSTRING]) sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, s) tptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, t) - slen := conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, s), types.Types[types.TUINTPTR]) - tlen := conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, t), types.Types[types.TUINTPTR]) + slen := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, s), types.Types[types.TUINTPTR]) + tlen := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, t), types.Types[types.TUINTPTR]) - fn := syslook("memequal") - fn = substArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8]) + fn := typecheck.LookupRuntime("memequal") + fn = typecheck.SubstArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8]) call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, []ir.Node{sptr, tptr, ir.Copy(slen)}) - TypecheckCall(call) + typecheck.Call(call) cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, slen, tlen) - cmp = typecheck(cmp, ctxExpr).(*ir.BinaryExpr) + cmp = typecheck.Expr(cmp).(*ir.BinaryExpr) cmp.SetType(types.Types[types.TBOOL]) return cmp, call } @@ -692,9 +693,9 @@ func eqinterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) { // func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool) var fn ir.Node if s.Type().IsEmptyInterface() { - fn = syslook("efaceeq") + fn = typecheck.LookupRuntime("efaceeq") } else { - fn = syslook("ifaceeq") + fn = typecheck.LookupRuntime("ifaceeq") } stab := ir.NewUnaryExpr(base.Pos, ir.OITAB, s) @@ -707,10 +708,10 @@ func eqinterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) { tdata.SetTypecheck(1) call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, []ir.Node{stab, sdata, tdata}) - TypecheckCall(call) + typecheck.Call(call) cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, stab, ttab) - cmp = typecheck(cmp, ctxExpr).(*ir.BinaryExpr) + cmp = typecheck.Expr(cmp).(*ir.BinaryExpr) cmp.SetType(types.Types[types.TBOOL]) return cmp, call } @@ -718,8 +719,8 @@ func eqinterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) { // eqmem returns the node // memequal(&p.field, &q.field [, size]) func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node { - nx := typecheck(nodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)), ctxExpr) - ny := typecheck(nodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)), ctxExpr) + nx := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field))) + ny := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field))) fn, needsize := eqmemfunc(size, nx.Type().Elem()) call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) @@ -735,14 +736,14 @@ func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node { func eqmemfunc(size int64, t *types.Type) (fn *ir.Name, needsize bool) { switch size { default: - fn = syslook("memequal") + fn = typecheck.LookupRuntime("memequal") 
needsize = true case 1, 2, 4, 8, 16: buf := fmt.Sprintf("memequal%d", int(size)*8) - fn = syslook(buf) + fn = typecheck.LookupRuntime(buf) } - fn = substArgTypes(fn, t, t) + fn = typecheck.SubstArgTypes(fn, t, t) return fn, needsize } diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go deleted file mode 100644 index 3c377d8ba35e5..0000000000000 --- a/src/cmd/compile/internal/gc/bexport.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gc - -import ( - "cmd/compile/internal/ir" - "cmd/compile/internal/types" -) - -type exporter struct { - marked map[*types.Type]bool // types already seen by markType -} - -// markObject visits a reachable object. -func (p *exporter) markObject(n ir.Node) { - if n.Op() == ir.ONAME { - n := n.(*ir.Name) - if n.Class_ == ir.PFUNC { - inlFlood(n, exportsym) - } - } - - p.markType(n.Type()) -} - -// markType recursively visits types reachable from t to identify -// functions whose inline bodies may be needed. -func (p *exporter) markType(t *types.Type) { - if p.marked[t] { - return - } - p.marked[t] = true - - // If this is a named type, mark all of its associated - // methods. Skip interface types because t.Methods contains - // only their unexpanded method set (i.e., exclusive of - // interface embeddings), and the switch statement below - // handles their full method set. - if t.Sym() != nil && t.Kind() != types.TINTER { - for _, m := range t.Methods().Slice() { - if types.IsExported(m.Sym.Name) { - p.markObject(ir.AsNode(m.Nname)) - } - } - } - - // Recursively mark any types that can be produced given a - // value of type t: dereferencing a pointer; indexing or - // iterating over an array, slice, or map; receiving from a - // channel; accessing a struct field or interface method; or - // calling a function. - // - // Notably, we don't mark function parameter types, because - // the user already needs some way to construct values of - // those types. - switch t.Kind() { - case types.TPTR, types.TARRAY, types.TSLICE: - p.markType(t.Elem()) - - case types.TCHAN: - if t.ChanDir().CanRecv() { - p.markType(t.Elem()) - } - - case types.TMAP: - p.markType(t.Key()) - p.markType(t.Elem()) - - case types.TSTRUCT: - for _, f := range t.FieldSlice() { - if types.IsExported(f.Sym.Name) || f.Embedded != 0 { - p.markType(f.Type) - } - } - - case types.TFUNC: - for _, f := range t.Results().FieldSlice() { - p.markType(f.Type) - } - - case types.TINTER: - for _, f := range t.FieldSlice() { - if types.IsExported(f.Sym.Name) { - p.markType(f.Type) - } - } - } -} - -// ---------------------------------------------------------------------------- -// Export format - -// Tags. Must be < 0. 
-const ( - // Objects - packageTag = -(iota + 1) - constTag - typeTag - varTag - funcTag - endTag - - // Types - namedTag - arrayTag - sliceTag - dddTag - structTag - pointerTag - signatureTag - interfaceTag - mapTag - chanTag - - // Values - falseTag - trueTag - int64Tag - floatTag - fractionTag // not used by gc - complexTag - stringTag - nilTag - unknownTag // not used by gc (only appears in packages with errors) - - // Type aliases - aliasTag -) - -var predecl []*types.Type // initialized lazily - -func predeclared() []*types.Type { - if predecl == nil { - // initialize lazily to be sure that all - // elements have been initialized before - predecl = []*types.Type{ - // basic types - types.Types[types.TBOOL], - types.Types[types.TINT], - types.Types[types.TINT8], - types.Types[types.TINT16], - types.Types[types.TINT32], - types.Types[types.TINT64], - types.Types[types.TUINT], - types.Types[types.TUINT8], - types.Types[types.TUINT16], - types.Types[types.TUINT32], - types.Types[types.TUINT64], - types.Types[types.TUINTPTR], - types.Types[types.TFLOAT32], - types.Types[types.TFLOAT64], - types.Types[types.TCOMPLEX64], - types.Types[types.TCOMPLEX128], - types.Types[types.TSTRING], - - // basic type aliases - types.ByteType, - types.RuneType, - - // error - types.ErrorType, - - // untyped types - types.UntypedBool, - types.UntypedInt, - types.UntypedRune, - types.UntypedFloat, - types.UntypedComplex, - types.UntypedString, - types.Types[types.TNIL], - - // package unsafe - types.Types[types.TUNSAFEPTR], - - // invalid type (package contains errors) - types.Types[types.Txxx], - - // any type, for builtin export data - types.Types[types.TANY], - } - } - return predecl -} diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go deleted file mode 100644 index 12c70fb6d4030..0000000000000 --- a/src/cmd/compile/internal/gc/builtin.go +++ /dev/null @@ -1,344 +0,0 @@ -// Code generated by mkbuiltin.go. DO NOT EDIT. 
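The table deleted below is generated by mkbuiltin.go (moved to typecheck/ per the file list above, where builtin.go is recreated for the new package). runtimeDecls pairs each runtime routine with an index into runtimeTypes; the runtime lookups elsewhere in this patch resolve against these tables. Two call shapes copied from the alg.go hunks above:

    memhashvarlen = typecheck.LookupRuntimeFunc("memhash_varlen") // ordinary runtime function
    fn := typecheck.LookupRuntime("memequal")    // declared with TANY placeholders...
    fn = typecheck.SubstArgTypes(fn, t, t)       // ...specialized here to a concrete type t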
- -package gc - -import ( - "cmd/compile/internal/base" - "cmd/compile/internal/ir" - "cmd/compile/internal/types" -) - -var runtimeDecls = [...]struct { - name string - tag int - typ int -}{ - {"newobject", funcTag, 4}, - {"mallocgc", funcTag, 8}, - {"panicdivide", funcTag, 9}, - {"panicshift", funcTag, 9}, - {"panicmakeslicelen", funcTag, 9}, - {"panicmakeslicecap", funcTag, 9}, - {"throwinit", funcTag, 9}, - {"panicwrap", funcTag, 9}, - {"gopanic", funcTag, 11}, - {"gorecover", funcTag, 14}, - {"goschedguarded", funcTag, 9}, - {"goPanicIndex", funcTag, 16}, - {"goPanicIndexU", funcTag, 18}, - {"goPanicSliceAlen", funcTag, 16}, - {"goPanicSliceAlenU", funcTag, 18}, - {"goPanicSliceAcap", funcTag, 16}, - {"goPanicSliceAcapU", funcTag, 18}, - {"goPanicSliceB", funcTag, 16}, - {"goPanicSliceBU", funcTag, 18}, - {"goPanicSlice3Alen", funcTag, 16}, - {"goPanicSlice3AlenU", funcTag, 18}, - {"goPanicSlice3Acap", funcTag, 16}, - {"goPanicSlice3AcapU", funcTag, 18}, - {"goPanicSlice3B", funcTag, 16}, - {"goPanicSlice3BU", funcTag, 18}, - {"goPanicSlice3C", funcTag, 16}, - {"goPanicSlice3CU", funcTag, 18}, - {"printbool", funcTag, 19}, - {"printfloat", funcTag, 21}, - {"printint", funcTag, 23}, - {"printhex", funcTag, 25}, - {"printuint", funcTag, 25}, - {"printcomplex", funcTag, 27}, - {"printstring", funcTag, 29}, - {"printpointer", funcTag, 30}, - {"printuintptr", funcTag, 31}, - {"printiface", funcTag, 30}, - {"printeface", funcTag, 30}, - {"printslice", funcTag, 30}, - {"printnl", funcTag, 9}, - {"printsp", funcTag, 9}, - {"printlock", funcTag, 9}, - {"printunlock", funcTag, 9}, - {"concatstring2", funcTag, 34}, - {"concatstring3", funcTag, 35}, - {"concatstring4", funcTag, 36}, - {"concatstring5", funcTag, 37}, - {"concatstrings", funcTag, 39}, - {"cmpstring", funcTag, 40}, - {"intstring", funcTag, 43}, - {"slicebytetostring", funcTag, 44}, - {"slicebytetostringtmp", funcTag, 45}, - {"slicerunetostring", funcTag, 48}, - {"stringtoslicebyte", funcTag, 50}, - {"stringtoslicerune", funcTag, 53}, - {"slicecopy", funcTag, 54}, - {"decoderune", funcTag, 55}, - {"countrunes", funcTag, 56}, - {"convI2I", funcTag, 57}, - {"convT16", funcTag, 58}, - {"convT32", funcTag, 58}, - {"convT64", funcTag, 58}, - {"convTstring", funcTag, 58}, - {"convTslice", funcTag, 58}, - {"convT2E", funcTag, 59}, - {"convT2Enoptr", funcTag, 59}, - {"convT2I", funcTag, 59}, - {"convT2Inoptr", funcTag, 59}, - {"assertE2I", funcTag, 57}, - {"assertE2I2", funcTag, 60}, - {"assertI2I", funcTag, 57}, - {"assertI2I2", funcTag, 60}, - {"panicdottypeE", funcTag, 61}, - {"panicdottypeI", funcTag, 61}, - {"panicnildottype", funcTag, 62}, - {"ifaceeq", funcTag, 64}, - {"efaceeq", funcTag, 64}, - {"fastrand", funcTag, 66}, - {"makemap64", funcTag, 68}, - {"makemap", funcTag, 69}, - {"makemap_small", funcTag, 70}, - {"mapaccess1", funcTag, 71}, - {"mapaccess1_fast32", funcTag, 72}, - {"mapaccess1_fast64", funcTag, 72}, - {"mapaccess1_faststr", funcTag, 72}, - {"mapaccess1_fat", funcTag, 73}, - {"mapaccess2", funcTag, 74}, - {"mapaccess2_fast32", funcTag, 75}, - {"mapaccess2_fast64", funcTag, 75}, - {"mapaccess2_faststr", funcTag, 75}, - {"mapaccess2_fat", funcTag, 76}, - {"mapassign", funcTag, 71}, - {"mapassign_fast32", funcTag, 72}, - {"mapassign_fast32ptr", funcTag, 72}, - {"mapassign_fast64", funcTag, 72}, - {"mapassign_fast64ptr", funcTag, 72}, - {"mapassign_faststr", funcTag, 72}, - {"mapiterinit", funcTag, 77}, - {"mapdelete", funcTag, 77}, - {"mapdelete_fast32", funcTag, 78}, - {"mapdelete_fast64", funcTag, 78}, - 
{"mapdelete_faststr", funcTag, 78}, - {"mapiternext", funcTag, 79}, - {"mapclear", funcTag, 80}, - {"makechan64", funcTag, 82}, - {"makechan", funcTag, 83}, - {"chanrecv1", funcTag, 85}, - {"chanrecv2", funcTag, 86}, - {"chansend1", funcTag, 88}, - {"closechan", funcTag, 30}, - {"writeBarrier", varTag, 90}, - {"typedmemmove", funcTag, 91}, - {"typedmemclr", funcTag, 92}, - {"typedslicecopy", funcTag, 93}, - {"selectnbsend", funcTag, 94}, - {"selectnbrecv", funcTag, 95}, - {"selectnbrecv2", funcTag, 97}, - {"selectsetpc", funcTag, 98}, - {"selectgo", funcTag, 99}, - {"block", funcTag, 9}, - {"makeslice", funcTag, 100}, - {"makeslice64", funcTag, 101}, - {"makeslicecopy", funcTag, 102}, - {"growslice", funcTag, 104}, - {"memmove", funcTag, 105}, - {"memclrNoHeapPointers", funcTag, 106}, - {"memclrHasPointers", funcTag, 106}, - {"memequal", funcTag, 107}, - {"memequal0", funcTag, 108}, - {"memequal8", funcTag, 108}, - {"memequal16", funcTag, 108}, - {"memequal32", funcTag, 108}, - {"memequal64", funcTag, 108}, - {"memequal128", funcTag, 108}, - {"f32equal", funcTag, 109}, - {"f64equal", funcTag, 109}, - {"c64equal", funcTag, 109}, - {"c128equal", funcTag, 109}, - {"strequal", funcTag, 109}, - {"interequal", funcTag, 109}, - {"nilinterequal", funcTag, 109}, - {"memhash", funcTag, 110}, - {"memhash0", funcTag, 111}, - {"memhash8", funcTag, 111}, - {"memhash16", funcTag, 111}, - {"memhash32", funcTag, 111}, - {"memhash64", funcTag, 111}, - {"memhash128", funcTag, 111}, - {"f32hash", funcTag, 111}, - {"f64hash", funcTag, 111}, - {"c64hash", funcTag, 111}, - {"c128hash", funcTag, 111}, - {"strhash", funcTag, 111}, - {"interhash", funcTag, 111}, - {"nilinterhash", funcTag, 111}, - {"int64div", funcTag, 112}, - {"uint64div", funcTag, 113}, - {"int64mod", funcTag, 112}, - {"uint64mod", funcTag, 113}, - {"float64toint64", funcTag, 114}, - {"float64touint64", funcTag, 115}, - {"float64touint32", funcTag, 116}, - {"int64tofloat64", funcTag, 117}, - {"uint64tofloat64", funcTag, 118}, - {"uint32tofloat64", funcTag, 119}, - {"complex128div", funcTag, 120}, - {"racefuncenter", funcTag, 31}, - {"racefuncenterfp", funcTag, 9}, - {"racefuncexit", funcTag, 9}, - {"raceread", funcTag, 31}, - {"racewrite", funcTag, 31}, - {"racereadrange", funcTag, 121}, - {"racewriterange", funcTag, 121}, - {"msanread", funcTag, 121}, - {"msanwrite", funcTag, 121}, - {"msanmove", funcTag, 122}, - {"checkptrAlignment", funcTag, 123}, - {"checkptrArithmetic", funcTag, 125}, - {"libfuzzerTraceCmp1", funcTag, 127}, - {"libfuzzerTraceCmp2", funcTag, 129}, - {"libfuzzerTraceCmp4", funcTag, 130}, - {"libfuzzerTraceCmp8", funcTag, 131}, - {"libfuzzerTraceConstCmp1", funcTag, 127}, - {"libfuzzerTraceConstCmp2", funcTag, 129}, - {"libfuzzerTraceConstCmp4", funcTag, 130}, - {"libfuzzerTraceConstCmp8", funcTag, 131}, - {"x86HasPOPCNT", varTag, 6}, - {"x86HasSSE41", varTag, 6}, - {"x86HasFMA", varTag, 6}, - {"armHasVFPv4", varTag, 6}, - {"arm64HasATOMICS", varTag, 6}, -} - -func runtimeTypes() []*types.Type { - var typs [132]*types.Type - typs[0] = types.ByteType - typs[1] = types.NewPtr(typs[0]) - typs[2] = types.Types[types.TANY] - typs[3] = types.NewPtr(typs[2]) - typs[4] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])}) - typs[5] = types.Types[types.TUINTPTR] - typs[6] = types.Types[types.TBOOL] - typs[7] = types.Types[types.TUNSAFEPTR] - typs[8] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[1]), 
ir.NewField(base.Pos, nil, nil, typs[6])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])}) - typs[9] = functype(nil, nil, nil) - typs[10] = types.Types[types.TINTER] - typs[11] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[10])}, nil) - typs[12] = types.Types[types.TINT32] - typs[13] = types.NewPtr(typs[12]) - typs[14] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[13])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[10])}) - typs[15] = types.Types[types.TINT] - typs[16] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[15])}, nil) - typs[17] = types.Types[types.TUINT] - typs[18] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[17]), ir.NewField(base.Pos, nil, nil, typs[15])}, nil) - typs[19] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}, nil) - typs[20] = types.Types[types.TFLOAT64] - typs[21] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}, nil) - typs[22] = types.Types[types.TINT64] - typs[23] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22])}, nil) - typs[24] = types.Types[types.TUINT64] - typs[25] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24])}, nil) - typs[26] = types.Types[types.TCOMPLEX128] - typs[27] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[26])}, nil) - typs[28] = types.Types[types.TSTRING] - typs[29] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}, nil) - typs[30] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2])}, nil) - typs[31] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5])}, nil) - typs[32] = types.NewArray(typs[0], 32) - typs[33] = types.NewPtr(typs[32]) - typs[34] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) - typs[35] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) - typs[36] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) - typs[37] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) - typs[38] = types.NewSlice(typs[28]) - typs[39] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[38])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) - typs[40] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15])}) - typs[41] = types.NewArray(typs[0], 4) - typs[42] = types.NewPtr(typs[41]) - typs[43] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[42]), ir.NewField(base.Pos, nil, nil, 
typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) - typs[44] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) - typs[45] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) - typs[46] = types.RuneType - typs[47] = types.NewSlice(typs[46]) - typs[48] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[47])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) - typs[49] = types.NewSlice(typs[0]) - typs[50] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[49])}) - typs[51] = types.NewArray(typs[46], 32) - typs[52] = types.NewPtr(typs[51]) - typs[53] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[52]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[47])}) - typs[54] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[5])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15])}) - typs[55] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[46]), ir.NewField(base.Pos, nil, nil, typs[15])}) - typs[56] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15])}) - typs[57] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2])}) - typs[58] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])}) - typs[59] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2])}) - typs[60] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2]), ir.NewField(base.Pos, nil, nil, typs[6])}) - typs[61] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[1])}, nil) - typs[62] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1])}, nil) - typs[63] = types.NewPtr(typs[5]) - typs[64] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[63]), ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[7])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) - typs[65] = types.Types[types.TUINT32] - typs[66] = functype(nil, nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[65])}) - typs[67] = types.NewMap(typs[2], typs[2]) - typs[68] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[22]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[67])}) - typs[69] = functype(nil, 
[]*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[67])}) - typs[70] = functype(nil, nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[67])}) - typs[71] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])}) - typs[72] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])}) - typs[73] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[1])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])}) - typs[74] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[6])}) - typs[75] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[6])}) - typs[76] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[1])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[6])}) - typs[77] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil) - typs[78] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[2])}, nil) - typs[79] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])}, nil) - typs[80] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67])}, nil) - typs[81] = types.NewChan(typs[2], types.Cboth) - typs[82] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[81])}) - typs[83] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[81])}) - typs[84] = types.NewChan(typs[2], types.Crecv) - typs[85] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[84]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil) - typs[86] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[84]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) - typs[87] = types.NewChan(typs[2], types.Csend) - typs[88] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[87]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil) - typs[89] = types.NewArray(typs[0], 3) - typs[90] = tostruct([]*ir.Field{ir.NewField(base.Pos, lookup("enabled"), nil, typs[6]), ir.NewField(base.Pos, lookup("pad"), nil, typs[89]), ir.NewField(base.Pos, 
lookup("needed"), nil, typs[6]), ir.NewField(base.Pos, lookup("cgo"), nil, typs[6]), ir.NewField(base.Pos, lookup("alignme"), nil, typs[24])}) - typs[91] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil) - typs[92] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil) - typs[93] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15])}) - typs[94] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[87]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) - typs[95] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[84])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) - typs[96] = types.NewPtr(typs[6]) - typs[97] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[96]), ir.NewField(base.Pos, nil, nil, typs[84])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) - typs[98] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[63])}, nil) - typs[99] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[63]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[6])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[6])}) - typs[100] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])}) - typs[101] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[22]), ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])}) - typs[102] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[7])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])}) - typs[103] = types.NewSlice(typs[2]) - typs[104] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[103]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[103])}) - typs[105] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil) - typs[106] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil) - typs[107] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[5])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) - typs[108] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) - typs[109] = 
functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[7])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) - typs[110] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[5])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5])}) - typs[111] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[5])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5])}) - typs[112] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22]), ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22])}) - typs[113] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24]), ir.NewField(base.Pos, nil, nil, typs[24])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24])}) - typs[114] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22])}) - typs[115] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24])}) - typs[116] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[65])}) - typs[117] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}) - typs[118] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}) - typs[119] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[65])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}) - typs[120] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[26]), ir.NewField(base.Pos, nil, nil, typs[26])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[26])}) - typs[121] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil) - typs[122] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil) - typs[123] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil) - typs[124] = types.NewSlice(typs[7]) - typs[125] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[124])}, nil) - typs[126] = types.Types[types.TUINT8] - typs[127] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[126]), ir.NewField(base.Pos, nil, nil, typs[126])}, nil) - typs[128] = types.Types[types.TUINT16] - typs[129] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[128]), ir.NewField(base.Pos, nil, nil, typs[128])}, nil) - typs[130] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[65]), ir.NewField(base.Pos, nil, nil, typs[65])}, nil) - typs[131] = functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24]), ir.NewField(base.Pos, nil, nil, typs[24])}, nil) - return typs[:] -} diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index 454d97e17f0b1..29455bffd8f78 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -8,9 +8,9 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" 
"cmd/compile/internal/syntax" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/src" - "fmt" ) func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node { @@ -72,156 +72,6 @@ func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node { return clo } -// typecheckclosure typechecks an OCLOSURE node. It also creates the named -// function associated with the closure. -// TODO: This creation of the named function should probably really be done in a -// separate pass from type-checking. -func typecheckclosure(clo *ir.ClosureExpr, top int) { - fn := clo.Func - // Set current associated iota value, so iota can be used inside - // function in ConstSpec, see issue #22344 - if x := getIotaValue(); x >= 0 { - fn.Iota = x - } - - fn.ClosureType = typecheck(fn.ClosureType, ctxType) - clo.SetType(fn.ClosureType.Type()) - fn.SetClosureCalled(top&ctxCallee != 0) - - // Do not typecheck fn twice, otherwise, we will end up pushing - // fn to Target.Decls multiple times, causing initLSym called twice. - // See #30709 - if fn.Typecheck() == 1 { - return - } - - for _, ln := range fn.ClosureVars { - n := ln.Defn - if !n.Name().Captured() { - n.Name().SetCaptured(true) - if n.Name().Decldepth == 0 { - base.Fatalf("typecheckclosure: var %v does not have decldepth assigned", n) - } - - // Ignore assignments to the variable in straightline code - // preceding the first capturing by a closure. - if n.Name().Decldepth == decldepth { - n.Name().SetAssigned(false) - } - } - } - - fn.Nname.SetSym(closurename(ir.CurFunc)) - ir.MarkFunc(fn.Nname) - typecheckFunc(fn) - - // Type check the body now, but only if we're inside a function. - // At top level (in a variable initialization: curfn==nil) we're not - // ready to type check code yet; we'll check it later, because the - // underlying closure function we create is added to Target.Decls. - if ir.CurFunc != nil && clo.Type() != nil { - oldfn := ir.CurFunc - ir.CurFunc = fn - olddd := decldepth - decldepth = 1 - typecheckslice(fn.Body, ctxStmt) - decldepth = olddd - ir.CurFunc = oldfn - } - - Target.Decls = append(Target.Decls, fn) -} - -// globClosgen is like Func.Closgen, but for the global scope. -var globClosgen int32 - -// closurename generates a new unique name for a closure within -// outerfunc. -func closurename(outerfunc *ir.Func) *types.Sym { - outer := "glob." - prefix := "func" - gen := &globClosgen - - if outerfunc != nil { - if outerfunc.OClosure != nil { - prefix = "" - } - - outer = ir.FuncName(outerfunc) - - // There may be multiple functions named "_". In those - // cases, we can't use their individual Closgens as it - // would lead to name clashes. - if !ir.IsBlank(outerfunc.Nname) { - gen = &outerfunc.Closgen - } - } - - *gen++ - return lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen)) -} - -// capturevarscomplete is set to true when the capturevars phase is done. -var capturevarscomplete bool - -// capturevars is called in a separate phase after all typechecking is done. -// It decides whether each variable captured by a closure should be captured -// by value or by reference. -// We use value capturing for values <= 128 bytes that are never reassigned -// after capturing (effectively constant). -func capturevars(fn *ir.Func) { - lno := base.Pos - base.Pos = fn.Pos() - cvars := fn.ClosureVars - out := cvars[:0] - for _, v := range cvars { - if v.Type() == nil { - // If v.Type is nil, it means v looked like it - // was going to be used in the closure, but - // isn't. 
This happens in struct literals like - // s{f: x} where we can't distinguish whether - // f is a field identifier or expression until - // resolving s. - continue - } - out = append(out, v) - - // type check the & of closed variables outside the closure, - // so that the outer frame also grabs them and knows they escape. - types.CalcSize(v.Type()) - - var outer ir.Node - outer = v.Outer - outermost := v.Defn.(*ir.Name) - - // out parameters will be assigned to implicitly upon return. - if outermost.Class_ != ir.PPARAMOUT && !outermost.Name().Addrtaken() && !outermost.Name().Assigned() && v.Type().Width <= 128 { - v.SetByval(true) - } else { - outermost.Name().SetAddrtaken(true) - outer = nodAddr(outer) - } - - if base.Flag.LowerM > 1 { - var name *types.Sym - if v.Curfn != nil && v.Curfn.Nname != nil { - name = v.Curfn.Sym() - } - how := "ref" - if v.Byval() { - how = "value" - } - base.WarnfAt(v.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym(), outermost.Name().Addrtaken(), outermost.Name().Assigned(), int32(v.Type().Width)) - } - - outer = typecheck(outer, ctxExpr) - fn.ClosureEnter.Append(outer) - } - - fn.ClosureVars = out - base.Pos = lno -} - // transformclosure is called in a separate phase after escape analysis. // It transform closure bodies to properly reference captured variables. func transformclosure(fn *ir.Func) { @@ -256,7 +106,7 @@ func transformclosure(fn *ir.Func) { // we introduce function param &v *T // and v remains PAUTOHEAP with &v heapaddr // (accesses will implicitly deref &v). - addr := NewName(lookup("&" + v.Sym().Name)) + addr := typecheck.NewName(typecheck.Lookup("&" + v.Sym().Name)) addr.SetType(types.NewPtr(v.Type())) v.Heapaddr = addr v = addr @@ -300,7 +150,7 @@ func transformclosure(fn *ir.Func) { } else { // Declare variable holding addresses taken from closure // and initialize in entry prologue. - addr := NewName(lookup("&" + v.Sym().Name)) + addr := typecheck.NewName(typecheck.Lookup("&" + v.Sym().Name)) addr.SetType(types.NewPtr(v.Type())) addr.Class_ = ir.PAUTO addr.SetUsed(true) @@ -309,14 +159,14 @@ func transformclosure(fn *ir.Func) { v.Heapaddr = addr var src ir.Node = cr if v.Byval() { - src = nodAddr(cr) + src = typecheck.NodAddr(cr) } body = append(body, ir.NewAssignStmt(base.Pos, addr, src)) } } if len(body) > 0 { - typecheckslice(body, ctxStmt) + typecheck.Stmts(body) fn.Enter.Set(body) fn.SetNeedctxt(true) } @@ -346,38 +196,6 @@ func closuredebugruntimecheck(clo *ir.ClosureExpr) { } } -// closureType returns the struct type used to hold all the information -// needed in the closure for clo (clo must be a OCLOSURE node). -// The address of a variable of the returned type can be cast to a func. -func closureType(clo *ir.ClosureExpr) *types.Type { - // Create closure in the form of a composite literal. - // supposing the closure captures an int i and a string s - // and has one float64 argument and no results, - // the generated code looks like: - // - // clos = &struct{.F uintptr; i *int; s *string}{func.1, &i, &s} - // - // The use of the struct provides type information to the garbage - // collector so that it can walk the closure. We could use (in this case) - // [3]unsafe.Pointer instead, but that would leave the gc in the dark. - // The information appears in the binary in the form of type descriptors; - // the struct is unnamed so that closures in multiple packages with the - // same struct type can share the descriptor. 
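Whether a captured variable appears in this struct directly or behind a pointer is decided by the capturevars pass above: by value only for never-reassigned values of at most 128 bytes. A small runnable illustration at the source level (function names hypothetical):

    package main

    func byRef() func() int {
        n := 0
        return func() int {
            n++      // n is reassigned after capture, so it is captured by
            return n // reference: the closure struct holds a *int field
        }
    }

    func byVal() func() int {
        n := 42             // n is small and never reassigned ("effectively
        return func() int { // constant"), so it can be captured by value:
            return n        // the closure struct holds an int field
        }
    }

    func main() {
        println(byRef()()) // 1
        println(byVal()()) // 42
    }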
- fields := []*ir.Field{ - ir.NewField(base.Pos, lookup(".F"), nil, types.Types[types.TUINTPTR]), - } - for _, v := range clo.Func.ClosureVars { - typ := v.Type() - if !v.Byval() { - typ = types.NewPtr(typ) - } - fields = append(fields, ir.NewField(base.Pos, v.Sym(), nil, typ)) - } - typ := tostruct(fields) - typ.SetNoalg(true) - return typ -} - func walkclosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node { fn := clo.Func @@ -390,17 +208,17 @@ func walkclosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node { } closuredebugruntimecheck(clo) - typ := closureType(clo) + typ := typecheck.ClosureType(clo) clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ).(ir.Ntype), nil) clos.SetEsc(clo.Esc()) clos.List.Set(append([]ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, fn.Nname)}, fn.ClosureEnter...)) - addr := nodAddr(clos) + addr := typecheck.NodAddr(clos) addr.SetEsc(clo.Esc()) // Force type conversion from *struct to the func type. - cfn := convnop(addr, clo.Type()) + cfn := typecheck.ConvNop(addr, clo.Type()) // non-escaping temp to use, if any. if x := clo.Prealloc; x != nil { @@ -414,110 +232,6 @@ func walkclosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node { return walkexpr(cfn, init) } -func typecheckpartialcall(n ir.Node, sym *types.Sym) *ir.CallPartExpr { - switch n.Op() { - case ir.ODOTINTER, ir.ODOTMETH: - break - - default: - base.Fatalf("invalid typecheckpartialcall") - } - dot := n.(*ir.SelectorExpr) - - // Create top-level function. - fn := makepartialcall(dot, dot.Type(), sym) - fn.SetWrapper(true) - - return ir.NewCallPartExpr(dot.Pos(), dot.X, dot.Selection, fn) -} - -// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed -// for partial calls. -func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir.Func { - rcvrtype := dot.X.Type() - sym := ir.MethodSymSuffix(rcvrtype, meth, "-fm") - - if sym.Uniq() { - return sym.Def.(*ir.Func) - } - sym.SetUniq(true) - - savecurfn := ir.CurFunc - saveLineNo := base.Pos - ir.CurFunc = nil - - // Set line number equal to the line number where the method is declared. - var m *types.Field - if lookdot0(meth, rcvrtype, &m, false) == 1 && m.Pos.IsKnown() { - base.Pos = m.Pos - } - // Note: !m.Pos.IsKnown() happens for method expressions where - // the method is implicitly declared. The Error method of the - // built-in error type is one such method. We leave the line - // number at the use of the method expression in this - // case. See issue 29389. - - tfn := ir.NewFuncType(base.Pos, nil, - structargs(t0.Params(), true), - structargs(t0.Results(), false)) - - fn := dclfunc(sym, tfn) - fn.SetDupok(true) - fn.SetNeedctxt(true) - - // Declare and initialize variable holding receiver. 
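makepartialcall (above) builds the *-fm wrapper behind Go method values. In source terms, the feature it implements is (a runnable illustration, names hypothetical):

    package main

    import "fmt"

    type T struct{ n int }

    func (t T) M() int { return t.n }

    func main() {
        f := T{n: 7}.M   // method value: the receiver T{n: 7} is evaluated
        fmt.Println(f()) // now and stored in a closure; the compiler emits a
    }                    // wrapper (the "-fm" symbol above) that loads the
                         // saved receiver and calls M. Prints 7.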
- cr := ir.NewClosureRead(rcvrtype, types.Rnd(int64(types.PtrSize), int64(rcvrtype.Align))) - ptr := NewName(lookup(".this")) - declare(ptr, ir.PAUTO) - ptr.SetUsed(true) - var body []ir.Node - if rcvrtype.IsPtr() || rcvrtype.IsInterface() { - ptr.SetType(rcvrtype) - body = append(body, ir.NewAssignStmt(base.Pos, ptr, cr)) - } else { - ptr.SetType(types.NewPtr(rcvrtype)) - body = append(body, ir.NewAssignStmt(base.Pos, ptr, nodAddr(cr))) - } - - call := ir.NewCallExpr(base.Pos, ir.OCALL, ir.NewSelectorExpr(base.Pos, ir.OXDOT, ptr, meth), nil) - call.Args.Set(ir.ParamNames(tfn.Type())) - call.IsDDD = tfn.Type().IsVariadic() - if t0.NumResults() != 0 { - ret := ir.NewReturnStmt(base.Pos, nil) - ret.Results = []ir.Node{call} - body = append(body, ret) - } else { - body = append(body, call) - } - - fn.Body.Set(body) - funcbody() - - typecheckFunc(fn) - // Need to typecheck the body of the just-generated wrapper. - // typecheckslice() requires that Curfn is set when processing an ORETURN. - ir.CurFunc = fn - typecheckslice(fn.Body, ctxStmt) - sym.Def = fn - Target.Decls = append(Target.Decls, fn) - ir.CurFunc = savecurfn - base.Pos = saveLineNo - - return fn -} - -// partialCallType returns the struct type used to hold all the information -// needed in the closure for n (n must be a OCALLPART node). -// The address of a variable of the returned type can be cast to a func. -func partialCallType(n *ir.CallPartExpr) *types.Type { - t := tostruct([]*ir.Field{ - ir.NewField(base.Pos, lookup("F"), nil, types.Types[types.TUINTPTR]), - ir.NewField(base.Pos, lookup("R"), nil, n.X.Type()), - }) - t.SetNoalg(true) - return t -} - func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node { // Create closure in the form of a composite literal. // For x.M with receiver (x) type T, the generated code looks like: @@ -532,24 +246,24 @@ func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node { n.X = cheapexpr(n.X, init) n.X = walkexpr(n.X, nil) - tab := typecheck(ir.NewUnaryExpr(base.Pos, ir.OITAB, n.X), ctxExpr) + tab := typecheck.Expr(ir.NewUnaryExpr(base.Pos, ir.OITAB, n.X)) c := ir.NewUnaryExpr(base.Pos, ir.OCHECKNIL, tab) c.SetTypecheck(1) init.Append(c) } - typ := partialCallType(n) + typ := typecheck.PartialCallType(n) clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ).(ir.Ntype), nil) clos.SetEsc(n.Esc()) clos.List = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, n.Func.Nname), n.X} - addr := nodAddr(clos) + addr := typecheck.NodAddr(clos) addr.SetEsc(n.Esc()) // Force type conversion from *struct to the func type. - cfn := convnop(addr, n.Type()) + cfn := typecheck.ConvNop(addr, n.Type()) // non-escaping temp to use, if any. if x := n.Prealloc; x != nil { diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 1189d0ec1205b..e53bba44adc37 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -8,11 +8,11 @@ import ( "bytes" "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/src" "fmt" - "strings" ) func EnableNoWriteBarrierRecCheck() { @@ -28,154 +28,6 @@ func NoWriteBarrierRecCheck() { var nowritebarrierrecCheck *nowritebarrierrecChecker -// redeclare emits a diagnostic about symbol s being redeclared at pos. 
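At the source level, the diagnostic assembled by redeclare looks like this (a minimal non-compiling illustration; the position on the second row comes from s.Lastlineno):

    package p

    var x int    // previous declaration recorded at this position
    var x string // error: x redeclared in this block
                 //        ...: previous declaration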
-func redeclare(pos src.XPos, s *types.Sym, where string) { - if !s.Lastlineno.IsKnown() { - pkgName := dotImportRefs[s.Def.(*ir.Ident)] - base.ErrorfAt(pos, "%v redeclared %s\n"+ - "\t%v: previous declaration during import %q", s, where, base.FmtPos(pkgName.Pos()), pkgName.Pkg.Path) - } else { - prevPos := s.Lastlineno - - // When an import and a declaration collide in separate files, - // present the import as the "redeclared", because the declaration - // is visible where the import is, but not vice versa. - // See issue 4510. - if s.Def == nil { - pos, prevPos = prevPos, pos - } - - base.ErrorfAt(pos, "%v redeclared %s\n"+ - "\t%v: previous declaration", s, where, base.FmtPos(prevPos)) - } -} - -var vargen int - -// declare individual names - var, typ, const - -var declare_typegen int - -// declare records that Node n declares symbol n.Sym in the specified -// declaration context. -func declare(n *ir.Name, ctxt ir.Class) { - if ir.IsBlank(n) { - return - } - - s := n.Sym() - - // kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later. - if !inimport && !typecheckok && s.Pkg != types.LocalPkg { - base.ErrorfAt(n.Pos(), "cannot declare name %v", s) - } - - gen := 0 - if ctxt == ir.PEXTERN { - if s.Name == "init" { - base.ErrorfAt(n.Pos(), "cannot declare init - must be func") - } - if s.Name == "main" && s.Pkg.Name == "main" { - base.ErrorfAt(n.Pos(), "cannot declare main - must be func") - } - Target.Externs = append(Target.Externs, n) - } else { - if ir.CurFunc == nil && ctxt == ir.PAUTO { - base.Pos = n.Pos() - base.Fatalf("automatic outside function") - } - if ir.CurFunc != nil && ctxt != ir.PFUNC && n.Op() == ir.ONAME { - ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n) - } - if n.Op() == ir.OTYPE { - declare_typegen++ - gen = declare_typegen - } else if n.Op() == ir.ONAME && ctxt == ir.PAUTO && !strings.Contains(s.Name, "·") { - vargen++ - gen = vargen - } - types.Pushdcl(s) - n.Curfn = ir.CurFunc - } - - if ctxt == ir.PAUTO { - n.SetFrameOffset(0) - } - - if s.Block == types.Block { - // functype will print errors about duplicate function arguments. - // Don't repeat the error here. 
- if ctxt != ir.PPARAM && ctxt != ir.PPARAMOUT { - redeclare(n.Pos(), s, "in this block") - } - } - - s.Block = types.Block - s.Lastlineno = base.Pos - s.Def = n - n.Vargen = int32(gen) - n.Class_ = ctxt - if ctxt == ir.PFUNC { - n.Sym().SetFunc(true) - } - - autoexport(n, ctxt) -} - -// declare variables from grammar -// new_name_list (type | [type] = expr_list) -func variter(vl []*ir.Name, t ir.Ntype, el []ir.Node) []ir.Node { - var init []ir.Node - doexpr := len(el) > 0 - - if len(el) == 1 && len(vl) > 1 { - e := el[0] - as2 := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - as2.Rhs = []ir.Node{e} - for _, v := range vl { - as2.Lhs.Append(v) - declare(v, dclcontext) - v.Ntype = t - v.Defn = as2 - if ir.CurFunc != nil { - init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v)) - } - } - - return append(init, as2) - } - - for i, v := range vl { - var e ir.Node - if doexpr { - if i >= len(el) { - base.Errorf("assignment mismatch: %d variables but %d values", len(vl), len(el)) - break - } - e = el[i] - } - - declare(v, dclcontext) - v.Ntype = t - - if e != nil || ir.CurFunc != nil || ir.IsBlank(v) { - if ir.CurFunc != nil { - init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v)) - } - as := ir.NewAssignStmt(base.Pos, v, e) - init = append(init, as) - if e != nil { - v.Defn = as - } - } - } - - if len(el) > len(vl) { - base.Errorf("assignment mismatch: %d variables but %d values", len(vl), len(el)) - } - return init -} - // oldname returns the Node that declares symbol s in the current scope. // If no such Node currently exists, an ONONAME Node is returned instead. // Automatically creates a new closure variable if the referenced symbol was @@ -204,7 +56,7 @@ func oldname(s *types.Sym) ir.Node { c := n.Name().Innermost if c == nil || c.Curfn != ir.CurFunc { // Do not have a closure var for the active closure yet; make one. - c = NewName(s) + c = typecheck.NewName(s) c.Class_ = ir.PAUTOHEAP c.SetIsClosureVar(true) c.SetIsDDD(n.IsDDD()) @@ -236,419 +88,10 @@ func importName(sym *types.Sym) ir.Node { return n } -// := declarations -func colasname(n ir.Node) bool { - switch n.Op() { - case ir.ONAME, - ir.ONONAME, - ir.OPACK, - ir.OTYPE, - ir.OLITERAL: - return n.Sym() != nil - } - - return false -} - -func colasdefn(left []ir.Node, defn ir.Node) { - for _, n := range left { - if n.Sym() != nil { - n.Sym().SetUniq(true) - } - } - - var nnew, nerr int - for i, n := range left { - if ir.IsBlank(n) { - continue - } - if !colasname(n) { - base.ErrorfAt(defn.Pos(), "non-name %v on left side of :=", n) - nerr++ - continue - } - - if !n.Sym().Uniq() { - base.ErrorfAt(defn.Pos(), "%v repeated on left side of :=", n.Sym()) - n.SetDiag(true) - nerr++ - continue - } - - n.Sym().SetUniq(false) - if n.Sym().Block == types.Block { - continue - } - - nnew++ - n := NewName(n.Sym()) - declare(n, dclcontext) - n.Defn = defn - defn.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n)) - left[i] = n - } - - if nnew == 0 && nerr == 0 { - base.ErrorfAt(defn.Pos(), "no new variables on left side of :=") - } -} - -// declare the function proper -// and declare the arguments. -// called in extern-declaration context -// returns in auto-declaration context. 
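funchdr and funcbody (below) must always pair: funchdr pushes the current Curfn/dclcontext onto funcStack and switches into the function, funcbody pops and restores them, and CheckFuncStack reports a fatal error if any funchdr was left unmatched. In outline:

    funchdr(fn) // save {ir.CurFunc, dclcontext}, set dclcontext = ir.PAUTO,
                // open a scope, declare fn's receiver, params, and results
    // ... build or typecheck fn's body with ir.CurFunc == fn ...
    funcbody()  // close the scope, restore the saved ir.CurFunc/dclcontext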
-func funchdr(fn *ir.Func) { - // change the declaration context from extern to auto - funcStack = append(funcStack, funcStackEnt{ir.CurFunc, dclcontext}) - ir.CurFunc = fn - dclcontext = ir.PAUTO - - types.Markdcl() - - if fn.Nname.Ntype != nil { - funcargs(fn.Nname.Ntype.(*ir.FuncType)) - } else { - funcargs2(fn.Type()) - } -} - -func funcargs(nt *ir.FuncType) { - if nt.Op() != ir.OTFUNC { - base.Fatalf("funcargs %v", nt.Op()) - } - - // re-start the variable generation number - // we want to use small numbers for the return variables, - // so let them have the chunk starting at 1. - // - // TODO(mdempsky): This is ugly, and only necessary because - // esc.go uses Vargen to figure out result parameters' index - // within the result tuple. - vargen = len(nt.Results) - - // declare the receiver and in arguments. - if nt.Recv != nil { - funcarg(nt.Recv, ir.PPARAM) - } - for _, n := range nt.Params { - funcarg(n, ir.PPARAM) - } - - oldvargen := vargen - vargen = 0 - - // declare the out arguments. - gen := len(nt.Params) - for _, n := range nt.Results { - if n.Sym == nil { - // Name so that escape analysis can track it. ~r stands for 'result'. - n.Sym = lookupN("~r", gen) - gen++ - } - if n.Sym.IsBlank() { - // Give it a name so we can assign to it during return. ~b stands for 'blank'. - // The name must be different from ~r above because if you have - // func f() (_ int) - // func g() int - // f is allowed to use a plain 'return' with no arguments, while g is not. - // So the two cases must be distinguished. - n.Sym = lookupN("~b", gen) - gen++ - } - - funcarg(n, ir.PPARAMOUT) - } - - vargen = oldvargen -} - -func funcarg(n *ir.Field, ctxt ir.Class) { - if n.Sym == nil { - return - } - - name := ir.NewNameAt(n.Pos, n.Sym) - n.Decl = name - name.Ntype = n.Ntype - name.SetIsDDD(n.IsDDD) - declare(name, ctxt) - - vargen++ - n.Decl.Vargen = int32(vargen) -} - -// Same as funcargs, except run over an already constructed TFUNC. -// This happens during import, where the hidden_fndcl rule has -// used functype directly to parse the function's type. -func funcargs2(t *types.Type) { - if t.Kind() != types.TFUNC { - base.Fatalf("funcargs2 %v", t) - } - - for _, f := range t.Recvs().Fields().Slice() { - funcarg2(f, ir.PPARAM) - } - for _, f := range t.Params().Fields().Slice() { - funcarg2(f, ir.PPARAM) - } - for _, f := range t.Results().Fields().Slice() { - funcarg2(f, ir.PPARAMOUT) - } -} - -func funcarg2(f *types.Field, ctxt ir.Class) { - if f.Sym == nil { - return - } - n := ir.NewNameAt(f.Pos, f.Sym) - f.Nname = n - n.SetType(f.Type) - n.SetIsDDD(f.IsDDD()) - declare(n, ctxt) -} - -var funcStack []funcStackEnt // stack of previous values of Curfn/dclcontext - -type funcStackEnt struct { - curfn *ir.Func - dclcontext ir.Class -} - -func CheckFuncStack() { - if len(funcStack) != 0 { - base.Fatalf("funcStack is non-empty: %v", len(funcStack)) - } -} - -// finish the body. -// called in auto-declaration context. -// returns in extern-declaration context. -func funcbody() { - // change the declaration context from auto to previous context - types.Popdcl() - var e funcStackEnt - funcStack, e = funcStack[:len(funcStack)-1], funcStack[len(funcStack)-1] - ir.CurFunc, dclcontext = e.curfn, e.dclcontext -} - -// structs, functions, and methods. -// they don't belong here, but where do they belong? 
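The ~r/~b naming in funcargs above encodes a real language rule; a minimal pair (illustrative):

	package p

	func f() (_ int) { return }   // ok: blank named result, bare return allowed
	func g() int     { return 0 } // a bare "return" here would be rejected,
	                              // which is why blank results still get a
	                              // distinct ~b name for the return path
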
-func checkembeddedtype(t *types.Type) { - if t == nil { - return - } - - if t.Sym() == nil && t.IsPtr() { - t = t.Elem() - if t.IsInterface() { - base.Errorf("embedded type cannot be a pointer to interface") - } - } - - if t.IsPtr() || t.IsUnsafePtr() { - base.Errorf("embedded type cannot be a pointer") - } else if t.Kind() == types.TFORW && !t.ForwardType().Embedlineno.IsKnown() { - t.ForwardType().Embedlineno = base.Pos - } -} - -// checkdupfields emits errors for duplicately named fields or methods in -// a list of struct or interface types. -func checkdupfields(what string, fss ...[]*types.Field) { - seen := make(map[*types.Sym]bool) - for _, fs := range fss { - for _, f := range fs { - if f.Sym == nil || f.Sym.IsBlank() { - continue - } - if seen[f.Sym] { - base.ErrorfAt(f.Pos, "duplicate %s %s", what, f.Sym.Name) - continue - } - seen[f.Sym] = true - } - } -} - -// convert a parsed id/type list into -// a type for struct/interface/arglist -func tostruct(l []*ir.Field) *types.Type { - lno := base.Pos - - fields := make([]*types.Field, len(l)) - for i, n := range l { - base.Pos = n.Pos - - if n.Ntype != nil { - n.Type = typecheckNtype(n.Ntype).Type() - n.Ntype = nil - } - f := types.NewField(n.Pos, n.Sym, n.Type) - if n.Embedded { - checkembeddedtype(n.Type) - f.Embedded = 1 - } - f.Note = n.Note - fields[i] = f - } - checkdupfields("field", fields) - - base.Pos = lno - return types.NewStruct(types.LocalPkg, fields) -} - -func tointerface(nmethods []*ir.Field) *types.Type { - if len(nmethods) == 0 { - return types.Types[types.TINTER] - } - - lno := base.Pos - - methods := make([]*types.Field, len(nmethods)) - for i, n := range nmethods { - base.Pos = n.Pos - if n.Ntype != nil { - n.Type = typecheckNtype(n.Ntype).Type() - n.Ntype = nil - } - methods[i] = types.NewField(n.Pos, n.Sym, n.Type) - } - - base.Pos = lno - return types.NewInterface(types.LocalPkg, methods) -} - func fakeRecv() *ir.Field { return ir.NewField(base.Pos, nil, nil, types.FakeRecvType()) } -func fakeRecvField() *types.Field { - return types.NewField(src.NoXPos, nil, types.FakeRecvType()) -} - -// turn a parsed function declaration into a type -func functype(nrecv *ir.Field, nparams, nresults []*ir.Field) *types.Type { - funarg := func(n *ir.Field) *types.Field { - lno := base.Pos - base.Pos = n.Pos - - if n.Ntype != nil { - n.Type = typecheckNtype(n.Ntype).Type() - n.Ntype = nil - } - - f := types.NewField(n.Pos, n.Sym, n.Type) - f.SetIsDDD(n.IsDDD) - if n.Decl != nil { - n.Decl.SetType(f.Type) - f.Nname = n.Decl - } - - base.Pos = lno - return f - } - funargs := func(nn []*ir.Field) []*types.Field { - res := make([]*types.Field, len(nn)) - for i, n := range nn { - res[i] = funarg(n) - } - return res - } - - var recv *types.Field - if nrecv != nil { - recv = funarg(nrecv) - } - - t := types.NewSignature(types.LocalPkg, recv, funargs(nparams), funargs(nresults)) - checkdupfields("argument", t.Recvs().FieldSlice(), t.Params().FieldSlice(), t.Results().FieldSlice()) - return t -} - -// Add a method, declared as a function. -// - msym is the method symbol -// - t is function type (with receiver) -// Returns a pointer to the existing or added Field; or nil if there's an error. 
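What checkembeddedtype above rejects, seen from user code (illustrative):

	package p

	type I interface{ M() }
	type P *int

	type S struct {
		*I // error: embedded type cannot be a pointer to interface
		P  // error: embedded type cannot be a pointer
	}
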
-func addmethod(n *ir.Func, msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field { - if msym == nil { - base.Fatalf("no method symbol") - } - - // get parent type sym - rf := t.Recv() // ptr to this structure - if rf == nil { - base.Errorf("missing receiver") - return nil - } - - mt := types.ReceiverBaseType(rf.Type) - if mt == nil || mt.Sym() == nil { - pa := rf.Type - t := pa - if t != nil && t.IsPtr() { - if t.Sym() != nil { - base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t) - return nil - } - t = t.Elem() - } - - switch { - case t == nil || t.Broke(): - // rely on typecheck having complained before - case t.Sym() == nil: - base.Errorf("invalid receiver type %v (%v is not a defined type)", pa, t) - case t.IsPtr(): - base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t) - case t.IsInterface(): - base.Errorf("invalid receiver type %v (%v is an interface type)", pa, t) - default: - // Should have picked off all the reasons above, - // but just in case, fall back to generic error. - base.Errorf("invalid receiver type %v (%L / %L)", pa, pa, t) - } - return nil - } - - if local && mt.Sym().Pkg != types.LocalPkg { - base.Errorf("cannot define new methods on non-local type %v", mt) - return nil - } - - if msym.IsBlank() { - return nil - } - - if mt.IsStruct() { - for _, f := range mt.Fields().Slice() { - if f.Sym == msym { - base.Errorf("type %v has both field and method named %v", mt, msym) - f.SetBroke(true) - return nil - } - } - } - - for _, f := range mt.Methods().Slice() { - if msym.Name != f.Sym.Name { - continue - } - // types.Identical only checks that incoming and result parameters match, - // so explicitly check that the receiver parameters match too. - if !types.Identical(t, f.Type) || !types.Identical(t.Recv().Type, f.Type.Recv().Type) { - base.Errorf("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t) - } - return f - } - - f := types.NewField(base.Pos, msym, t) - f.Nname = n.Nname - f.SetNointerface(nointerface) - - mt.Methods().Append(f) - return f -} - // funcsym returns s·f. func funcsym(s *types.Sym) *types.Sym { // funcsymsmu here serves to protect not just mutations of funcsyms (below), @@ -700,21 +143,6 @@ func makefuncsym(s *types.Sym) { } } -func dclfunc(sym *types.Sym, tfn ir.Ntype) *ir.Func { - if tfn.Op() != ir.OTFUNC { - base.Fatalf("expected OTFUNC node, got %v", tfn) - } - - fn := ir.NewFunc(base.Pos) - fn.Nname = ir.NewFuncNameAt(base.Pos, sym, fn) - fn.Nname.Defn = fn - fn.Nname.Ntype = tfn - ir.MarkFunc(fn.Nname) - funchdr(fn) - fn.Nname.Ntype = typecheckNtype(fn.Nname.Ntype) - return fn -} - type nowritebarrierrecChecker struct { // extraCalls contains extra function calls that may not be // visible during later analysis. It maps from the ODCLFUNC of @@ -742,7 +170,7 @@ func newNowritebarrierrecChecker() *nowritebarrierrecChecker { // important to handle it for this check, so we model it // directly. This has to happen before transformclosure since // it's a lot harder to work out the argument after. - for _, n := range Target.Decls { + for _, n := range typecheck.Target.Decls { if n.Op() != ir.ODCLFUNC { continue } @@ -819,7 +247,7 @@ func (c *nowritebarrierrecChecker) check() { // q is the queue of ODCLFUNC Nodes to visit in BFS order. 
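Two of the addmethod rejections from earlier in this hunk, in user terms (illustrative):

	package p

	import "sort"

	type T struct{ m int }

	func (T) m() {} // error: type T has both field and method named m

	func (sort.IntSlice) Sum() {} // error: cannot define new methods on
	                              // non-local type sort.IntSlice
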
var q ir.NameQueue - for _, n := range Target.Decls { + for _, n := range typecheck.Target.Decls { if n.Op() != ir.ODCLFUNC { continue } diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go index 70c5c2a25a048..bcfec3cad32e7 100644 --- a/src/cmd/compile/internal/gc/embed.go +++ b/src/cmd/compile/internal/gc/embed.go @@ -8,6 +8,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/syntax" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" @@ -61,13 +62,13 @@ func varEmbed(p *noder, names []*ir.Name, typ ir.Ntype, exprs []ir.Node, embeds p.errorAt(pos, "go:embed cannot apply to var without type") return exprs } - if dclcontext != ir.PEXTERN { + if typecheck.DeclContext != ir.PEXTERN { p.errorAt(pos, "go:embed cannot apply to var inside func") return exprs } v := names[0] - Target.Embeds = append(Target.Embeds, v) + typecheck.Target.Embeds = append(typecheck.Target.Embeds, v) v.Embed = new([]ir.Embed) for _, e := range embeds { *v.Embed = append(*v.Embed, ir.Embed{Pos: p.makeXPos(e.Pos), Patterns: e.Patterns}) @@ -184,7 +185,7 @@ func embedFileLess(x, y string) bool { } func dumpembeds() { - for _, v := range Target.Embeds { + for _, v := range typecheck.Target.Embeds { initEmbed(v) } } diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 6843d8b00e45b..187313695fa1c 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -8,6 +8,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/logopt" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/src" "fmt" @@ -870,7 +871,7 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) { case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: call := call.(*ir.CallExpr) - fixVariadicCall(call) + typecheck.FixVariadicCall(call) // Pick out the function callee, if statically known. var fn *ir.Name @@ -1877,10 +1878,10 @@ func heapAllocReason(n ir.Node) string { return "too large for stack" } - if n.Op() == ir.OCLOSURE && closureType(n.(*ir.ClosureExpr)).Size() >= ir.MaxImplicitStackVarSize { + if n.Op() == ir.OCLOSURE && typecheck.ClosureType(n.(*ir.ClosureExpr)).Size() >= ir.MaxImplicitStackVarSize { return "too large for stack" } - if n.Op() == ir.OCALLPART && partialCallType(n.(*ir.CallPartExpr)).Size() >= ir.MaxImplicitStackVarSize { + if n.Op() == ir.OCALLPART && typecheck.PartialCallType(n.(*ir.CallPartExpr)).Size() >= ir.MaxImplicitStackVarSize { return "too large for stack" } @@ -1992,8 +1993,8 @@ func moveToHeap(n *ir.Name) { // Allocate a local stack variable to hold the pointer to the heap copy. // temp will add it to the function declaration list automatically. - heapaddr := temp(types.NewPtr(n.Type())) - heapaddr.SetSym(lookup("&" + n.Sym().Name)) + heapaddr := typecheck.Temp(types.NewPtr(n.Type())) + heapaddr.SetSym(typecheck.Lookup("&" + n.Sym().Name)) heapaddr.SetPos(n.Pos()) // Unset AutoTemp to persist the &foo variable name through SSA to @@ -2013,7 +2014,7 @@ func moveToHeap(n *ir.Name) { // Preserve a copy so we can still write code referring to the original, // and substitute that copy into the function declaration list // so that analyses of the local (on-stack) variables use it. 
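moveToHeap is the machinery behind the familiar escape diagnostic; a variable that triggers it (illustrative; build with -gcflags=-m to see "moved to heap: x"):

	package p

	func leak() *int {
		x := 42   // x outlives the frame, so it gets a heap copy plus a
		return &x // stack-resident pointer named "&x", as set up above
	}
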
- stackcopy := NewName(n.Sym()) + stackcopy := typecheck.NewName(n.Sym()) stackcopy.SetType(n.Type()) stackcopy.SetFrameOffset(n.FrameOffset()) stackcopy.Class_ = n.Class_ diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 2855f815bed62..a414962431b8a 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -7,9 +7,9 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/bio" - "cmd/internal/src" "fmt" "go/constant" ) @@ -21,54 +21,16 @@ func exportf(bout *bio.Writer, format string, args ...interface{}) { } } -// exportsym marks n for export (or reexport). -func exportsym(n *ir.Name) { - if n.Sym().OnExportList() { - return - } - n.Sym().SetOnExportList(true) - - if base.Flag.E != 0 { - fmt.Printf("export symbol %v\n", n.Sym()) - } - - Target.Exports = append(Target.Exports, n) -} - -func initname(s string) bool { - return s == "init" -} - -func autoexport(n *ir.Name, ctxt ir.Class) { - if n.Sym().Pkg != types.LocalPkg { - return - } - if (ctxt != ir.PEXTERN && ctxt != ir.PFUNC) || dclcontext != ir.PEXTERN { - return - } - if n.Type() != nil && n.Type().IsKind(types.TFUNC) && ir.IsMethod(n) { - return - } - - if types.IsExported(n.Sym().Name) || initname(n.Sym().Name) { - exportsym(n) - } - if base.Flag.AsmHdr != "" && !n.Sym().Asm() { - n.Sym().SetAsm(true) - Target.Asms = append(Target.Asms, n) - } -} - func dumpexport(bout *bio.Writer) { p := &exporter{marked: make(map[*types.Type]bool)} - for _, n := range Target.Exports { + for _, n := range typecheck.Target.Exports { p.markObject(n) } // The linker also looks for the $$ marker - use char after $$ to distinguish format. exportf(bout, "\n$$B\n") // indicate binary export format off := bout.Offset() - iexport(bout.Writer) + typecheck.WriteExports(bout.Writer) size := bout.Offset() - off exportf(bout, "\n$$\n") @@ -77,78 +39,13 @@ func dumpexport(bout *bio.Writer) { } } -func importsym(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class) *ir.Name { - if n := s.PkgDef(); n != nil { - base.Fatalf("importsym of symbol that already exists: %v", n) - } - - n := ir.NewDeclNameAt(pos, op, s) - n.Class_ = ctxt // TODO(mdempsky): Move this into NewDeclNameAt too? - s.SetPkgDef(n) - s.Importdef = ipkg - return n -} - -// importtype returns the named type declared by symbol s. -// If no such type has been declared yet, a forward declaration is returned. -// ipkg is the package being imported -func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *ir.Name { - n := importsym(ipkg, pos, s, ir.OTYPE, ir.PEXTERN) - n.SetType(types.NewNamed(n)) - return n -} - -// importobj declares symbol s as an imported object representable by op. -// ipkg is the package being imported -func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) *ir.Name { - n := importsym(ipkg, pos, s, op, ctxt) - n.SetType(t) - if ctxt == ir.PFUNC { - n.Sym().SetFunc(true) - } - return n -} - -// importconst declares symbol s as an imported constant with type t and value val. -// ipkg is the package being imported -func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val constant.Value) *ir.Name { - n := importobj(ipkg, pos, s, ir.OLITERAL, ir.PEXTERN, t) - n.SetVal(val) - return n -} - -// importfunc declares symbol s as an imported function with type t. 
-// ipkg is the package being imported -func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name { - n := importobj(ipkg, pos, s, ir.ONAME, ir.PFUNC, t) - - fn := ir.NewFunc(pos) - fn.SetType(t) - n.SetFunc(fn) - fn.Nname = n - - return n -} - -// importvar declares symbol s as an imported variable with type t. -// ipkg is the package being imported -func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name { - return importobj(ipkg, pos, s, ir.ONAME, ir.PEXTERN, t) -} - -// importalias declares symbol s as an imported type alias with type t. -// ipkg is the package being imported -func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name { - return importobj(ipkg, pos, s, ir.OTYPE, ir.PEXTERN, t) -} - func dumpasmhdr() { b, err := bio.Create(base.Flag.AsmHdr) if err != nil { base.Fatalf("%v", err) } fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", types.LocalPkg.Name) - for _, n := range Target.Asms { + for _, n := range typecheck.Target.Asms { if n.Sym().IsBlank() { continue } @@ -176,3 +73,83 @@ func dumpasmhdr() { b.Close() } + +type exporter struct { + marked map[*types.Type]bool // types already seen by markType +} + +// markObject visits a reachable object. +func (p *exporter) markObject(n ir.Node) { + if n.Op() == ir.ONAME { + n := n.(*ir.Name) + if n.Class_ == ir.PFUNC { + inlFlood(n, typecheck.Export) + } + } + + p.markType(n.Type()) +} + +// markType recursively visits types reachable from t to identify +// functions whose inline bodies may be needed. +func (p *exporter) markType(t *types.Type) { + if p.marked[t] { + return + } + p.marked[t] = true + + // If this is a named type, mark all of its associated + // methods. Skip interface types because t.Methods contains + // only their unexpanded method set (i.e., exclusive of + // interface embeddings), and the switch statement below + // handles their full method set. + if t.Sym() != nil && t.Kind() != types.TINTER { + for _, m := range t.Methods().Slice() { + if types.IsExported(m.Sym.Name) { + p.markObject(ir.AsNode(m.Nname)) + } + } + } + + // Recursively mark any types that can be produced given a + // value of type t: dereferencing a pointer; indexing or + // iterating over an array, slice, or map; receiving from a + // channel; accessing a struct field or interface method; or + // calling a function. + // + // Notably, we don't mark function parameter types, because + // the user already needs some way to construct values of + // those types. + switch t.Kind() { + case types.TPTR, types.TARRAY, types.TSLICE: + p.markType(t.Elem()) + + case types.TCHAN: + if t.ChanDir().CanRecv() { + p.markType(t.Elem()) + } + + case types.TMAP: + p.markType(t.Key()) + p.markType(t.Elem()) + + case types.TSTRUCT: + for _, f := range t.FieldSlice() { + if types.IsExported(f.Sym.Name) || f.Embedded != 0 { + p.markType(f.Type) + } + } + + case types.TFUNC: + for _, f := range t.Results().FieldSlice() { + p.markType(f.Type) + } + + case types.TINTER: + for _, f := range t.FieldSlice() { + if types.IsExported(f.Sym.Name) { + p.markType(f.Type) + } + } + } +} diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go deleted file mode 100644 index 1084ff883f370..0000000000000 --- a/src/cmd/compile/internal/gc/gen.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
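A user-level picture of the reachability walk in markType above (hypothetical package lib, illustrative only): a type is marked when a value of it can be produced from something exported, while parameter types are deliberately skipped:

	package lib

	type inner struct{ n int }

	// Get is marked once inner is reachable, so its inline body is exported.
	func (i inner) Get() int { return i.n }

	type Outer struct{}

	// Make's result type is visited (the TFUNC case marks results only),
	// which is what makes inner reachable in the first place.
	func (Outer) Make() inner { return inner{} }

	// Use's parameter type is not visited: callers already need their own
	// way to construct a value to pass in.
	func (Outer) Use(b []byte) {}
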
- -package gc - -import ( - "cmd/compile/internal/base" - "cmd/compile/internal/ir" - "cmd/compile/internal/types" - "cmd/internal/obj" - "cmd/internal/src" - "strconv" -) - -// sysfunc looks up Go function name in package runtime. This function -// must follow the internal calling convention. -func sysfunc(name string) *obj.LSym { - s := ir.Pkgs.Runtime.Lookup(name) - s.SetFunc(true) - return s.Linksym() -} - -// sysvar looks up a variable (or assembly function) name in package -// runtime. If this is a function, it may have a special calling -// convention. -func sysvar(name string) *obj.LSym { - return ir.Pkgs.Runtime.Lookup(name).Linksym() -} - -// autotmpname returns the name for an autotmp variable numbered n. -func autotmpname(n int) string { - // Give each tmp a different name so that they can be registerized. - // Add a preceding . to avoid clashing with legal names. - const prefix = ".autotmp_" - // Start with a buffer big enough to hold a large n. - b := []byte(prefix + " ")[:len(prefix)] - b = strconv.AppendInt(b, int64(n), 10) - return types.InternString(b) -} - -// make a new Node off the books -func tempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name { - if curfn == nil { - base.Fatalf("no curfn for tempAt") - } - if curfn.Op() == ir.OCLOSURE { - ir.Dump("tempAt", curfn) - base.Fatalf("adding tempAt to wrong closure function") - } - if t == nil { - base.Fatalf("tempAt called with nil type") - } - - s := &types.Sym{ - Name: autotmpname(len(curfn.Dcl)), - Pkg: types.LocalPkg, - } - n := ir.NewNameAt(pos, s) - s.Def = n - n.SetType(t) - n.Class_ = ir.PAUTO - n.SetEsc(ir.EscNever) - n.Curfn = curfn - n.SetUsed(true) - n.SetAutoTemp(true) - curfn.Dcl = append(curfn.Dcl, n) - - types.CalcSize(t) - - return n -} - -func temp(t *types.Type) *ir.Name { - return tempAt(base.Pos, ir.CurFunc, t) -} diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index a2587b33616e4..7648e910d57de 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -5,7 +5,6 @@ package gc import ( - "cmd/compile/internal/ir" "cmd/compile/internal/ssa" "cmd/compile/internal/types" "cmd/internal/obj" @@ -14,37 +13,13 @@ import ( var pragcgobuf [][]string -var decldepth int32 - -var inimport bool // set during import - var zerosize int64 -var ( - okforeq [types.NTYPE]bool - okforadd [types.NTYPE]bool - okforand [types.NTYPE]bool - okfornone [types.NTYPE]bool - okforbool [types.NTYPE]bool - okforcap [types.NTYPE]bool - okforlen [types.NTYPE]bool - okforarith [types.NTYPE]bool -) - -var ( - okfor [ir.OEND][]bool - iscmp [ir.OEND]bool -) - var ( funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym) funcsyms []*types.Sym ) -var dclcontext ir.Class // PEXTERN/PAUTO - -var typecheckok bool - // interface to back end type Arch struct { diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index 6ea9b354ab1e7..f24687ec0f34c 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -34,6 +34,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/ssa" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/objabi" @@ -196,11 +197,11 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { // Q: is this needed? 
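A simplified, standalone version of the autotmp naming scheme being deleted above (the real helper reuses a byte buffer and interns the result):

	package main

	import (
		"fmt"
		"strconv"
	)

	// Temporaries are named ".autotmp_N", where N is the index into the
	// enclosing function's Dcl list; the leading "." keeps the name out
	// of the space of legal Go identifiers.
	func autotmpname(n int) string {
		return ".autotmp_" + strconv.Itoa(n)
	}

	func main() {
		fmt.Println(autotmpname(7)) // .autotmp_7
	}
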
savepos := base.Pos - savedclcontext := dclcontext + savedclcontext := typecheck.DeclContext savedcurfn := ir.CurFunc base.Pos = base.AutogeneratedPos - dclcontext = ir.PEXTERN + typecheck.DeclContext = ir.PEXTERN // At the moment we don't support wrapping a method, we'd need machinery // below to handle the receiver. Panic if we see this scenario. @@ -213,11 +214,11 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { var noReceiver *ir.Field tfn := ir.NewFuncType(base.Pos, noReceiver, - structargs(ft.Params(), true), - structargs(ft.Results(), false)) + typecheck.NewFuncParams(ft.Params(), true), + typecheck.NewFuncParams(ft.Results(), false)) // Reuse f's types.Sym to create a new ODCLFUNC/function. - fn := dclfunc(f.Nname.Sym(), tfn) + fn := typecheck.DeclFunc(f.Nname.Sym(), tfn) fn.SetDupok(true) fn.SetWrapper(true) // ignore frame for panic+recover matching @@ -281,22 +282,22 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { } fn.Body.Append(tail) - funcbody() + typecheck.FinishFuncBody() if base.Debug.DclStack != 0 { types.CheckDclstack() } - typecheckFunc(fn) + typecheck.Func(fn) ir.CurFunc = fn - typecheckslice(fn.Body, ctxStmt) + typecheck.Stmts(fn.Body) escapeFuncs([]*ir.Func{fn}, false) - Target.Decls = append(Target.Decls, fn) + typecheck.Target.Decls = append(typecheck.Target.Decls, fn) // Restore previous context. base.Pos = savepos - dclcontext = savedclcontext + typecheck.DeclContext = savedclcontext ir.CurFunc = savedcurfn } diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index f22e49efba6b1..ed61c11522bec 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -7,6 +7,7 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" ) @@ -17,12 +18,8 @@ import ( // the name, normally "pkg.init", is altered to "pkg.init.0". var renameinitgen int -// Function collecting autotmps generated during typechecking, -// to be included in the package-level init function. -var initTodo = ir.NewFunc(base.Pos) - func renameinit() *types.Sym { - s := lookupN("init.", renameinitgen) + s := typecheck.LookupNum("init.", renameinitgen) renameinitgen++ return s } @@ -34,14 +31,14 @@ func renameinit() *types.Sym { // 2) Initialize all the variables that have initializers. // 3) Run any init functions. func fninit() *ir.Name { - nf := initOrder(Target.Decls) + nf := initOrder(typecheck.Target.Decls) var deps []*obj.LSym // initTask records for packages the current package depends on var fns []*obj.LSym // functions to call for package initialization // Find imported packages with init tasks. - for _, pkg := range Target.Imports { - n := resolve(ir.NewIdent(base.Pos, pkg.Lookup(".inittask"))) + for _, pkg := range typecheck.Target.Imports { + n := typecheck.Resolve(ir.NewIdent(base.Pos, pkg.Lookup(".inittask"))) if n.Op() == ir.ONONAME { continue } @@ -54,34 +51,34 @@ func fninit() *ir.Name { // Make a function that contains all the initialization statements. 
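The renaming done by renameinit above is what keeps multiple user init functions apart; e.g. (illustrative, showing the symbols the comment at the top of this hunk describes):

	package p

	func init() {} // compiled as p.init.0
	func init() {} // compiled as p.init.1
	// the generated package initializer that runs them is plain p.init
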
if len(nf) > 0 { base.Pos = nf[0].Pos() // prolog/epilog gets line number of first init stmt - initializers := lookup("init") - fn := dclfunc(initializers, ir.NewFuncType(base.Pos, nil, nil, nil)) - for _, dcl := range initTodo.Dcl { + initializers := typecheck.Lookup("init") + fn := typecheck.DeclFunc(initializers, ir.NewFuncType(base.Pos, nil, nil, nil)) + for _, dcl := range typecheck.InitTodoFunc.Dcl { dcl.Curfn = fn } - fn.Dcl = append(fn.Dcl, initTodo.Dcl...) - initTodo.Dcl = nil + fn.Dcl = append(fn.Dcl, typecheck.InitTodoFunc.Dcl...) + typecheck.InitTodoFunc.Dcl = nil fn.Body.Set(nf) - funcbody() + typecheck.FinishFuncBody() - typecheckFunc(fn) + typecheck.Func(fn) ir.CurFunc = fn - typecheckslice(nf, ctxStmt) + typecheck.Stmts(nf) ir.CurFunc = nil - Target.Decls = append(Target.Decls, fn) + typecheck.Target.Decls = append(typecheck.Target.Decls, fn) fns = append(fns, initializers.Linksym()) } - if initTodo.Dcl != nil { + if typecheck.InitTodoFunc.Dcl != nil { // We only generate temps using initTodo if there // are package-scope initialization statements, so // something's weird if we get here. base.Fatalf("initTodo still has declarations") } - initTodo = nil + typecheck.InitTodoFunc = nil // Record user init functions. - for _, fn := range Target.Inits { + for _, fn := range typecheck.Target.Inits { // Skip init functions with empty bodies. if len(fn.Body) == 1 { if stmt := fn.Body[0]; stmt.Op() == ir.OBLOCK && len(stmt.(*ir.BlockStmt).List) == 0 { @@ -96,8 +93,8 @@ func fninit() *ir.Name { } // Make an .inittask structure. - sym := lookup(".inittask") - task := NewName(sym) + sym := typecheck.Lookup(".inittask") + task := typecheck.NewName(sym) task.SetType(types.Types[types.TUINT8]) // fake type task.Class_ = ir.PEXTERN sym.Def = task diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index b9e19da43f4a1..9cf23caf0e80c 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -30,6 +30,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/logopt" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/src" @@ -54,7 +55,7 @@ const ( func InlinePackage() { // Find functions that can be inlined and clone them before walk expands them. - ir.VisitFuncsBottomUp(Target.Decls, func(list []*ir.Func, recursive bool) { + ir.VisitFuncsBottomUp(typecheck.Target.Decls, func(list []*ir.Func, recursive bool) { numfns := numNonClosures(list) for _, n := range list { if !recursive || numfns > 1 { @@ -72,63 +73,6 @@ func InlinePackage() { }) } -// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods -// the ->sym can be re-used in the local package, so peel it off the receiver's type. -func fnpkg(fn *ir.Name) *types.Pkg { - if ir.IsMethod(fn) { - // method - rcvr := fn.Type().Recv().Type - - if rcvr.IsPtr() { - rcvr = rcvr.Elem() - } - if rcvr.Sym() == nil { - base.Fatalf("receiver with no sym: [%v] %L (%v)", fn.Sym(), fn, rcvr) - } - return rcvr.Sym().Pkg - } - - // non-method - return fn.Sym().Pkg -} - -// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck -// because they're a copy of an already checked body. -func typecheckinl(fn *ir.Func) { - lno := ir.SetPos(fn.Nname) - - expandInline(fn) - - // typecheckinl is only for imported functions; - // their bodies may refer to unsafe as long as the package - // was marked safe during import (which was checked then). 
- // the ->inl of a local function has been typechecked before caninl copied it. - pkg := fnpkg(fn.Nname) - - if pkg == types.LocalPkg || pkg == nil { - return // typecheckinl on local function - } - - if base.Flag.LowerM > 2 || base.Debug.Export != 0 { - fmt.Printf("typecheck import [%v] %L { %v }\n", fn.Sym(), fn, ir.Nodes(fn.Inl.Body)) - } - - savefn := ir.CurFunc - ir.CurFunc = fn - typecheckslice(fn.Inl.Body, ctxStmt) - ir.CurFunc = savefn - - // During expandInline (which imports fn.Func.Inl.Body), - // declarations are added to fn.Func.Dcl by funcHdr(). Move them - // to fn.Func.Inl.Dcl for consistency with how local functions - // behave. (Append because typecheckinl may be called multiple - // times.) - fn.Inl.Dcl = append(fn.Inl.Dcl, fn.Dcl...) - fn.Dcl = nil - - base.Pos = lno -} - // Caninl determines whether fn is inlineable. // If so, caninl saves fn->nbody in fn->inl and substitutes it with a copy. // fn and ->nbody will already have been typechecked. @@ -270,7 +214,7 @@ func inlFlood(n *ir.Name, exportsym func(*ir.Name)) { } fn.SetExportInline(true) - typecheckinl(fn) + typecheck.ImportedBody(fn) // Recursively identify all referenced functions for // reexport. We want to include even non-called functions, @@ -601,7 +545,7 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No as.Rhs.Set(inlconv2list(as.Rhs[0].(*ir.InlinedCallExpr))) as.SetOp(ir.OAS2) as.SetTypecheck(0) - n = typecheck(as, ctxStmt) + n = typecheck.Stmt(as) } } @@ -768,7 +712,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b inlMap[fn] = false }() if base.Debug.TypecheckInl == 0 { - typecheckinl(fn) + typecheck.ImportedBody(fn) } // We have a function node, and it has an inlineable body. @@ -824,21 +768,21 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b } if v.Byval() { - iv := typecheck(inlvar(v), ctxExpr) + iv := typecheck.Expr(inlvar(v)) ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, iv)) - ninit.Append(typecheck(ir.NewAssignStmt(base.Pos, iv, o), ctxStmt)) + ninit.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, iv, o))) inlvars[v] = iv } else { - addr := NewName(lookup("&" + v.Sym().Name)) + addr := typecheck.NewName(typecheck.Lookup("&" + v.Sym().Name)) addr.SetType(types.NewPtr(v.Type())) - ia := typecheck(inlvar(addr), ctxExpr) + ia := typecheck.Expr(inlvar(addr)) ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, ia)) - ninit.Append(typecheck(ir.NewAssignStmt(base.Pos, ia, nodAddr(o)), ctxStmt)) + ninit.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, ia, typecheck.NodAddr(o)))) inlvars[addr] = ia // When capturing by reference, all occurrence of the captured var // must be substituted with dereference of the temporary address - inlvars[v] = typecheck(ir.NewStarExpr(base.Pos, ia), ctxExpr) + inlvars[v] = typecheck.Expr(ir.NewStarExpr(base.Pos, ia)) } } } @@ -857,7 +801,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b // nothing should have moved to the heap yet. 
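For orientation, the kind of call mkinlcall rewrites; with -gcflags=-m the compiler reports "can inline add" and "inlining call to add" (illustrative):

	package p

	func add(a, b int) int { return a + b }

	func sum3(x, y, z int) int {
		return add(add(x, y), z) // both calls are replaced by add's body,
	}                            // with x, y, z bound via the inlvars above
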
base.Fatalf("impossible: %v", ln) } - inlf := typecheck(inlvar(ln), ctxExpr) + inlf := typecheck.Expr(inlvar(ln)) inlvars[ln] = inlf if base.Flag.GenDwarfInl > 0 { if ln.Class_ == ir.PPARAM { @@ -889,7 +833,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b if n := ir.AsNode(t.Nname); n != nil && !ir.IsBlank(n) && !strings.HasPrefix(n.Sym().Name, "~r") { n := n.(*ir.Name) m = inlvar(n) - m = typecheck(m, ctxExpr) + m = typecheck.Expr(m) inlvars[n] = m delayretvars = false // found a named result parameter } else { @@ -951,7 +895,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b vas = ir.NewAssignStmt(base.Pos, nil, nil) vas.X = inlParam(param, vas, inlvars) if len(varargs) == 0 { - vas.Y = nodnil() + vas.Y = typecheck.NodNil() vas.Y.SetType(param.Type) } else { lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(param.Type).(ir.Ntype), nil) @@ -961,11 +905,11 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b } if len(as.Rhs) != 0 { - ninit.Append(typecheck(as, ctxStmt)) + ninit.Append(typecheck.Stmt(as)) } if vas != nil { - ninit.Append(typecheck(vas, ctxStmt)) + ninit.Append(typecheck.Stmt(vas)) } if !delayretvars { @@ -973,11 +917,11 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b for _, n := range retvars { ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, n)) ras := ir.NewAssignStmt(base.Pos, n, nil) - ninit.Append(typecheck(ras, ctxStmt)) + ninit.Append(typecheck.Stmt(ras)) } } - retlabel := autolabel(".i") + retlabel := typecheck.AutoLabel(".i") inlgen++ @@ -1021,7 +965,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b lab := ir.NewLabelStmt(base.Pos, retlabel) body = append(body, lab) - typecheckslice(body, ctxStmt) + typecheck.Stmts(body) if base.Flag.GenDwarfInl > 0 { for _, v := range inlfvars { @@ -1061,7 +1005,7 @@ func inlvar(var_ ir.Node) ir.Node { fmt.Printf("inlvar %+v\n", var_) } - n := NewName(var_.Sym()) + n := typecheck.NewName(var_.Sym()) n.SetType(var_.Type()) n.Class_ = ir.PAUTO n.SetUsed(true) @@ -1074,7 +1018,7 @@ func inlvar(var_ ir.Node) ir.Node { // Synthesize a variable to store the inlined function's results in. func retvar(t *types.Field, i int) ir.Node { - n := NewName(lookupN("~R", i)) + n := typecheck.NewName(typecheck.LookupNum("~R", i)) n.SetType(t.Type) n.Class_ = ir.PAUTO n.SetUsed(true) @@ -1086,7 +1030,7 @@ func retvar(t *types.Field, i int) ir.Node { // Synthesize a variable to store the inlined function's arguments // when they come from a multiple return call. 
func argvar(t *types.Type, i int) ir.Node { - n := NewName(lookupN("~arg", i)) + n := typecheck.NewName(typecheck.LookupNum("~arg", i)) n.SetType(t.Elem()) n.Class_ = ir.PAUTO n.SetUsed(true) @@ -1198,10 +1142,10 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { } } - init = append(init, typecheck(as, ctxStmt)) + init = append(init, typecheck.Stmt(as)) } init = append(init, ir.NewBranchStmt(base.Pos, ir.OGOTO, subst.retlabel)) - typecheckslice(init, ctxStmt) + typecheck.Stmts(init) return ir.NewBlockStmt(base.Pos, init) case ir.OGOTO: @@ -1210,7 +1154,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { m.SetPos(subst.updatedPos(m.Pos())) m.PtrInit().Set(nil) p := fmt.Sprintf("%s·%d", n.Label.Name, inlgen) - m.Label = lookup(p) + m.Label = typecheck.Lookup(p) return m case ir.OLABEL: @@ -1219,7 +1163,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { m.SetPos(subst.updatedPos(m.Pos())) m.PtrInit().Set(nil) p := fmt.Sprintf("%s·%d", n.Label.Name, inlgen) - m.Label = lookup(p) + m.Label = typecheck.Lookup(p) return m } @@ -1284,7 +1228,7 @@ func devirtualizeCall(call *ir.CallExpr) { dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, nil) dt.SetType(typ) - x := typecheck(ir.NewSelectorExpr(sel.Pos(), ir.OXDOT, dt, sel.Sel), ctxExpr|ctxCallee) + x := typecheck.Callee(ir.NewSelectorExpr(sel.Pos(), ir.OXDOT, dt, sel.Sel)) switch x.Op() { case ir.ODOTMETH: x := x.(*ir.SelectorExpr) diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 69ec5c8f2f2ca..b98d1f2e10b52 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -13,6 +13,7 @@ import ( "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/bio" "cmd/internal/dwarf" @@ -49,9 +50,6 @@ func hidePanic() { } } -// Target is the package being compiled. -var Target *ir.Package - // Main parses flags and Go source files specified in the command-line // arguments, type-checks the parsed Go package, compiles functions to machine // code, and finally writes the compiled package definition to disk. @@ -197,18 +195,18 @@ func Main(archInit func(*Arch)) { return typenamesym(t).Linksym() } - Target = new(ir.Package) + typecheck.Target = new(ir.Package) - NeedFuncSym = makefuncsym - NeedITab = func(t, iface *types.Type) { itabname(t, iface) } - NeedRuntimeType = addsignat // TODO(rsc): typenamesym for lock? + typecheck.NeedFuncSym = makefuncsym + typecheck.NeedITab = func(t, iface *types.Type) { itabname(t, iface) } + typecheck.NeedRuntimeType = addsignat // TODO(rsc): typenamesym for lock? base.AutogeneratedPos = makePos(src.NewFileBase("", ""), 1, 0) types.TypeLinkSym = func(t *types.Type) *obj.LSym { return typenamesym(t).Linksym() } - TypecheckInit() + typecheck.Init() // Parse input. base.Timer.Start("fe", "parse") @@ -219,7 +217,7 @@ func Main(archInit func(*Arch)) { recordPackageName() // Typecheck. - TypecheckPackage() + typecheck.Package() // With all user code typechecked, it's now safe to verify unused dot imports. checkDotImports() @@ -227,7 +225,7 @@ func Main(archInit func(*Arch)) { // Build init task. if initTask := fninit(); initTask != nil { - exportsym(initTask) + typecheck.Export(initTask) } // Inlining @@ -237,7 +235,7 @@ func Main(archInit func(*Arch)) { } // Devirtualize. 
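What devirtualizeCall above achieves, from the user's side (illustrative): when the concrete type stored in an interface is statically known, the indirect call is rewritten into a direct one, which can then be inlined:

	package p

	type closer interface{ Close() error }

	type file struct{}

	func (file) Close() error { return nil }

	func f() error {
		var c closer = file{} // concrete type known at this call site,
		return c.Close()      // so this lowers to a direct file.Close call
	}
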
- for _, n := range Target.Decls { + for _, n := range typecheck.Target.Decls { if n.Op() == ir.ODCLFUNC { devirtualize(n.(*ir.Func)) } @@ -253,7 +251,7 @@ func Main(archInit func(*Arch)) { // Large values are also moved off stack in escape analysis; // because large values may contain pointers, it must happen early. base.Timer.Start("fe", "escapes") - escapes(Target.Decls) + escapes(typecheck.Target.Decls) // Collect information for go:nowritebarrierrec // checking. This must happen before transformclosure. @@ -267,7 +265,7 @@ func Main(archInit func(*Arch)) { // This needs to happen before walk, because closures must be transformed // before walk reaches a call of a closure. base.Timer.Start("fe", "xclosures") - for _, n := range Target.Decls { + for _, n := range typecheck.Target.Decls { if n.Op() == ir.ODCLFUNC { n := n.(*ir.Func) if n.OClosure != nil { @@ -292,8 +290,8 @@ func Main(archInit func(*Arch)) { // Don't use range--walk can add functions to Target.Decls. base.Timer.Start("be", "compilefuncs") fcount := int64(0) - for i := 0; i < len(Target.Decls); i++ { - n := Target.Decls[i] + for i := 0; i < len(typecheck.Target.Decls); i++ { + n := typecheck.Target.Decls[i] if n.Op() == ir.ODCLFUNC { funccompile(n.(*ir.Func)) fcount++ @@ -327,7 +325,7 @@ func Main(archInit func(*Arch)) { } CheckLargeStacks() - CheckFuncStack() + typecheck.CheckFuncStack() if len(compilequeue) != 0 { base.Fatalf("%d uncompiled functions", len(compilequeue)) @@ -363,7 +361,7 @@ func CheckLargeStacks() { func cgoSymABIs() { // The linker expects an ABI0 wrapper for all cgo-exported // functions. - for _, prag := range Target.CgoPragmas { + for _, prag := range typecheck.Target.CgoPragmas { switch prag[0] { case "cgo_export_static", "cgo_export_dynamic": if symabiRefs == nil { @@ -581,33 +579,6 @@ func findpkg(name string) (file string, ok bool) { return "", false } -// loadsys loads the definitions for the low-level runtime functions, -// so that the compiler can generate calls to them, -// but does not make them visible to user code. -func loadsys() { - types.Block = 1 - - inimport = true - typecheckok = true - - typs := runtimeTypes() - for _, d := range &runtimeDecls { - sym := ir.Pkgs.Runtime.Lookup(d.name) - typ := typs[d.typ] - switch d.tag { - case funcTag: - importfunc(ir.Pkgs.Runtime, src.NoXPos, sym, typ) - case varTag: - importvar(ir.Pkgs.Runtime, src.NoXPos, sym, typ) - default: - base.Fatalf("unhandled declaration tag %v", d.tag) - } - } - - typecheckok = false - inimport = false -} - // myheight tracks the local package's height based on packages // imported so far. 
var myheight int @@ -776,7 +747,7 @@ func importfile(f constant.Value) *types.Pkg { base.Errorf("import %s: unexpected package format byte: %v", file, c) base.ErrorExit() } - fingerprint = iimport(importpkg, imp) + fingerprint = typecheck.ReadImports(importpkg, imp) default: base.Errorf("no import in %q", path_) diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index c83b60dcd4c51..3e8703f0507d4 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -19,6 +19,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/syntax" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/objabi" "cmd/internal/src" @@ -160,7 +161,7 @@ type noder struct { func (p *noder) funcBody(fn *ir.Func, block *syntax.BlockStmt) { oldScope := p.scope p.scope = 0 - funchdr(fn) + typecheck.StartFuncBody(fn) if block != nil { body := p.stmts(block.List) @@ -173,7 +174,7 @@ func (p *noder) funcBody(fn *ir.Func, block *syntax.BlockStmt) { fn.Endlineno = base.Pos } - funcbody() + typecheck.FinishFuncBody() p.scope = oldScope } @@ -261,7 +262,7 @@ func (p *noder) node() { p.checkUnused(pragma) } - Target.Decls = append(Target.Decls, p.decls(p.file.DeclList)...) + typecheck.Target.Decls = append(typecheck.Target.Decls, p.decls(p.file.DeclList)...) base.Pos = src.NoXPos clearImports() @@ -273,7 +274,7 @@ func (p *noder) processPragmas() { p.errorAt(l.pos, "//go:linkname only allowed in Go files that import \"unsafe\"") continue } - n := ir.AsNode(lookup(l.local).Def) + n := ir.AsNode(typecheck.Lookup(l.local).Def) if n == nil || n.Op() != ir.ONAME { // TODO(mdempsky): Change to p.errorAt before Go 1.17 release. // base.WarnfAt(p.makeXPos(l.pos), "//go:linkname must refer to declared function or variable (will be an error in Go 1.17)") @@ -285,7 +286,7 @@ func (p *noder) processPragmas() { } n.Sym().Linkname = l.remote } - Target.CgoPragmas = append(Target.CgoPragmas, p.pragcgobuf...) + typecheck.Target.CgoPragmas = append(typecheck.Target.CgoPragmas, p.pragcgobuf...) 
} func (p *noder) decls(decls []syntax.Decl) (l []ir.Node) { @@ -342,7 +343,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) { } if !ipkg.Direct { - Target.Imports = append(Target.Imports, ipkg) + typecheck.Target.Imports = append(typecheck.Target.Imports, ipkg) } ipkg.Direct = true @@ -350,7 +351,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) { if imp.LocalPkgName != nil { my = p.name(imp.LocalPkgName) } else { - my = lookup(ipkg.Name) + my = typecheck.Lookup(ipkg.Name) } pack := ir.NewPkgName(p.pos(imp), my, ipkg) @@ -366,7 +367,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) { return } if my.Def != nil { - redeclare(pack.Pos(), my, "as imported package name") + typecheck.Redeclared(pack.Pos(), my, "as imported package name") } my.Def = pack my.Lastlineno = pack.Pos() @@ -401,7 +402,7 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node { } p.setlineno(decl) - return variter(names, typ, exprs) + return typecheck.DeclVars(names, typ, exprs) } // constState tracks state between constant specifiers within a @@ -449,7 +450,7 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node { if decl.Values == nil { v = ir.DeepCopy(n.Pos(), v) } - declare(n, dclcontext) + typecheck.Declare(n, typecheck.DeclContext) n.Ntype = typ n.Defn = v @@ -469,7 +470,7 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node { func (p *noder) typeDecl(decl *syntax.TypeDecl) ir.Node { n := p.declName(ir.OTYPE, decl.Name) - declare(n, dclcontext) + typecheck.Declare(n, typecheck.DeclContext) // decl.Type may be nil but in that case we got a syntax error during parsing typ := p.typeExprOrNil(decl.Type) @@ -514,7 +515,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node { if len(t.Params) > 0 || len(t.Results) > 0 { base.ErrorfAt(f.Pos(), "func init must have no arguments and no return values") } - Target.Inits = append(Target.Inits, f) + typecheck.Target.Inits = append(typecheck.Target.Inits, f) } if types.LocalPkg.Name == "main" && name.Name == "main" { @@ -541,7 +542,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node { } if fun.Recv == nil { - declare(f.Nname, ir.PFUNC) + typecheck.Declare(f.Nname, ir.PFUNC) } p.funcBody(f, fun.Body) @@ -704,7 +705,7 @@ func (p *noder) expr(expr syntax.Expr) ir.Node { pos, op := p.pos(expr), p.unOp(expr.Op) switch op { case ir.OADDR: - return nodAddrAt(pos, x) + return typecheck.NodAddrAt(pos, x) case ir.ODEREF: return ir.NewStarExpr(pos, x) } @@ -950,7 +951,7 @@ func (p *noder) embedded(typ syntax.Expr) *ir.Field { } sym := p.packname(typ) - n := ir.NewField(p.pos(typ), lookup(sym.Name), importName(sym).(ir.Ntype), nil) + n := ir.NewField(p.pos(typ), typecheck.Lookup(sym.Name), importName(sym).(ir.Ntype), nil) n.Embedded = true if isStar { @@ -1136,8 +1137,8 @@ func (p *noder) assignList(expr syntax.Expr, defn ir.Node, colas bool) []ir.Node } newOrErr = true - n := NewName(sym) - declare(n, dclcontext) + n := typecheck.NewName(sym) + typecheck.Declare(n, typecheck.DeclContext) n.Defn = defn defn.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n)) res[i] = n @@ -1245,8 +1246,8 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitch n.List.Set(p.exprList(clause.Cases)) } if tswitch != nil && tswitch.Tag != nil { - nn := NewName(tswitch.Tag.Sym()) - declare(nn, dclcontext) + nn := typecheck.NewName(tswitch.Tag.Sym()) + typecheck.Declare(nn, typecheck.DeclContext) n.Vars = []ir.Node{nn} // keep track of the instances for reporting unused nn.Defn = tswitch @@ -1466,7 
+1467,7 @@ var tokenForLitKind = [...]token.Token{ } func (p *noder) name(name *syntax.Name) *types.Sym { - return lookup(name.Value) + return typecheck.Lookup(name.Value) } func (p *noder) mkname(name *syntax.Name) ir.Node { diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 372277552f841..1b4ba50e6bb28 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -7,6 +7,7 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/bio" "cmd/internal/obj" @@ -117,14 +118,14 @@ func dumpCompilerObj(bout *bio.Writer) { } func dumpdata() { - numExterns := len(Target.Externs) - numDecls := len(Target.Decls) + numExterns := len(typecheck.Target.Externs) + numDecls := len(typecheck.Target.Decls) - dumpglobls(Target.Externs) + dumpglobls(typecheck.Target.Externs) dumpfuncsyms() addptabs() - numExports := len(Target.Exports) - addsignats(Target.Externs) + numExports := len(typecheck.Target.Exports) + addsignats(typecheck.Target.Externs) dumpsignats() dumptabs() numPTabs, numITabs := CountTabs() @@ -140,22 +141,22 @@ func dumpdata() { // In the typical case, we loop 0 or 1 times. // It was not until issue 24761 that we found any code that required a loop at all. for { - for i := numDecls; i < len(Target.Decls); i++ { - n := Target.Decls[i] + for i := numDecls; i < len(typecheck.Target.Decls); i++ { + n := typecheck.Target.Decls[i] if n.Op() == ir.ODCLFUNC { funccompile(n.(*ir.Func)) } } - numDecls = len(Target.Decls) + numDecls = len(typecheck.Target.Decls) compileFunctions() dumpsignats() - if numDecls == len(Target.Decls) { + if numDecls == len(typecheck.Target.Decls) { break } } // Dump extra globals. 
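From the noder.funcDecl hunk above, the init constraint seen from user code (illustrative):

	package p

	func init(n int) {} // error: func init must have no arguments
	                    //        and no return values
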
- dumpglobls(Target.Externs[numExterns:]) + dumpglobls(typecheck.Target.Externs[numExterns:]) if zerosize > 0 { zero := ir.Pkgs.Map.Lookup("zero") @@ -164,7 +165,7 @@ func dumpdata() { addGCLocals() - if numExports != len(Target.Exports) { + if numExports != len(typecheck.Target.Exports) { base.Fatalf("Target.Exports changed after compile functions loop") } newNumPTabs, newNumITabs := CountTabs() @@ -179,11 +180,11 @@ func dumpdata() { func dumpLinkerObj(bout *bio.Writer) { printObjHeader(bout) - if len(Target.CgoPragmas) != 0 { + if len(typecheck.Target.CgoPragmas) != 0 { // write empty export section; must be before cgo section fmt.Fprintf(bout, "\n$$\n\n$$\n\n") fmt.Fprintf(bout, "\n$$ // cgo\n") - if err := json.NewEncoder(bout).Encode(Target.CgoPragmas); err != nil { + if err := json.NewEncoder(bout).Encode(typecheck.Target.CgoPragmas); err != nil { base.Fatalf("serializing pragcgobuf: %v", err) } fmt.Fprintf(bout, "\n$$\n\n") @@ -198,7 +199,7 @@ func addptabs() { if !base.Ctxt.Flag_dynlink || types.LocalPkg.Name != "main" { return } - for _, exportn := range Target.Exports { + for _, exportn := range typecheck.Target.Exports { s := exportn.Sym() nn := ir.AsNode(s.Def) if nn == nil { @@ -474,7 +475,7 @@ func slicedata(pos src.XPos, s string) *ir.Name { slicedataGen++ symname := fmt.Sprintf(".gobytes.%d", slicedataGen) sym := types.LocalPkg.Lookup(symname) - symnode := NewName(sym) + symnode := typecheck.NewName(sym) sym.Def = symnode lsym := sym.Linksym() diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 3d35094a5892a..075bcea92cf17 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -7,6 +7,7 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/src" "fmt" @@ -63,7 +64,7 @@ func order(fn *ir.Func) { // append typechecks stmt and appends it to out. 
func (o *Order) append(stmt ir.Node) { - o.out = append(o.out, typecheck(stmt, ctxStmt)) + o.out = append(o.out, typecheck.Stmt(stmt)) } // newTemp allocates a new temporary with the given type, @@ -85,7 +86,7 @@ func (o *Order) newTemp(t *types.Type, clear bool) *ir.Name { } } if v == nil { - v = temp(t) + v = typecheck.Temp(t) } if clear { o.append(ir.NewAssignStmt(base.Pos, v, nil)) @@ -142,7 +143,7 @@ func (o *Order) cheapExpr(n ir.Node) ir.Node { } a := ir.SepCopy(n).(*ir.UnaryExpr) a.X = l - return typecheck(a, ctxExpr) + return typecheck.Expr(a) } return o.copyExpr(n) @@ -168,7 +169,7 @@ func (o *Order) safeExpr(n ir.Node) ir.Node { } a := ir.SepCopy(n).(*ir.UnaryExpr) a.X = l - return typecheck(a, ctxExpr) + return typecheck.Expr(a) case ir.ODOT: n := n.(*ir.SelectorExpr) @@ -178,7 +179,7 @@ func (o *Order) safeExpr(n ir.Node) ir.Node { } a := ir.SepCopy(n).(*ir.SelectorExpr) a.X = l - return typecheck(a, ctxExpr) + return typecheck.Expr(a) case ir.ODOTPTR: n := n.(*ir.SelectorExpr) @@ -188,7 +189,7 @@ func (o *Order) safeExpr(n ir.Node) ir.Node { } a := ir.SepCopy(n).(*ir.SelectorExpr) a.X = l - return typecheck(a, ctxExpr) + return typecheck.Expr(a) case ir.ODEREF: n := n.(*ir.StarExpr) @@ -198,7 +199,7 @@ func (o *Order) safeExpr(n ir.Node) ir.Node { } a := ir.SepCopy(n).(*ir.StarExpr) a.X = l - return typecheck(a, ctxExpr) + return typecheck.Expr(a) case ir.OINDEX, ir.OINDEXMAP: n := n.(*ir.IndexExpr) @@ -215,7 +216,7 @@ func (o *Order) safeExpr(n ir.Node) ir.Node { a := ir.SepCopy(n).(*ir.IndexExpr) a.X = l a.Index = r - return typecheck(a, ctxExpr) + return typecheck.Expr(a) default: base.Fatalf("order.safeExpr %v", n.Op()) @@ -241,7 +242,7 @@ func isaddrokay(n ir.Node) bool { func (o *Order) addrTemp(n ir.Node) ir.Node { if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL { // TODO: expand this to all static composite literal nodes? 
- n = defaultlit(n, nil) + n = typecheck.DefaultLit(n, nil) types.CalcSize(n.Type()) vstat := readonlystaticname(n.Type()) var s InitSchedule @@ -249,7 +250,7 @@ func (o *Order) addrTemp(n ir.Node) ir.Node { if s.out != nil { base.Fatalf("staticassign of const generated code: %+v", n) } - vstat = typecheck(vstat, ctxExpr).(*ir.Name) + vstat = typecheck.Expr(vstat).(*ir.Name) return vstat } if isaddrokay(n) { @@ -336,7 +337,7 @@ func (o *Order) cleanTempNoPop(mark ordermarker) []ir.Node { var out []ir.Node for i := len(o.temp) - 1; i >= int(mark); i-- { n := o.temp[i] - out = append(out, typecheck(ir.NewUnaryExpr(base.Pos, ir.OVARKILL, n), ctxStmt)) + out = append(out, typecheck.Stmt(ir.NewUnaryExpr(base.Pos, ir.OVARKILL, n))) } return out } @@ -388,7 +389,7 @@ func orderMakeSliceCopy(s []ir.Node) { mk.Cap = cp.Y // Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s) mk.SetBounded(mk.Len.Op() == ir.OLEN && ir.SameSafeExpr(mk.Len.(*ir.UnaryExpr).X, cp.Y)) - as.Y = typecheck(mk, ctxExpr) + as.Y = typecheck.Expr(mk) s[1] = nil // remove separate copy call } @@ -495,7 +496,7 @@ func (o *Order) call(nn ir.Node) { } n := nn.(*ir.CallExpr) - fixVariadicCall(n) + typecheck.FixVariadicCall(n) n.X = o.expr(n.X, nil) o.exprList(n.Args) @@ -513,7 +514,7 @@ func (o *Order) call(nn ir.Node) { x := o.copyExpr(arg.X) arg.X = x x.Name().SetAddrtaken(true) // ensure SSA keeps the x variable - n.Body.Append(typecheck(ir.NewUnaryExpr(base.Pos, ir.OVARLIVE, x), ctxStmt)) + n.Body.Append(typecheck.Stmt(ir.NewUnaryExpr(base.Pos, ir.OVARLIVE, x))) } } } @@ -584,7 +585,7 @@ func (o *Order) mapAssign(n ir.Node) { t := o.newTemp(m.Type(), false) n.Lhs[i] = t a := ir.NewAssignStmt(base.Pos, m, t) - post = append(post, typecheck(a, ctxStmt)) + post = append(post, typecheck.Stmt(a)) } } @@ -653,8 +654,8 @@ func (o *Order) stmt(n ir.Node) { l2.Assigned = false } l2 = o.copyExpr(l2) - r := o.expr(typecheck(ir.NewBinaryExpr(n.Pos(), n.AsOp, l2, n.Y), ctxExpr), nil) - as := typecheck(ir.NewAssignStmt(n.Pos(), l1, r), ctxStmt) + r := o.expr(typecheck.Expr(ir.NewBinaryExpr(n.Pos(), n.AsOp, l2, n.Y)), nil) + as := typecheck.Stmt(ir.NewAssignStmt(n.Pos(), l1, r)) o.mapAssign(as) o.cleanTemp(t) return @@ -858,7 +859,7 @@ func (o *Order) stmt(n ir.Node) { if r.Type().IsString() && r.Type() != types.Types[types.TSTRING] { r = ir.NewConvExpr(base.Pos, ir.OCONV, nil, r) r.SetType(types.Types[types.TSTRING]) - r = typecheck(r, ctxExpr) + r = typecheck.Expr(r) } n.X = o.copyExpr(r) @@ -949,11 +950,11 @@ func (o *Order) stmt(n ir.Node) { if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).X == n { init = init[1:] } - dcl := typecheck(ir.NewDecl(base.Pos, ir.ODCL, n), ctxStmt) + dcl := typecheck.Stmt(ir.NewDecl(base.Pos, ir.ODCL, n)) ncas.PtrInit().Append(dcl) } tmp := o.newTemp(t, t.HasPointers()) - as := typecheck(ir.NewAssignStmt(base.Pos, n, conv(tmp, n.Type())), ctxStmt) + as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, n, typecheck.Conv(tmp, n.Type()))) ncas.PtrInit().Append(as) r.Lhs[i] = tmp } @@ -1217,7 +1218,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { // Evaluate left-hand side. lhs := o.expr(n.X, nil) - o.out = append(o.out, typecheck(ir.NewAssignStmt(base.Pos, r, lhs), ctxStmt)) + o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, lhs))) // Evaluate right-hand side, save generated code. 
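Why the right-hand side's generated code is saved separately above: for && and || it must run only when the left operand does not already decide the result (illustrative):

	package p

	func get(p *int) int {
		if p != nil && *p > 0 { // "*p > 0" is ordered into a deferred block
			return *p           // that executes only once "p != nil" holds
		}
		return 0
	}
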
saveout := o.out @@ -1225,7 +1226,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { t := o.markTemp() o.edge() rhs := o.expr(n.Y, nil) - o.out = append(o.out, typecheck(ir.NewAssignStmt(base.Pos, r, rhs), ctxStmt)) + o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, rhs))) o.cleanTemp(t) gen := o.out o.out = saveout @@ -1307,7 +1308,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { case ir.OCLOSURE: n := n.(*ir.ClosureExpr) if n.Transient() && len(n.Func.ClosureVars) > 0 { - n.Prealloc = o.newTemp(closureType(n), false) + n.Prealloc = o.newTemp(typecheck.ClosureType(n), false) } return n @@ -1315,7 +1316,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { n := n.(*ir.CallPartExpr) n.X = o.expr(n.X, nil) if n.Transient() { - t := partialCallType(n) + t := typecheck.PartialCallType(n) n.Prealloc = o.newTemp(t, false) } return n @@ -1415,13 +1416,13 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { // Emit the creation of the map (with all its static entries). m := o.newTemp(n.Type(), false) as := ir.NewAssignStmt(base.Pos, m, n) - typecheck(as, ctxStmt) + typecheck.Stmt(as) o.stmt(as) // Emit eval+insert of dynamic entries, one at a time. for _, r := range dynamics { as := ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, r.Key), r.Value) - typecheck(as, ctxStmt) // Note: this converts the OINDEX to an OINDEXMAP + typecheck.Stmt(as) // Note: this converts the OINDEX to an OINDEXMAP o.stmt(as) } return m @@ -1455,7 +1456,7 @@ func (o *Order) as2(n *ir.AssignListStmt) { as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) as.Lhs.Set(left) as.Rhs.Set(tmplist) - o.stmt(typecheck(as, ctxStmt)) + o.stmt(typecheck.Stmt(as)) } // okAs2 orders OAS2XXX with ok. @@ -1475,12 +1476,12 @@ func (o *Order) okAs2(n *ir.AssignListStmt) { if tmp1 != nil { r := ir.NewAssignStmt(base.Pos, n.Lhs[0], tmp1) - o.mapAssign(typecheck(r, ctxStmt)) + o.mapAssign(typecheck.Stmt(r)) n.Lhs[0] = tmp1 } if tmp2 != nil { - r := ir.NewAssignStmt(base.Pos, n.Lhs[1], conv(tmp2, n.Lhs[1].Type())) - o.mapAssign(typecheck(r, ctxStmt)) + r := ir.NewAssignStmt(base.Pos, n.Lhs[1], typecheck.Conv(tmp2, n.Lhs[1].Type())) + o.mapAssign(typecheck.Stmt(r)) n.Lhs[1] = tmp2 } } diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 337556ea41502..c0f3326454e6b 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -8,6 +8,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/ssa" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/dwarf" "cmd/internal/obj" @@ -146,7 +147,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { } if f.Config.NeedsFpScratch && scratchUsed { - s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[types.TUINT64]) + s.scratchFpMem = typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT64]) } sort.Sort(byStackVar(fn.Dcl)) @@ -214,11 +215,11 @@ func funccompile(fn *ir.Func) { return } - dclcontext = ir.PAUTO + typecheck.DeclContext = ir.PAUTO ir.CurFunc = fn compile(fn) ir.CurFunc = nil - dclcontext = ir.PEXTERN + typecheck.DeclContext = ir.PEXTERN } func compile(fn *ir.Func) { diff --git a/src/cmd/compile/internal/gc/pgen_test.go b/src/cmd/compile/internal/gc/pgen_test.go index 1170db26818b6..95c4b24fa1eee 100644 --- a/src/cmd/compile/internal/gc/pgen_test.go +++ b/src/cmd/compile/internal/gc/pgen_test.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" 
"cmd/internal/src" "reflect" @@ -41,7 +42,7 @@ func TestCmpstackvar(t *testing.T) { if s == nil { s = &types.Sym{Name: "."} } - n := NewName(s) + n := typecheck.NewName(s) n.SetType(t) n.SetFrameOffset(xoffset) n.Class_ = cl @@ -156,7 +157,7 @@ func TestCmpstackvar(t *testing.T) { func TestStackvarSort(t *testing.T) { nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Name { - n := NewName(s) + n := typecheck.NewName(s) n.SetType(t) n.SetFrameOffset(xoffset) n.Class_ = cl diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index a9447189c21e2..c040811932be2 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -7,136 +7,12 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/sys" "unicode/utf8" ) -// range -func typecheckrange(n *ir.RangeStmt) { - // Typechecking order is important here: - // 0. first typecheck range expression (slice/map/chan), - // it is evaluated only once and so logically it is not part of the loop. - // 1. typecheck produced values, - // this part can declare new vars and so it must be typechecked before body, - // because body can contain a closure that captures the vars. - // 2. decldepth++ to denote loop body. - // 3. typecheck body. - // 4. decldepth--. - typecheckrangeExpr(n) - - // second half of dance, the first half being typecheckrangeExpr - n.SetTypecheck(1) - ls := n.Vars - for i1, n1 := range ls { - if n1.Typecheck() == 0 { - ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign) - } - } - - decldepth++ - typecheckslice(n.Body, ctxStmt) - decldepth-- -} - -func typecheckrangeExpr(n *ir.RangeStmt) { - n.X = typecheck(n.X, ctxExpr) - - t := n.X.Type() - if t == nil { - return - } - // delicate little dance. see typecheckas2 - ls := n.Vars - for i1, n1 := range ls { - if !ir.DeclaredBy(n1, n) { - ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign) - } - } - - if t.IsPtr() && t.Elem().IsArray() { - t = t.Elem() - } - n.SetType(t) - - var t1, t2 *types.Type - toomany := false - switch t.Kind() { - default: - base.ErrorfAt(n.Pos(), "cannot range over %L", n.X) - return - - case types.TARRAY, types.TSLICE: - t1 = types.Types[types.TINT] - t2 = t.Elem() - - case types.TMAP: - t1 = t.Key() - t2 = t.Elem() - - case types.TCHAN: - if !t.ChanDir().CanRecv() { - base.ErrorfAt(n.Pos(), "invalid operation: range %v (receive from send-only type %v)", n.X, n.X.Type()) - return - } - - t1 = t.Elem() - t2 = nil - if len(n.Vars) == 2 { - toomany = true - } - - case types.TSTRING: - t1 = types.Types[types.TINT] - t2 = types.RuneType - } - - if len(n.Vars) > 2 || toomany { - base.ErrorfAt(n.Pos(), "too many variables in range") - } - - var v1, v2 ir.Node - if len(n.Vars) != 0 { - v1 = n.Vars[0] - } - if len(n.Vars) > 1 { - v2 = n.Vars[1] - } - - // this is not only an optimization but also a requirement in the spec. - // "if the second iteration variable is the blank identifier, the range - // clause is equivalent to the same clause with only the first variable - // present." 
- if ir.IsBlank(v2) { - if v1 != nil { - n.Vars = []ir.Node{v1} - } - v2 = nil - } - - if v1 != nil { - if ir.DeclaredBy(v1, n) { - v1.SetType(t1) - } else if v1.Type() != nil { - if op, why := assignop(t1, v1.Type()); op == ir.OXXX { - base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t1, v1, why) - } - } - checkassign(n, v1) - } - - if v2 != nil { - if ir.DeclaredBy(v2, n) { - v2.SetType(t2) - } else if v2.Type() != nil { - if op, why := assignop(t2, v2.Type()); op == ir.OXXX { - base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t2, v2, why) - } - } - checkassign(n, v2) - } -} - func cheapComputableIndex(width int64) bool { switch thearch.LinkArch.Family { // MIPS does not have R+R addressing @@ -221,8 +97,8 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { // order.stmt arranged for a copy of the array/slice variable if needed. ha := a - hv1 := temp(types.Types[types.TINT]) - hn := temp(types.Types[types.TINT]) + hv1 := typecheck.Temp(types.Types[types.TINT]) + hn := typecheck.Temp(types.Types[types.TINT]) init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil)) init = append(init, ir.NewAssignStmt(base.Pos, hn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha))) @@ -271,10 +147,10 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { ifGuard.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn) nfor.SetOp(ir.OFORUNTIL) - hp := temp(types.NewPtr(nrange.Type().Elem())) + hp := typecheck.Temp(types.NewPtr(nrange.Type().Elem())) tmp := ir.NewIndexExpr(base.Pos, ha, ir.NewInt(0)) tmp.SetBounded(true) - init = append(init, ir.NewAssignStmt(base.Pos, hp, nodAddr(tmp))) + init = append(init, ir.NewAssignStmt(base.Pos, hp, typecheck.NodAddr(tmp))) // Use OAS2 to correctly handle assignments // of the form "v1, a[v1] := range". @@ -289,7 +165,7 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { // advancing the pointer is safe and won't go past the // end of the allocation. as := ir.NewAssignStmt(base.Pos, hp, addptr(hp, t.Elem().Width)) - nfor.Late = []ir.Node{typecheck(as, ctxStmt)} + nfor.Late = []ir.Node{typecheck.Stmt(as)} case types.TMAP: // order.stmt allocated the iterator for us. @@ -301,15 +177,15 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:hiter elemsym := th.Field(1).Sym // ditto - fn := syslook("mapiterinit") + fn := typecheck.LookupRuntime("mapiterinit") - fn = substArgTypes(fn, t.Key(), t.Elem(), th) - init = append(init, mkcall1(fn, nil, nil, typename(t), ha, nodAddr(hit))) - nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), nodnil()) + fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), th) + init = append(init, mkcall1(fn, nil, nil, typename(t), ha, typecheck.NodAddr(hit))) + nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), typecheck.NodNil()) - fn = syslook("mapiternext") - fn = substArgTypes(fn, th) - nfor.Post = mkcall1(fn, nil, nil, nodAddr(hit)) + fn = typecheck.LookupRuntime("mapiternext") + fn = typecheck.SubstArgTypes(fn, th) + nfor.Post = mkcall1(fn, nil, nil, typecheck.NodAddr(hit)) key := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym)) if v1 == nil { @@ -328,12 +204,12 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { // order.stmt arranged for a copy of the channel variable. 
ha := a - hv1 := temp(t.Elem()) + hv1 := typecheck.Temp(t.Elem()) hv1.SetTypecheck(1) if t.Elem().HasPointers() { init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil)) } - hb := temp(types.Types[types.TBOOL]) + hb := typecheck.Temp(types.Types[types.TBOOL]) nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, hb, ir.NewBool(false)) a := ir.NewAssignListStmt(base.Pos, ir.OAS2RECV, nil, nil) @@ -370,9 +246,9 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { // order.stmt arranged for a copy of the string variable. ha := a - hv1 := temp(types.Types[types.TINT]) - hv1t := temp(types.Types[types.TINT]) - hv2 := temp(types.RuneType) + hv1 := typecheck.Temp(types.Types[types.TINT]) + hv1t := typecheck.Temp(types.Types[types.TINT]) + hv2 := typecheck.Temp(types.RuneType) // hv1 := 0 init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil)) @@ -388,7 +264,7 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { // hv2 := rune(ha[hv1]) nind := ir.NewIndexExpr(base.Pos, ha, hv1) nind.SetBounded(true) - body = append(body, ir.NewAssignStmt(base.Pos, hv2, conv(nind, types.RuneType))) + body = append(body, ir.NewAssignStmt(base.Pos, hv2, typecheck.Conv(nind, types.RuneType))) // if hv2 < utf8.RuneSelf nif := ir.NewIfStmt(base.Pos, nil, nil, nil) @@ -403,7 +279,7 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { // hv2, hv1 = decoderune(ha, hv1) eif.Lhs = []ir.Node{hv2, hv1} - fn := syslook("decoderune") + fn := typecheck.LookupRuntime("decoderune") eif.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), nil, ha, hv1)} body = append(body, nif) @@ -422,21 +298,21 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { } } - typecheckslice(init, ctxStmt) + typecheck.Stmts(init) if ifGuard != nil { ifGuard.PtrInit().Append(init...) - ifGuard = typecheck(ifGuard, ctxStmt).(*ir.IfStmt) + ifGuard = typecheck.Stmt(ifGuard).(*ir.IfStmt) } else { nfor.PtrInit().Append(init...) } - typecheckslice(nfor.Cond.Init(), ctxStmt) + typecheck.Stmts(nfor.Cond.Init()) - nfor.Cond = typecheck(nfor.Cond, ctxExpr) - nfor.Cond = defaultlit(nfor.Cond, nil) - nfor.Post = typecheck(nfor.Post, ctxStmt) - typecheckslice(body, ctxStmt) + nfor.Cond = typecheck.Expr(nfor.Cond) + nfor.Cond = typecheck.DefaultLit(nfor.Cond, nil) + nfor.Post = typecheck.Stmt(nfor.Post) + typecheck.Stmts(body) nfor.Body.Append(body...) nfor.Body.Append(nrange.Body...) 
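
As a reading aid for the walkrange hunks above: a minimal sketch, in ordinary Go rather than compiler IR, of the loop shape the slice/array case builds. The names hv1 and hn mirror the temporaries created via typecheck.Temp in the diff; rangeSliceLowered and body are invented for illustration.

	// Sketch of the lowering for `for i, v := range s` over a slice:
	// the index and length live in temporaries, and len(s) is
	// evaluated once, before the loop.
	func rangeSliceLowered(s []int, body func(i, v int)) {
		hv1 := 0     // index temporary (hv1 in the diff)
		hn := len(s) // length temporary (hn in the diff)
		for hv1 < hn {
			body(hv1, s[hv1]) // the real lowering may advance a pointer instead
			hv1++
		}
	}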
@@ -505,10 +381,10 @@ func mapClear(m ir.Node) ir.Node { t := m.Type() // instantiate mapclear(typ *type, hmap map[any]any) - fn := syslook("mapclear") - fn = substArgTypes(fn, t.Key(), t.Elem()) + fn := typecheck.LookupRuntime("mapclear") + fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem()) n := mkcall1(fn, nil, nil, typename(t), m) - return walkstmt(typecheck(n, ctxStmt)) + return walkstmt(typecheck.Stmt(n)) } // Lower n into runtime·memclr if possible, for @@ -566,16 +442,16 @@ func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node { n.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(0)) // hp = &a[0] - hp := temp(types.Types[types.TUNSAFEPTR]) + hp := typecheck.Temp(types.Types[types.TUNSAFEPTR]) ix := ir.NewIndexExpr(base.Pos, a, ir.NewInt(0)) ix.SetBounded(true) - addr := convnop(nodAddr(ix), types.Types[types.TUNSAFEPTR]) + addr := typecheck.ConvNop(typecheck.NodAddr(ix), types.Types[types.TUNSAFEPTR]) n.Body.Append(ir.NewAssignStmt(base.Pos, hp, addr)) // hn = len(a) * sizeof(elem(a)) - hn := temp(types.Types[types.TUINTPTR]) - mul := conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(elemsize)), types.Types[types.TUINTPTR]) + hn := typecheck.Temp(types.Types[types.TUINTPTR]) + mul := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(elemsize)), types.Types[types.TUINTPTR]) n.Body.Append(ir.NewAssignStmt(base.Pos, hn, mul)) var fn ir.Node @@ -595,9 +471,9 @@ func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node { n.Body.Append(v1) - n.Cond = typecheck(n.Cond, ctxExpr) - n.Cond = defaultlit(n.Cond, nil) - typecheckslice(n.Body, ctxStmt) + n.Cond = typecheck.Expr(n.Cond) + n.Cond = typecheck.DefaultLit(n.Cond, nil) + typecheck.Stmts(n.Body) return walkstmt(n) } diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 987b2d6ee2852..7594884f9f35e 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -7,6 +7,7 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/gcprog" "cmd/internal/obj" @@ -339,36 +340,6 @@ func deferstruct(stksize int64) *types.Type { return s } -// f is method type, with receiver. -// return function type, receiver as first argument (or not). -func methodfunc(f *types.Type, receiver *types.Type) *types.Type { - inLen := f.Params().Fields().Len() - if receiver != nil { - inLen++ - } - in := make([]*ir.Field, 0, inLen) - - if receiver != nil { - d := ir.NewField(base.Pos, nil, nil, receiver) - in = append(in, d) - } - - for _, t := range f.Params().Fields().Slice() { - d := ir.NewField(base.Pos, nil, nil, t.Type) - d.IsDDD = t.IsDDD() - in = append(in, d) - } - - outLen := f.Results().Fields().Len() - out := make([]*ir.Field, 0, outLen) - for _, t := range f.Results().Fields().Slice() { - d := ir.NewField(base.Pos, nil, nil, t.Type) - out = append(out, d) - } - - return functype(nil, in, out) -} - // methods returns the methods of the non-interface type t, sorted by name. // Generates stub functions as needed. 
func methods(t *types.Type) []*Sig { @@ -378,7 +349,7 @@ func methods(t *types.Type) []*Sig { if mt == nil { return nil } - expandmeth(mt) + typecheck.CalcMethods(mt) // type stored in interface word it := t @@ -418,8 +389,8 @@ func methods(t *types.Type) []*Sig { name: method, isym: ir.MethodSym(it, method), tsym: ir.MethodSym(t, method), - type_: methodfunc(f.Type, t), - mtype: methodfunc(f.Type, nil), + type_: typecheck.NewMethodType(f.Type, t), + mtype: typecheck.NewMethodType(f.Type, nil), } ms = append(ms, sig) @@ -463,7 +434,7 @@ func imethods(t *types.Type) []*Sig { sig := &Sig{ name: f.Sym, mtype: f.Type, - type_: methodfunc(f.Type, nil), + type_: typecheck.NewMethodType(f.Type, nil), } methods = append(methods, sig) @@ -916,7 +887,7 @@ func typename(t *types.Type) *ir.AddrExpr { s.Def = n } - n := nodAddr(ir.AsNode(s.Def)) + n := typecheck.NodAddr(ir.AsNode(s.Def)) n.SetType(types.NewPtr(s.Def.Type())) n.SetTypecheck(1) return n @@ -928,7 +899,7 @@ func itabname(t, itype *types.Type) *ir.AddrExpr { } s := ir.Pkgs.Itab.Lookup(t.ShortString() + "," + itype.ShortString()) if s.Def == nil { - n := NewName(s) + n := typecheck.NewName(s) n.SetType(types.Types[types.TUINT8]) n.Class_ = ir.PEXTERN n.SetTypecheck(1) @@ -936,7 +907,7 @@ func itabname(t, itype *types.Type) *ir.AddrExpr { itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()}) } - n := nodAddr(ir.AsNode(s.Def)) + n := typecheck.NodAddr(ir.AsNode(s.Def)) n.SetType(types.NewPtr(s.Def.Type())) n.SetTypecheck(1) return n @@ -1033,7 +1004,7 @@ func dtypesym(t *types.Type) *obj.LSym { if base.Ctxt.Pkgpath != "runtime" || (tbase != types.Types[tbase.Kind()] && tbase != types.ByteType && tbase != types.RuneType && tbase != types.ErrorType) { // int, float, etc // named types from other files are defined only by those files if tbase.Sym() != nil && tbase.Sym().Pkg != types.LocalPkg { - if i := BaseTypeIndex(t); i >= 0 { + if i := typecheck.BaseTypeIndex(t); i >= 0 { lsym.Pkg = tbase.Sym().Pkg.Prefix lsym.SymIdx = int32(i) lsym.Set(obj.AttrIndexed, true) @@ -1492,7 +1463,7 @@ func dumpbasictypes() { // The latter is the type of an auto-generated wrapper. dtypesym(types.NewPtr(types.ErrorType)) - dtypesym(functype(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, types.ErrorType)}, []*ir.Field{ir.NewField(base.Pos, nil, nil, types.Types[types.TSTRING])})) + dtypesym(typecheck.NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, types.ErrorType)}, []*ir.Field{ir.NewField(base.Pos, nil, nil, types.Types[types.TSTRING])})) // add paths for runtime and main, which 6l imports implicitly. 
dimportpath(ir.Pkgs.Runtime) @@ -1744,13 +1715,13 @@ func zeroaddr(size int64) ir.Node { } s := ir.Pkgs.Map.Lookup("zero") if s.Def == nil { - x := NewName(s) + x := typecheck.NewName(s) x.SetType(types.Types[types.TUINT8]) x.Class_ = ir.PEXTERN x.SetTypecheck(1) s.Def = x } - z := nodAddr(ir.AsNode(s.Def)) + z := typecheck.NodAddr(ir.AsNode(s.Def)) z.SetType(types.NewPtr(types.Types[types.TUINT8])) z.SetTypecheck(1) return z diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go index 67a2cfd312d99..51bb1e5355ba6 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/gc/select.go @@ -7,92 +7,10 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" ) -// select -func typecheckselect(sel *ir.SelectStmt) { - var def ir.Node - lno := ir.SetPos(sel) - typecheckslice(sel.Init(), ctxStmt) - for _, ncase := range sel.Cases { - ncase := ncase.(*ir.CaseStmt) - - if len(ncase.List) == 0 { - // default - if def != nil { - base.ErrorfAt(ncase.Pos(), "multiple defaults in select (first at %v)", ir.Line(def)) - } else { - def = ncase - } - } else if len(ncase.List) > 1 { - base.ErrorfAt(ncase.Pos(), "select cases cannot be lists") - } else { - ncase.List[0] = typecheck(ncase.List[0], ctxStmt) - n := ncase.List[0] - ncase.Comm = n - ncase.List.Set(nil) - oselrecv2 := func(dst, recv ir.Node, colas bool) { - n := ir.NewAssignListStmt(n.Pos(), ir.OSELRECV2, nil, nil) - n.Lhs = []ir.Node{dst, ir.BlankNode} - n.Rhs = []ir.Node{recv} - n.Def = colas - n.SetTypecheck(1) - ncase.Comm = n - } - switch n.Op() { - default: - pos := n.Pos() - if n.Op() == ir.ONAME { - // We don't have the right position for ONAME nodes (see #15459 and - // others). Using ncase.Pos for now as it will provide the correct - // line number (assuming the expression follows the "case" keyword - // on the same line). This matches the approach before 1.10. - pos = ncase.Pos() - } - base.ErrorfAt(pos, "select case must be receive, send or assign recv") - - case ir.OAS: - // convert x = <-c into x, _ = <-c - // remove implicit conversions; the eventual assignment - // will reintroduce them. 
- n := n.(*ir.AssignStmt) - if r := n.Y; r.Op() == ir.OCONVNOP || r.Op() == ir.OCONVIFACE { - r := r.(*ir.ConvExpr) - if r.Implicit() { - n.Y = r.X - } - } - if n.Y.Op() != ir.ORECV { - base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side") - break - } - oselrecv2(n.X, n.Y, n.Def) - - case ir.OAS2RECV: - n := n.(*ir.AssignListStmt) - if n.Rhs[0].Op() != ir.ORECV { - base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side") - break - } - n.SetOp(ir.OSELRECV2) - - case ir.ORECV: - // convert <-c into _, _ = <-c - n := n.(*ir.UnaryExpr) - oselrecv2(ir.BlankNode, n, false) - - case ir.OSEND: - break - } - } - - typecheckslice(ncase.Body, ctxStmt) - } - - base.Pos = lno -} - func walkselect(sel *ir.SelectStmt) { lno := ir.SetPos(sel) if len(sel.Compiled) != 0 { @@ -167,14 +85,14 @@ func walkselectcases(cases ir.Nodes) []ir.Node { switch n.Op() { case ir.OSEND: n := n.(*ir.SendStmt) - n.Value = nodAddr(n.Value) - n.Value = typecheck(n.Value, ctxExpr) + n.Value = typecheck.NodAddr(n.Value) + n.Value = typecheck.Expr(n.Value) case ir.OSELRECV2: n := n.(*ir.AssignListStmt) if !ir.IsBlank(n.Lhs[0]) { - n.Lhs[0] = nodAddr(n.Lhs[0]) - n.Lhs[0] = typecheck(n.Lhs[0], ctxExpr) + n.Lhs[0] = typecheck.NodAddr(n.Lhs[0]) + n.Lhs[0] = typecheck.Expr(n.Lhs[0]) } } } @@ -207,7 +125,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node { ch := recv.X elem := n.Lhs[0] if ir.IsBlank(elem) { - elem = nodnil() + elem = typecheck.NodNil() } if ir.IsBlank(n.Lhs[1]) { // if selectnbrecv(&v, c) { body } else { default body } @@ -215,12 +133,12 @@ func walkselectcases(cases ir.Nodes) []ir.Node { } else { // TODO(cuonglm): make this use selectnbrecv() // if selectnbrecv2(&v, &received, c) { body } else { default body } - receivedp := typecheck(nodAddr(n.Lhs[1]), ctxExpr) + receivedp := typecheck.Expr(typecheck.NodAddr(n.Lhs[1])) call = mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch) } } - r.Cond = typecheck(call, ctxExpr) + r.Cond = typecheck.Expr(call) r.Body.Set(cas.Body) r.Else.Set(append(dflt.Init(), dflt.Body...)) return []ir.Node{r, ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)} @@ -236,18 +154,18 @@ func walkselectcases(cases ir.Nodes) []ir.Node { // generate sel-struct base.Pos = sellineno - selv := temp(types.NewArray(scasetype(), int64(ncas))) - init = append(init, typecheck(ir.NewAssignStmt(base.Pos, selv, nil), ctxStmt)) + selv := typecheck.Temp(types.NewArray(scasetype(), int64(ncas))) + init = append(init, typecheck.Stmt(ir.NewAssignStmt(base.Pos, selv, nil))) // No initialization for order; runtime.selectgo is responsible for that. 
- order := temp(types.NewArray(types.Types[types.TUINT16], 2*int64(ncas))) + order := typecheck.Temp(types.NewArray(types.Types[types.TUINT16], 2*int64(ncas))) var pc0, pcs ir.Node if base.Flag.Race { - pcs = temp(types.NewArray(types.Types[types.TUINTPTR], int64(ncas))) - pc0 = typecheck(nodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(0))), ctxExpr) + pcs = typecheck.Temp(types.NewArray(types.Types[types.TUINTPTR], int64(ncas))) + pc0 = typecheck.Expr(typecheck.NodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(0)))) } else { - pc0 = nodnil() + pc0 = typecheck.NodNil() } // register cases @@ -286,21 +204,21 @@ func walkselectcases(cases ir.Nodes) []ir.Node { casorder[i] = cas setField := func(f string, val ir.Node) { - r := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, ir.NewIndexExpr(base.Pos, selv, ir.NewInt(int64(i))), lookup(f)), val) - init = append(init, typecheck(r, ctxStmt)) + r := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, ir.NewIndexExpr(base.Pos, selv, ir.NewInt(int64(i))), typecheck.Lookup(f)), val) + init = append(init, typecheck.Stmt(r)) } - c = convnop(c, types.Types[types.TUNSAFEPTR]) + c = typecheck.ConvNop(c, types.Types[types.TUNSAFEPTR]) setField("c", c) if !ir.IsBlank(elem) { - elem = convnop(elem, types.Types[types.TUNSAFEPTR]) + elem = typecheck.ConvNop(elem, types.Types[types.TUNSAFEPTR]) setField("elem", elem) } // TODO(mdempsky): There should be a cleaner way to // handle this. if base.Flag.Race { - r := mkcall("selectsetpc", nil, nil, nodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(int64(i))))) + r := mkcall("selectsetpc", nil, nil, typecheck.NodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(int64(i))))) init = append(init, r) } } @@ -310,13 +228,13 @@ func walkselectcases(cases ir.Nodes) []ir.Node { // run the select base.Pos = sellineno - chosen := temp(types.Types[types.TINT]) - recvOK := temp(types.Types[types.TBOOL]) + chosen := typecheck.Temp(types.Types[types.TINT]) + recvOK := typecheck.Temp(types.Types[types.TBOOL]) r := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) r.Lhs = []ir.Node{chosen, recvOK} - fn := syslook("selectgo") + fn := typecheck.LookupRuntime("selectgo") r.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, ir.NewInt(int64(nsends)), ir.NewInt(int64(nrecvs)), ir.NewBool(dflt == nil))} - init = append(init, typecheck(r, ctxStmt)) + init = append(init, typecheck.Stmt(r)) // selv and order are no longer alive after selectgo. init = append(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, selv)) @@ -327,8 +245,8 @@ func walkselectcases(cases ir.Nodes) []ir.Node { // dispatch cases dispatch := func(cond ir.Node, cas *ir.CaseStmt) { - cond = typecheck(cond, ctxExpr) - cond = defaultlit(cond, nil) + cond = typecheck.Expr(cond) + cond = typecheck.DefaultLit(cond, nil) r := ir.NewIfStmt(base.Pos, cond, nil, nil) @@ -336,7 +254,7 @@ func walkselectcases(cases ir.Nodes) []ir.Node { n := n.(*ir.AssignListStmt) if !ir.IsBlank(n.Lhs[1]) { x := ir.NewAssignStmt(base.Pos, n.Lhs[1], recvOK) - r.Body.Append(typecheck(x, ctxStmt)) + r.Body.Append(typecheck.Stmt(x)) } } @@ -359,9 +277,9 @@ func walkselectcases(cases ir.Nodes) []ir.Node { // bytePtrToIndex returns a Node representing "(*byte)(&n[i])". 
func bytePtrToIndex(n ir.Node, i int64) ir.Node { - s := nodAddr(ir.NewIndexExpr(base.Pos, n, ir.NewInt(i))) + s := typecheck.NodAddr(ir.NewIndexExpr(base.Pos, n, ir.NewInt(i))) t := types.NewPtr(types.Types[types.TUINT8]) - return convnop(s, t) + return typecheck.ConvNop(s, t) } var scase *types.Type @@ -369,9 +287,9 @@ var scase *types.Type // Keep in sync with src/runtime/select.go. func scasetype() *types.Type { if scase == nil { - scase = tostruct([]*ir.Field{ - ir.NewField(base.Pos, lookup("c"), nil, types.Types[types.TUNSAFEPTR]), - ir.NewField(base.Pos, lookup("elem"), nil, types.Types[types.TUNSAFEPTR]), + scase = typecheck.NewStructType([]*ir.Field{ + ir.NewField(base.Pos, typecheck.Lookup("c"), nil, types.Types[types.TUNSAFEPTR]), + ir.NewField(base.Pos, typecheck.Lookup("elem"), nil, types.Types[types.TUNSAFEPTR]), }) scase.SetNoalg(true) } diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index e9a4590043021..26591ad5abf29 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -7,6 +7,7 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" "fmt" @@ -112,7 +113,7 @@ func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *type if loff != 0 || !types.Identical(typ, l.Type()) { dst = ir.NewNameOffsetExpr(base.Pos, l, loff, typ) } - s.append(ir.NewAssignStmt(base.Pos, dst, conv(r, typ))) + s.append(ir.NewAssignStmt(base.Pos, dst, typecheck.Conv(r, typ))) return true case ir.ONIL: @@ -387,9 +388,9 @@ var statuniqgen int // name generator for static temps // Use readonlystaticname for read-only node. func staticname(t *types.Type) *ir.Name { // Don't use lookupN; it interns the resulting string, but these are all unique. 
- n := NewName(lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen))) + n := typecheck.NewName(typecheck.Lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen))) statuniqgen++ - declare(n, ir.PEXTERN) + typecheck.Declare(n, ir.PEXTERN) n.SetType(t) n.Sym().Linksym().Set(obj.AttrLocal, true) return n @@ -541,7 +542,7 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node, splitnode = func(r ir.Node) (ir.Node, ir.Node) { if r.Op() == ir.OKEY { kv := r.(*ir.KeyExpr) - k = indexconst(kv.Key) + k = typecheck.IndexConst(kv.Key) if k < 0 { base.Fatalf("fixedlit: invalid index %v", kv.Key) } @@ -596,7 +597,7 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node, // build list of assignments: var[index] = expr ir.SetPos(a) as := ir.NewAssignStmt(base.Pos, a, value) - as = typecheck(as, ctxStmt).(*ir.AssignStmt) + as = typecheck.Stmt(as).(*ir.AssignStmt) switch kind { case initKindStatic: genAsStatic(as) @@ -632,7 +633,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) fixedlit(ctxt, initKindDynamic, n, vstat, init) // copy static to slice - var_ = typecheck(var_, ctxExpr|ctxAssign) + var_ = typecheck.AssignExpr(var_) name, offset, ok := stataddr(var_) if !ok || name.Class_ != ir.PEXTERN { base.Fatalf("slicelit: %v", var_) @@ -675,7 +676,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) } // make new auto *array (3 declare) - vauto := temp(types.NewPtr(t)) + vauto := typecheck.Temp(types.NewPtr(t)) // set auto to point at new temp or heap (3 assign) var a ir.Node @@ -687,7 +688,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) if vstat == nil { a = ir.NewAssignStmt(base.Pos, x, nil) - a = typecheck(a, ctxStmt) + a = typecheck.Stmt(a) init.Append(a) // zero new temp } else { // Declare that we're about to initialize all of x. 
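
For orientation in the slicelit hunks that follow, a hedged sketch in ordinary Go of the pattern the function's numbered comments describe; staticBacking, sliceLitLowered, and dynamic are invented names, not compiler identifiers.

	// Static entries come from a compile-time backing array (vstat in
	// the diff), dynamic entries are patched in afterwards, and the
	// result is a slice of the auto array.
	var staticBacking = [3]int{1, 0, 3} // constant part, filled statically

	func sliceLitLowered(dynamic int) []int {
		vauto := new([3]int)   // "make new auto *array (3 declare)"
		*vauto = staticBacking // copy the static part into the auto
		vauto[1] = dynamic     // evaluate and insert a dynamic entry
		return vauto[:]        // "make slice out of heap (6)"
	}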
@@ -695,19 +696,19 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) init.Append(ir.NewUnaryExpr(base.Pos, ir.OVARDEF, x)) } - a = nodAddr(x) + a = typecheck.NodAddr(x) } else if n.Esc() == ir.EscNone { - a = temp(t) + a = typecheck.Temp(t) if vstat == nil { - a = ir.NewAssignStmt(base.Pos, temp(t), nil) - a = typecheck(a, ctxStmt) + a = ir.NewAssignStmt(base.Pos, typecheck.Temp(t), nil) + a = typecheck.Stmt(a) init.Append(a) // zero new temp a = a.(*ir.AssignStmt).X } else { init.Append(ir.NewUnaryExpr(base.Pos, ir.OVARDEF, a)) } - a = nodAddr(a) + a = typecheck.NodAddr(a) } else { a = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(t)) } @@ -724,7 +725,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) for _, value := range n.List { if value.Op() == ir.OKEY { kv := value.(*ir.KeyExpr) - index = indexconst(kv.Key) + index = typecheck.IndexConst(kv.Key) if index < 0 { base.Fatalf("slicelit: invalid index %v", kv.Key) } @@ -758,7 +759,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) // build list of vauto[c] = expr ir.SetPos(value) - as := typecheck(ir.NewAssignStmt(base.Pos, a, value), ctxStmt) + as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, a, value)) as = orderStmtInPlace(as, map[string][]*ir.Name{}) as = walkstmt(as) init.Append(as) @@ -767,7 +768,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) // make slice out of heap (6) a = ir.NewAssignStmt(base.Pos, var_, ir.NewSliceExpr(base.Pos, ir.OSLICE, vauto)) - a = typecheck(a, ctxStmt) + a = typecheck.Stmt(a) a = orderStmtInPlace(a, map[string][]*ir.Name{}) a = walkstmt(a) init.Append(a) @@ -822,7 +823,7 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) { // for i = 0; i < len(vstatk); i++ { // map[vstatk[i]] = vstate[i] // } - i := temp(types.Types[types.TINT]) + i := typecheck.Temp(types.Types[types.TINT]) rhs := ir.NewIndexExpr(base.Pos, vstate, i) rhs.SetBounded(true) @@ -847,8 +848,8 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) { // Build list of var[c] = expr. // Use temporaries so that mapassign1 can have addressable key, elem. // TODO(josharian): avoid map key temporaries for mapfast_* assignments with literal keys. - tmpkey := temp(m.Type().Key()) - tmpelem := temp(m.Type().Elem()) + tmpkey := typecheck.Temp(m.Type().Key()) + tmpelem := typecheck.Temp(m.Type().Elem()) for _, r := range entries { r := r.(*ir.KeyExpr) @@ -892,7 +893,7 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { if n.Alloc != nil { // n.Right is stack temporary used as backing store. 
appendWalkStmt(init, ir.NewAssignStmt(base.Pos, n.Alloc, nil)) // zero backing store, just in case (#18410) - r = nodAddr(n.Alloc) + r = typecheck.NodAddr(n.Alloc) } else { r = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(n.X.Type())) r.SetEsc(n.Esc()) @@ -900,7 +901,7 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, r)) var_ = ir.NewStarExpr(base.Pos, var_) - var_ = typecheck(var_, ctxExpr|ctxAssign) + var_ = typecheck.AssignExpr(var_) anylit(n.X, var_, init) case ir.OSTRUCTLIT, ir.OARRAYLIT: @@ -1060,7 +1061,7 @@ func (s *InitSchedule) initplan(n ir.Node) { for _, a := range n.List { if a.Op() == ir.OKEY { kv := a.(*ir.KeyExpr) - k = indexconst(kv.Key) + k = typecheck.IndexConst(kv.Key) if k < 0 { base.Fatalf("initplan arraylit: invalid index %v", kv.Key) } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 21925a0d651fe..382e4d4320026 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -19,6 +19,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/ssa" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/x86" @@ -91,119 +92,119 @@ func initssaconfig() { ssaCaches = make([]ssa.Cache, base.Flag.LowerC) // Set up some runtime functions we'll need to call. - ir.Syms.AssertE2I = sysfunc("assertE2I") - ir.Syms.AssertE2I2 = sysfunc("assertE2I2") - ir.Syms.AssertI2I = sysfunc("assertI2I") - ir.Syms.AssertI2I2 = sysfunc("assertI2I2") - ir.Syms.Deferproc = sysfunc("deferproc") - ir.Syms.DeferprocStack = sysfunc("deferprocStack") - ir.Syms.Deferreturn = sysfunc("deferreturn") - ir.Syms.Duffcopy = sysfunc("duffcopy") - ir.Syms.Duffzero = sysfunc("duffzero") - ir.Syms.GCWriteBarrier = sysfunc("gcWriteBarrier") - ir.Syms.Goschedguarded = sysfunc("goschedguarded") - ir.Syms.Growslice = sysfunc("growslice") - ir.Syms.Msanread = sysfunc("msanread") - ir.Syms.Msanwrite = sysfunc("msanwrite") - ir.Syms.Msanmove = sysfunc("msanmove") - ir.Syms.Newobject = sysfunc("newobject") - ir.Syms.Newproc = sysfunc("newproc") - ir.Syms.Panicdivide = sysfunc("panicdivide") - ir.Syms.PanicdottypeE = sysfunc("panicdottypeE") - ir.Syms.PanicdottypeI = sysfunc("panicdottypeI") - ir.Syms.Panicnildottype = sysfunc("panicnildottype") - ir.Syms.Panicoverflow = sysfunc("panicoverflow") - ir.Syms.Panicshift = sysfunc("panicshift") - ir.Syms.Raceread = sysfunc("raceread") - ir.Syms.Racereadrange = sysfunc("racereadrange") - ir.Syms.Racewrite = sysfunc("racewrite") - ir.Syms.Racewriterange = sysfunc("racewriterange") - ir.Syms.X86HasPOPCNT = sysvar("x86HasPOPCNT") // bool - ir.Syms.X86HasSSE41 = sysvar("x86HasSSE41") // bool - ir.Syms.X86HasFMA = sysvar("x86HasFMA") // bool - ir.Syms.ARMHasVFPv4 = sysvar("armHasVFPv4") // bool - ir.Syms.ARM64HasATOMICS = sysvar("arm64HasATOMICS") // bool - ir.Syms.Typedmemclr = sysfunc("typedmemclr") - ir.Syms.Typedmemmove = sysfunc("typedmemmove") - ir.Syms.Udiv = sysvar("udiv") // asm func with special ABI - ir.Syms.WriteBarrier = sysvar("writeBarrier") // struct { bool; ... 
} - ir.Syms.Zerobase = sysvar("zerobase") + ir.Syms.AssertE2I = typecheck.LookupRuntimeFunc("assertE2I") + ir.Syms.AssertE2I2 = typecheck.LookupRuntimeFunc("assertE2I2") + ir.Syms.AssertI2I = typecheck.LookupRuntimeFunc("assertI2I") + ir.Syms.AssertI2I2 = typecheck.LookupRuntimeFunc("assertI2I2") + ir.Syms.Deferproc = typecheck.LookupRuntimeFunc("deferproc") + ir.Syms.DeferprocStack = typecheck.LookupRuntimeFunc("deferprocStack") + ir.Syms.Deferreturn = typecheck.LookupRuntimeFunc("deferreturn") + ir.Syms.Duffcopy = typecheck.LookupRuntimeFunc("duffcopy") + ir.Syms.Duffzero = typecheck.LookupRuntimeFunc("duffzero") + ir.Syms.GCWriteBarrier = typecheck.LookupRuntimeFunc("gcWriteBarrier") + ir.Syms.Goschedguarded = typecheck.LookupRuntimeFunc("goschedguarded") + ir.Syms.Growslice = typecheck.LookupRuntimeFunc("growslice") + ir.Syms.Msanread = typecheck.LookupRuntimeFunc("msanread") + ir.Syms.Msanwrite = typecheck.LookupRuntimeFunc("msanwrite") + ir.Syms.Msanmove = typecheck.LookupRuntimeFunc("msanmove") + ir.Syms.Newobject = typecheck.LookupRuntimeFunc("newobject") + ir.Syms.Newproc = typecheck.LookupRuntimeFunc("newproc") + ir.Syms.Panicdivide = typecheck.LookupRuntimeFunc("panicdivide") + ir.Syms.PanicdottypeE = typecheck.LookupRuntimeFunc("panicdottypeE") + ir.Syms.PanicdottypeI = typecheck.LookupRuntimeFunc("panicdottypeI") + ir.Syms.Panicnildottype = typecheck.LookupRuntimeFunc("panicnildottype") + ir.Syms.Panicoverflow = typecheck.LookupRuntimeFunc("panicoverflow") + ir.Syms.Panicshift = typecheck.LookupRuntimeFunc("panicshift") + ir.Syms.Raceread = typecheck.LookupRuntimeFunc("raceread") + ir.Syms.Racereadrange = typecheck.LookupRuntimeFunc("racereadrange") + ir.Syms.Racewrite = typecheck.LookupRuntimeFunc("racewrite") + ir.Syms.Racewriterange = typecheck.LookupRuntimeFunc("racewriterange") + ir.Syms.X86HasPOPCNT = typecheck.LookupRuntimeVar("x86HasPOPCNT") // bool + ir.Syms.X86HasSSE41 = typecheck.LookupRuntimeVar("x86HasSSE41") // bool + ir.Syms.X86HasFMA = typecheck.LookupRuntimeVar("x86HasFMA") // bool + ir.Syms.ARMHasVFPv4 = typecheck.LookupRuntimeVar("armHasVFPv4") // bool + ir.Syms.ARM64HasATOMICS = typecheck.LookupRuntimeVar("arm64HasATOMICS") // bool + ir.Syms.Typedmemclr = typecheck.LookupRuntimeFunc("typedmemclr") + ir.Syms.Typedmemmove = typecheck.LookupRuntimeFunc("typedmemmove") + ir.Syms.Udiv = typecheck.LookupRuntimeVar("udiv") // asm func with special ABI + ir.Syms.WriteBarrier = typecheck.LookupRuntimeVar("writeBarrier") // struct { bool; ... 
} + ir.Syms.Zerobase = typecheck.LookupRuntimeVar("zerobase") // asm funcs with special ABI if thearch.LinkArch.Name == "amd64" { GCWriteBarrierReg = map[int16]*obj.LSym{ - x86.REG_AX: sysfunc("gcWriteBarrier"), - x86.REG_CX: sysfunc("gcWriteBarrierCX"), - x86.REG_DX: sysfunc("gcWriteBarrierDX"), - x86.REG_BX: sysfunc("gcWriteBarrierBX"), - x86.REG_BP: sysfunc("gcWriteBarrierBP"), - x86.REG_SI: sysfunc("gcWriteBarrierSI"), - x86.REG_R8: sysfunc("gcWriteBarrierR8"), - x86.REG_R9: sysfunc("gcWriteBarrierR9"), + x86.REG_AX: typecheck.LookupRuntimeFunc("gcWriteBarrier"), + x86.REG_CX: typecheck.LookupRuntimeFunc("gcWriteBarrierCX"), + x86.REG_DX: typecheck.LookupRuntimeFunc("gcWriteBarrierDX"), + x86.REG_BX: typecheck.LookupRuntimeFunc("gcWriteBarrierBX"), + x86.REG_BP: typecheck.LookupRuntimeFunc("gcWriteBarrierBP"), + x86.REG_SI: typecheck.LookupRuntimeFunc("gcWriteBarrierSI"), + x86.REG_R8: typecheck.LookupRuntimeFunc("gcWriteBarrierR8"), + x86.REG_R9: typecheck.LookupRuntimeFunc("gcWriteBarrierR9"), } } if thearch.LinkArch.Family == sys.Wasm { - BoundsCheckFunc[ssa.BoundsIndex] = sysfunc("goPanicIndex") - BoundsCheckFunc[ssa.BoundsIndexU] = sysfunc("goPanicIndexU") - BoundsCheckFunc[ssa.BoundsSliceAlen] = sysfunc("goPanicSliceAlen") - BoundsCheckFunc[ssa.BoundsSliceAlenU] = sysfunc("goPanicSliceAlenU") - BoundsCheckFunc[ssa.BoundsSliceAcap] = sysfunc("goPanicSliceAcap") - BoundsCheckFunc[ssa.BoundsSliceAcapU] = sysfunc("goPanicSliceAcapU") - BoundsCheckFunc[ssa.BoundsSliceB] = sysfunc("goPanicSliceB") - BoundsCheckFunc[ssa.BoundsSliceBU] = sysfunc("goPanicSliceBU") - BoundsCheckFunc[ssa.BoundsSlice3Alen] = sysfunc("goPanicSlice3Alen") - BoundsCheckFunc[ssa.BoundsSlice3AlenU] = sysfunc("goPanicSlice3AlenU") - BoundsCheckFunc[ssa.BoundsSlice3Acap] = sysfunc("goPanicSlice3Acap") - BoundsCheckFunc[ssa.BoundsSlice3AcapU] = sysfunc("goPanicSlice3AcapU") - BoundsCheckFunc[ssa.BoundsSlice3B] = sysfunc("goPanicSlice3B") - BoundsCheckFunc[ssa.BoundsSlice3BU] = sysfunc("goPanicSlice3BU") - BoundsCheckFunc[ssa.BoundsSlice3C] = sysfunc("goPanicSlice3C") - BoundsCheckFunc[ssa.BoundsSlice3CU] = sysfunc("goPanicSlice3CU") + BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("goPanicIndex") + BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("goPanicIndexU") + BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("goPanicSliceAlen") + BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("goPanicSliceAlenU") + BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("goPanicSliceAcap") + BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("goPanicSliceAcapU") + BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("goPanicSliceB") + BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("goPanicSliceBU") + BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("goPanicSlice3Alen") + BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("goPanicSlice3AlenU") + BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("goPanicSlice3Acap") + BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("goPanicSlice3AcapU") + BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("goPanicSlice3B") + BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("goPanicSlice3BU") + BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("goPanicSlice3C") + BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("goPanicSlice3CU") } else 
{ - BoundsCheckFunc[ssa.BoundsIndex] = sysfunc("panicIndex") - BoundsCheckFunc[ssa.BoundsIndexU] = sysfunc("panicIndexU") - BoundsCheckFunc[ssa.BoundsSliceAlen] = sysfunc("panicSliceAlen") - BoundsCheckFunc[ssa.BoundsSliceAlenU] = sysfunc("panicSliceAlenU") - BoundsCheckFunc[ssa.BoundsSliceAcap] = sysfunc("panicSliceAcap") - BoundsCheckFunc[ssa.BoundsSliceAcapU] = sysfunc("panicSliceAcapU") - BoundsCheckFunc[ssa.BoundsSliceB] = sysfunc("panicSliceB") - BoundsCheckFunc[ssa.BoundsSliceBU] = sysfunc("panicSliceBU") - BoundsCheckFunc[ssa.BoundsSlice3Alen] = sysfunc("panicSlice3Alen") - BoundsCheckFunc[ssa.BoundsSlice3AlenU] = sysfunc("panicSlice3AlenU") - BoundsCheckFunc[ssa.BoundsSlice3Acap] = sysfunc("panicSlice3Acap") - BoundsCheckFunc[ssa.BoundsSlice3AcapU] = sysfunc("panicSlice3AcapU") - BoundsCheckFunc[ssa.BoundsSlice3B] = sysfunc("panicSlice3B") - BoundsCheckFunc[ssa.BoundsSlice3BU] = sysfunc("panicSlice3BU") - BoundsCheckFunc[ssa.BoundsSlice3C] = sysfunc("panicSlice3C") - BoundsCheckFunc[ssa.BoundsSlice3CU] = sysfunc("panicSlice3CU") + BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("panicIndex") + BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("panicIndexU") + BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("panicSliceAlen") + BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("panicSliceAlenU") + BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("panicSliceAcap") + BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("panicSliceAcapU") + BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("panicSliceB") + BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("panicSliceBU") + BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("panicSlice3Alen") + BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("panicSlice3AlenU") + BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("panicSlice3Acap") + BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("panicSlice3AcapU") + BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("panicSlice3B") + BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("panicSlice3BU") + BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("panicSlice3C") + BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("panicSlice3CU") } if thearch.LinkArch.PtrSize == 4 { - ExtendCheckFunc[ssa.BoundsIndex] = sysvar("panicExtendIndex") - ExtendCheckFunc[ssa.BoundsIndexU] = sysvar("panicExtendIndexU") - ExtendCheckFunc[ssa.BoundsSliceAlen] = sysvar("panicExtendSliceAlen") - ExtendCheckFunc[ssa.BoundsSliceAlenU] = sysvar("panicExtendSliceAlenU") - ExtendCheckFunc[ssa.BoundsSliceAcap] = sysvar("panicExtendSliceAcap") - ExtendCheckFunc[ssa.BoundsSliceAcapU] = sysvar("panicExtendSliceAcapU") - ExtendCheckFunc[ssa.BoundsSliceB] = sysvar("panicExtendSliceB") - ExtendCheckFunc[ssa.BoundsSliceBU] = sysvar("panicExtendSliceBU") - ExtendCheckFunc[ssa.BoundsSlice3Alen] = sysvar("panicExtendSlice3Alen") - ExtendCheckFunc[ssa.BoundsSlice3AlenU] = sysvar("panicExtendSlice3AlenU") - ExtendCheckFunc[ssa.BoundsSlice3Acap] = sysvar("panicExtendSlice3Acap") - ExtendCheckFunc[ssa.BoundsSlice3AcapU] = sysvar("panicExtendSlice3AcapU") - ExtendCheckFunc[ssa.BoundsSlice3B] = sysvar("panicExtendSlice3B") - ExtendCheckFunc[ssa.BoundsSlice3BU] = sysvar("panicExtendSlice3BU") - ExtendCheckFunc[ssa.BoundsSlice3C] = sysvar("panicExtendSlice3C") - 
ExtendCheckFunc[ssa.BoundsSlice3CU] = sysvar("panicExtendSlice3CU") + ExtendCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeVar("panicExtendIndex") + ExtendCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeVar("panicExtendIndexU") + ExtendCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeVar("panicExtendSliceAlen") + ExtendCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeVar("panicExtendSliceAlenU") + ExtendCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeVar("panicExtendSliceAcap") + ExtendCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeVar("panicExtendSliceAcapU") + ExtendCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeVar("panicExtendSliceB") + ExtendCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeVar("panicExtendSliceBU") + ExtendCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeVar("panicExtendSlice3Alen") + ExtendCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeVar("panicExtendSlice3AlenU") + ExtendCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeVar("panicExtendSlice3Acap") + ExtendCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeVar("panicExtendSlice3AcapU") + ExtendCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeVar("panicExtendSlice3B") + ExtendCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeVar("panicExtendSlice3BU") + ExtendCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeVar("panicExtendSlice3C") + ExtendCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeVar("panicExtendSlice3CU") } // Wasm (all asm funcs with special ABIs) - ir.Syms.WasmMove = sysvar("wasmMove") - ir.Syms.WasmZero = sysvar("wasmZero") - ir.Syms.WasmDiv = sysvar("wasmDiv") - ir.Syms.WasmTruncS = sysvar("wasmTruncS") - ir.Syms.WasmTruncU = sysvar("wasmTruncU") - ir.Syms.SigPanic = sysfunc("sigpanic") + ir.Syms.WasmMove = typecheck.LookupRuntimeVar("wasmMove") + ir.Syms.WasmZero = typecheck.LookupRuntimeVar("wasmZero") + ir.Syms.WasmDiv = typecheck.LookupRuntimeVar("wasmDiv") + ir.Syms.WasmTruncS = typecheck.LookupRuntimeVar("wasmTruncS") + ir.Syms.WasmTruncU = typecheck.LookupRuntimeVar("wasmTruncU") + ir.Syms.SigPanic = typecheck.LookupRuntimeFunc("sigpanic") } // getParam returns the Field of ith param of node n (which is a @@ -418,7 +419,7 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { // Create the deferBits variable and stack slot. deferBits is a // bitmask showing which of the open-coded defers in this function // have been activated. - deferBitsTemp := tempAt(src.NoXPos, s.curfn, types.Types[types.TUINT8]) + deferBitsTemp := typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT8]) s.deferBitsTemp = deferBitsTemp // For this value, AuxInt is initialized to zero by default startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[types.TUINT8]) @@ -710,7 +711,7 @@ func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() } func ssaMarker(name string) *ir.Name { - return NewName(&types.Sym{Name: name}) + return typecheck.NewName(&types.Sym{Name: name}) } var ( @@ -3342,38 +3343,38 @@ var softFloatOps map[ssa.Op]sfRtCallDef func softfloatInit() { // Some of these operations get transformed by sfcall. 
softFloatOps = map[ssa.Op]sfRtCallDef{ - ssa.OpAdd32F: sfRtCallDef{sysfunc("fadd32"), types.TFLOAT32}, - ssa.OpAdd64F: sfRtCallDef{sysfunc("fadd64"), types.TFLOAT64}, - ssa.OpSub32F: sfRtCallDef{sysfunc("fadd32"), types.TFLOAT32}, - ssa.OpSub64F: sfRtCallDef{sysfunc("fadd64"), types.TFLOAT64}, - ssa.OpMul32F: sfRtCallDef{sysfunc("fmul32"), types.TFLOAT32}, - ssa.OpMul64F: sfRtCallDef{sysfunc("fmul64"), types.TFLOAT64}, - ssa.OpDiv32F: sfRtCallDef{sysfunc("fdiv32"), types.TFLOAT32}, - ssa.OpDiv64F: sfRtCallDef{sysfunc("fdiv64"), types.TFLOAT64}, - - ssa.OpEq64F: sfRtCallDef{sysfunc("feq64"), types.TBOOL}, - ssa.OpEq32F: sfRtCallDef{sysfunc("feq32"), types.TBOOL}, - ssa.OpNeq64F: sfRtCallDef{sysfunc("feq64"), types.TBOOL}, - ssa.OpNeq32F: sfRtCallDef{sysfunc("feq32"), types.TBOOL}, - ssa.OpLess64F: sfRtCallDef{sysfunc("fgt64"), types.TBOOL}, - ssa.OpLess32F: sfRtCallDef{sysfunc("fgt32"), types.TBOOL}, - ssa.OpLeq64F: sfRtCallDef{sysfunc("fge64"), types.TBOOL}, - ssa.OpLeq32F: sfRtCallDef{sysfunc("fge32"), types.TBOOL}, - - ssa.OpCvt32to32F: sfRtCallDef{sysfunc("fint32to32"), types.TFLOAT32}, - ssa.OpCvt32Fto32: sfRtCallDef{sysfunc("f32toint32"), types.TINT32}, - ssa.OpCvt64to32F: sfRtCallDef{sysfunc("fint64to32"), types.TFLOAT32}, - ssa.OpCvt32Fto64: sfRtCallDef{sysfunc("f32toint64"), types.TINT64}, - ssa.OpCvt64Uto32F: sfRtCallDef{sysfunc("fuint64to32"), types.TFLOAT32}, - ssa.OpCvt32Fto64U: sfRtCallDef{sysfunc("f32touint64"), types.TUINT64}, - ssa.OpCvt32to64F: sfRtCallDef{sysfunc("fint32to64"), types.TFLOAT64}, - ssa.OpCvt64Fto32: sfRtCallDef{sysfunc("f64toint32"), types.TINT32}, - ssa.OpCvt64to64F: sfRtCallDef{sysfunc("fint64to64"), types.TFLOAT64}, - ssa.OpCvt64Fto64: sfRtCallDef{sysfunc("f64toint64"), types.TINT64}, - ssa.OpCvt64Uto64F: sfRtCallDef{sysfunc("fuint64to64"), types.TFLOAT64}, - ssa.OpCvt64Fto64U: sfRtCallDef{sysfunc("f64touint64"), types.TUINT64}, - ssa.OpCvt32Fto64F: sfRtCallDef{sysfunc("f32to64"), types.TFLOAT64}, - ssa.OpCvt64Fto32F: sfRtCallDef{sysfunc("f64to32"), types.TFLOAT32}, + ssa.OpAdd32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32}, + ssa.OpAdd64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64}, + ssa.OpSub32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32}, + ssa.OpSub64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64}, + ssa.OpMul32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fmul32"), types.TFLOAT32}, + ssa.OpMul64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fmul64"), types.TFLOAT64}, + ssa.OpDiv32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fdiv32"), types.TFLOAT32}, + ssa.OpDiv64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fdiv64"), types.TFLOAT64}, + + ssa.OpEq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq64"), types.TBOOL}, + ssa.OpEq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq32"), types.TBOOL}, + ssa.OpNeq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq64"), types.TBOOL}, + ssa.OpNeq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq32"), types.TBOOL}, + ssa.OpLess64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fgt64"), types.TBOOL}, + ssa.OpLess32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fgt32"), types.TBOOL}, + ssa.OpLeq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fge64"), types.TBOOL}, + ssa.OpLeq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fge32"), types.TBOOL}, + + ssa.OpCvt32to32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint32to32"), types.TFLOAT32}, + ssa.OpCvt32Fto32: sfRtCallDef{typecheck.LookupRuntimeFunc("f32toint32"), types.TINT32}, + ssa.OpCvt64to32F: 
sfRtCallDef{typecheck.LookupRuntimeFunc("fint64to32"), types.TFLOAT32}, + ssa.OpCvt32Fto64: sfRtCallDef{typecheck.LookupRuntimeFunc("f32toint64"), types.TINT64}, + ssa.OpCvt64Uto32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fuint64to32"), types.TFLOAT32}, + ssa.OpCvt32Fto64U: sfRtCallDef{typecheck.LookupRuntimeFunc("f32touint64"), types.TUINT64}, + ssa.OpCvt32to64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint32to64"), types.TFLOAT64}, + ssa.OpCvt64Fto32: sfRtCallDef{typecheck.LookupRuntimeFunc("f64toint32"), types.TINT32}, + ssa.OpCvt64to64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint64to64"), types.TFLOAT64}, + ssa.OpCvt64Fto64: sfRtCallDef{typecheck.LookupRuntimeFunc("f64toint64"), types.TINT64}, + ssa.OpCvt64Uto64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fuint64to64"), types.TFLOAT64}, + ssa.OpCvt64Fto64U: sfRtCallDef{typecheck.LookupRuntimeFunc("f64touint64"), types.TUINT64}, + ssa.OpCvt32Fto64F: sfRtCallDef{typecheck.LookupRuntimeFunc("f32to64"), types.TFLOAT64}, + ssa.OpCvt64Fto32F: sfRtCallDef{typecheck.LookupRuntimeFunc("f64to32"), types.TFLOAT32}, } } @@ -4458,7 +4459,7 @@ func (s *state) openDeferSave(n ir.Node, t *types.Type, val *ssa.Value) *ssa.Val } else { pos = n.Pos() } - argTemp := tempAt(pos.WithNotStmt(), s.curfn, t) + argTemp := typecheck.TempAt(pos.WithNotStmt(), s.curfn, t) argTemp.SetOpenDeferSlot(true) var addrArgTemp *ssa.Value // Use OpVarLive to make sure stack slots for the args, etc. are not @@ -4719,7 +4720,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val testLateExpansion = ssa.LateCallExpansionEnabledWithin(s.f) // Make a defer struct d on the stack. t := deferstruct(stksize) - d := tempAt(n.Pos(), s.curfn, t) + d := typecheck.TempAt(n.Pos(), s.curfn, t) s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem()) addr := s.addr(d) @@ -6144,7 +6145,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val if commaok && !canSSAType(n.Type()) { // unSSAable type, use temporary. // TODO: get rid of some of these temporaries. - tmp = tempAt(n.Pos(), s.curfn, n.Type()) + tmp = typecheck.TempAt(n.Pos(), s.curfn, n.Type()) s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp.(*ir.Name), s.mem()) addr = s.addr(tmp) } @@ -7173,7 +7174,7 @@ func (e *ssafn) StringData(s string) *obj.LSym { } func (e *ssafn) Auto(pos src.XPos, t *types.Type) *ir.Name { - return tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list + return typecheck.TempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list } func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index d4c7c6db1a6c3..8e2093d4883f6 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -7,11 +7,10 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/src" "fmt" - "sort" - "strconv" "strings" "sync" "unicode" @@ -31,71 +30,35 @@ var ( largeStackFrames []largeStack ) -func lookup(name string) *types.Sym { - return types.LocalPkg.Lookup(name) -} - -// lookupN looks up the symbol starting with prefix and ending with -// the decimal n. If prefix is too long, lookupN panics. 
-func lookupN(prefix string, n int) *types.Sym { - var buf [20]byte // plenty long enough for all current users - copy(buf[:], prefix) - b := strconv.AppendInt(buf[:len(prefix)], int64(n), 10) - return types.LocalPkg.LookupBytes(b) -} - -// autolabel generates a new Name node for use with -// an automatically generated label. -// prefix is a short mnemonic (e.g. ".s" for switch) -// to help with debugging. -// It should begin with "." to avoid conflicts with -// user labels. -func autolabel(prefix string) *types.Sym { - if prefix[0] != '.' { - base.Fatalf("autolabel prefix must start with '.', have %q", prefix) - } - fn := ir.CurFunc - if ir.CurFunc == nil { - base.Fatalf("autolabel outside function") - } - n := fn.Label - fn.Label++ - return lookupN(prefix, int(n)) -} - // dotImports tracks all PkgNames that have been dot-imported. var dotImports []*ir.PkgName -// dotImportRefs maps idents introduced by importDot back to the -// ir.PkgName they were dot-imported through. -var dotImportRefs map[*ir.Ident]*ir.PkgName - // find all the exported symbols in package referenced by PkgName, // and make them available in the current package func importDot(pack *ir.PkgName) { - if dotImportRefs == nil { - dotImportRefs = make(map[*ir.Ident]*ir.PkgName) + if typecheck.DotImportRefs == nil { + typecheck.DotImportRefs = make(map[*ir.Ident]*ir.PkgName) } opkg := pack.Pkg for _, s := range opkg.Syms { if s.Def == nil { - if _, ok := declImporter[s]; !ok { + if _, ok := typecheck.DeclImporter[s]; !ok { continue } } if !types.IsExported(s.Name) || strings.ContainsRune(s.Name, 0xb7) { // 0xb7 = center dot continue } - s1 := lookup(s.Name) + s1 := typecheck.Lookup(s.Name) if s1.Def != nil { pkgerror := fmt.Sprintf("during import %q", opkg.Path) - redeclare(base.Pos, s1, pkgerror) + typecheck.Redeclared(base.Pos, s1, pkgerror) continue } id := ir.NewIdent(src.NoXPos, s) - dotImportRefs[id] = pack + typecheck.DotImportRefs[id] = pack s1.Def = id s1.Block = 1 } @@ -113,347 +76,7 @@ func checkDotImports() { // No longer needed; release memory. dotImports = nil - dotImportRefs = nil -} - -// nodAddr returns a node representing &n at base.Pos. -func nodAddr(n ir.Node) *ir.AddrExpr { - return nodAddrAt(base.Pos, n) -} - -// nodAddrPos returns a node representing &n at position pos. -func nodAddrAt(pos src.XPos, n ir.Node) *ir.AddrExpr { - return ir.NewAddrExpr(pos, n) -} - -// newname returns a new ONAME Node associated with symbol s. -func NewName(s *types.Sym) *ir.Name { - n := ir.NewNameAt(base.Pos, s) - n.Curfn = ir.CurFunc - return n -} - -func nodnil() ir.Node { - n := ir.NewNilExpr(base.Pos) - n.SetType(types.Types[types.TNIL]) - return n -} - -func isptrto(t *types.Type, et types.Kind) bool { - if t == nil { - return false - } - if !t.IsPtr() { - return false - } - t = t.Elem() - if t == nil { - return false - } - if t.Kind() != et { - return false - } - return true -} - -// Is type src assignment compatible to type dst? -// If so, return op code to use in conversion. -// If not, return OXXX. In this case, the string return parameter may -// hold a reason why. In all other cases, it'll be the empty string. -func assignop(src, dst *types.Type) (ir.Op, string) { - if src == dst { - return ir.OCONVNOP, "" - } - if src == nil || dst == nil || src.Kind() == types.TFORW || dst.Kind() == types.TFORW || src.Underlying() == nil || dst.Underlying() == nil { - return ir.OXXX, "" - } - - // 1. src type is identical to dst. - if types.Identical(src, dst) { - return ir.OCONVNOP, "" - } - - // 2. 
src and dst have identical underlying types - // and either src or dst is not a named type or - // both are empty interface types. - // For assignable but different non-empty interface types, - // we want to recompute the itab. Recomputing the itab ensures - // that itabs are unique (thus an interface with a compile-time - // type I has an itab with interface type I). - if types.Identical(src.Underlying(), dst.Underlying()) { - if src.IsEmptyInterface() { - // Conversion between two empty interfaces - // requires no code. - return ir.OCONVNOP, "" - } - if (src.Sym() == nil || dst.Sym() == nil) && !src.IsInterface() { - // Conversion between two types, at least one unnamed, - // needs no conversion. The exception is nonempty interfaces - // which need to have their itab updated. - return ir.OCONVNOP, "" - } - } - - // 3. dst is an interface type and src implements dst. - if dst.IsInterface() && src.Kind() != types.TNIL { - var missing, have *types.Field - var ptr int - if implements(src, dst, &missing, &have, &ptr) { - // Call itabname so that (src, dst) - // gets added to itabs early, which allows - // us to de-virtualize calls through this - // type/interface pair later. See peekitabs in reflect.go - if types.IsDirectIface(src) && !dst.IsEmptyInterface() { - NeedITab(src, dst) - } - - return ir.OCONVIFACE, "" - } - - // we'll have complained about this method anyway, suppress spurious messages. - if have != nil && have.Sym == missing.Sym && (have.Type.Broke() || missing.Type.Broke()) { - return ir.OCONVIFACE, "" - } - - var why string - if isptrto(src, types.TINTER) { - why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", src) - } else if have != nil && have.Sym == missing.Sym && have.Nointerface() { - why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", src, dst, missing.Sym) - } else if have != nil && have.Sym == missing.Sym { - why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+ - "\t\thave %v%S\n\t\twant %v%S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) - } else if ptr != 0 { - why = fmt.Sprintf(":\n\t%v does not implement %v (%v method has pointer receiver)", src, dst, missing.Sym) - } else if have != nil { - why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)\n"+ - "\t\thave %v%S\n\t\twant %v%S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) - } else { - why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)", src, dst, missing.Sym) - } - - return ir.OXXX, why - } - - if isptrto(dst, types.TINTER) { - why := fmt.Sprintf(":\n\t%v is pointer to interface, not interface", dst) - return ir.OXXX, why - } - - if src.IsInterface() && dst.Kind() != types.TBLANK { - var missing, have *types.Field - var ptr int - var why string - if implements(dst, src, &missing, &have, &ptr) { - why = ": need type assertion" - } - return ir.OXXX, why - } - - // 4. src is a bidirectional channel value, dst is a channel type, - // src and dst have identical element types, and - // either src or dst is not a named type. - if src.IsChan() && src.ChanDir() == types.Cboth && dst.IsChan() { - if types.Identical(src.Elem(), dst.Elem()) && (src.Sym() == nil || dst.Sym() == nil) { - return ir.OCONVNOP, "" - } - } - - // 5. src is the predeclared identifier nil and dst is a nillable type. 
- if src.Kind() == types.TNIL { - switch dst.Kind() { - case types.TPTR, - types.TFUNC, - types.TMAP, - types.TCHAN, - types.TINTER, - types.TSLICE: - return ir.OCONVNOP, "" - } - } - - // 6. rule about untyped constants - already converted by defaultlit. - - // 7. Any typed value can be assigned to the blank identifier. - if dst.Kind() == types.TBLANK { - return ir.OCONVNOP, "" - } - - return ir.OXXX, "" -} - -// Can we convert a value of type src to a value of type dst? -// If so, return op code to use in conversion (maybe OCONVNOP). -// If not, return OXXX. In this case, the string return parameter may -// hold a reason why. In all other cases, it'll be the empty string. -// srcConstant indicates whether the value of type src is a constant. -func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) { - if src == dst { - return ir.OCONVNOP, "" - } - if src == nil || dst == nil { - return ir.OXXX, "" - } - - // Conversions from regular to go:notinheap are not allowed - // (unless it's unsafe.Pointer). These are runtime-specific - // rules. - // (a) Disallow (*T) to (*U) where T is go:notinheap but U isn't. - if src.IsPtr() && dst.IsPtr() && dst.Elem().NotInHeap() && !src.Elem().NotInHeap() { - why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable), but %v is not", dst.Elem(), src.Elem()) - return ir.OXXX, why - } - // (b) Disallow string to []T where T is go:notinheap. - if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Kind() == types.ByteType.Kind() || dst.Elem().Kind() == types.RuneType.Kind()) { - why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem()) - return ir.OXXX, why - } - - // 1. src can be assigned to dst. - op, why := assignop(src, dst) - if op != ir.OXXX { - return op, why - } - - // The rules for interfaces are no different in conversions - // than assignments. If interfaces are involved, stop now - // with the good message from assignop. - // Otherwise clear the error. - if src.IsInterface() || dst.IsInterface() { - return ir.OXXX, why - } - - // 2. Ignoring struct tags, src and dst have identical underlying types. - if types.IdenticalIgnoreTags(src.Underlying(), dst.Underlying()) { - return ir.OCONVNOP, "" - } - - // 3. src and dst are unnamed pointer types and, ignoring struct tags, - // their base types have identical underlying types. - if src.IsPtr() && dst.IsPtr() && src.Sym() == nil && dst.Sym() == nil { - if types.IdenticalIgnoreTags(src.Elem().Underlying(), dst.Elem().Underlying()) { - return ir.OCONVNOP, "" - } - } - - // 4. src and dst are both integer or floating point types. - if (src.IsInteger() || src.IsFloat()) && (dst.IsInteger() || dst.IsFloat()) { - if types.SimType[src.Kind()] == types.SimType[dst.Kind()] { - return ir.OCONVNOP, "" - } - return ir.OCONV, "" - } - - // 5. src and dst are both complex types. - if src.IsComplex() && dst.IsComplex() { - if types.SimType[src.Kind()] == types.SimType[dst.Kind()] { - return ir.OCONVNOP, "" - } - return ir.OCONV, "" - } - - // Special case for constant conversions: any numeric - // conversion is potentially okay. We'll validate further - // within evconst. See #38117. - if srcConstant && (src.IsInteger() || src.IsFloat() || src.IsComplex()) && (dst.IsInteger() || dst.IsFloat() || dst.IsComplex()) { - return ir.OCONV, "" - } - - // 6. src is an integer or has type []byte or []rune - // and dst is a string type. 
- if src.IsInteger() && dst.IsString() { - return ir.ORUNESTR, "" - } - - if src.IsSlice() && dst.IsString() { - if src.Elem().Kind() == types.ByteType.Kind() { - return ir.OBYTES2STR, "" - } - if src.Elem().Kind() == types.RuneType.Kind() { - return ir.ORUNES2STR, "" - } - } - - // 7. src is a string and dst is []byte or []rune. - // String to slice. - if src.IsString() && dst.IsSlice() { - if dst.Elem().Kind() == types.ByteType.Kind() { - return ir.OSTR2BYTES, "" - } - if dst.Elem().Kind() == types.RuneType.Kind() { - return ir.OSTR2RUNES, "" - } - } - - // 8. src is a pointer or uintptr and dst is unsafe.Pointer. - if (src.IsPtr() || src.IsUintptr()) && dst.IsUnsafePtr() { - return ir.OCONVNOP, "" - } - - // 9. src is unsafe.Pointer and dst is a pointer or uintptr. - if src.IsUnsafePtr() && (dst.IsPtr() || dst.IsUintptr()) { - return ir.OCONVNOP, "" - } - - // src is map and dst is a pointer to corresponding hmap. - // This rule is needed for the implementation detail that - // go gc maps are implemented as a pointer to a hmap struct. - if src.Kind() == types.TMAP && dst.IsPtr() && - src.MapType().Hmap == dst.Elem() { - return ir.OCONVNOP, "" - } - - return ir.OXXX, "" -} - -func assignconv(n ir.Node, t *types.Type, context string) ir.Node { - return assignconvfn(n, t, func() string { return context }) -} - -// Convert node n for assignment to type t. -func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node { - if n == nil || n.Type() == nil || n.Type().Broke() { - return n - } - - if t.Kind() == types.TBLANK && n.Type().Kind() == types.TNIL { - base.Errorf("use of untyped nil") - } - - n = convlit1(n, t, false, context) - if n.Type() == nil { - return n - } - if t.Kind() == types.TBLANK { - return n - } - - // Convert ideal bool from comparison to plain bool - // if the next step is non-bool (like interface{}). - if n.Type() == types.UntypedBool && !t.IsBoolean() { - if n.Op() == ir.ONAME || n.Op() == ir.OLITERAL { - r := ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, n) - r.SetType(types.Types[types.TBOOL]) - r.SetTypecheck(1) - r.SetImplicit(true) - n = r - } - } - - if types.Identical(n.Type(), t) { - return n - } - - op, why := assignop(n.Type(), t) - if op == ir.OXXX { - base.Errorf("cannot use %L as type %v in %s%s", n, t, context(), why) - op = ir.OCONV - } - - r := ir.NewConvExpr(base.Pos, op, t, n) - r.SetTypecheck(1) - r.SetImplicit(true) - return r + typecheck.DotImportRefs = nil } // backingArrayPtrLen extracts the pointer and length from a slice or string. @@ -475,14 +98,6 @@ func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) { return ptr, length } -func syslook(name string) *ir.Name { - s := ir.Pkgs.Runtime.Lookup(name) - if s == nil || s.Def == nil { - base.Fatalf("syslook: can't find runtime.%s", name) - } - return ir.AsNode(s.Def).(*ir.Name) -} - // updateHasCall checks whether expression n contains any function // calls and sets the n.HasCall flag if so. 
func updateHasCall(n ir.Node) { @@ -689,7 +304,7 @@ func safeexpr(n ir.Node, init *ir.Nodes) ir.Node { } a := ir.Copy(n).(*ir.UnaryExpr) a.X = l - return walkexpr(typecheck(a, ctxExpr), init) + return walkexpr(typecheck.Expr(a), init) case ir.ODOT, ir.ODOTPTR: n := n.(*ir.SelectorExpr) @@ -699,7 +314,7 @@ func safeexpr(n ir.Node, init *ir.Nodes) ir.Node { } a := ir.Copy(n).(*ir.SelectorExpr) a.X = l - return walkexpr(typecheck(a, ctxExpr), init) + return walkexpr(typecheck.Expr(a), init) case ir.ODEREF: n := n.(*ir.StarExpr) @@ -709,7 +324,7 @@ func safeexpr(n ir.Node, init *ir.Nodes) ir.Node { } a := ir.Copy(n).(*ir.StarExpr) a.X = l - return walkexpr(typecheck(a, ctxExpr), init) + return walkexpr(typecheck.Expr(a), init) case ir.OINDEX, ir.OINDEXMAP: n := n.(*ir.IndexExpr) @@ -721,7 +336,7 @@ func safeexpr(n ir.Node, init *ir.Nodes) ir.Node { a := ir.Copy(n).(*ir.IndexExpr) a.X = l a.Index = r - return walkexpr(typecheck(a, ctxExpr), init) + return walkexpr(typecheck.Expr(a), init) case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT: n := n.(*ir.CompLitExpr) @@ -738,7 +353,7 @@ func safeexpr(n ir.Node, init *ir.Nodes) ir.Node { } func copyexpr(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node { - l := temp(t) + l := typecheck.Temp(t) appendWalkStmt(init, ir.NewAssignStmt(base.Pos, l, n)) return l } @@ -754,323 +369,6 @@ func cheapexpr(n ir.Node, init *ir.Nodes) ir.Node { return copyexpr(n, n.Type(), init) } -// Code to resolve elided DOTs in embedded types. - -// A Dlist stores a pointer to a TFIELD Type embedded within -// a TSTRUCT or TINTER Type. -type Dlist struct { - field *types.Field -} - -// dotlist is used by adddot1 to record the path of embedded fields -// used to access a target field or method. -// Must be non-nil so that dotpath returns a non-nil slice even if d is zero. -var dotlist = make([]Dlist, 10) - -// lookdot0 returns the number of fields or methods named s associated -// with Type t. If exactly one exists, it will be returned in *save -// (if save is not nil). -func lookdot0(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) int { - u := t - if u.IsPtr() { - u = u.Elem() - } - - c := 0 - if u.IsStruct() || u.IsInterface() { - for _, f := range u.Fields().Slice() { - if f.Sym == s || (ignorecase && f.IsMethod() && strings.EqualFold(f.Sym.Name, s.Name)) { - if save != nil { - *save = f - } - c++ - } - } - } - - u = t - if t.Sym() != nil && t.IsPtr() && !t.Elem().IsPtr() { - // If t is a defined pointer type, then x.m is shorthand for (*x).m. - u = t.Elem() - } - u = types.ReceiverBaseType(u) - if u != nil { - for _, f := range u.Methods().Slice() { - if f.Embedded == 0 && (f.Sym == s || (ignorecase && strings.EqualFold(f.Sym.Name, s.Name))) { - if save != nil { - *save = f - } - c++ - } - } - } - - return c -} - -// adddot1 returns the number of fields or methods named s at depth d in Type t. -// If exactly one exists, it will be returned in *save (if save is not nil), -// and dotlist will contain the path of embedded fields traversed to find it, -// in reverse order. If none exist, more will indicate whether t contains any -// embedded fields at depth d, so callers can decide whether to retry at -// a greater depth. -func adddot1(s *types.Sym, t *types.Type, d int, save **types.Field, ignorecase bool) (c int, more bool) { - if t.Recur() { - return - } - t.SetRecur(true) - defer t.SetRecur(false) - - var u *types.Type - d-- - if d < 0 { - // We've reached our target depth. If t has any fields/methods - // named s, then we're done. 
Otherwise, we still need to check - // below for embedded fields. - c = lookdot0(s, t, save, ignorecase) - if c != 0 { - return c, false - } - } - - u = t - if u.IsPtr() { - u = u.Elem() - } - if !u.IsStruct() && !u.IsInterface() { - return c, false - } - - for _, f := range u.Fields().Slice() { - if f.Embedded == 0 || f.Sym == nil { - continue - } - if d < 0 { - // Found an embedded field at target depth. - return c, true - } - a, more1 := adddot1(s, f.Type, d, save, ignorecase) - if a != 0 && c == 0 { - dotlist[d].field = f - } - c += a - if more1 { - more = true - } - } - - return c, more -} - -// dotpath computes the unique shortest explicit selector path to fully qualify -// a selection expression x.f, where x is of type t and f is the symbol s. -// If no such path exists, dotpath returns nil. -// If there are multiple shortest paths to the same depth, ambig is true. -func dotpath(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) (path []Dlist, ambig bool) { - // The embedding of types within structs imposes a tree structure onto - // types: structs parent the types they embed, and types parent their - // fields or methods. Our goal here is to find the shortest path to - // a field or method named s in the subtree rooted at t. To accomplish - // that, we iteratively perform depth-first searches of increasing depth - // until we either find the named field/method or exhaust the tree. - for d := 0; ; d++ { - if d > len(dotlist) { - dotlist = append(dotlist, Dlist{}) - } - if c, more := adddot1(s, t, d, save, ignorecase); c == 1 { - return dotlist[:d], false - } else if c > 1 { - return nil, true - } else if !more { - return nil, false - } - } -} - -// in T.field -// find missing fields that -// will give shortest unique addressing. -// modify the tree with missing type names. -func adddot(n *ir.SelectorExpr) *ir.SelectorExpr { - n.X = typecheck(n.X, ctxType|ctxExpr) - if n.X.Diag() { - n.SetDiag(true) - } - t := n.X.Type() - if t == nil { - return n - } - - if n.X.Op() == ir.OTYPE { - return n - } - - s := n.Sel - if s == nil { - return n - } - - switch path, ambig := dotpath(s, t, nil, false); { - case path != nil: - // rebuild elided dots - for c := len(path) - 1; c >= 0; c-- { - dot := ir.NewSelectorExpr(base.Pos, ir.ODOT, n.X, path[c].field.Sym) - dot.SetImplicit(true) - dot.SetType(path[c].field.Type) - n.X = dot - } - case ambig: - base.Errorf("ambiguous selector %v", n) - n.X = nil - } - - return n -} - -// Code to help generate trampoline functions for methods on embedded -// types. These are approx the same as the corresponding adddot -// routines except that they expect to be called with unique tasks and -// they return the actual methods. 
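The deleted adddot1/dotpath code above resolves elided selectors by iterative deepening: it re-searches the embedding tree at depth 0, 1, 2, ... until the named field or method is found exactly once (the unique shortest path), found more than once at the same depth (ambiguous), or no embedded fields remain to explore. Searching shallowest-first is what guarantees the shortest path and makes same-depth ambiguity detectable. A minimal standalone sketch of that strategy, using toy types in place of the compiler's *types.Type (all identifiers here are hypothetical, and unlike adddot1 this sketch assumes an acyclic embedding tree rather than guarding recursion with SetRecur):

package main

import "fmt"

// typ is a toy stand-in for *types.Type: named fields plus embedded types.
type typ struct {
	name     string
	fields   []string
	embedded []*typ
}

// lookAtDepth counts fields named s exactly d levels below t, recording
// the embedding path of the first match. more reports whether embedded
// types exist at depth d, so callers know a deeper search could still
// succeed (the role of adddot1's "more" result).
func lookAtDepth(t *typ, s string, d int, path *[]string) (c int, more bool) {
	if d == 0 {
		for _, f := range t.fields {
			if f == s {
				c++
			}
		}
		return c, len(t.embedded) > 0
	}
	for _, e := range t.embedded {
		sub, m := lookAtDepth(e, s, d-1, path)
		if sub != 0 && c == 0 {
			*path = append([]string{e.name}, *path...) // record first path found
		}
		c += sub
		if m {
			more = true
		}
	}
	return c, more
}

// shortestPath mimics dotpath: deepen until s is found exactly once,
// found more than once (ambiguous), or the tree is exhausted.
func shortestPath(t *typ, s string) (path []string, ok bool) {
	for d := 0; ; d++ {
		path = nil
		c, more := lookAtDepth(t, s, d, &path)
		if c == 1 {
			return path, true
		}
		if c > 1 || !more {
			return nil, false // ambiguous selector, or no such field/method
		}
	}
}

func main() {
	inner := &typ{name: "Inner", fields: []string{"X"}}
	mid := &typ{name: "Mid", embedded: []*typ{inner}}
	outer := &typ{name: "Outer", embedded: []*typ{mid}}
	fmt.Println(shortestPath(outer, "X")) // [Mid Inner] true
}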
- -type Symlink struct { - field *types.Field -} - -var slist []Symlink - -func expand0(t *types.Type) { - u := t - if u.IsPtr() { - u = u.Elem() - } - - if u.IsInterface() { - for _, f := range u.Fields().Slice() { - if f.Sym.Uniq() { - continue - } - f.Sym.SetUniq(true) - slist = append(slist, Symlink{field: f}) - } - - return - } - - u = types.ReceiverBaseType(t) - if u != nil { - for _, f := range u.Methods().Slice() { - if f.Sym.Uniq() { - continue - } - f.Sym.SetUniq(true) - slist = append(slist, Symlink{field: f}) - } - } -} - -func expand1(t *types.Type, top bool) { - if t.Recur() { - return - } - t.SetRecur(true) - - if !top { - expand0(t) - } - - u := t - if u.IsPtr() { - u = u.Elem() - } - - if u.IsStruct() || u.IsInterface() { - for _, f := range u.Fields().Slice() { - if f.Embedded == 0 { - continue - } - if f.Sym == nil { - continue - } - expand1(f.Type, false) - } - } - - t.SetRecur(false) -} - -func expandmeth(t *types.Type) { - if t == nil || t.AllMethods().Len() != 0 { - return - } - - // mark top-level method symbols - // so that expand1 doesn't consider them. - for _, f := range t.Methods().Slice() { - f.Sym.SetUniq(true) - } - - // generate all reachable methods - slist = slist[:0] - expand1(t, true) - - // check each method to be uniquely reachable - var ms []*types.Field - for i, sl := range slist { - slist[i].field = nil - sl.field.Sym.SetUniq(false) - - var f *types.Field - path, _ := dotpath(sl.field.Sym, t, &f, false) - if path == nil { - continue - } - - // dotpath may have dug out arbitrary fields, we only want methods. - if !f.IsMethod() { - continue - } - - // add it to the base type method list - f = f.Copy() - f.Embedded = 1 // needs a trampoline - for _, d := range path { - if d.field.Type.IsPtr() { - f.Embedded = 2 - break - } - } - ms = append(ms, f) - } - - for _, f := range t.Methods().Slice() { - f.Sym.SetUniq(false) - } - - ms = append(ms, t.Methods().Slice()...) - sort.Sort(types.MethodsByName(ms)) - t.AllMethods().Set(ms) -} - -// Given funarg struct list, return list of fn args. -func structargs(tl *types.Type, mustname bool) []*ir.Field { - var args []*ir.Field - gen := 0 - for _, t := range tl.Fields().Slice() { - s := t.Sym - if mustname && (s == nil || s.Name == "_") { - // invent a name so that we can refer to it in the trampoline - s = lookupN(".anon", gen) - gen++ - } - a := ir.NewField(base.Pos, s, nil, t.Type) - a.Pos = t.Pos - a.IsDDD = t.IsDDD() - args = append(args, a) - } - - return args -} - // Generate a wrapper function to convert from // a receiver of type T to a receiver of type U. // That is, @@ -1110,14 +408,14 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { } base.Pos = base.AutogeneratedPos - dclcontext = ir.PEXTERN + typecheck.DeclContext = ir.PEXTERN tfn := ir.NewFuncType(base.Pos, - ir.NewField(base.Pos, lookup(".this"), nil, rcvr), - structargs(method.Type.Params(), true), - structargs(method.Type.Results(), false)) + ir.NewField(base.Pos, typecheck.Lookup(".this"), nil, rcvr), + typecheck.NewFuncParams(method.Type.Params(), true), + typecheck.NewFuncParams(method.Type.Results(), false)) - fn := dclfunc(newnam, tfn) + fn := typecheck.DeclFunc(newnam, tfn) fn.SetDupok(true) nthis := ir.AsNode(tfn.Type().Recv().Nname) @@ -1128,13 +426,13 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { if rcvr.IsPtr() && rcvr.Elem() == methodrcvr { // generating wrapper from *T to T. 
n := ir.NewIfStmt(base.Pos, nil, nil, nil) - n.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, nthis, nodnil()) - call := ir.NewCallExpr(base.Pos, ir.OCALL, syslook("panicwrap"), nil) + n.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, nthis, typecheck.NodNil()) + call := ir.NewCallExpr(base.Pos, ir.OCALL, typecheck.LookupRuntime("panicwrap"), nil) n.Body = []ir.Node{call} fn.Body.Append(n) } - dot := adddot(ir.NewSelectorExpr(base.Pos, ir.OXDOT, nthis, method.Sym)) + dot := typecheck.AddImplicitDots(ir.NewSelectorExpr(base.Pos, ir.OXDOT, nthis, method.Sym)) // generate call // It's not possible to use a tail call when dynamic linking on ppc64le. The @@ -1147,9 +445,9 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { // generate tail call: adjust pointer receiver and jump to embedded method. left := dot.X // skip final .M if !left.Type().IsPtr() { - left = nodAddr(left) + left = typecheck.NodAddr(left) } - as := ir.NewAssignStmt(base.Pos, nthis, convnop(left, rcvr)) + as := ir.NewAssignStmt(base.Pos, nthis, typecheck.ConvNop(left, rcvr)) fn.Body.Append(as) fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.ORETJMP, ir.MethodSym(methodrcvr, method.Sym))) } else { @@ -1170,14 +468,14 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { ir.DumpList("genwrapper body", fn.Body) } - funcbody() + typecheck.FinishFuncBody() if base.Debug.DclStack != 0 { types.CheckDclstack() } - typecheckFunc(fn) + typecheck.Func(fn) ir.CurFunc = fn - typecheckslice(fn.Body, ctxStmt) + typecheck.Stmts(fn.Body) // Inline calls within (*T).M wrappers. This is safe because we only // generate those wrappers within the same compilation unit as (T).M. @@ -1188,15 +486,15 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { escapeFuncs([]*ir.Func{fn}, false) ir.CurFunc = nil - Target.Decls = append(Target.Decls, fn) + typecheck.Target.Decls = append(typecheck.Target.Decls, fn) } func hashmem(t *types.Type) ir.Node { sym := ir.Pkgs.Runtime.Lookup("memhash") - n := NewName(sym) + n := typecheck.NewName(sym) ir.MarkFunc(n) - n.SetType(functype(nil, []*ir.Field{ + n.SetType(typecheck.NewFuncType(nil, []*ir.Field{ ir.NewField(base.Pos, nil, nil, types.NewPtr(t)), ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]), ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]), @@ -1206,112 +504,6 @@ func hashmem(t *types.Type) ir.Node { return n } -func ifacelookdot(s *types.Sym, t *types.Type, ignorecase bool) (m *types.Field, followptr bool) { - if t == nil { - return nil, false - } - - path, ambig := dotpath(s, t, &m, ignorecase) - if path == nil { - if ambig { - base.Errorf("%v.%v is ambiguous", t, s) - } - return nil, false - } - - for _, d := range path { - if d.field.Type.IsPtr() { - followptr = true - break - } - } - - if !m.IsMethod() { - base.Errorf("%v.%v is a field, not a method", t, s) - return nil, followptr - } - - return m, followptr -} - -func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool { - t0 := t - if t == nil { - return false - } - - if t.IsInterface() { - i := 0 - tms := t.Fields().Slice() - for _, im := range iface.Fields().Slice() { - for i < len(tms) && tms[i].Sym != im.Sym { - i++ - } - if i == len(tms) { - *m = im - *samename = nil - *ptr = 0 - return false - } - tm := tms[i] - if !types.Identical(tm.Type, im.Type) { - *m = im - *samename = tm - *ptr = 0 - return false - } - } - - return true - } - - t = types.ReceiverBaseType(t) - var tms []*types.Field - if t != nil { - expandmeth(t) - tms = 
t.AllMethods().Slice() - } - i := 0 - for _, im := range iface.Fields().Slice() { - if im.Broke() { - continue - } - for i < len(tms) && tms[i].Sym != im.Sym { - i++ - } - if i == len(tms) { - *m = im - *samename, _ = ifacelookdot(im.Sym, t, true) - *ptr = 0 - return false - } - tm := tms[i] - if tm.Nointerface() || !types.Identical(tm.Type, im.Type) { - *m = im - *samename = tm - *ptr = 0 - return false - } - followptr := tm.Embedded == 2 - - // if pointer receiver in method, - // the method does not exist for value types. - rcvr := tm.Type.Recv().Type - if rcvr.IsPtr() && !t0.IsPtr() && !followptr && !types.IsInterfaceMethod(tm.Type) { - if false && base.Flag.LowerR != 0 { - base.Errorf("interface pointer mismatch") - } - - *m = im - *samename = nil - *ptr = 1 - return false - } - } - - return true -} - func ngotype(n ir.Node) *types.Sym { if n.Type() != nil { return typenamesym(n.Type()) diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index 4e7ff00434119..9ffa8b67bb277 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -7,6 +7,7 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/src" "go/constant" @@ -14,221 +15,6 @@ import ( "sort" ) -// typecheckswitch typechecks a switch statement. -func typecheckswitch(n *ir.SwitchStmt) { - typecheckslice(n.Init(), ctxStmt) - if n.Tag != nil && n.Tag.Op() == ir.OTYPESW { - typecheckTypeSwitch(n) - } else { - typecheckExprSwitch(n) - } -} - -func typecheckTypeSwitch(n *ir.SwitchStmt) { - guard := n.Tag.(*ir.TypeSwitchGuard) - guard.X = typecheck(guard.X, ctxExpr) - t := guard.X.Type() - if t != nil && !t.IsInterface() { - base.ErrorfAt(n.Pos(), "cannot type switch on non-interface value %L", guard.X) - t = nil - } - - // We don't actually declare the type switch's guarded - // declaration itself. So if there are no cases, we won't - // notice that it went unused. 
- if v := guard.Tag; v != nil && !ir.IsBlank(v) && len(n.Cases) == 0 { - base.ErrorfAt(v.Pos(), "%v declared but not used", v.Sym()) - } - - var defCase, nilCase ir.Node - var ts typeSet - for _, ncase := range n.Cases { - ncase := ncase.(*ir.CaseStmt) - ls := ncase.List - if len(ls) == 0 { // default: - if defCase != nil { - base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase)) - } else { - defCase = ncase - } - } - - for i := range ls { - ls[i] = typecheck(ls[i], ctxExpr|ctxType) - n1 := ls[i] - if t == nil || n1.Type() == nil { - continue - } - - var missing, have *types.Field - var ptr int - if ir.IsNil(n1) { // case nil: - if nilCase != nil { - base.ErrorfAt(ncase.Pos(), "multiple nil cases in type switch (first at %v)", ir.Line(nilCase)) - } else { - nilCase = ncase - } - continue - } - if n1.Op() != ir.OTYPE { - base.ErrorfAt(ncase.Pos(), "%L is not a type", n1) - continue - } - if !n1.Type().IsInterface() && !implements(n1.Type(), t, &missing, &have, &ptr) && !missing.Broke() { - if have != nil && !have.Broke() { - base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+ - " (wrong type for %v method)\n\thave %v%S\n\twant %v%S", guard.X, n1.Type(), missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) - } else if ptr != 0 { - base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+ - " (%v method has pointer receiver)", guard.X, n1.Type(), missing.Sym) - } else { - base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+ - " (missing %v method)", guard.X, n1.Type(), missing.Sym) - } - continue - } - - ts.add(ncase.Pos(), n1.Type()) - } - - if len(ncase.Vars) != 0 { - // Assign the clause variable's type. - vt := t - if len(ls) == 1 { - if ls[0].Op() == ir.OTYPE { - vt = ls[0].Type() - } else if !ir.IsNil(ls[0]) { - // Invalid single-type case; - // mark variable as broken. - vt = nil - } - } - - nvar := ncase.Vars[0] - nvar.SetType(vt) - if vt != nil { - nvar = typecheck(nvar, ctxExpr|ctxAssign) - } else { - // Clause variable is broken; prevent typechecking. - nvar.SetTypecheck(1) - nvar.SetWalkdef(1) - } - ncase.Vars[0] = nvar - } - - typecheckslice(ncase.Body, ctxStmt) - } -} - -type typeSet struct { - m map[string][]typeSetEntry -} - -type typeSetEntry struct { - pos src.XPos - typ *types.Type -} - -func (s *typeSet) add(pos src.XPos, typ *types.Type) { - if s.m == nil { - s.m = make(map[string][]typeSetEntry) - } - - // LongString does not uniquely identify types, so we need to - // disambiguate collisions with types.Identical. - // TODO(mdempsky): Add a method that *is* unique. 
- ls := typ.LongString() - prevs := s.m[ls] - for _, prev := range prevs { - if types.Identical(typ, prev.typ) { - base.ErrorfAt(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, base.FmtPos(prev.pos)) - return - } - } - s.m[ls] = append(prevs, typeSetEntry{pos, typ}) -} - -func typecheckExprSwitch(n *ir.SwitchStmt) { - t := types.Types[types.TBOOL] - if n.Tag != nil { - n.Tag = typecheck(n.Tag, ctxExpr) - n.Tag = defaultlit(n.Tag, nil) - t = n.Tag.Type() - } - - var nilonly string - if t != nil { - switch { - case t.IsMap(): - nilonly = "map" - case t.Kind() == types.TFUNC: - nilonly = "func" - case t.IsSlice(): - nilonly = "slice" - - case !types.IsComparable(t): - if t.IsStruct() { - base.ErrorfAt(n.Pos(), "cannot switch on %L (struct containing %v cannot be compared)", n.Tag, types.IncomparableField(t).Type) - } else { - base.ErrorfAt(n.Pos(), "cannot switch on %L", n.Tag) - } - t = nil - } - } - - var defCase ir.Node - var cs constSet - for _, ncase := range n.Cases { - ncase := ncase.(*ir.CaseStmt) - ls := ncase.List - if len(ls) == 0 { // default: - if defCase != nil { - base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase)) - } else { - defCase = ncase - } - } - - for i := range ls { - ir.SetPos(ncase) - ls[i] = typecheck(ls[i], ctxExpr) - ls[i] = defaultlit(ls[i], t) - n1 := ls[i] - if t == nil || n1.Type() == nil { - continue - } - - if nilonly != "" && !ir.IsNil(n1) { - base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Tag) - } else if t.IsInterface() && !n1.Type().IsInterface() && !types.IsComparable(n1.Type()) { - base.ErrorfAt(ncase.Pos(), "invalid case %L in switch (incomparable type)", n1) - } else { - op1, _ := assignop(n1.Type(), t) - op2, _ := assignop(t, n1.Type()) - if op1 == ir.OXXX && op2 == ir.OXXX { - if n.Tag != nil { - base.ErrorfAt(ncase.Pos(), "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Tag, n1.Type(), t) - } else { - base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type()) - } - } - } - - // Don't check for duplicate bools. Although the spec allows it, - // (1) the compiler hasn't checked it in the past, so compatibility mandates it, and - // (2) it would disallow useful things like - // case GOARCH == "arm" && GOARM == "5": - // case GOARCH == "arm": - // which would both evaluate to false for non-ARM compiles. - if !n1.Type().IsBoolean() { - cs.add(ncase.Pos(), n1, "case", "switch") - } - } - - typecheckslice(ncase.Body, ctxStmt) - } -} - // walkswitch walks a switch statement. func walkswitch(sw *ir.SwitchStmt) { // Guard against double walk, see #25776. @@ -254,8 +40,8 @@ func walkExprSwitch(sw *ir.SwitchStmt) { // convert switch {...} to switch true {...} if cond == nil { cond = ir.NewBool(true) - cond = typecheck(cond, ctxExpr) - cond = defaultlit(cond, nil) + cond = typecheck.Expr(cond) + cond = typecheck.DefaultLit(cond, nil) } // Given "switch string(byteslice)", @@ -285,7 +71,7 @@ func walkExprSwitch(sw *ir.SwitchStmt) { var body ir.Nodes for _, ncase := range sw.Cases { ncase := ncase.(*ir.CaseStmt) - label := autolabel(".s") + label := typecheck.AutoLabel(".s") jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, label) // Process case dispatch. 
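The typeSet above dedups type-switch cases with a two-level scheme: the map key is typ.LongString(), which is cheap but (per the TODO) not guaranteed unique, so collisions fall back to a real types.Identical check. The same pattern, sketched with reflect.Type standing in for *types.Type (an assumption made purely for illustration; reflect.Types are canonical, so plain == serves as the identity check here):

package main

import (
	"fmt"
	"reflect"
)

// typeSet keys on a non-unique string form of the type and resolves
// collisions with an identity comparison, like the compiler's typeSet.
type typeSet struct {
	m map[string][]reflect.Type
}

// add reports whether typ was already present (a duplicate case).
func (s *typeSet) add(typ reflect.Type) bool {
	if s.m == nil {
		s.m = make(map[string][]reflect.Type)
	}
	key := typ.String() // not unique: distinct packages can both print as "pkg.T"
	for _, prev := range s.m[key] {
		if prev == typ {
			return true
		}
	}
	s.m[key] = append(s.m[key], typ)
	return false
}

func main() {
	var s typeSet
	fmt.Println(s.add(reflect.TypeOf(0)))   // false: first int case
	fmt.Println(s.add(reflect.TypeOf(0)))   // true: duplicate case
	fmt.Println(s.add(reflect.TypeOf("x"))) // false: distinct type
}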
@@ -509,7 +295,7 @@ func walkTypeSwitch(sw *ir.SwitchStmt) { s.facename = walkexpr(s.facename, sw.PtrInit()) s.facename = copyexpr(s.facename, s.facename.Type(), &sw.Compiled) - s.okname = temp(types.Types[types.TBOOL]) + s.okname = typecheck.Temp(types.Types[types.TBOOL]) // Get interface descriptor word. // For empty interfaces this will be the type. @@ -523,10 +309,10 @@ func walkTypeSwitch(sw *ir.SwitchStmt) { // h := e._type.hash // Use a similar strategy for non-empty interfaces. ifNil := ir.NewIfStmt(base.Pos, nil, nil, nil) - ifNil.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, itab, nodnil()) + ifNil.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, itab, typecheck.NodNil()) base.Pos = base.Pos.WithNotStmt() // disable statement marks after the first check. - ifNil.Cond = typecheck(ifNil.Cond, ctxExpr) - ifNil.Cond = defaultlit(ifNil.Cond, nil) + ifNil.Cond = typecheck.Expr(ifNil.Cond) + ifNil.Cond = typecheck.DefaultLit(ifNil.Cond, nil) // ifNil.Nbody assigned at end. sw.Compiled.Append(ifNil) @@ -561,7 +347,7 @@ func walkTypeSwitch(sw *ir.SwitchStmt) { } caseVarInitialized := false - label := autolabel(".s") + label := typecheck.AutoLabel(".s") jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, label) if len(ncase.List) == 0 { // default: @@ -602,7 +388,7 @@ func walkTypeSwitch(sw *ir.SwitchStmt) { ir.NewDecl(ncase.Pos(), ir.ODCL, caseVar), ir.NewAssignStmt(ncase.Pos(), caseVar, val), } - typecheckslice(l, ctxStmt) + typecheck.Stmts(l) body.Append(l...) } body.Append(ncase.Body...) @@ -648,7 +434,7 @@ func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp ir.Node) { ir.NewDecl(pos, ir.ODCL, caseVar), ir.NewAssignStmt(pos, caseVar, nil), } - typecheckslice(l, ctxStmt) + typecheck.Stmts(l) body.Append(l...) } else { caseVar = ir.BlankNode @@ -740,8 +526,8 @@ func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i in nif := ir.NewIfStmt(base.Pos, nil, nil, nil) leaf(i, nif) base.Pos = base.Pos.WithNotStmt() - nif.Cond = typecheck(nif.Cond, ctxExpr) - nif.Cond = defaultlit(nif.Cond, nil) + nif.Cond = typecheck.Expr(nif.Cond) + nif.Cond = typecheck.DefaultLit(nif.Cond, nil) out.Append(nif) out = &nif.Else } @@ -752,8 +538,8 @@ func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i in nif := ir.NewIfStmt(base.Pos, nil, nil, nil) nif.Cond = less(half) base.Pos = base.Pos.WithNotStmt() - nif.Cond = typecheck(nif.Cond, ctxExpr) - nif.Cond = defaultlit(nif.Cond, nil) + nif.Cond = typecheck.Expr(nif.Cond) + nif.Cond = typecheck.DefaultLit(nif.Cond, nil) do(lo, half, &nif.Body) do(half, hi, &nif.Else) out.Append(nif) diff --git a/src/cmd/compile/internal/gc/types_acc.go b/src/cmd/compile/internal/gc/types_acc.go deleted file mode 100644 index d6d53f05cc949..0000000000000 --- a/src/cmd/compile/internal/gc/types_acc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file implements convertions between *types.Node and *Node. -// TODO(gri) try to eliminate these soon - -package gc diff --git a/src/cmd/compile/internal/gc/unsafe.go b/src/cmd/compile/internal/gc/unsafe.go deleted file mode 100644 index d37ebfff31ea1..0000000000000 --- a/src/cmd/compile/internal/gc/unsafe.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package gc - -import ( - "cmd/compile/internal/base" - "cmd/compile/internal/ir" - "cmd/compile/internal/types" -) - -// evalunsafe evaluates a package unsafe operation and returns the result. -func evalunsafe(n ir.Node) int64 { - switch n.Op() { - case ir.OALIGNOF, ir.OSIZEOF: - n := n.(*ir.UnaryExpr) - n.X = typecheck(n.X, ctxExpr) - n.X = defaultlit(n.X, nil) - tr := n.X.Type() - if tr == nil { - return 0 - } - types.CalcSize(tr) - if n.Op() == ir.OALIGNOF { - return int64(tr.Align) - } - return tr.Width - - case ir.OOFFSETOF: - // must be a selector. - n := n.(*ir.UnaryExpr) - if n.X.Op() != ir.OXDOT { - base.Errorf("invalid expression %v", n) - return 0 - } - sel := n.X.(*ir.SelectorExpr) - - // Remember base of selector to find it back after dot insertion. - // Since r->left may be mutated by typechecking, check it explicitly - // first to track it correctly. - sel.X = typecheck(sel.X, ctxExpr) - sbase := sel.X - - tsel := typecheck(sel, ctxExpr) - n.X = tsel - if tsel.Type() == nil { - return 0 - } - switch tsel.Op() { - case ir.ODOT, ir.ODOTPTR: - break - case ir.OCALLPART: - base.Errorf("invalid expression %v: argument is a method value", n) - return 0 - default: - base.Errorf("invalid expression %v", n) - return 0 - } - - // Sum offsets for dots until we reach sbase. - var v int64 - var next ir.Node - for r := tsel; r != sbase; r = next { - switch r.Op() { - case ir.ODOTPTR: - // For Offsetof(s.f), s may itself be a pointer, - // but accessing f must not otherwise involve - // indirection via embedded pointer types. - r := r.(*ir.SelectorExpr) - if r.X != sbase { - base.Errorf("invalid expression %v: selector implies indirection of embedded %v", n, r.X) - return 0 - } - fallthrough - case ir.ODOT: - r := r.(*ir.SelectorExpr) - v += r.Offset - next = r.X - default: - ir.Dump("unsafenmagic", tsel) - base.Fatalf("impossible %v node after dot insertion", r.Op()) - } - } - return v - } - - base.Fatalf("unexpected op %v", n.Op()) - return 0 -} diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 764c5c41b051f..73f82f333c09a 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -7,6 +7,7 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/objabi" @@ -42,7 +43,7 @@ func walk(fn *ir.Func) { // Final typecheck for any unused variables. 
for i, ln := range fn.Dcl { if ln.Op() == ir.ONAME && (ln.Class_ == ir.PAUTO || ln.Class_ == ir.PAUTOHEAP) { - ln = typecheck(ln, ctxExpr|ctxAssign).(*ir.Name) + ln = typecheck.AssignExpr(ln).(*ir.Name) fn.Dcl[i] = ln } } @@ -191,7 +192,7 @@ func walkstmt(n ir.Node) ir.Node { n.PtrInit().Set(nil) n.X = walkexpr(n.X, &init) - call := walkexpr(mkcall1(chanfn("chanrecv1", 2, n.X.Type()), nil, &init, n.X, nodnil()), &init) + call := walkexpr(mkcall1(chanfn("chanrecv1", 2, n.X.Type()), nil, &init, n.X, typecheck.NodNil()), &init) return ir.InitExpr(init, call) case ir.OBREAK, @@ -216,7 +217,7 @@ func walkstmt(n ir.Node) ir.Node { } nn := ir.NewAssignStmt(base.Pos, v.Name().Heapaddr, callnew(v.Type())) nn.Def = true - return walkstmt(typecheck(nn, ctxStmt)) + return walkstmt(typecheck.Stmt(nn)) } return n @@ -325,7 +326,7 @@ func walkstmt(n ir.Node) ir.Node { if cl == ir.PPARAMOUT { var ln ir.Node = ln if ir.IsParamStackCopy(ln) { - ln = walkexpr(typecheck(ir.NewStarExpr(base.Pos, ln.Name().Heapaddr), ctxExpr), nil) + ln = walkexpr(typecheck.Expr(ir.NewStarExpr(base.Pos, ln.Name().Heapaddr)), nil) } rl = append(rl, ln) } @@ -504,7 +505,7 @@ func walkexpr(n ir.Node, init *ir.Nodes) ir.Node { n := n.(*ir.Name) nn := ir.NewStarExpr(base.Pos, n.Name().Heapaddr) nn.X.MarkNonNil() - return walkexpr(typecheck(nn, ctxExpr), init) + return walkexpr(typecheck.Expr(nn), init) } n = walkexpr1(n, init) @@ -515,12 +516,12 @@ func walkexpr(n ir.Node, init *ir.Nodes) ir.Node { // walk of y%1 may have replaced it by 0. // Check whether n with its updated args is itself now a constant. t := n.Type() - n = evalConst(n) + n = typecheck.EvalConst(n) if n.Type() != t { base.Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type()) } if n.Op() == ir.OLITERAL { - n = typecheck(n, ctxExpr) + n = typecheck.Expr(n) // Emit string symbol now to avoid emitting // any concurrently during the backend. if v := n.Val(); v.Kind() == constant.String { @@ -604,7 +605,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { n := n.(*ir.UnaryExpr) if isRuneCount(n) { // Replace len([]rune(string)) with runtime.countrunes(string). - return mkcall("countrunes", n.Type(), init, conv(n.X.(*ir.ConvExpr).X, types.Types[types.TSTRING])) + return mkcall("countrunes", n.Type(), init, typecheck.Conv(n.X.(*ir.ConvExpr).X, types.Types[types.TSTRING])) } n.X = walkexpr(n.X, init) @@ -618,7 +619,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { } if t.IsArray() { safeexpr(n.X, init) - con := origIntConst(n, t.NumElem()) + con := typecheck.OrigInt(n, t.NumElem()) con.SetTypecheck(1) return con } @@ -656,7 +657,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.ORECOVER: n := n.(*ir.CallExpr) - return mkcall("gorecover", n.Type(), init, nodAddr(ir.RegFP)) + return mkcall("gorecover", n.Type(), init, typecheck.NodAddr(ir.RegFP)) case ir.OCLOSUREREAD, ir.OCFUNC: return n @@ -724,7 +725,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if n.Op() == ir.OASOP { // Rewrite x op= y into x = x op y. 
- n = ir.NewAssignStmt(base.Pos, left, typecheck(ir.NewBinaryExpr(base.Pos, n.(*ir.AssignOpStmt).AsOp, left, right), ctxExpr)) + n = ir.NewAssignStmt(base.Pos, left, typecheck.Expr(ir.NewBinaryExpr(base.Pos, n.(*ir.AssignOpStmt).AsOp, left, right))) } else { n.(*ir.AssignStmt).X = left } @@ -753,7 +754,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { recv := as.Y.(*ir.UnaryExpr) recv.X = walkexpr(recv.X, init) - n1 := nodAddr(as.X) + n1 := typecheck.NodAddr(as.X) r := recv.X // the channel return mkcall1(chanfn("chanrecv1", 2, r.Type()), nil, init, r, n1) @@ -826,14 +827,14 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { r.X = walkexpr(r.X, init) var n1 ir.Node if ir.IsBlank(n.Lhs[0]) { - n1 = nodnil() + n1 = typecheck.NodNil() } else { - n1 = nodAddr(n.Lhs[0]) + n1 = typecheck.NodAddr(n.Lhs[0]) } fn := chanfn("chanrecv2", 2, r.X.Type()) ok := n.Lhs[1] call := mkcall1(fn, types.Types[types.TBOOL], init, r.X, n1) - return typecheck(ir.NewAssignStmt(base.Pos, ok, call), ctxStmt) + return typecheck.Stmt(ir.NewAssignStmt(base.Pos, ok, call)) // a,b = m[i] case ir.OAS2MAPR: @@ -854,7 +855,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { } else { // standard version takes key by reference // order.expr made sure key is addressable. - key = nodAddr(r.Index) + key = typecheck.NodAddr(r.Index) } // from: @@ -885,10 +886,10 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // don't generate a = *var if a is _ if ir.IsBlank(a) { - return walkexpr(typecheck(n, ctxStmt), init) + return walkexpr(typecheck.Stmt(n), init) } - var_ := temp(types.NewPtr(t.Elem())) + var_ := typecheck.Temp(types.NewPtr(t.Elem())) var_.SetTypecheck(1) var_.MarkNonNil() // mapaccess always returns a non-nil pointer @@ -896,7 +897,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { init.Append(walkexpr(n, init)) as := ir.NewAssignStmt(base.Pos, a, ir.NewStarExpr(base.Pos, var_)) - return walkexpr(typecheck(as, ctxStmt), init) + return walkexpr(typecheck.Stmt(as), init) case ir.ODELETE: n := n.(*ir.CallExpr) @@ -910,7 +911,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { fast := mapfast(t) if fast == mapslow { // order.stmt made sure key is addressable. - key = nodAddr(key) + key = typecheck.NodAddr(key) } return mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key) @@ -948,12 +949,12 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { } if ir.Names.Staticuint64s == nil { - ir.Names.Staticuint64s = NewName(ir.Pkgs.Runtime.Lookup("staticuint64s")) + ir.Names.Staticuint64s = typecheck.NewName(ir.Pkgs.Runtime.Lookup("staticuint64s")) ir.Names.Staticuint64s.Class_ = ir.PEXTERN // The actual type is [256]uint64, but we use [256*8]uint8 so we can address // individual bytes. ir.Names.Staticuint64s.SetType(types.NewArray(types.Types[types.TUINT8], 256*8)) - ir.Names.Zerobase = NewName(ir.Pkgs.Runtime.Lookup("zerobase")) + ir.Names.Zerobase = typecheck.NewName(ir.Pkgs.Runtime.Lookup("zerobase")) ir.Names.Zerobase.Class_ = ir.PEXTERN ir.Names.Zerobase.SetType(types.Types[types.TUINTPTR]) } @@ -984,14 +985,14 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { value = n.X case !fromType.IsInterface() && n.Esc() == ir.EscNone && fromType.Width <= 1024: // n.Left does not escape. Use a stack temporary initialized to n.Left. - value = temp(fromType) - init.Append(typecheck(ir.NewAssignStmt(base.Pos, value, n.X), ctxStmt)) + value = typecheck.Temp(fromType) + init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, value, n.X))) } if value != nil { // Value is identical to n.Left. 
// Construct the interface directly: {type/itab, &value}. - l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), typecheck(nodAddr(value), ctxExpr)) + l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), typecheck.Expr(typecheck.NodAddr(value))) l.SetType(toType) l.SetTypecheck(n.Typecheck()) return l @@ -1005,15 +1006,15 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // e = iface{tmp, i.data} if toType.IsEmptyInterface() && fromType.IsInterface() && !fromType.IsEmptyInterface() { // Evaluate the input interface. - c := temp(fromType) + c := typecheck.Temp(fromType) init.Append(ir.NewAssignStmt(base.Pos, c, n.X)) // Get the itab out of the interface. - tmp := temp(types.NewPtr(types.Types[types.TUINT8])) - init.Append(ir.NewAssignStmt(base.Pos, tmp, typecheck(ir.NewUnaryExpr(base.Pos, ir.OITAB, c), ctxExpr))) + tmp := typecheck.Temp(types.NewPtr(types.Types[types.TUINT8])) + init.Append(ir.NewAssignStmt(base.Pos, tmp, typecheck.Expr(ir.NewUnaryExpr(base.Pos, ir.OITAB, c)))) // Get the type out of the itab. - nif := ir.NewIfStmt(base.Pos, typecheck(ir.NewBinaryExpr(base.Pos, ir.ONE, tmp, nodnil()), ctxExpr), nil, nil) + nif := ir.NewIfStmt(base.Pos, typecheck.Expr(ir.NewBinaryExpr(base.Pos, ir.ONE, tmp, typecheck.NodNil())), nil, nil) nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, tmp, itabType(tmp))} init.Append(nif) @@ -1030,13 +1031,13 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // Use a specialized conversion routine that only returns a data pointer. // ptr = convT2X(val) // e = iface{typ/tab, ptr} - fn := syslook(fnname) + fn := typecheck.LookupRuntime(fnname) types.CalcSize(fromType) - fn = substArgTypes(fn, fromType) + fn = typecheck.SubstArgTypes(fn, fromType) types.CalcSize(fn.Type()) call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) call.Args = []ir.Node{n.X} - e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), safeexpr(walkexpr(typecheck(call, ctxExpr), init), init)) + e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), safeexpr(walkexpr(typecheck.Expr(call), init), init)) e.SetType(toType) e.SetTypecheck(1) return e @@ -1062,16 +1063,16 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if !ir.IsAssignable(v) { v = copyexpr(v, v.Type(), init) } - v = nodAddr(v) + v = typecheck.NodAddr(v) } types.CalcSize(fromType) - fn := syslook(fnname) - fn = substArgTypes(fn, fromType, toType) + fn := typecheck.LookupRuntime(fnname) + fn = typecheck.SubstArgTypes(fn, fromType, toType) types.CalcSize(fn.Type()) call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) call.Args = []ir.Node{tab, v} - return walkexpr(typecheck(call, ctxExpr), init) + return walkexpr(typecheck.Expr(call), init) case ir.OCONV, ir.OCONVNOP: n := n.(*ir.ConvExpr) @@ -1092,7 +1093,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return n } fn := types.BasicTypeNames[param] + "to" + types.BasicTypeNames[result] - return conv(mkcall(fn, types.Types[result], init, conv(n.X, types.Types[param])), n.Type()) + return typecheck.Conv(mkcall(fn, types.Types[result], init, typecheck.Conv(n.X, types.Types[param])), n.Type()) case ir.ODIV, ir.OMOD: n := n.(*ir.BinaryExpr) @@ -1104,8 +1105,8 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if types.IsComplex[et] && n.Op() == ir.ODIV { t := n.Type() - call := mkcall("complex128div", types.Types[types.TCOMPLEX128], init, conv(n.X, types.Types[types.TCOMPLEX128]), conv(n.Y, types.Types[types.TCOMPLEX128])) - return conv(call, t) + call := mkcall("complex128div", types.Types[types.TCOMPLEX128], init, typecheck.Conv(n.X, 
types.Types[types.TCOMPLEX128]), typecheck.Conv(n.Y, types.Types[types.TCOMPLEX128])) + return typecheck.Conv(call, t) } // Nothing to do for float divisions. @@ -1150,7 +1151,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { } else { fn += "mod" } - return mkcall(fn, n.Type(), init, conv(n.X, types.Types[et]), conv(n.Y, types.Types[et])) + return mkcall(fn, n.Type(), init, typecheck.Conv(n.X, types.Types[et]), typecheck.Conv(n.Y, types.Types[et])) } return n @@ -1213,7 +1214,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if fast == mapslow { // standard version takes key by reference. // order.expr made sure key is addressable. - key = nodAddr(key) + key = typecheck.NodAddr(key) } call = mkcall1(mapfn(mapassign[fast], t), nil, init, typename(t), map_, key) } else { @@ -1222,7 +1223,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if fast == mapslow { // standard version takes key by reference. // order.expr made sure key is addressable. - key = nodAddr(key) + key = typecheck.NodAddr(key) } if w := t.Elem().Width; w <= zeroValSize { @@ -1297,9 +1298,9 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { if n.Type().Elem().Width >= ir.MaxImplicitStackVarSize { base.Fatalf("large ONEW with EscNone: %v", n) } - r := temp(n.Type().Elem()) - init.Append(typecheck(ir.NewAssignStmt(base.Pos, r, nil), ctxStmt)) // zero temp - return typecheck(nodAddr(r), ctxExpr) + r := typecheck.Temp(n.Type().Elem()) + init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, nil))) // zero temp + return typecheck.Expr(typecheck.NodAddr(r)) } return callnew(n.Type().Elem()) @@ -1317,8 +1318,8 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OCLOSE: // cannot use chanfn - closechan takes any, not chan any n := n.(*ir.UnaryExpr) - fn := syslook("closechan") - fn = substArgTypes(fn, n.X.Type()) + fn := typecheck.LookupRuntime("closechan") + fn = typecheck.SubstArgTypes(fn, n.X.Type()) return mkcall1(fn, nil, init, n.X) case ir.OMAKECHAN: @@ -1337,7 +1338,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { argtype = types.Types[types.TINT] } - return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, typename(n.Type()), conv(size, argtype)) + return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, typename(n.Type()), typecheck.Conv(size, argtype)) case ir.OMAKEMAP: n := n.(*ir.MakeExpr) @@ -1351,10 +1352,10 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // Allocate hmap on stack. // var hv hmap - hv := temp(hmapType) - init.Append(typecheck(ir.NewAssignStmt(base.Pos, hv, nil), ctxStmt)) + hv := typecheck.Temp(hmapType) + init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, hv, nil))) // h = &hv - h = nodAddr(hv) + h = typecheck.NodAddr(hv) // Allocate one bucket pointed to by hmap.buckets on stack if hint // is not larger than BUCKETSIZE. 
In case hint is larger than @@ -1377,11 +1378,11 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { nif.Likely = true // var bv bmap - bv := temp(bmap(t)) + bv := typecheck.Temp(bmap(t)) nif.Body.Append(ir.NewAssignStmt(base.Pos, bv, nil)) // b = &bv - b := nodAddr(bv) + b := typecheck.NodAddr(bv) // h.buckets = b bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap @@ -1406,17 +1407,17 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { rand := mkcall("fastrand", types.Types[types.TUINT32], init) hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand)) - return convnop(h, t) + return typecheck.ConvNop(h, t) } // Call runtime.makehmap to allocate an // hmap on the heap and initialize hmap's hash0 field. - fn := syslook("makemap_small") - fn = substArgTypes(fn, t.Key(), t.Elem()) + fn := typecheck.LookupRuntime("makemap_small") + fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem()) return mkcall1(fn, n.Type(), init) } if n.Esc() != ir.EscNone { - h = nodnil() + h = typecheck.NodNil() } // Map initialization with a variable or large hint is // more complicated. We therefore generate a call to @@ -1437,9 +1438,9 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { argtype = types.Types[types.TINT] } - fn := syslook(fnname) - fn = substArgTypes(fn, hmapType, t.Key(), t.Elem()) - return mkcall1(fn, n.Type(), init, typename(n.Type()), conv(hint, argtype), h) + fn := typecheck.LookupRuntime(fnname) + fn = typecheck.SubstArgTypes(fn, hmapType, t.Key(), t.Elem()) + return mkcall1(fn, n.Type(), init, typename(n.Type()), typecheck.Conv(hint, argtype), h) case ir.OMAKESLICE: n := n.(*ir.MakeExpr) @@ -1459,7 +1460,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { } // var arr [r]T // n = arr[:l] - i := indexconst(r) + i := typecheck.IndexConst(r) if i < 0 { base.Fatalf("walkexpr: invalid index %v", r) } @@ -1471,19 +1472,19 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // if len < 0 { panicmakeslicelen() } // panicmakeslicecap() // } - nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, conv(l, types.Types[types.TUINT64]), ir.NewInt(i)), nil, nil) + nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, typecheck.Conv(l, types.Types[types.TUINT64]), ir.NewInt(i)), nil, nil) niflen := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLT, l, ir.NewInt(0)), nil, nil) niflen.Body = []ir.Node{mkcall("panicmakeslicelen", nil, init)} nif.Body.Append(niflen, mkcall("panicmakeslicecap", nil, init)) - init.Append(typecheck(nif, ctxStmt)) + init.Append(typecheck.Stmt(nif)) t = types.NewArray(t.Elem(), i) // [r]T - var_ := temp(t) + var_ := typecheck.Temp(t) appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil)) // zero temp r := ir.NewSliceExpr(base.Pos, ir.OSLICE, var_) // arr[:l] r.SetSliceBounds(nil, l, nil) // The conv is necessary in case n.Type is named. - return walkexpr(typecheck(conv(r, n.Type()), ctxExpr), init) + return walkexpr(typecheck.Expr(typecheck.Conv(r, n.Type())), init) } // n escapes; set up a call to makeslice. 
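The non-escaping OMAKESLICE rewrite above is essentially source-level: for a make with constant capacity r, walk emits a stack array plus a slice expression, preceded by the same panics make would raise for a bad length. A hand-written analogue of the generated shape (r = 8 is an arbitrary constant; the real rewrite applies only when escape analysis proves the slice does not escape, which returning it here, done purely to inspect len and cap, would defeat):

package main

import "fmt"

func makeOnStack(l int) []int {
	const r = 8 // the constant cap from make([]int, l, 8)
	// Same guard shape as the generated code:
	// if uint64(len) > cap { if len < 0 { panicmakeslicelen() }; panicmakeslicecap() }
	if uint64(l) > r { // a negative l wraps to a huge uint64, so it trips this too
		if l < 0 {
			panic("makeslice: len out of range")
		}
		panic("makeslice: cap out of range")
	}
	var arr [r]int // var arr [r]T, zero-initialized on the stack
	return arr[:l] // n = arr[:l]
}

func main() {
	s := makeOnStack(3)
	fmt.Println(len(s), cap(s)) // 3 8
}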
@@ -1507,11 +1508,11 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { m := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil) m.SetType(t) - fn := syslook(fnname) - m.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype)) + fn := typecheck.LookupRuntime(fnname) + m.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype)) m.Ptr.MarkNonNil() - m.LenCap = []ir.Node{conv(len, types.Types[types.TINT]), conv(cap, types.Types[types.TINT])} - return walkexpr(typecheck(m, ctxExpr), init) + m.LenCap = []ir.Node{typecheck.Conv(len, types.Types[types.TINT]), typecheck.Conv(cap, types.Types[types.TINT])} + return walkexpr(typecheck.Expr(m), init) case ir.OMAKESLICECOPY: n := n.(*ir.MakeExpr) @@ -1524,7 +1525,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem()) } - length := conv(n.Len, types.Types[types.TINT]) + length := typecheck.Conv(n.Len, types.Types[types.TINT]) copylen := ir.NewUnaryExpr(base.Pos, ir.OLEN, n.Cap) copyptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, n.Cap) @@ -1535,56 +1536,56 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // We do not check for overflow of len(to)*elem.Width here // since len(from) is an existing checked slice capacity // with same elem.Width for the from slice. - size := ir.NewBinaryExpr(base.Pos, ir.OMUL, conv(length, types.Types[types.TUINTPTR]), conv(ir.NewInt(t.Elem().Width), types.Types[types.TUINTPTR])) + size := ir.NewBinaryExpr(base.Pos, ir.OMUL, typecheck.Conv(length, types.Types[types.TUINTPTR]), typecheck.Conv(ir.NewInt(t.Elem().Width), types.Types[types.TUINTPTR])) // instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer - fn := syslook("mallocgc") + fn := typecheck.LookupRuntime("mallocgc") sh := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil) - sh.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, nodnil(), ir.NewBool(false)) + sh.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, typecheck.NodNil(), ir.NewBool(false)) sh.Ptr.MarkNonNil() sh.LenCap = []ir.Node{length, length} sh.SetType(t) - s := temp(t) - r := typecheck(ir.NewAssignStmt(base.Pos, s, sh), ctxStmt) + s := typecheck.Temp(t) + r := typecheck.Stmt(ir.NewAssignStmt(base.Pos, s, sh)) r = walkexpr(r, init) init.Append(r) // instantiate memmove(to *any, frm *any, size uintptr) - fn = syslook("memmove") - fn = substArgTypes(fn, t.Elem(), t.Elem()) + fn = typecheck.LookupRuntime("memmove") + fn = typecheck.SubstArgTypes(fn, t.Elem(), t.Elem()) ncopy := mkcall1(fn, nil, init, ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), copyptr, size) - init.Append(walkexpr(typecheck(ncopy, ctxStmt), init)) + init.Append(walkexpr(typecheck.Stmt(ncopy), init)) return s } // Replace make+copy with runtime.makeslicecopy. 
// instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer - fn := syslook("makeslicecopy") + fn := typecheck.LookupRuntime("makeslicecopy") s := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil) - s.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), length, copylen, conv(copyptr, types.Types[types.TUNSAFEPTR])) + s.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR])) s.Ptr.MarkNonNil() s.LenCap = []ir.Node{length, length} s.SetType(t) - return walkexpr(typecheck(s, ctxExpr), init) + return walkexpr(typecheck.Expr(s), init) case ir.ORUNESTR: n := n.(*ir.ConvExpr) - a := nodnil() + a := typecheck.NodNil() if n.Esc() == ir.EscNone { t := types.NewArray(types.Types[types.TUINT8], 4) - a = nodAddr(temp(t)) + a = typecheck.NodAddr(typecheck.Temp(t)) } // intstring(*[4]byte, rune) - return mkcall("intstring", n.Type(), init, a, conv(n.X, types.Types[types.TINT64])) + return mkcall("intstring", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TINT64])) case ir.OBYTES2STR, ir.ORUNES2STR: n := n.(*ir.ConvExpr) - a := nodnil() + a := typecheck.NodNil() if n.Esc() == ir.EscNone { // Create temporary buffer for string on stack. t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize) - a = nodAddr(temp(t)) + a = typecheck.NodAddr(typecheck.Temp(t)) } if n.Op() == ir.ORUNES2STR { // slicerunetostring(*[32]byte, []rune) string @@ -1618,16 +1619,16 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { t := types.NewArray(types.Types[types.TUINT8], int64(len(sc))) var a ir.Node if n.Esc() == ir.EscNone && len(sc) <= int(ir.MaxImplicitStackVarSize) { - a = nodAddr(temp(t)) + a = typecheck.NodAddr(typecheck.Temp(t)) } else { a = callnew(t) } - p := temp(t.PtrTo()) // *[n]byte - init.Append(typecheck(ir.NewAssignStmt(base.Pos, p, a), ctxStmt)) + p := typecheck.Temp(t.PtrTo()) // *[n]byte + init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, p, a))) // Copy from the static string data to the [n]byte. if len(sc) > 0 { - as := ir.NewAssignStmt(base.Pos, ir.NewStarExpr(base.Pos, p), ir.NewStarExpr(base.Pos, convnop(ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), t.PtrTo()))) + as := ir.NewAssignStmt(base.Pos, ir.NewStarExpr(base.Pos, p), ir.NewStarExpr(base.Pos, typecheck.ConvNop(ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), t.PtrTo()))) appendWalkStmt(init, as) } @@ -1638,14 +1639,14 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return walkexpr(slice, init) } - a := nodnil() + a := typecheck.NodNil() if n.Esc() == ir.EscNone { // Create temporary buffer for slice on stack. t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize) - a = nodAddr(temp(t)) + a = typecheck.NodAddr(typecheck.Temp(t)) } // stringtoslicebyte(*32[byte], string) []byte - return mkcall("stringtoslicebyte", n.Type(), init, a, conv(s, types.Types[types.TSTRING])) + return mkcall("stringtoslicebyte", n.Type(), init, a, typecheck.Conv(s, types.Types[types.TSTRING])) case ir.OSTR2BYTESTMP: // []byte(string) conversion that creates a slice @@ -1661,14 +1662,14 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OSTR2RUNES: n := n.(*ir.ConvExpr) - a := nodnil() + a := typecheck.NodNil() if n.Esc() == ir.EscNone { // Create temporary buffer for slice on stack. 
t := types.NewArray(types.Types[types.TINT32], tmpstringbufsize) - a = nodAddr(temp(t)) + a = typecheck.NodAddr(typecheck.Temp(t)) } // stringtoslicerune(*[32]rune, string) []rune - return mkcall("stringtoslicerune", n.Type(), init, a, conv(n.X, types.Types[types.TSTRING])) + return mkcall("stringtoslicerune", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TSTRING])) case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT, ir.OPTRLIT: if isStaticCompositeLiteral(n) && !canSSAType(n.Type()) { @@ -1677,18 +1678,18 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // Make direct reference to the static data. See issue 12841. vstat := readonlystaticname(n.Type()) fixedlit(inInitFunction, initKindStatic, n, vstat, init) - return typecheck(vstat, ctxExpr) + return typecheck.Expr(vstat) } - var_ := temp(n.Type()) + var_ := typecheck.Temp(n.Type()) anylit(n, var_, init) return var_ case ir.OSEND: n := n.(*ir.SendStmt) n1 := n.Value - n1 = assignconv(n1, n.Chan.Type().Elem(), "chan send") + n1 = typecheck.AssignConv(n1, n.Chan.Type().Elem(), "chan send") n1 = walkexpr(n1, init) - n1 = nodAddr(n1) + n1 = typecheck.NodAddr(n1) return mkcall1(chanfn("chansend1", 2, n.Chan.Type()), nil, init, n.Chan, n1) case ir.OCLOSURE: @@ -1871,8 +1872,8 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { // Any assignment to an lvalue that might cause a function call must be // deferred until all the returned values have been read. if fncall(l, r.Type) { - tmp := ir.Node(temp(r.Type)) - tmp = typecheck(tmp, ctxExpr) + tmp := ir.Node(typecheck.Temp(r.Type)) + tmp = typecheck.Expr(tmp) a := convas(ir.NewAssignStmt(base.Pos, l, tmp), &mm) mm.Append(a) l = tmp @@ -1895,48 +1896,6 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { return append(nn, mm...) } -// package all the arguments that match a ... T parameter into a []T. -func mkdotargslice(typ *types.Type, args []ir.Node) ir.Node { - var n ir.Node - if len(args) == 0 { - n = nodnil() - n.SetType(typ) - } else { - lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ).(ir.Ntype), nil) - lit.List.Append(args...) - lit.SetImplicit(true) - n = lit - } - - n = typecheck(n, ctxExpr) - if n.Type() == nil { - base.Fatalf("mkdotargslice: typecheck failed") - } - return n -} - -// fixVariadicCall rewrites calls to variadic functions to use an -// explicit ... argument if one is not already present. 
-func fixVariadicCall(call *ir.CallExpr) { - fntype := call.X.Type() - if !fntype.IsVariadic() || call.IsDDD { - return - } - - vi := fntype.NumParams() - 1 - vt := fntype.Params().Field(vi).Type - - args := call.Args - extra := args[vi:] - slice := mkdotargslice(vt, extra) - for i := range extra { - extra[i] = nil // allow GC - } - - call.Args.Set(append(args[:vi], slice)) - call.IsDDD = true -} - func walkCall(n *ir.CallExpr, init *ir.Nodes) { if len(n.Rargs) != 0 { return // already walked @@ -1978,7 +1937,7 @@ func walkCall(n *ir.CallExpr, init *ir.Nodes) { } if base.Flag.Cfg.Instrumenting || fncall(arg, t) { // make assignment of fncall to tempAt - tmp := temp(t) + tmp := typecheck.Temp(t) a := convas(ir.NewAssignStmt(base.Pos, tmp, arg), init) tempAssigns = append(tempAssigns, a) // replace arg with temp @@ -2032,22 +1991,22 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { for i, n := range nn.Args { if n.Op() == ir.OLITERAL { if n.Type() == types.UntypedRune { - n = defaultlit(n, types.RuneType) + n = typecheck.DefaultLit(n, types.RuneType) } switch n.Val().Kind() { case constant.Int: - n = defaultlit(n, types.Types[types.TINT64]) + n = typecheck.DefaultLit(n, types.Types[types.TINT64]) case constant.Float: - n = defaultlit(n, types.Types[types.TFLOAT64]) + n = typecheck.DefaultLit(n, types.Types[types.TFLOAT64]) } } if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Kind() == types.TIDEAL { - n = defaultlit(n, types.Types[types.TINT64]) + n = typecheck.DefaultLit(n, types.Types[types.TINT64]) } - n = defaultlit(n, nil) + n = typecheck.DefaultLit(n, nil) nn.Args[i] = n if n.Type() == nil || n.Type().Kind() == types.TFORW { continue @@ -2057,14 +2016,14 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { switch n.Type().Kind() { case types.TINTER: if n.Type().IsEmptyInterface() { - on = syslook("printeface") + on = typecheck.LookupRuntime("printeface") } else { - on = syslook("printiface") + on = typecheck.LookupRuntime("printiface") } - on = substArgTypes(on, n.Type()) // any-1 + on = typecheck.SubstArgTypes(on, n.Type()) // any-1 case types.TPTR: if n.Type().Elem().NotInHeap() { - on = syslook("printuintptr") + on = typecheck.LookupRuntime("printuintptr") n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n) n.SetType(types.Types[types.TUNSAFEPTR]) n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n) @@ -2073,25 +2032,25 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { } fallthrough case types.TCHAN, types.TMAP, types.TFUNC, types.TUNSAFEPTR: - on = syslook("printpointer") - on = substArgTypes(on, n.Type()) // any-1 + on = typecheck.LookupRuntime("printpointer") + on = typecheck.SubstArgTypes(on, n.Type()) // any-1 case types.TSLICE: - on = syslook("printslice") - on = substArgTypes(on, n.Type()) // any-1 + on = typecheck.LookupRuntime("printslice") + on = typecheck.SubstArgTypes(on, n.Type()) // any-1 case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR: if types.IsRuntimePkg(n.Type().Sym().Pkg) && n.Type().Sym().Name == "hex" { - on = syslook("printhex") + on = typecheck.LookupRuntime("printhex") } else { - on = syslook("printuint") + on = typecheck.LookupRuntime("printuint") } case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64: - on = syslook("printint") + on = typecheck.LookupRuntime("printint") case types.TFLOAT32, types.TFLOAT64: - on = syslook("printfloat") + on = typecheck.LookupRuntime("printfloat") case types.TCOMPLEX64, types.TCOMPLEX128: - on = syslook("printcomplex") + on = 
typecheck.LookupRuntime("printcomplex") case types.TBOOL: - on = syslook("printbool") + on = typecheck.LookupRuntime("printbool") case types.TSTRING: cs := "" if ir.IsConst(n, constant.String) { @@ -2099,11 +2058,11 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { } switch cs { case " ": - on = syslook("printsp") + on = typecheck.LookupRuntime("printsp") case "\n": - on = syslook("printnl") + on = typecheck.LookupRuntime("printnl") default: - on = syslook("printstring") + on = typecheck.LookupRuntime("printstring") } default: badtype(ir.OPRINT, n.Type(), nil) @@ -2124,12 +2083,12 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { calls = append(calls, mkcall("printunlock", nil, init)) - typecheckslice(calls, ctxStmt) + typecheck.Stmts(calls) walkexprlist(calls, init) r := ir.NewBlockStmt(base.Pos, nil) r.List.Set(calls) - return walkstmt(typecheck(r, ctxStmt)) + return walkstmt(typecheck.Stmt(r)) } func callnew(t *types.Type) ir.Node { @@ -2160,12 +2119,12 @@ func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt { } if ir.IsBlank(n.X) { - n.Y = defaultlit(n.Y, nil) + n.Y = typecheck.DefaultLit(n.Y, nil) return n } if !types.Identical(lt, rt) { - n.Y = assignconv(n.Y, lt, "assignment") + n.Y = typecheck.AssignConv(n.Y, lt, "assignment") n.Y = walkexpr(n.Y, init) } types.CalcSize(n.Y.Type()) @@ -2258,8 +2217,8 @@ func reorder3save(n ir.Node, all []*ir.AssignStmt, i int, early *[]ir.Node) ir.N return n } - q := ir.Node(temp(n.Type())) - as := typecheck(ir.NewAssignStmt(base.Pos, q, n), ctxStmt) + q := ir.Node(typecheck.Temp(n.Type())) + as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, q, n)) *early = append(*early, as) return q } @@ -2455,7 +2414,7 @@ func paramstoheap(params *types.Type) []ir.Node { if stackcopy := v.Name().Stackcopy; stackcopy != nil { nn = append(nn, walkstmt(ir.NewDecl(base.Pos, ir.ODCL, v))) if stackcopy.Class_ == ir.PPARAM { - nn = append(nn, walkstmt(typecheck(ir.NewAssignStmt(base.Pos, v, stackcopy), ctxStmt))) + nn = append(nn, walkstmt(typecheck.Stmt(ir.NewAssignStmt(base.Pos, v, stackcopy)))) } } } @@ -2503,7 +2462,7 @@ func returnsfromheap(params *types.Type) []ir.Node { continue } if stackcopy := v.Name().Stackcopy; stackcopy != nil && stackcopy.Class_ == ir.PPARAMOUT { - nn = append(nn, walkstmt(typecheck(ir.NewAssignStmt(base.Pos, stackcopy, v), ctxStmt))) + nn = append(nn, walkstmt(typecheck.Stmt(ir.NewAssignStmt(base.Pos, stackcopy, v)))) } } @@ -2536,41 +2495,19 @@ func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) *ir.CallEx } call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, va) - TypecheckCall(call) + typecheck.Call(call) call.SetType(t) return walkexpr(call, init).(*ir.CallExpr) } func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr { - return vmkcall(syslook(name), t, init, args) + return vmkcall(typecheck.LookupRuntime(name), t, init, args) } func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr { return vmkcall(fn, t, init, args) } -func conv(n ir.Node, t *types.Type) ir.Node { - if types.Identical(n.Type(), t) { - return n - } - n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n) - n.SetType(t) - n = typecheck(n, ctxExpr) - return n -} - -// convnop converts node n to type t using the OCONVNOP op -// and typechecks the result with ctxExpr. 
-func convnop(n ir.Node, t *types.Type) ir.Node { - if types.Identical(n.Type(), t) { - return n - } - n = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, n) - n.SetType(t) - n = typecheck(n, ctxExpr) - return n -} - // byteindex converts n, which is byte-sized, to an int used to index into an array. // We cannot use conv, because we allow converting bool to int here, // which is forbidden in user code. @@ -2594,14 +2531,14 @@ func chanfn(name string, n int, t *types.Type) ir.Node { if !t.IsChan() { base.Fatalf("chanfn %v", t) } - fn := syslook(name) + fn := typecheck.LookupRuntime(name) switch n { default: base.Fatalf("chanfn %d", n) case 1: - fn = substArgTypes(fn, t.Elem()) + fn = typecheck.SubstArgTypes(fn, t.Elem()) case 2: - fn = substArgTypes(fn, t.Elem(), t.Elem()) + fn = typecheck.SubstArgTypes(fn, t.Elem(), t.Elem()) } return fn } @@ -2610,8 +2547,8 @@ func mapfn(name string, t *types.Type) ir.Node { if !t.IsMap() { base.Fatalf("mapfn %v", t) } - fn := syslook(name) - fn = substArgTypes(fn, t.Key(), t.Elem(), t.Key(), t.Elem()) + fn := typecheck.LookupRuntime(name) + fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key(), t.Elem()) return fn } @@ -2619,8 +2556,8 @@ func mapfndel(name string, t *types.Type) ir.Node { if !t.IsMap() { base.Fatalf("mapfn %v", t) } - fn := syslook(name) - fn = substArgTypes(fn, t.Key(), t.Elem(), t.Key()) + fn := typecheck.LookupRuntime(name) + fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key()) return fn } @@ -2675,8 +2612,8 @@ func mapfast(t *types.Type) int { } func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node { - fn := syslook(name) - fn = substArgTypes(fn, l, r) + fn := typecheck.LookupRuntime(name) + fn = typecheck.SubstArgTypes(fn, l, r) return fn } @@ -2687,7 +2624,7 @@ func addstr(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { base.Fatalf("addstr count %d too small", c) } - buf := nodnil() + buf := typecheck.NodNil() if n.Esc() == ir.EscNone { sz := int64(0) for _, n1 := range n.List { @@ -2700,14 +2637,14 @@ func addstr(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { if sz < tmpstringbufsize { // Create temporary buffer for result string on stack. 
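// Judging from the runtime entry points used below (concatstring2
// through concatstring5 and concatstrings in runtimeDecls), the
// concatenation lowers to a single call whose first argument is this
// buffer, or nil when the result escapes. Roughly:
//
//	s := a + b + c             // up to five operands
//	  => concatstring3(&buf, a, b, c)
//	s := a + b + c + d + e + f // more than five operands
//	  => concatstrings(&buf, []string{a, b, c, d, e, f})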
t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize) - buf = nodAddr(temp(t)) + buf = typecheck.NodAddr(typecheck.Temp(t)) } } // build list of string arguments args := []ir.Node{buf} for _, n2 := range n.List { - args = append(args, conv(n2, types.Types[types.TSTRING])) + args = append(args, typecheck.Conv(n2, types.Types[types.TSTRING])) } var fn string @@ -2727,10 +2664,10 @@ func addstr(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { slice.SetEsc(ir.EscNone) } - cat := syslook(fn) + cat := typecheck.LookupRuntime(fn) r := ir.NewCallExpr(base.Pos, ir.OCALL, cat, nil) r.Args.Set(args) - r1 := typecheck(r, ctxExpr) + r1 := typecheck.Expr(r) r1 = walkexpr(r1, init) r1.SetType(n.Type()) @@ -2774,24 +2711,24 @@ func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { var nodes ir.Nodes // var s []T - s := temp(l1.Type()) + s := typecheck.Temp(l1.Type()) nodes.Append(ir.NewAssignStmt(base.Pos, s, l1)) // s = l1 elemtype := s.Type().Elem() // n := len(s) + len(l2) - nn := temp(types.Types[types.TINT]) + nn := typecheck.Temp(types.Types[types.TINT]) nodes.Append(ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), ir.NewUnaryExpr(base.Pos, ir.OLEN, l2)))) // if uint(n) > uint(cap(s)) nif := ir.NewIfStmt(base.Pos, nil, nil, nil) - nuint := conv(nn, types.Types[types.TUINT]) - scapuint := conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT]) + nuint := typecheck.Conv(nn, types.Types[types.TUINT]) + scapuint := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT]) nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, nuint, scapuint) // instantiate growslice(typ *type, []any, int) []any - fn := syslook("growslice") - fn = substArgTypes(fn, elemtype, elemtype) + fn := typecheck.LookupRuntime("growslice") + fn = typecheck.SubstArgTypes(fn, elemtype, elemtype) // s = growslice(T, s, n) nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn))} @@ -2813,8 +2750,8 @@ func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { ir.CurFunc.SetWBPos(n.Pos()) // instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int - fn := syslook("typedslicecopy") - fn = substArgTypes(fn, l1.Type().Elem(), l2.Type().Elem()) + fn := typecheck.LookupRuntime("typedslicecopy") + fn = typecheck.SubstArgTypes(fn, l1.Type().Elem(), l2.Type().Elem()) ptr1, len1 := backingArrayPtrLen(cheapexpr(slice, &nodes)) ptr2, len2 := backingArrayPtrLen(l2) ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, typename(elemtype), ptr1, len1, ptr2, len2) @@ -2829,28 +2766,28 @@ func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { ptr1, len1 := backingArrayPtrLen(cheapexpr(slice, &nodes)) ptr2, len2 := backingArrayPtrLen(l2) - fn := syslook("slicecopy") - fn = substArgTypes(fn, ptr1.Type().Elem(), ptr2.Type().Elem()) + fn := typecheck.LookupRuntime("slicecopy") + fn = typecheck.SubstArgTypes(fn, ptr1.Type().Elem(), ptr2.Type().Elem()) ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, ir.NewInt(elemtype.Width)) } else { // memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T)) ix := ir.NewIndexExpr(base.Pos, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1)) ix.SetBounded(true) - addr := nodAddr(ix) + addr := typecheck.NodAddr(ix) sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, l2) - nwid := cheapexpr(conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, l2), types.Types[types.TUINTPTR]), &nodes) + nwid := cheapexpr(typecheck.Conv(ir.NewUnaryExpr(base.Pos, 
ir.OLEN, l2), types.Types[types.TUINTPTR]), &nodes) nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(elemtype.Width)) // instantiate func memmove(to *any, frm *any, length uintptr) - fn := syslook("memmove") - fn = substArgTypes(fn, elemtype, elemtype) + fn := typecheck.LookupRuntime("memmove") + fn = typecheck.SubstArgTypes(fn, elemtype, elemtype) ncopy = mkcall1(fn, nil, &nodes, addr, sptr, nwid) } ln := append(nodes, ncopy) - typecheckslice(ln, ctxStmt) + typecheck.Stmts(ln) walkstmtlist(ln) init.Append(ln...) return s @@ -2925,8 +2862,8 @@ func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { // isAppendOfMake made sure all possible positive values of l2 fit into an uint. // The case of l2 overflow when converting from e.g. uint to int is handled by an explicit // check of l2 < 0 at runtime which is generated below. - l2 := conv(n.Args[1].(*ir.MakeExpr).Len, types.Types[types.TINT]) - l2 = typecheck(l2, ctxExpr) + l2 := typecheck.Conv(n.Args[1].(*ir.MakeExpr).Len, types.Types[types.TINT]) + l2 = typecheck.Expr(l2) n.Args[1] = l2 // walkAppendArgs expects l2 in n.List.Second(). walkAppendArgs(n, init) @@ -2945,23 +2882,23 @@ func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { nodes = append(nodes, nifneg) // s := l1 - s := temp(l1.Type()) + s := typecheck.Temp(l1.Type()) nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, l1)) elemtype := s.Type().Elem() // n := len(s) + l2 - nn := temp(types.Types[types.TINT]) + nn := typecheck.Temp(types.Types[types.TINT]) nodes = append(nodes, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), l2))) // if uint(n) > uint(cap(s)) - nuint := conv(nn, types.Types[types.TUINT]) - capuint := conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT]) + nuint := typecheck.Conv(nn, types.Types[types.TUINT]) + capuint := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT]) nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, nuint, capuint), nil, nil) // instantiate growslice(typ *type, old []any, newcap int) []any - fn := syslook("growslice") - fn = substArgTypes(fn, elemtype, elemtype) + fn := typecheck.LookupRuntime("growslice") + fn = typecheck.SubstArgTypes(fn, elemtype, elemtype) // s = growslice(T, s, n) nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn))} @@ -2974,22 +2911,22 @@ func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, nt)) // lptr := &l1[0] - l1ptr := temp(l1.Type().Elem().PtrTo()) + l1ptr := typecheck.Temp(l1.Type().Elem().PtrTo()) tmp := ir.NewUnaryExpr(base.Pos, ir.OSPTR, l1) nodes = append(nodes, ir.NewAssignStmt(base.Pos, l1ptr, tmp)) // sptr := &s[0] - sptr := temp(elemtype.PtrTo()) + sptr := typecheck.Temp(elemtype.PtrTo()) tmp = ir.NewUnaryExpr(base.Pos, ir.OSPTR, s) nodes = append(nodes, ir.NewAssignStmt(base.Pos, sptr, tmp)) // hp := &s[len(l1)] ix := ir.NewIndexExpr(base.Pos, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1)) ix.SetBounded(true) - hp := convnop(nodAddr(ix), types.Types[types.TUNSAFEPTR]) + hp := typecheck.ConvNop(typecheck.NodAddr(ix), types.Types[types.TUNSAFEPTR]) // hn := l2 * sizeof(elem(s)) - hn := conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, l2, ir.NewInt(elemtype.Width)), types.Types[types.TUINTPTR]) + hn := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, l2, ir.NewInt(elemtype.Width)), types.Types[types.TUINTPTR]) clrname := "memclrNoHeapPointers" hasPointers := 
elemtype.HasPointers()
 
@@ -3011,7 +2948,7 @@ func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
 		nodes = append(nodes, clr...)
 	}
 
-	typecheckslice(nodes, ctxStmt)
+	typecheck.Stmts(nodes)
 	walkstmtlist(nodes)
 	init.Append(nodes...)
 	return s
@@ -3057,7 +2994,7 @@ func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
 	for i, n := range ls {
 		n = cheapexpr(n, init)
 		if !types.Identical(n.Type(), nsrc.Type().Elem()) {
-			n = assignconv(n, nsrc.Type().Elem(), "append")
+			n = typecheck.AssignConv(n, nsrc.Type().Elem(), "append")
 			n = walkexpr(n, init)
 		}
 		ls[i] = n
@@ -3076,22 +3013,22 @@ func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
 
 	var l []ir.Node
 
-	ns := temp(nsrc.Type())
+	ns := typecheck.Temp(nsrc.Type())
 	l = append(l, ir.NewAssignStmt(base.Pos, ns, nsrc)) // s = src
 
 	na := ir.NewInt(int64(argc)) // const argc
 	nif := ir.NewIfStmt(base.Pos, nil, nil, nil) // if cap(s) - len(s) < argc
 	nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OCAP, ns), ir.NewUnaryExpr(base.Pos, ir.OLEN, ns)), na)
 
-	fn := syslook("growslice") // growslice(<type>, old []T, mincap int) (ret []T)
-	fn = substArgTypes(fn, ns.Type().Elem(), ns.Type().Elem())
+	fn := typecheck.LookupRuntime("growslice") // growslice(<type>, old []T, mincap int) (ret []T)
+	fn = typecheck.SubstArgTypes(fn, ns.Type().Elem(), ns.Type().Elem())
 
 	nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), typename(ns.Type().Elem()), ns, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns), na)))}
 	l = append(l, nif)
 
-	nn := temp(types.Types[types.TINT])
+	nn := typecheck.Temp(types.Types[types.TINT])
 	l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns))) // n = len(s)
 
 	slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, ns) // ...s[:n+argc]
@@ -3109,7 +3046,7 @@ func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
 		}
 	}
 
-	typecheckslice(l, ctxStmt)
+	typecheck.Stmts(l)
 	walkstmtlist(l)
 	init.Append(l...)
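// Taken together, the statement list l implements, for append(s, e1, e2),
// roughly the following (the element stores themselves are in the hunk
// elided just above):
//
//	s := src
//	if cap(s)-len(s) < 2 {
//		s = growslice(T, s, len(s)+2)
//	}
//	n := len(s)
//	s = s[:n+2]
//	s[n] = e1
//	s[n+1] = e2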
return ns @@ -3147,16 +3084,16 @@ func copyany(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node { n.Y = cheapexpr(n.Y, init) ptrR, lenR := backingArrayPtrLen(n.Y) - fn := syslook("slicecopy") - fn = substArgTypes(fn, ptrL.Type().Elem(), ptrR.Type().Elem()) + fn := typecheck.LookupRuntime("slicecopy") + fn = typecheck.SubstArgTypes(fn, ptrL.Type().Elem(), ptrR.Type().Elem()) return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, ir.NewInt(n.X.Type().Elem().Width)) } n.X = walkexpr(n.X, init) n.Y = walkexpr(n.Y, init) - nl := temp(n.X.Type()) - nr := temp(n.Y.Type()) + nl := typecheck.Temp(n.X.Type()) + nr := typecheck.Temp(n.Y.Type()) var l []ir.Node l = append(l, ir.NewAssignStmt(base.Pos, nl, n.X)) l = append(l, ir.NewAssignStmt(base.Pos, nr, n.Y)) @@ -3164,7 +3101,7 @@ func copyany(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node { nfrm := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nr) nto := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nl) - nlen := temp(types.Types[types.TINT]) + nlen := typecheck.Temp(types.Types[types.TINT]) // n = len(to) l = append(l, ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nl))) @@ -3181,16 +3118,16 @@ func copyany(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node { ne.Likely = true l = append(l, ne) - fn := syslook("memmove") - fn = substArgTypes(fn, nl.Type().Elem(), nl.Type().Elem()) - nwid := ir.Node(temp(types.Types[types.TUINTPTR])) - setwid := ir.NewAssignStmt(base.Pos, nwid, conv(nlen, types.Types[types.TUINTPTR])) + fn := typecheck.LookupRuntime("memmove") + fn = typecheck.SubstArgTypes(fn, nl.Type().Elem(), nl.Type().Elem()) + nwid := ir.Node(typecheck.Temp(types.Types[types.TUINTPTR])) + setwid := ir.NewAssignStmt(base.Pos, nwid, typecheck.Conv(nlen, types.Types[types.TUINTPTR])) ne.Body.Append(setwid) nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(nl.Type().Elem().Width)) call := mkcall1(fn, nil, init, nto, nfrm, nwid) ne.Body.Append(call) - typecheckslice(l, ctxStmt) + typecheck.Stmts(l) walkstmtlist(l) init.Append(l...) return nlen @@ -3203,14 +3140,14 @@ func eqfor(t *types.Type) (n ir.Node, needsize bool) { // is handled by walkcompare. 
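// Concretely: for a type compared byte-for-byte (AMEM), eqfor returns
// the runtime's memequal and needsize=true, so the caller appends the
// byte count, producing a call like
//
//	memequal(&a, &b, unsafe.Sizeof(a))
//
// For ASPECIAL types it returns the compiler-generated .eq function,
// built below with two *T parameters and, since no size argument is
// needed there, presumably needsize=false.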
switch a, _ := types.AlgType(t); a { case types.AMEM: - n := syslook("memequal") - n = substArgTypes(n, t, t) + n := typecheck.LookupRuntime("memequal") + n = typecheck.SubstArgTypes(n, t, t) return n, true case types.ASPECIAL: sym := typesymprefix(".eq", t) - n := NewName(sym) + n := typecheck.NewName(sym) ir.MarkFunc(n) - n.SetType(functype(nil, []*ir.Field{ + n.SetType(typecheck.NewFuncType(nil, []*ir.Field{ ir.NewField(base.Pos, nil, nil, types.NewPtr(t)), ir.NewField(base.Pos, nil, nil, types.NewPtr(t)), }, []*ir.Field{ @@ -3267,7 +3204,7 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { tab.SetTypecheck(1) eqtype = ir.NewBinaryExpr(base.Pos, eq, tab, rtyp) } else { - nonnil := ir.NewBinaryExpr(base.Pos, brcom(eq), nodnil(), tab) + nonnil := ir.NewBinaryExpr(base.Pos, brcom(eq), typecheck.NodNil(), tab) match := ir.NewBinaryExpr(base.Pos, eq, itabType(tab), rtyp) eqtype = ir.NewLogicalExpr(base.Pos, andor, nonnil, match) } @@ -3366,8 +3303,8 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { fn, needsize := eqfor(t) call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) - call.Args.Append(nodAddr(cmpl)) - call.Args.Append(nodAddr(cmpr)) + call.Args.Append(typecheck.NodAddr(cmpl)) + call.Args.Append(typecheck.NodAddr(cmpr)) if needsize { call.Args.Append(ir.NewInt(t.Width)) } @@ -3436,22 +3373,22 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { } else { elemType := t.Elem().ToUnsigned() cmplw := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i))) - cmplw = conv(cmplw, elemType) // convert to unsigned - cmplw = conv(cmplw, convType) // widen + cmplw = typecheck.Conv(cmplw, elemType) // convert to unsigned + cmplw = typecheck.Conv(cmplw, convType) // widen cmprw := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i))) - cmprw = conv(cmprw, elemType) - cmprw = conv(cmprw, convType) + cmprw = typecheck.Conv(cmprw, elemType) + cmprw = typecheck.Conv(cmprw, convType) // For code like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ... // ssa will generate a single large load. for offset := int64(1); offset < step; offset++ { lb := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i+offset))) - lb = conv(lb, elemType) - lb = conv(lb, convType) + lb = typecheck.Conv(lb, elemType) + lb = typecheck.Conv(lb, convType) lb = ir.NewBinaryExpr(base.Pos, ir.OLSH, lb, ir.NewInt(8*t.Elem().Width*offset)) cmplw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmplw, lb) rb := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i+offset))) - rb = conv(rb, elemType) - rb = conv(rb, convType) + rb = typecheck.Conv(rb, elemType) + rb = typecheck.Conv(rb, convType) rb = ir.NewBinaryExpr(base.Pos, ir.OLSH, rb, ir.NewInt(8*t.Elem().Width*offset)) cmprw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmprw, rb) } @@ -3465,9 +3402,9 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { expr = ir.NewBool(n.Op() == ir.OEQ) // We still need to use cmpl and cmpr, in case they contain // an expression which might panic. See issue 23837. 
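// For instance, with
//
//	var a []struct{}
//	_ = a[i] == a[j]
//
// the comparison itself folds to a constant, but a[i] and a[j] must keep
// their bounds checks; the two assignments below evaluate both operands
// into a throwaway temporary for exactly that reason.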
- t := temp(cmpl.Type()) - a1 := typecheck(ir.NewAssignStmt(base.Pos, t, cmpl), ctxStmt) - a2 := typecheck(ir.NewAssignStmt(base.Pos, t, cmpr), ctxStmt) + t := typecheck.Temp(cmpl.Type()) + a1 := typecheck.Stmt(ir.NewAssignStmt(base.Pos, t, cmpl)) + a2 := typecheck.Stmt(ir.NewAssignStmt(base.Pos, t, cmpr)) init.Append(a1, a2) } return finishcompare(n, expr, init) @@ -3479,7 +3416,7 @@ func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node { n = copyexpr(n, n.Type(), init) } - return conv(n, t) + return typecheck.Conv(n, t) } func walkcompareInterface(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { @@ -3573,13 +3510,13 @@ func walkcompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { convType = types.Types[types.TUINT16] step = 2 } - ncsubstr := conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i))), convType) + ncsubstr := typecheck.Conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i))), convType) csubstr := int64(s[i]) // Calculate large constant from bytes as sequence of shifts and ors. // Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ... // ssa will combine this into a single large load. for offset := 1; offset < step; offset++ { - b := conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i+offset))), convType) + b := typecheck.Conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i+offset))), convType) b = ir.NewBinaryExpr(base.Pos, ir.OLSH, b, ir.NewInt(int64(8*offset))) ncsubstr = ir.NewBinaryExpr(base.Pos, ir.OOR, ncsubstr, b) csubstr |= int64(s[i+offset]) << uint8(8*offset) @@ -3612,7 +3549,7 @@ func walkcompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { } } else { // sys_cmpstring(s1, s2) :: 0 - r = mkcall("cmpstring", types.Types[types.TINT], init, conv(n.X, types.Types[types.TSTRING]), conv(n.Y, types.Types[types.TSTRING])) + r = mkcall("cmpstring", types.Types[types.TINT], init, typecheck.Conv(n.X, types.Types[types.TSTRING]), typecheck.Conv(n.Y, types.Types[types.TSTRING])) r = ir.NewBinaryExpr(base.Pos, n.Op(), r, ir.NewInt(0)) } @@ -3622,8 +3559,8 @@ func walkcompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { // The result of finishcompare MUST be assigned back to n, e.g. 
// n.Left = finishcompare(n.Left, x, r, init) func finishcompare(n *ir.BinaryExpr, r ir.Node, init *ir.Nodes) ir.Node { - r = typecheck(r, ctxExpr) - r = conv(r, n.Type()) + r = typecheck.Expr(r) + r = typecheck.Conv(r, n.Type()) r = walkexpr(r, init) return r } @@ -3926,7 +3863,7 @@ func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node { origArgs := make([]ir.Node, len(n.Args)) var funcArgs []*ir.Field for i, arg := range n.Args { - s := lookupN("a", i) + s := typecheck.LookupNum("a", i) if !isBuiltinCall && arg.Op() == ir.OCONVNOP && arg.Type().IsUintptr() && arg.(*ir.ConvExpr).X.Type().IsUnsafePtr() { origArgs[i] = arg arg = arg.(*ir.ConvExpr).X @@ -3937,8 +3874,8 @@ func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node { t := ir.NewFuncType(base.Pos, nil, funcArgs, nil) wrapCall_prgen++ - sym := lookupN("wrap·", wrapCall_prgen) - fn := dclfunc(sym, t) + sym := typecheck.LookupNum("wrap·", wrapCall_prgen) + fn := typecheck.DeclFunc(sym, t) args := ir.ParamNames(t.Type()) for i, origArg := range origArgs { @@ -3954,32 +3891,14 @@ func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node { } fn.Body = []ir.Node{call} - funcbody() + typecheck.FinishFuncBody() - typecheckFunc(fn) - typecheckslice(fn.Body, ctxStmt) - Target.Decls = append(Target.Decls, fn) + typecheck.Func(fn) + typecheck.Stmts(fn.Body) + typecheck.Target.Decls = append(typecheck.Target.Decls, fn) call = ir.NewCallExpr(base.Pos, ir.OCALL, fn.Nname, n.Args) - return walkexpr(typecheck(call, ctxStmt), init) -} - -// substArgTypes substitutes the given list of types for -// successive occurrences of the "any" placeholder in the -// type syntax expression n.Type. -// The result of substArgTypes MUST be assigned back to old, e.g. -// n.Left = substArgTypes(n.Left, t1, t2) -func substArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name { - n := old.CloneName() - - for _, t := range types_ { - types.CalcSize(t) - } - n.SetType(types.SubstAny(n.Type(), &types_)) - if len(types_) > 0 { - base.Fatalf("substArgTypes: too many argument types") - } - return n + return walkexpr(typecheck.Stmt(call), init) } // canMergeLoads reports whether the backend optimization passes for @@ -4025,7 +3944,7 @@ func walkCheckPtrAlignment(n *ir.ConvExpr, init *ir.Nodes, count ir.Node) ir.Nod } n.X = cheapexpr(n.X, init) - init.Append(mkcall("checkptrAlignment", nil, init, convnop(n.X, types.Types[types.TUNSAFEPTR]), typename(elem), conv(count, types.Types[types.TUINTPTR]))) + init.Append(mkcall("checkptrAlignment", nil, init, typecheck.ConvNop(n.X, types.Types[types.TUNSAFEPTR]), typename(elem), typecheck.Conv(count, types.Types[types.TUINTPTR]))) return n } @@ -4077,7 +3996,7 @@ func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node { n := n.(*ir.ConvExpr) if n.X.Type().IsUnsafePtr() { n.X = cheapexpr(n.X, init) - originals = append(originals, convnop(n.X, types.Types[types.TUNSAFEPTR])) + originals = append(originals, typecheck.ConvNop(n.X, types.Types[types.TUNSAFEPTR])) } } } @@ -4085,10 +4004,10 @@ func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node { cheap := cheapexpr(n, init) - slice := mkdotargslice(types.NewSlice(types.Types[types.TUNSAFEPTR]), originals) + slice := typecheck.MakeDotArgs(types.NewSlice(types.Types[types.TUNSAFEPTR]), originals) slice.SetEsc(ir.EscNone) - init.Append(mkcall("checkptrArithmetic", nil, init, convnop(cheap, types.Types[types.TUNSAFEPTR]), slice)) + init.Append(mkcall("checkptrArithmetic", nil, init, typecheck.ConvNop(cheap, types.Types[types.TUNSAFEPTR]), slice)) // TODO(khr): Mark 
backing store of slice as dead. This will allow us to reuse // the backing store for multiple calls to checkptrArithmetic. @@ -4098,7 +4017,7 @@ func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node { // appendWalkStmt typechecks and walks stmt and then appends it to init. func appendWalkStmt(init *ir.Nodes, stmt ir.Node) { op := stmt.Op() - n := typecheck(stmt, ctxStmt) + n := typecheck.Stmt(stmt) if op == ir.OAS || op == ir.OAS2 { // If the assignment has side effects, walkexpr will append them // directly to init for us, while walkstmt will wrap it in an OBLOCK. diff --git a/src/cmd/compile/internal/typecheck/bexport.go b/src/cmd/compile/internal/typecheck/bexport.go new file mode 100644 index 0000000000000..4a84bb13fa48e --- /dev/null +++ b/src/cmd/compile/internal/typecheck/bexport.go @@ -0,0 +1,102 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typecheck + +import "cmd/compile/internal/types" + +// ---------------------------------------------------------------------------- +// Export format + +// Tags. Must be < 0. +const ( + // Objects + packageTag = -(iota + 1) + constTag + typeTag + varTag + funcTag + endTag + + // Types + namedTag + arrayTag + sliceTag + dddTag + structTag + pointerTag + signatureTag + interfaceTag + mapTag + chanTag + + // Values + falseTag + trueTag + int64Tag + floatTag + fractionTag // not used by gc + complexTag + stringTag + nilTag + unknownTag // not used by gc (only appears in packages with errors) + + // Type aliases + aliasTag +) + +var predecl []*types.Type // initialized lazily + +func predeclared() []*types.Type { + if predecl == nil { + // initialize lazily to be sure that all + // elements have been initialized before + predecl = []*types.Type{ + // basic types + types.Types[types.TBOOL], + types.Types[types.TINT], + types.Types[types.TINT8], + types.Types[types.TINT16], + types.Types[types.TINT32], + types.Types[types.TINT64], + types.Types[types.TUINT], + types.Types[types.TUINT8], + types.Types[types.TUINT16], + types.Types[types.TUINT32], + types.Types[types.TUINT64], + types.Types[types.TUINTPTR], + types.Types[types.TFLOAT32], + types.Types[types.TFLOAT64], + types.Types[types.TCOMPLEX64], + types.Types[types.TCOMPLEX128], + types.Types[types.TSTRING], + + // basic type aliases + types.ByteType, + types.RuneType, + + // error + types.ErrorType, + + // untyped types + types.UntypedBool, + types.UntypedInt, + types.UntypedRune, + types.UntypedFloat, + types.UntypedComplex, + types.UntypedString, + types.Types[types.TNIL], + + // package unsafe + types.Types[types.TUNSAFEPTR], + + // invalid type (package contains errors) + types.Types[types.Txxx], + + // any type, for builtin export data + types.Types[types.TANY], + } + } + return predecl +} diff --git a/src/cmd/compile/internal/typecheck/builtin.go b/src/cmd/compile/internal/typecheck/builtin.go new file mode 100644 index 0000000000000..d3c30fbf50c33 --- /dev/null +++ b/src/cmd/compile/internal/typecheck/builtin.go @@ -0,0 +1,344 @@ +// Code generated by mkbuiltin.go. DO NOT EDIT. 
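// This file is regenerated by mkbuiltin.go from the declarations in
// builtin/runtime.go, which is moved into this package further down in
// the patch: runtimeDecls names every runtime function the compiler may
// call directly, and runtimeTypes (below) spells out their signatures.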
+ +package typecheck + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" +) + +var runtimeDecls = [...]struct { + name string + tag int + typ int +}{ + {"newobject", funcTag, 4}, + {"mallocgc", funcTag, 8}, + {"panicdivide", funcTag, 9}, + {"panicshift", funcTag, 9}, + {"panicmakeslicelen", funcTag, 9}, + {"panicmakeslicecap", funcTag, 9}, + {"throwinit", funcTag, 9}, + {"panicwrap", funcTag, 9}, + {"gopanic", funcTag, 11}, + {"gorecover", funcTag, 14}, + {"goschedguarded", funcTag, 9}, + {"goPanicIndex", funcTag, 16}, + {"goPanicIndexU", funcTag, 18}, + {"goPanicSliceAlen", funcTag, 16}, + {"goPanicSliceAlenU", funcTag, 18}, + {"goPanicSliceAcap", funcTag, 16}, + {"goPanicSliceAcapU", funcTag, 18}, + {"goPanicSliceB", funcTag, 16}, + {"goPanicSliceBU", funcTag, 18}, + {"goPanicSlice3Alen", funcTag, 16}, + {"goPanicSlice3AlenU", funcTag, 18}, + {"goPanicSlice3Acap", funcTag, 16}, + {"goPanicSlice3AcapU", funcTag, 18}, + {"goPanicSlice3B", funcTag, 16}, + {"goPanicSlice3BU", funcTag, 18}, + {"goPanicSlice3C", funcTag, 16}, + {"goPanicSlice3CU", funcTag, 18}, + {"printbool", funcTag, 19}, + {"printfloat", funcTag, 21}, + {"printint", funcTag, 23}, + {"printhex", funcTag, 25}, + {"printuint", funcTag, 25}, + {"printcomplex", funcTag, 27}, + {"printstring", funcTag, 29}, + {"printpointer", funcTag, 30}, + {"printuintptr", funcTag, 31}, + {"printiface", funcTag, 30}, + {"printeface", funcTag, 30}, + {"printslice", funcTag, 30}, + {"printnl", funcTag, 9}, + {"printsp", funcTag, 9}, + {"printlock", funcTag, 9}, + {"printunlock", funcTag, 9}, + {"concatstring2", funcTag, 34}, + {"concatstring3", funcTag, 35}, + {"concatstring4", funcTag, 36}, + {"concatstring5", funcTag, 37}, + {"concatstrings", funcTag, 39}, + {"cmpstring", funcTag, 40}, + {"intstring", funcTag, 43}, + {"slicebytetostring", funcTag, 44}, + {"slicebytetostringtmp", funcTag, 45}, + {"slicerunetostring", funcTag, 48}, + {"stringtoslicebyte", funcTag, 50}, + {"stringtoslicerune", funcTag, 53}, + {"slicecopy", funcTag, 54}, + {"decoderune", funcTag, 55}, + {"countrunes", funcTag, 56}, + {"convI2I", funcTag, 57}, + {"convT16", funcTag, 58}, + {"convT32", funcTag, 58}, + {"convT64", funcTag, 58}, + {"convTstring", funcTag, 58}, + {"convTslice", funcTag, 58}, + {"convT2E", funcTag, 59}, + {"convT2Enoptr", funcTag, 59}, + {"convT2I", funcTag, 59}, + {"convT2Inoptr", funcTag, 59}, + {"assertE2I", funcTag, 57}, + {"assertE2I2", funcTag, 60}, + {"assertI2I", funcTag, 57}, + {"assertI2I2", funcTag, 60}, + {"panicdottypeE", funcTag, 61}, + {"panicdottypeI", funcTag, 61}, + {"panicnildottype", funcTag, 62}, + {"ifaceeq", funcTag, 64}, + {"efaceeq", funcTag, 64}, + {"fastrand", funcTag, 66}, + {"makemap64", funcTag, 68}, + {"makemap", funcTag, 69}, + {"makemap_small", funcTag, 70}, + {"mapaccess1", funcTag, 71}, + {"mapaccess1_fast32", funcTag, 72}, + {"mapaccess1_fast64", funcTag, 72}, + {"mapaccess1_faststr", funcTag, 72}, + {"mapaccess1_fat", funcTag, 73}, + {"mapaccess2", funcTag, 74}, + {"mapaccess2_fast32", funcTag, 75}, + {"mapaccess2_fast64", funcTag, 75}, + {"mapaccess2_faststr", funcTag, 75}, + {"mapaccess2_fat", funcTag, 76}, + {"mapassign", funcTag, 71}, + {"mapassign_fast32", funcTag, 72}, + {"mapassign_fast32ptr", funcTag, 72}, + {"mapassign_fast64", funcTag, 72}, + {"mapassign_fast64ptr", funcTag, 72}, + {"mapassign_faststr", funcTag, 72}, + {"mapiterinit", funcTag, 77}, + {"mapdelete", funcTag, 77}, + {"mapdelete_fast32", funcTag, 78}, + {"mapdelete_fast64", funcTag, 78}, + 
{"mapdelete_faststr", funcTag, 78}, + {"mapiternext", funcTag, 79}, + {"mapclear", funcTag, 80}, + {"makechan64", funcTag, 82}, + {"makechan", funcTag, 83}, + {"chanrecv1", funcTag, 85}, + {"chanrecv2", funcTag, 86}, + {"chansend1", funcTag, 88}, + {"closechan", funcTag, 30}, + {"writeBarrier", varTag, 90}, + {"typedmemmove", funcTag, 91}, + {"typedmemclr", funcTag, 92}, + {"typedslicecopy", funcTag, 93}, + {"selectnbsend", funcTag, 94}, + {"selectnbrecv", funcTag, 95}, + {"selectnbrecv2", funcTag, 97}, + {"selectsetpc", funcTag, 98}, + {"selectgo", funcTag, 99}, + {"block", funcTag, 9}, + {"makeslice", funcTag, 100}, + {"makeslice64", funcTag, 101}, + {"makeslicecopy", funcTag, 102}, + {"growslice", funcTag, 104}, + {"memmove", funcTag, 105}, + {"memclrNoHeapPointers", funcTag, 106}, + {"memclrHasPointers", funcTag, 106}, + {"memequal", funcTag, 107}, + {"memequal0", funcTag, 108}, + {"memequal8", funcTag, 108}, + {"memequal16", funcTag, 108}, + {"memequal32", funcTag, 108}, + {"memequal64", funcTag, 108}, + {"memequal128", funcTag, 108}, + {"f32equal", funcTag, 109}, + {"f64equal", funcTag, 109}, + {"c64equal", funcTag, 109}, + {"c128equal", funcTag, 109}, + {"strequal", funcTag, 109}, + {"interequal", funcTag, 109}, + {"nilinterequal", funcTag, 109}, + {"memhash", funcTag, 110}, + {"memhash0", funcTag, 111}, + {"memhash8", funcTag, 111}, + {"memhash16", funcTag, 111}, + {"memhash32", funcTag, 111}, + {"memhash64", funcTag, 111}, + {"memhash128", funcTag, 111}, + {"f32hash", funcTag, 111}, + {"f64hash", funcTag, 111}, + {"c64hash", funcTag, 111}, + {"c128hash", funcTag, 111}, + {"strhash", funcTag, 111}, + {"interhash", funcTag, 111}, + {"nilinterhash", funcTag, 111}, + {"int64div", funcTag, 112}, + {"uint64div", funcTag, 113}, + {"int64mod", funcTag, 112}, + {"uint64mod", funcTag, 113}, + {"float64toint64", funcTag, 114}, + {"float64touint64", funcTag, 115}, + {"float64touint32", funcTag, 116}, + {"int64tofloat64", funcTag, 117}, + {"uint64tofloat64", funcTag, 118}, + {"uint32tofloat64", funcTag, 119}, + {"complex128div", funcTag, 120}, + {"racefuncenter", funcTag, 31}, + {"racefuncenterfp", funcTag, 9}, + {"racefuncexit", funcTag, 9}, + {"raceread", funcTag, 31}, + {"racewrite", funcTag, 31}, + {"racereadrange", funcTag, 121}, + {"racewriterange", funcTag, 121}, + {"msanread", funcTag, 121}, + {"msanwrite", funcTag, 121}, + {"msanmove", funcTag, 122}, + {"checkptrAlignment", funcTag, 123}, + {"checkptrArithmetic", funcTag, 125}, + {"libfuzzerTraceCmp1", funcTag, 127}, + {"libfuzzerTraceCmp2", funcTag, 129}, + {"libfuzzerTraceCmp4", funcTag, 130}, + {"libfuzzerTraceCmp8", funcTag, 131}, + {"libfuzzerTraceConstCmp1", funcTag, 127}, + {"libfuzzerTraceConstCmp2", funcTag, 129}, + {"libfuzzerTraceConstCmp4", funcTag, 130}, + {"libfuzzerTraceConstCmp8", funcTag, 131}, + {"x86HasPOPCNT", varTag, 6}, + {"x86HasSSE41", varTag, 6}, + {"x86HasFMA", varTag, 6}, + {"armHasVFPv4", varTag, 6}, + {"arm64HasATOMICS", varTag, 6}, +} + +func runtimeTypes() []*types.Type { + var typs [132]*types.Type + typs[0] = types.ByteType + typs[1] = types.NewPtr(typs[0]) + typs[2] = types.Types[types.TANY] + typs[3] = types.NewPtr(typs[2]) + typs[4] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])}) + typs[5] = types.Types[types.TUINTPTR] + typs[6] = types.Types[types.TBOOL] + typs[7] = types.Types[types.TUNSAFEPTR] + typs[8] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[1]), 
ir.NewField(base.Pos, nil, nil, typs[6])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])}) + typs[9] = NewFuncType(nil, nil, nil) + typs[10] = types.Types[types.TINTER] + typs[11] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[10])}, nil) + typs[12] = types.Types[types.TINT32] + typs[13] = types.NewPtr(typs[12]) + typs[14] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[13])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[10])}) + typs[15] = types.Types[types.TINT] + typs[16] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[15])}, nil) + typs[17] = types.Types[types.TUINT] + typs[18] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[17]), ir.NewField(base.Pos, nil, nil, typs[15])}, nil) + typs[19] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}, nil) + typs[20] = types.Types[types.TFLOAT64] + typs[21] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}, nil) + typs[22] = types.Types[types.TINT64] + typs[23] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22])}, nil) + typs[24] = types.Types[types.TUINT64] + typs[25] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24])}, nil) + typs[26] = types.Types[types.TCOMPLEX128] + typs[27] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[26])}, nil) + typs[28] = types.Types[types.TSTRING] + typs[29] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}, nil) + typs[30] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2])}, nil) + typs[31] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5])}, nil) + typs[32] = types.NewArray(typs[0], 32) + typs[33] = types.NewPtr(typs[32]) + typs[34] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) + typs[35] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) + typs[36] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) + typs[37] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) + typs[38] = types.NewSlice(typs[28]) + typs[39] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[38])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) + typs[40] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15])}) + typs[41] = types.NewArray(typs[0], 4) + typs[42] = types.NewPtr(typs[41]) + typs[43] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, 
nil, nil, typs[42]), ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) + typs[44] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) + typs[45] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) + typs[46] = types.RuneType + typs[47] = types.NewSlice(typs[46]) + typs[48] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[47])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}) + typs[49] = types.NewSlice(typs[0]) + typs[50] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[49])}) + typs[51] = types.NewArray(typs[46], 32) + typs[52] = types.NewPtr(typs[51]) + typs[53] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[52]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[47])}) + typs[54] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[5])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15])}) + typs[55] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[46]), ir.NewField(base.Pos, nil, nil, typs[15])}) + typs[56] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15])}) + typs[57] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2])}) + typs[58] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])}) + typs[59] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2])}) + typs[60] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2]), ir.NewField(base.Pos, nil, nil, typs[6])}) + typs[61] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[1])}, nil) + typs[62] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1])}, nil) + typs[63] = types.NewPtr(typs[5]) + typs[64] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[63]), ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[7])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) + typs[65] = types.Types[types.TUINT32] + typs[66] = NewFuncType(nil, nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[65])}) + typs[67] = types.NewMap(typs[2], typs[2]) + typs[68] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[22]), ir.NewField(base.Pos, nil, nil, typs[3])}, 
[]*ir.Field{ir.NewField(base.Pos, nil, nil, typs[67])}) + typs[69] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[67])}) + typs[70] = NewFuncType(nil, nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[67])}) + typs[71] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])}) + typs[72] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])}) + typs[73] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[1])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])}) + typs[74] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[6])}) + typs[75] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[6])}) + typs[76] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[1])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[6])}) + typs[77] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil) + typs[78] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[2])}, nil) + typs[79] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])}, nil) + typs[80] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67])}, nil) + typs[81] = types.NewChan(typs[2], types.Cboth) + typs[82] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[81])}) + typs[83] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[81])}) + typs[84] = types.NewChan(typs[2], types.Crecv) + typs[85] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[84]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil) + typs[86] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[84]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) + typs[87] = types.NewChan(typs[2], types.Csend) + typs[88] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[87]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil) + typs[89] = types.NewArray(typs[0], 3) + typs[90] = 
NewStructType([]*ir.Field{ir.NewField(base.Pos, Lookup("enabled"), nil, typs[6]), ir.NewField(base.Pos, Lookup("pad"), nil, typs[89]), ir.NewField(base.Pos, Lookup("needed"), nil, typs[6]), ir.NewField(base.Pos, Lookup("cgo"), nil, typs[6]), ir.NewField(base.Pos, Lookup("alignme"), nil, typs[24])}) + typs[91] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil) + typs[92] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil) + typs[93] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15])}) + typs[94] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[87]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) + typs[95] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[84])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) + typs[96] = types.NewPtr(typs[6]) + typs[97] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[96]), ir.NewField(base.Pos, nil, nil, typs[84])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) + typs[98] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[63])}, nil) + typs[99] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[63]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[6])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[6])}) + typs[100] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])}) + typs[101] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[22]), ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])}) + typs[102] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[7])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])}) + typs[103] = types.NewSlice(typs[2]) + typs[104] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[103]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[103])}) + typs[105] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil) + typs[106] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil) + typs[107] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[5])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) 
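// To read these entries: typs[107] just above is memequal's signature,
// func(*any, *any, uintptr) bool, since typs[3] = *any, typs[5] = uintptr,
// and typs[6] = bool; "any" is the placeholder that SubstArgTypes later
// replaces with concrete types at each call site. Likewise
// {"makeslicecopy", funcTag, 102} pairs with typs[102] to give
// func(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer,
// exactly the signature the makeslicecopy lowering in walkexpr expects.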
+ typs[108] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) + typs[109] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[7])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}) + typs[110] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[5])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5])}) + typs[111] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[5])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5])}) + typs[112] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22]), ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22])}) + typs[113] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24]), ir.NewField(base.Pos, nil, nil, typs[24])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24])}) + typs[114] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22])}) + typs[115] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24])}) + typs[116] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[65])}) + typs[117] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}) + typs[118] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}) + typs[119] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[65])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}) + typs[120] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[26]), ir.NewField(base.Pos, nil, nil, typs[26])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[26])}) + typs[121] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil) + typs[122] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil) + typs[123] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil) + typs[124] = types.NewSlice(typs[7]) + typs[125] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[124])}, nil) + typs[126] = types.Types[types.TUINT8] + typs[127] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[126]), ir.NewField(base.Pos, nil, nil, typs[126])}, nil) + typs[128] = types.Types[types.TUINT16] + typs[129] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[128]), ir.NewField(base.Pos, nil, nil, typs[128])}, nil) + typs[130] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[65]), ir.NewField(base.Pos, nil, nil, typs[65])}, nil) + typs[131] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24]), ir.NewField(base.Pos, nil, nil, typs[24])}, nil) + return typs[:] +} diff --git a/src/cmd/compile/internal/gc/builtin/runtime.go 
b/src/cmd/compile/internal/typecheck/builtin/runtime.go similarity index 100% rename from src/cmd/compile/internal/gc/builtin/runtime.go rename to src/cmd/compile/internal/typecheck/builtin/runtime.go diff --git a/src/cmd/compile/internal/gc/builtin_test.go b/src/cmd/compile/internal/typecheck/builtin_test.go similarity index 97% rename from src/cmd/compile/internal/gc/builtin_test.go rename to src/cmd/compile/internal/typecheck/builtin_test.go index df15ca5c7d2f0..cc8d49730aaea 100644 --- a/src/cmd/compile/internal/gc/builtin_test.go +++ b/src/cmd/compile/internal/typecheck/builtin_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc_test +package typecheck import ( "bytes" diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/typecheck/const.go similarity index 85% rename from src/cmd/compile/internal/gc/const.go rename to src/cmd/compile/internal/typecheck/const.go index ad27f3ea44582..54d70cb8350ca 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/typecheck/const.go @@ -2,13 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package typecheck import ( - "cmd/compile/internal/base" - "cmd/compile/internal/ir" - "cmd/compile/internal/types" - "cmd/internal/src" "fmt" "go/constant" "go/token" @@ -16,6 +12,11 @@ import ( "math/big" "strings" "unicode" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/src" ) func roundFloat(v constant.Value, sz int64) constant.Value { @@ -61,7 +62,7 @@ func trunccmplxlit(v constant.Value, t *types.Type) constant.Value { // TODO(mdempsky): Replace these with better APIs. func convlit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) } -func defaultlit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) } +func DefaultLit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) } // convlit1 converts an untyped expression n to type t. If n already // has a type, convlit1 has no effect. @@ -134,7 +135,7 @@ func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.OREAL, ir.OIMAG: ot := operandType(n.Op(), t) if ot == nil { - n = defaultlit(n, nil) + n = DefaultLit(n, nil) break } @@ -150,7 +151,7 @@ func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT, ir.OOROR, ir.OANDAND, ir.OCOMPLEX: ot := operandType(n.Op(), t) if ot == nil { - n = defaultlit(n, nil) + n = DefaultLit(n, nil) break } @@ -387,11 +388,11 @@ var tokenForOp = [...]token.Token{ ir.ORSH: token.SHR, } -// evalConst returns a constant-evaluated expression equivalent to n. -// If n is not a constant, evalConst returns n. -// Otherwise, evalConst returns a new OLITERAL with the same value as n, +// EvalConst returns a constant-evaluated expression equivalent to n. +// If n is not a constant, EvalConst returns n. +// Otherwise, EvalConst returns a new OLITERAL with the same value as n, // and with .Orig pointing back to n. -func evalConst(n ir.Node) ir.Node { +func EvalConst(n ir.Node) ir.Node { // Pick off just the opcodes that can be constant evaluated. 
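
An aside on the folding EvalConst performs: all of the arithmetic is delegated to go/constant, the same package go/types uses, which is part of why these helpers are now exported under go/types-style names. A minimal, self-contained sketch of that folding outside the compiler (illustrative only):

    package main

    import (
        "fmt"
        "go/constant"
        "go/token"
    )

    func main() {
        // OADD folds via constant.BinaryOp, as in the OADD case below.
        x := constant.MakeInt64(2)
        y := constant.MakeInt64(3)
        fmt.Println(constant.BinaryOp(x, token.ADD, y)) // 5

        // Integer division passes token.QUO_ASSIGN to request truncated
        // (Go-style) division instead of exact rational division.
        seven, two := constant.MakeInt64(7), constant.MakeInt64(2)
        fmt.Println(constant.BinaryOp(seven, token.QUO_ASSIGN, two)) // 3

        // Comparisons go through constant.Compare, as in OrigBool.
        fmt.Println(constant.Compare(x, token.LSS, y)) // true
    }
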
switch n.Op() { case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT: @@ -402,7 +403,7 @@ func evalConst(n ir.Node) ir.Node { if n.Type().IsUnsigned() { prec = uint(n.Type().Size() * 8) } - return origConst(n, constant.UnaryOp(tokenForOp[n.Op()], nl.Val(), prec)) + return OrigConst(n, constant.UnaryOp(tokenForOp[n.Op()], nl.Val(), prec)) } case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT: @@ -427,21 +428,21 @@ func evalConst(n ir.Node) ir.Node { if n.Op() == ir.ODIV && n.Type().IsInteger() { tok = token.QUO_ASSIGN // integer division } - return origConst(n, constant.BinaryOp(nl.Val(), tok, rval)) + return OrigConst(n, constant.BinaryOp(nl.Val(), tok, rval)) } case ir.OOROR, ir.OANDAND: n := n.(*ir.LogicalExpr) nl, nr := n.X, n.Y if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL { - return origConst(n, constant.BinaryOp(nl.Val(), tokenForOp[n.Op()], nr.Val())) + return OrigConst(n, constant.BinaryOp(nl.Val(), tokenForOp[n.Op()], nr.Val())) } case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: n := n.(*ir.BinaryExpr) nl, nr := n.X, n.Y if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL { - return origBoolConst(n, constant.Compare(nl.Val(), tokenForOp[n.Op()], nr.Val())) + return OrigBool(n, constant.Compare(nl.Val(), tokenForOp[n.Op()], nr.Val())) } case ir.OLSH, ir.ORSH: @@ -456,14 +457,14 @@ func evalConst(n ir.Node) ir.Node { n.SetType(nil) break } - return origConst(n, constant.Shift(toint(nl.Val()), tokenForOp[n.Op()], uint(s))) + return OrigConst(n, constant.Shift(toint(nl.Val()), tokenForOp[n.Op()], uint(s))) } case ir.OCONV, ir.ORUNESTR: n := n.(*ir.ConvExpr) nl := n.X if ir.OKForConst[n.Type().Kind()] && nl.Op() == ir.OLITERAL { - return origConst(n, convertVal(nl.Val(), n.Type(), true)) + return OrigConst(n, convertVal(nl.Val(), n.Type(), true)) } case ir.OCONVNOP: @@ -472,7 +473,7 @@ func evalConst(n ir.Node) ir.Node { if ir.OKForConst[n.Type().Kind()] && nl.Op() == ir.OLITERAL { // set so n.Orig gets OCONV instead of OCONVNOP n.SetOp(ir.OCONV) - return origConst(n, nl.Val()) + return OrigConst(n, nl.Val()) } case ir.OADDSTR: @@ -494,7 +495,7 @@ func evalConst(n ir.Node) ir.Node { for _, c := range s { strs = append(strs, ir.StringVal(c)) } - return origConst(n, constant.MakeString(strings.Join(strs, ""))) + return OrigConst(n, constant.MakeString(strings.Join(strs, ""))) } newList := make([]ir.Node, 0, need) for i := 0; i < len(s); i++ { @@ -509,7 +510,7 @@ func evalConst(n ir.Node) ir.Node { nl := ir.Copy(n).(*ir.AddStringExpr) nl.List.Set(s[i:i2]) - newList = append(newList, origConst(nl, constant.MakeString(strings.Join(strs, "")))) + newList = append(newList, OrigConst(nl, constant.MakeString(strings.Join(strs, "")))) i = i2 - 1 } else { newList = append(newList, s[i]) @@ -526,37 +527,37 @@ func evalConst(n ir.Node) ir.Node { switch nl.Type().Kind() { case types.TSTRING: if ir.IsConst(nl, constant.String) { - return origIntConst(n, int64(len(ir.StringVal(nl)))) + return OrigInt(n, int64(len(ir.StringVal(nl)))) } case types.TARRAY: if !anyCallOrChan(nl) { - return origIntConst(n, nl.Type().NumElem()) + return OrigInt(n, nl.Type().NumElem()) } } case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF: n := n.(*ir.UnaryExpr) - return origIntConst(n, evalunsafe(n)) + return OrigInt(n, evalunsafe(n)) case ir.OREAL: n := n.(*ir.UnaryExpr) nl := n.X if nl.Op() == ir.OLITERAL { - return origConst(n, constant.Real(nl.Val())) + return OrigConst(n, constant.Real(nl.Val())) } case ir.OIMAG: n := n.(*ir.UnaryExpr) nl := n.X if nl.Op() == ir.OLITERAL { - return 
origConst(n, constant.Imag(nl.Val())) + return OrigConst(n, constant.Imag(nl.Val())) } case ir.OCOMPLEX: n := n.(*ir.BinaryExpr) nl, nr := n.X, n.Y if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL { - return origConst(n, makeComplex(nl.Val(), nr.Val())) + return OrigConst(n, makeComplex(nl.Val(), nr.Val())) } } @@ -598,8 +599,8 @@ var overflowNames = [...]string{ ir.OBITNOT: "bitwise complement", } -// origConst returns an OLITERAL with orig n and value v. -func origConst(n ir.Node, v constant.Value) ir.Node { +// OrigConst returns an OLITERAL with orig n and value v. +func OrigConst(n ir.Node, v constant.Value) ir.Node { lno := ir.SetPos(n) v = convertVal(v, n.Type(), false) base.Pos = lno @@ -623,12 +624,12 @@ func origConst(n ir.Node, v constant.Value) ir.Node { return ir.NewConstExpr(v, n) } -func origBoolConst(n ir.Node, v bool) ir.Node { - return origConst(n, constant.MakeBool(v)) +func OrigBool(n ir.Node, v bool) ir.Node { + return OrigConst(n, constant.MakeBool(v)) } -func origIntConst(n ir.Node, v int64) ir.Node { - return origConst(n, constant.MakeInt64(v)) +func OrigInt(n ir.Node, v int64) ir.Node { + return OrigConst(n, constant.MakeInt64(v)) } // defaultlit on both nodes simultaneously; @@ -722,12 +723,12 @@ func defaultType(t *types.Type) *types.Type { return nil } -// indexconst checks if Node n contains a constant expression +// IndexConst checks if Node n contains a constant expression // representable as a non-negative int and returns its value. // If n is not a constant expression, not representable as an // integer, or negative, it returns -1. If n is too large, it // returns -2. -func indexconst(n ir.Node) int64 { +func IndexConst(n ir.Node) int64 { if n.Op() != ir.OLITERAL { return -1 } @@ -862,3 +863,82 @@ func nodeAndVal(n ir.Node) string { } return show } + +// evalunsafe evaluates a package unsafe operation and returns the result. +func evalunsafe(n ir.Node) int64 { + switch n.Op() { + case ir.OALIGNOF, ir.OSIZEOF: + n := n.(*ir.UnaryExpr) + n.X = Expr(n.X) + n.X = DefaultLit(n.X, nil) + tr := n.X.Type() + if tr == nil { + return 0 + } + types.CalcSize(tr) + if n.Op() == ir.OALIGNOF { + return int64(tr.Align) + } + return tr.Width + + case ir.OOFFSETOF: + // must be a selector. + n := n.(*ir.UnaryExpr) + if n.X.Op() != ir.OXDOT { + base.Errorf("invalid expression %v", n) + return 0 + } + sel := n.X.(*ir.SelectorExpr) + + // Remember base of selector to find it back after dot insertion. + // Since r->left may be mutated by typechecking, check it explicitly + // first to track it correctly. + sel.X = Expr(sel.X) + sbase := sel.X + + tsel := Expr(sel) + n.X = tsel + if tsel.Type() == nil { + return 0 + } + switch tsel.Op() { + case ir.ODOT, ir.ODOTPTR: + break + case ir.OCALLPART: + base.Errorf("invalid expression %v: argument is a method value", n) + return 0 + default: + base.Errorf("invalid expression %v", n) + return 0 + } + + // Sum offsets for dots until we reach sbase. + var v int64 + var next ir.Node + for r := tsel; r != sbase; r = next { + switch r.Op() { + case ir.ODOTPTR: + // For Offsetof(s.f), s may itself be a pointer, + // but accessing f must not otherwise involve + // indirection via embedded pointer types. 
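
The OOFFSETOF case here walks the selector chain produced by dot insertion, summing one offset per ODOT step and rejecting any ODOTPTR step other than the base, since Offsetof must not reach through an embedded pointer. The resulting arithmetic is observable with plain unsafe.Offsetof (an illustration, not compiler code):

    package main

    import (
        "fmt"
        "unsafe"
    )

    type Inner struct {
        A int32
        B int64
    }

    type Outer struct {
        X int32
        Inner // embedded by value, so Offsetof may traverse it
    }

    func main() {
        var o Outer
        // For a promoted field, the offsets of each implicit dot sum up,
        // exactly like the loop over ODOT nodes above.
        fmt.Println(unsafe.Offsetof(o.B) ==
            unsafe.Offsetof(o.Inner)+unsafe.Offsetof(o.Inner.B)) // true
    }
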
+ r := r.(*ir.SelectorExpr) + if r.X != sbase { + base.Errorf("invalid expression %v: selector implies indirection of embedded %v", n, r.X) + return 0 + } + fallthrough + case ir.ODOT: + r := r.(*ir.SelectorExpr) + v += r.Offset + next = r.X + default: + ir.Dump("unsafenmagic", tsel) + base.Fatalf("impossible %v node after dot insertion", r.Op()) + } + } + return v + } + + base.Fatalf("unexpected op %v", n.Op()) + return 0 +} diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go new file mode 100644 index 0000000000000..9f66d0fa17be0 --- /dev/null +++ b/src/cmd/compile/internal/typecheck/dcl.go @@ -0,0 +1,705 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typecheck + +import ( + "fmt" + "strconv" + "strings" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +var DeclContext ir.Class // PEXTERN/PAUTO + +func AssignDefn(left []ir.Node, defn ir.Node) { + for _, n := range left { + if n.Sym() != nil { + n.Sym().SetUniq(true) + } + } + + var nnew, nerr int + for i, n := range left { + if ir.IsBlank(n) { + continue + } + if !assignableName(n) { + base.ErrorfAt(defn.Pos(), "non-name %v on left side of :=", n) + nerr++ + continue + } + + if !n.Sym().Uniq() { + base.ErrorfAt(defn.Pos(), "%v repeated on left side of :=", n.Sym()) + n.SetDiag(true) + nerr++ + continue + } + + n.Sym().SetUniq(false) + if n.Sym().Block == types.Block { + continue + } + + nnew++ + n := NewName(n.Sym()) + Declare(n, DeclContext) + n.Defn = defn + defn.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n)) + left[i] = n + } + + if nnew == 0 && nerr == 0 { + base.ErrorfAt(defn.Pos(), "no new variables on left side of :=") + } +} + +// := declarations +func assignableName(n ir.Node) bool { + switch n.Op() { + case ir.ONAME, + ir.ONONAME, + ir.OPACK, + ir.OTYPE, + ir.OLITERAL: + return n.Sym() != nil + } + + return false +} + +func DeclFunc(sym *types.Sym, tfn ir.Ntype) *ir.Func { + if tfn.Op() != ir.OTFUNC { + base.Fatalf("expected OTFUNC node, got %v", tfn) + } + + fn := ir.NewFunc(base.Pos) + fn.Nname = ir.NewFuncNameAt(base.Pos, sym, fn) + fn.Nname.Defn = fn + fn.Nname.Ntype = tfn + ir.MarkFunc(fn.Nname) + StartFuncBody(fn) + fn.Nname.Ntype = typecheckNtype(fn.Nname.Ntype) + return fn +} + +// declare variables from grammar +// new_name_list (type | [type] = expr_list) +func DeclVars(vl []*ir.Name, t ir.Ntype, el []ir.Node) []ir.Node { + var init []ir.Node + doexpr := len(el) > 0 + + if len(el) == 1 && len(vl) > 1 { + e := el[0] + as2 := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) + as2.Rhs = []ir.Node{e} + for _, v := range vl { + as2.Lhs.Append(v) + Declare(v, DeclContext) + v.Ntype = t + v.Defn = as2 + if ir.CurFunc != nil { + init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v)) + } + } + + return append(init, as2) + } + + for i, v := range vl { + var e ir.Node + if doexpr { + if i >= len(el) { + base.Errorf("assignment mismatch: %d variables but %d values", len(vl), len(el)) + break + } + e = el[i] + } + + Declare(v, DeclContext) + v.Ntype = t + + if e != nil || ir.CurFunc != nil || ir.IsBlank(v) { + if ir.CurFunc != nil { + init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v)) + } + as := ir.NewAssignStmt(base.Pos, v, e) + init = append(init, as) + if e != nil { + v.Defn = as + } + } + } + + if len(el) > len(vl) { + base.Errorf("assignment mismatch: %d variables but %d 
values", len(vl), len(el)) + } + return init +} + +// Declare records that Node n declares symbol n.Sym in the specified +// declaration context. +func Declare(n *ir.Name, ctxt ir.Class) { + if ir.IsBlank(n) { + return + } + + s := n.Sym() + + // kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later. + if !inimport && !TypecheckAllowed && s.Pkg != types.LocalPkg { + base.ErrorfAt(n.Pos(), "cannot declare name %v", s) + } + + gen := 0 + if ctxt == ir.PEXTERN { + if s.Name == "init" { + base.ErrorfAt(n.Pos(), "cannot declare init - must be func") + } + if s.Name == "main" && s.Pkg.Name == "main" { + base.ErrorfAt(n.Pos(), "cannot declare main - must be func") + } + Target.Externs = append(Target.Externs, n) + } else { + if ir.CurFunc == nil && ctxt == ir.PAUTO { + base.Pos = n.Pos() + base.Fatalf("automatic outside function") + } + if ir.CurFunc != nil && ctxt != ir.PFUNC && n.Op() == ir.ONAME { + ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n) + } + if n.Op() == ir.OTYPE { + declare_typegen++ + gen = declare_typegen + } else if n.Op() == ir.ONAME && ctxt == ir.PAUTO && !strings.Contains(s.Name, "·") { + vargen++ + gen = vargen + } + types.Pushdcl(s) + n.Curfn = ir.CurFunc + } + + if ctxt == ir.PAUTO { + n.SetFrameOffset(0) + } + + if s.Block == types.Block { + // functype will print errors about duplicate function arguments. + // Don't repeat the error here. + if ctxt != ir.PPARAM && ctxt != ir.PPARAMOUT { + Redeclared(n.Pos(), s, "in this block") + } + } + + s.Block = types.Block + s.Lastlineno = base.Pos + s.Def = n + n.Vargen = int32(gen) + n.Class_ = ctxt + if ctxt == ir.PFUNC { + n.Sym().SetFunc(true) + } + + autoexport(n, ctxt) +} + +// Export marks n for export (or reexport). +func Export(n *ir.Name) { + if n.Sym().OnExportList() { + return + } + n.Sym().SetOnExportList(true) + + if base.Flag.E != 0 { + fmt.Printf("export symbol %v\n", n.Sym()) + } + + Target.Exports = append(Target.Exports, n) +} + +// Redeclared emits a diagnostic about symbol s being redeclared at pos. +func Redeclared(pos src.XPos, s *types.Sym, where string) { + if !s.Lastlineno.IsKnown() { + pkgName := DotImportRefs[s.Def.(*ir.Ident)] + base.ErrorfAt(pos, "%v redeclared %s\n"+ + "\t%v: previous declaration during import %q", s, where, base.FmtPos(pkgName.Pos()), pkgName.Pkg.Path) + } else { + prevPos := s.Lastlineno + + // When an import and a declaration collide in separate files, + // present the import as the "redeclared", because the declaration + // is visible where the import is, but not vice versa. + // See issue 4510. + if s.Def == nil { + pos, prevPos = prevPos, pos + } + + base.ErrorfAt(pos, "%v redeclared %s\n"+ + "\t%v: previous declaration", s, where, base.FmtPos(prevPos)) + } +} + +// declare the function proper +// and declare the arguments. +// called in extern-declaration context +// returns in auto-declaration context. +func StartFuncBody(fn *ir.Func) { + // change the declaration context from extern to auto + funcStack = append(funcStack, funcStackEnt{ir.CurFunc, DeclContext}) + ir.CurFunc = fn + DeclContext = ir.PAUTO + + types.Markdcl() + + if fn.Nname.Ntype != nil { + funcargs(fn.Nname.Ntype.(*ir.FuncType)) + } else { + funcargs2(fn.Type()) + } +} + +// finish the body. +// called in auto-declaration context. +// returns in extern-declaration context. 
+func FinishFuncBody() { + // change the declaration context from auto to previous context + types.Popdcl() + var e funcStackEnt + funcStack, e = funcStack[:len(funcStack)-1], funcStack[len(funcStack)-1] + ir.CurFunc, DeclContext = e.curfn, e.dclcontext +} + +func CheckFuncStack() { + if len(funcStack) != 0 { + base.Fatalf("funcStack is non-empty: %v", len(funcStack)) + } +} + +// turn a parsed function declaration into a type +func NewFuncType(nrecv *ir.Field, nparams, nresults []*ir.Field) *types.Type { + funarg := func(n *ir.Field) *types.Field { + lno := base.Pos + base.Pos = n.Pos + + if n.Ntype != nil { + n.Type = typecheckNtype(n.Ntype).Type() + n.Ntype = nil + } + + f := types.NewField(n.Pos, n.Sym, n.Type) + f.SetIsDDD(n.IsDDD) + if n.Decl != nil { + n.Decl.SetType(f.Type) + f.Nname = n.Decl + } + + base.Pos = lno + return f + } + funargs := func(nn []*ir.Field) []*types.Field { + res := make([]*types.Field, len(nn)) + for i, n := range nn { + res[i] = funarg(n) + } + return res + } + + var recv *types.Field + if nrecv != nil { + recv = funarg(nrecv) + } + + t := types.NewSignature(types.LocalPkg, recv, funargs(nparams), funargs(nresults)) + checkdupfields("argument", t.Recvs().FieldSlice(), t.Params().FieldSlice(), t.Results().FieldSlice()) + return t +} + +// convert a parsed id/type list into +// a type for struct/interface/arglist +func NewStructType(l []*ir.Field) *types.Type { + lno := base.Pos + + fields := make([]*types.Field, len(l)) + for i, n := range l { + base.Pos = n.Pos + + if n.Ntype != nil { + n.Type = typecheckNtype(n.Ntype).Type() + n.Ntype = nil + } + f := types.NewField(n.Pos, n.Sym, n.Type) + if n.Embedded { + checkembeddedtype(n.Type) + f.Embedded = 1 + } + f.Note = n.Note + fields[i] = f + } + checkdupfields("field", fields) + + base.Pos = lno + return types.NewStruct(types.LocalPkg, fields) +} + +// Add a method, declared as a function. +// - msym is the method symbol +// - t is function type (with receiver) +// Returns a pointer to the existing or added Field; or nil if there's an error. +func addmethod(n *ir.Func, msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field { + if msym == nil { + base.Fatalf("no method symbol") + } + + // get parent type sym + rf := t.Recv() // ptr to this structure + if rf == nil { + base.Errorf("missing receiver") + return nil + } + + mt := types.ReceiverBaseType(rf.Type) + if mt == nil || mt.Sym() == nil { + pa := rf.Type + t := pa + if t != nil && t.IsPtr() { + if t.Sym() != nil { + base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t) + return nil + } + t = t.Elem() + } + + switch { + case t == nil || t.Broke(): + // rely on typecheck having complained before + case t.Sym() == nil: + base.Errorf("invalid receiver type %v (%v is not a defined type)", pa, t) + case t.IsPtr(): + base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t) + case t.IsInterface(): + base.Errorf("invalid receiver type %v (%v is an interface type)", pa, t) + default: + // Should have picked off all the reasons above, + // but just in case, fall back to generic error. 
+ base.Errorf("invalid receiver type %v (%L / %L)", pa, pa, t) + } + return nil + } + + if local && mt.Sym().Pkg != types.LocalPkg { + base.Errorf("cannot define new methods on non-local type %v", mt) + return nil + } + + if msym.IsBlank() { + return nil + } + + if mt.IsStruct() { + for _, f := range mt.Fields().Slice() { + if f.Sym == msym { + base.Errorf("type %v has both field and method named %v", mt, msym) + f.SetBroke(true) + return nil + } + } + } + + for _, f := range mt.Methods().Slice() { + if msym.Name != f.Sym.Name { + continue + } + // types.Identical only checks that incoming and result parameters match, + // so explicitly check that the receiver parameters match too. + if !types.Identical(t, f.Type) || !types.Identical(t.Recv().Type, f.Type.Recv().Type) { + base.Errorf("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t) + } + return f + } + + f := types.NewField(base.Pos, msym, t) + f.Nname = n.Nname + f.SetNointerface(nointerface) + + mt.Methods().Append(f) + return f +} + +func autoexport(n *ir.Name, ctxt ir.Class) { + if n.Sym().Pkg != types.LocalPkg { + return + } + if (ctxt != ir.PEXTERN && ctxt != ir.PFUNC) || DeclContext != ir.PEXTERN { + return + } + if n.Type() != nil && n.Type().IsKind(types.TFUNC) && ir.IsMethod(n) { + return + } + + if types.IsExported(n.Sym().Name) || initname(n.Sym().Name) { + Export(n) + } + if base.Flag.AsmHdr != "" && !n.Sym().Asm() { + n.Sym().SetAsm(true) + Target.Asms = append(Target.Asms, n) + } +} + +// checkdupfields emits errors for duplicately named fields or methods in +// a list of struct or interface types. +func checkdupfields(what string, fss ...[]*types.Field) { + seen := make(map[*types.Sym]bool) + for _, fs := range fss { + for _, f := range fs { + if f.Sym == nil || f.Sym.IsBlank() { + continue + } + if seen[f.Sym] { + base.ErrorfAt(f.Pos, "duplicate %s %s", what, f.Sym.Name) + continue + } + seen[f.Sym] = true + } + } +} + +// structs, functions, and methods. +// they don't belong here, but where do they belong? +func checkembeddedtype(t *types.Type) { + if t == nil { + return + } + + if t.Sym() == nil && t.IsPtr() { + t = t.Elem() + if t.IsInterface() { + base.Errorf("embedded type cannot be a pointer to interface") + } + } + + if t.IsPtr() || t.IsUnsafePtr() { + base.Errorf("embedded type cannot be a pointer") + } else if t.Kind() == types.TFORW && !t.ForwardType().Embedlineno.IsKnown() { + t.ForwardType().Embedlineno = base.Pos + } +} + +// declare individual names - var, typ, const + +var declare_typegen int + +func fakeRecvField() *types.Field { + return types.NewField(src.NoXPos, nil, types.FakeRecvType()) +} + +var funcStack []funcStackEnt // stack of previous values of Curfn/dclcontext + +type funcStackEnt struct { + curfn *ir.Func + dclcontext ir.Class +} + +func funcarg(n *ir.Field, ctxt ir.Class) { + if n.Sym == nil { + return + } + + name := ir.NewNameAt(n.Pos, n.Sym) + n.Decl = name + name.Ntype = n.Ntype + name.SetIsDDD(n.IsDDD) + Declare(name, ctxt) + + vargen++ + n.Decl.Vargen = int32(vargen) +} + +func funcarg2(f *types.Field, ctxt ir.Class) { + if f.Sym == nil { + return + } + n := ir.NewNameAt(f.Pos, f.Sym) + f.Nname = n + n.SetType(f.Type) + n.SetIsDDD(f.IsDDD()) + Declare(n, ctxt) +} + +func funcargs(nt *ir.FuncType) { + if nt.Op() != ir.OTFUNC { + base.Fatalf("funcargs %v", nt.Op()) + } + + // re-start the variable generation number + // we want to use small numbers for the return variables, + // so let them have the chunk starting at 1. 
+ // + // TODO(mdempsky): This is ugly, and only necessary because + // esc.go uses Vargen to figure out result parameters' index + // within the result tuple. + vargen = len(nt.Results) + + // declare the receiver and in arguments. + if nt.Recv != nil { + funcarg(nt.Recv, ir.PPARAM) + } + for _, n := range nt.Params { + funcarg(n, ir.PPARAM) + } + + oldvargen := vargen + vargen = 0 + + // declare the out arguments. + gen := len(nt.Params) + for _, n := range nt.Results { + if n.Sym == nil { + // Name so that escape analysis can track it. ~r stands for 'result'. + n.Sym = LookupNum("~r", gen) + gen++ + } + if n.Sym.IsBlank() { + // Give it a name so we can assign to it during return. ~b stands for 'blank'. + // The name must be different from ~r above because if you have + // func f() (_ int) + // func g() int + // f is allowed to use a plain 'return' with no arguments, while g is not. + // So the two cases must be distinguished. + n.Sym = LookupNum("~b", gen) + gen++ + } + + funcarg(n, ir.PPARAMOUT) + } + + vargen = oldvargen +} + +// Same as funcargs, except run over an already constructed TFUNC. +// This happens during import, where the hidden_fndcl rule has +// used functype directly to parse the function's type. +func funcargs2(t *types.Type) { + if t.Kind() != types.TFUNC { + base.Fatalf("funcargs2 %v", t) + } + + for _, f := range t.Recvs().Fields().Slice() { + funcarg2(f, ir.PPARAM) + } + for _, f := range t.Params().Fields().Slice() { + funcarg2(f, ir.PPARAM) + } + for _, f := range t.Results().Fields().Slice() { + funcarg2(f, ir.PPARAMOUT) + } +} + +func initname(s string) bool { + return s == "init" +} + +func tointerface(nmethods []*ir.Field) *types.Type { + if len(nmethods) == 0 { + return types.Types[types.TINTER] + } + + lno := base.Pos + + methods := make([]*types.Field, len(nmethods)) + for i, n := range nmethods { + base.Pos = n.Pos + if n.Ntype != nil { + n.Type = typecheckNtype(n.Ntype).Type() + n.Ntype = nil + } + methods[i] = types.NewField(n.Pos, n.Sym, n.Type) + } + + base.Pos = lno + return types.NewInterface(types.LocalPkg, methods) +} + +var vargen int + +func Temp(t *types.Type) *ir.Name { + return TempAt(base.Pos, ir.CurFunc, t) +} + +// make a new Node off the books +func TempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name { + if curfn == nil { + base.Fatalf("no curfn for tempAt") + } + if curfn.Op() == ir.OCLOSURE { + ir.Dump("tempAt", curfn) + base.Fatalf("adding tempAt to wrong closure function") + } + if t == nil { + base.Fatalf("tempAt called with nil type") + } + + s := &types.Sym{ + Name: autotmpname(len(curfn.Dcl)), + Pkg: types.LocalPkg, + } + n := ir.NewNameAt(pos, s) + s.Def = n + n.SetType(t) + n.Class_ = ir.PAUTO + n.SetEsc(ir.EscNever) + n.Curfn = curfn + n.SetUsed(true) + n.SetAutoTemp(true) + curfn.Dcl = append(curfn.Dcl, n) + + types.CalcSize(t) + + return n +} + +// autotmpname returns the name for an autotmp variable numbered n. +func autotmpname(n int) string { + // Give each tmp a different name so that they can be registerized. + // Add a preceding . to avoid clashing with legal names. + const prefix = ".autotmp_" + // Start with a buffer big enough to hold a large n. + b := []byte(prefix + " ")[:len(prefix)] + b = strconv.AppendInt(b, int64(n), 10) + return types.InternString(b) +} + +// f is method type, with receiver. +// return function type, receiver as first argument (or not). 
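
autotmpname above avoids fmt.Sprintf on a hot path by appending the decimal digits straight onto a prefixed byte buffer. The same trick stands alone, minus the compiler's string interning (sketch only):

    package main

    import (
        "fmt"
        "strconv"
    )

    func autotmpname(n int) string {
        const prefix = ".autotmp_"
        // Reserve room for the digits up front, then trim back to the prefix.
        b := []byte(prefix + "      ")[:len(prefix)]
        b = strconv.AppendInt(b, int64(n), 10)
        return string(b) // the real code interns this string instead
    }

    func main() {
        fmt.Println(autotmpname(3)) // .autotmp_3
    }
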
+func NewMethodType(f *types.Type, receiver *types.Type) *types.Type { + inLen := f.Params().Fields().Len() + if receiver != nil { + inLen++ + } + in := make([]*ir.Field, 0, inLen) + + if receiver != nil { + d := ir.NewField(base.Pos, nil, nil, receiver) + in = append(in, d) + } + + for _, t := range f.Params().Fields().Slice() { + d := ir.NewField(base.Pos, nil, nil, t.Type) + d.IsDDD = t.IsDDD() + in = append(in, d) + } + + outLen := f.Results().Fields().Len() + out := make([]*ir.Field, 0, outLen) + for _, t := range f.Results().Fields().Slice() { + d := ir.NewField(base.Pos, nil, nil, t.Type) + out = append(out, d) + } + + return NewFuncType(nil, in, out) +} diff --git a/src/cmd/compile/internal/typecheck/export.go b/src/cmd/compile/internal/typecheck/export.go new file mode 100644 index 0000000000000..381a28e3ed489 --- /dev/null +++ b/src/cmd/compile/internal/typecheck/export.go @@ -0,0 +1,79 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typecheck + +import ( + "go/constant" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +// importalias declares symbol s as an imported type alias with type t. +// ipkg is the package being imported +func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name { + return importobj(ipkg, pos, s, ir.OTYPE, ir.PEXTERN, t) +} + +// importconst declares symbol s as an imported constant with type t and value val. +// ipkg is the package being imported +func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val constant.Value) *ir.Name { + n := importobj(ipkg, pos, s, ir.OLITERAL, ir.PEXTERN, t) + n.SetVal(val) + return n +} + +// importfunc declares symbol s as an imported function with type t. +// ipkg is the package being imported +func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name { + n := importobj(ipkg, pos, s, ir.ONAME, ir.PFUNC, t) + + fn := ir.NewFunc(pos) + fn.SetType(t) + n.SetFunc(fn) + fn.Nname = n + + return n +} + +// importobj declares symbol s as an imported object representable by op. +// ipkg is the package being imported +func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) *ir.Name { + n := importsym(ipkg, pos, s, op, ctxt) + n.SetType(t) + if ctxt == ir.PFUNC { + n.Sym().SetFunc(true) + } + return n +} + +func importsym(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class) *ir.Name { + if n := s.PkgDef(); n != nil { + base.Fatalf("importsym of symbol that already exists: %v", n) + } + + n := ir.NewDeclNameAt(pos, op, s) + n.Class_ = ctxt // TODO(mdempsky): Move this into NewDeclNameAt too? + s.SetPkgDef(n) + s.Importdef = ipkg + return n +} + +// importtype returns the named type declared by symbol s. +// If no such type has been declared yet, a forward declaration is returned. +// ipkg is the package being imported +func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *ir.Name { + n := importsym(ipkg, pos, s, ir.OTYPE, ir.PEXTERN) + n.SetType(types.NewNamed(n)) + return n +} + +// importvar declares symbol s as an imported variable with type t. 
+// ipkg is the package being imported +func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name { + return importobj(ipkg, pos, s, ir.ONAME, ir.PEXTERN, t) +} diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go new file mode 100644 index 0000000000000..4675de6cad410 --- /dev/null +++ b/src/cmd/compile/internal/typecheck/func.go @@ -0,0 +1,398 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typecheck + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + + "fmt" +) + +// package all the arguments that match a ... T parameter into a []T. +func MakeDotArgs(typ *types.Type, args []ir.Node) ir.Node { + var n ir.Node + if len(args) == 0 { + n = NodNil() + n.SetType(typ) + } else { + lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ).(ir.Ntype), nil) + lit.List.Append(args...) + lit.SetImplicit(true) + n = lit + } + + n = Expr(n) + if n.Type() == nil { + base.Fatalf("mkdotargslice: typecheck failed") + } + return n +} + +// FixVariadicCall rewrites calls to variadic functions to use an +// explicit ... argument if one is not already present. +func FixVariadicCall(call *ir.CallExpr) { + fntype := call.X.Type() + if !fntype.IsVariadic() || call.IsDDD { + return + } + + vi := fntype.NumParams() - 1 + vt := fntype.Params().Field(vi).Type + + args := call.Args + extra := args[vi:] + slice := MakeDotArgs(vt, extra) + for i := range extra { + extra[i] = nil // allow GC + } + + call.Args.Set(append(args[:vi], slice)) + call.IsDDD = true +} + +// ClosureType returns the struct type used to hold all the information +// needed in the closure for clo (clo must be a OCLOSURE node). +// The address of a variable of the returned type can be cast to a func. +func ClosureType(clo *ir.ClosureExpr) *types.Type { + // Create closure in the form of a composite literal. + // supposing the closure captures an int i and a string s + // and has one float64 argument and no results, + // the generated code looks like: + // + // clos = &struct{.F uintptr; i *int; s *string}{func.1, &i, &s} + // + // The use of the struct provides type information to the garbage + // collector so that it can walk the closure. We could use (in this case) + // [3]unsafe.Pointer instead, but that would leave the gc in the dark. + // The information appears in the binary in the form of type descriptors; + // the struct is unnamed so that closures in multiple packages with the + // same struct type can share the descriptor. + fields := []*ir.Field{ + ir.NewField(base.Pos, Lookup(".F"), nil, types.Types[types.TUINTPTR]), + } + for _, v := range clo.Func.ClosureVars { + typ := v.Type() + if !v.Byval() { + typ = types.NewPtr(typ) + } + fields = append(fields, ir.NewField(base.Pos, v.Sym(), nil, typ)) + } + typ := NewStructType(fields) + typ.SetNoalg(true) + return typ +} + +// PartialCallType returns the struct type used to hold all the information +// needed in the closure for n (n must be a OCALLPART node). +// The address of a variable of the returned type can be cast to a func. 
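
The comment in ClosureType above spells out the record built for a closure capturing i int and s string. Written by hand, the record and the reason a struct beats [3]unsafe.Pointer look like this (illustration, not generated code):

    package main

    import (
        "fmt"
        "unsafe"
    )

    // Equivalent of struct{.F uintptr; i *int; s *string} from the comment.
    type clos struct {
        F uintptr // code pointer, always the first word
        i *int    // captured by reference
        s *string // captured by reference
    }

    func main() {
        // The struct form gives the garbage collector an exact pointer
        // bitmap for the captured variables; a [3]unsafe.Pointer would
        // hide which words really hold pointers.
        fmt.Println(unsafe.Sizeof(clos{})) // 24 on 64-bit platforms
    }
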
+func PartialCallType(n *ir.CallPartExpr) *types.Type { + t := NewStructType([]*ir.Field{ + ir.NewField(base.Pos, Lookup("F"), nil, types.Types[types.TUINTPTR]), + ir.NewField(base.Pos, Lookup("R"), nil, n.X.Type()), + }) + t.SetNoalg(true) + return t +} + +// CaptureVars is called in a separate phase after all typechecking is done. +// It decides whether each variable captured by a closure should be captured +// by value or by reference. +// We use value capturing for values <= 128 bytes that are never reassigned +// after capturing (effectively constant). +func CaptureVars(fn *ir.Func) { + lno := base.Pos + base.Pos = fn.Pos() + cvars := fn.ClosureVars + out := cvars[:0] + for _, v := range cvars { + if v.Type() == nil { + // If v.Type is nil, it means v looked like it + // was going to be used in the closure, but + // isn't. This happens in struct literals like + // s{f: x} where we can't distinguish whether + // f is a field identifier or expression until + // resolving s. + continue + } + out = append(out, v) + + // type check the & of closed variables outside the closure, + // so that the outer frame also grabs them and knows they escape. + types.CalcSize(v.Type()) + + var outer ir.Node + outer = v.Outer + outermost := v.Defn.(*ir.Name) + + // out parameters will be assigned to implicitly upon return. + if outermost.Class_ != ir.PPARAMOUT && !outermost.Name().Addrtaken() && !outermost.Name().Assigned() && v.Type().Width <= 128 { + v.SetByval(true) + } else { + outermost.Name().SetAddrtaken(true) + outer = NodAddr(outer) + } + + if base.Flag.LowerM > 1 { + var name *types.Sym + if v.Curfn != nil && v.Curfn.Nname != nil { + name = v.Curfn.Sym() + } + how := "ref" + if v.Byval() { + how = "value" + } + base.WarnfAt(v.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym(), outermost.Name().Addrtaken(), outermost.Name().Assigned(), int32(v.Type().Width)) + } + + outer = Expr(outer) + fn.ClosureEnter.Append(outer) + } + + fn.ClosureVars = out + base.Pos = lno +} + +// typecheckclosure typechecks an OCLOSURE node. It also creates the named +// function associated with the closure. +// TODO: This creation of the named function should probably really be done in a +// separate pass from type-checking. +func typecheckclosure(clo *ir.ClosureExpr, top int) { + fn := clo.Func + // Set current associated iota value, so iota can be used inside + // function in ConstSpec, see issue #22344 + if x := getIotaValue(); x >= 0 { + fn.Iota = x + } + + fn.ClosureType = check(fn.ClosureType, ctxType) + clo.SetType(fn.ClosureType.Type()) + fn.SetClosureCalled(top&ctxCallee != 0) + + // Do not typecheck fn twice, otherwise, we will end up pushing + // fn to Target.Decls multiple times, causing initLSym called twice. + // See #30709 + if fn.Typecheck() == 1 { + return + } + + for _, ln := range fn.ClosureVars { + n := ln.Defn + if !n.Name().Captured() { + n.Name().SetCaptured(true) + if n.Name().Decldepth == 0 { + base.Fatalf("typecheckclosure: var %v does not have decldepth assigned", n) + } + + // Ignore assignments to the variable in straightline code + // preceding the first capturing by a closure. + if n.Name().Decldepth == decldepth { + n.Name().SetAssigned(false) + } + } + } + + fn.Nname.SetSym(closurename(ir.CurFunc)) + ir.MarkFunc(fn.Nname) + Func(fn) + + // Type check the body now, but only if we're inside a function. 
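
The by-value rule in CaptureVars above compresses to one predicate: copy a captured variable into the closure only when it is not a result parameter, is never address-taken or reassigned after capture, and is at most 128 bytes wide. As a standalone predicate (names are illustrative):

    package main

    import "fmt"

    // byval mirrors the condition in CaptureVars: value capture is legal
    // only for effectively-constant, small variables.
    func byval(isParamOut, addrTaken, assigned bool, width int64) bool {
        return !isParamOut && !addrTaken && !assigned && width <= 128
    }

    func main() {
        fmt.Println(byval(false, false, false, 8))   // true: copy into closure
        fmt.Println(byval(false, false, true, 8))    // false: reassigned after capture
        fmt.Println(byval(false, false, false, 256)) // false: too wide, capture by reference
    }
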
+ // At top level (in a variable initialization: curfn==nil) we're not + // ready to type check code yet; we'll check it later, because the + // underlying closure function we create is added to Target.Decls. + if ir.CurFunc != nil && clo.Type() != nil { + oldfn := ir.CurFunc + ir.CurFunc = fn + olddd := decldepth + decldepth = 1 + Stmts(fn.Body) + decldepth = olddd + ir.CurFunc = oldfn + } + + Target.Decls = append(Target.Decls, fn) +} + +// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck +// because they're a copy of an already checked body. +func ImportedBody(fn *ir.Func) { + lno := ir.SetPos(fn.Nname) + + ImportBody(fn) + + // typecheckinl is only for imported functions; + // their bodies may refer to unsafe as long as the package + // was marked safe during import (which was checked then). + // the ->inl of a local function has been typechecked before caninl copied it. + pkg := fnpkg(fn.Nname) + + if pkg == types.LocalPkg || pkg == nil { + return // typecheckinl on local function + } + + if base.Flag.LowerM > 2 || base.Debug.Export != 0 { + fmt.Printf("typecheck import [%v] %L { %v }\n", fn.Sym(), fn, ir.Nodes(fn.Inl.Body)) + } + + savefn := ir.CurFunc + ir.CurFunc = fn + Stmts(fn.Inl.Body) + ir.CurFunc = savefn + + // During expandInline (which imports fn.Func.Inl.Body), + // declarations are added to fn.Func.Dcl by funcHdr(). Move them + // to fn.Func.Inl.Dcl for consistency with how local functions + // behave. (Append because typecheckinl may be called multiple + // times.) + fn.Inl.Dcl = append(fn.Inl.Dcl, fn.Dcl...) + fn.Dcl = nil + + base.Pos = lno +} + +// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods +// the ->sym can be re-used in the local package, so peel it off the receiver's type. +func fnpkg(fn *ir.Name) *types.Pkg { + if ir.IsMethod(fn) { + // method + rcvr := fn.Type().Recv().Type + + if rcvr.IsPtr() { + rcvr = rcvr.Elem() + } + if rcvr.Sym() == nil { + base.Fatalf("receiver with no sym: [%v] %L (%v)", fn.Sym(), fn, rcvr) + } + return rcvr.Sym().Pkg + } + + // non-method + return fn.Sym().Pkg +} + +// CaptureVarsComplete is set to true when the capturevars phase is done. +var CaptureVarsComplete bool + +// closurename generates a new unique name for a closure within +// outerfunc. +func closurename(outerfunc *ir.Func) *types.Sym { + outer := "glob." + prefix := "func" + gen := &globClosgen + + if outerfunc != nil { + if outerfunc.OClosure != nil { + prefix = "" + } + + outer = ir.FuncName(outerfunc) + + // There may be multiple functions named "_". In those + // cases, we can't use their individual Closgens as it + // would lead to name clashes. + if !ir.IsBlank(outerfunc.Nname) { + gen = &outerfunc.Closgen + } + } + + *gen++ + return Lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen)) +} + +// globClosgen is like Func.Closgen, but for the global scope. +var globClosgen int32 + +// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed +// for partial calls. +func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir.Func { + rcvrtype := dot.X.Type() + sym := ir.MethodSymSuffix(rcvrtype, meth, "-fm") + + if sym.Uniq() { + return sym.Def.(*ir.Func) + } + sym.SetUniq(true) + + savecurfn := ir.CurFunc + saveLineNo := base.Pos + ir.CurFunc = nil + + // Set line number equal to the line number where the method is declared. 
+ var m *types.Field + if lookdot0(meth, rcvrtype, &m, false) == 1 && m.Pos.IsKnown() { + base.Pos = m.Pos + } + // Note: !m.Pos.IsKnown() happens for method expressions where + // the method is implicitly declared. The Error method of the + // built-in error type is one such method. We leave the line + // number at the use of the method expression in this + // case. See issue 29389. + + tfn := ir.NewFuncType(base.Pos, nil, + NewFuncParams(t0.Params(), true), + NewFuncParams(t0.Results(), false)) + + fn := DeclFunc(sym, tfn) + fn.SetDupok(true) + fn.SetNeedctxt(true) + + // Declare and initialize variable holding receiver. + cr := ir.NewClosureRead(rcvrtype, types.Rnd(int64(types.PtrSize), int64(rcvrtype.Align))) + ptr := NewName(Lookup(".this")) + Declare(ptr, ir.PAUTO) + ptr.SetUsed(true) + var body []ir.Node + if rcvrtype.IsPtr() || rcvrtype.IsInterface() { + ptr.SetType(rcvrtype) + body = append(body, ir.NewAssignStmt(base.Pos, ptr, cr)) + } else { + ptr.SetType(types.NewPtr(rcvrtype)) + body = append(body, ir.NewAssignStmt(base.Pos, ptr, NodAddr(cr))) + } + + call := ir.NewCallExpr(base.Pos, ir.OCALL, ir.NewSelectorExpr(base.Pos, ir.OXDOT, ptr, meth), nil) + call.Args.Set(ir.ParamNames(tfn.Type())) + call.IsDDD = tfn.Type().IsVariadic() + if t0.NumResults() != 0 { + ret := ir.NewReturnStmt(base.Pos, nil) + ret.Results = []ir.Node{call} + body = append(body, ret) + } else { + body = append(body, call) + } + + fn.Body.Set(body) + FinishFuncBody() + + Func(fn) + // Need to typecheck the body of the just-generated wrapper. + // typecheckslice() requires that Curfn is set when processing an ORETURN. + ir.CurFunc = fn + Stmts(fn.Body) + sym.Def = fn + Target.Decls = append(Target.Decls, fn) + ir.CurFunc = savecurfn + base.Pos = saveLineNo + + return fn +} + +func typecheckpartialcall(n ir.Node, sym *types.Sym) *ir.CallPartExpr { + switch n.Op() { + case ir.ODOTINTER, ir.ODOTMETH: + break + + default: + base.Fatalf("invalid typecheckpartialcall") + } + dot := n.(*ir.SelectorExpr) + + // Create top-level function. + fn := makepartialcall(dot, dot.Type(), sym) + fn.SetWrapper(true) + + return ir.NewCallPartExpr(dot.Pos(), dot.X, dot.Selection, fn) +} diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go similarity index 99% rename from src/cmd/compile/internal/gc/iexport.go rename to src/cmd/compile/internal/typecheck/iexport.go index fd64b690774d5..4ddee01b5a9c0 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -199,16 +199,11 @@ // they're expected to change much more rapidly, so they're omitted // here. See exportWriter's varExt/funcExt/etc methods for details. -package gc +package typecheck import ( "bufio" "bytes" - "cmd/compile/internal/base" - "cmd/compile/internal/ir" - "cmd/compile/internal/types" - "cmd/internal/goobj" - "cmd/internal/src" "crypto/md5" "encoding/binary" "fmt" @@ -217,6 +212,12 @@ import ( "math/big" "sort" "strings" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/goobj" + "cmd/internal/src" ) // Current indexed export format version. Increase with each format change. @@ -245,7 +246,7 @@ const ( interfaceType ) -func iexport(out *bufio.Writer) { +func WriteExports(out *bufio.Writer) { p := iexporter{ allPkgs: map[*types.Pkg]bool{}, stringIndex: map[string]uint64{}, @@ -455,7 +456,7 @@ func (p *iexporter) doDecl(n *ir.Name) { case ir.OLITERAL: // Constant. // TODO(mdempsky): Do we still need this typecheck? 
If so, why? - n = typecheck(n, ctxExpr).(*ir.Name) + n = Expr(n).(*ir.Name) w.tag('C') w.pos(n.Pos()) w.value(n.Type(), n.Val()) diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go similarity index 97% rename from src/cmd/compile/internal/gc/iimport.go rename to src/cmd/compile/internal/typecheck/iimport.go index e9dc2a3248724..ab43d4f71bceb 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/typecheck/iimport.go @@ -5,16 +5,9 @@ // Indexed package import. // See iexport.go for the export data format. -package gc +package typecheck import ( - "cmd/compile/internal/base" - "cmd/compile/internal/ir" - "cmd/compile/internal/types" - "cmd/internal/bio" - "cmd/internal/goobj" - "cmd/internal/obj" - "cmd/internal/src" "encoding/binary" "fmt" "go/constant" @@ -22,6 +15,14 @@ import ( "math/big" "os" "strings" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/bio" + "cmd/internal/goobj" + "cmd/internal/obj" + "cmd/internal/src" ) // An iimporterAndOffset identifies an importer and an offset within @@ -32,9 +33,9 @@ type iimporterAndOffset struct { } var ( - // declImporter maps from imported identifiers to an importer + // DeclImporter maps from imported identifiers to an importer // and offset where that identifier's declaration can be read. - declImporter = map[*types.Sym]iimporterAndOffset{} + DeclImporter = map[*types.Sym]iimporterAndOffset{} // inlineImporter is like declImporter, but for inline bodies // for function and method symbols. @@ -51,7 +52,7 @@ func expandDecl(n ir.Node) ir.Node { return n.(*ir.Name) } - r := importReaderFor(id.Sym(), declImporter) + r := importReaderFor(id.Sym(), DeclImporter) if r == nil { // Can happen if user tries to reference an undeclared name. return n @@ -60,7 +61,7 @@ func expandDecl(n ir.Node) ir.Node { return r.doDecl(n.Sym()) } -func expandInline(fn *ir.Func) { +func ImportBody(fn *ir.Func) { if fn.Inl.Body != nil { return } @@ -105,7 +106,7 @@ func (r *intReader) uint64() uint64 { return i } -func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) { +func ReadImports(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) { ird := &intReader{in, pkg} version := ird.uint64() @@ -170,8 +171,8 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) s := pkg.Lookup(p.stringAt(ird.uint64())) off := ird.uint64() - if _, ok := declImporter[s]; !ok { - declImporter[s] = iimporterAndOffset{p, off} + if _, ok := DeclImporter[s]; !ok { + DeclImporter[s] = iimporterAndOffset{p, off} } } } @@ -705,9 +706,9 @@ func (r *importReader) doInline(fn *ir.Func) { base.Fatalf("%v already has inline body", fn) } - funchdr(fn) + StartFuncBody(fn) body := r.stmtList() - funcbody() + FinishFuncBody() if body == nil { // // Make sure empty body is not interpreted as @@ -778,7 +779,7 @@ func (r *importReader) caseList(sw ir.Node) []ir.Node { // names after import. That's okay: swt.go only needs // Sym for diagnostics anyway. 
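
The DeclImporter rename above exposes the lazy-import scheme: each imported symbol maps to an importer plus a byte offset into its export data, and the declaration is only decoded on first use by expandDecl/ImportBody. The shape of the scheme, with a string standing in for the export-data reader (hypothetical types, sketch only):

    package main

    import "fmt"

    type importer struct{ data string } // stands in for *iimporter

    type iimporterAndOffset struct {
        p   *importer
        off int
    }

    var declImporter = map[string]iimporterAndOffset{}

    func expand(sym string) string {
        r, ok := declImporter[sym]
        if !ok {
            return "<undeclared>" // caller reports the error
        }
        return r.p.data[r.off:] // decode lazily, starting at the offset
    }

    func main() {
        p := &importer{data: "....func Foo()"}
        declImporter["Foo"] = iimporterAndOffset{p, 4}
        fmt.Println(expand("Foo")) // func Foo()
    }
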
caseVar := ir.NewNameAt(cas.Pos(), r.ident()) - declare(caseVar, dclcontext) + Declare(caseVar, DeclContext) cas.Vars = []ir.Node{caseVar} caseVar.Defn = sw.(*ir.SwitchStmt).Tag } @@ -820,7 +821,7 @@ func (r *importReader) node() ir.Node { pos := r.pos() typ := r.typ() - n := npos(pos, nodnil()) + n := npos(pos, NodNil()) n.SetType(typ) return n @@ -959,7 +960,7 @@ func (r *importReader) node() ir.Node { return ir.NewUnaryExpr(r.pos(), op, r.expr()) case ir.OADDR: - return nodAddrAt(r.pos(), r.expr()) + return NodAddrAt(r.pos(), r.expr()) case ir.ODEREF: return ir.NewStarExpr(r.pos(), r.expr()) @@ -991,7 +992,7 @@ func (r *importReader) node() ir.Node { lhs := ir.NewDeclNameAt(pos, ir.ONAME, r.ident()) lhs.SetType(r.typ()) - declare(lhs, ir.PAUTO) + Declare(lhs, ir.PAUTO) var stmts ir.Nodes stmts.Append(ir.NewDecl(base.Pos, ir.ODCL, lhs)) @@ -1089,12 +1090,12 @@ func (r *importReader) node() ir.Node { var sym *types.Sym pos := r.pos() if label := r.string(); label != "" { - sym = lookup(label) + sym = Lookup(label) } return ir.NewBranchStmt(pos, op, sym) case ir.OLABEL: - return ir.NewLabelStmt(r.pos(), lookup(r.string())) + return ir.NewLabelStmt(r.pos(), Lookup(r.string())) case ir.OEND: return nil diff --git a/src/cmd/compile/internal/gc/mapfile_mmap.go b/src/cmd/compile/internal/typecheck/mapfile_mmap.go similarity index 98% rename from src/cmd/compile/internal/gc/mapfile_mmap.go rename to src/cmd/compile/internal/typecheck/mapfile_mmap.go index 9483688d68034..2f3aa16decfaa 100644 --- a/src/cmd/compile/internal/gc/mapfile_mmap.go +++ b/src/cmd/compile/internal/typecheck/mapfile_mmap.go @@ -4,7 +4,7 @@ // +build darwin dragonfly freebsd linux netbsd openbsd -package gc +package typecheck import ( "os" diff --git a/src/cmd/compile/internal/gc/mapfile_read.go b/src/cmd/compile/internal/typecheck/mapfile_read.go similarity index 96% rename from src/cmd/compile/internal/gc/mapfile_read.go rename to src/cmd/compile/internal/typecheck/mapfile_read.go index c6f68ed5df7a7..4059f261d4932 100644 --- a/src/cmd/compile/internal/gc/mapfile_read.go +++ b/src/cmd/compile/internal/typecheck/mapfile_read.go @@ -4,7 +4,7 @@ // +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd -package gc +package typecheck import ( "io" diff --git a/src/cmd/compile/internal/gc/mkbuiltin.go b/src/cmd/compile/internal/typecheck/mkbuiltin.go similarity index 99% rename from src/cmd/compile/internal/gc/mkbuiltin.go rename to src/cmd/compile/internal/typecheck/mkbuiltin.go index 38aa6016457e2..2a208d960f5f4 100644 --- a/src/cmd/compile/internal/gc/mkbuiltin.go +++ b/src/cmd/compile/internal/typecheck/mkbuiltin.go @@ -33,7 +33,7 @@ func main() { var b bytes.Buffer fmt.Fprintln(&b, "// Code generated by mkbuiltin.go. DO NOT EDIT.") fmt.Fprintln(&b) - fmt.Fprintln(&b, "package gc") + fmt.Fprintln(&b, "package typecheck") fmt.Fprintln(&b) fmt.Fprintln(&b, `import (`) fmt.Fprintln(&b, ` "cmd/compile/internal/ir"`) diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go new file mode 100644 index 0000000000000..889ee06d6e7d6 --- /dev/null +++ b/src/cmd/compile/internal/typecheck/stmt.go @@ -0,0 +1,435 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package typecheck + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +// range +func typecheckrange(n *ir.RangeStmt) { + // Typechecking order is important here: + // 0. first typecheck range expression (slice/map/chan), + // it is evaluated only once and so logically it is not part of the loop. + // 1. typecheck produced values, + // this part can declare new vars and so it must be typechecked before body, + // because body can contain a closure that captures the vars. + // 2. decldepth++ to denote loop body. + // 3. typecheck body. + // 4. decldepth--. + typecheckrangeExpr(n) + + // second half of dance, the first half being typecheckrangeExpr + n.SetTypecheck(1) + ls := n.Vars + for i1, n1 := range ls { + if n1.Typecheck() == 0 { + ls[i1] = AssignExpr(ls[i1]) + } + } + + decldepth++ + Stmts(n.Body) + decldepth-- +} + +func typecheckrangeExpr(n *ir.RangeStmt) { + n.X = Expr(n.X) + + t := n.X.Type() + if t == nil { + return + } + // delicate little dance. see typecheckas2 + ls := n.Vars + for i1, n1 := range ls { + if !ir.DeclaredBy(n1, n) { + ls[i1] = AssignExpr(ls[i1]) + } + } + + if t.IsPtr() && t.Elem().IsArray() { + t = t.Elem() + } + n.SetType(t) + + var t1, t2 *types.Type + toomany := false + switch t.Kind() { + default: + base.ErrorfAt(n.Pos(), "cannot range over %L", n.X) + return + + case types.TARRAY, types.TSLICE: + t1 = types.Types[types.TINT] + t2 = t.Elem() + + case types.TMAP: + t1 = t.Key() + t2 = t.Elem() + + case types.TCHAN: + if !t.ChanDir().CanRecv() { + base.ErrorfAt(n.Pos(), "invalid operation: range %v (receive from send-only type %v)", n.X, n.X.Type()) + return + } + + t1 = t.Elem() + t2 = nil + if len(n.Vars) == 2 { + toomany = true + } + + case types.TSTRING: + t1 = types.Types[types.TINT] + t2 = types.RuneType + } + + if len(n.Vars) > 2 || toomany { + base.ErrorfAt(n.Pos(), "too many variables in range") + } + + var v1, v2 ir.Node + if len(n.Vars) != 0 { + v1 = n.Vars[0] + } + if len(n.Vars) > 1 { + v2 = n.Vars[1] + } + + // this is not only an optimization but also a requirement in the spec. + // "if the second iteration variable is the blank identifier, the range + // clause is equivalent to the same clause with only the first variable + // present." 
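
The spec rule quoted just above is observable directly: a range clause whose second variable is blank is equivalent to one with only the first variable, which is why the code that follows drops v2 before assigning types. For instance:

    package main

    import "fmt"

    func main() {
        s := []string{"a", "b"}
        // Equivalent by the quoted rule; the compiler rewrites the first
        // form into the second by discarding the blank variable.
        for i, _ := range s {
            fmt.Print(i) // 01
        }
        for i := range s {
            fmt.Print(i) // 01
        }
        fmt.Println()
    }
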
+ if ir.IsBlank(v2) { + if v1 != nil { + n.Vars = []ir.Node{v1} + } + v2 = nil + } + + if v1 != nil { + if ir.DeclaredBy(v1, n) { + v1.SetType(t1) + } else if v1.Type() != nil { + if op, why := assignop(t1, v1.Type()); op == ir.OXXX { + base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t1, v1, why) + } + } + checkassign(n, v1) + } + + if v2 != nil { + if ir.DeclaredBy(v2, n) { + v2.SetType(t2) + } else if v2.Type() != nil { + if op, why := assignop(t2, v2.Type()); op == ir.OXXX { + base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t2, v2, why) + } + } + checkassign(n, v2) + } +} + +// select +func typecheckselect(sel *ir.SelectStmt) { + var def ir.Node + lno := ir.SetPos(sel) + Stmts(sel.Init()) + for _, ncase := range sel.Cases { + ncase := ncase.(*ir.CaseStmt) + + if len(ncase.List) == 0 { + // default + if def != nil { + base.ErrorfAt(ncase.Pos(), "multiple defaults in select (first at %v)", ir.Line(def)) + } else { + def = ncase + } + } else if len(ncase.List) > 1 { + base.ErrorfAt(ncase.Pos(), "select cases cannot be lists") + } else { + ncase.List[0] = Stmt(ncase.List[0]) + n := ncase.List[0] + ncase.Comm = n + ncase.List.Set(nil) + oselrecv2 := func(dst, recv ir.Node, colas bool) { + n := ir.NewAssignListStmt(n.Pos(), ir.OSELRECV2, nil, nil) + n.Lhs = []ir.Node{dst, ir.BlankNode} + n.Rhs = []ir.Node{recv} + n.Def = colas + n.SetTypecheck(1) + ncase.Comm = n + } + switch n.Op() { + default: + pos := n.Pos() + if n.Op() == ir.ONAME { + // We don't have the right position for ONAME nodes (see #15459 and + // others). Using ncase.Pos for now as it will provide the correct + // line number (assuming the expression follows the "case" keyword + // on the same line). This matches the approach before 1.10. + pos = ncase.Pos() + } + base.ErrorfAt(pos, "select case must be receive, send or assign recv") + + case ir.OAS: + // convert x = <-c into x, _ = <-c + // remove implicit conversions; the eventual assignment + // will reintroduce them. + n := n.(*ir.AssignStmt) + if r := n.Y; r.Op() == ir.OCONVNOP || r.Op() == ir.OCONVIFACE { + r := r.(*ir.ConvExpr) + if r.Implicit() { + n.Y = r.X + } + } + if n.Y.Op() != ir.ORECV { + base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side") + break + } + oselrecv2(n.X, n.Y, n.Def) + + case ir.OAS2RECV: + n := n.(*ir.AssignListStmt) + if n.Rhs[0].Op() != ir.ORECV { + base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side") + break + } + n.SetOp(ir.OSELRECV2) + + case ir.ORECV: + // convert <-c into _, _ = <-c + n := n.(*ir.UnaryExpr) + oselrecv2(ir.BlankNode, n, false) + + case ir.OSEND: + break + } + } + + Stmts(ncase.Body) + } + + base.Pos = lno +} + +type typeSet struct { + m map[string][]typeSetEntry +} + +func (s *typeSet) add(pos src.XPos, typ *types.Type) { + if s.m == nil { + s.m = make(map[string][]typeSetEntry) + } + + // LongString does not uniquely identify types, so we need to + // disambiguate collisions with types.Identical. + // TODO(mdempsky): Add a method that *is* unique. 
+ ls := typ.LongString() + prevs := s.m[ls] + for _, prev := range prevs { + if types.Identical(typ, prev.typ) { + base.ErrorfAt(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, base.FmtPos(prev.pos)) + return + } + } + s.m[ls] = append(prevs, typeSetEntry{pos, typ}) +} + +type typeSetEntry struct { + pos src.XPos + typ *types.Type +} + +func typecheckExprSwitch(n *ir.SwitchStmt) { + t := types.Types[types.TBOOL] + if n.Tag != nil { + n.Tag = Expr(n.Tag) + n.Tag = DefaultLit(n.Tag, nil) + t = n.Tag.Type() + } + + var nilonly string + if t != nil { + switch { + case t.IsMap(): + nilonly = "map" + case t.Kind() == types.TFUNC: + nilonly = "func" + case t.IsSlice(): + nilonly = "slice" + + case !types.IsComparable(t): + if t.IsStruct() { + base.ErrorfAt(n.Pos(), "cannot switch on %L (struct containing %v cannot be compared)", n.Tag, types.IncomparableField(t).Type) + } else { + base.ErrorfAt(n.Pos(), "cannot switch on %L", n.Tag) + } + t = nil + } + } + + var defCase ir.Node + var cs constSet + for _, ncase := range n.Cases { + ncase := ncase.(*ir.CaseStmt) + ls := ncase.List + if len(ls) == 0 { // default: + if defCase != nil { + base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase)) + } else { + defCase = ncase + } + } + + for i := range ls { + ir.SetPos(ncase) + ls[i] = Expr(ls[i]) + ls[i] = DefaultLit(ls[i], t) + n1 := ls[i] + if t == nil || n1.Type() == nil { + continue + } + + if nilonly != "" && !ir.IsNil(n1) { + base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Tag) + } else if t.IsInterface() && !n1.Type().IsInterface() && !types.IsComparable(n1.Type()) { + base.ErrorfAt(ncase.Pos(), "invalid case %L in switch (incomparable type)", n1) + } else { + op1, _ := assignop(n1.Type(), t) + op2, _ := assignop(t, n1.Type()) + if op1 == ir.OXXX && op2 == ir.OXXX { + if n.Tag != nil { + base.ErrorfAt(ncase.Pos(), "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Tag, n1.Type(), t) + } else { + base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type()) + } + } + } + + // Don't check for duplicate bools. Although the spec allows it, + // (1) the compiler hasn't checked it in the past, so compatibility mandates it, and + // (2) it would disallow useful things like + // case GOARCH == "arm" && GOARM == "5": + // case GOARCH == "arm": + // which would both evaluate to false for non-ARM compiles. + if !n1.Type().IsBoolean() { + cs.add(ncase.Pos(), n1, "case", "switch") + } + } + + Stmts(ncase.Body) + } +} + +func typecheckTypeSwitch(n *ir.SwitchStmt) { + guard := n.Tag.(*ir.TypeSwitchGuard) + guard.X = Expr(guard.X) + t := guard.X.Type() + if t != nil && !t.IsInterface() { + base.ErrorfAt(n.Pos(), "cannot type switch on non-interface value %L", guard.X) + t = nil + } + + // We don't actually declare the type switch's guarded + // declaration itself. So if there are no cases, we won't + // notice that it went unused. 
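
typeSet.add above buckets cases by Type.LongString, a cheap but non-unique key, then confirms real duplicates with types.Identical inside the bucket. The same two-level strategy on plain strings (illustrative only):

    package main

    import "fmt"

    type entry struct{ full string }

    var buckets = map[string][]entry{}

    func add(full string) bool {
        key := full[:1] // deliberately non-unique, like LongString
        for _, prev := range buckets[key] {
            if prev.full == full { // stands in for types.Identical
                return false // duplicate case
            }
        }
        buckets[key] = append(buckets[key], entry{full})
        return true
    }

    func main() {
        fmt.Println(add("int"))  // true
        fmt.Println(add("int"))  // false: duplicate
        fmt.Println(add("int8")) // true: same bucket, distinct type
    }
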
+ if v := guard.Tag; v != nil && !ir.IsBlank(v) && len(n.Cases) == 0 { + base.ErrorfAt(v.Pos(), "%v declared but not used", v.Sym()) + } + + var defCase, nilCase ir.Node + var ts typeSet + for _, ncase := range n.Cases { + ncase := ncase.(*ir.CaseStmt) + ls := ncase.List + if len(ls) == 0 { // default: + if defCase != nil { + base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase)) + } else { + defCase = ncase + } + } + + for i := range ls { + ls[i] = check(ls[i], ctxExpr|ctxType) + n1 := ls[i] + if t == nil || n1.Type() == nil { + continue + } + + var missing, have *types.Field + var ptr int + if ir.IsNil(n1) { // case nil: + if nilCase != nil { + base.ErrorfAt(ncase.Pos(), "multiple nil cases in type switch (first at %v)", ir.Line(nilCase)) + } else { + nilCase = ncase + } + continue + } + if n1.Op() != ir.OTYPE { + base.ErrorfAt(ncase.Pos(), "%L is not a type", n1) + continue + } + if !n1.Type().IsInterface() && !implements(n1.Type(), t, &missing, &have, &ptr) && !missing.Broke() { + if have != nil && !have.Broke() { + base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+ + " (wrong type for %v method)\n\thave %v%S\n\twant %v%S", guard.X, n1.Type(), missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) + } else if ptr != 0 { + base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+ + " (%v method has pointer receiver)", guard.X, n1.Type(), missing.Sym) + } else { + base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+ + " (missing %v method)", guard.X, n1.Type(), missing.Sym) + } + continue + } + + ts.add(ncase.Pos(), n1.Type()) + } + + if len(ncase.Vars) != 0 { + // Assign the clause variable's type. + vt := t + if len(ls) == 1 { + if ls[0].Op() == ir.OTYPE { + vt = ls[0].Type() + } else if !ir.IsNil(ls[0]) { + // Invalid single-type case; + // mark variable as broken. + vt = nil + } + } + + nvar := ncase.Vars[0] + nvar.SetType(vt) + if vt != nil { + nvar = AssignExpr(nvar) + } else { + // Clause variable is broken; prevent typechecking. + nvar.SetTypecheck(1) + nvar.SetWalkdef(1) + } + ncase.Vars[0] = nvar + } + + Stmts(ncase.Body) + } +} + +// typecheckswitch typechecks a switch statement. +func typecheckswitch(n *ir.SwitchStmt) { + Stmts(n.Init()) + if n.Tag != nil && n.Tag.Op() == ir.OTYPESW { + typecheckTypeSwitch(n) + } else { + typecheckExprSwitch(n) + } +} diff --git a/src/cmd/compile/internal/typecheck/subr.go b/src/cmd/compile/internal/typecheck/subr.go new file mode 100644 index 0000000000000..22ebf2a4b3ea2 --- /dev/null +++ b/src/cmd/compile/internal/typecheck/subr.go @@ -0,0 +1,793 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typecheck + +import ( + "fmt" + "sort" + "strconv" + "strings" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +func AssignConv(n ir.Node, t *types.Type, context string) ir.Node { + return assignconvfn(n, t, func() string { return context }) +} + +// DotImportRefs maps idents introduced by importDot back to the +// ir.PkgName they were dot-imported through. +var DotImportRefs map[*ir.Ident]*ir.PkgName + +// LookupNum looks up the symbol starting with prefix and ending with +// the decimal n. If prefix is too long, LookupNum panics. 
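The clause-variable typing later in typecheckTypeSwitch (vt is the clause's single listed type, otherwise the guard's interface type) yields the familiar behavior sketched below:

package main

import "fmt"

func describe(x interface{}) string {
	switch v := x.(type) {
	case int:
		return fmt.Sprintf("int %d", v) // v has type int here
	case string, error:
		return fmt.Sprintf("%v", v) // multi-type clause: v stays interface{}
	default:
		return fmt.Sprintf("%T", v) // default clause: v stays interface{}
	}
}

func main() {
	fmt.Println(describe(42), describe("hi"))
}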
+func LookupNum(prefix string, n int) *types.Sym {
+	var buf [20]byte // plenty long enough for all current users
+	copy(buf[:], prefix)
+	b := strconv.AppendInt(buf[:len(prefix)], int64(n), 10)
+	return types.LocalPkg.LookupBytes(b)
+}
+
+// NewFuncParams returns the list of function parameters built from
+// the fields of the funarg struct tl.
+func NewFuncParams(tl *types.Type, mustname bool) []*ir.Field {
+	var args []*ir.Field
+	gen := 0
+	for _, t := range tl.Fields().Slice() {
+		s := t.Sym
+		if mustname && (s == nil || s.Name == "_") {
+			// invent a name so that we can refer to it in the trampoline
+			s = LookupNum(".anon", gen)
+			gen++
+		}
+		a := ir.NewField(base.Pos, s, nil, t.Type)
+		a.Pos = t.Pos
+		a.IsDDD = t.IsDDD()
+		args = append(args, a)
+	}
+
+	return args
+}
+
+// NewName returns a new ONAME Node associated with symbol s.
+func NewName(s *types.Sym) *ir.Name {
+	n := ir.NewNameAt(base.Pos, s)
+	n.Curfn = ir.CurFunc
+	return n
+}
+
+// NodAddr returns a node representing &n at base.Pos.
+func NodAddr(n ir.Node) *ir.AddrExpr {
+	return NodAddrAt(base.Pos, n)
+}
+
+// NodAddrAt returns a node representing &n at position pos.
+func NodAddrAt(pos src.XPos, n ir.Node) *ir.AddrExpr {
+	return ir.NewAddrExpr(pos, n)
+}
+
+func NodNil() ir.Node {
+	n := ir.NewNilExpr(base.Pos)
+	n.SetType(types.Types[types.TNIL])
+	return n
+}
+
+// AddImplicitDots finds the missing embedded fields in T.field that
+// give the shortest unique addressing and modifies the tree with the
+// missing (implicit) field names.
+func AddImplicitDots(n *ir.SelectorExpr) *ir.SelectorExpr {
+	n.X = check(n.X, ctxType|ctxExpr)
+	if n.X.Diag() {
+		n.SetDiag(true)
+	}
+	t := n.X.Type()
+	if t == nil {
+		return n
+	}
+
+	if n.X.Op() == ir.OTYPE {
+		return n
+	}
+
+	s := n.Sel
+	if s == nil {
+		return n
+	}
+
+	switch path, ambig := dotpath(s, t, nil, false); {
+	case path != nil:
+		// rebuild elided dots
+		for c := len(path) - 1; c >= 0; c-- {
+			dot := ir.NewSelectorExpr(base.Pos, ir.ODOT, n.X, path[c].field.Sym)
+			dot.SetImplicit(true)
+			dot.SetType(path[c].field.Type)
+			n.X = dot
+		}
+	case ambig:
+		base.Errorf("ambiguous selector %v", n)
+		n.X = nil
+	}
+
+	return n
+}
+
+func CalcMethods(t *types.Type) {
+	if t == nil || t.AllMethods().Len() != 0 {
+		return
+	}
+
+	// mark top-level method symbols
+	// so that expand1 doesn't consider them.
+	for _, f := range t.Methods().Slice() {
+		f.Sym.SetUniq(true)
+	}
+
+	// generate all reachable methods
+	slist = slist[:0]
+	expand1(t, true)
+
+	// check each method to be uniquely reachable
+	var ms []*types.Field
+	for i, sl := range slist {
+		slist[i].field = nil
+		sl.field.Sym.SetUniq(false)
+
+		var f *types.Field
+		path, _ := dotpath(sl.field.Sym, t, &f, false)
+		if path == nil {
+			continue
+		}
+
+		// dotpath may have dug out arbitrary fields; we only want methods.
+		if !f.IsMethod() {
+			continue
+		}
+
+		// add it to the base type method list
+		f = f.Copy()
+		f.Embedded = 1 // needs a trampoline
+		for _, d := range path {
+			if d.field.Type.IsPtr() {
+				f.Embedded = 2
+				break
+			}
+		}
+		ms = append(ms, f)
+	}
+
+	for _, f := range t.Methods().Slice() {
+		f.Sym.SetUniq(false)
+	}
+
+	ms = append(ms, t.Methods().Slice()...)
+	sort.Sort(types.MethodsByName(ms))
+	t.AllMethods().Set(ms)
+}
+
+// adddot1 returns the number of fields or methods named s at depth d in Type t.
+// If exactly one exists, it will be returned in *save (if save is not nil),
+// and dotlist will contain the path of embedded fields traversed to find it,
+// in reverse order.
If none exist, more will indicate whether t contains any +// embedded fields at depth d, so callers can decide whether to retry at +// a greater depth. +func adddot1(s *types.Sym, t *types.Type, d int, save **types.Field, ignorecase bool) (c int, more bool) { + if t.Recur() { + return + } + t.SetRecur(true) + defer t.SetRecur(false) + + var u *types.Type + d-- + if d < 0 { + // We've reached our target depth. If t has any fields/methods + // named s, then we're done. Otherwise, we still need to check + // below for embedded fields. + c = lookdot0(s, t, save, ignorecase) + if c != 0 { + return c, false + } + } + + u = t + if u.IsPtr() { + u = u.Elem() + } + if !u.IsStruct() && !u.IsInterface() { + return c, false + } + + for _, f := range u.Fields().Slice() { + if f.Embedded == 0 || f.Sym == nil { + continue + } + if d < 0 { + // Found an embedded field at target depth. + return c, true + } + a, more1 := adddot1(s, f.Type, d, save, ignorecase) + if a != 0 && c == 0 { + dotlist[d].field = f + } + c += a + if more1 { + more = true + } + } + + return c, more +} + +// dotlist is used by adddot1 to record the path of embedded fields +// used to access a target field or method. +// Must be non-nil so that dotpath returns a non-nil slice even if d is zero. +var dotlist = make([]dlist, 10) + +// Convert node n for assignment to type t. +func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node { + if n == nil || n.Type() == nil || n.Type().Broke() { + return n + } + + if t.Kind() == types.TBLANK && n.Type().Kind() == types.TNIL { + base.Errorf("use of untyped nil") + } + + n = convlit1(n, t, false, context) + if n.Type() == nil { + return n + } + if t.Kind() == types.TBLANK { + return n + } + + // Convert ideal bool from comparison to plain bool + // if the next step is non-bool (like interface{}). + if n.Type() == types.UntypedBool && !t.IsBoolean() { + if n.Op() == ir.ONAME || n.Op() == ir.OLITERAL { + r := ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, n) + r.SetType(types.Types[types.TBOOL]) + r.SetTypecheck(1) + r.SetImplicit(true) + n = r + } + } + + if types.Identical(n.Type(), t) { + return n + } + + op, why := assignop(n.Type(), t) + if op == ir.OXXX { + base.Errorf("cannot use %L as type %v in %s%s", n, t, context(), why) + op = ir.OCONV + } + + r := ir.NewConvExpr(base.Pos, op, t, n) + r.SetTypecheck(1) + r.SetImplicit(true) + return r +} + +// Is type src assignment compatible to type dst? +// If so, return op code to use in conversion. +// If not, return OXXX. In this case, the string return parameter may +// hold a reason why. In all other cases, it'll be the empty string. +func assignop(src, dst *types.Type) (ir.Op, string) { + if src == dst { + return ir.OCONVNOP, "" + } + if src == nil || dst == nil || src.Kind() == types.TFORW || dst.Kind() == types.TFORW || src.Underlying() == nil || dst.Underlying() == nil { + return ir.OXXX, "" + } + + // 1. src type is identical to dst. + if types.Identical(src, dst) { + return ir.OCONVNOP, "" + } + + // 2. src and dst have identical underlying types + // and either src or dst is not a named type or + // both are empty interface types. + // For assignable but different non-empty interface types, + // we want to recompute the itab. Recomputing the itab ensures + // that itabs are unique (thus an interface with a compile-time + // type I has an itab with interface type I). + if types.Identical(src.Underlying(), dst.Underlying()) { + if src.IsEmptyInterface() { + // Conversion between two empty interfaces + // requires no code. 
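adddot1's per-depth counting implements Go's promotion rules: a match at a shallower embedding depth wins, and two matches at the same depth are ambiguous. A user-level illustration:

package main

import "fmt"

type A struct{ Name string }
type B struct{ Name string }

type Shallow struct {
	A
	Name string // depth 0: shadows A.Name at depth 1
}

type Deep struct {
	A
	B
	// A.Name and B.Name tie at depth 1, so plain d.Name would be an
	// "ambiguous selector" and must be qualified.
}

func main() {
	s := Shallow{Name: "outer"}
	fmt.Println(s.Name, s.A.Name) // "outer" and the empty promoted field
	d := Deep{}
	fmt.Println(d.A.Name == d.B.Name) // true; d.Name does not compile
}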
+ return ir.OCONVNOP, "" + } + if (src.Sym() == nil || dst.Sym() == nil) && !src.IsInterface() { + // Conversion between two types, at least one unnamed, + // needs no conversion. The exception is nonempty interfaces + // which need to have their itab updated. + return ir.OCONVNOP, "" + } + } + + // 3. dst is an interface type and src implements dst. + if dst.IsInterface() && src.Kind() != types.TNIL { + var missing, have *types.Field + var ptr int + if implements(src, dst, &missing, &have, &ptr) { + // Call itabname so that (src, dst) + // gets added to itabs early, which allows + // us to de-virtualize calls through this + // type/interface pair later. See peekitabs in reflect.go + if types.IsDirectIface(src) && !dst.IsEmptyInterface() { + NeedITab(src, dst) + } + + return ir.OCONVIFACE, "" + } + + // we'll have complained about this method anyway, suppress spurious messages. + if have != nil && have.Sym == missing.Sym && (have.Type.Broke() || missing.Type.Broke()) { + return ir.OCONVIFACE, "" + } + + var why string + if isptrto(src, types.TINTER) { + why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", src) + } else if have != nil && have.Sym == missing.Sym && have.Nointerface() { + why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", src, dst, missing.Sym) + } else if have != nil && have.Sym == missing.Sym { + why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+ + "\t\thave %v%S\n\t\twant %v%S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) + } else if ptr != 0 { + why = fmt.Sprintf(":\n\t%v does not implement %v (%v method has pointer receiver)", src, dst, missing.Sym) + } else if have != nil { + why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)\n"+ + "\t\thave %v%S\n\t\twant %v%S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) + } else { + why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)", src, dst, missing.Sym) + } + + return ir.OXXX, why + } + + if isptrto(dst, types.TINTER) { + why := fmt.Sprintf(":\n\t%v is pointer to interface, not interface", dst) + return ir.OXXX, why + } + + if src.IsInterface() && dst.Kind() != types.TBLANK { + var missing, have *types.Field + var ptr int + var why string + if implements(dst, src, &missing, &have, &ptr) { + why = ": need type assertion" + } + return ir.OXXX, why + } + + // 4. src is a bidirectional channel value, dst is a channel type, + // src and dst have identical element types, and + // either src or dst is not a named type. + if src.IsChan() && src.ChanDir() == types.Cboth && dst.IsChan() { + if types.Identical(src.Elem(), dst.Elem()) && (src.Sym() == nil || dst.Sym() == nil) { + return ir.OCONVNOP, "" + } + } + + // 5. src is the predeclared identifier nil and dst is a nillable type. + if src.Kind() == types.TNIL { + switch dst.Kind() { + case types.TPTR, + types.TFUNC, + types.TMAP, + types.TCHAN, + types.TINTER, + types.TSLICE: + return ir.OCONVNOP, "" + } + } + + // 6. rule about untyped constants - already converted by defaultlit. + + // 7. Any typed value can be assigned to the blank identifier. + if dst.Kind() == types.TBLANK { + return ir.OCONVNOP, "" + } + + return ir.OXXX, "" +} + +// Can we convert a value of type src to a value of type dst? +// If so, return op code to use in conversion (maybe OCONVNOP). +// If not, return OXXX. In this case, the string return parameter may +// hold a reason why. In all other cases, it'll be the empty string. 
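Rules 4 and 5 of assignop above correspond to these ordinary assignments:

package main

func main() {
	c := make(chan int)     // bidirectional channel
	var send chan<- int = c // rule 4: identical element types, unnamed dst
	var recv <-chan int = c
	var p *int = nil // rule 5: nil is assignable to any nillable type
	_, _, _ = send, recv, p
}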
+// srcConstant indicates whether the value of type src is a constant. +func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) { + if src == dst { + return ir.OCONVNOP, "" + } + if src == nil || dst == nil { + return ir.OXXX, "" + } + + // Conversions from regular to go:notinheap are not allowed + // (unless it's unsafe.Pointer). These are runtime-specific + // rules. + // (a) Disallow (*T) to (*U) where T is go:notinheap but U isn't. + if src.IsPtr() && dst.IsPtr() && dst.Elem().NotInHeap() && !src.Elem().NotInHeap() { + why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable), but %v is not", dst.Elem(), src.Elem()) + return ir.OXXX, why + } + // (b) Disallow string to []T where T is go:notinheap. + if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Kind() == types.ByteType.Kind() || dst.Elem().Kind() == types.RuneType.Kind()) { + why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem()) + return ir.OXXX, why + } + + // 1. src can be assigned to dst. + op, why := assignop(src, dst) + if op != ir.OXXX { + return op, why + } + + // The rules for interfaces are no different in conversions + // than assignments. If interfaces are involved, stop now + // with the good message from assignop. + // Otherwise clear the error. + if src.IsInterface() || dst.IsInterface() { + return ir.OXXX, why + } + + // 2. Ignoring struct tags, src and dst have identical underlying types. + if types.IdenticalIgnoreTags(src.Underlying(), dst.Underlying()) { + return ir.OCONVNOP, "" + } + + // 3. src and dst are unnamed pointer types and, ignoring struct tags, + // their base types have identical underlying types. + if src.IsPtr() && dst.IsPtr() && src.Sym() == nil && dst.Sym() == nil { + if types.IdenticalIgnoreTags(src.Elem().Underlying(), dst.Elem().Underlying()) { + return ir.OCONVNOP, "" + } + } + + // 4. src and dst are both integer or floating point types. + if (src.IsInteger() || src.IsFloat()) && (dst.IsInteger() || dst.IsFloat()) { + if types.SimType[src.Kind()] == types.SimType[dst.Kind()] { + return ir.OCONVNOP, "" + } + return ir.OCONV, "" + } + + // 5. src and dst are both complex types. + if src.IsComplex() && dst.IsComplex() { + if types.SimType[src.Kind()] == types.SimType[dst.Kind()] { + return ir.OCONVNOP, "" + } + return ir.OCONV, "" + } + + // Special case for constant conversions: any numeric + // conversion is potentially okay. We'll validate further + // within evconst. See #38117. + if srcConstant && (src.IsInteger() || src.IsFloat() || src.IsComplex()) && (dst.IsInteger() || dst.IsFloat() || dst.IsComplex()) { + return ir.OCONV, "" + } + + // 6. src is an integer or has type []byte or []rune + // and dst is a string type. + if src.IsInteger() && dst.IsString() { + return ir.ORUNESTR, "" + } + + if src.IsSlice() && dst.IsString() { + if src.Elem().Kind() == types.ByteType.Kind() { + return ir.OBYTES2STR, "" + } + if src.Elem().Kind() == types.RuneType.Kind() { + return ir.ORUNES2STR, "" + } + } + + // 7. src is a string and dst is []byte or []rune. + // String to slice. + if src.IsString() && dst.IsSlice() { + if dst.Elem().Kind() == types.ByteType.Kind() { + return ir.OSTR2BYTES, "" + } + if dst.Elem().Kind() == types.RuneType.Kind() { + return ir.OSTR2RUNES, "" + } + } + + // 8. src is a pointer or uintptr and dst is unsafe.Pointer. + if (src.IsPtr() || src.IsUintptr()) && dst.IsUnsafePtr() { + return ir.OCONVNOP, "" + } + + // 9. src is unsafe.Pointer and dst is a pointer or uintptr. 
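The numbered conversion rules above map onto familiar explicit conversions; for instance:

package main

import "fmt"

func main() {
	i := 65
	f := float64(i)      // rule 4: numeric conversion (OCONV)
	s := string(rune(i)) // rule 6: integer to string (ORUNESTR), "A"
	b := []byte(s)       // rule 7: string to []byte (OSTR2BYTES)
	fmt.Println(f, s, b)
}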
+ if src.IsUnsafePtr() && (dst.IsPtr() || dst.IsUintptr()) { + return ir.OCONVNOP, "" + } + + // src is map and dst is a pointer to corresponding hmap. + // This rule is needed for the implementation detail that + // go gc maps are implemented as a pointer to a hmap struct. + if src.Kind() == types.TMAP && dst.IsPtr() && + src.MapType().Hmap == dst.Elem() { + return ir.OCONVNOP, "" + } + + return ir.OXXX, "" +} + +// Code to resolve elided DOTs in embedded types. + +// A dlist stores a pointer to a TFIELD Type embedded within +// a TSTRUCT or TINTER Type. +type dlist struct { + field *types.Field +} + +// dotpath computes the unique shortest explicit selector path to fully qualify +// a selection expression x.f, where x is of type t and f is the symbol s. +// If no such path exists, dotpath returns nil. +// If there are multiple shortest paths to the same depth, ambig is true. +func dotpath(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) (path []dlist, ambig bool) { + // The embedding of types within structs imposes a tree structure onto + // types: structs parent the types they embed, and types parent their + // fields or methods. Our goal here is to find the shortest path to + // a field or method named s in the subtree rooted at t. To accomplish + // that, we iteratively perform depth-first searches of increasing depth + // until we either find the named field/method or exhaust the tree. + for d := 0; ; d++ { + if d > len(dotlist) { + dotlist = append(dotlist, dlist{}) + } + if c, more := adddot1(s, t, d, save, ignorecase); c == 1 { + return dotlist[:d], false + } else if c > 1 { + return nil, true + } else if !more { + return nil, false + } + } +} + +func expand0(t *types.Type) { + u := t + if u.IsPtr() { + u = u.Elem() + } + + if u.IsInterface() { + for _, f := range u.Fields().Slice() { + if f.Sym.Uniq() { + continue + } + f.Sym.SetUniq(true) + slist = append(slist, symlink{field: f}) + } + + return + } + + u = types.ReceiverBaseType(t) + if u != nil { + for _, f := range u.Methods().Slice() { + if f.Sym.Uniq() { + continue + } + f.Sym.SetUniq(true) + slist = append(slist, symlink{field: f}) + } + } +} + +func expand1(t *types.Type, top bool) { + if t.Recur() { + return + } + t.SetRecur(true) + + if !top { + expand0(t) + } + + u := t + if u.IsPtr() { + u = u.Elem() + } + + if u.IsStruct() || u.IsInterface() { + for _, f := range u.Fields().Slice() { + if f.Embedded == 0 { + continue + } + if f.Sym == nil { + continue + } + expand1(f.Type, false) + } + } + + t.SetRecur(false) +} + +func ifacelookdot(s *types.Sym, t *types.Type, ignorecase bool) (m *types.Field, followptr bool) { + if t == nil { + return nil, false + } + + path, ambig := dotpath(s, t, &m, ignorecase) + if path == nil { + if ambig { + base.Errorf("%v.%v is ambiguous", t, s) + } + return nil, false + } + + for _, d := range path { + if d.field.Type.IsPtr() { + followptr = true + break + } + } + + if !m.IsMethod() { + base.Errorf("%v.%v is a field, not a method", t, s) + return nil, followptr + } + + return m, followptr +} + +func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool { + t0 := t + if t == nil { + return false + } + + if t.IsInterface() { + i := 0 + tms := t.Fields().Slice() + for _, im := range iface.Fields().Slice() { + for i < len(tms) && tms[i].Sym != im.Sym { + i++ + } + if i == len(tms) { + *m = im + *samename = nil + *ptr = 0 + return false + } + tm := tms[i] + if !types.Identical(tm.Type, im.Type) { + *m = im + *samename = tm + *ptr = 0 + return false + } + } 
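dotpath above is an iterative-deepening search: it repeats a depth-limited probe at increasing depth until it finds a unique match, detects an ambiguity, or exhausts the tree. The shape of that loop in isolation (lookAtDepth is a hypothetical stand-in for adddot1):

package main

import "fmt"

func search(lookAtDepth func(d int) (count int, more bool)) (depth int, ok bool) {
	for d := 0; ; d++ {
		switch c, more := lookAtDepth(d); {
		case c == 1:
			return d, true // unique match at the shallowest possible depth
		case c > 1:
			return d, false // ambiguous: several matches at the same depth
		case !more:
			return -1, false // nothing deeper to look at
		}
	}
}

func main() {
	probe := func(d int) (int, bool) { // unique target at depth 2
		if d == 2 {
			return 1, true
		}
		return 0, d < 3
	}
	fmt.Println(search(probe)) // 2 true
}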
+ + return true + } + + t = types.ReceiverBaseType(t) + var tms []*types.Field + if t != nil { + CalcMethods(t) + tms = t.AllMethods().Slice() + } + i := 0 + for _, im := range iface.Fields().Slice() { + if im.Broke() { + continue + } + for i < len(tms) && tms[i].Sym != im.Sym { + i++ + } + if i == len(tms) { + *m = im + *samename, _ = ifacelookdot(im.Sym, t, true) + *ptr = 0 + return false + } + tm := tms[i] + if tm.Nointerface() || !types.Identical(tm.Type, im.Type) { + *m = im + *samename = tm + *ptr = 0 + return false + } + followptr := tm.Embedded == 2 + + // if pointer receiver in method, + // the method does not exist for value types. + rcvr := tm.Type.Recv().Type + if rcvr.IsPtr() && !t0.IsPtr() && !followptr && !types.IsInterfaceMethod(tm.Type) { + if false && base.Flag.LowerR != 0 { + base.Errorf("interface pointer mismatch") + } + + *m = im + *samename = nil + *ptr = 1 + return false + } + } + + return true +} + +func isptrto(t *types.Type, et types.Kind) bool { + if t == nil { + return false + } + if !t.IsPtr() { + return false + } + t = t.Elem() + if t == nil { + return false + } + if t.Kind() != et { + return false + } + return true +} + +// lookdot0 returns the number of fields or methods named s associated +// with Type t. If exactly one exists, it will be returned in *save +// (if save is not nil). +func lookdot0(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) int { + u := t + if u.IsPtr() { + u = u.Elem() + } + + c := 0 + if u.IsStruct() || u.IsInterface() { + for _, f := range u.Fields().Slice() { + if f.Sym == s || (ignorecase && f.IsMethod() && strings.EqualFold(f.Sym.Name, s.Name)) { + if save != nil { + *save = f + } + c++ + } + } + } + + u = t + if t.Sym() != nil && t.IsPtr() && !t.Elem().IsPtr() { + // If t is a defined pointer type, then x.m is shorthand for (*x).m. + u = t.Elem() + } + u = types.ReceiverBaseType(u) + if u != nil { + for _, f := range u.Methods().Slice() { + if f.Embedded == 0 && (f.Sym == s || (ignorecase && strings.EqualFold(f.Sym.Name, s.Name))) { + if save != nil { + *save = f + } + c++ + } + } + } + + return c +} + +var slist []symlink + +// Code to help generate trampoline functions for methods on embedded +// types. These are approx the same as the corresponding adddot +// routines except that they expect to be called with unique tasks and +// they return the actual methods. + +type symlink struct { + field *types.Field +} diff --git a/src/cmd/compile/internal/typecheck/syms.go b/src/cmd/compile/internal/typecheck/syms.go new file mode 100644 index 0000000000000..ab3384bf901e1 --- /dev/null +++ b/src/cmd/compile/internal/typecheck/syms.go @@ -0,0 +1,104 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typecheck + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/src" +) + +func LookupRuntime(name string) *ir.Name { + s := ir.Pkgs.Runtime.Lookup(name) + if s == nil || s.Def == nil { + base.Fatalf("syslook: can't find runtime.%s", name) + } + return ir.AsNode(s.Def).(*ir.Name) +} + +// SubstArgTypes substitutes the given list of types for +// successive occurrences of the "any" placeholder in the +// type syntax expression n.Type. +// The result of SubstArgTypes MUST be assigned back to old, e.g. 
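The *ptr = 1 branch of implements above is what produces Go's well-known pointer-receiver diagnostic; at the source level:

package main

type I interface{ M() }

type T struct{}

func (*T) M() {} // pointer receiver: M is not in the method set of T

func main() {
	var i I = &T{} // ok: *T implements I
	_ = i
	// var j I = T{} // rejected: "...does not implement I
	// (M method has pointer receiver)"
}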
+// n.Left = SubstArgTypes(n.Left, t1, t2) +func SubstArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name { + n := old.CloneName() + + for _, t := range types_ { + types.CalcSize(t) + } + n.SetType(types.SubstAny(n.Type(), &types_)) + if len(types_) > 0 { + base.Fatalf("substArgTypes: too many argument types") + } + return n +} + +// AutoLabel generates a new Name node for use with +// an automatically generated label. +// prefix is a short mnemonic (e.g. ".s" for switch) +// to help with debugging. +// It should begin with "." to avoid conflicts with +// user labels. +func AutoLabel(prefix string) *types.Sym { + if prefix[0] != '.' { + base.Fatalf("autolabel prefix must start with '.', have %q", prefix) + } + fn := ir.CurFunc + if ir.CurFunc == nil { + base.Fatalf("autolabel outside function") + } + n := fn.Label + fn.Label++ + return LookupNum(prefix, int(n)) +} + +func Lookup(name string) *types.Sym { + return types.LocalPkg.Lookup(name) +} + +// loadsys loads the definitions for the low-level runtime functions, +// so that the compiler can generate calls to them, +// but does not make them visible to user code. +func loadsys() { + types.Block = 1 + + inimport = true + TypecheckAllowed = true + + typs := runtimeTypes() + for _, d := range &runtimeDecls { + sym := ir.Pkgs.Runtime.Lookup(d.name) + typ := typs[d.typ] + switch d.tag { + case funcTag: + importfunc(ir.Pkgs.Runtime, src.NoXPos, sym, typ) + case varTag: + importvar(ir.Pkgs.Runtime, src.NoXPos, sym, typ) + default: + base.Fatalf("unhandled declaration tag %v", d.tag) + } + } + + TypecheckAllowed = false + inimport = false +} + +// LookupRuntimeFunc looks up Go function name in package runtime. This function +// must follow the internal calling convention. +func LookupRuntimeFunc(name string) *obj.LSym { + s := ir.Pkgs.Runtime.Lookup(name) + s.SetFunc(true) + return s.Linksym() +} + +// LookupRuntimeVar looks up a variable (or assembly function) name in package +// runtime. If this is a function, it may have a special calling +// convention. +func LookupRuntimeVar(name string) *obj.LSym { + return ir.Pkgs.Runtime.Lookup(name).Linksym() +} diff --git a/src/cmd/compile/internal/gc/types.go b/src/cmd/compile/internal/typecheck/target.go similarity index 51% rename from src/cmd/compile/internal/gc/types.go rename to src/cmd/compile/internal/typecheck/target.go index e46735df28dbe..018614d68bfc4 100644 --- a/src/cmd/compile/internal/gc/types.go +++ b/src/cmd/compile/internal/typecheck/target.go @@ -2,4 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +//go:generate go run mkbuiltin.go + +package typecheck + +import "cmd/compile/internal/ir" + +// Target is the package being compiled. +var Target *ir.Package diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go similarity index 92% rename from src/cmd/compile/internal/gc/typecheck.go rename to src/cmd/compile/internal/typecheck/typecheck.go index 0552dd180f220..2abf0a78248cc 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -2,35 +2,46 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
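AutoLabel above pairs a per-function counter with a "."-prefixed mnemonic, so generated labels can never collide with user labels (which cannot start with a dot). A standalone sketch of that scheme, with hypothetical stand-ins for ir.Func and the Sym machinery:

package main

import "fmt"

type fn struct{ label int }

func autoLabel(f *fn, prefix string) string {
	if prefix[0] != '.' {
		panic("autolabel prefix must start with '.'")
	}
	n := f.label
	f.label++
	return fmt.Sprintf("%s%d", prefix, n)
}

func main() {
	f := new(fn)
	fmt.Println(autoLabel(f, ".s"), autoLabel(f, ".s")) // .s0 .s1
}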
-package gc +package typecheck import ( - "cmd/compile/internal/base" - "cmd/compile/internal/ir" - "cmd/compile/internal/types" "fmt" "go/constant" "go/token" "strings" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" ) +// Function collecting autotmps generated during typechecking, +// to be included in the package-level init function. +var InitTodoFunc = ir.NewFunc(base.Pos) + +var inimport bool // set during import + +var decldepth int32 + +var TypecheckAllowed bool + var ( NeedFuncSym = func(*types.Sym) {} NeedITab = func(t, itype *types.Type) {} NeedRuntimeType = func(*types.Type) {} ) -func TypecheckInit() { +func Init() { initUniverse() - dclcontext = ir.PEXTERN + DeclContext = ir.PEXTERN base.Timer.Start("fe", "loadsys") loadsys() } -func TypecheckPackage() { - finishUniverse() +func Package() { + declareUniverse() - typecheckok = true + TypecheckAllowed = true // Process top-level declarations in phases. @@ -47,7 +58,7 @@ func TypecheckPackage() { for i := 0; i < len(Target.Decls); i++ { n := Target.Decls[i] if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.(*ir.Decl).X.Name().Alias()) { - Target.Decls[i] = typecheck(n, ctxStmt) + Target.Decls[i] = Stmt(n) } } @@ -59,7 +70,7 @@ func TypecheckPackage() { for i := 0; i < len(Target.Decls); i++ { n := Target.Decls[i] if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).X.Name().Alias() { - Target.Decls[i] = typecheck(n, ctxStmt) + Target.Decls[i] = Stmt(n) } } @@ -70,7 +81,7 @@ func TypecheckPackage() { for i := 0; i < len(Target.Decls); i++ { n := Target.Decls[i] if n.Op() == ir.ODCLFUNC { - TypecheckFuncBody(n.(*ir.Func)) + FuncBody(n.(*ir.Func)) fcount++ } } @@ -81,12 +92,12 @@ func TypecheckPackage() { base.Timer.Start("fe", "typecheck", "externdcls") for i, n := range Target.Externs { if n.Op() == ir.ONAME { - Target.Externs[i] = typecheck(Target.Externs[i], ctxExpr) + Target.Externs[i] = Expr(Target.Externs[i]) } } // Phase 5: With all user code type-checked, it's now safe to verify map keys. - checkMapKeys() + CheckMapKeys() // Phase 6: Decide how to capture closed variables. // This needs to run before escape analysis, @@ -97,28 +108,28 @@ func TypecheckPackage() { n := n.(*ir.Func) if n.OClosure != nil { ir.CurFunc = n - capturevars(n) + CaptureVars(n) } } } - capturevarscomplete = true + CaptureVarsComplete = true ir.CurFunc = nil if base.Debug.TypecheckInl != 0 { // Typecheck imported function bodies if Debug.l > 1, // otherwise lazily when used or re-exported. 
- TypecheckImports() + AllImportedBodies() } } -func TypecheckAssignExpr(n ir.Node) ir.Node { return typecheck(n, ctxExpr|ctxAssign) } -func TypecheckExpr(n ir.Node) ir.Node { return typecheck(n, ctxExpr) } -func TypecheckStmt(n ir.Node) ir.Node { return typecheck(n, ctxStmt) } +func AssignExpr(n ir.Node) ir.Node { return check(n, ctxExpr|ctxAssign) } +func Expr(n ir.Node) ir.Node { return check(n, ctxExpr) } +func Stmt(n ir.Node) ir.Node { return check(n, ctxStmt) } -func TypecheckExprs(exprs []ir.Node) { typecheckslice(exprs, ctxExpr) } -func TypecheckStmts(stmts []ir.Node) { typecheckslice(stmts, ctxStmt) } +func Exprs(exprs []ir.Node) { typecheckslice(exprs, ctxExpr) } +func Stmts(stmts []ir.Node) { typecheckslice(stmts, ctxStmt) } -func TypecheckCall(call *ir.CallExpr) { +func Call(call *ir.CallExpr) { t := call.X.Type() if t == nil { panic("misuse of Call") @@ -127,21 +138,21 @@ func TypecheckCall(call *ir.CallExpr) { if t.NumResults() > 0 { ctx = ctxExpr | ctxMultiOK } - if typecheck(call, ctx) != call { + if check(call, ctx) != call { panic("bad typecheck") } } -func TypecheckCallee(n ir.Node) ir.Node { - return typecheck(n, ctxExpr|ctxCallee) +func Callee(n ir.Node) ir.Node { + return check(n, ctxExpr|ctxCallee) } -func TypecheckFuncBody(n *ir.Func) { +func FuncBody(n *ir.Func) { ir.CurFunc = n decldepth = 1 errorsBefore := base.Errors() - typecheckslice(n.Body, ctxStmt) - checkreturn(n) + Stmts(n.Body) + CheckReturn(n) if base.Errors() > errorsBefore { n.Body.Set(nil) // type errors; do not compile } @@ -152,10 +163,10 @@ func TypecheckFuncBody(n *ir.Func) { var importlist []*ir.Func -func TypecheckImports() { +func AllImportedBodies() { for _, n := range importlist { if n.Inl != nil { - typecheckinl(n) + ImportedBody(n) } } } @@ -221,8 +232,8 @@ const ( var typecheckdefstack []ir.Node -// resolve ONONAME to definition, if any. -func resolve(n ir.Node) (res ir.Node) { +// Resolve ONONAME to definition, if any. +func Resolve(n ir.Node) (res ir.Node) { if n == nil || n.Op() != ir.ONONAME { return n } @@ -235,7 +246,7 @@ func resolve(n ir.Node) (res ir.Node) { if sym := n.Sym(); sym.Pkg != types.LocalPkg { // We might have an ir.Ident from oldname or importDot. if id, ok := n.(*ir.Ident); ok { - if pkgName := dotImportRefs[id]; pkgName != nil { + if pkgName := DotImportRefs[id]; pkgName != nil { pkgName.Used = true } } @@ -266,7 +277,7 @@ func resolve(n ir.Node) (res ir.Node) { func typecheckslice(l []ir.Node, top int) { for i := range l { - l[i] = typecheck(l[i], top) + l[i] = check(l[i], top) } } @@ -348,23 +359,23 @@ func cycleTrace(cycle []ir.Node) string { var typecheck_tcstack []ir.Node -func typecheckFunc(fn *ir.Func) { - new := typecheck(fn, ctxStmt) +func Func(fn *ir.Func) { + new := Stmt(fn) if new != fn { base.Fatalf("typecheck changed func") } } func typecheckNtype(n ir.Ntype) ir.Ntype { - return typecheck(n, ctxType).(ir.Ntype) + return check(n, ctxType).(ir.Ntype) } -// typecheck type checks node n. -// The result of typecheck MUST be assigned back to n, e.g. -// n.Left = typecheck(n.Left, top) -func typecheck(n ir.Node, top int) (res ir.Node) { +// check type checks node n. +// The result of check MUST be assigned back to n, e.g. +// n.Left = check(n.Left, top) +func check(n ir.Node, top int) (res ir.Node) { // cannot type check until all the source has been parsed - if !typecheckok { + if !TypecheckAllowed { base.Fatalf("early typecheck") } @@ -385,7 +396,7 @@ func typecheck(n ir.Node, top int) (res ir.Node) { } // Resolve definition of name and value of iota lazily. 
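The exported wrappers introduced in this hunk (AssignExpr, Expr, Stmt, and friends, a little further down) all funnel into one checker parameterized by a bit set of ctx flags. A toy sketch of that design (the flag names follow the patch; check's body here is invented for illustration):

package main

import "fmt"

const (
	ctxStmt = 1 << iota
	ctxExpr
	ctxType
	ctxCallee
	ctxMultiOK
	ctxAssign
)

func check(n string, top int) string {
	switch {
	case top&ctxAssign != 0:
		return n + " (assignable expression)"
	case top&ctxExpr != 0:
		return n + " (expression)"
	default:
		return n + " (statement)"
	}
}

func Expr(n string) string       { return check(n, ctxExpr) }
func AssignExpr(n string) string { return check(n, ctxExpr|ctxAssign) }
func Stmt(n string) string       { return check(n, ctxStmt) }

func main() {
	fmt.Println(Expr("x+1"), AssignExpr("x"), Stmt("return"))
}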
- n = resolve(n) + n = Resolve(n) // Skip typecheck if already done. // But re-typecheck ONAME/OTYPE/OLITERAL/OPACK node in case context has changed. @@ -504,7 +515,7 @@ func typecheck(n ir.Node, top int) (res ir.Node) { } } if t != nil { - n = evalConst(n) + n = EvalConst(n) t = n.Type() } @@ -555,7 +566,7 @@ func typecheck(n ir.Node, top int) (res ir.Node) { // n.Left = indexlit(n.Left) func indexlit(n ir.Node) ir.Node { if n != nil && n.Type() != nil && n.Type().Kind() == types.TIDEAL { - return defaultlit(n, types.Types[types.TINT]) + return DefaultLit(n, types.Types[types.TINT]) } return n } @@ -642,7 +653,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OTSLICE: n := n.(*ir.SliceType) - n.Elem = typecheck(n.Elem, ctxType) + n.Elem = check(n.Elem, ctxType) if n.Elem.Type() == nil { return n } @@ -653,7 +664,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OTARRAY: n := n.(*ir.ArrayType) - n.Elem = typecheck(n.Elem, ctxType) + n.Elem = check(n.Elem, ctxType) if n.Elem.Type() == nil { return n } @@ -664,7 +675,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } return n } - n.Len = indexlit(typecheck(n.Len, ctxExpr)) + n.Len = indexlit(Expr(n.Len)) size := n.Len if ir.ConstType(size) != constant.Int { switch { @@ -697,8 +708,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OTMAP: n := n.(*ir.MapType) - n.Key = typecheck(n.Key, ctxType) - n.Elem = typecheck(n.Elem, ctxType) + n.Key = check(n.Key, ctxType) + n.Elem = check(n.Elem, ctxType) l := n.Key r := n.Elem if l.Type() == nil || r.Type() == nil { @@ -716,7 +727,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OTCHAN: n := n.(*ir.ChanType) - n.Elem = typecheck(n.Elem, ctxType) + n.Elem = check(n.Elem, ctxType) l := n.Elem if l.Type() == nil { return n @@ -729,7 +740,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OTSTRUCT: n := n.(*ir.StructType) - n.SetOTYPE(tostruct(n.Fields)) + n.SetOTYPE(NewStructType(n.Fields)) return n case ir.OTINTER: @@ -739,13 +750,13 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OTFUNC: n := n.(*ir.FuncType) - n.SetOTYPE(functype(n.Recv, n.Params, n.Results)) + n.SetOTYPE(NewFuncType(n.Recv, n.Params, n.Results)) return n // type or expr case ir.ODEREF: n := n.(*ir.StarExpr) - n.X = typecheck(n.X, ctxExpr|ctxType) + n.X = check(n.X, ctxExpr|ctxType) l := n.X t := l.Type() if t == nil { @@ -806,8 +817,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { l, r = n.X, n.Y setLR = func() { n.X = l; n.Y = r } } - l = typecheck(l, ctxExpr) - r = typecheck(r, ctxExpr) + l = Expr(l) + r = Expr(r) setLR() if l.Type() == nil || r.Type() == nil { n.SetType(nil) @@ -826,7 +837,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { op = n.AsOp } if op == ir.OLSH || op == ir.ORSH { - r = defaultlit(r, types.Types[types.TUINT]) + r = DefaultLit(r, types.Types[types.TUINT]) setLR() t := r.Type() if !t.IsInteger() { @@ -1001,7 +1012,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { if iscmp[n.Op()] { t = types.UntypedBool n.SetType(t) - if con := evalConst(n); con.Op() == ir.OLITERAL { + if con := EvalConst(n); con.Op() == ir.OLITERAL { return con } l, r = defaultlit2(l, r, true) @@ -1042,7 +1053,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OBITNOT, ir.ONEG, ir.ONOT, ir.OPLUS: n := n.(*ir.UnaryExpr) - n.X = typecheck(n.X, ctxExpr) + n.X = Expr(n.X) l := n.X t := l.Type() if t == nil { @@ -1061,7 +1072,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { // exprs case ir.OADDR: n := 
n.(*ir.AddrExpr) - n.X = typecheck(n.X, ctxExpr) + n.X = Expr(n.X) if n.X.Type() == nil { n.SetType(nil) return n @@ -1080,7 +1091,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { base.Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean? } r.Name().SetAddrtaken(true) - if r.Name().IsClosureVar() && !capturevarscomplete { + if r.Name().IsClosureVar() && !CaptureVarsComplete { // Mark the original variable as Addrtaken so that capturevars // knows not to pass it by value. // But if the capturevars phase is complete, don't touch it, @@ -1088,7 +1099,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { r.Name().Defn.Name().SetAddrtaken(true) } } - n.X = defaultlit(n.X, nil) + n.X = DefaultLit(n.X, nil) if n.X.Type() == nil { n.SetType(nil) return n @@ -1104,7 +1115,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OXDOT, ir.ODOT: n := n.(*ir.SelectorExpr) if n.Op() == ir.OXDOT { - n = adddot(n) + n = AddImplicitDots(n) n.SetOp(ir.ODOT) if n.X == nil { n.SetType(nil) @@ -1112,9 +1123,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } } - n.X = typecheck(n.X, ctxExpr|ctxType) + n.X = check(n.X, ctxExpr|ctxType) - n.X = defaultlit(n.X, nil) + n.X = DefaultLit(n.X, nil) t := n.X.Type() if t == nil { @@ -1177,8 +1188,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.ODOTTYPE: n := n.(*ir.TypeAssertExpr) - n.X = typecheck(n.X, ctxExpr) - n.X = defaultlit(n.X, nil) + n.X = Expr(n.X) + n.X = DefaultLit(n.X, nil) l := n.X t := l.Type() if t == nil { @@ -1192,7 +1203,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } if n.Ntype != nil { - n.Ntype = typecheck(n.Ntype, ctxType) + n.Ntype = check(n.Ntype, ctxType) n.SetType(n.Ntype.Type()) n.Ntype = nil if n.Type() == nil { @@ -1223,11 +1234,11 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OINDEX: n := n.(*ir.IndexExpr) - n.X = typecheck(n.X, ctxExpr) - n.X = defaultlit(n.X, nil) + n.X = Expr(n.X) + n.X = DefaultLit(n.X, nil) n.X = implicitstar(n.X) l := n.X - n.Index = typecheck(n.Index, ctxExpr) + n.Index = Expr(n.Index) r := n.Index t := l.Type() if t == nil || r.Type() == nil { @@ -1273,7 +1284,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } case types.TMAP: - n.Index = assignconv(n.Index, t.Key(), "map index") + n.Index = AssignConv(n.Index, t.Key(), "map index") n.SetType(t.Elem()) n.SetOp(ir.OINDEXMAP) n.Assigned = false @@ -1282,8 +1293,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.ORECV: n := n.(*ir.UnaryExpr) - n.X = typecheck(n.X, ctxExpr) - n.X = defaultlit(n.X, nil) + n.X = Expr(n.X) + n.X = DefaultLit(n.X, nil) l := n.X t := l.Type() if t == nil { @@ -1307,9 +1318,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OSEND: n := n.(*ir.SendStmt) - n.Chan = typecheck(n.Chan, ctxExpr) - n.Value = typecheck(n.Value, ctxExpr) - n.Chan = defaultlit(n.Chan, nil) + n.Chan = Expr(n.Chan) + n.Value = Expr(n.Value) + n.Chan = DefaultLit(n.Chan, nil) t := n.Chan.Type() if t == nil { return n @@ -1324,7 +1335,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } - n.Value = assignconv(n.Value, t.Elem(), "send") + n.Value = AssignConv(n.Value, t.Elem(), "send") if n.Value.Type() == nil { return n } @@ -1353,11 +1364,11 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { base.Fatalf("expected 2 params (len, cap) for OSLICEHEADER, got %d", x) } - n.Ptr = typecheck(n.Ptr, ctxExpr) - l := typecheck(n.LenCap[0], ctxExpr) - c := typecheck(n.LenCap[1], ctxExpr) - l = defaultlit(l, 
types.Types[types.TINT]) - c = defaultlit(c, types.Types[types.TINT]) + n.Ptr = Expr(n.Ptr) + l := Expr(n.LenCap[0]) + c := Expr(n.LenCap[1]) + l = DefaultLit(l, types.Types[types.TINT]) + c = DefaultLit(c, types.Types[types.TINT]) if ir.IsConst(l, constant.Int) && ir.Int64Val(l) < 0 { base.Fatalf("len for OSLICEHEADER must be non-negative") @@ -1399,10 +1410,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { base.Fatalf("missing slice argument to copy for OMAKESLICECOPY") } - n.Len = typecheck(n.Len, ctxExpr) - n.Cap = typecheck(n.Cap, ctxExpr) + n.Len = Expr(n.Len) + n.Cap = Expr(n.Cap) - n.Len = defaultlit(n.Len, types.Types[types.TINT]) + n.Len = DefaultLit(n.Len, types.Types[types.TINT]) if !n.Len.Type().IsInteger() && n.Type().Kind() != types.TIDEAL { base.Errorf("non-integer len argument in OMAKESLICECOPY") @@ -1420,13 +1431,13 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OSLICE, ir.OSLICE3: n := n.(*ir.SliceExpr) - n.X = typecheck(n.X, ctxExpr) + n.X = Expr(n.X) low, high, max := n.SliceBounds() hasmax := n.Op().IsSlice3() - low = typecheck(low, ctxExpr) - high = typecheck(high, ctxExpr) - max = typecheck(max, ctxExpr) - n.X = defaultlit(n.X, nil) + low = Expr(low) + high = Expr(high) + max = Expr(max) + n.X = DefaultLit(n.X, nil) low = indexlit(low) high = indexlit(high) max = indexlit(max) @@ -1443,9 +1454,9 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } - addr := nodAddr(n.X) + addr := NodAddr(n.X) addr.SetImplicit(true) - n.X = typecheck(addr, ctxExpr) + n.X = Expr(addr) l = n.X } t := l.Type() @@ -1500,8 +1511,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { if top == ctxStmt { n.Use = ir.CallUseStmt } - typecheckslice(n.Init(), ctxStmt) // imported rewritten f(g()) calls (#30907) - n.X = typecheck(n.X, ctxExpr|ctxType|ctxCallee) + Stmts(n.Init()) // imported rewritten f(g()) calls (#30907) + n.X = check(n.X, ctxExpr|ctxType|ctxCallee) if n.X.Diag() { n.SetDiag(true) } @@ -1523,7 +1534,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.SetOp(l.BuiltinOp) n.X = nil n.SetTypecheck(0) // re-typechecking new op is OK, not a loop - return typecheck(n, top) + return check(n, top) case ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.OPANIC, ir.OREAL: typecheckargs(n) @@ -1535,7 +1546,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } u := ir.NewUnaryExpr(n.Pos(), l.BuiltinOp, arg) - return typecheck(ir.InitExpr(n.Init(), u), top) // typecheckargs can add to old.Init + return check(ir.InitExpr(n.Init(), u), top) // typecheckargs can add to old.Init case ir.OCOMPLEX, ir.OCOPY: typecheckargs(n) @@ -1545,12 +1556,12 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } b := ir.NewBinaryExpr(n.Pos(), l.BuiltinOp, arg1, arg2) - return typecheck(ir.InitExpr(n.Init(), b), top) // typecheckargs can add to old.Init + return check(ir.InitExpr(n.Init(), b), top) // typecheckargs can add to old.Init } panic("unreachable") } - n.X = defaultlit(n.X, nil) + n.X = DefaultLit(n.X, nil) l = n.X if l.Op() == ir.OTYPE { if n.IsDDD { @@ -1653,8 +1664,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OCAP, ir.OLEN: n := n.(*ir.UnaryExpr) - n.X = typecheck(n.X, ctxExpr) - n.X = defaultlit(n.X, nil) + n.X = Expr(n.X) + n.X = DefaultLit(n.X, nil) n.X = implicitstar(n.X) l := n.X t := l.Type() @@ -1680,7 +1691,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OREAL, ir.OIMAG: n := n.(*ir.UnaryExpr) - n.X = typecheck(n.X, ctxExpr) + n.X = Expr(n.X) l := n.X t := l.Type() if t == nil { @@ -1705,8 
+1716,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OCOMPLEX: n := n.(*ir.BinaryExpr) - l := typecheck(n.X, ctxExpr) - r := typecheck(n.Y, ctxExpr) + l := Expr(n.X) + r := Expr(n.Y) if l.Type() == nil || r.Type() == nil { n.SetType(nil) return n @@ -1746,8 +1757,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OCLOSE: n := n.(*ir.UnaryExpr) - n.X = typecheck(n.X, ctxExpr) - n.X = defaultlit(n.X, nil) + n.X = Expr(n.X) + n.X = DefaultLit(n.X, nil) l := n.X t := l.Type() if t == nil { @@ -1797,7 +1808,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n } - args[1] = assignconv(r, l.Type().Key(), "delete") + args[1] = AssignConv(r, l.Type().Key(), "delete") return n case ir.OAPPEND: @@ -1843,11 +1854,11 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { } if t.Elem().IsKind(types.TUINT8) && args[1].Type().IsString() { - args[1] = defaultlit(args[1], types.Types[types.TSTRING]) + args[1] = DefaultLit(args[1], types.Types[types.TSTRING]) return n } - args[1] = assignconv(args[1], t.Underlying(), "append") + args[1] = AssignConv(args[1], t.Underlying(), "append") return n } @@ -1856,7 +1867,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { if n.Type() == nil { continue } - as[i] = assignconv(n, t.Elem(), "append") + as[i] = AssignConv(n, t.Elem(), "append") types.CheckSize(as[i].Type()) // ensure width is calculated for backend } return n @@ -1864,10 +1875,10 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OCOPY: n := n.(*ir.BinaryExpr) n.SetType(types.Types[types.TINT]) - n.X = typecheck(n.X, ctxExpr) - n.X = defaultlit(n.X, nil) - n.Y = typecheck(n.Y, ctxExpr) - n.Y = defaultlit(n.Y, nil) + n.X = Expr(n.X) + n.X = DefaultLit(n.X, nil) + n.Y = Expr(n.Y) + n.Y = DefaultLit(n.Y, nil) if n.X.Type() == nil || n.Y.Type() == nil { n.SetType(nil) return n @@ -1905,7 +1916,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OCONV: n := n.(*ir.ConvExpr) types.CheckSize(n.Type()) // ensure width is calculated for backend - n.X = typecheck(n.X, ctxExpr) + n.X = Expr(n.X) n.X = convlit1(n.X, n.Type(), true, nil) t := n.X.Type() if t == nil || n.Type() == nil { @@ -1958,7 +1969,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { n.Args.Set(nil) l := args[0] - l = typecheck(l, ctxType) + l = check(l, ctxType) t := l.Type() if t == nil { n.SetType(nil) @@ -1982,12 +1993,12 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { l = args[i] i++ - l = typecheck(l, ctxExpr) + l = Expr(l) var r ir.Node if i < len(args) { r = args[i] i++ - r = typecheck(r, ctxExpr) + r = Expr(r) } if l.Type() == nil || (r != nil && r.Type() == nil) { @@ -2009,8 +2020,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { if i < len(args) { l = args[i] i++ - l = typecheck(l, ctxExpr) - l = defaultlit(l, types.Types[types.TINT]) + l = Expr(l) + l = DefaultLit(l, types.Types[types.TINT]) if l.Type() == nil { n.SetType(nil) return n @@ -2030,8 +2041,8 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { if i < len(args) { l = args[i] i++ - l = typecheck(l, ctxExpr) - l = defaultlit(l, types.Types[types.TINT]) + l = Expr(l) + l = DefaultLit(l, types.Types[types.TINT]) if l.Type() == nil { n.SetType(nil) return n @@ -2063,7 +2074,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { base.Fatalf("missing argument to new") } l := n.X - l = typecheck(l, ctxType) + l = check(l, ctxType) t := l.Type() if t == nil { n.SetType(nil) @@ -2080,17 +2091,17 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { for i1, n1 := range ls { // Special case 
for print: int constant is int64, not int. if ir.IsConst(n1, constant.Int) { - ls[i1] = defaultlit(ls[i1], types.Types[types.TINT64]) + ls[i1] = DefaultLit(ls[i1], types.Types[types.TINT64]) } else { - ls[i1] = defaultlit(ls[i1], nil) + ls[i1] = DefaultLit(ls[i1], nil) } } return n case ir.OPANIC: n := n.(*ir.UnaryExpr) - n.X = typecheck(n.X, ctxExpr) - n.X = defaultlit(n.X, types.Types[types.TINTER]) + n.X = Expr(n.X) + n.X = DefaultLit(n.X, types.Types[types.TINTER]) if n.X.Type() == nil { n.SetType(nil) return n @@ -2118,7 +2129,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OITAB: n := n.(*ir.UnaryExpr) - n.X = typecheck(n.X, ctxExpr) + n.X = Expr(n.X) t := n.X.Type() if t == nil { n.SetType(nil) @@ -2139,7 +2150,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OSPTR: n := n.(*ir.UnaryExpr) - n.X = typecheck(n.X, ctxExpr) + n.X = Expr(n.X) t := n.X.Type() if t == nil { n.SetType(nil) @@ -2160,13 +2171,13 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OCFUNC: n := n.(*ir.UnaryExpr) - n.X = typecheck(n.X, ctxExpr) + n.X = Expr(n.X) n.SetType(types.Types[types.TUINTPTR]) return n case ir.OCONVNOP: n := n.(*ir.ConvExpr) - n.X = typecheck(n.X, ctxExpr) + n.X = Expr(n.X) return n // statements @@ -2195,7 +2206,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OBLOCK: n := n.(*ir.BlockStmt) - typecheckslice(n.List, ctxStmt) + Stmts(n.List) return n case ir.OLABEL: @@ -2210,7 +2221,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.ODEFER, ir.OGO: n := n.(*ir.GoDeferStmt) - n.Call = typecheck(n.Call, ctxStmt|ctxExpr) + n.Call = check(n.Call, ctxStmt|ctxExpr) if !n.Call.Diag() { checkdefergo(n) } @@ -2218,37 +2229,37 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OFOR, ir.OFORUNTIL: n := n.(*ir.ForStmt) - typecheckslice(n.Init(), ctxStmt) + Stmts(n.Init()) decldepth++ - n.Cond = typecheck(n.Cond, ctxExpr) - n.Cond = defaultlit(n.Cond, nil) + n.Cond = Expr(n.Cond) + n.Cond = DefaultLit(n.Cond, nil) if n.Cond != nil { t := n.Cond.Type() if t != nil && !t.IsBoolean() { base.Errorf("non-bool %L used as for condition", n.Cond) } } - n.Post = typecheck(n.Post, ctxStmt) + n.Post = Stmt(n.Post) if n.Op() == ir.OFORUNTIL { - typecheckslice(n.Late, ctxStmt) + Stmts(n.Late) } - typecheckslice(n.Body, ctxStmt) + Stmts(n.Body) decldepth-- return n case ir.OIF: n := n.(*ir.IfStmt) - typecheckslice(n.Init(), ctxStmt) - n.Cond = typecheck(n.Cond, ctxExpr) - n.Cond = defaultlit(n.Cond, nil) + Stmts(n.Init()) + n.Cond = Expr(n.Cond) + n.Cond = DefaultLit(n.Cond, nil) if n.Cond != nil { t := n.Cond.Type() if t != nil && !t.IsBoolean() { base.Errorf("non-bool %L used as if condition", n.Cond) } } - typecheckslice(n.Body, ctxStmt) - typecheckslice(n.Else, ctxStmt) + Stmts(n.Body) + Stmts(n.Else) return n case ir.ORETURN: @@ -2294,12 +2305,12 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.ODCLCONST: n := n.(*ir.Decl) - n.X = typecheck(n.X, ctxExpr) + n.X = Expr(n.X) return n case ir.ODCLTYPE: n := n.(*ir.Decl) - n.X = typecheck(n.X, ctxType) + n.X = check(n.X, ctxType) types.CheckSize(n.X.Type()) return n } @@ -2317,14 +2328,14 @@ func typecheckargs(n ir.Node) { case *ir.CallExpr: list = n.Args if n.IsDDD { - typecheckslice(list, ctxExpr) + Exprs(list) return } case *ir.ReturnStmt: list = n.Results } if len(list) != 1 { - typecheckslice(list, ctxExpr) + Exprs(list) return } @@ -2351,11 +2362,11 @@ func typecheckargs(n ir.Node) { // will reassociate them later when it's appropriate. 
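typecheckargs, which begins above, covers the one place a single call supplies every argument; in user code this is:

package main

import "fmt"

func g() (int, int)  { return 1, 2 }
func f(a, b int) int { return a + b }

func main() {
	// Typechecking rewrites f(g()) roughly as t1, t2 := g(); f(t1, t2),
	// introducing temporaries for the multi-valued call.
	fmt.Println(f(g())) // 3
}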
static := ir.CurFunc == nil if static { - ir.CurFunc = initTodo + ir.CurFunc = InitTodoFunc } list = nil for _, f := range t.FieldSlice() { - t := temp(f.Type) + t := Temp(f.Type) as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, t)) as.Lhs.Append(t) list = append(list, t) @@ -2371,7 +2382,7 @@ func typecheckargs(n ir.Node) { n.Results.Set(list) } - n.PtrInit().Append(typecheck(as, ctxStmt)) + n.PtrInit().Append(Stmt(as)) } func checksliceindex(l ir.Node, r ir.Node, tp *types.Type) bool { @@ -2483,7 +2494,7 @@ func implicitstar(n ir.Node) ir.Node { } star := ir.NewStarExpr(base.Pos, n) star.SetImplicit(true) - return typecheck(star, ctxExpr) + return Expr(star) } func needOneArg(n *ir.CallExpr, f string, args ...interface{}) (ir.Node, bool) { @@ -2563,7 +2574,7 @@ func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) { n.SetType(nil) return n } - expandmeth(mt) + CalcMethods(mt) ms = mt.AllMethods() // The method expression T.m requires a wrapper when T @@ -2599,7 +2610,7 @@ func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) { } me := ir.NewMethodExpr(n.Pos(), n.X.Type(), m) - me.SetType(methodfunc(m.Type, n.X.Type())) + me.SetType(NewMethodType(m.Type, n.X.Type())) f := NewName(ir.MethodSym(t, m.Sym)) f.Class_ = ir.PFUNC f.SetType(me.Type()) @@ -2654,7 +2665,7 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { if n.X.Type().IsPtr() { star := ir.NewStarExpr(base.Pos, n.X) star.SetImplicit(true) - n.X = typecheck(star, ctxExpr) + n.X = Expr(star) } n.SetOp(ir.ODOTINTER) @@ -2674,13 +2685,13 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { if !types.Identical(rcvr, tt) { if rcvr.IsPtr() && types.Identical(rcvr.Elem(), tt) { checklvalue(n.X, "call pointer method on") - addr := nodAddr(n.X) + addr := NodAddr(n.X) addr.SetImplicit(true) - n.X = typecheck(addr, ctxType|ctxExpr) + n.X = check(addr, ctxType|ctxExpr) } else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) { star := ir.NewStarExpr(base.Pos, n.X) star.SetImplicit(true) - n.X = typecheck(star, ctxType|ctxExpr) + n.X = check(star, ctxType|ctxExpr) } else if tt.IsPtr() && tt.Elem().IsPtr() && types.Identical(derefall(tt), derefall(rcvr)) { base.Errorf("calling method %v with receiver %L requires explicit dereference", n.Sel, n.X) for tt.IsPtr() { @@ -2690,7 +2701,7 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { } star := ir.NewStarExpr(base.Pos, n.X) star.SetImplicit(true) - n.X = typecheck(star, ctxType|ctxExpr) + n.X = check(star, ctxType|ctxExpr) tt = tt.Elem() } } else { @@ -2967,7 +2978,7 @@ func pushtype(nn ir.Node, t *types.Type) ir.Node { // For *T, return &T{...}. n.Ntype = ir.TypeNode(t.Elem()) - addr := nodAddrAt(n.Pos(), n) + addr := NodAddrAt(n.Pos(), n) addr.SetImplicit(true) return addr } @@ -2999,7 +3010,7 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { // Need to handle [...]T arrays specially. 
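The [...]T special case that follows infers the array length from the element count of the literal:

package main

import "fmt"

func main() {
	a := [...]int{1, 2, 3} // length comes from the literal, not the type
	fmt.Println(len(a), a) // 3 [1 2 3]
}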
if array, ok := n.Ntype.(*ir.ArrayType); ok && array.Elem != nil && array.Len == nil { - array.Elem = typecheck(array.Elem, ctxType) + array.Elem = check(array.Elem, ctxType) elemType := array.Elem.Type() if elemType == nil { n.SetType(nil) @@ -3012,7 +3023,7 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { return n } - n.Ntype = ir.Node(typecheck(n.Ntype, ctxType)).(ir.Ntype) + n.Ntype = ir.Node(check(n.Ntype, ctxType)).(ir.Ntype) t := n.Ntype.Type() if t == nil { n.SetType(nil) @@ -3041,7 +3052,7 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { for i3, l := range n.List { ir.SetPos(l) if l.Op() != ir.OKEY { - n.List[i3] = typecheck(l, ctxExpr) + n.List[i3] = Expr(l) base.Errorf("missing key in map literal") continue } @@ -3049,14 +3060,14 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { r := l.Key r = pushtype(r, t.Key()) - r = typecheck(r, ctxExpr) - l.Key = assignconv(r, t.Key(), "map key") + r = Expr(r) + l.Key = AssignConv(r, t.Key(), "map key") cs.add(base.Pos, l.Key, "key", "map literal") r = l.Value r = pushtype(r, t.Elem()) - r = typecheck(r, ctxExpr) - l.Value = assignconv(r, t.Elem(), "map value") + r = Expr(r) + l.Value = AssignConv(r, t.Elem(), "map value") } n.SetOp(ir.OMAPLIT) @@ -3072,7 +3083,7 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { ls := n.List for i, n1 := range ls { ir.SetPos(n1) - n1 = typecheck(n1, ctxExpr) + n1 = Expr(n1) ls[i] = n1 if i >= t.NumFields() { if !errored { @@ -3088,7 +3099,7 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { base.Errorf("implicit assignment of unexported field '%s' in %v literal", s.Name, t) } // No pushtype allowed here. Must name fields for that. - n1 = assignconv(n1, f.Type, "field value") + n1 = AssignConv(n1, f.Type, "field value") sk := ir.NewStructKeyExpr(base.Pos, f.Sym, n1) sk.Offset = f.Offset ls[i] = sk @@ -3112,8 +3123,8 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { // package, because of import dot. Redirect to correct sym // before we do the lookup. s := key.Sym() - if id, ok := key.(*ir.Ident); ok && dotImportRefs[id] != nil { - s = lookup(s.Name) + if id, ok := key.(*ir.Ident); ok && DotImportRefs[id] != nil { + s = Lookup(s.Name) } // An OXDOT uses the Sym field to hold @@ -3134,7 +3145,7 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { base.Errorf("mixture of field:value and value initializers") errored = true } - ls[i] = typecheck(ls[i], ctxExpr) + ls[i] = Expr(ls[i]) continue } l := l.(*ir.StructKeyExpr) @@ -3170,8 +3181,8 @@ func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { l.Offset = f.Offset // No pushtype allowed here. Tried and rejected. 
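The keyed and positional struct-literal paths handled here enforce the familiar all-or-nothing rule:

package main

import "fmt"

type P struct{ X, Y int }

func main() {
	p := P{X: 1, Y: 2} // keyed: any subset of fields, any order
	q := P{1, 2}       // positional: every field, declaration order
	// P{X: 1, 2} would be rejected with
	// "mixture of field:value and value initializers".
	fmt.Println(p, q)
}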
- l.Value = typecheck(l.Value, ctxExpr) - l.Value = assignconv(l.Value, f.Type, "field value") + l.Value = Expr(l.Value) + l.Value = AssignConv(l.Value, f.Type, "field value") } } @@ -3201,8 +3212,8 @@ func typecheckarraylit(elemType *types.Type, bound int64, elts []ir.Node, ctx st var kv *ir.KeyExpr if elt.Op() == ir.OKEY { elt := elt.(*ir.KeyExpr) - elt.Key = typecheck(elt.Key, ctxExpr) - key = indexconst(elt.Key) + elt.Key = Expr(elt.Key) + key = IndexConst(elt.Key) if key < 0 { if !elt.Key.Diag() { if key == -2 { @@ -3219,8 +3230,8 @@ func typecheckarraylit(elemType *types.Type, bound int64, elts []ir.Node, ctx st } r = pushtype(r, elemType) - r = typecheck(r, ctxExpr) - r = assignconv(r, elemType, ctx) + r = Expr(r) + r = AssignConv(r, elemType, ctx) if kv != nil { kv.Value = r } else { @@ -3328,15 +3339,15 @@ func typecheckas(n *ir.AssignStmt) { // if the variable has a type (ntype) then typechecking // will not look at defn, so it is okay (and desirable, // so that the conversion below happens). - n.X = resolve(n.X) + n.X = Resolve(n.X) if !ir.DeclaredBy(n.X, n) || n.X.Name().Ntype != nil { - n.X = typecheck(n.X, ctxExpr|ctxAssign) + n.X = AssignExpr(n.X) } // Use ctxMultiOK so we can emit an "N variables but M values" error // to be consistent with typecheckas2 (#26616). - n.Y = typecheck(n.Y, ctxExpr|ctxMultiOK) + n.Y = check(n.Y, ctxExpr|ctxMultiOK) checkassign(n, n.X) if n.Y != nil && n.Y.Type() != nil { if n.Y.Type().IsFuncArgStruct() { @@ -3345,12 +3356,12 @@ func typecheckas(n *ir.AssignStmt) { // to indicate failed typechecking. n.Y.SetType(nil) } else if n.X.Type() != nil { - n.Y = assignconv(n.Y, n.X.Type(), "assignment") + n.Y = AssignConv(n.Y, n.X.Type(), "assignment") } } if ir.DeclaredBy(n.X, n) && n.X.Name().Ntype == nil { - n.Y = defaultlit(n.Y, nil) + n.Y = DefaultLit(n.Y, nil) n.X.SetType(n.Y.Type()) } @@ -3360,7 +3371,7 @@ func typecheckas(n *ir.AssignStmt) { n.SetTypecheck(1) if n.X.Typecheck() == 0 { - n.X = typecheck(n.X, ctxExpr|ctxAssign) + n.X = AssignExpr(n.X) } if !ir.IsBlank(n.X) { types.CheckSize(n.X.Type()) // ensure width is calculated for backend @@ -3382,20 +3393,20 @@ func typecheckas2(n *ir.AssignListStmt) { ls := n.Lhs for i1, n1 := range ls { // delicate little dance. 
- n1 = resolve(n1) + n1 = Resolve(n1) ls[i1] = n1 if !ir.DeclaredBy(n1, n) || n1.Name().Ntype != nil { - ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign) + ls[i1] = AssignExpr(ls[i1]) } } cl := len(n.Lhs) cr := len(n.Rhs) if cl > 1 && cr == 1 { - n.Rhs[0] = typecheck(n.Rhs[0], ctxExpr|ctxMultiOK) + n.Rhs[0] = check(n.Rhs[0], ctxExpr|ctxMultiOK) } else { - typecheckslice(n.Rhs, ctxExpr) + Exprs(n.Rhs) } checkassignlist(n, n.Lhs) @@ -3408,10 +3419,10 @@ func typecheckas2(n *ir.AssignListStmt) { for il, nl := range ls { nr := rs[il] if nl.Type() != nil && nr.Type() != nil { - rs[il] = assignconv(nr, nl.Type(), "assignment") + rs[il] = AssignConv(nr, nl.Type(), "assignment") } if ir.DeclaredBy(nl, n) && nl.Name().Ntype == nil { - rs[il] = defaultlit(rs[il], nil) + rs[il] = DefaultLit(rs[il], nil) nl.SetType(rs[il].Type()) } } @@ -3500,7 +3511,7 @@ out: ls = n.Lhs for i1, n1 := range ls { if n1.Typecheck() == 0 { - ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign) + ls[i1] = AssignExpr(ls[i1]) } } } @@ -3519,7 +3530,7 @@ func typecheckfunc(n *ir.Func) { } } - n.Nname = typecheck(n.Nname, ctxExpr|ctxAssign).(*ir.Name) + n.Nname = AssignExpr(n.Nname).(*ir.Name) t := n.Nname.Type() if t == nil { return @@ -3533,7 +3544,7 @@ func typecheckfunc(n *ir.Func) { } n.Nname.SetSym(ir.MethodSym(rcvr.Type, n.Shortname)) - declare(n.Nname, ir.PFUNC) + Declare(n.Nname, ir.PFUNC) } if base.Ctxt.Flag_dynlink && !inimport && n.Nname != nil { @@ -3557,12 +3568,12 @@ func stringtoruneslit(n *ir.ConvExpr) ir.Node { nn := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(n.Type()).(ir.Ntype), nil) nn.List.Set(l) - return typecheck(nn, ctxExpr) + return Expr(nn) } var mapqueue []*ir.MapType -func checkMapKeys() { +func CheckMapKeys() { for _, n := range mapqueue { k := n.Type().MapType().Key if !k.Broke() && !types.IsComparable(k) { @@ -3668,7 +3679,7 @@ func typecheckdef(n ir.Node) { base.ErrorfAt(n.Pos(), "xxx") } - e = typecheck(e, ctxExpr) + e = Expr(e) if e.Type() == nil { goto ret } @@ -3734,12 +3745,12 @@ func typecheckdef(n ir.Node) { } if n.Name().Defn.Op() == ir.ONAME { - n.Name().Defn = typecheck(n.Name().Defn, ctxExpr) + n.Name().Defn = Expr(n.Name().Defn) n.SetType(n.Name().Defn.Type()) break } - n.Name().Defn = typecheck(n.Name().Defn, ctxStmt) // fills in n.Type + n.Name().Defn = Stmt(n.Name().Defn) // fills in n.Type case ir.OTYPE: n := n.(*ir.Name) @@ -3808,7 +3819,7 @@ func checkmake(t *types.Type, arg string, np *ir.Node) bool { // are the same as for index expressions. Factor the code better; // for instance, indexlit might be called here and incorporate some // of the bounds checks done for make. - n = defaultlit(n, types.Types[types.TINT]) + n = DefaultLit(n, types.Types[types.TINT]) *np = n return true @@ -3973,8 +3984,8 @@ func isTermNode(n ir.Node) bool { return false } -// checkreturn makes sure that fn terminates appropriately. -func checkreturn(fn *ir.Func) { +// CheckReturn makes sure that fn terminates appropriately. +func CheckReturn(fn *ir.Func) { if fn.Type().NumResults() != 0 && len(fn.Body) != 0 { markBreak(fn) if !isTermNodes(fn.Body) { @@ -4145,3 +4156,25 @@ func curpkg() *types.Pkg { } return fnpkg(fn.Nname) } + +func Conv(n ir.Node, t *types.Type) ir.Node { + if types.Identical(n.Type(), t) { + return n + } + n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n) + n.SetType(t) + n = Expr(n) + return n +} + +// ConvNop converts node n to type t using the OCONVNOP op +// and typechecks the result with ctxExpr. 
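+// If n's type is already identical to t, ConvNop returns n unchanged.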
+func ConvNop(n ir.Node, t *types.Type) ir.Node {
+	if types.Identical(n.Type(), t) {
+		return n
+	}
+	n = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, n)
+	n.SetType(t)
+	n = Expr(n)
+	return n
+}
diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/typecheck/universe.go
similarity index 93%
rename from src/cmd/compile/internal/gc/universe.go
rename to src/cmd/compile/internal/typecheck/universe.go
index 5d59fdbbc534f..fc8e962e28fad 100644
--- a/src/cmd/compile/internal/gc/universe.go
+++ b/src/cmd/compile/internal/typecheck/universe.go
@@ -2,16 +2,31 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// TODO(gri) This file should probably become part of package types.
-
-package gc
+package typecheck
 
 import (
+	"go/constant"
+
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/types"
 	"cmd/internal/src"
-	"go/constant"
+)
+
+var (
+	okfor [ir.OEND][]bool
+	iscmp [ir.OEND]bool
+)
+
+var (
+	okforeq    [types.NTYPE]bool
+	okforadd   [types.NTYPE]bool
+	okforand   [types.NTYPE]bool
+	okfornone  [types.NTYPE]bool
+	okforbool  [types.NTYPE]bool
+	okforcap   [types.NTYPE]bool
+	okforlen   [types.NTYPE]bool
+	okforarith [types.NTYPE]bool
 )
 
 var basicTypes = [...]struct {
@@ -169,7 +184,7 @@ func initUniverse() {
 	s = types.BuiltinPkg.Lookup("false")
 	s.Def = ir.NewConstAt(src.NoXPos, s, types.UntypedBool, constant.MakeBool(false))
 
-	s = lookup("_")
+	s = Lookup("_")
 	types.BlankSym = s
 	s.Block = -100
 	s.Def = NewName(s)
@@ -186,7 +201,7 @@ func initUniverse() {
 	types.Types[types.TNIL] = types.New(types.TNIL)
 
 	s = types.BuiltinPkg.Lookup("nil")
-	nnil := nodnil()
+	nnil := NodNil()
 	nnil.(*ir.NilExpr).SetSym(s)
 	s.Def = nnil
 
@@ -317,12 +332,12 @@ func makeErrorInterface() *types.Type {
 	sig := types.NewSignature(types.NoPkg, fakeRecvField(), nil, []*types.Field{
 		types.NewField(src.NoXPos, nil, types.Types[types.TSTRING]),
 	})
-	method := types.NewField(src.NoXPos, lookup("Error"), sig)
+	method := types.NewField(src.NoXPos, Lookup("Error"), sig)
 	return types.NewInterface(types.NoPkg, []*types.Field{method})
 }
 
-// finishUniverse makes the universe block visible within the current package.
-func finishUniverse() {
+// declareUniverse makes the universe block visible within the current package.
+func declareUniverse() {
 	// Operationally, this is similar to a dot import of builtinpkg, except
 	// that we silently skip symbols that are already declared in the
 	// package block rather than emitting a redeclared symbol error.
 	for _, s := range types.BuiltinPkg.Syms {
 		if s.Def == nil {
 			continue
 		}
-		s1 := lookup(s.Name)
+		s1 := Lookup(s.Name)
 		if s1.Def != nil {
 			continue
 		}
 
 		s1.Def = s.Def
 		s1.Block = s.Block
 	}
 
-	ir.RegFP = NewName(lookup(".fp"))
+	ir.RegFP = NewName(Lookup(".fp"))
 	ir.RegFP.SetType(types.Types[types.TINT32])
 	ir.RegFP.Class_ = ir.PPARAM
 	ir.RegFP.SetUsed(true)
From 0256ba99a893f2faf870105fc93fff94e5caf241 Mon Sep 17 00:00:00 2001
From: Russ Cox
Date: Wed, 23 Dec 2020 00:43:42 -0500
Subject: [PATCH 228/474] [dev.regabi] cmd/compile: split up typecheck1 [generated]

typecheck1 is the largest non-machine-generated function in the compiler,
weighing in at 1,747 lines. Since we are destroying the git blame history
anyway, now is a good time to split each different case into its own
function, making future work on this function more manageable.

[git-generate]
cd src/cmd/compile/internal/typecheck
rf '
# Remove tracing print from typecheck1 - the one in typecheck is fine.
# Removing it lets us remove the named result. # That lets all the cut-out functions not have named results. rm typecheck.go:/^func typecheck1/+0,/^func typecheck1/+4 sub typecheck.go:/^func typecheck1/+/\(res ir\.Node\)/ ir.Node mv typecheckselect tcSelect mv typecheckswitch tcSwitch mv typecheckrange tcRange mv typecheckfunc tcFunc mv checkdefergo tcGoDefer mv typecheckclosure tcClosure mv check typecheck mv typecheckcomplit tcCompLit mv typecheckas tcAssign mv typecheckas2 tcAssignList mv typecheckpartialcall tcCallPart mv typecheckExprSwitch tcSwitchExpr mv typecheckTypeSwitch tcSwitchType mv typecheck1:/^\tcase ir.ORETURN:/+2,/^\tcase /-2 tcReturn add typecheck.go:/^func tcReturn/-0 \ // tcReturn typechecks an ORETURN node. mv typecheck1:/^\tcase ir.OIF:/+2,/^\tcase /-2 tcIf add typecheck.go:/^func tcIf/-0 \ // tcIf typechecks an OIF node. mv typecheck1:/^\tcase ir.OFOR,/+2,/^\tcase /-2 tcFor add typecheck.go:/^func tcFor/-0 \ // tcFor typechecks an OFOR node. mv typecheck1:/^\tcase ir.OSPTR:/+2,/^\tcase /-2 tcSPtr add typecheck.go:/^func tcSPtr/-0 \ // tcSPtr typechecks an OSPTR node. mv typecheck1:/^\tcase ir.OITAB:/+2,/^\tcase /-2 tcITab add typecheck.go:/^func tcITab/-0 \ // tcITab typechecks an OITAB node. mv typecheck1:/^\tcase ir.ORECOVER:/+2,/^\tcase /-2 tcRecover add typecheck.go:/^func tcRecover/-0 \ // tcRecover typechecks an ORECOVER node. mv typecheck1:/^\tcase ir.OPANIC:/+2,/^\tcase /-2 tcPanic add typecheck.go:/^func tcPanic/-0 \ // tcPanic typechecks an OPANIC node. mv typecheck1:/^\tcase ir.OPRINT,/+2,/^\tcase /-2 tcPrint add typecheck.go:/^func tcPrint/-0 \ // tcPrint typechecks an OPRINT or OPRINTN node. mv typecheck1:/^\tcase ir.ONEW:/+2,/^\tcase /-2 tcNew add typecheck.go:/^func tcNew/-0 \ // tcNew typechecks an ONEW node. mv typecheck1:/^\tcase ir.OMAKE:/+2,/^\tcase /-2 tcMake add typecheck.go:/^func tcMake/-0 \ // tcMake typechecks an OMAKE node. mv typecheck1:/^\tcase ir.OCONV:/+2,/^\tcase /-2 tcConv add typecheck.go:/^func tcConv/-0 \ // tcConv typechecks an OCONV node. mv typecheck1:/^\tcase ir.OCOPY:/+2,/^\tcase /-2 tcCopy add typecheck.go:/^func tcCopy/-0 \ // tcCopy typechecks an OCOPY node. mv typecheck1:/^\tcase ir.OAPPEND:/+2,/^\tcase /-2 tcAppend add typecheck.go:/^func tcAppend/-0 \ // tcAppend typechecks an OAPPEND node. mv typecheck1:/^\tcase ir.ODELETE:/+2,/^\tcase /-2 tcDelete add typecheck.go:/^func tcDelete/-0 \ // tcDelete typechecks an ODELETE node. mv typecheck1:/^\tcase ir.OCLOSE:/+2,/^\tcase /-2 tcClose add typecheck.go:/^func tcClose/-0 \ // tcClose typechecks an OCLOSE node. mv typecheck1:/^\tcase ir.OCOMPLEX:/+2,/^\tcase /-2 tcComplex add typecheck.go:/^func tcComplex/-0 \ // tcComplex typechecks an OCOMPLEX node. mv typecheck1:/^\tcase ir.OREAL,/+2,/^\tcase /-2 tcRealImag add typecheck.go:/^func tcRealImag/-0 \ // tcRealImag typechecks an OREAL or OIMAG node. mv typecheck1:/^\tcase ir.OCAP,/+2,/^\tcase /-2 tcLenCap add typecheck.go:/^func tcLenCap/-0 \ // tcLenCap typechecks an OLEN or OCAP node. mv typecheck1:/^\tcase ir.OCALL:/+2,/^\tcase /-2 tcCall add typecheck.go:/^func tcCall/-0 \ // tcCall typechecks an OCALL node. mv typecheck1:/^\tcase ir.OSLICE,/+2,/^\tcase /-3 tcSlice add typecheck.go:/^func tcSlice/-0 \ // tcSlice typechecks an OSLICE or OSLICE3 node. 
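# Each "mv typecheck1:/^\tcase .../+2,/^\tcase /-2 tcX" above and below
# cuts the body of one typecheck1 case out into a new tcX function, and the
# "add" that follows gives the new function a doc comment.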
# move type assertion above comment mv typecheck1:/^\tcase ir.OMAKESLICECOPY:/+/n := n/-+ typecheck1:/^\tcase ir.OMAKESLICECOPY:/+0 mv typecheck1:/^\tcase ir.OMAKESLICECOPY:/+2,/^\tcase /-2 tcMakeSliceCopy add typecheck.go:/^func tcMakeSliceCopy/-0 \ // tcMakeSliceCopy typechecks an OMAKESLICECOPY node. # move type assertion above comment mv typecheck1:/^\tcase ir.OSLICEHEADER:/+/n := n/-+ typecheck1:/^\tcase ir.OSLICEHEADER:/+0 mv typecheck1:/^\tcase ir.OSLICEHEADER:/+2,/^\tcase /-2 tcSliceHeader add typecheck.go:/^func tcSliceHeader/-0 \ // tcSliceHeader typechecks an OSLICEHEADER node. mv typecheck1:/^\tcase ir.OSEND:/+2,/^\tcase /-2 tcSend add typecheck.go:/^func tcSend/-0 \ // tcSend typechecks an OSEND node. mv typecheck1:/^\tcase ir.ORECV:/+2,/^\tcase /-2 tcRecv add typecheck.go:/^func tcRecv/-0 \ // tcRecv typechecks an ORECV node. mv typecheck1:/^\tcase ir.OINDEX:/+2,/^\tcase /-2 tcIndex add typecheck.go:/^func tcIndex/-0 \ // tcIndex typechecks an OINDEX node. mv typecheck1:/^\tcase ir.ODOTTYPE:/+2,/^\tcase /-2 tcDotType add typecheck.go:/^func tcDotType/-0 \ // tcDotType typechecks an ODOTTYPE node. mv typecheck1:/^\tcase ir.OXDOT,/+2,/^\tcase /-2 tcDot add typecheck.go:/^func tcDot/-0 \ // tcDot typechecks an OXDOT or ODOT node. mv typecheck1:/^\tcase ir.OADDR:/+2,/^\tcase /-2 tcAddr add typecheck.go:/^func tcAddr/-0 \ // tcAddr typechecks an OADDR node. mv typecheck1:/^\tcase ir.OBITNOT,/+2,/^\tcase /-3 tcUnaryArith add typecheck.go:/^func tcUnaryArith/-0 \ // tcUnaryArith typechecks a unary arithmetic expression. mv typecheck1:/^\t\tir.OXOR:/+1,/^\tcase /-2 tcArith add typecheck.go:/^func tcArith/-0 \ // tcArith typechecks a binary arithmetic expression. mv typecheck1:/^\tcase ir.ODEREF:/+2,/^\tcase /-2 tcStar add typecheck.go:/^func tcStar/-0 \ // tcStar typechecks an ODEREF node, which may be an expression or a type. mv typecheck1:/^\tcase ir.OTFUNC:/+2,/^\tcase /-2 tcFuncType add typecheck.go:/^func tcFuncType/-0 \ // tcFuncType typechecks an OTFUNC node. mv typecheck1:/^\tcase ir.OTINTER:/+2,/^\tcase /-2 tcInterfaceType add typecheck.go:/^func tcInterfaceType/-0 \ // tcInterfaceType typechecks an OTINTER node. mv typecheck1:/^\tcase ir.OTSTRUCT:/+2,/^\tcase /-2 tcStructType add typecheck.go:/^func tcStructType/-0 \ // tcStructType typechecks an OTSTRUCT node. mv typecheck1:/^\tcase ir.OTCHAN:/+2,/^\tcase /-2 tcChanType add typecheck.go:/^func tcChanType/-0 \ // tcChanType typechecks an OTCHAN node. mv typecheck1:/^\tcase ir.OTMAP:/+2,/^\tcase /-2 tcMapType add typecheck.go:/^func tcMapType/-0 \ // tcMapType typechecks an OTMAP node. mv typecheck1:/^\tcase ir.OTARRAY:/+2,/^\tcase /-2 tcArrayType add typecheck.go:/^func tcArrayType/-0 \ // tcArrayType typechecks an OTARRAY node. mv typecheck1:/^\tcase ir.OTSLICE:/+2,/^\tcase /-2 tcSliceType add typecheck.go:/^func tcSliceType/-0 \ // tcSliceType typechecks an OTSLICE node. 
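# With the case bodies extracted, sort the new helpers into files by theme:
# statements, expressions, call/builtin helpers, and type expressions.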
mv \ tcAssign \ tcAssignList \ tcFor \ tcGoDefer \ tcIf \ tcRange \ tcReturn \ tcSelect \ tcSend \ tcSwitch \ tcSwitchExpr \ tcSwitchType \ typeSet \ typeSetEntry \ typeSet.add \ stmt1.go mv stmt1.go stmt.go mv \ tcAddr \ tcArith \ tcArrayType \ tcChanType \ tcClosure \ tcCompLit \ tcConv \ tcDot \ tcDotType \ tcFuncType \ tcITab \ tcIndex \ tcInterfaceType \ tcLenCap \ tcMapType \ tcRecv \ tcSPtr \ tcSlice \ tcSliceHeader \ tcSliceType \ tcStar \ tcStructType \ tcUnaryArith \ expr.go mv \ tcClosure \ tcCallPart \ tcFunc \ tcCall \ tcAppend \ tcClose \ tcComplex \ tcCopy \ tcDelete \ tcMake \ tcMakeSliceCopy \ tcNew \ tcPanic \ tcPrint \ tcRealImag \ tcRecover \ func1.go mv func1.go func.go mv \ tcArrayType \ tcChanType \ tcFuncType \ tcInterfaceType \ tcMapType \ tcSliceType \ tcStructType \ type.go ' Change-Id: I0fb0a3039005bc1783575291daff1e6c306895ff Reviewed-on: https://go-review.googlesource.com/c/go/+/279429 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/typecheck/expr.go | 1001 ++++++++ src/cmd/compile/internal/typecheck/func.go | 753 +++++- src/cmd/compile/internal/typecheck/stmt.go | 433 +++- src/cmd/compile/internal/typecheck/subr.go | 2 +- src/cmd/compile/internal/typecheck/type.go | 122 + .../compile/internal/typecheck/typecheck.go | 2036 +---------------- 6 files changed, 2281 insertions(+), 2066 deletions(-) create mode 100644 src/cmd/compile/internal/typecheck/expr.go create mode 100644 src/cmd/compile/internal/typecheck/type.go diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go new file mode 100644 index 0000000000000..f940a2e73d913 --- /dev/null +++ b/src/cmd/compile/internal/typecheck/expr.go @@ -0,0 +1,1001 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typecheck + +import ( + "fmt" + "go/constant" + "go/token" + "strings" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" +) + +// tcAddr typechecks an OADDR node. +func tcAddr(n *ir.AddrExpr) ir.Node { + n.X = Expr(n.X) + if n.X.Type() == nil { + n.SetType(nil) + return n + } + + switch n.X.Op() { + case ir.OARRAYLIT, ir.OMAPLIT, ir.OSLICELIT, ir.OSTRUCTLIT: + n.SetOp(ir.OPTRLIT) + + default: + checklvalue(n.X, "take the address of") + r := ir.OuterValue(n.X) + if r.Op() == ir.ONAME { + r := r.(*ir.Name) + if ir.Orig(r) != r { + base.Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean? + } + r.Name().SetAddrtaken(true) + if r.Name().IsClosureVar() && !CaptureVarsComplete { + // Mark the original variable as Addrtaken so that capturevars + // knows not to pass it by value. + // But if the capturevars phase is complete, don't touch it, + // in case l.Name's containing function has not yet been compiled. + r.Name().Defn.Name().SetAddrtaken(true) + } + } + n.X = DefaultLit(n.X, nil) + if n.X.Type() == nil { + n.SetType(nil) + return n + } + } + + n.SetType(types.NewPtr(n.X.Type())) + return n +} + +// tcArith typechecks a binary arithmetic expression. 
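+// tcArith handles OASOP, binary, and logical nodes: it typechecks both
+// operands, applies the special cases for shifts and comparisons, rewrites
+// string addition to OADDSTR, and rejects operands the operator is not
+// defined on.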
+func tcArith(n ir.Node) ir.Node { + var l, r ir.Node + var setLR func() + switch n := n.(type) { + case *ir.AssignOpStmt: + l, r = n.X, n.Y + setLR = func() { n.X = l; n.Y = r } + case *ir.BinaryExpr: + l, r = n.X, n.Y + setLR = func() { n.X = l; n.Y = r } + case *ir.LogicalExpr: + l, r = n.X, n.Y + setLR = func() { n.X = l; n.Y = r } + } + l = Expr(l) + r = Expr(r) + setLR() + if l.Type() == nil || r.Type() == nil { + n.SetType(nil) + return n + } + op := n.Op() + if n.Op() == ir.OASOP { + n := n.(*ir.AssignOpStmt) + checkassign(n, l) + if n.IncDec && !okforarith[l.Type().Kind()] { + base.Errorf("invalid operation: %v (non-numeric type %v)", n, l.Type()) + n.SetType(nil) + return n + } + // TODO(marvin): Fix Node.EType type union. + op = n.AsOp + } + if op == ir.OLSH || op == ir.ORSH { + r = DefaultLit(r, types.Types[types.TUINT]) + setLR() + t := r.Type() + if !t.IsInteger() { + base.Errorf("invalid operation: %v (shift count type %v, must be integer)", n, r.Type()) + n.SetType(nil) + return n + } + if t.IsSigned() && !types.AllowsGoVersion(curpkg(), 1, 13) { + base.ErrorfVers("go1.13", "invalid operation: %v (signed shift count type %v)", n, r.Type()) + n.SetType(nil) + return n + } + t = l.Type() + if t != nil && t.Kind() != types.TIDEAL && !t.IsInteger() { + base.Errorf("invalid operation: %v (shift of type %v)", n, t) + n.SetType(nil) + return n + } + + // no defaultlit for left + // the outer context gives the type + n.SetType(l.Type()) + if (l.Type() == types.UntypedFloat || l.Type() == types.UntypedComplex) && r.Op() == ir.OLITERAL { + n.SetType(types.UntypedInt) + } + return n + } + + // For "x == x && len(s)", it's better to report that "len(s)" (type int) + // can't be used with "&&" than to report that "x == x" (type untyped bool) + // can't be converted to int (see issue #41500). + if n.Op() == ir.OANDAND || n.Op() == ir.OOROR { + n := n.(*ir.LogicalExpr) + if !n.X.Type().IsBoolean() { + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.X.Type())) + n.SetType(nil) + return n + } + if !n.Y.Type().IsBoolean() { + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Y.Type())) + n.SetType(nil) + return n + } + } + + // ideal mixed with non-ideal + l, r = defaultlit2(l, r, false) + setLR() + + if l.Type() == nil || r.Type() == nil { + n.SetType(nil) + return n + } + t := l.Type() + if t.Kind() == types.TIDEAL { + t = r.Type() + } + et := t.Kind() + if et == types.TIDEAL { + et = types.TINT + } + aop := ir.OXXX + if iscmp[n.Op()] && t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) { + // comparison is okay as long as one side is + // assignable to the other. convert so they have + // the same type. + // + // the only conversion that isn't a no-op is concrete == interface. + // in that case, check comparability of the concrete type. + // The conversion allocates, so only do it if the concrete type is huge. 
+ converted := false + if r.Type().Kind() != types.TBLANK { + aop, _ = assignop(l.Type(), r.Type()) + if aop != ir.OXXX { + if r.Type().IsInterface() && !l.Type().IsInterface() && !types.IsComparable(l.Type()) { + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type())) + n.SetType(nil) + return n + } + + types.CalcSize(l.Type()) + if r.Type().IsInterface() == l.Type().IsInterface() || l.Type().Width >= 1<<16 { + l = ir.NewConvExpr(base.Pos, aop, r.Type(), l) + l.SetTypecheck(1) + setLR() + } + + t = r.Type() + converted = true + } + } + + if !converted && l.Type().Kind() != types.TBLANK { + aop, _ = assignop(r.Type(), l.Type()) + if aop != ir.OXXX { + if l.Type().IsInterface() && !r.Type().IsInterface() && !types.IsComparable(r.Type()) { + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type())) + n.SetType(nil) + return n + } + + types.CalcSize(r.Type()) + if r.Type().IsInterface() == l.Type().IsInterface() || r.Type().Width >= 1<<16 { + r = ir.NewConvExpr(base.Pos, aop, l.Type(), r) + r.SetTypecheck(1) + setLR() + } + + t = l.Type() + } + } + + et = t.Kind() + } + + if t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) { + l, r = defaultlit2(l, r, true) + if l.Type() == nil || r.Type() == nil { + n.SetType(nil) + return n + } + if l.Type().IsInterface() == r.Type().IsInterface() || aop == 0 { + base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type()) + n.SetType(nil) + return n + } + } + + if t.Kind() == types.TIDEAL { + t = mixUntyped(l.Type(), r.Type()) + } + if dt := defaultType(t); !okfor[op][dt.Kind()] { + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t)) + n.SetType(nil) + return n + } + + // okfor allows any array == array, map == map, func == func. + // restrict to slice/map/func == nil and nil == slice/map/func. + if l.Type().IsArray() && !types.IsComparable(l.Type()) { + base.Errorf("invalid operation: %v (%v cannot be compared)", n, l.Type()) + n.SetType(nil) + return n + } + + if l.Type().IsSlice() && !ir.IsNil(l) && !ir.IsNil(r) { + base.Errorf("invalid operation: %v (slice can only be compared to nil)", n) + n.SetType(nil) + return n + } + + if l.Type().IsMap() && !ir.IsNil(l) && !ir.IsNil(r) { + base.Errorf("invalid operation: %v (map can only be compared to nil)", n) + n.SetType(nil) + return n + } + + if l.Type().Kind() == types.TFUNC && !ir.IsNil(l) && !ir.IsNil(r) { + base.Errorf("invalid operation: %v (func can only be compared to nil)", n) + n.SetType(nil) + return n + } + + if l.Type().IsStruct() { + if f := types.IncomparableField(l.Type()); f != nil { + base.Errorf("invalid operation: %v (struct containing %v cannot be compared)", n, f.Type) + n.SetType(nil) + return n + } + } + + if iscmp[n.Op()] { + t = types.UntypedBool + n.SetType(t) + if con := EvalConst(n); con.Op() == ir.OLITERAL { + return con + } + l, r = defaultlit2(l, r, true) + setLR() + return n + } + + if et == types.TSTRING && n.Op() == ir.OADD { + // create or update OADDSTR node with list of strings in x + y + z + (w + v) + ... + n := n.(*ir.BinaryExpr) + var add *ir.AddStringExpr + if l.Op() == ir.OADDSTR { + add = l.(*ir.AddStringExpr) + add.SetPos(n.Pos()) + } else { + add = ir.NewAddStringExpr(n.Pos(), []ir.Node{l}) + } + if r.Op() == ir.OADDSTR { + r := r.(*ir.AddStringExpr) + add.List.Append(r.List.Take()...) 
+ } else { + add.List.Append(r) + } + add.SetType(t) + return add + } + + if (op == ir.ODIV || op == ir.OMOD) && ir.IsConst(r, constant.Int) { + if constant.Sign(r.Val()) == 0 { + base.Errorf("division by zero") + n.SetType(nil) + return n + } + } + + n.SetType(t) + return n +} + +// The result of tcCompLit MUST be assigned back to n, e.g. +// n.Left = tcCompLit(n.Left) +func tcCompLit(n *ir.CompLitExpr) (res ir.Node) { + if base.EnableTrace && base.Flag.LowerT { + defer tracePrint("typecheckcomplit", n)(&res) + } + + lno := base.Pos + defer func() { + base.Pos = lno + }() + + if n.Ntype == nil { + base.ErrorfAt(n.Pos(), "missing type in composite literal") + n.SetType(nil) + return n + } + + // Save original node (including n.Right) + n.SetOrig(ir.Copy(n)) + + ir.SetPos(n.Ntype) + + // Need to handle [...]T arrays specially. + if array, ok := n.Ntype.(*ir.ArrayType); ok && array.Elem != nil && array.Len == nil { + array.Elem = typecheck(array.Elem, ctxType) + elemType := array.Elem.Type() + if elemType == nil { + n.SetType(nil) + return n + } + length := typecheckarraylit(elemType, -1, n.List, "array literal") + n.SetOp(ir.OARRAYLIT) + n.SetType(types.NewArray(elemType, length)) + n.Ntype = nil + return n + } + + n.Ntype = ir.Node(typecheck(n.Ntype, ctxType)).(ir.Ntype) + t := n.Ntype.Type() + if t == nil { + n.SetType(nil) + return n + } + n.SetType(t) + + switch t.Kind() { + default: + base.Errorf("invalid composite literal type %v", t) + n.SetType(nil) + + case types.TARRAY: + typecheckarraylit(t.Elem(), t.NumElem(), n.List, "array literal") + n.SetOp(ir.OARRAYLIT) + n.Ntype = nil + + case types.TSLICE: + length := typecheckarraylit(t.Elem(), -1, n.List, "slice literal") + n.SetOp(ir.OSLICELIT) + n.Ntype = nil + n.Len = length + + case types.TMAP: + var cs constSet + for i3, l := range n.List { + ir.SetPos(l) + if l.Op() != ir.OKEY { + n.List[i3] = Expr(l) + base.Errorf("missing key in map literal") + continue + } + l := l.(*ir.KeyExpr) + + r := l.Key + r = pushtype(r, t.Key()) + r = Expr(r) + l.Key = AssignConv(r, t.Key(), "map key") + cs.add(base.Pos, l.Key, "key", "map literal") + + r = l.Value + r = pushtype(r, t.Elem()) + r = Expr(r) + l.Value = AssignConv(r, t.Elem(), "map value") + } + + n.SetOp(ir.OMAPLIT) + n.Ntype = nil + + case types.TSTRUCT: + // Need valid field offsets for Xoffset below. + types.CalcSize(t) + + errored := false + if len(n.List) != 0 && nokeys(n.List) { + // simple list of variables + ls := n.List + for i, n1 := range ls { + ir.SetPos(n1) + n1 = Expr(n1) + ls[i] = n1 + if i >= t.NumFields() { + if !errored { + base.Errorf("too many values in %v", n) + errored = true + } + continue + } + + f := t.Field(i) + s := f.Sym + if s != nil && !types.IsExported(s.Name) && s.Pkg != types.LocalPkg { + base.Errorf("implicit assignment of unexported field '%s' in %v literal", s.Name, t) + } + // No pushtype allowed here. Must name fields for that. + n1 = AssignConv(n1, f.Type, "field value") + sk := ir.NewStructKeyExpr(base.Pos, f.Sym, n1) + sk.Offset = f.Offset + ls[i] = sk + } + if len(ls) < t.NumFields() { + base.Errorf("too few values in %v", n) + } + } else { + hash := make(map[string]bool) + + // keyed list + ls := n.List + for i, l := range ls { + ir.SetPos(l) + + if l.Op() == ir.OKEY { + kv := l.(*ir.KeyExpr) + key := kv.Key + + // Sym might have resolved to name in other top-level + // package, because of import dot. Redirect to correct sym + // before we do the lookup. 
+ s := key.Sym() + if id, ok := key.(*ir.Ident); ok && DotImportRefs[id] != nil { + s = Lookup(s.Name) + } + + // An OXDOT uses the Sym field to hold + // the field to the right of the dot, + // so s will be non-nil, but an OXDOT + // is never a valid struct literal key. + if s == nil || s.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || s.IsBlank() { + base.Errorf("invalid field name %v in struct initializer", key) + continue + } + + l = ir.NewStructKeyExpr(l.Pos(), s, kv.Value) + ls[i] = l + } + + if l.Op() != ir.OSTRUCTKEY { + if !errored { + base.Errorf("mixture of field:value and value initializers") + errored = true + } + ls[i] = Expr(ls[i]) + continue + } + l := l.(*ir.StructKeyExpr) + + f := lookdot1(nil, l.Field, t, t.Fields(), 0) + if f == nil { + if ci := lookdot1(nil, l.Field, t, t.Fields(), 2); ci != nil { // Case-insensitive lookup. + if visible(ci.Sym) { + base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", l.Field, t, ci.Sym) + } else if nonexported(l.Field) && l.Field.Name == ci.Sym.Name { // Ensure exactness before the suggestion. + base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", l.Field, t) + } else { + base.Errorf("unknown field '%v' in struct literal of type %v", l.Field, t) + } + continue + } + var f *types.Field + p, _ := dotpath(l.Field, t, &f, true) + if p == nil || f.IsMethod() { + base.Errorf("unknown field '%v' in struct literal of type %v", l.Field, t) + continue + } + // dotpath returns the parent embedded types in reverse order. + var ep []string + for ei := len(p) - 1; ei >= 0; ei-- { + ep = append(ep, p[ei].field.Sym.Name) + } + ep = append(ep, l.Field.Name) + base.Errorf("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), t) + continue + } + fielddup(f.Sym.Name, hash) + l.Offset = f.Offset + + // No pushtype allowed here. Tried and rejected. + l.Value = Expr(l.Value) + l.Value = AssignConv(l.Value, f.Type, "field value") + } + } + + n.SetOp(ir.OSTRUCTLIT) + n.Ntype = nil + } + + return n +} + +// tcConv typechecks an OCONV node. +func tcConv(n *ir.ConvExpr) ir.Node { + types.CheckSize(n.Type()) // ensure width is calculated for backend + n.X = Expr(n.X) + n.X = convlit1(n.X, n.Type(), true, nil) + t := n.X.Type() + if t == nil || n.Type() == nil { + n.SetType(nil) + return n + } + op, why := convertop(n.X.Op() == ir.OLITERAL, t, n.Type()) + if op == ir.OXXX { + if !n.Diag() && !n.Type().Broke() && !n.X.Diag() { + base.Errorf("cannot convert %L to type %v%s", n.X, n.Type(), why) + n.SetDiag(true) + } + n.SetOp(ir.OCONV) + n.SetType(nil) + return n + } + + n.SetOp(op) + switch n.Op() { + case ir.OCONVNOP: + if t.Kind() == n.Type().Kind() { + switch t.Kind() { + case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128: + // Floating point casts imply rounding and + // so the conversion must be kept. + n.SetOp(ir.OCONV) + } + } + + // do not convert to []byte literal. See CL 125796. + // generated code and compiler memory footprint is better without it. + case ir.OSTR2BYTES: + // ok + + case ir.OSTR2RUNES: + if n.X.Op() == ir.OLITERAL { + return stringtoruneslit(n) + } + } + return n +} + +// tcDot typechecks an OXDOT or ODOT node. 
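+// It resolves embedded fields via AddImplicitDots, dereferences pointer
+// receivers (ODOTPTR), and, when the selector is not in call position
+// (ctxCallee), turns method selections into method values via tcCallPart.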
+func tcDot(n *ir.SelectorExpr, top int) ir.Node { + if n.Op() == ir.OXDOT { + n = AddImplicitDots(n) + n.SetOp(ir.ODOT) + if n.X == nil { + n.SetType(nil) + return n + } + } + + n.X = typecheck(n.X, ctxExpr|ctxType) + + n.X = DefaultLit(n.X, nil) + + t := n.X.Type() + if t == nil { + base.UpdateErrorDot(ir.Line(n), fmt.Sprint(n.X), fmt.Sprint(n)) + n.SetType(nil) + return n + } + + s := n.Sel + + if n.X.Op() == ir.OTYPE { + return typecheckMethodExpr(n) + } + + if t.IsPtr() && !t.Elem().IsInterface() { + t = t.Elem() + if t == nil { + n.SetType(nil) + return n + } + n.SetOp(ir.ODOTPTR) + types.CheckSize(t) + } + + if n.Sel.IsBlank() { + base.Errorf("cannot refer to blank field or method") + n.SetType(nil) + return n + } + + if lookdot(n, t, 0) == nil { + // Legitimate field or method lookup failed, try to explain the error + switch { + case t.IsEmptyInterface(): + base.Errorf("%v undefined (type %v is interface with no methods)", n, n.X.Type()) + + case t.IsPtr() && t.Elem().IsInterface(): + // Pointer to interface is almost always a mistake. + base.Errorf("%v undefined (type %v is pointer to interface, not interface)", n, n.X.Type()) + + case lookdot(n, t, 1) != nil: + // Field or method matches by name, but it is not exported. + base.Errorf("%v undefined (cannot refer to unexported field or method %v)", n, n.Sel) + + default: + if mt := lookdot(n, t, 2); mt != nil && visible(mt.Sym) { // Case-insensitive lookup. + base.Errorf("%v undefined (type %v has no field or method %v, but does have %v)", n, n.X.Type(), n.Sel, mt.Sym) + } else { + base.Errorf("%v undefined (type %v has no field or method %v)", n, n.X.Type(), n.Sel) + } + } + n.SetType(nil) + return n + } + + if (n.Op() == ir.ODOTINTER || n.Op() == ir.ODOTMETH) && top&ctxCallee == 0 { + return tcCallPart(n, s) + } + return n +} + +// tcDotType typechecks an ODOTTYPE node. +func tcDotType(n *ir.TypeAssertExpr) ir.Node { + n.X = Expr(n.X) + n.X = DefaultLit(n.X, nil) + l := n.X + t := l.Type() + if t == nil { + n.SetType(nil) + return n + } + if !t.IsInterface() { + base.Errorf("invalid type assertion: %v (non-interface type %v on left)", n, t) + n.SetType(nil) + return n + } + + if n.Ntype != nil { + n.Ntype = typecheck(n.Ntype, ctxType) + n.SetType(n.Ntype.Type()) + n.Ntype = nil + if n.Type() == nil { + return n + } + } + + if n.Type() != nil && !n.Type().IsInterface() { + var missing, have *types.Field + var ptr int + if !implements(n.Type(), t, &missing, &have, &ptr) { + if have != nil && have.Sym == missing.Sym { + base.Errorf("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+ + "\t\thave %v%S\n\t\twant %v%S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) + } else if ptr != 0 { + base.Errorf("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", n.Type(), t, missing.Sym) + } else if have != nil { + base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)\n"+ + "\t\thave %v%S\n\t\twant %v%S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) + } else { + base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)", n.Type(), t, missing.Sym) + } + n.SetType(nil) + return n + } + } + return n +} + +// tcITab typechecks an OITAB node. 
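+// The operand must be an interface value; the result, of type *uintptr,
+// is its itab (or type) word.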
+func tcITab(n *ir.UnaryExpr) ir.Node { + n.X = Expr(n.X) + t := n.X.Type() + if t == nil { + n.SetType(nil) + return n + } + if !t.IsInterface() { + base.Fatalf("OITAB of %v", t) + } + n.SetType(types.NewPtr(types.Types[types.TUINTPTR])) + return n +} + +// tcIndex typechecks an OINDEX node. +func tcIndex(n *ir.IndexExpr) ir.Node { + n.X = Expr(n.X) + n.X = DefaultLit(n.X, nil) + n.X = implicitstar(n.X) + l := n.X + n.Index = Expr(n.Index) + r := n.Index + t := l.Type() + if t == nil || r.Type() == nil { + n.SetType(nil) + return n + } + switch t.Kind() { + default: + base.Errorf("invalid operation: %v (type %v does not support indexing)", n, t) + n.SetType(nil) + return n + + case types.TSTRING, types.TARRAY, types.TSLICE: + n.Index = indexlit(n.Index) + if t.IsString() { + n.SetType(types.ByteType) + } else { + n.SetType(t.Elem()) + } + why := "string" + if t.IsArray() { + why = "array" + } else if t.IsSlice() { + why = "slice" + } + + if n.Index.Type() != nil && !n.Index.Type().IsInteger() { + base.Errorf("non-integer %s index %v", why, n.Index) + return n + } + + if !n.Bounded() && ir.IsConst(n.Index, constant.Int) { + x := n.Index.Val() + if constant.Sign(x) < 0 { + base.Errorf("invalid %s index %v (index must be non-negative)", why, n.Index) + } else if t.IsArray() && constant.Compare(x, token.GEQ, constant.MakeInt64(t.NumElem())) { + base.Errorf("invalid array index %v (out of bounds for %d-element array)", n.Index, t.NumElem()) + } else if ir.IsConst(n.X, constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(ir.StringVal(n.X))))) { + base.Errorf("invalid string index %v (out of bounds for %d-byte string)", n.Index, len(ir.StringVal(n.X))) + } else if ir.ConstOverflow(x, types.Types[types.TINT]) { + base.Errorf("invalid %s index %v (index too large)", why, n.Index) + } + } + + case types.TMAP: + n.Index = AssignConv(n.Index, t.Key(), "map index") + n.SetType(t.Elem()) + n.SetOp(ir.OINDEXMAP) + n.Assigned = false + } + return n +} + +// tcLenCap typechecks an OLEN or OCAP node. +func tcLenCap(n *ir.UnaryExpr) ir.Node { + n.X = Expr(n.X) + n.X = DefaultLit(n.X, nil) + n.X = implicitstar(n.X) + l := n.X + t := l.Type() + if t == nil { + n.SetType(nil) + return n + } + + var ok bool + if n.Op() == ir.OLEN { + ok = okforlen[t.Kind()] + } else { + ok = okforcap[t.Kind()] + } + if !ok { + base.Errorf("invalid argument %L for %v", l, n.Op()) + n.SetType(nil) + return n + } + + n.SetType(types.Types[types.TINT]) + return n +} + +// tcRecv typechecks an ORECV node. +func tcRecv(n *ir.UnaryExpr) ir.Node { + n.X = Expr(n.X) + n.X = DefaultLit(n.X, nil) + l := n.X + t := l.Type() + if t == nil { + n.SetType(nil) + return n + } + if !t.IsChan() { + base.Errorf("invalid operation: %v (receive from non-chan type %v)", n, t) + n.SetType(nil) + return n + } + + if !t.ChanDir().CanRecv() { + base.Errorf("invalid operation: %v (receive from send-only type %v)", n, t) + n.SetType(nil) + return n + } + + n.SetType(t.Elem()) + return n +} + +// tcSPtr typechecks an OSPTR node. +func tcSPtr(n *ir.UnaryExpr) ir.Node { + n.X = Expr(n.X) + t := n.X.Type() + if t == nil { + n.SetType(nil) + return n + } + if !t.IsSlice() && !t.IsString() { + base.Fatalf("OSPTR of %v", t) + } + if t.IsString() { + n.SetType(types.NewPtr(types.Types[types.TUINT8])) + } else { + n.SetType(types.NewPtr(t.Elem())) + } + return n +} + +// tcSlice typechecks an OSLICE or OSLICE3 node. 
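+// Strings use OSLICESTR (and reject the 3-index form), addressable arrays
+// are sliced through an implicit &, pointers to arrays become OSLICEARR or
+// OSLICE3ARR, and constant bounds are checked for validity.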
+func tcSlice(n *ir.SliceExpr) ir.Node { + n.X = Expr(n.X) + low, high, max := n.SliceBounds() + hasmax := n.Op().IsSlice3() + low = Expr(low) + high = Expr(high) + max = Expr(max) + n.X = DefaultLit(n.X, nil) + low = indexlit(low) + high = indexlit(high) + max = indexlit(max) + n.SetSliceBounds(low, high, max) + l := n.X + if l.Type() == nil { + n.SetType(nil) + return n + } + if l.Type().IsArray() { + if !ir.IsAssignable(n.X) { + base.Errorf("invalid operation %v (slice of unaddressable value)", n) + n.SetType(nil) + return n + } + + addr := NodAddr(n.X) + addr.SetImplicit(true) + n.X = Expr(addr) + l = n.X + } + t := l.Type() + var tp *types.Type + if t.IsString() { + if hasmax { + base.Errorf("invalid operation %v (3-index slice of string)", n) + n.SetType(nil) + return n + } + n.SetType(t) + n.SetOp(ir.OSLICESTR) + } else if t.IsPtr() && t.Elem().IsArray() { + tp = t.Elem() + n.SetType(types.NewSlice(tp.Elem())) + types.CalcSize(n.Type()) + if hasmax { + n.SetOp(ir.OSLICE3ARR) + } else { + n.SetOp(ir.OSLICEARR) + } + } else if t.IsSlice() { + n.SetType(t) + } else { + base.Errorf("cannot slice %v (type %v)", l, t) + n.SetType(nil) + return n + } + + if low != nil && !checksliceindex(l, low, tp) { + n.SetType(nil) + return n + } + if high != nil && !checksliceindex(l, high, tp) { + n.SetType(nil) + return n + } + if max != nil && !checksliceindex(l, max, tp) { + n.SetType(nil) + return n + } + if !checksliceconst(low, high) || !checksliceconst(low, max) || !checksliceconst(high, max) { + n.SetType(nil) + return n + } + return n +} + +// tcSliceHeader typechecks an OSLICEHEADER node. +func tcSliceHeader(n *ir.SliceHeaderExpr) ir.Node { + // Errors here are Fatalf instead of Errorf because only the compiler + // can construct an OSLICEHEADER node. + // Components used in OSLICEHEADER that are supplied by parsed source code + // have already been typechecked in e.g. OMAKESLICE earlier. + t := n.Type() + if t == nil { + base.Fatalf("no type specified for OSLICEHEADER") + } + + if !t.IsSlice() { + base.Fatalf("invalid type %v for OSLICEHEADER", n.Type()) + } + + if n.Ptr == nil || n.Ptr.Type() == nil || !n.Ptr.Type().IsUnsafePtr() { + base.Fatalf("need unsafe.Pointer for OSLICEHEADER") + } + + if x := len(n.LenCap); x != 2 { + base.Fatalf("expected 2 params (len, cap) for OSLICEHEADER, got %d", x) + } + + n.Ptr = Expr(n.Ptr) + l := Expr(n.LenCap[0]) + c := Expr(n.LenCap[1]) + l = DefaultLit(l, types.Types[types.TINT]) + c = DefaultLit(c, types.Types[types.TINT]) + + if ir.IsConst(l, constant.Int) && ir.Int64Val(l) < 0 { + base.Fatalf("len for OSLICEHEADER must be non-negative") + } + + if ir.IsConst(c, constant.Int) && ir.Int64Val(c) < 0 { + base.Fatalf("cap for OSLICEHEADER must be non-negative") + } + + if ir.IsConst(l, constant.Int) && ir.IsConst(c, constant.Int) && constant.Compare(l.Val(), token.GTR, c.Val()) { + base.Fatalf("len larger than cap for OSLICEHEADER") + } + + n.LenCap[0] = l + n.LenCap[1] = c + return n +} + +// tcStar typechecks an ODEREF node, which may be an expression or a type. +func tcStar(n *ir.StarExpr, top int) ir.Node { + n.X = typecheck(n.X, ctxExpr|ctxType) + l := n.X + t := l.Type() + if t == nil { + n.SetType(nil) + return n + } + if l.Op() == ir.OTYPE { + n.SetOTYPE(types.NewPtr(l.Type())) + // Ensure l.Type gets dowidth'd for the backend. Issue 20174. 
+ types.CheckSize(l.Type()) + return n + } + + if !t.IsPtr() { + if top&(ctxExpr|ctxStmt) != 0 { + base.Errorf("invalid indirect of %L", n.X) + n.SetType(nil) + return n + } + base.Errorf("%v is not a type", l) + return n + } + + n.SetType(t.Elem()) + return n +} + +// tcUnaryArith typechecks a unary arithmetic expression. +func tcUnaryArith(n *ir.UnaryExpr) ir.Node { + n.X = Expr(n.X) + l := n.X + t := l.Type() + if t == nil { + n.SetType(nil) + return n + } + if !okfor[n.Op()][defaultType(t).Kind()] { + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(t)) + n.SetType(nil) + return n + } + + n.SetType(t) + return n +} diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index 4675de6cad410..99d81dcedece1 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -10,6 +10,8 @@ import ( "cmd/compile/internal/types" "fmt" + "go/constant" + "go/token" ) // package all the arguments that match a ... T parameter into a []T. @@ -156,66 +158,6 @@ func CaptureVars(fn *ir.Func) { base.Pos = lno } -// typecheckclosure typechecks an OCLOSURE node. It also creates the named -// function associated with the closure. -// TODO: This creation of the named function should probably really be done in a -// separate pass from type-checking. -func typecheckclosure(clo *ir.ClosureExpr, top int) { - fn := clo.Func - // Set current associated iota value, so iota can be used inside - // function in ConstSpec, see issue #22344 - if x := getIotaValue(); x >= 0 { - fn.Iota = x - } - - fn.ClosureType = check(fn.ClosureType, ctxType) - clo.SetType(fn.ClosureType.Type()) - fn.SetClosureCalled(top&ctxCallee != 0) - - // Do not typecheck fn twice, otherwise, we will end up pushing - // fn to Target.Decls multiple times, causing initLSym called twice. - // See #30709 - if fn.Typecheck() == 1 { - return - } - - for _, ln := range fn.ClosureVars { - n := ln.Defn - if !n.Name().Captured() { - n.Name().SetCaptured(true) - if n.Name().Decldepth == 0 { - base.Fatalf("typecheckclosure: var %v does not have decldepth assigned", n) - } - - // Ignore assignments to the variable in straightline code - // preceding the first capturing by a closure. - if n.Name().Decldepth == decldepth { - n.Name().SetAssigned(false) - } - } - } - - fn.Nname.SetSym(closurename(ir.CurFunc)) - ir.MarkFunc(fn.Nname) - Func(fn) - - // Type check the body now, but only if we're inside a function. - // At top level (in a variable initialization: curfn==nil) we're not - // ready to type check code yet; we'll check it later, because the - // underlying closure function we create is added to Target.Decls. - if ir.CurFunc != nil && clo.Type() != nil { - oldfn := ir.CurFunc - ir.CurFunc = fn - olddd := decldepth - decldepth = 1 - Stmts(fn.Body) - decldepth = olddd - ir.CurFunc = oldfn - } - - Target.Decls = append(Target.Decls, fn) -} - // Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck // because they're a copy of an already checked body. func ImportedBody(fn *ir.Func) { @@ -380,7 +322,67 @@ func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir. return fn } -func typecheckpartialcall(n ir.Node, sym *types.Sym) *ir.CallPartExpr { +// tcClosure typechecks an OCLOSURE node. It also creates the named +// function associated with the closure. +// TODO: This creation of the named function should probably really be done in a +// separate pass from type-checking. 
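+// tcClosure also propagates the enclosing iota value into the function,
+// marks variables captured by the closure, assigns the closure a generated
+// name, and typechecks the body immediately if we are already inside a
+// function.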
+func tcClosure(clo *ir.ClosureExpr, top int) { + fn := clo.Func + // Set current associated iota value, so iota can be used inside + // function in ConstSpec, see issue #22344 + if x := getIotaValue(); x >= 0 { + fn.Iota = x + } + + fn.ClosureType = typecheck(fn.ClosureType, ctxType) + clo.SetType(fn.ClosureType.Type()) + fn.SetClosureCalled(top&ctxCallee != 0) + + // Do not typecheck fn twice, otherwise, we will end up pushing + // fn to Target.Decls multiple times, causing initLSym called twice. + // See #30709 + if fn.Typecheck() == 1 { + return + } + + for _, ln := range fn.ClosureVars { + n := ln.Defn + if !n.Name().Captured() { + n.Name().SetCaptured(true) + if n.Name().Decldepth == 0 { + base.Fatalf("typecheckclosure: var %v does not have decldepth assigned", n) + } + + // Ignore assignments to the variable in straightline code + // preceding the first capturing by a closure. + if n.Name().Decldepth == decldepth { + n.Name().SetAssigned(false) + } + } + } + + fn.Nname.SetSym(closurename(ir.CurFunc)) + ir.MarkFunc(fn.Nname) + Func(fn) + + // Type check the body now, but only if we're inside a function. + // At top level (in a variable initialization: curfn==nil) we're not + // ready to type check code yet; we'll check it later, because the + // underlying closure function we create is added to Target.Decls. + if ir.CurFunc != nil && clo.Type() != nil { + oldfn := ir.CurFunc + ir.CurFunc = fn + olddd := decldepth + decldepth = 1 + Stmts(fn.Body) + decldepth = olddd + ir.CurFunc = oldfn + } + + Target.Decls = append(Target.Decls, fn) +} + +func tcCallPart(n ir.Node, sym *types.Sym) *ir.CallPartExpr { switch n.Op() { case ir.ODOTINTER, ir.ODOTMETH: break @@ -396,3 +398,632 @@ func typecheckpartialcall(n ir.Node, sym *types.Sym) *ir.CallPartExpr { return ir.NewCallPartExpr(dot.Pos(), dot.X, dot.Selection, fn) } + +// type check function definition +// To be called by typecheck, not directly. +// (Call typecheckFunc instead.) +func tcFunc(n *ir.Func) { + if base.EnableTrace && base.Flag.LowerT { + defer tracePrint("typecheckfunc", n)(nil) + } + + for _, ln := range n.Dcl { + if ln.Op() == ir.ONAME && (ln.Class_ == ir.PPARAM || ln.Class_ == ir.PPARAMOUT) { + ln.Decldepth = 1 + } + } + + n.Nname = AssignExpr(n.Nname).(*ir.Name) + t := n.Nname.Type() + if t == nil { + return + } + n.SetType(t) + rcvr := t.Recv() + if rcvr != nil && n.Shortname != nil { + m := addmethod(n, n.Shortname, t, true, n.Pragma&ir.Nointerface != 0) + if m == nil { + return + } + + n.Nname.SetSym(ir.MethodSym(rcvr.Type, n.Shortname)) + Declare(n.Nname, ir.PFUNC) + } + + if base.Ctxt.Flag_dynlink && !inimport && n.Nname != nil { + NeedFuncSym(n.Sym()) + } +} + +// tcCall typechecks an OCALL node. +func tcCall(n *ir.CallExpr, top int) ir.Node { + n.Use = ir.CallUseExpr + if top == ctxStmt { + n.Use = ir.CallUseStmt + } + Stmts(n.Init()) // imported rewritten f(g()) calls (#30907) + n.X = typecheck(n.X, ctxExpr|ctxType|ctxCallee) + if n.X.Diag() { + n.SetDiag(true) + } + + l := n.X + + if l.Op() == ir.ONAME && l.(*ir.Name).BuiltinOp != 0 { + l := l.(*ir.Name) + if n.IsDDD && l.BuiltinOp != ir.OAPPEND { + base.Errorf("invalid use of ... with builtin %v", l) + } + + // builtin: OLEN, OCAP, etc. 
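+		// Rewrite the call into the builtin's own op: either directly, or
+		// via a unary/binary expression for the one- and two-argument
+		// builtins, then re-typecheck the result.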
+ switch l.BuiltinOp { + default: + base.Fatalf("unknown builtin %v", l) + + case ir.OAPPEND, ir.ODELETE, ir.OMAKE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER: + n.SetOp(l.BuiltinOp) + n.X = nil + n.SetTypecheck(0) // re-typechecking new op is OK, not a loop + return typecheck(n, top) + + case ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.OPANIC, ir.OREAL: + typecheckargs(n) + fallthrough + case ir.ONEW, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF: + arg, ok := needOneArg(n, "%v", n.Op()) + if !ok { + n.SetType(nil) + return n + } + u := ir.NewUnaryExpr(n.Pos(), l.BuiltinOp, arg) + return typecheck(ir.InitExpr(n.Init(), u), top) // typecheckargs can add to old.Init + + case ir.OCOMPLEX, ir.OCOPY: + typecheckargs(n) + arg1, arg2, ok := needTwoArgs(n) + if !ok { + n.SetType(nil) + return n + } + b := ir.NewBinaryExpr(n.Pos(), l.BuiltinOp, arg1, arg2) + return typecheck(ir.InitExpr(n.Init(), b), top) // typecheckargs can add to old.Init + } + panic("unreachable") + } + + n.X = DefaultLit(n.X, nil) + l = n.X + if l.Op() == ir.OTYPE { + if n.IsDDD { + if !l.Type().Broke() { + base.Errorf("invalid use of ... in type conversion to %v", l.Type()) + } + n.SetDiag(true) + } + + // pick off before type-checking arguments + arg, ok := needOneArg(n, "conversion to %v", l.Type()) + if !ok { + n.SetType(nil) + return n + } + + n := ir.NewConvExpr(n.Pos(), ir.OCONV, nil, arg) + n.SetType(l.Type()) + return typecheck1(n, top) + } + + typecheckargs(n) + t := l.Type() + if t == nil { + n.SetType(nil) + return n + } + types.CheckSize(t) + + switch l.Op() { + case ir.ODOTINTER: + n.SetOp(ir.OCALLINTER) + + case ir.ODOTMETH: + l := l.(*ir.SelectorExpr) + n.SetOp(ir.OCALLMETH) + + // typecheckaste was used here but there wasn't enough + // information further down the call chain to know if we + // were testing a method receiver for unexported fields. + // It isn't necessary, so just do a sanity check. + tp := t.Recv().Type + + if l.X == nil || !types.Identical(l.X.Type(), tp) { + base.Fatalf("method receiver") + } + + default: + n.SetOp(ir.OCALLFUNC) + if t.Kind() != types.TFUNC { + // TODO(mdempsky): Remove "o.Sym() != nil" once we stop + // using ir.Name for numeric literals. + if o := ir.Orig(l); o.Name() != nil && o.Sym() != nil && types.BuiltinPkg.Lookup(o.Sym().Name).Def != nil { + // be more specific when the non-function + // name matches a predeclared function + base.Errorf("cannot call non-function %L, declared at %s", + l, base.FmtPos(o.Name().Pos())) + } else { + base.Errorf("cannot call non-function %L", l) + } + n.SetType(nil) + return n + } + } + + typecheckaste(ir.OCALL, n.X, n.IsDDD, t.Params(), n.Args, func() string { return fmt.Sprintf("argument to %v", n.X) }) + if t.NumResults() == 0 { + return n + } + if t.NumResults() == 1 { + n.SetType(l.Type().Results().Field(0).Type) + + if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME { + if sym := n.X.(*ir.Name).Sym(); types.IsRuntimePkg(sym.Pkg) && sym.Name == "getg" { + // Emit code for runtime.getg() directly instead of calling function. + // Most such rewrites (for example the similar one for math.Sqrt) should be done in walk, + // so that the ordering pass can make sure to preserve the semantics of the original code + // (in particular, the exact time of the function call) by introducing temporaries. + // In this case, we know getg() always returns the same result within a given function + // and we want to avoid the temporaries, so we do the rewrite earlier than is typical. 
+ n.SetOp(ir.OGETG) + } + } + return n + } + + // multiple return + if top&(ctxMultiOK|ctxStmt) == 0 { + base.Errorf("multiple-value %v() in single-value context", l) + return n + } + + n.SetType(l.Type().Results()) + return n +} + +// tcAppend typechecks an OAPPEND node. +func tcAppend(n *ir.CallExpr) ir.Node { + typecheckargs(n) + args := n.Args + if len(args) == 0 { + base.Errorf("missing arguments to append") + n.SetType(nil) + return n + } + + t := args[0].Type() + if t == nil { + n.SetType(nil) + return n + } + + n.SetType(t) + if !t.IsSlice() { + if ir.IsNil(args[0]) { + base.Errorf("first argument to append must be typed slice; have untyped nil") + n.SetType(nil) + return n + } + + base.Errorf("first argument to append must be slice; have %L", t) + n.SetType(nil) + return n + } + + if n.IsDDD { + if len(args) == 1 { + base.Errorf("cannot use ... on first argument to append") + n.SetType(nil) + return n + } + + if len(args) != 2 { + base.Errorf("too many arguments to append") + n.SetType(nil) + return n + } + + if t.Elem().IsKind(types.TUINT8) && args[1].Type().IsString() { + args[1] = DefaultLit(args[1], types.Types[types.TSTRING]) + return n + } + + args[1] = AssignConv(args[1], t.Underlying(), "append") + return n + } + + as := args[1:] + for i, n := range as { + if n.Type() == nil { + continue + } + as[i] = AssignConv(n, t.Elem(), "append") + types.CheckSize(as[i].Type()) // ensure width is calculated for backend + } + return n +} + +// tcClose typechecks an OCLOSE node. +func tcClose(n *ir.UnaryExpr) ir.Node { + n.X = Expr(n.X) + n.X = DefaultLit(n.X, nil) + l := n.X + t := l.Type() + if t == nil { + n.SetType(nil) + return n + } + if !t.IsChan() { + base.Errorf("invalid operation: %v (non-chan type %v)", n, t) + n.SetType(nil) + return n + } + + if !t.ChanDir().CanSend() { + base.Errorf("invalid operation: %v (cannot close receive-only channel)", n) + n.SetType(nil) + return n + } + return n +} + +// tcComplex typechecks an OCOMPLEX node. +func tcComplex(n *ir.BinaryExpr) ir.Node { + l := Expr(n.X) + r := Expr(n.Y) + if l.Type() == nil || r.Type() == nil { + n.SetType(nil) + return n + } + l, r = defaultlit2(l, r, false) + if l.Type() == nil || r.Type() == nil { + n.SetType(nil) + return n + } + n.X = l + n.Y = r + + if !types.Identical(l.Type(), r.Type()) { + base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type()) + n.SetType(nil) + return n + } + + var t *types.Type + switch l.Type().Kind() { + default: + base.Errorf("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type()) + n.SetType(nil) + return n + + case types.TIDEAL: + t = types.UntypedComplex + + case types.TFLOAT32: + t = types.Types[types.TCOMPLEX64] + + case types.TFLOAT64: + t = types.Types[types.TCOMPLEX128] + } + n.SetType(t) + return n +} + +// tcCopy typechecks an OCOPY node. 
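+// The result has type int. copy([]byte, string) is allowed as a special
+// case; otherwise both arguments must be slices with identical element
+// types.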
+func tcCopy(n *ir.BinaryExpr) ir.Node { + n.SetType(types.Types[types.TINT]) + n.X = Expr(n.X) + n.X = DefaultLit(n.X, nil) + n.Y = Expr(n.Y) + n.Y = DefaultLit(n.Y, nil) + if n.X.Type() == nil || n.Y.Type() == nil { + n.SetType(nil) + return n + } + + // copy([]byte, string) + if n.X.Type().IsSlice() && n.Y.Type().IsString() { + if types.Identical(n.X.Type().Elem(), types.ByteType) { + return n + } + base.Errorf("arguments to copy have different element types: %L and string", n.X.Type()) + n.SetType(nil) + return n + } + + if !n.X.Type().IsSlice() || !n.Y.Type().IsSlice() { + if !n.X.Type().IsSlice() && !n.Y.Type().IsSlice() { + base.Errorf("arguments to copy must be slices; have %L, %L", n.X.Type(), n.Y.Type()) + } else if !n.X.Type().IsSlice() { + base.Errorf("first argument to copy should be slice; have %L", n.X.Type()) + } else { + base.Errorf("second argument to copy should be slice or string; have %L", n.Y.Type()) + } + n.SetType(nil) + return n + } + + if !types.Identical(n.X.Type().Elem(), n.Y.Type().Elem()) { + base.Errorf("arguments to copy have different element types: %L and %L", n.X.Type(), n.Y.Type()) + n.SetType(nil) + return n + } + return n +} + +// tcDelete typechecks an ODELETE node. +func tcDelete(n *ir.CallExpr) ir.Node { + typecheckargs(n) + args := n.Args + if len(args) == 0 { + base.Errorf("missing arguments to delete") + n.SetType(nil) + return n + } + + if len(args) == 1 { + base.Errorf("missing second (key) argument to delete") + n.SetType(nil) + return n + } + + if len(args) != 2 { + base.Errorf("too many arguments to delete") + n.SetType(nil) + return n + } + + l := args[0] + r := args[1] + if l.Type() != nil && !l.Type().IsMap() { + base.Errorf("first argument to delete must be map; have %L", l.Type()) + n.SetType(nil) + return n + } + + args[1] = AssignConv(r, l.Type().Key(), "delete") + return n +} + +// tcMake typechecks an OMAKE node. 
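+// tcMake checks the type being made and dispatches to OMAKESLICE, OMAKEMAP,
+// or OMAKECHAN, validating the optional len, cap, size, or buffer arguments
+// along the way and rejecting any extras.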
+func tcMake(n *ir.CallExpr) ir.Node {
+	args := n.Args
+	if len(args) == 0 {
+		base.Errorf("missing argument to make")
+		n.SetType(nil)
+		return n
+	}
+
+	n.Args.Set(nil)
+	l := args[0]
+	l = typecheck(l, ctxType)
+	t := l.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+
+	i := 1
+	var nn ir.Node
+	switch t.Kind() {
+	default:
+		base.Errorf("cannot make type %v", t)
+		n.SetType(nil)
+		return n
+
+	case types.TSLICE:
+		if i >= len(args) {
+			base.Errorf("missing len argument to make(%v)", t)
+			n.SetType(nil)
+			return n
+		}
+
+		l = args[i]
+		i++
+		l = Expr(l)
+		var r ir.Node
+		if i < len(args) {
+			r = args[i]
+			i++
+			r = Expr(r)
+		}
+
+		if l.Type() == nil || (r != nil && r.Type() == nil) {
+			n.SetType(nil)
+			return n
+		}
+		if !checkmake(t, "len", &l) || r != nil && !checkmake(t, "cap", &r) {
+			n.SetType(nil)
+			return n
+		}
+		if ir.IsConst(l, constant.Int) && r != nil && ir.IsConst(r, constant.Int) && constant.Compare(l.Val(), token.GTR, r.Val()) {
+			base.Errorf("len larger than cap in make(%v)", t)
+			n.SetType(nil)
+			return n
+		}
+		nn = ir.NewMakeExpr(n.Pos(), ir.OMAKESLICE, l, r)
+
+	case types.TMAP:
+		if i < len(args) {
+			l = args[i]
+			i++
+			l = Expr(l)
+			l = DefaultLit(l, types.Types[types.TINT])
+			if l.Type() == nil {
+				n.SetType(nil)
+				return n
+			}
+			if !checkmake(t, "size", &l) {
+				n.SetType(nil)
+				return n
+			}
+		} else {
+			l = ir.NewInt(0)
+		}
+		nn = ir.NewMakeExpr(n.Pos(), ir.OMAKEMAP, l, nil)
+		nn.SetEsc(n.Esc())
+
+	case types.TCHAN:
+		l = nil
+		if i < len(args) {
+			l = args[i]
+			i++
+			l = Expr(l)
+			l = DefaultLit(l, types.Types[types.TINT])
+			if l.Type() == nil {
+				n.SetType(nil)
+				return n
+			}
+			if !checkmake(t, "buffer", &l) {
+				n.SetType(nil)
+				return n
+			}
+		} else {
+			l = ir.NewInt(0)
+		}
+		nn = ir.NewMakeExpr(n.Pos(), ir.OMAKECHAN, l, nil)
+	}
+
+	if i < len(args) {
+		base.Errorf("too many arguments to make(%v)", t)
+		n.SetType(nil)
+		return n
+	}
+
+	nn.SetType(t)
+	return nn
+}
+
+// tcMakeSliceCopy typechecks an OMAKESLICECOPY node.
+func tcMakeSliceCopy(n *ir.MakeExpr) ir.Node {
+	// Errors here are Fatalf instead of Errorf because only the compiler
+	// can construct an OMAKESLICECOPY node.
+	// Components used in OMAKESLICECOPY that are supplied by parsed source code
+	// have already been typechecked in OMAKE and OCOPY earlier.
+	t := n.Type()
+
+	if t == nil {
+		base.Fatalf("no type specified for OMAKESLICECOPY")
+	}
+
+	if !t.IsSlice() {
+		base.Fatalf("invalid type %v for OMAKESLICECOPY", n.Type())
+	}
+
+	if n.Len == nil {
+		base.Fatalf("missing len argument for OMAKESLICECOPY")
+	}
+
+	if n.Cap == nil {
+		base.Fatalf("missing slice argument to copy for OMAKESLICECOPY")
+	}
+
+	n.Len = Expr(n.Len)
+	n.Cap = Expr(n.Cap)
+
+	n.Len = DefaultLit(n.Len, types.Types[types.TINT])
+
+	if !n.Len.Type().IsInteger() && n.Type().Kind() != types.TIDEAL {
+		base.Errorf("non-integer len argument in OMAKESLICECOPY")
+	}
+
+	if ir.IsConst(n.Len, constant.Int) {
+		if ir.ConstOverflow(n.Len.Val(), types.Types[types.TINT]) {
+			base.Fatalf("len for OMAKESLICECOPY too large")
+		}
+		if constant.Sign(n.Len.Val()) < 0 {
+			base.Fatalf("len for OMAKESLICECOPY must be non-negative")
+		}
+	}
+	return n
+}
+
+// tcNew typechecks an ONEW node.
+func tcNew(n *ir.UnaryExpr) ir.Node {
+	if n.X == nil {
+		// Fatalf because the OCALL above checked for us,
+		// so this must be an internally-generated mistake.
+ base.Fatalf("missing argument to new") + } + l := n.X + l = typecheck(l, ctxType) + t := l.Type() + if t == nil { + n.SetType(nil) + return n + } + n.X = l + n.SetType(types.NewPtr(t)) + return n +} + +// tcPanic typechecks an OPANIC node. +func tcPanic(n *ir.UnaryExpr) ir.Node { + n.X = Expr(n.X) + n.X = DefaultLit(n.X, types.Types[types.TINTER]) + if n.X.Type() == nil { + n.SetType(nil) + return n + } + return n +} + +// tcPrint typechecks an OPRINT or OPRINTN node. +func tcPrint(n *ir.CallExpr) ir.Node { + typecheckargs(n) + ls := n.Args + for i1, n1 := range ls { + // Special case for print: int constant is int64, not int. + if ir.IsConst(n1, constant.Int) { + ls[i1] = DefaultLit(ls[i1], types.Types[types.TINT64]) + } else { + ls[i1] = DefaultLit(ls[i1], nil) + } + } + return n +} + +// tcRealImag typechecks an OREAL or OIMAG node. +func tcRealImag(n *ir.UnaryExpr) ir.Node { + n.X = Expr(n.X) + l := n.X + t := l.Type() + if t == nil { + n.SetType(nil) + return n + } + + // Determine result type. + switch t.Kind() { + case types.TIDEAL: + n.SetType(types.UntypedFloat) + case types.TCOMPLEX64: + n.SetType(types.Types[types.TFLOAT32]) + case types.TCOMPLEX128: + n.SetType(types.Types[types.TFLOAT64]) + default: + base.Errorf("invalid argument %L for %v", l, n.Op()) + n.SetType(nil) + return n + } + return n +} + +// tcRecover typechecks an ORECOVER node. +func tcRecover(n *ir.CallExpr) ir.Node { + if len(n.Args) != 0 { + base.Errorf("too many arguments to recover") + n.SetType(nil) + return n + } + + n.SetType(types.Types[types.TINTER]) + return n +} diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go index 889ee06d6e7d6..bf3801eea274c 100644 --- a/src/cmd/compile/internal/typecheck/stmt.go +++ b/src/cmd/compile/internal/typecheck/stmt.go @@ -11,33 +11,6 @@ import ( "cmd/internal/src" ) -// range -func typecheckrange(n *ir.RangeStmt) { - // Typechecking order is important here: - // 0. first typecheck range expression (slice/map/chan), - // it is evaluated only once and so logically it is not part of the loop. - // 1. typecheck produced values, - // this part can declare new vars and so it must be typechecked before body, - // because body can contain a closure that captures the vars. - // 2. decldepth++ to denote loop body. - // 3. typecheck body. - // 4. decldepth--. - typecheckrangeExpr(n) - - // second half of dance, the first half being typecheckrangeExpr - n.SetTypecheck(1) - ls := n.Vars - for i1, n1 := range ls { - if n1.Typecheck() == 0 { - ls[i1] = AssignExpr(ls[i1]) - } - } - - decldepth++ - Stmts(n.Body) - decldepth-- -} - func typecheckrangeExpr(n *ir.RangeStmt) { n.X = Expr(n.X) @@ -136,8 +109,326 @@ func typecheckrangeExpr(n *ir.RangeStmt) { } } +// type check assignment. +// if this assignment is the definition of a var on the left side, +// fill in the var's type. +func tcAssign(n *ir.AssignStmt) { + if base.EnableTrace && base.Flag.LowerT { + defer tracePrint("typecheckas", n)(nil) + } + + // delicate little dance. + // the definition of n may refer to this assignment + // as its definition, in which case it will call typecheckas. + // in that case, do not call typecheck back, or it will cycle. + // if the variable has a type (ntype) then typechecking + // will not look at defn, so it is okay (and desirable, + // so that the conversion below happens). 
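+	// For example, in "x := f()" the ONAME for x records this statement as
+	// its Defn; ir.DeclaredBy below relies on exactly that link.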
+ n.X = Resolve(n.X) + + if !ir.DeclaredBy(n.X, n) || n.X.Name().Ntype != nil { + n.X = AssignExpr(n.X) + } + + // Use ctxMultiOK so we can emit an "N variables but M values" error + // to be consistent with typecheckas2 (#26616). + n.Y = typecheck(n.Y, ctxExpr|ctxMultiOK) + checkassign(n, n.X) + if n.Y != nil && n.Y.Type() != nil { + if n.Y.Type().IsFuncArgStruct() { + base.Errorf("assignment mismatch: 1 variable but %v returns %d values", n.Y.(*ir.CallExpr).X, n.Y.Type().NumFields()) + // Multi-value RHS isn't actually valid for OAS; nil out + // to indicate failed typechecking. + n.Y.SetType(nil) + } else if n.X.Type() != nil { + n.Y = AssignConv(n.Y, n.X.Type(), "assignment") + } + } + + if ir.DeclaredBy(n.X, n) && n.X.Name().Ntype == nil { + n.Y = DefaultLit(n.Y, nil) + n.X.SetType(n.Y.Type()) + } + + // second half of dance. + // now that right is done, typecheck the left + // just to get it over with. see dance above. + n.SetTypecheck(1) + + if n.X.Typecheck() == 0 { + n.X = AssignExpr(n.X) + } + if !ir.IsBlank(n.X) { + types.CheckSize(n.X.Type()) // ensure width is calculated for backend + } +} + +func tcAssignList(n *ir.AssignListStmt) { + if base.EnableTrace && base.Flag.LowerT { + defer tracePrint("typecheckas2", n)(nil) + } + + ls := n.Lhs + for i1, n1 := range ls { + // delicate little dance. + n1 = Resolve(n1) + ls[i1] = n1 + + if !ir.DeclaredBy(n1, n) || n1.Name().Ntype != nil { + ls[i1] = AssignExpr(ls[i1]) + } + } + + cl := len(n.Lhs) + cr := len(n.Rhs) + if cl > 1 && cr == 1 { + n.Rhs[0] = typecheck(n.Rhs[0], ctxExpr|ctxMultiOK) + } else { + Exprs(n.Rhs) + } + checkassignlist(n, n.Lhs) + + var l ir.Node + var r ir.Node + if cl == cr { + // easy + ls := n.Lhs + rs := n.Rhs + for il, nl := range ls { + nr := rs[il] + if nl.Type() != nil && nr.Type() != nil { + rs[il] = AssignConv(nr, nl.Type(), "assignment") + } + if ir.DeclaredBy(nl, n) && nl.Name().Ntype == nil { + rs[il] = DefaultLit(rs[il], nil) + nl.SetType(rs[il].Type()) + } + } + + goto out + } + + l = n.Lhs[0] + r = n.Rhs[0] + + // x,y,z = f() + if cr == 1 { + if r.Type() == nil { + goto out + } + switch r.Op() { + case ir.OCALLMETH, ir.OCALLINTER, ir.OCALLFUNC: + if !r.Type().IsFuncArgStruct() { + break + } + cr = r.Type().NumFields() + if cr != cl { + goto mismatch + } + r.(*ir.CallExpr).Use = ir.CallUseList + n.SetOp(ir.OAS2FUNC) + for i, l := range n.Lhs { + f := r.Type().Field(i) + if f.Type != nil && l.Type() != nil { + checkassignto(f.Type, l) + } + if ir.DeclaredBy(l, n) && l.Name().Ntype == nil { + l.SetType(f.Type) + } + } + goto out + } + } + + // x, ok = y + if cl == 2 && cr == 1 { + if r.Type() == nil { + goto out + } + switch r.Op() { + case ir.OINDEXMAP, ir.ORECV, ir.ODOTTYPE: + switch r.Op() { + case ir.OINDEXMAP: + n.SetOp(ir.OAS2MAPR) + case ir.ORECV: + n.SetOp(ir.OAS2RECV) + case ir.ODOTTYPE: + r := r.(*ir.TypeAssertExpr) + n.SetOp(ir.OAS2DOTTYPE) + r.SetOp(ir.ODOTTYPE2) + } + if l.Type() != nil { + checkassignto(r.Type(), l) + } + if ir.DeclaredBy(l, n) { + l.SetType(r.Type()) + } + l := n.Lhs[1] + if l.Type() != nil && !l.Type().IsBoolean() { + checkassignto(types.Types[types.TBOOL], l) + } + if ir.DeclaredBy(l, n) && l.Name().Ntype == nil { + l.SetType(types.Types[types.TBOOL]) + } + goto out + } + } + +mismatch: + switch r.Op() { + default: + base.Errorf("assignment mismatch: %d variables but %d values", cl, cr) + case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: + r := r.(*ir.CallExpr) + base.Errorf("assignment mismatch: %d variables but %v returns %d values", cl, r.X, cr) + } + + // second 
half of dance +out: + n.SetTypecheck(1) + ls = n.Lhs + for i1, n1 := range ls { + if n1.Typecheck() == 0 { + ls[i1] = AssignExpr(ls[i1]) + } + } +} + +// tcFor typechecks an OFOR node. +func tcFor(n *ir.ForStmt) ir.Node { + Stmts(n.Init()) + decldepth++ + n.Cond = Expr(n.Cond) + n.Cond = DefaultLit(n.Cond, nil) + if n.Cond != nil { + t := n.Cond.Type() + if t != nil && !t.IsBoolean() { + base.Errorf("non-bool %L used as for condition", n.Cond) + } + } + n.Post = Stmt(n.Post) + if n.Op() == ir.OFORUNTIL { + Stmts(n.Late) + } + Stmts(n.Body) + decldepth-- + return n +} + +func tcGoDefer(n *ir.GoDeferStmt) { + what := "defer" + if n.Op() == ir.OGO { + what = "go" + } + + switch n.Call.Op() { + // ok + case ir.OCALLINTER, + ir.OCALLMETH, + ir.OCALLFUNC, + ir.OCLOSE, + ir.OCOPY, + ir.ODELETE, + ir.OPANIC, + ir.OPRINT, + ir.OPRINTN, + ir.ORECOVER: + return + + case ir.OAPPEND, + ir.OCAP, + ir.OCOMPLEX, + ir.OIMAG, + ir.OLEN, + ir.OMAKE, + ir.OMAKESLICE, + ir.OMAKECHAN, + ir.OMAKEMAP, + ir.ONEW, + ir.OREAL, + ir.OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof + if orig := ir.Orig(n.Call); orig.Op() == ir.OCONV { + break + } + base.ErrorfAt(n.Pos(), "%s discards result of %v", what, n.Call) + return + } + + // type is broken or missing, most likely a method call on a broken type + // we will warn about the broken type elsewhere. no need to emit a potentially confusing error + if n.Call.Type() == nil || n.Call.Type().Broke() { + return + } + + if !n.Diag() { + // The syntax made sure it was a call, so this must be + // a conversion. + n.SetDiag(true) + base.ErrorfAt(n.Pos(), "%s requires function call, not conversion", what) + } +} + +// tcIf typechecks an OIF node. +func tcIf(n *ir.IfStmt) ir.Node { + Stmts(n.Init()) + n.Cond = Expr(n.Cond) + n.Cond = DefaultLit(n.Cond, nil) + if n.Cond != nil { + t := n.Cond.Type() + if t != nil && !t.IsBoolean() { + base.Errorf("non-bool %L used as if condition", n.Cond) + } + } + Stmts(n.Body) + Stmts(n.Else) + return n +} + +// range +func tcRange(n *ir.RangeStmt) { + // Typechecking order is important here: + // 0. first typecheck range expression (slice/map/chan), + // it is evaluated only once and so logically it is not part of the loop. + // 1. typecheck produced values, + // this part can declare new vars and so it must be typechecked before body, + // because body can contain a closure that captures the vars. + // 2. decldepth++ to denote loop body. + // 3. typecheck body. + // 4. decldepth--. + typecheckrangeExpr(n) + + // second half of dance, the first half being typecheckrangeExpr + n.SetTypecheck(1) + ls := n.Vars + for i1, n1 := range ls { + if n1.Typecheck() == 0 { + ls[i1] = AssignExpr(ls[i1]) + } + } + + decldepth++ + Stmts(n.Body) + decldepth-- +} + +// tcReturn typechecks an ORETURN node. +func tcReturn(n *ir.ReturnStmt) ir.Node { + typecheckargs(n) + if ir.CurFunc == nil { + base.Errorf("return outside function") + n.SetType(nil) + return n + } + + if ir.HasNamedResults(ir.CurFunc) && len(n.Results) == 0 { + return n + } + typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), n.Results, func() string { return "return argument" }) + return n +} + // select -func typecheckselect(sel *ir.SelectStmt) { +func tcSelect(sel *ir.SelectStmt) { var def ir.Node lno := ir.SetPos(sel) Stmts(sel.Init()) @@ -219,35 +510,43 @@ func typecheckselect(sel *ir.SelectStmt) { base.Pos = lno } -type typeSet struct { - m map[string][]typeSetEntry -} +// tcSend typechecks an OSEND node. 
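+// For a statement like "ch <- v", it checks that ch is a channel whose
+// direction permits sends and converts v to the channel's element type.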
+func tcSend(n *ir.SendStmt) ir.Node { + n.Chan = Expr(n.Chan) + n.Value = Expr(n.Value) + n.Chan = DefaultLit(n.Chan, nil) + t := n.Chan.Type() + if t == nil { + return n + } + if !t.IsChan() { + base.Errorf("invalid operation: %v (send to non-chan type %v)", n, t) + return n + } -func (s *typeSet) add(pos src.XPos, typ *types.Type) { - if s.m == nil { - s.m = make(map[string][]typeSetEntry) + if !t.ChanDir().CanSend() { + base.Errorf("invalid operation: %v (send to receive-only type %v)", n, t) + return n } - // LongString does not uniquely identify types, so we need to - // disambiguate collisions with types.Identical. - // TODO(mdempsky): Add a method that *is* unique. - ls := typ.LongString() - prevs := s.m[ls] - for _, prev := range prevs { - if types.Identical(typ, prev.typ) { - base.ErrorfAt(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, base.FmtPos(prev.pos)) - return - } + n.Value = AssignConv(n.Value, t.Elem(), "send") + if n.Value.Type() == nil { + return n } - s.m[ls] = append(prevs, typeSetEntry{pos, typ}) + return n } -type typeSetEntry struct { - pos src.XPos - typ *types.Type +// tcSwitch typechecks a switch statement. +func tcSwitch(n *ir.SwitchStmt) { + Stmts(n.Init()) + if n.Tag != nil && n.Tag.Op() == ir.OTYPESW { + tcSwitchType(n) + } else { + tcSwitchExpr(n) + } } -func typecheckExprSwitch(n *ir.SwitchStmt) { +func tcSwitchExpr(n *ir.SwitchStmt) { t := types.Types[types.TBOOL] if n.Tag != nil { n.Tag = Expr(n.Tag) @@ -328,7 +627,7 @@ func typecheckExprSwitch(n *ir.SwitchStmt) { } } -func typecheckTypeSwitch(n *ir.SwitchStmt) { +func tcSwitchType(n *ir.SwitchStmt) { guard := n.Tag.(*ir.TypeSwitchGuard) guard.X = Expr(guard.X) t := guard.X.Type() @@ -358,7 +657,7 @@ func typecheckTypeSwitch(n *ir.SwitchStmt) { } for i := range ls { - ls[i] = check(ls[i], ctxExpr|ctxType) + ls[i] = typecheck(ls[i], ctxExpr|ctxType) n1 := ls[i] if t == nil || n1.Type() == nil { continue @@ -424,12 +723,30 @@ func typecheckTypeSwitch(n *ir.SwitchStmt) { } } -// typecheckswitch typechecks a switch statement. -func typecheckswitch(n *ir.SwitchStmt) { - Stmts(n.Init()) - if n.Tag != nil && n.Tag.Op() == ir.OTYPESW { - typecheckTypeSwitch(n) - } else { - typecheckExprSwitch(n) +type typeSet struct { + m map[string][]typeSetEntry +} + +type typeSetEntry struct { + pos src.XPos + typ *types.Type +} + +func (s *typeSet) add(pos src.XPos, typ *types.Type) { + if s.m == nil { + s.m = make(map[string][]typeSetEntry) } + + // LongString does not uniquely identify types, so we need to + // disambiguate collisions with types.Identical. + // TODO(mdempsky): Add a method that *is* unique. + ls := typ.LongString() + prevs := s.m[ls] + for _, prev := range prevs { + if types.Identical(typ, prev.typ) { + base.ErrorfAt(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, base.FmtPos(prev.pos)) + return + } + } + s.m[ls] = append(prevs, typeSetEntry{pos, typ}) } diff --git a/src/cmd/compile/internal/typecheck/subr.go b/src/cmd/compile/internal/typecheck/subr.go index 22ebf2a4b3ea2..178eba4484e1c 100644 --- a/src/cmd/compile/internal/typecheck/subr.go +++ b/src/cmd/compile/internal/typecheck/subr.go @@ -81,7 +81,7 @@ func NodNil() ir.Node { // will give shortest unique addressing. // modify the tree with missing type names. 
func AddImplicitDots(n *ir.SelectorExpr) *ir.SelectorExpr { - n.X = check(n.X, ctxType|ctxExpr) + n.X = typecheck(n.X, ctxType|ctxExpr) if n.X.Diag() { n.SetDiag(true) } diff --git a/src/cmd/compile/internal/typecheck/type.go b/src/cmd/compile/internal/typecheck/type.go new file mode 100644 index 0000000000000..4782bb9c3180a --- /dev/null +++ b/src/cmd/compile/internal/typecheck/type.go @@ -0,0 +1,122 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typecheck + +import ( + "go/constant" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" +) + +// tcArrayType typechecks an OTARRAY node. +func tcArrayType(n *ir.ArrayType) ir.Node { + n.Elem = typecheck(n.Elem, ctxType) + if n.Elem.Type() == nil { + return n + } + if n.Len == nil { // [...]T + if !n.Diag() { + n.SetDiag(true) + base.Errorf("use of [...] array outside of array literal") + } + return n + } + n.Len = indexlit(Expr(n.Len)) + size := n.Len + if ir.ConstType(size) != constant.Int { + switch { + case size.Type() == nil: + // Error already reported elsewhere. + case size.Type().IsInteger() && size.Op() != ir.OLITERAL: + base.Errorf("non-constant array bound %v", size) + default: + base.Errorf("invalid array bound %v", size) + } + return n + } + + v := size.Val() + if ir.ConstOverflow(v, types.Types[types.TINT]) { + base.Errorf("array bound is too large") + return n + } + + if constant.Sign(v) < 0 { + base.Errorf("array bound must be non-negative") + return n + } + + bound, _ := constant.Int64Val(v) + t := types.NewArray(n.Elem.Type(), bound) + n.SetOTYPE(t) + types.CheckSize(t) + return n +} + +// tcChanType typechecks an OTCHAN node. +func tcChanType(n *ir.ChanType) ir.Node { + n.Elem = typecheck(n.Elem, ctxType) + l := n.Elem + if l.Type() == nil { + return n + } + if l.Type().NotInHeap() { + base.Errorf("chan of incomplete (or unallocatable) type not allowed") + } + n.SetOTYPE(types.NewChan(l.Type(), n.Dir)) + return n +} + +// tcFuncType typechecks an OTFUNC node. +func tcFuncType(n *ir.FuncType) ir.Node { + n.SetOTYPE(NewFuncType(n.Recv, n.Params, n.Results)) + return n +} + +// tcInterfaceType typechecks an OTINTER node. +func tcInterfaceType(n *ir.InterfaceType) ir.Node { + n.SetOTYPE(tointerface(n.Methods)) + return n +} + +// tcMapType typechecks an OTMAP node. +func tcMapType(n *ir.MapType) ir.Node { + n.Key = typecheck(n.Key, ctxType) + n.Elem = typecheck(n.Elem, ctxType) + l := n.Key + r := n.Elem + if l.Type() == nil || r.Type() == nil { + return n + } + if l.Type().NotInHeap() { + base.Errorf("incomplete (or unallocatable) map key not allowed") + } + if r.Type().NotInHeap() { + base.Errorf("incomplete (or unallocatable) map value not allowed") + } + n.SetOTYPE(types.NewMap(l.Type(), r.Type())) + mapqueue = append(mapqueue, n) // check map keys when all types are settled + return n +} + +// tcSliceType typechecks an OTSLICE node. +func tcSliceType(n *ir.SliceType) ir.Node { + n.Elem = typecheck(n.Elem, ctxType) + if n.Elem.Type() == nil { + return n + } + t := types.NewSlice(n.Elem.Type()) + n.SetOTYPE(t) + types.CheckSize(t) + return n +} + +// tcStructType typechecks an OTSTRUCT node. 
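+// Like tcInterfaceType and tcFuncType above, it only lowers the syntax:
+// NewStructType resolves the field list and SetOTYPE records the
+// resulting *types.Type on the node.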
+func tcStructType(n *ir.StructType) ir.Node { + n.SetOTYPE(NewStructType(n.Fields)) + return n +} diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index 2abf0a78248cc..bf43402d3dabe 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -122,9 +122,9 @@ func Package() { } } -func AssignExpr(n ir.Node) ir.Node { return check(n, ctxExpr|ctxAssign) } -func Expr(n ir.Node) ir.Node { return check(n, ctxExpr) } -func Stmt(n ir.Node) ir.Node { return check(n, ctxStmt) } +func AssignExpr(n ir.Node) ir.Node { return typecheck(n, ctxExpr|ctxAssign) } +func Expr(n ir.Node) ir.Node { return typecheck(n, ctxExpr) } +func Stmt(n ir.Node) ir.Node { return typecheck(n, ctxStmt) } func Exprs(exprs []ir.Node) { typecheckslice(exprs, ctxExpr) } func Stmts(stmts []ir.Node) { typecheckslice(stmts, ctxStmt) } @@ -138,13 +138,13 @@ func Call(call *ir.CallExpr) { if t.NumResults() > 0 { ctx = ctxExpr | ctxMultiOK } - if check(call, ctx) != call { + if typecheck(call, ctx) != call { panic("bad typecheck") } } func Callee(n ir.Node) ir.Node { - return check(n, ctxExpr|ctxCallee) + return typecheck(n, ctxExpr|ctxCallee) } func FuncBody(n *ir.Func) { @@ -277,7 +277,7 @@ func Resolve(n ir.Node) (res ir.Node) { func typecheckslice(l []ir.Node, top int) { for i := range l { - l[i] = check(l[i], top) + l[i] = typecheck(l[i], top) } } @@ -367,13 +367,13 @@ func Func(fn *ir.Func) { } func typecheckNtype(n ir.Ntype) ir.Ntype { - return check(n, ctxType).(ir.Ntype) + return typecheck(n, ctxType).(ir.Ntype) } -// check type checks node n. -// The result of check MUST be assigned back to n, e.g. -// n.Left = check(n.Left, top) -func check(n ir.Node, top int) (res ir.Node) { +// typecheck type checks node n. +// The result of typecheck MUST be assigned back to n, e.g. +// n.Left = typecheck(n.Left, top) +func typecheck(n ir.Node, top int) (res ir.Node) { // cannot type check until all the source has been parsed if !TypecheckAllowed { base.Fatalf("early typecheck") @@ -572,11 +572,7 @@ func indexlit(n ir.Node) ir.Node { } // typecheck1 should ONLY be called from typecheck. -func typecheck1(n ir.Node, top int) (res ir.Node) { - if base.EnableTrace && base.Flag.LowerT { - defer tracePrint("typecheck1", n)(&res) - } - +func typecheck1(n ir.Node, top int) ir.Node { switch n.Op() { case ir.OLITERAL, ir.ONAME, ir.ONONAME, ir.OTYPE: if n.Sym() == nil { @@ -653,136 +649,35 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OTSLICE: n := n.(*ir.SliceType) - n.Elem = check(n.Elem, ctxType) - if n.Elem.Type() == nil { - return n - } - t := types.NewSlice(n.Elem.Type()) - n.SetOTYPE(t) - types.CheckSize(t) - return n + return tcSliceType(n) case ir.OTARRAY: n := n.(*ir.ArrayType) - n.Elem = check(n.Elem, ctxType) - if n.Elem.Type() == nil { - return n - } - if n.Len == nil { // [...]T - if !n.Diag() { - n.SetDiag(true) - base.Errorf("use of [...] array outside of array literal") - } - return n - } - n.Len = indexlit(Expr(n.Len)) - size := n.Len - if ir.ConstType(size) != constant.Int { - switch { - case size.Type() == nil: - // Error already reported elsewhere. 
- case size.Type().IsInteger() && size.Op() != ir.OLITERAL: - base.Errorf("non-constant array bound %v", size) - default: - base.Errorf("invalid array bound %v", size) - } - return n - } - - v := size.Val() - if ir.ConstOverflow(v, types.Types[types.TINT]) { - base.Errorf("array bound is too large") - return n - } - - if constant.Sign(v) < 0 { - base.Errorf("array bound must be non-negative") - return n - } - - bound, _ := constant.Int64Val(v) - t := types.NewArray(n.Elem.Type(), bound) - n.SetOTYPE(t) - types.CheckSize(t) - return n + return tcArrayType(n) case ir.OTMAP: n := n.(*ir.MapType) - n.Key = check(n.Key, ctxType) - n.Elem = check(n.Elem, ctxType) - l := n.Key - r := n.Elem - if l.Type() == nil || r.Type() == nil { - return n - } - if l.Type().NotInHeap() { - base.Errorf("incomplete (or unallocatable) map key not allowed") - } - if r.Type().NotInHeap() { - base.Errorf("incomplete (or unallocatable) map value not allowed") - } - n.SetOTYPE(types.NewMap(l.Type(), r.Type())) - mapqueue = append(mapqueue, n) // check map keys when all types are settled - return n + return tcMapType(n) case ir.OTCHAN: n := n.(*ir.ChanType) - n.Elem = check(n.Elem, ctxType) - l := n.Elem - if l.Type() == nil { - return n - } - if l.Type().NotInHeap() { - base.Errorf("chan of incomplete (or unallocatable) type not allowed") - } - n.SetOTYPE(types.NewChan(l.Type(), n.Dir)) - return n + return tcChanType(n) case ir.OTSTRUCT: n := n.(*ir.StructType) - n.SetOTYPE(NewStructType(n.Fields)) - return n + return tcStructType(n) case ir.OTINTER: n := n.(*ir.InterfaceType) - n.SetOTYPE(tointerface(n.Methods)) - return n + return tcInterfaceType(n) case ir.OTFUNC: n := n.(*ir.FuncType) - n.SetOTYPE(NewFuncType(n.Recv, n.Params, n.Results)) - return n - + return tcFuncType(n) // type or expr case ir.ODEREF: n := n.(*ir.StarExpr) - n.X = check(n.X, ctxExpr|ctxType) - l := n.X - t := l.Type() - if t == nil { - n.SetType(nil) - return n - } - if l.Op() == ir.OTYPE { - n.SetOTYPE(types.NewPtr(l.Type())) - // Ensure l.Type gets dowidth'd for the backend. Issue 20174. - types.CheckSize(l.Type()) - return n - } - - if !t.IsPtr() { - if top&(ctxExpr|ctxStmt) != 0 { - base.Errorf("invalid indirect of %L", n.X) - n.SetType(nil) - return n - } - base.Errorf("%v is not a type", l) - return n - } - - n.SetType(t.Elem()) - return n - + return tcStar(n, top) // arithmetic exprs case ir.OASOP, ir.OADD, @@ -804,1324 +699,117 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { ir.OOROR, ir.OSUB, ir.OXOR: - var l, r ir.Node - var setLR func() - switch n := n.(type) { - case *ir.AssignOpStmt: - l, r = n.X, n.Y - setLR = func() { n.X = l; n.Y = r } - case *ir.BinaryExpr: - l, r = n.X, n.Y - setLR = func() { n.X = l; n.Y = r } - case *ir.LogicalExpr: - l, r = n.X, n.Y - setLR = func() { n.X = l; n.Y = r } - } - l = Expr(l) - r = Expr(r) - setLR() - if l.Type() == nil || r.Type() == nil { - n.SetType(nil) - return n - } - op := n.Op() - if n.Op() == ir.OASOP { - n := n.(*ir.AssignOpStmt) - checkassign(n, l) - if n.IncDec && !okforarith[l.Type().Kind()] { - base.Errorf("invalid operation: %v (non-numeric type %v)", n, l.Type()) - n.SetType(nil) - return n - } - // TODO(marvin): Fix Node.EType type union. 
- op = n.AsOp - } - if op == ir.OLSH || op == ir.ORSH { - r = DefaultLit(r, types.Types[types.TUINT]) - setLR() - t := r.Type() - if !t.IsInteger() { - base.Errorf("invalid operation: %v (shift count type %v, must be integer)", n, r.Type()) - n.SetType(nil) - return n - } - if t.IsSigned() && !types.AllowsGoVersion(curpkg(), 1, 13) { - base.ErrorfVers("go1.13", "invalid operation: %v (signed shift count type %v)", n, r.Type()) - n.SetType(nil) - return n - } - t = l.Type() - if t != nil && t.Kind() != types.TIDEAL && !t.IsInteger() { - base.Errorf("invalid operation: %v (shift of type %v)", n, t) - n.SetType(nil) - return n - } - - // no defaultlit for left - // the outer context gives the type - n.SetType(l.Type()) - if (l.Type() == types.UntypedFloat || l.Type() == types.UntypedComplex) && r.Op() == ir.OLITERAL { - n.SetType(types.UntypedInt) - } - return n - } - - // For "x == x && len(s)", it's better to report that "len(s)" (type int) - // can't be used with "&&" than to report that "x == x" (type untyped bool) - // can't be converted to int (see issue #41500). - if n.Op() == ir.OANDAND || n.Op() == ir.OOROR { - n := n.(*ir.LogicalExpr) - if !n.X.Type().IsBoolean() { - base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.X.Type())) - n.SetType(nil) - return n - } - if !n.Y.Type().IsBoolean() { - base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Y.Type())) - n.SetType(nil) - return n - } - } - - // ideal mixed with non-ideal - l, r = defaultlit2(l, r, false) - setLR() - - if l.Type() == nil || r.Type() == nil { - n.SetType(nil) - return n - } - t := l.Type() - if t.Kind() == types.TIDEAL { - t = r.Type() - } - et := t.Kind() - if et == types.TIDEAL { - et = types.TINT - } - aop := ir.OXXX - if iscmp[n.Op()] && t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) { - // comparison is okay as long as one side is - // assignable to the other. convert so they have - // the same type. - // - // the only conversion that isn't a no-op is concrete == interface. - // in that case, check comparability of the concrete type. - // The conversion allocates, so only do it if the concrete type is huge. 
- converted := false - if r.Type().Kind() != types.TBLANK { - aop, _ = assignop(l.Type(), r.Type()) - if aop != ir.OXXX { - if r.Type().IsInterface() && !l.Type().IsInterface() && !types.IsComparable(l.Type()) { - base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type())) - n.SetType(nil) - return n - } - - types.CalcSize(l.Type()) - if r.Type().IsInterface() == l.Type().IsInterface() || l.Type().Width >= 1<<16 { - l = ir.NewConvExpr(base.Pos, aop, r.Type(), l) - l.SetTypecheck(1) - setLR() - } - - t = r.Type() - converted = true - } - } - - if !converted && l.Type().Kind() != types.TBLANK { - aop, _ = assignop(r.Type(), l.Type()) - if aop != ir.OXXX { - if l.Type().IsInterface() && !r.Type().IsInterface() && !types.IsComparable(r.Type()) { - base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type())) - n.SetType(nil) - return n - } - - types.CalcSize(r.Type()) - if r.Type().IsInterface() == l.Type().IsInterface() || r.Type().Width >= 1<<16 { - r = ir.NewConvExpr(base.Pos, aop, l.Type(), r) - r.SetTypecheck(1) - setLR() - } - - t = l.Type() - } - } - - et = t.Kind() - } - - if t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) { - l, r = defaultlit2(l, r, true) - if l.Type() == nil || r.Type() == nil { - n.SetType(nil) - return n - } - if l.Type().IsInterface() == r.Type().IsInterface() || aop == 0 { - base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type()) - n.SetType(nil) - return n - } - } - - if t.Kind() == types.TIDEAL { - t = mixUntyped(l.Type(), r.Type()) - } - if dt := defaultType(t); !okfor[op][dt.Kind()] { - base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t)) - n.SetType(nil) - return n - } - - // okfor allows any array == array, map == map, func == func. - // restrict to slice/map/func == nil and nil == slice/map/func. - if l.Type().IsArray() && !types.IsComparable(l.Type()) { - base.Errorf("invalid operation: %v (%v cannot be compared)", n, l.Type()) - n.SetType(nil) - return n - } - - if l.Type().IsSlice() && !ir.IsNil(l) && !ir.IsNil(r) { - base.Errorf("invalid operation: %v (slice can only be compared to nil)", n) - n.SetType(nil) - return n - } - - if l.Type().IsMap() && !ir.IsNil(l) && !ir.IsNil(r) { - base.Errorf("invalid operation: %v (map can only be compared to nil)", n) - n.SetType(nil) - return n - } - - if l.Type().Kind() == types.TFUNC && !ir.IsNil(l) && !ir.IsNil(r) { - base.Errorf("invalid operation: %v (func can only be compared to nil)", n) - n.SetType(nil) - return n - } - - if l.Type().IsStruct() { - if f := types.IncomparableField(l.Type()); f != nil { - base.Errorf("invalid operation: %v (struct containing %v cannot be compared)", n, f.Type) - n.SetType(nil) - return n - } - } - - if iscmp[n.Op()] { - t = types.UntypedBool - n.SetType(t) - if con := EvalConst(n); con.Op() == ir.OLITERAL { - return con - } - l, r = defaultlit2(l, r, true) - setLR() - return n - } - - if et == types.TSTRING && n.Op() == ir.OADD { - // create or update OADDSTR node with list of strings in x + y + z + (w + v) + ... - n := n.(*ir.BinaryExpr) - var add *ir.AddStringExpr - if l.Op() == ir.OADDSTR { - add = l.(*ir.AddStringExpr) - add.SetPos(n.Pos()) - } else { - add = ir.NewAddStringExpr(n.Pos(), []ir.Node{l}) - } - if r.Op() == ir.OADDSTR { - r := r.(*ir.AddStringExpr) - add.List.Append(r.List.Take()...) 
- } else { - add.List.Append(r) - } - add.SetType(t) - return add - } - - if (op == ir.ODIV || op == ir.OMOD) && ir.IsConst(r, constant.Int) { - if constant.Sign(r.Val()) == 0 { - base.Errorf("division by zero") - n.SetType(nil) - return n - } - } - - n.SetType(t) - return n + return tcArith(n) case ir.OBITNOT, ir.ONEG, ir.ONOT, ir.OPLUS: n := n.(*ir.UnaryExpr) - n.X = Expr(n.X) - l := n.X - t := l.Type() - if t == nil { - n.SetType(nil) - return n - } - if !okfor[n.Op()][defaultType(t).Kind()] { - base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(t)) - n.SetType(nil) - return n - } - - n.SetType(t) - return n + return tcUnaryArith(n) // exprs case ir.OADDR: n := n.(*ir.AddrExpr) - n.X = Expr(n.X) - if n.X.Type() == nil { - n.SetType(nil) - return n - } - - switch n.X.Op() { - case ir.OARRAYLIT, ir.OMAPLIT, ir.OSLICELIT, ir.OSTRUCTLIT: - n.SetOp(ir.OPTRLIT) - - default: - checklvalue(n.X, "take the address of") - r := ir.OuterValue(n.X) - if r.Op() == ir.ONAME { - r := r.(*ir.Name) - if ir.Orig(r) != r { - base.Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean? - } - r.Name().SetAddrtaken(true) - if r.Name().IsClosureVar() && !CaptureVarsComplete { - // Mark the original variable as Addrtaken so that capturevars - // knows not to pass it by value. - // But if the capturevars phase is complete, don't touch it, - // in case l.Name's containing function has not yet been compiled. - r.Name().Defn.Name().SetAddrtaken(true) - } - } - n.X = DefaultLit(n.X, nil) - if n.X.Type() == nil { - n.SetType(nil) - return n - } - } - - n.SetType(types.NewPtr(n.X.Type())) - return n + return tcAddr(n) case ir.OCOMPLIT: - return typecheckcomplit(n.(*ir.CompLitExpr)) + return tcCompLit(n.(*ir.CompLitExpr)) case ir.OXDOT, ir.ODOT: n := n.(*ir.SelectorExpr) - if n.Op() == ir.OXDOT { - n = AddImplicitDots(n) - n.SetOp(ir.ODOT) - if n.X == nil { - n.SetType(nil) - return n - } - } - - n.X = check(n.X, ctxExpr|ctxType) - - n.X = DefaultLit(n.X, nil) - - t := n.X.Type() - if t == nil { - base.UpdateErrorDot(ir.Line(n), fmt.Sprint(n.X), fmt.Sprint(n)) - n.SetType(nil) - return n - } - - s := n.Sel - - if n.X.Op() == ir.OTYPE { - return typecheckMethodExpr(n) - } - - if t.IsPtr() && !t.Elem().IsInterface() { - t = t.Elem() - if t == nil { - n.SetType(nil) - return n - } - n.SetOp(ir.ODOTPTR) - types.CheckSize(t) - } - - if n.Sel.IsBlank() { - base.Errorf("cannot refer to blank field or method") - n.SetType(nil) - return n - } - - if lookdot(n, t, 0) == nil { - // Legitimate field or method lookup failed, try to explain the error - switch { - case t.IsEmptyInterface(): - base.Errorf("%v undefined (type %v is interface with no methods)", n, n.X.Type()) - - case t.IsPtr() && t.Elem().IsInterface(): - // Pointer to interface is almost always a mistake. - base.Errorf("%v undefined (type %v is pointer to interface, not interface)", n, n.X.Type()) - - case lookdot(n, t, 1) != nil: - // Field or method matches by name, but it is not exported. - base.Errorf("%v undefined (cannot refer to unexported field or method %v)", n, n.Sel) - - default: - if mt := lookdot(n, t, 2); mt != nil && visible(mt.Sym) { // Case-insensitive lookup. 
- base.Errorf("%v undefined (type %v has no field or method %v, but does have %v)", n, n.X.Type(), n.Sel, mt.Sym) - } else { - base.Errorf("%v undefined (type %v has no field or method %v)", n, n.X.Type(), n.Sel) - } - } - n.SetType(nil) - return n - } - - if (n.Op() == ir.ODOTINTER || n.Op() == ir.ODOTMETH) && top&ctxCallee == 0 { - return typecheckpartialcall(n, s) - } - return n + return tcDot(n, top) case ir.ODOTTYPE: n := n.(*ir.TypeAssertExpr) - n.X = Expr(n.X) - n.X = DefaultLit(n.X, nil) - l := n.X - t := l.Type() - if t == nil { - n.SetType(nil) - return n - } - if !t.IsInterface() { - base.Errorf("invalid type assertion: %v (non-interface type %v on left)", n, t) - n.SetType(nil) - return n - } - - if n.Ntype != nil { - n.Ntype = check(n.Ntype, ctxType) - n.SetType(n.Ntype.Type()) - n.Ntype = nil - if n.Type() == nil { - return n - } - } - - if n.Type() != nil && !n.Type().IsInterface() { - var missing, have *types.Field - var ptr int - if !implements(n.Type(), t, &missing, &have, &ptr) { - if have != nil && have.Sym == missing.Sym { - base.Errorf("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+ - "\t\thave %v%S\n\t\twant %v%S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) - } else if ptr != 0 { - base.Errorf("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", n.Type(), t, missing.Sym) - } else if have != nil { - base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)\n"+ - "\t\thave %v%S\n\t\twant %v%S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) - } else { - base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)", n.Type(), t, missing.Sym) - } - n.SetType(nil) - return n - } - } - return n + return tcDotType(n) case ir.OINDEX: n := n.(*ir.IndexExpr) - n.X = Expr(n.X) - n.X = DefaultLit(n.X, nil) - n.X = implicitstar(n.X) - l := n.X - n.Index = Expr(n.Index) - r := n.Index - t := l.Type() - if t == nil || r.Type() == nil { - n.SetType(nil) - return n - } - switch t.Kind() { - default: - base.Errorf("invalid operation: %v (type %v does not support indexing)", n, t) - n.SetType(nil) - return n + return tcIndex(n) - case types.TSTRING, types.TARRAY, types.TSLICE: - n.Index = indexlit(n.Index) - if t.IsString() { - n.SetType(types.ByteType) - } else { - n.SetType(t.Elem()) - } - why := "string" - if t.IsArray() { - why = "array" - } else if t.IsSlice() { - why = "slice" - } + case ir.ORECV: + n := n.(*ir.UnaryExpr) + return tcRecv(n) - if n.Index.Type() != nil && !n.Index.Type().IsInteger() { - base.Errorf("non-integer %s index %v", why, n.Index) - return n - } - - if !n.Bounded() && ir.IsConst(n.Index, constant.Int) { - x := n.Index.Val() - if constant.Sign(x) < 0 { - base.Errorf("invalid %s index %v (index must be non-negative)", why, n.Index) - } else if t.IsArray() && constant.Compare(x, token.GEQ, constant.MakeInt64(t.NumElem())) { - base.Errorf("invalid array index %v (out of bounds for %d-element array)", n.Index, t.NumElem()) - } else if ir.IsConst(n.X, constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(ir.StringVal(n.X))))) { - base.Errorf("invalid string index %v (out of bounds for %d-byte string)", n.Index, len(ir.StringVal(n.X))) - } else if ir.ConstOverflow(x, types.Types[types.TINT]) { - base.Errorf("invalid %s index %v (index too large)", why, n.Index) - } - } - - case types.TMAP: - n.Index = AssignConv(n.Index, t.Key(), "map index") - 
n.SetType(t.Elem()) - n.SetOp(ir.OINDEXMAP) - n.Assigned = false - } - return n - - case ir.ORECV: - n := n.(*ir.UnaryExpr) - n.X = Expr(n.X) - n.X = DefaultLit(n.X, nil) - l := n.X - t := l.Type() - if t == nil { - n.SetType(nil) - return n - } - if !t.IsChan() { - base.Errorf("invalid operation: %v (receive from non-chan type %v)", n, t) - n.SetType(nil) - return n - } - - if !t.ChanDir().CanRecv() { - base.Errorf("invalid operation: %v (receive from send-only type %v)", n, t) - n.SetType(nil) - return n - } - - n.SetType(t.Elem()) - return n - - case ir.OSEND: - n := n.(*ir.SendStmt) - n.Chan = Expr(n.Chan) - n.Value = Expr(n.Value) - n.Chan = DefaultLit(n.Chan, nil) - t := n.Chan.Type() - if t == nil { - return n - } - if !t.IsChan() { - base.Errorf("invalid operation: %v (send to non-chan type %v)", n, t) - return n - } - - if !t.ChanDir().CanSend() { - base.Errorf("invalid operation: %v (send to receive-only type %v)", n, t) - return n - } - - n.Value = AssignConv(n.Value, t.Elem(), "send") - if n.Value.Type() == nil { - return n - } - return n + case ir.OSEND: + n := n.(*ir.SendStmt) + return tcSend(n) case ir.OSLICEHEADER: - // Errors here are Fatalf instead of Errorf because only the compiler - // can construct an OSLICEHEADER node. - // Components used in OSLICEHEADER that are supplied by parsed source code - // have already been typechecked in e.g. OMAKESLICE earlier. n := n.(*ir.SliceHeaderExpr) - t := n.Type() - if t == nil { - base.Fatalf("no type specified for OSLICEHEADER") - } - - if !t.IsSlice() { - base.Fatalf("invalid type %v for OSLICEHEADER", n.Type()) - } - - if n.Ptr == nil || n.Ptr.Type() == nil || !n.Ptr.Type().IsUnsafePtr() { - base.Fatalf("need unsafe.Pointer for OSLICEHEADER") - } - - if x := len(n.LenCap); x != 2 { - base.Fatalf("expected 2 params (len, cap) for OSLICEHEADER, got %d", x) - } - - n.Ptr = Expr(n.Ptr) - l := Expr(n.LenCap[0]) - c := Expr(n.LenCap[1]) - l = DefaultLit(l, types.Types[types.TINT]) - c = DefaultLit(c, types.Types[types.TINT]) - - if ir.IsConst(l, constant.Int) && ir.Int64Val(l) < 0 { - base.Fatalf("len for OSLICEHEADER must be non-negative") - } - - if ir.IsConst(c, constant.Int) && ir.Int64Val(c) < 0 { - base.Fatalf("cap for OSLICEHEADER must be non-negative") - } - - if ir.IsConst(l, constant.Int) && ir.IsConst(c, constant.Int) && constant.Compare(l.Val(), token.GTR, c.Val()) { - base.Fatalf("len larger than cap for OSLICEHEADER") - } - - n.LenCap[0] = l - n.LenCap[1] = c - return n + return tcSliceHeader(n) case ir.OMAKESLICECOPY: - // Errors here are Fatalf instead of Errorf because only the compiler - // can construct an OMAKESLICECOPY node. - // Components used in OMAKESCLICECOPY that are supplied by parsed source code - // have already been typechecked in OMAKE and OCOPY earlier. 
n := n.(*ir.MakeExpr) - t := n.Type() - - if t == nil { - base.Fatalf("no type specified for OMAKESLICECOPY") - } - - if !t.IsSlice() { - base.Fatalf("invalid type %v for OMAKESLICECOPY", n.Type()) - } - - if n.Len == nil { - base.Fatalf("missing len argument for OMAKESLICECOPY") - } - - if n.Cap == nil { - base.Fatalf("missing slice argument to copy for OMAKESLICECOPY") - } - - n.Len = Expr(n.Len) - n.Cap = Expr(n.Cap) - - n.Len = DefaultLit(n.Len, types.Types[types.TINT]) - - if !n.Len.Type().IsInteger() && n.Type().Kind() != types.TIDEAL { - base.Errorf("non-integer len argument in OMAKESLICECOPY") - } - - if ir.IsConst(n.Len, constant.Int) { - if ir.ConstOverflow(n.Len.Val(), types.Types[types.TINT]) { - base.Fatalf("len for OMAKESLICECOPY too large") - } - if constant.Sign(n.Len.Val()) < 0 { - base.Fatalf("len for OMAKESLICECOPY must be non-negative") - } - } - return n + return tcMakeSliceCopy(n) case ir.OSLICE, ir.OSLICE3: n := n.(*ir.SliceExpr) - n.X = Expr(n.X) - low, high, max := n.SliceBounds() - hasmax := n.Op().IsSlice3() - low = Expr(low) - high = Expr(high) - max = Expr(max) - n.X = DefaultLit(n.X, nil) - low = indexlit(low) - high = indexlit(high) - max = indexlit(max) - n.SetSliceBounds(low, high, max) - l := n.X - if l.Type() == nil { - n.SetType(nil) - return n - } - if l.Type().IsArray() { - if !ir.IsAssignable(n.X) { - base.Errorf("invalid operation %v (slice of unaddressable value)", n) - n.SetType(nil) - return n - } - - addr := NodAddr(n.X) - addr.SetImplicit(true) - n.X = Expr(addr) - l = n.X - } - t := l.Type() - var tp *types.Type - if t.IsString() { - if hasmax { - base.Errorf("invalid operation %v (3-index slice of string)", n) - n.SetType(nil) - return n - } - n.SetType(t) - n.SetOp(ir.OSLICESTR) - } else if t.IsPtr() && t.Elem().IsArray() { - tp = t.Elem() - n.SetType(types.NewSlice(tp.Elem())) - types.CalcSize(n.Type()) - if hasmax { - n.SetOp(ir.OSLICE3ARR) - } else { - n.SetOp(ir.OSLICEARR) - } - } else if t.IsSlice() { - n.SetType(t) - } else { - base.Errorf("cannot slice %v (type %v)", l, t) - n.SetType(nil) - return n - } - - if low != nil && !checksliceindex(l, low, tp) { - n.SetType(nil) - return n - } - if high != nil && !checksliceindex(l, high, tp) { - n.SetType(nil) - return n - } - if max != nil && !checksliceindex(l, max, tp) { - n.SetType(nil) - return n - } - if !checksliceconst(low, high) || !checksliceconst(low, max) || !checksliceconst(high, max) { - n.SetType(nil) - return n - } - return n + return tcSlice(n) // call and call like case ir.OCALL: n := n.(*ir.CallExpr) - n.Use = ir.CallUseExpr - if top == ctxStmt { - n.Use = ir.CallUseStmt - } - Stmts(n.Init()) // imported rewritten f(g()) calls (#30907) - n.X = check(n.X, ctxExpr|ctxType|ctxCallee) - if n.X.Diag() { - n.SetDiag(true) - } - - l := n.X - - if l.Op() == ir.ONAME && l.(*ir.Name).BuiltinOp != 0 { - l := l.(*ir.Name) - if n.IsDDD && l.BuiltinOp != ir.OAPPEND { - base.Errorf("invalid use of ... with builtin %v", l) - } - - // builtin: OLEN, OCAP, etc. 
- switch l.BuiltinOp { - default: - base.Fatalf("unknown builtin %v", l) - - case ir.OAPPEND, ir.ODELETE, ir.OMAKE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER: - n.SetOp(l.BuiltinOp) - n.X = nil - n.SetTypecheck(0) // re-typechecking new op is OK, not a loop - return check(n, top) - - case ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.OPANIC, ir.OREAL: - typecheckargs(n) - fallthrough - case ir.ONEW, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF: - arg, ok := needOneArg(n, "%v", n.Op()) - if !ok { - n.SetType(nil) - return n - } - u := ir.NewUnaryExpr(n.Pos(), l.BuiltinOp, arg) - return check(ir.InitExpr(n.Init(), u), top) // typecheckargs can add to old.Init - - case ir.OCOMPLEX, ir.OCOPY: - typecheckargs(n) - arg1, arg2, ok := needTwoArgs(n) - if !ok { - n.SetType(nil) - return n - } - b := ir.NewBinaryExpr(n.Pos(), l.BuiltinOp, arg1, arg2) - return check(ir.InitExpr(n.Init(), b), top) // typecheckargs can add to old.Init - } - panic("unreachable") - } - - n.X = DefaultLit(n.X, nil) - l = n.X - if l.Op() == ir.OTYPE { - if n.IsDDD { - if !l.Type().Broke() { - base.Errorf("invalid use of ... in type conversion to %v", l.Type()) - } - n.SetDiag(true) - } - - // pick off before type-checking arguments - arg, ok := needOneArg(n, "conversion to %v", l.Type()) - if !ok { - n.SetType(nil) - return n - } + return tcCall(n, top) - n := ir.NewConvExpr(n.Pos(), ir.OCONV, nil, arg) - n.SetType(l.Type()) - return typecheck1(n, top) - } - - typecheckargs(n) - t := l.Type() - if t == nil { - n.SetType(nil) - return n - } - types.CheckSize(t) - - switch l.Op() { - case ir.ODOTINTER: - n.SetOp(ir.OCALLINTER) - - case ir.ODOTMETH: - l := l.(*ir.SelectorExpr) - n.SetOp(ir.OCALLMETH) - - // typecheckaste was used here but there wasn't enough - // information further down the call chain to know if we - // were testing a method receiver for unexported fields. - // It isn't necessary, so just do a sanity check. - tp := t.Recv().Type - - if l.X == nil || !types.Identical(l.X.Type(), tp) { - base.Fatalf("method receiver") - } - - default: - n.SetOp(ir.OCALLFUNC) - if t.Kind() != types.TFUNC { - // TODO(mdempsky): Remove "o.Sym() != nil" once we stop - // using ir.Name for numeric literals. - if o := ir.Orig(l); o.Name() != nil && o.Sym() != nil && types.BuiltinPkg.Lookup(o.Sym().Name).Def != nil { - // be more specific when the non-function - // name matches a predeclared function - base.Errorf("cannot call non-function %L, declared at %s", - l, base.FmtPos(o.Name().Pos())) - } else { - base.Errorf("cannot call non-function %L", l) - } - n.SetType(nil) - return n - } - } - - typecheckaste(ir.OCALL, n.X, n.IsDDD, t.Params(), n.Args, func() string { return fmt.Sprintf("argument to %v", n.X) }) - if t.NumResults() == 0 { - return n - } - if t.NumResults() == 1 { - n.SetType(l.Type().Results().Field(0).Type) - - if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME { - if sym := n.X.(*ir.Name).Sym(); types.IsRuntimePkg(sym.Pkg) && sym.Name == "getg" { - // Emit code for runtime.getg() directly instead of calling function. - // Most such rewrites (for example the similar one for math.Sqrt) should be done in walk, - // so that the ordering pass can make sure to preserve the semantics of the original code - // (in particular, the exact time of the function call) by introducing temporaries. - // In this case, we know getg() always returns the same result within a given function - // and we want to avoid the temporaries, so we do the rewrite earlier than is typical. 
- n.SetOp(ir.OGETG) - } - } - return n - } - - // multiple return - if top&(ctxMultiOK|ctxStmt) == 0 { - base.Errorf("multiple-value %v() in single-value context", l) - return n - } - - n.SetType(l.Type().Results()) - return n - - case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF: - n := n.(*ir.UnaryExpr) - n.SetType(types.Types[types.TUINTPTR]) - return n - - case ir.OCAP, ir.OLEN: - n := n.(*ir.UnaryExpr) - n.X = Expr(n.X) - n.X = DefaultLit(n.X, nil) - n.X = implicitstar(n.X) - l := n.X - t := l.Type() - if t == nil { - n.SetType(nil) - return n - } - - var ok bool - if n.Op() == ir.OLEN { - ok = okforlen[t.Kind()] - } else { - ok = okforcap[t.Kind()] - } - if !ok { - base.Errorf("invalid argument %L for %v", l, n.Op()) - n.SetType(nil) - return n - } - - n.SetType(types.Types[types.TINT]) - return n - - case ir.OREAL, ir.OIMAG: - n := n.(*ir.UnaryExpr) - n.X = Expr(n.X) - l := n.X - t := l.Type() - if t == nil { - n.SetType(nil) - return n - } - - // Determine result type. - switch t.Kind() { - case types.TIDEAL: - n.SetType(types.UntypedFloat) - case types.TCOMPLEX64: - n.SetType(types.Types[types.TFLOAT32]) - case types.TCOMPLEX128: - n.SetType(types.Types[types.TFLOAT64]) - default: - base.Errorf("invalid argument %L for %v", l, n.Op()) - n.SetType(nil) - return n - } - return n - - case ir.OCOMPLEX: - n := n.(*ir.BinaryExpr) - l := Expr(n.X) - r := Expr(n.Y) - if l.Type() == nil || r.Type() == nil { - n.SetType(nil) - return n - } - l, r = defaultlit2(l, r, false) - if l.Type() == nil || r.Type() == nil { - n.SetType(nil) - return n - } - n.X = l - n.Y = r - - if !types.Identical(l.Type(), r.Type()) { - base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type()) - n.SetType(nil) - return n - } - - var t *types.Type - switch l.Type().Kind() { - default: - base.Errorf("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type()) - n.SetType(nil) - return n - - case types.TIDEAL: - t = types.UntypedComplex - - case types.TFLOAT32: - t = types.Types[types.TCOMPLEX64] - - case types.TFLOAT64: - t = types.Types[types.TCOMPLEX128] - } - n.SetType(t) - return n - - case ir.OCLOSE: - n := n.(*ir.UnaryExpr) - n.X = Expr(n.X) - n.X = DefaultLit(n.X, nil) - l := n.X - t := l.Type() - if t == nil { - n.SetType(nil) - return n - } - if !t.IsChan() { - base.Errorf("invalid operation: %v (non-chan type %v)", n, t) - n.SetType(nil) - return n - } - - if !t.ChanDir().CanSend() { - base.Errorf("invalid operation: %v (cannot close receive-only channel)", n) - n.SetType(nil) - return n - } - return n - - case ir.ODELETE: - n := n.(*ir.CallExpr) - typecheckargs(n) - args := n.Args - if len(args) == 0 { - base.Errorf("missing arguments to delete") - n.SetType(nil) - return n - } - - if len(args) == 1 { - base.Errorf("missing second (key) argument to delete") - n.SetType(nil) - return n - } - - if len(args) != 2 { - base.Errorf("too many arguments to delete") - n.SetType(nil) - return n - } - - l := args[0] - r := args[1] - if l.Type() != nil && !l.Type().IsMap() { - base.Errorf("first argument to delete must be map; have %L", l.Type()) - n.SetType(nil) - return n - } - - args[1] = AssignConv(r, l.Type().Key(), "delete") - return n - - case ir.OAPPEND: - n := n.(*ir.CallExpr) - typecheckargs(n) - args := n.Args - if len(args) == 0 { - base.Errorf("missing arguments to append") - n.SetType(nil) - return n - } - - t := args[0].Type() - if t == nil { - n.SetType(nil) - return n - } - - n.SetType(t) - if !t.IsSlice() { - if ir.IsNil(args[0]) { - 
base.Errorf("first argument to append must be typed slice; have untyped nil") - n.SetType(nil) - return n - } - - base.Errorf("first argument to append must be slice; have %L", t) - n.SetType(nil) - return n - } - - if n.IsDDD { - if len(args) == 1 { - base.Errorf("cannot use ... on first argument to append") - n.SetType(nil) - return n - } - - if len(args) != 2 { - base.Errorf("too many arguments to append") - n.SetType(nil) - return n - } - - if t.Elem().IsKind(types.TUINT8) && args[1].Type().IsString() { - args[1] = DefaultLit(args[1], types.Types[types.TSTRING]) - return n - } - - args[1] = AssignConv(args[1], t.Underlying(), "append") - return n - } - - as := args[1:] - for i, n := range as { - if n.Type() == nil { - continue - } - as[i] = AssignConv(n, t.Elem(), "append") - types.CheckSize(as[i].Type()) // ensure width is calculated for backend - } - return n - - case ir.OCOPY: - n := n.(*ir.BinaryExpr) - n.SetType(types.Types[types.TINT]) - n.X = Expr(n.X) - n.X = DefaultLit(n.X, nil) - n.Y = Expr(n.Y) - n.Y = DefaultLit(n.Y, nil) - if n.X.Type() == nil || n.Y.Type() == nil { - n.SetType(nil) - return n - } - - // copy([]byte, string) - if n.X.Type().IsSlice() && n.Y.Type().IsString() { - if types.Identical(n.X.Type().Elem(), types.ByteType) { - return n - } - base.Errorf("arguments to copy have different element types: %L and string", n.X.Type()) - n.SetType(nil) - return n - } - - if !n.X.Type().IsSlice() || !n.Y.Type().IsSlice() { - if !n.X.Type().IsSlice() && !n.Y.Type().IsSlice() { - base.Errorf("arguments to copy must be slices; have %L, %L", n.X.Type(), n.Y.Type()) - } else if !n.X.Type().IsSlice() { - base.Errorf("first argument to copy should be slice; have %L", n.X.Type()) - } else { - base.Errorf("second argument to copy should be slice or string; have %L", n.Y.Type()) - } - n.SetType(nil) - return n - } - - if !types.Identical(n.X.Type().Elem(), n.Y.Type().Elem()) { - base.Errorf("arguments to copy have different element types: %L and %L", n.X.Type(), n.Y.Type()) - n.SetType(nil) - return n - } - return n - - case ir.OCONV: - n := n.(*ir.ConvExpr) - types.CheckSize(n.Type()) // ensure width is calculated for backend - n.X = Expr(n.X) - n.X = convlit1(n.X, n.Type(), true, nil) - t := n.X.Type() - if t == nil || n.Type() == nil { - n.SetType(nil) - return n - } - op, why := convertop(n.X.Op() == ir.OLITERAL, t, n.Type()) - if op == ir.OXXX { - if !n.Diag() && !n.Type().Broke() && !n.X.Diag() { - base.Errorf("cannot convert %L to type %v%s", n.X, n.Type(), why) - n.SetDiag(true) - } - n.SetOp(ir.OCONV) - n.SetType(nil) - return n - } - - n.SetOp(op) - switch n.Op() { - case ir.OCONVNOP: - if t.Kind() == n.Type().Kind() { - switch t.Kind() { - case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128: - // Floating point casts imply rounding and - // so the conversion must be kept. - n.SetOp(ir.OCONV) - } - } - - // do not convert to []byte literal. See CL 125796. - // generated code and compiler memory footprint is better without it. 
- case ir.OSTR2BYTES: - // ok - - case ir.OSTR2RUNES: - if n.X.Op() == ir.OLITERAL { - return stringtoruneslit(n) - } - } + case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF: + n := n.(*ir.UnaryExpr) + n.SetType(types.Types[types.TUINTPTR]) return n - case ir.OMAKE: - n := n.(*ir.CallExpr) - args := n.Args - if len(args) == 0 { - base.Errorf("missing argument to make") - n.SetType(nil) - return n - } + case ir.OCAP, ir.OLEN: + n := n.(*ir.UnaryExpr) + return tcLenCap(n) - n.Args.Set(nil) - l := args[0] - l = check(l, ctxType) - t := l.Type() - if t == nil { - n.SetType(nil) - return n - } + case ir.OREAL, ir.OIMAG: + n := n.(*ir.UnaryExpr) + return tcRealImag(n) - i := 1 - var nn ir.Node - switch t.Kind() { - default: - base.Errorf("cannot make type %v", t) - n.SetType(nil) - return n + case ir.OCOMPLEX: + n := n.(*ir.BinaryExpr) + return tcComplex(n) - case types.TSLICE: - if i >= len(args) { - base.Errorf("missing len argument to make(%v)", t) - n.SetType(nil) - return n - } + case ir.OCLOSE: + n := n.(*ir.UnaryExpr) + return tcClose(n) - l = args[i] - i++ - l = Expr(l) - var r ir.Node - if i < len(args) { - r = args[i] - i++ - r = Expr(r) - } + case ir.ODELETE: + n := n.(*ir.CallExpr) + return tcDelete(n) - if l.Type() == nil || (r != nil && r.Type() == nil) { - n.SetType(nil) - return n - } - if !checkmake(t, "len", &l) || r != nil && !checkmake(t, "cap", &r) { - n.SetType(nil) - return n - } - if ir.IsConst(l, constant.Int) && r != nil && ir.IsConst(r, constant.Int) && constant.Compare(l.Val(), token.GTR, r.Val()) { - base.Errorf("len larger than cap in make(%v)", t) - n.SetType(nil) - return n - } - nn = ir.NewMakeExpr(n.Pos(), ir.OMAKESLICE, l, r) - - case types.TMAP: - if i < len(args) { - l = args[i] - i++ - l = Expr(l) - l = DefaultLit(l, types.Types[types.TINT]) - if l.Type() == nil { - n.SetType(nil) - return n - } - if !checkmake(t, "size", &l) { - n.SetType(nil) - return n - } - } else { - l = ir.NewInt(0) - } - nn = ir.NewMakeExpr(n.Pos(), ir.OMAKEMAP, l, nil) - nn.SetEsc(n.Esc()) - - case types.TCHAN: - l = nil - if i < len(args) { - l = args[i] - i++ - l = Expr(l) - l = DefaultLit(l, types.Types[types.TINT]) - if l.Type() == nil { - n.SetType(nil) - return n - } - if !checkmake(t, "buffer", &l) { - n.SetType(nil) - return n - } - } else { - l = ir.NewInt(0) - } - nn = ir.NewMakeExpr(n.Pos(), ir.OMAKECHAN, l, nil) - } + case ir.OAPPEND: + n := n.(*ir.CallExpr) + return tcAppend(n) - if i < len(args) { - base.Errorf("too many arguments to make(%v)", t) - n.SetType(nil) - return n - } + case ir.OCOPY: + n := n.(*ir.BinaryExpr) + return tcCopy(n) - nn.SetType(t) - return nn + case ir.OCONV: + n := n.(*ir.ConvExpr) + return tcConv(n) + + case ir.OMAKE: + n := n.(*ir.CallExpr) + return tcMake(n) case ir.ONEW: n := n.(*ir.UnaryExpr) - if n.X == nil { - // Fatalf because the OCALL above checked for us, - // so this must be an internally-generated mistake. - base.Fatalf("missing argument to new") - } - l := n.X - l = check(l, ctxType) - t := l.Type() - if t == nil { - n.SetType(nil) - return n - } - n.X = l - n.SetType(types.NewPtr(t)) - return n + return tcNew(n) case ir.OPRINT, ir.OPRINTN: n := n.(*ir.CallExpr) - typecheckargs(n) - ls := n.Args - for i1, n1 := range ls { - // Special case for print: int constant is int64, not int. 
- if ir.IsConst(n1, constant.Int) { - ls[i1] = DefaultLit(ls[i1], types.Types[types.TINT64]) - } else { - ls[i1] = DefaultLit(ls[i1], nil) - } - } - return n + return tcPrint(n) case ir.OPANIC: n := n.(*ir.UnaryExpr) - n.X = Expr(n.X) - n.X = DefaultLit(n.X, types.Types[types.TINTER]) - if n.X.Type() == nil { - n.SetType(nil) - return n - } - return n + return tcPanic(n) case ir.ORECOVER: n := n.(*ir.CallExpr) - if len(n.Args) != 0 { - base.Errorf("too many arguments to recover") - n.SetType(nil) - return n - } - - n.SetType(types.Types[types.TINTER]) - return n + return tcRecover(n) case ir.OCLOSURE: n := n.(*ir.ClosureExpr) - typecheckclosure(n, top) + tcClosure(n, top) if n.Type() == nil { return n } @@ -2129,17 +817,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OITAB: n := n.(*ir.UnaryExpr) - n.X = Expr(n.X) - t := n.X.Type() - if t == nil { - n.SetType(nil) - return n - } - if !t.IsInterface() { - base.Fatalf("OITAB of %v", t) - } - n.SetType(types.NewPtr(types.Types[types.TUINTPTR])) - return n + return tcITab(n) case ir.OIDATA: // Whoever creates the OIDATA node must know a priori the concrete type at that moment, @@ -2150,21 +828,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.OSPTR: n := n.(*ir.UnaryExpr) - n.X = Expr(n.X) - t := n.X.Type() - if t == nil { - n.SetType(nil) - return n - } - if !t.IsSlice() && !t.IsString() { - base.Fatalf("OSPTR of %v", t) - } - if t.IsString() { - n.SetType(types.NewPtr(types.Types[types.TUINT8])) - } else { - n.SetType(types.NewPtr(t.Elem())) - } - return n + return tcSPtr(n) case ir.OCLOSUREREAD: return n @@ -2183,7 +847,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { // statements case ir.OAS: n := n.(*ir.AssignStmt) - typecheckas(n) + tcAssign(n) // Code that creates temps does not bother to set defn, so do it here. 
if n.X.Op() == ir.ONAME && ir.IsAutoTmp(n.X) { @@ -2192,7 +856,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.OAS2: - typecheckas2(n.(*ir.AssignListStmt)) + tcAssignList(n.(*ir.AssignListStmt)) return n case ir.OBREAK, @@ -2221,76 +885,38 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.ODEFER, ir.OGO: n := n.(*ir.GoDeferStmt) - n.Call = check(n.Call, ctxStmt|ctxExpr) + n.Call = typecheck(n.Call, ctxStmt|ctxExpr) if !n.Call.Diag() { - checkdefergo(n) + tcGoDefer(n) } return n case ir.OFOR, ir.OFORUNTIL: n := n.(*ir.ForStmt) - Stmts(n.Init()) - decldepth++ - n.Cond = Expr(n.Cond) - n.Cond = DefaultLit(n.Cond, nil) - if n.Cond != nil { - t := n.Cond.Type() - if t != nil && !t.IsBoolean() { - base.Errorf("non-bool %L used as for condition", n.Cond) - } - } - n.Post = Stmt(n.Post) - if n.Op() == ir.OFORUNTIL { - Stmts(n.Late) - } - Stmts(n.Body) - decldepth-- - return n + return tcFor(n) case ir.OIF: n := n.(*ir.IfStmt) - Stmts(n.Init()) - n.Cond = Expr(n.Cond) - n.Cond = DefaultLit(n.Cond, nil) - if n.Cond != nil { - t := n.Cond.Type() - if t != nil && !t.IsBoolean() { - base.Errorf("non-bool %L used as if condition", n.Cond) - } - } - Stmts(n.Body) - Stmts(n.Else) - return n + return tcIf(n) case ir.ORETURN: n := n.(*ir.ReturnStmt) - typecheckargs(n) - if ir.CurFunc == nil { - base.Errorf("return outside function") - n.SetType(nil) - return n - } - - if ir.HasNamedResults(ir.CurFunc) && len(n.Results) == 0 { - return n - } - typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), n.Results, func() string { return "return argument" }) - return n + return tcReturn(n) case ir.ORETJMP: n := n.(*ir.BranchStmt) return n case ir.OSELECT: - typecheckselect(n.(*ir.SelectStmt)) + tcSelect(n.(*ir.SelectStmt)) return n case ir.OSWITCH: - typecheckswitch(n.(*ir.SwitchStmt)) + tcSwitch(n.(*ir.SwitchStmt)) return n case ir.ORANGE: - typecheckrange(n.(*ir.RangeStmt)) + tcRange(n.(*ir.RangeStmt)) return n case ir.OTYPESW: @@ -2300,7 +926,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { return n case ir.ODCLFUNC: - typecheckfunc(n.(*ir.Func)) + tcFunc(n.(*ir.Func)) return n case ir.ODCLCONST: @@ -2310,7 +936,7 @@ func typecheck1(n ir.Node, top int) (res ir.Node) { case ir.ODCLTYPE: n := n.(*ir.Decl) - n.X = check(n.X, ctxType) + n.X = typecheck(n.X, ctxType) types.CheckSize(n.X.Type()) return n } @@ -2424,59 +1050,6 @@ func checksliceconst(lo ir.Node, hi ir.Node) bool { return true } -func checkdefergo(n *ir.GoDeferStmt) { - what := "defer" - if n.Op() == ir.OGO { - what = "go" - } - - switch n.Call.Op() { - // ok - case ir.OCALLINTER, - ir.OCALLMETH, - ir.OCALLFUNC, - ir.OCLOSE, - ir.OCOPY, - ir.ODELETE, - ir.OPANIC, - ir.OPRINT, - ir.OPRINTN, - ir.ORECOVER: - return - - case ir.OAPPEND, - ir.OCAP, - ir.OCOMPLEX, - ir.OIMAG, - ir.OLEN, - ir.OMAKE, - ir.OMAKESLICE, - ir.OMAKECHAN, - ir.OMAKEMAP, - ir.ONEW, - ir.OREAL, - ir.OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof - if orig := ir.Orig(n.Call); orig.Op() == ir.OCONV { - break - } - base.ErrorfAt(n.Pos(), "%s discards result of %v", what, n.Call) - return - } - - // type is broken or missing, most likely a method call on a broken type - // we will warn about the broken type elsewhere. no need to emit a potentially confusing error - if n.Call.Type() == nil || n.Call.Type().Broke() { - return - } - - if !n.Diag() { - // The syntax made sure it was a call, so this must be - // a conversion. 
- n.SetDiag(true) - base.ErrorfAt(n.Pos(), "%s requires function call, not conversion", what) - } -} - // The result of implicitstar MUST be assigned back to n, e.g. // n.Left = implicitstar(n.Left) func implicitstar(n ir.Node) ir.Node { @@ -2687,11 +1260,11 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { checklvalue(n.X, "call pointer method on") addr := NodAddr(n.X) addr.SetImplicit(true) - n.X = check(addr, ctxType|ctxExpr) + n.X = typecheck(addr, ctxType|ctxExpr) } else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) { star := ir.NewStarExpr(base.Pos, n.X) star.SetImplicit(true) - n.X = check(star, ctxType|ctxExpr) + n.X = typecheck(star, ctxType|ctxExpr) } else if tt.IsPtr() && tt.Elem().IsPtr() && types.Identical(derefall(tt), derefall(rcvr)) { base.Errorf("calling method %v with receiver %L requires explicit dereference", n.Sel, n.X) for tt.IsPtr() { @@ -2701,7 +1274,7 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { } star := ir.NewStarExpr(base.Pos, n.X) star.SetImplicit(true) - n.X = check(star, ctxType|ctxExpr) + n.X = typecheck(star, ctxType|ctxExpr) tt = tt.Elem() } } else { @@ -2985,214 +1558,6 @@ func pushtype(nn ir.Node, t *types.Type) ir.Node { return n } -// The result of typecheckcomplit MUST be assigned back to n, e.g. -// n.Left = typecheckcomplit(n.Left) -func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) { - if base.EnableTrace && base.Flag.LowerT { - defer tracePrint("typecheckcomplit", n)(&res) - } - - lno := base.Pos - defer func() { - base.Pos = lno - }() - - if n.Ntype == nil { - base.ErrorfAt(n.Pos(), "missing type in composite literal") - n.SetType(nil) - return n - } - - // Save original node (including n.Right) - n.SetOrig(ir.Copy(n)) - - ir.SetPos(n.Ntype) - - // Need to handle [...]T arrays specially. - if array, ok := n.Ntype.(*ir.ArrayType); ok && array.Elem != nil && array.Len == nil { - array.Elem = check(array.Elem, ctxType) - elemType := array.Elem.Type() - if elemType == nil { - n.SetType(nil) - return n - } - length := typecheckarraylit(elemType, -1, n.List, "array literal") - n.SetOp(ir.OARRAYLIT) - n.SetType(types.NewArray(elemType, length)) - n.Ntype = nil - return n - } - - n.Ntype = ir.Node(check(n.Ntype, ctxType)).(ir.Ntype) - t := n.Ntype.Type() - if t == nil { - n.SetType(nil) - return n - } - n.SetType(t) - - switch t.Kind() { - default: - base.Errorf("invalid composite literal type %v", t) - n.SetType(nil) - - case types.TARRAY: - typecheckarraylit(t.Elem(), t.NumElem(), n.List, "array literal") - n.SetOp(ir.OARRAYLIT) - n.Ntype = nil - - case types.TSLICE: - length := typecheckarraylit(t.Elem(), -1, n.List, "slice literal") - n.SetOp(ir.OSLICELIT) - n.Ntype = nil - n.Len = length - - case types.TMAP: - var cs constSet - for i3, l := range n.List { - ir.SetPos(l) - if l.Op() != ir.OKEY { - n.List[i3] = Expr(l) - base.Errorf("missing key in map literal") - continue - } - l := l.(*ir.KeyExpr) - - r := l.Key - r = pushtype(r, t.Key()) - r = Expr(r) - l.Key = AssignConv(r, t.Key(), "map key") - cs.add(base.Pos, l.Key, "key", "map literal") - - r = l.Value - r = pushtype(r, t.Elem()) - r = Expr(r) - l.Value = AssignConv(r, t.Elem(), "map value") - } - - n.SetOp(ir.OMAPLIT) - n.Ntype = nil - - case types.TSTRUCT: - // Need valid field offsets for Xoffset below. 
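The lookdot hunks above preserve the automatic receiver adjustment: a pointer method called on an addressable value gets an implicit address-of (NodAddr), and a value method called through a pointer gets an implicit star. An illustrative source-level view, not taken from this patch:

    type T struct{ n int }

    func (t *T) Inc()    { t.n++ }
    func (t T) Get() int { return t.n }

    func use() {
        var v T
        v.Inc()     // typechecked as (&v).Inc(): implicit NodAddr
        p := &v
        _ = p.Get() // typechecked as (*p).Get(): implicit StarExpr
    }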
- types.CalcSize(t) - - errored := false - if len(n.List) != 0 && nokeys(n.List) { - // simple list of variables - ls := n.List - for i, n1 := range ls { - ir.SetPos(n1) - n1 = Expr(n1) - ls[i] = n1 - if i >= t.NumFields() { - if !errored { - base.Errorf("too many values in %v", n) - errored = true - } - continue - } - - f := t.Field(i) - s := f.Sym - if s != nil && !types.IsExported(s.Name) && s.Pkg != types.LocalPkg { - base.Errorf("implicit assignment of unexported field '%s' in %v literal", s.Name, t) - } - // No pushtype allowed here. Must name fields for that. - n1 = AssignConv(n1, f.Type, "field value") - sk := ir.NewStructKeyExpr(base.Pos, f.Sym, n1) - sk.Offset = f.Offset - ls[i] = sk - } - if len(ls) < t.NumFields() { - base.Errorf("too few values in %v", n) - } - } else { - hash := make(map[string]bool) - - // keyed list - ls := n.List - for i, l := range ls { - ir.SetPos(l) - - if l.Op() == ir.OKEY { - kv := l.(*ir.KeyExpr) - key := kv.Key - - // Sym might have resolved to name in other top-level - // package, because of import dot. Redirect to correct sym - // before we do the lookup. - s := key.Sym() - if id, ok := key.(*ir.Ident); ok && DotImportRefs[id] != nil { - s = Lookup(s.Name) - } - - // An OXDOT uses the Sym field to hold - // the field to the right of the dot, - // so s will be non-nil, but an OXDOT - // is never a valid struct literal key. - if s == nil || s.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || s.IsBlank() { - base.Errorf("invalid field name %v in struct initializer", key) - continue - } - - l = ir.NewStructKeyExpr(l.Pos(), s, kv.Value) - ls[i] = l - } - - if l.Op() != ir.OSTRUCTKEY { - if !errored { - base.Errorf("mixture of field:value and value initializers") - errored = true - } - ls[i] = Expr(ls[i]) - continue - } - l := l.(*ir.StructKeyExpr) - - f := lookdot1(nil, l.Field, t, t.Fields(), 0) - if f == nil { - if ci := lookdot1(nil, l.Field, t, t.Fields(), 2); ci != nil { // Case-insensitive lookup. - if visible(ci.Sym) { - base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", l.Field, t, ci.Sym) - } else if nonexported(l.Field) && l.Field.Name == ci.Sym.Name { // Ensure exactness before the suggestion. - base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", l.Field, t) - } else { - base.Errorf("unknown field '%v' in struct literal of type %v", l.Field, t) - } - continue - } - var f *types.Field - p, _ := dotpath(l.Field, t, &f, true) - if p == nil || f.IsMethod() { - base.Errorf("unknown field '%v' in struct literal of type %v", l.Field, t) - continue - } - // dotpath returns the parent embedded types in reverse order. - var ep []string - for ei := len(p) - 1; ei >= 0; ei-- { - ep = append(ep, p[ei].field.Sym.Name) - } - ep = append(ep, l.Field.Name) - base.Errorf("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), t) - continue - } - fielddup(f.Sym.Name, hash) - l.Offset = f.Offset - - // No pushtype allowed here. Tried and rejected. - l.Value = Expr(l.Value) - l.Value = AssignConv(l.Value, f.Type, "field value") - } - } - - n.SetOp(ir.OSTRUCTLIT) - n.Ntype = nil - } - - return n -} - // typecheckarraylit type-checks a sequence of slice/array literal elements. func typecheckarraylit(elemType *types.Type, bound int64, elts []ir.Node, ctx string) int64 { // If there are key/value pairs, create a map to keep seen @@ -3324,60 +1689,6 @@ func checkassignlist(stmt ir.Node, l ir.Nodes) { } } -// type check assignment. 
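The TSTRUCT case above pairs each literal shape with a specific diagnostic, which is easiest to see against concrete source (a hedged example, not from this patch):

    type Inner struct{ N int }
    type Outer struct{ Inner }

    var (
        _ = Outer{Inner{1}}        // ok: unkeyed, values matched to fields in order
        _ = Outer{Inner: Inner{1}} // ok: keyed by the embedded type name
    )
    // Outer{N: 1}    -> cannot use promoted field Inner.N in struct literal of type Outer
    // Inner{1, N: 2} -> mixture of field:value and value initializers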
-// if this assignment is the definition of a var on the left side, -// fill in the var's type. -func typecheckas(n *ir.AssignStmt) { - if base.EnableTrace && base.Flag.LowerT { - defer tracePrint("typecheckas", n)(nil) - } - - // delicate little dance. - // the definition of n may refer to this assignment - // as its definition, in which case it will call typecheckas. - // in that case, do not call typecheck back, or it will cycle. - // if the variable has a type (ntype) then typechecking - // will not look at defn, so it is okay (and desirable, - // so that the conversion below happens). - n.X = Resolve(n.X) - - if !ir.DeclaredBy(n.X, n) || n.X.Name().Ntype != nil { - n.X = AssignExpr(n.X) - } - - // Use ctxMultiOK so we can emit an "N variables but M values" error - // to be consistent with typecheckas2 (#26616). - n.Y = check(n.Y, ctxExpr|ctxMultiOK) - checkassign(n, n.X) - if n.Y != nil && n.Y.Type() != nil { - if n.Y.Type().IsFuncArgStruct() { - base.Errorf("assignment mismatch: 1 variable but %v returns %d values", n.Y.(*ir.CallExpr).X, n.Y.Type().NumFields()) - // Multi-value RHS isn't actually valid for OAS; nil out - // to indicate failed typechecking. - n.Y.SetType(nil) - } else if n.X.Type() != nil { - n.Y = AssignConv(n.Y, n.X.Type(), "assignment") - } - } - - if ir.DeclaredBy(n.X, n) && n.X.Name().Ntype == nil { - n.Y = DefaultLit(n.Y, nil) - n.X.SetType(n.Y.Type()) - } - - // second half of dance. - // now that right is done, typecheck the left - // just to get it over with. see dance above. - n.SetTypecheck(1) - - if n.X.Typecheck() == 0 { - n.X = AssignExpr(n.X) - } - if !ir.IsBlank(n.X) { - types.CheckSize(n.X.Type()) // ensure width is calculated for backend - } -} - func checkassignto(src *types.Type, dst ir.Node) { if op, why := assignop(src, dst.Type()); op == ir.OXXX { base.Errorf("cannot assign %v to %L in multiple assignment%s", src, dst, why) @@ -3385,173 +1696,6 @@ func checkassignto(src *types.Type, dst ir.Node) { } } -func typecheckas2(n *ir.AssignListStmt) { - if base.EnableTrace && base.Flag.LowerT { - defer tracePrint("typecheckas2", n)(nil) - } - - ls := n.Lhs - for i1, n1 := range ls { - // delicate little dance. 
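Note the ctxMultiOK in the removed typecheckas above: the RHS is allowed to be multi-valued just long enough to produce the counted "assignment mismatch" error (#26616) rather than a vaguer one. An illustrative example:

    func two() (int, int) { return 1, 2 }

    func use() {
        // x := two() // error: assignment mismatch: 1 variable but two returns 2 values
        a, b := two() // ok: the two-variable form is handled by typecheckas2 below
        _, _ = a, b
    }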
- n1 = Resolve(n1) - ls[i1] = n1 - - if !ir.DeclaredBy(n1, n) || n1.Name().Ntype != nil { - ls[i1] = AssignExpr(ls[i1]) - } - } - - cl := len(n.Lhs) - cr := len(n.Rhs) - if cl > 1 && cr == 1 { - n.Rhs[0] = check(n.Rhs[0], ctxExpr|ctxMultiOK) - } else { - Exprs(n.Rhs) - } - checkassignlist(n, n.Lhs) - - var l ir.Node - var r ir.Node - if cl == cr { - // easy - ls := n.Lhs - rs := n.Rhs - for il, nl := range ls { - nr := rs[il] - if nl.Type() != nil && nr.Type() != nil { - rs[il] = AssignConv(nr, nl.Type(), "assignment") - } - if ir.DeclaredBy(nl, n) && nl.Name().Ntype == nil { - rs[il] = DefaultLit(rs[il], nil) - nl.SetType(rs[il].Type()) - } - } - - goto out - } - - l = n.Lhs[0] - r = n.Rhs[0] - - // x,y,z = f() - if cr == 1 { - if r.Type() == nil { - goto out - } - switch r.Op() { - case ir.OCALLMETH, ir.OCALLINTER, ir.OCALLFUNC: - if !r.Type().IsFuncArgStruct() { - break - } - cr = r.Type().NumFields() - if cr != cl { - goto mismatch - } - r.(*ir.CallExpr).Use = ir.CallUseList - n.SetOp(ir.OAS2FUNC) - for i, l := range n.Lhs { - f := r.Type().Field(i) - if f.Type != nil && l.Type() != nil { - checkassignto(f.Type, l) - } - if ir.DeclaredBy(l, n) && l.Name().Ntype == nil { - l.SetType(f.Type) - } - } - goto out - } - } - - // x, ok = y - if cl == 2 && cr == 1 { - if r.Type() == nil { - goto out - } - switch r.Op() { - case ir.OINDEXMAP, ir.ORECV, ir.ODOTTYPE: - switch r.Op() { - case ir.OINDEXMAP: - n.SetOp(ir.OAS2MAPR) - case ir.ORECV: - n.SetOp(ir.OAS2RECV) - case ir.ODOTTYPE: - r := r.(*ir.TypeAssertExpr) - n.SetOp(ir.OAS2DOTTYPE) - r.SetOp(ir.ODOTTYPE2) - } - if l.Type() != nil { - checkassignto(r.Type(), l) - } - if ir.DeclaredBy(l, n) { - l.SetType(r.Type()) - } - l := n.Lhs[1] - if l.Type() != nil && !l.Type().IsBoolean() { - checkassignto(types.Types[types.TBOOL], l) - } - if ir.DeclaredBy(l, n) && l.Name().Ntype == nil { - l.SetType(types.Types[types.TBOOL]) - } - goto out - } - } - -mismatch: - switch r.Op() { - default: - base.Errorf("assignment mismatch: %d variables but %d values", cl, cr) - case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: - r := r.(*ir.CallExpr) - base.Errorf("assignment mismatch: %d variables but %v returns %d values", cl, r.X, cr) - } - - // second half of dance -out: - n.SetTypecheck(1) - ls = n.Lhs - for i1, n1 := range ls { - if n1.Typecheck() == 0 { - ls[i1] = AssignExpr(ls[i1]) - } - } -} - -// type check function definition -// To be called by typecheck, not directly. -// (Call typecheckFunc instead.) -func typecheckfunc(n *ir.Func) { - if base.EnableTrace && base.Flag.LowerT { - defer tracePrint("typecheckfunc", n)(nil) - } - - for _, ln := range n.Dcl { - if ln.Op() == ir.ONAME && (ln.Class_ == ir.PPARAM || ln.Class_ == ir.PPARAMOUT) { - ln.Decldepth = 1 - } - } - - n.Nname = AssignExpr(n.Nname).(*ir.Name) - t := n.Nname.Type() - if t == nil { - return - } - n.SetType(t) - rcvr := t.Recv() - if rcvr != nil && n.Shortname != nil { - m := addmethod(n, n.Shortname, t, true, n.Pragma&ir.Nointerface != 0) - if m == nil { - return - } - - n.Nname.SetSym(ir.MethodSym(rcvr.Type, n.Shortname)) - Declare(n.Nname, ir.PFUNC) - } - - if base.Ctxt.Flag_dynlink && !inimport && n.Nname != nil { - NeedFuncSym(n.Sym()) - } -} - // The result of stringtoruneslit MUST be assigned back to n, e.g. 
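typecheckas2 above recognizes exactly three comma-ok right-hand sides and retags the assignment op accordingly; in source terms (a fragment, with m, k, ch, and x assumed in scope):

    v1, ok1 := m[k]    // OINDEXMAP -> OAS2MAPR
    v2, ok2 := <-ch    // ORECV     -> OAS2RECV
    v3, ok3 := x.(int) // ODOTTYPE  -> OAS2DOTTYPE (RHS retagged ODOTTYPE2)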
// n.Left = stringtoruneslit(n.Left) func stringtoruneslit(n *ir.ConvExpr) ir.Node { From 575fd6ff0a886675412f1c24b390500b8413cebc Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 23 Dec 2020 00:44:42 -0500 Subject: [PATCH 229/474] [dev.regabi] cmd/compile: split out package inline [generated] [git-generate] cd src/cmd/compile/internal/gc rf ' mv numNonClosures inl.go mv inlFlood Inline_Flood mv inlcalls InlineCalls mv devirtualize Devirtualize mv caninl CanInline mv inl.go cmd/compile/internal/inline ' Change-Id: Iee1f5b1e82d5cea6be4ecd91e6920500810f21de Reviewed-on: https://go-review.googlesource.com/c/go/+/279309 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/export.go | 3 +- src/cmd/compile/internal/gc/main.go | 18 ++------ src/cmd/compile/internal/gc/subr.go | 3 +- .../compile/internal/{gc => inline}/inl.go | 46 ++++++++++++------- 4 files changed, 37 insertions(+), 33 deletions(-) rename src/cmd/compile/internal/{gc => inline}/inl.go (97%) diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index a414962431b8a..c65c6c8335be2 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/inline" "cmd/compile/internal/ir" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" @@ -83,7 +84,7 @@ func (p *exporter) markObject(n ir.Node) { if n.Op() == ir.ONAME { n := n.(*ir.Name) if n.Class_ == ir.PFUNC { - inlFlood(n, typecheck.Export) + inline.Inline_Flood(n, typecheck.Export) } } diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index b98d1f2e10b52..7f20d6b8a565c 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -10,6 +10,7 @@ import ( "bufio" "bytes" "cmd/compile/internal/base" + "cmd/compile/internal/inline" "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" @@ -184,7 +185,7 @@ func Main(archInit func(*Arch)) { ir.EscFmt = escFmt ir.IsIntrinsicCall = isIntrinsicCall - SSADumpInline = ssaDumpInline + inline.SSADumpInline = ssaDumpInline initSSAEnv() initSSATables() @@ -231,13 +232,13 @@ func Main(archInit func(*Arch)) { // Inlining base.Timer.Start("fe", "inlining") if base.Flag.LowerL != 0 { - InlinePackage() + inline.InlinePackage() } // Devirtualize. for _, n := range typecheck.Target.Decls { if n.Op() == ir.ODCLFUNC { - devirtualize(n.(*ir.Func)) + inline.Devirtualize(n.(*ir.Func)) } } ir.CurFunc = nil @@ -372,17 +373,6 @@ func cgoSymABIs() { } } -// numNonClosures returns the number of functions in list which are not closures. 
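Inlining decisions made by the InlinePackage call above are reported under -m (base.Flag.LowerM); a build then prints lines of roughly this shape (paths and function names illustrative, not from this patch):

    $ go build -gcflags=-m ./p
    ./p/p.go:3:6: can inline add
    ./p/p.go:7:10: inlining call to add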
-func numNonClosures(list []*ir.Func) int { - count := 0 - for _, fn := range list { - if fn.OClosure == nil { - count++ - } - } - return count -} - func writebench(filename string) error { f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) if err != nil { diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 8e2093d4883f6..f76fb8e24a2b8 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/inline" "cmd/compile/internal/ir" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" @@ -481,7 +482,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { // generate those wrappers within the same compilation unit as (T).M. // TODO(mdempsky): Investigate why we can't enable this more generally. if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym() != nil { - inlcalls(fn) + inline.InlineCalls(fn) } escapeFuncs([]*ir.Func{fn}, false) diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/inline/inl.go similarity index 97% rename from src/cmd/compile/internal/gc/inl.go rename to src/cmd/compile/internal/inline/inl.go index 9cf23caf0e80c..222e62d0cc894 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -24,9 +24,14 @@ // The Debug.m flag enables diagnostic output. a single -m is useful for verifying // which calls get inlined or not, more is for debugging, and may go away at any point. -package gc +package inline import ( + "errors" + "fmt" + "go/constant" + "strings" + "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/logopt" @@ -34,10 +39,6 @@ import ( "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/src" - "errors" - "fmt" - "go/constant" - "strings" ) // Inlining budget parameters, gathered in one place @@ -62,21 +63,21 @@ func InlinePackage() { // We allow inlining if there is no // recursion, or the recursion cycle is // across more than one function. - caninl(n) + CanInline(n) } else { if base.Flag.LowerM > 1 { fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Nname) } } - inlcalls(n) + InlineCalls(n) } }) } // Caninl determines whether fn is inlineable. -// If so, caninl saves fn->nbody in fn->inl and substitutes it with a copy. +// If so, CanInline saves fn->nbody in fn->inl and substitutes it with a copy. // fn and ->nbody will already have been typechecked. -func caninl(fn *ir.Func) { +func CanInline(fn *ir.Func) { if fn.Nname == nil { base.Fatalf("caninl no nname %+v", fn) } @@ -192,9 +193,9 @@ func caninl(fn *ir.Func) { } } -// inlFlood marks n's inline body for export and recursively ensures +// Inline_Flood marks n's inline body for export and recursively ensures // all called functions are marked too. 
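Devirtualize, whose implementation appears in the inl.go hunks below, rewrites an interface method call into a direct concrete-type call when the receiver's dynamic type is statically evident, which in turn exposes the call to inlining. A hedged sketch of the kind of call that qualifies:

    var buf bytes.Buffer
    io.Writer(&buf).Write(p) // concrete type *bytes.Buffer is evident, so the
                             // OCALLINTER can become a direct Write call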
-func inlFlood(n *ir.Name, exportsym func(*ir.Name)) { +func Inline_Flood(n *ir.Name, exportsym func(*ir.Name)) { if n == nil { return } @@ -222,13 +223,13 @@ func inlFlood(n *ir.Name, exportsym func(*ir.Name)) { ir.VisitList(ir.Nodes(fn.Inl.Body), func(n ir.Node) { switch n.Op() { case ir.OMETHEXPR, ir.ODOTMETH: - inlFlood(ir.MethodExprName(n), exportsym) + Inline_Flood(ir.MethodExprName(n), exportsym) case ir.ONAME: n := n.(*ir.Name) switch n.Class_ { case ir.PFUNC: - inlFlood(n, exportsym) + Inline_Flood(n, exportsym) exportsym(n) case ir.PEXTERN: exportsym(n) @@ -442,7 +443,7 @@ func isBigFunc(fn *ir.Func) bool { // Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any // calls made to inlineable functions. This is the external entry point. -func inlcalls(fn *ir.Func) { +func InlineCalls(fn *ir.Func) { savefn := ir.CurFunc ir.CurFunc = fn maxCost := int32(inlineMaxBudget) @@ -631,7 +632,7 @@ func inlCallee(fn ir.Node) *ir.Func { case ir.OCLOSURE: fn := fn.(*ir.ClosureExpr) c := fn.Func - caninl(c) + CanInline(c) return c } return nil @@ -1202,9 +1203,9 @@ func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name { return s } -// devirtualize replaces interface method calls within fn with direct +// Devirtualize replaces interface method calls within fn with direct // concrete-type method calls where applicable. -func devirtualize(fn *ir.Func) { +func Devirtualize(fn *ir.Func) { ir.CurFunc = fn ir.VisitList(fn.Body, func(n ir.Node) { if n.Op() == ir.OCALLINTER { @@ -1268,3 +1269,14 @@ func devirtualizeCall(call *ir.CallExpr) { call.SetType(ft.Results()) } } + +// numNonClosures returns the number of functions in list which are not closures. +func numNonClosures(list []*ir.Func) int { + count := 0 + for _, fn := range list { + if fn.OClosure == nil { + count++ + } + } + return count +} From 0ced54062e9d58f8ff6b3beff0c8694e799d47a8 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 23 Dec 2020 00:46:27 -0500 Subject: [PATCH 230/474] [dev.regabi] cmd/compile: split out package objw [generated] Object file writing routines are used not just at the end of the compilation but also during static data layout in walk. Split them into their own package. [git-generate] cd src/cmd/compile/internal/gc rf ' # Move bit vector to new package bitvec mv bvec.n bvec.N mv bvec.b bvec.B mv bvec BitVec mv bvalloc New mv bvbulkalloc NewBulk mv bulkBvec.next bulkBvec.Next mv bulkBvec Bulk mv H0 h0 mv Hp hp # Leave bvecSet and bitmap hashes behind - not needed as broadly. mv bvecSet.extractUniqe bvecSet.extractUnique mv h0 bvecSet bvecSet.grow bvecSet.add \ bvecSet.extractUnique hashbitmap bvset.go mv bv.go cmd/compile/internal/bitvec ex . 
../arm ../arm64 ../mips ../mips64 ../ppc64 ../s390x ../riscv64 { import "cmd/internal/obj" var a *obj.Addr var i int64 Addrconst(a, i) -> a.SetConst(i) var p, to *obj.Prog Patch(p, to) -> p.To.SetTarget(to) } rm Addrconst Patch # Move object-writing API to new package objw mv duint8 Objw_Uint8 mv duint16 Objw_Uint16 mv duint32 Objw_Uint32 mv duintptr Objw_Uintptr mv duintxx Objw_UintN mv dsymptr Objw_SymPtr mv dsymptrOff Objw_SymPtrOff mv dsymptrWeakOff Objw_SymPtrWeakOff mv ggloblsym Objw_Global mv dbvec Objw_BitVec mv newProgs NewProgs mv Progs.clearp Progs.Clear mv Progs.settext Progs.SetText mv Progs.next Progs.Next mv Progs.pc Progs.PC mv Progs.pos Progs.Pos mv Progs.curfn Progs.CurFunc mv Progs.progcache Progs.Cache mv Progs.cacheidx Progs.CacheIndex mv Progs.nextLive Progs.NextLive mv Progs.prevLive Progs.PrevLive mv Progs.Appendpp Progs.Append mv LivenessIndex.stackMapIndex LivenessIndex.StackMapIndex mv LivenessIndex.isUnsafePoint LivenessIndex.IsUnsafePoint mv Objw_Uint8 Objw_Uint16 Objw_Uint32 Objw_Uintptr Objw_UintN \ Objw_SymPtr Objw_SymPtrOff Objw_SymPtrWeakOff Objw_Global \ Objw_BitVec \ objw.go mv sharedProgArray NewProgs Progs \ LivenessIndex StackMapDontCare \ LivenessDontCare LivenessIndex.StackMapValid \ Progs.NewProg Progs.Flush Progs.Free Progs.Prog Progs.Clear Progs.Append Progs.SetText \ prog.go mv prog.go objw.go cmd/compile/internal/objw # Move ggloblnod to obj with the rest of the non-objw higher-level writing. mv ggloblnod obj.go ' cd ../objw rf ' mv Objw_Uint8 Uint8 mv Objw_Uint16 Uint16 mv Objw_Uint32 Uint32 mv Objw_Uintptr Uintptr mv Objw_UintN UintN mv Objw_SymPtr SymPtr mv Objw_SymPtrOff SymPtrOff mv Objw_SymPtrWeakOff SymPtrWeakOff mv Objw_Global Global mv Objw_BitVec BitVec ' Change-Id: I2b87085aa788564fb322e9c55bddd73347b4d5fd Reviewed-on: https://go-review.googlesource.com/c/go/+/279310 Trust: Russ Cox Run-TryBot: Russ Cox TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/amd64/ggen.go | 38 +-- src/cmd/compile/internal/arm/ggen.go | 26 +- src/cmd/compile/internal/arm/ssa.go | 4 +- src/cmd/compile/internal/arm64/ggen.go | 34 +-- src/cmd/compile/internal/arm64/ssa.go | 14 +- src/cmd/compile/internal/bitvec/bv.go | 190 +++++++++++++++ src/cmd/compile/internal/gc/alg.go | 25 +- src/cmd/compile/internal/gc/bv.go | 280 ---------------------- src/cmd/compile/internal/gc/bvset.go | 97 ++++++++ src/cmd/compile/internal/gc/embed.go | 29 +-- src/cmd/compile/internal/gc/go.go | 7 +- src/cmd/compile/internal/gc/gsubr.go | 188 --------------- src/cmd/compile/internal/gc/init.go | 13 +- src/cmd/compile/internal/gc/obj.go | 93 +++---- src/cmd/compile/internal/gc/pgen.go | 16 +- src/cmd/compile/internal/gc/plive.go | 142 +++++------ src/cmd/compile/internal/gc/reflect.go | 160 +++++++------ src/cmd/compile/internal/gc/ssa.go | 73 +++--- src/cmd/compile/internal/mips/ggen.go | 20 +- src/cmd/compile/internal/mips/ssa.go | 16 +- src/cmd/compile/internal/mips64/ggen.go | 24 +- src/cmd/compile/internal/mips64/ssa.go | 16 +- src/cmd/compile/internal/objw/objw.go | 72 ++++++ src/cmd/compile/internal/objw/prog.go | 218 +++++++++++++++++ src/cmd/compile/internal/ppc64/ggen.go | 30 +-- src/cmd/compile/internal/ppc64/ssa.go | 32 +-- src/cmd/compile/internal/riscv64/ggen.go | 22 +- src/cmd/compile/internal/riscv64/gsubr.go | 4 +- src/cmd/compile/internal/riscv64/ssa.go | 8 +- src/cmd/compile/internal/s390x/ggen.go | 22 +- src/cmd/compile/internal/s390x/ssa.go | 8 +- src/cmd/compile/internal/wasm/ssa.go | 11 +- src/cmd/compile/internal/x86/ggen.go | 22 
+- 33 files changed, 1008 insertions(+), 946 deletions(-) create mode 100644 src/cmd/compile/internal/bitvec/bv.go delete mode 100644 src/cmd/compile/internal/gc/bv.go create mode 100644 src/cmd/compile/internal/gc/bvset.go create mode 100644 src/cmd/compile/internal/objw/objw.go create mode 100644 src/cmd/compile/internal/objw/prog.go diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go index 48b00b3da9e32..dacdb07a3837d 100644 --- a/src/cmd/compile/internal/amd64/ggen.go +++ b/src/cmd/compile/internal/amd64/ggen.go @@ -6,8 +6,8 @@ package amd64 import ( "cmd/compile/internal/base" - "cmd/compile/internal/gc" "cmd/compile/internal/ir" + "cmd/compile/internal/objw" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/x86" @@ -54,7 +54,7 @@ func dzDI(b int64) int64 { return -dzClearStep * (dzBlockLen - tailSteps) } -func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog { +func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog { const ( ax = 1 << iota x0 @@ -70,61 +70,61 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Pr base.Fatalf("zerorange count not a multiple of widthptr %d", cnt) } if *state&ax == 0 { - p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0) + p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0) *state |= ax } - p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off) + p = pp.Append(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off) off += int64(types.PtrSize) cnt -= int64(types.PtrSize) } if cnt == 8 { if *state&ax == 0 { - p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0) + p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0) *state |= ax } - p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off) + p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off) } else if !isPlan9 && cnt <= int64(8*types.RegSize) { if *state&x0 == 0 { - p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0) + p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0) *state |= x0 } for i := int64(0); i < cnt/16; i++ { - p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16) + p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16) } if cnt%16 != 0 { - p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16)) + p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16)) } } else if !isPlan9 && (cnt <= int64(128*types.RegSize)) { if *state&x0 == 0 { - p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0) + p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0) *state |= x0 } - p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0) - p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt)) + p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0) + p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt)) p.To.Sym = ir.Syms.Duffzero if cnt%16 != 0 { - p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, 
x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8)) + p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8)) } } else { if *state&ax == 0 { - p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0) + p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0) *state |= ax } - p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0) - p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0) - p = pp.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0) - p = pp.Appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0) + p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0) + p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0) + p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0) + p = pp.Append(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0) } return p } -func ginsnop(pp *gc.Progs) *obj.Prog { +func ginsnop(pp *objw.Progs) *obj.Prog { // This is a hardware nop (1-byte 0x90) instruction, // even though we describe it as an explicit XCHGL here. // Particularly, this does not zero the high 32 bits diff --git a/src/cmd/compile/internal/arm/ggen.go b/src/cmd/compile/internal/arm/ggen.go index 2363d76346e29..f2c676300a93a 100644 --- a/src/cmd/compile/internal/arm/ggen.go +++ b/src/cmd/compile/internal/arm/ggen.go @@ -5,51 +5,51 @@ package arm import ( - "cmd/compile/internal/gc" "cmd/compile/internal/ir" + "cmd/compile/internal/objw" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/arm" ) -func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog { +func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog { if cnt == 0 { return p } if *r0 == 0 { - p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0) + p = pp.Append(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0) *r0 = 1 } if cnt < int64(4*types.PtrSize) { for i := int64(0); i < cnt; i += int64(types.PtrSize) { - p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i) + p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i) } } else if cnt <= int64(128*types.PtrSize) { - p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0) + p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0) p.Reg = arm.REGSP - p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) + p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) p.To.Name = obj.NAME_EXTERN p.To.Sym = ir.Syms.Duffzero p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize)) } else { - p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0) + p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0) p.Reg = arm.REGSP - p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0) + p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0) p.Reg = arm.REG_R1 - p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4) + p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4) p1 := p p.Scond |= arm.C_PBIT - p = pp.Appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0) + p = pp.Append(p, 
arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0) p.Reg = arm.REG_R2 - p = pp.Appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0) - gc.Patch(p, p1) + p = pp.Append(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0) + p.To.SetTarget(p1) } return p } -func ginsnop(pp *gc.Progs) *obj.Prog { +func ginsnop(pp *objw.Progs) *obj.Prog { p := pp.Prog(arm.AAND) p.From.Type = obj.TYPE_REG p.From.Reg = arm.REG_R0 diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go index ab7ec6176b388..30eae59331c3a 100644 --- a/src/cmd/compile/internal/arm/ssa.go +++ b/src/cmd/compile/internal/arm/ssa.go @@ -779,7 +779,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p2.Reg = arm.REG_R1 p3 := s.Prog(arm.ABLE) p3.To.Type = obj.TYPE_BRANCH - gc.Patch(p3, p) + p3.To.SetTarget(p) case ssa.OpARMLoweredMove: // MOVW.P 4(R1), Rtmp // MOVW.P Rtmp, 4(R2) @@ -820,7 +820,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p3.Reg = arm.REG_R1 p4 := s.Prog(arm.ABLE) p4.To.Type = obj.TYPE_BRANCH - gc.Patch(p4, p) + p4.To.SetTarget(p) case ssa.OpARMEqual, ssa.OpARMNotEqual, ssa.OpARMLessThan, diff --git a/src/cmd/compile/internal/arm64/ggen.go b/src/cmd/compile/internal/arm64/ggen.go index 37f11e0ff64cf..8364535f63bb8 100644 --- a/src/cmd/compile/internal/arm64/ggen.go +++ b/src/cmd/compile/internal/arm64/ggen.go @@ -5,8 +5,8 @@ package arm64 import ( - "cmd/compile/internal/gc" "cmd/compile/internal/ir" + "cmd/compile/internal/objw" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/arm64" @@ -24,24 +24,24 @@ func padframe(frame int64) int64 { return frame } -func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { +func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { if cnt == 0 { return p } if cnt < int64(4*types.PtrSize) { for i := int64(0); i < cnt; i += int64(types.PtrSize) { - p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i) + p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i) } } else if cnt <= int64(128*types.PtrSize) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend if cnt%(2*int64(types.PtrSize)) != 0 { - p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off) + p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off) off += int64(types.PtrSize) cnt -= int64(types.PtrSize) } - p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0) - p = pp.Appendpp(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0) + p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0) + p = pp.Append(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0) p.Reg = arm64.REG_R20 - p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) + p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) p.To.Name = obj.NAME_EXTERN p.To.Sym = ir.Syms.Duffzero p.To.Offset = 4 * (64 - cnt/(2*int64(types.PtrSize))) @@ -50,26 +50,26 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { // We are at the function entry, where no register is live, so it is okay to clobber // other registers const rtmp = arm64.REG_R20 - p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0) - p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, 
arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0) - p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0) + p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0) + p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0) + p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0) p.Reg = arm64.REGRT1 - p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0) - p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0) + p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0) + p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0) p.Reg = arm64.REGRT1 - p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(types.PtrSize)) + p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(types.PtrSize)) p.Scond = arm64.C_XPRE p1 := p - p = pp.Appendpp(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0) + p = pp.Append(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0) p.Reg = arm64.REGRT2 - p = pp.Appendpp(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0) - gc.Patch(p, p1) + p = pp.Append(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0) + p.To.SetTarget(p1) } return p } -func ginsnop(pp *gc.Progs) *obj.Prog { +func ginsnop(pp *objw.Progs) *obj.Prog { p := pp.Prog(arm64.AHINT) p.From.Type = obj.TYPE_CONST return p diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go index bb634cc38c83a..9bdea3ee2a4d9 100644 --- a/src/cmd/compile/internal/arm64/ssa.go +++ b/src/cmd/compile/internal/arm64/ssa.go @@ -582,7 +582,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p2.From.Type = obj.TYPE_REG p2.From.Reg = arm64.REGTMP p2.To.Type = obj.TYPE_BRANCH - gc.Patch(p2, p) + p2.To.SetTarget(p) case ssa.OpARM64LoweredAtomicExchange64Variant, ssa.OpARM64LoweredAtomicExchange32Variant: swap := arm64.ASWPALD @@ -636,7 +636,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p3.From.Type = obj.TYPE_REG p3.From.Reg = arm64.REGTMP p3.To.Type = obj.TYPE_BRANCH - gc.Patch(p3, p) + p3.To.SetTarget(p) case ssa.OpARM64LoweredAtomicAdd64Variant, ssa.OpARM64LoweredAtomicAdd32Variant: // LDADDAL Rarg1, (Rarg0), Rout @@ -700,13 +700,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p4.From.Type = obj.TYPE_REG p4.From.Reg = arm64.REGTMP p4.To.Type = obj.TYPE_BRANCH - gc.Patch(p4, p) + p4.To.SetTarget(p) p5 := s.Prog(arm64.ACSET) p5.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg p5.From.Reg = arm64.COND_EQ p5.To.Type = obj.TYPE_REG p5.To.Reg = out - gc.Patch(p2, p5) + p2.To.SetTarget(p5) case ssa.OpARM64LoweredAtomicCas64Variant, ssa.OpARM64LoweredAtomicCas32Variant: // Rarg0: ptr @@ -794,7 +794,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p3.From.Type = obj.TYPE_REG p3.From.Reg = arm64.REGTMP p3.To.Type = obj.TYPE_BRANCH - gc.Patch(p3, p) + p3.To.SetTarget(p) case ssa.OpARM64LoweredAtomicAnd8Variant, ssa.OpARM64LoweredAtomicAnd32Variant: atomic_clear := arm64.ALDCLRALW @@ -982,7 +982,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p2.Reg = arm64.REG_R16 p3 := s.Prog(arm64.ABLE) p3.To.Type = obj.TYPE_BRANCH - gc.Patch(p3, p) + p3.To.SetTarget(p) case ssa.OpARM64DUFFCOPY: p := s.Prog(obj.ADUFFCOPY) p.To.Type = obj.TYPE_MEM @@ -1015,7 +1015,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { 
p3.Reg = arm64.REG_R16 p4 := s.Prog(arm64.ABLE) p4.To.Type = obj.TYPE_BRANCH - gc.Patch(p4, p) + p4.To.SetTarget(p) case ssa.OpARM64CALLstatic, ssa.OpARM64CALLclosure, ssa.OpARM64CALLinter: s.Call(v) case ssa.OpARM64LoweredWB: diff --git a/src/cmd/compile/internal/bitvec/bv.go b/src/cmd/compile/internal/bitvec/bv.go new file mode 100644 index 0000000000000..1e084576d1301 --- /dev/null +++ b/src/cmd/compile/internal/bitvec/bv.go @@ -0,0 +1,190 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bitvec + +import ( + "math/bits" + + "cmd/compile/internal/base" +) + +const ( + wordBits = 32 + wordMask = wordBits - 1 + wordShift = 5 +) + +// A BitVec is a bit vector. +type BitVec struct { + N int32 // number of bits in vector + B []uint32 // words holding bits +} + +func New(n int32) BitVec { + nword := (n + wordBits - 1) / wordBits + return BitVec{n, make([]uint32, nword)} +} + +type Bulk struct { + words []uint32 + nbit int32 + nword int32 +} + +func NewBulk(nbit int32, count int32) Bulk { + nword := (nbit + wordBits - 1) / wordBits + size := int64(nword) * int64(count) + if int64(int32(size*4)) != size*4 { + base.Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size) + } + return Bulk{ + words: make([]uint32, size), + nbit: nbit, + nword: nword, + } +} + +func (b *Bulk) Next() BitVec { + out := BitVec{b.nbit, b.words[:b.nword]} + b.words = b.words[b.nword:] + return out +} + +func (bv1 BitVec) Eq(bv2 BitVec) bool { + if bv1.N != bv2.N { + base.Fatalf("bvequal: lengths %d and %d are not equal", bv1.N, bv2.N) + } + for i, x := range bv1.B { + if x != bv2.B[i] { + return false + } + } + return true +} + +func (dst BitVec) Copy(src BitVec) { + copy(dst.B, src.B) +} + +func (bv BitVec) Get(i int32) bool { + if i < 0 || i >= bv.N { + base.Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.N) + } + mask := uint32(1 << uint(i%wordBits)) + return bv.B[i>>wordShift]&mask != 0 +} + +func (bv BitVec) Set(i int32) { + if i < 0 || i >= bv.N { + base.Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.N) + } + mask := uint32(1 << uint(i%wordBits)) + bv.B[i/wordBits] |= mask +} + +func (bv BitVec) Unset(i int32) { + if i < 0 || i >= bv.N { + base.Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.N) + } + mask := uint32(1 << uint(i%wordBits)) + bv.B[i/wordBits] &^= mask +} + +// bvnext returns the smallest index >= i for which bvget(bv, i) == 1. +// If there is no such index, bvnext returns -1. +func (bv BitVec) Next(i int32) int32 { + if i >= bv.N { + return -1 + } + + // Jump i ahead to next word with bits. + if bv.B[i>>wordShift]>>uint(i&wordMask) == 0 { + i &^= wordMask + i += wordBits + for i < bv.N && bv.B[i>>wordShift] == 0 { + i += wordBits + } + } + + if i >= bv.N { + return -1 + } + + // Find 1 bit. 
+ w := bv.B[i>>wordShift] >> uint(i&wordMask) + i += int32(bits.TrailingZeros32(w)) + + return i +} + +func (bv BitVec) IsEmpty() bool { + for _, x := range bv.B { + if x != 0 { + return false + } + } + return true +} + +func (bv BitVec) Not() { + for i, x := range bv.B { + bv.B[i] = ^x + } +} + +// union +func (dst BitVec) Or(src1, src2 BitVec) { + if len(src1.B) == 0 { + return + } + _, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop + + for i, x := range src1.B { + dst.B[i] = x | src2.B[i] + } +} + +// intersection +func (dst BitVec) And(src1, src2 BitVec) { + if len(src1.B) == 0 { + return + } + _, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop + + for i, x := range src1.B { + dst.B[i] = x & src2.B[i] + } +} + +// difference +func (dst BitVec) AndNot(src1, src2 BitVec) { + if len(src1.B) == 0 { + return + } + _, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop + + for i, x := range src1.B { + dst.B[i] = x &^ src2.B[i] + } +} + +func (bv BitVec) String() string { + s := make([]byte, 2+bv.N) + copy(s, "#*") + for i := int32(0); i < bv.N; i++ { + ch := byte('0') + if bv.Get(i) { + ch = '1' + } + s[2+i] = ch + } + return string(s) +} + +func (bv BitVec) Clear() { + for i := range bv.B { + bv.B[i] = 0 + } +} diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index b0d46eab2feee..4fc8cf04eff5c 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -7,6 +7,7 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/objw" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" @@ -110,9 +111,9 @@ func genhash(t *types.Type) *obj.LSym { memhashvarlen = typecheck.LookupRuntimeFunc("memhash_varlen") } ot := 0 - ot = dsymptr(closure, ot, memhashvarlen, 0) - ot = duintptr(closure, ot, uint64(t.Width)) // size encoded in closure - ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA) + ot = objw.SymPtr(closure, ot, memhashvarlen, 0) + ot = objw.Uintptr(closure, ot, uint64(t.Width)) // size encoded in closure + objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA) return closure case types.ASPECIAL: break @@ -253,8 +254,8 @@ func genhash(t *types.Type) *obj.LSym { // Build closure. It doesn't close over any variables, so // it contains just the function pointer. 
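The genhash hunks above show the layout idiom that motivates the objw package: thread an offset through SymPtr/Uintptr writes, then seal the symbol with Global. A minimal sketch, with lsym, fnSym, and size assumed in scope:

    ot := 0
    ot = objw.SymPtr(lsym, ot, fnSym, 0)      // code pointer
    ot = objw.Uintptr(lsym, ot, uint64(size)) // closed-over constant
    objw.Global(lsym, int32(ot), obj.DUPOK|obj.RODATA)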
- dsymptr(closure, 0, sym.Linksym(), 0) - ggloblsym(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA) + objw.SymPtr(closure, 0, sym.Linksym(), 0) + objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA) return closure } @@ -302,8 +303,8 @@ func sysClosure(name string) *obj.LSym { s := typecheck.LookupRuntimeVar(name + "·f") if len(s.P) == 0 { f := typecheck.LookupRuntimeFunc(name) - dsymptr(s, 0, f, 0) - ggloblsym(s, int32(types.PtrSize), obj.DUPOK|obj.RODATA) + objw.SymPtr(s, 0, f, 0) + objw.Global(s, int32(types.PtrSize), obj.DUPOK|obj.RODATA) } return s } @@ -353,9 +354,9 @@ func geneq(t *types.Type) *obj.LSym { memequalvarlen = typecheck.LookupRuntimeVar("memequal_varlen") // asm func } ot := 0 - ot = dsymptr(closure, ot, memequalvarlen, 0) - ot = duintptr(closure, ot, uint64(t.Width)) - ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA) + ot = objw.SymPtr(closure, ot, memequalvarlen, 0) + ot = objw.Uintptr(closure, ot, uint64(t.Width)) + objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA) return closure case types.ASPECIAL: break @@ -632,8 +633,8 @@ func geneq(t *types.Type) *obj.LSym { typecheck.Target.Decls = append(typecheck.Target.Decls, fn) // Generate a closure which points at the function we just generated. - dsymptr(closure, 0, sym.Linksym(), 0) - ggloblsym(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA) + objw.SymPtr(closure, 0, sym.Linksym(), 0) + objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA) return closure } diff --git a/src/cmd/compile/internal/gc/bv.go b/src/cmd/compile/internal/gc/bv.go deleted file mode 100644 index d82851e7cb494..0000000000000 --- a/src/cmd/compile/internal/gc/bv.go +++ /dev/null @@ -1,280 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gc - -import ( - "math/bits" - - "cmd/compile/internal/base" -) - -const ( - wordBits = 32 - wordMask = wordBits - 1 - wordShift = 5 -) - -// A bvec is a bit vector. 
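For orientation while reading the deleted copy that follows, the renamed bitvec API is used like this (cmd/compile/internal packages cannot be imported elsewhere, so this is purely illustrative):

    bulk := bitvec.NewBulk(64, 2) // two 64-bit vectors, one backing allocation
    live, dead := bulk.Next(), bulk.Next()
    live.Set(3)
    dead.Or(dead, live)                 // dead |= live
    println(live.Get(3), dead.Eq(live)) // true true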
-type bvec struct { - n int32 // number of bits in vector - b []uint32 // words holding bits -} - -func bvalloc(n int32) bvec { - nword := (n + wordBits - 1) / wordBits - return bvec{n, make([]uint32, nword)} -} - -type bulkBvec struct { - words []uint32 - nbit int32 - nword int32 -} - -func bvbulkalloc(nbit int32, count int32) bulkBvec { - nword := (nbit + wordBits - 1) / wordBits - size := int64(nword) * int64(count) - if int64(int32(size*4)) != size*4 { - base.Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size) - } - return bulkBvec{ - words: make([]uint32, size), - nbit: nbit, - nword: nword, - } -} - -func (b *bulkBvec) next() bvec { - out := bvec{b.nbit, b.words[:b.nword]} - b.words = b.words[b.nword:] - return out -} - -func (bv1 bvec) Eq(bv2 bvec) bool { - if bv1.n != bv2.n { - base.Fatalf("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n) - } - for i, x := range bv1.b { - if x != bv2.b[i] { - return false - } - } - return true -} - -func (dst bvec) Copy(src bvec) { - copy(dst.b, src.b) -} - -func (bv bvec) Get(i int32) bool { - if i < 0 || i >= bv.n { - base.Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n) - } - mask := uint32(1 << uint(i%wordBits)) - return bv.b[i>>wordShift]&mask != 0 -} - -func (bv bvec) Set(i int32) { - if i < 0 || i >= bv.n { - base.Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n) - } - mask := uint32(1 << uint(i%wordBits)) - bv.b[i/wordBits] |= mask -} - -func (bv bvec) Unset(i int32) { - if i < 0 || i >= bv.n { - base.Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.n) - } - mask := uint32(1 << uint(i%wordBits)) - bv.b[i/wordBits] &^= mask -} - -// bvnext returns the smallest index >= i for which bvget(bv, i) == 1. -// If there is no such index, bvnext returns -1. -func (bv bvec) Next(i int32) int32 { - if i >= bv.n { - return -1 - } - - // Jump i ahead to next word with bits. - if bv.b[i>>wordShift]>>uint(i&wordMask) == 0 { - i &^= wordMask - i += wordBits - for i < bv.n && bv.b[i>>wordShift] == 0 { - i += wordBits - } - } - - if i >= bv.n { - return -1 - } - - // Find 1 bit. - w := bv.b[i>>wordShift] >> uint(i&wordMask) - i += int32(bits.TrailingZeros32(w)) - - return i -} - -func (bv bvec) IsEmpty() bool { - for _, x := range bv.b { - if x != 0 { - return false - } - } - return true -} - -func (bv bvec) Not() { - for i, x := range bv.b { - bv.b[i] = ^x - } -} - -// union -func (dst bvec) Or(src1, src2 bvec) { - if len(src1.b) == 0 { - return - } - _, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop - - for i, x := range src1.b { - dst.b[i] = x | src2.b[i] - } -} - -// intersection -func (dst bvec) And(src1, src2 bvec) { - if len(src1.b) == 0 { - return - } - _, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop - - for i, x := range src1.b { - dst.b[i] = x & src2.b[i] - } -} - -// difference -func (dst bvec) AndNot(src1, src2 bvec) { - if len(src1.b) == 0 { - return - } - _, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop - - for i, x := range src1.b { - dst.b[i] = x &^ src2.b[i] - } -} - -func (bv bvec) String() string { - s := make([]byte, 2+bv.n) - copy(s, "#*") - for i := int32(0); i < bv.n; i++ { - ch := byte('0') - if bv.Get(i) { - ch = '1' - } - s[2+i] = ch - } - return string(s) -} - -func (bv bvec) Clear() { - for i := range bv.b { - bv.b[i] = 0 - } -} - -// FNV-1 hash function constants. 
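The constants deleted here (and reintroduced as h0 and hp in bvset.go below) are the 32-bit FNV-1 offset basis and prime; hashbitmap applies the standard FNV-1 step, multiply then xor, to each byte of each word. Over a plain byte slice the same loop would read:

    h := uint32(2166136261) // FNV-1 offset basis (h0)
    for _, b := range data {
        h = (h * 16777619) ^ uint32(b) // multiply by the FNV prime (hp), then xor
    }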
-const ( - H0 = 2166136261 - Hp = 16777619 -) - -func hashbitmap(h uint32, bv bvec) uint32 { - n := int((bv.n + 31) / 32) - for i := 0; i < n; i++ { - w := bv.b[i] - h = (h * Hp) ^ (w & 0xff) - h = (h * Hp) ^ ((w >> 8) & 0xff) - h = (h * Hp) ^ ((w >> 16) & 0xff) - h = (h * Hp) ^ ((w >> 24) & 0xff) - } - - return h -} - -// bvecSet is a set of bvecs, in initial insertion order. -type bvecSet struct { - index []int // hash -> uniq index. -1 indicates empty slot. - uniq []bvec // unique bvecs, in insertion order -} - -func (m *bvecSet) grow() { - // Allocate new index. - n := len(m.index) * 2 - if n == 0 { - n = 32 - } - newIndex := make([]int, n) - for i := range newIndex { - newIndex[i] = -1 - } - - // Rehash into newIndex. - for i, bv := range m.uniq { - h := hashbitmap(H0, bv) % uint32(len(newIndex)) - for { - j := newIndex[h] - if j < 0 { - newIndex[h] = i - break - } - h++ - if h == uint32(len(newIndex)) { - h = 0 - } - } - } - m.index = newIndex -} - -// add adds bv to the set and returns its index in m.extractUniqe. -// The caller must not modify bv after this. -func (m *bvecSet) add(bv bvec) int { - if len(m.uniq)*4 >= len(m.index) { - m.grow() - } - - index := m.index - h := hashbitmap(H0, bv) % uint32(len(index)) - for { - j := index[h] - if j < 0 { - // New bvec. - index[h] = len(m.uniq) - m.uniq = append(m.uniq, bv) - return len(m.uniq) - 1 - } - jlive := m.uniq[j] - if bv.Eq(jlive) { - // Existing bvec. - return j - } - - h++ - if h == uint32(len(index)) { - h = 0 - } - } -} - -// extractUniqe returns this slice of unique bit vectors in m, as -// indexed by the result of bvecSet.add. -func (m *bvecSet) extractUniqe() []bvec { - return m.uniq -} diff --git a/src/cmd/compile/internal/gc/bvset.go b/src/cmd/compile/internal/gc/bvset.go new file mode 100644 index 0000000000000..7f5f41fb5c421 --- /dev/null +++ b/src/cmd/compile/internal/gc/bvset.go @@ -0,0 +1,97 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gc + +import "cmd/compile/internal/bitvec" + +// FNV-1 hash function constants. +const ( + h0 = 2166136261 + hp = 16777619 +) + +// bvecSet is a set of bvecs, in initial insertion order. +type bvecSet struct { + index []int // hash -> uniq index. -1 indicates empty slot. + uniq []bitvec.BitVec // unique bvecs, in insertion order +} + +func (m *bvecSet) grow() { + // Allocate new index. + n := len(m.index) * 2 + if n == 0 { + n = 32 + } + newIndex := make([]int, n) + for i := range newIndex { + newIndex[i] = -1 + } + + // Rehash into newIndex. + for i, bv := range m.uniq { + h := hashbitmap(h0, bv) % uint32(len(newIndex)) + for { + j := newIndex[h] + if j < 0 { + newIndex[h] = i + break + } + h++ + if h == uint32(len(newIndex)) { + h = 0 + } + } + } + m.index = newIndex +} + +// add adds bv to the set and returns its index in m.extractUniqe. +// The caller must not modify bv after this. +func (m *bvecSet) add(bv bitvec.BitVec) int { + if len(m.uniq)*4 >= len(m.index) { + m.grow() + } + + index := m.index + h := hashbitmap(h0, bv) % uint32(len(index)) + for { + j := index[h] + if j < 0 { + // New bvec. + index[h] = len(m.uniq) + m.uniq = append(m.uniq, bv) + return len(m.uniq) - 1 + } + jlive := m.uniq[j] + if bv.Eq(jlive) { + // Existing bvec. + return j + } + + h++ + if h == uint32(len(index)) { + h = 0 + } + } +} + +// extractUnique returns this slice of unique bit vectors in m, as +// indexed by the result of bvecSet.add. 
+func (m *bvecSet) extractUnique() []bitvec.BitVec { + return m.uniq +} + +func hashbitmap(h uint32, bv bitvec.BitVec) uint32 { + n := int((bv.N + 31) / 32) + for i := 0; i < n; i++ { + w := bv.B[i] + h = (h * hp) ^ (w & 0xff) + h = (h * hp) ^ ((w >> 8) & 0xff) + h = (h * hp) ^ ((w >> 16) & 0xff) + h = (h * hp) ^ ((w >> 24) & 0xff) + } + + return h +} diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go index bcfec3cad32e7..282e718b29bc2 100644 --- a/src/cmd/compile/internal/gc/embed.go +++ b/src/cmd/compile/internal/gc/embed.go @@ -7,6 +7,7 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/objw" "cmd/compile/internal/syntax" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" @@ -206,19 +207,19 @@ func initEmbed(v *ir.Name) { } sym := v.Sym().Linksym() off := 0 - off = dsymptr(sym, off, fsym, 0) // data string - off = duintptr(sym, off, uint64(size)) // len + off = objw.SymPtr(sym, off, fsym, 0) // data string + off = objw.Uintptr(sym, off, uint64(size)) // len if kind == embedBytes { - duintptr(sym, off, uint64(size)) // cap for slice + objw.Uintptr(sym, off, uint64(size)) // cap for slice } case embedFiles: slicedata := base.Ctxt.Lookup(`"".` + v.Sym().Name + `.files`) off := 0 // []files pointed at by Files - off = dsymptr(slicedata, off, slicedata, 3*types.PtrSize) // []file, pointing just past slice - off = duintptr(slicedata, off, uint64(len(files))) - off = duintptr(slicedata, off, uint64(len(files))) + off = objw.SymPtr(slicedata, off, slicedata, 3*types.PtrSize) // []file, pointing just past slice + off = objw.Uintptr(slicedata, off, uint64(len(files))) + off = objw.Uintptr(slicedata, off, uint64(len(files))) // embed/embed.go type file is: // name string @@ -228,25 +229,25 @@ func initEmbed(v *ir.Name) { const hashSize = 16 hash := make([]byte, hashSize) for _, file := range files { - off = dsymptr(slicedata, off, stringsym(v.Pos(), file), 0) // file string - off = duintptr(slicedata, off, uint64(len(file))) + off = objw.SymPtr(slicedata, off, stringsym(v.Pos(), file), 0) // file string + off = objw.Uintptr(slicedata, off, uint64(len(file))) if strings.HasSuffix(file, "/") { // entry for directory - no data - off = duintptr(slicedata, off, 0) - off = duintptr(slicedata, off, 0) + off = objw.Uintptr(slicedata, off, 0) + off = objw.Uintptr(slicedata, off, 0) off += hashSize } else { fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], true, hash) if err != nil { base.ErrorfAt(v.Pos(), "embed %s: %v", file, err) } - off = dsymptr(slicedata, off, fsym, 0) // data string - off = duintptr(slicedata, off, uint64(size)) + off = objw.SymPtr(slicedata, off, fsym, 0) // data string + off = objw.Uintptr(slicedata, off, uint64(size)) off = int(slicedata.WriteBytes(base.Ctxt, int64(off), hash)) } } - ggloblsym(slicedata, int32(off), obj.RODATA|obj.LOCAL) + objw.Global(slicedata, int32(off), obj.RODATA|obj.LOCAL) sym := v.Sym().Linksym() - dsymptr(sym, 0, slicedata, 0) + objw.SymPtr(sym, 0, slicedata, 0) } } diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index 7648e910d57de..c979edcdf8372 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/compile/internal/objw" "cmd/compile/internal/ssa" "cmd/compile/internal/types" "cmd/internal/obj" @@ -33,10 +34,10 @@ type Arch struct { // ZeroRange zeroes a range of memory on stack. 
It is only inserted // at function entry, and it is ok to clobber registers. - ZeroRange func(*Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog + ZeroRange func(*objw.Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog - Ginsnop func(*Progs) *obj.Prog - Ginsnopdefer func(*Progs) *obj.Prog // special ginsnop for deferreturn + Ginsnop func(*objw.Progs) *obj.Prog + Ginsnopdefer func(*objw.Progs) *obj.Prog // special ginsnop for deferreturn // SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags. SSAMarkMoves func(*SSAGenState, *ssa.Block) diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index f24687ec0f34c..f746a358caf5b 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -33,164 +33,14 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" - "cmd/compile/internal/ssa" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/objabi" - "cmd/internal/src" "fmt" "os" ) -var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839 - -// Progs accumulates Progs for a function and converts them into machine code. -type Progs struct { - Text *obj.Prog // ATEXT Prog for this function - next *obj.Prog // next Prog - pc int64 // virtual PC; count of Progs - pos src.XPos // position to use for new Progs - curfn *ir.Func // fn these Progs are for - progcache []obj.Prog // local progcache - cacheidx int // first free element of progcache - - nextLive LivenessIndex // liveness index for the next Prog - prevLive LivenessIndex // last emitted liveness index -} - -// newProgs returns a new Progs for fn. -// worker indicates which of the backend workers will use the Progs. -func newProgs(fn *ir.Func, worker int) *Progs { - pp := new(Progs) - if base.Ctxt.CanReuseProgs() { - sz := len(sharedProgArray) / base.Flag.LowerC - pp.progcache = sharedProgArray[sz*worker : sz*(worker+1)] - } - pp.curfn = fn - - // prime the pump - pp.next = pp.NewProg() - pp.clearp(pp.next) - - pp.pos = fn.Pos() - pp.settext(fn) - // PCDATA tables implicitly start with index -1. - pp.prevLive = LivenessIndex{-1, false} - pp.nextLive = pp.prevLive - return pp -} - -func (pp *Progs) NewProg() *obj.Prog { - var p *obj.Prog - if pp.cacheidx < len(pp.progcache) { - p = &pp.progcache[pp.cacheidx] - pp.cacheidx++ - } else { - p = new(obj.Prog) - } - p.Ctxt = base.Ctxt - return p -} - -// Flush converts from pp to machine code. -func (pp *Progs) Flush() { - plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.curfn} - obj.Flushplist(base.Ctxt, plist, pp.NewProg, base.Ctxt.Pkgpath) -} - -// Free clears pp and any associated resources. -func (pp *Progs) Free() { - if base.Ctxt.CanReuseProgs() { - // Clear progs to enable GC and avoid abuse. - s := pp.progcache[:pp.cacheidx] - for i := range s { - s[i] = obj.Prog{} - } - } - // Clear pp to avoid abuse. - *pp = Progs{} -} - -// Prog adds a Prog with instruction As to pp. -func (pp *Progs) Prog(as obj.As) *obj.Prog { - if pp.nextLive.StackMapValid() && pp.nextLive.stackMapIndex != pp.prevLive.stackMapIndex { - // Emit stack map index change. - idx := pp.nextLive.stackMapIndex - pp.prevLive.stackMapIndex = idx - p := pp.Prog(obj.APCDATA) - Addrconst(&p.From, objabi.PCDATA_StackMapIndex) - Addrconst(&p.To, int64(idx)) - } - if pp.nextLive.isUnsafePoint != pp.prevLive.isUnsafePoint { - // Emit unsafe-point marker. 
- pp.prevLive.isUnsafePoint = pp.nextLive.isUnsafePoint - p := pp.Prog(obj.APCDATA) - Addrconst(&p.From, objabi.PCDATA_UnsafePoint) - if pp.nextLive.isUnsafePoint { - Addrconst(&p.To, objabi.PCDATA_UnsafePointUnsafe) - } else { - Addrconst(&p.To, objabi.PCDATA_UnsafePointSafe) - } - } - - p := pp.next - pp.next = pp.NewProg() - pp.clearp(pp.next) - p.Link = pp.next - - if !pp.pos.IsKnown() && base.Flag.K != 0 { - base.Warn("prog: unknown position (line 0)") - } - - p.As = as - p.Pos = pp.pos - if pp.pos.IsStmt() == src.PosIsStmt { - // Clear IsStmt for later Progs at this pos provided that as can be marked as a stmt - if ssa.LosesStmtMark(as) { - return p - } - pp.pos = pp.pos.WithNotStmt() - } - return p -} - -func (pp *Progs) clearp(p *obj.Prog) { - obj.Nopout(p) - p.As = obj.AEND - p.Pc = pp.pc - pp.pc++ -} - -func (pp *Progs) Appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, foffset int64, ttype obj.AddrType, treg int16, toffset int64) *obj.Prog { - q := pp.NewProg() - pp.clearp(q) - q.As = as - q.Pos = p.Pos - q.From.Type = ftype - q.From.Reg = freg - q.From.Offset = foffset - q.To.Type = ttype - q.To.Reg = treg - q.To.Offset = toffset - q.Link = p.Link - p.Link = q - return q -} - -func (pp *Progs) settext(fn *ir.Func) { - if pp.Text != nil { - base.Fatalf("Progs.settext called twice") - } - ptxt := pp.Prog(obj.ATEXT) - pp.Text = ptxt - - fn.LSym.Func().Text = ptxt - ptxt.From.Type = obj.TYPE_MEM - ptxt.From.Name = obj.NAME_EXTERN - ptxt.From.Sym = fn.LSym -} - // makeABIWrapper creates a new function that wraps a cross-ABI call // to "f". The wrapper is marked as an ABIWRAPPER. func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { @@ -426,41 +276,3 @@ func setupTextLSym(f *ir.Func, flag int) { base.Ctxt.InitTextSym(f.LSym, flag) } - -func ggloblnod(nam ir.Node) { - s := nam.Sym().Linksym() - s.Gotype = ngotype(nam).Linksym() - flags := 0 - if nam.Name().Readonly() { - flags = obj.RODATA - } - if nam.Type() != nil && !nam.Type().HasPointers() { - flags |= obj.NOPTR - } - base.Ctxt.Globl(s, nam.Type().Width, flags) - if nam.Name().LibfuzzerExtraCounter() { - s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER - } - if nam.Sym().Linkname != "" { - // Make sure linkname'd symbol is non-package. When a symbol is - // both imported and linkname'd, s.Pkg may not set to "_" in - // types.Sym.Linksym because LSym already exists. Set it here. 
- s.Pkg = "_" - } -} - -func ggloblsym(s *obj.LSym, width int32, flags int16) { - if flags&obj.LOCAL != 0 { - s.Set(obj.AttrLocal, true) - flags &^= obj.LOCAL - } - base.Ctxt.Globl(s, int64(width), int(flags)) -} - -func Addrconst(a *obj.Addr, v int64) { - a.SetConst(v) -} - -func Patch(p *obj.Prog, to *obj.Prog) { - p.To.SetTarget(to) -} diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index ed61c11522bec..da3f40f4e889a 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -7,6 +7,7 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/objw" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" @@ -100,17 +101,17 @@ func fninit() *ir.Name { sym.Def = task lsym := sym.Linksym() ot := 0 - ot = duintptr(lsym, ot, 0) // state: not initialized yet - ot = duintptr(lsym, ot, uint64(len(deps))) - ot = duintptr(lsym, ot, uint64(len(fns))) + ot = objw.Uintptr(lsym, ot, 0) // state: not initialized yet + ot = objw.Uintptr(lsym, ot, uint64(len(deps))) + ot = objw.Uintptr(lsym, ot, uint64(len(fns))) for _, d := range deps { - ot = dsymptr(lsym, ot, d, 0) + ot = objw.SymPtr(lsym, ot, d, 0) } for _, f := range fns { - ot = dsymptr(lsym, ot, f, 0) + ot = objw.SymPtr(lsym, ot, f, 0) } // An initTask has pointers, but none into the Go heap. // It's not quite read only, the state field must be modifiable. - ggloblsym(lsym, int32(ot), obj.NOPTR) + objw.Global(lsym, int32(ot), obj.NOPTR) return task } diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 1b4ba50e6bb28..1d0a0f7a04cd9 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -7,6 +7,7 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/objw" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/bio" @@ -160,7 +161,7 @@ func dumpdata() { if zerosize > 0 { zero := ir.Pkgs.Map.Lookup("zero") - ggloblsym(zero.Linksym(), int32(zerosize), obj.DUPOK|obj.RODATA) + objw.Global(zero.Linksym(), int32(zerosize), obj.DUPOK|obj.RODATA) } addGCLocals() @@ -281,8 +282,8 @@ func dumpfuncsyms() { }) for _, s := range funcsyms { sf := s.Pkg.Lookup(ir.FuncSymName(s)).Linksym() - dsymptr(sf, 0, s.Linksym(), 0) - ggloblsym(sf, int32(types.PtrSize), obj.DUPOK|obj.RODATA) + objw.SymPtr(sf, 0, s.Linksym(), 0) + objw.Global(sf, int32(types.PtrSize), obj.DUPOK|obj.RODATA) } } @@ -298,53 +299,20 @@ func addGCLocals() { } for _, gcsym := range []*obj.LSym{fn.GCArgs, fn.GCLocals} { if gcsym != nil && !gcsym.OnList() { - ggloblsym(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK) + objw.Global(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK) } } if x := fn.StackObjects; x != nil { attr := int16(obj.RODATA) - ggloblsym(x, int32(len(x.P)), attr) + objw.Global(x, int32(len(x.P)), attr) x.Set(obj.AttrStatic, true) } if x := fn.OpenCodedDeferInfo; x != nil { - ggloblsym(x, int32(len(x.P)), obj.RODATA|obj.DUPOK) + objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK) } } } -func duintxx(s *obj.LSym, off int, v uint64, wid int) int { - if off&(wid-1) != 0 { - base.Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off) - } - s.WriteInt(base.Ctxt, int64(off), wid, int64(v)) - return off + wid -} - -func duint8(s *obj.LSym, off int, v uint8) int { - return duintxx(s, off, uint64(v), 1) -} - -func duint16(s *obj.LSym, off int, v uint16) int { - return duintxx(s, off, uint64(v), 2) -} 
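The duintxx helpers deleted here (duint8/duint16/duint32/duintptr) reappear later in this patch as objw.Uint8 through objw.Uintptr over a shared objw.UintN. Their common contract: write a wid-byte integer at off, require natural alignment, and return the next free offset so callers can chain writes. A self-contained model of that contract, with little-endian byte order assumed purely for illustration (the real helpers write through obj.LSym.WriteInt, which honors the target's byte order):

package main

import "fmt"

// uintN models duintxx/objw.UintN: reject misaligned offsets and
// return the advanced offset so successive writes can be threaded.
func uintN(buf []byte, off int, v uint64, wid int) int {
	if off&(wid-1) != 0 {
		panic(fmt.Sprintf("misaligned: v=%d wid=%d off=%d", v, wid, off))
	}
	for i := 0; i < wid; i++ {
		buf[off+i] = byte(v >> (8 * uint(i))) // little-endian: sketch only
	}
	return off + wid
}

func main() {
	buf := make([]byte, 8)
	off := uintN(buf, 0, 3, 2)  // like duint16(s, 0, 3)
	off = uintN(buf, off, 7, 2) // off threads from call to call
	fmt.Println(off, buf[:off]) // 4 [3 0 7 0]
}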
- -func duint32(s *obj.LSym, off int, v uint32) int { - return duintxx(s, off, uint64(v), 4) -} - -func duintptr(s *obj.LSym, off int, v uint64) int { - return duintxx(s, off, v, types.PtrSize) -} - -func dbvec(s *obj.LSym, off int, bv bvec) int { - // Runtime reads the bitmaps as byte arrays. Oblige. - for j := 0; int32(j) < bv.n; j += 8 { - word := bv.b[j/32] - off = duint8(s, off, uint8(word>>(uint(j)%32))) - } - return off -} - const ( stringSymPrefix = "go.string." stringSymPattern = ".gostring.%d.%x" @@ -370,7 +338,7 @@ func stringsym(pos src.XPos, s string) (data *obj.LSym) { symdata := base.Ctxt.Lookup(stringSymPrefix + symname) if !symdata.OnList() { off := dstringdata(symdata, 0, s, pos, "string") - ggloblsym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL) + objw.Global(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL) symdata.Set(obj.AttrContentAddressable, true) } @@ -450,7 +418,7 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj. info := symdata.NewFileInfo() info.Name = file info.Size = size - ggloblsym(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL) + objw.Global(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL) // Note: AttrContentAddressable cannot be set here, // because the content-addressable-handling code // does not know about file symbols. @@ -480,7 +448,7 @@ func slicedata(pos src.XPos, s string) *ir.Name { lsym := sym.Linksym() off := dstringdata(lsym, 0, s, pos, "slice") - ggloblsym(lsym, int32(off), obj.NOPTR|obj.LOCAL) + objw.Global(lsym, int32(off), obj.NOPTR|obj.LOCAL) return symnode } @@ -505,25 +473,6 @@ func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int return off + len(t) } -func dsymptr(s *obj.LSym, off int, x *obj.LSym, xoff int) int { - off = int(types.Rnd(int64(off), int64(types.PtrSize))) - s.WriteAddr(base.Ctxt, int64(off), types.PtrSize, x, int64(xoff)) - off += types.PtrSize - return off -} - -func dsymptrOff(s *obj.LSym, off int, x *obj.LSym) int { - s.WriteOff(base.Ctxt, int64(off), x, 0) - off += 4 - return off -} - -func dsymptrWeakOff(s *obj.LSym, off int, x *obj.LSym) int { - s.WriteWeakOff(base.Ctxt, int64(off), x, 0) - off += 4 - return off -} - // slicesym writes a static slice symbol {&arr, lencap, lencap} to n+noff. // slicesym does not modify n. func slicesym(n *ir.Name, noff int64, arr *ir.Name, lencap int64) { @@ -623,3 +572,25 @@ func litsym(n *ir.Name, noff int64, c ir.Node, wid int) { base.Fatalf("litsym unhandled OLITERAL %v", c) } } + +func ggloblnod(nam ir.Node) { + s := nam.Sym().Linksym() + s.Gotype = ngotype(nam).Linksym() + flags := 0 + if nam.Name().Readonly() { + flags = obj.RODATA + } + if nam.Type() != nil && !nam.Type().HasPointers() { + flags |= obj.NOPTR + } + base.Ctxt.Globl(s, nam.Type().Width, flags) + if nam.Name().LibfuzzerExtraCounter() { + s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER + } + if nam.Sym().Linkname != "" { + // Make sure linkname'd symbol is non-package. When a symbol is + // both imported and linkname'd, s.Pkg may not set to "_" in + // types.Sym.Linksym because LSym already exists. Set it here. 
+ s.Pkg = "_" + } +} diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index c0f3326454e6b..40a2195a122ad 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -6,7 +6,9 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/bitvec" "cmd/compile/internal/ir" + "cmd/compile/internal/objw" "cmd/compile/internal/ssa" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" @@ -34,13 +36,13 @@ func emitptrargsmap(fn *ir.Func) { } lsym := base.Ctxt.Lookup(fn.LSym.Name + ".args_stackmap") nptr := int(fn.Type().ArgWidth() / int64(types.PtrSize)) - bv := bvalloc(int32(nptr) * 2) + bv := bitvec.New(int32(nptr) * 2) nbitmap := 1 if fn.Type().NumResults() > 0 { nbitmap = 2 } - off := duint32(lsym, 0, uint32(nbitmap)) - off = duint32(lsym, off, uint32(bv.n)) + off := objw.Uint32(lsym, 0, uint32(nbitmap)) + off = objw.Uint32(lsym, off, uint32(bv.N)) if ir.IsMethod(fn) { onebitwalktype1(fn.Type().Recvs(), 0, bv) @@ -48,14 +50,14 @@ func emitptrargsmap(fn *ir.Func) { if fn.Type().NumParams() > 0 { onebitwalktype1(fn.Type().Params(), 0, bv) } - off = dbvec(lsym, off, bv) + off = objw.BitVec(lsym, off, bv) if fn.Type().NumResults() > 0 { onebitwalktype1(fn.Type().Results(), 0, bv) - off = dbvec(lsym, off, bv) + off = objw.BitVec(lsym, off, bv) } - ggloblsym(lsym, int32(off), obj.RODATA|obj.LOCAL) + objw.Global(lsym, int32(off), obj.RODATA|obj.LOCAL) } // cmpstackvarlt reports whether the stack variable a sorts before b. @@ -314,7 +316,7 @@ func compileSSA(fn *ir.Func, worker int) { largeStackFramesMu.Unlock() return } - pp := newProgs(fn, worker) + pp := objw.NewProgs(fn, worker) defer pp.Free() genssa(f, pp) // Check frame size again. diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index ac3b4bcd31c0c..260edda9ce7d2 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -16,7 +16,9 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/bitvec" "cmd/compile/internal/ir" + "cmd/compile/internal/objw" "cmd/compile/internal/ssa" "cmd/compile/internal/types" "cmd/internal/obj" @@ -88,15 +90,15 @@ type BlockEffects struct { // // uevar: upward exposed variables (used before set in block) // varkill: killed variables (set in block) - uevar bvec - varkill bvec + uevar bitvec.BitVec + varkill bitvec.BitVec // Computed during Liveness.solve using control flow information: // // livein: variables live at block entry // liveout: variables live at block exit - livein bvec - liveout bvec + livein bitvec.BitVec + liveout bitvec.BitVec } // A collection of global state used by liveness analysis. @@ -114,84 +116,54 @@ type Liveness struct { allUnsafe bool // unsafePoints bit i is set if Value ID i is an unsafe-point // (preemption is not allowed). Only valid if !allUnsafe. - unsafePoints bvec + unsafePoints bitvec.BitVec // An array with a bit vector for each safe point in the // current Block during Liveness.epilogue. Indexed in Value // order for that block. Additionally, for the entry block // livevars[0] is the entry bitmap. Liveness.compact moves // these to stackMaps. - livevars []bvec + livevars []bitvec.BitVec // livenessMap maps from safe points (i.e., CALLs) to their // liveness map indexes. livenessMap LivenessMap stackMapSet bvecSet - stackMaps []bvec + stackMaps []bitvec.BitVec cache progeffectscache } // LivenessMap maps from *ssa.Value to LivenessIndex. 
type LivenessMap struct { - vals map[ssa.ID]LivenessIndex + vals map[ssa.ID]objw.LivenessIndex // The set of live, pointer-containing variables at the deferreturn // call (only set when open-coded defers are used). - deferreturn LivenessIndex + deferreturn objw.LivenessIndex } func (m *LivenessMap) reset() { if m.vals == nil { - m.vals = make(map[ssa.ID]LivenessIndex) + m.vals = make(map[ssa.ID]objw.LivenessIndex) } else { for k := range m.vals { delete(m.vals, k) } } - m.deferreturn = LivenessDontCare + m.deferreturn = objw.LivenessDontCare } -func (m *LivenessMap) set(v *ssa.Value, i LivenessIndex) { +func (m *LivenessMap) set(v *ssa.Value, i objw.LivenessIndex) { m.vals[v.ID] = i } -func (m LivenessMap) Get(v *ssa.Value) LivenessIndex { +func (m LivenessMap) Get(v *ssa.Value) objw.LivenessIndex { // If v isn't in the map, then it's a "don't care" and not an // unsafe-point. if idx, ok := m.vals[v.ID]; ok { return idx } - return LivenessIndex{StackMapDontCare, false} -} - -// LivenessIndex stores the liveness map information for a Value. -type LivenessIndex struct { - stackMapIndex int - - // isUnsafePoint indicates that this is an unsafe-point. - // - // Note that it's possible for a call Value to have a stack - // map while also being an unsafe-point. This means it cannot - // be preempted at this instruction, but that a preemption or - // stack growth may happen in the called function. - isUnsafePoint bool -} - -// LivenessDontCare indicates that the liveness information doesn't -// matter. Currently it is used in deferreturn liveness when we don't -// actually need it. It should never be emitted to the PCDATA stream. -var LivenessDontCare = LivenessIndex{StackMapDontCare, true} - -// StackMapDontCare indicates that the stack map index at a Value -// doesn't matter. -// -// This is a sentinel value that should never be emitted to the PCDATA -// stream. We use -1000 because that's obviously never a valid stack -// index (but -1 is). -const StackMapDontCare = -1000 - -func (idx LivenessIndex) StackMapValid() bool { - return idx.stackMapIndex != StackMapDontCare + return objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: false} } type progeffectscache struct { @@ -380,7 +352,7 @@ func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int if cap(lc.be) >= f.NumBlocks() { lv.be = lc.be[:f.NumBlocks()] } - lv.livenessMap = LivenessMap{vals: lc.livenessMap.vals, deferreturn: LivenessDontCare} + lv.livenessMap = LivenessMap{vals: lc.livenessMap.vals, deferreturn: objw.LivenessDontCare} lc.livenessMap.vals = nil } if lv.be == nil { @@ -389,14 +361,14 @@ func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int nblocks := int32(len(f.Blocks)) nvars := int32(len(vars)) - bulk := bvbulkalloc(nvars, nblocks*7) + bulk := bitvec.NewBulk(nvars, nblocks*7) for _, b := range f.Blocks { be := lv.blockEffects(b) - be.uevar = bulk.next() - be.varkill = bulk.next() - be.livein = bulk.next() - be.liveout = bulk.next() + be.uevar = bulk.Next() + be.varkill = bulk.Next() + be.livein = bulk.Next() + be.liveout = bulk.Next() } lv.livenessMap.reset() @@ -411,7 +383,7 @@ func (lv *Liveness) blockEffects(b *ssa.Block) *BlockEffects { // NOTE: The bitmap for a specific type t could be cached in t after // the first run and then simply copied into bv at the correct offset // on future calls with the same type t. 
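The NOTE above describes what onebitwalktype1 computes: one bit per pointer-sized word of t, set iff that word can hold a heap pointer. A toy model of such a bitmap, assuming a 64-bit target and a hypothetical three-word struct (illustration only; the real walk recurses over *types.Type):

package main

import "fmt"

// set marks word i as a pointer, using the same []uint32 layout as
// bitvec.BitVec.B.
func set(b []uint32, i int) { b[i/32] |= 1 << (uint(i) % 32) }

func main() {
	// Hypothetical struct{ p *int; n int; q *byte }: words 0 and 2
	// hold pointers, word 1 is a scalar.
	bv := make([]uint32, 1)
	set(bv, 0) // p
	set(bv, 2) // q
	fmt.Printf("%03b\n", bv[0]) // 101
}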
-func onebitwalktype1(t *types.Type, off int64, bv bvec) { +func onebitwalktype1(t *types.Type, off int64, bv bitvec.BitVec) { if t.Align > 0 && off&int64(t.Align-1) != 0 { base.Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off) } @@ -487,7 +459,7 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) { // Generates live pointer value maps for arguments and local variables. The // this argument and the in arguments are always assumed live. The vars // argument is a slice of *Nodes. -func (lv *Liveness) pointerMap(liveout bvec, vars []*ir.Name, args, locals bvec) { +func (lv *Liveness) pointerMap(liveout bitvec.BitVec, vars []*ir.Name, args, locals bitvec.BitVec) { for i := int32(0); ; i++ { i = liveout.Next(i) if i < 0 { @@ -527,7 +499,7 @@ func (lv *Liveness) markUnsafePoints() { return } - lv.unsafePoints = bvalloc(int32(lv.f.NumValues())) + lv.unsafePoints = bitvec.New(int32(lv.f.NumValues())) // Mark architecture-specific unsafe points. for _, b := range lv.f.Blocks { @@ -638,11 +610,11 @@ func (lv *Liveness) markUnsafePoints() { // nice to only flood as far as the unsafe.Pointer -> uintptr // conversion, but it's hard to know which argument of an Add // or Sub to follow. - var flooded bvec + var flooded bitvec.BitVec var flood func(b *ssa.Block, vi int) flood = func(b *ssa.Block, vi int) { - if flooded.n == 0 { - flooded = bvalloc(int32(lv.f.NumBlocks())) + if flooded.N == 0 { + flooded = bitvec.New(int32(lv.f.NumBlocks())) } if flooded.Get(int32(b.ID)) { return @@ -725,8 +697,8 @@ func (lv *Liveness) solve() { // These temporary bitvectors exist to avoid successive allocations and // frees within the loop. nvars := int32(len(lv.vars)) - newlivein := bvalloc(nvars) - newliveout := bvalloc(nvars) + newlivein := bitvec.New(nvars) + newliveout := bitvec.New(nvars) // Walk blocks in postorder ordering. This improves convergence. po := lv.f.Postorder() @@ -783,8 +755,8 @@ func (lv *Liveness) solve() { // variables at each safe point locations. func (lv *Liveness) epilogue() { nvars := int32(len(lv.vars)) - liveout := bvalloc(nvars) - livedefer := bvalloc(nvars) // always-live variables + liveout := bitvec.New(nvars) + livedefer := bitvec.New(nvars) // always-live variables // If there is a defer (that could recover), then all output // parameters are live all the time. In addition, any locals @@ -838,7 +810,7 @@ func (lv *Liveness) epilogue() { { // Reserve an entry for function entry. - live := bvalloc(nvars) + live := bitvec.New(nvars) lv.livevars = append(lv.livevars, live) } @@ -852,7 +824,7 @@ func (lv *Liveness) epilogue() { continue } - live := bvalloc(nvars) + live := bitvec.New(nvars) lv.livevars = append(lv.livevars, live) } @@ -910,16 +882,16 @@ func (lv *Liveness) epilogue() { // If we have an open-coded deferreturn call, make a liveness map for it. if lv.fn.OpenCodedDeferDisallowed() { - lv.livenessMap.deferreturn = LivenessDontCare + lv.livenessMap.deferreturn = objw.LivenessDontCare } else { - lv.livenessMap.deferreturn = LivenessIndex{ - stackMapIndex: lv.stackMapSet.add(livedefer), - isUnsafePoint: false, + lv.livenessMap.deferreturn = objw.LivenessIndex{ + StackMapIndex: lv.stackMapSet.add(livedefer), + IsUnsafePoint: false, } } // Done compacting. Throw out the stack map set. 
- lv.stackMaps = lv.stackMapSet.extractUniqe() + lv.stackMaps = lv.stackMapSet.extractUnique() lv.stackMapSet = bvecSet{} // Useful sanity check: on entry to the function, @@ -958,9 +930,9 @@ func (lv *Liveness) compact(b *ssa.Block) { for _, v := range b.Values { hasStackMap := lv.hasStackMap(v) isUnsafePoint := lv.allUnsafe || lv.unsafePoints.Get(int32(v.ID)) - idx := LivenessIndex{StackMapDontCare, isUnsafePoint} + idx := objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: isUnsafePoint} if hasStackMap { - idx.stackMapIndex = lv.stackMapSet.add(lv.livevars[pos]) + idx.StackMapIndex = lv.stackMapSet.add(lv.livevars[pos]) pos++ } if hasStackMap || isUnsafePoint { @@ -972,7 +944,7 @@ func (lv *Liveness) compact(b *ssa.Block) { lv.livevars = lv.livevars[:0] } -func (lv *Liveness) showlive(v *ssa.Value, live bvec) { +func (lv *Liveness) showlive(v *ssa.Value, live bitvec.BitVec) { if base.Flag.Live == 0 || ir.FuncName(lv.fn) == "init" || strings.HasPrefix(ir.FuncName(lv.fn), ".") { return } @@ -1012,7 +984,7 @@ func (lv *Liveness) showlive(v *ssa.Value, live bvec) { base.WarnfAt(pos, s) } -func (lv *Liveness) printbvec(printed bool, name string, live bvec) bool { +func (lv *Liveness) printbvec(printed bool, name string, live bitvec.BitVec) bool { if live.IsEmpty() { return printed } @@ -1128,7 +1100,7 @@ func (lv *Liveness) printDebug() { fmt.Printf("\tlive=") printed = false if pcdata.StackMapValid() { - live := lv.stackMaps[pcdata.stackMapIndex] + live := lv.stackMaps[pcdata.StackMapIndex] for j, n := range lv.vars { if !live.Get(int32(j)) { continue @@ -1143,7 +1115,7 @@ func (lv *Liveness) printDebug() { fmt.Printf("\n") } - if pcdata.isUnsafePoint { + if pcdata.IsUnsafePoint { fmt.Printf("\tunsafe-point\n") } } @@ -1196,13 +1168,13 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) { // Temporary symbols for encoding bitmaps. var argsSymTmp, liveSymTmp obj.LSym - args := bvalloc(int32(maxArgs / int64(types.PtrSize))) - aoff := duint32(&argsSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps - aoff = duint32(&argsSymTmp, aoff, uint32(args.n)) // number of bits in each bitmap + args := bitvec.New(int32(maxArgs / int64(types.PtrSize))) + aoff := objw.Uint32(&argsSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps + aoff = objw.Uint32(&argsSymTmp, aoff, uint32(args.N)) // number of bits in each bitmap - locals := bvalloc(int32(maxLocals / int64(types.PtrSize))) - loff := duint32(&liveSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps - loff = duint32(&liveSymTmp, loff, uint32(locals.n)) // number of bits in each bitmap + locals := bitvec.New(int32(maxLocals / int64(types.PtrSize))) + loff := objw.Uint32(&liveSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps + loff = objw.Uint32(&liveSymTmp, loff, uint32(locals.N)) // number of bits in each bitmap for _, live := range lv.stackMaps { args.Clear() @@ -1210,8 +1182,8 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) { lv.pointerMap(live, lv.vars, args, locals) - aoff = dbvec(&argsSymTmp, aoff, args) - loff = dbvec(&liveSymTmp, loff, locals) + aoff = objw.BitVec(&argsSymTmp, aoff, args) + loff = objw.BitVec(&liveSymTmp, loff, locals) } // Give these LSyms content-addressable names, @@ -1233,7 +1205,7 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) { // pointer variables in the function and emits a runtime data // structure read by the garbage collector. // Returns a map from GC safe points to their corresponding stack map index. 
-func liveness(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *Progs) LivenessMap { +func liveness(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *objw.Progs) LivenessMap { // Construct the global liveness state. vars, idx := getvariables(curfn) lv := newliveness(curfn, f, vars, idx, stkptrsize) @@ -1247,7 +1219,7 @@ func liveness(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *Progs) Liveness for _, b := range f.Blocks { for _, val := range b.Values { if idx := lv.livenessMap.Get(val); idx.StackMapValid() { - lv.showlive(val, lv.stackMaps[idx.stackMapIndex]) + lv.showlive(val, lv.stackMaps[idx.StackMapIndex]) } } } @@ -1276,13 +1248,13 @@ func liveness(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *Progs) Liveness fninfo.GCArgs, fninfo.GCLocals = lv.emit() p := pp.Prog(obj.AFUNCDATA) - Addrconst(&p.From, objabi.FUNCDATA_ArgsPointerMaps) + p.From.SetConst(objabi.FUNCDATA_ArgsPointerMaps) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = fninfo.GCArgs p = pp.Prog(obj.AFUNCDATA) - Addrconst(&p.From, objabi.FUNCDATA_LocalsPointerMaps) + p.From.SetConst(objabi.FUNCDATA_LocalsPointerMaps) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = fninfo.GCLocals diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 7594884f9f35e..dcb2620f1f03d 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -6,7 +6,9 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/bitvec" "cmd/compile/internal/ir" + "cmd/compile/internal/objw" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/gcprog" @@ -472,14 +474,14 @@ func dimportpath(p *types.Pkg) { s := base.Ctxt.Lookup("type..importpath." + p.Prefix + ".") ot := dnameData(s, 0, str, "", nil, false) - ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA) + objw.Global(s, int32(ot), obj.DUPOK|obj.RODATA) s.Set(obj.AttrContentAddressable, true) p.Pathsym = s } func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int { if pkg == nil { - return duintptr(s, ot, 0) + return objw.Uintptr(s, ot, 0) } if pkg == types.LocalPkg && base.Ctxt.Pkgpath == "" { @@ -489,17 +491,17 @@ func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int { // Every package that imports this one directly defines the symbol. // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ. ns := base.Ctxt.Lookup(`type..importpath."".`) - return dsymptr(s, ot, ns, 0) + return objw.SymPtr(s, ot, ns, 0) } dimportpath(pkg) - return dsymptr(s, ot, pkg.Pathsym, 0) + return objw.SymPtr(s, ot, pkg.Pathsym, 0) } // dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol. func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int { if pkg == nil { - return duint32(s, ot, 0) + return objw.Uint32(s, ot, 0) } if pkg == types.LocalPkg && base.Ctxt.Pkgpath == "" { // If we don't know the full import path of the package being compiled @@ -508,11 +510,11 @@ func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int { // Every package that imports this one directly defines the symbol. // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ. ns := base.Ctxt.Lookup(`type..importpath."".`) - return dsymptrOff(s, ot, ns) + return objw.SymPtrOff(s, ot, ns) } dimportpath(pkg) - return dsymptrOff(s, ot, pkg.Pathsym) + return objw.SymPtrOff(s, ot, pkg.Pathsym) } // dnameField dumps a reflect.name for a struct field. 
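The rewrites in dgopkgpath and dgopkgpathOff above show the objw idiom used throughout this file: thread the running offset ot through each writer, then seal the finished symbol with objw.Global. A hypothetical helper in the same style, assuming lsym and data already exist (a sketch of the idiom, not code from this patch):

package gc

import (
	"cmd/compile/internal/objw"
	"cmd/internal/obj"
)

// dumpStringHeader lays out a {data *byte, len uintptr} pair at lsym
// and marks it read-only, mirroring the offset-chaining used above.
func dumpStringHeader(lsym, data *obj.LSym, n int) {
	off := 0
	off = objw.SymPtr(lsym, off, data, 0)    // pointer word, ptr-aligned
	off = objw.Uintptr(lsym, off, uint64(n)) // length word
	objw.Global(lsym, int32(off), obj.RODATA|obj.LOCAL)
}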
@@ -521,7 +523,7 @@ func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int { base.Fatalf("package mismatch for %v", ft.Sym) } nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name)) - return dsymptr(lsym, ot, nsym, 0) + return objw.SymPtr(lsym, ot, nsym, 0) } // dnameData writes the contents of a reflect.name into s at offset ot. @@ -600,7 +602,7 @@ func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym { return s } ot := dnameData(s, 0, name, tag, pkg, exported) - ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA) + objw.Global(s, int32(ot), obj.DUPOK|obj.RODATA) s.Set(obj.AttrContentAddressable, true) return s } @@ -634,10 +636,10 @@ func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int { base.Fatalf("methods are too far away on %v: %d", t, dataAdd) } - ot = duint16(lsym, ot, uint16(mcount)) - ot = duint16(lsym, ot, uint16(xcount)) - ot = duint32(lsym, ot, uint32(dataAdd)) - ot = duint32(lsym, ot, 0) + ot = objw.Uint16(lsym, ot, uint16(mcount)) + ot = objw.Uint16(lsym, ot, uint16(xcount)) + ot = objw.Uint32(lsym, ot, uint32(dataAdd)) + ot = objw.Uint32(lsym, ot, 0) return ot } @@ -669,7 +671,7 @@ func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int { } nsym := dname(a.name.Name, "", pkg, exported) - ot = dsymptrOff(lsym, ot, nsym) + ot = objw.SymPtrOff(lsym, ot, nsym) ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype)) ot = dmethodptrOff(lsym, ot, a.isym.Linksym()) ot = dmethodptrOff(lsym, ot, a.tsym.Linksym()) @@ -678,7 +680,7 @@ func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int { } func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int { - duint32(s, ot, 0) + objw.Uint32(s, ot, 0) r := obj.Addrel(s) r.Off = int32(ot) r.Siz = 4 @@ -768,9 +770,9 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { // ptrToThis typeOff // } ot := 0 - ot = duintptr(lsym, ot, uint64(t.Width)) - ot = duintptr(lsym, ot, uint64(ptrdata)) - ot = duint32(lsym, ot, types.TypeHash(t)) + ot = objw.Uintptr(lsym, ot, uint64(t.Width)) + ot = objw.Uintptr(lsym, ot, uint64(ptrdata)) + ot = objw.Uint32(lsym, ot, types.TypeHash(t)) var tflag uint8 if uncommonSize(t) != 0 { @@ -802,7 +804,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { } } - ot = duint8(lsym, ot, tflag) + ot = objw.Uint8(lsym, ot, tflag) // runtime (and common sense) expects alignment to be a power of two. 
i := int(t.Align) @@ -813,8 +815,8 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { if i&(i-1) != 0 { base.Fatalf("invalid alignment %d for %v", t.Align, t) } - ot = duint8(lsym, ot, t.Align) // align - ot = duint8(lsym, ot, t.Align) // fieldAlign + ot = objw.Uint8(lsym, ot, t.Align) // align + ot = objw.Uint8(lsym, ot, t.Align) // fieldAlign i = kinds[t.Kind()] if types.IsDirectIface(t) { @@ -823,23 +825,23 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { if useGCProg { i |= objabi.KindGCProg } - ot = duint8(lsym, ot, uint8(i)) // kind + ot = objw.Uint8(lsym, ot, uint8(i)) // kind if eqfunc != nil { - ot = dsymptr(lsym, ot, eqfunc, 0) // equality function + ot = objw.SymPtr(lsym, ot, eqfunc, 0) // equality function } else { - ot = duintptr(lsym, ot, 0) // type we can't do == with + ot = objw.Uintptr(lsym, ot, 0) // type we can't do == with } - ot = dsymptr(lsym, ot, gcsym, 0) // gcdata + ot = objw.SymPtr(lsym, ot, gcsym, 0) // gcdata nsym := dname(p, "", nil, exported) - ot = dsymptrOff(lsym, ot, nsym) // str + ot = objw.SymPtrOff(lsym, ot, nsym) // str // ptrToThis if sptr == nil { - ot = duint32(lsym, ot, 0) + ot = objw.Uint32(lsym, ot, 0) } else if sptrWeak { - ot = dsymptrWeakOff(lsym, ot, sptr) + ot = objw.SymPtrWeakOff(lsym, ot, sptr) } else { - ot = dsymptrOff(lsym, ot, sptr) + ot = objw.SymPtrOff(lsym, ot, sptr) } return ot @@ -1029,24 +1031,24 @@ func dtypesym(t *types.Type) *obj.LSym { t2 := types.NewSlice(t.Elem()) s2 := dtypesym(t2) ot = dcommontype(lsym, t) - ot = dsymptr(lsym, ot, s1, 0) - ot = dsymptr(lsym, ot, s2, 0) - ot = duintptr(lsym, ot, uint64(t.NumElem())) + ot = objw.SymPtr(lsym, ot, s1, 0) + ot = objw.SymPtr(lsym, ot, s2, 0) + ot = objw.Uintptr(lsym, ot, uint64(t.NumElem())) ot = dextratype(lsym, ot, t, 0) case types.TSLICE: // ../../../../runtime/type.go:/sliceType s1 := dtypesym(t.Elem()) ot = dcommontype(lsym, t) - ot = dsymptr(lsym, ot, s1, 0) + ot = objw.SymPtr(lsym, ot, s1, 0) ot = dextratype(lsym, ot, t, 0) case types.TCHAN: // ../../../../runtime/type.go:/chanType s1 := dtypesym(t.Elem()) ot = dcommontype(lsym, t) - ot = dsymptr(lsym, ot, s1, 0) - ot = duintptr(lsym, ot, uint64(t.ChanDir())) + ot = objw.SymPtr(lsym, ot, s1, 0) + ot = objw.Uintptr(lsym, ot, uint64(t.ChanDir())) ot = dextratype(lsym, ot, t, 0) case types.TFUNC: @@ -1068,8 +1070,8 @@ func dtypesym(t *types.Type) *obj.LSym { if isddd { outCount |= 1 << 15 } - ot = duint16(lsym, ot, uint16(inCount)) - ot = duint16(lsym, ot, uint16(outCount)) + ot = objw.Uint16(lsym, ot, uint16(inCount)) + ot = objw.Uint16(lsym, ot, uint16(outCount)) if types.PtrSize == 8 { ot += 4 // align for *rtype } @@ -1079,13 +1081,13 @@ func dtypesym(t *types.Type) *obj.LSym { // Array of rtype pointers follows funcType. 
for _, t1 := range t.Recvs().Fields().Slice() { - ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0) + ot = objw.SymPtr(lsym, ot, dtypesym(t1.Type), 0) } for _, t1 := range t.Params().Fields().Slice() { - ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0) + ot = objw.SymPtr(lsym, ot, dtypesym(t1.Type), 0) } for _, t1 := range t.Results().Fields().Slice() { - ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0) + ot = objw.SymPtr(lsym, ot, dtypesym(t1.Type), 0) } case types.TINTER: @@ -1104,9 +1106,9 @@ func dtypesym(t *types.Type) *obj.LSym { } ot = dgopkgpath(lsym, ot, tpkg) - ot = dsymptr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t)) - ot = duintptr(lsym, ot, uint64(n)) - ot = duintptr(lsym, ot, uint64(n)) + ot = objw.SymPtr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t)) + ot = objw.Uintptr(lsym, ot, uint64(n)) + ot = objw.Uintptr(lsym, ot, uint64(n)) dataAdd := imethodSize() * n ot = dextratype(lsym, ot, t, dataAdd) @@ -1119,8 +1121,8 @@ func dtypesym(t *types.Type) *obj.LSym { } nsym := dname(a.name.Name, "", pkg, exported) - ot = dsymptrOff(lsym, ot, nsym) - ot = dsymptrOff(lsym, ot, dtypesym(a.type_)) + ot = objw.SymPtrOff(lsym, ot, nsym) + ot = objw.SymPtrOff(lsym, ot, dtypesym(a.type_)) } // ../../../../runtime/type.go:/mapType @@ -1131,27 +1133,27 @@ func dtypesym(t *types.Type) *obj.LSym { hasher := genhash(t.Key()) ot = dcommontype(lsym, t) - ot = dsymptr(lsym, ot, s1, 0) - ot = dsymptr(lsym, ot, s2, 0) - ot = dsymptr(lsym, ot, s3, 0) - ot = dsymptr(lsym, ot, hasher, 0) + ot = objw.SymPtr(lsym, ot, s1, 0) + ot = objw.SymPtr(lsym, ot, s2, 0) + ot = objw.SymPtr(lsym, ot, s3, 0) + ot = objw.SymPtr(lsym, ot, hasher, 0) var flags uint32 // Note: flags must match maptype accessors in ../../../../runtime/type.go // and maptype builder in ../../../../reflect/type.go:MapOf. 
if t.Key().Width > MAXKEYSIZE { - ot = duint8(lsym, ot, uint8(types.PtrSize)) + ot = objw.Uint8(lsym, ot, uint8(types.PtrSize)) flags |= 1 // indirect key } else { - ot = duint8(lsym, ot, uint8(t.Key().Width)) + ot = objw.Uint8(lsym, ot, uint8(t.Key().Width)) } if t.Elem().Width > MAXELEMSIZE { - ot = duint8(lsym, ot, uint8(types.PtrSize)) + ot = objw.Uint8(lsym, ot, uint8(types.PtrSize)) flags |= 2 // indirect value } else { - ot = duint8(lsym, ot, uint8(t.Elem().Width)) + ot = objw.Uint8(lsym, ot, uint8(t.Elem().Width)) } - ot = duint16(lsym, ot, uint16(bmap(t).Width)) + ot = objw.Uint16(lsym, ot, uint16(bmap(t).Width)) if types.IsReflexive(t.Key()) { flags |= 4 // reflexive key } @@ -1161,7 +1163,7 @@ func dtypesym(t *types.Type) *obj.LSym { if hashMightPanic(t.Key()) { flags |= 16 // hash might panic } - ot = duint32(lsym, ot, flags) + ot = objw.Uint32(lsym, ot, flags) ot = dextratype(lsym, ot, t, 0) case types.TPTR: @@ -1177,7 +1179,7 @@ func dtypesym(t *types.Type) *obj.LSym { s1 := dtypesym(t.Elem()) ot = dcommontype(lsym, t) - ot = dsymptr(lsym, ot, s1, 0) + ot = objw.SymPtr(lsym, ot, s1, 0) ot = dextratype(lsym, ot, t, 0) // ../../../../runtime/type.go:/structType @@ -1203,9 +1205,9 @@ func dtypesym(t *types.Type) *obj.LSym { ot = dcommontype(lsym, t) ot = dgopkgpath(lsym, ot, spkg) - ot = dsymptr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t)) - ot = duintptr(lsym, ot, uint64(len(fields))) - ot = duintptr(lsym, ot, uint64(len(fields))) + ot = objw.SymPtr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t)) + ot = objw.Uintptr(lsym, ot, uint64(len(fields))) + ot = objw.Uintptr(lsym, ot, uint64(len(fields))) dataAdd := len(fields) * structfieldSize() ot = dextratype(lsym, ot, t, dataAdd) @@ -1213,7 +1215,7 @@ func dtypesym(t *types.Type) *obj.LSym { for _, f := range fields { // ../../../../runtime/type.go:/structField ot = dnameField(lsym, ot, spkg, f) - ot = dsymptr(lsym, ot, dtypesym(f.Type), 0) + ot = objw.SymPtr(lsym, ot, dtypesym(f.Type), 0) offsetAnon := uint64(f.Offset) << 1 if offsetAnon>>1 != uint64(f.Offset) { base.Fatalf("%v: bad field offset for %s", t, f.Sym.Name) @@ -1221,12 +1223,12 @@ func dtypesym(t *types.Type) *obj.LSym { if f.Embedded != 0 { offsetAnon |= 1 } - ot = duintptr(lsym, ot, offsetAnon) + ot = objw.Uintptr(lsym, ot, offsetAnon) } } ot = dextratypeData(lsym, ot, t) - ggloblsym(lsym, int32(ot), int16(dupok|obj.RODATA)) + objw.Global(lsym, int32(ot), int16(dupok|obj.RODATA)) // The linker will leave a table of all the typelinks for // types in the binary, so the runtime can find them. @@ -1396,15 +1398,15 @@ func dumptabs() { // _ [4]byte // fun [1]uintptr // variable sized // } - o := dsymptr(i.lsym, 0, dtypesym(i.itype), 0) - o = dsymptr(i.lsym, o, dtypesym(i.t), 0) - o = duint32(i.lsym, o, types.TypeHash(i.t)) // copy of type hash - o += 4 // skip unused field + o := objw.SymPtr(i.lsym, 0, dtypesym(i.itype), 0) + o = objw.SymPtr(i.lsym, o, dtypesym(i.t), 0) + o = objw.Uint32(i.lsym, o, types.TypeHash(i.t)) // copy of type hash + o += 4 // skip unused field for _, fn := range genfun(i.t, i.itype) { - o = dsymptr(i.lsym, o, fn, 0) // method pointer for each method + o = objw.SymPtr(i.lsym, o, fn, 0) // method pointer for each method } // Nothing writes static itabs, so they are read only. 
- ggloblsym(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA)) + objw.Global(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA)) i.lsym.Set(obj.AttrContentAddressable, true) } @@ -1421,20 +1423,20 @@ func dumptabs() { // } nsym := dname(p.s.Name, "", nil, true) tsym := dtypesym(p.t) - ot = dsymptrOff(s, ot, nsym) - ot = dsymptrOff(s, ot, tsym) + ot = objw.SymPtrOff(s, ot, nsym) + ot = objw.SymPtrOff(s, ot, tsym) // Plugin exports symbols as interfaces. Mark their types // as UsedInIface. tsym.Set(obj.AttrUsedInIface, true) } - ggloblsym(s, int32(ot), int16(obj.RODATA)) + objw.Global(s, int32(ot), int16(obj.RODATA)) ot = 0 s = base.Ctxt.Lookup("go.plugin.exports") for _, p := range ptabs { - ot = dsymptr(s, ot, p.s.Linksym(), 0) + ot = objw.SymPtr(s, ot, p.s.Linksym(), 0) } - ggloblsym(s, int32(ot), int16(obj.RODATA)) + objw.Global(s, int32(ot), int16(obj.RODATA)) } } @@ -1569,9 +1571,9 @@ func dgcptrmask(t *types.Type) *obj.LSym { if !sym.Uniq() { sym.SetUniq(true) for i, x := range ptrmask { - duint8(lsym, i, x) + objw.Uint8(lsym, i, x) } - ggloblsym(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL) + objw.Global(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL) lsym.Set(obj.AttrContentAddressable, true) } return lsym @@ -1588,7 +1590,7 @@ func fillptrmask(t *types.Type, ptrmask []byte) { return } - vec := bvalloc(8 * int32(len(ptrmask))) + vec := bitvec.New(8 * int32(len(ptrmask))) onebitwalktype1(t, 0, vec) nptr := types.PtrDataSize(t) / int64(types.PtrSize) @@ -1637,13 +1639,13 @@ func (p *GCProg) init(lsym *obj.LSym) { } func (p *GCProg) writeByte(x byte) { - p.symoff = duint8(p.lsym, p.symoff, x) + p.symoff = objw.Uint8(p.lsym, p.symoff, x) } func (p *GCProg) end() { p.w.End() - duint32(p.lsym, 0, uint32(p.symoff-4)) - ggloblsym(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL) + objw.Uint32(p.lsym, 0, uint32(p.symoff-4)) + objw.Global(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL) if base.Debug.GCProg > 0 { fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym) } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 382e4d4320026..44e199abbf268 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -18,6 +18,7 @@ import ( "bytes" "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/objw" "cmd/compile/internal/ssa" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" @@ -228,22 +229,22 @@ func dvarint(x *obj.LSym, off int, v int64) int { panic(fmt.Sprintf("dvarint: bad offset for funcdata - %v", v)) } if v < 1<<7 { - return duint8(x, off, uint8(v)) + return objw.Uint8(x, off, uint8(v)) } - off = duint8(x, off, uint8((v&127)|128)) + off = objw.Uint8(x, off, uint8((v&127)|128)) if v < 1<<14 { - return duint8(x, off, uint8(v>>7)) + return objw.Uint8(x, off, uint8(v>>7)) } - off = duint8(x, off, uint8(((v>>7)&127)|128)) + off = objw.Uint8(x, off, uint8(((v>>7)&127)|128)) if v < 1<<21 { - return duint8(x, off, uint8(v>>14)) + return objw.Uint8(x, off, uint8(v>>14)) } - off = duint8(x, off, uint8(((v>>14)&127)|128)) + off = objw.Uint8(x, off, uint8(((v>>14)&127)|128)) if v < 1<<28 { - return duint8(x, off, uint8(v>>21)) + return objw.Uint8(x, off, uint8(v>>21)) } - off = duint8(x, off, uint8(((v>>21)&127)|128)) - return duint8(x, off, uint8(v>>28)) + off = objw.Uint8(x, off, uint8(((v>>21)&127)|128)) + return objw.Uint8(x, off, uint8(v>>28)) } // emitOpenDeferInfo emits FUNCDATA information about the defers in a function @@ -6281,7 +6282,7 @@ func (s 
*state) addNamedValue(n *ir.Name, v *ssa.Value) { } // Generate a disconnected call to a runtime routine and a return. -func gencallret(pp *Progs, sym *obj.LSym) *obj.Prog { +func gencallret(pp *objw.Progs, sym *obj.LSym) *obj.Prog { p := pp.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN @@ -6298,7 +6299,7 @@ type Branch struct { // SSAGenState contains state needed during Prog generation. type SSAGenState struct { - pp *Progs + pp *objw.Progs // Branches remembers all the branch instructions we've seen // and where they would like to go. @@ -6344,12 +6345,12 @@ func (s *SSAGenState) Prog(as obj.As) *obj.Prog { // Pc returns the current Prog. func (s *SSAGenState) Pc() *obj.Prog { - return s.pp.next + return s.pp.Next } // SetPos sets the current source position. func (s *SSAGenState) SetPos(pos src.XPos) { - s.pp.pos = pos + s.pp.Pos = pos } // Br emits a single branch instruction and returns the instruction. @@ -6385,7 +6386,7 @@ func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) { } s.SetPos(p) } else { - s.SetPos(s.pp.pos.WithNotStmt()) + s.SetPos(s.pp.Pos.WithNotStmt()) } } } @@ -6397,7 +6398,7 @@ func (s byXoffset) Len() int { return len(s) } func (s byXoffset) Less(i, j int) bool { return s[i].FrameOffset() < s[j].FrameOffset() } func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func emitStackObjects(e *ssafn, pp *Progs) { +func emitStackObjects(e *ssafn, pp *objw.Progs) { var vars []*ir.Name for _, n := range e.curfn.Dcl { if livenessShouldTrack(n) && n.Addrtaken() { @@ -6415,21 +6416,21 @@ func emitStackObjects(e *ssafn, pp *Progs) { // Format must match runtime/stack.go:stackObjectRecord. x := e.curfn.LSym.Func().StackObjects off := 0 - off = duintptr(x, off, uint64(len(vars))) + off = objw.Uintptr(x, off, uint64(len(vars))) for _, v := range vars { // Note: arguments and return values have non-negative Xoffset, // in which case the offset is relative to argp. // Locals have a negative Xoffset, in which case the offset is relative to varp. - off = duintptr(x, off, uint64(v.FrameOffset())) + off = objw.Uintptr(x, off, uint64(v.FrameOffset())) if !types.TypeSym(v.Type()).Siggen() { e.Fatalf(v.Pos(), "stack object's type symbol not generated for type %s", v.Type()) } - off = dsymptr(x, off, dtypesym(v.Type()), 0) + off = objw.SymPtr(x, off, dtypesym(v.Type()), 0) } // Emit a funcdata pointing at the stack object data. p := pp.Prog(obj.AFUNCDATA) - Addrconst(&p.From, objabi.FUNCDATA_StackObjects) + p.From.SetConst(objabi.FUNCDATA_StackObjects) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = x @@ -6442,7 +6443,7 @@ func emitStackObjects(e *ssafn, pp *Progs) { } // genssa appends entries to pp for each instruction in f. -func genssa(f *ssa.Func, pp *Progs) { +func genssa(f *ssa.Func, pp *objw.Progs) { var s SSAGenState e := f.Frontend().(*ssafn) @@ -6455,7 +6456,7 @@ func genssa(f *ssa.Func, pp *Progs) { // This function uses open-coded defers -- write out the funcdata // info that we computed at the end of genssa. 
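The funcdata emitted here for open-coded defers is encoded by dvarint above: little-endian base-128 varints, seven payload bits per byte with the high bit as a continuation flag. A standalone round-trip sketch of that encoding (the decoder is included only to verify the sketch; it is not this patch's code):

package main

import "fmt"

// appendVarint encodes v the way dvarint does: least-significant
// 7-bit group first, high bit set on every byte except the last.
func appendVarint(buf []byte, v uint64) []byte {
	for v >= 0x80 {
		buf = append(buf, byte(v)|0x80)
		v >>= 7
	}
	return append(buf, byte(v))
}

// readVarint decodes the same format, returning the value and the
// number of bytes consumed.
func readVarint(buf []byte) (v uint64, n int) {
	for shift := uint(0); ; shift += 7 {
		b := buf[n]
		n++
		v |= uint64(b&0x7f) << shift
		if b < 0x80 {
			return v, n
		}
	}
}

func main() {
	enc := appendVarint(nil, 300) // -> ac 02
	v, _ := readVarint(enc)
	fmt.Printf("% x -> %d\n", enc, v)
}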
p := pp.Prog(obj.AFUNCDATA) - Addrconst(&p.From, objabi.FUNCDATA_OpenCodedDeferInfo) + p.From.SetConst(objabi.FUNCDATA_OpenCodedDeferInfo) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = openDeferInfo @@ -6471,7 +6472,7 @@ func genssa(f *ssa.Func, pp *Progs) { progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues()) progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks()) f.Logf("genssa %s\n", f.Name) - progToBlock[s.pp.next] = f.Blocks[0] + progToBlock[s.pp.Next] = f.Blocks[0] } s.ScratchFpMem = e.scratchFpMem @@ -6509,7 +6510,7 @@ func genssa(f *ssa.Func, pp *Progs) { // Emit basic blocks for i, b := range f.Blocks { - s.bstart[b.ID] = s.pp.next + s.bstart[b.ID] = s.pp.Next s.lineRunStart = nil // Attach a "default" liveness info. Normally this will be @@ -6518,12 +6519,12 @@ func genssa(f *ssa.Func, pp *Progs) { // instruction. We won't use the actual liveness map on a // control instruction. Just mark it something that is // preemptible, unless this function is "all unsafe". - s.pp.nextLive = LivenessIndex{-1, allUnsafe(f)} + s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: allUnsafe(f)} // Emit values in block thearch.SSAMarkMoves(&s, b) for _, v := range b.Values { - x := s.pp.next + x := s.pp.Next s.DebugFriendlySetPosFrom(v) switch v.Op { @@ -6561,7 +6562,7 @@ func genssa(f *ssa.Func, pp *Progs) { default: // Attach this safe point to the next // instruction. - s.pp.nextLive = s.livenessMap.Get(v) + s.pp.NextLive = s.livenessMap.Get(v) // Special case for first line in function; move it to the start. if firstPos != src.NoXPos { @@ -6573,17 +6574,17 @@ func genssa(f *ssa.Func, pp *Progs) { } if base.Ctxt.Flag_locationlists { - valueToProgAfter[v.ID] = s.pp.next + valueToProgAfter[v.ID] = s.pp.Next } if f.PrintOrHtmlSSA { - for ; x != s.pp.next; x = x.Link { + for ; x != s.pp.Next; x = x.Link { progToValue[x] = v } } } // If this is an empty infinite loop, stick a hardware NOP in there so that debuggers are less confused. - if s.bstart[b.ID] == s.pp.next && len(b.Succs) == 1 && b.Succs[0].Block() == b { + if s.bstart[b.ID] == s.pp.Next && len(b.Succs) == 1 && b.Succs[0].Block() == b { p := thearch.Ginsnop(s.pp) p.Pos = p.Pos.WithIsStmt() if b.Pos == src.NoXPos { @@ -6603,11 +6604,11 @@ func genssa(f *ssa.Func, pp *Progs) { // line numbers for otherwise empty blocks. next = f.Blocks[i+1] } - x := s.pp.next + x := s.pp.Next s.SetPos(b.Pos) thearch.SSAGenBlock(&s, b, next) if f.PrintOrHtmlSSA { - for ; x != s.pp.next; x = x.Link { + for ; x != s.pp.Next; x = x.Link { progToBlock[x] = b } } @@ -6623,7 +6624,7 @@ func genssa(f *ssa.Func, pp *Progs) { // When doing open-coded defers, generate a disconnected call to // deferreturn and a return. This will be used to during panic // recovery to unwind the stack and return back to the runtime. - s.pp.nextLive = s.livenessMap.deferreturn + s.pp.NextLive = s.livenessMap.deferreturn gencallret(pp, ir.Syms.Deferreturn) } @@ -6655,7 +6656,7 @@ func genssa(f *ssa.Func, pp *Progs) { // some of the inline marks. // Use this instruction instead. p.Pos = p.Pos.WithIsStmt() // promote position to a statement - pp.curfn.LSym.Func().AddInlMark(p, inlMarks[m]) + pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[m]) // Make the inline mark a real nop, so it doesn't generate any code. m.As = obj.ANOP m.Pos = src.NoXPos @@ -6667,7 +6668,7 @@ func genssa(f *ssa.Func, pp *Progs) { // Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction). 
for _, p := range inlMarkList { if p.As != obj.ANOP { - pp.curfn.LSym.Func().AddInlMark(p, inlMarks[p]) + pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[p]) } } } @@ -7048,7 +7049,7 @@ func (s *SSAGenState) AddrScratch(a *obj.Addr) { // Call returns a new CALL instruction for the SSA value v. // It uses PrepareCall to prepare the call. func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog { - pPosIsStmt := s.pp.pos.IsStmt() // The statement-ness fo the call comes from ssaGenState + pPosIsStmt := s.pp.Pos.IsStmt() // The statement-ness fo the call comes from ssaGenState s.PrepareCall(v) p := s.Prog(obj.ACALL) @@ -7106,7 +7107,7 @@ func (s *SSAGenState) PrepareCall(v *ssa.Value) { // Record call graph information for nowritebarrierrec // analysis. if nowritebarrierrecCheck != nil { - nowritebarrierrecCheck.recordCall(s.pp.curfn, call.Fn, v.Pos) + nowritebarrierrecCheck.recordCall(s.pp.CurFunc, call.Fn, v.Pos) } } diff --git a/src/cmd/compile/internal/mips/ggen.go b/src/cmd/compile/internal/mips/ggen.go index 9cce68821b973..1a5125207dd0e 100644 --- a/src/cmd/compile/internal/mips/ggen.go +++ b/src/cmd/compile/internal/mips/ggen.go @@ -6,21 +6,21 @@ package mips import ( "cmd/compile/internal/base" - "cmd/compile/internal/gc" + "cmd/compile/internal/objw" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/mips" ) // TODO(mips): implement DUFFZERO -func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { +func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { if cnt == 0 { return p } if cnt < int64(4*types.PtrSize) { for i := int64(0); i < cnt; i += int64(types.PtrSize) { - p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.FixedFrameSize()+off+i) + p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.FixedFrameSize()+off+i) } } else { //fmt.Printf("zerorange frame:%v, lo: %v, hi:%v \n", frame ,lo, hi) @@ -30,22 +30,22 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { // MOVW R0, (Widthptr)r1 // ADD $Widthptr, r1 // BNE r1, r2, loop - p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0) + p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0) p.Reg = mips.REGSP - p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0) + p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0) p.Reg = mips.REGRT1 - p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize)) + p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize)) p1 := p - p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0) - p = pp.Appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0) + p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0) + p = pp.Append(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0) p.Reg = mips.REGRT2 - gc.Patch(p, p1) + p.To.SetTarget(p1) } return p } -func ginsnop(pp *gc.Progs) *obj.Prog { +func ginsnop(pp *objw.Progs) *obj.Prog { p := pp.Prog(mips.ANOR) p.From.Type = obj.TYPE_REG p.From.Reg = mips.REG_R0 diff --git a/src/cmd/compile/internal/mips/ssa.go b/src/cmd/compile/internal/mips/ssa.go index 10453c27d5f83..e46d87e17d24a 100644 --- 
a/src/cmd/compile/internal/mips/ssa.go +++ b/src/cmd/compile/internal/mips/ssa.go @@ -427,7 +427,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p4.From.Reg = v.Args[1].Reg() p4.Reg = mips.REG_R1 p4.To.Type = obj.TYPE_BRANCH - gc.Patch(p4, p2) + p4.To.SetTarget(p2) case ssa.OpMIPSLoweredMove: // SUBU $4, R1 // MOVW 4(R1), Rtmp @@ -480,7 +480,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p6.From.Reg = v.Args[2].Reg() p6.Reg = mips.REG_R1 p6.To.Type = obj.TYPE_BRANCH - gc.Patch(p6, p2) + p6.To.SetTarget(p2) case ssa.OpMIPSCALLstatic, ssa.OpMIPSCALLclosure, ssa.OpMIPSCALLinter: s.Call(v) case ssa.OpMIPSLoweredWB: @@ -577,7 +577,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p3.From.Type = obj.TYPE_REG p3.From.Reg = mips.REGTMP p3.To.Type = obj.TYPE_BRANCH - gc.Patch(p3, p) + p3.To.SetTarget(p) s.Prog(mips.ASYNC) case ssa.OpMIPSLoweredAtomicAdd: @@ -613,7 +613,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p3.From.Type = obj.TYPE_REG p3.From.Reg = mips.REGTMP p3.To.Type = obj.TYPE_BRANCH - gc.Patch(p3, p) + p3.To.SetTarget(p) s.Prog(mips.ASYNC) @@ -657,7 +657,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p3.From.Type = obj.TYPE_REG p3.From.Reg = mips.REGTMP p3.To.Type = obj.TYPE_BRANCH - gc.Patch(p3, p) + p3.To.SetTarget(p) s.Prog(mips.ASYNC) @@ -701,7 +701,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p3.From.Type = obj.TYPE_REG p3.From.Reg = mips.REGTMP p3.To.Type = obj.TYPE_BRANCH - gc.Patch(p3, p) + p3.To.SetTarget(p) s.Prog(mips.ASYNC) @@ -750,12 +750,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p5.From.Type = obj.TYPE_REG p5.From.Reg = v.Reg0() p5.To.Type = obj.TYPE_BRANCH - gc.Patch(p5, p1) + p5.To.SetTarget(p1) s.Prog(mips.ASYNC) p6 := s.Prog(obj.ANOP) - gc.Patch(p2, p6) + p2.To.SetTarget(p6) case ssa.OpMIPSLoweredNilCheck: // Issue a load which will fault if arg is nil. 
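Every gc.Patch call in these backend files becomes a direct p.To.SetTarget(target); the surrounding idiom is unchanged: emit the branch with To.Type = obj.TYPE_BRANCH, remember the loop-head Prog, and point the branch back at it once both exist. A minimal sketch of that pattern as it would sit inside the compiler tree (names are illustrative):

package main

import "cmd/internal/obj"

// patchBranch mirrors the removed gc.Patch helper; SetTarget requires
// that the operand already be of type obj.TYPE_BRANCH.
func patchBranch(p, target *obj.Prog) {
	p.To.SetTarget(target)
}

func main() {
	head := new(obj.Prog) // loop head, emitted first
	br := new(obj.Prog)   // back-edge branch, emitted later
	br.To.Type = obj.TYPE_BRANCH
	patchBranch(br, head) // br now jumps to head
}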
diff --git a/src/cmd/compile/internal/mips64/ggen.go b/src/cmd/compile/internal/mips64/ggen.go index dc5f95960d1ec..37bb871958bf4 100644 --- a/src/cmd/compile/internal/mips64/ggen.go +++ b/src/cmd/compile/internal/mips64/ggen.go @@ -5,25 +5,25 @@ package mips64 import ( - "cmd/compile/internal/gc" "cmd/compile/internal/ir" + "cmd/compile/internal/objw" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/mips" ) -func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { +func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { if cnt == 0 { return p } if cnt < int64(4*types.PtrSize) { for i := int64(0); i < cnt; i += int64(types.PtrSize) { - p = pp.Appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+off+i) + p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+off+i) } } else if cnt <= int64(128*types.PtrSize) { - p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0) + p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0) p.Reg = mips.REGSP - p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) + p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) p.To.Name = obj.NAME_EXTERN p.To.Sym = ir.Syms.Duffzero p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize)) @@ -34,22 +34,22 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { // MOVV R0, (Widthptr)r1 // ADDV $Widthptr, r1 // BNE r1, r2, loop - p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0) + p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0) p.Reg = mips.REGSP - p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0) + p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0) p.Reg = mips.REGRT1 - p = pp.Appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize)) + p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize)) p1 := p - p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0) - p = pp.Appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0) + p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0) + p = pp.Append(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0) p.Reg = mips.REGRT2 - gc.Patch(p, p1) + p.To.SetTarget(p1) } return p } -func ginsnop(pp *gc.Progs) *obj.Prog { +func ginsnop(pp *objw.Progs) *obj.Prog { p := pp.Prog(mips.ANOR) p.From.Type = obj.TYPE_REG p.From.Reg = mips.REG_R0 diff --git a/src/cmd/compile/internal/mips64/ssa.go b/src/cmd/compile/internal/mips64/ssa.go index 0da5eebe8d17a..096e7048ce254 100644 --- a/src/cmd/compile/internal/mips64/ssa.go +++ b/src/cmd/compile/internal/mips64/ssa.go @@ -428,7 +428,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p4.From.Reg = v.Args[1].Reg() p4.Reg = mips.REG_R1 p4.To.Type = obj.TYPE_BRANCH - gc.Patch(p4, p2) + p4.To.SetTarget(p2) case ssa.OpMIPS64DUFFCOPY: p := s.Prog(obj.ADUFFCOPY) p.To.Type = obj.TYPE_MEM @@ -490,7 +490,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p6.From.Reg = v.Args[2].Reg() p6.Reg = mips.REG_R1 p6.To.Type = obj.TYPE_BRANCH - gc.Patch(p6, p2) + p6.To.SetTarget(p2) case ssa.OpMIPS64CALLstatic, ssa.OpMIPS64CALLclosure, 
ssa.OpMIPS64CALLinter: s.Call(v) case ssa.OpMIPS64LoweredWB: @@ -579,7 +579,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p3.From.Type = obj.TYPE_REG p3.From.Reg = mips.REGTMP p3.To.Type = obj.TYPE_BRANCH - gc.Patch(p3, p) + p3.To.SetTarget(p) s.Prog(mips.ASYNC) case ssa.OpMIPS64LoweredAtomicAdd32, ssa.OpMIPS64LoweredAtomicAdd64: // SYNC @@ -616,7 +616,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p3.From.Type = obj.TYPE_REG p3.From.Reg = mips.REGTMP p3.To.Type = obj.TYPE_BRANCH - gc.Patch(p3, p) + p3.To.SetTarget(p) s.Prog(mips.ASYNC) p4 := s.Prog(mips.AADDVU) p4.From.Type = obj.TYPE_REG @@ -659,7 +659,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p3.From.Type = obj.TYPE_REG p3.From.Reg = mips.REGTMP p3.To.Type = obj.TYPE_BRANCH - gc.Patch(p3, p) + p3.To.SetTarget(p) s.Prog(mips.ASYNC) p4 := s.Prog(mips.AADDVU) p4.From.Type = obj.TYPE_CONST @@ -712,9 +712,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p5.From.Type = obj.TYPE_REG p5.From.Reg = v.Reg0() p5.To.Type = obj.TYPE_BRANCH - gc.Patch(p5, p1) + p5.To.SetTarget(p1) p6 := s.Prog(mips.ASYNC) - gc.Patch(p2, p6) + p2.To.SetTarget(p6) case ssa.OpMIPS64LoweredNilCheck: // Issue a load which will fault if arg is nil. p := s.Prog(mips.AMOVB) @@ -751,7 +751,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p3.To.Type = obj.TYPE_REG p3.To.Reg = v.Reg() p4 := s.Prog(obj.ANOP) // not a machine instruction, for branch to land - gc.Patch(p2, p4) + p2.To.SetTarget(p4) case ssa.OpMIPS64LoweredGetClosurePtr: // Closure pointer is R22 (mips.REGCTXT). gc.CheckLoweredGetClosurePtr(v) diff --git a/src/cmd/compile/internal/objw/objw.go b/src/cmd/compile/internal/objw/objw.go new file mode 100644 index 0000000000000..dfbcf5155655c --- /dev/null +++ b/src/cmd/compile/internal/objw/objw.go @@ -0,0 +1,72 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package objw + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/bitvec" + "cmd/compile/internal/types" + "cmd/internal/obj" +) + +func Uint8(s *obj.LSym, off int, v uint8) int { + return UintN(s, off, uint64(v), 1) +} + +func Uint16(s *obj.LSym, off int, v uint16) int { + return UintN(s, off, uint64(v), 2) +} + +func Uint32(s *obj.LSym, off int, v uint32) int { + return UintN(s, off, uint64(v), 4) +} + +func Uintptr(s *obj.LSym, off int, v uint64) int { + return UintN(s, off, v, types.PtrSize) +} + +func UintN(s *obj.LSym, off int, v uint64, wid int) int { + if off&(wid-1) != 0 { + base.Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off) + } + s.WriteInt(base.Ctxt, int64(off), wid, int64(v)) + return off + wid +} + +func SymPtr(s *obj.LSym, off int, x *obj.LSym, xoff int) int { + off = int(types.Rnd(int64(off), int64(types.PtrSize))) + s.WriteAddr(base.Ctxt, int64(off), types.PtrSize, x, int64(xoff)) + off += types.PtrSize + return off +} + +func SymPtrOff(s *obj.LSym, off int, x *obj.LSym) int { + s.WriteOff(base.Ctxt, int64(off), x, 0) + off += 4 + return off +} + +func SymPtrWeakOff(s *obj.LSym, off int, x *obj.LSym) int { + s.WriteWeakOff(base.Ctxt, int64(off), x, 0) + off += 4 + return off +} + +func Global(s *obj.LSym, width int32, flags int16) { + if flags&obj.LOCAL != 0 { + s.Set(obj.AttrLocal, true) + flags &^= obj.LOCAL + } + base.Ctxt.Globl(s, int64(width), int(flags)) +} + +func BitVec(s *obj.LSym, off int, bv bitvec.BitVec) int { + // Runtime reads the bitmaps as byte arrays. Oblige. 
+ for j := 0; int32(j) < bv.N; j += 8 { + word := bv.B[j/32] + off = Uint8(s, off, uint8(word>>(uint(j)%32))) + } + return off +} diff --git a/src/cmd/compile/internal/objw/prog.go b/src/cmd/compile/internal/objw/prog.go new file mode 100644 index 0000000000000..54028e47fd23c --- /dev/null +++ b/src/cmd/compile/internal/objw/prog.go @@ -0,0 +1,218 @@ +// Derived from Inferno utils/6c/txt.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6c/txt.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package objw + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/ssa" + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/src" +) + +var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839 + +// NewProgs returns a new Progs for fn. +// worker indicates which of the backend workers will use the Progs. +func NewProgs(fn *ir.Func, worker int) *Progs { + pp := new(Progs) + if base.Ctxt.CanReuseProgs() { + sz := len(sharedProgArray) / base.Flag.LowerC + pp.Cache = sharedProgArray[sz*worker : sz*(worker+1)] + } + pp.CurFunc = fn + + // prime the pump + pp.Next = pp.NewProg() + pp.Clear(pp.Next) + + pp.Pos = fn.Pos() + pp.SetText(fn) + // PCDATA tables implicitly start with index -1. + pp.PrevLive = LivenessIndex{-1, false} + pp.NextLive = pp.PrevLive + return pp +} + +// Progs accumulates Progs for a function and converts them into machine code. +type Progs struct { + Text *obj.Prog // ATEXT Prog for this function + Next *obj.Prog // next Prog + PC int64 // virtual PC; count of Progs + Pos src.XPos // position to use for new Progs + CurFunc *ir.Func // fn these Progs are for + Cache []obj.Prog // local progcache + CacheIndex int // first free element of progcache + + NextLive LivenessIndex // liveness index for the next Prog + PrevLive LivenessIndex // last emitted liveness index +} + +// LivenessIndex stores the liveness map information for a Value. 
+type LivenessIndex struct { + StackMapIndex int + + // IsUnsafePoint indicates that this is an unsafe-point. + // + // Note that it's possible for a call Value to have a stack + // map while also being an unsafe-point. This means it cannot + // be preempted at this instruction, but that a preemption or + // stack growth may happen in the called function. + IsUnsafePoint bool +} + +// StackMapDontCare indicates that the stack map index at a Value +// doesn't matter. +// +// This is a sentinel value that should never be emitted to the PCDATA +// stream. We use -1000 because that's obviously never a valid stack +// index (but -1 is). +const StackMapDontCare = -1000 + +// LivenessDontCare indicates that the liveness information doesn't +// matter. Currently it is used in deferreturn liveness when we don't +// actually need it. It should never be emitted to the PCDATA stream. +var LivenessDontCare = LivenessIndex{StackMapDontCare, true} + +func (idx LivenessIndex) StackMapValid() bool { + return idx.StackMapIndex != StackMapDontCare +} + +func (pp *Progs) NewProg() *obj.Prog { + var p *obj.Prog + if pp.CacheIndex < len(pp.Cache) { + p = &pp.Cache[pp.CacheIndex] + pp.CacheIndex++ + } else { + p = new(obj.Prog) + } + p.Ctxt = base.Ctxt + return p +} + +// Flush converts from pp to machine code. +func (pp *Progs) Flush() { + plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.CurFunc} + obj.Flushplist(base.Ctxt, plist, pp.NewProg, base.Ctxt.Pkgpath) +} + +// Free clears pp and any associated resources. +func (pp *Progs) Free() { + if base.Ctxt.CanReuseProgs() { + // Clear progs to enable GC and avoid abuse. + s := pp.Cache[:pp.CacheIndex] + for i := range s { + s[i] = obj.Prog{} + } + } + // Clear pp to avoid abuse. + *pp = Progs{} +} + +// Prog adds a Prog with instruction As to pp. +func (pp *Progs) Prog(as obj.As) *obj.Prog { + if pp.NextLive.StackMapValid() && pp.NextLive.StackMapIndex != pp.PrevLive.StackMapIndex { + // Emit stack map index change. + idx := pp.NextLive.StackMapIndex + pp.PrevLive.StackMapIndex = idx + p := pp.Prog(obj.APCDATA) + p.From.SetConst(objabi.PCDATA_StackMapIndex) + p.To.SetConst(int64(idx)) + } + if pp.NextLive.IsUnsafePoint != pp.PrevLive.IsUnsafePoint { + // Emit unsafe-point marker. 
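+		// The marker is a PCDATA pseudo-instruction: like the
+		// stack-map index above, it emits no machine code, only a
+		// table/value pair that holds for every PC that follows
+		// until the table's value changes again.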
+ pp.PrevLive.IsUnsafePoint = pp.NextLive.IsUnsafePoint + p := pp.Prog(obj.APCDATA) + p.From.SetConst(objabi.PCDATA_UnsafePoint) + if pp.NextLive.IsUnsafePoint { + p.To.SetConst(objabi.PCDATA_UnsafePointUnsafe) + } else { + p.To.SetConst(objabi.PCDATA_UnsafePointSafe) + } + } + + p := pp.Next + pp.Next = pp.NewProg() + pp.Clear(pp.Next) + p.Link = pp.Next + + if !pp.Pos.IsKnown() && base.Flag.K != 0 { + base.Warn("prog: unknown position (line 0)") + } + + p.As = as + p.Pos = pp.Pos + if pp.Pos.IsStmt() == src.PosIsStmt { + // Clear IsStmt for later Progs at this pos provided that as can be marked as a stmt + if ssa.LosesStmtMark(as) { + return p + } + pp.Pos = pp.Pos.WithNotStmt() + } + return p +} + +func (pp *Progs) Clear(p *obj.Prog) { + obj.Nopout(p) + p.As = obj.AEND + p.Pc = pp.PC + pp.PC++ +} + +func (pp *Progs) Append(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, foffset int64, ttype obj.AddrType, treg int16, toffset int64) *obj.Prog { + q := pp.NewProg() + pp.Clear(q) + q.As = as + q.Pos = p.Pos + q.From.Type = ftype + q.From.Reg = freg + q.From.Offset = foffset + q.To.Type = ttype + q.To.Reg = treg + q.To.Offset = toffset + q.Link = p.Link + p.Link = q + return q +} + +func (pp *Progs) SetText(fn *ir.Func) { + if pp.Text != nil { + base.Fatalf("Progs.settext called twice") + } + ptxt := pp.Prog(obj.ATEXT) + pp.Text = ptxt + + fn.LSym.Func().Text = ptxt + ptxt.From.Type = obj.TYPE_MEM + ptxt.From.Name = obj.NAME_EXTERN + ptxt.From.Sym = fn.LSym +} diff --git a/src/cmd/compile/internal/ppc64/ggen.go b/src/cmd/compile/internal/ppc64/ggen.go index 9e5723186329d..c76962cfb8113 100644 --- a/src/cmd/compile/internal/ppc64/ggen.go +++ b/src/cmd/compile/internal/ppc64/ggen.go @@ -6,46 +6,46 @@ package ppc64 import ( "cmd/compile/internal/base" - "cmd/compile/internal/gc" "cmd/compile/internal/ir" + "cmd/compile/internal/objw" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/ppc64" ) -func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { +func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { if cnt == 0 { return p } if cnt < int64(4*types.PtrSize) { for i := int64(0); i < cnt; i += int64(types.PtrSize) { - p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, base.Ctxt.FixedFrameSize()+off+i) + p = pp.Append(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, base.Ctxt.FixedFrameSize()+off+i) } } else if cnt <= int64(128*types.PtrSize) { - p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0) + p = pp.Append(p, ppc64.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0) p.Reg = ppc64.REGSP - p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) + p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) p.To.Name = obj.NAME_EXTERN p.To.Sym = ir.Syms.Duffzero p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize)) } else { - p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0) - p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0) + p = pp.Append(p, ppc64.AMOVD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0) + p = pp.Append(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0) p.Reg = ppc64.REGSP - p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, 
obj.TYPE_REG, ppc64.REGTMP, 0) - p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0) + p = pp.Append(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0) + p = pp.Append(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0) p.Reg = ppc64.REGRT1 - p = pp.Appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(types.PtrSize)) + p = pp.Append(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(types.PtrSize)) p1 := p - p = pp.Appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0) - p = pp.Appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0) - gc.Patch(p, p1) + p = pp.Append(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0) + p = pp.Append(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0) + p.To.SetTarget(p1) } return p } -func ginsnop(pp *gc.Progs) *obj.Prog { +func ginsnop(pp *objw.Progs) *obj.Prog { p := pp.Prog(ppc64.AOR) p.From.Type = obj.TYPE_REG p.From.Reg = ppc64.REG_R0 @@ -54,7 +54,7 @@ func ginsnop(pp *gc.Progs) *obj.Prog { return p } -func ginsnopdefer(pp *gc.Progs) *obj.Prog { +func ginsnopdefer(pp *objw.Progs) *obj.Prog { // On PPC64 two nops are required in the defer case. // // (see gc/cgen.go, gc/plive.go -- copy of comment below) diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go index 32e9be8417523..edcaad03ecc83 100644 --- a/src/cmd/compile/internal/ppc64/ssa.go +++ b/src/cmd/compile/internal/ppc64/ssa.go @@ -210,7 +210,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // BNE retry p3 := s.Prog(ppc64.ABNE) p3.To.Type = obj.TYPE_BRANCH - gc.Patch(p3, p) + p3.To.SetTarget(p) case ssa.OpPPC64LoweredAtomicAdd32, ssa.OpPPC64LoweredAtomicAdd64: @@ -254,7 +254,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // BNE retry p4 := s.Prog(ppc64.ABNE) p4.To.Type = obj.TYPE_BRANCH - gc.Patch(p4, p) + p4.To.SetTarget(p) // Ensure a 32 bit result if v.Op == ssa.OpPPC64LoweredAtomicAdd32 { @@ -300,7 +300,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // BNE retry p2 := s.Prog(ppc64.ABNE) p2.To.Type = obj.TYPE_BRANCH - gc.Patch(p2, p) + p2.To.SetTarget(p) // ISYNC pisync := s.Prog(ppc64.AISYNC) pisync.To.Type = obj.TYPE_NONE @@ -348,7 +348,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // ISYNC pisync := s.Prog(ppc64.AISYNC) pisync.To.Type = obj.TYPE_NONE - gc.Patch(p2, pisync) + p2.To.SetTarget(pisync) case ssa.OpPPC64LoweredAtomicStore8, ssa.OpPPC64LoweredAtomicStore32, @@ -439,7 +439,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // BNE retry p4 := s.Prog(ppc64.ABNE) p4.To.Type = obj.TYPE_BRANCH - gc.Patch(p4, p) + p4.To.SetTarget(p) // LWSYNC - Assuming shared data not write-through-required nor // caching-inhibited. See Appendix B.2.1.1 in the ISA 2.07b. // If the operation is a CAS-Release, then synchronization is not necessary. 
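Every hunk in these ssa.go files makes the same mechanical substitution, so it is worth spelling out once. The retired gc.Patch helper pointed an already-emitted branch at a later instruction, and obj.Addr.SetTarget now performs that store as a method behind the same TYPE_BRANCH guard. The sketch below reconstructs the old helper from its call sites; the exact original body is an assumption, not quoted from the tree.

	package sketch

	import "cmd/internal/obj"

	// patch is a reconstruction of the retired gc.Patch helper:
	// resolve the already-emitted branch p to jump to instruction q.
	func patch(p, q *obj.Prog) {
		if p.To.Type != obj.TYPE_BRANCH {
			panic("patch: not a branch")
		}
		p.To.Val = q // branch targets are carried in Addr.Val
	}

	// The replacement used throughout this patch is the method form,
	// which performs the same check before the same store:
	//
	//	p.To.SetTarget(q)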
@@ -462,10 +462,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p7.From.Offset = 0 p7.To.Type = obj.TYPE_REG p7.To.Reg = out - gc.Patch(p2, p7) + p2.To.SetTarget(p7) // done (label) p8 := s.Prog(obj.ANOP) - gc.Patch(p6, p8) + p6.To.SetTarget(p8) case ssa.OpPPC64LoweredGetClosurePtr: // Closure pointer is R11 (already) @@ -539,10 +539,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Reg = r p.From.Type = obj.TYPE_REG p.From.Reg = r0 - gc.Patch(pbahead, p) + pbahead.To.SetTarget(p) p = s.Prog(obj.ANOP) - gc.Patch(pbover, p) + pbover.To.SetTarget(p) case ssa.OpPPC64DIVW: // word-width version of above @@ -574,10 +574,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Reg = r p.From.Type = obj.TYPE_REG p.From.Reg = r0 - gc.Patch(pbahead, p) + pbahead.To.SetTarget(p) p = s.Prog(obj.ANOP) - gc.Patch(pbover, p) + pbover.To.SetTarget(p) case ssa.OpPPC64CLRLSLWI: r := v.Reg() @@ -1028,7 +1028,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Offset = ppc64.BO_BCTR p.Reg = ppc64.REG_R0 p.To.Type = obj.TYPE_BRANCH - gc.Patch(p, top) + p.To.SetTarget(top) } // When ctr == 1 the loop was not generated but // there are at least 64 bytes to clear, so add @@ -1228,7 +1228,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Offset = ppc64.BO_BCTR p.Reg = ppc64.REG_R0 p.To.Type = obj.TYPE_BRANCH - gc.Patch(p, top) + p.To.SetTarget(top) } // when ctr == 1 the loop was not generated but @@ -1407,7 +1407,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Offset = ppc64.BO_BCTR p.Reg = ppc64.REG_R0 p.To.Type = obj.TYPE_BRANCH - gc.Patch(p, top) + p.To.SetTarget(top) // srcReg and dstReg were incremented in the loop, so // later instructions start with offset 0. @@ -1654,7 +1654,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Offset = ppc64.BO_BCTR p.Reg = ppc64.REG_R0 p.To.Type = obj.TYPE_BRANCH - gc.Patch(p, top) + p.To.SetTarget(top) // srcReg and dstReg were incremented in the loop, so // later instructions start with offset 0. @@ -1840,7 +1840,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // NOP (so the BNE has somewhere to land) nop := s.Prog(obj.ANOP) - gc.Patch(p2, nop) + p2.To.SetTarget(nop) } else { // Issue a load which will fault if arg is nil. 
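The riscv64 zeroRange that follows keeps the three-tier shape shared by the mips64, ppc64, and x86 zerorange functions in this patch (s390x instead cuts over on a byte count). Below is a compact restatement of that shared dispatch, with the 4- and 128-word cutoffs taken from the surrounding code; the helper and its return strings are illustrative only.

	package sketch

	// zeroStrategy restates the size dispatch that most of the
	// per-arch zerorange implementations in this patch share.
	func zeroStrategy(cnt, wordSize int64) string {
		switch {
		case cnt == 0:
			return "no code"
		case cnt < 4*wordSize:
			return "one zeroing store per word"
		case cnt <= 128*wordSize:
			return "branch into runtime.duffzero at a computed offset"
		default:
			return "explicit store loop (REP STOSL on x86)"
		}
	}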
diff --git a/src/cmd/compile/internal/riscv64/ggen.go b/src/cmd/compile/internal/riscv64/ggen.go index d18644bb1b2dd..9df739456b9e5 100644 --- a/src/cmd/compile/internal/riscv64/ggen.go +++ b/src/cmd/compile/internal/riscv64/ggen.go @@ -6,14 +6,14 @@ package riscv64 import ( "cmd/compile/internal/base" - "cmd/compile/internal/gc" "cmd/compile/internal/ir" + "cmd/compile/internal/objw" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/riscv" ) -func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { +func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { if cnt == 0 { return p } @@ -23,15 +23,15 @@ func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { if cnt < int64(4*types.PtrSize) { for i := int64(0); i < cnt; i += int64(types.PtrSize) { - p = pp.Appendpp(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off+i) + p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off+i) } return p } if cnt <= int64(128*types.PtrSize) { - p = pp.Appendpp(p, riscv.AADDI, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_A0, 0) + p = pp.Append(p, riscv.AADDI, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_A0, 0) p.Reg = riscv.REG_SP - p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) + p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) p.To.Name = obj.NAME_EXTERN p.To.Sym = ir.Syms.Duffzero p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize)) @@ -45,15 +45,15 @@ func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { // MOV ZERO, (T0) // ADD $Widthptr, T0 // BNE T0, T1, loop - p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_T0, 0) + p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_T0, 0) p.Reg = riscv.REG_SP - p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, riscv.REG_T1, 0) + p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, riscv.REG_T1, 0) p.Reg = riscv.REG_T0 - p = pp.Appendpp(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_T0, 0) + p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_T0, 0) loop := p - p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, riscv.REG_T0, 0) - p = pp.Appendpp(p, riscv.ABNE, obj.TYPE_REG, riscv.REG_T0, 0, obj.TYPE_BRANCH, 0, 0) + p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, riscv.REG_T0, 0) + p = pp.Append(p, riscv.ABNE, obj.TYPE_REG, riscv.REG_T0, 0, obj.TYPE_BRANCH, 0, 0) p.Reg = riscv.REG_T1 - gc.Patch(p, loop) + p.To.SetTarget(loop) return p } diff --git a/src/cmd/compile/internal/riscv64/gsubr.go b/src/cmd/compile/internal/riscv64/gsubr.go index d40bdf7a1d362..74bccf8d42ab1 100644 --- a/src/cmd/compile/internal/riscv64/gsubr.go +++ b/src/cmd/compile/internal/riscv64/gsubr.go @@ -5,12 +5,12 @@ package riscv64 import ( - "cmd/compile/internal/gc" + "cmd/compile/internal/objw" "cmd/internal/obj" "cmd/internal/obj/riscv" ) -func ginsnop(pp *gc.Progs) *obj.Prog { +func ginsnop(pp *objw.Progs) *obj.Prog { // Hardware nop is ADD $0, ZERO p := pp.Prog(riscv.AADD) p.From.Type = obj.TYPE_CONST diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go index 616b76e5f6174..d08cebdcf5b76 100644 --- a/src/cmd/compile/internal/riscv64/ssa.go +++ b/src/cmd/compile/internal/riscv64/ssa.go @@ -502,7 
+502,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p4.From.Reg = riscv.REG_TMP p4.Reg = riscv.REG_ZERO p4.To.Type = obj.TYPE_BRANCH - gc.Patch(p4, p1) + p4.To.SetTarget(p1) p5 := s.Prog(riscv.AMOV) p5.From.Type = obj.TYPE_CONST @@ -511,7 +511,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p5.To.Reg = out p6 := s.Prog(obj.ANOP) - gc.Patch(p2, p6) + p2.To.SetTarget(p6) case ssa.OpRISCV64LoweredZero: mov, sz := largestMove(v.AuxInt) @@ -537,7 +537,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p3.Reg = v.Args[0].Reg() p3.From.Type = obj.TYPE_REG p3.From.Reg = v.Args[1].Reg() - gc.Patch(p3, p) + p3.To.SetTarget(p) case ssa.OpRISCV64LoweredMove: mov, sz := largestMove(v.AuxInt) @@ -577,7 +577,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p5.Reg = v.Args[1].Reg() p5.From.Type = obj.TYPE_REG p5.From.Reg = v.Args[2].Reg() - gc.Patch(p5, p) + p5.To.SetTarget(p) case ssa.OpRISCV64LoweredNilCheck: // Issue a load which will fault if arg is nil. diff --git a/src/cmd/compile/internal/s390x/ggen.go b/src/cmd/compile/internal/s390x/ggen.go index 0e2f48bf4cc3e..488a080c46888 100644 --- a/src/cmd/compile/internal/s390x/ggen.go +++ b/src/cmd/compile/internal/s390x/ggen.go @@ -6,7 +6,7 @@ package s390x import ( "cmd/compile/internal/base" - "cmd/compile/internal/gc" + "cmd/compile/internal/objw" "cmd/internal/obj" "cmd/internal/obj/s390x" ) @@ -18,7 +18,7 @@ import ( const clearLoopCutoff = 1024 // zerorange clears the stack in the given range. -func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { +func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { if cnt == 0 { return p } @@ -31,7 +31,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { // need to create a copy of the stack pointer that we can adjust. // We also need to do this if we are going to loop. if off < 0 || off > 4096-clearLoopCutoff || cnt > clearLoopCutoff { - p = pp.Appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, s390x.REGRT1, 0) + p = pp.Append(p, s390x.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, s390x.REGRT1, 0) p.Reg = int16(s390x.REGSP) reg = s390x.REGRT1 off = 0 @@ -40,12 +40,12 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { // Generate a loop of large clears. if cnt > clearLoopCutoff { ireg := int16(s390x.REGRT2) // register holds number of remaining loop iterations - p = pp.Appendpp(p, s390x.AMOVD, obj.TYPE_CONST, 0, cnt/256, obj.TYPE_REG, ireg, 0) - p = pp.Appendpp(p, s390x.ACLEAR, obj.TYPE_CONST, 0, 256, obj.TYPE_MEM, reg, off) + p = pp.Append(p, s390x.AMOVD, obj.TYPE_CONST, 0, cnt/256, obj.TYPE_REG, ireg, 0) + p = pp.Append(p, s390x.ACLEAR, obj.TYPE_CONST, 0, 256, obj.TYPE_MEM, reg, off) pl := p - p = pp.Appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, 256, obj.TYPE_REG, reg, 0) - p = pp.Appendpp(p, s390x.ABRCTG, obj.TYPE_REG, ireg, 0, obj.TYPE_BRANCH, 0, 0) - gc.Patch(p, pl) + p = pp.Append(p, s390x.AADD, obj.TYPE_CONST, 0, 256, obj.TYPE_REG, reg, 0) + p = pp.Append(p, s390x.ABRCTG, obj.TYPE_REG, ireg, 0, obj.TYPE_BRANCH, 0, 0) + p.To.SetTarget(pl) cnt = cnt % 256 } @@ -70,11 +70,11 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { case 2: ins = s390x.AMOVH } - p = pp.Appendpp(p, ins, obj.TYPE_CONST, 0, 0, obj.TYPE_MEM, reg, off) + p = pp.Append(p, ins, obj.TYPE_CONST, 0, 0, obj.TYPE_MEM, reg, off) // Handle clears that would require multiple move instructions with CLEAR (assembled as XC). 
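+		// (A constant-length CLEAR lowers to XC, which can zero at
+		// most 256 bytes per instruction; that limit is also why
+		// the loop above strides by 256.)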
default: - p = pp.Appendpp(p, s390x.ACLEAR, obj.TYPE_CONST, 0, n, obj.TYPE_MEM, reg, off) + p = pp.Append(p, s390x.ACLEAR, obj.TYPE_CONST, 0, n, obj.TYPE_MEM, reg, off) } cnt -= n @@ -84,6 +84,6 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { return p } -func ginsnop(pp *gc.Progs) *obj.Prog { +func ginsnop(pp *objw.Progs) *obj.Prog { return pp.Prog(s390x.ANOPH) } diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go index 366adffd986f6..dc01401348be9 100644 --- a/src/cmd/compile/internal/s390x/ssa.go +++ b/src/cmd/compile/internal/s390x/ssa.go @@ -709,7 +709,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { bne := s.Prog(s390x.ABLT) bne.To.Type = obj.TYPE_BRANCH - gc.Patch(bne, mvc) + bne.To.SetTarget(mvc) if v.AuxInt > 0 { mvc := s.Prog(s390x.AMVC) @@ -751,7 +751,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { bne := s.Prog(s390x.ABLT) bne.To.Type = obj.TYPE_BRANCH - gc.Patch(bne, clear) + bne.To.SetTarget(clear) if v.AuxInt > 0 { clear := s.Prog(s390x.ACLEAR) @@ -846,7 +846,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // NOP (so the BNE has somewhere to land) nop := s.Prog(obj.ANOP) - gc.Patch(bne, nop) + bne.To.SetTarget(nop) case ssa.OpS390XLoweredAtomicExchange32, ssa.OpS390XLoweredAtomicExchange64: // Loop until the CS{,G} succeeds. // MOV{WZ,D} arg0, ret @@ -873,7 +873,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // BNE cs bne := s.Prog(s390x.ABNE) bne.To.Type = obj.TYPE_BRANCH - gc.Patch(bne, cs) + bne.To.SetTarget(cs) case ssa.OpS390XSYNC: s.Prog(s390x.ASYNC) case ssa.OpClobber: diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go index 4e5aa433d970f..ee86fc62d27a5 100644 --- a/src/cmd/compile/internal/wasm/ssa.go +++ b/src/cmd/compile/internal/wasm/ssa.go @@ -9,6 +9,7 @@ import ( "cmd/compile/internal/gc" "cmd/compile/internal/ir" "cmd/compile/internal/logopt" + "cmd/compile/internal/objw" "cmd/compile/internal/ssa" "cmd/compile/internal/types" "cmd/internal/obj" @@ -30,7 +31,7 @@ func Init(arch *gc.Arch) { arch.SSAGenBlock = ssaGenBlock } -func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog { +func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog { if cnt == 0 { return p } @@ -39,15 +40,15 @@ func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Pr } for i := int64(0); i < cnt; i += 8 { - p = pp.Appendpp(p, wasm.AGet, obj.TYPE_REG, wasm.REG_SP, 0, 0, 0, 0) - p = pp.Appendpp(p, wasm.AI64Const, obj.TYPE_CONST, 0, 0, 0, 0, 0) - p = pp.Appendpp(p, wasm.AI64Store, 0, 0, 0, obj.TYPE_CONST, 0, off+i) + p = pp.Append(p, wasm.AGet, obj.TYPE_REG, wasm.REG_SP, 0, 0, 0, 0) + p = pp.Append(p, wasm.AI64Const, obj.TYPE_CONST, 0, 0, 0, 0, 0) + p = pp.Append(p, wasm.AI64Store, 0, 0, 0, obj.TYPE_CONST, 0, off+i) } return p } -func ginsnop(pp *gc.Progs) *obj.Prog { +func ginsnop(pp *objw.Progs) *obj.Prog { return pp.Prog(wasm.ANop) } diff --git a/src/cmd/compile/internal/x86/ggen.go b/src/cmd/compile/internal/x86/ggen.go index de43594e88d3a..3ca479763e63a 100644 --- a/src/cmd/compile/internal/x86/ggen.go +++ b/src/cmd/compile/internal/x86/ggen.go @@ -5,41 +5,41 @@ package x86 import ( - "cmd/compile/internal/gc" "cmd/compile/internal/ir" + "cmd/compile/internal/objw" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/x86" ) -func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, ax *uint32) *obj.Prog { +func zerorange(pp *objw.Progs, p *obj.Prog, 
off, cnt int64, ax *uint32) *obj.Prog { if cnt == 0 { return p } if *ax == 0 { - p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0) + p = pp.Append(p, x86.AMOVL, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0) *ax = 1 } if cnt <= int64(4*types.RegSize) { for i := int64(0); i < cnt; i += int64(types.RegSize) { - p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off+i) + p = pp.Append(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off+i) } } else if cnt <= int64(128*types.RegSize) { - p = pp.Appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0) - p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(types.RegSize))) + p = pp.Append(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0) + p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(types.RegSize))) p.To.Sym = ir.Syms.Duffzero } else { - p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0) - p = pp.Appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0) - p = pp.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0) - p = pp.Appendpp(p, x86.ASTOSL, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0) + p = pp.Append(p, x86.AMOVL, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0) + p = pp.Append(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0) + p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0) + p = pp.Append(p, x86.ASTOSL, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0) } return p } -func ginsnop(pp *gc.Progs) *obj.Prog { +func ginsnop(pp *objw.Progs) *obj.Prog { // See comment in ../amd64/ggen.go. p := pp.Prog(x86.AXCHGL) p.From.Type = obj.TYPE_REG From 071ab0a14c294cda484e6f03140cb3cd27a5dca9 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 23 Dec 2020 00:48:08 -0500 Subject: [PATCH 231/474] [dev.regabi] cmd/compile: split out package liveness [generated] [git-generate] cd src/cmd/compile/internal/gc rf ' # AutoVar is essentially an ssa helper; move it there. mv AutoVar value.go mv value.go cmd/compile/internal/ssa # Export liveness API and unexport non-API. 
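# (each mv below renames the declaration and rewrites every use
# across the tree, which is how callers end up on the exported
# names within this one generated commit)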
mv LivenessMap Map mv Map.vals Map.Vals mv Map.deferreturn Map.DeferReturn mv livenessShouldTrack ShouldTrack mv onebitwalktype1 SetTypeBits mv allUnsafe IsUnsafe mv liveness Compute mv BlockEffects blockEffects mv Liveness liveness mv liveness _liveness # make room for import mv emitptrargsmap WriteFuncMap mv WriteFuncMap plive.go mv bvset.go plive.go cmd/compile/internal/liveness ' cd ../liveness rf ' mv _liveness liveness ' Change-Id: I3b86e5025bd9d32a7e19f44714fa16be4125059e Reviewed-on: https://go-review.googlesource.com/c/go/+/279311 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/pgen.go | 36 +--- src/cmd/compile/internal/gc/reflect.go | 3 +- src/cmd/compile/internal/gc/ssa.go | 23 +-- .../internal/{gc => liveness}/bvset.go | 2 +- .../internal/{gc => liveness}/plive.go | 163 +++++++++++------- src/cmd/compile/internal/ssa/value.go | 11 ++ 6 files changed, 121 insertions(+), 117 deletions(-) rename src/cmd/compile/internal/{gc => liveness}/bvset.go (99%) rename src/cmd/compile/internal/{gc => liveness}/plive.go (91%) diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 40a2195a122ad..dcba5c7ecb665 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -6,8 +6,8 @@ package gc import ( "cmd/compile/internal/base" - "cmd/compile/internal/bitvec" "cmd/compile/internal/ir" + "cmd/compile/internal/liveness" "cmd/compile/internal/objw" "cmd/compile/internal/ssa" "cmd/compile/internal/typecheck" @@ -30,36 +30,6 @@ var ( compilequeue []*ir.Func // functions waiting to be compiled ) -func emitptrargsmap(fn *ir.Func) { - if ir.FuncName(fn) == "_" || fn.Sym().Linkname != "" { - return - } - lsym := base.Ctxt.Lookup(fn.LSym.Name + ".args_stackmap") - nptr := int(fn.Type().ArgWidth() / int64(types.PtrSize)) - bv := bitvec.New(int32(nptr) * 2) - nbitmap := 1 - if fn.Type().NumResults() > 0 { - nbitmap = 2 - } - off := objw.Uint32(lsym, 0, uint32(nbitmap)) - off = objw.Uint32(lsym, off, uint32(bv.N)) - - if ir.IsMethod(fn) { - onebitwalktype1(fn.Type().Recvs(), 0, bv) - } - if fn.Type().NumParams() > 0 { - onebitwalktype1(fn.Type().Params(), 0, bv) - } - off = objw.BitVec(lsym, off, bv) - - if fn.Type().NumResults() > 0 { - onebitwalktype1(fn.Type().Results(), 0, bv) - off = objw.BitVec(lsym, off, bv) - } - - objw.Global(lsym, int32(off), obj.RODATA|obj.LOCAL) -} - // cmpstackvarlt reports whether the stack variable a sorts before b. // // Sort the list of stack variables. Autos after anything else, @@ -213,7 +183,7 @@ func funccompile(fn *ir.Func) { if len(fn.Body) == 0 { // Initialize ABI wrappers if necessary. initLSym(fn, false) - emitptrargsmap(fn) + liveness.WriteFuncMap(fn) return } @@ -254,7 +224,7 @@ func compile(fn *ir.Func) { for _, n := range fn.Dcl { switch n.Class_ { case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO: - if livenessShouldTrack(n) && n.Addrtaken() { + if liveness.ShouldTrack(n) && n.Addrtaken() { dtypesym(n.Type()) // Also make sure we allocate a linker symbol // for the stack object data, for the same reason. 
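The emitptrargsmap body deleted here reappears below as liveness.WriteFuncMap, and the symbol it emits is easier to follow with its layout in mind. The decoder sketch below is illustrative only, inferred from the objw.Uint32 and objw.BitVec calls in the moved code; little-endian is assumed for the example, while the real symbol is written in the target's byte order via WriteInt.

	package sketch

	import "encoding/binary"

	// funcMapHeader decodes the fixed prefix of a fn.args_stackmap
	// symbol: two uint32 words, then one or two bitmaps of nbit bits
	// each (the moved code sizes them at two bits per argument word).
	func funcMapHeader(b []byte) (nbitmap, nbit uint32, bitmaps []byte) {
		nbitmap = binary.LittleEndian.Uint32(b[0:4]) // 1, or 2 if the function has results
		nbit = binary.LittleEndian.Uint32(b[4:8])    // bits per bitmap
		return nbitmap, nbit, b[8:]
	}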
diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index dcb2620f1f03d..42f441a44aaf2 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -8,6 +8,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/bitvec" "cmd/compile/internal/ir" + "cmd/compile/internal/liveness" "cmd/compile/internal/objw" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" @@ -1591,7 +1592,7 @@ func fillptrmask(t *types.Type, ptrmask []byte) { } vec := bitvec.New(8 * int32(len(ptrmask))) - onebitwalktype1(t, 0, vec) + liveness.SetTypeBits(t, 0, vec) nptr := types.PtrDataSize(t) / int64(types.PtrSize) for i := int64(0); i < nptr; i++ { diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 44e199abbf268..5c36e922a635f 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -18,6 +18,7 @@ import ( "bytes" "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/liveness" "cmd/compile/internal/objw" "cmd/compile/internal/ssa" "cmd/compile/internal/typecheck" @@ -6315,7 +6316,7 @@ type SSAGenState struct { // Map from GC safe points to liveness index, generated by // liveness analysis. - livenessMap LivenessMap + livenessMap liveness.Map // lineRunStart records the beginning of the current run of instructions // within a single block sharing the same line number @@ -6401,7 +6402,7 @@ func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func emitStackObjects(e *ssafn, pp *objw.Progs) { var vars []*ir.Name for _, n := range e.curfn.Dcl { - if livenessShouldTrack(n) && n.Addrtaken() { + if liveness.ShouldTrack(n) && n.Addrtaken() { vars = append(vars, n) } } @@ -6448,7 +6449,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) { e := f.Frontend().(*ssafn) - s.livenessMap = liveness(e.curfn, f, e.stkptrsize, pp) + s.livenessMap = liveness.Compute(e.curfn, f, e.stkptrsize, pp) emitStackObjects(e, pp) openDeferInfo := e.curfn.LSym.Func().OpenCodedDeferInfo @@ -6519,7 +6520,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) { // instruction. We won't use the actual liveness map on a // control instruction. Just mark it something that is // preemptible, unless this function is "all unsafe". - s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: allUnsafe(f)} + s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: liveness.IsUnsafe(f)} // Emit values in block thearch.SSAMarkMoves(&s, b) @@ -6624,7 +6625,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) { // When doing open-coded defers, generate a disconnected call to // deferreturn and a return. This will be used to during panic // recovery to unwind the stack and return back to the runtime. - s.pp.NextLive = s.livenessMap.deferreturn + s.pp.NextLive = s.livenessMap.DeferReturn gencallret(pp, ir.Syms.Deferreturn) } @@ -7012,18 +7013,8 @@ func CheckLoweredGetClosurePtr(v *ssa.Value) { } } -// AutoVar returns a *Name and int64 representing the auto variable and offset within it -// where v should be spilled. 
-func AutoVar(v *ssa.Value) (*ir.Name, int64) { - loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot) - if v.Type.Size() > loc.Type.Size() { - v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type) - } - return loc.N, loc.Off -} - func AddrAuto(a *obj.Addr, v *ssa.Value) { - n, off := AutoVar(v) + n, off := ssa.AutoVar(v) a.Type = obj.TYPE_MEM a.Sym = n.Sym().Linksym() a.Reg = int16(thearch.REGSP) diff --git a/src/cmd/compile/internal/gc/bvset.go b/src/cmd/compile/internal/liveness/bvset.go similarity index 99% rename from src/cmd/compile/internal/gc/bvset.go rename to src/cmd/compile/internal/liveness/bvset.go index 7f5f41fb5c421..21bc1fee4d62b 100644 --- a/src/cmd/compile/internal/gc/bvset.go +++ b/src/cmd/compile/internal/liveness/bvset.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package liveness import "cmd/compile/internal/bitvec" diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/liveness/plive.go similarity index 91% rename from src/cmd/compile/internal/gc/plive.go rename to src/cmd/compile/internal/liveness/plive.go index 260edda9ce7d2..785a3a29deba7 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/liveness/plive.go @@ -12,9 +12,13 @@ // // Each level includes the earlier output as well. -package gc +package liveness import ( + "crypto/md5" + "fmt" + "strings" + "cmd/compile/internal/base" "cmd/compile/internal/bitvec" "cmd/compile/internal/ir" @@ -23,9 +27,6 @@ import ( "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/objabi" - "crypto/md5" - "fmt" - "strings" ) // OpVarDef is an annotation for the liveness analysis, marking a place @@ -83,8 +84,8 @@ import ( // so the compiler can allocate two temps to the same location. Here it's now // useless, since the implementation of stack objects. -// BlockEffects summarizes the liveness effects on an SSA block. -type BlockEffects struct { +// blockEffects summarizes the liveness effects on an SSA block. +type blockEffects struct { // Computed during Liveness.prologue using only the content of // individual blocks: // @@ -102,14 +103,14 @@ type BlockEffects struct { } // A collection of global state used by liveness analysis. -type Liveness struct { +type liveness struct { fn *ir.Func f *ssa.Func vars []*ir.Name idx map[*ir.Name]int32 stkptrsize int64 - be []BlockEffects + be []blockEffects // allUnsafe indicates that all points in this function are // unsafe-points. @@ -127,40 +128,40 @@ type Liveness struct { // livenessMap maps from safe points (i.e., CALLs) to their // liveness map indexes. - livenessMap LivenessMap + livenessMap Map stackMapSet bvecSet stackMaps []bitvec.BitVec cache progeffectscache } -// LivenessMap maps from *ssa.Value to LivenessIndex. -type LivenessMap struct { - vals map[ssa.ID]objw.LivenessIndex - // The set of live, pointer-containing variables at the deferreturn +// Map maps from *ssa.Value to LivenessIndex. +type Map struct { + Vals map[ssa.ID]objw.LivenessIndex + // The set of live, pointer-containing variables at the DeferReturn // call (only set when open-coded defers are used). 
- deferreturn objw.LivenessIndex + DeferReturn objw.LivenessIndex } -func (m *LivenessMap) reset() { - if m.vals == nil { - m.vals = make(map[ssa.ID]objw.LivenessIndex) +func (m *Map) reset() { + if m.Vals == nil { + m.Vals = make(map[ssa.ID]objw.LivenessIndex) } else { - for k := range m.vals { - delete(m.vals, k) + for k := range m.Vals { + delete(m.Vals, k) } } - m.deferreturn = objw.LivenessDontCare + m.DeferReturn = objw.LivenessDontCare } -func (m *LivenessMap) set(v *ssa.Value, i objw.LivenessIndex) { - m.vals[v.ID] = i +func (m *Map) set(v *ssa.Value, i objw.LivenessIndex) { + m.Vals[v.ID] = i } -func (m LivenessMap) Get(v *ssa.Value) objw.LivenessIndex { +func (m Map) Get(v *ssa.Value) objw.LivenessIndex { // If v isn't in the map, then it's a "don't care" and not an // unsafe-point. - if idx, ok := m.vals[v.ID]; ok { + if idx, ok := m.Vals[v.ID]; ok { return idx } return objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: false} @@ -172,13 +173,13 @@ type progeffectscache struct { initialized bool } -// livenessShouldTrack reports whether the liveness analysis +// ShouldTrack reports whether the liveness analysis // should track the variable n. // We don't care about variables that have no pointers, // nor do we care about non-local variables, // nor do we care about empty structs (handled by the pointer check), // nor do we care about the fake PAUTOHEAP variables. -func livenessShouldTrack(nn ir.Node) bool { +func ShouldTrack(nn ir.Node) bool { if nn.Op() != ir.ONAME { return false } @@ -191,7 +192,7 @@ func livenessShouldTrack(nn ir.Node) bool { func getvariables(fn *ir.Func) ([]*ir.Name, map[*ir.Name]int32) { var vars []*ir.Name for _, n := range fn.Dcl { - if livenessShouldTrack(n) { + if ShouldTrack(n) { vars = append(vars, n) } } @@ -202,7 +203,7 @@ func getvariables(fn *ir.Func) ([]*ir.Name, map[*ir.Name]int32) { return vars, idx } -func (lv *Liveness) initcache() { +func (lv *liveness) initcache() { if lv.cache.initialized { base.Fatalf("liveness cache initialized twice") return @@ -246,7 +247,7 @@ const ( // valueEffects returns the index of a variable in lv.vars and the // liveness effects v has on that variable. // If v does not affect any tracked variables, it returns -1, 0. -func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) { +func (lv *liveness) valueEffects(v *ssa.Value) (int32, liveEffect) { n, e := affectedNode(v) if e == 0 || n == nil || n.Op() != ir.ONAME { // cheapest checks first return -1, 0 @@ -293,10 +294,10 @@ func affectedNode(v *ssa.Value) (ir.Node, ssa.SymEffect) { // Special cases. switch v.Op { case ssa.OpLoadReg: - n, _ := AutoVar(v.Args[0]) + n, _ := ssa.AutoVar(v.Args[0]) return n, ssa.SymRead case ssa.OpStoreReg: - n, _ := AutoVar(v) + n, _ := ssa.AutoVar(v) return n, ssa.SymWrite case ssa.OpVarLive: @@ -304,7 +305,7 @@ func affectedNode(v *ssa.Value) (ir.Node, ssa.SymEffect) { case ssa.OpVarDef, ssa.OpVarKill: return v.Aux.(*ir.Name), ssa.SymWrite case ssa.OpKeepAlive: - n, _ := AutoVar(v.Args[0]) + n, _ := ssa.AutoVar(v.Args[0]) return n, ssa.SymRead } @@ -326,15 +327,15 @@ func affectedNode(v *ssa.Value) (ir.Node, ssa.SymEffect) { } type livenessFuncCache struct { - be []BlockEffects - livenessMap LivenessMap + be []blockEffects + livenessMap Map } // Constructs a new liveness structure used to hold the global state of the // liveness computation. The cfg argument is a slice of *BasicBlocks and the // vars argument is a slice of *Nodes. 
-func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int32, stkptrsize int64) *Liveness { - lv := &Liveness{ +func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int32, stkptrsize int64) *liveness { + lv := &liveness{ fn: fn, f: f, vars: vars, @@ -352,11 +353,11 @@ func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int if cap(lc.be) >= f.NumBlocks() { lv.be = lc.be[:f.NumBlocks()] } - lv.livenessMap = LivenessMap{vals: lc.livenessMap.vals, deferreturn: objw.LivenessDontCare} - lc.livenessMap.vals = nil + lv.livenessMap = Map{Vals: lc.livenessMap.Vals, DeferReturn: objw.LivenessDontCare} + lc.livenessMap.Vals = nil } if lv.be == nil { - lv.be = make([]BlockEffects, f.NumBlocks()) + lv.be = make([]blockEffects, f.NumBlocks()) } nblocks := int32(len(f.Blocks)) @@ -376,14 +377,14 @@ func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int return lv } -func (lv *Liveness) blockEffects(b *ssa.Block) *BlockEffects { +func (lv *liveness) blockEffects(b *ssa.Block) *blockEffects { return &lv.be[b.ID] } // NOTE: The bitmap for a specific type t could be cached in t after // the first run and then simply copied into bv at the correct offset // on future calls with the same type t. -func onebitwalktype1(t *types.Type, off int64, bv bitvec.BitVec) { +func SetTypeBits(t *types.Type, off int64, bv bitvec.BitVec) { if t.Align > 0 && off&int64(t.Align-1) != 0 { base.Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off) } @@ -442,13 +443,13 @@ func onebitwalktype1(t *types.Type, off int64, bv bitvec.BitVec) { break } for i := int64(0); i < t.NumElem(); i++ { - onebitwalktype1(elt, off, bv) + SetTypeBits(elt, off, bv) off += elt.Width } case types.TSTRUCT: for _, f := range t.Fields().Slice() { - onebitwalktype1(f.Type, off+f.Offset, bv) + SetTypeBits(f.Type, off+f.Offset, bv) } default: @@ -459,7 +460,7 @@ func onebitwalktype1(t *types.Type, off int64, bv bitvec.BitVec) { // Generates live pointer value maps for arguments and local variables. The // this argument and the in arguments are always assumed live. The vars // argument is a slice of *Nodes. -func (lv *Liveness) pointerMap(liveout bitvec.BitVec, vars []*ir.Name, args, locals bitvec.BitVec) { +func (lv *liveness) pointerMap(liveout bitvec.BitVec, vars []*ir.Name, args, locals bitvec.BitVec) { for i := int32(0); ; i++ { i = liveout.Next(i) if i < 0 { @@ -468,17 +469,17 @@ func (lv *Liveness) pointerMap(liveout bitvec.BitVec, vars []*ir.Name, args, loc node := vars[i] switch node.Class_ { case ir.PAUTO: - onebitwalktype1(node.Type(), node.FrameOffset()+lv.stkptrsize, locals) + SetTypeBits(node.Type(), node.FrameOffset()+lv.stkptrsize, locals) case ir.PPARAM, ir.PPARAMOUT: - onebitwalktype1(node.Type(), node.FrameOffset(), args) + SetTypeBits(node.Type(), node.FrameOffset(), args) } } } -// allUnsafe indicates that all points in this function are +// IsUnsafe indicates that all points in this function are // unsafe-points. -func allUnsafe(f *ssa.Func) bool { +func IsUnsafe(f *ssa.Func) bool { // The runtime assumes the only safe-points are function // prologues (because that's how it used to be). We could and // should improve that, but for now keep consider all points @@ -492,8 +493,8 @@ func allUnsafe(f *ssa.Func) bool { } // markUnsafePoints finds unsafe points and computes lv.unsafePoints. 
-func (lv *Liveness) markUnsafePoints() { - if allUnsafe(lv.f) { +func (lv *liveness) markUnsafePoints() { + if IsUnsafe(lv.f) { // No complex analysis necessary. lv.allUnsafe = true return @@ -655,7 +656,7 @@ func (lv *Liveness) markUnsafePoints() { // This does not necessarily mean the instruction is a safe-point. In // particular, call Values can have a stack map in case the callee // grows the stack, but not themselves be a safe-point. -func (lv *Liveness) hasStackMap(v *ssa.Value) bool { +func (lv *liveness) hasStackMap(v *ssa.Value) bool { if !v.Op.IsCall() { return false } @@ -671,7 +672,7 @@ func (lv *Liveness) hasStackMap(v *ssa.Value) bool { // Initializes the sets for solving the live variables. Visits all the // instructions in each basic block to summarizes the information at each basic // block -func (lv *Liveness) prologue() { +func (lv *liveness) prologue() { lv.initcache() for _, b := range lv.f.Blocks { @@ -693,7 +694,7 @@ func (lv *Liveness) prologue() { } // Solve the liveness dataflow equations. -func (lv *Liveness) solve() { +func (lv *liveness) solve() { // These temporary bitvectors exist to avoid successive allocations and // frees within the loop. nvars := int32(len(lv.vars)) @@ -753,7 +754,7 @@ func (lv *Liveness) solve() { // Visits all instructions in a basic block and computes a bit vector of live // variables at each safe point locations. -func (lv *Liveness) epilogue() { +func (lv *liveness) epilogue() { nvars := int32(len(lv.vars)) liveout := bitvec.New(nvars) livedefer := bitvec.New(nvars) // always-live variables @@ -882,9 +883,9 @@ func (lv *Liveness) epilogue() { // If we have an open-coded deferreturn call, make a liveness map for it. if lv.fn.OpenCodedDeferDisallowed() { - lv.livenessMap.deferreturn = objw.LivenessDontCare + lv.livenessMap.DeferReturn = objw.LivenessDontCare } else { - lv.livenessMap.deferreturn = objw.LivenessIndex{ + lv.livenessMap.DeferReturn = objw.LivenessIndex{ StackMapIndex: lv.stackMapSet.add(livedefer), IsUnsafePoint: false, } @@ -920,7 +921,7 @@ func (lv *Liveness) epilogue() { // is actually a net loss: we save about 50k of argument bitmaps but the new // PCDATA tables cost about 100k. So for now we keep using a single index for // both bitmap lists. -func (lv *Liveness) compact(b *ssa.Block) { +func (lv *liveness) compact(b *ssa.Block) { pos := 0 if b == lv.f.Entry { // Handle entry stack map. @@ -944,7 +945,7 @@ func (lv *Liveness) compact(b *ssa.Block) { lv.livevars = lv.livevars[:0] } -func (lv *Liveness) showlive(v *ssa.Value, live bitvec.BitVec) { +func (lv *liveness) showlive(v *ssa.Value, live bitvec.BitVec) { if base.Flag.Live == 0 || ir.FuncName(lv.fn) == "init" || strings.HasPrefix(ir.FuncName(lv.fn), ".") { return } @@ -984,7 +985,7 @@ func (lv *Liveness) showlive(v *ssa.Value, live bitvec.BitVec) { base.WarnfAt(pos, s) } -func (lv *Liveness) printbvec(printed bool, name string, live bitvec.BitVec) bool { +func (lv *liveness) printbvec(printed bool, name string, live bitvec.BitVec) bool { if live.IsEmpty() { return printed } @@ -1008,7 +1009,7 @@ func (lv *Liveness) printbvec(printed bool, name string, live bitvec.BitVec) boo } // printeffect is like printbvec, but for valueEffects. 
-func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool) bool { +func (lv *liveness) printeffect(printed bool, name string, pos int32, x bool) bool { if !x { return printed } @@ -1028,7 +1029,7 @@ func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool) bo // Prints the computed liveness information and inputs, for debugging. // This format synthesizes the information used during the multiple passes // into a single presentation. -func (lv *Liveness) printDebug() { +func (lv *liveness) printDebug() { fmt.Printf("liveness: %s\n", ir.FuncName(lv.fn)) for i, b := range lv.f.Blocks { @@ -1137,7 +1138,7 @@ func (lv *Liveness) printDebug() { // first word dumped is the total number of bitmaps. The second word is the // length of the bitmaps. All bitmaps are assumed to be of equal length. The // remaining bytes are the raw bitmaps. -func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) { +func (lv *liveness) emit() (argsSym, liveSym *obj.LSym) { // Size args bitmaps to be just large enough to hold the largest pointer. // First, find the largest Xoffset node we care about. // (Nodes without pointers aren't in lv.vars; see livenessShouldTrack.) @@ -1201,11 +1202,11 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) { return makeSym(&argsSymTmp), makeSym(&liveSymTmp) } -// Entry pointer for liveness analysis. Solves for the liveness of +// Entry pointer for Compute analysis. Solves for the Compute of // pointer variables in the function and emits a runtime data // structure read by the garbage collector. // Returns a map from GC safe points to their corresponding stack map index. -func liveness(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *objw.Progs) LivenessMap { +func Compute(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *objw.Progs) Map { // Construct the global liveness state. vars, idx := getvariables(curfn) lv := newliveness(curfn, f, vars, idx, stkptrsize) @@ -1233,11 +1234,11 @@ func liveness(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *objw.Progs) Liv cache := f.Cache.Liveness.(*livenessFuncCache) if cap(lv.be) < 2000 { // Threshold from ssa.Cache slices. 
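+			// (Zeroing the entries before parking the slice in the
+			// cache releases the per-block bitvectors, so the cached
+			// allocation does not pin the previous function's maps.)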
for i := range lv.be { - lv.be[i] = BlockEffects{} + lv.be[i] = blockEffects{} } cache.be = lv.be } - if len(lv.livenessMap.vals) < 2000 { + if len(lv.livenessMap.Vals) < 2000 { cache.livenessMap = lv.livenessMap } } @@ -1298,3 +1299,33 @@ func isfat(t *types.Type) bool { return false } + +func WriteFuncMap(fn *ir.Func) { + if ir.FuncName(fn) == "_" || fn.Sym().Linkname != "" { + return + } + lsym := base.Ctxt.Lookup(fn.LSym.Name + ".args_stackmap") + nptr := int(fn.Type().ArgWidth() / int64(types.PtrSize)) + bv := bitvec.New(int32(nptr) * 2) + nbitmap := 1 + if fn.Type().NumResults() > 0 { + nbitmap = 2 + } + off := objw.Uint32(lsym, 0, uint32(nbitmap)) + off = objw.Uint32(lsym, off, uint32(bv.N)) + + if ir.IsMethod(fn) { + SetTypeBits(fn.Type().Recvs(), 0, bv) + } + if fn.Type().NumParams() > 0 { + SetTypeBits(fn.Type().Params(), 0, bv) + } + off = objw.BitVec(lsym, off, bv) + + if fn.Type().NumResults() > 0 { + SetTypeBits(fn.Type().Results(), 0, bv) + off = objw.BitVec(lsym, off, bv) + } + + objw.Global(lsym, int32(off), obj.RODATA|obj.LOCAL) +} diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index 993c5a580f4dd..d000b7cce011a 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ b/src/cmd/compile/internal/ssa/value.go @@ -5,6 +5,7 @@ package ssa import ( + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/src" "fmt" @@ -495,3 +496,13 @@ func (v *Value) removeable() bool { // TODO(mdempsky): Shouldn't be necessary; see discussion at golang.org/cl/275756 func (*Value) CanBeAnSSAAux() {} + +// AutoVar returns a *Name and int64 representing the auto variable and offset within it +// where v should be spilled. +func AutoVar(v *Value) (*ir.Name, int64) { + loc := v.Block.Func.RegAlloc[v.ID].(LocalSlot) + if v.Type.Size() > loc.Type.Size() { + v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type) + } + return loc.N, loc.Off +} From de454eef5f47212dc8a9d9c2c8b598fa343d2c2b Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 23 Dec 2020 00:51:28 -0500 Subject: [PATCH 232/474] [dev.regabi] cmd/compile: split out package escape [generated] [git-generate] cd src/cmd/compile/internal/gc rf ' # Trivial min, max defined in escape.go but only used in ssa.go. mv min8 max8 ssa.go # Export package escape API. mv escapes Funcs mv escapeFuncs Batch mv escFmt Fmt mv unsafeUintptrTag UnsafeUintptrNote mv uintptrEscapesTag UintptrEscapesNote mv heapAllocReason HeapAllocReason # Unexport non-API. 
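# (the renames below lowercase types and drop the Esc prefix on
# identifiers that no longer need to be visible outside the new
# package)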
mv EscEdge edge mv EscHole hole mv EscLeaks leaks mv ParseLeaks parseLeaks mv EscLocation location mv EscNote note mv Escape _escape # leave room for escape import, fixed below mv EscFuncUnknown escFuncUnknown mv EscFuncPlanned escFuncPlanned mv EscFuncStarted escFuncStarted mv EscFuncTagged escFuncTagged mv escape.go cmd/compile/internal/escape ' cd ../escape rf ' mv _escape escape ' Change-Id: I3a6d1bfb6eba12bea936354ea1fe9813cbde425c Reviewed-on: https://go-review.googlesource.com/c/go/+/279472 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- .../compile/internal/{gc => escape}/escape.go | 275 +++++++++--------- src/cmd/compile/internal/gc/gsubr.go | 3 +- src/cmd/compile/internal/gc/main.go | 5 +- src/cmd/compile/internal/gc/order.go | 3 +- src/cmd/compile/internal/gc/ssa.go | 14 + src/cmd/compile/internal/gc/subr.go | 3 +- src/cmd/compile/internal/gc/walk.go | 3 +- 7 files changed, 156 insertions(+), 150 deletions(-) rename src/cmd/compile/internal/{gc => escape}/escape.go (90%) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/escape/escape.go similarity index 90% rename from src/cmd/compile/internal/gc/escape.go rename to src/cmd/compile/internal/escape/escape.go index 187313695fa1c..b7cb56b997d28 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -2,18 +2,19 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package escape import ( + "fmt" + "math" + "strings" + "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/src" - "fmt" - "math" - "strings" ) // Escape analysis. @@ -84,8 +85,8 @@ import ( // u[2], etc. However, we do record the implicit dereference involved // in indexing a slice. -type Escape struct { - allLocs []*EscLocation +type escape struct { + allLocs []*location labels map[*types.Sym]labelState // known labels curfn *ir.Func @@ -96,17 +97,17 @@ type Escape struct { // unstructured loop). loopDepth int - heapLoc EscLocation - blankLoc EscLocation + heapLoc location + blankLoc location } -// An EscLocation represents an abstract location that stores a Go +// An location represents an abstract location that stores a Go // variable. -type EscLocation struct { - n ir.Node // represented variable or expression, if any - curfn *ir.Func // enclosing function - edges []EscEdge // incoming edges - loopDepth int // loopDepth at declaration +type location struct { + n ir.Node // represented variable or expression, if any + curfn *ir.Func // enclosing function + edges []edge // incoming edges + loopDepth int // loopDepth at declaration // derefs and walkgen are used during walkOne to track the // minimal dereferences from the walk root. @@ -116,7 +117,7 @@ type EscLocation struct { // dst and dstEdgeindex track the next immediate assignment // destination location during walkone, along with the index // of the edge pointing back to this location. - dst *EscLocation + dst *location dstEdgeIdx int // queued is used by walkAll to track whether this location is @@ -134,18 +135,18 @@ type EscLocation struct { transient bool // paramEsc records the represented parameter's leak set. - paramEsc EscLeaks + paramEsc leaks } -// An EscEdge represents an assignment edge between two Go variables. -type EscEdge struct { - src *EscLocation +// An edge represents an assignment edge between two Go variables. 
+type edge struct { + src *location derefs int // >= -1 - notes *EscNote + notes *note } -// escFmt is called from node printing to print information about escape analysis results. -func escFmt(n ir.Node) string { +// Fmt is called from node printing to print information about escape analysis results. +func Fmt(n ir.Node) string { text := "" switch n.Esc() { case ir.EscUnknown: @@ -164,7 +165,7 @@ func escFmt(n ir.Node) string { text = fmt.Sprintf("esc(%d)", n.Esc()) } - if e, ok := n.Opt().(*EscLocation); ok && e.loopDepth != 0 { + if e, ok := n.Opt().(*location); ok && e.loopDepth != 0 { if text != "" { text += " " } @@ -173,16 +174,16 @@ func escFmt(n ir.Node) string { return text } -// escapeFuncs performs escape analysis on a minimal batch of +// Batch performs escape analysis on a minimal batch of // functions. -func escapeFuncs(fns []*ir.Func, recursive bool) { +func Batch(fns []*ir.Func, recursive bool) { for _, fn := range fns { if fn.Op() != ir.ODCLFUNC { base.Fatalf("unexpected node: %v", fn) } } - var e Escape + var e escape e.heapLoc.escapes = true // Construct data-flow graph from syntax trees. @@ -198,11 +199,11 @@ func escapeFuncs(fns []*ir.Func, recursive bool) { e.finish(fns) } -func (e *Escape) initFunc(fn *ir.Func) { - if fn.Esc() != EscFuncUnknown { +func (e *escape) initFunc(fn *ir.Func) { + if fn.Esc() != escFuncUnknown { base.Fatalf("unexpected node: %v", fn) } - fn.SetEsc(EscFuncPlanned) + fn.SetEsc(escFuncPlanned) if base.Flag.LowerM > 3 { ir.Dump("escAnalyze", fn) } @@ -218,8 +219,8 @@ func (e *Escape) initFunc(fn *ir.Func) { } } -func (e *Escape) walkFunc(fn *ir.Func) { - fn.SetEsc(EscFuncStarted) +func (e *escape) walkFunc(fn *ir.Func) { + fn.SetEsc(escFuncStarted) // Identify labels that mark the head of an unstructured loop. ir.Visit(fn, func(n ir.Node) { @@ -277,7 +278,7 @@ func (e *Escape) walkFunc(fn *ir.Func) { // } // stmt evaluates a single Go statement. -func (e *Escape) stmt(n ir.Node) { +func (e *escape) stmt(n ir.Node) { if n == nil { return } @@ -368,7 +369,7 @@ func (e *Escape) stmt(n ir.Node) { n := n.(*ir.SwitchStmt) typesw := n.Tag != nil && n.Tag.Op() == ir.OTYPESW - var ks []EscHole + var ks []hole for _, cas := range n.Cases { // cases cas := cas.(*ir.CaseStmt) if typesw && n.Tag.(*ir.TypeSwitchGuard).Tag != nil { @@ -456,14 +457,14 @@ func (e *Escape) stmt(n ir.Node) { } } -func (e *Escape) stmts(l ir.Nodes) { +func (e *escape) stmts(l ir.Nodes) { for _, n := range l { e.stmt(n) } } // block is like stmts, but preserves loopDepth. -func (e *Escape) block(l ir.Nodes) { +func (e *escape) block(l ir.Nodes) { old := e.loopDepth e.stmts(l) e.loopDepth = old @@ -471,7 +472,7 @@ func (e *Escape) block(l ir.Nodes) { // expr models evaluating an expression n and flowing the result into // hole k. 
-func (e *Escape) expr(k EscHole, n ir.Node) { +func (e *escape) expr(k hole, n ir.Node) { if n == nil { return } @@ -479,7 +480,7 @@ func (e *Escape) expr(k EscHole, n ir.Node) { e.exprSkipInit(k, n) } -func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { +func (e *escape) exprSkipInit(k hole, n ir.Node) { if n == nil { return } @@ -590,7 +591,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { e.discard(n.X) case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY: - e.call([]EscHole{k}, n, nil) + e.call([]hole{k}, n, nil) case ir.ONEW: n := n.(*ir.UnaryExpr) @@ -627,7 +628,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { // // TODO(mdempsky): Change ks into a callback, so that // we don't have to create this slice? - var ks []EscHole + var ks []hole for i := m.Type.NumResults(); i > 0; i-- { ks = append(ks, e.heapHole()) } @@ -709,7 +710,7 @@ func (e *Escape) exprSkipInit(k EscHole, n ir.Node) { // unsafeValue evaluates a uintptr-typed arithmetic expression looking // for conversions from an unsafe.Pointer. -func (e *Escape) unsafeValue(k EscHole, n ir.Node) { +func (e *escape) unsafeValue(k hole, n ir.Node) { if n.Type().Kind() != types.TUINTPTR { base.Fatalf("unexpected type %v for %v", n.Type(), n) } @@ -751,11 +752,11 @@ func (e *Escape) unsafeValue(k EscHole, n ir.Node) { // discard evaluates an expression n for side-effects, but discards // its value. -func (e *Escape) discard(n ir.Node) { +func (e *escape) discard(n ir.Node) { e.expr(e.discardHole(), n) } -func (e *Escape) discards(l ir.Nodes) { +func (e *escape) discards(l ir.Nodes) { for _, n := range l { e.discard(n) } @@ -763,7 +764,7 @@ func (e *Escape) discards(l ir.Nodes) { // addr evaluates an addressable expression n and returns an EscHole // that represents storing into the represented location. -func (e *Escape) addr(n ir.Node) EscHole { +func (e *escape) addr(n ir.Node) hole { if n == nil || ir.IsBlank(n) { // Can happen in select case, range, maybe others. return e.discardHole() @@ -809,8 +810,8 @@ func (e *Escape) addr(n ir.Node) EscHole { return k } -func (e *Escape) addrs(l ir.Nodes) []EscHole { - var ks []EscHole +func (e *escape) addrs(l ir.Nodes) []hole { + var ks []hole for _, n := range l { ks = append(ks, e.addr(n)) } @@ -818,7 +819,7 @@ func (e *Escape) addrs(l ir.Nodes) []EscHole { } // assign evaluates the assignment dst = src. -func (e *Escape) assign(dst, src ir.Node, why string, where ir.Node) { +func (e *escape) assign(dst, src ir.Node, why string, where ir.Node) { // Filter out some no-op assignments for escape analysis. ignore := dst != nil && src != nil && isSelfAssign(dst, src) if ignore && base.Flag.LowerM != 0 { @@ -836,14 +837,14 @@ func (e *Escape) assign(dst, src ir.Node, why string, where ir.Node) { } } -func (e *Escape) assignHeap(src ir.Node, why string, where ir.Node) { +func (e *escape) assignHeap(src ir.Node, why string, where ir.Node) { e.expr(e.heapHole().note(where, why), src) } // call evaluates a call expressions, including builtin calls. ks // should contain the holes representing where the function callee's // results flows; where is the OGO/ODEFER context of the call, if any. 
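Each result of a call gets its own hole, so escape verdicts are per result rather than per call. A sketch you can check with -gcflags=-m; the names and the sink variable are ours:

package main

var sink *int

func pair() (*int, int) {
	x := new(int) // escapes: it flows out through the pointer result only
	return x, 7
}

func main() {
	p, n := pair()
	sink = p
	_ = n
}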
-func (e *Escape) call(ks []EscHole, call, where ir.Node) { +func (e *escape) call(ks []hole, call, where ir.Node) { topLevelDefer := where != nil && where.Op() == ir.ODEFER && e.loopDepth == 1 if topLevelDefer { // force stack allocation of defer record, unless @@ -851,7 +852,7 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) { where.SetEsc(ir.EscNever) } - argument := func(k EscHole, arg ir.Node) { + argument := func(k hole, arg ir.Node) { if topLevelDefer { // Top level defers arguments don't escape to // heap, but they do need to last until end of @@ -969,7 +970,7 @@ func (e *Escape) call(ks []EscHole, call, where ir.Node) { // ks should contain the holes representing where the function // callee's results flows. fn is the statically-known callee function, // if any. -func (e *Escape) tagHole(ks []EscHole, fn *ir.Name, param *types.Field) EscHole { +func (e *escape) tagHole(ks []hole, fn *ir.Name, param *types.Field) hole { // If this is a dynamic call, we can't rely on param.Note. if fn == nil { return e.heapHole() @@ -981,15 +982,15 @@ func (e *Escape) tagHole(ks []EscHole, fn *ir.Name, param *types.Field) EscHole // Call to previously tagged function. - if param.Note == uintptrEscapesTag { + if param.Note == UintptrEscapesNote { k := e.heapHole() k.uintptrEscapesHack = true return k } - var tagKs []EscHole + var tagKs []hole - esc := ParseLeaks(param.Note) + esc := parseLeaks(param.Note) if x := esc.Heap(); x >= 0 { tagKs = append(tagKs, e.heapHole().shift(x)) } @@ -1010,9 +1011,9 @@ func (e *Escape) tagHole(ks []EscHole, fn *ir.Name, param *types.Field) EscHole // fn has not yet been analyzed, so its parameters and results // should be incorporated directly into the flow graph instead of // relying on its escape analysis tagging. -func (e *Escape) inMutualBatch(fn *ir.Name) bool { - if fn.Defn != nil && fn.Defn.Esc() < EscFuncTagged { - if fn.Defn.Esc() == EscFuncUnknown { +func (e *escape) inMutualBatch(fn *ir.Name) bool { + if fn.Defn != nil && fn.Defn.Esc() < escFuncTagged { + if fn.Defn.Esc() == escFuncUnknown { base.Fatalf("graph inconsistency") } return true @@ -1020,31 +1021,31 @@ func (e *Escape) inMutualBatch(fn *ir.Name) bool { return false } -// An EscHole represents a context for evaluation a Go +// An hole represents a context for evaluation a Go // expression. E.g., when evaluating p in "x = **p", we'd have a hole // with dst==x and derefs==2. -type EscHole struct { - dst *EscLocation +type hole struct { + dst *location derefs int // >= -1 - notes *EscNote + notes *note // uintptrEscapesHack indicates this context is evaluating an // argument for a //go:uintptrescapes function. 
uintptrEscapesHack bool } -type EscNote struct { - next *EscNote +type note struct { + next *note where ir.Node why string } -func (k EscHole) note(where ir.Node, why string) EscHole { +func (k hole) note(where ir.Node, why string) hole { if where == nil || why == "" { base.Fatalf("note: missing where/why") } if base.Flag.LowerM >= 2 || logopt.Enabled() { - k.notes = &EscNote{ + k.notes = ¬e{ next: k.notes, where: where, why: why, @@ -1053,7 +1054,7 @@ func (k EscHole) note(where ir.Node, why string) EscHole { return k } -func (k EscHole) shift(delta int) EscHole { +func (k hole) shift(delta int) hole { k.derefs += delta if k.derefs < -1 { base.Fatalf("derefs underflow: %v", k.derefs) @@ -1061,10 +1062,10 @@ func (k EscHole) shift(delta int) EscHole { return k } -func (k EscHole) deref(where ir.Node, why string) EscHole { return k.shift(1).note(where, why) } -func (k EscHole) addr(where ir.Node, why string) EscHole { return k.shift(-1).note(where, why) } +func (k hole) deref(where ir.Node, why string) hole { return k.shift(1).note(where, why) } +func (k hole) addr(where ir.Node, why string) hole { return k.shift(-1).note(where, why) } -func (k EscHole) dotType(t *types.Type, where ir.Node, why string) EscHole { +func (k hole) dotType(t *types.Type, where ir.Node, why string) hole { if !t.IsInterface() && !types.IsDirectIface(t) { k = k.shift(1) } @@ -1073,7 +1074,7 @@ func (k EscHole) dotType(t *types.Type, where ir.Node, why string) EscHole { // teeHole returns a new hole that flows into each hole of ks, // similar to the Unix tee(1) command. -func (e *Escape) teeHole(ks ...EscHole) EscHole { +func (e *escape) teeHole(ks ...hole) hole { if len(ks) == 0 { return e.discardHole() } @@ -1101,7 +1102,7 @@ func (e *Escape) teeHole(ks ...EscHole) EscHole { return loc.asHole() } -func (e *Escape) dcl(n ir.Node) EscHole { +func (e *escape) dcl(n ir.Node) hole { loc := e.oldLoc(n) loc.loopDepth = e.loopDepth return loc.asHole() @@ -1110,7 +1111,7 @@ func (e *Escape) dcl(n ir.Node) EscHole { // spill allocates a new location associated with expression n, flows // its address to k, and returns a hole that flows values to it. It's // intended for use with most expressions that allocate storage. -func (e *Escape) spill(k EscHole, n ir.Node) EscHole { +func (e *escape) spill(k hole, n ir.Node) hole { loc := e.newLoc(n, true) e.flow(k.addr(n, "spill"), loc) return loc.asHole() @@ -1119,7 +1120,7 @@ func (e *Escape) spill(k EscHole, n ir.Node) EscHole { // later returns a new hole that flows into k, but some time later. // Its main effect is to prevent immediate reuse of temporary // variables introduced during Order. 
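The deref counter on a hole is the only arithmetic involved: deref adds one, addr subtracts one, and anything below -1 is impossible because you cannot take the address of an address. The same bookkeeping as a standalone toy, with types and names of our own:

package main

import "fmt"

type toyHole struct {
	dst    string // destination location
	derefs int    // >= -1, as in the compiler's hole
}

func (k toyHole) shift(delta int) toyHole {
	k.derefs += delta
	if k.derefs < -1 {
		panic("derefs underflow") // mirrors the Fatalf in shift above
	}
	return k
}

func (k toyHole) deref() toyHole { return k.shift(1) }  // one "*"
func (k toyHole) addr() toyHole  { return k.shift(-1) } // one "&"

func main() {
	k := toyHole{dst: "x"}         // evaluating the RHS of "x = ..."
	fmt.Println(k.deref().deref()) // p in "x = **p": {x 2}
	fmt.Println(k.addr())          // y in "x = &y":  {x -1}
}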
-func (e *Escape) later(k EscHole) EscHole { +func (e *escape) later(k hole) hole { loc := e.newLoc(nil, false) e.flow(k, loc) return loc.asHole() @@ -1138,7 +1139,7 @@ func canonicalNode(n ir.Node) ir.Node { return n } -func (e *Escape) newLoc(n ir.Node, transient bool) *EscLocation { +func (e *escape) newLoc(n ir.Node, transient bool) *location { if e.curfn == nil { base.Fatalf("e.curfn isn't set") } @@ -1147,7 +1148,7 @@ func (e *Escape) newLoc(n ir.Node, transient bool) *EscLocation { } n = canonicalNode(n) - loc := &EscLocation{ + loc := &location{ n: n, curfn: e.curfn, loopDepth: e.loopDepth, @@ -1165,23 +1166,23 @@ func (e *Escape) newLoc(n ir.Node, transient bool) *EscLocation { } n.SetOpt(loc) - if why := heapAllocReason(n); why != "" { + if why := HeapAllocReason(n); why != "" { e.flow(e.heapHole().addr(n, why), loc) } } return loc } -func (e *Escape) oldLoc(n ir.Node) *EscLocation { +func (e *escape) oldLoc(n ir.Node) *location { n = canonicalNode(n) - return n.Opt().(*EscLocation) + return n.Opt().(*location) } -func (l *EscLocation) asHole() EscHole { - return EscHole{dst: l} +func (l *location) asHole() hole { + return hole{dst: l} } -func (e *Escape) flow(k EscHole, src *EscLocation) { +func (e *escape) flow(k hole, src *location) { dst := k.dst if dst == &e.blankLoc { return @@ -1206,15 +1207,15 @@ func (e *Escape) flow(k EscHole, src *EscLocation) { } // TODO(mdempsky): Deduplicate edges? - dst.edges = append(dst.edges, EscEdge{src: src, derefs: k.derefs, notes: k.notes}) + dst.edges = append(dst.edges, edge{src: src, derefs: k.derefs, notes: k.notes}) } -func (e *Escape) heapHole() EscHole { return e.heapLoc.asHole() } -func (e *Escape) discardHole() EscHole { return e.blankLoc.asHole() } +func (e *escape) heapHole() hole { return e.heapLoc.asHole() } +func (e *escape) discardHole() hole { return e.blankLoc.asHole() } // walkAll computes the minimal dereferences between all pairs of // locations. -func (e *Escape) walkAll() { +func (e *escape) walkAll() { // We use a work queue to keep track of locations that we need // to visit, and repeatedly walk until we reach a fixed point. // @@ -1224,8 +1225,8 @@ func (e *Escape) walkAll() { // happen at most once. So we take Θ(len(e.allLocs)) walks. // LIFO queue, has enough room for e.allLocs and e.heapLoc. - todo := make([]*EscLocation, 0, len(e.allLocs)+1) - enqueue := func(loc *EscLocation) { + todo := make([]*location, 0, len(e.allLocs)+1) + enqueue := func(loc *location) { if !loc.queued { todo = append(todo, loc) loc.queued = true @@ -1250,7 +1251,7 @@ func (e *Escape) walkAll() { // walkOne computes the minimal number of dereferences from root to // all other locations. -func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLocation)) { +func (e *escape) walkOne(root *location, walkgen uint32, enqueue func(*location)) { // The data flow graph has negative edges (from addressing // operations), so we use the Bellman-Ford algorithm. However, // we don't have to worry about infinite negative cycles since @@ -1260,7 +1261,7 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc root.derefs = 0 root.dst = nil - todo := []*EscLocation{root} // LIFO queue + todo := []*location{root} // LIFO queue for len(todo) > 0 { l := todo[len(todo)-1] todo = todo[:len(todo)-1] @@ -1341,8 +1342,8 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc } // explainPath prints an explanation of how src flows to the walk root. 
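walkOne above is Bellman-Ford with deref counts as (possibly negative) edge weights, relaxed from a LIFO queue until nothing improves. A compact, runnable sketch of that relaxation over a toy graph; the types are simplified versions of our own, and the real walk also records escape reasons and leak sets:

package main

import "fmt"

type loc struct {
	name   string
	derefs int
	in     []flow // incoming assignment edges
}

type flow struct {
	src    *loc
	derefs int // -1 for "&", +1 per "*"
}

// relax computes the minimal derefs from root to every location,
// in the style of walkOne.
func relax(root *loc, all []*loc) {
	const inf = int(^uint(0) >> 1)
	for _, l := range all {
		l.derefs = inf
	}
	root.derefs = 0
	todo := []*loc{root} // LIFO queue, as above
	for len(todo) > 0 {
		l := todo[len(todo)-1]
		todo = todo[:len(todo)-1]
		for _, e := range l.in {
			if d := l.derefs + e.derefs; d < e.src.derefs {
				e.src.derefs = d
				todo = append(todo, e.src)
			}
		}
	}
}

func main() {
	heap := &loc{name: "heap"}
	x := &loc{name: "x"}
	y := &loc{name: "y"}
	heap.in = append(heap.in, flow{src: x, derefs: -1}) // heap = &x
	heap.in = append(heap.in, flow{src: y, derefs: 0})  // heap = y
	relax(heap, []*loc{heap, x, y})
	// A negative distance means the address itself is stored: x escapes.
	fmt.Println("x:", x.derefs, "y:", y.derefs) // x: -1 y: 0
}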
-func (e *Escape) explainPath(root, src *EscLocation) []*logopt.LoggedOpt { - visited := make(map[*EscLocation]bool) +func (e *escape) explainPath(root, src *location) []*logopt.LoggedOpt { + visited := make(map[*location]bool) pos := base.FmtPos(src.n.Pos()) var explanation []*logopt.LoggedOpt for { @@ -1371,7 +1372,7 @@ func (e *Escape) explainPath(root, src *EscLocation) []*logopt.LoggedOpt { return explanation } -func (e *Escape) explainFlow(pos string, dst, srcloc *EscLocation, derefs int, notes *EscNote, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt { +func (e *escape) explainFlow(pos string, dst, srcloc *location, derefs int, notes *note, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt { ops := "&" if derefs >= 0 { ops = strings.Repeat("*", derefs) @@ -1404,7 +1405,7 @@ func (e *Escape) explainFlow(pos string, dst, srcloc *EscLocation, derefs int, n return explanation } -func (e *Escape) explainLoc(l *EscLocation) string { +func (e *escape) explainLoc(l *location) string { if l == &e.heapLoc { return "{heap}" } @@ -1420,7 +1421,7 @@ func (e *Escape) explainLoc(l *EscLocation) string { // outlives reports whether values stored in l may survive beyond // other's lifetime if stack allocated. -func (e *Escape) outlives(l, other *EscLocation) bool { +func (e *escape) outlives(l, other *location) bool { // The heap outlives everything. if l.escapes { return true @@ -1484,7 +1485,7 @@ func containsClosure(f, c *ir.Func) bool { } // leak records that parameter l leaks to sink. -func (l *EscLocation) leakTo(sink *EscLocation, derefs int) { +func (l *location) leakTo(sink *location, derefs int) { // If sink is a result parameter and we can fit return bits // into the escape analysis tag, then record a return leak. if sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn { @@ -1501,10 +1502,10 @@ func (l *EscLocation) leakTo(sink *EscLocation, derefs int) { l.paramEsc.AddHeap(derefs) } -func (e *Escape) finish(fns []*ir.Func) { +func (e *escape) finish(fns []*ir.Func) { // Record parameter tags for package export data. for _, fn := range fns { - fn.SetEsc(EscFuncTagged) + fn.SetEsc(escFuncTagged) narg := 0 for _, fs := range &types.RecvsParams { @@ -1557,47 +1558,47 @@ func (e *Escape) finish(fns []*ir.Func) { } } -func (l *EscLocation) isName(c ir.Class) bool { +func (l *location) isName(c ir.Class) bool { return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class_ == c } const numEscResults = 7 -// An EscLeaks represents a set of assignment flows from a parameter +// An leaks represents a set of assignment flows from a parameter // to the heap or to any of its function's (first numEscResults) // result parameters. -type EscLeaks [1 + numEscResults]uint8 +type leaks [1 + numEscResults]uint8 // Empty reports whether l is an empty set (i.e., no assignment flows). -func (l EscLeaks) Empty() bool { return l == EscLeaks{} } +func (l leaks) Empty() bool { return l == leaks{} } // Heap returns the minimum deref count of any assignment flow from l // to the heap. If no such flows exist, Heap returns -1. -func (l EscLeaks) Heap() int { return l.get(0) } +func (l leaks) Heap() int { return l.get(0) } // Result returns the minimum deref count of any assignment flow from // l to its function's i'th result parameter. If no such flows exist, // Result returns -1. -func (l EscLeaks) Result(i int) int { return l.get(1 + i) } +func (l leaks) Result(i int) int { return l.get(1 + i) } // AddHeap adds an assignment flow from l to the heap. 
-func (l *EscLeaks) AddHeap(derefs int) { l.add(0, derefs) } +func (l *leaks) AddHeap(derefs int) { l.add(0, derefs) } // AddResult adds an assignment flow from l to its function's i'th // result parameter. -func (l *EscLeaks) AddResult(i, derefs int) { l.add(1+i, derefs) } +func (l *leaks) AddResult(i, derefs int) { l.add(1+i, derefs) } -func (l *EscLeaks) setResult(i, derefs int) { l.set(1+i, derefs) } +func (l *leaks) setResult(i, derefs int) { l.set(1+i, derefs) } -func (l EscLeaks) get(i int) int { return int(l[i]) - 1 } +func (l leaks) get(i int) int { return int(l[i]) - 1 } -func (l *EscLeaks) add(i, derefs int) { +func (l *leaks) add(i, derefs int) { if old := l.get(i); old < 0 || derefs < old { l.set(i, derefs) } } -func (l *EscLeaks) set(i, derefs int) { +func (l *leaks) set(i, derefs int) { v := derefs + 1 if v < 0 { base.Fatalf("invalid derefs count: %v", derefs) @@ -1611,7 +1612,7 @@ func (l *EscLeaks) set(i, derefs int) { // Optimize removes result flow paths that are equal in length or // longer than the shortest heap flow path. -func (l *EscLeaks) Optimize() { +func (l *leaks) Optimize() { // If we have a path to the heap, then there's no use in // keeping equal or longer paths elsewhere. if x := l.Heap(); x >= 0 { @@ -1623,10 +1624,10 @@ func (l *EscLeaks) Optimize() { } } -var leakTagCache = map[EscLeaks]string{} +var leakTagCache = map[leaks]string{} // Encode converts l into a binary string for export data. -func (l EscLeaks) Encode() string { +func (l leaks) Encode() string { if l.Heap() == 0 { // Space optimization: empty string encodes more // efficiently in export data. @@ -1645,9 +1646,9 @@ func (l EscLeaks) Encode() string { return s } -// ParseLeaks parses a binary string representing an EscLeaks. -func ParseLeaks(s string) EscLeaks { - var l EscLeaks +// parseLeaks parses a binary string representing an EscLeaks. +func parseLeaks(s string) leaks { + var l leaks if !strings.HasPrefix(s, "esc:") { l.AddHeap(0) return l @@ -1656,31 +1657,17 @@ func ParseLeaks(s string) EscLeaks { return l } -func escapes(all []ir.Node) { - ir.VisitFuncsBottomUp(all, escapeFuncs) +func Funcs(all []ir.Node) { + ir.VisitFuncsBottomUp(all, Batch) } const ( - EscFuncUnknown = 0 + iota - EscFuncPlanned - EscFuncStarted - EscFuncTagged + escFuncUnknown = 0 + iota + escFuncPlanned + escFuncStarted + escFuncTagged ) -func min8(a, b int8) int8 { - if a < b { - return a - } - return b -} - -func max8(a, b int8) int8 { - if a > b { - return a - } - return b -} - // funcSym returns fn.Nname.Sym if no nils are encountered along the way. func funcSym(fn *ir.Func) *types.Sym { if fn == nil || fn.Nname == nil { @@ -1855,9 +1842,9 @@ func mayAffectMemory(n ir.Node) bool { } } -// heapAllocReason returns the reason the given Node must be heap +// HeapAllocReason returns the reason the given Node must be heap // allocated, or the empty string if it doesn't. -func heapAllocReason(n ir.Node) string { +func HeapAllocReason(n ir.Node) string { if n.Type() == nil { return "" } @@ -2064,13 +2051,13 @@ func moveToHeap(n *ir.Name) { // This special tag is applied to uintptr variables // that we believe may hold unsafe.Pointers for // calls into assembly functions. -const unsafeUintptrTag = "unsafe-uintptr" +const UnsafeUintptrNote = "unsafe-uintptr" // This special tag is applied to uintptr parameters of functions // marked go:uintptrescapes. 
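Before the two tag constants that follow, it is worth seeing how small the leaks representation above really is: eight bytes, slot 0 for the heap and slots 1 through 7 for result parameters, each storing derefs+1 so that a zero byte means "no flow". A toy version of that offset-by-one scheme; the names are ours, and the real set also caps stored values at MaxUint8:

package main

import "fmt"

const numResults = 7

// toyLeaks mirrors the byte layout of leaks above: index 0 is the heap
// slot, indexes 1..7 are result slots, and a stored byte v encodes
// "minimal derefs v-1", with 0 meaning no recorded flow.
type toyLeaks [1 + numResults]uint8

func (l toyLeaks) get(i int) int { return int(l[i]) - 1 }

func (l *toyLeaks) add(i, derefs int) {
	if old := l.get(i); old < 0 || derefs < old {
		l[i] = uint8(derefs + 1)
	}
}

func main() {
	var l toyLeaks
	l.add(0, 2) // reaches the heap after two dereferences
	l.add(1, 0) // flows directly into result 0
	fmt.Println(l.get(0), l.get(1), l.get(2)) // 2 0 -1
}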
-const uintptrEscapesTag = "uintptr-escapes" +const UintptrEscapesNote = "uintptr-escapes" -func (e *Escape) paramTag(fn *ir.Func, narg int, f *types.Field) string { +func (e *escape) paramTag(fn *ir.Func, narg int, f *types.Field) string { name := func() string { if f.Sym != nil { return f.Sym.Name @@ -2089,14 +2076,14 @@ func (e *Escape) paramTag(fn *ir.Func, narg int, f *types.Field) string { if base.Flag.LowerM != 0 { base.WarnfAt(f.Pos, "assuming %v is unsafe uintptr", name()) } - return unsafeUintptrTag + return UnsafeUintptrNote } if !f.Type.HasPointers() { // don't bother tagging for scalars return "" } - var esc EscLeaks + var esc leaks // External functions are assumed unsafe, unless // //go:noescape is given before the declaration. @@ -2119,14 +2106,14 @@ func (e *Escape) paramTag(fn *ir.Func, narg int, f *types.Field) string { if base.Flag.LowerM != 0 { base.WarnfAt(f.Pos, "marking %v as escaping uintptr", name()) } - return uintptrEscapesTag + return UintptrEscapesNote } if f.IsDDD() && f.Type.Elem().IsUintptr() { // final argument is ...uintptr. if base.Flag.LowerM != 0 { base.WarnfAt(f.Pos, "marking %v as escaping ...uintptr", name()) } - return uintptrEscapesTag + return UintptrEscapesNote } } @@ -2136,7 +2123,7 @@ func (e *Escape) paramTag(fn *ir.Func, narg int, f *types.Field) string { // Unnamed parameters are unused and therefore do not escape. if f.Sym == nil || f.Sym.IsBlank() { - var esc EscLeaks + var esc leaks return esc.Encode() } diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index f746a358caf5b..81f7956d2e87a 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -32,6 +32,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/escape" "cmd/compile/internal/ir" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" @@ -141,7 +142,7 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { ir.CurFunc = fn typecheck.Stmts(fn.Body) - escapeFuncs([]*ir.Func{fn}, false) + escape.Batch([]*ir.Func{fn}, false) typecheck.Target.Decls = append(typecheck.Target.Decls, fn) diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 7f20d6b8a565c..cda00fb9aebf4 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -10,6 +10,7 @@ import ( "bufio" "bytes" "cmd/compile/internal/base" + "cmd/compile/internal/escape" "cmd/compile/internal/inline" "cmd/compile/internal/ir" "cmd/compile/internal/logopt" @@ -183,7 +184,7 @@ func Main(archInit func(*Arch)) { logopt.LogJsonOption(base.Flag.JSON) } - ir.EscFmt = escFmt + ir.EscFmt = escape.Fmt ir.IsIntrinsicCall = isIntrinsicCall inline.SSADumpInline = ssaDumpInline initSSAEnv() @@ -252,7 +253,7 @@ func Main(archInit func(*Arch)) { // Large values are also moved off stack in escape analysis; // because large values may contain pointers, it must happen early. base.Timer.Start("fe", "escapes") - escapes(typecheck.Target.Decls) + escape.Funcs(typecheck.Target.Decls) // Collect information for go:nowritebarrierrec // checking. This must happen before transformclosure. 
diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 075bcea92cf17..32a355ae6b013 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/escape" "cmd/compile/internal/ir" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" @@ -521,7 +522,7 @@ func (o *Order) call(nn ir.Node) { // Check for "unsafe-uintptr" tag provided by escape analysis. for i, param := range n.X.Type().Params().FieldSlice() { - if param.Note == unsafeUintptrTag || param.Note == uintptrEscapesTag { + if param.Note == escape.UnsafeUintptrNote || param.Note == escape.UintptrEscapesNote { if arg := n.Args[i]; arg.Op() == ir.OSLICELIT { arg := arg.(*ir.CompLitExpr) for _, elt := range arg.List { diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 5c36e922a635f..feb2d0de8f0da 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -7396,3 +7396,17 @@ func callTargetLSym(callee *types.Sym, callerLSym *obj.LSym) *obj.LSym { } return lsym } + +func min8(a, b int8) int8 { + if a < b { + return a + } + return b +} + +func max8(a, b int8) int8 { + if a > b { + return a + } + return b +} diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index f76fb8e24a2b8..cba9bdc253aa6 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/escape" "cmd/compile/internal/inline" "cmd/compile/internal/ir" "cmd/compile/internal/typecheck" @@ -484,7 +485,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym() != nil { inline.InlineCalls(fn) } - escapeFuncs([]*ir.Func{fn}, false) + escape.Batch([]*ir.Func{fn}, false) ir.CurFunc = nil typecheck.Target.Decls = append(typecheck.Target.Decls, fn) diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 73f82f333c09a..9e4de7f804afd 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/base" + "cmd/compile/internal/escape" "cmd/compile/internal/ir" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" @@ -1455,7 +1456,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem()) } if n.Esc() == ir.EscNone { - if why := heapAllocReason(n); why != "" { + if why := escape.HeapAllocReason(n); why != "" { base.Fatalf("%v has EscNone, but %v", n, why) } // var arr [r]T From fbc82f03b104ba9bde67ad202e9cb00a13842dca Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 23 Dec 2020 00:52:53 -0500 Subject: [PATCH 233/474] [dev.regabi] cmd/compile: split out package noder [generated] [git-generate] cd src/cmd/compile/internal/gc rf ' mv ArhdrSize HeaderSize mv arsize ReadHeader mv formathdr FormatHeader mv HeaderSize ReadHeader FormatHeader archive.go mv archive.go cmd/internal/archive mv makePos main.go mv checkDotImports CheckDotImports mv parseFiles ParseFiles mv Pragma pragmas mv PragmaEmbed pragmaEmbed mv PragmaPos pragmaPos mv FuncPragmas funcPragmas mv TypePragmas typePragmas mv fakeRecv noder.funcLit renameinitgen renameinit oldname varEmbed noder.go mv isDriveLetter islocalname findpkg 
myheight importfile \ reservedimports isbadimport \ pkgnotused \ mkpackage clearImports \ CheckDotImports dotImports importDot \ importName \ import.go mv noder _noder mv import.go lex.go lex_test.go noder.go cmd/compile/internal/noder ' cd ../noder rf ' mv _noder noder ' Change-Id: Iac2b856f7b86143c666d818e4b7c5b261cf387d5 Reviewed-on: https://go-review.googlesource.com/c/go/+/279473 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/closure.go | 60 --- src/cmd/compile/internal/gc/dcl.go | 64 --- src/cmd/compile/internal/gc/embed.go | 53 -- src/cmd/compile/internal/gc/init.go | 12 - src/cmd/compile/internal/gc/main.go | 380 +------------- src/cmd/compile/internal/gc/obj.go | 16 +- src/cmd/compile/internal/gc/subr.go | 105 ---- src/cmd/compile/internal/noder/import.go | 493 ++++++++++++++++++ src/cmd/compile/internal/{gc => noder}/lex.go | 17 +- .../internal/{gc => noder}/lex_test.go | 5 +- .../compile/internal/{gc => noder}/noder.go | 225 +++++++- src/cmd/internal/archive/archive.go | 21 + 12 files changed, 735 insertions(+), 716 deletions(-) create mode 100644 src/cmd/compile/internal/noder/import.go rename src/cmd/compile/internal/{gc => noder}/lex.go (95%) rename src/cmd/compile/internal/{gc => noder}/lex_test.go (99%) rename src/cmd/compile/internal/{gc => noder}/noder.go (87%) diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index 29455bffd8f78..4679b6535bcd1 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -7,71 +7,11 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" - "cmd/compile/internal/syntax" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/src" ) -func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node { - xtype := p.typeExpr(expr.Type) - ntype := p.typeExpr(expr.Type) - - fn := ir.NewFunc(p.pos(expr)) - fn.SetIsHiddenClosure(ir.CurFunc != nil) - fn.Nname = ir.NewFuncNameAt(p.pos(expr), ir.BlankNode.Sym(), fn) // filled in by typecheckclosure - fn.Nname.Ntype = xtype - fn.Nname.Defn = fn - - clo := ir.NewClosureExpr(p.pos(expr), fn) - fn.ClosureType = ntype - fn.OClosure = clo - - p.funcBody(fn, expr.Body) - - // closure-specific variables are hanging off the - // ordinary ones in the symbol table; see oldname. - // unhook them. - // make the list of pointers for the closure call. - for _, v := range fn.ClosureVars { - // Unlink from v1; see comment in syntax.go type Param for these fields. - v1 := v.Defn - v1.Name().Innermost = v.Outer - - // If the closure usage of v is not dense, - // we need to make it dense; now that we're out - // of the function in which v appeared, - // look up v.Sym in the enclosing function - // and keep it around for use in the compiled code. - // - // That is, suppose we just finished parsing the innermost - // closure f4 in this code: - // - // func f() { - // v := 1 - // func() { // f2 - // use(v) - // func() { // f3 - // func() { // f4 - // use(v) - // }() - // }() - // }() - // } - // - // At this point v.Outer is f2's v; there is no f3's v. - // To construct the closure f4 from within f3, - // we need to use f3's v and in this case we need to create f3's v. - // We are now in the context of f3, so calling oldname(v.Sym) - // obtains f3's v, creating it if necessary (as it is in the example). - // - // capturevars will decide whether to use v directly or &v. 
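The f2/f3/f4 comment above reads more easily as a program: every closure between the use of v and its declaration needs a capture record, even f3, which never mentions v itself. A runnable restatement of the comment's own example:

package main

import "fmt"

func f() {
	v := 1
	func() { // f2: uses v directly
		fmt.Println(v)
		func() { // f3: never mentions v, but must still carry a
			// capture record so f4 can reach f's v through it
			func() { // f4: uses v; captures chain f4 -> f3 -> f2 -> f
				v++
				fmt.Println(v)
			}()
		}()
	}()
}

func main() { f() } // prints 1, then 2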
- v.Outer = oldname(v.Sym()).(*ir.Name) - } - - return clo -} - // transformclosure is called in a separate phase after escape analysis. // It transform closure bodies to properly reference captured variables. func transformclosure(fn *ir.Func) { diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index e53bba44adc37..aaf5b35057ecf 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -28,70 +28,6 @@ func NoWriteBarrierRecCheck() { var nowritebarrierrecCheck *nowritebarrierrecChecker -// oldname returns the Node that declares symbol s in the current scope. -// If no such Node currently exists, an ONONAME Node is returned instead. -// Automatically creates a new closure variable if the referenced symbol was -// declared in a different (containing) function. -func oldname(s *types.Sym) ir.Node { - if s.Pkg != types.LocalPkg { - return ir.NewIdent(base.Pos, s) - } - - n := ir.AsNode(s.Def) - if n == nil { - // Maybe a top-level declaration will come along later to - // define s. resolve will check s.Def again once all input - // source has been processed. - return ir.NewIdent(base.Pos, s) - } - - if ir.CurFunc != nil && n.Op() == ir.ONAME && n.Name().Curfn != nil && n.Name().Curfn != ir.CurFunc { - // Inner func is referring to var in outer func. - // - // TODO(rsc): If there is an outer variable x and we - // are parsing x := 5 inside the closure, until we get to - // the := it looks like a reference to the outer x so we'll - // make x a closure variable unnecessarily. - n := n.(*ir.Name) - c := n.Name().Innermost - if c == nil || c.Curfn != ir.CurFunc { - // Do not have a closure var for the active closure yet; make one. - c = typecheck.NewName(s) - c.Class_ = ir.PAUTOHEAP - c.SetIsClosureVar(true) - c.SetIsDDD(n.IsDDD()) - c.Defn = n - - // Link into list of active closure variables. - // Popped from list in func funcLit. - c.Outer = n.Name().Innermost - n.Name().Innermost = c - - ir.CurFunc.ClosureVars = append(ir.CurFunc.ClosureVars, c) - } - - // return ref to closure var, not original - return c - } - - return n -} - -// importName is like oldname, -// but it reports an error if sym is from another package and not exported. -func importName(sym *types.Sym) ir.Node { - n := oldname(sym) - if !types.IsExported(sym.Name) && sym.Pkg != types.LocalPkg { - n.SetDiag(true) - base.Errorf("cannot refer to unexported name %s.%s", sym.Pkg.Name, sym.Name) - } - return n -} - -func fakeRecv() *ir.Field { - return ir.NewField(base.Pos, nil, nil, types.FakeRecvType()) -} - // funcsym returns s·f. 
func funcsym(s *types.Sym) *types.Sym { // funcsymsmu here serves to protect not just mutations of funcsyms (below), diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go index 282e718b29bc2..959d8cd7fe335 100644 --- a/src/cmd/compile/internal/gc/embed.go +++ b/src/cmd/compile/internal/gc/embed.go @@ -8,14 +8,12 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/objw" - "cmd/compile/internal/syntax" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" "path" "sort" - "strconv" "strings" ) @@ -26,57 +24,6 @@ const ( embedFiles ) -func varEmbed(p *noder, names []*ir.Name, typ ir.Ntype, exprs []ir.Node, embeds []PragmaEmbed) (newExprs []ir.Node) { - haveEmbed := false - for _, decl := range p.file.DeclList { - imp, ok := decl.(*syntax.ImportDecl) - if !ok { - // imports always come first - break - } - path, _ := strconv.Unquote(imp.Path.Value) - if path == "embed" { - haveEmbed = true - break - } - } - - pos := embeds[0].Pos - if !haveEmbed { - p.errorAt(pos, "invalid go:embed: missing import \"embed\"") - return exprs - } - if base.Flag.Cfg.Embed.Patterns == nil { - p.errorAt(pos, "invalid go:embed: build system did not supply embed configuration") - return exprs - } - if len(names) > 1 { - p.errorAt(pos, "go:embed cannot apply to multiple vars") - return exprs - } - if len(exprs) > 0 { - p.errorAt(pos, "go:embed cannot apply to var with initializer") - return exprs - } - if typ == nil { - // Should not happen, since len(exprs) == 0 now. - p.errorAt(pos, "go:embed cannot apply to var without type") - return exprs - } - if typecheck.DeclContext != ir.PEXTERN { - p.errorAt(pos, "go:embed cannot apply to var inside func") - return exprs - } - - v := names[0] - typecheck.Target.Embeds = append(typecheck.Target.Embeds, v) - v.Embed = new([]ir.Embed) - for _, e := range embeds { - *v.Embed = append(*v.Embed, ir.Embed{Pos: p.makeXPos(e.Pos), Patterns: e.Patterns}) - } - return exprs -} - func embedFileList(v *ir.Name) []string { kind := embedKind(v.Type()) if kind == embedUnknown { diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index da3f40f4e889a..a299b8688b28a 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -13,18 +13,6 @@ import ( "cmd/internal/obj" ) -// A function named init is a special case. -// It is called by the initialization before main is run. -// To make it unique within a package and also uncallable, -// the name, normally "pkg.init", is altered to "pkg.init.0". -var renameinitgen int - -func renameinit() *types.Sym { - s := typecheck.LookupNum("init.", renameinitgen) - renameinitgen++ - return s -} - // fninit makes and returns an initialization record for the package. // See runtime/proc.go:initTask for its layout. 
// The 3 tasks for initialization are: diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index cda00fb9aebf4..7b540d8675aaa 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -14,26 +14,21 @@ import ( "cmd/compile/internal/inline" "cmd/compile/internal/ir" "cmd/compile/internal/logopt" + "cmd/compile/internal/noder" "cmd/compile/internal/ssa" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" - "cmd/internal/bio" "cmd/internal/dwarf" - "cmd/internal/goobj" "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/src" "flag" "fmt" - "go/constant" - "io" "io/ioutil" "log" "os" - "path" "runtime" "sort" - "strconv" "strings" ) @@ -212,7 +207,7 @@ func Main(archInit func(*Arch)) { // Parse input. base.Timer.Start("fe", "parse") - lines := parseFiles(flag.Args()) + lines := noder.ParseFiles(flag.Args()) cgoSymABIs() base.Timer.Stop() base.Timer.AddEvent(int64(lines), "lines") @@ -222,7 +217,7 @@ func Main(archInit func(*Arch)) { typecheck.Package() // With all user code typechecked, it's now safe to verify unused dot imports. - checkDotImports() + noder.CheckDotImports() base.ExitIfErrors() // Build init task. @@ -468,371 +463,6 @@ func readSymABIs(file, myimportpath string) { } } -func arsize(b *bufio.Reader, name string) int { - var buf [ArhdrSize]byte - if _, err := io.ReadFull(b, buf[:]); err != nil { - return -1 - } - aname := strings.Trim(string(buf[0:16]), " ") - if !strings.HasPrefix(aname, name) { - return -1 - } - asize := strings.Trim(string(buf[48:58]), " ") - i, _ := strconv.Atoi(asize) - return i -} - -func isDriveLetter(b byte) bool { - return 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z' -} - -// is this path a local name? begins with ./ or ../ or / -func islocalname(name string) bool { - return strings.HasPrefix(name, "/") || - runtime.GOOS == "windows" && len(name) >= 3 && isDriveLetter(name[0]) && name[1] == ':' && name[2] == '/' || - strings.HasPrefix(name, "./") || name == "." || - strings.HasPrefix(name, "../") || name == ".." -} - -func findpkg(name string) (file string, ok bool) { - if islocalname(name) { - if base.Flag.NoLocalImports { - return "", false - } - - if base.Flag.Cfg.PackageFile != nil { - file, ok = base.Flag.Cfg.PackageFile[name] - return file, ok - } - - // try .a before .6. important for building libraries: - // if there is an array.6 in the array.a library, - // want to find all of array.a, not just array.6. - file = fmt.Sprintf("%s.a", name) - if _, err := os.Stat(file); err == nil { - return file, true - } - file = fmt.Sprintf("%s.o", name) - if _, err := os.Stat(file); err == nil { - return file, true - } - return "", false - } - - // local imports should be canonicalized already. - // don't want to see "encoding/../encoding/base64" - // as different from "encoding/base64". 
- if q := path.Clean(name); q != name { - base.Errorf("non-canonical import path %q (should be %q)", name, q) - return "", false - } - - if base.Flag.Cfg.PackageFile != nil { - file, ok = base.Flag.Cfg.PackageFile[name] - return file, ok - } - - for _, dir := range base.Flag.Cfg.ImportDirs { - file = fmt.Sprintf("%s/%s.a", dir, name) - if _, err := os.Stat(file); err == nil { - return file, true - } - file = fmt.Sprintf("%s/%s.o", dir, name) - if _, err := os.Stat(file); err == nil { - return file, true - } - } - - if objabi.GOROOT != "" { - suffix := "" - suffixsep := "" - if base.Flag.InstallSuffix != "" { - suffixsep = "_" - suffix = base.Flag.InstallSuffix - } else if base.Flag.Race { - suffixsep = "_" - suffix = "race" - } else if base.Flag.MSan { - suffixsep = "_" - suffix = "msan" - } - - file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.a", objabi.GOROOT, objabi.GOOS, objabi.GOARCH, suffixsep, suffix, name) - if _, err := os.Stat(file); err == nil { - return file, true - } - file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.o", objabi.GOROOT, objabi.GOOS, objabi.GOARCH, suffixsep, suffix, name) - if _, err := os.Stat(file); err == nil { - return file, true - } - } - - return "", false -} - -// myheight tracks the local package's height based on packages -// imported so far. -var myheight int - -func importfile(f constant.Value) *types.Pkg { - if f.Kind() != constant.String { - base.Errorf("import path must be a string") - return nil - } - - path_ := constant.StringVal(f) - if len(path_) == 0 { - base.Errorf("import path is empty") - return nil - } - - if isbadimport(path_, false) { - return nil - } - - // The package name main is no longer reserved, - // but we reserve the import path "main" to identify - // the main package, just as we reserve the import - // path "math" to identify the standard math package. 
- if path_ == "main" { - base.Errorf("cannot import \"main\"") - base.ErrorExit() - } - - if base.Ctxt.Pkgpath != "" && path_ == base.Ctxt.Pkgpath { - base.Errorf("import %q while compiling that package (import cycle)", path_) - base.ErrorExit() - } - - if mapped, ok := base.Flag.Cfg.ImportMap[path_]; ok { - path_ = mapped - } - - if path_ == "unsafe" { - return ir.Pkgs.Unsafe - } - - if islocalname(path_) { - if path_[0] == '/' { - base.Errorf("import path cannot be absolute path") - return nil - } - - prefix := base.Ctxt.Pathname - if base.Flag.D != "" { - prefix = base.Flag.D - } - path_ = path.Join(prefix, path_) - - if isbadimport(path_, true) { - return nil - } - } - - file, found := findpkg(path_) - if !found { - base.Errorf("can't find import: %q", path_) - base.ErrorExit() - } - - importpkg := types.NewPkg(path_, "") - if importpkg.Imported { - return importpkg - } - - importpkg.Imported = true - - imp, err := bio.Open(file) - if err != nil { - base.Errorf("can't open import: %q: %v", path_, err) - base.ErrorExit() - } - defer imp.Close() - - // check object header - p, err := imp.ReadString('\n') - if err != nil { - base.Errorf("import %s: reading input: %v", file, err) - base.ErrorExit() - } - - if p == "!\n" { // package archive - // package export block should be first - sz := arsize(imp.Reader, "__.PKGDEF") - if sz <= 0 { - base.Errorf("import %s: not a package file", file) - base.ErrorExit() - } - p, err = imp.ReadString('\n') - if err != nil { - base.Errorf("import %s: reading input: %v", file, err) - base.ErrorExit() - } - } - - if !strings.HasPrefix(p, "go object ") { - base.Errorf("import %s: not a go object file: %s", file, p) - base.ErrorExit() - } - q := fmt.Sprintf("%s %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version, objabi.Expstring()) - if p[10:] != q { - base.Errorf("import %s: object is [%s] expected [%s]", file, p[10:], q) - base.ErrorExit() - } - - // process header lines - for { - p, err = imp.ReadString('\n') - if err != nil { - base.Errorf("import %s: reading input: %v", file, err) - base.ErrorExit() - } - if p == "\n" { - break // header ends with blank line - } - } - - // Expect $$B\n to signal binary import format. - - // look for $$ - var c byte - for { - c, err = imp.ReadByte() - if err != nil { - break - } - if c == '$' { - c, err = imp.ReadByte() - if c == '$' || err != nil { - break - } - } - } - - // get character after $$ - if err == nil { - c, _ = imp.ReadByte() - } - - var fingerprint goobj.FingerprintType - switch c { - case '\n': - base.Errorf("cannot import %s: old export format no longer supported (recompile library)", path_) - return nil - - case 'B': - if base.Debug.Export != 0 { - fmt.Printf("importing %s (%s)\n", path_, file) - } - imp.ReadByte() // skip \n after $$B - - c, err = imp.ReadByte() - if err != nil { - base.Errorf("import %s: reading input: %v", file, err) - base.ErrorExit() - } - - // Indexed format is distinguished by an 'i' byte, - // whereas previous export formats started with 'c', 'd', or 'v'. - if c != 'i' { - base.Errorf("import %s: unexpected package format byte: %v", file, c) - base.ErrorExit() - } - fingerprint = typecheck.ReadImports(importpkg, imp) - - default: - base.Errorf("no import in %q", path_) - base.ErrorExit() - } - - // assume files move (get installed) so don't record the full path - if base.Flag.Cfg.PackageFile != nil { - // If using a packageFile map, assume path_ can be recorded directly. 
- base.Ctxt.AddImport(path_, fingerprint) - } else { - // For file "/Users/foo/go/pkg/darwin_amd64/math.a" record "math.a". - base.Ctxt.AddImport(file[len(file)-len(path_)-len(".a"):], fingerprint) - } - - if importpkg.Height >= myheight { - myheight = importpkg.Height + 1 - } - - return importpkg -} - -func pkgnotused(lineno src.XPos, path string, name string) { - // If the package was imported with a name other than the final - // import path element, show it explicitly in the error message. - // Note that this handles both renamed imports and imports of - // packages containing unconventional package declarations. - // Note that this uses / always, even on Windows, because Go import - // paths always use forward slashes. - elem := path - if i := strings.LastIndex(elem, "/"); i >= 0 { - elem = elem[i+1:] - } - if name == "" || elem == name { - base.ErrorfAt(lineno, "imported and not used: %q", path) - } else { - base.ErrorfAt(lineno, "imported and not used: %q as %s", path, name) - } -} - -func mkpackage(pkgname string) { - if types.LocalPkg.Name == "" { - if pkgname == "_" { - base.Errorf("invalid package name _") - } - types.LocalPkg.Name = pkgname - } else { - if pkgname != types.LocalPkg.Name { - base.Errorf("package %s; expected %s", pkgname, types.LocalPkg.Name) - } - } -} - -func clearImports() { - type importedPkg struct { - pos src.XPos - path string - name string - } - var unused []importedPkg - - for _, s := range types.LocalPkg.Syms { - n := ir.AsNode(s.Def) - if n == nil { - continue - } - if n.Op() == ir.OPACK { - // throw away top-level package name left over - // from previous file. - // leave s->block set to cause redeclaration - // errors if a conflicting top-level name is - // introduced by a different file. - p := n.(*ir.PkgName) - if !p.Used && base.SyntaxErrors() == 0 { - unused = append(unused, importedPkg{p.Pos(), p.Pkg.Path, s.Name}) - } - s.Def = nil - continue - } - if types.IsDotAlias(s) { - // throw away top-level name left over - // from previous import . "x" - // We'll report errors after type checking in checkDotImports. - s.Def = nil - continue - } - } - - sort.Slice(unused, func(i, j int) bool { return unused[i].pos.Before(unused[j].pos) }) - for _, pkg := range unused { - pkgnotused(pkg.pos, pkg.path, pkg.name) - } -} - // recordFlags records the specified command-line flags to be placed // in the DWARF info. func recordFlags(flags ...string) { @@ -922,3 +552,7 @@ func useABIWrapGen(f *ir.Func) bool { return true } + +func makePos(b *src.PosBase, line, col uint) src.XPos { + return base.Ctxt.PosTable.XPos(src.MakePos(b, line, col)) +} diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 1d0a0f7a04cd9..0dbe1da8d43bd 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -10,6 +10,7 @@ import ( "cmd/compile/internal/objw" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" + "cmd/internal/archive" "cmd/internal/bio" "cmd/internal/obj" "cmd/internal/objabi" @@ -25,13 +26,6 @@ import ( "strconv" ) -// architecture-independent object file output -const ArhdrSize = 60 - -func formathdr(arhdr []byte, name string, size int64) { - copy(arhdr[:], fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", name, 0, 0, 0, 0644, size)) -} - // These modes say which kind of object file to generate. 
// The default use of the toolchain is to set both bits, // generating a combined compiler+linker object, one that @@ -93,7 +87,7 @@ func printObjHeader(bout *bio.Writer) { } func startArchiveEntry(bout *bio.Writer) int64 { - var arhdr [ArhdrSize]byte + var arhdr [archive.HeaderSize]byte bout.Write(arhdr[:]) return bout.Offset() } @@ -104,10 +98,10 @@ func finishArchiveEntry(bout *bio.Writer, start int64, name string) { if size&1 != 0 { bout.WriteByte(0) } - bout.MustSeek(start-ArhdrSize, 0) + bout.MustSeek(start-archive.HeaderSize, 0) - var arhdr [ArhdrSize]byte - formathdr(arhdr[:], name, size) + var arhdr [archive.HeaderSize]byte + archive.FormatHeader(arhdr[:], name, size) bout.Write(arhdr[:]) bout.Flush() bout.MustSeek(start+size+(size&1), 0) diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index cba9bdc253aa6..362c5162b64ac 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -13,10 +13,7 @@ import ( "cmd/compile/internal/types" "cmd/internal/src" "fmt" - "strings" "sync" - "unicode" - "unicode/utf8" ) // largeStack is info about a function whose stack frame is too large (rare). @@ -32,55 +29,6 @@ var ( largeStackFrames []largeStack ) -// dotImports tracks all PkgNames that have been dot-imported. -var dotImports []*ir.PkgName - -// find all the exported symbols in package referenced by PkgName, -// and make them available in the current package -func importDot(pack *ir.PkgName) { - if typecheck.DotImportRefs == nil { - typecheck.DotImportRefs = make(map[*ir.Ident]*ir.PkgName) - } - - opkg := pack.Pkg - for _, s := range opkg.Syms { - if s.Def == nil { - if _, ok := typecheck.DeclImporter[s]; !ok { - continue - } - } - if !types.IsExported(s.Name) || strings.ContainsRune(s.Name, 0xb7) { // 0xb7 = center dot - continue - } - s1 := typecheck.Lookup(s.Name) - if s1.Def != nil { - pkgerror := fmt.Sprintf("during import %q", opkg.Path) - typecheck.Redeclared(base.Pos, s1, pkgerror) - continue - } - - id := ir.NewIdent(src.NoXPos, s) - typecheck.DotImportRefs[id] = pack - s1.Def = id - s1.Block = 1 - } - - dotImports = append(dotImports, pack) -} - -// checkDotImports reports errors for any unused dot imports. -func checkDotImports() { - for _, pack := range dotImports { - if !pack.Used { - base.ErrorfAt(pack.Pos(), "imported and not used: %q", pack.Pkg.Path) - } - } - - // No longer needed; release memory. - dotImports = nil - typecheck.DotImportRefs = nil -} - // backingArrayPtrLen extracts the pointer and length from a slice or string. // This constructs two nodes referring to n, so n must be a cheapexpr. func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) { @@ -513,59 +461,6 @@ func ngotype(n ir.Node) *types.Sym { return nil } -// The linker uses the magic symbol prefixes "go." and "type." -// Avoid potential confusion between import paths and symbols -// by rejecting these reserved imports for now. Also, people -// "can do weird things in GOPATH and we'd prefer they didn't -// do _that_ weird thing" (per rsc). See also #4257. 
-var reservedimports = []string{ - "go", - "type", -} - -func isbadimport(path string, allowSpace bool) bool { - if strings.Contains(path, "\x00") { - base.Errorf("import path contains NUL") - return true - } - - for _, ri := range reservedimports { - if path == ri { - base.Errorf("import path %q is reserved and cannot be used", path) - return true - } - } - - for _, r := range path { - if r == utf8.RuneError { - base.Errorf("import path contains invalid UTF-8 sequence: %q", path) - return true - } - - if r < 0x20 || r == 0x7f { - base.Errorf("import path contains control character: %q", path) - return true - } - - if r == '\\' { - base.Errorf("import path contains backslash; use slash: %q", path) - return true - } - - if !allowSpace && unicode.IsSpace(r) { - base.Errorf("import path contains space character: %q", path) - return true - } - - if strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r) { - base.Errorf("import path contains invalid character '%c': %q", r, path) - return true - } - } - - return false -} - // itabType loads the _type field from a runtime.itab struct. func itabType(itab ir.Node) ir.Node { typ := ir.NewSelectorExpr(base.Pos, ir.ODOTPTR, itab, nil) diff --git a/src/cmd/compile/internal/noder/import.go b/src/cmd/compile/internal/noder/import.go new file mode 100644 index 0000000000000..a39be9864b414 --- /dev/null +++ b/src/cmd/compile/internal/noder/import.go @@ -0,0 +1,493 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run mkbuiltin.go + +package noder + +import ( + "fmt" + "go/constant" + "os" + "path" + "runtime" + "sort" + "strings" + "unicode" + "unicode/utf8" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/archive" + "cmd/internal/bio" + "cmd/internal/goobj" + "cmd/internal/objabi" + "cmd/internal/src" +) + +func isDriveLetter(b byte) bool { + return 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z' +} + +// is this path a local name? begins with ./ or ../ or / +func islocalname(name string) bool { + return strings.HasPrefix(name, "/") || + runtime.GOOS == "windows" && len(name) >= 3 && isDriveLetter(name[0]) && name[1] == ':' && name[2] == '/' || + strings.HasPrefix(name, "./") || name == "." || + strings.HasPrefix(name, "../") || name == ".." +} + +func findpkg(name string) (file string, ok bool) { + if islocalname(name) { + if base.Flag.NoLocalImports { + return "", false + } + + if base.Flag.Cfg.PackageFile != nil { + file, ok = base.Flag.Cfg.PackageFile[name] + return file, ok + } + + // try .a before .6. important for building libraries: + // if there is an array.6 in the array.a library, + // want to find all of array.a, not just array.6. + file = fmt.Sprintf("%s.a", name) + if _, err := os.Stat(file); err == nil { + return file, true + } + file = fmt.Sprintf("%s.o", name) + if _, err := os.Stat(file); err == nil { + return file, true + } + return "", false + } + + // local imports should be canonicalized already. + // don't want to see "encoding/../encoding/base64" + // as different from "encoding/base64". 
+ if q := path.Clean(name); q != name { + base.Errorf("non-canonical import path %q (should be %q)", name, q) + return "", false + } + + if base.Flag.Cfg.PackageFile != nil { + file, ok = base.Flag.Cfg.PackageFile[name] + return file, ok + } + + for _, dir := range base.Flag.Cfg.ImportDirs { + file = fmt.Sprintf("%s/%s.a", dir, name) + if _, err := os.Stat(file); err == nil { + return file, true + } + file = fmt.Sprintf("%s/%s.o", dir, name) + if _, err := os.Stat(file); err == nil { + return file, true + } + } + + if objabi.GOROOT != "" { + suffix := "" + suffixsep := "" + if base.Flag.InstallSuffix != "" { + suffixsep = "_" + suffix = base.Flag.InstallSuffix + } else if base.Flag.Race { + suffixsep = "_" + suffix = "race" + } else if base.Flag.MSan { + suffixsep = "_" + suffix = "msan" + } + + file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.a", objabi.GOROOT, objabi.GOOS, objabi.GOARCH, suffixsep, suffix, name) + if _, err := os.Stat(file); err == nil { + return file, true + } + file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.o", objabi.GOROOT, objabi.GOOS, objabi.GOARCH, suffixsep, suffix, name) + if _, err := os.Stat(file); err == nil { + return file, true + } + } + + return "", false +} + +// myheight tracks the local package's height based on packages +// imported so far. +var myheight int + +func importfile(f constant.Value) *types.Pkg { + if f.Kind() != constant.String { + base.Errorf("import path must be a string") + return nil + } + + path_ := constant.StringVal(f) + if len(path_) == 0 { + base.Errorf("import path is empty") + return nil + } + + if isbadimport(path_, false) { + return nil + } + + // The package name main is no longer reserved, + // but we reserve the import path "main" to identify + // the main package, just as we reserve the import + // path "math" to identify the standard math package. 
+ if path_ == "main" { + base.Errorf("cannot import \"main\"") + base.ErrorExit() + } + + if base.Ctxt.Pkgpath != "" && path_ == base.Ctxt.Pkgpath { + base.Errorf("import %q while compiling that package (import cycle)", path_) + base.ErrorExit() + } + + if mapped, ok := base.Flag.Cfg.ImportMap[path_]; ok { + path_ = mapped + } + + if path_ == "unsafe" { + return ir.Pkgs.Unsafe + } + + if islocalname(path_) { + if path_[0] == '/' { + base.Errorf("import path cannot be absolute path") + return nil + } + + prefix := base.Ctxt.Pathname + if base.Flag.D != "" { + prefix = base.Flag.D + } + path_ = path.Join(prefix, path_) + + if isbadimport(path_, true) { + return nil + } + } + + file, found := findpkg(path_) + if !found { + base.Errorf("can't find import: %q", path_) + base.ErrorExit() + } + + importpkg := types.NewPkg(path_, "") + if importpkg.Imported { + return importpkg + } + + importpkg.Imported = true + + imp, err := bio.Open(file) + if err != nil { + base.Errorf("can't open import: %q: %v", path_, err) + base.ErrorExit() + } + defer imp.Close() + + // check object header + p, err := imp.ReadString('\n') + if err != nil { + base.Errorf("import %s: reading input: %v", file, err) + base.ErrorExit() + } + + if p == "!\n" { // package archive + // package export block should be first + sz := archive.ReadHeader(imp.Reader, "__.PKGDEF") + if sz <= 0 { + base.Errorf("import %s: not a package file", file) + base.ErrorExit() + } + p, err = imp.ReadString('\n') + if err != nil { + base.Errorf("import %s: reading input: %v", file, err) + base.ErrorExit() + } + } + + if !strings.HasPrefix(p, "go object ") { + base.Errorf("import %s: not a go object file: %s", file, p) + base.ErrorExit() + } + q := fmt.Sprintf("%s %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version, objabi.Expstring()) + if p[10:] != q { + base.Errorf("import %s: object is [%s] expected [%s]", file, p[10:], q) + base.ErrorExit() + } + + // process header lines + for { + p, err = imp.ReadString('\n') + if err != nil { + base.Errorf("import %s: reading input: %v", file, err) + base.ErrorExit() + } + if p == "\n" { + break // header ends with blank line + } + } + + // Expect $$B\n to signal binary import format. + + // look for $$ + var c byte + for { + c, err = imp.ReadByte() + if err != nil { + break + } + if c == '$' { + c, err = imp.ReadByte() + if c == '$' || err != nil { + break + } + } + } + + // get character after $$ + if err == nil { + c, _ = imp.ReadByte() + } + + var fingerprint goobj.FingerprintType + switch c { + case '\n': + base.Errorf("cannot import %s: old export format no longer supported (recompile library)", path_) + return nil + + case 'B': + if base.Debug.Export != 0 { + fmt.Printf("importing %s (%s)\n", path_, file) + } + imp.ReadByte() // skip \n after $$B + + c, err = imp.ReadByte() + if err != nil { + base.Errorf("import %s: reading input: %v", file, err) + base.ErrorExit() + } + + // Indexed format is distinguished by an 'i' byte, + // whereas previous export formats started with 'c', 'd', or 'v'. + if c != 'i' { + base.Errorf("import %s: unexpected package format byte: %v", file, c) + base.ErrorExit() + } + fingerprint = typecheck.ReadImports(importpkg, imp) + + default: + base.Errorf("no import in %q", path_) + base.ErrorExit() + } + + // assume files move (get installed) so don't record the full path + if base.Flag.Cfg.PackageFile != nil { + // If using a packageFile map, assume path_ can be recorded directly. 
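The archive handling in importfile is small because the format is: a 60-byte classic ar member header, name in bytes 0:16 and decimal size in bytes 48:58, exactly the fields ReadHeader and FormatHeader exchange. A self-contained sketch of one round trip; the entry contents below are fabricated for illustration:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseHeader extracts the entry name and size from a classic ar
// member header, using the same offsets as ReadHeader above.
func parseHeader(hdr [60]byte) (name string, size int) {
	name = strings.Trim(string(hdr[0:16]), " ")
	size, _ = strconv.Atoi(strings.Trim(string(hdr[48:58]), " "))
	return name, size
}

func main() {
	// Build a header the way FormatHeader does: 16-byte name, then
	// mtime/uid/gid/mode, a 10-byte decimal size, and a closing "`\n".
	var hdr [60]byte
	copy(hdr[:], fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", "__.PKGDEF", 0, 0, 0, 0644, 1234))
	name, size := parseHeader(hdr)
	fmt.Println(name, size) // __.PKGDEF 1234
}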
+ base.Ctxt.AddImport(path_, fingerprint) + } else { + // For file "/Users/foo/go/pkg/darwin_amd64/math.a" record "math.a". + base.Ctxt.AddImport(file[len(file)-len(path_)-len(".a"):], fingerprint) + } + + if importpkg.Height >= myheight { + myheight = importpkg.Height + 1 + } + + return importpkg +} + +// The linker uses the magic symbol prefixes "go." and "type." +// Avoid potential confusion between import paths and symbols +// by rejecting these reserved imports for now. Also, people +// "can do weird things in GOPATH and we'd prefer they didn't +// do _that_ weird thing" (per rsc). See also #4257. +var reservedimports = []string{ + "go", + "type", +} + +func isbadimport(path string, allowSpace bool) bool { + if strings.Contains(path, "\x00") { + base.Errorf("import path contains NUL") + return true + } + + for _, ri := range reservedimports { + if path == ri { + base.Errorf("import path %q is reserved and cannot be used", path) + return true + } + } + + for _, r := range path { + if r == utf8.RuneError { + base.Errorf("import path contains invalid UTF-8 sequence: %q", path) + return true + } + + if r < 0x20 || r == 0x7f { + base.Errorf("import path contains control character: %q", path) + return true + } + + if r == '\\' { + base.Errorf("import path contains backslash; use slash: %q", path) + return true + } + + if !allowSpace && unicode.IsSpace(r) { + base.Errorf("import path contains space character: %q", path) + return true + } + + if strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r) { + base.Errorf("import path contains invalid character '%c': %q", r, path) + return true + } + } + + return false +} + +func pkgnotused(lineno src.XPos, path string, name string) { + // If the package was imported with a name other than the final + // import path element, show it explicitly in the error message. + // Note that this handles both renamed imports and imports of + // packages containing unconventional package declarations. + // Note that this uses / always, even on Windows, because Go import + // paths always use forward slashes. + elem := path + if i := strings.LastIndex(elem, "/"); i >= 0 { + elem = elem[i+1:] + } + if name == "" || elem == name { + base.ErrorfAt(lineno, "imported and not used: %q", path) + } else { + base.ErrorfAt(lineno, "imported and not used: %q as %s", path, name) + } +} + +func mkpackage(pkgname string) { + if types.LocalPkg.Name == "" { + if pkgname == "_" { + base.Errorf("invalid package name _") + } + types.LocalPkg.Name = pkgname + } else { + if pkgname != types.LocalPkg.Name { + base.Errorf("package %s; expected %s", pkgname, types.LocalPkg.Name) + } + } +} + +func clearImports() { + type importedPkg struct { + pos src.XPos + path string + name string + } + var unused []importedPkg + + for _, s := range types.LocalPkg.Syms { + n := ir.AsNode(s.Def) + if n == nil { + continue + } + if n.Op() == ir.OPACK { + // throw away top-level package name left over + // from previous file. + // leave s->block set to cause redeclaration + // errors if a conflicting top-level name is + // introduced by a different file. + p := n.(*ir.PkgName) + if !p.Used && base.SyntaxErrors() == 0 { + unused = append(unused, importedPkg{p.Pos(), p.Pkg.Path, s.Name}) + } + s.Def = nil + continue + } + if types.IsDotAlias(s) { + // throw away top-level name left over + // from previous import . "x" + // We'll report errors after type checking in checkDotImports. 
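Restating the validation in isbadimport above: beyond the NUL and reserved-word checks, every rune of the path must pass a small predicate. As a standalone sketch (same banned-character set as above, not shared with the compiler):

    package main

    import (
        "fmt"
        "strings"
        "unicode"
        "unicode/utf8"
    )

    // okImportRune restates isbadimport's per-rune rules.
    func okImportRune(r rune, allowSpace bool) bool {
        switch {
        case r == utf8.RuneError: // invalid UTF-8 sequence
            return false
        case r < 0x20 || r == 0x7f: // control characters
            return false
        case r == '\\': // backslash; import paths use forward slashes
            return false
        case !allowSpace && unicode.IsSpace(r):
            return false
        case strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r):
            return false
        }
        return true
    }

    func main() {
        fmt.Println(okImportRune('a', false), okImportRune(';', false)) // true false
    }

clearImports then continues clearing the names left over from dot imports: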
+ s.Def = nil + continue + } + } + + sort.Slice(unused, func(i, j int) bool { return unused[i].pos.Before(unused[j].pos) }) + for _, pkg := range unused { + pkgnotused(pkg.pos, pkg.path, pkg.name) + } +} + +// CheckDotImports reports errors for any unused dot imports. +func CheckDotImports() { + for _, pack := range dotImports { + if !pack.Used { + base.ErrorfAt(pack.Pos(), "imported and not used: %q", pack.Pkg.Path) + } + } + + // No longer needed; release memory. + dotImports = nil + typecheck.DotImportRefs = nil +} + +// dotImports tracks all PkgNames that have been dot-imported. +var dotImports []*ir.PkgName + +// find all the exported symbols in package referenced by PkgName, +// and make them available in the current package +func importDot(pack *ir.PkgName) { + if typecheck.DotImportRefs == nil { + typecheck.DotImportRefs = make(map[*ir.Ident]*ir.PkgName) + } + + opkg := pack.Pkg + for _, s := range opkg.Syms { + if s.Def == nil { + if _, ok := typecheck.DeclImporter[s]; !ok { + continue + } + } + if !types.IsExported(s.Name) || strings.ContainsRune(s.Name, 0xb7) { // 0xb7 = center dot + continue + } + s1 := typecheck.Lookup(s.Name) + if s1.Def != nil { + pkgerror := fmt.Sprintf("during import %q", opkg.Path) + typecheck.Redeclared(base.Pos, s1, pkgerror) + continue + } + + id := ir.NewIdent(src.NoXPos, s) + typecheck.DotImportRefs[id] = pack + s1.Def = id + s1.Block = 1 + } + + dotImports = append(dotImports, pack) +} + +// importName is like oldname, +// but it reports an error if sym is from another package and not exported. +func importName(sym *types.Sym) ir.Node { + n := oldname(sym) + if !types.IsExported(sym.Name) && sym.Pkg != types.LocalPkg { + n.SetDiag(true) + base.Errorf("cannot refer to unexported name %s.%s", sym.Pkg.Name, sym.Name) + } + return n +} diff --git a/src/cmd/compile/internal/gc/lex.go b/src/cmd/compile/internal/noder/lex.go similarity index 95% rename from src/cmd/compile/internal/gc/lex.go rename to src/cmd/compile/internal/noder/lex.go index 39d73867e4d79..1095f3344a3f9 100644 --- a/src/cmd/compile/internal/gc/lex.go +++ b/src/cmd/compile/internal/noder/lex.go @@ -2,22 +2,17 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package noder import ( - "cmd/compile/internal/base" + "fmt" + "strings" + "cmd/compile/internal/ir" "cmd/compile/internal/syntax" "cmd/internal/objabi" - "cmd/internal/src" - "fmt" - "strings" ) -func makePos(b *src.PosBase, line, col uint) src.XPos { - return base.Ctxt.PosTable.XPos(src.MakePos(b, line, col)) -} - func isSpace(c rune) bool { return c == ' ' || c == '\t' || c == '\n' || c == '\r' } @@ -27,7 +22,7 @@ func isQuoted(s string) bool { } const ( - FuncPragmas = ir.Nointerface | + funcPragmas = ir.Nointerface | ir.Noescape | ir.Norace | ir.Nosplit | @@ -40,7 +35,7 @@ const ( ir.Nowritebarrierrec | ir.Yeswritebarrierrec - TypePragmas = ir.NotInHeap + typePragmas = ir.NotInHeap ) func pragmaFlag(verb string) ir.PragmaFlag { diff --git a/src/cmd/compile/internal/gc/lex_test.go b/src/cmd/compile/internal/noder/lex_test.go similarity index 99% rename from src/cmd/compile/internal/gc/lex_test.go rename to src/cmd/compile/internal/noder/lex_test.go index b2081a1732bbb..85a3f06759ad7 100644 --- a/src/cmd/compile/internal/gc/lex_test.go +++ b/src/cmd/compile/internal/noder/lex_test.go @@ -2,13 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
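The dot-import machinery above deserves one concrete illustration: importDot pre-binds every exported name of the imported package in the file scope through an ir.Ident stub, and CheckDotImports later reports the import as unused if none of those stubs was ever touched. In user code the feature is simply (plain Go, unrelated to the compiler internals):

    package main

    import . "strings" // dot import: exported names become directly visible

    func main() {
        // ToUpper is strings.ToUpper, reached through the file-scope
        // binding that the dot import introduces.
        println(ToUpper("gopher"))
    }

The rename of the lexer files continues below.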
-package gc
+package noder

 import (
-	"cmd/compile/internal/syntax"
 	"reflect"
 	"runtime"
 	"testing"
+
+	"cmd/compile/internal/syntax"
 )

 func eq(a, b []string) bool {
diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/noder/noder.go
similarity index 87%
rename from src/cmd/compile/internal/gc/noder.go
rename to src/cmd/compile/internal/noder/noder.go
index 3e8703f0507d4..a684673c8f490 100644
--- a/src/cmd/compile/internal/gc/noder.go
+++ b/src/cmd/compile/internal/noder/noder.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

-package gc
+package noder

 import (
 	"fmt"
@@ -25,11 +25,11 @@ import (
 	"cmd/internal/src"
 )

-// parseFiles concurrently parses files into *syntax.File structures.
+// ParseFiles concurrently parses files into *syntax.File structures.
 // Each declaration in every *syntax.File is converted to a syntax tree
 // and its root represented by *Node is appended to Target.Decls.
 // Returns the total count of parsed lines.
-func parseFiles(filenames []string) uint {
+func ParseFiles(filenames []string) uint {
 	noders := make([]*noder, 0, len(filenames))
 	// Limit the number of simultaneously open files.
 	sem := make(chan struct{}, runtime.GOMAXPROCS(0)+10)
@@ -257,7 +257,7 @@ func (p *noder) node() {
 	p.setlineno(p.file.PkgName)
 	mkpackage(p.file.PkgName.Value)

-	if pragma, ok := p.file.Pragma.(*Pragma); ok {
+	if pragma, ok := p.file.Pragma.(*pragmas); ok {
 		pragma.Flag &^= ir.GoBuildPragma
 		p.checkUnused(pragma)
 	}
@@ -323,7 +323,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) {
 		return // avoid follow-on errors if there was a syntax error
 	}

-	if pragma, ok := imp.Pragma.(*Pragma); ok {
+	if pragma, ok := imp.Pragma.(*pragmas); ok {
 		p.checkUnused(pragma)
 	}

@@ -383,7 +383,7 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node {
 		exprs = p.exprList(decl.Values)
 	}

-	if pragma, ok := decl.Pragma.(*Pragma); ok {
+	if pragma, ok := decl.Pragma.(*pragmas); ok {
 		if len(pragma.Embeds) > 0 {
 			if !p.importedEmbed {
 				// This check can't be done when building the list pragma.Embeds
@@ -422,7 +422,7 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node {
 		}
 	}

-	if pragma, ok := decl.Pragma.(*Pragma); ok {
+	if pragma, ok := decl.Pragma.(*pragmas); ok {
 		p.checkUnused(pragma)
 	}

@@ -477,10 +477,10 @@ func (p *noder) typeDecl(decl *syntax.TypeDecl) ir.Node {
 	n.Ntype = typ
 	n.SetAlias(decl.Alias)

-	if pragma, ok := decl.Pragma.(*Pragma); ok {
+	if pragma, ok := decl.Pragma.(*pragmas); ok {
 		if !decl.Alias {
-			n.SetPragma(pragma.Flag & TypePragmas)
-			pragma.Flag &^= TypePragmas
+			n.SetPragma(pragma.Flag & typePragmas)
+			pragma.Flag &^= typePragmas
 		}
 		p.checkUnused(pragma)
 	}
@@ -532,12 +532,12 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node {
 	f.Nname.Defn = f
 	f.Nname.Ntype = t

-	if pragma, ok := fun.Pragma.(*Pragma); ok {
-		f.Pragma = pragma.Flag & FuncPragmas
+	if pragma, ok := fun.Pragma.(*pragmas); ok {
+		f.Pragma = pragma.Flag & funcPragmas
 		if pragma.Flag&ir.Systemstack != 0 && pragma.Flag&ir.Nosplit != 0 {
 			base.ErrorfAt(f.Pos(), "go:nosplit and go:systemstack cannot be combined")
 		}
-		pragma.Flag &^= FuncPragmas
+		pragma.Flag &^= funcPragmas
 		p.checkUnused(pragma)
 	}

@@ -1525,24 +1525,24 @@ var allowedStdPragmas = map[string]bool{
 	"go:generate": true,
}

-// *Pragma is the value stored in a syntax.Pragma during parsing.
-type Pragma struct {
+// *pragmas is the value stored in a syntax.Pragma during parsing.
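The type declared next collects compiler directives while parsing. A directive is an ordinary comment immediately preceding a declaration, for example (a minimal compilable sketch; go:noinline is one of the bits covered by the funcPragmas mask above):

    package p

    //go:noinline
    func hot() int { return 42 }

The parser's pragma callback ORs the directive's flag into Flag and records its position in Pos; funcDecl later masks the collected bits with funcPragmas onto the function, and checkUnused reports any directive that never attached to a declaration as a "misplaced compiler directive".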
+type pragmas struct { Flag ir.PragmaFlag // collected bits - Pos []PragmaPos // position of each individual flag - Embeds []PragmaEmbed + Pos []pragmaPos // position of each individual flag + Embeds []pragmaEmbed } -type PragmaPos struct { +type pragmaPos struct { Flag ir.PragmaFlag Pos syntax.Pos } -type PragmaEmbed struct { +type pragmaEmbed struct { Pos syntax.Pos Patterns []string } -func (p *noder) checkUnused(pragma *Pragma) { +func (p *noder) checkUnused(pragma *pragmas) { for _, pos := range pragma.Pos { if pos.Flag&pragma.Flag != 0 { p.errorAt(pos.Pos, "misplaced compiler directive") @@ -1555,7 +1555,7 @@ func (p *noder) checkUnused(pragma *Pragma) { } } -func (p *noder) checkUnusedDuringParse(pragma *Pragma) { +func (p *noder) checkUnusedDuringParse(pragma *pragmas) { for _, pos := range pragma.Pos { if pos.Flag&pragma.Flag != 0 { p.error(syntax.Error{Pos: pos.Pos, Msg: "misplaced compiler directive"}) @@ -1570,9 +1570,9 @@ func (p *noder) checkUnusedDuringParse(pragma *Pragma) { // pragma is called concurrently if files are parsed concurrently. func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.Pragma) syntax.Pragma { - pragma, _ := old.(*Pragma) + pragma, _ := old.(*pragmas) if pragma == nil { - pragma = new(Pragma) + pragma = new(pragmas) } if text == "" { @@ -1626,7 +1626,7 @@ func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.P p.error(syntax.Error{Pos: pos, Msg: "usage: //go:embed pattern..."}) break } - pragma.Embeds = append(pragma.Embeds, PragmaEmbed{pos, args}) + pragma.Embeds = append(pragma.Embeds, pragmaEmbed{pos, args}) case strings.HasPrefix(text, "go:cgo_import_dynamic "): // This is permitted for general use because Solaris @@ -1665,7 +1665,7 @@ func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.P p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s is not allowed in the standard library", verb)}) } pragma.Flag |= flag - pragma.Pos = append(pragma.Pos, PragmaPos{flag, pos}) + pragma.Pos = append(pragma.Pos, pragmaPos{flag, pos}) } return pragma @@ -1761,3 +1761,178 @@ func parseGoEmbed(args string) ([]string, error) { } return list, nil } + +func fakeRecv() *ir.Field { + return ir.NewField(base.Pos, nil, nil, types.FakeRecvType()) +} + +func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node { + xtype := p.typeExpr(expr.Type) + ntype := p.typeExpr(expr.Type) + + fn := ir.NewFunc(p.pos(expr)) + fn.SetIsHiddenClosure(ir.CurFunc != nil) + fn.Nname = ir.NewFuncNameAt(p.pos(expr), ir.BlankNode.Sym(), fn) // filled in by typecheckclosure + fn.Nname.Ntype = xtype + fn.Nname.Defn = fn + + clo := ir.NewClosureExpr(p.pos(expr), fn) + fn.ClosureType = ntype + fn.OClosure = clo + + p.funcBody(fn, expr.Body) + + // closure-specific variables are hanging off the + // ordinary ones in the symbol table; see oldname. + // unhook them. + // make the list of pointers for the closure call. + for _, v := range fn.ClosureVars { + // Unlink from v1; see comment in syntax.go type Param for these fields. + v1 := v.Defn + v1.Name().Innermost = v.Outer + + // If the closure usage of v is not dense, + // we need to make it dense; now that we're out + // of the function in which v appeared, + // look up v.Sym in the enclosing function + // and keep it around for use in the compiled code. 
+ // + // That is, suppose we just finished parsing the innermost + // closure f4 in this code: + // + // func f() { + // v := 1 + // func() { // f2 + // use(v) + // func() { // f3 + // func() { // f4 + // use(v) + // }() + // }() + // }() + // } + // + // At this point v.Outer is f2's v; there is no f3's v. + // To construct the closure f4 from within f3, + // we need to use f3's v and in this case we need to create f3's v. + // We are now in the context of f3, so calling oldname(v.Sym) + // obtains f3's v, creating it if necessary (as it is in the example). + // + // capturevars will decide whether to use v directly or &v. + v.Outer = oldname(v.Sym()).(*ir.Name) + } + + return clo +} + +// A function named init is a special case. +// It is called by the initialization before main is run. +// To make it unique within a package and also uncallable, +// the name, normally "pkg.init", is altered to "pkg.init.0". +var renameinitgen int + +func renameinit() *types.Sym { + s := typecheck.LookupNum("init.", renameinitgen) + renameinitgen++ + return s +} + +// oldname returns the Node that declares symbol s in the current scope. +// If no such Node currently exists, an ONONAME Node is returned instead. +// Automatically creates a new closure variable if the referenced symbol was +// declared in a different (containing) function. +func oldname(s *types.Sym) ir.Node { + if s.Pkg != types.LocalPkg { + return ir.NewIdent(base.Pos, s) + } + + n := ir.AsNode(s.Def) + if n == nil { + // Maybe a top-level declaration will come along later to + // define s. resolve will check s.Def again once all input + // source has been processed. + return ir.NewIdent(base.Pos, s) + } + + if ir.CurFunc != nil && n.Op() == ir.ONAME && n.Name().Curfn != nil && n.Name().Curfn != ir.CurFunc { + // Inner func is referring to var in outer func. + // + // TODO(rsc): If there is an outer variable x and we + // are parsing x := 5 inside the closure, until we get to + // the := it looks like a reference to the outer x so we'll + // make x a closure variable unnecessarily. + n := n.(*ir.Name) + c := n.Name().Innermost + if c == nil || c.Curfn != ir.CurFunc { + // Do not have a closure var for the active closure yet; make one. + c = typecheck.NewName(s) + c.Class_ = ir.PAUTOHEAP + c.SetIsClosureVar(true) + c.SetIsDDD(n.IsDDD()) + c.Defn = n + + // Link into list of active closure variables. + // Popped from list in func funcLit. 
+ c.Outer = n.Name().Innermost + n.Name().Innermost = c + + ir.CurFunc.ClosureVars = append(ir.CurFunc.ClosureVars, c) + } + + // return ref to closure var, not original + return c + } + + return n +} + +func varEmbed(p *noder, names []*ir.Name, typ ir.Ntype, exprs []ir.Node, embeds []pragmaEmbed) (newExprs []ir.Node) { + haveEmbed := false + for _, decl := range p.file.DeclList { + imp, ok := decl.(*syntax.ImportDecl) + if !ok { + // imports always come first + break + } + path, _ := strconv.Unquote(imp.Path.Value) + if path == "embed" { + haveEmbed = true + break + } + } + + pos := embeds[0].Pos + if !haveEmbed { + p.errorAt(pos, "invalid go:embed: missing import \"embed\"") + return exprs + } + if base.Flag.Cfg.Embed.Patterns == nil { + p.errorAt(pos, "invalid go:embed: build system did not supply embed configuration") + return exprs + } + if len(names) > 1 { + p.errorAt(pos, "go:embed cannot apply to multiple vars") + return exprs + } + if len(exprs) > 0 { + p.errorAt(pos, "go:embed cannot apply to var with initializer") + return exprs + } + if typ == nil { + // Should not happen, since len(exprs) == 0 now. + p.errorAt(pos, "go:embed cannot apply to var without type") + return exprs + } + if typecheck.DeclContext != ir.PEXTERN { + p.errorAt(pos, "go:embed cannot apply to var inside func") + return exprs + } + + v := names[0] + typecheck.Target.Embeds = append(typecheck.Target.Embeds, v) + v.Embed = new([]ir.Embed) + for _, e := range embeds { + *v.Embed = append(*v.Embed, ir.Embed{Pos: p.makeXPos(e.Pos), Patterns: e.Patterns}) + } + return exprs +} diff --git a/src/cmd/internal/archive/archive.go b/src/cmd/internal/archive/archive.go index 762e888a04d11..e9b25fe240a34 100644 --- a/src/cmd/internal/archive/archive.go +++ b/src/cmd/internal/archive/archive.go @@ -464,3 +464,24 @@ func exactly16Bytes(s string) string { s += sixteenSpaces[:16-len(s)] return s } + +// architecture-independent object file output +const HeaderSize = 60 + +func ReadHeader(b *bufio.Reader, name string) int { + var buf [HeaderSize]byte + if _, err := io.ReadFull(b, buf[:]); err != nil { + return -1 + } + aname := strings.Trim(string(buf[0:16]), " ") + if !strings.HasPrefix(aname, name) { + return -1 + } + asize := strings.Trim(string(buf[48:58]), " ") + i, _ := strconv.Atoi(asize) + return i +} + +func FormatHeader(arhdr []byte, name string, size int64) { + copy(arhdr[:], fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", name, 0, 0, 0, 0644, size)) +} From 4dfb5d91a86dfcc046ced03cee6e844df0751e41 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 23 Dec 2020 00:54:11 -0500 Subject: [PATCH 234/474] [dev.regabi] cmd/compile: split out package staticdata [generated] [git-generate] cd src/cmd/compile/internal/gc rf ' # Export API and move to its own files. 
mv addrsym InitAddr mv pfuncsym InitFunc mv slicesym InitSlice mv slicebytes InitSliceBytes mv stringsym StringSym mv funcsym FuncSym mv makefuncsym NeedFuncSym mv dumpfuncsyms WriteFuncSyms mv InitAddr InitFunc InitSlice InitSliceBytes stringSymPrefix \ StringSym fileStringSym slicedataGen slicedata dstringdata \ funcsyms FuncSym NeedFuncSym WriteFuncSyms \ data.go mv initEmbed WriteEmbed mv dumpembeds obj.go mv data.go embed.go cmd/compile/internal/staticdata ' Change-Id: I209c5e597c8acfa29a48527695a9ddc1e9ea8e6a Reviewed-on: https://go-review.googlesource.com/c/go/+/279474 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/dcl.go | 51 --- src/cmd/compile/internal/gc/go.go | 7 - src/cmd/compile/internal/gc/main.go | 3 +- src/cmd/compile/internal/gc/obj.go | 233 +------------- src/cmd/compile/internal/gc/sinit.go | 29 +- src/cmd/compile/internal/gc/ssa.go | 7 +- src/cmd/compile/internal/gc/walk.go | 3 +- src/cmd/compile/internal/staticdata/data.go | 296 ++++++++++++++++++ .../internal/{gc => staticdata}/embed.go | 23 +- 9 files changed, 336 insertions(+), 316 deletions(-) create mode 100644 src/cmd/compile/internal/staticdata/data.go rename src/cmd/compile/internal/{gc => staticdata}/embed.go (95%) diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index aaf5b35057ecf..7b2bf5b606a9c 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -28,57 +28,6 @@ func NoWriteBarrierRecCheck() { var nowritebarrierrecCheck *nowritebarrierrecChecker -// funcsym returns s·f. -func funcsym(s *types.Sym) *types.Sym { - // funcsymsmu here serves to protect not just mutations of funcsyms (below), - // but also the package lookup of the func sym name, - // since this function gets called concurrently from the backend. - // There are no other concurrent package lookups in the backend, - // except for the types package, which is protected separately. - // Reusing funcsymsmu to also cover this package lookup - // avoids a general, broader, expensive package lookup mutex. - // Note makefuncsym also does package look-up of func sym names, - // but that it is only called serially, from the front end. - funcsymsmu.Lock() - sf, existed := s.Pkg.LookupOK(ir.FuncSymName(s)) - // Don't export s·f when compiling for dynamic linking. - // When dynamically linking, the necessary function - // symbols will be created explicitly with makefuncsym. - // See the makefuncsym comment for details. - if !base.Ctxt.Flag_dynlink && !existed { - funcsyms = append(funcsyms, s) - } - funcsymsmu.Unlock() - return sf -} - -// makefuncsym ensures that s·f is exported. -// It is only used with -dynlink. -// When not compiling for dynamic linking, -// the funcsyms are created as needed by -// the packages that use them. -// Normally we emit the s·f stubs as DUPOK syms, -// but DUPOK doesn't work across shared library boundaries. -// So instead, when dynamic linking, we only create -// the s·f stubs in s's package. -func makefuncsym(s *types.Sym) { - if !base.Ctxt.Flag_dynlink { - base.Fatalf("makefuncsym dynlink") - } - if s.IsBlank() { - return - } - if base.Flag.CompilingRuntime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") { - // runtime.getg(), getclosureptr(), getcallerpc(), and - // getcallersp() are not real functions and so do not - // get funcsyms. 
- return - } - if _, existed := s.Pkg.LookupOK(ir.FuncSymName(s)); !existed { - funcsyms = append(funcsyms, s) - } -} - type nowritebarrierrecChecker struct { // extraCalls contains extra function calls that may not be // visible during later analysis. It maps from the ODCLFUNC of diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index c979edcdf8372..6f97d43fef885 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -7,20 +7,13 @@ package gc import ( "cmd/compile/internal/objw" "cmd/compile/internal/ssa" - "cmd/compile/internal/types" "cmd/internal/obj" - "sync" ) var pragcgobuf [][]string var zerosize int64 -var ( - funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym) - funcsyms []*types.Sym -) - // interface to back end type Arch struct { diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 7b540d8675aaa..bb6ace6562800 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -16,6 +16,7 @@ import ( "cmd/compile/internal/logopt" "cmd/compile/internal/noder" "cmd/compile/internal/ssa" + "cmd/compile/internal/staticdata" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/dwarf" @@ -194,7 +195,7 @@ func Main(archInit func(*Arch)) { typecheck.Target = new(ir.Package) - typecheck.NeedFuncSym = makefuncsym + typecheck.NeedFuncSym = staticdata.NeedFuncSym typecheck.NeedITab = func(t, iface *types.Type) { itabname(t, iface) } typecheck.NeedRuntimeType = addsignat // TODO(rsc): typenamesym for lock? diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 0dbe1da8d43bd..50935d4e98c46 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -8,22 +8,16 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/objw" + "cmd/compile/internal/staticdata" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/archive" "cmd/internal/bio" "cmd/internal/obj" "cmd/internal/objabi" - "cmd/internal/src" - "crypto/sha256" "encoding/json" "fmt" "go/constant" - "io" - "io/ioutil" - "os" - "sort" - "strconv" ) // These modes say which kind of object file to generate. @@ -117,7 +111,7 @@ func dumpdata() { numDecls := len(typecheck.Target.Decls) dumpglobls(typecheck.Target.Externs) - dumpfuncsyms() + staticdata.WriteFuncSyms() addptabs() numExports := len(typecheck.Target.Exports) addsignats(typecheck.Target.Externs) @@ -270,17 +264,6 @@ func dumpglobls(externs []ir.Node) { } } -func dumpfuncsyms() { - sort.Slice(funcsyms, func(i, j int) bool { - return funcsyms[i].LinksymName() < funcsyms[j].LinksymName() - }) - for _, s := range funcsyms { - sf := s.Pkg.Lookup(ir.FuncSymName(s)).Linksym() - objw.SymPtr(sf, 0, s.Linksym(), 0) - objw.Global(sf, int32(types.PtrSize), obj.DUPOK|obj.RODATA) - } -} - // addGCLocals adds gcargs, gclocals, gcregs, and stack object symbols to Ctxt.Data. // // This is done during the sequential phase after compilation, since @@ -307,210 +290,6 @@ func addGCLocals() { } } -const ( - stringSymPrefix = "go.string." - stringSymPattern = ".gostring.%d.%x" -) - -// stringsym returns a symbol containing the string s. -// The symbol contains the string data, not a string header. -func stringsym(pos src.XPos, s string) (data *obj.LSym) { - var symname string - if len(s) > 100 { - // Huge strings are hashed to avoid long names in object files. 
- // Indulge in some paranoia by writing the length of s, too, - // as protection against length extension attacks. - // Same pattern is known to fileStringSym below. - h := sha256.New() - io.WriteString(h, s) - symname = fmt.Sprintf(stringSymPattern, len(s), h.Sum(nil)) - } else { - // Small strings get named directly by their contents. - symname = strconv.Quote(s) - } - - symdata := base.Ctxt.Lookup(stringSymPrefix + symname) - if !symdata.OnList() { - off := dstringdata(symdata, 0, s, pos, "string") - objw.Global(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL) - symdata.Set(obj.AttrContentAddressable, true) - } - - return symdata -} - -// fileStringSym returns a symbol for the contents and the size of file. -// If readonly is true, the symbol shares storage with any literal string -// or other file with the same content and is placed in a read-only section. -// If readonly is false, the symbol is a read-write copy separate from any other, -// for use as the backing store of a []byte. -// The content hash of file is copied into hash. (If hash is nil, nothing is copied.) -// The returned symbol contains the data itself, not a string header. -func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.LSym, int64, error) { - f, err := os.Open(file) - if err != nil { - return nil, 0, err - } - defer f.Close() - info, err := f.Stat() - if err != nil { - return nil, 0, err - } - if !info.Mode().IsRegular() { - return nil, 0, fmt.Errorf("not a regular file") - } - size := info.Size() - if size <= 1*1024 { - data, err := ioutil.ReadAll(f) - if err != nil { - return nil, 0, err - } - if int64(len(data)) != size { - return nil, 0, fmt.Errorf("file changed between reads") - } - var sym *obj.LSym - if readonly { - sym = stringsym(pos, string(data)) - } else { - sym = slicedata(pos, string(data)).Sym().Linksym() - } - if len(hash) > 0 { - sum := sha256.Sum256(data) - copy(hash, sum[:]) - } - return sym, size, nil - } - if size > 2e9 { - // ggloblsym takes an int32, - // and probably the rest of the toolchain - // can't handle such big symbols either. - // See golang.org/issue/9862. - return nil, 0, fmt.Errorf("file too large") - } - - // File is too big to read and keep in memory. - // Compute hash if needed for read-only content hashing or if the caller wants it. - var sum []byte - if readonly || len(hash) > 0 { - h := sha256.New() - n, err := io.Copy(h, f) - if err != nil { - return nil, 0, err - } - if n != size { - return nil, 0, fmt.Errorf("file changed between reads") - } - sum = h.Sum(nil) - copy(hash, sum) - } - - var symdata *obj.LSym - if readonly { - symname := fmt.Sprintf(stringSymPattern, size, sum) - symdata = base.Ctxt.Lookup(stringSymPrefix + symname) - if !symdata.OnList() { - info := symdata.NewFileInfo() - info.Name = file - info.Size = size - objw.Global(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL) - // Note: AttrContentAddressable cannot be set here, - // because the content-addressable-handling code - // does not know about file symbols. - } - } else { - // Emit a zero-length data symbol - // and then fix up length and content to use file. 
- symdata = slicedata(pos, "").Sym().Linksym() - symdata.Size = size - symdata.Type = objabi.SNOPTRDATA - info := symdata.NewFileInfo() - info.Name = file - info.Size = size - } - - return symdata, size, nil -} - -var slicedataGen int - -func slicedata(pos src.XPos, s string) *ir.Name { - slicedataGen++ - symname := fmt.Sprintf(".gobytes.%d", slicedataGen) - sym := types.LocalPkg.Lookup(symname) - symnode := typecheck.NewName(sym) - sym.Def = symnode - - lsym := sym.Linksym() - off := dstringdata(lsym, 0, s, pos, "slice") - objw.Global(lsym, int32(off), obj.NOPTR|obj.LOCAL) - - return symnode -} - -func slicebytes(nam *ir.Name, off int64, s string) { - if nam.Op() != ir.ONAME { - base.Fatalf("slicebytes %v", nam) - } - slicesym(nam, off, slicedata(nam.Pos(), s), int64(len(s))) -} - -func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int { - // Objects that are too large will cause the data section to overflow right away, - // causing a cryptic error message by the linker. Check for oversize objects here - // and provide a useful error message instead. - if int64(len(t)) > 2e9 { - base.ErrorfAt(pos, "%v with length %v is too big", what, len(t)) - return 0 - } - - s.WriteString(base.Ctxt, int64(off), len(t), t) - return off + len(t) -} - -// slicesym writes a static slice symbol {&arr, lencap, lencap} to n+noff. -// slicesym does not modify n. -func slicesym(n *ir.Name, noff int64, arr *ir.Name, lencap int64) { - s := n.Sym().Linksym() - if arr.Op() != ir.ONAME { - base.Fatalf("slicesym non-name arr %v", arr) - } - s.WriteAddr(base.Ctxt, noff, types.PtrSize, arr.Sym().Linksym(), 0) - s.WriteInt(base.Ctxt, noff+types.SliceLenOffset, types.PtrSize, lencap) - s.WriteInt(base.Ctxt, noff+types.SliceCapOffset, types.PtrSize, lencap) -} - -// addrsym writes the static address of a to n. a must be an ONAME. -// Neither n nor a is modified. -func addrsym(n *ir.Name, noff int64, a *ir.Name, aoff int64) { - if n.Op() != ir.ONAME { - base.Fatalf("addrsym n op %v", n.Op()) - } - if n.Sym() == nil { - base.Fatalf("addrsym nil n sym") - } - if a.Op() != ir.ONAME { - base.Fatalf("addrsym a op %v", a.Op()) - } - s := n.Sym().Linksym() - s.WriteAddr(base.Ctxt, noff, types.PtrSize, a.Sym().Linksym(), aoff) -} - -// pfuncsym writes the static address of f to n. f must be a global function. -// Neither n nor f is modified. -func pfuncsym(n *ir.Name, noff int64, f *ir.Name) { - if n.Op() != ir.ONAME { - base.Fatalf("pfuncsym n op %v", n.Op()) - } - if n.Sym() == nil { - base.Fatalf("pfuncsym nil n sym") - } - if f.Class_ != ir.PFUNC { - base.Fatalf("pfuncsym class not PFUNC %d", f.Class_) - } - s := n.Sym().Linksym() - s.WriteAddr(base.Ctxt, noff, types.PtrSize, funcsym(f.Sym()).Linksym(), 0) -} - // litsym writes the static literal c to n. // Neither n nor c is modified. 
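Among the helpers moving out here, slicesym is the one with nonobvious layout: it emits the three words of a static slice header, {&arr, len, cap}, with len and cap both set to lencap. That shape is easy to confirm at the language level (a self-contained check, unrelated to the compiler):

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        var arr [4]byte
        s := arr[0:3:4]
        // A slice value is exactly the three words slicesym writes:
        // {&arr, len, cap}; here {&arr[0], 3, 4}.
        fmt.Println(unsafe.Pointer(&s[0]) == unsafe.Pointer(&arr[0]), len(s), cap(s))
        fmt.Println(unsafe.Sizeof(s)) // three words: 24 bytes on 64-bit
    }

litsym, which stays behind in obj.go, covers the scalar and string literal cases: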
func litsym(n *ir.Name, noff int64, c ir.Node, wid int) { @@ -558,7 +337,7 @@ func litsym(n *ir.Name, noff int64, c ir.Node, wid int) { case constant.String: i := constant.StringVal(u) - symdata := stringsym(n.Pos(), i) + symdata := staticdata.StringSym(n.Pos(), i) s.WriteAddr(base.Ctxt, noff, types.PtrSize, symdata, 0) s.WriteInt(base.Ctxt, noff+int64(types.PtrSize), types.PtrSize, int64(len(i))) @@ -588,3 +367,9 @@ func ggloblnod(nam ir.Node) { s.Pkg = "_" } } + +func dumpembeds() { + for _, v := range typecheck.Target.Embeds { + staticdata.WriteEmbed(v) + } +} diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 26591ad5abf29..d818be94a40c7 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -7,6 +7,7 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/staticdata" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" @@ -76,7 +77,7 @@ func (s *InitSchedule) tryStaticInit(nn ir.Node) bool { func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Type) bool { if rn.Class_ == ir.PFUNC { // TODO if roff != 0 { panic } - pfuncsym(l, loff, rn) + staticdata.InitFunc(l, loff, rn) return true } if rn.Class_ != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg { @@ -130,7 +131,7 @@ func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *type r := r.(*ir.AddrExpr) if a := r.X; a.Op() == ir.ONAME { a := a.(*ir.Name) - addrsym(l, loff, a, 0) + staticdata.InitAddr(l, loff, a, 0) return true } @@ -139,14 +140,14 @@ func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *type switch r.X.Op() { case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT: // copy pointer - addrsym(l, loff, s.inittemps[r], 0) + staticdata.InitAddr(l, loff, s.inittemps[r], 0) return true } case ir.OSLICELIT: r := r.(*ir.CompLitExpr) // copy slice - slicesym(l, loff, s.inittemps[r], r.Len) + staticdata.InitSlice(l, loff, s.inittemps[r], r.Len) return true case ir.OARRAYLIT, ir.OSTRUCTLIT: @@ -207,7 +208,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type case ir.OADDR: r := r.(*ir.AddrExpr) if name, offset, ok := stataddr(r.X); ok { - addrsym(l, loff, name, offset) + staticdata.InitAddr(l, loff, name, offset) return true } fallthrough @@ -220,7 +221,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type a := staticname(r.X.Type()) s.inittemps[r] = a - addrsym(l, loff, a, 0) + staticdata.InitAddr(l, loff, a, 0) // Init underlying literal. if !s.staticassign(a, 0, r.X, a.Type()) { @@ -234,7 +235,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type r := r.(*ir.ConvExpr) if l.Class_ == ir.PEXTERN && r.X.Op() == ir.OLITERAL { sval := ir.StringVal(r.X) - slicebytes(l, loff, sval) + staticdata.InitSliceBytes(l, loff, sval) return true } @@ -246,7 +247,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type ta.SetNoalg(true) a := staticname(ta) s.inittemps[r] = a - slicesym(l, loff, a, r.Len) + staticdata.InitSlice(l, loff, a, r.Len) // Fall through to init underlying array. l = a loff = 0 @@ -284,7 +285,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type // Closures with no captured variables are globals, // so the assignment can be done at link time. 
// TODO if roff != 0 { panic } - pfuncsym(l, loff, r.Func.Nname) + staticdata.InitFunc(l, loff, r.Func.Nname) return true } closuredebugruntimecheck(r) @@ -321,7 +322,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type // Create a copy of l to modify while we emit data. // Emit itab, advance offset. - addrsym(l, loff, itab.X.(*ir.Name), 0) + staticdata.InitAddr(l, loff, itab.X.(*ir.Name), 0) // Emit data. if types.IsDirectIface(val.Type()) { @@ -342,7 +343,7 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type if !s.staticassign(a, 0, val, val.Type()) { s.append(ir.NewAssignStmt(base.Pos, a, val)) } - addrsym(l, loff+int64(types.PtrSize), a, 0) + staticdata.InitAddr(l, loff+int64(types.PtrSize), a, 0) } return true @@ -638,7 +639,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) if !ok || name.Class_ != ir.PEXTERN { base.Fatalf("slicelit: %v", var_) } - slicesym(name, offset, vstat, t.NumElem()) + staticdata.InitSlice(name, offset, vstat, t.NumElem()) return } @@ -1138,7 +1139,7 @@ func genAsStatic(as *ir.AssignStmt) { return case ir.OMETHEXPR: r := r.(*ir.MethodExpr) - pfuncsym(name, offset, r.FuncName()) + staticdata.InitFunc(name, offset, r.FuncName()) return case ir.ONAME: r := r.(*ir.Name) @@ -1146,7 +1147,7 @@ func genAsStatic(as *ir.AssignStmt) { base.Fatalf("genAsStatic %+v", as) } if r.Class_ == ir.PFUNC { - pfuncsym(name, offset, r) + staticdata.InitFunc(name, offset, r) return } } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index feb2d0de8f0da..51eeb9315ac52 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -21,6 +21,7 @@ import ( "cmd/compile/internal/liveness" "cmd/compile/internal/objw" "cmd/compile/internal/ssa" + "cmd/compile/internal/staticdata" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" @@ -2115,13 +2116,13 @@ func (s *state) expr(n ir.Node) *ssa.Value { return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb) case ir.OMETHEXPR: n := n.(*ir.MethodExpr) - sym := funcsym(n.FuncName().Sym()).Linksym() + sym := staticdata.FuncSym(n.FuncName().Sym()).Linksym() return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb) case ir.ONAME: n := n.(*ir.Name) if n.Class_ == ir.PFUNC { // "value" of a function is the address of the function's closure - sym := funcsym(n.Sym()).Linksym() + sym := staticdata.FuncSym(n.Sym()).Linksym() return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb) } if s.canSSA(n) { @@ -7160,7 +7161,7 @@ func (e *ssafn) StringData(s string) *obj.LSym { if e.strings == nil { e.strings = make(map[string]*obj.LSym) } - data := stringsym(e.curfn.Pos(), s) + data := staticdata.StringSym(e.curfn.Pos(), s) e.strings[s] = data return data } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 9e4de7f804afd..9c2484f3dc29f 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -8,6 +8,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/escape" "cmd/compile/internal/ir" + "cmd/compile/internal/staticdata" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" @@ -526,7 +527,7 @@ func walkexpr(n ir.Node, init *ir.Nodes) ir.Node { // Emit string symbol now to avoid emitting // any concurrently during the backend. 
if v := n.Val(); v.Kind() == constant.String { - _ = stringsym(n.Pos(), constant.StringVal(v)) + _ = staticdata.StringSym(n.Pos(), constant.StringVal(v)) } } diff --git a/src/cmd/compile/internal/staticdata/data.go b/src/cmd/compile/internal/staticdata/data.go new file mode 100644 index 0000000000000..7627aaa11a115 --- /dev/null +++ b/src/cmd/compile/internal/staticdata/data.go @@ -0,0 +1,296 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package staticdata + +import ( + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "os" + "sort" + "strconv" + "sync" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/objw" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/src" +) + +// InitAddr writes the static address of a to n. a must be an ONAME. +// Neither n nor a is modified. +func InitAddr(n *ir.Name, noff int64, a *ir.Name, aoff int64) { + if n.Op() != ir.ONAME { + base.Fatalf("addrsym n op %v", n.Op()) + } + if n.Sym() == nil { + base.Fatalf("addrsym nil n sym") + } + if a.Op() != ir.ONAME { + base.Fatalf("addrsym a op %v", a.Op()) + } + s := n.Sym().Linksym() + s.WriteAddr(base.Ctxt, noff, types.PtrSize, a.Sym().Linksym(), aoff) +} + +// InitFunc writes the static address of f to n. f must be a global function. +// Neither n nor f is modified. +func InitFunc(n *ir.Name, noff int64, f *ir.Name) { + if n.Op() != ir.ONAME { + base.Fatalf("pfuncsym n op %v", n.Op()) + } + if n.Sym() == nil { + base.Fatalf("pfuncsym nil n sym") + } + if f.Class_ != ir.PFUNC { + base.Fatalf("pfuncsym class not PFUNC %d", f.Class_) + } + s := n.Sym().Linksym() + s.WriteAddr(base.Ctxt, noff, types.PtrSize, FuncSym(f.Sym()).Linksym(), 0) +} + +// InitSlice writes a static slice symbol {&arr, lencap, lencap} to n+noff. +// InitSlice does not modify n. +func InitSlice(n *ir.Name, noff int64, arr *ir.Name, lencap int64) { + s := n.Sym().Linksym() + if arr.Op() != ir.ONAME { + base.Fatalf("slicesym non-name arr %v", arr) + } + s.WriteAddr(base.Ctxt, noff, types.PtrSize, arr.Sym().Linksym(), 0) + s.WriteInt(base.Ctxt, noff+types.SliceLenOffset, types.PtrSize, lencap) + s.WriteInt(base.Ctxt, noff+types.SliceCapOffset, types.PtrSize, lencap) +} + +func InitSliceBytes(nam *ir.Name, off int64, s string) { + if nam.Op() != ir.ONAME { + base.Fatalf("slicebytes %v", nam) + } + InitSlice(nam, off, slicedata(nam.Pos(), s), int64(len(s))) +} + +const ( + stringSymPrefix = "go.string." + stringSymPattern = ".gostring.%d.%x" +) + +// StringSym returns a symbol containing the string s. +// The symbol contains the string data, not a string header. +func StringSym(pos src.XPos, s string) (data *obj.LSym) { + var symname string + if len(s) > 100 { + // Huge strings are hashed to avoid long names in object files. + // Indulge in some paranoia by writing the length of s, too, + // as protection against length extension attacks. + // Same pattern is known to fileStringSym below. + h := sha256.New() + io.WriteString(h, s) + symname = fmt.Sprintf(stringSymPattern, len(s), h.Sum(nil)) + } else { + // Small strings get named directly by their contents. 
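The naming rule, pulled out of the surrounding function for clarity (a standalone sketch using the same stringSymPattern and 100-byte threshold as above):

    package main

    import (
        "crypto/sha256"
        "fmt"
        "io"
        "strconv"
    )

    // symName restates StringSym's rule: short strings are named by their
    // quoted contents; long ones by length plus a SHA-256 digest, keeping
    // object-file symbol names bounded.
    func symName(s string) string {
        if len(s) > 100 {
            h := sha256.New()
            io.WriteString(h, s)
            return fmt.Sprintf(".gostring.%d.%x", len(s), h.Sum(nil))
        }
        return strconv.Quote(s)
    }

    func main() {
        fmt.Println(symName("hi")) // "hi"
    }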
+ symname = strconv.Quote(s) + } + + symdata := base.Ctxt.Lookup(stringSymPrefix + symname) + if !symdata.OnList() { + off := dstringdata(symdata, 0, s, pos, "string") + objw.Global(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL) + symdata.Set(obj.AttrContentAddressable, true) + } + + return symdata +} + +// fileStringSym returns a symbol for the contents and the size of file. +// If readonly is true, the symbol shares storage with any literal string +// or other file with the same content and is placed in a read-only section. +// If readonly is false, the symbol is a read-write copy separate from any other, +// for use as the backing store of a []byte. +// The content hash of file is copied into hash. (If hash is nil, nothing is copied.) +// The returned symbol contains the data itself, not a string header. +func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.LSym, int64, error) { + f, err := os.Open(file) + if err != nil { + return nil, 0, err + } + defer f.Close() + info, err := f.Stat() + if err != nil { + return nil, 0, err + } + if !info.Mode().IsRegular() { + return nil, 0, fmt.Errorf("not a regular file") + } + size := info.Size() + if size <= 1*1024 { + data, err := ioutil.ReadAll(f) + if err != nil { + return nil, 0, err + } + if int64(len(data)) != size { + return nil, 0, fmt.Errorf("file changed between reads") + } + var sym *obj.LSym + if readonly { + sym = StringSym(pos, string(data)) + } else { + sym = slicedata(pos, string(data)).Sym().Linksym() + } + if len(hash) > 0 { + sum := sha256.Sum256(data) + copy(hash, sum[:]) + } + return sym, size, nil + } + if size > 2e9 { + // ggloblsym takes an int32, + // and probably the rest of the toolchain + // can't handle such big symbols either. + // See golang.org/issue/9862. + return nil, 0, fmt.Errorf("file too large") + } + + // File is too big to read and keep in memory. + // Compute hash if needed for read-only content hashing or if the caller wants it. + var sum []byte + if readonly || len(hash) > 0 { + h := sha256.New() + n, err := io.Copy(h, f) + if err != nil { + return nil, 0, err + } + if n != size { + return nil, 0, fmt.Errorf("file changed between reads") + } + sum = h.Sum(nil) + copy(hash, sum) + } + + var symdata *obj.LSym + if readonly { + symname := fmt.Sprintf(stringSymPattern, size, sum) + symdata = base.Ctxt.Lookup(stringSymPrefix + symname) + if !symdata.OnList() { + info := symdata.NewFileInfo() + info.Name = file + info.Size = size + objw.Global(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL) + // Note: AttrContentAddressable cannot be set here, + // because the content-addressable-handling code + // does not know about file symbols. + } + } else { + // Emit a zero-length data symbol + // and then fix up length and content to use file. 
+ symdata = slicedata(pos, "").Sym().Linksym() + symdata.Size = size + symdata.Type = objabi.SNOPTRDATA + info := symdata.NewFileInfo() + info.Name = file + info.Size = size + } + + return symdata, size, nil +} + +var slicedataGen int + +func slicedata(pos src.XPos, s string) *ir.Name { + slicedataGen++ + symname := fmt.Sprintf(".gobytes.%d", slicedataGen) + sym := types.LocalPkg.Lookup(symname) + symnode := typecheck.NewName(sym) + sym.Def = symnode + + lsym := sym.Linksym() + off := dstringdata(lsym, 0, s, pos, "slice") + objw.Global(lsym, int32(off), obj.NOPTR|obj.LOCAL) + + return symnode +} + +func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int { + // Objects that are too large will cause the data section to overflow right away, + // causing a cryptic error message by the linker. Check for oversize objects here + // and provide a useful error message instead. + if int64(len(t)) > 2e9 { + base.ErrorfAt(pos, "%v with length %v is too big", what, len(t)) + return 0 + } + + s.WriteString(base.Ctxt, int64(off), len(t), t) + return off + len(t) +} + +var ( + funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym) + funcsyms []*types.Sym +) + +// FuncSym returns s·f. +func FuncSym(s *types.Sym) *types.Sym { + // funcsymsmu here serves to protect not just mutations of funcsyms (below), + // but also the package lookup of the func sym name, + // since this function gets called concurrently from the backend. + // There are no other concurrent package lookups in the backend, + // except for the types package, which is protected separately. + // Reusing funcsymsmu to also cover this package lookup + // avoids a general, broader, expensive package lookup mutex. + // Note makefuncsym also does package look-up of func sym names, + // but that it is only called serially, from the front end. + funcsymsmu.Lock() + sf, existed := s.Pkg.LookupOK(ir.FuncSymName(s)) + // Don't export s·f when compiling for dynamic linking. + // When dynamically linking, the necessary function + // symbols will be created explicitly with makefuncsym. + // See the makefuncsym comment for details. + if !base.Ctxt.Flag_dynlink && !existed { + funcsyms = append(funcsyms, s) + } + funcsymsmu.Unlock() + return sf +} + +// NeedFuncSym ensures that s·f is exported. +// It is only used with -dynlink. +// When not compiling for dynamic linking, +// the funcsyms are created as needed by +// the packages that use them. +// Normally we emit the s·f stubs as DUPOK syms, +// but DUPOK doesn't work across shared library boundaries. +// So instead, when dynamic linking, we only create +// the s·f stubs in s's package. +func NeedFuncSym(s *types.Sym) { + if !base.Ctxt.Flag_dynlink { + base.Fatalf("makefuncsym dynlink") + } + if s.IsBlank() { + return + } + if base.Flag.CompilingRuntime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") { + // runtime.getg(), getclosureptr(), getcallerpc(), and + // getcallersp() are not real functions and so do not + // get funcsyms. 
+ return + } + if _, existed := s.Pkg.LookupOK(ir.FuncSymName(s)); !existed { + funcsyms = append(funcsyms, s) + } +} + +func WriteFuncSyms() { + sort.Slice(funcsyms, func(i, j int) bool { + return funcsyms[i].LinksymName() < funcsyms[j].LinksymName() + }) + for _, s := range funcsyms { + sf := s.Pkg.Lookup(ir.FuncSymName(s)).Linksym() + objw.SymPtr(sf, 0, s.Linksym(), 0) + objw.Global(sf, int32(types.PtrSize), obj.DUPOK|obj.RODATA) + } +} diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/staticdata/embed.go similarity index 95% rename from src/cmd/compile/internal/gc/embed.go rename to src/cmd/compile/internal/staticdata/embed.go index 959d8cd7fe335..55c9a3356e8d6 100644 --- a/src/cmd/compile/internal/gc/embed.go +++ b/src/cmd/compile/internal/staticdata/embed.go @@ -2,19 +2,18 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package staticdata import ( + "path" + "sort" + "strings" + "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/objw" - "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" - - "path" - "sort" - "strings" ) const ( @@ -132,15 +131,9 @@ func embedFileLess(x, y string) bool { return xdir < ydir || xdir == ydir && xelem < yelem } -func dumpembeds() { - for _, v := range typecheck.Target.Embeds { - initEmbed(v) - } -} - -// initEmbed emits the init data for a //go:embed variable, +// WriteEmbed emits the init data for a //go:embed variable, // which is either a string, a []byte, or an embed.FS. -func initEmbed(v *ir.Name) { +func WriteEmbed(v *ir.Name) { files := embedFileList(v) switch kind := embedKind(v.Type()); kind { case embedUnknown: @@ -176,7 +169,7 @@ func initEmbed(v *ir.Name) { const hashSize = 16 hash := make([]byte, hashSize) for _, file := range files { - off = objw.SymPtr(slicedata, off, stringsym(v.Pos(), file), 0) // file string + off = objw.SymPtr(slicedata, off, StringSym(v.Pos(), file), 0) // file string off = objw.Uintptr(slicedata, off, uint64(len(file))) if strings.HasSuffix(file, "/") { // entry for directory - no data From de65151e507e7b3c8e46d74f223d7c562177bedc Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 23 Dec 2020 00:55:38 -0500 Subject: [PATCH 235/474] [dev.regabi] cmd/compile: split out package reflectdata [generated] [git-generate] cd src/cmd/compile/internal/gc rf ' ex { import "cmd/compile/internal/base" thearch.LinkArch.Name -> base.Ctxt.Arch.Name } # Move out of reflect.go a few functions that should stay. mv addsignats obj.go mv deferstruct ssa.go # Export reflectdata API. 
mv zerosize ZeroSize mv hmap MapType mv bmap MapBucketType mv hiter MapIterType mv addsignat NeedRuntimeType mv typename TypePtr mv typenamesym TypeSym mv typesymprefix TypeSymPrefix mv itabsym ITabSym mv tracksym TrackSym mv zeroaddr ZeroAddr mv itabname ITabAddr mv ifaceMethodOffset InterfaceMethodOffset mv peekitabs CompileITabs mv addptabs CollectPTabs mv algtype AlgType mv dtypesym WriteType mv dumpbasictypes WriteBasicTypes mv dumpimportstrings WriteImportStrings mv dumpsignats WriteRuntimeTypes mv dumptabs WriteTabs mv eqinterface EqInterface mv eqstring EqString mv GCProg gcProg mv EqCanPanic eqCanPanic mv IsRegularMemory isRegularMemory mv Sig typeSig mv hashmem alg.go mv CollectPTabs genwrapper ZeroSize reflect.go mv alg.go reflect.go cmd/compile/internal/reflectdata ' Change-Id: Iaae9da9e9fad5f772f5216004823ccff2ea8f139 Reviewed-on: https://go-review.googlesource.com/c/go/+/279475 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/abiutils_test.go | 5 +- src/cmd/compile/internal/gc/go.go | 2 - src/cmd/compile/internal/gc/main.go | 11 +- src/cmd/compile/internal/gc/obj.go | 60 +-- src/cmd/compile/internal/gc/order.go | 3 +- src/cmd/compile/internal/gc/pgen.go | 3 +- src/cmd/compile/internal/gc/range.go | 5 +- src/cmd/compile/internal/gc/sinit.go | 5 +- src/cmd/compile/internal/gc/ssa.go | 52 ++- src/cmd/compile/internal/gc/subr.go | 140 +------ src/cmd/compile/internal/gc/walk.go | 81 ++-- .../internal/{gc => reflectdata}/alg.go | 78 ++-- .../internal/{gc => reflectdata}/reflect.go | 379 +++++++++++------- 13 files changed, 418 insertions(+), 406 deletions(-) rename src/cmd/compile/internal/{gc => reflectdata}/alg.go (93%) rename src/cmd/compile/internal/{gc => reflectdata}/reflect.go (84%) diff --git a/src/cmd/compile/internal/gc/abiutils_test.go b/src/cmd/compile/internal/gc/abiutils_test.go index fe9a838688196..4b2a30d00ca5e 100644 --- a/src/cmd/compile/internal/gc/abiutils_test.go +++ b/src/cmd/compile/internal/gc/abiutils_test.go @@ -7,6 +7,7 @@ package gc import ( "bufio" "cmd/compile/internal/base" + "cmd/compile/internal/reflectdata" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" @@ -38,10 +39,10 @@ func TestMain(m *testing.M) { types.PtrSize = thearch.LinkArch.PtrSize types.RegSize = thearch.LinkArch.RegSize types.TypeLinkSym = func(t *types.Type) *obj.LSym { - return typenamesym(t).Linksym() + return reflectdata.TypeSym(t).Linksym() } types.TypeLinkSym = func(t *types.Type) *obj.LSym { - return typenamesym(t).Linksym() + return reflectdata.TypeSym(t).Linksym() } typecheck.Init() os.Exit(m.Run()) diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index 6f97d43fef885..ba838a5ff528a 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -12,8 +12,6 @@ import ( var pragcgobuf [][]string -var zerosize int64 - // interface to back end type Arch struct { diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index bb6ace6562800..e66b877fd0b92 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -15,6 +15,7 @@ import ( "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/noder" + "cmd/compile/internal/reflectdata" "cmd/compile/internal/ssa" "cmd/compile/internal/staticdata" "cmd/compile/internal/typecheck" @@ -190,19 +191,19 @@ func Main(archInit func(*Arch)) { types.RegSize = thearch.LinkArch.RegSize types.MaxWidth = thearch.MAXWIDTH 
types.TypeLinkSym = func(t *types.Type) *obj.LSym { - return typenamesym(t).Linksym() + return reflectdata.TypeSym(t).Linksym() } typecheck.Target = new(ir.Package) typecheck.NeedFuncSym = staticdata.NeedFuncSym - typecheck.NeedITab = func(t, iface *types.Type) { itabname(t, iface) } - typecheck.NeedRuntimeType = addsignat // TODO(rsc): typenamesym for lock? + typecheck.NeedITab = func(t, iface *types.Type) { reflectdata.ITabAddr(t, iface) } + typecheck.NeedRuntimeType = reflectdata.NeedRuntimeType // TODO(rsc): typenamesym for lock? base.AutogeneratedPos = makePos(src.NewFileBase("", ""), 1, 0) types.TypeLinkSym = func(t *types.Type) *obj.LSym { - return typenamesym(t).Linksym() + return reflectdata.TypeSym(t).Linksym() } typecheck.Init() @@ -282,7 +283,7 @@ func Main(archInit func(*Arch)) { // the right side of OCONVIFACE so that methods // can be de-virtualized during compilation. ir.CurFunc = nil - peekitabs() + reflectdata.CompileITabs() // Compile top level functions. // Don't use range--walk can add functions to Target.Decls. diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 50935d4e98c46..4db2ad9d4a75d 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -8,6 +8,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/objw" + "cmd/compile/internal/reflectdata" "cmd/compile/internal/staticdata" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" @@ -112,14 +113,14 @@ func dumpdata() { dumpglobls(typecheck.Target.Externs) staticdata.WriteFuncSyms() - addptabs() + reflectdata.CollectPTabs() numExports := len(typecheck.Target.Exports) addsignats(typecheck.Target.Externs) - dumpsignats() - dumptabs() - numPTabs, numITabs := CountTabs() - dumpimportstrings() - dumpbasictypes() + reflectdata.WriteRuntimeTypes() + reflectdata.WriteTabs() + numPTabs, numITabs := reflectdata.CountTabs() + reflectdata.WriteImportStrings() + reflectdata.WriteBasicTypes() dumpembeds() // Calls to dumpsignats can generate functions, @@ -138,7 +139,7 @@ func dumpdata() { } numDecls = len(typecheck.Target.Decls) compileFunctions() - dumpsignats() + reflectdata.WriteRuntimeTypes() if numDecls == len(typecheck.Target.Decls) { break } @@ -147,9 +148,9 @@ func dumpdata() { // Dump extra globals. 
dumpglobls(typecheck.Target.Externs[numExterns:]) - if zerosize > 0 { + if reflectdata.ZeroSize > 0 { zero := ir.Pkgs.Map.Lookup("zero") - objw.Global(zero.Linksym(), int32(zerosize), obj.DUPOK|obj.RODATA) + objw.Global(zero.Linksym(), int32(reflectdata.ZeroSize), obj.DUPOK|obj.RODATA) } addGCLocals() @@ -157,7 +158,7 @@ func dumpdata() { if numExports != len(typecheck.Target.Exports) { base.Fatalf("Target.Exports changed after compile functions loop") } - newNumPTabs, newNumITabs := CountTabs() + newNumPTabs, newNumITabs := reflectdata.CountTabs() if newNumPTabs != numPTabs { base.Fatalf("ptabs changed after compile functions loop") } @@ -184,36 +185,6 @@ func dumpLinkerObj(bout *bio.Writer) { obj.WriteObjFile(base.Ctxt, bout) } -func addptabs() { - if !base.Ctxt.Flag_dynlink || types.LocalPkg.Name != "main" { - return - } - for _, exportn := range typecheck.Target.Exports { - s := exportn.Sym() - nn := ir.AsNode(s.Def) - if nn == nil { - continue - } - if nn.Op() != ir.ONAME { - continue - } - n := nn.(*ir.Name) - if !types.IsExported(s.Name) { - continue - } - if s.Pkg.Name != "main" { - continue - } - if n.Type().Kind() == types.TFUNC && n.Class_ == ir.PFUNC { - // function - ptabs = append(ptabs, ptabEntry{s: s, t: s.Def.Type()}) - } else { - // variable - ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(s.Def.Type())}) - } - } -} - func dumpGlobal(n *ir.Name) { if n.Type() == nil { base.Fatalf("external %v nil type\n", n) @@ -373,3 +344,12 @@ func dumpembeds() { staticdata.WriteEmbed(v) } } + +func addsignats(dcls []ir.Node) { + // copy types from dcl list to signatset + for _, n := range dcls { + if n.Op() == ir.OTYPE { + reflectdata.NeedRuntimeType(n.Type()) + } + } +} diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 32a355ae6b013..d1c5bb04a1b98 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -8,6 +8,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/escape" "cmd/compile/internal/ir" + "cmd/compile/internal/reflectdata" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/src" @@ -882,7 +883,7 @@ func (o *Order) stmt(n ir.Node) { // n.Prealloc is the temp for the iterator. // hiter contains pointers and needs to be zeroed. - n.Prealloc = o.newTemp(hiter(n.Type()), true) + n.Prealloc = o.newTemp(reflectdata.MapIterType(n.Type()), true) } o.exprListInPlace(n.Vars) if orderBody { diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index dcba5c7ecb665..4d990e7dbacb5 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -9,6 +9,7 @@ import ( "cmd/compile/internal/ir" "cmd/compile/internal/liveness" "cmd/compile/internal/objw" + "cmd/compile/internal/reflectdata" "cmd/compile/internal/ssa" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" @@ -225,7 +226,7 @@ func compile(fn *ir.Func) { switch n.Class_ { case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO: if liveness.ShouldTrack(n) && n.Addrtaken() { - dtypesym(n.Type()) + reflectdata.WriteType(n.Type()) // Also make sure we allocate a linker symbol // for the stack object data, for the same reason. 
if fn.LSym.Func().StackObjects == nil { diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index c040811932be2..4ba0654aef27e 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -7,6 +7,7 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/reflectdata" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/sys" @@ -180,7 +181,7 @@ func walkrange(nrange *ir.RangeStmt) ir.Node { fn := typecheck.LookupRuntime("mapiterinit") fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), th) - init = append(init, mkcall1(fn, nil, nil, typename(t), ha, typecheck.NodAddr(hit))) + init = append(init, mkcall1(fn, nil, nil, reflectdata.TypePtr(t), ha, typecheck.NodAddr(hit))) nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), typecheck.NodNil()) fn = typecheck.LookupRuntime("mapiternext") @@ -383,7 +384,7 @@ func mapClear(m ir.Node) ir.Node { // instantiate mapclear(typ *type, hmap map[any]any) fn := typecheck.LookupRuntime("mapclear") fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem()) - n := mkcall1(fn, nil, nil, typename(t), m) + n := mkcall1(fn, nil, nil, reflectdata.TypePtr(t), m) return walkstmt(typecheck.Stmt(n)) } diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index d818be94a40c7..337b67af46222 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -7,6 +7,7 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/reflectdata" "cmd/compile/internal/staticdata" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" @@ -314,9 +315,9 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type var itab *ir.AddrExpr if typ.IsEmptyInterface() { - itab = typename(val.Type()) + itab = reflectdata.TypePtr(val.Type()) } else { - itab = itabname(val.Type(), typ) + itab = reflectdata.ITabAddr(val.Type(), typ) } // Create a copy of l to modify while we emit data. 
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 51eeb9315ac52..997bcb6d5edcc 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -20,6 +20,7 @@ import ( "cmd/compile/internal/ir" "cmd/compile/internal/liveness" "cmd/compile/internal/objw" + "cmd/compile/internal/reflectdata" "cmd/compile/internal/ssa" "cmd/compile/internal/staticdata" "cmd/compile/internal/typecheck" @@ -89,7 +90,7 @@ func initssaconfig() { _ = types.NewPtr(types.Types[types.TINT64]) // *int64 _ = types.NewPtr(types.ErrorType) // *error types.NewPtrCacheEnabled = false - ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, base.Ctxt, base.Flag.N == 0) + ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0) ssaConfig.SoftFloat = thearch.SoftFloat ssaConfig.Race = base.Flag.Race ssaCaches = make([]ssa.Cache, base.Flag.LowerC) @@ -134,7 +135,7 @@ func initssaconfig() { ir.Syms.Zerobase = typecheck.LookupRuntimeVar("zerobase") // asm funcs with special ABI - if thearch.LinkArch.Name == "amd64" { + if base.Ctxt.Arch.Name == "amd64" { GCWriteBarrierReg = map[int16]*obj.LSym{ x86.REG_AX: typecheck.LookupRuntimeFunc("gcWriteBarrier"), x86.REG_CX: typecheck.LookupRuntimeFunc("gcWriteBarrierCX"), @@ -389,7 +390,7 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.OpenCodedDeferDisallowed() switch { - case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386": + case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && base.Ctxt.Arch.Name == "386": // Don't support open-coded defers for 386 ONLY when using shared // libraries, because there is extra code (added by rewriteToUseGot()) // preceding the deferreturn/ret code that is generated by gencallret() @@ -6427,7 +6428,7 @@ func emitStackObjects(e *ssafn, pp *objw.Progs) { if !types.TypeSym(v.Type()).Siggen() { e.Fatalf(v.Pos(), "stack object's type symbol not generated for type %s", v.Type()) } - off = objw.SymPtr(x, off, dtypesym(v.Type()), 0) + off = objw.SymPtr(x, off, reflectdata.WriteType(v.Type()), 0) } // Emit a funcdata pointing at the stack object data. @@ -7247,7 +7248,7 @@ func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot { } func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym { - return itabsym(it, offset) + return reflectdata.ITabSym(it, offset) } // SplitSlot returns a slot representing the data of parent starting at offset. @@ -7411,3 +7412,44 @@ func max8(a, b int8) int8 { } return b } + +// deferstruct makes a runtime._defer structure, with additional space for +// stksize bytes of args. +func deferstruct(stksize int64) *types.Type { + makefield := func(name string, typ *types.Type) *types.Field { + // Unlike the global makefield function, this one needs to set Pkg + // because these types might be compared (in SSA CSE sorting). + // TODO: unify this makefield and the global one above. + sym := &types.Sym{Name: name, Pkg: types.LocalPkg} + return types.NewField(src.NoXPos, sym, typ) + } + argtype := types.NewArray(types.Types[types.TUINT8], stksize) + argtype.Width = stksize + argtype.Align = 1 + // These fields must match the ones in runtime/runtime2.go:_defer and + // cmd/compile/internal/gc/ssa.go:(*state).call. 
+ fields := []*types.Field{ + makefield("siz", types.Types[types.TUINT32]), + makefield("started", types.Types[types.TBOOL]), + makefield("heap", types.Types[types.TBOOL]), + makefield("openDefer", types.Types[types.TBOOL]), + makefield("sp", types.Types[types.TUINTPTR]), + makefield("pc", types.Types[types.TUINTPTR]), + // Note: the types here don't really matter. Defer structures + // are always scanned explicitly during stack copying and GC, + // so we make them uintptr type even though they are real pointers. + makefield("fn", types.Types[types.TUINTPTR]), + makefield("_panic", types.Types[types.TUINTPTR]), + makefield("link", types.Types[types.TUINTPTR]), + makefield("framepc", types.Types[types.TUINTPTR]), + makefield("varp", types.Types[types.TUINTPTR]), + makefield("fd", types.Types[types.TUINTPTR]), + makefield("args", argtype), + } + + // build struct holding the above fields + s := types.NewStruct(types.NoPkg, fields) + s.SetNoalg(true) + types.CalcStructSize(s) + return s +} diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 362c5162b64ac..89baaf7eee9de 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -6,9 +6,8 @@ package gc import ( "cmd/compile/internal/base" - "cmd/compile/internal/escape" - "cmd/compile/internal/inline" "cmd/compile/internal/ir" + "cmd/compile/internal/reflectdata" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/src" @@ -319,144 +318,9 @@ func cheapexpr(n ir.Node, init *ir.Nodes) ir.Node { return copyexpr(n, n.Type(), init) } -// Generate a wrapper function to convert from -// a receiver of type T to a receiver of type U. -// That is, -// -// func (t T) M() { -// ... -// } -// -// already exists; this function generates -// -// func (u U) M() { -// u.M() -// } -// -// where the types T and U are such that u.M() is valid -// and calls the T.M method. -// The resulting function is for use in method tables. -// -// rcvr - U -// method - M func (t T)(), a TFIELD type struct -// newnam - the eventual mangled name of this function -func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { - if false && base.Flag.LowerR != 0 { - fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam) - } - - // Only generate (*T).M wrappers for T.M in T's own package. - if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && - rcvr.Elem().Sym() != nil && rcvr.Elem().Sym().Pkg != types.LocalPkg { - return - } - - // Only generate I.M wrappers for I in I's own package - // but keep doing it for error.Error (was issue #29304). - if rcvr.IsInterface() && rcvr.Sym() != nil && rcvr.Sym().Pkg != types.LocalPkg && rcvr != types.ErrorType { - return - } - - base.Pos = base.AutogeneratedPos - typecheck.DeclContext = ir.PEXTERN - - tfn := ir.NewFuncType(base.Pos, - ir.NewField(base.Pos, typecheck.Lookup(".this"), nil, rcvr), - typecheck.NewFuncParams(method.Type.Params(), true), - typecheck.NewFuncParams(method.Type.Results(), false)) - - fn := typecheck.DeclFunc(newnam, tfn) - fn.SetDupok(true) - - nthis := ir.AsNode(tfn.Type().Recv().Nname) - - methodrcvr := method.Type.Recv().Type - - // generate nil pointer check for better error - if rcvr.IsPtr() && rcvr.Elem() == methodrcvr { - // generating wrapper from *T to T. 
- n := ir.NewIfStmt(base.Pos, nil, nil, nil) - n.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, nthis, typecheck.NodNil()) - call := ir.NewCallExpr(base.Pos, ir.OCALL, typecheck.LookupRuntime("panicwrap"), nil) - n.Body = []ir.Node{call} - fn.Body.Append(n) - } - - dot := typecheck.AddImplicitDots(ir.NewSelectorExpr(base.Pos, ir.OXDOT, nthis, method.Sym)) - - // generate call - // It's not possible to use a tail call when dynamic linking on ppc64le. The - // bad scenario is when a local call is made to the wrapper: the wrapper will - // call the implementation, which might be in a different module and so set - // the TOC to the appropriate value for that module. But if it returns - // directly to the wrapper's caller, nothing will reset it to the correct - // value for that function. - if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) { - // generate tail call: adjust pointer receiver and jump to embedded method. - left := dot.X // skip final .M - if !left.Type().IsPtr() { - left = typecheck.NodAddr(left) - } - as := ir.NewAssignStmt(base.Pos, nthis, typecheck.ConvNop(left, rcvr)) - fn.Body.Append(as) - fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.ORETJMP, ir.MethodSym(methodrcvr, method.Sym))) - } else { - fn.SetWrapper(true) // ignore frame for panic+recover matching - call := ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil) - call.Args.Set(ir.ParamNames(tfn.Type())) - call.IsDDD = tfn.Type().IsVariadic() - if method.Type.NumResults() > 0 { - ret := ir.NewReturnStmt(base.Pos, nil) - ret.Results = []ir.Node{call} - fn.Body.Append(ret) - } else { - fn.Body.Append(call) - } - } - - if false && base.Flag.LowerR != 0 { - ir.DumpList("genwrapper body", fn.Body) - } - - typecheck.FinishFuncBody() - if base.Debug.DclStack != 0 { - types.CheckDclstack() - } - - typecheck.Func(fn) - ir.CurFunc = fn - typecheck.Stmts(fn.Body) - - // Inline calls within (*T).M wrappers. This is safe because we only - // generate those wrappers within the same compilation unit as (T).M. - // TODO(mdempsky): Investigate why we can't enable this more generally. 
- if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym() != nil { - inline.InlineCalls(fn) - } - escape.Batch([]*ir.Func{fn}, false) - - ir.CurFunc = nil - typecheck.Target.Decls = append(typecheck.Target.Decls, fn) -} - -func hashmem(t *types.Type) ir.Node { - sym := ir.Pkgs.Runtime.Lookup("memhash") - - n := typecheck.NewName(sym) - ir.MarkFunc(n) - n.SetType(typecheck.NewFuncType(nil, []*ir.Field{ - ir.NewField(base.Pos, nil, nil, types.NewPtr(t)), - ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]), - ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]), - }, []*ir.Field{ - ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]), - })) - return n -} - func ngotype(n ir.Node) *types.Sym { if n.Type() != nil { - return typenamesym(n.Type()) + return reflectdata.TypeSym(n.Type()) } return nil } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 9c2484f3dc29f..9b49b06c34acc 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -8,6 +8,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/escape" "cmd/compile/internal/ir" + "cmd/compile/internal/reflectdata" "cmd/compile/internal/staticdata" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" @@ -594,12 +595,12 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { n := n.(*ir.TypeAssertExpr) n.X = walkexpr(n.X, init) // Set up interface type addresses for back end. - n.Ntype = typename(n.Type()) + n.Ntype = reflectdata.TypePtr(n.Type()) if n.Op() == ir.ODOTTYPE { - n.Ntype.(*ir.AddrExpr).Alloc = typename(n.X.Type()) + n.Ntype.(*ir.AddrExpr).Alloc = reflectdata.TypePtr(n.X.Type()) } if !n.Type().IsInterface() && !n.X.Type().IsEmptyInterface() { - n.Itab = []ir.Node{itabname(n.Type(), n.X.Type())} + n.Itab = []ir.Node{reflectdata.ITabAddr(n.Type(), n.X.Type())} } return n @@ -781,7 +782,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // Left in place for back end. // Do not add a new write barrier. // Set up address of type for back end. - r.(*ir.CallExpr).X = typename(r.Type().Elem()) + r.(*ir.CallExpr).X = reflectdata.TypePtr(r.Type().Elem()) return as } // Otherwise, lowered for race detector. @@ -870,11 +871,11 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { var call *ir.CallExpr if w := t.Elem().Width; w <= zeroValSize { fn := mapfn(mapaccess2[fast], t) - call = mkcall1(fn, fn.Type().Results(), init, typename(t), r.X, key) + call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key) } else { fn := mapfn("mapaccess2_fat", t) - z := zeroaddr(w) - call = mkcall1(fn, fn.Type().Results(), init, typename(t), r.X, key, z) + z := reflectdata.ZeroAddr(w) + call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key, z) } // mapaccess2* returns a typed bool, but due to spec changes, @@ -915,7 +916,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // order.stmt made sure key is addressable. key = typecheck.NodAddr(key) } - return mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key) + return mkcall1(mapfndel(mapdelete[fast], t), nil, init, reflectdata.TypePtr(t), map_, key) case ir.OAS2DOTTYPE: n := n.(*ir.AssignListStmt) @@ -937,9 +938,9 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // typeword generates the type word of the interface value. 
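// The "type word" is the first word of a two-word interface header; which
// symbol it points at depends on the destination type. Mirroring the
// runtime's layouts (runtime/runtime2.go) as a sketch:
//
//	type eface struct { // conversion to interface{}: word is TypePtr(fromType)
//		_type, data unsafe.Pointer
//	}
//
//	type iface struct { // conversion to a non-empty interface: word is ITabAddr(fromType, toType)
//		tab, data unsafe.Pointer
//	}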
typeword := func() ir.Node { if toType.IsEmptyInterface() { - return typename(fromType) + return reflectdata.TypePtr(fromType) } - return itabname(fromType, toType) + return reflectdata.ITabAddr(fromType, toType) } // Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped. @@ -1048,7 +1049,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { var tab ir.Node if fromType.IsInterface() { // convI2I - tab = typename(toType) + tab = reflectdata.TypePtr(toType) } else { // convT2x tab = typeword() @@ -1218,7 +1219,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // order.expr made sure key is addressable. key = typecheck.NodAddr(key) } - call = mkcall1(mapfn(mapassign[fast], t), nil, init, typename(t), map_, key) + call = mkcall1(mapfn(mapassign[fast], t), nil, init, reflectdata.TypePtr(t), map_, key) } else { // m[k] is not the target of an assignment. fast := mapfast(t) @@ -1229,10 +1230,10 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { } if w := t.Elem().Width; w <= zeroValSize { - call = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Elem()), init, typename(t), map_, key) + call = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Elem()), init, reflectdata.TypePtr(t), map_, key) } else { - z := zeroaddr(w) - call = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, typename(t), map_, key, z) + z := reflectdata.ZeroAddr(w) + call = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, reflectdata.TypePtr(t), map_, key, z) } } call.SetType(types.NewPtr(t.Elem())) @@ -1340,12 +1341,12 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { argtype = types.Types[types.TINT] } - return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, typename(n.Type()), typecheck.Conv(size, argtype)) + return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(size, argtype)) case ir.OMAKEMAP: n := n.(*ir.MakeExpr) t := n.Type() - hmapType := hmap(t) + hmapType := reflectdata.MapType(t) hint := n.Len // var h *hmap @@ -1365,7 +1366,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // Maximum key and elem size is 128 bytes, larger objects // are stored with an indirection. So max bucket size is 2048+eps. 
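// The arithmetic behind "2048+eps": a bucket holds BUCKETSIZE = 8 entries,
// and keys or elems larger than 128 bytes are stored indirectly, so the
// inline worst case is
//
//	8*128 + 8*128 = 2048 // bytes of keys + elems per bucket
//
// with the epsilon being the tophash array and the trailing overflow pointer
// that MapBucketType appends.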
if !ir.IsConst(hint, constant.Int) || - constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(BUCKETSIZE)) { + constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) { // In case hint is larger than BUCKETSIZE runtime.makemap // will allocate the buckets on the heap, see #20184 @@ -1376,11 +1377,11 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // h.buckets = b // } - nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(BUCKETSIZE)), nil, nil) + nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(reflectdata.BUCKETSIZE)), nil, nil) nif.Likely = true // var bv bmap - bv := typecheck.Temp(bmap(t)) + bv := typecheck.Temp(reflectdata.MapBucketType(t)) nif.Body.Append(ir.NewAssignStmt(base.Pos, bv, nil)) // b = &bv @@ -1394,7 +1395,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { } } - if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(BUCKETSIZE)) { + if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) { // Handling make(map[any]any) and // make(map[any]any, hint) where hint <= BUCKETSIZE // special allows for faster map initialization and @@ -1442,7 +1443,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { fn := typecheck.LookupRuntime(fnname) fn = typecheck.SubstArgTypes(fn, hmapType, t.Key(), t.Elem()) - return mkcall1(fn, n.Type(), init, typename(n.Type()), typecheck.Conv(hint, argtype), h) + return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(hint, argtype), h) case ir.OMAKESLICE: n := n.(*ir.MakeExpr) @@ -1511,7 +1512,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { m.SetType(t) fn := typecheck.LookupRuntime(fnname) - m.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype)) + m.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype)) m.Ptr.MarkNonNil() m.LenCap = []ir.Node{typecheck.Conv(len, types.Types[types.TINT]), typecheck.Conv(cap, types.Types[types.TINT])} return walkexpr(typecheck.Expr(m), init) @@ -1565,7 +1566,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer fn := typecheck.LookupRuntime("makeslicecopy") s := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil) - s.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR])) + s.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR])) s.Ptr.MarkNonNil() s.LenCap = []ir.Node{length, length} s.SetType(t) @@ -1709,7 +1710,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // markTypeUsedInInterface marks that type t is converted to an interface. // This information is used in the linker in dead method elimination. func markTypeUsedInInterface(t *types.Type, from *obj.LSym) { - tsym := typenamesym(t).Linksym() + tsym := reflectdata.TypeSym(t).Linksym() // Emit a marker relocation. The linker will know the type is converted // to an interface if "from" is reachable. 
r := obj.Addrel(from) @@ -1722,13 +1723,13 @@ func markTypeUsedInInterface(t *types.Type, from *obj.LSym) { func markUsedIfaceMethod(n *ir.CallExpr) { dot := n.X.(*ir.SelectorExpr) ityp := dot.X.Type() - tsym := typenamesym(ityp).Linksym() + tsym := reflectdata.TypeSym(ityp).Linksym() r := obj.Addrel(ir.CurFunc.LSym) r.Sym = tsym // dot.Xoffset is the method index * Widthptr (the offset of code pointer // in itab). midx := dot.Offset / int64(types.PtrSize) - r.Add = ifaceMethodOffset(ityp, midx) + r.Add = reflectdata.InterfaceMethodOffset(ityp, midx) r.Type = objabi.R_USEIFACEMETHOD } @@ -2095,7 +2096,7 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { func callnew(t *types.Type) ir.Node { types.CalcSize(t) - n := ir.NewUnaryExpr(base.Pos, ir.ONEWOBJ, typename(t)) + n := ir.NewUnaryExpr(base.Pos, ir.ONEWOBJ, reflectdata.TypePtr(t)) n.SetType(types.NewPtr(t)) n.SetTypecheck(1) n.MarkNonNil() @@ -2589,7 +2590,7 @@ func mapfast(t *types.Type) int { if t.Elem().Width > 128 { return mapslow } - switch algtype(t.Key()) { + switch reflectdata.AlgType(t.Key()) { case types.AMEM32: if !t.Key().HasPointers() { return mapfast32 @@ -2733,7 +2734,7 @@ func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { fn = typecheck.SubstArgTypes(fn, elemtype, elemtype) // s = growslice(T, s, n) - nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn))} + nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))} nodes.Append(nif) // s = s[:n] @@ -2756,7 +2757,7 @@ func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { fn = typecheck.SubstArgTypes(fn, l1.Type().Elem(), l2.Type().Elem()) ptr1, len1 := backingArrayPtrLen(cheapexpr(slice, &nodes)) ptr2, len2 := backingArrayPtrLen(l2) - ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, typename(elemtype), ptr1, len1, ptr2, len2) + ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, reflectdata.TypePtr(elemtype), ptr1, len1, ptr2, len2) } else if base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime { // rely on runtime to instrument: // copy(s[len(l1):], l2) @@ -2903,7 +2904,7 @@ func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { fn = typecheck.SubstArgTypes(fn, elemtype, elemtype) // s = growslice(T, s, n) - nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn))} + nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))} nodes = append(nodes, nif) // s = s[:n] @@ -3025,7 +3026,7 @@ func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node { fn := typecheck.LookupRuntime("growslice") // growslice(, old []T, mincap int) (ret []T) fn = typecheck.SubstArgTypes(fn, ns.Type().Elem(), ns.Type().Elem()) - nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), typename(ns.Type().Elem()), ns, + nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), reflectdata.TypePtr(ns.Type().Elem()), ns, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns), na)))} l = append(l, nif) @@ -3073,7 +3074,7 @@ func copyany(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node { ptrL, lenL := backingArrayPtrLen(n.X) n.Y = cheapexpr(n.Y, init) ptrR, lenR := backingArrayPtrLen(n.Y) - return mkcall1(fn, n.Type(), init, typename(n.X.Type().Elem()), ptrL, lenL, ptrR, lenR) + return mkcall1(fn, n.Type(), 
init, reflectdata.TypePtr(n.X.Type().Elem()), ptrL, lenL, ptrR, lenR) } if runtimecall { @@ -3146,7 +3147,7 @@ func eqfor(t *types.Type) (n ir.Node, needsize bool) { n = typecheck.SubstArgTypes(n, t, t) return n, true case types.ASPECIAL: - sym := typesymprefix(".eq", t) + sym := reflectdata.TypeSymPrefix(".eq", t) n := typecheck.NewName(sym) ir.MarkFunc(n) n.SetType(typecheck.NewFuncType(nil, []*ir.Field{ @@ -3200,7 +3201,7 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { // l.tab != nil && l.tab._type == type(r) var eqtype ir.Node tab := ir.NewUnaryExpr(base.Pos, ir.OITAB, l) - rtyp := typename(r.Type()) + rtyp := reflectdata.TypePtr(r.Type()) if l.Type().IsEmptyInterface() { tab.SetType(types.NewPtr(types.Types[types.TUINT8])) tab.SetTypecheck(1) @@ -3424,7 +3425,7 @@ func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node { func walkcompareInterface(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { n.Y = cheapexpr(n.Y, init) n.X = cheapexpr(n.X, init) - eqtab, eqdata := eqinterface(n.X, n.Y) + eqtab, eqdata := reflectdata.EqInterface(n.X, n.Y) var cmp ir.Node if n.Op() == ir.OEQ { cmp = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqtab, eqdata) @@ -3538,7 +3539,7 @@ func walkcompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { // prepare for rewrite below n.X = cheapexpr(n.X, init) n.Y = cheapexpr(n.Y, init) - eqlen, eqmem := eqstring(n.X, n.Y) + eqlen, eqmem := reflectdata.EqString(n.X, n.Y) // quick check of len before full compare for == or !=. // memequal then tests equality up to length len. if n.Op() == ir.OEQ { @@ -3728,7 +3729,7 @@ func usefield(n *ir.SelectorExpr) { base.Errorf("tracked field must be exported (upper case)") } - sym := tracksym(outer, field) + sym := reflectdata.TrackSym(outer, field) if ir.CurFunc.FieldTrack == nil { ir.CurFunc.FieldTrack = make(map[*types.Sym]struct{}) } @@ -3946,7 +3947,7 @@ func walkCheckPtrAlignment(n *ir.ConvExpr, init *ir.Nodes, count ir.Node) ir.Nod } n.X = cheapexpr(n.X, init) - init.Append(mkcall("checkptrAlignment", nil, init, typecheck.ConvNop(n.X, types.Types[types.TUNSAFEPTR]), typename(elem), typecheck.Conv(count, types.Types[types.TUINTPTR]))) + init.Append(mkcall("checkptrAlignment", nil, init, typecheck.ConvNop(n.X, types.Types[types.TUNSAFEPTR]), reflectdata.TypePtr(elem), typecheck.Conv(count, types.Types[types.TUINTPTR]))) return n } diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/reflectdata/alg.go similarity index 93% rename from src/cmd/compile/internal/gc/alg.go rename to src/cmd/compile/internal/reflectdata/alg.go index 4fc8cf04eff5c..8391486e50ebe 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/reflectdata/alg.go @@ -2,38 +2,39 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package reflectdata import ( + "fmt" + "sort" + "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/objw" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" - "fmt" - "sort" ) -// IsRegularMemory reports whether t can be compared/hashed as regular memory. -func IsRegularMemory(t *types.Type) bool { +// isRegularMemory reports whether t can be compared/hashed as regular memory. +func isRegularMemory(t *types.Type) bool { a, _ := types.AlgType(t) return a == types.AMEM } -// EqCanPanic reports whether == on type t could panic (has an interface somewhere). +// eqCanPanic reports whether == on type t could panic (has an interface somewhere). 
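// A concrete illustration of the panic this predicate guards against: struct
// and array comparisons compile whenever the field types are comparable, but
// still panic at run time if a dynamic type stored in an interface field is
// not.
//
//	package main
//
//	func main() {
//		type box struct{ v interface{} }
//		a, b := box{v: func() {}}, box{v: func() {}}
//		defer func() { println("recovered:", recover() != nil) }()
//		_ = a == b // panics: runtime error: comparing uncomparable type func()
//	}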
// t must be comparable. -func EqCanPanic(t *types.Type) bool { +func eqCanPanic(t *types.Type) bool { switch t.Kind() { default: return false case types.TINTER: return true case types.TARRAY: - return EqCanPanic(t.Elem()) + return eqCanPanic(t.Elem()) case types.TSTRUCT: for _, f := range t.FieldSlice() { - if !f.Sym.IsBlank() && EqCanPanic(f.Type) { + if !f.Sym.IsBlank() && eqCanPanic(f.Type) { return true } } @@ -41,9 +42,9 @@ func EqCanPanic(t *types.Type) bool { } } -// algtype is like algtype1, except it returns the fixed-width AMEMxx variants +// AlgType is like algtype1, except it returns the fixed-width AMEMxx variants // instead of the general AMEM kind when possible. -func algtype(t *types.Type) types.AlgKind { +func AlgType(t *types.Type) types.AlgKind { a, _ := types.AlgType(t) if a == types.AMEM { switch t.Width { @@ -69,7 +70,7 @@ func algtype(t *types.Type) types.AlgKind { // the hash of a value of type t. // Note: the generated function must match runtime.typehash exactly. func genhash(t *types.Type) *obj.LSym { - switch algtype(t) { + switch AlgType(t) { default: // genhash is only called for types that have equality base.Fatalf("genhash %v", t) @@ -119,7 +120,7 @@ func genhash(t *types.Type) *obj.LSym { break } - closure := typesymprefix(".hashfunc", t).Linksym() + closure := TypeSymPrefix(".hashfunc", t).Linksym() if len(closure.P) > 0 { // already generated return closure } @@ -139,7 +140,7 @@ func genhash(t *types.Type) *obj.LSym { } } - sym := typesymprefix(".hash", t) + sym := TypeSymPrefix(".hash", t) if base.Flag.LowerR != 0 { fmt.Printf("genhash %v %v %v\n", closure, sym, t) } @@ -199,7 +200,7 @@ func genhash(t *types.Type) *obj.LSym { } // Hash non-memory fields with appropriate hash function. - if !IsRegularMemory(f.Type) { + if !isRegularMemory(f.Type) { hashel := hashfor(f.Type) call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil) nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym) // TODO: fields from other packages? @@ -283,7 +284,7 @@ func hashfor(t *types.Type) ir.Node { default: // Note: the caller of hashfor ensured that this symbol // exists and has a body by calling genhash for t. - sym = typesymprefix(".hash", t) + sym = TypeSymPrefix(".hash", t) } n := typecheck.NewName(sym) @@ -312,7 +313,7 @@ func sysClosure(name string) *obj.LSym { // geneq returns a symbol which is the closure used to compute // equality for two objects of type t. func geneq(t *types.Type) *obj.LSym { - switch algtype(t) { + switch AlgType(t) { case types.ANOEQ: // The runtime will panic if it tries to compare // a type with a nil equality function. @@ -362,11 +363,11 @@ func geneq(t *types.Type) *obj.LSym { break } - closure := typesymprefix(".eqfunc", t).Linksym() + closure := TypeSymPrefix(".eqfunc", t).Linksym() if len(closure.P) > 0 { // already generated return closure } - sym := typesymprefix(".eq", t) + sym := TypeSymPrefix(".eq", t) if base.Flag.LowerR != 0 { fmt.Printf("geneq %v\n", t) } @@ -476,12 +477,12 @@ func geneq(t *types.Type) *obj.LSym { // TODO: when the array size is small, unroll the length match checks. checkAll(3, false, func(pi, qi ir.Node) ir.Node { // Compare lengths. - eqlen, _ := eqstring(pi, qi) + eqlen, _ := EqString(pi, qi) return eqlen }) checkAll(1, true, func(pi, qi ir.Node) ir.Node { // Compare contents. - _, eqmem := eqstring(pi, qi) + _, eqmem := EqString(pi, qi) return eqmem }) case types.TFLOAT32, types.TFLOAT64: @@ -520,8 +521,8 @@ func geneq(t *types.Type) *obj.LSym { } // Compare non-memory fields with field equality. 
- if !IsRegularMemory(f.Type) { - if EqCanPanic(f.Type) { + if !isRegularMemory(f.Type) { + if eqCanPanic(f.Type) { // Enforce ordering by starting a new set of reorderable conditions. conds = append(conds, []ir.Node{}) } @@ -529,13 +530,13 @@ func geneq(t *types.Type) *obj.LSym { q := ir.NewSelectorExpr(base.Pos, ir.OXDOT, nq, f.Sym) switch { case f.Type.IsString(): - eqlen, eqmem := eqstring(p, q) + eqlen, eqmem := EqString(p, q) and(eqlen) and(eqmem) default: and(ir.NewBinaryExpr(base.Pos, ir.OEQ, p, q)) } - if EqCanPanic(f.Type) { + if eqCanPanic(f.Type) { // Also enforce ordering after something that can panic. conds = append(conds, []ir.Node{}) } @@ -597,7 +598,7 @@ func geneq(t *types.Type) *obj.LSym { // return (or goto ret) fn.Body.Append(ir.NewLabelStmt(base.Pos, neq)) fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(false))) - if EqCanPanic(t) || anyCall(fn) { + if eqCanPanic(t) || anyCall(fn) { // Epilogue is large, so share it with the equal case. fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, ret)) } else { @@ -655,13 +656,13 @@ func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node { return ne } -// eqstring returns the nodes +// EqString returns the nodes // len(s) == len(t) // and // memequal(s.ptr, t.ptr, len(s)) // which can be used to construct string equality comparison. // eqlen must be evaluated before eqmem, and shortcircuiting is required. -func eqstring(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) { +func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) { s = typecheck.Conv(s, types.Types[types.TSTRING]) t = typecheck.Conv(t, types.Types[types.TSTRING]) sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, s) @@ -680,13 +681,13 @@ func eqstring(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) { return cmp, call } -// eqinterface returns the nodes +// EqInterface returns the nodes // s.tab == t.tab (or s.typ == t.typ, as appropriate) // and // ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate) // which can be used to construct interface equality comparison. // eqtab must be evaluated before eqdata, and shortcircuiting is required. -func eqinterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) { +func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) { if !types.Identical(s.Type(), t.Type()) { base.Fatalf("eqinterface %v %v", s.Type(), t.Type()) } @@ -764,9 +765,24 @@ func memrun(t *types.Type, start int) (size int64, next int) { break } // Also, stop before a blank or non-memory field. 
- if f := t.Field(next); f.Sym.IsBlank() || !IsRegularMemory(f.Type) { + if f := t.Field(next); f.Sym.IsBlank() || !isRegularMemory(f.Type) { break } } return t.Field(next-1).End() - t.Field(start).Offset, next } + +func hashmem(t *types.Type) ir.Node { + sym := ir.Pkgs.Runtime.Lookup("memhash") + + n := typecheck.NewName(sym) + ir.MarkFunc(n) + n.SetType(typecheck.NewFuncType(nil, []*ir.Field{ + ir.NewField(base.Pos, nil, nil, types.NewPtr(t)), + ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]), + ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]), + }, []*ir.Field{ + ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]), + })) + return n +} diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go similarity index 84% rename from src/cmd/compile/internal/gc/reflect.go rename to src/cmd/compile/internal/reflectdata/reflect.go index 42f441a44aaf2..a5e2fb407aaf3 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -2,11 +2,19 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package reflectdata import ( + "fmt" + "os" + "sort" + "strings" + "sync" + "cmd/compile/internal/base" "cmd/compile/internal/bitvec" + "cmd/compile/internal/escape" + "cmd/compile/internal/inline" "cmd/compile/internal/ir" "cmd/compile/internal/liveness" "cmd/compile/internal/objw" @@ -16,11 +24,6 @@ import ( "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/src" - "fmt" - "os" - "sort" - "strings" - "sync" ) type itabEntry struct { @@ -52,7 +55,7 @@ var ( ptabs []ptabEntry ) -type Sig struct { +type typeSig struct { name *types.Sym isym *types.Sym tsym *types.Sym @@ -87,8 +90,8 @@ func makefield(name string, t *types.Type) *types.Field { return types.NewField(src.NoXPos, sym, t) } -// bmap makes the map bucket type given the type of the map. -func bmap(t *types.Type) *types.Type { +// MapBucketType makes the map bucket type given the type of the map. +func MapBucketType(t *types.Type) *types.Type { if t.MapType().Bucket != nil { return t.MapType().Bucket } @@ -194,14 +197,14 @@ func bmap(t *types.Type) *types.Type { return bucket } -// hmap builds a type representing a Hmap structure for the given map type. +// MapType builds a type representing a Hmap structure for the given map type. // Make sure this stays in sync with runtime/map.go. -func hmap(t *types.Type) *types.Type { +func MapType(t *types.Type) *types.Type { if t.MapType().Hmap != nil { return t.MapType().Hmap } - bmap := bmap(t) + bmap := MapBucketType(t) // build a struct: // type hmap struct { @@ -243,15 +246,15 @@ func hmap(t *types.Type) *types.Type { return hmap } -// hiter builds a type representing an Hiter structure for the given map type. +// MapIterType builds a type representing an Hiter structure for the given map type. // Make sure this stays in sync with runtime/map.go. -func hiter(t *types.Type) *types.Type { +func MapIterType(t *types.Type) *types.Type { if t.MapType().Hiter != nil { return t.MapType().Hiter } - hmap := hmap(t) - bmap := bmap(t) + hmap := MapType(t) + bmap := MapBucketType(t) // build a struct: // type hiter struct { @@ -302,50 +305,9 @@ func hiter(t *types.Type) *types.Type { return hiter } -// deferstruct makes a runtime._defer structure, with additional space for -// stksize bytes of args. 
-func deferstruct(stksize int64) *types.Type { - makefield := func(name string, typ *types.Type) *types.Field { - // Unlike the global makefield function, this one needs to set Pkg - // because these types might be compared (in SSA CSE sorting). - // TODO: unify this makefield and the global one above. - sym := &types.Sym{Name: name, Pkg: types.LocalPkg} - return types.NewField(src.NoXPos, sym, typ) - } - argtype := types.NewArray(types.Types[types.TUINT8], stksize) - argtype.Width = stksize - argtype.Align = 1 - // These fields must match the ones in runtime/runtime2.go:_defer and - // cmd/compile/internal/gc/ssa.go:(*state).call. - fields := []*types.Field{ - makefield("siz", types.Types[types.TUINT32]), - makefield("started", types.Types[types.TBOOL]), - makefield("heap", types.Types[types.TBOOL]), - makefield("openDefer", types.Types[types.TBOOL]), - makefield("sp", types.Types[types.TUINTPTR]), - makefield("pc", types.Types[types.TUINTPTR]), - // Note: the types here don't really matter. Defer structures - // are always scanned explicitly during stack copying and GC, - // so we make them uintptr type even though they are real pointers. - makefield("fn", types.Types[types.TUINTPTR]), - makefield("_panic", types.Types[types.TUINTPTR]), - makefield("link", types.Types[types.TUINTPTR]), - makefield("framepc", types.Types[types.TUINTPTR]), - makefield("varp", types.Types[types.TUINTPTR]), - makefield("fd", types.Types[types.TUINTPTR]), - makefield("args", argtype), - } - - // build struct holding the above fields - s := types.NewStruct(types.NoPkg, fields) - s.SetNoalg(true) - types.CalcStructSize(s) - return s -} - // methods returns the methods of the non-interface type t, sorted by name. // Generates stub functions as needed. -func methods(t *types.Type) []*Sig { +func methods(t *types.Type) []*typeSig { // method type mt := types.ReceiverBaseType(t) @@ -363,7 +325,7 @@ func methods(t *types.Type) []*Sig { // make list of methods for t, // generating code if necessary. - var ms []*Sig + var ms []*typeSig for _, f := range mt.AllMethods().Slice() { if !f.IsMethod() { base.Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f) @@ -388,7 +350,7 @@ func methods(t *types.Type) []*Sig { continue } - sig := &Sig{ + sig := &typeSig{ name: method, isym: ir.MethodSym(it, method), tsym: ir.MethodSym(t, method), @@ -418,8 +380,8 @@ func methods(t *types.Type) []*Sig { } // imethods returns the methods of the interface type t, sorted by name. 
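// A typeSig records two method symbols because a method may need distinct
// entry points: tsym is the direct method on t, while isym is the form
// invoked through an interface; methods() generates stub wrappers (via
// genwrapper, below) when one of the two does not already exist. At the
// source level the pair looks like:
//
//	type T struct{}
//	func (T) M() {}
//
//	var _ = T.M    // direct method
//	var _ = (*T).M // auto-generated pointer wrapper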
-func imethods(t *types.Type) []*Sig { - var methods []*Sig +func imethods(t *types.Type) []*typeSig { + var methods []*typeSig for _, f := range t.Fields().Slice() { if f.Type.Kind() != types.TFUNC || f.Sym == nil { continue @@ -434,7 +396,7 @@ func imethods(t *types.Type) []*Sig { } } - sig := &Sig{ + sig := &typeSig{ name: f.Sym, mtype: f.Type, type_: typecheck.NewMethodType(f.Type, nil), @@ -622,7 +584,7 @@ func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int { } for _, a := range m { - dtypesym(a.type_) + WriteType(a.type_) } ot = dgopkgpathOff(lsym, ot, typePkg(t)) @@ -673,7 +635,7 @@ func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int { nsym := dname(a.name.Name, "", pkg, exported) ot = objw.SymPtrOff(lsym, ot, nsym) - ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype)) + ot = dmethodptrOff(lsym, ot, WriteType(a.mtype)) ot = dmethodptrOff(lsym, ot, a.isym.Linksym()) ot = dmethodptrOff(lsym, ot, a.tsym.Linksym()) } @@ -750,7 +712,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { if t.Sym() != nil || methods(tptr) != nil { sptrWeak = false } - sptr = dtypesym(tptr) + sptr = WriteType(tptr) } gcsym, useGCProg, ptrdata := dgcsym(t) @@ -782,7 +744,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { if t.Sym() != nil && t.Sym().Name != "" { tflag |= tflagNamed } - if IsRegularMemory(t) { + if isRegularMemory(t) { tflag |= tflagRegularMemory } @@ -848,20 +810,20 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { return ot } -// tracksym returns the symbol for tracking use of field/method f, assumed +// TrackSym returns the symbol for tracking use of field/method f, assumed // to be a member of struct/interface type t. -func tracksym(t *types.Type, f *types.Field) *types.Sym { +func TrackSym(t *types.Type, f *types.Field) *types.Sym { return ir.Pkgs.Track.Lookup(t.ShortString() + "." + f.Sym.Name) } -func typesymprefix(prefix string, t *types.Type) *types.Sym { +func TypeSymPrefix(prefix string, t *types.Type) *types.Sym { p := prefix + "." + t.ShortString() s := types.TypeSymLookup(p) // This function is for looking up type-related generated functions // (e.g. eq and hash). Make sure they are indeed generated. 
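// The generated helpers looked up through TypeSymPrefix come in pairs of a
// function body and a closure symbol wrapping it:
//
//	TypeSymPrefix(".hash", t)     // hash function body (genhash)
//	TypeSymPrefix(".hashfunc", t) // its closure
//	TypeSymPrefix(".eq", t)       // equality function body (geneq)
//	TypeSymPrefix(".eqfunc", t)   // its closure
//	TypeSymPrefix(".gcprog", t)   // GC program data (dgcprog)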
signatmu.Lock() - addsignat(t) + NeedRuntimeType(t) signatmu.Unlock() //print("algsym: %s -> %+S\n", p, s); @@ -869,19 +831,19 @@ func typesymprefix(prefix string, t *types.Type) *types.Sym { return s } -func typenamesym(t *types.Type) *types.Sym { +func TypeSym(t *types.Type) *types.Sym { if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() { base.Fatalf("typenamesym %v", t) } s := types.TypeSym(t) signatmu.Lock() - addsignat(t) + NeedRuntimeType(t) signatmu.Unlock() return s } -func typename(t *types.Type) *ir.AddrExpr { - s := typenamesym(t) +func TypePtr(t *types.Type) *ir.AddrExpr { + s := TypeSym(t) if s.Def == nil { n := ir.NewNameAt(src.NoXPos, s) n.SetType(types.Types[types.TUINT8]) @@ -896,7 +858,7 @@ func typename(t *types.Type) *ir.AddrExpr { return n } -func itabname(t, itype *types.Type) *ir.AddrExpr { +func ITabAddr(t, itype *types.Type) *ir.AddrExpr { if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() { base.Fatalf("itabname(%v, %v)", t, itype) } @@ -978,7 +940,7 @@ func formalType(t *types.Type) *types.Type { return t } -func dtypesym(t *types.Type) *obj.LSym { +func WriteType(t *types.Type) *obj.LSym { t = formalType(t) if t.IsUntyped() { base.Fatalf("dtypesym %v", t) @@ -1028,9 +990,9 @@ func dtypesym(t *types.Type) *obj.LSym { case types.TARRAY: // ../../../../runtime/type.go:/arrayType - s1 := dtypesym(t.Elem()) + s1 := WriteType(t.Elem()) t2 := types.NewSlice(t.Elem()) - s2 := dtypesym(t2) + s2 := WriteType(t2) ot = dcommontype(lsym, t) ot = objw.SymPtr(lsym, ot, s1, 0) ot = objw.SymPtr(lsym, ot, s2, 0) @@ -1039,14 +1001,14 @@ func dtypesym(t *types.Type) *obj.LSym { case types.TSLICE: // ../../../../runtime/type.go:/sliceType - s1 := dtypesym(t.Elem()) + s1 := WriteType(t.Elem()) ot = dcommontype(lsym, t) ot = objw.SymPtr(lsym, ot, s1, 0) ot = dextratype(lsym, ot, t, 0) case types.TCHAN: // ../../../../runtime/type.go:/chanType - s1 := dtypesym(t.Elem()) + s1 := WriteType(t.Elem()) ot = dcommontype(lsym, t) ot = objw.SymPtr(lsym, ot, s1, 0) ot = objw.Uintptr(lsym, ot, uint64(t.ChanDir())) @@ -1054,15 +1016,15 @@ func dtypesym(t *types.Type) *obj.LSym { case types.TFUNC: for _, t1 := range t.Recvs().Fields().Slice() { - dtypesym(t1.Type) + WriteType(t1.Type) } isddd := false for _, t1 := range t.Params().Fields().Slice() { isddd = t1.IsDDD() - dtypesym(t1.Type) + WriteType(t1.Type) } for _, t1 := range t.Results().Fields().Slice() { - dtypesym(t1.Type) + WriteType(t1.Type) } ot = dcommontype(lsym, t) @@ -1082,20 +1044,20 @@ func dtypesym(t *types.Type) *obj.LSym { // Array of rtype pointers follows funcType. 
for _, t1 := range t.Recvs().Fields().Slice() { - ot = objw.SymPtr(lsym, ot, dtypesym(t1.Type), 0) + ot = objw.SymPtr(lsym, ot, WriteType(t1.Type), 0) } for _, t1 := range t.Params().Fields().Slice() { - ot = objw.SymPtr(lsym, ot, dtypesym(t1.Type), 0) + ot = objw.SymPtr(lsym, ot, WriteType(t1.Type), 0) } for _, t1 := range t.Results().Fields().Slice() { - ot = objw.SymPtr(lsym, ot, dtypesym(t1.Type), 0) + ot = objw.SymPtr(lsym, ot, WriteType(t1.Type), 0) } case types.TINTER: m := imethods(t) n := len(m) for _, a := range m { - dtypesym(a.type_) + WriteType(a.type_) } // ../../../../runtime/type.go:/interfaceType @@ -1123,14 +1085,14 @@ func dtypesym(t *types.Type) *obj.LSym { nsym := dname(a.name.Name, "", pkg, exported) ot = objw.SymPtrOff(lsym, ot, nsym) - ot = objw.SymPtrOff(lsym, ot, dtypesym(a.type_)) + ot = objw.SymPtrOff(lsym, ot, WriteType(a.type_)) } // ../../../../runtime/type.go:/mapType case types.TMAP: - s1 := dtypesym(t.Key()) - s2 := dtypesym(t.Elem()) - s3 := dtypesym(bmap(t)) + s1 := WriteType(t.Key()) + s2 := WriteType(t.Elem()) + s3 := WriteType(MapBucketType(t)) hasher := genhash(t.Key()) ot = dcommontype(lsym, t) @@ -1154,7 +1116,7 @@ func dtypesym(t *types.Type) *obj.LSym { } else { ot = objw.Uint8(lsym, ot, uint8(t.Elem().Width)) } - ot = objw.Uint16(lsym, ot, uint16(bmap(t).Width)) + ot = objw.Uint16(lsym, ot, uint16(MapBucketType(t).Width)) if types.IsReflexive(t.Key()) { flags |= 4 // reflexive key } @@ -1177,7 +1139,7 @@ func dtypesym(t *types.Type) *obj.LSym { } // ../../../../runtime/type.go:/ptrType - s1 := dtypesym(t.Elem()) + s1 := WriteType(t.Elem()) ot = dcommontype(lsym, t) ot = objw.SymPtr(lsym, ot, s1, 0) @@ -1188,7 +1150,7 @@ func dtypesym(t *types.Type) *obj.LSym { case types.TSTRUCT: fields := t.Fields().Slice() for _, t1 := range fields { - dtypesym(t1.Type) + WriteType(t1.Type) } // All non-exported struct field names within a struct @@ -1216,7 +1178,7 @@ func dtypesym(t *types.Type) *obj.LSym { for _, f := range fields { // ../../../../runtime/type.go:/structField ot = dnameField(lsym, ot, spkg, f) - ot = objw.SymPtr(lsym, ot, dtypesym(f.Type), 0) + ot = objw.SymPtr(lsym, ot, WriteType(f.Type), 0) offsetAnon := uint64(f.Offset) << 1 if offsetAnon>>1 != uint64(f.Offset) { base.Fatalf("%v: bad field offset for %s", t, f.Sym.Name) @@ -1257,9 +1219,9 @@ func dtypesym(t *types.Type) *obj.LSym { return lsym } -// ifaceMethodOffset returns the offset of the i-th method in the interface +// InterfaceMethodOffset returns the offset of the i-th method in the interface // type descriptor, ityp. -func ifaceMethodOffset(ityp *types.Type, i int64) int64 { +func InterfaceMethodOffset(ityp *types.Type, i int64) int64 { // interface type descriptor layout is struct { // _type // commonSize // pkgpath // 1 word @@ -1273,7 +1235,7 @@ func ifaceMethodOffset(ityp *types.Type, i int64) int64 { // for each itabEntry, gather the methods on // the concrete type that implement the interface -func peekitabs() { +func CompileITabs() { for i := range itabs { tab := &itabs[i] methods := genfun(tab.t, tab.itype) @@ -1319,11 +1281,11 @@ func genfun(t, it *types.Type) []*obj.LSym { return out } -// itabsym uses the information gathered in +// ITabSym uses the information gathered in // peekitabs to de-virtualize interface methods. // Since this is called by the SSA backend, it shouldn't // generate additional Nodes, Syms, etc. 
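// The offset passed to ITabSym indexes the fun array of the itab whose
// entries CompileITabs resolved; the layout it walks mirrors the runtime's
// (runtime/runtime2.go):
//
//	type itab struct {
//		inter *interfacetype
//		_type *_type
//		hash  uint32 // copy of _type.hash, used by type switches
//		_     [4]byte
//		fun   [1]uintptr // variable sized; fun[0]==0 means not implemented
//	}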
-func itabsym(it *obj.LSym, offset int64) *obj.LSym { +func ITabSym(it *obj.LSym, offset int64) *obj.LSym { var syms []*obj.LSym if it == nil { return nil @@ -1348,24 +1310,15 @@ func itabsym(it *obj.LSym, offset int64) *obj.LSym { return syms[methodnum] } -// addsignat ensures that a runtime type descriptor is emitted for t. -func addsignat(t *types.Type) { +// NeedRuntimeType ensures that a runtime type descriptor is emitted for t. +func NeedRuntimeType(t *types.Type) { if _, ok := signatset[t]; !ok { signatset[t] = struct{}{} signatslice = append(signatslice, t) } } -func addsignats(dcls []ir.Node) { - // copy types from dcl list to signatset - for _, n := range dcls { - if n.Op() == ir.OTYPE { - addsignat(n.Type()) - } - } -} - -func dumpsignats() { +func WriteRuntimeTypes() { // Process signatset. Use a loop, as dtypesym adds // entries to signatset while it is being processed. signats := make([]typeAndStr, len(signatslice)) @@ -1380,15 +1333,15 @@ func dumpsignats() { sort.Sort(typesByString(signats)) for _, ts := range signats { t := ts.t - dtypesym(t) + WriteType(t) if t.Sym() != nil { - dtypesym(types.NewPtr(t)) + WriteType(types.NewPtr(t)) } } } } -func dumptabs() { +func WriteTabs() { // process itabs for _, i := range itabs { // dump empty itab symbol into i.sym @@ -1399,8 +1352,8 @@ func dumptabs() { // _ [4]byte // fun [1]uintptr // variable sized // } - o := objw.SymPtr(i.lsym, 0, dtypesym(i.itype), 0) - o = objw.SymPtr(i.lsym, o, dtypesym(i.t), 0) + o := objw.SymPtr(i.lsym, 0, WriteType(i.itype), 0) + o = objw.SymPtr(i.lsym, o, WriteType(i.t), 0) o = objw.Uint32(i.lsym, o, types.TypeHash(i.t)) // copy of type hash o += 4 // skip unused field for _, fn := range genfun(i.t, i.itype) { @@ -1423,7 +1376,7 @@ func dumptabs() { // typ typeOff // pointer to symbol // } nsym := dname(p.s.Name, "", nil, true) - tsym := dtypesym(p.t) + tsym := WriteType(p.t) ot = objw.SymPtrOff(s, ot, nsym) ot = objw.SymPtrOff(s, ot, tsym) // Plugin exports symbols as interfaces. Mark their types @@ -1441,14 +1394,14 @@ func dumptabs() { } } -func dumpimportstrings() { +func WriteImportStrings() { // generate import strings for imported packages for _, p := range types.ImportedPkgList() { dimportpath(p) } } -func dumpbasictypes() { +func WriteBasicTypes() { // do basic types if compiling package runtime. // they have to be in at least one package, // and runtime is always loaded implicitly, @@ -1457,16 +1410,16 @@ func dumpbasictypes() { // but using runtime means fewer copies in object files. if base.Ctxt.Pkgpath == "runtime" { for i := types.Kind(1); i <= types.TBOOL; i++ { - dtypesym(types.NewPtr(types.Types[i])) + WriteType(types.NewPtr(types.Types[i])) } - dtypesym(types.NewPtr(types.Types[types.TSTRING])) - dtypesym(types.NewPtr(types.Types[types.TUNSAFEPTR])) + WriteType(types.NewPtr(types.Types[types.TSTRING])) + WriteType(types.NewPtr(types.Types[types.TUNSAFEPTR])) // emit type structs for error and func(error) string. // The latter is the type of an auto-generated wrapper. - dtypesym(types.NewPtr(types.ErrorType)) + WriteType(types.NewPtr(types.ErrorType)) - dtypesym(typecheck.NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, types.ErrorType)}, []*ir.Field{ir.NewField(base.Pos, nil, nil, types.Types[types.TSTRING])})) + WriteType(typecheck.NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, types.ErrorType)}, []*ir.Field{ir.NewField(base.Pos, nil, nil, types.Types[types.TSTRING])})) // add paths for runtime and main, which 6l imports implicitly. 
dimportpath(ir.Pkgs.Runtime) @@ -1611,8 +1564,8 @@ func dgcprog(t *types.Type) (*obj.LSym, int64) { if t.Width == types.BADWIDTH { base.Fatalf("dgcprog: %v badwidth", t) } - lsym := typesymprefix(".gcprog", t).Linksym() - var p GCProg + lsym := TypeSymPrefix(".gcprog", t).Linksym() + var p gcProg p.init(lsym) p.emit(t, 0) offset := p.w.BitIndex() * int64(types.PtrSize) @@ -1623,13 +1576,13 @@ func dgcprog(t *types.Type) (*obj.LSym, int64) { return lsym, offset } -type GCProg struct { +type gcProg struct { lsym *obj.LSym symoff int w gcprog.Writer } -func (p *GCProg) init(lsym *obj.LSym) { +func (p *gcProg) init(lsym *obj.LSym) { p.lsym = lsym p.symoff = 4 // first 4 bytes hold program length p.w.Init(p.writeByte) @@ -1639,11 +1592,11 @@ func (p *GCProg) init(lsym *obj.LSym) { } } -func (p *GCProg) writeByte(x byte) { +func (p *gcProg) writeByte(x byte) { p.symoff = objw.Uint8(p.lsym, p.symoff, x) } -func (p *GCProg) end() { +func (p *gcProg) end() { p.w.End() objw.Uint32(p.lsym, 0, uint32(p.symoff-4)) objw.Global(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL) @@ -1652,7 +1605,7 @@ func (p *GCProg) end() { } } -func (p *GCProg) emit(t *types.Type, offset int64) { +func (p *gcProg) emit(t *types.Type, offset int64) { types.CalcSize(t) if !t.HasPointers() { return @@ -1707,14 +1660,14 @@ func (p *GCProg) emit(t *types.Type, offset int64) { } } -// zeroaddr returns the address of a symbol with at least +// ZeroAddr returns the address of a symbol with at least // size bytes of zeros. -func zeroaddr(size int64) ir.Node { +func ZeroAddr(size int64) ir.Node { if size >= 1<<31 { base.Fatalf("map elem too big %d", size) } - if zerosize < size { - zerosize = size + if ZeroSize < size { + ZeroSize = size } s := ir.Pkgs.Map.Lookup("zero") if s.Def == nil { @@ -1729,3 +1682,155 @@ func zeroaddr(size int64) ir.Node { z.SetTypecheck(1) return z } + +func CollectPTabs() { + if !base.Ctxt.Flag_dynlink || types.LocalPkg.Name != "main" { + return + } + for _, exportn := range typecheck.Target.Exports { + s := exportn.Sym() + nn := ir.AsNode(s.Def) + if nn == nil { + continue + } + if nn.Op() != ir.ONAME { + continue + } + n := nn.(*ir.Name) + if !types.IsExported(s.Name) { + continue + } + if s.Pkg.Name != "main" { + continue + } + if n.Type().Kind() == types.TFUNC && n.Class_ == ir.PFUNC { + // function + ptabs = append(ptabs, ptabEntry{s: s, t: s.Def.Type()}) + } else { + // variable + ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(s.Def.Type())}) + } + } +} + +// Generate a wrapper function to convert from +// a receiver of type T to a receiver of type U. +// That is, +// +// func (t T) M() { +// ... +// } +// +// already exists; this function generates +// +// func (u U) M() { +// u.M() +// } +// +// where the types T and U are such that u.M() is valid +// and calls the T.M method. +// The resulting function is for use in method tables. +// +// rcvr - U +// method - M func (t T)(), a TFIELD type struct +// newnam - the eventual mangled name of this function +func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { + if false && base.Flag.LowerR != 0 { + fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam) + } + + // Only generate (*T).M wrappers for T.M in T's own package. + if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && + rcvr.Elem().Sym() != nil && rcvr.Elem().Sym().Pkg != types.LocalPkg { + return + } + + // Only generate I.M wrappers for I in I's own package + // but keep doing it for error.Error (was issue #29304). 
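// At the source level, the wrapper is receiver adjustment plus a call. Given
//
//	type T struct{}
//	func (t T) M() {}
//	type U struct{ T }
//
// genwrapper for U's promoted M emits, in effect,
//
//	func (u U) M() { u.T.M() }
//
// while the (*T).M wrapper first nil-checks the receiver (calling
// runtime.panicwrap for a better error message) and then calls, or
// tail-calls, T.M.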
+ if rcvr.IsInterface() && rcvr.Sym() != nil && rcvr.Sym().Pkg != types.LocalPkg && rcvr != types.ErrorType { + return + } + + base.Pos = base.AutogeneratedPos + typecheck.DeclContext = ir.PEXTERN + + tfn := ir.NewFuncType(base.Pos, + ir.NewField(base.Pos, typecheck.Lookup(".this"), nil, rcvr), + typecheck.NewFuncParams(method.Type.Params(), true), + typecheck.NewFuncParams(method.Type.Results(), false)) + + fn := typecheck.DeclFunc(newnam, tfn) + fn.SetDupok(true) + + nthis := ir.AsNode(tfn.Type().Recv().Nname) + + methodrcvr := method.Type.Recv().Type + + // generate nil pointer check for better error + if rcvr.IsPtr() && rcvr.Elem() == methodrcvr { + // generating wrapper from *T to T. + n := ir.NewIfStmt(base.Pos, nil, nil, nil) + n.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, nthis, typecheck.NodNil()) + call := ir.NewCallExpr(base.Pos, ir.OCALL, typecheck.LookupRuntime("panicwrap"), nil) + n.Body = []ir.Node{call} + fn.Body.Append(n) + } + + dot := typecheck.AddImplicitDots(ir.NewSelectorExpr(base.Pos, ir.OXDOT, nthis, method.Sym)) + + // generate call + // It's not possible to use a tail call when dynamic linking on ppc64le. The + // bad scenario is when a local call is made to the wrapper: the wrapper will + // call the implementation, which might be in a different module and so set + // the TOC to the appropriate value for that module. But if it returns + // directly to the wrapper's caller, nothing will reset it to the correct + // value for that function. + if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) { + // generate tail call: adjust pointer receiver and jump to embedded method. + left := dot.X // skip final .M + if !left.Type().IsPtr() { + left = typecheck.NodAddr(left) + } + as := ir.NewAssignStmt(base.Pos, nthis, typecheck.ConvNop(left, rcvr)) + fn.Body.Append(as) + fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.ORETJMP, ir.MethodSym(methodrcvr, method.Sym))) + } else { + fn.SetWrapper(true) // ignore frame for panic+recover matching + call := ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil) + call.Args.Set(ir.ParamNames(tfn.Type())) + call.IsDDD = tfn.Type().IsVariadic() + if method.Type.NumResults() > 0 { + ret := ir.NewReturnStmt(base.Pos, nil) + ret.Results = []ir.Node{call} + fn.Body.Append(ret) + } else { + fn.Body.Append(call) + } + } + + if false && base.Flag.LowerR != 0 { + ir.DumpList("genwrapper body", fn.Body) + } + + typecheck.FinishFuncBody() + if base.Debug.DclStack != 0 { + types.CheckDclstack() + } + + typecheck.Func(fn) + ir.CurFunc = fn + typecheck.Stmts(fn.Body) + + // Inline calls within (*T).M wrappers. This is safe because we only + // generate those wrappers within the same compilation unit as (T).M. + // TODO(mdempsky): Investigate why we can't enable this more generally. + if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym() != nil { + inline.InlineCalls(fn) + } + escape.Batch([]*ir.Func{fn}, false) + + ir.CurFunc = nil + typecheck.Target.Decls = append(typecheck.Target.Decls, fn) +} + +var ZeroSize int64 From 6c34d2f42077bd7757c942c8d1b466366190b45a Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 23 Dec 2020 00:57:10 -0500 Subject: [PATCH 236/474] [dev.regabi] cmd/compile: split out package ssagen [generated] [git-generate] cd src/cmd/compile/internal/gc rf ' # maxOpenDefers is declared in ssa.go but used only by walk. 
mv maxOpenDefers walk.go # gc.Arch -> ssagen.Arch # It is not as nice but will do for now. mv Arch ArchInfo mv thearch Arch mv Arch ArchInfo arch.go # Pull dwarf out of pgen.go. mv debuginfo declPos createDwarfVars preInliningDcls \ createSimpleVars createSimpleVar \ createComplexVars createComplexVar \ dwarf.go # Pull high-level compilation out of pgen.go, # leaving only the SSA code. mv compilequeue funccompile compile compilenow \ compileFunctions isInlinableButNotInlined \ initLSym \ compile.go mv BoundsCheckFunc GCWriteBarrierReg ssa.go mv largeStack largeStackFrames CheckLargeStacks pgen.go # All that is left in dcl.go is the nowritebarrierrecCheck mv dcl.go nowb.go # Export API and unexport non-API. mv initssaconfig InitConfig mv isIntrinsicCall IsIntrinsicCall mv ssaDumpInline DumpInline mv initSSATables InitTables mv initSSAEnv InitEnv mv compileSSA Compile mv stackOffset StackOffset mv canSSAType TypeOK mv SSAGenState State mv FwdRefAux fwdRefAux mv cgoSymABIs CgoSymABIs mv readSymABIs ReadSymABIs mv initLSym InitLSym mv useABIWrapGen symabiDefs CgoSymABIs ReadSymABIs InitLSym selectLSym makeABIWrapper setupTextLSym abi.go mv arch.go abi.go nowb.go phi.go pgen.go pgen_test.go ssa.go cmd/compile/internal/ssagen ' rm go.go gsubr.go Change-Id: I47fad6cbf1d1e583fd9139003a08401d7cd048a1 Reviewed-on: https://go-review.googlesource.com/c/go/+/279476 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/amd64/galign.go | 4 +- src/cmd/compile/internal/amd64/ssa.go | 80 +-- src/cmd/compile/internal/arm/galign.go | 6 +- src/cmd/compile/internal/arm/ssa.go | 40 +- src/cmd/compile/internal/arm64/galign.go | 6 +- src/cmd/compile/internal/arm64/ssa.go | 44 +- src/cmd/compile/internal/gc/abiutils_test.go | 15 +- src/cmd/compile/internal/gc/compile.go | 177 ++++++ .../compile/internal/gc/{pgen.go => dwarf.go} | 568 +++--------------- src/cmd/compile/internal/gc/main.go | 164 +---- src/cmd/compile/internal/gc/racewalk.go | 3 +- src/cmd/compile/internal/gc/range.go | 3 +- src/cmd/compile/internal/gc/subr.go | 23 +- src/cmd/compile/internal/gc/walk.go | 23 +- src/cmd/compile/internal/mips/galign.go | 6 +- src/cmd/compile/internal/mips/ssa.go | 34 +- src/cmd/compile/internal/mips64/galign.go | 6 +- src/cmd/compile/internal/mips64/ssa.go | 32 +- src/cmd/compile/internal/ppc64/galign.go | 4 +- src/cmd/compile/internal/ppc64/ssa.go | 34 +- src/cmd/compile/internal/riscv64/galign.go | 4 +- src/cmd/compile/internal/riscv64/ssa.go | 36 +- src/cmd/compile/internal/s390x/galign.go | 4 +- src/cmd/compile/internal/s390x/ssa.go | 56 +- .../internal/{gc/gsubr.go => ssagen/abi.go} | 342 +++++++---- .../internal/{gc/go.go => ssagen/arch.go} | 22 +- .../internal/{gc/dcl.go => ssagen/nowb.go} | 5 +- src/cmd/compile/internal/ssagen/pgen.go | 279 +++++++++ .../internal/{gc => ssagen}/pgen_test.go | 9 +- .../compile/internal/{gc => ssagen}/phi.go | 23 +- .../compile/internal/{gc => ssagen}/ssa.go | 154 ++--- src/cmd/compile/internal/wasm/ssa.go | 36 +- src/cmd/compile/internal/x86/galign.go | 4 +- src/cmd/compile/internal/x86/ssa.go | 64 +- src/cmd/compile/main.go | 3 +- 35 files changed, 1167 insertions(+), 1146 deletions(-) create mode 100644 src/cmd/compile/internal/gc/compile.go rename src/cmd/compile/internal/gc/{pgen.go => dwarf.go} (55%) rename src/cmd/compile/internal/{gc/gsubr.go => ssagen/abi.go} (69%) rename src/cmd/compile/internal/{gc/go.go => ssagen/arch.go} (68%) rename src/cmd/compile/internal/{gc/dcl.go => ssagen/nowb.go} (99%) create mode 100644 
src/cmd/compile/internal/ssagen/pgen.go rename src/cmd/compile/internal/{gc => ssagen}/pgen_test.go (99%) rename src/cmd/compile/internal/{gc => ssagen}/phi.go (97%) rename src/cmd/compile/internal/{gc => ssagen}/ssa.go (98%) diff --git a/src/cmd/compile/internal/amd64/galign.go b/src/cmd/compile/internal/amd64/galign.go index af58440502e00..ce1c402902f7e 100644 --- a/src/cmd/compile/internal/amd64/galign.go +++ b/src/cmd/compile/internal/amd64/galign.go @@ -5,13 +5,13 @@ package amd64 import ( - "cmd/compile/internal/gc" + "cmd/compile/internal/ssagen" "cmd/internal/obj/x86" ) var leaptr = x86.ALEAQ -func Init(arch *gc.Arch) { +func Init(arch *ssagen.ArchInfo) { arch.LinkArch = &x86.Linkamd64 arch.REGSP = x86.REGSP arch.MAXWIDTH = 1 << 50 diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 0150bd296ae1e..da355c49d1e2a 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -9,17 +9,17 @@ import ( "math" "cmd/compile/internal/base" - "cmd/compile/internal/gc" "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/x86" ) // markMoves marks any MOVXconst ops that need to avoid clobbering flags. -func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) { +func ssaMarkMoves(s *ssagen.State, b *ssa.Block) { flive := b.FlagsLiveAtEnd for _, c := range b.ControlValues() { flive = c.Type.IsFlags() || flive @@ -112,7 +112,7 @@ func moveByType(t *types.Type) obj.As { // dest := dest(To) op src(From) // and also returns the created obj.Prog so it // may be further adjusted (offset, scale, etc). -func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog { +func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog { p := s.Prog(op) p.From.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG @@ -166,7 +166,7 @@ func duff(size int64) (int64, int64) { return off, adj } -func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { +func ssaGenValue(s *ssagen.State, v *ssa.Value) { switch v.Op { case ssa.OpAMD64VFMADD231SD: p := s.Prog(v.Op.Asm()) @@ -632,12 +632,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = o } - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) case ssa.OpAMD64LEAQ, ssa.OpAMD64LEAL, ssa.OpAMD64LEAW: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB, @@ -673,7 +673,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Args[1].Reg() case ssa.OpAMD64CMPQconstload, ssa.OpAMD64CMPLconstload, ssa.OpAMD64CMPWconstload, ssa.OpAMD64CMPBconstload: @@ -681,20 +681,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux2(&p.From, v, sc.Off()) + ssagen.AddAux2(&p.From, v, sc.Off()) p.To.Type = obj.TYPE_CONST p.To.Offset = sc.Val() case ssa.OpAMD64CMPQloadidx8, ssa.OpAMD64CMPQloadidx1, ssa.OpAMD64CMPLloadidx4, ssa.OpAMD64CMPLloadidx1, ssa.OpAMD64CMPWloadidx2, ssa.OpAMD64CMPWloadidx1, ssa.OpAMD64CMPBloadidx1: p := s.Prog(v.Op.Asm()) memIdx(&p.From, v) - gc.AddAux(&p.From, v) + 
ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Args[2].Reg() case ssa.OpAMD64CMPQconstloadidx8, ssa.OpAMD64CMPQconstloadidx1, ssa.OpAMD64CMPLconstloadidx4, ssa.OpAMD64CMPLconstloadidx1, ssa.OpAMD64CMPWconstloadidx2, ssa.OpAMD64CMPWconstloadidx1, ssa.OpAMD64CMPBconstloadidx1: sc := v.AuxValAndOff() p := s.Prog(v.Op.Asm()) memIdx(&p.From, v) - gc.AddAux2(&p.From, v, sc.Off()) + ssagen.AddAux2(&p.From, v, sc.Off()) p.To.Type = obj.TYPE_CONST p.To.Offset = sc.Val() case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst: @@ -734,14 +734,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1, ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8, ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4, ssa.OpAMD64MOVWloadidx2: p := s.Prog(v.Op.Asm()) memIdx(&p.From, v) - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore, @@ -753,7 +753,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = v.Args[1].Reg() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1, ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8, ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4, ssa.OpAMD64MOVWstoreidx2, ssa.OpAMD64ADDLmodifyidx1, ssa.OpAMD64ADDLmodifyidx4, ssa.OpAMD64ADDLmodifyidx8, ssa.OpAMD64ADDQmodifyidx1, ssa.OpAMD64ADDQmodifyidx8, @@ -765,7 +765,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[2].Reg() memIdx(&p.To, v) - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpAMD64ADDQconstmodify, ssa.OpAMD64ADDLconstmodify: sc := v.AuxValAndOff() off := sc.Off() @@ -788,7 +788,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(asm) p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux2(&p.To, v, off) + ssagen.AddAux2(&p.To, v, off) break } fallthrough @@ -803,7 +803,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Offset = val p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux2(&p.To, v, off) + ssagen.AddAux2(&p.To, v, off) case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst: p := s.Prog(v.Op.Asm()) @@ -812,7 +812,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Offset = sc.Val() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux2(&p.To, v, sc.Off()) + ssagen.AddAux2(&p.To, v, sc.Off()) case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1, ssa.OpAMD64ADDLconstmodifyidx1, ssa.OpAMD64ADDLconstmodifyidx4, ssa.OpAMD64ADDLconstmodifyidx8, ssa.OpAMD64ADDQconstmodifyidx1, ssa.OpAMD64ADDQconstmodifyidx8, ssa.OpAMD64ANDLconstmodifyidx1, 
ssa.OpAMD64ANDLconstmodifyidx4, ssa.OpAMD64ANDLconstmodifyidx8, ssa.OpAMD64ANDQconstmodifyidx1, ssa.OpAMD64ANDQconstmodifyidx8, @@ -837,7 +837,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Type = obj.TYPE_NONE } memIdx(&p.To, v) - gc.AddAux2(&p.To, v, sc.Off()) + ssagen.AddAux2(&p.To, v, sc.Off()) case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX, ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ, ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS: @@ -867,7 +867,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[1].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() if v.Reg() != v.Args[0].Reg() { @@ -893,7 +893,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = r p.From.Index = i - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() if v.Reg() != v.Args[0].Reg() { @@ -951,7 +951,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { return } p := s.Prog(loadByType(v.Type)) - gc.AddrAuto(&p.From, v.Args[0]) + ssagen.AddrAuto(&p.From, v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() @@ -963,16 +963,16 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(storeByType(v.Type)) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() - gc.AddrAuto(&p.To, v) + ssagen.AddrAuto(&p.To, v) case ssa.OpAMD64LoweredHasCPUFeature: p := s.Prog(x86.AMOVBQZX) p.From.Type = obj.TYPE_MEM - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpAMD64LoweredGetClosurePtr: // Closure pointer is DX. - gc.CheckLoweredGetClosurePtr(v) + ssagen.CheckLoweredGetClosurePtr(v) case ssa.OpAMD64LoweredGetG: r := v.Reg() // See the comments in cmd/internal/obj/x86/obj6.go @@ -1029,13 +1029,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN // arg0 is in DI. Set sym to match where regalloc put arg1. 
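The per-register dispatch used here can be sketched on its own: there is one write-barrier stub per register that may hold the pointer argument, and the emitted CALL targets whichever stub matches where regalloc placed it, so no extra move is needed. A minimal sketch; the register set and stub names below are hypothetical, only the lookup pattern is taken from the code above:

package main

import "fmt"

type reg int16

const (
	regAX reg = iota
	regCX
	regBX
)

// One stub symbol per register that may hold the pointer argument
// (names are placeholders, not real runtime symbols).
var gcWriteBarrierReg = map[reg]string{
	regAX: "runtime.gcWriteBarrier_ax",
	regCX: "runtime.gcWriteBarrier_cx",
	regBX: "runtime.gcWriteBarrier_bx",
}

func emitWriteBarrierCall(argReg reg) {
	// Pick the stub matching where regalloc put the argument.
	fmt.Println("CALL", gcWriteBarrierReg[argReg])
}

func main() { emitWriteBarrierCall(regCX) }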
- p.To.Sym = gc.GCWriteBarrierReg[v.Args[1].Reg()] + p.To.Sym = ssagen.GCWriteBarrierReg[v.Args[1].Reg()] case ssa.OpAMD64LoweredPanicBoundsA, ssa.OpAMD64LoweredPanicBoundsB, ssa.OpAMD64LoweredPanicBoundsC: p := s.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.BoundsCheckFunc[v.AuxInt] + p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt] s.UseArgs(int64(2 * types.PtrSize)) // space used in callee args area by assembly stubs case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL, @@ -1117,7 +1117,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpAMD64SETNEF: p := s.Prog(v.Op.Asm()) @@ -1173,7 +1173,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg0() case ssa.OpAMD64XCHGB, ssa.OpAMD64XCHGL, ssa.OpAMD64XCHGQ: @@ -1186,7 +1186,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = r p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[1].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpAMD64XADDLlock, ssa.OpAMD64XADDQlock: r := v.Reg0() if r != v.Args[0].Reg() { @@ -1198,7 +1198,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = r p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[1].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpAMD64CMPXCHGLlock, ssa.OpAMD64CMPXCHGQlock: if v.Args[1].Reg() != x86.REG_AX { v.Fatalf("input[1] not in AX %s", v.LongString()) @@ -1209,7 +1209,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = v.Args[2].Reg() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) p = s.Prog(x86.ASETEQ) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg0() @@ -1220,20 +1220,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = v.Args[1].Reg() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpClobber: p := s.Prog(x86.AMOVL) p.From.Type = obj.TYPE_CONST p.From.Offset = 0xdeaddead p.To.Type = obj.TYPE_MEM p.To.Reg = x86.REG_SP - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) p = s.Prog(x86.AMOVL) p.From.Type = obj.TYPE_CONST p.From.Offset = 0xdeaddead p.To.Type = obj.TYPE_MEM p.To.Reg = x86.REG_SP - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) p.To.Offset += 4 default: v.Fatalf("genValue not implemented: %s", v.LongString()) @@ -1259,22 +1259,22 @@ var blockJump = [...]struct { ssa.BlockAMD64NAN: {x86.AJPS, x86.AJPC}, } -var eqfJumps = [2][2]gc.IndexJump{ +var eqfJumps = [2][2]ssagen.IndexJump{ {{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0] {{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1] } -var nefJumps = [2][2]gc.IndexJump{ +var nefJumps = [2][2]ssagen.IndexJump{ {{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0] {{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1] } -func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { +func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { switch b.Kind { case ssa.BlockPlain: if b.Succs[0].Block() != next { p := s.Prog(obj.AJMP) p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) } case 
ssa.BlockDefer: // defer returns in rax: @@ -1287,11 +1287,11 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { p.To.Reg = x86.REG_AX p = s.Prog(x86.AJNE) p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()}) if b.Succs[0].Block() != next { p := s.Prog(obj.AJMP) p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) } case ssa.BlockExit: case ssa.BlockRet: diff --git a/src/cmd/compile/internal/arm/galign.go b/src/cmd/compile/internal/arm/galign.go index 20e2f43a91c0d..81959ae0abcb3 100644 --- a/src/cmd/compile/internal/arm/galign.go +++ b/src/cmd/compile/internal/arm/galign.go @@ -5,13 +5,13 @@ package arm import ( - "cmd/compile/internal/gc" "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" "cmd/internal/obj/arm" "cmd/internal/objabi" ) -func Init(arch *gc.Arch) { +func Init(arch *ssagen.ArchInfo) { arch.LinkArch = &arm.Linkarm arch.REGSP = arm.REGSP arch.MAXWIDTH = (1 << 32) - 1 @@ -20,7 +20,7 @@ func Init(arch *gc.Arch) { arch.Ginsnop = ginsnop arch.Ginsnopdefer = ginsnop - arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {} + arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {} arch.SSAGenValue = ssaGenValue arch.SSAGenBlock = ssaGenBlock } diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go index 30eae59331c3a..729d2dab2d809 100644 --- a/src/cmd/compile/internal/arm/ssa.go +++ b/src/cmd/compile/internal/arm/ssa.go @@ -10,10 +10,10 @@ import ( "math/bits" "cmd/compile/internal/base" - "cmd/compile/internal/gc" "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/arm" @@ -93,7 +93,7 @@ func makeshift(reg int16, typ int64, s int64) shift { } // genshift generates a Prog for r = r0 op (r1 shifted by n) -func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog { +func genshift(s *ssagen.State, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog { p := s.Prog(as) p.From.Type = obj.TYPE_SHIFT p.From.Offset = int64(makeshift(r1, typ, n)) @@ -111,7 +111,7 @@ func makeregshift(r1 int16, typ int64, r2 int16) shift { } // genregshift generates a Prog for r = r0 op (r1 shifted by r2) -func genregshift(s *gc.SSAGenState, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog { +func genregshift(s *ssagen.State, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog { p := s.Prog(as) p.From.Type = obj.TYPE_SHIFT p.From.Offset = int64(makeregshift(r1, typ, r2)) @@ -145,7 +145,7 @@ func getBFC(v uint32) (uint32, uint32) { return 0xffffffff, 0 } -func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { +func ssaGenValue(s *ssagen.State, v *ssa.Value) { switch v.Op { case ssa.OpCopy, ssa.OpARMMOVWreg: if v.Type.IsMemory() { @@ -183,7 +183,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { return } p := s.Prog(loadByType(v.Type)) - gc.AddrAuto(&p.From, v.Args[0]) + ssagen.AddrAuto(&p.From, v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpStoreReg: @@ -194,7 +194,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(storeByType(v.Type)) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() - gc.AddrAuto(&p.To, v) + ssagen.AddrAuto(&p.To, v) case ssa.OpARMADD, ssa.OpARMADC, ssa.OpARMSUB, @@ 
-545,10 +545,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { v.Fatalf("aux is of unknown type %T", v.Aux) case *obj.LSym: wantreg = "SB" - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) case *ir.Name: wantreg = "SP" - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) case nil: // No sym, just MOVW $off(SP), R wantreg = "SP" @@ -568,7 +568,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpARMMOVBstore, @@ -581,7 +581,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = v.Args[1].Reg() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpARMMOVWloadidx, ssa.OpARMMOVBUloadidx, ssa.OpARMMOVBloadidx, ssa.OpARMMOVHUloadidx, ssa.OpARMMOVHloadidx: // this is just shift 0 bits fallthrough @@ -712,13 +712,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.BoundsCheckFunc[v.AuxInt] + p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt] s.UseArgs(8) // space used in callee args area by assembly stubs case ssa.OpARMLoweredPanicExtendA, ssa.OpARMLoweredPanicExtendB, ssa.OpARMLoweredPanicExtendC: p := s.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.ExtendCheckFunc[v.AuxInt] + p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt] s.UseArgs(12) // space used in callee args area by assembly stubs case ssa.OpARMDUFFZERO: p := s.Prog(obj.ADUFFZERO) @@ -737,7 +737,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(arm.AMOVB) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = arm.REGTMP if logopt.Enabled() { @@ -846,7 +846,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Reg = v.Reg() case ssa.OpARMLoweredGetClosurePtr: // Closure pointer is R7 (arm.REGCTXT). 
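For reference, the closures this case supports look like the following. The compiler passes the closure record in a fixed register (DX on amd64, R7 on arm, R26 on arm64, per these cases), and CheckLoweredGetClosurePtr asserts the op stays at the start of the entry block so that register is still live when it is read. A small runnable example of the kind of closure involved:

package main

import "fmt"

func counter() func() int {
	n := 0 // captured; lives in the closure record
	return func() int {
		// Inside this function, the captured n is reached
		// through the closure pointer register.
		n++
		return n
	}
}

func main() {
	c := counter()
	fmt.Println(c(), c(), c()) // 1 2 3
}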
- gc.CheckLoweredGetClosurePtr(v) + ssagen.CheckLoweredGetClosurePtr(v) case ssa.OpARMLoweredGetCallerSP: // caller's SP is FixedFrameSize below the address of the first arg p := s.Prog(arm.AMOVW) @@ -901,24 +901,24 @@ var blockJump = map[ssa.BlockKind]struct { } // To model a 'LEnoov' ('<=' without overflow checking) branching -var leJumps = [2][2]gc.IndexJump{ +var leJumps = [2][2]ssagen.IndexJump{ {{Jump: arm.ABEQ, Index: 0}, {Jump: arm.ABPL, Index: 1}}, // next == b.Succs[0] {{Jump: arm.ABMI, Index: 0}, {Jump: arm.ABEQ, Index: 0}}, // next == b.Succs[1] } // To model a 'GTnoov' ('>' without overflow checking) branching -var gtJumps = [2][2]gc.IndexJump{ +var gtJumps = [2][2]ssagen.IndexJump{ {{Jump: arm.ABMI, Index: 1}, {Jump: arm.ABEQ, Index: 1}}, // next == b.Succs[0] {{Jump: arm.ABEQ, Index: 1}, {Jump: arm.ABPL, Index: 0}}, // next == b.Succs[1] } -func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { +func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { switch b.Kind { case ssa.BlockPlain: if b.Succs[0].Block() != next { p := s.Prog(obj.AJMP) p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) } case ssa.BlockDefer: @@ -931,11 +931,11 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { p.Reg = arm.REG_R0 p = s.Prog(arm.ABNE) p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()}) if b.Succs[0].Block() != next { p := s.Prog(obj.AJMP) p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) } case ssa.BlockExit: diff --git a/src/cmd/compile/internal/arm64/galign.go b/src/cmd/compile/internal/arm64/galign.go index 40d6e17ae2296..d3db37e16f43b 100644 --- a/src/cmd/compile/internal/arm64/galign.go +++ b/src/cmd/compile/internal/arm64/galign.go @@ -5,12 +5,12 @@ package arm64 import ( - "cmd/compile/internal/gc" "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" "cmd/internal/obj/arm64" ) -func Init(arch *gc.Arch) { +func Init(arch *ssagen.ArchInfo) { arch.LinkArch = &arm64.Linkarm64 arch.REGSP = arm64.REGSP arch.MAXWIDTH = 1 << 50 @@ -20,7 +20,7 @@ func Init(arch *gc.Arch) { arch.Ginsnop = ginsnop arch.Ginsnopdefer = ginsnop - arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {} + arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {} arch.SSAGenValue = ssaGenValue arch.SSAGenBlock = ssaGenBlock } diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go index 9bdea3ee2a4d9..8d25fa8592d71 100644 --- a/src/cmd/compile/internal/arm64/ssa.go +++ b/src/cmd/compile/internal/arm64/ssa.go @@ -8,10 +8,10 @@ import ( "math" "cmd/compile/internal/base" - "cmd/compile/internal/gc" "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/arm64" @@ -83,7 +83,7 @@ func makeshift(reg int16, typ int64, s int64) int64 { } // genshift generates a Prog for r = r0 op (r1 shifted by n) -func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog { +func genshift(s *ssagen.State, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog { p := s.Prog(as) p.From.Type = obj.TYPE_SHIFT p.From.Offset = makeshift(r1, typ, n) 
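genshift builds a TYPE_SHIFT operand by packing the source register, shift kind, and shift amount into one integer, which is what makeshift does. A sketch of that bit-packing with an illustrative field layout; the real encodings are ISA-specific and differ between arm and arm64:

package main

import "fmt"

// Illustrative shift-kind encodings occupying bits 5-6.
const (
	shiftLL = 0 << 5 // logical left
	shiftLR = 1 << 5 // logical right
	shiftAR = 2 << 5 // arithmetic right
)

// makeshift packs: bits 0-4 register, bits 5-6 shift kind,
// bits 7-12 shift amount. Layout is for illustration only.
func makeshift(reg, typ, s int64) int64 {
	return (reg & 31) | typ | (s&63)<<7
}

func main() {
	op := makeshift(3, shiftAR, 12) // R3, arithmetic right, by 12
	fmt.Printf("reg=%d kind=%d amount=%d\n", op&31, (op>>5)&3, (op>>7)&63)
}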
@@ -112,7 +112,7 @@ func genIndexedOperand(v *ssa.Value) obj.Addr { return mop } -func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { +func ssaGenValue(s *ssagen.State, v *ssa.Value) { switch v.Op { case ssa.OpCopy, ssa.OpARM64MOVDreg: if v.Type.IsMemory() { @@ -150,7 +150,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { return } p := s.Prog(loadByType(v.Type)) - gc.AddrAuto(&p.From, v.Args[0]) + ssagen.AddrAuto(&p.From, v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpStoreReg: @@ -161,7 +161,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(storeByType(v.Type)) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() - gc.AddrAuto(&p.To, v) + ssagen.AddrAuto(&p.To, v) case ssa.OpARM64ADD, ssa.OpARM64SUB, ssa.OpARM64AND, @@ -395,10 +395,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { v.Fatalf("aux is of unknown type %T", v.Aux) case *obj.LSym: wantreg = "SB" - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) case *ir.Name: wantreg = "SP" - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) case nil: // No sym, just MOVD $off(SP), R wantreg = "SP" @@ -419,7 +419,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpARM64MOVBloadidx, @@ -446,7 +446,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg0() case ssa.OpARM64MOVBstore, @@ -463,7 +463,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = v.Args[1].Reg() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpARM64MOVBstoreidx, ssa.OpARM64MOVHstoreidx, ssa.OpARM64MOVWstoreidx, @@ -484,7 +484,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Offset = int64(v.Args[2].Reg()) p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpARM64MOVBstorezero, ssa.OpARM64MOVHstorezero, ssa.OpARM64MOVWstorezero, @@ -494,7 +494,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = arm64.REGZERO p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpARM64MOVBstorezeroidx, ssa.OpARM64MOVHstorezeroidx, ssa.OpARM64MOVWstorezeroidx, @@ -513,7 +513,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Offset = int64(arm64.REGZERO) p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpARM64BFI, ssa.OpARM64BFXIL: r := v.Reg() @@ -1027,14 +1027,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.BoundsCheckFunc[v.AuxInt] + p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt] s.UseArgs(16) // space used in callee args area by assembly stubs case ssa.OpARM64LoweredNilCheck: // Issue a load which will fault if arg is nil. 
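The comment just above describes the usual lowering of nil checks: rather than emitting an explicit branch, the compiler issues a load through the pointer and lets the runtime turn the resulting fault into a panic. A runnable illustration of the semantics being preserved, with an explicit recover standing in for the fault path:

package main

import "fmt"

type T struct{ x int }

func use(p *T) (v int, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("%v", r)
		}
	}()
	return p.x, nil // compiled as a load that faults when p == nil
}

func main() {
	fmt.Println(use(&T{x: 42})) // 42 <nil>
	fmt.Println(use(nil))       // 0 runtime error: ... nil pointer dereference
}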
p := s.Prog(arm64.AMOVB) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = arm64.REGTMP if logopt.Enabled() { @@ -1065,7 +1065,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Reg = v.Reg() case ssa.OpARM64LoweredGetClosurePtr: // Closure pointer is R26 (arm64.REGCTXT). - gc.CheckLoweredGetClosurePtr(v) + ssagen.CheckLoweredGetClosurePtr(v) case ssa.OpARM64LoweredGetCallerSP: // caller's SP is FixedFrameSize below the address of the first arg p := s.Prog(arm64.AMOVD) @@ -1134,24 +1134,24 @@ var blockJump = map[ssa.BlockKind]struct { } // To model a 'LEnoov' ('<=' without overflow checking) branching -var leJumps = [2][2]gc.IndexJump{ +var leJumps = [2][2]ssagen.IndexJump{ {{Jump: arm64.ABEQ, Index: 0}, {Jump: arm64.ABPL, Index: 1}}, // next == b.Succs[0] {{Jump: arm64.ABMI, Index: 0}, {Jump: arm64.ABEQ, Index: 0}}, // next == b.Succs[1] } // To model a 'GTnoov' ('>' without overflow checking) branching -var gtJumps = [2][2]gc.IndexJump{ +var gtJumps = [2][2]ssagen.IndexJump{ {{Jump: arm64.ABMI, Index: 1}, {Jump: arm64.ABEQ, Index: 1}}, // next == b.Succs[0] {{Jump: arm64.ABEQ, Index: 1}, {Jump: arm64.ABPL, Index: 0}}, // next == b.Succs[1] } -func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { +func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { switch b.Kind { case ssa.BlockPlain: if b.Succs[0].Block() != next { p := s.Prog(obj.AJMP) p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) } case ssa.BlockDefer: @@ -1164,11 +1164,11 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { p.Reg = arm64.REG_R0 p = s.Prog(arm64.ABNE) p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()}) if b.Succs[0].Block() != next { p := s.Prog(obj.AJMP) p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) } case ssa.BlockExit: diff --git a/src/cmd/compile/internal/gc/abiutils_test.go b/src/cmd/compile/internal/gc/abiutils_test.go index 4b2a30d00ca5e..a421a229dc7a0 100644 --- a/src/cmd/compile/internal/gc/abiutils_test.go +++ b/src/cmd/compile/internal/gc/abiutils_test.go @@ -8,6 +8,7 @@ import ( "bufio" "cmd/compile/internal/base" "cmd/compile/internal/reflectdata" + "cmd/compile/internal/ssagen" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" @@ -28,16 +29,16 @@ var configAMD64 = ABIConfig{ } func TestMain(m *testing.M) { - thearch.LinkArch = &x86.Linkamd64 - thearch.REGSP = x86.REGSP - thearch.MAXWIDTH = 1 << 50 - types.MaxWidth = thearch.MAXWIDTH - base.Ctxt = obj.Linknew(thearch.LinkArch) + ssagen.Arch.LinkArch = &x86.Linkamd64 + ssagen.Arch.REGSP = x86.REGSP + ssagen.Arch.MAXWIDTH = 1 << 50 + types.MaxWidth = ssagen.Arch.MAXWIDTH + base.Ctxt = obj.Linknew(ssagen.Arch.LinkArch) base.Ctxt.DiagFunc = base.Errorf base.Ctxt.DiagFlush = base.FlushErrors base.Ctxt.Bso = bufio.NewWriter(os.Stdout) - types.PtrSize = thearch.LinkArch.PtrSize - types.RegSize = thearch.LinkArch.RegSize + types.PtrSize = ssagen.Arch.LinkArch.PtrSize + types.RegSize = ssagen.Arch.LinkArch.RegSize types.TypeLinkSym = func(t *types.Type) *obj.LSym { return reflectdata.TypeSym(t).Linksym() } diff --git 
a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go new file mode 100644 index 0000000000000..c2a6a9e327719 --- /dev/null +++ b/src/cmd/compile/internal/gc/compile.go @@ -0,0 +1,177 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gc + +import ( + "internal/race" + "math/rand" + "sort" + "sync" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/liveness" + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" +) + +// "Portable" code generation. + +var ( + compilequeue []*ir.Func // functions waiting to be compiled +) + +func funccompile(fn *ir.Func) { + if ir.CurFunc != nil { + base.Fatalf("funccompile %v inside %v", fn.Sym(), ir.CurFunc.Sym()) + } + + if fn.Type() == nil { + if base.Errors() == 0 { + base.Fatalf("funccompile missing type") + } + return + } + + // assign parameter offsets + types.CalcSize(fn.Type()) + + if len(fn.Body) == 0 { + // Initialize ABI wrappers if necessary. + ssagen.InitLSym(fn, false) + liveness.WriteFuncMap(fn) + return + } + + typecheck.DeclContext = ir.PAUTO + ir.CurFunc = fn + compile(fn) + ir.CurFunc = nil + typecheck.DeclContext = ir.PEXTERN +} + +func compile(fn *ir.Func) { + // Set up the function's LSym early to avoid data races with the assemblers. + // Do this before walk, as walk needs the LSym to set attributes/relocations + // (e.g. in markTypeUsedInInterface). + ssagen.InitLSym(fn, true) + + errorsBefore := base.Errors() + walk(fn) + if base.Errors() > errorsBefore { + return + } + + // From this point, there should be no uses of Curfn. Enforce that. + ir.CurFunc = nil + + if ir.FuncName(fn) == "_" { + // We don't need to generate code for this function, just report errors in its body. + // At this point we've generated any errors needed. + // (Beyond here we generate only non-spec errors, like "stack frame too large".) + // See issue 29870. + return + } + + // Make sure type syms are declared for all types that might + // be types of stack objects. We need to do this here + // because symbols must be allocated before the parallel + // phase of the compiler. + for _, n := range fn.Dcl { + switch n.Class_ { + case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO: + if liveness.ShouldTrack(n) && n.Addrtaken() { + reflectdata.WriteType(n.Type()) + // Also make sure we allocate a linker symbol + // for the stack object data, for the same reason. + if fn.LSym.Func().StackObjects == nil { + fn.LSym.Func().StackObjects = base.Ctxt.Lookup(fn.LSym.Name + ".stkobj") + } + } + } + } + + if compilenow(fn) { + ssagen.Compile(fn, 0) + } else { + compilequeue = append(compilequeue, fn) + } +} + +// compilenow reports whether to compile immediately. +// If functions are not compiled immediately, +// they are enqueued in compilequeue, +// which is drained by compileFunctions. +func compilenow(fn *ir.Func) bool { + // Issue 38068: if this function is a method AND an inline + // candidate AND was not inlined (yet), put it onto the compile + // queue instead of compiling it immediately. This is in case we + // wind up inlining it into a method wrapper that is generated by + // compiling a function later on in the Target.Decls list. 
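The queue this comment refers to is drained by compileFunctions just below, which is a plain channel fan-out: sort the queue longest-first, feed it to -c workers, and wait for completion. A standalone sketch of that pattern, with a stand-in type for *ir.Func and an explicit worker count in place of base.Flag.LowerC:

package main

import (
	"fmt"
	"sort"
	"sync"
)

type fn struct {
	name string
	size int // stand-in for len(fn.Body)
}

func compileAll(queue []*fn, workers int) {
	// Longest functions first, since they're most likely to be
	// the slowest; this helps avoid stragglers.
	sort.Slice(queue, func(i, j int) bool { return queue[i].size > queue[j].size })

	c := make(chan *fn, workers)
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func(worker int) {
			defer wg.Done()
			for f := range c {
				fmt.Printf("worker %d: compiling %s\n", worker, f.name)
			}
		}(i)
	}
	for _, f := range queue {
		c <- f
	}
	close(c)
	wg.Wait()
}

func main() {
	compileAll([]*fn{{"small", 1}, {"big", 100}, {"mid", 10}}, 2)
}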
+ if ir.IsMethod(fn) && isInlinableButNotInlined(fn) { + return false + } + return base.Flag.LowerC == 1 && base.Debug.CompileLater == 0 +} + +// compileFunctions compiles all functions in compilequeue. +// It fans out nBackendWorkers to do the work +// and waits for them to complete. +func compileFunctions() { + if len(compilequeue) != 0 { + types.CalcSizeDisabled = true // not safe to calculate sizes concurrently + if race.Enabled { + // Randomize compilation order to try to shake out races. + tmp := make([]*ir.Func, len(compilequeue)) + perm := rand.Perm(len(compilequeue)) + for i, v := range perm { + tmp[v] = compilequeue[i] + } + copy(compilequeue, tmp) + } else { + // Compile the longest functions first, + // since they're most likely to be the slowest. + // This helps avoid stragglers. + sort.Slice(compilequeue, func(i, j int) bool { + return len(compilequeue[i].Body) > len(compilequeue[j].Body) + }) + } + var wg sync.WaitGroup + base.Ctxt.InParallel = true + c := make(chan *ir.Func, base.Flag.LowerC) + for i := 0; i < base.Flag.LowerC; i++ { + wg.Add(1) + go func(worker int) { + for fn := range c { + ssagen.Compile(fn, worker) + } + wg.Done() + }(i) + } + for _, fn := range compilequeue { + c <- fn + } + close(c) + compilequeue = nil + wg.Wait() + base.Ctxt.InParallel = false + types.CalcSizeDisabled = false + } +} + +// isInlinableButNotInlined returns true if 'fn' was marked as an +// inline candidate but then never inlined (presumably because we +// found no call sites). +func isInlinableButNotInlined(fn *ir.Func) bool { + if fn.Inl == nil { + return false + } + if fn.Sym() == nil { + return true + } + return !fn.Sym().Linksym().WasInlined() +} diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/dwarf.go similarity index 55% rename from src/cmd/compile/internal/gc/pgen.go rename to src/cmd/compile/internal/gc/dwarf.go index 4d990e7dbacb5..e853c51422795 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/dwarf.go @@ -5,361 +5,19 @@ package gc import ( + "sort" + "cmd/compile/internal/base" "cmd/compile/internal/ir" - "cmd/compile/internal/liveness" - "cmd/compile/internal/objw" - "cmd/compile/internal/reflectdata" "cmd/compile/internal/ssa" - "cmd/compile/internal/typecheck" + "cmd/compile/internal/ssagen" "cmd/compile/internal/types" "cmd/internal/dwarf" "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/src" - "cmd/internal/sys" - "internal/race" - "math/rand" - "sort" - "sync" - "time" ) -// "Portable" code generation. - -var ( - compilequeue []*ir.Func // functions waiting to be compiled -) - -// cmpstackvarlt reports whether the stack variable a sorts before b. -// -// Sort the list of stack variables. Autos after anything else, -// within autos, unused after used, within used, things with -// pointers first, zeroed things first, and then decreasing size. -// Because autos are laid out in decreasing addresses -// on the stack, pointers first, zeroed things first and decreasing size -// really means, in memory, things with pointers needing zeroing at -// the top of the stack and increasing in size. -// Non-autos sort on offset. 
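The ordering this comment documents (the deleted cmpstackvarlt just below, which moves to ssagen) is a chain of tiebreaks: non-autos by frame offset, then used before unused, pointered before pointer-free, needs-zeroing first, larger first, and name as the final tiebreak. A standalone sketch of the same ordering over a stand-in struct for *ir.Name:

package main

import (
	"fmt"
	"sort"
)

type stackVar struct {
	name        string
	auto        bool
	offset      int64
	used        bool
	hasPointers bool
	needZero    bool
	width       int64
}

func less(a, b *stackVar) bool {
	if a.auto != b.auto {
		return b.auto // autos sort after everything else
	}
	if !a.auto {
		return a.offset < b.offset
	}
	if a.used != b.used {
		return a.used
	}
	if a.hasPointers != b.hasPointers {
		return a.hasPointers
	}
	if a.needZero != b.needZero {
		return a.needZero
	}
	if a.width != b.width {
		return a.width > b.width
	}
	return a.name < b.name
}

func main() {
	vars := []*stackVar{
		{name: "tmp", auto: true, used: true, width: 8},
		{name: "p", auto: true, used: true, hasPointers: true, width: 8},
		{name: "arg", offset: 0},
	}
	sort.Slice(vars, func(i, j int) bool { return less(vars[i], vars[j]) })
	for _, v := range vars {
		fmt.Println(v.name) // arg, p, tmp
	}
}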
-func cmpstackvarlt(a, b *ir.Name) bool { - if (a.Class_ == ir.PAUTO) != (b.Class_ == ir.PAUTO) { - return b.Class_ == ir.PAUTO - } - - if a.Class_ != ir.PAUTO { - return a.FrameOffset() < b.FrameOffset() - } - - if a.Used() != b.Used() { - return a.Used() - } - - ap := a.Type().HasPointers() - bp := b.Type().HasPointers() - if ap != bp { - return ap - } - - ap = a.Needzero() - bp = b.Needzero() - if ap != bp { - return ap - } - - if a.Type().Width != b.Type().Width { - return a.Type().Width > b.Type().Width - } - - return a.Sym().Name < b.Sym().Name -} - -// byStackvar implements sort.Interface for []*Node using cmpstackvarlt. -type byStackVar []*ir.Name - -func (s byStackVar) Len() int { return len(s) } -func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) } -func (s byStackVar) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -func (s *ssafn) AllocFrame(f *ssa.Func) { - s.stksize = 0 - s.stkptrsize = 0 - fn := s.curfn - - // Mark the PAUTO's unused. - for _, ln := range fn.Dcl { - if ln.Class_ == ir.PAUTO { - ln.SetUsed(false) - } - } - - for _, l := range f.RegAlloc { - if ls, ok := l.(ssa.LocalSlot); ok { - ls.N.Name().SetUsed(true) - } - } - - scratchUsed := false - for _, b := range f.Blocks { - for _, v := range b.Values { - if n, ok := v.Aux.(*ir.Name); ok { - switch n.Class_ { - case ir.PPARAM, ir.PPARAMOUT: - // Don't modify nodfp; it is a global. - if n != ir.RegFP { - n.Name().SetUsed(true) - } - case ir.PAUTO: - n.Name().SetUsed(true) - } - } - if !scratchUsed { - scratchUsed = v.Op.UsesScratch() - } - - } - } - - if f.Config.NeedsFpScratch && scratchUsed { - s.scratchFpMem = typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT64]) - } - - sort.Sort(byStackVar(fn.Dcl)) - - // Reassign stack offsets of the locals that are used. - lastHasPtr := false - for i, n := range fn.Dcl { - if n.Op() != ir.ONAME || n.Class_ != ir.PAUTO { - continue - } - if !n.Used() { - fn.Dcl = fn.Dcl[:i] - break - } - - types.CalcSize(n.Type()) - w := n.Type().Width - if w >= types.MaxWidth || w < 0 { - base.Fatalf("bad width") - } - if w == 0 && lastHasPtr { - // Pad between a pointer-containing object and a zero-sized object. - // This prevents a pointer to the zero-sized object from being interpreted - // as a pointer to the pointer-containing object (and causing it - // to be scanned when it shouldn't be). See issue 24993. - w = 1 - } - s.stksize += w - s.stksize = types.Rnd(s.stksize, int64(n.Type().Align)) - if n.Type().HasPointers() { - s.stkptrsize = s.stksize - lastHasPtr = true - } else { - lastHasPtr = false - } - if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) { - s.stksize = types.Rnd(s.stksize, int64(types.PtrSize)) - } - n.SetFrameOffset(-s.stksize) - } - - s.stksize = types.Rnd(s.stksize, int64(types.RegSize)) - s.stkptrsize = types.Rnd(s.stkptrsize, int64(types.RegSize)) -} - -func funccompile(fn *ir.Func) { - if ir.CurFunc != nil { - base.Fatalf("funccompile %v inside %v", fn.Sym(), ir.CurFunc.Sym()) - } - - if fn.Type() == nil { - if base.Errors() == 0 { - base.Fatalf("funccompile missing type") - } - return - } - - // assign parameter offsets - types.CalcSize(fn.Type()) - - if len(fn.Body) == 0 { - // Initialize ABI wrappers if necessary. 
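AllocFrame, shown above, then walks the sorted autos and assigns decreasing offsets: each slot is aligned, the pointer-containing prefix is tracked for the GC, and a zero-sized object directly after a pointered one gets one byte of padding (issue 24993). A minimal sketch of that assignment, with widths and alignments supplied directly instead of via types.CalcSize and without the per-arch rounding:

package main

import "fmt"

type auto struct {
	name        string
	width       int64
	align       int64
	hasPointers bool
}

func allocFrame(autos []*auto) (stksize, stkptrsize int64) {
	lastHasPtr := false
	for _, n := range autos {
		w := n.width
		if w == 0 && lastHasPtr {
			w = 1 // pad so a pointer to the zero-sized object can't alias the pointered slot
		}
		stksize += w
		stksize = rnd(stksize, n.align)
		if n.hasPointers {
			stkptrsize = stksize
			lastHasPtr = true
		} else {
			lastHasPtr = false
		}
		fmt.Printf("%s at offset %d\n", n.name, -stksize)
	}
	const regSize = 8
	return rnd(stksize, regSize), rnd(stkptrsize, regSize)
}

// rnd rounds x up to a multiple of a (a must be a power of two).
func rnd(x, a int64) int64 { return (x + a - 1) &^ (a - 1) }

func main() {
	size, ptrSize := allocFrame([]*auto{
		{"p", 8, 8, true},
		{"z", 0, 1, false}, // zero-sized, gets padding
		{"buf", 24, 8, false},
	})
	fmt.Println("frame:", size, "ptr prefix:", ptrSize)
}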
- initLSym(fn, false) - liveness.WriteFuncMap(fn) - return - } - - typecheck.DeclContext = ir.PAUTO - ir.CurFunc = fn - compile(fn) - ir.CurFunc = nil - typecheck.DeclContext = ir.PEXTERN -} - -func compile(fn *ir.Func) { - // Set up the function's LSym early to avoid data races with the assemblers. - // Do this before walk, as walk needs the LSym to set attributes/relocations - // (e.g. in markTypeUsedInInterface). - initLSym(fn, true) - - errorsBefore := base.Errors() - walk(fn) - if base.Errors() > errorsBefore { - return - } - - // From this point, there should be no uses of Curfn. Enforce that. - ir.CurFunc = nil - - if ir.FuncName(fn) == "_" { - // We don't need to generate code for this function, just report errors in its body. - // At this point we've generated any errors needed. - // (Beyond here we generate only non-spec errors, like "stack frame too large".) - // See issue 29870. - return - } - - // Make sure type syms are declared for all types that might - // be types of stack objects. We need to do this here - // because symbols must be allocated before the parallel - // phase of the compiler. - for _, n := range fn.Dcl { - switch n.Class_ { - case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO: - if liveness.ShouldTrack(n) && n.Addrtaken() { - reflectdata.WriteType(n.Type()) - // Also make sure we allocate a linker symbol - // for the stack object data, for the same reason. - if fn.LSym.Func().StackObjects == nil { - fn.LSym.Func().StackObjects = base.Ctxt.Lookup(fn.LSym.Name + ".stkobj") - } - } - } - } - - if compilenow(fn) { - compileSSA(fn, 0) - } else { - compilequeue = append(compilequeue, fn) - } -} - -// compilenow reports whether to compile immediately. -// If functions are not compiled immediately, -// they are enqueued in compilequeue, -// which is drained by compileFunctions. -func compilenow(fn *ir.Func) bool { - // Issue 38068: if this function is a method AND an inline - // candidate AND was not inlined (yet), put it onto the compile - // queue instead of compiling it immediately. This is in case we - // wind up inlining it into a method wrapper that is generated by - // compiling a function later on in the Target.Decls list. - if ir.IsMethod(fn) && isInlinableButNotInlined(fn) { - return false - } - return base.Flag.LowerC == 1 && base.Debug.CompileLater == 0 -} - -// isInlinableButNotInlined returns true if 'fn' was marked as an -// inline candidate but then never inlined (presumably because we -// found no call sites). -func isInlinableButNotInlined(fn *ir.Func) bool { - if fn.Inl == nil { - return false - } - if fn.Sym() == nil { - return true - } - return !fn.Sym().Linksym().WasInlined() -} - -const maxStackSize = 1 << 30 - -// compileSSA builds an SSA backend function, -// uses it to generate a plist, -// and flushes that plist to machine code. -// worker indicates which of the backend workers is doing the processing. -func compileSSA(fn *ir.Func, worker int) { - f := buildssa(fn, worker) - // Note: check arg size to fix issue 25507. - if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type().ArgWidth() >= maxStackSize { - largeStackFramesMu.Lock() - largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type().ArgWidth(), pos: fn.Pos()}) - largeStackFramesMu.Unlock() - return - } - pp := objw.NewProgs(fn, worker) - defer pp.Free() - genssa(f, pp) - // Check frame size again. - // The check above included only the space needed for local variables. 
- // After genssa, the space needed includes local variables and the callee arg region. - // We must do this check prior to calling pp.Flush. - // If there are any oversized stack frames, - // the assembler may emit inscrutable complaints about invalid instructions. - if pp.Text.To.Offset >= maxStackSize { - largeStackFramesMu.Lock() - locals := f.Frontend().(*ssafn).stksize - largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type().ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()}) - largeStackFramesMu.Unlock() - return - } - - pp.Flush() // assemble, fill in boilerplate, etc. - // fieldtrack must be called after pp.Flush. See issue 20014. - fieldtrack(pp.Text.From.Sym, fn.FieldTrack) -} - -func init() { - if race.Enabled { - rand.Seed(time.Now().UnixNano()) - } -} - -// compileFunctions compiles all functions in compilequeue. -// It fans out nBackendWorkers to do the work -// and waits for them to complete. -func compileFunctions() { - if len(compilequeue) != 0 { - types.CalcSizeDisabled = true // not safe to calculate sizes concurrently - if race.Enabled { - // Randomize compilation order to try to shake out races. - tmp := make([]*ir.Func, len(compilequeue)) - perm := rand.Perm(len(compilequeue)) - for i, v := range perm { - tmp[v] = compilequeue[i] - } - copy(compilequeue, tmp) - } else { - // Compile the longest functions first, - // since they're most likely to be the slowest. - // This helps avoid stragglers. - sort.Slice(compilequeue, func(i, j int) bool { - return len(compilequeue[i].Body) > len(compilequeue[j].Body) - }) - } - var wg sync.WaitGroup - base.Ctxt.InParallel = true - c := make(chan *ir.Func, base.Flag.LowerC) - for i := 0; i < base.Flag.LowerC; i++ { - wg.Add(1) - go func(worker int) { - for fn := range c { - compileSSA(fn, worker) - } - wg.Done() - }(i) - } - for _, fn := range compilequeue { - c <- fn - } - close(c) - compilequeue = nil - wg.Wait() - base.Ctxt.InParallel = false - types.CalcSizeDisabled = false - } -} - func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) { fn := curfn.(*ir.Func) @@ -485,100 +143,6 @@ func declPos(decl *ir.Name) src.XPos { return decl.Pos() } -// createSimpleVars creates a DWARF entry for every variable declared in the -// function, claiming that they are permanently on the stack. 
-func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) { - var vars []*dwarf.Var - var decls []*ir.Name - selected := make(map[*ir.Name]bool) - for _, n := range apDecls { - if ir.IsAutoTmp(n) { - continue - } - - decls = append(decls, n) - vars = append(vars, createSimpleVar(fnsym, n)) - selected[n] = true - } - return decls, vars, selected -} - -func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { - var abbrev int - var offs int64 - - switch n.Class_ { - case ir.PAUTO: - offs = n.FrameOffset() - abbrev = dwarf.DW_ABRV_AUTO - if base.Ctxt.FixedFrameSize() == 0 { - offs -= int64(types.PtrSize) - } - if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" { - // There is a word space for FP on ARM64 even if the frame pointer is disabled - offs -= int64(types.PtrSize) - } - - case ir.PPARAM, ir.PPARAMOUT: - abbrev = dwarf.DW_ABRV_PARAM - offs = n.FrameOffset() + base.Ctxt.FixedFrameSize() - default: - base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class_, n) - } - - typename := dwarf.InfoPrefix + types.TypeSymName(n.Type()) - delete(fnsym.Func().Autot, ngotype(n).Linksym()) - inlIndex := 0 - if base.Flag.GenDwarfInl > 1 { - if n.Name().InlFormal() || n.Name().InlLocal() { - inlIndex = posInlIndex(n.Pos()) + 1 - if n.Name().InlFormal() { - abbrev = dwarf.DW_ABRV_PARAM - } - } - } - declpos := base.Ctxt.InnermostPos(declPos(n)) - return &dwarf.Var{ - Name: n.Sym().Name, - IsReturnValue: n.Class_ == ir.PPARAMOUT, - IsInlFormal: n.Name().InlFormal(), - Abbrev: abbrev, - StackOffset: int32(offs), - Type: base.Ctxt.Lookup(typename), - DeclFile: declpos.RelFilename(), - DeclLine: declpos.RelLine(), - DeclCol: declpos.Col(), - InlIndex: int32(inlIndex), - ChildIndex: -1, - } -} - -// createComplexVars creates recomposed DWARF vars with location lists, -// suitable for describing optimized code. -func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) { - debugInfo := fn.DebugInfo.(*ssa.FuncDebug) - - // Produce a DWARF variable entry for each user variable. - var decls []*ir.Name - var vars []*dwarf.Var - ssaVars := make(map[*ir.Name]bool) - - for varID, dvar := range debugInfo.Vars { - n := dvar - ssaVars[n] = true - for _, slot := range debugInfo.VarSlots[varID] { - ssaVars[debugInfo.Slots[slot].N] = true - } - - if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil { - decls = append(decls, n) - vars = append(vars, dvar) - } - } - - return decls, vars, ssaVars -} - // createDwarfVars process fn, returning a list of DWARF variables and the // Nodes they represent. func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var) { @@ -617,7 +181,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir if c == '.' || n.Type().IsUntyped() { continue } - if n.Class_ == ir.PPARAM && !canSSAType(n.Type()) { + if n.Class_ == ir.PPARAM && !ssagen.TypeOK(n.Type()) { // SSA-able args get location lists, and may move in and // out of registers, so those are handled elsewhere. // Autos and named output params seem to get handled @@ -699,26 +263,98 @@ func preInliningDcls(fnsym *obj.LSym) []*ir.Name { return rdcl } -// stackOffset returns the stack location of a LocalSlot relative to the -// stack pointer, suitable for use in a DWARF location entry. This has nothing -// to do with its offset in the user variable. 
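The offset arithmetic shared by stackOffset and createSimpleVar can be sketched concretely: locals sit below the fixed frame area (with extra words skipped when there is no fixed frame and when a word is reserved for the frame pointer), parameters above it. Constants here are assumptions for illustration (8-byte pointers, zero fixed frame size, frame-pointer word present):

package main

import "fmt"

const (
	ptrSize        = 8    // assumed
	fixedFrameSize = 0    // assumed: no link register area
	haveFramePtr   = true // a word is reserved for FP (see the arm64 note above)
)

type class int

const (
	pAuto class = iota
	pParam
)

func dwarfStackOffset(c class, frameOffset, slotOff int64) int64 {
	off := frameOffset
	switch c {
	case pAuto:
		if fixedFrameSize == 0 {
			off -= ptrSize
		}
		if haveFramePtr {
			off -= ptrSize
		}
	case pParam:
		off += fixedFrameSize
	}
	return off + slotOff
}

func main() {
	fmt.Println(dwarfStackOffset(pAuto, -16, 0)) // -32
	fmt.Println(dwarfStackOffset(pParam, 8, 0))  // 8
}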
-func stackOffset(slot ssa.LocalSlot) int32 { - n := slot.N - var off int64 +// createSimpleVars creates a DWARF entry for every variable declared in the +// function, claiming that they are permanently on the stack. +func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) { + var vars []*dwarf.Var + var decls []*ir.Name + selected := make(map[*ir.Name]bool) + for _, n := range apDecls { + if ir.IsAutoTmp(n) { + continue + } + + decls = append(decls, n) + vars = append(vars, createSimpleVar(fnsym, n)) + selected[n] = true + } + return decls, vars, selected +} + +func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { + var abbrev int + var offs int64 + switch n.Class_ { case ir.PAUTO: - off = n.FrameOffset() + offs = n.FrameOffset() + abbrev = dwarf.DW_ABRV_AUTO if base.Ctxt.FixedFrameSize() == 0 { - off -= int64(types.PtrSize) + offs -= int64(types.PtrSize) } if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" { // There is a word space for FP on ARM64 even if the frame pointer is disabled - off -= int64(types.PtrSize) + offs -= int64(types.PtrSize) } + case ir.PPARAM, ir.PPARAMOUT: - off = n.FrameOffset() + base.Ctxt.FixedFrameSize() + abbrev = dwarf.DW_ABRV_PARAM + offs = n.FrameOffset() + base.Ctxt.FixedFrameSize() + default: + base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class_, n) + } + + typename := dwarf.InfoPrefix + types.TypeSymName(n.Type()) + delete(fnsym.Func().Autot, ngotype(n).Linksym()) + inlIndex := 0 + if base.Flag.GenDwarfInl > 1 { + if n.Name().InlFormal() || n.Name().InlLocal() { + inlIndex = posInlIndex(n.Pos()) + 1 + if n.Name().InlFormal() { + abbrev = dwarf.DW_ABRV_PARAM + } + } + } + declpos := base.Ctxt.InnermostPos(declPos(n)) + return &dwarf.Var{ + Name: n.Sym().Name, + IsReturnValue: n.Class_ == ir.PPARAMOUT, + IsInlFormal: n.Name().InlFormal(), + Abbrev: abbrev, + StackOffset: int32(offs), + Type: base.Ctxt.Lookup(typename), + DeclFile: declpos.RelFilename(), + DeclLine: declpos.RelLine(), + DeclCol: declpos.Col(), + InlIndex: int32(inlIndex), + ChildIndex: -1, } - return int32(off + slot.Off) +} + +// createComplexVars creates recomposed DWARF vars with location lists, +// suitable for describing optimized code. +func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) { + debugInfo := fn.DebugInfo.(*ssa.FuncDebug) + + // Produce a DWARF variable entry for each user variable. + var decls []*ir.Name + var vars []*dwarf.Var + ssaVars := make(map[*ir.Name]bool) + + for varID, dvar := range debugInfo.Vars { + n := dvar + ssaVars[n] = true + for _, slot := range debugInfo.VarSlots[varID] { + ssaVars[debugInfo.Slots[slot].N] = true + } + + if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil { + decls = append(decls, n) + vars = append(vars, dvar) + } + } + + return decls, vars, ssaVars } // createComplexVar builds a single DWARF variable entry and location list. @@ -759,7 +395,7 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var // variables just give it the first one. It's not used otherwise. // This won't work well if the first slot hasn't been assigned a stack // location, but it's not obvious how to do better. 
- StackOffset: stackOffset(debug.Slots[debug.VarSlots[varID][0]]), + StackOffset: ssagen.StackOffset(debug.Slots[debug.VarSlots[varID][0]]), DeclFile: declpos.RelFilename(), DeclLine: declpos.RelLine(), DeclCol: declpos.Col(), @@ -774,31 +410,3 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var } return dvar } - -// fieldtrack adds R_USEFIELD relocations to fnsym to record any -// struct fields that it used. -func fieldtrack(fnsym *obj.LSym, tracked map[*types.Sym]struct{}) { - if fnsym == nil { - return - } - if objabi.Fieldtrack_enabled == 0 || len(tracked) == 0 { - return - } - - trackSyms := make([]*types.Sym, 0, len(tracked)) - for sym := range tracked { - trackSyms = append(trackSyms, sym) - } - sort.Sort(symByName(trackSyms)) - for _, sym := range trackSyms { - r := obj.Addrel(fnsym) - r.Sym = sym.Linksym() - r.Type = objabi.R_USEFIELD - } -} - -type symByName []*types.Sym - -func (a symByName) Len() int { return len(a) } -func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name } -func (a symByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index e66b877fd0b92..154235f744fb4 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -17,6 +17,7 @@ import ( "cmd/compile/internal/noder" "cmd/compile/internal/reflectdata" "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" "cmd/compile/internal/staticdata" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" @@ -26,12 +27,9 @@ import ( "cmd/internal/src" "flag" "fmt" - "io/ioutil" "log" "os" "runtime" - "sort" - "strings" ) func hidePanic() { @@ -52,14 +50,14 @@ func hidePanic() { // Main parses flags and Go source files specified in the command-line // arguments, type-checks the parsed Go package, compiles functions to machine // code, and finally writes the compiled package definition to disk. -func Main(archInit func(*Arch)) { +func Main(archInit func(*ssagen.ArchInfo)) { base.Timer.Start("fe", "init") defer hidePanic() - archInit(&thearch) + archInit(&ssagen.Arch) - base.Ctxt = obj.Linknew(thearch.LinkArch) + base.Ctxt = obj.Linknew(ssagen.Arch.LinkArch) base.Ctxt.DiagFunc = base.Errorf base.Ctxt.DiagFlush = base.FlushErrors base.Ctxt.Bso = bufio.NewWriter(os.Stdout) @@ -151,7 +149,7 @@ func Main(archInit func(*Arch)) { types.ParseLangFlag() if base.Flag.SymABIs != "" { - readSymABIs(base.Flag.SymABIs, base.Ctxt.Pkgpath) + ssagen.ReadSymABIs(base.Flag.SymABIs, base.Ctxt.Pkgpath) } if base.Compiling(base.NoInstrumentPkgs) { @@ -159,7 +157,7 @@ func Main(archInit func(*Arch)) { base.Flag.MSan = false } - thearch.LinkArch.Init(base.Ctxt) + ssagen.Arch.LinkArch.Init(base.Ctxt) startProfile() if base.Flag.Race { ir.Pkgs.Race = types.NewPkg("runtime/race", "") @@ -174,7 +172,7 @@ func Main(archInit func(*Arch)) { dwarf.EnableLogging(base.Debug.DwarfInl != 0) } if base.Debug.SoftFloat != 0 { - thearch.SoftFloat = true + ssagen.Arch.SoftFloat = true } if base.Flag.JSON != "" { // parse version,destination from json logging optimization. 
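The deleted fieldtrack above illustrates a pattern the compiler relies on throughout: Go map iteration order is randomized, so keys are copied to a slice and sorted before emitting relocations, keeping the output deterministic across runs. A generic sketch of that pattern:

package main

import (
	"fmt"
	"sort"
)

func sortedKeys(tracked map[string]struct{}) []string {
	keys := make([]string, 0, len(tracked))
	for k := range tracked {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

func main() {
	tracked := map[string]struct{}{"pkg.T.b": {}, "pkg.T.a": {}}
	for _, sym := range sortedKeys(tracked) {
		// In the compiler this would be obj.Addrel(fnsym) with
		// relocation type objabi.R_USEFIELD.
		fmt.Println("R_USEFIELD", sym)
	}
}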
@@ -182,14 +180,14 @@ func Main(archInit func(*Arch)) { } ir.EscFmt = escape.Fmt - ir.IsIntrinsicCall = isIntrinsicCall - inline.SSADumpInline = ssaDumpInline - initSSAEnv() - initSSATables() - - types.PtrSize = thearch.LinkArch.PtrSize - types.RegSize = thearch.LinkArch.RegSize - types.MaxWidth = thearch.MAXWIDTH + ir.IsIntrinsicCall = ssagen.IsIntrinsicCall + inline.SSADumpInline = ssagen.DumpInline + ssagen.InitEnv() + ssagen.InitTables() + + types.PtrSize = ssagen.Arch.LinkArch.PtrSize + types.RegSize = ssagen.Arch.LinkArch.RegSize + types.MaxWidth = ssagen.Arch.MAXWIDTH types.TypeLinkSym = func(t *types.Type) *obj.LSym { return reflectdata.TypeSym(t).Linksym() } @@ -210,7 +208,7 @@ func Main(archInit func(*Arch)) { // Parse input. base.Timer.Start("fe", "parse") lines := noder.ParseFiles(flag.Args()) - cgoSymABIs() + ssagen.CgoSymABIs() base.Timer.Stop() base.Timer.AddEvent(int64(lines), "lines") recordPackageName() @@ -257,7 +255,7 @@ func Main(archInit func(*Arch)) { // We'll do the final check after write barriers are // inserted. if base.Flag.CompilingRuntime { - EnableNoWriteBarrierRecCheck() + ssagen.EnableNoWriteBarrierRecCheck() } // Transform closure bodies to properly reference captured variables. @@ -277,7 +275,7 @@ func Main(archInit func(*Arch)) { // Prepare for SSA compilation. // This must be before peekitabs, because peekitabs // can trigger function compilation. - initssaconfig() + ssagen.InitConfig() // Just before compilation, compile itabs found on // the right side of OCONVIFACE so that methods @@ -302,7 +300,7 @@ func Main(archInit func(*Arch)) { if base.Flag.CompilingRuntime { // Write barriers are now known. Check the call graph. - NoWriteBarrierRecCheck() + ssagen.NoWriteBarrierRecCheck() } // Finalize DWARF inline routine DIEs, then explicitly turn off @@ -323,7 +321,7 @@ func Main(archInit func(*Arch)) { dumpasmhdr() } - CheckLargeStacks() + ssagen.CheckLargeStacks() typecheck.CheckFuncStack() if len(compilequeue) != 0 { @@ -343,34 +341,6 @@ func Main(archInit func(*Arch)) { } } -func CheckLargeStacks() { - // Check whether any of the functions we have compiled have gigantic stack frames. - sort.Slice(largeStackFrames, func(i, j int) bool { - return largeStackFrames[i].pos.Before(largeStackFrames[j].pos) - }) - for _, large := range largeStackFrames { - if large.callee != 0 { - base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20) - } else { - base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20) - } - } -} - -func cgoSymABIs() { - // The linker expects an ABI0 wrapper for all cgo-exported - // functions. - for _, prag := range typecheck.Target.CgoPragmas { - switch prag[0] { - case "cgo_export_static", "cgo_export_dynamic": - if symabiRefs == nil { - symabiRefs = make(map[string]obj.ABI) - } - symabiRefs[prag[1]] = obj.ABI0 - } - } -} - func writebench(filename string) error { f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) if err != nil { @@ -394,77 +364,6 @@ func writebench(filename string) error { return f.Close() } -// symabiDefs and symabiRefs record the defined and referenced ABIs of -// symbols required by non-Go code. These are keyed by link symbol -// name, where the local package prefix is always `"".` -var symabiDefs, symabiRefs map[string]obj.ABI - -// readSymABIs reads a symabis file that specifies definitions and -// references of text symbols by ABI. 
-// -// The symabis format is a set of lines, where each line is a sequence -// of whitespace-separated fields. The first field is a verb and is -// either "def" for defining a symbol ABI or "ref" for referencing a -// symbol using an ABI. For both "def" and "ref", the second field is -// the symbol name and the third field is the ABI name, as one of the -// named cmd/internal/obj.ABI constants. -func readSymABIs(file, myimportpath string) { - data, err := ioutil.ReadFile(file) - if err != nil { - log.Fatalf("-symabis: %v", err) - } - - symabiDefs = make(map[string]obj.ABI) - symabiRefs = make(map[string]obj.ABI) - - localPrefix := "" - if myimportpath != "" { - // Symbols in this package may be written either as - // "".X or with the package's import path already in - // the symbol. - localPrefix = objabi.PathToPrefix(myimportpath) + "." - } - - for lineNum, line := range strings.Split(string(data), "\n") { - lineNum++ // 1-based - line = strings.TrimSpace(line) - if line == "" || strings.HasPrefix(line, "#") { - continue - } - - parts := strings.Fields(line) - switch parts[0] { - case "def", "ref": - // Parse line. - if len(parts) != 3 { - log.Fatalf(`%s:%d: invalid symabi: syntax is "%s sym abi"`, file, lineNum, parts[0]) - } - sym, abistr := parts[1], parts[2] - abi, valid := obj.ParseABI(abistr) - if !valid { - log.Fatalf(`%s:%d: invalid symabi: unknown abi "%s"`, file, lineNum, abistr) - } - - // If the symbol is already prefixed with - // myimportpath, rewrite it to start with "" - // so it matches the compiler's internal - // symbol names. - if localPrefix != "" && strings.HasPrefix(sym, localPrefix) { - sym = `"".` + sym[len(localPrefix):] - } - - // Record for later. - if parts[0] == "def" { - symabiDefs[sym] = abi - } else { - symabiRefs[sym] = abi - } - default: - log.Fatalf(`%s:%d: invalid symabi type "%s"`, file, lineNum, parts[0]) - } - } -} - // recordFlags records the specified command-line flags to be placed // in the DWARF info. func recordFlags(flags ...string) { @@ -532,29 +431,6 @@ func recordPackageName() { s.P = []byte(types.LocalPkg.Name) } -// useNewABIWrapGen returns TRUE if the compiler should generate an -// ABI wrapper for the function 'f'. -func useABIWrapGen(f *ir.Func) bool { - if !base.Flag.ABIWrap { - return false - } - - // Support limit option for bisecting. 
- if base.Flag.ABIWrapLimit == 1 { - return false - } - if base.Flag.ABIWrapLimit < 1 { - return true - } - base.Flag.ABIWrapLimit-- - if base.Debug.ABIWrap != 0 && base.Flag.ABIWrapLimit == 1 { - fmt.Fprintf(os.Stderr, "=-= limit reached after new wrapper for %s\n", - f.LSym.Name) - } - - return true -} - func makePos(b *src.PosBase, line, col uint) src.XPos { return base.Ctxt.PosTable.XPos(src.MakePos(b, line, col)) } diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go index 1ad3b9b4226e0..c52bf1479b1f0 100644 --- a/src/cmd/compile/internal/gc/racewalk.go +++ b/src/cmd/compile/internal/gc/racewalk.go @@ -7,6 +7,7 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/ssagen" "cmd/compile/internal/types" "cmd/internal/src" "cmd/internal/sys" @@ -25,7 +26,7 @@ func instrument(fn *ir.Func) { lno := base.Pos base.Pos = src.NoXPos - if thearch.LinkArch.Arch.Family != sys.AMD64 { + if ssagen.Arch.LinkArch.Arch.Family != sys.AMD64 { fn.Enter.Prepend(mkcall("racefuncenterfp", nil, nil)) fn.Exit.Append(mkcall("racefuncexit", nil, nil)) } else { diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index 4ba0654aef27e..2b2178a8bd2f0 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -8,6 +8,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/reflectdata" + "cmd/compile/internal/ssagen" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/sys" @@ -15,7 +16,7 @@ import ( ) func cheapComputableIndex(width int64) bool { - switch thearch.LinkArch.Family { + switch ssagen.Arch.LinkArch.Family { // MIPS does not have R+R addressing // Arm64 may lack ability to generate this code in our assembler, // but the architecture supports it. diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 89baaf7eee9de..02a4c0a688332 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -8,24 +8,11 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/reflectdata" + "cmd/compile/internal/ssagen" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/src" "fmt" - "sync" -) - -// largeStack is info about a function whose stack frame is too large (rare). -type largeStack struct { - locals int64 - args int64 - callee int64 - pos src.XPos -} - -var ( - largeStackFramesMu sync.Mutex // protects largeStackFrames - largeStackFrames []largeStack ) // backingArrayPtrLen extracts the pointer and length from a slice or string. @@ -91,25 +78,25 @@ func calcHasCall(n ir.Node) bool { // so we ensure they are evaluated first. 
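// A minimal sketch of why the soft-float cases below return true: without
// hardware floating point, ops like OADD on float operands lower to runtime
// calls, so any expression tree containing them "has a call". The expr type,
// the "lit" tag, and the runtime-call naming are illustrative assumptions,
// not cmd/compile's representation.
package main

import "fmt"

type expr struct {
	op      string // "add", "lit", ...
	isFloat bool
	args    []*expr
}

func hasCall(e *expr, softFloat bool) bool {
	if softFloat && e.isFloat && e.op != "lit" {
		return true // would lower to a runtime helper such as fadd64
	}
	for _, a := range e.args {
		if hasCall(a, softFloat) {
			return true
		}
	}
	return false
}

func main() {
	sum := &expr{op: "add", isFloat: true, args: []*expr{
		{op: "lit", isFloat: true}, {op: "lit", isFloat: true},
	}}
	fmt.Println(hasCall(sum, true), hasCall(sum, false)) // true false
}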
case ir.OADD, ir.OSUB, ir.OMUL: n := n.(*ir.BinaryExpr) - if thearch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) { + if ssagen.Arch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) { return true } return n.X.HasCall() || n.Y.HasCall() case ir.ONEG: n := n.(*ir.UnaryExpr) - if thearch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) { + if ssagen.Arch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) { return true } return n.X.HasCall() case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT: n := n.(*ir.BinaryExpr) - if thearch.SoftFloat && (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()]) { + if ssagen.Arch.SoftFloat && (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()]) { return true } return n.X.HasCall() || n.Y.HasCall() case ir.OCONV: n := n.(*ir.ConvExpr) - if thearch.SoftFloat && ((types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) || (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()])) { + if ssagen.Arch.SoftFloat && ((types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) || (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()])) { return true } return n.X.HasCall() diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 9b49b06c34acc..f86dbba2c9824 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -9,6 +9,7 @@ import ( "cmd/compile/internal/escape" "cmd/compile/internal/ir" "cmd/compile/internal/reflectdata" + "cmd/compile/internal/ssagen" "cmd/compile/internal/staticdata" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" @@ -977,7 +978,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { n.X = cheapexpr(n.X, init) // byteindex widens n.Left so that the multiplication doesn't overflow. index := ir.NewBinaryExpr(base.Pos, ir.OLSH, byteindex(n.X), ir.NewInt(3)) - if thearch.LinkArch.ByteOrder == binary.BigEndian { + if ssagen.Arch.LinkArch.ByteOrder == binary.BigEndian { index = ir.NewBinaryExpr(base.Pos, ir.OADD, index, ir.NewInt(7)) } xe := ir.NewIndexExpr(base.Pos, ir.Names.Staticuint64s, index) @@ -1675,7 +1676,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { return mkcall("stringtoslicerune", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TSTRING])) case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT, ir.OPTRLIT: - if isStaticCompositeLiteral(n) && !canSSAType(n.Type()) { + if isStaticCompositeLiteral(n) && !ssagen.TypeOK(n.Type()) { n := n.(*ir.CompLitExpr) // not OPTRLIT // n can be directly represented in the read-only data section. // Make direct reference to the static data. See issue 12841. @@ -1739,11 +1740,11 @@ func markUsedIfaceMethod(n *ir.CallExpr) { // // If no such function is necessary, it returns (Txxx, Txxx). func rtconvfn(src, dst *types.Type) (param, result types.Kind) { - if thearch.SoftFloat { + if ssagen.Arch.SoftFloat { return types.Txxx, types.Txxx } - switch thearch.LinkArch.Family { + switch ssagen.Arch.LinkArch.Family { case sys.ARM, sys.MIPS: if src.IsFloat() { switch dst.Kind() { @@ -3229,7 +3230,7 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { unalignedLoad := canMergeLoads() if unalignedLoad { // Keep this low enough to generate less code than a function call. 
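// A rough sketch of the idea behind the maxcmpsize threshold below: equality
// of small values can be inlined as one or two register-wide loads instead of
// a runtime call, so 2*RegSize is the cutoff. This standalone version handles
// only the inlined case; the sizes and helper name are illustrative.
package main

import (
	"encoding/binary"
	"fmt"
)

const regSize = 8
const maxCmpSize = 2 * regSize

// eqSmall compares buffers up to maxCmpSize bytes using word loads, roughly
// the way the compiler inlines the comparison (the real code would also use
// 4/2/1-byte loads for the tail).
func eqSmall(a, b []byte) bool {
	if len(a) != len(b) || len(a) > maxCmpSize {
		panic("illustration only handles the inlined case")
	}
	for len(a) >= 8 {
		if binary.LittleEndian.Uint64(a) != binary.LittleEndian.Uint64(b) {
			return false
		}
		a, b = a[8:], b[8:]
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(eqSmall([]byte("0123456789abcdef"), []byte("0123456789abcdef"))) // true
}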
- maxcmpsize = 2 * int64(thearch.LinkArch.RegSize) + maxcmpsize = 2 * int64(ssagen.Arch.LinkArch.RegSize) } switch t.Kind() { @@ -3469,8 +3470,8 @@ func walkcompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { combine64bit := false if canCombineLoads { // Keep this low enough to generate less code than a function call. - maxRewriteLen = 2 * thearch.LinkArch.RegSize - combine64bit = thearch.LinkArch.RegSize >= 8 + maxRewriteLen = 2 * ssagen.Arch.LinkArch.RegSize + combine64bit = ssagen.Arch.LinkArch.RegSize >= 8 } var and ir.Op @@ -3909,12 +3910,12 @@ func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node { // larger, possibly unaligned, load. Note that currently the // optimizations must be able to handle little endian byte order. func canMergeLoads() bool { - switch thearch.LinkArch.Family { + switch ssagen.Arch.LinkArch.Family { case sys.ARM64, sys.AMD64, sys.I386, sys.S390X: return true case sys.PPC64: // Load combining only supported on ppc64le. - return thearch.LinkArch.ByteOrder == binary.LittleEndian + return ssagen.Arch.LinkArch.ByteOrder == binary.LittleEndian } return false } @@ -4032,3 +4033,7 @@ func appendWalkStmt(init *ir.Nodes, stmt ir.Node) { } init.Append(n) } + +// The max number of defers in a function using open-coded defers. We enforce this +// limit because the deferBits bitmask is currently a single byte (to minimize code size) +const maxOpenDefers = 8 diff --git a/src/cmd/compile/internal/mips/galign.go b/src/cmd/compile/internal/mips/galign.go index be40c16dde0b7..599163550bb29 100644 --- a/src/cmd/compile/internal/mips/galign.go +++ b/src/cmd/compile/internal/mips/galign.go @@ -5,13 +5,13 @@ package mips import ( - "cmd/compile/internal/gc" "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" "cmd/internal/obj/mips" "cmd/internal/objabi" ) -func Init(arch *gc.Arch) { +func Init(arch *ssagen.ArchInfo) { arch.LinkArch = &mips.Linkmips if objabi.GOARCH == "mipsle" { arch.LinkArch = &mips.Linkmipsle @@ -22,7 +22,7 @@ func Init(arch *gc.Arch) { arch.ZeroRange = zerorange arch.Ginsnop = ginsnop arch.Ginsnopdefer = ginsnop - arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {} + arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {} arch.SSAGenValue = ssaGenValue arch.SSAGenBlock = ssaGenBlock } diff --git a/src/cmd/compile/internal/mips/ssa.go b/src/cmd/compile/internal/mips/ssa.go index e46d87e17d24a..f1cdbd3241a65 100644 --- a/src/cmd/compile/internal/mips/ssa.go +++ b/src/cmd/compile/internal/mips/ssa.go @@ -8,10 +8,10 @@ import ( "math" "cmd/compile/internal/base" - "cmd/compile/internal/gc" "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/mips" @@ -77,7 +77,7 @@ func storeByType(t *types.Type, r int16) obj.As { panic("bad store type") } -func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { +func ssaGenValue(s *ssagen.State, v *ssa.Value) { switch v.Op { case ssa.OpCopy, ssa.OpMIPSMOVWreg: t := v.Type @@ -123,7 +123,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { } r := v.Reg() p := s.Prog(loadByType(v.Type, r)) - gc.AddrAuto(&p.From, v.Args[0]) + ssagen.AddrAuto(&p.From, v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = r if isHILO(r) { @@ -153,7 +153,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(storeByType(v.Type, r)) p.From.Type = obj.TYPE_REG p.From.Reg = r - gc.AddrAuto(&p.To, v) + ssagen.AddrAuto(&p.To, v) case ssa.OpMIPSADD, ssa.OpMIPSSUB, ssa.OpMIPSAND, @@ -288,10 +288,10 @@ 
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { v.Fatalf("aux is of unknown type %T", v.Aux) case *obj.LSym: wantreg = "SB" - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) case *ir.Name: wantreg = "SP" - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) case nil: // No sym, just MOVW $off(SP), R wantreg = "SP" @@ -312,7 +312,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpMIPSMOVBstore, @@ -325,7 +325,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = v.Args[1].Reg() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpMIPSMOVBstorezero, ssa.OpMIPSMOVHstorezero, ssa.OpMIPSMOVWstorezero: @@ -334,7 +334,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = mips.REGZERO p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpMIPSMOVBreg, ssa.OpMIPSMOVBUreg, ssa.OpMIPSMOVHreg, @@ -492,13 +492,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.BoundsCheckFunc[v.AuxInt] + p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt] s.UseArgs(8) // space used in callee args area by assembly stubs case ssa.OpMIPSLoweredPanicExtendA, ssa.OpMIPSLoweredPanicExtendB, ssa.OpMIPSLoweredPanicExtendC: p := s.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.ExtendCheckFunc[v.AuxInt] + p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt] s.UseArgs(12) // space used in callee args area by assembly stubs case ssa.OpMIPSLoweredAtomicLoad8, ssa.OpMIPSLoweredAtomicLoad32: @@ -762,7 +762,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(mips.AMOVB) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = mips.REGTMP if logopt.Enabled() { @@ -793,7 +793,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case ssa.OpMIPSLoweredGetClosurePtr: // Closure pointer is R22 (mips.REGCTXT). 
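// The CheckLoweredGetClosurePtr calls below enforce an invariant rather than
// emit code: the closure-pointer value must be the first value of the entry
// block, because nothing preserves the context register (R22 on mips). A
// minimal sketch of that check, with illustrative stand-in types for
// ssa.Block and ssa.Value:
package main

import "fmt"

type block struct{ id int }

type value struct {
	block *block
	index int // position within its block
}

func checkLoweredGetClosurePtr(entry *block, v *value) {
	if v.block != entry || v.index != 0 {
		panic(fmt.Sprintf("badly placed LoweredGetClosurePtr: block %d, index %d", v.block.id, v.index))
	}
}

func main() {
	entry := &block{id: 0}
	checkLoweredGetClosurePtr(entry, &value{block: entry, index: 0}) // ok
	fmt.Println("closure pointer correctly placed")
}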
- gc.CheckLoweredGetClosurePtr(v) + ssagen.CheckLoweredGetClosurePtr(v) case ssa.OpMIPSLoweredGetCallerSP: // caller's SP is FixedFrameSize below the address of the first arg p := s.Prog(mips.AMOVW) @@ -826,13 +826,13 @@ var blockJump = map[ssa.BlockKind]struct { ssa.BlockMIPSFPF: {mips.ABFPF, mips.ABFPT}, } -func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { +func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { switch b.Kind { case ssa.BlockPlain: if b.Succs[0].Block() != next { p := s.Prog(obj.AJMP) p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) } case ssa.BlockDefer: // defer returns in R1: @@ -843,11 +843,11 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { p.From.Reg = mips.REGZERO p.Reg = mips.REG_R1 p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()}) if b.Succs[0].Block() != next { p := s.Prog(obj.AJMP) p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) } case ssa.BlockExit: case ssa.BlockRet: diff --git a/src/cmd/compile/internal/mips64/galign.go b/src/cmd/compile/internal/mips64/galign.go index 90c381a50b6df..fc0a34228c8f4 100644 --- a/src/cmd/compile/internal/mips64/galign.go +++ b/src/cmd/compile/internal/mips64/galign.go @@ -5,13 +5,13 @@ package mips64 import ( - "cmd/compile/internal/gc" "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" "cmd/internal/obj/mips" "cmd/internal/objabi" ) -func Init(arch *gc.Arch) { +func Init(arch *ssagen.ArchInfo) { arch.LinkArch = &mips.Linkmips64 if objabi.GOARCH == "mips64le" { arch.LinkArch = &mips.Linkmips64le @@ -23,7 +23,7 @@ func Init(arch *gc.Arch) { arch.Ginsnop = ginsnop arch.Ginsnopdefer = ginsnop - arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {} + arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {} arch.SSAGenValue = ssaGenValue arch.SSAGenBlock = ssaGenBlock } diff --git a/src/cmd/compile/internal/mips64/ssa.go b/src/cmd/compile/internal/mips64/ssa.go index 096e7048ce254..14cf7af143703 100644 --- a/src/cmd/compile/internal/mips64/ssa.go +++ b/src/cmd/compile/internal/mips64/ssa.go @@ -8,10 +8,10 @@ import ( "math" "cmd/compile/internal/base" - "cmd/compile/internal/gc" "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/mips" @@ -85,7 +85,7 @@ func storeByType(t *types.Type, r int16) obj.As { panic("bad store type") } -func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { +func ssaGenValue(s *ssagen.State, v *ssa.Value) { switch v.Op { case ssa.OpCopy, ssa.OpMIPS64MOVVreg: if v.Type.IsMemory() { @@ -126,7 +126,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { } r := v.Reg() p := s.Prog(loadByType(v.Type, r)) - gc.AddrAuto(&p.From, v.Args[0]) + ssagen.AddrAuto(&p.From, v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = r if isHILO(r) { @@ -156,7 +156,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(storeByType(v.Type, r)) p.From.Type = obj.TYPE_REG p.From.Reg = r - gc.AddrAuto(&p.To, v) + ssagen.AddrAuto(&p.To, v) case ssa.OpMIPS64ADDV, ssa.OpMIPS64SUBV, ssa.OpMIPS64AND, @@ -262,10 +262,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { 
v.Fatalf("aux is of unknown type %T", v.Aux) case *obj.LSym: wantreg = "SB" - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) case *ir.Name: wantreg = "SP" - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) case nil: // No sym, just MOVV $off(SP), R wantreg = "SP" @@ -288,7 +288,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpMIPS64MOVBstore, @@ -302,7 +302,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = v.Args[1].Reg() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpMIPS64MOVBstorezero, ssa.OpMIPS64MOVHstorezero, ssa.OpMIPS64MOVWstorezero, @@ -312,7 +312,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = mips.REGZERO p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpMIPS64MOVBreg, ssa.OpMIPS64MOVBUreg, ssa.OpMIPS64MOVHreg, @@ -502,7 +502,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.BoundsCheckFunc[v.AuxInt] + p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt] s.UseArgs(16) // space used in callee args area by assembly stubs case ssa.OpMIPS64LoweredAtomicLoad8, ssa.OpMIPS64LoweredAtomicLoad32, ssa.OpMIPS64LoweredAtomicLoad64: as := mips.AMOVV @@ -720,7 +720,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(mips.AMOVB) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = mips.REGTMP if logopt.Enabled() { @@ -754,7 +754,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p2.To.SetTarget(p4) case ssa.OpMIPS64LoweredGetClosurePtr: // Closure pointer is R22 (mips.REGCTXT). 
- gc.CheckLoweredGetClosurePtr(v) + ssagen.CheckLoweredGetClosurePtr(v) case ssa.OpMIPS64LoweredGetCallerSP: // caller's SP is FixedFrameSize below the address of the first arg p := s.Prog(mips.AMOVV) @@ -787,13 +787,13 @@ var blockJump = map[ssa.BlockKind]struct { ssa.BlockMIPS64FPF: {mips.ABFPF, mips.ABFPT}, } -func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { +func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { switch b.Kind { case ssa.BlockPlain: if b.Succs[0].Block() != next { p := s.Prog(obj.AJMP) p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) } case ssa.BlockDefer: // defer returns in R1: @@ -804,11 +804,11 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { p.From.Reg = mips.REGZERO p.Reg = mips.REG_R1 p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()}) if b.Succs[0].Block() != next { p := s.Prog(obj.AJMP) p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) } case ssa.BlockExit: case ssa.BlockRet: diff --git a/src/cmd/compile/internal/ppc64/galign.go b/src/cmd/compile/internal/ppc64/galign.go index c8ef567dc3fd0..c72d1aa8348c4 100644 --- a/src/cmd/compile/internal/ppc64/galign.go +++ b/src/cmd/compile/internal/ppc64/galign.go @@ -5,12 +5,12 @@ package ppc64 import ( - "cmd/compile/internal/gc" + "cmd/compile/internal/ssagen" "cmd/internal/obj/ppc64" "cmd/internal/objabi" ) -func Init(arch *gc.Arch) { +func Init(arch *ssagen.ArchInfo) { arch.LinkArch = &ppc64.Linkppc64 if objabi.GOARCH == "ppc64le" { arch.LinkArch = &ppc64.Linkppc64le diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go index edcaad03ecc83..c85e110ed3584 100644 --- a/src/cmd/compile/internal/ppc64/ssa.go +++ b/src/cmd/compile/internal/ppc64/ssa.go @@ -6,10 +6,10 @@ package ppc64 import ( "cmd/compile/internal/base" - "cmd/compile/internal/gc" "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/ppc64" @@ -19,7 +19,7 @@ import ( ) // markMoves marks any MOVXconst ops that need to avoid clobbering flags. 
-func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) { +func ssaMarkMoves(s *ssagen.State, b *ssa.Block) { // flive := b.FlagsLiveAtEnd // if b.Control != nil && b.Control.Type.IsFlags() { // flive = true @@ -101,7 +101,7 @@ func storeByType(t *types.Type) obj.As { panic("bad store type") } -func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { +func ssaGenValue(s *ssagen.State, v *ssa.Value) { switch v.Op { case ssa.OpCopy: t := v.Type @@ -469,7 +469,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case ssa.OpPPC64LoweredGetClosurePtr: // Closure pointer is R11 (already) - gc.CheckLoweredGetClosurePtr(v) + ssagen.CheckLoweredGetClosurePtr(v) case ssa.OpPPC64LoweredGetCallerSP: // caller's SP is FixedFrameSize below the address of the first arg @@ -491,7 +491,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case ssa.OpLoadReg: loadOp := loadByType(v.Type) p := s.Prog(loadOp) - gc.AddrAuto(&p.From, v.Args[0]) + ssagen.AddrAuto(&p.From, v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() @@ -500,7 +500,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(storeOp) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() - gc.AddrAuto(&p.To, v) + ssagen.AddrAuto(&p.To, v) case ssa.OpPPC64DIVD: // For now, @@ -758,7 +758,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = v.Args[0].Reg() p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) } @@ -819,7 +819,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(ppc64.AMOVD) p.From.Type = obj.TYPE_ADDR p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() // Load go.string using 0 offset @@ -837,7 +837,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() @@ -871,7 +871,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = ppc64.REGZERO p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore, ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore: p := s.Prog(v.Op.Asm()) @@ -879,7 +879,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = v.Args[1].Reg() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpPPC64MOVDstoreidx, ssa.OpPPC64MOVWstoreidx, ssa.OpPPC64MOVHstoreidx, ssa.OpPPC64MOVBstoreidx, ssa.OpPPC64FMOVDstoreidx, ssa.OpPPC64FMOVSstoreidx, ssa.OpPPC64MOVDBRstoreidx, ssa.OpPPC64MOVWBRstoreidx, @@ -1809,7 +1809,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.BoundsCheckFunc[v.AuxInt] + p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt] s.UseArgs(16) // space used in callee args area by assembly stubs case ssa.OpPPC64LoweredNilCheck: @@ -1847,7 +1847,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(ppc64.AMOVBZ) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = ppc64.REGTMP } @@ -1893,7 +1893,7 @@ var blockJump = [...]struct { ssa.BlockPPC64FGT: {ppc64.ABGT, ppc64.ABLE, false, false}, } -func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { +func ssaGenBlock(s 
*ssagen.State, b, next *ssa.Block) { switch b.Kind { case ssa.BlockDefer: // defer returns in R3: @@ -1907,18 +1907,18 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { p = s.Prog(ppc64.ABNE) p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()}) if b.Succs[0].Block() != next { p := s.Prog(obj.AJMP) p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) } case ssa.BlockPlain: if b.Succs[0].Block() != next { p := s.Prog(obj.AJMP) p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) } case ssa.BlockExit: case ssa.BlockRet: diff --git a/src/cmd/compile/internal/riscv64/galign.go b/src/cmd/compile/internal/riscv64/galign.go index 4db0fac52e42b..338248a7cf27a 100644 --- a/src/cmd/compile/internal/riscv64/galign.go +++ b/src/cmd/compile/internal/riscv64/galign.go @@ -5,11 +5,11 @@ package riscv64 import ( - "cmd/compile/internal/gc" + "cmd/compile/internal/ssagen" "cmd/internal/obj/riscv" ) -func Init(arch *gc.Arch) { +func Init(arch *ssagen.ArchInfo) { arch.LinkArch = &riscv.LinkRISCV64 arch.REGSP = riscv.REG_SP diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go index d08cebdcf5b76..70c29a4b7b343 100644 --- a/src/cmd/compile/internal/riscv64/ssa.go +++ b/src/cmd/compile/internal/riscv64/ssa.go @@ -6,9 +6,9 @@ package riscv64 import ( "cmd/compile/internal/base" - "cmd/compile/internal/gc" "cmd/compile/internal/ir" "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/riscv" @@ -180,9 +180,9 @@ func largestMove(alignment int64) (obj.As, int64) { // markMoves marks any MOVXconst ops that need to avoid clobbering flags. // RISC-V has no flags, so this is a no-op. 
-func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {} +func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {} -func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { +func ssaGenValue(s *ssagen.State, v *ssa.Value) { s.SetPos(v.Pos) switch v.Op { @@ -191,7 +191,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case ssa.OpArg: // input args need no code case ssa.OpPhi: - gc.CheckLoweredPhi(v) + ssagen.CheckLoweredPhi(v) case ssa.OpCopy, ssa.OpRISCV64MOVconvert, ssa.OpRISCV64MOVDreg: if v.Type.IsMemory() { return @@ -221,7 +221,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { return } p := s.Prog(loadByType(v.Type)) - gc.AddrAuto(&p.From, v.Args[0]) + ssagen.AddrAuto(&p.From, v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpStoreReg: @@ -232,7 +232,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(storeByType(v.Type)) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() - gc.AddrAuto(&p.To, v) + ssagen.AddrAuto(&p.To, v) case ssa.OpSP, ssa.OpSB, ssa.OpGetG: // nothing to do case ssa.OpRISCV64MOVBreg, ssa.OpRISCV64MOVHreg, ssa.OpRISCV64MOVWreg, @@ -323,10 +323,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { v.Fatalf("aux is of unknown type %T", v.Aux) case *obj.LSym: wantreg = "SB" - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) case *ir.Name: wantreg = "SP" - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) case nil: // No sym, just MOVW $off(SP), R wantreg = "SP" @@ -342,7 +342,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpRISCV64MOVBstore, ssa.OpRISCV64MOVHstore, ssa.OpRISCV64MOVWstore, ssa.OpRISCV64MOVDstore, @@ -352,14 +352,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = v.Args[1].Reg() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpRISCV64MOVBstorezero, ssa.OpRISCV64MOVHstorezero, ssa.OpRISCV64MOVWstorezero, ssa.OpRISCV64MOVDstorezero: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = riscv.REG_ZERO p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpRISCV64SEQZ, ssa.OpRISCV64SNEZ: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG @@ -377,7 +377,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.BoundsCheckFunc[v.AuxInt] + p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt] s.UseArgs(16) // space used in callee args area by assembly stubs case ssa.OpRISCV64LoweredAtomicLoad8: @@ -585,7 +585,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(riscv.AMOVB) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = riscv.REG_ZERO if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers @@ -594,7 +594,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case ssa.OpRISCV64LoweredGetClosurePtr: // Closure pointer is S4 (riscv.REG_CTXT). 
- gc.CheckLoweredGetClosurePtr(v) + ssagen.CheckLoweredGetClosurePtr(v) case ssa.OpRISCV64LoweredGetCallerSP: // caller's SP is FixedFrameSize below the address of the first arg @@ -644,7 +644,7 @@ var blockBranch = [...]obj.As{ ssa.BlockRISCV64BNEZ: riscv.ABNEZ, } -func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { +func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { s.SetPos(b.Pos) switch b.Kind { @@ -657,17 +657,17 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { p.From.Type = obj.TYPE_REG p.From.Reg = riscv.REG_ZERO p.Reg = riscv.REG_A0 - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()}) if b.Succs[0].Block() != next { p := s.Prog(obj.AJMP) p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) } case ssa.BlockPlain: if b.Succs[0].Block() != next { p := s.Prog(obj.AJMP) p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) } case ssa.BlockExit: case ssa.BlockRet: diff --git a/src/cmd/compile/internal/s390x/galign.go b/src/cmd/compile/internal/s390x/galign.go index cb68fd36c14bf..b004a2db0a39b 100644 --- a/src/cmd/compile/internal/s390x/galign.go +++ b/src/cmd/compile/internal/s390x/galign.go @@ -5,11 +5,11 @@ package s390x import ( - "cmd/compile/internal/gc" + "cmd/compile/internal/ssagen" "cmd/internal/obj/s390x" ) -func Init(arch *gc.Arch) { +func Init(arch *ssagen.ArchInfo) { arch.LinkArch = &s390x.Links390x arch.REGSP = s390x.REGSP arch.MAXWIDTH = 1 << 50 diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go index dc01401348be9..d4c7a286e2688 100644 --- a/src/cmd/compile/internal/s390x/ssa.go +++ b/src/cmd/compile/internal/s390x/ssa.go @@ -8,16 +8,16 @@ import ( "math" "cmd/compile/internal/base" - "cmd/compile/internal/gc" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/s390x" ) // markMoves marks any MOVXconst ops that need to avoid clobbering flags. -func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) { +func ssaMarkMoves(s *ssagen.State, b *ssa.Block) { flive := b.FlagsLiveAtEnd for _, c := range b.ControlValues() { flive = c.Type.IsFlags() || flive @@ -135,7 +135,7 @@ func moveByType(t *types.Type) obj.As { // dest := dest(To) op src(From) // and also returns the created obj.Prog so it // may be further adjusted (offset, scale, etc). -func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog { +func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog { p := s.Prog(op) p.From.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG @@ -148,7 +148,7 @@ func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog { // dest := src(From) op off // and also returns the created obj.Prog so it // may be further adjusted (offset, scale, etc). -func opregregimm(s *gc.SSAGenState, op obj.As, dest, src int16, off int64) *obj.Prog { +func opregregimm(s *ssagen.State, op obj.As, dest, src int16, off int64) *obj.Prog { p := s.Prog(op) p.From.Type = obj.TYPE_CONST p.From.Offset = off @@ -158,7 +158,7 @@ func opregregimm(s *gc.SSAGenState, op obj.As, dest, src int16, off int64) *obj. 
return p } -func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { +func ssaGenValue(s *ssagen.State, v *ssa.Value) { switch v.Op { case ssa.OpS390XSLD, ssa.OpS390XSLW, ssa.OpS390XSRD, ssa.OpS390XSRW, @@ -395,14 +395,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Type = obj.TYPE_ADDR p.From.Reg = r p.From.Index = i - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpS390XMOVDaddr: p := s.Prog(s390x.AMOVD) p.From.Type = obj.TYPE_ADDR p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpS390XCMP, ssa.OpS390XCMPW, ssa.OpS390XCMPU, ssa.OpS390XCMPWU: @@ -448,7 +448,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[1].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = r case ssa.OpS390XMOVDload, @@ -459,7 +459,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpS390XMOVBZloadidx, ssa.OpS390XMOVHZloadidx, ssa.OpS390XMOVWZloadidx, @@ -476,7 +476,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = r p.From.Scale = 1 p.From.Index = i - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore, @@ -487,7 +487,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = v.Args[1].Reg() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpS390XMOVBstoreidx, ssa.OpS390XMOVHstoreidx, ssa.OpS390XMOVWstoreidx, ssa.OpS390XMOVDstoreidx, ssa.OpS390XMOVHBRstoreidx, ssa.OpS390XMOVWBRstoreidx, ssa.OpS390XMOVDBRstoreidx, ssa.OpS390XFMOVSstoreidx, ssa.OpS390XFMOVDstoreidx: @@ -503,7 +503,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Reg = r p.To.Scale = 1 p.To.Index = i - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST @@ -511,7 +511,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Offset = sc.Val() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux2(&p.To, v, sc.Off()) + ssagen.AddAux2(&p.To, v, sc.Off()) case ssa.OpS390XMOVBreg, ssa.OpS390XMOVHreg, ssa.OpS390XMOVWreg, ssa.OpS390XMOVBZreg, ssa.OpS390XMOVHZreg, ssa.OpS390XMOVWZreg, ssa.OpS390XLDGR, ssa.OpS390XLGDR, @@ -530,7 +530,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Offset = sc.Val() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux2(&p.To, v, sc.Off()) + ssagen.AddAux2(&p.To, v, sc.Off()) case ssa.OpCopy: if v.Type.IsMemory() { return @@ -546,7 +546,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { return } p := s.Prog(loadByType(v.Type)) - gc.AddrAuto(&p.From, v.Args[0]) + ssagen.AddrAuto(&p.From, v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpStoreReg: @@ -557,10 +557,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(storeByType(v.Type)) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() - gc.AddrAuto(&p.To, v) + ssagen.AddrAuto(&p.To, v) case ssa.OpS390XLoweredGetClosurePtr: // Closure pointer is R12 
(already) - gc.CheckLoweredGetClosurePtr(v) + ssagen.CheckLoweredGetClosurePtr(v) case ssa.OpS390XLoweredRound32F, ssa.OpS390XLoweredRound64F: // input is already rounded case ssa.OpS390XLoweredGetG: @@ -593,7 +593,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.BoundsCheckFunc[v.AuxInt] + p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt] s.UseArgs(16) // space used in callee args area by assembly stubs case ssa.OpS390XFLOGR, ssa.OpS390XPOPCNT, ssa.OpS390XNEG, ssa.OpS390XNEGW, @@ -637,7 +637,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(s390x.AMOVBZ) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = s390x.REGTMP if logopt.Enabled() { @@ -672,7 +672,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.Reg = v.Args[len(v.Args)-2].Reg() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpS390XLoweredMove: // Inputs must be valid pointers to memory, // so adjust arg0 and arg1 as part of the expansion. @@ -764,7 +764,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg0() case ssa.OpS390XMOVBatomicstore, ssa.OpS390XMOVWatomicstore, ssa.OpS390XMOVDatomicstore: @@ -773,7 +773,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = v.Args[1].Reg() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpS390XLAN, ssa.OpS390XLAO: // LA(N|O) Ry, TMP, 0(Rx) op := s.Prog(v.Op.Asm()) @@ -808,7 +808,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = v.Args[1].Reg() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.OpS390XLoweredAtomicCas32, ssa.OpS390XLoweredAtomicCas64: // Convert the flags output of CS{,G} into a bool. // CS{,G} arg1, arg2, arg0 @@ -824,7 +824,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { cs.Reg = v.Args[2].Reg() // new cs.To.Type = obj.TYPE_MEM cs.To.Reg = v.Args[0].Reg() - gc.AddAux(&cs.To, v) + ssagen.AddAux(&cs.To, v) // MOVD $0, ret movd := s.Prog(s390x.AMOVD) @@ -859,7 +859,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { load.From.Reg = v.Args[0].Reg() load.To.Type = obj.TYPE_REG load.To.Reg = v.Reg0() - gc.AddAux(&load.From, v) + ssagen.AddAux(&load.From, v) // CS{,G} ret, arg1, arg0 cs := s.Prog(v.Op.Asm()) @@ -868,7 +868,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { cs.Reg = v.Args[1].Reg() // new cs.To.Type = obj.TYPE_MEM cs.To.Reg = v.Args[0].Reg() - gc.AddAux(&cs.To, v) + ssagen.AddAux(&cs.To, v) // BNE cs bne := s.Prog(s390x.ABNE) @@ -908,14 +908,14 @@ func blockAsm(b *ssa.Block) obj.As { panic("unreachable") } -func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { +func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { // Handle generic blocks first. 
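// The BlockPlain case below is identical across every back end touched by
// this patch: emit an unconditional branch only when the successor is not the
// fall-through block, and record it in s.Branches so the jump target can be
// patched once all blocks are laid out. A runnable sketch with stand-in types
// for ssagen.State, ssagen.Branch, and ssa.Block:
package main

import "fmt"

type block struct{ id int }

type branch struct {
	insn   string // the emitted jump instruction
	target *block // resolved to a PC once layout is known
}

type state struct{ branches []branch }

// genPlainBlock mirrors the ssa.BlockPlain case; b is the block being ended.
func (s *state) genPlainBlock(b, succ, next *block) {
	if succ != next {
		// Successor does not fall through; emit a jump and remember it.
		s.branches = append(s.branches, branch{insn: "JMP", target: succ})
	}
}

func main() {
	s := &state{}
	b0, b1, b2 := &block{0}, &block{1}, &block{2}
	s.genPlainBlock(b0, b1, b1) // falls through: nothing emitted
	s.genPlainBlock(b1, b2, b0) // needs an explicit jump
	fmt.Println(len(s.branches)) // 1
}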
switch b.Kind { case ssa.BlockPlain: if b.Succs[0].Block() != next { p := s.Prog(s390x.ABR) p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) } return case ssa.BlockDefer: diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/ssagen/abi.go similarity index 69% rename from src/cmd/compile/internal/gc/gsubr.go rename to src/cmd/compile/internal/ssagen/abi.go index 81f7956d2e87a..af08fcb7c3b72 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/ssagen/abi.go @@ -1,36 +1,18 @@ -// Derived from Inferno utils/6c/txt.c -// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6c/txt.c -// -// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. -// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) -// Portions Copyright © 1997-1999 Vita Nuova Limited -// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) -// Portions Copyright © 2004,2006 Bruce Ellis -// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) -// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others -// Portions Copyright © 2009 The Go Authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. -package gc +//go:generate go run mkbuiltin.go + +package ssagen import ( + "fmt" + "io/ioutil" + "log" + "os" + "strings" + "cmd/compile/internal/base" "cmd/compile/internal/escape" "cmd/compile/internal/ir" @@ -38,10 +20,211 @@ import ( "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/objabi" - "fmt" - "os" ) +// useNewABIWrapGen returns TRUE if the compiler should generate an +// ABI wrapper for the function 'f'. +func useABIWrapGen(f *ir.Func) bool { + if !base.Flag.ABIWrap { + return false + } + + // Support limit option for bisecting. + if base.Flag.ABIWrapLimit == 1 { + return false + } + if base.Flag.ABIWrapLimit < 1 { + return true + } + base.Flag.ABIWrapLimit-- + if base.Debug.ABIWrap != 0 && base.Flag.ABIWrapLimit == 1 { + fmt.Fprintf(os.Stderr, "=-= limit reached after new wrapper for %s\n", + f.LSym.Name) + } + + return true +} + +// symabiDefs and symabiRefs record the defined and referenced ABIs of +// symbols required by non-Go code. 
These are keyed by link symbol +// name, where the local package prefix is always `"".` +var symabiDefs, symabiRefs map[string]obj.ABI + +func CgoSymABIs() { + // The linker expects an ABI0 wrapper for all cgo-exported + // functions. + for _, prag := range typecheck.Target.CgoPragmas { + switch prag[0] { + case "cgo_export_static", "cgo_export_dynamic": + if symabiRefs == nil { + symabiRefs = make(map[string]obj.ABI) + } + symabiRefs[prag[1]] = obj.ABI0 + } + } +} + +// ReadSymABIs reads a symabis file that specifies definitions and +// references of text symbols by ABI. +// +// The symabis format is a set of lines, where each line is a sequence +// of whitespace-separated fields. The first field is a verb and is +// either "def" for defining a symbol ABI or "ref" for referencing a +// symbol using an ABI. For both "def" and "ref", the second field is +// the symbol name and the third field is the ABI name, as one of the +// named cmd/internal/obj.ABI constants. +func ReadSymABIs(file, myimportpath string) { + data, err := ioutil.ReadFile(file) + if err != nil { + log.Fatalf("-symabis: %v", err) + } + + symabiDefs = make(map[string]obj.ABI) + symabiRefs = make(map[string]obj.ABI) + + localPrefix := "" + if myimportpath != "" { + // Symbols in this package may be written either as + // "".X or with the package's import path already in + // the symbol. + localPrefix = objabi.PathToPrefix(myimportpath) + "." + } + + for lineNum, line := range strings.Split(string(data), "\n") { + lineNum++ // 1-based + line = strings.TrimSpace(line) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + + parts := strings.Fields(line) + switch parts[0] { + case "def", "ref": + // Parse line. + if len(parts) != 3 { + log.Fatalf(`%s:%d: invalid symabi: syntax is "%s sym abi"`, file, lineNum, parts[0]) + } + sym, abistr := parts[1], parts[2] + abi, valid := obj.ParseABI(abistr) + if !valid { + log.Fatalf(`%s:%d: invalid symabi: unknown abi "%s"`, file, lineNum, abistr) + } + + // If the symbol is already prefixed with + // myimportpath, rewrite it to start with "" + // so it matches the compiler's internal + // symbol names. + if localPrefix != "" && strings.HasPrefix(sym, localPrefix) { + sym = `"".` + sym[len(localPrefix):] + } + + // Record for later. + if parts[0] == "def" { + symabiDefs[sym] = abi + } else { + symabiRefs[sym] = abi + } + default: + log.Fatalf(`%s:%d: invalid symabi type "%s"`, file, lineNum, parts[0]) + } + } +} + +// InitLSym defines f's obj.LSym and initializes it based on the +// properties of f. This includes setting the symbol flags and ABI and +// creating and initializing related DWARF symbols. +// +// InitLSym must be called exactly once per function and must be +// called for both functions with bodies and functions without bodies. +// For body-less functions, we only create the LSym; for functions +// with bodies call a helper to setup up / populate the LSym. +func InitLSym(f *ir.Func, hasBody bool) { + // FIXME: for new-style ABI wrappers, we set up the lsym at the + // point the wrapper is created. + if f.LSym != nil && base.Flag.ABIWrap { + return + } + selectLSym(f, hasBody) + if hasBody { + setupTextLSym(f, 0) + } +} + +// selectLSym sets up the LSym for a given function, and +// makes calls to helpers to create ABI wrappers if needed. 
+func selectLSym(f *ir.Func, hasBody bool) { + if f.LSym != nil { + base.Fatalf("Func.initLSym called twice") + } + + if nam := f.Nname; !ir.IsBlank(nam) { + + var wrapperABI obj.ABI + needABIWrapper := false + defABI, hasDefABI := symabiDefs[nam.Sym().LinksymName()] + if hasDefABI && defABI == obj.ABI0 { + // Symbol is defined as ABI0. Create an + // Internal -> ABI0 wrapper. + f.LSym = nam.Sym().LinksymABI0() + needABIWrapper, wrapperABI = true, obj.ABIInternal + } else { + f.LSym = nam.Sym().Linksym() + // No ABI override. Check that the symbol is + // using the expected ABI. + want := obj.ABIInternal + if f.LSym.ABI() != want { + base.Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.LSym.Name, f.LSym.ABI(), want) + } + } + if f.Pragma&ir.Systemstack != 0 { + f.LSym.Set(obj.AttrCFunc, true) + } + + isLinknameExported := nam.Sym().Linkname != "" && (hasBody || hasDefABI) + if abi, ok := symabiRefs[f.LSym.Name]; (ok && abi == obj.ABI0) || isLinknameExported { + // Either 1) this symbol is definitely + // referenced as ABI0 from this package; or 2) + // this symbol is defined in this package but + // given a linkname, indicating that it may be + // referenced from another package. Create an + // ABI0 -> Internal wrapper so it can be + // called as ABI0. In case 2, it's important + // that we know it's defined in this package + // since other packages may "pull" symbols + // using linkname and we don't want to create + // duplicate ABI wrappers. + if f.LSym.ABI() != obj.ABI0 { + needABIWrapper, wrapperABI = true, obj.ABI0 + } + } + + if needABIWrapper { + if !useABIWrapGen(f) { + // Fallback: use alias instead. FIXME. + + // These LSyms have the same name as the + // native function, so we create them directly + // rather than looking them up. The uniqueness + // of f.lsym ensures uniqueness of asym. + asym := &obj.LSym{ + Name: f.LSym.Name, + Type: objabi.SABIALIAS, + R: []obj.Reloc{{Sym: f.LSym}}, // 0 size, so "informational" + } + asym.SetABI(wrapperABI) + asym.Set(obj.AttrDuplicateOK, true) + base.Ctxt.ABIAliases = append(base.Ctxt.ABIAliases, asym) + } else { + if base.Debug.ABIWrap != 0 { + fmt.Fprintf(os.Stderr, "=-= %v to %v wrapper for %s.%s\n", + wrapperABI, 1-wrapperABI, types.LocalPkg.Path, f.LSym.Name) + } + makeABIWrapper(f, wrapperABI) + } + } + } +} + // makeABIWrapper creates a new function that wraps a cross-ABI call // to "f". The wrapper is marked as an ABIWRAPPER. func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { @@ -152,101 +335,6 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { ir.CurFunc = savedcurfn } -// initLSym defines f's obj.LSym and initializes it based on the -// properties of f. This includes setting the symbol flags and ABI and -// creating and initializing related DWARF symbols. -// -// initLSym must be called exactly once per function and must be -// called for both functions with bodies and functions without bodies. -// For body-less functions, we only create the LSym; for functions -// with bodies call a helper to setup up / populate the LSym. -func initLSym(f *ir.Func, hasBody bool) { - // FIXME: for new-style ABI wrappers, we set up the lsym at the - // point the wrapper is created. - if f.LSym != nil && base.Flag.ABIWrap { - return - } - selectLSym(f, hasBody) - if hasBody { - setupTextLSym(f, 0) - } -} - -// selectLSym sets up the LSym for a given function, and -// makes calls to helpers to create ABI wrappers if needed. 
-func selectLSym(f *ir.Func, hasBody bool) { - if f.LSym != nil { - base.Fatalf("Func.initLSym called twice") - } - - if nam := f.Nname; !ir.IsBlank(nam) { - - var wrapperABI obj.ABI - needABIWrapper := false - defABI, hasDefABI := symabiDefs[nam.Sym().LinksymName()] - if hasDefABI && defABI == obj.ABI0 { - // Symbol is defined as ABI0. Create an - // Internal -> ABI0 wrapper. - f.LSym = nam.Sym().LinksymABI0() - needABIWrapper, wrapperABI = true, obj.ABIInternal - } else { - f.LSym = nam.Sym().Linksym() - // No ABI override. Check that the symbol is - // using the expected ABI. - want := obj.ABIInternal - if f.LSym.ABI() != want { - base.Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.LSym.Name, f.LSym.ABI(), want) - } - } - if f.Pragma&ir.Systemstack != 0 { - f.LSym.Set(obj.AttrCFunc, true) - } - - isLinknameExported := nam.Sym().Linkname != "" && (hasBody || hasDefABI) - if abi, ok := symabiRefs[f.LSym.Name]; (ok && abi == obj.ABI0) || isLinknameExported { - // Either 1) this symbol is definitely - // referenced as ABI0 from this package; or 2) - // this symbol is defined in this package but - // given a linkname, indicating that it may be - // referenced from another package. Create an - // ABI0 -> Internal wrapper so it can be - // called as ABI0. In case 2, it's important - // that we know it's defined in this package - // since other packages may "pull" symbols - // using linkname and we don't want to create - // duplicate ABI wrappers. - if f.LSym.ABI() != obj.ABI0 { - needABIWrapper, wrapperABI = true, obj.ABI0 - } - } - - if needABIWrapper { - if !useABIWrapGen(f) { - // Fallback: use alias instead. FIXME. - - // These LSyms have the same name as the - // native function, so we create them directly - // rather than looking them up. The uniqueness - // of f.lsym ensures uniqueness of asym. - asym := &obj.LSym{ - Name: f.LSym.Name, - Type: objabi.SABIALIAS, - R: []obj.Reloc{{Sym: f.LSym}}, // 0 size, so "informational" - } - asym.SetABI(wrapperABI) - asym.Set(obj.AttrDuplicateOK, true) - base.Ctxt.ABIAliases = append(base.Ctxt.ABIAliases, asym) - } else { - if base.Debug.ABIWrap != 0 { - fmt.Fprintf(os.Stderr, "=-= %v to %v wrapper for %s.%s\n", - wrapperABI, 1-wrapperABI, types.LocalPkg.Path, f.LSym.Name) - } - makeABIWrapper(f, wrapperABI) - } - } - } -} - // setupTextLsym initializes the LSym for a with-body text symbol. func setupTextLSym(f *ir.Func, flag int) { if f.Dupok() { diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/ssagen/arch.go similarity index 68% rename from src/cmd/compile/internal/gc/go.go rename to src/cmd/compile/internal/ssagen/arch.go index ba838a5ff528a..cc50ab36b5dd6 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/ssagen/arch.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package ssagen import ( "cmd/compile/internal/objw" @@ -10,11 +10,11 @@ import ( "cmd/internal/obj" ) -var pragcgobuf [][]string +var Arch ArchInfo // interface to back end -type Arch struct { +type ArchInfo struct { LinkArch *obj.LinkArch REGSP int @@ -31,22 +31,12 @@ type Arch struct { Ginsnopdefer func(*objw.Progs) *obj.Prog // special ginsnop for deferreturn // SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags. - SSAMarkMoves func(*SSAGenState, *ssa.Block) + SSAMarkMoves func(*State, *ssa.Block) // SSAGenValue emits Prog(s) for the Value. 
- SSAGenValue func(*SSAGenState, *ssa.Value) + SSAGenValue func(*State, *ssa.Value) // SSAGenBlock emits end-of-block Progs. SSAGenValue should be called // for all values in the block before SSAGenBlock. - SSAGenBlock func(s *SSAGenState, b, next *ssa.Block) + SSAGenBlock func(s *State, b, next *ssa.Block) } - -var thearch Arch - -var ( - BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym - ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym -) - -// GCWriteBarrierReg maps from registers to gcWriteBarrier implementation LSyms. -var GCWriteBarrierReg map[int16]*obj.LSym diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/ssagen/nowb.go similarity index 99% rename from src/cmd/compile/internal/gc/dcl.go rename to src/cmd/compile/internal/ssagen/nowb.go index 7b2bf5b606a9c..7b2e68c8e79ee 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/ssagen/nowb.go @@ -2,17 +2,18 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package ssagen import ( "bytes" + "fmt" + "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/src" - "fmt" ) func EnableNoWriteBarrierRecCheck() { diff --git a/src/cmd/compile/internal/ssagen/pgen.go b/src/cmd/compile/internal/ssagen/pgen.go new file mode 100644 index 0000000000000..bc6be20d86835 --- /dev/null +++ b/src/cmd/compile/internal/ssagen/pgen.go @@ -0,0 +1,279 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssagen + +import ( + "internal/race" + "math/rand" + "sort" + "sync" + "time" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/objw" + "cmd/compile/internal/ssa" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/src" + "cmd/internal/sys" +) + +// cmpstackvarlt reports whether the stack variable a sorts before b. +// +// Sort the list of stack variables. Autos after anything else, +// within autos, unused after used, within used, things with +// pointers first, zeroed things first, and then decreasing size. +// Because autos are laid out in decreasing addresses +// on the stack, pointers first, zeroed things first and decreasing size +// really means, in memory, things with pointers needing zeroing at +// the top of the stack and increasing in size. +// Non-autos sort on offset. +func cmpstackvarlt(a, b *ir.Name) bool { + if (a.Class_ == ir.PAUTO) != (b.Class_ == ir.PAUTO) { + return b.Class_ == ir.PAUTO + } + + if a.Class_ != ir.PAUTO { + return a.FrameOffset() < b.FrameOffset() + } + + if a.Used() != b.Used() { + return a.Used() + } + + ap := a.Type().HasPointers() + bp := b.Type().HasPointers() + if ap != bp { + return ap + } + + ap = a.Needzero() + bp = b.Needzero() + if ap != bp { + return ap + } + + if a.Type().Width != b.Type().Width { + return a.Type().Width > b.Type().Width + } + + return a.Sym().Name < b.Sym().Name +} + +// byStackvar implements sort.Interface for []*Node using cmpstackvarlt. 
+type byStackVar []*ir.Name + +func (s byStackVar) Len() int { return len(s) } +func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) } +func (s byStackVar) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s *ssafn) AllocFrame(f *ssa.Func) { + s.stksize = 0 + s.stkptrsize = 0 + fn := s.curfn + + // Mark the PAUTO's unused. + for _, ln := range fn.Dcl { + if ln.Class_ == ir.PAUTO { + ln.SetUsed(false) + } + } + + for _, l := range f.RegAlloc { + if ls, ok := l.(ssa.LocalSlot); ok { + ls.N.Name().SetUsed(true) + } + } + + scratchUsed := false + for _, b := range f.Blocks { + for _, v := range b.Values { + if n, ok := v.Aux.(*ir.Name); ok { + switch n.Class_ { + case ir.PPARAM, ir.PPARAMOUT: + // Don't modify nodfp; it is a global. + if n != ir.RegFP { + n.Name().SetUsed(true) + } + case ir.PAUTO: + n.Name().SetUsed(true) + } + } + if !scratchUsed { + scratchUsed = v.Op.UsesScratch() + } + + } + } + + if f.Config.NeedsFpScratch && scratchUsed { + s.scratchFpMem = typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT64]) + } + + sort.Sort(byStackVar(fn.Dcl)) + + // Reassign stack offsets of the locals that are used. + lastHasPtr := false + for i, n := range fn.Dcl { + if n.Op() != ir.ONAME || n.Class_ != ir.PAUTO { + continue + } + if !n.Used() { + fn.Dcl = fn.Dcl[:i] + break + } + + types.CalcSize(n.Type()) + w := n.Type().Width + if w >= types.MaxWidth || w < 0 { + base.Fatalf("bad width") + } + if w == 0 && lastHasPtr { + // Pad between a pointer-containing object and a zero-sized object. + // This prevents a pointer to the zero-sized object from being interpreted + // as a pointer to the pointer-containing object (and causing it + // to be scanned when it shouldn't be). See issue 24993. + w = 1 + } + s.stksize += w + s.stksize = types.Rnd(s.stksize, int64(n.Type().Align)) + if n.Type().HasPointers() { + s.stkptrsize = s.stksize + lastHasPtr = true + } else { + lastHasPtr = false + } + if Arch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) { + s.stksize = types.Rnd(s.stksize, int64(types.PtrSize)) + } + n.SetFrameOffset(-s.stksize) + } + + s.stksize = types.Rnd(s.stksize, int64(types.RegSize)) + s.stkptrsize = types.Rnd(s.stkptrsize, int64(types.RegSize)) +} + +const maxStackSize = 1 << 30 + +// Compile builds an SSA backend function, +// uses it to generate a plist, +// and flushes that plist to machine code. +// worker indicates which of the backend workers is doing the processing. +func Compile(fn *ir.Func, worker int) { + f := buildssa(fn, worker) + // Note: check arg size to fix issue 25507. + if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type().ArgWidth() >= maxStackSize { + largeStackFramesMu.Lock() + largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type().ArgWidth(), pos: fn.Pos()}) + largeStackFramesMu.Unlock() + return + } + pp := objw.NewProgs(fn, worker) + defer pp.Free() + genssa(f, pp) + // Check frame size again. + // The check above included only the space needed for local variables. + // After genssa, the space needed includes local variables and the callee arg region. + // We must do this check prior to calling pp.Flush. + // If there are any oversized stack frames, + // the assembler may emit inscrutable complaints about invalid instructions. 
+ if pp.Text.To.Offset >= maxStackSize { + largeStackFramesMu.Lock() + locals := f.Frontend().(*ssafn).stksize + largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type().ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()}) + largeStackFramesMu.Unlock() + return + } + + pp.Flush() // assemble, fill in boilerplate, etc. + // fieldtrack must be called after pp.Flush. See issue 20014. + fieldtrack(pp.Text.From.Sym, fn.FieldTrack) +} + +func init() { + if race.Enabled { + rand.Seed(time.Now().UnixNano()) + } +} + +// StackOffset returns the stack location of a LocalSlot relative to the +// stack pointer, suitable for use in a DWARF location entry. This has nothing +// to do with its offset in the user variable. +func StackOffset(slot ssa.LocalSlot) int32 { + n := slot.N + var off int64 + switch n.Class_ { + case ir.PAUTO: + off = n.FrameOffset() + if base.Ctxt.FixedFrameSize() == 0 { + off -= int64(types.PtrSize) + } + if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" { + // There is a word space for FP on ARM64 even if the frame pointer is disabled + off -= int64(types.PtrSize) + } + case ir.PPARAM, ir.PPARAMOUT: + off = n.FrameOffset() + base.Ctxt.FixedFrameSize() + } + return int32(off + slot.Off) +} + +// fieldtrack adds R_USEFIELD relocations to fnsym to record any +// struct fields that it used. +func fieldtrack(fnsym *obj.LSym, tracked map[*types.Sym]struct{}) { + if fnsym == nil { + return + } + if objabi.Fieldtrack_enabled == 0 || len(tracked) == 0 { + return + } + + trackSyms := make([]*types.Sym, 0, len(tracked)) + for sym := range tracked { + trackSyms = append(trackSyms, sym) + } + sort.Sort(symByName(trackSyms)) + for _, sym := range trackSyms { + r := obj.Addrel(fnsym) + r.Sym = sym.Linksym() + r.Type = objabi.R_USEFIELD + } +} + +type symByName []*types.Sym + +func (a symByName) Len() int { return len(a) } +func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name } +func (a symByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// largeStack is info about a function whose stack frame is too large (rare). +type largeStack struct { + locals int64 + args int64 + callee int64 + pos src.XPos +} + +var ( + largeStackFramesMu sync.Mutex // protects largeStackFrames + largeStackFrames []largeStack +) + +func CheckLargeStacks() { + // Check whether any of the functions we have compiled have gigantic stack frames. + sort.Slice(largeStackFrames, func(i, j int) bool { + return largeStackFrames[i].pos.Before(largeStackFrames[j].pos) + }) + for _, large := range largeStackFrames { + if large.callee != 0 { + base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20) + } else { + base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20) + } + } +} diff --git a/src/cmd/compile/internal/gc/pgen_test.go b/src/cmd/compile/internal/ssagen/pgen_test.go similarity index 99% rename from src/cmd/compile/internal/gc/pgen_test.go rename to src/cmd/compile/internal/ssagen/pgen_test.go index 95c4b24fa1eee..82d8447e9fd6f 100644 --- a/src/cmd/compile/internal/gc/pgen_test.go +++ b/src/cmd/compile/internal/ssagen/pgen_test.go @@ -2,16 +2,17 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-package gc +package ssagen import ( + "reflect" + "sort" + "testing" + "cmd/compile/internal/ir" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/src" - "reflect" - "sort" - "testing" ) func typeWithoutPointers() *types.Type { diff --git a/src/cmd/compile/internal/gc/phi.go b/src/cmd/compile/internal/ssagen/phi.go similarity index 97% rename from src/cmd/compile/internal/gc/phi.go rename to src/cmd/compile/internal/ssagen/phi.go index 75ce18ff841e0..01ad211282cf3 100644 --- a/src/cmd/compile/internal/gc/phi.go +++ b/src/cmd/compile/internal/ssagen/phi.go @@ -2,15 +2,16 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package ssagen import ( + "container/heap" + "fmt" + "cmd/compile/internal/ir" "cmd/compile/internal/ssa" "cmd/compile/internal/types" "cmd/internal/src" - "container/heap" - "fmt" ) // This file contains the algorithm to place phi nodes in a function. @@ -23,13 +24,13 @@ const smallBlocks = 500 const debugPhi = false -// FwdRefAux wraps an arbitrary ir.Node as an ssa.Aux for use with OpFwdref. -type FwdRefAux struct { +// fwdRefAux wraps an arbitrary ir.Node as an ssa.Aux for use with OpFwdref. +type fwdRefAux struct { _ [0]func() // ensure ir.Node isn't compared for equality N ir.Node } -func (FwdRefAux) CanBeAnSSAAux() {} +func (fwdRefAux) CanBeAnSSAAux() {} // insertPhis finds all the places in the function where a phi is // necessary and inserts them. @@ -87,7 +88,7 @@ func (s *phiState) insertPhis() { if v.Op != ssa.OpFwdRef { continue } - var_ := v.Aux.(FwdRefAux).N + var_ := v.Aux.(fwdRefAux).N // Optimization: look back 1 block for the definition. if len(b.Preds) == 1 { @@ -334,7 +335,7 @@ func (s *phiState) resolveFwdRefs() { if v.Op != ssa.OpFwdRef { continue } - n := s.varnum[v.Aux.(FwdRefAux).N] + n := s.varnum[v.Aux.(fwdRefAux).N] v.Op = ssa.OpCopy v.Aux = nil v.AddArg(values[n]) @@ -465,7 +466,7 @@ func (s *simplePhiState) insertPhis() { continue } s.fwdrefs = append(s.fwdrefs, v) - var_ := v.Aux.(FwdRefAux).N + var_ := v.Aux.(fwdRefAux).N if _, ok := s.defvars[b.ID][var_]; !ok { s.defvars[b.ID][var_] = v // treat FwdDefs as definitions. } @@ -479,7 +480,7 @@ loop: v := s.fwdrefs[len(s.fwdrefs)-1] s.fwdrefs = s.fwdrefs[:len(s.fwdrefs)-1] b := v.Block - var_ := v.Aux.(FwdRefAux).N + var_ := v.Aux.(fwdRefAux).N if b == s.f.Entry { // No variable should be live at entry. s.s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, var_, v) @@ -546,7 +547,7 @@ func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ ir. } } // Generate a FwdRef for the variable and return that. - v := b.NewValue0A(line, ssa.OpFwdRef, t, FwdRefAux{N: var_}) + v := b.NewValue0A(line, ssa.OpFwdRef, t, fwdRefAux{N: var_}) s.defvars[b.ID][var_] = v if var_.Op() == ir.ONAME { s.s.addNamedValue(var_.(*ir.Name), v) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go similarity index 98% rename from src/cmd/compile/internal/gc/ssa.go rename to src/cmd/compile/internal/ssagen/ssa.go index 997bcb6d5edcc..a77e57a5b6bac 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -2,9 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-package gc +package ssagen import ( + "bufio" + "bytes" "encoding/binary" "fmt" "go/constant" @@ -14,8 +16,6 @@ import ( "sort" "strings" - "bufio" - "bytes" "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/liveness" @@ -41,20 +41,16 @@ var ssaDumpStdout bool // whether to dump to stdout var ssaDumpCFG string // generate CFGs for these phases const ssaDumpFile = "ssa.html" -// The max number of defers in a function using open-coded defers. We enforce this -// limit because the deferBits bitmask is currently a single byte (to minimize code size) -const maxOpenDefers = 8 - // ssaDumpInlined holds all inlined functions when ssaDump contains a function name. var ssaDumpInlined []*ir.Func -func ssaDumpInline(fn *ir.Func) { +func DumpInline(fn *ir.Func) { if ssaDump != "" && ssaDump == ir.FuncName(fn) { ssaDumpInlined = append(ssaDumpInlined, fn) } } -func initSSAEnv() { +func InitEnv() { ssaDump = os.Getenv("GOSSAFUNC") ssaDir = os.Getenv("GOSSADIR") if ssaDump != "" { @@ -70,10 +66,10 @@ func initSSAEnv() { } } -func initssaconfig() { +func InitConfig() { types_ := ssa.NewTypes() - if thearch.SoftFloat { + if Arch.SoftFloat { softfloatInit() } @@ -91,7 +87,7 @@ func initssaconfig() { _ = types.NewPtr(types.ErrorType) // *error types.NewPtrCacheEnabled = false ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0) - ssaConfig.SoftFloat = thearch.SoftFloat + ssaConfig.SoftFloat = Arch.SoftFloat ssaConfig.Race = base.Flag.Race ssaCaches = make([]ssa.Cache, base.Flag.LowerC) @@ -148,7 +144,7 @@ func initssaconfig() { } } - if thearch.LinkArch.Family == sys.Wasm { + if Arch.LinkArch.Family == sys.Wasm { BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("goPanicIndex") BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("goPanicIndexU") BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("goPanicSliceAlen") @@ -183,7 +179,7 @@ func initssaconfig() { BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("panicSlice3C") BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("panicSlice3CU") } - if thearch.LinkArch.PtrSize == 4 { + if Arch.LinkArch.PtrSize == 4 { ExtendCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeVar("panicExtendIndex") ExtendCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeVar("panicExtendIndexU") ExtendCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeVar("panicExtendSliceAlen") @@ -1215,7 +1211,7 @@ func (s *state) stmt(n ir.Node) { n := n.(*ir.AssignListStmt) res, resok := s.dottype(n.Rhs[0].(*ir.TypeAssertExpr), true) deref := false - if !canSSAType(n.Rhs[0].Type()) { + if !TypeOK(n.Rhs[0].Type()) { if res.Op != ssa.OpLoad { s.Fatalf("dottype of non-load") } @@ -1351,7 +1347,7 @@ func (s *state) stmt(n ir.Node) { } var r *ssa.Value - deref := !canSSAType(t) + deref := !TypeOK(t) if deref { if rhs == nil { r = nil // Signal assign to use OpZero. 
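
The canSSAType -> TypeOK rename is the bulk of this hunk: the predicate decides which values the backend keeps in SSA form and which must stay addressable in memory (the deref paths above). A rough standalone illustration of the size cutoff it applies -- the real predicate, visible further down in this diff, additionally restricts arrays to at most one element and structs to ones whose fields all pass, and unsafe.Sizeof stands in here for the compiler's internal width computation:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// TypeOK-style cutoff: values wider than 4 pointer words never
	// stay in SSA form.
	const cutoff = 4 * unsafe.Sizeof(uintptr(0))

	fmt.Println(unsafe.Sizeof(int64(0)) <= cutoff)               // small scalar: SSA-able
	fmt.Println(unsafe.Sizeof([1]int64{}) <= cutoff)             // one-element array: SSA-able
	fmt.Println(unsafe.Sizeof([16]int64{}) <= cutoff)            // large array: memory only
	fmt.Println(unsafe.Sizeof(struct{ a, b int64 }{}) <= cutoff) // small struct: SSA-able
}
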
@@ -2133,7 +2129,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { return s.load(n.Type(), addr) case ir.ONAMEOFFSET: n := n.(*ir.NameOffsetExpr) - if s.canSSAName(n.Name_) && canSSAType(n.Type()) { + if s.canSSAName(n.Name_) && TypeOK(n.Type()) { return s.variable(n, n.Type()) } addr := s.addr(n) @@ -2352,18 +2348,18 @@ func (s *state) expr(n ir.Node) *ssa.Value { if ft.IsFloat() || tt.IsFloat() { conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}] - if s.config.RegSize == 4 && thearch.LinkArch.Family != sys.MIPS && !s.softFloat { + if s.config.RegSize == 4 && Arch.LinkArch.Family != sys.MIPS && !s.softFloat { if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 { conv = conv1 } } - if thearch.LinkArch.Family == sys.ARM64 || thearch.LinkArch.Family == sys.Wasm || thearch.LinkArch.Family == sys.S390X || s.softFloat { + if Arch.LinkArch.Family == sys.ARM64 || Arch.LinkArch.Family == sys.Wasm || Arch.LinkArch.Family == sys.S390X || s.softFloat { if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 { conv = conv1 } } - if thearch.LinkArch.Family == sys.MIPS && !s.softFloat { + if Arch.LinkArch.Family == sys.MIPS && !s.softFloat { if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() { // tt is float32 or float64, and ft is also unsigned if tt.Size() == 4 { @@ -2713,7 +2709,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset) return s.rawLoad(n.Type(), addr) } - if canSSAType(n.Type()) { + if TypeOK(n.Type()) { return s.newValue1I(ssa.OpSelectN, n.Type(), which, s.prevCall) } else { addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(n.Type()), which, s.prevCall) @@ -2779,7 +2775,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { p := s.addr(n) return s.load(n.X.Type().Elem(), p) case n.X.Type().IsArray(): - if canSSAType(n.X.Type()) { + if TypeOK(n.X.Type()) { // SSA can handle arrays of length at most 1. bound := n.X.Type().NumElem() a := s.expr(n.X) @@ -3055,7 +3051,7 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value { } args := make([]argRec, 0, nargs) for _, n := range n.Args[1:] { - if canSSAType(n.Type()) { + if TypeOK(n.Type()) { args = append(args, argRec{v: s.expr(n), store: true}) } else { v := s.addr(n) @@ -3418,7 +3414,7 @@ type intrinsicKey struct { fn string } -func initSSATables() { +func InitTables() { intrinsics = map[intrinsicKey]intrinsicBuilder{} var all []*sys.Arch @@ -4297,7 +4293,7 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder { } // Skip intrinsifying math functions (which may contain hard-float // instructions) when soft-float - if thearch.SoftFloat && pkg == "math" { + if Arch.SoftFloat && pkg == "math" { return nil } @@ -4309,10 +4305,10 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder { return nil } } - return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}] + return intrinsics[intrinsicKey{Arch.LinkArch.Arch, pkg, fn}] } -func isIntrinsicCall(n *ir.CallExpr) bool { +func IsIntrinsicCall(n *ir.CallExpr) bool { if n == nil { return false } @@ -4427,7 +4423,7 @@ func (s *state) openDeferRecord(n *ir.CallExpr) { } for _, argn := range n.Rargs { var v *ssa.Value - if canSSAType(argn.Type()) { + if TypeOK(argn.Type()) { v = s.openDeferSave(nil, argn.Type(), s.expr(argn)) } else { v = s.openDeferSave(argn, argn.Type(), nil) @@ -4456,7 +4452,7 @@ func (s *state) openDeferRecord(n *ir.CallExpr) { // evaluated (via s.addr() below) to get the value that is to be stored. 
The // function returns an SSA value representing a pointer to the autotmp location. func (s *state) openDeferSave(n ir.Node, t *types.Type, val *ssa.Value) *ssa.Value { - canSSA := canSSAType(t) + canSSA := TypeOK(t) var pos src.XPos if canSSA { pos = val.Pos @@ -4570,7 +4566,7 @@ func (s *state) openDeferExit() { ACArgs = append(ACArgs, ssa.Param{Type: f.Type, Offset: int32(argStart + f.Offset)}) if testLateExpansion { var a *ssa.Value - if !canSSAType(f.Type) { + if !TypeOK(f.Type) { a = s.newValue2(ssa.OpDereference, f.Type, argAddrVal, s.mem()) } else { a = s.load(f.Type, argAddrVal) @@ -4578,7 +4574,7 @@ func (s *state) openDeferExit() { callArgs = append(callArgs, a) } else { addr := s.constOffPtrSP(pt, argStart+f.Offset) - if !canSSAType(f.Type) { + if !TypeOK(f.Type) { s.move(f.Type, addr, argAddrVal) } else { argVal := s.load(f.Type, argAddrVal) @@ -4946,7 +4942,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val // maybeNilCheckClosure checks if a nil check of a closure is needed in some // architecture-dependent situations and, if so, emits the nil check. func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) { - if thearch.LinkArch.Family == sys.Wasm || objabi.GOOS == "aix" && k != callGo { + if Arch.LinkArch.Family == sys.Wasm || objabi.GOOS == "aix" && k != callGo { // On AIX, the closure needs to be verified as fn can be nil, except if it's a call go. This needs to be handled by the runtime to have the "go of nil func value" error. // TODO(neelance): On other architectures this should be eliminated by the optimization steps s.nilCheck(closure) @@ -5139,7 +5135,7 @@ func (s *state) canSSA(n ir.Node) bool { if n.Op() != ir.ONAME { return false } - return s.canSSAName(n.(*ir.Name)) && canSSAType(n.Type()) + return s.canSSAName(n.(*ir.Name)) && TypeOK(n.Type()) } func (s *state) canSSAName(name *ir.Name) bool { @@ -5181,7 +5177,7 @@ func (s *state) canSSAName(name *ir.Name) bool { } // canSSA reports whether variables of type t are SSA-able. -func canSSAType(t *types.Type) bool { +func TypeOK(t *types.Type) bool { types.CalcSize(t) if t.Width > int64(4*types.PtrSize) { // 4*Widthptr is an arbitrary constant. We want it @@ -5195,7 +5191,7 @@ func canSSAType(t *types.Type) bool { // not supported on SSA variables. // TODO: allow if all indexes are constant. if t.NumElem() <= 1 { - return canSSAType(t.Elem()) + return TypeOK(t.Elem()) } return false case types.TSTRUCT: @@ -5203,7 +5199,7 @@ func canSSAType(t *types.Type) bool { return false } for _, t1 := range t.Fields().Slice() { - if !canSSAType(t1.Type) { + if !TypeOK(t1.Type) { return false } } @@ -5307,7 +5303,7 @@ func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo b.AddEdgeTo(bPanic) s.startBlock(bPanic) - if thearch.LinkArch.Family == sys.Wasm { + if Arch.LinkArch.Family == sys.Wasm { // TODO(khr): figure out how to do "register" based calling convention for bounds checks. // Should be similar to gcWriteBarrier, but I can't make it work. s.rtcall(BoundsCheckFunc[kind], false, nil, idx, len) @@ -5435,7 +5431,7 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args . 
if testLateExpansion { for i, t := range results { off = types.Rnd(off, t.Alignment()) - if canSSAType(t) { + if TypeOK(t) { res[i] = s.newValue1I(ssa.OpSelectN, t, int64(i), call) } else { addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), int64(i), call) @@ -5575,7 +5571,7 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) { func (s *state) putArg(n ir.Node, t *types.Type, off int64, forLateExpandedCall bool) (ssa.Param, *ssa.Value) { var a *ssa.Value if forLateExpandedCall { - if !canSSAType(t) { + if !TypeOK(t) { a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem()) } else { a = s.expr(n) @@ -5596,7 +5592,7 @@ func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off addr = s.newValue1I(ssa.OpOffPtr, pt, off, base) } - if !canSSAType(t) { + if !TypeOK(t) { a := s.addr(n) s.move(t, addr, a) return @@ -6146,7 +6142,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val var tmp ir.Node // temporary for use with large types var addr *ssa.Value // address of tmp - if commaok && !canSSAType(n.Type()) { + if commaok && !TypeOK(n.Type()) { // unSSAable type, use temporary. // TODO: get rid of some of these temporaries. tmp = typecheck.TempAt(n.Pos(), s.curfn, n.Type()) @@ -6250,7 +6246,7 @@ func (s *state) variable(n ir.Node, t *types.Type) *ssa.Value { } // Make a FwdRef, which records a value that's live on block input. // We'll find the matching definition as part of insertPhis. - v = s.newValue0A(ssa.OpFwdRef, t, FwdRefAux{N: n}) + v = s.newValue0A(ssa.OpFwdRef, t, fwdRefAux{N: n}) s.fwdVars[n] = v if n.Op() == ir.ONAME { s.addNamedValue(n.(*ir.Name), v) @@ -6300,8 +6296,8 @@ type Branch struct { B *ssa.Block // target } -// SSAGenState contains state needed during Prog generation. -type SSAGenState struct { +// State contains state needed during Prog generation. +type State struct { pp *objw.Progs // Branches remembers all the branch instructions we've seen @@ -6330,7 +6326,7 @@ type SSAGenState struct { } // Prog appends a new Prog. -func (s *SSAGenState) Prog(as obj.As) *obj.Prog { +func (s *State) Prog(as obj.As) *obj.Prog { p := s.pp.Prog(as) if ssa.LosesStmtMark(as) { return p @@ -6347,19 +6343,19 @@ func (s *SSAGenState) Prog(as obj.As) *obj.Prog { } // Pc returns the current Prog. -func (s *SSAGenState) Pc() *obj.Prog { +func (s *State) Pc() *obj.Prog { return s.pp.Next } // SetPos sets the current source position. -func (s *SSAGenState) SetPos(pos src.XPos) { +func (s *State) SetPos(pos src.XPos) { s.pp.Pos = pos } // Br emits a single branch instruction and returns the instruction. // Not all architectures need the returned instruction, but otherwise // the boilerplate is common to all. -func (s *SSAGenState) Br(op obj.As, target *ssa.Block) *obj.Prog { +func (s *State) Br(op obj.As, target *ssa.Block) *obj.Prog { p := s.Prog(op) p.To.Type = obj.TYPE_BRANCH s.Branches = append(s.Branches, Branch{P: p, B: target}) @@ -6371,7 +6367,7 @@ func (s *SSAGenState) Br(op obj.As, target *ssa.Block) *obj.Prog { // Spill/fill/copy instructions from the register allocator, // phi functions, and instructions with a no-pos position // are examples of instructions that can cause churn. 
-func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) { +func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) { switch v.Op { case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg: // These are not statements @@ -6447,7 +6443,7 @@ func emitStackObjects(e *ssafn, pp *objw.Progs) { // genssa appends entries to pp for each instruction in f. func genssa(f *ssa.Func, pp *objw.Progs) { - var s SSAGenState + var s State e := f.Frontend().(*ssafn) @@ -6525,7 +6521,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) { s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: liveness.IsUnsafe(f)} // Emit values in block - thearch.SSAMarkMoves(&s, b) + Arch.SSAMarkMoves(&s, b) for _, v := range b.Values { x := s.pp.Next s.DebugFriendlySetPosFrom(v) @@ -6552,7 +6548,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) { v.Fatalf("OpConvert should be a no-op: %s; %s", v.Args[0].LongString(), v.LongString()) } case ssa.OpInlMark: - p := thearch.Ginsnop(s.pp) + p := Arch.Ginsnop(s.pp) if inlMarks == nil { inlMarks = map[*obj.Prog]int32{} inlMarksByPos = map[src.XPos][]*obj.Prog{} @@ -6573,7 +6569,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) { firstPos = src.NoXPos } // let the backend handle it - thearch.SSAGenValue(&s, v) + Arch.SSAGenValue(&s, v) } if base.Ctxt.Flag_locationlists { @@ -6588,7 +6584,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) { } // If this is an empty infinite loop, stick a hardware NOP in there so that debuggers are less confused. if s.bstart[b.ID] == s.pp.Next && len(b.Succs) == 1 && b.Succs[0].Block() == b { - p := thearch.Ginsnop(s.pp) + p := Arch.Ginsnop(s.pp) p.Pos = p.Pos.WithIsStmt() if b.Pos == src.NoXPos { b.Pos = p.Pos // It needs a file, otherwise a no-file non-zero line causes confusion. See #35652. @@ -6609,7 +6605,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) { } x := s.pp.Next s.SetPos(b.Pos) - thearch.SSAGenBlock(&s, b, next) + Arch.SSAGenBlock(&s, b, next) if f.PrintOrHtmlSSA { for ; x != s.pp.Next; x = x.Link { progToBlock[x] = b @@ -6621,7 +6617,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) { // still be inside the function in question. So if // it ends in a call which doesn't return, add a // nop (which will never execute) after the call. - thearch.Ginsnop(pp) + Arch.Ginsnop(pp) } if openDeferInfo != nil { // When doing open-coded defers, generate a disconnected call to @@ -6636,7 +6632,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) { // going to emit anyway, and use those instructions instead of the // inline marks. for p := pp.Text; p != nil; p = p.Link { - if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.APCALIGN || thearch.LinkArch.Family == sys.Wasm { + if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.APCALIGN || Arch.LinkArch.Family == sys.Wasm { // Don't use 0-sized instructions as inline marks, because we need // to identify inline mark instructions by pc offset. // (Some of these instructions are sometimes zero-sized, sometimes not. 
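
State.Br above records each branch it emits in s.Branches instead of resolving it on the spot, because the target block may not have been lowered yet; once genssa has an s.bstart entry for every block, the recorded branches can be patched. A toy version of that two-pass scheme, using plain Go types rather than obj.Prog and ssa.Block:

package main

import "fmt"

type prog struct {
	op     string
	target int // filled in by the patch phase
}

type branch struct {
	p *prog // the unresolved jump
	b int   // target block ID
}

func main() {
	var (
		progs    []*prog
		branches []branch
		bstart   = map[int]int{} // block ID -> index of its first prog
	)

	emit := func(op string) *prog {
		p := &prog{op: op}
		progs = append(progs, p)
		return p
	}

	// Block 0: a jump to block 1, emitted before block 1 has an address.
	bstart[0] = len(progs)
	branches = append(branches, branch{p: emit("JMP"), b: 1})

	// Block 1: the jump target.
	bstart[1] = len(progs)
	emit("RET")

	// Patch phase: every block's start index is now known.
	for _, br := range branches {
		br.p.target = bstart[br.b]
	}

	for i, p := range progs {
		fmt.Printf("%d: %s (target %d)\n", i, p.op, p.target) // target is meaningful only for JMP
	}
}
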
@@ -6677,7 +6673,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) { } if base.Ctxt.Flag_locationlists { - debugInfo := ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, stackOffset) + debugInfo := ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset) e.curfn.DebugInfo = debugInfo bstart := s.bstart // Note that at this moment, Prog.Pc is a sequence number; it's @@ -6766,12 +6762,12 @@ func genssa(f *ssa.Func, pp *objw.Progs) { f.HTMLWriter = nil } -func defframe(s *SSAGenState, e *ssafn) { +func defframe(s *State, e *ssafn) { pp := s.pp frame := types.Rnd(s.maxarg+e.stksize, int64(types.RegSize)) - if thearch.PadFrame != nil { - frame = thearch.PadFrame(frame) + if Arch.PadFrame != nil { + frame = Arch.PadFrame(frame) } // Fill in argument and frame size. @@ -6808,7 +6804,7 @@ func defframe(s *SSAGenState, e *ssafn) { } // Zero old range - p = thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state) + p = Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state) // Set new range. lo = n.FrameOffset() @@ -6816,7 +6812,7 @@ func defframe(s *SSAGenState, e *ssafn) { } // Zero final range. - thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state) + Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state) } // For generating consecutive jump instructions to model a specific branching @@ -6825,14 +6821,14 @@ type IndexJump struct { Index int } -func (s *SSAGenState) oneJump(b *ssa.Block, jump *IndexJump) { +func (s *State) oneJump(b *ssa.Block, jump *IndexJump) { p := s.Br(jump.Jump, b.Succs[jump.Index].Block()) p.Pos = b.Pos } // CombJump generates combinational instructions (2 at present) for a block jump, // thereby the behaviour of non-standard condition codes could be simulated -func (s *SSAGenState) CombJump(b, next *ssa.Block, jumps *[2][2]IndexJump) { +func (s *State) CombJump(b, next *ssa.Block, jumps *[2][2]IndexJump) { switch next { case b.Succs[0].Block(): s.oneJump(b, &jumps[0][0]) @@ -7019,7 +7015,7 @@ func AddrAuto(a *obj.Addr, v *ssa.Value) { n, off := ssa.AutoVar(v) a.Type = obj.TYPE_MEM a.Sym = n.Sym().Linksym() - a.Reg = int16(thearch.REGSP) + a.Reg = int16(Arch.REGSP) a.Offset = n.FrameOffset() + off if n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT { a.Name = obj.NAME_PARAM @@ -7028,20 +7024,20 @@ func AddrAuto(a *obj.Addr, v *ssa.Value) { } } -func (s *SSAGenState) AddrScratch(a *obj.Addr) { +func (s *State) AddrScratch(a *obj.Addr) { if s.ScratchFpMem == nil { panic("no scratch memory available; forgot to declare usesScratch for Op?") } a.Type = obj.TYPE_MEM a.Name = obj.NAME_AUTO a.Sym = s.ScratchFpMem.Sym().Linksym() - a.Reg = int16(thearch.REGSP) + a.Reg = int16(Arch.REGSP) a.Offset = s.ScratchFpMem.Offset_ } // Call returns a new CALL instruction for the SSA value v. // It uses PrepareCall to prepare the call. -func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog { +func (s *State) Call(v *ssa.Value) *obj.Prog { pPosIsStmt := s.pp.Pos.IsStmt() // The statement-ness fo the call comes from ssaGenState s.PrepareCall(v) @@ -7057,7 +7053,7 @@ func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog { p.To.Sym = sym.Fn } else { // TODO(mdempsky): Can these differences be eliminated? - switch thearch.LinkArch.Family { + switch Arch.LinkArch.Family { case sys.AMD64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm: p.To.Type = obj.TYPE_REG case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64: @@ -7073,7 +7069,7 @@ func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog { // PrepareCall prepares to emit a CALL instruction for v and does call-related bookkeeping. 
// It must be called immediately before emitting the actual CALL instruction, // since it emits PCDATA for the stack map at the call (calls are safe points). -func (s *SSAGenState) PrepareCall(v *ssa.Value) { +func (s *State) PrepareCall(v *ssa.Value) { idx := s.livenessMap.Get(v) if !idx.StackMapValid() { // See Liveness.hasStackMap. @@ -7093,7 +7089,7 @@ func (s *SSAGenState) PrepareCall(v *ssa.Value) { // insert an actual hardware NOP that will have the right line number. // This is different from obj.ANOP, which is a virtual no-op // that doesn't make it into the instruction stream. - thearch.Ginsnopdefer(s.pp) + Arch.Ginsnopdefer(s.pp) } if ok { @@ -7111,7 +7107,7 @@ func (s *SSAGenState) PrepareCall(v *ssa.Value) { // UseArgs records the fact that an instruction needs a certain amount of // callee args space for its use. -func (s *SSAGenState) UseArgs(n int64) { +func (s *State) UseArgs(n int64) { if s.maxarg < n { s.maxarg = n } @@ -7223,7 +7219,7 @@ func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { } else { t = types.Types[types.TUINT32] } - if thearch.LinkArch.ByteOrder == binary.BigEndian { + if Arch.LinkArch.ByteOrder == binary.BigEndian { return e.SplitSlot(&name, ".hi", 0, t), e.SplitSlot(&name, ".lo", t.Size(), types.Types[types.TUINT32]) } return e.SplitSlot(&name, ".hi", t.Size(), t), e.SplitSlot(&name, ".lo", 0, types.Types[types.TUINT32]) @@ -7274,7 +7270,7 @@ func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t } func (e *ssafn) CanSSA(t *types.Type) bool { - return canSSAType(t) + return TypeOK(t) } func (e *ssafn) Line(pos src.XPos) string { @@ -7453,3 +7449,11 @@ func deferstruct(stksize int64) *types.Type { types.CalcStructSize(s) return s } + +var ( + BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym + ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym +) + +// GCWriteBarrierReg maps from registers to gcWriteBarrier implementation LSyms. 
+var GCWriteBarrierReg map[int16]*obj.LSym diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go index ee86fc62d27a5..e4ef9d7c6a876 100644 --- a/src/cmd/compile/internal/wasm/ssa.go +++ b/src/cmd/compile/internal/wasm/ssa.go @@ -6,18 +6,18 @@ package wasm import ( "cmd/compile/internal/base" - "cmd/compile/internal/gc" "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/objw" "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/wasm" "cmd/internal/objabi" ) -func Init(arch *gc.Arch) { +func Init(arch *ssagen.ArchInfo) { arch.LinkArch = &wasm.Linkwasm arch.REGSP = wasm.REG_SP arch.MAXWIDTH = 1 << 50 @@ -52,10 +52,10 @@ func ginsnop(pp *objw.Progs) *obj.Prog { return pp.Prog(wasm.ANop) } -func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) { +func ssaMarkMoves(s *ssagen.State, b *ssa.Block) { } -func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { +func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { switch b.Kind { case ssa.BlockPlain: if next != b.Succs[0].Block() { @@ -121,7 +121,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { } } -func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { +func ssaGenValue(s *ssagen.State, v *ssa.Value) { switch v.Op { case ssa.OpWasmLoweredStaticCall, ssa.OpWasmLoweredClosureCall, ssa.OpWasmLoweredInterCall: s.PrepareCall(v) @@ -188,7 +188,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { getReg(s, wasm.REG_SP) getValue64(s, v.Args[0]) p := s.Prog(storeOp(v.Type)) - gc.AddrAuto(&p.To, v) + ssagen.AddrAuto(&p.To, v) default: if v.Type.IsMemory() { @@ -208,7 +208,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { } } -func ssaGenValueOnStack(s *gc.SSAGenState, v *ssa.Value, extend bool) { +func ssaGenValueOnStack(s *ssagen.State, v *ssa.Value, extend bool) { switch v.Op { case ssa.OpWasmLoweredGetClosurePtr: getReg(s, wasm.REG_CTXT) @@ -243,10 +243,10 @@ func ssaGenValueOnStack(s *gc.SSAGenState, v *ssa.Value, extend bool) { p.From.Type = obj.TYPE_ADDR switch v.Aux.(type) { case *obj.LSym: - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) case *ir.Name: p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) default: panic("wasm: bad LoweredAddr") } @@ -363,7 +363,7 @@ func ssaGenValueOnStack(s *gc.SSAGenState, v *ssa.Value, extend bool) { case ssa.OpLoadReg: p := s.Prog(loadOp(v.Type)) - gc.AddrAuto(&p.From, v.Args[0]) + ssagen.AddrAuto(&p.From, v.Args[0]) case ssa.OpCopy: getValue64(s, v.Args[0]) @@ -385,7 +385,7 @@ func isCmp(v *ssa.Value) bool { } } -func getValue32(s *gc.SSAGenState, v *ssa.Value) { +func getValue32(s *ssagen.State, v *ssa.Value) { if v.OnWasmStack { s.OnWasmStackSkipped-- ssaGenValueOnStack(s, v, false) @@ -402,7 +402,7 @@ func getValue32(s *gc.SSAGenState, v *ssa.Value) { } } -func getValue64(s *gc.SSAGenState, v *ssa.Value) { +func getValue64(s *ssagen.State, v *ssa.Value) { if v.OnWasmStack { s.OnWasmStackSkipped-- ssaGenValueOnStack(s, v, true) @@ -416,32 +416,32 @@ func getValue64(s *gc.SSAGenState, v *ssa.Value) { } } -func i32Const(s *gc.SSAGenState, val int32) { +func i32Const(s *ssagen.State, val int32) { p := s.Prog(wasm.AI32Const) p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(val)} } -func i64Const(s *gc.SSAGenState, val int64) { +func i64Const(s *ssagen.State, val int64) { p := s.Prog(wasm.AI64Const) p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: val} } -func f32Const(s *gc.SSAGenState, val float64) { +func f32Const(s 
*ssagen.State, val float64) { p := s.Prog(wasm.AF32Const) p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val} } -func f64Const(s *gc.SSAGenState, val float64) { +func f64Const(s *ssagen.State, val float64) { p := s.Prog(wasm.AF64Const) p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val} } -func getReg(s *gc.SSAGenState, reg int16) { +func getReg(s *ssagen.State, reg int16) { p := s.Prog(wasm.AGet) p.From = obj.Addr{Type: obj.TYPE_REG, Reg: reg} } -func setReg(s *gc.SSAGenState, reg int16) { +func setReg(s *ssagen.State, reg int16) { p := s.Prog(wasm.ASet) p.To = obj.Addr{Type: obj.TYPE_REG, Reg: reg} } diff --git a/src/cmd/compile/internal/x86/galign.go b/src/cmd/compile/internal/x86/galign.go index 7d628f9b7c5df..fc806f91196df 100644 --- a/src/cmd/compile/internal/x86/galign.go +++ b/src/cmd/compile/internal/x86/galign.go @@ -6,14 +6,14 @@ package x86 import ( "cmd/compile/internal/base" - "cmd/compile/internal/gc" + "cmd/compile/internal/ssagen" "cmd/internal/obj/x86" "cmd/internal/objabi" "fmt" "os" ) -func Init(arch *gc.Arch) { +func Init(arch *ssagen.ArchInfo) { arch.LinkArch = &x86.Link386 arch.REGSP = x86.REGSP arch.SSAGenValue = ssaGenValue diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go index d3d60591ccc4a..00dfa07bf78be 100644 --- a/src/cmd/compile/internal/x86/ssa.go +++ b/src/cmd/compile/internal/x86/ssa.go @@ -9,17 +9,17 @@ import ( "math" "cmd/compile/internal/base" - "cmd/compile/internal/gc" "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/x86" ) // markMoves marks any MOVXconst ops that need to avoid clobbering flags. -func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) { +func ssaMarkMoves(s *ssagen.State, b *ssa.Block) { flive := b.FlagsLiveAtEnd for _, c := range b.ControlValues() { flive = c.Type.IsFlags() || flive @@ -109,7 +109,7 @@ func moveByType(t *types.Type) obj.As { // dest := dest(To) op src(From) // and also returns the created obj.Prog so it // may be further adjusted (offset, scale, etc). 
-func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog { +func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog { p := s.Prog(op) p.From.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG @@ -118,7 +118,7 @@ func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog { return p } -func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { +func ssaGenValue(s *ssagen.State, v *ssa.Value) { switch v.Op { case ssa.Op386ADDL: r := v.Reg() @@ -406,14 +406,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Type = obj.TYPE_MEM p.From.Reg = r p.From.Index = i - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.Op386LEAL: p := s.Prog(x86.ALEAL) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.Op386CMPL, ssa.Op386CMPW, ssa.Op386CMPB, @@ -439,7 +439,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Args[1].Reg() case ssa.Op386CMPLconstload, ssa.Op386CMPWconstload, ssa.Op386CMPBconstload: @@ -447,7 +447,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux2(&p.From, v, sc.Off()) + ssagen.AddAux2(&p.From, v, sc.Off()) p.To.Type = obj.TYPE_CONST p.To.Offset = sc.Val() case ssa.Op386MOVLconst: @@ -499,7 +499,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.Op386MOVBloadidx1, ssa.Op386MOVWloadidx1, ssa.Op386MOVLloadidx1, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1, @@ -523,7 +523,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { } p.From.Reg = r p.From.Index = i - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.Op386ADDLloadidx4, ssa.Op386SUBLloadidx4, ssa.Op386MULLloadidx4, @@ -533,7 +533,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = v.Args[1].Reg() p.From.Index = v.Args[2].Reg() p.From.Scale = 4 - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() if v.Reg() != v.Args[0].Reg() { @@ -546,7 +546,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[1].Reg() - gc.AddAux(&p.From, v) + ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() if v.Reg() != v.Args[0].Reg() { @@ -559,7 +559,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = v.Args[1].Reg() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.Op386ADDLconstmodify: sc := v.AuxValAndOff() val := sc.Val() @@ -573,7 +573,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { off := sc.Off() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux2(&p.To, v, off) + ssagen.AddAux2(&p.To, v, off) break } fallthrough @@ -586,7 +586,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Offset = val p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux2(&p.To, v, off) + ssagen.AddAux2(&p.To, v, off) case ssa.Op386MOVBstoreidx1, ssa.Op386MOVWstoreidx1, ssa.Op386MOVLstoreidx1, 
ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1, ssa.Op386MOVSDstoreidx8, ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4, ssa.Op386MOVWstoreidx2, ssa.Op386ADDLmodifyidx4, ssa.Op386SUBLmodifyidx4, ssa.Op386ANDLmodifyidx4, ssa.Op386ORLmodifyidx4, ssa.Op386XORLmodifyidx4: @@ -612,7 +612,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { } p.To.Reg = r p.To.Index = i - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) case ssa.Op386MOVLstoreconst, ssa.Op386MOVWstoreconst, ssa.Op386MOVBstoreconst: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST @@ -620,7 +620,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Offset = sc.Val() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux2(&p.To, v, sc.Off()) + ssagen.AddAux2(&p.To, v, sc.Off()) case ssa.Op386ADDLconstmodifyidx4: sc := v.AuxValAndOff() val := sc.Val() @@ -636,7 +636,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Reg = v.Args[0].Reg() p.To.Scale = 4 p.To.Index = v.Args[1].Reg() - gc.AddAux2(&p.To, v, off) + ssagen.AddAux2(&p.To, v, off) break } fallthrough @@ -663,7 +663,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_MEM p.To.Reg = r p.To.Index = i - gc.AddAux2(&p.To, v, sc.Off()) + ssagen.AddAux2(&p.To, v, sc.Off()) case ssa.Op386MOVWLSX, ssa.Op386MOVBLSX, ssa.Op386MOVWLZX, ssa.Op386MOVBLZX, ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD, ssa.Op386CVTTSS2SL, ssa.Op386CVTTSD2SL, @@ -695,7 +695,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { return } p := s.Prog(loadByType(v.Type)) - gc.AddrAuto(&p.From, v.Args[0]) + ssagen.AddrAuto(&p.From, v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() @@ -707,10 +707,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(storeByType(v.Type)) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() - gc.AddrAuto(&p.To, v) + ssagen.AddrAuto(&p.To, v) case ssa.Op386LoweredGetClosurePtr: // Closure pointer is DX. 
- gc.CheckLoweredGetClosurePtr(v) + ssagen.CheckLoweredGetClosurePtr(v) case ssa.Op386LoweredGetG: r := v.Reg() // See the comments in cmd/internal/obj/x86/obj6.go @@ -766,14 +766,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.BoundsCheckFunc[v.AuxInt] + p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt] s.UseArgs(8) // space used in callee args area by assembly stubs case ssa.Op386LoweredPanicExtendA, ssa.Op386LoweredPanicExtendB, ssa.Op386LoweredPanicExtendC: p := s.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN - p.To.Sym = gc.ExtendCheckFunc[v.AuxInt] + p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt] s.UseArgs(12) // space used in callee args area by assembly stubs case ssa.Op386CALLstatic, ssa.Op386CALLclosure, ssa.Op386CALLinter: @@ -848,7 +848,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = x86.REG_AX p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) if logopt.Enabled() { logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) } @@ -861,7 +861,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Offset = 0xdeaddead p.To.Type = obj.TYPE_MEM p.To.Reg = x86.REG_SP - gc.AddAux(&p.To, v) + ssagen.AddAux(&p.To, v) default: v.Fatalf("genValue not implemented: %s", v.LongString()) } @@ -886,22 +886,22 @@ var blockJump = [...]struct { ssa.Block386NAN: {x86.AJPS, x86.AJPC}, } -var eqfJumps = [2][2]gc.IndexJump{ +var eqfJumps = [2][2]ssagen.IndexJump{ {{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0] {{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1] } -var nefJumps = [2][2]gc.IndexJump{ +var nefJumps = [2][2]ssagen.IndexJump{ {{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0] {{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1] } -func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { +func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { switch b.Kind { case ssa.BlockPlain: if b.Succs[0].Block() != next { p := s.Prog(obj.AJMP) p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) } case ssa.BlockDefer: // defer returns in rax: @@ -914,11 +914,11 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { p.To.Reg = x86.REG_AX p = s.Prog(x86.AJNE) p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()}) if b.Succs[0].Block() != next { p := s.Prog(obj.AJMP) p.To.Type = obj.TYPE_BRANCH - s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) } case ssa.BlockExit: case ssa.BlockRet: diff --git a/src/cmd/compile/main.go b/src/cmd/compile/main.go index 5a33719d870a2..cb2f4e8cf475a 100644 --- a/src/cmd/compile/main.go +++ b/src/cmd/compile/main.go @@ -15,6 +15,7 @@ import ( "cmd/compile/internal/ppc64" "cmd/compile/internal/riscv64" "cmd/compile/internal/s390x" + "cmd/compile/internal/ssagen" "cmd/compile/internal/wasm" "cmd/compile/internal/x86" "cmd/internal/objabi" @@ -23,7 +24,7 @@ import ( "os" ) -var archInits = map[string]func(*gc.Arch){ +var archInits = map[string]func(*ssagen.ArchInfo){ "386": x86.Init, "amd64": amd64.Init, "arm": 
arm.Init, From 01fd2d05c8b7bfc083977ca73123a5541b289737 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 23 Dec 2020 00:58:27 -0500 Subject: [PATCH 237/474] [dev.regabi] cmd/compile: split out package dwarfgen [generated] [git-generate] cd src/cmd/compile/internal/gc rf ' # Inline and remove ngotype. ex { import "cmd/compile/internal/ir" import "cmd/compile/internal/reflectdata" var n ir.Node ngotype(n) -> reflectdata.TypeSym(n.Type()) } rm ngotype mv recordFlags RecordFlags mv recordPackageName RecordPackageName mv RecordFlags RecordPackageName dwarf.go mv debuginfo Info mv genAbstractFunc AbstractFunc mv scope.go scope_test.go dwarf.go dwinl.go cmd/compile/internal/dwarfgen ' Change-Id: I31fa982900dbba2066ca4c7a706af922e5481c70 Reviewed-on: https://go-review.googlesource.com/c/go/+/279477 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- .../internal/{gc => dwarfgen}/dwarf.go | 83 +++++++++++++++++-- .../internal/{gc => dwarfgen}/dwinl.go | 9 +- .../internal/{gc => dwarfgen}/scope.go | 5 +- .../internal/{gc => dwarfgen}/scope_test.go | 5 +- src/cmd/compile/internal/gc/main.go | 76 ++--------------- src/cmd/compile/internal/gc/obj.go | 2 +- src/cmd/compile/internal/gc/subr.go | 8 -- 7 files changed, 94 insertions(+), 94 deletions(-) rename src/cmd/compile/internal/{gc => dwarfgen}/dwarf.go (85%) rename src/cmd/compile/internal/{gc => dwarfgen}/dwinl.go (99%) rename src/cmd/compile/internal/{gc => dwarfgen}/scope.go (99%) rename src/cmd/compile/internal/{gc => dwarfgen}/scope_test.go (99%) diff --git a/src/cmd/compile/internal/gc/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go similarity index 85% rename from src/cmd/compile/internal/gc/dwarf.go rename to src/cmd/compile/internal/dwarfgen/dwarf.go index e853c51422795..19cb70058c98e 100644 --- a/src/cmd/compile/internal/gc/dwarf.go +++ b/src/cmd/compile/internal/dwarfgen/dwarf.go @@ -2,13 +2,17 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package dwarfgen import ( + "bytes" + "flag" + "fmt" "sort" "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/reflectdata" "cmd/compile/internal/ssa" "cmd/compile/internal/ssagen" "cmd/compile/internal/types" @@ -18,7 +22,7 @@ import ( "cmd/internal/src" ) -func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) { +func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) { fn := curfn.(*ir.Func) if fn.Nname != nil { @@ -86,7 +90,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S continue } apdecls = append(apdecls, n) - fnsym.Func().RecordAutoType(ngotype(n).Linksym()) + fnsym.Func().RecordAutoType(reflectdata.TypeSym(n.Type()).Linksym()) } } @@ -236,7 +240,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir ChildIndex: -1, }) // Record go type of to insure that it gets emitted by the linker. 
- fnsym.Func().RecordAutoType(ngotype(n).Linksym()) + fnsym.Func().RecordAutoType(reflectdata.TypeSym(n.Type()).Linksym()) } return decls, vars @@ -305,7 +309,7 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { } typename := dwarf.InfoPrefix + types.TypeSymName(n.Type()) - delete(fnsym.Func().Autot, ngotype(n).Linksym()) + delete(fnsym.Func().Autot, reflectdata.TypeSym(n.Type()).Linksym()) inlIndex := 0 if base.Flag.GenDwarfInl > 1 { if n.Name().InlFormal() || n.Name().InlLocal() { @@ -372,7 +376,7 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var return nil } - gotype := ngotype(n).Linksym() + gotype := reflectdata.TypeSym(n.Type()).Linksym() delete(fnsym.Func().Autot, gotype) typename := dwarf.InfoPrefix + gotype.Name[len("type."):] inlIndex := 0 @@ -410,3 +414,70 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var } return dvar } + +// RecordFlags records the specified command-line flags to be placed +// in the DWARF info. +func RecordFlags(flags ...string) { + if base.Ctxt.Pkgpath == "" { + // We can't record the flags if we don't know what the + // package name is. + return + } + + type BoolFlag interface { + IsBoolFlag() bool + } + type CountFlag interface { + IsCountFlag() bool + } + var cmd bytes.Buffer + for _, name := range flags { + f := flag.Lookup(name) + if f == nil { + continue + } + getter := f.Value.(flag.Getter) + if getter.String() == f.DefValue { + // Flag has default value, so omit it. + continue + } + if bf, ok := f.Value.(BoolFlag); ok && bf.IsBoolFlag() { + val, ok := getter.Get().(bool) + if ok && val { + fmt.Fprintf(&cmd, " -%s", f.Name) + continue + } + } + if cf, ok := f.Value.(CountFlag); ok && cf.IsCountFlag() { + val, ok := getter.Get().(int) + if ok && val == 1 { + fmt.Fprintf(&cmd, " -%s", f.Name) + continue + } + } + fmt.Fprintf(&cmd, " -%s=%v", f.Name, getter.Get()) + } + + if cmd.Len() == 0 { + return + } + s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + base.Ctxt.Pkgpath) + s.Type = objabi.SDWARFCUINFO + // Sometimes (for example when building tests) we can link + // together two package main archives. So allow dups. + s.Set(obj.AttrDuplicateOK, true) + base.Ctxt.Data = append(base.Ctxt.Data, s) + s.P = cmd.Bytes()[1:] +} + +// RecordPackageName records the name of the package being +// compiled, so that the linker can save it in the compile unit's DIE. +func RecordPackageName() { + s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + base.Ctxt.Pkgpath) + s.Type = objabi.SDWARFCUINFO + // Sometimes (for example when building tests) we can link + // together two package main archives. So allow dups. + s.Set(obj.AttrDuplicateOK, true) + base.Ctxt.Data = append(base.Ctxt.Data, s) + s.P = []byte(types.LocalPkg.Name) +} diff --git a/src/cmd/compile/internal/gc/dwinl.go b/src/cmd/compile/internal/dwarfgen/dwinl.go similarity index 99% rename from src/cmd/compile/internal/gc/dwinl.go rename to src/cmd/compile/internal/dwarfgen/dwinl.go index d9eb930037456..d5687cb1d72bb 100644 --- a/src/cmd/compile/internal/gc/dwinl.go +++ b/src/cmd/compile/internal/dwarfgen/dwinl.go @@ -2,16 +2,17 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package dwarfgen import ( + "fmt" + "strings" + "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/internal/dwarf" "cmd/internal/obj" "cmd/internal/src" - "fmt" - "strings" ) // To identify variables by original source position. 
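
RecordFlags, now exported above, only records flags whose current value differs from their registered default, so an untouched flag never perturbs the DWARF producer string. The core of that test in a self-contained program (the -n flag is invented for the example):

package main

import (
	"flag"
	"fmt"
)

func main() {
	flag.Int("n", 0, "an example flag") // registered with default 0
	flag.CommandLine.Parse([]string{"-n=3"})

	f := flag.Lookup("n")
	getter := f.Value.(flag.Getter) // standard flag values implement flag.Getter
	if getter.String() != f.DefValue {
		// Non-default: a RecordFlags-style recorder would emit it.
		fmt.Printf("-%s=%v\n", f.Name, getter.Get())
	}
}
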
@@ -206,7 +207,7 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls { // late in the compilation when it is determined that we need an // abstract function DIE for an inlined routine imported from a // previously compiled package. -func genAbstractFunc(fn *obj.LSym) { +func AbstractFunc(fn *obj.LSym) { ifn := base.Ctxt.DwFixups.GetPrecursorFunc(fn) if ifn == nil { base.Ctxt.Diag("failed to locate precursor fn for %v", fn) diff --git a/src/cmd/compile/internal/gc/scope.go b/src/cmd/compile/internal/dwarfgen/scope.go similarity index 99% rename from src/cmd/compile/internal/gc/scope.go rename to src/cmd/compile/internal/dwarfgen/scope.go index 9ab33583c89ce..1c040edc28423 100644 --- a/src/cmd/compile/internal/gc/scope.go +++ b/src/cmd/compile/internal/dwarfgen/scope.go @@ -2,15 +2,16 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package dwarfgen import ( + "sort" + "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/internal/dwarf" "cmd/internal/obj" "cmd/internal/src" - "sort" ) // See golang.org/issue/20390. diff --git a/src/cmd/compile/internal/gc/scope_test.go b/src/cmd/compile/internal/dwarfgen/scope_test.go similarity index 99% rename from src/cmd/compile/internal/gc/scope_test.go rename to src/cmd/compile/internal/dwarfgen/scope_test.go index b0e038d27f5db..fcfcf85f84c2a 100644 --- a/src/cmd/compile/internal/gc/scope_test.go +++ b/src/cmd/compile/internal/dwarfgen/scope_test.go @@ -2,10 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc_test +package dwarfgen import ( - "cmd/internal/objfile" "debug/dwarf" "fmt" "internal/testenv" @@ -18,6 +17,8 @@ import ( "strconv" "strings" "testing" + + "cmd/internal/objfile" ) type testline struct { diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 154235f744fb4..2a8012b462e53 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -10,6 +10,7 @@ import ( "bufio" "bytes" "cmd/compile/internal/base" + "cmd/compile/internal/dwarfgen" "cmd/compile/internal/escape" "cmd/compile/internal/inline" "cmd/compile/internal/ir" @@ -114,7 +115,7 @@ func Main(archInit func(*ssagen.ArchInfo)) { // Record flags that affect the build result. (And don't // record flags that don't, since that would cause spurious // changes in the binary.) - recordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre") + dwarfgen.RecordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre") if !base.EnableTrace && base.Flag.LowerT { log.Fatalf("compiler not built with support for -t") @@ -134,8 +135,8 @@ func Main(archInit func(*ssagen.ArchInfo)) { } if base.Flag.Dwarf { - base.Ctxt.DebugInfo = debuginfo - base.Ctxt.GenAbstractFunc = genAbstractFunc + base.Ctxt.DebugInfo = dwarfgen.Info + base.Ctxt.GenAbstractFunc = dwarfgen.AbstractFunc base.Ctxt.DwFixups = obj.NewDwarfFixupTable(base.Ctxt) } else { // turn off inline generation if no dwarf at all @@ -211,7 +212,7 @@ func Main(archInit func(*ssagen.ArchInfo)) { ssagen.CgoSymABIs() base.Timer.Stop() base.Timer.AddEvent(int64(lines), "lines") - recordPackageName() + dwarfgen.RecordPackageName() // Typecheck. 
typecheck.Package() @@ -364,73 +365,6 @@ func writebench(filename string) error { return f.Close() } -// recordFlags records the specified command-line flags to be placed -// in the DWARF info. -func recordFlags(flags ...string) { - if base.Ctxt.Pkgpath == "" { - // We can't record the flags if we don't know what the - // package name is. - return - } - - type BoolFlag interface { - IsBoolFlag() bool - } - type CountFlag interface { - IsCountFlag() bool - } - var cmd bytes.Buffer - for _, name := range flags { - f := flag.Lookup(name) - if f == nil { - continue - } - getter := f.Value.(flag.Getter) - if getter.String() == f.DefValue { - // Flag has default value, so omit it. - continue - } - if bf, ok := f.Value.(BoolFlag); ok && bf.IsBoolFlag() { - val, ok := getter.Get().(bool) - if ok && val { - fmt.Fprintf(&cmd, " -%s", f.Name) - continue - } - } - if cf, ok := f.Value.(CountFlag); ok && cf.IsCountFlag() { - val, ok := getter.Get().(int) - if ok && val == 1 { - fmt.Fprintf(&cmd, " -%s", f.Name) - continue - } - } - fmt.Fprintf(&cmd, " -%s=%v", f.Name, getter.Get()) - } - - if cmd.Len() == 0 { - return - } - s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + base.Ctxt.Pkgpath) - s.Type = objabi.SDWARFCUINFO - // Sometimes (for example when building tests) we can link - // together two package main archives. So allow dups. - s.Set(obj.AttrDuplicateOK, true) - base.Ctxt.Data = append(base.Ctxt.Data, s) - s.P = cmd.Bytes()[1:] -} - -// recordPackageName records the name of the package being -// compiled, so that the linker can save it in the compile unit's DIE. -func recordPackageName() { - s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + base.Ctxt.Pkgpath) - s.Type = objabi.SDWARFCUINFO - // Sometimes (for example when building tests) we can link - // together two package main archives. So allow dups. - s.Set(obj.AttrDuplicateOK, true) - base.Ctxt.Data = append(base.Ctxt.Data, s) - s.P = []byte(types.LocalPkg.Name) -} - func makePos(b *src.PosBase, line, col uint) src.XPos { return base.Ctxt.PosTable.XPos(src.MakePos(b, line, col)) } diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 4db2ad9d4a75d..f159256da665e 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -319,7 +319,7 @@ func litsym(n *ir.Name, noff int64, c ir.Node, wid int) { func ggloblnod(nam ir.Node) { s := nam.Sym().Linksym() - s.Gotype = ngotype(nam).Linksym() + s.Gotype = reflectdata.TypeSym(nam.Type()).Linksym() flags := 0 if nam.Name().Readonly() { flags = obj.RODATA diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 02a4c0a688332..17bbd1c3a2d24 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -7,7 +7,6 @@ package gc import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" - "cmd/compile/internal/reflectdata" "cmd/compile/internal/ssagen" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" @@ -305,13 +304,6 @@ func cheapexpr(n ir.Node, init *ir.Nodes) ir.Node { return copyexpr(n, n.Type(), init) } -func ngotype(n ir.Node) *types.Sym { - if n.Type() != nil { - return reflectdata.TypeSym(n.Type()) - } - return nil -} - // itabType loads the _type field from a runtime.itab struct. 
func itabType(itab ir.Node) ir.Node { typ := ir.NewSelectorExpr(base.Pos, ir.ODOTPTR, itab, nil) From e4895ab4c0eb44de6ddc5dc8d860a827b20d2781 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 23 Dec 2020 01:05:16 -0500 Subject: [PATCH 238/474] [dev.regabi] cmd/compile: split out package walk [generated] [git-generate] cd src/cmd/compile/internal/gc rf ' # Late addition to package ir. mv closuredebugruntimecheck ClosureDebugRuntimeCheck mv hasemptycvars IsTrivialClosure mv ClosureDebugRuntimeCheck IsTrivialClosure func.go mv func.go cmd/compile/internal/ir # Late addition to package reflectdata. mv markTypeUsedInInterface MarkTypeUsedInInterface mv markUsedIfaceMethod MarkUsedIfaceMethod mv MarkTypeUsedInInterface MarkUsedIfaceMethod reflect.go mv reflect.go cmd/compile/internal/reflectdata # Late addition to package staticdata. mv litsym InitConst mv InitConst data.go mv data.go cmd/compile/internal/staticdata # Extract staticinit out of walk into its own package. mv InitEntry InitPlan InitSchedule InitSchedule.append InitSchedule.staticInit \ InitSchedule.tryStaticInit InitSchedule.staticcopy \ InitSchedule.staticassign InitSchedule.initplan InitSchedule.addvalue \ statuniqgen staticname stataddr anySideEffects getlit isvaluelit \ sched.go mv InitSchedule.initplans InitSchedule.Plans mv InitSchedule.inittemps InitSchedule.Temps mv InitSchedule.out InitSchedule.Out mv InitSchedule.staticInit InitSchedule.StaticInit mv InitSchedule.staticassign InitSchedule.StaticAssign mv InitSchedule Schedule mv InitPlan Plan mv InitEntry Entry mv anySideEffects AnySideEffects mv staticname StaticName mv stataddr StaticLoc mv sched.go cmd/compile/internal/staticinit # Export API and unexport non-API. mv transformclosure Closure mv walk Walk mv Order orderState mv swt.go switch.go mv racewalk.go race.go mv closure.go order.go range.go select.go switch.go race.go \ sinit.go subr.go walk.go \ cmd/compile/internal/walk ' : # Update format test. cd ../../ go install cmd/compile/... 
cmd/internal/archive go test -u || go test -u rm -rf ../../../pkg/darwin_amd64/cmd Change-Id: I11c7a45f74d4a9e963da15c080e1018caaa99c05 Reviewed-on: https://go-review.googlesource.com/c/go/+/279478 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/fmtmap_test.go | 2 +- src/cmd/compile/internal/gc/compile.go | 3 +- src/cmd/compile/internal/gc/initorder.go | 11 +- src/cmd/compile/internal/gc/main.go | 3 +- src/cmd/compile/internal/gc/obj.go | 57 -- src/cmd/compile/internal/ir/func.go | 21 + .../compile/internal/reflectdata/reflect.go | 26 + src/cmd/compile/internal/staticdata/data.go | 57 ++ src/cmd/compile/internal/staticinit/sched.go | 596 ++++++++++++++++++ .../compile/internal/{gc => walk}/closure.go | 31 +- .../compile/internal/{gc => walk}/order.go | 80 +-- .../internal/{gc/racewalk.go => walk/race.go} | 2 +- .../compile/internal/{gc => walk}/range.go | 5 +- .../compile/internal/{gc => walk}/select.go | 2 +- .../compile/internal/{gc => walk}/sinit.go | 509 +-------------- src/cmd/compile/internal/{gc => walk}/subr.go | 5 +- .../internal/{gc/swt.go => walk/switch.go} | 9 +- src/cmd/compile/internal/{gc => walk}/walk.go | 135 +--- 18 files changed, 790 insertions(+), 764 deletions(-) create mode 100644 src/cmd/compile/internal/staticinit/sched.go rename src/cmd/compile/internal/{gc => walk}/closure.go (85%) rename src/cmd/compile/internal/{gc => walk}/order.go (95%) rename src/cmd/compile/internal/{gc/racewalk.go => walk/race.go} (99%) rename src/cmd/compile/internal/{gc => walk}/range.go (99%) rename src/cmd/compile/internal/{gc => walk}/select.go (99%) rename src/cmd/compile/internal/{gc => walk}/sinit.go (59%) rename src/cmd/compile/internal/{gc => walk}/subr.go (99%) rename src/cmd/compile/internal/{gc/swt.go => walk/switch.go} (99%) rename src/cmd/compile/internal/{gc => walk}/walk.go (97%) diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go index 9bc059c2e44f9..a925ec05ace7f 100644 --- a/src/cmd/compile/fmtmap_test.go +++ b/src/cmd/compile/fmtmap_test.go @@ -37,7 +37,6 @@ var knownFormats = map[string]string{ "[]cmd/compile/internal/syntax.token %s": "", "cmd/compile/internal/arm.shift %d": "", "cmd/compile/internal/gc.RegIndex %d": "", - "cmd/compile/internal/gc.initKind %d": "", "cmd/compile/internal/ir.Class %d": "", "cmd/compile/internal/ir.Node %+v": "", "cmd/compile/internal/ir.Node %L": "", @@ -68,6 +67,7 @@ var knownFormats = map[string]string{ "cmd/compile/internal/syntax.token %s": "", "cmd/compile/internal/types.Kind %d": "", "cmd/compile/internal/types.Kind %s": "", + "cmd/compile/internal/walk.initKind %d": "", "go/constant.Value %#v": "", "math/big.Accuracy %s": "", "reflect.Type %s": "", diff --git a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go index c2a6a9e327719..926b2dee95245 100644 --- a/src/cmd/compile/internal/gc/compile.go +++ b/src/cmd/compile/internal/gc/compile.go @@ -17,6 +17,7 @@ import ( "cmd/compile/internal/ssagen" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" + "cmd/compile/internal/walk" ) // "Portable" code generation. 
@@ -61,7 +62,7 @@ func compile(fn *ir.Func) { ssagen.InitLSym(fn, true) errorsBefore := base.Errors() - walk(fn) + walk.Walk(fn) if base.Errors() > errorsBefore { return } diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go index 5caa2e769f134..4ac468fb4e5b3 100644 --- a/src/cmd/compile/internal/gc/initorder.go +++ b/src/cmd/compile/internal/gc/initorder.go @@ -11,6 +11,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/staticinit" ) // Package initialization @@ -77,9 +78,9 @@ type InitOrder struct { // corresponding list of statements to include in the init() function // body. func initOrder(l []ir.Node) []ir.Node { - s := InitSchedule{ - initplans: make(map[ir.Node]*InitPlan), - inittemps: make(map[ir.Node]*ir.Name), + s := staticinit.Schedule{ + Plans: make(map[ir.Node]*staticinit.Plan), + Temps: make(map[ir.Node]*ir.Name), } o := InitOrder{ blocking: make(map[ir.Node][]ir.Node), @@ -91,7 +92,7 @@ func initOrder(l []ir.Node) []ir.Node { switch n.Op() { case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV: o.processAssign(n) - o.flushReady(s.staticInit) + o.flushReady(s.StaticInit) case ir.ODCLCONST, ir.ODCLFUNC, ir.ODCLTYPE: // nop default: @@ -124,7 +125,7 @@ func initOrder(l []ir.Node) []ir.Node { base.Fatalf("expected empty map: %v", o.blocking) } - return s.out + return s.Out } func (o *InitOrder) processAssign(n ir.Node) { diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 2a8012b462e53..aeb58a3310483 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -22,6 +22,7 @@ import ( "cmd/compile/internal/staticdata" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" + "cmd/compile/internal/walk" "cmd/internal/dwarf" "cmd/internal/obj" "cmd/internal/objabi" @@ -268,7 +269,7 @@ func Main(archInit func(*ssagen.ArchInfo)) { n := n.(*ir.Func) if n.OClosure != nil { ir.CurFunc = n - transformclosure(n) + walk.Closure(n) } } } diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index f159256da665e..0ab3a8dad43ae 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -18,7 +18,6 @@ import ( "cmd/internal/objabi" "encoding/json" "fmt" - "go/constant" ) // These modes say which kind of object file to generate. @@ -261,62 +260,6 @@ func addGCLocals() { } } -// litsym writes the static literal c to n. -// Neither n nor c is modified. 
-func litsym(n *ir.Name, noff int64, c ir.Node, wid int) {
-	if n.Op() != ir.ONAME {
-		base.Fatalf("litsym n op %v", n.Op())
-	}
-	if n.Sym() == nil {
-		base.Fatalf("litsym nil n sym")
-	}
-	if c.Op() == ir.ONIL {
-		return
-	}
-	if c.Op() != ir.OLITERAL {
-		base.Fatalf("litsym c op %v", c.Op())
-	}
-	s := n.Sym().Linksym()
-	switch u := c.Val(); u.Kind() {
-	case constant.Bool:
-		i := int64(obj.Bool2int(constant.BoolVal(u)))
-		s.WriteInt(base.Ctxt, noff, wid, i)
-
-	case constant.Int:
-		s.WriteInt(base.Ctxt, noff, wid, ir.IntVal(c.Type(), u))
-
-	case constant.Float:
-		f, _ := constant.Float64Val(u)
-		switch c.Type().Kind() {
-		case types.TFLOAT32:
-			s.WriteFloat32(base.Ctxt, noff, float32(f))
-		case types.TFLOAT64:
-			s.WriteFloat64(base.Ctxt, noff, f)
-		}
-
-	case constant.Complex:
-		re, _ := constant.Float64Val(constant.Real(u))
-		im, _ := constant.Float64Val(constant.Imag(u))
-		switch c.Type().Kind() {
-		case types.TCOMPLEX64:
-			s.WriteFloat32(base.Ctxt, noff, float32(re))
-			s.WriteFloat32(base.Ctxt, noff+4, float32(im))
-		case types.TCOMPLEX128:
-			s.WriteFloat64(base.Ctxt, noff, re)
-			s.WriteFloat64(base.Ctxt, noff+8, im)
-		}
-
-	case constant.String:
-		i := constant.StringVal(u)
-		symdata := staticdata.StringSym(n.Pos(), i)
-		s.WriteAddr(base.Ctxt, noff, types.PtrSize, symdata, 0)
-		s.WriteInt(base.Ctxt, noff+int64(types.PtrSize), types.PtrSize, int64(len(i)))
-
-	default:
-		base.Fatalf("litsym unhandled OLITERAL %v", c)
-	}
-}
-
 func ggloblnod(nam ir.Node) {
 	s := nam.Sym().Linksym()
 	s.Gotype = reflectdata.TypeSym(nam.Type()).Linksym()
diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go
index a93516d716240..6bc8cd574c579 100644
--- a/src/cmd/compile/internal/ir/func.go
+++ b/src/cmd/compile/internal/ir/func.go
@@ -288,3 +288,24 @@ func MarkFunc(n *Name) {
 	n.Class_ = PFUNC
 	n.Sym().SetFunc(true)
 }
+
+// ClosureDebugRuntimeCheck applies boilerplate checks for debug flags
+// and for compiling the runtime.
+func ClosureDebugRuntimeCheck(clo *ClosureExpr) {
+	if base.Debug.Closure > 0 {
+		if clo.Esc() == EscHeap {
+			base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func.ClosureVars)
+		} else {
+			base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func.ClosureVars)
+		}
+	}
+	if base.Flag.CompilingRuntime && clo.Esc() == EscHeap {
+		base.ErrorfAt(clo.Pos(), "heap-allocated closure, not allowed in runtime")
+	}
+}
+
+// IsTrivialClosure reports whether closure clo has an
+// empty list of captured vars.
+func IsTrivialClosure(clo *ClosureExpr) bool {
+	return len(clo.Func.ClosureVars) == 0
+}
diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go
index a5e2fb407aaf3..ba3e0fa75ef97 100644
--- a/src/cmd/compile/internal/reflectdata/reflect.go
+++ b/src/cmd/compile/internal/reflectdata/reflect.go
@@ -1834,3 +1834,29 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
 }
 
 var ZeroSize int64
+
+// MarkTypeUsedInInterface marks that type t is converted to an interface.
+// This information is used in the linker in dead method elimination.
+func MarkTypeUsedInInterface(t *types.Type, from *obj.LSym) {
+	tsym := TypeSym(t).Linksym()
+	// Emit a marker relocation. The linker will know the type is converted
+	// to an interface if "from" is reachable.
+	r := obj.Addrel(from)
+	r.Sym = tsym
+	r.Type = objabi.R_USEIFACE
+}
+
+// MarkUsedIfaceMethod marks that an interface method is used in the current
+// function. n is the OCALLINTER node.
+func MarkUsedIfaceMethod(n *ir.CallExpr) {
+	dot := n.X.(*ir.SelectorExpr)
+	ityp := dot.X.Type()
+	tsym := TypeSym(ityp).Linksym()
+	r := obj.Addrel(ir.CurFunc.LSym)
+	r.Sym = tsym
+	// dot.Xoffset is the method index * Widthptr (the offset of code pointer
+	// in itab).
+	midx := dot.Offset / int64(types.PtrSize)
+	r.Add = InterfaceMethodOffset(ityp, midx)
+	r.Type = objabi.R_USEIFACEMETHOD
+}
diff --git a/src/cmd/compile/internal/staticdata/data.go b/src/cmd/compile/internal/staticdata/data.go
index 7627aaa11a115..342a2e2bbc25f 100644
--- a/src/cmd/compile/internal/staticdata/data.go
+++ b/src/cmd/compile/internal/staticdata/data.go
@@ -7,6 +7,7 @@ package staticdata
 import (
 	"crypto/sha256"
 	"fmt"
+	"go/constant"
 	"io"
 	"io/ioutil"
 	"os"
@@ -294,3 +295,59 @@ func WriteFuncSyms() {
 		objw.Global(sf, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
 	}
 }
+
+// InitConst writes the static literal c to n.
+// Neither n nor c is modified.
+func InitConst(n *ir.Name, noff int64, c ir.Node, wid int) {
+	if n.Op() != ir.ONAME {
+		base.Fatalf("litsym n op %v", n.Op())
+	}
+	if n.Sym() == nil {
+		base.Fatalf("litsym nil n sym")
+	}
+	if c.Op() == ir.ONIL {
+		return
+	}
+	if c.Op() != ir.OLITERAL {
+		base.Fatalf("litsym c op %v", c.Op())
+	}
+	s := n.Sym().Linksym()
+	switch u := c.Val(); u.Kind() {
+	case constant.Bool:
+		i := int64(obj.Bool2int(constant.BoolVal(u)))
+		s.WriteInt(base.Ctxt, noff, wid, i)
+
+	case constant.Int:
+		s.WriteInt(base.Ctxt, noff, wid, ir.IntVal(c.Type(), u))
+
+	case constant.Float:
+		f, _ := constant.Float64Val(u)
+		switch c.Type().Kind() {
+		case types.TFLOAT32:
+			s.WriteFloat32(base.Ctxt, noff, float32(f))
+		case types.TFLOAT64:
+			s.WriteFloat64(base.Ctxt, noff, f)
+		}
+
+	case constant.Complex:
+		re, _ := constant.Float64Val(constant.Real(u))
+		im, _ := constant.Float64Val(constant.Imag(u))
+		switch c.Type().Kind() {
+		case types.TCOMPLEX64:
+			s.WriteFloat32(base.Ctxt, noff, float32(re))
+			s.WriteFloat32(base.Ctxt, noff+4, float32(im))
+		case types.TCOMPLEX128:
+			s.WriteFloat64(base.Ctxt, noff, re)
+			s.WriteFloat64(base.Ctxt, noff+8, im)
+		}
+
+	case constant.String:
+		i := constant.StringVal(u)
+		symdata := StringSym(n.Pos(), i)
+		s.WriteAddr(base.Ctxt, noff, types.PtrSize, symdata, 0)
+		s.WriteInt(base.Ctxt, noff+int64(types.PtrSize), types.PtrSize, int64(len(i)))
+
+	default:
+		base.Fatalf("litsym unhandled OLITERAL %v", c)
+	}
+}
diff --git a/src/cmd/compile/internal/staticinit/sched.go b/src/cmd/compile/internal/staticinit/sched.go
new file mode 100644
index 0000000000000..2a499d6eedbaa
--- /dev/null
+++ b/src/cmd/compile/internal/staticinit/sched.go
@@ -0,0 +1,596 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package staticinit
+
+import (
+	"fmt"
+	"go/constant"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/reflectdata"
+	"cmd/compile/internal/staticdata"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+)
+
+type Entry struct {
+	Xoffset int64   // struct, array only
+	Expr    ir.Node // bytes of run-time computed expressions
+}
+
+type Plan struct {
+	E []Entry
+}
+
+// A Schedule is used to decompose assignment statements into
+// static and dynamic initialization parts. Static initializations are
+// handled by populating variables' linker symbol data, while dynamic
+// initializations are accumulated to be executed in order.
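InitConst's switch is driven by go/constant value kinds. A standalone sketch of the extraction calls it depends on, not part of this patch; only the printing differs from what InitConst does with the linker symbol data:

	package main

	import (
		"fmt"
		"go/constant"
	)

	func describe(u constant.Value) {
		switch u.Kind() {
		case constant.Bool:
			fmt.Println("bool:", constant.BoolVal(u))
		case constant.Int:
			i, exact := constant.Int64Val(u)
			fmt.Println("int:", i, "exact:", exact)
		case constant.Float:
			f, _ := constant.Float64Val(u) // rounded to the nearest float64
			fmt.Println("float:", f)
		case constant.String:
			fmt.Println("string:", constant.StringVal(u))
		}
	}

	func main() {
		describe(constant.MakeBool(true))
		describe(constant.MakeInt64(42))
		describe(constant.MakeFloat64(2.5))
		describe(constant.MakeString("hi"))
	}

For strings, InitConst additionally writes a pointer to interned string data plus a length word, which is why it needs types.PtrSize.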
+type Schedule struct {
+	// Out is the ordered list of dynamic initialization
+	// statements.
+	Out []ir.Node
+
+	Plans map[ir.Node]*Plan
+	Temps map[ir.Node]*ir.Name
+}
+
+func (s *Schedule) append(n ir.Node) {
+	s.Out = append(s.Out, n)
+}
+
+// StaticInit adds an initialization statement n to the schedule.
+func (s *Schedule) StaticInit(n ir.Node) {
+	if !s.tryStaticInit(n) {
+		if base.Flag.Percent != 0 {
+			ir.Dump("nonstatic", n)
+		}
+		s.append(n)
+	}
+}
+
+// tryStaticInit attempts to statically execute an initialization
+// statement and reports whether it succeeded.
+func (s *Schedule) tryStaticInit(nn ir.Node) bool {
+	// Only worry about simple "l = r" assignments. Multiple
+	// variable/expression OAS2 assignments have already been
+	// replaced by multiple simple OAS assignments, and the other
+	// OAS2* assignments mostly necessitate dynamic execution
+	// anyway.
+	if nn.Op() != ir.OAS {
+		return false
+	}
+	n := nn.(*ir.AssignStmt)
+	if ir.IsBlank(n.X) && !AnySideEffects(n.Y) {
+		// Discard.
+		return true
+	}
+	lno := ir.SetPos(n)
+	defer func() { base.Pos = lno }()
+	nam := n.X.(*ir.Name)
+	return s.StaticAssign(nam, 0, n.Y, nam.Type())
+}
+
+// staticcopy is like StaticAssign, but copies an already
+// initialized value r.
+func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Type) bool {
+	if rn.Class_ == ir.PFUNC {
+		// TODO if roff != 0 { panic }
+		staticdata.InitFunc(l, loff, rn)
+		return true
+	}
+	if rn.Class_ != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg {
+		return false
+	}
+	if rn.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
+		return false
+	}
+	if rn.Defn.Op() != ir.OAS {
+		return false
+	}
+	if rn.Type().IsString() { // perhaps overwritten by cmd/link -X (#34675)
+		return false
+	}
+	orig := rn
+	r := rn.Defn.(*ir.AssignStmt).Y
+
+	for r.Op() == ir.OCONVNOP && !types.Identical(r.Type(), typ) {
+		r = r.(*ir.ConvExpr).X
+	}
+
+	switch r.Op() {
+	case ir.OMETHEXPR:
+		r = r.(*ir.MethodExpr).FuncName()
+		fallthrough
+	case ir.ONAME:
+		r := r.(*ir.Name)
+		if s.staticcopy(l, loff, r, typ) {
+			return true
+		}
+		// We may have skipped past one or more OCONVNOPs, so
+		// use conv to ensure r is assignable to l (#13263).
+ dst := ir.Node(l) + if loff != 0 || !types.Identical(typ, l.Type()) { + dst = ir.NewNameOffsetExpr(base.Pos, l, loff, typ) + } + s.append(ir.NewAssignStmt(base.Pos, dst, typecheck.Conv(r, typ))) + return true + + case ir.ONIL: + return true + + case ir.OLITERAL: + if ir.IsZero(r) { + return true + } + staticdata.InitConst(l, loff, r, int(typ.Width)) + return true + + case ir.OADDR: + r := r.(*ir.AddrExpr) + if a := r.X; a.Op() == ir.ONAME { + a := a.(*ir.Name) + staticdata.InitAddr(l, loff, a, 0) + return true + } + + case ir.OPTRLIT: + r := r.(*ir.AddrExpr) + switch r.X.Op() { + case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT: + // copy pointer + staticdata.InitAddr(l, loff, s.Temps[r], 0) + return true + } + + case ir.OSLICELIT: + r := r.(*ir.CompLitExpr) + // copy slice + staticdata.InitSlice(l, loff, s.Temps[r], r.Len) + return true + + case ir.OARRAYLIT, ir.OSTRUCTLIT: + r := r.(*ir.CompLitExpr) + p := s.Plans[r] + for i := range p.E { + e := &p.E[i] + typ := e.Expr.Type() + if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL { + staticdata.InitConst(l, loff+e.Xoffset, e.Expr, int(typ.Width)) + continue + } + x := e.Expr + if x.Op() == ir.OMETHEXPR { + x = x.(*ir.MethodExpr).FuncName() + } + if x.Op() == ir.ONAME && s.staticcopy(l, loff+e.Xoffset, x.(*ir.Name), typ) { + continue + } + // Requires computation, but we're + // copying someone else's computation. + ll := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, typ) + rr := ir.NewNameOffsetExpr(base.Pos, orig, e.Xoffset, typ) + ir.SetPos(rr) + s.append(ir.NewAssignStmt(base.Pos, ll, rr)) + } + + return true + } + + return false +} + +func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Type) bool { + for r.Op() == ir.OCONVNOP { + r = r.(*ir.ConvExpr).X + } + + switch r.Op() { + case ir.ONAME: + r := r.(*ir.Name) + return s.staticcopy(l, loff, r, typ) + + case ir.OMETHEXPR: + r := r.(*ir.MethodExpr) + return s.staticcopy(l, loff, r.FuncName(), typ) + + case ir.ONIL: + return true + + case ir.OLITERAL: + if ir.IsZero(r) { + return true + } + staticdata.InitConst(l, loff, r, int(typ.Width)) + return true + + case ir.OADDR: + r := r.(*ir.AddrExpr) + if name, offset, ok := StaticLoc(r.X); ok { + staticdata.InitAddr(l, loff, name, offset) + return true + } + fallthrough + + case ir.OPTRLIT: + r := r.(*ir.AddrExpr) + switch r.X.Op() { + case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT: + // Init pointer. + a := StaticName(r.X.Type()) + + s.Temps[r] = a + staticdata.InitAddr(l, loff, a, 0) + + // Init underlying literal. + if !s.StaticAssign(a, 0, r.X, a.Type()) { + s.append(ir.NewAssignStmt(base.Pos, a, r.X)) + } + return true + } + //dump("not static ptrlit", r); + + case ir.OSTR2BYTES: + r := r.(*ir.ConvExpr) + if l.Class_ == ir.PEXTERN && r.X.Op() == ir.OLITERAL { + sval := ir.StringVal(r.X) + staticdata.InitSliceBytes(l, loff, sval) + return true + } + + case ir.OSLICELIT: + r := r.(*ir.CompLitExpr) + s.initplan(r) + // Init slice. + ta := types.NewArray(r.Type().Elem(), r.Len) + ta.SetNoalg(true) + a := StaticName(ta) + s.Temps[r] = a + staticdata.InitSlice(l, loff, a, r.Len) + // Fall through to init underlying array. 
+		l = a
+		loff = 0
+		fallthrough
+
+	case ir.OARRAYLIT, ir.OSTRUCTLIT:
+		r := r.(*ir.CompLitExpr)
+		s.initplan(r)
+
+		p := s.Plans[r]
+		for i := range p.E {
+			e := &p.E[i]
+			if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
+				staticdata.InitConst(l, loff+e.Xoffset, e.Expr, int(e.Expr.Type().Width))
+				continue
+			}
+			ir.SetPos(e.Expr)
+			if !s.StaticAssign(l, loff+e.Xoffset, e.Expr, e.Expr.Type()) {
+				a := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, e.Expr.Type())
+				s.append(ir.NewAssignStmt(base.Pos, a, e.Expr))
+			}
+		}
+
+		return true
+
+	case ir.OMAPLIT:
+		break
+
+	case ir.OCLOSURE:
+		r := r.(*ir.ClosureExpr)
+		if ir.IsTrivialClosure(r) {
+			if base.Debug.Closure > 0 {
+				base.WarnfAt(r.Pos(), "closure converted to global")
+			}
+			// Closures with no captured variables are globals,
+			// so the assignment can be done at link time.
+			// TODO if roff != 0 { panic }
+			staticdata.InitFunc(l, loff, r.Func.Nname)
+			return true
+		}
+		ir.ClosureDebugRuntimeCheck(r)
+
+	case ir.OCONVIFACE:
+		// This logic is mirrored in isStaticCompositeLiteral.
+		// If you change something here, change it there, and vice versa.
+
+		// Determine the underlying concrete type and value we are converting from.
+		r := r.(*ir.ConvExpr)
+		val := ir.Node(r)
+		for val.Op() == ir.OCONVIFACE {
+			val = val.(*ir.ConvExpr).X
+		}
+
+		if val.Type().IsInterface() {
+			// val is an interface type.
+			// If val is nil, we can statically initialize l;
+			// both words are zero and so there is no work to do, so report success.
+			// If val is non-nil, we have no concrete type to record,
+			// and we won't be able to statically initialize its value, so report failure.
+			return val.Op() == ir.ONIL
+		}
+
+		reflectdata.MarkTypeUsedInInterface(val.Type(), l.Sym().Linksym())
+
+		var itab *ir.AddrExpr
+		if typ.IsEmptyInterface() {
+			itab = reflectdata.TypePtr(val.Type())
+		} else {
+			itab = reflectdata.ITabAddr(val.Type(), typ)
+		}
+
+		// Create a copy of l to modify while we emit data.
+
+		// Emit itab, advance offset.
+		staticdata.InitAddr(l, loff, itab.X.(*ir.Name), 0)
+
+		// Emit data.
+		if types.IsDirectIface(val.Type()) {
+			if val.Op() == ir.ONIL {
+				// Nil is zero, nothing to do.
+				return true
+			}
+			// Copy val directly into n.
+			ir.SetPos(val)
+			if !s.StaticAssign(l, loff+int64(types.PtrSize), val, val.Type()) {
+				a := ir.NewNameOffsetExpr(base.Pos, l, loff+int64(types.PtrSize), val.Type())
+				s.append(ir.NewAssignStmt(base.Pos, a, val))
+			}
+		} else {
+			// Construct temp to hold val, write pointer to temp into n.
+			a := StaticName(val.Type())
+			s.Temps[val] = a
+			if !s.StaticAssign(a, 0, val, val.Type()) {
+				s.append(ir.NewAssignStmt(base.Pos, a, val))
+			}
+			staticdata.InitAddr(l, loff+int64(types.PtrSize), a, 0)
+		}
+
+		return true
+	}
+
+	//dump("not static", r);
+	return false
+}
+
+func (s *Schedule) initplan(n ir.Node) {
+	if s.Plans[n] != nil {
+		return
+	}
+	p := new(Plan)
+	s.Plans[n] = p
+	switch n.Op() {
+	default:
+		base.Fatalf("initplan")
+
+	case ir.OARRAYLIT, ir.OSLICELIT:
+		n := n.(*ir.CompLitExpr)
+		var k int64
+		for _, a := range n.List {
+			if a.Op() == ir.OKEY {
+				kv := a.(*ir.KeyExpr)
+				k = typecheck.IndexConst(kv.Key)
+				if k < 0 {
+					base.Fatalf("initplan arraylit: invalid index %v", kv.Key)
+				}
+				a = kv.Value
+			}
+			s.addvalue(p, k*n.Type().Elem().Width, a)
+			k++
+		}
+
+	case ir.OSTRUCTLIT:
+		n := n.(*ir.CompLitExpr)
+		for _, a := range n.List {
+			if a.Op() != ir.OSTRUCTKEY {
+				base.Fatalf("initplan structlit")
+			}
+			a := a.(*ir.StructKeyExpr)
+			if a.Field.IsBlank() {
+				continue
+			}
+			s.addvalue(p, a.Offset, a.Value)
+		}
+
+	case ir.OMAPLIT:
+		n := n.(*ir.CompLitExpr)
+		for _, a := range n.List {
+			if a.Op() != ir.OKEY {
+				base.Fatalf("initplan maplit")
+			}
+			a := a.(*ir.KeyExpr)
+			s.addvalue(p, -1, a.Value)
+		}
+	}
+}
+
+func (s *Schedule) addvalue(p *Plan, xoffset int64, n ir.Node) {
+	// special case: zero can be dropped entirely
+	if ir.IsZero(n) {
+		return
+	}
+
+	// special case: inline struct and array (not slice) literals
+	if isvaluelit(n) {
+		s.initplan(n)
+		q := s.Plans[n]
+		for _, qe := range q.E {
+			// qe is a copy; we are not modifying entries in q.E
+			qe.Xoffset += xoffset
+			p.E = append(p.E, qe)
+		}
+		return
+	}
+
+	// add to plan
+	p.E = append(p.E, Entry{Xoffset: xoffset, Expr: n})
+}
+
+// From here down is the walk analysis
+// of composite literals.
+// Most of the work is to generate
+// data statements for the constant
+// part of the composite literal.
+
+var statuniqgen int // name generator for static temps
+
+// StaticName returns a name backed by a (writable) static data symbol.
+// Use readonlystaticname for a read-only node.
+func StaticName(t *types.Type) *ir.Name {
+	// Don't use lookupN; it interns the resulting string, but these are all unique.
+	n := typecheck.NewName(typecheck.Lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
+	statuniqgen++
+	typecheck.Declare(n, ir.PEXTERN)
+	n.SetType(t)
+	n.Sym().Linksym().Set(obj.AttrLocal, true)
+	return n
+}
+
+// StaticLoc returns the static address of n, if n has one, or else ok=false.
+func StaticLoc(n ir.Node) (name *ir.Name, offset int64, ok bool) {
+	if n == nil {
+		return nil, 0, false
+	}
+
+	switch n.Op() {
+	case ir.ONAME:
+		n := n.(*ir.Name)
+		return n, 0, true
+
+	case ir.OMETHEXPR:
+		n := n.(*ir.MethodExpr)
+		return StaticLoc(n.FuncName())
+
+	case ir.ODOT:
+		n := n.(*ir.SelectorExpr)
+		if name, offset, ok = StaticLoc(n.X); !ok {
+			break
+		}
+		offset += n.Offset
+		return name, offset, true
+
+	case ir.OINDEX:
+		n := n.(*ir.IndexExpr)
+		if n.X.Type().IsSlice() {
+			break
+		}
+		if name, offset, ok = StaticLoc(n.X); !ok {
+			break
+		}
+		l := getlit(n.Index)
+		if l < 0 {
+			break
+		}
+
+		// Check for overflow.
+		if n.Type().Width != 0 && types.MaxWidth/n.Type().Width <= int64(l) {
+			break
+		}
+		offset += int64(l) * n.Type().Width
+		return name, offset, true
+	}
+
+	return nil, 0, false
+}
+
+// AnySideEffects reports whether n contains any operations that could have observable side effects.
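StaticLoc folds ODOT and OINDEX chains into a base name plus a constant byte offset, guarding against multiplication overflow. The same arithmetic can be sketched at the source level with package unsafe (illustrative only; the struct layout below is an assumption for the example, not taken from the compiler):

	package main

	import (
		"fmt"
		"unsafe"
	)

	type T struct {
		A int32
		B [4]int64
	}

	var x T

	func main() {
		// x.B[3] resolves to (base symbol x, constant offset), the same
		// (name, offset, ok) answer StaticLoc gives for an ODOT/OINDEX chain.
		off := unsafe.Offsetof(x.B) + 3*unsafe.Sizeof(x.B[0])
		fmt.Println("base: x, offset:", off) // 8 + 3*8 = 32 on 64-bit targets
	}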
+func AnySideEffects(n ir.Node) bool { + return ir.Any(n, func(n ir.Node) bool { + switch n.Op() { + // Assume side effects unless we know otherwise. + default: + return true + + // No side effects here (arguments are checked separately). + case ir.ONAME, + ir.ONONAME, + ir.OTYPE, + ir.OPACK, + ir.OLITERAL, + ir.ONIL, + ir.OADD, + ir.OSUB, + ir.OOR, + ir.OXOR, + ir.OADDSTR, + ir.OADDR, + ir.OANDAND, + ir.OBYTES2STR, + ir.ORUNES2STR, + ir.OSTR2BYTES, + ir.OSTR2RUNES, + ir.OCAP, + ir.OCOMPLIT, + ir.OMAPLIT, + ir.OSTRUCTLIT, + ir.OARRAYLIT, + ir.OSLICELIT, + ir.OPTRLIT, + ir.OCONV, + ir.OCONVIFACE, + ir.OCONVNOP, + ir.ODOT, + ir.OEQ, + ir.ONE, + ir.OLT, + ir.OLE, + ir.OGT, + ir.OGE, + ir.OKEY, + ir.OSTRUCTKEY, + ir.OLEN, + ir.OMUL, + ir.OLSH, + ir.ORSH, + ir.OAND, + ir.OANDNOT, + ir.ONEW, + ir.ONOT, + ir.OBITNOT, + ir.OPLUS, + ir.ONEG, + ir.OOROR, + ir.OPAREN, + ir.ORUNESTR, + ir.OREAL, + ir.OIMAG, + ir.OCOMPLEX: + return false + + // Only possible side effect is division by zero. + case ir.ODIV, ir.OMOD: + n := n.(*ir.BinaryExpr) + if n.Y.Op() != ir.OLITERAL || constant.Sign(n.Y.Val()) == 0 { + return true + } + + // Only possible side effect is panic on invalid size, + // but many makechan and makemap use size zero, which is definitely OK. + case ir.OMAKECHAN, ir.OMAKEMAP: + n := n.(*ir.MakeExpr) + if !ir.IsConst(n.Len, constant.Int) || constant.Sign(n.Len.Val()) != 0 { + return true + } + + // Only possible side effect is panic on invalid size. + // TODO(rsc): Merge with previous case (probably breaks toolstash -cmp). + case ir.OMAKESLICE, ir.OMAKESLICECOPY: + return true + } + return false + }) +} + +func getlit(lit ir.Node) int { + if ir.IsSmallIntConst(lit) { + return int(ir.Int64Val(lit)) + } + return -1 +} + +func isvaluelit(n ir.Node) bool { + return n.Op() == ir.OARRAYLIT || n.Op() == ir.OSTRUCTLIT +} diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/walk/closure.go similarity index 85% rename from src/cmd/compile/internal/gc/closure.go rename to src/cmd/compile/internal/walk/closure.go index 4679b6535bcd1..545c762ac7b6d 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/walk/closure.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package walk import ( "cmd/compile/internal/base" @@ -12,9 +12,9 @@ import ( "cmd/internal/src" ) -// transformclosure is called in a separate phase after escape analysis. +// Closure is called in a separate phase after escape analysis. // It transform closure bodies to properly reference captured variables. -func transformclosure(fn *ir.Func) { +func Closure(fn *ir.Func) { lno := base.Pos base.Pos = fn.Pos() @@ -115,38 +115,17 @@ func transformclosure(fn *ir.Func) { base.Pos = lno } -// hasemptycvars reports whether closure clo has an -// empty list of captured vars. 
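AnySideEffects is an "any node matches" traversal whose default case assumes side effects, whitelisting only ops known to be pure. A rough source-level analogue over go/ast, not part of this patch and using a much coarser filter (any call expression counts as effectful):

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
	)

	// anyCalls reports whether the expression contains a call, a crude
	// stand-in for "could have observable side effects".
	func anyCalls(e ast.Expr) bool {
		found := false
		ast.Inspect(e, func(n ast.Node) bool {
			if _, ok := n.(*ast.CallExpr); ok {
				found = true
				return false // stop descending once one is found
			}
			return true
		})
		return found
	}

	func main() {
		pure, _ := parser.ParseExpr(`x*2 + 1`)
		impure, _ := parser.ParseExpr(`f(x) + 1`)
		fmt.Println(anyCalls(pure), anyCalls(impure)) // false true
	}

The compiler's version is finer grained: ODIV and OMOD are pure only with a nonzero constant divisor, and OMAKECHAN/OMAKEMAP only with a constant zero size.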
-func hasemptycvars(clo *ir.ClosureExpr) bool { - return len(clo.Func.ClosureVars) == 0 -} - -// closuredebugruntimecheck applies boilerplate checks for debug flags -// and compiling runtime -func closuredebugruntimecheck(clo *ir.ClosureExpr) { - if base.Debug.Closure > 0 { - if clo.Esc() == ir.EscHeap { - base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func.ClosureVars) - } else { - base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func.ClosureVars) - } - } - if base.Flag.CompilingRuntime && clo.Esc() == ir.EscHeap { - base.ErrorfAt(clo.Pos(), "heap-allocated closure, not allowed in runtime") - } -} - func walkclosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node { fn := clo.Func // If no closure vars, don't bother wrapping. - if hasemptycvars(clo) { + if ir.IsTrivialClosure(clo) { if base.Debug.Closure > 0 { base.WarnfAt(clo.Pos(), "closure converted to global") } return fn.Nname } - closuredebugruntimecheck(clo) + ir.ClosureDebugRuntimeCheck(clo) typ := typecheck.ClosureType(clo) diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/walk/order.go similarity index 95% rename from src/cmd/compile/internal/gc/order.go rename to src/cmd/compile/internal/walk/order.go index d1c5bb04a1b98..03310a50c6395 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -2,17 +2,19 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package walk import ( + "fmt" + "cmd/compile/internal/base" "cmd/compile/internal/escape" "cmd/compile/internal/ir" "cmd/compile/internal/reflectdata" + "cmd/compile/internal/staticinit" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/src" - "fmt" ) // Rewrite tree to use separate statements to enforce @@ -45,8 +47,8 @@ import ( // it can result in unnecessary zeroing of those variables in the function // prologue. -// Order holds state during the ordering process. -type Order struct { +// orderState holds state during the ordering process. +type orderState struct { out []ir.Node // list of generated statements temp []*ir.Name // stack of temporary variables free map[string][]*ir.Name // free list of unused temporaries, by type.LongString(). @@ -65,14 +67,14 @@ func order(fn *ir.Func) { } // append typechecks stmt and appends it to out. -func (o *Order) append(stmt ir.Node) { +func (o *orderState) append(stmt ir.Node) { o.out = append(o.out, typecheck.Stmt(stmt)) } // newTemp allocates a new temporary with the given type, // pushes it onto the temp stack, and returns it. // If clear is true, newTemp emits code to zero the temporary. -func (o *Order) newTemp(t *types.Type, clear bool) *ir.Name { +func (o *orderState) newTemp(t *types.Type, clear bool) *ir.Name { var v *ir.Name // Note: LongString is close to the type equality we want, // but not exactly. We still need to double-check with types.Identical. @@ -100,7 +102,7 @@ func (o *Order) newTemp(t *types.Type, clear bool) *ir.Name { // copyExpr behaves like newTemp but also emits // code to initialize the temporary to the value n. -func (o *Order) copyExpr(n ir.Node) ir.Node { +func (o *orderState) copyExpr(n ir.Node) ir.Node { return o.copyExpr1(n, false) } @@ -114,11 +116,11 @@ func (o *Order) copyExpr(n ir.Node) ir.Node { // (The other candidate would be map access, but map access // returns a pointer to the result data instead of taking a pointer // to be filled in.) 
-func (o *Order) copyExprClear(n ir.Node) *ir.Name { +func (o *orderState) copyExprClear(n ir.Node) *ir.Name { return o.copyExpr1(n, true) } -func (o *Order) copyExpr1(n ir.Node, clear bool) *ir.Name { +func (o *orderState) copyExpr1(n ir.Node, clear bool) *ir.Name { t := n.Type() v := o.newTemp(t, clear) o.append(ir.NewAssignStmt(base.Pos, v, n)) @@ -129,7 +131,7 @@ func (o *Order) copyExpr1(n ir.Node, clear bool) *ir.Name { // The definition of cheap is that n is a variable or constant. // If not, cheapExpr allocates a new tmp, emits tmp = n, // and then returns tmp. -func (o *Order) cheapExpr(n ir.Node) ir.Node { +func (o *orderState) cheapExpr(n ir.Node) ir.Node { if n == nil { return nil } @@ -158,7 +160,7 @@ func (o *Order) cheapExpr(n ir.Node) ir.Node { // as assigning to the original n. // // The intended use is to apply to x when rewriting x += y into x = x + y. -func (o *Order) safeExpr(n ir.Node) ir.Node { +func (o *orderState) safeExpr(n ir.Node) ir.Node { switch n.Op() { case ir.ONAME, ir.OLITERAL, ir.ONIL: return n @@ -241,15 +243,15 @@ func isaddrokay(n ir.Node) bool { // tmp = n, and then returns tmp. // The result of addrTemp MUST be assigned back to n, e.g. // n.Left = o.addrTemp(n.Left) -func (o *Order) addrTemp(n ir.Node) ir.Node { +func (o *orderState) addrTemp(n ir.Node) ir.Node { if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL { // TODO: expand this to all static composite literal nodes? n = typecheck.DefaultLit(n, nil) types.CalcSize(n.Type()) vstat := readonlystaticname(n.Type()) - var s InitSchedule - s.staticassign(vstat, 0, n, n.Type()) - if s.out != nil { + var s staticinit.Schedule + s.StaticAssign(vstat, 0, n, n.Type()) + if s.Out != nil { base.Fatalf("staticassign of const generated code: %+v", n) } vstat = typecheck.Expr(vstat).(*ir.Name) @@ -263,7 +265,7 @@ func (o *Order) addrTemp(n ir.Node) ir.Node { // mapKeyTemp prepares n to be a key in a map runtime call and returns n. // It should only be used for map runtime calls which have *_fast* versions. -func (o *Order) mapKeyTemp(t *types.Type, n ir.Node) ir.Node { +func (o *orderState) mapKeyTemp(t *types.Type, n ir.Node) ir.Node { // Most map calls need to take the address of the key. // Exception: map*_fast* calls. See golang.org/issue/19015. if mapfast(t) == mapslow { @@ -318,13 +320,13 @@ func mapKeyReplaceStrConv(n ir.Node) bool { type ordermarker int // markTemp returns the top of the temporary variable stack. -func (o *Order) markTemp() ordermarker { +func (o *orderState) markTemp() ordermarker { return ordermarker(len(o.temp)) } // popTemp pops temporaries off the stack until reaching the mark, // which must have been returned by markTemp. -func (o *Order) popTemp(mark ordermarker) { +func (o *orderState) popTemp(mark ordermarker) { for _, n := range o.temp[mark:] { key := n.Type().LongString() o.free[key] = append(o.free[key], n) @@ -335,7 +337,7 @@ func (o *Order) popTemp(mark ordermarker) { // cleanTempNoPop emits VARKILL instructions to *out // for each temporary above the mark on the temporary stack. // It does not pop the temporaries from the stack. -func (o *Order) cleanTempNoPop(mark ordermarker) []ir.Node { +func (o *orderState) cleanTempNoPop(mark ordermarker) []ir.Node { var out []ir.Node for i := len(o.temp) - 1; i >= int(mark); i-- { n := o.temp[i] @@ -346,13 +348,13 @@ func (o *Order) cleanTempNoPop(mark ordermarker) []ir.Node { // cleanTemp emits VARKILL instructions for each temporary above the // mark on the temporary stack and removes them from the stack. 
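newTemp and popTemp above implement a type-keyed free list: temporaries live on a stack, and popTemp returns everything above a mark to the free list for reuse. A toy standalone version of the pattern, with strings standing in for *ir.Name and for the type key (illustrative only):

	package main

	import "fmt"

	// tempPool mimics orderState's temp handling: a stack of live
	// temporaries plus a free list of reusable ones, keyed by type.
	type tempPool struct {
		live []string
		free map[string][]string
	}

	func (p *tempPool) newTemp(typ string) string {
		if l := p.free[typ]; len(l) > 0 {
			v := l[len(l)-1] // reuse a freed temp of the same type
			p.free[typ] = l[:len(l)-1]
			p.live = append(p.live, v)
			return v
		}
		v := fmt.Sprintf(".autotmp_%s_%d", typ, len(p.live)) // fresh temp
		p.live = append(p.live, v)
		return v
	}

	// popTemp frees every temp above mark, like orderState.popTemp.
	func (p *tempPool) popTemp(mark int, typeOf func(string) string) {
		for _, v := range p.live[mark:] {
			k := typeOf(v)
			p.free[k] = append(p.free[k], v)
		}
		p.live = p.live[:mark]
	}

	func main() {
		p := &tempPool{free: map[string][]string{}}
		mark := len(p.live)
		a := p.newTemp("int")
		p.popTemp(mark, func(string) string { return "int" })
		b := p.newTemp("int")
		fmt.Println(a == b) // true: the temp was recycled
	}

The real code keys on Type.LongString() and re-checks with types.Identical, since the string form is close to, but not exactly, type identity.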
-func (o *Order) cleanTemp(top ordermarker) { +func (o *orderState) cleanTemp(top ordermarker) { o.out = append(o.out, o.cleanTempNoPop(top)...) o.popTemp(top) } // stmtList orders each of the statements in the list. -func (o *Order) stmtList(l ir.Nodes) { +func (o *orderState) stmtList(l ir.Nodes) { s := l for i := range s { orderMakeSliceCopy(s[i:]) @@ -396,14 +398,14 @@ func orderMakeSliceCopy(s []ir.Node) { } // edge inserts coverage instrumentation for libfuzzer. -func (o *Order) edge() { +func (o *orderState) edge() { if base.Debug.Libfuzzer == 0 { return } // Create a new uint8 counter to be allocated in section // __libfuzzer_extra_counters. - counter := staticname(types.Types[types.TUINT8]) + counter := staticinit.StaticName(types.Types[types.TUINT8]) counter.Name().SetLibfuzzerExtraCounter(true) // counter += 1 @@ -415,7 +417,7 @@ func (o *Order) edge() { // and then replaces the old slice in n with the new slice. // free is a map that can be used to obtain temporary variables by type. func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) { - var order Order + var order orderState order.free = free mark := order.markTemp() order.edge() @@ -428,8 +430,8 @@ func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) { // leaves them as the init list of the final *np. // The result of exprInPlace MUST be assigned back to n, e.g. // n.Left = o.exprInPlace(n.Left) -func (o *Order) exprInPlace(n ir.Node) ir.Node { - var order Order +func (o *orderState) exprInPlace(n ir.Node) ir.Node { + var order orderState order.free = o.free n = order.expr(n, nil) n = ir.InitExpr(order.out, n) @@ -446,7 +448,7 @@ func (o *Order) exprInPlace(n ir.Node) ir.Node { // n.Left = orderStmtInPlace(n.Left) // free is a map that can be used to obtain temporary variables by type. func orderStmtInPlace(n ir.Node, free map[string][]*ir.Name) ir.Node { - var order Order + var order orderState order.free = free mark := order.markTemp() order.stmt(n) @@ -455,7 +457,7 @@ func orderStmtInPlace(n ir.Node, free map[string][]*ir.Name) ir.Node { } // init moves n's init list to o.out. -func (o *Order) init(n ir.Node) { +func (o *orderState) init(n ir.Node) { if ir.MayBeShared(n) { // For concurrency safety, don't mutate potentially shared nodes. // First, ensure that no work is required here. @@ -470,7 +472,7 @@ func (o *Order) init(n ir.Node) { // call orders the call expression n. // n.Op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY. -func (o *Order) call(nn ir.Node) { +func (o *orderState) call(nn ir.Node) { if len(nn.Init()) > 0 { // Caller should have already called o.init(nn). base.Fatalf("%v with unexpected ninit", nn.Op()) @@ -551,7 +553,7 @@ func (o *Order) call(nn ir.Node) { // cases they are also typically registerizable, so not much harm done. // And this only applies to the multiple-assignment form. // We could do a more precise analysis if needed, like in walk.go. -func (o *Order) mapAssign(n ir.Node) { +func (o *orderState) mapAssign(n ir.Node) { switch n.Op() { default: base.Fatalf("order.mapAssign %v", n.Op()) @@ -596,7 +598,7 @@ func (o *Order) mapAssign(n ir.Node) { } } -func (o *Order) safeMapRHS(r ir.Node) ir.Node { +func (o *orderState) safeMapRHS(r ir.Node) ir.Node { // Make sure we evaluate the RHS before starting the map insert. // We need to make sure the RHS won't panic. See issue 22881. if r.Op() == ir.OAPPEND { @@ -613,7 +615,7 @@ func (o *Order) safeMapRHS(r ir.Node) ir.Node { // stmt orders the statement n, appending to o.out. 
// Temporaries created during the statement are cleaned // up using VARKILL instructions as possible. -func (o *Order) stmt(n ir.Node) { +func (o *orderState) stmt(n ir.Node) { if n == nil { return } @@ -1061,7 +1063,7 @@ func hasDefaultCase(n *ir.SwitchStmt) bool { } // exprList orders the expression list l into o. -func (o *Order) exprList(l ir.Nodes) { +func (o *orderState) exprList(l ir.Nodes) { s := l for i := range s { s[i] = o.expr(s[i], nil) @@ -1070,14 +1072,14 @@ func (o *Order) exprList(l ir.Nodes) { // exprListInPlace orders the expression list l but saves // the side effects on the individual expression ninit lists. -func (o *Order) exprListInPlace(l ir.Nodes) { +func (o *orderState) exprListInPlace(l ir.Nodes) { s := l for i := range s { s[i] = o.exprInPlace(s[i]) } } -func (o *Order) exprNoLHS(n ir.Node) ir.Node { +func (o *orderState) exprNoLHS(n ir.Node) ir.Node { return o.expr(n, nil) } @@ -1088,7 +1090,7 @@ func (o *Order) exprNoLHS(n ir.Node) ir.Node { // to avoid copying the result of the expression to a temporary.) // The result of expr MUST be assigned back to n, e.g. // n.Left = o.expr(n.Left, lhs) -func (o *Order) expr(n, lhs ir.Node) ir.Node { +func (o *orderState) expr(n, lhs ir.Node) ir.Node { if n == nil { return n } @@ -1098,7 +1100,7 @@ func (o *Order) expr(n, lhs ir.Node) ir.Node { return n } -func (o *Order) expr1(n, lhs ir.Node) ir.Node { +func (o *orderState) expr1(n, lhs ir.Node) ir.Node { o.init(n) switch n.Op() { @@ -1441,7 +1443,7 @@ func (o *Order) expr1(n, lhs ir.Node) ir.Node { // tmp1, tmp2, tmp3 = ... // a, b, a = tmp1, tmp2, tmp3 // This is necessary to ensure left to right assignment order. -func (o *Order) as2(n *ir.AssignListStmt) { +func (o *orderState) as2(n *ir.AssignListStmt) { tmplist := []ir.Node{} left := []ir.Node{} for ni, l := range n.Lhs { @@ -1463,7 +1465,7 @@ func (o *Order) as2(n *ir.AssignListStmt) { // okAs2 orders OAS2XXX with ok. // Just like as2, this also adds temporaries to ensure left-to-right assignment. -func (o *Order) okAs2(n *ir.AssignListStmt) { +func (o *orderState) okAs2(n *ir.AssignListStmt) { var tmp1, tmp2 ir.Node if !ir.IsBlank(n.Lhs[0]) { typ := n.Rhs[0].Type() diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/walk/race.go similarity index 99% rename from src/cmd/compile/internal/gc/racewalk.go rename to src/cmd/compile/internal/walk/race.go index c52bf1479b1f0..1fe439a99a423 100644 --- a/src/cmd/compile/internal/gc/racewalk.go +++ b/src/cmd/compile/internal/walk/race.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package walk import ( "cmd/compile/internal/base" diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/walk/range.go similarity index 99% rename from src/cmd/compile/internal/gc/range.go rename to src/cmd/compile/internal/walk/range.go index 2b2178a8bd2f0..ea23761a39847 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/walk/range.go @@ -2,9 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-package gc +package walk import ( + "unicode/utf8" + "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/reflectdata" @@ -12,7 +14,6 @@ import ( "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/sys" - "unicode/utf8" ) func cheapComputableIndex(width int64) bool { diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/walk/select.go similarity index 99% rename from src/cmd/compile/internal/gc/select.go rename to src/cmd/compile/internal/walk/select.go index 51bb1e5355ba6..006833eb7ba6f 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/walk/select.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package walk import ( "cmd/compile/internal/base" diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/walk/sinit.go similarity index 59% rename from src/cmd/compile/internal/gc/sinit.go rename to src/cmd/compile/internal/walk/sinit.go index 337b67af46222..dbb17dfe5015c 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/walk/sinit.go @@ -2,358 +2,18 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package walk import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" - "cmd/compile/internal/reflectdata" "cmd/compile/internal/staticdata" + "cmd/compile/internal/staticinit" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" - "fmt" ) -type InitEntry struct { - Xoffset int64 // struct, array only - Expr ir.Node // bytes of run-time computed expressions -} - -type InitPlan struct { - E []InitEntry -} - -// An InitSchedule is used to decompose assignment statements into -// static and dynamic initialization parts. Static initializations are -// handled by populating variables' linker symbol data, while dynamic -// initializations are accumulated to be executed in order. -type InitSchedule struct { - // out is the ordered list of dynamic initialization - // statements. - out []ir.Node - - initplans map[ir.Node]*InitPlan - inittemps map[ir.Node]*ir.Name -} - -func (s *InitSchedule) append(n ir.Node) { - s.out = append(s.out, n) -} - -// staticInit adds an initialization statement n to the schedule. -func (s *InitSchedule) staticInit(n ir.Node) { - if !s.tryStaticInit(n) { - if base.Flag.Percent != 0 { - ir.Dump("nonstatic", n) - } - s.append(n) - } -} - -// tryStaticInit attempts to statically execute an initialization -// statement and reports whether it succeeded. -func (s *InitSchedule) tryStaticInit(nn ir.Node) bool { - // Only worry about simple "l = r" assignments. Multiple - // variable/expression OAS2 assignments have already been - // replaced by multiple simple OAS assignments, and the other - // OAS2* assignments mostly necessitate dynamic execution - // anyway. - if nn.Op() != ir.OAS { - return false - } - n := nn.(*ir.AssignStmt) - if ir.IsBlank(n.X) && !anySideEffects(n.Y) { - // Discard. - return true - } - lno := ir.SetPos(n) - defer func() { base.Pos = lno }() - nam := n.X.(*ir.Name) - return s.staticassign(nam, 0, n.Y, nam.Type()) -} - -// like staticassign but we are copying an already -// initialized value r. 
-func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Type) bool { - if rn.Class_ == ir.PFUNC { - // TODO if roff != 0 { panic } - staticdata.InitFunc(l, loff, rn) - return true - } - if rn.Class_ != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg { - return false - } - if rn.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value - return false - } - if rn.Defn.Op() != ir.OAS { - return false - } - if rn.Type().IsString() { // perhaps overwritten by cmd/link -X (#34675) - return false - } - orig := rn - r := rn.Defn.(*ir.AssignStmt).Y - - for r.Op() == ir.OCONVNOP && !types.Identical(r.Type(), typ) { - r = r.(*ir.ConvExpr).X - } - - switch r.Op() { - case ir.OMETHEXPR: - r = r.(*ir.MethodExpr).FuncName() - fallthrough - case ir.ONAME: - r := r.(*ir.Name) - if s.staticcopy(l, loff, r, typ) { - return true - } - // We may have skipped past one or more OCONVNOPs, so - // use conv to ensure r is assignable to l (#13263). - dst := ir.Node(l) - if loff != 0 || !types.Identical(typ, l.Type()) { - dst = ir.NewNameOffsetExpr(base.Pos, l, loff, typ) - } - s.append(ir.NewAssignStmt(base.Pos, dst, typecheck.Conv(r, typ))) - return true - - case ir.ONIL: - return true - - case ir.OLITERAL: - if ir.IsZero(r) { - return true - } - litsym(l, loff, r, int(typ.Width)) - return true - - case ir.OADDR: - r := r.(*ir.AddrExpr) - if a := r.X; a.Op() == ir.ONAME { - a := a.(*ir.Name) - staticdata.InitAddr(l, loff, a, 0) - return true - } - - case ir.OPTRLIT: - r := r.(*ir.AddrExpr) - switch r.X.Op() { - case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT: - // copy pointer - staticdata.InitAddr(l, loff, s.inittemps[r], 0) - return true - } - - case ir.OSLICELIT: - r := r.(*ir.CompLitExpr) - // copy slice - staticdata.InitSlice(l, loff, s.inittemps[r], r.Len) - return true - - case ir.OARRAYLIT, ir.OSTRUCTLIT: - r := r.(*ir.CompLitExpr) - p := s.initplans[r] - for i := range p.E { - e := &p.E[i] - typ := e.Expr.Type() - if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL { - litsym(l, loff+e.Xoffset, e.Expr, int(typ.Width)) - continue - } - x := e.Expr - if x.Op() == ir.OMETHEXPR { - x = x.(*ir.MethodExpr).FuncName() - } - if x.Op() == ir.ONAME && s.staticcopy(l, loff+e.Xoffset, x.(*ir.Name), typ) { - continue - } - // Requires computation, but we're - // copying someone else's computation. - ll := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, typ) - rr := ir.NewNameOffsetExpr(base.Pos, orig, e.Xoffset, typ) - ir.SetPos(rr) - s.append(ir.NewAssignStmt(base.Pos, ll, rr)) - } - - return true - } - - return false -} - -func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *types.Type) bool { - for r.Op() == ir.OCONVNOP { - r = r.(*ir.ConvExpr).X - } - - switch r.Op() { - case ir.ONAME: - r := r.(*ir.Name) - return s.staticcopy(l, loff, r, typ) - - case ir.OMETHEXPR: - r := r.(*ir.MethodExpr) - return s.staticcopy(l, loff, r.FuncName(), typ) - - case ir.ONIL: - return true - - case ir.OLITERAL: - if ir.IsZero(r) { - return true - } - litsym(l, loff, r, int(typ.Width)) - return true - - case ir.OADDR: - r := r.(*ir.AddrExpr) - if name, offset, ok := stataddr(r.X); ok { - staticdata.InitAddr(l, loff, name, offset) - return true - } - fallthrough - - case ir.OPTRLIT: - r := r.(*ir.AddrExpr) - switch r.X.Op() { - case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT: - // Init pointer. - a := staticname(r.X.Type()) - - s.inittemps[r] = a - staticdata.InitAddr(l, loff, a, 0) - - // Init underlying literal. 
- if !s.staticassign(a, 0, r.X, a.Type()) { - s.append(ir.NewAssignStmt(base.Pos, a, r.X)) - } - return true - } - //dump("not static ptrlit", r); - - case ir.OSTR2BYTES: - r := r.(*ir.ConvExpr) - if l.Class_ == ir.PEXTERN && r.X.Op() == ir.OLITERAL { - sval := ir.StringVal(r.X) - staticdata.InitSliceBytes(l, loff, sval) - return true - } - - case ir.OSLICELIT: - r := r.(*ir.CompLitExpr) - s.initplan(r) - // Init slice. - ta := types.NewArray(r.Type().Elem(), r.Len) - ta.SetNoalg(true) - a := staticname(ta) - s.inittemps[r] = a - staticdata.InitSlice(l, loff, a, r.Len) - // Fall through to init underlying array. - l = a - loff = 0 - fallthrough - - case ir.OARRAYLIT, ir.OSTRUCTLIT: - r := r.(*ir.CompLitExpr) - s.initplan(r) - - p := s.initplans[r] - for i := range p.E { - e := &p.E[i] - if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL { - litsym(l, loff+e.Xoffset, e.Expr, int(e.Expr.Type().Width)) - continue - } - ir.SetPos(e.Expr) - if !s.staticassign(l, loff+e.Xoffset, e.Expr, e.Expr.Type()) { - a := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, e.Expr.Type()) - s.append(ir.NewAssignStmt(base.Pos, a, e.Expr)) - } - } - - return true - - case ir.OMAPLIT: - break - - case ir.OCLOSURE: - r := r.(*ir.ClosureExpr) - if hasemptycvars(r) { - if base.Debug.Closure > 0 { - base.WarnfAt(r.Pos(), "closure converted to global") - } - // Closures with no captured variables are globals, - // so the assignment can be done at link time. - // TODO if roff != 0 { panic } - staticdata.InitFunc(l, loff, r.Func.Nname) - return true - } - closuredebugruntimecheck(r) - - case ir.OCONVIFACE: - // This logic is mirrored in isStaticCompositeLiteral. - // If you change something here, change it there, and vice versa. - - // Determine the underlying concrete type and value we are converting from. - r := r.(*ir.ConvExpr) - val := ir.Node(r) - for val.Op() == ir.OCONVIFACE { - val = val.(*ir.ConvExpr).X - } - - if val.Type().IsInterface() { - // val is an interface type. - // If val is nil, we can statically initialize l; - // both words are zero and so there no work to do, so report success. - // If val is non-nil, we have no concrete type to record, - // and we won't be able to statically initialize its value, so report failure. - return val.Op() == ir.ONIL - } - - markTypeUsedInInterface(val.Type(), l.Sym().Linksym()) - - var itab *ir.AddrExpr - if typ.IsEmptyInterface() { - itab = reflectdata.TypePtr(val.Type()) - } else { - itab = reflectdata.ITabAddr(val.Type(), typ) - } - - // Create a copy of l to modify while we emit data. - - // Emit itab, advance offset. - staticdata.InitAddr(l, loff, itab.X.(*ir.Name), 0) - - // Emit data. - if types.IsDirectIface(val.Type()) { - if val.Op() == ir.ONIL { - // Nil is zero, nothing to do. - return true - } - // Copy val directly into n. - ir.SetPos(val) - if !s.staticassign(l, loff+int64(types.PtrSize), val, val.Type()) { - a := ir.NewNameOffsetExpr(base.Pos, l, loff+int64(types.PtrSize), val.Type()) - s.append(ir.NewAssignStmt(base.Pos, a, val)) - } - } else { - // Construct temp to hold val, write pointer to temp into n. - a := staticname(val.Type()) - s.inittemps[val] = a - if !s.staticassign(a, 0, val, val.Type()) { - s.append(ir.NewAssignStmt(base.Pos, a, val)) - } - staticdata.InitAddr(l, loff+int64(types.PtrSize), a, 0) - } - - return true - } - - //dump("not static", r); - return false -} - // initContext is the context in which static data is populated. // It is either in an init function or in any other function. 
// Static data populated in an init function will be written either @@ -378,29 +38,9 @@ func (c initContext) String() string { return "inNonInitFunction" } -// from here down is the walk analysis -// of composite literals. -// most of the work is to generate -// data statements for the constant -// part of the composite literal. - -var statuniqgen int // name generator for static temps - -// staticname returns a name backed by a (writable) static data symbol. -// Use readonlystaticname for read-only node. -func staticname(t *types.Type) *ir.Name { - // Don't use lookupN; it interns the resulting string, but these are all unique. - n := typecheck.NewName(typecheck.Lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen))) - statuniqgen++ - typecheck.Declare(n, ir.PEXTERN) - n.SetType(t) - n.Sym().Linksym().Set(obj.AttrLocal, true) - return n -} - // readonlystaticname returns a name backed by a (writable) static data symbol. func readonlystaticname(t *types.Type) *ir.Name { - n := staticname(t) + n := staticinit.StaticName(t) n.MarkReadonly() n.Sym().Linksym().Set(obj.AttrContentAddressable, true) return n @@ -572,7 +212,7 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node, for _, r := range n.List { a, value := splitnode(r) - if a == ir.BlankNode && !anySideEffects(value) { + if a == ir.BlankNode && !staticinit.AnySideEffects(value) { // Discard. continue } @@ -629,14 +269,14 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) if ctxt == inNonInitFunction { // put everything into static array - vstat := staticname(t) + vstat := staticinit.StaticName(t) fixedlit(ctxt, initKindStatic, n, vstat, init) fixedlit(ctxt, initKindDynamic, n, vstat, init) // copy static to slice var_ = typecheck.AssignExpr(var_) - name, offset, ok := stataddr(var_) + name, offset, ok := staticinit.StaticLoc(var_) if !ok || name.Class_ != ir.PEXTERN { base.Fatalf("slicelit: %v", var_) } @@ -672,7 +312,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) if ctxt == inInitFunction { vstat = readonlystaticname(t) } else { - vstat = staticname(t) + vstat = staticinit.StaticName(t) } fixedlit(ctxt, initKindStatic, n, vstat, init) } @@ -993,150 +633,19 @@ func oaslit(n *ir.AssignStmt, init *ir.Nodes) bool { return true } -func getlit(lit ir.Node) int { - if ir.IsSmallIntConst(lit) { - return int(ir.Int64Val(lit)) - } - return -1 -} - -// stataddr returns the static address of n, if n has one, or else nil. -func stataddr(n ir.Node) (name *ir.Name, offset int64, ok bool) { - if n == nil { - return nil, 0, false - } - - switch n.Op() { - case ir.ONAME: - n := n.(*ir.Name) - return n, 0, true - - case ir.OMETHEXPR: - n := n.(*ir.MethodExpr) - return stataddr(n.FuncName()) - - case ir.ODOT: - n := n.(*ir.SelectorExpr) - if name, offset, ok = stataddr(n.X); !ok { - break - } - offset += n.Offset - return name, offset, true - - case ir.OINDEX: - n := n.(*ir.IndexExpr) - if n.X.Type().IsSlice() { - break - } - if name, offset, ok = stataddr(n.X); !ok { - break - } - l := getlit(n.Index) - if l < 0 { - break - } - - // Check for overflow. 
- if n.Type().Width != 0 && types.MaxWidth/n.Type().Width <= int64(l) { - break - } - offset += int64(l) * n.Type().Width - return name, offset, true - } - - return nil, 0, false -} - -func (s *InitSchedule) initplan(n ir.Node) { - if s.initplans[n] != nil { - return - } - p := new(InitPlan) - s.initplans[n] = p - switch n.Op() { - default: - base.Fatalf("initplan") - - case ir.OARRAYLIT, ir.OSLICELIT: - n := n.(*ir.CompLitExpr) - var k int64 - for _, a := range n.List { - if a.Op() == ir.OKEY { - kv := a.(*ir.KeyExpr) - k = typecheck.IndexConst(kv.Key) - if k < 0 { - base.Fatalf("initplan arraylit: invalid index %v", kv.Key) - } - a = kv.Value - } - s.addvalue(p, k*n.Type().Elem().Width, a) - k++ - } - - case ir.OSTRUCTLIT: - n := n.(*ir.CompLitExpr) - for _, a := range n.List { - if a.Op() != ir.OSTRUCTKEY { - base.Fatalf("initplan structlit") - } - a := a.(*ir.StructKeyExpr) - if a.Field.IsBlank() { - continue - } - s.addvalue(p, a.Offset, a.Value) - } - - case ir.OMAPLIT: - n := n.(*ir.CompLitExpr) - for _, a := range n.List { - if a.Op() != ir.OKEY { - base.Fatalf("initplan maplit") - } - a := a.(*ir.KeyExpr) - s.addvalue(p, -1, a.Value) - } - } -} - -func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n ir.Node) { - // special case: zero can be dropped entirely - if ir.IsZero(n) { - return - } - - // special case: inline struct and array (not slice) literals - if isvaluelit(n) { - s.initplan(n) - q := s.initplans[n] - for _, qe := range q.E { - // qe is a copy; we are not modifying entries in q.E - qe.Xoffset += xoffset - p.E = append(p.E, qe) - } - return - } - - // add to plan - p.E = append(p.E, InitEntry{Xoffset: xoffset, Expr: n}) -} - -func isvaluelit(n ir.Node) bool { - return n.Op() == ir.OARRAYLIT || n.Op() == ir.OSTRUCTLIT -} - func genAsStatic(as *ir.AssignStmt) { if as.X.Type() == nil { base.Fatalf("genAsStatic as.Left not typechecked") } - name, offset, ok := stataddr(as.X) + name, offset, ok := staticinit.StaticLoc(as.X) if !ok || (name.Class_ != ir.PEXTERN && as.X != ir.BlankNode) { base.Fatalf("genAsStatic: lhs %v", as.X) } switch r := as.Y; r.Op() { case ir.OLITERAL: - litsym(name, offset, r, int(r.Type().Width)) + staticdata.InitConst(name, offset, r, int(r.Type().Width)) return case ir.OMETHEXPR: r := r.(*ir.MethodExpr) diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/walk/subr.go similarity index 99% rename from src/cmd/compile/internal/gc/subr.go rename to src/cmd/compile/internal/walk/subr.go index 17bbd1c3a2d24..bc65432d4992a 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/walk/subr.go @@ -2,16 +2,17 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package walk import ( + "fmt" + "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/ssagen" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/src" - "fmt" ) // backingArrayPtrLen extracts the pointer and length from a slice or string. diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/walk/switch.go similarity index 99% rename from src/cmd/compile/internal/gc/swt.go rename to src/cmd/compile/internal/walk/switch.go index 9ffa8b67bb277..9becd0e404ff5 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/walk/switch.go @@ -2,17 +2,18 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-package gc +package walk import ( + "go/constant" + "go/token" + "sort" + "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/src" - "go/constant" - "go/token" - "sort" ) // walkswitch walks a switch statement. diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/walk/walk.go similarity index 97% rename from src/cmd/compile/internal/gc/walk.go rename to src/cmd/compile/internal/walk/walk.go index f86dbba2c9824..cb3018a4ac454 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/walk/walk.go @@ -2,9 +2,16 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package walk import ( + "encoding/binary" + "errors" + "fmt" + "go/constant" + "go/token" + "strings" + "cmd/compile/internal/base" "cmd/compile/internal/escape" "cmd/compile/internal/ir" @@ -17,19 +24,13 @@ import ( "cmd/internal/objabi" "cmd/internal/src" "cmd/internal/sys" - "encoding/binary" - "errors" - "fmt" - "go/constant" - "go/token" - "strings" ) // The constant is known to runtime. const tmpstringbufsize = 32 const zeroValSize = 1024 // must match value of runtime/map.go:maxZero -func walk(fn *ir.Func) { +func Walk(fn *ir.Func) { ir.CurFunc = fn errorsBefore := base.Errors() order(fn) @@ -670,7 +671,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { n := n.(*ir.CallExpr) if n.Op() == ir.OCALLINTER { usemethod(n) - markUsedIfaceMethod(n) + reflectdata.MarkUsedIfaceMethod(n) } if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.OCLOSURE { @@ -933,7 +934,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { toType := n.Type() if !fromType.IsInterface() && !ir.IsBlank(ir.CurFunc.Nname) { // skip unnamed functions (func _()) - markTypeUsedInInterface(fromType, ir.CurFunc.LSym) + reflectdata.MarkTypeUsedInInterface(fromType, ir.CurFunc.LSym) } // typeword generates the type word of the interface value. @@ -1708,32 +1709,6 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { // in the presence of type assertions. } -// markTypeUsedInInterface marks that type t is converted to an interface. -// This information is used in the linker in dead method elimination. -func markTypeUsedInInterface(t *types.Type, from *obj.LSym) { - tsym := reflectdata.TypeSym(t).Linksym() - // Emit a marker relocation. The linker will know the type is converted - // to an interface if "from" is reachable. - r := obj.Addrel(from) - r.Sym = tsym - r.Type = objabi.R_USEIFACE -} - -// markUsedIfaceMethod marks that an interface method is used in the current -// function. n is OCALLINTER node. -func markUsedIfaceMethod(n *ir.CallExpr) { - dot := n.X.(*ir.SelectorExpr) - ityp := dot.X.Type() - tsym := reflectdata.TypeSym(ityp).Linksym() - r := obj.Addrel(ir.CurFunc.LSym) - r.Sym = tsym - // dot.Xoffset is the method index * Widthptr (the offset of code pointer - // in itab). - midx := dot.Offset / int64(types.PtrSize) - r.Add = reflectdata.InterfaceMethodOffset(ityp, midx) - r.Type = objabi.R_USEIFACEMETHOD -} - // rtconvfn returns the parameter and result types that will be used by a // runtime function to convert from type src to type dst. The runtime function // name can be derived from the names of the returned types. @@ -3737,94 +3712,6 @@ func usefield(n *ir.SelectorExpr) { ir.CurFunc.FieldTrack[sym] = struct{}{} } -// anySideEffects reports whether n contains any operations that could have observable side effects. 
-func anySideEffects(n ir.Node) bool {
-	return ir.Any(n, func(n ir.Node) bool {
-		switch n.Op() {
-		// Assume side effects unless we know otherwise.
-		default:
-			return true
-
-		// No side effects here (arguments are checked separately).
-		case ir.ONAME,
-			ir.ONONAME,
-			ir.OTYPE,
-			ir.OPACK,
-			ir.OLITERAL,
-			ir.ONIL,
-			ir.OADD,
-			ir.OSUB,
-			ir.OOR,
-			ir.OXOR,
-			ir.OADDSTR,
-			ir.OADDR,
-			ir.OANDAND,
-			ir.OBYTES2STR,
-			ir.ORUNES2STR,
-			ir.OSTR2BYTES,
-			ir.OSTR2RUNES,
-			ir.OCAP,
-			ir.OCOMPLIT,
-			ir.OMAPLIT,
-			ir.OSTRUCTLIT,
-			ir.OARRAYLIT,
-			ir.OSLICELIT,
-			ir.OPTRLIT,
-			ir.OCONV,
-			ir.OCONVIFACE,
-			ir.OCONVNOP,
-			ir.ODOT,
-			ir.OEQ,
-			ir.ONE,
-			ir.OLT,
-			ir.OLE,
-			ir.OGT,
-			ir.OGE,
-			ir.OKEY,
-			ir.OSTRUCTKEY,
-			ir.OLEN,
-			ir.OMUL,
-			ir.OLSH,
-			ir.ORSH,
-			ir.OAND,
-			ir.OANDNOT,
-			ir.ONEW,
-			ir.ONOT,
-			ir.OBITNOT,
-			ir.OPLUS,
-			ir.ONEG,
-			ir.OOROR,
-			ir.OPAREN,
-			ir.ORUNESTR,
-			ir.OREAL,
-			ir.OIMAG,
-			ir.OCOMPLEX:
-			return false
-
-		// Only possible side effect is division by zero.
-		case ir.ODIV, ir.OMOD:
-			n := n.(*ir.BinaryExpr)
-			if n.Y.Op() != ir.OLITERAL || constant.Sign(n.Y.Val()) == 0 {
-				return true
-			}
-
-		// Only possible side effect is panic on invalid size,
-		// but many makechan and makemap use size zero, which is definitely OK.
-		case ir.OMAKECHAN, ir.OMAKEMAP:
-			n := n.(*ir.MakeExpr)
-			if !ir.IsConst(n.Len, constant.Int) || constant.Sign(n.Len.Val()) != 0 {
-				return true
-			}
-
-		// Only possible side effect is panic on invalid size.
-		// TODO(rsc): Merge with previous case (probably breaks toolstash -cmp).
-		case ir.OMAKESLICE, ir.OMAKESLICECOPY:
-			return true
-		}
-		return false
-	})
-}
-
 // Rewrite
 //	go builtin(x, y, z)
 // into

From 3f04d964ab05c31a41efa1590a8303376901ab60 Mon Sep 17 00:00:00 2001
From: Russ Cox
Date: Wed, 23 Dec 2020 01:07:07 -0500
Subject: [PATCH 239/474] [dev.regabi] cmd/compile: split up walkexpr1, walkstmt [generated]

walkexpr1 is the second largest non-machine-generated function in the
compiler, weighing in at 1,164 lines. Since we are destroying the git
blame history anyway, now is a good time to split each different case
into its own function, making future work on this function more
manageable.

Do the same to walkstmt too for consistency, even though it is a paltry
259 lines.

[git-generate]

cd src/cmd/compile/internal/walk
rf '
mv addstr walkAddString
mv walkCall walkCall1
mv walkpartialcall walkCallPart
mv walkclosure walkClosure
mv walkrange walkRange
mv walkselect walkSelect
mv walkselectcases walkSelectCases
mv walkswitch walkSwitch
mv walkExprSwitch walkSwitchExpr
mv walkTypeSwitch walkSwitchType
mv walkstmt walkStmt
mv walkstmtlist walkStmtList
mv walkexprlist walkExprList
mv walkexprlistsafe walkExprListSafe
mv walkexprlistcheap walkExprListCheap
mv walkexpr walkExpr
mv walkexpr1 walkExpr1
mv walkprint walkPrint
mv walkappend walkAppend
mv walkcompare walkCompare
mv walkcompareInterface walkCompareInterface
mv walkcompareString walkCompareString
mv appendslice appendSlice
mv cheapexpr cheapExpr
mv copyany walkCopy
mv copyexpr copyExpr
mv eqfor eqFor
mv extendslice extendSlice
mv finishcompare finishCompare
mv safeexpr safeExpr

mv walkStmt:/^\tcase ir.ORECV:/+2,/^\tcase /-2 walkRecv
add walk.go:/^func walkRecv/-0 \
	// walkRecv walks an ORECV node.

mv walkStmt:/^\tcase ir.ODCL:/+2,/^\tcase /-2 walkDecl
add walk.go:/^func walkDecl/-0 \
	// walkDecl walks an ODCL node.

mv walkStmt:/^\tcase ir.OGO:/+2,/^\tcase /-2 walkGoDefer
add walk.go:/^func walkGoDefer/-0 \
	// walkGoDefer walks an OGO or ODEFER node.
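# Each range-addressed mv above and below carves the matched case body
# out of walkStmt or walkExpr1 into a new top-level function, and the
# add command that follows attaches the quoted doc comment to it.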
mv walkStmt:/^\tcase ir.OFOR,/+2,/^\tcase /-2 walkFor add walk.go:/^func walkFor/-0 \ // walkFor walks an OFOR or OFORUNTIL node. mv walkStmt:/^\tcase ir.OIF:/+2,/^\tcase /-2 walkIf add walk.go:/^func walkIf/-0 \ // walkIf walks an OIF node. mv walkStmt:/^\tcase ir.ORETURN:/+2,/^\tcase /-2 walkReturn add walk.go:/^func walkReturn/-0 \ // walkReturn walks an ORETURN node. mv walkExpr1:/^\tcase ir.ODOT,/+2,/^\tcase /-2 walkDot add walk.go:/^func walkDot/-0 \ // walkDot walks an ODOT or ODOTPTR node. mv walkExpr1:/^\tcase ir.ODOTTYPE,/+2,/^\tcase /-2 walkDotType add walk.go:/^func walkDotType/-0 \ // walkDotType walks an ODOTTYPE or ODOTTYPE2 node. mv walkExpr1:/^\tcase ir.OLEN,/+2,/^\tcase /-2 walkLenCap add walk.go:/^func walkLenCap/-0 \ // walkLenCap walks an OLEN or OCAP node. mv walkExpr1:/^\tcase ir.OANDAND,/+2,/^\tcase /-2 walkLogical add walk.go:/^func walkLogical/-0 \ // walkLogical walks an OANDAND or OOROR node. mv walkExpr1:/^\tcase ir.OCALLINTER,/+2,/^\tcase /-2 walkCall add walk.go:/^func walkCall/-0 \ // walkCall walks an OCALLFUNC, OCALLINTER, or OCALLMETH node. mv walkExpr1:/^\tcase ir.OAS,/+1,/^\tcase /-2 walkAssign add walk.go:/^func walkAssign/-0 \ // walkAssign walks an OAS (AssignExpr) or OASOP (AssignOpExpr) node. mv walkExpr1:/^\tcase ir.OAS2:/+2,/^\tcase /-3 walkAssignList add walk.go:/^func walkAssignList/-0 \ // walkAssignList walks an OAS2 node. mv walkExpr1:/^\tcase ir.OAS2FUNC:/+2,/^\tcase /-4 walkAssignFunc add walk.go:/^func walkAssignFunc/-0 \ // walkAssignFunc walks an OAS2FUNC node. mv walkExpr1:/^\tcase ir.OAS2RECV:/+2,/^\tcase /-3 walkAssignRecv add walk.go:/^func walkAssignRecv/-0 \ // walkAssignRecv walks an OAS2RECV node. mv walkExpr1:/^\tcase ir.OAS2MAPR:/+2,/^\tcase /-2 walkAssignMapRead add walk.go:/^func walkAssignMapRead/-0 \ // walkAssignMapRead walks an OAS2MAPR node. mv walkExpr1:/^\tcase ir.ODELETE:/+2,/^\tcase /-2 walkDelete add walk.go:/^func walkDelete/-0 \ // walkDelete walks an ODELETE node. mv walkExpr1:/^\tcase ir.OAS2DOTTYPE:/+2,/^\tcase /-2 walkAssignDotType add walk.go:/^func walkAssignDotType/-0 \ // walkAssignDotType walks an OAS2DOTTYPE node. mv walkExpr1:/^\tcase ir.OCONVIFACE:/+2,/^\tcase /-2 walkConvInterface add walk.go:/^func walkConvInterface/-0 \ // walkConvInterface walks an OCONVIFACE node. mv walkExpr1:/^\tcase ir.OCONV,/+2,/^\tcase /-2 walkConv add walk.go:/^func walkConv/-0 \ // walkConv walks an OCONV or OCONVNOP (but not OCONVIFACE) node. mv walkExpr1:/^\tcase ir.ODIV,/+2,/^\tcase /-2 walkDivMod add walk.go:/^func walkDivMod/-0 \ // walkDivMod walks an ODIV or OMOD node. mv walkExpr1:/^\tcase ir.OINDEX:/+2,/^\tcase /-2 walkIndex add walk.go:/^func walkIndex/-0 \ // walkIndex walks an OINDEX node. # move type assertion above comment mv walkExpr1:/^\tcase ir.OINDEXMAP:/+/n := n/-+ walkExpr1:/^\tcase ir.OINDEXMAP:/+0 mv walkExpr1:/^\tcase ir.OINDEXMAP:/+2,/^\tcase /-2 walkIndexMap add walk.go:/^func walkIndexMap/-0 \ // walkIndexMap walks an OINDEXMAP node. mv walkExpr1:/^\tcase ir.OSLICEHEADER:/+2,/^\tcase /-2 walkSliceHeader add walk.go:/^func walkSliceHeader/-0 \ // walkSliceHeader walks an OSLICEHEADER node. mv walkExpr1:/^\tcase ir.OSLICE,/+2,/^\tcase /-2 walkSlice add walk.go:/^func walkSlice/-0 \ // walkSlice walks an OSLICE, OSLICEARR, OSLICESTR, OSLICE3, or OSLICE3ARR node. mv walkExpr1:/^\tcase ir.ONEW:/+2,/^\tcase /-2 walkNew add walk.go:/^func walkNew/-0 \ // walkNew walks an ONEW node. 
# move type assertion above comment mv walkExpr1:/^\tcase ir.OCLOSE:/+/n := n/-+ walkExpr1:/^\tcase ir.OCLOSE:/+0 mv walkExpr1:/^\tcase ir.OCLOSE:/+2,/^\tcase /-2 walkClose add walk.go:/^func walkClose/-0 \ // walkClose walks an OCLOSE node. # move type assertion above comment mv walkExpr1:/^\tcase ir.OMAKECHAN:/+/n := n/-+ walkExpr1:/^\tcase ir.OMAKECHAN:/+0 mv walkExpr1:/^\tcase ir.OMAKECHAN:/+2,/^\tcase /-2 walkMakeChan add walk.go:/^func walkMakeChan/-0 \ // walkMakeChan walks an OMAKECHAN node. mv walkExpr1:/^\tcase ir.OMAKEMAP:/+2,/^\tcase /-2 walkMakeMap add walk.go:/^func walkMakeMap/-0 \ // walkMakeMap walks an OMAKEMAP node. mv walkExpr1:/^\tcase ir.OMAKESLICE:/+2,/^\tcase /-2 walkMakeSlice add walk.go:/^func walkMakeSlice/-0 \ // walkMakeSlice walks an OMAKESLICE node. mv walkExpr1:/^\tcase ir.OMAKESLICECOPY:/+2,/^\tcase /-2 walkMakeSliceCopy add walk.go:/^func walkMakeSliceCopy/-0 \ // walkMakeSliceCopy walks an OMAKESLICECOPY node. mv walkExpr1:/^\tcase ir.ORUNESTR:/+2,/^\tcase /-2 walkRuneToString add walk.go:/^func walkRuneToString/-0 \ // walkRuneToString walks an ORUNESTR node. mv walkExpr1:/^\tcase ir.OBYTES2STR,/+2,/^\tcase /-2 walkBytesRunesToString add walk.go:/^func walkBytesRunesToString/-0 \ // walkBytesRunesToString walks an OBYTES2STR or ORUNES2STR node. mv walkExpr1:/^\tcase ir.OBYTES2STRTMP:/+2,/^\tcase /-2 walkBytesToStringTemp add walk.go:/^func walkBytesToStringTemp/-0 \ // walkBytesToStringTemp walks an OBYTES2STRTMP node. mv walkExpr1:/^\tcase ir.OSTR2BYTES:/+2,/^\tcase /-2 walkStringToBytes add walk.go:/^func walkStringToBytes/-0 \ // walkStringToBytes walks an OSTR2BYTES node. # move type assertion above comment mv walkExpr1:/^\tcase ir.OSTR2BYTESTMP:/+/n := n/-+ walkExpr1:/^\tcase ir.OSTR2BYTESTMP:/+0 mv walkExpr1:/^\tcase ir.OSTR2BYTESTMP:/+2,/^\tcase /-2 walkStringToBytesTemp add walk.go:/^func walkStringToBytesTemp/-0 \ // walkStringToBytesTemp walks an OSTR2BYTESTMP node. mv walkExpr1:/^\tcase ir.OSTR2RUNES:/+2,/^\tcase /-2 walkStringToRunes add walk.go:/^func walkStringToRunes/-0 \ // walkStringToRunes walks an OSTR2RUNES node. mv walkExpr1:/^\tcase ir.OARRAYLIT,/+1,/^\tcase /-2 walkCompLit add walk.go:/^func walkCompLit/-0 \ // walkCompLit walks a composite literal node: \ // OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT (all CompLitExpr), or OPTRLIT (AddrExpr). mv walkExpr1:/^\tcase ir.OSEND:/+2,/^\tcase /-2 walkSend add walk.go:/^func walkSend/-0 \ // walkSend walks an OSEND node. 
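# The mv commands below distribute the renamed and newly extracted
# functions into per-topic files (stmt.go, expr.go, assign.go,
# compare.go, convert.go, builtin.go, complit.go), then fold
# subr.go into walk.go.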
mv walkStmt walkStmtList \ walkDecl \ walkFor \ walkGoDefer \ walkIf \ wrapCall \ stmt.go mv walkExpr walkExpr1 walkExprList walkExprListCheap walkExprListSafe \ cheapExpr safeExpr copyExpr \ walkAddString \ walkCall \ walkCall1 \ walkDivMod \ walkDot \ walkDotType \ walkIndex \ walkIndexMap \ walkLogical \ walkSend \ walkSlice \ walkSliceHeader \ reduceSlice \ bounded \ usemethod \ usefield \ expr.go mv \ walkAssign \ walkAssignDotType \ walkAssignFunc \ walkAssignList \ walkAssignMapRead \ walkAssignRecv \ walkReturn \ fncall \ ascompatee \ ascompatee1 \ ascompatet \ reorder3 \ reorder3save \ aliased \ anyAddrTaken \ refersToName \ refersToCommonName \ appendSlice \ isAppendOfMake \ extendSlice \ assign.go mv \ walkCompare \ walkCompareInterface \ walkCompareString \ finishCompare \ eqFor \ brcom \ brrev \ tracecmpArg \ canMergeLoads \ compare.go mv \ walkConv \ walkConvInterface \ walkBytesRunesToString \ walkBytesToStringTemp \ walkRuneToString \ walkStringToBytes \ walkStringToBytesTemp \ walkStringToRunes \ convFuncName \ rtconvfn \ byteindex \ walkCheckPtrAlignment \ walkCheckPtrArithmetic \ convert.go mv \ walkAppend \ walkClose \ walkCopy \ walkDelete \ walkLenCap \ walkMakeChan \ walkMakeMap \ walkMakeSlice \ walkMakeSliceCopy \ walkNew \ walkPrint \ badtype \ callnew \ writebarrierfn \ isRuneCount \ builtin.go mv \ walkCompLit \ sinit.go \ complit.go mv subr.go walk.go ' Change-Id: Ie0cf3ba4adf363c120c134d57cb7ef37934eaab9 Reviewed-on: https://go-review.googlesource.com/c/go/+/279430 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/walk/assign.go | 920 ++++ src/cmd/compile/internal/walk/builtin.go | 699 +++ src/cmd/compile/internal/walk/closure.go | 12 +- src/cmd/compile/internal/walk/compare.go | 507 ++ .../internal/walk/{sinit.go => complit.go} | 23 +- src/cmd/compile/internal/walk/convert.go | 502 ++ src/cmd/compile/internal/walk/expr.go | 1009 ++++ src/cmd/compile/internal/walk/range.go | 10 +- src/cmd/compile/internal/walk/select.go | 8 +- src/cmd/compile/internal/walk/stmt.go | 315 ++ src/cmd/compile/internal/walk/subr.go | 338 -- src/cmd/compile/internal/walk/switch.go | 30 +- src/cmd/compile/internal/walk/walk.go | 4080 ++--------------- 13 files changed, 4364 insertions(+), 4089 deletions(-) create mode 100644 src/cmd/compile/internal/walk/assign.go create mode 100644 src/cmd/compile/internal/walk/builtin.go create mode 100644 src/cmd/compile/internal/walk/compare.go rename src/cmd/compile/internal/walk/{sinit.go => complit.go} (96%) create mode 100644 src/cmd/compile/internal/walk/convert.go create mode 100644 src/cmd/compile/internal/walk/expr.go create mode 100644 src/cmd/compile/internal/walk/stmt.go delete mode 100644 src/cmd/compile/internal/walk/subr.go diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go new file mode 100644 index 0000000000000..6b0e2b272c730 --- /dev/null +++ b/src/cmd/compile/internal/walk/assign.go @@ -0,0 +1,920 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package walk + +import ( + "go/constant" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +// walkAssign walks an OAS (AssignExpr) or OASOP (AssignOpExpr) node. 
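+// An OASOP assignment x op= y is rewritten here into a plain
+// OAS x = x op y before the usual assignment walking runs.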
+func walkAssign(init *ir.Nodes, n ir.Node) ir.Node { + init.Append(n.PtrInit().Take()...) + + var left, right ir.Node + switch n.Op() { + case ir.OAS: + n := n.(*ir.AssignStmt) + left, right = n.X, n.Y + case ir.OASOP: + n := n.(*ir.AssignOpStmt) + left, right = n.X, n.Y + } + + // Recognize m[k] = append(m[k], ...) so we can reuse + // the mapassign call. + var mapAppend *ir.CallExpr + if left.Op() == ir.OINDEXMAP && right.Op() == ir.OAPPEND { + left := left.(*ir.IndexExpr) + mapAppend = right.(*ir.CallExpr) + if !ir.SameSafeExpr(left, mapAppend.Args[0]) { + base.Fatalf("not same expressions: %v != %v", left, mapAppend.Args[0]) + } + } + + left = walkExpr(left, init) + left = safeExpr(left, init) + if mapAppend != nil { + mapAppend.Args[0] = left + } + + if n.Op() == ir.OASOP { + // Rewrite x op= y into x = x op y. + n = ir.NewAssignStmt(base.Pos, left, typecheck.Expr(ir.NewBinaryExpr(base.Pos, n.(*ir.AssignOpStmt).AsOp, left, right))) + } else { + n.(*ir.AssignStmt).X = left + } + as := n.(*ir.AssignStmt) + + if oaslit(as, init) { + return ir.NewBlockStmt(as.Pos(), nil) + } + + if as.Y == nil { + // TODO(austin): Check all "implicit zeroing" + return as + } + + if !base.Flag.Cfg.Instrumenting && ir.IsZero(as.Y) { + return as + } + + switch as.Y.Op() { + default: + as.Y = walkExpr(as.Y, init) + + case ir.ORECV: + // x = <-c; as.Left is x, as.Right.Left is c. + // order.stmt made sure x is addressable. + recv := as.Y.(*ir.UnaryExpr) + recv.X = walkExpr(recv.X, init) + + n1 := typecheck.NodAddr(as.X) + r := recv.X // the channel + return mkcall1(chanfn("chanrecv1", 2, r.Type()), nil, init, r, n1) + + case ir.OAPPEND: + // x = append(...) + call := as.Y.(*ir.CallExpr) + if call.Type().Elem().NotInHeap() { + base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", call.Type().Elem()) + } + var r ir.Node + switch { + case isAppendOfMake(call): + // x = append(y, make([]T, y)...) + r = extendSlice(call, init) + case call.IsDDD: + r = appendSlice(call, init) // also works for append(slice, string). + default: + r = walkAppend(call, init, as) + } + as.Y = r + if r.Op() == ir.OAPPEND { + // Left in place for back end. + // Do not add a new write barrier. + // Set up address of type for back end. + r.(*ir.CallExpr).X = reflectdata.TypePtr(r.Type().Elem()) + return as + } + // Otherwise, lowered for race detector. + // Treat as ordinary assignment. + } + + if as.X != nil && as.Y != nil { + return convas(as, init) + } + return as +} + +// walkAssignDotType walks an OAS2DOTTYPE node. +func walkAssignDotType(n *ir.AssignListStmt, init *ir.Nodes) ir.Node { + walkExprListSafe(n.Lhs, init) + n.Rhs[0] = walkExpr(n.Rhs[0], init) + return n +} + +// walkAssignFunc walks an OAS2FUNC node. +func walkAssignFunc(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { + init.Append(n.PtrInit().Take()...) + + r := n.Rhs[0] + walkExprListSafe(n.Lhs, init) + r = walkExpr(r, init) + + if ir.IsIntrinsicCall(r.(*ir.CallExpr)) { + n.Rhs = []ir.Node{r} + return n + } + init.Append(r) + + ll := ascompatet(n.Lhs, r.Type()) + return ir.NewBlockStmt(src.NoXPos, ll) +} + +// walkAssignList walks an OAS2 node. +func walkAssignList(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { + init.Append(n.PtrInit().Take()...) + walkExprListSafe(n.Lhs, init) + walkExprListSafe(n.Rhs, init) + return ir.NewBlockStmt(src.NoXPos, ascompatee(ir.OAS, n.Lhs, n.Rhs, init)) +} + +// walkAssignMapRead walks an OAS2MAPR node. +func walkAssignMapRead(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { + init.Append(n.PtrInit().Take()...) 
+ + r := n.Rhs[0].(*ir.IndexExpr) + walkExprListSafe(n.Lhs, init) + r.X = walkExpr(r.X, init) + r.Index = walkExpr(r.Index, init) + t := r.X.Type() + + fast := mapfast(t) + var key ir.Node + if fast != mapslow { + // fast versions take key by value + key = r.Index + } else { + // standard version takes key by reference + // order.expr made sure key is addressable. + key = typecheck.NodAddr(r.Index) + } + + // from: + // a,b = m[i] + // to: + // var,b = mapaccess2*(t, m, i) + // a = *var + a := n.Lhs[0] + + var call *ir.CallExpr + if w := t.Elem().Width; w <= zeroValSize { + fn := mapfn(mapaccess2[fast], t) + call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key) + } else { + fn := mapfn("mapaccess2_fat", t) + z := reflectdata.ZeroAddr(w) + call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key, z) + } + + // mapaccess2* returns a typed bool, but due to spec changes, + // the boolean result of i.(T) is now untyped so we make it the + // same type as the variable on the lhs. + if ok := n.Lhs[1]; !ir.IsBlank(ok) && ok.Type().IsBoolean() { + call.Type().Field(1).Type = ok.Type() + } + n.Rhs = []ir.Node{call} + n.SetOp(ir.OAS2FUNC) + + // don't generate a = *var if a is _ + if ir.IsBlank(a) { + return walkExpr(typecheck.Stmt(n), init) + } + + var_ := typecheck.Temp(types.NewPtr(t.Elem())) + var_.SetTypecheck(1) + var_.MarkNonNil() // mapaccess always returns a non-nil pointer + + n.Lhs[0] = var_ + init.Append(walkExpr(n, init)) + + as := ir.NewAssignStmt(base.Pos, a, ir.NewStarExpr(base.Pos, var_)) + return walkExpr(typecheck.Stmt(as), init) +} + +// walkAssignRecv walks an OAS2RECV node. +func walkAssignRecv(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { + init.Append(n.PtrInit().Take()...) + + r := n.Rhs[0].(*ir.UnaryExpr) // recv + walkExprListSafe(n.Lhs, init) + r.X = walkExpr(r.X, init) + var n1 ir.Node + if ir.IsBlank(n.Lhs[0]) { + n1 = typecheck.NodNil() + } else { + n1 = typecheck.NodAddr(n.Lhs[0]) + } + fn := chanfn("chanrecv2", 2, r.X.Type()) + ok := n.Lhs[1] + call := mkcall1(fn, types.Types[types.TBOOL], init, r.X, n1) + return typecheck.Stmt(ir.NewAssignStmt(base.Pos, ok, call)) +} + +// walkReturn walks an ORETURN node. +func walkReturn(n *ir.ReturnStmt) ir.Node { + ir.CurFunc.NumReturns++ + if len(n.Results) == 0 { + return n + } + if (ir.HasNamedResults(ir.CurFunc) && len(n.Results) > 1) || paramoutheap(ir.CurFunc) { + // assign to the function out parameters, + // so that ascompatee can fix up conflicts + var rl []ir.Node + + for _, ln := range ir.CurFunc.Dcl { + cl := ln.Class_ + if cl == ir.PAUTO || cl == ir.PAUTOHEAP { + break + } + if cl == ir.PPARAMOUT { + var ln ir.Node = ln + if ir.IsParamStackCopy(ln) { + ln = walkExpr(typecheck.Expr(ir.NewStarExpr(base.Pos, ln.Name().Heapaddr)), nil) + } + rl = append(rl, ln) + } + } + + if got, want := len(n.Results), len(rl); got != want { + // order should have rewritten multi-value function calls + // with explicit OAS2FUNC nodes. + base.Fatalf("expected %v return arguments, have %v", want, got) + } + + // move function calls out, to make ascompatee's job easier. + walkExprListSafe(n.Results, n.PtrInit()) + + n.Results.Set(ascompatee(n.Op(), rl, n.Results, n.PtrInit())) + return n + } + walkExprList(n.Results, n.PtrInit()) + + // For each return parameter (lhs), assign the corresponding result (rhs). 
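+	// For example, in
+	//	func f() (int, int) { return 1, 2 }
+	// this loop assigns 1 and 2 to the two anonymous result slots.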
+ lhs := ir.CurFunc.Type().Results() + rhs := n.Results + res := make([]ir.Node, lhs.NumFields()) + for i, nl := range lhs.FieldSlice() { + nname := ir.AsNode(nl.Nname) + if ir.IsParamHeapCopy(nname) { + nname = nname.Name().Stackcopy + } + a := ir.NewAssignStmt(base.Pos, nname, rhs[i]) + res[i] = convas(a, n.PtrInit()) + } + n.Results.Set(res) + return n +} + +// fncall reports whether assigning an rvalue of type rt to an lvalue l might involve a function call. +func fncall(l ir.Node, rt *types.Type) bool { + if l.HasCall() || l.Op() == ir.OINDEXMAP { + return true + } + if types.Identical(l.Type(), rt) { + return false + } + // There might be a conversion required, which might involve a runtime call. + return true +} + +func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node { + // check assign expression list to + // an expression list. called in + // expr-list = expr-list + + // ensure order of evaluation for function calls + for i := range nl { + nl[i] = safeExpr(nl[i], init) + } + for i1 := range nr { + nr[i1] = safeExpr(nr[i1], init) + } + + var nn []*ir.AssignStmt + i := 0 + for ; i < len(nl); i++ { + if i >= len(nr) { + break + } + // Do not generate 'x = x' during return. See issue 4014. + if op == ir.ORETURN && ir.SameSafeExpr(nl[i], nr[i]) { + continue + } + nn = append(nn, ascompatee1(nl[i], nr[i], init)) + } + + // cannot happen: caller checked that lists had same length + if i < len(nl) || i < len(nr) { + var nln, nrn ir.Nodes + nln.Set(nl) + nrn.Set(nr) + base.Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), ir.FuncName(ir.CurFunc)) + } + return reorder3(nn) +} + +func ascompatee1(l ir.Node, r ir.Node, init *ir.Nodes) *ir.AssignStmt { + // convas will turn map assigns into function calls, + // making it impossible for reorder3 to work. + n := ir.NewAssignStmt(base.Pos, l, r) + + if l.Op() == ir.OINDEXMAP { + return n + } + + return convas(n, init) +} + +// check assign type list to +// an expression list. called in +// expr-list = func() +func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { + if len(nl) != nr.NumFields() { + base.Fatalf("ascompatet: assignment count mismatch: %d = %d", len(nl), nr.NumFields()) + } + + var nn, mm ir.Nodes + for i, l := range nl { + if ir.IsBlank(l) { + continue + } + r := nr.Field(i) + + // Any assignment to an lvalue that might cause a function call must be + // deferred until all the returned values have been read. + if fncall(l, r.Type) { + tmp := ir.Node(typecheck.Temp(r.Type)) + tmp = typecheck.Expr(tmp) + a := convas(ir.NewAssignStmt(base.Pos, l, tmp), &mm) + mm.Append(a) + l = tmp + } + + res := ir.NewResultExpr(base.Pos, nil, types.BADWIDTH) + res.Offset = base.Ctxt.FixedFrameSize() + r.Offset + res.SetType(r.Type) + res.SetTypecheck(1) + + a := convas(ir.NewAssignStmt(base.Pos, l, res), &nn) + updateHasCall(a) + if a.HasCall() { + ir.Dump("ascompatet ucount", a) + base.Fatalf("ascompatet: too many function calls evaluating parameters") + } + + nn.Append(a) + } + return append(nn, mm...) +} + +// reorder3 +// from ascompatee +// a,b = c,d +// simultaneous assignment. there cannot +// be later use of an earlier lvalue. +// +// function calls have been removed. +func reorder3(all []*ir.AssignStmt) []ir.Node { + // If a needed expression may be affected by an + // earlier assignment, make an early copy of that + // expression and use the copy instead. + var early []ir.Node + + var mapinit ir.Nodes + for i, n := range all { + l := n.X + + // Save subexpressions needed on left side. 
+ // Drill through non-dereferences. + for { + switch ll := l; ll.Op() { + case ir.ODOT: + ll := ll.(*ir.SelectorExpr) + l = ll.X + continue + case ir.OPAREN: + ll := ll.(*ir.ParenExpr) + l = ll.X + continue + case ir.OINDEX: + ll := ll.(*ir.IndexExpr) + if ll.X.Type().IsArray() { + ll.Index = reorder3save(ll.Index, all, i, &early) + l = ll.X + continue + } + } + break + } + + switch l.Op() { + default: + base.Fatalf("reorder3 unexpected lvalue %v", l.Op()) + + case ir.ONAME: + break + + case ir.OINDEX, ir.OINDEXMAP: + l := l.(*ir.IndexExpr) + l.X = reorder3save(l.X, all, i, &early) + l.Index = reorder3save(l.Index, all, i, &early) + if l.Op() == ir.OINDEXMAP { + all[i] = convas(all[i], &mapinit) + } + + case ir.ODEREF: + l := l.(*ir.StarExpr) + l.X = reorder3save(l.X, all, i, &early) + case ir.ODOTPTR: + l := l.(*ir.SelectorExpr) + l.X = reorder3save(l.X, all, i, &early) + } + + // Save expression on right side. + all[i].Y = reorder3save(all[i].Y, all, i, &early) + } + + early = append(mapinit, early...) + for _, as := range all { + early = append(early, as) + } + return early +} + +// if the evaluation of *np would be affected by the +// assignments in all up to but not including the ith assignment, +// copy into a temporary during *early and +// replace *np with that temp. +// The result of reorder3save MUST be assigned back to n, e.g. +// n.Left = reorder3save(n.Left, all, i, early) +func reorder3save(n ir.Node, all []*ir.AssignStmt, i int, early *[]ir.Node) ir.Node { + if !aliased(n, all[:i]) { + return n + } + + q := ir.Node(typecheck.Temp(n.Type())) + as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, q, n)) + *early = append(*early, as) + return q +} + +// Is it possible that the computation of r might be +// affected by assignments in all? +func aliased(r ir.Node, all []*ir.AssignStmt) bool { + if r == nil { + return false + } + + // Treat all fields of a struct as referring to the whole struct. + // We could do better but we would have to keep track of the fields. + for r.Op() == ir.ODOT { + r = r.(*ir.SelectorExpr).X + } + + // Look for obvious aliasing: a variable being assigned + // during the all list and appearing in n. + // Also record whether there are any writes to addressable + // memory (either main memory or variables whose addresses + // have been taken). + memwrite := false + for _, as := range all { + // We can ignore assignments to blank. + if ir.IsBlank(as.X) { + continue + } + + lv := ir.OuterValue(as.X) + if lv.Op() != ir.ONAME { + memwrite = true + continue + } + l := lv.(*ir.Name) + + switch l.Class_ { + default: + base.Fatalf("unexpected class: %v, %v", l, l.Class_) + + case ir.PAUTOHEAP, ir.PEXTERN: + memwrite = true + continue + + case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT: + if l.Name().Addrtaken() { + memwrite = true + continue + } + + if refersToName(l, r) { + // Direct hit: l appears in r. + return true + } + } + } + + // The variables being written do not appear in r. + // However, r might refer to computed addresses + // that are being written. + + // If no computed addresses are affected by the writes, no aliasing. + if !memwrite { + return false + } + + // If r does not refer to any variables whose addresses have been taken, + // then the only possible writes to r would be directly to the variables, + // and we checked those above, so no aliasing problems. + if !anyAddrTaken(r) { + return false + } + + // Otherwise, both the writes and r refer to computed memory addresses. + // Assume that they might conflict. 
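+	//
+	// For example, in "*p, x = 1, *q" the read of *q may observe the
+	// earlier write through p, so *q is copied to a temporary first.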
+ return true +} + +// anyAddrTaken reports whether the evaluation n, +// which appears on the left side of an assignment, +// may refer to variables whose addresses have been taken. +func anyAddrTaken(n ir.Node) bool { + return ir.Any(n, func(n ir.Node) bool { + switch n.Op() { + case ir.ONAME: + n := n.(*ir.Name) + return n.Class_ == ir.PEXTERN || n.Class_ == ir.PAUTOHEAP || n.Name().Addrtaken() + + case ir.ODOT: // but not ODOTPTR - should have been handled in aliased. + base.Fatalf("anyAddrTaken unexpected ODOT") + + case ir.OADD, + ir.OAND, + ir.OANDAND, + ir.OANDNOT, + ir.OBITNOT, + ir.OCONV, + ir.OCONVIFACE, + ir.OCONVNOP, + ir.ODIV, + ir.ODOTTYPE, + ir.OLITERAL, + ir.OLSH, + ir.OMOD, + ir.OMUL, + ir.ONEG, + ir.ONIL, + ir.OOR, + ir.OOROR, + ir.OPAREN, + ir.OPLUS, + ir.ORSH, + ir.OSUB, + ir.OXOR: + return false + } + // Be conservative. + return true + }) +} + +// refersToName reports whether r refers to name. +func refersToName(name *ir.Name, r ir.Node) bool { + return ir.Any(r, func(r ir.Node) bool { + return r.Op() == ir.ONAME && r == name + }) +} + +// refersToCommonName reports whether any name +// appears in common between l and r. +// This is called from sinit.go. +func refersToCommonName(l ir.Node, r ir.Node) bool { + if l == nil || r == nil { + return false + } + + // This could be written elegantly as a Find nested inside a Find: + // + // found := ir.Find(l, func(l ir.Node) interface{} { + // if l.Op() == ir.ONAME { + // return ir.Find(r, func(r ir.Node) interface{} { + // if r.Op() == ir.ONAME && l.Name() == r.Name() { + // return r + // } + // return nil + // }) + // } + // return nil + // }) + // return found != nil + // + // But that would allocate a new closure for the inner Find + // for each name found on the left side. + // It may not matter at all, but the below way of writing it + // only allocates two closures, not O(|L|) closures. + + var doL, doR func(ir.Node) error + var targetL *ir.Name + doR = func(r ir.Node) error { + if r.Op() == ir.ONAME && r.Name() == targetL { + return stop + } + return ir.DoChildren(r, doR) + } + doL = func(l ir.Node) error { + if l.Op() == ir.ONAME { + l := l.(*ir.Name) + targetL = l.Name() + if doR(r) == stop { + return stop + } + } + return ir.DoChildren(l, doL) + } + return doL(l) == stop +} + +// expand append(l1, l2...) to +// init { +// s := l1 +// n := len(s) + len(l2) +// // Compare as uint so growslice can panic on overflow. +// if uint(n) > uint(cap(s)) { +// s = growslice(s, n) +// } +// s = s[:n] +// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T)) +// } +// s +// +// l2 is allowed to be a string. 
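+//
+// For example, both append(dst, src...) with dst, src []T and
+// append(buf, "tail"...) with buf a []byte lower to this form.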
+func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node { + walkAppendArgs(n, init) + + l1 := n.Args[0] + l2 := n.Args[1] + l2 = cheapExpr(l2, init) + n.Args[1] = l2 + + var nodes ir.Nodes + + // var s []T + s := typecheck.Temp(l1.Type()) + nodes.Append(ir.NewAssignStmt(base.Pos, s, l1)) // s = l1 + + elemtype := s.Type().Elem() + + // n := len(s) + len(l2) + nn := typecheck.Temp(types.Types[types.TINT]) + nodes.Append(ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), ir.NewUnaryExpr(base.Pos, ir.OLEN, l2)))) + + // if uint(n) > uint(cap(s)) + nif := ir.NewIfStmt(base.Pos, nil, nil, nil) + nuint := typecheck.Conv(nn, types.Types[types.TUINT]) + scapuint := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT]) + nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, nuint, scapuint) + + // instantiate growslice(typ *type, []any, int) []any + fn := typecheck.LookupRuntime("growslice") + fn = typecheck.SubstArgTypes(fn, elemtype, elemtype) + + // s = growslice(T, s, n) + nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))} + nodes.Append(nif) + + // s = s[:n] + nt := ir.NewSliceExpr(base.Pos, ir.OSLICE, s) + nt.SetSliceBounds(nil, nn, nil) + nt.SetBounded(true) + nodes.Append(ir.NewAssignStmt(base.Pos, s, nt)) + + var ncopy ir.Node + if elemtype.HasPointers() { + // copy(s[len(l1):], l2) + slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s) + slice.SetType(s.Type()) + slice.SetSliceBounds(ir.NewUnaryExpr(base.Pos, ir.OLEN, l1), nil, nil) + + ir.CurFunc.SetWBPos(n.Pos()) + + // instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int + fn := typecheck.LookupRuntime("typedslicecopy") + fn = typecheck.SubstArgTypes(fn, l1.Type().Elem(), l2.Type().Elem()) + ptr1, len1 := backingArrayPtrLen(cheapExpr(slice, &nodes)) + ptr2, len2 := backingArrayPtrLen(l2) + ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, reflectdata.TypePtr(elemtype), ptr1, len1, ptr2, len2) + } else if base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime { + // rely on runtime to instrument: + // copy(s[len(l1):], l2) + // l2 can be a slice or string. + slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s) + slice.SetType(s.Type()) + slice.SetSliceBounds(ir.NewUnaryExpr(base.Pos, ir.OLEN, l1), nil, nil) + + ptr1, len1 := backingArrayPtrLen(cheapExpr(slice, &nodes)) + ptr2, len2 := backingArrayPtrLen(l2) + + fn := typecheck.LookupRuntime("slicecopy") + fn = typecheck.SubstArgTypes(fn, ptr1.Type().Elem(), ptr2.Type().Elem()) + ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, ir.NewInt(elemtype.Width)) + } else { + // memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T)) + ix := ir.NewIndexExpr(base.Pos, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1)) + ix.SetBounded(true) + addr := typecheck.NodAddr(ix) + + sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, l2) + + nwid := cheapExpr(typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, l2), types.Types[types.TUINTPTR]), &nodes) + nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(elemtype.Width)) + + // instantiate func memmove(to *any, frm *any, length uintptr) + fn := typecheck.LookupRuntime("memmove") + fn = typecheck.SubstArgTypes(fn, elemtype, elemtype) + ncopy = mkcall1(fn, nil, &nodes, addr, sptr, nwid) + } + ln := append(nodes, ncopy) + + typecheck.Stmts(ln) + walkStmtList(ln) + init.Append(ln...) 
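+
+	// The copy above took one of three forms: typedslicecopy when the
+	// element type has pointers (so write barriers run), slicecopy when
+	// instrumenting (so the race detector sees the copy), and a plain
+	// memmove otherwise.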
+ return s +} + +// isAppendOfMake reports whether n is of the form append(x , make([]T, y)...). +// isAppendOfMake assumes n has already been typechecked. +func isAppendOfMake(n ir.Node) bool { + if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting { + return false + } + + if n.Typecheck() == 0 { + base.Fatalf("missing typecheck: %+v", n) + } + + if n.Op() != ir.OAPPEND { + return false + } + call := n.(*ir.CallExpr) + if !call.IsDDD || len(call.Args) != 2 || call.Args[1].Op() != ir.OMAKESLICE { + return false + } + + mk := call.Args[1].(*ir.MakeExpr) + if mk.Cap != nil { + return false + } + + // y must be either an integer constant or the largest possible positive value + // of variable y needs to fit into an uint. + + // typecheck made sure that constant arguments to make are not negative and fit into an int. + + // The care of overflow of the len argument to make will be handled by an explicit check of int(len) < 0 during runtime. + y := mk.Len + if !ir.IsConst(y, constant.Int) && y.Type().Size() > types.Types[types.TUINT].Size() { + return false + } + + return true +} + +// extendSlice rewrites append(l1, make([]T, l2)...) to +// init { +// if l2 >= 0 { // Empty if block here for more meaningful node.SetLikely(true) +// } else { +// panicmakeslicelen() +// } +// s := l1 +// n := len(s) + l2 +// // Compare n and s as uint so growslice can panic on overflow of len(s) + l2. +// // cap is a positive int and n can become negative when len(s) + l2 +// // overflows int. Interpreting n when negative as uint makes it larger +// // than cap(s). growslice will check the int n arg and panic if n is +// // negative. This prevents the overflow from being undetected. +// if uint(n) > uint(cap(s)) { +// s = growslice(T, s, n) +// } +// s = s[:n] +// lptr := &l1[0] +// sptr := &s[0] +// if lptr == sptr || !T.HasPointers() { +// // growslice did not clear the whole underlying array (or did not get called) +// hp := &s[len(l1)] +// hn := l2 * sizeof(T) +// memclr(hp, hn) +// } +// } +// s +func extendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node { + // isAppendOfMake made sure all possible positive values of l2 fit into an uint. + // The case of l2 overflow when converting from e.g. uint to int is handled by an explicit + // check of l2 < 0 at runtime which is generated below. + l2 := typecheck.Conv(n.Args[1].(*ir.MakeExpr).Len, types.Types[types.TINT]) + l2 = typecheck.Expr(l2) + n.Args[1] = l2 // walkAppendArgs expects l2 in n.List.Second(). 
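+	// Concretely, append(s, make([]T, n)...) never materializes the
+	// make([]T, n) slice: the code below grows s in place and then
+	// clears only the newly extended cells.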
+ + walkAppendArgs(n, init) + + l1 := n.Args[0] + l2 = n.Args[1] // re-read l2, as it may have been updated by walkAppendArgs + + var nodes []ir.Node + + // if l2 >= 0 (likely happens), do nothing + nifneg := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGE, l2, ir.NewInt(0)), nil, nil) + nifneg.Likely = true + + // else panicmakeslicelen() + nifneg.Else = []ir.Node{mkcall("panicmakeslicelen", nil, init)} + nodes = append(nodes, nifneg) + + // s := l1 + s := typecheck.Temp(l1.Type()) + nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, l1)) + + elemtype := s.Type().Elem() + + // n := len(s) + l2 + nn := typecheck.Temp(types.Types[types.TINT]) + nodes = append(nodes, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), l2))) + + // if uint(n) > uint(cap(s)) + nuint := typecheck.Conv(nn, types.Types[types.TUINT]) + capuint := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT]) + nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, nuint, capuint), nil, nil) + + // instantiate growslice(typ *type, old []any, newcap int) []any + fn := typecheck.LookupRuntime("growslice") + fn = typecheck.SubstArgTypes(fn, elemtype, elemtype) + + // s = growslice(T, s, n) + nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))} + nodes = append(nodes, nif) + + // s = s[:n] + nt := ir.NewSliceExpr(base.Pos, ir.OSLICE, s) + nt.SetSliceBounds(nil, nn, nil) + nt.SetBounded(true) + nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, nt)) + + // lptr := &l1[0] + l1ptr := typecheck.Temp(l1.Type().Elem().PtrTo()) + tmp := ir.NewUnaryExpr(base.Pos, ir.OSPTR, l1) + nodes = append(nodes, ir.NewAssignStmt(base.Pos, l1ptr, tmp)) + + // sptr := &s[0] + sptr := typecheck.Temp(elemtype.PtrTo()) + tmp = ir.NewUnaryExpr(base.Pos, ir.OSPTR, s) + nodes = append(nodes, ir.NewAssignStmt(base.Pos, sptr, tmp)) + + // hp := &s[len(l1)] + ix := ir.NewIndexExpr(base.Pos, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1)) + ix.SetBounded(true) + hp := typecheck.ConvNop(typecheck.NodAddr(ix), types.Types[types.TUNSAFEPTR]) + + // hn := l2 * sizeof(elem(s)) + hn := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, l2, ir.NewInt(elemtype.Width)), types.Types[types.TUINTPTR]) + + clrname := "memclrNoHeapPointers" + hasPointers := elemtype.HasPointers() + if hasPointers { + clrname = "memclrHasPointers" + ir.CurFunc.SetWBPos(n.Pos()) + } + + var clr ir.Nodes + clrfn := mkcall(clrname, nil, &clr, hp, hn) + clr.Append(clrfn) + + if hasPointers { + // if l1ptr == sptr + nifclr := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OEQ, l1ptr, sptr), nil, nil) + nifclr.Body = clr + nodes = append(nodes, nifclr) + } else { + nodes = append(nodes, clr...) + } + + typecheck.Stmts(nodes) + walkStmtList(nodes) + init.Append(nodes...) + return s +} diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go new file mode 100644 index 0000000000000..61a555b773c87 --- /dev/null +++ b/src/cmd/compile/internal/walk/builtin.go @@ -0,0 +1,699 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package walk + +import ( + "fmt" + "go/constant" + "go/token" + "strings" + + "cmd/compile/internal/base" + "cmd/compile/internal/escape" + "cmd/compile/internal/ir" + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" +) + +// Rewrite append(src, x, y, z) so that any side effects in +// x, y, z (including runtime panics) are evaluated in +// initialization statements before the append. +// For normal code generation, stop there and leave the +// rest to cgen_append. +// +// For race detector, expand append(src, a [, b]* ) to +// +// init { +// s := src +// const argc = len(args) - 1 +// if cap(s) - len(s) < argc { +// s = growslice(s, len(s)+argc) +// } +// n := len(s) +// s = s[:n+argc] +// s[n] = a +// s[n+1] = b +// ... +// } +// s +func walkAppend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node { + if !ir.SameSafeExpr(dst, n.Args[0]) { + n.Args[0] = safeExpr(n.Args[0], init) + n.Args[0] = walkExpr(n.Args[0], init) + } + walkExprListSafe(n.Args[1:], init) + + nsrc := n.Args[0] + + // walkexprlistsafe will leave OINDEX (s[n]) alone if both s + // and n are name or literal, but those may index the slice we're + // modifying here. Fix explicitly. + // Using cheapexpr also makes sure that the evaluation + // of all arguments (and especially any panics) happen + // before we begin to modify the slice in a visible way. + ls := n.Args[1:] + for i, n := range ls { + n = cheapExpr(n, init) + if !types.Identical(n.Type(), nsrc.Type().Elem()) { + n = typecheck.AssignConv(n, nsrc.Type().Elem(), "append") + n = walkExpr(n, init) + } + ls[i] = n + } + + argc := len(n.Args) - 1 + if argc < 1 { + return nsrc + } + + // General case, with no function calls left as arguments. + // Leave for gen, except that instrumentation requires old form. + if !base.Flag.Cfg.Instrumenting || base.Flag.CompilingRuntime { + return n + } + + var l []ir.Node + + ns := typecheck.Temp(nsrc.Type()) + l = append(l, ir.NewAssignStmt(base.Pos, ns, nsrc)) // s = src + + na := ir.NewInt(int64(argc)) // const argc + nif := ir.NewIfStmt(base.Pos, nil, nil, nil) // if cap(s) - len(s) < argc + nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OCAP, ns), ir.NewUnaryExpr(base.Pos, ir.OLEN, ns)), na) + + fn := typecheck.LookupRuntime("growslice") // growslice(, old []T, mincap int) (ret []T) + fn = typecheck.SubstArgTypes(fn, ns.Type().Elem(), ns.Type().Elem()) + + nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), reflectdata.TypePtr(ns.Type().Elem()), ns, + ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns), na)))} + + l = append(l, nif) + + nn := typecheck.Temp(types.Types[types.TINT]) + l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns))) // n = len(s) + + slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, ns) // ...s[:n+argc] + slice.SetSliceBounds(nil, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, na), nil) + slice.SetBounded(true) + l = append(l, ir.NewAssignStmt(base.Pos, ns, slice)) // s = s[:n+argc] + + ls = n.Args[1:] + for i, n := range ls { + ix := ir.NewIndexExpr(base.Pos, ns, nn) // s[n] ... + ix.SetBounded(true) + l = append(l, ir.NewAssignStmt(base.Pos, ix, n)) // s[n] = arg + if i+1 < len(ls) { + l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, ir.NewInt(1)))) // n = n + 1 + } + } + + typecheck.Stmts(l) + walkStmtList(l) + init.Append(l...) 
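+
+	// Reaching here means instrumentation is on: append(s, a, b) has
+	// been expanded into the explicit grow-check, reslice, and
+	// per-element stores accumulated in l above.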
+ return ns +} + +// walkClose walks an OCLOSE node. +func walkClose(n *ir.UnaryExpr, init *ir.Nodes) ir.Node { + // cannot use chanfn - closechan takes any, not chan any + fn := typecheck.LookupRuntime("closechan") + fn = typecheck.SubstArgTypes(fn, n.X.Type()) + return mkcall1(fn, nil, init, n.X) +} + +// Lower copy(a, b) to a memmove call or a runtime call. +// +// init { +// n := len(a) +// if n > len(b) { n = len(b) } +// if a.ptr != b.ptr { memmove(a.ptr, b.ptr, n*sizeof(elem(a))) } +// } +// n; +// +// Also works if b is a string. +// +func walkCopy(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node { + if n.X.Type().Elem().HasPointers() { + ir.CurFunc.SetWBPos(n.Pos()) + fn := writebarrierfn("typedslicecopy", n.X.Type().Elem(), n.Y.Type().Elem()) + n.X = cheapExpr(n.X, init) + ptrL, lenL := backingArrayPtrLen(n.X) + n.Y = cheapExpr(n.Y, init) + ptrR, lenR := backingArrayPtrLen(n.Y) + return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.X.Type().Elem()), ptrL, lenL, ptrR, lenR) + } + + if runtimecall { + // rely on runtime to instrument: + // copy(n.Left, n.Right) + // n.Right can be a slice or string. + + n.X = cheapExpr(n.X, init) + ptrL, lenL := backingArrayPtrLen(n.X) + n.Y = cheapExpr(n.Y, init) + ptrR, lenR := backingArrayPtrLen(n.Y) + + fn := typecheck.LookupRuntime("slicecopy") + fn = typecheck.SubstArgTypes(fn, ptrL.Type().Elem(), ptrR.Type().Elem()) + + return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, ir.NewInt(n.X.Type().Elem().Width)) + } + + n.X = walkExpr(n.X, init) + n.Y = walkExpr(n.Y, init) + nl := typecheck.Temp(n.X.Type()) + nr := typecheck.Temp(n.Y.Type()) + var l []ir.Node + l = append(l, ir.NewAssignStmt(base.Pos, nl, n.X)) + l = append(l, ir.NewAssignStmt(base.Pos, nr, n.Y)) + + nfrm := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nr) + nto := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nl) + + nlen := typecheck.Temp(types.Types[types.TINT]) + + // n = len(to) + l = append(l, ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nl))) + + // if n > len(frm) { n = len(frm) } + nif := ir.NewIfStmt(base.Pos, nil, nil, nil) + + nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr)) + nif.Body.Append(ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr))) + l = append(l, nif) + + // if to.ptr != frm.ptr { memmove( ... ) } + ne := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.ONE, nto, nfrm), nil, nil) + ne.Likely = true + l = append(l, ne) + + fn := typecheck.LookupRuntime("memmove") + fn = typecheck.SubstArgTypes(fn, nl.Type().Elem(), nl.Type().Elem()) + nwid := ir.Node(typecheck.Temp(types.Types[types.TUINTPTR])) + setwid := ir.NewAssignStmt(base.Pos, nwid, typecheck.Conv(nlen, types.Types[types.TUINTPTR])) + ne.Body.Append(setwid) + nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(nl.Type().Elem().Width)) + call := mkcall1(fn, nil, init, nto, nfrm, nwid) + ne.Body.Append(call) + + typecheck.Stmts(l) + walkStmtList(l) + init.Append(l...) + return nlen +} + +// walkDelete walks an ODELETE node. +func walkDelete(init *ir.Nodes, n *ir.CallExpr) ir.Node { + init.Append(n.PtrInit().Take()...) + map_ := n.Args[0] + key := n.Args[1] + map_ = walkExpr(map_, init) + key = walkExpr(key, init) + + t := map_.Type() + fast := mapfast(t) + if fast == mapslow { + // order.stmt made sure key is addressable. + key = typecheck.NodAddr(key) + } + return mkcall1(mapfndel(mapdelete[fast], t), nil, init, reflectdata.TypePtr(t), map_, key) +} + +// walkLenCap walks an OLEN or OCAP node. 
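+// len applied to a (pointer to an) array is folded to a constant below,
+// and len([]rune(s)) is replaced by a call to runtime.countrunes.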
+func walkLenCap(n *ir.UnaryExpr, init *ir.Nodes) ir.Node { + if isRuneCount(n) { + // Replace len([]rune(string)) with runtime.countrunes(string). + return mkcall("countrunes", n.Type(), init, typecheck.Conv(n.X.(*ir.ConvExpr).X, types.Types[types.TSTRING])) + } + + n.X = walkExpr(n.X, init) + + // replace len(*[10]int) with 10. + // delayed until now to preserve side effects. + t := n.X.Type() + + if t.IsPtr() { + t = t.Elem() + } + if t.IsArray() { + safeExpr(n.X, init) + con := typecheck.OrigInt(n, t.NumElem()) + con.SetTypecheck(1) + return con + } + return n +} + +// walkMakeChan walks an OMAKECHAN node. +func walkMakeChan(n *ir.MakeExpr, init *ir.Nodes) ir.Node { + // When size fits into int, use makechan instead of + // makechan64, which is faster and shorter on 32 bit platforms. + size := n.Len + fnname := "makechan64" + argtype := types.Types[types.TINT64] + + // Type checking guarantees that TIDEAL size is positive and fits in an int. + // The case of size overflow when converting TUINT or TUINTPTR to TINT + // will be handled by the negative range checks in makechan during runtime. + if size.Type().IsKind(types.TIDEAL) || size.Type().Size() <= types.Types[types.TUINT].Size() { + fnname = "makechan" + argtype = types.Types[types.TINT] + } + + return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(size, argtype)) +} + +// walkMakeMap walks an OMAKEMAP node. +func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node { + t := n.Type() + hmapType := reflectdata.MapType(t) + hint := n.Len + + // var h *hmap + var h ir.Node + if n.Esc() == ir.EscNone { + // Allocate hmap on stack. + + // var hv hmap + hv := typecheck.Temp(hmapType) + init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, hv, nil))) + // h = &hv + h = typecheck.NodAddr(hv) + + // Allocate one bucket pointed to by hmap.buckets on stack if hint + // is not larger than BUCKETSIZE. In case hint is larger than + // BUCKETSIZE runtime.makemap will allocate the buckets on the heap. + // Maximum key and elem size is 128 bytes, larger objects + // are stored with an indirection. So max bucket size is 2048+eps. + if !ir.IsConst(hint, constant.Int) || + constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) { + + // In case hint is larger than BUCKETSIZE runtime.makemap + // will allocate the buckets on the heap, see #20184 + // + // if hint <= BUCKETSIZE { + // var bv bmap + // b = &bv + // h.buckets = b + // } + + nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(reflectdata.BUCKETSIZE)), nil, nil) + nif.Likely = true + + // var bv bmap + bv := typecheck.Temp(reflectdata.MapBucketType(t)) + nif.Body.Append(ir.NewAssignStmt(base.Pos, bv, nil)) + + // b = &bv + b := typecheck.NodAddr(bv) + + // h.buckets = b + bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap + na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), b) + nif.Body.Append(na) + appendWalkStmt(init, nif) + } + } + + if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) { + // Handling make(map[any]any) and + // make(map[any]any, hint) where hint <= BUCKETSIZE + // special allows for faster map initialization and + // improves binary size by using calls with fewer arguments. + // For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false + // and no buckets will be allocated by makemap. Therefore, + // no buckets need to be allocated in this code path. 
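+	//
+	// For example, make(map[int]int, 8) reaches this path: it becomes
+	// stack hmap initialization when the map does not escape, or a
+	// single makemap_small call when it does, with no buckets
+	// allocated up front either way.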
+ if n.Esc() == ir.EscNone { + // Only need to initialize h.hash0 since + // hmap h has been allocated on the stack already. + // h.hash0 = fastrand() + rand := mkcall("fastrand", types.Types[types.TUINT32], init) + hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand)) + return typecheck.ConvNop(h, t) + } + // Call runtime.makehmap to allocate an + // hmap on the heap and initialize hmap's hash0 field. + fn := typecheck.LookupRuntime("makemap_small") + fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem()) + return mkcall1(fn, n.Type(), init) + } + + if n.Esc() != ir.EscNone { + h = typecheck.NodNil() + } + // Map initialization with a variable or large hint is + // more complicated. We therefore generate a call to + // runtime.makemap to initialize hmap and allocate the + // map buckets. + + // When hint fits into int, use makemap instead of + // makemap64, which is faster and shorter on 32 bit platforms. + fnname := "makemap64" + argtype := types.Types[types.TINT64] + + // Type checking guarantees that TIDEAL hint is positive and fits in an int. + // See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function. + // The case of hint overflow when converting TUINT or TUINTPTR to TINT + // will be handled by the negative range checks in makemap during runtime. + if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() { + fnname = "makemap" + argtype = types.Types[types.TINT] + } + + fn := typecheck.LookupRuntime(fnname) + fn = typecheck.SubstArgTypes(fn, hmapType, t.Key(), t.Elem()) + return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(hint, argtype), h) +} + +// walkMakeSlice walks an OMAKESLICE node. +func walkMakeSlice(n *ir.MakeExpr, init *ir.Nodes) ir.Node { + l := n.Len + r := n.Cap + if r == nil { + r = safeExpr(l, init) + l = r + } + t := n.Type() + if t.Elem().NotInHeap() { + base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem()) + } + if n.Esc() == ir.EscNone { + if why := escape.HeapAllocReason(n); why != "" { + base.Fatalf("%v has EscNone, but %v", n, why) + } + // var arr [r]T + // n = arr[:l] + i := typecheck.IndexConst(r) + if i < 0 { + base.Fatalf("walkexpr: invalid index %v", r) + } + + // cap is constrained to [0,2^31) or [0,2^63) depending on whether + // we're in 32-bit or 64-bit systems. So it's safe to do: + // + // if uint64(len) > cap { + // if len < 0 { panicmakeslicelen() } + // panicmakeslicecap() + // } + nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, typecheck.Conv(l, types.Types[types.TUINT64]), ir.NewInt(i)), nil, nil) + niflen := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLT, l, ir.NewInt(0)), nil, nil) + niflen.Body = []ir.Node{mkcall("panicmakeslicelen", nil, init)} + nif.Body.Append(niflen, mkcall("panicmakeslicecap", nil, init)) + init.Append(typecheck.Stmt(nif)) + + t = types.NewArray(t.Elem(), i) // [r]T + var_ := typecheck.Temp(t) + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil)) // zero temp + r := ir.NewSliceExpr(base.Pos, ir.OSLICE, var_) // arr[:l] + r.SetSliceBounds(nil, l, nil) + // The conv is necessary in case n.Type is named. + return walkExpr(typecheck.Expr(typecheck.Conv(r, n.Type())), init) + } + + // n escapes; set up a call to makeslice. 
+ // When len and cap can fit into int, use makeslice instead of + // makeslice64, which is faster and shorter on 32 bit platforms. + + len, cap := l, r + + fnname := "makeslice64" + argtype := types.Types[types.TINT64] + + // Type checking guarantees that TIDEAL len/cap are positive and fit in an int. + // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT + // will be handled by the negative range checks in makeslice during runtime. + if (len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size()) && + (cap.Type().IsKind(types.TIDEAL) || cap.Type().Size() <= types.Types[types.TUINT].Size()) { + fnname = "makeslice" + argtype = types.Types[types.TINT] + } + + m := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil) + m.SetType(t) + + fn := typecheck.LookupRuntime(fnname) + m.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype)) + m.Ptr.MarkNonNil() + m.LenCap = []ir.Node{typecheck.Conv(len, types.Types[types.TINT]), typecheck.Conv(cap, types.Types[types.TINT])} + return walkExpr(typecheck.Expr(m), init) +} + +// walkMakeSliceCopy walks an OMAKESLICECOPY node. +func walkMakeSliceCopy(n *ir.MakeExpr, init *ir.Nodes) ir.Node { + if n.Esc() == ir.EscNone { + base.Fatalf("OMAKESLICECOPY with EscNone: %v", n) + } + + t := n.Type() + if t.Elem().NotInHeap() { + base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem()) + } + + length := typecheck.Conv(n.Len, types.Types[types.TINT]) + copylen := ir.NewUnaryExpr(base.Pos, ir.OLEN, n.Cap) + copyptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, n.Cap) + + if !t.Elem().HasPointers() && n.Bounded() { + // When len(to)==len(from) and elements have no pointers: + // replace make+copy with runtime.mallocgc+runtime.memmove. + + // We do not check for overflow of len(to)*elem.Width here + // since len(from) is an existing checked slice capacity + // with same elem.Width for the from slice. + size := ir.NewBinaryExpr(base.Pos, ir.OMUL, typecheck.Conv(length, types.Types[types.TUINTPTR]), typecheck.Conv(ir.NewInt(t.Elem().Width), types.Types[types.TUINTPTR])) + + // instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer + fn := typecheck.LookupRuntime("mallocgc") + sh := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil) + sh.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, typecheck.NodNil(), ir.NewBool(false)) + sh.Ptr.MarkNonNil() + sh.LenCap = []ir.Node{length, length} + sh.SetType(t) + + s := typecheck.Temp(t) + r := typecheck.Stmt(ir.NewAssignStmt(base.Pos, s, sh)) + r = walkExpr(r, init) + init.Append(r) + + // instantiate memmove(to *any, frm *any, size uintptr) + fn = typecheck.LookupRuntime("memmove") + fn = typecheck.SubstArgTypes(fn, t.Elem(), t.Elem()) + ncopy := mkcall1(fn, nil, init, ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), copyptr, size) + init.Append(walkExpr(typecheck.Stmt(ncopy), init)) + + return s + } + // Replace make+copy with runtime.makeslicecopy. 
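The OMAKESLICECOPY form handled here originates from an ordinary make-then-copy pair in user code; a sketch (the global sink forces the destination to escape, which this path requires):

```go
package main

import "fmt"

var sink []byte // force the destination slice to escape

func main() {
	src := []byte("hello, world")

	// make+copy with matching lengths is fused: for a pointer-free element
	// type this lowers to one mallocgc plus one memmove rather than a
	// makeslice call followed by a separate copy.
	dst := make([]byte, len(src))
	copy(dst, src)
	sink = dst

	fmt.Println(string(sink))
}
```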
+ // instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer + fn := typecheck.LookupRuntime("makeslicecopy") + s := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil) + s.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR])) + s.Ptr.MarkNonNil() + s.LenCap = []ir.Node{length, length} + s.SetType(t) + return walkExpr(typecheck.Expr(s), init) +} + +// walkNew walks an ONEW node. +func walkNew(n *ir.UnaryExpr, init *ir.Nodes) ir.Node { + if n.Type().Elem().NotInHeap() { + base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type().Elem()) + } + if n.Esc() == ir.EscNone { + if n.Type().Elem().Width >= ir.MaxImplicitStackVarSize { + base.Fatalf("large ONEW with EscNone: %v", n) + } + r := typecheck.Temp(n.Type().Elem()) + init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, nil))) // zero temp + return typecheck.Expr(typecheck.NodAddr(r)) + } + return callnew(n.Type().Elem()) +} + +// generate code for print +func walkPrint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { + // Hoist all the argument evaluation up before the lock. + walkExprListCheap(nn.Args, init) + + // For println, add " " between elements and "\n" at the end. + if nn.Op() == ir.OPRINTN { + s := nn.Args + t := make([]ir.Node, 0, len(s)*2) + for i, n := range s { + if i != 0 { + t = append(t, ir.NewString(" ")) + } + t = append(t, n) + } + t = append(t, ir.NewString("\n")) + nn.Args.Set(t) + } + + // Collapse runs of constant strings. + s := nn.Args + t := make([]ir.Node, 0, len(s)) + for i := 0; i < len(s); { + var strs []string + for i < len(s) && ir.IsConst(s[i], constant.String) { + strs = append(strs, ir.StringVal(s[i])) + i++ + } + if len(strs) > 0 { + t = append(t, ir.NewString(strings.Join(strs, ""))) + } + if i < len(s) { + t = append(t, s[i]) + i++ + } + } + nn.Args.Set(t) + + calls := []ir.Node{mkcall("printlock", nil, init)} + for i, n := range nn.Args { + if n.Op() == ir.OLITERAL { + if n.Type() == types.UntypedRune { + n = typecheck.DefaultLit(n, types.RuneType) + } + + switch n.Val().Kind() { + case constant.Int: + n = typecheck.DefaultLit(n, types.Types[types.TINT64]) + + case constant.Float: + n = typecheck.DefaultLit(n, types.Types[types.TFLOAT64]) + } + } + + if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Kind() == types.TIDEAL { + n = typecheck.DefaultLit(n, types.Types[types.TINT64]) + } + n = typecheck.DefaultLit(n, nil) + nn.Args[i] = n + if n.Type() == nil || n.Type().Kind() == types.TFORW { + continue + } + + var on *ir.Name + switch n.Type().Kind() { + case types.TINTER: + if n.Type().IsEmptyInterface() { + on = typecheck.LookupRuntime("printeface") + } else { + on = typecheck.LookupRuntime("printiface") + } + on = typecheck.SubstArgTypes(on, n.Type()) // any-1 + case types.TPTR: + if n.Type().Elem().NotInHeap() { + on = typecheck.LookupRuntime("printuintptr") + n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n) + n.SetType(types.Types[types.TUNSAFEPTR]) + n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n) + n.SetType(types.Types[types.TUINTPTR]) + break + } + fallthrough + case types.TCHAN, types.TMAP, types.TFUNC, types.TUNSAFEPTR: + on = typecheck.LookupRuntime("printpointer") + on = typecheck.SubstArgTypes(on, n.Type()) // any-1 + case types.TSLICE: + on = typecheck.LookupRuntime("printslice") + on = typecheck.SubstArgTypes(on, n.Type()) // any-1 + case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, 
types.TUINTPTR: + if types.IsRuntimePkg(n.Type().Sym().Pkg) && n.Type().Sym().Name == "hex" { + on = typecheck.LookupRuntime("printhex") + } else { + on = typecheck.LookupRuntime("printuint") + } + case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64: + on = typecheck.LookupRuntime("printint") + case types.TFLOAT32, types.TFLOAT64: + on = typecheck.LookupRuntime("printfloat") + case types.TCOMPLEX64, types.TCOMPLEX128: + on = typecheck.LookupRuntime("printcomplex") + case types.TBOOL: + on = typecheck.LookupRuntime("printbool") + case types.TSTRING: + cs := "" + if ir.IsConst(n, constant.String) { + cs = ir.StringVal(n) + } + switch cs { + case " ": + on = typecheck.LookupRuntime("printsp") + case "\n": + on = typecheck.LookupRuntime("printnl") + default: + on = typecheck.LookupRuntime("printstring") + } + default: + badtype(ir.OPRINT, n.Type(), nil) + continue + } + + r := ir.NewCallExpr(base.Pos, ir.OCALL, on, nil) + if params := on.Type().Params().FieldSlice(); len(params) > 0 { + t := params[0].Type + if !types.Identical(t, n.Type()) { + n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n) + n.SetType(t) + } + r.Args.Append(n) + } + calls = append(calls, r) + } + + calls = append(calls, mkcall("printunlock", nil, init)) + + typecheck.Stmts(calls) + walkExprList(calls, init) + + r := ir.NewBlockStmt(base.Pos, nil) + r.List.Set(calls) + return walkStmt(typecheck.Stmt(r)) +} + +func badtype(op ir.Op, tl, tr *types.Type) { + var s string + if tl != nil { + s += fmt.Sprintf("\n\t%v", tl) + } + if tr != nil { + s += fmt.Sprintf("\n\t%v", tr) + } + + // common mistake: *struct and *interface. + if tl != nil && tr != nil && tl.IsPtr() && tr.IsPtr() { + if tl.Elem().IsStruct() && tr.Elem().IsInterface() { + s += "\n\t(*struct vs *interface)" + } else if tl.Elem().IsInterface() && tr.Elem().IsStruct() { + s += "\n\t(*interface vs *struct)" + } + } + + base.Errorf("illegal types for operand: %v%s", op, s) +} + +func callnew(t *types.Type) ir.Node { + types.CalcSize(t) + n := ir.NewUnaryExpr(base.Pos, ir.ONEWOBJ, reflectdata.TypePtr(t)) + n.SetType(types.NewPtr(t)) + n.SetTypecheck(1) + n.MarkNonNil() + return n +} + +func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node { + fn := typecheck.LookupRuntime(name) + fn = typecheck.SubstArgTypes(fn, l, r) + return fn +} + +// isRuneCount reports whether n is of the form len([]rune(string)). +// These are optimized into a call to runtime.countrunes. +func isRuneCount(n ir.Node) bool { + return base.Flag.N == 0 && !base.Flag.Cfg.Instrumenting && n.Op() == ir.OLEN && n.(*ir.UnaryExpr).X.Op() == ir.OSTR2RUNES +} diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go index 545c762ac7b6d..30f86f0965a78 100644 --- a/src/cmd/compile/internal/walk/closure.go +++ b/src/cmd/compile/internal/walk/closure.go @@ -115,7 +115,7 @@ func Closure(fn *ir.Func) { base.Pos = lno } -func walkclosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node { +func walkClosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node { fn := clo.Func // If no closure vars, don't bother wrapping. @@ -148,10 +148,10 @@ func walkclosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node { clo.Prealloc = nil } - return walkexpr(cfn, init) + return walkExpr(cfn, init) } -func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node { +func walkCallPart(n *ir.CallPartExpr, init *ir.Nodes) ir.Node { // Create closure in the form of a composite literal. 
// For x.M with receiver (x) type T, the generated code looks like: // @@ -162,8 +162,8 @@ func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node { if n.X.Type().IsInterface() { // Trigger panic for method on nil interface now. // Otherwise it happens in the wrapper and is confusing. - n.X = cheapexpr(n.X, init) - n.X = walkexpr(n.X, nil) + n.X = cheapExpr(n.X, init) + n.X = walkExpr(n.X, nil) tab := typecheck.Expr(ir.NewUnaryExpr(base.Pos, ir.OITAB, n.X)) @@ -193,5 +193,5 @@ func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node { n.Prealloc = nil } - return walkexpr(cfn, init) + return walkExpr(cfn, init) } diff --git a/src/cmd/compile/internal/walk/compare.go b/src/cmd/compile/internal/walk/compare.go new file mode 100644 index 0000000000000..b1ab42782b471 --- /dev/null +++ b/src/cmd/compile/internal/walk/compare.go @@ -0,0 +1,507 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package walk + +import ( + "encoding/binary" + "go/constant" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/sys" +) + +// The result of walkCompare MUST be assigned back to n, e.g. +// n.Left = walkCompare(n.Left, init) +func walkCompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { + if n.X.Type().IsInterface() && n.Y.Type().IsInterface() && n.X.Op() != ir.ONIL && n.Y.Op() != ir.ONIL { + return walkCompareInterface(n, init) + } + + if n.X.Type().IsString() && n.Y.Type().IsString() { + return walkCompareString(n, init) + } + + n.X = walkExpr(n.X, init) + n.Y = walkExpr(n.Y, init) + + // Given mixed interface/concrete comparison, + // rewrite into types-equal && data-equal. + // This is efficient, avoids allocations, and avoids runtime calls. + if n.X.Type().IsInterface() != n.Y.Type().IsInterface() { + // Preserve side-effects in case of short-circuiting; see #32187. + l := cheapExpr(n.X, init) + r := cheapExpr(n.Y, init) + // Swap so that l is the interface value and r is the concrete value. + if n.Y.Type().IsInterface() { + l, r = r, l + } + + // Handle both == and !=. + eq := n.Op() + andor := ir.OOROR + if eq == ir.OEQ { + andor = ir.OANDAND + } + // Check for types equal. + // For empty interface, this is: + // l.tab == type(r) + // For non-empty interface, this is: + // l.tab != nil && l.tab._type == type(r) + var eqtype ir.Node + tab := ir.NewUnaryExpr(base.Pos, ir.OITAB, l) + rtyp := reflectdata.TypePtr(r.Type()) + if l.Type().IsEmptyInterface() { + tab.SetType(types.NewPtr(types.Types[types.TUINT8])) + tab.SetTypecheck(1) + eqtype = ir.NewBinaryExpr(base.Pos, eq, tab, rtyp) + } else { + nonnil := ir.NewBinaryExpr(base.Pos, brcom(eq), typecheck.NodNil(), tab) + match := ir.NewBinaryExpr(base.Pos, eq, itabType(tab), rtyp) + eqtype = ir.NewLogicalExpr(base.Pos, andor, nonnil, match) + } + // Check for data equal. + eqdata := ir.NewBinaryExpr(base.Pos, eq, ifaceData(n.Pos(), l, r.Type()), r) + // Put it all together. + expr := ir.NewLogicalExpr(base.Pos, andor, eqtype, eqdata) + return finishCompare(n, expr, init) + } + + // Must be comparison of array or struct. + // Otherwise back end handles it. + // While we're here, decide whether to + // inline or call an eq alg. 
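For reference, the mixed interface/concrete rewrite above applies to comparisons like this sketch, which compile down to a type-word check plus a data check with no runtime call:

```go
package main

import "fmt"

func main() {
	var i interface{} = 42

	// Roughly: itab(i) == &int-type && *(*int)(data(i)) == 42.
	fmt.Println(i == 42)        // true
	fmt.Println(i == int64(42)) // false: the type word already differs
}
```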
+ t := n.X.Type() + var inline bool + + maxcmpsize := int64(4) + unalignedLoad := canMergeLoads() + if unalignedLoad { + // Keep this low enough to generate less code than a function call. + maxcmpsize = 2 * int64(ssagen.Arch.LinkArch.RegSize) + } + + switch t.Kind() { + default: + if base.Debug.Libfuzzer != 0 && t.IsInteger() { + n.X = cheapExpr(n.X, init) + n.Y = cheapExpr(n.Y, init) + + // If exactly one comparison operand is + // constant, invoke the constcmp functions + // instead, and arrange for the constant + // operand to be the first argument. + l, r := n.X, n.Y + if r.Op() == ir.OLITERAL { + l, r = r, l + } + constcmp := l.Op() == ir.OLITERAL && r.Op() != ir.OLITERAL + + var fn string + var paramType *types.Type + switch t.Size() { + case 1: + fn = "libfuzzerTraceCmp1" + if constcmp { + fn = "libfuzzerTraceConstCmp1" + } + paramType = types.Types[types.TUINT8] + case 2: + fn = "libfuzzerTraceCmp2" + if constcmp { + fn = "libfuzzerTraceConstCmp2" + } + paramType = types.Types[types.TUINT16] + case 4: + fn = "libfuzzerTraceCmp4" + if constcmp { + fn = "libfuzzerTraceConstCmp4" + } + paramType = types.Types[types.TUINT32] + case 8: + fn = "libfuzzerTraceCmp8" + if constcmp { + fn = "libfuzzerTraceConstCmp8" + } + paramType = types.Types[types.TUINT64] + default: + base.Fatalf("unexpected integer size %d for %v", t.Size(), t) + } + init.Append(mkcall(fn, nil, init, tracecmpArg(l, paramType, init), tracecmpArg(r, paramType, init))) + } + return n + case types.TARRAY: + // We can compare several elements at once with 2/4/8 byte integer compares + inline = t.NumElem() <= 1 || (types.IsSimple[t.Elem().Kind()] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize)) + case types.TSTRUCT: + inline = t.NumComponents(types.IgnoreBlankFields) <= 4 + } + + cmpl := n.X + for cmpl != nil && cmpl.Op() == ir.OCONVNOP { + cmpl = cmpl.(*ir.ConvExpr).X + } + cmpr := n.Y + for cmpr != nil && cmpr.Op() == ir.OCONVNOP { + cmpr = cmpr.(*ir.ConvExpr).X + } + + // Chose not to inline. Call equality function directly. 
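When the heuristics above decide against inlining, the comparison takes the call path below; a sketch of a struct that goes that route (more than four components, and the string fields force the generated ASPECIAL equality function):

```go
package main

import "fmt"

type pair struct{ k, v string }

// record has ten string components, well over the inline limit of four.
type record struct{ a, b, c, d, e pair }

func main() {
	x := record{a: pair{"key", "val"}}
	y := x

	// Compiled as a call to the generated equality function for record,
	// receiving &x and &y (the eqFor/ASPECIAL path).
	fmt.Println(x == y) // true
}
```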
+ if !inline { + // eq algs take pointers; cmpl and cmpr must be addressable + if !ir.IsAssignable(cmpl) || !ir.IsAssignable(cmpr) { + base.Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr) + } + + fn, needsize := eqFor(t) + call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) + call.Args.Append(typecheck.NodAddr(cmpl)) + call.Args.Append(typecheck.NodAddr(cmpr)) + if needsize { + call.Args.Append(ir.NewInt(t.Width)) + } + res := ir.Node(call) + if n.Op() != ir.OEQ { + res = ir.NewUnaryExpr(base.Pos, ir.ONOT, res) + } + return finishCompare(n, res, init) + } + + // inline: build boolean expression comparing element by element + andor := ir.OANDAND + if n.Op() == ir.ONE { + andor = ir.OOROR + } + var expr ir.Node + compare := func(el, er ir.Node) { + a := ir.NewBinaryExpr(base.Pos, n.Op(), el, er) + if expr == nil { + expr = a + } else { + expr = ir.NewLogicalExpr(base.Pos, andor, expr, a) + } + } + cmpl = safeExpr(cmpl, init) + cmpr = safeExpr(cmpr, init) + if t.IsStruct() { + for _, f := range t.Fields().Slice() { + sym := f.Sym + if sym.IsBlank() { + continue + } + compare( + ir.NewSelectorExpr(base.Pos, ir.OXDOT, cmpl, sym), + ir.NewSelectorExpr(base.Pos, ir.OXDOT, cmpr, sym), + ) + } + } else { + step := int64(1) + remains := t.NumElem() * t.Elem().Width + combine64bit := unalignedLoad && types.RegSize == 8 && t.Elem().Width <= 4 && t.Elem().IsInteger() + combine32bit := unalignedLoad && t.Elem().Width <= 2 && t.Elem().IsInteger() + combine16bit := unalignedLoad && t.Elem().Width == 1 && t.Elem().IsInteger() + for i := int64(0); remains > 0; { + var convType *types.Type + switch { + case remains >= 8 && combine64bit: + convType = types.Types[types.TINT64] + step = 8 / t.Elem().Width + case remains >= 4 && combine32bit: + convType = types.Types[types.TUINT32] + step = 4 / t.Elem().Width + case remains >= 2 && combine16bit: + convType = types.Types[types.TUINT16] + step = 2 / t.Elem().Width + default: + step = 1 + } + if step == 1 { + compare( + ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i)), + ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i)), + ) + i++ + remains -= t.Elem().Width + } else { + elemType := t.Elem().ToUnsigned() + cmplw := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i))) + cmplw = typecheck.Conv(cmplw, elemType) // convert to unsigned + cmplw = typecheck.Conv(cmplw, convType) // widen + cmprw := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i))) + cmprw = typecheck.Conv(cmprw, elemType) + cmprw = typecheck.Conv(cmprw, convType) + // For code like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ... + // ssa will generate a single large load. + for offset := int64(1); offset < step; offset++ { + lb := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i+offset))) + lb = typecheck.Conv(lb, elemType) + lb = typecheck.Conv(lb, convType) + lb = ir.NewBinaryExpr(base.Pos, ir.OLSH, lb, ir.NewInt(8*t.Elem().Width*offset)) + cmplw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmplw, lb) + rb := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i+offset))) + rb = typecheck.Conv(rb, elemType) + rb = typecheck.Conv(rb, convType) + rb = ir.NewBinaryExpr(base.Pos, ir.OLSH, rb, ir.NewInt(8*t.Elem().Width*offset)) + cmprw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmprw, rb) + } + compare(cmplw, cmprw) + i += step + remains -= step * t.Elem().Width + } + } + } + if expr == nil { + expr = ir.NewBool(n.Op() == ir.OEQ) + // We still need to use cmpl and cmpr, in case they contain + // an expression which might panic. See issue 23837. 
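A sketch of the corner case this guards (issue 23837): the comparison folds to a constant, yet the operands must still be evaluated for their panics:

```go
package main

import "fmt"

type blanks struct{ _, _ int } // only blank fields: nothing to compare

func main() {
	defer func() { fmt.Println("recovered:", recover()) }()

	var xs []blanks // nil slice, so xs[0] must panic

	// The == folds to the constant true, but xs[0] is still assigned to a
	// temporary first, so the index panic is preserved.
	fmt.Println(xs[0] == blanks{})
}
```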
+ t := typecheck.Temp(cmpl.Type()) + a1 := typecheck.Stmt(ir.NewAssignStmt(base.Pos, t, cmpl)) + a2 := typecheck.Stmt(ir.NewAssignStmt(base.Pos, t, cmpr)) + init.Append(a1, a2) + } + return finishCompare(n, expr, init) +} + +func walkCompareInterface(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { + n.Y = cheapExpr(n.Y, init) + n.X = cheapExpr(n.X, init) + eqtab, eqdata := reflectdata.EqInterface(n.X, n.Y) + var cmp ir.Node + if n.Op() == ir.OEQ { + cmp = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqtab, eqdata) + } else { + eqtab.SetOp(ir.ONE) + cmp = ir.NewLogicalExpr(base.Pos, ir.OOROR, eqtab, ir.NewUnaryExpr(base.Pos, ir.ONOT, eqdata)) + } + return finishCompare(n, cmp, init) +} + +func walkCompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { + // Rewrite comparisons to short constant strings as length+byte-wise comparisons. + var cs, ncs ir.Node // const string, non-const string + switch { + case ir.IsConst(n.X, constant.String) && ir.IsConst(n.Y, constant.String): + // ignore; will be constant evaluated + case ir.IsConst(n.X, constant.String): + cs = n.X + ncs = n.Y + case ir.IsConst(n.Y, constant.String): + cs = n.Y + ncs = n.X + } + if cs != nil { + cmp := n.Op() + // Our comparison below assumes that the non-constant string + // is on the left hand side, so rewrite "" cmp x to x cmp "". + // See issue 24817. + if ir.IsConst(n.X, constant.String) { + cmp = brrev(cmp) + } + + // maxRewriteLen was chosen empirically. + // It is the value that minimizes cmd/go file size + // across most architectures. + // See the commit description for CL 26758 for details. + maxRewriteLen := 6 + // Some architectures can load unaligned byte sequence as 1 word. + // So we can cover longer strings with the same amount of code. + canCombineLoads := canMergeLoads() + combine64bit := false + if canCombineLoads { + // Keep this low enough to generate less code than a function call. + maxRewriteLen = 2 * ssagen.Arch.LinkArch.RegSize + combine64bit = ssagen.Arch.LinkArch.RegSize >= 8 + } + + var and ir.Op + switch cmp { + case ir.OEQ: + and = ir.OANDAND + case ir.ONE: + and = ir.OOROR + default: + // Don't do byte-wise comparisons for <, <=, etc. + // They're fairly complicated. + // Length-only checks are ok, though. + maxRewriteLen = 0 + } + if s := ir.StringVal(cs); len(s) <= maxRewriteLen { + if len(s) > 0 { + ncs = safeExpr(ncs, init) + } + r := ir.Node(ir.NewBinaryExpr(base.Pos, cmp, ir.NewUnaryExpr(base.Pos, ir.OLEN, ncs), ir.NewInt(int64(len(s))))) + remains := len(s) + for i := 0; remains > 0; { + if remains == 1 || !canCombineLoads { + cb := ir.NewInt(int64(s[i])) + ncb := ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i))) + r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, ncb, cb)) + remains-- + i++ + continue + } + var step int + var convType *types.Type + switch { + case remains >= 8 && combine64bit: + convType = types.Types[types.TINT64] + step = 8 + case remains >= 4: + convType = types.Types[types.TUINT32] + step = 4 + case remains >= 2: + convType = types.Types[types.TUINT16] + step = 2 + } + ncsubstr := typecheck.Conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i))), convType) + csubstr := int64(s[i]) + // Calculate large constant from bytes as sequence of shifts and ors. + // Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ... + // ssa will combine this into a single large load. 
+				for offset := int64(1); offset < step; offset++ {
+					b := typecheck.Conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i+int(offset)))), convType)
+					b = ir.NewBinaryExpr(base.Pos, ir.OLSH, b, ir.NewInt(int64(8*offset)))
+					ncsubstr = ir.NewBinaryExpr(base.Pos, ir.OOR, ncsubstr, b)
+					csubstr |= int64(s[i+int(offset)]) << uint8(8*offset)
+				}
+				csubstrPart := ir.NewInt(csubstr)
+				// Compare "step" bytes at once
+				r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, csubstrPart, ncsubstr))
+				remains -= step
+				i += step
+			}
+			return finishCompare(n, r, init)
+		}
+	}
+
+	var r ir.Node
+	if n.Op() == ir.OEQ || n.Op() == ir.ONE {
+		// prepare for rewrite below
+		n.X = cheapExpr(n.X, init)
+		n.Y = cheapExpr(n.Y, init)
+		eqlen, eqmem := reflectdata.EqString(n.X, n.Y)
+		// quick check of len before full compare for == or !=.
+		// memequal then tests equality up to length len.
+		if n.Op() == ir.OEQ {
+			// len(left) == len(right) && memequal(left, right, len)
+			r = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqlen, eqmem)
+		} else {
+			// len(left) != len(right) || !memequal(left, right, len)
+			eqlen.SetOp(ir.ONE)
+			r = ir.NewLogicalExpr(base.Pos, ir.OOROR, eqlen, ir.NewUnaryExpr(base.Pos, ir.ONOT, eqmem))
+		}
+	} else {
+		// sys_cmpstring(s1, s2) :: 0
+		r = mkcall("cmpstring", types.Types[types.TINT], init, typecheck.Conv(n.X, types.Types[types.TSTRING]), typecheck.Conv(n.Y, types.Types[types.TSTRING]))
+		r = ir.NewBinaryExpr(base.Pos, n.Op(), r, ir.NewInt(0))
+	}
+
+	return finishCompare(n, r, init)
+}
+
+// The result of finishCompare MUST be assigned back to n, e.g.
+// n.Left = finishCompare(n.Left, r, init)
+func finishCompare(n *ir.BinaryExpr, r ir.Node, init *ir.Nodes) ir.Node {
+	r = typecheck.Expr(r)
+	r = typecheck.Conv(r, n.Type())
+	r = walkExpr(r, init)
+	return r
+}
+
+func eqFor(t *types.Type) (n ir.Node, needsize bool) {
+	// Should only arrive here with large memory or
+	// a struct/array containing a non-memory field/element.
+	// Small memory is handled inline, and single non-memory
+	// is handled by walkCompare.
+	switch a, _ := types.AlgType(t); a {
+	case types.AMEM:
+		n := typecheck.LookupRuntime("memequal")
+		n = typecheck.SubstArgTypes(n, t, t)
+		return n, true
+	case types.ASPECIAL:
+		sym := reflectdata.TypeSymPrefix(".eq", t)
+		n := typecheck.NewName(sym)
+		ir.MarkFunc(n)
+		n.SetType(typecheck.NewFuncType(nil, []*ir.Field{
+			ir.NewField(base.Pos, nil, nil, types.NewPtr(t)),
+			ir.NewField(base.Pos, nil, nil, types.NewPtr(t)),
+		}, []*ir.Field{
+			ir.NewField(base.Pos, nil, nil, types.Types[types.TBOOL]),
+		}))
+		return n, false
+	}
+	base.Fatalf("eqfor %v", t)
+	return nil, false
+}
+
+// brcom returns !(op).
+// For example, brcom(==) is !=.
+func brcom(op ir.Op) ir.Op {
+	switch op {
+	case ir.OEQ:
+		return ir.ONE
+	case ir.ONE:
+		return ir.OEQ
+	case ir.OLT:
+		return ir.OGE
+	case ir.OGT:
+		return ir.OLE
+	case ir.OLE:
+		return ir.OGT
+	case ir.OGE:
+		return ir.OLT
+	}
+	base.Fatalf("brcom: no com for %v\n", op)
+	return op
+}
+
+// brrev returns reverse(op).
+// For example, brrev(<) is >.
+func brrev(op ir.Op) ir.Op {
+	switch op {
+	case ir.OEQ:
+		return ir.OEQ
+	case ir.ONE:
+		return ir.ONE
+	case ir.OLT:
+		return ir.OGT
+	case ir.OGT:
+		return ir.OLT
+	case ir.OLE:
+		return ir.OGE
+	case ir.OGE:
+		return ir.OLE
+	}
+	base.Fatalf("brrev: no rev for %v\n", op)
+	return op
+}
+
+func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
+	// Ugly hack to avoid "constant -1 overflows uintptr" errors, etc.
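The difference between the two operator tables above is easy to state in source terms: brrev swaps the operands, brcom negates the result. A small sketch:

```go
package main

import "fmt"

func main() {
	x, y := 1, 2

	// brrev: x < y is the same test as y > x (operands swapped).
	fmt.Println(x < y, y > x) // true true

	// brcom: !(x < y) is the same test as x >= y (result negated).
	fmt.Println(!(x < y), x >= y) // false false
}
```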
+ if n.Op() == ir.OLITERAL && n.Type().IsSigned() && ir.Int64Val(n) < 0 { + n = copyExpr(n, n.Type(), init) + } + + return typecheck.Conv(n, t) +} + +// canMergeLoads reports whether the backend optimization passes for +// the current architecture can combine adjacent loads into a single +// larger, possibly unaligned, load. Note that currently the +// optimizations must be able to handle little endian byte order. +func canMergeLoads() bool { + switch ssagen.Arch.LinkArch.Family { + case sys.ARM64, sys.AMD64, sys.I386, sys.S390X: + return true + case sys.PPC64: + // Load combining only supported on ppc64le. + return ssagen.Arch.LinkArch.ByteOrder == binary.LittleEndian + } + return false +} diff --git a/src/cmd/compile/internal/walk/sinit.go b/src/cmd/compile/internal/walk/complit.go similarity index 96% rename from src/cmd/compile/internal/walk/sinit.go rename to src/cmd/compile/internal/walk/complit.go index dbb17dfe5015c..6fbbee92846b8 100644 --- a/src/cmd/compile/internal/walk/sinit.go +++ b/src/cmd/compile/internal/walk/complit.go @@ -7,6 +7,7 @@ package walk import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/ssagen" "cmd/compile/internal/staticdata" "cmd/compile/internal/staticinit" "cmd/compile/internal/typecheck" @@ -14,6 +15,22 @@ import ( "cmd/internal/obj" ) +// walkCompLit walks a composite literal node: +// OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT (all CompLitExpr), or OPTRLIT (AddrExpr). +func walkCompLit(n ir.Node, init *ir.Nodes) ir.Node { + if isStaticCompositeLiteral(n) && !ssagen.TypeOK(n.Type()) { + n := n.(*ir.CompLitExpr) // not OPTRLIT + // n can be directly represented in the read-only data section. + // Make direct reference to the static data. See issue 12841. + vstat := readonlystaticname(n.Type()) + fixedlit(inInitFunction, initKindStatic, n, vstat, init) + return typecheck.Expr(vstat) + } + var_ := typecheck.Temp(n.Type()) + anylit(n, var_, init) + return var_ +} + // initContext is the context in which static data is populated. // It is either in an init function or in any other function. // Static data populated in an init function will be written either @@ -245,7 +262,7 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node, genAsStatic(as) case initKindDynamic, initKindLocalCode: a = orderStmtInPlace(as, map[string][]*ir.Name{}) - a = walkstmt(a) + a = walkStmt(a) init.Append(a) default: base.Fatalf("fixedlit: bad kind %d", kind) @@ -403,7 +420,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) ir.SetPos(value) as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, a, value)) as = orderStmtInPlace(as, map[string][]*ir.Name{}) - as = walkstmt(as) + as = walkStmt(as) init.Append(as) } @@ -412,7 +429,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) a = typecheck.Stmt(a) a = orderStmtInPlace(a, map[string][]*ir.Name{}) - a = walkstmt(a) + a = walkStmt(a) init.Append(a) } diff --git a/src/cmd/compile/internal/walk/convert.go b/src/cmd/compile/internal/walk/convert.go new file mode 100644 index 0000000000000..21426c9817388 --- /dev/null +++ b/src/cmd/compile/internal/walk/convert.go @@ -0,0 +1,502 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package walk + +import ( + "encoding/binary" + "go/constant" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/sys" +) + +// walkConv walks an OCONV or OCONVNOP (but not OCONVIFACE) node. +func walkConv(n *ir.ConvExpr, init *ir.Nodes) ir.Node { + n.X = walkExpr(n.X, init) + if n.Op() == ir.OCONVNOP && n.Type() == n.X.Type() { + return n.X + } + if n.Op() == ir.OCONVNOP && ir.ShouldCheckPtr(ir.CurFunc, 1) { + if n.Type().IsPtr() && n.X.Type().IsUnsafePtr() { // unsafe.Pointer to *T + return walkCheckPtrAlignment(n, init, nil) + } + if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() { // uintptr to unsafe.Pointer + return walkCheckPtrArithmetic(n, init) + } + } + param, result := rtconvfn(n.X.Type(), n.Type()) + if param == types.Txxx { + return n + } + fn := types.BasicTypeNames[param] + "to" + types.BasicTypeNames[result] + return typecheck.Conv(mkcall(fn, types.Types[result], init, typecheck.Conv(n.X, types.Types[param])), n.Type()) +} + +// walkConvInterface walks an OCONVIFACE node. +func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node { + n.X = walkExpr(n.X, init) + + fromType := n.X.Type() + toType := n.Type() + + if !fromType.IsInterface() && !ir.IsBlank(ir.CurFunc.Nname) { // skip unnamed functions (func _()) + reflectdata.MarkTypeUsedInInterface(fromType, ir.CurFunc.LSym) + } + + // typeword generates the type word of the interface value. + typeword := func() ir.Node { + if toType.IsEmptyInterface() { + return reflectdata.TypePtr(fromType) + } + return reflectdata.ITabAddr(fromType, toType) + } + + // Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped. + if types.IsDirectIface(fromType) { + l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), n.X) + l.SetType(toType) + l.SetTypecheck(n.Typecheck()) + return l + } + + if ir.Names.Staticuint64s == nil { + ir.Names.Staticuint64s = typecheck.NewName(ir.Pkgs.Runtime.Lookup("staticuint64s")) + ir.Names.Staticuint64s.Class_ = ir.PEXTERN + // The actual type is [256]uint64, but we use [256*8]uint8 so we can address + // individual bytes. + ir.Names.Staticuint64s.SetType(types.NewArray(types.Types[types.TUINT8], 256*8)) + ir.Names.Zerobase = typecheck.NewName(ir.Pkgs.Runtime.Lookup("zerobase")) + ir.Names.Zerobase.Class_ = ir.PEXTERN + ir.Names.Zerobase.SetType(types.Types[types.TUINTPTR]) + } + + // Optimize convT2{E,I} for many cases in which T is not pointer-shaped, + // by using an existing addressable value identical to n.Left + // or creating one on the stack. + var value ir.Node + switch { + case fromType.Size() == 0: + // n.Left is zero-sized. Use zerobase. + cheapExpr(n.X, init) // Evaluate n.Left for side-effects. See issue 19246. + value = ir.Names.Zerobase + case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()): + // n.Left is a bool/byte. Use staticuint64s[n.Left * 8] on little-endian + // and staticuint64s[n.Left * 8 + 7] on big-endian. + n.X = cheapExpr(n.X, init) + // byteindex widens n.Left so that the multiplication doesn't overflow. 
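The staticuint64s case above is why boxing a byte-sized value does not allocate; a sketch (testing.AllocsPerRun is used only to observe the effect):

```go
package main

import (
	"fmt"
	"testing"
)

var sink interface{}

func main() {
	b := byte(7)

	// The interface data word points into the shared staticuint64s table
	// (roughly &staticuint64s[b*8] on little-endian), so no allocation.
	allocs := testing.AllocsPerRun(100, func() { sink = b })
	fmt.Println(sink, allocs) // 7 0
}
```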
+ index := ir.NewBinaryExpr(base.Pos, ir.OLSH, byteindex(n.X), ir.NewInt(3)) + if ssagen.Arch.LinkArch.ByteOrder == binary.BigEndian { + index = ir.NewBinaryExpr(base.Pos, ir.OADD, index, ir.NewInt(7)) + } + xe := ir.NewIndexExpr(base.Pos, ir.Names.Staticuint64s, index) + xe.SetBounded(true) + value = xe + case n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class_ == ir.PEXTERN && n.X.(*ir.Name).Readonly(): + // n.Left is a readonly global; use it directly. + value = n.X + case !fromType.IsInterface() && n.Esc() == ir.EscNone && fromType.Width <= 1024: + // n.Left does not escape. Use a stack temporary initialized to n.Left. + value = typecheck.Temp(fromType) + init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, value, n.X))) + } + + if value != nil { + // Value is identical to n.Left. + // Construct the interface directly: {type/itab, &value}. + l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), typecheck.Expr(typecheck.NodAddr(value))) + l.SetType(toType) + l.SetTypecheck(n.Typecheck()) + return l + } + + // Implement interface to empty interface conversion. + // tmp = i.itab + // if tmp != nil { + // tmp = tmp.type + // } + // e = iface{tmp, i.data} + if toType.IsEmptyInterface() && fromType.IsInterface() && !fromType.IsEmptyInterface() { + // Evaluate the input interface. + c := typecheck.Temp(fromType) + init.Append(ir.NewAssignStmt(base.Pos, c, n.X)) + + // Get the itab out of the interface. + tmp := typecheck.Temp(types.NewPtr(types.Types[types.TUINT8])) + init.Append(ir.NewAssignStmt(base.Pos, tmp, typecheck.Expr(ir.NewUnaryExpr(base.Pos, ir.OITAB, c)))) + + // Get the type out of the itab. + nif := ir.NewIfStmt(base.Pos, typecheck.Expr(ir.NewBinaryExpr(base.Pos, ir.ONE, tmp, typecheck.NodNil())), nil, nil) + nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, tmp, itabType(tmp))} + init.Append(nif) + + // Build the result. + e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, tmp, ifaceData(n.Pos(), c, types.NewPtr(types.Types[types.TUINT8]))) + e.SetType(toType) // assign type manually, typecheck doesn't understand OEFACE. + e.SetTypecheck(1) + return e + } + + fnname, needsaddr := convFuncName(fromType, toType) + + if !needsaddr && !fromType.IsInterface() { + // Use a specialized conversion routine that only returns a data pointer. + // ptr = convT2X(val) + // e = iface{typ/tab, ptr} + fn := typecheck.LookupRuntime(fnname) + types.CalcSize(fromType) + fn = typecheck.SubstArgTypes(fn, fromType) + types.CalcSize(fn.Type()) + call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) + call.Args = []ir.Node{n.X} + e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), safeExpr(walkExpr(typecheck.Expr(call), init), init)) + e.SetType(toType) + e.SetTypecheck(1) + return e + } + + var tab ir.Node + if fromType.IsInterface() { + // convI2I + tab = reflectdata.TypePtr(toType) + } else { + // convT2x + tab = typeword() + } + + v := n.X + if needsaddr { + // Types of large or unknown size are passed by reference. + // Orderexpr arranged for n.Left to be a temporary for all + // the conversions it could see. Comparison of an interface + // with a non-interface, especially in a switch on interface value + // with non-interface cases, is not visible to order.stmt, so we + // have to fall back on allocating a temp here. 
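A sketch of a conversion that takes this by-address fallback: the value is too large for the stack-temp fast path, so the runtime conversion receives its address (the type and its size here are illustrative):

```go
package main

import "fmt"

// big is 1600 bytes, above the 1024-byte stack-temp cutoff, and has no
// pointers, so the conversion below goes through convT2Enoptr(&b).
type big [200]int64

var sink interface{}

func main() {
	var b big
	b[0] = 42
	sink = b
	fmt.Println(sink.(big)[0]) // 42
}
```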
+		if !ir.IsAssignable(v) {
+			v = copyExpr(v, v.Type(), init)
+		}
+		v = typecheck.NodAddr(v)
+	}
+
+	types.CalcSize(fromType)
+	fn := typecheck.LookupRuntime(fnname)
+	fn = typecheck.SubstArgTypes(fn, fromType, toType)
+	types.CalcSize(fn.Type())
+	call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
+	call.Args = []ir.Node{tab, v}
+	return walkExpr(typecheck.Expr(call), init)
+}
+
+// walkBytesRunesToString walks an OBYTES2STR or ORUNES2STR node.
+func walkBytesRunesToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+	a := typecheck.NodNil()
+	if n.Esc() == ir.EscNone {
+		// Create temporary buffer for string on stack.
+		t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
+		a = typecheck.NodAddr(typecheck.Temp(t))
+	}
+	if n.Op() == ir.ORUNES2STR {
+		// slicerunetostring(*[32]byte, []rune) string
+		return mkcall("slicerunetostring", n.Type(), init, a, n.X)
+	}
+	// slicebytetostring(*[32]byte, ptr *byte, n int) string
+	n.X = cheapExpr(n.X, init)
+	ptr, len := backingArrayPtrLen(n.X)
+	return mkcall("slicebytetostring", n.Type(), init, a, ptr, len)
+}
+
+// walkBytesToStringTemp walks an OBYTES2STRTMP node.
+func walkBytesToStringTemp(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+	n.X = walkExpr(n.X, init)
+	if !base.Flag.Cfg.Instrumenting {
+		// Let the backend handle OBYTES2STRTMP directly
+		// to avoid a function call to slicebytetostringtmp.
+		return n
+	}
+	// slicebytetostringtmp(ptr *byte, n int) string
+	n.X = cheapExpr(n.X, init)
+	ptr, len := backingArrayPtrLen(n.X)
+	return mkcall("slicebytetostringtmp", n.Type(), init, ptr, len)
+}
+
+// walkRuneToString walks an ORUNESTR node.
+func walkRuneToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+	a := typecheck.NodNil()
+	if n.Esc() == ir.EscNone {
+		t := types.NewArray(types.Types[types.TUINT8], 4)
+		a = typecheck.NodAddr(typecheck.Temp(t))
+	}
+	// intstring(*[4]byte, rune)
+	return mkcall("intstring", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TINT64]))
+}
+
+// walkStringToBytes walks an OSTR2BYTES node.
+func walkStringToBytes(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+	s := n.X
+	if ir.IsConst(s, constant.String) {
+		sc := ir.StringVal(s)
+
+		// Allocate a [n]byte of the right size.
+		t := types.NewArray(types.Types[types.TUINT8], int64(len(sc)))
+		var a ir.Node
+		if n.Esc() == ir.EscNone && len(sc) <= int(ir.MaxImplicitStackVarSize) {
+			a = typecheck.NodAddr(typecheck.Temp(t))
+		} else {
+			a = callnew(t)
+		}
+		p := typecheck.Temp(t.PtrTo()) // *[n]byte
+		init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, p, a)))
+
+		// Copy from the static string data to the [n]byte.
+		if len(sc) > 0 {
+			as := ir.NewAssignStmt(base.Pos, ir.NewStarExpr(base.Pos, p), ir.NewStarExpr(base.Pos, typecheck.ConvNop(ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), t.PtrTo())))
+			appendWalkStmt(init, as)
+		}
+
+		// Slice the [n]byte to a []byte.
+		slice := ir.NewSliceExpr(n.Pos(), ir.OSLICEARR, p)
+		slice.SetType(n.Type())
+		slice.SetTypecheck(1)
+		return walkExpr(slice, init)
+	}
+
+	a := typecheck.NodNil()
+	if n.Esc() == ir.EscNone {
+		// Create temporary buffer for slice on stack.
+		t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
+		a = typecheck.NodAddr(typecheck.Temp(t))
+	}
+	// stringtoslicebyte(*[32]byte, string) []byte
+	return mkcall("stringtoslicebyte", n.Type(), init, a, typecheck.Conv(s, types.Types[types.TSTRING]))
+}
+
+// walkStringToBytesTemp walks an OSTR2BYTESTMP node.
+func walkStringToBytesTemp(n *ir.ConvExpr, init *ir.Nodes) ir.Node { + // []byte(string) conversion that creates a slice + // referring to the actual string bytes. + // This conversion is handled later by the backend and + // is only for use by internal compiler optimizations + // that know that the slice won't be mutated. + // The only such case today is: + // for i, c := range []byte(string) + n.X = walkExpr(n.X, init) + return n +} + +// walkStringToRunes walks an OSTR2RUNES node. +func walkStringToRunes(n *ir.ConvExpr, init *ir.Nodes) ir.Node { + a := typecheck.NodNil() + if n.Esc() == ir.EscNone { + // Create temporary buffer for slice on stack. + t := types.NewArray(types.Types[types.TINT32], tmpstringbufsize) + a = typecheck.NodAddr(typecheck.Temp(t)) + } + // stringtoslicerune(*[32]rune, string) []rune + return mkcall("stringtoslicerune", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TSTRING])) +} + +// convFuncName builds the runtime function name for interface conversion. +// It also reports whether the function expects the data by address. +// Not all names are possible. For example, we never generate convE2E or convE2I. +func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) { + tkind := to.Tie() + switch from.Tie() { + case 'I': + if tkind == 'I' { + return "convI2I", false + } + case 'T': + switch { + case from.Size() == 2 && from.Align == 2: + return "convT16", false + case from.Size() == 4 && from.Align == 4 && !from.HasPointers(): + return "convT32", false + case from.Size() == 8 && from.Align == types.Types[types.TUINT64].Align && !from.HasPointers(): + return "convT64", false + } + if sc := from.SoleComponent(); sc != nil { + switch { + case sc.IsString(): + return "convTstring", false + case sc.IsSlice(): + return "convTslice", false + } + } + + switch tkind { + case 'E': + if !from.HasPointers() { + return "convT2Enoptr", true + } + return "convT2E", true + case 'I': + if !from.HasPointers() { + return "convT2Inoptr", true + } + return "convT2I", true + } + } + base.Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie()) + panic("unreachable") +} + +// rtconvfn returns the parameter and result types that will be used by a +// runtime function to convert from type src to type dst. The runtime function +// name can be derived from the names of the returned types. +// +// If no such function is necessary, it returns (Txxx, Txxx). +func rtconvfn(src, dst *types.Type) (param, result types.Kind) { + if ssagen.Arch.SoftFloat { + return types.Txxx, types.Txxx + } + + switch ssagen.Arch.LinkArch.Family { + case sys.ARM, sys.MIPS: + if src.IsFloat() { + switch dst.Kind() { + case types.TINT64, types.TUINT64: + return types.TFLOAT64, dst.Kind() + } + } + if dst.IsFloat() { + switch src.Kind() { + case types.TINT64, types.TUINT64: + return src.Kind(), types.TFLOAT64 + } + } + + case sys.I386: + if src.IsFloat() { + switch dst.Kind() { + case types.TINT64, types.TUINT64: + return types.TFLOAT64, dst.Kind() + case types.TUINT32, types.TUINT, types.TUINTPTR: + return types.TFLOAT64, types.TUINT32 + } + } + if dst.IsFloat() { + switch src.Kind() { + case types.TINT64, types.TUINT64: + return src.Kind(), types.TFLOAT64 + case types.TUINT32, types.TUINT, types.TUINTPTR: + return types.TUINT32, types.TFLOAT64 + } + } + } + return types.Txxx, types.Txxx +} + +// byteindex converts n, which is byte-sized, to an int used to index into an array. +// We cannot use conv, because we allow converting bool to int here, +// which is forbidden in user code. 
+func byteindex(n ir.Node) ir.Node { + // We cannot convert from bool to int directly. + // While converting from int8 to int is possible, it would yield + // the wrong result for negative values. + // Reinterpreting the value as an unsigned byte solves both cases. + if !types.Identical(n.Type(), types.Types[types.TUINT8]) { + n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n) + n.SetType(types.Types[types.TUINT8]) + n.SetTypecheck(1) + } + n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n) + n.SetType(types.Types[types.TINT]) + n.SetTypecheck(1) + return n +} + +func walkCheckPtrAlignment(n *ir.ConvExpr, init *ir.Nodes, count ir.Node) ir.Node { + if !n.Type().IsPtr() { + base.Fatalf("expected pointer type: %v", n.Type()) + } + elem := n.Type().Elem() + if count != nil { + if !elem.IsArray() { + base.Fatalf("expected array type: %v", elem) + } + elem = elem.Elem() + } + + size := elem.Size() + if elem.Alignment() == 1 && (size == 0 || size == 1 && count == nil) { + return n + } + + if count == nil { + count = ir.NewInt(1) + } + + n.X = cheapExpr(n.X, init) + init.Append(mkcall("checkptrAlignment", nil, init, typecheck.ConvNop(n.X, types.Types[types.TUNSAFEPTR]), reflectdata.TypePtr(elem), typecheck.Conv(count, types.Types[types.TUINTPTR]))) + return n +} + +func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node { + // Calling cheapexpr(n, init) below leads to a recursive call + // to walkexpr, which leads us back here again. Use n.Opt to + // prevent infinite loops. + if opt := n.Opt(); opt == &walkCheckPtrArithmeticMarker { + return n + } else if opt != nil { + // We use n.Opt() here because today it's not used for OCONVNOP. If that changes, + // there's no guarantee that temporarily replacing it is safe, so just hard fail here. + base.Fatalf("unexpected Opt: %v", opt) + } + n.SetOpt(&walkCheckPtrArithmeticMarker) + defer n.SetOpt(nil) + + // TODO(mdempsky): Make stricter. We only need to exempt + // reflect.Value.Pointer and reflect.Value.UnsafeAddr. + switch n.X.Op() { + case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: + return n + } + + if n.X.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(n.X) { + return n + } + + // Find original unsafe.Pointer operands involved in this + // arithmetic expression. + // + // "It is valid both to add and to subtract offsets from a + // pointer in this way. It is also valid to use &^ to round + // pointers, usually for alignment." + var originals []ir.Node + var walk func(n ir.Node) + walk = func(n ir.Node) { + switch n.Op() { + case ir.OADD: + n := n.(*ir.BinaryExpr) + walk(n.X) + walk(n.Y) + case ir.OSUB, ir.OANDNOT: + n := n.(*ir.BinaryExpr) + walk(n.X) + case ir.OCONVNOP: + n := n.(*ir.ConvExpr) + if n.X.Type().IsUnsafePtr() { + n.X = cheapExpr(n.X, init) + originals = append(originals, typecheck.ConvNop(n.X, types.Types[types.TUNSAFEPTR])) + } + } + } + walk(n.X) + + cheap := cheapExpr(n, init) + + slice := typecheck.MakeDotArgs(types.NewSlice(types.Types[types.TUNSAFEPTR]), originals) + slice.SetEsc(ir.EscNone) + + init.Append(mkcall("checkptrArithmetic", nil, init, typecheck.ConvNop(cheap, types.Types[types.TUNSAFEPTR]), slice)) + // TODO(khr): Mark backing store of slice as dead. This will allow us to reuse + // the backing store for multiple calls to checkptrArithmetic. 
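The pointer arithmetic this pass instruments looks like the following sketch; under -d=checkptr (enabled by default with -race and -msan) the add chain is wrapped in a checkptrArithmetic call carrying the original unsafe.Pointer operands:

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	xs := [4]int32{10, 20, 30, 40}
	p := unsafe.Pointer(&xs[0])

	// OCONVNOP(OADD(uintptr(p), offset)): this uintptr round-trip is the
	// shape that walkCheckPtrArithmetic instruments.
	q := unsafe.Pointer(uintptr(p) + 2*unsafe.Sizeof(xs[0]))

	fmt.Println(*(*int32)(q)) // 30
}
```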
+ + return cheap +} diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go new file mode 100644 index 0000000000000..2029a6aef68d8 --- /dev/null +++ b/src/cmd/compile/internal/walk/expr.go @@ -0,0 +1,1009 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package walk + +import ( + "fmt" + "go/constant" + "strings" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/staticdata" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/objabi" +) + +// The result of walkExpr MUST be assigned back to n, e.g. +// n.Left = walkExpr(n.Left, init) +func walkExpr(n ir.Node, init *ir.Nodes) ir.Node { + if n == nil { + return n + } + + // Eagerly checkwidth all expressions for the back end. + if n.Type() != nil && !n.Type().WidthCalculated() { + switch n.Type().Kind() { + case types.TBLANK, types.TNIL, types.TIDEAL: + default: + types.CheckSize(n.Type()) + } + } + + if init == n.PtrInit() { + // not okay to use n->ninit when walking n, + // because we might replace n with some other node + // and would lose the init list. + base.Fatalf("walkexpr init == &n->ninit") + } + + if len(n.Init()) != 0 { + walkStmtList(n.Init()) + init.Append(n.PtrInit().Take()...) + } + + lno := ir.SetPos(n) + + if base.Flag.LowerW > 1 { + ir.Dump("before walk expr", n) + } + + if n.Typecheck() != 1 { + base.Fatalf("missed typecheck: %+v", n) + } + + if n.Type().IsUntyped() { + base.Fatalf("expression has untyped type: %+v", n) + } + + if n.Op() == ir.ONAME && n.(*ir.Name).Class_ == ir.PAUTOHEAP { + n := n.(*ir.Name) + nn := ir.NewStarExpr(base.Pos, n.Name().Heapaddr) + nn.X.MarkNonNil() + return walkExpr(typecheck.Expr(nn), init) + } + + n = walkExpr1(n, init) + + // Expressions that are constant at run time but not + // considered const by the language spec are not turned into + // constants until walk. For example, if n is y%1 == 0, the + // walk of y%1 may have replaced it by 0. + // Check whether n with its updated args is itself now a constant. + t := n.Type() + n = typecheck.EvalConst(n) + if n.Type() != t { + base.Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type()) + } + if n.Op() == ir.OLITERAL { + n = typecheck.Expr(n) + // Emit string symbol now to avoid emitting + // any concurrently during the backend. + if v := n.Val(); v.Kind() == constant.String { + _ = staticdata.StringSym(n.Pos(), constant.StringVal(v)) + } + } + + updateHasCall(n) + + if base.Flag.LowerW != 0 && n != nil { + ir.Dump("after walk expr", n) + } + + base.Pos = lno + return n +} + +func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node { + switch n.Op() { + default: + ir.Dump("walk", n) + base.Fatalf("walkexpr: switch 1 unknown op %+v", n.Op()) + panic("unreachable") + + case ir.ONONAME, ir.OGETG, ir.ONEWOBJ, ir.OMETHEXPR: + return n + + case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET: + // TODO(mdempsky): Just return n; see discussion on CL 38655. + // Perhaps refactor to use Node.mayBeShared for these instead. + // If these return early, make sure to still call + // stringsym for constant strings. 
+ return n + + case ir.ONOT, ir.ONEG, ir.OPLUS, ir.OBITNOT, ir.OREAL, ir.OIMAG, ir.OSPTR, ir.OITAB, ir.OIDATA: + n := n.(*ir.UnaryExpr) + n.X = walkExpr(n.X, init) + return n + + case ir.ODOTMETH, ir.ODOTINTER: + n := n.(*ir.SelectorExpr) + n.X = walkExpr(n.X, init) + return n + + case ir.OADDR: + n := n.(*ir.AddrExpr) + n.X = walkExpr(n.X, init) + return n + + case ir.ODEREF: + n := n.(*ir.StarExpr) + n.X = walkExpr(n.X, init) + return n + + case ir.OEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH: + n := n.(*ir.BinaryExpr) + n.X = walkExpr(n.X, init) + n.Y = walkExpr(n.Y, init) + return n + + case ir.ODOT, ir.ODOTPTR: + n := n.(*ir.SelectorExpr) + return walkDot(n, init) + + case ir.ODOTTYPE, ir.ODOTTYPE2: + n := n.(*ir.TypeAssertExpr) + return walkDotType(n, init) + + case ir.OLEN, ir.OCAP: + n := n.(*ir.UnaryExpr) + return walkLenCap(n, init) + + case ir.OCOMPLEX: + n := n.(*ir.BinaryExpr) + n.X = walkExpr(n.X, init) + n.Y = walkExpr(n.Y, init) + return n + + case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: + n := n.(*ir.BinaryExpr) + return walkCompare(n, init) + + case ir.OANDAND, ir.OOROR: + n := n.(*ir.LogicalExpr) + return walkLogical(n, init) + + case ir.OPRINT, ir.OPRINTN: + return walkPrint(n.(*ir.CallExpr), init) + + case ir.OPANIC: + n := n.(*ir.UnaryExpr) + return mkcall("gopanic", nil, init, n.X) + + case ir.ORECOVER: + n := n.(*ir.CallExpr) + return mkcall("gorecover", n.Type(), init, typecheck.NodAddr(ir.RegFP)) + + case ir.OCLOSUREREAD, ir.OCFUNC: + return n + + case ir.OCALLINTER, ir.OCALLFUNC, ir.OCALLMETH: + n := n.(*ir.CallExpr) + return walkCall(n, init) + + case ir.OAS, ir.OASOP: + return walkAssign(init, n) + + case ir.OAS2: + n := n.(*ir.AssignListStmt) + return walkAssignList(init, n) + + // a,b,... = fn() + case ir.OAS2FUNC: + n := n.(*ir.AssignListStmt) + return walkAssignFunc(init, n) + + // x, y = <-c + // order.stmt made sure x is addressable or blank. + case ir.OAS2RECV: + n := n.(*ir.AssignListStmt) + return walkAssignRecv(init, n) + + // a,b = m[i] + case ir.OAS2MAPR: + n := n.(*ir.AssignListStmt) + return walkAssignMapRead(init, n) + + case ir.ODELETE: + n := n.(*ir.CallExpr) + return walkDelete(init, n) + + case ir.OAS2DOTTYPE: + n := n.(*ir.AssignListStmt) + return walkAssignDotType(n, init) + + case ir.OCONVIFACE: + n := n.(*ir.ConvExpr) + return walkConvInterface(n, init) + + case ir.OCONV, ir.OCONVNOP: + n := n.(*ir.ConvExpr) + return walkConv(n, init) + + case ir.ODIV, ir.OMOD: + n := n.(*ir.BinaryExpr) + return walkDivMod(n, init) + + case ir.OINDEX: + n := n.(*ir.IndexExpr) + return walkIndex(n, init) + + case ir.OINDEXMAP: + n := n.(*ir.IndexExpr) + return walkIndexMap(n, init) + + case ir.ORECV: + base.Fatalf("walkexpr ORECV") // should see inside OAS only + panic("unreachable") + + case ir.OSLICEHEADER: + n := n.(*ir.SliceHeaderExpr) + return walkSliceHeader(n, init) + + case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR: + n := n.(*ir.SliceExpr) + return walkSlice(n, init) + + case ir.ONEW: + n := n.(*ir.UnaryExpr) + return walkNew(n, init) + + case ir.OADDSTR: + return walkAddString(n.(*ir.AddStringExpr), init) + + case ir.OAPPEND: + // order should make sure we only see OAS(node, OAPPEND), which we handle above. 
+		base.Fatalf("append outside assignment")
+		panic("unreachable")
+
+	case ir.OCOPY:
+		return walkCopy(n.(*ir.BinaryExpr), init, base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime)
+
+	case ir.OCLOSE:
+		n := n.(*ir.UnaryExpr)
+		return walkClose(n, init)
+
+	case ir.OMAKECHAN:
+		n := n.(*ir.MakeExpr)
+		return walkMakeChan(n, init)
+
+	case ir.OMAKEMAP:
+		n := n.(*ir.MakeExpr)
+		return walkMakeMap(n, init)
+
+	case ir.OMAKESLICE:
+		n := n.(*ir.MakeExpr)
+		return walkMakeSlice(n, init)
+
+	case ir.OMAKESLICECOPY:
+		n := n.(*ir.MakeExpr)
+		return walkMakeSliceCopy(n, init)
+
+	case ir.ORUNESTR:
+		n := n.(*ir.ConvExpr)
+		return walkRuneToString(n, init)
+
+	case ir.OBYTES2STR, ir.ORUNES2STR:
+		n := n.(*ir.ConvExpr)
+		return walkBytesRunesToString(n, init)
+
+	case ir.OBYTES2STRTMP:
+		n := n.(*ir.ConvExpr)
+		return walkBytesToStringTemp(n, init)
+
+	case ir.OSTR2BYTES:
+		n := n.(*ir.ConvExpr)
+		return walkStringToBytes(n, init)
+
+	case ir.OSTR2BYTESTMP:
+		n := n.(*ir.ConvExpr)
+		return walkStringToBytesTemp(n, init)
+
+	case ir.OSTR2RUNES:
+		n := n.(*ir.ConvExpr)
+		return walkStringToRunes(n, init)
+
+	case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT, ir.OPTRLIT:
+		return walkCompLit(n, init)
+
+	case ir.OSEND:
+		n := n.(*ir.SendStmt)
+		return walkSend(n, init)
+
+	case ir.OCLOSURE:
+		return walkClosure(n.(*ir.ClosureExpr), init)
+
+	case ir.OCALLPART:
+		return walkCallPart(n.(*ir.CallPartExpr), init)
+	}
+
+	// No return! Each case must return (or panic),
+	// to avoid confusion about what gets returned
+	// in the presence of type assertions.
+}
+
+// walkExprList walks the whole tree of the body of an
+// expression or simple statement.
+// The types of the expressions are calculated,
+// compile-time constants are evaluated,
+// and complex side effects like statements are appended to init.
+func walkExprList(s []ir.Node, init *ir.Nodes) {
+	for i := range s {
+		s[i] = walkExpr(s[i], init)
+	}
+}
+
+func walkExprListCheap(s []ir.Node, init *ir.Nodes) {
+	for i, n := range s {
+		s[i] = cheapExpr(n, init)
+		s[i] = walkExpr(s[i], init)
+	}
+}
+
+func walkExprListSafe(s []ir.Node, init *ir.Nodes) {
+	for i, n := range s {
+		s[i] = safeExpr(n, init)
+		s[i] = walkExpr(s[i], init)
+	}
+}
+
+// cheapExpr returns a side-effect-free and cheap n, appending side effects to init.
+// The result may not be assignable.
+func cheapExpr(n ir.Node, init *ir.Nodes) ir.Node {
+	switch n.Op() {
+	case ir.ONAME, ir.OLITERAL, ir.ONIL:
+		return n
+	}
+
+	return copyExpr(n, n.Type(), init)
+}
+
+// safeExpr returns a side-effect-free n, appending side effects to init.
+// The result is assignable if n is.
+func safeExpr(n ir.Node, init *ir.Nodes) ir.Node {
+	if n == nil {
+		return nil
+	}
+
+	if len(n.Init()) != 0 {
+		walkStmtList(n.Init())
+		init.Append(n.PtrInit().Take()...)
+ } + + switch n.Op() { + case ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET: + return n + + case ir.OLEN, ir.OCAP: + n := n.(*ir.UnaryExpr) + l := safeExpr(n.X, init) + if l == n.X { + return n + } + a := ir.Copy(n).(*ir.UnaryExpr) + a.X = l + return walkExpr(typecheck.Expr(a), init) + + case ir.ODOT, ir.ODOTPTR: + n := n.(*ir.SelectorExpr) + l := safeExpr(n.X, init) + if l == n.X { + return n + } + a := ir.Copy(n).(*ir.SelectorExpr) + a.X = l + return walkExpr(typecheck.Expr(a), init) + + case ir.ODEREF: + n := n.(*ir.StarExpr) + l := safeExpr(n.X, init) + if l == n.X { + return n + } + a := ir.Copy(n).(*ir.StarExpr) + a.X = l + return walkExpr(typecheck.Expr(a), init) + + case ir.OINDEX, ir.OINDEXMAP: + n := n.(*ir.IndexExpr) + l := safeExpr(n.X, init) + r := safeExpr(n.Index, init) + if l == n.X && r == n.Index { + return n + } + a := ir.Copy(n).(*ir.IndexExpr) + a.X = l + a.Index = r + return walkExpr(typecheck.Expr(a), init) + + case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT: + n := n.(*ir.CompLitExpr) + if isStaticCompositeLiteral(n) { + return n + } + } + + // make a copy; must not be used as an lvalue + if ir.IsAssignable(n) { + base.Fatalf("missing lvalue case in safeexpr: %v", n) + } + return cheapExpr(n, init) +} + +func copyExpr(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node { + l := typecheck.Temp(t) + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, l, n)) + return l +} + +func walkAddString(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { + c := len(n.List) + + if c < 2 { + base.Fatalf("addstr count %d too small", c) + } + + buf := typecheck.NodNil() + if n.Esc() == ir.EscNone { + sz := int64(0) + for _, n1 := range n.List { + if n1.Op() == ir.OLITERAL { + sz += int64(len(ir.StringVal(n1))) + } + } + + // Don't allocate the buffer if the result won't fit. + if sz < tmpstringbufsize { + // Create temporary buffer for result string on stack. + t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize) + buf = typecheck.NodAddr(typecheck.Temp(t)) + } + } + + // build list of string arguments + args := []ir.Node{buf} + for _, n2 := range n.List { + args = append(args, typecheck.Conv(n2, types.Types[types.TSTRING])) + } + + var fn string + if c <= 5 { + // small numbers of strings use direct runtime helpers. + // note: order.expr knows this cutoff too. + fn = fmt.Sprintf("concatstring%d", c) + } else { + // large numbers of strings are passed to the runtime as a slice. + fn = "concatstrings" + + t := types.NewSlice(types.Types[types.TSTRING]) + // args[1:] to skip buf arg + slice := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(t), args[1:]) + slice.Prealloc = n.Prealloc + args = []ir.Node{buf, slice} + slice.SetEsc(ir.EscNone) + } + + cat := typecheck.LookupRuntime(fn) + r := ir.NewCallExpr(base.Pos, ir.OCALL, cat, nil) + r.Args.Set(args) + r1 := typecheck.Expr(r) + r1 = walkExpr(r1, init) + r1.SetType(n.Type()) + + return r1 +} + +// walkCall walks an OCALLFUNC, OCALLINTER, or OCALLMETH node. +func walkCall(n *ir.CallExpr, init *ir.Nodes) ir.Node { + if n.Op() == ir.OCALLINTER { + usemethod(n) + reflectdata.MarkUsedIfaceMethod(n) + } + + if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.OCLOSURE { + // Transform direct call of a closure to call of a normal function. + // transformclosure already did all preparation work. + + // Prepend captured variables to argument list. + clo := n.X.(*ir.ClosureExpr) + n.Args.Prepend(clo.Func.ClosureEnter...) + clo.Func.ClosureEnter.Set(nil) + + // Replace OCLOSURE with ONAME/PFUNC. 
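This direct-call rewrite turns code like the following sketch into a plain function call, with the captured variable passed as an extra leading argument instead of a closure object being allocated:

```go
package main

import "fmt"

func main() {
	x := 10

	// Called at its definition site: no closure allocation; x is prepended
	// to the argument list of the underlying function.
	y := func(d int) int { return x + d }(5)

	fmt.Println(y) // 15
}
```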
+		n.X = clo.Func.Nname
+
+		// Update the type of the OCALLFUNC node.
+		// The output arguments have not changed, but their offsets may have.
+		if n.X.Type().NumResults() == 1 {
+			n.SetType(n.X.Type().Results().Field(0).Type)
+		} else {
+			n.SetType(n.X.Type().Results())
+		}
+	}
+
+	walkCall1(n, init)
+	return n
+}
+
+func walkCall1(n *ir.CallExpr, init *ir.Nodes) {
+	if len(n.Rargs) != 0 {
+		return // already walked
+	}
+
+	params := n.X.Type().Params()
+	args := n.Args
+
+	n.X = walkExpr(n.X, init)
+	walkExprList(args, init)
+
+	// If this is a method call, add the receiver at the beginning of the args.
+	if n.Op() == ir.OCALLMETH {
+		withRecv := make([]ir.Node, len(args)+1)
+		dot := n.X.(*ir.SelectorExpr)
+		withRecv[0] = dot.X
+		dot.X = nil
+		copy(withRecv[1:], args)
+		args = withRecv
+	}
+
+	// For any argument whose evaluation might require a function call,
+	// store that argument into a temporary variable,
+	// to prevent those calls from clobbering arguments already on the stack.
+	// When instrumenting, all arguments might require function calls.
+	var tempAssigns []ir.Node
+	for i, arg := range args {
+		updateHasCall(arg)
+		// Determine param type.
+		var t *types.Type
+		if n.Op() == ir.OCALLMETH {
+			if i == 0 {
+				t = n.X.Type().Recv().Type
+			} else {
+				t = params.Field(i - 1).Type
+			}
+		} else {
+			t = params.Field(i).Type
+		}
+		if base.Flag.Cfg.Instrumenting || fncall(arg, t) {
+			// assign the argument to a temporary
+			tmp := typecheck.Temp(t)
+			a := convas(ir.NewAssignStmt(base.Pos, tmp, arg), init)
+			tempAssigns = append(tempAssigns, a)
+			// replace arg with temp
+			args[i] = tmp
+		}
+	}
+
+	n.Args.Set(tempAssigns)
+	n.Rargs.Set(args)
+}
+
+// walkDivMod walks an ODIV or OMOD node.
+func walkDivMod(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
+	n.X = walkExpr(n.X, init)
+	n.Y = walkExpr(n.Y, init)
+
+	// rewrite complex div into function call.
+	et := n.X.Type().Kind()
+
+	if types.IsComplex[et] && n.Op() == ir.ODIV {
+		t := n.Type()
+		call := mkcall("complex128div", types.Types[types.TCOMPLEX128], init, typecheck.Conv(n.X, types.Types[types.TCOMPLEX128]), typecheck.Conv(n.Y, types.Types[types.TCOMPLEX128]))
+		return typecheck.Conv(call, t)
+	}
+
+	// Nothing to do for float divisions.
+	if types.IsFloat[et] {
+		return n
+	}
+
+	// rewrite 64-bit div and mod on 32-bit architectures.
+	// TODO: Remove this code once we can introduce
+	// runtime calls late in SSA processing.
+	if types.RegSize < 8 && (et == types.TINT64 || et == types.TUINT64) {
+		if n.Y.Op() == ir.OLITERAL {
+			// Leave div/mod by constant powers of 2 or small 16-bit constants.
+			// The SSA backend will handle those.
+			switch et {
+			case types.TINT64:
+				c := ir.Int64Val(n.Y)
+				if c < 0 {
+					c = -c
+				}
+				if c != 0 && c&(c-1) == 0 {
+					return n
+				}
+			case types.TUINT64:
+				c := ir.Uint64Val(n.Y)
+				if c < 1<<16 {
+					return n
+				}
+				if c != 0 && c&(c-1) == 0 {
+					return n
+				}
+			}
+		}
+		var fn string
+		if et == types.TINT64 {
+			fn = "int64"
+		} else {
+			fn = "uint64"
+		}
+		if n.Op() == ir.ODIV {
+			fn += "div"
+		} else {
+			fn += "mod"
+		}
+		return mkcall(fn, n.Type(), init, typecheck.Conv(n.X, types.Types[et]), typecheck.Conv(n.Y, types.Types[et]))
+	}
+	return n
+}
+
+// walkDot walks an ODOT or ODOTPTR node.
+func walkDot(n *ir.SelectorExpr, init *ir.Nodes) ir.Node {
+	usefield(n)
+	n.X = walkExpr(n.X, init)
+	return n
+}
+
+// walkDotType walks an ODOTTYPE or ODOTTYPE2 node.
+func walkDotType(n *ir.TypeAssertExpr, init *ir.Nodes) ir.Node {
+	n.X = walkExpr(n.X, init)
+	// Set up interface type addresses for back end.
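+	// n.Ntype caches the address of the asserted type's descriptor.
+	// For the single-result form the source interface's descriptor is
+	// kept too, and for concrete targets asserted out of non-empty
+	// interfaces the itab address is recorded as well.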
+	n.Ntype = reflectdata.TypePtr(n.Type())
+	if n.Op() == ir.ODOTTYPE {
+		n.Ntype.(*ir.AddrExpr).Alloc = reflectdata.TypePtr(n.X.Type())
+	}
+	if !n.Type().IsInterface() && !n.X.Type().IsEmptyInterface() {
+		n.Itab = []ir.Node{reflectdata.ITabAddr(n.Type(), n.X.Type())}
+	}
+	return n
+}
+
+// walkIndex walks an OINDEX node.
+func walkIndex(n *ir.IndexExpr, init *ir.Nodes) ir.Node {
+	n.X = walkExpr(n.X, init)
+
+	// Save the original node for bounds checking elision.
+	// If it was an ODIV/OMOD, walk might rewrite it.
+	r := n.Index
+
+	n.Index = walkExpr(n.Index, init)
+
+	// if the range of the type cannot exceed the static array bound,
+	// disable the bounds check.
+	if n.Bounded() {
+		return n
+	}
+	t := n.X.Type()
+	if t != nil && t.IsPtr() {
+		t = t.Elem()
+	}
+	if t.IsArray() {
+		n.SetBounded(bounded(r, t.NumElem()))
+		if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Index, constant.Int) {
+			base.Warn("index bounds check elided")
+		}
+		if ir.IsSmallIntConst(n.Index) && !n.Bounded() {
+			base.Errorf("index out of bounds")
+		}
+	} else if ir.IsConst(n.X, constant.String) {
+		n.SetBounded(bounded(r, int64(len(ir.StringVal(n.X)))))
+		if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Index, constant.Int) {
+			base.Warn("index bounds check elided")
+		}
+		if ir.IsSmallIntConst(n.Index) && !n.Bounded() {
+			base.Errorf("index out of bounds")
+		}
+	}
+
+	if ir.IsConst(n.Index, constant.Int) {
+		if v := n.Index.Val(); constant.Sign(v) < 0 || ir.ConstOverflow(v, types.Types[types.TINT]) {
+			base.Errorf("index out of bounds")
+		}
+	}
+	return n
+}
+
+// walkIndexMap walks an OINDEXMAP node.
+func walkIndexMap(n *ir.IndexExpr, init *ir.Nodes) ir.Node {
+	// Replace m[k] with *map{access1,assign}(maptype, m, &k)
+	n.X = walkExpr(n.X, init)
+	n.Index = walkExpr(n.Index, init)
+	map_ := n.X
+	key := n.Index
+	t := map_.Type()
+	var call *ir.CallExpr
+	if n.Assigned {
+		// This m[k] expression is on the left-hand side of an assignment.
+		fast := mapfast(t)
+		if fast == mapslow {
+			// standard version takes key by reference.
+			// order.expr made sure key is addressable.
+			key = typecheck.NodAddr(key)
+		}
+		call = mkcall1(mapfn(mapassign[fast], t), nil, init, reflectdata.TypePtr(t), map_, key)
+	} else {
+		// m[k] is not the target of an assignment.
+		fast := mapfast(t)
+		if fast == mapslow {
+			// standard version takes key by reference.
+			// order.expr made sure key is addressable.
+			key = typecheck.NodAddr(key)
+		}
+
+		if w := t.Elem().Width; w <= zeroValSize {
+			call = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Elem()), init, reflectdata.TypePtr(t), map_, key)
+		} else {
+			z := reflectdata.ZeroAddr(w)
+			call = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, reflectdata.TypePtr(t), map_, key, z)
+		}
+	}
+	call.SetType(types.NewPtr(t.Elem()))
+	call.MarkNonNil() // mapaccess1* and mapassign always return non-nil pointers.
+	star := ir.NewStarExpr(base.Pos, call)
+	star.SetType(t.Elem())
+	star.SetTypecheck(1)
+	return star
+}
+
+// walkLogical walks an OANDAND or OOROR node.
+func walkLogical(n *ir.LogicalExpr, init *ir.Nodes) ir.Node {
+	n.X = walkExpr(n.X, init)
+
+	// We cannot put side effects from n.Y on init,
+	// because they cannot run before n.X is checked.
+	// Save them elsewhere and attach to the eventual n.Y.
+	var ll ir.Nodes
+
+	n.Y = walkExpr(n.Y, &ll)
+	n.Y = ir.InitExpr(ll, n.Y)
+	return n
+}
+
+// walkSend walks an OSEND node.
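+// The value is converted to the channel's element type and its address
+// is passed to runtime.chansend1 (order has already arranged for the
+// value to be addressable).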
+func walkSend(n *ir.SendStmt, init *ir.Nodes) ir.Node {
+	n1 := n.Value
+	n1 = typecheck.AssignConv(n1, n.Chan.Type().Elem(), "chan send")
+	n1 = walkExpr(n1, init)
+	n1 = typecheck.NodAddr(n1)
+	return mkcall1(chanfn("chansend1", 2, n.Chan.Type()), nil, init, n.Chan, n1)
+}
+
+// walkSlice walks an OSLICE, OSLICEARR, OSLICESTR, OSLICE3, or OSLICE3ARR node.
+func walkSlice(n *ir.SliceExpr, init *ir.Nodes) ir.Node {
+
+	checkSlice := ir.ShouldCheckPtr(ir.CurFunc, 1) && n.Op() == ir.OSLICE3ARR && n.X.Op() == ir.OCONVNOP && n.X.(*ir.ConvExpr).X.Type().IsUnsafePtr()
+	if checkSlice {
+		conv := n.X.(*ir.ConvExpr)
+		conv.X = walkExpr(conv.X, init)
+	} else {
+		n.X = walkExpr(n.X, init)
+	}
+
+	low, high, max := n.SliceBounds()
+	low = walkExpr(low, init)
+	if low != nil && ir.IsZero(low) {
+		// Reduce x[0:j] to x[:j] and x[0:j:k] to x[:j:k].
+		low = nil
+	}
+	high = walkExpr(high, init)
+	max = walkExpr(max, init)
+	n.SetSliceBounds(low, high, max)
+	if checkSlice {
+		n.X = walkCheckPtrAlignment(n.X.(*ir.ConvExpr), init, max)
+	}
+
+	if n.Op().IsSlice3() {
+		if max != nil && max.Op() == ir.OCAP && ir.SameSafeExpr(n.X, max.(*ir.UnaryExpr).X) {
+			// Reduce x[i:j:cap(x)] to x[i:j].
+			if n.Op() == ir.OSLICE3 {
+				n.SetOp(ir.OSLICE)
+			} else {
+				n.SetOp(ir.OSLICEARR)
+			}
+			return reduceSlice(n)
+		}
+		return n
+	}
+	return reduceSlice(n)
+}
+
+// walkSliceHeader walks an OSLICEHEADER node.
+func walkSliceHeader(n *ir.SliceHeaderExpr, init *ir.Nodes) ir.Node {
+	n.Ptr = walkExpr(n.Ptr, init)
+	n.LenCap[0] = walkExpr(n.LenCap[0], init)
+	n.LenCap[1] = walkExpr(n.LenCap[1], init)
+	return n
+}
+
+// TODO(josharian): combine this with its caller and simplify
+func reduceSlice(n *ir.SliceExpr) ir.Node {
+	low, high, max := n.SliceBounds()
+	if high != nil && high.Op() == ir.OLEN && ir.SameSafeExpr(n.X, high.(*ir.UnaryExpr).X) {
+		// Reduce x[i:len(x)] to x[i:].
+		high = nil
+	}
+	n.SetSliceBounds(low, high, max)
+	if (n.Op() == ir.OSLICE || n.Op() == ir.OSLICESTR) && low == nil && high == nil {
+		// Reduce x[:] to x.
+		if base.Debug.Slice > 0 {
+			base.Warn("slice: omit slice operation")
+		}
+		return n.X
+	}
+	return n
+}
+
+// bounded reports whether the integer n must be in range [0, max).
+func bounded(n ir.Node, max int64) bool {
+	if n.Type() == nil || !n.Type().IsInteger() {
+		return false
+	}
+
+	sign := n.Type().IsSigned()
+	bits := int32(8 * n.Type().Width)
+
+	if ir.IsSmallIntConst(n) {
+		v := ir.Int64Val(n)
+		return 0 <= v && v < max
+	}
+
+	switch n.Op() {
+	case ir.OAND, ir.OANDNOT:
+		n := n.(*ir.BinaryExpr)
+		v := int64(-1)
+		switch {
+		case ir.IsSmallIntConst(n.X):
+			v = ir.Int64Val(n.X)
+		case ir.IsSmallIntConst(n.Y):
+			v = ir.Int64Val(n.Y)
+			if n.Op() == ir.OANDNOT {
+				v = ^v
+				if !sign {
+					v &= 1<<uint(bits) - 1
+				}
+			}
+		}
+		if 0 <= v && v < max {
+			return true
+		}
+
+	case ir.OMOD:
+		n := n.(*ir.BinaryExpr)
+		if !sign && ir.IsSmallIntConst(n.Y) {
+			v := ir.Int64Val(n.Y)
+			if 0 <= v && v <= max {
+				return true
+			}
+		}
+
+	case ir.ODIV:
+		n := n.(*ir.BinaryExpr)
+		if !sign && ir.IsSmallIntConst(n.Y) {
+			v := ir.Int64Val(n.Y)
+			for bits > 0 && v >= 2 {
+				bits--
+				v >>= 1
+			}
+		}
+
+	case ir.ORSH:
+		n := n.(*ir.BinaryExpr)
+		if !sign && ir.IsSmallIntConst(n.Y) {
+			v := ir.Int64Val(n.Y)
+			if v > int64(bits) {
+				return true
+			}
+			bits -= int32(v)
+		}
+	}
+
+	if !sign && bits <= 62 && 1<<uint(bits) <= max {
+		return true
+	}
+
+	return false
+}
diff --git a/src/cmd/compile/internal/walk/stmt.go b/src/cmd/compile/internal/walk/stmt.go
new file mode 100644
--- /dev/null
+++ b/src/cmd/compile/internal/walk/stmt.go
+// The result of walkStmt MUST be assigned back to n, e.g.
+// 	n.Left = walkStmt(n.Left)
+func walkStmt(n ir.Node) ir.Node {
+	if n == nil {
+		return n
+	}
+
+	ir.SetPos(n)
+
+	walkStmtList(n.Init())
+
+	switch n.Op() {
+	default:
+		if n.Op() == ir.ONAME {
+			n := n.(*ir.Name)
+			base.Errorf("%v is not a top level statement", n.Sym())
+		} else {
+			base.Errorf("%v is not a top level statement", n.Op())
+		}
+		ir.Dump("nottop", n)
+		return n
+
+	case ir.OAS,
+		ir.OASOP,
+		ir.OAS2,
+		ir.OAS2DOTTYPE,
+		ir.OAS2RECV,
+		ir.OAS2FUNC,
+		ir.OAS2MAPR,
+		ir.OCLOSE,
+		ir.OCOPY,
+		ir.OCALLMETH,
+		ir.OCALLINTER,
+		ir.OCALL,
+		ir.OCALLFUNC,
+		ir.ODELETE,
+		ir.OSEND,
+		ir.OPRINT,
+		ir.OPRINTN,
+		ir.OPANIC,
+		ir.ORECOVER,
+		ir.OGETG:
+		if n.Typecheck() == 0 {
+			base.Fatalf("missing typecheck: %+v", n)
+		}
+		init := n.Init()
+		n.PtrInit().Set(nil)
+		n = walkExpr(n, &init)
+		if n.Op() == ir.ONAME {
+			// copy rewrote to a statement list and a temp for the length.
+			// Throw away the temp to avoid plain values as statements.
+			n = ir.NewBlockStmt(n.Pos(), init)
+			init.Set(nil)
+		}
+		if len(init) > 0 {
+			switch n.Op() {
+			case ir.OAS, ir.OAS2, ir.OBLOCK:
+				n.PtrInit().Prepend(init...)
+
+			default:
+				init.Append(n)
+				n = ir.NewBlockStmt(n.Pos(), init)
+			}
+		}
+		return n
+
+	// special case for a receive where we throw away
+	// the value received.
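+	// A bare <-ch statement compiles to a runtime.chanrecv1 call with a
+	// nil element pointer; see walkRecv in walk.go.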
+ case ir.ORECV: + n := n.(*ir.UnaryExpr) + return walkRecv(n) + + case ir.OBREAK, + ir.OCONTINUE, + ir.OFALL, + ir.OGOTO, + ir.OLABEL, + ir.ODCLCONST, + ir.ODCLTYPE, + ir.OCHECKNIL, + ir.OVARDEF, + ir.OVARKILL, + ir.OVARLIVE: + return n + + case ir.ODCL: + n := n.(*ir.Decl) + return walkDecl(n) + + case ir.OBLOCK: + n := n.(*ir.BlockStmt) + walkStmtList(n.List) + return n + + case ir.OCASE: + base.Errorf("case statement out of place") + panic("unreachable") + + case ir.ODEFER: + n := n.(*ir.GoDeferStmt) + ir.CurFunc.SetHasDefer(true) + ir.CurFunc.NumDefers++ + if ir.CurFunc.NumDefers > maxOpenDefers { + // Don't allow open-coded defers if there are more than + // 8 defers in the function, since we use a single + // byte to record active defers. + ir.CurFunc.SetOpenCodedDeferDisallowed(true) + } + if n.Esc() != ir.EscNever { + // If n.Esc is not EscNever, then this defer occurs in a loop, + // so open-coded defers cannot be used in this function. + ir.CurFunc.SetOpenCodedDeferDisallowed(true) + } + fallthrough + case ir.OGO: + n := n.(*ir.GoDeferStmt) + return walkGoDefer(n) + + case ir.OFOR, ir.OFORUNTIL: + n := n.(*ir.ForStmt) + return walkFor(n) + + case ir.OIF: + n := n.(*ir.IfStmt) + return walkIf(n) + + case ir.ORETURN: + n := n.(*ir.ReturnStmt) + return walkReturn(n) + + case ir.ORETJMP: + n := n.(*ir.BranchStmt) + return n + + case ir.OINLMARK: + n := n.(*ir.InlineMarkStmt) + return n + + case ir.OSELECT: + n := n.(*ir.SelectStmt) + walkSelect(n) + return n + + case ir.OSWITCH: + n := n.(*ir.SwitchStmt) + walkSwitch(n) + return n + + case ir.ORANGE: + n := n.(*ir.RangeStmt) + return walkRange(n) + } + + // No return! Each case must return (or panic), + // to avoid confusion about what gets returned + // in the presence of type assertions. +} + +func walkStmtList(s []ir.Node) { + for i := range s { + s[i] = walkStmt(s[i]) + } +} + +// walkDecl walks an ODCL node. +func walkDecl(n *ir.Decl) ir.Node { + v := n.X.(*ir.Name) + if v.Class_ == ir.PAUTOHEAP { + if base.Flag.CompilingRuntime { + base.Errorf("%v escapes to heap, not allowed in runtime", v) + } + nn := ir.NewAssignStmt(base.Pos, v.Name().Heapaddr, callnew(v.Type())) + nn.Def = true + return walkStmt(typecheck.Stmt(nn)) + } + return n +} + +// walkFor walks an OFOR or OFORUNTIL node. +func walkFor(n *ir.ForStmt) ir.Node { + if n.Cond != nil { + walkStmtList(n.Cond.Init()) + init := n.Cond.Init() + n.Cond.PtrInit().Set(nil) + n.Cond = walkExpr(n.Cond, &init) + n.Cond = ir.InitExpr(init, n.Cond) + } + + n.Post = walkStmt(n.Post) + if n.Op() == ir.OFORUNTIL { + walkStmtList(n.Late) + } + walkStmtList(n.Body) + return n +} + +// walkGoDefer walks an OGO or ODEFER node. +func walkGoDefer(n *ir.GoDeferStmt) ir.Node { + var init ir.Nodes + switch call := n.Call; call.Op() { + case ir.OPRINT, ir.OPRINTN: + call := call.(*ir.CallExpr) + n.Call = wrapCall(call, &init) + + case ir.ODELETE: + call := call.(*ir.CallExpr) + if mapfast(call.Args[0].Type()) == mapslow { + n.Call = wrapCall(call, &init) + } else { + n.Call = walkExpr(call, &init) + } + + case ir.OCOPY: + call := call.(*ir.BinaryExpr) + n.Call = walkCopy(call, &init, true) + + case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: + call := call.(*ir.CallExpr) + if len(call.Body) > 0 { + n.Call = wrapCall(call, &init) + } else { + n.Call = walkExpr(call, &init) + } + + default: + n.Call = walkExpr(call, &init) + } + if len(init) > 0 { + init.Append(n) + return ir.NewBlockStmt(n.Pos(), init) + } + return n +} + +// walkIf walks an OIF node. 
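+// The condition's side effects are hoisted into the statement's own
+// init list; both branches are then walked in place.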
+func walkIf(n *ir.IfStmt) ir.Node { + n.Cond = walkExpr(n.Cond, n.PtrInit()) + walkStmtList(n.Body) + walkStmtList(n.Else) + return n +} + +// The result of wrapCall MUST be assigned back to n, e.g. +// n.Left = wrapCall(n.Left, init) +func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node { + if len(n.Init()) != 0 { + walkStmtList(n.Init()) + init.Append(n.PtrInit().Take()...) + } + + isBuiltinCall := n.Op() != ir.OCALLFUNC && n.Op() != ir.OCALLMETH && n.Op() != ir.OCALLINTER + + // Turn f(a, b, []T{c, d, e}...) back into f(a, b, c, d, e). + if !isBuiltinCall && n.IsDDD { + last := len(n.Args) - 1 + if va := n.Args[last]; va.Op() == ir.OSLICELIT { + va := va.(*ir.CompLitExpr) + n.Args.Set(append(n.Args[:last], va.List...)) + n.IsDDD = false + } + } + + // origArgs keeps track of what argument is uintptr-unsafe/unsafe-uintptr conversion. + origArgs := make([]ir.Node, len(n.Args)) + var funcArgs []*ir.Field + for i, arg := range n.Args { + s := typecheck.LookupNum("a", i) + if !isBuiltinCall && arg.Op() == ir.OCONVNOP && arg.Type().IsUintptr() && arg.(*ir.ConvExpr).X.Type().IsUnsafePtr() { + origArgs[i] = arg + arg = arg.(*ir.ConvExpr).X + n.Args[i] = arg + } + funcArgs = append(funcArgs, ir.NewField(base.Pos, s, nil, arg.Type())) + } + t := ir.NewFuncType(base.Pos, nil, funcArgs, nil) + + wrapCall_prgen++ + sym := typecheck.LookupNum("wrap·", wrapCall_prgen) + fn := typecheck.DeclFunc(sym, t) + + args := ir.ParamNames(t.Type()) + for i, origArg := range origArgs { + if origArg == nil { + continue + } + args[i] = ir.NewConvExpr(base.Pos, origArg.Op(), origArg.Type(), args[i]) + } + call := ir.NewCallExpr(base.Pos, n.Op(), n.X, args) + if !isBuiltinCall { + call.SetOp(ir.OCALL) + call.IsDDD = n.IsDDD + } + fn.Body = []ir.Node{call} + + typecheck.FinishFuncBody() + + typecheck.Func(fn) + typecheck.Stmts(fn.Body) + typecheck.Target.Decls = append(typecheck.Target.Decls, fn) + + call = ir.NewCallExpr(base.Pos, ir.OCALL, fn.Nname, n.Args) + return walkExpr(typecheck.Stmt(call), init) +} diff --git a/src/cmd/compile/internal/walk/subr.go b/src/cmd/compile/internal/walk/subr.go deleted file mode 100644 index bc65432d4992a..0000000000000 --- a/src/cmd/compile/internal/walk/subr.go +++ /dev/null @@ -1,338 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package walk - -import ( - "fmt" - - "cmd/compile/internal/base" - "cmd/compile/internal/ir" - "cmd/compile/internal/ssagen" - "cmd/compile/internal/typecheck" - "cmd/compile/internal/types" - "cmd/internal/src" -) - -// backingArrayPtrLen extracts the pointer and length from a slice or string. -// This constructs two nodes referring to n, so n must be a cheapexpr. -func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) { - var init ir.Nodes - c := cheapexpr(n, &init) - if c != n || len(init) != 0 { - base.Fatalf("backingArrayPtrLen not cheap: %v", n) - } - ptr = ir.NewUnaryExpr(base.Pos, ir.OSPTR, n) - if n.Type().IsString() { - ptr.SetType(types.Types[types.TUINT8].PtrTo()) - } else { - ptr.SetType(n.Type().Elem().PtrTo()) - } - length = ir.NewUnaryExpr(base.Pos, ir.OLEN, n) - length.SetType(types.Types[types.TINT]) - return ptr, length -} - -// updateHasCall checks whether expression n contains any function -// calls and sets the n.HasCall flag if so. 
-func updateHasCall(n ir.Node) { - if n == nil { - return - } - n.SetHasCall(calcHasCall(n)) -} - -func calcHasCall(n ir.Node) bool { - if len(n.Init()) != 0 { - // TODO(mdempsky): This seems overly conservative. - return true - } - - switch n.Op() { - default: - base.Fatalf("calcHasCall %+v", n) - panic("unreachable") - - case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OTYPE, ir.ONAMEOFFSET: - if n.HasCall() { - base.Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n) - } - return false - case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: - return true - case ir.OANDAND, ir.OOROR: - // hard with instrumented code - n := n.(*ir.LogicalExpr) - if base.Flag.Cfg.Instrumenting { - return true - } - return n.X.HasCall() || n.Y.HasCall() - case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR, - ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODIV, ir.OMOD: - // These ops might panic, make sure they are done - // before we start marshaling args for a call. See issue 16760. - return true - - // When using soft-float, these ops might be rewritten to function calls - // so we ensure they are evaluated first. - case ir.OADD, ir.OSUB, ir.OMUL: - n := n.(*ir.BinaryExpr) - if ssagen.Arch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) { - return true - } - return n.X.HasCall() || n.Y.HasCall() - case ir.ONEG: - n := n.(*ir.UnaryExpr) - if ssagen.Arch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) { - return true - } - return n.X.HasCall() - case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT: - n := n.(*ir.BinaryExpr) - if ssagen.Arch.SoftFloat && (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()]) { - return true - } - return n.X.HasCall() || n.Y.HasCall() - case ir.OCONV: - n := n.(*ir.ConvExpr) - if ssagen.Arch.SoftFloat && ((types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) || (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()])) { - return true - } - return n.X.HasCall() - - case ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOPY, ir.OCOMPLEX, ir.OEFACE: - n := n.(*ir.BinaryExpr) - return n.X.HasCall() || n.Y.HasCall() - - case ir.OAS: - n := n.(*ir.AssignStmt) - return n.X.HasCall() || n.Y != nil && n.Y.HasCall() - - case ir.OADDR: - n := n.(*ir.AddrExpr) - return n.X.HasCall() - case ir.OPAREN: - n := n.(*ir.ParenExpr) - return n.X.HasCall() - case ir.OBITNOT, ir.ONOT, ir.OPLUS, ir.ORECV, - ir.OALIGNOF, ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.ONEW, - ir.OOFFSETOF, ir.OPANIC, ir.OREAL, ir.OSIZEOF, - ir.OCHECKNIL, ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.ONEWOBJ, ir.OSPTR, ir.OVARDEF, ir.OVARKILL, ir.OVARLIVE: - n := n.(*ir.UnaryExpr) - return n.X.HasCall() - case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER: - n := n.(*ir.SelectorExpr) - return n.X.HasCall() - - case ir.OGETG, ir.OCLOSUREREAD, ir.OMETHEXPR: - return false - - // TODO(rsc): These look wrong in various ways but are what calcHasCall has always done. - case ir.OADDSTR: - // TODO(rsc): This used to check left and right, which are not part of OADDSTR. - return false - case ir.OBLOCK: - // TODO(rsc): Surely the block's statements matter. - return false - case ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.OBYTES2STRTMP, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2BYTESTMP, ir.OSTR2RUNES, ir.ORUNESTR: - // TODO(rsc): Some conversions are themselves calls, no? - n := n.(*ir.ConvExpr) - return n.X.HasCall() - case ir.ODOTTYPE2: - // TODO(rsc): Shouldn't this be up with ODOTTYPE above? 
- n := n.(*ir.TypeAssertExpr) - return n.X.HasCall() - case ir.OSLICEHEADER: - // TODO(rsc): What about len and cap? - n := n.(*ir.SliceHeaderExpr) - return n.Ptr.HasCall() - case ir.OAS2DOTTYPE, ir.OAS2FUNC: - // TODO(rsc): Surely we need to check List and Rlist. - return false - } -} - -func badtype(op ir.Op, tl, tr *types.Type) { - var s string - if tl != nil { - s += fmt.Sprintf("\n\t%v", tl) - } - if tr != nil { - s += fmt.Sprintf("\n\t%v", tr) - } - - // common mistake: *struct and *interface. - if tl != nil && tr != nil && tl.IsPtr() && tr.IsPtr() { - if tl.Elem().IsStruct() && tr.Elem().IsInterface() { - s += "\n\t(*struct vs *interface)" - } else if tl.Elem().IsInterface() && tr.Elem().IsStruct() { - s += "\n\t(*interface vs *struct)" - } - } - - base.Errorf("illegal types for operand: %v%s", op, s) -} - -// brcom returns !(op). -// For example, brcom(==) is !=. -func brcom(op ir.Op) ir.Op { - switch op { - case ir.OEQ: - return ir.ONE - case ir.ONE: - return ir.OEQ - case ir.OLT: - return ir.OGE - case ir.OGT: - return ir.OLE - case ir.OLE: - return ir.OGT - case ir.OGE: - return ir.OLT - } - base.Fatalf("brcom: no com for %v\n", op) - return op -} - -// brrev returns reverse(op). -// For example, Brrev(<) is >. -func brrev(op ir.Op) ir.Op { - switch op { - case ir.OEQ: - return ir.OEQ - case ir.ONE: - return ir.ONE - case ir.OLT: - return ir.OGT - case ir.OGT: - return ir.OLT - case ir.OLE: - return ir.OGE - case ir.OGE: - return ir.OLE - } - base.Fatalf("brrev: no rev for %v\n", op) - return op -} - -// return side effect-free n, appending side effects to init. -// result is assignable if n is. -func safeexpr(n ir.Node, init *ir.Nodes) ir.Node { - if n == nil { - return nil - } - - if len(n.Init()) != 0 { - walkstmtlist(n.Init()) - init.Append(n.PtrInit().Take()...) - } - - switch n.Op() { - case ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET: - return n - - case ir.OLEN, ir.OCAP: - n := n.(*ir.UnaryExpr) - l := safeexpr(n.X, init) - if l == n.X { - return n - } - a := ir.Copy(n).(*ir.UnaryExpr) - a.X = l - return walkexpr(typecheck.Expr(a), init) - - case ir.ODOT, ir.ODOTPTR: - n := n.(*ir.SelectorExpr) - l := safeexpr(n.X, init) - if l == n.X { - return n - } - a := ir.Copy(n).(*ir.SelectorExpr) - a.X = l - return walkexpr(typecheck.Expr(a), init) - - case ir.ODEREF: - n := n.(*ir.StarExpr) - l := safeexpr(n.X, init) - if l == n.X { - return n - } - a := ir.Copy(n).(*ir.StarExpr) - a.X = l - return walkexpr(typecheck.Expr(a), init) - - case ir.OINDEX, ir.OINDEXMAP: - n := n.(*ir.IndexExpr) - l := safeexpr(n.X, init) - r := safeexpr(n.Index, init) - if l == n.X && r == n.Index { - return n - } - a := ir.Copy(n).(*ir.IndexExpr) - a.X = l - a.Index = r - return walkexpr(typecheck.Expr(a), init) - - case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT: - n := n.(*ir.CompLitExpr) - if isStaticCompositeLiteral(n) { - return n - } - } - - // make a copy; must not be used as an lvalue - if ir.IsAssignable(n) { - base.Fatalf("missing lvalue case in safeexpr: %v", n) - } - return cheapexpr(n, init) -} - -func copyexpr(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node { - l := typecheck.Temp(t) - appendWalkStmt(init, ir.NewAssignStmt(base.Pos, l, n)) - return l -} - -// return side-effect free and cheap n, appending side effects to init. -// result may not be assignable. 
-func cheapexpr(n ir.Node, init *ir.Nodes) ir.Node { - switch n.Op() { - case ir.ONAME, ir.OLITERAL, ir.ONIL: - return n - } - - return copyexpr(n, n.Type(), init) -} - -// itabType loads the _type field from a runtime.itab struct. -func itabType(itab ir.Node) ir.Node { - typ := ir.NewSelectorExpr(base.Pos, ir.ODOTPTR, itab, nil) - typ.SetType(types.NewPtr(types.Types[types.TUINT8])) - typ.SetTypecheck(1) - typ.Offset = int64(types.PtrSize) // offset of _type in runtime.itab - typ.SetBounded(true) // guaranteed not to fault - return typ -} - -// ifaceData loads the data field from an interface. -// The concrete type must be known to have type t. -// It follows the pointer if !isdirectiface(t). -func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node { - if t.IsInterface() { - base.Fatalf("ifaceData interface: %v", t) - } - ptr := ir.NewUnaryExpr(pos, ir.OIDATA, n) - if types.IsDirectIface(t) { - ptr.SetType(t) - ptr.SetTypecheck(1) - return ptr - } - ptr.SetType(types.NewPtr(t)) - ptr.SetTypecheck(1) - ind := ir.NewStarExpr(pos, ptr) - ind.SetType(t) - ind.SetTypecheck(1) - ind.SetBounded(true) - return ind -} diff --git a/src/cmd/compile/internal/walk/switch.go b/src/cmd/compile/internal/walk/switch.go index 9becd0e404ff5..360086ec79723 100644 --- a/src/cmd/compile/internal/walk/switch.go +++ b/src/cmd/compile/internal/walk/switch.go @@ -16,23 +16,23 @@ import ( "cmd/internal/src" ) -// walkswitch walks a switch statement. -func walkswitch(sw *ir.SwitchStmt) { +// walkSwitch walks a switch statement. +func walkSwitch(sw *ir.SwitchStmt) { // Guard against double walk, see #25776. if len(sw.Cases) == 0 && len(sw.Compiled) > 0 { return // Was fatal, but eliminating every possible source of double-walking is hard } if sw.Tag != nil && sw.Tag.Op() == ir.OTYPESW { - walkTypeSwitch(sw) + walkSwitchType(sw) } else { - walkExprSwitch(sw) + walkSwitchExpr(sw) } } -// walkExprSwitch generates an AST implementing sw. sw is an +// walkSwitchExpr generates an AST implementing sw. sw is an // expression switch. -func walkExprSwitch(sw *ir.SwitchStmt) { +func walkSwitchExpr(sw *ir.SwitchStmt) { lno := ir.SetPos(sw) cond := sw.Tag @@ -57,9 +57,9 @@ func walkExprSwitch(sw *ir.SwitchStmt) { cond.SetOp(ir.OBYTES2STRTMP) } - cond = walkexpr(cond, sw.PtrInit()) + cond = walkExpr(cond, sw.PtrInit()) if cond.Op() != ir.OLITERAL && cond.Op() != ir.ONIL { - cond = copyexpr(cond, cond.Type(), &sw.Compiled) + cond = copyExpr(cond, cond.Type(), &sw.Compiled) } base.Pos = lno @@ -107,7 +107,7 @@ func walkExprSwitch(sw *ir.SwitchStmt) { s.Emit(&sw.Compiled) sw.Compiled.Append(defaultGoto) sw.Compiled.Append(body.Take()...) - walkstmtlist(sw.Compiled) + walkStmtList(sw.Compiled) } // An exprSwitch walks an expression switch. @@ -287,15 +287,15 @@ func endsInFallthrough(stmts []ir.Node) (bool, src.XPos) { return stmts[i].Op() == ir.OFALL, stmts[i].Pos() } -// walkTypeSwitch generates an AST that implements sw, where sw is a +// walkSwitchType generates an AST that implements sw, where sw is a // type switch. -func walkTypeSwitch(sw *ir.SwitchStmt) { +func walkSwitchType(sw *ir.SwitchStmt) { var s typeSwitch s.facename = sw.Tag.(*ir.TypeSwitchGuard).X sw.Tag = nil - s.facename = walkexpr(s.facename, sw.PtrInit()) - s.facename = copyexpr(s.facename, s.facename.Type(), &sw.Compiled) + s.facename = walkExpr(s.facename, sw.PtrInit()) + s.facename = copyExpr(s.facename, s.facename.Type(), &sw.Compiled) s.okname = typecheck.Temp(types.Types[types.TBOOL]) // Get interface descriptor word. 
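The dispatch walkSwitchType builds in the hunks around here corresponds to an ordinary source-level type switch. For reference, this is the kind of statement being lowered into a nil check plus itab/type-hash comparisons and per-case assertions; it is plain user code with illustrative names, not compiler API:

	package main

	import "fmt"

	func describe(v interface{}) string {
		switch x := v.(type) {
		case nil:
			return "nil"
		case int:
			return fmt.Sprintf("int %d", x)
		case fmt.Stringer:
			return "Stringer " + x.String()
		default:
			return fmt.Sprintf("%T", x)
		}
	}

	func main() {
		fmt.Println(describe(nil), describe(7), describe(1.5))
	}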
@@ -327,7 +327,7 @@ func walkTypeSwitch(sw *ir.SwitchStmt) { dotHash.Offset = int64(2 * types.PtrSize) // offset of hash in runtime.itab } dotHash.SetBounded(true) // guaranteed not to fault - s.hashname = copyexpr(dotHash, dotHash.Type(), &sw.Compiled) + s.hashname = copyExpr(dotHash, dotHash.Type(), &sw.Compiled) br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil) var defaultGoto, nilGoto ir.Node @@ -409,7 +409,7 @@ func walkTypeSwitch(sw *ir.SwitchStmt) { sw.Compiled.Append(defaultGoto) sw.Compiled.Append(body.Take()...) - walkstmtlist(sw.Compiled) + walkStmtList(sw.Compiled) } // A typeSwitch walks a type switch. diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go index cb3018a4ac454..9dda367b4d1eb 100644 --- a/src/cmd/compile/internal/walk/walk.go +++ b/src/cmd/compile/internal/walk/walk.go @@ -5,25 +5,17 @@ package walk import ( - "encoding/binary" "errors" "fmt" - "go/constant" - "go/token" "strings" "cmd/compile/internal/base" - "cmd/compile/internal/escape" "cmd/compile/internal/ir" "cmd/compile/internal/reflectdata" "cmd/compile/internal/ssagen" - "cmd/compile/internal/staticdata" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" - "cmd/internal/obj" - "cmd/internal/objabi" "cmd/internal/src" - "cmd/internal/sys" ) // The constant is known to runtime. @@ -79,7 +71,7 @@ func Walk(fn *ir.Func) { if base.Errors() > errorsBefore { return } - walkstmtlist(ir.CurFunc.Body) + walkStmtList(ir.CurFunc.Body) if base.Flag.W != 0 { s := fmt.Sprintf("after walk %v", ir.CurFunc.Sym()) ir.DumpList(s, ir.CurFunc.Body) @@ -97,12 +89,6 @@ func Walk(fn *ir.Func) { } } -func walkstmtlist(s []ir.Node) { - for i := range s { - s[i] = walkstmt(s[i]) - } -} - func paramoutheap(fn *ir.Func) bool { for _, ln := range fn.Dcl { switch ln.Class_ { @@ -120,3596 +106,257 @@ func paramoutheap(fn *ir.Func) bool { return false } -// The result of walkstmt MUST be assigned back to n, e.g. -// n.Left = walkstmt(n.Left) -func walkstmt(n ir.Node) ir.Node { - if n == nil { - return n +// walkRecv walks an ORECV node. +func walkRecv(n *ir.UnaryExpr) ir.Node { + if n.Typecheck() == 0 { + base.Fatalf("missing typecheck: %+v", n) } + init := n.Init() + n.PtrInit().Set(nil) - ir.SetPos(n) + n.X = walkExpr(n.X, &init) + call := walkExpr(mkcall1(chanfn("chanrecv1", 2, n.X.Type()), nil, &init, n.X, typecheck.NodNil()), &init) + return ir.InitExpr(init, call) +} - walkstmtlist(n.Init()) +func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt { + if n.Op() != ir.OAS { + base.Fatalf("convas: not OAS %v", n.Op()) + } + defer updateHasCall(n) - switch n.Op() { - default: - if n.Op() == ir.ONAME { - n := n.(*ir.Name) - base.Errorf("%v is not a top level statement", n.Sym()) - } else { - base.Errorf("%v is not a top level statement", n.Op()) - } - ir.Dump("nottop", n) - return n + n.SetTypecheck(1) - case ir.OAS, - ir.OASOP, - ir.OAS2, - ir.OAS2DOTTYPE, - ir.OAS2RECV, - ir.OAS2FUNC, - ir.OAS2MAPR, - ir.OCLOSE, - ir.OCOPY, - ir.OCALLMETH, - ir.OCALLINTER, - ir.OCALL, - ir.OCALLFUNC, - ir.ODELETE, - ir.OSEND, - ir.OPRINT, - ir.OPRINTN, - ir.OPANIC, - ir.ORECOVER, - ir.OGETG: - if n.Typecheck() == 0 { - base.Fatalf("missing typecheck: %+v", n) - } - init := n.Init() - n.PtrInit().Set(nil) - n = walkexpr(n, &init) - if n.Op() == ir.ONAME { - // copy rewrote to a statement list and a temp for the length. - // Throw away the temp to avoid plain values as statements. 
- n = ir.NewBlockStmt(n.Pos(), init) - init.Set(nil) - } - if len(init) > 0 { - switch n.Op() { - case ir.OAS, ir.OAS2, ir.OBLOCK: - n.PtrInit().Prepend(init...) - - default: - init.Append(n) - n = ir.NewBlockStmt(n.Pos(), init) - } - } + if n.X == nil || n.Y == nil { return n + } - // special case for a receive where we throw away - // the value received. - case ir.ORECV: - n := n.(*ir.UnaryExpr) - if n.Typecheck() == 0 { - base.Fatalf("missing typecheck: %+v", n) - } - init := n.Init() - n.PtrInit().Set(nil) - - n.X = walkexpr(n.X, &init) - call := walkexpr(mkcall1(chanfn("chanrecv1", 2, n.X.Type()), nil, &init, n.X, typecheck.NodNil()), &init) - return ir.InitExpr(init, call) - - case ir.OBREAK, - ir.OCONTINUE, - ir.OFALL, - ir.OGOTO, - ir.OLABEL, - ir.ODCLCONST, - ir.ODCLTYPE, - ir.OCHECKNIL, - ir.OVARDEF, - ir.OVARKILL, - ir.OVARLIVE: + lt := n.X.Type() + rt := n.Y.Type() + if lt == nil || rt == nil { return n + } - case ir.ODCL: - n := n.(*ir.Decl) - v := n.X.(*ir.Name) - if v.Class_ == ir.PAUTOHEAP { - if base.Flag.CompilingRuntime { - base.Errorf("%v escapes to heap, not allowed in runtime", v) - } - nn := ir.NewAssignStmt(base.Pos, v.Name().Heapaddr, callnew(v.Type())) - nn.Def = true - return walkstmt(typecheck.Stmt(nn)) - } + if ir.IsBlank(n.X) { + n.Y = typecheck.DefaultLit(n.Y, nil) return n + } - case ir.OBLOCK: - n := n.(*ir.BlockStmt) - walkstmtlist(n.List) - return n + if !types.Identical(lt, rt) { + n.Y = typecheck.AssignConv(n.Y, lt, "assignment") + n.Y = walkExpr(n.Y, init) + } + types.CalcSize(n.Y.Type()) - case ir.OCASE: - base.Errorf("case statement out of place") - panic("unreachable") + return n +} + +var stop = errors.New("stop") - case ir.ODEFER: - n := n.(*ir.GoDeferStmt) - ir.CurFunc.SetHasDefer(true) - ir.CurFunc.NumDefers++ - if ir.CurFunc.NumDefers > maxOpenDefers { - // Don't allow open-coded defers if there are more than - // 8 defers in the function, since we use a single - // byte to record active defers. - ir.CurFunc.SetOpenCodedDeferDisallowed(true) +// paramstoheap returns code to allocate memory for heap-escaped parameters +// and to copy non-result parameters' values from the stack. +func paramstoheap(params *types.Type) []ir.Node { + var nn []ir.Node + for _, t := range params.Fields().Slice() { + v := ir.AsNode(t.Nname) + if v != nil && v.Sym() != nil && strings.HasPrefix(v.Sym().Name, "~r") { // unnamed result + v = nil } - if n.Esc() != ir.EscNever { - // If n.Esc is not EscNever, then this defer occurs in a loop, - // so open-coded defers cannot be used in this function. 
- ir.CurFunc.SetOpenCodedDeferDisallowed(true) + if v == nil { + continue } - fallthrough - case ir.OGO: - n := n.(*ir.GoDeferStmt) - var init ir.Nodes - switch call := n.Call; call.Op() { - case ir.OPRINT, ir.OPRINTN: - call := call.(*ir.CallExpr) - n.Call = wrapCall(call, &init) - - case ir.ODELETE: - call := call.(*ir.CallExpr) - if mapfast(call.Args[0].Type()) == mapslow { - n.Call = wrapCall(call, &init) - } else { - n.Call = walkexpr(call, &init) - } - case ir.OCOPY: - call := call.(*ir.BinaryExpr) - n.Call = copyany(call, &init, true) - - case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: - call := call.(*ir.CallExpr) - if len(call.Body) > 0 { - n.Call = wrapCall(call, &init) - } else { - n.Call = walkexpr(call, &init) + if stackcopy := v.Name().Stackcopy; stackcopy != nil { + nn = append(nn, walkStmt(ir.NewDecl(base.Pos, ir.ODCL, v))) + if stackcopy.Class_ == ir.PPARAM { + nn = append(nn, walkStmt(typecheck.Stmt(ir.NewAssignStmt(base.Pos, v, stackcopy)))) } - - default: - n.Call = walkexpr(call, &init) } - if len(init) > 0 { - init.Append(n) - return ir.NewBlockStmt(n.Pos(), init) - } - return n + } - case ir.OFOR, ir.OFORUNTIL: - n := n.(*ir.ForStmt) - if n.Cond != nil { - walkstmtlist(n.Cond.Init()) - init := n.Cond.Init() - n.Cond.PtrInit().Set(nil) - n.Cond = walkexpr(n.Cond, &init) - n.Cond = ir.InitExpr(init, n.Cond) - } + return nn +} - n.Post = walkstmt(n.Post) - if n.Op() == ir.OFORUNTIL { - walkstmtlist(n.Late) +// zeroResults zeros the return values at the start of the function. +// We need to do this very early in the function. Defer might stop a +// panic and show the return values as they exist at the time of +// panic. For precise stacks, the garbage collector assumes results +// are always live, so we need to zero them before any allocations, +// even allocations to move params/results to the heap. +// The generated code is added to Curfn's Enter list. +func zeroResults() { + for _, f := range ir.CurFunc.Type().Results().Fields().Slice() { + v := ir.AsNode(f.Nname) + if v != nil && v.Name().Heapaddr != nil { + // The local which points to the return value is the + // thing that needs zeroing. This is already handled + // by a Needzero annotation in plive.go:livenessepilogue. + continue } - walkstmtlist(n.Body) - return n - - case ir.OIF: - n := n.(*ir.IfStmt) - n.Cond = walkexpr(n.Cond, n.PtrInit()) - walkstmtlist(n.Body) - walkstmtlist(n.Else) - return n - - case ir.ORETURN: - n := n.(*ir.ReturnStmt) - ir.CurFunc.NumReturns++ - if len(n.Results) == 0 { - return n + if ir.IsParamHeapCopy(v) { + // TODO(josharian/khr): Investigate whether we can switch to "continue" here, + // and document more in either case. + // In the review of CL 114797, Keith wrote (roughly): + // I don't think the zeroing below matters. + // The stack return value will never be marked as live anywhere in the function. + // It is not written to until deferreturn returns. 
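+			// For now, fall through and zero the stack copy like any
+			// other result (v is replaced by its stack copy just below).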
+ v = v.Name().Stackcopy } - if (ir.HasNamedResults(ir.CurFunc) && len(n.Results) > 1) || paramoutheap(ir.CurFunc) { - // assign to the function out parameters, - // so that ascompatee can fix up conflicts - var rl []ir.Node - - for _, ln := range ir.CurFunc.Dcl { - cl := ln.Class_ - if cl == ir.PAUTO || cl == ir.PAUTOHEAP { - break - } - if cl == ir.PPARAMOUT { - var ln ir.Node = ln - if ir.IsParamStackCopy(ln) { - ln = walkexpr(typecheck.Expr(ir.NewStarExpr(base.Pos, ln.Name().Heapaddr)), nil) - } - rl = append(rl, ln) - } - } - - if got, want := len(n.Results), len(rl); got != want { - // order should have rewritten multi-value function calls - // with explicit OAS2FUNC nodes. - base.Fatalf("expected %v return arguments, have %v", want, got) - } - - // move function calls out, to make ascompatee's job easier. - walkexprlistsafe(n.Results, n.PtrInit()) + // Zero the stack location containing f. + ir.CurFunc.Enter.Append(ir.NewAssignStmt(ir.CurFunc.Pos(), v, nil)) + } +} - n.Results.Set(ascompatee(n.Op(), rl, n.Results, n.PtrInit())) - return n +// returnsfromheap returns code to copy values for heap-escaped parameters +// back to the stack. +func returnsfromheap(params *types.Type) []ir.Node { + var nn []ir.Node + for _, t := range params.Fields().Slice() { + v := ir.AsNode(t.Nname) + if v == nil { + continue } - walkexprlist(n.Results, n.PtrInit()) - - // For each return parameter (lhs), assign the corresponding result (rhs). - lhs := ir.CurFunc.Type().Results() - rhs := n.Results - res := make([]ir.Node, lhs.NumFields()) - for i, nl := range lhs.FieldSlice() { - nname := ir.AsNode(nl.Nname) - if ir.IsParamHeapCopy(nname) { - nname = nname.Name().Stackcopy - } - a := ir.NewAssignStmt(base.Pos, nname, rhs[i]) - res[i] = convas(a, n.PtrInit()) + if stackcopy := v.Name().Stackcopy; stackcopy != nil && stackcopy.Class_ == ir.PPARAMOUT { + nn = append(nn, walkStmt(typecheck.Stmt(ir.NewAssignStmt(base.Pos, stackcopy, v)))) } - n.Results.Set(res) - return n - - case ir.ORETJMP: - n := n.(*ir.BranchStmt) - return n - - case ir.OINLMARK: - n := n.(*ir.InlineMarkStmt) - return n - - case ir.OSELECT: - n := n.(*ir.SelectStmt) - walkselect(n) - return n - - case ir.OSWITCH: - n := n.(*ir.SwitchStmt) - walkswitch(n) - return n - - case ir.ORANGE: - n := n.(*ir.RangeStmt) - return walkrange(n) } - // No return! Each case must return (or panic), - // to avoid confusion about what gets returned - // in the presence of type assertions. + return nn } -// walk the whole tree of the body of an -// expression or simple statement. -// the types expressions are calculated. -// compile-time constants are evaluated. -// complex side effects like statements are appended to init -func walkexprlist(s []ir.Node, init *ir.Nodes) { - for i := range s { - s[i] = walkexpr(s[i], init) - } +// heapmoves generates code to handle migrating heap-escaped parameters +// between the stack and the heap. The generated code is added to Curfn's +// Enter and Exit lists. +func heapmoves() { + lno := base.Pos + base.Pos = ir.CurFunc.Pos() + nn := paramstoheap(ir.CurFunc.Type().Recvs()) + nn = append(nn, paramstoheap(ir.CurFunc.Type().Params())...) + nn = append(nn, paramstoheap(ir.CurFunc.Type().Results())...) + ir.CurFunc.Enter.Append(nn...) + base.Pos = ir.CurFunc.Endlineno + ir.CurFunc.Exit.Append(returnsfromheap(ir.CurFunc.Type().Results())...) 
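+	// Restore the position saved on entry, so later passes are not
+	// attributed to the function's end line.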
+ base.Pos = lno } -func walkexprlistsafe(s []ir.Node, init *ir.Nodes) { - for i, n := range s { - s[i] = safeexpr(n, init) - s[i] = walkexpr(s[i], init) +func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) *ir.CallExpr { + if fn.Type() == nil || fn.Type().Kind() != types.TFUNC { + base.Fatalf("mkcall %v %v", fn, fn.Type()) } -} -func walkexprlistcheap(s []ir.Node, init *ir.Nodes) { - for i, n := range s { - s[i] = cheapexpr(n, init) - s[i] = walkexpr(s[i], init) + n := fn.Type().NumParams() + if n != len(va) { + base.Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va)) } + + call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, va) + typecheck.Call(call) + call.SetType(t) + return walkExpr(call, init).(*ir.CallExpr) } -// convFuncName builds the runtime function name for interface conversion. -// It also reports whether the function expects the data by address. -// Not all names are possible. For example, we never generate convE2E or convE2I. -func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) { - tkind := to.Tie() - switch from.Tie() { - case 'I': - if tkind == 'I' { - return "convI2I", false - } - case 'T': - switch { - case from.Size() == 2 && from.Align == 2: - return "convT16", false - case from.Size() == 4 && from.Align == 4 && !from.HasPointers(): - return "convT32", false - case from.Size() == 8 && from.Align == types.Types[types.TUINT64].Align && !from.HasPointers(): - return "convT64", false - } - if sc := from.SoleComponent(); sc != nil { - switch { - case sc.IsString(): - return "convTstring", false - case sc.IsSlice(): - return "convTslice", false - } - } +func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr { + return vmkcall(typecheck.LookupRuntime(name), t, init, args) +} - switch tkind { - case 'E': - if !from.HasPointers() { - return "convT2Enoptr", true - } - return "convT2E", true - case 'I': - if !from.HasPointers() { - return "convT2Inoptr", true - } - return "convT2I", true - } - } - base.Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie()) - panic("unreachable") +func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr { + return vmkcall(fn, t, init, args) } -// The result of walkexpr MUST be assigned back to n, e.g. -// n.Left = walkexpr(n.Left, init) -func walkexpr(n ir.Node, init *ir.Nodes) ir.Node { - if n == nil { - return n +func chanfn(name string, n int, t *types.Type) ir.Node { + if !t.IsChan() { + base.Fatalf("chanfn %v", t) } - - // Eagerly checkwidth all expressions for the back end. - if n.Type() != nil && !n.Type().WidthCalculated() { - switch n.Type().Kind() { - case types.TBLANK, types.TNIL, types.TIDEAL: - default: - types.CheckSize(n.Type()) - } + fn := typecheck.LookupRuntime(name) + switch n { + default: + base.Fatalf("chanfn %d", n) + case 1: + fn = typecheck.SubstArgTypes(fn, t.Elem()) + case 2: + fn = typecheck.SubstArgTypes(fn, t.Elem(), t.Elem()) } + return fn +} - if init == n.PtrInit() { - // not okay to use n->ninit when walking n, - // because we might replace n with some other node - // and would lose the init list. - base.Fatalf("walkexpr init == &n->ninit") +func mapfn(name string, t *types.Type) ir.Node { + if !t.IsMap() { + base.Fatalf("mapfn %v", t) } + fn := typecheck.LookupRuntime(name) + fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key(), t.Elem()) + return fn +} - if len(n.Init()) != 0 { - walkstmtlist(n.Init()) - init.Append(n.PtrInit().Take()...) 
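mapfn and mapfndel above substitute the map's concrete key and element types into the generic runtime signatures, and the mapfast table just below selects the specialized fast-path variants. At the source level, the rewrites look like this — an ordinary illustrative program, not compiler API:

	package main

	import "fmt"

	func main() {
		ages := map[string]int{"ada": 36}

		// A string key selects the _faststr variants (see mapfast below).
		a := ages["ada"]     // lowers to runtime.mapaccess1_faststr
		ages["grace"] = 46   // lowers to runtime.mapassign_faststr
		delete(ages, "ada")  // lowers to runtime.mapdelete_faststr
		_, ok := ages["ada"] // lowers to runtime.mapaccess2_faststr

		fmt.Println(a, ok)
	}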
+func mapfndel(name string, t *types.Type) ir.Node { + if !t.IsMap() { + base.Fatalf("mapfn %v", t) } + fn := typecheck.LookupRuntime(name) + fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key()) + return fn +} - lno := ir.SetPos(n) - - if base.Flag.LowerW > 1 { - ir.Dump("before walk expr", n) - } +const ( + mapslow = iota + mapfast32 + mapfast32ptr + mapfast64 + mapfast64ptr + mapfaststr + nmapfast +) - if n.Typecheck() != 1 { - base.Fatalf("missed typecheck: %+v", n) - } +type mapnames [nmapfast]string - if n.Type().IsUntyped() { - base.Fatalf("expression has untyped type: %+v", n) - } +func mkmapnames(base string, ptr string) mapnames { + return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"} +} - if n.Op() == ir.ONAME && n.(*ir.Name).Class_ == ir.PAUTOHEAP { - n := n.(*ir.Name) - nn := ir.NewStarExpr(base.Pos, n.Name().Heapaddr) - nn.X.MarkNonNil() - return walkexpr(typecheck.Expr(nn), init) - } +var mapaccess1 = mkmapnames("mapaccess1", "") +var mapaccess2 = mkmapnames("mapaccess2", "") +var mapassign = mkmapnames("mapassign", "ptr") +var mapdelete = mkmapnames("mapdelete", "") - n = walkexpr1(n, init) - - // Expressions that are constant at run time but not - // considered const by the language spec are not turned into - // constants until walk. For example, if n is y%1 == 0, the - // walk of y%1 may have replaced it by 0. - // Check whether n with its updated args is itself now a constant. - t := n.Type() - n = typecheck.EvalConst(n) - if n.Type() != t { - base.Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type()) +func mapfast(t *types.Type) int { + // Check runtime/map.go:maxElemSize before changing. + if t.Elem().Width > 128 { + return mapslow } - if n.Op() == ir.OLITERAL { - n = typecheck.Expr(n) - // Emit string symbol now to avoid emitting - // any concurrently during the backend. - if v := n.Val(); v.Kind() == constant.String { - _ = staticdata.StringSym(n.Pos(), constant.StringVal(v)) + switch reflectdata.AlgType(t.Key()) { + case types.AMEM32: + if !t.Key().HasPointers() { + return mapfast32 } + if types.PtrSize == 4 { + return mapfast32ptr + } + base.Fatalf("small pointer %v", t.Key()) + case types.AMEM64: + if !t.Key().HasPointers() { + return mapfast64 + } + if types.PtrSize == 8 { + return mapfast64ptr + } + // Two-word object, at least one of which is a pointer. + // Use the slow path. + case types.ASTRING: + return mapfaststr } + return mapslow +} - updateHasCall(n) +func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) { + walkExprListSafe(n.Args, init) - if base.Flag.LowerW != 0 && n != nil { - ir.Dump("after walk expr", n) + // walkexprlistsafe will leave OINDEX (s[n]) alone if both s + // and n are name or literal, but those may index the slice we're + // modifying here. Fix explicitly. + ls := n.Args + for i1, n1 := range ls { + ls[i1] = cheapExpr(n1, init) } - - base.Pos = lno - return n -} - -func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node { - switch n.Op() { - default: - ir.Dump("walk", n) - base.Fatalf("walkexpr: switch 1 unknown op %+v", n.Op()) - panic("unreachable") - - case ir.ONONAME, ir.OGETG, ir.ONEWOBJ, ir.OMETHEXPR: - return n - - case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET: - // TODO(mdempsky): Just return n; see discussion on CL 38655. - // Perhaps refactor to use Node.mayBeShared for these instead. - // If these return early, make sure to still call - // stringsym for constant strings. 
- return n - - case ir.ONOT, ir.ONEG, ir.OPLUS, ir.OBITNOT, ir.OREAL, ir.OIMAG, ir.OSPTR, ir.OITAB, ir.OIDATA: - n := n.(*ir.UnaryExpr) - n.X = walkexpr(n.X, init) - return n - - case ir.ODOTMETH, ir.ODOTINTER: - n := n.(*ir.SelectorExpr) - n.X = walkexpr(n.X, init) - return n - - case ir.OADDR: - n := n.(*ir.AddrExpr) - n.X = walkexpr(n.X, init) - return n - - case ir.ODEREF: - n := n.(*ir.StarExpr) - n.X = walkexpr(n.X, init) - return n - - case ir.OEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH: - n := n.(*ir.BinaryExpr) - n.X = walkexpr(n.X, init) - n.Y = walkexpr(n.Y, init) - return n - - case ir.ODOT, ir.ODOTPTR: - n := n.(*ir.SelectorExpr) - usefield(n) - n.X = walkexpr(n.X, init) - return n - - case ir.ODOTTYPE, ir.ODOTTYPE2: - n := n.(*ir.TypeAssertExpr) - n.X = walkexpr(n.X, init) - // Set up interface type addresses for back end. - n.Ntype = reflectdata.TypePtr(n.Type()) - if n.Op() == ir.ODOTTYPE { - n.Ntype.(*ir.AddrExpr).Alloc = reflectdata.TypePtr(n.X.Type()) - } - if !n.Type().IsInterface() && !n.X.Type().IsEmptyInterface() { - n.Itab = []ir.Node{reflectdata.ITabAddr(n.Type(), n.X.Type())} - } - return n - - case ir.OLEN, ir.OCAP: - n := n.(*ir.UnaryExpr) - if isRuneCount(n) { - // Replace len([]rune(string)) with runtime.countrunes(string). - return mkcall("countrunes", n.Type(), init, typecheck.Conv(n.X.(*ir.ConvExpr).X, types.Types[types.TSTRING])) - } - - n.X = walkexpr(n.X, init) - - // replace len(*[10]int) with 10. - // delayed until now to preserve side effects. - t := n.X.Type() - - if t.IsPtr() { - t = t.Elem() - } - if t.IsArray() { - safeexpr(n.X, init) - con := typecheck.OrigInt(n, t.NumElem()) - con.SetTypecheck(1) - return con - } - return n - - case ir.OCOMPLEX: - n := n.(*ir.BinaryExpr) - n.X = walkexpr(n.X, init) - n.Y = walkexpr(n.Y, init) - return n - - case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: - n := n.(*ir.BinaryExpr) - return walkcompare(n, init) - - case ir.OANDAND, ir.OOROR: - n := n.(*ir.LogicalExpr) - n.X = walkexpr(n.X, init) - - // cannot put side effects from n.Right on init, - // because they cannot run before n.Left is checked. - // save elsewhere and store on the eventual n.Right. - var ll ir.Nodes - - n.Y = walkexpr(n.Y, &ll) - n.Y = ir.InitExpr(ll, n.Y) - return n - - case ir.OPRINT, ir.OPRINTN: - return walkprint(n.(*ir.CallExpr), init) - - case ir.OPANIC: - n := n.(*ir.UnaryExpr) - return mkcall("gopanic", nil, init, n.X) - - case ir.ORECOVER: - n := n.(*ir.CallExpr) - return mkcall("gorecover", n.Type(), init, typecheck.NodAddr(ir.RegFP)) - - case ir.OCLOSUREREAD, ir.OCFUNC: - return n - - case ir.OCALLINTER, ir.OCALLFUNC, ir.OCALLMETH: - n := n.(*ir.CallExpr) - if n.Op() == ir.OCALLINTER { - usemethod(n) - reflectdata.MarkUsedIfaceMethod(n) - } - - if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.OCLOSURE { - // Transform direct call of a closure to call of a normal function. - // transformclosure already did all preparation work. - - // Prepend captured variables to argument list. - clo := n.X.(*ir.ClosureExpr) - n.Args.Prepend(clo.Func.ClosureEnter...) - clo.Func.ClosureEnter.Set(nil) - - // Replace OCLOSURE with ONAME/PFUNC. - n.X = clo.Func.Nname - - // Update type of OCALLFUNC node. - // Output arguments had not changed, but their offsets could. - if n.X.Type().NumResults() == 1 { - n.SetType(n.X.Type().Results().Field(0).Type) - } else { - n.SetType(n.X.Type().Results()) - } - } - - walkCall(n, init) - return n - - case ir.OAS, ir.OASOP: - init.Append(n.PtrInit().Take()...) 
- - var left, right ir.Node - switch n.Op() { - case ir.OAS: - n := n.(*ir.AssignStmt) - left, right = n.X, n.Y - case ir.OASOP: - n := n.(*ir.AssignOpStmt) - left, right = n.X, n.Y - } - - // Recognize m[k] = append(m[k], ...) so we can reuse - // the mapassign call. - var mapAppend *ir.CallExpr - if left.Op() == ir.OINDEXMAP && right.Op() == ir.OAPPEND { - left := left.(*ir.IndexExpr) - mapAppend = right.(*ir.CallExpr) - if !ir.SameSafeExpr(left, mapAppend.Args[0]) { - base.Fatalf("not same expressions: %v != %v", left, mapAppend.Args[0]) - } - } - - left = walkexpr(left, init) - left = safeexpr(left, init) - if mapAppend != nil { - mapAppend.Args[0] = left - } - - if n.Op() == ir.OASOP { - // Rewrite x op= y into x = x op y. - n = ir.NewAssignStmt(base.Pos, left, typecheck.Expr(ir.NewBinaryExpr(base.Pos, n.(*ir.AssignOpStmt).AsOp, left, right))) - } else { - n.(*ir.AssignStmt).X = left - } - as := n.(*ir.AssignStmt) - - if oaslit(as, init) { - return ir.NewBlockStmt(as.Pos(), nil) - } - - if as.Y == nil { - // TODO(austin): Check all "implicit zeroing" - return as - } - - if !base.Flag.Cfg.Instrumenting && ir.IsZero(as.Y) { - return as - } - - switch as.Y.Op() { - default: - as.Y = walkexpr(as.Y, init) - - case ir.ORECV: - // x = <-c; as.Left is x, as.Right.Left is c. - // order.stmt made sure x is addressable. - recv := as.Y.(*ir.UnaryExpr) - recv.X = walkexpr(recv.X, init) - - n1 := typecheck.NodAddr(as.X) - r := recv.X // the channel - return mkcall1(chanfn("chanrecv1", 2, r.Type()), nil, init, r, n1) - - case ir.OAPPEND: - // x = append(...) - call := as.Y.(*ir.CallExpr) - if call.Type().Elem().NotInHeap() { - base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", call.Type().Elem()) - } - var r ir.Node - switch { - case isAppendOfMake(call): - // x = append(y, make([]T, y)...) - r = extendslice(call, init) - case call.IsDDD: - r = appendslice(call, init) // also works for append(slice, string). - default: - r = walkappend(call, init, as) - } - as.Y = r - if r.Op() == ir.OAPPEND { - // Left in place for back end. - // Do not add a new write barrier. - // Set up address of type for back end. - r.(*ir.CallExpr).X = reflectdata.TypePtr(r.Type().Elem()) - return as - } - // Otherwise, lowered for race detector. - // Treat as ordinary assignment. - } - - if as.X != nil && as.Y != nil { - return convas(as, init) - } - return as - - case ir.OAS2: - n := n.(*ir.AssignListStmt) - init.Append(n.PtrInit().Take()...) - walkexprlistsafe(n.Lhs, init) - walkexprlistsafe(n.Rhs, init) - return ir.NewBlockStmt(src.NoXPos, ascompatee(ir.OAS, n.Lhs, n.Rhs, init)) - - // a,b,... = fn() - case ir.OAS2FUNC: - n := n.(*ir.AssignListStmt) - init.Append(n.PtrInit().Take()...) - - r := n.Rhs[0] - walkexprlistsafe(n.Lhs, init) - r = walkexpr(r, init) - - if ir.IsIntrinsicCall(r.(*ir.CallExpr)) { - n.Rhs = []ir.Node{r} - return n - } - init.Append(r) - - ll := ascompatet(n.Lhs, r.Type()) - return ir.NewBlockStmt(src.NoXPos, ll) - - // x, y = <-c - // order.stmt made sure x is addressable or blank. - case ir.OAS2RECV: - n := n.(*ir.AssignListStmt) - init.Append(n.PtrInit().Take()...) 
- - r := n.Rhs[0].(*ir.UnaryExpr) // recv - walkexprlistsafe(n.Lhs, init) - r.X = walkexpr(r.X, init) - var n1 ir.Node - if ir.IsBlank(n.Lhs[0]) { - n1 = typecheck.NodNil() - } else { - n1 = typecheck.NodAddr(n.Lhs[0]) - } - fn := chanfn("chanrecv2", 2, r.X.Type()) - ok := n.Lhs[1] - call := mkcall1(fn, types.Types[types.TBOOL], init, r.X, n1) - return typecheck.Stmt(ir.NewAssignStmt(base.Pos, ok, call)) - - // a,b = m[i] - case ir.OAS2MAPR: - n := n.(*ir.AssignListStmt) - init.Append(n.PtrInit().Take()...) - - r := n.Rhs[0].(*ir.IndexExpr) - walkexprlistsafe(n.Lhs, init) - r.X = walkexpr(r.X, init) - r.Index = walkexpr(r.Index, init) - t := r.X.Type() - - fast := mapfast(t) - var key ir.Node - if fast != mapslow { - // fast versions take key by value - key = r.Index - } else { - // standard version takes key by reference - // order.expr made sure key is addressable. - key = typecheck.NodAddr(r.Index) - } - - // from: - // a,b = m[i] - // to: - // var,b = mapaccess2*(t, m, i) - // a = *var - a := n.Lhs[0] - - var call *ir.CallExpr - if w := t.Elem().Width; w <= zeroValSize { - fn := mapfn(mapaccess2[fast], t) - call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key) - } else { - fn := mapfn("mapaccess2_fat", t) - z := reflectdata.ZeroAddr(w) - call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key, z) - } - - // mapaccess2* returns a typed bool, but due to spec changes, - // the boolean result of i.(T) is now untyped so we make it the - // same type as the variable on the lhs. - if ok := n.Lhs[1]; !ir.IsBlank(ok) && ok.Type().IsBoolean() { - call.Type().Field(1).Type = ok.Type() - } - n.Rhs = []ir.Node{call} - n.SetOp(ir.OAS2FUNC) - - // don't generate a = *var if a is _ - if ir.IsBlank(a) { - return walkexpr(typecheck.Stmt(n), init) - } - - var_ := typecheck.Temp(types.NewPtr(t.Elem())) - var_.SetTypecheck(1) - var_.MarkNonNil() // mapaccess always returns a non-nil pointer - - n.Lhs[0] = var_ - init.Append(walkexpr(n, init)) - - as := ir.NewAssignStmt(base.Pos, a, ir.NewStarExpr(base.Pos, var_)) - return walkexpr(typecheck.Stmt(as), init) - - case ir.ODELETE: - n := n.(*ir.CallExpr) - init.Append(n.PtrInit().Take()...) - map_ := n.Args[0] - key := n.Args[1] - map_ = walkexpr(map_, init) - key = walkexpr(key, init) - - t := map_.Type() - fast := mapfast(t) - if fast == mapslow { - // order.stmt made sure key is addressable. - key = typecheck.NodAddr(key) - } - return mkcall1(mapfndel(mapdelete[fast], t), nil, init, reflectdata.TypePtr(t), map_, key) - - case ir.OAS2DOTTYPE: - n := n.(*ir.AssignListStmt) - walkexprlistsafe(n.Lhs, init) - n.Rhs[0] = walkexpr(n.Rhs[0], init) - return n - - case ir.OCONVIFACE: - n := n.(*ir.ConvExpr) - n.X = walkexpr(n.X, init) - - fromType := n.X.Type() - toType := n.Type() - - if !fromType.IsInterface() && !ir.IsBlank(ir.CurFunc.Nname) { // skip unnamed functions (func _()) - reflectdata.MarkTypeUsedInInterface(fromType, ir.CurFunc.LSym) - } - - // typeword generates the type word of the interface value. - typeword := func() ir.Node { - if toType.IsEmptyInterface() { - return reflectdata.TypePtr(fromType) - } - return reflectdata.ITabAddr(fromType, toType) - } - - // Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped. 
- if types.IsDirectIface(fromType) { - l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), n.X) - l.SetType(toType) - l.SetTypecheck(n.Typecheck()) - return l - } - - if ir.Names.Staticuint64s == nil { - ir.Names.Staticuint64s = typecheck.NewName(ir.Pkgs.Runtime.Lookup("staticuint64s")) - ir.Names.Staticuint64s.Class_ = ir.PEXTERN - // The actual type is [256]uint64, but we use [256*8]uint8 so we can address - // individual bytes. - ir.Names.Staticuint64s.SetType(types.NewArray(types.Types[types.TUINT8], 256*8)) - ir.Names.Zerobase = typecheck.NewName(ir.Pkgs.Runtime.Lookup("zerobase")) - ir.Names.Zerobase.Class_ = ir.PEXTERN - ir.Names.Zerobase.SetType(types.Types[types.TUINTPTR]) - } - - // Optimize convT2{E,I} for many cases in which T is not pointer-shaped, - // by using an existing addressable value identical to n.Left - // or creating one on the stack. - var value ir.Node - switch { - case fromType.Size() == 0: - // n.Left is zero-sized. Use zerobase. - cheapexpr(n.X, init) // Evaluate n.Left for side-effects. See issue 19246. - value = ir.Names.Zerobase - case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()): - // n.Left is a bool/byte. Use staticuint64s[n.Left * 8] on little-endian - // and staticuint64s[n.Left * 8 + 7] on big-endian. - n.X = cheapexpr(n.X, init) - // byteindex widens n.Left so that the multiplication doesn't overflow. - index := ir.NewBinaryExpr(base.Pos, ir.OLSH, byteindex(n.X), ir.NewInt(3)) - if ssagen.Arch.LinkArch.ByteOrder == binary.BigEndian { - index = ir.NewBinaryExpr(base.Pos, ir.OADD, index, ir.NewInt(7)) - } - xe := ir.NewIndexExpr(base.Pos, ir.Names.Staticuint64s, index) - xe.SetBounded(true) - value = xe - case n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class_ == ir.PEXTERN && n.X.(*ir.Name).Readonly(): - // n.Left is a readonly global; use it directly. - value = n.X - case !fromType.IsInterface() && n.Esc() == ir.EscNone && fromType.Width <= 1024: - // n.Left does not escape. Use a stack temporary initialized to n.Left. - value = typecheck.Temp(fromType) - init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, value, n.X))) - } - - if value != nil { - // Value is identical to n.Left. - // Construct the interface directly: {type/itab, &value}. - l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), typecheck.Expr(typecheck.NodAddr(value))) - l.SetType(toType) - l.SetTypecheck(n.Typecheck()) - return l - } - - // Implement interface to empty interface conversion. - // tmp = i.itab - // if tmp != nil { - // tmp = tmp.type - // } - // e = iface{tmp, i.data} - if toType.IsEmptyInterface() && fromType.IsInterface() && !fromType.IsEmptyInterface() { - // Evaluate the input interface. - c := typecheck.Temp(fromType) - init.Append(ir.NewAssignStmt(base.Pos, c, n.X)) - - // Get the itab out of the interface. - tmp := typecheck.Temp(types.NewPtr(types.Types[types.TUINT8])) - init.Append(ir.NewAssignStmt(base.Pos, tmp, typecheck.Expr(ir.NewUnaryExpr(base.Pos, ir.OITAB, c)))) - - // Get the type out of the itab. - nif := ir.NewIfStmt(base.Pos, typecheck.Expr(ir.NewBinaryExpr(base.Pos, ir.ONE, tmp, typecheck.NodNil())), nil, nil) - nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, tmp, itabType(tmp))} - init.Append(nif) - - // Build the result. - e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, tmp, ifaceData(n.Pos(), c, types.NewPtr(types.Types[types.TUINT8]))) - e.SetType(toType) // assign type manually, typecheck doesn't understand OEFACE. 
- e.SetTypecheck(1) - return e - } - - fnname, needsaddr := convFuncName(fromType, toType) - - if !needsaddr && !fromType.IsInterface() { - // Use a specialized conversion routine that only returns a data pointer. - // ptr = convT2X(val) - // e = iface{typ/tab, ptr} - fn := typecheck.LookupRuntime(fnname) - types.CalcSize(fromType) - fn = typecheck.SubstArgTypes(fn, fromType) - types.CalcSize(fn.Type()) - call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) - call.Args = []ir.Node{n.X} - e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), safeexpr(walkexpr(typecheck.Expr(call), init), init)) - e.SetType(toType) - e.SetTypecheck(1) - return e - } - - var tab ir.Node - if fromType.IsInterface() { - // convI2I - tab = reflectdata.TypePtr(toType) - } else { - // convT2x - tab = typeword() - } - - v := n.X - if needsaddr { - // Types of large or unknown size are passed by reference. - // Orderexpr arranged for n.Left to be a temporary for all - // the conversions it could see. Comparison of an interface - // with a non-interface, especially in a switch on interface value - // with non-interface cases, is not visible to order.stmt, so we - // have to fall back on allocating a temp here. - if !ir.IsAssignable(v) { - v = copyexpr(v, v.Type(), init) - } - v = typecheck.NodAddr(v) - } - - types.CalcSize(fromType) - fn := typecheck.LookupRuntime(fnname) - fn = typecheck.SubstArgTypes(fn, fromType, toType) - types.CalcSize(fn.Type()) - call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) - call.Args = []ir.Node{tab, v} - return walkexpr(typecheck.Expr(call), init) - - case ir.OCONV, ir.OCONVNOP: - n := n.(*ir.ConvExpr) - n.X = walkexpr(n.X, init) - if n.Op() == ir.OCONVNOP && n.Type() == n.X.Type() { - return n.X - } - if n.Op() == ir.OCONVNOP && ir.ShouldCheckPtr(ir.CurFunc, 1) { - if n.Type().IsPtr() && n.X.Type().IsUnsafePtr() { // unsafe.Pointer to *T - return walkCheckPtrAlignment(n, init, nil) - } - if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() { // uintptr to unsafe.Pointer - return walkCheckPtrArithmetic(n, init) - } - } - param, result := rtconvfn(n.X.Type(), n.Type()) - if param == types.Txxx { - return n - } - fn := types.BasicTypeNames[param] + "to" + types.BasicTypeNames[result] - return typecheck.Conv(mkcall(fn, types.Types[result], init, typecheck.Conv(n.X, types.Types[param])), n.Type()) - - case ir.ODIV, ir.OMOD: - n := n.(*ir.BinaryExpr) - n.X = walkexpr(n.X, init) - n.Y = walkexpr(n.Y, init) - - // rewrite complex div into function call. - et := n.X.Type().Kind() - - if types.IsComplex[et] && n.Op() == ir.ODIV { - t := n.Type() - call := mkcall("complex128div", types.Types[types.TCOMPLEX128], init, typecheck.Conv(n.X, types.Types[types.TCOMPLEX128]), typecheck.Conv(n.Y, types.Types[types.TCOMPLEX128])) - return typecheck.Conv(call, t) - } - - // Nothing to do for float divisions. - if types.IsFloat[et] { - return n - } - - // rewrite 64-bit div and mod on 32-bit architectures. - // TODO: Remove this code once we can introduce - // runtime calls late in SSA processing. - if types.RegSize < 8 && (et == types.TINT64 || et == types.TUINT64) { - if n.Y.Op() == ir.OLITERAL { - // Leave div/mod by constant powers of 2 or small 16-bit constants. - // The SSA backend will handle those. 
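For the unsigned case, the constant test that follows amounts to the predicate below (a sketch; `leaveToSSA` is a name invented for the example):

package main

import "fmt"

// leaveToSSA reports whether a 64-bit unsigned division by the
// constant c can stay in the tree for the SSA backend to handle,
// instead of becoming a runtime uint64div call on 32-bit targets:
// small 16-bit constants and non-zero powers of two qualify.
func leaveToSSA(c uint64) bool {
    return c < 1<<16 || (c != 0 && c&(c-1) == 0)
}

func main() {
    fmt.Println(leaveToSSA(4096))    // true: power of two
    fmt.Println(leaveToSSA(1000))    // true: fits in 16 bits
    fmt.Println(leaveToSSA(3 << 40)) // false: needs uint64div
}
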
- switch et { - case types.TINT64: - c := ir.Int64Val(n.Y) - if c < 0 { - c = -c - } - if c != 0 && c&(c-1) == 0 { - return n - } - case types.TUINT64: - c := ir.Uint64Val(n.Y) - if c < 1<<16 { - return n - } - if c != 0 && c&(c-1) == 0 { - return n - } - } - } - var fn string - if et == types.TINT64 { - fn = "int64" - } else { - fn = "uint64" - } - if n.Op() == ir.ODIV { - fn += "div" - } else { - fn += "mod" - } - return mkcall(fn, n.Type(), init, typecheck.Conv(n.X, types.Types[et]), typecheck.Conv(n.Y, types.Types[et])) - } - return n - - case ir.OINDEX: - n := n.(*ir.IndexExpr) - n.X = walkexpr(n.X, init) - - // save the original node for bounds checking elision. - // If it was a ODIV/OMOD walk might rewrite it. - r := n.Index - - n.Index = walkexpr(n.Index, init) - - // if range of type cannot exceed static array bound, - // disable bounds check. - if n.Bounded() { - return n - } - t := n.X.Type() - if t != nil && t.IsPtr() { - t = t.Elem() - } - if t.IsArray() { - n.SetBounded(bounded(r, t.NumElem())) - if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Index, constant.Int) { - base.Warn("index bounds check elided") - } - if ir.IsSmallIntConst(n.Index) && !n.Bounded() { - base.Errorf("index out of bounds") - } - } else if ir.IsConst(n.X, constant.String) { - n.SetBounded(bounded(r, int64(len(ir.StringVal(n.X))))) - if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Index, constant.Int) { - base.Warn("index bounds check elided") - } - if ir.IsSmallIntConst(n.Index) && !n.Bounded() { - base.Errorf("index out of bounds") - } - } - - if ir.IsConst(n.Index, constant.Int) { - if v := n.Index.Val(); constant.Sign(v) < 0 || ir.ConstOverflow(v, types.Types[types.TINT]) { - base.Errorf("index out of bounds") - } - } - return n - - case ir.OINDEXMAP: - // Replace m[k] with *map{access1,assign}(maptype, m, &k) - n := n.(*ir.IndexExpr) - n.X = walkexpr(n.X, init) - n.Index = walkexpr(n.Index, init) - map_ := n.X - key := n.Index - t := map_.Type() - var call *ir.CallExpr - if n.Assigned { - // This m[k] expression is on the left-hand side of an assignment. - fast := mapfast(t) - if fast == mapslow { - // standard version takes key by reference. - // order.expr made sure key is addressable. - key = typecheck.NodAddr(key) - } - call = mkcall1(mapfn(mapassign[fast], t), nil, init, reflectdata.TypePtr(t), map_, key) - } else { - // m[k] is not the target of an assignment. - fast := mapfast(t) - if fast == mapslow { - // standard version takes key by reference. - // order.expr made sure key is addressable. - key = typecheck.NodAddr(key) - } - - if w := t.Elem().Width; w <= zeroValSize { - call = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Elem()), init, reflectdata.TypePtr(t), map_, key) - } else { - z := reflectdata.ZeroAddr(w) - call = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, reflectdata.TypePtr(t), map_, key, z) - } - } - call.SetType(types.NewPtr(t.Elem())) - call.MarkNonNil() // mapaccess1* and mapassign always return non-nil pointers. 
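In source terms the rewrite looks like this (a sketch; the comments describe the observable behavior implied by the case above):

package main

import "fmt"

func main() {
    m := make(map[string]int)

    // Assignment: lowered to a mapassign call that returns a pointer
    // to the element slot, which the compiler stores through.
    m["a"] = 2

    // Read: lowered to *mapaccess1(maptype, m, &k). The returned
    // pointer is never nil (misses point at a zero value), so the
    // dereference needs no nil check.
    fmt.Println(m["a"], m["b"]) // 2 0

    // The element pointer never reaches user code, which is why
    // &m["a"] is a compile-time error.
}
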
- star := ir.NewStarExpr(base.Pos, call) - star.SetType(t.Elem()) - star.SetTypecheck(1) - return star - - case ir.ORECV: - base.Fatalf("walkexpr ORECV") // should see inside OAS only - panic("unreachable") - - case ir.OSLICEHEADER: - n := n.(*ir.SliceHeaderExpr) - n.Ptr = walkexpr(n.Ptr, init) - n.LenCap[0] = walkexpr(n.LenCap[0], init) - n.LenCap[1] = walkexpr(n.LenCap[1], init) - return n - - case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR: - n := n.(*ir.SliceExpr) - - checkSlice := ir.ShouldCheckPtr(ir.CurFunc, 1) && n.Op() == ir.OSLICE3ARR && n.X.Op() == ir.OCONVNOP && n.X.(*ir.ConvExpr).X.Type().IsUnsafePtr() - if checkSlice { - conv := n.X.(*ir.ConvExpr) - conv.X = walkexpr(conv.X, init) - } else { - n.X = walkexpr(n.X, init) - } - - low, high, max := n.SliceBounds() - low = walkexpr(low, init) - if low != nil && ir.IsZero(low) { - // Reduce x[0:j] to x[:j] and x[0:j:k] to x[:j:k]. - low = nil - } - high = walkexpr(high, init) - max = walkexpr(max, init) - n.SetSliceBounds(low, high, max) - if checkSlice { - n.X = walkCheckPtrAlignment(n.X.(*ir.ConvExpr), init, max) - } - - if n.Op().IsSlice3() { - if max != nil && max.Op() == ir.OCAP && ir.SameSafeExpr(n.X, max.(*ir.UnaryExpr).X) { - // Reduce x[i:j:cap(x)] to x[i:j]. - if n.Op() == ir.OSLICE3 { - n.SetOp(ir.OSLICE) - } else { - n.SetOp(ir.OSLICEARR) - } - return reduceSlice(n) - } - return n - } - return reduceSlice(n) - - case ir.ONEW: - n := n.(*ir.UnaryExpr) - if n.Type().Elem().NotInHeap() { - base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type().Elem()) - } - if n.Esc() == ir.EscNone { - if n.Type().Elem().Width >= ir.MaxImplicitStackVarSize { - base.Fatalf("large ONEW with EscNone: %v", n) - } - r := typecheck.Temp(n.Type().Elem()) - init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, nil))) // zero temp - return typecheck.Expr(typecheck.NodAddr(r)) - } - return callnew(n.Type().Elem()) - - case ir.OADDSTR: - return addstr(n.(*ir.AddStringExpr), init) - - case ir.OAPPEND: - // order should make sure we only see OAS(node, OAPPEND), which we handle above. - base.Fatalf("append outside assignment") - panic("unreachable") - - case ir.OCOPY: - return copyany(n.(*ir.BinaryExpr), init, base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime) - - case ir.OCLOSE: - // cannot use chanfn - closechan takes any, not chan any - n := n.(*ir.UnaryExpr) - fn := typecheck.LookupRuntime("closechan") - fn = typecheck.SubstArgTypes(fn, n.X.Type()) - return mkcall1(fn, nil, init, n.X) - - case ir.OMAKECHAN: - // When size fits into int, use makechan instead of - // makechan64, which is faster and shorter on 32 bit platforms. - n := n.(*ir.MakeExpr) - size := n.Len - fnname := "makechan64" - argtype := types.Types[types.TINT64] - - // Type checking guarantees that TIDEAL size is positive and fits in an int. - // The case of size overflow when converting TUINT or TUINTPTR to TINT - // will be handled by the negative range checks in makechan during runtime. - if size.Type().IsKind(types.TIDEAL) || size.Type().Size() <= types.Types[types.TUINT].Size() { - fnname = "makechan" - argtype = types.Types[types.TINT] - } - - return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(size, argtype)) - - case ir.OMAKEMAP: - n := n.(*ir.MakeExpr) - t := n.Type() - hmapType := reflectdata.MapType(t) - hint := n.Len - - // var h *hmap - var h ir.Node - if n.Esc() == ir.EscNone { - // Allocate hmap on stack. 
- - // var hv hmap - hv := typecheck.Temp(hmapType) - init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, hv, nil))) - // h = &hv - h = typecheck.NodAddr(hv) - - // Allocate one bucket pointed to by hmap.buckets on stack if hint - // is not larger than BUCKETSIZE. In case hint is larger than - // BUCKETSIZE runtime.makemap will allocate the buckets on the heap. - // Maximum key and elem size is 128 bytes, larger objects - // are stored with an indirection. So max bucket size is 2048+eps. - if !ir.IsConst(hint, constant.Int) || - constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) { - - // In case hint is larger than BUCKETSIZE runtime.makemap - // will allocate the buckets on the heap, see #20184 - // - // if hint <= BUCKETSIZE { - // var bv bmap - // b = &bv - // h.buckets = b - // } - - nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(reflectdata.BUCKETSIZE)), nil, nil) - nif.Likely = true - - // var bv bmap - bv := typecheck.Temp(reflectdata.MapBucketType(t)) - nif.Body.Append(ir.NewAssignStmt(base.Pos, bv, nil)) - - // b = &bv - b := typecheck.NodAddr(bv) - - // h.buckets = b - bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap - na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), b) - nif.Body.Append(na) - appendWalkStmt(init, nif) - } - } - - if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) { - // Handling make(map[any]any) and - // make(map[any]any, hint) where hint <= BUCKETSIZE - // special allows for faster map initialization and - // improves binary size by using calls with fewer arguments. - // For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false - // and no buckets will be allocated by makemap. Therefore, - // no buckets need to be allocated in this code path. - if n.Esc() == ir.EscNone { - // Only need to initialize h.hash0 since - // hmap h has been allocated on the stack already. - // h.hash0 = fastrand() - rand := mkcall("fastrand", types.Types[types.TUINT32], init) - hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap - appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand)) - return typecheck.ConvNop(h, t) - } - // Call runtime.makehmap to allocate an - // hmap on the heap and initialize hmap's hash0 field. - fn := typecheck.LookupRuntime("makemap_small") - fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem()) - return mkcall1(fn, n.Type(), init) - } - - if n.Esc() != ir.EscNone { - h = typecheck.NodNil() - } - // Map initialization with a variable or large hint is - // more complicated. We therefore generate a call to - // runtime.makemap to initialize hmap and allocate the - // map buckets. - - // When hint fits into int, use makemap instead of - // makemap64, which is faster and shorter on 32 bit platforms. - fnname := "makemap64" - argtype := types.Types[types.TINT64] - - // Type checking guarantees that TIDEAL hint is positive and fits in an int. - // See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function. - // The case of hint overflow when converting TUINT or TUINTPTR to TINT - // will be handled by the negative range checks in makemap during runtime. 
- if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() { - fnname = "makemap" - argtype = types.Types[types.TINT] - } - - fn := typecheck.LookupRuntime(fnname) - fn = typecheck.SubstArgTypes(fn, hmapType, t.Key(), t.Elem()) - return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(hint, argtype), h) - - case ir.OMAKESLICE: - n := n.(*ir.MakeExpr) - l := n.Len - r := n.Cap - if r == nil { - r = safeexpr(l, init) - l = r - } - t := n.Type() - if t.Elem().NotInHeap() { - base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem()) - } - if n.Esc() == ir.EscNone { - if why := escape.HeapAllocReason(n); why != "" { - base.Fatalf("%v has EscNone, but %v", n, why) - } - // var arr [r]T - // n = arr[:l] - i := typecheck.IndexConst(r) - if i < 0 { - base.Fatalf("walkexpr: invalid index %v", r) - } - - // cap is constrained to [0,2^31) or [0,2^63) depending on whether - // we're in 32-bit or 64-bit systems. So it's safe to do: - // - // if uint64(len) > cap { - // if len < 0 { panicmakeslicelen() } - // panicmakeslicecap() - // } - nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, typecheck.Conv(l, types.Types[types.TUINT64]), ir.NewInt(i)), nil, nil) - niflen := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLT, l, ir.NewInt(0)), nil, nil) - niflen.Body = []ir.Node{mkcall("panicmakeslicelen", nil, init)} - nif.Body.Append(niflen, mkcall("panicmakeslicecap", nil, init)) - init.Append(typecheck.Stmt(nif)) - - t = types.NewArray(t.Elem(), i) // [r]T - var_ := typecheck.Temp(t) - appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil)) // zero temp - r := ir.NewSliceExpr(base.Pos, ir.OSLICE, var_) // arr[:l] - r.SetSliceBounds(nil, l, nil) - // The conv is necessary in case n.Type is named. - return walkexpr(typecheck.Expr(typecheck.Conv(r, n.Type())), init) - } - - // n escapes; set up a call to makeslice. - // When len and cap can fit into int, use makeslice instead of - // makeslice64, which is faster and shorter on 32 bit platforms. - - len, cap := l, r - - fnname := "makeslice64" - argtype := types.Types[types.TINT64] - - // Type checking guarantees that TIDEAL len/cap are positive and fit in an int. - // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT - // will be handled by the negative range checks in makeslice during runtime. 
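The EscNone branch above is why a fixed-size, non-escaping make([]T, n) costs nothing at run time; a sketch using testing.AllocsPerRun (the `sink` global is the example's way of forcing the contrasting case to escape):

package main

import (
    "fmt"
    "testing"
)

var sink []byte

func main() {
    // Constant-sized make that does not escape: rewritten into a
    // stack array plus a slice expression, so no heap allocation.
    onStack := testing.AllocsPerRun(100, func() {
        buf := make([]byte, 64)
        buf[0] = 1
        _ = buf
    })

    // The same make escaping through a global goes to the heap.
    onHeap := testing.AllocsPerRun(100, func() { sink = make([]byte, 64) })

    fmt.Println(onStack, onHeap) // expect 0 and 1
}
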
- if (len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size()) && - (cap.Type().IsKind(types.TIDEAL) || cap.Type().Size() <= types.Types[types.TUINT].Size()) { - fnname = "makeslice" - argtype = types.Types[types.TINT] - } - - m := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil) - m.SetType(t) - - fn := typecheck.LookupRuntime(fnname) - m.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype)) - m.Ptr.MarkNonNil() - m.LenCap = []ir.Node{typecheck.Conv(len, types.Types[types.TINT]), typecheck.Conv(cap, types.Types[types.TINT])} - return walkexpr(typecheck.Expr(m), init) - - case ir.OMAKESLICECOPY: - n := n.(*ir.MakeExpr) - if n.Esc() == ir.EscNone { - base.Fatalf("OMAKESLICECOPY with EscNone: %v", n) - } - - t := n.Type() - if t.Elem().NotInHeap() { - base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem()) - } - - length := typecheck.Conv(n.Len, types.Types[types.TINT]) - copylen := ir.NewUnaryExpr(base.Pos, ir.OLEN, n.Cap) - copyptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, n.Cap) - - if !t.Elem().HasPointers() && n.Bounded() { - // When len(to)==len(from) and elements have no pointers: - // replace make+copy with runtime.mallocgc+runtime.memmove. - - // We do not check for overflow of len(to)*elem.Width here - // since len(from) is an existing checked slice capacity - // with same elem.Width for the from slice. - size := ir.NewBinaryExpr(base.Pos, ir.OMUL, typecheck.Conv(length, types.Types[types.TUINTPTR]), typecheck.Conv(ir.NewInt(t.Elem().Width), types.Types[types.TUINTPTR])) - - // instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer - fn := typecheck.LookupRuntime("mallocgc") - sh := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil) - sh.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, typecheck.NodNil(), ir.NewBool(false)) - sh.Ptr.MarkNonNil() - sh.LenCap = []ir.Node{length, length} - sh.SetType(t) - - s := typecheck.Temp(t) - r := typecheck.Stmt(ir.NewAssignStmt(base.Pos, s, sh)) - r = walkexpr(r, init) - init.Append(r) - - // instantiate memmove(to *any, frm *any, size uintptr) - fn = typecheck.LookupRuntime("memmove") - fn = typecheck.SubstArgTypes(fn, t.Elem(), t.Elem()) - ncopy := mkcall1(fn, nil, init, ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), copyptr, size) - init.Append(walkexpr(typecheck.Stmt(ncopy), init)) - - return s - } - // Replace make+copy with runtime.makeslicecopy. - // instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer - fn := typecheck.LookupRuntime("makeslicecopy") - s := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil) - s.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR])) - s.Ptr.MarkNonNil() - s.LenCap = []ir.Node{length, length} - s.SetType(t) - return walkexpr(typecheck.Expr(s), init) - - case ir.ORUNESTR: - n := n.(*ir.ConvExpr) - a := typecheck.NodNil() - if n.Esc() == ir.EscNone { - t := types.NewArray(types.Types[types.TUINT8], 4) - a = typecheck.NodAddr(typecheck.Temp(t)) - } - // intstring(*[4]byte, rune) - return mkcall("intstring", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TINT64])) - - case ir.OBYTES2STR, ir.ORUNES2STR: - n := n.(*ir.ConvExpr) - a := typecheck.NodNil() - if n.Esc() == ir.EscNone { - // Create temporary buffer for string on stack. 
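That stack buffer is what makes short, non-escaping []byte-to-string conversions allocation-free; a sketch (the expected count assumes the result fits the 32-byte buffer and does not escape):

package main

import (
    "fmt"
    "testing"
)

func main() {
    b := []byte("short")

    // The conversion result lives only inside the closure, so
    // slicebytetostring can build it in the stack buffer.
    allocs := testing.AllocsPerRun(100, func() {
        s := string(b)
        _ = len(s)
    })

    fmt.Println(allocs) // expect 0
}
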
- t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize) - a = typecheck.NodAddr(typecheck.Temp(t)) - } - if n.Op() == ir.ORUNES2STR { - // slicerunetostring(*[32]byte, []rune) string - return mkcall("slicerunetostring", n.Type(), init, a, n.X) - } - // slicebytetostring(*[32]byte, ptr *byte, n int) string - n.X = cheapexpr(n.X, init) - ptr, len := backingArrayPtrLen(n.X) - return mkcall("slicebytetostring", n.Type(), init, a, ptr, len) - - case ir.OBYTES2STRTMP: - n := n.(*ir.ConvExpr) - n.X = walkexpr(n.X, init) - if !base.Flag.Cfg.Instrumenting { - // Let the backend handle OBYTES2STRTMP directly - // to avoid a function call to slicebytetostringtmp. - return n - } - // slicebytetostringtmp(ptr *byte, n int) string - n.X = cheapexpr(n.X, init) - ptr, len := backingArrayPtrLen(n.X) - return mkcall("slicebytetostringtmp", n.Type(), init, ptr, len) - - case ir.OSTR2BYTES: - n := n.(*ir.ConvExpr) - s := n.X - if ir.IsConst(s, constant.String) { - sc := ir.StringVal(s) - - // Allocate a [n]byte of the right size. - t := types.NewArray(types.Types[types.TUINT8], int64(len(sc))) - var a ir.Node - if n.Esc() == ir.EscNone && len(sc) <= int(ir.MaxImplicitStackVarSize) { - a = typecheck.NodAddr(typecheck.Temp(t)) - } else { - a = callnew(t) - } - p := typecheck.Temp(t.PtrTo()) // *[n]byte - init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, p, a))) - - // Copy from the static string data to the [n]byte. - if len(sc) > 0 { - as := ir.NewAssignStmt(base.Pos, ir.NewStarExpr(base.Pos, p), ir.NewStarExpr(base.Pos, typecheck.ConvNop(ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), t.PtrTo()))) - appendWalkStmt(init, as) - } - - // Slice the [n]byte to a []byte. - slice := ir.NewSliceExpr(n.Pos(), ir.OSLICEARR, p) - slice.SetType(n.Type()) - slice.SetTypecheck(1) - return walkexpr(slice, init) - } - - a := typecheck.NodNil() - if n.Esc() == ir.EscNone { - // Create temporary buffer for slice on stack. - t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize) - a = typecheck.NodAddr(typecheck.Temp(t)) - } - // stringtoslicebyte(*32[byte], string) []byte - return mkcall("stringtoslicebyte", n.Type(), init, a, typecheck.Conv(s, types.Types[types.TSTRING])) - - case ir.OSTR2BYTESTMP: - // []byte(string) conversion that creates a slice - // referring to the actual string bytes. - // This conversion is handled later by the backend and - // is only for use by internal compiler optimizations - // that know that the slice won't be mutated. - // The only such case today is: - // for i, c := range []byte(string) - n := n.(*ir.ConvExpr) - n.X = walkexpr(n.X, init) - return n - - case ir.OSTR2RUNES: - n := n.(*ir.ConvExpr) - a := typecheck.NodNil() - if n.Esc() == ir.EscNone { - // Create temporary buffer for slice on stack. - t := types.NewArray(types.Types[types.TINT32], tmpstringbufsize) - a = typecheck.NodAddr(typecheck.Temp(t)) - } - // stringtoslicerune(*[32]rune, string) []rune - return mkcall("stringtoslicerune", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TSTRING])) - - case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT, ir.OPTRLIT: - if isStaticCompositeLiteral(n) && !ssagen.TypeOK(n.Type()) { - n := n.(*ir.CompLitExpr) // not OPTRLIT - // n can be directly represented in the read-only data section. - // Make direct reference to the static data. See issue 12841. 
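Both conversion flavors are visible from user code; a sketch contrasting the temporary form with the copying form (`sink` is the example's escape hatch):

package main

import (
    "fmt"
    "testing"
)

var sink []byte

func main() {
    s := "hello, world"

    // OSTR2BYTESTMP: ranging over []byte(s) reads the string's
    // bytes in place, since the loop provably never mutates them.
    temp := testing.AllocsPerRun(100, func() {
        n := 0
        for _, c := range []byte(s) {
            n += int(c)
        }
        _ = n
    })

    // OSTR2BYTES: a conversion whose result is kept must copy.
    kept := testing.AllocsPerRun(100, func() { sink = []byte(s) })

    fmt.Println(temp, kept) // expect 0 and 1
}
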
- vstat := readonlystaticname(n.Type()) - fixedlit(inInitFunction, initKindStatic, n, vstat, init) - return typecheck.Expr(vstat) - } - var_ := typecheck.Temp(n.Type()) - anylit(n, var_, init) - return var_ - - case ir.OSEND: - n := n.(*ir.SendStmt) - n1 := n.Value - n1 = typecheck.AssignConv(n1, n.Chan.Type().Elem(), "chan send") - n1 = walkexpr(n1, init) - n1 = typecheck.NodAddr(n1) - return mkcall1(chanfn("chansend1", 2, n.Chan.Type()), nil, init, n.Chan, n1) - - case ir.OCLOSURE: - return walkclosure(n.(*ir.ClosureExpr), init) - - case ir.OCALLPART: - return walkpartialcall(n.(*ir.CallPartExpr), init) - } - - // No return! Each case must return (or panic), - // to avoid confusion about what gets returned - // in the presence of type assertions. -} - -// rtconvfn returns the parameter and result types that will be used by a -// runtime function to convert from type src to type dst. The runtime function -// name can be derived from the names of the returned types. -// -// If no such function is necessary, it returns (Txxx, Txxx). -func rtconvfn(src, dst *types.Type) (param, result types.Kind) { - if ssagen.Arch.SoftFloat { - return types.Txxx, types.Txxx - } - - switch ssagen.Arch.LinkArch.Family { - case sys.ARM, sys.MIPS: - if src.IsFloat() { - switch dst.Kind() { - case types.TINT64, types.TUINT64: - return types.TFLOAT64, dst.Kind() - } - } - if dst.IsFloat() { - switch src.Kind() { - case types.TINT64, types.TUINT64: - return src.Kind(), types.TFLOAT64 - } - } - - case sys.I386: - if src.IsFloat() { - switch dst.Kind() { - case types.TINT64, types.TUINT64: - return types.TFLOAT64, dst.Kind() - case types.TUINT32, types.TUINT, types.TUINTPTR: - return types.TFLOAT64, types.TUINT32 - } - } - if dst.IsFloat() { - switch src.Kind() { - case types.TINT64, types.TUINT64: - return src.Kind(), types.TFLOAT64 - case types.TUINT32, types.TUINT, types.TUINTPTR: - return types.TUINT32, types.TFLOAT64 - } - } - } - return types.Txxx, types.Txxx -} - -// TODO(josharian): combine this with its caller and simplify -func reduceSlice(n *ir.SliceExpr) ir.Node { - low, high, max := n.SliceBounds() - if high != nil && high.Op() == ir.OLEN && ir.SameSafeExpr(n.X, high.(*ir.UnaryExpr).X) { - // Reduce x[i:len(x)] to x[i:]. - high = nil - } - n.SetSliceBounds(low, high, max) - if (n.Op() == ir.OSLICE || n.Op() == ir.OSLICESTR) && low == nil && high == nil { - // Reduce x[:] to x. - if base.Debug.Slice > 0 { - base.Warn("slice: omit slice operation") - } - return n.X - } - return n -} - -func ascompatee1(l ir.Node, r ir.Node, init *ir.Nodes) *ir.AssignStmt { - // convas will turn map assigns into function calls, - // making it impossible for reorder3 to work. - n := ir.NewAssignStmt(base.Pos, l, r) - - if l.Op() == ir.OINDEXMAP { - return n - } - - return convas(n, init) -} - -func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node { - // check assign expression list to - // an expression list. called in - // expr-list = expr-list - - // ensure order of evaluation for function calls - for i := range nl { - nl[i] = safeexpr(nl[i], init) - } - for i1 := range nr { - nr[i1] = safeexpr(nr[i1], init) - } - - var nn []*ir.AssignStmt - i := 0 - for ; i < len(nl); i++ { - if i >= len(nr) { - break - } - // Do not generate 'x = x' during return. See issue 4014. 
- if op == ir.ORETURN && ir.SameSafeExpr(nl[i], nr[i]) { - continue - } - nn = append(nn, ascompatee1(nl[i], nr[i], init)) - } - - // cannot happen: caller checked that lists had same length - if i < len(nl) || i < len(nr) { - var nln, nrn ir.Nodes - nln.Set(nl) - nrn.Set(nr) - base.Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), ir.FuncName(ir.CurFunc)) - } - return reorder3(nn) -} - -// fncall reports whether assigning an rvalue of type rt to an lvalue l might involve a function call. -func fncall(l ir.Node, rt *types.Type) bool { - if l.HasCall() || l.Op() == ir.OINDEXMAP { - return true - } - if types.Identical(l.Type(), rt) { - return false - } - // There might be a conversion required, which might involve a runtime call. - return true -} - -// check assign type list to -// an expression list. called in -// expr-list = func() -func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { - if len(nl) != nr.NumFields() { - base.Fatalf("ascompatet: assignment count mismatch: %d = %d", len(nl), nr.NumFields()) - } - - var nn, mm ir.Nodes - for i, l := range nl { - if ir.IsBlank(l) { - continue - } - r := nr.Field(i) - - // Any assignment to an lvalue that might cause a function call must be - // deferred until all the returned values have been read. - if fncall(l, r.Type) { - tmp := ir.Node(typecheck.Temp(r.Type)) - tmp = typecheck.Expr(tmp) - a := convas(ir.NewAssignStmt(base.Pos, l, tmp), &mm) - mm.Append(a) - l = tmp - } - - res := ir.NewResultExpr(base.Pos, nil, types.BADWIDTH) - res.Offset = base.Ctxt.FixedFrameSize() + r.Offset - res.SetType(r.Type) - res.SetTypecheck(1) - - a := convas(ir.NewAssignStmt(base.Pos, l, res), &nn) - updateHasCall(a) - if a.HasCall() { - ir.Dump("ascompatet ucount", a) - base.Fatalf("ascompatet: too many function calls evaluating parameters") - } - - nn.Append(a) - } - return append(nn, mm...) -} - -func walkCall(n *ir.CallExpr, init *ir.Nodes) { - if len(n.Rargs) != 0 { - return // already walked - } - - params := n.X.Type().Params() - args := n.Args - - n.X = walkexpr(n.X, init) - walkexprlist(args, init) - - // If this is a method call, add the receiver at the beginning of the args. - if n.Op() == ir.OCALLMETH { - withRecv := make([]ir.Node, len(args)+1) - dot := n.X.(*ir.SelectorExpr) - withRecv[0] = dot.X - dot.X = nil - copy(withRecv[1:], args) - args = withRecv - } - - // For any argument whose evaluation might require a function call, - // store that argument into a temporary variable, - // to prevent that calls from clobbering arguments already on the stack. - // When instrumenting, all arguments might require function calls. - var tempAssigns []ir.Node - for i, arg := range args { - updateHasCall(arg) - // Determine param type. - var t *types.Type - if n.Op() == ir.OCALLMETH { - if i == 0 { - t = n.X.Type().Recv().Type - } else { - t = params.Field(i - 1).Type - } - } else { - t = params.Field(i).Type - } - if base.Flag.Cfg.Instrumenting || fncall(arg, t) { - // make assignment of fncall to tempAt - tmp := typecheck.Temp(t) - a := convas(ir.NewAssignStmt(base.Pos, tmp, arg), init) - tempAssigns = append(tempAssigns, a) - // replace arg with temp - args[i] = tmp - } - } - - n.Args.Set(tempAssigns) - n.Rargs.Set(args) -} - -// generate code for print -func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { - // Hoist all the argument evaluation up before the lock. - walkexprlistcheap(nn.Args, init) - - // For println, add " " between elements and "\n" at the end. 
- if nn.Op() == ir.OPRINTN {
- s := nn.Args
- t := make([]ir.Node, 0, len(s)*2)
- for i, n := range s {
- if i != 0 {
- t = append(t, ir.NewString(" "))
- }
- t = append(t, n)
- }
- t = append(t, ir.NewString("\n"))
- nn.Args.Set(t)
- }
-
- // Collapse runs of constant strings.
- s := nn.Args
- t := make([]ir.Node, 0, len(s))
- for i := 0; i < len(s); {
- var strs []string
- for i < len(s) && ir.IsConst(s[i], constant.String) {
- strs = append(strs, ir.StringVal(s[i]))
- i++
- }
- if len(strs) > 0 {
- t = append(t, ir.NewString(strings.Join(strs, "")))
- }
- if i < len(s) {
- t = append(t, s[i])
- i++
- }
- }
- nn.Args.Set(t)
-
- calls := []ir.Node{mkcall("printlock", nil, init)}
- for i, n := range nn.Args {
- if n.Op() == ir.OLITERAL {
- if n.Type() == types.UntypedRune {
- n = typecheck.DefaultLit(n, types.RuneType)
- }
-
- switch n.Val().Kind() {
- case constant.Int:
- n = typecheck.DefaultLit(n, types.Types[types.TINT64])
-
- case constant.Float:
- n = typecheck.DefaultLit(n, types.Types[types.TFLOAT64])
- }
- }
-
- if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
- n = typecheck.DefaultLit(n, types.Types[types.TINT64])
- }
- n = typecheck.DefaultLit(n, nil)
- nn.Args[i] = n
- if n.Type() == nil || n.Type().Kind() == types.TFORW {
- continue
- }
-
- var on *ir.Name
- switch n.Type().Kind() {
- case types.TINTER:
- if n.Type().IsEmptyInterface() {
- on = typecheck.LookupRuntime("printeface")
- } else {
- on = typecheck.LookupRuntime("printiface")
- }
- on = typecheck.SubstArgTypes(on, n.Type()) // any-1
- case types.TPTR:
- if n.Type().Elem().NotInHeap() {
- on = typecheck.LookupRuntime("printuintptr")
- n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
- n.SetType(types.Types[types.TUNSAFEPTR])
- n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
- n.SetType(types.Types[types.TUINTPTR])
- break
- }
- fallthrough
- case types.TCHAN, types.TMAP, types.TFUNC, types.TUNSAFEPTR:
- on = typecheck.LookupRuntime("printpointer")
- on = typecheck.SubstArgTypes(on, n.Type()) // any-1
- case types.TSLICE:
- on = typecheck.LookupRuntime("printslice")
- on = typecheck.SubstArgTypes(on, n.Type()) // any-1
- case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
- if types.IsRuntimePkg(n.Type().Sym().Pkg) && n.Type().Sym().Name == "hex" {
- on = typecheck.LookupRuntime("printhex")
- } else {
- on = typecheck.LookupRuntime("printuint")
- }
- case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64:
- on = typecheck.LookupRuntime("printint")
- case types.TFLOAT32, types.TFLOAT64:
- on = typecheck.LookupRuntime("printfloat")
- case types.TCOMPLEX64, types.TCOMPLEX128:
- on = typecheck.LookupRuntime("printcomplex")
- case types.TBOOL:
- on = typecheck.LookupRuntime("printbool")
- case types.TSTRING:
- cs := ""
- if ir.IsConst(n, constant.String) {
- cs = ir.StringVal(n)
- }
- switch cs {
- case " ":
- on = typecheck.LookupRuntime("printsp")
- case "\n":
- on = typecheck.LookupRuntime("printnl")
- default:
- on = typecheck.LookupRuntime("printstring")
- }
- default:
- badtype(ir.OPRINT, n.Type(), nil)
- continue
- }
-
- r := ir.NewCallExpr(base.Pos, ir.OCALL, on, nil)
- if params := on.Type().Params().FieldSlice(); len(params) > 0 {
- t := params[0].Type
- if !types.Identical(t, n.Type()) {
- n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
- n.SetType(t)
- }
- r.Args.Append(n)
- }
- calls = append(calls, r)
- }
-
- calls = append(calls, mkcall("printunlock", nil, init))
-
- typecheck.Stmts(calls)
- walkexprlist(calls, init)
-
- r := ir.NewBlockStmt(base.Pos, nil)
- r.List.Set(calls)
- return walkstmt(typecheck.Stmt(r))
-}
-
-func callnew(t *types.Type) ir.Node {
- types.CalcSize(t)
- n := ir.NewUnaryExpr(base.Pos, ir.ONEWOBJ, reflectdata.TypePtr(t))
- n.SetType(types.NewPtr(t))
- n.SetTypecheck(1)
- n.MarkNonNil()
- return n
-}
-
-func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt {
- if n.Op() != ir.OAS {
- base.Fatalf("convas: not OAS %v", n.Op())
- }
- defer updateHasCall(n)
-
- n.SetTypecheck(1)
-
- if n.X == nil || n.Y == nil {
- return n
- }
-
- lt := n.X.Type()
- rt := n.Y.Type()
- if lt == nil || rt == nil {
- return n
- }
-
- if ir.IsBlank(n.X) {
- n.Y = typecheck.DefaultLit(n.Y, nil)
- return n
- }
-
- if !types.Identical(lt, rt) {
- n.Y = typecheck.AssignConv(n.Y, lt, "assignment")
- n.Y = walkexpr(n.Y, init)
- }
- types.CalcSize(n.Y.Type())
-
- return n
-}
-
-// reorder3
-// from ascompatee
-// a,b = c,d
-// simultaneous assignment. there cannot
-// be later use of an earlier lvalue.
-//
-// function calls have been removed.
-func reorder3(all []*ir.AssignStmt) []ir.Node {
- // If a needed expression may be affected by an
- // earlier assignment, make an early copy of that
- // expression and use the copy instead.
- var early []ir.Node
-
- var mapinit ir.Nodes
- for i, n := range all {
- l := n.X
-
- // Save subexpressions needed on left side.
- // Drill through non-dereferences.
- for {
- switch ll := l; ll.Op() {
- case ir.ODOT:
- ll := ll.(*ir.SelectorExpr)
- l = ll.X
- continue
- case ir.OPAREN:
- ll := ll.(*ir.ParenExpr)
- l = ll.X
- continue
- case ir.OINDEX:
- ll := ll.(*ir.IndexExpr)
- if ll.X.Type().IsArray() {
- ll.Index = reorder3save(ll.Index, all, i, &early)
- l = ll.X
- continue
- }
- }
- break
- }
-
- switch l.Op() {
- default:
- base.Fatalf("reorder3 unexpected lvalue %v", l.Op())
-
- case ir.ONAME:
- break
-
- case ir.OINDEX, ir.OINDEXMAP:
- l := l.(*ir.IndexExpr)
- l.X = reorder3save(l.X, all, i, &early)
- l.Index = reorder3save(l.Index, all, i, &early)
- if l.Op() == ir.OINDEXMAP {
- all[i] = convas(all[i], &mapinit)
- }
-
- case ir.ODEREF:
- l := l.(*ir.StarExpr)
- l.X = reorder3save(l.X, all, i, &early)
- case ir.ODOTPTR:
- l := l.(*ir.SelectorExpr)
- l.X = reorder3save(l.X, all, i, &early)
- }
-
- // Save expression on right side.
- all[i].Y = reorder3save(all[i].Y, all, i, &early)
- }
-
- early = append(mapinit, early...)
- for _, as := range all {
- early = append(early, as)
- }
- return early
-}
-
-// if the evaluation of *np would be affected by the
-// assignments in all up to but not including the ith assignment,
-// copy into a temporary during *early and
-// replace *np with that temp.
-// The result of reorder3save MUST be assigned back to n, e.g.
-// n.Left = reorder3save(n.Left, all, i, early)
-func reorder3save(n ir.Node, all []*ir.AssignStmt, i int, early *[]ir.Node) ir.Node {
- if !aliased(n, all[:i]) {
- return n
- }
-
- q := ir.Node(typecheck.Temp(n.Type()))
- as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, q, n))
- *early = append(*early, as)
- return q
-}
-
-// Is it possible that the computation of r might be
-// affected by assignments in all?
-func aliased(r ir.Node, all []*ir.AssignStmt) bool {
- if r == nil {
- return false
- }
-
- // Treat all fields of a struct as referring to the whole struct.
- // We could do better but we would have to keep track of the fields.
- for r.Op() == ir.ODOT {
- r = r.(*ir.SelectorExpr).X
- }
-
- // Look for obvious aliasing: a variable being assigned
- // during the all list and appearing in n.
- // Also record whether there are any writes to addressable - // memory (either main memory or variables whose addresses - // have been taken). - memwrite := false - for _, as := range all { - // We can ignore assignments to blank. - if ir.IsBlank(as.X) { - continue - } - - lv := ir.OuterValue(as.X) - if lv.Op() != ir.ONAME { - memwrite = true - continue - } - l := lv.(*ir.Name) - - switch l.Class_ { - default: - base.Fatalf("unexpected class: %v, %v", l, l.Class_) - - case ir.PAUTOHEAP, ir.PEXTERN: - memwrite = true - continue - - case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT: - if l.Name().Addrtaken() { - memwrite = true - continue - } - - if refersToName(l, r) { - // Direct hit: l appears in r. - return true - } - } - } - - // The variables being written do not appear in r. - // However, r might refer to computed addresses - // that are being written. - - // If no computed addresses are affected by the writes, no aliasing. - if !memwrite { - return false - } - - // If r does not refer to any variables whose addresses have been taken, - // then the only possible writes to r would be directly to the variables, - // and we checked those above, so no aliasing problems. - if !anyAddrTaken(r) { - return false - } - - // Otherwise, both the writes and r refer to computed memory addresses. - // Assume that they might conflict. - return true -} - -// anyAddrTaken reports whether the evaluation n, -// which appears on the left side of an assignment, -// may refer to variables whose addresses have been taken. -func anyAddrTaken(n ir.Node) bool { - return ir.Any(n, func(n ir.Node) bool { - switch n.Op() { - case ir.ONAME: - n := n.(*ir.Name) - return n.Class_ == ir.PEXTERN || n.Class_ == ir.PAUTOHEAP || n.Name().Addrtaken() - - case ir.ODOT: // but not ODOTPTR - should have been handled in aliased. - base.Fatalf("anyAddrTaken unexpected ODOT") - - case ir.OADD, - ir.OAND, - ir.OANDAND, - ir.OANDNOT, - ir.OBITNOT, - ir.OCONV, - ir.OCONVIFACE, - ir.OCONVNOP, - ir.ODIV, - ir.ODOTTYPE, - ir.OLITERAL, - ir.OLSH, - ir.OMOD, - ir.OMUL, - ir.ONEG, - ir.ONIL, - ir.OOR, - ir.OOROR, - ir.OPAREN, - ir.OPLUS, - ir.ORSH, - ir.OSUB, - ir.OXOR: - return false - } - // Be conservative. - return true - }) -} - -// refersToName reports whether r refers to name. -func refersToName(name *ir.Name, r ir.Node) bool { - return ir.Any(r, func(r ir.Node) bool { - return r.Op() == ir.ONAME && r == name - }) -} - -var stop = errors.New("stop") - -// refersToCommonName reports whether any name -// appears in common between l and r. -// This is called from sinit.go. -func refersToCommonName(l ir.Node, r ir.Node) bool { - if l == nil || r == nil { - return false - } - - // This could be written elegantly as a Find nested inside a Find: - // - // found := ir.Find(l, func(l ir.Node) interface{} { - // if l.Op() == ir.ONAME { - // return ir.Find(r, func(r ir.Node) interface{} { - // if r.Op() == ir.ONAME && l.Name() == r.Name() { - // return r - // } - // return nil - // }) - // } - // return nil - // }) - // return found != nil - // - // But that would allocate a new closure for the inner Find - // for each name found on the left side. - // It may not matter at all, but the below way of writing it - // only allocates two closures, not O(|L|) closures. 
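The same trick in a freestanding form, on a toy tree rather than compiler IR (everything here, including `node`, `stopWalk`, and `hasCommonName`, is invented for the sketch):

package main

import (
    "errors"
    "fmt"
)

var stopWalk = errors.New("stop")

type node struct {
    name     string
    children []*node
}

// hasCommonName allocates exactly two closures: doR is re-targeted
// through the captured target variable instead of being rebuilt for
// every name found on the left.
func hasCommonName(l, r *node) bool {
    var doL, doR func(*node) error
    var target string
    doR = func(n *node) error {
        if n.name == target {
            return stopWalk
        }
        for _, c := range n.children {
            if doR(c) == stopWalk {
                return stopWalk
            }
        }
        return nil
    }
    doL = func(n *node) error {
        target = n.name
        if doR(r) == stopWalk {
            return stopWalk
        }
        for _, c := range n.children {
            if doL(c) == stopWalk {
                return stopWalk
            }
        }
        return nil
    }
    return doL(l) == stopWalk
}

func main() {
    l := &node{name: "x", children: []*node{{name: "y"}}}
    r := &node{name: "z", children: []*node{{name: "y"}}}
    fmt.Println(hasCommonName(l, r)) // true: both contain "y"
}
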
- - var doL, doR func(ir.Node) error - var targetL *ir.Name - doR = func(r ir.Node) error { - if r.Op() == ir.ONAME && r.Name() == targetL { - return stop - } - return ir.DoChildren(r, doR) - } - doL = func(l ir.Node) error { - if l.Op() == ir.ONAME { - l := l.(*ir.Name) - targetL = l.Name() - if doR(r) == stop { - return stop - } - } - return ir.DoChildren(l, doL) - } - return doL(l) == stop -} - -// paramstoheap returns code to allocate memory for heap-escaped parameters -// and to copy non-result parameters' values from the stack. -func paramstoheap(params *types.Type) []ir.Node { - var nn []ir.Node - for _, t := range params.Fields().Slice() { - v := ir.AsNode(t.Nname) - if v != nil && v.Sym() != nil && strings.HasPrefix(v.Sym().Name, "~r") { // unnamed result - v = nil - } - if v == nil { - continue - } - - if stackcopy := v.Name().Stackcopy; stackcopy != nil { - nn = append(nn, walkstmt(ir.NewDecl(base.Pos, ir.ODCL, v))) - if stackcopy.Class_ == ir.PPARAM { - nn = append(nn, walkstmt(typecheck.Stmt(ir.NewAssignStmt(base.Pos, v, stackcopy)))) - } - } - } - - return nn -} - -// zeroResults zeros the return values at the start of the function. -// We need to do this very early in the function. Defer might stop a -// panic and show the return values as they exist at the time of -// panic. For precise stacks, the garbage collector assumes results -// are always live, so we need to zero them before any allocations, -// even allocations to move params/results to the heap. -// The generated code is added to Curfn's Enter list. -func zeroResults() { - for _, f := range ir.CurFunc.Type().Results().Fields().Slice() { - v := ir.AsNode(f.Nname) - if v != nil && v.Name().Heapaddr != nil { - // The local which points to the return value is the - // thing that needs zeroing. This is already handled - // by a Needzero annotation in plive.go:livenessepilogue. - continue - } - if ir.IsParamHeapCopy(v) { - // TODO(josharian/khr): Investigate whether we can switch to "continue" here, - // and document more in either case. - // In the review of CL 114797, Keith wrote (roughly): - // I don't think the zeroing below matters. - // The stack return value will never be marked as live anywhere in the function. - // It is not written to until deferreturn returns. - v = v.Name().Stackcopy - } - // Zero the stack location containing f. - ir.CurFunc.Enter.Append(ir.NewAssignStmt(ir.CurFunc.Pos(), v, nil)) - } -} - -// returnsfromheap returns code to copy values for heap-escaped parameters -// back to the stack. -func returnsfromheap(params *types.Type) []ir.Node { - var nn []ir.Node - for _, t := range params.Fields().Slice() { - v := ir.AsNode(t.Nname) - if v == nil { - continue - } - if stackcopy := v.Name().Stackcopy; stackcopy != nil && stackcopy.Class_ == ir.PPARAMOUT { - nn = append(nn, walkstmt(typecheck.Stmt(ir.NewAssignStmt(base.Pos, stackcopy, v)))) - } - } - - return nn -} - -// heapmoves generates code to handle migrating heap-escaped parameters -// between the stack and the heap. The generated code is added to Curfn's -// Enter and Exit lists. -func heapmoves() { - lno := base.Pos - base.Pos = ir.CurFunc.Pos() - nn := paramstoheap(ir.CurFunc.Type().Recvs()) - nn = append(nn, paramstoheap(ir.CurFunc.Type().Params())...) - nn = append(nn, paramstoheap(ir.CurFunc.Type().Results())...) - ir.CurFunc.Enter.Append(nn...) - base.Pos = ir.CurFunc.Endlineno - ir.CurFunc.Exit.Append(returnsfromheap(ir.CurFunc.Type().Results())...) 
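What this machinery produces is visible with escape-analysis diagnostics; building the sketch below with go build -gcflags=-m reports "moved to heap: x" (the program itself is an invented example):

package main

import "fmt"

var saved *int

// x's address outlives the call, so the compiler allocates a heap
// cell for the parameter; the prologue code built above copies the
// incoming stack value into it.
func keep(x int) {
    saved = &x
}

func main() {
    keep(42)
    fmt.Println(*saved) // 42
}
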
- base.Pos = lno -} - -func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) *ir.CallExpr { - if fn.Type() == nil || fn.Type().Kind() != types.TFUNC { - base.Fatalf("mkcall %v %v", fn, fn.Type()) - } - - n := fn.Type().NumParams() - if n != len(va) { - base.Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va)) - } - - call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, va) - typecheck.Call(call) - call.SetType(t) - return walkexpr(call, init).(*ir.CallExpr) -} - -func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr { - return vmkcall(typecheck.LookupRuntime(name), t, init, args) -} - -func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr { - return vmkcall(fn, t, init, args) -} - -// byteindex converts n, which is byte-sized, to an int used to index into an array. -// We cannot use conv, because we allow converting bool to int here, -// which is forbidden in user code. -func byteindex(n ir.Node) ir.Node { - // We cannot convert from bool to int directly. - // While converting from int8 to int is possible, it would yield - // the wrong result for negative values. - // Reinterpreting the value as an unsigned byte solves both cases. - if !types.Identical(n.Type(), types.Types[types.TUINT8]) { - n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n) - n.SetType(types.Types[types.TUINT8]) - n.SetTypecheck(1) - } - n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n) - n.SetType(types.Types[types.TINT]) - n.SetTypecheck(1) - return n -} - -func chanfn(name string, n int, t *types.Type) ir.Node { - if !t.IsChan() { - base.Fatalf("chanfn %v", t) - } - fn := typecheck.LookupRuntime(name) - switch n { - default: - base.Fatalf("chanfn %d", n) - case 1: - fn = typecheck.SubstArgTypes(fn, t.Elem()) - case 2: - fn = typecheck.SubstArgTypes(fn, t.Elem(), t.Elem()) - } - return fn -} - -func mapfn(name string, t *types.Type) ir.Node { - if !t.IsMap() { - base.Fatalf("mapfn %v", t) - } - fn := typecheck.LookupRuntime(name) - fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key(), t.Elem()) - return fn -} - -func mapfndel(name string, t *types.Type) ir.Node { - if !t.IsMap() { - base.Fatalf("mapfn %v", t) - } - fn := typecheck.LookupRuntime(name) - fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key()) - return fn -} - -const ( - mapslow = iota - mapfast32 - mapfast32ptr - mapfast64 - mapfast64ptr - mapfaststr - nmapfast -) - -type mapnames [nmapfast]string - -func mkmapnames(base string, ptr string) mapnames { - return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"} -} - -var mapaccess1 = mkmapnames("mapaccess1", "") -var mapaccess2 = mkmapnames("mapaccess2", "") -var mapassign = mkmapnames("mapassign", "ptr") -var mapdelete = mkmapnames("mapdelete", "") - -func mapfast(t *types.Type) int { - // Check runtime/map.go:maxElemSize before changing. - if t.Elem().Width > 128 { - return mapslow - } - switch reflectdata.AlgType(t.Key()) { - case types.AMEM32: - if !t.Key().HasPointers() { - return mapfast32 - } - if types.PtrSize == 4 { - return mapfast32ptr - } - base.Fatalf("small pointer %v", t.Key()) - case types.AMEM64: - if !t.Key().HasPointers() { - return mapfast64 - } - if types.PtrSize == 8 { - return mapfast64ptr - } - // Two-word object, at least one of which is a pointer. - // Use the slow path. 
- case types.ASTRING: - return mapfaststr - } - return mapslow -} - -func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node { - fn := typecheck.LookupRuntime(name) - fn = typecheck.SubstArgTypes(fn, l, r) - return fn -} - -func addstr(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { - c := len(n.List) - - if c < 2 { - base.Fatalf("addstr count %d too small", c) - } - - buf := typecheck.NodNil() - if n.Esc() == ir.EscNone { - sz := int64(0) - for _, n1 := range n.List { - if n1.Op() == ir.OLITERAL { - sz += int64(len(ir.StringVal(n1))) - } - } - - // Don't allocate the buffer if the result won't fit. - if sz < tmpstringbufsize { - // Create temporary buffer for result string on stack. - t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize) - buf = typecheck.NodAddr(typecheck.Temp(t)) - } - } - - // build list of string arguments - args := []ir.Node{buf} - for _, n2 := range n.List { - args = append(args, typecheck.Conv(n2, types.Types[types.TSTRING])) - } - - var fn string - if c <= 5 { - // small numbers of strings use direct runtime helpers. - // note: order.expr knows this cutoff too. - fn = fmt.Sprintf("concatstring%d", c) - } else { - // large numbers of strings are passed to the runtime as a slice. - fn = "concatstrings" - - t := types.NewSlice(types.Types[types.TSTRING]) - // args[1:] to skip buf arg - slice := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(t), args[1:]) - slice.Prealloc = n.Prealloc - args = []ir.Node{buf, slice} - slice.SetEsc(ir.EscNone) - } - - cat := typecheck.LookupRuntime(fn) - r := ir.NewCallExpr(base.Pos, ir.OCALL, cat, nil) - r.Args.Set(args) - r1 := typecheck.Expr(r) - r1 = walkexpr(r1, init) - r1.SetType(n.Type()) - - return r1 -} - -func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) { - walkexprlistsafe(n.Args, init) - - // walkexprlistsafe will leave OINDEX (s[n]) alone if both s - // and n are name or literal, but those may index the slice we're - // modifying here. Fix explicitly. - ls := n.Args - for i1, n1 := range ls { - ls[i1] = cheapexpr(n1, init) - } -} - -// expand append(l1, l2...) to -// init { -// s := l1 -// n := len(s) + len(l2) -// // Compare as uint so growslice can panic on overflow. -// if uint(n) > uint(cap(s)) { -// s = growslice(s, n) -// } -// s = s[:n] -// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T)) -// } -// s -// -// l2 is allowed to be a string. 
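At the source level the expansion covers both of these forms (a sketch; results shown in the comments):

package main

import "fmt"

func main() {
    s := []byte("abc")

    // Grows if needed, reslices to the combined length, then does
    // one bulk copy of the appended elements.
    s = append(s, []byte("def")...)

    // As noted above, the second operand may be a string.
    s = append(s, "ghi"...)

    fmt.Println(string(s)) // abcdefghi
}
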
-func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { - walkAppendArgs(n, init) - - l1 := n.Args[0] - l2 := n.Args[1] - l2 = cheapexpr(l2, init) - n.Args[1] = l2 - - var nodes ir.Nodes - - // var s []T - s := typecheck.Temp(l1.Type()) - nodes.Append(ir.NewAssignStmt(base.Pos, s, l1)) // s = l1 - - elemtype := s.Type().Elem() - - // n := len(s) + len(l2) - nn := typecheck.Temp(types.Types[types.TINT]) - nodes.Append(ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), ir.NewUnaryExpr(base.Pos, ir.OLEN, l2)))) - - // if uint(n) > uint(cap(s)) - nif := ir.NewIfStmt(base.Pos, nil, nil, nil) - nuint := typecheck.Conv(nn, types.Types[types.TUINT]) - scapuint := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT]) - nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, nuint, scapuint) - - // instantiate growslice(typ *type, []any, int) []any - fn := typecheck.LookupRuntime("growslice") - fn = typecheck.SubstArgTypes(fn, elemtype, elemtype) - - // s = growslice(T, s, n) - nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))} - nodes.Append(nif) - - // s = s[:n] - nt := ir.NewSliceExpr(base.Pos, ir.OSLICE, s) - nt.SetSliceBounds(nil, nn, nil) - nt.SetBounded(true) - nodes.Append(ir.NewAssignStmt(base.Pos, s, nt)) - - var ncopy ir.Node - if elemtype.HasPointers() { - // copy(s[len(l1):], l2) - slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s) - slice.SetType(s.Type()) - slice.SetSliceBounds(ir.NewUnaryExpr(base.Pos, ir.OLEN, l1), nil, nil) - - ir.CurFunc.SetWBPos(n.Pos()) - - // instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int - fn := typecheck.LookupRuntime("typedslicecopy") - fn = typecheck.SubstArgTypes(fn, l1.Type().Elem(), l2.Type().Elem()) - ptr1, len1 := backingArrayPtrLen(cheapexpr(slice, &nodes)) - ptr2, len2 := backingArrayPtrLen(l2) - ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, reflectdata.TypePtr(elemtype), ptr1, len1, ptr2, len2) - } else if base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime { - // rely on runtime to instrument: - // copy(s[len(l1):], l2) - // l2 can be a slice or string. - slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s) - slice.SetType(s.Type()) - slice.SetSliceBounds(ir.NewUnaryExpr(base.Pos, ir.OLEN, l1), nil, nil) - - ptr1, len1 := backingArrayPtrLen(cheapexpr(slice, &nodes)) - ptr2, len2 := backingArrayPtrLen(l2) - - fn := typecheck.LookupRuntime("slicecopy") - fn = typecheck.SubstArgTypes(fn, ptr1.Type().Elem(), ptr2.Type().Elem()) - ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, ir.NewInt(elemtype.Width)) - } else { - // memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T)) - ix := ir.NewIndexExpr(base.Pos, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1)) - ix.SetBounded(true) - addr := typecheck.NodAddr(ix) - - sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, l2) - - nwid := cheapexpr(typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, l2), types.Types[types.TUINTPTR]), &nodes) - nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(elemtype.Width)) - - // instantiate func memmove(to *any, frm *any, length uintptr) - fn := typecheck.LookupRuntime("memmove") - fn = typecheck.SubstArgTypes(fn, elemtype, elemtype) - ncopy = mkcall1(fn, nil, &nodes, addr, sptr, nwid) - } - ln := append(nodes, ncopy) - - typecheck.Stmts(ln) - walkstmtlist(ln) - init.Append(ln...) 
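The unsigned comparison built above is the usual overflow guard; isolated as a predicate (`needsGrow` is a name invented for the sketch):

package main

import "fmt"

// needsGrow is the capacity test from the expansion: if len(s)+len(l2)
// overflowed int, n is negative, and uint(n) is enormous, so the test
// still routes execution into growslice, where the runtime checks the
// int argument and panics instead of silently wrapping.
func needsGrow(n, capacity int) bool {
    return uint(n) > uint(capacity)
}

func main() {
    fmt.Println(needsGrow(10, 8))  // true: must grow
    fmt.Println(needsGrow(10, 16)) // false: fits
    fmt.Println(needsGrow(-1, 16)) // true: overflowed length
}
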
- return s -} - -// isAppendOfMake reports whether n is of the form append(x , make([]T, y)...). -// isAppendOfMake assumes n has already been typechecked. -func isAppendOfMake(n ir.Node) bool { - if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting { - return false - } - - if n.Typecheck() == 0 { - base.Fatalf("missing typecheck: %+v", n) - } - - if n.Op() != ir.OAPPEND { - return false - } - call := n.(*ir.CallExpr) - if !call.IsDDD || len(call.Args) != 2 || call.Args[1].Op() != ir.OMAKESLICE { - return false - } - - mk := call.Args[1].(*ir.MakeExpr) - if mk.Cap != nil { - return false - } - - // y must be either an integer constant or the largest possible positive value - // of variable y needs to fit into an uint. - - // typecheck made sure that constant arguments to make are not negative and fit into an int. - - // The care of overflow of the len argument to make will be handled by an explicit check of int(len) < 0 during runtime. - y := mk.Len - if !ir.IsConst(y, constant.Int) && y.Type().Size() > types.Types[types.TUINT].Size() { - return false - } - - return true -} - -// extendslice rewrites append(l1, make([]T, l2)...) to -// init { -// if l2 >= 0 { // Empty if block here for more meaningful node.SetLikely(true) -// } else { -// panicmakeslicelen() -// } -// s := l1 -// n := len(s) + l2 -// // Compare n and s as uint so growslice can panic on overflow of len(s) + l2. -// // cap is a positive int and n can become negative when len(s) + l2 -// // overflows int. Interpreting n when negative as uint makes it larger -// // than cap(s). growslice will check the int n arg and panic if n is -// // negative. This prevents the overflow from being undetected. -// if uint(n) > uint(cap(s)) { -// s = growslice(T, s, n) -// } -// s = s[:n] -// lptr := &l1[0] -// sptr := &s[0] -// if lptr == sptr || !T.HasPointers() { -// // growslice did not clear the whole underlying array (or did not get called) -// hp := &s[len(l1)] -// hn := l2 * sizeof(T) -// memclr(hp, hn) -// } -// } -// s -func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node { - // isAppendOfMake made sure all possible positive values of l2 fit into an uint. - // The case of l2 overflow when converting from e.g. uint to int is handled by an explicit - // check of l2 < 0 at runtime which is generated below. - l2 := typecheck.Conv(n.Args[1].(*ir.MakeExpr).Len, types.Types[types.TINT]) - l2 = typecheck.Expr(l2) - n.Args[1] = l2 // walkAppendArgs expects l2 in n.List.Second(). 
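The point of the rewrite is that the make'd slice is never materialized; a sketch (the zero-allocation expectation assumes the destination already has enough capacity):

package main

import (
    "fmt"
    "testing"
)

func main() {
    buf := make([]int, 0, 1024)

    // Recognized by isAppendOfMake: no temporary slice is built;
    // the destination is extended and the new tail cleared in place.
    allocs := testing.AllocsPerRun(100, func() {
        s := append(buf[:0], make([]int, 512)...)
        _ = s
    })

    fmt.Println(allocs) // expect 0
}
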
- - walkAppendArgs(n, init) - - l1 := n.Args[0] - l2 = n.Args[1] // re-read l2, as it may have been updated by walkAppendArgs - - var nodes []ir.Node - - // if l2 >= 0 (likely happens), do nothing - nifneg := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGE, l2, ir.NewInt(0)), nil, nil) - nifneg.Likely = true - - // else panicmakeslicelen() - nifneg.Else = []ir.Node{mkcall("panicmakeslicelen", nil, init)} - nodes = append(nodes, nifneg) - - // s := l1 - s := typecheck.Temp(l1.Type()) - nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, l1)) - - elemtype := s.Type().Elem() - - // n := len(s) + l2 - nn := typecheck.Temp(types.Types[types.TINT]) - nodes = append(nodes, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), l2))) - - // if uint(n) > uint(cap(s)) - nuint := typecheck.Conv(nn, types.Types[types.TUINT]) - capuint := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT]) - nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, nuint, capuint), nil, nil) - - // instantiate growslice(typ *type, old []any, newcap int) []any - fn := typecheck.LookupRuntime("growslice") - fn = typecheck.SubstArgTypes(fn, elemtype, elemtype) - - // s = growslice(T, s, n) - nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))} - nodes = append(nodes, nif) - - // s = s[:n] - nt := ir.NewSliceExpr(base.Pos, ir.OSLICE, s) - nt.SetSliceBounds(nil, nn, nil) - nt.SetBounded(true) - nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, nt)) - - // lptr := &l1[0] - l1ptr := typecheck.Temp(l1.Type().Elem().PtrTo()) - tmp := ir.NewUnaryExpr(base.Pos, ir.OSPTR, l1) - nodes = append(nodes, ir.NewAssignStmt(base.Pos, l1ptr, tmp)) - - // sptr := &s[0] - sptr := typecheck.Temp(elemtype.PtrTo()) - tmp = ir.NewUnaryExpr(base.Pos, ir.OSPTR, s) - nodes = append(nodes, ir.NewAssignStmt(base.Pos, sptr, tmp)) - - // hp := &s[len(l1)] - ix := ir.NewIndexExpr(base.Pos, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1)) - ix.SetBounded(true) - hp := typecheck.ConvNop(typecheck.NodAddr(ix), types.Types[types.TUNSAFEPTR]) - - // hn := l2 * sizeof(elem(s)) - hn := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, l2, ir.NewInt(elemtype.Width)), types.Types[types.TUINTPTR]) - - clrname := "memclrNoHeapPointers" - hasPointers := elemtype.HasPointers() - if hasPointers { - clrname = "memclrHasPointers" - ir.CurFunc.SetWBPos(n.Pos()) - } - - var clr ir.Nodes - clrfn := mkcall(clrname, nil, &clr, hp, hn) - clr.Append(clrfn) - - if hasPointers { - // if l1ptr == sptr - nifclr := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OEQ, l1ptr, sptr), nil, nil) - nifclr.Body = clr - nodes = append(nodes, nifclr) - } else { - nodes = append(nodes, clr...) - } - - typecheck.Stmts(nodes) - walkstmtlist(nodes) - init.Append(nodes...) - return s -} - -// Rewrite append(src, x, y, z) so that any side effects in -// x, y, z (including runtime panics) are evaluated in -// initialization statements before the append. -// For normal code generation, stop there and leave the -// rest to cgen_append. -// -// For race detector, expand append(src, a [, b]* ) to -// -// init { -// s := src -// const argc = len(args) - 1 -// if cap(s) - len(s) < argc { -// s = growslice(s, len(s)+argc) -// } -// n := len(s) -// s = s[:n+argc] -// s[n] = a -// s[n+1] = b -// ... 
-// }
-// s
-func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
- if !ir.SameSafeExpr(dst, n.Args[0]) {
- n.Args[0] = safeexpr(n.Args[0], init)
- n.Args[0] = walkexpr(n.Args[0], init)
- }
- walkexprlistsafe(n.Args[1:], init)
-
- nsrc := n.Args[0]
-
- // walkexprlistsafe will leave OINDEX (s[n]) alone if both s
- // and n are name or literal, but those may index the slice we're
- // modifying here. Fix explicitly.
- // Using cheapexpr also makes sure that the evaluation
- // of all arguments (and especially any panics) happen
- // before we begin to modify the slice in a visible way.
- ls := n.Args[1:]
- for i, n := range ls {
- n = cheapexpr(n, init)
- if !types.Identical(n.Type(), nsrc.Type().Elem()) {
- n = typecheck.AssignConv(n, nsrc.Type().Elem(), "append")
- n = walkexpr(n, init)
- }
- ls[i] = n
- }
-
- argc := len(n.Args) - 1
- if argc < 1 {
- return nsrc
- }
-
- // General case, with no function calls left as arguments.
- // Leave for gen, except that instrumentation requires old form.
- if !base.Flag.Cfg.Instrumenting || base.Flag.CompilingRuntime {
- return n
- }
-
- var l []ir.Node
-
- ns := typecheck.Temp(nsrc.Type())
- l = append(l, ir.NewAssignStmt(base.Pos, ns, nsrc)) // s = src
-
- na := ir.NewInt(int64(argc)) // const argc
- nif := ir.NewIfStmt(base.Pos, nil, nil, nil) // if cap(s) - len(s) < argc
- nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OCAP, ns), ir.NewUnaryExpr(base.Pos, ir.OLEN, ns)), na)
-
- fn := typecheck.LookupRuntime("growslice") // growslice(<type>, old []T, mincap int) (ret []T)
- fn = typecheck.SubstArgTypes(fn, ns.Type().Elem(), ns.Type().Elem())
-
- nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), reflectdata.TypePtr(ns.Type().Elem()), ns,
- ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns), na)))}
-
- l = append(l, nif)
-
- nn := typecheck.Temp(types.Types[types.TINT])
- l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns))) // n = len(s)
-
- slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, ns) // ...s[:n+argc]
- slice.SetSliceBounds(nil, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, na), nil)
- slice.SetBounded(true)
- l = append(l, ir.NewAssignStmt(base.Pos, ns, slice)) // s = s[:n+argc]
-
- ls = n.Args[1:]
- for i, n := range ls {
- ix := ir.NewIndexExpr(base.Pos, ns, nn) // s[n] ...
- ix.SetBounded(true)
- l = append(l, ir.NewAssignStmt(base.Pos, ix, n)) // s[n] = arg
- if i+1 < len(ls) {
- l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, ir.NewInt(1)))) // n = n + 1
- }
- }
-
- typecheck.Stmts(l)
- walkstmtlist(l)
- init.Append(l...)
- return ns
-}
-
-// Lower copy(a, b) to a memmove call or a runtime call.
-//
-// init {
-// n := len(a)
-// if n > len(b) { n = len(b) }
-// if a.ptr != b.ptr { memmove(a.ptr, b.ptr, n*sizeof(elem(a))) }
-// }
-// n;
-//
-// Also works if b is a string.
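
A quick, runnable check of the semantics the lowering above must preserve
(a sketch that uses the built-in copy directly; the compiler emits the
expansion shown in the comment instead):

	package main

	import "fmt"

	func main() {
		dst := make([]byte, 4)
		n := copy(dst, "hello") // string source takes the same path
		fmt.Println(n, dst)     // 4 [104 101 108 108]
	}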
-// -func copyany(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node { - if n.X.Type().Elem().HasPointers() { - ir.CurFunc.SetWBPos(n.Pos()) - fn := writebarrierfn("typedslicecopy", n.X.Type().Elem(), n.Y.Type().Elem()) - n.X = cheapexpr(n.X, init) - ptrL, lenL := backingArrayPtrLen(n.X) - n.Y = cheapexpr(n.Y, init) - ptrR, lenR := backingArrayPtrLen(n.Y) - return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.X.Type().Elem()), ptrL, lenL, ptrR, lenR) - } - - if runtimecall { - // rely on runtime to instrument: - // copy(n.Left, n.Right) - // n.Right can be a slice or string. - - n.X = cheapexpr(n.X, init) - ptrL, lenL := backingArrayPtrLen(n.X) - n.Y = cheapexpr(n.Y, init) - ptrR, lenR := backingArrayPtrLen(n.Y) - - fn := typecheck.LookupRuntime("slicecopy") - fn = typecheck.SubstArgTypes(fn, ptrL.Type().Elem(), ptrR.Type().Elem()) - - return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, ir.NewInt(n.X.Type().Elem().Width)) - } - - n.X = walkexpr(n.X, init) - n.Y = walkexpr(n.Y, init) - nl := typecheck.Temp(n.X.Type()) - nr := typecheck.Temp(n.Y.Type()) - var l []ir.Node - l = append(l, ir.NewAssignStmt(base.Pos, nl, n.X)) - l = append(l, ir.NewAssignStmt(base.Pos, nr, n.Y)) - - nfrm := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nr) - nto := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nl) - - nlen := typecheck.Temp(types.Types[types.TINT]) - - // n = len(to) - l = append(l, ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nl))) - - // if n > len(frm) { n = len(frm) } - nif := ir.NewIfStmt(base.Pos, nil, nil, nil) - - nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr)) - nif.Body.Append(ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr))) - l = append(l, nif) - - // if to.ptr != frm.ptr { memmove( ... ) } - ne := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.ONE, nto, nfrm), nil, nil) - ne.Likely = true - l = append(l, ne) - - fn := typecheck.LookupRuntime("memmove") - fn = typecheck.SubstArgTypes(fn, nl.Type().Elem(), nl.Type().Elem()) - nwid := ir.Node(typecheck.Temp(types.Types[types.TUINTPTR])) - setwid := ir.NewAssignStmt(base.Pos, nwid, typecheck.Conv(nlen, types.Types[types.TUINTPTR])) - ne.Body.Append(setwid) - nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(nl.Type().Elem().Width)) - call := mkcall1(fn, nil, init, nto, nfrm, nwid) - ne.Body.Append(call) - - typecheck.Stmts(l) - walkstmtlist(l) - init.Append(l...) - return nlen -} - -func eqfor(t *types.Type) (n ir.Node, needsize bool) { - // Should only arrive here with large memory or - // a struct/array containing a non-memory field/element. - // Small memory is handled inline, and single non-memory - // is handled by walkcompare. - switch a, _ := types.AlgType(t); a { - case types.AMEM: - n := typecheck.LookupRuntime("memequal") - n = typecheck.SubstArgTypes(n, t, t) - return n, true - case types.ASPECIAL: - sym := reflectdata.TypeSymPrefix(".eq", t) - n := typecheck.NewName(sym) - ir.MarkFunc(n) - n.SetType(typecheck.NewFuncType(nil, []*ir.Field{ - ir.NewField(base.Pos, nil, nil, types.NewPtr(t)), - ir.NewField(base.Pos, nil, nil, types.NewPtr(t)), - }, []*ir.Field{ - ir.NewField(base.Pos, nil, nil, types.Types[types.TBOOL]), - })) - return n, false - } - base.Fatalf("eqfor %v", t) - return nil, false -} - -// The result of walkcompare MUST be assigned back to n, e.g. 
-// n.Left = walkcompare(n.Left, init) -func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { - if n.X.Type().IsInterface() && n.Y.Type().IsInterface() && n.X.Op() != ir.ONIL && n.Y.Op() != ir.ONIL { - return walkcompareInterface(n, init) - } - - if n.X.Type().IsString() && n.Y.Type().IsString() { - return walkcompareString(n, init) - } - - n.X = walkexpr(n.X, init) - n.Y = walkexpr(n.Y, init) - - // Given mixed interface/concrete comparison, - // rewrite into types-equal && data-equal. - // This is efficient, avoids allocations, and avoids runtime calls. - if n.X.Type().IsInterface() != n.Y.Type().IsInterface() { - // Preserve side-effects in case of short-circuiting; see #32187. - l := cheapexpr(n.X, init) - r := cheapexpr(n.Y, init) - // Swap so that l is the interface value and r is the concrete value. - if n.Y.Type().IsInterface() { - l, r = r, l - } - - // Handle both == and !=. - eq := n.Op() - andor := ir.OOROR - if eq == ir.OEQ { - andor = ir.OANDAND - } - // Check for types equal. - // For empty interface, this is: - // l.tab == type(r) - // For non-empty interface, this is: - // l.tab != nil && l.tab._type == type(r) - var eqtype ir.Node - tab := ir.NewUnaryExpr(base.Pos, ir.OITAB, l) - rtyp := reflectdata.TypePtr(r.Type()) - if l.Type().IsEmptyInterface() { - tab.SetType(types.NewPtr(types.Types[types.TUINT8])) - tab.SetTypecheck(1) - eqtype = ir.NewBinaryExpr(base.Pos, eq, tab, rtyp) - } else { - nonnil := ir.NewBinaryExpr(base.Pos, brcom(eq), typecheck.NodNil(), tab) - match := ir.NewBinaryExpr(base.Pos, eq, itabType(tab), rtyp) - eqtype = ir.NewLogicalExpr(base.Pos, andor, nonnil, match) - } - // Check for data equal. - eqdata := ir.NewBinaryExpr(base.Pos, eq, ifaceData(n.Pos(), l, r.Type()), r) - // Put it all together. - expr := ir.NewLogicalExpr(base.Pos, andor, eqtype, eqdata) - return finishcompare(n, expr, init) - } - - // Must be comparison of array or struct. - // Otherwise back end handles it. - // While we're here, decide whether to - // inline or call an eq alg. - t := n.X.Type() - var inline bool - - maxcmpsize := int64(4) - unalignedLoad := canMergeLoads() - if unalignedLoad { - // Keep this low enough to generate less code than a function call. - maxcmpsize = 2 * int64(ssagen.Arch.LinkArch.RegSize) - } - - switch t.Kind() { - default: - if base.Debug.Libfuzzer != 0 && t.IsInteger() { - n.X = cheapexpr(n.X, init) - n.Y = cheapexpr(n.Y, init) - - // If exactly one comparison operand is - // constant, invoke the constcmp functions - // instead, and arrange for the constant - // operand to be the first argument. 
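
// A sketch of the effect (assuming the -d=libfuzzer debug flag): for a
// comparison like  x == 42  on a uint64, exactly one operand is a literal,
// so the walk below inserts a hook call with the constant first, roughly
//
//	libfuzzerTraceConstCmp8(42, x)
//
// ahead of the comparison itself.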
- l, r := n.X, n.Y - if r.Op() == ir.OLITERAL { - l, r = r, l - } - constcmp := l.Op() == ir.OLITERAL && r.Op() != ir.OLITERAL - - var fn string - var paramType *types.Type - switch t.Size() { - case 1: - fn = "libfuzzerTraceCmp1" - if constcmp { - fn = "libfuzzerTraceConstCmp1" - } - paramType = types.Types[types.TUINT8] - case 2: - fn = "libfuzzerTraceCmp2" - if constcmp { - fn = "libfuzzerTraceConstCmp2" - } - paramType = types.Types[types.TUINT16] - case 4: - fn = "libfuzzerTraceCmp4" - if constcmp { - fn = "libfuzzerTraceConstCmp4" - } - paramType = types.Types[types.TUINT32] - case 8: - fn = "libfuzzerTraceCmp8" - if constcmp { - fn = "libfuzzerTraceConstCmp8" - } - paramType = types.Types[types.TUINT64] - default: - base.Fatalf("unexpected integer size %d for %v", t.Size(), t) - } - init.Append(mkcall(fn, nil, init, tracecmpArg(l, paramType, init), tracecmpArg(r, paramType, init))) - } - return n - case types.TARRAY: - // We can compare several elements at once with 2/4/8 byte integer compares - inline = t.NumElem() <= 1 || (types.IsSimple[t.Elem().Kind()] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize)) - case types.TSTRUCT: - inline = t.NumComponents(types.IgnoreBlankFields) <= 4 - } - - cmpl := n.X - for cmpl != nil && cmpl.Op() == ir.OCONVNOP { - cmpl = cmpl.(*ir.ConvExpr).X - } - cmpr := n.Y - for cmpr != nil && cmpr.Op() == ir.OCONVNOP { - cmpr = cmpr.(*ir.ConvExpr).X - } - - // Chose not to inline. Call equality function directly. - if !inline { - // eq algs take pointers; cmpl and cmpr must be addressable - if !ir.IsAssignable(cmpl) || !ir.IsAssignable(cmpr) { - base.Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr) - } - - fn, needsize := eqfor(t) - call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) - call.Args.Append(typecheck.NodAddr(cmpl)) - call.Args.Append(typecheck.NodAddr(cmpr)) - if needsize { - call.Args.Append(ir.NewInt(t.Width)) - } - res := ir.Node(call) - if n.Op() != ir.OEQ { - res = ir.NewUnaryExpr(base.Pos, ir.ONOT, res) - } - return finishcompare(n, res, init) - } - - // inline: build boolean expression comparing element by element - andor := ir.OANDAND - if n.Op() == ir.ONE { - andor = ir.OOROR - } - var expr ir.Node - compare := func(el, er ir.Node) { - a := ir.NewBinaryExpr(base.Pos, n.Op(), el, er) - if expr == nil { - expr = a - } else { - expr = ir.NewLogicalExpr(base.Pos, andor, expr, a) - } - } - cmpl = safeexpr(cmpl, init) - cmpr = safeexpr(cmpr, init) - if t.IsStruct() { - for _, f := range t.Fields().Slice() { - sym := f.Sym - if sym.IsBlank() { - continue - } - compare( - ir.NewSelectorExpr(base.Pos, ir.OXDOT, cmpl, sym), - ir.NewSelectorExpr(base.Pos, ir.OXDOT, cmpr, sym), - ) - } - } else { - step := int64(1) - remains := t.NumElem() * t.Elem().Width - combine64bit := unalignedLoad && types.RegSize == 8 && t.Elem().Width <= 4 && t.Elem().IsInteger() - combine32bit := unalignedLoad && t.Elem().Width <= 2 && t.Elem().IsInteger() - combine16bit := unalignedLoad && t.Elem().Width == 1 && t.Elem().IsInteger() - for i := int64(0); remains > 0; { - var convType *types.Type - switch { - case remains >= 8 && combine64bit: - convType = types.Types[types.TINT64] - step = 8 / t.Elem().Width - case remains >= 4 && combine32bit: - convType = types.Types[types.TUINT32] - step = 4 / t.Elem().Width - case remains >= 2 && combine16bit: - convType = types.Types[types.TUINT16] - step = 2 / t.Elem().Width - default: - step = 1 - } - if step == 1 { - compare( - ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i)), - 
ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i)), - ) - i++ - remains -= t.Elem().Width - } else { - elemType := t.Elem().ToUnsigned() - cmplw := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i))) - cmplw = typecheck.Conv(cmplw, elemType) // convert to unsigned - cmplw = typecheck.Conv(cmplw, convType) // widen - cmprw := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i))) - cmprw = typecheck.Conv(cmprw, elemType) - cmprw = typecheck.Conv(cmprw, convType) - // For code like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ... - // ssa will generate a single large load. - for offset := int64(1); offset < step; offset++ { - lb := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i+offset))) - lb = typecheck.Conv(lb, elemType) - lb = typecheck.Conv(lb, convType) - lb = ir.NewBinaryExpr(base.Pos, ir.OLSH, lb, ir.NewInt(8*t.Elem().Width*offset)) - cmplw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmplw, lb) - rb := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i+offset))) - rb = typecheck.Conv(rb, elemType) - rb = typecheck.Conv(rb, convType) - rb = ir.NewBinaryExpr(base.Pos, ir.OLSH, rb, ir.NewInt(8*t.Elem().Width*offset)) - cmprw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmprw, rb) - } - compare(cmplw, cmprw) - i += step - remains -= step * t.Elem().Width - } - } - } - if expr == nil { - expr = ir.NewBool(n.Op() == ir.OEQ) - // We still need to use cmpl and cmpr, in case they contain - // an expression which might panic. See issue 23837. - t := typecheck.Temp(cmpl.Type()) - a1 := typecheck.Stmt(ir.NewAssignStmt(base.Pos, t, cmpl)) - a2 := typecheck.Stmt(ir.NewAssignStmt(base.Pos, t, cmpr)) - init.Append(a1, a2) - } - return finishcompare(n, expr, init) -} - -func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node { - // Ugly hack to avoid "constant -1 overflows uintptr" errors, etc. - if n.Op() == ir.OLITERAL && n.Type().IsSigned() && ir.Int64Val(n) < 0 { - n = copyexpr(n, n.Type(), init) - } - - return typecheck.Conv(n, t) -} - -func walkcompareInterface(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { - n.Y = cheapexpr(n.Y, init) - n.X = cheapexpr(n.X, init) - eqtab, eqdata := reflectdata.EqInterface(n.X, n.Y) - var cmp ir.Node - if n.Op() == ir.OEQ { - cmp = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqtab, eqdata) - } else { - eqtab.SetOp(ir.ONE) - cmp = ir.NewLogicalExpr(base.Pos, ir.OOROR, eqtab, ir.NewUnaryExpr(base.Pos, ir.ONOT, eqdata)) - } - return finishcompare(n, cmp, init) -} - -func walkcompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { - // Rewrite comparisons to short constant strings as length+byte-wise comparisons. - var cs, ncs ir.Node // const string, non-const string - switch { - case ir.IsConst(n.X, constant.String) && ir.IsConst(n.Y, constant.String): - // ignore; will be constant evaluated - case ir.IsConst(n.X, constant.String): - cs = n.X - ncs = n.Y - case ir.IsConst(n.Y, constant.String): - cs = n.Y - ncs = n.X - } - if cs != nil { - cmp := n.Op() - // Our comparison below assumes that the non-constant string - // is on the left hand side, so rewrite "" cmp x to x cmp "". - // See issue 24817. - if ir.IsConst(n.X, constant.String) { - cmp = brrev(cmp) - } - - // maxRewriteLen was chosen empirically. - // It is the value that minimizes cmd/go file size - // across most architectures. - // See the commit description for CL 26758 for details. - maxRewriteLen := 6 - // Some architectures can load unaligned byte sequence as 1 word. - // So we can cover longer strings with the same amount of code. 
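
// A sketch of the resulting rewrite, assuming cmp is == and a two-byte
// constant string:
//
//	s == "ab"   =>   len(s) == 2 && s[0] == 'a' && s[1] == 'b'
//
// and, when unaligned loads can be merged, the two byte compares collapse
// into a single 16-bit compare of uint16(s[0]) | uint16(s[1])<<8 against
// the precomputed constant 0x6261.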
- canCombineLoads := canMergeLoads()
- combine64bit := false
- if canCombineLoads {
- // Keep this low enough to generate less code than a function call.
- maxRewriteLen = 2 * ssagen.Arch.LinkArch.RegSize
- combine64bit = ssagen.Arch.LinkArch.RegSize >= 8
- }
-
- var and ir.Op
- switch cmp {
- case ir.OEQ:
- and = ir.OANDAND
- case ir.ONE:
- and = ir.OOROR
- default:
- // Don't do byte-wise comparisons for <, <=, etc.
- // They're fairly complicated.
- // Length-only checks are ok, though.
- maxRewriteLen = 0
- }
- if s := ir.StringVal(cs); len(s) <= maxRewriteLen {
- if len(s) > 0 {
- ncs = safeexpr(ncs, init)
- }
- r := ir.Node(ir.NewBinaryExpr(base.Pos, cmp, ir.NewUnaryExpr(base.Pos, ir.OLEN, ncs), ir.NewInt(int64(len(s)))))
- remains := len(s)
- for i := 0; remains > 0; {
- if remains == 1 || !canCombineLoads {
- cb := ir.NewInt(int64(s[i]))
- ncb := ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i)))
- r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, ncb, cb))
- remains--
- i++
- continue
- }
- var step int
- var convType *types.Type
- switch {
- case remains >= 8 && combine64bit:
- convType = types.Types[types.TINT64]
- step = 8
- case remains >= 4:
- convType = types.Types[types.TUINT32]
- step = 4
- case remains >= 2:
- convType = types.Types[types.TUINT16]
- step = 2
- }
- ncsubstr := typecheck.Conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i))), convType)
- csubstr := int64(s[i])
- // Calculate large constant from bytes as sequence of shifts and ors.
- // Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
- // ssa will combine this into a single large load.
- for offset := 1; offset < step; offset++ {
- b := typecheck.Conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i+offset))), convType)
- b = ir.NewBinaryExpr(base.Pos, ir.OLSH, b, ir.NewInt(int64(8*offset)))
- ncsubstr = ir.NewBinaryExpr(base.Pos, ir.OOR, ncsubstr, b)
- csubstr |= int64(s[i+offset]) << uint8(8*offset)
- }
- csubstrPart := ir.NewInt(csubstr)
- // Compare "step" bytes at once
- r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, csubstrPart, ncsubstr))
- remains -= step
- i += step
- }
- return finishcompare(n, r, init)
- }
- }
-
- var r ir.Node
- if n.Op() == ir.OEQ || n.Op() == ir.ONE {
- // prepare for rewrite below
- n.X = cheapexpr(n.X, init)
- n.Y = cheapexpr(n.Y, init)
- eqlen, eqmem := reflectdata.EqString(n.X, n.Y)
- // quick check of len before full compare for == or !=.
- // memequal then tests equality up to length len.
- if n.Op() == ir.OEQ {
- // len(left) == len(right) && memequal(left, right, len)
- r = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqlen, eqmem)
- } else {
- // len(left) != len(right) || !memequal(left, right, len)
- eqlen.SetOp(ir.ONE)
- r = ir.NewLogicalExpr(base.Pos, ir.OOROR, eqlen, ir.NewUnaryExpr(base.Pos, ir.ONOT, eqmem))
- }
- } else {
- // sys_cmpstring(s1, s2) :: 0
- r = mkcall("cmpstring", types.Types[types.TINT], init, typecheck.Conv(n.X, types.Types[types.TSTRING]), typecheck.Conv(n.Y, types.Types[types.TSTRING]))
- r = ir.NewBinaryExpr(base.Pos, n.Op(), r, ir.NewInt(0))
- }
-
- return finishcompare(n, r, init)
-}
-
-// The result of finishcompare MUST be assigned back to n, e.g.
-// n.Left = finishcompare(n.Left, x, r, init)
-func finishcompare(n *ir.BinaryExpr, r ir.Node, init *ir.Nodes) ir.Node {
- r = typecheck.Expr(r)
- r = typecheck.Conv(r, n.Type())
- r = walkexpr(r, init)
- return r
-}
-
-// return 1 if integer n must be in range [0, max), 0 otherwise
-func bounded(n ir.Node, max int64) bool {
- if n.Type() == nil || !n.Type().IsInteger() {
- return false
- }
-
- sign := n.Type().IsSigned()
- bits := int32(8 * n.Type().Width)
-
- if ir.IsSmallIntConst(n) {
- v := ir.Int64Val(n)
- return 0 <= v && v < max
- }
-
- switch n.Op() {
- case ir.OAND, ir.OANDNOT:
- n := n.(*ir.BinaryExpr)
- v := int64(-1)
- switch {
- case ir.IsSmallIntConst(n.X):
- v = ir.Int64Val(n.X)
- case ir.IsSmallIntConst(n.Y):
- v = ir.Int64Val(n.Y)
- if n.Op() == ir.OANDNOT {
- v = ^v
- if !sign {
- v &= 1<<uint(bits) - 1
- }
- }
- }
- if 0 <= v && v < max {
- return true
- }
-
- case ir.OMOD:
- n := n.(*ir.BinaryExpr)
- if !sign && ir.IsSmallIntConst(n.Y) {
- v := ir.Int64Val(n.Y)
- if 0 <= v && v <= max {
- return true
- }
- }
-
- case ir.ODIV:
- n := n.(*ir.BinaryExpr)
- if !sign && ir.IsSmallIntConst(n.Y) {
- v := ir.Int64Val(n.Y)
- for bits > 0 && v >= 2 {
- bits--
- v >>= 1
- }
- }
-
- case ir.ORSH:
- n := n.(*ir.BinaryExpr)
- if !sign && ir.IsSmallIntConst(n.Y) {
- v := ir.Int64Val(n.Y)
- if v > int64(bits) {
- return true
- }
- bits -= int32(v)
- }
- }
-
- if !sign && bits <= 62 && 1<<uint(bits) <= max {
- return true
- }
-
- return false
-}

From: Russ Cox
Date: Wed, 23 Dec 2020 01:08:27 -0500
Subject: [PATCH 240/474] [dev.regabi] cmd/compile: split out package pkginit
 [generated]

[git-generate]
cd src/cmd/compile/internal/gc
rf '
 mv fninit Task
 mv init.go initorder.go cmd/compile/internal/pkginit
'

Change-Id: Ie2a924784c7a6fa029eaef821384eef4b262e1af
Reviewed-on: https://go-review.googlesource.com/c/go/+/279479
Trust: Russ Cox
Run-TryBot: Russ Cox
Reviewed-by: Matthew Dempsky
---
 src/cmd/compile/internal/gc/main.go | 3 ++-
 src/cmd/compile/internal/{gc => pkginit}/init.go | 6 +++---
 src/cmd/compile/internal/{gc => pkginit}/initorder.go | 2 +-
 3 files changed, 6 insertions(+), 5 deletions(-)
 rename src/cmd/compile/internal/{gc => pkginit}/init.go (96%)
 rename src/cmd/compile/internal/{gc => pkginit}/initorder.go (99%)

diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index aeb58a3310483..8483c87a38d10 100644
--- a/src/cmd/compile/internal/gc/main.go
+++ b/src/cmd/compile/internal/gc/main.go
@@ -16,6 +16,7 @@ import (
 "cmd/compile/internal/ir"
 "cmd/compile/internal/logopt"
 "cmd/compile/internal/noder"
+ "cmd/compile/internal/pkginit"
 "cmd/compile/internal/reflectdata"
 "cmd/compile/internal/ssa"
 "cmd/compile/internal/ssagen"
@@ -223,7 +224,7 @@ func Main(archInit func(*ssagen.ArchInfo)) {
 base.ExitIfErrors()

 // Build init task.
- if initTask := fninit(); initTask != nil {
+ if initTask := pkginit.Task(); initTask != nil {
 typecheck.Export(initTask)
 }

diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/pkginit/init.go
similarity index 96%
rename from src/cmd/compile/internal/gc/init.go
rename to src/cmd/compile/internal/pkginit/init.go
index a299b8688b28a..f964edee88379 100644
--- a/src/cmd/compile/internal/gc/init.go
+++ b/src/cmd/compile/internal/pkginit/init.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

-package gc
+package pkginit

 import (
 "cmd/compile/internal/base"
@@ -13,13 +13,13 @@ import (
 "cmd/internal/obj"
 )

-// fninit makes and returns an initialization record for the package.
+// Task makes and returns an initialization record for the package.
 // See runtime/proc.go:initTask for its layout.
 // The 3 tasks for initialization are:
 // 1) Initialize all of the packages the current package depends on.
 // 2) Initialize all the variables that have initializers.
 // 3) Run any init functions.
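
// For reference, the record this builds has the following layout (a sketch
// of runtime/proc.go's initTask as of this series):
//
//	type initTask struct {
//		state uintptr // 0 = uninitialized, 1 = in progress, 2 = done
//		ndeps uintptr
//		nfns  uintptr
//		// followed by ndeps instances of an *initTask, one per package depended on
//		// followed by nfns pcs, one per init function to run
//	}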
-func fninit() *ir.Name { +func Task() *ir.Name { nf := initOrder(typecheck.Target.Decls) var deps []*obj.LSym // initTask records for packages the current package depends on diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/pkginit/initorder.go similarity index 99% rename from src/cmd/compile/internal/gc/initorder.go rename to src/cmd/compile/internal/pkginit/initorder.go index 4ac468fb4e5b3..d63c5a4717da6 100644 --- a/src/cmd/compile/internal/gc/initorder.go +++ b/src/cmd/compile/internal/pkginit/initorder.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package pkginit import ( "bytes" From 37f138df6bcd7bb7cf62148cd8388f3916388ab6 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 23 Dec 2020 01:09:46 -0500 Subject: [PATCH 241/474] [dev.regabi] cmd/compile: split out package test [generated] [git-generate] cd src/cmd/compile/internal/gc rf ' mv bench_test.go constFold_test.go dep_test.go \ fixedbugs_test.go iface_test.go float_test.go global_test.go \ inl_test.go lang_test.go logic_test.go \ reproduciblebuilds_test.go shift_test.go ssa_test.go \ truncconst_test.go zerorange_test.go \ cmd/compile/internal/test ' mv testdata ../test Change-Id: I041971b7e9766673f7a331679bfe1c8110dcda66 Reviewed-on: https://go-review.googlesource.com/c/go/+/279480 Trust: Russ Cox Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/{gc => test}/bench_test.go | 2 +- src/cmd/compile/internal/{gc => test}/constFold_test.go | 2 +- src/cmd/compile/internal/{gc => test}/dep_test.go | 2 +- src/cmd/compile/internal/{gc => test}/fixedbugs_test.go | 2 +- src/cmd/compile/internal/{gc => test}/float_test.go | 2 +- src/cmd/compile/internal/{gc => test}/global_test.go | 2 +- src/cmd/compile/internal/{gc => test}/iface_test.go | 8 +++----- src/cmd/compile/internal/{gc => test}/inl_test.go | 2 +- src/cmd/compile/internal/{gc => test}/lang_test.go | 2 +- src/cmd/compile/internal/{gc => test}/logic_test.go | 2 +- .../internal/{gc => test}/reproduciblebuilds_test.go | 2 +- src/cmd/compile/internal/{gc => test}/shift_test.go | 2 +- src/cmd/compile/internal/{gc => test}/ssa_test.go | 2 +- .../internal/{gc => test}/testdata/addressed_test.go | 0 .../compile/internal/{gc => test}/testdata/append_test.go | 0 .../internal/{gc => test}/testdata/arithBoundary_test.go | 0 .../internal/{gc => test}/testdata/arithConst_test.go | 0 .../compile/internal/{gc => test}/testdata/arith_test.go | 0 .../compile/internal/{gc => test}/testdata/array_test.go | 0 .../compile/internal/{gc => test}/testdata/assert_test.go | 0 .../compile/internal/{gc => test}/testdata/break_test.go | 0 .../compile/internal/{gc => test}/testdata/chan_test.go | 0 .../internal/{gc => test}/testdata/closure_test.go | 0 .../internal/{gc => test}/testdata/cmpConst_test.go | 0 .../compile/internal/{gc => test}/testdata/cmp_test.go | 0 .../internal/{gc => test}/testdata/compound_test.go | 0 .../compile/internal/{gc => test}/testdata/copy_test.go | 0 .../compile/internal/{gc => test}/testdata/ctl_test.go | 0 .../internal/{gc => test}/testdata/deferNoReturn_test.go | 0 .../internal/{gc => test}/testdata/divbyzero_test.go | 0 .../internal/{gc => test}/testdata/dupLoad_test.go | 0 .../{gc => test}/testdata/flowgraph_generator1.go | 0 src/cmd/compile/internal/{gc => test}/testdata/fp_test.go | 0 .../{gc => test}/testdata/gen/arithBoundaryGen.go | 0 .../internal/{gc => test}/testdata/gen/arithConstGen.go | 0 .../internal/{gc => 
test}/testdata/gen/cmpConstGen.go | 0 .../internal/{gc => test}/testdata/gen/constFoldGen.go | 0 .../compile/internal/{gc => test}/testdata/gen/copyGen.go | 0 .../compile/internal/{gc => test}/testdata/gen/zeroGen.go | 0 .../internal/{gc => test}/testdata/loadstore_test.go | 0 .../compile/internal/{gc => test}/testdata/map_test.go | 0 .../internal/{gc => test}/testdata/namedReturn_test.go | 0 .../compile/internal/{gc => test}/testdata/phi_test.go | 0 .../internal/{gc => test}/testdata/regalloc_test.go | 0 .../{gc => test}/testdata/reproducible/issue20272.go | 0 .../{gc => test}/testdata/reproducible/issue27013.go | 0 .../{gc => test}/testdata/reproducible/issue30202.go | 0 .../{gc => test}/testdata/reproducible/issue38068.go | 0 .../compile/internal/{gc => test}/testdata/short_test.go | 0 .../compile/internal/{gc => test}/testdata/slice_test.go | 0 .../internal/{gc => test}/testdata/sqrtConst_test.go | 0 .../compile/internal/{gc => test}/testdata/string_test.go | 0 .../compile/internal/{gc => test}/testdata/unsafe_test.go | 0 .../compile/internal/{gc => test}/testdata/zero_test.go | 0 src/cmd/compile/internal/{gc => test}/truncconst_test.go | 2 +- src/cmd/compile/internal/{gc => test}/zerorange_test.go | 6 ++---- 56 files changed, 18 insertions(+), 22 deletions(-) rename src/cmd/compile/internal/{gc => test}/bench_test.go (98%) rename src/cmd/compile/internal/{gc => test}/constFold_test.go (99%) rename src/cmd/compile/internal/{gc => test}/dep_test.go (97%) rename src/cmd/compile/internal/{gc => test}/fixedbugs_test.go (99%) rename src/cmd/compile/internal/{gc => test}/float_test.go (99%) rename src/cmd/compile/internal/{gc => test}/global_test.go (99%) rename src/cmd/compile/internal/{gc => test}/iface_test.go (98%) rename src/cmd/compile/internal/{gc => test}/inl_test.go (99%) rename src/cmd/compile/internal/{gc => test}/lang_test.go (99%) rename src/cmd/compile/internal/{gc => test}/logic_test.go (99%) rename src/cmd/compile/internal/{gc => test}/reproduciblebuilds_test.go (99%) rename src/cmd/compile/internal/{gc => test}/shift_test.go (99%) rename src/cmd/compile/internal/{gc => test}/ssa_test.go (99%) rename src/cmd/compile/internal/{gc => test}/testdata/addressed_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/append_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/arithBoundary_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/arithConst_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/arith_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/array_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/assert_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/break_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/chan_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/closure_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/cmpConst_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/cmp_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/compound_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/copy_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/ctl_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/deferNoReturn_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/divbyzero_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/dupLoad_test.go (100%) rename 
src/cmd/compile/internal/{gc => test}/testdata/flowgraph_generator1.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/fp_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/gen/arithBoundaryGen.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/gen/arithConstGen.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/gen/cmpConstGen.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/gen/constFoldGen.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/gen/copyGen.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/gen/zeroGen.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/loadstore_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/map_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/namedReturn_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/phi_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/regalloc_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/reproducible/issue20272.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/reproducible/issue27013.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/reproducible/issue30202.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/reproducible/issue38068.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/short_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/slice_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/sqrtConst_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/string_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/unsafe_test.go (100%) rename src/cmd/compile/internal/{gc => test}/testdata/zero_test.go (100%) rename src/cmd/compile/internal/{gc => test}/truncconst_test.go (99%) rename src/cmd/compile/internal/{gc => test}/zerorange_test.go (98%) diff --git a/src/cmd/compile/internal/gc/bench_test.go b/src/cmd/compile/internal/test/bench_test.go similarity index 98% rename from src/cmd/compile/internal/gc/bench_test.go rename to src/cmd/compile/internal/test/bench_test.go index 8c4288128f25b..3fffe57d08203 100644 --- a/src/cmd/compile/internal/gc/bench_test.go +++ b/src/cmd/compile/internal/test/bench_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package test import "testing" diff --git a/src/cmd/compile/internal/gc/constFold_test.go b/src/cmd/compile/internal/test/constFold_test.go similarity index 99% rename from src/cmd/compile/internal/gc/constFold_test.go rename to src/cmd/compile/internal/test/constFold_test.go index 59f905dad9623..7159f0ed33a37 100644 --- a/src/cmd/compile/internal/gc/constFold_test.go +++ b/src/cmd/compile/internal/test/constFold_test.go @@ -1,7 +1,7 @@ // run // Code generated by gen/constFoldGen.go. DO NOT EDIT. -package gc +package test import "testing" diff --git a/src/cmd/compile/internal/gc/dep_test.go b/src/cmd/compile/internal/test/dep_test.go similarity index 97% rename from src/cmd/compile/internal/gc/dep_test.go rename to src/cmd/compile/internal/test/dep_test.go index a185bc9f547c5..26122e6a5b70a 100644 --- a/src/cmd/compile/internal/gc/dep_test.go +++ b/src/cmd/compile/internal/test/dep_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-package gc +package test import ( "internal/testenv" diff --git a/src/cmd/compile/internal/gc/fixedbugs_test.go b/src/cmd/compile/internal/test/fixedbugs_test.go similarity index 99% rename from src/cmd/compile/internal/gc/fixedbugs_test.go rename to src/cmd/compile/internal/test/fixedbugs_test.go index 8ac4436947f78..e7e2f7e58eb29 100644 --- a/src/cmd/compile/internal/gc/fixedbugs_test.go +++ b/src/cmd/compile/internal/test/fixedbugs_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package test import ( "internal/testenv" diff --git a/src/cmd/compile/internal/gc/float_test.go b/src/cmd/compile/internal/test/float_test.go similarity index 99% rename from src/cmd/compile/internal/gc/float_test.go rename to src/cmd/compile/internal/test/float_test.go index c619d2570507f..884a983bdd7a5 100644 --- a/src/cmd/compile/internal/gc/float_test.go +++ b/src/cmd/compile/internal/test/float_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package test import ( "math" diff --git a/src/cmd/compile/internal/gc/global_test.go b/src/cmd/compile/internal/test/global_test.go similarity index 99% rename from src/cmd/compile/internal/gc/global_test.go rename to src/cmd/compile/internal/test/global_test.go index edad6d042a36d..5f5f7d619860f 100644 --- a/src/cmd/compile/internal/gc/global_test.go +++ b/src/cmd/compile/internal/test/global_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package test import ( "bytes" diff --git a/src/cmd/compile/internal/gc/iface_test.go b/src/cmd/compile/internal/test/iface_test.go similarity index 98% rename from src/cmd/compile/internal/gc/iface_test.go rename to src/cmd/compile/internal/test/iface_test.go index 21c6587217f0a..ebc4f891c963a 100644 --- a/src/cmd/compile/internal/gc/iface_test.go +++ b/src/cmd/compile/internal/test/iface_test.go @@ -2,15 +2,13 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package test + +import "testing" // Test to make sure we make copies of the values we // put in interfaces. -import ( - "testing" -) - var x int func TestEfaceConv1(t *testing.T) { diff --git a/src/cmd/compile/internal/gc/inl_test.go b/src/cmd/compile/internal/test/inl_test.go similarity index 99% rename from src/cmd/compile/internal/gc/inl_test.go rename to src/cmd/compile/internal/test/inl_test.go index 02735e50fb7f0..9d31975b310bf 100644 --- a/src/cmd/compile/internal/gc/inl_test.go +++ b/src/cmd/compile/internal/test/inl_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package test import ( "bufio" diff --git a/src/cmd/compile/internal/gc/lang_test.go b/src/cmd/compile/internal/test/lang_test.go similarity index 99% rename from src/cmd/compile/internal/gc/lang_test.go rename to src/cmd/compile/internal/test/lang_test.go index 72e7f07a21c00..67c1551292298 100644 --- a/src/cmd/compile/internal/gc/lang_test.go +++ b/src/cmd/compile/internal/test/lang_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-package gc +package test import ( "internal/testenv" diff --git a/src/cmd/compile/internal/gc/logic_test.go b/src/cmd/compile/internal/test/logic_test.go similarity index 99% rename from src/cmd/compile/internal/gc/logic_test.go rename to src/cmd/compile/internal/test/logic_test.go index 78d2dd2fa81fa..1d7043ff605f1 100644 --- a/src/cmd/compile/internal/gc/logic_test.go +++ b/src/cmd/compile/internal/test/logic_test.go @@ -1,4 +1,4 @@ -package gc +package test import "testing" diff --git a/src/cmd/compile/internal/gc/reproduciblebuilds_test.go b/src/cmd/compile/internal/test/reproduciblebuilds_test.go similarity index 99% rename from src/cmd/compile/internal/gc/reproduciblebuilds_test.go rename to src/cmd/compile/internal/test/reproduciblebuilds_test.go index 8101e440793ee..4d84f9cdeffc9 100644 --- a/src/cmd/compile/internal/gc/reproduciblebuilds_test.go +++ b/src/cmd/compile/internal/test/reproduciblebuilds_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc_test +package test import ( "bytes" diff --git a/src/cmd/compile/internal/gc/shift_test.go b/src/cmd/compile/internal/test/shift_test.go similarity index 99% rename from src/cmd/compile/internal/gc/shift_test.go rename to src/cmd/compile/internal/test/shift_test.go index ce2eedf1521d9..ea88f0a70ae22 100644 --- a/src/cmd/compile/internal/gc/shift_test.go +++ b/src/cmd/compile/internal/test/shift_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package test import ( "reflect" diff --git a/src/cmd/compile/internal/gc/ssa_test.go b/src/cmd/compile/internal/test/ssa_test.go similarity index 99% rename from src/cmd/compile/internal/gc/ssa_test.go rename to src/cmd/compile/internal/test/ssa_test.go index 7f7c9464d44fa..2f3e24c2d37ca 100644 --- a/src/cmd/compile/internal/gc/ssa_test.go +++ b/src/cmd/compile/internal/test/ssa_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-package gc +package test import ( "bytes" diff --git a/src/cmd/compile/internal/gc/testdata/addressed_test.go b/src/cmd/compile/internal/test/testdata/addressed_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/addressed_test.go rename to src/cmd/compile/internal/test/testdata/addressed_test.go diff --git a/src/cmd/compile/internal/gc/testdata/append_test.go b/src/cmd/compile/internal/test/testdata/append_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/append_test.go rename to src/cmd/compile/internal/test/testdata/append_test.go diff --git a/src/cmd/compile/internal/gc/testdata/arithBoundary_test.go b/src/cmd/compile/internal/test/testdata/arithBoundary_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/arithBoundary_test.go rename to src/cmd/compile/internal/test/testdata/arithBoundary_test.go diff --git a/src/cmd/compile/internal/gc/testdata/arithConst_test.go b/src/cmd/compile/internal/test/testdata/arithConst_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/arithConst_test.go rename to src/cmd/compile/internal/test/testdata/arithConst_test.go diff --git a/src/cmd/compile/internal/gc/testdata/arith_test.go b/src/cmd/compile/internal/test/testdata/arith_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/arith_test.go rename to src/cmd/compile/internal/test/testdata/arith_test.go diff --git a/src/cmd/compile/internal/gc/testdata/array_test.go b/src/cmd/compile/internal/test/testdata/array_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/array_test.go rename to src/cmd/compile/internal/test/testdata/array_test.go diff --git a/src/cmd/compile/internal/gc/testdata/assert_test.go b/src/cmd/compile/internal/test/testdata/assert_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/assert_test.go rename to src/cmd/compile/internal/test/testdata/assert_test.go diff --git a/src/cmd/compile/internal/gc/testdata/break_test.go b/src/cmd/compile/internal/test/testdata/break_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/break_test.go rename to src/cmd/compile/internal/test/testdata/break_test.go diff --git a/src/cmd/compile/internal/gc/testdata/chan_test.go b/src/cmd/compile/internal/test/testdata/chan_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/chan_test.go rename to src/cmd/compile/internal/test/testdata/chan_test.go diff --git a/src/cmd/compile/internal/gc/testdata/closure_test.go b/src/cmd/compile/internal/test/testdata/closure_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/closure_test.go rename to src/cmd/compile/internal/test/testdata/closure_test.go diff --git a/src/cmd/compile/internal/gc/testdata/cmpConst_test.go b/src/cmd/compile/internal/test/testdata/cmpConst_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/cmpConst_test.go rename to src/cmd/compile/internal/test/testdata/cmpConst_test.go diff --git a/src/cmd/compile/internal/gc/testdata/cmp_test.go b/src/cmd/compile/internal/test/testdata/cmp_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/cmp_test.go rename to src/cmd/compile/internal/test/testdata/cmp_test.go diff --git a/src/cmd/compile/internal/gc/testdata/compound_test.go b/src/cmd/compile/internal/test/testdata/compound_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/compound_test.go rename to 
src/cmd/compile/internal/test/testdata/compound_test.go diff --git a/src/cmd/compile/internal/gc/testdata/copy_test.go b/src/cmd/compile/internal/test/testdata/copy_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/copy_test.go rename to src/cmd/compile/internal/test/testdata/copy_test.go diff --git a/src/cmd/compile/internal/gc/testdata/ctl_test.go b/src/cmd/compile/internal/test/testdata/ctl_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/ctl_test.go rename to src/cmd/compile/internal/test/testdata/ctl_test.go diff --git a/src/cmd/compile/internal/gc/testdata/deferNoReturn_test.go b/src/cmd/compile/internal/test/testdata/deferNoReturn_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/deferNoReturn_test.go rename to src/cmd/compile/internal/test/testdata/deferNoReturn_test.go diff --git a/src/cmd/compile/internal/gc/testdata/divbyzero_test.go b/src/cmd/compile/internal/test/testdata/divbyzero_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/divbyzero_test.go rename to src/cmd/compile/internal/test/testdata/divbyzero_test.go diff --git a/src/cmd/compile/internal/gc/testdata/dupLoad_test.go b/src/cmd/compile/internal/test/testdata/dupLoad_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/dupLoad_test.go rename to src/cmd/compile/internal/test/testdata/dupLoad_test.go diff --git a/src/cmd/compile/internal/gc/testdata/flowgraph_generator1.go b/src/cmd/compile/internal/test/testdata/flowgraph_generator1.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/flowgraph_generator1.go rename to src/cmd/compile/internal/test/testdata/flowgraph_generator1.go diff --git a/src/cmd/compile/internal/gc/testdata/fp_test.go b/src/cmd/compile/internal/test/testdata/fp_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/fp_test.go rename to src/cmd/compile/internal/test/testdata/fp_test.go diff --git a/src/cmd/compile/internal/gc/testdata/gen/arithBoundaryGen.go b/src/cmd/compile/internal/test/testdata/gen/arithBoundaryGen.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/gen/arithBoundaryGen.go rename to src/cmd/compile/internal/test/testdata/gen/arithBoundaryGen.go diff --git a/src/cmd/compile/internal/gc/testdata/gen/arithConstGen.go b/src/cmd/compile/internal/test/testdata/gen/arithConstGen.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/gen/arithConstGen.go rename to src/cmd/compile/internal/test/testdata/gen/arithConstGen.go diff --git a/src/cmd/compile/internal/gc/testdata/gen/cmpConstGen.go b/src/cmd/compile/internal/test/testdata/gen/cmpConstGen.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/gen/cmpConstGen.go rename to src/cmd/compile/internal/test/testdata/gen/cmpConstGen.go diff --git a/src/cmd/compile/internal/gc/testdata/gen/constFoldGen.go b/src/cmd/compile/internal/test/testdata/gen/constFoldGen.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/gen/constFoldGen.go rename to src/cmd/compile/internal/test/testdata/gen/constFoldGen.go diff --git a/src/cmd/compile/internal/gc/testdata/gen/copyGen.go b/src/cmd/compile/internal/test/testdata/gen/copyGen.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/gen/copyGen.go rename to src/cmd/compile/internal/test/testdata/gen/copyGen.go diff --git a/src/cmd/compile/internal/gc/testdata/gen/zeroGen.go 
b/src/cmd/compile/internal/test/testdata/gen/zeroGen.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/gen/zeroGen.go rename to src/cmd/compile/internal/test/testdata/gen/zeroGen.go diff --git a/src/cmd/compile/internal/gc/testdata/loadstore_test.go b/src/cmd/compile/internal/test/testdata/loadstore_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/loadstore_test.go rename to src/cmd/compile/internal/test/testdata/loadstore_test.go diff --git a/src/cmd/compile/internal/gc/testdata/map_test.go b/src/cmd/compile/internal/test/testdata/map_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/map_test.go rename to src/cmd/compile/internal/test/testdata/map_test.go diff --git a/src/cmd/compile/internal/gc/testdata/namedReturn_test.go b/src/cmd/compile/internal/test/testdata/namedReturn_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/namedReturn_test.go rename to src/cmd/compile/internal/test/testdata/namedReturn_test.go diff --git a/src/cmd/compile/internal/gc/testdata/phi_test.go b/src/cmd/compile/internal/test/testdata/phi_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/phi_test.go rename to src/cmd/compile/internal/test/testdata/phi_test.go diff --git a/src/cmd/compile/internal/gc/testdata/regalloc_test.go b/src/cmd/compile/internal/test/testdata/regalloc_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/regalloc_test.go rename to src/cmd/compile/internal/test/testdata/regalloc_test.go diff --git a/src/cmd/compile/internal/gc/testdata/reproducible/issue20272.go b/src/cmd/compile/internal/test/testdata/reproducible/issue20272.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/reproducible/issue20272.go rename to src/cmd/compile/internal/test/testdata/reproducible/issue20272.go diff --git a/src/cmd/compile/internal/gc/testdata/reproducible/issue27013.go b/src/cmd/compile/internal/test/testdata/reproducible/issue27013.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/reproducible/issue27013.go rename to src/cmd/compile/internal/test/testdata/reproducible/issue27013.go diff --git a/src/cmd/compile/internal/gc/testdata/reproducible/issue30202.go b/src/cmd/compile/internal/test/testdata/reproducible/issue30202.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/reproducible/issue30202.go rename to src/cmd/compile/internal/test/testdata/reproducible/issue30202.go diff --git a/src/cmd/compile/internal/gc/testdata/reproducible/issue38068.go b/src/cmd/compile/internal/test/testdata/reproducible/issue38068.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/reproducible/issue38068.go rename to src/cmd/compile/internal/test/testdata/reproducible/issue38068.go diff --git a/src/cmd/compile/internal/gc/testdata/short_test.go b/src/cmd/compile/internal/test/testdata/short_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/short_test.go rename to src/cmd/compile/internal/test/testdata/short_test.go diff --git a/src/cmd/compile/internal/gc/testdata/slice_test.go b/src/cmd/compile/internal/test/testdata/slice_test.go similarity index 100% rename from src/cmd/compile/internal/gc/testdata/slice_test.go rename to src/cmd/compile/internal/test/testdata/slice_test.go diff --git a/src/cmd/compile/internal/gc/testdata/sqrtConst_test.go b/src/cmd/compile/internal/test/testdata/sqrtConst_test.go similarity index 100% rename from 
src/cmd/compile/internal/gc/testdata/sqrtConst_test.go rename to src/cmd/compile/internal/test/testdata/sqrtConst_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/string_test.go b/src/cmd/compile/internal/test/testdata/string_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/string_test.go
rename to src/cmd/compile/internal/test/testdata/string_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/unsafe_test.go b/src/cmd/compile/internal/test/testdata/unsafe_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/unsafe_test.go
rename to src/cmd/compile/internal/test/testdata/unsafe_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/zero_test.go b/src/cmd/compile/internal/test/testdata/zero_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/zero_test.go
rename to src/cmd/compile/internal/test/testdata/zero_test.go
diff --git a/src/cmd/compile/internal/gc/truncconst_test.go b/src/cmd/compile/internal/test/truncconst_test.go
similarity index 99%
rename from src/cmd/compile/internal/gc/truncconst_test.go
rename to src/cmd/compile/internal/test/truncconst_test.go
index d1538180649df..7705042ca2c84 100644
--- a/src/cmd/compile/internal/gc/truncconst_test.go
+++ b/src/cmd/compile/internal/test/truncconst_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

-package gc
+package test

 import "testing"

diff --git a/src/cmd/compile/internal/gc/zerorange_test.go b/src/cmd/compile/internal/test/zerorange_test.go
similarity index 98%
rename from src/cmd/compile/internal/gc/zerorange_test.go
rename to src/cmd/compile/internal/test/zerorange_test.go
index 89f4cb9bcf6e4..cb1a6e04e4e77 100644
--- a/src/cmd/compile/internal/gc/zerorange_test.go
+++ b/src/cmd/compile/internal/test/zerorange_test.go
@@ -2,11 +2,9 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

-package gc
+package test

-import (
- "testing"
-)
+import "testing"

 var glob = 3
 var globp *int64

From 63c96c2ee7444b83224b9c5aadd8ad5b757c1e03 Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Wed, 23 Dec 2020 00:36:34 -0800
Subject: [PATCH 242/474] [dev.regabi] cmd/compile: update mkbuiltin.go and
 re-enable TestBuiltin

Updates mkbuiltin.go to match builtin.go after the recent rf rewrites.
Change-Id: I80cf5d7c27b36fe28553406819cb4263de84e5ed Reviewed-on: https://go-review.googlesource.com/c/go/+/279952 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/typecheck/builtin_test.go | 1 - src/cmd/compile/internal/typecheck/mkbuiltin.go | 11 ++++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/cmd/compile/internal/typecheck/builtin_test.go b/src/cmd/compile/internal/typecheck/builtin_test.go index cc8d49730aaea..fb9d3e393f10e 100644 --- a/src/cmd/compile/internal/typecheck/builtin_test.go +++ b/src/cmd/compile/internal/typecheck/builtin_test.go @@ -13,7 +13,6 @@ import ( ) func TestBuiltin(t *testing.T) { - t.Skip("mkbuiltin needs fixing") testenv.MustHaveGoRun(t) t.Parallel() diff --git a/src/cmd/compile/internal/typecheck/mkbuiltin.go b/src/cmd/compile/internal/typecheck/mkbuiltin.go index 2a208d960f5f4..27dbf1f10e15a 100644 --- a/src/cmd/compile/internal/typecheck/mkbuiltin.go +++ b/src/cmd/compile/internal/typecheck/mkbuiltin.go @@ -36,6 +36,7 @@ func main() { fmt.Fprintln(&b, "package typecheck") fmt.Fprintln(&b) fmt.Fprintln(&b, `import (`) + fmt.Fprintln(&b, ` "cmd/compile/internal/base"`) fmt.Fprintln(&b, ` "cmd/compile/internal/ir"`) fmt.Fprintln(&b, ` "cmd/compile/internal/types"`) fmt.Fprintln(&b, `)`) @@ -169,7 +170,7 @@ func (i *typeInterner) mktype(t ast.Expr) string { } return fmt.Sprintf("types.NewChan(%s, %s)", i.subtype(t.Value), dir) case *ast.FuncType: - return fmt.Sprintf("functype(nil, %s, %s)", i.fields(t.Params, false), i.fields(t.Results, false)) + return fmt.Sprintf("NewFuncType(nil, %s, %s)", i.fields(t.Params, false), i.fields(t.Results, false)) case *ast.InterfaceType: if len(t.Methods.List) != 0 { log.Fatal("non-empty interfaces unsupported") @@ -180,7 +181,7 @@ func (i *typeInterner) mktype(t ast.Expr) string { case *ast.StarExpr: return fmt.Sprintf("types.NewPtr(%s)", i.subtype(t.X)) case *ast.StructType: - return fmt.Sprintf("tostruct(%s)", i.fields(t.Fields, true)) + return fmt.Sprintf("NewStructType(%s)", i.fields(t.Fields, true)) default: log.Fatalf("unhandled type: %#v", t) @@ -196,13 +197,13 @@ func (i *typeInterner) fields(fl *ast.FieldList, keepNames bool) string { for _, f := range fl.List { typ := i.subtype(f.Type) if len(f.Names) == 0 { - res = append(res, fmt.Sprintf("anonfield(%s)", typ)) + res = append(res, fmt.Sprintf("ir.NewField(base.Pos, nil, nil, %s)", typ)) } else { for _, name := range f.Names { if keepNames { - res = append(res, fmt.Sprintf("namedfield(%q, %s)", name.Name, typ)) + res = append(res, fmt.Sprintf("ir.NewField(base.Pos, Lookup(%q), nil, %s)", name.Name, typ)) } else { - res = append(res, fmt.Sprintf("anonfield(%s)", typ)) + res = append(res, fmt.Sprintf("ir.NewField(base.Pos, nil, nil, %s)", typ)) } } } From 5898025026e3ec38451e86c7837f6faf3633cf27 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Wed, 23 Dec 2020 00:50:18 -0800 Subject: [PATCH 243/474] [dev.regabi] cmd/compile: update mkbuiltin.go to use new type constructors We recently added new functions to types like NewSignature and NewField, so we can use these directly rather than depending on the typecheck and ir wrappers. Passes toolstash -cmp. 
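
For example, the first generated signature in builtin.go changes from the
wrapper form to the direct constructors (taken from the diff below):

	// before
	typs[4] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])})

	// after
	typs[4] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[3])})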
Change-Id: I32676aa9a4ea71892216017756e72bcf90297219
Reviewed-on: https://go-review.googlesource.com/c/go/+/279953
Trust: Matthew Dempsky
Run-TryBot: Matthew Dempsky
TryBot-Result: Go Bot
Reviewed-by: Cuong Manh Le
---
 src/cmd/compile/internal/typecheck/builtin.go | 189 +++++++++---------
 .../compile/internal/typecheck/mkbuiltin.go   |  15 +-
 2 files changed, 101 insertions(+), 103 deletions(-)

diff --git a/src/cmd/compile/internal/typecheck/builtin.go b/src/cmd/compile/internal/typecheck/builtin.go
index d3c30fbf50c33..0dee8525292ab 100644
--- a/src/cmd/compile/internal/typecheck/builtin.go
+++ b/src/cmd/compile/internal/typecheck/builtin.go
@@ -3,9 +3,8 @@
 package typecheck
 
 import (
-	"cmd/compile/internal/base"
-	"cmd/compile/internal/ir"
 	"cmd/compile/internal/types"
+	"cmd/internal/src"
 )
 
 var runtimeDecls = [...]struct {
@@ -212,133 +211,133 @@ func runtimeTypes() []*types.Type {
 	typs[1] = types.NewPtr(typs[0])
 	typs[2] = types.Types[types.TANY]
 	typs[3] = types.NewPtr(typs[2])
-	typs[4] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])})
+	typs[4] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[3])})
 	typs[5] = types.Types[types.TUINTPTR]
 	typs[6] = types.Types[types.TBOOL]
 	typs[7] = types.Types[types.TUNSAFEPTR]
-	typs[8] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[6])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])})
-	typs[9] = NewFuncType(nil, nil, nil)
+	typs[8] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[5]), types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[6])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[7])})
+	typs[9] = types.NewSignature(types.NoPkg, nil, nil, nil)
 	typs[10] = types.Types[types.TINTER]
-	typs[11] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[10])}, nil)
+	typs[11] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[10])}, nil)
 	typs[12] = types.Types[types.TINT32]
 	typs[13] = types.NewPtr(typs[12])
-	typs[14] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[13])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[10])})
+	typs[14] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[13])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[10])})
 	typs[15] = types.Types[types.TINT]
-	typs[16] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[15])}, nil)
+	typs[16] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[15]), types.NewField(src.NoXPos, nil, typs[15])}, nil)
 	typs[17] = types.Types[types.TUINT]
-	typs[18] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[17]), ir.NewField(base.Pos, nil, nil, typs[15])}, nil)
-	typs[19] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}, nil)
+	typs[18] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[17]), types.NewField(src.NoXPos, nil, typs[15])}, nil)
+	typs[19] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[6])}, nil)
 	typs[20] = types.Types[types.TFLOAT64]
-	typs[21] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}, nil)
+	typs[21] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[20])}, nil)
 	typs[22] = types.Types[types.TINT64]
-	typs[23] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22])}, nil)
+	typs[23] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[22])}, nil)
 	typs[24] = types.Types[types.TUINT64]
-	typs[25] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24])}, nil)
+	typs[25] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[24])}, nil)
 	typs[26] = types.Types[types.TCOMPLEX128]
-	typs[27] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[26])}, nil)
+	typs[27] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[26])}, nil)
 	typs[28] = types.Types[types.TSTRING]
-	typs[29] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}, nil)
-	typs[30] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2])}, nil)
-	typs[31] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5])}, nil)
+	typs[29] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[28])}, nil)
+	typs[30] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[2])}, nil)
+	typs[31] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[5])}, nil)
 	typs[32] = types.NewArray(typs[0], 32)
 	typs[33] = types.NewPtr(typs[32])
-	typs[34] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])})
-	typs[35] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])})
-	typs[36] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])})
-	typs[37] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])})
+	typs[34] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[33]), types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[28])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[28])})
+	typs[35] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[33]), types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[28])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[28])})
+	typs[36] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[33]), types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[28])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[28])})
+	typs[37] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[33]), types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[28])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[28])})
 	typs[38] = types.NewSlice(typs[28])
-	typs[39] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[38])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])})
-	typs[40] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15])})
+	typs[39] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[33]), types.NewField(src.NoXPos, nil, typs[38])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[28])})
+	typs[40] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[28])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[15])})
 	typs[41] = types.NewArray(typs[0], 4)
 	typs[42] = types.NewPtr(typs[41])
-	typs[43] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[42]), ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])})
-	typs[44] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])})
-	typs[45] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])})
+	typs[43] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[42]), types.NewField(src.NoXPos, nil, typs[22])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[28])})
+	typs[44] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[33]), types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[15])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[28])})
+	typs[45] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[15])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[28])})
 	typs[46] = types.RuneType
 	typs[47] = types.NewSlice(typs[46])
-	typs[48] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[47])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])})
+	typs[48] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[33]), types.NewField(src.NoXPos, nil, typs[47])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[28])})
 	typs[49] = types.NewSlice(typs[0])
-	typs[50] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[49])})
+	typs[50] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[33]), types.NewField(src.NoXPos, nil, typs[28])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[49])})
 	typs[51] = types.NewArray(typs[46], 32)
 	typs[52] = types.NewPtr(typs[51])
-	typs[53] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[52]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[47])})
-	typs[54] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[5])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15])})
-	typs[55] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[46]), ir.NewField(base.Pos, nil, nil, typs[15])})
-	typs[56] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15])})
-	typs[57] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2])})
-	typs[58] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])})
-	typs[59] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2])})
-	typs[60] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2]), ir.NewField(base.Pos, nil, nil, typs[6])})
-	typs[61] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[1])}, nil)
-	typs[62] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1])}, nil)
+	typs[53] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[52]), types.NewField(src.NoXPos, nil, typs[28])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[47])})
+	typs[54] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[15]), types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[15]), types.NewField(src.NoXPos, nil, typs[5])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[15])})
+	typs[55] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[15])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[46]), types.NewField(src.NoXPos, nil, typs[15])})
+	typs[56] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[28])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[15])})
+	typs[57] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[2])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[2])})
+	typs[58] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[2])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[7])})
+	typs[59] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[3])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[2])})
+	typs[60] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[2])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[2]), types.NewField(src.NoXPos, nil, typs[6])})
+	typs[61] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[1])}, nil)
+	typs[62] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1])}, nil)
 	typs[63] = types.NewPtr(typs[5])
-	typs[64] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[63]), ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[7])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])})
+	typs[64] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[63]), types.NewField(src.NoXPos, nil, typs[7]), types.NewField(src.NoXPos, nil, typs[7])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[6])})
 	typs[65] = types.Types[types.TUINT32]
-	typs[66] = NewFuncType(nil, nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[65])})
+	typs[66] = types.NewSignature(types.NoPkg, nil, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[65])})
 	typs[67] = types.NewMap(typs[2], typs[2])
-	typs[68] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[22]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[67])})
-	typs[69] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[67])})
-	typs[70] = NewFuncType(nil, nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[67])})
-	typs[71] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])})
-	typs[72] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])})
-	typs[73] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[1])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])})
-	typs[74] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[6])})
-	typs[75] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[6])})
-	typs[76] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[1])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[6])})
-	typs[77] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil)
-	typs[78] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[2])}, nil)
-	typs[79] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])}, nil)
-	typs[80] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67])}, nil)
+	typs[68] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[22]), types.NewField(src.NoXPos, nil, typs[3])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[67])})
+	typs[69] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[15]), types.NewField(src.NoXPos, nil, typs[3])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[67])})
+	typs[70] = types.NewSignature(types.NoPkg, nil, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[67])})
+	typs[71] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[67]), types.NewField(src.NoXPos, nil, typs[3])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[3])})
+	typs[72] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[67]), types.NewField(src.NoXPos, nil, typs[2])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[3])})
+	typs[73] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[67]), types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[1])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[3])})
+	typs[74] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[67]), types.NewField(src.NoXPos, nil, typs[3])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[6])})
+	typs[75] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[67]), types.NewField(src.NoXPos, nil, typs[2])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[6])})
+	typs[76] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[67]), types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[1])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[6])})
+	typs[77] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[67]), types.NewField(src.NoXPos, nil, typs[3])}, nil)
+	typs[78] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[67]), types.NewField(src.NoXPos, nil, typs[2])}, nil)
+	typs[79] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[3])}, nil)
+	typs[80] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[67])}, nil)
 	typs[81] = types.NewChan(typs[2], types.Cboth)
-	typs[82] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[81])})
-	typs[83] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[81])})
+	typs[82] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[22])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[81])})
+	typs[83] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[15])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[81])})
 	typs[84] = types.NewChan(typs[2], types.Crecv)
-	typs[85] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[84]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil)
-	typs[86] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[84]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])})
+	typs[85] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[84]), types.NewField(src.NoXPos, nil, typs[3])}, nil)
+	typs[86] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[84]), types.NewField(src.NoXPos, nil, typs[3])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[6])})
 	typs[87] = types.NewChan(typs[2], types.Csend)
-	typs[88] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[87]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil)
+	typs[88] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[87]), types.NewField(src.NoXPos, nil, typs[3])}, nil)
 	typs[89] = types.NewArray(typs[0], 3)
-	typs[90] = NewStructType([]*ir.Field{ir.NewField(base.Pos, Lookup("enabled"), nil, typs[6]), ir.NewField(base.Pos, Lookup("pad"), nil, typs[89]), ir.NewField(base.Pos, Lookup("needed"), nil, typs[6]), ir.NewField(base.Pos, Lookup("cgo"), nil, typs[6]), ir.NewField(base.Pos, Lookup("alignme"), nil, typs[24])})
-	typs[91] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil)
-	typs[92] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil)
-	typs[93] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15])})
-	typs[94] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[87]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])})
-	typs[95] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[84])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])})
+	typs[90] = types.NewStruct(types.NoPkg, []*types.Field{types.NewField(src.NoXPos, Lookup("enabled"), typs[6]), types.NewField(src.NoXPos, Lookup("pad"), typs[89]), types.NewField(src.NoXPos, Lookup("needed"), typs[6]), types.NewField(src.NoXPos, Lookup("cgo"), typs[6]), types.NewField(src.NoXPos, Lookup("alignme"), typs[24])})
+	typs[91] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[3])}, nil)
+	typs[92] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[3])}, nil)
+	typs[93] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[15]), types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[15])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[15])})
+	typs[94] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[87]), types.NewField(src.NoXPos, nil, typs[3])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[6])})
+	typs[95] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[84])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[6])})
 	typs[96] = types.NewPtr(typs[6])
-	typs[97] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[96]), ir.NewField(base.Pos, nil, nil, typs[84])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])})
-	typs[98] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[63])}, nil)
-	typs[99] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[63]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[6])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[6])})
-	typs[100] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])})
-	typs[101] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[22]), ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])})
-	typs[102] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[7])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])})
+	typs[97] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[96]), types.NewField(src.NoXPos, nil, typs[84])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[6])})
+	typs[98] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[63])}, nil)
+	typs[99] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[63]), types.NewField(src.NoXPos, nil, typs[15]), types.NewField(src.NoXPos, nil, typs[15]), types.NewField(src.NoXPos, nil, typs[6])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[15]), types.NewField(src.NoXPos, nil, typs[6])})
+	typs[100] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[15]), types.NewField(src.NoXPos, nil, typs[15])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[7])})
+	typs[101] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[22]), types.NewField(src.NoXPos, nil, typs[22])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[7])})
+	typs[102] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[15]), types.NewField(src.NoXPos, nil, typs[15]), types.NewField(src.NoXPos, nil, typs[7])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[7])})
 	typs[103] = types.NewSlice(typs[2])
-	typs[104] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[103]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[103])})
-	typs[105] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil)
-	typs[106] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil)
-	typs[107] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[5])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])})
-	typs[108] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])})
-	typs[109] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[7])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])})
-	typs[110] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[5])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5])})
-	typs[111] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[5])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5])})
-	typs[112] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22]), ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22])})
-	typs[113] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24]), ir.NewField(base.Pos, nil, nil, typs[24])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24])})
-	typs[114] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22])})
-	typs[115] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24])})
-	typs[116] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[65])})
-	typs[117] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])})
-	typs[118] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])})
-	typs[119] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[65])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])})
-	typs[120] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[26]), ir.NewField(base.Pos, nil, nil, typs[26])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[26])})
-	typs[121] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil)
-	typs[122] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil)
-	typs[123] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil)
+	typs[104] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[103]), types.NewField(src.NoXPos, nil, typs[15])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[103])})
+	typs[105] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[5])}, nil)
+	typs[106] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[7]), types.NewField(src.NoXPos, nil, typs[5])}, nil)
+	typs[107] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[5])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[6])})
+	typs[108] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[3])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[6])})
+	typs[109] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[7]), types.NewField(src.NoXPos, nil, typs[7])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[6])})
+	typs[110] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[7]), types.NewField(src.NoXPos, nil, typs[5]), types.NewField(src.NoXPos, nil, typs[5])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[5])})
+	typs[111] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[7]), types.NewField(src.NoXPos, nil, typs[5])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[5])})
+	typs[112] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[22]), types.NewField(src.NoXPos, nil, typs[22])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[22])})
+	typs[113] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[24]), types.NewField(src.NoXPos, nil, typs[24])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[24])})
+	typs[114] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[20])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[22])})
+	typs[115] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[20])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[24])})
+	typs[116] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[20])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[65])})
+	typs[117] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[22])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[20])})
+	typs[118] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[24])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[20])})
+	typs[119] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[65])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[20])})
+	typs[120] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[26]), types.NewField(src.NoXPos, nil, typs[26])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[26])})
+	typs[121] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[5]), types.NewField(src.NoXPos, nil, typs[5])}, nil)
+	typs[122] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[5]), types.NewField(src.NoXPos, nil, typs[5]), types.NewField(src.NoXPos, nil, typs[5])}, nil)
+	typs[123] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[7]), types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[5])}, nil)
 	typs[124] = types.NewSlice(typs[7])
-	typs[125] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[124])}, nil)
+	typs[125] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[7]), types.NewField(src.NoXPos, nil, typs[124])}, nil)
 	typs[126] = types.Types[types.TUINT8]
-	typs[127] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[126]), ir.NewField(base.Pos, nil, nil, typs[126])}, nil)
+	typs[127] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[126]), types.NewField(src.NoXPos, nil, typs[126])}, nil)
 	typs[128] = types.Types[types.TUINT16]
-	typs[129] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[128]), ir.NewField(base.Pos, nil, nil, typs[128])}, nil)
-	typs[130] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[65]), ir.NewField(base.Pos, nil, nil, typs[65])}, nil)
-	typs[131] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24]), ir.NewField(base.Pos, nil, nil, typs[24])}, nil)
+	typs[129] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[128]), types.NewField(src.NoXPos, nil, typs[128])}, nil)
+	typs[130] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[65]), types.NewField(src.NoXPos, nil, typs[65])}, nil)
+	typs[131] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[24]), types.NewField(src.NoXPos, nil, typs[24])}, nil)
 	return typs[:]
 }
diff --git a/src/cmd/compile/internal/typecheck/mkbuiltin.go b/src/cmd/compile/internal/typecheck/mkbuiltin.go
index 27dbf1f10e15a..07f4b767e8f8d 100644
--- a/src/cmd/compile/internal/typecheck/mkbuiltin.go
+++ b/src/cmd/compile/internal/typecheck/mkbuiltin.go
@@ -36,9 +36,8 @@ func main() {
 	fmt.Fprintln(&b, "package typecheck")
 	fmt.Fprintln(&b)
 	fmt.Fprintln(&b, `import (`)
-	fmt.Fprintln(&b, `	"cmd/compile/internal/base"`)
-	fmt.Fprintln(&b, `	"cmd/compile/internal/ir"`)
 	fmt.Fprintln(&b, `	"cmd/compile/internal/types"`)
+	fmt.Fprintln(&b, `	"cmd/internal/src"`)
 	fmt.Fprintln(&b, `)`)
 
 	mkbuiltin(&b, "runtime")
 
@@ -170,7 +169,7 @@ func (i *typeInterner) mktype(t ast.Expr) string {
 		}
 		return fmt.Sprintf("types.NewChan(%s, %s)", i.subtype(t.Value), dir)
 	case *ast.FuncType:
-		return fmt.Sprintf("NewFuncType(nil, %s, %s)", i.fields(t.Params, false), i.fields(t.Results, false))
+		return fmt.Sprintf("types.NewSignature(types.NoPkg, nil, %s, %s)", i.fields(t.Params, false), i.fields(t.Results, false))
 	case *ast.InterfaceType:
 		if len(t.Methods.List) != 0 {
log.Fatal("non-empty interfaces unsupported") @@ -181,7 +180,7 @@ func (i *typeInterner) mktype(t ast.Expr) string { case *ast.StarExpr: return fmt.Sprintf("types.NewPtr(%s)", i.subtype(t.X)) case *ast.StructType: - return fmt.Sprintf("NewStructType(%s)", i.fields(t.Fields, true)) + return fmt.Sprintf("types.NewStruct(types.NoPkg, %s)", i.fields(t.Fields, true)) default: log.Fatalf("unhandled type: %#v", t) @@ -197,18 +196,18 @@ func (i *typeInterner) fields(fl *ast.FieldList, keepNames bool) string { for _, f := range fl.List { typ := i.subtype(f.Type) if len(f.Names) == 0 { - res = append(res, fmt.Sprintf("ir.NewField(base.Pos, nil, nil, %s)", typ)) + res = append(res, fmt.Sprintf("types.NewField(src.NoXPos, nil, %s)", typ)) } else { for _, name := range f.Names { if keepNames { - res = append(res, fmt.Sprintf("ir.NewField(base.Pos, Lookup(%q), nil, %s)", name.Name, typ)) + res = append(res, fmt.Sprintf("types.NewField(src.NoXPos, Lookup(%q), %s)", name.Name, typ)) } else { - res = append(res, fmt.Sprintf("ir.NewField(base.Pos, nil, nil, %s)", typ)) + res = append(res, fmt.Sprintf("types.NewField(src.NoXPos, nil, %s)", typ)) } } } } - return fmt.Sprintf("[]*ir.Field{%s}", strings.Join(res, ", ")) + return fmt.Sprintf("[]*types.Field{%s}", strings.Join(res, ", ")) } func intconst(e ast.Expr) int64 { From 87a592b35602e89c55218d2a54a1e0dade5db7e2 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Wed, 23 Dec 2020 01:15:58 -0800 Subject: [PATCH 244/474] [dev.regabi] cmd/compile: cleanup import/export code Now that we have concrete AST node types and better constructor APIs, we can more cleanup a lot of the import code and some export code too. Passes toolstash -cmp. Change-Id: Ie3425d9dac11ac4245e5da675dd298984a926df4 Reviewed-on: https://go-review.googlesource.com/c/go/+/279954 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/typecheck/iexport.go | 27 +---- src/cmd/compile/internal/typecheck/iimport.go | 114 +++++++----------- 2 files changed, 49 insertions(+), 92 deletions(-) diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index 4ddee01b5a9c0..95a100e6a5ecd 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -1155,7 +1155,7 @@ func (w *exportWriter) stmt(n ir.Node) { w.pos(n.Pos()) w.stmtList(n.Init()) w.exprsOrNil(nil, nil) // TODO(rsc): Delete (and fix importer). 
-		w.caseList(n)
+		w.caseList(n.Cases, false)
 
 	case ir.OSWITCH:
 		n := n.(*ir.SwitchStmt)
 		w.op(ir.OSWITCH)
 		w.pos(n.Pos())
 		w.stmtList(n.Init())
 		w.exprsOrNil(n.Tag, nil)
-		w.caseList(n)
+		w.caseList(n.Cases, isNamedTypeSwitch(n.Tag))
 
 	// case OCASE:
 	//	handled by caseList
@@ -1187,27 +1187,12 @@ func (w *exportWriter) stmt(n ir.Node) {
 	}
 }
 
-func isNamedTypeSwitch(n ir.Node) bool {
-	if n.Op() != ir.OSWITCH {
-		return false
-	}
-	sw := n.(*ir.SwitchStmt)
-	if sw.Tag == nil || sw.Tag.Op() != ir.OTYPESW {
-		return false
-	}
-	guard := sw.Tag.(*ir.TypeSwitchGuard)
-	return guard.Tag != nil
+func isNamedTypeSwitch(x ir.Node) bool {
+	guard, ok := x.(*ir.TypeSwitchGuard)
+	return ok && guard.Tag != nil
 }
 
-func (w *exportWriter) caseList(sw ir.Node) {
-	namedTypeSwitch := isNamedTypeSwitch(sw)
-
-	var cases []ir.Node
-	if sw.Op() == ir.OSWITCH {
-		cases = sw.(*ir.SwitchStmt).Cases
-	} else {
-		cases = sw.(*ir.SelectStmt).Cases
-	}
+func (w *exportWriter) caseList(cases []ir.Node, namedTypeSwitch bool) {
 	w.uint64(uint64(len(cases)))
 	for _, cas := range cases {
 		cas := cas.(*ir.CaseStmt)
diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go
index ab43d4f71bceb..3c7dde5506793 100644
--- a/src/cmd/compile/internal/typecheck/iimport.go
+++ b/src/cmd/compile/internal/typecheck/iimport.go
@@ -767,8 +767,8 @@ func (r *importReader) stmtList() []ir.Node {
 	return list
 }
 
-func (r *importReader) caseList(sw ir.Node) []ir.Node {
-	namedTypeSwitch := isNamedTypeSwitch(sw)
+func (r *importReader) caseList(switchExpr ir.Node) []ir.Node {
+	namedTypeSwitch := isNamedTypeSwitch(switchExpr)
 
 	cases := make([]ir.Node, r.uint64())
 	for i := range cases {
@@ -781,7 +781,7 @@ func (r *importReader) caseList(sw ir.Node) []ir.Node {
 			caseVar := ir.NewNameAt(cas.Pos(), r.ident())
 			Declare(caseVar, DeclContext)
 			cas.Vars = []ir.Node{caseVar}
-			caseVar.Defn = sw.(*ir.SwitchStmt).Tag
+			caseVar.Defn = switchExpr
 		}
 		cas.Body.Set(r.stmtList())
 		cases[i] = cas
@@ -821,7 +821,7 @@ func (r *importReader) node() ir.Node {
 		pos := r.pos()
 		typ := r.typ()
 
-		n := npos(pos, NodNil())
+		n := ir.NewNilExpr(pos)
 		n.SetType(typ)
 		return n
 
@@ -829,7 +829,7 @@ func (r *importReader) node() ir.Node {
 		pos := r.pos()
 		typ := r.typ()
 
-		n := npos(pos, ir.NewLiteral(r.value(typ)))
+		n := ir.NewBasicLit(pos, r.value(typ))
 		n.SetType(typ)
 		return n
 
@@ -864,26 +864,19 @@ func (r *importReader) node() ir.Node {
 	// unreachable - mapped to case OADDR below by exporter
 
 	case ir.OSTRUCTLIT:
-		// TODO(mdempsky): Export position information for OSTRUCTKEY nodes.
-		savedlineno := base.Pos
-		base.Pos = r.pos()
-		n := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(r.typ()).(ir.Ntype), nil)
-		n.List.Set(r.elemList()) // special handling of field names
-		base.Pos = savedlineno
-		return n
+		pos := r.pos()
+		return ir.NewCompLitExpr(pos, ir.OCOMPLIT, ir.TypeNode(r.typ()).(ir.Ntype), r.elemList(pos))
 
 	// case OARRAYLIT, OSLICELIT, OMAPLIT:
 	// 	unreachable - mapped to case OCOMPLIT below by exporter
 
 	case ir.OCOMPLIT:
-		n := ir.NewCompLitExpr(r.pos(), ir.OCOMPLIT, ir.TypeNode(r.typ()).(ir.Ntype), nil)
-		n.List.Set(r.exprList())
-		return n
+		return ir.NewCompLitExpr(r.pos(), ir.OCOMPLIT, ir.TypeNode(r.typ()).(ir.Ntype), r.exprList())
 
 	case ir.OKEY:
 		pos := r.pos()
-		left, right := r.exprsOrNil()
-		return ir.NewKeyExpr(pos, left, right)
+		key, value := r.exprsOrNil()
+		return ir.NewKeyExpr(pos, key, value)
 
 	// case OSTRUCTKEY:
 	//	unreachable - handled in case OSTRUCTLIT by elemList
@@ -926,9 +919,9 @@ func (r *importReader) node() ir.Node {
 	// unreachable - mapped to OCONV case below by exporter
 
 	case ir.OCONV:
-		n := ir.NewConvExpr(r.pos(), ir.OCONV, nil, r.expr())
-		n.SetType(r.typ())
-		return n
+		pos := r.pos()
+		x := r.expr()
+		return ir.NewConvExpr(pos, ir.OCONV, r.typ(), x)
 
 	case ir.OCOPY, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN:
 		n := builtinCall(r.pos(), op)
@@ -942,10 +935,10 @@ func (r *importReader) node() ir.Node {
 	// unreachable - mapped to OCALL case below by exporter
 
 	case ir.OCALL:
-		n := ir.NewCallExpr(r.pos(), ir.OCALL, nil, nil)
-		n.PtrInit().Set(r.stmtList())
-		n.X = r.expr()
-		n.Args.Set(r.exprList())
+		pos := r.pos()
+		init := r.stmtList()
+		n := ir.NewCallExpr(pos, ir.OCALL, r.expr(), r.exprList())
+		n.PtrInit().Set(init)
 		n.IsDDD = r.bool()
 		return n
 
@@ -979,7 +972,8 @@ func (r *importReader) node() ir.Node {
 	case ir.OADDSTR:
 		pos := r.pos()
 		list := r.exprList()
-		x := npos(pos, list[0])
+		x := list[0]
+		x.SetPos(pos) // TODO(mdempsky): Remove toolstash bandage.
 		for _, y := range list[1:] {
 			x = ir.NewBinaryExpr(pos, ir.OADD, x, y)
 		}
@@ -1006,9 +1000,7 @@ func (r *importReader) node() ir.Node {
 		return ir.NewAssignStmt(r.pos(), r.expr(), r.expr())
 
 	case ir.OASOP:
-		n := ir.NewAssignOpStmt(r.pos(), ir.OXXX, nil, nil)
-		n.AsOp = r.op()
-		n.X = r.expr()
+		n := ir.NewAssignOpStmt(r.pos(), r.op(), r.expr(), nil)
 		if !r.bool() {
 			n.Y = ir.NewInt(1)
 			n.IncDec = true
@@ -1021,15 +1013,10 @@ func (r *importReader) node() ir.Node {
 	// unreachable - mapped to OAS2 case below by exporter
 
 	case ir.OAS2:
-		n := ir.NewAssignListStmt(r.pos(), ir.OAS2, nil, nil)
-		n.Lhs.Set(r.exprList())
-		n.Rhs.Set(r.exprList())
-		return n
+		return ir.NewAssignListStmt(r.pos(), ir.OAS2, r.exprList(), r.exprList())
 
 	case ir.ORETURN:
-		n := ir.NewReturnStmt(r.pos(), nil)
-		n.Results.Set(r.exprList())
-		return n
+		return ir.NewReturnStmt(r.pos(), r.exprList())
 
 	// case ORETJMP:
 	// 	unreachable - generated by compiler for trampolin routines (not exported)
@@ -1038,57 +1025,47 @@ func (r *importReader) node() ir.Node {
 		return ir.NewGoDeferStmt(r.pos(), op, r.expr())
 
 	case ir.OIF:
-		n := ir.NewIfStmt(r.pos(), nil, nil, nil)
-		n.PtrInit().Set(r.stmtList())
-		n.Cond = r.expr()
-		n.Body.Set(r.stmtList())
-		n.Else.Set(r.stmtList())
+		pos, init := r.pos(), r.stmtList()
+		n := ir.NewIfStmt(pos, r.expr(), r.stmtList(), r.stmtList())
+		n.PtrInit().Set(init)
 		return n
 
 	case ir.OFOR:
-		n := ir.NewForStmt(r.pos(), nil, nil, nil, nil)
-		n.PtrInit().Set(r.stmtList())
-		left, right := r.exprsOrNil()
-		n.Cond = left
-		n.Post = right
-		n.Body.Set(r.stmtList())
-		return n
+		pos, init := r.pos(), r.stmtList()
+		cond, post := r.exprsOrNil()
+		return ir.NewForStmt(pos, init, cond, post, r.stmtList())
 
 	case ir.ORANGE:
-		n := ir.NewRangeStmt(r.pos(), nil, nil, nil)
-		n.Vars.Set(r.stmtList())
-		n.X = r.expr()
-		n.Body.Set(r.stmtList())
-		return n
+		return ir.NewRangeStmt(r.pos(), r.stmtList(), r.expr(), r.stmtList())
 
 	case ir.OSELECT:
-		n := ir.NewSelectStmt(r.pos(), nil)
-		n.PtrInit().Set(r.stmtList())
+		pos := r.pos()
+		init := r.stmtList()
 		r.exprsOrNil() // TODO(rsc): Delete (and fix exporter). These are always nil.
-		n.Cases.Set(r.caseList(n))
+		n := ir.NewSelectStmt(pos, r.caseList(nil))
+		n.PtrInit().Set(init)
 		return n
 
 	case ir.OSWITCH:
-		n := ir.NewSwitchStmt(r.pos(), nil, nil)
-		n.PtrInit().Set(r.stmtList())
-		left, _ := r.exprsOrNil()
-		n.Tag = left
-		n.Cases.Set(r.caseList(n))
+		pos := r.pos()
+		init := r.stmtList()
+		x, _ := r.exprsOrNil()
+		n := ir.NewSwitchStmt(pos, x, r.caseList(x))
+		n.PtrInit().Set(init)
 		return n
 
 	// case OCASE:
 	//	handled by caseList
 
 	case ir.OFALL:
-		n := ir.NewBranchStmt(r.pos(), ir.OFALL, nil)
-		return n
+		return ir.NewBranchStmt(r.pos(), ir.OFALL, nil)
 
 	// case OEMPTY:
 	// 	unreachable - not emitted by exporter
 
 	case ir.OBREAK, ir.OCONTINUE, ir.OGOTO:
-		var sym *types.Sym
 		pos := r.pos()
+		var sym *types.Sym
 		if label := r.string(); label != "" {
 			sym = Lookup(label)
 		}
@@ -1111,12 +1088,12 @@ func (r *importReader) op() ir.Op {
 	return ir.Op(r.uint64())
 }
 
-func (r *importReader) elemList() []ir.Node {
+func (r *importReader) elemList(pos src.XPos) []ir.Node {
 	c := r.uint64()
 	list := make([]ir.Node, c)
 	for i := range list {
-		s := r.ident()
-		list[i] = ir.NewStructKeyExpr(base.Pos, s, r.expr())
+		// TODO(mdempsky): Export position information for OSTRUCTKEY nodes.
+		list[i] = ir.NewStructKeyExpr(pos, r.ident(), r.expr())
 	}
 	return list
 }
@@ -1135,8 +1112,3 @@ func (r *importReader) exprsOrNil() (a, b ir.Node) {
 func builtinCall(pos src.XPos, op ir.Op) *ir.CallExpr {
 	return ir.NewCallExpr(pos, ir.OCALL, ir.NewIdent(base.Pos, types.BuiltinPkg.Lookup(ir.OpNames[op])), nil)
 }
-
-func npos(pos src.XPos, n ir.Node) ir.Node {
-	n.SetPos(pos)
-	return n
-}

From 18ebfb49e9114b98e5a66acae073f5514e383aba Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Wed, 23 Dec 2020 02:00:39 -0800
Subject: [PATCH 245/474] [dev.regabi] cmd/compile: cleanup noder

Similar to the previous CL: take advantage of the better constructor
APIs for translating ASTs from syntax to ir.

Passes toolstash -cmp.

Change-Id: I40970775e7dd5afe2a0b7593ce3bd73237562457
Reviewed-on: https://go-review.googlesource.com/c/go/+/279972
Trust: Matthew Dempsky
Run-TryBot: Matthew Dempsky
TryBot-Result: Go Bot
Reviewed-by: Cuong Manh Le
---
 src/cmd/compile/internal/noder/noder.go | 96 +++++++++----------
 1 file changed, 33 insertions(+), 63 deletions(-)

diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go
index a684673c8f490..c73e2d7fc5384 100644
--- a/src/cmd/compile/internal/noder/noder.go
+++ b/src/cmd/compile/internal/noder/noder.go
@@ -377,11 +377,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) {
 func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node {
 	names := p.declNames(ir.ONAME, decl.NameList)
 	typ := p.typeExprOrNil(decl.Type)
-
-	var exprs []ir.Node
-	if decl.Values != nil {
-		exprs = p.exprList(decl.Values)
-	}
+	exprs := p.exprList(decl.Values)
 
 	if pragma, ok := decl.Pragma.(*pragmas); ok {
 		if len(pragma.Embeds) > 0 {
@@ -620,10 +616,14 @@ func (p *noder) param(param *syntax.Field, dddOk, final bool) *ir.Field {
 }
 
 func (p *noder) exprList(expr syntax.Expr) []ir.Node {
-	if list, ok := expr.(*syntax.ListExpr); ok {
-		return p.exprs(list.ElemList)
+	switch expr := expr.(type) {
+	case nil:
+		return nil
+	case *syntax.ListExpr:
+		return p.exprs(expr.ElemList)
+	default:
+		return []ir.Node{p.expr(expr)}
 	}
-	return []ir.Node{p.expr(expr)}
 }
 
 func (p *noder) exprs(exprs []syntax.Expr) []ir.Node {
@@ -642,17 +642,14 @@ func (p *noder) expr(expr syntax.Expr) ir.Node {
 	case *syntax.Name:
 		return p.mkname(expr)
 	case *syntax.BasicLit:
-		n := ir.NewLiteral(p.basicLit(expr))
+		n := ir.NewBasicLit(p.pos(expr), p.basicLit(expr))
 		if expr.Kind == syntax.RuneLit {
 			n.SetType(types.UntypedRune)
 		}
 		n.SetDiag(expr.Bad) // avoid follow-on errors if there was a syntax error
 		return n
 	case *syntax.CompositeLit:
-		n := ir.NewCompLitExpr(p.pos(expr), ir.OCOMPLIT, nil, nil)
-		if expr.Type != nil {
-			n.Ntype = ir.Node(p.expr(expr.Type)).(ir.Ntype)
-		}
+		n := ir.NewCompLitExpr(p.pos(expr), ir.OCOMPLIT, p.typeExpr(expr.Type), nil)
 		l := p.exprs(expr.ElemList)
 		for i, e := range l {
 			l[i] = p.wrapname(expr.ElemList[i], e)
@@ -695,7 +692,7 @@ func (p *noder) expr(expr syntax.Expr) ir.Node {
 		n.SetSliceBounds(index[0], index[1], index[2])
 		return n
 	case *syntax.AssertExpr:
-		return ir.NewTypeAssertExpr(p.pos(expr), p.expr(expr.X), p.typeExpr(expr.Type).(ir.Ntype))
+		return ir.NewTypeAssertExpr(p.pos(expr), p.expr(expr.X), p.typeExpr(expr.Type))
 	case *syntax.Operation:
 		if expr.Op == syntax.Add && expr.Y != nil {
 			return p.sum(expr)
@@ -719,8 +716,7 @@ func (p *noder) expr(expr syntax.Expr) ir.Node {
 		}
 		return ir.NewBinaryExpr(pos, op, x, y)
 	case *syntax.CallExpr:
-		n := ir.NewCallExpr(p.pos(expr), ir.OCALL, p.expr(expr.Fun), nil)
-		n.Args.Set(p.exprs(expr.ArgList))
+		n := ir.NewCallExpr(p.pos(expr), ir.OCALL, p.expr(expr.Fun), p.exprs(expr.ArgList))
 		n.IsDDD = expr.HasDots
 		return n
 
@@ -987,7 +983,7 @@ func (p *noder) stmt(stmt syntax.Stmt) ir.Node {
 func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node {
 	p.setlineno(stmt)
 	switch stmt := stmt.(type) {
-	case *syntax.EmptyStmt:
+	case nil, *syntax.EmptyStmt:
 		return nil
 	case *syntax.LabeledStmt:
 		return p.labeledStmt(stmt, fallOK)
@@ -1060,12 +1056,7 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node {
 		}
 		return ir.NewGoDeferStmt(p.pos(stmt), op, p.expr(stmt.Call))
 	case *syntax.ReturnStmt:
-		var results []ir.Node
-		if stmt.Results != nil {
-			results = p.exprList(stmt.Results)
-		}
-		n := ir.NewReturnStmt(p.pos(stmt), nil)
-		n.Results.Set(results)
+		n := ir.NewReturnStmt(p.pos(stmt), p.exprList(stmt.Results))
 		if len(n.Results) == 0 && ir.CurFunc != nil {
 			for _, ln := range ir.CurFunc.Dcl {
 				if ln.Class_ == ir.PPARAM {
@@ -1159,14 +1150,9 @@ func (p *noder) blockStmt(stmt *syntax.BlockStmt) []ir.Node {
 
 func (p *noder) ifStmt(stmt *syntax.IfStmt) ir.Node {
 	p.openScope(stmt.Pos())
-	n := ir.NewIfStmt(p.pos(stmt), nil, nil, nil)
-	if stmt.Init != nil {
-		*n.PtrInit() = []ir.Node{p.stmt(stmt.Init)}
-	}
-	if stmt.Cond != nil {
-		n.Cond = p.expr(stmt.Cond)
-	}
-	n.Body.Set(p.blockStmt(stmt.Then))
+	init := p.simpleStmt(stmt.Init)
+	n := ir.NewIfStmt(p.pos(stmt), p.expr(stmt.Cond), p.blockStmt(stmt.Then), nil)
+	*n.PtrInit() = init
 	if stmt.Else != nil {
 		e := p.stmt(stmt.Else)
 		if e.Op() == ir.OBLOCK {
@@ -1197,30 +1183,17 @@ func (p *noder) forStmt(stmt *syntax.ForStmt) ir.Node {
 		return n
 	}
 
-	n := ir.NewForStmt(p.pos(stmt), nil, nil, nil, nil)
-	if stmt.Init != nil {
-		*n.PtrInit() = []ir.Node{p.stmt(stmt.Init)}
-	}
-	if stmt.Cond != nil {
-		n.Cond = p.expr(stmt.Cond)
-	}
-	if stmt.Post != nil {
-		n.Post = p.stmt(stmt.Post)
-	}
-	n.Body.Set(p.blockStmt(stmt.Body))
+	n := ir.NewForStmt(p.pos(stmt), p.simpleStmt(stmt.Init), p.expr(stmt.Cond), p.stmt(stmt.Post), p.blockStmt(stmt.Body))
 	p.closeAnotherScope()
 	return n
 }
 
 func (p *noder) switchStmt(stmt *syntax.SwitchStmt) ir.Node {
 	p.openScope(stmt.Pos())
-	n := ir.NewSwitchStmt(p.pos(stmt), nil, nil)
-	if stmt.Init != nil {
-		*n.PtrInit() = []ir.Node{p.stmt(stmt.Init)}
-	}
-	if stmt.Tag != nil {
-		n.Tag = p.expr(stmt.Tag)
-	}
+
+	init := p.simpleStmt(stmt.Init)
+	n := ir.NewSwitchStmt(p.pos(stmt), p.expr(stmt.Tag), nil)
+	*n.PtrInit() = init
 
 	var tswitch *ir.TypeSwitchGuard
 	if l := n.Tag; l != nil && l.Op() == ir.OTYPESW {
@@ -1241,10 +1214,7 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitch
 		}
 		p.openScope(clause.Pos())
 
-		n := ir.NewCaseStmt(p.pos(clause), nil, nil)
-		if clause.Cases != nil {
-			n.List.Set(p.exprList(clause.Cases))
-		}
+		n := ir.NewCaseStmt(p.pos(clause), p.exprList(clause.Cases), nil)
 		if tswitch != nil && tswitch.Tag != nil {
 			nn := typecheck.NewName(tswitch.Tag.Sym())
 			typecheck.Declare(nn, typecheck.DeclContext)
@@ -1283,13 +1253,18 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitch
 }
 
 func (p *noder) selectStmt(stmt *syntax.SelectStmt) ir.Node {
-	n := ir.NewSelectStmt(p.pos(stmt), nil)
-	n.Cases.Set(p.commClauses(stmt.Body, stmt.Rbrace))
-	return n
+	return ir.NewSelectStmt(p.pos(stmt), p.commClauses(stmt.Body, stmt.Rbrace))
+}
+
+func (p *noder) simpleStmt(stmt syntax.SimpleStmt) []ir.Node {
+	if stmt == nil {
+		return nil
+	}
+	return []ir.Node{p.stmt(stmt)}
 }
 
 func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []ir.Node {
-	nodes := make([]ir.Node, 0, len(clauses))
+	nodes := make([]ir.Node, len(clauses))
 	for i, clause := range clauses {
 		p.setlineno(clause)
 		if i > 0 {
@@ -1297,12 +1272,7 @@ func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []i
 		}
 		p.openScope(clause.Pos())
 
-		n := ir.NewCaseStmt(p.pos(clause), nil, nil)
-		if clause.Comm != nil {
-			n.List = []ir.Node{p.stmt(clause.Comm)}
-		}
-		n.Body.Set(p.stmts(clause.Body))
-		nodes = append(nodes, n)
+		nodes[i] = ir.NewCaseStmt(p.pos(clause), p.simpleStmt(clause.Comm), p.stmts(clause.Body))
 	}
 	if len(clauses) > 0 {
 		p.closeScope(rbrace)

From addade2cce83fb0019ad8394311c51466d4042cf Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Wed, 23 Dec 2020 02:16:17 -0800
Subject: [PATCH 246/474] [dev.regabi] cmd/compile: prefer types constructors over typecheck

Similar to the earlier mkbuiltin cleanup, there's a bunch of code that
calls typecheck.NewFuncType or typecheck.NewStructType, which can now
just call types.NewSignature and types.NewStruct, respectively.

Passes toolstash -cmp.

Change-Id: Ie6e09f1a7efef84b9a2bb5daa7087a6879979668
Reviewed-on: https://go-review.googlesource.com/c/go/+/279955
Trust: Matthew Dempsky
Run-TryBot: Matthew Dempsky
TryBot-Result: Go Bot
Reviewed-by: Cuong Manh Le
---
 src/cmd/compile/internal/reflectdata/alg.go        | 22 ++++++------
 .../compile/internal/reflectdata/reflect.go        |  6 +++-
 src/cmd/compile/internal/typecheck/dcl.go          | 34 ++++++-----------
 src/cmd/compile/internal/typecheck/func.go         | 14 ++++----
 src/cmd/compile/internal/walk/compare.go           | 10 +++---
 src/cmd/compile/internal/walk/select.go            |  6 ++--
 6 files changed, 46 insertions(+), 46 deletions(-)

diff --git a/src/cmd/compile/internal/reflectdata/alg.go b/src/cmd/compile/internal/reflectdata/alg.go
index 8391486e50ebe..1f943f5795bf5 100644
--- a/src/cmd/compile/internal/reflectdata/alg.go
+++ b/src/cmd/compile/internal/reflectdata/alg.go
@@ -289,11 +289,11 @@ func hashfor(t *types.Type) ir.Node {
 	n := typecheck.NewName(sym)
 	ir.MarkFunc(n)
-	n.SetType(typecheck.NewFuncType(nil, []*ir.Field{
-		ir.NewField(base.Pos, nil, nil, types.NewPtr(t)),
-		ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
-	}, []*ir.Field{
-		ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
+	n.SetType(types.NewSignature(types.NoPkg, nil, []*types.Field{
+		types.NewField(base.Pos, nil, types.NewPtr(t)),
+		types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
+	}, []*types.Field{
+		types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
 	}))
 	return n
 }
@@ -777,12 +777,12 @@ func hashmem(t *types.Type) ir.Node {
 	n := typecheck.NewName(sym)
 	ir.MarkFunc(n)
-	n.SetType(typecheck.NewFuncType(nil, []*ir.Field{
-		ir.NewField(base.Pos, nil, nil, types.NewPtr(t)),
-		ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
-		ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
-	}, []*ir.Field{
-		ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
+	n.SetType(types.NewSignature(types.NoPkg, nil, []*types.Field{
+		types.NewField(base.Pos, nil, types.NewPtr(t)),
+		types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
+		types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
+	}, []*types.Field{
+		types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
 	}))
 	return n
 }
diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go
index ba3e0fa75ef97..3fbf6f337f079 100644
--- a/src/cmd/compile/internal/reflectdata/reflect.go
+++ b/src/cmd/compile/internal/reflectdata/reflect.go
@@ -1419,7 +1419,11 @@ func WriteBasicTypes() {
+1419,11 @@ func WriteBasicTypes() { // The latter is the type of an auto-generated wrapper. WriteType(types.NewPtr(types.ErrorType)) - WriteType(typecheck.NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, types.ErrorType)}, []*ir.Field{ir.NewField(base.Pos, nil, nil, types.Types[types.TSTRING])})) + WriteType(types.NewSignature(types.NoPkg, nil, []*types.Field{ + types.NewField(base.Pos, nil, types.ErrorType), + }, []*types.Field{ + types.NewField(base.Pos, nil, types.Types[types.TSTRING]), + })) // add paths for runtime and main, which 6l imports implicitly. dimportpath(ir.Pkgs.Runtime) diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go index 9f66d0fa17be0..bfdd76ba107c8 100644 --- a/src/cmd/compile/internal/typecheck/dcl.go +++ b/src/cmd/compile/internal/typecheck/dcl.go @@ -676,30 +676,26 @@ func autotmpname(n int) string { // f is method type, with receiver. // return function type, receiver as first argument (or not). -func NewMethodType(f *types.Type, receiver *types.Type) *types.Type { - inLen := f.Params().Fields().Len() - if receiver != nil { - inLen++ +func NewMethodType(sig *types.Type, recv *types.Type) *types.Type { + nrecvs := 0 + if recv != nil { + nrecvs++ } - in := make([]*ir.Field, 0, inLen) - if receiver != nil { - d := ir.NewField(base.Pos, nil, nil, receiver) - in = append(in, d) + params := make([]*types.Field, nrecvs+sig.Params().Fields().Len()) + if recv != nil { + params[0] = types.NewField(base.Pos, nil, recv) } - - for _, t := range f.Params().Fields().Slice() { - d := ir.NewField(base.Pos, nil, nil, t.Type) - d.IsDDD = t.IsDDD() - in = append(in, d) + for i, param := range sig.Params().Fields().Slice() { + d := types.NewField(base.Pos, nil, param.Type) + d.SetIsDDD(param.IsDDD()) + params[nrecvs+i] = d } - outLen := f.Results().Fields().Len() - out := make([]*ir.Field, 0, outLen) - for _, t := range f.Results().Fields().Slice() { - d := ir.NewField(base.Pos, nil, nil, t.Type) - out = append(out, d) + results := make([]*types.Field, sig.Results().Fields().Len()) + for i, t := range sig.Results().Fields().Slice() { + results[i] = types.NewField(base.Pos, nil, t.Type) } - return NewFuncType(nil, in, out) + return types.NewSignature(types.LocalPkg, nil, params, results) } diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index 99d81dcedece1..fdac719ad9bbc 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -73,17 +73,17 @@ func ClosureType(clo *ir.ClosureExpr) *types.Type { // The information appears in the binary in the form of type descriptors; // the struct is unnamed so that closures in multiple packages with the // same struct type can share the descriptor. - fields := []*ir.Field{ - ir.NewField(base.Pos, Lookup(".F"), nil, types.Types[types.TUINTPTR]), + fields := []*types.Field{ + types.NewField(base.Pos, Lookup(".F"), types.Types[types.TUINTPTR]), } for _, v := range clo.Func.ClosureVars { typ := v.Type() if !v.Byval() { typ = types.NewPtr(typ) } - fields = append(fields, ir.NewField(base.Pos, v.Sym(), nil, typ)) + fields = append(fields, types.NewField(base.Pos, v.Sym(), typ)) } - typ := NewStructType(fields) + typ := types.NewStruct(types.NoPkg, fields) typ.SetNoalg(true) return typ } @@ -92,9 +92,9 @@ func ClosureType(clo *ir.ClosureExpr) *types.Type { // needed in the closure for n (n must be a OCALLPART node). // The address of a variable of the returned type can be cast to a func. 
func PartialCallType(n *ir.CallPartExpr) *types.Type { - t := NewStructType([]*ir.Field{ - ir.NewField(base.Pos, Lookup("F"), nil, types.Types[types.TUINTPTR]), - ir.NewField(base.Pos, Lookup("R"), nil, n.X.Type()), + t := types.NewStruct(types.NoPkg, []*types.Field{ + types.NewField(base.Pos, Lookup("F"), types.Types[types.TUINTPTR]), + types.NewField(base.Pos, Lookup("R"), n.X.Type()), }) t.SetNoalg(true) return t diff --git a/src/cmd/compile/internal/walk/compare.go b/src/cmd/compile/internal/walk/compare.go index b1ab42782b471..40b45d4dea227 100644 --- a/src/cmd/compile/internal/walk/compare.go +++ b/src/cmd/compile/internal/walk/compare.go @@ -428,11 +428,11 @@ func eqFor(t *types.Type) (n ir.Node, needsize bool) { sym := reflectdata.TypeSymPrefix(".eq", t) n := typecheck.NewName(sym) ir.MarkFunc(n) - n.SetType(typecheck.NewFuncType(nil, []*ir.Field{ - ir.NewField(base.Pos, nil, nil, types.NewPtr(t)), - ir.NewField(base.Pos, nil, nil, types.NewPtr(t)), - }, []*ir.Field{ - ir.NewField(base.Pos, nil, nil, types.Types[types.TBOOL]), + n.SetType(types.NewSignature(types.NoPkg, nil, []*types.Field{ + types.NewField(base.Pos, nil, types.NewPtr(t)), + types.NewField(base.Pos, nil, types.NewPtr(t)), + }, []*types.Field{ + types.NewField(base.Pos, nil, types.Types[types.TBOOL]), })) return n, false } diff --git a/src/cmd/compile/internal/walk/select.go b/src/cmd/compile/internal/walk/select.go index 438131b2945b0..5e03732169f66 100644 --- a/src/cmd/compile/internal/walk/select.go +++ b/src/cmd/compile/internal/walk/select.go @@ -287,9 +287,9 @@ var scase *types.Type // Keep in sync with src/runtime/select.go. func scasetype() *types.Type { if scase == nil { - scase = typecheck.NewStructType([]*ir.Field{ - ir.NewField(base.Pos, typecheck.Lookup("c"), nil, types.Types[types.TUNSAFEPTR]), - ir.NewField(base.Pos, typecheck.Lookup("elem"), nil, types.Types[types.TUNSAFEPTR]), + scase = types.NewStruct(types.NoPkg, []*types.Field{ + types.NewField(base.Pos, typecheck.Lookup("c"), types.Types[types.TUNSAFEPTR]), + types.NewField(base.Pos, typecheck.Lookup("elem"), types.Types[types.TUNSAFEPTR]), }) scase.SetNoalg(true) } From 31267f82e16249a1d9065099c615a936dc32688b Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Wed, 23 Dec 2020 02:48:57 -0800 Subject: [PATCH 247/474] [dev.regabi] cmd/compile: simplify function/interface/struct typechecking After the previous CL, the only callers to NewFuncType, tointerface, or NewStructType are the functions for type-checking the type literal ASTs. So just inline the code there. While here, refactor the Field type-checking logic a little bit, to reduce some duplication. Passes toolstash -cmp. 
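
The refactored shape (one shared per-field loop plus an optional callback
for kind-specific work) is a reusable Go pattern. A minimal sketch outside
the compiler, keeping the tcField/tcFields names but with invented stand-in
types rather than the real ir/types API:

    package main

    import "fmt"

    type astField struct{ name, typ, note string }
    type typField struct{ name, typ, note string }

    // checkField mirrors tcField: do the work shared by struct fields,
    // interface methods, and function parameters, then let an optional
    // callback apply kind-specific extras.
    func checkField(n astField, misc func(*typField, astField)) *typField {
        f := &typField{name: n.name, typ: n.typ}
        if misc != nil {
            misc(f, n)
        }
        return f
    }

    // checkFields mirrors tcFields: the same helper applied to a slice.
    func checkFields(l []astField, misc func(*typField, astField)) []*typField {
        out := make([]*typField, len(l))
        for i, n := range l {
            out[i] = checkField(n, misc)
        }
        return out
    }

    func main() {
        // Only struct fields carry tags, so only that caller passes a
        // callback; interface callers would pass nil.
        fs := checkFields([]astField{{name: "c", typ: "chan int", note: "json tag"}},
            func(f *typField, n astField) { f.note = n.note })
        fmt.Println(fs[0].name, fs[0].note)
    }
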
Change-Id: Ie12d14b87ef8b6e528ac9dccd609604bd09b98ec Reviewed-on: https://go-review.googlesource.com/c/go/+/279956 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/typecheck/dcl.go | 87 ---------------------- src/cmd/compile/internal/typecheck/type.go | 72 +++++++++++++++++- 2 files changed, 69 insertions(+), 90 deletions(-) diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go index bfdd76ba107c8..db18c17e13018 100644 --- a/src/cmd/compile/internal/typecheck/dcl.go +++ b/src/cmd/compile/internal/typecheck/dcl.go @@ -281,72 +281,6 @@ func CheckFuncStack() { } } -// turn a parsed function declaration into a type -func NewFuncType(nrecv *ir.Field, nparams, nresults []*ir.Field) *types.Type { - funarg := func(n *ir.Field) *types.Field { - lno := base.Pos - base.Pos = n.Pos - - if n.Ntype != nil { - n.Type = typecheckNtype(n.Ntype).Type() - n.Ntype = nil - } - - f := types.NewField(n.Pos, n.Sym, n.Type) - f.SetIsDDD(n.IsDDD) - if n.Decl != nil { - n.Decl.SetType(f.Type) - f.Nname = n.Decl - } - - base.Pos = lno - return f - } - funargs := func(nn []*ir.Field) []*types.Field { - res := make([]*types.Field, len(nn)) - for i, n := range nn { - res[i] = funarg(n) - } - return res - } - - var recv *types.Field - if nrecv != nil { - recv = funarg(nrecv) - } - - t := types.NewSignature(types.LocalPkg, recv, funargs(nparams), funargs(nresults)) - checkdupfields("argument", t.Recvs().FieldSlice(), t.Params().FieldSlice(), t.Results().FieldSlice()) - return t -} - -// convert a parsed id/type list into -// a type for struct/interface/arglist -func NewStructType(l []*ir.Field) *types.Type { - lno := base.Pos - - fields := make([]*types.Field, len(l)) - for i, n := range l { - base.Pos = n.Pos - - if n.Ntype != nil { - n.Type = typecheckNtype(n.Ntype).Type() - n.Ntype = nil - } - f := types.NewField(n.Pos, n.Sym, n.Type) - if n.Embedded { - checkembeddedtype(n.Type) - f.Embedded = 1 - } - f.Note = n.Note - fields[i] = f - } - checkdupfields("field", fields) - - base.Pos = lno - return types.NewStruct(types.LocalPkg, fields) -} - // Add a method, declared as a function. // - msym is the method symbol // - t is function type (with receiver) @@ -604,27 +538,6 @@ func initname(s string) bool { return s == "init" } -func tointerface(nmethods []*ir.Field) *types.Type { - if len(nmethods) == 0 { - return types.Types[types.TINTER] - } - - lno := base.Pos - - methods := make([]*types.Field, len(nmethods)) - for i, n := range nmethods { - base.Pos = n.Pos - if n.Ntype != nil { - n.Type = typecheckNtype(n.Ntype).Type() - n.Ntype = nil - } - methods[i] = types.NewField(n.Pos, n.Sym, n.Type) - } - - base.Pos = lno - return types.NewInterface(types.LocalPkg, methods) -} - var vargen int func Temp(t *types.Type) *ir.Name { diff --git a/src/cmd/compile/internal/typecheck/type.go b/src/cmd/compile/internal/typecheck/type.go index 4782bb9c3180a..0c2ebb8b26a0c 100644 --- a/src/cmd/compile/internal/typecheck/type.go +++ b/src/cmd/compile/internal/typecheck/type.go @@ -73,13 +73,42 @@ func tcChanType(n *ir.ChanType) ir.Node { // tcFuncType typechecks an OTFUNC node. 
func tcFuncType(n *ir.FuncType) ir.Node { - n.SetOTYPE(NewFuncType(n.Recv, n.Params, n.Results)) + misc := func(f *types.Field, nf *ir.Field) { + f.SetIsDDD(nf.IsDDD) + if nf.Decl != nil { + nf.Decl.SetType(f.Type) + f.Nname = nf.Decl + } + } + + lno := base.Pos + + var recv *types.Field + if n.Recv != nil { + recv = tcField(n.Recv, misc) + } + + t := types.NewSignature(types.LocalPkg, recv, tcFields(n.Params, misc), tcFields(n.Results, misc)) + checkdupfields("argument", t.Recvs().FieldSlice(), t.Params().FieldSlice(), t.Results().FieldSlice()) + + base.Pos = lno + + n.SetOTYPE(t) return n } // tcInterfaceType typechecks an OTINTER node. func tcInterfaceType(n *ir.InterfaceType) ir.Node { - n.SetOTYPE(tointerface(n.Methods)) + if len(n.Methods) == 0 { + n.SetOTYPE(types.Types[types.TINTER]) + return n + } + + lno := base.Pos + methods := tcFields(n.Methods, nil) + base.Pos = lno + + n.SetOTYPE(types.NewInterface(types.LocalPkg, methods)) return n } @@ -117,6 +146,43 @@ func tcSliceType(n *ir.SliceType) ir.Node { // tcStructType typechecks an OTSTRUCT node. func tcStructType(n *ir.StructType) ir.Node { - n.SetOTYPE(NewStructType(n.Fields)) + lno := base.Pos + + fields := tcFields(n.Fields, func(f *types.Field, nf *ir.Field) { + if nf.Embedded { + checkembeddedtype(f.Type) + f.Embedded = 1 + } + f.Note = nf.Note + }) + checkdupfields("field", fields) + + base.Pos = lno + n.SetOTYPE(types.NewStruct(types.LocalPkg, fields)) return n } + +// tcField typechecks a generic Field. +// misc can be provided to handle specialized typechecking. +func tcField(n *ir.Field, misc func(*types.Field, *ir.Field)) *types.Field { + base.Pos = n.Pos + if n.Ntype != nil { + n.Type = typecheckNtype(n.Ntype).Type() + n.Ntype = nil + } + f := types.NewField(n.Pos, n.Sym, n.Type) + if misc != nil { + misc(f, n) + } + return f +} + +// tcFields typechecks a slice of generic Fields. +// misc can be provided to handle specialized typechecking. +func tcFields(l []*ir.Field, misc func(*types.Field, *ir.Field)) []*types.Field { + fields := make([]*types.Field, len(l)) + for i, n := range l { + fields[i] = tcField(n, misc) + } + return fields +} From 53f082b0ee81f14d1b1a1c997e2f8e9164af37bc Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Wed, 23 Dec 2020 03:33:03 -0800 Subject: [PATCH 248/474] [dev.regabi] cmd/compile: cleanup export code further This CL rips off a number of toolstash bandages: - Fixes position information for string concatenation. - Adds position information for struct literal fields. - Removes unnecessary exprsOrNil calls or replaces them with plain expr calls when possible. - Reorders conversion expressions to put type first, which matches source order and also the order the importer needs for calling the ConvExpr constructor. 
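
The constructor-ordering point generalizes: an importer must consume fields
in exactly the order the exporter wrote them, so emitting a conversion's
type before its operand lets the reader pass both straight to a constructor
with no buffering. A toy writer/reader pair under that invariant (invented
names, not the compiler's real serialization):

    package main

    import "fmt"

    type convExpr struct {
        typ string // emitted first, matching source order
        x   string
    }

    func writeConv(buf []string, n convExpr) []string {
        return append(buf, n.typ, n.x)
    }

    func readConv(buf []string) (convExpr, []string) {
        // Reads mirror writes exactly: the type arrives first, so the
        // "constructor" call gets its arguments in declaration order.
        return convExpr{typ: buf[0], x: buf[1]}, buf[2:]
    }

    func main() {
        buf := writeConv(nil, convExpr{typ: "int64", x: "n"})
        n, _ := readConv(buf)
        fmt.Printf("%s(%s)\n", n.typ, n.x)
    }
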
Change-Id: I44cdc6035540d9ecefd9c1bcd92b8711d6ed813c Reviewed-on: https://go-review.googlesource.com/c/go/+/279957 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/typecheck/iexport.go | 11 ++++---- src/cmd/compile/internal/typecheck/iimport.go | 26 ++++++------------- 2 files changed, 13 insertions(+), 24 deletions(-) diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index 95a100e6a5ecd..8ac791c0369a0 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -858,8 +858,6 @@ func intSize(typ *types.Type) (signed bool, maxBytes uint) { // according to the maximum number of bytes needed to encode a value // of type typ. As a special case, 8-bit types are always encoded as a // single byte. -// -// TODO(mdempsky): Is this level of complexity really worthwhile? func (w *exportWriter) mpint(x constant.Value, typ *types.Type) { signed, maxBytes := intSize(typ) @@ -1154,7 +1152,6 @@ func (w *exportWriter) stmt(n ir.Node) { w.op(n.Op()) w.pos(n.Pos()) w.stmtList(n.Init()) - w.exprsOrNil(nil, nil) // TODO(rsc): Delete (and fix importer). w.caseList(n.Cases, false) case ir.OSWITCH: @@ -1298,7 +1295,7 @@ func (w *exportWriter) expr(n ir.Node) { s = n.Tag.Sym() } w.localIdent(s, 0) // declared pseudo-variable, if any - w.exprsOrNil(n.X, nil) + w.expr(n.X) // case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC: // should have been resolved by typechecking - handled by default case @@ -1333,7 +1330,8 @@ func (w *exportWriter) expr(n ir.Node) { n := n.(*ir.KeyExpr) w.op(ir.OKEY) w.pos(n.Pos()) - w.exprsOrNil(n.Key, n.Value) + w.expr(n.Key) + w.expr(n.Value) // case OSTRUCTKEY: // unreachable - handled in case OSTRUCTLIT by elemList @@ -1397,8 +1395,8 @@ func (w *exportWriter) expr(n ir.Node) { n := n.(*ir.ConvExpr) w.op(ir.OCONV) w.pos(n.Pos()) - w.expr(n.X) w.typ(n.Type()) + w.expr(n.X) case ir.OREAL, ir.OIMAG, ir.OCAP, ir.OCLOSE, ir.OLEN, ir.ONEW, ir.OPANIC: n := n.(*ir.UnaryExpr) @@ -1529,6 +1527,7 @@ func (w *exportWriter) fieldList(list ir.Nodes) { w.uint64(uint64(len(list))) for _, n := range list { n := n.(*ir.StructKeyExpr) + w.pos(n.Pos()) w.selector(n.Field) w.expr(n.Value) } diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go index 3c7dde5506793..c4d840d2ac218 100644 --- a/src/cmd/compile/internal/typecheck/iimport.go +++ b/src/cmd/compile/internal/typecheck/iimport.go @@ -851,8 +851,7 @@ func (r *importReader) node() ir.Node { if s := r.ident(); s != nil { tag = ir.NewIdent(pos, s) } - expr, _ := r.exprsOrNil() - return ir.NewTypeSwitchGuard(pos, tag, expr) + return ir.NewTypeSwitchGuard(pos, tag, r.expr()) // case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC: // unreachable - should have been resolved by typechecking @@ -864,19 +863,16 @@ func (r *importReader) node() ir.Node { // unreachable - mapped to case OADDR below by exporter case ir.OSTRUCTLIT: - pos := r.pos() - return ir.NewCompLitExpr(pos, ir.OCOMPLIT, ir.TypeNode(r.typ()).(ir.Ntype), r.elemList(pos)) + return ir.NewCompLitExpr(r.pos(), ir.OCOMPLIT, ir.TypeNode(r.typ()), r.fieldList()) // case OARRAYLIT, OSLICELIT, OMAPLIT: // unreachable - mapped to case OCOMPLIT below by exporter case ir.OCOMPLIT: - return ir.NewCompLitExpr(r.pos(), ir.OCOMPLIT, ir.TypeNode(r.typ()).(ir.Ntype), r.exprList()) + return ir.NewCompLitExpr(r.pos(), ir.OCOMPLIT, ir.TypeNode(r.typ()), r.exprList()) 
case ir.OKEY: - pos := r.pos() - key, value := r.exprsOrNil() - return ir.NewKeyExpr(pos, key, value) + return ir.NewKeyExpr(r.pos(), r.expr(), r.expr()) // case OSTRUCTKEY: // unreachable - handled in case OSTRUCTLIT by elemList @@ -919,9 +915,7 @@ func (r *importReader) node() ir.Node { // unreachable - mapped to OCONV case below by exporter case ir.OCONV: - pos := r.pos() - x := r.expr() - return ir.NewConvExpr(pos, ir.OCONV, r.typ(), x) + return ir.NewConvExpr(r.pos(), ir.OCONV, r.typ(), r.expr()) case ir.OCOPY, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN: n := builtinCall(r.pos(), op) @@ -973,7 +967,6 @@ func (r *importReader) node() ir.Node { pos := r.pos() list := r.exprList() x := list[0] - x.SetPos(pos) // TODO(mdempsky): Remove toolstash bandage. for _, y := range list[1:] { x = ir.NewBinaryExpr(pos, ir.OADD, x, y) } @@ -1041,7 +1034,6 @@ func (r *importReader) node() ir.Node { case ir.OSELECT: pos := r.pos() init := r.stmtList() - r.exprsOrNil() // TODO(rsc): Delete (and fix exporter). These are always nil. n := ir.NewSelectStmt(pos, r.caseList(nil)) n.PtrInit().Set(init) return n @@ -1088,12 +1080,10 @@ func (r *importReader) op() ir.Op { return ir.Op(r.uint64()) } -func (r *importReader) elemList(pos src.XPos) []ir.Node { - c := r.uint64() - list := make([]ir.Node, c) +func (r *importReader) fieldList() []ir.Node { + list := make([]ir.Node, r.uint64()) for i := range list { - // TODO(mdempsky): Export position information for OSTRUCTKEY nodes. - list[i] = ir.NewStructKeyExpr(pos, r.ident(), r.expr()) + list[i] = ir.NewStructKeyExpr(r.pos(), r.ident(), r.expr()) } return list } From d19018e8f1970e2232b35931546ef60cdc0734d1 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Wed, 23 Dec 2020 05:40:11 -0800 Subject: [PATCH 249/474] [dev.regabi] cmd/compile: split SliceHeaderExpr.LenCap into separate fields Passes toolstash -cmp. Change-Id: Ifc98a408c154a05997963e2c731466842ebbf50e Reviewed-on: https://go-review.googlesource.com/c/go/+/279958 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/ir/expr.go | 8 ++++---- src/cmd/compile/internal/ir/fmt.go | 5 +---- src/cmd/compile/internal/ir/node_gen.go | 7 ++++--- src/cmd/compile/internal/ssagen/ssa.go | 4 ++-- src/cmd/compile/internal/typecheck/expr.go | 18 +++++------------- src/cmd/compile/internal/walk/builtin.go | 9 ++++++--- src/cmd/compile/internal/walk/expr.go | 4 ++-- 7 files changed, 24 insertions(+), 31 deletions(-) diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 640cc039546ec..d862a645d0eb6 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -695,16 +695,16 @@ func (o Op) IsSlice3() bool { // A SliceHeader expression constructs a slice header from its parts. 
type SliceHeaderExpr struct { miniExpr - Ptr Node - LenCap Nodes // TODO(rsc): Split into two Node fields + Ptr Node + Len Node + Cap Node } func NewSliceHeaderExpr(pos src.XPos, typ *types.Type, ptr, len, cap Node) *SliceHeaderExpr { - n := &SliceHeaderExpr{Ptr: ptr} + n := &SliceHeaderExpr{Ptr: ptr, Len: len, Cap: cap} n.pos = pos n.op = OSLICEHEADER n.typ = typ - n.LenCap = []Node{len, cap} return n } diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 268290853910f..8cfc38a9ae499 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -800,10 +800,7 @@ func exprFmt(n Node, s fmt.State, prec int) { case OSLICEHEADER: n := n.(*SliceHeaderExpr) - if len(n.LenCap) != 2 { - base.Fatalf("bad OSLICEHEADER list length %d", len(n.LenCap)) - } - fmt.Fprintf(s, "sliceheader{%v,%v,%v}", n.Ptr, n.LenCap[0], n.LenCap[1]) + fmt.Fprintf(s, "sliceheader{%v,%v,%v}", n.Ptr, n.Len, n.Cap) case OCOMPLEX, OCOPY: n := n.(*BinaryExpr) diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 89b1c0ba23a0f..d11e7bf9183b2 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -858,20 +858,21 @@ func (n *SliceHeaderExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *SliceHeaderExpr) copy() Node { c := *n c.init = c.init.Copy() - c.LenCap = c.LenCap.Copy() return &c } func (n *SliceHeaderExpr) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) err = maybeDo(n.Ptr, err, do) - err = maybeDoList(n.LenCap, err, do) + err = maybeDo(n.Len, err, do) + err = maybeDo(n.Cap, err, do) return err } func (n *SliceHeaderExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) n.Ptr = maybeEdit(n.Ptr, edit) - editList(n.LenCap, edit) + n.Len = maybeEdit(n.Len, edit) + n.Cap = maybeEdit(n.Cap, edit) } func (n *SliceType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index a77e57a5b6bac..6b2ba5a781a25 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -2844,8 +2844,8 @@ func (s *state) expr(n ir.Node) *ssa.Value { case ir.OSLICEHEADER: n := n.(*ir.SliceHeaderExpr) p := s.expr(n.Ptr) - l := s.expr(n.LenCap[0]) - c := s.expr(n.LenCap[1]) + l := s.expr(n.Len) + c := s.expr(n.Cap) return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c) case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR: diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go index f940a2e73d913..00615c506c17e 100644 --- a/src/cmd/compile/internal/typecheck/expr.go +++ b/src/cmd/compile/internal/typecheck/expr.go @@ -924,30 +924,22 @@ func tcSliceHeader(n *ir.SliceHeaderExpr) ir.Node { base.Fatalf("need unsafe.Pointer for OSLICEHEADER") } - if x := len(n.LenCap); x != 2 { - base.Fatalf("expected 2 params (len, cap) for OSLICEHEADER, got %d", x) - } - n.Ptr = Expr(n.Ptr) - l := Expr(n.LenCap[0]) - c := Expr(n.LenCap[1]) - l = DefaultLit(l, types.Types[types.TINT]) - c = DefaultLit(c, types.Types[types.TINT]) + n.Len = DefaultLit(Expr(n.Len), types.Types[types.TINT]) + n.Cap = DefaultLit(Expr(n.Cap), types.Types[types.TINT]) - if ir.IsConst(l, constant.Int) && ir.Int64Val(l) < 0 { + if ir.IsConst(n.Len, constant.Int) && ir.Int64Val(n.Len) < 0 { base.Fatalf("len for OSLICEHEADER must be non-negative") } - if ir.IsConst(c, constant.Int) && 
ir.Int64Val(c) < 0 { + if ir.IsConst(n.Cap, constant.Int) && ir.Int64Val(n.Cap) < 0 { base.Fatalf("cap for OSLICEHEADER must be non-negative") } - if ir.IsConst(l, constant.Int) && ir.IsConst(c, constant.Int) && constant.Compare(l.Val(), token.GTR, c.Val()) { + if ir.IsConst(n.Len, constant.Int) && ir.IsConst(n.Cap, constant.Int) && constant.Compare(n.Len.Val(), token.GTR, n.Cap.Val()) { base.Fatalf("len larger than cap for OSLICEHEADER") } - n.LenCap[0] = l - n.LenCap[1] = c return n } diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go index 61a555b773c87..63f7925863974 100644 --- a/src/cmd/compile/internal/walk/builtin.go +++ b/src/cmd/compile/internal/walk/builtin.go @@ -438,7 +438,8 @@ func walkMakeSlice(n *ir.MakeExpr, init *ir.Nodes) ir.Node { fn := typecheck.LookupRuntime(fnname) m.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype)) m.Ptr.MarkNonNil() - m.LenCap = []ir.Node{typecheck.Conv(len, types.Types[types.TINT]), typecheck.Conv(cap, types.Types[types.TINT])} + m.Len = typecheck.Conv(len, types.Types[types.TINT]) + m.Cap = typecheck.Conv(cap, types.Types[types.TINT]) return walkExpr(typecheck.Expr(m), init) } @@ -471,7 +472,8 @@ func walkMakeSliceCopy(n *ir.MakeExpr, init *ir.Nodes) ir.Node { sh := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil) sh.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, typecheck.NodNil(), ir.NewBool(false)) sh.Ptr.MarkNonNil() - sh.LenCap = []ir.Node{length, length} + sh.Len = length + sh.Cap = length sh.SetType(t) s := typecheck.Temp(t) @@ -493,7 +495,8 @@ func walkMakeSliceCopy(n *ir.MakeExpr, init *ir.Nodes) ir.Node { s := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil) s.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR])) s.Ptr.MarkNonNil() - s.LenCap = []ir.Node{length, length} + s.Len = length + s.Cap = length s.SetType(t) return walkExpr(typecheck.Expr(s), init) } diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index 2029a6aef68d8..4f57962205b50 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -817,8 +817,8 @@ func walkSlice(n *ir.SliceExpr, init *ir.Nodes) ir.Node { // walkSliceHeader walks an OSLICEHEADER node. func walkSliceHeader(n *ir.SliceHeaderExpr, init *ir.Nodes) ir.Node { n.Ptr = walkExpr(n.Ptr, init) - n.LenCap[0] = walkExpr(n.LenCap[0], init) - n.LenCap[1] = walkExpr(n.LenCap[1], init) + n.Len = walkExpr(n.Len, init) + n.Cap = walkExpr(n.Cap, init) return n } From d1d64e4cea41bf908152e6a9c45980946e7825a2 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Wed, 23 Dec 2020 06:06:31 -0800 Subject: [PATCH 250/474] [dev.regabi] cmd/compile: split SliceExpr.List into separate fields Passes toolstash -cmp. 
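
Seen side by side, the win is that a positional Nodes slice forces index
bookkeeping and nil-versus-empty special cases, while named fields make an
absent bound simply nil. A reduced before/after with toy types (not the
real ir package):

    package main

    import "fmt"

    type node interface{}

    // Before: the bounds of x[low:high:max] packed positionally; callers
    // must remember that index 1 means "high" and guard the nil slice.
    type sliceExprOld struct{ list []node }

    func (s sliceExprOld) high() node {
        if len(s.list) == 0 {
            return nil
        }
        return s.list[1]
    }

    // After: one field per bound; "no bound" is just a nil field.
    type sliceExprNew struct{ low, high, max node }

    func main() {
        old := sliceExprOld{list: []node{nil, "n", nil}}
        neu := sliceExprNew{high: "n"}
        fmt.Println(old.high(), neu.high) // same answer, no indexing
    }
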
Change-Id: I4e31154d04d99f2b80bec6a2c571a2a4a3f2ec99 Reviewed-on: https://go-review.googlesource.com/c/go/+/279959 Run-TryBot: Matthew Dempsky Reviewed-by: Cuong Manh Le TryBot-Result: Go Bot Trust: Matthew Dempsky --- src/cmd/compile/internal/escape/escape.go | 7 +-- src/cmd/compile/internal/ir/expr.go | 63 ++----------------- src/cmd/compile/internal/ir/fmt.go | 13 ++-- src/cmd/compile/internal/ir/node_gen.go | 9 ++- src/cmd/compile/internal/noder/noder.go | 11 ++-- src/cmd/compile/internal/ssagen/ssa.go | 24 ++++--- src/cmd/compile/internal/typecheck/expr.go | 22 +++---- src/cmd/compile/internal/typecheck/iexport.go | 8 +-- src/cmd/compile/internal/typecheck/iimport.go | 7 +-- src/cmd/compile/internal/walk/assign.go | 12 ++-- src/cmd/compile/internal/walk/builtin.go | 8 +-- src/cmd/compile/internal/walk/complit.go | 2 +- src/cmd/compile/internal/walk/convert.go | 2 +- src/cmd/compile/internal/walk/expr.go | 24 +++---- src/cmd/compile/internal/walk/order.go | 11 +--- 15 files changed, 72 insertions(+), 151 deletions(-) diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index b7cb56b997d28..338b2e0680e26 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -559,10 +559,9 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) { case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR: n := n.(*ir.SliceExpr) e.expr(k.note(n, "slice"), n.X) - low, high, max := n.SliceBounds() - e.discard(low) - e.discard(high) - e.discard(max) + e.discard(n.Low) + e.discard(n.High) + e.discard(n.Max) case ir.OCONV, ir.OCONVNOP: n := n.(*ir.ConvExpr) diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index d862a645d0eb6..467596609028c 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -605,11 +605,13 @@ func (*SelectorExpr) CanBeNtype() {} type SliceExpr struct { miniExpr X Node - List Nodes // TODO(rsc): Use separate Nodes + Low Node + High Node + Max Node } -func NewSliceExpr(pos src.XPos, op Op, x Node) *SliceExpr { - n := &SliceExpr{X: x} +func NewSliceExpr(pos src.XPos, op Op, x, low, high, max Node) *SliceExpr { + n := &SliceExpr{X: x, Low: low, High: high, Max: max} n.pos = pos n.op = op return n @@ -624,61 +626,6 @@ func (n *SliceExpr) SetOp(op Op) { } } -// SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max]. -// n must be a slice expression. max is nil if n is a simple slice expression. -func (n *SliceExpr) SliceBounds() (low, high, max Node) { - if len(n.List) == 0 { - return nil, nil, nil - } - - switch n.Op() { - case OSLICE, OSLICEARR, OSLICESTR: - s := n.List - return s[0], s[1], nil - case OSLICE3, OSLICE3ARR: - s := n.List - return s[0], s[1], s[2] - } - base.Fatalf("SliceBounds op %v: %v", n.Op(), n) - return nil, nil, nil -} - -// SetSliceBounds sets n's slice bounds, where n is a slice expression. -// n must be a slice expression. If max is non-nil, n must be a full slice expression. 
-func (n *SliceExpr) SetSliceBounds(low, high, max Node) { - switch n.Op() { - case OSLICE, OSLICEARR, OSLICESTR: - if max != nil { - base.Fatalf("SetSliceBounds %v given three bounds", n.Op()) - } - s := n.List - if s == nil { - if low == nil && high == nil { - return - } - n.List = []Node{low, high} - return - } - s[0] = low - s[1] = high - return - case OSLICE3, OSLICE3ARR: - s := n.List - if s == nil { - if low == nil && high == nil && max == nil { - return - } - n.List = []Node{low, high, max} - return - } - s[0] = low - s[1] = high - s[2] = max - return - } - base.Fatalf("SetSliceBounds op %v: %v", n.Op(), n) -} - // IsSlice3 reports whether o is a slice3 op (OSLICE3, OSLICE3ARR). // o must be a slicing op. func (o Op) IsSlice3() bool { diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 8cfc38a9ae499..b882979aa411a 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -782,18 +782,17 @@ func exprFmt(n Node, s fmt.State, prec int) { n := n.(*SliceExpr) exprFmt(n.X, s, nprec) fmt.Fprint(s, "[") - low, high, max := n.SliceBounds() - if low != nil { - fmt.Fprint(s, low) + if n.Low != nil { + fmt.Fprint(s, n.Low) } fmt.Fprint(s, ":") - if high != nil { - fmt.Fprint(s, high) + if n.High != nil { + fmt.Fprint(s, n.High) } if n.Op().IsSlice3() { fmt.Fprint(s, ":") - if max != nil { - fmt.Fprint(s, max) + if n.Max != nil { + fmt.Fprint(s, n.Max) } } fmt.Fprint(s, "]") diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index d11e7bf9183b2..23205b61feaef 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -838,20 +838,23 @@ func (n *SliceExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *SliceExpr) copy() Node { c := *n c.init = c.init.Copy() - c.List = c.List.Copy() return &c } func (n *SliceExpr) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) err = maybeDo(n.X, err, do) - err = maybeDoList(n.List, err, do) + err = maybeDo(n.Low, err, do) + err = maybeDo(n.High, err, do) + err = maybeDo(n.Max, err, do) return err } func (n *SliceExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) n.X = maybeEdit(n.X, edit) - editList(n.List, edit) + n.Low = maybeEdit(n.Low, edit) + n.High = maybeEdit(n.High, edit) + n.Max = maybeEdit(n.Max, edit) } func (n *SliceHeaderExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index c73e2d7fc5384..4789740bd1822 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -682,15 +682,14 @@ func (p *noder) expr(expr syntax.Expr) ir.Node { if expr.Full { op = ir.OSLICE3 } - n := ir.NewSliceExpr(p.pos(expr), op, p.expr(expr.X)) + x := p.expr(expr.X) var index [3]ir.Node - for i, x := range &expr.Index { - if x != nil { - index[i] = p.expr(x) + for i, n := range &expr.Index { + if n != nil { + index[i] = p.expr(n) } } - n.SetSliceBounds(index[0], index[1], index[2]) - return n + return ir.NewSliceExpr(p.pos(expr), op, x, index[0], index[1], index[2]) case *syntax.AssertExpr: return ir.NewTypeAssertExpr(p.pos(expr), p.expr(expr.X), p.typeExpr(expr.Type)) case *syntax.Operation: diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 6b2ba5a781a25..cf683e578d278 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ 
b/src/cmd/compile/internal/ssagen/ssa.go @@ -1367,7 +1367,7 @@ func (s *state) stmt(n ir.Node) { // We're assigning a slicing operation back to its source. // Don't write back fields we aren't changing. See issue #14855. rhs := rhs.(*ir.SliceExpr) - i, j, k := rhs.SliceBounds() + i, j, k := rhs.Low, rhs.High, rhs.Max if i != nil && (i.Op() == ir.OLITERAL && i.Val().Kind() == constant.Int && ir.Int64Val(i) == 0) { // [0:...] is the same as [:...] i = nil @@ -2852,15 +2852,14 @@ func (s *state) expr(n ir.Node) *ssa.Value { n := n.(*ir.SliceExpr) v := s.expr(n.X) var i, j, k *ssa.Value - low, high, max := n.SliceBounds() - if low != nil { - i = s.expr(low) + if n.Low != nil { + i = s.expr(n.Low) } - if high != nil { - j = s.expr(high) + if n.High != nil { + j = s.expr(n.High) } - if max != nil { - k = s.expr(max) + if n.Max != nil { + k = s.expr(n.Max) } p, l, c := s.slice(v, i, j, k, n.Bounded()) return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c) @@ -2869,12 +2868,11 @@ func (s *state) expr(n ir.Node) *ssa.Value { n := n.(*ir.SliceExpr) v := s.expr(n.X) var i, j *ssa.Value - low, high, _ := n.SliceBounds() - if low != nil { - i = s.expr(low) + if n.Low != nil { + i = s.expr(n.Low) } - if high != nil { - j = s.expr(high) + if n.High != nil { + j = s.expr(n.High) } p, l, _ := s.slice(v, i, j, nil, n.Bounded()) return s.newValue2(ssa.OpStringMake, n.Type(), p, l) diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go index 00615c506c17e..6bbb68550e6cc 100644 --- a/src/cmd/compile/internal/typecheck/expr.go +++ b/src/cmd/compile/internal/typecheck/expr.go @@ -831,17 +831,11 @@ func tcSPtr(n *ir.UnaryExpr) ir.Node { // tcSlice typechecks an OSLICE or OSLICE3 node. func tcSlice(n *ir.SliceExpr) ir.Node { - n.X = Expr(n.X) - low, high, max := n.SliceBounds() + n.X = DefaultLit(Expr(n.X), nil) + n.Low = indexlit(Expr(n.Low)) + n.High = indexlit(Expr(n.High)) + n.Max = indexlit(Expr(n.Max)) hasmax := n.Op().IsSlice3() - low = Expr(low) - high = Expr(high) - max = Expr(max) - n.X = DefaultLit(n.X, nil) - low = indexlit(low) - high = indexlit(high) - max = indexlit(max) - n.SetSliceBounds(low, high, max) l := n.X if l.Type() == nil { n.SetType(nil) @@ -886,19 +880,19 @@ func tcSlice(n *ir.SliceExpr) ir.Node { return n } - if low != nil && !checksliceindex(l, low, tp) { + if n.Low != nil && !checksliceindex(l, n.Low, tp) { n.SetType(nil) return n } - if high != nil && !checksliceindex(l, high, tp) { + if n.High != nil && !checksliceindex(l, n.High, tp) { n.SetType(nil) return n } - if max != nil && !checksliceindex(l, max, tp) { + if n.Max != nil && !checksliceindex(l, n.Max, tp) { n.SetType(nil) return n } - if !checksliceconst(low, high) || !checksliceconst(low, max) || !checksliceconst(high, max) { + if !checksliceconst(n.Low, n.High) || !checksliceconst(n.Low, n.Max) || !checksliceconst(n.High, n.Max) { n.SetType(nil) return n } diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index 8ac791c0369a0..365e4315bc5e9 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -1370,17 +1370,15 @@ func (w *exportWriter) expr(n ir.Node) { w.op(ir.OSLICE) w.pos(n.Pos()) w.expr(n.X) - low, high, _ := n.SliceBounds() - w.exprsOrNil(low, high) + w.exprsOrNil(n.Low, n.High) case ir.OSLICE3, ir.OSLICE3ARR: n := n.(*ir.SliceExpr) w.op(ir.OSLICE3) w.pos(n.Pos()) w.expr(n.X) - low, high, max := n.SliceBounds() - w.exprsOrNil(low, high) - w.expr(max) + 
w.exprsOrNil(n.Low, n.High) + w.expr(n.Max) case ir.OCOPY, ir.OCOMPLEX: // treated like other builtin calls (see e.g., OREAL) diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go index c4d840d2ac218..cc8646977d741 100644 --- a/src/cmd/compile/internal/typecheck/iimport.go +++ b/src/cmd/compile/internal/typecheck/iimport.go @@ -902,14 +902,13 @@ func (r *importReader) node() ir.Node { return ir.NewIndexExpr(r.pos(), r.expr(), r.expr()) case ir.OSLICE, ir.OSLICE3: - n := ir.NewSliceExpr(r.pos(), op, r.expr()) + pos, x := r.pos(), r.expr() low, high := r.exprsOrNil() var max ir.Node - if n.Op().IsSlice3() { + if op.IsSlice3() { max = r.expr() } - n.SetSliceBounds(low, high, max) - return n + return ir.NewSliceExpr(pos, op, x, low, high, max) // case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, ORUNES2STR, OSTR2BYTES, OSTR2RUNES, ORUNESTR: // unreachable - mapped to OCONV case below by exporter diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go index 6b0e2b272c730..99c1abd73f604 100644 --- a/src/cmd/compile/internal/walk/assign.go +++ b/src/cmd/compile/internal/walk/assign.go @@ -700,17 +700,15 @@ func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node { nodes.Append(nif) // s = s[:n] - nt := ir.NewSliceExpr(base.Pos, ir.OSLICE, s) - nt.SetSliceBounds(nil, nn, nil) + nt := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, nil, nn, nil) nt.SetBounded(true) nodes.Append(ir.NewAssignStmt(base.Pos, s, nt)) var ncopy ir.Node if elemtype.HasPointers() { // copy(s[len(l1):], l2) - slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s) + slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1), nil, nil) slice.SetType(s.Type()) - slice.SetSliceBounds(ir.NewUnaryExpr(base.Pos, ir.OLEN, l1), nil, nil) ir.CurFunc.SetWBPos(n.Pos()) @@ -724,9 +722,8 @@ func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node { // rely on runtime to instrument: // copy(s[len(l1):], l2) // l2 can be a slice or string. 
- slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s) + slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1), nil, nil) slice.SetType(s.Type()) - slice.SetSliceBounds(ir.NewUnaryExpr(base.Pos, ir.OLEN, l1), nil, nil) ptr1, len1 := backingArrayPtrLen(cheapExpr(slice, &nodes)) ptr2, len2 := backingArrayPtrLen(l2) @@ -870,8 +867,7 @@ func extendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node { nodes = append(nodes, nif) // s = s[:n] - nt := ir.NewSliceExpr(base.Pos, ir.OSLICE, s) - nt.SetSliceBounds(nil, nn, nil) + nt := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, nil, nn, nil) nt.SetBounded(true) nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, nt)) diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go index 63f7925863974..fe6045cbbd1d9 100644 --- a/src/cmd/compile/internal/walk/builtin.go +++ b/src/cmd/compile/internal/walk/builtin.go @@ -95,8 +95,7 @@ func walkAppend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node { nn := typecheck.Temp(types.Types[types.TINT]) l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns))) // n = len(s) - slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, ns) // ...s[:n+argc] - slice.SetSliceBounds(nil, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, na), nil) + slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, ns, nil, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, na), nil) // ...s[:n+argc] slice.SetBounded(true) l = append(l, ir.NewAssignStmt(base.Pos, ns, slice)) // s = s[:n+argc] @@ -407,9 +406,8 @@ func walkMakeSlice(n *ir.MakeExpr, init *ir.Nodes) ir.Node { t = types.NewArray(t.Elem(), i) // [r]T var_ := typecheck.Temp(t) - appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil)) // zero temp - r := ir.NewSliceExpr(base.Pos, ir.OSLICE, var_) // arr[:l] - r.SetSliceBounds(nil, l, nil) + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil)) // zero temp + r := ir.NewSliceExpr(base.Pos, ir.OSLICE, var_, nil, l, nil) // arr[:l] // The conv is necessary in case n.Type is named. return walkExpr(typecheck.Expr(typecheck.Conv(r, n.Type())), init) } diff --git a/src/cmd/compile/internal/walk/complit.go b/src/cmd/compile/internal/walk/complit.go index 6fbbee92846b8..b53fe2e935abf 100644 --- a/src/cmd/compile/internal/walk/complit.go +++ b/src/cmd/compile/internal/walk/complit.go @@ -425,7 +425,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) } // make slice out of heap (6) - a = ir.NewAssignStmt(base.Pos, var_, ir.NewSliceExpr(base.Pos, ir.OSLICE, vauto)) + a = ir.NewAssignStmt(base.Pos, var_, ir.NewSliceExpr(base.Pos, ir.OSLICE, vauto, nil, nil, nil)) a = typecheck.Stmt(a) a = orderStmtInPlace(a, map[string][]*ir.Name{}) diff --git a/src/cmd/compile/internal/walk/convert.go b/src/cmd/compile/internal/walk/convert.go index 21426c9817388..fd954d611366d 100644 --- a/src/cmd/compile/internal/walk/convert.go +++ b/src/cmd/compile/internal/walk/convert.go @@ -260,7 +260,7 @@ func walkStringToBytes(n *ir.ConvExpr, init *ir.Nodes) ir.Node { } // Slice the [n]byte to a []byte. 
- slice := ir.NewSliceExpr(n.Pos(), ir.OSLICEARR, p) + slice := ir.NewSliceExpr(n.Pos(), ir.OSLICEARR, p, nil, nil, nil) slice.SetType(n.Type()) slice.SetTypecheck(1) return walkExpr(slice, init) diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index 4f57962205b50..658a579fdaccd 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -786,21 +786,19 @@ func walkSlice(n *ir.SliceExpr, init *ir.Nodes) ir.Node { n.X = walkExpr(n.X, init) } - low, high, max := n.SliceBounds() - low = walkExpr(low, init) - if low != nil && ir.IsZero(low) { + n.Low = walkExpr(n.Low, init) + if n.Low != nil && ir.IsZero(n.Low) { // Reduce x[0:j] to x[:j] and x[0:j:k] to x[:j:k]. - low = nil + n.Low = nil } - high = walkExpr(high, init) - max = walkExpr(max, init) - n.SetSliceBounds(low, high, max) + n.High = walkExpr(n.High, init) + n.Max = walkExpr(n.Max, init) if checkSlice { - n.X = walkCheckPtrAlignment(n.X.(*ir.ConvExpr), init, max) + n.X = walkCheckPtrAlignment(n.X.(*ir.ConvExpr), init, n.Max) } if n.Op().IsSlice3() { - if max != nil && max.Op() == ir.OCAP && ir.SameSafeExpr(n.X, max.(*ir.UnaryExpr).X) { + if n.Max != nil && n.Max.Op() == ir.OCAP && ir.SameSafeExpr(n.X, n.Max.(*ir.UnaryExpr).X) { // Reduce x[i:j:cap(x)] to x[i:j]. if n.Op() == ir.OSLICE3 { n.SetOp(ir.OSLICE) @@ -824,13 +822,11 @@ func walkSliceHeader(n *ir.SliceHeaderExpr, init *ir.Nodes) ir.Node { // TODO(josharian): combine this with its caller and simplify func reduceSlice(n *ir.SliceExpr) ir.Node { - low, high, max := n.SliceBounds() - if high != nil && high.Op() == ir.OLEN && ir.SameSafeExpr(n.X, high.(*ir.UnaryExpr).X) { + if n.High != nil && n.High.Op() == ir.OLEN && ir.SameSafeExpr(n.X, n.High.(*ir.UnaryExpr).X) { // Reduce x[i:len(x)] to x[i:]. - high = nil + n.High = nil } - n.SetSliceBounds(low, high, max) - if (n.Op() == ir.OSLICE || n.Op() == ir.OSLICESTR) && low == nil && high == nil { + if (n.Op() == ir.OSLICE || n.Op() == ir.OSLICESTR) && n.Low == nil && n.High == nil { // Reduce x[:] to x. if base.Debug.Slice > 0 { base.Warn("slice: omit slice operation") diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index 03310a50c6395..de6a3807e6f25 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -1296,14 +1296,9 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node { case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR: n := n.(*ir.SliceExpr) n.X = o.expr(n.X, nil) - low, high, max := n.SliceBounds() - low = o.expr(low, nil) - low = o.cheapExpr(low) - high = o.expr(high, nil) - high = o.cheapExpr(high) - max = o.expr(max, nil) - max = o.cheapExpr(max) - n.SetSliceBounds(low, high, max) + n.Low = o.cheapExpr(o.expr(n.Low, nil)) + n.High = o.cheapExpr(o.expr(n.High, nil)) + n.Max = o.cheapExpr(o.expr(n.Max, nil)) if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.X) { return o.copyExpr(n) } From 9eeed291bcfbf6de4d64abd39eb1eb66cdf9fbb2 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Wed, 23 Dec 2020 20:29:28 +0700 Subject: [PATCH 251/474] [dev.regabi] cmd/compile: eliminate usage of ir.Node in liveness All function parameters and return values in liveness have explicit *ir.Name type, so use it directly instead of casting from ir.Node. While at it, rename "affectedNode" to "affectedVar" to reflect this change. Passes buildall w/ toolstash -cmp. 
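
The cleanup here is an instance of a general one: when every caller already
holds the concrete type, an interface-typed parameter only buys a mandatory
assertion inside the callee. A minimal before/after sketch with stand-in
types (not the real ir package):

    package main

    import "fmt"

    type node interface{ pos() int }

    type name struct {
        p    int
        used bool
    }

    func (n *name) pos() int { return n.p }

    // Before: accept the interface, then assert back down.
    func shouldTrackOld(nn node) bool {
        n, ok := nn.(*name)
        return ok && n.used
    }

    // After: the signature states what was always true of callers.
    func shouldTrackNew(n *name) bool { return n.used }

    func main() {
        n := &name{p: 1, used: true}
        fmt.Println(shouldTrackOld(n), shouldTrackNew(n))
    }
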
Change-Id: Id927e817a92ddb551a029064a2a54e020ca27074
Reviewed-on: https://go-review.googlesource.com/c/go/+/279434
Trust: Cuong Manh Le
Run-TryBot: Cuong Manh Le
TryBot-Result: Go Bot
Reviewed-by: Matthew Dempsky
---
 src/cmd/compile/internal/liveness/plive.go | 20 +++++++-------------
 1 file changed, 7 insertions(+), 13 deletions(-)

diff --git a/src/cmd/compile/internal/liveness/plive.go b/src/cmd/compile/internal/liveness/plive.go
index 785a3a29deba7..cf4debb795966 100644
--- a/src/cmd/compile/internal/liveness/plive.go
+++ b/src/cmd/compile/internal/liveness/plive.go
@@ -179,11 +179,7 @@ type progeffectscache struct {
 // nor do we care about non-local variables,
 // nor do we care about empty structs (handled by the pointer check),
 // nor do we care about the fake PAUTOHEAP variables.
-func ShouldTrack(nn ir.Node) bool {
-	if nn.Op() != ir.ONAME {
-		return false
-	}
-	n := nn.(*ir.Name)
+func ShouldTrack(n *ir.Name) bool {
 	return (n.Class_ == ir.PAUTO || n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT) && n.Type().HasPointers()
 }
 
@@ -248,19 +244,17 @@ const (
 // liveness effects v has on that variable.
 // If v does not affect any tracked variables, it returns -1, 0.
 func (lv *liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
-	n, e := affectedNode(v)
-	if e == 0 || n == nil || n.Op() != ir.ONAME { // cheapest checks first
+	n, e := affectedVar(v)
+	if e == 0 || n == nil { // cheapest checks first
 		return -1, 0
 	}
-
-	nn := n.(*ir.Name)
 	// AllocFrame has dropped unused variables from
 	// lv.fn.Func.Dcl, but they might still be referenced by
 	// OpVarFoo pseudo-ops. Ignore them to prevent "lost track of
 	// variable" ICEs (issue 19632).
 	switch v.Op {
 	case ssa.OpVarDef, ssa.OpVarKill, ssa.OpVarLive, ssa.OpKeepAlive:
-		if !nn.Name().Used() {
+		if !n.Name().Used() {
 			return -1, 0
 		}
 	}
@@ -283,14 +277,14 @@ func (lv *liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
 		return -1, 0
 	}
 
-	if pos, ok := lv.idx[nn]; ok {
+	if pos, ok := lv.idx[n]; ok {
 		return pos, effect
 	}
 
 	return -1, 0
 }
 
-// affectedNode returns the *Node affected by v
-func affectedNode(v *ssa.Value) (ir.Node, ssa.SymEffect) {
+// affectedVar returns the *ir.Name node affected by v
+func affectedVar(v *ssa.Value) (*ir.Name, ssa.SymEffect) {
 	// Special cases.
 	switch v.Op {
 	case ssa.OpLoadReg:

From 40818038bf513405bc988678a297a5a6d24f6513 Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Wed, 23 Dec 2020 06:59:16 -0800
Subject: [PATCH 252/474] [dev.regabi] cmd/compile: change CaseStmt.Vars to Var

There's only ever one variable implicitly declared by a CaseStmt.
It's only a slice because we previously used Rlist for this.

Passes toolstash -cmp.
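
For context on why a CaseStmt declares a variable at all: in a type switch
with a short variable declaration, each case clause implicitly declares its
own copy of the variable, typed by that clause, which is exactly what the
single Var field models. In plain Go:

    package main

    import "fmt"

    func describe(x interface{}) {
        switch v := x.(type) { // each case below gets its own v
        case int:
            fmt.Println("int:", v+1) // v is an int here
        case string:
            fmt.Println("string:", len(v)) // and a string here
        default:
            fmt.Println("other:", v) // v has the type of x otherwise
        }
    }

    func main() {
        describe(41)
        describe("go")
    }
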
Change-Id: Idf747f3ec6dfbbe4e94d60546ba04a81754df3fe Reviewed-on: https://go-review.googlesource.com/c/go/+/280012 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/escape/escape.go | 2 +- src/cmd/compile/internal/ir/node_gen.go | 5 ++--- src/cmd/compile/internal/ir/stmt.go | 2 +- src/cmd/compile/internal/noder/noder.go | 2 +- src/cmd/compile/internal/typecheck/iexport.go | 2 +- src/cmd/compile/internal/typecheck/iimport.go | 2 +- src/cmd/compile/internal/typecheck/stmt.go | 6 +++--- src/cmd/compile/internal/walk/switch.go | 5 +---- 8 files changed, 11 insertions(+), 15 deletions(-) diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index 338b2e0680e26..7a52ff3b88ae8 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -373,7 +373,7 @@ func (e *escape) stmt(n ir.Node) { for _, cas := range n.Cases { // cases cas := cas.(*ir.CaseStmt) if typesw && n.Tag.(*ir.TypeSwitchGuard).Tag != nil { - cv := cas.Vars[0] + cv := cas.Var k := e.dcl(cv) // type switch variables have no ODCL. if cv.Type().HasPointers() { ks = append(ks, k.dotType(cv.Type(), cas, "switch case")) diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 23205b61feaef..7d3488f3fd89b 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -230,7 +230,6 @@ func (n *CaseStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *CaseStmt) copy() Node { c := *n c.init = c.init.Copy() - c.Vars = c.Vars.Copy() c.List = c.List.Copy() c.Body = c.Body.Copy() return &c @@ -238,7 +237,7 @@ func (n *CaseStmt) copy() Node { func (n *CaseStmt) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) - err = maybeDoList(n.Vars, err, do) + err = maybeDo(n.Var, err, do) err = maybeDoList(n.List, err, do) err = maybeDo(n.Comm, err, do) err = maybeDoList(n.Body, err, do) @@ -246,7 +245,7 @@ func (n *CaseStmt) doChildren(do func(Node) error) error { } func (n *CaseStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) - editList(n.Vars, edit) + n.Var = maybeEdit(n.Var, edit) editList(n.List, edit) n.Comm = maybeEdit(n.Comm, edit) editList(n.Body, edit) diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index ad6db436a7f6d..c9988eba5cf82 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -176,7 +176,7 @@ func (n *BranchStmt) Sym() *types.Sym { return n.Label } // A CaseStmt is a case statement in a switch or select: case List: Body. 
type CaseStmt struct { miniStmt - Vars Nodes // declared variable for this case in type switch + Var Node // declared variable for this case in type switch List Nodes // list of expressions for switch, early select Comm Node // communication case (Exprs[0]) after select is type-checked Body Nodes diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index 4789740bd1822..68a01612dc33f 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -1217,7 +1217,7 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitch if tswitch != nil && tswitch.Tag != nil { nn := typecheck.NewName(tswitch.Tag.Sym()) typecheck.Declare(nn, typecheck.DeclContext) - n.Vars = []ir.Node{nn} + n.Var = nn // keep track of the instances for reporting unused nn.Defn = tswitch } diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index 365e4315bc5e9..4cb943daaf6f0 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -1196,7 +1196,7 @@ func (w *exportWriter) caseList(cases []ir.Node, namedTypeSwitch bool) { w.pos(cas.Pos()) w.stmtList(cas.List) if namedTypeSwitch { - w.localName(cas.Vars[0].(*ir.Name)) + w.localName(cas.Var.(*ir.Name)) } w.stmtList(cas.Body) } diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go index cc8646977d741..221229571c501 100644 --- a/src/cmd/compile/internal/typecheck/iimport.go +++ b/src/cmd/compile/internal/typecheck/iimport.go @@ -780,7 +780,7 @@ func (r *importReader) caseList(switchExpr ir.Node) []ir.Node { // Sym for diagnostics anyway. caseVar := ir.NewNameAt(cas.Pos(), r.ident()) Declare(caseVar, DeclContext) - cas.Vars = []ir.Node{caseVar} + cas.Var = caseVar caseVar.Defn = switchExpr } cas.Body.Set(r.stmtList()) diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go index bf3801eea274c..133f93e53b35d 100644 --- a/src/cmd/compile/internal/typecheck/stmt.go +++ b/src/cmd/compile/internal/typecheck/stmt.go @@ -694,7 +694,7 @@ func tcSwitchType(n *ir.SwitchStmt) { ts.add(ncase.Pos(), n1.Type()) } - if len(ncase.Vars) != 0 { + if ncase.Var != nil { // Assign the clause variable's type. vt := t if len(ls) == 1 { @@ -707,7 +707,7 @@ func tcSwitchType(n *ir.SwitchStmt) { } } - nvar := ncase.Vars[0] + nvar := ncase.Var nvar.SetType(vt) if vt != nil { nvar = AssignExpr(nvar) @@ -716,7 +716,7 @@ func tcSwitchType(n *ir.SwitchStmt) { nvar.SetTypecheck(1) nvar.SetWalkdef(1) } - ncase.Vars[0] = nvar + ncase.Var = nvar } Stmts(ncase.Body) diff --git a/src/cmd/compile/internal/walk/switch.go b/src/cmd/compile/internal/walk/switch.go index 360086ec79723..7829d93373956 100644 --- a/src/cmd/compile/internal/walk/switch.go +++ b/src/cmd/compile/internal/walk/switch.go @@ -334,10 +334,7 @@ func walkSwitchType(sw *ir.SwitchStmt) { var body ir.Nodes for _, ncase := range sw.Cases { ncase := ncase.(*ir.CaseStmt) - var caseVar ir.Node - if len(ncase.Vars) != 0 { - caseVar = ncase.Vars[0] - } + caseVar := ncase.Var // For single-type cases with an interface type, // we initialize the case variable as part of the type assertion. From 27b248b307e6db463930231a7820d5335424c04e Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Thu, 24 Dec 2020 13:09:20 +0700 Subject: [PATCH 253/474] [dev.regabi] cmd/compile: separate range stmt Vars to Key, Value nodes Passes buildall w/ toolstash -cmp. 
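
The Key/Value names track what a range clause binds for each ranged type:
the tk/tv pair that typecheckrangeExpr computes below. In source terms:

    package main

    import "fmt"

    func main() {
        for k, v := range []string{"a", "b"} {
            fmt.Println(k, v) // Key: int index, Value: element
        }
        for k, v := range map[string]int{"x": 1} {
            fmt.Println(k, v) // Key: map key, Value: map element
        }
        for k, v := range "hi" {
            fmt.Println(k, v) // Key: byte index, Value: rune
        }
        ch := make(chan int, 1)
        ch <- 9
        close(ch)
        for v := range ch { // a channel binds only one variable
            fmt.Println(v)
        }
    }
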
Change-Id: I9738fcabc8ebf3afa34d102afadf1b474b50db35 Reviewed-on: https://go-review.googlesource.com/c/go/+/279435 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le Reviewed-by: Matthew Dempsky TryBot-Result: Go Bot --- src/cmd/compile/internal/escape/escape.go | 18 ++-- src/cmd/compile/internal/ir/fmt.go | 13 ++- src/cmd/compile/internal/ir/node_gen.go | 7 +- src/cmd/compile/internal/ir/stmt.go | 11 +-- src/cmd/compile/internal/noder/noder.go | 8 +- src/cmd/compile/internal/typecheck/iexport.go | 2 +- src/cmd/compile/internal/typecheck/iimport.go | 4 +- src/cmd/compile/internal/typecheck/stmt.go | 91 +++++++------------ src/cmd/compile/internal/walk/order.go | 5 +- src/cmd/compile/internal/walk/range.go | 18 +--- 10 files changed, 73 insertions(+), 104 deletions(-) diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index 7a52ff3b88ae8..31d157b1651fc 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -347,21 +347,19 @@ func (e *escape) stmt(n ir.Node) { e.loopDepth-- case ir.ORANGE: - // for List = range Right { Nbody } + // for Key, Value = range X { Body } n := n.(*ir.RangeStmt) e.loopDepth++ - ks := e.addrs(n.Vars) + e.addr(n.Key) + k := e.addr(n.Value) e.block(n.Body) e.loopDepth-- - // Right is evaluated outside the loop. - k := e.discardHole() - if len(ks) >= 2 { - if n.X.Type().IsArray() { - k = ks[1].note(n, "range") - } else { - k = ks[1].deref(n, "range-deref") - } + // X is evaluated outside the loop. + if n.X.Type().IsArray() { + k = k.note(n, "range") + } else { + k = k.deref(n, "range-deref") } e.expr(e.later(k), n.X) diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index b882979aa411a..2b73c5ac1bb4a 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -444,12 +444,15 @@ func stmtFmt(n Node, s fmt.State) { break } - if len(n.Vars) == 0 { - fmt.Fprintf(s, "for range %v { %v }", n.X, n.Body) - break + fmt.Fprint(s, "for") + if n.Key != nil { + fmt.Fprintf(s, " %v", n.Key) + if n.Value != nil { + fmt.Fprintf(s, ", %v", n.Value) + } + fmt.Fprint(s, " =") } - - fmt.Fprintf(s, "for %.v = range %v { %v }", n.Vars, n.X, n.Body) + fmt.Fprintf(s, " range %v { %v }", n.X, n.Body) case OSELECT: n := n.(*SelectStmt) diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 7d3488f3fd89b..ecb39563c46f3 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -724,22 +724,23 @@ func (n *RangeStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *RangeStmt) copy() Node { c := *n c.init = c.init.Copy() - c.Vars = c.Vars.Copy() c.Body = c.Body.Copy() return &c } func (n *RangeStmt) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) - err = maybeDoList(n.Vars, err, do) err = maybeDo(n.X, err, do) + err = maybeDo(n.Key, err, do) + err = maybeDo(n.Value, err, do) err = maybeDoList(n.Body, err, do) return err } func (n *RangeStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) - editList(n.Vars, edit) n.X = maybeEdit(n.X, edit) + n.Key = maybeEdit(n.Key, edit) + n.Value = maybeEdit(n.Value, edit) editList(n.Body, edit) } diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index c9988eba5cf82..453153c0245c2 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -290,25 +290,24 @@ func NewLabelStmt(pos 
src.XPos, label *types.Sym) *LabelStmt { func (n *LabelStmt) Sym() *types.Sym { return n.Label } -// A RangeStmt is a range loop: for Vars = range X { Stmts } -// Op can be OFOR or OFORUNTIL (!Cond). +// A RangeStmt is a range loop: for Key, Value = range X { Body } type RangeStmt struct { miniStmt Label *types.Sym - Vars Nodes // TODO(rsc): Replace with Key, Value Node Def bool X Node + Key Node + Value Node Body Nodes HasBreak bool typ *types.Type // TODO(rsc): Remove - use X.Type() instead Prealloc *Name } -func NewRangeStmt(pos src.XPos, vars []Node, x Node, body []Node) *RangeStmt { - n := &RangeStmt{X: x} +func NewRangeStmt(pos src.XPos, key, value, x Node, body []Node) *RangeStmt { + n := &RangeStmt{X: x, Key: key, Value: value} n.pos = pos n.op = ORANGE - n.Vars.Set(vars) n.Body.Set(body) return n } diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index 68a01612dc33f..ad66b6c8509ca 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -1172,10 +1172,14 @@ func (p *noder) forStmt(stmt *syntax.ForStmt) ir.Node { panic("unexpected RangeClause") } - n := ir.NewRangeStmt(p.pos(r), nil, p.expr(r.X), nil) + n := ir.NewRangeStmt(p.pos(r), nil, nil, p.expr(r.X), nil) if r.Lhs != nil { n.Def = r.Def - n.Vars.Set(p.assignList(r.Lhs, n, n.Def)) + lhs := p.assignList(r.Lhs, n, n.Def) + n.Key = lhs[0] + if len(lhs) > 1 { + n.Value = lhs[1] + } } n.Body.Set(p.blockStmt(stmt.Body)) p.closeAnotherScope() diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index 4cb943daaf6f0..449d99266d365 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -1143,7 +1143,7 @@ func (w *exportWriter) stmt(n ir.Node) { n := n.(*ir.RangeStmt) w.op(ir.ORANGE) w.pos(n.Pos()) - w.stmtList(n.Vars) + w.exprsOrNil(n.Key, n.Value) w.expr(n.X) w.stmtList(n.Body) diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go index 221229571c501..8285c418e9f8a 100644 --- a/src/cmd/compile/internal/typecheck/iimport.go +++ b/src/cmd/compile/internal/typecheck/iimport.go @@ -1028,7 +1028,9 @@ func (r *importReader) node() ir.Node { return ir.NewForStmt(pos, init, cond, post, r.stmtList()) case ir.ORANGE: - return ir.NewRangeStmt(r.pos(), r.stmtList(), r.expr(), r.stmtList()) + pos := r.pos() + k, v := r.exprsOrNil() + return ir.NewRangeStmt(pos, k, v, r.expr(), r.stmtList()) case ir.OSELECT: pos := r.pos() diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go index 133f93e53b35d..dfa224b318d87 100644 --- a/src/cmd/compile/internal/typecheck/stmt.go +++ b/src/cmd/compile/internal/typecheck/stmt.go @@ -19,19 +19,18 @@ func typecheckrangeExpr(n *ir.RangeStmt) { return } // delicate little dance. 
see typecheckas2 - ls := n.Vars - for i1, n1 := range ls { - if !ir.DeclaredBy(n1, n) { - ls[i1] = AssignExpr(ls[i1]) - } + if n.Key != nil && !ir.DeclaredBy(n.Key, n) { + n.Key = AssignExpr(n.Key) + } + if n.Value != nil && !ir.DeclaredBy(n.Value, n) { + n.Value = AssignExpr(n.Value) } - if t.IsPtr() && t.Elem().IsArray() { t = t.Elem() } n.SetType(t) - var t1, t2 *types.Type + var tk, tv *types.Type toomany := false switch t.Kind() { default: @@ -39,12 +38,12 @@ func typecheckrangeExpr(n *ir.RangeStmt) { return case types.TARRAY, types.TSLICE: - t1 = types.Types[types.TINT] - t2 = t.Elem() + tk = types.Types[types.TINT] + tv = t.Elem() case types.TMAP: - t1 = t.Key() - t2 = t.Elem() + tk = t.Key() + tv = t.Elem() case types.TCHAN: if !t.ChanDir().CanRecv() { @@ -52,61 +51,35 @@ func typecheckrangeExpr(n *ir.RangeStmt) { return } - t1 = t.Elem() - t2 = nil - if len(n.Vars) == 2 { + tk = t.Elem() + tv = nil + if n.Value != nil { toomany = true } case types.TSTRING: - t1 = types.Types[types.TINT] - t2 = types.RuneType + tk = types.Types[types.TINT] + tv = types.RuneType } - if len(n.Vars) > 2 || toomany { + if toomany { base.ErrorfAt(n.Pos(), "too many variables in range") } - var v1, v2 ir.Node - if len(n.Vars) != 0 { - v1 = n.Vars[0] - } - if len(n.Vars) > 1 { - v2 = n.Vars[1] - } - - // this is not only an optimization but also a requirement in the spec. - // "if the second iteration variable is the blank identifier, the range - // clause is equivalent to the same clause with only the first variable - // present." - if ir.IsBlank(v2) { - if v1 != nil { - n.Vars = []ir.Node{v1} - } - v2 = nil - } - - if v1 != nil { - if ir.DeclaredBy(v1, n) { - v1.SetType(t1) - } else if v1.Type() != nil { - if op, why := assignop(t1, v1.Type()); op == ir.OXXX { - base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t1, v1, why) - } - } - checkassign(n, v1) - } - - if v2 != nil { - if ir.DeclaredBy(v2, n) { - v2.SetType(t2) - } else if v2.Type() != nil { - if op, why := assignop(t2, v2.Type()); op == ir.OXXX { - base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t2, v2, why) + do := func(nn ir.Node, t *types.Type) { + if nn != nil { + if ir.DeclaredBy(nn, n) { + nn.SetType(t) + } else if nn.Type() != nil { + if op, why := assignop(t, nn.Type()); op == ir.OXXX { + base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t, nn, why) + } } + checkassign(n, nn) } - checkassign(n, v2) } + do(n.Key, tk) + do(n.Value, tv) } // type check assignment. @@ -399,11 +372,11 @@ func tcRange(n *ir.RangeStmt) { // second half of dance, the first half being typecheckrangeExpr n.SetTypecheck(1) - ls := n.Vars - for i1, n1 := range ls { - if n1.Typecheck() == 0 { - ls[i1] = AssignExpr(ls[i1]) - } + if n.Key != nil && n.Key.Typecheck() == 0 { + n.Key = AssignExpr(n.Key) + } + if n.Value != nil && n.Value.Typecheck() == 0 { + n.Value = AssignExpr(n.Value) } decldepth++ diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index de6a3807e6f25..1fcebf51949d7 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -848,7 +848,7 @@ func (o *orderState) stmt(n ir.Node) { base.Fatalf("order.stmt range %v", n.Type()) case types.TARRAY, types.TSLICE: - if len(n.Vars) < 2 || ir.IsBlank(n.Vars[1]) { + if n.Value == nil || ir.IsBlank(n.Value) { // for i := range x will only use x once, to compute len(x). // No need to copy it. 
break @@ -887,7 +887,8 @@ func (o *orderState) stmt(n ir.Node) { // hiter contains pointers and needs to be zeroed. n.Prealloc = o.newTemp(reflectdata.MapIterType(n.Type()), true) } - o.exprListInPlace(n.Vars) + n.Key = o.exprInPlace(n.Key) + n.Value = o.exprInPlace(n.Value) if orderBody { orderBlock(&n.Body, o.free) } diff --git a/src/cmd/compile/internal/walk/range.go b/src/cmd/compile/internal/walk/range.go index 98a3dc23f9483..5ecd577f74372 100644 --- a/src/cmd/compile/internal/walk/range.go +++ b/src/cmd/compile/internal/walk/range.go @@ -61,15 +61,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { a := nrange.X lno := ir.SetPos(a) - var v1, v2 ir.Node - l := len(nrange.Vars) - if l > 0 { - v1 = nrange.Vars[0] - } - - if l > 1 { - v2 = nrange.Vars[1] - } + v1, v2 := nrange.Key, nrange.Value if ir.IsBlank(v2) { v2 = nil @@ -343,15 +335,11 @@ func isMapClear(n *ir.RangeStmt) bool { return false } - if n.Op() != ir.ORANGE || n.Type().Kind() != types.TMAP || len(n.Vars) != 1 { - return false - } - - k := n.Vars[0] - if k == nil || ir.IsBlank(k) { + if n.Op() != ir.ORANGE || n.Type().Kind() != types.TMAP || n.Key == nil || n.Value != nil { return false } + k := n.Key // Require k to be a new variable name. if !ir.DeclaredBy(k, n) { return false From 082cc8b7d9daf88db8779262aca8ab5692a00dfb Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Thu, 24 Dec 2020 18:16:44 +0700 Subject: [PATCH 254/474] [dev.regabi] cmd/compile: change ir.IsAssignable -> ir.IsAddressable ir.IsAssignable does not include map index expression, so it should be named ir.IsAddressable instead. [git-generate] cd src/cmd/compile/internal/ir rf ' mv IsAssignable IsAddressable ' Change-Id: Ief6188e7b784ba9592d7b0cbec33b5f70d78f638 Reviewed-on: https://go-review.googlesource.com/c/go/+/279436 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/expr.go | 6 +++--- src/cmd/compile/internal/ssagen/ssa.go | 2 +- src/cmd/compile/internal/typecheck/expr.go | 2 +- src/cmd/compile/internal/typecheck/typecheck.go | 4 ++-- src/cmd/compile/internal/walk/compare.go | 2 +- src/cmd/compile/internal/walk/convert.go | 2 +- src/cmd/compile/internal/walk/expr.go | 2 +- src/cmd/compile/internal/walk/order.go | 2 +- 8 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 467596609028c..a79b78fb45b55 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -776,12 +776,12 @@ func IsZero(n Node) bool { } // lvalue etc -func IsAssignable(n Node) bool { +func IsAddressable(n Node) bool { switch n.Op() { case OINDEX: n := n.(*IndexExpr) if n.X.Type() != nil && n.X.Type().IsArray() { - return IsAssignable(n.X) + return IsAddressable(n.X) } if n.X.Type() != nil && n.X.Type().IsString() { return false @@ -792,7 +792,7 @@ func IsAssignable(n Node) bool { case ODOT: n := n.(*SelectorExpr) - return IsAssignable(n.X) + return IsAddressable(n.X) case ONAME: n := n.(*Name) diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index cf683e578d278..69e16964239d9 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -2736,7 +2736,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { // SSA, then load just the selected field. This // prevents false memory dependencies in race/msan // instrumentation. 
- if ir.IsAssignable(n) && !s.canSSA(n) { + if ir.IsAddressable(n) && !s.canSSA(n) { p := s.addr(n) return s.load(n.Type(), p) } diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go index 6bbb68550e6cc..879ae385c7b43 100644 --- a/src/cmd/compile/internal/typecheck/expr.go +++ b/src/cmd/compile/internal/typecheck/expr.go @@ -842,7 +842,7 @@ func tcSlice(n *ir.SliceExpr) ir.Node { return n } if l.Type().IsArray() { - if !ir.IsAssignable(n.X) { + if !ir.IsAddressable(n.X) { base.Errorf("invalid operation %v (slice of unaddressable value)", n) n.SetType(nil) return n diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index bf43402d3dabe..87daee123d4b0 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -1638,7 +1638,7 @@ func nonexported(sym *types.Sym) bool { } func checklvalue(n ir.Node, verb string) { - if !ir.IsAssignable(n) { + if !ir.IsAddressable(n) { base.Errorf("cannot %s %v", verb, n) } } @@ -1656,7 +1656,7 @@ func checkassign(stmt ir.Node, n ir.Node) { } } - if ir.IsAssignable(n) { + if ir.IsAddressable(n) { return } if n.Op() == ir.OINDEXMAP { diff --git a/src/cmd/compile/internal/walk/compare.go b/src/cmd/compile/internal/walk/compare.go index 40b45d4dea227..a4ea31bf55e69 100644 --- a/src/cmd/compile/internal/walk/compare.go +++ b/src/cmd/compile/internal/walk/compare.go @@ -155,7 +155,7 @@ func walkCompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { // Chose not to inline. Call equality function directly. if !inline { // eq algs take pointers; cmpl and cmpr must be addressable - if !ir.IsAssignable(cmpl) || !ir.IsAssignable(cmpr) { + if !ir.IsAddressable(cmpl) || !ir.IsAddressable(cmpr) { base.Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr) } diff --git a/src/cmd/compile/internal/walk/convert.go b/src/cmd/compile/internal/walk/convert.go index fd954d611366d..99abf306680d8 100644 --- a/src/cmd/compile/internal/walk/convert.go +++ b/src/cmd/compile/internal/walk/convert.go @@ -178,7 +178,7 @@ func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node { // with a non-interface, especially in a switch on interface value // with non-interface cases, is not visible to order.stmt, so we // have to fall back on allocating a temp here. - if !ir.IsAssignable(v) { + if !ir.IsAddressable(v) { v = copyExpr(v, v.Type(), init) } v = typecheck.NodAddr(v) diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index 658a579fdaccd..882e455749e74 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -429,7 +429,7 @@ func safeExpr(n ir.Node, init *ir.Nodes) ir.Node { } // make a copy; must not be used as an lvalue - if ir.IsAssignable(n) { + if ir.IsAddressable(n) { base.Fatalf("missing lvalue case in safeexpr: %v", n) } return cheapExpr(n, init) diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index 1fcebf51949d7..ef95dc14c711d 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -235,7 +235,7 @@ func (o *orderState) safeExpr(n ir.Node) ir.Node { // because we emit explicit VARKILL instructions marking the end of those // temporaries' lifetimes. 
func isaddrokay(n ir.Node) bool { - return ir.IsAssignable(n) && (n.Op() != ir.ONAME || n.(*ir.Name).Class_ == ir.PEXTERN || ir.IsAutoTmp(n)) + return ir.IsAddressable(n) && (n.Op() != ir.ONAME || n.(*ir.Name).Class_ == ir.PEXTERN || ir.IsAutoTmp(n)) } // addrTemp ensures that n is okay to pass by address to runtime routines. From 4b1d0fe66f3fcd80febc0e4be2850c06e3469da3 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Thu, 24 Dec 2020 15:42:37 -0800 Subject: [PATCH 255/474] [dev.regabi] cmd/compile: new devirtualization pkg [generated] The devirtualization code was only in inl.go because it reused some of the same helper functions as inlining (notably staticValue), but that code all ended up in package ir instead anyway. Beyond that minor commonality, it's entirely separate from inlining. It's definitely on the small side, but consistent with the new micropass-as-a-package approach we're trying. [git-generate] cd src/cmd/compile/internal/inline rf ' mv Devirtualize Func mv devirtualizeCall Call mv Func Call devirtualize.go mv devirtualize.go cmd/compile/internal/devirtualize ' Change-Id: Iff7b9fe486856660a8107d5391c54b7e8d238706 Reviewed-on: https://go-review.googlesource.com/c/go/+/280212 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- .../internal/devirtualize/devirtualize.go | 101 ++++++++++++++++++ src/cmd/compile/internal/gc/main.go | 3 +- src/cmd/compile/internal/inline/inl.go | 67 ------------ 3 files changed, 103 insertions(+), 68 deletions(-) create mode 100644 src/cmd/compile/internal/devirtualize/devirtualize.go diff --git a/src/cmd/compile/internal/devirtualize/devirtualize.go b/src/cmd/compile/internal/devirtualize/devirtualize.go new file mode 100644 index 0000000000000..95b28eff61c35 --- /dev/null +++ b/src/cmd/compile/internal/devirtualize/devirtualize.go @@ -0,0 +1,101 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// The inlining facility makes 2 passes: first caninl determines which +// functions are suitable for inlining, and for those that are it +// saves a copy of the body. Then inlcalls walks each function body to +// expand calls to inlinable functions. +// +// The Debug.l flag controls the aggressiveness. Note that main() swaps level 0 and 1, +// making 1 the default and -l disable. Additional levels (beyond -l) may be buggy and +// are not supported. +// 0: disabled +// 1: 80-nodes leaf functions, oneliners, panic, lazy typechecking (default) +// 2: (unassigned) +// 3: (unassigned) +// 4: allow non-leaf functions +// +// At some point this may get another default and become switch-offable with -N. +// +// The -d typcheckinl flag enables early typechecking of all imported bodies, +// which is useful to flush out bugs. +// +// The Debug.m flag enables diagnostic output. a single -m is useful for verifying +// which calls get inlined or not, more is for debugging, and may go away at any point. + +package devirtualize + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" +) + +// Devirtualize replaces interface method calls within fn with direct +// concrete-type method calls where applicable. 
+func Func(fn *ir.Func) { + ir.CurFunc = fn + ir.VisitList(fn.Body, func(n ir.Node) { + if n.Op() == ir.OCALLINTER { + Call(n.(*ir.CallExpr)) + } + }) +} + +func Call(call *ir.CallExpr) { + sel := call.X.(*ir.SelectorExpr) + r := ir.StaticValue(sel.X) + if r.Op() != ir.OCONVIFACE { + return + } + recv := r.(*ir.ConvExpr) + + typ := recv.X.Type() + if typ.IsInterface() { + return + } + + dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, nil) + dt.SetType(typ) + x := typecheck.Callee(ir.NewSelectorExpr(sel.Pos(), ir.OXDOT, dt, sel.Sel)) + switch x.Op() { + case ir.ODOTMETH: + x := x.(*ir.SelectorExpr) + if base.Flag.LowerM != 0 { + base.WarnfAt(call.Pos(), "devirtualizing %v to %v", sel, typ) + } + call.SetOp(ir.OCALLMETH) + call.X = x + case ir.ODOTINTER: + // Promoted method from embedded interface-typed field (#42279). + x := x.(*ir.SelectorExpr) + if base.Flag.LowerM != 0 { + base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", sel, typ) + } + call.SetOp(ir.OCALLINTER) + call.X = x + default: + // TODO(mdempsky): Turn back into Fatalf after more testing. + if base.Flag.LowerM != 0 { + base.WarnfAt(call.Pos(), "failed to devirtualize %v (%v)", x, x.Op()) + } + return + } + + // Duplicated logic from typecheck for function call return + // value types. + // + // Receiver parameter size may have changed; need to update + // call.Type to get correct stack offsets for result + // parameters. + types.CheckSize(x.Type()) + switch ft := x.Type(); ft.NumResults() { + case 0: + case 1: + call.SetType(ft.Results().Field(0).Type) + default: + call.SetType(ft.Results()) + } +} diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 8483c87a38d10..ba3620e6769fd 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -10,6 +10,7 @@ import ( "bufio" "bytes" "cmd/compile/internal/base" + "cmd/compile/internal/devirtualize" "cmd/compile/internal/dwarfgen" "cmd/compile/internal/escape" "cmd/compile/internal/inline" @@ -237,7 +238,7 @@ func Main(archInit func(*ssagen.ArchInfo)) { // Devirtualize. for _, n := range typecheck.Target.Decls { if n.Op() == ir.ODCLFUNC { - inline.Devirtualize(n.(*ir.Func)) + devirtualize.Func(n.(*ir.Func)) } } ir.CurFunc = nil diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 222e62d0cc894..9ffb08048a6f6 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -1203,73 +1203,6 @@ func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name { return s } -// Devirtualize replaces interface method calls within fn with direct -// concrete-type method calls where applicable. 
-func Devirtualize(fn *ir.Func) { - ir.CurFunc = fn - ir.VisitList(fn.Body, func(n ir.Node) { - if n.Op() == ir.OCALLINTER { - devirtualizeCall(n.(*ir.CallExpr)) - } - }) -} - -func devirtualizeCall(call *ir.CallExpr) { - sel := call.X.(*ir.SelectorExpr) - r := ir.StaticValue(sel.X) - if r.Op() != ir.OCONVIFACE { - return - } - recv := r.(*ir.ConvExpr) - - typ := recv.X.Type() - if typ.IsInterface() { - return - } - - dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, nil) - dt.SetType(typ) - x := typecheck.Callee(ir.NewSelectorExpr(sel.Pos(), ir.OXDOT, dt, sel.Sel)) - switch x.Op() { - case ir.ODOTMETH: - x := x.(*ir.SelectorExpr) - if base.Flag.LowerM != 0 { - base.WarnfAt(call.Pos(), "devirtualizing %v to %v", sel, typ) - } - call.SetOp(ir.OCALLMETH) - call.X = x - case ir.ODOTINTER: - // Promoted method from embedded interface-typed field (#42279). - x := x.(*ir.SelectorExpr) - if base.Flag.LowerM != 0 { - base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", sel, typ) - } - call.SetOp(ir.OCALLINTER) - call.X = x - default: - // TODO(mdempsky): Turn back into Fatalf after more testing. - if base.Flag.LowerM != 0 { - base.WarnfAt(call.Pos(), "failed to devirtualize %v (%v)", x, x.Op()) - } - return - } - - // Duplicated logic from typecheck for function call return - // value types. - // - // Receiver parameter size may have changed; need to update - // call.Type to get correct stack offsets for result - // parameters. - types.CheckSize(x.Type()) - switch ft := x.Type(); ft.NumResults() { - case 0: - case 1: - call.SetType(ft.Results().Field(0).Type) - default: - call.SetType(ft.Results()) - } -} - // numNonClosures returns the number of functions in list which are not closures. func numNonClosures(list []*ir.Func) int { count := 0 From 2785c691c2ba63d284bdaf0f3bcdb678c3f16cd0 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Thu, 24 Dec 2020 16:03:47 -0800 Subject: [PATCH 256/474] [dev.regabi] cmd/compile: cleanup devirtualization docs Change-Id: I8e319f55fad6e9ed857aa020a96f3a89ccaadcea Reviewed-on: https://go-review.googlesource.com/c/go/+/280213 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- .../internal/devirtualize/devirtualize.go | 38 ++++++------------- 1 file changed, 11 insertions(+), 27 deletions(-) diff --git a/src/cmd/compile/internal/devirtualize/devirtualize.go b/src/cmd/compile/internal/devirtualize/devirtualize.go index 95b28eff61c35..60ba208d0876b 100644 --- a/src/cmd/compile/internal/devirtualize/devirtualize.go +++ b/src/cmd/compile/internal/devirtualize/devirtualize.go @@ -1,29 +1,10 @@ -// Copyright 2011 The Go Authors. All rights reserved. +// Copyright 2020 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// -// The inlining facility makes 2 passes: first caninl determines which -// functions are suitable for inlining, and for those that are it -// saves a copy of the body. Then inlcalls walks each function body to -// expand calls to inlinable functions. -// -// The Debug.l flag controls the aggressiveness. Note that main() swaps level 0 and 1, -// making 1 the default and -l disable. Additional levels (beyond -l) may be buggy and -// are not supported. -// 0: disabled -// 1: 80-nodes leaf functions, oneliners, panic, lazy typechecking (default) -// 2: (unassigned) -// 3: (unassigned) -// 4: allow non-leaf functions -// -// At some point this may get another default and become switch-offable with -N. 
-// -// The -d typcheckinl flag enables early typechecking of all imported bodies, -// which is useful to flush out bugs. -// -// The Debug.m flag enables diagnostic output. a single -m is useful for verifying -// which calls get inlined or not, more is for debugging, and may go away at any point. +// Package devirtualize implements a simple "devirtualization" +// optimization pass, which replaces interface method calls with +// direct concrete-type method calls where possible. package devirtualize import ( @@ -33,18 +14,21 @@ import ( "cmd/compile/internal/types" ) -// Devirtualize replaces interface method calls within fn with direct -// concrete-type method calls where applicable. +// Func devirtualizes calls within fn where possible. func Func(fn *ir.Func) { ir.CurFunc = fn ir.VisitList(fn.Body, func(n ir.Node) { - if n.Op() == ir.OCALLINTER { - Call(n.(*ir.CallExpr)) + if call, ok := n.(*ir.CallExpr); ok { + Call(call) } }) } +// Call devirtualizes the given call if possible. func Call(call *ir.CallExpr) { + if call.Op() != ir.OCALLINTER { + return + } sel := call.X.(*ir.SelectorExpr) r := ir.StaticValue(sel.X) if r.Op() != ir.OCONVIFACE { From e24d2f3d0513961441904afdf71cafe7808c0be9 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Thu, 24 Dec 2020 18:49:35 +0700 Subject: [PATCH 257/474] [dev.regabi] cmd/compile: remove typ from RangeStmt We can use RangeStmt.X.Type() instead. Passes buildall w/ toolstash -cmp. Change-Id: Id63ce9cb046c3b39bcc35453b1602c986794dfe1 Reviewed-on: https://go-review.googlesource.com/c/go/+/279437 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/stmt.go | 4 ---- src/cmd/compile/internal/typecheck/stmt.go | 17 ++++++++++------- src/cmd/compile/internal/walk/order.go | 5 +++-- src/cmd/compile/internal/walk/range.go | 14 +++++++------- 4 files changed, 20 insertions(+), 20 deletions(-) diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index 453153c0245c2..cfda6fd234dab 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -300,7 +300,6 @@ type RangeStmt struct { Value Node Body Nodes HasBreak bool - typ *types.Type // TODO(rsc): Remove - use X.Type() instead Prealloc *Name } @@ -312,9 +311,6 @@ func NewRangeStmt(pos src.XPos, key, value, x Node, body []Node) *RangeStmt { return n } -func (n *RangeStmt) Type() *types.Type { return n.typ } -func (n *RangeStmt) SetType(x *types.Type) { n.typ = x } - // A ReturnStmt is a return statement. type ReturnStmt struct { miniStmt diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go index dfa224b318d87..fe9ef400bbdce 100644 --- a/src/cmd/compile/internal/typecheck/stmt.go +++ b/src/cmd/compile/internal/typecheck/stmt.go @@ -11,13 +11,20 @@ import ( "cmd/internal/src" ) +func RangeExprType(t *types.Type) *types.Type { + if t.IsPtr() && t.Elem().IsArray() { + return t.Elem() + } + return t +} + func typecheckrangeExpr(n *ir.RangeStmt) { n.X = Expr(n.X) - - t := n.X.Type() - if t == nil { + if n.X.Type() == nil { return } + + t := RangeExprType(n.X.Type()) // delicate little dance. 
see typecheckas2 if n.Key != nil && !ir.DeclaredBy(n.Key, n) { n.Key = AssignExpr(n.Key) @@ -25,10 +32,6 @@ func typecheckrangeExpr(n *ir.RangeStmt) { if n.Value != nil && !ir.DeclaredBy(n.Value, n) { n.Value = AssignExpr(n.Value) } - if t.IsPtr() && t.Elem().IsArray() { - t = t.Elem() - } - n.SetType(t) var tk, tv *types.Type toomany := false diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index ef95dc14c711d..1e41cfc6aaf9a 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -843,7 +843,8 @@ func (o *orderState) stmt(n ir.Node) { n.X = o.expr(n.X, nil) orderBody := true - switch n.Type().Kind() { + xt := typecheck.RangeExprType(n.X.Type()) + switch xt.Kind() { default: base.Fatalf("order.stmt range %v", n.Type()) @@ -885,7 +886,7 @@ func (o *orderState) stmt(n ir.Node) { // n.Prealloc is the temp for the iterator. // hiter contains pointers and needs to be zeroed. - n.Prealloc = o.newTemp(reflectdata.MapIterType(n.Type()), true) + n.Prealloc = o.newTemp(reflectdata.MapIterType(xt), true) } n.Key = o.exprInPlace(n.Key) n.Value = o.exprInPlace(n.Value) diff --git a/src/cmd/compile/internal/walk/range.go b/src/cmd/compile/internal/walk/range.go index 5ecd577f74372..49a69e97513f2 100644 --- a/src/cmd/compile/internal/walk/range.go +++ b/src/cmd/compile/internal/walk/range.go @@ -56,9 +56,8 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { // hb: hidden bool // a, v1, v2: not hidden aggregate, val 1, 2 - t := nrange.Type() - a := nrange.X + t := typecheck.RangeExprType(a.Type()) lno := ir.SetPos(a) v1, v2 := nrange.Key, nrange.Value @@ -113,7 +112,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { } // for v1, v2 := range ha { body } - if cheapComputableIndex(nrange.Type().Elem().Width) { + if cheapComputableIndex(t.Elem().Width) { // v1, v2 = hv1, ha[hv1] tmp := ir.NewIndexExpr(base.Pos, ha, hv1) tmp.SetBounded(true) @@ -142,7 +141,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { ifGuard.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn) nfor.SetOp(ir.OFORUNTIL) - hp := typecheck.Temp(types.NewPtr(nrange.Type().Elem())) + hp := typecheck.Temp(types.NewPtr(t.Elem())) tmp := ir.NewIndexExpr(base.Pos, ha, ir.NewInt(0)) tmp.SetBounded(true) init = append(init, ir.NewAssignStmt(base.Pos, hp, typecheck.NodAddr(tmp))) @@ -335,7 +334,8 @@ func isMapClear(n *ir.RangeStmt) bool { return false } - if n.Op() != ir.ORANGE || n.Type().Kind() != types.TMAP || n.Key == nil || n.Value != nil { + t := n.X.Type() + if n.Op() != ir.ORANGE || t.Kind() != types.TMAP || n.Key == nil || n.Value != nil { return false } @@ -360,7 +360,7 @@ func isMapClear(n *ir.RangeStmt) bool { } // Keys where equality is not reflexive can not be deleted from maps. - if !types.IsReflexive(m.Type().Key()) { + if !types.IsReflexive(t.Key()) { return false } @@ -416,7 +416,7 @@ func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node { return nil } - elemsize := loop.Type().Elem().Width + elemsize := typecheck.RangeExprType(loop.X.Type()).Elem().Width if elemsize <= 0 || !ir.IsZero(stmt.Y) { return nil } From 396b6c2e7c5c368c67e71824471d4f2d48f5c128 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Wed, 23 Dec 2020 16:14:59 -0800 Subject: [PATCH 258/474] [dev.regabi] cmd/compile: cleanup assignment typechecking The assignment type-checking code previously bounced around a lot between the LHS and RHS sides of the assignment. But there's actually a very simple, consistent pattern to how to type check assignments: 1. 
Check the RHS expression. 2. If the LHS expression is an identifier that was declared in this statement and it doesn't have an explicit type, give it the RHS expression's default type. 3. Check the LHS expression. 4. Try assigning the RHS expression to the LHS expression, adding implicit conversions as needed. This CL implements this algorithm, and refactors tcAssign and tcAssignList to use a common implementation. It also fixes the error messages to consistently say just "1 variable" or "1 value", rather than occasionally "1 variables" or "1 values". Fixes #43348. Passes toolstash -cmp. Change-Id: I749cb8d6ccbc7d22cd7cb0a381f58a39fc2696b5 Reviewed-on: https://go-review.googlesource.com/c/go/+/280112 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/typecheck/stmt.go | 235 +++++++----------- .../compile/internal/typecheck/typecheck.go | 5 + test/fixedbugs/issue27595.go | 2 +- test/fixedbugs/issue30087.go | 6 +- test/used.go | 1 + 5 files changed, 105 insertions(+), 144 deletions(-) diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go index fe9ef400bbdce..7e74b730bc117 100644 --- a/src/cmd/compile/internal/typecheck/stmt.go +++ b/src/cmd/compile/internal/typecheck/stmt.go @@ -93,47 +93,16 @@ func tcAssign(n *ir.AssignStmt) { defer tracePrint("typecheckas", n)(nil) } - // delicate little dance. - // the definition of n may refer to this assignment - // as its definition, in which case it will call typecheckas. - // in that case, do not call typecheck back, or it will cycle. - // if the variable has a type (ntype) then typechecking - // will not look at defn, so it is okay (and desirable, - // so that the conversion below happens). - n.X = Resolve(n.X) - - if !ir.DeclaredBy(n.X, n) || n.X.Name().Ntype != nil { + if n.Y == nil { n.X = AssignExpr(n.X) + return } - // Use ctxMultiOK so we can emit an "N variables but M values" error - // to be consistent with typecheckas2 (#26616). - n.Y = typecheck(n.Y, ctxExpr|ctxMultiOK) - checkassign(n, n.X) - if n.Y != nil && n.Y.Type() != nil { - if n.Y.Type().IsFuncArgStruct() { - base.Errorf("assignment mismatch: 1 variable but %v returns %d values", n.Y.(*ir.CallExpr).X, n.Y.Type().NumFields()) - // Multi-value RHS isn't actually valid for OAS; nil out - // to indicate failed typechecking. - n.Y.SetType(nil) - } else if n.X.Type() != nil { - n.Y = AssignConv(n.Y, n.X.Type(), "assignment") - } - } - - if ir.DeclaredBy(n.X, n) && n.X.Name().Ntype == nil { - n.Y = DefaultLit(n.Y, nil) - n.X.SetType(n.Y.Type()) - } - - // second half of dance. - // now that right is done, typecheck the left - // just to get it over with. see dance above. - n.SetTypecheck(1) + lhs, rhs := []ir.Node{n.X}, []ir.Node{n.Y} + assign(n, lhs, rhs) + n.X, n.Y = lhs[0], rhs[0] - if n.X.Typecheck() == 0 { - n.X = AssignExpr(n.X) - } + // TODO(mdempsky): This seems out of place. if !ir.IsBlank(n.X) { types.CheckSize(n.X.Type()) // ensure width is calculated for backend } @@ -144,132 +113,118 @@ func tcAssignList(n *ir.AssignListStmt) { defer tracePrint("typecheckas2", n)(nil) } - ls := n.Lhs - for i1, n1 := range ls { - // delicate little dance. - n1 = Resolve(n1) - ls[i1] = n1 + assign(n, n.Lhs, n.Rhs) +} + +func assign(stmt ir.Node, lhs, rhs []ir.Node) { + // delicate little dance. + // the definition of lhs may refer to this assignment + // as its definition, in which case it will call typecheckas. 
+ // in that case, do not call typecheck back, or it will cycle. + // if the variable has a type (ntype) then typechecking + // will not look at defn, so it is okay (and desirable, + // so that the conversion below happens). - if !ir.DeclaredBy(n1, n) || n1.Name().Ntype != nil { - ls[i1] = AssignExpr(ls[i1]) + checkLHS := func(i int, typ *types.Type) { + lhs[i] = Resolve(lhs[i]) + if n := lhs[i]; typ != nil && ir.DeclaredBy(n, stmt) && n.Name().Ntype == nil { + if typ.Kind() != types.TNIL { + n.SetType(defaultType(typ)) + } else { + base.Errorf("use of untyped nil") + } } + if lhs[i].Typecheck() == 0 { + lhs[i] = AssignExpr(lhs[i]) + } + checkassign(stmt, lhs[i]) } - cl := len(n.Lhs) - cr := len(n.Rhs) - if cl > 1 && cr == 1 { - n.Rhs[0] = typecheck(n.Rhs[0], ctxExpr|ctxMultiOK) - } else { - Exprs(n.Rhs) - } - checkassignlist(n, n.Lhs) - - var l ir.Node - var r ir.Node - if cl == cr { - // easy - ls := n.Lhs - rs := n.Rhs - for il, nl := range ls { - nr := rs[il] - if nl.Type() != nil && nr.Type() != nil { - rs[il] = AssignConv(nr, nl.Type(), "assignment") - } - if ir.DeclaredBy(nl, n) && nl.Name().Ntype == nil { - rs[il] = DefaultLit(rs[il], nil) - nl.SetType(rs[il].Type()) - } + assignType := func(i int, typ *types.Type) { + checkLHS(i, typ) + if typ != nil { + checkassignto(typ, lhs[i]) } + } - goto out + cr := len(rhs) + if len(rhs) == 1 { + rhs[0] = typecheck(rhs[0], ctxExpr|ctxMultiOK) + if rtyp := rhs[0].Type(); rtyp != nil && rtyp.IsFuncArgStruct() { + cr = rtyp.NumFields() + } + } else { + Exprs(rhs) } - l = n.Lhs[0] - r = n.Rhs[0] + // x, ok = y +assignOK: + for len(lhs) == 2 && cr == 1 { + stmt := stmt.(*ir.AssignListStmt) + r := rhs[0] - // x,y,z = f() - if cr == 1 { - if r.Type() == nil { - goto out - } switch r.Op() { - case ir.OCALLMETH, ir.OCALLINTER, ir.OCALLFUNC: - if !r.Type().IsFuncArgStruct() { - break - } - cr = r.Type().NumFields() - if cr != cl { - goto mismatch - } - r.(*ir.CallExpr).Use = ir.CallUseList - n.SetOp(ir.OAS2FUNC) - for i, l := range n.Lhs { - f := r.Type().Field(i) - if f.Type != nil && l.Type() != nil { - checkassignto(f.Type, l) - } - if ir.DeclaredBy(l, n) && l.Name().Ntype == nil { - l.SetType(f.Type) - } - } - goto out + case ir.OINDEXMAP: + stmt.SetOp(ir.OAS2MAPR) + case ir.ORECV: + stmt.SetOp(ir.OAS2RECV) + case ir.ODOTTYPE: + r := r.(*ir.TypeAssertExpr) + stmt.SetOp(ir.OAS2DOTTYPE) + r.SetOp(ir.ODOTTYPE2) + default: + break assignOK } + + assignType(0, r.Type()) + assignType(1, types.UntypedBool) + return } - // x, ok = y - if cl == 2 && cr == 1 { - if r.Type() == nil { - goto out - } - switch r.Op() { - case ir.OINDEXMAP, ir.ORECV, ir.ODOTTYPE: - switch r.Op() { - case ir.OINDEXMAP: - n.SetOp(ir.OAS2MAPR) - case ir.ORECV: - n.SetOp(ir.OAS2RECV) - case ir.ODOTTYPE: - r := r.(*ir.TypeAssertExpr) - n.SetOp(ir.OAS2DOTTYPE) - r.SetOp(ir.ODOTTYPE2) + if len(lhs) != cr { + if r, ok := rhs[0].(*ir.CallExpr); ok && len(rhs) == 1 { + if r.Type() != nil { + base.ErrorfAt(stmt.Pos(), "assignment mismatch: %d variable%s but %v returns %d value%s", len(lhs), plural(len(lhs)), r.X, cr, plural(cr)) } - if l.Type() != nil { - checkassignto(r.Type(), l) - } - if ir.DeclaredBy(l, n) { - l.SetType(r.Type()) - } - l := n.Lhs[1] - if l.Type() != nil && !l.Type().IsBoolean() { - checkassignto(types.Types[types.TBOOL], l) - } - if ir.DeclaredBy(l, n) && l.Name().Ntype == nil { - l.SetType(types.Types[types.TBOOL]) - } - goto out + } else { + base.ErrorfAt(stmt.Pos(), "assignment mismatch: %d variable%s but %v value%s", len(lhs), plural(len(lhs)), len(rhs), 
plural(len(rhs))) + } + + for i := range lhs { + checkLHS(i, nil) } + return } -mismatch: - switch r.Op() { - default: - base.Errorf("assignment mismatch: %d variables but %d values", cl, cr) - case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: - r := r.(*ir.CallExpr) - base.Errorf("assignment mismatch: %d variables but %v returns %d values", cl, r.X, cr) + // x,y,z = f() + if cr > len(rhs) { + stmt := stmt.(*ir.AssignListStmt) + stmt.SetOp(ir.OAS2FUNC) + r := rhs[0].(*ir.CallExpr) + r.Use = ir.CallUseList + rtyp := r.Type() + + for i := range lhs { + assignType(i, rtyp.Field(i).Type) + } + return } - // second half of dance -out: - n.SetTypecheck(1) - ls = n.Lhs - for i1, n1 := range ls { - if n1.Typecheck() == 0 { - ls[i1] = AssignExpr(ls[i1]) + for i, r := range rhs { + checkLHS(i, r.Type()) + if lhs[i].Type() != nil { + rhs[i] = AssignConv(r, lhs[i].Type(), "assignment") } } } +func plural(n int) string { + if n == 1 { + return "" + } + return "s" +} + // tcFor typechecks an OFOR node. func tcFor(n *ir.ForStmt) ir.Node { Stmts(n.Init()) diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index 87daee123d4b0..05a346b8c889d 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -1690,6 +1690,11 @@ func checkassignlist(stmt ir.Node, l ir.Nodes) { } func checkassignto(src *types.Type, dst ir.Node) { + // TODO(mdempsky): Handle all untyped types correctly. + if src == types.UntypedBool && dst.Type().IsBoolean() { + return + } + if op, why := assignop(src, dst.Type()); op == ir.OXXX { base.Errorf("cannot assign %v to %L in multiple assignment%s", src, dst, why) return diff --git a/test/fixedbugs/issue27595.go b/test/fixedbugs/issue27595.go index af5c7a10d9b01..b9328a68132a1 100644 --- a/test/fixedbugs/issue27595.go +++ b/test/fixedbugs/issue27595.go @@ -8,7 +8,7 @@ package main var a = twoResults() // ERROR "assignment mismatch: 1 variable but twoResults returns 2 values" var b, c, d = twoResults() // ERROR "assignment mismatch: 3 variables but twoResults returns 2 values" -var e, f = oneResult() // ERROR "assignment mismatch: 2 variables but oneResult returns 1 values" +var e, f = oneResult() // ERROR "assignment mismatch: 2 variables but oneResult returns 1 value" func twoResults() (int, int) { return 1, 2 diff --git a/test/fixedbugs/issue30087.go b/test/fixedbugs/issue30087.go index 3ad9c8c8d90c0..a8f6202329e86 100644 --- a/test/fixedbugs/issue30087.go +++ b/test/fixedbugs/issue30087.go @@ -7,8 +7,8 @@ package main func main() { - var a, b = 1 // ERROR "assignment mismatch: 2 variables but 1 values|wrong number of initializations" - _ = 1, 2 // ERROR "assignment mismatch: 1 variables but 2 values|number of variables does not match" - c, d := 1 // ERROR "assignment mismatch: 2 variables but 1 values|wrong number of initializations" + var a, b = 1 // ERROR "assignment mismatch: 2 variables but 1 value|wrong number of initializations" + _ = 1, 2 // ERROR "assignment mismatch: 1 variable but 2 values|number of variables does not match" + c, d := 1 // ERROR "assignment mismatch: 2 variables but 1 value|wrong number of initializations" e, f := 1, 2, 3 // ERROR "assignment mismatch: 2 variables but 3 values|wrong number of initializations" } diff --git a/test/used.go b/test/used.go index 5c7aad24a6ca2..76f3fc91cccd2 100644 --- a/test/used.go +++ b/test/used.go @@ -63,6 +63,7 @@ func _() { _ = f1() // ok _, _ = f2() // ok _ = f2() // ERROR "assignment mismatch: 1 variable but f2 
returns 2 values" + _ = f1(), 0 // ERROR "assignment mismatch: 1 variable but 2 values" T.M0 // ERROR "T.M0 evaluated but not used" t.M0 // ERROR "t.M0 evaluated but not used" cap // ERROR "use of builtin cap not in function call" From 1d9a1f67d537309f80740b16ef619500fb55db16 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Fri, 25 Dec 2020 00:12:15 -0800 Subject: [PATCH 259/474] [dev.regabi] cmd/compile: don't emit reflect data for method types Within the compiler, we represent the type of methods as a special "method" type, where the receiver parameter type is kept separate from the other parameters. This is convenient for operations like testing whether a type implements an interface, where we want to ignore the receiver type. These method types don't properly exist within the Go language though: there are only "function" types. E.g., method expressions (expressions of the form Type.Method) are simply functions with the receiver parameter prepended to the regular parameter list. However, the compiler backend is currently a little sloppy in its handling of these types, which results in temporary variables being declared as having "method" type, which then end up in DWARF data. This is probably harmless in practice, but it's still wrong. The proper solution is to fix the backend code so that we use correct types everywhere, and the next CL does exactly this. But as it fixes the DWARF output, so it fails toolstash -cmp. So this prelim CL bandages over the issue in a way that generates the same output as that proper fix. Change-Id: I37a127bc8365c3a79ce513bdb3cfccb945912762 Reviewed-on: https://go-review.googlesource.com/c/go/+/280293 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/reflectdata/reflect.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go index 3fbf6f337f079..27ee09ade2c51 100644 --- a/src/cmd/compile/internal/reflectdata/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -835,6 +835,10 @@ func TypeSym(t *types.Type) *types.Sym { if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() { base.Fatalf("typenamesym %v", t) } + if t.Kind() == types.TFUNC && t.Recv() != nil { + // TODO(mdempsky): Fix callers and make fatal. + t = typecheck.NewMethodType(t, t.Recv().Type) + } s := types.TypeSym(t) signatmu.Lock() NeedRuntimeType(t) From e4f293d85306cb89da3c134ce432e330e289447e Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Fri, 25 Dec 2020 00:34:32 -0800 Subject: [PATCH 260/474] [dev.regabi] cmd/compile: fix OCALLMETH desugaring During walkCall, there's a half-hearted attempt at rewriting OCALLMETH expressions into regular function calls by moving the receiver argument into n.Args with the rest of the arguments. But the way it does this leaves the AST in an inconsistent state (an ODOTMETH node with no X expression), and leaves a lot of duplicate work for the rest of the backend to deal with. By simply rewriting OCALLMETH expressions into proper OCALLFUNC expressions, we eliminate a ton of unnecessary code duplication during SSA construction and avoid creation of invalid method-typed variables. Passes toolstash -cmp. 
Change-Id: I4d5c5f90a79f8994059b2d0ae472182e08096c0a Reviewed-on: https://go-review.googlesource.com/c/go/+/280294 Trust: Matthew Dempsky Reviewed-by: Cuong Manh Le --- .../compile/internal/reflectdata/reflect.go | 3 +- src/cmd/compile/internal/ssagen/ssa.go | 59 +++---------------- src/cmd/compile/internal/typecheck/dcl.go | 3 + src/cmd/compile/internal/walk/expr.go | 26 ++++---- 4 files changed, 24 insertions(+), 67 deletions(-) diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go index 27ee09ade2c51..64cc3e87ca180 100644 --- a/src/cmd/compile/internal/reflectdata/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -836,8 +836,7 @@ func TypeSym(t *types.Type) *types.Sym { base.Fatalf("typenamesym %v", t) } if t.Kind() == types.TFUNC && t.Recv() != nil { - // TODO(mdempsky): Fix callers and make fatal. - t = typecheck.NewMethodType(t, t.Recv().Type) + base.Fatalf("misuse of method type: %v", t) } s := types.TypeSym(t) signatmu.Lock() diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 69e16964239d9..25efeee112e71 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -214,10 +214,7 @@ func InitConfig() { func getParam(n *ir.CallExpr, i int) *types.Field { t := n.X.Type() if n.Op() == ir.OCALLMETH { - if i == 0 { - return t.Recv() - } - return t.Params().Field(i - 1) + base.Fatalf("OCALLMETH missed by walkCall") } return t.Params().Field(i) } @@ -1166,7 +1163,7 @@ func (s *state) stmt(n ir.Node) { } fallthrough - case ir.OCALLMETH, ir.OCALLINTER: + case ir.OCALLINTER: n := n.(*ir.CallExpr) s.callResult(n, callNormal) if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class_ == ir.PFUNC { @@ -4396,16 +4393,7 @@ func (s *state) openDeferRecord(n *ir.CallExpr) { opendefer.closure = closure } } else if n.Op() == ir.OCALLMETH { - if fn.Op() != ir.ODOTMETH { - base.Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn) - } - fn := fn.(*ir.SelectorExpr) - closureVal := s.getMethodClosure(fn) - // We must always store the function value in a stack slot for the - // runtime panic code to use. But in the defer exit code, we will - // call the method directly. - closure := s.openDeferSave(nil, fn.Type(), closureVal) - opendefer.closureNode = closure.Aux.(*ir.Name) + base.Fatalf("OCALLMETH missed by walkCall") } else { if fn.Op() != ir.ODOTINTER { base.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op()) @@ -4679,18 +4667,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val s.maybeNilCheckClosure(closure, k) } case ir.OCALLMETH: - if fn.Op() != ir.ODOTMETH { - s.Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn) - } - fn := fn.(*ir.SelectorExpr) - testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f) - if k == callNormal { - sym = fn.Sel - break - } - closure = s.getMethodClosure(fn) - // Note: receiver is already present in n.Rlist, so we don't - // want to set it here. + base.Fatalf("OCALLMETH missed by walkCall") case ir.OCALLINTER: if fn.Op() != ir.ODOTINTER { s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op()) @@ -4755,9 +4732,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val } // Set receiver (for method calls). if n.Op() == ir.OCALLMETH { - f := ft.Recv() - s.storeArgWithBase(args[0], f.Type, addr, off+f.Offset) - args = args[1:] + base.Fatalf("OCALLMETH missed by walkCall") } // Set other args. 
for _, f := range ft.Params().Fields().Slice() { @@ -4825,11 +4800,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val t := n.X.Type() args := n.Rargs if n.Op() == ir.OCALLMETH { - f := t.Recv() - ACArg, arg := s.putArg(args[0], f.Type, argStart+f.Offset, testLateExpansion) - ACArgs = append(ACArgs, ACArg) - callArgs = append(callArgs, arg) - args = args[1:] + base.Fatalf("OCALLMETH missed by walkCall") } for i, n := range args { f := t.Params().Field(i) @@ -4947,22 +4918,6 @@ func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) { } } -// getMethodClosure returns a value representing the closure for a method call -func (s *state) getMethodClosure(fn *ir.SelectorExpr) *ssa.Value { - // Make a name n2 for the function. - // fn.Sym might be sync.(*Mutex).Unlock. - // Make a PFUNC node out of that, then evaluate it. - // We get back an SSA value representing &sync.(*Mutex).Unlock·f. - // We can then pass that to defer or go. - n2 := ir.NewNameAt(fn.Pos(), fn.Sel) - n2.Curfn = s.curfn - n2.Class_ = ir.PFUNC - // n2.Sym already existed, so it's already marked as a function. - n2.SetPos(fn.Pos()) - n2.SetType(types.Types[types.TUINT8]) // fake type for a static closure. Could use runtime.funcval if we had it. - return s.expr(n2) -} - // getClosureAndRcvr returns values for the appropriate closure and receiver of an // interface call func (s *state) getClosureAndRcvr(fn *ir.SelectorExpr) (*ssa.Value, *ssa.Value) { @@ -5089,7 +5044,7 @@ func (s *state) addr(n ir.Node) *ssa.Value { } addr := s.addr(n.X) return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type - case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH: + case ir.OCALLFUNC, ir.OCALLINTER: n := n.(*ir.CallExpr) return s.callAddr(n, callNormal) case ir.ODOTTYPE: diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go index db18c17e13018..0da0956c3aebd 100644 --- a/src/cmd/compile/internal/typecheck/dcl.go +++ b/src/cmd/compile/internal/typecheck/dcl.go @@ -556,6 +556,9 @@ func TempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name { if t == nil { base.Fatalf("tempAt called with nil type") } + if t.Kind() == types.TFUNC && t.Recv() != nil { + base.Fatalf("misuse of method type: %v", t) + } s := &types.Sym{ Name: autotmpname(len(curfn.Dcl)), diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index 882e455749e74..4eee32cf442f8 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -535,22 +535,31 @@ func walkCall1(n *ir.CallExpr, init *ir.Nodes) { return // already walked } - params := n.X.Type().Params() args := n.Args n.X = walkExpr(n.X, init) walkExprList(args, init) - // If this is a method call, add the receiver at the beginning of the args. + // If this is a method call t.M(...), + // rewrite into a function call T.M(t, ...). + // TODO(mdempsky): Do this right after type checking. 
if n.Op() == ir.OCALLMETH { withRecv := make([]ir.Node, len(args)+1) dot := n.X.(*ir.SelectorExpr) withRecv[0] = dot.X - dot.X = nil copy(withRecv[1:], args) args = withRecv + + dot = ir.NewSelectorExpr(dot.Pos(), ir.OXDOT, ir.TypeNode(dot.X.Type()), dot.Selection.Sym) + fn := typecheck.Expr(dot).(*ir.MethodExpr).FuncName() + fn.Type().Size() + + n.SetOp(ir.OCALLFUNC) + n.X = fn } + params := n.X.Type().Params() + // For any argument whose evaluation might require a function call, // store that argument into a temporary variable, // to prevent that calls from clobbering arguments already on the stack. @@ -559,16 +568,7 @@ func walkCall1(n *ir.CallExpr, init *ir.Nodes) { for i, arg := range args { updateHasCall(arg) // Determine param type. - var t *types.Type - if n.Op() == ir.OCALLMETH { - if i == 0 { - t = n.X.Type().Recv().Type - } else { - t = params.Field(i - 1).Type - } - } else { - t = params.Field(i).Type - } + t := params.Field(i).Type if base.Flag.Cfg.Instrumenting || fncall(arg, t) { // make assignment of fncall to tempAt tmp := typecheck.Temp(t) From a4f335f42033bc1ef9b948a9bff6f14aa6eb1aa8 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 26 Dec 2020 17:51:16 -0800 Subject: [PATCH 261/474] [dev.regabi] cmd/compile: always use a Field for ODOTPTR expressions During walk, we create ODOTPTR expressions to access runtime struct fields. But rather than using an actual Field for the selection, we were just directly setting the ODOTPTR's Offset field. This CL changes walk to create proper struct fields (albeit without the rest of their enclosing struct type) and use them for creating the ODOTPTR expressions. Passes toolstash -cmp. Change-Id: I08dbac3ed29141587feb0905d15adbcbcc4ca49e Reviewed-on: https://go-review.googlesource.com/c/go/+/280432 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/walk/switch.go | 36 ++++++++++++++++++------- src/cmd/compile/internal/walk/walk.go | 31 ++++++++++++++++----- 2 files changed, 52 insertions(+), 15 deletions(-) diff --git a/src/cmd/compile/internal/walk/switch.go b/src/cmd/compile/internal/walk/switch.go index 7829d93373956..141d2e5e053ff 100644 --- a/src/cmd/compile/internal/walk/switch.go +++ b/src/cmd/compile/internal/walk/switch.go @@ -318,15 +318,7 @@ func walkSwitchType(sw *ir.SwitchStmt) { sw.Compiled.Append(ifNil) // Load hash from type or itab. - dotHash := ir.NewSelectorExpr(base.Pos, ir.ODOTPTR, itab, nil) - dotHash.SetType(types.Types[types.TUINT32]) - dotHash.SetTypecheck(1) - if s.facename.Type().IsEmptyInterface() { - dotHash.Offset = int64(2 * types.PtrSize) // offset of hash in runtime._type - } else { - dotHash.Offset = int64(2 * types.PtrSize) // offset of hash in runtime.itab - } - dotHash.SetBounded(true) // guaranteed not to fault + dotHash := typeHashFieldOf(base.Pos, itab) s.hashname = copyExpr(dotHash, dotHash.Type(), &sw.Compiled) br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil) @@ -409,6 +401,32 @@ func walkSwitchType(sw *ir.SwitchStmt) { walkStmtList(sw.Compiled) } +// typeHashFieldOf returns an expression to select the type hash field +// from an interface's descriptor word (whether a *runtime._type or +// *runtime.itab pointer). 
+func typeHashFieldOf(pos src.XPos, itab *ir.UnaryExpr) *ir.SelectorExpr { + if itab.Op() != ir.OITAB { + base.Fatalf("expected OITAB, got %v", itab.Op()) + } + var hashField *types.Field + if itab.X.Type().IsEmptyInterface() { + // runtime._type's hash field + if rtypeHashField == nil { + rtypeHashField = runtimeField("hash", int64(2*types.PtrSize), types.Types[types.TUINT32]) + } + hashField = rtypeHashField + } else { + // runtime.itab's hash field + if itabHashField == nil { + itabHashField = runtimeField("hash", int64(2*types.PtrSize), types.Types[types.TUINT32]) + } + hashField = itabHashField + } + return boundedDotPtr(pos, itab, hashField) +} + +var rtypeHashField, itabHashField *types.Field + // A typeSwitch walks a type switch. type typeSwitch struct { // Temporary variables (i.e., ONAMEs) used by type switch dispatch logic: diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go index 9dda367b4d1eb..6def35ef24c1c 100644 --- a/src/cmd/compile/internal/walk/walk.go +++ b/src/cmd/compile/internal/walk/walk.go @@ -539,12 +539,31 @@ func calcHasCall(n ir.Node) bool { // itabType loads the _type field from a runtime.itab struct. func itabType(itab ir.Node) ir.Node { - typ := ir.NewSelectorExpr(base.Pos, ir.ODOTPTR, itab, nil) - typ.SetType(types.NewPtr(types.Types[types.TUINT8])) - typ.SetTypecheck(1) - typ.Offset = int64(types.PtrSize) // offset of _type in runtime.itab - typ.SetBounded(true) // guaranteed not to fault - return typ + if itabTypeField == nil { + // runtime.itab's _type field + itabTypeField = runtimeField("_type", int64(types.PtrSize), types.NewPtr(types.Types[types.TUINT8])) + } + return boundedDotPtr(base.Pos, itab, itabTypeField) +} + +var itabTypeField *types.Field + +// boundedDotPtr returns a selector expression representing ptr.field +// and omits nil-pointer checks for ptr. +func boundedDotPtr(pos src.XPos, ptr ir.Node, field *types.Field) *ir.SelectorExpr { + sel := ir.NewSelectorExpr(pos, ir.ODOTPTR, ptr, field.Sym) + sel.Selection = field + sel.Offset = field.Offset + sel.SetType(field.Type) + sel.SetTypecheck(1) + sel.SetBounded(true) // guaranteed not to fault + return sel +} + +func runtimeField(name string, offset int64, typ *types.Type) *types.Field { + f := types.NewField(src.NoXPos, ir.Pkgs.Runtime.Lookup(name), typ) + f.Offset = offset + return f } // ifaceData loads the data field from an interface. From 0de8eafd98e7431a46c60dd8ea4d3f3a47691049 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 26 Dec 2020 18:02:33 -0800 Subject: [PATCH 262/474] [dev.regabi] cmd/compile: remove SelectorExpr.Offset field Now that the previous CL ensures we always set SelectorExpr.Selection, we can replace the SelectorExpr.Offset field with a helper method that simply returns SelectorExpr.Selection.Offset. Passes toolstash -cmp. 
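The shape of the change, in miniature (a simplified sketch with
invented names; the real types are ir.SelectorExpr and types.Field):

	package main

	import "fmt"

	// Field stands in for types.Field, the single source of truth
	// for the selected member's offset.
	type Field struct{ Offset int64 }

	// Selector stands in for ir.SelectorExpr. Rather than caching
	// the offset in a duplicate field that can fall out of sync,
	// it derives the value from Selection on demand.
	type Selector struct{ Selection *Field }

	func (s *Selector) Offset() int64 { return s.Selection.Offset }

	func main() {
		f := &Field{Offset: 8}
		s := &Selector{Selection: f}
		fmt.Println(s.Offset()) // 8, always consistent with f.Offset
	}

Deriving the offset on demand removes the possibility of the two
copies disagreeing (the "field offset doesn't match" class of
failures), at the cost of requiring Selection to be set, which the
previous CL guarantees.
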
Change-Id: Id0f22b8b1980397b668f6860d27cb197b90ff52a Reviewed-on: https://go-review.googlesource.com/c/go/+/280433 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/ir/expr.go | 3 +-- .../compile/internal/reflectdata/reflect.go | 2 +- src/cmd/compile/internal/ssagen/ssa.go | 24 ++++++++----------- src/cmd/compile/internal/staticinit/sched.go | 2 +- src/cmd/compile/internal/typecheck/const.go | 2 +- .../compile/internal/typecheck/typecheck.go | 6 ++--- src/cmd/compile/internal/walk/expr.go | 13 ++-------- src/cmd/compile/internal/walk/walk.go | 1 - 8 files changed, 18 insertions(+), 35 deletions(-) diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index a79b78fb45b55..1337d356a17e7 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -572,14 +572,12 @@ type SelectorExpr struct { miniExpr X Node Sel *types.Sym - Offset int64 Selection *types.Field } func NewSelectorExpr(pos src.XPos, op Op, x Node, sel *types.Sym) *SelectorExpr { n := &SelectorExpr{X: x, Sel: sel} n.pos = pos - n.Offset = types.BADWIDTH n.SetOp(op) return n } @@ -596,6 +594,7 @@ func (n *SelectorExpr) SetOp(op Op) { func (n *SelectorExpr) Sym() *types.Sym { return n.Sel } func (n *SelectorExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } func (n *SelectorExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } +func (n *SelectorExpr) Offset() int64 { return n.Selection.Offset } // Before type-checking, bytes.Buffer is a SelectorExpr. // After type-checking it becomes a Name. diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go index 64cc3e87ca180..7c424218962b6 100644 --- a/src/cmd/compile/internal/reflectdata/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -1863,7 +1863,7 @@ func MarkUsedIfaceMethod(n *ir.CallExpr) { r.Sym = tsym // dot.Xoffset is the method index * Widthptr (the offset of code pointer // in itab). 
- midx := dot.Offset / int64(types.PtrSize) + midx := dot.Offset() / int64(types.PtrSize) r.Add = InterfaceMethodOffset(ityp, midx) r.Type = objabi.R_USEIFACEMETHOD } diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 25efeee112e71..9cdf902bcb957 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -2743,7 +2743,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { case ir.ODOTPTR: n := n.(*ir.SelectorExpr) p := s.exprPtr(n.X, n.Bounded(), n.Pos()) - p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset, p) + p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset(), p) return s.load(n.Type(), p) case ir.OINDEX: @@ -4924,7 +4924,7 @@ func (s *state) getClosureAndRcvr(fn *ir.SelectorExpr) (*ssa.Value, *ssa.Value) i := s.expr(fn.X) itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i) s.nilCheck(itab) - itabidx := fn.Offset + 2*int64(types.PtrSize) + 8 // offset of fun field in runtime.itab + itabidx := fn.Offset() + 2*int64(types.PtrSize) + 8 // offset of fun field in runtime.itab closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab) rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i) return closure, rcvr @@ -5028,11 +5028,11 @@ func (s *state) addr(n ir.Node) *ssa.Value { case ir.ODOT: n := n.(*ir.SelectorExpr) p := s.addr(n.X) - return s.newValue1I(ssa.OpOffPtr, t, n.Offset, p) + return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p) case ir.ODOTPTR: n := n.(*ir.SelectorExpr) p := s.exprPtr(n.X, n.Bounded(), n.Pos()) - return s.newValue1I(ssa.OpOffPtr, t, n.Offset, p) + return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p) case ir.OCLOSUREREAD: n := n.(*ir.ClosureReadExpr) return s.newValue1I(ssa.OpOffPtr, t, n.Offset, @@ -7069,21 +7069,17 @@ func (s *State) UseArgs(n int64) { // fieldIdx finds the index of the field referred to by the ODOT node n. 
func fieldIdx(n *ir.SelectorExpr) int { t := n.X.Type() - f := n.Sel if !t.IsStruct() { panic("ODOT's LHS is not a struct") } - var i int - for _, t1 := range t.Fields().Slice() { - if t1.Sym != f { - i++ - continue - } - if t1.Offset != n.Offset { - panic("field offset doesn't match") + for i, f := range t.Fields().Slice() { + if f.Sym == n.Sel { + if f.Offset != n.Offset() { + panic("field offset doesn't match") + } + return i } - return i } panic(fmt.Sprintf("can't find field in expr %v\n", n)) diff --git a/src/cmd/compile/internal/staticinit/sched.go b/src/cmd/compile/internal/staticinit/sched.go index 2a499d6eedbaa..2711f6cec0a9e 100644 --- a/src/cmd/compile/internal/staticinit/sched.go +++ b/src/cmd/compile/internal/staticinit/sched.go @@ -469,7 +469,7 @@ func StaticLoc(n ir.Node) (name *ir.Name, offset int64, ok bool) { if name, offset, ok = StaticLoc(n.X); !ok { break } - offset += n.Offset + offset += n.Offset() return name, offset, true case ir.OINDEX: diff --git a/src/cmd/compile/internal/typecheck/const.go b/src/cmd/compile/internal/typecheck/const.go index 54d70cb8350ca..e22b284e829d2 100644 --- a/src/cmd/compile/internal/typecheck/const.go +++ b/src/cmd/compile/internal/typecheck/const.go @@ -929,7 +929,7 @@ func evalunsafe(n ir.Node) int64 { fallthrough case ir.ODOT: r := r.(*ir.SelectorExpr) - v += r.Offset + v += r.Offset() next = r.X default: ir.Dump("unsafenmagic", tsel) diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index 05a346b8c889d..1d070507fadad 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -1232,7 +1232,7 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { if f1.Offset == types.BADWIDTH { base.Fatalf("lookdot badwidth %v %p", f1, f1) } - n.Offset = f1.Offset + n.Selection = f1 n.SetType(f1.Type) if t.IsInterface() { if n.X.Type().IsPtr() { @@ -1243,7 +1243,6 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { n.SetOp(ir.ODOTINTER) } - n.Selection = f1 return f1 } @@ -1299,10 +1298,9 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { } n.Sel = ir.MethodSym(n.X.Type(), f2.Sym) - n.Offset = f2.Offset + n.Selection = f2 n.SetType(f2.Type) n.SetOp(ir.ODOTMETH) - n.Selection = f2 return f2 } diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index 4eee32cf442f8..f0d9e7c2a1d9a 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -965,22 +965,13 @@ func usefield(n *ir.SelectorExpr) { case ir.ODOT, ir.ODOTPTR: break } - if n.Sel == nil { - // No field name. This DOTPTR was built by the compiler for access - // to runtime data structures. Ignore. 
- return - } - t := n.X.Type() - if t.IsPtr() { - t = t.Elem() - } field := n.Selection if field == nil { base.Fatalf("usefield %v %v without paramfld", n.X.Type(), n.Sel) } - if field.Sym != n.Sel || field.Offset != n.Offset { - base.Fatalf("field inconsistency: %v,%v != %v,%v", field.Sym, field.Offset, n.Sel, n.Offset) + if field.Sym != n.Sel { + base.Fatalf("field inconsistency: %v != %v", field.Sym, n.Sel) } if !strings.Contains(field.Note, "go:\"track\"") { return diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go index 6def35ef24c1c..c4c3debde4819 100644 --- a/src/cmd/compile/internal/walk/walk.go +++ b/src/cmd/compile/internal/walk/walk.go @@ -553,7 +553,6 @@ var itabTypeField *types.Field func boundedDotPtr(pos src.XPos, ptr ir.Node, field *types.Field) *ir.SelectorExpr { sel := ir.NewSelectorExpr(pos, ir.ODOTPTR, ptr, field.Sym) sel.Selection = field - sel.Offset = field.Offset sel.SetType(field.Type) sel.SetTypecheck(1) sel.SetBounded(true) // guaranteed not to fault From 0f732f8c91aa4550ce1803906a55de51760e3243 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 26 Dec 2020 19:30:12 -0800 Subject: [PATCH 263/474] [dev.regabi] cmd/compile: minor walkExpr cleanups This CL cleans up a few minor points in walkExpr: 1. We don't actually care about computing the type-size of all expressions that are walked. We care about computing the type-size of all expressions that are *returned* by walk, as these are the expressions that will actually be seen by the back end. 2. There's no need to call typecheck.EvalConst anymore. EvalConst used to be responsible for doing additional constant folding during walk; but for a while a now, it has done only as much constant folding as is required during type checking (because doing further constant folding led to too many issues with Go spec compliance). Instead, more aggressive constant folding is handled entirely by SSA. 3. The code for detecting string constants and generating their symbols can be simplified somewhat. Passes toolstash -cmp. Change-Id: I464ef5bceb8a97689c8f55435369a3402a5ebc55 Reviewed-on: https://go-review.googlesource.com/c/go/+/280434 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/walk/expr.go | 30 ++++++--------------------- 1 file changed, 6 insertions(+), 24 deletions(-) diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index f0d9e7c2a1d9a..53bffee1817b4 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -26,15 +26,6 @@ func walkExpr(n ir.Node, init *ir.Nodes) ir.Node { return n } - // Eagerly checkwidth all expressions for the back end. - if n.Type() != nil && !n.Type().WidthCalculated() { - switch n.Type().Kind() { - case types.TBLANK, types.TNIL, types.TIDEAL: - default: - types.CheckSize(n.Type()) - } - } - if init == n.PtrInit() { // not okay to use n->ninit when walking n, // because we might replace n with some other node @@ -70,23 +61,14 @@ func walkExpr(n ir.Node, init *ir.Nodes) ir.Node { n = walkExpr1(n, init) - // Expressions that are constant at run time but not - // considered const by the language spec are not turned into - // constants until walk. For example, if n is y%1 == 0, the - // walk of y%1 may have replaced it by 0. - // Check whether n with its updated args is itself now a constant. 
- t := n.Type() - n = typecheck.EvalConst(n) - if n.Type() != t { - base.Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type()) - } - if n.Op() == ir.OLITERAL { - n = typecheck.Expr(n) + // Eagerly compute sizes of all expressions for the back end. + if typ := n.Type(); typ != nil && typ.Kind() != types.TBLANK && !typ.IsFuncArgStruct() { + types.CheckSize(typ) + } + if ir.IsConst(n, constant.String) { // Emit string symbol now to avoid emitting // any concurrently during the backend. - if v := n.Val(); v.Kind() == constant.String { - _ = staticdata.StringSym(n.Pos(), constant.StringVal(v)) - } + _ = staticdata.StringSym(n.Pos(), constant.StringVal(n.Val())) } updateHasCall(n) From 135ce1c485d0563d285f47a748a6d56594571a91 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 26 Dec 2020 18:33:27 -0800 Subject: [PATCH 264/474] [dev.regabi] cmd/compile: desugar OMETHEXPR into ONAME during walk A subsequent CL will change FuncName to lazily create the ONAME nodes, which isn't currently safe to do during SSA construction, because that phase is concurrent. Passes toolstash -cmp. Change-Id: Ic24acc1d1160ad93b70ced3baa468f750e689ea6 Reviewed-on: https://go-review.googlesource.com/c/go/+/280435 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/ssagen/ssa.go | 4 ---- src/cmd/compile/internal/walk/expr.go | 26 ++++++++++++++------------ 2 files changed, 14 insertions(+), 16 deletions(-) diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 9cdf902bcb957..082cb7c3210ae 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -2108,10 +2108,6 @@ func (s *state) expr(n ir.Node) *ssa.Value { n := n.(*ir.UnaryExpr) aux := n.X.Sym().Linksym() return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb) - case ir.OMETHEXPR: - n := n.(*ir.MethodExpr) - sym := staticdata.FuncSym(n.FuncName().Sym()).Linksym() - return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb) case ir.ONAME: n := n.(*ir.Name) if n.Class_ == ir.PFUNC { diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index 53bffee1817b4..fd0dd5b06248a 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -88,7 +88,7 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node { base.Fatalf("walkexpr: switch 1 unknown op %+v", n.Op()) panic("unreachable") - case ir.ONONAME, ir.OGETG, ir.ONEWOBJ, ir.OMETHEXPR: + case ir.ONONAME, ir.OGETG, ir.ONEWOBJ: return n case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET: @@ -98,6 +98,11 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node { // stringsym for constant strings. return n + case ir.OMETHEXPR: + // TODO(mdempsky): Do this right after type checking. + n := n.(*ir.MethodExpr) + return n.FuncName() + case ir.ONOT, ir.ONEG, ir.OPLUS, ir.OBITNOT, ir.OREAL, ir.OIMAG, ir.OSPTR, ir.OITAB, ir.OIDATA: n := n.(*ir.UnaryExpr) n.X = walkExpr(n.X, init) @@ -517,31 +522,28 @@ func walkCall1(n *ir.CallExpr, init *ir.Nodes) { return // already walked } - args := n.Args - - n.X = walkExpr(n.X, init) - walkExprList(args, init) - // If this is a method call t.M(...), // rewrite into a function call T.M(t, ...). // TODO(mdempsky): Do this right after type checking. 
if n.Op() == ir.OCALLMETH { - withRecv := make([]ir.Node, len(args)+1) + withRecv := make([]ir.Node, len(n.Args)+1) dot := n.X.(*ir.SelectorExpr) withRecv[0] = dot.X - copy(withRecv[1:], args) - args = withRecv + copy(withRecv[1:], n.Args) + n.Args = withRecv dot = ir.NewSelectorExpr(dot.Pos(), ir.OXDOT, ir.TypeNode(dot.X.Type()), dot.Selection.Sym) - fn := typecheck.Expr(dot).(*ir.MethodExpr).FuncName() - fn.Type().Size() n.SetOp(ir.OCALLFUNC) - n.X = fn + n.X = typecheck.Expr(dot) } + args := n.Args params := n.X.Type().Params() + n.X = walkExpr(n.X, init) + walkExprList(args, init) + // For any argument whose evaluation might require a function call, // store that argument into a temporary variable, // to prevent that calls from clobbering arguments already on the stack. From e6c973198d9f8e68e4dce8637e2d1492032ce939 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 26 Dec 2020 18:56:36 -0800 Subject: [PATCH 265/474] [dev.regabi] cmd/compile: stop mangling SelectorExpr.Sel for ODOTMETH ODOTMETH is unique among SelectorExpr expressions, in that Sel gets mangled so that it no longer has the original identifier that was selected (e.g., just "Foo"), but instead the qualified symbol name for the selected method (e.g., "pkg.Type.Foo"). This is rarely useful, and instead results in a lot of compiler code needing to worry about undoing this change. This CL changes ODOTMETH to leave the original symbol in place. The handful of code locations where the mangled symbol name is actually wanted are updated to use ir.MethodExprName(n).Sym() or (equivalently) ir.MethodExprName(n).Func.Sym() instead. Historically, the compiler backend has mistakenly used types.Syms where it should have used ir.Name/ir.Funcs. And this change in particular may risk breaking something, as the SelectorExpr.Sel will no longer point at a symbol that uniquely identifies the called method. However, I expect CL 280294 (desugar OCALLMETH into OCALLFUNC) to have substantially reduced this risk, as ODOTMETH expressions are now replaced entirely earlier in the compiler. Passes toolstash -cmp. Change-Id: If3c9c3b7df78ea969f135840574cf89e1d263876 Reviewed-on: https://go-review.googlesource.com/c/go/+/280436 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/inline/inl.go | 24 +++++++-------- src/cmd/compile/internal/ir/fmt.go | 4 +-- src/cmd/compile/internal/typecheck/expr.go | 8 ++--- src/cmd/compile/internal/typecheck/func.go | 29 +++++-------------- src/cmd/compile/internal/typecheck/iexport.go | 22 +++++--------- .../compile/internal/typecheck/typecheck.go | 1 - src/cmd/compile/internal/types/fmt.go | 14 +++------ test/fixedbugs/issue31053.dir/main.go | 6 ++-- 8 files changed, 38 insertions(+), 70 deletions(-) diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 9ffb08048a6f6..67162771e9350 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -324,19 +324,17 @@ func (v *hairyVisitor) doNode(n ir.Node) error { if t == nil { base.Fatalf("no function type for [%p] %+v\n", n.X, n.X) } - if types.IsRuntimePkg(n.X.Sym().Pkg) { - fn := n.X.Sym().Name - if fn == "heapBits.nextArena" { - // Special case: explicitly allow - // mid-stack inlining of - // runtime.heapBits.next even though - // it calls slow-path - // runtime.heapBits.nextArena. 
- break - } + fn := ir.MethodExprName(n.X).Func + if types.IsRuntimePkg(fn.Sym().Pkg) && fn.Sym().Name == "heapBits.nextArena" { + // Special case: explicitly allow + // mid-stack inlining of + // runtime.heapBits.next even though + // it calls slow-path + // runtime.heapBits.nextArena. + break } - if inlfn := ir.MethodExprName(n.X).Func; inlfn.Inl != nil { - v.budget -= inlfn.Inl.Cost + if fn.Inl != nil { + v.budget -= fn.Inl.Cost break } // Call cost for non-leaf inlining. @@ -531,7 +529,7 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No // Prevent inlining some reflect.Value methods when using checkptr, // even when package reflect was compiled without it (#35073). n := n.(*ir.CallExpr) - if s := n.X.Sym(); base.Debug.Checkptr != 0 && types.IsReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") { + if s := ir.MethodExprName(n.X).Sym(); base.Debug.Checkptr != 0 && types.IsReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") { return n } } diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 2b73c5ac1bb4a..f52c639c517fa 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -756,7 +756,7 @@ func exprFmt(n Node, s fmt.State, prec int) { fmt.Fprint(s, ".") return } - fmt.Fprintf(s, ".%s", types.SymMethodName(n.Method.Sym)) + fmt.Fprintf(s, ".%s", n.Method.Sym.Name) case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH: n := n.(*SelectorExpr) @@ -765,7 +765,7 @@ func exprFmt(n Node, s fmt.State, prec int) { fmt.Fprint(s, ".") return } - fmt.Fprintf(s, ".%s", types.SymMethodName(n.Sel)) + fmt.Fprintf(s, ".%s", n.Sel.Name) case ODOTTYPE, ODOTTYPE2: n := n.(*TypeAssertExpr) diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go index 879ae385c7b43..3e7a880c2a466 100644 --- a/src/cmd/compile/internal/typecheck/expr.go +++ b/src/cmd/compile/internal/typecheck/expr.go @@ -571,7 +571,6 @@ func tcDot(n *ir.SelectorExpr, top int) ir.Node { } n.X = typecheck(n.X, ctxExpr|ctxType) - n.X = DefaultLit(n.X, nil) t := n.X.Type() @@ -581,8 +580,6 @@ func tcDot(n *ir.SelectorExpr, top int) ir.Node { return n } - s := n.Sel - if n.X.Op() == ir.OTYPE { return typecheckMethodExpr(n) } @@ -629,7 +626,10 @@ func tcDot(n *ir.SelectorExpr, top int) ir.Node { } if (n.Op() == ir.ODOTINTER || n.Op() == ir.ODOTMETH) && top&ctxCallee == 0 { - return tcCallPart(n, s) + // Create top-level function. + fn := makepartialcall(n) + + return ir.NewCallPartExpr(n.Pos(), n.X, n.Selection, fn) } return n } diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index fdac719ad9bbc..50f514a6db0a6 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -249,7 +249,9 @@ var globClosgen int32 // makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed // for partial calls. -func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir.Func { +func makepartialcall(dot *ir.SelectorExpr) *ir.Func { + t0 := dot.Type() + meth := dot.Sel rcvrtype := dot.X.Type() sym := ir.MethodSymSuffix(rcvrtype, meth, "-fm") @@ -263,11 +265,10 @@ func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir. ir.CurFunc = nil // Set line number equal to the line number where the method is declared. 
- var m *types.Field - if lookdot0(meth, rcvrtype, &m, false) == 1 && m.Pos.IsKnown() { - base.Pos = m.Pos + if pos := dot.Selection.Pos; pos.IsKnown() { + base.Pos = pos } - // Note: !m.Pos.IsKnown() happens for method expressions where + // Note: !dot.Selection.Pos.IsKnown() happens for method expressions where // the method is implicitly declared. The Error method of the // built-in error type is one such method. We leave the line // number at the use of the method expression in this @@ -280,6 +281,7 @@ func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir. fn := DeclFunc(sym, tfn) fn.SetDupok(true) fn.SetNeedctxt(true) + fn.SetWrapper(true) // Declare and initialize variable holding receiver. cr := ir.NewClosureRead(rcvrtype, types.Rnd(int64(types.PtrSize), int64(rcvrtype.Align))) @@ -382,23 +384,6 @@ func tcClosure(clo *ir.ClosureExpr, top int) { Target.Decls = append(Target.Decls, fn) } -func tcCallPart(n ir.Node, sym *types.Sym) *ir.CallPartExpr { - switch n.Op() { - case ir.ODOTINTER, ir.ODOTMETH: - break - - default: - base.Fatalf("invalid typecheckpartialcall") - } - dot := n.(*ir.SelectorExpr) - - // Create top-level function. - fn := makepartialcall(dot, dot.Type(), sym) - fn.SetWrapper(true) - - return ir.NewCallPartExpr(dot.Pos(), dot.X, dot.Selection, fn) -} - // type check function definition // To be called by typecheck, not directly. // (Call typecheckFunc instead.) diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index 449d99266d365..0c813a71ef41a 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -594,23 +594,15 @@ func (w *exportWriter) selector(s *types.Sym) { base.Fatalf("missing currPkg") } - // Method selectors are rewritten into method symbols (of the - // form T.M) during typechecking, but we want to write out - // just the bare method name. - name := s.Name - if i := strings.LastIndex(name, "."); i >= 0 { - name = name[i+1:] - } else { - pkg := w.currPkg - if types.IsExported(name) { - pkg = types.LocalPkg - } - if s.Pkg != pkg { - base.Fatalf("package mismatch in selector: %v in package %q, but want %q", s, s.Pkg.Path, pkg.Path) - } + pkg := w.currPkg + if types.IsExported(s.Name) { + pkg = types.LocalPkg + } + if s.Pkg != pkg { + base.Fatalf("package mismatch in selector: %v in package %q, but want %q", s, s.Pkg.Path, pkg.Path) } - w.string(name) + w.string(s.Name) } func (w *exportWriter) typ(t *types.Type) { diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index 1d070507fadad..b779f9ceb0f14 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -1297,7 +1297,6 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { return nil } - n.Sel = ir.MethodSym(n.X.Type(), f2.Sym) n.Selection = f2 n.SetType(f2.Type) n.SetOp(ir.ODOTMETH) diff --git a/src/cmd/compile/internal/types/fmt.go b/src/cmd/compile/internal/types/fmt.go index bf37f01922a43..cd0679f6b9efa 100644 --- a/src/cmd/compile/internal/types/fmt.go +++ b/src/cmd/compile/internal/types/fmt.go @@ -180,15 +180,6 @@ func symfmt(b *bytes.Buffer, s *Sym, verb rune, mode fmtMode) { b.WriteString(s.Name) } -func SymMethodName(s *Sym) string { - // Skip leading "type." 
in method name - name := s.Name - if i := strings.LastIndex(name, "."); i >= 0 { - name = name[i+1:] - } - return name -} - // Type var BasicTypeNames = []string{ @@ -595,7 +586,10 @@ func fldconv(b *bytes.Buffer, f *Field, verb rune, mode fmtMode, visited map[*Ty if funarg != FunargNone { name = fmt.Sprint(f.Nname) } else if verb == 'L' { - name = SymMethodName(s) + name = s.Name + if name == ".F" { + name = "F" // Hack for toolstash -cmp. + } if !IsExported(name) && mode != fmtTypeIDName { name = sconv(s, 0, mode) // qualify non-exported names (used on structs, not on funarg) } diff --git a/test/fixedbugs/issue31053.dir/main.go b/test/fixedbugs/issue31053.dir/main.go index 895c262164ca4..3bc75d17d2d80 100644 --- a/test/fixedbugs/issue31053.dir/main.go +++ b/test/fixedbugs/issue31053.dir/main.go @@ -35,8 +35,8 @@ func main() { _ = f.Exported _ = f.exported // ERROR "f.exported undefined .type f1.Foo has no field or method exported, but does have Exported." _ = f.Unexported // ERROR "f.Unexported undefined .type f1.Foo has no field or method Unexported." - _ = f.unexported // ERROR "f.unexported undefined .cannot refer to unexported field or method f1..\*Foo..unexported." - f.unexported = 10 // ERROR "f.unexported undefined .cannot refer to unexported field or method f1..\*Foo..unexported." - f.unexported() // ERROR "f.unexported undefined .cannot refer to unexported field or method f1..\*Foo..unexported." + _ = f.unexported // ERROR "f.unexported undefined .cannot refer to unexported field or method unexported." + f.unexported = 10 // ERROR "f.unexported undefined .cannot refer to unexported field or method unexported." + f.unexported() // ERROR "f.unexported undefined .cannot refer to unexported field or method unexported." _ = f.hook // ERROR "f.hook undefined .cannot refer to unexported field or method hook." } From 4c215c4fa934990d159c549bcdd85f9be92287cd Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 26 Dec 2020 19:55:57 -0800 Subject: [PATCH 266/474] [dev.regabi] cmd/compile: simplify and optimize reorder3 reorder3 is the code responsible for ensuring that evaluation of an N:N parallel assignment statement respects the order of evaluation rules specified for Go. This CL simplifies the code and improves it from an O(N^2) algorithm to O(N). Passes toolstash -cmp. Change-Id: I04cd31613af6924f637b042be8ad039ec6a924c2 Reviewed-on: https://go-review.googlesource.com/c/go/+/280437 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/walk/assign.go | 237 +++++++++--------------- 1 file changed, 85 insertions(+), 152 deletions(-) diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go index 99c1abd73f604..3f229dd9f616b 100644 --- a/src/cmd/compile/internal/walk/assign.go +++ b/src/cmd/compile/internal/walk/assign.go @@ -395,10 +395,35 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { // // function calls have been removed. func reorder3(all []*ir.AssignStmt) []ir.Node { + var assigned ir.NameSet + var memWrite bool + + // affected reports whether expression n could be affected by + // the assignments applied so far. 
+ affected := func(n ir.Node) bool { + return ir.Any(n, func(n ir.Node) bool { + if n.Op() == ir.ONAME && assigned.Has(n.(*ir.Name)) { + return true + } + if memWrite && readsMemory(n) { + return true + } + return false + }) + } + // If a needed expression may be affected by an // earlier assignment, make an early copy of that // expression and use the copy instead. var early []ir.Node + save := func(np *ir.Node) { + if n := *np; affected(n) { + tmp := ir.Node(typecheck.Temp(n.Type())) + as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, tmp, n)) + early = append(early, as) + *np = tmp + } + } var mapinit ir.Nodes for i, n := range all { @@ -407,19 +432,18 @@ func reorder3(all []*ir.AssignStmt) []ir.Node { // Save subexpressions needed on left side. // Drill through non-dereferences. for { - switch ll := l; ll.Op() { - case ir.ODOT: - ll := ll.(*ir.SelectorExpr) - l = ll.X - continue - case ir.OPAREN: - ll := ll.(*ir.ParenExpr) + switch ll := l.(type) { + case *ir.IndexExpr: + if ll.X.Type().IsArray() { + save(&ll.Index) + l = ll.X + continue + } + case *ir.ParenExpr: l = ll.X continue - case ir.OINDEX: - ll := ll.(*ir.IndexExpr) - if ll.X.Type().IsArray() { - ll.Index = reorder3save(ll.Index, all, i, &early) + case *ir.SelectorExpr: + if ll.Op() == ir.ODOT { l = ll.X continue } @@ -427,181 +451,90 @@ func reorder3(all []*ir.AssignStmt) []ir.Node { break } + var name *ir.Name switch l.Op() { default: base.Fatalf("reorder3 unexpected lvalue %v", l.Op()) case ir.ONAME: - break + name = l.(*ir.Name) case ir.OINDEX, ir.OINDEXMAP: l := l.(*ir.IndexExpr) - l.X = reorder3save(l.X, all, i, &early) - l.Index = reorder3save(l.Index, all, i, &early) + save(&l.X) + save(&l.Index) if l.Op() == ir.OINDEXMAP { all[i] = convas(all[i], &mapinit) } case ir.ODEREF: l := l.(*ir.StarExpr) - l.X = reorder3save(l.X, all, i, &early) + save(&l.X) case ir.ODOTPTR: l := l.(*ir.SelectorExpr) - l.X = reorder3save(l.X, all, i, &early) + save(&l.X) } // Save expression on right side. - all[i].Y = reorder3save(all[i].Y, all, i, &early) - } - - early = append(mapinit, early...) - for _, as := range all { - early = append(early, as) - } - return early -} - -// if the evaluation of *np would be affected by the -// assignments in all up to but not including the ith assignment, -// copy into a temporary during *early and -// replace *np with that temp. -// The result of reorder3save MUST be assigned back to n, e.g. -// n.Left = reorder3save(n.Left, all, i, early) -func reorder3save(n ir.Node, all []*ir.AssignStmt, i int, early *[]ir.Node) ir.Node { - if !aliased(n, all[:i]) { - return n - } - - q := ir.Node(typecheck.Temp(n.Type())) - as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, q, n)) - *early = append(*early, as) - return q -} - -// Is it possible that the computation of r might be -// affected by assignments in all? -func aliased(r ir.Node, all []*ir.AssignStmt) bool { - if r == nil { - return false - } - - // Treat all fields of a struct as referring to the whole struct. - // We could do better but we would have to keep track of the fields. - for r.Op() == ir.ODOT { - r = r.(*ir.SelectorExpr).X - } - - // Look for obvious aliasing: a variable being assigned - // during the all list and appearing in n. - // Also record whether there are any writes to addressable - // memory (either main memory or variables whose addresses - // have been taken). - memwrite := false - for _, as := range all { - // We can ignore assignments to blank. 
- if ir.IsBlank(as.X) {
- continue
- }

- lv := ir.OuterValue(as.X)
- if lv.Op() != ir.ONAME {
- memwrite = true
+ save(&all[i].Y)

+ if name == nil || name.Addrtaken() || name.Class_ == ir.PEXTERN || name.Class_ == ir.PAUTOHEAP {
+ memWrite = true
continue
}
- l := lv.(*ir.Name)
-
- switch l.Class_ {
- default:
- base.Fatalf("unexpected class: %v, %v", l, l.Class_)
-
- case ir.PAUTOHEAP, ir.PEXTERN:
- memwrite = true
+ if ir.IsBlank(name) {
+ // We can ignore assignments to blank.
continue
-
- case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
- if l.Name().Addrtaken() {
- memwrite = true
- continue
- }
-
- if refersToName(l, r) {
- // Direct hit: l appears in r.
- return true
- }
}
+ assigned.Add(name)
}

- // The variables being written do not appear in r.
- // However, r might refer to computed addresses
- // that are being written.
-
- // If no computed addresses are affected by the writes, no aliasing.
- if !memwrite {
- return false
+ early = append(mapinit, early...)
+ for _, as := range all {
+ early = append(early, as)
}
+ return append(early, late...)
+}

- // If r does not refer to any variables whose addresses have been taken,
- // then the only possible writes to r would be directly to the variables,
- // and we checked those above, so no aliasing problems.
- if !anyAddrTaken(r) {
+// readsMemory reports whether the evaluation n directly reads from
+// memory that might be written to indirectly.
+func readsMemory(n ir.Node) bool {
+ switch n.Op() {
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ return n.Class_ == ir.PEXTERN || n.Class_ == ir.PAUTOHEAP || n.Addrtaken()
+
+ case ir.OADD,
+ ir.OAND,
+ ir.OANDAND,
+ ir.OANDNOT,
+ ir.OBITNOT,
+ ir.OCONV,
+ ir.OCONVIFACE,
+ ir.OCONVNOP,
+ ir.ODIV,
+ ir.ODOT,
+ ir.ODOTTYPE,
+ ir.OLITERAL,
+ ir.OLSH,
+ ir.OMOD,
+ ir.OMUL,
+ ir.ONEG,
+ ir.ONIL,
+ ir.OOR,
+ ir.OOROR,
+ ir.OPAREN,
+ ir.OPLUS,
+ ir.ORSH,
+ ir.OSUB,
+ ir.OXOR:
return false
}

- // Otherwise, both the writes and r refer to computed memory addresses.
- // Assume that they might conflict.
+ // Be conservative.
return true
}

-// anyAddrTaken reports whether the evaluation n,
-// which appears on the left side of an assignment,
-// may refer to variables whose addresses have been taken.
-func anyAddrTaken(n ir.Node) bool {
- return ir.Any(n, func(n ir.Node) bool {
- switch n.Op() {
- case ir.ONAME:
- n := n.(*ir.Name)
- return n.Class_ == ir.PEXTERN || n.Class_ == ir.PAUTOHEAP || n.Name().Addrtaken()
-
- case ir.ODOT: // but not ODOTPTR - should have been handled in aliased.
- base.Fatalf("anyAddrTaken unexpected ODOT")
-
- case ir.OADD,
- ir.OAND,
- ir.OANDAND,
- ir.OANDNOT,
- ir.OBITNOT,
- ir.OCONV,
- ir.OCONVIFACE,
- ir.OCONVNOP,
- ir.ODIV,
- ir.ODOTTYPE,
- ir.OLITERAL,
- ir.OLSH,
- ir.OMOD,
- ir.OMUL,
- ir.ONEG,
- ir.ONIL,
- ir.OOR,
- ir.OOROR,
- ir.OPAREN,
- ir.OPLUS,
- ir.ORSH,
- ir.OSUB,
- ir.OXOR:
- return false
- }
- // Be conservative.
- return true
- })
-}
-
-// refersToName reports whether r refers to name.
-func refersToName(name *ir.Name, r ir.Node) bool {
- return ir.Any(r, func(r ir.Node) bool {
- return r.Op() == ir.ONAME && r == name
- })
-}
-
// refersToCommonName reports whether any name
// appears in common between l and r.
// This is called from sinit.go.

From c98548e1109e9fbe29ef2a8c7c275b241aaacd3b Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Sat, 26 Dec 2020 21:10:16 -0800
Subject: [PATCH 267/474] [dev.regabi] cmd/compile: merge ascompatee, ascompatee1, and reorder3

These functions are interrelated and have arbitrarily overlapping
responsibilities.
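The behavior they jointly implement is Go's two-phase assignment
order; for instance (an illustration of the language rule, not code
from this patch):

	x, y = y, x // both RHS operands are evaluated before either store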
By joining them together, we simplify the code and remove some redundancy. Passes toolstash -cmp. Change-Id: I7c42cb7171b3006bc790199be3fd0991e6e985f2 Reviewed-on: https://go-review.googlesource.com/c/go/+/280438 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/walk/assign.go | 104 ++++++++---------------- 1 file changed, 32 insertions(+), 72 deletions(-) diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go index 3f229dd9f616b..99541c58d9d06 100644 --- a/src/cmd/compile/internal/walk/assign.go +++ b/src/cmd/compile/internal/walk/assign.go @@ -297,54 +297,6 @@ func fncall(l ir.Node, rt *types.Type) bool { return true } -func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node { - // check assign expression list to - // an expression list. called in - // expr-list = expr-list - - // ensure order of evaluation for function calls - for i := range nl { - nl[i] = safeExpr(nl[i], init) - } - for i1 := range nr { - nr[i1] = safeExpr(nr[i1], init) - } - - var nn []*ir.AssignStmt - i := 0 - for ; i < len(nl); i++ { - if i >= len(nr) { - break - } - // Do not generate 'x = x' during return. See issue 4014. - if op == ir.ORETURN && ir.SameSafeExpr(nl[i], nr[i]) { - continue - } - nn = append(nn, ascompatee1(nl[i], nr[i], init)) - } - - // cannot happen: caller checked that lists had same length - if i < len(nl) || i < len(nr) { - var nln, nrn ir.Nodes - nln.Set(nl) - nrn.Set(nr) - base.Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), ir.FuncName(ir.CurFunc)) - } - return reorder3(nn) -} - -func ascompatee1(l ir.Node, r ir.Node, init *ir.Nodes) *ir.AssignStmt { - // convas will turn map assigns into function calls, - // making it impossible for reorder3 to work. - n := ir.NewAssignStmt(base.Pos, l, r) - - if l.Op() == ir.OINDEXMAP { - return n - } - - return convas(n, init) -} - // check assign type list to // an expression list. called in // expr-list = func() @@ -387,14 +339,23 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { return append(nn, mm...) } -// reorder3 -// from ascompatee -// a,b = c,d -// simultaneous assignment. there cannot -// be later use of an earlier lvalue. -// -// function calls have been removed. -func reorder3(all []*ir.AssignStmt) []ir.Node { +// check assign expression list to +// an expression list. called in +// expr-list = expr-list +func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node { + // cannot happen: should have been rejected during type checking + if len(nl) != len(nr) { + base.Fatalf("assignment operands mismatch: %+v / %+v", ir.Nodes(nl), ir.Nodes(nr)) + } + + // ensure order of evaluation for function calls + for i := range nl { + nl[i] = safeExpr(nl[i], init) + } + for i := range nr { + nr[i] = safeExpr(nr[i], init) + } + var assigned ir.NameSet var memWrite bool @@ -425,9 +386,16 @@ func reorder3(all []*ir.AssignStmt) []ir.Node { } } - var mapinit ir.Nodes - for i, n := range all { - l := n.X + var late []ir.Node + for i, l := range nl { + r := nr[i] + + // Do not generate 'x = x' during return. See issue 4014. + if op == ir.ORETURN && ir.SameSafeExpr(l, r) { + continue + } + + as := ir.NewAssignStmt(base.Pos, l, r) // Save subexpressions needed on left side. // Drill through non-dereferences. 
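// (Illustration, not part of the patch: under Go's two-phase
// assignment order, "i, x[i] = 1, 2" sets i = 1 and x[0] = 2, because
// the index in x[i] is evaluated before any store; the save helper
// above copies such operands to temporaries whenever an earlier
// assignment in the same statement might affect them.)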
@@ -454,19 +422,13 @@ func reorder3(all []*ir.AssignStmt) []ir.Node { var name *ir.Name switch l.Op() { default: - base.Fatalf("reorder3 unexpected lvalue %v", l.Op()) - + base.Fatalf("unexpected lvalue %v", l.Op()) case ir.ONAME: name = l.(*ir.Name) - case ir.OINDEX, ir.OINDEXMAP: l := l.(*ir.IndexExpr) save(&l.X) save(&l.Index) - if l.Op() == ir.OINDEXMAP { - all[i] = convas(all[i], &mapinit) - } - case ir.ODEREF: l := l.(*ir.StarExpr) save(&l.X) @@ -476,7 +438,9 @@ func reorder3(all []*ir.AssignStmt) []ir.Node { } // Save expression on right side. - save(&all[i].Y) + save(&as.Y) + + late = append(late, convas(as, init)) if name == nil || name.Addrtaken() || name.Class_ == ir.PEXTERN || name.Class_ == ir.PAUTOHEAP { memWrite = true @@ -489,11 +453,7 @@ func reorder3(all []*ir.AssignStmt) []ir.Node { assigned.Add(name) } - early = append(mapinit, early...) - for _, as := range all { - early = append(early, as) - } - return early + return append(early, late...) } // readsMemory reports whether the evaluation n directly reads from From 676d794b8119a40aaa0aa00124f367bd72eeff9c Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 26 Dec 2020 21:33:17 -0800 Subject: [PATCH 268/474] [dev.regabi] cmd/compile: remove refersToCommonName After reorder3's simplification, the only remaining use of refersToCommonName is in oaslit, where the LHS expression is always a single name. We can replace the now overly-generalized refersToCommonName with a simple ir.Any traversal with ir.Uses. Passes toolstash -cmp. Change-Id: Ice3020cdbbf6083d52e07866a687580f4eb134b8 Reviewed-on: https://go-review.googlesource.com/c/go/+/280439 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/walk/assign.go | 49 ------------------------ src/cmd/compile/internal/walk/complit.go | 3 +- 2 files changed, 2 insertions(+), 50 deletions(-) diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go index 99541c58d9d06..c01079d236bc0 100644 --- a/src/cmd/compile/internal/walk/assign.go +++ b/src/cmd/compile/internal/walk/assign.go @@ -495,55 +495,6 @@ func readsMemory(n ir.Node) bool { return true } -// refersToCommonName reports whether any name -// appears in common between l and r. -// This is called from sinit.go. -func refersToCommonName(l ir.Node, r ir.Node) bool { - if l == nil || r == nil { - return false - } - - // This could be written elegantly as a Find nested inside a Find: - // - // found := ir.Find(l, func(l ir.Node) interface{} { - // if l.Op() == ir.ONAME { - // return ir.Find(r, func(r ir.Node) interface{} { - // if r.Op() == ir.ONAME && l.Name() == r.Name() { - // return r - // } - // return nil - // }) - // } - // return nil - // }) - // return found != nil - // - // But that would allocate a new closure for the inner Find - // for each name found on the left side. - // It may not matter at all, but the below way of writing it - // only allocates two closures, not O(|L|) closures. - - var doL, doR func(ir.Node) error - var targetL *ir.Name - doR = func(r ir.Node) error { - if r.Op() == ir.ONAME && r.Name() == targetL { - return stop - } - return ir.DoChildren(r, doR) - } - doL = func(l ir.Node) error { - if l.Op() == ir.ONAME { - l := l.(*ir.Name) - targetL = l.Name() - if doR(r) == stop { - return stop - } - } - return ir.DoChildren(l, doL) - } - return doL(l) == stop -} - // expand append(l1, l2...) 
to
// init {
// s := l1
diff --git a/src/cmd/compile/internal/walk/complit.go b/src/cmd/compile/internal/walk/complit.go
index b53fe2e935abf..8c4f9583ef5df 100644
--- a/src/cmd/compile/internal/walk/complit.go
+++ b/src/cmd/compile/internal/walk/complit.go
@@ -629,6 +629,7 @@ func oaslit(n *ir.AssignStmt, init *ir.Nodes) bool {
// not a special composite literal assignment
return false
}
+ x := n.X.(*ir.Name)
if !types.Identical(n.X.Type(), n.Y.Type()) {
// not a special composite literal assignment
return false
@@ -640,7 +641,7 @@ func oaslit(n *ir.AssignStmt, init *ir.Nodes) bool {
return false

case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT:
- if refersToCommonName(n.X, n.Y) {
+ if ir.Any(n.Y, func(y ir.Node) bool { return ir.Uses(y, x) }) {
// not a special composite literal assignment
return false
}

From 6c676775419b4cfc9f1a3b8959d538b81cec754e Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Sat, 26 Dec 2020 21:43:30 -0800
Subject: [PATCH 269/474] [dev.regabi] cmd/compile: simplify FuncName and PkgFuncName

Now that we have proper types, these functions can be restricted to
only allowing *ir.Func, rather than any ir.Node. And even more
fortunately, all of their callers already happen to always pass
*ir.Func arguments, making this CL pretty simple.

Passes toolstash -cmp.

Change-Id: I21ecd4c8cee3ccb8ba86b17cedb2e71c56ffe87a
Reviewed-on: https://go-review.googlesource.com/c/go/+/280440
Trust: Matthew Dempsky
Run-TryBot: Matthew Dempsky
TryBot-Result: Go Bot
Reviewed-by: Cuong Manh Le
---
src/cmd/compile/internal/ir/func.go | 38 ++++------------------------
1 file changed, 5 insertions(+), 33 deletions(-)

diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go
index 6bc8cd574c579..16d67f6ae0c11 100644
--- a/src/cmd/compile/internal/ir/func.go
+++ b/src/cmd/compile/internal/ir/func.go
@@ -206,50 +206,22 @@ func (f *Func) SetWBPos(pos src.XPos) {
}

// funcname returns the name (without the package) of the function n.
-func FuncName(n Node) string {
- var f *Func
- switch n := n.(type) {
- case *Func:
- f = n
- case *Name:
- f = n.Func
- case *CallPartExpr:
- f = n.Func
- case *ClosureExpr:
- f = n.Func
- }
+func FuncName(f *Func) string {
if f == nil || f.Nname == nil {
return ""
}
- return f.Nname.Sym().Name
+ return f.Sym().Name
}

// pkgFuncName returns the name of the function referenced by n, with package prepended.
// This differs from the compiler's internal convention where local functions lack a package
// because the ultimate consumer of this is a human looking at an IDE; package is only empty
// if the compilation package is actually the empty string.
-func PkgFuncName(n Node) string {
- var s *types.Sym
- if n == nil {
+func PkgFuncName(f *Func) string {
+ if f == nil || f.Nname == nil {
return ""
}
- if n.Op() == ONAME {
- s = n.Sym()
- } else {
- var f *Func
- switch n := n.(type) {
- case *CallPartExpr:
- f = n.Func
- case *ClosureExpr:
- f = n.Func
- case *Func:
- f = n
- }
- if f == nil || f.Nname == nil {
- return ""
- }
- s = f.Nname.Sym()
- }
+ s := f.Sym()
pkg := s.Pkg

p := base.Ctxt.Pkgpath

From fbc4458c068459940c63952bcc6a697728f508fc Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Sat, 26 Dec 2020 22:00:53 -0800
Subject: [PATCH 270/474] [dev.regabi] cmd/compile: simplify some tree traversal code

When looking for referenced functions within bottomUpVisitor and
initDeps, the logic for ODOTMETH, OCALLPART, and OMETHEXPR is
basically identical, especially after previous refactorings to make
them use MethodExprName.
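In each place the traversal reduces to the same shape (a sketch
condensed from the scc.go diff below):

	case ODOTMETH, OCALLPART, OMETHEXPR:
		if fn := MethodExprName(n); fn != nil {
			do(fn.Defn)
		}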
This CL makes them exactly identical. Passes toolstash -cmp. Change-Id: I1f59c9be99aa9484d0397a0a6fb8ddd894a31c68 Reviewed-on: https://go-review.googlesource.com/c/go/+/280441 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/ir/scc.go | 49 ++++++------------- src/cmd/compile/internal/pkginit/initorder.go | 6 +-- 2 files changed, 15 insertions(+), 40 deletions(-) diff --git a/src/cmd/compile/internal/ir/scc.go b/src/cmd/compile/internal/ir/scc.go index 4f646e22b5ab1..f35c4d44e9d8b 100644 --- a/src/cmd/compile/internal/ir/scc.go +++ b/src/cmd/compile/internal/ir/scc.go @@ -76,48 +76,27 @@ func (v *bottomUpVisitor) visit(n *Func) uint32 { min := v.visitgen v.stack = append(v.stack, n) + do := func(defn Node) { + if defn != nil { + if m := v.visit(defn.(*Func)); m < min { + min = m + } + } + } + Visit(n, func(n Node) { switch n.Op() { case ONAME: - n := n.(*Name) - if n.Class_ == PFUNC { - if n != nil && n.Name().Defn != nil { - if m := v.visit(n.Name().Defn.(*Func)); m < min { - min = m - } - } + if n := n.(*Name); n.Class_ == PFUNC { + do(n.Defn) } - case OMETHEXPR: - n := n.(*MethodExpr) - fn := MethodExprName(n) - if fn != nil && fn.Defn != nil { - if m := v.visit(fn.Defn.(*Func)); m < min { - min = m - } - } - case ODOTMETH: - n := n.(*SelectorExpr) - fn := MethodExprName(n) - if fn != nil && fn.Op() == ONAME && fn.Class_ == PFUNC && fn.Defn != nil { - if m := v.visit(fn.Defn.(*Func)); m < min { - min = m - } - } - case OCALLPART: - n := n.(*CallPartExpr) - fn := AsNode(n.Method.Nname) - if fn != nil && fn.Op() == ONAME { - if fn := fn.(*Name); fn.Class_ == PFUNC && fn.Name().Defn != nil { - if m := v.visit(fn.Name().Defn.(*Func)); m < min { - min = m - } - } + case ODOTMETH, OCALLPART, OMETHEXPR: + if fn := MethodExprName(n); fn != nil { + do(fn.Defn) } case OCLOSURE: n := n.(*ClosureExpr) - if m := v.visit(n.Func); m < min { - min = m - } + do(n.Func) } }) diff --git a/src/cmd/compile/internal/pkginit/initorder.go b/src/cmd/compile/internal/pkginit/initorder.go index d63c5a4717da6..c6e223954d33a 100644 --- a/src/cmd/compile/internal/pkginit/initorder.go +++ b/src/cmd/compile/internal/pkginit/initorder.go @@ -289,10 +289,6 @@ func (d *initDeps) inspectList(l ir.Nodes) { ir.VisitList(l, d.cachedVisit()) } // referenced by n, if any. func (d *initDeps) visit(n ir.Node) { switch n.Op() { - case ir.OMETHEXPR: - n := n.(*ir.MethodExpr) - d.foundDep(ir.MethodExprName(n)) - case ir.ONAME: n := n.(*ir.Name) switch n.Class_ { @@ -304,7 +300,7 @@ func (d *initDeps) visit(n ir.Node) { n := n.(*ir.ClosureExpr) d.inspectList(n.Func.Body) - case ir.ODOTMETH, ir.OCALLPART: + case ir.ODOTMETH, ir.OCALLPART, ir.OMETHEXPR: d.foundDep(ir.MethodExprName(n)) } } From a59d26603f0dffbe6e914bc9ab29a2f9f70e5408 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 26 Dec 2020 22:23:45 -0800 Subject: [PATCH 271/474] [dev.regabi] cmd/compile: use []*CaseStmt in {Select,Switch}Stmt Select and switch statements only ever contain case statements, so change their Cases fields from Nodes to []*CaseStmt. This allows removing a bunch of type assertions throughout the compiler. CaseStmt should be renamed to CaseClause, and SelectStmt should probably have its own CommClause type instead (like in go/ast and cmd/compile/internal/syntax), but this is a good start. Passes toolstash -cmp. 
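The cleanup is mechanical: clause loops drop their per-iteration type
assertion (the pattern removed throughout the diff below):

	for _, ncase := range sw.Cases {
		ncase := ncase.(*ir.CaseStmt) // no longer needed; Cases is []*CaseStmt
		...
	}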
Change-Id: I2d41d616d44512c2be421e1e2ff13d0ee8b238ad Reviewed-on: https://go-review.googlesource.com/c/go/+/280442 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/escape/escape.go | 2 - src/cmd/compile/internal/ir/mknode.go | 7 +++ src/cmd/compile/internal/ir/node_gen.go | 12 +++--- src/cmd/compile/internal/ir/stmt.go | 43 ++++++++++++++++--- src/cmd/compile/internal/ir/visit.go | 3 +- src/cmd/compile/internal/noder/noder.go | 10 ++--- src/cmd/compile/internal/typecheck/iexport.go | 3 +- src/cmd/compile/internal/typecheck/iimport.go | 4 +- src/cmd/compile/internal/typecheck/stmt.go | 4 -- .../compile/internal/typecheck/typecheck.go | 13 +++--- src/cmd/compile/internal/walk/order.go | 6 +-- src/cmd/compile/internal/walk/select.go | 12 +++--- src/cmd/compile/internal/walk/switch.go | 7 +-- 13 files changed, 73 insertions(+), 53 deletions(-) diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index 31d157b1651fc..d8f0111d2de69 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -369,7 +369,6 @@ func (e *escape) stmt(n ir.Node) { var ks []hole for _, cas := range n.Cases { // cases - cas := cas.(*ir.CaseStmt) if typesw && n.Tag.(*ir.TypeSwitchGuard).Tag != nil { cv := cas.Var k := e.dcl(cv) // type switch variables have no ODCL. @@ -391,7 +390,6 @@ func (e *escape) stmt(n ir.Node) { case ir.OSELECT: n := n.(*ir.SelectStmt) for _, cas := range n.Cases { - cas := cas.(*ir.CaseStmt) e.stmt(cas.Comm) e.block(cas.Body) } diff --git a/src/cmd/compile/internal/ir/mknode.go b/src/cmd/compile/internal/ir/mknode.go index f5dacee622f9d..edf3ee501c1ae 100644 --- a/src/cmd/compile/internal/ir/mknode.go +++ b/src/cmd/compile/internal/ir/mknode.go @@ -37,6 +37,7 @@ func main() { nodeType := lookup("Node") ntypeType := lookup("Ntype") nodesType := lookup("Nodes") + slicePtrCaseStmtType := types.NewSlice(types.NewPointer(lookup("CaseStmt"))) ptrFieldType := types.NewPointer(lookup("Field")) slicePtrFieldType := types.NewSlice(ptrFieldType) ptrIdentType := types.NewPointer(lookup("Ident")) @@ -76,6 +77,8 @@ func main() { switch { case is(nodesType): fmt.Fprintf(&buf, "c.%s = c.%s.Copy()\n", name, name) + case is(slicePtrCaseStmtType): + fmt.Fprintf(&buf, "c.%s = copyCases(c.%s)\n", name, name) case is(ptrFieldType): fmt.Fprintf(&buf, "if c.%s != nil { c.%s = c.%s.copy() }\n", name, name, name) case is(slicePtrFieldType): @@ -94,6 +97,8 @@ func main() { fmt.Fprintf(&buf, "err = maybeDo(n.%s, err, do)\n", name) case is(nodesType): fmt.Fprintf(&buf, "err = maybeDoList(n.%s, err, do)\n", name) + case is(slicePtrCaseStmtType): + fmt.Fprintf(&buf, "err = maybeDoCases(n.%s, err, do)\n", name) case is(ptrFieldType): fmt.Fprintf(&buf, "err = maybeDoField(n.%s, err, do)\n", name) case is(slicePtrFieldType): @@ -113,6 +118,8 @@ func main() { fmt.Fprintf(&buf, "n.%s = toNtype(maybeEdit(n.%s, edit))\n", name, name) case is(nodesType): fmt.Fprintf(&buf, "editList(n.%s, edit)\n", name) + case is(slicePtrCaseStmtType): + fmt.Fprintf(&buf, "editCases(n.%s, edit)\n", name) case is(ptrFieldType): fmt.Fprintf(&buf, "editField(n.%s, edit)\n", name) case is(slicePtrFieldType): diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index ecb39563c46f3..041855bbe9304 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -781,20 +781,20 @@ func (n *SelectStmt) Format(s 
fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *SelectStmt) copy() Node { c := *n c.init = c.init.Copy() - c.Cases = c.Cases.Copy() + c.Cases = copyCases(c.Cases) c.Compiled = c.Compiled.Copy() return &c } func (n *SelectStmt) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) - err = maybeDoList(n.Cases, err, do) + err = maybeDoCases(n.Cases, err, do) err = maybeDoList(n.Compiled, err, do) return err } func (n *SelectStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) - editList(n.Cases, edit) + editCases(n.Cases, edit) editList(n.Compiled, edit) } @@ -945,7 +945,7 @@ func (n *SwitchStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *SwitchStmt) copy() Node { c := *n c.init = c.init.Copy() - c.Cases = c.Cases.Copy() + c.Cases = copyCases(c.Cases) c.Compiled = c.Compiled.Copy() return &c } @@ -953,14 +953,14 @@ func (n *SwitchStmt) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) err = maybeDo(n.Tag, err, do) - err = maybeDoList(n.Cases, err, do) + err = maybeDoCases(n.Cases, err, do) err = maybeDoList(n.Compiled, err, do) return err } func (n *SwitchStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) n.Tag = maybeEdit(n.Tag, edit) - editList(n.Cases, edit) + editCases(n.Cases, edit) editList(n.Compiled, edit) } diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index cfda6fd234dab..ce775a8529ef3 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -191,6 +191,37 @@ func NewCaseStmt(pos src.XPos, list, body []Node) *CaseStmt { return n } +func copyCases(list []*CaseStmt) []*CaseStmt { + if list == nil { + return nil + } + c := make([]*CaseStmt, len(list)) + copy(c, list) + return c +} + +func maybeDoCases(list []*CaseStmt, err error, do func(Node) error) error { + if err != nil { + return err + } + for _, x := range list { + if x != nil { + if err := do(x); err != nil { + return err + } + } + } + return nil +} + +func editCases(list []*CaseStmt, edit func(Node) Node) { + for i, x := range list { + if x != nil { + list[i] = edit(x).(*CaseStmt) + } + } +} + // A ForStmt is a non-range for loop: for Init; Cond; Post { Body } // Op can be OFOR or OFORUNTIL (!Cond). type ForStmt struct { @@ -334,18 +365,18 @@ func (n *ReturnStmt) SetOrig(x Node) { n.orig = x } type SelectStmt struct { miniStmt Label *types.Sym - Cases Nodes + Cases []*CaseStmt HasBreak bool // TODO(rsc): Instead of recording here, replace with a block? 
Compiled Nodes // compiled form, after walkswitch } -func NewSelectStmt(pos src.XPos, cases []Node) *SelectStmt { +func NewSelectStmt(pos src.XPos, cases []*CaseStmt) *SelectStmt { n := &SelectStmt{} n.pos = pos n.op = OSELECT - n.Cases.Set(cases) + n.Cases = cases return n } @@ -367,7 +398,7 @@ func NewSendStmt(pos src.XPos, ch, value Node) *SendStmt { type SwitchStmt struct { miniStmt Tag Node - Cases Nodes // list of *CaseStmt + Cases []*CaseStmt Label *types.Sym HasBreak bool @@ -375,11 +406,11 @@ type SwitchStmt struct { Compiled Nodes // compiled form, after walkswitch } -func NewSwitchStmt(pos src.XPos, tag Node, cases []Node) *SwitchStmt { +func NewSwitchStmt(pos src.XPos, tag Node, cases []*CaseStmt) *SwitchStmt { n := &SwitchStmt{Tag: tag} n.pos = pos n.op = OSWITCH - n.Cases.Set(cases) + n.Cases = cases return n } diff --git a/src/cmd/compile/internal/ir/visit.go b/src/cmd/compile/internal/ir/visit.go index a1c345968f703..8839e1664d3b5 100644 --- a/src/cmd/compile/internal/ir/visit.go +++ b/src/cmd/compile/internal/ir/visit.go @@ -217,10 +217,9 @@ func EditChildren(n Node, edit func(Node) Node) { // Note that editList only calls edit on the nodes in the list, not their children. // If x's children should be processed, edit(x) must call EditChildren(x, edit) itself. func editList(list Nodes, edit func(Node) Node) { - s := list for i, x := range list { if x != nil { - s[i] = edit(x) + list[i] = edit(x) } } } diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index ad66b6c8509ca..b974448338f3a 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -1202,14 +1202,14 @@ func (p *noder) switchStmt(stmt *syntax.SwitchStmt) ir.Node { if l := n.Tag; l != nil && l.Op() == ir.OTYPESW { tswitch = l.(*ir.TypeSwitchGuard) } - n.Cases.Set(p.caseClauses(stmt.Body, tswitch, stmt.Rbrace)) + n.Cases = p.caseClauses(stmt.Body, tswitch, stmt.Rbrace) p.closeScope(stmt.Rbrace) return n } -func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitchGuard, rbrace syntax.Pos) []ir.Node { - nodes := make([]ir.Node, 0, len(clauses)) +func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitchGuard, rbrace syntax.Pos) []*ir.CaseStmt { + nodes := make([]*ir.CaseStmt, 0, len(clauses)) for i, clause := range clauses { p.setlineno(clause) if i > 0 { @@ -1266,8 +1266,8 @@ func (p *noder) simpleStmt(stmt syntax.SimpleStmt) []ir.Node { return []ir.Node{p.stmt(stmt)} } -func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []ir.Node { - nodes := make([]ir.Node, len(clauses)) +func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []*ir.CaseStmt { + nodes := make([]*ir.CaseStmt, len(clauses)) for i, clause := range clauses { p.setlineno(clause) if i > 0 { diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index 0c813a71ef41a..19437a069e11f 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -1181,10 +1181,9 @@ func isNamedTypeSwitch(x ir.Node) bool { return ok && guard.Tag != nil } -func (w *exportWriter) caseList(cases []ir.Node, namedTypeSwitch bool) { +func (w *exportWriter) caseList(cases []*ir.CaseStmt, namedTypeSwitch bool) { w.uint64(uint64(len(cases))) for _, cas := range cases { - cas := cas.(*ir.CaseStmt) w.pos(cas.Pos()) w.stmtList(cas.List) if namedTypeSwitch { diff --git 
a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go index 8285c418e9f8a..fd8314b66217a 100644 --- a/src/cmd/compile/internal/typecheck/iimport.go +++ b/src/cmd/compile/internal/typecheck/iimport.go @@ -767,10 +767,10 @@ func (r *importReader) stmtList() []ir.Node { return list } -func (r *importReader) caseList(switchExpr ir.Node) []ir.Node { +func (r *importReader) caseList(switchExpr ir.Node) []*ir.CaseStmt { namedTypeSwitch := isNamedTypeSwitch(switchExpr) - cases := make([]ir.Node, r.uint64()) + cases := make([]*ir.CaseStmt, r.uint64()) for i := range cases { cas := ir.NewCaseStmt(r.pos(), nil, nil) cas.List.Set(r.stmtList()) diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go index 7e74b730bc117..03c3e399eb456 100644 --- a/src/cmd/compile/internal/typecheck/stmt.go +++ b/src/cmd/compile/internal/typecheck/stmt.go @@ -364,8 +364,6 @@ func tcSelect(sel *ir.SelectStmt) { lno := ir.SetPos(sel) Stmts(sel.Init()) for _, ncase := range sel.Cases { - ncase := ncase.(*ir.CaseStmt) - if len(ncase.List) == 0 { // default if def != nil { @@ -508,7 +506,6 @@ func tcSwitchExpr(n *ir.SwitchStmt) { var defCase ir.Node var cs constSet for _, ncase := range n.Cases { - ncase := ncase.(*ir.CaseStmt) ls := ncase.List if len(ls) == 0 { // default: if defCase != nil { @@ -577,7 +574,6 @@ func tcSwitchType(n *ir.SwitchStmt) { var defCase, nilCase ir.Node var ts typeSet for _, ncase := range n.Cases { - ncase := ncase.(*ir.CaseStmt) ls := ncase.List if len(ls) == 0 { // default: if defCase != nil { diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index b779f9ceb0f14..dabfee3bf9ca7 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -2103,7 +2103,6 @@ func isTermNode(n ir.Node) bool { } def := false for _, cas := range n.Cases { - cas := cas.(*ir.CaseStmt) if !isTermNodes(cas.Body) { return false } @@ -2119,7 +2118,6 @@ func isTermNode(n ir.Node) bool { return false } for _, cas := range n.Cases { - cas := cas.(*ir.CaseStmt) if !isTermNodes(cas.Body) { return false } @@ -2218,9 +2216,6 @@ func deadcodeslice(nn *ir.Nodes) { case ir.OBLOCK: n := n.(*ir.BlockStmt) deadcodeslice(&n.List) - case ir.OCASE: - n := n.(*ir.CaseStmt) - deadcodeslice(&n.Body) case ir.OFOR: n := n.(*ir.ForStmt) deadcodeslice(&n.Body) @@ -2233,10 +2228,14 @@ func deadcodeslice(nn *ir.Nodes) { deadcodeslice(&n.Body) case ir.OSELECT: n := n.(*ir.SelectStmt) - deadcodeslice(&n.Cases) + for _, cas := range n.Cases { + deadcodeslice(&cas.Body) + } case ir.OSWITCH: n := n.(*ir.SwitchStmt) - deadcodeslice(&n.Cases) + for _, cas := range n.Cases { + deadcodeslice(&cas.Body) + } } if cut { diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index 1e41cfc6aaf9a..ebbd467570145 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -914,7 +914,6 @@ func (o *orderState) stmt(n ir.Node) { n := n.(*ir.SelectStmt) t := o.markTemp() for _, ncas := range n.Cases { - ncas := ncas.(*ir.CaseStmt) r := ncas.Comm ir.SetPos(ncas) @@ -996,7 +995,6 @@ func (o *orderState) stmt(n ir.Node) { // Also insert any ninit queued during the previous loop. // (The temporary cleaning must follow that ninit work.) for _, cas := range n.Cases { - cas := cas.(*ir.CaseStmt) orderBlock(&cas.Body, o.free) cas.Body.Prepend(o.cleanTempNoPop(t)...) 
@@ -1036,13 +1034,12 @@ func (o *orderState) stmt(n ir.Node) { n := n.(*ir.SwitchStmt) if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) { // Add empty "default:" case for instrumentation. - n.Cases.Append(ir.NewCaseStmt(base.Pos, nil, nil)) + n.Cases = append(n.Cases, ir.NewCaseStmt(base.Pos, nil, nil)) } t := o.markTemp() n.Tag = o.expr(n.Tag, nil) for _, ncas := range n.Cases { - ncas := ncas.(*ir.CaseStmt) o.exprListInPlace(ncas.List) orderBlock(&ncas.Body, o.free) } @@ -1056,7 +1053,6 @@ func (o *orderState) stmt(n ir.Node) { func hasDefaultCase(n *ir.SwitchStmt) bool { for _, ncas := range n.Cases { - ncas := ncas.(*ir.CaseStmt) if len(ncas.List) == 0 { return true } diff --git a/src/cmd/compile/internal/walk/select.go b/src/cmd/compile/internal/walk/select.go index 5e03732169f66..0b7e7e99fbfb2 100644 --- a/src/cmd/compile/internal/walk/select.go +++ b/src/cmd/compile/internal/walk/select.go @@ -21,7 +21,7 @@ func walkSelect(sel *ir.SelectStmt) { sel.PtrInit().Set(nil) init = append(init, walkSelectCases(sel.Cases)...) - sel.Cases = ir.Nodes{} + sel.Cases = nil sel.Compiled.Set(init) walkStmtList(sel.Compiled) @@ -29,7 +29,7 @@ func walkSelect(sel *ir.SelectStmt) { base.Pos = lno } -func walkSelectCases(cases ir.Nodes) []ir.Node { +func walkSelectCases(cases []*ir.CaseStmt) []ir.Node { ncas := len(cases) sellineno := base.Pos @@ -40,7 +40,7 @@ func walkSelectCases(cases ir.Nodes) []ir.Node { // optimization: one-case select: single op. if ncas == 1 { - cas := cases[0].(*ir.CaseStmt) + cas := cases[0] ir.SetPos(cas) l := cas.Init() if cas.Comm != nil { // not default: @@ -75,7 +75,6 @@ func walkSelectCases(cases ir.Nodes) []ir.Node { // this rewrite is used by both the general code and the next optimization. var dflt *ir.CaseStmt for _, cas := range cases { - cas := cas.(*ir.CaseStmt) ir.SetPos(cas) n := cas.Comm if n == nil { @@ -99,9 +98,9 @@ func walkSelectCases(cases ir.Nodes) []ir.Node { // optimization: two-case select but one is default: single non-blocking op. if ncas == 2 && dflt != nil { - cas := cases[0].(*ir.CaseStmt) + cas := cases[0] if cas == dflt { - cas = cases[1].(*ir.CaseStmt) + cas = cases[1] } n := cas.Comm @@ -170,7 +169,6 @@ func walkSelectCases(cases ir.Nodes) []ir.Node { // register cases for _, cas := range cases { - cas := cas.(*ir.CaseStmt) ir.SetPos(cas) init = append(init, cas.Init()...) diff --git a/src/cmd/compile/internal/walk/switch.go b/src/cmd/compile/internal/walk/switch.go index 141d2e5e053ff..de0b471b34e49 100644 --- a/src/cmd/compile/internal/walk/switch.go +++ b/src/cmd/compile/internal/walk/switch.go @@ -71,7 +71,6 @@ func walkSwitchExpr(sw *ir.SwitchStmt) { var defaultGoto ir.Node var body ir.Nodes for _, ncase := range sw.Cases { - ncase := ncase.(*ir.CaseStmt) label := typecheck.AutoLabel(".s") jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, label) @@ -96,7 +95,7 @@ func walkSwitchExpr(sw *ir.SwitchStmt) { body.Append(br) } } - sw.Cases.Set(nil) + sw.Cases = nil if defaultGoto == nil { br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil) @@ -259,7 +258,6 @@ func allCaseExprsAreSideEffectFree(sw *ir.SwitchStmt) bool { // enough. 
for _, ncase := range sw.Cases {
- ncase := ncase.(*ir.CaseStmt)
for _, v := range ncase.List {
if v.Op() != ir.OLITERAL {
return false
@@ -325,7 +323,6 @@ func walkSwitchType(sw *ir.SwitchStmt) {
var defaultGoto, nilGoto ir.Node
var body ir.Nodes
for _, ncase := range sw.Cases {
- ncase := ncase.(*ir.CaseStmt)
caseVar := ncase.Var

// For single-type cases with an interface type,
@@ -384,7 +381,7 @@ func walkSwitchType(sw *ir.SwitchStmt) {
body.Append(ncase.Body...)
body.Append(br)
}
- sw.Cases.Set(nil)
+ sw.Cases = nil

if defaultGoto == nil {
defaultGoto = br

From ed9772e130d81b3a5a7b9e9b58e8d48a5ec4c319 Mon Sep 17 00:00:00 2001
From: Meng Zhuo
Date: Mon, 28 Dec 2020 15:22:47 +0800
Subject: [PATCH 272/474] [dev.regabi] cmd/compile: add explicit file name in types generation

The stringer tool uses `go list` for type detection, which depends on
GOROOT. Unfortunately, changing GOROOT to a development path raises a
version mismatch with internal packages.

Update #43369

Change-Id: Id81334ea5f1ecdbfa81eb2d162944d65664ce727
Reviewed-on: https://go-review.googlesource.com/c/go/+/280572
Trust: Meng Zhuo
Reviewed-by: Matthew Dempsky
Run-TryBot: Matthew Dempsky
TryBot-Result: Go Bot
---
src/cmd/compile/internal/types/alg.go | 2 +-
src/cmd/compile/internal/types/algkind_string.go | 2 +-
src/cmd/compile/internal/types/goversion.go | 2 --
.../types/{etype_string.go => kind_string.go} | 12 ++++++------
src/cmd/compile/internal/types/type.go | 4 ++--
5 files changed, 10 insertions(+), 12 deletions(-)
rename src/cmd/compile/internal/types/{etype_string.go => kind_string.go} (59%)

diff --git a/src/cmd/compile/internal/types/alg.go b/src/cmd/compile/internal/types/alg.go
index 14200e0d162a4..f1a472cca58a0 100644
--- a/src/cmd/compile/internal/types/alg.go
+++ b/src/cmd/compile/internal/types/alg.go
@@ -10,7 +10,7 @@ import "cmd/compile/internal/base"
// hashing a Type.
type AlgKind int

-//go:generate stringer -type AlgKind -trimprefix A
+//go:generate stringer -type AlgKind -trimprefix A alg.go

const (
// These values are known by runtime.
diff --git a/src/cmd/compile/internal/types/algkind_string.go b/src/cmd/compile/internal/types/algkind_string.go
index 8c5a0bc287ee5..a1b518e4dde26 100644
--- a/src/cmd/compile/internal/types/algkind_string.go
+++ b/src/cmd/compile/internal/types/algkind_string.go
@@ -1,4 +1,4 @@
-// Code generated by "stringer -type AlgKind -trimprefix A"; DO NOT EDIT.
+// Code generated by "stringer -type AlgKind -trimprefix A alg.go"; DO NOT EDIT.

package types

diff --git a/src/cmd/compile/internal/types/goversion.go b/src/cmd/compile/internal/types/goversion.go
index 2265f472cf673..1a324aa42fdb7 100644
--- a/src/cmd/compile/internal/types/goversion.go
+++ b/src/cmd/compile/internal/types/goversion.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

-//go:generate go run mkbuiltin.go
-
package types

import (
diff --git a/src/cmd/compile/internal/types/etype_string.go b/src/cmd/compile/internal/types/kind_string.go
similarity index 59%
rename from src/cmd/compile/internal/types/etype_string.go
rename to src/cmd/compile/internal/types/kind_string.go
index e7698296abf26..1e1e84624080b 100644
--- a/src/cmd/compile/internal/types/etype_string.go
+++ b/src/cmd/compile/internal/types/kind_string.go
@@ -1,4 +1,4 @@
-// Code generated by "stringer -type EType -trimprefix T"; DO NOT EDIT.
+// Code generated by "stringer -type Kind -trimprefix T type.go"; DO NOT EDIT.

package types @@ -48,13 +48,13 @@ func _() { _ = x[NTYPE-37] } -const _EType_name = "xxxINT8UINT8INT16UINT16INT32UINT32INT64UINT64INTUINTUINTPTRCOMPLEX64COMPLEX128FLOAT32FLOAT64BOOLPTRFUNCSLICEARRAYSTRUCTCHANMAPINTERFORWANYSTRINGUNSAFEPTRIDEALNILBLANKFUNCARGSCHANARGSSSATUPLERESULTSNTYPE" +const _Kind_name = "xxxINT8UINT8INT16UINT16INT32UINT32INT64UINT64INTUINTUINTPTRCOMPLEX64COMPLEX128FLOAT32FLOAT64BOOLPTRFUNCSLICEARRAYSTRUCTCHANMAPINTERFORWANYSTRINGUNSAFEPTRIDEALNILBLANKFUNCARGSCHANARGSSSATUPLERESULTSNTYPE" -var _EType_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 99, 103, 108, 113, 119, 123, 126, 131, 135, 138, 144, 153, 158, 161, 166, 174, 182, 185, 190, 197, 202} +var _Kind_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 99, 103, 108, 113, 119, 123, 126, 131, 135, 138, 144, 153, 158, 161, 166, 174, 182, 185, 190, 197, 202} func (i Kind) String() string { - if i >= Kind(len(_EType_index)-1) { - return "EType(" + strconv.FormatInt(int64(i), 10) + ")" + if i >= Kind(len(_Kind_index)-1) { + return "Kind(" + strconv.FormatInt(int64(i), 10) + ")" } - return _EType_name[_EType_index[i]:_EType_index[i+1]] + return _Kind_name[_Kind_index[i]:_Kind_index[i+1]] } diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index b5557b492eb7a..6feedbfabc5a7 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -33,9 +33,9 @@ type VarObject interface { RecordFrameOffset(int64) // save frame offset } -//go:generate stringer -type EType -trimprefix T +//go:generate stringer -type Kind -trimprefix T type.go -// EType describes a kind of type. +// Kind describes a kind of type. type Kind uint8 const ( From 2ecf52b841cd48e76df1fe721d29a972c22bf93f Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 26 Dec 2020 22:42:17 -0800 Subject: [PATCH 273/474] [dev.regabi] cmd/compile: separate CommStmt from CaseStmt Like go/ast and cmd/compile/internal/syntax before it, package ir now has separate concrete representations for switch-case clauses and select-communication clauses. Passes toolstash -cmp. 
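
For illustration only (my example, not part of this CL): at the source
level, a switch case may carry a whole expression list, while a select
clause carries a single communication statement, which is why separate
CaseStmt and CommStmt representations make sense:

    package main

    import "fmt"

    func main() {
        x, ch := 2, make(chan int, 1)
        ch <- 7

        switch x {
        case 1, 2, 3: // expression list: becomes CaseStmt.List
            fmt.Println("small")
        }

        select {
        case v := <-ch: // single communication op: a CommStmt clause
            fmt.Println(v)
        }
    }
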
Change-Id: I32667cbae251fe7881be0f434388478433b2414f Reviewed-on: https://go-review.googlesource.com/c/go/+/280443 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/ir/mknode.go | 7 +++ src/cmd/compile/internal/ir/node_gen.go | 31 ++++++++-- src/cmd/compile/internal/ir/stmt.go | 62 +++++++++++++++---- src/cmd/compile/internal/noder/noder.go | 6 +- src/cmd/compile/internal/typecheck/iexport.go | 11 +++- src/cmd/compile/internal/typecheck/iimport.go | 10 ++- src/cmd/compile/internal/walk/select.go | 8 +-- 7 files changed, 109 insertions(+), 26 deletions(-) diff --git a/src/cmd/compile/internal/ir/mknode.go b/src/cmd/compile/internal/ir/mknode.go index edf3ee501c1ae..bc6fa3cd305df 100644 --- a/src/cmd/compile/internal/ir/mknode.go +++ b/src/cmd/compile/internal/ir/mknode.go @@ -38,6 +38,7 @@ func main() { ntypeType := lookup("Ntype") nodesType := lookup("Nodes") slicePtrCaseStmtType := types.NewSlice(types.NewPointer(lookup("CaseStmt"))) + slicePtrCommStmtType := types.NewSlice(types.NewPointer(lookup("CommStmt"))) ptrFieldType := types.NewPointer(lookup("Field")) slicePtrFieldType := types.NewSlice(ptrFieldType) ptrIdentType := types.NewPointer(lookup("Ident")) @@ -79,6 +80,8 @@ func main() { fmt.Fprintf(&buf, "c.%s = c.%s.Copy()\n", name, name) case is(slicePtrCaseStmtType): fmt.Fprintf(&buf, "c.%s = copyCases(c.%s)\n", name, name) + case is(slicePtrCommStmtType): + fmt.Fprintf(&buf, "c.%s = copyComms(c.%s)\n", name, name) case is(ptrFieldType): fmt.Fprintf(&buf, "if c.%s != nil { c.%s = c.%s.copy() }\n", name, name, name) case is(slicePtrFieldType): @@ -99,6 +102,8 @@ func main() { fmt.Fprintf(&buf, "err = maybeDoList(n.%s, err, do)\n", name) case is(slicePtrCaseStmtType): fmt.Fprintf(&buf, "err = maybeDoCases(n.%s, err, do)\n", name) + case is(slicePtrCommStmtType): + fmt.Fprintf(&buf, "err = maybeDoComms(n.%s, err, do)\n", name) case is(ptrFieldType): fmt.Fprintf(&buf, "err = maybeDoField(n.%s, err, do)\n", name) case is(slicePtrFieldType): @@ -120,6 +125,8 @@ func main() { fmt.Fprintf(&buf, "editList(n.%s, edit)\n", name) case is(slicePtrCaseStmtType): fmt.Fprintf(&buf, "editCases(n.%s, edit)\n", name) + case is(slicePtrCommStmtType): + fmt.Fprintf(&buf, "editComms(n.%s, edit)\n", name) case is(ptrFieldType): fmt.Fprintf(&buf, "editField(n.%s, edit)\n", name) case is(slicePtrFieldType): diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 041855bbe9304..5796544b4843f 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -239,7 +239,6 @@ func (n *CaseStmt) doChildren(do func(Node) error) error { err = maybeDoList(n.init, err, do) err = maybeDo(n.Var, err, do) err = maybeDoList(n.List, err, do) - err = maybeDo(n.Comm, err, do) err = maybeDoList(n.Body, err, do) return err } @@ -247,7 +246,6 @@ func (n *CaseStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) n.Var = maybeEdit(n.Var, edit) editList(n.List, edit) - n.Comm = maybeEdit(n.Comm, edit) editList(n.Body, edit) } @@ -295,6 +293,29 @@ func (n *ClosureReadExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) } +func (n *CommStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *CommStmt) copy() Node { + c := *n + c.init = c.init.Copy() + c.List = c.List.Copy() + c.Body = c.Body.Copy() + return &c +} +func (n *CommStmt) doChildren(do func(Node) error) error { + var err error + err = maybeDoList(n.init, err, do) + err = 
maybeDoList(n.List, err, do) + err = maybeDo(n.Comm, err, do) + err = maybeDoList(n.Body, err, do) + return err +} +func (n *CommStmt) editChildren(edit func(Node) Node) { + editList(n.init, edit) + editList(n.List, edit) + n.Comm = maybeEdit(n.Comm, edit) + editList(n.Body, edit) +} + func (n *CompLitExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *CompLitExpr) copy() Node { c := *n @@ -781,20 +802,20 @@ func (n *SelectStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *SelectStmt) copy() Node { c := *n c.init = c.init.Copy() - c.Cases = copyCases(c.Cases) + c.Cases = copyComms(c.Cases) c.Compiled = c.Compiled.Copy() return &c } func (n *SelectStmt) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) - err = maybeDoCases(n.Cases, err, do) + err = maybeDoComms(n.Cases, err, do) err = maybeDoList(n.Compiled, err, do) return err } func (n *SelectStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) - editCases(n.Cases, edit) + editComms(n.Cases, edit) editList(n.Compiled, edit) } diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index ce775a8529ef3..181a0fd582a91 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -178,19 +178,17 @@ type CaseStmt struct { miniStmt Var Node // declared variable for this case in type switch List Nodes // list of expressions for switch, early select - Comm Node // communication case (Exprs[0]) after select is type-checked Body Nodes } func NewCaseStmt(pos src.XPos, list, body []Node) *CaseStmt { - n := &CaseStmt{} + n := &CaseStmt{List: list, Body: body} n.pos = pos n.op = OCASE - n.List.Set(list) - n.Body.Set(body) return n } +// TODO(mdempsky): Generate these with mknode.go. func copyCases(list []*CaseStmt) []*CaseStmt { if list == nil { return nil @@ -199,7 +197,6 @@ func copyCases(list []*CaseStmt) []*CaseStmt { copy(c, list) return c } - func maybeDoCases(list []*CaseStmt, err error, do func(Node) error) error { if err != nil { return err @@ -213,7 +210,6 @@ func maybeDoCases(list []*CaseStmt, err error, do func(Node) error) error { } return nil } - func editCases(list []*CaseStmt, edit func(Node) Node) { for i, x := range list { if x != nil { @@ -222,6 +218,50 @@ func editCases(list []*CaseStmt, edit func(Node) Node) { } } +type CommStmt struct { + miniStmt + List Nodes // list of expressions for switch, early select + Comm Node // communication case (Exprs[0]) after select is type-checked + Body Nodes +} + +func NewCommStmt(pos src.XPos, list, body []Node) *CommStmt { + n := &CommStmt{List: list, Body: body} + n.pos = pos + n.op = OCASE + return n +} + +// TODO(mdempsky): Generate these with mknode.go. +func copyComms(list []*CommStmt) []*CommStmt { + if list == nil { + return nil + } + c := make([]*CommStmt, len(list)) + copy(c, list) + return c +} +func maybeDoComms(list []*CommStmt, err error, do func(Node) error) error { + if err != nil { + return err + } + for _, x := range list { + if x != nil { + if err := do(x); err != nil { + return err + } + } + } + return nil +} +func editComms(list []*CommStmt, edit func(Node) Node) { + for i, x := range list { + if x != nil { + list[i] = edit(x).(*CommStmt) + } + } +} + // A ForStmt is a non-range for loop: for Init; Cond; Post { Body } // Op can be OFOR or OFORUNTIL (!Cond). 
type ForStmt struct { @@ -365,18 +405,17 @@ func (n *ReturnStmt) SetOrig(x Node) { n.orig = x } type SelectStmt struct { miniStmt Label *types.Sym - Cases []*CaseStmt + Cases []*CommStmt HasBreak bool // TODO(rsc): Instead of recording here, replace with a block? Compiled Nodes // compiled form, after walkswitch } -func NewSelectStmt(pos src.XPos, cases []*CaseStmt) *SelectStmt { - n := &SelectStmt{} +func NewSelectStmt(pos src.XPos, cases []*CommStmt) *SelectStmt { + n := &SelectStmt{Cases: cases} n.pos = pos n.op = OSELECT - n.Cases = cases return n } @@ -407,10 +446,9 @@ type SwitchStmt struct { } func NewSwitchStmt(pos src.XPos, tag Node, cases []*CaseStmt) *SwitchStmt { - n := &SwitchStmt{Tag: tag} + n := &SwitchStmt{Tag: tag, Cases: cases} n.pos = pos n.op = OSWITCH - n.Cases = cases return n } diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index b974448338f3a..ff699cd54d4a6 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -1266,8 +1266,8 @@ func (p *noder) simpleStmt(stmt syntax.SimpleStmt) []ir.Node { return []ir.Node{p.stmt(stmt)} } -func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []*ir.CaseStmt { - nodes := make([]*ir.CaseStmt, len(clauses)) +func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []*ir.CommStmt { + nodes := make([]*ir.CommStmt, len(clauses)) for i, clause := range clauses { p.setlineno(clause) if i > 0 { @@ -1275,7 +1275,7 @@ func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []* } p.openScope(clause.Pos()) - nodes[i] = ir.NewCaseStmt(p.pos(clause), p.simpleStmt(clause.Comm), p.stmts(clause.Body)) + nodes[i] = ir.NewCommStmt(p.pos(clause), p.simpleStmt(clause.Comm), p.stmts(clause.Body)) } if len(clauses) > 0 { p.closeScope(rbrace) diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index 19437a069e11f..ef2c4527a934e 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -1144,7 +1144,7 @@ func (w *exportWriter) stmt(n ir.Node) { w.op(n.Op()) w.pos(n.Pos()) w.stmtList(n.Init()) - w.caseList(n.Cases, false) + w.commList(n.Cases) case ir.OSWITCH: n := n.(*ir.SwitchStmt) @@ -1193,6 +1193,15 @@ func (w *exportWriter) caseList(cases []*ir.CaseStmt, namedTypeSwitch bool) { } } +func (w *exportWriter) commList(cases []*ir.CommStmt) { + w.uint64(uint64(len(cases))) + for _, cas := range cases { + w.pos(cas.Pos()) + w.stmtList(cas.List) + w.stmtList(cas.Body) + } +} + func (w *exportWriter) exprList(list ir.Nodes) { for _, n := range list { w.expr(n) diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go index fd8314b66217a..ba7ea2f156afa 100644 --- a/src/cmd/compile/internal/typecheck/iimport.go +++ b/src/cmd/compile/internal/typecheck/iimport.go @@ -789,6 +789,14 @@ func (r *importReader) caseList(switchExpr ir.Node) []*ir.CaseStmt { return cases } +func (r *importReader) commList() []*ir.CommStmt { + cases := make([]*ir.CommStmt, r.uint64()) + for i := range cases { + cases[i] = ir.NewCommStmt(r.pos(), r.stmtList(), r.stmtList()) + } + return cases +} + func (r *importReader) exprList() []ir.Node { var list []ir.Node for { @@ -1035,7 +1043,7 @@ func (r *importReader) node() ir.Node { case ir.OSELECT: pos := r.pos() init := r.stmtList() - n := ir.NewSelectStmt(pos, r.caseList(nil)) + n := ir.NewSelectStmt(pos, r.commList()) 
 		n.PtrInit().Set(init)
 		return n
 
diff --git a/src/cmd/compile/internal/walk/select.go b/src/cmd/compile/internal/walk/select.go
index 0b7e7e99fbfb2..f51684c9b6681 100644
--- a/src/cmd/compile/internal/walk/select.go
+++ b/src/cmd/compile/internal/walk/select.go
@@ -29,7 +29,7 @@ func walkSelect(sel *ir.SelectStmt) {
 	base.Pos = lno
 }
 
-func walkSelectCases(cases []*ir.CaseStmt) []ir.Node {
+func walkSelectCases(cases []*ir.CommStmt) []ir.Node {
 	ncas := len(cases)
 	sellineno := base.Pos
 
@@ -73,7 +73,7 @@ func walkSelectCases(cases []*ir.CaseStmt) []ir.Node {
 	// convert case value arguments to addresses.
 	// this rewrite is used by both the general code and the next optimization.
-	var dflt *ir.CaseStmt
+	var dflt *ir.CommStmt
 	for _, cas := range cases {
 		ir.SetPos(cas)
 		n := cas.Comm
@@ -146,7 +146,7 @@ func walkSelectCases(cases []*ir.CaseStmt) []ir.Node {
 	if dflt != nil {
 		ncas--
 	}
-	casorder := make([]*ir.CaseStmt, ncas)
+	casorder := make([]*ir.CommStmt, ncas)
 	nsends, nrecvs := 0, 0
 
 	var init []ir.Node
@@ -242,7 +242,7 @@ func walkSelectCases(cases []*ir.CaseStmt) []ir.Node {
 	}
 
 	// dispatch cases
-	dispatch := func(cond ir.Node, cas *ir.CaseStmt) {
+	dispatch := func(cond ir.Node, cas *ir.CommStmt) {
 		cond = typecheck.Expr(cond)
 		cond = typecheck.DefaultLit(cond, nil)

From 3bdafb0d82c9908ae04d2765847754df0646df35 Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Sat, 26 Dec 2020 23:03:25 -0800
Subject: [PATCH 274/474] [dev.regabi] cmd/compile: remove CommStmt.List

Package syntax's parser already ensures that select communication
clauses only have one statement, so there's no need for ir's CommStmt
to represent more than one. Instead, noder can just directly populate
Comm in the first place.

Incidentally, this also revealed a latent issue in the inline-body
exporter: we were exporting List (where the communication statement
lives before type-checking) rather than Comm (where it lives after
type-checking, which is when export happens).

Passes toolstash -cmp.
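
As a quick illustration (mine, not from the CL): every select clause
has at most one communication statement, and a default clause has
none, so a single Comm field, nil for default, is enough:

    package main

    func main() {
        ch := make(chan int)
        select {
        case v := <-ch: // the one communication statement -> Comm
            _ = v
        default: // no statement at all -> Comm == nil
        }
    }
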
Change-Id: Ib4eb711527bed297c7332c79ed6e6562a1db2cfa Reviewed-on: https://go-review.googlesource.com/c/go/+/280444 Trust: Matthew Dempsky Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/ir/node_gen.go | 3 --- src/cmd/compile/internal/ir/stmt.go | 13 ++++++----- src/cmd/compile/internal/noder/noder.go | 23 ++++++++----------- src/cmd/compile/internal/typecheck/iexport.go | 2 +- src/cmd/compile/internal/typecheck/iimport.go | 6 +++-- src/cmd/compile/internal/typecheck/stmt.go | 18 +++++---------- 6 files changed, 28 insertions(+), 37 deletions(-) diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 5796544b4843f..74129694251b6 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -297,21 +297,18 @@ func (n *CommStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *CommStmt) copy() Node { c := *n c.init = c.init.Copy() - c.List = c.List.Copy() c.Body = c.Body.Copy() return &c } func (n *CommStmt) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) - err = maybeDoList(n.List, err, do) err = maybeDo(n.Comm, err, do) err = maybeDoList(n.Body, err, do) return err } func (n *CommStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) - editList(n.List, edit) n.Comm = maybeEdit(n.Comm, edit) editList(n.Body, edit) } diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index 181a0fd582a91..0f44acd8b4e29 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -220,13 +220,12 @@ func editCases(list []*CaseStmt, edit func(Node) Node) { type CommStmt struct { miniStmt - List Nodes // list of expressions for switch, early select - Comm Node // communication case (Exprs[0]) after select is type-checked + Comm Node // communication case Body Nodes } -func NewCommStmt(pos src.XPos, list, body []Node) *CommStmt { - n := &CommStmt{List: list, Body: body} +func NewCommStmt(pos src.XPos, comm Node, body []Node) *CommStmt { + n := &CommStmt{Comm: comm, Body: body} n.pos = pos n.op = OCASE return n @@ -274,11 +273,13 @@ type ForStmt struct { HasBreak bool } -func NewForStmt(pos src.XPos, init []Node, cond, post Node, body []Node) *ForStmt { +func NewForStmt(pos src.XPos, init Node, cond, post Node, body []Node) *ForStmt { n := &ForStmt{Cond: cond, Post: post} n.pos = pos n.op = OFOR - n.init.Set(init) + if init != nil { + n.init = []Node{init} + } n.Body.Set(body) return n } diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index ff699cd54d4a6..19a88e21a27b5 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -1149,9 +1149,11 @@ func (p *noder) blockStmt(stmt *syntax.BlockStmt) []ir.Node { func (p *noder) ifStmt(stmt *syntax.IfStmt) ir.Node { p.openScope(stmt.Pos()) - init := p.simpleStmt(stmt.Init) + init := p.stmt(stmt.Init) n := ir.NewIfStmt(p.pos(stmt), p.expr(stmt.Cond), p.blockStmt(stmt.Then), nil) - *n.PtrInit() = init + if init != nil { + *n.PtrInit() = []ir.Node{init} + } if stmt.Else != nil { e := p.stmt(stmt.Else) if e.Op() == ir.OBLOCK { @@ -1186,7 +1188,7 @@ func (p *noder) forStmt(stmt *syntax.ForStmt) ir.Node { return n } - n := ir.NewForStmt(p.pos(stmt), p.simpleStmt(stmt.Init), p.expr(stmt.Cond), p.stmt(stmt.Post), p.blockStmt(stmt.Body)) + n := ir.NewForStmt(p.pos(stmt), p.stmt(stmt.Init), p.expr(stmt.Cond), p.stmt(stmt.Post), p.blockStmt(stmt.Body)) 
p.closeAnotherScope() return n } @@ -1194,9 +1196,11 @@ func (p *noder) forStmt(stmt *syntax.ForStmt) ir.Node { func (p *noder) switchStmt(stmt *syntax.SwitchStmt) ir.Node { p.openScope(stmt.Pos()) - init := p.simpleStmt(stmt.Init) + init := p.stmt(stmt.Init) n := ir.NewSwitchStmt(p.pos(stmt), p.expr(stmt.Tag), nil) - *n.PtrInit() = init + if init != nil { + *n.PtrInit() = []ir.Node{init} + } var tswitch *ir.TypeSwitchGuard if l := n.Tag; l != nil && l.Op() == ir.OTYPESW { @@ -1259,13 +1263,6 @@ func (p *noder) selectStmt(stmt *syntax.SelectStmt) ir.Node { return ir.NewSelectStmt(p.pos(stmt), p.commClauses(stmt.Body, stmt.Rbrace)) } -func (p *noder) simpleStmt(stmt syntax.SimpleStmt) []ir.Node { - if stmt == nil { - return nil - } - return []ir.Node{p.stmt(stmt)} -} - func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []*ir.CommStmt { nodes := make([]*ir.CommStmt, len(clauses)) for i, clause := range clauses { @@ -1275,7 +1272,7 @@ func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []* } p.openScope(clause.Pos()) - nodes[i] = ir.NewCommStmt(p.pos(clause), p.simpleStmt(clause.Comm), p.stmts(clause.Body)) + nodes[i] = ir.NewCommStmt(p.pos(clause), p.stmt(clause.Comm), p.stmts(clause.Body)) } if len(clauses) > 0 { p.closeScope(rbrace) diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index ef2c4527a934e..bf093c60c78ac 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -1197,7 +1197,7 @@ func (w *exportWriter) commList(cases []*ir.CommStmt) { w.uint64(uint64(len(cases))) for _, cas := range cases { w.pos(cas.Pos()) - w.stmtList(cas.List) + w.node(cas.Comm) w.stmtList(cas.Body) } } diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go index ba7ea2f156afa..af2dd84a38715 100644 --- a/src/cmd/compile/internal/typecheck/iimport.go +++ b/src/cmd/compile/internal/typecheck/iimport.go @@ -792,7 +792,7 @@ func (r *importReader) caseList(switchExpr ir.Node) []*ir.CaseStmt { func (r *importReader) commList() []*ir.CommStmt { cases := make([]*ir.CommStmt, r.uint64()) for i := range cases { - cases[i] = ir.NewCommStmt(r.pos(), r.stmtList(), r.stmtList()) + cases[i] = ir.NewCommStmt(r.pos(), r.node(), r.stmtList()) } return cases } @@ -1033,7 +1033,9 @@ func (r *importReader) node() ir.Node { case ir.OFOR: pos, init := r.pos(), r.stmtList() cond, post := r.exprsOrNil() - return ir.NewForStmt(pos, init, cond, post, r.stmtList()) + n := ir.NewForStmt(pos, nil, cond, post, r.stmtList()) + n.PtrInit().Set(init) + return n case ir.ORANGE: pos := r.pos() diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go index 03c3e399eb456..bfeea06e83d9e 100644 --- a/src/cmd/compile/internal/typecheck/stmt.go +++ b/src/cmd/compile/internal/typecheck/stmt.go @@ -360,29 +360,23 @@ func tcReturn(n *ir.ReturnStmt) ir.Node { // select func tcSelect(sel *ir.SelectStmt) { - var def ir.Node + var def *ir.CommStmt lno := ir.SetPos(sel) Stmts(sel.Init()) for _, ncase := range sel.Cases { - if len(ncase.List) == 0 { + if ncase.Comm == nil { // default if def != nil { base.ErrorfAt(ncase.Pos(), "multiple defaults in select (first at %v)", ir.Line(def)) } else { def = ncase } - } else if len(ncase.List) > 1 { - base.ErrorfAt(ncase.Pos(), "select cases cannot be lists") } else { - ncase.List[0] = Stmt(ncase.List[0]) - n := ncase.List[0] + n := Stmt(ncase.Comm) ncase.Comm = 
n - ncase.List.Set(nil) - oselrecv2 := func(dst, recv ir.Node, colas bool) { - n := ir.NewAssignListStmt(n.Pos(), ir.OSELRECV2, nil, nil) - n.Lhs = []ir.Node{dst, ir.BlankNode} - n.Rhs = []ir.Node{recv} - n.Def = colas + oselrecv2 := func(dst, recv ir.Node, def bool) { + n := ir.NewAssignListStmt(n.Pos(), ir.OSELRECV2, []ir.Node{dst, ir.BlankNode}, []ir.Node{recv}) + n.Def = def n.SetTypecheck(1) ncase.Comm = n } From 5f3bd59a0d8a8d6feadc918078f153cc5d0447a8 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 26 Dec 2020 23:09:54 -0800 Subject: [PATCH 275/474] [dev.regabi] cmd/compile: remove some unneeded code in package ir The deepCopy functions haven't been needed since we switched to using Edit everywhere, and AddStringExpr no longer has an Alloc field that needs special casing. Passes toolstash -cmp. Change-Id: I5bcc8c73d5cb784f7e57fb3162ae6e288e6c9392 Reviewed-on: https://go-review.googlesource.com/c/go/+/280445 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/ir/mknode.go | 4 ---- src/cmd/compile/internal/ir/type.go | 28 --------------------------- 2 files changed, 32 deletions(-) diff --git a/src/cmd/compile/internal/ir/mknode.go b/src/cmd/compile/internal/ir/mknode.go index bc6fa3cd305df..5c36b729c7c56 100644 --- a/src/cmd/compile/internal/ir/mknode.go +++ b/src/cmd/compile/internal/ir/mknode.go @@ -169,10 +169,6 @@ func forNodeFields(typName string, typ *types.Struct, f func(name string, is fun case "orig": continue } - switch typName + "." + v.Name() { - case "AddStringExpr.Alloc": - continue - } f(v.Name(), func(t types.Type) bool { return types.Identical(t, v.Type()) }) } } diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go index 5e6d76229d32e..bd3a05d06e573 100644 --- a/src/cmd/compile/internal/ir/type.go +++ b/src/cmd/compile/internal/ir/type.go @@ -115,14 +115,6 @@ func (n *StructType) SetOTYPE(t *types.Type) { n.Fields = nil } -func deepCopyFields(pos src.XPos, fields []*Field) []*Field { - var out []*Field - for _, f := range fields { - out = append(out, f.deepCopy(pos)) - } - return out -} - // An InterfaceType represents a struct { ... } type syntax. type InterfaceType struct { miniType @@ -250,26 +242,6 @@ func editFields(list []*Field, edit func(Node) Node) { } } -func (f *Field) deepCopy(pos src.XPos) *Field { - if f == nil { - return nil - } - fpos := pos - if !pos.IsKnown() { - fpos = f.Pos - } - decl := f.Decl - if decl != nil { - decl = DeepCopy(pos, decl).(*Name) - } - ntype := f.Ntype - if ntype != nil { - ntype = DeepCopy(pos, ntype).(Ntype) - } - // No keyed literal here: if a new struct field is added, we want this to stop compiling. - return &Field{fpos, f.Sym, ntype, f.Type, f.Embedded, f.IsDDD, f.Note, decl} -} - // A SliceType represents a []Elem type syntax. // If DDD is true, it's the ...Elem at the end of a function list. type SliceType struct { From f8afb8216ad69ed0c4e5ac8b5ad86cc0cb78749d Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 26 Dec 2020 23:21:20 -0800 Subject: [PATCH 276/474] [dev.regabi] cmd/compile: rename CommStmt and CaseStmt [generated] Rename these two AST nodes to match their cmd/compile/internal/syntax and go/ast counterparts. Passes toolstash -cmp. 
[git-generate] cd src/cmd/compile/internal/ir rf ' mv CaseStmt CaseClause mv CommStmt CommClause ' sed -E -i -e 's/(Case|Comm)Stmt/\1Clause/g' mknode.go Change-Id: I19fba0323a5de1e71346622857011b2f7879bcef Reviewed-on: https://go-review.googlesource.com/c/go/+/280446 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/ir/fmt.go | 2 +- src/cmd/compile/internal/ir/mknode.go | 16 +++---- src/cmd/compile/internal/ir/node_gen.go | 16 +++---- src/cmd/compile/internal/ir/stmt.go | 44 +++++++++---------- src/cmd/compile/internal/noder/noder.go | 8 ++-- src/cmd/compile/internal/typecheck/iexport.go | 4 +- src/cmd/compile/internal/typecheck/iimport.go | 8 ++-- src/cmd/compile/internal/typecheck/stmt.go | 2 +- src/cmd/compile/internal/walk/select.go | 8 ++-- 9 files changed, 54 insertions(+), 54 deletions(-) diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index f52c639c517fa..49f451a5d85a1 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -478,7 +478,7 @@ func stmtFmt(n Node, s fmt.State) { fmt.Fprintf(s, " { %v }", n.Cases) case OCASE: - n := n.(*CaseStmt) + n := n.(*CaseClause) if len(n.List) != 0 { fmt.Fprintf(s, "case %.v", n.List) } else { diff --git a/src/cmd/compile/internal/ir/mknode.go b/src/cmd/compile/internal/ir/mknode.go index 5c36b729c7c56..3b5da32d8c52e 100644 --- a/src/cmd/compile/internal/ir/mknode.go +++ b/src/cmd/compile/internal/ir/mknode.go @@ -37,8 +37,8 @@ func main() { nodeType := lookup("Node") ntypeType := lookup("Ntype") nodesType := lookup("Nodes") - slicePtrCaseStmtType := types.NewSlice(types.NewPointer(lookup("CaseStmt"))) - slicePtrCommStmtType := types.NewSlice(types.NewPointer(lookup("CommStmt"))) + slicePtrCaseClauseType := types.NewSlice(types.NewPointer(lookup("CaseClause"))) + slicePtrCommClauseType := types.NewSlice(types.NewPointer(lookup("CommClause"))) ptrFieldType := types.NewPointer(lookup("Field")) slicePtrFieldType := types.NewSlice(ptrFieldType) ptrIdentType := types.NewPointer(lookup("Ident")) @@ -78,9 +78,9 @@ func main() { switch { case is(nodesType): fmt.Fprintf(&buf, "c.%s = c.%s.Copy()\n", name, name) - case is(slicePtrCaseStmtType): + case is(slicePtrCaseClauseType): fmt.Fprintf(&buf, "c.%s = copyCases(c.%s)\n", name, name) - case is(slicePtrCommStmtType): + case is(slicePtrCommClauseType): fmt.Fprintf(&buf, "c.%s = copyComms(c.%s)\n", name, name) case is(ptrFieldType): fmt.Fprintf(&buf, "if c.%s != nil { c.%s = c.%s.copy() }\n", name, name, name) @@ -100,9 +100,9 @@ func main() { fmt.Fprintf(&buf, "err = maybeDo(n.%s, err, do)\n", name) case is(nodesType): fmt.Fprintf(&buf, "err = maybeDoList(n.%s, err, do)\n", name) - case is(slicePtrCaseStmtType): + case is(slicePtrCaseClauseType): fmt.Fprintf(&buf, "err = maybeDoCases(n.%s, err, do)\n", name) - case is(slicePtrCommStmtType): + case is(slicePtrCommClauseType): fmt.Fprintf(&buf, "err = maybeDoComms(n.%s, err, do)\n", name) case is(ptrFieldType): fmt.Fprintf(&buf, "err = maybeDoField(n.%s, err, do)\n", name) @@ -123,9 +123,9 @@ func main() { fmt.Fprintf(&buf, "n.%s = toNtype(maybeEdit(n.%s, edit))\n", name, name) case is(nodesType): fmt.Fprintf(&buf, "editList(n.%s, edit)\n", name) - case is(slicePtrCaseStmtType): + case is(slicePtrCaseClauseType): fmt.Fprintf(&buf, "editCases(n.%s, edit)\n", name) - case is(slicePtrCommStmtType): + case is(slicePtrCommClauseType): fmt.Fprintf(&buf, "editComms(n.%s, edit)\n", name) case is(ptrFieldType): 
fmt.Fprintf(&buf, "editField(n.%s, edit)\n", name) diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 74129694251b6..27a5311748c25 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -226,15 +226,15 @@ func (n *CallPartExpr) editChildren(edit func(Node) Node) { n.X = maybeEdit(n.X, edit) } -func (n *CaseStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *CaseStmt) copy() Node { +func (n *CaseClause) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *CaseClause) copy() Node { c := *n c.init = c.init.Copy() c.List = c.List.Copy() c.Body = c.Body.Copy() return &c } -func (n *CaseStmt) doChildren(do func(Node) error) error { +func (n *CaseClause) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) err = maybeDo(n.Var, err, do) @@ -242,7 +242,7 @@ func (n *CaseStmt) doChildren(do func(Node) error) error { err = maybeDoList(n.Body, err, do) return err } -func (n *CaseStmt) editChildren(edit func(Node) Node) { +func (n *CaseClause) editChildren(edit func(Node) Node) { editList(n.init, edit) n.Var = maybeEdit(n.Var, edit) editList(n.List, edit) @@ -293,21 +293,21 @@ func (n *ClosureReadExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) } -func (n *CommStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *CommStmt) copy() Node { +func (n *CommClause) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *CommClause) copy() Node { c := *n c.init = c.init.Copy() c.Body = c.Body.Copy() return &c } -func (n *CommStmt) doChildren(do func(Node) error) error { +func (n *CommClause) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) err = maybeDo(n.Comm, err, do) err = maybeDoList(n.Body, err, do) return err } -func (n *CommStmt) editChildren(edit func(Node) Node) { +func (n *CommClause) editChildren(edit func(Node) Node) { editList(n.init, edit) n.Comm = maybeEdit(n.Comm, edit) editList(n.Body, edit) diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index 0f44acd8b4e29..de152fec7275b 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -173,31 +173,31 @@ func NewBranchStmt(pos src.XPos, op Op, label *types.Sym) *BranchStmt { func (n *BranchStmt) Sym() *types.Sym { return n.Label } -// A CaseStmt is a case statement in a switch or select: case List: Body. -type CaseStmt struct { +// A CaseClause is a case statement in a switch or select: case List: Body. +type CaseClause struct { miniStmt Var Node // declared variable for this case in type switch List Nodes // list of expressions for switch, early select Body Nodes } -func NewCaseStmt(pos src.XPos, list, body []Node) *CaseStmt { - n := &CaseStmt{List: list, Body: body} +func NewCaseStmt(pos src.XPos, list, body []Node) *CaseClause { + n := &CaseClause{List: list, Body: body} n.pos = pos n.op = OCASE return n } // TODO(mdempsky): Generate these with mknode.go. 
-func copyCases(list []*CaseStmt) []*CaseStmt { +func copyCases(list []*CaseClause) []*CaseClause { if list == nil { return nil } - c := make([]*CaseStmt, len(list)) + c := make([]*CaseClause, len(list)) copy(c, list) return c } -func maybeDoCases(list []*CaseStmt, err error, do func(Node) error) error { +func maybeDoCases(list []*CaseClause, err error, do func(Node) error) error { if err != nil { return err } @@ -210,37 +210,37 @@ func maybeDoCases(list []*CaseStmt, err error, do func(Node) error) error { } return nil } -func editCases(list []*CaseStmt, edit func(Node) Node) { +func editCases(list []*CaseClause, edit func(Node) Node) { for i, x := range list { if x != nil { - list[i] = edit(x).(*CaseStmt) + list[i] = edit(x).(*CaseClause) } } } -type CommStmt struct { +type CommClause struct { miniStmt - Comm Node // communication case + Comm Node // communication case Body Nodes } -func NewCommStmt(pos src.XPos, comm Node, body []Node) *CommStmt { - n := &CommStmt{Comm: comm, Body: body} +func NewCommStmt(pos src.XPos, comm Node, body []Node) *CommClause { + n := &CommClause{Comm: comm, Body: body} n.pos = pos n.op = OCASE return n } // TODO(mdempsky): Generate these with mknode.go. -func copyComms(list []*CommStmt) []*CommStmt { +func copyComms(list []*CommClause) []*CommClause { if list == nil { return nil } - c := make([]*CommStmt, len(list)) + c := make([]*CommClause, len(list)) copy(c, list) return c } -func maybeDoComms(list []*CommStmt, err error, do func(Node) error) error { +func maybeDoComms(list []*CommClause, err error, do func(Node) error) error { if err != nil { return err } @@ -253,10 +253,10 @@ func maybeDoComms(list []*CommStmt, err error, do func(Node) error) error { } return nil } -func editComms(list []*CommStmt, edit func(Node) Node) { +func editComms(list []*CommClause, edit func(Node) Node) { for i, x := range list { if x != nil { - list[i] = edit(x).(*CommStmt) + list[i] = edit(x).(*CommClause) } } } @@ -406,14 +406,14 @@ func (n *ReturnStmt) SetOrig(x Node) { n.orig = x } type SelectStmt struct { miniStmt Label *types.Sym - Cases []*CommStmt + Cases []*CommClause HasBreak bool // TODO(rsc): Instead of recording here, replace with a block? 
Compiled Nodes // compiled form, after walkswitch } -func NewSelectStmt(pos src.XPos, cases []*CommStmt) *SelectStmt { +func NewSelectStmt(pos src.XPos, cases []*CommClause) *SelectStmt { n := &SelectStmt{Cases: cases} n.pos = pos n.op = OSELECT @@ -438,7 +438,7 @@ func NewSendStmt(pos src.XPos, ch, value Node) *SendStmt { type SwitchStmt struct { miniStmt Tag Node - Cases []*CaseStmt + Cases []*CaseClause Label *types.Sym HasBreak bool @@ -446,7 +446,7 @@ type SwitchStmt struct { Compiled Nodes // compiled form, after walkswitch } -func NewSwitchStmt(pos src.XPos, tag Node, cases []*CaseStmt) *SwitchStmt { +func NewSwitchStmt(pos src.XPos, tag Node, cases []*CaseClause) *SwitchStmt { n := &SwitchStmt{Tag: tag, Cases: cases} n.pos = pos n.op = OSWITCH diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index 19a88e21a27b5..7c1f7595b3e42 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -1212,8 +1212,8 @@ func (p *noder) switchStmt(stmt *syntax.SwitchStmt) ir.Node { return n } -func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitchGuard, rbrace syntax.Pos) []*ir.CaseStmt { - nodes := make([]*ir.CaseStmt, 0, len(clauses)) +func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitchGuard, rbrace syntax.Pos) []*ir.CaseClause { + nodes := make([]*ir.CaseClause, 0, len(clauses)) for i, clause := range clauses { p.setlineno(clause) if i > 0 { @@ -1263,8 +1263,8 @@ func (p *noder) selectStmt(stmt *syntax.SelectStmt) ir.Node { return ir.NewSelectStmt(p.pos(stmt), p.commClauses(stmt.Body, stmt.Rbrace)) } -func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []*ir.CommStmt { - nodes := make([]*ir.CommStmt, len(clauses)) +func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []*ir.CommClause { + nodes := make([]*ir.CommClause, len(clauses)) for i, clause := range clauses { p.setlineno(clause) if i > 0 { diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index bf093c60c78ac..3b071a61abbd4 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -1181,7 +1181,7 @@ func isNamedTypeSwitch(x ir.Node) bool { return ok && guard.Tag != nil } -func (w *exportWriter) caseList(cases []*ir.CaseStmt, namedTypeSwitch bool) { +func (w *exportWriter) caseList(cases []*ir.CaseClause, namedTypeSwitch bool) { w.uint64(uint64(len(cases))) for _, cas := range cases { w.pos(cas.Pos()) @@ -1193,7 +1193,7 @@ func (w *exportWriter) caseList(cases []*ir.CaseStmt, namedTypeSwitch bool) { } } -func (w *exportWriter) commList(cases []*ir.CommStmt) { +func (w *exportWriter) commList(cases []*ir.CommClause) { w.uint64(uint64(len(cases))) for _, cas := range cases { w.pos(cas.Pos()) diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go index af2dd84a38715..cf2cf8749215f 100644 --- a/src/cmd/compile/internal/typecheck/iimport.go +++ b/src/cmd/compile/internal/typecheck/iimport.go @@ -767,10 +767,10 @@ func (r *importReader) stmtList() []ir.Node { return list } -func (r *importReader) caseList(switchExpr ir.Node) []*ir.CaseStmt { +func (r *importReader) caseList(switchExpr ir.Node) []*ir.CaseClause { namedTypeSwitch := isNamedTypeSwitch(switchExpr) - cases := make([]*ir.CaseStmt, r.uint64()) + cases := make([]*ir.CaseClause, r.uint64()) for i := range cases { cas := 
ir.NewCaseStmt(r.pos(), nil, nil)
 		cas.List.Set(r.stmtList())
@@ -789,8 +789,8 @@ func (r *importReader) caseList(switchExpr ir.Node) []*ir.CaseClause {
 	return cases
 }
 
-func (r *importReader) commList() []*ir.CommStmt {
-	cases := make([]*ir.CommStmt, r.uint64())
+func (r *importReader) commList() []*ir.CommClause {
+	cases := make([]*ir.CommClause, r.uint64())
 	for i := range cases {
 		cases[i] = ir.NewCommStmt(r.pos(), r.node(), r.stmtList())
 	}
diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go
index bfeea06e83d9e..f5d36a663d17a 100644
--- a/src/cmd/compile/internal/typecheck/stmt.go
+++ b/src/cmd/compile/internal/typecheck/stmt.go
@@ -360,7 +360,7 @@ func tcReturn(n *ir.ReturnStmt) ir.Node {
 
 // select
 func tcSelect(sel *ir.SelectStmt) {
-	var def *ir.CommStmt
+	var def *ir.CommClause
 	lno := ir.SetPos(sel)
 	Stmts(sel.Init())
 	for _, ncase := range sel.Cases {
diff --git a/src/cmd/compile/internal/walk/select.go b/src/cmd/compile/internal/walk/select.go
index f51684c9b6681..1c5e1d7e64ac7 100644
--- a/src/cmd/compile/internal/walk/select.go
+++ b/src/cmd/compile/internal/walk/select.go
@@ -29,7 +29,7 @@ func walkSelect(sel *ir.SelectStmt) {
 	base.Pos = lno
 }
 
-func walkSelectCases(cases []*ir.CommStmt) []ir.Node {
+func walkSelectCases(cases []*ir.CommClause) []ir.Node {
 	ncas := len(cases)
 	sellineno := base.Pos
 
@@ -73,7 +73,7 @@ func walkSelectCases(cases []*ir.CommStmt) []ir.Node {
 	// convert case value arguments to addresses.
 	// this rewrite is used by both the general code and the next optimization.
-	var dflt *ir.CommStmt
+	var dflt *ir.CommClause
 	for _, cas := range cases {
 		ir.SetPos(cas)
 		n := cas.Comm
@@ -146,7 +146,7 @@ func walkSelectCases(cases []*ir.CommStmt) []ir.Node {
 	if dflt != nil {
 		ncas--
 	}
-	casorder := make([]*ir.CommStmt, ncas)
+	casorder := make([]*ir.CommClause, ncas)
 	nsends, nrecvs := 0, 0
 
 	var init []ir.Node
@@ -242,7 +242,7 @@ func walkSelectCases(cases []*ir.CommStmt) []ir.Node {
 	}
 
 	// dispatch cases
-	dispatch := func(cond ir.Node, cas *ir.CommStmt) {
+	dispatch := func(cond ir.Node, cas *ir.CommClause) {
 		cond = typecheck.Expr(cond)
 		cond = typecheck.DefaultLit(cond, nil)

From 3383b5c74a4543d7232468201778a8db03cf133d Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Sat, 26 Dec 2020 23:46:36 -0800
Subject: [PATCH 277/474] [dev.regabi] cmd/compile: flatten dependency graph
 [generated]

This CL shuffles a couple of functions around to help flatten the
package dependency graph somewhat:

1. ssa.LosesStmtMark is only ever used in association with an
objw.Prog, so we might as well move it to that package. This removes
the dependency of objw (a relatively low-level utility package that
wraps cmd/internal/obj) on ssa (a large and relatively high-level
package).

2. It moves liveness.SetTypeBits into a new package, typebits. A
single-function package is a bit on the silly side, but reflectdata
shouldn't need to depend on liveness (nor vice versa).
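
To sketch the resulting call sites (identifiers as introduced by this
CL; these lines are lifted from the diffs below and do not compile on
their own):

    // liveness and reflectdata now call the leaf package directly:
    typebits.Set(node.Type(), node.FrameOffset(), args)

    // ssagen and objw use the relocated helper:
    if objw.LosesStmtMark(as) {
        return p
    }
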
[git-generate] cd src/cmd/compile/internal/ssa rf ' mv LosesStmtMark prog.go mv prog.go cmd/compile/internal/objw ' cd ../liveness rf ' mv SetTypeBits Set mv Set typebits.go rm typebits.go:/Copyright/+4,/^package/-0 mv typebits.go cmd/compile/internal/typebits ' Change-Id: Ic9a983f0ad6c0cf1a537f99889699a8444699e6e Reviewed-on: https://go-review.googlesource.com/c/go/+/280447 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/liveness/plive.go | 87 ++----------------- src/cmd/compile/internal/objw/prog.go | 12 ++- .../compile/internal/reflectdata/reflect.go | 4 +- src/cmd/compile/internal/ssa/numberlines.go | 10 --- src/cmd/compile/internal/ssagen/ssa.go | 2 +- src/cmd/compile/internal/typebits/typebits.go | 87 +++++++++++++++++++ 6 files changed, 106 insertions(+), 96 deletions(-) create mode 100644 src/cmd/compile/internal/typebits/typebits.go diff --git a/src/cmd/compile/internal/liveness/plive.go b/src/cmd/compile/internal/liveness/plive.go index cf4debb795966..89c70df65a490 100644 --- a/src/cmd/compile/internal/liveness/plive.go +++ b/src/cmd/compile/internal/liveness/plive.go @@ -24,6 +24,7 @@ import ( "cmd/compile/internal/ir" "cmd/compile/internal/objw" "cmd/compile/internal/ssa" + "cmd/compile/internal/typebits" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/objabi" @@ -375,82 +376,6 @@ func (lv *liveness) blockEffects(b *ssa.Block) *blockEffects { return &lv.be[b.ID] } -// NOTE: The bitmap for a specific type t could be cached in t after -// the first run and then simply copied into bv at the correct offset -// on future calls with the same type t. -func SetTypeBits(t *types.Type, off int64, bv bitvec.BitVec) { - if t.Align > 0 && off&int64(t.Align-1) != 0 { - base.Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off) - } - if !t.HasPointers() { - // Note: this case ensures that pointers to go:notinheap types - // are not considered pointers by garbage collection and stack copying. - return - } - - switch t.Kind() { - case types.TPTR, types.TUNSAFEPTR, types.TFUNC, types.TCHAN, types.TMAP: - if off&int64(types.PtrSize-1) != 0 { - base.Fatalf("onebitwalktype1: invalid alignment, %v", t) - } - bv.Set(int32(off / int64(types.PtrSize))) // pointer - - case types.TSTRING: - // struct { byte *str; intgo len; } - if off&int64(types.PtrSize-1) != 0 { - base.Fatalf("onebitwalktype1: invalid alignment, %v", t) - } - bv.Set(int32(off / int64(types.PtrSize))) //pointer in first slot - - case types.TINTER: - // struct { Itab *tab; void *data; } - // or, when isnilinter(t)==true: - // struct { Type *type; void *data; } - if off&int64(types.PtrSize-1) != 0 { - base.Fatalf("onebitwalktype1: invalid alignment, %v", t) - } - // The first word of an interface is a pointer, but we don't - // treat it as such. - // 1. If it is a non-empty interface, the pointer points to an itab - // which is always in persistentalloc space. - // 2. If it is an empty interface, the pointer points to a _type. - // a. If it is a compile-time-allocated type, it points into - // the read-only data section. - // b. If it is a reflect-allocated type, it points into the Go heap. - // Reflect is responsible for keeping a reference to - // the underlying type so it won't be GCd. - // If we ever have a moving GC, we need to change this for 2b (as - // well as scan itabs to update their itab._type fields). 
- bv.Set(int32(off/int64(types.PtrSize) + 1)) // pointer in second slot - - case types.TSLICE: - // struct { byte *array; uintgo len; uintgo cap; } - if off&int64(types.PtrSize-1) != 0 { - base.Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t) - } - bv.Set(int32(off / int64(types.PtrSize))) // pointer in first slot (BitsPointer) - - case types.TARRAY: - elt := t.Elem() - if elt.Width == 0 { - // Short-circuit for #20739. - break - } - for i := int64(0); i < t.NumElem(); i++ { - SetTypeBits(elt, off, bv) - off += elt.Width - } - - case types.TSTRUCT: - for _, f := range t.Fields().Slice() { - SetTypeBits(f.Type, off+f.Offset, bv) - } - - default: - base.Fatalf("onebitwalktype1: unexpected type, %v", t) - } -} - // Generates live pointer value maps for arguments and local variables. The // this argument and the in arguments are always assumed live. The vars // argument is a slice of *Nodes. @@ -463,10 +388,10 @@ func (lv *liveness) pointerMap(liveout bitvec.BitVec, vars []*ir.Name, args, loc node := vars[i] switch node.Class_ { case ir.PAUTO: - SetTypeBits(node.Type(), node.FrameOffset()+lv.stkptrsize, locals) + typebits.Set(node.Type(), node.FrameOffset()+lv.stkptrsize, locals) case ir.PPARAM, ir.PPARAMOUT: - SetTypeBits(node.Type(), node.FrameOffset(), args) + typebits.Set(node.Type(), node.FrameOffset(), args) } } } @@ -1309,15 +1234,15 @@ func WriteFuncMap(fn *ir.Func) { off = objw.Uint32(lsym, off, uint32(bv.N)) if ir.IsMethod(fn) { - SetTypeBits(fn.Type().Recvs(), 0, bv) + typebits.Set(fn.Type().Recvs(), 0, bv) } if fn.Type().NumParams() > 0 { - SetTypeBits(fn.Type().Params(), 0, bv) + typebits.Set(fn.Type().Params(), 0, bv) } off = objw.BitVec(lsym, off, bv) if fn.Type().NumResults() > 0 { - SetTypeBits(fn.Type().Results(), 0, bv) + typebits.Set(fn.Type().Results(), 0, bv) off = objw.BitVec(lsym, off, bv) } diff --git a/src/cmd/compile/internal/objw/prog.go b/src/cmd/compile/internal/objw/prog.go index 54028e47fd23c..8d24f94aa5660 100644 --- a/src/cmd/compile/internal/objw/prog.go +++ b/src/cmd/compile/internal/objw/prog.go @@ -33,7 +33,6 @@ package objw import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" - "cmd/compile/internal/ssa" "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/src" @@ -173,7 +172,7 @@ func (pp *Progs) Prog(as obj.As) *obj.Prog { p.Pos = pp.Pos if pp.Pos.IsStmt() == src.PosIsStmt { // Clear IsStmt for later Progs at this pos provided that as can be marked as a stmt - if ssa.LosesStmtMark(as) { + if LosesStmtMark(as) { return p } pp.Pos = pp.Pos.WithNotStmt() @@ -216,3 +215,12 @@ func (pp *Progs) SetText(fn *ir.Func) { ptxt.From.Name = obj.NAME_EXTERN ptxt.From.Sym = fn.LSym } + +// LosesStmtMark reports whether a prog with op as loses its statement mark on the way to DWARF. +// The attributes from some opcodes are lost in translation. +// TODO: this is an artifact of how funcpctab combines information for instructions at a single PC. +// Should try to fix it there. +func LosesStmtMark(as obj.As) bool { + // is_stmt does not work for these; it DOES for ANOP even though that generates no code. 
+ return as == obj.APCDATA || as == obj.AFUNCDATA +} diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go index 7c424218962b6..df80380fc1311 100644 --- a/src/cmd/compile/internal/reflectdata/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -16,8 +16,8 @@ import ( "cmd/compile/internal/escape" "cmd/compile/internal/inline" "cmd/compile/internal/ir" - "cmd/compile/internal/liveness" "cmd/compile/internal/objw" + "cmd/compile/internal/typebits" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/gcprog" @@ -1552,7 +1552,7 @@ func fillptrmask(t *types.Type, ptrmask []byte) { } vec := bitvec.New(8 * int32(len(ptrmask))) - liveness.SetTypeBits(t, 0, vec) + typebits.Set(t, 0, vec) nptr := types.PtrDataSize(t) / int64(types.PtrSize) for i := int64(0); i < nptr; i++ { diff --git a/src/cmd/compile/internal/ssa/numberlines.go b/src/cmd/compile/internal/ssa/numberlines.go index f4e62b88c4f6b..2a9c8e4f326fc 100644 --- a/src/cmd/compile/internal/ssa/numberlines.go +++ b/src/cmd/compile/internal/ssa/numberlines.go @@ -5,7 +5,6 @@ package ssa import ( - "cmd/internal/obj" "cmd/internal/src" "fmt" "sort" @@ -23,15 +22,6 @@ func isPoorStatementOp(op Op) bool { return false } -// LosesStmtMark reports whether a prog with op as loses its statement mark on the way to DWARF. -// The attributes from some opcodes are lost in translation. -// TODO: this is an artifact of how funcpctab combines information for instructions at a single PC. -// Should try to fix it there. -func LosesStmtMark(as obj.As) bool { - // is_stmt does not work for these; it DOES for ANOP even though that generates no code. - return as == obj.APCDATA || as == obj.AFUNCDATA -} - // nextGoodStatementIndex returns an index at i or later that is believed // to be a good place to start the statement for b. This decision is // based on v's Op, the possibility of a better later operation, and diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 082cb7c3210ae..0da6ab3272aec 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -6277,7 +6277,7 @@ type State struct { // Prog appends a new Prog. func (s *State) Prog(as obj.As) *obj.Prog { p := s.pp.Prog(as) - if ssa.LosesStmtMark(as) { + if objw.LosesStmtMark(as) { return p } // Float a statement start to the beginning of any same-line run. diff --git a/src/cmd/compile/internal/typebits/typebits.go b/src/cmd/compile/internal/typebits/typebits.go new file mode 100644 index 0000000000000..63a2bb3ffa4b4 --- /dev/null +++ b/src/cmd/compile/internal/typebits/typebits.go @@ -0,0 +1,87 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typebits + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/bitvec" + "cmd/compile/internal/types" +) + +// NOTE: The bitmap for a specific type t could be cached in t after +// the first run and then simply copied into bv at the correct offset +// on future calls with the same type t. 
+func Set(t *types.Type, off int64, bv bitvec.BitVec) { + if t.Align > 0 && off&int64(t.Align-1) != 0 { + base.Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off) + } + if !t.HasPointers() { + // Note: this case ensures that pointers to go:notinheap types + // are not considered pointers by garbage collection and stack copying. + return + } + + switch t.Kind() { + case types.TPTR, types.TUNSAFEPTR, types.TFUNC, types.TCHAN, types.TMAP: + if off&int64(types.PtrSize-1) != 0 { + base.Fatalf("onebitwalktype1: invalid alignment, %v", t) + } + bv.Set(int32(off / int64(types.PtrSize))) // pointer + + case types.TSTRING: + // struct { byte *str; intgo len; } + if off&int64(types.PtrSize-1) != 0 { + base.Fatalf("onebitwalktype1: invalid alignment, %v", t) + } + bv.Set(int32(off / int64(types.PtrSize))) //pointer in first slot + + case types.TINTER: + // struct { Itab *tab; void *data; } + // or, when isnilinter(t)==true: + // struct { Type *type; void *data; } + if off&int64(types.PtrSize-1) != 0 { + base.Fatalf("onebitwalktype1: invalid alignment, %v", t) + } + // The first word of an interface is a pointer, but we don't + // treat it as such. + // 1. If it is a non-empty interface, the pointer points to an itab + // which is always in persistentalloc space. + // 2. If it is an empty interface, the pointer points to a _type. + // a. If it is a compile-time-allocated type, it points into + // the read-only data section. + // b. If it is a reflect-allocated type, it points into the Go heap. + // Reflect is responsible for keeping a reference to + // the underlying type so it won't be GCd. + // If we ever have a moving GC, we need to change this for 2b (as + // well as scan itabs to update their itab._type fields). + bv.Set(int32(off/int64(types.PtrSize) + 1)) // pointer in second slot + + case types.TSLICE: + // struct { byte *array; uintgo len; uintgo cap; } + if off&int64(types.PtrSize-1) != 0 { + base.Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t) + } + bv.Set(int32(off / int64(types.PtrSize))) // pointer in first slot (BitsPointer) + + case types.TARRAY: + elt := t.Elem() + if elt.Width == 0 { + // Short-circuit for #20739. + break + } + for i := int64(0); i < t.NumElem(); i++ { + Set(elt, off, bv) + off += elt.Width + } + + case types.TSTRUCT: + for _, f := range t.Fields().Slice() { + Set(f.Type, off+f.Offset, bv) + } + + default: + base.Fatalf("onebitwalktype1: unexpected type, %v", t) + } +} From 137f0d2e06523f6daf808ea09e77e68d8944a85a Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 27 Dec 2020 10:48:10 -0800 Subject: [PATCH 278/474] [dev.regabi] cmd/compile: remove unnecessary Name.Sym call Since the introduction of ir.BasicLit, we no longer create Names without Syms. Passes toolstash -cmp. 
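
For context (my example, not from the CL): the guard below fires when
calling a non-function whose name shadows a predeclared function, so
the compiler can point at the shadowing declaration. This program is
deliberately rejected, with an error along the lines of "cannot call
non-function len (type int), declared at ...":

    package main

    func main() {
        len := 3     // shadows the predeclared len
        _ = len(nil) // error: cannot call non-function len
    }
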
Change-Id: I82de3fd65455e3756ff56e52febb512c0a2128f2
Reviewed-on: https://go-review.googlesource.com/c/go/+/280512
Trust: Matthew Dempsky
Run-TryBot: Matthew Dempsky
TryBot-Result: Go Bot
Reviewed-by: Cuong Manh Le
---
 src/cmd/compile/internal/typecheck/func.go | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go
index 50f514a6db0a6..a9d92c668cd78 100644
--- a/src/cmd/compile/internal/typecheck/func.go
+++ b/src/cmd/compile/internal/typecheck/func.go
@@ -527,9 +527,7 @@ func tcCall(n *ir.CallExpr, top int) ir.Node {
 	default:
 		n.SetOp(ir.OCALLFUNC)
 		if t.Kind() != types.TFUNC {
-			// TODO(mdempsky): Remove "o.Sym() != nil" once we stop
-			// using ir.Name for numeric literals.
-			if o := ir.Orig(l); o.Name() != nil && o.Sym() != nil && types.BuiltinPkg.Lookup(o.Sym().Name).Def != nil {
+			if o := ir.Orig(l); o.Name() != nil && types.BuiltinPkg.Lookup(o.Sym().Name).Def != nil {
 				// be more specific when the non-function
 				// name matches a predeclared function
 				base.Errorf("cannot call non-function %L, declared at %s",

From 098a6490b93f337ed3f13a7a18376ebb8175f2be Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Sun, 27 Dec 2020 11:11:11 -0800
Subject: [PATCH 279/474] [dev.regabi] cmd/compile: remove Declare in
 makepartialcall

This is the only remaining late call to Declare. By changing it to
use Temp, we'll be able to move the legacy lexical scoping logic to
noder and iimport.

Passes toolstash -cmp.
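
Condensed from the diff below (in-tree types; not standalone code),
the named ".this" variable becomes an anonymous temporary:

    var ptr *ir.Name
    if rcvrtype.IsPtr() || rcvrtype.IsInterface() {
        ptr = Temp(rcvrtype) // receiver is pointer-shaped; store it directly
    } else {
        ptr = Temp(types.NewPtr(rcvrtype)) // otherwise point at the receiver
    }
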
Change-Id: I58f7359f20252ca942f59bc7593c615a7b9de105 Reviewed-on: https://go-review.googlesource.com/c/go/+/280514 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/ir/name.go | 3 --- src/cmd/compile/internal/noder/noder.go | 1 - src/cmd/compile/internal/typecheck/dcl.go | 2 -- 3 files changed, 6 deletions(-) diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 93535f4ceecc4..cc8e1b4cd1900 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -268,7 +268,6 @@ const ( nameInlLocal // PAUTO created by inliner, derived from callee local nameOpenDeferSlot // if temporary var storing info for open-coded defers nameLibfuzzerExtraCounter // if PEXTERN should be assigned to __libfuzzer_extra_counters section - nameIsDDD // is function argument a ... nameAlias // is type name an alias ) @@ -286,7 +285,6 @@ func (n *Name) InlFormal() bool { return n.flags&nameInlFormal != 0 func (n *Name) InlLocal() bool { return n.flags&nameInlLocal != 0 } func (n *Name) OpenDeferSlot() bool { return n.flags&nameOpenDeferSlot != 0 } func (n *Name) LibfuzzerExtraCounter() bool { return n.flags&nameLibfuzzerExtraCounter != 0 } -func (n *Name) IsDDD() bool { return n.flags&nameIsDDD != 0 } func (n *Name) SetCaptured(b bool) { n.flags.set(nameCaptured, b) } func (n *Name) setReadonly(b bool) { n.flags.set(nameReadonly, b) } @@ -302,7 +300,6 @@ func (n *Name) SetInlFormal(b bool) { n.flags.set(nameInlFormal, b) func (n *Name) SetInlLocal(b bool) { n.flags.set(nameInlLocal, b) } func (n *Name) SetOpenDeferSlot(b bool) { n.flags.set(nameOpenDeferSlot, b) } func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtraCounter, b) } -func (n *Name) SetIsDDD(b bool) { n.flags.set(nameIsDDD, b) } // MarkReadonly indicates that n is an ONAME with readonly contents. func (n *Name) MarkReadonly() { diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index 7c1f7595b3e42..920f4839adfcc 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -1838,7 +1838,6 @@ func oldname(s *types.Sym) ir.Node { c = typecheck.NewName(s) c.Class_ = ir.PAUTOHEAP c.SetIsClosureVar(true) - c.SetIsDDD(n.IsDDD()) c.Defn = n // Link into list of active closure variables. diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go index 0da0956c3aebd..36057ba2d1c96 100644 --- a/src/cmd/compile/internal/typecheck/dcl.go +++ b/src/cmd/compile/internal/typecheck/dcl.go @@ -447,7 +447,6 @@ func funcarg(n *ir.Field, ctxt ir.Class) { name := ir.NewNameAt(n.Pos, n.Sym) n.Decl = name name.Ntype = n.Ntype - name.SetIsDDD(n.IsDDD) Declare(name, ctxt) vargen++ @@ -461,7 +460,6 @@ func funcarg2(f *types.Field, ctxt ir.Class) { n := ir.NewNameAt(f.Pos, f.Sym) f.Nname = n n.SetType(f.Type) - n.SetIsDDD(f.IsDDD()) Declare(n, ctxt) } From 76136be02701aab8a4b546956f1847d28dbe0ba2 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 27 Dec 2020 11:26:12 -0800 Subject: [PATCH 281/474] [dev.regabi] cmd/compile: check for recursive import in ImportBody After earlier importer refactorings, most of the importer is now reentrant, so we don't need to guard against it at Resolve. The only remaining part that is still not reentrant is inline body importing, so move the recursive-import check there. Passes toolstash -cmp. 
Change-Id: Ia828f880a03e6125b102668c12a155d4c253d26b Reviewed-on: https://go-review.googlesource.com/c/go/+/280515 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/typecheck/iimport.go | 5 +++++ src/cmd/compile/internal/typecheck/typecheck.go | 8 +------- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go index cf2cf8749215f..546ddcba79295 100644 --- a/src/cmd/compile/internal/typecheck/iimport.go +++ b/src/cmd/compile/internal/typecheck/iimport.go @@ -71,7 +71,12 @@ func ImportBody(fn *ir.Func) { base.Fatalf("missing import reader for %v", fn) } + if inimport { + base.Fatalf("recursive inimport") + } + inimport = true r.doInline(fn) + inimport = false } func importReaderFor(sym *types.Sym, importers map[*types.Sym]iimporterAndOffset) *importReader { diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index dabfee3bf9ca7..e23c249ff2b90 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -251,13 +251,7 @@ func Resolve(n ir.Node) (res ir.Node) { } } - if inimport { - base.Fatalf("recursive inimport") - } - inimport = true - n = expandDecl(n) - inimport = false - return n + return expandDecl(n) } r := ir.AsNode(n.Sym().Def) From 3f370b75fb2f31754132271b2879929daa5f88fd Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 28 Dec 2020 15:40:19 -0800 Subject: [PATCH 282/474] [dev.regabi] cmd/compile: cleanup //go:generate directives During recent refactoring, we moved mkbuiltin.go to package typecheck, but accidentally duplicated its //go:generate directive into a bunch of other files/directories. This CL cleans up the unnecessary duplicates. Also, update all of the stringer invocations to use an explicit file name, and regenerate their files. Updates #43369. Change-Id: I4e493c1fff103d742de0a839d7a3375659270b50 Reviewed-on: https://go-review.googlesource.com/c/go/+/280635 Trust: Matthew Dempsky Trust: Meng Zhuo Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Meng Zhuo --- src/cmd/compile/internal/gc/main.go | 2 - src/cmd/compile/internal/ir/class_string.go | 2 +- src/cmd/compile/internal/ir/name.go | 2 +- src/cmd/compile/internal/ir/node.go | 2 +- src/cmd/compile/internal/ir/op_string.go | 2 +- src/cmd/compile/internal/noder/import.go | 2 - src/cmd/compile/internal/ssagen/abi.go | 2 - .../internal/syntax/operator_string.go | 30 +++++++++- .../compile/internal/syntax/token_string.go | 55 ++++++++++++++++++- src/cmd/compile/internal/syntax/tokens.go | 4 +- 10 files changed, 89 insertions(+), 14 deletions(-) diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index ba3620e6769fd..ced82736ce482 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:generate go run mkbuiltin.go - package gc import ( diff --git a/src/cmd/compile/internal/ir/class_string.go b/src/cmd/compile/internal/ir/class_string.go index 866bf1a6b5b78..13b9bd4812cce 100644 --- a/src/cmd/compile/internal/ir/class_string.go +++ b/src/cmd/compile/internal/ir/class_string.go @@ -1,4 +1,4 @@ -// Code generated by "stringer -type=Class"; DO NOT EDIT. +// Code generated by "stringer -type=Class name.go"; DO NOT EDIT. 
package ir diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index cc8e1b4cd1900..cb4876b9f8537 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -373,7 +373,7 @@ func DeclaredBy(x, stmt Node) bool { // called declaration contexts. type Class uint8 -//go:generate stringer -type=Class +//go:generate stringer -type=Class name.go const ( Pxxx Class = iota // no class; used during ssa conversion to indicate pseudo-variables PEXTERN // global variables diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index b4a557f290d79..54a3e2ba89bb1 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -92,7 +92,7 @@ func MayBeShared(n Node) bool { return false } -//go:generate stringer -type=Op -trimprefix=O +//go:generate stringer -type=Op -trimprefix=O node.go type Op uint8 diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go index f23e08c47cbf2..0339444132e44 100644 --- a/src/cmd/compile/internal/ir/op_string.go +++ b/src/cmd/compile/internal/ir/op_string.go @@ -1,4 +1,4 @@ -// Code generated by "stringer -type=Op -trimprefix=O"; DO NOT EDIT. +// Code generated by "stringer -type=Op -trimprefix=O node.go"; DO NOT EDIT. package ir diff --git a/src/cmd/compile/internal/noder/import.go b/src/cmd/compile/internal/noder/import.go index a39be9864b414..08f19a4028466 100644 --- a/src/cmd/compile/internal/noder/import.go +++ b/src/cmd/compile/internal/noder/import.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:generate go run mkbuiltin.go - package noder import ( diff --git a/src/cmd/compile/internal/ssagen/abi.go b/src/cmd/compile/internal/ssagen/abi.go index af08fcb7c3b72..b0338e8155db9 100644 --- a/src/cmd/compile/internal/ssagen/abi.go +++ b/src/cmd/compile/internal/ssagen/abi.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:generate go run mkbuiltin.go - package ssagen import ( diff --git a/src/cmd/compile/internal/syntax/operator_string.go b/src/cmd/compile/internal/syntax/operator_string.go index 3c759b2e9befb..a7cd40fb13518 100644 --- a/src/cmd/compile/internal/syntax/operator_string.go +++ b/src/cmd/compile/internal/syntax/operator_string.go @@ -1,9 +1,37 @@ -// Code generated by "stringer -type Operator -linecomment"; DO NOT EDIT. +// Code generated by "stringer -type Operator -linecomment tokens.go"; DO NOT EDIT. package syntax import "strconv" +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[Def-1] + _ = x[Not-2] + _ = x[Recv-3] + _ = x[OrOr-4] + _ = x[AndAnd-5] + _ = x[Eql-6] + _ = x[Neq-7] + _ = x[Lss-8] + _ = x[Leq-9] + _ = x[Gtr-10] + _ = x[Geq-11] + _ = x[Add-12] + _ = x[Sub-13] + _ = x[Or-14] + _ = x[Xor-15] + _ = x[Mul-16] + _ = x[Div-17] + _ = x[Rem-18] + _ = x[And-19] + _ = x[AndNot-20] + _ = x[Shl-21] + _ = x[Shr-22] +} + const _Operator_name = ":!<-||&&==!=<<=>>=+-|^*/%&&^<<>>" var _Operator_index = [...]uint8{0, 1, 2, 4, 6, 8, 10, 12, 13, 15, 16, 18, 19, 20, 21, 22, 23, 24, 25, 26, 28, 30, 32} diff --git a/src/cmd/compile/internal/syntax/token_string.go b/src/cmd/compile/internal/syntax/token_string.go index 3cf5473febfd0..ef295eb24b2bc 100644 --- a/src/cmd/compile/internal/syntax/token_string.go +++ b/src/cmd/compile/internal/syntax/token_string.go @@ -1,9 +1,62 @@ -// Code generated by "stringer -type token -linecomment"; DO NOT EDIT. +// Code generated by "stringer -type token -linecomment tokens.go"; DO NOT EDIT. package syntax import "strconv" +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[_EOF-1] + _ = x[_Name-2] + _ = x[_Literal-3] + _ = x[_Operator-4] + _ = x[_AssignOp-5] + _ = x[_IncOp-6] + _ = x[_Assign-7] + _ = x[_Define-8] + _ = x[_Arrow-9] + _ = x[_Star-10] + _ = x[_Lparen-11] + _ = x[_Lbrack-12] + _ = x[_Lbrace-13] + _ = x[_Rparen-14] + _ = x[_Rbrack-15] + _ = x[_Rbrace-16] + _ = x[_Comma-17] + _ = x[_Semi-18] + _ = x[_Colon-19] + _ = x[_Dot-20] + _ = x[_DotDotDot-21] + _ = x[_Break-22] + _ = x[_Case-23] + _ = x[_Chan-24] + _ = x[_Const-25] + _ = x[_Continue-26] + _ = x[_Default-27] + _ = x[_Defer-28] + _ = x[_Else-29] + _ = x[_Fallthrough-30] + _ = x[_For-31] + _ = x[_Func-32] + _ = x[_Go-33] + _ = x[_Goto-34] + _ = x[_If-35] + _ = x[_Import-36] + _ = x[_Interface-37] + _ = x[_Map-38] + _ = x[_Package-39] + _ = x[_Range-40] + _ = x[_Return-41] + _ = x[_Select-42] + _ = x[_Struct-43] + _ = x[_Switch-44] + _ = x[_Type-45] + _ = x[_Var-46] + _ = x[tokenCount-47] +} + const _token_name = "EOFnameliteralopop=opop=:=<-*([{)]},;:....breakcasechanconstcontinuedefaultdeferelsefallthroughforfuncgogotoifimportinterfacemappackagerangereturnselectstructswitchtypevar" var _token_index = [...]uint8{0, 3, 7, 14, 16, 19, 23, 24, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 42, 47, 51, 55, 60, 68, 75, 80, 84, 95, 98, 102, 104, 108, 110, 116, 125, 128, 135, 140, 146, 152, 158, 164, 168, 171, 171} diff --git a/src/cmd/compile/internal/syntax/tokens.go b/src/cmd/compile/internal/syntax/tokens.go index 3b97cb66f24b9..2936b6576bc7a 100644 --- a/src/cmd/compile/internal/syntax/tokens.go +++ b/src/cmd/compile/internal/syntax/tokens.go @@ -6,7 +6,7 @@ package syntax type token uint -//go:generate stringer -type token -linecomment +//go:generate stringer -type token -linecomment tokens.go const ( _ token = iota @@ -105,7 +105,7 @@ const ( type Operator uint -//go:generate stringer -type Operator -linecomment +//go:generate stringer -type Operator -linecomment tokens.go const ( _ Operator = iota From e563715b3085f44a76564485214e33e3c3b2b7b0 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 28 Dec 2020 15:29:03 -0800 Subject: [PATCH 283/474] [dev.regabi] cmd/compile: remove Sym.Importdef Evidently it hasn't been needed since circa 2018, when we removed the binary export data format. 
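Importdef is a pointer field, so dropping it shrinks Sym by one word:
4 bytes on 32-bit and 8 bytes on 64-bit, which is exactly the
sizeof_test.go delta below (48->44 and 80->72). A rough,
self-contained sketch of that size-regression test pattern (the field
layout here is invented purely for illustration, not Sym's real one):

    package p

    import (
        "reflect"
        "testing"
        "unsafe"
    )

    // sym stands in for types.Sym; three fields chosen so the
    // expected sizes below work out.
    type sym struct {
        linkname string
        pkg      *int
        name     string
    }

    func TestSizeof(t *testing.T) {
        const _64bit = unsafe.Sizeof(uintptr(0)) == 8
        want := uintptr(20) // 32-bit: 8 + 4 + 8
        if _64bit {
            want = 40 // 64-bit: 16 + 8 + 16
        }
        if got := reflect.TypeOf(sym{}).Size(); got != want {
            t.Errorf("unsafe.Sizeof(sym{}) = %d, want %d", got, want)
        }
    }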
Change-Id: I4e4c788d6b6233340fb0de0a56d035c31d96f761
Reviewed-on: https://go-review.googlesource.com/c/go/+/280634
Trust: Matthew Dempsky
Trust: Dan Scales
Run-TryBot: Matthew Dempsky
TryBot-Result: Go Bot
Reviewed-by: Dan Scales
---
 src/cmd/compile/internal/typecheck/export.go  | 1 -
 src/cmd/compile/internal/types/sizeof_test.go | 2 +-
 src/cmd/compile/internal/types/sym.go         | 3 +--
 3 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/src/cmd/compile/internal/typecheck/export.go b/src/cmd/compile/internal/typecheck/export.go
index 381a28e3ed489..03deff8174ae3 100644
--- a/src/cmd/compile/internal/typecheck/export.go
+++ b/src/cmd/compile/internal/typecheck/export.go
@@ -59,7 +59,6 @@ func importsym(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Cl
 	n := ir.NewDeclNameAt(pos, op, s)
 	n.Class_ = ctxt // TODO(mdempsky): Move this into NewDeclNameAt too?
 	s.SetPkgDef(n)
-	s.Importdef = ipkg
 	return n
 }
diff --git a/src/cmd/compile/internal/types/sizeof_test.go b/src/cmd/compile/internal/types/sizeof_test.go
index 1ca07b12c846b..675739f7f6368 100644
--- a/src/cmd/compile/internal/types/sizeof_test.go
+++ b/src/cmd/compile/internal/types/sizeof_test.go
@@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) {
 		_32bit uintptr // size on 32bit platforms
 		_64bit uintptr // size on 64bit platforms
 	}{
-		{Sym{}, 48, 80},
+		{Sym{}, 44, 72},
 		{Type{}, 56, 96},
 		{Map{}, 20, 40},
 		{Forward{}, 20, 32},
diff --git a/src/cmd/compile/internal/types/sym.go b/src/cmd/compile/internal/types/sym.go
index c512e3a003768..cd061d5f1c4c2 100644
--- a/src/cmd/compile/internal/types/sym.go
+++ b/src/cmd/compile/internal/types/sym.go
@@ -27,8 +27,7 @@ import (
 // NOTE: In practice, things can be messier than the description above
 // for various reasons (historical, convenience).
 type Sym struct {
-	Importdef *Pkg   // where imported definition was found
-	Linkname  string // link name
+	Linkname string // link name

 	Pkg  *Pkg
 	Name string // object name

From 4629f6a51da5afabbebe9616f65fbfe0675d6039 Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Mon, 28 Dec 2020 16:14:11 -0800
Subject: [PATCH 284/474] [dev.regabi] cmd/compile: merge {Selector,CallPart,Method}Expr

These three expression nodes all represent the same syntax, and so
they're represented the same within types2. They also aren't handled
differently enough throughout the rest of the compiler to merit
unique representations.

Method expressions are somewhat unique today in that they're very
frequently turned into plain function names. But eventually that can
be handled by a post-typecheck desugaring phase that reduces the
number of redundant AST forms.

Passes toolstash -cmp.
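The merged node keeps a single struct and lets its Op distinguish the
syntactic roles, with SetOp enforcing the allowed set. Schematically
(a simplified sketch, not this CL's definitions):

    package main

    import "fmt"

    type Op uint8

    const (
        OXDOT     Op = iota // x.Sel before type checking
        ODOT                // struct field selection
        ODOTMETH            // method selection
        OCALLPART           // method value x.M
        OMETHEXPR           // method expression T.M
    )

    type SelectorExpr struct {
        op  Op
        Sel string
    }

    func (n *SelectorExpr) SetOp(op Op) {
        switch op {
        case OXDOT, ODOT, ODOTMETH, OCALLPART, OMETHEXPR:
            n.op = op
        default:
            panic("invalid Op for SelectorExpr")
        }
    }

    func main() {
        n := &SelectorExpr{op: OXDOT, Sel: "M"}
        n.SetOp(OMETHEXPR) // typecheck found the operand is a type
        fmt.Println(n.op == OMETHEXPR)
    }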
Change-Id: I20df91bbd0d885c1f18ec67feb61ae1558670719 Reviewed-on: https://go-review.googlesource.com/c/go/+/280636 Trust: Matthew Dempsky Trust: Dan Scales Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Dan Scales --- src/cmd/compile/internal/escape/escape.go | 8 +-- src/cmd/compile/internal/inline/inl.go | 11 ++-- src/cmd/compile/internal/ir/expr.go | 60 +++++-------------- src/cmd/compile/internal/ir/fmt.go | 21 +------ src/cmd/compile/internal/ir/node_gen.go | 32 ---------- src/cmd/compile/internal/staticinit/sched.go | 8 +-- src/cmd/compile/internal/typecheck/expr.go | 6 +- src/cmd/compile/internal/typecheck/func.go | 16 +++-- src/cmd/compile/internal/typecheck/iexport.go | 21 +------ .../compile/internal/typecheck/typecheck.go | 15 ++--- src/cmd/compile/internal/walk/closure.go | 4 +- src/cmd/compile/internal/walk/complit.go | 4 +- src/cmd/compile/internal/walk/expr.go | 4 +- src/cmd/compile/internal/walk/order.go | 2 +- 14 files changed, 58 insertions(+), 154 deletions(-) diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index d8f0111d2de69..7b4037e028b34 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -612,10 +612,10 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) { // Flow the receiver argument to both the closure and // to the receiver parameter. - n := n.(*ir.CallPartExpr) + n := n.(*ir.SelectorExpr) closureK := e.spill(k, n) - m := n.Method + m := n.Selection // We don't know how the method value will be called // later, so conservatively assume the result @@ -1542,7 +1542,7 @@ func (e *escape) finish(fns []*ir.Func) { n := n.(*ir.ClosureExpr) n.SetTransient(true) case ir.OCALLPART: - n := n.(*ir.CallPartExpr) + n := n.(*ir.SelectorExpr) n.SetTransient(true) case ir.OSLICELIT: n := n.(*ir.CompLitExpr) @@ -1863,7 +1863,7 @@ func HeapAllocReason(n ir.Node) string { if n.Op() == ir.OCLOSURE && typecheck.ClosureType(n.(*ir.ClosureExpr)).Size() >= ir.MaxImplicitStackVarSize { return "too large for stack" } - if n.Op() == ir.OCALLPART && typecheck.PartialCallType(n.(*ir.CallPartExpr)).Size() >= ir.MaxImplicitStackVarSize { + if n.Op() == ir.OCALLPART && typecheck.PartialCallType(n.(*ir.SelectorExpr)).Size() >= ir.MaxImplicitStackVarSize { return "too large for stack" } diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 67162771e9350..fc6a17b933755 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -419,6 +419,9 @@ func (v *hairyVisitor) doNode(n ir.Node) error { case ir.OCALLPART, ir.OSLICELIT: v.budget-- // Hack for toolstash -cmp. + + case ir.OMETHEXPR: + v.budget++ // Hack for toolstash -cmp. } v.budget-- @@ -613,12 +616,12 @@ func inlCallee(fn ir.Node) *ir.Func { fn = ir.StaticValue(fn) switch fn.Op() { case ir.OMETHEXPR: - fn := fn.(*ir.MethodExpr) + fn := fn.(*ir.SelectorExpr) n := ir.MethodExprName(fn) - // Check that receiver type matches fn.Left. + // Check that receiver type matches fn.X. // TODO(mdempsky): Handle implicit dereference // of pointer receiver argument? 
- if n == nil || !types.Identical(n.Type().Recv().Type, fn.T) { + if n == nil || !types.Identical(n.Type().Recv().Type, fn.X.Type()) { return nil } return n.Func @@ -1098,7 +1101,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { return n case ir.OMETHEXPR: - n := n.(*ir.MethodExpr) + n := n.(*ir.SelectorExpr) return n case ir.OLITERAL, ir.ONIL, ir.OTYPE: diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 1337d356a17e7..872f81a447862 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -225,26 +225,6 @@ func (n *CallExpr) SetOp(op Op) { } } -// A CallPartExpr is a method expression X.Method (uncalled). -type CallPartExpr struct { - miniExpr - Func *Func - X Node - Method *types.Field - Prealloc *Name -} - -func NewCallPartExpr(pos src.XPos, x Node, method *types.Field, fn *Func) *CallPartExpr { - n := &CallPartExpr{Func: fn, X: x, Method: method} - n.op = OCALLPART - n.pos = pos - n.typ = fn.Type() - n.Func = fn - return n -} - -func (n *CallPartExpr) Sym() *types.Sym { return n.Method.Sym } - // A ClosureExpr is a function literal expression. type ClosureExpr struct { miniExpr @@ -476,24 +456,6 @@ func (n *MakeExpr) SetOp(op Op) { } } -// A MethodExpr is a method expression T.M (where T is a type). -type MethodExpr struct { - miniExpr - T *types.Type - Method *types.Field - FuncName_ *Name -} - -func NewMethodExpr(pos src.XPos, t *types.Type, method *types.Field) *MethodExpr { - n := &MethodExpr{T: t, Method: method} - n.pos = pos - n.op = OMETHEXPR - return n -} - -func (n *MethodExpr) FuncName() *Name { return n.FuncName_ } -func (n *MethodExpr) Sym() *types.Sym { panic("MethodExpr.Sym") } - // A NilExpr represents the predefined untyped constant nil. // (It may be copied and assigned a type, though.) type NilExpr struct { @@ -567,12 +529,13 @@ func NewNameOffsetExpr(pos src.XPos, name *Name, offset int64, typ *types.Type) return n } -// A SelectorExpr is a selector expression X.Sym. +// A SelectorExpr is a selector expression X.Sel. type SelectorExpr struct { miniExpr X Node Sel *types.Sym Selection *types.Field + Prealloc *Name // preallocated storage for OCALLPART, if any } func NewSelectorExpr(pos src.XPos, op Op, x Node, sel *types.Sym) *SelectorExpr { @@ -586,7 +549,7 @@ func (n *SelectorExpr) SetOp(op Op) { switch op { default: panic(n.no("SetOp " + op.String())) - case ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OXDOT: + case OXDOT, ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OCALLPART, OMETHEXPR: n.op = op } } @@ -596,6 +559,16 @@ func (n *SelectorExpr) Implicit() bool { return n.flags&miniExprImplicit != func (n *SelectorExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } func (n *SelectorExpr) Offset() int64 { return n.Selection.Offset } +func (n *SelectorExpr) FuncName() *Name { + if n.Op() != OMETHEXPR { + panic(n.no("FuncName")) + } + fn := NewNameAt(n.Selection.Pos, MethodSym(n.X.Type(), n.Sel)) + fn.Class_ = PFUNC + fn.SetType(n.Type()) + return fn +} + // Before type-checking, bytes.Buffer is a SelectorExpr. // After type-checking it becomes a Name. func (*SelectorExpr) CanBeNtype() {} @@ -1089,13 +1062,8 @@ func MethodExprName(n Node) *Name { // MethodFunc is like MethodName, but returns the types.Field instead. 
func MethodExprFunc(n Node) *types.Field { switch n.Op() { - case ODOTMETH: + case ODOTMETH, OMETHEXPR, OCALLPART: return n.(*SelectorExpr).Selection - case OMETHEXPR: - return n.(*MethodExpr).Method - case OCALLPART: - n := n.(*CallPartExpr) - return n.Method } base.Fatalf("unexpected node: %v (%v)", n, n.Op()) panic("unreachable") diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 49f451a5d85a1..7680f05ad2bfc 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -630,10 +630,6 @@ func exprFmt(n Node, s fmt.State, prec int) { case OPACK, ONONAME: fmt.Fprint(s, n.Sym()) - case OMETHEXPR: - n := n.(*MethodExpr) - fmt.Fprint(s, n.FuncName().Sym()) - case ONAMEOFFSET: n := n.(*NameOffsetExpr) fmt.Fprintf(s, "(%v)(%v@%d)", n.Type(), n.Name_, n.Offset_) @@ -749,16 +745,7 @@ func exprFmt(n Node, s fmt.State, prec int) { n := n.(*StructKeyExpr) fmt.Fprintf(s, "%v:%v", n.Field, n.Value) - case OCALLPART: - n := n.(*CallPartExpr) - exprFmt(n.X, s, nprec) - if n.Method.Sym == nil { - fmt.Fprint(s, ".") - return - } - fmt.Fprintf(s, ".%s", n.Method.Sym.Name) - - case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH: + case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH, OCALLPART, OMETHEXPR: n := n.(*SelectorExpr) exprFmt(n.X, s, nprec) if n.Sel == nil { @@ -1160,12 +1147,6 @@ func dumpNode(w io.Writer, n Node, depth int) { } return - case OMETHEXPR: - n := n.(*MethodExpr) - fmt.Fprintf(w, "%+v-%+v", n.Op(), n.FuncName().Sym()) - dumpNodeHeader(w, n) - return - case OASOP: n := n.(*AssignOpStmt) fmt.Fprintf(w, "%+v-%+v", n.Op(), n.AsOp) diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 27a5311748c25..a1ce9a4e9dddb 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -209,23 +209,6 @@ func (n *CallExpr) editChildren(edit func(Node) Node) { editList(n.Body, edit) } -func (n *CallPartExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *CallPartExpr) copy() Node { - c := *n - c.init = c.init.Copy() - return &c -} -func (n *CallPartExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - return err -} -func (n *CallPartExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) -} - func (n *CaseClause) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *CaseClause) copy() Node { c := *n @@ -655,21 +638,6 @@ func (n *MapType) editChildren(edit func(Node) Node) { n.Elem = maybeEdit(n.Elem, edit) } -func (n *MethodExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } -func (n *MethodExpr) copy() Node { - c := *n - c.init = c.init.Copy() - return &c -} -func (n *MethodExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - return err -} -func (n *MethodExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) -} - func (n *Name) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *Name) copy() Node { panic("Name.copy") } func (n *Name) doChildren(do func(Node) error) error { diff --git a/src/cmd/compile/internal/staticinit/sched.go b/src/cmd/compile/internal/staticinit/sched.go index 2711f6cec0a9e..d8f51766deee1 100644 --- a/src/cmd/compile/internal/staticinit/sched.go +++ b/src/cmd/compile/internal/staticinit/sched.go @@ -104,7 +104,7 @@ func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Ty switch r.Op() { 
case ir.OMETHEXPR: - r = r.(*ir.MethodExpr).FuncName() + r = r.(*ir.SelectorExpr).FuncName() fallthrough case ir.ONAME: r := r.(*ir.Name) @@ -165,7 +165,7 @@ func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Ty } x := e.Expr if x.Op() == ir.OMETHEXPR { - x = x.(*ir.MethodExpr).FuncName() + x = x.(*ir.SelectorExpr).FuncName() } if x.Op() == ir.ONAME && s.staticcopy(l, loff+e.Xoffset, x.(*ir.Name), typ) { continue @@ -195,7 +195,7 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty return s.staticcopy(l, loff, r, typ) case ir.OMETHEXPR: - r := r.(*ir.MethodExpr) + r := r.(*ir.SelectorExpr) return s.staticcopy(l, loff, r.FuncName(), typ) case ir.ONIL: @@ -461,7 +461,7 @@ func StaticLoc(n ir.Node) (name *ir.Name, offset int64, ok bool) { return n, 0, true case ir.OMETHEXPR: - n := n.(*ir.MethodExpr) + n := n.(*ir.SelectorExpr) return StaticLoc(n.FuncName()) case ir.ODOT: diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go index 3e7a880c2a466..0682548c27367 100644 --- a/src/cmd/compile/internal/typecheck/expr.go +++ b/src/cmd/compile/internal/typecheck/expr.go @@ -626,10 +626,8 @@ func tcDot(n *ir.SelectorExpr, top int) ir.Node { } if (n.Op() == ir.ODOTINTER || n.Op() == ir.ODOTMETH) && top&ctxCallee == 0 { - // Create top-level function. - fn := makepartialcall(n) - - return ir.NewCallPartExpr(n.Pos(), n.X, n.Selection, fn) + n.SetOp(ir.OCALLPART) + n.SetType(MethodValueWrapper(n).Type()) } return n } diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index ed4f3ad4fe9e7..c58fef10ecd21 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -91,7 +91,7 @@ func ClosureType(clo *ir.ClosureExpr) *types.Type { // PartialCallType returns the struct type used to hold all the information // needed in the closure for n (n must be a OCALLPART node). // The address of a variable of the returned type can be cast to a func. -func PartialCallType(n *ir.CallPartExpr) *types.Type { +func PartialCallType(n *ir.SelectorExpr) *types.Type { t := types.NewStruct(types.NoPkg, []*types.Field{ types.NewField(base.Pos, Lookup("F"), types.Types[types.TUINTPTR]), types.NewField(base.Pos, Lookup("R"), n.X.Type()), @@ -247,9 +247,17 @@ func closurename(outerfunc *ir.Func) *types.Sym { // globClosgen is like Func.Closgen, but for the global scope. var globClosgen int32 -// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed -// for partial calls. -func makepartialcall(dot *ir.SelectorExpr) *ir.Func { +// MethodValueWrapper returns the DCLFUNC node representing the +// wrapper function (*-fm) needed for the given method value. If the +// wrapper function hasn't already been created yet, it's created and +// added to Target.Decls. +// +// TODO(mdempsky): Move into walk. This isn't part of type checking. 
+func MethodValueWrapper(dot *ir.SelectorExpr) *ir.Func { + if dot.Op() != ir.OCALLPART { + base.Fatalf("MethodValueWrapper: unexpected %v (%v)", dot, dot.Op()) + } + t0 := dot.Type() meth := dot.Sel rcvrtype := dot.X.Type() diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index 3b071a61abbd4..e35cbcafa25e5 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -1252,17 +1252,6 @@ func (w *exportWriter) expr(n ir.Node) { w.pos(n.Pos()) w.value(n.Type(), n.Val()) - case ir.OMETHEXPR: - // Special case: explicit name of func (*T) method(...) is turned into pkg.(*T).method, - // but for export, this should be rendered as (*pkg.T).meth. - // These nodes have the special property that they are names with a left OTYPE and a right ONAME. - n := n.(*ir.MethodExpr) - w.op(ir.OXDOT) - w.pos(n.Pos()) - w.op(ir.OTYPE) - w.typ(n.T) // n.Left.Op == OTYPE - w.selector(n.Method.Sym) - case ir.ONAME: // Package scope name. n := n.(*ir.Name) @@ -1336,15 +1325,7 @@ func (w *exportWriter) expr(n ir.Node) { // case OSTRUCTKEY: // unreachable - handled in case OSTRUCTLIT by elemList - case ir.OCALLPART: - // An OCALLPART is an OXDOT before type checking. - n := n.(*ir.CallPartExpr) - w.op(ir.OXDOT) - w.pos(n.Pos()) - w.expr(n.X) - w.selector(n.Method.Sym) - - case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH: + case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH, ir.OCALLPART, ir.OMETHEXPR: n := n.(*ir.SelectorExpr) w.op(ir.OXDOT) w.pos(n.Pos()) diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index e23c249ff2b90..ff9178b5972f7 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -1176,19 +1176,16 @@ func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) { return n } - me := ir.NewMethodExpr(n.Pos(), n.X.Type(), m) - me.SetType(NewMethodType(m.Type, n.X.Type())) - f := NewName(ir.MethodSym(t, m.Sym)) - f.Class_ = ir.PFUNC - f.SetType(me.Type()) - me.FuncName_ = f + n.SetOp(ir.OMETHEXPR) + n.Selection = m + n.SetType(NewMethodType(m.Type, n.X.Type())) // Issue 25065. Make sure that we emit the symbol for a local method. if base.Ctxt.Flag_dynlink && !inimport && (t.Sym() == nil || t.Sym().Pkg == types.LocalPkg) { - NeedFuncSym(me.FuncName_.Sym()) + NeedFuncSym(n.FuncName().Sym()) } - return me + return n } func derefall(t *types.Type) *types.Type { @@ -1422,7 +1419,7 @@ notenough: // Method expressions have the form T.M, and the compiler has // rewritten those to ONAME nodes but left T in Left. if call.Op() == ir.OMETHEXPR { - call := call.(*ir.MethodExpr) + call := call.(*ir.SelectorExpr) base.Errorf("not enough arguments in call to method expression %v%s", call, details) } else { base.Errorf("not enough arguments in call to %v%s", call, details) diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go index 30f86f0965a78..9bcb82bc03669 100644 --- a/src/cmd/compile/internal/walk/closure.go +++ b/src/cmd/compile/internal/walk/closure.go @@ -151,7 +151,7 @@ func walkClosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node { return walkExpr(cfn, init) } -func walkCallPart(n *ir.CallPartExpr, init *ir.Nodes) ir.Node { +func walkCallPart(n *ir.SelectorExpr, init *ir.Nodes) ir.Node { // Create closure in the form of a composite literal. 
// For x.M with receiver (x) type T, the generated code looks like: // @@ -176,7 +176,7 @@ func walkCallPart(n *ir.CallPartExpr, init *ir.Nodes) ir.Node { clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ).(ir.Ntype), nil) clos.SetEsc(n.Esc()) - clos.List = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, n.Func.Nname), n.X} + clos.List = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, typecheck.MethodValueWrapper(n).Nname), n.X} addr := typecheck.NodAddr(clos) addr.SetEsc(n.Esc()) diff --git a/src/cmd/compile/internal/walk/complit.go b/src/cmd/compile/internal/walk/complit.go index 8c4f9583ef5df..fadcd87f25803 100644 --- a/src/cmd/compile/internal/walk/complit.go +++ b/src/cmd/compile/internal/walk/complit.go @@ -539,7 +539,7 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, n)) case ir.OMETHEXPR: - n := n.(*ir.MethodExpr) + n := n.(*ir.SelectorExpr) anylit(n.FuncName(), var_, init) case ir.OPTRLIT: @@ -666,7 +666,7 @@ func genAsStatic(as *ir.AssignStmt) { staticdata.InitConst(name, offset, r, int(r.Type().Width)) return case ir.OMETHEXPR: - r := r.(*ir.MethodExpr) + r := r.(*ir.SelectorExpr) staticdata.InitFunc(name, offset, r.FuncName()) return case ir.ONAME: diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index fd0dd5b06248a..7cc67580248b5 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -100,7 +100,7 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OMETHEXPR: // TODO(mdempsky): Do this right after type checking. - n := n.(*ir.MethodExpr) + n := n.(*ir.SelectorExpr) return n.FuncName() case ir.ONOT, ir.ONEG, ir.OPLUS, ir.OBITNOT, ir.OREAL, ir.OIMAG, ir.OSPTR, ir.OITAB, ir.OIDATA: @@ -306,7 +306,7 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node { return walkClosure(n.(*ir.ClosureExpr), init) case ir.OCALLPART: - return walkCallPart(n.(*ir.CallPartExpr), init) + return walkCallPart(n.(*ir.SelectorExpr), init) } // No return! Each case must return (or panic), diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index ebbd467570145..0dd76ccee9a9b 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -1310,7 +1310,7 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node { return n case ir.OCALLPART: - n := n.(*ir.CallPartExpr) + n := n.(*ir.SelectorExpr) n.X = o.expr(n.X, nil) if n.Transient() { t := typecheck.PartialCallType(n) From 6acbae4fcc640715efd01cb161a65e1e04fda3cb Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 28 Dec 2020 17:06:43 -0800 Subject: [PATCH 285/474] [dev.regabi] cmd/compile: address some ir TODOs Previously, ODOTTYPE/ODOTTYPE2 were forced to reuse some available Node fields for storing pointers to runtime type descriptors. This resulted in awkward field types for TypeAssertExpr and AddrExpr. This CL gives TypeAssertExpr proper fields for the runtime type descriptors, and also tightens the field types as possible/appropriate. Passes toolstash -cmp. 
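For reference, these descriptors feed the lowering of the two
ordinary Go assertion forms: the hard assertion, whose failure path
calls panicdottype, and the commaok form, which never panics. A
runnable reminder:

    package main

    import "fmt"

    func main() {
        var i interface{} = 42

        n := i.(int) // hard assertion; failure would reach panicdottype
        fmt.Println(n)

        s, ok := i.(string) // commaok form; ok reports success
        fmt.Println(s, ok)  // "" false
    }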
Change-Id: I521ee7a1462affc5459de33a0de6c68a7d6416ba Reviewed-on: https://go-review.googlesource.com/c/go/+/280637 Trust: Matthew Dempsky Trust: Dan Scales Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Dan Scales --- src/cmd/compile/internal/ir/expr.go | 11 ++++++++--- src/cmd/compile/internal/ir/node_gen.go | 7 +------ src/cmd/compile/internal/ssagen/ssa.go | 8 ++++---- src/cmd/compile/internal/typecheck/expr.go | 2 +- src/cmd/compile/internal/walk/expr.go | 7 ++++--- 5 files changed, 18 insertions(+), 17 deletions(-) diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 872f81a447862..825d4ace78548 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -109,7 +109,7 @@ func NewAddStringExpr(pos src.XPos, list []Node) *AddStringExpr { type AddrExpr struct { miniExpr X Node - Alloc Node // preallocated storage if any + Alloc *Name // preallocated storage if any } func NewAddrExpr(pos src.XPos, x Node) *AddrExpr { @@ -660,8 +660,13 @@ func (n *StarExpr) SetOTYPE(t *types.Type) { type TypeAssertExpr struct { miniExpr X Node - Ntype Node // TODO: Should be Ntype, but reused as address of type structure - Itab Nodes // Itab[0] is itab + Ntype Ntype + + // Runtime type information provided by walkDotType. + // Caution: These aren't always populated; see walkDotType. + SrcType *AddrExpr // *runtime._type for X's type + DstType *AddrExpr // *runtime._type for Type + Itab *AddrExpr // *runtime.itab for Type implementing X's type } func NewTypeAssertExpr(pos src.XPos, x Node, typ Ntype) *TypeAssertExpr { diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index a1ce9a4e9dddb..1d24904a3f34c 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -32,13 +32,11 @@ func (n *AddrExpr) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) err = maybeDo(n.X, err, do) - err = maybeDo(n.Alloc, err, do) return err } func (n *AddrExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) n.X = maybeEdit(n.X, edit) - n.Alloc = maybeEdit(n.Alloc, edit) } func (n *ArrayType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } @@ -954,7 +952,6 @@ func (n *TypeAssertExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } func (n *TypeAssertExpr) copy() Node { c := *n c.init = c.init.Copy() - c.Itab = c.Itab.Copy() return &c } func (n *TypeAssertExpr) doChildren(do func(Node) error) error { @@ -962,14 +959,12 @@ func (n *TypeAssertExpr) doChildren(do func(Node) error) error { err = maybeDoList(n.init, err, do) err = maybeDo(n.X, err, do) err = maybeDo(n.Ntype, err, do) - err = maybeDoList(n.Itab, err, do) return err } func (n *TypeAssertExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) n.X = maybeEdit(n.X, edit) - n.Ntype = maybeEdit(n.Ntype, edit) - editList(n.Itab, edit) + n.Ntype = toNtype(maybeEdit(n.Ntype, edit)) } func (n *TypeSwitchGuard) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 0da6ab3272aec..509d53f8c9b90 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -5978,8 +5978,8 @@ func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt * // commaok indicates whether to panic or return a bool. // If commaok is false, resok will be nil. 
func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Value) { - iface := s.expr(n.X) // input interface - target := s.expr(n.Ntype) // target type + iface := s.expr(n.X) // input interface + target := s.expr(n.DstType) // target type byteptr := s.f.Config.Types.BytePtr if n.Type().IsInterface() { @@ -6086,7 +6086,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val targetITab = target } else { // Looking for pointer to itab for target type and source interface. - targetITab = s.expr(n.Itab[0]) + targetITab = s.expr(n.Itab) } var tmp ir.Node // temporary for use with large types @@ -6113,7 +6113,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val if !commaok { // on failure, panic by calling panicdottype s.startBlock(bFail) - taddr := s.expr(n.Ntype.(*ir.AddrExpr).Alloc) + taddr := s.expr(n.SrcType) if n.X.Type().IsEmptyInterface() { s.rtcall(ir.Syms.PanicdottypeE, false, nil, itab, target, taddr) } else { diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go index 0682548c27367..29d7a080114d7 100644 --- a/src/cmd/compile/internal/typecheck/expr.go +++ b/src/cmd/compile/internal/typecheck/expr.go @@ -649,7 +649,7 @@ func tcDotType(n *ir.TypeAssertExpr) ir.Node { } if n.Ntype != nil { - n.Ntype = typecheck(n.Ntype, ctxType) + n.Ntype = typecheckNtype(n.Ntype) n.SetType(n.Ntype.Type()) n.Ntype = nil if n.Type() == nil { diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index 7cc67580248b5..f40aa6adb5de5 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -639,12 +639,13 @@ func walkDot(n *ir.SelectorExpr, init *ir.Nodes) ir.Node { func walkDotType(n *ir.TypeAssertExpr, init *ir.Nodes) ir.Node { n.X = walkExpr(n.X, init) // Set up interface type addresses for back end. - n.Ntype = reflectdata.TypePtr(n.Type()) + + n.DstType = reflectdata.TypePtr(n.Type()) if n.Op() == ir.ODOTTYPE { - n.Ntype.(*ir.AddrExpr).Alloc = reflectdata.TypePtr(n.X.Type()) + n.SrcType = reflectdata.TypePtr(n.X.Type()) } if !n.Type().IsInterface() && !n.X.Type().IsEmptyInterface() { - n.Itab = []ir.Node{reflectdata.ITabAddr(n.Type(), n.X.Type())} + n.Itab = reflectdata.ITabAddr(n.Type(), n.X.Type()) } return n } From 289da2b33ed6292c853017a15d3108d22ea7491a Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 28 Dec 2020 17:30:04 -0800 Subject: [PATCH 286/474] [dev.regabi] cmd/compile: move Node.Opt to Name Escape analysis uses Node.Opt to map nodes to their "location", so that other references to the same node use the same location again. But in the current implementation of escape analysis, we never need to refer back to a node's location except for named nodes (since other nodes are anonymous, and have no way to be referenced). This CL moves Opt from Node down to Name, turns it into a directly accessed field, and cleans up escape analysis to avoid setting Opt on non-named expressions. One nit: in walkCheckPtrArithmetic, we were abusing Opt as a way to detect/prevent loops. This CL adds a CheckPtr bit flag instead. Passes toolstash -cmp. 
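The CheckPtr bit is a standard mark-before-recursing guard: set the
bit, do work that may revisit the node, then clear it. A tiny
self-contained sketch of the idea (names invented):

    package main

    import "fmt"

    type node struct{ checkPtr bool }

    // rewrite marks n before doing work that can re-enter rewrite on
    // the same node; the bit turns the second visit into a no-op.
    func rewrite(n *node) {
        if n.checkPtr {
            return
        }
        n.checkPtr = true
        defer func() { n.checkPtr = false }()

        rewrite(n) // simulated re-entry; returns immediately
        fmt.Println("rewrote node once")
    }

    func main() { rewrite(&node{}) }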
Change-Id: If57d5ad8d972fa63bedbe69b9ebb6753e31aba85 Reviewed-on: https://go-review.googlesource.com/c/go/+/280638 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky Reviewed-by: Cuong Manh Le TryBot-Result: Go Bot --- src/cmd/compile/internal/escape/escape.go | 45 ++++++++++++++--------- src/cmd/compile/internal/ir/expr.go | 8 ++-- src/cmd/compile/internal/ir/mini.go | 2 - src/cmd/compile/internal/ir/name.go | 4 +- src/cmd/compile/internal/ir/node.go | 2 - src/cmd/compile/internal/walk/convert.go | 14 +++---- src/cmd/compile/internal/walk/walk.go | 2 - 7 files changed, 38 insertions(+), 39 deletions(-) diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index 7b4037e028b34..b953666ce6267 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -165,12 +165,16 @@ func Fmt(n ir.Node) string { text = fmt.Sprintf("esc(%d)", n.Esc()) } - if e, ok := n.Opt().(*location); ok && e.loopDepth != 0 { - if text != "" { - text += " " + if n.Op() == ir.ONAME { + n := n.(*ir.Name) + if e, ok := n.Opt.(*location); ok && e.loopDepth != 0 { + if text != "" { + text += " " + } + text += fmt.Sprintf("ld(%d)", e.loopDepth) } - text += fmt.Sprintf("ld(%d)", e.loopDepth) } + return text } @@ -312,7 +316,7 @@ func (e *escape) stmt(n ir.Node) { // Record loop depth at declaration. n := n.(*ir.Decl) if !ir.IsBlank(n.X) { - e.dcl(n.X) + e.dcl(n.X.(*ir.Name)) } case ir.OLABEL: @@ -370,7 +374,7 @@ func (e *escape) stmt(n ir.Node) { var ks []hole for _, cas := range n.Cases { // cases if typesw && n.Tag.(*ir.TypeSwitchGuard).Tag != nil { - cv := cas.Var + cv := cas.Var.(*ir.Name) k := e.dcl(cv) // type switch variables have no ODCL. if cv.Type().HasPointers() { ks = append(ks, k.dotType(cv.Type(), cas, "switch case")) @@ -1097,7 +1101,7 @@ func (e *escape) teeHole(ks ...hole) hole { return loc.asHole() } -func (e *escape) dcl(n ir.Node) hole { +func (e *escape) dcl(n *ir.Name) hole { loc := e.oldLoc(n) loc.loopDepth = e.loopDepth return loc.asHole() @@ -1151,15 +1155,17 @@ func (e *escape) newLoc(n ir.Node, transient bool) *location { } e.allLocs = append(e.allLocs, loc) if n != nil { - if n.Op() == ir.ONAME && n.Name().Curfn != e.curfn { + if n.Op() == ir.ONAME { n := n.(*ir.Name) - base.Fatalf("curfn mismatch: %v != %v", n.Name().Curfn, e.curfn) - } + if n.Curfn != e.curfn { + base.Fatalf("curfn mismatch: %v != %v", n.Name().Curfn, e.curfn) + } - if n.Opt() != nil { - base.Fatalf("%v already has a location", n) + if n.Opt != nil { + base.Fatalf("%v already has a location", n) + } + n.Opt = loc } - n.SetOpt(loc) if why := HeapAllocReason(n); why != "" { e.flow(e.heapHole().addr(n, why), loc) @@ -1168,9 +1174,9 @@ func (e *escape) newLoc(n ir.Node, transient bool) *location { return loc } -func (e *escape) oldLoc(n ir.Node) *location { - n = canonicalNode(n) - return n.Opt().(*location) +func (e *escape) oldLoc(n *ir.Name) *location { + n = canonicalNode(n).(*ir.Name) + return n.Opt.(*location) } func (l *location) asHole() hole { @@ -1516,7 +1522,10 @@ func (e *escape) finish(fns []*ir.Func) { if n == nil { continue } - n.SetOpt(nil) + if n.Op() == ir.ONAME { + n := n.(*ir.Name) + n.Opt = nil + } // Update n.Esc based on escape analysis results. 
@@ -2122,7 +2131,7 @@ func (e *escape) paramTag(fn *ir.Func, narg int, f *types.Field) string { return esc.Encode() } - n := ir.AsNode(f.Nname) + n := f.Nname.(*ir.Name) loc := e.oldLoc(n) esc := loc.paramEsc esc.Optimize() diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 825d4ace78548..bb32d96088bf1 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -48,8 +48,7 @@ type Expr interface { type miniExpr struct { miniNode typ *types.Type - init Nodes // TODO(rsc): Don't require every Node to have an init - opt interface{} // TODO(rsc): Don't require every Node to have an opt? + init Nodes // TODO(rsc): Don't require every Node to have an init flags bitset8 } @@ -59,14 +58,13 @@ const ( miniExprTransient miniExprBounded miniExprImplicit // for use by implementations; not supported by every Expr + miniExprCheckPtr ) func (*miniExpr) isExpr() {} func (n *miniExpr) Type() *types.Type { return n.typ } func (n *miniExpr) SetType(x *types.Type) { n.typ = x } -func (n *miniExpr) Opt() interface{} { return n.opt } -func (n *miniExpr) SetOpt(x interface{}) { n.opt = x } func (n *miniExpr) HasCall() bool { return n.flags&miniExprHasCall != 0 } func (n *miniExpr) SetHasCall(b bool) { n.flags.set(miniExprHasCall, b) } func (n *miniExpr) NonNil() bool { return n.flags&miniExprNonNil != 0 } @@ -324,6 +322,8 @@ func NewConvExpr(pos src.XPos, op Op, typ *types.Type, x Node) *ConvExpr { func (n *ConvExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } func (n *ConvExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } +func (n *ConvExpr) CheckPtr() bool { return n.flags&miniExprCheckPtr != 0 } +func (n *ConvExpr) SetCheckPtr(b bool) { n.flags.set(miniExprCheckPtr, b) } func (n *ConvExpr) SetOp(op Op) { switch op { diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go index 53a63afe9b6d9..92701326216e4 100644 --- a/src/cmd/compile/internal/ir/mini.go +++ b/src/cmd/compile/internal/ir/mini.go @@ -102,5 +102,3 @@ func (n *miniNode) HasCall() bool { return false } func (n *miniNode) SetHasCall(bool) { panic(n.no("SetHasCall")) } func (n *miniNode) NonNil() bool { return false } func (n *miniNode) MarkNonNil() { panic(n.no("MarkNonNil")) } -func (n *miniNode) Opt() interface{} { return nil } -func (n *miniNode) SetOpt(interface{}) { panic(n.no("SetOpt")) } diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index cb4876b9f8537..980e3f6349c70 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -42,6 +42,7 @@ type Name struct { Func *Func Offset_ int64 val constant.Value + Opt interface{} // for use by escape analysis orig Node Embed *[]Embed // list of embedded files, for ONAME var @@ -321,8 +322,7 @@ func (n *Name) Val() constant.Value { return n.val } -// SetVal sets the constant.Value for the node, -// which must not have been used with SetOpt. +// SetVal sets the constant.Value for the node. 
 func (n *Name) SetVal(v constant.Value) {
 	if n.op != OLITERAL {
 		panic(n.no("SetVal"))
 	}
diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go
index 54a3e2ba89bb1..0238e9de859a2 100644
--- a/src/cmd/compile/internal/ir/node.go
+++ b/src/cmd/compile/internal/ir/node.go
@@ -50,8 +50,6 @@ type Node interface {
 	SetEsc(x uint16)
 	Walkdef() uint8
 	SetWalkdef(x uint8)
-	Opt() interface{}
-	SetOpt(x interface{})
 	Diag() bool
 	SetDiag(x bool)
 	Typecheck() uint8
diff --git a/src/cmd/compile/internal/walk/convert.go b/src/cmd/compile/internal/walk/convert.go
index 99abf306680d8..d0cd5ff75377f 100644
--- a/src/cmd/compile/internal/walk/convert.go
+++ b/src/cmd/compile/internal/walk/convert.go
@@ -438,18 +438,14 @@ func walkCheckPtrAlignment(n *ir.ConvExpr, init *ir.Nodes, count ir.Node) ir.Nod
 }

 func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
-	// Calling cheapexpr(n, init) below leads to a recursive call
-	// to walkexpr, which leads us back here again. Use n.Opt to
+	// Calling cheapexpr(n, init) below leads to a recursive call to
+	// walkexpr, which leads us back here again. Use n.CheckPtr to
 	// prevent infinite loops.
-	if opt := n.Opt(); opt == &walkCheckPtrArithmeticMarker {
+	if n.CheckPtr() {
 		return n
-	} else if opt != nil {
-		// We use n.Opt() here because today it's not used for OCONVNOP. If that changes,
-		// there's no guarantee that temporarily replacing it is safe, so just hard fail here.
-		base.Fatalf("unexpected Opt: %v", opt)
 	}
-	n.SetOpt(&walkCheckPtrArithmeticMarker)
-	defer n.SetOpt(nil)
+	n.SetCheckPtr(true)
+	defer n.SetCheckPtr(false)

 	// TODO(mdempsky): Make stricter. We only need to exempt
 	// reflect.Value.Pointer and reflect.Value.UnsafeAddr.
diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go
index c4c3debde4819..bdc9a2ea6a444 100644
--- a/src/cmd/compile/internal/walk/walk.go
+++ b/src/cmd/compile/internal/walk/walk.go
@@ -377,8 +377,6 @@ func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) {

 var wrapCall_prgen int

-var walkCheckPtrArithmeticMarker byte
-
 // appendWalkStmt typechecks and walks stmt and then appends it to init.
 func appendWalkStmt(init *ir.Nodes, stmt ir.Node) {
 	op := stmt.Op()

From 25c613c02dabb45f3a3dc038a8f01c664d98731a Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Mon, 28 Dec 2020 19:14:39 -0800
Subject: [PATCH 287/474] [dev.regabi] cmd/compile: add Linksym helpers

Syms are meant to be just interned (pkg, name) tuples, and are a
purely abstract, Go-language concept. As such, associating them with
linker symbols (a low-level, implementation-oriented detail) is
inappropriate.

There's still work to be done before linker symbols can be directly
attached to their appropriate, higher-level objects instead. But in
the meantime, we can at least add helper functions and discourage
folks from using Sym.Linksym directly. The next CL will mechanically
rewrite code to use these helpers where possible.

Passes toolstash -cmp.
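Each helper is a one-liner that lets call sites name the high-level
object rather than reach through its Sym. A simplified sketch with
stand-in types (not the compiler's real ones):

    package main

    import "fmt"

    type LSym struct{ Name string } // linker symbol: low-level detail
    type Sym struct{ name string }  // interned (pkg, name) tuple

    // Low-level accessor; discouraged at call sites.
    func (s *Sym) Linksym() *LSym { return &LSym{Name: s.name} }

    type Name struct{ sym *Sym }

    // Helper: ask the named object directly.
    func (n *Name) Linksym() *LSym { return n.sym.Linksym() }

    func main() {
        n := &Name{sym: &Sym{name: "main.f"}}
        fmt.Println(n.Linksym().Name) // previously n.sym.Linksym().Name
    }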
Change-Id: I413bd1c80bce056304f9a7343526bd153f2b9c7d Reviewed-on: https://go-review.googlesource.com/c/go/+/280639 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/gc/obj.go | 2 +- src/cmd/compile/internal/ir/func.go | 10 +++------- src/cmd/compile/internal/ir/name.go | 3 +++ src/cmd/compile/internal/reflectdata/reflect.go | 16 ++++++++++++++-- src/cmd/compile/internal/ssagen/pgen.go | 14 ++++---------- src/cmd/compile/internal/ssagen/ssa.go | 4 ++-- src/cmd/compile/internal/staticdata/data.go | 7 +++++++ src/cmd/compile/internal/types/sym.go | 4 ++++ src/cmd/compile/internal/walk/expr.go | 2 +- 9 files changed, 39 insertions(+), 23 deletions(-) diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 0ab3a8dad43ae..d0454981f4439 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -260,7 +260,7 @@ func addGCLocals() { } } -func ggloblnod(nam ir.Node) { +func ggloblnod(nam *ir.Name) { s := nam.Sym().Linksym() s.Gotype = reflectdata.TypeSym(nam.Type()).Linksym() flags := 0 diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 16d67f6ae0c11..a4f5875aabde7 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -78,7 +78,7 @@ type Func struct { // Marks records scope boundary changes. Marks []Mark - FieldTrack map[*types.Sym]struct{} + FieldTrack map[*obj.LSym]struct{} DebugInfo interface{} LSym *obj.LSym @@ -119,12 +119,8 @@ func (f *Func) isStmt() {} func (f *Func) Type() *types.Type { return f.typ } func (f *Func) SetType(x *types.Type) { f.typ = x } -func (f *Func) Sym() *types.Sym { - if f.Nname != nil { - return f.Nname.Sym() - } - return nil -} +func (f *Func) Sym() *types.Sym { return f.Nname.Sym() } +func (f *Func) Linksym() *obj.LSym { return f.Nname.Linksym() } // An Inline holds fields used for function bodies that can be inlined. type Inline struct { diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 980e3f6349c70..b13b57e95fa99 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -7,6 +7,7 @@ package ir import ( "cmd/compile/internal/base" "cmd/compile/internal/types" + "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/src" @@ -238,6 +239,8 @@ func (n *Name) SetFrameOffset(x int64) { n.Offset_ = x } func (n *Name) Iota() int64 { return n.Offset_ } func (n *Name) SetIota(x int64) { n.Offset_ = x } +func (n *Name) Linksym() *obj.LSym { return n.sym.Linksym() } + func (*Name) CanBeNtype() {} func (*Name) CanBeAnSSASym() {} func (*Name) CanBeAnSSAAux() {} diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go index df80380fc1311..4c625b40cb8ef 100644 --- a/src/cmd/compile/internal/reflectdata/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -812,8 +812,8 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { // TrackSym returns the symbol for tracking use of field/method f, assumed // to be a member of struct/interface type t. -func TrackSym(t *types.Type, f *types.Field) *types.Sym { - return ir.Pkgs.Track.Lookup(t.ShortString() + "." + f.Sym.Name) +func TrackSym(t *types.Type, f *types.Field) *obj.LSym { + return ir.Pkgs.Track.Lookup(t.ShortString() + "." 
+ f.Sym.Name).Linksym() } func TypeSymPrefix(prefix string, t *types.Type) *types.Sym { @@ -845,6 +845,18 @@ func TypeSym(t *types.Type) *types.Sym { return s } +func TypeLinksymPrefix(prefix string, t *types.Type) *obj.LSym { + return TypeSymPrefix(prefix, t).Linksym() +} + +func TypeLinksymLookup(name string) *obj.LSym { + return types.TypeSymLookup(name).Linksym() +} + +func TypeLinksym(t *types.Type) *obj.LSym { + return TypeSym(t).Linksym() +} + func TypePtr(t *types.Type) *ir.AddrExpr { s := TypeSym(t) if s.Def == nil { diff --git a/src/cmd/compile/internal/ssagen/pgen.go b/src/cmd/compile/internal/ssagen/pgen.go index bc6be20d86835..72ce233fdad0e 100644 --- a/src/cmd/compile/internal/ssagen/pgen.go +++ b/src/cmd/compile/internal/ssagen/pgen.go @@ -225,7 +225,7 @@ func StackOffset(slot ssa.LocalSlot) int32 { // fieldtrack adds R_USEFIELD relocations to fnsym to record any // struct fields that it used. -func fieldtrack(fnsym *obj.LSym, tracked map[*types.Sym]struct{}) { +func fieldtrack(fnsym *obj.LSym, tracked map[*obj.LSym]struct{}) { if fnsym == nil { return } @@ -233,24 +233,18 @@ func fieldtrack(fnsym *obj.LSym, tracked map[*types.Sym]struct{}) { return } - trackSyms := make([]*types.Sym, 0, len(tracked)) + trackSyms := make([]*obj.LSym, 0, len(tracked)) for sym := range tracked { trackSyms = append(trackSyms, sym) } - sort.Sort(symByName(trackSyms)) + sort.Slice(trackSyms, func(i, j int) bool { return trackSyms[i].Name < trackSyms[j].Name }) for _, sym := range trackSyms { r := obj.Addrel(fnsym) - r.Sym = sym.Linksym() + r.Sym = sym r.Type = objabi.R_USEFIELD } } -type symByName []*types.Sym - -func (a symByName) Len() int { return len(a) } -func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name } -func (a symByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - // largeStack is info about a function whose stack frame is too large (rare). type largeStack struct { locals int64 diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 509d53f8c9b90..5cf267636bdb8 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -2106,7 +2106,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { return s.newValue3(ssa.OpSliceMake, n.Type(), ptr, len, len) case ir.OCFUNC: n := n.(*ir.UnaryExpr) - aux := n.X.Sym().Linksym() + aux := n.X.(*ir.Name).Linksym() return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb) case ir.ONAME: n := n.(*ir.Name) @@ -6826,7 +6826,7 @@ func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) { case *ir.Name: if n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT { a.Name = obj.NAME_PARAM - a.Sym = ir.Orig(n).Sym().Linksym() + a.Sym = ir.Orig(n).(*ir.Name).Linksym() a.Offset += n.FrameOffset() break } diff --git a/src/cmd/compile/internal/staticdata/data.go b/src/cmd/compile/internal/staticdata/data.go index 342a2e2bbc25f..ab9cb5bd7eca1 100644 --- a/src/cmd/compile/internal/staticdata/data.go +++ b/src/cmd/compile/internal/staticdata/data.go @@ -258,6 +258,13 @@ func FuncSym(s *types.Sym) *types.Sym { return sf } +func FuncLinksym(n *ir.Name) *obj.LSym { + if n.Op() != ir.ONAME || n.Class_ != ir.PFUNC { + base.Fatalf("expected func name: %v", n) + } + return FuncSym(n.Sym()).Linksym() +} + // NeedFuncSym ensures that s·f is exported. // It is only used with -dynlink. 
// When not compiling for dynamic linking, diff --git a/src/cmd/compile/internal/types/sym.go b/src/cmd/compile/internal/types/sym.go index cd061d5f1c4c2..2914e2ed3fa05 100644 --- a/src/cmd/compile/internal/types/sym.go +++ b/src/cmd/compile/internal/types/sym.go @@ -74,6 +74,10 @@ func (sym *Sym) LinksymName() string { return sym.Pkg.Prefix + "." + sym.Name } +// Deprecated: This method should not be used directly. Instead, use a +// higher-level abstraction that directly returns the linker symbol +// for a named object. For example, reflectdata.TypeLinksym(t) instead +// of reflectdata.TypeSym(t).Linksym(). func (sym *Sym) Linksym() *obj.LSym { if sym == nil { return nil diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index f40aa6adb5de5..0d7ffca15d00f 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -975,7 +975,7 @@ func usefield(n *ir.SelectorExpr) { sym := reflectdata.TrackSym(outer, field) if ir.CurFunc.FieldTrack == nil { - ir.CurFunc.FieldTrack = make(map[*types.Sym]struct{}) + ir.CurFunc.FieldTrack = make(map[*obj.LSym]struct{}) } ir.CurFunc.FieldTrack[sym] = struct{}{} } From ec59b197d5d92ad758c3214d906f9c750cd5b84e Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 28 Dec 2020 19:34:35 -0800 Subject: [PATCH 288/474] [dev.regabi] cmd/compile: rewrite to use linksym helpers [generated] Passes toolstash -cmp. [git-generate] cd src/cmd/compile/internal/gc pkgs=$(grep -l -w Linksym ../*/*.go | xargs dirname | grep -v '/gc$' | sort -u) rf ' ex . '"$(echo $pkgs)"' { import "cmd/compile/internal/ir" import "cmd/compile/internal/reflectdata" import "cmd/compile/internal/staticdata" import "cmd/compile/internal/types" avoid reflectdata.TypeLinksym avoid reflectdata.TypeLinksymLookup avoid reflectdata.TypeLinksymPrefix avoid staticdata.FuncLinksym var f *ir.Func var n *ir.Name var s string var t *types.Type f.Sym().Linksym() -> f.Linksym() n.Sym().Linksym() -> n.Linksym() reflectdata.TypeSym(t).Linksym() -> reflectdata.TypeLinksym(t) reflectdata.TypeSymPrefix(s, t).Linksym() -> reflectdata.TypeLinksymPrefix(s, t) staticdata.FuncSym(n.Sym()).Linksym() -> staticdata.FuncLinksym(n) types.TypeSymLookup(s).Linksym() -> reflectdata.TypeLinksymLookup(s) } ' Change-Id: I7a3ae1dcd61bcdf4a29f708ff12f7f80c2b280c6 Reviewed-on: https://go-review.googlesource.com/c/go/+/280640 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/dwarfgen/dwarf.go | 10 +++++----- src/cmd/compile/internal/gc/abiutils_test.go | 4 ++-- src/cmd/compile/internal/gc/compile.go | 2 +- src/cmd/compile/internal/gc/main.go | 4 ++-- src/cmd/compile/internal/gc/obj.go | 4 ++-- src/cmd/compile/internal/inline/inl.go | 2 +- src/cmd/compile/internal/ir/name.go | 2 +- src/cmd/compile/internal/pkginit/init.go | 4 ++-- src/cmd/compile/internal/reflectdata/alg.go | 8 ++++---- .../compile/internal/reflectdata/reflect.go | 6 +++--- src/cmd/compile/internal/ssagen/abi.go | 2 +- src/cmd/compile/internal/ssagen/ssa.go | 10 +++++----- src/cmd/compile/internal/staticdata/data.go | 18 +++++++++--------- src/cmd/compile/internal/staticdata/embed.go | 4 ++-- src/cmd/compile/internal/staticinit/sched.go | 4 ++-- src/cmd/compile/internal/walk/complit.go | 2 +- src/cmd/compile/internal/walk/race.go | 2 +- 17 files changed, 44 insertions(+), 44 deletions(-) diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go index 
19cb70058c98e..d0bee58442e39 100644 --- a/src/cmd/compile/internal/dwarfgen/dwarf.go +++ b/src/cmd/compile/internal/dwarfgen/dwarf.go @@ -26,7 +26,7 @@ func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, fn := curfn.(*ir.Func) if fn.Nname != nil { - expect := fn.Sym().Linksym() + expect := fn.Linksym() if fnsym.ABI() == obj.ABI0 { expect = fn.Sym().LinksymABI0() } @@ -90,7 +90,7 @@ func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, continue } apdecls = append(apdecls, n) - fnsym.Func().RecordAutoType(reflectdata.TypeSym(n.Type()).Linksym()) + fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type())) } } @@ -240,7 +240,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir ChildIndex: -1, }) // Record go type of to insure that it gets emitted by the linker. - fnsym.Func().RecordAutoType(reflectdata.TypeSym(n.Type()).Linksym()) + fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type())) } return decls, vars @@ -309,7 +309,7 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { } typename := dwarf.InfoPrefix + types.TypeSymName(n.Type()) - delete(fnsym.Func().Autot, reflectdata.TypeSym(n.Type()).Linksym()) + delete(fnsym.Func().Autot, reflectdata.TypeLinksym(n.Type())) inlIndex := 0 if base.Flag.GenDwarfInl > 1 { if n.Name().InlFormal() || n.Name().InlLocal() { @@ -376,7 +376,7 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var return nil } - gotype := reflectdata.TypeSym(n.Type()).Linksym() + gotype := reflectdata.TypeLinksym(n.Type()) delete(fnsym.Func().Autot, gotype) typename := dwarf.InfoPrefix + gotype.Name[len("type."):] inlIndex := 0 diff --git a/src/cmd/compile/internal/gc/abiutils_test.go b/src/cmd/compile/internal/gc/abiutils_test.go index a421a229dc7a0..656eab18cbe8d 100644 --- a/src/cmd/compile/internal/gc/abiutils_test.go +++ b/src/cmd/compile/internal/gc/abiutils_test.go @@ -40,10 +40,10 @@ func TestMain(m *testing.M) { types.PtrSize = ssagen.Arch.LinkArch.PtrSize types.RegSize = ssagen.Arch.LinkArch.RegSize types.TypeLinkSym = func(t *types.Type) *obj.LSym { - return reflectdata.TypeSym(t).Linksym() + return reflectdata.TypeLinksym(t) } types.TypeLinkSym = func(t *types.Type) *obj.LSym { - return reflectdata.TypeSym(t).Linksym() + return reflectdata.TypeLinksym(t) } typecheck.Init() os.Exit(m.Run()) diff --git a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go index 926b2dee95245..1b3dd672f3f0f 100644 --- a/src/cmd/compile/internal/gc/compile.go +++ b/src/cmd/compile/internal/gc/compile.go @@ -174,5 +174,5 @@ func isInlinableButNotInlined(fn *ir.Func) bool { if fn.Sym() == nil { return true } - return !fn.Sym().Linksym().WasInlined() + return !fn.Linksym().WasInlined() } diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index ced82736ce482..a4613f04fbc17 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -191,7 +191,7 @@ func Main(archInit func(*ssagen.ArchInfo)) { types.RegSize = ssagen.Arch.LinkArch.RegSize types.MaxWidth = ssagen.Arch.MAXWIDTH types.TypeLinkSym = func(t *types.Type) *obj.LSym { - return reflectdata.TypeSym(t).Linksym() + return reflectdata.TypeLinksym(t) } typecheck.Target = new(ir.Package) @@ -203,7 +203,7 @@ func Main(archInit func(*ssagen.ArchInfo)) { base.AutogeneratedPos = makePos(src.NewFileBase("", ""), 1, 0) types.TypeLinkSym = func(t *types.Type) *obj.LSym { - return 
reflectdata.TypeSym(t).Linksym() + return reflectdata.TypeLinksym(t) } typecheck.Init() diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index d0454981f4439..45eadf719e33d 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -261,8 +261,8 @@ func addGCLocals() { } func ggloblnod(nam *ir.Name) { - s := nam.Sym().Linksym() - s.Gotype = reflectdata.TypeSym(nam.Type()).Linksym() + s := nam.Linksym() + s.Gotype = reflectdata.TypeLinksym(nam.Type()) flags := 0 if nam.Name().Readonly() { flags = obj.RODATA diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index fc6a17b933755..126871b805522 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -932,7 +932,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b parent = b.InliningIndex() } - sym := fn.Sym().Linksym() + sym := fn.Linksym() newIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym) // Add an inline mark just before the inlined body. diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index b13b57e95fa99..79583914353d8 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -314,7 +314,7 @@ func (n *Name) MarkReadonly() { // Mark the linksym as readonly immediately // so that the SSA backend can use this information. // It will be overridden later during dumpglobls. - n.Sym().Linksym().Type = objabi.SRODATA + n.Linksym().Type = objabi.SRODATA } // Val returns the constant.Value for the node. diff --git a/src/cmd/compile/internal/pkginit/init.go b/src/cmd/compile/internal/pkginit/init.go index f964edee88379..8e3592700c3fe 100644 --- a/src/cmd/compile/internal/pkginit/init.go +++ b/src/cmd/compile/internal/pkginit/init.go @@ -34,7 +34,7 @@ func Task() *ir.Name { if n.Op() != ir.ONAME || n.(*ir.Name).Class_ != ir.PEXTERN { base.Fatalf("bad inittask: %v", n) } - deps = append(deps, n.(*ir.Name).Sym().Linksym()) + deps = append(deps, n.(*ir.Name).Linksym()) } // Make a function that contains all the initialization statements. @@ -74,7 +74,7 @@ func Task() *ir.Name { continue } } - fns = append(fns, fn.Nname.Sym().Linksym()) + fns = append(fns, fn.Nname.Linksym()) } if len(deps) == 0 && len(fns) == 0 && types.LocalPkg.Name != "main" && types.LocalPkg.Name != "runtime" { diff --git a/src/cmd/compile/internal/reflectdata/alg.go b/src/cmd/compile/internal/reflectdata/alg.go index 1f943f5795bf5..5603aefa77e79 100644 --- a/src/cmd/compile/internal/reflectdata/alg.go +++ b/src/cmd/compile/internal/reflectdata/alg.go @@ -104,7 +104,7 @@ func genhash(t *types.Type) *obj.LSym { // For other sizes of plain memory, we build a closure // that calls memhash_varlen. The size of the memory is // encoded in the first slot of the closure. - closure := types.TypeSymLookup(fmt.Sprintf(".hashfunc%d", t.Width)).Linksym() + closure := TypeLinksymLookup(fmt.Sprintf(".hashfunc%d", t.Width)) if len(closure.P) > 0 { // already generated return closure } @@ -120,7 +120,7 @@ func genhash(t *types.Type) *obj.LSym { break } - closure := TypeSymPrefix(".hashfunc", t).Linksym() + closure := TypeLinksymPrefix(".hashfunc", t) if len(closure.P) > 0 { // already generated return closure } @@ -347,7 +347,7 @@ func geneq(t *types.Type) *obj.LSym { case types.AMEM: // make equality closure. The size of the type // is encoded in the closure. 
- closure := types.TypeSymLookup(fmt.Sprintf(".eqfunc%d", t.Width)).Linksym() + closure := TypeLinksymLookup(fmt.Sprintf(".eqfunc%d", t.Width)) if len(closure.P) != 0 { return closure } @@ -363,7 +363,7 @@ func geneq(t *types.Type) *obj.LSym { break } - closure := TypeSymPrefix(".eqfunc", t).Linksym() + closure := TypeLinksymPrefix(".eqfunc", t) if len(closure.P) > 0 { // already generated return closure } diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go index 4c625b40cb8ef..87f381fbdd558 100644 --- a/src/cmd/compile/internal/reflectdata/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -1583,7 +1583,7 @@ func dgcprog(t *types.Type) (*obj.LSym, int64) { if t.Width == types.BADWIDTH { base.Fatalf("dgcprog: %v badwidth", t) } - lsym := TypeSymPrefix(".gcprog", t).Linksym() + lsym := TypeLinksymPrefix(".gcprog", t) var p gcProg p.init(lsym) p.emit(t, 0) @@ -1857,7 +1857,7 @@ var ZeroSize int64 // MarkTypeUsedInInterface marks that type t is converted to an interface. // This information is used in the linker in dead method elimination. func MarkTypeUsedInInterface(t *types.Type, from *obj.LSym) { - tsym := TypeSym(t).Linksym() + tsym := TypeLinksym(t) // Emit a marker relocation. The linker will know the type is converted // to an interface if "from" is reachable. r := obj.Addrel(from) @@ -1870,7 +1870,7 @@ func MarkTypeUsedInInterface(t *types.Type, from *obj.LSym) { func MarkUsedIfaceMethod(n *ir.CallExpr) { dot := n.X.(*ir.SelectorExpr) ityp := dot.X.Type() - tsym := TypeSym(ityp).Linksym() + tsym := TypeLinksym(ityp) r := obj.Addrel(ir.CurFunc.LSym) r.Sym = tsym // dot.Xoffset is the method index * Widthptr (the offset of code pointer diff --git a/src/cmd/compile/internal/ssagen/abi.go b/src/cmd/compile/internal/ssagen/abi.go index b0338e8155db9..cd5d962b918ce 100644 --- a/src/cmd/compile/internal/ssagen/abi.go +++ b/src/cmd/compile/internal/ssagen/abi.go @@ -166,7 +166,7 @@ func selectLSym(f *ir.Func, hasBody bool) { f.LSym = nam.Sym().LinksymABI0() needABIWrapper, wrapperABI = true, obj.ABIInternal } else { - f.LSym = nam.Sym().Linksym() + f.LSym = nam.Linksym() // No ABI override. Check that the symbol is // using the expected ABI. want := obj.ABIInternal diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 5cf267636bdb8..15c023d3325c5 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -2112,7 +2112,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { n := n.(*ir.Name) if n.Class_ == ir.PFUNC { // "value" of a function is the address of the function's closure - sym := staticdata.FuncSym(n.Sym()).Linksym() + sym := staticdata.FuncLinksym(n) return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb) } if s.canSSA(n) { @@ -4959,7 +4959,7 @@ func (s *state) addr(n ir.Node) *ssa.Value { switch n.Class_ { case ir.PEXTERN: // global variable - v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym().Linksym(), s.sb) + v := s.entryNewValue1A(ssa.OpAddr, t, n.Linksym(), s.sb) // TODO: Make OpAddr use AuxInt as well as Aux. 
if offset != 0 { v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, offset, v) @@ -6831,7 +6831,7 @@ func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) { break } a.Name = obj.NAME_AUTO - a.Sym = n.Sym().Linksym() + a.Sym = n.Linksym() a.Offset += n.FrameOffset() default: v.Fatalf("aux in %s not implemented %#v", v, v.Aux) @@ -6963,7 +6963,7 @@ func CheckLoweredGetClosurePtr(v *ssa.Value) { func AddrAuto(a *obj.Addr, v *ssa.Value) { n, off := ssa.AutoVar(v) a.Type = obj.TYPE_MEM - a.Sym = n.Sym().Linksym() + a.Sym = n.Linksym() a.Reg = int16(Arch.REGSP) a.Offset = n.FrameOffset() + off if n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT { @@ -6979,7 +6979,7 @@ func (s *State) AddrScratch(a *obj.Addr) { } a.Type = obj.TYPE_MEM a.Name = obj.NAME_AUTO - a.Sym = s.ScratchFpMem.Sym().Linksym() + a.Sym = s.ScratchFpMem.Linksym() a.Reg = int16(Arch.REGSP) a.Offset = s.ScratchFpMem.Offset_ } diff --git a/src/cmd/compile/internal/staticdata/data.go b/src/cmd/compile/internal/staticdata/data.go index ab9cb5bd7eca1..260731244fc28 100644 --- a/src/cmd/compile/internal/staticdata/data.go +++ b/src/cmd/compile/internal/staticdata/data.go @@ -37,8 +37,8 @@ func InitAddr(n *ir.Name, noff int64, a *ir.Name, aoff int64) { if a.Op() != ir.ONAME { base.Fatalf("addrsym a op %v", a.Op()) } - s := n.Sym().Linksym() - s.WriteAddr(base.Ctxt, noff, types.PtrSize, a.Sym().Linksym(), aoff) + s := n.Linksym() + s.WriteAddr(base.Ctxt, noff, types.PtrSize, a.Linksym(), aoff) } // InitFunc writes the static address of f to n. f must be a global function. @@ -53,18 +53,18 @@ func InitFunc(n *ir.Name, noff int64, f *ir.Name) { if f.Class_ != ir.PFUNC { base.Fatalf("pfuncsym class not PFUNC %d", f.Class_) } - s := n.Sym().Linksym() - s.WriteAddr(base.Ctxt, noff, types.PtrSize, FuncSym(f.Sym()).Linksym(), 0) + s := n.Linksym() + s.WriteAddr(base.Ctxt, noff, types.PtrSize, FuncLinksym(f), 0) } // InitSlice writes a static slice symbol {&arr, lencap, lencap} to n+noff. // InitSlice does not modify n. func InitSlice(n *ir.Name, noff int64, arr *ir.Name, lencap int64) { - s := n.Sym().Linksym() + s := n.Linksym() if arr.Op() != ir.ONAME { base.Fatalf("slicesym non-name arr %v", arr) } - s.WriteAddr(base.Ctxt, noff, types.PtrSize, arr.Sym().Linksym(), 0) + s.WriteAddr(base.Ctxt, noff, types.PtrSize, arr.Linksym(), 0) s.WriteInt(base.Ctxt, noff+types.SliceLenOffset, types.PtrSize, lencap) s.WriteInt(base.Ctxt, noff+types.SliceCapOffset, types.PtrSize, lencap) } @@ -141,7 +141,7 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj. if readonly { sym = StringSym(pos, string(data)) } else { - sym = slicedata(pos, string(data)).Sym().Linksym() + sym = slicedata(pos, string(data)).Linksym() } if len(hash) > 0 { sum := sha256.Sum256(data) @@ -189,7 +189,7 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj. } else { // Emit a zero-length data symbol // and then fix up length and content to use file. 
- symdata = slicedata(pos, "").Sym().Linksym() + symdata = slicedata(pos, "").Linksym() symdata.Size = size symdata.Type = objabi.SNOPTRDATA info := symdata.NewFileInfo() @@ -318,7 +318,7 @@ func InitConst(n *ir.Name, noff int64, c ir.Node, wid int) { if c.Op() != ir.OLITERAL { base.Fatalf("litsym c op %v", c.Op()) } - s := n.Sym().Linksym() + s := n.Linksym() switch u := c.Val(); u.Kind() { case constant.Bool: i := int64(obj.Bool2int(constant.BoolVal(u))) diff --git a/src/cmd/compile/internal/staticdata/embed.go b/src/cmd/compile/internal/staticdata/embed.go index 55c9a3356e8d6..2e551f0b2c6e7 100644 --- a/src/cmd/compile/internal/staticdata/embed.go +++ b/src/cmd/compile/internal/staticdata/embed.go @@ -145,7 +145,7 @@ func WriteEmbed(v *ir.Name) { if err != nil { base.ErrorfAt(v.Pos(), "embed %s: %v", file, err) } - sym := v.Sym().Linksym() + sym := v.Linksym() off := 0 off = objw.SymPtr(sym, off, fsym, 0) // data string off = objw.Uintptr(sym, off, uint64(size)) // len @@ -187,7 +187,7 @@ func WriteEmbed(v *ir.Name) { } } objw.Global(slicedata, int32(off), obj.RODATA|obj.LOCAL) - sym := v.Sym().Linksym() + sym := v.Linksym() objw.SymPtr(sym, 0, slicedata, 0) } } diff --git a/src/cmd/compile/internal/staticinit/sched.go b/src/cmd/compile/internal/staticinit/sched.go index d8f51766deee1..1b0af1b05d4ae 100644 --- a/src/cmd/compile/internal/staticinit/sched.go +++ b/src/cmd/compile/internal/staticinit/sched.go @@ -313,7 +313,7 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty return val.Op() == ir.ONIL } - reflectdata.MarkTypeUsedInInterface(val.Type(), l.Sym().Linksym()) + reflectdata.MarkTypeUsedInInterface(val.Type(), l.Linksym()) var itab *ir.AddrExpr if typ.IsEmptyInterface() { @@ -445,7 +445,7 @@ func StaticName(t *types.Type) *ir.Name { statuniqgen++ typecheck.Declare(n, ir.PEXTERN) n.SetType(t) - n.Sym().Linksym().Set(obj.AttrLocal, true) + n.Linksym().Set(obj.AttrLocal, true) return n } diff --git a/src/cmd/compile/internal/walk/complit.go b/src/cmd/compile/internal/walk/complit.go index fadcd87f25803..3c28ed70ade7a 100644 --- a/src/cmd/compile/internal/walk/complit.go +++ b/src/cmd/compile/internal/walk/complit.go @@ -59,7 +59,7 @@ func (c initContext) String() string { func readonlystaticname(t *types.Type) *ir.Name { n := staticinit.StaticName(t) n.MarkReadonly() - n.Sym().Linksym().Set(obj.AttrContentAddressable, true) + n.Linksym().Set(obj.AttrContentAddressable, true) return n } diff --git a/src/cmd/compile/internal/walk/race.go b/src/cmd/compile/internal/walk/race.go index 1fe439a99a423..87a8839dcd02e 100644 --- a/src/cmd/compile/internal/walk/race.go +++ b/src/cmd/compile/internal/walk/race.go @@ -14,7 +14,7 @@ import ( ) func instrument(fn *ir.Func) { - if fn.Pragma&ir.Norace != 0 || (fn.Sym().Linksym() != nil && fn.Sym().Linksym().ABIWrapper()) { + if fn.Pragma&ir.Norace != 0 || (fn.Linksym() != nil && fn.Linksym().ABIWrapper()) { return } From a5ec920160da51166ee22ac0e5335f51a5d36d8e Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 28 Dec 2020 21:01:34 -0800 Subject: [PATCH 289/474] [dev.regabi] cmd/compile: more Linksym cleanup This largely gets rid of the remaining direct Linksym calls, hopefully enough to discourage people from following bad existing practice until Sym.Linksym can be removed entirely. Passes toolstash -cmp. 
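As a rough sketch of the before/after shape (the helper function and
variable names here are invented for illustration and do not appear
in the CL itself):

	// Both pairs yield the same *obj.LSym values; the direct helpers
	// avoid the types.Sym detour that this series is phasing out.
	func linksymsOf(fn *ir.Func, t *types.Type) (*obj.LSym, *obj.LSym) {
		_ = fn.Sym().Linksym()               // old spelling, now discouraged
		_ = reflectdata.TypeSym(t).Linksym() // old spelling, now discouraged
		return fn.Linksym(), reflectdata.TypeLinksym(t) // preferred
	}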
Change-Id: I5d8f8f703ace7256538fc79648891ede0d879dc2 Reviewed-on: https://go-review.googlesource.com/c/go/+/280641 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/gc/obj.go | 4 +- src/cmd/compile/internal/pkginit/init.go | 4 +- src/cmd/compile/internal/reflectdata/alg.go | 4 +- .../compile/internal/reflectdata/reflect.go | 96 ++++++++----------- src/cmd/compile/internal/ssagen/ssa.go | 2 +- src/cmd/compile/internal/staticdata/data.go | 2 +- 6 files changed, 46 insertions(+), 66 deletions(-) diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 45eadf719e33d..1e8ac8ebb2973 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -148,8 +148,8 @@ func dumpdata() { dumpglobls(typecheck.Target.Externs[numExterns:]) if reflectdata.ZeroSize > 0 { - zero := ir.Pkgs.Map.Lookup("zero") - objw.Global(zero.Linksym(), int32(reflectdata.ZeroSize), obj.DUPOK|obj.RODATA) + zero := ir.Pkgs.Map.Lookup("zero").Linksym() + objw.Global(zero, int32(reflectdata.ZeroSize), obj.DUPOK|obj.RODATA) } addGCLocals() diff --git a/src/cmd/compile/internal/pkginit/init.go b/src/cmd/compile/internal/pkginit/init.go index 8e3592700c3fe..f1ffbb5933db1 100644 --- a/src/cmd/compile/internal/pkginit/init.go +++ b/src/cmd/compile/internal/pkginit/init.go @@ -56,7 +56,7 @@ func Task() *ir.Name { typecheck.Stmts(nf) ir.CurFunc = nil typecheck.Target.Decls = append(typecheck.Target.Decls, fn) - fns = append(fns, initializers.Linksym()) + fns = append(fns, fn.Linksym()) } if typecheck.InitTodoFunc.Dcl != nil { // We only generate temps using initTodo if there @@ -87,7 +87,7 @@ func Task() *ir.Name { task.SetType(types.Types[types.TUINT8]) // fake type task.Class_ = ir.PEXTERN sym.Def = task - lsym := sym.Linksym() + lsym := task.Linksym() ot := 0 ot = objw.Uintptr(lsym, ot, 0) // state: not initialized yet ot = objw.Uintptr(lsym, ot, uint64(len(deps))) diff --git a/src/cmd/compile/internal/reflectdata/alg.go b/src/cmd/compile/internal/reflectdata/alg.go index 5603aefa77e79..d23ca6c7aa2eb 100644 --- a/src/cmd/compile/internal/reflectdata/alg.go +++ b/src/cmd/compile/internal/reflectdata/alg.go @@ -255,7 +255,7 @@ func genhash(t *types.Type) *obj.LSym { // Build closure. It doesn't close over any variables, so // it contains just the function pointer. - objw.SymPtr(closure, 0, sym.Linksym(), 0) + objw.SymPtr(closure, 0, fn.Linksym(), 0) objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA) return closure @@ -634,7 +634,7 @@ func geneq(t *types.Type) *obj.LSym { typecheck.Target.Decls = append(typecheck.Target.Decls, fn) // Generate a closure which points at the function we just generated. - objw.SymPtr(closure, 0, sym.Linksym(), 0) + objw.SymPtr(closure, 0, fn.Linksym(), 0) objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA) return closure } diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go index 87f381fbdd558..5f88262ddf08d 100644 --- a/src/cmd/compile/internal/reflectdata/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -52,13 +52,13 @@ var ( signatslice []*types.Type itabs []itabEntry - ptabs []ptabEntry + ptabs []*ir.Name ) type typeSig struct { name *types.Sym - isym *types.Sym - tsym *types.Sym + isym *obj.LSym + tsym *obj.LSym type_ *types.Type mtype *types.Type } @@ -327,21 +327,19 @@ func methods(t *types.Type) []*typeSig { // generating code if necessary. 
var ms []*typeSig for _, f := range mt.AllMethods().Slice() { + if f.Sym == nil { + base.Fatalf("method with no sym on %v", mt) + } if !f.IsMethod() { - base.Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f) + base.Fatalf("non-method on %v method %v %v", mt, f.Sym, f) } if f.Type.Recv() == nil { - base.Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f) + base.Fatalf("receiver with no type on %v method %v %v", mt, f.Sym, f) } if f.Nointerface() { continue } - method := f.Sym - if method == nil { - break - } - // get receiver type for this particular method. // if pointer receiver but non-pointer t and // this is not an embedded pointer inside a struct, @@ -351,29 +349,13 @@ func methods(t *types.Type) []*typeSig { } sig := &typeSig{ - name: method, - isym: ir.MethodSym(it, method), - tsym: ir.MethodSym(t, method), + name: f.Sym, + isym: methodWrapper(it, f), + tsym: methodWrapper(t, f), type_: typecheck.NewMethodType(f.Type, t), mtype: typecheck.NewMethodType(f.Type, nil), } ms = append(ms, sig) - - this := f.Type.Recv().Type - - if !sig.isym.Siggen() { - sig.isym.SetSiggen(true) - if !types.Identical(this, it) { - genwrapper(it, f, sig.isym) - } - } - - if !sig.tsym.Siggen() { - sig.tsym.SetSiggen(true) - if !types.Identical(this, t) { - genwrapper(t, f, sig.tsym) - } - } } return ms @@ -407,11 +389,7 @@ func imethods(t *types.Type) []*typeSig { // IfaceType.Method is not in the reflect data. // Generate the method body, so that compiled // code can refer to it. - isym := ir.MethodSym(t, f.Sym) - if !isym.Siggen() { - isym.SetSiggen(true) - genwrapper(t, f, isym) - } + methodWrapper(t, f) } return methods @@ -636,8 +614,8 @@ func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int { ot = objw.SymPtrOff(lsym, ot, nsym) ot = dmethodptrOff(lsym, ot, WriteType(a.mtype)) - ot = dmethodptrOff(lsym, ot, a.isym.Linksym()) - ot = dmethodptrOff(lsym, ot, a.tsym.Linksym()) + ot = dmethodptrOff(lsym, ot, a.isym) + ot = dmethodptrOff(lsym, ot, a.tsym) } return ot } @@ -884,7 +862,7 @@ func ITabAddr(t, itype *types.Type) *ir.AddrExpr { n.Class_ = ir.PEXTERN n.SetTypecheck(1) s.Def = n - itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()}) + itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: n.Linksym()}) } n := typecheck.NodAddr(ir.AsNode(s.Def)) @@ -1281,7 +1259,7 @@ func genfun(t, it *types.Type) []*obj.LSym { // so we can find the intersect in a single pass for _, m := range methods { if m.name == sigs[0].name { - out = append(out, m.isym.Linksym()) + out = append(out, m.isym) sigs = sigs[1:] if len(sigs) == 0 { break @@ -1390,8 +1368,12 @@ func WriteTabs() { // name nameOff // typ typeOff // pointer to symbol // } - nsym := dname(p.s.Name, "", nil, true) - tsym := WriteType(p.t) + nsym := dname(p.Sym().Name, "", nil, true) + t := p.Type() + if p.Class_ != ir.PFUNC { + t = types.NewPtr(t) + } + tsym := WriteType(t) ot = objw.SymPtrOff(s, ot, nsym) ot = objw.SymPtrOff(s, ot, tsym) // Plugin exports symbols as interfaces. 
Mark their types @@ -1403,7 +1385,7 @@ func WriteTabs() { ot = 0 s = base.Ctxt.Lookup("go.plugin.exports") for _, p := range ptabs { - ot = objw.SymPtr(s, ot, p.s.Linksym(), 0) + ot = objw.SymPtr(s, ot, p.Linksym(), 0) } objw.Global(s, int32(ot), int16(obj.RODATA)) } @@ -1722,13 +1704,7 @@ func CollectPTabs() { if s.Pkg.Name != "main" { continue } - if n.Type().Kind() == types.TFUNC && n.Class_ == ir.PFUNC { - // function - ptabs = append(ptabs, ptabEntry{s: s, t: s.Def.Type()}) - } else { - // variable - ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(s.Def.Type())}) - } + ptabs = append(ptabs, n) } } @@ -1752,22 +1728,28 @@ func CollectPTabs() { // // rcvr - U // method - M func (t T)(), a TFIELD type struct -// newnam - the eventual mangled name of this function -func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { - if false && base.Flag.LowerR != 0 { - fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam) +func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym { + newnam := ir.MethodSym(rcvr, method.Sym) + lsym := newnam.Linksym() + if newnam.Siggen() { + return lsym + } + newnam.SetSiggen(true) + + if types.Identical(rcvr, method.Type.Recv().Type) { + return lsym } // Only generate (*T).M wrappers for T.M in T's own package. if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym() != nil && rcvr.Elem().Sym().Pkg != types.LocalPkg { - return + return lsym } // Only generate I.M wrappers for I in I's own package // but keep doing it for error.Error (was issue #29304). if rcvr.IsInterface() && rcvr.Sym() != nil && rcvr.Sym().Pkg != types.LocalPkg && rcvr != types.ErrorType { - return + return lsym } base.Pos = base.AutogeneratedPos @@ -1827,10 +1809,6 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { } } - if false && base.Flag.LowerR != 0 { - ir.DumpList("genwrapper body", fn.Body) - } - typecheck.FinishFuncBody() if base.Debug.DclStack != 0 { types.CheckDclstack() @@ -1850,6 +1828,8 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) { ir.CurFunc = nil typecheck.Target.Decls = append(typecheck.Target.Decls, fn) + + return lsym } var ZeroSize int64 diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 15c023d3325c5..3c94ec4c9519e 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -4578,7 +4578,7 @@ func (s *state) openDeferExit() { call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, aux, codeptr, v, s.mem()) } } else { - aux := ssa.StaticAuxCall(fn.Sym().Linksym(), ACArgs, ACResults) + aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), ACArgs, ACResults) if testLateExpansion { callArgs = append(callArgs, s.mem()) call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) diff --git a/src/cmd/compile/internal/staticdata/data.go b/src/cmd/compile/internal/staticdata/data.go index 260731244fc28..27d9cec06d6d3 100644 --- a/src/cmd/compile/internal/staticdata/data.go +++ b/src/cmd/compile/internal/staticdata/data.go @@ -209,7 +209,7 @@ func slicedata(pos src.XPos, s string) *ir.Name { symnode := typecheck.NewName(sym) sym.Def = symnode - lsym := sym.Linksym() + lsym := symnode.Linksym() off := dstringdata(lsym, 0, s, pos, "slice") objw.Global(lsym, int32(off), obj.NOPTR|obj.LOCAL) From e34c44a7c46d63a96e262f837670052759cd4569 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Tue, 29 Dec 2020 12:09:51 +0700 Subject: [PATCH 290/474] 
[dev.regabi] cmd/compile: refactor typechecking arith

Currently, the tcArith logic is complicated and involves many
unnecessary checks for some ir.Op values. This CL refactors how it
works:

 - Add a new tcShift function, which does only the work needed to
   typecheck OLSH/ORSH. That ends up moving OLSH/ORSH to a separate
   case in typecheck1.

 - Move OASOP to a separate case, so its logic is detached from
   tcArith.

 - Move OANDAND/OOROR to a separate case, which does some validation
   dedicated to logical operators only.

Passes toolstash -cmp.

Change-Id: I0db7b7c7a3e52d6f9e9d87eee6967871f1c32200
Reviewed-on: https://go-review.googlesource.com/c/go/+/279442
Trust: Cuong Manh Le
Run-TryBot: Cuong Manh Le
TryBot-Result: Go Bot
Reviewed-by: Matthew Dempsky
---
 src/cmd/compile/internal/typecheck/expr.go    | 186 ++++--------------
 .../compile/internal/typecheck/typecheck.go   | 114 ++++++++---
 2 files changed, 135 insertions(+), 165 deletions(-)

diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go
index 29d7a080114d7..f3e3a93150cb3 100644
--- a/src/cmd/compile/internal/typecheck/expr.go
+++ b/src/cmd/compile/internal/typecheck/expr.go
@@ -55,103 +55,50 @@ func tcAddr(n *ir.AddrExpr) ir.Node {
 	return n
 }
 
-// tcArith typechecks a binary arithmetic expression.
-func tcArith(n ir.Node) ir.Node {
-	var l, r ir.Node
-	var setLR func()
-	switch n := n.(type) {
-	case *ir.AssignOpStmt:
-		l, r = n.X, n.Y
-		setLR = func() { n.X = l; n.Y = r }
-	case *ir.BinaryExpr:
-		l, r = n.X, n.Y
-		setLR = func() { n.X = l; n.Y = r }
-	case *ir.LogicalExpr:
-		l, r = n.X, n.Y
-		setLR = func() { n.X = l; n.Y = r }
-	}
-	l = Expr(l)
-	r = Expr(r)
-	setLR()
-	if l.Type() == nil || r.Type() == nil {
-		n.SetType(nil)
-		return n
+func tcShift(n, l, r ir.Node) (ir.Node, ir.Node, *types.Type) {
+	if l.Type() == nil || r.Type() == nil {
+		return l, r, nil
 	}
 
-	op := n.Op()
-	if n.Op() == ir.OASOP {
-		n := n.(*ir.AssignOpStmt)
-		checkassign(n, l)
-		if n.IncDec && !okforarith[l.Type().Kind()] {
-			base.Errorf("invalid operation: %v (non-numeric type %v)", n, l.Type())
-			n.SetType(nil)
-			return n
-		}
-		// TODO(marvin): Fix Node.EType type union.
- op = n.AsOp - } - if op == ir.OLSH || op == ir.ORSH { - r = DefaultLit(r, types.Types[types.TUINT]) - setLR() - t := r.Type() - if !t.IsInteger() { - base.Errorf("invalid operation: %v (shift count type %v, must be integer)", n, r.Type()) - n.SetType(nil) - return n - } - if t.IsSigned() && !types.AllowsGoVersion(curpkg(), 1, 13) { - base.ErrorfVers("go1.13", "invalid operation: %v (signed shift count type %v)", n, r.Type()) - n.SetType(nil) - return n - } - t = l.Type() - if t != nil && t.Kind() != types.TIDEAL && !t.IsInteger() { - base.Errorf("invalid operation: %v (shift of type %v)", n, t) - n.SetType(nil) - return n - } - // no defaultlit for left - // the outer context gives the type - n.SetType(l.Type()) - if (l.Type() == types.UntypedFloat || l.Type() == types.UntypedComplex) && r.Op() == ir.OLITERAL { - n.SetType(types.UntypedInt) - } - return n + r = DefaultLit(r, types.Types[types.TUINT]) + t := r.Type() + if !t.IsInteger() { + base.Errorf("invalid operation: %v (shift count type %v, must be integer)", n, r.Type()) + return l, r, nil + } + if t.IsSigned() && !types.AllowsGoVersion(curpkg(), 1, 13) { + base.ErrorfVers("go1.13", "invalid operation: %v (signed shift count type %v)", n, r.Type()) + return l, r, nil + } + t = l.Type() + if t != nil && t.Kind() != types.TIDEAL && !t.IsInteger() { + base.Errorf("invalid operation: %v (shift of type %v)", n, t) + return l, r, nil } - // For "x == x && len(s)", it's better to report that "len(s)" (type int) - // can't be used with "&&" than to report that "x == x" (type untyped bool) - // can't be converted to int (see issue #41500). - if n.Op() == ir.OANDAND || n.Op() == ir.OOROR { - n := n.(*ir.LogicalExpr) - if !n.X.Type().IsBoolean() { - base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.X.Type())) - n.SetType(nil) - return n - } - if !n.Y.Type().IsBoolean() { - base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Y.Type())) - n.SetType(nil) - return n - } + // no defaultlit for left + // the outer context gives the type + t = l.Type() + if (l.Type() == types.UntypedFloat || l.Type() == types.UntypedComplex) && r.Op() == ir.OLITERAL { + t = types.UntypedInt } + return l, r, t +} - // ideal mixed with non-ideal +// tcArith typechecks operands of a binary arithmetic expression. +// The result of tcArith MUST be assigned back to original operands, +// t is the type of the expression, and should be set by the caller. 
e.g: +// n.X, n.Y, t = tcArith(n, op, n.X, n.Y) +// n.SetType(t) +func tcArith(n ir.Node, op ir.Op, l, r ir.Node) (ir.Node, ir.Node, *types.Type) { l, r = defaultlit2(l, r, false) - setLR() - if l.Type() == nil || r.Type() == nil { - n.SetType(nil) - return n + return l, r, nil } t := l.Type() if t.Kind() == types.TIDEAL { t = r.Type() } - et := t.Kind() - if et == types.TIDEAL { - et = types.TINT - } aop := ir.OXXX if iscmp[n.Op()] && t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) { // comparison is okay as long as one side is @@ -167,15 +114,13 @@ func tcArith(n ir.Node) ir.Node { if aop != ir.OXXX { if r.Type().IsInterface() && !l.Type().IsInterface() && !types.IsComparable(l.Type()) { base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type())) - n.SetType(nil) - return n + return l, r, nil } types.CalcSize(l.Type()) if r.Type().IsInterface() == l.Type().IsInterface() || l.Type().Width >= 1<<16 { l = ir.NewConvExpr(base.Pos, aop, r.Type(), l) l.SetTypecheck(1) - setLR() } t = r.Type() @@ -188,34 +133,28 @@ func tcArith(n ir.Node) ir.Node { if aop != ir.OXXX { if l.Type().IsInterface() && !r.Type().IsInterface() && !types.IsComparable(r.Type()) { base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type())) - n.SetType(nil) - return n + return l, r, nil } types.CalcSize(r.Type()) if r.Type().IsInterface() == l.Type().IsInterface() || r.Type().Width >= 1<<16 { r = ir.NewConvExpr(base.Pos, aop, l.Type(), r) r.SetTypecheck(1) - setLR() } t = l.Type() } } - - et = t.Kind() } if t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) { l, r = defaultlit2(l, r, true) if l.Type() == nil || r.Type() == nil { - n.SetType(nil) - return n + return l, r, nil } if l.Type().IsInterface() == r.Type().IsInterface() || aop == 0 { base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type()) - n.SetType(nil) - return n + return l, r, nil } } @@ -224,85 +163,46 @@ func tcArith(n ir.Node) ir.Node { } if dt := defaultType(t); !okfor[op][dt.Kind()] { base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t)) - n.SetType(nil) - return n + return l, r, nil } // okfor allows any array == array, map == map, func == func. // restrict to slice/map/func == nil and nil == slice/map/func. 
if l.Type().IsArray() && !types.IsComparable(l.Type()) { base.Errorf("invalid operation: %v (%v cannot be compared)", n, l.Type()) - n.SetType(nil) - return n + return l, r, nil } if l.Type().IsSlice() && !ir.IsNil(l) && !ir.IsNil(r) { base.Errorf("invalid operation: %v (slice can only be compared to nil)", n) - n.SetType(nil) - return n + return l, r, nil } if l.Type().IsMap() && !ir.IsNil(l) && !ir.IsNil(r) { base.Errorf("invalid operation: %v (map can only be compared to nil)", n) - n.SetType(nil) - return n + return l, r, nil } if l.Type().Kind() == types.TFUNC && !ir.IsNil(l) && !ir.IsNil(r) { base.Errorf("invalid operation: %v (func can only be compared to nil)", n) - n.SetType(nil) - return n + return l, r, nil } if l.Type().IsStruct() { if f := types.IncomparableField(l.Type()); f != nil { base.Errorf("invalid operation: %v (struct containing %v cannot be compared)", n, f.Type) - n.SetType(nil) - return n + return l, r, nil } } - if iscmp[n.Op()] { - t = types.UntypedBool - n.SetType(t) - if con := EvalConst(n); con.Op() == ir.OLITERAL { - return con - } - l, r = defaultlit2(l, r, true) - setLR() - return n - } - - if et == types.TSTRING && n.Op() == ir.OADD { - // create or update OADDSTR node with list of strings in x + y + z + (w + v) + ... - n := n.(*ir.BinaryExpr) - var add *ir.AddStringExpr - if l.Op() == ir.OADDSTR { - add = l.(*ir.AddStringExpr) - add.SetPos(n.Pos()) - } else { - add = ir.NewAddStringExpr(n.Pos(), []ir.Node{l}) - } - if r.Op() == ir.OADDSTR { - r := r.(*ir.AddStringExpr) - add.List.Append(r.List.Take()...) - } else { - add.List.Append(r) - } - add.SetType(t) - return add - } - if (op == ir.ODIV || op == ir.OMOD) && ir.IsConst(r, constant.Int) { if constant.Sign(r.Val()) == 0 { base.Errorf("division by zero") - n.SetType(nil) - return n + return l, r, nil } } - n.SetType(t) - return n + return l, r, t } // The result of tcCompLit MUST be assigned back to n, e.g. diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index ff9178b5972f7..e29d58cefa7dc 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -672,28 +672,98 @@ func typecheck1(n ir.Node, top int) ir.Node { case ir.ODEREF: n := n.(*ir.StarExpr) return tcStar(n, top) - // arithmetic exprs - case ir.OASOP, - ir.OADD, - ir.OAND, - ir.OANDAND, - ir.OANDNOT, - ir.ODIV, - ir.OEQ, - ir.OGE, - ir.OGT, - ir.OLE, - ir.OLT, - ir.OLSH, - ir.ORSH, - ir.OMOD, - ir.OMUL, - ir.ONE, - ir.OOR, - ir.OOROR, - ir.OSUB, - ir.OXOR: - return tcArith(n) + + // x op= y + case ir.OASOP: + n := n.(*ir.AssignOpStmt) + n.X, n.Y = Expr(n.X), Expr(n.Y) + checkassign(n, n.X) + if n.IncDec && !okforarith[n.X.Type().Kind()] { + base.Errorf("invalid operation: %v (non-numeric type %v)", n, n.X.Type()) + return n + } + switch n.AsOp { + case ir.OLSH, ir.ORSH: + n.X, n.Y, _ = tcShift(n, n.X, n.Y) + case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD, ir.OMUL, ir.OOR, ir.OSUB, ir.OXOR: + n.X, n.Y, _ = tcArith(n, n.AsOp, n.X, n.Y) + default: + base.Fatalf("invalid assign op: %v", n.AsOp) + } + return n + + // logical operators + case ir.OANDAND, ir.OOROR: + n := n.(*ir.LogicalExpr) + n.X, n.Y = Expr(n.X), Expr(n.Y) + // For "x == x && len(s)", it's better to report that "len(s)" (type int) + // can't be used with "&&" than to report that "x == x" (type untyped bool) + // can't be converted to int (see issue #41500). 
+		if !n.X.Type().IsBoolean() {
+			base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.X.Type()))
+			n.SetType(nil)
+			return n
+		}
+		if !n.Y.Type().IsBoolean() {
+			base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Y.Type()))
+			n.SetType(nil)
+			return n
+		}
+		l, r, t := tcArith(n, n.Op(), n.X, n.Y)
+		n.X, n.Y = l, r
+		n.SetType(t)
+		return n
+
+	// shift operators
+	case ir.OLSH, ir.ORSH:
+		n := n.(*ir.BinaryExpr)
+		n.X, n.Y = Expr(n.X), Expr(n.Y)
+		l, r, t := tcShift(n, n.X, n.Y)
+		n.X, n.Y = l, r
+		n.SetType(t)
+		return n
+
+	// comparison operators
+	case ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT, ir.ONE:
+		n := n.(*ir.BinaryExpr)
+		n.X, n.Y = Expr(n.X), Expr(n.Y)
+		l, r, t := tcArith(n, n.Op(), n.X, n.Y)
+		if t != nil {
+			n.X, n.Y = l, r
+			n.SetType(types.UntypedBool)
+			if con := EvalConst(n); con.Op() == ir.OLITERAL {
+				return con
+			}
+			n.X, n.Y = defaultlit2(l, r, true)
+		}
+		return n
+
+	// binary operators
+	case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD, ir.OMUL, ir.OOR, ir.OSUB, ir.OXOR:
+		n := n.(*ir.BinaryExpr)
+		n.X, n.Y = Expr(n.X), Expr(n.Y)
+		l, r, t := tcArith(n, n.Op(), n.X, n.Y)
+		if t != nil && t.Kind() == types.TSTRING && n.Op() == ir.OADD {
+			// create or update OADDSTR node with list of strings in x + y + z + (w + v) + ...
+			var add *ir.AddStringExpr
+			if l.Op() == ir.OADDSTR {
+				add = l.(*ir.AddStringExpr)
+				add.SetPos(n.Pos())
+			} else {
+				add = ir.NewAddStringExpr(n.Pos(), []ir.Node{l})
+			}
+			if r.Op() == ir.OADDSTR {
+				r := r.(*ir.AddStringExpr)
+				add.List.Append(r.List.Take()...)
+			} else {
+				add.List.Append(r)
+			}
+			add.SetType(t)
+			return add
+		}
+		n.X, n.Y = l, r
+		n.SetType(t)
+		return n
 
 	case ir.OBITNOT, ir.ONEG, ir.ONOT, ir.OPLUS:
 		n := n.(*ir.UnaryExpr)

From 82ad3083f86947eece2e4ce2ae82f1230aa466d9 Mon Sep 17 00:00:00 2001
From: Cuong Manh Le
Date: Tue, 29 Dec 2020 12:31:17 +0700
Subject: [PATCH 291/474] [dev.regabi] cmd/compile: remove typ from AssignOpStmt

The previous CL detached the logic for typechecking AssignOpStmt from
tcArith, so its typ field is not used anymore.

Passes toolstash -cmp.

Change-Id: I407507a1c4c4f2958fca4d6899875564e54bf1f5
Reviewed-on: https://go-review.googlesource.com/c/go/+/279443
Trust: Cuong Manh Le
Run-TryBot: Cuong Manh Le
TryBot-Result: Go Bot
Reviewed-by: Matthew Dempsky
---
 src/cmd/compile/internal/ir/stmt.go | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go
index de152fec7275b..1301e65e26922 100644
--- a/src/cmd/compile/internal/ir/stmt.go
+++ b/src/cmd/compile/internal/ir/stmt.go
@@ -112,7 +112,6 @@ func (n *AssignStmt) SetOp(op Op) {
 // An AssignOpStmt is an AsOp= assignment statement: X AsOp= Y.
 type AssignOpStmt struct {
 	miniStmt
-	typ  *types.Type
 	X    Node
 	AsOp Op // OADD etc
 	Y    Node
@@ -126,9 +125,6 @@ func NewAssignOpStmt(pos src.XPos, asOp Op, x, y Node) *AssignOpStmt {
 	return n
 }
 
-func (n *AssignOpStmt) Type() *types.Type     { return n.typ }
-func (n *AssignOpStmt) SetType(x *types.Type) { n.typ = x }
-
 // A BlockStmt is a block: { List }.
 type BlockStmt struct {
 	miniStmt

From 33801cdc627bc4d3f7128d1076a1ac249da2e015 Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Mon, 28 Dec 2020 23:42:49 -0800
Subject: [PATCH 292/474] [dev.regabi] cmd/compile: use Ntype where possible

For nodes that are always a type expression, we can use Ntype instead
of Node.

Passes toolstash -cmp.
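A minimal sketch of what the stronger field type buys, using the
NewChanType signature from the diff below (the chanOf helper name is
invented for illustration):

	// Because elem is an ir.Ntype rather than a bare ir.Node, passing
	// a non-type expression is rejected at compile time instead of
	// being caught later during typechecking.
	func chanOf(pos src.XPos, elem ir.Ntype) *ir.ChanType {
		return ir.NewChanType(pos, elem, types.Cboth)
	}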
Change-Id: I28f9fa235015ab48d0da06b78b30c49d74c64e3a Reviewed-on: https://go-review.googlesource.com/c/go/+/280642 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky Reviewed-by: Cuong Manh Le TryBot-Result: Go Bot --- src/cmd/compile/internal/ir/func.go | 2 +- src/cmd/compile/internal/ir/node_gen.go | 10 +++++----- src/cmd/compile/internal/ir/type.go | 22 +++++++++++----------- src/cmd/compile/internal/typecheck/expr.go | 4 ++-- src/cmd/compile/internal/typecheck/func.go | 2 +- src/cmd/compile/internal/typecheck/type.go | 12 ++++++------ 6 files changed, 26 insertions(+), 26 deletions(-) diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index a4f5875aabde7..4613425f1a340 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -67,7 +67,7 @@ type Func struct { Dcl []*Name ClosureEnter Nodes // list of ONAME nodes (or OADDR-of-ONAME nodes, for output parameters) of captured variables - ClosureType Node // closure representation type + ClosureType Ntype // closure representation type ClosureVars []*Name // closure params; each has closurevar set // Parents records the parent scope of each scope within a diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 1d24904a3f34c..fe54b62f1805b 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -52,7 +52,7 @@ func (n *ArrayType) doChildren(do func(Node) error) error { } func (n *ArrayType) editChildren(edit func(Node) Node) { n.Len = maybeEdit(n.Len, edit) - n.Elem = maybeEdit(n.Elem, edit) + n.Elem = toNtype(maybeEdit(n.Elem, edit)) } func (n *AssignListStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } @@ -241,7 +241,7 @@ func (n *ChanType) doChildren(do func(Node) error) error { return err } func (n *ChanType) editChildren(edit func(Node) Node) { - n.Elem = maybeEdit(n.Elem, edit) + n.Elem = toNtype(maybeEdit(n.Elem, edit)) } func (n *ClosureExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } @@ -632,8 +632,8 @@ func (n *MapType) doChildren(do func(Node) error) error { return err } func (n *MapType) editChildren(edit func(Node) Node) { - n.Key = maybeEdit(n.Key, edit) - n.Elem = maybeEdit(n.Elem, edit) + n.Key = toNtype(maybeEdit(n.Key, edit)) + n.Elem = toNtype(maybeEdit(n.Elem, edit)) } func (n *Name) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } @@ -873,7 +873,7 @@ func (n *SliceType) doChildren(do func(Node) error) error { return err } func (n *SliceType) editChildren(edit func(Node) Node) { - n.Elem = maybeEdit(n.Elem, edit) + n.Elem = toNtype(maybeEdit(n.Elem, edit)) } func (n *StarExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go index bd3a05d06e573..408f6ed56372e 100644 --- a/src/cmd/compile/internal/ir/type.go +++ b/src/cmd/compile/internal/ir/type.go @@ -46,7 +46,7 @@ func (n *miniType) Type() *types.Type { return n.typ } // setOTYPE also records t.Nod = self if t.Nod is not already set. // (Some types are shared by multiple OTYPE nodes, so only // the first such node is used as t.Nod.) -func (n *miniType) setOTYPE(t *types.Type, self Node) { +func (n *miniType) setOTYPE(t *types.Type, self Ntype) { if n.typ != nil { panic(n.op.String() + " SetType: type already set") } @@ -61,11 +61,11 @@ func (n *miniType) Implicit() bool { return false } // for Format OTYPE // A ChanType represents a chan Elem syntax with the direction Dir. 
type ChanType struct { miniType - Elem Node + Elem Ntype Dir types.ChanDir } -func NewChanType(pos src.XPos, elem Node, dir types.ChanDir) *ChanType { +func NewChanType(pos src.XPos, elem Ntype, dir types.ChanDir) *ChanType { n := &ChanType{Elem: elem, Dir: dir} n.op = OTCHAN n.pos = pos @@ -80,11 +80,11 @@ func (n *ChanType) SetOTYPE(t *types.Type) { // A MapType represents a map[Key]Value type syntax. type MapType struct { miniType - Key Node - Elem Node + Key Ntype + Elem Ntype } -func NewMapType(pos src.XPos, key, elem Node) *MapType { +func NewMapType(pos src.XPos, key, elem Ntype) *MapType { n := &MapType{Key: key, Elem: elem} n.op = OTMAP n.pos = pos @@ -246,11 +246,11 @@ func editFields(list []*Field, edit func(Node) Node) { // If DDD is true, it's the ...Elem at the end of a function list. type SliceType struct { miniType - Elem Node + Elem Ntype DDD bool } -func NewSliceType(pos src.XPos, elem Node) *SliceType { +func NewSliceType(pos src.XPos, elem Ntype) *SliceType { n := &SliceType{Elem: elem} n.op = OTSLICE n.pos = pos @@ -267,11 +267,11 @@ func (n *SliceType) SetOTYPE(t *types.Type) { type ArrayType struct { miniType Len Node - Elem Node + Elem Ntype } -func NewArrayType(pos src.XPos, size Node, elem Node) *ArrayType { - n := &ArrayType{Len: size, Elem: elem} +func NewArrayType(pos src.XPos, len Node, elem Ntype) *ArrayType { + n := &ArrayType{Len: len, Elem: elem} n.op = OTARRAY n.pos = pos return n diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go index f3e3a93150cb3..5752139c0b92a 100644 --- a/src/cmd/compile/internal/typecheck/expr.go +++ b/src/cmd/compile/internal/typecheck/expr.go @@ -230,7 +230,7 @@ func tcCompLit(n *ir.CompLitExpr) (res ir.Node) { // Need to handle [...]T arrays specially. if array, ok := n.Ntype.(*ir.ArrayType); ok && array.Elem != nil && array.Len == nil { - array.Elem = typecheck(array.Elem, ctxType) + array.Elem = typecheckNtype(array.Elem) elemType := array.Elem.Type() if elemType == nil { n.SetType(nil) @@ -243,7 +243,7 @@ func tcCompLit(n *ir.CompLitExpr) (res ir.Node) { return n } - n.Ntype = ir.Node(typecheck(n.Ntype, ctxType)).(ir.Ntype) + n.Ntype = typecheckNtype(n.Ntype) t := n.Ntype.Type() if t == nil { n.SetType(nil) diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index c58fef10ecd21..9bb9245d4a443 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -342,7 +342,7 @@ func tcClosure(clo *ir.ClosureExpr, top int) { fn.Iota = x } - fn.ClosureType = typecheck(fn.ClosureType, ctxType) + fn.ClosureType = typecheckNtype(fn.ClosureType) clo.SetType(fn.ClosureType.Type()) fn.SetClosureCalled(top&ctxCallee != 0) diff --git a/src/cmd/compile/internal/typecheck/type.go b/src/cmd/compile/internal/typecheck/type.go index 0c2ebb8b26a0c..6fdafef77d926 100644 --- a/src/cmd/compile/internal/typecheck/type.go +++ b/src/cmd/compile/internal/typecheck/type.go @@ -14,7 +14,7 @@ import ( // tcArrayType typechecks an OTARRAY node. func tcArrayType(n *ir.ArrayType) ir.Node { - n.Elem = typecheck(n.Elem, ctxType) + n.Elem = typecheckNtype(n.Elem) if n.Elem.Type() == nil { return n } @@ -59,7 +59,7 @@ func tcArrayType(n *ir.ArrayType) ir.Node { // tcChanType typechecks an OTCHAN node. 
func tcChanType(n *ir.ChanType) ir.Node { - n.Elem = typecheck(n.Elem, ctxType) + n.Elem = typecheckNtype(n.Elem) l := n.Elem if l.Type() == nil { return n @@ -103,7 +103,7 @@ func tcInterfaceType(n *ir.InterfaceType) ir.Node { n.SetOTYPE(types.Types[types.TINTER]) return n } - + lno := base.Pos methods := tcFields(n.Methods, nil) base.Pos = lno @@ -114,8 +114,8 @@ func tcInterfaceType(n *ir.InterfaceType) ir.Node { // tcMapType typechecks an OTMAP node. func tcMapType(n *ir.MapType) ir.Node { - n.Key = typecheck(n.Key, ctxType) - n.Elem = typecheck(n.Elem, ctxType) + n.Key = typecheckNtype(n.Key) + n.Elem = typecheckNtype(n.Elem) l := n.Key r := n.Elem if l.Type() == nil || r.Type() == nil { @@ -134,7 +134,7 @@ func tcMapType(n *ir.MapType) ir.Node { // tcSliceType typechecks an OTSLICE node. func tcSliceType(n *ir.SliceType) ir.Node { - n.Elem = typecheck(n.Elem, ctxType) + n.Elem = typecheckNtype(n.Elem) if n.Elem.Type() == nil { return n } From 171fc6f22388cc8628b5590f42d46a7c57277428 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 29 Dec 2020 00:44:28 -0800 Subject: [PATCH 293/474] [dev.regabi] cmd/compile: remove workarounds for go/constant issues These were fixed in CLs 273086 and 273126, which have been merged back into dev.regabi already. Passes toolstash -cmp. Change-Id: I011e9ed7062bc034496a279e21cc163267bf83fd Reviewed-on: https://go-review.googlesource.com/c/go/+/280643 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky Reviewed-by: Cuong Manh Le TryBot-Result: Go Bot --- src/cmd/compile/internal/typecheck/const.go | 11 +---------- src/cmd/compile/internal/typecheck/iexport.go | 2 +- src/cmd/compile/internal/typecheck/iimport.go | 2 +- 3 files changed, 3 insertions(+), 12 deletions(-) diff --git a/src/cmd/compile/internal/typecheck/const.go b/src/cmd/compile/internal/typecheck/const.go index e22b284e829d2..5259218ef91cb 100644 --- a/src/cmd/compile/internal/typecheck/const.go +++ b/src/cmd/compile/internal/typecheck/const.go @@ -564,20 +564,11 @@ func EvalConst(n ir.Node) ir.Node { return n } -func makeInt(i *big.Int) constant.Value { - if i.IsInt64() { - return constant.Make(i.Int64()) // workaround #42640 (Int64Val(Make(big.NewInt(10))) returns (10, false), not (10, true)) - } - return constant.Make(i) -} - func makeFloat64(f float64) constant.Value { if math.IsInf(f, 0) { base.Fatalf("infinity is not a valid constant") } - v := constant.MakeFloat64(f) - v = constant.ToFloat(v) // workaround #42641 (MakeFloat64(0).Kind() returns Int, not Float) - return v + return constant.MakeFloat64(f) } func makeComplex(real, imag constant.Value) constant.Value { diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index e35cbcafa25e5..c287d76c4370f 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -936,7 +936,7 @@ func (w *exportWriter) mpfloat(v constant.Value, typ *types.Type) { if acc != big.Exact { base.Fatalf("mantissa scaling failed for %f (%s)", f, acc) } - w.mpint(makeInt(manti), typ) + w.mpint(constant.Make(manti), typ) if manti.Sign() != 0 { w.int64(exp) } diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go index 546ddcba79295..86277e69bd4cc 100644 --- a/src/cmd/compile/internal/typecheck/iimport.go +++ b/src/cmd/compile/internal/typecheck/iimport.go @@ -372,7 +372,7 @@ func (p *importReader) value(typ *types.Type) constant.Value { case constant.Int: var i big.Int p.mpint(&i, typ) - return 
makeInt(&i) + return constant.Make(&i) case constant.Float: return p.float(typ) case constant.Complex: From 6f30c9504861d68d13113989a3cf063832d47002 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 29 Dec 2020 01:22:50 -0800 Subject: [PATCH 294/474] [dev.regabi] cmd/compile: remove unneeded indirection Thanks to package reorganizing, we can remove types.TypeLinkSym by simply having its only callers use reflectdata.TypeLinksym directly. Passes toolstash -cmp. Change-Id: I5bc5dbb6bf0664af43ae5130cfe1f19bd23b2bfe Reviewed-on: https://go-review.googlesource.com/c/go/+/280644 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/gc/abiutils_test.go | 7 ------- src/cmd/compile/internal/gc/main.go | 6 ------ src/cmd/compile/internal/ssa/writebarrier.go | 5 +++-- src/cmd/compile/internal/types/type.go | 5 ----- src/cmd/compile/internal/types/utils.go | 11 ----------- 5 files changed, 3 insertions(+), 31 deletions(-) diff --git a/src/cmd/compile/internal/gc/abiutils_test.go b/src/cmd/compile/internal/gc/abiutils_test.go index 656eab18cbe8d..d535a6a34bf44 100644 --- a/src/cmd/compile/internal/gc/abiutils_test.go +++ b/src/cmd/compile/internal/gc/abiutils_test.go @@ -7,7 +7,6 @@ package gc import ( "bufio" "cmd/compile/internal/base" - "cmd/compile/internal/reflectdata" "cmd/compile/internal/ssagen" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" @@ -39,12 +38,6 @@ func TestMain(m *testing.M) { base.Ctxt.Bso = bufio.NewWriter(os.Stdout) types.PtrSize = ssagen.Arch.LinkArch.PtrSize types.RegSize = ssagen.Arch.LinkArch.RegSize - types.TypeLinkSym = func(t *types.Type) *obj.LSym { - return reflectdata.TypeLinksym(t) - } - types.TypeLinkSym = func(t *types.Type) *obj.LSym { - return reflectdata.TypeLinksym(t) - } typecheck.Init() os.Exit(m.Run()) } diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index a4613f04fbc17..45219801f0f16 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -190,9 +190,6 @@ func Main(archInit func(*ssagen.ArchInfo)) { types.PtrSize = ssagen.Arch.LinkArch.PtrSize types.RegSize = ssagen.Arch.LinkArch.RegSize types.MaxWidth = ssagen.Arch.MAXWIDTH - types.TypeLinkSym = func(t *types.Type) *obj.LSym { - return reflectdata.TypeLinksym(t) - } typecheck.Target = new(ir.Package) @@ -202,9 +199,6 @@ func Main(archInit func(*ssagen.ArchInfo)) { base.AutogeneratedPos = makePos(src.NewFileBase("", ""), 1, 0) - types.TypeLinkSym = func(t *types.Type) *obj.LSym { - return reflectdata.TypeLinksym(t) - } typecheck.Init() // Parse input. 
diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go index 849c9e8967c60..4378f2d62763c 100644 --- a/src/cmd/compile/internal/ssa/writebarrier.go +++ b/src/cmd/compile/internal/ssa/writebarrier.go @@ -5,6 +5,7 @@ package ssa import ( + "cmd/compile/internal/reflectdata" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/objabi" @@ -270,11 +271,11 @@ func writebarrier(f *Func) { case OpMoveWB: fn = typedmemmove val = w.Args[1] - typ = w.Aux.(*types.Type).Symbol() + typ = reflectdata.TypeLinksym(w.Aux.(*types.Type)) nWBops-- case OpZeroWB: fn = typedmemclr - typ = w.Aux.(*types.Type).Symbol() + typ = reflectdata.TypeLinksym(w.Aux.(*types.Type)) nWBops-- case OpVarDef, OpVarLive, OpVarKill: } diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index 6feedbfabc5a7..5176b96c0204f 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -6,7 +6,6 @@ package types import ( "cmd/compile/internal/base" - "cmd/internal/obj" "cmd/internal/src" "fmt" "sync" @@ -1532,10 +1531,6 @@ func (t *Type) HasPointers() bool { return true } -func (t *Type) Symbol() *obj.LSym { - return TypeLinkSym(t) -} - // Tie returns 'T' if t is a concrete type, // 'I' if t is an interface type, and 'E' if t is an empty interface type. // It is used to build calls to the conv* and assert* runtime routines. diff --git a/src/cmd/compile/internal/types/utils.go b/src/cmd/compile/internal/types/utils.go index 2477f1da668a6..f9f629ca3ea6c 100644 --- a/src/cmd/compile/internal/types/utils.go +++ b/src/cmd/compile/internal/types/utils.go @@ -4,19 +4,8 @@ package types -import ( - "cmd/internal/obj" -) - const BADWIDTH = -1000000000 -// The following variables must be initialized early by the frontend. -// They are here to break import cycles. -// TODO(gri) eliminate these dependencies. 
-var (
-	TypeLinkSym func(*Type) *obj.LSym
-)
-
 type bitset8 uint8
 
 func (f *bitset8) set(mask uint8, b bool) {

From e40cb4d4ae357d80d5e2b66e765c937317fad07f Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Tue, 29 Dec 2020 02:55:05 -0800
Subject: [PATCH 295/474] [dev.regabi] cmd/compile: remove more unused code

Change-Id: I60ac28e3ab376cb0dac23a9b4f481f8562ad8c56
Reviewed-on: https://go-review.googlesource.com/c/go/+/280647
Trust: Matthew Dempsky
Run-TryBot: Matthew Dempsky
Reviewed-by: Cuong Manh Le
---
 src/cmd/compile/internal/typecheck/dcl.go | 57 -----------------------
 1 file changed, 57 deletions(-)

diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go
index 36057ba2d1c96..83f926e135e8a 100644
--- a/src/cmd/compile/internal/typecheck/dcl.go
+++ b/src/cmd/compile/internal/typecheck/dcl.go
@@ -17,63 +17,6 @@ import (
 
 var DeclContext ir.Class // PEXTERN/PAUTO
 
-func AssignDefn(left []ir.Node, defn ir.Node) {
-	for _, n := range left {
-		if n.Sym() != nil {
-			n.Sym().SetUniq(true)
-		}
-	}
-
-	var nnew, nerr int
-	for i, n := range left {
-		if ir.IsBlank(n) {
-			continue
-		}
-		if !assignableName(n) {
-			base.ErrorfAt(defn.Pos(), "non-name %v on left side of :=", n)
-			nerr++
-			continue
-		}
-
-		if !n.Sym().Uniq() {
-			base.ErrorfAt(defn.Pos(), "%v repeated on left side of :=", n.Sym())
-			n.SetDiag(true)
-			nerr++
-			continue
-		}
-
-		n.Sym().SetUniq(false)
-		if n.Sym().Block == types.Block {
-			continue
-		}
-
-		nnew++
-		n := NewName(n.Sym())
-		Declare(n, DeclContext)
-		n.Defn = defn
-		defn.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n))
-		left[i] = n
-	}
-
-	if nnew == 0 && nerr == 0 {
-		base.ErrorfAt(defn.Pos(), "no new variables on left side of :=")
-	}
-}
-
-// := declarations
-func assignableName(n ir.Node) bool {
-	switch n.Op() {
-	case ir.ONAME,
-		ir.ONONAME,
-		ir.OPACK,
-		ir.OTYPE,
-		ir.OLITERAL:
-		return n.Sym() != nil
-	}
-
-	return false
-}
-
 func DeclFunc(sym *types.Sym, tfn ir.Ntype) *ir.Func {
 	if tfn.Op() != ir.OTFUNC {
 		base.Fatalf("expected OTFUNC node, got %v", tfn)
 	}

From 9ea272e5ec5dd5eadd59d54c08377d5d9527a51b Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Tue, 29 Dec 2020 03:08:23 -0800
Subject: [PATCH 296/474] [dev.regabi] cmd/compile: simplify ir.Func somewhat

Two simplifications:

1. Statements (including ODCLFUNC) don't have types, and the
Func.Nname already has a type. There's no need for a second one.
However, there is a lot of code that expects to be able to call
Func.Type, so leave a forwarding method, like with Sym and Linksym.

2. Inline and remove ir.NewFuncNameAt. It doesn't really save any
code, and it's only used in a handful of places.

Passes toolstash -cmp.
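Point 1 keeps Func.Type as the one-line forwarder to f.Nname.Type()
shown in the diff below; point 2 spells the constructor out at each
call site. A minimal sketch of the inlined form (makeFunc is an
invented name; real callers such as noder also set Defn and Ntype):

	// makeFunc shows what a former ir.NewFuncNameAt(pos, sym, fn)
	// call site becomes once the constructor is inlined.
	func makeFunc(pos src.XPos, sym *types.Sym) *ir.Func {
		fn := ir.NewFunc(pos)
		fn.Nname = ir.NewNameAt(pos, sym)
		fn.Nname.Func = fn
		return fn
	}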
Change-Id: I51acaa341897dae0fcdf2fa576a10174a2ae4d1e Reviewed-on: https://go-review.googlesource.com/c/go/+/280648 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/ir/func.go | 16 +--------------- src/cmd/compile/internal/ir/sizeof_test.go | 2 +- src/cmd/compile/internal/noder/noder.go | 7 +++++-- src/cmd/compile/internal/typecheck/dcl.go | 3 ++- src/cmd/compile/internal/typecheck/export.go | 8 ++------ src/cmd/compile/internal/typecheck/func.go | 1 - src/cmd/compile/internal/typecheck/iimport.go | 11 ++++++----- src/cmd/compile/internal/walk/closure.go | 2 +- 8 files changed, 18 insertions(+), 32 deletions(-) diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 4613425f1a340..bffd4dd5ef9b2 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -49,7 +49,6 @@ import ( // pointer from the Func back to the OCALLPART. type Func struct { miniNode - typ *types.Type Body Nodes Iota int64 @@ -116,9 +115,7 @@ func NewFunc(pos src.XPos) *Func { func (f *Func) isStmt() {} -func (f *Func) Type() *types.Type { return f.typ } -func (f *Func) SetType(x *types.Type) { f.typ = x } - +func (f *Func) Type() *types.Type { return f.Nname.Type() } func (f *Func) Sym() *types.Sym { return f.Nname.Sym() } func (f *Func) Linksym() *obj.LSym { return f.Nname.Linksym() } @@ -236,17 +233,6 @@ func FuncSymName(s *types.Sym) string { return s.Name + "·f" } -// NewFuncNameAt generates a new name node for a function or method. -func NewFuncNameAt(pos src.XPos, s *types.Sym, fn *Func) *Name { - if fn.Nname != nil { - base.Fatalf("newFuncName - already have name") - } - n := NewNameAt(pos, s) - n.SetFunc(fn) - fn.Nname = n - return n -} - // MarkFunc marks a node as a function. 
func MarkFunc(n *Name) { if n.Op() != ONAME || n.Class_ != Pxxx { diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go index 2a618f85ed4ea..61f207af2048e 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {Func{}, 200, 352}, + {Func{}, 196, 344}, {Name{}, 132, 232}, } diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index 920f4839adfcc..f4b5e0cf91d40 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -524,7 +524,8 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node { name = ir.BlankNode.Sym() // filled in by typecheckfunc } - f.Nname = ir.NewFuncNameAt(p.pos(fun.Name), name, f) + f.Nname = ir.NewNameAt(p.pos(fun.Name), name) + f.Nname.Func = f f.Nname.Defn = f f.Nname.Ntype = t @@ -1742,7 +1743,9 @@ func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node { fn := ir.NewFunc(p.pos(expr)) fn.SetIsHiddenClosure(ir.CurFunc != nil) - fn.Nname = ir.NewFuncNameAt(p.pos(expr), ir.BlankNode.Sym(), fn) // filled in by typecheckclosure + + fn.Nname = ir.NewNameAt(p.pos(expr), ir.BlankNode.Sym()) // filled in by typecheckclosure + fn.Nname.Func = fn fn.Nname.Ntype = xtype fn.Nname.Defn = fn diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go index 83f926e135e8a..c4f32ff59dbad 100644 --- a/src/cmd/compile/internal/typecheck/dcl.go +++ b/src/cmd/compile/internal/typecheck/dcl.go @@ -23,7 +23,8 @@ func DeclFunc(sym *types.Sym, tfn ir.Ntype) *ir.Func { } fn := ir.NewFunc(base.Pos) - fn.Nname = ir.NewFuncNameAt(base.Pos, sym, fn) + fn.Nname = ir.NewNameAt(base.Pos, sym) + fn.Nname.Func = fn fn.Nname.Defn = fn fn.Nname.Ntype = tfn ir.MarkFunc(fn.Nname) diff --git a/src/cmd/compile/internal/typecheck/export.go b/src/cmd/compile/internal/typecheck/export.go index 03deff8174ae3..c5253914012c6 100644 --- a/src/cmd/compile/internal/typecheck/export.go +++ b/src/cmd/compile/internal/typecheck/export.go @@ -31,12 +31,8 @@ func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val // ipkg is the package being imported func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name { n := importobj(ipkg, pos, s, ir.ONAME, ir.PFUNC, t) - - fn := ir.NewFunc(pos) - fn.SetType(t) - n.SetFunc(fn) - fn.Nname = n - + n.Func = ir.NewFunc(pos) + n.Func.Nname = n return n } diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index 9bb9245d4a443..060024951e095 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -409,7 +409,6 @@ func tcFunc(n *ir.Func) { if t == nil { return } - n.SetType(t) rcvr := t.Recv() if rcvr != nil && n.Shortname != nil { m := addmethod(n, n.Shortname, t, true, n.Pragma&ir.Nointerface != 0) diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go index 86277e69bd4cc..00ecd9b819029 100644 --- a/src/cmd/compile/internal/typecheck/iimport.go +++ b/src/cmd/compile/internal/typecheck/iimport.go @@ -331,12 +331,13 @@ func (r *importReader) doDecl(sym *types.Sym) *ir.Name { recv := r.param() mtyp := r.signature(recv) - fn := ir.NewFunc(mpos) - fn.SetType(mtyp) - m := ir.NewFuncNameAt(mpos, ir.MethodSym(recv.Type, msym), fn) - 
m.SetType(mtyp) - m.Class_ = ir.PFUNC // methodSym already marked m.Sym as a function. + m := ir.NewNameAt(mpos, ir.MethodSym(recv.Type, msym)) + m.Class_ = ir.PFUNC + m.SetType(mtyp) + + m.Func = ir.NewFunc(mpos) + m.Func.Nname = m f := types.NewField(mpos, msym, mtyp) f.Nname = m diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go index 9bcb82bc03669..00d3f50bc4c75 100644 --- a/src/cmd/compile/internal/walk/closure.go +++ b/src/cmd/compile/internal/walk/closure.go @@ -67,7 +67,7 @@ func Closure(fn *ir.Func) { } types.CalcSize(f.Type()) - fn.SetType(f.Type()) // update type of ODCLFUNC + fn.Nname.SetType(f.Type()) // update type of ODCLFUNC } else { // The closure is not called, so it is going to stay as closure. var body []ir.Node From 0523d525ae0dea229cffc5634caddd0acbc066af Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Sat, 28 Nov 2020 16:01:58 -0800 Subject: [PATCH 297/474] [dev.regabi] cmd/compile: separate out address taken computation from typechecker This CL computes a second parallel addrtaken bit that we check against the old way of doing it. A subsequent CL will rip out the typechecker code and just use the new way. Change-Id: I62b7342c44f694144844695386f80088bbd40bf4 Reviewed-on: https://go-review.googlesource.com/c/go/+/275695 Trust: Keith Randall Trust: Dan Scales Run-TryBot: Keith Randall TryBot-Result: Go Bot Reviewed-by: Dan Scales --- src/cmd/compile/internal/inline/inl.go | 1 + src/cmd/compile/internal/ir/name.go | 21 ++++++-- src/cmd/compile/internal/typecheck/func.go | 16 +++++++ src/cmd/compile/internal/typecheck/subr.go | 48 +++++++++++++++++++ .../compile/internal/typecheck/typecheck.go | 24 ++++++++-- src/cmd/compile/internal/walk/order.go | 3 +- 6 files changed, 104 insertions(+), 9 deletions(-) diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 126871b805522..7324369ced9ed 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -1013,6 +1013,7 @@ func inlvar(var_ ir.Node) ir.Node { n.SetUsed(true) n.Curfn = ir.CurFunc // the calling function, not the called one n.SetAddrtaken(var_.Name().Addrtaken()) + n.SetAddrtaken2(var_.Name().Addrtaken()) ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n) return n diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 79583914353d8..8b1084deeb391 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -35,10 +35,10 @@ func (*Ident) CanBeNtype() {} // Name holds Node fields used only by named nodes (ONAME, OTYPE, some OLITERAL). 
type Name struct { miniExpr - BuiltinOp Op // uint8 - Class_ Class // uint8 - flags bitset16 + BuiltinOp Op // uint8 + Class_ Class // uint8 pragma PragmaFlag // int16 + flags bitset32 sym *types.Sym Func *Func Offset_ int64 @@ -273,6 +273,7 @@ const ( nameOpenDeferSlot // if temporary var storing info for open-coded defers nameLibfuzzerExtraCounter // if PEXTERN should be assigned to __libfuzzer_extra_counters section nameAlias // is type name an alias + nameAddrtaken2 ) func (n *Name) Captured() bool { return n.flags&nameCaptured != 0 } @@ -284,7 +285,7 @@ func (n *Name) Used() bool { return n.flags&nameUsed != 0 } func (n *Name) IsClosureVar() bool { return n.flags&nameIsClosureVar != 0 } func (n *Name) IsOutputParamHeapAddr() bool { return n.flags&nameIsOutputParamHeapAddr != 0 } func (n *Name) Assigned() bool { return n.flags&nameAssigned != 0 } -func (n *Name) Addrtaken() bool { return n.flags&nameAddrtaken != 0 } +func (n *Name) Addrtaken() bool { return n.checkAddrtaken() && n.flags&nameAddrtaken != 0 } func (n *Name) InlFormal() bool { return n.flags&nameInlFormal != 0 } func (n *Name) InlLocal() bool { return n.flags&nameInlLocal != 0 } func (n *Name) OpenDeferSlot() bool { return n.flags&nameOpenDeferSlot != 0 } @@ -300,11 +301,23 @@ func (n *Name) SetIsClosureVar(b bool) { n.flags.set(nameIsClosureVar, func (n *Name) SetIsOutputParamHeapAddr(b bool) { n.flags.set(nameIsOutputParamHeapAddr, b) } func (n *Name) SetAssigned(b bool) { n.flags.set(nameAssigned, b) } func (n *Name) SetAddrtaken(b bool) { n.flags.set(nameAddrtaken, b) } +func (n *Name) SetAddrtaken2(b bool) { n.flags.set(nameAddrtaken2, b) } func (n *Name) SetInlFormal(b bool) { n.flags.set(nameInlFormal, b) } func (n *Name) SetInlLocal(b bool) { n.flags.set(nameInlLocal, b) } func (n *Name) SetOpenDeferSlot(b bool) { n.flags.set(nameOpenDeferSlot, b) } func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtraCounter, b) } +func (n *Name) checkAddrtaken() bool { + // The two different ways of computing addrtaken bits might diverge during computation, + // but any time we look at them, they should be identical. + x := n.flags&nameAddrtaken != 0 + y := n.flags&nameAddrtaken2 != 0 + if x != y { + panic("inconsistent addrtaken") + } + return true +} + // MarkReadonly indicates that n is an ONAME with readonly contents. func (n *Name) MarkReadonly() { if n.Op() != ONAME { diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index 060024951e095..ce6f4027da906 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -135,6 +135,7 @@ func CaptureVars(fn *ir.Func) { v.SetByval(true) } else { outermost.Name().SetAddrtaken(true) + outermost.Name().SetAddrtaken2(true) outer = NodAddr(outer) } @@ -163,6 +164,21 @@ func CaptureVars(fn *ir.Func) { func ImportedBody(fn *ir.Func) { lno := ir.SetPos(fn.Nname) + // When we load an inlined body, we need to allow OADDR + // operations on untyped expressions. We will fix the + // addrtaken flags on all the arguments of the OADDR with the + // computeAddrtaken call below (after we typecheck the body). + // TODO: export/import types and addrtaken marks along with inlined bodies, + // so this will be unnecessary. 
+ incrementalAddrtaken = false + defer func() { + if dirtyAddrtaken { + computeAddrtaken(fn.Inl.Body) // compute addrtaken marks once types are available + dirtyAddrtaken = false + } + incrementalAddrtaken = true + }() + ImportBody(fn) // typecheckinl is only for imported functions; diff --git a/src/cmd/compile/internal/typecheck/subr.go b/src/cmd/compile/internal/typecheck/subr.go index 178eba4484e1c..8d31fea9ecd35 100644 --- a/src/cmd/compile/internal/typecheck/subr.go +++ b/src/cmd/compile/internal/typecheck/subr.go @@ -67,9 +67,57 @@ func NodAddr(n ir.Node) *ir.AddrExpr { // nodAddrPos returns a node representing &n at position pos. func NodAddrAt(pos src.XPos, n ir.Node) *ir.AddrExpr { + n = markAddrOf(n) return ir.NewAddrExpr(pos, n) } +func markAddrOf(n ir.Node) ir.Node { + if incrementalAddrtaken { + // We can only do incremental addrtaken computation when it is ok + // to typecheck the argument of the OADDR. That's only safe after the + // main typecheck has completed. + // The argument to OADDR needs to be typechecked because &x[i] takes + // the address of x if x is an array, but not if x is a slice. + // Note: outervalue doesn't work correctly until n is typechecked. + n = typecheck(n, ctxExpr) + if x := ir.OuterValue(n); x.Op() == ir.ONAME { + x.Name().SetAddrtaken2(true) + } + } else { + // Remember that we built an OADDR without computing the Addrtaken bit for + // its argument. We'll do that later in bulk using computeAddrtaken. + dirtyAddrtaken = true + } + return n +} + +// If incrementalAddrtaken is false, we do not compute Addrtaken for an OADDR Node +// when it is built. The Addrtaken bits are set in bulk by computeAddrtaken. +// If incrementalAddrtaken is true, then when an OADDR Node is built the Addrtaken +// field of its argument is updated immediately. +var incrementalAddrtaken = false + +// If dirtyAddrtaken is true, then there are OADDR whose corresponding arguments +// have not yet been marked as Addrtaken. +var dirtyAddrtaken = false + +func computeAddrtaken(top []ir.Node) { + for _, n := range top { + ir.Visit(n, func(n ir.Node) { + if n.Op() == ir.OADDR { + if x := ir.OuterValue(n.(*ir.AddrExpr).X); x.Op() == ir.ONAME { + x.Name().SetAddrtaken2(true) + if x.Name().IsClosureVar() { + // Mark the original variable as Addrtaken so that capturevars + // knows not to pass it by value. + x.Name().Defn.Name().SetAddrtaken2(true) + } + } + } + }) + } +} + func NodNil() ir.Node { n := ir.NewNilExpr(base.Pos) n.SetType(types.Types[types.TNIL]) diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index e29d58cefa7dc..335e1b53ce365 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -99,7 +99,26 @@ func Package() { // Phase 5: With all user code type-checked, it's now safe to verify map keys. CheckMapKeys() - // Phase 6: Decide how to capture closed variables. + // Phase 6: Compute Addrtaken for names. + // We need to wait until typechecking is done so that when we see &x[i] + // we know that x has its address taken if x is an array, but not if x is a slice. + // We compute Addrtaken in bulk here. + // After this phase, we maintain Addrtaken incrementally. + if dirtyAddrtaken { + computeAddrtaken(Target.Decls) + dirtyAddrtaken = false + } + incrementalAddrtaken = true + + // Phase 7: Eliminate some obviously dead code. + // Must happen after typechecking. 
+ for _, n := range Target.Decls { + if n.Op() == ir.ODCLFUNC { + deadcode(n.(*ir.Func)) + } + } + + // Phase 8: Decide how to capture closed variables. // This needs to run before escape analysis, // because variables captured by value do not escape. base.Timer.Start("fe", "capturevars") @@ -156,9 +175,6 @@ func FuncBody(n *ir.Func) { if base.Errors() > errorsBefore { n.Body.Set(nil) // type errors; do not compile } - // Now that we've checked whether n terminates, - // we can eliminate some obviously dead code. - deadcode(n) } var importlist []*ir.Func diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index 0dd76ccee9a9b..82180c113eb86 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -517,7 +517,8 @@ func (o *orderState) call(nn ir.Node) { if arg.X.Type().IsUnsafePtr() { x := o.copyExpr(arg.X) arg.X = x - x.Name().SetAddrtaken(true) // ensure SSA keeps the x variable + x.Name().SetAddrtaken(true) // ensure SSA keeps the x variable + x.Name().SetAddrtaken2(true) // ensure SSA keeps the x variable n.Body.Append(typecheck.Stmt(ir.NewUnaryExpr(base.Pos, ir.OVARLIVE, x))) } } From 0620c674ddca234e0a69b5a35c5fb06a881dd73b Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Sat, 28 Nov 2020 12:37:55 -0800 Subject: [PATCH 298/474] [dev.regabi] cmd/compile: remove original addrtaken bit Switch the source of truth to the new addrtaken bit. Remove the old one. Change-Id: Ie53679ab14cfcd34b55e912e7ecb962a22db7db3 Reviewed-on: https://go-review.googlesource.com/c/go/+/275696 Trust: Keith Randall Trust: Dan Scales Run-TryBot: Keith Randall TryBot-Result: Go Bot Reviewed-by: Dan Scales --- src/cmd/compile/internal/inline/inl.go | 1 - src/cmd/compile/internal/ir/name.go | 14 +------------- src/cmd/compile/internal/typecheck/expr.go | 8 -------- src/cmd/compile/internal/typecheck/func.go | 1 - src/cmd/compile/internal/walk/order.go | 1 - 5 files changed, 1 insertion(+), 24 deletions(-) diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 7324369ced9ed..8f3a4b4d8cc09 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -1012,7 +1012,6 @@ func inlvar(var_ ir.Node) ir.Node { n.Class_ = ir.PAUTO n.SetUsed(true) n.Curfn = ir.CurFunc // the calling function, not the called one - n.SetAddrtaken(var_.Name().Addrtaken()) n.SetAddrtaken2(var_.Name().Addrtaken()) ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n) diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 8b1084deeb391..6e41fd650bc25 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -285,7 +285,7 @@ func (n *Name) Used() bool { return n.flags&nameUsed != 0 } func (n *Name) IsClosureVar() bool { return n.flags&nameIsClosureVar != 0 } func (n *Name) IsOutputParamHeapAddr() bool { return n.flags&nameIsOutputParamHeapAddr != 0 } func (n *Name) Assigned() bool { return n.flags&nameAssigned != 0 } -func (n *Name) Addrtaken() bool { return n.checkAddrtaken() && n.flags&nameAddrtaken != 0 } +func (n *Name) Addrtaken() bool { return n.flags&nameAddrtaken2 != 0 } func (n *Name) InlFormal() bool { return n.flags&nameInlFormal != 0 } func (n *Name) InlLocal() bool { return n.flags&nameInlLocal != 0 } func (n *Name) OpenDeferSlot() bool { return n.flags&nameOpenDeferSlot != 0 } @@ -300,24 +300,12 @@ func (n *Name) SetUsed(b bool) { n.flags.set(nameUsed, b) } func (n *Name) SetIsClosureVar(b bool) { 
n.flags.set(nameIsClosureVar, b) } func (n *Name) SetIsOutputParamHeapAddr(b bool) { n.flags.set(nameIsOutputParamHeapAddr, b) } func (n *Name) SetAssigned(b bool) { n.flags.set(nameAssigned, b) } -func (n *Name) SetAddrtaken(b bool) { n.flags.set(nameAddrtaken, b) } func (n *Name) SetAddrtaken2(b bool) { n.flags.set(nameAddrtaken2, b) } func (n *Name) SetInlFormal(b bool) { n.flags.set(nameInlFormal, b) } func (n *Name) SetInlLocal(b bool) { n.flags.set(nameInlLocal, b) } func (n *Name) SetOpenDeferSlot(b bool) { n.flags.set(nameOpenDeferSlot, b) } func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtraCounter, b) } -func (n *Name) checkAddrtaken() bool { - // The two different ways of computing addrtaken bits might diverge during computation, - // but any time we look at them, they should be identical. - x := n.flags&nameAddrtaken != 0 - y := n.flags&nameAddrtaken2 != 0 - if x != y { - panic("inconsistent addrtaken") - } - return true -} - // MarkReadonly indicates that n is an ONAME with readonly contents. func (n *Name) MarkReadonly() { if n.Op() != ONAME { diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go index 5752139c0b92a..12bfae67a865e 100644 --- a/src/cmd/compile/internal/typecheck/expr.go +++ b/src/cmd/compile/internal/typecheck/expr.go @@ -35,14 +35,6 @@ func tcAddr(n *ir.AddrExpr) ir.Node { if ir.Orig(r) != r { base.Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean? } - r.Name().SetAddrtaken(true) - if r.Name().IsClosureVar() && !CaptureVarsComplete { - // Mark the original variable as Addrtaken so that capturevars - // knows not to pass it by value. - // But if the capturevars phase is complete, don't touch it, - // in case l.Name's containing function has not yet been compiled. 
- r.Name().Defn.Name().SetAddrtaken(true) - } } n.X = DefaultLit(n.X, nil) if n.X.Type() == nil { diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index ce6f4027da906..0819380885c7a 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -134,7 +134,6 @@ func CaptureVars(fn *ir.Func) { if outermost.Class_ != ir.PPARAMOUT && !outermost.Name().Addrtaken() && !outermost.Name().Assigned() && v.Type().Width <= 128 { v.SetByval(true) } else { - outermost.Name().SetAddrtaken(true) outermost.Name().SetAddrtaken2(true) outer = NodAddr(outer) } diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index 82180c113eb86..58c1c597fcd84 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -517,7 +517,6 @@ func (o *orderState) call(nn ir.Node) { if arg.X.Type().IsUnsafePtr() { x := o.copyExpr(arg.X) arg.X = x - x.Name().SetAddrtaken(true) // ensure SSA keeps the x variable x.Name().SetAddrtaken2(true) // ensure SSA keeps the x variable n.Body.Append(typecheck.Stmt(ir.NewUnaryExpr(base.Pos, ir.OVARLIVE, x))) } From b3e1ec97fd57d66eb1a1307b8c96141d0014ec51 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Sat, 28 Nov 2020 12:55:01 -0800 Subject: [PATCH 299/474] [dev.regabi] cmd/compile: move new addrtaken bit back to the old name Change-Id: I2732aefe95a21c23d73a907d5596fcb1626d6dd7 Reviewed-on: https://go-review.googlesource.com/c/go/+/275697 Trust: Keith Randall Trust: Dan Scales Run-TryBot: Keith Randall TryBot-Result: Go Bot Reviewed-by: Dan Scales --- src/cmd/compile/internal/inline/inl.go | 2 +- src/cmd/compile/internal/ir/name.go | 7 +++---- src/cmd/compile/internal/typecheck/func.go | 2 +- src/cmd/compile/internal/typecheck/subr.go | 6 +++--- src/cmd/compile/internal/walk/order.go | 2 +- 5 files changed, 9 insertions(+), 10 deletions(-) diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 8f3a4b4d8cc09..126871b805522 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -1012,7 +1012,7 @@ func inlvar(var_ ir.Node) ir.Node { n.Class_ = ir.PAUTO n.SetUsed(true) n.Curfn = ir.CurFunc // the calling function, not the called one - n.SetAddrtaken2(var_.Name().Addrtaken()) + n.SetAddrtaken(var_.Name().Addrtaken()) ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n) return n diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 6e41fd650bc25..d6135ee29a828 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -38,7 +38,7 @@ type Name struct { BuiltinOp Op // uint8 Class_ Class // uint8 pragma PragmaFlag // int16 - flags bitset32 + flags bitset16 sym *types.Sym Func *Func Offset_ int64 @@ -273,7 +273,6 @@ const ( nameOpenDeferSlot // if temporary var storing info for open-coded defers nameLibfuzzerExtraCounter // if PEXTERN should be assigned to __libfuzzer_extra_counters section nameAlias // is type name an alias - nameAddrtaken2 ) func (n *Name) Captured() bool { return n.flags&nameCaptured != 0 } @@ -285,7 +284,7 @@ func (n *Name) Used() bool { return n.flags&nameUsed != 0 } func (n *Name) IsClosureVar() bool { return n.flags&nameIsClosureVar != 0 } func (n *Name) IsOutputParamHeapAddr() bool { return n.flags&nameIsOutputParamHeapAddr != 0 } func (n *Name) Assigned() bool { return n.flags&nameAssigned != 0 } -func (n *Name) Addrtaken() bool { return 
n.flags&nameAddrtaken2 != 0 } +func (n *Name) Addrtaken() bool { return n.flags&nameAddrtaken != 0 } func (n *Name) InlFormal() bool { return n.flags&nameInlFormal != 0 } func (n *Name) InlLocal() bool { return n.flags&nameInlLocal != 0 } func (n *Name) OpenDeferSlot() bool { return n.flags&nameOpenDeferSlot != 0 } @@ -300,7 +299,7 @@ func (n *Name) SetUsed(b bool) { n.flags.set(nameUsed, b) } func (n *Name) SetIsClosureVar(b bool) { n.flags.set(nameIsClosureVar, b) } func (n *Name) SetIsOutputParamHeapAddr(b bool) { n.flags.set(nameIsOutputParamHeapAddr, b) } func (n *Name) SetAssigned(b bool) { n.flags.set(nameAssigned, b) } -func (n *Name) SetAddrtaken2(b bool) { n.flags.set(nameAddrtaken2, b) } +func (n *Name) SetAddrtaken(b bool) { n.flags.set(nameAddrtaken, b) } func (n *Name) SetInlFormal(b bool) { n.flags.set(nameInlFormal, b) } func (n *Name) SetInlLocal(b bool) { n.flags.set(nameInlLocal, b) } func (n *Name) SetOpenDeferSlot(b bool) { n.flags.set(nameOpenDeferSlot, b) } diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index 0819380885c7a..75f38d588d377 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -134,7 +134,7 @@ func CaptureVars(fn *ir.Func) { if outermost.Class_ != ir.PPARAMOUT && !outermost.Name().Addrtaken() && !outermost.Name().Assigned() && v.Type().Width <= 128 { v.SetByval(true) } else { - outermost.Name().SetAddrtaken2(true) + outermost.Name().SetAddrtaken(true) outer = NodAddr(outer) } diff --git a/src/cmd/compile/internal/typecheck/subr.go b/src/cmd/compile/internal/typecheck/subr.go index 8d31fea9ecd35..9d414874a0446 100644 --- a/src/cmd/compile/internal/typecheck/subr.go +++ b/src/cmd/compile/internal/typecheck/subr.go @@ -81,7 +81,7 @@ func markAddrOf(n ir.Node) ir.Node { // Note: outervalue doesn't work correctly until n is typechecked. n = typecheck(n, ctxExpr) if x := ir.OuterValue(n); x.Op() == ir.ONAME { - x.Name().SetAddrtaken2(true) + x.Name().SetAddrtaken(true) } } else { // Remember that we built an OADDR without computing the Addrtaken bit for @@ -106,11 +106,11 @@ func computeAddrtaken(top []ir.Node) { ir.Visit(n, func(n ir.Node) { if n.Op() == ir.OADDR { if x := ir.OuterValue(n.(*ir.AddrExpr).X); x.Op() == ir.ONAME { - x.Name().SetAddrtaken2(true) + x.Name().SetAddrtaken(true) if x.Name().IsClosureVar() { // Mark the original variable as Addrtaken so that capturevars // knows not to pass it by value. - x.Name().Defn.Name().SetAddrtaken2(true) + x.Name().Defn.Name().SetAddrtaken(true) } } } diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index 58c1c597fcd84..0dd76ccee9a9b 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -517,7 +517,7 @@ func (o *orderState) call(nn ir.Node) { if arg.X.Type().IsUnsafePtr() { x := o.copyExpr(arg.X) arg.X = x - x.Name().SetAddrtaken2(true) // ensure SSA keeps the x variable + x.Name().SetAddrtaken(true) // ensure SSA keeps the x variable n.Body.Append(typecheck.Stmt(ir.NewUnaryExpr(base.Pos, ir.OVARLIVE, x))) } } From 5cf3c87fa6ce8440ccda9dddeec0d5e899ee485e Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Tue, 29 Dec 2020 23:26:45 +0700 Subject: [PATCH 300/474] [dev.regabi] cmd/compile: generate case/comm clause functions in mknode.go Passes toolstash -cmp. 
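The sliceHelper generator added below stamps out the copy/maybeDo/edit helpers from one template, relying on fmt's indexed verbs (%[1]s, %[2]s) so a single argument can be reused several times. A self-contained sketch of that technique, with names simplified from the real mknode.go:

package main

import (
	"bytes"
	"fmt"
	"go/format"
)

// One helper template; %[1]s is the short clause name, %[2]s the type.
const tmpl = `
func copy%[1]ss(list []*%[2]s) []*%[2]s {
	if list == nil {
		return nil
	}
	c := make([]*%[2]s, len(list))
	copy(c, list)
	return c
}
`

func main() {
	var buf bytes.Buffer
	buf.WriteString("package ir\n")
	fmt.Fprintf(&buf, tmpl, "Case", "CaseClause")
	fmt.Fprintf(&buf, tmpl, "Comm", "CommClause")
	// format.Source both gofmts the output and rejects anything that
	// does not parse, catching template mistakes at generation time.
	out, err := format.Source(buf.Bytes())
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}

Running this prints gofmt'ed copyCases and copyComms functions, matching the shape of the generated code that lands in node_gen.go.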
Change-Id: I52e9d6f35f22d5d59ac6aad02011c5abaac45739 Reviewed-on: https://go-review.googlesource.com/c/go/+/279446 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/mknode.go | 38 ++++++++++++++++ src/cmd/compile/internal/ir/node_gen.go | 58 ++++++++++++++++++++++++ src/cmd/compile/internal/ir/stmt.go | 60 ------------------------- 3 files changed, 96 insertions(+), 60 deletions(-) diff --git a/src/cmd/compile/internal/ir/mknode.go b/src/cmd/compile/internal/ir/mknode.go index 3b5da32d8c52e..17ef720172a6c 100644 --- a/src/cmd/compile/internal/ir/mknode.go +++ b/src/cmd/compile/internal/ir/mknode.go @@ -136,6 +136,10 @@ func main() { fmt.Fprintf(&buf, "}\n") } + for _, name := range []string{"CaseClause", "CommClause"} { + sliceHelper(&buf, name) + } + out, err := format.Source(buf.Bytes()) if err != nil { // write out mangled source so we can see the bug. @@ -148,6 +152,40 @@ func main() { } } +func sliceHelper(buf *bytes.Buffer, name string) { + tmpl := fmt.Sprintf(` +func copy%[1]ss(list []*%[2]s) []*%[2]s { + if list == nil { + return nil + } + c := make([]*%[2]s, len(list)) + copy(c, list) + return c +} +func maybeDo%[1]ss(list []*%[2]s, err error, do func(Node) error) error { + if err != nil { + return err + } + for _, x := range list { + if x != nil { + if err := do(x); err != nil { + return err + } + } + } + return nil +} +func edit%[1]ss(list []*%[2]s, edit func(Node) Node) { + for i, x := range list { + if x != nil { + list[i] = edit(x).(*%[2]s) + } + } +} +`, strings.TrimSuffix(name, "Clause"), name) + fmt.Fprintln(buf, tmpl) +} + func forNodeFields(typName string, typ *types.Struct, f func(name string, is func(types.Type) bool)) { for i, n := 0, typ.NumFields(); i < n; i++ { v := typ.Field(i) diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index fe54b62f1805b..a2a30a05870a8 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -1015,3 +1015,61 @@ func (n *typeNode) doChildren(do func(Node) error) error { } func (n *typeNode) editChildren(edit func(Node) Node) { } + +func copyCases(list []*CaseClause) []*CaseClause { + if list == nil { + return nil + } + c := make([]*CaseClause, len(list)) + copy(c, list) + return c +} +func maybeDoCases(list []*CaseClause, err error, do func(Node) error) error { + if err != nil { + return err + } + for _, x := range list { + if x != nil { + if err := do(x); err != nil { + return err + } + } + } + return nil +} +func editCases(list []*CaseClause, edit func(Node) Node) { + for i, x := range list { + if x != nil { + list[i] = edit(x).(*CaseClause) + } + } +} + +func copyComms(list []*CommClause) []*CommClause { + if list == nil { + return nil + } + c := make([]*CommClause, len(list)) + copy(c, list) + return c +} +func maybeDoComms(list []*CommClause, err error, do func(Node) error) error { + if err != nil { + return err + } + for _, x := range list { + if x != nil { + if err := do(x); err != nil { + return err + } + } + } + return nil +} +func editComms(list []*CommClause, edit func(Node) Node) { + for i, x := range list { + if x != nil { + list[i] = edit(x).(*CommClause) + } + } +} diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index 1301e65e26922..d88280dda767e 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -184,36 +184,6 @@ func NewCaseStmt(pos src.XPos, list, body []Node) *CaseClause { 
return n } -// TODO(mdempsky): Generate these with mknode.go. -func copyCases(list []*CaseClause) []*CaseClause { - if list == nil { - return nil - } - c := make([]*CaseClause, len(list)) - copy(c, list) - return c -} -func maybeDoCases(list []*CaseClause, err error, do func(Node) error) error { - if err != nil { - return err - } - for _, x := range list { - if x != nil { - if err := do(x); err != nil { - return err - } - } - } - return nil -} -func editCases(list []*CaseClause, edit func(Node) Node) { - for i, x := range list { - if x != nil { - list[i] = edit(x).(*CaseClause) - } - } -} - type CommClause struct { miniStmt Comm Node // communication case @@ -227,36 +197,6 @@ func NewCommStmt(pos src.XPos, comm Node, body []Node) *CommClause { return n } -// TODO(mdempsky): Generate these with mknode.go. -func copyComms(list []*CommClause) []*CommClause { - if list == nil { - return nil - } - c := make([]*CommClause, len(list)) - copy(c, list) - return c -} -func maybeDoComms(list []*CommClause, err error, do func(Node) error) error { - if err != nil { - return err - } - for _, x := range list { - if x != nil { - if err := do(x); err != nil { - return err - } - } - } - return nil -} -func editComms(list []*CommClause, edit func(Node) Node) { - for i, x := range list { - if x != nil { - list[i] = edit(x).(*CommClause) - } - } -} - // A ForStmt is a non-range for loop: for Init; Cond; Post { Body } // Op can be OFOR or OFORUNTIL (!Cond). type ForStmt struct { From 37babc97bb8f1d26dbbbc39e4ec5080a273fa2bb Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Wed, 30 Dec 2020 00:18:35 +0700 Subject: [PATCH 301/474] [dev.regabi] cmd/compile: allow visitor visits *ir.Name So future CLs can refactor ir.Node to *ir.Name when possible. Passes toolstash -cmp. Change-Id: I91ae38417ba10de207ed84b65d1d69cf64f24456 Reviewed-on: https://go-review.googlesource.com/c/go/+/279448 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/mknode.go | 5 ++- src/cmd/compile/internal/ir/node_gen.go | 42 +++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/ir/mknode.go b/src/cmd/compile/internal/ir/mknode.go index 17ef720172a6c..54a228bce72b6 100644 --- a/src/cmd/compile/internal/ir/mknode.go +++ b/src/cmd/compile/internal/ir/mknode.go @@ -35,6 +35,7 @@ func main() { } nodeType := lookup("Node") + ptrNameType := types.NewPointer(lookup("Name")) ntypeType := lookup("Ntype") nodesType := lookup("Nodes") slicePtrCaseClauseType := types.NewSlice(types.NewPointer(lookup("CaseClause"))) @@ -94,7 +95,7 @@ func main() { fmt.Fprintf(&buf, "func (n *%s) doChildren(do func(Node) error) error { var err error\n", name) forNodeFields(typName, typ, func(name string, is func(types.Type) bool) { switch { - case is(ptrIdentType): + case is(ptrIdentType), is(ptrNameType): fmt.Fprintf(&buf, "if n.%s != nil { err = maybeDo(n.%s, err, do) }\n", name, name) case is(nodeType), is(ntypeType): fmt.Fprintf(&buf, "err = maybeDo(n.%s, err, do)\n", name) @@ -117,6 +118,8 @@ func main() { switch { case is(ptrIdentType): fmt.Fprintf(&buf, "if n.%s != nil { n.%s = edit(n.%s).(*Ident) }\n", name, name, name) + case is(ptrNameType): + fmt.Fprintf(&buf, "if n.%s != nil { n.%s = edit(n.%s).(*Name) }\n", name, name, name) case is(nodeType): fmt.Fprintf(&buf, "n.%s = maybeEdit(n.%s, edit)\n", name, name) case is(ntypeType): diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 
a2a30a05870a8..d8bb4200efb46 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -15,11 +15,17 @@ func (n *AddStringExpr) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) err = maybeDoList(n.List, err, do) + if n.Prealloc != nil { + err = maybeDo(n.Prealloc, err, do) + } return err } func (n *AddStringExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) editList(n.List, edit) + if n.Prealloc != nil { + n.Prealloc = edit(n.Prealloc).(*Name) + } } func (n *AddrExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } @@ -32,11 +38,17 @@ func (n *AddrExpr) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) err = maybeDo(n.X, err, do) + if n.Alloc != nil { + err = maybeDo(n.Alloc, err, do) + } return err } func (n *AddrExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) n.X = maybeEdit(n.X, edit) + if n.Alloc != nil { + n.Alloc = edit(n.Alloc).(*Name) + } } func (n *ArrayType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } @@ -253,10 +265,16 @@ func (n *ClosureExpr) copy() Node { func (n *ClosureExpr) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) + if n.Prealloc != nil { + err = maybeDo(n.Prealloc, err, do) + } return err } func (n *ClosureExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) + if n.Prealloc != nil { + n.Prealloc = edit(n.Prealloc).(*Name) + } } func (n *ClosureReadExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } @@ -306,12 +324,18 @@ func (n *CompLitExpr) doChildren(do func(Node) error) error { err = maybeDoList(n.init, err, do) err = maybeDo(n.Ntype, err, do) err = maybeDoList(n.List, err, do) + if n.Prealloc != nil { + err = maybeDo(n.Prealloc, err, do) + } return err } func (n *CompLitExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) n.Ntype = toNtype(maybeEdit(n.Ntype, edit)) editList(n.List, edit) + if n.Prealloc != nil { + n.Prealloc = edit(n.Prealloc).(*Name) + } } func (n *ConstExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } @@ -654,10 +678,16 @@ func (n *NameOffsetExpr) copy() Node { func (n *NameOffsetExpr) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) + if n.Name_ != nil { + err = maybeDo(n.Name_, err, do) + } return err } func (n *NameOffsetExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) + if n.Name_ != nil { + n.Name_ = edit(n.Name_).(*Name) + } } func (n *NilExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } @@ -718,6 +748,9 @@ func (n *RangeStmt) doChildren(do func(Node) error) error { err = maybeDo(n.Key, err, do) err = maybeDo(n.Value, err, do) err = maybeDoList(n.Body, err, do) + if n.Prealloc != nil { + err = maybeDo(n.Prealloc, err, do) + } return err } func (n *RangeStmt) editChildren(edit func(Node) Node) { @@ -726,6 +759,9 @@ func (n *RangeStmt) editChildren(edit func(Node) Node) { n.Key = maybeEdit(n.Key, edit) n.Value = maybeEdit(n.Value, edit) editList(n.Body, edit) + if n.Prealloc != nil { + n.Prealloc = edit(n.Prealloc).(*Name) + } } func (n *ResultExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } @@ -792,11 +828,17 @@ func (n *SelectorExpr) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) err = maybeDo(n.X, err, do) + if n.Prealloc != nil { + err = maybeDo(n.Prealloc, err, do) + } return err } func (n *SelectorExpr) editChildren(edit func(Node) Node) { editList(n.init, 
edit) n.X = maybeEdit(n.X, edit) + if n.Prealloc != nil { + n.Prealloc = edit(n.Prealloc).(*Name) + } } func (n *SendStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } From 850aa7c60cb56d0cc40e3c213acb14ac96e2bf9e Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Wed, 30 Dec 2020 01:24:30 +0700 Subject: [PATCH 302/474] [dev.regabi] cmd/compile: use *ir.Name instead of ir.Node for CaseClause.Var Passes toolstash -cmp. Change-Id: Ib0b6ebf5751ffce2c9500dc67d78e54937ead208 Reviewed-on: https://go-review.googlesource.com/c/go/+/279449 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/escape/escape.go | 2 +- src/cmd/compile/internal/ir/node_gen.go | 8 ++++++-- src/cmd/compile/internal/ir/stmt.go | 2 +- src/cmd/compile/internal/typecheck/iexport.go | 2 +- src/cmd/compile/internal/typecheck/stmt.go | 2 +- src/cmd/compile/internal/walk/switch.go | 4 ++-- 6 files changed, 12 insertions(+), 8 deletions(-) diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index b953666ce6267..ec99c86c06b62 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -374,7 +374,7 @@ func (e *escape) stmt(n ir.Node) { var ks []hole for _, cas := range n.Cases { // cases if typesw && n.Tag.(*ir.TypeSwitchGuard).Tag != nil { - cv := cas.Var.(*ir.Name) + cv := cas.Var k := e.dcl(cv) // type switch variables have no ODCL. if cv.Type().HasPointers() { ks = append(ks, k.dotType(cv.Type(), cas, "switch case")) diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index d8bb4200efb46..6c1a28022f3a8 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -230,14 +230,18 @@ func (n *CaseClause) copy() Node { func (n *CaseClause) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) - err = maybeDo(n.Var, err, do) + if n.Var != nil { + err = maybeDo(n.Var, err, do) + } err = maybeDoList(n.List, err, do) err = maybeDoList(n.Body, err, do) return err } func (n *CaseClause) editChildren(edit func(Node) Node) { editList(n.init, edit) - n.Var = maybeEdit(n.Var, edit) + if n.Var != nil { + n.Var = edit(n.Var).(*Name) + } editList(n.List, edit) editList(n.Body, edit) } diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index d88280dda767e..a1f5e5933f2e0 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -172,7 +172,7 @@ func (n *BranchStmt) Sym() *types.Sym { return n.Label } // A CaseClause is a case statement in a switch or select: case List: Body. 
type CaseClause struct { miniStmt - Var Node // declared variable for this case in type switch + Var *Name // declared variable for this case in type switch List Nodes // list of expressions for switch, early select Body Nodes } diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index c287d76c4370f..489879b3b4345 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -1187,7 +1187,7 @@ func (w *exportWriter) caseList(cases []*ir.CaseClause, namedTypeSwitch bool) { w.pos(cas.Pos()) w.stmtList(cas.List) if namedTypeSwitch { - w.localName(cas.Var.(*ir.Name)) + w.localName(cas.Var) } w.stmtList(cas.Body) } diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go index f5d36a663d17a..d90d13b44cdf3 100644 --- a/src/cmd/compile/internal/typecheck/stmt.go +++ b/src/cmd/compile/internal/typecheck/stmt.go @@ -631,7 +631,7 @@ func tcSwitchType(n *ir.SwitchStmt) { nvar := ncase.Var nvar.SetType(vt) if vt != nil { - nvar = AssignExpr(nvar) + nvar = AssignExpr(nvar).(*ir.Name) } else { // Clause variable is broken; prevent typechecking. nvar.SetTypecheck(1) diff --git a/src/cmd/compile/internal/walk/switch.go b/src/cmd/compile/internal/walk/switch.go index de0b471b34e49..b03bc3eba7c4d 100644 --- a/src/cmd/compile/internal/walk/switch.go +++ b/src/cmd/compile/internal/walk/switch.go @@ -440,7 +440,7 @@ type typeClause struct { body ir.Nodes } -func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp ir.Node) { +func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar *ir.Name, jmp ir.Node) { var body ir.Nodes if caseVar != nil { l := []ir.Node{ @@ -450,7 +450,7 @@ func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp ir.Node) { typecheck.Stmts(l) body.Append(l...) } else { - caseVar = ir.BlankNode + caseVar = ir.BlankNode.(*ir.Name) } // cv, ok = iface.(type) From f5816624cd332ec236c9a155b4a16ba0e8b968af Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Wed, 30 Dec 2020 01:44:56 +0700 Subject: [PATCH 303/474] [dev.regabi] cmd/compile: change AddrExpr.Alloc to AddrExpr.Prealloc For being consistent with other Prealloc fields. [git-generate] cd src/cmd/compile/internal/ir rf ' mv AddrExpr.Alloc AddrExpr.Prealloc ' go generate Change-Id: Id1b05119092036e3f8208b73b63bd0ca6ceb7b15 Reviewed-on: https://go-review.googlesource.com/c/go/+/279450 Trust: Cuong Manh Le Reviewed-by: Matthew Dempsky Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot --- src/cmd/compile/internal/ir/expr.go | 4 ++-- src/cmd/compile/internal/ir/node_gen.go | 8 ++++---- src/cmd/compile/internal/walk/closure.go | 4 ++-- src/cmd/compile/internal/walk/complit.go | 6 +++--- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index bb32d96088bf1..a989ce5e01a5e 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -106,8 +106,8 @@ func NewAddStringExpr(pos src.XPos, list []Node) *AddStringExpr { // It may end up being a normal address-of or an allocation of a composite literal. 
type AddrExpr struct { miniExpr - X Node - Alloc *Name // preallocated storage if any + X Node + Prealloc *Name // preallocated storage if any } func NewAddrExpr(pos src.XPos, x Node) *AddrExpr { diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 6c1a28022f3a8..0dd5100018d38 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -38,16 +38,16 @@ func (n *AddrExpr) doChildren(do func(Node) error) error { var err error err = maybeDoList(n.init, err, do) err = maybeDo(n.X, err, do) - if n.Alloc != nil { - err = maybeDo(n.Alloc, err, do) + if n.Prealloc != nil { + err = maybeDo(n.Prealloc, err, do) } return err } func (n *AddrExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) n.X = maybeEdit(n.X, edit) - if n.Alloc != nil { - n.Alloc = edit(n.Alloc).(*Name) + if n.Prealloc != nil { + n.Prealloc = edit(n.Prealloc).(*Name) } } diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go index 00d3f50bc4c75..0726d3b5521ea 100644 --- a/src/cmd/compile/internal/walk/closure.go +++ b/src/cmd/compile/internal/walk/closure.go @@ -144,7 +144,7 @@ func walkClosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node { if !types.Identical(typ, x.Type()) { panic("closure type does not match order's assigned type") } - addr.Alloc = x + addr.Prealloc = x clo.Prealloc = nil } @@ -189,7 +189,7 @@ func walkCallPart(n *ir.SelectorExpr, init *ir.Nodes) ir.Node { if !types.Identical(typ, x.Type()) { panic("partial call type does not match order's assigned type") } - addr.Alloc = x + addr.Prealloc = x n.Prealloc = nil } diff --git a/src/cmd/compile/internal/walk/complit.go b/src/cmd/compile/internal/walk/complit.go index 3c28ed70ade7a..d8605d39bdc0b 100644 --- a/src/cmd/compile/internal/walk/complit.go +++ b/src/cmd/compile/internal/walk/complit.go @@ -549,10 +549,10 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { } var r ir.Node - if n.Alloc != nil { + if n.Prealloc != nil { // n.Right is stack temporary used as backing store. - appendWalkStmt(init, ir.NewAssignStmt(base.Pos, n.Alloc, nil)) // zero backing store, just in case (#18410) - r = typecheck.NodAddr(n.Alloc) + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, n.Prealloc, nil)) // zero backing store, just in case (#18410) + r = typecheck.NodAddr(n.Prealloc) } else { r = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(n.X.Type())) r.SetEsc(n.Esc()) From 9958b7ed3e92007cda0f25cffe502e2b88689c6c Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Wed, 30 Dec 2020 02:01:41 +0700 Subject: [PATCH 304/474] [dev.regabi] cmd/compile: unexport ir.FmtNode It's only used inside package ir now. 
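Every node type's Format method stays a one-line delegation to the shared helper; only the helper's name changes case. As an illustration of that fmt.Formatter delegation pattern, here is a standalone sketch in which Node, Kind, NilExpr, and ParenExpr are simplified stand-ins for the ir package's real definitions:

package main

import "fmt"

type Node interface {
	fmt.Formatter
	Kind() string
}

// fmtNode is the single place formatting logic lives; keeping it
// unexported means code outside the package can only reach it through
// the per-type Format methods.
func fmtNode(n Node, s fmt.State, verb rune) {
	switch verb {
	case 'v':
		fmt.Fprintf(s, "<%s>", n.Kind())
	default:
		fmt.Fprintf(s, "%%!%c(node %s)", verb, n.Kind())
	}
}

type NilExpr struct{}

func (n *NilExpr) Kind() string                  { return "NilExpr" }
func (n *NilExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }

type ParenExpr struct{}

func (n *ParenExpr) Kind() string                  { return "ParenExpr" }
func (n *ParenExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }

func main() {
	fmt.Printf("%v %v\n", &NilExpr{}, &ParenExpr{}) // <NilExpr> <ParenExpr>
}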
[git-generate] cd src/cmd/compile/internal/ir rf 'mv FmtNode fmtNode' sed -i 's/FmtNode/fmtNode/g' mknode.go go generate Change-Id: Ib8f6c6984905a4d4cfca1b23972a39c5ea30ff42 Reviewed-on: https://go-review.googlesource.com/c/go/+/279451 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/fmt.go | 2 +- src/cmd/compile/internal/ir/mknode.go | 2 +- src/cmd/compile/internal/ir/node_gen.go | 114 ++++++++++++------------ 3 files changed, 59 insertions(+), 59 deletions(-) diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 7680f05ad2bfc..ea6b5856df6d5 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -128,7 +128,7 @@ func (o Op) Format(s fmt.State, verb rune) { // %L Go syntax followed by " (type T)" if type is known. // %+v Debug syntax, as in Dump. // -func FmtNode(n Node, s fmt.State, verb rune) { +func fmtNode(n Node, s fmt.State, verb rune) { // %+v prints Dump. // Otherwise we print Go syntax. if s.Flag('+') && verb == 'v' { diff --git a/src/cmd/compile/internal/ir/mknode.go b/src/cmd/compile/internal/ir/mknode.go index 54a228bce72b6..755ac6ba8781d 100644 --- a/src/cmd/compile/internal/ir/mknode.go +++ b/src/cmd/compile/internal/ir/mknode.go @@ -68,7 +68,7 @@ func main() { } fmt.Fprintf(&buf, "\n") - fmt.Fprintf(&buf, "func (n *%s) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }\n", name) + fmt.Fprintf(&buf, "func (n *%s) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }\n", name) switch name { case "Name": diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 0dd5100018d38..4427d89f5c59c 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -4,7 +4,7 @@ package ir import "fmt" -func (n *AddStringExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *AddStringExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *AddStringExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -28,7 +28,7 @@ func (n *AddStringExpr) editChildren(edit func(Node) Node) { } } -func (n *AddrExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *AddrExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *AddrExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -51,7 +51,7 @@ func (n *AddrExpr) editChildren(edit func(Node) Node) { } } -func (n *ArrayType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *ArrayType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *ArrayType) copy() Node { c := *n return &c @@ -67,7 +67,7 @@ func (n *ArrayType) editChildren(edit func(Node) Node) { n.Elem = toNtype(maybeEdit(n.Elem, edit)) } -func (n *AssignListStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *AssignListStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *AssignListStmt) copy() Node { c := *n c.init = c.init.Copy() @@ -88,7 +88,7 @@ func (n *AssignListStmt) editChildren(edit func(Node) Node) { editList(n.Rhs, edit) } -func (n *AssignOpStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *AssignOpStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *AssignOpStmt) copy() Node { c := *n c.init = c.init.Copy() @@ -107,7 +107,7 @@ func (n *AssignOpStmt) editChildren(edit func(Node) Node) { n.Y = maybeEdit(n.Y, edit) } -func (n *AssignStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n 
*AssignStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *AssignStmt) copy() Node { c := *n c.init = c.init.Copy() @@ -126,7 +126,7 @@ func (n *AssignStmt) editChildren(edit func(Node) Node) { n.Y = maybeEdit(n.Y, edit) } -func (n *BasicLit) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *BasicLit) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *BasicLit) copy() Node { c := *n c.init = c.init.Copy() @@ -141,7 +141,7 @@ func (n *BasicLit) editChildren(edit func(Node) Node) { editList(n.init, edit) } -func (n *BinaryExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *BinaryExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *BinaryExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -160,7 +160,7 @@ func (n *BinaryExpr) editChildren(edit func(Node) Node) { n.Y = maybeEdit(n.Y, edit) } -func (n *BlockStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *BlockStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *BlockStmt) copy() Node { c := *n c.init = c.init.Copy() @@ -178,7 +178,7 @@ func (n *BlockStmt) editChildren(edit func(Node) Node) { editList(n.List, edit) } -func (n *BranchStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *BranchStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *BranchStmt) copy() Node { c := *n c.init = c.init.Copy() @@ -193,7 +193,7 @@ func (n *BranchStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) } -func (n *CallExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *CallExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *CallExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -219,7 +219,7 @@ func (n *CallExpr) editChildren(edit func(Node) Node) { editList(n.Body, edit) } -func (n *CaseClause) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *CaseClause) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *CaseClause) copy() Node { c := *n c.init = c.init.Copy() @@ -246,7 +246,7 @@ func (n *CaseClause) editChildren(edit func(Node) Node) { editList(n.Body, edit) } -func (n *ChanType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *ChanType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *ChanType) copy() Node { c := *n return &c @@ -260,7 +260,7 @@ func (n *ChanType) editChildren(edit func(Node) Node) { n.Elem = toNtype(maybeEdit(n.Elem, edit)) } -func (n *ClosureExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *ClosureExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *ClosureExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -281,7 +281,7 @@ func (n *ClosureExpr) editChildren(edit func(Node) Node) { } } -func (n *ClosureReadExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *ClosureReadExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *ClosureReadExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -296,7 +296,7 @@ func (n *ClosureReadExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) } -func (n *CommClause) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *CommClause) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *CommClause) copy() Node { c := *n c.init = c.init.Copy() @@ -316,7 +316,7 @@ func (n *CommClause) editChildren(edit func(Node) Node) { editList(n.Body, edit) } -func (n *CompLitExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n 
*CompLitExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *CompLitExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -342,7 +342,7 @@ func (n *CompLitExpr) editChildren(edit func(Node) Node) { } } -func (n *ConstExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *ConstExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *ConstExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -357,7 +357,7 @@ func (n *ConstExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) } -func (n *ConvExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *ConvExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *ConvExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -374,7 +374,7 @@ func (n *ConvExpr) editChildren(edit func(Node) Node) { n.X = maybeEdit(n.X, edit) } -func (n *Decl) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *Decl) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *Decl) copy() Node { c := *n return &c @@ -388,7 +388,7 @@ func (n *Decl) editChildren(edit func(Node) Node) { n.X = maybeEdit(n.X, edit) } -func (n *ForStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *ForStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *ForStmt) copy() Node { c := *n c.init = c.init.Copy() @@ -413,7 +413,7 @@ func (n *ForStmt) editChildren(edit func(Node) Node) { editList(n.Body, edit) } -func (n *Func) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *Func) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *Func) copy() Node { c := *n c.Body = c.Body.Copy() @@ -428,7 +428,7 @@ func (n *Func) editChildren(edit func(Node) Node) { editList(n.Body, edit) } -func (n *FuncType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *FuncType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *FuncType) copy() Node { c := *n if c.Recv != nil { @@ -451,7 +451,7 @@ func (n *FuncType) editChildren(edit func(Node) Node) { editFields(n.Results, edit) } -func (n *GoDeferStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *GoDeferStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *GoDeferStmt) copy() Node { c := *n c.init = c.init.Copy() @@ -468,7 +468,7 @@ func (n *GoDeferStmt) editChildren(edit func(Node) Node) { n.Call = maybeEdit(n.Call, edit) } -func (n *Ident) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *Ident) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *Ident) copy() Node { c := *n c.init = c.init.Copy() @@ -483,7 +483,7 @@ func (n *Ident) editChildren(edit func(Node) Node) { editList(n.init, edit) } -func (n *IfStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *IfStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *IfStmt) copy() Node { c := *n c.init = c.init.Copy() @@ -506,7 +506,7 @@ func (n *IfStmt) editChildren(edit func(Node) Node) { editList(n.Else, edit) } -func (n *IndexExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *IndexExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *IndexExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -525,7 +525,7 @@ func (n *IndexExpr) editChildren(edit func(Node) Node) { n.Index = maybeEdit(n.Index, edit) } -func (n *InlineMarkStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *InlineMarkStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *InlineMarkStmt) copy() Node { c 
:= *n c.init = c.init.Copy() @@ -540,7 +540,7 @@ func (n *InlineMarkStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) } -func (n *InlinedCallExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *InlinedCallExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *InlinedCallExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -561,7 +561,7 @@ func (n *InlinedCallExpr) editChildren(edit func(Node) Node) { editList(n.ReturnVars, edit) } -func (n *InterfaceType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *InterfaceType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *InterfaceType) copy() Node { c := *n c.Methods = copyFields(c.Methods) @@ -576,7 +576,7 @@ func (n *InterfaceType) editChildren(edit func(Node) Node) { editFields(n.Methods, edit) } -func (n *KeyExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *KeyExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *KeyExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -595,7 +595,7 @@ func (n *KeyExpr) editChildren(edit func(Node) Node) { n.Value = maybeEdit(n.Value, edit) } -func (n *LabelStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *LabelStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *LabelStmt) copy() Node { c := *n c.init = c.init.Copy() @@ -610,7 +610,7 @@ func (n *LabelStmt) editChildren(edit func(Node) Node) { editList(n.init, edit) } -func (n *LogicalExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *LogicalExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *LogicalExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -629,7 +629,7 @@ func (n *LogicalExpr) editChildren(edit func(Node) Node) { n.Y = maybeEdit(n.Y, edit) } -func (n *MakeExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *MakeExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *MakeExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -648,7 +648,7 @@ func (n *MakeExpr) editChildren(edit func(Node) Node) { n.Cap = maybeEdit(n.Cap, edit) } -func (n *MapType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *MapType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *MapType) copy() Node { c := *n return &c @@ -664,7 +664,7 @@ func (n *MapType) editChildren(edit func(Node) Node) { n.Elem = toNtype(maybeEdit(n.Elem, edit)) } -func (n *Name) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *Name) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *Name) copy() Node { panic("Name.copy") } func (n *Name) doChildren(do func(Node) error) error { var err error @@ -673,7 +673,7 @@ func (n *Name) doChildren(do func(Node) error) error { func (n *Name) editChildren(edit func(Node) Node) { } -func (n *NameOffsetExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *NameOffsetExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *NameOffsetExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -694,7 +694,7 @@ func (n *NameOffsetExpr) editChildren(edit func(Node) Node) { } } -func (n *NilExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *NilExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *NilExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -709,7 +709,7 @@ func (n *NilExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) } -func (n *ParenExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n 
*ParenExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *ParenExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -726,7 +726,7 @@ func (n *ParenExpr) editChildren(edit func(Node) Node) { n.X = maybeEdit(n.X, edit) } -func (n *PkgName) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *PkgName) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *PkgName) copy() Node { c := *n return &c @@ -738,7 +738,7 @@ func (n *PkgName) doChildren(do func(Node) error) error { func (n *PkgName) editChildren(edit func(Node) Node) { } -func (n *RangeStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *RangeStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *RangeStmt) copy() Node { c := *n c.init = c.init.Copy() @@ -768,7 +768,7 @@ func (n *RangeStmt) editChildren(edit func(Node) Node) { } } -func (n *ResultExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *ResultExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *ResultExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -783,7 +783,7 @@ func (n *ResultExpr) editChildren(edit func(Node) Node) { editList(n.init, edit) } -func (n *ReturnStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *ReturnStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *ReturnStmt) copy() Node { c := *n c.init = c.init.Copy() @@ -801,7 +801,7 @@ func (n *ReturnStmt) editChildren(edit func(Node) Node) { editList(n.Results, edit) } -func (n *SelectStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *SelectStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *SelectStmt) copy() Node { c := *n c.init = c.init.Copy() @@ -822,7 +822,7 @@ func (n *SelectStmt) editChildren(edit func(Node) Node) { editList(n.Compiled, edit) } -func (n *SelectorExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *SelectorExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *SelectorExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -845,7 +845,7 @@ func (n *SelectorExpr) editChildren(edit func(Node) Node) { } } -func (n *SendStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *SendStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *SendStmt) copy() Node { c := *n c.init = c.init.Copy() @@ -864,7 +864,7 @@ func (n *SendStmt) editChildren(edit func(Node) Node) { n.Value = maybeEdit(n.Value, edit) } -func (n *SliceExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *SliceExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *SliceExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -887,7 +887,7 @@ func (n *SliceExpr) editChildren(edit func(Node) Node) { n.Max = maybeEdit(n.Max, edit) } -func (n *SliceHeaderExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *SliceHeaderExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *SliceHeaderExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -908,7 +908,7 @@ func (n *SliceHeaderExpr) editChildren(edit func(Node) Node) { n.Cap = maybeEdit(n.Cap, edit) } -func (n *SliceType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *SliceType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *SliceType) copy() Node { c := *n return &c @@ -922,7 +922,7 @@ func (n *SliceType) editChildren(edit func(Node) Node) { n.Elem = toNtype(maybeEdit(n.Elem, edit)) } -func (n *StarExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, 
verb) } +func (n *StarExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *StarExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -939,7 +939,7 @@ func (n *StarExpr) editChildren(edit func(Node) Node) { n.X = maybeEdit(n.X, edit) } -func (n *StructKeyExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *StructKeyExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *StructKeyExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -956,7 +956,7 @@ func (n *StructKeyExpr) editChildren(edit func(Node) Node) { n.Value = maybeEdit(n.Value, edit) } -func (n *StructType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *StructType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *StructType) copy() Node { c := *n c.Fields = copyFields(c.Fields) @@ -971,7 +971,7 @@ func (n *StructType) editChildren(edit func(Node) Node) { editFields(n.Fields, edit) } -func (n *SwitchStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *SwitchStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *SwitchStmt) copy() Node { c := *n c.init = c.init.Copy() @@ -994,7 +994,7 @@ func (n *SwitchStmt) editChildren(edit func(Node) Node) { editList(n.Compiled, edit) } -func (n *TypeAssertExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *TypeAssertExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *TypeAssertExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -1013,7 +1013,7 @@ func (n *TypeAssertExpr) editChildren(edit func(Node) Node) { n.Ntype = toNtype(maybeEdit(n.Ntype, edit)) } -func (n *TypeSwitchGuard) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *TypeSwitchGuard) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *TypeSwitchGuard) copy() Node { c := *n return &c @@ -1033,7 +1033,7 @@ func (n *TypeSwitchGuard) editChildren(edit func(Node) Node) { n.X = maybeEdit(n.X, edit) } -func (n *UnaryExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *UnaryExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *UnaryExpr) copy() Node { c := *n c.init = c.init.Copy() @@ -1050,7 +1050,7 @@ func (n *UnaryExpr) editChildren(edit func(Node) Node) { n.X = maybeEdit(n.X, edit) } -func (n *typeNode) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +func (n *typeNode) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *typeNode) copy() Node { c := *n return &c From 82ab3d1448ee19ebf464297660ed1bc54aa2f3e6 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Wed, 30 Dec 2020 02:46:25 +0700 Subject: [PATCH 305/474] [dev.regabi] cmd/compile: use *ir.Name for Decl.X Passes toolstash -cmp. 
Change-Id: I505577d067eda3512f6d78618fc0eff061a71e3c Reviewed-on: https://go-review.googlesource.com/c/go/+/280732 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/escape/escape.go | 2 +- src/cmd/compile/internal/inline/inl.go | 10 +++++----- src/cmd/compile/internal/ir/node_gen.go | 8 ++++++-- src/cmd/compile/internal/ir/stmt.go | 4 ++-- src/cmd/compile/internal/ssagen/ssa.go | 2 +- src/cmd/compile/internal/typecheck/iexport.go | 2 +- src/cmd/compile/internal/typecheck/typecheck.go | 4 ++-- src/cmd/compile/internal/walk/order.go | 2 +- src/cmd/compile/internal/walk/stmt.go | 2 +- src/cmd/compile/internal/walk/walk.go | 2 +- 10 files changed, 21 insertions(+), 17 deletions(-) diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index ec99c86c06b62..b5b09beb5a8db 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -316,7 +316,7 @@ func (e *escape) stmt(n ir.Node) { // Record loop depth at declaration. n := n.(*ir.Decl) if !ir.IsBlank(n.X) { - e.dcl(n.X.(*ir.Name)) + e.dcl(n.X) } case ir.OLABEL: diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 126871b805522..7584f6a19f867 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -649,7 +649,7 @@ func inlParam(t *types.Field, as ir.Node, inlvars map[*ir.Name]ir.Node) ir.Node if inlvar == nil { base.Fatalf("missing inlvar for %v", n) } - as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, inlvar)) + as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, inlvar.(*ir.Name))) inlvar.Name().Defn = as return inlvar } @@ -771,14 +771,14 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b if v.Byval() { iv := typecheck.Expr(inlvar(v)) - ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, iv)) + ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, iv.(*ir.Name))) ninit.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, iv, o))) inlvars[v] = iv } else { addr := typecheck.NewName(typecheck.Lookup("&" + v.Sym().Name)) addr.SetType(types.NewPtr(v.Type())) ia := typecheck.Expr(inlvar(addr)) - ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, ia)) + ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, ia.(*ir.Name))) ninit.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, ia, typecheck.NodAddr(o)))) inlvars[addr] = ia @@ -917,7 +917,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b if !delayretvars { // Zero the return parameters. 
for _, n := range retvars { - ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, n)) + ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name))) ras := ir.NewAssignStmt(base.Pos, n, nil) ninit.Append(typecheck.Stmt(ras)) } @@ -1139,7 +1139,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { if subst.delayretvars { for _, n := range as.Lhs { - as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n)) + as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name))) n.Name().Defn = as } } diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 4427d89f5c59c..4c48e82d779a3 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -381,11 +381,15 @@ func (n *Decl) copy() Node { } func (n *Decl) doChildren(do func(Node) error) error { var err error - err = maybeDo(n.X, err, do) + if n.X != nil { + err = maybeDo(n.X, err, do) + } return err } func (n *Decl) editChildren(edit func(Node) Node) { - n.X = maybeEdit(n.X, edit) + if n.X != nil { + n.X = edit(n.X).(*Name) + } } func (n *ForStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index a1f5e5933f2e0..4575dec260b2e 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -13,10 +13,10 @@ import ( // A Decl is a declaration of a const, type, or var. (A declared func is a Func.) type Decl struct { miniNode - X Node // the thing being declared + X *Name // the thing being declared } -func NewDecl(pos src.XPos, op Op, x Node) *Decl { +func NewDecl(pos src.XPos, op Op, x *Name) *Decl { n := &Decl{X: x} n.pos = pos switch op { diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 3c94ec4c9519e..ddf65eb20961f 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -1242,7 +1242,7 @@ func (s *state) stmt(n ir.Node) { case ir.ODCL: n := n.(*ir.Decl) - if n.X.(*ir.Name).Class_ == ir.PAUTOHEAP { + if n.X.Class_ == ir.PAUTOHEAP { s.Fatalf("DCL %v", n) } diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index 489879b3b4345..aa16a54bb83b1 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -1067,7 +1067,7 @@ func (w *exportWriter) stmt(n ir.Node) { n := n.(*ir.Decl) w.op(ir.ODCL) w.pos(n.X.Pos()) - w.localName(n.X.(*ir.Name)) + w.localName(n.X) w.typ(n.X.Type()) case ir.OAS: diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index 335e1b53ce365..480d2de8e3295 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -1011,12 +1011,12 @@ func typecheck1(n ir.Node, top int) ir.Node { case ir.ODCLCONST: n := n.(*ir.Decl) - n.X = Expr(n.X) + n.X = Expr(n.X).(*ir.Name) return n case ir.ODCLTYPE: n := n.(*ir.Decl) - n.X = typecheck(n.X, ctxType) + n.X = typecheck(n.X, ctxType).(*ir.Name) types.CheckSize(n.X.Type()) return n } diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index 0dd76ccee9a9b..b3d2eaec17a34 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -955,7 +955,7 @@ func (o *orderState) stmt(n ir.Node) { if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).X == n { init = init[1:] } - dcl := 
typecheck.Stmt(ir.NewDecl(base.Pos, ir.ODCL, n)) + dcl := typecheck.Stmt(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name))) ncas.PtrInit().Append(dcl) } tmp := o.newTemp(t, t.HasPointers()) diff --git a/src/cmd/compile/internal/walk/stmt.go b/src/cmd/compile/internal/walk/stmt.go index 3fe7e103aa30a..f843d2c4faf32 100644 --- a/src/cmd/compile/internal/walk/stmt.go +++ b/src/cmd/compile/internal/walk/stmt.go @@ -176,7 +176,7 @@ func walkStmtList(s []ir.Node) { // walkDecl walks an ODCL node. func walkDecl(n *ir.Decl) ir.Node { - v := n.X.(*ir.Name) + v := n.X if v.Class_ == ir.PAUTOHEAP { if base.Flag.CompilingRuntime { base.Errorf("%v escapes to heap, not allowed in runtime", v) diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go index bdc9a2ea6a444..b6be949689e58 100644 --- a/src/cmd/compile/internal/walk/walk.go +++ b/src/cmd/compile/internal/walk/walk.go @@ -167,7 +167,7 @@ func paramstoheap(params *types.Type) []ir.Node { } if stackcopy := v.Name().Stackcopy; stackcopy != nil { - nn = append(nn, walkStmt(ir.NewDecl(base.Pos, ir.ODCL, v))) + nn = append(nn, walkStmt(ir.NewDecl(base.Pos, ir.ODCL, v.(*ir.Name)))) if stackcopy.Class_ == ir.PPARAM { nn = append(nn, walkStmt(typecheck.Stmt(ir.NewAssignStmt(base.Pos, v, stackcopy)))) } From 499851bac88dfa2a85c39a2123f092071098cada Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 29 Dec 2020 15:36:48 -0800 Subject: [PATCH 306/474] [dev.regabi] cmd/compile: generalize ir/mknode.go This CL generalizes ir/mknode.go to get rid of almost all of its special cases for node field types. The only remaining special case now is Field, which doesn't implement Node any more, but perhaps should. To help with removing special cases, node fields can now be tagged with `mknode:"-"` so that mknode ignores them when generating its helper methods. Further, to simplify skipping all of the orig fields, a new origNode helper type is added which declares an orig field marked as `mknode:"-"` and also provides the Orig and SetOrig methods needed to implement the OrigNode interface. Passes toolstash -cmp. Change-Id: Ic68d4f0a9d2ef6e57e9fe87cdc641e5c4859830b Reviewed-on: https://go-review.googlesource.com/c/go/+/280674 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/ir/copy.go | 8 + src/cmd/compile/internal/ir/expr.go | 57 +- src/cmd/compile/internal/ir/func.go | 4 + src/cmd/compile/internal/ir/mknode.go | 256 ++--- src/cmd/compile/internal/ir/name.go | 4 + src/cmd/compile/internal/ir/node_gen.go | 1329 +++++++++++++++-------- src/cmd/compile/internal/ir/stmt.go | 7 +- src/cmd/compile/internal/ir/type.go | 62 +- src/cmd/compile/internal/ir/visit.go | 22 +- 9 files changed, 1057 insertions(+), 692 deletions(-) diff --git a/src/cmd/compile/internal/ir/copy.go b/src/cmd/compile/internal/ir/copy.go index 0ab355f76749c..7da9b24940ff9 100644 --- a/src/cmd/compile/internal/ir/copy.go +++ b/src/cmd/compile/internal/ir/copy.go @@ -25,6 +25,14 @@ type OrigNode interface { SetOrig(Node) } +// origNode may be embedded into a Node to make it implement OrigNode. +type origNode struct { + orig Node `mknode:"-"` +} + +func (n *origNode) Orig() Node { return n.orig } +func (n *origNode) SetOrig(o Node) { n.orig = o } + // Orig returns the “original” node for n. // If n implements OrigNode, Orig returns n.Orig(). // Otherwise Orig returns n itself.
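
To make the origNode pattern concrete before the remaining hunks: a minimal, self-contained sketch of how a node type picks up the OrigNode interface by embedding origNode, and of the Orig helper as its doc comment above describes it. The Node/OrigNode stand-ins and the constExpr type here are illustrative only; the real definitions live in cmd/compile/internal/ir.

package main

import "fmt"

// Stand-ins for the real ir.Node and ir.OrigNode interfaces.
type Node interface{ String() string }

type OrigNode interface {
	Node
	Orig() Node
	SetOrig(Node)
}

// origNode mirrors the helper added to copy.go above. In the real code
// the orig field carries the `mknode:"-"` tag, so mknode's generated
// copy/doChildren/editChildren methods leave it alone.
type origNode struct {
	orig Node
}

func (n *origNode) Orig() Node     { return n.orig }
func (n *origNode) SetOrig(o Node) { n.orig = o }

// constExpr gains OrigNode simply by embedding origNode, the way
// CallExpr, CompLitExpr, and ConstExpr do in the patch.
type constExpr struct {
	origNode
	val string
}

func (n *constExpr) String() string { return n.val }

// Orig behaves as the doc comment above states: n.Orig() when n
// implements OrigNode, otherwise n itself.
func Orig(n Node) Node {
	if o, ok := n.(OrigNode); ok {
		return o.Orig()
	}
	return n
}

func main() {
	folded := &constExpr{val: "42"}
	folded.SetOrig(&constExpr{val: "21 + 21"}) // remember the pre-folding form
	fmt.Println(Orig(folded))                  // 21 + 21
}
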
diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index a989ce5e01a5e..55e4b61baf04e 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -14,27 +14,6 @@ import ( "go/token" ) -func maybeDo(x Node, err error, do func(Node) error) error { - if x != nil && err == nil { - err = do(x) - } - return err -} - -func maybeDoList(x Nodes, err error, do func(Node) error) error { - if err == nil { - err = DoList(x, do) - } - return err -} - -func maybeEdit(x Node, edit func(Node) Node) Node { - if x == nil { - return x - } - return edit(x) -} - // An Expr is a Node that can appear as an expression. type Expr interface { Node @@ -77,16 +56,6 @@ func (n *miniExpr) Init() Nodes { return n.init } func (n *miniExpr) PtrInit() *Nodes { return &n.init } func (n *miniExpr) SetInit(x Nodes) { n.init = x } -func toNtype(x Node) Ntype { - if x == nil { - return nil - } - if _, ok := x.(Ntype); !ok { - Dump("not Ntype", x) - } - return x.(Ntype) -} - // An AddStringExpr is a string concatenation Expr[0] + Exprs[1] + ... + Expr[len(Expr)-1]. type AddStringExpr struct { miniExpr @@ -189,7 +158,7 @@ const ( // A CallExpr is a function call X(Args). type CallExpr struct { miniExpr - orig Node + origNode X Node Args Nodes Rargs Nodes // TODO(rsc): Delete. @@ -210,9 +179,6 @@ func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr { func (*CallExpr) isStmt() {} -func (n *CallExpr) Orig() Node { return n.orig } -func (n *CallExpr) SetOrig(x Node) { n.orig = x } - func (n *CallExpr) SetOp(op Op) { switch op { default: @@ -226,7 +192,7 @@ func (n *CallExpr) SetOp(op Op) { // A ClosureExpr is a function literal expression. type ClosureExpr struct { miniExpr - Func *Func + Func *Func `mknode:"-"` Prealloc *Name } @@ -254,7 +220,7 @@ func NewClosureRead(typ *types.Type, offset int64) *ClosureReadExpr { // Before type-checking, the type is Ntype. type CompLitExpr struct { miniExpr - orig Node + origNode Ntype Ntype List Nodes // initialized values Prealloc *Name @@ -270,8 +236,6 @@ func NewCompLitExpr(pos src.XPos, op Op, typ Ntype, list []Node) *CompLitExpr { return n } -func (n *CompLitExpr) Orig() Node { return n.orig } -func (n *CompLitExpr) SetOrig(x Node) { n.orig = x } func (n *CompLitExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } func (n *CompLitExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } @@ -286,14 +250,15 @@ func (n *CompLitExpr) SetOp(op Op) { type ConstExpr struct { miniExpr - val constant.Value - orig Node + origNode + val constant.Value } func NewConstExpr(val constant.Value, orig Node) Node { - n := &ConstExpr{orig: orig, val: val} + n := &ConstExpr{val: val} n.op = OLITERAL n.pos = orig.Pos() + n.orig = orig n.SetType(orig.Type()) n.SetTypecheck(orig.Typecheck()) n.SetDiag(orig.Diag()) @@ -301,8 +266,6 @@ func NewConstExpr(val constant.Value, orig Node) Node { } func (n *ConstExpr) Sym() *types.Sym { return n.orig.Sym() } -func (n *ConstExpr) Orig() Node { return n.orig } -func (n *ConstExpr) SetOrig(orig Node) { panic(n.no("SetOrig")) } func (n *ConstExpr) Val() constant.Value { return n.val } // A ConvExpr is a conversion Type(X). @@ -664,9 +627,9 @@ type TypeAssertExpr struct { // Runtime type information provided by walkDotType. // Caution: These aren't always populated; see walkDotType. 
- SrcType *AddrExpr // *runtime._type for X's type - DstType *AddrExpr // *runtime._type for Type - Itab *AddrExpr // *runtime.itab for Type implementing X's type + SrcType *AddrExpr `mknode:"-"` // *runtime._type for X's type + DstType *AddrExpr `mknode:"-"` // *runtime._type for Type + Itab *AddrExpr `mknode:"-"` // *runtime.itab for Type implementing X's type } func NewTypeAssertExpr(pos src.XPos, x Node, typ Ntype) *TypeAssertExpr { diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index bffd4dd5ef9b2..32ad37fa8064a 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -115,6 +115,10 @@ func NewFunc(pos src.XPos) *Func { func (f *Func) isStmt() {} +func (n *Func) copy() Node { panic(n.no("copy")) } +func (n *Func) doChildren(do func(Node) error) error { return doNodes(n.Body, do) } +func (n *Func) editChildren(edit func(Node) Node) { editNodes(n.Body, edit) } + func (f *Func) Type() *types.Type { return f.Nname.Type() } func (f *Func) Sym() *types.Sym { return f.Nname.Sym() } func (f *Func) Linksym() *obj.LSym { return f.Nname.Linksym() } diff --git a/src/cmd/compile/internal/ir/mknode.go b/src/cmd/compile/internal/ir/mknode.go index 755ac6ba8781d..4e26bc5011db4 100644 --- a/src/cmd/compile/internal/ir/mknode.go +++ b/src/cmd/compile/internal/ir/mknode.go @@ -13,11 +13,16 @@ import ( "go/types" "io/ioutil" "log" + "reflect" + "sort" "strings" "golang.org/x/tools/go/packages" ) +var irPkg *types.Package +var buf bytes.Buffer + func main() { cfg := &packages.Config{ Mode: packages.NeedSyntax | packages.NeedTypes, @@ -26,44 +31,26 @@ func main() { if err != nil { log.Fatal(err) } + irPkg = pkgs[0].Types - pkg := pkgs[0].Types - scope := pkg.Scope() - - lookup := func(name string) *types.Named { - return scope.Lookup(name).(*types.TypeName).Type().(*types.Named) - } - - nodeType := lookup("Node") - ptrNameType := types.NewPointer(lookup("Name")) - ntypeType := lookup("Ntype") - nodesType := lookup("Nodes") - slicePtrCaseClauseType := types.NewSlice(types.NewPointer(lookup("CaseClause"))) - slicePtrCommClauseType := types.NewSlice(types.NewPointer(lookup("CommClause"))) - ptrFieldType := types.NewPointer(lookup("Field")) - slicePtrFieldType := types.NewSlice(ptrFieldType) - ptrIdentType := types.NewPointer(lookup("Ident")) - - var buf bytes.Buffer fmt.Fprintln(&buf, "// Code generated by mknode.go. 
DO NOT EDIT.") fmt.Fprintln(&buf) fmt.Fprintln(&buf, "package ir") fmt.Fprintln(&buf) fmt.Fprintln(&buf, `import "fmt"`) + scope := irPkg.Scope() for _, name := range scope.Names() { - obj, ok := scope.Lookup(name).(*types.TypeName) - if !ok { + if strings.HasPrefix(name, "mini") { continue } - typName := obj.Name() - typ, ok := obj.Type().(*types.Named).Underlying().(*types.Struct) + obj, ok := scope.Lookup(name).(*types.TypeName) if !ok { continue } - - if strings.HasPrefix(typName, "mini") || !hasMiniNode(typ) { + typ := obj.Type().(*types.Named) + if !implementsNode(types.NewPointer(typ)) { continue } @@ -71,77 +58,31 @@ func main() { fmt.Fprintf(&buf, "func (n *%s) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }\n", name) switch name { - case "Name": - fmt.Fprintf(&buf, "func (n *%s) copy() Node {panic(\"%s.copy\")}\n", name, name) - default: - fmt.Fprintf(&buf, "func (n *%s) copy() Node { c := *n\n", name) - forNodeFields(typName, typ, func(name string, is func(types.Type) bool) { - switch { - case is(nodesType): - fmt.Fprintf(&buf, "c.%s = c.%s.Copy()\n", name, name) - case is(slicePtrCaseClauseType): - fmt.Fprintf(&buf, "c.%s = copyCases(c.%s)\n", name, name) - case is(slicePtrCommClauseType): - fmt.Fprintf(&buf, "c.%s = copyComms(c.%s)\n", name, name) - case is(ptrFieldType): - fmt.Fprintf(&buf, "if c.%s != nil { c.%s = c.%s.copy() }\n", name, name, name) - case is(slicePtrFieldType): - fmt.Fprintf(&buf, "c.%s = copyFields(c.%s)\n", name, name) - } - }) - fmt.Fprintf(&buf, "return &c }\n") + case "Name", "Func": + // Too specialized to automate. + continue } - fmt.Fprintf(&buf, "func (n *%s) doChildren(do func(Node) error) error { var err error\n", name) - forNodeFields(typName, typ, func(name string, is func(types.Type) bool) { - switch { - case is(ptrIdentType), is(ptrNameType): - fmt.Fprintf(&buf, "if n.%s != nil { err = maybeDo(n.%s, err, do) }\n", name, name) - case is(nodeType), is(ntypeType): - fmt.Fprintf(&buf, "err = maybeDo(n.%s, err, do)\n", name) - case is(nodesType): - fmt.Fprintf(&buf, "err = maybeDoList(n.%s, err, do)\n", name) - case is(slicePtrCaseClauseType): - fmt.Fprintf(&buf, "err = maybeDoCases(n.%s, err, do)\n", name) - case is(slicePtrCommClauseType): - fmt.Fprintf(&buf, "err = maybeDoComms(n.%s, err, do)\n", name) - case is(ptrFieldType): - fmt.Fprintf(&buf, "err = maybeDoField(n.%s, err, do)\n", name) - case is(slicePtrFieldType): - fmt.Fprintf(&buf, "err = maybeDoFields(n.%s, err, do)\n", name) - } - }) - fmt.Fprintf(&buf, "return err }\n") - - fmt.Fprintf(&buf, "func (n *%s) editChildren(edit func(Node) Node) {\n", name) - forNodeFields(typName, typ, func(name string, is func(types.Type) bool) { - switch { - case is(ptrIdentType): - fmt.Fprintf(&buf, "if n.%s != nil { n.%s = edit(n.%s).(*Ident) }\n", name, name, name) - case is(ptrNameType): - fmt.Fprintf(&buf, "if n.%s != nil { n.%s = edit(n.%s).(*Name) }\n", name, name, name) - case is(nodeType): - fmt.Fprintf(&buf, "n.%s = maybeEdit(n.%s, edit)\n", name, name) - case is(ntypeType): - fmt.Fprintf(&buf, "n.%s = toNtype(maybeEdit(n.%s, edit))\n", name, name) - case is(nodesType): - fmt.Fprintf(&buf, "editList(n.%s, edit)\n", name) - case is(slicePtrCaseClauseType): - fmt.Fprintf(&buf, "editCases(n.%s, edit)\n", name) - case is(slicePtrCommClauseType): - fmt.Fprintf(&buf, "editComms(n.%s, edit)\n", name) - case is(ptrFieldType): - fmt.Fprintf(&buf, "editField(n.%s, edit)\n", name) - case is(slicePtrFieldType): - fmt.Fprintf(&buf, "editFields(n.%s, edit)\n", name) - } - }) - fmt.Fprintf(&buf, 
"}\n") + forNodeFields(typ, + "func (n *%[1]s) copy() Node { c := *n\n", + "", + "c.%[1]s = copy%[2]s(c.%[1]s)", + "return &c }\n") + + forNodeFields(typ, + "func (n *%[1]s) doChildren(do func(Node) error) error {\n", + "if n.%[1]s != nil { if err := do(n.%[1]s); err != nil { return err } }", + "if err := do%[2]s(n.%[1]s, do); err != nil { return err }", + "return nil }\n") + + forNodeFields(typ, + "func (n *%[1]s) editChildren(edit func(Node) Node) {\n", + "if n.%[1]s != nil { n.%[1]s = edit(n.%[1]s).(%[2]s) }", + "edit%[2]s(n.%[1]s, edit)", + "}\n") } - for _, name := range []string{"CaseClause", "CommClause"} { - sliceHelper(&buf, name) - } + makeHelpers() out, err := format.Source(buf.Bytes()) if err != nil { @@ -155,20 +96,32 @@ func main() { } } -func sliceHelper(buf *bytes.Buffer, name string) { - tmpl := fmt.Sprintf(` -func copy%[1]ss(list []*%[2]s) []*%[2]s { +// needHelper maps needed slice helpers from their base name to their +// respective slice-element type. +var needHelper = map[string]string{} + +func makeHelpers() { + var names []string + for name := range needHelper { + names = append(names, name) + } + sort.Strings(names) + + for _, name := range names { + fmt.Fprintf(&buf, sliceHelperTmpl, name, needHelper[name]) + } +} + +const sliceHelperTmpl = ` +func copy%[1]s(list []%[2]s) []%[2]s { if list == nil { return nil } - c := make([]*%[2]s, len(list)) + c := make([]%[2]s, len(list)) copy(c, list) return c } -func maybeDo%[1]ss(list []*%[2]s, err error, do func(Node) error) error { - if err != nil { - return err - } +func do%[1]s(list []%[2]s, do func(Node) error) error { for _, x := range list { if x != nil { if err := do(x); err != nil { @@ -178,51 +131,98 @@ func maybeDo%[1]ss(list []*%[2]s, err error, do func(Node) error) error { } return nil } -func edit%[1]ss(list []*%[2]s, edit func(Node) Node) { +func edit%[1]s(list []%[2]s, edit func(Node) Node) { for i, x := range list { if x != nil { - list[i] = edit(x).(*%[2]s) + list[i] = edit(x).(%[2]s) } } } -`, strings.TrimSuffix(name, "Clause"), name) - fmt.Fprintln(buf, tmpl) -} +` -func forNodeFields(typName string, typ *types.Struct, f func(name string, is func(types.Type) bool)) { - for i, n := 0, typ.NumFields(); i < n; i++ { - v := typ.Field(i) - if v.Embedded() { - if typ, ok := v.Type().Underlying().(*types.Struct); ok { - forNodeFields(typName, typ, f) - continue - } +func forNodeFields(named *types.Named, prologue, singleTmpl, sliceTmpl, epilogue string) { + fmt.Fprintf(&buf, prologue, named.Obj().Name()) + + anyField(named.Underlying().(*types.Struct), func(f *types.Var) bool { + if f.Embedded() { + return false + } + name, typ := f.Name(), f.Type() + + slice, _ := typ.Underlying().(*types.Slice) + if slice != nil { + typ = slice.Elem() } - switch typName { - case "Func": - if strings.ToLower(strings.TrimSuffix(v.Name(), "_")) != "body" { - continue + + tmpl, what := singleTmpl, types.TypeString(typ, types.RelativeTo(irPkg)) + if implementsNode(typ) { + if slice != nil { + helper := strings.TrimPrefix(what, "*") + "s" + needHelper[helper] = what + tmpl, what = sliceTmpl, helper } - case "Name": - continue + } else if what == "*Field" { + // Special case for *Field. + tmpl = sliceTmpl + if slice != nil { + what = "Fields" + } else { + what = "Field" + } + } else { + return false } - switch v.Name() { - case "orig": - continue + + if tmpl == "" { + return false + } + + // Allow template to not use all arguments without + // upsetting fmt.Printf. 
+ s := fmt.Sprintf(tmpl+"\x00 %[1]s %[2]s", name, what) + fmt.Fprintln(&buf, s[:strings.LastIndex(s, "\x00")]) + return false + }) + + fmt.Fprintf(&buf, epilogue) +} + +func implementsNode(typ types.Type) bool { + if _, ok := typ.Underlying().(*types.Interface); ok { + // TODO(mdempsky): Check the interface implements Node. + // Worst case, node_gen.go will fail to compile if we're wrong. + return true + } + + if ptr, ok := typ.(*types.Pointer); ok { + if str, ok := ptr.Elem().Underlying().(*types.Struct); ok { + return anyField(str, func(f *types.Var) bool { + return f.Embedded() && f.Name() == "miniNode" + }) } - f(v.Name(), func(t types.Type) bool { return types.Identical(t, v.Type()) }) } + + return false } -func hasMiniNode(typ *types.Struct) bool { +func anyField(typ *types.Struct, pred func(f *types.Var) bool) bool { for i, n := 0, typ.NumFields(); i < n; i++ { - v := typ.Field(i) - if v.Name() == "miniNode" { + if value, ok := reflect.StructTag(typ.Tag(i)).Lookup("mknode"); ok { + if value != "-" { + panic(fmt.Sprintf("unexpected tag value: %q", value)) + } + continue + } + + f := typ.Field(i) + if pred(f) { return true } - if v.Embedded() { - if typ, ok := v.Type().Underlying().(*types.Struct); ok && hasMiniNode(typ) { - return true + if f.Embedded() { + if typ, ok := f.Type().Underlying().(*types.Struct); ok { + if anyField(typ, pred) { + return true + } } } } diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index d6135ee29a828..b12e833f73fe8 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -143,6 +143,10 @@ type Name struct { func (n *Name) isExpr() {} +func (n *Name) copy() Node { panic(n.no("copy")) } +func (n *Name) doChildren(do func(Node) error) error { return nil } +func (n *Name) editChildren(edit func(Node) Node) {} + // CloneName makes a cloned copy of the name. // It's not ir.Copy(n) because in general that operation is a mistake on names, // which uniquely identify variables. 
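
A note on the "\x00" sentinel in forNodeFields above, before the regenerated node_gen.go below: fmt.Sprintf reports unused operands by appending %!(EXTRA ...) to its result (unless the format string indexes its arguments explicitly), so the generator formats each field template with a NUL-prefixed suffix that always consumes both operands, then trims the result back at the NUL. A small stand-alone demonstration of the idea; the expand helper is illustrative, not part of the patch:

package main

import (
	"fmt"
	"strings"
)

// expand applies a mknode-style field template. The "\x00 %[1]s %[2]s"
// suffix consumes both operands even when tmpl uses none of them, so
// fmt.Sprintf never emits a %!(EXTRA ...) complaint; everything from
// the NUL byte onward is then trimmed off.
func expand(tmpl, name, what string) string {
	s := fmt.Sprintf(tmpl+"\x00 %[1]s %[2]s", name, what)
	return s[:strings.LastIndex(s, "\x00")]
}

func main() {
	// A template that uses both operands:
	fmt.Println(expand("edit%[2]s(n.%[1]s, edit)", "Body", "Nodes")) // editNodes(n.Body, edit)
	// A template with no verbs at all still formats cleanly:
	fmt.Println(expand("return &c", "Body", "Nodes")) // return &c
}
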
diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 4c48e82d779a3..21e4eff9fbe77 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -7,22 +7,27 @@ import "fmt" func (n *AddStringExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *AddStringExpr) copy() Node { c := *n - c.init = c.init.Copy() - c.List = c.List.Copy() + c.init = copyNodes(c.init) + c.List = copyNodes(c.List) return &c } func (n *AddStringExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDoList(n.List, err, do) + if err := doNodes(n.init, do); err != nil { + return err + } + if err := doNodes(n.List, do); err != nil { + return err + } if n.Prealloc != nil { - err = maybeDo(n.Prealloc, err, do) + if err := do(n.Prealloc); err != nil { + return err + } } - return err + return nil } func (n *AddStringExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - editList(n.List, edit) + editNodes(n.init, edit) + editNodes(n.List, edit) if n.Prealloc != nil { n.Prealloc = edit(n.Prealloc).(*Name) } @@ -31,21 +36,30 @@ func (n *AddStringExpr) editChildren(edit func(Node) Node) { func (n *AddrExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *AddrExpr) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *AddrExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) + if err := doNodes(n.init, do); err != nil { + return err + } + if n.X != nil { + if err := do(n.X); err != nil { + return err + } + } if n.Prealloc != nil { - err = maybeDo(n.Prealloc, err, do) + if err := do(n.Prealloc); err != nil { + return err + } } - return err + return nil } func (n *AddrExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } if n.Prealloc != nil { n.Prealloc = edit(n.Prealloc).(*Name) } @@ -57,193 +71,273 @@ func (n *ArrayType) copy() Node { return &c } func (n *ArrayType) doChildren(do func(Node) error) error { - var err error - err = maybeDo(n.Len, err, do) - err = maybeDo(n.Elem, err, do) - return err + if n.Len != nil { + if err := do(n.Len); err != nil { + return err + } + } + if n.Elem != nil { + if err := do(n.Elem); err != nil { + return err + } + } + return nil } func (n *ArrayType) editChildren(edit func(Node) Node) { - n.Len = maybeEdit(n.Len, edit) - n.Elem = toNtype(maybeEdit(n.Elem, edit)) + if n.Len != nil { + n.Len = edit(n.Len).(Node) + } + if n.Elem != nil { + n.Elem = edit(n.Elem).(Ntype) + } } func (n *AssignListStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *AssignListStmt) copy() Node { c := *n - c.init = c.init.Copy() - c.Lhs = c.Lhs.Copy() - c.Rhs = c.Rhs.Copy() + c.init = copyNodes(c.init) + c.Lhs = copyNodes(c.Lhs) + c.Rhs = copyNodes(c.Rhs) return &c } func (n *AssignListStmt) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDoList(n.Lhs, err, do) - err = maybeDoList(n.Rhs, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if err := doNodes(n.Lhs, do); err != nil { + return err + } + if err := doNodes(n.Rhs, do); err != nil { + return err + } + return nil } func (n *AssignListStmt) editChildren(edit func(Node) Node) { - editList(n.init, edit) - editList(n.Lhs, edit) - 
editList(n.Rhs, edit) + editNodes(n.init, edit) + editNodes(n.Lhs, edit) + editNodes(n.Rhs, edit) } func (n *AssignOpStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *AssignOpStmt) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *AssignOpStmt) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - err = maybeDo(n.Y, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if n.X != nil { + if err := do(n.X); err != nil { + return err + } + } + if n.Y != nil { + if err := do(n.Y); err != nil { + return err + } + } + return nil } func (n *AssignOpStmt) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) - n.Y = maybeEdit(n.Y, edit) + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Y != nil { + n.Y = edit(n.Y).(Node) + } } func (n *AssignStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *AssignStmt) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *AssignStmt) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - err = maybeDo(n.Y, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if n.X != nil { + if err := do(n.X); err != nil { + return err + } + } + if n.Y != nil { + if err := do(n.Y); err != nil { + return err + } + } + return nil } func (n *AssignStmt) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) - n.Y = maybeEdit(n.Y, edit) + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Y != nil { + n.Y = edit(n.Y).(Node) + } } func (n *BasicLit) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *BasicLit) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *BasicLit) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + return nil } func (n *BasicLit) editChildren(edit func(Node) Node) { - editList(n.init, edit) + editNodes(n.init, edit) } func (n *BinaryExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *BinaryExpr) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *BinaryExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - err = maybeDo(n.Y, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if n.X != nil { + if err := do(n.X); err != nil { + return err + } + } + if n.Y != nil { + if err := do(n.Y); err != nil { + return err + } + } + return nil } func (n *BinaryExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) - n.Y = maybeEdit(n.Y, edit) + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Y != nil { + n.Y = edit(n.Y).(Node) + } } func (n *BlockStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *BlockStmt) copy() Node { c := *n - c.init = c.init.Copy() - c.List = c.List.Copy() + c.init = copyNodes(c.init) + c.List = copyNodes(c.List) return &c } func (n *BlockStmt) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = 
maybeDoList(n.List, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if err := doNodes(n.List, do); err != nil { + return err + } + return nil } func (n *BlockStmt) editChildren(edit func(Node) Node) { - editList(n.init, edit) - editList(n.List, edit) + editNodes(n.init, edit) + editNodes(n.List, edit) } func (n *BranchStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *BranchStmt) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *BranchStmt) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + return nil } func (n *BranchStmt) editChildren(edit func(Node) Node) { - editList(n.init, edit) + editNodes(n.init, edit) } func (n *CallExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *CallExpr) copy() Node { c := *n - c.init = c.init.Copy() - c.Args = c.Args.Copy() - c.Rargs = c.Rargs.Copy() - c.Body = c.Body.Copy() + c.init = copyNodes(c.init) + c.Args = copyNodes(c.Args) + c.Rargs = copyNodes(c.Rargs) + c.Body = copyNodes(c.Body) return &c } func (n *CallExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - err = maybeDoList(n.Args, err, do) - err = maybeDoList(n.Rargs, err, do) - err = maybeDoList(n.Body, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if n.X != nil { + if err := do(n.X); err != nil { + return err + } + } + if err := doNodes(n.Args, do); err != nil { + return err + } + if err := doNodes(n.Rargs, do); err != nil { + return err + } + if err := doNodes(n.Body, do); err != nil { + return err + } + return nil } func (n *CallExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) - editList(n.Args, edit) - editList(n.Rargs, edit) - editList(n.Body, edit) + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + editNodes(n.Args, edit) + editNodes(n.Rargs, edit) + editNodes(n.Body, edit) } func (n *CaseClause) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *CaseClause) copy() Node { c := *n - c.init = c.init.Copy() - c.List = c.List.Copy() - c.Body = c.Body.Copy() + c.init = copyNodes(c.init) + c.List = copyNodes(c.List) + c.Body = copyNodes(c.Body) return &c } func (n *CaseClause) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) + if err := doNodes(n.init, do); err != nil { + return err + } if n.Var != nil { - err = maybeDo(n.Var, err, do) + if err := do(n.Var); err != nil { + return err + } + } + if err := doNodes(n.List, do); err != nil { + return err + } + if err := doNodes(n.Body, do); err != nil { + return err } - err = maybeDoList(n.List, err, do) - err = maybeDoList(n.Body, err, do) - return err + return nil } func (n *CaseClause) editChildren(edit func(Node) Node) { - editList(n.init, edit) + editNodes(n.init, edit) if n.Var != nil { n.Var = edit(n.Var).(*Name) } - editList(n.List, edit) - editList(n.Body, edit) + editNodes(n.List, edit) + editNodes(n.Body, edit) } func (n *ChanType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } @@ -252,30 +346,38 @@ func (n *ChanType) copy() Node { return &c } func (n *ChanType) doChildren(do func(Node) error) error { - var err error - err = maybeDo(n.Elem, err, do) - return err + if n.Elem != nil { + if err := do(n.Elem); err != nil { + return err + } + } + return 
nil } func (n *ChanType) editChildren(edit func(Node) Node) { - n.Elem = toNtype(maybeEdit(n.Elem, edit)) + if n.Elem != nil { + n.Elem = edit(n.Elem).(Ntype) + } } func (n *ClosureExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *ClosureExpr) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *ClosureExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) + if err := doNodes(n.init, do); err != nil { + return err + } if n.Prealloc != nil { - err = maybeDo(n.Prealloc, err, do) + if err := do(n.Prealloc); err != nil { + return err + } } - return err + return nil } func (n *ClosureExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) + editNodes(n.init, edit) if n.Prealloc != nil { n.Prealloc = edit(n.Prealloc).(*Name) } @@ -284,59 +386,80 @@ func (n *ClosureExpr) editChildren(edit func(Node) Node) { func (n *ClosureReadExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *ClosureReadExpr) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *ClosureReadExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + return nil } func (n *ClosureReadExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) + editNodes(n.init, edit) } func (n *CommClause) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *CommClause) copy() Node { c := *n - c.init = c.init.Copy() - c.Body = c.Body.Copy() + c.init = copyNodes(c.init) + c.Body = copyNodes(c.Body) return &c } func (n *CommClause) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.Comm, err, do) - err = maybeDoList(n.Body, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if n.Comm != nil { + if err := do(n.Comm); err != nil { + return err + } + } + if err := doNodes(n.Body, do); err != nil { + return err + } + return nil } func (n *CommClause) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.Comm = maybeEdit(n.Comm, edit) - editList(n.Body, edit) + editNodes(n.init, edit) + if n.Comm != nil { + n.Comm = edit(n.Comm).(Node) + } + editNodes(n.Body, edit) } func (n *CompLitExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *CompLitExpr) copy() Node { c := *n - c.init = c.init.Copy() - c.List = c.List.Copy() + c.init = copyNodes(c.init) + c.List = copyNodes(c.List) return &c } func (n *CompLitExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.Ntype, err, do) - err = maybeDoList(n.List, err, do) + if err := doNodes(n.init, do); err != nil { + return err + } + if n.Ntype != nil { + if err := do(n.Ntype); err != nil { + return err + } + } + if err := doNodes(n.List, do); err != nil { + return err + } if n.Prealloc != nil { - err = maybeDo(n.Prealloc, err, do) + if err := do(n.Prealloc); err != nil { + return err + } } - return err + return nil } func (n *CompLitExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.Ntype = toNtype(maybeEdit(n.Ntype, edit)) - editList(n.List, edit) + editNodes(n.init, edit) + if n.Ntype != nil { + n.Ntype = edit(n.Ntype).(Ntype) + } + editNodes(n.List, edit) if n.Prealloc != nil { n.Prealloc = edit(n.Prealloc).(*Name) } @@ -345,33 +468,41 @@ func (n *CompLitExpr) editChildren(edit func(Node) Node) { 
func (n *ConstExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *ConstExpr) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *ConstExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + return nil } func (n *ConstExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) + editNodes(n.init, edit) } func (n *ConvExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *ConvExpr) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *ConvExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if n.X != nil { + if err := do(n.X); err != nil { + return err + } + } + return nil } func (n *ConvExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } } func (n *Decl) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } @@ -380,11 +511,12 @@ func (n *Decl) copy() Node { return &c } func (n *Decl) doChildren(do func(Node) error) error { - var err error if n.X != nil { - err = maybeDo(n.X, err, do) + if err := do(n.X); err != nil { + return err + } } - return err + return nil } func (n *Decl) editChildren(edit func(Node) Node) { if n.X != nil { @@ -395,59 +527,66 @@ func (n *Decl) editChildren(edit func(Node) Node) { func (n *ForStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *ForStmt) copy() Node { c := *n - c.init = c.init.Copy() - c.Late = c.Late.Copy() - c.Body = c.Body.Copy() + c.init = copyNodes(c.init) + c.Late = copyNodes(c.Late) + c.Body = copyNodes(c.Body) return &c } func (n *ForStmt) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.Cond, err, do) - err = maybeDoList(n.Late, err, do) - err = maybeDo(n.Post, err, do) - err = maybeDoList(n.Body, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if n.Cond != nil { + if err := do(n.Cond); err != nil { + return err + } + } + if err := doNodes(n.Late, do); err != nil { + return err + } + if n.Post != nil { + if err := do(n.Post); err != nil { + return err + } + } + if err := doNodes(n.Body, do); err != nil { + return err + } + return nil } func (n *ForStmt) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.Cond = maybeEdit(n.Cond, edit) - editList(n.Late, edit) - n.Post = maybeEdit(n.Post, edit) - editList(n.Body, edit) + editNodes(n.init, edit) + if n.Cond != nil { + n.Cond = edit(n.Cond).(Node) + } + editNodes(n.Late, edit) + if n.Post != nil { + n.Post = edit(n.Post).(Node) + } + editNodes(n.Body, edit) } func (n *Func) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } -func (n *Func) copy() Node { - c := *n - c.Body = c.Body.Copy() - return &c -} -func (n *Func) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.Body, err, do) - return err -} -func (n *Func) editChildren(edit func(Node) Node) { - editList(n.Body, edit) -} func (n *FuncType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *FuncType) copy() Node { c := *n - if c.Recv != nil { - c.Recv = c.Recv.copy() - } + c.Recv = copyField(c.Recv) c.Params = copyFields(c.Params) c.Results = 
copyFields(c.Results) return &c } func (n *FuncType) doChildren(do func(Node) error) error { - var err error - err = maybeDoField(n.Recv, err, do) - err = maybeDoFields(n.Params, err, do) - err = maybeDoFields(n.Results, err, do) - return err + if err := doField(n.Recv, do); err != nil { + return err + } + if err := doFields(n.Params, do); err != nil { + return err + } + if err := doFields(n.Results, do); err != nil { + return err + } + return nil } func (n *FuncType) editChildren(edit func(Node) Node) { editField(n.Recv, edit) @@ -458,111 +597,149 @@ func (n *FuncType) editChildren(edit func(Node) Node) { func (n *GoDeferStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *GoDeferStmt) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *GoDeferStmt) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.Call, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if n.Call != nil { + if err := do(n.Call); err != nil { + return err + } + } + return nil } func (n *GoDeferStmt) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.Call = maybeEdit(n.Call, edit) + editNodes(n.init, edit) + if n.Call != nil { + n.Call = edit(n.Call).(Node) + } } func (n *Ident) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *Ident) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *Ident) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + return nil } func (n *Ident) editChildren(edit func(Node) Node) { - editList(n.init, edit) + editNodes(n.init, edit) } func (n *IfStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *IfStmt) copy() Node { c := *n - c.init = c.init.Copy() - c.Body = c.Body.Copy() - c.Else = c.Else.Copy() + c.init = copyNodes(c.init) + c.Body = copyNodes(c.Body) + c.Else = copyNodes(c.Else) return &c } func (n *IfStmt) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.Cond, err, do) - err = maybeDoList(n.Body, err, do) - err = maybeDoList(n.Else, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if n.Cond != nil { + if err := do(n.Cond); err != nil { + return err + } + } + if err := doNodes(n.Body, do); err != nil { + return err + } + if err := doNodes(n.Else, do); err != nil { + return err + } + return nil } func (n *IfStmt) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.Cond = maybeEdit(n.Cond, edit) - editList(n.Body, edit) - editList(n.Else, edit) + editNodes(n.init, edit) + if n.Cond != nil { + n.Cond = edit(n.Cond).(Node) + } + editNodes(n.Body, edit) + editNodes(n.Else, edit) } func (n *IndexExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *IndexExpr) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *IndexExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - err = maybeDo(n.Index, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if n.X != nil { + if err := do(n.X); err != nil { + return err + } + } + if n.Index != nil { + if err := do(n.Index); err != nil { + return err + } + } + return nil } func (n *IndexExpr) editChildren(edit func(Node) 
Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) - n.Index = maybeEdit(n.Index, edit) + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Index != nil { + n.Index = edit(n.Index).(Node) + } } func (n *InlineMarkStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *InlineMarkStmt) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *InlineMarkStmt) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + return nil } func (n *InlineMarkStmt) editChildren(edit func(Node) Node) { - editList(n.init, edit) + editNodes(n.init, edit) } func (n *InlinedCallExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *InlinedCallExpr) copy() Node { c := *n - c.init = c.init.Copy() - c.Body = c.Body.Copy() - c.ReturnVars = c.ReturnVars.Copy() + c.init = copyNodes(c.init) + c.Body = copyNodes(c.Body) + c.ReturnVars = copyNodes(c.ReturnVars) return &c } func (n *InlinedCallExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDoList(n.Body, err, do) - err = maybeDoList(n.ReturnVars, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if err := doNodes(n.Body, do); err != nil { + return err + } + if err := doNodes(n.ReturnVars, do); err != nil { + return err + } + return nil } func (n *InlinedCallExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - editList(n.Body, edit) - editList(n.ReturnVars, edit) + editNodes(n.init, edit) + editNodes(n.Body, edit) + editNodes(n.ReturnVars, edit) } func (n *InterfaceType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } @@ -572,9 +749,10 @@ func (n *InterfaceType) copy() Node { return &c } func (n *InterfaceType) doChildren(do func(Node) error) error { - var err error - err = maybeDoFields(n.Methods, err, do) - return err + if err := doFields(n.Methods, do); err != nil { + return err + } + return nil } func (n *InterfaceType) editChildren(edit func(Node) Node) { editFields(n.Methods, edit) @@ -583,73 +761,113 @@ func (n *InterfaceType) editChildren(edit func(Node) Node) { func (n *KeyExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *KeyExpr) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *KeyExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.Key, err, do) - err = maybeDo(n.Value, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if n.Key != nil { + if err := do(n.Key); err != nil { + return err + } + } + if n.Value != nil { + if err := do(n.Value); err != nil { + return err + } + } + return nil } func (n *KeyExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.Key = maybeEdit(n.Key, edit) - n.Value = maybeEdit(n.Value, edit) + editNodes(n.init, edit) + if n.Key != nil { + n.Key = edit(n.Key).(Node) + } + if n.Value != nil { + n.Value = edit(n.Value).(Node) + } } func (n *LabelStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *LabelStmt) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *LabelStmt) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + return nil } 
func (n *LabelStmt) editChildren(edit func(Node) Node) { - editList(n.init, edit) + editNodes(n.init, edit) } func (n *LogicalExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *LogicalExpr) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *LogicalExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - err = maybeDo(n.Y, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if n.X != nil { + if err := do(n.X); err != nil { + return err + } + } + if n.Y != nil { + if err := do(n.Y); err != nil { + return err + } + } + return nil } func (n *LogicalExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) - n.Y = maybeEdit(n.Y, edit) + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Y != nil { + n.Y = edit(n.Y).(Node) + } } func (n *MakeExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *MakeExpr) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *MakeExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.Len, err, do) - err = maybeDo(n.Cap, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if n.Len != nil { + if err := do(n.Len); err != nil { + return err + } + } + if n.Cap != nil { + if err := do(n.Cap); err != nil { + return err + } + } + return nil } func (n *MakeExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.Len = maybeEdit(n.Len, edit) - n.Cap = maybeEdit(n.Cap, edit) + editNodes(n.init, edit) + if n.Len != nil { + n.Len = edit(n.Len).(Node) + } + if n.Cap != nil { + n.Cap = edit(n.Cap).(Node) + } } func (n *MapType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } @@ -658,41 +876,48 @@ func (n *MapType) copy() Node { return &c } func (n *MapType) doChildren(do func(Node) error) error { - var err error - err = maybeDo(n.Key, err, do) - err = maybeDo(n.Elem, err, do) - return err + if n.Key != nil { + if err := do(n.Key); err != nil { + return err + } + } + if n.Elem != nil { + if err := do(n.Elem); err != nil { + return err + } + } + return nil } func (n *MapType) editChildren(edit func(Node) Node) { - n.Key = toNtype(maybeEdit(n.Key, edit)) - n.Elem = toNtype(maybeEdit(n.Elem, edit)) + if n.Key != nil { + n.Key = edit(n.Key).(Ntype) + } + if n.Elem != nil { + n.Elem = edit(n.Elem).(Ntype) + } } func (n *Name) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } -func (n *Name) copy() Node { panic("Name.copy") } -func (n *Name) doChildren(do func(Node) error) error { - var err error - return err -} -func (n *Name) editChildren(edit func(Node) Node) { -} func (n *NameOffsetExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *NameOffsetExpr) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *NameOffsetExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) + if err := doNodes(n.init, do); err != nil { + return err + } if n.Name_ != nil { - err = maybeDo(n.Name_, err, do) + if err := do(n.Name_); err != nil { + return err + } } - return err + return nil } func (n *NameOffsetExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) + editNodes(n.init, edit) if n.Name_ != nil { n.Name_ = edit(n.Name_).(*Name) } @@ -701,33 +926,41 @@ func (n 
*NameOffsetExpr) editChildren(edit func(Node) Node) { func (n *NilExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *NilExpr) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *NilExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + return nil } func (n *NilExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) + editNodes(n.init, edit) } func (n *ParenExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *ParenExpr) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *ParenExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if n.X != nil { + if err := do(n.X); err != nil { + return err + } + } + return nil } func (n *ParenExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } } func (n *PkgName) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } @@ -736,8 +969,7 @@ func (n *PkgName) copy() Node { return &c } func (n *PkgName) doChildren(do func(Node) error) error { - var err error - return err + return nil } func (n *PkgName) editChildren(edit func(Node) Node) { } @@ -745,28 +977,51 @@ func (n *PkgName) editChildren(edit func(Node) Node) { func (n *RangeStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *RangeStmt) copy() Node { c := *n - c.init = c.init.Copy() - c.Body = c.Body.Copy() + c.init = copyNodes(c.init) + c.Body = copyNodes(c.Body) return &c } func (n *RangeStmt) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - err = maybeDo(n.Key, err, do) - err = maybeDo(n.Value, err, do) - err = maybeDoList(n.Body, err, do) + if err := doNodes(n.init, do); err != nil { + return err + } + if n.X != nil { + if err := do(n.X); err != nil { + return err + } + } + if n.Key != nil { + if err := do(n.Key); err != nil { + return err + } + } + if n.Value != nil { + if err := do(n.Value); err != nil { + return err + } + } + if err := doNodes(n.Body, do); err != nil { + return err + } if n.Prealloc != nil { - err = maybeDo(n.Prealloc, err, do) + if err := do(n.Prealloc); err != nil { + return err + } } - return err + return nil } func (n *RangeStmt) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) - n.Key = maybeEdit(n.Key, edit) - n.Value = maybeEdit(n.Value, edit) - editList(n.Body, edit) + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Key != nil { + n.Key = edit(n.Key).(Node) + } + if n.Value != nil { + n.Value = edit(n.Value).(Node) + } + editNodes(n.Body, edit) if n.Prealloc != nil { n.Prealloc = edit(n.Prealloc).(*Name) } @@ -775,75 +1030,93 @@ func (n *RangeStmt) editChildren(edit func(Node) Node) { func (n *ResultExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *ResultExpr) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *ResultExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + return nil } func (n *ResultExpr) 
editChildren(edit func(Node) Node) { - editList(n.init, edit) + editNodes(n.init, edit) } func (n *ReturnStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *ReturnStmt) copy() Node { c := *n - c.init = c.init.Copy() - c.Results = c.Results.Copy() + c.init = copyNodes(c.init) + c.Results = copyNodes(c.Results) return &c } func (n *ReturnStmt) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDoList(n.Results, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if err := doNodes(n.Results, do); err != nil { + return err + } + return nil } func (n *ReturnStmt) editChildren(edit func(Node) Node) { - editList(n.init, edit) - editList(n.Results, edit) + editNodes(n.init, edit) + editNodes(n.Results, edit) } func (n *SelectStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *SelectStmt) copy() Node { c := *n - c.init = c.init.Copy() - c.Cases = copyComms(c.Cases) - c.Compiled = c.Compiled.Copy() + c.init = copyNodes(c.init) + c.Cases = copyCommClauses(c.Cases) + c.Compiled = copyNodes(c.Compiled) return &c } func (n *SelectStmt) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDoComms(n.Cases, err, do) - err = maybeDoList(n.Compiled, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if err := doCommClauses(n.Cases, do); err != nil { + return err + } + if err := doNodes(n.Compiled, do); err != nil { + return err + } + return nil } func (n *SelectStmt) editChildren(edit func(Node) Node) { - editList(n.init, edit) - editComms(n.Cases, edit) - editList(n.Compiled, edit) + editNodes(n.init, edit) + editCommClauses(n.Cases, edit) + editNodes(n.Compiled, edit) } func (n *SelectorExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *SelectorExpr) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *SelectorExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) + if err := doNodes(n.init, do); err != nil { + return err + } + if n.X != nil { + if err := do(n.X); err != nil { + return err + } + } if n.Prealloc != nil { - err = maybeDo(n.Prealloc, err, do) + if err := do(n.Prealloc); err != nil { + return err + } } - return err + return nil } func (n *SelectorExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } if n.Prealloc != nil { n.Prealloc = edit(n.Prealloc).(*Name) } @@ -852,64 +1125,121 @@ func (n *SelectorExpr) editChildren(edit func(Node) Node) { func (n *SendStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *SendStmt) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *SendStmt) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.Chan, err, do) - err = maybeDo(n.Value, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if n.Chan != nil { + if err := do(n.Chan); err != nil { + return err + } + } + if n.Value != nil { + if err := do(n.Value); err != nil { + return err + } + } + return nil } func (n *SendStmt) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.Chan = maybeEdit(n.Chan, edit) - n.Value = maybeEdit(n.Value, edit) + editNodes(n.init, edit) + if n.Chan != 
nil { + n.Chan = edit(n.Chan).(Node) + } + if n.Value != nil { + n.Value = edit(n.Value).(Node) + } } func (n *SliceExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *SliceExpr) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *SliceExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - err = maybeDo(n.Low, err, do) - err = maybeDo(n.High, err, do) - err = maybeDo(n.Max, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if n.X != nil { + if err := do(n.X); err != nil { + return err + } + } + if n.Low != nil { + if err := do(n.Low); err != nil { + return err + } + } + if n.High != nil { + if err := do(n.High); err != nil { + return err + } + } + if n.Max != nil { + if err := do(n.Max); err != nil { + return err + } + } + return nil } func (n *SliceExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) - n.Low = maybeEdit(n.Low, edit) - n.High = maybeEdit(n.High, edit) - n.Max = maybeEdit(n.Max, edit) + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Low != nil { + n.Low = edit(n.Low).(Node) + } + if n.High != nil { + n.High = edit(n.High).(Node) + } + if n.Max != nil { + n.Max = edit(n.Max).(Node) + } } func (n *SliceHeaderExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *SliceHeaderExpr) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *SliceHeaderExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.Ptr, err, do) - err = maybeDo(n.Len, err, do) - err = maybeDo(n.Cap, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if n.Ptr != nil { + if err := do(n.Ptr); err != nil { + return err + } + } + if n.Len != nil { + if err := do(n.Len); err != nil { + return err + } + } + if n.Cap != nil { + if err := do(n.Cap); err != nil { + return err + } + } + return nil } func (n *SliceHeaderExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.Ptr = maybeEdit(n.Ptr, edit) - n.Len = maybeEdit(n.Len, edit) - n.Cap = maybeEdit(n.Cap, edit) + editNodes(n.init, edit) + if n.Ptr != nil { + n.Ptr = edit(n.Ptr).(Node) + } + if n.Len != nil { + n.Len = edit(n.Len).(Node) + } + if n.Cap != nil { + n.Cap = edit(n.Cap).(Node) + } } func (n *SliceType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } @@ -918,46 +1248,65 @@ func (n *SliceType) copy() Node { return &c } func (n *SliceType) doChildren(do func(Node) error) error { - var err error - err = maybeDo(n.Elem, err, do) - return err + if n.Elem != nil { + if err := do(n.Elem); err != nil { + return err + } + } + return nil } func (n *SliceType) editChildren(edit func(Node) Node) { - n.Elem = toNtype(maybeEdit(n.Elem, edit)) + if n.Elem != nil { + n.Elem = edit(n.Elem).(Ntype) + } } func (n *StarExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *StarExpr) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *StarExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if n.X != nil { + if err := do(n.X); err != nil { + return err + } + } + return nil } func (n *StarExpr) editChildren(edit func(Node) Node) { - 
editList(n.init, edit) - n.X = maybeEdit(n.X, edit) + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } } func (n *StructKeyExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *StructKeyExpr) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *StructKeyExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.Value, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if n.Value != nil { + if err := do(n.Value); err != nil { + return err + } + } + return nil } func (n *StructKeyExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.Value = maybeEdit(n.Value, edit) + editNodes(n.init, edit) + if n.Value != nil { + n.Value = edit(n.Value).(Node) + } } func (n *StructType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } @@ -967,9 +1316,10 @@ func (n *StructType) copy() Node { return &c } func (n *StructType) doChildren(do func(Node) error) error { - var err error - err = maybeDoFields(n.Fields, err, do) - return err + if err := doFields(n.Fields, do); err != nil { + return err + } + return nil } func (n *StructType) editChildren(edit func(Node) Node) { editFields(n.Fields, edit) @@ -978,43 +1328,67 @@ func (n *StructType) editChildren(edit func(Node) Node) { func (n *SwitchStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *SwitchStmt) copy() Node { c := *n - c.init = c.init.Copy() - c.Cases = copyCases(c.Cases) - c.Compiled = c.Compiled.Copy() + c.init = copyNodes(c.init) + c.Cases = copyCaseClauses(c.Cases) + c.Compiled = copyNodes(c.Compiled) return &c } func (n *SwitchStmt) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.Tag, err, do) - err = maybeDoCases(n.Cases, err, do) - err = maybeDoList(n.Compiled, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if n.Tag != nil { + if err := do(n.Tag); err != nil { + return err + } + } + if err := doCaseClauses(n.Cases, do); err != nil { + return err + } + if err := doNodes(n.Compiled, do); err != nil { + return err + } + return nil } func (n *SwitchStmt) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.Tag = maybeEdit(n.Tag, edit) - editCases(n.Cases, edit) - editList(n.Compiled, edit) + editNodes(n.init, edit) + if n.Tag != nil { + n.Tag = edit(n.Tag).(Node) + } + editCaseClauses(n.Cases, edit) + editNodes(n.Compiled, edit) } func (n *TypeAssertExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *TypeAssertExpr) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *TypeAssertExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - err = maybeDo(n.Ntype, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if n.X != nil { + if err := do(n.X); err != nil { + return err + } + } + if n.Ntype != nil { + if err := do(n.Ntype); err != nil { + return err + } + } + return nil } func (n *TypeAssertExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) - n.Ntype = toNtype(maybeEdit(n.Ntype, edit)) + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Ntype != nil { + n.Ntype = edit(n.Ntype).(Ntype) + } } func (n *TypeSwitchGuard) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } @@ -1023,35 
+1397,49 @@ func (n *TypeSwitchGuard) copy() Node { return &c } func (n *TypeSwitchGuard) doChildren(do func(Node) error) error { - var err error if n.Tag != nil { - err = maybeDo(n.Tag, err, do) + if err := do(n.Tag); err != nil { + return err + } } - err = maybeDo(n.X, err, do) - return err + if n.X != nil { + if err := do(n.X); err != nil { + return err + } + } + return nil } func (n *TypeSwitchGuard) editChildren(edit func(Node) Node) { if n.Tag != nil { n.Tag = edit(n.Tag).(*Ident) } - n.X = maybeEdit(n.X, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } } func (n *UnaryExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *UnaryExpr) copy() Node { c := *n - c.init = c.init.Copy() + c.init = copyNodes(c.init) return &c } func (n *UnaryExpr) doChildren(do func(Node) error) error { - var err error - err = maybeDoList(n.init, err, do) - err = maybeDo(n.X, err, do) - return err + if err := doNodes(n.init, do); err != nil { + return err + } + if n.X != nil { + if err := do(n.X); err != nil { + return err + } + } + return nil } func (n *UnaryExpr) editChildren(edit func(Node) Node) { - editList(n.init, edit) - n.X = maybeEdit(n.X, edit) + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } } func (n *typeNode) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } @@ -1060,13 +1448,12 @@ func (n *typeNode) copy() Node { return &c } func (n *typeNode) doChildren(do func(Node) error) error { - var err error - return err + return nil } func (n *typeNode) editChildren(edit func(Node) Node) { } -func copyCases(list []*CaseClause) []*CaseClause { +func copyCaseClauses(list []*CaseClause) []*CaseClause { if list == nil { return nil } @@ -1074,10 +1461,7 @@ func copyCases(list []*CaseClause) []*CaseClause { copy(c, list) return c } -func maybeDoCases(list []*CaseClause, err error, do func(Node) error) error { - if err != nil { - return err - } +func doCaseClauses(list []*CaseClause, do func(Node) error) error { for _, x := range list { if x != nil { if err := do(x); err != nil { @@ -1087,7 +1471,7 @@ func maybeDoCases(list []*CaseClause, err error, do func(Node) error) error { } return nil } -func editCases(list []*CaseClause, edit func(Node) Node) { +func editCaseClauses(list []*CaseClause, edit func(Node) Node) { for i, x := range list { if x != nil { list[i] = edit(x).(*CaseClause) @@ -1095,7 +1479,7 @@ func editCases(list []*CaseClause, edit func(Node) Node) { } } -func copyComms(list []*CommClause) []*CommClause { +func copyCommClauses(list []*CommClause) []*CommClause { if list == nil { return nil } @@ -1103,10 +1487,7 @@ func copyComms(list []*CommClause) []*CommClause { copy(c, list) return c } -func maybeDoComms(list []*CommClause, err error, do func(Node) error) error { - if err != nil { - return err - } +func doCommClauses(list []*CommClause, do func(Node) error) error { for _, x := range list { if x != nil { if err := do(x); err != nil { @@ -1116,10 +1497,36 @@ func maybeDoComms(list []*CommClause, err error, do func(Node) error) error { } return nil } -func editComms(list []*CommClause, edit func(Node) Node) { +func editCommClauses(list []*CommClause, edit func(Node) Node) { for i, x := range list { if x != nil { list[i] = edit(x).(*CommClause) } } } + +func copyNodes(list []Node) []Node { + if list == nil { + return nil + } + c := make([]Node, len(list)) + copy(c, list) + return c +} +func doNodes(list []Node, do func(Node) error) error { + for _, x := range list { + if x != nil { + if err := do(x); err != nil { + return err + } + } + } + return 
nil +} +func editNodes(list []Node, edit func(Node) Node) { + for i, x := range list { + if x != nil { + list[i] = edit(x).(Node) + } + } +} diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index 4575dec260b2e..9c2cba9a082bd 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -322,8 +322,8 @@ func NewRangeStmt(pos src.XPos, key, value, x Node, body []Node) *RangeStmt { // A ReturnStmt is a return statement. type ReturnStmt struct { miniStmt - orig Node // for typecheckargs rewrite - Results Nodes // return list + origNode // for typecheckargs rewrite + Results Nodes // return list } func NewReturnStmt(pos src.XPos, results []Node) *ReturnStmt { @@ -335,9 +335,6 @@ func NewReturnStmt(pos src.XPos, results []Node) *ReturnStmt { return n } -func (n *ReturnStmt) Orig() Node { return n.orig } -func (n *ReturnStmt) SetOrig(x Node) { n.orig = x } - // A SelectStmt is a block: { Cases }. type SelectStmt struct { miniStmt diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go index 408f6ed56372e..7dd394f9ea94c 100644 --- a/src/cmd/compile/internal/ir/type.go +++ b/src/cmd/compile/internal/ir/type.go @@ -185,45 +185,32 @@ func (f *Field) String() string { return typ } -func (f *Field) copy() *Field { +// TODO(mdempsky): Make Field a Node again so these can be generated? +// Fields are Nodes in go/ast and cmd/compile/internal/syntax. + +func copyField(f *Field) *Field { + if f == nil { + return nil + } c := *f return &c } - -func copyFields(list []*Field) []*Field { - out := make([]*Field, len(list)) - copy(out, list) - for i, f := range out { - out[i] = f.copy() +func doField(f *Field, do func(Node) error) error { + if f == nil { + return nil } - return out -} - -func maybeDoField(f *Field, err error, do func(Node) error) error { - if f != nil { - if err == nil && f.Decl != nil { - err = do(f.Decl) - } - if err == nil && f.Ntype != nil { - err = do(f.Ntype) + if f.Decl != nil { + if err := do(f.Decl); err != nil { + return err } } - return err -} - -func maybeDoFields(list []*Field, err error, do func(Node) error) error { - if err != nil { - return err - } - for _, f := range list { - err = maybeDoField(f, err, do) - if err != nil { + if f.Ntype != nil { + if err := do(f.Ntype); err != nil { return err } } - return err + return nil } - func editField(f *Field, edit func(Node) Node) { if f == nil { return @@ -232,10 +219,25 @@ func editField(f *Field, edit func(Node) Node) { f.Decl = edit(f.Decl).(*Name) } if f.Ntype != nil { - f.Ntype = toNtype(edit(f.Ntype)) + f.Ntype = edit(f.Ntype).(Ntype) } } +func copyFields(list []*Field) []*Field { + out := make([]*Field, len(list)) + for i, f := range list { + out[i] = copyField(f) + } + return out +} +func doFields(list []*Field, do func(Node) error) error { + for _, x := range list { + if err := doField(x, do); err != nil { + return err + } + } + return nil +} func editFields(list []*Field, edit func(Node) Node) { for _, f := range list { editField(f, edit) diff --git a/src/cmd/compile/internal/ir/visit.go b/src/cmd/compile/internal/ir/visit.go index 8839e1664d3b5..4616390b7c14e 100644 --- a/src/cmd/compile/internal/ir/visit.go +++ b/src/cmd/compile/internal/ir/visit.go @@ -106,14 +106,7 @@ func DoChildren(n Node, do func(Node) error) error { // Note that DoList only calls do on the nodes in the list, not their children. // If x's children should be processed, do(x) must call DoChildren(x, do) itself. 
func DoList(list Nodes, do func(Node) error) error { - for _, x := range list { - if x != nil { - if err := do(x); err != nil { - return err - } - } - } - return nil + return doNodes(list, do) } // Visit visits each non-nil node x in the IR tree rooted at n @@ -210,16 +203,3 @@ func EditChildren(n Node, edit func(Node) Node) { } n.editChildren(edit) } - -// editList calls edit on each non-nil node x in the list, -// saving the result of edit back into the list. -// -// Note that editList only calls edit on the nodes in the list, not their children. -// If x's children should be processed, edit(x) must call EditChildren(x, edit) itself. -func editList(list Nodes, edit func(Node) Node) { - for i, x := range list { - if x != nil { - list[i] = edit(x) - } - } -} From f9b67f76a59cb9adf5d04e9b559cda98afb3c6f4 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 29 Dec 2020 19:46:31 -0800 Subject: [PATCH 307/474] [dev.regabi] cmd/compile: change ir.DoChildren to use bool result type After using the IR visitor code for a bit, it seems clear that a simple boolean result type is adequate for tree traversals. This CL updates ir.DoChildren to use the same calling convention as ir.Any, and updates mknode.go to generate code accordingly. There were only two places where the error-based DoChildren API was used within the compiler: 1. Within typechecking, marking statements that contain "break". This code never returns errors anyway, so it's trivially updated to return false instead. 2. Within inlining, the "hairy visitor" actually does make use of returning errors. However, it threads through a reference to the hairyVisitor anyway, where it would be trivial to store any needed information instead. For the purpose of this CL, we provide "errChildren" and "errList" helper functions that provide the previous error-based semantics on top of the new bool-based API. Passes toolstash -cmp. Change-Id: I4bac9a697b4dbfb5f66eeac37d4a2ced2073d7d0 Reviewed-on: https://go-review.googlesource.com/c/go/+/280675 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky Reviewed-by: Cuong Manh Le TryBot-Result: Go Bot --- src/cmd/compile/internal/inline/inl.go | 29 +- src/cmd/compile/internal/ir/func.go | 6 +- src/cmd/compile/internal/ir/mknode.go | 18 +- src/cmd/compile/internal/ir/name.go | 6 +- src/cmd/compile/internal/ir/node.go | 2 +- src/cmd/compile/internal/ir/node_gen.go | 898 ++++++++---------- src/cmd/compile/internal/ir/type.go | 26 +- src/cmd/compile/internal/ir/visit.go | 103 +- .../compile/internal/typecheck/typecheck.go | 6 +- 9 files changed, 481 insertions(+), 613 deletions(-) diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 7584f6a19f867..df797da2d1c8e 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -265,7 +265,7 @@ var errBudget = errors.New("too expensive") func (v *hairyVisitor) tooHairy(fn *ir.Func) bool { v.do = v.doNode // cache closure - err := ir.DoChildren(fn, v.do) + err := errChildren(fn, v.do) if err != nil { v.reason = err.Error() return true @@ -393,13 +393,13 @@ func (v *hairyVisitor) doNode(n ir.Node) error { if ir.IsConst(n.Cond, constant.Bool) { // This if and the condition cost nothing. // TODO(rsc): It seems strange that we visit the dead branch. 
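// Editorial sketch, not part of this CL: the commit message above notes that
// ir.DoChildren now shares ir.Any's calling convention, in which the callback
// returns true to stop the walk early. Under that convention, a recursive
// search over a function body might look like the following — essentially what
// ir.Any itself provides. containsCall and isCall are hypothetical names
// introduced only for illustration; they are not identifiers from this CL.

func containsCall(fn *ir.Func) bool {
	var do func(ir.Node) bool
	do = func(n ir.Node) bool {
		if isCall(n) {
			return true // match found; returning true short-circuits the walk
		}
		// Recurse into n's children; a match anywhere below propagates up as true.
		return ir.DoChildren(n, do)
	}
	return ir.DoChildren(fn, do)
}

// The same shape explains the errChildren/errList shims added further down:
// they adapt an error-returning callback onto this bool-based API by stashing
// the error in a closure variable and returning err != nil to stop the walk.
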
- if err := ir.DoList(n.Init(), v.do); err != nil { + if err := errList(n.Init(), v.do); err != nil { return err } - if err := ir.DoList(n.Body, v.do); err != nil { + if err := errList(n.Body, v.do); err != nil { return err } - if err := ir.DoList(n.Else, v.do); err != nil { + if err := errList(n.Else, v.do); err != nil { return err } return nil @@ -431,7 +431,7 @@ func (v *hairyVisitor) doNode(n ir.Node) error { return errBudget } - return ir.DoChildren(n, v.do) + return errChildren(n, v.do) } func isBigFunc(fn *ir.Func) bool { @@ -1214,3 +1214,22 @@ func numNonClosures(list []*ir.Func) int { } return count } + +// TODO(mdempsky): Update inl.go to use ir.DoChildren directly. +func errChildren(n ir.Node, do func(ir.Node) error) (err error) { + ir.DoChildren(n, func(x ir.Node) bool { + err = do(x) + return err != nil + }) + return +} +func errList(list []ir.Node, do func(ir.Node) error) error { + for _, x := range list { + if x != nil { + if err := do(x); err != nil { + return err + } + } + } + return nil +} diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 32ad37fa8064a..9a79a4f30ff83 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -115,9 +115,9 @@ func NewFunc(pos src.XPos) *Func { func (f *Func) isStmt() {} -func (n *Func) copy() Node { panic(n.no("copy")) } -func (n *Func) doChildren(do func(Node) error) error { return doNodes(n.Body, do) } -func (n *Func) editChildren(edit func(Node) Node) { editNodes(n.Body, edit) } +func (n *Func) copy() Node { panic(n.no("copy")) } +func (n *Func) doChildren(do func(Node) bool) bool { return doNodes(n.Body, do) } +func (n *Func) editChildren(edit func(Node) Node) { editNodes(n.Body, edit) } func (f *Func) Type() *types.Type { return f.Nname.Type() } func (f *Func) Sym() *types.Sym { return f.Nname.Sym() } diff --git a/src/cmd/compile/internal/ir/mknode.go b/src/cmd/compile/internal/ir/mknode.go index 4e26bc5011db4..326f491a69d34 100644 --- a/src/cmd/compile/internal/ir/mknode.go +++ b/src/cmd/compile/internal/ir/mknode.go @@ -70,10 +70,10 @@ func main() { "return &c }\n") forNodeFields(typ, - "func (n *%[1]s) doChildren(do func(Node) error) error {\n", - "if n.%[1]s != nil { if err := do(n.%[1]s); err != nil { return err } }", - "if err := do%[2]s(n.%[1]s, do); err != nil { return err }", - "return nil }\n") + "func (n *%[1]s) doChildren(do func(Node) bool) bool {\n", + "if n.%[1]s != nil && do(n.%[1]s) { return true }", + "if do%[2]s(n.%[1]s, do) { return true }", + "return false }\n") forNodeFields(typ, "func (n *%[1]s) editChildren(edit func(Node) Node) {\n", @@ -121,15 +121,13 @@ func copy%[1]s(list []%[2]s) []%[2]s { copy(c, list) return c } -func do%[1]s(list []%[2]s, do func(Node) error) error { +func do%[1]s(list []%[2]s, do func(Node) bool) bool { for _, x := range list { - if x != nil { - if err := do(x); err != nil { - return err - } + if x != nil && do(x) { + return true } } - return nil + return false } func edit%[1]s(list []%[2]s, edit func(Node) Node) { for i, x := range list { diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index b12e833f73fe8..697b04f541bcf 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -143,9 +143,9 @@ type Name struct { func (n *Name) isExpr() {} -func (n *Name) copy() Node { panic(n.no("copy")) } -func (n *Name) doChildren(do func(Node) error) error { return nil } -func (n *Name) editChildren(edit func(Node) Node) {} +func (n *Name) copy() 
Node { panic(n.no("copy")) } +func (n *Name) doChildren(do func(Node) bool) bool { return false } +func (n *Name) editChildren(edit func(Node) Node) {} // CloneName makes a cloned copy of the name. // It's not ir.Copy(n) because in general that operation is a mistake on names, diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 0238e9de859a2..0d56b5aeb8062 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -28,7 +28,7 @@ type Node interface { // For making copies. For Copy and SepCopy. copy() Node - doChildren(func(Node) error) error + doChildren(func(Node) bool) bool editChildren(func(Node) Node) // Abstract graph structure, for generic traversals. diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 21e4eff9fbe77..65c0b239ed568 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -11,19 +11,17 @@ func (n *AddStringExpr) copy() Node { c.List = copyNodes(c.List) return &c } -func (n *AddStringExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *AddStringExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if err := doNodes(n.List, do); err != nil { - return err + if doNodes(n.List, do) { + return true } - if n.Prealloc != nil { - if err := do(n.Prealloc); err != nil { - return err - } + if n.Prealloc != nil && do(n.Prealloc) { + return true } - return nil + return false } func (n *AddStringExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -39,21 +37,17 @@ func (n *AddrExpr) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *AddrExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *AddrExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.X != nil { - if err := do(n.X); err != nil { - return err - } + if n.X != nil && do(n.X) { + return true } - if n.Prealloc != nil { - if err := do(n.Prealloc); err != nil { - return err - } + if n.Prealloc != nil && do(n.Prealloc) { + return true } - return nil + return false } func (n *AddrExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -70,18 +64,14 @@ func (n *ArrayType) copy() Node { c := *n return &c } -func (n *ArrayType) doChildren(do func(Node) error) error { - if n.Len != nil { - if err := do(n.Len); err != nil { - return err - } +func (n *ArrayType) doChildren(do func(Node) bool) bool { + if n.Len != nil && do(n.Len) { + return true } - if n.Elem != nil { - if err := do(n.Elem); err != nil { - return err - } + if n.Elem != nil && do(n.Elem) { + return true } - return nil + return false } func (n *ArrayType) editChildren(edit func(Node) Node) { if n.Len != nil { @@ -100,17 +90,17 @@ func (n *AssignListStmt) copy() Node { c.Rhs = copyNodes(c.Rhs) return &c } -func (n *AssignListStmt) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *AssignListStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if err := doNodes(n.Lhs, do); err != nil { - return err + if doNodes(n.Lhs, do) { + return true } - if err := doNodes(n.Rhs, do); err != nil { - return err + if doNodes(n.Rhs, do) { + return true } - return nil + return false } func (n *AssignListStmt) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -124,21 +114,17 @@ func 
(n *AssignOpStmt) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *AssignOpStmt) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *AssignOpStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.X != nil { - if err := do(n.X); err != nil { - return err - } + if n.X != nil && do(n.X) { + return true } - if n.Y != nil { - if err := do(n.Y); err != nil { - return err - } + if n.Y != nil && do(n.Y) { + return true } - return nil + return false } func (n *AssignOpStmt) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -156,21 +142,17 @@ func (n *AssignStmt) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *AssignStmt) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *AssignStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.X != nil { - if err := do(n.X); err != nil { - return err - } + if n.X != nil && do(n.X) { + return true } - if n.Y != nil { - if err := do(n.Y); err != nil { - return err - } + if n.Y != nil && do(n.Y) { + return true } - return nil + return false } func (n *AssignStmt) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -188,11 +170,11 @@ func (n *BasicLit) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *BasicLit) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *BasicLit) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - return nil + return false } func (n *BasicLit) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -204,21 +186,17 @@ func (n *BinaryExpr) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *BinaryExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *BinaryExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.X != nil { - if err := do(n.X); err != nil { - return err - } + if n.X != nil && do(n.X) { + return true } - if n.Y != nil { - if err := do(n.Y); err != nil { - return err - } + if n.Y != nil && do(n.Y) { + return true } - return nil + return false } func (n *BinaryExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -237,14 +215,14 @@ func (n *BlockStmt) copy() Node { c.List = copyNodes(c.List) return &c } -func (n *BlockStmt) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *BlockStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if err := doNodes(n.List, do); err != nil { - return err + if doNodes(n.List, do) { + return true } - return nil + return false } func (n *BlockStmt) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -257,11 +235,11 @@ func (n *BranchStmt) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *BranchStmt) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *BranchStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - return nil + return false } func (n *BranchStmt) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -276,25 +254,23 @@ func (n *CallExpr) copy() Node { c.Body = copyNodes(c.Body) return &c } -func (n *CallExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - 
return err +func (n *CallExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.X != nil { - if err := do(n.X); err != nil { - return err - } + if n.X != nil && do(n.X) { + return true } - if err := doNodes(n.Args, do); err != nil { - return err + if doNodes(n.Args, do) { + return true } - if err := doNodes(n.Rargs, do); err != nil { - return err + if doNodes(n.Rargs, do) { + return true } - if err := doNodes(n.Body, do); err != nil { - return err + if doNodes(n.Body, do) { + return true } - return nil + return false } func (n *CallExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -314,22 +290,20 @@ func (n *CaseClause) copy() Node { c.Body = copyNodes(c.Body) return &c } -func (n *CaseClause) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *CaseClause) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.Var != nil { - if err := do(n.Var); err != nil { - return err - } + if n.Var != nil && do(n.Var) { + return true } - if err := doNodes(n.List, do); err != nil { - return err + if doNodes(n.List, do) { + return true } - if err := doNodes(n.Body, do); err != nil { - return err + if doNodes(n.Body, do) { + return true } - return nil + return false } func (n *CaseClause) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -345,13 +319,11 @@ func (n *ChanType) copy() Node { c := *n return &c } -func (n *ChanType) doChildren(do func(Node) error) error { - if n.Elem != nil { - if err := do(n.Elem); err != nil { - return err - } +func (n *ChanType) doChildren(do func(Node) bool) bool { + if n.Elem != nil && do(n.Elem) { + return true } - return nil + return false } func (n *ChanType) editChildren(edit func(Node) Node) { if n.Elem != nil { @@ -365,16 +337,14 @@ func (n *ClosureExpr) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *ClosureExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *ClosureExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.Prealloc != nil { - if err := do(n.Prealloc); err != nil { - return err - } + if n.Prealloc != nil && do(n.Prealloc) { + return true } - return nil + return false } func (n *ClosureExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -389,11 +359,11 @@ func (n *ClosureReadExpr) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *ClosureReadExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *ClosureReadExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - return nil + return false } func (n *ClosureReadExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -406,19 +376,17 @@ func (n *CommClause) copy() Node { c.Body = copyNodes(c.Body) return &c } -func (n *CommClause) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *CommClause) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.Comm != nil { - if err := do(n.Comm); err != nil { - return err - } + if n.Comm != nil && do(n.Comm) { + return true } - if err := doNodes(n.Body, do); err != nil { - return err + if doNodes(n.Body, do) { + return true } - return nil + return false } func (n *CommClause) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -435,24 +403,20 @@ func (n 
*CompLitExpr) copy() Node { c.List = copyNodes(c.List) return &c } -func (n *CompLitExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *CompLitExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.Ntype != nil { - if err := do(n.Ntype); err != nil { - return err - } + if n.Ntype != nil && do(n.Ntype) { + return true } - if err := doNodes(n.List, do); err != nil { - return err + if doNodes(n.List, do) { + return true } - if n.Prealloc != nil { - if err := do(n.Prealloc); err != nil { - return err - } + if n.Prealloc != nil && do(n.Prealloc) { + return true } - return nil + return false } func (n *CompLitExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -471,11 +435,11 @@ func (n *ConstExpr) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *ConstExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *ConstExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - return nil + return false } func (n *ConstExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -487,16 +451,14 @@ func (n *ConvExpr) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *ConvExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *ConvExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.X != nil { - if err := do(n.X); err != nil { - return err - } + if n.X != nil && do(n.X) { + return true } - return nil + return false } func (n *ConvExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -510,13 +472,11 @@ func (n *Decl) copy() Node { c := *n return &c } -func (n *Decl) doChildren(do func(Node) error) error { - if n.X != nil { - if err := do(n.X); err != nil { - return err - } +func (n *Decl) doChildren(do func(Node) bool) bool { + if n.X != nil && do(n.X) { + return true } - return nil + return false } func (n *Decl) editChildren(edit func(Node) Node) { if n.X != nil { @@ -532,27 +492,23 @@ func (n *ForStmt) copy() Node { c.Body = copyNodes(c.Body) return &c } -func (n *ForStmt) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *ForStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.Cond != nil { - if err := do(n.Cond); err != nil { - return err - } + if n.Cond != nil && do(n.Cond) { + return true } - if err := doNodes(n.Late, do); err != nil { - return err + if doNodes(n.Late, do) { + return true } - if n.Post != nil { - if err := do(n.Post); err != nil { - return err - } + if n.Post != nil && do(n.Post) { + return true } - if err := doNodes(n.Body, do); err != nil { - return err + if doNodes(n.Body, do) { + return true } - return nil + return false } func (n *ForStmt) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -576,17 +532,17 @@ func (n *FuncType) copy() Node { c.Results = copyFields(c.Results) return &c } -func (n *FuncType) doChildren(do func(Node) error) error { - if err := doField(n.Recv, do); err != nil { - return err +func (n *FuncType) doChildren(do func(Node) bool) bool { + if doField(n.Recv, do) { + return true } - if err := doFields(n.Params, do); err != nil { - return err + if doFields(n.Params, do) { + return true } - if err := doFields(n.Results, do); err != nil { - return err + if doFields(n.Results, do) { + return true } 
- return nil + return false } func (n *FuncType) editChildren(edit func(Node) Node) { editField(n.Recv, edit) @@ -600,16 +556,14 @@ func (n *GoDeferStmt) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *GoDeferStmt) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *GoDeferStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.Call != nil { - if err := do(n.Call); err != nil { - return err - } + if n.Call != nil && do(n.Call) { + return true } - return nil + return false } func (n *GoDeferStmt) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -624,11 +578,11 @@ func (n *Ident) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *Ident) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *Ident) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - return nil + return false } func (n *Ident) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -642,22 +596,20 @@ func (n *IfStmt) copy() Node { c.Else = copyNodes(c.Else) return &c } -func (n *IfStmt) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *IfStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.Cond != nil { - if err := do(n.Cond); err != nil { - return err - } + if n.Cond != nil && do(n.Cond) { + return true } - if err := doNodes(n.Body, do); err != nil { - return err + if doNodes(n.Body, do) { + return true } - if err := doNodes(n.Else, do); err != nil { - return err + if doNodes(n.Else, do) { + return true } - return nil + return false } func (n *IfStmt) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -674,21 +626,17 @@ func (n *IndexExpr) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *IndexExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *IndexExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.X != nil { - if err := do(n.X); err != nil { - return err - } + if n.X != nil && do(n.X) { + return true } - if n.Index != nil { - if err := do(n.Index); err != nil { - return err - } + if n.Index != nil && do(n.Index) { + return true } - return nil + return false } func (n *IndexExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -706,11 +654,11 @@ func (n *InlineMarkStmt) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *InlineMarkStmt) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *InlineMarkStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - return nil + return false } func (n *InlineMarkStmt) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -724,17 +672,17 @@ func (n *InlinedCallExpr) copy() Node { c.ReturnVars = copyNodes(c.ReturnVars) return &c } -func (n *InlinedCallExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *InlinedCallExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if err := doNodes(n.Body, do); err != nil { - return err + if doNodes(n.Body, do) { + return true } - if err := doNodes(n.ReturnVars, do); err != nil { - return err + if doNodes(n.ReturnVars, do) { + return true } - return nil + return false } func (n 
*InlinedCallExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -748,11 +696,11 @@ func (n *InterfaceType) copy() Node { c.Methods = copyFields(c.Methods) return &c } -func (n *InterfaceType) doChildren(do func(Node) error) error { - if err := doFields(n.Methods, do); err != nil { - return err +func (n *InterfaceType) doChildren(do func(Node) bool) bool { + if doFields(n.Methods, do) { + return true } - return nil + return false } func (n *InterfaceType) editChildren(edit func(Node) Node) { editFields(n.Methods, edit) @@ -764,21 +712,17 @@ func (n *KeyExpr) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *KeyExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *KeyExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.Key != nil { - if err := do(n.Key); err != nil { - return err - } + if n.Key != nil && do(n.Key) { + return true } - if n.Value != nil { - if err := do(n.Value); err != nil { - return err - } + if n.Value != nil && do(n.Value) { + return true } - return nil + return false } func (n *KeyExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -796,11 +740,11 @@ func (n *LabelStmt) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *LabelStmt) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *LabelStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - return nil + return false } func (n *LabelStmt) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -812,21 +756,17 @@ func (n *LogicalExpr) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *LogicalExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *LogicalExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.X != nil { - if err := do(n.X); err != nil { - return err - } + if n.X != nil && do(n.X) { + return true } - if n.Y != nil { - if err := do(n.Y); err != nil { - return err - } + if n.Y != nil && do(n.Y) { + return true } - return nil + return false } func (n *LogicalExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -844,21 +784,17 @@ func (n *MakeExpr) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *MakeExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *MakeExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.Len != nil { - if err := do(n.Len); err != nil { - return err - } + if n.Len != nil && do(n.Len) { + return true } - if n.Cap != nil { - if err := do(n.Cap); err != nil { - return err - } + if n.Cap != nil && do(n.Cap) { + return true } - return nil + return false } func (n *MakeExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -875,18 +811,14 @@ func (n *MapType) copy() Node { c := *n return &c } -func (n *MapType) doChildren(do func(Node) error) error { - if n.Key != nil { - if err := do(n.Key); err != nil { - return err - } +func (n *MapType) doChildren(do func(Node) bool) bool { + if n.Key != nil && do(n.Key) { + return true } - if n.Elem != nil { - if err := do(n.Elem); err != nil { - return err - } + if n.Elem != nil && do(n.Elem) { + return true } - return nil + return false } func (n *MapType) editChildren(edit func(Node) Node) { if n.Key != nil { @@ -905,16 +837,14 @@ func (n 
*NameOffsetExpr) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *NameOffsetExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *NameOffsetExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.Name_ != nil { - if err := do(n.Name_); err != nil { - return err - } + if n.Name_ != nil && do(n.Name_) { + return true } - return nil + return false } func (n *NameOffsetExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -929,11 +859,11 @@ func (n *NilExpr) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *NilExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *NilExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - return nil + return false } func (n *NilExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -945,16 +875,14 @@ func (n *ParenExpr) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *ParenExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *ParenExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.X != nil { - if err := do(n.X); err != nil { - return err - } + if n.X != nil && do(n.X) { + return true } - return nil + return false } func (n *ParenExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -968,8 +896,8 @@ func (n *PkgName) copy() Node { c := *n return &c } -func (n *PkgName) doChildren(do func(Node) error) error { - return nil +func (n *PkgName) doChildren(do func(Node) bool) bool { + return false } func (n *PkgName) editChildren(edit func(Node) Node) { } @@ -981,34 +909,26 @@ func (n *RangeStmt) copy() Node { c.Body = copyNodes(c.Body) return &c } -func (n *RangeStmt) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *RangeStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.X != nil { - if err := do(n.X); err != nil { - return err - } + if n.X != nil && do(n.X) { + return true } - if n.Key != nil { - if err := do(n.Key); err != nil { - return err - } + if n.Key != nil && do(n.Key) { + return true } - if n.Value != nil { - if err := do(n.Value); err != nil { - return err - } + if n.Value != nil && do(n.Value) { + return true } - if err := doNodes(n.Body, do); err != nil { - return err + if doNodes(n.Body, do) { + return true } - if n.Prealloc != nil { - if err := do(n.Prealloc); err != nil { - return err - } + if n.Prealloc != nil && do(n.Prealloc) { + return true } - return nil + return false } func (n *RangeStmt) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -1033,11 +953,11 @@ func (n *ResultExpr) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *ResultExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *ResultExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - return nil + return false } func (n *ResultExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -1050,14 +970,14 @@ func (n *ReturnStmt) copy() Node { c.Results = copyNodes(c.Results) return &c } -func (n *ReturnStmt) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *ReturnStmt) doChildren(do func(Node) bool) bool { + if 
doNodes(n.init, do) { + return true } - if err := doNodes(n.Results, do); err != nil { - return err + if doNodes(n.Results, do) { + return true } - return nil + return false } func (n *ReturnStmt) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -1072,17 +992,17 @@ func (n *SelectStmt) copy() Node { c.Compiled = copyNodes(c.Compiled) return &c } -func (n *SelectStmt) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *SelectStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if err := doCommClauses(n.Cases, do); err != nil { - return err + if doCommClauses(n.Cases, do) { + return true } - if err := doNodes(n.Compiled, do); err != nil { - return err + if doNodes(n.Compiled, do) { + return true } - return nil + return false } func (n *SelectStmt) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -1096,21 +1016,17 @@ func (n *SelectorExpr) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *SelectorExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *SelectorExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.X != nil { - if err := do(n.X); err != nil { - return err - } + if n.X != nil && do(n.X) { + return true } - if n.Prealloc != nil { - if err := do(n.Prealloc); err != nil { - return err - } + if n.Prealloc != nil && do(n.Prealloc) { + return true } - return nil + return false } func (n *SelectorExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -1128,21 +1044,17 @@ func (n *SendStmt) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *SendStmt) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *SendStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.Chan != nil { - if err := do(n.Chan); err != nil { - return err - } + if n.Chan != nil && do(n.Chan) { + return true } - if n.Value != nil { - if err := do(n.Value); err != nil { - return err - } + if n.Value != nil && do(n.Value) { + return true } - return nil + return false } func (n *SendStmt) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -1160,31 +1072,23 @@ func (n *SliceExpr) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *SliceExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *SliceExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.X != nil { - if err := do(n.X); err != nil { - return err - } + if n.X != nil && do(n.X) { + return true } - if n.Low != nil { - if err := do(n.Low); err != nil { - return err - } + if n.Low != nil && do(n.Low) { + return true } - if n.High != nil { - if err := do(n.High); err != nil { - return err - } + if n.High != nil && do(n.High) { + return true } - if n.Max != nil { - if err := do(n.Max); err != nil { - return err - } + if n.Max != nil && do(n.Max) { + return true } - return nil + return false } func (n *SliceExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -1208,26 +1112,20 @@ func (n *SliceHeaderExpr) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *SliceHeaderExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *SliceHeaderExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true 
} - if n.Ptr != nil { - if err := do(n.Ptr); err != nil { - return err - } + if n.Ptr != nil && do(n.Ptr) { + return true } - if n.Len != nil { - if err := do(n.Len); err != nil { - return err - } + if n.Len != nil && do(n.Len) { + return true } - if n.Cap != nil { - if err := do(n.Cap); err != nil { - return err - } + if n.Cap != nil && do(n.Cap) { + return true } - return nil + return false } func (n *SliceHeaderExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -1247,13 +1145,11 @@ func (n *SliceType) copy() Node { c := *n return &c } -func (n *SliceType) doChildren(do func(Node) error) error { - if n.Elem != nil { - if err := do(n.Elem); err != nil { - return err - } +func (n *SliceType) doChildren(do func(Node) bool) bool { + if n.Elem != nil && do(n.Elem) { + return true } - return nil + return false } func (n *SliceType) editChildren(edit func(Node) Node) { if n.Elem != nil { @@ -1267,16 +1163,14 @@ func (n *StarExpr) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *StarExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *StarExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.X != nil { - if err := do(n.X); err != nil { - return err - } + if n.X != nil && do(n.X) { + return true } - return nil + return false } func (n *StarExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -1291,16 +1185,14 @@ func (n *StructKeyExpr) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *StructKeyExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *StructKeyExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.Value != nil { - if err := do(n.Value); err != nil { - return err - } + if n.Value != nil && do(n.Value) { + return true } - return nil + return false } func (n *StructKeyExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -1315,11 +1207,11 @@ func (n *StructType) copy() Node { c.Fields = copyFields(c.Fields) return &c } -func (n *StructType) doChildren(do func(Node) error) error { - if err := doFields(n.Fields, do); err != nil { - return err +func (n *StructType) doChildren(do func(Node) bool) bool { + if doFields(n.Fields, do) { + return true } - return nil + return false } func (n *StructType) editChildren(edit func(Node) Node) { editFields(n.Fields, edit) @@ -1333,22 +1225,20 @@ func (n *SwitchStmt) copy() Node { c.Compiled = copyNodes(c.Compiled) return &c } -func (n *SwitchStmt) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *SwitchStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.Tag != nil { - if err := do(n.Tag); err != nil { - return err - } + if n.Tag != nil && do(n.Tag) { + return true } - if err := doCaseClauses(n.Cases, do); err != nil { - return err + if doCaseClauses(n.Cases, do) { + return true } - if err := doNodes(n.Compiled, do); err != nil { - return err + if doNodes(n.Compiled, do) { + return true } - return nil + return false } func (n *SwitchStmt) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -1365,21 +1255,17 @@ func (n *TypeAssertExpr) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *TypeAssertExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *TypeAssertExpr) doChildren(do func(Node) bool) 
bool { + if doNodes(n.init, do) { + return true } - if n.X != nil { - if err := do(n.X); err != nil { - return err - } + if n.X != nil && do(n.X) { + return true } - if n.Ntype != nil { - if err := do(n.Ntype); err != nil { - return err - } + if n.Ntype != nil && do(n.Ntype) { + return true } - return nil + return false } func (n *TypeAssertExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -1396,18 +1282,14 @@ func (n *TypeSwitchGuard) copy() Node { c := *n return &c } -func (n *TypeSwitchGuard) doChildren(do func(Node) error) error { - if n.Tag != nil { - if err := do(n.Tag); err != nil { - return err - } +func (n *TypeSwitchGuard) doChildren(do func(Node) bool) bool { + if n.Tag != nil && do(n.Tag) { + return true } - if n.X != nil { - if err := do(n.X); err != nil { - return err - } + if n.X != nil && do(n.X) { + return true } - return nil + return false } func (n *TypeSwitchGuard) editChildren(edit func(Node) Node) { if n.Tag != nil { @@ -1424,16 +1306,14 @@ func (n *UnaryExpr) copy() Node { c.init = copyNodes(c.init) return &c } -func (n *UnaryExpr) doChildren(do func(Node) error) error { - if err := doNodes(n.init, do); err != nil { - return err +func (n *UnaryExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true } - if n.X != nil { - if err := do(n.X); err != nil { - return err - } + if n.X != nil && do(n.X) { + return true } - return nil + return false } func (n *UnaryExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) @@ -1447,8 +1327,8 @@ func (n *typeNode) copy() Node { c := *n return &c } -func (n *typeNode) doChildren(do func(Node) error) error { - return nil +func (n *typeNode) doChildren(do func(Node) bool) bool { + return false } func (n *typeNode) editChildren(edit func(Node) Node) { } @@ -1461,15 +1341,13 @@ func copyCaseClauses(list []*CaseClause) []*CaseClause { copy(c, list) return c } -func doCaseClauses(list []*CaseClause, do func(Node) error) error { +func doCaseClauses(list []*CaseClause, do func(Node) bool) bool { for _, x := range list { - if x != nil { - if err := do(x); err != nil { - return err - } + if x != nil && do(x) { + return true } } - return nil + return false } func editCaseClauses(list []*CaseClause, edit func(Node) Node) { for i, x := range list { @@ -1487,15 +1365,13 @@ func copyCommClauses(list []*CommClause) []*CommClause { copy(c, list) return c } -func doCommClauses(list []*CommClause, do func(Node) error) error { +func doCommClauses(list []*CommClause, do func(Node) bool) bool { for _, x := range list { - if x != nil { - if err := do(x); err != nil { - return err - } + if x != nil && do(x) { + return true } } - return nil + return false } func editCommClauses(list []*CommClause, edit func(Node) Node) { for i, x := range list { @@ -1513,15 +1389,13 @@ func copyNodes(list []Node) []Node { copy(c, list) return c } -func doNodes(list []Node, do func(Node) error) error { +func doNodes(list []Node, do func(Node) bool) bool { for _, x := range list { - if x != nil { - if err := do(x); err != nil { - return err - } + if x != nil && do(x) { + return true } } - return nil + return false } func editNodes(list []Node, edit func(Node) Node) { for i, x := range list { diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go index 7dd394f9ea94c..a903ea8cd4554 100644 --- a/src/cmd/compile/internal/ir/type.go +++ b/src/cmd/compile/internal/ir/type.go @@ -195,21 +195,17 @@ func copyField(f *Field) *Field { c := *f return &c } -func doField(f *Field, do func(Node) 
error) error { +func doField(f *Field, do func(Node) bool) bool { if f == nil { - return nil + return false } - if f.Decl != nil { - if err := do(f.Decl); err != nil { - return err - } + if f.Decl != nil && do(f.Decl) { + return true } - if f.Ntype != nil { - if err := do(f.Ntype); err != nil { - return err - } + if f.Ntype != nil && do(f.Ntype) { + return true } - return nil + return false } func editField(f *Field, edit func(Node) Node) { if f == nil { @@ -230,13 +226,13 @@ func copyFields(list []*Field) []*Field { } return out } -func doFields(list []*Field, do func(Node) error) error { +func doFields(list []*Field, do func(Node) bool) bool { for _, x := range list { - if err := doField(x, do); err != nil { - return err + if doField(x, do) { + return true } } - return nil + return false } func editFields(list []*Field, edit func(Node) Node) { for _, f := range list { diff --git a/src/cmd/compile/internal/ir/visit.go b/src/cmd/compile/internal/ir/visit.go index 4616390b7c14e..c1b3d4ed9508c 100644 --- a/src/cmd/compile/internal/ir/visit.go +++ b/src/cmd/compile/internal/ir/visit.go @@ -4,23 +4,18 @@ // IR visitors for walking the IR tree. // -// The lowest level helpers are DoChildren and EditChildren, -// which nodes help implement (TODO(rsc): eventually) and -// provide control over whether and when recursion happens -// during the walk of the IR. +// The lowest level helpers are DoChildren and EditChildren, which +// nodes help implement and provide control over whether and when +// recursion happens during the walk of the IR. // // Although these are both useful directly, two simpler patterns -// are fairly common and also provided: Inspect and Scan. +// are fairly common and also provided: Visit and Any. package ir -import ( - "errors" -) - // DoChildren calls do(x) on each of n's non-nil child nodes x. -// If any call returns a non-nil error, DoChildren stops and returns that error. -// Otherwise, DoChildren returns nil. +// If any call returns true, DoChildren stops and returns true. +// Otherwise, DoChildren returns false. // // Note that DoChildren(n, do) only calls do(x) for n's immediate children. // If x's children should be processed, then do(x) must call DoChildren(x, do). @@ -28,32 +23,32 @@ import ( // DoChildren allows constructing general traversals of the IR graph // that can stop early if needed. The most general usage is: // -// var do func(ir.Node) error -// do = func(x ir.Node) error { +// var do func(ir.Node) bool +// do = func(x ir.Node) bool { // ... processing BEFORE visiting children ... // if ... should visit children ... { // ir.DoChildren(x, do) // ... processing AFTER visiting children ... // } // if ... should stop parent DoChildren call from visiting siblings ... { -// return non-nil error +// return true // } -// return nil +// return false // } // do(root) // -// Since DoChildren does not generate any errors itself, if the do function -// never wants to stop the traversal, it can assume that DoChildren itself -// will always return nil, simplifying to: +// Since DoChildren does not return true itself, if the do function +// never wants to stop the traversal, it can assume that DoChildren +// itself will always return false, simplifying to: // -// var do func(ir.Node) error -// do = func(x ir.Node) error { +// var do func(ir.Node) bool +// do = func(x ir.Node) bool { // ... processing BEFORE visiting children ... // if ... should visit children ... { // ir.DoChildren(x, do) // } // ... processing AFTER visiting children 
-// return nil +// return false // } // do(root) // @@ -61,14 +56,15 @@ import ( // only processing before visiting children and never stopping: // // func Visit(n ir.Node, visit func(ir.Node)) { -// var do func(ir.Node) error -// do = func(x ir.Node) error { +// if n == nil { +// return +// } +// var do func(ir.Node) bool +// do = func(x ir.Node) bool { // visit(x) // return ir.DoChildren(x, do) // } -// if n != nil { -// visit(n) -// } +// do(n) // } // // The Any function illustrates a different simplification of the pattern, @@ -76,50 +72,40 @@ import ( // a node x for which cond(x) returns true, at which point the entire // traversal stops and returns true. // -// func Any(n ir.Node, find cond(ir.Node)) bool { -// stop := errors.New("stop") -// var do func(ir.Node) error -// do = func(x ir.Node) error { -// if cond(x) { -// return stop -// } -// return ir.DoChildren(x, do) +// func Any(n ir.Node, cond(ir.Node) bool) bool { +// if n == nil { +// return false // } -// return do(n) == stop +// var do func(ir.Node) bool +// do = func(x ir.Node) bool { +// return cond(x) || ir.DoChildren(x, do) +// } +// return do(n) // } // // Visit and Any are presented above as examples of how to use // DoChildren effectively, but of course, usage that fits within the // simplifications captured by Visit or Any will be best served // by directly calling the ones provided by this package. -func DoChildren(n Node, do func(Node) error) error { +func DoChildren(n Node, do func(Node) bool) bool { if n == nil { - return nil + return false } return n.doChildren(do) } -// DoList calls f on each non-nil node x in the list, in list order. -// If any call returns a non-nil error, DoList stops and returns that error. -// Otherwise DoList returns nil. -// -// Note that DoList only calls do on the nodes in the list, not their children. -// If x's children should be processed, do(x) must call DoChildren(x, do) itself. -func DoList(list Nodes, do func(Node) error) error { - return doNodes(list, do) -} - // Visit visits each non-nil node x in the IR tree rooted at n // in a depth-first preorder traversal, calling visit on each node visited. func Visit(n Node, visit func(Node)) { - var do func(Node) error - do = func(x Node) error { + if n == nil { + return + } + var do func(Node) bool + do = func(x Node) bool { visit(x) return DoChildren(x, do) } - if n != nil { - do(n) - } + do(n) } // VisitList calls Visit(x, visit) for each node x in the list. @@ -129,8 +115,6 @@ func VisitList(list Nodes, visit func(Node)) { } } -var stop = errors.New("stop") - // Any looks for a non-nil node x in the IR tree rooted at n // for which cond(x) returns true. // Any considers nodes in a depth-first, preorder traversal. @@ -141,14 +125,11 @@ func Any(n Node, cond func(Node) bool) bool { if n == nil { return false } - var do func(Node) error - do = func(x Node) error { - if cond(x) { - return stop - } - return DoChildren(x, do) + var do func(Node) bool + do = func(x Node) bool { + return cond(x) || DoChildren(x, do) } - return do(n) == stop + return do(n) } // AnyList calls Any(x, cond) for each node x in the list, in order. 
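(A usage sketch, not part of this CL: with the bool-based signatures above, traversals compose directly. hasCall is a hypothetical helper; the OCALL* ops are the call ops used elsewhere in this series.)

	// hasCall reports whether the tree rooted at n contains a function call.
	func hasCall(n ir.Node) bool {
		return ir.Any(n, func(x ir.Node) bool {
			switch x.Op() {
			case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
				return true
			}
			return false
		})
	}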
diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index 480d2de8e3295..ebdcc4a72e7d0 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -2053,8 +2053,8 @@ func markBreak(fn *ir.Func) { var labels map[*types.Sym]ir.Node var implicit ir.Node - var mark func(ir.Node) error - mark = func(n ir.Node) error { + var mark func(ir.Node) bool + mark = func(n ir.Node) bool { switch n.Op() { default: ir.DoChildren(n, mark) @@ -2094,7 +2094,7 @@ func markBreak(fn *ir.Func) { } implicit = old } - return nil + return false } mark(fn) From 0c1a899a6c61dc59032ead0602d1cc6b918f7669 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 26 Dec 2020 01:06:03 -0800 Subject: [PATCH 308/474] [dev.regabi] cmd/compile: fix defined-pointer method call check The compiler has logic to check whether we implicitly dereferenced a defined pointer while trying to select a method. However, rather than checking whether there were any implicit dereferences of a defined pointer, it was finding the innermost dereference/selector expression and checking whether that was dereferencing a named pointer. Moreover, it was only checking defined pointer declared in the package block. This CL restructures the code to match go/types and gccgo's behavior. Fixes #43384. Change-Id: I7bddfe2515776d9480eb2c7286023d4c15423888 Reviewed-on: https://go-review.googlesource.com/c/go/+/280392 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Robert Griesemer Trust: Robert Griesemer Trust: Matthew Dempsky --- .../compile/internal/typecheck/typecheck.go | 31 +++-- test/fixedbugs/issue43384.go | 124 ++++++++++++++++++ 2 files changed, 144 insertions(+), 11 deletions(-) create mode 100644 test/fixedbugs/issue43384.go diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index ebdcc4a72e7d0..b79739bfeba32 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -1328,6 +1328,7 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { // Already in the process of diagnosing an error. return f2 } + orig := n.X tt := n.X.Type() types.CalcSize(tt) rcvr := f2.Type.Recv().Type @@ -1358,20 +1359,28 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { } } - implicit, ll := n.Implicit(), n.X - for ll != nil && (ll.Op() == ir.ODOT || ll.Op() == ir.ODOTPTR || ll.Op() == ir.ODEREF) { - switch l := ll.(type) { + // Check that we haven't implicitly dereferenced any defined pointer types. + for x := n.X; ; { + var inner ir.Node + implicit := false + switch x := x.(type) { + case *ir.AddrExpr: + inner, implicit = x.X, x.Implicit() case *ir.SelectorExpr: - implicit, ll = l.Implicit(), l.X + inner, implicit = x.X, x.Implicit() case *ir.StarExpr: - implicit, ll = l.Implicit(), l.X + inner, implicit = x.X, x.Implicit() } - } - if implicit && ll.Type().IsPtr() && ll.Type().Sym() != nil && ll.Type().Sym().Def != nil && ir.AsNode(ll.Type().Sym().Def).Op() == ir.OTYPE { - // It is invalid to automatically dereference a named pointer type when selecting a method. - // Make n.Left == ll to clarify error message. - n.X = ll - return nil + if !implicit { + break + } + if inner.Type().Sym() != nil && (x.Op() == ir.ODEREF || x.Op() == ir.ODOTPTR) { + // Found an implicit dereference of a defined pointer type. + // Restore n.X for better error message. 
+ n.X = orig + return nil + } + x = inner } n.Selection = f2 diff --git a/test/fixedbugs/issue43384.go b/test/fixedbugs/issue43384.go new file mode 100644 index 0000000000000..1bd793ba95048 --- /dev/null +++ b/test/fixedbugs/issue43384.go @@ -0,0 +1,124 @@ +// errorcheck + +// Copyright 2020 The Go Authors. All rights reserved. Use of this +// source code is governed by a BSD-style license that can be found in +// the LICENSE file. + +package p + +type T int + +func (T) Mv() {} +func (*T) Mp() {} + +type P1 struct{ T } +type P2 struct{ *T } +type P3 *struct{ T } +type P4 *struct{ *T } + +func _() { + { + var p P1 + p.Mv() + (&p).Mv() + (*&p).Mv() + p.Mp() + (&p).Mp() + (*&p).Mp() + } + { + var p P2 + p.Mv() + (&p).Mv() + (*&p).Mv() + p.Mp() + (&p).Mp() + (*&p).Mp() + } + { + var p P3 + p.Mv() // ERROR "undefined" + (&p).Mv() // ERROR "undefined" + (*&p).Mv() // ERROR "undefined" + (**&p).Mv() + (*p).Mv() + (&*p).Mv() + p.Mp() // ERROR "undefined" + (&p).Mp() // ERROR "undefined" + (*&p).Mp() // ERROR "undefined" + (**&p).Mp() + (*p).Mp() + (&*p).Mp() + } + { + var p P4 + p.Mv() // ERROR "undefined" + (&p).Mv() // ERROR "undefined" + (*&p).Mv() // ERROR "undefined" + (**&p).Mv() + (*p).Mv() + (&*p).Mv() + p.Mp() // ERROR "undefined" + (&p).Mp() // ERROR "undefined" + (*&p).Mp() // ERROR "undefined" + (**&p).Mp() + (*p).Mp() + (&*p).Mp() + } +} + +func _() { + type P5 struct{ T } + type P6 struct{ *T } + type P7 *struct{ T } + type P8 *struct{ *T } + + { + var p P5 + p.Mv() + (&p).Mv() + (*&p).Mv() + p.Mp() + (&p).Mp() + (*&p).Mp() + } + { + var p P6 + p.Mv() + (&p).Mv() + (*&p).Mv() + p.Mp() + (&p).Mp() + (*&p).Mp() + } + { + var p P7 + p.Mv() // ERROR "undefined" + (&p).Mv() // ERROR "undefined" + (*&p).Mv() // ERROR "undefined" + (**&p).Mv() + (*p).Mv() + (&*p).Mv() + p.Mp() // ERROR "undefined" + (&p).Mp() // ERROR "undefined" + (*&p).Mp() // ERROR "undefined" + (**&p).Mp() + (*p).Mp() + (&*p).Mp() + } + { + var p P8 + p.Mv() // ERROR "undefined" + (&p).Mv() // ERROR "undefined" + (*&p).Mv() // ERROR "undefined" + (**&p).Mv() + (*p).Mv() + (&*p).Mv() + p.Mp() // ERROR "undefined" + (&p).Mp() // ERROR "undefined" + (*&p).Mp() // ERROR "undefined" + (**&p).Mp() + (*p).Mp() + (&*p).Mp() + } +} From 451693af71a9d64f7f71a311d7076c8545672f88 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 29 Dec 2020 03:34:57 -0800 Subject: [PATCH 309/474] [dev.regabi] cmd/compile: simplify typecheckdef Reorganize code to be a little clearer. Also allows tightening typecheckdefstack from []ir.Node to []*ir.Name. Passes toolstash -cmp. Change-Id: I43df1a5e2a72dd3423b132d3afe363bf76700269 Reviewed-on: https://go-review.googlesource.com/c/go/+/280649 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Cuong Manh Le --- .../compile/internal/typecheck/typecheck.go | 83 ++++++++----------- 1 file changed, 36 insertions(+), 47 deletions(-) diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index b79739bfeba32..cf9b48f5a657c 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -246,7 +246,7 @@ const ( // marks variables that escape the local frame. // rewrites n.Op to be more specific in some cases. -var typecheckdefstack []ir.Node +var typecheckdefstack []*ir.Name // Resolve ONONAME to definition, if any. func Resolve(n ir.Node) (res ir.Node) { @@ -584,24 +584,9 @@ func indexlit(n ir.Node) ir.Node { // typecheck1 should ONLY be called from typecheck. 
func typecheck1(n ir.Node, top int) ir.Node { switch n.Op() { - case ir.OLITERAL, ir.ONAME, ir.ONONAME, ir.OTYPE: - if n.Sym() == nil { - return n - } - - if n.Op() == ir.ONAME { - n := n.(*ir.Name) - if n.BuiltinOp != 0 && top&ctxCallee == 0 { - base.Errorf("use of builtin %v not in function call", n.Sym()) - n.SetType(nil) - return n - } - } - - typecheckdef(n) - if n.Op() == ir.ONONAME { - n.SetType(nil) - return n + case ir.OLITERAL, ir.ONAME, ir.OTYPE: + if n.Sym() != nil { + typecheckdef(n) } } @@ -611,22 +596,37 @@ func typecheck1(n ir.Node, top int) ir.Node { base.Fatalf("typecheck %v", n.Op()) panic("unreachable") - // names case ir.OLITERAL: - if n.Type() == nil && n.Val().Kind() == constant.String { - base.Fatalf("string literal missing type") + if n.Sym() == nil && n.Type() == nil { + base.Fatalf("literal missing type: %v", n) } return n - case ir.ONIL, ir.ONONAME: + case ir.ONIL: + return n + + // names + case ir.ONONAME: + if !n.Diag() { + // Note: adderrorname looks for this string and + // adds context about the outer expression + base.ErrorfAt(n.Pos(), "undefined: %v", n.Sym()) + n.SetDiag(true) + } + n.SetType(nil) return n case ir.ONAME: n := n.(*ir.Name) - if n.Name().Decldepth == 0 { - n.Name().Decldepth = decldepth + if n.Decldepth == 0 { + n.Decldepth = decldepth } if n.BuiltinOp != 0 { + if top&ctxCallee == 0 { + base.Errorf("use of builtin %v not in function call", n.Sym()) + n.SetType(nil) + return n + } return n } if top&ctxAssign == 0 { @@ -652,9 +652,6 @@ func typecheck1(n ir.Node, top int) ir.Node { // types (ODEREF is with exprs) case ir.OTYPE: - if n.Type() == nil { - return n - } return n case ir.OTSLICE: @@ -1852,26 +1849,22 @@ func typecheckdef(n ir.Node) { defer tracePrint("typecheckdef", n)(nil) } - lno := ir.SetPos(n) - - if n.Op() == ir.ONONAME { - if !n.Diag() { - n.SetDiag(true) - - // Note: adderrorname looks for this string and - // adds context about the outer expression - base.ErrorfAt(base.Pos, "undefined: %v", n.Sym()) - } - base.Pos = lno + if n.Walkdef() == 1 { return } - if n.Walkdef() == 1 { - base.Pos = lno + if n.Type() != nil { // builtin + // Mark as Walkdef so that if n.SetType(nil) is called later, we + // won't try walking again. + if got := n.Walkdef(); got != 0 { + base.Fatalf("unexpected walkdef: %v", got) + } + n.SetWalkdef(1) return } - typecheckdefstack = append(typecheckdefstack, n) + lno := ir.SetPos(n) + typecheckdefstack = append(typecheckdefstack, n.(*ir.Name)) if n.Walkdef() == 2 { base.FlushErrors() fmt.Printf("typecheckdef loop:") @@ -1885,10 +1878,6 @@ func typecheckdef(n ir.Node) { n.SetWalkdef(2) - if n.Type() != nil || n.Sym() == nil { // builtin or no name - goto ret - } - switch n.Op() { default: base.Fatalf("typecheckdef %v", n.Op()) @@ -2367,7 +2356,7 @@ func deadcodeexpr(n ir.Node) ir.Node { func getIotaValue() int64 { if i := len(typecheckdefstack); i > 0 { if x := typecheckdefstack[i-1]; x.Op() == ir.OLITERAL { - return x.(*ir.Name).Iota() + return x.Iota() } } From f0d99def5b8919292a76b19dfdaf601e25dc6157 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Tue, 29 Dec 2020 10:08:30 -0800 Subject: [PATCH 310/474] [dev.regabi] cmd/compile: add newline to ir.Dump If you do two ir.Dumps in a row, there's no newline between them. 
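(Illustration, assuming a typical debugging session rather than anything in this CL: with the trailing "\n" added below, back-to-back dumps start on separate lines.)

	ir.Dump("before walk", fn)
	ir.Dump("after walk", fn) // header no longer begins on the previous dump's last line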
Change-Id: I1a80dd22da68cb677eb9abd7a50571ea33584010 Reviewed-on: https://go-review.googlesource.com/c/go/+/280672 Trust: Keith Randall Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/fmt.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index ea6b5856df6d5..6209702291bd5 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -978,7 +978,7 @@ func (l Nodes) Format(s fmt.State, verb rune) { // Dump prints the message s followed by a debug dump of n. func Dump(s string, n Node) { - fmt.Printf("%s [%p]%+v", s, n, n) + fmt.Printf("%s [%p]%+v\n", s, n, n) } // DumpList prints the message s followed by a debug dump of each node in the list. From 178c667db2858f52965609b24857d5448dfb12c4 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Tue, 29 Dec 2020 10:07:38 -0800 Subject: [PATCH 311/474] [dev.regabi] cmd/compile: fix OSLICEARR comments Change-Id: Ia6e734977a2cd80c91c28f4525be403f062dccc6 Reviewed-on: https://go-review.googlesource.com/c/go/+/280651 Trust: Keith Randall Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/node.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 0d56b5aeb8062..9536503085bca 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -218,10 +218,10 @@ const ( OPAREN // (Left) OSEND // Left <- Right OSLICE // Left[List[0] : List[1]] (Left is untypechecked or slice) - OSLICEARR // Left[List[0] : List[1]] (Left is array) + OSLICEARR // Left[List[0] : List[1]] (Left is pointer to array) OSLICESTR // Left[List[0] : List[1]] (Left is string) OSLICE3 // Left[List[0] : List[1] : List[2]] (Left is untypedchecked or slice) - OSLICE3ARR // Left[List[0] : List[1] : List[2]] (Left is array) + OSLICE3ARR // Left[List[0] : List[1] : List[2]] (Left is pointer to array) OSLICEHEADER // sliceheader{Left, List[0], List[1]} (Left is unsafe.Pointer, List[0] is length, List[1] is capacity) ORECOVER // recover() ORECV // <-Left From 477b049060966e90124edf950413575f84a9aa74 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Wed, 30 Dec 2020 18:43:10 -0800 Subject: [PATCH 312/474] [dev.regabi] cmd/compile: fix printing of method expressions OTYPE and OMETHEXPR were missing from OpPrec. So add them with the same precedences as OT{ARRAY,MAP,STRUCT,etc} and ODOT{,METH,INTER,etc}, respectively. However, ODEREF (which is also used for pointer types *T) has a lower precedence than other types, so pointer types need to be specially handled to assign them their correct, lower precedence. Incidentally, this also improves the error messages in issue15055.go, where we were adding unnecessary parentheses around the types in conversion expressions. Thanks to Cuong Manh Le for writing the test cases for #43428. Fixes #43428. 
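(A sketch of the effect, distilled from the test updates below; the exact error wording is per those tests.)

	_ = []byte()  // error now prints the type bare: []byte(), not ([]byte)()
	_ = *int()    // parses as *(int()), so the error names a conversion to int
	_ = (*int)()  // pointer types demote to ODEREF's lower precedence and keep parens: (*int)()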
Change-Id: I57e7979babe3ed9ef8a8b5a2a3745e3737dd785f Reviewed-on: https://go-review.googlesource.com/c/go/+/280873 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/ir/fmt.go | 6 ++++-- test/fixedbugs/issue15055.go | 8 +++++--- test/fixedbugs/issue43428.go | 25 +++++++++++++++++++++++++ 3 files changed, 34 insertions(+), 5 deletions(-) create mode 100644 test/fixedbugs/issue43428.go diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 6209702291bd5..92ea160a28bf3 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -216,6 +216,7 @@ var OpPrec = []int{ OTINTER: 8, OTMAP: 8, OTSTRUCT: 8, + OTYPE: 8, OINDEXMAP: 8, OINDEX: 8, OSLICE: 8, @@ -232,6 +233,7 @@ var OpPrec = []int{ ODOT: 8, OXDOT: 8, OCALLPART: 8, + OMETHEXPR: 8, OPLUS: 7, ONOT: 7, OBITNOT: 7, @@ -551,8 +553,8 @@ func exprFmt(n Node, s fmt.State, prec int) { } nprec := OpPrec[n.Op()] - if n.Op() == OTYPE && n.Sym() != nil { - nprec = 8 + if n.Op() == OTYPE && n.Type().IsPtr() { + nprec = OpPrec[ODEREF] } if prec > nprec { diff --git a/test/fixedbugs/issue15055.go b/test/fixedbugs/issue15055.go index e58047e411caa..33cf63aaad8d1 100644 --- a/test/fixedbugs/issue15055.go +++ b/test/fixedbugs/issue15055.go @@ -8,10 +8,12 @@ package main func main() { type name string - _ = []byte("abc", "def", 12) // ERROR "too many arguments to conversion to \[\]byte: \(\[\]byte\)\(.abc., .def., 12\)" + _ = []byte("abc", "def", 12) // ERROR "too many arguments to conversion to \[\]byte: \[\]byte\(.abc., .def., 12\)" _ = string("a", "b", nil) // ERROR "too many arguments to conversion to string: string\(.a., .b., nil\)" - _ = []byte() // ERROR "missing argument to conversion to \[\]byte: \(\[\]byte\)\(\)" + _ = []byte() // ERROR "missing argument to conversion to \[\]byte: \[\]byte\(\)" _ = string() // ERROR "missing argument to conversion to string: string\(\)" + _ = *int() // ERROR "missing argument to conversion to int: int\(\)" + _ = (*int)() // ERROR "missing argument to conversion to \*int: \(\*int\)\(\)" _ = name("a", 1, 3.3) // ERROR "too many arguments to conversion to name: name\(.a., 1, 3.3\)" - _ = map[string]string(nil, nil) // ERROR "too many arguments to conversion to map\[string\]string: \(map\[string\]string\)\(nil, nil\)" + _ = map[string]string(nil, nil) // ERROR "too many arguments to conversion to map\[string\]string: map\[string\]string\(nil, nil\)" } diff --git a/test/fixedbugs/issue43428.go b/test/fixedbugs/issue43428.go new file mode 100644 index 0000000000000..773a3f3516edc --- /dev/null +++ b/test/fixedbugs/issue43428.go @@ -0,0 +1,25 @@ +// errorcheck + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package p + +import "time" + +type T int + +func (T) Mv() {} +func (*T) Mp() {} + +var _ = []int{ + T.Mv, // ERROR "cannot use T\.Mv|incompatible type" + (*T).Mv, // ERROR "cannot use \(\*T\)\.Mv|incompatible type" + (*T).Mp, // ERROR "cannot use \(\*T\)\.Mp|incompatible type" + + time.Time.GobEncode, // ERROR "cannot use time\.Time\.GobEncode|incompatible type" + (*time.Time).GobEncode, // ERROR "cannot use \(\*time\.Time\)\.GobEncode|incompatible type" + (*time.Time).GobDecode, // ERROR "cannot use \(\*time\.Time\)\.GobDecode|incompatible type" + +} From 8fe119765404d29c5efe0fb86afebfa523f83a7f Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Wed, 30 Dec 2020 14:52:50 +0700 Subject: [PATCH 313/474] [dev.regabi] cmd/compile: remove Name.orig Passes toolstash -cmp. Change-Id: Ie563ece7e4da14af46adc660b3d39757eb47c067 Reviewed-on: https://go-review.googlesource.com/c/go/+/280734 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/name.go | 4 +--- src/cmd/compile/internal/ir/sizeof_test.go | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 697b04f541bcf..c79b7e52e5416 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -44,8 +44,7 @@ type Name struct { Offset_ int64 val constant.Value Opt interface{} // for use by escape analysis - orig Node - Embed *[]Embed // list of embedded files, for ONAME var + Embed *[]Embed // list of embedded files, for ONAME var PkgName *PkgName // real package for import . names // For a local variable (not param) or extern, the initializing assignment (OAS or OAS2). @@ -219,7 +218,6 @@ func newNameAt(pos src.XPos, op Op, sym *types.Sym) *Name { n := new(Name) n.op = op n.pos = pos - n.orig = n n.sym = sym return n } diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go index 61f207af2048e..8f5fae8a1200f 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -21,7 +21,7 @@ func TestSizeof(t *testing.T) { _64bit uintptr // size on 64bit platforms }{ {Func{}, 196, 344}, - {Name{}, 132, 232}, + {Name{}, 124, 216}, } for _, tt := range tests { From 77fd81a3e6c4aa248df135cc24be2871689cc7c3 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Wed, 30 Dec 2020 14:08:44 +0700 Subject: [PATCH 314/474] [dev.regabi] cmd/compile: use names for keep alive variables in function call Back before the Russquake, Node.Nbody of OCALL* nodes was used to attach variables that must be kept alive during the call. Now, after the Russquake, we have CallExpr to represent a function call, so use a dedicated field for those variables instead. Passes toolstash -cmp. 
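(A sketch of the scenario served, under the usual uintptr(unsafe.Pointer(...)) assumption; the syscall example is illustrative, not from the CL.)

	// For a call like
	//	syscall.Syscall(fn, uintptr(unsafe.Pointer(&buf)), 0, 0)
	// order copies &buf into a temporary, appends that temporary to the
	// CallExpr's KeepAlive list (see the order.go hunk below), and SSA
	// generation emits an OVARLIVE per entry (see the ssa.go hunk below)
	// so buf stays live until the call returns.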
Change-Id: I4f40ebefcc7c41cdcc4e29c7a6d8496a083b68f4 Reviewed-on: https://go-review.googlesource.com/c/go/+/280733 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/expr.go | 14 ++++++------ src/cmd/compile/internal/ir/node_gen.go | 30 ++++++++++++++++++++++--- src/cmd/compile/internal/ssagen/ssa.go | 4 +++- src/cmd/compile/internal/walk/order.go | 2 +- src/cmd/compile/internal/walk/stmt.go | 2 +- 5 files changed, 39 insertions(+), 13 deletions(-) diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 55e4b61baf04e..f435a5bb2613e 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -159,13 +159,13 @@ const ( type CallExpr struct { miniExpr origNode - X Node - Args Nodes - Rargs Nodes // TODO(rsc): Delete. - Body Nodes // TODO(rsc): Delete. - IsDDD bool - Use CallUse - NoInline bool + X Node + Args Nodes + Rargs Nodes // TODO(rsc): Delete. + KeepAlive []*Name // vars to be kept alive until call returns + IsDDD bool + Use CallUse + NoInline bool } func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr { diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 65c0b239ed568..7f494b16cd340 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -251,7 +251,7 @@ func (n *CallExpr) copy() Node { c.init = copyNodes(c.init) c.Args = copyNodes(c.Args) c.Rargs = copyNodes(c.Rargs) - c.Body = copyNodes(c.Body) + c.KeepAlive = copyNames(c.KeepAlive) return &c } func (n *CallExpr) doChildren(do func(Node) bool) bool { @@ -267,7 +267,7 @@ func (n *CallExpr) doChildren(do func(Node) bool) bool { if doNodes(n.Rargs, do) { return true } - if doNodes(n.Body, do) { + if doNames(n.KeepAlive, do) { return true } return false @@ -279,7 +279,7 @@ func (n *CallExpr) editChildren(edit func(Node) Node) { } editNodes(n.Args, edit) editNodes(n.Rargs, edit) - editNodes(n.Body, edit) + editNames(n.KeepAlive, edit) } func (n *CaseClause) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } @@ -1381,6 +1381,30 @@ func editCommClauses(list []*CommClause, edit func(Node) Node) { } } +func copyNames(list []*Name) []*Name { + if list == nil { + return nil + } + c := make([]*Name, len(list)) + copy(c, list) + return c +} +func doNames(list []*Name, do func(Node) bool) bool { + for _, x := range list { + if x != nil && do(x) { + return true + } + } + return false +} +func editNames(list []*Name, edit func(Node) Node) { + for i, x := range list { + if x != nil { + list[i] = edit(x).(*Name) + } + } +} + func copyNodes(list []Node) []Node { if list == nil { return nil diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index ddf65eb20961f..022959a934ef4 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -4867,7 +4867,9 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val s.vars[memVar] = call } // Insert OVARLIVE nodes - s.stmtList(n.Body) + for _, name := range n.KeepAlive { + s.stmt(ir.NewUnaryExpr(n.Pos(), ir.OVARLIVE, name)) + } // Finish block for defers if k == callDefer || k == callDeferStack { diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index b3d2eaec17a34..681f5dcc76501 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -518,7 +518,7 @@ func (o 
*orderState) call(nn ir.Node) { x := o.copyExpr(arg.X) arg.X = x x.Name().SetAddrtaken(true) // ensure SSA keeps the x variable - n.Body.Append(typecheck.Stmt(ir.NewUnaryExpr(base.Pos, ir.OVARLIVE, x))) + n.KeepAlive = append(n.KeepAlive, x.(*ir.Name)) } } } diff --git a/src/cmd/compile/internal/walk/stmt.go b/src/cmd/compile/internal/walk/stmt.go index f843d2c4faf32..cfd1da46d278f 100644 --- a/src/cmd/compile/internal/walk/stmt.go +++ b/src/cmd/compile/internal/walk/stmt.go @@ -228,7 +228,7 @@ func walkGoDefer(n *ir.GoDeferStmt) ir.Node { case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: call := call.(*ir.CallExpr) - if len(call.Body) > 0 { + if len(call.KeepAlive) > 0 { n.Call = wrapCall(call, &init) } else { n.Call = walkExpr(call, &init) From dfbcff80c65991e90b7a06a09e4399f7725356dc Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Thu, 31 Dec 2020 16:51:12 +0700 Subject: [PATCH 315/474] [dev.regabi] cmd/compile: make copyExpr return *ir.Name directly copyExpr just calls copyExpr1 with "clear" set to false, so make it return *ir.Name directly instead of ir.Node. Passes toolstash -cmp. Change-Id: I31ca1d88d9eaf8ac37517022f1c74285ffce07d3 Reviewed-on: https://go-review.googlesource.com/c/go/+/280714 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/walk/order.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index 681f5dcc76501..a2bd0cf10aca6 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -102,7 +102,7 @@ func (o *orderState) newTemp(t *types.Type, clear bool) *ir.Name { // copyExpr behaves like newTemp but also emits // code to initialize the temporary to the value n. -func (o *orderState) copyExpr(n ir.Node) ir.Node { +func (o *orderState) copyExpr(n ir.Node) *ir.Name { return o.copyExpr1(n, false) } @@ -518,7 +518,7 @@ func (o *orderState) call(nn ir.Node) { x := o.copyExpr(arg.X) arg.X = x x.Name().SetAddrtaken(true) // ensure SSA keeps the x variable - n.KeepAlive = append(n.KeepAlive, x.(*ir.Name)) + n.KeepAlive = append(n.KeepAlive, x) } } } From fd22df990545bce77ff78b27c4f7220c7a666a84 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Thu, 31 Dec 2020 18:25:35 -0800 Subject: [PATCH 316/474] [dev.regabi] cmd/compile: remove idempotent Name() calls [generated] [git-generate] cd src/cmd/compile/internal/ir pkgs=$(grep -l -w Name ../*/*.go | xargs dirname | sort -u | grep -v '/ir$') rf ' ex . 
'"$(echo $pkgs)"' { var n *Name n.Name() -> n } ' Change-Id: I6bfce6417a6dba833d2f652ae212a32c11bc5ef6 Reviewed-on: https://go-review.googlesource.com/c/go/+/280972 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky Reviewed-by: Cuong Manh Le TryBot-Result: Go Bot --- src/cmd/compile/internal/dwarfgen/dwarf.go | 22 +++++++-------- src/cmd/compile/internal/escape/escape.go | 2 +- src/cmd/compile/internal/gc/obj.go | 4 +-- src/cmd/compile/internal/ir/expr.go | 4 +-- src/cmd/compile/internal/ir/name.go | 4 +-- src/cmd/compile/internal/liveness/plive.go | 14 +++++----- src/cmd/compile/internal/noder/noder.go | 6 ++-- src/cmd/compile/internal/pkginit/initorder.go | 2 +- src/cmd/compile/internal/ssagen/nowb.go | 4 +-- src/cmd/compile/internal/ssagen/pgen.go | 6 ++-- src/cmd/compile/internal/ssagen/ssa.go | 4 +-- src/cmd/compile/internal/typecheck/func.go | 6 ++-- src/cmd/compile/internal/typecheck/iexport.go | 4 +-- .../compile/internal/typecheck/typecheck.go | 28 +++++++++---------- src/cmd/compile/internal/walk/expr.go | 2 +- src/cmd/compile/internal/walk/order.go | 4 +-- src/cmd/compile/internal/walk/stmt.go | 2 +- 17 files changed, 59 insertions(+), 59 deletions(-) diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go index d0bee58442e39..42c83b1f239e5 100644 --- a/src/cmd/compile/internal/dwarfgen/dwarf.go +++ b/src/cmd/compile/internal/dwarfgen/dwarf.go @@ -127,7 +127,7 @@ func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, } func declPos(decl *ir.Name) src.XPos { - if decl.Name().Defn != nil && (decl.Name().Captured() || decl.Name().Byval()) { + if decl.Defn != nil && (decl.Captured() || decl.Byval()) { // It's not clear which position is correct for captured variables here: // * decl.Pos is the wrong position for captured variables, in the inner // function, but it is the right position in the outer function. @@ -142,7 +142,7 @@ func declPos(decl *ir.Name) src.XPos { // case statement. // This code is probably wrong for type switch variables that are also // captured. - return decl.Name().Defn.Pos() + return decl.Defn.Pos() } return decl.Pos() } @@ -211,7 +211,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir // misleading location for the param (we want pointer-to-heap // and not stack). 
// TODO(thanm): generate a better location expression - stackcopy := n.Name().Stackcopy + stackcopy := n.Stackcopy if stackcopy != nil && (stackcopy.Class_ == ir.PPARAM || stackcopy.Class_ == ir.PPARAMOUT) { abbrev = dwarf.DW_ABRV_PARAM_LOCLIST isReturnValue = (stackcopy.Class_ == ir.PPARAMOUT) @@ -219,9 +219,9 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir } inlIndex := 0 if base.Flag.GenDwarfInl > 1 { - if n.Name().InlFormal() || n.Name().InlLocal() { + if n.InlFormal() || n.InlLocal() { inlIndex = posInlIndex(n.Pos()) + 1 - if n.Name().InlFormal() { + if n.InlFormal() { abbrev = dwarf.DW_ABRV_PARAM_LOCLIST } } @@ -312,9 +312,9 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { delete(fnsym.Func().Autot, reflectdata.TypeLinksym(n.Type())) inlIndex := 0 if base.Flag.GenDwarfInl > 1 { - if n.Name().InlFormal() || n.Name().InlLocal() { + if n.InlFormal() || n.InlLocal() { inlIndex = posInlIndex(n.Pos()) + 1 - if n.Name().InlFormal() { + if n.InlFormal() { abbrev = dwarf.DW_ABRV_PARAM } } @@ -323,7 +323,7 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { return &dwarf.Var{ Name: n.Sym().Name, IsReturnValue: n.Class_ == ir.PPARAMOUT, - IsInlFormal: n.Name().InlFormal(), + IsInlFormal: n.InlFormal(), Abbrev: abbrev, StackOffset: int32(offs), Type: base.Ctxt.Lookup(typename), @@ -381,9 +381,9 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var typename := dwarf.InfoPrefix + gotype.Name[len("type."):] inlIndex := 0 if base.Flag.GenDwarfInl > 1 { - if n.Name().InlFormal() || n.Name().InlLocal() { + if n.InlFormal() || n.InlLocal() { inlIndex = posInlIndex(n.Pos()) + 1 - if n.Name().InlFormal() { + if n.InlFormal() { abbrev = dwarf.DW_ABRV_PARAM_LOCLIST } } @@ -392,7 +392,7 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var dvar := &dwarf.Var{ Name: n.Sym().Name, IsReturnValue: n.Class_ == ir.PPARAMOUT, - IsInlFormal: n.Name().InlFormal(), + IsInlFormal: n.InlFormal(), Abbrev: abbrev, Type: base.Ctxt.Lookup(typename), // The stack offset is used as a sorting key, so for decomposed diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index b5b09beb5a8db..98dbf54b755bc 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -1158,7 +1158,7 @@ func (e *escape) newLoc(n ir.Node, transient bool) *location { if n.Op() == ir.ONAME { n := n.(*ir.Name) if n.Curfn != e.curfn { - base.Fatalf("curfn mismatch: %v != %v", n.Name().Curfn, e.curfn) + base.Fatalf("curfn mismatch: %v != %v", n.Curfn, e.curfn) } if n.Opt != nil { diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 1e8ac8ebb2973..30cfac1b717c9 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -264,14 +264,14 @@ func ggloblnod(nam *ir.Name) { s := nam.Linksym() s.Gotype = reflectdata.TypeLinksym(nam.Type()) flags := 0 - if nam.Name().Readonly() { + if nam.Readonly() { flags = obj.RODATA } if nam.Type() != nil && !nam.Type().HasPointers() { flags |= obj.NOPTR } base.Ctxt.Globl(s, nam.Type().Width, flags) - if nam.Name().LibfuzzerExtraCounter() { + if nam.LibfuzzerExtraCounter() { s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER } if nam.Sym().Linkname != "" { diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index f435a5bb2613e..88fbdff1e0feb 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ 
b/src/cmd/compile/internal/ir/expr.go @@ -771,11 +771,11 @@ func staticValue1(nn Node) Node { return nil } n := nn.(*Name) - if n.Class_ != PAUTO || n.Name().Addrtaken() { + if n.Class_ != PAUTO || n.Addrtaken() { return nil } - defn := n.Name().Defn + defn := n.Defn if defn == nil { return nil } diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index c79b7e52e5416..5acb2d07627a1 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -312,7 +312,7 @@ func (n *Name) MarkReadonly() { if n.Op() != ONAME { base.Fatalf("Node.MarkReadonly %v", n.Op()) } - n.Name().setReadonly(true) + n.setReadonly(true) // Mark the linksym as readonly immediately // so that the SSA backend can use this information. // It will be overridden later during dumpglobls. @@ -433,7 +433,7 @@ func IsParamHeapCopy(n Node) bool { return false } name := n.(*Name) - return name.Class_ == PAUTOHEAP && name.Name().Stackcopy != nil + return name.Class_ == PAUTOHEAP && name.Stackcopy != nil } var RegFP *Name diff --git a/src/cmd/compile/internal/liveness/plive.go b/src/cmd/compile/internal/liveness/plive.go index 89c70df65a490..91f10b0a9dc21 100644 --- a/src/cmd/compile/internal/liveness/plive.go +++ b/src/cmd/compile/internal/liveness/plive.go @@ -255,7 +255,7 @@ func (lv *liveness) valueEffects(v *ssa.Value) (int32, liveEffect) { // variable" ICEs (issue 19632). switch v.Op { case ssa.OpVarDef, ssa.OpVarKill, ssa.OpVarLive, ssa.OpKeepAlive: - if !n.Name().Used() { + if !n.Used() { return -1, 0 } } @@ -688,11 +688,11 @@ func (lv *liveness) epilogue() { if lv.fn.HasDefer() { for i, n := range lv.vars { if n.Class_ == ir.PPARAMOUT { - if n.Name().IsOutputParamHeapAddr() { + if n.IsOutputParamHeapAddr() { // Just to be paranoid. Heap addresses are PAUTOs. base.Fatalf("variable %v both output param and heap output param", n) } - if n.Name().Heapaddr != nil { + if n.Heapaddr != nil { // If this variable moved to the heap, then // its stack copy is not live. continue @@ -700,21 +700,21 @@ func (lv *liveness) epilogue() { // Note: zeroing is handled by zeroResults in walk.go. livedefer.Set(int32(i)) } - if n.Name().IsOutputParamHeapAddr() { + if n.IsOutputParamHeapAddr() { // This variable will be overwritten early in the function // prologue (from the result of a mallocgc) but we need to // zero it in case that malloc causes a stack scan. - n.Name().SetNeedzero(true) + n.SetNeedzero(true) livedefer.Set(int32(i)) } - if n.Name().OpenDeferSlot() { + if n.OpenDeferSlot() { // Open-coded defer args slots must be live // everywhere in a function, since a panic can // occur (almost) anywhere. Because it is live // everywhere, it must be zeroed on entry. livedefer.Set(int32(i)) // It was already marked as Needzero when created. - if !n.Name().Needzero() { + if !n.Needzero() { base.Fatalf("all pointer-containing defer arg slots should have Needzero set") } } diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index f4b5e0cf91d40..748fd96380083 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -1835,7 +1835,7 @@ func oldname(s *types.Sym) ir.Node { // the := it looks like a reference to the outer x so we'll // make x a closure variable unnecessarily. n := n.(*ir.Name) - c := n.Name().Innermost + c := n.Innermost if c == nil || c.Curfn != ir.CurFunc { // Do not have a closure var for the active closure yet; make one. 
c = typecheck.NewName(s) @@ -1845,8 +1845,8 @@ func oldname(s *types.Sym) ir.Node { // Link into list of active closure variables. // Popped from list in func funcLit. - c.Outer = n.Name().Innermost - n.Name().Innermost = c + c.Outer = n.Innermost + n.Innermost = c ir.CurFunc.ClosureVars = append(ir.CurFunc.ClosureVars, c) } diff --git a/src/cmd/compile/internal/pkginit/initorder.go b/src/cmd/compile/internal/pkginit/initorder.go index c6e223954d33a..1c222c1de4388 100644 --- a/src/cmd/compile/internal/pkginit/initorder.go +++ b/src/cmd/compile/internal/pkginit/initorder.go @@ -197,7 +197,7 @@ func (o *InitOrder) findInitLoopAndExit(n *ir.Name, path *[]*ir.Name) { // There might be multiple loops involving n; by sorting // references, we deterministically pick the one reported. - refers := collectDeps(n.Name().Defn, false).Sorted(func(ni, nj *ir.Name) bool { + refers := collectDeps(n.Defn, false).Sorted(func(ni, nj *ir.Name) bool { return ni.Pos().Before(nj.Pos()) }) diff --git a/src/cmd/compile/internal/ssagen/nowb.go b/src/cmd/compile/internal/ssagen/nowb.go index 7b2e68c8e79ee..26858fac873c4 100644 --- a/src/cmd/compile/internal/ssagen/nowb.go +++ b/src/cmd/compile/internal/ssagen/nowb.go @@ -76,7 +76,7 @@ func (c *nowritebarrierrecChecker) findExtraCalls(nn ir.Node) { return } fn := n.X.(*ir.Name) - if fn.Class_ != ir.PFUNC || fn.Name().Defn == nil { + if fn.Class_ != ir.PFUNC || fn.Defn == nil { return } if !types.IsRuntimePkg(fn.Sym().Pkg) || fn.Sym().Name != "systemstack" { @@ -88,7 +88,7 @@ func (c *nowritebarrierrecChecker) findExtraCalls(nn ir.Node) { switch arg.Op() { case ir.ONAME: arg := arg.(*ir.Name) - callee = arg.Name().Defn.(*ir.Func) + callee = arg.Defn.(*ir.Func) case ir.OCLOSURE: arg := arg.(*ir.ClosureExpr) callee = arg.Func diff --git a/src/cmd/compile/internal/ssagen/pgen.go b/src/cmd/compile/internal/ssagen/pgen.go index 72ce233fdad0e..2be10ff7af354 100644 --- a/src/cmd/compile/internal/ssagen/pgen.go +++ b/src/cmd/compile/internal/ssagen/pgen.go @@ -86,7 +86,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { for _, l := range f.RegAlloc { if ls, ok := l.(ssa.LocalSlot); ok { - ls.N.Name().SetUsed(true) + ls.N.SetUsed(true) } } @@ -98,10 +98,10 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { case ir.PPARAM, ir.PPARAMOUT: // Don't modify nodfp; it is a global. if n != ir.RegFP { - n.Name().SetUsed(true) + n.SetUsed(true) } case ir.PAUTO: - n.Name().SetUsed(true) + n.SetUsed(true) } } if !scratchUsed { diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 022959a934ef4..8e3b09aac3d63 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -6223,7 +6223,7 @@ func (s *state) addNamedValue(n *ir.Name, v *ssa.Value) { // from being assigned too early. See #14591 and #14762. TODO: allow this. 
return } - loc := ssa.LocalSlot{N: n.Name(), Type: n.Type(), Off: 0} + loc := ssa.LocalSlot{N: n, Type: n.Type(), Off: 0} values, ok := s.f.NamedValues[loc] if !ok { s.f.Names = append(s.f.Names, loc) @@ -7198,7 +7198,7 @@ func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym { func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot { node := parent.N - if node.Class_ != ir.PAUTO || node.Name().Addrtaken() { + if node.Class_ != ir.PAUTO || node.Addrtaken() { // addressed things and non-autos retain their parents (i.e., cannot truly be split) return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset} } diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index 75f38d588d377..3552bcf924507 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -131,10 +131,10 @@ func CaptureVars(fn *ir.Func) { outermost := v.Defn.(*ir.Name) // out parameters will be assigned to implicitly upon return. - if outermost.Class_ != ir.PPARAMOUT && !outermost.Name().Addrtaken() && !outermost.Name().Assigned() && v.Type().Width <= 128 { + if outermost.Class_ != ir.PPARAMOUT && !outermost.Addrtaken() && !outermost.Assigned() && v.Type().Width <= 128 { v.SetByval(true) } else { - outermost.Name().SetAddrtaken(true) + outermost.SetAddrtaken(true) outer = NodAddr(outer) } @@ -147,7 +147,7 @@ func CaptureVars(fn *ir.Func) { if v.Byval() { how = "value" } - base.WarnfAt(v.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym(), outermost.Name().Addrtaken(), outermost.Name().Assigned(), int32(v.Type().Width)) + base.WarnfAt(v.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym(), outermost.Addrtaken(), outermost.Assigned(), int32(v.Type().Width)) } outer = Expr(outer) diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index aa16a54bb83b1..50acb10a9a9c1 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -1521,8 +1521,8 @@ func (w *exportWriter) localName(n *ir.Name) { // PPARAM/PPARAMOUT, because we only want to include vargen in // non-param names. 
var v int32 - if n.Class_ == ir.PAUTO || (n.Class_ == ir.PAUTOHEAP && n.Name().Stackcopy == nil) { - v = n.Name().Vargen + if n.Class_ == ir.PAUTO || (n.Class_ == ir.PAUTOHEAP && n.Stackcopy == nil) { + v = n.Vargen } w.localIdent(n.Sym(), v) diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index cf9b48f5a657c..519d8ddfd9505 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -57,7 +57,7 @@ func Package() { base.Timer.Start("fe", "typecheck", "top1") for i := 0; i < len(Target.Decls); i++ { n := Target.Decls[i] - if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.(*ir.Decl).X.Name().Alias()) { + if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.(*ir.Decl).X.Alias()) { Target.Decls[i] = Stmt(n) } } @@ -69,7 +69,7 @@ func Package() { base.Timer.Start("fe", "typecheck", "top2") for i := 0; i < len(Target.Decls); i++ { n := Target.Decls[i] - if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).X.Name().Alias() { + if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).X.Alias() { Target.Decls[i] = Stmt(n) } } @@ -636,7 +636,7 @@ func typecheck1(n ir.Node, top int) ir.Node { n.SetType(nil) return n } - n.Name().SetUsed(true) + n.SetUsed(true) } return n @@ -1729,9 +1729,9 @@ func checkassign(stmt ir.Node, n ir.Node) { r := ir.OuterValue(n) if r.Op() == ir.ONAME { r := r.(*ir.Name) - r.Name().SetAssigned(true) - if r.Name().IsClosureVar() { - r.Name().Defn.Name().SetAssigned(true) + r.SetAssigned(true) + if r.IsClosureVar() { + r.Defn.Name().SetAssigned(true) } } } @@ -1938,9 +1938,9 @@ func typecheckdef(n ir.Node) { case ir.ONAME: n := n.(*ir.Name) - if n.Name().Ntype != nil { - n.Name().Ntype = typecheckNtype(n.Name().Ntype) - n.SetType(n.Name().Ntype.Type()) + if n.Ntype != nil { + n.Ntype = typecheckNtype(n.Ntype) + n.SetType(n.Ntype.Type()) if n.Type() == nil { n.SetDiag(true) goto ret @@ -1950,7 +1950,7 @@ func typecheckdef(n ir.Node) { if n.Type() != nil { break } - if n.Name().Defn == nil { + if n.Defn == nil { if n.BuiltinOp != 0 { // like OPRINTN break } @@ -1965,13 +1965,13 @@ func typecheckdef(n ir.Node) { base.Fatalf("var without type, init: %v", n.Sym()) } - if n.Name().Defn.Op() == ir.ONAME { - n.Name().Defn = Expr(n.Name().Defn) - n.SetType(n.Name().Defn.Type()) + if n.Defn.Op() == ir.ONAME { + n.Defn = Expr(n.Defn) + n.SetType(n.Defn.Type()) break } - n.Name().Defn = Stmt(n.Name().Defn) // fills in n.Type + n.Defn = Stmt(n.Defn) // fills in n.Type case ir.OTYPE: n := n.(*ir.Name) diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index 0d7ffca15d00f..f06a87c37fd7e 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -54,7 +54,7 @@ func walkExpr(n ir.Node, init *ir.Nodes) ir.Node { if n.Op() == ir.ONAME && n.(*ir.Name).Class_ == ir.PAUTOHEAP { n := n.(*ir.Name) - nn := ir.NewStarExpr(base.Pos, n.Name().Heapaddr) + nn := ir.NewStarExpr(base.Pos, n.Heapaddr) nn.X.MarkNonNil() return walkExpr(typecheck.Expr(nn), init) } diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index a2bd0cf10aca6..e40c877ea939c 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -406,7 +406,7 @@ func (o *orderState) edge() { // Create 
a new uint8 counter to be allocated in section // __libfuzzer_extra_counters. counter := staticinit.StaticName(types.Types[types.TUINT8]) - counter.Name().SetLibfuzzerExtraCounter(true) + counter.SetLibfuzzerExtraCounter(true) // counter += 1 incr := ir.NewAssignOpStmt(base.Pos, ir.OADD, counter, ir.NewInt(1)) @@ -517,7 +517,7 @@ func (o *orderState) call(nn ir.Node) { if arg.X.Type().IsUnsafePtr() { x := o.copyExpr(arg.X) arg.X = x - x.Name().SetAddrtaken(true) // ensure SSA keeps the x variable + x.SetAddrtaken(true) // ensure SSA keeps the x variable n.KeepAlive = append(n.KeepAlive, x) } } diff --git a/src/cmd/compile/internal/walk/stmt.go b/src/cmd/compile/internal/walk/stmt.go index cfd1da46d278f..8641a58e2e797 100644 --- a/src/cmd/compile/internal/walk/stmt.go +++ b/src/cmd/compile/internal/walk/stmt.go @@ -181,7 +181,7 @@ func walkDecl(n *ir.Decl) ir.Node { if base.Flag.CompilingRuntime { base.Errorf("%v escapes to heap, not allowed in runtime", v) } - nn := ir.NewAssignStmt(base.Pos, v.Name().Heapaddr, callnew(v.Type())) + nn := ir.NewAssignStmt(base.Pos, v.Heapaddr, callnew(v.Type())) nn.Def = true return walkStmt(typecheck.Stmt(nn)) } From b8fd3440cd3973a16184c4c878b557cf6c6703e4 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Thu, 31 Dec 2020 21:32:52 -0800 Subject: [PATCH 317/474] [dev.regabi] cmd/compile: report unused variables during typecheck Unused variables are a type-checking error, so they should be reported during typecheck rather than walk. One catch is that we only want to report unused-variable errors for functions that type check successfully, but some errors are reported during noding, so we don't have an easy way to detect that currently. As an approximate solution, we simply check if we've reported any errors yet. Passes toolstash -cmp. Change-Id: I9400bfc94312c71d0c908a491e85c16d62224c9c Reviewed-on: https://go-review.googlesource.com/c/go/+/280973 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- .../compile/internal/typecheck/typecheck.go | 34 +++++++++++++++++++ src/cmd/compile/internal/walk/walk.go | 30 ---------------- 2 files changed, 34 insertions(+), 30 deletions(-) diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index 519d8ddfd9505..4b5c3198caccf 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -171,6 +171,7 @@ func FuncBody(n *ir.Func) { decldepth = 1 errorsBefore := base.Errors() Stmts(n.Body) + CheckUnused(n) CheckReturn(n) if base.Errors() > errorsBefore { n.Body.Set(nil) // type errors; do not compile @@ -2203,6 +2204,39 @@ func isTermNode(n ir.Node) bool { return false } +// CheckUnused checks for any declared variables that weren't used. +func CheckUnused(fn *ir.Func) { + // Only report unused variables if we haven't seen any type-checking + // errors yet. + if base.Errors() != 0 { + return + } + + // Propagate the used flag for typeswitch variables up to the NONAME in its definition. 
+ for _, ln := range fn.Dcl { + if ln.Op() == ir.ONAME && ln.Class_ == ir.PAUTO && ln.Used() { + if guard, ok := ln.Defn.(*ir.TypeSwitchGuard); ok { + guard.Used = true + } + } + } + + for _, ln := range fn.Dcl { + if ln.Op() != ir.ONAME || ln.Class_ != ir.PAUTO || ln.Used() { + continue + } + if defn, ok := ln.Defn.(*ir.TypeSwitchGuard); ok { + if defn.Used { + continue + } + base.ErrorfAt(defn.Tag.Pos(), "%v declared but not used", ln.Sym()) + defn.Used = true // suppress repeats + } else { + base.ErrorfAt(ln.Pos(), "%v declared but not used", ln.Sym()) + } + } +} + // CheckReturn makes sure that fn terminates appropriately. func CheckReturn(fn *ir.Func) { if fn.Type().NumResults() != 0 && len(fn.Body) != 0 { diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go index b6be949689e58..25f53a8e7c50c 100644 --- a/src/cmd/compile/internal/walk/walk.go +++ b/src/cmd/compile/internal/walk/walk.go @@ -37,36 +37,6 @@ func Walk(fn *ir.Func) { lno := base.Pos - // Final typecheck for any unused variables. - for i, ln := range fn.Dcl { - if ln.Op() == ir.ONAME && (ln.Class_ == ir.PAUTO || ln.Class_ == ir.PAUTOHEAP) { - ln = typecheck.AssignExpr(ln).(*ir.Name) - fn.Dcl[i] = ln - } - } - - // Propagate the used flag for typeswitch variables up to the NONAME in its definition. - for _, ln := range fn.Dcl { - if ln.Op() == ir.ONAME && (ln.Class_ == ir.PAUTO || ln.Class_ == ir.PAUTOHEAP) && ln.Defn != nil && ln.Defn.Op() == ir.OTYPESW && ln.Used() { - ln.Defn.(*ir.TypeSwitchGuard).Used = true - } - } - - for _, ln := range fn.Dcl { - if ln.Op() != ir.ONAME || (ln.Class_ != ir.PAUTO && ln.Class_ != ir.PAUTOHEAP) || ln.Sym().Name[0] == '&' || ln.Used() { - continue - } - if defn, ok := ln.Defn.(*ir.TypeSwitchGuard); ok { - if defn.Used { - continue - } - base.ErrorfAt(defn.Tag.Pos(), "%v declared but not used", ln.Sym()) - defn.Used = true // suppress repeats - } else { - base.ErrorfAt(ln.Pos(), "%v declared but not used", ln.Sym()) - } - } - base.Pos = lno if base.Errors() > errorsBefore { return From 0f1d2129c4c294a895480b79eeab8d22c07ac573 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Thu, 31 Dec 2020 21:48:27 -0800 Subject: [PATCH 318/474] [dev.regabi] cmd/compile: reshuffle type-checking code [generated] This commit splits up typecheck.Package and moves the code elsewhere. The type-checking code is moved into noder, so that it can eventually be interleaved with the noding process. The non-type-checking code is moved back into package gc, so that it can be incorporated into appropriate compiler backend phases. While here, deadcode removal is moved into its own package. Passes toolstash -cmp. [git-generate] cd src/cmd/compile/internal/typecheck : Split into two functions. sed -i -e '/Phase 6/i}\n\nfunc postTypecheck() {' typecheck.go rf ' # Export needed identifiers. mv deadcode Deadcode mv loadsys InitRuntime mv declareUniverse DeclareUniverse mv dirtyAddrtaken DirtyAddrtaken mv computeAddrtaken ComputeAddrtaken mv incrementalAddrtaken IncrementalAddrtaken # Move into new package. mv Deadcode deadcodeslice deadcodeexpr deadcode.go mv deadcode.go cmd/compile/internal/deadcode # Move top-level type-checking code into noder. # Move DeclVars there too, now that nothing else uses it. mv DeclVars Package noder.go mv noder.go cmd/compile/internal/noder # Move non-type-checking code back into gc. mv postTypecheck main.go mv main.go cmd/compile/internal/gc ' cd ../deadcode rf ' # Destutter names. 
mv Deadcode Func mv deadcodeslice stmts mv deadcodeexpr expr ' cd ../noder rf ' # Move functions up, next to their related code. mv noder.go:/func Package/-1,$ \ noder.go:/makeSrcPosBase translates/-1 mv noder.go:/func DeclVars/-3,$ \ noder.go:/constState tracks/-1 ' cd ../gc rf ' # Inline postTypecheck code back into gc.Main. mv main.go:/func postTypecheck/+0,/AllImportedBodies/+1 \ main.go:/Build init task/-1 rm postTypecheck ' Change-Id: Ie5e992ece4a42204cce6aa98dd6eb52112d098c8 Reviewed-on: https://go-review.googlesource.com/c/go/+/280974 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/deadcode/deadcode.go | 150 +++++++++++ src/cmd/compile/internal/gc/main.go | 42 ++- src/cmd/compile/internal/noder/noder.go | 119 ++++++++- src/cmd/compile/internal/typecheck/dcl.go | 54 ---- src/cmd/compile/internal/typecheck/func.go | 10 +- src/cmd/compile/internal/typecheck/subr.go | 16 +- src/cmd/compile/internal/typecheck/syms.go | 4 +- .../compile/internal/typecheck/typecheck.go | 243 +----------------- .../compile/internal/typecheck/universe.go | 4 +- 9 files changed, 327 insertions(+), 315 deletions(-) create mode 100644 src/cmd/compile/internal/deadcode/deadcode.go diff --git a/src/cmd/compile/internal/deadcode/deadcode.go b/src/cmd/compile/internal/deadcode/deadcode.go new file mode 100644 index 0000000000000..5453cfe396b2a --- /dev/null +++ b/src/cmd/compile/internal/deadcode/deadcode.go @@ -0,0 +1,150 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package deadcode + +import ( + "go/constant" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" +) + +func Func(fn *ir.Func) { + stmts(&fn.Body) + + if len(fn.Body) == 0 { + return + } + + for _, n := range fn.Body { + if len(n.Init()) > 0 { + return + } + switch n.Op() { + case ir.OIF: + n := n.(*ir.IfStmt) + if !ir.IsConst(n.Cond, constant.Bool) || len(n.Body) > 0 || len(n.Else) > 0 { + return + } + case ir.OFOR: + n := n.(*ir.ForStmt) + if !ir.IsConst(n.Cond, constant.Bool) || ir.BoolVal(n.Cond) { + return + } + default: + return + } + } + + fn.Body.Set([]ir.Node{ir.NewBlockStmt(base.Pos, nil)}) +} + +func stmts(nn *ir.Nodes) { + var lastLabel = -1 + for i, n := range *nn { + if n != nil && n.Op() == ir.OLABEL { + lastLabel = i + } + } + for i, n := range *nn { + // Cut is set to true when all nodes after i'th position + // should be removed. + // In other words, it marks whole slice "tail" as dead. + cut := false + if n == nil { + continue + } + if n.Op() == ir.OIF { + n := n.(*ir.IfStmt) + n.Cond = expr(n.Cond) + if ir.IsConst(n.Cond, constant.Bool) { + var body ir.Nodes + if ir.BoolVal(n.Cond) { + n.Else = ir.Nodes{} + body = n.Body + } else { + n.Body = ir.Nodes{} + body = n.Else + } + // If "then" or "else" branch ends with panic or return statement, + // it is safe to remove all statements after this node. + // isterminating is not used to avoid goto-related complications. + // We must be careful not to deadcode-remove labels, as they + // might be the target of a goto. See issue 28616. 
+ if body := body; len(body) != 0 { + switch body[(len(body) - 1)].Op() { + case ir.ORETURN, ir.ORETJMP, ir.OPANIC: + if i > lastLabel { + cut = true + } + } + } + } + } + + stmts(n.PtrInit()) + switch n.Op() { + case ir.OBLOCK: + n := n.(*ir.BlockStmt) + stmts(&n.List) + case ir.OFOR: + n := n.(*ir.ForStmt) + stmts(&n.Body) + case ir.OIF: + n := n.(*ir.IfStmt) + stmts(&n.Body) + stmts(&n.Else) + case ir.ORANGE: + n := n.(*ir.RangeStmt) + stmts(&n.Body) + case ir.OSELECT: + n := n.(*ir.SelectStmt) + for _, cas := range n.Cases { + stmts(&cas.Body) + } + case ir.OSWITCH: + n := n.(*ir.SwitchStmt) + for _, cas := range n.Cases { + stmts(&cas.Body) + } + } + + if cut { + nn.Set((*nn)[:i+1]) + break + } + } +} + +func expr(n ir.Node) ir.Node { + // Perform dead-code elimination on short-circuited boolean + // expressions involving constants with the intent of + // producing a constant 'if' condition. + switch n.Op() { + case ir.OANDAND: + n := n.(*ir.LogicalExpr) + n.X = expr(n.X) + n.Y = expr(n.Y) + if ir.IsConst(n.X, constant.Bool) { + if ir.BoolVal(n.X) { + return n.Y // true && x => x + } else { + return n.X // false && x => false + } + } + case ir.OOROR: + n := n.(*ir.LogicalExpr) + n.X = expr(n.X) + n.Y = expr(n.Y) + if ir.IsConst(n.X, constant.Bool) { + if ir.BoolVal(n.X) { + return n.X // true || x => true + } else { + return n.Y // false || x => x + } + } + } + return n +} diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 45219801f0f16..603619eb5a521 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -8,6 +8,7 @@ import ( "bufio" "bytes" "cmd/compile/internal/base" + "cmd/compile/internal/deadcode" "cmd/compile/internal/devirtualize" "cmd/compile/internal/dwarfgen" "cmd/compile/internal/escape" @@ -210,12 +211,51 @@ func Main(archInit func(*ssagen.ArchInfo)) { dwarfgen.RecordPackageName() // Typecheck. - typecheck.Package() + noder.Package() // With all user code typechecked, it's now safe to verify unused dot imports. noder.CheckDotImports() base.ExitIfErrors() + // Phase 6: Compute Addrtaken for names. + // We need to wait until typechecking is done so that when we see &x[i] + // we know that x has its address taken if x is an array, but not if x is a slice. + // We compute Addrtaken in bulk here. + // After this phase, we maintain Addrtaken incrementally. + if typecheck.DirtyAddrtaken { + typecheck.ComputeAddrtaken(typecheck.Target.Decls) + typecheck.DirtyAddrtaken = false + } + typecheck.IncrementalAddrtaken = true + // Phase 7: Eliminate some obviously dead code. + // Must happen after typechecking. + for _, n := range typecheck.Target.Decls { + if n.Op() == ir.ODCLFUNC { + deadcode.Func(n.(*ir.Func)) + } + } + + // Phase 8: Decide how to capture closed variables. + // This needs to run before escape analysis, + // because variables captured by value do not escape. + base.Timer.Start("fe", "capturevars") + for _, n := range typecheck.Target.Decls { + if n.Op() == ir.ODCLFUNC { + n := n.(*ir.Func) + if n.OClosure != nil { + ir.CurFunc = n + typecheck.CaptureVars(n) + } + } + } + typecheck.CaptureVarsComplete = true + ir.CurFunc = nil + + if base.Debug.TypecheckInl != 0 { + // Typecheck imported function bodies if Debug.l > 1, + // otherwise lazily when used or re-exported. + typecheck.AllImportedBodies() + } // Build init task. 
if initTask := pkginit.Task(); initTask != nil { typecheck.Export(initTask) diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index 748fd96380083..40569af317111 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -85,6 +85,69 @@ func ParseFiles(filenames []string) uint { return lines } +func Package() { + typecheck.DeclareUniverse() + + typecheck.TypecheckAllowed = true + + // Process top-level declarations in phases. + + // Phase 1: const, type, and names and types of funcs. + // This will gather all the information about types + // and methods but doesn't depend on any of it. + // + // We also defer type alias declarations until phase 2 + // to avoid cycles like #18640. + // TODO(gri) Remove this again once we have a fix for #25838. + + // Don't use range--typecheck can add closures to Target.Decls. + base.Timer.Start("fe", "typecheck", "top1") + for i := 0; i < len(typecheck.Target.Decls); i++ { + n := typecheck.Target.Decls[i] + if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.(*ir.Decl).X.Alias()) { + typecheck.Target.Decls[i] = typecheck.Stmt(n) + } + } + + // Phase 2: Variable assignments. + // To check interface assignments, depends on phase 1. + + // Don't use range--typecheck can add closures to Target.Decls. + base.Timer.Start("fe", "typecheck", "top2") + for i := 0; i < len(typecheck.Target.Decls); i++ { + n := typecheck.Target.Decls[i] + if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).X.Alias() { + typecheck.Target.Decls[i] = typecheck.Stmt(n) + } + } + + // Phase 3: Type check function bodies. + // Don't use range--typecheck can add closures to Target.Decls. + base.Timer.Start("fe", "typecheck", "func") + var fcount int64 + for i := 0; i < len(typecheck.Target.Decls); i++ { + n := typecheck.Target.Decls[i] + if n.Op() == ir.ODCLFUNC { + typecheck.FuncBody(n.(*ir.Func)) + fcount++ + } + } + + // Phase 4: Check external declarations. + // TODO(mdempsky): This should be handled when type checking their + // corresponding ODCL nodes. + base.Timer.Start("fe", "typecheck", "externdcls") + for i, n := range typecheck.Target.Externs { + if n.Op() == ir.ONAME { + typecheck.Target.Externs[i] = typecheck.Expr(typecheck.Target.Externs[i]) + } + } + + // Phase 5: With all user code type-checked, it's now safe to verify map keys. + typecheck.CheckMapKeys() + +} + // makeSrcPosBase translates from a *syntax.PosBase to a *src.PosBase. 
func (p *noder) makeSrcPosBase(b0 *syntax.PosBase) *src.PosBase { // fast path: most likely PosBase hasn't changed @@ -398,7 +461,61 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node { } p.setlineno(decl) - return typecheck.DeclVars(names, typ, exprs) + return DeclVars(names, typ, exprs) +} + +// declare variables from grammar +// new_name_list (type | [type] = expr_list) +func DeclVars(vl []*ir.Name, t ir.Ntype, el []ir.Node) []ir.Node { + var init []ir.Node + doexpr := len(el) > 0 + + if len(el) == 1 && len(vl) > 1 { + e := el[0] + as2 := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) + as2.Rhs = []ir.Node{e} + for _, v := range vl { + as2.Lhs.Append(v) + typecheck.Declare(v, typecheck.DeclContext) + v.Ntype = t + v.Defn = as2 + if ir.CurFunc != nil { + init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v)) + } + } + + return append(init, as2) + } + + for i, v := range vl { + var e ir.Node + if doexpr { + if i >= len(el) { + base.Errorf("assignment mismatch: %d variables but %d values", len(vl), len(el)) + break + } + e = el[i] + } + + typecheck.Declare(v, typecheck.DeclContext) + v.Ntype = t + + if e != nil || ir.CurFunc != nil || ir.IsBlank(v) { + if ir.CurFunc != nil { + init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v)) + } + as := ir.NewAssignStmt(base.Pos, v, e) + init = append(init, as) + if e != nil { + v.Defn = as + } + } + } + + if len(el) > len(vl) { + base.Errorf("assignment mismatch: %d variables but %d values", len(vl), len(el)) + } + return init } // constState tracks state between constant specifiers within a diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go index c4f32ff59dbad..fd55f472abe7c 100644 --- a/src/cmd/compile/internal/typecheck/dcl.go +++ b/src/cmd/compile/internal/typecheck/dcl.go @@ -33,60 +33,6 @@ func DeclFunc(sym *types.Sym, tfn ir.Ntype) *ir.Func { return fn } -// declare variables from grammar -// new_name_list (type | [type] = expr_list) -func DeclVars(vl []*ir.Name, t ir.Ntype, el []ir.Node) []ir.Node { - var init []ir.Node - doexpr := len(el) > 0 - - if len(el) == 1 && len(vl) > 1 { - e := el[0] - as2 := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - as2.Rhs = []ir.Node{e} - for _, v := range vl { - as2.Lhs.Append(v) - Declare(v, DeclContext) - v.Ntype = t - v.Defn = as2 - if ir.CurFunc != nil { - init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v)) - } - } - - return append(init, as2) - } - - for i, v := range vl { - var e ir.Node - if doexpr { - if i >= len(el) { - base.Errorf("assignment mismatch: %d variables but %d values", len(vl), len(el)) - break - } - e = el[i] - } - - Declare(v, DeclContext) - v.Ntype = t - - if e != nil || ir.CurFunc != nil || ir.IsBlank(v) { - if ir.CurFunc != nil { - init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v)) - } - as := ir.NewAssignStmt(base.Pos, v, e) - init = append(init, as) - if e != nil { - v.Defn = as - } - } - } - - if len(el) > len(vl) { - base.Errorf("assignment mismatch: %d variables but %d values", len(vl), len(el)) - } - return init -} - // Declare records that Node n declares symbol n.Sym in the specified // declaration context. func Declare(n *ir.Name, ctxt ir.Class) { diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index 3552bcf924507..d8c1748432363 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -169,13 +169,13 @@ func ImportedBody(fn *ir.Func) { // computeAddrtaken call below (after we typecheck the body). 
// TODO: export/import types and addrtaken marks along with inlined bodies, // so this will be unnecessary. - incrementalAddrtaken = false + IncrementalAddrtaken = false defer func() { - if dirtyAddrtaken { - computeAddrtaken(fn.Inl.Body) // compute addrtaken marks once types are available - dirtyAddrtaken = false + if DirtyAddrtaken { + ComputeAddrtaken(fn.Inl.Body) // compute addrtaken marks once types are available + DirtyAddrtaken = false } - incrementalAddrtaken = true + IncrementalAddrtaken = true }() ImportBody(fn) diff --git a/src/cmd/compile/internal/typecheck/subr.go b/src/cmd/compile/internal/typecheck/subr.go index 9d414874a0446..447e945d814c9 100644 --- a/src/cmd/compile/internal/typecheck/subr.go +++ b/src/cmd/compile/internal/typecheck/subr.go @@ -72,7 +72,7 @@ func NodAddrAt(pos src.XPos, n ir.Node) *ir.AddrExpr { } func markAddrOf(n ir.Node) ir.Node { - if incrementalAddrtaken { + if IncrementalAddrtaken { // We can only do incremental addrtaken computation when it is ok // to typecheck the argument of the OADDR. That's only safe after the // main typecheck has completed. @@ -86,22 +86,22 @@ func markAddrOf(n ir.Node) ir.Node { } else { // Remember that we built an OADDR without computing the Addrtaken bit for // its argument. We'll do that later in bulk using computeAddrtaken. - dirtyAddrtaken = true + DirtyAddrtaken = true } return n } -// If incrementalAddrtaken is false, we do not compute Addrtaken for an OADDR Node +// If IncrementalAddrtaken is false, we do not compute Addrtaken for an OADDR Node // when it is built. The Addrtaken bits are set in bulk by computeAddrtaken. -// If incrementalAddrtaken is true, then when an OADDR Node is built the Addrtaken +// If IncrementalAddrtaken is true, then when an OADDR Node is built the Addrtaken // field of its argument is updated immediately. -var incrementalAddrtaken = false +var IncrementalAddrtaken = false -// If dirtyAddrtaken is true, then there are OADDR whose corresponding arguments +// If DirtyAddrtaken is true, then there are OADDR whose corresponding arguments // have not yet been marked as Addrtaken. -var dirtyAddrtaken = false +var DirtyAddrtaken = false -func computeAddrtaken(top []ir.Node) { +func ComputeAddrtaken(top []ir.Node) { for _, n := range top { ir.Visit(n, func(n ir.Node) { if n.Op() == ir.OADDR { diff --git a/src/cmd/compile/internal/typecheck/syms.go b/src/cmd/compile/internal/typecheck/syms.go index ab3384bf901e1..f0e230432a0ba 100644 --- a/src/cmd/compile/internal/typecheck/syms.go +++ b/src/cmd/compile/internal/typecheck/syms.go @@ -61,10 +61,10 @@ func Lookup(name string) *types.Sym { return types.LocalPkg.Lookup(name) } -// loadsys loads the definitions for the low-level runtime functions, +// InitRuntime loads the definitions for the low-level runtime functions, // so that the compiler can generate calls to them, // but does not make them visible to user code. -func loadsys() { +func InitRuntime() { types.Block = 1 inimport = true diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index 4b5c3198caccf..4c6ac21fc68f7 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -35,110 +35,7 @@ func Init() { initUniverse() DeclContext = ir.PEXTERN base.Timer.Start("fe", "loadsys") - loadsys() -} - -func Package() { - declareUniverse() - - TypecheckAllowed = true - - // Process top-level declarations in phases. - - // Phase 1: const, type, and names and types of funcs. 
- // This will gather all the information about types - // and methods but doesn't depend on any of it. - // - // We also defer type alias declarations until phase 2 - // to avoid cycles like #18640. - // TODO(gri) Remove this again once we have a fix for #25838. - - // Don't use range--typecheck can add closures to Target.Decls. - base.Timer.Start("fe", "typecheck", "top1") - for i := 0; i < len(Target.Decls); i++ { - n := Target.Decls[i] - if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.(*ir.Decl).X.Alias()) { - Target.Decls[i] = Stmt(n) - } - } - - // Phase 2: Variable assignments. - // To check interface assignments, depends on phase 1. - - // Don't use range--typecheck can add closures to Target.Decls. - base.Timer.Start("fe", "typecheck", "top2") - for i := 0; i < len(Target.Decls); i++ { - n := Target.Decls[i] - if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).X.Alias() { - Target.Decls[i] = Stmt(n) - } - } - - // Phase 3: Type check function bodies. - // Don't use range--typecheck can add closures to Target.Decls. - base.Timer.Start("fe", "typecheck", "func") - var fcount int64 - for i := 0; i < len(Target.Decls); i++ { - n := Target.Decls[i] - if n.Op() == ir.ODCLFUNC { - FuncBody(n.(*ir.Func)) - fcount++ - } - } - - // Phase 4: Check external declarations. - // TODO(mdempsky): This should be handled when type checking their - // corresponding ODCL nodes. - base.Timer.Start("fe", "typecheck", "externdcls") - for i, n := range Target.Externs { - if n.Op() == ir.ONAME { - Target.Externs[i] = Expr(Target.Externs[i]) - } - } - - // Phase 5: With all user code type-checked, it's now safe to verify map keys. - CheckMapKeys() - - // Phase 6: Compute Addrtaken for names. - // We need to wait until typechecking is done so that when we see &x[i] - // we know that x has its address taken if x is an array, but not if x is a slice. - // We compute Addrtaken in bulk here. - // After this phase, we maintain Addrtaken incrementally. - if dirtyAddrtaken { - computeAddrtaken(Target.Decls) - dirtyAddrtaken = false - } - incrementalAddrtaken = true - - // Phase 7: Eliminate some obviously dead code. - // Must happen after typechecking. - for _, n := range Target.Decls { - if n.Op() == ir.ODCLFUNC { - deadcode(n.(*ir.Func)) - } - } - - // Phase 8: Decide how to capture closed variables. - // This needs to run before escape analysis, - // because variables captured by value do not escape. - base.Timer.Start("fe", "capturevars") - for _, n := range Target.Decls { - if n.Op() == ir.ODCLFUNC { - n := n.(*ir.Func) - if n.OClosure != nil { - ir.CurFunc = n - CaptureVars(n) - } - } - } - CaptureVarsComplete = true - ir.CurFunc = nil - - if base.Debug.TypecheckInl != 0 { - // Typecheck imported function bodies if Debug.l > 1, - // otherwise lazily when used or re-exported. 
- AllImportedBodies() - } + InitRuntime() } func AssignExpr(n ir.Node) ir.Node { return typecheck(n, ctxExpr|ctxAssign) } @@ -2247,144 +2144,6 @@ func CheckReturn(fn *ir.Func) { } } -func deadcode(fn *ir.Func) { - deadcodeslice(&fn.Body) - - if len(fn.Body) == 0 { - return - } - - for _, n := range fn.Body { - if len(n.Init()) > 0 { - return - } - switch n.Op() { - case ir.OIF: - n := n.(*ir.IfStmt) - if !ir.IsConst(n.Cond, constant.Bool) || len(n.Body) > 0 || len(n.Else) > 0 { - return - } - case ir.OFOR: - n := n.(*ir.ForStmt) - if !ir.IsConst(n.Cond, constant.Bool) || ir.BoolVal(n.Cond) { - return - } - default: - return - } - } - - fn.Body.Set([]ir.Node{ir.NewBlockStmt(base.Pos, nil)}) -} - -func deadcodeslice(nn *ir.Nodes) { - var lastLabel = -1 - for i, n := range *nn { - if n != nil && n.Op() == ir.OLABEL { - lastLabel = i - } - } - for i, n := range *nn { - // Cut is set to true when all nodes after i'th position - // should be removed. - // In other words, it marks whole slice "tail" as dead. - cut := false - if n == nil { - continue - } - if n.Op() == ir.OIF { - n := n.(*ir.IfStmt) - n.Cond = deadcodeexpr(n.Cond) - if ir.IsConst(n.Cond, constant.Bool) { - var body ir.Nodes - if ir.BoolVal(n.Cond) { - n.Else = ir.Nodes{} - body = n.Body - } else { - n.Body = ir.Nodes{} - body = n.Else - } - // If "then" or "else" branch ends with panic or return statement, - // it is safe to remove all statements after this node. - // isterminating is not used to avoid goto-related complications. - // We must be careful not to deadcode-remove labels, as they - // might be the target of a goto. See issue 28616. - if body := body; len(body) != 0 { - switch body[(len(body) - 1)].Op() { - case ir.ORETURN, ir.ORETJMP, ir.OPANIC: - if i > lastLabel { - cut = true - } - } - } - } - } - - deadcodeslice(n.PtrInit()) - switch n.Op() { - case ir.OBLOCK: - n := n.(*ir.BlockStmt) - deadcodeslice(&n.List) - case ir.OFOR: - n := n.(*ir.ForStmt) - deadcodeslice(&n.Body) - case ir.OIF: - n := n.(*ir.IfStmt) - deadcodeslice(&n.Body) - deadcodeslice(&n.Else) - case ir.ORANGE: - n := n.(*ir.RangeStmt) - deadcodeslice(&n.Body) - case ir.OSELECT: - n := n.(*ir.SelectStmt) - for _, cas := range n.Cases { - deadcodeslice(&cas.Body) - } - case ir.OSWITCH: - n := n.(*ir.SwitchStmt) - for _, cas := range n.Cases { - deadcodeslice(&cas.Body) - } - } - - if cut { - nn.Set((*nn)[:i+1]) - break - } - } -} - -func deadcodeexpr(n ir.Node) ir.Node { - // Perform dead-code elimination on short-circuited boolean - // expressions involving constants with the intent of - // producing a constant 'if' condition. - switch n.Op() { - case ir.OANDAND: - n := n.(*ir.LogicalExpr) - n.X = deadcodeexpr(n.X) - n.Y = deadcodeexpr(n.Y) - if ir.IsConst(n.X, constant.Bool) { - if ir.BoolVal(n.X) { - return n.Y // true && x => x - } else { - return n.X // false && x => false - } - } - case ir.OOROR: - n := n.(*ir.LogicalExpr) - n.X = deadcodeexpr(n.X) - n.Y = deadcodeexpr(n.Y) - if ir.IsConst(n.X, constant.Bool) { - if ir.BoolVal(n.X) { - return n.X // true || x => true - } else { - return n.Y // false || x => x - } - } - } - return n -} - // getIotaValue returns the current value for "iota", // or -1 if not within a ConstSpec. 
func getIotaValue() int64 { diff --git a/src/cmd/compile/internal/typecheck/universe.go b/src/cmd/compile/internal/typecheck/universe.go index fc8e962e28fad..054f094cd3119 100644 --- a/src/cmd/compile/internal/typecheck/universe.go +++ b/src/cmd/compile/internal/typecheck/universe.go @@ -336,8 +336,8 @@ func makeErrorInterface() *types.Type { return types.NewInterface(types.NoPkg, []*types.Field{method}) } -// declareUniverse makes the universe block visible within the current package. -func declareUniverse() { +// DeclareUniverse makes the universe block visible within the current package. +func DeclareUniverse() { // Operationally, this is similar to a dot import of builtinpkg, except // that we silently skip symbols that are already declared in the // package block rather than emitting a redeclared symbol error. From 3a4474cdfda0096b5d88c769f81ad81d6f0168c7 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Thu, 31 Dec 2020 23:39:15 -0800 Subject: [PATCH 319/474] [dev.regabi] cmd/compile: some more manual shuffling More minor reshuffling of passes. Passes toolstash -cmp. Change-Id: I22633b3741f668fc5ee8579d7d610035ed57df1f Reviewed-on: https://go-review.googlesource.com/c/go/+/280975 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/gc/abiutils_test.go | 2 +- src/cmd/compile/internal/gc/main.go | 26 +++++++------------ src/cmd/compile/internal/noder/noder.go | 14 ++++++++++ src/cmd/compile/internal/typecheck/dcl.go | 2 +- src/cmd/compile/internal/typecheck/syms.go | 7 +---- .../compile/internal/typecheck/typecheck.go | 7 ----- .../compile/internal/typecheck/universe.go | 4 +-- 7 files changed, 29 insertions(+), 33 deletions(-) diff --git a/src/cmd/compile/internal/gc/abiutils_test.go b/src/cmd/compile/internal/gc/abiutils_test.go index d535a6a34bf44..6fd0af1b1fbf9 100644 --- a/src/cmd/compile/internal/gc/abiutils_test.go +++ b/src/cmd/compile/internal/gc/abiutils_test.go @@ -38,7 +38,7 @@ func TestMain(m *testing.M) { base.Ctxt.Bso = bufio.NewWriter(os.Stdout) types.PtrSize = ssagen.Arch.LinkArch.PtrSize types.RegSize = ssagen.Arch.LinkArch.RegSize - typecheck.Init() + typecheck.InitUniverse() os.Exit(m.Run()) } diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 603619eb5a521..df6a9d8e4518f 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -200,23 +200,15 @@ func Main(archInit func(*ssagen.ArchInfo)) { base.AutogeneratedPos = makePos(src.NewFileBase("", ""), 1, 0) - typecheck.Init() + typecheck.InitUniverse() - // Parse input. - base.Timer.Start("fe", "parse") - lines := noder.ParseFiles(flag.Args()) - ssagen.CgoSymABIs() - base.Timer.Stop() - base.Timer.AddEvent(int64(lines), "lines") - dwarfgen.RecordPackageName() + // Parse and typecheck input. + noder.LoadPackage(flag.Args()) - // Typecheck. - noder.Package() + dwarfgen.RecordPackageName() + ssagen.CgoSymABIs() - // With all user code typechecked, it's now safe to verify unused dot imports. - noder.CheckDotImports() - base.ExitIfErrors() - // Phase 6: Compute Addrtaken for names. + // Compute Addrtaken for names. // We need to wait until typechecking is done so that when we see &x[i] // we know that x has its address taken if x is an array, but not if x is a slice. // We compute Addrtaken in bulk here. @@ -227,7 +219,7 @@ func Main(archInit func(*ssagen.ArchInfo)) { } typecheck.IncrementalAddrtaken = true - // Phase 7: Eliminate some obviously dead code. 
+ // Eliminate some obviously dead code. // Must happen after typechecking. for _, n := range typecheck.Target.Decls { if n.Op() == ir.ODCLFUNC { @@ -235,7 +227,7 @@ func Main(archInit func(*ssagen.ArchInfo)) { } } - // Phase 8: Decide how to capture closed variables. + // Decide how to capture closed variables. // This needs to run before escape analysis, // because variables captured by value do not escape. base.Timer.Start("fe", "capturevars") @@ -256,6 +248,7 @@ func Main(archInit func(*ssagen.ArchInfo)) { // otherwise lazily when used or re-exported. typecheck.AllImportedBodies() } + // Build init task. if initTask := pkginit.Task(); initTask != nil { typecheck.Export(initTask) @@ -311,6 +304,7 @@ func Main(archInit func(*ssagen.ArchInfo)) { // Prepare for SSA compilation. // This must be before peekitabs, because peekitabs // can trigger function compilation. + typecheck.InitRuntime() ssagen.InitConfig() // Just before compilation, compile itabs found on diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index 40569af317111..29bfde3ff23cc 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -25,6 +25,20 @@ import ( "cmd/internal/src" ) +func LoadPackage(filenames []string) { + base.Timer.Start("fe", "parse") + lines := ParseFiles(filenames) + base.Timer.Stop() + base.Timer.AddEvent(int64(lines), "lines") + + // Typecheck. + Package() + + // With all user code typechecked, it's now safe to verify unused dot imports. + CheckDotImports() + base.ExitIfErrors() +} + // ParseFiles concurrently parses files into *syntax.File structures. // Each declaration in every *syntax.File is converted to a syntax tree // and its root represented by *Node is appended to Target.Decls. diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go index fd55f472abe7c..daec9848d0fb0 100644 --- a/src/cmd/compile/internal/typecheck/dcl.go +++ b/src/cmd/compile/internal/typecheck/dcl.go @@ -15,7 +15,7 @@ import ( "cmd/internal/src" ) -var DeclContext ir.Class // PEXTERN/PAUTO +var DeclContext ir.Class = ir.PEXTERN // PEXTERN/PAUTO func DeclFunc(sym *types.Sym, tfn ir.Ntype) *ir.Func { if tfn.Op() != ir.OTFUNC { diff --git a/src/cmd/compile/internal/typecheck/syms.go b/src/cmd/compile/internal/typecheck/syms.go index f0e230432a0ba..2251062e16c6e 100644 --- a/src/cmd/compile/internal/typecheck/syms.go +++ b/src/cmd/compile/internal/typecheck/syms.go @@ -65,11 +65,9 @@ func Lookup(name string) *types.Sym { // so that the compiler can generate calls to them, // but does not make them visible to user code. func InitRuntime() { + base.Timer.Start("fe", "loadsys") types.Block = 1 - inimport = true - TypecheckAllowed = true - typs := runtimeTypes() for _, d := range &runtimeDecls { sym := ir.Pkgs.Runtime.Lookup(d.name) @@ -83,9 +81,6 @@ func InitRuntime() { base.Fatalf("unhandled declaration tag %v", d.tag) } } - - TypecheckAllowed = false - inimport = false } // LookupRuntimeFunc looks up Go function name in package runtime. 
This function diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index 4c6ac21fc68f7..c8d82443a18c4 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -31,13 +31,6 @@ var ( NeedRuntimeType = func(*types.Type) {} ) -func Init() { - initUniverse() - DeclContext = ir.PEXTERN - base.Timer.Start("fe", "loadsys") - InitRuntime() -} - func AssignExpr(n ir.Node) ir.Node { return typecheck(n, ctxExpr|ctxAssign) } func Expr(n ir.Node) ir.Node { return typecheck(n, ctxExpr) } func Stmt(n ir.Node) ir.Node { return typecheck(n, ctxStmt) } diff --git a/src/cmd/compile/internal/typecheck/universe.go b/src/cmd/compile/internal/typecheck/universe.go index 054f094cd3119..f1e7ed427307f 100644 --- a/src/cmd/compile/internal/typecheck/universe.go +++ b/src/cmd/compile/internal/typecheck/universe.go @@ -90,8 +90,8 @@ var unsafeFuncs = [...]struct { {"Sizeof", ir.OSIZEOF}, } -// initUniverse initializes the universe block. -func initUniverse() { +// InitUniverse initializes the universe block. +func InitUniverse() { if types.PtrSize == 0 { base.Fatalf("typeinit before betypeinit") } From 68e6fa4f6852b4ef0fe61789618c093f4e2185c9 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Thu, 31 Dec 2020 23:45:36 -0800 Subject: [PATCH 320/474] [dev.regabi] cmd/compile: fix package-initialization order This CL fixes package initialization order by creating the init task before the general deadcode-removal pass. It also changes noder to emit zero-initialization assignments (i.e., OAS with nil RHS) for package-block variables, so that initOrder can tell the variables still need initialization. To allow this, we need to also extend the static-init code to recognize zero-initialization assignments. This doesn't pass toolstash -cmp, because it reorders some package initialization routines. Fixes #43444. Change-Id: I0da7996a62c85e15e97ce965298127e075390a7e Reviewed-on: https://go-review.googlesource.com/c/go/+/280976 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/gc/main.go | 10 ++-- src/cmd/compile/internal/noder/noder.go | 52 +++++++------------- src/cmd/compile/internal/pkginit/init.go | 4 ++ src/cmd/compile/internal/staticinit/sched.go | 16 ++++-- test/fixedbugs/issue43444.go | 28 +++++++++++ test/fixedbugs/issue43444.out | 1 + 6 files changed, 70 insertions(+), 41 deletions(-) create mode 100644 test/fixedbugs/issue43444.go create mode 100644 test/fixedbugs/issue43444.out diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index df6a9d8e4518f..c1f51e4f1dee3 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -208,6 +208,11 @@ func Main(archInit func(*ssagen.ArchInfo)) { dwarfgen.RecordPackageName() ssagen.CgoSymABIs() + // Build init task. + if initTask := pkginit.Task(); initTask != nil { + typecheck.Export(initTask) + } + // Compute Addrtaken for names. // We need to wait until typechecking is done so that when we see &x[i] // we know that x has its address taken if x is an array, but not if x is a slice. @@ -249,11 +254,6 @@ func Main(archInit func(*ssagen.ArchInfo)) { typecheck.AllImportedBodies() } - // Build init task. 
- if initTask := pkginit.Task(); initTask != nil { - typecheck.Export(initTask) - } - // Inlining base.Timer.Start("fe", "inlining") if base.Flag.LowerL != 0 { diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index 29bfde3ff23cc..cc8a1c7c89a4f 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -474,24 +474,15 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node { p.checkUnused(pragma) } - p.setlineno(decl) - return DeclVars(names, typ, exprs) -} - -// declare variables from grammar -// new_name_list (type | [type] = expr_list) -func DeclVars(vl []*ir.Name, t ir.Ntype, el []ir.Node) []ir.Node { var init []ir.Node - doexpr := len(el) > 0 + p.setlineno(decl) - if len(el) == 1 && len(vl) > 1 { - e := el[0] - as2 := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - as2.Rhs = []ir.Node{e} - for _, v := range vl { + if len(names) > 1 && len(exprs) == 1 { + as2 := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, exprs) + for _, v := range names { as2.Lhs.Append(v) typecheck.Declare(v, typecheck.DeclContext) - v.Ntype = t + v.Ntype = typ v.Defn = as2 if ir.CurFunc != nil { init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v)) @@ -501,34 +492,29 @@ func DeclVars(vl []*ir.Name, t ir.Ntype, el []ir.Node) []ir.Node { return append(init, as2) } - for i, v := range vl { + for i, v := range names { var e ir.Node - if doexpr { - if i >= len(el) { - base.Errorf("assignment mismatch: %d variables but %d values", len(vl), len(el)) - break - } - e = el[i] + if i < len(exprs) { + e = exprs[i] } typecheck.Declare(v, typecheck.DeclContext) - v.Ntype = t + v.Ntype = typ - if e != nil || ir.CurFunc != nil || ir.IsBlank(v) { - if ir.CurFunc != nil { - init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v)) - } - as := ir.NewAssignStmt(base.Pos, v, e) - init = append(init, as) - if e != nil { - v.Defn = as - } + if ir.CurFunc != nil { + init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v)) + } + as := ir.NewAssignStmt(base.Pos, v, e) + init = append(init, as) + if e != nil || ir.CurFunc == nil { + v.Defn = as } } - if len(el) > len(vl) { - base.Errorf("assignment mismatch: %d variables but %d values", len(vl), len(el)) + if len(exprs) != 0 && len(names) != len(exprs) { + base.Errorf("assignment mismatch: %d variables but %d values", len(names), len(exprs)) } + return init } diff --git a/src/cmd/compile/internal/pkginit/init.go b/src/cmd/compile/internal/pkginit/init.go index f1ffbb5933db1..24fe1a76280d8 100644 --- a/src/cmd/compile/internal/pkginit/init.go +++ b/src/cmd/compile/internal/pkginit/init.go @@ -6,6 +6,7 @@ package pkginit import ( "cmd/compile/internal/base" + "cmd/compile/internal/deadcode" "cmd/compile/internal/ir" "cmd/compile/internal/objw" "cmd/compile/internal/typecheck" @@ -68,6 +69,9 @@ func Task() *ir.Name { // Record user init functions. for _, fn := range typecheck.Target.Inits { + // Must happen after initOrder; see #43444. + deadcode.Func(fn) + // Skip init functions with empty bodies. 
if len(fn.Body) == 1 { if stmt := fn.Body[0]; stmt.Op() == ir.OBLOCK && len(stmt.(*ir.BlockStmt).List) == 0 { diff --git a/src/cmd/compile/internal/staticinit/sched.go b/src/cmd/compile/internal/staticinit/sched.go index 1b0af1b05d4ae..8e4ce55954816 100644 --- a/src/cmd/compile/internal/staticinit/sched.go +++ b/src/cmd/compile/internal/staticinit/sched.go @@ -86,17 +86,22 @@ func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Ty if rn.Class_ != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg { return false } - if rn.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value - return false - } if rn.Defn.Op() != ir.OAS { return false } if rn.Type().IsString() { // perhaps overwritten by cmd/link -X (#34675) return false } + if rn.Embed != nil { + return false + } orig := rn r := rn.Defn.(*ir.AssignStmt).Y + if r == nil { + // No explicit initialization value. Probably zeroed but perhaps + // supplied externally and of unknown value. + return false + } for r.Op() == ir.OCONVNOP && !types.Identical(r.Type(), typ) { r = r.(*ir.ConvExpr).X @@ -185,6 +190,11 @@ func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Ty } func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Type) bool { + if r == nil { + // No explicit initialization value. Either zero or supplied + // externally. + return true + } for r.Op() == ir.OCONVNOP { r = r.(*ir.ConvExpr).X } diff --git a/test/fixedbugs/issue43444.go b/test/fixedbugs/issue43444.go new file mode 100644 index 0000000000000..c430e1baf796b --- /dev/null +++ b/test/fixedbugs/issue43444.go @@ -0,0 +1,28 @@ +// run + +package main + +var sp = "" + +func f(name string, _ ...interface{}) int { + print(sp, name) + sp = " " + return 0 +} + +var a = f("a", x) +var b = f("b", y) +var c = f("c", z) +var d = func() int { + if false { + _ = z + } + return f("d") +}() +var e = f("e") + +var x int +var y int = 42 +var z int = func() int { return 42 }() + +func main() { println() } diff --git a/test/fixedbugs/issue43444.out b/test/fixedbugs/issue43444.out new file mode 100644 index 0000000000000..22d6a0dc691a4 --- /dev/null +++ b/test/fixedbugs/issue43444.out @@ -0,0 +1 @@ +e a b c d From 6ddbc75efd4bc2757e7684e7760ee411ec721e15 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Fri, 1 Jan 2021 01:32:46 -0800 Subject: [PATCH 321/474] [dev.regabi] cmd/compile: earlier deadcode removal This CL moves the general deadcode-removal pass to before computing Addrtaken, which allows variables to still be converted to SSA if their address is only taken in unreachable code paths (e.g., the "&mp" expression in the "if false" block in runtime/os_linux.go:newosproc). This doesn't pass toolstash -cmp, because it allows SSA to better optimize some code. Change-Id: I43e54acc02fdcbad8eb6493283f355aa1ee0de84 Reviewed-on: https://go-review.googlesource.com/c/go/+/280992 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/gc/main.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index c1f51e4f1dee3..2ea614e17ff04 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -213,6 +213,14 @@ func Main(archInit func(*ssagen.ArchInfo)) { typecheck.Export(initTask) } + // Eliminate some obviously dead code. + // Must happen after typechecking. 
+ for _, n := range typecheck.Target.Decls { + if n.Op() == ir.ODCLFUNC { + deadcode.Func(n.(*ir.Func)) + } + } + // Compute Addrtaken for names. // We need to wait until typechecking is done so that when we see &x[i] // we know that x has its address taken if x is an array, but not if x is a slice. @@ -224,14 +232,6 @@ func Main(archInit func(*ssagen.ArchInfo)) { } typecheck.IncrementalAddrtaken = true - // Eliminate some obviously dead code. - // Must happen after typechecking. - for _, n := range typecheck.Target.Decls { - if n.Op() == ir.ODCLFUNC { - deadcode.Func(n.(*ir.Func)) - } - } - // Decide how to capture closed variables. // This needs to run before escape analysis, // because variables captured by value do not escape. From ece345aa691c4097fdb8d1f2736a8fd6214515a9 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Fri, 1 Jan 2021 01:46:55 -0800 Subject: [PATCH 322/474] [dev.regabi] cmd/compile: expand documentation for Func.Closure{Vars,Enter} I keep getting these confused and having to look at how the code actually uses them. Change-Id: I86baf22b76e7dddada6830df0fac241092f716bf Reviewed-on: https://go-review.googlesource.com/c/go/+/280993 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky Reviewed-by: Cuong Manh Le TryBot-Result: Go Bot --- src/cmd/compile/internal/ir/func.go | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 9a79a4f30ff83..c54b742669294 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -65,9 +65,22 @@ type Func struct { // include closurevars until transformclosure runs. Dcl []*Name - ClosureEnter Nodes // list of ONAME nodes (or OADDR-of-ONAME nodes, for output parameters) of captured variables - ClosureType Ntype // closure representation type - ClosureVars []*Name // closure params; each has closurevar set + ClosureType Ntype // closure representation type + + // ClosureVars lists the free variables that are used within a + // function literal, but formally declared in an enclosing + // function. The variables in this slice are the closure function's + // own copy of the variables, which are used within its function + // body. They will also each have IsClosureVar set, and will have + // Byval set if they're captured by value. + ClosureVars []*Name + + // ClosureEnter holds the expressions that the enclosing function + // will use to initialize the closure's free variables. These + // correspond one-to-one with the variables in ClosureVars, and will + // be either an ONAME node (if the variable is captured by value) or + // an OADDR-of-ONAME node (if not). + ClosureEnter Nodes // Parents records the parent scope of each scope within a // function. The root scope (0) has no parent, so the i'th From 9ed1577779b38620a5df1871ec1cd8d8677d5cc0 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Fri, 1 Jan 2021 02:14:45 -0800 Subject: [PATCH 323/474] [dev.regabi] cmd/compile: remove Func.ClosureEnter We can easily compute this on demand. Passes toolstash -cmp. 
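As a rough source-level illustration of what these initializer expressions
correspond to (a hand-written sketch, not compiler code; the names x, y, f
are invented for the example): x below is only read inside the literal and
is small, so it is a candidate for capture by value, while y is assigned
inside the literal, so it must be captured by reference.

	package main

	func main() {
		x, y := 1, 2
		f := func() int {
			y++          // y is assigned in the closure: captured by reference
			return x + y // x is only read (and small): may be captured by value
		}
		println(f(), y) // the outer y observes the closure's update: prints 4 3
	}
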
Change-Id: I433d8adb2b1615ae05b2764e69904369a59542c5 Reviewed-on: https://go-review.googlesource.com/c/go/+/280994 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky Reviewed-by: Cuong Manh Le TryBot-Result: Go Bot --- src/cmd/compile/internal/ir/func.go | 7 ------- src/cmd/compile/internal/ir/sizeof_test.go | 2 +- src/cmd/compile/internal/typecheck/func.go | 14 ++++---------- src/cmd/compile/internal/walk/closure.go | 22 +++++++++++++++++++++- src/cmd/compile/internal/walk/expr.go | 3 +-- 5 files changed, 27 insertions(+), 21 deletions(-) diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index c54b742669294..1eaca9c6f38c6 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -75,13 +75,6 @@ type Func struct { // Byval set if they're captured by value. ClosureVars []*Name - // ClosureEnter holds the expressions that the enclosing function - // will use to initialize the closure's free variables. These - // correspond one-to-one with the variables in ClosureVars, and will - // be either an ONAME node (if the variable is captured by value) or - // an OADDR-of-ONAME node (if not). - ClosureEnter Nodes - // Parents records the parent scope of each scope within a // function. The root scope (0) has no parent, so the i'th // scope's parent is stored at Parents[i-1]. diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go index 8f5fae8a1200f..60120f2998c83 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {Func{}, 196, 344}, + {Func{}, 184, 320}, {Name{}, 124, 216}, } diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index d8c1748432363..2bc911882f74d 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -122,20 +122,17 @@ func CaptureVars(fn *ir.Func) { } out = append(out, v) - // type check the & of closed variables outside the closure, + // type check closed variables outside the closure, // so that the outer frame also grabs them and knows they escape. - types.CalcSize(v.Type()) + Expr(v.Outer) - var outer ir.Node - outer = v.Outer outermost := v.Defn.(*ir.Name) // out parameters will be assigned to implicitly upon return. 
-	if outermost.Class_ != ir.PPARAMOUT && !outermost.Addrtaken() && !outermost.Assigned() && v.Type().Width <= 128 {
+	if outermost.Class_ != ir.PPARAMOUT && !outermost.Addrtaken() && !outermost.Assigned() && v.Type().Size() <= 128 {
 			v.SetByval(true)
 		} else {
 			outermost.SetAddrtaken(true)
-			outer = NodAddr(outer)
 		}
 
 		if base.Flag.LowerM > 1 {
@@ -147,11 +144,8 @@ func CaptureVars(fn *ir.Func) {
 			if v.Byval() {
 				how = "value"
 			}
-			base.WarnfAt(v.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym(), outermost.Addrtaken(), outermost.Assigned(), int32(v.Type().Width))
+			base.WarnfAt(v.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym(), outermost.Addrtaken(), outermost.Assigned(), v.Type().Size())
 		}
-
-		outer = Expr(outer)
-		fn.ClosureEnter.Append(outer)
 	}
 
 	fn.ClosureVars = out
diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go
index 0726d3b5521ea..d4eb4eb8a3d07 100644
--- a/src/cmd/compile/internal/walk/closure.go
+++ b/src/cmd/compile/internal/walk/closure.go
@@ -131,7 +131,7 @@ func walkClosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node {
 
 	clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ).(ir.Ntype), nil)
 	clos.SetEsc(clo.Esc())
-	clos.List.Set(append([]ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, fn.Nname)}, fn.ClosureEnter...))
+	clos.List.Set(append([]ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, fn.Nname)}, closureArgs(clo)...))
 
 	addr := typecheck.NodAddr(clos)
 	addr.SetEsc(clo.Esc())
@@ -151,6 +151,26 @@ func walkClosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node {
 	return walkExpr(cfn, init)
 }
 
+// closureArgs returns a slice of expressions that can be used to
+// initialize the given closure's free variables. These correspond
+// one-to-one with the variables in clo.Func.ClosureVars, and will be
+// either an ONAME node (if the variable is captured by value) or an
+// OADDR-of-ONAME node (if not).
+func closureArgs(clo *ir.ClosureExpr) []ir.Node {
+	fn := clo.Func
+
+	args := make([]ir.Node, len(fn.ClosureVars))
+	for i, v := range fn.ClosureVars {
+		var outer ir.Node
+		outer = v.Outer
+		if !v.Byval() {
+			outer = typecheck.NodAddrAt(fn.Pos(), outer)
+		}
+		args[i] = typecheck.Expr(outer)
+	}
+	return args
+}
+
 func walkCallPart(n *ir.SelectorExpr, init *ir.Nodes) ir.Node {
 	// Create closure in the form of a composite literal.
 	// For x.M with receiver (x) type T, the generated code looks like:
diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go
index f06a87c37fd7e..1fd09b42af4ca 100644
--- a/src/cmd/compile/internal/walk/expr.go
+++ b/src/cmd/compile/internal/walk/expr.go
@@ -498,8 +498,7 @@ func walkCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
 
 	// Prepend captured variables to argument list.
 	clo := n.X.(*ir.ClosureExpr)
-	n.Args.Prepend(clo.Func.ClosureEnter...)
-	clo.Func.ClosureEnter.Set(nil)
+	n.Args.Prepend(closureArgs(clo)...)
 
 	// Replace OCLOSURE with ONAME/PFUNC.
 	n.X = clo.Func.Nname
From 7d55669847389b8d2e490400226f272023da8605 Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Fri, 1 Jan 2021 02:23:48 -0800
Subject: [PATCH 324/474] [dev.regabi] cmd/compile: simplify dwarfgen.declPos

The previous code was way overcomplicating things. To find out if a
variable is a closure pseudo-variable, one only needs to check
IsClosureVar. Captured and Byval are only meant to be used by closure
conversion.

Passes toolstash -cmp.
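For a concrete picture of the pseudo-variable relationship this relies
on, here is a hand-written source-level sketch (illustrative only, not
compiler code): the function literal gets its own ir.Name for v, and
that pseudo-variable's Defn field points back at the declaration in
outer, which is the position the debug info should record.

	package sketch

	func outer() func() int {
		v := 42             // the declaration position declPos should resolve to
		return func() int { // the literal body refers to its own copy of v
			return v
		}
	}
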
Change-Id: I22622cba36ba7f60b3275d17999a8b6bb7c6719a Reviewed-on: https://go-review.googlesource.com/c/go/+/280995 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky Reviewed-by: Cuong Manh Le TryBot-Result: Go Bot --- src/cmd/compile/internal/dwarfgen/dwarf.go | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go index 42c83b1f239e5..6eac9d547e4e6 100644 --- a/src/cmd/compile/internal/dwarfgen/dwarf.go +++ b/src/cmd/compile/internal/dwarfgen/dwarf.go @@ -127,22 +127,8 @@ func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, } func declPos(decl *ir.Name) src.XPos { - if decl.Defn != nil && (decl.Captured() || decl.Byval()) { - // It's not clear which position is correct for captured variables here: - // * decl.Pos is the wrong position for captured variables, in the inner - // function, but it is the right position in the outer function. - // * decl.Name.Defn is nil for captured variables that were arguments - // on the outer function, however the decl.Pos for those seems to be - // correct. - // * decl.Name.Defn is the "wrong" thing for variables declared in the - // header of a type switch, it's their position in the header, rather - // than the position of the case statement. In principle this is the - // right thing, but here we prefer the latter because it makes each - // instance of the header variable local to the lexical block of its - // case statement. - // This code is probably wrong for type switch variables that are also - // captured. - return decl.Defn.Pos() + if decl.IsClosureVar() { + decl = decl.Defn.(*ir.Name) } return decl.Pos() } From fad9a8b52864da738037163565e8eacc958baaa8 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Fri, 1 Jan 2021 02:39:00 -0800 Subject: [PATCH 325/474] [dev.regabi] cmd/compile: simplify inlining of closures Closures have their own ONAMEs for captured variables, which their function bodies refer to. So during inlining, we need to account for this and ensure the references still work. The previous inlining handled this by actually declaring the variables and then either copying the original value or creating a pointer to them, as appropriate for variables captured by value or by reference. But this is needlessly complicated. When inlining the function body, we need to rewrite all variable references anyway. We can just detect closure variables and change them to directly point to the enclosing function's version of this variable. No need for copying or further indirection. Does not pass toolstash -cmp. Presumably because we're able to generate better code in some circumstances. Change-Id: I8f0ccf7b098f39b8cd33f3bcefb875c8132d2c62 Reviewed-on: https://go-review.googlesource.com/c/go/+/280996 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky Reviewed-by: Cuong Manh Le TryBot-Result: Go Bot --- src/cmd/compile/internal/inline/inl.go | 55 +++++++++----------------- 1 file changed, 19 insertions(+), 36 deletions(-) diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index df797da2d1c8e..9e9d0bba7ce05 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -753,42 +753,6 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b // record formals/locals for later post-processing var inlfvars []ir.Node - // Handle captured variables when inlining closures. 
- if c := fn.OClosure; c != nil { - for _, v := range fn.ClosureVars { - if v.Op() == ir.OXXX { - continue - } - - o := v.Outer - // make sure the outer param matches the inlining location - // NB: if we enabled inlining of functions containing OCLOSURE or refined - // the reassigned check via some sort of copy propagation this would most - // likely need to be changed to a loop to walk up to the correct Param - if o == nil || o.Curfn != ir.CurFunc { - base.Fatalf("%v: unresolvable capture %v %v\n", ir.Line(n), fn, v) - } - - if v.Byval() { - iv := typecheck.Expr(inlvar(v)) - ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, iv.(*ir.Name))) - ninit.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, iv, o))) - inlvars[v] = iv - } else { - addr := typecheck.NewName(typecheck.Lookup("&" + v.Sym().Name)) - addr.SetType(types.NewPtr(v.Type())) - ia := typecheck.Expr(inlvar(addr)) - ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, ia.(*ir.Name))) - ninit.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, ia, typecheck.NodAddr(o)))) - inlvars[addr] = ia - - // When capturing by reference, all occurrence of the captured var - // must be substituted with dereference of the temporary address - inlvars[v] = typecheck.Expr(ir.NewStarExpr(base.Pos, ia)) - } - } - } - for _, ln := range fn.Inl.Dcl { if ln.Op() != ir.ONAME { continue @@ -1088,6 +1052,25 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { switch n.Op() { case ir.ONAME: n := n.(*ir.Name) + + // Handle captured variables when inlining closures. + if n.IsClosureVar() { + o := n.Outer + + // make sure the outer param matches the inlining location + // NB: if we enabled inlining of functions containing OCLOSURE or refined + // the reassigned check via some sort of copy propagation this would most + // likely need to be changed to a loop to walk up to the correct Param + if o == nil || o.Curfn != ir.CurFunc { + base.Fatalf("%v: unresolvable capture %v\n", ir.Line(n), n) + } + + if base.Flag.LowerM > 2 { + fmt.Printf("substituting captured name %+v -> %+v\n", n, o) + } + return o + } + if inlvar := subst.inlvars[n]; inlvar != nil { // These will be set during inlnode if base.Flag.LowerM > 2 { fmt.Printf("substituting name %+v -> %+v\n", n, inlvar) From 67ad695416fbcdf9d61e5bfc0f9cd9aac313caa4 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Fri, 1 Jan 2021 03:57:21 -0800 Subject: [PATCH 326/474] [dev.regabi] cmd/compile: split escape analysis state In a future CL, I plan to change escape analysis to walk function literal bodies at the point they appear within the AST, rather than separately as their own standalone function declaration. This means escape analysis's AST-walking code will become reentrant. To make this easier to get right, this CL splits escape analysis's state into two separate types: one that holds all of the state shared across the entire batch, and another that holds only the state that's used within initFunc and walkFunc. Incidentally, this CL reveals that a bunch of logopt code was using e.curfn outside of the AST-walking code paths where it's actually set, so it was always nil. That code is in need of refactoring anyway, so I'll come back and figure out the correct values to pass later when I address that. Passes toolstash -cmp. 
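In sketch form, the entry point now reads as follows (condensed from
the diff below; comments added here for explanation):

	var b batch
	b.heapLoc.escapes = true

	// Construct data-flow graph from syntax trees.
	for _, fn := range fns {
		b.with(fn).initFunc() // per-function state over shared batch state
	}
	for _, fn := range fns {
		b.with(fn).walkFunc()
	}

	b.walkAll() // operates on batch-level state only; no curfn in scope
	b.finish(fns)

where with pairs the shared *batch with a fresh escape{curfn: fn,
loopDepth: 1} for each function visited.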
Change-Id: I1d13f47d06f7583401afa1b53fcc5ee2adaea6c8 Reviewed-on: https://go-review.googlesource.com/c/go/+/280997 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/escape/escape.go | 122 +++++++++++++--------- 1 file changed, 70 insertions(+), 52 deletions(-) diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index 98dbf54b755bc..17770ffbbcaaa 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -85,20 +85,29 @@ import ( // u[2], etc. However, we do record the implicit dereference involved // in indexing a slice. -type escape struct { +// A batch holds escape analysis state that's shared across an entire +// batch of functions being analyzed at once. +type batch struct { allLocs []*location - labels map[*types.Sym]labelState // known labels - curfn *ir.Func + heapLoc location + blankLoc location +} + +// An escape holds state specific to a single function being analyzed +// within a batch. +type escape struct { + *batch + + curfn *ir.Func // function being analyzed + + labels map[*types.Sym]labelState // known labels // loopDepth counts the current loop nesting depth within // curfn. It increments within each "for" loop and at each // label with a corresponding backwards "goto" (i.e., // unstructured loop). loopDepth int - - heapLoc location - blankLoc location } // An location represents an abstract location that stores a Go @@ -167,11 +176,11 @@ func Fmt(n ir.Node) string { if n.Op() == ir.ONAME { n := n.(*ir.Name) - if e, ok := n.Opt.(*location); ok && e.loopDepth != 0 { + if loc, ok := n.Opt.(*location); ok && loc.loopDepth != 0 { if text != "" { text += " " } - text += fmt.Sprintf("ld(%d)", e.loopDepth) + text += fmt.Sprintf("ld(%d)", loc.loopDepth) } } @@ -187,23 +196,31 @@ func Batch(fns []*ir.Func, recursive bool) { } } - var e escape - e.heapLoc.escapes = true + var b batch + b.heapLoc.escapes = true // Construct data-flow graph from syntax trees. for _, fn := range fns { - e.initFunc(fn) + b.with(fn).initFunc() } for _, fn := range fns { - e.walkFunc(fn) + b.with(fn).walkFunc() } - e.curfn = nil - e.walkAll() - e.finish(fns) + b.walkAll() + b.finish(fns) +} + +func (b *batch) with(fn *ir.Func) *escape { + return &escape{ + batch: b, + curfn: fn, + loopDepth: 1, + } } -func (e *escape) initFunc(fn *ir.Func) { +func (e *escape) initFunc() { + fn := e.curfn if fn.Esc() != escFuncUnknown { base.Fatalf("unexpected node: %v", fn) } @@ -212,9 +229,6 @@ func (e *escape) initFunc(fn *ir.Func) { ir.Dump("escAnalyze", fn) } - e.curfn = fn - e.loopDepth = 1 - // Allocate locations for local variables. for _, dcl := range fn.Dcl { if dcl.Op() == ir.ONAME { @@ -223,7 +237,8 @@ func (e *escape) initFunc(fn *ir.Func) { } } -func (e *escape) walkFunc(fn *ir.Func) { +func (e *escape) walkFunc() { + fn := e.curfn fn.SetEsc(escFuncStarted) // Identify labels that mark the head of an unstructured loop. @@ -246,8 +261,6 @@ func (e *escape) walkFunc(fn *ir.Func) { } }) - e.curfn = fn - e.loopDepth = 1 e.block(fn.Body) if len(e.labels) != 0 { @@ -680,9 +693,9 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) { case ir.OCLOSURE: n := n.(*ir.ClosureExpr) - k = e.spill(k, n) // Link addresses of captured variables to closure. 
+ k = e.spill(k, n) for _, v := range n.Func.ClosureVars { k := k if !v.Byval() { @@ -1174,7 +1187,7 @@ func (e *escape) newLoc(n ir.Node, transient bool) *location { return loc } -func (e *escape) oldLoc(n *ir.Name) *location { +func (b *batch) oldLoc(n *ir.Name) *location { n = canonicalNode(n).(*ir.Name) return n.Opt.(*location) } @@ -1216,7 +1229,7 @@ func (e *escape) discardHole() hole { return e.blankLoc.asHole() } // walkAll computes the minimal dereferences between all pairs of // locations. -func (e *escape) walkAll() { +func (b *batch) walkAll() { // We use a work queue to keep track of locations that we need // to visit, and repeatedly walk until we reach a fixed point. // @@ -1226,7 +1239,7 @@ func (e *escape) walkAll() { // happen at most once. So we take Θ(len(e.allLocs)) walks. // LIFO queue, has enough room for e.allLocs and e.heapLoc. - todo := make([]*location, 0, len(e.allLocs)+1) + todo := make([]*location, 0, len(b.allLocs)+1) enqueue := func(loc *location) { if !loc.queued { todo = append(todo, loc) @@ -1234,10 +1247,10 @@ func (e *escape) walkAll() { } } - for _, loc := range e.allLocs { + for _, loc := range b.allLocs { enqueue(loc) } - enqueue(&e.heapLoc) + enqueue(&b.heapLoc) var walkgen uint32 for len(todo) > 0 { @@ -1246,13 +1259,13 @@ func (e *escape) walkAll() { root.queued = false walkgen++ - e.walkOne(root, walkgen, enqueue) + b.walkOne(root, walkgen, enqueue) } } // walkOne computes the minimal number of dereferences from root to // all other locations. -func (e *escape) walkOne(root *location, walkgen uint32, enqueue func(*location)) { +func (b *batch) walkOne(root *location, walkgen uint32, enqueue func(*location)) { // The data flow graph has negative edges (from addressing // operations), so we use the Bellman-Ford algorithm. However, // we don't have to worry about infinite negative cycles since @@ -1287,7 +1300,7 @@ func (e *escape) walkOne(root *location, walkgen uint32, enqueue func(*location) } } - if e.outlives(root, l) { + if b.outlives(root, l) { // l's value flows to root. If l is a function // parameter and root is the heap or a // corresponding result parameter, then record @@ -1296,12 +1309,13 @@ func (e *escape) walkOne(root *location, walkgen uint32, enqueue func(*location) if l.isName(ir.PPARAM) { if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.escapes { if base.Flag.LowerM >= 2 { - fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, e.explainLoc(root), derefs) + fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(root), derefs) } - explanation := e.explainPath(root, l) + explanation := b.explainPath(root, l) if logopt.Enabled() { - logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e.curfn), - fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, e.explainLoc(root), derefs), explanation) + var e_curfn *ir.Func // TODO(mdempsky): Fix. 
+ logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn), + fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(root), derefs), explanation) } } l.leakTo(root, derefs) @@ -1315,9 +1329,10 @@ func (e *escape) walkOne(root *location, walkgen uint32, enqueue func(*location) if base.Flag.LowerM >= 2 { fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n) } - explanation := e.explainPath(root, l) + explanation := b.explainPath(root, l) if logopt.Enabled() { - logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e.curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation) + var e_curfn *ir.Func // TODO(mdempsky): Fix. + logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation) } } l.escapes = true @@ -1343,7 +1358,7 @@ func (e *escape) walkOne(root *location, walkgen uint32, enqueue func(*location) } // explainPath prints an explanation of how src flows to the walk root. -func (e *escape) explainPath(root, src *location) []*logopt.LoggedOpt { +func (b *batch) explainPath(root, src *location) []*logopt.LoggedOpt { visited := make(map[*location]bool) pos := base.FmtPos(src.n.Pos()) var explanation []*logopt.LoggedOpt @@ -1362,7 +1377,7 @@ func (e *escape) explainPath(root, src *location) []*logopt.LoggedOpt { base.Fatalf("path inconsistency: %v != %v", edge.src, src) } - explanation = e.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation) + explanation = b.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation) if dst == root { break @@ -1373,14 +1388,14 @@ func (e *escape) explainPath(root, src *location) []*logopt.LoggedOpt { return explanation } -func (e *escape) explainFlow(pos string, dst, srcloc *location, derefs int, notes *note, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt { +func (b *batch) explainFlow(pos string, dst, srcloc *location, derefs int, notes *note, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt { ops := "&" if derefs >= 0 { ops = strings.Repeat("*", derefs) } print := base.Flag.LowerM >= 2 - flow := fmt.Sprintf(" flow: %s = %s%v:", e.explainLoc(dst), ops, e.explainLoc(srcloc)) + flow := fmt.Sprintf(" flow: %s = %s%v:", b.explainLoc(dst), ops, b.explainLoc(srcloc)) if print { fmt.Printf("%s:%s\n", pos, flow) } @@ -1391,7 +1406,8 @@ func (e *escape) explainFlow(pos string, dst, srcloc *location, derefs int, note } else if srcloc != nil && srcloc.n != nil { epos = srcloc.n.Pos() } - explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", ir.FuncName(e.curfn), flow)) + var e_curfn *ir.Func // TODO(mdempsky): Fix. + explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", ir.FuncName(e_curfn), flow)) } for note := notes; note != nil; note = note.next { @@ -1399,15 +1415,16 @@ func (e *escape) explainFlow(pos string, dst, srcloc *location, derefs int, note fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos())) } if logopt.Enabled() { - explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos(), "escflow", "escape", ir.FuncName(e.curfn), + var e_curfn *ir.Func // TODO(mdempsky): Fix. 
+ explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos(), "escflow", "escape", ir.FuncName(e_curfn), fmt.Sprintf(" from %v (%v)", note.where, note.why))) } } return explanation } -func (e *escape) explainLoc(l *location) string { - if l == &e.heapLoc { +func (b *batch) explainLoc(l *location) string { + if l == &b.heapLoc { return "{heap}" } if l.n == nil { @@ -1422,7 +1439,7 @@ func (e *escape) explainLoc(l *location) string { // outlives reports whether values stored in l may survive beyond // other's lifetime if stack allocated. -func (e *escape) outlives(l, other *location) bool { +func (b *batch) outlives(l, other *location) bool { // The heap outlives everything. if l.escapes { return true @@ -1503,7 +1520,7 @@ func (l *location) leakTo(sink *location, derefs int) { l.paramEsc.AddHeap(derefs) } -func (e *escape) finish(fns []*ir.Func) { +func (b *batch) finish(fns []*ir.Func) { // Record parameter tags for package export data. for _, fn := range fns { fn.SetEsc(escFuncTagged) @@ -1512,12 +1529,12 @@ func (e *escape) finish(fns []*ir.Func) { for _, fs := range &types.RecvsParams { for _, f := range fs(fn.Type()).Fields().Slice() { narg++ - f.Note = e.paramTag(fn, narg, f) + f.Note = b.paramTag(fn, narg, f) } } } - for _, loc := range e.allLocs { + for _, loc := range b.allLocs { n := loc.n if n == nil { continue @@ -1535,7 +1552,8 @@ func (e *escape) finish(fns []*ir.Func) { base.WarnfAt(n.Pos(), "%v escapes to heap", n) } if logopt.Enabled() { - logopt.LogOpt(n.Pos(), "escape", "escape", ir.FuncName(e.curfn)) + var e_curfn *ir.Func // TODO(mdempsky): Fix. + logopt.LogOpt(n.Pos(), "escape", "escape", ir.FuncName(e_curfn)) } } n.SetEsc(ir.EscHeap) @@ -2061,7 +2079,7 @@ const UnsafeUintptrNote = "unsafe-uintptr" // marked go:uintptrescapes. const UintptrEscapesNote = "uintptr-escapes" -func (e *escape) paramTag(fn *ir.Func, narg int, f *types.Field) string { +func (b *batch) paramTag(fn *ir.Func, narg int, f *types.Field) string { name := func() string { if f.Sym != nil { return f.Sym.Name @@ -2132,7 +2150,7 @@ func (e *escape) paramTag(fn *ir.Func, narg int, f *types.Field) string { } n := f.Nname.(*ir.Name) - loc := e.oldLoc(n) + loc := b.oldLoc(n) esc := loc.paramEsc esc.Optimize() From bfa97ba48fa2924d9c2da1dca01fdb65b44cdb5f Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Fri, 1 Jan 2021 04:51:22 -0800 Subject: [PATCH 327/474] [dev.regabi] test: add another closure test case When deciding whether a captured variable can be passed by value, the compiler is sensitive to the order that the OCLOSURE node is typechecked relative to the order that the variable is passed to "checkassign". Today, for an assignment like: q, g = 2, func() int { return q } we get this right because we always typecheck the full RHS expression list before calling checkassign on any LHS expression. But I nearly made a change that would interleave this ordering, causing us to call checkassign on q before typechecking the function literal. And alarmingly, there weren't any tests that caught this. So this commit adds one. 
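For reference, a self-contained version of the added test (the full
diff follows; the comment is explanatory and not part of the commit):

	package main

	var never bool

	func main() {
		var g func() int
		q := 0
		// q's capture mode (by value vs. by reference) is decided while
		// typechecking the RHS function literal, which must happen
		// before checkassign sees the LHS q.
		q, g = 1, func() int { return q }
		if never {
			g = func() int { return 2 }
		}
		if g() != 1 {
			panic("g() != 1")
		}
	}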
Change-Id: I66cacd61066c7a229070861a7d973bcc434904cc Reviewed-on: https://go-review.googlesource.com/c/go/+/280998 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- test/closure2.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/test/closure2.go b/test/closure2.go index e4db05d8846ce..812d41f8ce02c 100644 --- a/test/closure2.go +++ b/test/closure2.go @@ -9,6 +9,8 @@ package main +var never bool + func main() { { type X struct { @@ -115,4 +117,16 @@ func main() { panic("g() != 2") } } + + { + var g func() int + q := 0 + q, g = 1, func() int { return q } + if never { + g = func() int { return 2 } + } + if g() != 1 { + panic("g() != 1") + } + } } From 7958a23ea326b48cb249840da5834188112889ea Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Fri, 1 Jan 2021 23:20:47 +0700 Subject: [PATCH 328/474] [dev.regabi] cmd/compile: use *ir.Name where possible in inl.go Passes toolstash -cmp. Change-Id: Ic99a5189ad0fca37bccb0e4b4d13793adc4f8fd8 Reviewed-on: https://go-review.googlesource.com/c/go/+/280715 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le Reviewed-by: Matthew Dempsky TryBot-Result: Go Bot --- src/cmd/compile/internal/inline/inl.go | 36 ++++++++++++++------------ 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 9e9d0bba7ce05..a70c3ae3623cd 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -639,17 +639,19 @@ func inlCallee(fn ir.Node) *ir.Func { return nil } -func inlParam(t *types.Field, as ir.Node, inlvars map[*ir.Name]ir.Node) ir.Node { - n := ir.AsNode(t.Nname) - if n == nil || ir.IsBlank(n) { +func inlParam(t *types.Field, as ir.Node, inlvars map[*ir.Name]*ir.Name) ir.Node { + if t.Nname == nil { return ir.BlankNode } - - inlvar := inlvars[n.(*ir.Name)] + n := t.Nname.(*ir.Name) + if ir.IsBlank(n) { + return ir.BlankNode + } + inlvar := inlvars[n] if inlvar == nil { base.Fatalf("missing inlvar for %v", n) } - as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, inlvar.(*ir.Name))) + as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, inlvar)) inlvar.Name().Defn = as return inlvar } @@ -748,10 +750,10 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b } // Make temp names to use instead of the originals. - inlvars := make(map[*ir.Name]ir.Node) + inlvars := make(map[*ir.Name]*ir.Name) // record formals/locals for later post-processing - var inlfvars []ir.Node + var inlfvars []*ir.Name for _, ln := range fn.Inl.Dcl { if ln.Op() != ir.ONAME { @@ -767,7 +769,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b // nothing should have moved to the heap yet. base.Fatalf("impossible: %v", ln) } - inlf := typecheck.Expr(inlvar(ln)) + inlf := typecheck.Expr(inlvar(ln)).(*ir.Name) inlvars[ln] = inlf if base.Flag.GenDwarfInl > 0 { if ln.Class_ == ir.PPARAM { @@ -795,11 +797,11 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b // temporaries for return values. 
var retvars []ir.Node for i, t := range fn.Type().Results().Fields().Slice() { - var m ir.Node - if n := ir.AsNode(t.Nname); n != nil && !ir.IsBlank(n) && !strings.HasPrefix(n.Sym().Name, "~r") { - n := n.(*ir.Name) + var m *ir.Name + if nn := t.Nname; nn != nil && !ir.IsBlank(nn.(*ir.Name)) && !strings.HasPrefix(nn.Sym().Name, "~r") { + n := nn.(*ir.Name) m = inlvar(n) - m = typecheck.Expr(m) + m = typecheck.Expr(m).(*ir.Name) inlvars[n] = m delayretvars = false // found a named result parameter } else { @@ -966,7 +968,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b // Every time we expand a function we generate a new set of tmpnames, // PAUTO's in the calling functions, and link them off of the // PPARAM's, PAUTOS and PPARAMOUTs of the called function. -func inlvar(var_ ir.Node) ir.Node { +func inlvar(var_ *ir.Name) *ir.Name { if base.Flag.LowerM > 3 { fmt.Printf("inlvar %+v\n", var_) } @@ -976,14 +978,14 @@ func inlvar(var_ ir.Node) ir.Node { n.Class_ = ir.PAUTO n.SetUsed(true) n.Curfn = ir.CurFunc // the calling function, not the called one - n.SetAddrtaken(var_.Name().Addrtaken()) + n.SetAddrtaken(var_.Addrtaken()) ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n) return n } // Synthesize a variable to store the inlined function's results in. -func retvar(t *types.Field, i int) ir.Node { +func retvar(t *types.Field, i int) *ir.Name { n := typecheck.NewName(typecheck.LookupNum("~R", i)) n.SetType(t.Type) n.Class_ = ir.PAUTO @@ -1018,7 +1020,7 @@ type inlsubst struct { // "return" statement. delayretvars bool - inlvars map[*ir.Name]ir.Node + inlvars map[*ir.Name]*ir.Name // bases maps from original PosBase to PosBase with an extra // inlined call frame. From 1544a03198139656ef4ebc287f2287ad19c19a51 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Sat, 2 Jan 2021 00:39:14 +0700 Subject: [PATCH 329/474] [dev.regabi] cmd/compile: refactor redundant type conversion [generated] Passes toolstash -cmp. [git-generate] cd src/cmd/compile rf ' ex . 
'"$(printf '%s\n' ./internal/* | paste -sd' ')"' { type T interface{} var t T strict t t.(T) -> t } ' cd internal/ir go generate Change-Id: I492d50390e724a7216c3cd8b49d4aaf7d0c335da Reviewed-on: https://go-review.googlesource.com/c/go/+/280716 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le Reviewed-by: Matthew Dempsky TryBot-Result: Go Bot --- src/cmd/compile/internal/inline/inl.go | 2 +- src/cmd/compile/internal/typecheck/func.go | 2 +- src/cmd/compile/internal/typecheck/typecheck.go | 2 +- src/cmd/compile/internal/walk/closure.go | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index a70c3ae3623cd..31b97a378703e 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -866,7 +866,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b vas.Y = typecheck.NodNil() vas.Y.SetType(param.Type) } else { - lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(param.Type).(ir.Ntype), nil) + lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(param.Type), nil) lit.List.Set(varargs) vas.Y = lit } diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index 2bc911882f74d..296755028dd9e 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -21,7 +21,7 @@ func MakeDotArgs(typ *types.Type, args []ir.Node) ir.Node { n = NodNil() n.SetType(typ) } else { - lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ).(ir.Ntype), nil) + lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ), nil) lit.List.Append(args...) lit.SetImplicit(true) n = lit diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index c8d82443a18c4..0822a4624ca0c 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -1686,7 +1686,7 @@ func stringtoruneslit(n *ir.ConvExpr) ir.Node { i++ } - nn := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(n.Type()).(ir.Ntype), nil) + nn := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(n.Type()), nil) nn.List.Set(l) return Expr(nn) } diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go index d4eb4eb8a3d07..62d2a362b1ff1 100644 --- a/src/cmd/compile/internal/walk/closure.go +++ b/src/cmd/compile/internal/walk/closure.go @@ -129,7 +129,7 @@ func walkClosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node { typ := typecheck.ClosureType(clo) - clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ).(ir.Ntype), nil) + clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ), nil) clos.SetEsc(clo.Esc()) clos.List.Set(append([]ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, fn.Nname)}, closureArgs(clo)...)) @@ -194,7 +194,7 @@ func walkCallPart(n *ir.SelectorExpr, init *ir.Nodes) ir.Node { typ := typecheck.PartialCallType(n) - clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ).(ir.Ntype), nil) + clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ), nil) clos.SetEsc(n.Esc()) clos.List = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, typecheck.MethodValueWrapper(n).Nname), n.X} From 2f2d4b4e68ab2fc448a1c2daf793b11ccde2fb16 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 2 Jan 2021 01:04:19 -0800 Subject: [PATCH 330/474] [dev.regabi] cmd/compile: remove {Ptr,Set}Init from Node interface 
This CL separates out PtrInit and SetInit into a new InitNode extension interface, and adds a new TakeInit helper function for taking and clearing the Init list (if any) from a Node. This allows removing miniNode.SetInit and miniNode.PtrInit, which in turn allow getting rid of immutableEmptyNodes, and will allow simplification of the Nodes API. It would be nice to get rid of the default Init method too, but there's way more code that expects to be able to call that at the moment, so that'll have to wait. Passes toolstash -cmp. Change-Id: Ia8c18fab9555b774376f7f43eeecfde4f07b5946 Reviewed-on: https://go-review.googlesource.com/c/go/+/281001 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/deadcode/deadcode.go | 4 +- src/cmd/compile/internal/inline/inl.go | 4 +- src/cmd/compile/internal/ir/mini.go | 8 +-- src/cmd/compile/internal/ir/node.go | 52 ++++++++----------- src/cmd/compile/internal/noder/noder.go | 2 +- .../compile/internal/typecheck/typecheck.go | 2 +- src/cmd/compile/internal/walk/assign.go | 10 ++-- src/cmd/compile/internal/walk/builtin.go | 2 +- src/cmd/compile/internal/walk/expr.go | 6 +-- src/cmd/compile/internal/walk/order.go | 9 ++-- src/cmd/compile/internal/walk/range.go | 2 +- src/cmd/compile/internal/walk/select.go | 9 ++-- src/cmd/compile/internal/walk/stmt.go | 12 ++--- src/cmd/compile/internal/walk/walk.go | 3 +- 14 files changed, 52 insertions(+), 73 deletions(-) diff --git a/src/cmd/compile/internal/deadcode/deadcode.go b/src/cmd/compile/internal/deadcode/deadcode.go index 5453cfe396b2a..474532bc17676 100644 --- a/src/cmd/compile/internal/deadcode/deadcode.go +++ b/src/cmd/compile/internal/deadcode/deadcode.go @@ -84,7 +84,9 @@ func stmts(nn *ir.Nodes) { } } - stmts(n.PtrInit()) + if len(n.Init()) != 0 { + stmts(n.(ir.InitNode).PtrInit()) + } switch n.Op() { case ir.OBLOCK: n := n.(*ir.BlockStmt) diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 31b97a378703e..24fbe3dac0313 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -639,7 +639,7 @@ func inlCallee(fn ir.Node) *ir.Func { return nil } -func inlParam(t *types.Field, as ir.Node, inlvars map[*ir.Name]*ir.Name) ir.Node { +func inlParam(t *types.Field, as ir.InitNode, inlvars map[*ir.Name]*ir.Name) ir.Node { if t.Nname == nil { return ir.BlankNode } @@ -741,7 +741,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b callee := n.X for callee.Op() == ir.OCONVNOP { conv := callee.(*ir.ConvExpr) - ninit.Append(conv.PtrInit().Take()...) + ninit.Append(ir.TakeInit(conv)...) callee = conv.X } if callee.Op() != ir.ONAME && callee.Op() != ir.OCLOSURE && callee.Op() != ir.OMETHEXPR { diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go index 92701326216e4..93aa15abece25 100644 --- a/src/cmd/compile/internal/ir/mini.go +++ b/src/cmd/compile/internal/ir/mini.go @@ -80,13 +80,7 @@ func (n *miniNode) SetDiag(x bool) { n.bits.set(miniDiag, x) } // Empty, immutable graph structure. -func (n *miniNode) Init() Nodes { return Nodes{} } -func (n *miniNode) PtrInit() *Nodes { return &immutableEmptyNodes } -func (n *miniNode) SetInit(x Nodes) { - if x != nil { - panic(n.no("SetInit")) - } -} +func (n *miniNode) Init() Nodes { return Nodes{} } // Additional functionality unavailable. 
diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 9536503085bca..9945cc987ac76 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -34,8 +34,6 @@ type Node interface { // Abstract graph structure, for generic traversals. Op() Op Init() Nodes - PtrInit() *Nodes - SetInit(x Nodes) // Fields specific to certain Ops only. Type() *types.Type @@ -90,6 +88,20 @@ func MayBeShared(n Node) bool { return false } +type InitNode interface { + Node + PtrInit() *Nodes + SetInit(x Nodes) +} + +func TakeInit(n Node) Nodes { + init := n.Init() + if len(init) != 0 { + n.(InitNode).SetInit(nil) + } + return init +} + //go:generate stringer -type=Op -trimprefix=O node.go type Op uint8 @@ -311,35 +323,15 @@ const ( // a slice to save space. type Nodes []Node -// immutableEmptyNodes is an immutable, empty Nodes list. -// The methods that would modify it panic instead. -var immutableEmptyNodes = Nodes{} - -func (n *Nodes) mutate() { - if n == &immutableEmptyNodes { - panic("immutable Nodes.Set") - } -} - // Set sets n to a slice. // This takes ownership of the slice. -func (n *Nodes) Set(s []Node) { - if n == &immutableEmptyNodes { - if len(s) == 0 { - // Allow immutableEmptyNodes.Set(nil) (a no-op). - return - } - n.mutate() - } - *n = s -} +func (n *Nodes) Set(s []Node) { *n = s } // Append appends entries to Nodes. func (n *Nodes) Append(a ...Node) { if len(a) == 0 { return } - n.mutate() *n = append(*n, a...) } @@ -349,7 +341,6 @@ func (n *Nodes) Prepend(a ...Node) { if len(a) == 0 { return } - n.mutate() *n = append(a, *n...) } @@ -544,15 +535,16 @@ func SetPos(n Node) src.XPos { // The result of InitExpr MUST be assigned back to n, e.g. // n.Left = InitExpr(init, n.Left) -func InitExpr(init []Node, n Node) Node { +func InitExpr(init []Node, expr Node) Node { if len(init) == 0 { - return n + return expr } - if MayBeShared(n) { + + n, ok := expr.(InitNode) + if !ok || MayBeShared(n) { // Introduce OCONVNOP to hold init list. - old := n - n = NewConvExpr(base.Pos, OCONVNOP, nil, old) - n.SetType(old.Type()) + n = NewConvExpr(base.Pos, OCONVNOP, nil, expr) + n.SetType(expr.Type()) n.SetTypecheck(1) } diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index cc8a1c7c89a4f..948833f46e91d 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -1200,7 +1200,7 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node { panic("unhandled Stmt") } -func (p *noder) assignList(expr syntax.Expr, defn ir.Node, colas bool) []ir.Node { +func (p *noder) assignList(expr syntax.Expr, defn ir.InitNode, colas bool) []ir.Node { if !colas { return p.exprList(expr) } diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index 0822a4624ca0c..0ee66df2cfc5b 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -914,7 +914,7 @@ func typecheck1(n ir.Node, top int) ir.Node { // Each must execute its own return n. 
} -func typecheckargs(n ir.Node) { +func typecheckargs(n ir.InitNode) { var list []ir.Node switch n := n.(type) { default: diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go index c01079d236bc0..762baa0dd940d 100644 --- a/src/cmd/compile/internal/walk/assign.go +++ b/src/cmd/compile/internal/walk/assign.go @@ -17,7 +17,7 @@ import ( // walkAssign walks an OAS (AssignExpr) or OASOP (AssignOpExpr) node. func walkAssign(init *ir.Nodes, n ir.Node) ir.Node { - init.Append(n.PtrInit().Take()...) + init.Append(ir.TakeInit(n)...) var left, right ir.Node switch n.Op() { @@ -124,7 +124,7 @@ func walkAssignDotType(n *ir.AssignListStmt, init *ir.Nodes) ir.Node { // walkAssignFunc walks an OAS2FUNC node. func walkAssignFunc(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { - init.Append(n.PtrInit().Take()...) + init.Append(ir.TakeInit(n)...) r := n.Rhs[0] walkExprListSafe(n.Lhs, init) @@ -142,7 +142,7 @@ func walkAssignFunc(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { // walkAssignList walks an OAS2 node. func walkAssignList(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { - init.Append(n.PtrInit().Take()...) + init.Append(ir.TakeInit(n)...) walkExprListSafe(n.Lhs, init) walkExprListSafe(n.Rhs, init) return ir.NewBlockStmt(src.NoXPos, ascompatee(ir.OAS, n.Lhs, n.Rhs, init)) @@ -150,7 +150,7 @@ func walkAssignList(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { // walkAssignMapRead walks an OAS2MAPR node. func walkAssignMapRead(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { - init.Append(n.PtrInit().Take()...) + init.Append(ir.TakeInit(n)...) r := n.Rhs[0].(*ir.IndexExpr) walkExprListSafe(n.Lhs, init) @@ -213,7 +213,7 @@ func walkAssignMapRead(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { // walkAssignRecv walks an OAS2RECV node. func walkAssignRecv(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { - init.Append(n.PtrInit().Take()...) + init.Append(ir.TakeInit(n)...) r := n.Rhs[0].(*ir.UnaryExpr) // recv walkExprListSafe(n.Lhs, init) diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go index fe6045cbbd1d9..13837eeffc4ba 100644 --- a/src/cmd/compile/internal/walk/builtin.go +++ b/src/cmd/compile/internal/walk/builtin.go @@ -206,7 +206,7 @@ func walkCopy(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node { // walkDelete walks an ODELETE node. func walkDelete(init *ir.Nodes, n *ir.CallExpr) ir.Node { - init.Append(n.PtrInit().Take()...) + init.Append(ir.TakeInit(n)...) map_ := n.Args[0] key := n.Args[1] map_ = walkExpr(map_, init) diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index 1fd09b42af4ca..7dfac300946bd 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -26,7 +26,7 @@ func walkExpr(n ir.Node, init *ir.Nodes) ir.Node { return n } - if init == n.PtrInit() { + if n, ok := n.(ir.InitNode); ok && init == n.PtrInit() { // not okay to use n->ninit when walking n, // because we might replace n with some other node // and would lose the init list. @@ -35,7 +35,7 @@ func walkExpr(n ir.Node, init *ir.Nodes) ir.Node { if len(n.Init()) != 0 { walkStmtList(n.Init()) - init.Append(n.PtrInit().Take()...) + init.Append(ir.TakeInit(n)...) } lno := ir.SetPos(n) @@ -359,7 +359,7 @@ func safeExpr(n ir.Node, init *ir.Nodes) ir.Node { if len(n.Init()) != 0 { walkStmtList(n.Init()) - init.Append(n.PtrInit().Take()...) + init.Append(ir.TakeInit(n)...) 
} switch n.Op() { diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index e40c877ea939c..679b795270e85 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -466,8 +466,7 @@ func (o *orderState) init(n ir.Node) { } return } - o.stmtList(n.Init()) - n.PtrInit().Set(nil) + o.stmtList(ir.TakeInit(n)) } // call orders the call expression n. @@ -938,8 +937,7 @@ func (o *orderState) stmt(n ir.Node) { if !ir.IsAutoTmp(recv.X) { recv.X = o.copyExpr(recv.X) } - init := *r.PtrInit() - r.PtrInit().Set(nil) + init := ir.TakeInit(r) colas := r.Def do := func(i int, t *types.Type) { @@ -1000,8 +998,7 @@ func (o *orderState) stmt(n ir.Node) { // TODO(mdempsky): Is this actually necessary? // walkselect appears to walk Ninit. - cas.Body.Prepend(cas.Init()...) - cas.PtrInit().Set(nil) + cas.Body.Prepend(ir.TakeInit(cas)...) } o.out = append(o.out, n) diff --git a/src/cmd/compile/internal/walk/range.go b/src/cmd/compile/internal/walk/range.go index 49a69e97513f2..3092b71d7294d 100644 --- a/src/cmd/compile/internal/walk/range.go +++ b/src/cmd/compile/internal/walk/range.go @@ -210,7 +210,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { a.SetTypecheck(1) a.Lhs = []ir.Node{hv1, hb} a.Rhs = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.ORECV, ha)} - *nfor.Cond.PtrInit() = []ir.Node{a} + nfor.Cond = ir.InitExpr([]ir.Node{a}, nfor.Cond) if v1 == nil { body = nil } else { diff --git a/src/cmd/compile/internal/walk/select.go b/src/cmd/compile/internal/walk/select.go index 1c5e1d7e64ac7..c6e9b71384d50 100644 --- a/src/cmd/compile/internal/walk/select.go +++ b/src/cmd/compile/internal/walk/select.go @@ -17,8 +17,7 @@ func walkSelect(sel *ir.SelectStmt) { base.Fatalf("double walkselect") } - init := sel.Init() - sel.PtrInit().Set(nil) + init := ir.TakeInit(sel) init = append(init, walkSelectCases(sel.Cases)...) sel.Cases = nil @@ -45,8 +44,7 @@ func walkSelectCases(cases []*ir.CommClause) []ir.Node { l := cas.Init() if cas.Comm != nil { // not default: n := cas.Comm - l = append(l, n.Init()...) - n.PtrInit().Set(nil) + l = append(l, ir.TakeInit(n)...) switch n.Op() { default: base.Fatalf("select %v", n.Op()) @@ -171,8 +169,7 @@ func walkSelectCases(cases []*ir.CommClause) []ir.Node { for _, cas := range cases { ir.SetPos(cas) - init = append(init, cas.Init()...) - cas.PtrInit().Set(nil) + init = append(init, ir.TakeInit(cas)...) n := cas.Comm if n == nil { // default: diff --git a/src/cmd/compile/internal/walk/stmt.go b/src/cmd/compile/internal/walk/stmt.go index 8641a58e2e797..3440c6650642e 100644 --- a/src/cmd/compile/internal/walk/stmt.go +++ b/src/cmd/compile/internal/walk/stmt.go @@ -55,8 +55,7 @@ func walkStmt(n ir.Node) ir.Node { if n.Typecheck() == 0 { base.Fatalf("missing typecheck: %+v", n) } - init := n.Init() - n.PtrInit().Set(nil) + init := ir.TakeInit(n) n = walkExpr(n, &init) if n.Op() == ir.ONAME { // copy rewrote to a statement list and a temp for the length. @@ -67,7 +66,7 @@ func walkStmt(n ir.Node) ir.Node { if len(init) > 0 { switch n.Op() { case ir.OAS, ir.OAS2, ir.OBLOCK: - n.PtrInit().Prepend(init...) + n.(ir.InitNode).PtrInit().Prepend(init...) default: init.Append(n) @@ -191,9 +190,8 @@ func walkDecl(n *ir.Decl) ir.Node { // walkFor walks an OFOR or OFORUNTIL node. 
func walkFor(n *ir.ForStmt) ir.Node { if n.Cond != nil { - walkStmtList(n.Cond.Init()) - init := n.Cond.Init() - n.Cond.PtrInit().Set(nil) + init := ir.TakeInit(n.Cond) + walkStmtList(init) n.Cond = walkExpr(n.Cond, &init) n.Cond = ir.InitExpr(init, n.Cond) } @@ -257,7 +255,7 @@ func walkIf(n *ir.IfStmt) ir.Node { func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node { if len(n.Init()) != 0 { walkStmtList(n.Init()) - init.Append(n.PtrInit().Take()...) + init.Append(ir.TakeInit(n)...) } isBuiltinCall := n.Op() != ir.OCALLFUNC && n.Op() != ir.OCALLMETH && n.Op() != ir.OCALLINTER diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go index 25f53a8e7c50c..57c2d43753b5e 100644 --- a/src/cmd/compile/internal/walk/walk.go +++ b/src/cmd/compile/internal/walk/walk.go @@ -81,8 +81,7 @@ func walkRecv(n *ir.UnaryExpr) ir.Node { if n.Typecheck() == 0 { base.Fatalf("missing typecheck: %+v", n) } - init := n.Init() - n.PtrInit().Set(nil) + init := ir.TakeInit(n) n.X = walkExpr(n.X, &init) call := walkExpr(mkcall1(chanfn("chanrecv1", 2, n.X.Type()), nil, &init, n.X, typecheck.NodNil()), &init) From f2538033c08a8c215a19610680d66f5909c5bcdd Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 2 Jan 2021 01:27:29 -0800 Subject: [PATCH 331/474] [dev.regabi] cmd/compile: remove Nodes.Set [generated] Just "=". It's cleaner. Passes toolstash -cmp. [git-generate] cd src/cmd/compile/internal/ir pkgs=$(go list . ../...) rf ' ex '"$(echo $pkgs)"' { var l Nodes var p *Nodes p.Set(l) -> *p = l } ex '"$(echo $pkgs)"' { var n InitNode var l Nodes *n.PtrInit() = l -> n.SetInit(l) } rm Nodes.Set ' Change-Id: Ic97219792243667146a02776553942ae1189ff7d Reviewed-on: https://go-review.googlesource.com/c/go/+/281002 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/deadcode/deadcode.go | 4 ++-- src/cmd/compile/internal/inline/inl.go | 16 ++++++++-------- src/cmd/compile/internal/ir/expr.go | 10 +++++----- src/cmd/compile/internal/ir/node.go | 4 ---- src/cmd/compile/internal/ir/stmt.go | 16 ++++++++-------- src/cmd/compile/internal/noder/noder.go | 14 +++++++------- src/cmd/compile/internal/pkginit/init.go | 2 +- src/cmd/compile/internal/reflectdata/reflect.go | 2 +- src/cmd/compile/internal/ssagen/abi.go | 2 +- src/cmd/compile/internal/typecheck/const.go | 4 ++-- src/cmd/compile/internal/typecheck/func.go | 8 ++++---- src/cmd/compile/internal/typecheck/iimport.go | 16 ++++++++-------- src/cmd/compile/internal/typecheck/typecheck.go | 8 ++++---- src/cmd/compile/internal/walk/assign.go | 4 ++-- src/cmd/compile/internal/walk/builtin.go | 6 +++--- src/cmd/compile/internal/walk/closure.go | 4 ++-- src/cmd/compile/internal/walk/expr.go | 6 +++--- src/cmd/compile/internal/walk/order.go | 12 ++++++------ src/cmd/compile/internal/walk/range.go | 2 +- src/cmd/compile/internal/walk/select.go | 8 ++++---- src/cmd/compile/internal/walk/stmt.go | 4 ++-- 21 files changed, 74 insertions(+), 78 deletions(-) diff --git a/src/cmd/compile/internal/deadcode/deadcode.go b/src/cmd/compile/internal/deadcode/deadcode.go index 474532bc17676..c409320fc4894 100644 --- a/src/cmd/compile/internal/deadcode/deadcode.go +++ b/src/cmd/compile/internal/deadcode/deadcode.go @@ -38,7 +38,7 @@ func Func(fn *ir.Func) { } } - fn.Body.Set([]ir.Node{ir.NewBlockStmt(base.Pos, nil)}) + fn.Body = []ir.Node{ir.NewBlockStmt(base.Pos, nil)} } func stmts(nn *ir.Nodes) { @@ -114,7 +114,7 @@ func stmts(nn *ir.Nodes) { } if cut { - nn.Set((*nn)[:i+1]) + *nn = 
(*nn)[:i+1] break } } diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 24fbe3dac0313..2887abb0614d4 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -544,7 +544,7 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No if as := n; as.Op() == ir.OAS2FUNC { as := as.(*ir.AssignListStmt) if as.Rhs[0].Op() == ir.OINLCALL { - as.Rhs.Set(inlconv2list(as.Rhs[0].(*ir.InlinedCallExpr))) + as.Rhs = inlconv2list(as.Rhs[0].(*ir.InlinedCallExpr)) as.SetOp(ir.OAS2) as.SetTypecheck(0) n = typecheck.Stmt(as) @@ -867,7 +867,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b vas.Y.SetType(param.Type) } else { lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(param.Type), nil) - lit.List.Set(varargs) + lit.List = varargs vas.Y = lit } } @@ -944,9 +944,9 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b //dumplist("ninit post", ninit); call := ir.NewInlinedCallExpr(base.Pos, nil, nil) - call.PtrInit().Set(ninit) - call.Body.Set(body) - call.ReturnVars.Set(retvars) + *call.PtrInit() = ninit + call.Body = body + call.ReturnVars = retvars call.SetType(n.Type()) call.SetTypecheck(1) @@ -1120,7 +1120,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { for _, n := range subst.retvars { as.Lhs.Append(n) } - as.Rhs.Set(subst.list(n.Results)) + as.Rhs = subst.list(n.Results) if subst.delayretvars { for _, n := range as.Lhs { @@ -1139,7 +1139,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { n := n.(*ir.BranchStmt) m := ir.Copy(n).(*ir.BranchStmt) m.SetPos(subst.updatedPos(m.Pos())) - m.PtrInit().Set(nil) + *m.PtrInit() = nil p := fmt.Sprintf("%s·%d", n.Label.Name, inlgen) m.Label = typecheck.Lookup(p) return m @@ -1148,7 +1148,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { n := n.(*ir.LabelStmt) m := ir.Copy(n).(*ir.LabelStmt) m.SetPos(subst.updatedPos(m.Pos())) - m.PtrInit().Set(nil) + *m.PtrInit() = nil p := fmt.Sprintf("%s·%d", n.Label.Name, inlgen) m.Label = typecheck.Lookup(p) return m diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 88fbdff1e0feb..1b88427146b2b 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -67,7 +67,7 @@ func NewAddStringExpr(pos src.XPos, list []Node) *AddStringExpr { n := &AddStringExpr{} n.pos = pos n.op = OADDSTR - n.List.Set(list) + n.List = list return n } @@ -173,7 +173,7 @@ func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr { n.pos = pos n.orig = n n.SetOp(op) - n.Args.Set(args) + n.Args = args return n } @@ -231,7 +231,7 @@ func NewCompLitExpr(pos src.XPos, op Op, typ Ntype, list []Node) *CompLitExpr { n := &CompLitExpr{Ntype: typ} n.pos = pos n.SetOp(op) - n.List.Set(list) + n.List = list n.orig = n return n } @@ -364,8 +364,8 @@ func NewInlinedCallExpr(pos src.XPos, body, retvars []Node) *InlinedCallExpr { n := &InlinedCallExpr{} n.pos = pos n.op = OINLCALL - n.Body.Set(body) - n.ReturnVars.Set(retvars) + n.Body = body + n.ReturnVars = retvars return n } diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 9945cc987ac76..9d1ee17aa8a84 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -323,10 +323,6 @@ const ( // a slice to save space. type Nodes []Node -// Set sets n to a slice. -// This takes ownership of the slice. 
-func (n *Nodes) Set(s []Node) { *n = s } - // Append appends entries to Nodes. func (n *Nodes) Append(a ...Node) { if len(a) == 0 { diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index 9c2cba9a082bd..b13c6b7795ec2 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -70,8 +70,8 @@ func NewAssignListStmt(pos src.XPos, op Op, lhs, rhs []Node) *AssignListStmt { n := &AssignListStmt{} n.pos = pos n.SetOp(op) - n.Lhs.Set(lhs) - n.Rhs.Set(rhs) + n.Lhs = lhs + n.Rhs = rhs return n } @@ -141,7 +141,7 @@ func NewBlockStmt(pos src.XPos, list []Node) *BlockStmt { } } n.op = OBLOCK - n.List.Set(list) + n.List = list return n } @@ -216,7 +216,7 @@ func NewForStmt(pos src.XPos, init Node, cond, post Node, body []Node) *ForStmt if init != nil { n.init = []Node{init} } - n.Body.Set(body) + n.Body = body return n } @@ -262,8 +262,8 @@ func NewIfStmt(pos src.XPos, cond Node, body, els []Node) *IfStmt { n := &IfStmt{Cond: cond} n.pos = pos n.op = OIF - n.Body.Set(body) - n.Else.Set(els) + n.Body = body + n.Else = els return n } @@ -315,7 +315,7 @@ func NewRangeStmt(pos src.XPos, key, value, x Node, body []Node) *RangeStmt { n := &RangeStmt{X: x, Key: key, Value: value} n.pos = pos n.op = ORANGE - n.Body.Set(body) + n.Body = body return n } @@ -331,7 +331,7 @@ func NewReturnStmt(pos src.XPos, results []Node) *ReturnStmt { n.pos = pos n.op = ORETURN n.orig = n - n.Results.Set(results) + n.Results = results return n } diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index 948833f46e91d..678e378291524 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -245,7 +245,7 @@ func (p *noder) funcBody(fn *ir.Func, block *syntax.BlockStmt) { if body == nil { body = []ir.Node{ir.NewBlockStmt(base.Pos, nil)} } - fn.Body.Set(body) + fn.Body = body base.Pos = p.makeXPos(block.Rbrace) fn.Endlineno = base.Pos @@ -772,7 +772,7 @@ func (p *noder) expr(expr syntax.Expr) ir.Node { for i, e := range l { l[i] = p.wrapname(expr.ElemList[i], e) } - n.List.Set(l) + n.List = l base.Pos = p.makeXPos(expr.Rbrace) return n case *syntax.KeyValueExpr: @@ -1128,8 +1128,8 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node { if list, ok := stmt.Lhs.(*syntax.ListExpr); ok && len(list.ElemList) != 1 || len(rhs) != 1 { n := ir.NewAssignListStmt(p.pos(stmt), ir.OAS2, nil, nil) n.Def = stmt.Op == syntax.Def - n.Lhs.Set(p.assignList(stmt.Lhs, n, n.Def)) - n.Rhs.Set(rhs) + n.Lhs = p.assignList(stmt.Lhs, n, n.Def) + n.Rhs = rhs return n } @@ -1276,7 +1276,7 @@ func (p *noder) ifStmt(stmt *syntax.IfStmt) ir.Node { e := p.stmt(stmt.Else) if e.Op() == ir.OBLOCK { e := e.(*ir.BlockStmt) - n.Else.Set(e.List) + n.Else = e.List } else { n.Else = []ir.Node{e} } @@ -1301,7 +1301,7 @@ func (p *noder) forStmt(stmt *syntax.ForStmt) ir.Node { n.Value = lhs[1] } } - n.Body.Set(p.blockStmt(stmt.Body)) + n.Body = p.blockStmt(stmt.Body) p.closeAnotherScope() return n } @@ -1359,7 +1359,7 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitch body = body[:len(body)-1] } - n.Body.Set(p.stmtsFall(body, true)) + n.Body = p.stmtsFall(body, true) if l := len(n.Body); l > 0 && n.Body[l-1].Op() == ir.OFALL { if tswitch != nil { base.Errorf("cannot fallthrough in type switch") diff --git a/src/cmd/compile/internal/pkginit/init.go b/src/cmd/compile/internal/pkginit/init.go index 24fe1a76280d8..a32e09879c7f1 100644 --- a/src/cmd/compile/internal/pkginit/init.go 
+++ b/src/cmd/compile/internal/pkginit/init.go @@ -49,7 +49,7 @@ func Task() *ir.Name { fn.Dcl = append(fn.Dcl, typecheck.InitTodoFunc.Dcl...) typecheck.InitTodoFunc.Dcl = nil - fn.Body.Set(nf) + fn.Body = nf typecheck.FinishFuncBody() typecheck.Func(fn) diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go index 5f88262ddf08d..f926765326904 100644 --- a/src/cmd/compile/internal/reflectdata/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -1798,7 +1798,7 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym { } else { fn.SetWrapper(true) // ignore frame for panic+recover matching call := ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil) - call.Args.Set(ir.ParamNames(tfn.Type())) + call.Args = ir.ParamNames(tfn.Type()) call.IsDDD = tfn.Type().IsVariadic() if method.Type.NumResults() > 0 { ret := ir.NewReturnStmt(base.Pos, nil) diff --git a/src/cmd/compile/internal/ssagen/abi.go b/src/cmd/compile/internal/ssagen/abi.go index cd5d962b918ce..1c013dd2d8757 100644 --- a/src/cmd/compile/internal/ssagen/abi.go +++ b/src/cmd/compile/internal/ssagen/abi.go @@ -303,7 +303,7 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { tail = ir.NewBranchStmt(base.Pos, ir.ORETJMP, f.Nname.Sym()) } else { call := ir.NewCallExpr(base.Pos, ir.OCALL, f.Nname, nil) - call.Args.Set(ir.ParamNames(tfn.Type())) + call.Args = ir.ParamNames(tfn.Type()) call.IsDDD = tfn.Type().IsVariadic() tail = call if tfn.Type().NumResults() > 0 { diff --git a/src/cmd/compile/internal/typecheck/const.go b/src/cmd/compile/internal/typecheck/const.go index 5259218ef91cb..d6bf1019748de 100644 --- a/src/cmd/compile/internal/typecheck/const.go +++ b/src/cmd/compile/internal/typecheck/const.go @@ -509,7 +509,7 @@ func EvalConst(n ir.Node) ir.Node { } nl := ir.Copy(n).(*ir.AddStringExpr) - nl.List.Set(s[i:i2]) + nl.List = s[i:i2] newList = append(newList, OrigConst(nl, constant.MakeString(strings.Join(strs, "")))) i = i2 - 1 } else { @@ -518,7 +518,7 @@ func EvalConst(n ir.Node) ir.Node { } nn := ir.Copy(n).(*ir.AddStringExpr) - nn.List.Set(newList) + nn.List = newList return nn case ir.OCAP, ir.OLEN: diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index 296755028dd9e..859239700446d 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -52,7 +52,7 @@ func FixVariadicCall(call *ir.CallExpr) { extra[i] = nil // allow GC } - call.Args.Set(append(args[:vi], slice)) + call.Args = append(args[:vi], slice) call.IsDDD = true } @@ -313,7 +313,7 @@ func MethodValueWrapper(dot *ir.SelectorExpr) *ir.Func { } call := ir.NewCallExpr(base.Pos, ir.OCALL, ir.NewSelectorExpr(base.Pos, ir.OXDOT, ptr, meth), nil) - call.Args.Set(ir.ParamNames(tfn.Type())) + call.Args = ir.ParamNames(tfn.Type()) call.IsDDD = tfn.Type().IsVariadic() if t0.NumResults() != 0 { ret := ir.NewReturnStmt(base.Pos, nil) @@ -323,7 +323,7 @@ func MethodValueWrapper(dot *ir.SelectorExpr) *ir.Func { body = append(body, call) } - fn.Body.Set(body) + fn.Body = body FinishFuncBody() Func(fn) @@ -798,7 +798,7 @@ func tcMake(n *ir.CallExpr) ir.Node { return n } - n.Args.Set(nil) + n.Args = nil l := args[0] l = typecheck(l, ctxType) t := l.Type() diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go index 00ecd9b819029..0caac362e3a18 100644 --- a/src/cmd/compile/internal/typecheck/iimport.go +++ b/src/cmd/compile/internal/typecheck/iimport.go @@ 
-779,7 +779,7 @@ func (r *importReader) caseList(switchExpr ir.Node) []*ir.CaseClause { cases := make([]*ir.CaseClause, r.uint64()) for i := range cases { cas := ir.NewCaseStmt(r.pos(), nil, nil) - cas.List.Set(r.stmtList()) + cas.List = r.stmtList() if namedTypeSwitch { // Note: per-case variables will have distinct, dotted // names after import. That's okay: swt.go only needs @@ -789,7 +789,7 @@ func (r *importReader) caseList(switchExpr ir.Node) []*ir.CaseClause { cas.Var = caseVar caseVar.Defn = switchExpr } - cas.Body.Set(r.stmtList()) + cas.Body = r.stmtList() cases[i] = cas } return cases @@ -932,7 +932,7 @@ func (r *importReader) node() ir.Node { case ir.OCOPY, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN: n := builtinCall(r.pos(), op) - n.Args.Set(r.exprList()) + n.Args = r.exprList() if op == ir.OAPPEND { n.IsDDD = r.bool() } @@ -945,7 +945,7 @@ func (r *importReader) node() ir.Node { pos := r.pos() init := r.stmtList() n := ir.NewCallExpr(pos, ir.OCALL, r.expr(), r.exprList()) - n.PtrInit().Set(init) + *n.PtrInit() = init n.IsDDD = r.bool() return n @@ -1033,14 +1033,14 @@ func (r *importReader) node() ir.Node { case ir.OIF: pos, init := r.pos(), r.stmtList() n := ir.NewIfStmt(pos, r.expr(), r.stmtList(), r.stmtList()) - n.PtrInit().Set(init) + *n.PtrInit() = init return n case ir.OFOR: pos, init := r.pos(), r.stmtList() cond, post := r.exprsOrNil() n := ir.NewForStmt(pos, nil, cond, post, r.stmtList()) - n.PtrInit().Set(init) + *n.PtrInit() = init return n case ir.ORANGE: @@ -1052,7 +1052,7 @@ func (r *importReader) node() ir.Node { pos := r.pos() init := r.stmtList() n := ir.NewSelectStmt(pos, r.commList()) - n.PtrInit().Set(init) + *n.PtrInit() = init return n case ir.OSWITCH: @@ -1060,7 +1060,7 @@ func (r *importReader) node() ir.Node { init := r.stmtList() x, _ := r.exprsOrNil() n := ir.NewSwitchStmt(pos, x, r.caseList(x)) - n.PtrInit().Set(init) + *n.PtrInit() = init return n // case OCASE: diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index 0ee66df2cfc5b..d0922e8508b02 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -64,7 +64,7 @@ func FuncBody(n *ir.Func) { CheckUnused(n) CheckReturn(n) if base.Errors() > errorsBefore { - n.Body.Set(nil) // type errors; do not compile + n.Body = nil // type errors; do not compile } } @@ -971,9 +971,9 @@ func typecheckargs(n ir.InitNode) { switch n := n.(type) { case *ir.CallExpr: - n.Args.Set(list) + n.Args = list case *ir.ReturnStmt: - n.Results.Set(list) + n.Results = list } n.PtrInit().Append(Stmt(as)) @@ -1687,7 +1687,7 @@ func stringtoruneslit(n *ir.ConvExpr) ir.Node { } nn := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(n.Type()), nil) - nn.List.Set(l) + nn.List = l return Expr(nn) } diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go index 762baa0dd940d..7f3e4cc995c0f 100644 --- a/src/cmd/compile/internal/walk/assign.go +++ b/src/cmd/compile/internal/walk/assign.go @@ -264,7 +264,7 @@ func walkReturn(n *ir.ReturnStmt) ir.Node { // move function calls out, to make ascompatee's job easier. 
walkExprListSafe(n.Results, n.PtrInit()) - n.Results.Set(ascompatee(n.Op(), rl, n.Results, n.PtrInit())) + n.Results = ascompatee(n.Op(), rl, n.Results, n.PtrInit()) return n } walkExprList(n.Results, n.PtrInit()) @@ -281,7 +281,7 @@ func walkReturn(n *ir.ReturnStmt) ir.Node { a := ir.NewAssignStmt(base.Pos, nname, rhs[i]) res[i] = convas(a, n.PtrInit()) } - n.Results.Set(res) + n.Results = res return n } diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go index 13837eeffc4ba..a061181e2fb77 100644 --- a/src/cmd/compile/internal/walk/builtin.go +++ b/src/cmd/compile/internal/walk/builtin.go @@ -531,7 +531,7 @@ func walkPrint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { t = append(t, n) } t = append(t, ir.NewString("\n")) - nn.Args.Set(t) + nn.Args = t } // Collapse runs of constant strings. @@ -551,7 +551,7 @@ func walkPrint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { i++ } } - nn.Args.Set(t) + nn.Args = t calls := []ir.Node{mkcall("printlock", nil, init)} for i, n := range nn.Args { @@ -653,7 +653,7 @@ func walkPrint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { walkExprList(calls, init) r := ir.NewBlockStmt(base.Pos, nil) - r.List.Set(calls) + r.List = calls return walkStmt(typecheck.Stmt(r)) } diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go index 62d2a362b1ff1..fcdb43f113146 100644 --- a/src/cmd/compile/internal/walk/closure.go +++ b/src/cmd/compile/internal/walk/closure.go @@ -107,7 +107,7 @@ func Closure(fn *ir.Func) { if len(body) > 0 { typecheck.Stmts(body) - fn.Enter.Set(body) + fn.Enter = body fn.SetNeedctxt(true) } } @@ -131,7 +131,7 @@ func walkClosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node { clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ), nil) clos.SetEsc(clo.Esc()) - clos.List.Set(append([]ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, fn.Nname)}, closureArgs(clo)...)) + clos.List = append([]ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, fn.Nname)}, closureArgs(clo)...) addr := typecheck.NodAddr(clos) addr.SetEsc(clo.Esc()) diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index 7dfac300946bd..8a56526a362ae 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -477,7 +477,7 @@ func walkAddString(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { cat := typecheck.LookupRuntime(fn) r := ir.NewCallExpr(base.Pos, ir.OCALL, cat, nil) - r.Args.Set(args) + r.Args = args r1 := typecheck.Expr(r) r1 = walkExpr(r1, init) r1.SetType(n.Type()) @@ -562,8 +562,8 @@ func walkCall1(n *ir.CallExpr, init *ir.Nodes) { } } - n.Args.Set(tempAssigns) - n.Rargs.Set(args) + n.Args = tempAssigns + n.Rargs = args } // walkDivMod walks an ODIV or OMOD node. diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index 679b795270e85..767af07414f1d 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -423,7 +423,7 @@ func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) { order.edge() order.stmtList(*n) order.cleanTemp(mark) - n.Set(order.out) + *n = order.out } // exprInPlace orders the side effects in *np and @@ -1233,9 +1233,9 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node { // If left-hand side doesn't cause a short-circuit, issue right-hand side. 
nif := ir.NewIfStmt(base.Pos, r, nil, nil) if n.Op() == ir.OANDAND { - nif.Body.Set(gen) + nif.Body = gen } else { - nif.Else.Set(gen) + nif.Else = gen } o.out = append(o.out, nif) return r @@ -1401,7 +1401,7 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node { statics = append(statics, r) } - n.List.Set(statics) + n.List = statics if len(dynamics) == 0 { return n @@ -1448,8 +1448,8 @@ func (o *orderState) as2(n *ir.AssignListStmt) { o.out = append(o.out, n) as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - as.Lhs.Set(left) - as.Rhs.Set(tmplist) + as.Lhs = left + as.Rhs = tmplist o.stmt(typecheck.Stmt(as)) } diff --git a/src/cmd/compile/internal/walk/range.go b/src/cmd/compile/internal/walk/range.go index 3092b71d7294d..9225c429f02f6 100644 --- a/src/cmd/compile/internal/walk/range.go +++ b/src/cmd/compile/internal/walk/range.go @@ -429,7 +429,7 @@ func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node { // i = len(a) - 1 // } n := ir.NewIfStmt(base.Pos, nil, nil, nil) - n.Body.Set(nil) + n.Body = nil n.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(0)) // hp = &a[0] diff --git a/src/cmd/compile/internal/walk/select.go b/src/cmd/compile/internal/walk/select.go index c6e9b71384d50..776b020155605 100644 --- a/src/cmd/compile/internal/walk/select.go +++ b/src/cmd/compile/internal/walk/select.go @@ -22,7 +22,7 @@ func walkSelect(sel *ir.SelectStmt) { init = append(init, walkSelectCases(sel.Cases)...) sel.Cases = nil - sel.Compiled.Set(init) + sel.Compiled = init walkStmtList(sel.Compiled) base.Pos = lno @@ -104,7 +104,7 @@ func walkSelectCases(cases []*ir.CommClause) []ir.Node { n := cas.Comm ir.SetPos(n) r := ir.NewIfStmt(base.Pos, nil, nil, nil) - r.PtrInit().Set(cas.Init()) + *r.PtrInit() = cas.Init() var call ir.Node switch n.Op() { default: @@ -136,8 +136,8 @@ func walkSelectCases(cases []*ir.CommClause) []ir.Node { } r.Cond = typecheck.Expr(call) - r.Body.Set(cas.Body) - r.Else.Set(append(dflt.Init(), dflt.Body...)) + r.Body = cas.Body + r.Else = append(dflt.Init(), dflt.Body...) return []ir.Node{r, ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)} } diff --git a/src/cmd/compile/internal/walk/stmt.go b/src/cmd/compile/internal/walk/stmt.go index 3440c6650642e..460c0a7c103b3 100644 --- a/src/cmd/compile/internal/walk/stmt.go +++ b/src/cmd/compile/internal/walk/stmt.go @@ -61,7 +61,7 @@ func walkStmt(n ir.Node) ir.Node { // copy rewrote to a statement list and a temp for the length. // Throw away the temp to avoid plain values as statements. n = ir.NewBlockStmt(n.Pos(), init) - init.Set(nil) + init = nil } if len(init) > 0 { switch n.Op() { @@ -265,7 +265,7 @@ func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node { last := len(n.Args) - 1 if va := n.Args[last]; va.Op() == ir.OSLICELIT { va := va.(*ir.CompLitExpr) - n.Args.Set(append(n.Args[:last], va.List...)) + n.Args = append(n.Args[:last], va.List...) n.IsDDD = false } } From b1747756e30a4e1ea0698ddbbb08f5cb7d97b1ba Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 2 Jan 2021 02:40:42 -0800 Subject: [PATCH 332/474] [dev.regabi] cmd/compile: reorganize escape analysis somewhat To do closure conversion during escape analysis, we need to walk the AST in order. So this CL makes a few changes: 1. Function literals are walked where they appear in their enclosing function, rather than as independent functions. 2. Walking "range" and "switch" statements is reordered to visit the X/Tag expression up front, before the body. 3. 
Most assignments are refactored to use a new assignList helper, which
handles 1:1, 2:1, and N:N assignments. N:1 function call assignments
are still handled directly by the OAS2FUNC case.

4. A latent missed-optimization in escape.addr is fixed: the
ONAMEOFFSET case was failing to update k with the result of calling
e.addr(n.Name_). In practice, this probably wasn't an issue because
ONAMEOFFSET is likely only used for PEXTERN variables (which are
treated as heap memory anyway) or code generated by walk (which has
already gone through escape analysis).

5. Finally, don't replace k with discardHole at the end of
escape.addr. This is already handled at the start of escape.expr, and
we'll want to be able to access the hole's location after escape.expr
returns.

Passes toolstash -cmp.

Change-Id: I2325234346b12b10056a360c489692bab8fdbd93
Reviewed-on: https://go-review.googlesource.com/c/go/+/281003
Trust: Matthew Dempsky
Run-TryBot: Matthew Dempsky
TryBot-Result: Go Bot
Reviewed-by: Cuong Manh Le
---
 src/cmd/compile/internal/escape/escape.go | 110 ++++++++++++----------
 1 file changed, 59 insertions(+), 51 deletions(-)

diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go
index 17770ffbbcaaa..1aba0a3fd279d 100644
--- a/src/cmd/compile/internal/escape/escape.go
+++ b/src/cmd/compile/internal/escape/escape.go
@@ -201,10 +201,12 @@ func Batch(fns []*ir.Func, recursive bool) {
 
 	// Construct data-flow graph from syntax trees.
 	for _, fn := range fns {
-		b.with(fn).initFunc()
+		b.initFunc(fn)
 	}
 	for _, fn := range fns {
-		b.with(fn).walkFunc()
+		if !fn.IsHiddenClosure() {
+			b.walkFunc(fn)
+		}
 	}
 	b.walkAll()
 
@@ -219,8 +221,8 @@ func (b *batch) with(fn *ir.Func) *escape {
 	}
 }
 
-func (e *escape) initFunc() {
-	fn := e.curfn
+func (b *batch) initFunc(fn *ir.Func) {
+	e := b.with(fn)
 	if fn.Esc() != escFuncUnknown {
 		base.Fatalf("unexpected node: %v", fn)
 	}
@@ -237,8 +239,8 @@ func (e *escape) initFunc() {
 	}
 }
 
-func (e *escape) walkFunc() {
-	fn := e.curfn
+func (b *batch) walkFunc(fn *ir.Func) {
+	e := b.with(fn)
 	fn.SetEsc(escFuncStarted)
 
 	// Identify labels that mark the head of an unstructured loop.
@@ -366,54 +368,52 @@ func (e *escape) stmt(n ir.Node) {
 
 	case ir.ORANGE:
 		// for Key, Value = range X { Body }
 		n := n.(*ir.RangeStmt)
-		e.loopDepth++
-		e.addr(n.Key)
-		k := e.addr(n.Value)
-		e.block(n.Body)
-		e.loopDepth--
 
 		// X is evaluated outside the loop.
+		tmp := e.newLoc(nil, false)
+		e.expr(tmp.asHole(), n.X)
+
+		e.loopDepth++
+		ks := e.addrs([]ir.Node{n.Key, n.Value})
 		if n.X.Type().IsArray() {
-			k = k.note(n, "range")
+			e.flow(ks[1].note(n, "range"), tmp)
 		} else {
-			k = k.deref(n, "range-deref")
+			e.flow(ks[1].deref(n, "range-deref"), tmp)
 		}
-		e.expr(e.later(k), n.X)
+
+		e.block(n.Body)
+		e.loopDepth--
 
 	case ir.OSWITCH:
 		n := n.(*ir.SwitchStmt)
-		typesw := n.Tag != nil && n.Tag.Op() == ir.OTYPESW
-
-		var ks []hole
-		for _, cas := range n.Cases { // cases
-			if typesw && n.Tag.(*ir.TypeSwitchGuard).Tag != nil {
-				cv := cas.Var
-				k := e.dcl(cv) // type switch variables have no ODCL.
-				if cv.Type().HasPointers() {
-					ks = append(ks, k.dotType(cv.Type(), cas, "switch case"))
+		if guard, ok := n.Tag.(*ir.TypeSwitchGuard); ok {
+			var ks []hole
+			if guard.Tag != nil {
+				for _, cas := range n.Cases {
+					cv := cas.Var
+					k := e.dcl(cv) // type switch variables have no ODCL.
+ if cv.Type().HasPointers() { + ks = append(ks, k.dotType(cv.Type(), cas, "switch case")) + } } } - - e.discards(cas.List) - e.block(cas.Body) - } - - if typesw { e.expr(e.teeHole(ks...), n.Tag.(*ir.TypeSwitchGuard).X) } else { e.discard(n.Tag) } + for _, cas := range n.Cases { + e.discards(cas.List) + e.block(cas.Body) + } + case ir.OSELECT: n := n.(*ir.SelectStmt) for _, cas := range n.Cases { e.stmt(cas.Comm) e.block(cas.Body) } - case ir.OSELRECV2: - n := n.(*ir.AssignListStmt) - e.assign(n.Lhs[0], n.Rhs[0], "selrecv", n) - e.assign(n.Lhs[1], nil, "selrecv", n) case ir.ORECV: // TODO(mdempsky): Consider e.discard(n.Left). n := n.(*ir.UnaryExpr) @@ -425,28 +425,24 @@ func (e *escape) stmt(n ir.Node) { case ir.OAS: n := n.(*ir.AssignStmt) - e.assign(n.X, n.Y, "assign", n) + e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n) case ir.OASOP: n := n.(*ir.AssignOpStmt) - e.assign(n.X, n.Y, "assign", n) + // TODO(mdempsky): Worry about OLSH/ORSH? + e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n) case ir.OAS2: n := n.(*ir.AssignListStmt) - for i, nl := range n.Lhs { - e.assign(nl, n.Rhs[i], "assign-pair", n) - } + e.assignList(n.Lhs, n.Rhs, "assign-pair", n) case ir.OAS2DOTTYPE: // v, ok = x.(type) n := n.(*ir.AssignListStmt) - e.assign(n.Lhs[0], n.Rhs[0], "assign-pair-dot-type", n) - e.assign(n.Lhs[1], nil, "assign-pair-dot-type", n) + e.assignList(n.Lhs, n.Rhs, "assign-pair-dot-type", n) case ir.OAS2MAPR: // v, ok = m[k] n := n.(*ir.AssignListStmt) - e.assign(n.Lhs[0], n.Rhs[0], "assign-pair-mapr", n) - e.assign(n.Lhs[1], nil, "assign-pair-mapr", n) - case ir.OAS2RECV: // v, ok = <-ch + e.assignList(n.Lhs, n.Rhs, "assign-pair-mapr", n) + case ir.OAS2RECV, ir.OSELRECV2: // v, ok = <-ch n := n.(*ir.AssignListStmt) - e.assign(n.Lhs[0], n.Rhs[0], "assign-pair-receive", n) - e.assign(n.Lhs[1], nil, "assign-pair-receive", n) + e.assignList(n.Lhs, n.Rhs, "assign-pair-receive", n) case ir.OAS2FUNC: n := n.(*ir.AssignListStmt) @@ -455,9 +451,11 @@ func (e *escape) stmt(n ir.Node) { case ir.ORETURN: n := n.(*ir.ReturnStmt) results := e.curfn.Type().Results().FieldSlice() - for i, v := range n.Results { - e.assign(ir.AsNode(results[i].Nname), v, "return", n) + dsts := make([]ir.Node, len(results)) + for i, res := range results { + dsts[i] = res.Nname.(*ir.Name) } + e.assignList(dsts, n.Results, "return", n) case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER: e.call(nil, n, nil) case ir.OGO, ir.ODEFER: @@ -694,6 +692,10 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) { case ir.OCLOSURE: n := n.(*ir.ClosureExpr) + if fn := n.Func; fn.IsHiddenClosure() { + e.walkFunc(fn) + } + // Link addresses of captured variables to closure. k = e.spill(k, n) for _, v := range n.Func.ClosureVars { @@ -795,7 +797,7 @@ func (e *escape) addr(n ir.Node) hole { k = e.oldLoc(n).asHole() case ir.ONAMEOFFSET: n := n.(*ir.NameOffsetExpr) - e.addr(n.Name_) + k = e.addr(n.Name_) case ir.ODOT: n := n.(*ir.SelectorExpr) k = e.addr(n.X) @@ -815,10 +817,6 @@ func (e *escape) addr(n ir.Node) hole { e.assignHeap(n.Index, "key of map put", n) } - if !n.Type().HasPointers() { - k = e.discardHole() - } - return k } @@ -830,6 +828,16 @@ func (e *escape) addrs(l ir.Nodes) []hole { return ks } +func (e *escape) assignList(dsts, srcs []ir.Node, why string, where ir.Node) { + for i, dst := range dsts { + var src ir.Node + if i < len(srcs) { + src = srcs[i] + } + e.assign(dst, src, why, where) + } +} + // assign evaluates the assignment dst = src. 
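// src may be nil here: assignList passes nil once srcs is exhausted
// (the 2:1 forms such as "v, ok = m[k]"), in which case there is no
// source expression to flow from.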
func (e *escape) assign(dst, src ir.Node, why string, where ir.Node) { // Filter out some no-op assignments for escape analysis. From 57c426c9a57736d84f6ddd88d7a3306e63f66945 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 2 Jan 2021 03:15:14 -0800 Subject: [PATCH 333/474] [dev.regabi] cmd/compile: tighten typecheckdef to *ir.Name We only actually care about ir.Names in typecheckdef, so don't bother calling it on anything else. Allows us to get rid of some more superfluous .Name() calls and .(*ir.Name) assertions. Passes toolstash -cmp. Change-Id: I78c7cb680178991ea185958b47a36f101d4d5ef7 Reviewed-on: https://go-review.googlesource.com/c/go/+/281004 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- .../compile/internal/typecheck/typecheck.go | 25 ++++++++----------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index d0922e8508b02..812b94de0dd4d 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -474,11 +474,8 @@ func indexlit(n ir.Node) ir.Node { // typecheck1 should ONLY be called from typecheck. func typecheck1(n ir.Node, top int) ir.Node { - switch n.Op() { - case ir.OLITERAL, ir.ONAME, ir.OTYPE: - if n.Sym() != nil { - typecheckdef(n) - } + if n, ok := n.(*ir.Name); ok { + typecheckdef(n) } switch n.Op() { @@ -1735,7 +1732,7 @@ func typecheckdeftype(n *ir.Name) { types.ResumeCheckSize() } -func typecheckdef(n ir.Node) { +func typecheckdef(n *ir.Name) { if base.EnableTrace && base.Flag.LowerT { defer tracePrint("typecheckdef", n)(nil) } @@ -1755,7 +1752,7 @@ func typecheckdef(n ir.Node) { } lno := ir.SetPos(n) - typecheckdefstack = append(typecheckdefstack, n.(*ir.Name)) + typecheckdefstack = append(typecheckdefstack, n) if n.Walkdef() == 2 { base.FlushErrors() fmt.Printf("typecheckdef loop:") @@ -1774,18 +1771,18 @@ func typecheckdef(n ir.Node) { base.Fatalf("typecheckdef %v", n.Op()) case ir.OLITERAL: - if n.Name().Ntype != nil { - n.Name().Ntype = typecheckNtype(n.Name().Ntype) - n.SetType(n.Name().Ntype.Type()) - n.Name().Ntype = nil + if n.Ntype != nil { + n.Ntype = typecheckNtype(n.Ntype) + n.SetType(n.Ntype.Type()) + n.Ntype = nil if n.Type() == nil { n.SetDiag(true) goto ret } } - e := n.Name().Defn - n.Name().Defn = nil + e := n.Defn + n.Defn = nil if e == nil { ir.Dump("typecheckdef nil defn", n) base.ErrorfAt(n.Pos(), "xxx") @@ -1828,7 +1825,6 @@ func typecheckdef(n ir.Node) { } case ir.ONAME: - n := n.(*ir.Name) if n.Ntype != nil { n.Ntype = typecheckNtype(n.Ntype) n.SetType(n.Ntype.Type()) @@ -1865,7 +1861,6 @@ func typecheckdef(n ir.Node) { n.Defn = Stmt(n.Defn) // fills in n.Type case ir.OTYPE: - n := n.(*ir.Name) if n.Alias() { // Type alias declaration: Simply use the rhs type - no need // to create a new type. From bb1b6c95c2d312ec0e23a90dffd37a62f98af7ae Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 2 Jan 2021 03:23:49 -0800 Subject: [PATCH 334/474] [dev.regabi] cmd/compile: remove Node.{,Set}Walkdef After the previous commit, we no longer access Walkdef on anything but ir.Names, so we can remove them from the Node interface and miniNode. The flag bits storage should also move from miniNode.bits to Name.flags, but the latter is already full at the moment. Leaving as a TODO for now. Passes toolstash -cmp. 
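For reference, Walkdef is a 2-bit field packed into the node's
byte-sized bitset, which is why SetWalkdef rejects values above 3. A
minimal sketch of that packing (the real helpers are ir's bitset8
get2/set2; their exact bodies are assumed here, not copied):

	type bitset8 uint8

	func (f bitset8) get2(shift uint8) uint8 { return uint8(f>>shift) & 3 }

	func (f *bitset8) set2(shift uint8, b uint8) {
		*f = *f&^(3<<shift) | bitset8(b&3)<<shift
	}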
Change-Id: I2427e4cf7bc68dc1d1529f40fb93dd9f7a9149f6 Reviewed-on: https://go-review.googlesource.com/c/go/+/281005 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/ir/mini.go | 9 +-------- src/cmd/compile/internal/ir/name.go | 8 ++++++++ src/cmd/compile/internal/ir/node.go | 2 -- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go index 93aa15abece25..4dd9a8807aadf 100644 --- a/src/cmd/compile/internal/ir/mini.go +++ b/src/cmd/compile/internal/ir/mini.go @@ -54,20 +54,13 @@ func (n *miniNode) Esc() uint16 { return n.esc } func (n *miniNode) SetEsc(x uint16) { n.esc = x } const ( - miniWalkdefShift = 0 + miniWalkdefShift = 0 // TODO(mdempsky): Move to Name.flags. miniTypecheckShift = 2 miniDiag = 1 << 4 miniHasCall = 1 << 5 // for miniStmt ) -func (n *miniNode) Walkdef() uint8 { return n.bits.get2(miniWalkdefShift) } func (n *miniNode) Typecheck() uint8 { return n.bits.get2(miniTypecheckShift) } -func (n *miniNode) SetWalkdef(x uint8) { - if x > 3 { - panic(fmt.Sprintf("cannot SetWalkdef %d", x)) - } - n.bits.set2(miniWalkdefShift, x) -} func (n *miniNode) SetTypecheck(x uint8) { if x > 3 { panic(fmt.Sprintf("cannot SetTypecheck %d", x)) diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 5acb2d07627a1..afee6e1308fd4 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -10,6 +10,7 @@ import ( "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/src" + "fmt" "go/constant" ) @@ -240,6 +241,13 @@ func (n *Name) FrameOffset() int64 { return n.Offset_ } func (n *Name) SetFrameOffset(x int64) { n.Offset_ = x } func (n *Name) Iota() int64 { return n.Offset_ } func (n *Name) SetIota(x int64) { n.Offset_ = x } +func (n *Name) Walkdef() uint8 { return n.bits.get2(miniWalkdefShift) } +func (n *Name) SetWalkdef(x uint8) { + if x > 3 { + panic(fmt.Sprintf("cannot SetWalkdef %d", x)) + } + n.bits.set2(miniWalkdefShift, x) +} func (n *Name) Linksym() *obj.LSym { return n.sym.Linksym() } diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 9d1ee17aa8a84..a5a7203faaa22 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -46,8 +46,6 @@ type Node interface { // Storage for analysis passes. Esc() uint16 SetEsc(x uint16) - Walkdef() uint8 - SetWalkdef(x uint8) Diag() bool SetDiag(x bool) Typecheck() uint8 From 5d80a590a2abc26dcc6cc4455f7cb2bf78fd9123 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 2 Jan 2021 22:43:58 -0800 Subject: [PATCH 335/474] [dev.regabi] cmd/compile: simplify walkReturn Just de-duplicating some logic and adding better comments. Passes toolstash -cmp. Change-Id: I15ec07070510692c6d4367880bc3d2d9847370ab Reviewed-on: https://go-review.googlesource.com/c/go/+/281132 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/walk/assign.go | 69 ++++++++++--------------- 1 file changed, 27 insertions(+), 42 deletions(-) diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go index 7f3e4cc995c0f..d552749d2660a 100644 --- a/src/cmd/compile/internal/walk/assign.go +++ b/src/cmd/compile/internal/walk/assign.go @@ -143,8 +143,6 @@ func walkAssignFunc(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { // walkAssignList walks an OAS2 node. 
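// (OAS2 is the plain n:n form, e.g. "x, y = y, x"; assignments whose
// right-hand side is a multi-value call, map read, or channel receive
// go through walkAssignFunc, walkAssignMapRead, and walkAssignRecv
// instead.)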
func walkAssignList(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { init.Append(ir.TakeInit(n)...) - walkExprListSafe(n.Lhs, init) - walkExprListSafe(n.Rhs, init) return ir.NewBlockStmt(src.NoXPos, ascompatee(ir.OAS, n.Lhs, n.Rhs, init)) } @@ -232,54 +230,33 @@ func walkAssignRecv(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { // walkReturn walks an ORETURN node. func walkReturn(n *ir.ReturnStmt) ir.Node { - ir.CurFunc.NumReturns++ + fn := ir.CurFunc + + fn.NumReturns++ if len(n.Results) == 0 { return n } - if (ir.HasNamedResults(ir.CurFunc) && len(n.Results) > 1) || paramoutheap(ir.CurFunc) { - // assign to the function out parameters, - // so that ascompatee can fix up conflicts - var rl []ir.Node - - for _, ln := range ir.CurFunc.Dcl { - cl := ln.Class_ - if cl == ir.PAUTO || cl == ir.PAUTOHEAP { - break - } - if cl == ir.PPARAMOUT { - var ln ir.Node = ln - if ir.IsParamStackCopy(ln) { - ln = walkExpr(typecheck.Expr(ir.NewStarExpr(base.Pos, ln.Name().Heapaddr)), nil) - } - rl = append(rl, ln) - } - } - if got, want := len(n.Results), len(rl); got != want { - // order should have rewritten multi-value function calls - // with explicit OAS2FUNC nodes. - base.Fatalf("expected %v return arguments, have %v", want, got) - } - - // move function calls out, to make ascompatee's job easier. - walkExprListSafe(n.Results, n.PtrInit()) + results := fn.Type().Results().FieldSlice() + dsts := make([]ir.Node, len(results)) + for i, v := range results { + // TODO(mdempsky): typecheck should have already checked the result variables. + dsts[i] = typecheck.AssignExpr(v.Nname.(*ir.Name)) + } - n.Results = ascompatee(n.Op(), rl, n.Results, n.PtrInit()) + if (ir.HasNamedResults(fn) && len(n.Results) > 1) || paramoutheap(fn) { + // General case: For anything tricky, let ascompatee handle + // ordering the assignments correctly. + n.Results = ascompatee(n.Op(), dsts, n.Results, n.PtrInit()) return n } - walkExprList(n.Results, n.PtrInit()) - // For each return parameter (lhs), assign the corresponding result (rhs). - lhs := ir.CurFunc.Type().Results() - rhs := n.Results - res := make([]ir.Node, lhs.NumFields()) - for i, nl := range lhs.FieldSlice() { - nname := ir.AsNode(nl.Nname) - if ir.IsParamHeapCopy(nname) { - nname = nname.Name().Stackcopy - } - a := ir.NewAssignStmt(base.Pos, nname, rhs[i]) - res[i] = convas(a, n.PtrInit()) + // Common case: Assignment order doesn't matter. Simply assign to + // each result parameter in order. + walkExprList(n.Results, n.PtrInit()) + res := make([]ir.Node, len(results)) + for i, v := range n.Results { + res[i] = convas(ir.NewAssignStmt(base.Pos, dsts[i], v), n.PtrInit()) } n.Results = res return n @@ -348,6 +325,14 @@ func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node { base.Fatalf("assignment operands mismatch: %+v / %+v", ir.Nodes(nl), ir.Nodes(nr)) } + // TODO(mdempsky): Simplify this code. Not only is it redundant to + // call safeExpr on the operands twice, but ensuring order of + // evaluation for function calls was already handled by order.go. + + // move function calls out, to make ascompatee's job easier. 
+ walkExprListSafe(nl, init) + walkExprListSafe(nr, init) + // ensure order of evaluation for function calls for i := range nl { nl[i] = safeExpr(nl[i], init) From a317067d65c2f9814cb05e573974d416949bace8 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 2 Jan 2021 23:24:16 -0800 Subject: [PATCH 336/474] [dev.regabi] cmd/compile: improve ascompatee order.go has already ordered function calls, so ascompatee only needs to worry about expressions that might access a variable after it's already been re-assigned. It already handles this, so the safeExpr calls simply result in unnecessarily pessimistic code. Does not pass toolstash -cmp, because it allows more efficient code generation. E.g., cmd/go on linux/ppc64le is about 2kB smaller. Change-Id: Idde0588eabe7850fa13c4e281fc46bbeffb4f68c Reviewed-on: https://go-review.googlesource.com/c/go/+/281152 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/walk/assign.go | 38 ++++++------------------- 1 file changed, 9 insertions(+), 29 deletions(-) diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go index d552749d2660a..04bd576b696d4 100644 --- a/src/cmd/compile/internal/walk/assign.go +++ b/src/cmd/compile/internal/walk/assign.go @@ -325,22 +325,6 @@ func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node { base.Fatalf("assignment operands mismatch: %+v / %+v", ir.Nodes(nl), ir.Nodes(nr)) } - // TODO(mdempsky): Simplify this code. Not only is it redundant to - // call safeExpr on the operands twice, but ensuring order of - // evaluation for function calls was already handled by order.go. - - // move function calls out, to make ascompatee's job easier. - walkExprListSafe(nl, init) - walkExprListSafe(nr, init) - - // ensure order of evaluation for function calls - for i := range nl { - nl[i] = safeExpr(nl[i], init) - } - for i := range nr { - nr[i] = safeExpr(nr[i], init) - } - var assigned ir.NameSet var memWrite bool @@ -361,27 +345,22 @@ func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node { // If a needed expression may be affected by an // earlier assignment, make an early copy of that // expression and use the copy instead. - var early []ir.Node + var early ir.Nodes save := func(np *ir.Node) { if n := *np; affected(n) { - tmp := ir.Node(typecheck.Temp(n.Type())) - as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, tmp, n)) - early = append(early, as) - *np = tmp + *np = copyExpr(n, n.Type(), &early) } } - var late []ir.Node - for i, l := range nl { - r := nr[i] + var late ir.Nodes + for i, lorig := range nl { + l, r := lorig, nr[i] // Do not generate 'x = x' during return. See issue 4014. if op == ir.ORETURN && ir.SameSafeExpr(l, r) { continue } - as := ir.NewAssignStmt(base.Pos, l, r) - // Save subexpressions needed on left side. // Drill through non-dereferences. for { @@ -423,9 +402,9 @@ func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node { } // Save expression on right side. - save(&as.Y) + save(&r) - late = append(late, convas(as, init)) + appendWalkStmt(&late, convas(ir.NewAssignStmt(base.Pos, lorig, r), &late)) if name == nil || name.Addrtaken() || name.Class_ == ir.PEXTERN || name.Class_ == ir.PAUTOHEAP { memWrite = true @@ -438,7 +417,8 @@ func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node { assigned.Add(name) } - return append(early, late...) + early.Append(late.Take()...) 
+ return early } // readsMemory reports whether the evaluation n directly reads from From d36a6bf44da6d9b6e1ec355381ef15d253435e20 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 2 Jan 2021 23:56:20 -0800 Subject: [PATCH 337/474] [dev.regabi] cmd/compile: improve walkReturn common case Instead of evaluating all result expressions up front and then assigning them to their result destinations, we can interleave evaluation with assignment. This reduces how much temporary stack/register space is needed to hold the values in flight. Doesn't pass toolstash -cmp, because it allows better return statement code to be generated. E.g., cmd/go's text segment on linux/ppc64le shrinks another 1kB. Change-Id: I3fe889342c80e947e0118704ec01f1682c577e6e Reviewed-on: https://go-review.googlesource.com/c/go/+/281153 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/walk/assign.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go index 04bd576b696d4..84ba7f0dc5093 100644 --- a/src/cmd/compile/internal/walk/assign.go +++ b/src/cmd/compile/internal/walk/assign.go @@ -253,10 +253,9 @@ func walkReturn(n *ir.ReturnStmt) ir.Node { // Common case: Assignment order doesn't matter. Simply assign to // each result parameter in order. - walkExprList(n.Results, n.PtrInit()) - res := make([]ir.Node, len(results)) + var res ir.Nodes for i, v := range n.Results { - res[i] = convas(ir.NewAssignStmt(base.Pos, dsts[i], v), n.PtrInit()) + appendWalkStmt(&res, convas(ir.NewAssignStmt(base.Pos, dsts[i], v), &res)) } n.Results = res return n From f2e6dab04859a3211ce9f5bf5bac9edde0831ce1 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 3 Jan 2021 00:03:28 -0800 Subject: [PATCH 338/474] [dev.regabi] cmd/compile: remove walkReturn "common case" path After the previous two optimization CLs, this code path now generates the same code as ascompatee does anyway. So just use that and remove some redundant code. Passes toolstash -cmp. Change-Id: I5e2e5c6dbea64d8e91abe0f2cf51aa5bb86576d2 Reviewed-on: https://go-review.googlesource.com/c/go/+/281154 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/walk/assign.go | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go index 84ba7f0dc5093..ec0f60ad93571 100644 --- a/src/cmd/compile/internal/walk/assign.go +++ b/src/cmd/compile/internal/walk/assign.go @@ -143,7 +143,7 @@ func walkAssignFunc(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { // walkAssignList walks an OAS2 node. func walkAssignList(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { init.Append(ir.TakeInit(n)...) - return ir.NewBlockStmt(src.NoXPos, ascompatee(ir.OAS, n.Lhs, n.Rhs, init)) + return ir.NewBlockStmt(src.NoXPos, ascompatee(ir.OAS, n.Lhs, n.Rhs)) } // walkAssignMapRead walks an OAS2MAPR node. @@ -244,20 +244,7 @@ func walkReturn(n *ir.ReturnStmt) ir.Node { dsts[i] = typecheck.AssignExpr(v.Nname.(*ir.Name)) } - if (ir.HasNamedResults(fn) && len(n.Results) > 1) || paramoutheap(fn) { - // General case: For anything tricky, let ascompatee handle - // ordering the assignments correctly. - n.Results = ascompatee(n.Op(), dsts, n.Results, n.PtrInit()) - return n - } - - // Common case: Assignment order doesn't matter. 
Simply assign to - // each result parameter in order. - var res ir.Nodes - for i, v := range n.Results { - appendWalkStmt(&res, convas(ir.NewAssignStmt(base.Pos, dsts[i], v), &res)) - } - n.Results = res + n.Results = ascompatee(n.Op(), dsts, n.Results) return n } @@ -318,7 +305,7 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { // check assign expression list to // an expression list. called in // expr-list = expr-list -func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node { +func ascompatee(op ir.Op, nl, nr []ir.Node) []ir.Node { // cannot happen: should have been rejected during type checking if len(nl) != len(nr) { base.Fatalf("assignment operands mismatch: %+v / %+v", ir.Nodes(nl), ir.Nodes(nr)) @@ -413,6 +400,11 @@ func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node { // We can ignore assignments to blank. continue } + if op == ir.ORETURN && types.OrigSym(name.Sym()) == nil { + // We can also ignore assignments to anonymous result + // parameters. These can't appear in expressions anyway. + continue + } assigned.Add(name) } From 907a4bfdc75004bc31c30564734cffc61ab1e80c Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 3 Jan 2021 00:16:46 -0800 Subject: [PATCH 339/474] [dev.regabi] cmd/compile: fix map assignment order After the previous cleanup/optimization CLs, ascompatee now correctly handles map assignments too. So remove the code from order.mapAssign, which causes us to assign to the map at the wrong point during execution. It's not every day you get to fix an issue by only removing code. Thanks to Cuong Manh Le for test cases and continually following up on this issue. Passes toolstash -cmp. (Apparently the standard library never uses tricky map assignments. Go figure.) Fixes #23017. Change-Id: Ie0728103d59d884d00c1c050251290a2a46150f9 Reviewed-on: https://go-review.googlesource.com/c/go/+/281172 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/walk/order.go | 37 +------- test/fixedbugs/issue23017.go | 113 +++++++++++++++++++++++++ 2 files changed, 114 insertions(+), 36 deletions(-) create mode 100644 test/fixedbugs/issue23017.go diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index 767af07414f1d..2164685cd4780 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -537,21 +537,7 @@ func (o *orderState) call(nn ir.Node) { } } -// mapAssign appends n to o.out, introducing temporaries -// to make sure that all map assignments have the form m[k] = x. -// (Note: expr has already been called on n, so we know k is addressable.) -// -// If n is the multiple assignment form ..., m[k], ... = ..., x, ..., the rewrite is -// t1 = m -// t2 = k -// ...., t3, ... = ..., x, ... -// t1[t2] = t3 -// -// The temporaries t1, t2 are needed in case the ... being assigned -// contain m or k. They are usually unnecessary, but in the unnecessary -// cases they are also typically registerizable, so not much harm done. -// And this only applies to the multiple-assignment form. -// We could do a more precise analysis if needed, like in walk.go. +// mapAssign appends n to o.out. 
func (o *orderState) mapAssign(n ir.Node) { switch n.Op() { default: @@ -572,28 +558,7 @@ func (o *orderState) mapAssign(n ir.Node) { case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2MAPR, ir.OAS2FUNC: n := n.(*ir.AssignListStmt) - var post []ir.Node - for i, m := range n.Lhs { - switch { - case m.Op() == ir.OINDEXMAP: - m := m.(*ir.IndexExpr) - if !ir.IsAutoTmp(m.X) { - m.X = o.copyExpr(m.X) - } - if !ir.IsAutoTmp(m.Index) { - m.Index = o.copyExpr(m.Index) - } - fallthrough - case base.Flag.Cfg.Instrumenting && n.Op() == ir.OAS2FUNC && !ir.IsBlank(m): - t := o.newTemp(m.Type(), false) - n.Lhs[i] = t - a := ir.NewAssignStmt(base.Pos, m, t) - post = append(post, typecheck.Stmt(a)) - } - } - o.out = append(o.out, n) - o.out = append(o.out, post...) } } diff --git a/test/fixedbugs/issue23017.go b/test/fixedbugs/issue23017.go new file mode 100644 index 0000000000000..770c48ef26bd9 --- /dev/null +++ b/test/fixedbugs/issue23017.go @@ -0,0 +1,113 @@ +// run + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// assignment order in multiple assignments. +// See issue #23017 + +package main + +import "fmt" + +func main() {} + +func init() { + var m = map[int]int{} + var p *int + + defer func() { + recover() + check(1, len(m)) + check(42, m[2]) + }() + m[2], *p = 42, 2 +} + +func init() { + var m = map[int]int{} + p := []int{} + + defer func() { + recover() + check(1, len(m)) + check(2, m[2]) + }() + m[2], p[1] = 2, 2 +} + +func init() { + type P struct{ i int } + var m = map[int]int{} + var p *P + + defer func() { + recover() + check(1, len(m)) + check(3, m[2]) + }() + m[2], p.i = 3, 2 +} + +func init() { + type T struct{ i int } + var x T + p := &x + p, p.i = new(T), 4 + check(4, x.i) +} + +func init() { + var m map[int]int + var a int + var p = &a + + defer func() { + recover() + check(5, *p) + }() + *p, m[2] = 5, 2 +} + +var g int + +func init() { + var m map[int]int + defer func() { + recover() + check(0, g) + }() + m[0], g = 1, 2 +} + +func init() { + type T struct{ x struct{ y int } } + var x T + p := &x + p, p.x.y = new(T), 7 + check(7, x.x.y) + check(0, p.x.y) +} + +func init() { + type T *struct{ x struct{ y int } } + x := struct{ y int }{0} + var q T = &struct{ x struct{ y int } }{x} + p := q + p, p.x.y = nil, 7 + check(7, q.x.y) +} + +func init() { + x, y := 1, 2 + x, y = y, x + check(2, x) + check(1, y) +} + +func check(want, got int) { + if want != got { + panic(fmt.Sprintf("wanted %d, but got %d", want, got)) + } +} From 8fc44cf0fac5357f45cacc445c0900a8fd054bd5 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 3 Jan 2021 00:53:51 -0800 Subject: [PATCH 340/474] [dev.regabi] cmd/compile: remove a couple CloneName calls In inl.go, that code path is unused, since we added ir.BasicLit to represent unnamed OLITERALs. In race.go, rather than cloning ir.RegFP, we can just create it from scratch again. Passes toolstash -cmp (incl. w/ -race). 
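The broader hazard with cloning is that a *Name is supposed to be the
unique identity of a variable, so a struct copy silently forks state
that later passes expect to be shared. A toy sketch of that pitfall
(illustrative types only, not the compiler's):

	package main

	type Name struct{ Offset int64 }

	func main() {
		n := &Name{Offset: 8}
		c := *n       // clone: a second, detached identity
		c.Offset = 16 // never visible through n
		println(n.Offset, c.Offset) // prints "8 16"
	}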
Change-Id: I8e063e4898d2acf056ceca5bc03df6b40a14eca9 Reviewed-on: https://go-review.googlesource.com/c/go/+/281192 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/inline/inl.go | 9 --------- src/cmd/compile/internal/walk/race.go | 6 +++++- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 2887abb0614d4..b9b424b74d9e4 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -1096,15 +1096,6 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { if n.Sym() != nil { return n } - if n, ok := n.(*ir.Name); ok && n.Op() == ir.OLITERAL { - // This happens for unnamed OLITERAL. - // which should really not be a *Name, but for now it is. - // ir.Copy(n) is not allowed generally and would panic below, - // but it's OK in this situation. - n = n.CloneName() - n.SetPos(subst.updatedPos(n.Pos())) - return n - } case ir.ORETURN: // Since we don't handle bodies with closures, diff --git a/src/cmd/compile/internal/walk/race.go b/src/cmd/compile/internal/walk/race.go index 87a8839dcd02e..20becf9be93d6 100644 --- a/src/cmd/compile/internal/walk/race.go +++ b/src/cmd/compile/internal/walk/race.go @@ -8,6 +8,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/ssagen" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/src" "cmd/internal/sys" @@ -36,7 +37,10 @@ func instrument(fn *ir.Func) { // This only works for amd64. This will not // work on arm or others that might support // race in the future. - nodpc := ir.RegFP.CloneName() + + nodpc := ir.NewNameAt(src.NoXPos, typecheck.Lookup(".fp")) + nodpc.Class_ = ir.PPARAM + nodpc.SetUsed(true) nodpc.SetType(types.Types[types.TUINTPTR]) nodpc.SetFrameOffset(int64(-types.PtrSize)) fn.Dcl = append(fn.Dcl, nodpc) From a30fd5288415cb1e4a91ec89fac725a9ee7a3d05 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Mon, 4 Jan 2021 10:37:48 +0700 Subject: [PATCH 341/474] [dev.regabi] cmd/compile: use ir.NewNameAt in SubstArgTypes So we can remove Name.CloneName now. Passes toolstash -cmp. Change-Id: I63e57ba52a7031e06fe9c4ee9aee7de6dec70792 Reviewed-on: https://go-review.googlesource.com/c/go/+/281312 Trust: Cuong Manh Le Reviewed-by: Matthew Dempsky Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot --- src/cmd/compile/internal/ir/name.go | 6 ------ src/cmd/compile/internal/typecheck/syms.go | 6 +++--- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index afee6e1308fd4..689ef983f61f7 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -147,12 +147,6 @@ func (n *Name) copy() Node { panic(n.no("copy")) } func (n *Name) doChildren(do func(Node) bool) bool { return false } func (n *Name) editChildren(edit func(Node) Node) {} -// CloneName makes a cloned copy of the name. -// It's not ir.Copy(n) because in general that operation is a mistake on names, -// which uniquely identify variables. -// Callers must use n.CloneName to make clear they intend to create a separate name. -func (n *Name) CloneName() *Name { c := *n; return &c } - // TypeDefn returns the type definition for a named OTYPE. // That is, given "type T Defn", it returns Defn. // It is used by package types. 
diff --git a/src/cmd/compile/internal/typecheck/syms.go b/src/cmd/compile/internal/typecheck/syms.go index 2251062e16c6e..01c03b5f9f714 100644 --- a/src/cmd/compile/internal/typecheck/syms.go +++ b/src/cmd/compile/internal/typecheck/syms.go @@ -26,12 +26,12 @@ func LookupRuntime(name string) *ir.Name { // The result of SubstArgTypes MUST be assigned back to old, e.g. // n.Left = SubstArgTypes(n.Left, t1, t2) func SubstArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name { - n := old.CloneName() - for _, t := range types_ { types.CalcSize(t) } - n.SetType(types.SubstAny(n.Type(), &types_)) + n := ir.NewNameAt(old.Pos(), old.Sym()) + n.Class_ = old.Class() + n.SetType(types.SubstAny(old.Type(), &types_)) if len(types_) > 0 { base.Fatalf("substArgTypes: too many argument types") } From 290b4154b73b54045a147f463c6988b935d75d49 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 3 Jan 2021 14:24:25 -0800 Subject: [PATCH 342/474] [dev.regabi] cmd/compile: fix ICE due to large uint64 constants It's an error to call Int64Val on constants that don't fit into int64. CL 272654 made the compiler stricter about detecting misuse, and revealed that we were using it improperly in detecting consecutive integer-switch cases. That particular usage actually did work in practice, but it's easy and best to just fix it. Fixes #43480. Change-Id: I56f722d75e83091638ac43b80e45df0b0ad7d48d Reviewed-on: https://go-review.googlesource.com/c/go/+/281272 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/walk/switch.go | 7 +++++- test/fixedbugs/issue43480.go | 33 +++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 test/fixedbugs/issue43480.go diff --git a/src/cmd/compile/internal/walk/switch.go b/src/cmd/compile/internal/walk/switch.go index b03bc3eba7c4d..59446ef3dbab4 100644 --- a/src/cmd/compile/internal/walk/switch.go +++ b/src/cmd/compile/internal/walk/switch.go @@ -201,10 +201,15 @@ func (s *exprSwitch) flush() { // Merge consecutive integer cases. if s.exprname.Type().IsInteger() { + consecutive := func(last, next constant.Value) bool { + delta := constant.BinaryOp(next, token.SUB, last) + return constant.Compare(delta, token.EQL, constant.MakeInt64(1)) + } + merged := cc[:1] for _, c := range cc[1:] { last := &merged[len(merged)-1] - if last.jmp == c.jmp && ir.Int64Val(last.hi)+1 == ir.Int64Val(c.lo) { + if last.jmp == c.jmp && consecutive(last.hi.Val(), c.lo.Val()) { last.hi = c.lo } else { merged = append(merged, c) diff --git a/test/fixedbugs/issue43480.go b/test/fixedbugs/issue43480.go new file mode 100644 index 0000000000000..d98ad3a34e035 --- /dev/null +++ b/test/fixedbugs/issue43480.go @@ -0,0 +1,33 @@ +// run + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue #43480: ICE on large uint64 constants in switch cases. 
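+// (For context: math.MaxInt64 is 9223372036854775807, just under 1e19,
+// so 1e19 fits only in a uint64 and must not be passed to Int64Val.)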
+ +package main + +func isPow10(x uint64) bool { + switch x { + case 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19: + return true + } + return false +} + +func main() { + var x uint64 = 1 + + for { + if !isPow10(x) || isPow10(x-1) || isPow10(x+1) { + panic(x) + } + next := x * 10 + if next/10 != x { + break // overflow + } + x = next + } +} From d89705e08742c0f4fdf5d2bdbab6f344c6be884f Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 3 Jan 2021 14:37:06 -0800 Subject: [PATCH 343/474] [dev.regabi] cmd/compile: fix re-export of parameters When exporting signature types, we include the originating package, because it's exposed via go/types's API. And as a consistency check, we ensure that the parameter names came from that same package. However, we were getting this wrong in the case of exported variables that were initialized with a method value using an imported method. In this case, when we created the method value wrapper function's type (which is reused as the variable's type if none is explicitly provided in the variable declaration), we were reusing the original (i.e., imported) parameter names, but the newly created signature type was associated with the current package instead. The correct fix here is really to preserve the original signature type's package (along with position and name for its parameters), but that's awkward to do at the moment because the DeclFunc API requires an ir representation of the function signature, whereas we only provide a way to explicitly set packages via the type constructor APIs. As an interim fix, we associate the parameters with the current package, to be consistent with the signature type's package. Fixes #43479. Change-Id: Id45a10f8cf64165c9bc7d9598f0a0ee199a5e752 Reviewed-on: https://go-review.googlesource.com/c/go/+/281292 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/typecheck/dcl.go | 3 ++ src/cmd/compile/internal/typecheck/iexport.go | 13 ++++++- src/cmd/compile/internal/typecheck/iimport.go | 27 +++++++------ src/cmd/compile/internal/typecheck/subr.go | 3 ++ test/fixedbugs/issue43479.dir/a.go | 27 +++++++++++++ test/fixedbugs/issue43479.dir/b.go | 38 +++++++++++++++++++ test/fixedbugs/issue43479.go | 7 ++++ 7 files changed, 104 insertions(+), 14 deletions(-) create mode 100644 test/fixedbugs/issue43479.dir/a.go create mode 100644 test/fixedbugs/issue43479.dir/b.go create mode 100644 test/fixedbugs/issue43479.go diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go index daec9848d0fb0..5eaf100eed06f 100644 --- a/src/cmd/compile/internal/typecheck/dcl.go +++ b/src/cmd/compile/internal/typecheck/dcl.go @@ -486,6 +486,9 @@ func NewMethodType(sig *types.Type, recv *types.Type) *types.Type { nrecvs++ } + // TODO(mdempsky): Move this function to types. + // TODO(mdempsky): Preserve positions, names, and package from sig+recv. 
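+	// (In other words, a method type is modeled as an ordinary function
+	// type with the receiver, when present, prepended as params[0].)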
+ params := make([]*types.Field, nrecvs+sig.Params().Fields().Len()) if recv != nil { params[0] = types.NewField(base.Pos, nil, recv) diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index 50acb10a9a9c1..dd515b8ccdd1f 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -574,6 +574,11 @@ func (w *exportWriter) pos(pos src.XPos) { } func (w *exportWriter) pkg(pkg *types.Pkg) { + // TODO(mdempsky): Add flag to types.Pkg to mark pseudo-packages. + if pkg == ir.Pkgs.Go { + base.Fatalf("export of pseudo-package: %q", pkg.Path) + } + // Ensure any referenced packages are declared in the main index. w.p.allPkgs[pkg] = true @@ -1529,6 +1534,10 @@ func (w *exportWriter) localName(n *ir.Name) { } func (w *exportWriter) localIdent(s *types.Sym, v int32) { + if w.currPkg == nil { + base.Fatalf("missing currPkg") + } + // Anonymous parameters. if s == nil { w.string("") @@ -1553,8 +1562,8 @@ func (w *exportWriter) localIdent(s *types.Sym, v int32) { name = fmt.Sprintf("%s·%d", name, v) } - if !types.IsExported(name) && s.Pkg != w.currPkg { - base.Fatalf("weird package in name: %v => %v, not %q", s, name, w.currPkg.Path) + if s.Pkg != w.currPkg { + base.Fatalf("weird package in name: %v => %v from %q, not %q", s, name, s.Pkg.Path, w.currPkg.Path) } w.string(name) diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go index 0caac362e3a18..2dc7e70b65013 100644 --- a/src/cmd/compile/internal/typecheck/iimport.go +++ b/src/cmd/compile/internal/typecheck/iimport.go @@ -327,7 +327,7 @@ func (r *importReader) doDecl(sym *types.Sym) *ir.Name { ms := make([]*types.Field, r.uint64()) for i := range ms { mpos := r.pos() - msym := r.ident() + msym := r.selector() recv := r.param() mtyp := r.signature(recv) @@ -434,18 +434,21 @@ func (p *importReader) float(typ *types.Type) constant.Value { return constant.Make(&f) } -func (r *importReader) ident() *types.Sym { +func (r *importReader) ident(selector bool) *types.Sym { name := r.string() if name == "" { return nil } pkg := r.currPkg - if types.IsExported(name) { + if selector && types.IsExported(name) { pkg = types.LocalPkg } return pkg.Lookup(name) } +func (r *importReader) localIdent() *types.Sym { return r.ident(false) } +func (r *importReader) selector() *types.Sym { return r.ident(true) } + func (r *importReader) qualifiedIdent() *ir.Ident { name := r.string() pkg := r.pkg() @@ -534,7 +537,7 @@ func (r *importReader) typ1() *types.Type { fs := make([]*types.Field, r.uint64()) for i := range fs { pos := r.pos() - sym := r.ident() + sym := r.selector() typ := r.typ() emb := r.bool() note := r.string() @@ -563,7 +566,7 @@ func (r *importReader) typ1() *types.Type { methods := make([]*types.Field, r.uint64()) for i := range methods { pos := r.pos() - sym := r.ident() + sym := r.selector() typ := r.signature(fakeRecvField()) methods[i] = types.NewField(pos, sym, typ) @@ -599,7 +602,7 @@ func (r *importReader) paramList() []*types.Field { } func (r *importReader) param() *types.Field { - return types.NewField(r.pos(), r.ident(), r.typ()) + return types.NewField(r.pos(), r.localIdent(), r.typ()) } func (r *importReader) bool() bool { @@ -784,7 +787,7 @@ func (r *importReader) caseList(switchExpr ir.Node) []*ir.CaseClause { // Note: per-case variables will have distinct, dotted // names after import. That's okay: swt.go only needs // Sym for diagnostics anyway. 
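			// (Concretely, iexport.go's localIdent writes such variables
			// as "name·N", via fmt.Sprintf("%s·%d", name, v).)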
- caseVar := ir.NewNameAt(cas.Pos(), r.ident()) + caseVar := ir.NewNameAt(cas.Pos(), r.localIdent()) Declare(caseVar, DeclContext) cas.Var = caseVar caseVar.Defn = switchExpr @@ -851,7 +854,7 @@ func (r *importReader) node() ir.Node { return r.qualifiedIdent() case ir.ONAME: - return r.ident().Def.(*ir.Name) + return r.localIdent().Def.(*ir.Name) // case OPACK, ONONAME: // unreachable - should have been resolved by typechecking @@ -862,7 +865,7 @@ func (r *importReader) node() ir.Node { case ir.OTYPESW: pos := r.pos() var tag *ir.Ident - if s := r.ident(); s != nil { + if s := r.localIdent(); s != nil { tag = ir.NewIdent(pos, s) } return ir.NewTypeSwitchGuard(pos, tag, r.expr()) @@ -899,7 +902,7 @@ func (r *importReader) node() ir.Node { case ir.OXDOT: // see parser.new_dotname - return ir.NewSelectorExpr(r.pos(), ir.OXDOT, r.expr(), r.ident()) + return ir.NewSelectorExpr(r.pos(), ir.OXDOT, r.expr(), r.selector()) // case ODOTTYPE, ODOTTYPE2: // unreachable - mapped to case ODOTTYPE below by exporter @@ -989,7 +992,7 @@ func (r *importReader) node() ir.Node { // statements case ir.ODCL: pos := r.pos() - lhs := ir.NewDeclNameAt(pos, ir.ONAME, r.ident()) + lhs := ir.NewDeclNameAt(pos, ir.ONAME, r.localIdent()) lhs.SetType(r.typ()) Declare(lhs, ir.PAUTO) @@ -1100,7 +1103,7 @@ func (r *importReader) op() ir.Op { func (r *importReader) fieldList() []ir.Node { list := make([]ir.Node, r.uint64()) for i := range list { - list[i] = ir.NewStructKeyExpr(r.pos(), r.ident(), r.expr()) + list[i] = ir.NewStructKeyExpr(r.pos(), r.selector(), r.expr()) } return list } diff --git a/src/cmd/compile/internal/typecheck/subr.go b/src/cmd/compile/internal/typecheck/subr.go index 447e945d814c9..569075d684ad3 100644 --- a/src/cmd/compile/internal/typecheck/subr.go +++ b/src/cmd/compile/internal/typecheck/subr.go @@ -43,6 +43,9 @@ func NewFuncParams(tl *types.Type, mustname bool) []*ir.Field { // invent a name so that we can refer to it in the trampoline s = LookupNum(".anon", gen) gen++ + } else if s != nil && s.Pkg != types.LocalPkg { + // TODO(mdempsky): Preserve original position, name, and package. + s = Lookup(s.Name) } a := ir.NewField(base.Pos, s, nil, t.Type) a.Pos = t.Pos diff --git a/test/fixedbugs/issue43479.dir/a.go b/test/fixedbugs/issue43479.dir/a.go new file mode 100644 index 0000000000000..ed3e6a5d9b67e --- /dev/null +++ b/test/fixedbugs/issue43479.dir/a.go @@ -0,0 +1,27 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package a + +type Here struct{ stuff int } +type Info struct{ Dir string } + +func New() Here { return Here{} } +func (h Here) Dir(p string) (Info, error) + +type I interface{ M(x string) } + +type T = struct { + Here + I +} + +var X T + +var A = (*T).Dir +var B = T.Dir +var C = X.Dir +var D = (*T).M +var E = T.M +var F = X.M diff --git a/test/fixedbugs/issue43479.dir/b.go b/test/fixedbugs/issue43479.dir/b.go new file mode 100644 index 0000000000000..02d16909cc527 --- /dev/null +++ b/test/fixedbugs/issue43479.dir/b.go @@ -0,0 +1,38 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package b + +import "./a" + +var Here = a.New() +var Dir = Here.Dir + +type T = struct { + a.Here + a.I +} + +var X T + +// Test exporting the type of method values for anonymous structs with +// promoted methods. 
+var A = a.A +var B = a.B +var C = a.C +var D = a.D +var E = a.E +var F = a.F +var G = (*a.T).Dir +var H = a.T.Dir +var I = a.X.Dir +var J = (*a.T).M +var K = a.T.M +var L = a.X.M +var M = (*T).Dir +var N = T.Dir +var O = X.Dir +var P = (*T).M +var Q = T.M +var R = X.M diff --git a/test/fixedbugs/issue43479.go b/test/fixedbugs/issue43479.go new file mode 100644 index 0000000000000..f21d1d5c582a7 --- /dev/null +++ b/test/fixedbugs/issue43479.go @@ -0,0 +1,7 @@ +// compiledir + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ignored From f24e40c14a0a767b6663c85dc900bb9e6b7c2d8e Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 3 Jan 2021 20:14:00 -0800 Subject: [PATCH 344/474] [dev.regabi] cmd/compile: remove Name.Class_ accessors These aren't part of the Node interface anymore, so no need to keep them around. Passes toolstash -cmp. [git-generate] cd src/cmd/compile/internal/ir : Fix one off case that causes trouble for rf. sed -i -e 's/n.SetClass(ir.PAUTO)/n.Class_ = ir.PAUTO/' ../ssa/export_test.go pkgs=$(go list . ../...) rf ' ex '"$(echo $pkgs)"' { var n *Name var c Class n.Class() -> n.Class_ n.SetClass(c) -> n.Class_ = c } rm Name.Class rm Name.SetClass mv Name.Class_ Name.Class ' Change-Id: Ifb304bf4691a8c455456aabd8aa77178d4a49500 Reviewed-on: https://go-review.googlesource.com/c/go/+/281294 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/dwarfgen/dwarf.go | 24 +++++----- src/cmd/compile/internal/escape/escape.go | 26 +++++----- .../compile/internal/gc/abiutilsaux_test.go | 2 +- src/cmd/compile/internal/gc/compile.go | 2 +- src/cmd/compile/internal/gc/export.go | 2 +- src/cmd/compile/internal/gc/obj.go | 2 +- src/cmd/compile/internal/inline/inl.go | 24 +++++----- src/cmd/compile/internal/ir/expr.go | 6 +-- src/cmd/compile/internal/ir/func.go | 4 +- src/cmd/compile/internal/ir/name.go | 8 ++-- src/cmd/compile/internal/ir/scc.go | 2 +- src/cmd/compile/internal/liveness/plive.go | 14 +++--- src/cmd/compile/internal/noder/noder.go | 6 +-- src/cmd/compile/internal/pkginit/init.go | 4 +- src/cmd/compile/internal/pkginit/initorder.go | 10 ++-- .../compile/internal/reflectdata/reflect.go | 8 ++-- src/cmd/compile/internal/ssa/deadstore.go | 8 ++-- src/cmd/compile/internal/ssa/export_test.go | 2 +- src/cmd/compile/internal/ssagen/nowb.go | 2 +- src/cmd/compile/internal/ssagen/pgen.go | 14 +++--- src/cmd/compile/internal/ssagen/pgen_test.go | 4 +- src/cmd/compile/internal/ssagen/ssa.go | 48 +++++++++---------- src/cmd/compile/internal/staticdata/data.go | 6 +-- src/cmd/compile/internal/staticinit/sched.go | 6 +-- src/cmd/compile/internal/typecheck/dcl.go | 4 +- src/cmd/compile/internal/typecheck/export.go | 2 +- src/cmd/compile/internal/typecheck/func.go | 4 +- src/cmd/compile/internal/typecheck/iexport.go | 8 ++-- src/cmd/compile/internal/typecheck/iimport.go | 2 +- src/cmd/compile/internal/typecheck/syms.go | 2 +- .../compile/internal/typecheck/typecheck.go | 4 +- .../compile/internal/typecheck/universe.go | 2 +- src/cmd/compile/internal/walk/assign.go | 4 +- src/cmd/compile/internal/walk/closure.go | 6 +-- src/cmd/compile/internal/walk/complit.go | 8 ++-- src/cmd/compile/internal/walk/convert.go | 6 +-- src/cmd/compile/internal/walk/expr.go | 2 +- src/cmd/compile/internal/walk/order.go | 2 +- src/cmd/compile/internal/walk/race.go | 2 +- src/cmd/compile/internal/walk/stmt.go | 2 +- 
src/cmd/compile/internal/walk/walk.go | 6 +-- 41 files changed, 149 insertions(+), 151 deletions(-) diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go index 6eac9d547e4e6..1534adaac8e66 100644 --- a/src/cmd/compile/internal/dwarfgen/dwarf.go +++ b/src/cmd/compile/internal/dwarfgen/dwarf.go @@ -76,7 +76,7 @@ func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, if n.Op() != ir.ONAME { // might be OTYPE or OLITERAL continue } - switch n.Class_ { + switch n.Class { case ir.PAUTO: if !n.Used() { // Text == nil -> generating abstract function @@ -171,7 +171,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir if c == '.' || n.Type().IsUntyped() { continue } - if n.Class_ == ir.PPARAM && !ssagen.TypeOK(n.Type()) { + if n.Class == ir.PPARAM && !ssagen.TypeOK(n.Type()) { // SSA-able args get location lists, and may move in and // out of registers, so those are handled elsewhere. // Autos and named output params seem to get handled @@ -186,10 +186,10 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir typename := dwarf.InfoPrefix + types.TypeSymName(n.Type()) decls = append(decls, n) abbrev := dwarf.DW_ABRV_AUTO_LOCLIST - isReturnValue := (n.Class_ == ir.PPARAMOUT) - if n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT { + isReturnValue := (n.Class == ir.PPARAMOUT) + if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT { abbrev = dwarf.DW_ABRV_PARAM_LOCLIST - } else if n.Class_ == ir.PAUTOHEAP { + } else if n.Class == ir.PAUTOHEAP { // If dcl in question has been promoted to heap, do a bit // of extra work to recover original class (auto or param); // see issue 30908. This insures that we get the proper @@ -198,9 +198,9 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir // and not stack). 
// TODO(thanm): generate a better location expression stackcopy := n.Stackcopy - if stackcopy != nil && (stackcopy.Class_ == ir.PPARAM || stackcopy.Class_ == ir.PPARAMOUT) { + if stackcopy != nil && (stackcopy.Class == ir.PPARAM || stackcopy.Class == ir.PPARAMOUT) { abbrev = dwarf.DW_ABRV_PARAM_LOCLIST - isReturnValue = (stackcopy.Class_ == ir.PPARAMOUT) + isReturnValue = (stackcopy.Class == ir.PPARAMOUT) } } inlIndex := 0 @@ -275,7 +275,7 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { var abbrev int var offs int64 - switch n.Class_ { + switch n.Class { case ir.PAUTO: offs = n.FrameOffset() abbrev = dwarf.DW_ABRV_AUTO @@ -291,7 +291,7 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { abbrev = dwarf.DW_ABRV_PARAM offs = n.FrameOffset() + base.Ctxt.FixedFrameSize() default: - base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class_, n) + base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class, n) } typename := dwarf.InfoPrefix + types.TypeSymName(n.Type()) @@ -308,7 +308,7 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { declpos := base.Ctxt.InnermostPos(declPos(n)) return &dwarf.Var{ Name: n.Sym().Name, - IsReturnValue: n.Class_ == ir.PPARAMOUT, + IsReturnValue: n.Class == ir.PPARAMOUT, IsInlFormal: n.InlFormal(), Abbrev: abbrev, StackOffset: int32(offs), @@ -353,7 +353,7 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var n := debug.Vars[varID] var abbrev int - switch n.Class_ { + switch n.Class { case ir.PAUTO: abbrev = dwarf.DW_ABRV_AUTO_LOCLIST case ir.PPARAM, ir.PPARAMOUT: @@ -377,7 +377,7 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var declpos := base.Ctxt.InnermostPos(n.Pos()) dvar := &dwarf.Var{ Name: n.Sym().Name, - IsReturnValue: n.Class_ == ir.PPARAMOUT, + IsReturnValue: n.Class == ir.PPARAMOUT, IsInlFormal: n.InlFormal(), Abbrev: abbrev, Type: base.Ctxt.Lookup(typename), diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index 1aba0a3fd279d..6a2e685fe87eb 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -519,7 +519,7 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) { case ir.ONAME: n := n.(*ir.Name) - if n.Class_ == ir.PFUNC || n.Class_ == ir.PEXTERN { + if n.Class == ir.PFUNC || n.Class == ir.PEXTERN { return } e.flow(k, e.oldLoc(n)) @@ -791,7 +791,7 @@ func (e *escape) addr(n ir.Node) hole { base.Fatalf("unexpected addr: %v", n) case ir.ONAME: n := n.(*ir.Name) - if n.Class_ == ir.PEXTERN { + if n.Class == ir.PEXTERN { break } k = e.oldLoc(n).asHole() @@ -899,7 +899,7 @@ func (e *escape) call(ks []hole, call, where ir.Node) { switch call.Op() { case ir.OCALLFUNC: switch v := ir.StaticValue(call.X); { - case v.Op() == ir.ONAME && v.(*ir.Name).Class_ == ir.PFUNC: + case v.Op() == ir.ONAME && v.(*ir.Name).Class == ir.PFUNC: fn = v.(*ir.Name) case v.Op() == ir.OCLOSURE: fn = v.(*ir.ClosureExpr).Func.Nname @@ -1589,7 +1589,7 @@ func (b *batch) finish(fns []*ir.Func) { } func (l *location) isName(c ir.Class) bool { - return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class_ == c + return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class == c } const numEscResults = 7 @@ -1882,7 +1882,7 @@ func HeapAllocReason(n ir.Node) string { // Parameters are always passed via the stack. 
if n.Op() == ir.ONAME { n := n.(*ir.Name) - if n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT { + if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT { return "" } } @@ -1939,7 +1939,7 @@ func addrescapes(n ir.Node) { // if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping. // on PPARAM it means something different. - if n.Class_ == ir.PAUTO && n.Esc() == ir.EscNever { + if n.Class == ir.PAUTO && n.Esc() == ir.EscNever { break } @@ -1949,7 +1949,7 @@ func addrescapes(n ir.Node) { break } - if n.Class_ != ir.PPARAM && n.Class_ != ir.PPARAMOUT && n.Class_ != ir.PAUTO { + if n.Class != ir.PPARAM && n.Class != ir.PPARAMOUT && n.Class != ir.PAUTO { break } @@ -2003,7 +2003,7 @@ func moveToHeap(n *ir.Name) { if base.Flag.CompilingRuntime { base.Errorf("%v escapes to heap, not allowed in runtime", n) } - if n.Class_ == ir.PAUTOHEAP { + if n.Class == ir.PAUTOHEAP { ir.Dump("n", n) base.Fatalf("double move to heap") } @@ -2022,7 +2022,7 @@ func moveToHeap(n *ir.Name) { // Parameters have a local stack copy used at function start/end // in addition to the copy in the heap that may live longer than // the function. - if n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT { + if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT { if n.FrameOffset() == types.BADWIDTH { base.Fatalf("addrescapes before param assignment") } @@ -2034,9 +2034,9 @@ func moveToHeap(n *ir.Name) { stackcopy := typecheck.NewName(n.Sym()) stackcopy.SetType(n.Type()) stackcopy.SetFrameOffset(n.FrameOffset()) - stackcopy.Class_ = n.Class_ + stackcopy.Class = n.Class stackcopy.Heapaddr = heapaddr - if n.Class_ == ir.PPARAMOUT { + if n.Class == ir.PPARAMOUT { // Make sure the pointer to the heap copy is kept live throughout the function. // The function could panic at any point, and then a defer could recover. // Thus, we need the pointer to the heap copy always available so the @@ -2058,7 +2058,7 @@ func moveToHeap(n *ir.Name) { } // Parameters are before locals, so can stop early. // This limits the search even in functions with many local variables. - if d.Class_ == ir.PAUTO { + if d.Class == ir.PAUTO { break } } @@ -2069,7 +2069,7 @@ func moveToHeap(n *ir.Name) { } // Modify n in place so that uses of n now mean indirection of the heapaddr. - n.Class_ = ir.PAUTOHEAP + n.Class = ir.PAUTOHEAP n.SetFrameOffset(0) n.Heapaddr = heapaddr n.SetEsc(ir.EscHeap) diff --git a/src/cmd/compile/internal/gc/abiutilsaux_test.go b/src/cmd/compile/internal/gc/abiutilsaux_test.go index e6590beac0544..9386b554b09d9 100644 --- a/src/cmd/compile/internal/gc/abiutilsaux_test.go +++ b/src/cmd/compile/internal/gc/abiutilsaux_test.go @@ -21,7 +21,7 @@ import ( func mkParamResultField(t *types.Type, s *types.Sym, which ir.Class) *types.Field { field := types.NewField(src.NoXPos, s, t) n := typecheck.NewName(s) - n.Class_ = which + n.Class = which field.Nname = n n.SetType(t) return field diff --git a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go index 1b3dd672f3f0f..25b1c76737111 100644 --- a/src/cmd/compile/internal/gc/compile.go +++ b/src/cmd/compile/internal/gc/compile.go @@ -83,7 +83,7 @@ func compile(fn *ir.Func) { // because symbols must be allocated before the parallel // phase of the compiler. 
for _, n := range fn.Dcl { - switch n.Class_ { + switch n.Class { case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO: if liveness.ShouldTrack(n) && n.Addrtaken() { reflectdata.WriteType(n.Type()) diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index c65c6c8335be2..356fcfa6719e2 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -83,7 +83,7 @@ type exporter struct { func (p *exporter) markObject(n ir.Node) { if n.Op() == ir.ONAME { n := n.(*ir.Name) - if n.Class_ == ir.PFUNC { + if n.Class == ir.PFUNC { inline.Inline_Flood(n, typecheck.Export) } } diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 30cfac1b717c9..fbb2145e1b10b 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -188,7 +188,7 @@ func dumpGlobal(n *ir.Name) { if n.Type() == nil { base.Fatalf("external %v nil type\n", n) } - if n.Class_ == ir.PFUNC { + if n.Class == ir.PFUNC { return } if n.Sym().Pkg != types.LocalPkg { diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index b9b424b74d9e4..6f5f6499ced51 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -199,8 +199,8 @@ func Inline_Flood(n *ir.Name, exportsym func(*ir.Name)) { if n == nil { return } - if n.Op() != ir.ONAME || n.Class_ != ir.PFUNC { - base.Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op(), n.Class_) + if n.Op() != ir.ONAME || n.Class != ir.PFUNC { + base.Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op(), n.Class) } fn := n.Func if fn == nil { @@ -227,7 +227,7 @@ func Inline_Flood(n *ir.Name, exportsym func(*ir.Name)) { case ir.ONAME: n := n.(*ir.Name) - switch n.Class_ { + switch n.Class { case ir.PFUNC: Inline_Flood(n, exportsym) exportsym(n) @@ -292,7 +292,7 @@ func (v *hairyVisitor) doNode(n ir.Node) error { // runtime.throw is a "cheap call" like panic in normal code. if n.X.Op() == ir.ONAME { name := n.X.(*ir.Name) - if name.Class_ == ir.PFUNC && types.IsRuntimePkg(name.Sym().Pkg) { + if name.Class == ir.PFUNC && types.IsRuntimePkg(name.Sym().Pkg) { fn := name.Sym().Name if fn == "getcallerpc" || fn == "getcallersp" { return errors.New("call to " + fn) @@ -407,7 +407,7 @@ func (v *hairyVisitor) doNode(n ir.Node) error { case ir.ONAME: n := n.(*ir.Name) - if n.Class_ == ir.PAUTO { + if n.Class == ir.PAUTO { v.usedLocals[n] = true } @@ -627,7 +627,7 @@ func inlCallee(fn ir.Node) *ir.Func { return n.Func case ir.ONAME: fn := fn.(*ir.Name) - if fn.Class_ == ir.PFUNC { + if fn.Class == ir.PFUNC { return fn.Func } case ir.OCLOSURE: @@ -759,7 +759,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b if ln.Op() != ir.ONAME { continue } - if ln.Class_ == ir.PPARAMOUT { // return values handled below. + if ln.Class == ir.PPARAMOUT { // return values handled below. 
continue } if ir.IsParamStackCopy(ln) { // ignore the on-stack copy of a parameter that moved to the heap @@ -772,7 +772,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b inlf := typecheck.Expr(inlvar(ln)).(*ir.Name) inlvars[ln] = inlf if base.Flag.GenDwarfInl > 0 { - if ln.Class_ == ir.PPARAM { + if ln.Class == ir.PPARAM { inlf.Name().SetInlFormal(true) } else { inlf.Name().SetInlLocal(true) @@ -975,7 +975,7 @@ func inlvar(var_ *ir.Name) *ir.Name { n := typecheck.NewName(var_.Sym()) n.SetType(var_.Type()) - n.Class_ = ir.PAUTO + n.Class = ir.PAUTO n.SetUsed(true) n.Curfn = ir.CurFunc // the calling function, not the called one n.SetAddrtaken(var_.Addrtaken()) @@ -988,7 +988,7 @@ func inlvar(var_ *ir.Name) *ir.Name { func retvar(t *types.Field, i int) *ir.Name { n := typecheck.NewName(typecheck.LookupNum("~R", i)) n.SetType(t.Type) - n.Class_ = ir.PAUTO + n.Class = ir.PAUTO n.SetUsed(true) n.Curfn = ir.CurFunc // the calling function, not the called one ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n) @@ -1000,7 +1000,7 @@ func retvar(t *types.Field, i int) *ir.Name { func argvar(t *types.Type, i int) ir.Node { n := typecheck.NewName(typecheck.LookupNum("~arg", i)) n.SetType(t.Elem()) - n.Class_ = ir.PAUTO + n.Class = ir.PAUTO n.SetUsed(true) n.Curfn = ir.CurFunc // the calling function, not the called one ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n) @@ -1170,7 +1170,7 @@ func (subst *inlsubst) updatedPos(xpos src.XPos) src.XPos { func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name { s := make([]*ir.Name, 0, len(ll)) for _, n := range ll { - if n.Class_ == ir.PAUTO { + if n.Class == ir.PAUTO { if _, found := vis.usedLocals[n]; !found { continue } diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 1b88427146b2b..6d81bf8781ef5 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -527,7 +527,7 @@ func (n *SelectorExpr) FuncName() *Name { panic(n.no("FuncName")) } fn := NewNameAt(n.Selection.Pos, MethodSym(n.X.Type(), n.Sel)) - fn.Class_ = PFUNC + fn.Class = PFUNC fn.SetType(n.Type()) return fn } @@ -736,7 +736,7 @@ func IsAddressable(n Node) bool { case ONAME: n := n.(*Name) - if n.Class_ == PFUNC { + if n.Class == PFUNC { return false } return true @@ -771,7 +771,7 @@ func staticValue1(nn Node) Node { return nil } n := nn.(*Name) - if n.Class_ != PAUTO || n.Addrtaken() { + if n.Class != PAUTO || n.Addrtaken() { return nil } diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 1eaca9c6f38c6..12ef083c1933e 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -245,11 +245,11 @@ func FuncSymName(s *types.Sym) string { // MarkFunc marks a node as a function. 
func MarkFunc(n *Name) { - if n.Op() != ONAME || n.Class_ != Pxxx { + if n.Op() != ONAME || n.Class != Pxxx { base.Fatalf("expected ONAME/Pxxx node, got %v", n) } - n.Class_ = PFUNC + n.Class = PFUNC n.Sym().SetFunc(true) } diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 689ef983f61f7..58b4ababff9dc 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -37,7 +37,7 @@ func (*Ident) CanBeNtype() {} type Name struct { miniExpr BuiltinOp Op // uint8 - Class_ Class // uint8 + Class Class // uint8 pragma PragmaFlag // int16 flags bitset16 sym *types.Sym @@ -222,8 +222,6 @@ func (n *Name) Sym() *types.Sym { return n.sym } func (n *Name) SetSym(x *types.Sym) { n.sym = x } func (n *Name) SubOp() Op { return n.BuiltinOp } func (n *Name) SetSubOp(x Op) { n.BuiltinOp = x } -func (n *Name) Class() Class { return n.Class_ } -func (n *Name) SetClass(x Class) { n.Class_ = x } func (n *Name) SetFunc(x *Func) { n.Func = x } func (n *Name) Offset() int64 { panic("Name.Offset") } func (n *Name) SetOffset(x int64) { @@ -425,7 +423,7 @@ func IsParamStackCopy(n Node) bool { return false } name := n.(*Name) - return (name.Class_ == PPARAM || name.Class_ == PPARAMOUT) && name.Heapaddr != nil + return (name.Class == PPARAM || name.Class == PPARAMOUT) && name.Heapaddr != nil } // IsParamHeapCopy reports whether this is the on-heap copy of @@ -435,7 +433,7 @@ func IsParamHeapCopy(n Node) bool { return false } name := n.(*Name) - return name.Class_ == PAUTOHEAP && name.Stackcopy != nil + return name.Class == PAUTOHEAP && name.Stackcopy != nil } var RegFP *Name diff --git a/src/cmd/compile/internal/ir/scc.go b/src/cmd/compile/internal/ir/scc.go index f35c4d44e9d8b..83c6074170b31 100644 --- a/src/cmd/compile/internal/ir/scc.go +++ b/src/cmd/compile/internal/ir/scc.go @@ -87,7 +87,7 @@ func (v *bottomUpVisitor) visit(n *Func) uint32 { Visit(n, func(n Node) { switch n.Op() { case ONAME: - if n := n.(*Name); n.Class_ == PFUNC { + if n := n.(*Name); n.Class == PFUNC { do(n.Defn) } case ODOTMETH, OCALLPART, OMETHEXPR: diff --git a/src/cmd/compile/internal/liveness/plive.go b/src/cmd/compile/internal/liveness/plive.go index 91f10b0a9dc21..26d90824b2897 100644 --- a/src/cmd/compile/internal/liveness/plive.go +++ b/src/cmd/compile/internal/liveness/plive.go @@ -181,7 +181,7 @@ type progeffectscache struct { // nor do we care about empty structs (handled by the pointer check), // nor do we care about the fake PAUTOHEAP variables. func ShouldTrack(n *ir.Name) bool { - return (n.Class_ == ir.PAUTO || n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT) && n.Type().HasPointers() + return (n.Class == ir.PAUTO || n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT) && n.Type().HasPointers() } // getvariables returns the list of on-stack variables that we need to track @@ -208,7 +208,7 @@ func (lv *liveness) initcache() { lv.cache.initialized = true for i, node := range lv.vars { - switch node.Class_ { + switch node.Class { case ir.PPARAM: // A return instruction with a p.to is a tail return, which brings // the stack pointer back up (if it ever went down) and then jumps @@ -386,7 +386,7 @@ func (lv *liveness) pointerMap(liveout bitvec.BitVec, vars []*ir.Name, args, loc break } node := vars[i] - switch node.Class_ { + switch node.Class { case ir.PAUTO: typebits.Set(node.Type(), node.FrameOffset()+lv.stkptrsize, locals) @@ -687,7 +687,7 @@ func (lv *liveness) epilogue() { // don't need to keep the stack copy live? 
if lv.fn.HasDefer() { for i, n := range lv.vars { - if n.Class_ == ir.PPARAMOUT { + if n.Class == ir.PPARAMOUT { if n.IsOutputParamHeapAddr() { // Just to be paranoid. Heap addresses are PAUTOs. base.Fatalf("variable %v both output param and heap output param", n) @@ -785,7 +785,7 @@ func (lv *liveness) epilogue() { if !liveout.Get(int32(i)) { continue } - if n.Class_ == ir.PPARAM { + if n.Class == ir.PPARAM { continue // ok } base.Fatalf("bad live variable at entry of %v: %L", lv.fn.Nname, n) @@ -818,7 +818,7 @@ func (lv *liveness) epilogue() { // the only things that can possibly be live are the // input parameters. for j, n := range lv.vars { - if n.Class_ != ir.PPARAM && lv.stackMaps[0].Get(int32(j)) { + if n.Class != ir.PPARAM && lv.stackMaps[0].Get(int32(j)) { lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Nname, n) } } @@ -1063,7 +1063,7 @@ func (lv *liveness) emit() (argsSym, liveSym *obj.LSym) { // (Nodes without pointers aren't in lv.vars; see livenessShouldTrack.) var maxArgNode *ir.Name for _, n := range lv.vars { - switch n.Class_ { + switch n.Class { case ir.PPARAM, ir.PPARAMOUT: if maxArgNode == nil || n.FrameOffset() > maxArgNode.FrameOffset() { maxArgNode = n diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index 678e378291524..76913c62a6dfd 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -1176,10 +1176,10 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node { n := ir.NewReturnStmt(p.pos(stmt), p.exprList(stmt.Results)) if len(n.Results) == 0 && ir.CurFunc != nil { for _, ln := range ir.CurFunc.Dcl { - if ln.Class_ == ir.PPARAM { + if ln.Class == ir.PPARAM { continue } - if ln.Class_ != ir.PPARAMOUT { + if ln.Class != ir.PPARAMOUT { break } if ln.Sym().Def != ln { @@ -1956,7 +1956,7 @@ func oldname(s *types.Sym) ir.Node { if c == nil || c.Curfn != ir.CurFunc { // Do not have a closure var for the active closure yet; make one. c = typecheck.NewName(s) - c.Class_ = ir.PAUTOHEAP + c.Class = ir.PAUTOHEAP c.SetIsClosureVar(true) c.Defn = n diff --git a/src/cmd/compile/internal/pkginit/init.go b/src/cmd/compile/internal/pkginit/init.go index a32e09879c7f1..5bc66c7e1be7d 100644 --- a/src/cmd/compile/internal/pkginit/init.go +++ b/src/cmd/compile/internal/pkginit/init.go @@ -32,7 +32,7 @@ func Task() *ir.Name { if n.Op() == ir.ONONAME { continue } - if n.Op() != ir.ONAME || n.(*ir.Name).Class_ != ir.PEXTERN { + if n.Op() != ir.ONAME || n.(*ir.Name).Class != ir.PEXTERN { base.Fatalf("bad inittask: %v", n) } deps = append(deps, n.(*ir.Name).Linksym()) @@ -89,7 +89,7 @@ func Task() *ir.Name { sym := typecheck.Lookup(".inittask") task := typecheck.NewName(sym) task.SetType(types.Types[types.TUINT8]) // fake type - task.Class_ = ir.PEXTERN + task.Class = ir.PEXTERN sym.Def = task lsym := task.Linksym() ot := 0 diff --git a/src/cmd/compile/internal/pkginit/initorder.go b/src/cmd/compile/internal/pkginit/initorder.go index 1c222c1de4388..bdefd594ffe7d 100644 --- a/src/cmd/compile/internal/pkginit/initorder.go +++ b/src/cmd/compile/internal/pkginit/initorder.go @@ -140,7 +140,7 @@ func (o *InitOrder) processAssign(n ir.Node) { defn := dep.Defn // Skip dependencies on functions (PFUNC) and // variables already initialized (InitDone). 
- if dep.Class_ != ir.PEXTERN || o.order[defn] == orderDone { + if dep.Class != ir.PEXTERN || o.order[defn] == orderDone { continue } o.order[n]++ @@ -204,7 +204,7 @@ func (o *InitOrder) findInitLoopAndExit(n *ir.Name, path *[]*ir.Name) { *path = append(*path, n) for _, ref := range refers { // Short-circuit variables that were initialized. - if ref.Class_ == ir.PEXTERN && o.order[ref.Defn] == orderDone { + if ref.Class == ir.PEXTERN && o.order[ref.Defn] == orderDone { continue } @@ -221,7 +221,7 @@ func reportInitLoopAndExit(l []*ir.Name) { // the start. i := -1 for j, n := range l { - if n.Class_ == ir.PEXTERN && (i == -1 || n.Pos().Before(l[i].Pos())) { + if n.Class == ir.PEXTERN && (i == -1 || n.Pos().Before(l[i].Pos())) { i = j } } @@ -291,7 +291,7 @@ func (d *initDeps) visit(n ir.Node) { switch n.Op() { case ir.ONAME: n := n.(*ir.Name) - switch n.Class_ { + switch n.Class { case ir.PEXTERN, ir.PFUNC: d.foundDep(n) } @@ -324,7 +324,7 @@ func (d *initDeps) foundDep(n *ir.Name) { return } d.seen.Add(n) - if d.transitive && n.Class_ == ir.PFUNC { + if d.transitive && n.Class == ir.PFUNC { d.inspectList(n.Defn.(*ir.Func).Body) } } diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go index f926765326904..30857fff6da72 100644 --- a/src/cmd/compile/internal/reflectdata/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -840,7 +840,7 @@ func TypePtr(t *types.Type) *ir.AddrExpr { if s.Def == nil { n := ir.NewNameAt(src.NoXPos, s) n.SetType(types.Types[types.TUINT8]) - n.Class_ = ir.PEXTERN + n.Class = ir.PEXTERN n.SetTypecheck(1) s.Def = n } @@ -859,7 +859,7 @@ func ITabAddr(t, itype *types.Type) *ir.AddrExpr { if s.Def == nil { n := typecheck.NewName(s) n.SetType(types.Types[types.TUINT8]) - n.Class_ = ir.PEXTERN + n.Class = ir.PEXTERN n.SetTypecheck(1) s.Def = n itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: n.Linksym()}) @@ -1370,7 +1370,7 @@ func WriteTabs() { // } nsym := dname(p.Sym().Name, "", nil, true) t := p.Type() - if p.Class_ != ir.PFUNC { + if p.Class != ir.PFUNC { t = types.NewPtr(t) } tsym := WriteType(t) @@ -1674,7 +1674,7 @@ func ZeroAddr(size int64) ir.Node { if s.Def == nil { x := typecheck.NewName(s) x.SetType(types.Types[types.TUINT8]) - x.Class_ = ir.PEXTERN + x.Class = ir.PEXTERN x.SetTypecheck(1) s.Def = x } diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go index a68c82ba97905..530918da4d61d 100644 --- a/src/cmd/compile/internal/ssa/deadstore.go +++ b/src/cmd/compile/internal/ssa/deadstore.go @@ -148,7 +148,7 @@ func elimDeadAutosGeneric(f *Func) { case OpAddr, OpLocalAddr: // Propagate the address if it points to an auto. n, ok := v.Aux.(*ir.Name) - if !ok || n.Class() != ir.PAUTO { + if !ok || n.Class != ir.PAUTO { return } if addr[v] == nil { @@ -159,7 +159,7 @@ func elimDeadAutosGeneric(f *Func) { case OpVarDef, OpVarKill: // v should be eliminated if we eliminate the auto. n, ok := v.Aux.(*ir.Name) - if !ok || n.Class() != ir.PAUTO { + if !ok || n.Class != ir.PAUTO { return } if elim[v] == nil { @@ -175,7 +175,7 @@ func elimDeadAutosGeneric(f *Func) { // may not be used by the inline code, but will be used by // panic processing). 
n, ok := v.Aux.(*ir.Name) - if !ok || n.Class() != ir.PAUTO { + if !ok || n.Class != ir.PAUTO { return } if !used[n] { @@ -307,7 +307,7 @@ func elimUnreadAutos(f *Func) { if !ok { continue } - if n.Class() != ir.PAUTO { + if n.Class != ir.PAUTO { continue } diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go index 8712ff78c15da..32e6d09d1bc32 100644 --- a/src/cmd/compile/internal/ssa/export_test.go +++ b/src/cmd/compile/internal/ssa/export_test.go @@ -70,7 +70,7 @@ func (TestFrontend) StringData(s string) *obj.LSym { } func (TestFrontend) Auto(pos src.XPos, t *types.Type) *ir.Name { n := ir.NewNameAt(pos, &types.Sym{Name: "aFakeAuto"}) - n.SetClass(ir.PAUTO) + n.Class = ir.PAUTO return n } func (d TestFrontend) SplitString(s LocalSlot) (LocalSlot, LocalSlot) { diff --git a/src/cmd/compile/internal/ssagen/nowb.go b/src/cmd/compile/internal/ssagen/nowb.go index 26858fac873c4..60cfb2f698885 100644 --- a/src/cmd/compile/internal/ssagen/nowb.go +++ b/src/cmd/compile/internal/ssagen/nowb.go @@ -76,7 +76,7 @@ func (c *nowritebarrierrecChecker) findExtraCalls(nn ir.Node) { return } fn := n.X.(*ir.Name) - if fn.Class_ != ir.PFUNC || fn.Defn == nil { + if fn.Class != ir.PFUNC || fn.Defn == nil { return } if !types.IsRuntimePkg(fn.Sym().Pkg) || fn.Sym().Name != "systemstack" { diff --git a/src/cmd/compile/internal/ssagen/pgen.go b/src/cmd/compile/internal/ssagen/pgen.go index 2be10ff7af354..bbd319d73546b 100644 --- a/src/cmd/compile/internal/ssagen/pgen.go +++ b/src/cmd/compile/internal/ssagen/pgen.go @@ -34,11 +34,11 @@ import ( // the top of the stack and increasing in size. // Non-autos sort on offset. func cmpstackvarlt(a, b *ir.Name) bool { - if (a.Class_ == ir.PAUTO) != (b.Class_ == ir.PAUTO) { - return b.Class_ == ir.PAUTO + if (a.Class == ir.PAUTO) != (b.Class == ir.PAUTO) { + return b.Class == ir.PAUTO } - if a.Class_ != ir.PAUTO { + if a.Class != ir.PAUTO { return a.FrameOffset() < b.FrameOffset() } @@ -79,7 +79,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { // Mark the PAUTO's unused. for _, ln := range fn.Dcl { - if ln.Class_ == ir.PAUTO { + if ln.Class == ir.PAUTO { ln.SetUsed(false) } } @@ -94,7 +94,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { for _, b := range f.Blocks { for _, v := range b.Values { if n, ok := v.Aux.(*ir.Name); ok { - switch n.Class_ { + switch n.Class { case ir.PPARAM, ir.PPARAMOUT: // Don't modify nodfp; it is a global. if n != ir.RegFP { @@ -120,7 +120,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { // Reassign stack offsets of the locals that are used. 
lastHasPtr := false for i, n := range fn.Dcl { - if n.Op() != ir.ONAME || n.Class_ != ir.PAUTO { + if n.Op() != ir.ONAME || n.Class != ir.PAUTO { continue } if !n.Used() { @@ -207,7 +207,7 @@ func init() { func StackOffset(slot ssa.LocalSlot) int32 { n := slot.N var off int64 - switch n.Class_ { + switch n.Class { case ir.PAUTO: off = n.FrameOffset() if base.Ctxt.FixedFrameSize() == 0 { diff --git a/src/cmd/compile/internal/ssagen/pgen_test.go b/src/cmd/compile/internal/ssagen/pgen_test.go index 82d8447e9fd6f..69ed8ad74e974 100644 --- a/src/cmd/compile/internal/ssagen/pgen_test.go +++ b/src/cmd/compile/internal/ssagen/pgen_test.go @@ -46,7 +46,7 @@ func TestCmpstackvar(t *testing.T) { n := typecheck.NewName(s) n.SetType(t) n.SetFrameOffset(xoffset) - n.Class_ = cl + n.Class = cl return n } testdata := []struct { @@ -161,7 +161,7 @@ func TestStackvarSort(t *testing.T) { n := typecheck.NewName(s) n.SetType(t) n.SetFrameOffset(xoffset) - n.Class_ = cl + n.Class = cl return n } inp := []*ir.Name{ diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 8e3b09aac3d63..5998c420122f3 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -436,7 +436,7 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { var args []ssa.Param var results []ssa.Param for _, n := range fn.Dcl { - switch n.Class_ { + switch n.Class { case ir.PPARAM: s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem) args = append(args, ssa.Param{Type: n.Type(), Offset: int32(n.FrameOffset())}) @@ -457,13 +457,13 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { case ir.PFUNC: // local function - already handled by frontend default: - s.Fatalf("local variable with class %v unimplemented", n.Class_) + s.Fatalf("local variable with class %v unimplemented", n.Class) } } // Populate SSAable arguments. for _, n := range fn.Dcl { - if n.Class_ == ir.PPARAM && s.canSSA(n) { + if n.Class == ir.PPARAM && s.canSSA(n) { v := s.newValue0A(ssa.OpArg, n.Type(), n) s.vars[n] = v s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself. 
@@ -1166,7 +1166,7 @@ func (s *state) stmt(n ir.Node) { case ir.OCALLINTER: n := n.(*ir.CallExpr) s.callResult(n, callNormal) - if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class_ == ir.PFUNC { + if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class == ir.PFUNC { if fn := n.X.Sym().Name; base.Flag.CompilingRuntime && fn == "throw" || n.X.Sym().Pkg == ir.Pkgs.Runtime && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") { m := s.mem() @@ -1242,7 +1242,7 @@ func (s *state) stmt(n ir.Node) { case ir.ODCL: n := n.(*ir.Decl) - if n.X.Class_ == ir.PAUTOHEAP { + if n.X.Class == ir.PAUTOHEAP { s.Fatalf("DCL %v", n) } @@ -1634,7 +1634,7 @@ func (s *state) stmt(n ir.Node) { if !v.Addrtaken() { s.Fatalf("VARLIVE variable %v must have Addrtaken set", v) } - switch v.Class_ { + switch v.Class { case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT: default: s.Fatalf("VARLIVE variable %v must be Auto or Arg", v) @@ -2110,7 +2110,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb) case ir.ONAME: n := n.(*ir.Name) - if n.Class_ == ir.PFUNC { + if n.Class == ir.PFUNC { // "value" of a function is the address of the function's closure sym := staticdata.FuncLinksym(n) return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb) @@ -3003,7 +3003,7 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value { if inplace { if sn.Op() == ir.ONAME { sn := sn.(*ir.Name) - if sn.Class_ != ir.PEXTERN { + if sn.Class != ir.PEXTERN { // Tell liveness we're about to build a new slice s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem()) } @@ -3222,7 +3222,7 @@ func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask // If this assignment clobbers an entire local variable, then emit // OpVarDef so liveness analysis knows the variable is redefined. 
- if base := clobberBase(left); base.Op() == ir.ONAME && base.(*ir.Name).Class_ != ir.PEXTERN && skip == 0 { + if base := clobberBase(left); base.Op() == ir.ONAME && base.(*ir.Name).Class != ir.PEXTERN && skip == 0 { s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base.(*ir.Name), s.mem(), !ir.IsAutoTmp(base)) } @@ -4385,7 +4385,7 @@ func (s *state) openDeferRecord(n *ir.CallExpr) { closureVal := s.expr(fn) closure := s.openDeferSave(nil, fn.Type(), closureVal) opendefer.closureNode = closure.Aux.(*ir.Name) - if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class_ == ir.PFUNC) { + if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC) { opendefer.closure = closure } } else if n.Op() == ir.OCALLMETH { @@ -4651,7 +4651,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val switch n.Op() { case ir.OCALLFUNC: testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f) - if k == callNormal && fn.Op() == ir.ONAME && fn.(*ir.Name).Class_ == ir.PFUNC { + if k == callNormal && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC { fn := fn.(*ir.Name) sym = fn.Sym() break @@ -4958,7 +4958,7 @@ func (s *state) addr(n ir.Node) *ssa.Value { fallthrough case ir.ONAME: n := n.(*ir.Name) - switch n.Class_ { + switch n.Class { case ir.PEXTERN: // global variable v := s.entryNewValue1A(ssa.OpAddr, t, n.Linksym(), s.sb) @@ -4987,7 +4987,7 @@ func (s *state) addr(n ir.Node) *ssa.Value { // that cse works on their addresses return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true) default: - s.Fatalf("variable address class %v not implemented", n.Class_) + s.Fatalf("variable address class %v not implemented", n.Class) return nil } case ir.ORESULT: @@ -5096,10 +5096,10 @@ func (s *state) canSSAName(name *ir.Name) bool { if ir.IsParamHeapCopy(name) { return false } - if name.Class_ == ir.PAUTOHEAP { + if name.Class == ir.PAUTOHEAP { s.Fatalf("canSSA of PAUTOHEAP %v", name) } - switch name.Class_ { + switch name.Class { case ir.PEXTERN: return false case ir.PPARAMOUT: @@ -5117,7 +5117,7 @@ func (s *state) canSSAName(name *ir.Name) bool { return false } } - if name.Class_ == ir.PPARAM && name.Sym() != nil && name.Sym().Name == ".this" { + if name.Class == ir.PPARAM && name.Sym() != nil && name.Sym().Name == ".this" { // wrappers generated by genwrapper need to update // the .this pointer in place. // TODO: treat as a PPARAMOUT? @@ -6210,7 +6210,7 @@ func (s *state) mem() *ssa.Value { } func (s *state) addNamedValue(n *ir.Name, v *ssa.Value) { - if n.Class_ == ir.Pxxx { + if n.Class == ir.Pxxx { // Don't track our marker nodes (memVar etc.). return } @@ -6218,7 +6218,7 @@ func (s *state) addNamedValue(n *ir.Name, v *ssa.Value) { // Don't track temporary variables. return } - if n.Class_ == ir.PPARAMOUT { + if n.Class == ir.PPARAMOUT { // Don't track named output values. This prevents return values // from being assigned too early. See #14591 and #14762. TODO: allow this. 
return @@ -6741,8 +6741,8 @@ func defframe(s *State, e *ssafn) { if !n.Needzero() { continue } - if n.Class_ != ir.PAUTO { - e.Fatalf(n.Pos(), "needzero class %d", n.Class_) + if n.Class != ir.PAUTO { + e.Fatalf(n.Pos(), "needzero class %d", n.Class) } if n.Type().Size()%int64(types.PtrSize) != 0 || n.FrameOffset()%int64(types.PtrSize) != 0 || n.Type().Size() == 0 { e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset_) @@ -6826,7 +6826,7 @@ func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) { a.Name = obj.NAME_EXTERN a.Sym = n case *ir.Name: - if n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT { + if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT { a.Name = obj.NAME_PARAM a.Sym = ir.Orig(n).(*ir.Name).Linksym() a.Offset += n.FrameOffset() @@ -6968,7 +6968,7 @@ func AddrAuto(a *obj.Addr, v *ssa.Value) { a.Sym = n.Linksym() a.Reg = int16(Arch.REGSP) a.Offset = n.FrameOffset() + off - if n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT { + if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT { a.Name = obj.NAME_PARAM } else { a.Name = obj.NAME_AUTO @@ -7198,7 +7198,7 @@ func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym { func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot { node := parent.N - if node.Class_ != ir.PAUTO || node.Addrtaken() { + if node.Class != ir.PAUTO || node.Addrtaken() { // addressed things and non-autos retain their parents (i.e., cannot truly be split) return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset} } @@ -7208,7 +7208,7 @@ func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t s.Def = n ir.AsNode(s.Def).Name().SetUsed(true) n.SetType(t) - n.Class_ = ir.PAUTO + n.Class = ir.PAUTO n.SetEsc(ir.EscNever) n.Curfn = e.curfn e.curfn.Dcl = append(e.curfn.Dcl, n) diff --git a/src/cmd/compile/internal/staticdata/data.go b/src/cmd/compile/internal/staticdata/data.go index 27d9cec06d6d3..94fa6760a044f 100644 --- a/src/cmd/compile/internal/staticdata/data.go +++ b/src/cmd/compile/internal/staticdata/data.go @@ -50,8 +50,8 @@ func InitFunc(n *ir.Name, noff int64, f *ir.Name) { if n.Sym() == nil { base.Fatalf("pfuncsym nil n sym") } - if f.Class_ != ir.PFUNC { - base.Fatalf("pfuncsym class not PFUNC %d", f.Class_) + if f.Class != ir.PFUNC { + base.Fatalf("pfuncsym class not PFUNC %d", f.Class) } s := n.Linksym() s.WriteAddr(base.Ctxt, noff, types.PtrSize, FuncLinksym(f), 0) @@ -259,7 +259,7 @@ func FuncSym(s *types.Sym) *types.Sym { } func FuncLinksym(n *ir.Name) *obj.LSym { - if n.Op() != ir.ONAME || n.Class_ != ir.PFUNC { + if n.Op() != ir.ONAME || n.Class != ir.PFUNC { base.Fatalf("expected func name: %v", n) } return FuncSym(n.Sym()).Linksym() diff --git a/src/cmd/compile/internal/staticinit/sched.go b/src/cmd/compile/internal/staticinit/sched.go index 8e4ce55954816..ac0b6cd87efec 100644 --- a/src/cmd/compile/internal/staticinit/sched.go +++ b/src/cmd/compile/internal/staticinit/sched.go @@ -78,12 +78,12 @@ func (s *Schedule) tryStaticInit(nn ir.Node) bool { // like staticassign but we are copying an already // initialized value r. 
func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Type) bool { - if rn.Class_ == ir.PFUNC { + if rn.Class == ir.PFUNC { // TODO if roff != 0 { panic } staticdata.InitFunc(l, loff, rn) return true } - if rn.Class_ != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg { + if rn.Class != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg { return false } if rn.Defn.Op() != ir.OAS { @@ -246,7 +246,7 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty case ir.OSTR2BYTES: r := r.(*ir.ConvExpr) - if l.Class_ == ir.PEXTERN && r.X.Op() == ir.OLITERAL { + if l.Class == ir.PEXTERN && r.X.Op() == ir.OLITERAL { sval := ir.StringVal(r.X) staticdata.InitSliceBytes(l, loff, sval) return true diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go index 5eaf100eed06f..6c3aa3781e0e5 100644 --- a/src/cmd/compile/internal/typecheck/dcl.go +++ b/src/cmd/compile/internal/typecheck/dcl.go @@ -91,7 +91,7 @@ func Declare(n *ir.Name, ctxt ir.Class) { s.Lastlineno = base.Pos s.Def = n n.Vargen = int32(gen) - n.Class_ = ctxt + n.Class = ctxt if ctxt == ir.PFUNC { n.Sym().SetFunc(true) } @@ -455,7 +455,7 @@ func TempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name { n := ir.NewNameAt(pos, s) s.Def = n n.SetType(t) - n.Class_ = ir.PAUTO + n.Class = ir.PAUTO n.SetEsc(ir.EscNever) n.Curfn = curfn n.SetUsed(true) diff --git a/src/cmd/compile/internal/typecheck/export.go b/src/cmd/compile/internal/typecheck/export.go index c5253914012c6..63d0a1ec6c656 100644 --- a/src/cmd/compile/internal/typecheck/export.go +++ b/src/cmd/compile/internal/typecheck/export.go @@ -53,7 +53,7 @@ func importsym(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Cl } n := ir.NewDeclNameAt(pos, op, s) - n.Class_ = ctxt // TODO(mdempsky): Move this into NewDeclNameAt too? + n.Class = ctxt // TODO(mdempsky): Move this into NewDeclNameAt too? s.SetPkgDef(n) return n } diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index 859239700446d..b3efb8f25a502 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -129,7 +129,7 @@ func CaptureVars(fn *ir.Func) { outermost := v.Defn.(*ir.Name) // out parameters will be assigned to implicitly upon return. - if outermost.Class_ != ir.PPARAMOUT && !outermost.Addrtaken() && !outermost.Assigned() && v.Type().Size() <= 128 { + if outermost.Class != ir.PPARAMOUT && !outermost.Addrtaken() && !outermost.Assigned() && v.Type().Size() <= 128 { v.SetByval(true) } else { outermost.SetAddrtaken(true) @@ -408,7 +408,7 @@ func tcFunc(n *ir.Func) { } for _, ln := range n.Dcl { - if ln.Op() == ir.ONAME && (ln.Class_ == ir.PPARAM || ln.Class_ == ir.PPARAMOUT) { + if ln.Op() == ir.ONAME && (ln.Class == ir.PPARAM || ln.Class == ir.PPARAMOUT) { ln.Decldepth = 1 } } diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index dd515b8ccdd1f..a7927c39a31b5 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -430,7 +430,7 @@ func (p *iexporter) doDecl(n *ir.Name) { switch n.Op() { case ir.ONAME: - switch n.Class_ { + switch n.Class { case ir.PEXTERN: // Variable. 
w.tag('V') @@ -450,7 +450,7 @@ func (p *iexporter) doDecl(n *ir.Name) { w.funcExt(n) default: - base.Fatalf("unexpected class: %v, %v", n, n.Class_) + base.Fatalf("unexpected class: %v, %v", n, n.Class) } case ir.OLITERAL: @@ -1260,7 +1260,7 @@ func (w *exportWriter) expr(n ir.Node) { case ir.ONAME: // Package scope name. n := n.(*ir.Name) - if (n.Class_ == ir.PEXTERN || n.Class_ == ir.PFUNC) && !ir.IsBlank(n) { + if (n.Class == ir.PEXTERN || n.Class == ir.PFUNC) && !ir.IsBlank(n) { w.op(ir.ONONAME) w.qualifiedIdent(n) break @@ -1526,7 +1526,7 @@ func (w *exportWriter) localName(n *ir.Name) { // PPARAM/PPARAMOUT, because we only want to include vargen in // non-param names. var v int32 - if n.Class_ == ir.PAUTO || (n.Class_ == ir.PAUTOHEAP && n.Stackcopy == nil) { + if n.Class == ir.PAUTO || (n.Class == ir.PAUTOHEAP && n.Stackcopy == nil) { v = n.Vargen } diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go index 2dc7e70b65013..15c57b2380159 100644 --- a/src/cmd/compile/internal/typecheck/iimport.go +++ b/src/cmd/compile/internal/typecheck/iimport.go @@ -333,7 +333,7 @@ func (r *importReader) doDecl(sym *types.Sym) *ir.Name { // methodSym already marked m.Sym as a function. m := ir.NewNameAt(mpos, ir.MethodSym(recv.Type, msym)) - m.Class_ = ir.PFUNC + m.Class = ir.PFUNC m.SetType(mtyp) m.Func = ir.NewFunc(mpos) diff --git a/src/cmd/compile/internal/typecheck/syms.go b/src/cmd/compile/internal/typecheck/syms.go index 01c03b5f9f714..28db40db91f1e 100644 --- a/src/cmd/compile/internal/typecheck/syms.go +++ b/src/cmd/compile/internal/typecheck/syms.go @@ -30,7 +30,7 @@ func SubstArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name { types.CalcSize(t) } n := ir.NewNameAt(old.Pos(), old.Sym()) - n.Class_ = old.Class() + n.Class = old.Class n.SetType(types.SubstAny(old.Type(), &types_)) if len(types_) > 0 { base.Fatalf("substArgTypes: too many argument types") diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index 812b94de0dd4d..981f4ef1d6efd 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -2099,7 +2099,7 @@ func CheckUnused(fn *ir.Func) { // Propagate the used flag for typeswitch variables up to the NONAME in its definition. 
for _, ln := range fn.Dcl { - if ln.Op() == ir.ONAME && ln.Class_ == ir.PAUTO && ln.Used() { + if ln.Op() == ir.ONAME && ln.Class == ir.PAUTO && ln.Used() { if guard, ok := ln.Defn.(*ir.TypeSwitchGuard); ok { guard.Used = true } @@ -2107,7 +2107,7 @@ func CheckUnused(fn *ir.Func) { } for _, ln := range fn.Dcl { - if ln.Op() != ir.ONAME || ln.Class_ != ir.PAUTO || ln.Used() { + if ln.Op() != ir.ONAME || ln.Class != ir.PAUTO || ln.Used() { continue } if defn, ok := ln.Defn.(*ir.TypeSwitchGuard); ok { diff --git a/src/cmd/compile/internal/typecheck/universe.go b/src/cmd/compile/internal/typecheck/universe.go index f1e7ed427307f..402b8deeb3889 100644 --- a/src/cmd/compile/internal/typecheck/universe.go +++ b/src/cmd/compile/internal/typecheck/universe.go @@ -357,6 +357,6 @@ func DeclareUniverse() { ir.RegFP = NewName(Lookup(".fp")) ir.RegFP.SetType(types.Types[types.TINT32]) - ir.RegFP.Class_ = ir.PPARAM + ir.RegFP.Class = ir.PPARAM ir.RegFP.SetUsed(true) } diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go index ec0f60ad93571..3fe810ac4ea48 100644 --- a/src/cmd/compile/internal/walk/assign.go +++ b/src/cmd/compile/internal/walk/assign.go @@ -392,7 +392,7 @@ func ascompatee(op ir.Op, nl, nr []ir.Node) []ir.Node { appendWalkStmt(&late, convas(ir.NewAssignStmt(base.Pos, lorig, r), &late)) - if name == nil || name.Addrtaken() || name.Class_ == ir.PEXTERN || name.Class_ == ir.PAUTOHEAP { + if name == nil || name.Addrtaken() || name.Class == ir.PEXTERN || name.Class == ir.PAUTOHEAP { memWrite = true continue } @@ -418,7 +418,7 @@ func readsMemory(n ir.Node) bool { switch n.Op() { case ir.ONAME: n := n.(*ir.Name) - return n.Class_ == ir.PEXTERN || n.Class_ == ir.PAUTOHEAP || n.Addrtaken() + return n.Class == ir.PEXTERN || n.Class == ir.PAUTOHEAP || n.Addrtaken() case ir.OADD, ir.OAND, diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go index fcdb43f113146..449df88f9e69e 100644 --- a/src/cmd/compile/internal/walk/closure.go +++ b/src/cmd/compile/internal/walk/closure.go @@ -52,7 +52,7 @@ func Closure(fn *ir.Func) { v = addr } - v.Class_ = ir.PPARAM + v.Class = ir.PPARAM decls = append(decls, v) fld := types.NewField(src.NoXPos, v.Sym(), v.Type()) @@ -84,7 +84,7 @@ func Closure(fn *ir.Func) { if v.Byval() && v.Type().Width <= int64(2*types.PtrSize) { // If it is a small variable captured by value, downgrade it to PAUTO. - v.Class_ = ir.PAUTO + v.Class = ir.PAUTO fn.Dcl = append(fn.Dcl, v) body = append(body, ir.NewAssignStmt(base.Pos, v, cr)) } else { @@ -92,7 +92,7 @@ func Closure(fn *ir.Func) { // and initialize in entry prologue. 
addr := typecheck.NewName(typecheck.Lookup("&" + v.Sym().Name)) addr.SetType(types.NewPtr(v.Type())) - addr.Class_ = ir.PAUTO + addr.Class = ir.PAUTO addr.SetUsed(true) addr.Curfn = fn fn.Dcl = append(fn.Dcl, addr) diff --git a/src/cmd/compile/internal/walk/complit.go b/src/cmd/compile/internal/walk/complit.go index d8605d39bdc0b..8a77bba2ad9fb 100644 --- a/src/cmd/compile/internal/walk/complit.go +++ b/src/cmd/compile/internal/walk/complit.go @@ -68,7 +68,7 @@ func isSimpleName(nn ir.Node) bool { return false } n := nn.(*ir.Name) - return n.Class_ != ir.PAUTOHEAP && n.Class_ != ir.PEXTERN + return n.Class != ir.PAUTOHEAP && n.Class != ir.PEXTERN } func litas(l ir.Node, r ir.Node, init *ir.Nodes) { @@ -294,7 +294,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) // copy static to slice var_ = typecheck.AssignExpr(var_) name, offset, ok := staticinit.StaticLoc(var_) - if !ok || name.Class_ != ir.PEXTERN { + if !ok || name.Class != ir.PEXTERN { base.Fatalf("slicelit: %v", var_) } staticdata.InitSlice(name, offset, vstat, t.NumElem()) @@ -657,7 +657,7 @@ func genAsStatic(as *ir.AssignStmt) { } name, offset, ok := staticinit.StaticLoc(as.X) - if !ok || (name.Class_ != ir.PEXTERN && as.X != ir.BlankNode) { + if !ok || (name.Class != ir.PEXTERN && as.X != ir.BlankNode) { base.Fatalf("genAsStatic: lhs %v", as.X) } @@ -674,7 +674,7 @@ func genAsStatic(as *ir.AssignStmt) { if r.Offset_ != 0 { base.Fatalf("genAsStatic %+v", as) } - if r.Class_ == ir.PFUNC { + if r.Class == ir.PFUNC { staticdata.InitFunc(name, offset, r) return } diff --git a/src/cmd/compile/internal/walk/convert.go b/src/cmd/compile/internal/walk/convert.go index d0cd5ff75377f..85459fd92f7ee 100644 --- a/src/cmd/compile/internal/walk/convert.go +++ b/src/cmd/compile/internal/walk/convert.go @@ -68,12 +68,12 @@ func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node { if ir.Names.Staticuint64s == nil { ir.Names.Staticuint64s = typecheck.NewName(ir.Pkgs.Runtime.Lookup("staticuint64s")) - ir.Names.Staticuint64s.Class_ = ir.PEXTERN + ir.Names.Staticuint64s.Class = ir.PEXTERN // The actual type is [256]uint64, but we use [256*8]uint8 so we can address // individual bytes. ir.Names.Staticuint64s.SetType(types.NewArray(types.Types[types.TUINT8], 256*8)) ir.Names.Zerobase = typecheck.NewName(ir.Pkgs.Runtime.Lookup("zerobase")) - ir.Names.Zerobase.Class_ = ir.PEXTERN + ir.Names.Zerobase.Class = ir.PEXTERN ir.Names.Zerobase.SetType(types.Types[types.TUINTPTR]) } @@ -98,7 +98,7 @@ func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node { xe := ir.NewIndexExpr(base.Pos, ir.Names.Staticuint64s, index) xe.SetBounded(true) value = xe - case n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class_ == ir.PEXTERN && n.X.(*ir.Name).Readonly(): + case n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class == ir.PEXTERN && n.X.(*ir.Name).Readonly(): // n.Left is a readonly global; use it directly. 
value = n.X case !fromType.IsInterface() && n.Esc() == ir.EscNone && fromType.Width <= 1024: diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index 8a56526a362ae..3dffb496e9119 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -52,7 +52,7 @@ func walkExpr(n ir.Node, init *ir.Nodes) ir.Node { base.Fatalf("expression has untyped type: %+v", n) } - if n.Op() == ir.ONAME && n.(*ir.Name).Class_ == ir.PAUTOHEAP { + if n.Op() == ir.ONAME && n.(*ir.Name).Class == ir.PAUTOHEAP { n := n.(*ir.Name) nn := ir.NewStarExpr(base.Pos, n.Heapaddr) nn.X.MarkNonNil() diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index 2164685cd4780..38a9bec6e362b 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -235,7 +235,7 @@ func (o *orderState) safeExpr(n ir.Node) ir.Node { // because we emit explicit VARKILL instructions marking the end of those // temporaries' lifetimes. func isaddrokay(n ir.Node) bool { - return ir.IsAddressable(n) && (n.Op() != ir.ONAME || n.(*ir.Name).Class_ == ir.PEXTERN || ir.IsAutoTmp(n)) + return ir.IsAddressable(n) && (n.Op() != ir.ONAME || n.(*ir.Name).Class == ir.PEXTERN || ir.IsAutoTmp(n)) } // addrTemp ensures that n is okay to pass by address to runtime routines. diff --git a/src/cmd/compile/internal/walk/race.go b/src/cmd/compile/internal/walk/race.go index 20becf9be93d6..77cabe50c6a0a 100644 --- a/src/cmd/compile/internal/walk/race.go +++ b/src/cmd/compile/internal/walk/race.go @@ -39,7 +39,7 @@ func instrument(fn *ir.Func) { // race in the future. nodpc := ir.NewNameAt(src.NoXPos, typecheck.Lookup(".fp")) - nodpc.Class_ = ir.PPARAM + nodpc.Class = ir.PPARAM nodpc.SetUsed(true) nodpc.SetType(types.Types[types.TUINTPTR]) nodpc.SetFrameOffset(int64(-types.PtrSize)) diff --git a/src/cmd/compile/internal/walk/stmt.go b/src/cmd/compile/internal/walk/stmt.go index 460c0a7c103b3..1df491bd4e9d7 100644 --- a/src/cmd/compile/internal/walk/stmt.go +++ b/src/cmd/compile/internal/walk/stmt.go @@ -176,7 +176,7 @@ func walkStmtList(s []ir.Node) { // walkDecl walks an ODCL node. 
func walkDecl(n *ir.Decl) ir.Node { v := n.X - if v.Class_ == ir.PAUTOHEAP { + if v.Class == ir.PAUTOHEAP { if base.Flag.CompilingRuntime { base.Errorf("%v escapes to heap, not allowed in runtime", v) } diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go index 57c2d43753b5e..928b6737528ce 100644 --- a/src/cmd/compile/internal/walk/walk.go +++ b/src/cmd/compile/internal/walk/walk.go @@ -61,7 +61,7 @@ func Walk(fn *ir.Func) { func paramoutheap(fn *ir.Func) bool { for _, ln := range fn.Dcl { - switch ln.Class_ { + switch ln.Class { case ir.PPARAMOUT: if ir.IsParamStackCopy(ln) || ln.Addrtaken() { return true @@ -137,7 +137,7 @@ func paramstoheap(params *types.Type) []ir.Node { if stackcopy := v.Name().Stackcopy; stackcopy != nil { nn = append(nn, walkStmt(ir.NewDecl(base.Pos, ir.ODCL, v.(*ir.Name)))) - if stackcopy.Class_ == ir.PPARAM { + if stackcopy.Class == ir.PPARAM { nn = append(nn, walkStmt(typecheck.Stmt(ir.NewAssignStmt(base.Pos, v, stackcopy)))) } } @@ -185,7 +185,7 @@ func returnsfromheap(params *types.Type) []ir.Node { if v == nil { continue } - if stackcopy := v.Name().Stackcopy; stackcopy != nil && stackcopy.Class_ == ir.PPARAMOUT { + if stackcopy := v.Name().Stackcopy; stackcopy != nil && stackcopy.Class == ir.PPARAMOUT { nn = append(nn, walkStmt(typecheck.Stmt(ir.NewAssignStmt(base.Pos, stackcopy, v)))) } } From c28ca67a961a0c1d149a249918a15ed74c61af27 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 4 Jan 2021 22:58:24 -0800 Subject: [PATCH 345/474] [dev.regabi] cmd/compile: fix ir.Dump for []*CaseClause, etc Dump uses reflection to print IR nodes, and it only knew how to print out the Nodes slice type itself. This CL adds support for printing any slice whose element type implements Node, such as SwitchStmt and SelectStmt's clause lists. Change-Id: I2fd8defe11868b564d1d389ea3cd9b8abcefac62 Reviewed-on: https://go-review.googlesource.com/c/go/+/281537 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/ir/fmt.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 92ea160a28bf3..a4e769f508b09 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -1237,10 +1237,25 @@ func dumpNode(w io.Writer, n Node, depth int) { fmt.Fprintf(w, "%+v-%s", n.Op(), name) } dumpNodes(w, val, depth+1) + default: + if vf.Kind() == reflect.Slice && vf.Type().Elem().Implements(nodeType) { + if vf.Len() == 0 { + continue + } + if name != "" { + indent(w, depth) + fmt.Fprintf(w, "%+v-%s", n.Op(), name) + } + for i, n := 0, vf.Len(); i < n; i++ { + dumpNode(w, vf.Index(i).Interface().(Node), depth+1) + } + } } } } +var nodeType = reflect.TypeOf((*Node)(nil)).Elem() + func dumpNodes(w io.Writer, list Nodes, depth int) { if len(list) == 0 { fmt.Fprintf(w, " ") From eb626409d152caabac418eccbe86b49d1fc6a6f5 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 4 Jan 2021 18:28:55 -0800 Subject: [PATCH 346/474] [dev.regabi] cmd/compile: simplify CaptureVars CaptureVars is responsible for deciding whether free variables should be captured by value or by reference, but currently it also makes up for some of the short-comings of the frontend symbol resolution / type-checking algorithms. These are really separate responsibilities, so move the latter into type-checking where it fits better. Passes toolstash -cmp. 
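To make the capture rule concrete, here is a minimal illustration in
ordinary Go (not compiler code; the "<= 128 bytes and never reassigned"
heuristic is quoted from the comment in the diff below): a captured
variable that is reassigned after the closure is created must be
captured by reference to preserve Go's variable-capture semantics,
while an effectively constant variable can safely be copied.

package main

import "fmt"

func main() {
	x, y := 1, 2
	f := func() { fmt.Println(x, y) }
	x = 10 // x is reassigned after capture; a by-value copy would wrongly print 1
	f()    // prints "10 2": x must be captured by reference, y may be copied
}
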
Change-Id: Iffbd53e83846a9ca9dfb54b597450b8543252850 Reviewed-on: https://go-review.googlesource.com/c/go/+/281534 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/typecheck/func.go | 49 ++++++++++------------ 1 file changed, 21 insertions(+), 28 deletions(-) diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index b3efb8f25a502..e4c308822521c 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -106,26 +106,7 @@ func PartialCallType(n *ir.SelectorExpr) *types.Type { // We use value capturing for values <= 128 bytes that are never reassigned // after capturing (effectively constant). func CaptureVars(fn *ir.Func) { - lno := base.Pos - base.Pos = fn.Pos() - cvars := fn.ClosureVars - out := cvars[:0] - for _, v := range cvars { - if v.Type() == nil { - // If v.Type is nil, it means v looked like it - // was going to be used in the closure, but - // isn't. This happens in struct literals like - // s{f: x} where we can't distinguish whether - // f is a field identifier or expression until - // resolving s. - continue - } - out = append(out, v) - - // type check closed variables outside the closure, - // so that the outer frame also grabs them and knows they escape. - Expr(v.Outer) - + for _, v := range fn.ClosureVars { outermost := v.Defn.(*ir.Name) // out parameters will be assigned to implicitly upon return. @@ -136,20 +117,13 @@ func CaptureVars(fn *ir.Func) { } if base.Flag.LowerM > 1 { - var name *types.Sym - if v.Curfn != nil && v.Curfn.Nname != nil { - name = v.Curfn.Sym() - } how := "ref" if v.Byval() { how = "value" } - base.WarnfAt(v.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym(), outermost.Addrtaken(), outermost.Assigned(), v.Type().Size()) + base.WarnfAt(v.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", v.Curfn, how, v, outermost.Addrtaken(), outermost.Assigned(), v.Type().Size()) } } - - fn.ClosureVars = out - base.Pos = lno } // Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck @@ -396,6 +370,25 @@ func tcClosure(clo *ir.ClosureExpr, top int) { ir.CurFunc = oldfn } + out := 0 + for _, v := range fn.ClosureVars { + if v.Type() == nil { + // If v.Type is nil, it means v looked like it was going to be + // used in the closure, but isn't. This happens in struct + // literals like s{f: x} where we can't distinguish whether f is + // a field identifier or expression until resolving s. + continue + } + + // type check closed variables outside the closure, so that the + // outer frame also captures them. + Expr(v.Outer) + + fn.ClosureVars[out] = v + out++ + } + fn.ClosureVars = fn.ClosureVars[:out] + Target.Decls = append(Target.Decls, fn) } From 9aa950c40789223d9e8df7d1ec657cd313e6c7aa Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 5 Jan 2021 03:28:06 -0800 Subject: [PATCH 347/474] [dev.regabi] cmd/compile: make ir.OuterValue safer For OINDEX expressions, ir.OuterValue depends on knowing the indexee's type. Rather than silently acting as though it's not an array, make it loudly fail. The only code that needs to be fixed to support this is checkassign during typechecking, which needs to avoid calling ir.OuterValue now if typechecking the assigned operand already failed. Passes toolstash -cmp. 
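The array special case is what forces OuterValue to consult the
indexee's type, as the node.go hunk below shows: assigning to an
element of an array writes into the array variable itself, whereas
assigning to a slice element writes through the slice's data pointer
and leaves the slice variable untouched. A minimal illustration in
ordinary Go (not compiler code):

package main

import "fmt"

func main() {
	var arr [2]int
	sl := make([]int, 2)
	arr[0] = 1 // mutates the variable arr itself, so the outer value of arr[0] is arr
	sl[0] = 1  // writes through sl's data pointer; the variable sl is unchanged
	fmt.Println(arr, sl) // [1 0] [1 0]
}
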
Change-Id: I935cae0dacc837202bc6b63164dc2f0a6fde005c Reviewed-on: https://go-review.googlesource.com/c/go/+/281539 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/ir/node.go | 5 ++++- src/cmd/compile/internal/typecheck/typecheck.go | 13 ++++++++----- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index a5a7203faaa22..850d7343aaef1 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -568,7 +568,10 @@ func OuterValue(n Node) Node { continue case OINDEX: nn := nn.(*IndexExpr) - if nn.X.Type() != nil && nn.X.Type().IsArray() { + if nn.X.Type() == nil { + base.Fatalf("OuterValue needs type for %v", nn.X) + } + if nn.X.Type().IsArray() { n = nn.X continue } diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index 981f4ef1d6efd..c3a5a3c40f715 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -1612,6 +1612,14 @@ func checklvalue(n ir.Node, verb string) { } func checkassign(stmt ir.Node, n ir.Node) { + // have already complained about n being invalid + if n.Type() == nil { + if base.Errors() == 0 { + base.Fatalf("expected an error about %v", n) + } + return + } + // Variables declared in ORANGE are assigned on every iteration. if !ir.DeclaredBy(n, stmt) || stmt.Op() == ir.ORANGE { r := ir.OuterValue(n) @@ -1633,11 +1641,6 @@ func checkassign(stmt ir.Node, n ir.Node) { return } - // have already complained about n being invalid - if n.Type() == nil { - return - } - switch { case n.Op() == ir.ODOT && n.(*ir.SelectorExpr).X.Op() == ir.OINDEXMAP: base.Errorf("cannot assign to struct field %v in map", n) From e09783cbc0a7142719c6210b4eda7b21daad91d5 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 5 Jan 2021 03:27:46 -0800 Subject: [PATCH 348/474] [dev.regabi] cmd/compile: make ir.StaticValue safer ir.StaticValue currently relies on CaptureVars setting Addrtaken for variables that are assigned within nested function literals. We want to move that logic to escape analysis, but ir.StaticValue is used in inlining and devirtualization, which happen before escape analysis. The long-term solution here is to generalize escape analysis's precise reassignment tracking for use by other optimization passes, but for now we just generalize ir.StaticValue to not depend on Addrtaken anymore. Instead, it now also pays attention to OADDR nodes as well as recurses into OCLOSURE bodies. Passes toolstash -cmp. 
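Concretely, the two cases added to reassigned below (OADDR and
OCLOSURE) cover writes that never appear as a direct assignment at the
variable's own level. A minimal illustration in ordinary Go (not
compiler code) of a value that must not be treated as static:

package main

import "fmt"

func main() {
	fn := func() string { return "a" }

	p := &fn
	*p = func() string { return "b" } // reassignment through a taken address (the OADDR case)

	redefine := func() {
		fn = func() string { return "c" } // reassignment inside a nested function literal (the OCLOSURE case)
	}
	redefine()

	// fn is not a static value: devirtualizing fn() to the "a" literal
	// would be wrong here.
	fmt.Println(fn()) // prints "c"
}
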
Change-Id: I6114e3277fb70b235f4423d2983d0433c881f79f Reviewed-on: https://go-review.googlesource.com/c/go/+/281540 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/ir/expr.go | 38 +++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 5 deletions(-) diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 6d81bf8781ef5..77b6c8a1037de 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -771,7 +771,7 @@ func staticValue1(nn Node) Node { return nil } n := nn.(*Name) - if n.Class != PAUTO || n.Addrtaken() { + if n.Class != PAUTO { return nil } @@ -823,23 +823,51 @@ func reassigned(name *Name) bool { if name.Curfn == nil { return true } - return Any(name.Curfn, func(n Node) bool { + + // TODO(mdempsky): This is inefficient and becoming increasingly + // unwieldy. Figure out a way to generalize escape analysis's + // reassignment detection for use by inlining and devirtualization. + + // isName reports whether n is a reference to name. + isName := func(n Node) bool { + if n, ok := n.(*Name); ok && n.Op() == ONAME { + if n.IsClosureVar() && n.Defn != nil { + n = n.Defn.(*Name) + } + return n == name + } + return false + } + + var do func(n Node) bool + do = func(n Node) bool { switch n.Op() { case OAS: n := n.(*AssignStmt) - if n.X == name && n != name.Defn { + if isName(n.X) && n != name.Defn { return true } case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE, OAS2RECV, OSELRECV2: n := n.(*AssignListStmt) for _, p := range n.Lhs { - if p == name && n != name.Defn { + if isName(p) && n != name.Defn { return true } } + case OADDR: + n := n.(*AddrExpr) + if isName(OuterValue(n.X)) { + return true + } + case OCLOSURE: + n := n.(*ClosureExpr) + if Any(n.Func, do) { + return true + } } return false - }) + } + return Any(name.Curfn, do) } // IsIntrinsicCall reports whether the compiler back end will treat the call as an intrinsic operation. From 77365c5ed739f4882020ff76b2a4f5bfe4e8fc9d Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 5 Jan 2021 06:43:38 -0800 Subject: [PATCH 349/474] [dev.regabi] cmd/compile: add Name.Canonical and move Byval There's a bunch of code that wants to map closure variables back to their original name, so add a single Name.Canonical method that they can all use. Also, move the Byval flag from being stored on individual closure variables to being stored on the canonical variable. Passes toolstash -cmp. 
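A toy model of the relationship the new method captures (a sketch with
a stand-in Name type, not the real ir.Name): a closure variable links
back to the outer declaration via Defn, Canonical follows that link,
and Byval is answered by the canonical variable so every closure copy
of a variable agrees on how it is captured.

package main

import "fmt"

// Name is a toy stand-in for ir.Name, keeping only the fields needed
// to model the Defn link between a closure variable and its declaration.
type Name struct {
	closureVar bool
	Defn       *Name
	byval      bool
}

// Canonical follows the Defn link of a closure variable to the
// original declaration, mirroring the method added in this CL.
func (n *Name) Canonical() *Name {
	if n.closureVar {
		n = n.Defn
	}
	return n
}

// Byval is stored on, and read from, the canonical variable.
func (n *Name) Byval() bool { return n.Canonical().byval }

func main() {
	outer := &Name{byval: true}                   // declaration in the enclosing function
	inner := &Name{closureVar: true, Defn: outer} // the copy inside the closure
	fmt.Println(inner.Canonical() == outer, inner.Byval()) // true true
}
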
Change-Id: Ia3ef81af5a15783d09f04b4e274ce33df94518e6 Reviewed-on: https://go-review.googlesource.com/c/go/+/281541 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/dwarfgen/dwarf.go | 5 +--- src/cmd/compile/internal/escape/escape.go | 20 +++------------ src/cmd/compile/internal/ir/expr.go | 11 +++----- src/cmd/compile/internal/ir/name.go | 29 ++++++++++++++++++++-- src/cmd/compile/internal/typecheck/func.go | 4 +-- 5 files changed, 37 insertions(+), 32 deletions(-) diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go index 1534adaac8e66..ff249c1f4ecc3 100644 --- a/src/cmd/compile/internal/dwarfgen/dwarf.go +++ b/src/cmd/compile/internal/dwarfgen/dwarf.go @@ -127,10 +127,7 @@ func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, } func declPos(decl *ir.Name) src.XPos { - if decl.IsClosureVar() { - decl = decl.Defn.(*ir.Name) - } - return decl.Pos() + return decl.Canonical().Pos() } // createDwarfVars process fn, returning a list of DWARF variables and the diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index 6a2e685fe87eb..794c52f5ae3f8 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -1146,19 +1146,6 @@ func (e *escape) later(k hole) hole { return loc.asHole() } -// canonicalNode returns the canonical *Node that n logically -// represents. -func canonicalNode(n ir.Node) ir.Node { - if n != nil && n.Op() == ir.ONAME && n.Name().IsClosureVar() { - n = n.Name().Defn - if n.Name().IsClosureVar() { - base.Fatalf("still closure var") - } - } - - return n -} - func (e *escape) newLoc(n ir.Node, transient bool) *location { if e.curfn == nil { base.Fatalf("e.curfn isn't set") @@ -1167,7 +1154,9 @@ func (e *escape) newLoc(n ir.Node, transient bool) *location { base.ErrorfAt(n.Pos(), "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type()) } - n = canonicalNode(n) + if n != nil && n.Op() == ir.ONAME { + n = n.(*ir.Name).Canonical() + } loc := &location{ n: n, curfn: e.curfn, @@ -1196,8 +1185,7 @@ func (e *escape) newLoc(n ir.Node, transient bool) *location { } func (b *batch) oldLoc(n *ir.Name) *location { - n = canonicalNode(n).(*ir.Name) - return n.Opt.(*location) + return n.Canonical().Opt.(*location) } func (l *location) asHole() hole { diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 77b6c8a1037de..e7aa9c6a8f06d 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -829,14 +829,9 @@ func reassigned(name *Name) bool { // reassignment detection for use by inlining and devirtualization. // isName reports whether n is a reference to name. 
- isName := func(n Node) bool { - if n, ok := n.(*Name); ok && n.Op() == ONAME { - if n.IsClosureVar() && n.Defn != nil { - n = n.Defn.(*Name) - } - return n == name - } - return false + isName := func(x Node) bool { + n, ok := x.(*Name) + return ok && n.Canonical() == name } var do func(n Node) bool diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 58b4ababff9dc..9d7d376ba50d0 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -279,7 +279,6 @@ const ( func (n *Name) Captured() bool { return n.flags&nameCaptured != 0 } func (n *Name) Readonly() bool { return n.flags&nameReadonly != 0 } -func (n *Name) Byval() bool { return n.flags&nameByval != 0 } func (n *Name) Needzero() bool { return n.flags&nameNeedzero != 0 } func (n *Name) AutoTemp() bool { return n.flags&nameAutoTemp != 0 } func (n *Name) Used() bool { return n.flags&nameUsed != 0 } @@ -294,7 +293,6 @@ func (n *Name) LibfuzzerExtraCounter() bool { return n.flags&nameLibfuzzerExtraC func (n *Name) SetCaptured(b bool) { n.flags.set(nameCaptured, b) } func (n *Name) setReadonly(b bool) { n.flags.set(nameReadonly, b) } -func (n *Name) SetByval(b bool) { n.flags.set(nameByval, b) } func (n *Name) SetNeedzero(b bool) { n.flags.set(nameNeedzero, b) } func (n *Name) SetAutoTemp(b bool) { n.flags.set(nameAutoTemp, b) } func (n *Name) SetUsed(b bool) { n.flags.set(nameUsed, b) } @@ -336,6 +334,33 @@ func (n *Name) SetVal(v constant.Value) { n.val = v } +// Canonical returns the logical declaration that n represents. If n +// is a closure variable, then Canonical returns the original Name as +// it appears in the function that immediately contains the +// declaration. Otherwise, Canonical simply returns n itself. +func (n *Name) Canonical() *Name { + if n.IsClosureVar() { + n = n.Defn.(*Name) + if n.IsClosureVar() { + base.Fatalf("recursive closure variable: %v", n) + } + } + return n +} + +func (n *Name) SetByval(b bool) { + if n.Canonical() != n { + base.Fatalf("SetByval called on non-canonical variable: %v", n) + } + n.flags.set(nameByval, b) +} + +func (n *Name) Byval() bool { + // We require byval to be set on the canonical variable, but we + // allow it to be accessed from any instance. + return n.Canonical().flags&nameByval != 0 +} + // SameSource reports whether two nodes refer to the same source // element. // diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index e4c308822521c..8fdb33b1452e8 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -110,8 +110,8 @@ func CaptureVars(fn *ir.Func) { outermost := v.Defn.(*ir.Name) // out parameters will be assigned to implicitly upon return. - if outermost.Class != ir.PPARAMOUT && !outermost.Addrtaken() && !outermost.Assigned() && v.Type().Size() <= 128 { - v.SetByval(true) + if outermost.Class != ir.PPARAMOUT && !outermost.Addrtaken() && !outermost.Assigned() && outermost.Type().Size() <= 128 { + outermost.SetByval(true) } else { outermost.SetAddrtaken(true) } From 4a9d9adea4d071927de01e5aa07b215cf1464be9 Mon Sep 17 00:00:00 2001 From: Baokun Lee Date: Tue, 5 Jan 2021 15:04:34 +0800 Subject: [PATCH 350/474] [dev.regabi] cmd/compile: remove initname function Passes toolstash -cmp. 
Change-Id: I84b99d6e636c7b867780389ad11dafc70d3628cd Reviewed-on: https://go-review.googlesource.com/c/go/+/281313 Reviewed-by: Matthew Dempsky Reviewed-by: Cuong Manh Le Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot --- src/cmd/compile/internal/typecheck/dcl.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go index 6c3aa3781e0e5..ffbf474a58558 100644 --- a/src/cmd/compile/internal/typecheck/dcl.go +++ b/src/cmd/compile/internal/typecheck/dcl.go @@ -266,7 +266,7 @@ func autoexport(n *ir.Name, ctxt ir.Class) { return } - if types.IsExported(n.Sym().Name) || initname(n.Sym().Name) { + if types.IsExported(n.Sym().Name) || n.Sym().Name == "init" { Export(n) } if base.Flag.AsmHdr != "" && !n.Sym().Asm() { @@ -422,10 +422,6 @@ func funcargs2(t *types.Type) { } } -func initname(s string) bool { - return s == "init" -} - var vargen int func Temp(t *types.Type) *ir.Name { From 81f4f0e912775d11df35220ea598e54c272073fd Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 5 Jan 2021 11:53:00 -0800 Subject: [PATCH 351/474] [dev.regabi] cmd/compile: remove race-y check in Name.Canonical The backend doesn't synchronize compilation of functions with their enclosed function literals, so it's not safe to double-check that IsClosureVar isn't set on the underlying Name. Plenty of frontend stuff would blow-up if this was wrong anyway, so it should be fine to omit. Change-Id: I3e97b64051fe56d97bf316c9b5dcce61f2082428 Reviewed-on: https://go-review.googlesource.com/c/go/+/281812 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky Reviewed-by: Than McIntosh TryBot-Result: Go Bot --- src/cmd/compile/internal/ir/name.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 9d7d376ba50d0..3999c0ecb4a33 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -341,9 +341,6 @@ func (n *Name) SetVal(v constant.Value) { func (n *Name) Canonical() *Name { if n.IsClosureVar() { n = n.Defn.(*Name) - if n.IsClosureVar() { - base.Fatalf("recursive closure variable: %v", n) - } } return n } From fb69c67cad4d554dab8281786b7e1e2707fc3346 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 5 Jan 2021 08:37:41 -0800 Subject: [PATCH 352/474] [dev.regabi] test: enable finalizer tests on !amd64 The gc implementation has had precise GC for a while now, so we can enable these tests more broadly. Confirmed that they still fail with gccgo 10.2.1. Change-Id: Ic1c0394ab832024a99e34163c422941a3706e1a2 Reviewed-on: https://go-review.googlesource.com/c/go/+/281542 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- test/deferfin.go | 7 +------ test/fixedbugs/issue5493.go | 7 +++---- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/test/deferfin.go b/test/deferfin.go index 80372916d20b3..1312bbbe71b04 100644 --- a/test/deferfin.go +++ b/test/deferfin.go @@ -18,12 +18,8 @@ import ( var sink func() func main() { - // Does not work on 32-bits due to partially conservative GC. + // Does not work with gccgo, due to partially conservative GC. // Try to enable when we have fully precise GC. - if runtime.GOARCH != "amd64" { - return - } - // Likewise for gccgo. 
if runtime.Compiler == "gccgo" { return } @@ -60,4 +56,3 @@ func main() { panic("not all finalizers are called") } } - diff --git a/test/fixedbugs/issue5493.go b/test/fixedbugs/issue5493.go index 2ee0398af2c89..8f771bc2db369 100644 --- a/test/fixedbugs/issue5493.go +++ b/test/fixedbugs/issue5493.go @@ -14,6 +14,7 @@ import ( ) const N = 10 + var count int64 func run() error { @@ -31,10 +32,9 @@ func run() error { } func main() { - // Does not work on 32-bits, or with gccgo, due to partially - // conservative GC. + // Does not work with gccgo, due to partially conservative GC. // Try to enable when we have fully precise GC. - if runtime.GOARCH != "amd64" || runtime.Compiler == "gccgo" { + if runtime.Compiler == "gccgo" { return } count = N @@ -56,4 +56,3 @@ func main() { panic("not all finalizers are called") } } - From fd43831f4476dc9a3ba83aa3a2e4117ed0b8596e Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 4 Jan 2021 18:05:34 -0800 Subject: [PATCH 353/474] [dev.regabi] cmd/compile: reimplement capture analysis Currently we rely on the type-checker to do some basic data-flow analysis to help decide whether function literals should capture variables by value or reference. However, this analysis isn't done by go/types, and escape analysis already has a better framework for doing this more precisely. This CL extends escape analysis to recalculate the same "byval" as CaptureVars and check that it matches. A future CL will remove CaptureVars in favor of escape analysis's calculation. Notably, escape analysis happens after deadcode removes obviously unreachable code, so it sees the AST without any unreachable assignments. (Also without unreachable addrtakens, but ComputeAddrtaken already happens after deadcode too.) There are two test cases where a variable is only reassigned on certain CPUs. This CL changes them to reassign the variables unconditionally (as no-op reassignments that avoid triggering cmd/vet's self-assignment check), at least until we remove CaptureVars. Passes toolstash -cmp. Change-Id: I7162619739fedaf861b478fb8d506f96a6ac21f3 Reviewed-on: https://go-review.googlesource.com/c/go/+/281535 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/escape/escape.go | 250 ++++++++++++++---- .../compile/internal/logopt/logopt_test.go | 1 + test/chancap.go | 1 + test/fixedbugs/issue4085b.go | 1 + 4 files changed, 202 insertions(+), 51 deletions(-) diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index 794c52f5ae3f8..4aa7381c20534 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -88,12 +88,20 @@ import ( // A batch holds escape analysis state that's shared across an entire // batch of functions being analyzed at once. type batch struct { - allLocs []*location + allLocs []*location + closures []closure heapLoc location blankLoc location } +// A closure holds a closure expression and its spill hole (i.e., +// where the hole representing storing into its closure record). +type closure struct { + k hole + clo *ir.ClosureExpr +} + // An escape holds state specific to a single function being analyzed // within a batch. type escape struct { @@ -108,6 +116,12 @@ type escape struct { // label with a corresponding backwards "goto" (i.e., // unstructured loop). loopDepth int + + // loopSlop tracks how far off typecheck's "decldepth" variable + // would be from loopDepth at the same point during type checking. 
+ // It's only needed to match CaptureVars's pessimism until it can be + // removed entirely. + loopSlop int } // An location represents an abstract location that stores a Go @@ -117,6 +131,7 @@ type location struct { curfn *ir.Func // enclosing function edges []edge // incoming edges loopDepth int // loopDepth at declaration + loopSlop int // loopSlop at declaration // derefs and walkgen are used during walkOne to track the // minimal dereferences from the walk root. @@ -145,6 +160,10 @@ type location struct { // paramEsc records the represented parameter's leak set. paramEsc leaks + + captured bool // has a closure captured this variable? + reassigned bool // has this variable been reassigned? + addrtaken bool // has this variable's address been taken? } // An edge represents an assignment edge between two Go variables. @@ -209,10 +228,69 @@ func Batch(fns []*ir.Func, recursive bool) { } } + // We've walked the function bodies, so we've seen everywhere a + // variable might be reassigned or have it's address taken. Now we + // can decide whether closures should capture their free variables + // by value or reference. + for _, closure := range b.closures { + b.flowClosure(closure.k, closure.clo, false) + } + b.closures = nil + + for _, orphan := range findOrphans(fns) { + b.flowClosure(b.blankLoc.asHole(), orphan, true) + } + + for _, loc := range b.allLocs { + if why := HeapAllocReason(loc.n); why != "" { + b.flow(b.heapHole().addr(loc.n, why), loc) + } + } + b.walkAll() b.finish(fns) } +// findOrphans finds orphaned closure expressions that were originally +// contained within a function in fns, but were lost due to earlier +// optimizations. +// TODO(mdempsky): Remove after CaptureVars is gone. +func findOrphans(fns []*ir.Func) []*ir.ClosureExpr { + have := make(map[*ir.Func]bool) + for _, fn := range fns { + have[fn] = true + } + + parent := func(fn *ir.Func) *ir.Func { + if len(fn.ClosureVars) == 0 { + return nil + } + cv := fn.ClosureVars[0] + if cv.Defn == nil { + return nil // method value wrapper + } + return cv.Outer.Curfn + } + + outermost := func(fn *ir.Func) *ir.Func { + for { + outer := parent(fn) + if outer == nil { + return fn + } + fn = outer + } + } + + var orphans []*ir.ClosureExpr + for _, fn := range typecheck.Target.Decls { + if fn, ok := fn.(*ir.Func); ok && have[outermost(fn)] && !have[fn] { + orphans = append(orphans, fn.OClosure) + } + } + return orphans +} + func (b *batch) with(fn *ir.Func) *escape { return &escape{ batch: b, @@ -270,6 +348,33 @@ func (b *batch) walkFunc(fn *ir.Func) { } } +func (b *batch) flowClosure(k hole, clo *ir.ClosureExpr, orphan bool) { + for _, cv := range clo.Func.ClosureVars { + n := cv.Canonical() + if n.Opt == nil && orphan { + continue // n.Curfn must have been an orphan too + } + + loc := b.oldLoc(cv) + if !loc.captured && !orphan { + base.FatalfAt(cv.Pos(), "closure variable never captured: %v", cv) + } + + // Capture by value for variables <= 128 bytes that are never reassigned. + byval := !loc.addrtaken && !loc.reassigned && n.Type().Size() <= 128 + if byval != n.Byval() { + base.FatalfAt(cv.Pos(), "byval mismatch: %v: %v != %v", cv, byval, n.Byval()) + } + + // Flow captured variables to closure. + k := k + if !cv.Byval() { + k = k.addr(cv, "reference") + } + b.flow(k.note(cv, "captured by a closure"), loc) + } +} + // Below we implement the methods for walking the AST and recording // data flow edges. Note that because a sub-expression might have // side-effects, it's important to always visit the entire AST. 
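// For illustration (hypothetical example, not part of this CL): given
//
//	func f() func() int {
//		x, y := 0, 1
//		return func() int {
//			x++          // x is reassigned => captured by reference
//			return x + y // y: not reassigned, address not taken,
//		}                // and 8 bytes <= 128 => captured by value
//	}
//
// flowClosure computes byval=false for x and byval=true for y, and
// checks that this matches what CaptureVars decided earlier.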
@@ -308,7 +413,7 @@ func (e *escape) stmt(n ir.Node) { }() if base.Flag.LowerM > 2 { - fmt.Printf("%v:[%d] %v stmt: %v\n", base.FmtPos(base.Pos), e.loopDepth, funcSym(e.curfn), n) + fmt.Printf("%v:[%d] %v stmt: %v\n", base.FmtPos(base.Pos), e.loopDepth, e.curfn, n) } e.stmts(n.Init()) @@ -341,6 +446,9 @@ func (e *escape) stmt(n ir.Node) { if base.Flag.LowerM > 2 { fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n) } + if s := n.Label.Name; !strings.HasPrefix(s, ".") && !strings.Contains(s, "·") { + e.loopSlop++ + } case looping: if base.Flag.LowerM > 2 { fmt.Printf("%v: %v looping label\n", base.FmtPos(base.Pos), n) @@ -380,6 +488,7 @@ func (e *escape) stmt(n ir.Node) { } else { e.flow(ks[1].deref(n, "range-deref"), tmp) } + e.reassigned(ks, n) e.block(n.Body) e.loopDepth-- @@ -447,7 +556,9 @@ func (e *escape) stmt(n ir.Node) { case ir.OAS2FUNC: n := n.(*ir.AssignListStmt) e.stmts(n.Rhs[0].Init()) - e.call(e.addrs(n.Lhs), n.Rhs[0], nil) + ks := e.addrs(n.Lhs) + e.call(ks, n.Rhs[0], nil) + e.reassigned(ks, n) case ir.ORETURN: n := n.(*ir.ReturnStmt) results := e.curfn.Type().Results().FieldSlice() @@ -478,6 +589,7 @@ func (e *escape) stmts(l ir.Nodes) { func (e *escape) block(l ir.Nodes) { old := e.loopDepth e.stmts(l) + e.loopSlop += e.loopDepth - old e.loopDepth = old } @@ -507,7 +619,7 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) { if uintptrEscapesHack && n.Op() == ir.OCONVNOP && n.(*ir.ConvExpr).X.Type().IsUnsafePtr() { // nop } else if k.derefs >= 0 && !n.Type().HasPointers() { - k = e.discardHole() + k.dst = &e.blankLoc } switch n.Op() { @@ -691,20 +803,23 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) { case ir.OCLOSURE: n := n.(*ir.ClosureExpr) + k = e.spill(k, n) + e.closures = append(e.closures, closure{k, n}) if fn := n.Func; fn.IsHiddenClosure() { - e.walkFunc(fn) - } - - // Link addresses of captured variables to closure. - k = e.spill(k, n) - for _, v := range n.Func.ClosureVars { - k := k - if !v.Byval() { - k = k.addr(v, "reference") + for _, cv := range fn.ClosureVars { + if loc := e.oldLoc(cv); !loc.captured { + loc.captured = true + + // Ignore reassignments to the variable in straightline code + // preceding the first capture by a closure. + if loc.loopDepth+loc.loopSlop == e.loopDepth+e.loopSlop { + loc.reassigned = false + } + } } - e.expr(k.note(n, "captured by a closure"), v.Defn) + e.walkFunc(fn) } case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR: @@ -728,6 +843,9 @@ func (e *escape) unsafeValue(k hole, n ir.Node) { if n.Type().Kind() != types.TUINTPTR { base.Fatalf("unexpected type %v for %v", n.Type(), n) } + if k.addrtaken { + base.Fatalf("unexpected addrtaken") + } e.stmts(n.Init()) @@ -828,33 +946,59 @@ func (e *escape) addrs(l ir.Nodes) []hole { return ks } +// reassigned marks the locations associated with the given holes as +// reassigned, unless the location represents a variable declared and +// assigned exactly once by where. +func (e *escape) reassigned(ks []hole, where ir.Node) { + if as, ok := where.(*ir.AssignStmt); ok && as.Op() == ir.OAS && as.Y == nil { + if dst, ok := as.X.(*ir.Name); ok && dst.Op() == ir.ONAME && dst.Defn == nil { + // Zero-value assignment for variable declared without an + // explicit initial value. Assume this is its initialization + // statement. + return + } + } + + for _, k := range ks { + loc := k.dst + // Variables declared by range statements are assigned on every iteration. 
+ if n, ok := loc.n.(*ir.Name); ok && n.Defn == where && where.Op() != ir.ORANGE { + continue + } + loc.reassigned = true + } +} + +// assignList evaluates the assignment dsts... = srcs.... func (e *escape) assignList(dsts, srcs []ir.Node, why string, where ir.Node) { - for i, dst := range dsts { + ks := e.addrs(dsts) + for i, k := range ks { var src ir.Node if i < len(srcs) { src = srcs[i] } - e.assign(dst, src, why, where) - } -} -// assign evaluates the assignment dst = src. -func (e *escape) assign(dst, src ir.Node, why string, where ir.Node) { - // Filter out some no-op assignments for escape analysis. - ignore := dst != nil && src != nil && isSelfAssign(dst, src) - if ignore && base.Flag.LowerM != 0 { - base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %v", funcSym(e.curfn), where) - } + if dst := dsts[i]; dst != nil { + // Detect implicit conversion of uintptr to unsafe.Pointer when + // storing into reflect.{Slice,String}Header. + if dst.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(dst) { + e.unsafeValue(e.heapHole().note(where, why), src) + continue + } - k := e.addr(dst) - if dst != nil && dst.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(dst) { - e.unsafeValue(e.heapHole().note(where, why), src) - } else { - if ignore { - k = e.discardHole() + // Filter out some no-op assignments for escape analysis. + if src != nil && isSelfAssign(dst, src) { + if base.Flag.LowerM != 0 { + base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %v", e.curfn, where) + } + k = e.discardHole() + } } + e.expr(k.note(where, why), src) } + + e.reassigned(ks, where) } func (e *escape) assignHeap(src ir.Node, why string, where ir.Node) { @@ -1034,7 +1178,7 @@ func (e *escape) tagHole(ks []hole, fn *ir.Name, param *types.Field) hole { func (e *escape) inMutualBatch(fn *ir.Name) bool { if fn.Defn != nil && fn.Defn.Esc() < escFuncTagged { if fn.Defn.Esc() == escFuncUnknown { - base.Fatalf("graph inconsistency") + base.Fatalf("graph inconsistency: %v", fn) } return true } @@ -1049,6 +1193,11 @@ type hole struct { derefs int // >= -1 notes *note + // addrtaken indicates whether this context is taking the address of + // the expression, independent of whether the address will actually + // be stored into a variable. + addrtaken bool + // uintptrEscapesHack indicates this context is evaluating an // argument for a //go:uintptrescapes function. 
uintptrEscapesHack bool @@ -1079,6 +1228,7 @@ func (k hole) shift(delta int) hole { if k.derefs < -1 { base.Fatalf("derefs underflow: %v", k.derefs) } + k.addrtaken = delta < 0 return k } @@ -1123,8 +1273,12 @@ func (e *escape) teeHole(ks ...hole) hole { } func (e *escape) dcl(n *ir.Name) hole { + if n.Curfn != e.curfn || n.IsClosureVar() { + base.Fatalf("bad declaration of %v", n) + } loc := e.oldLoc(n) loc.loopDepth = e.loopDepth + loc.loopSlop = e.loopSlop return loc.asHole() } @@ -1161,6 +1315,7 @@ func (e *escape) newLoc(n ir.Node, transient bool) *location { n: n, curfn: e.curfn, loopDepth: e.loopDepth, + loopSlop: e.loopSlop, transient: transient, } e.allLocs = append(e.allLocs, loc) @@ -1176,10 +1331,6 @@ func (e *escape) newLoc(n ir.Node, transient bool) *location { } n.Opt = loc } - - if why := HeapAllocReason(n); why != "" { - e.flow(e.heapHole().addr(n, why), loc) - } } return loc } @@ -1192,9 +1343,13 @@ func (l *location) asHole() hole { return hole{dst: l} } -func (e *escape) flow(k hole, src *location) { +func (b *batch) flow(k hole, src *location) { + if k.addrtaken { + src.addrtaken = true + } + dst := k.dst - if dst == &e.blankLoc { + if dst == &b.blankLoc { return } if dst == src && k.derefs >= 0 { // dst = dst, dst = *dst, ... @@ -1206,9 +1361,10 @@ func (e *escape) flow(k hole, src *location) { if base.Flag.LowerM >= 2 { fmt.Printf("%s: %v escapes to heap:\n", pos, src.n) } - explanation := e.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{}) + explanation := b.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{}) if logopt.Enabled() { - logopt.LogOpt(src.n.Pos(), "escapes", "escape", ir.FuncName(e.curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation) + var e_curfn *ir.Func // TODO(mdempsky): Fix. + logopt.LogOpt(src.n.Pos(), "escapes", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation) } } @@ -1220,8 +1376,8 @@ func (e *escape) flow(k hole, src *location) { dst.edges = append(dst.edges, edge{src: src, derefs: k.derefs, notes: k.notes}) } -func (e *escape) heapHole() hole { return e.heapLoc.asHole() } -func (e *escape) discardHole() hole { return e.blankLoc.asHole() } +func (b *batch) heapHole() hole { return b.heapLoc.asHole() } +func (b *batch) discardHole() hole { return b.blankLoc.asHole() } // walkAll computes the minimal dereferences between all pairs of // locations. @@ -1686,14 +1842,6 @@ const ( escFuncTagged ) -// funcSym returns fn.Nname.Sym if no nils are encountered along the way. -func funcSym(fn *ir.Func) *types.Sym { - if fn == nil || fn.Nname == nil { - return nil - } - return fn.Sym() -} - // Mark labels that have no backjumps to them as not increasing e.loopdepth. type labelState int @@ -1863,7 +2011,7 @@ func mayAffectMemory(n ir.Node) bool { // HeapAllocReason returns the reason the given Node must be heap // allocated, or the empty string if it doesn't. func HeapAllocReason(n ir.Node) string { - if n.Type() == nil { + if n == nil || n.Type() == nil { return "" } diff --git a/src/cmd/compile/internal/logopt/logopt_test.go b/src/cmd/compile/internal/logopt/logopt_test.go index 71976174b0351..1d1e21b060986 100644 --- a/src/cmd/compile/internal/logopt/logopt_test.go +++ b/src/cmd/compile/internal/logopt/logopt_test.go @@ -154,6 +154,7 @@ func s15a8(x *[15]int64) [15]int64 { // On not-amd64, test the host architecture and os arches := []string{runtime.GOARCH} goos0 := runtime.GOOS + goos0 = "" + goos0 // TODO(mdempsky): Remove once CaptureVars is gone. 
if runtime.GOARCH == "amd64" { // Test many things with "linux" (wasm will get "js") arches = []string{"arm", "arm64", "386", "amd64", "mips", "mips64", "ppc64le", "riscv64", "s390x", "wasm"} goos0 = "linux" diff --git a/test/chancap.go b/test/chancap.go index 8dce9247cd45e..3a4f67638a431 100644 --- a/test/chancap.go +++ b/test/chancap.go @@ -41,6 +41,7 @@ func main() { n := -1 shouldPanic("makechan: size out of range", func() { _ = make(T, n) }) shouldPanic("makechan: size out of range", func() { _ = make(T, int64(n)) }) + n = 0 + n // TODO(mdempsky): Remove once CaptureVars is gone. if ptrSize == 8 { // Test mem > maxAlloc var n2 int64 = 1 << 59 diff --git a/test/fixedbugs/issue4085b.go b/test/fixedbugs/issue4085b.go index cf27512da0b7f..b69e10c6ccee0 100644 --- a/test/fixedbugs/issue4085b.go +++ b/test/fixedbugs/issue4085b.go @@ -22,6 +22,7 @@ func main() { testMakeInAppend(n) var t *byte + n = 0 + n // TODO(mdempsky): Remove once CaptureVars is gone. if unsafe.Sizeof(t) == 8 { // Test mem > maxAlloc var n2 int64 = 1 << 59 From 98218388321c0c48a4b955792b8d1e3db63a140d Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 5 Jan 2021 08:20:11 -0800 Subject: [PATCH 354/474] [dev.regabi] cmd/compile: remove CaptureVars Capture analysis is now part of escape analysis. Passes toolstash -cmp. Change-Id: Ifcd3ecc342074c590e0db1ff0646dfa1ea2ff57b Reviewed-on: https://go-review.googlesource.com/c/go/+/281543 Trust: Matthew Dempsky Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/escape/escape.go | 14 +++-- src/cmd/compile/internal/gc/main.go | 16 ------ src/cmd/compile/internal/ir/name.go | 11 +--- src/cmd/compile/internal/ir/sizeof_test.go | 2 +- src/cmd/compile/internal/typecheck/func.go | 54 ------------------- src/cmd/compile/internal/typecheck/stmt.go | 4 -- .../compile/internal/typecheck/typecheck.go | 19 ------- 7 files changed, 14 insertions(+), 106 deletions(-) diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index 4aa7381c20534..2222f980038b8 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -361,9 +361,17 @@ func (b *batch) flowClosure(k hole, clo *ir.ClosureExpr, orphan bool) { } // Capture by value for variables <= 128 bytes that are never reassigned. - byval := !loc.addrtaken && !loc.reassigned && n.Type().Size() <= 128 - if byval != n.Byval() { - base.FatalfAt(cv.Pos(), "byval mismatch: %v: %v != %v", cv, byval, n.Byval()) + n.SetByval(!loc.addrtaken && !loc.reassigned && n.Type().Size() <= 128) + if !n.Byval() { + n.SetAddrtaken(true) + } + + if base.Flag.LowerM > 1 { + how := "ref" + if n.Byval() { + how = "value" + } + base.WarnfAt(n.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", n.Curfn, how, n, loc.addrtaken, loc.reassigned, n.Type().Size()) } // Flow captured variables to closure. diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 2ea614e17ff04..c3756309ea55b 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -232,22 +232,6 @@ func Main(archInit func(*ssagen.ArchInfo)) { } typecheck.IncrementalAddrtaken = true - // Decide how to capture closed variables. - // This needs to run before escape analysis, - // because variables captured by value do not escape. 
- base.Timer.Start("fe", "capturevars") - for _, n := range typecheck.Target.Decls { - if n.Op() == ir.ODCLFUNC { - n := n.(*ir.Func) - if n.OClosure != nil { - ir.CurFunc = n - typecheck.CaptureVars(n) - } - } - } - typecheck.CaptureVarsComplete = true - ir.CurFunc = nil - if base.Debug.TypecheckInl != 0 { // Typecheck imported function bodies if Debug.l > 1, // otherwise lazily when used or re-exported. diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 3999c0ecb4a33..a51cf79929e46 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -59,8 +59,7 @@ type Name struct { // (results) are numbered starting at one, followed by function inputs // (parameters), and then local variables. Vargen is used to distinguish // local variables/params with the same name. - Vargen int32 - Decldepth int32 // declaration loop depth, increased for every loop or label + Vargen int32 Ntype Ntype Heapaddr *Name // temp holding heap address of param @@ -260,15 +259,13 @@ func (n *Name) Alias() bool { return n.flags&nameAlias != 0 } func (n *Name) SetAlias(alias bool) { n.flags.set(nameAlias, alias) } const ( - nameCaptured = 1 << iota // is the variable captured by a closure - nameReadonly + nameReadonly = 1 << iota nameByval // is the variable captured by value or by reference nameNeedzero // if it contains pointers, needs to be zeroed on function entry nameAutoTemp // is the variable a temporary (implies no dwarf info. reset if escapes to heap) nameUsed // for variable declared and not used error nameIsClosureVar // PAUTOHEAP closure pseudo-variable; original at n.Name.Defn nameIsOutputParamHeapAddr // pointer to a result parameter's heap copy - nameAssigned // is the variable ever assigned to nameAddrtaken // address taken, even if not moved to heap nameInlFormal // PAUTO created by inliner, derived from callee formal nameInlLocal // PAUTO created by inliner, derived from callee local @@ -277,28 +274,24 @@ const ( nameAlias // is type name an alias ) -func (n *Name) Captured() bool { return n.flags&nameCaptured != 0 } func (n *Name) Readonly() bool { return n.flags&nameReadonly != 0 } func (n *Name) Needzero() bool { return n.flags&nameNeedzero != 0 } func (n *Name) AutoTemp() bool { return n.flags&nameAutoTemp != 0 } func (n *Name) Used() bool { return n.flags&nameUsed != 0 } func (n *Name) IsClosureVar() bool { return n.flags&nameIsClosureVar != 0 } func (n *Name) IsOutputParamHeapAddr() bool { return n.flags&nameIsOutputParamHeapAddr != 0 } -func (n *Name) Assigned() bool { return n.flags&nameAssigned != 0 } func (n *Name) Addrtaken() bool { return n.flags&nameAddrtaken != 0 } func (n *Name) InlFormal() bool { return n.flags&nameInlFormal != 0 } func (n *Name) InlLocal() bool { return n.flags&nameInlLocal != 0 } func (n *Name) OpenDeferSlot() bool { return n.flags&nameOpenDeferSlot != 0 } func (n *Name) LibfuzzerExtraCounter() bool { return n.flags&nameLibfuzzerExtraCounter != 0 } -func (n *Name) SetCaptured(b bool) { n.flags.set(nameCaptured, b) } func (n *Name) setReadonly(b bool) { n.flags.set(nameReadonly, b) } func (n *Name) SetNeedzero(b bool) { n.flags.set(nameNeedzero, b) } func (n *Name) SetAutoTemp(b bool) { n.flags.set(nameAutoTemp, b) } func (n *Name) SetUsed(b bool) { n.flags.set(nameUsed, b) } func (n *Name) SetIsClosureVar(b bool) { n.flags.set(nameIsClosureVar, b) } func (n *Name) SetIsOutputParamHeapAddr(b bool) { n.flags.set(nameIsOutputParamHeapAddr, b) } -func (n *Name) SetAssigned(b bool) { 
n.flags.set(nameAssigned, b) } func (n *Name) SetAddrtaken(b bool) { n.flags.set(nameAddrtaken, b) } func (n *Name) SetInlFormal(b bool) { n.flags.set(nameInlFormal, b) } func (n *Name) SetInlLocal(b bool) { n.flags.set(nameInlLocal, b) } diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go index 60120f2998c83..1a4d2e5c7a6d0 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -21,7 +21,7 @@ func TestSizeof(t *testing.T) { _64bit uintptr // size on 64bit platforms }{ {Func{}, 184, 320}, - {Name{}, 124, 216}, + {Name{}, 120, 216}, } for _, tt := range tests { diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index 8fdb33b1452e8..8789395ffbd98 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -100,32 +100,6 @@ func PartialCallType(n *ir.SelectorExpr) *types.Type { return t } -// CaptureVars is called in a separate phase after all typechecking is done. -// It decides whether each variable captured by a closure should be captured -// by value or by reference. -// We use value capturing for values <= 128 bytes that are never reassigned -// after capturing (effectively constant). -func CaptureVars(fn *ir.Func) { - for _, v := range fn.ClosureVars { - outermost := v.Defn.(*ir.Name) - - // out parameters will be assigned to implicitly upon return. - if outermost.Class != ir.PPARAMOUT && !outermost.Addrtaken() && !outermost.Assigned() && outermost.Type().Size() <= 128 { - outermost.SetByval(true) - } else { - outermost.SetAddrtaken(true) - } - - if base.Flag.LowerM > 1 { - how := "ref" - if v.Byval() { - how = "value" - } - base.WarnfAt(v.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", v.Curfn, how, v, outermost.Addrtaken(), outermost.Assigned(), v.Type().Size()) - } - } -} - // Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck // because they're a copy of an already checked body. func ImportedBody(fn *ir.Func) { @@ -198,9 +172,6 @@ func fnpkg(fn *ir.Name) *types.Pkg { return fn.Sym().Pkg } -// CaptureVarsComplete is set to true when the capturevars phase is done. -var CaptureVarsComplete bool - // closurename generates a new unique name for a closure within // outerfunc. func closurename(outerfunc *ir.Func) *types.Sym { @@ -336,22 +307,6 @@ func tcClosure(clo *ir.ClosureExpr, top int) { return } - for _, ln := range fn.ClosureVars { - n := ln.Defn - if !n.Name().Captured() { - n.Name().SetCaptured(true) - if n.Name().Decldepth == 0 { - base.Fatalf("typecheckclosure: var %v does not have decldepth assigned", n) - } - - // Ignore assignments to the variable in straightline code - // preceding the first capturing by a closure. 
- if n.Name().Decldepth == decldepth { - n.Name().SetAssigned(false) - } - } - } - fn.Nname.SetSym(closurename(ir.CurFunc)) ir.MarkFunc(fn.Nname) Func(fn) @@ -363,10 +318,7 @@ func tcClosure(clo *ir.ClosureExpr, top int) { if ir.CurFunc != nil && clo.Type() != nil { oldfn := ir.CurFunc ir.CurFunc = fn - olddd := decldepth - decldepth = 1 Stmts(fn.Body) - decldepth = olddd ir.CurFunc = oldfn } @@ -400,12 +352,6 @@ func tcFunc(n *ir.Func) { defer tracePrint("typecheckfunc", n)(nil) } - for _, ln := range n.Dcl { - if ln.Op() == ir.ONAME && (ln.Class == ir.PPARAM || ln.Class == ir.PPARAMOUT) { - ln.Decldepth = 1 - } - } - n.Nname = AssignExpr(n.Nname).(*ir.Name) t := n.Nname.Type() if t == nil { diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go index d90d13b44cdf3..8baa5dda78c07 100644 --- a/src/cmd/compile/internal/typecheck/stmt.go +++ b/src/cmd/compile/internal/typecheck/stmt.go @@ -228,7 +228,6 @@ func plural(n int) string { // tcFor typechecks an OFOR node. func tcFor(n *ir.ForStmt) ir.Node { Stmts(n.Init()) - decldepth++ n.Cond = Expr(n.Cond) n.Cond = DefaultLit(n.Cond, nil) if n.Cond != nil { @@ -242,7 +241,6 @@ func tcFor(n *ir.ForStmt) ir.Node { Stmts(n.Late) } Stmts(n.Body) - decldepth-- return n } @@ -337,9 +335,7 @@ func tcRange(n *ir.RangeStmt) { n.Value = AssignExpr(n.Value) } - decldepth++ Stmts(n.Body) - decldepth-- } // tcReturn typechecks an ORETURN node. diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index c3a5a3c40f715..07bbd2510586f 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -21,8 +21,6 @@ var InitTodoFunc = ir.NewFunc(base.Pos) var inimport bool // set during import -var decldepth int32 - var TypecheckAllowed bool var ( @@ -58,7 +56,6 @@ func Callee(n ir.Node) ir.Node { func FuncBody(n *ir.Func) { ir.CurFunc = n - decldepth = 1 errorsBefore := base.Errors() Stmts(n.Body) CheckUnused(n) @@ -506,9 +503,6 @@ func typecheck1(n ir.Node, top int) ir.Node { case ir.ONAME: n := n.(*ir.Name) - if n.Decldepth == 0 { - n.Decldepth = decldepth - } if n.BuiltinOp != 0 { if top&ctxCallee == 0 { base.Errorf("use of builtin %v not in function call", n.Sym()) @@ -839,7 +833,6 @@ func typecheck1(n ir.Node, top int) ir.Node { return n case ir.OLABEL: - decldepth++ if n.Sym().IsBlank() { // Empty identifier is valid but useless. // Eliminate now to simplify life later. @@ -1620,18 +1613,6 @@ func checkassign(stmt ir.Node, n ir.Node) { return } - // Variables declared in ORANGE are assigned on every iteration. - if !ir.DeclaredBy(n, stmt) || stmt.Op() == ir.ORANGE { - r := ir.OuterValue(n) - if r.Op() == ir.ONAME { - r := r.(*ir.Name) - r.SetAssigned(true) - if r.IsClosureVar() { - r.Defn.Name().SetAssigned(true) - } - } - } - if ir.IsAddressable(n) { return } From cb05a0aa6a05cbef05587f02473dbd7f6740b933 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 5 Jan 2021 09:37:28 -0800 Subject: [PATCH 355/474] [dev.regabi] cmd/compile: remove toolstash scaffolding Now that CaptureVars is gone, we can remove the extra code in escape analysis that only served to appease toolstash -cmp. 
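A flavor of what is being removed: the previous CL had inserted
unconditional no-op reassignments into tests, spelled as in this
excerpt from test/chancap.go rather than as n = n, so that cmd/vet's
self-assignment check stays quiet while n still counts as reassigned
(and hence captured by reference) under both analyses:

	n := -1
	shouldPanic("makechan: size out of range", func() { _ = make(T, n) })
	n = 0 + n // no-op, but a reassignment after the capture above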
Change-Id: I8c811834f3d966e76702e2d362e3de414c94bea6 Reviewed-on: https://go-review.googlesource.com/c/go/+/281544 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/escape/escape.go | 69 ++----------------- .../compile/internal/logopt/logopt_test.go | 1 - test/chancap.go | 1 - test/fixedbugs/issue4085b.go | 1 - 4 files changed, 4 insertions(+), 68 deletions(-) diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index 2222f980038b8..5df82d8cdc564 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -116,12 +116,6 @@ type escape struct { // label with a corresponding backwards "goto" (i.e., // unstructured loop). loopDepth int - - // loopSlop tracks how far off typecheck's "decldepth" variable - // would be from loopDepth at the same point during type checking. - // It's only needed to match CaptureVars's pessimism until it can be - // removed entirely. - loopSlop int } // An location represents an abstract location that stores a Go @@ -131,7 +125,6 @@ type location struct { curfn *ir.Func // enclosing function edges []edge // incoming edges loopDepth int // loopDepth at declaration - loopSlop int // loopSlop at declaration // derefs and walkgen are used during walkOne to track the // minimal dereferences from the walk root. @@ -233,14 +226,10 @@ func Batch(fns []*ir.Func, recursive bool) { // can decide whether closures should capture their free variables // by value or reference. for _, closure := range b.closures { - b.flowClosure(closure.k, closure.clo, false) + b.flowClosure(closure.k, closure.clo) } b.closures = nil - for _, orphan := range findOrphans(fns) { - b.flowClosure(b.blankLoc.asHole(), orphan, true) - } - for _, loc := range b.allLocs { if why := HeapAllocReason(loc.n); why != "" { b.flow(b.heapHole().addr(loc.n, why), loc) @@ -251,46 +240,6 @@ func Batch(fns []*ir.Func, recursive bool) { b.finish(fns) } -// findOrphans finds orphaned closure expressions that were originally -// contained within a function in fns, but were lost due to earlier -// optimizations. -// TODO(mdempsky): Remove after CaptureVars is gone. 
-func findOrphans(fns []*ir.Func) []*ir.ClosureExpr { - have := make(map[*ir.Func]bool) - for _, fn := range fns { - have[fn] = true - } - - parent := func(fn *ir.Func) *ir.Func { - if len(fn.ClosureVars) == 0 { - return nil - } - cv := fn.ClosureVars[0] - if cv.Defn == nil { - return nil // method value wrapper - } - return cv.Outer.Curfn - } - - outermost := func(fn *ir.Func) *ir.Func { - for { - outer := parent(fn) - if outer == nil { - return fn - } - fn = outer - } - } - - var orphans []*ir.ClosureExpr - for _, fn := range typecheck.Target.Decls { - if fn, ok := fn.(*ir.Func); ok && have[outermost(fn)] && !have[fn] { - orphans = append(orphans, fn.OClosure) - } - } - return orphans -} - func (b *batch) with(fn *ir.Func) *escape { return &escape{ batch: b, @@ -348,15 +297,11 @@ func (b *batch) walkFunc(fn *ir.Func) { } } -func (b *batch) flowClosure(k hole, clo *ir.ClosureExpr, orphan bool) { +func (b *batch) flowClosure(k hole, clo *ir.ClosureExpr) { for _, cv := range clo.Func.ClosureVars { n := cv.Canonical() - if n.Opt == nil && orphan { - continue // n.Curfn must have been an orphan too - } - loc := b.oldLoc(cv) - if !loc.captured && !orphan { + if !loc.captured { base.FatalfAt(cv.Pos(), "closure variable never captured: %v", cv) } @@ -454,9 +399,6 @@ func (e *escape) stmt(n ir.Node) { if base.Flag.LowerM > 2 { fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n) } - if s := n.Label.Name; !strings.HasPrefix(s, ".") && !strings.Contains(s, "·") { - e.loopSlop++ - } case looping: if base.Flag.LowerM > 2 { fmt.Printf("%v: %v looping label\n", base.FmtPos(base.Pos), n) @@ -597,7 +539,6 @@ func (e *escape) stmts(l ir.Nodes) { func (e *escape) block(l ir.Nodes) { old := e.loopDepth e.stmts(l) - e.loopSlop += e.loopDepth - old e.loopDepth = old } @@ -821,7 +762,7 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) { // Ignore reassignments to the variable in straightline code // preceding the first capture by a closure. - if loc.loopDepth+loc.loopSlop == e.loopDepth+e.loopSlop { + if loc.loopDepth == e.loopDepth { loc.reassigned = false } } @@ -1286,7 +1227,6 @@ func (e *escape) dcl(n *ir.Name) hole { } loc := e.oldLoc(n) loc.loopDepth = e.loopDepth - loc.loopSlop = e.loopSlop return loc.asHole() } @@ -1323,7 +1263,6 @@ func (e *escape) newLoc(n ir.Node, transient bool) *location { n: n, curfn: e.curfn, loopDepth: e.loopDepth, - loopSlop: e.loopSlop, transient: transient, } e.allLocs = append(e.allLocs, loc) diff --git a/src/cmd/compile/internal/logopt/logopt_test.go b/src/cmd/compile/internal/logopt/logopt_test.go index 1d1e21b060986..71976174b0351 100644 --- a/src/cmd/compile/internal/logopt/logopt_test.go +++ b/src/cmd/compile/internal/logopt/logopt_test.go @@ -154,7 +154,6 @@ func s15a8(x *[15]int64) [15]int64 { // On not-amd64, test the host architecture and os arches := []string{runtime.GOARCH} goos0 := runtime.GOOS - goos0 = "" + goos0 // TODO(mdempsky): Remove once CaptureVars is gone. if runtime.GOARCH == "amd64" { // Test many things with "linux" (wasm will get "js") arches = []string{"arm", "arm64", "386", "amd64", "mips", "mips64", "ppc64le", "riscv64", "s390x", "wasm"} goos0 = "linux" diff --git a/test/chancap.go b/test/chancap.go index 3a4f67638a431..8dce9247cd45e 100644 --- a/test/chancap.go +++ b/test/chancap.go @@ -41,7 +41,6 @@ func main() { n := -1 shouldPanic("makechan: size out of range", func() { _ = make(T, n) }) shouldPanic("makechan: size out of range", func() { _ = make(T, int64(n)) }) - n = 0 + n // TODO(mdempsky): Remove once CaptureVars is gone. 
if ptrSize == 8 { // Test mem > maxAlloc var n2 int64 = 1 << 59 diff --git a/test/fixedbugs/issue4085b.go b/test/fixedbugs/issue4085b.go index b69e10c6ccee0..cf27512da0b7f 100644 --- a/test/fixedbugs/issue4085b.go +++ b/test/fixedbugs/issue4085b.go @@ -22,7 +22,6 @@ func main() { testMakeInAppend(n) var t *byte - n = 0 + n // TODO(mdempsky): Remove once CaptureVars is gone. if unsafe.Sizeof(t) == 8 { // Test mem > maxAlloc var n2 int64 = 1 << 59 From b241938e04ed7171897390fdaefd3d3017a16a0b Mon Sep 17 00:00:00 2001 From: Baokun Lee Date: Tue, 29 Dec 2020 18:49:13 +0800 Subject: [PATCH 356/474] [dev.regabi] cmd/compile: fix some methods error text Change-Id: Ie9b034efba30d66a869c5e991b60c76198fd330f Reviewed-on: https://go-review.googlesource.com/c/go/+/279444 Run-TryBot: Baokun Lee TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/reflectdata/alg.go | 4 ++-- src/cmd/compile/internal/staticdata/data.go | 2 +- src/cmd/compile/internal/types/alg.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/cmd/compile/internal/reflectdata/alg.go b/src/cmd/compile/internal/reflectdata/alg.go index d23ca6c7aa2eb..d576053753bb6 100644 --- a/src/cmd/compile/internal/reflectdata/alg.go +++ b/src/cmd/compile/internal/reflectdata/alg.go @@ -42,8 +42,8 @@ func eqCanPanic(t *types.Type) bool { } } -// AlgType is like algtype1, except it returns the fixed-width AMEMxx variants -// instead of the general AMEM kind when possible. +// AlgType returns the fixed-width AMEMxx variants instead of the general +// AMEM kind when possible. func AlgType(t *types.Type) types.AlgKind { a, _ := types.AlgType(t) if a == types.AMEM { diff --git a/src/cmd/compile/internal/staticdata/data.go b/src/cmd/compile/internal/staticdata/data.go index 94fa6760a044f..a2a844f94025e 100644 --- a/src/cmd/compile/internal/staticdata/data.go +++ b/src/cmd/compile/internal/staticdata/data.go @@ -276,7 +276,7 @@ func FuncLinksym(n *ir.Name) *obj.LSym { // the s·f stubs in s's package. func NeedFuncSym(s *types.Sym) { if !base.Ctxt.Flag_dynlink { - base.Fatalf("makefuncsym dynlink") + base.Fatalf("NeedFuncSym: dynlink") } if s.IsBlank() { return diff --git a/src/cmd/compile/internal/types/alg.go b/src/cmd/compile/internal/types/alg.go index f1a472cca58a0..6091ee249cd2f 100644 --- a/src/cmd/compile/internal/types/alg.go +++ b/src/cmd/compile/internal/types/alg.go @@ -132,7 +132,7 @@ func AlgType(t *Type) (AlgKind, *Type) { return ret, nil } - base.Fatalf("algtype1: unexpected type %v", t) + base.Fatalf("algtype: unexpected type %v", t) return 0, nil } From 6ee9b118a2a70371e038fb6bec4fe7989a3a2b2d Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Fri, 8 Jan 2021 14:04:50 -0800 Subject: [PATCH 357/474] [dev.regabi] cmd/compile: remove fmt_test code; it has outlived its usefulness With the recent compiler rewrites and cleanups to gc/fmt.go, the "safety net" provided by fmt_test has become less important and the test itself has become a burden (often breaks because of small format changes elsewhere). Eventually, the syntax and types2 packages will provide most error and diagnostic compiler output at which point fmt.go can be further simplified as well. 
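For context, the test kept a map keyed by "argument type, format verb"
pairs; entries of the flavor below (contents hypothetical) declared a
verb known-good for a type, and a non-empty value told the -r flag how
to rewrite it:

	var knownFormats = map[string]string{
		"*cmd/compile/internal/types.Sym %S": "", // %S accepted for *Sym
		"cmd/compile/internal/ssa.ID %d":     "", // %d accepted for ssa.ID
	}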
Change-Id: Ie93eefd3e1166f3548fed0199b732dbd6c81948a Reviewed-on: https://go-review.googlesource.com/c/go/+/282560 Trust: Robert Griesemer Run-TryBot: Robert Griesemer Reviewed-by: Matthew Dempsky Reviewed-by: Austin Clements TryBot-Result: Go Bot --- src/cmd/compile/fmt_test.go | 615 --------------------------------- src/cmd/compile/fmtmap_test.go | 75 ---- 2 files changed, 690 deletions(-) delete mode 100644 src/cmd/compile/fmt_test.go delete mode 100644 src/cmd/compile/fmtmap_test.go diff --git a/src/cmd/compile/fmt_test.go b/src/cmd/compile/fmt_test.go deleted file mode 100644 index 6398a84f8f07f..0000000000000 --- a/src/cmd/compile/fmt_test.go +++ /dev/null @@ -1,615 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file implements TestFormats; a test that verifies -// format strings in the compiler (this directory and all -// subdirectories, recursively). -// -// TestFormats finds potential (Printf, etc.) format strings. -// If they are used in a call, the format verbs are verified -// based on the matching argument type against a precomputed -// map of valid formats (knownFormats). This map can be used to -// automatically rewrite format strings across all compiler -// files with the -r flag. -// -// The format map needs to be updated whenever a new (type, -// format) combination is found and the format verb is not -// 'v' or 'T' (as in "%v" or "%T"). To update the map auto- -// matically from the compiler source's use of format strings, -// use the -u flag. (Whether formats are valid for the values -// to be formatted must be verified manually, of course.) -// -// The -v flag prints out the names of all functions called -// with a format string, the names of files that were not -// processed, and any format rewrites made (with -r). -// -// Run as: go test -run Formats [-r][-u][-v] -// -// Known shortcomings: -// - indexed format strings ("%[2]s", etc.) are not supported -// (the test will fail) -// - format strings that are not simple string literals cannot -// be updated automatically -// (the test will fail with respective warnings) -// - format strings in _test packages outside the current -// package are not processed -// (the test will report those files) -// -package main_test - -import ( - "bytes" - "flag" - "fmt" - "go/ast" - "go/build" - "go/constant" - "go/format" - "go/importer" - "go/parser" - "go/token" - "go/types" - "internal/testenv" - "io" - "io/fs" - "io/ioutil" - "log" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "testing" - "unicode/utf8" -) - -var ( - rewrite = flag.Bool("r", false, "rewrite format strings") - update = flag.Bool("u", false, "update known formats") -) - -// The following variables collect information across all processed files. -var ( - fset = token.NewFileSet() - formatStrings = make(map[*ast.BasicLit]bool) // set of all potential format strings found - foundFormats = make(map[string]bool) // set of all formats found - callSites = make(map[*ast.CallExpr]*callSite) // map of all calls -) - -// A File is a corresponding (filename, ast) pair. 
-type File struct { - name string - ast *ast.File -} - -func TestFormats(t *testing.T) { - if testing.Short() && testenv.Builder() == "" { - t.Skip("Skipping in short mode") - } - testenv.MustHaveGoBuild(t) // more restrictive than necessary, but that's ok - - // process all directories - filepath.WalkDir(".", func(path string, info fs.DirEntry, err error) error { - if info.IsDir() { - if info.Name() == "testdata" { - return filepath.SkipDir - } - - importPath := filepath.Join("cmd/compile", path) - if ignoredPackages[filepath.ToSlash(importPath)] { - return filepath.SkipDir - } - - pkg, err := build.Import(importPath, path, 0) - if err != nil { - if _, ok := err.(*build.NoGoError); ok { - return nil // nothing to do here - } - t.Fatal(err) - } - collectPkgFormats(t, pkg) - } - return nil - }) - - // test and rewrite formats - updatedFiles := make(map[string]File) // files that were rewritten - for _, p := range callSites { - // test current format literal and determine updated one - out := formatReplace(p.str, func(index int, in string) string { - if in == "*" { - return in // cannot rewrite '*' (as in "%*d") - } - // in != '*' - typ := p.types[index] - format := typ + " " + in // e.g., "*Node %n" - - // Do not bother reporting basic types, nor %v, %T, %p. - // Vet handles basic types, and those three formats apply to all types. - if !strings.Contains(typ, ".") || (in == "%v" || in == "%T" || in == "%p") { - return in - } - - // check if format is known - out, known := knownFormats[format] - - // record format if not yet found - _, found := foundFormats[format] - if !found { - foundFormats[format] = true - } - - // report an error if the format is unknown and this is the first - // time we see it; ignore "%v" and "%T" which are always valid - if !known && !found && in != "%v" && in != "%T" { - t.Errorf("%s: unknown format %q for %s argument", posString(p.arg), in, typ) - } - - if out == "" { - out = in - } - return out - }) - - // replace existing format literal if it changed - if out != p.str { - // we cannot replace the argument if it's not a string literal for now - // (e.g., it may be "foo" + "bar") - lit, ok := p.arg.(*ast.BasicLit) - if !ok { - delete(callSites, p.call) // treat as if we hadn't found this site - continue - } - - if testing.Verbose() { - fmt.Printf("%s:\n\t- %q\n\t+ %q\n", posString(p.arg), p.str, out) - } - - // find argument index of format argument - index := -1 - for i, arg := range p.call.Args { - if p.arg == arg { - index = i - break - } - } - if index < 0 { - // we may have processed the same call site twice, - // but that shouldn't happen - panic("internal error: matching argument not found") - } - - // replace literal - new := *lit // make a copy - new.Value = strconv.Quote(out) // this may introduce "-quotes where there were `-quotes - p.call.Args[index] = &new - updatedFiles[p.file.name] = p.file - } - } - - // write dirty files back - var filesUpdated bool - if len(updatedFiles) > 0 && *rewrite { - for _, file := range updatedFiles { - var buf bytes.Buffer - if err := format.Node(&buf, fset, file.ast); err != nil { - t.Errorf("WARNING: gofmt %s failed: %v", file.name, err) - continue - } - if err := ioutil.WriteFile(file.name, buf.Bytes(), 0x666); err != nil { - t.Errorf("WARNING: writing %s failed: %v", file.name, err) - continue - } - fmt.Printf("updated %s\n", file.name) - filesUpdated = true - } - } - - // report the names of all functions called with a format string - if len(callSites) > 0 && testing.Verbose() { - set := make(map[string]bool) - for 
_, p := range callSites { - set[nodeString(p.call.Fun)] = true - } - var list []string - for s := range set { - list = append(list, s) - } - fmt.Println("\nFunctions called with a format string") - writeList(os.Stdout, list) - } - - // update formats - if len(foundFormats) > 0 && *update { - var list []string - for s := range foundFormats { - list = append(list, fmt.Sprintf("%q: \"\",", s)) - } - var buf bytes.Buffer - buf.WriteString(knownFormatsHeader) - writeList(&buf, list) - buf.WriteString("}\n") - out, err := format.Source(buf.Bytes()) - const outfile = "fmtmap_test.go" - if err != nil { - t.Errorf("WARNING: gofmt %s failed: %v", outfile, err) - out = buf.Bytes() // continue with unformatted source - } - if err = ioutil.WriteFile(outfile, out, 0644); err != nil { - t.Errorf("WARNING: updating format map failed: %v", err) - } - } - - // check that knownFormats is up to date - if !*rewrite && !*update { - var mismatch bool - for s := range foundFormats { - if _, ok := knownFormats[s]; !ok { - mismatch = true - break - } - } - if !mismatch { - for s := range knownFormats { - if _, ok := foundFormats[s]; !ok { - mismatch = true - break - } - } - } - if mismatch { - t.Errorf("format map is out of date; run 'go test -u' to update and manually verify correctness of change'") - } - } - - // all format strings of calls must be in the formatStrings set (self-verification) - for _, p := range callSites { - if lit, ok := p.arg.(*ast.BasicLit); ok && lit.Kind == token.STRING { - if formatStrings[lit] { - // ok - delete(formatStrings, lit) - } else { - // this should never happen - panic(fmt.Sprintf("internal error: format string not found (%s)", posString(lit))) - } - } - } - - // if we have any strings left, we may need to update them manually - if len(formatStrings) > 0 && filesUpdated { - var list []string - for lit := range formatStrings { - list = append(list, fmt.Sprintf("%s: %s", posString(lit), nodeString(lit))) - } - fmt.Println("\nWARNING: Potentially missed format strings") - writeList(os.Stdout, list) - t.Fail() - } - - fmt.Println() -} - -// A callSite describes a function call that appears to contain -// a format string. -type callSite struct { - file File - call *ast.CallExpr // call containing the format string - arg ast.Expr // format argument (string literal or constant) - str string // unquoted format string - types []string // argument types -} - -func collectPkgFormats(t *testing.T, pkg *build.Package) { - // collect all files - var filenames []string - filenames = append(filenames, pkg.GoFiles...) - filenames = append(filenames, pkg.CgoFiles...) - filenames = append(filenames, pkg.TestGoFiles...) - - // TODO(gri) verify _test files outside package - for _, name := range pkg.XTestGoFiles { - // don't process this test itself - if name != "fmt_test.go" && testing.Verbose() { - fmt.Printf("WARNING: %s not processed\n", filepath.Join(pkg.Dir, name)) - } - } - - // make filenames relative to . 
- for i, name := range filenames { - filenames[i] = filepath.Join(pkg.Dir, name) - } - - // parse all files - files := make([]*ast.File, len(filenames)) - for i, filename := range filenames { - f, err := parser.ParseFile(fset, filename, nil, parser.ParseComments) - if err != nil { - t.Fatal(err) - } - files[i] = f - } - - // typecheck package - conf := types.Config{Importer: importer.Default()} - etypes := make(map[ast.Expr]types.TypeAndValue) - if _, err := conf.Check(pkg.ImportPath, fset, files, &types.Info{Types: etypes}); err != nil { - t.Fatal(err) - } - - // collect all potential format strings (for extra verification later) - for _, file := range files { - ast.Inspect(file, func(n ast.Node) bool { - if s, ok := stringLit(n); ok && isFormat(s) { - formatStrings[n.(*ast.BasicLit)] = true - } - return true - }) - } - - // collect all formats/arguments of calls with format strings - for index, file := range files { - ast.Inspect(file, func(n ast.Node) bool { - if call, ok := n.(*ast.CallExpr); ok { - if ignoredFunctions[nodeString(call.Fun)] { - return true - } - // look for an arguments that might be a format string - for i, arg := range call.Args { - if s, ok := stringVal(etypes[arg]); ok && isFormat(s) { - // make sure we have enough arguments - n := numFormatArgs(s) - if i+1+n > len(call.Args) { - t.Errorf("%s: not enough format args (ignore %s?)", posString(call), nodeString(call.Fun)) - break // ignore this call - } - // assume last n arguments are to be formatted; - // determine their types - argTypes := make([]string, n) - for i, arg := range call.Args[len(call.Args)-n:] { - if tv, ok := etypes[arg]; ok { - argTypes[i] = typeString(tv.Type) - } - } - // collect call site - if callSites[call] != nil { - panic("internal error: file processed twice?") - } - callSites[call] = &callSite{ - file: File{filenames[index], file}, - call: call, - arg: arg, - str: s, - types: argTypes, - } - break // at most one format per argument list - } - } - } - return true - }) - } -} - -// writeList writes list in sorted order to w. -func writeList(w io.Writer, list []string) { - sort.Strings(list) - for _, s := range list { - fmt.Fprintln(w, "\t", s) - } -} - -// posString returns a string representation of n's position -// in the form filename:line:col: . -func posString(n ast.Node) string { - if n == nil { - return "" - } - return fset.Position(n.Pos()).String() -} - -// nodeString returns a string representation of n. -func nodeString(n ast.Node) string { - var buf bytes.Buffer - if err := format.Node(&buf, fset, n); err != nil { - log.Fatal(err) // should always succeed - } - return buf.String() -} - -// typeString returns a string representation of n. -func typeString(typ types.Type) string { - s := filepath.ToSlash(typ.String()) - - // Report all the concrete IR types as Node, to shorten fmtmap. - const ir = "cmd/compile/internal/ir." - if s == "*"+ir+"Name" || s == "*"+ir+"Func" || s == "*"+ir+"Decl" || - s == ir+"Ntype" || s == ir+"Expr" || s == ir+"Stmt" || - strings.HasPrefix(s, "*"+ir) && (strings.HasSuffix(s, "Expr") || strings.HasSuffix(s, "Stmt")) { - return "cmd/compile/internal/ir.Node" - } - - return s -} - -// stringLit returns the unquoted string value and true if -// n represents a string literal; otherwise it returns "" -// and false. 
-func stringLit(n ast.Node) (string, bool) { - if lit, ok := n.(*ast.BasicLit); ok && lit.Kind == token.STRING { - s, err := strconv.Unquote(lit.Value) - if err != nil { - log.Fatal(err) // should not happen with correct ASTs - } - return s, true - } - return "", false -} - -// stringVal returns the (unquoted) string value and true if -// tv is a string constant; otherwise it returns "" and false. -func stringVal(tv types.TypeAndValue) (string, bool) { - if tv.IsValue() && tv.Value != nil && tv.Value.Kind() == constant.String { - return constant.StringVal(tv.Value), true - } - return "", false -} - -// formatIter iterates through the string s in increasing -// index order and calls f for each format specifier '%..v'. -// The arguments for f describe the specifier's index range. -// If a format specifier contains a "*", f is called with -// the index range for "*" alone, before being called for -// the entire specifier. The result of f is the index of -// the rune at which iteration continues. -func formatIter(s string, f func(i, j int) int) { - i := 0 // index after current rune - var r rune // current rune - - next := func() { - r1, w := utf8.DecodeRuneInString(s[i:]) - if w == 0 { - r1 = -1 // signal end-of-string - } - r = r1 - i += w - } - - flags := func() { - for r == ' ' || r == '#' || r == '+' || r == '-' || r == '0' { - next() - } - } - - index := func() { - if r == '[' { - log.Fatalf("cannot handle indexed arguments: %s", s) - } - } - - digits := func() { - index() - if r == '*' { - i = f(i-1, i) - next() - return - } - for '0' <= r && r <= '9' { - next() - } - } - - for next(); r >= 0; next() { - if r == '%' { - i0 := i - next() - flags() - digits() - if r == '.' { - next() - digits() - } - index() - // accept any letter (a-z, A-Z) as format verb; - // ignore anything else - if 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' { - i = f(i0-1, i) - } - } - } -} - -// isFormat reports whether s contains format specifiers. -func isFormat(s string) (yes bool) { - formatIter(s, func(i, j int) int { - yes = true - return len(s) // stop iteration - }) - return -} - -// oneFormat reports whether s is exactly one format specifier. -func oneFormat(s string) (yes bool) { - formatIter(s, func(i, j int) int { - yes = i == 0 && j == len(s) - return j - }) - return -} - -// numFormatArgs returns the number of format specifiers in s. -func numFormatArgs(s string) int { - count := 0 - formatIter(s, func(i, j int) int { - count++ - return j - }) - return count -} - -// formatReplace replaces the i'th format specifier s in the incoming -// string in with the result of f(i, s) and returns the new string. -func formatReplace(in string, f func(i int, s string) string) string { - var buf []byte - i0 := 0 - index := 0 - formatIter(in, func(i, j int) int { - if sub := in[i:j]; sub != "*" { // ignore calls for "*" width/length specifiers - buf = append(buf, in[i0:i]...) - buf = append(buf, f(index, sub)...) - i0 = j - } - index++ - return j - }) - return string(append(buf, in[i0:]...)) -} - -// ignoredPackages is the set of packages which can -// be ignored. -var ignoredPackages = map[string]bool{} - -// ignoredFunctions is the set of functions which may have -// format-like arguments but which don't do any formatting and -// thus may be ignored. 
-var ignoredFunctions = map[string]bool{} - -func init() { - // verify that knownFormats entries are correctly formatted - for key, val := range knownFormats { - // key must be "typename format", and format starts with a '%' - // (formats containing '*' alone are not collected in this map) - i := strings.Index(key, "%") - if i < 0 || !oneFormat(key[i:]) { - log.Fatalf("incorrect knownFormats key: %q", key) - } - // val must be "format" or "" - if val != "" && !oneFormat(val) { - log.Fatalf("incorrect knownFormats value: %q (key = %q)", val, key) - } - } -} - -const knownFormatsHeader = `// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file implements the knownFormats map which records the valid -// formats for a given type. The valid formats must correspond to -// supported compiler formats implemented in fmt.go, or whatever -// other format verbs are implemented for the given type. The map may -// also be used to change the use of a format verb across all compiler -// sources automatically (for instance, if the implementation of fmt.go -// changes), by using the -r option together with the new formats in the -// map. To generate this file automatically from the existing source, -// run: go test -run Formats -u. -// -// See the package comment in fmt_test.go for additional information. - -package main_test - -// knownFormats entries are of the form "typename format" -> "newformat". -// An absent entry means that the format is not recognized as valid. -// An empty new format means that the format should remain unchanged. -var knownFormats = map[string]string{ -` diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go deleted file mode 100644 index a925ec05ace7f..0000000000000 --- a/src/cmd/compile/fmtmap_test.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file implements the knownFormats map which records the valid -// formats for a given type. The valid formats must correspond to -// supported compiler formats implemented in fmt.go, or whatever -// other format verbs are implemented for the given type. The map may -// also be used to change the use of a format verb across all compiler -// sources automatically (for instance, if the implementation of fmt.go -// changes), by using the -r option together with the new formats in the -// map. To generate this file automatically from the existing source, -// run: go test -run Formats -u. -// -// See the package comment in fmt_test.go for additional information. - -package main_test - -// knownFormats entries are of the form "typename format" -> "newformat". -// An absent entry means that the format is not recognized as valid. -// An empty new format means that the format should remain unchanged. 
-var knownFormats = map[string]string{ - "*bytes.Buffer %s": "", - "*cmd/compile/internal/ssa.Block %s": "", - "*cmd/compile/internal/ssa.Func %s": "", - "*cmd/compile/internal/ssa.Register %s": "", - "*cmd/compile/internal/ssa.Value %s": "", - "*cmd/compile/internal/types.Sym %+v": "", - "*cmd/compile/internal/types.Sym %S": "", - "*cmd/compile/internal/types.Type %+v": "", - "*cmd/compile/internal/types.Type %-S": "", - "*cmd/compile/internal/types.Type %L": "", - "*cmd/compile/internal/types.Type %S": "", - "*cmd/compile/internal/types.Type %s": "", - "*math/big.Float %f": "", - "*math/big.Int %s": "", - "[]cmd/compile/internal/syntax.token %s": "", - "cmd/compile/internal/arm.shift %d": "", - "cmd/compile/internal/gc.RegIndex %d": "", - "cmd/compile/internal/ir.Class %d": "", - "cmd/compile/internal/ir.Node %+v": "", - "cmd/compile/internal/ir.Node %L": "", - "cmd/compile/internal/ir.Nodes %+v": "", - "cmd/compile/internal/ir.Nodes %.v": "", - "cmd/compile/internal/ir.Op %+v": "", - "cmd/compile/internal/ssa.Aux %#v": "", - "cmd/compile/internal/ssa.Aux %q": "", - "cmd/compile/internal/ssa.Aux %s": "", - "cmd/compile/internal/ssa.BranchPrediction %d": "", - "cmd/compile/internal/ssa.ID %d": "", - "cmd/compile/internal/ssa.LocalSlot %s": "", - "cmd/compile/internal/ssa.Location %s": "", - "cmd/compile/internal/ssa.Op %s": "", - "cmd/compile/internal/ssa.ValAndOff %s": "", - "cmd/compile/internal/ssa.flagConstant %s": "", - "cmd/compile/internal/ssa.rbrank %d": "", - "cmd/compile/internal/ssa.regMask %d": "", - "cmd/compile/internal/ssa.register %d": "", - "cmd/compile/internal/ssa.relation %s": "", - "cmd/compile/internal/syntax.Error %q": "", - "cmd/compile/internal/syntax.Expr %#v": "", - "cmd/compile/internal/syntax.LitKind %d": "", - "cmd/compile/internal/syntax.Operator %s": "", - "cmd/compile/internal/syntax.Pos %s": "", - "cmd/compile/internal/syntax.position %s": "", - "cmd/compile/internal/syntax.token %q": "", - "cmd/compile/internal/syntax.token %s": "", - "cmd/compile/internal/types.Kind %d": "", - "cmd/compile/internal/types.Kind %s": "", - "cmd/compile/internal/walk.initKind %d": "", - "go/constant.Value %#v": "", - "math/big.Accuracy %s": "", - "reflect.Type %s": "", - "time.Duration %d": "", -} From 8b2efa990b08e6c32422fbfdab746f4f6948ae42 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 3 Jan 2021 19:49:49 -0800 Subject: [PATCH 358/474] [dev.regabi] cmd/compile: deref PAUTOHEAPs during SSA construction Currently, during walk we rewrite PAUTOHEAP uses into derefs of their corresponding Heapaddr, but we can easily do this instead during SSA construction. This does involve updating two test cases: * nilptr3.go This file had a test that we emit a "removed nil check" diagnostic for the implicit dereference from accessing a PAUTOHEAP variable. This CL removes this diagnostic, since it's not really useful to end users: from the user's point of view, there's no pointer anyway, so they needn't care about whether we check for nil or not. That's a purely internal detail. And with the PAUTOHEAP dereference handled during SSA construction, we can more robustly ensure this happens, rather than relying on setting a flag in walk and hoping that SSA sees it. * issue20780.go Previously, when PAUTOHEAPs were dereferenced during walk, it had a consequence that when they're passed as a function call argument, they would first get copied to the stack before being copied to their actual destination. 
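  To make that concrete, here is a minimal, hypothetical sketch in the
  spirit of the updated test (function names invented here, not taken
  from the CL):

	package main

	type Big [400e6]byte // large enough that copies are expensive

	var sink *Big

	//go:noinline
	func g(Big) {}

	func f() {
		var x Big
		sink = &x // x escapes, so it becomes a heap-allocated PAUTOHEAP variable
		g(x)      // previously: the value was copied to a stack temporary first
	}

	func main() { f() }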
Moving the dereferencing to SSA had a side-effect of eliminating this
unnecessary temporary, and copying directly to the destination
parameter. The test is updated to instead call "g(h(), h())" where h()
returns a large value, as the first result will always need to be
spilled somewhere while calling the second function. Maybe eventually
we're smart enough to realize it can be spilled to the heap, but we
don't do that today.

Because I'm concerned that the direct copy-to-parameter optimization
could interfere with race-detector instrumentation (e.g., maybe the
copies were previously necessary to ensure they're not clobbered by
inserted raceread calls?), I've also added issue20780b.go to exercise
this in a few different ways.

Change-Id: I720598cb32b17518bc10a03e555620c0f25fd28d
Reviewed-on: https://go-review.googlesource.com/c/go/+/281293
Run-TryBot: Matthew Dempsky
TryBot-Result: Go Bot
Trust: Matthew Dempsky
Reviewed-by: Keith Randall
Reviewed-by: Cuong Manh Le
---
 src/cmd/compile/internal/ssagen/ssa.go | 11 +++--
 src/cmd/compile/internal/walk/expr.go  | 10 ++---
 test/fixedbugs/issue20780.go           | 16 ++++---
 test/fixedbugs/issue20780b.go          | 62 ++++++++++++++++++++++++++
 test/nilptr3.go                        |  8 ----
 5 files changed, 81 insertions(+), 26 deletions(-)
 create mode 100644 test/fixedbugs/issue20780b.go

diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index 5998c420122f3..f48909e6bed94 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -3222,8 +3222,8 @@ func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask
 
 	// If this assignment clobbers an entire local variable, then emit
 	// OpVarDef so liveness analysis knows the variable is redefined.
-	if base := clobberBase(left); base.Op() == ir.ONAME && base.(*ir.Name).Class != ir.PEXTERN && skip == 0 {
-		s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base.(*ir.Name), s.mem(), !ir.IsAutoTmp(base))
+	if base, ok := clobberBase(left).(*ir.Name); ok && base.Op() == ir.ONAME && base.Class != ir.PEXTERN && base.Class != ir.PAUTOHEAP && skip == 0 {
+		s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !ir.IsAutoTmp(base))
 	}
 
 	// Left is not ssa-able. Compute its address.
@@ -4986,6 +4986,8 @@ func (s *state) addr(n ir.Node) *ssa.Value { // ensure that we reuse symbols for out parameters so // that cse works on their addresses return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true) + case ir.PAUTOHEAP: + return s.expr(n.Heapaddr) default: s.Fatalf("variable address class %v not implemented", n.Class) return nil @@ -5096,11 +5098,8 @@ func (s *state) canSSAName(name *ir.Name) bool { if ir.IsParamHeapCopy(name) { return false } - if name.Class == ir.PAUTOHEAP { - s.Fatalf("canSSA of PAUTOHEAP %v", name) - } switch name.Class { - case ir.PEXTERN: + case ir.PEXTERN, ir.PAUTOHEAP: return false case ir.PPARAMOUT: if s.hasdefer { diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index 3dffb496e9119..6fdb8f15f58d6 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -52,19 +52,15 @@ func walkExpr(n ir.Node, init *ir.Nodes) ir.Node { base.Fatalf("expression has untyped type: %+v", n) } - if n.Op() == ir.ONAME && n.(*ir.Name).Class == ir.PAUTOHEAP { - n := n.(*ir.Name) - nn := ir.NewStarExpr(base.Pos, n.Heapaddr) - nn.X.MarkNonNil() - return walkExpr(typecheck.Expr(nn), init) - } - n = walkExpr1(n, init) // Eagerly compute sizes of all expressions for the back end. if typ := n.Type(); typ != nil && typ.Kind() != types.TBLANK && !typ.IsFuncArgStruct() { types.CheckSize(typ) } + if n, ok := n.(*ir.Name); ok && n.Heapaddr != nil { + types.CheckSize(n.Heapaddr.Type()) + } if ir.IsConst(n, constant.String) { // Emit string symbol now to avoid emitting // any concurrently during the backend. diff --git a/test/fixedbugs/issue20780.go b/test/fixedbugs/issue20780.go index 53c4f615e17fa..f73e6d1f794bb 100644 --- a/test/fixedbugs/issue20780.go +++ b/test/fixedbugs/issue20780.go @@ -9,11 +9,17 @@ package main +type Big = [400e6]byte + func f() { // GC_ERROR "stack frame too large" - var x [800e6]byte - g(x) - return + // Note: This test relies on the fact that we currently always + // spill function-results to the stack, even if they're so + // large that we would normally heap allocate them. If we ever + // improve the backend to spill temporaries to the heap, this + // test will probably need updating to find some new way to + // construct an overly large stack frame. + g(h(), h()) } -//go:noinline -func g([800e6]byte) {} +func g(Big, Big) +func h() Big diff --git a/test/fixedbugs/issue20780b.go b/test/fixedbugs/issue20780b.go new file mode 100644 index 0000000000000..c8bf1f83499f4 --- /dev/null +++ b/test/fixedbugs/issue20780b.go @@ -0,0 +1,62 @@ +// +build cgo,linux,amd64 +// run -race + +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that CL 281293 doesn't interfere with race detector +// instrumentation. 
+ +package main + +import "fmt" + +const N = 2e6 + +type Big = [N]int + +var sink interface{} + +func main() { + g(0, f(0)) + + x1 := f(1) + sink = &x1 + g(1, x1) + g(7, f(7)) + g(1, x1) + + x3 := f(3) + sink = &x3 + g(1, x1) + g(3, x3) + + h(f(0), x1, f(2), x3, f(4)) +} + +//go:noinline +func f(k int) (x Big) { + for i := range x { + x[i] = k*N + i + } + return +} + +//go:noinline +func g(k int, x Big) { + for i := range x { + if x[i] != k*N+i { + panic(fmt.Sprintf("x%d[%d] = %d", k, i, x[i])) + } + } +} + +//go:noinline +func h(x0, x1, x2, x3, x4 Big) { + g(0, x0) + g(1, x1) + g(2, x2) + g(3, x3) + g(4, x4) +} diff --git a/test/nilptr3.go b/test/nilptr3.go index e0f2ed9767659..3345cfa5ab38a 100644 --- a/test/nilptr3.go +++ b/test/nilptr3.go @@ -214,14 +214,6 @@ func p1() byte { return p[5] // ERROR "removed nil check" } -// make sure not to do nil check for access of PAUTOHEAP -//go:noinline -func (p *Struct) m() {} -func c1() { - var x Struct - func() { x.m() }() // ERROR "removed nil check" -} - type SS struct { x byte } From 950cf4d46c5bc343644e7ef08828b9e5114d4676 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 3 Jan 2021 21:34:03 -0800 Subject: [PATCH 359/474] [dev.regabi] cmd/compile: bind closure vars during SSA constructions For function literals that aren't inlined or directly called, we need to pass their arguments via a closure struct. This also means we need to rewrite uses of closure variables to access from this closure struct. Currently we do this rewrite in a pass before walking begins. This CL moves the code to SSA construction instead, alongside binding other input parameters. Change-Id: I13538ef3394e2d6f75d5b7b2d0adbb00db812dc2 Reviewed-on: https://go-review.googlesource.com/c/go/+/281352 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssagen/ssa.go | 41 +++++++ src/cmd/compile/internal/walk/closure.go | 139 ++++++++--------------- 2 files changed, 91 insertions(+), 89 deletions(-) diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index f48909e6bed94..0c222b12cf325 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -470,6 +470,47 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { } } + // Populate closure variables. + if !fn.ClosureCalled() { + clo := s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr) + offset := int64(types.PtrSize) // PtrSize to skip past function entry PC field + for _, n := range fn.ClosureVars { + typ := n.Type() + if !n.Byval() { + typ = types.NewPtr(typ) + } + + offset = types.Rnd(offset, typ.Alignment()) + r := s.newValue1I(ssa.OpOffPtr, types.NewPtr(typ), offset, clo) + offset += typ.Size() + + if n.Byval() && TypeOK(n.Type()) { + // If it is a small variable captured by value, downgrade it to PAUTO. + r = s.load(n.Type(), r) + + n.Class = ir.PAUTO + } else { + if !n.Byval() { + r = s.load(typ, r) + } + + // Declare variable holding address taken from closure. 
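+				// Loads of the captured variable will then go through
+				// n.Heapaddr (the PAUTOHEAP case in (*state).addr).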
+ addr := ir.NewNameAt(fn.Pos(), &types.Sym{Name: "&" + n.Sym().Name, Pkg: types.LocalPkg}) + addr.SetType(types.NewPtr(n.Type())) + addr.Class = ir.PAUTO + addr.SetUsed(true) + addr.Curfn = fn + types.CalcSize(addr.Type()) + + n.Heapaddr = addr + n = addr + } + + fn.Dcl = append(fn.Dcl, n) + s.assign(n, r, false, 0) + } + } + // Convert the AST-based IR to the SSA-based IR s.stmtList(fn.Enter) s.stmtList(fn.Body) diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go index 449df88f9e69e..acb74b9901538 100644 --- a/src/cmd/compile/internal/walk/closure.go +++ b/src/cmd/compile/internal/walk/closure.go @@ -15,103 +15,64 @@ import ( // Closure is called in a separate phase after escape analysis. // It transform closure bodies to properly reference captured variables. func Closure(fn *ir.Func) { + if len(fn.ClosureVars) == 0 { + return + } + + if !fn.ClosureCalled() { + // The closure is not directly called, so it is going to stay as closure. + fn.SetNeedctxt(true) + return + } + lno := base.Pos base.Pos = fn.Pos() - if fn.ClosureCalled() { - // If the closure is directly called, we transform it to a plain function call - // with variables passed as args. This avoids allocation of a closure object. - // Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE) - // will complete the transformation later. - // For illustration, the following closure: - // func(a int) { - // println(byval) - // byref++ - // }(42) - // becomes: - // func(byval int, &byref *int, a int) { - // println(byval) - // (*&byref)++ - // }(byval, &byref, 42) - - // f is ONAME of the actual function. - f := fn.Nname - - // We are going to insert captured variables before input args. - var params []*types.Field - var decls []*ir.Name - for _, v := range fn.ClosureVars { - if !v.Byval() { - // If v of type T is captured by reference, - // we introduce function param &v *T - // and v remains PAUTOHEAP with &v heapaddr - // (accesses will implicitly deref &v). - addr := typecheck.NewName(typecheck.Lookup("&" + v.Sym().Name)) - addr.SetType(types.NewPtr(v.Type())) - v.Heapaddr = addr - v = addr - } - - v.Class = ir.PPARAM - decls = append(decls, v) - - fld := types.NewField(src.NoXPos, v.Sym(), v.Type()) - fld.Nname = v - params = append(params, fld) - } - - if len(params) > 0 { - // Prepend params and decls. - f.Type().Params().SetFields(append(params, f.Type().Params().FieldSlice()...)) - fn.Dcl = append(decls, fn.Dcl...) + // If the closure is directly called, we transform it to a plain function call + // with variables passed as args. This avoids allocation of a closure object. + // Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE) + // will complete the transformation later. + // For illustration, the following closure: + // func(a int) { + // println(byval) + // byref++ + // }(42) + // becomes: + // func(byval int, &byref *int, a int) { + // println(byval) + // (*&byref)++ + // }(byval, &byref, 42) + + // f is ONAME of the actual function. + f := fn.Nname + + // We are going to insert captured variables before input args. + var params []*types.Field + var decls []*ir.Name + for _, v := range fn.ClosureVars { + if !v.Byval() { + // If v of type T is captured by reference, + // we introduce function param &v *T + // and v remains PAUTOHEAP with &v heapaddr + // (accesses will implicitly deref &v). 
+ addr := typecheck.NewName(typecheck.Lookup("&" + v.Sym().Name)) + addr.SetType(types.NewPtr(v.Type())) + v.Heapaddr = addr + v = addr } - types.CalcSize(f.Type()) - fn.Nname.SetType(f.Type()) // update type of ODCLFUNC - } else { - // The closure is not called, so it is going to stay as closure. - var body []ir.Node - offset := int64(types.PtrSize) - for _, v := range fn.ClosureVars { - // cv refers to the field inside of closure OSTRUCTLIT. - typ := v.Type() - if !v.Byval() { - typ = types.NewPtr(typ) - } - offset = types.Rnd(offset, int64(typ.Align)) - cr := ir.NewClosureRead(typ, offset) - offset += typ.Width - - if v.Byval() && v.Type().Width <= int64(2*types.PtrSize) { - // If it is a small variable captured by value, downgrade it to PAUTO. - v.Class = ir.PAUTO - fn.Dcl = append(fn.Dcl, v) - body = append(body, ir.NewAssignStmt(base.Pos, v, cr)) - } else { - // Declare variable holding addresses taken from closure - // and initialize in entry prologue. - addr := typecheck.NewName(typecheck.Lookup("&" + v.Sym().Name)) - addr.SetType(types.NewPtr(v.Type())) - addr.Class = ir.PAUTO - addr.SetUsed(true) - addr.Curfn = fn - fn.Dcl = append(fn.Dcl, addr) - v.Heapaddr = addr - var src ir.Node = cr - if v.Byval() { - src = typecheck.NodAddr(cr) - } - body = append(body, ir.NewAssignStmt(base.Pos, addr, src)) - } - } + v.Class = ir.PPARAM + decls = append(decls, v) - if len(body) > 0 { - typecheck.Stmts(body) - fn.Enter = body - fn.SetNeedctxt(true) - } + fld := types.NewField(src.NoXPos, v.Sym(), v.Type()) + fld.Nname = v + params = append(params, fld) } + // Prepend params and decls. + f.Type().Params().SetFields(append(params, f.Type().Params().FieldSlice()...)) + fn.Dcl = append(decls, fn.Dcl...) + base.Pos = lno } From c9c26d7ffb3c4077ffaa80f7c8e2d550528e1445 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 4 Jan 2021 02:24:48 -0800 Subject: [PATCH 360/474] [dev.regabi] cmd/compile: use ClosureVars for method value wrappers Similar to with regular closures, we can change method value wrappers to use ClosureVars and allow SSA construction to take care of wiring it up appropriately. Change-Id: I05c0b1bcec4e24305324755df35b7bc5b8a6ce7a Reviewed-on: https://go-review.googlesource.com/c/go/+/281353 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Keith Randall --- src/cmd/compile/internal/escape/escape.go | 3 +++ src/cmd/compile/internal/ir/name.go | 4 ++-- src/cmd/compile/internal/typecheck/func.go | 25 ++++++++++------------ 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index 5df82d8cdc564..9b9b8f6a5870c 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -583,6 +583,9 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) { if n.Class == ir.PFUNC || n.Class == ir.PEXTERN { return } + if n.IsClosureVar() && n.Defn == nil { + return // ".this" from method value wrapper + } e.flow(k, e.oldLoc(n)) case ir.ONAMEOFFSET: diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index a51cf79929e46..cfb481e31cd1f 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -264,7 +264,7 @@ const ( nameNeedzero // if it contains pointers, needs to be zeroed on function entry nameAutoTemp // is the variable a temporary (implies no dwarf info. 
reset if escapes to heap) nameUsed // for variable declared and not used error - nameIsClosureVar // PAUTOHEAP closure pseudo-variable; original at n.Name.Defn + nameIsClosureVar // PAUTOHEAP closure pseudo-variable; original (if any) at n.Defn nameIsOutputParamHeapAddr // pointer to a result parameter's heap copy nameAddrtaken // address taken, even if not moved to heap nameInlFormal // PAUTO created by inliner, derived from callee formal @@ -332,7 +332,7 @@ func (n *Name) SetVal(v constant.Value) { // it appears in the function that immediately contains the // declaration. Otherwise, Canonical simply returns n itself. func (n *Name) Canonical() *Name { - if n.IsClosureVar() { + if n.IsClosureVar() && n.Defn != nil { n = n.Defn.(*Name) } return n diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index 8789395ffbd98..12762f7ee8f55 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -246,29 +246,26 @@ func MethodValueWrapper(dot *ir.SelectorExpr) *ir.Func { fn.SetWrapper(true) // Declare and initialize variable holding receiver. - cr := ir.NewClosureRead(rcvrtype, types.Rnd(int64(types.PtrSize), int64(rcvrtype.Align))) - var ptr *ir.Name - var body []ir.Node - if rcvrtype.IsPtr() || rcvrtype.IsInterface() { - ptr = Temp(rcvrtype) - body = append(body, ir.NewAssignStmt(base.Pos, ptr, cr)) - } else { - ptr = Temp(types.NewPtr(rcvrtype)) - body = append(body, ir.NewAssignStmt(base.Pos, ptr, NodAddr(cr))) - } + ptr := ir.NewNameAt(base.Pos, Lookup(".this")) + ptr.Class = ir.PAUTOHEAP + ptr.SetType(rcvrtype) + ptr.Curfn = fn + ptr.SetIsClosureVar(true) + ptr.SetByval(true) + fn.ClosureVars = append(fn.ClosureVars, ptr) call := ir.NewCallExpr(base.Pos, ir.OCALL, ir.NewSelectorExpr(base.Pos, ir.OXDOT, ptr, meth), nil) call.Args = ir.ParamNames(tfn.Type()) call.IsDDD = tfn.Type().IsVariadic() + + var body ir.Node = call if t0.NumResults() != 0 { ret := ir.NewReturnStmt(base.Pos, nil) ret.Results = []ir.Node{call} - body = append(body, ret) - } else { - body = append(body, call) + body = ret } - fn.Body = body + fn.Body = []ir.Node{body} FinishFuncBody() Func(fn) From 7fd84c6e465d9c9d9424538ec99da2c59afdd469 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 4 Jan 2021 16:33:30 -0800 Subject: [PATCH 361/474] [dev.regabi] cmd/compile: remove OCLOSUREREAD After the previous CLs, all closure reads are handled during SSA construction. 
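To illustrate what's being removed, here is a small, hypothetical
example (not from the CL) of a closure whose capture was handled via
OCLOSUREREAD:

	package main

	import "fmt"

	// counter's result is not directly called, so n lives in a closure
	// record; the prologue that pulls n's address out of that record
	// used to be built from OCLOSUREREAD nodes during walk, and is now
	// emitted directly during SSA construction.
	func counter(start int) func() int {
		n := start // captured by reference: the closure writes it
		return func() int {
			n++
			return n
		}
	}

	func main() {
		c := counter(10)
		fmt.Println(c(), c()) // 11 12
	}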
Change-Id: Iad67b01fa2d3798f50ea647be7ccf8195f189c27 Reviewed-on: https://go-review.googlesource.com/c/go/+/281512 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/escape/escape.go | 4 +-- src/cmd/compile/internal/ir/expr.go | 17 ++---------- src/cmd/compile/internal/ir/node.go | 27 +++++++++---------- src/cmd/compile/internal/ir/node_gen.go | 16 ----------- src/cmd/compile/internal/ir/op_string.go | 27 +++++++++---------- src/cmd/compile/internal/ssagen/ssa.go | 7 ----- .../compile/internal/typecheck/typecheck.go | 3 --- src/cmd/compile/internal/walk/expr.go | 2 +- src/cmd/compile/internal/walk/walk.go | 2 +- 9 files changed, 32 insertions(+), 73 deletions(-) diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index 9b9b8f6a5870c..c63383af43a9f 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -575,7 +575,7 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) { default: base.Fatalf("unexpected expr: %v", n) - case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OCLOSUREREAD, ir.OTYPE, ir.OMETHEXPR: + case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OTYPE, ir.OMETHEXPR: // nop case ir.ONAME: @@ -1926,7 +1926,7 @@ func mayAffectMemory(n ir.Node) bool { // an ir.Any looking for any op that's not the ones in the case statement. // But that produces changes in the compiled output detected by buildall. switch n.Op() { - case ir.ONAME, ir.OCLOSUREREAD, ir.OLITERAL, ir.ONIL: + case ir.ONAME, ir.OLITERAL, ir.ONIL: return false case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD: diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index e7aa9c6a8f06d..51425db42d431 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -203,19 +203,6 @@ func NewClosureExpr(pos src.XPos, fn *Func) *ClosureExpr { return n } -// A ClosureRead denotes reading a variable stored within a closure struct. -type ClosureReadExpr struct { - miniExpr - Offset int64 -} - -func NewClosureRead(typ *types.Type, offset int64) *ClosureReadExpr { - n := &ClosureReadExpr{Offset: offset} - n.typ = typ - n.op = OCLOSUREREAD - return n -} - // A CompLitExpr is a composite literal Type{Vals}. // Before type-checking, the type is Ntype. type CompLitExpr struct { @@ -727,7 +714,7 @@ func IsAddressable(n Node) bool { return false } fallthrough - case ODEREF, ODOTPTR, OCLOSUREREAD: + case ODEREF, ODOTPTR: return true case ODOT: @@ -889,7 +876,7 @@ func SameSafeExpr(l Node, r Node) bool { } switch l.Op() { - case ONAME, OCLOSUREREAD: + case ONAME: return l == r case ODOT, ODOTPTR: diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 850d7343aaef1..a2b6e7203b35d 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -294,20 +294,19 @@ const ( OTSLICE // []int // misc - OINLCALL // intermediary representation of an inlined call. - OEFACE // itable and data words of an empty-interface value. - OITAB // itable word of an interface value. - OIDATA // data word of an interface value in Left - OSPTR // base pointer of a slice or string. 
- OCLOSUREREAD // read from inside closure struct at beginning of closure function - OCFUNC // reference to c function pointer (not go func value) - OCHECKNIL // emit code to ensure pointer/interface not nil - OVARDEF // variable is about to be fully initialized - OVARKILL // variable is dead - OVARLIVE // variable is alive - ORESULT // result of a function call; Xoffset is stack offset - OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree. - ONAMEOFFSET // offset within a name + OINLCALL // intermediary representation of an inlined call. + OEFACE // itable and data words of an empty-interface value. + OITAB // itable word of an interface value. + OIDATA // data word of an interface value in Left + OSPTR // base pointer of a slice or string. + OCFUNC // reference to c function pointer (not go func value) + OCHECKNIL // emit code to ensure pointer/interface not nil + OVARDEF // variable is about to be fully initialized + OVARKILL // variable is dead + OVARLIVE // variable is alive + ORESULT // result of a function call; Xoffset is stack offset + OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree. + ONAMEOFFSET // offset within a name // arch-specific opcodes ORETJMP // return to other function diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 7f494b16cd340..f1b0a21628373 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -353,22 +353,6 @@ func (n *ClosureExpr) editChildren(edit func(Node) Node) { } } -func (n *ClosureReadExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } -func (n *ClosureReadExpr) copy() Node { - c := *n - c.init = copyNodes(c.init) - return &c -} -func (n *ClosureReadExpr) doChildren(do func(Node) bool) bool { - if doNodes(n.init, do) { - return true - } - return false -} -func (n *ClosureReadExpr) editChildren(edit func(Node) Node) { - editNodes(n.init, edit) -} - func (n *CommClause) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *CommClause) copy() Node { c := *n diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go index 0339444132e44..b54b4785a2391 100644 --- a/src/cmd/compile/internal/ir/op_string.go +++ b/src/cmd/compile/internal/ir/op_string.go @@ -150,23 +150,22 @@ func _() { _ = x[OITAB-139] _ = x[OIDATA-140] _ = x[OSPTR-141] - _ = x[OCLOSUREREAD-142] - _ = x[OCFUNC-143] - _ = x[OCHECKNIL-144] - _ = x[OVARDEF-145] - _ = x[OVARKILL-146] - _ = x[OVARLIVE-147] - _ = x[ORESULT-148] - _ = x[OINLMARK-149] - _ = x[ONAMEOFFSET-150] - _ = x[ORETJMP-151] - _ = x[OGETG-152] - _ = x[OEND-153] + _ = x[OCFUNC-142] + _ = x[OCHECKNIL-143] + _ = x[OVARDEF-144] + _ = x[OVARKILL-145] + _ = x[OVARLIVE-146] + _ = x[ORESULT-147] + _ = x[OINLMARK-148] + _ = x[ONAMEOFFSET-149] + _ = x[ORETJMP-150] + _ = x[OGETG-151] + _ = x[OEND-152] } -const _Op_name = 
"XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCLOSUREREADCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKNAMEOFFSETRETJMPGETGEND" +const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKNAMEOFFSETRETJMPGETGEND" -var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 477, 480, 486, 490, 493, 497, 502, 507, 513, 518, 522, 527, 535, 543, 549, 558, 569, 576, 580, 587, 595, 599, 603, 607, 614, 621, 629, 635, 643, 651, 656, 661, 665, 673, 678, 682, 685, 693, 697, 699, 704, 706, 711, 717, 723, 729, 735, 740, 744, 751, 757, 762, 768, 774, 781, 786, 790, 795, 799, 810, 815, 823, 829, 836, 843, 849, 856, 866, 872, 876, 879} +var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 477, 480, 486, 490, 493, 497, 502, 507, 513, 518, 522, 527, 535, 543, 549, 558, 569, 576, 580, 587, 595, 599, 603, 607, 614, 621, 629, 635, 643, 651, 656, 661, 665, 673, 678, 682, 685, 693, 697, 699, 704, 706, 711, 717, 723, 729, 735, 740, 744, 751, 757, 762, 768, 774, 781, 786, 790, 795, 799, 804, 812, 818, 825, 832, 838, 845, 855, 861, 865, 868} func (i Op) String() string { if i >= Op(len(_Op_index)-1) { diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 0c222b12cf325..54bde20f1cdc9 100644 --- 
a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -2168,9 +2168,6 @@ func (s *state) expr(n ir.Node) *ssa.Value { } addr := s.addr(n) return s.load(n.Type(), addr) - case ir.OCLOSUREREAD: - addr := s.addr(n) - return s.load(n.Type(), addr) case ir.ONIL: n := n.(*ir.NilExpr) t := n.Type() @@ -5074,10 +5071,6 @@ func (s *state) addr(n ir.Node) *ssa.Value { n := n.(*ir.SelectorExpr) p := s.exprPtr(n.X, n.Bounded(), n.Pos()) return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p) - case ir.OCLOSUREREAD: - n := n.(*ir.ClosureReadExpr) - return s.newValue1I(ssa.OpOffPtr, t, n.Offset, - s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)) case ir.OCONVNOP: n := n.(*ir.ConvExpr) if n.Type() == n.X.Type() { diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index 07bbd2510586f..3160725e3c56a 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -789,9 +789,6 @@ func typecheck1(n ir.Node, top int) ir.Node { n := n.(*ir.UnaryExpr) return tcSPtr(n) - case ir.OCLOSUREREAD: - return n - case ir.OCFUNC: n := n.(*ir.UnaryExpr) n.X = Expr(n.X) diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index 6fdb8f15f58d6..df575d698589b 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -162,7 +162,7 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node { n := n.(*ir.CallExpr) return mkcall("gorecover", n.Type(), init, typecheck.NodAddr(ir.RegFP)) - case ir.OCLOSUREREAD, ir.OCFUNC: + case ir.OCFUNC: return n case ir.OCALLINTER, ir.OCALLFUNC, ir.OCALLMETH: diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go index 928b6737528ce..e780a9066031d 100644 --- a/src/cmd/compile/internal/walk/walk.go +++ b/src/cmd/compile/internal/walk/walk.go @@ -476,7 +476,7 @@ func calcHasCall(n ir.Node) bool { n := n.(*ir.SelectorExpr) return n.X.HasCall() - case ir.OGETG, ir.OCLOSUREREAD, ir.OMETHEXPR: + case ir.OGETG, ir.OMETHEXPR: return false // TODO(rsc): These look wrong in various ways but are what calcHasCall has always done. From f57f484053f276c6fb57047cf02fa043974d7b95 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 11 Jan 2021 14:30:16 -0800 Subject: [PATCH 362/474] [dev.regabi] cmd/compile: decouple escape analysis from Name.Vargen Escape analysis needs to know the index of result parameters for recording escape-flow information. It currently relies on Vargen for this, but it can easily figure this out for itself. So just do that instead, so that we can remove Vargen. Passes toolstash -cmp. For #43633. Change-Id: I65dedc2d73bc25e85ff400f308e50b73dc503630 Reviewed-on: https://go-review.googlesource.com/c/go/+/283192 Trust: Matthew Dempsky Trust: Dan Scales Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Dan Scales --- src/cmd/compile/internal/escape/escape.go | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index c63383af43a9f..bee3878f10039 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -126,6 +126,11 @@ type location struct { edges []edge // incoming edges loopDepth int // loopDepth at declaration + // resultIndex records the tuple index (starting at 1) for + // PPARAMOUT variables within their function's result type. 
+ // For non-PPARAMOUT variables it's 0. + resultIndex int + // derefs and walkgen are used during walkOne to track the // minimal dereferences from the walk root. derefs int // >= -1 @@ -259,11 +264,16 @@ func (b *batch) initFunc(fn *ir.Func) { } // Allocate locations for local variables. - for _, dcl := range fn.Dcl { - if dcl.Op() == ir.ONAME { - e.newLoc(dcl, false) + for _, n := range fn.Dcl { + if n.Op() == ir.ONAME { + e.newLoc(n, false) } } + + // Initialize resultIndex for result parameters. + for i, f := range fn.Type().Results().FieldSlice() { + e.oldLoc(f.Nname.(*ir.Name)).resultIndex = 1 + i + } } func (b *batch) walkFunc(fn *ir.Func) { @@ -1609,8 +1619,7 @@ func (l *location) leakTo(sink *location, derefs int) { // If sink is a result parameter and we can fit return bits // into the escape analysis tag, then record a return leak. if sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn { - // TODO(mdempsky): Eliminate dependency on Vargen here. - ri := int(sink.n.Name().Vargen) - 1 + ri := sink.resultIndex - 1 if ri < numEscResults { // Leak to result parameter. l.paramEsc.AddResult(ri, derefs) From b4d2a0445b0ca54a159e0895e1a8b31d47411894 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 11 Jan 2021 15:58:19 -0800 Subject: [PATCH 363/474] [dev.regabi] cmd/compile: refactor closure var setup/teardown Creating closure vars is subtle and is also needed in both CL 281932 and CL 283112, so refactor out a common implementation that can be used in all 3 places. Passes toolstash -cmp. Change-Id: Ib993eb90c895b52759bfbfbaad88921e391b0b4d Reviewed-on: https://go-review.googlesource.com/c/go/+/283194 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Dan Scales Trust: Dan Scales Trust: Matthew Dempsky --- src/cmd/compile/internal/ir/name.go | 76 +++++++++++++++++++++++++ src/cmd/compile/internal/noder/noder.go | 64 +-------------------- 2 files changed, 79 insertions(+), 61 deletions(-) diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index cfb481e31cd1f..2375eddb99a0d 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -351,6 +351,82 @@ func (n *Name) Byval() bool { return n.Canonical().flags&nameByval != 0 } +// CaptureName returns a Name suitable for referring to n from within function +// fn or from the package block if fn is nil. If n is a free variable declared +// within a function that encloses fn, then CaptureName returns a closure +// variable that refers to n and adds it to fn.ClosureVars. Otherwise, it simply +// returns n. +func CaptureName(pos src.XPos, fn *Func, n *Name) *Name { + if n.IsClosureVar() { + base.FatalfAt(pos, "misuse of CaptureName on closure variable: %v", n) + } + if n.Op() != ONAME || n.Curfn == nil || n.Curfn == fn { + return n // okay to use directly + } + if fn == nil { + base.FatalfAt(pos, "package-block reference to %v, declared in %v", n, n.Curfn) + } + + c := n.Innermost + if c != nil && c.Curfn == fn { + return c + } + + // Do not have a closure var for the active closure yet; make one. + c = NewNameAt(pos, n.Sym()) + c.Curfn = fn + c.Class = PAUTOHEAP + c.SetIsClosureVar(true) + c.Defn = n + + // Link into list of active closure variables. + // Popped from list in FinishCaptureNames. + c.Outer = n.Innermost + n.Innermost = c + fn.ClosureVars = append(fn.ClosureVars, c) + + return c +} + +// FinishCaptureNames handles any work leftover from calling CaptureName +// earlier. outerfn should be the function that immediately encloses fn. 
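+// It unhooks fn's closure variables from their originals' Innermost
+// chains and, where the closure usage of a variable is not dense,
+// re-captures that variable within outerfn (see the example below).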
+func FinishCaptureNames(pos src.XPos, outerfn, fn *Func) { + // closure-specific variables are hanging off the + // ordinary ones; see CaptureName above. + // unhook them. + // make the list of pointers for the closure call. + for _, cv := range fn.ClosureVars { + // Unlink from n; see comment in syntax.go type Param for these fields. + n := cv.Defn.(*Name) + n.Innermost = cv.Outer + + // If the closure usage of n is not dense, we need to make it + // dense by recapturing n within the enclosing function. + // + // That is, suppose we just finished parsing the innermost + // closure f4 in this code: + // + // func f() { + // n := 1 + // func() { // f2 + // use(n) + // func() { // f3 + // func() { // f4 + // use(n) + // }() + // }() + // }() + // } + // + // At this point cv.Outer is f2's n; there is no n for f3. To + // construct the closure f4 from within f3, we need to use f3's + // n and in this case we need to create f3's n with CaptureName. + // + // We'll decide later in walk whether to use v directly or &v. + cv.Outer = CaptureName(pos, outerfn, n) + } +} + // SameSource reports whether two nodes refer to the same source // element. // diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index 76913c62a6dfd..ec0debdbbd6e9 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -1872,45 +1872,7 @@ func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node { p.funcBody(fn, expr.Body) - // closure-specific variables are hanging off the - // ordinary ones in the symbol table; see oldname. - // unhook them. - // make the list of pointers for the closure call. - for _, v := range fn.ClosureVars { - // Unlink from v1; see comment in syntax.go type Param for these fields. - v1 := v.Defn - v1.Name().Innermost = v.Outer - - // If the closure usage of v is not dense, - // we need to make it dense; now that we're out - // of the function in which v appeared, - // look up v.Sym in the enclosing function - // and keep it around for use in the compiled code. - // - // That is, suppose we just finished parsing the innermost - // closure f4 in this code: - // - // func f() { - // v := 1 - // func() { // f2 - // use(v) - // func() { // f3 - // func() { // f4 - // use(v) - // }() - // }() - // }() - // } - // - // At this point v.Outer is f2's v; there is no f3's v. - // To construct the closure f4 from within f3, - // we need to use f3's v and in this case we need to create f3's v. - // We are now in the context of f3, so calling oldname(v.Sym) - // obtains f3's v, creating it if necessary (as it is in the example). - // - // capturevars will decide whether to use v directly or &v. - v.Outer = oldname(v.Sym()).(*ir.Name) - } + ir.FinishCaptureNames(base.Pos, ir.CurFunc, fn) return clo } @@ -1944,32 +1906,12 @@ func oldname(s *types.Sym) ir.Node { return ir.NewIdent(base.Pos, s) } - if ir.CurFunc != nil && n.Op() == ir.ONAME && n.Name().Curfn != nil && n.Name().Curfn != ir.CurFunc { - // Inner func is referring to var in outer func. - // + if n, ok := n.(*ir.Name); ok { // TODO(rsc): If there is an outer variable x and we // are parsing x := 5 inside the closure, until we get to // the := it looks like a reference to the outer x so we'll // make x a closure variable unnecessarily. - n := n.(*ir.Name) - c := n.Innermost - if c == nil || c.Curfn != ir.CurFunc { - // Do not have a closure var for the active closure yet; make one. 
- c = typecheck.NewName(s) - c.Class = ir.PAUTOHEAP - c.SetIsClosureVar(true) - c.Defn = n - - // Link into list of active closure variables. - // Popped from list in func funcLit. - c.Outer = n.Innermost - n.Innermost = c - - ir.CurFunc.ClosureVars = append(ir.CurFunc.ClosureVars, c) - } - - // return ref to closure var, not original - return c + return ir.CaptureName(base.Pos, ir.CurFunc, n) } return n From 12ee55ba7bf22157267e735e8e4bbf651c5b4e7d Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 11 Jan 2021 15:07:09 -0800 Subject: [PATCH 364/474] [dev.regabi] cmd/compile: stop using Vargen for import/export Historically, inline function bodies were exported as plain Go source code, and symbol mangling was a convenient hack because it allowed variables to be re-imported with largely the same names as they were originally exported as. However, nowadays we use a binary format that's more easily extended, so we can simply serialize all of a function's declared objects up front, and then refer to them by index later on. This also allows us to easily report unmangled names all the time (e.g., error message from issue7921.go). Fixes #43633. Change-Id: I46c88f5a47cb921f70ab140976ba9ddce38df216 Reviewed-on: https://go-review.googlesource.com/c/go/+/283193 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Dan Scales Trust: Dan Scales Trust: Matthew Dempsky --- src/cmd/compile/internal/ir/func.go | 6 + src/cmd/compile/internal/ir/name.go | 8 +- src/cmd/compile/internal/typecheck/dcl.go | 27 +---- src/cmd/compile/internal/typecheck/iexport.go | 58 +++++----- src/cmd/compile/internal/typecheck/iimport.go | 103 +++++++++++++----- .../compile/internal/typecheck/typecheck.go | 2 +- test/fixedbugs/issue43633.dir/a.go | 28 +++++ test/fixedbugs/issue43633.dir/main.go | 18 +++ test/fixedbugs/issue43633.go | 7 ++ test/fixedbugs/issue7921.go | 2 +- 10 files changed, 171 insertions(+), 88 deletions(-) create mode 100644 test/fixedbugs/issue43633.dir/a.go create mode 100644 test/fixedbugs/issue43633.dir/main.go create mode 100644 test/fixedbugs/issue43633.go diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 12ef083c1933e..d660fe3b40603 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -61,8 +61,14 @@ type Func struct { // memory for escaping parameters. Enter Nodes Exit Nodes + // ONAME nodes for all params/locals for this func/closure, does NOT // include closurevars until transformclosure runs. + // Names must be listed PPARAMs, PPARAMOUTs, then PAUTOs, + // with PPARAMs and PPARAMOUTs in order corresponding to the function signature. + // However, as anonymous or blank PPARAMs are not actually declared, + // they are omitted from Dcl. + // Anonymous and blank PPARAMOUTs are declared as ~rNN and ~bNN Names, respectively. Dcl []*Name ClosureType Ntype // closure representation type diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 2375eddb99a0d..30f7e9b9e0e6a 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -55,11 +55,9 @@ type Name struct { // The function, method, or closure in which local variable or param is declared. Curfn *Func - // Unique number for ONAME nodes within a function. Function outputs - // (results) are numbered starting at one, followed by function inputs - // (parameters), and then local variables. Vargen is used to distinguish - // local variables/params with the same name. 
- Vargen int32 + // Unique number for OTYPE names within a function. + // TODO(mdempsky): Remove completely. + Typegen int32 Ntype Ntype Heapaddr *Name // temp holding heap address of param diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go index ffbf474a58558..caa3e8203a332 100644 --- a/src/cmd/compile/internal/typecheck/dcl.go +++ b/src/cmd/compile/internal/typecheck/dcl.go @@ -7,7 +7,6 @@ package typecheck import ( "fmt" "strconv" - "strings" "cmd/compile/internal/base" "cmd/compile/internal/ir" @@ -47,7 +46,6 @@ func Declare(n *ir.Name, ctxt ir.Class) { base.ErrorfAt(n.Pos(), "cannot declare name %v", s) } - gen := 0 if ctxt == ir.PEXTERN { if s.Name == "init" { base.ErrorfAt(n.Pos(), "cannot declare init - must be func") @@ -66,10 +64,7 @@ func Declare(n *ir.Name, ctxt ir.Class) { } if n.Op() == ir.OTYPE { declare_typegen++ - gen = declare_typegen - } else if n.Op() == ir.ONAME && ctxt == ir.PAUTO && !strings.Contains(s.Name, "·") { - vargen++ - gen = vargen + n.Typegen = int32(declare_typegen) } types.Pushdcl(s) n.Curfn = ir.CurFunc @@ -90,7 +85,6 @@ func Declare(n *ir.Name, ctxt ir.Class) { s.Block = types.Block s.Lastlineno = base.Pos s.Def = n - n.Vargen = int32(gen) n.Class = ctxt if ctxt == ir.PFUNC { n.Sym().SetFunc(true) @@ -338,9 +332,6 @@ func funcarg(n *ir.Field, ctxt ir.Class) { n.Decl = name name.Ntype = n.Ntype Declare(name, ctxt) - - vargen++ - n.Decl.Vargen = int32(vargen) } func funcarg2(f *types.Field, ctxt ir.Class) { @@ -358,15 +349,6 @@ func funcargs(nt *ir.FuncType) { base.Fatalf("funcargs %v", nt.Op()) } - // re-start the variable generation number - // we want to use small numbers for the return variables, - // so let them have the chunk starting at 1. - // - // TODO(mdempsky): This is ugly, and only necessary because - // esc.go uses Vargen to figure out result parameters' index - // within the result tuple. - vargen = len(nt.Results) - // declare the receiver and in arguments. if nt.Recv != nil { funcarg(nt.Recv, ir.PPARAM) @@ -375,9 +357,6 @@ func funcargs(nt *ir.FuncType) { funcarg(n, ir.PPARAM) } - oldvargen := vargen - vargen = 0 - // declare the out arguments. gen := len(nt.Params) for _, n := range nt.Results { @@ -399,8 +378,6 @@ func funcargs(nt *ir.FuncType) { funcarg(n, ir.PPARAMOUT) } - - vargen = oldvargen } // Same as funcargs, except run over an already constructed TFUNC. @@ -422,8 +399,6 @@ func funcargs2(t *types.Type) { } } -var vargen int - func Temp(t *types.Type) *ir.Name { return TempAt(base.Pos, ir.CurFunc, t) } diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index a7927c39a31b5..4d48b80346675 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -422,6 +422,10 @@ type exportWriter struct { prevFile string prevLine int64 prevColumn int64 + + // dclIndex maps function-scoped declarations to their index + // within their respective Func's Dcl list. 
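+	// It is populated by funcBody when writing out Inl.Dcl and
+	// consumed by localName when referencing those declarations.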
+ dclIndex map[*ir.Name]int } func (p *iexporter) doDecl(n *ir.Name) { @@ -529,7 +533,8 @@ func (p *iexporter) doInline(f *ir.Name) { w := p.newWriter() w.setPkg(fnpkg(f), false) - w.stmtList(ir.Nodes(f.Func.Inl.Body)) + w.dclIndex = make(map[*ir.Name]int, len(f.Func.Inl.Dcl)) + w.funcBody(f.Func) w.finish("inl", p.inlineIndex, f.Sym()) } @@ -756,7 +761,7 @@ func (w *exportWriter) paramList(fs []*types.Field) { func (w *exportWriter) param(f *types.Field) { w.pos(f.Pos) - w.localIdent(types.OrigSym(f.Sym), 0) + w.localIdent(types.OrigSym(f.Sym)) w.typ(f.Type) } @@ -1030,7 +1035,19 @@ func (w *exportWriter) typeExt(t *types.Type) { // Inline bodies. -func (w *exportWriter) stmtList(list ir.Nodes) { +func (w *exportWriter) funcBody(fn *ir.Func) { + w.int64(int64(len(fn.Inl.Dcl))) + for i, n := range fn.Inl.Dcl { + w.pos(n.Pos()) + w.localIdent(n.Sym()) + w.typ(n.Type()) + w.dclIndex[n] = i + } + + w.stmtList(fn.Inl.Body) +} + +func (w *exportWriter) stmtList(list []ir.Node) { for _, n := range list { w.node(n) } @@ -1070,10 +1087,11 @@ func (w *exportWriter) stmt(n ir.Node) { case ir.ODCL: n := n.(*ir.Decl) + if ir.IsBlank(n.X) { + return // blank declarations not useful to importers + } w.op(ir.ODCL) - w.pos(n.X.Pos()) w.localName(n.X) - w.typ(n.X.Type()) case ir.OAS: // Don't export "v = " initializing statements, hope they're always @@ -1288,7 +1306,7 @@ func (w *exportWriter) expr(n ir.Node) { } s = n.Tag.Sym() } - w.localIdent(s, 0) // declared pseudo-variable, if any + w.localIdent(s) // declared pseudo-variable, if any w.expr(n.X) // case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC: @@ -1518,22 +1536,19 @@ func (w *exportWriter) fieldList(list ir.Nodes) { } func (w *exportWriter) localName(n *ir.Name) { - // Escape analysis happens after inline bodies are saved, but - // we're using the same ONAME nodes, so we might still see - // PAUTOHEAP here. - // - // Check for Stackcopy to identify PAUTOHEAP that came from - // PPARAM/PPARAMOUT, because we only want to include vargen in - // non-param names. - var v int32 - if n.Class == ir.PAUTO || (n.Class == ir.PAUTOHEAP && n.Stackcopy == nil) { - v = n.Vargen + if ir.IsBlank(n) { + w.int64(-1) + return } - w.localIdent(n.Sym(), v) + i, ok := w.dclIndex[n] + if !ok { + base.FatalfAt(n.Pos(), "missing from dclIndex: %+v", n) + } + w.int64(int64(i)) } -func (w *exportWriter) localIdent(s *types.Sym, v int32) { +func (w *exportWriter) localIdent(s *types.Sym) { if w.currPkg == nil { base.Fatalf("missing currPkg") } @@ -1555,13 +1570,6 @@ func (w *exportWriter) localIdent(s *types.Sym, v int32) { base.Fatalf("unexpected dot in identifier: %v", name) } - if v > 0 { - if strings.Contains(name, "·") { - base.Fatalf("exporter: unexpected · in symbol name") - } - name = fmt.Sprintf("%s·%d", name, v) - } - if s.Pkg != w.currPkg { base.Fatalf("weird package in name: %v => %v from %q, not %q", s, name, s.Pkg.Path, w.currPkg.Path) } diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go index 15c57b2380159..c9effabce003e 100644 --- a/src/cmd/compile/internal/typecheck/iimport.go +++ b/src/cmd/compile/internal/typecheck/iimport.go @@ -262,6 +262,9 @@ type importReader struct { prevBase *src.PosBase prevLine int64 prevColumn int64 + + // curfn is the current function we're importing into. 
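+	// It is saved and restored by funcBody around each imported
+	// body, and localName resolves indices through curfn.Inl.Dcl.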
+ curfn *ir.Func } func (p *iimporter) newReader(off uint64, pkg *types.Pkg) *importReader { @@ -715,19 +718,7 @@ func (r *importReader) doInline(fn *ir.Func) { base.Fatalf("%v already has inline body", fn) } - StartFuncBody(fn) - body := r.stmtList() - FinishFuncBody() - if body == nil { - // - // Make sure empty body is not interpreted as - // no inlineable body (see also parser.fnbody) - // (not doing so can cause significant performance - // degradation due to unnecessary calls to empty - // functions). - body = []ir.Node{} - } - fn.Inl.Body = body + r.funcBody(fn) importlist = append(importlist, fn) @@ -755,6 +746,68 @@ func (r *importReader) doInline(fn *ir.Func) { // unrefined nodes (since this is what the importer uses). The respective case // entries are unreachable in the importer. +func (r *importReader) funcBody(fn *ir.Func) { + outerfn := r.curfn + r.curfn = fn + + // Import local declarations. + dcls := make([]*ir.Name, r.int64()) + for i := range dcls { + n := ir.NewDeclNameAt(r.pos(), ir.ONAME, r.localIdent()) + n.Class = ir.PAUTO // overwritten below for parameters/results + n.Curfn = fn + n.SetType(r.typ()) + dcls[i] = n + } + fn.Inl.Dcl = dcls + + // Fixup parameter classes and associate with their + // signature's type fields. + i := 0 + fix := func(f *types.Field, class ir.Class) { + if class == ir.PPARAM && (f.Sym == nil || f.Sym.Name == "_") { + return + } + n := dcls[i] + n.Class = class + f.Nname = n + i++ + } + + typ := fn.Type() + if recv := typ.Recv(); recv != nil { + fix(recv, ir.PPARAM) + } + for _, f := range typ.Params().FieldSlice() { + fix(f, ir.PPARAM) + } + for _, f := range typ.Results().FieldSlice() { + fix(f, ir.PPARAMOUT) + } + + // Import function body. + body := r.stmtList() + if body == nil { + // Make sure empty body is not interpreted as + // no inlineable body (see also parser.fnbody) + // (not doing so can cause significant performance + // degradation due to unnecessary calls to empty + // functions). + body = []ir.Node{} + } + fn.Inl.Body = body + + r.curfn = outerfn +} + +func (r *importReader) localName() *ir.Name { + i := r.int64() + if i < 0 { + return ir.BlankNode.(*ir.Name) + } + return r.curfn.Inl.Dcl[i] +} + func (r *importReader) stmtList() []ir.Node { var list []ir.Node for { @@ -784,13 +837,8 @@ func (r *importReader) caseList(switchExpr ir.Node) []*ir.CaseClause { cas := ir.NewCaseStmt(r.pos(), nil, nil) cas.List = r.stmtList() if namedTypeSwitch { - // Note: per-case variables will have distinct, dotted - // names after import. That's okay: swt.go only needs - // Sym for diagnostics anyway. 
- caseVar := ir.NewNameAt(cas.Pos(), r.localIdent()) - Declare(caseVar, DeclContext) - cas.Var = caseVar - caseVar.Defn = switchExpr + cas.Var = r.localName() + cas.Var.Defn = switchExpr } cas.Body = r.stmtList() cases[i] = cas @@ -854,7 +902,7 @@ func (r *importReader) node() ir.Node { return r.qualifiedIdent() case ir.ONAME: - return r.localIdent().Def.(*ir.Name) + return r.localName() // case OPACK, ONONAME: // unreachable - should have been resolved by typechecking @@ -991,16 +1039,11 @@ func (r *importReader) node() ir.Node { // -------------------------------------------------------------------- // statements case ir.ODCL: - pos := r.pos() - lhs := ir.NewDeclNameAt(pos, ir.ONAME, r.localIdent()) - lhs.SetType(r.typ()) - - Declare(lhs, ir.PAUTO) - var stmts ir.Nodes - stmts.Append(ir.NewDecl(base.Pos, ir.ODCL, lhs)) - stmts.Append(ir.NewAssignStmt(base.Pos, lhs, nil)) - return ir.NewBlockStmt(pos, stmts) + n := r.localName() + stmts.Append(ir.NewDecl(n.Pos(), ir.ODCL, n)) + stmts.Append(ir.NewAssignStmt(n.Pos(), n, nil)) + return ir.NewBlockStmt(n.Pos(), stmts) // case OAS, OASWB: // unreachable - mapped to OAS case below by exporter diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index 3160725e3c56a..431fb04bef4eb 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -1687,7 +1687,7 @@ func typecheckdeftype(n *ir.Name) { } t := types.NewNamed(n) - t.Vargen = n.Vargen + t.Vargen = n.Typegen if n.Pragma()&ir.NotInHeap != 0 { t.SetNotInHeap(true) } diff --git a/test/fixedbugs/issue43633.dir/a.go b/test/fixedbugs/issue43633.dir/a.go new file mode 100644 index 0000000000000..946a37e87ef1a --- /dev/null +++ b/test/fixedbugs/issue43633.dir/a.go @@ -0,0 +1,28 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package a + +func F() bool { + { + x := false + _ = x + } + if false { + _ = func(x bool) {} + } + x := true + return x +} + +func G() func() bool { + x := true + return func() bool { + { + x := false + _ = x + } + return x + } +} diff --git a/test/fixedbugs/issue43633.dir/main.go b/test/fixedbugs/issue43633.dir/main.go new file mode 100644 index 0000000000000..320e00013c313 --- /dev/null +++ b/test/fixedbugs/issue43633.dir/main.go @@ -0,0 +1,18 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "./a" + +var g = a.G() + +func main() { + if !a.F() { + panic("FAIL") + } + if !g() { + panic("FAIL") + } +} diff --git a/test/fixedbugs/issue43633.go b/test/fixedbugs/issue43633.go new file mode 100644 index 0000000000000..40df49f83bef0 --- /dev/null +++ b/test/fixedbugs/issue43633.go @@ -0,0 +1,7 @@ +// rundir + +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ignored diff --git a/test/fixedbugs/issue7921.go b/test/fixedbugs/issue7921.go index 5dce557ca33cc..a4e7b246d4911 100644 --- a/test/fixedbugs/issue7921.go +++ b/test/fixedbugs/issue7921.go @@ -41,7 +41,7 @@ func bufferNoEscape3(xs []string) string { // ERROR "xs does not escape$" func bufferNoEscape4() []byte { var b bytes.Buffer - b.Grow(64) // ERROR "bufferNoEscape4 ignoring self-assignment in bytes.b.buf = bytes.b.buf\[:bytes.m·3\]$" "inlining call to bytes.\(\*Buffer\).Grow$" + b.Grow(64) // ERROR "bufferNoEscape4 ignoring self-assignment in bytes.b.buf = bytes.b.buf\[:bytes.m\]$" "inlining call to bytes.\(\*Buffer\).Grow$" useBuffer(&b) return b.Bytes() // ERROR "inlining call to bytes.\(\*Buffer\).Bytes$" } From 95acd8121bf76a15ecba0259367dca0efe6d3a77 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 11 Jan 2021 17:22:20 -0800 Subject: [PATCH 365/474] [dev.regabi] cmd/compile: remove Name.Typegen Just directly set Type.Vargen when declaring defined types within a function. Change-Id: Idcc0007084a660ce1c39da4a3697e158a1c615b5 Reviewed-on: https://go-review.googlesource.com/c/go/+/283212 Trust: Matthew Dempsky Trust: Dan Scales Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Dan Scales --- src/cmd/compile/internal/ir/name.go | 4 ---- src/cmd/compile/internal/ir/sizeof_test.go | 2 +- src/cmd/compile/internal/typecheck/dcl.go | 8 -------- src/cmd/compile/internal/typecheck/typecheck.go | 11 ++++++++++- 4 files changed, 11 insertions(+), 14 deletions(-) diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 30f7e9b9e0e6a..514b303893f5f 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -55,10 +55,6 @@ type Name struct { // The function, method, or closure in which local variable or param is declared. Curfn *Func - // Unique number for OTYPE names within a function. - // TODO(mdempsky): Remove completely. 
- Typegen int32 - Ntype Ntype Heapaddr *Name // temp holding heap address of param diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go index 1a4d2e5c7a6d0..2ada7231aae3c 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -21,7 +21,7 @@ func TestSizeof(t *testing.T) { _64bit uintptr // size on 64bit platforms }{ {Func{}, 184, 320}, - {Name{}, 120, 216}, + {Name{}, 116, 208}, } for _, tt := range tests { diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go index caa3e8203a332..c7d7506fd1da0 100644 --- a/src/cmd/compile/internal/typecheck/dcl.go +++ b/src/cmd/compile/internal/typecheck/dcl.go @@ -62,10 +62,6 @@ func Declare(n *ir.Name, ctxt ir.Class) { if ir.CurFunc != nil && ctxt != ir.PFUNC && n.Op() == ir.ONAME { ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n) } - if n.Op() == ir.OTYPE { - declare_typegen++ - n.Typegen = int32(declare_typegen) - } types.Pushdcl(s) n.Curfn = ir.CurFunc } @@ -308,10 +304,6 @@ func checkembeddedtype(t *types.Type) { } } -// declare individual names - var, typ, const - -var declare_typegen int - func fakeRecvField() *types.Field { return types.NewField(src.NoXPos, nil, types.FakeRecvType()) } diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index 431fb04bef4eb..3fc077b00cb18 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -1681,13 +1681,22 @@ func CheckMapKeys() { mapqueue = nil } +// typegen tracks the number of function-scoped defined types that +// have been declared. It's used to generate unique linker symbols for +// their runtime type descriptors. +var typegen int32 + func typecheckdeftype(n *ir.Name) { if base.EnableTrace && base.Flag.LowerT { defer tracePrint("typecheckdeftype", n)(nil) } t := types.NewNamed(n) - t.Vargen = n.Typegen + if n.Curfn != nil { + typegen++ + t.Vargen = typegen + } + if n.Pragma()&ir.NotInHeap != 0 { t.SetNotInHeap(true) } From cd5b74d2dfe6009d55c86e90f6c204e58c229c16 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 12 Jan 2021 11:34:00 -0800 Subject: [PATCH 366/474] [dev.regabi] cmd/compile: call NeedFuncSym in InitLSym InitLSym is where we're now generating ABI wrappers, so it seems as good a place as any to make sure we're generating the degenerate closure wrappers for declared functions and methods. 
Change-Id: I097f34bbcee65dee87a97f9ed6f3f38e4cf2e2b5 Reviewed-on: https://go-review.googlesource.com/c/go/+/283312 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Keith Randall --- src/cmd/compile/internal/gc/main.go | 2 -- src/cmd/compile/internal/ssagen/abi.go | 5 ++++- src/cmd/compile/internal/staticdata/data.go | 13 ++++++++----- src/cmd/compile/internal/typecheck/func.go | 4 ---- src/cmd/compile/internal/typecheck/typecheck.go | 7 ------- 5 files changed, 12 insertions(+), 19 deletions(-) diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index c3756309ea55b..1541bc4285f21 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -20,7 +20,6 @@ import ( "cmd/compile/internal/reflectdata" "cmd/compile/internal/ssa" "cmd/compile/internal/ssagen" - "cmd/compile/internal/staticdata" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/compile/internal/walk" @@ -194,7 +193,6 @@ func Main(archInit func(*ssagen.ArchInfo)) { typecheck.Target = new(ir.Package) - typecheck.NeedFuncSym = staticdata.NeedFuncSym typecheck.NeedITab = func(t, iface *types.Type) { reflectdata.ITabAddr(t, iface) } typecheck.NeedRuntimeType = reflectdata.NeedRuntimeType // TODO(rsc): typenamesym for lock? diff --git a/src/cmd/compile/internal/ssagen/abi.go b/src/cmd/compile/internal/ssagen/abi.go index 1c013dd2d8757..dc27ec3a29f47 100644 --- a/src/cmd/compile/internal/ssagen/abi.go +++ b/src/cmd/compile/internal/ssagen/abi.go @@ -14,6 +14,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/escape" "cmd/compile/internal/ir" + "cmd/compile/internal/staticdata" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" @@ -137,6 +138,8 @@ func ReadSymABIs(file, myimportpath string) { // For body-less functions, we only create the LSym; for functions // with bodies call a helper to setup up / populate the LSym. func InitLSym(f *ir.Func, hasBody bool) { + staticdata.NeedFuncSym(f.Sym()) + // FIXME: for new-style ABI wrappers, we set up the lsym at the // point the wrapper is created. if f.LSym != nil && base.Flag.ABIWrap { @@ -152,7 +155,7 @@ func InitLSym(f *ir.Func, hasBody bool) { // makes calls to helpers to create ABI wrappers if needed. func selectLSym(f *ir.Func, hasBody bool) { if f.LSym != nil { - base.Fatalf("Func.initLSym called twice") + base.FatalfAt(f.Pos(), "Func.initLSym called twice on %v", f) } if nam := f.Nname; !ir.IsBlank(nam) { diff --git a/src/cmd/compile/internal/staticdata/data.go b/src/cmd/compile/internal/staticdata/data.go index a2a844f94025e..4b12590fde4e7 100644 --- a/src/cmd/compile/internal/staticdata/data.go +++ b/src/cmd/compile/internal/staticdata/data.go @@ -265,7 +265,7 @@ func FuncLinksym(n *ir.Name) *obj.LSym { return FuncSym(n.Sym()).Linksym() } -// NeedFuncSym ensures that s·f is exported. +// NeedFuncSym ensures that s·f is exported, if needed. // It is only used with -dynlink. // When not compiling for dynamic linking, // the funcsyms are created as needed by @@ -275,8 +275,13 @@ func FuncLinksym(n *ir.Name) *obj.LSym { // So instead, when dynamic linking, we only create // the s·f stubs in s's package. func NeedFuncSym(s *types.Sym) { + if base.Ctxt.InParallel { + // The append below probably just needs to lock + // funcsymsmu, like in FuncSym. 
+ base.Fatalf("NeedFuncSym must be called in serial") + } if !base.Ctxt.Flag_dynlink { - base.Fatalf("NeedFuncSym: dynlink") + return } if s.IsBlank() { return @@ -287,9 +292,7 @@ func NeedFuncSym(s *types.Sym) { // get funcsyms. return } - if _, existed := s.Pkg.LookupOK(ir.FuncSymName(s)); !existed { - funcsyms = append(funcsyms, s) - } + funcsyms = append(funcsyms, s) } func WriteFuncSyms() { diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index 12762f7ee8f55..8f7411daecee7 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -364,10 +364,6 @@ func tcFunc(n *ir.Func) { n.Nname.SetSym(ir.MethodSym(rcvr.Type, n.Shortname)) Declare(n.Nname, ir.PFUNC) } - - if base.Ctxt.Flag_dynlink && !inimport && n.Nname != nil { - NeedFuncSym(n.Sym()) - } } // tcCall typechecks an OCALL node. diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index 3fc077b00cb18..814af59772b12 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -24,7 +24,6 @@ var inimport bool // set during import var TypecheckAllowed bool var ( - NeedFuncSym = func(*types.Sym) {} NeedITab = func(t, itype *types.Type) {} NeedRuntimeType = func(*types.Type) {} ) @@ -1140,12 +1139,6 @@ func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) { n.SetOp(ir.OMETHEXPR) n.Selection = m n.SetType(NewMethodType(m.Type, n.X.Type())) - - // Issue 25065. Make sure that we emit the symbol for a local method. - if base.Ctxt.Flag_dynlink && !inimport && (t.Sym() == nil || t.Sym().Pkg == types.LocalPkg) { - NeedFuncSym(n.FuncName().Sym()) - } - return n } From cc90e7a51e15659ea1a1eb53ca08361b6a77696a Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 12 Jan 2021 11:38:32 -0800 Subject: [PATCH 367/474] [dev.regabi] cmd/compile: always use the compile queue The compiler currently has two modes for compilation: one where it compiles each function as it sees them, and another where it enqueues them all into a work queue. A subsequent CL is going to reorder function compilation to ensure that functions are always compiled before any non-trivial function literals they enclose, and this will be easier if we always use the compile work queue. Also, fewer compilation modes makes things simpler to reason about. 
Change-Id: Ie090e81f7476c49486296f2b90911fa0a466a5dd Reviewed-on: https://go-review.googlesource.com/c/go/+/283313 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Keith Randall --- src/cmd/compile/internal/base/debug.go | 1 - src/cmd/compile/internal/gc/compile.go | 87 ++++++---------------- src/cmd/compile/internal/gc/main.go | 5 +- src/cmd/compile/internal/gc/obj.go | 5 +- src/cmd/compile/internal/liveness/plive.go | 1 + test/fixedbugs/issue20250.go | 2 +- 6 files changed, 28 insertions(+), 73 deletions(-) diff --git a/src/cmd/compile/internal/base/debug.go b/src/cmd/compile/internal/base/debug.go index 3acdcea8463c0..164941bb26c43 100644 --- a/src/cmd/compile/internal/base/debug.go +++ b/src/cmd/compile/internal/base/debug.go @@ -32,7 +32,6 @@ type DebugFlags struct { Append int `help:"print information about append compilation"` Checkptr int `help:"instrument unsafe pointer conversions"` Closure int `help:"print information about closure compilation"` - CompileLater int `help:"compile functions as late as possible"` DclStack int `help:"run internal dclstack check"` Defer int `help:"print information about defer compilation"` DisableNil int `help:"disable nil checks"` diff --git a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go index 25b1c76737111..b9c10056b4e57 100644 --- a/src/cmd/compile/internal/gc/compile.go +++ b/src/cmd/compile/internal/gc/compile.go @@ -26,21 +26,17 @@ var ( compilequeue []*ir.Func // functions waiting to be compiled ) -func funccompile(fn *ir.Func) { +func enqueueFunc(fn *ir.Func) { if ir.CurFunc != nil { - base.Fatalf("funccompile %v inside %v", fn.Sym(), ir.CurFunc.Sym()) + base.FatalfAt(fn.Pos(), "enqueueFunc %v inside %v", fn, ir.CurFunc) } - if fn.Type() == nil { - if base.Errors() == 0 { - base.Fatalf("funccompile missing type") - } + if ir.FuncName(fn) == "_" { + // Skip compiling blank functions. + // Frontend already reported any spec-mandated errors (#29870). return } - // assign parameter offsets - types.CalcSize(fn.Type()) - if len(fn.Body) == 0 { // Initialize ABI wrappers if necessary. ssagen.InitLSym(fn, false) @@ -48,35 +44,31 @@ func funccompile(fn *ir.Func) { return } - typecheck.DeclContext = ir.PAUTO - ir.CurFunc = fn - compile(fn) - ir.CurFunc = nil - typecheck.DeclContext = ir.PEXTERN + errorsBefore := base.Errors() + prepareFunc(fn) + if base.Errors() > errorsBefore { + return + } + + compilequeue = append(compilequeue, fn) } -func compile(fn *ir.Func) { +// prepareFunc handles any remaining frontend compilation tasks that +// aren't yet safe to perform concurrently. +func prepareFunc(fn *ir.Func) { // Set up the function's LSym early to avoid data races with the assemblers. // Do this before walk, as walk needs the LSym to set attributes/relocations // (e.g. in markTypeUsedInInterface). ssagen.InitLSym(fn, true) - errorsBefore := base.Errors() - walk.Walk(fn) - if base.Errors() > errorsBefore { - return - } - - // From this point, there should be no uses of Curfn. Enforce that. - ir.CurFunc = nil + // Calculate parameter offsets. + types.CalcSize(fn.Type()) - if ir.FuncName(fn) == "_" { - // We don't need to generate code for this function, just report errors in its body. - // At this point we've generated any errors needed. - // (Beyond here we generate only non-spec errors, like "stack frame too large".) - // See issue 29870. 
- return - } + typecheck.DeclContext = ir.PAUTO + ir.CurFunc = fn + walk.Walk(fn) + ir.CurFunc = nil // enforce no further uses of CurFunc + typecheck.DeclContext = ir.PEXTERN // Make sure type syms are declared for all types that might // be types of stack objects. We need to do this here @@ -95,28 +87,6 @@ func compile(fn *ir.Func) { } } } - - if compilenow(fn) { - ssagen.Compile(fn, 0) - } else { - compilequeue = append(compilequeue, fn) - } -} - -// compilenow reports whether to compile immediately. -// If functions are not compiled immediately, -// they are enqueued in compilequeue, -// which is drained by compileFunctions. -func compilenow(fn *ir.Func) bool { - // Issue 38068: if this function is a method AND an inline - // candidate AND was not inlined (yet), put it onto the compile - // queue instead of compiling it immediately. This is in case we - // wind up inlining it into a method wrapper that is generated by - // compiling a function later on in the Target.Decls list. - if ir.IsMethod(fn) && isInlinableButNotInlined(fn) { - return false - } - return base.Flag.LowerC == 1 && base.Debug.CompileLater == 0 } // compileFunctions compiles all functions in compilequeue. @@ -163,16 +133,3 @@ func compileFunctions() { types.CalcSizeDisabled = false } } - -// isInlinableButNotInlined returns true if 'fn' was marked as an -// inline candidate but then never inlined (presumably because we -// found no call sites). -func isInlinableButNotInlined(fn *ir.Func) bool { - if fn.Inl == nil { - return false - } - if fn.Sym() == nil { - return true - } - return !fn.Linksym().WasInlined() -} diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 1541bc4285f21..2903d64ff8993 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -300,9 +300,8 @@ func Main(archInit func(*ssagen.ArchInfo)) { base.Timer.Start("be", "compilefuncs") fcount := int64(0) for i := 0; i < len(typecheck.Target.Decls); i++ { - n := typecheck.Target.Decls[i] - if n.Op() == ir.ODCLFUNC { - funccompile(n.(*ir.Func)) + if fn, ok := typecheck.Target.Decls[i].(*ir.Func); ok { + enqueueFunc(fn) fcount++ } } diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index fbb2145e1b10b..753db80f764b1 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -131,9 +131,8 @@ func dumpdata() { // It was not until issue 24761 that we found any code that required a loop at all. 
for { for i := numDecls; i < len(typecheck.Target.Decls); i++ { - n := typecheck.Target.Decls[i] - if n.Op() == ir.ODCLFUNC { - funccompile(n.(*ir.Func)) + if n, ok := typecheck.Target.Decls[i].(*ir.Func); ok { + enqueueFunc(n) } } numDecls = len(typecheck.Target.Decls) diff --git a/src/cmd/compile/internal/liveness/plive.go b/src/cmd/compile/internal/liveness/plive.go index 26d90824b2897..8d1754c81380b 100644 --- a/src/cmd/compile/internal/liveness/plive.go +++ b/src/cmd/compile/internal/liveness/plive.go @@ -1223,6 +1223,7 @@ func WriteFuncMap(fn *ir.Func) { if ir.FuncName(fn) == "_" || fn.Sym().Linkname != "" { return } + types.CalcSize(fn.Type()) lsym := base.Ctxt.Lookup(fn.LSym.Name + ".args_stackmap") nptr := int(fn.Type().ArgWidth() / int64(types.PtrSize)) bv := bitvec.New(int32(nptr) * 2) diff --git a/test/fixedbugs/issue20250.go b/test/fixedbugs/issue20250.go index c190515274af2..1a513bea56df1 100644 --- a/test/fixedbugs/issue20250.go +++ b/test/fixedbugs/issue20250.go @@ -1,4 +1,4 @@ -// errorcheck -0 -live -l -d=compilelater +// errorcheck -0 -live -l // Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style From 432f9ffb11231b00b67c8fa8047f21a8282fa914 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 12 Jan 2021 11:39:10 -0800 Subject: [PATCH 368/474] [dev.regabi] cmd/compile: unindent compileFunctions No real code changes. Just splitting into a separate CL so the next one is easier to review. Change-Id: I428dc986b76370d8d3afc12cf19585f6384389d7 Reviewed-on: https://go-review.googlesource.com/c/go/+/283314 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Keith Randall --- src/cmd/compile/internal/gc/compile.go | 76 +++++++++++++------------- 1 file changed, 39 insertions(+), 37 deletions(-) diff --git a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go index b9c10056b4e57..c2894ab012d78 100644 --- a/src/cmd/compile/internal/gc/compile.go +++ b/src/cmd/compile/internal/gc/compile.go @@ -93,43 +93,45 @@ func prepareFunc(fn *ir.Func) { // It fans out nBackendWorkers to do the work // and waits for them to complete. func compileFunctions() { - if len(compilequeue) != 0 { - types.CalcSizeDisabled = true // not safe to calculate sizes concurrently - if race.Enabled { - // Randomize compilation order to try to shake out races. - tmp := make([]*ir.Func, len(compilequeue)) - perm := rand.Perm(len(compilequeue)) - for i, v := range perm { - tmp[v] = compilequeue[i] - } - copy(compilequeue, tmp) - } else { - // Compile the longest functions first, - // since they're most likely to be the slowest. - // This helps avoid stragglers. - sort.Slice(compilequeue, func(i, j int) bool { - return len(compilequeue[i].Body) > len(compilequeue[j].Body) - }) - } - var wg sync.WaitGroup - base.Ctxt.InParallel = true - c := make(chan *ir.Func, base.Flag.LowerC) - for i := 0; i < base.Flag.LowerC; i++ { - wg.Add(1) - go func(worker int) { - for fn := range c { - ssagen.Compile(fn, worker) - } - wg.Done() - }(i) - } - for _, fn := range compilequeue { - c <- fn + if len(compilequeue) == 0 { + return + } + + types.CalcSizeDisabled = true // not safe to calculate sizes concurrently + if race.Enabled { + // Randomize compilation order to try to shake out races. 
+ tmp := make([]*ir.Func, len(compilequeue)) + perm := rand.Perm(len(compilequeue)) + for i, v := range perm { + tmp[v] = compilequeue[i] } - close(c) - compilequeue = nil - wg.Wait() - base.Ctxt.InParallel = false - types.CalcSizeDisabled = false + copy(compilequeue, tmp) + } else { + // Compile the longest functions first, + // since they're most likely to be the slowest. + // This helps avoid stragglers. + sort.Slice(compilequeue, func(i, j int) bool { + return len(compilequeue[i].Body) > len(compilequeue[j].Body) + }) + } + var wg sync.WaitGroup + base.Ctxt.InParallel = true + c := make(chan *ir.Func, base.Flag.LowerC) + for i := 0; i < base.Flag.LowerC; i++ { + wg.Add(1) + go func(worker int) { + for fn := range c { + ssagen.Compile(fn, worker) + } + wg.Done() + }(i) + } + for _, fn := range compilequeue { + c <- fn } + close(c) + compilequeue = nil + wg.Wait() + base.Ctxt.InParallel = false + types.CalcSizeDisabled = false } From d6ad88b4db454813e1bdf09635cd853fe3b7ef13 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 12 Jan 2021 12:00:58 -0800 Subject: [PATCH 369/474] [dev.regabi] cmd/compile: compile functions before closures This CL reorders function compilation to ensure that functions are always compiled before any enclosed function literals. The primary goal of this is to reduce the risk of race conditions that arise due to compilation of function literals needing to inspect data from their closure variables. However, a pleasant side effect is that it allows skipping the redundant, separate compilation of function literals that were inlined into their enclosing function. Change-Id: I03ee96212988cb578c2452162b7e99cc5e92918f Reviewed-on: https://go-review.googlesource.com/c/go/+/282892 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Keith Randall Trust: Matthew Dempsky --- src/cmd/compile/internal/gc/compile.go | 51 +++++++++++++++++----- src/cmd/compile/internal/ir/func.go | 4 ++ src/cmd/compile/internal/ir/sizeof_test.go | 2 +- src/cmd/compile/internal/walk/closure.go | 2 + src/cmd/compile/internal/walk/expr.go | 7 ++- 5 files changed, 52 insertions(+), 14 deletions(-) diff --git a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go index c2894ab012d78..410b3e90ea551 100644 --- a/src/cmd/compile/internal/gc/compile.go +++ b/src/cmd/compile/internal/gc/compile.go @@ -37,6 +37,10 @@ func enqueueFunc(fn *ir.Func) { return } + if clo := fn.OClosure; clo != nil && !ir.IsTrivialClosure(clo) { + return // we'll get this as part of its enclosing function + } + if len(fn.Body) == 0 { // Initialize ABI wrappers if necessary. ssagen.InitLSym(fn, false) @@ -45,11 +49,22 @@ func enqueueFunc(fn *ir.Func) { } errorsBefore := base.Errors() - prepareFunc(fn) + + todo := []*ir.Func{fn} + for len(todo) > 0 { + next := todo[len(todo)-1] + todo = todo[:len(todo)-1] + + prepareFunc(next) + todo = append(todo, next.Closures...) + } + if base.Errors() > errorsBefore { return } + // Enqueue just fn itself. compileFunctions will handle + // scheduling compilation of its closures after it's done. compilequeue = append(compilequeue, fn) } @@ -97,7 +112,6 @@ func compileFunctions() { return } - types.CalcSizeDisabled = true // not safe to calculate sizes concurrently if race.Enabled { // Randomize compilation order to try to shake out races. 
tmp := make([]*ir.Func, len(compilequeue))
 		perm := rand.Perm(len(compilequeue))
 		for i, v := range perm {
 			tmp[v] = compilequeue[i]
 		}
 		copy(compilequeue, tmp)
 	} else {
 		// Compile the longest functions first,
 		// since they're most likely to be the slowest.
 		// This helps avoid stragglers.
 		sort.Slice(compilequeue, func(i, j int) bool {
 			return len(compilequeue[i].Body) > len(compilequeue[j].Body)
 		})
 	}
-	var wg sync.WaitGroup
-	base.Ctxt.InParallel = true
-	c := make(chan *ir.Func, base.Flag.LowerC)
+
+	// We queue up a goroutine per function that needs to be
+	// compiled, but require them to grab an available worker ID
+	// before doing any substantial work to limit parallelism.
+	workerIDs := make(chan int, base.Flag.LowerC)
 	for i := 0; i < base.Flag.LowerC; i++ {
+		workerIDs <- i
+	}
+
+	var wg sync.WaitGroup
+	var asyncCompile func(*ir.Func)
+	asyncCompile = func(fn *ir.Func) {
 		wg.Add(1)
-		go func(worker int) {
-			for fn := range c {
-				ssagen.Compile(fn, worker)
+		go func() {
+			worker := <-workerIDs
+			ssagen.Compile(fn, worker)
+			workerIDs <- worker
+
+			// Done compiling fn. Schedule its closures for compilation.
+			for _, closure := range fn.Closures {
+				asyncCompile(closure)
 			}
 			wg.Done()
-		}(i)
+		}()
 	}
+
+	types.CalcSizeDisabled = true // not safe to calculate sizes concurrently
+	base.Ctxt.InParallel = true
 	for _, fn := range compilequeue {
-		c <- fn
+		asyncCompile(fn)
 	}
-	close(c)
 	compilequeue = nil
 	wg.Wait()
 	base.Ctxt.InParallel = false
diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go
index d660fe3b40603..3fe23635f4285 100644
--- a/src/cmd/compile/internal/ir/func.go
+++ b/src/cmd/compile/internal/ir/func.go
@@ -81,6 +81,10 @@ type Func struct {
 	// Byval set if they're captured by value.
 	ClosureVars []*Name
 
+	// Enclosed functions that need to be compiled.
+	// Populated during walk.
+	Closures []*Func
+
 	// Parents records the parent scope of each scope within a
 	// function. The root scope (0) has no parent, so the i'th
 	// scope's parent is stored at Parents[i-1].
diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go
index 2ada7231aae3c..f95f77d6a2f09 100644
--- a/src/cmd/compile/internal/ir/sizeof_test.go
+++ b/src/cmd/compile/internal/ir/sizeof_test.go
@@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) {
 		_32bit uintptr // size on 32bit platforms
 		_64bit uintptr // size on 64bit platforms
 	}{
-		{Func{}, 184, 320},
+		{Func{}, 196, 344},
 		{Name{}, 116, 208},
 	}
 
diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go
index acb74b9901538..7fa63ea9c7b86 100644
--- a/src/cmd/compile/internal/walk/closure.go
+++ b/src/cmd/compile/internal/walk/closure.go
@@ -86,6 +86,8 @@ func walkClosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node {
 		}
 		return fn.Nname
 	}
+
+	ir.CurFunc.Closures = append(ir.CurFunc.Closures, fn)
 	ir.ClosureDebugRuntimeCheck(clo)
 
 	typ := typecheck.ClosureType(clo)
diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go
index df575d698589b..508cdd1d0640a 100644
--- a/src/cmd/compile/internal/walk/expr.go
+++ b/src/cmd/compile/internal/walk/expr.go
@@ -488,12 +488,15 @@ func walkCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
 		reflectdata.MarkUsedIfaceMethod(n)
 	}
 
-	if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.OCLOSURE {
+	if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.OCLOSURE && !ir.IsTrivialClosure(n.X.(*ir.ClosureExpr)) {
 		// Transform direct call of a closure to call of a normal function.
 		// transformclosure already did all preparation work.
+		// We leave trivial closures for walkClosure to handle.
 
-		// Prepend captured variables to argument list.
clo := n.X.(*ir.ClosureExpr) + ir.CurFunc.Closures = append(ir.CurFunc.Closures, clo.Func) + + // Prepend captured variables to argument list. n.Args.Prepend(closureArgs(clo)...) // Replace OCLOSURE with ONAME/PFUNC. From 41352fd401f4f22eceeca375361e018ea787f0fd Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 12 Jan 2021 12:12:27 -0800 Subject: [PATCH 370/474] [dev.regabi] cmd/compile: transform closures during walk We used to transform directly called closures in a separate pass before walk, because we couldn't guarantee whether we'd see the closure call or the closure itself first. As of the last CL, this ordering is always guaranteed, so we can rewrite calls and the closure at the same time. Change-Id: Ia6f4d504c24795e41500108589b53395d301123b Reviewed-on: https://go-review.googlesource.com/c/go/+/283315 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Keith Randall --- src/cmd/compile/internal/gc/main.go | 15 ---- src/cmd/compile/internal/walk/closure.go | 99 ++++++++++++++---------- src/cmd/compile/internal/walk/expr.go | 23 +----- 3 files changed, 61 insertions(+), 76 deletions(-) diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 2903d64ff8993..9ecdd510b18d9 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -22,7 +22,6 @@ import ( "cmd/compile/internal/ssagen" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" - "cmd/compile/internal/walk" "cmd/internal/dwarf" "cmd/internal/obj" "cmd/internal/objabi" @@ -269,20 +268,6 @@ func Main(archInit func(*ssagen.ArchInfo)) { ssagen.EnableNoWriteBarrierRecCheck() } - // Transform closure bodies to properly reference captured variables. - // This needs to happen before walk, because closures must be transformed - // before walk reaches a call of a closure. - base.Timer.Start("fe", "xclosures") - for _, n := range typecheck.Target.Decls { - if n.Op() == ir.ODCLFUNC { - n := n.(*ir.Func) - if n.OClosure != nil { - ir.CurFunc = n - walk.Closure(n) - } - } - } - // Prepare for SSA compilation. // This must be before peekitabs, because peekitabs // can trigger function compilation. diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go index 7fa63ea9c7b86..e9b3698080674 100644 --- a/src/cmd/compile/internal/walk/closure.go +++ b/src/cmd/compile/internal/walk/closure.go @@ -12,50 +12,43 @@ import ( "cmd/internal/src" ) -// Closure is called in a separate phase after escape analysis. -// It transform closure bodies to properly reference captured variables. -func Closure(fn *ir.Func) { - if len(fn.ClosureVars) == 0 { - return - } +// directClosureCall rewrites a direct call of a function literal into +// a normal function call with closure variables passed as arguments. +// This avoids allocation of a closure object. +// +// For illustration, the following call: +// +// func(a int) { +// println(byval) +// byref++ +// }(42) +// +// becomes: +// +// func(byval int, &byref *int, a int) { +// println(byval) +// (*&byref)++ +// }(byval, &byref, 42) +func directClosureCall(n *ir.CallExpr) { + clo := n.X.(*ir.ClosureExpr) + clofn := clo.Func - if !fn.ClosureCalled() { - // The closure is not directly called, so it is going to stay as closure. 
- fn.SetNeedctxt(true) - return + if ir.IsTrivialClosure(clo) { + return // leave for walkClosure to handle } - lno := base.Pos - base.Pos = fn.Pos() - - // If the closure is directly called, we transform it to a plain function call - // with variables passed as args. This avoids allocation of a closure object. - // Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE) - // will complete the transformation later. - // For illustration, the following closure: - // func(a int) { - // println(byval) - // byref++ - // }(42) - // becomes: - // func(byval int, &byref *int, a int) { - // println(byval) - // (*&byref)++ - // }(byval, &byref, 42) - - // f is ONAME of the actual function. - f := fn.Nname - // We are going to insert captured variables before input args. var params []*types.Field var decls []*ir.Name - for _, v := range fn.ClosureVars { + for _, v := range clofn.ClosureVars { if !v.Byval() { // If v of type T is captured by reference, // we introduce function param &v *T // and v remains PAUTOHEAP with &v heapaddr // (accesses will implicitly deref &v). - addr := typecheck.NewName(typecheck.Lookup("&" + v.Sym().Name)) + + addr := ir.NewNameAt(clofn.Pos(), typecheck.Lookup("&"+v.Sym().Name)) + addr.Curfn = clofn addr.SetType(types.NewPtr(v.Type())) v.Heapaddr = addr v = addr @@ -69,32 +62,58 @@ func Closure(fn *ir.Func) { params = append(params, fld) } + // f is ONAME of the actual function. + f := clofn.Nname + // Prepend params and decls. - f.Type().Params().SetFields(append(params, f.Type().Params().FieldSlice()...)) - fn.Dcl = append(decls, fn.Dcl...) + typ := f.Type() + typ.Params().SetFields(append(params, typ.Params().FieldSlice()...)) + clofn.Dcl = append(decls, clofn.Dcl...) + + // Rewrite call. + n.X = f + n.Args.Prepend(closureArgs(clo)...) + + // Update the call expression's type. We need to do this + // because typecheck gave it the result type of the OCLOSURE + // node, but we only rewrote the ONAME node's type. Logically, + // they're the same, but the stack offsets probably changed. + // + // TODO(mdempsky): Reuse a single type for both. + if typ.NumResults() == 1 { + n.SetType(typ.Results().Field(0).Type) + } else { + n.SetType(typ.Results()) + } - base.Pos = lno + // Add to Closures for enqueueFunc. It's no longer a proper + // closure, but we may have already skipped over it in the + // functions list as a non-trivial closure, so this just + // ensures it's compiled. + ir.CurFunc.Closures = append(ir.CurFunc.Closures, clofn) } func walkClosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node { - fn := clo.Func + clofn := clo.Func // If no closure vars, don't bother wrapping. if ir.IsTrivialClosure(clo) { if base.Debug.Closure > 0 { base.WarnfAt(clo.Pos(), "closure converted to global") } - return fn.Nname + return clofn.Nname } - ir.CurFunc.Closures = append(ir.CurFunc.Closures, fn) + // The closure is not trivial or directly called, so it's going to stay a closure. ir.ClosureDebugRuntimeCheck(clo) + clofn.SetNeedctxt(true) + ir.CurFunc.Closures = append(ir.CurFunc.Closures, clofn) typ := typecheck.ClosureType(clo) clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ), nil) clos.SetEsc(clo.Esc()) - clos.List = append([]ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, fn.Nname)}, closureArgs(clo)...) + clos.List = append([]ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, clofn.Nname)}, closureArgs(clo)...) 
addr := typecheck.NodAddr(clos) addr.SetEsc(clo.Esc()) diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index 508cdd1d0640a..893a95f403198 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -488,27 +488,8 @@ func walkCall(n *ir.CallExpr, init *ir.Nodes) ir.Node { reflectdata.MarkUsedIfaceMethod(n) } - if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.OCLOSURE && !ir.IsTrivialClosure(n.X.(*ir.ClosureExpr)) { - // Transform direct call of a closure to call of a normal function. - // transformclosure already did all preparation work. - // We leave trivial closures for walkClosure to handle. - - clo := n.X.(*ir.ClosureExpr) - ir.CurFunc.Closures = append(ir.CurFunc.Closures, clo.Func) - - // Prepend captured variables to argument list. - n.Args.Prepend(closureArgs(clo)...) - - // Replace OCLOSURE with ONAME/PFUNC. - n.X = clo.Func.Nname - - // Update type of OCALLFUNC node. - // Output arguments had not changed, but their offsets could. - if n.X.Type().NumResults() == 1 { - n.SetType(n.X.Type().Results().Field(0).Type) - } else { - n.SetType(n.X.Type().Results()) - } + if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.OCLOSURE { + directClosureCall(n) } walkCall1(n, init) From d9acf6f3a3758c3096ee5ef5a24c2bc5df9d9c8b Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 12 Jan 2021 12:25:33 -0800 Subject: [PATCH 371/474] [dev.regabi] cmd/compile: remove Func.ClosureType The closure's type always matches the corresponding function's type, so just use one instance rather than carrying around two. Simplifies construction of closures, rewriting them during walk, and shrinks memory usage. Passes toolstash -cmp. Change-Id: I83b8b8f435b02ab25a30fb7aa15d5ec7ad97189d Reviewed-on: https://go-review.googlesource.com/c/go/+/283152 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: Keith Randall --- src/cmd/compile/internal/ir/func.go | 2 -- src/cmd/compile/internal/ir/sizeof_test.go | 2 +- src/cmd/compile/internal/noder/noder.go | 2 -- src/cmd/compile/internal/typecheck/func.go | 4 ++-- src/cmd/compile/internal/walk/closure.go | 10 +++++----- 5 files changed, 8 insertions(+), 12 deletions(-) diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 3fe23635f4285..30cddd298ef52 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -71,8 +71,6 @@ type Func struct { // Anonymous and blank PPARAMOUTs are declared as ~rNN and ~bNN Names, respectively. Dcl []*Name - ClosureType Ntype // closure representation type - // ClosureVars lists the free variables that are used within a // function literal, but formally declared in an enclosing // function. 
The variables in this slice are the closure function's diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go index f95f77d6a2f09..553dc5376098b 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {Func{}, 196, 344}, + {Func{}, 188, 328}, {Name{}, 116, 208}, } diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index ec0debdbbd6e9..edd30a1fc1200 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -1856,7 +1856,6 @@ func fakeRecv() *ir.Field { func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node { xtype := p.typeExpr(expr.Type) - ntype := p.typeExpr(expr.Type) fn := ir.NewFunc(p.pos(expr)) fn.SetIsHiddenClosure(ir.CurFunc != nil) @@ -1867,7 +1866,6 @@ func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node { fn.Nname.Defn = fn clo := ir.NewClosureExpr(p.pos(expr), fn) - fn.ClosureType = ntype fn.OClosure = clo p.funcBody(fn, expr.Body) diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index 8f7411daecee7..03a10f594ab24 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -293,20 +293,20 @@ func tcClosure(clo *ir.ClosureExpr, top int) { fn.Iota = x } - fn.ClosureType = typecheckNtype(fn.ClosureType) - clo.SetType(fn.ClosureType.Type()) fn.SetClosureCalled(top&ctxCallee != 0) // Do not typecheck fn twice, otherwise, we will end up pushing // fn to Target.Decls multiple times, causing initLSym called twice. // See #30709 if fn.Typecheck() == 1 { + clo.SetType(fn.Type()) return } fn.Nname.SetSym(closurename(ir.CurFunc)) ir.MarkFunc(fn.Nname) Func(fn) + clo.SetType(fn.Type()) // Type check the body now, but only if we're inside a function. // At top level (in a variable initialization: curfn==nil) we're not diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go index e9b3698080674..694aa999407f2 100644 --- a/src/cmd/compile/internal/walk/closure.go +++ b/src/cmd/compile/internal/walk/closure.go @@ -64,10 +64,12 @@ func directClosureCall(n *ir.CallExpr) { // f is ONAME of the actual function. f := clofn.Nname - - // Prepend params and decls. typ := f.Type() - typ.Params().SetFields(append(params, typ.Params().FieldSlice()...)) + + // Create new function type with parameters prepended, and + // then update type and declarations. + typ = types.NewSignature(typ.Pkg(), nil, append(params, typ.Params().FieldSlice()...), typ.Results().FieldSlice()) + f.SetType(typ) clofn.Dcl = append(decls, clofn.Dcl...) // Rewrite call. @@ -78,8 +80,6 @@ func directClosureCall(n *ir.CallExpr) { // because typecheck gave it the result type of the OCLOSURE // node, but we only rewrote the ONAME node's type. Logically, // they're the same, but the stack offsets probably changed. - // - // TODO(mdempsky): Reuse a single type for both. if typ.NumResults() == 1 { n.SetType(typ.Results().Field(0).Type) } else { From 9a19481acb93114948503d935e10f6985ff15843 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 30 Dec 2020 12:05:57 -0500 Subject: [PATCH 372/474] [dev.regabi] cmd/compile: make ordering for InvertFlags more stable Current many architectures use a rule along the lines of // Canonicalize the order of arguments to comparisons - helps with CSE. 
((CMP|CMPW) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW) y x)) to normalize comparisons as much as possible for CSE. Replace the ID comparison with something less variable across compiler changes. This helps avoid spurious failures in some of the codegen-comparison tests (though the current choice of comparison is sensitive to Op ordering). Two tests changed to accommodate modified instruction choice. Change-Id: Ib35f450bd2bae9d4f9f7838ceaf7ec682bcf1e1a Reviewed-on: https://go-review.googlesource.com/c/go/+/280155 Trust: David Chase Run-TryBot: David Chase TryBot-Result: Go Bot Reviewed-by: Cherry Zhang --- src/cmd/compile/internal/ssa/gen/386.rules | 2 +- src/cmd/compile/internal/ssa/gen/AMD64.rules | 2 +- src/cmd/compile/internal/ssa/gen/ARM.rules | 2 +- src/cmd/compile/internal/ssa/gen/ARM64.rules | 2 +- src/cmd/compile/internal/ssa/gen/PPC64.rules | 2 +- src/cmd/compile/internal/ssa/gen/S390X.rules | 2 +- src/cmd/compile/internal/ssa/rewrite.go | 12 ++++++++++++ src/cmd/compile/internal/ssa/rewrite386.go | 12 ++++++------ src/cmd/compile/internal/ssa/rewriteAMD64.go | 16 ++++++++-------- src/cmd/compile/internal/ssa/rewriteARM.go | 4 ++-- src/cmd/compile/internal/ssa/rewriteARM64.go | 8 ++++---- src/cmd/compile/internal/ssa/rewritePPC64.go | 16 ++++++++-------- src/cmd/compile/internal/ssa/rewriteS390X.go | 16 ++++++++-------- test/codegen/condmove.go | 6 +++--- test/codegen/spectre.go | 4 ++-- 15 files changed, 59 insertions(+), 47 deletions(-) diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules index fbc12fd67219a..df03cb71a6e3c 100644 --- a/src/cmd/compile/internal/ssa/gen/386.rules +++ b/src/cmd/compile/internal/ssa/gen/386.rules @@ -475,7 +475,7 @@ (CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)])) // Canonicalize the order of arguments to comparisons - helps with CSE. -(CMP(L|W|B) x y) && x.ID > y.ID => (InvertFlags (CMP(L|W|B) y x)) +(CMP(L|W|B) x y) && canonLessThan(x,y) => (InvertFlags (CMP(L|W|B) y x)) // strength reduction // Assumes that the following costs from https://gmplib.org/~tege/x86-timing.pdf: diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index a866a967b9283..7d46266411fce 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -916,7 +916,7 @@ (CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)])) // Canonicalize the order of arguments to comparisons - helps with CSE. -(CMP(Q|L|W|B) x y) && x.ID > y.ID => (InvertFlags (CMP(Q|L|W|B) y x)) +(CMP(Q|L|W|B) x y) && canonLessThan(x,y) => (InvertFlags (CMP(Q|L|W|B) y x)) // Using MOVZX instead of AND is cheaper. (AND(Q|L)const [ 0xFF] x) => (MOVBQZX x) diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules index 11c36b5da3567..de0df363e4964 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM.rules @@ -507,7 +507,7 @@ (TEQ x (MOVWconst [c])) => (TEQconst [c] x) // Canonicalize the order of arguments to comparisons - helps with CSE. -(CMP x y) && x.ID > y.ID => (InvertFlags (CMP y x)) +(CMP x y) && canonLessThan(x,y) => (InvertFlags (CMP y x)) // don't extend after proper load // MOVWreg instruction is not emitted if src and dst registers are same, but it ensures the type. 
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules index 3f4d0c1c52769..a0e2a0d5e2722 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM64.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules @@ -1151,7 +1151,7 @@ (CMPW (MOVDconst [c]) x) => (InvertFlags (CMPWconst [int32(c)] x)) // Canonicalize the order of arguments to comparisons - helps with CSE. -((CMP|CMPW) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW) y x)) +((CMP|CMPW) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW) y x)) // mul-neg => mneg (NEG (MUL x y)) => (MNEG x y) diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules index c06404617297a..a762be65d4259 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64.rules +++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules @@ -1088,7 +1088,7 @@ (CMPWU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPWUconst y [int32(c)])) // Canonicalize the order of arguments to comparisons - helps with CSE. -((CMP|CMPW|CMPU|CMPWU) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x)) +((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x)) // ISEL auxInt values 0=LT 1=GT 2=EQ arg2 ? arg0 : arg1 // ISEL auxInt values 4=GE 5=LE 6=NE arg2 ? arg1 : arg0 diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules index 384f2e807e056..c3421da0a242c 100644 --- a/src/cmd/compile/internal/ssa/gen/S390X.rules +++ b/src/cmd/compile/internal/ssa/gen/S390X.rules @@ -785,7 +785,7 @@ => (RISBGZ x {s390x.NewRotateParams(r.Start, r.Start, -r.Start&63)}) // Canonicalize the order of arguments to comparisons - helps with CSE. -((CMP|CMPW|CMPU|CMPWU) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x)) +((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x)) // Use sign/zero extend instead of RISBGZ. (RISBGZ x {r}) && r == s390x.NewRotateParams(56, 63, 0) => (MOVBZreg x) diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index 9abfe0938bd07..e0a20668e21ff 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -521,6 +521,18 @@ func shiftIsBounded(v *Value) bool { return v.AuxInt != 0 } +// canonLessThan returns whether x is "ordered" less than y, for purposes of normalizing +// generated code as much as possible. +func canonLessThan(x, y *Value) bool { + if x.Op != y.Op { + return x.Op < y.Op + } + if !x.Pos.SameFileAndLine(y.Pos) { + return x.Pos.Before(y.Pos) + } + return x.ID < y.ID +} + // truncate64Fto32F converts a float64 value to a float32 preserving the bit pattern // of the mantissa. It will panic if the truncation results in lost information. 
func truncate64Fto32F(f float64) float32 { diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go index 2acdccd5684aa..4e7fdb9e639ce 100644 --- a/src/cmd/compile/internal/ssa/rewrite386.go +++ b/src/cmd/compile/internal/ssa/rewrite386.go @@ -1785,12 +1785,12 @@ func rewriteValue386_Op386CMPB(v *Value) bool { return true } // match: (CMPB x y) - // cond: x.ID > y.ID + // cond: canonLessThan(x,y) // result: (InvertFlags (CMPB y x)) for { x := v_0 y := v_1 - if !(x.ID > y.ID) { + if !(canonLessThan(x, y)) { break } v.reset(Op386InvertFlags) @@ -2078,12 +2078,12 @@ func rewriteValue386_Op386CMPL(v *Value) bool { return true } // match: (CMPL x y) - // cond: x.ID > y.ID + // cond: canonLessThan(x,y) // result: (InvertFlags (CMPL y x)) for { x := v_0 y := v_1 - if !(x.ID > y.ID) { + if !(canonLessThan(x, y)) { break } v.reset(Op386InvertFlags) @@ -2386,12 +2386,12 @@ func rewriteValue386_Op386CMPW(v *Value) bool { return true } // match: (CMPW x y) - // cond: x.ID > y.ID + // cond: canonLessThan(x,y) // result: (InvertFlags (CMPW y x)) for { x := v_0 y := v_1 - if !(x.ID > y.ID) { + if !(canonLessThan(x, y)) { break } v.reset(Op386InvertFlags) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 75d4ff7357a76..db2dc7a004049 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -6749,12 +6749,12 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool { return true } // match: (CMPB x y) - // cond: x.ID > y.ID + // cond: canonLessThan(x,y) // result: (InvertFlags (CMPB y x)) for { x := v_0 y := v_1 - if !(x.ID > y.ID) { + if !(canonLessThan(x, y)) { break } v.reset(OpAMD64InvertFlags) @@ -7135,12 +7135,12 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool { return true } // match: (CMPL x y) - // cond: x.ID > y.ID + // cond: canonLessThan(x,y) // result: (InvertFlags (CMPL y x)) for { x := v_0 y := v_1 - if !(x.ID > y.ID) { + if !(canonLessThan(x, y)) { break } v.reset(OpAMD64InvertFlags) @@ -7544,12 +7544,12 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool { return true } // match: (CMPQ x y) - // cond: x.ID > y.ID + // cond: canonLessThan(x,y) // result: (InvertFlags (CMPQ y x)) for { x := v_0 y := v_1 - if !(x.ID > y.ID) { + if !(canonLessThan(x, y)) { break } v.reset(OpAMD64InvertFlags) @@ -8106,12 +8106,12 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool { return true } // match: (CMPW x y) - // cond: x.ID > y.ID + // cond: canonLessThan(x,y) // result: (InvertFlags (CMPW y x)) for { x := v_0 y := v_1 - if !(x.ID > y.ID) { + if !(canonLessThan(x, y)) { break } v.reset(OpAMD64InvertFlags) diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index d9d439fa63ee1..c958aae2c4abd 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -3728,12 +3728,12 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { return true } // match: (CMP x y) - // cond: x.ID > y.ID + // cond: canonLessThan(x,y) // result: (InvertFlags (CMP y x)) for { x := v_0 y := v_1 - if !(x.ID > y.ID) { + if !(canonLessThan(x, y)) { break } v.reset(OpARMInvertFlags) diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go index 5d5e526add31f..ff1156d9011ea 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM64.go +++ b/src/cmd/compile/internal/ssa/rewriteARM64.go @@ -2772,12 +2772,12 @@ func rewriteValueARM64_OpARM64CMP(v 
*Value) bool { return true } // match: (CMP x y) - // cond: x.ID > y.ID + // cond: canonLessThan(x,y) // result: (InvertFlags (CMP y x)) for { x := v_0 y := v_1 - if !(x.ID > y.ID) { + if !(canonLessThan(x, y)) { break } v.reset(OpARM64InvertFlags) @@ -2941,12 +2941,12 @@ func rewriteValueARM64_OpARM64CMPW(v *Value) bool { return true } // match: (CMPW x y) - // cond: x.ID > y.ID + // cond: canonLessThan(x,y) // result: (InvertFlags (CMPW y x)) for { x := v_0 y := v_1 - if !(x.ID > y.ID) { + if !(canonLessThan(x, y)) { break } v.reset(OpARM64InvertFlags) diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index 455f9b138859b..98f748e5fa41d 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -4777,12 +4777,12 @@ func rewriteValuePPC64_OpPPC64CMP(v *Value) bool { return true } // match: (CMP x y) - // cond: x.ID > y.ID + // cond: canonLessThan(x,y) // result: (InvertFlags (CMP y x)) for { x := v_0 y := v_1 - if !(x.ID > y.ID) { + if !(canonLessThan(x, y)) { break } v.reset(OpPPC64InvertFlags) @@ -4834,12 +4834,12 @@ func rewriteValuePPC64_OpPPC64CMPU(v *Value) bool { return true } // match: (CMPU x y) - // cond: x.ID > y.ID + // cond: canonLessThan(x,y) // result: (InvertFlags (CMPU y x)) for { x := v_0 y := v_1 - if !(x.ID > y.ID) { + if !(canonLessThan(x, y)) { break } v.reset(OpPPC64InvertFlags) @@ -4964,12 +4964,12 @@ func rewriteValuePPC64_OpPPC64CMPW(v *Value) bool { return true } // match: (CMPW x y) - // cond: x.ID > y.ID + // cond: canonLessThan(x,y) // result: (InvertFlags (CMPW y x)) for { x := v_0 y := v_1 - if !(x.ID > y.ID) { + if !(canonLessThan(x, y)) { break } v.reset(OpPPC64InvertFlags) @@ -5045,12 +5045,12 @@ func rewriteValuePPC64_OpPPC64CMPWU(v *Value) bool { return true } // match: (CMPWU x y) - // cond: x.ID > y.ID + // cond: canonLessThan(x,y) // result: (InvertFlags (CMPWU y x)) for { x := v_0 y := v_1 - if !(x.ID > y.ID) { + if !(canonLessThan(x, y)) { break } v.reset(OpPPC64InvertFlags) diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go index a9722b820c90d..b52a1b6745e08 100644 --- a/src/cmd/compile/internal/ssa/rewriteS390X.go +++ b/src/cmd/compile/internal/ssa/rewriteS390X.go @@ -6332,12 +6332,12 @@ func rewriteValueS390X_OpS390XCMP(v *Value) bool { return true } // match: (CMP x y) - // cond: x.ID > y.ID + // cond: canonLessThan(x,y) // result: (InvertFlags (CMP y x)) for { x := v_0 y := v_1 - if !(x.ID > y.ID) { + if !(canonLessThan(x, y)) { break } v.reset(OpS390XInvertFlags) @@ -6389,12 +6389,12 @@ func rewriteValueS390X_OpS390XCMPU(v *Value) bool { return true } // match: (CMPU x y) - // cond: x.ID > y.ID + // cond: canonLessThan(x,y) // result: (InvertFlags (CMPU y x)) for { x := v_0 y := v_1 - if !(x.ID > y.ID) { + if !(canonLessThan(x, y)) { break } v.reset(OpS390XInvertFlags) @@ -6624,12 +6624,12 @@ func rewriteValueS390X_OpS390XCMPW(v *Value) bool { return true } // match: (CMPW x y) - // cond: x.ID > y.ID + // cond: canonLessThan(x,y) // result: (InvertFlags (CMPW y x)) for { x := v_0 y := v_1 - if !(x.ID > y.ID) { + if !(canonLessThan(x, y)) { break } v.reset(OpS390XInvertFlags) @@ -6721,12 +6721,12 @@ func rewriteValueS390X_OpS390XCMPWU(v *Value) bool { return true } // match: (CMPWU x y) - // cond: x.ID > y.ID + // cond: canonLessThan(x,y) // result: (InvertFlags (CMPWU y x)) for { x := v_0 y := v_1 - if !(x.ID > y.ID) { + if !(canonLessThan(x, y)) { break } v.reset(OpS390XInvertFlags) 
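The codegen test updates below follow from the new ordering: once the rewrite is free to swap x and y, a comparison may be emitted with either condition sense. As an illustrative sketch (mirroring the cmovuintptr test that follows; which instruction is chosen depends on the operand order canonLessThan picks):

	// On amd64 this may now compile as either CMPQ x, y + CMOVQCS or
	// CMPQ y, x + CMOVQHI, hence the relaxed CMOVQ(HI|CS) expectations
	// in the updated codegen tests.
	func cmovuintptr(x, y uintptr) uintptr {
		if x < y {
			x = -y
		}
		return x
	}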
diff --git a/test/codegen/condmove.go b/test/codegen/condmove.go index f86da3459a325..7579dd1890ed2 100644 --- a/test/codegen/condmove.go +++ b/test/codegen/condmove.go @@ -31,7 +31,7 @@ func cmovuintptr(x, y uintptr) uintptr { if x < y { x = -y } - // amd64:"CMOVQCS" + // amd64:"CMOVQ(HI|CS)" // arm64:"CSEL\t(LO|HI)" // wasm:"Select" return x @@ -41,7 +41,7 @@ func cmov32bit(x, y uint32) uint32 { if x < y { x = -y } - // amd64:"CMOVLCS" + // amd64:"CMOVL(HI|CS)" // arm64:"CSEL\t(LO|HI)" // wasm:"Select" return x @@ -51,7 +51,7 @@ func cmov16bit(x, y uint16) uint16 { if x < y { x = -y } - // amd64:"CMOVWCS" + // amd64:"CMOVW(HI|CS)" // arm64:"CSEL\t(LO|HI)" // wasm:"Select" return x diff --git a/test/codegen/spectre.go b/test/codegen/spectre.go index 3753498d09f76..d845da35ced92 100644 --- a/test/codegen/spectre.go +++ b/test/codegen/spectre.go @@ -13,12 +13,12 @@ func IndexArray(x *[10]int, i int) int { } func IndexString(x string, i int) byte { - // amd64:`CMOVQCC` + // amd64:`CMOVQLS` return x[i] } func IndexSlice(x []float64, i int) float64 { - // amd64:`CMOVQCC` + // amd64:`CMOVQLS` return x[i] } From 2abd24f3b78f8f605840e5a0dd3b4f76734f6c13 Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 4 Jan 2021 14:05:17 -0500 Subject: [PATCH 373/474] [dev.regabi] test: make run.go error messages slightly more informative This is intended to make it easier to write/change a test without referring to the source code to figure out what the error messages actually mean, or how to correct them. Change-Id: Ie79ff7cd9f2d1fa605257fe97eace68adc8a6716 Reviewed-on: https://go-review.googlesource.com/c/go/+/281452 Trust: David Chase Run-TryBot: David Chase TryBot-Result: Go Bot Reviewed-by: Jeremy Faller --- test/run.go | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/test/run.go b/test/run.go index db3e9f6c2fc18..1c516f4946f38 100644 --- a/test/run.go +++ b/test/run.go @@ -489,7 +489,7 @@ func (t *test) run() { // Execution recipe stops at first blank line. pos := strings.Index(t.src, "\n\n") if pos == -1 { - t.err = errors.New("double newline not found") + t.err = fmt.Errorf("double newline ending execution recipe not found in %s", t.goFileName()) return } action := t.src[:pos] @@ -860,9 +860,7 @@ func (t *test) run() { t.err = err return } - if strings.Replace(string(out), "\r\n", "\n", -1) != t.expectedOutput() { - t.err = fmt.Errorf("incorrect output\n%s", out) - } + t.checkExpectedOutput(out) } } @@ -902,9 +900,7 @@ func (t *test) run() { t.err = err return } - if strings.Replace(string(out), "\r\n", "\n", -1) != t.expectedOutput() { - t.err = fmt.Errorf("incorrect output\n%s", out) - } + t.checkExpectedOutput(out) case "build": // Build Go file. 
@@ -989,9 +985,7 @@ func (t *test) run() { t.err = err break } - if strings.Replace(string(out), "\r\n", "\n", -1) != t.expectedOutput() { - t.err = fmt.Errorf("incorrect output\n%s", out) - } + t.checkExpectedOutput(out) } case "buildrun": @@ -1017,9 +1011,7 @@ func (t *test) run() { return } - if strings.Replace(string(out), "\r\n", "\n", -1) != t.expectedOutput() { - t.err = fmt.Errorf("incorrect output\n%s", out) - } + t.checkExpectedOutput(out) case "run": // Run Go file if no special go command flags are provided; @@ -1062,9 +1054,7 @@ func (t *test) run() { t.err = err return } - if strings.Replace(string(out), "\r\n", "\n", -1) != t.expectedOutput() { - t.err = fmt.Errorf("incorrect output\n%s", out) - } + t.checkExpectedOutput(out) case "runoutput": // Run Go file and write its output into temporary Go file. @@ -1099,9 +1089,7 @@ func (t *test) run() { t.err = err return } - if string(out) != t.expectedOutput() { - t.err = fmt.Errorf("incorrect output\n%s", out) - } + t.checkExpectedOutput(out) case "errorcheckoutput": // Run Go file and write its output into temporary Go file. @@ -1175,12 +1163,24 @@ func (t *test) makeTempDir() { } } -func (t *test) expectedOutput() string { +// checkExpectedOutput compares the output from compiling and/or running with the contents +// of the corresponding reference output file, if any (replace ".go" with ".out"). +// If they don't match, fail with an informative message. +func (t *test) checkExpectedOutput(gotBytes []byte) { + got := string(gotBytes) filename := filepath.Join(t.dir, t.gofile) filename = filename[:len(filename)-len(".go")] filename += ".out" - b, _ := ioutil.ReadFile(filename) - return string(b) + b, err := ioutil.ReadFile(filename) + // File is allowed to be missing (err != nil) in which case output should be empty. + got = strings.Replace(got, "\r\n", "\n", -1) + if got != string(b) { + if err == nil { + t.err = fmt.Errorf("output does not match expected in %s. Instead saw\n%s", filename, got) + } else { + t.err = fmt.Errorf("output should be empty when (optional) expected-output file %s is not present. Instead saw\n%s", filename, got) + } + } } func splitOutput(out string, wantAuto bool) []string { From c1370e918fd88a13f77a133f8e431197cd3a1fc6 Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 28 Sep 2020 17:42:30 -0400 Subject: [PATCH 374/474] [dev.regabi] cmd/compile: add code to support register ABI spills around morestack calls This is a selected copy from the register ABI experiment CL, focused on the files and data structures that handle spilling around morestack. Unnecessary code from the experiment was removed, other code was adapted. Would it make sense to leave comments in the experiment as pieces are brought over? 
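In outline, the stack-split prologue now branches to a spill block rather than directly to the morestack call, so register-resident arguments survive stack growth. A rough sketch of the resulting AMD64 control flow (illustrative only; the real code is emitted by SpillRegisterArgs/UnspillRegisterArgs in link.go and wired up in obj6.go below):

	// entry:
	//	CMPQ SP, stackguard
	//	JLS  spill            // was: branch straight to the morestack CALL
	//	...                   // function body
	// spill:
	//	(SpillRegisterArgs)   // store each register arg to its stack slot
	//	CALL runtime.morestack
	//	(UnspillRegisterArgs) // reload each register arg
	//	JMP  entry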
Experiment CL (for comparison purposes) https://go-review.googlesource.com/c/go/+/28832 Change-Id: I92136f070351d4fcca1407b52ecf9b80898fed95 Reviewed-on: https://go-review.googlesource.com/c/go/+/279520 Trust: David Chase Run-TryBot: David Chase TryBot-Result: Go Bot Reviewed-by: Jeremy Faller --- src/cmd/compile/internal/ssa/func.go | 3 ++ src/cmd/compile/internal/ssa/location.go | 26 ++++++++++++++ src/cmd/internal/obj/link.go | 44 ++++++++++++++++++++++-- src/cmd/internal/obj/x86/obj6.go | 10 +++--- 4 files changed, 76 insertions(+), 7 deletions(-) diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go index e6c4798a78882..f753b4407bd9d 100644 --- a/src/cmd/compile/internal/ssa/func.go +++ b/src/cmd/compile/internal/ssa/func.go @@ -58,6 +58,9 @@ type Func struct { // of keys to make iteration order deterministic. Names []LocalSlot + // RegArgs is a slice of register-memory pairs that must be spilled and unspilled in the uncommon path of function entry. + RegArgs []ArgPair + // WBLoads is a list of Blocks that branch on the write // barrier flag. Safe-points are disabled from the OpLoad that // reads the write-barrier flag until the control flow rejoins diff --git a/src/cmd/compile/internal/ssa/location.go b/src/cmd/compile/internal/ssa/location.go index 69f90d9ab4107..4cd0ac8d777b3 100644 --- a/src/cmd/compile/internal/ssa/location.go +++ b/src/cmd/compile/internal/ssa/location.go @@ -87,3 +87,29 @@ func (t LocPair) String() string { } return fmt.Sprintf("<%s,%s>", n0, n1) } + +type ArgPair struct { + reg *Register + mem LocalSlot +} + +func (ap *ArgPair) Reg() int16 { + return ap.reg.objNum +} + +func (ap *ArgPair) Type() *types.Type { + return ap.mem.Type +} + +func (ap *ArgPair) Mem() *LocalSlot { + return &ap.mem +} + +func (t ArgPair) String() string { + n0 := "nil" + if t.reg != nil { + n0 = t.reg.String() + } + n1 := t.mem.String() + return fmt.Sprintf("<%s,%s>", n0, n1) +} diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index 977c5c3303321..7ba8c6d317d1d 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -766,6 +766,17 @@ type Auto struct { Gotype *LSym } +// RegArg provides spill/fill information for a register-resident argument +// to a function. These need spilling/filling in the safepoint/stackgrowth case. +// At the time of fill/spill, the offset must be adjusted by the architecture-dependent +// adjustment to hardware SP that occurs in a call instruction. E.g., for AMD64, +// at Offset+8 because the return address was pushed. +type RegArg struct { + Addr Addr + Reg int16 + Spill, Unspill As +} + // Link holds the context for writing object code from a compiler // to be linker input or for reading that input into the linker. 
type Link struct { @@ -796,10 +807,11 @@ type Link struct { DebugInfo func(fn *LSym, info *LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) // if non-nil, curfn is a *gc.Node GenAbstractFunc func(fn *LSym) Errors int + RegArgs []RegArg - InParallel bool // parallel backend phase in effect - UseBASEntries bool // use Base Address Selection Entries in location lists and PC ranges - IsAsm bool // is the source assembly language, which may contain surprising idioms (e.g., call tables) + InParallel bool // parallel backend phase in effect + UseBASEntries bool // use Base Address Selection Entries in location lists and PC ranges + IsAsm bool // is the source assembly language, which may contain surprising idioms (e.g., call tables) // state for writing objects Text []*LSym @@ -844,6 +856,32 @@ func (ctxt *Link) Logf(format string, args ...interface{}) { ctxt.Bso.Flush() } +func (ctxt *Link) SpillRegisterArgs(last *Prog, pa ProgAlloc) *Prog { + // Spill register args. + for _, ra := range ctxt.RegArgs { + spill := Appendp(last, pa) + spill.As = ra.Spill + spill.From.Type = TYPE_REG + spill.From.Reg = ra.Reg + spill.To = ra.Addr + last = spill + } + return last +} + +func (ctxt *Link) UnspillRegisterArgs(last *Prog, pa ProgAlloc) *Prog { + // Unspill any spilled register args + for _, ra := range ctxt.RegArgs { + unspill := Appendp(last, pa) + unspill.As = ra.Unspill + unspill.From = ra.Addr + unspill.To.Type = TYPE_REG + unspill.To.Reg = ra.Reg + last = unspill + } + return last +} + // The smallest possible offset from the hardware stack pointer to a local // variable on the stack. Architectures that use a link register save its value // on the stack in the function prologue and so always have a pointer between diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go index 839aeb8fe3dfd..1674db626fbb1 100644 --- a/src/cmd/internal/obj/x86/obj6.go +++ b/src/cmd/internal/obj/x86/obj6.go @@ -1114,7 +1114,8 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA spfix.Spadj = -framesize pcdata := ctxt.EmitEntryStackMap(cursym, spfix, newprog) - pcdata = ctxt.StartUnsafePoint(pcdata, newprog) + spill := ctxt.StartUnsafePoint(pcdata, newprog) + pcdata = ctxt.SpillRegisterArgs(spill, newprog) call := obj.Appendp(pcdata, newprog) call.Pos = cursym.Func().Text.Pos @@ -1139,7 +1140,8 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA progedit(ctxt, callend.Link, newprog) } - pcdata = ctxt.EndUnsafePoint(callend, newprog, -1) + pcdata = ctxt.UnspillRegisterArgs(callend, newprog) + pcdata = ctxt.EndUnsafePoint(pcdata, newprog, -1) jmp := obj.Appendp(pcdata, newprog) jmp.As = obj.AJMP @@ -1147,9 +1149,9 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA jmp.To.SetTarget(cursym.Func().Text.Link) jmp.Spadj = +framesize - jls.To.SetTarget(call) + jls.To.SetTarget(spill) if q1 != nil { - q1.To.SetTarget(call) + q1.To.SetTarget(spill) } return end From 861707a8c84f0b1ddbcaea0e9f439398ee2175fb Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 4 Jan 2021 13:32:10 -0500 Subject: [PATCH 375/474] [dev.regabi] cmd/compile: added limited //go:registerparams pragma for new ABI dev This only works for functions; if you try it with a method, it will fail. It does work for both local package and imports. For now, it tells you when it thinks it sees either a declaration or a call of such a function (this will normally be silent since no existing code uses this pragma). 
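Concretely, the pragma reads like other compiler directives; this mirrors the test added below in test/abi/regabipragma.dir/main.go:

	//go:noinline
	//go:registerparams
	func f(s, t string) string { // compiler warns: "Declared function f has register params"
		return s + " " + t
	}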
Note: it appears to be really darn hard to figure out if this pragma was set for a method, and the method's call site. Better ir.Node wranglers than I might be able to make headway, but it seemed unnecessary for this experiment. Change-Id: I601c2ddd124457bf6d62f714d7ac871705743c0a Reviewed-on: https://go-review.googlesource.com/c/go/+/279521 Trust: David Chase Run-TryBot: David Chase TryBot-Result: Go Bot Reviewed-by: Jeremy Faller --- src/cmd/compile/internal/ir/node.go | 3 ++ src/cmd/compile/internal/noder/lex.go | 3 ++ src/cmd/compile/internal/ssagen/ssa.go | 15 ++++++++ src/cmd/compile/internal/typecheck/iexport.go | 3 ++ src/cmd/compile/internal/typecheck/iimport.go | 3 ++ test/abi/regabipragma.dir/main.go | 36 +++++++++++++++++++ test/abi/regabipragma.dir/tmp/foo.go | 19 ++++++++++ test/abi/regabipragma.go | 9 +++++ test/abi/regabipragma.out | 6 ++++ test/run.go | 2 +- 10 files changed, 98 insertions(+), 1 deletion(-) create mode 100644 test/abi/regabipragma.dir/main.go create mode 100644 test/abi/regabipragma.dir/tmp/foo.go create mode 100644 test/abi/regabipragma.go create mode 100644 test/abi/regabipragma.out diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index a2b6e7203b35d..a1b09b38ccd54 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -452,6 +452,9 @@ const ( // Go command pragmas GoBuildPragma + + RegisterParams // TODO remove after register abi is working + ) func AsNode(n types.Object) Node { diff --git a/src/cmd/compile/internal/noder/lex.go b/src/cmd/compile/internal/noder/lex.go index 1095f3344a3f9..cdca9e55f336c 100644 --- a/src/cmd/compile/internal/noder/lex.go +++ b/src/cmd/compile/internal/noder/lex.go @@ -28,6 +28,7 @@ const ( ir.Nosplit | ir.Noinline | ir.NoCheckPtr | + ir.RegisterParams | // TODO remove after register abi is working ir.CgoUnsafeArgs | ir.UintptrEscapes | ir.Systemstack | @@ -79,6 +80,8 @@ func pragmaFlag(verb string) ir.PragmaFlag { // in the argument list. // Used in syscall/dll_windows.go. 
return ir.UintptrEscapes + case "go:registerparams": // TODO remove after register abi is working + return ir.RegisterParams case "go:notinheap": return ir.NotInHeap } diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 54bde20f1cdc9..3b542cf92a379 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -356,6 +356,13 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { if fn.Pragma&ir.Nosplit != 0 { s.f.NoSplit = true } + if fn.Pragma&ir.RegisterParams != 0 { // TODO remove after register abi is working + if strings.Contains(name, ".") { + base.ErrorfAt(fn.Pos(), "Calls to //go:registerparams method %s won't work, remove the pragma from the declaration.", name) + } + s.f.Warnl(fn.Pos(), "Declared function %s has register params", name) + } + s.panics = map[funcLine]*ssa.Block{} s.softFloat = s.config.SoftFloat @@ -4685,6 +4692,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val } testLateExpansion := false + inRegisters := false switch n.Op() { case ir.OCALLFUNC: @@ -4692,6 +4700,13 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val if k == callNormal && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC { fn := fn.(*ir.Name) sym = fn.Sym() + // TODO remove after register abi is working + inRegistersImported := fn.Pragma()&ir.RegisterParams != 0 + inRegistersSamePackage := fn.Func != nil && fn.Func.Pragma&ir.RegisterParams != 0 + inRegisters = inRegistersImported || inRegistersSamePackage + if inRegisters { + s.f.Warnl(n.Pos(), "Called function %s has register params", sym.Linksym().Name) + } break } closure = s.expr(fn) diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index 4d48b80346675..1ba8771139880 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -976,6 +976,9 @@ func (w *exportWriter) funcExt(n *ir.Name) { w.linkname(n.Sym()) w.symIdx(n.Sym()) + // TODO remove after register abi is working. + w.uint64(uint64(n.Func.Pragma)) + // Escape analysis. for _, fs := range &types.RecvsParams { for _, f := range fs(n.Type()).FieldSlice() { diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go index c9effabce003e..396d09263a428 100644 --- a/src/cmd/compile/internal/typecheck/iimport.go +++ b/src/cmd/compile/internal/typecheck/iimport.go @@ -647,6 +647,9 @@ func (r *importReader) funcExt(n *ir.Name) { r.linkname(n.Sym()) r.symIdx(n.Sym()) + // TODO remove after register abi is working + n.SetPragma(ir.PragmaFlag(r.uint64())) + // Escape analysis. for _, fs := range &types.RecvsParams { for _, f := range fs(n.Type()).FieldSlice() { diff --git a/test/abi/regabipragma.dir/main.go b/test/abi/regabipragma.dir/main.go new file mode 100644 index 0000000000000..d663337a10315 --- /dev/null +++ b/test/abi/regabipragma.dir/main.go @@ -0,0 +1,36 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "regabipragma.dir/tmp" +) + +type S string + +//go:noinline +func (s S) ff(t string) string { + return string(s) + " " + t +} + +//go:noinline +//go:registerparams +func f(s,t string) string { // ERROR "Declared function f has register params" + return s + " " + t +} + +func check(s string) { + if s != "Hello world!" 
{ + fmt.Printf("FAIL, wanted 'Hello world!' but got '%s'\n", s) + } +} + +func main() { + check(f("Hello", "world!")) // ERROR "Called function ...f has register params" + check(tmp.F("Hello", "world!")) // ERROR "Called function regabipragma.dir/tmp.F has register params" + check(S("Hello").ff("world!")) + check(tmp.S("Hello").FF("world!")) +} diff --git a/test/abi/regabipragma.dir/tmp/foo.go b/test/abi/regabipragma.dir/tmp/foo.go new file mode 100644 index 0000000000000..cff989bbcdba9 --- /dev/null +++ b/test/abi/regabipragma.dir/tmp/foo.go @@ -0,0 +1,19 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tmp + + +type S string + +//go:noinline +func (s S) FF(t string) string { + return string(s) + " " + t +} + +//go:noinline +//go:registerparams +func F(s,t string) string { + return s + " " + t +} diff --git a/test/abi/regabipragma.go b/test/abi/regabipragma.go new file mode 100644 index 0000000000000..93cdb6abbb1e9 --- /dev/null +++ b/test/abi/regabipragma.go @@ -0,0 +1,9 @@ +// runindir + +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO May delete or adapt this test once regabi is the default + +package ignore diff --git a/test/abi/regabipragma.out b/test/abi/regabipragma.out new file mode 100644 index 0000000000000..7803613351720 --- /dev/null +++ b/test/abi/regabipragma.out @@ -0,0 +1,6 @@ +# regabipragma.dir/tmp +tmp/foo.go:17:6: Declared function F has register params +# regabipragma.dir +./main.go:21:6: Declared function f has register params +./main.go:32:9: Called function "".f has register params +./main.go:33:13: Called function regabipragma.dir/tmp.F has register params diff --git a/test/run.go b/test/run.go index 1c516f4946f38..09f9717cc0c04 100644 --- a/test/run.go +++ b/test/run.go @@ -59,7 +59,7 @@ var ( // dirs are the directories to look for *.go files in. // TODO(bradfitz): just use all directories? - dirs = []string{".", "ken", "chan", "interface", "syntax", "dwarf", "fixedbugs", "codegen", "runtime"} + dirs = []string{".", "ken", "chan", "interface", "syntax", "dwarf", "fixedbugs", "codegen", "runtime", "abi"} // ratec controls the max number of tests running at a time. ratec chan bool From c41b999ad410c74bea222ee76488226a06ba4046 Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 8 Jan 2021 10:15:36 -0500 Subject: [PATCH 376/474] [dev.regabi] cmd/compile: refactor abiutils from "gc" into new "abi" Needs to be visible to ssagen, and might as well start clean to avoid creating a lot of accidental dependencies. Added some methods for export. Decided to use a pointer instead of value for ABIConfig uses. Tests ended up separate from abiutil itself; otherwise there are import cycles. 
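As a usage sketch of the newly exported surface (names taken from the diff below; fnType stands for an arbitrary function *types.Type):

	config := abi.NewABIConfig(9, 15) // AMD64: 9 integer, 15 floating-point registers
	info := abi.ABIAnalyze(fnType, config)
	for _, p := range info.InParams() {
		_ = p.Registers // registers assigned to this parameter, if any
	}
	_ = info.SpillAreaOffset() // frame offset of the register spill area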
Change-Id: I5570e1e6a463e303c5e2dc84e8dd4125e7c1adcc Reviewed-on: https://go-review.googlesource.com/c/go/+/282614 Trust: David Chase Run-TryBot: David Chase TryBot-Result: Go Bot Reviewed-by: Than McIntosh Reviewed-by: Jeremy Faller --- .../compile/internal/{gc => abi}/abiutils.go | 42 +++++++++++++++++-- .../internal/{gc => test}/abiutils_test.go | 10 ++--- .../internal/{gc => test}/abiutilsaux_test.go | 17 ++++---- 3 files changed, 50 insertions(+), 19 deletions(-) rename src/cmd/compile/internal/{gc => abi}/abiutils.go (91%) rename src/cmd/compile/internal/{gc => test}/abiutils_test.go (98%) rename src/cmd/compile/internal/{gc => test}/abiutilsaux_test.go (87%) diff --git a/src/cmd/compile/internal/gc/abiutils.go b/src/cmd/compile/internal/abi/abiutils.go similarity index 91% rename from src/cmd/compile/internal/gc/abiutils.go rename to src/cmd/compile/internal/abi/abiutils.go index 5822c088f969e..3ac59e6f756b0 100644 --- a/src/cmd/compile/internal/gc/abiutils.go +++ b/src/cmd/compile/internal/abi/abiutils.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package abi import ( "cmd/compile/internal/types" @@ -28,7 +28,35 @@ type ABIParamResultInfo struct { intSpillSlots int floatSpillSlots int offsetToSpillArea int64 - config ABIConfig // to enable String() method + config *ABIConfig // to enable String() method +} + +func (a *ABIParamResultInfo) InParams() []ABIParamAssignment { + return a.inparams +} + +func (a *ABIParamResultInfo) OutParams() []ABIParamAssignment { + return a.outparams +} + +func (a *ABIParamResultInfo) InParam(i int) ABIParamAssignment { + return a.inparams[i] +} + +func (a *ABIParamResultInfo) OutParam(i int) ABIParamAssignment { + return a.outparams[i] +} + +func (a *ABIParamResultInfo) IntSpillCount() int { + return a.intSpillSlots +} + +func (a *ABIParamResultInfo) FloatSpillCount() int { + return a.floatSpillSlots +} + +func (a *ABIParamResultInfo) SpillAreaOffset() int64 { + return a.offsetToSpillArea } // RegIndex stores the index into the set of machine registers used by @@ -66,11 +94,17 @@ type ABIConfig struct { regAmounts RegAmounts } +// NewABIConfig returns a new ABI configuration for an architecture with +// iRegsCount integer/pointer registers and fRegsCount floating point registers. +func NewABIConfig(iRegsCount, fRegsCount int) *ABIConfig { + return &ABIConfig{RegAmounts{iRegsCount, fRegsCount}} +} + // ABIAnalyze takes a function type 't' and an ABI rules description // 'config' and analyzes the function to determine how its parameters // and results will be passed (in registers or on the stack), returning // an ABIParamResultInfo object that holds the results of the analysis. -func ABIAnalyze(t *types.Type, config ABIConfig) ABIParamResultInfo { +func ABIAnalyze(t *types.Type, config *ABIConfig) ABIParamResultInfo { setup() s := assignState{ rTotal: config.regAmounts, @@ -124,7 +158,7 @@ func (c *RegAmounts) regString(r RegIndex) string { // toString method renders an ABIParamAssignment in human-readable // form, suitable for debugging or unit testing. 
-func (ri *ABIParamAssignment) toString(config ABIConfig) string { +func (ri *ABIParamAssignment) toString(config *ABIConfig) string { regs := "R{" for _, r := range ri.Registers { regs += " " + config.regAmounts.regString(r) diff --git a/src/cmd/compile/internal/gc/abiutils_test.go b/src/cmd/compile/internal/test/abiutils_test.go similarity index 98% rename from src/cmd/compile/internal/gc/abiutils_test.go rename to src/cmd/compile/internal/test/abiutils_test.go index 6fd0af1b1fbf9..ae7d484062968 100644 --- a/src/cmd/compile/internal/gc/abiutils_test.go +++ b/src/cmd/compile/internal/test/abiutils_test.go @@ -2,10 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package test import ( "bufio" + "cmd/compile/internal/abi" "cmd/compile/internal/base" "cmd/compile/internal/ssagen" "cmd/compile/internal/typecheck" @@ -20,12 +21,7 @@ import ( // AMD64 registers available: // - integer: RAX, RBX, RCX, RDI, RSI, R8, R9, r10, R11 // - floating point: X0 - X14 -var configAMD64 = ABIConfig{ - regAmounts: RegAmounts{ - intRegs: 9, - floatRegs: 15, - }, -} +var configAMD64 = abi.NewABIConfig(9,15) func TestMain(m *testing.M) { ssagen.Arch.LinkArch = &x86.Linkamd64 diff --git a/src/cmd/compile/internal/gc/abiutilsaux_test.go b/src/cmd/compile/internal/test/abiutilsaux_test.go similarity index 87% rename from src/cmd/compile/internal/gc/abiutilsaux_test.go rename to src/cmd/compile/internal/test/abiutilsaux_test.go index 9386b554b09d9..7b84e73947ea2 100644 --- a/src/cmd/compile/internal/gc/abiutilsaux_test.go +++ b/src/cmd/compile/internal/test/abiutilsaux_test.go @@ -2,12 +2,13 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gc +package test // This file contains utility routines and harness infrastructure used // by the ABI tests in "abiutils_test.go". import ( + "cmd/compile/internal/abi" "cmd/compile/internal/ir" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" @@ -75,7 +76,7 @@ func tokenize(src string) []string { return res } -func verifyParamResultOffset(t *testing.T, f *types.Field, r ABIParamAssignment, which string, idx int) int { +func verifyParamResultOffset(t *testing.T, f *types.Field, r abi.ABIParamAssignment, which string, idx int) int { n := ir.AsNode(f.Nname).(*ir.Name) if n.FrameOffset() != int64(r.Offset) { t.Errorf("%s %d: got offset %d wanted %d t=%v", @@ -110,7 +111,7 @@ func abitest(t *testing.T, ft *types.Type, exp expectedDump) { types.CalcSize(ft) // Analyze with full set of registers. - regRes := ABIAnalyze(ft, configAMD64) + regRes := abi.ABIAnalyze(ft, configAMD64) regResString := strings.TrimSpace(regRes.String()) // Check results. @@ -121,8 +122,8 @@ func abitest(t *testing.T, ft *types.Type, exp expectedDump) { } // Analyze again with empty register set. 
- empty := ABIConfig{} - emptyRes := ABIAnalyze(ft, empty) + empty := &abi.ABIConfig{} + emptyRes := abi.ABIAnalyze(ft, empty) emptyResString := emptyRes.String() // Walk the results and make sure the offsets assigned match @@ -135,18 +136,18 @@ func abitest(t *testing.T, ft *types.Type, exp expectedDump) { rfsl := ft.Recvs().Fields().Slice() poff := 0 if len(rfsl) != 0 { - failed |= verifyParamResultOffset(t, rfsl[0], emptyRes.inparams[0], "receiver", 0) + failed |= verifyParamResultOffset(t, rfsl[0], emptyRes.InParams()[0], "receiver", 0) poff = 1 } // params pfsl := ft.Params().Fields().Slice() for k, f := range pfsl { - verifyParamResultOffset(t, f, emptyRes.inparams[k+poff], "param", k) + verifyParamResultOffset(t, f, emptyRes.InParams()[k+poff], "param", k) } // results ofsl := ft.Results().Fields().Slice() for k, f := range ofsl { - failed |= verifyParamResultOffset(t, f, emptyRes.outparams[k], "result", k) + failed |= verifyParamResultOffset(t, f, emptyRes.OutParams()[k], "result", k) } if failed != 0 { From d6d467372854124795cdd11429244ef1e28b809c Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 12 Jan 2021 23:55:08 -0800 Subject: [PATCH 377/474] [dev.regabi] cmd/compile: fix GOEXPERIMENT=regabi builder I misread the FIXME comment in InitLSym the first time. It's referring to how InitLSym is supposed to be called exactly once per function (see function documentation), but this is evidently not actually the case currently in GOEXPERIMENT=regabi mode. So just move the NeedFuncSym call below the GOEXPERIMENT=regabi workaround. Also, to fix the linux-arm64-{aws,packet} builders, move the call to reflectdata.WriteFuncSyms() to after the second batch of functions are compiled. This is necessary to make sure we catch all the funcsyms that can be added by late function compilation. Change-Id: I6d6396d48e2ee29c1fb007fa2b99e065b36375db Reviewed-on: https://go-review.googlesource.com/c/go/+/283552 Run-TryBot: Matthew Dempsky Trust: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Keith Randall Reviewed-by: Than McIntosh --- src/cmd/compile/internal/gc/obj.go | 2 +- src/cmd/compile/internal/ssagen/abi.go | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 753db80f764b1..3e55b7688e8c9 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -111,7 +111,6 @@ func dumpdata() { numDecls := len(typecheck.Target.Decls) dumpglobls(typecheck.Target.Externs) - staticdata.WriteFuncSyms() reflectdata.CollectPTabs() numExports := len(typecheck.Target.Exports) addsignats(typecheck.Target.Externs) @@ -151,6 +150,7 @@ func dumpdata() { objw.Global(zero, int32(reflectdata.ZeroSize), obj.DUPOK|obj.RODATA) } + staticdata.WriteFuncSyms() addGCLocals() if numExports != len(typecheck.Target.Exports) { diff --git a/src/cmd/compile/internal/ssagen/abi.go b/src/cmd/compile/internal/ssagen/abi.go index dc27ec3a29f47..f1226f6a47f32 100644 --- a/src/cmd/compile/internal/ssagen/abi.go +++ b/src/cmd/compile/internal/ssagen/abi.go @@ -138,13 +138,12 @@ func ReadSymABIs(file, myimportpath string) { // For body-less functions, we only create the LSym; for functions // with bodies call a helper to setup up / populate the LSym. func InitLSym(f *ir.Func, hasBody bool) { - staticdata.NeedFuncSym(f.Sym()) - // FIXME: for new-style ABI wrappers, we set up the lsym at the // point the wrapper is created. 
if f.LSym != nil && base.Flag.ABIWrap { return } + staticdata.NeedFuncSym(f.Sym()) selectLSym(f, hasBody) if hasBody { setupTextLSym(f, 0) From 983ac4b08663ea9655abe99ca30faf47e54fdc16 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Wed, 13 Jan 2021 15:02:16 -0800 Subject: [PATCH 378/474] [dev.regabi] cmd/compile: fix ICE when initializing blank vars CL 278914 introduced NameOffsetExpr to avoid copying ONAME nodes and hacking up their offsets, but evidently staticinit subtly depended on the prior behavior to allow dynamic initialization of blank variables. This CL refactors the code somewhat to avoid using NameOffsetExpr with blank variables, and to instead create dynamic assignments directly to the global blank node. It also adds a check to NewNameOffsetExpr to guard against misuse like this, since I suspect there could be other cases still lurking within staticinit. (This code is overdue for an makeover anyway.) Thanks to thanm@ for bisect and test case minimization. Fixes #43677. Change-Id: Ic71cb5d6698382feb9548dc3bb9fd606b207a172 Reviewed-on: https://go-review.googlesource.com/c/go/+/283537 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Than McIntosh --- src/cmd/compile/internal/ir/expr.go | 3 ++ src/cmd/compile/internal/staticinit/sched.go | 33 +++++++++++--------- test/fixedbugs/issue43677.go | 18 +++++++++++ 3 files changed, 40 insertions(+), 14 deletions(-) create mode 100644 test/fixedbugs/issue43677.go diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 51425db42d431..0639c3b620a32 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -473,6 +473,9 @@ type NameOffsetExpr struct { } func NewNameOffsetExpr(pos src.XPos, name *Name, offset int64, typ *types.Type) *NameOffsetExpr { + if name == nil || IsBlank(name) { + base.FatalfAt(pos, "cannot take offset of nil or blank name: %v", name) + } n := &NameOffsetExpr{Name_: name, Offset_: offset} n.typ = typ n.op = ONAMEOFFSET diff --git a/src/cmd/compile/internal/staticinit/sched.go b/src/cmd/compile/internal/staticinit/sched.go index ac0b6cd87efec..64946ad2476d8 100644 --- a/src/cmd/compile/internal/staticinit/sched.go +++ b/src/cmd/compile/internal/staticinit/sched.go @@ -15,6 +15,7 @@ import ( "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" + "cmd/internal/src" ) type Entry struct { @@ -199,6 +200,20 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty r = r.(*ir.ConvExpr).X } + assign := func(pos src.XPos, a *ir.Name, aoff int64, v ir.Node) { + if s.StaticAssign(a, aoff, v, v.Type()) { + return + } + var lhs ir.Node + if ir.IsBlank(a) { + // Don't use NameOffsetExpr with blank (#43677). + lhs = ir.BlankNode + } else { + lhs = ir.NewNameOffsetExpr(pos, a, aoff, v.Type()) + } + s.append(ir.NewAssignStmt(pos, lhs, v)) + } + switch r.Op() { case ir.ONAME: r := r.(*ir.Name) @@ -237,9 +252,7 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty staticdata.InitAddr(l, loff, a, 0) // Init underlying literal. 
-		if !s.StaticAssign(a, 0, r.X, a.Type()) {
-			s.append(ir.NewAssignStmt(base.Pos, a, r.X))
-		}
+		assign(base.Pos, a, 0, r.X)
 		return true
 	}
 	//dump("not static ptrlit", r);
@@ -278,10 +291,7 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty
 			continue
 		}
 		ir.SetPos(e.Expr)
-		if !s.StaticAssign(l, loff+e.Xoffset, e.Expr, e.Expr.Type()) {
-			a := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, e.Expr.Type())
-			s.append(ir.NewAssignStmt(base.Pos, a, e.Expr))
-		}
+		assign(base.Pos, l, loff+e.Xoffset, e.Expr)
 	}

 	return true
@@ -345,17 +355,12 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty
 			}
 			// Copy val directly into n.
 			ir.SetPos(val)
-			if !s.StaticAssign(l, loff+int64(types.PtrSize), val, val.Type()) {
-				a := ir.NewNameOffsetExpr(base.Pos, l, loff+int64(types.PtrSize), val.Type())
-				s.append(ir.NewAssignStmt(base.Pos, a, val))
-			}
+			assign(base.Pos, l, loff+int64(types.PtrSize), val)
 		} else {
 			// Construct temp to hold val, write pointer to temp into n.
 			a := StaticName(val.Type())
 			s.Temps[val] = a
-			if !s.StaticAssign(a, 0, val, val.Type()) {
-				s.append(ir.NewAssignStmt(base.Pos, a, val))
-			}
+			assign(base.Pos, a, 0, val)
 			staticdata.InitAddr(l, loff+int64(types.PtrSize), a, 0)
 		}
diff --git a/test/fixedbugs/issue43677.go b/test/fixedbugs/issue43677.go
new file mode 100644
index 0000000000000..1a68c8b8b9c5d
--- /dev/null
+++ b/test/fixedbugs/issue43677.go
@@ -0,0 +1,18 @@
+// compile
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue #43677: ICE during compilation of dynamic initializers for
+// composite blank variables.
+
+package p
+
+func f() *int
+
+var _ = [2]*int{nil, f()}
+
+var _ = struct{ x, y *int }{nil, f()}
+
+var _ interface{} = f()

From 5a5ab24689b63b3c156a17103265c439c1e86df7 Mon Sep 17 00:00:00 2001
From: Cuong Manh Le
Date: Wed, 6 Jan 2021 10:47:35 +0700
Subject: [PATCH 379/474] [dev.regabi] cmd/compile: do not rely on CallExpr.Rargs to detect already-walked calls

Currently, there is an awkward issue with the walk pass. While walking
the AST, the compiler generates code for runtime functions (using the
mkcall* variants), adds to or modifies the AST, and then walks the newly
generated tree again. This causes some CallExpr nodes to be walked
twice, which the compiler guarded against by checking Rargs.

But the Rargs check has a problem of its own: for calls that take no
arguments, the check never fires, so those CallExpr nodes are still
walked twice.

This CL changes the way the compiler detects double walking, using a
dedicated flag instead of relying on Rargs. In a perfect world, the
compiler would walk the AST exactly once, but it is not safe to make
that change at this moment.

Passes toolstash -cmp.
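To make the failure mode concrete (hypothetical user code, not from the CL):

	func g() {}

	func h() {
		g() // a CallExpr with no arguments: len(n.Rargs) != 0 is never
		    // true for it, so the old guard never detected a re-walk
	}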
Change-Id: Ifdd1e0f98940ddb1f574af2da2ac7f005b5fcadd
Reviewed-on: https://go-review.googlesource.com/c/go/+/283672
Trust: Cuong Manh Le
Run-TryBot: Cuong Manh Le
TryBot-Result: Go Bot
Reviewed-by: Matthew Dempsky
---
 src/cmd/compile/internal/ir/mini.go   | 4 ++++
 src/cmd/compile/internal/walk/expr.go | 3 ++-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go
index 4dd9a8807aadf..429f4ed360bb0 100644
--- a/src/cmd/compile/internal/ir/mini.go
+++ b/src/cmd/compile/internal/ir/mini.go
@@ -58,6 +58,7 @@ const (
 	miniTypecheckShift = 2
 	miniDiag           = 1 << 4
 	miniHasCall        = 1 << 5 // for miniStmt
+	miniWalked         = 1 << 6 // to prevent/catch re-walking
 )

 func (n *miniNode) Typecheck() uint8 { return n.bits.get2(miniTypecheckShift) }
@@ -71,6 +72,9 @@ func (n *miniNode) SetTypecheck(x uint8) {
 func (n *miniNode) Diag() bool     { return n.bits&miniDiag != 0 }
 func (n *miniNode) SetDiag(x bool) { n.bits.set(miniDiag, x) }

+func (n *miniNode) Walked() bool     { return n.bits&miniWalked != 0 }
+func (n *miniNode) SetWalked(x bool) { n.bits.set(miniWalked, x) }
+
 // Empty, immutable graph structure.
 func (n *miniNode) Init() Nodes { return Nodes{} }

diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go
index 893a95f403198..449f8ea3ec0fd 100644
--- a/src/cmd/compile/internal/walk/expr.go
+++ b/src/cmd/compile/internal/walk/expr.go
@@ -497,9 +497,10 @@ func walkCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
 }

 func walkCall1(n *ir.CallExpr, init *ir.Nodes) {
-	if len(n.Rargs) != 0 {
+	if n.Walked() {
 		return // already walked
 	}
+	n.SetWalked(true)

 	// If this is a method call t.M(...),
 	// rewrite into a function call T.M(t, ...).

From 447630042588a14aec6680e624113258d3849d49 Mon Sep 17 00:00:00 2001
From: Cuong Manh Le
Date: Thu, 14 Jan 2021 11:30:27 +0700
Subject: [PATCH 380/474] [dev.regabi] cmd/compile: use byte for CallExpr.Use

Reduces the size of CallExpr by 16 bytes, from 184 to 168 on 64-bit
archs.

Passes toolstash -cmp.

Change-Id: I59c7609ccd03e8b4a7df8d2c30de8022ae312cee
Reviewed-on: https://go-review.googlesource.com/c/go/+/283732
Trust: Cuong Manh Le
Run-TryBot: Cuong Manh Le
Reviewed-by: Matthew Dempsky
TryBot-Result: Go Bot
---
 src/cmd/compile/internal/ir/expr.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
index 0639c3b620a32..39659c45c01b0 100644
--- a/src/cmd/compile/internal/ir/expr.go
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -145,7 +145,7 @@ func (n *BinaryExpr) SetOp(op Op) {
 }

 // A CallUse records how the result of the call is used:
-type CallUse int
+type CallUse byte

 const (
 	_ CallUse = iota

From f97983249a812c2b079a489fc990fbeb3695be4d Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Mon, 11 Jan 2021 22:58:23 -0800
Subject: [PATCH 381/474] [dev.regabi] cmd/compile: move more PAUTOHEAP to SSA construction

This CL moves almost all PAUTOHEAP handling code to SSA construction.
Instead of changing Names to PAUTOHEAP, escape analysis now only sets
n.Esc() to ir.EscHeap, and SSA handles creating the "&x"
pseudo-variables and associating them via Heapaddr.

This CL also gets rid of n.Stackcopy, which was used to distinguish
the heap copy of a parameter used within a function from the stack
copy used in the function calling convention. In practice, this is
always obvious from context: liveness and function prologue/epilogue
want to know about the stack copies, and everywhere else wants the
heap copy.

Hopefully moving all parameter/result handling into SSA helps with
making the register ABI stuff easier. Also, the only remaining uses of
PAUTOHEAP are now for closure variables, so I intend to rename it to
PCLOSUREVAR or get rid of those altogether too. But this CL is already
big and scary enough.
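For intuition, consider a hypothetical escaping variable (not code from the CL):

	func f(x int) *int {
		return &x // escape analysis marks x as ir.EscHeap; SSA construction
		          // then heap-allocates x and records the pointer in the
		          // "&x" pseudo-variable referenced by x's Heapaddr field
	}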
Hopefully moving all parameter/result handling into SSA helps with making the register ABI stuff easier. Also, the only remaining uses of PAUTOHEAP are now for closure variables, so I intend to rename it to PCLOSUREVAR or get rid of those altogether too. But this CL is already big and scary enough. Change-Id: Ief5ef6205041b9d0ee445314310c0c5a98187e77 Reviewed-on: https://go-review.googlesource.com/c/go/+/283233 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Reviewed-by: David Chase --- src/cmd/compile/internal/dwarfgen/dwarf.go | 16 +- src/cmd/compile/internal/escape/escape.go | 169 +---------------- src/cmd/compile/internal/gc/compile.go | 15 +- src/cmd/compile/internal/inline/inl.go | 7 - src/cmd/compile/internal/ir/name.go | 46 ++--- src/cmd/compile/internal/ir/sizeof_test.go | 2 +- src/cmd/compile/internal/liveness/plive.go | 4 +- src/cmd/compile/internal/ssagen/ssa.go | 205 ++++++++++++++------- src/cmd/compile/internal/walk/assign.go | 18 +- src/cmd/compile/internal/walk/complit.go | 4 +- src/cmd/compile/internal/walk/stmt.go | 19 +- src/cmd/compile/internal/walk/walk.go | 111 ----------- 12 files changed, 192 insertions(+), 424 deletions(-) diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go index ff249c1f4ecc3..2440e3c8d3c59 100644 --- a/src/cmd/compile/internal/dwarfgen/dwarf.go +++ b/src/cmd/compile/internal/dwarfgen/dwarf.go @@ -186,19 +186,11 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir isReturnValue := (n.Class == ir.PPARAMOUT) if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT { abbrev = dwarf.DW_ABRV_PARAM_LOCLIST - } else if n.Class == ir.PAUTOHEAP { - // If dcl in question has been promoted to heap, do a bit - // of extra work to recover original class (auto or param); - // see issue 30908. This insures that we get the proper - // signature in the abstract function DIE, but leaves a - // misleading location for the param (we want pointer-to-heap - // and not stack). + } + if n.Esc() == ir.EscHeap { + // The variable in question has been promoted to the heap. + // Its address is in n.Heapaddr. // TODO(thanm): generate a better location expression - stackcopy := n.Stackcopy - if stackcopy != nil && (stackcopy.Class == ir.PPARAM || stackcopy.Class == ir.PPARAMOUT) { - abbrev = dwarf.DW_ABRV_PARAM_LOCLIST - isReturnValue = (stackcopy.Class == ir.PPARAMOUT) - } } inlIndex := 0 if base.Flag.GenDwarfInl > 1 { diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index bee3878f10039..79e5a98c91539 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -1658,7 +1658,14 @@ func (b *batch) finish(fns []*ir.Func) { // Update n.Esc based on escape analysis results. 
if loc.escapes { - if n.Op() != ir.ONAME { + if n.Op() == ir.ONAME { + if base.Flag.CompilingRuntime { + base.ErrorfAt(n.Pos(), "%v escapes to heap, not allowed in runtime", n) + } + if base.Flag.LowerM != 0 { + base.WarnfAt(n.Pos(), "moved to heap: %v", n) + } + } else { if base.Flag.LowerM != 0 { base.WarnfAt(n.Pos(), "%v escapes to heap", n) } @@ -1668,7 +1675,6 @@ func (b *batch) finish(fns []*ir.Func) { } } n.SetEsc(ir.EscHeap) - addrescapes(n) } else { if base.Flag.LowerM != 0 && n.Op() != ir.ONAME { base.WarnfAt(n.Pos(), "%v does not escape", n) @@ -2014,165 +2020,6 @@ func HeapAllocReason(n ir.Node) string { return "" } -// addrescapes tags node n as having had its address taken -// by "increasing" the "value" of n.Esc to EscHeap. -// Storage is allocated as necessary to allow the address -// to be taken. -func addrescapes(n ir.Node) { - switch n.Op() { - default: - // Unexpected Op, probably due to a previous type error. Ignore. - - case ir.ODEREF, ir.ODOTPTR: - // Nothing to do. - - case ir.ONAME: - n := n.(*ir.Name) - if n == ir.RegFP { - break - } - - // if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping. - // on PPARAM it means something different. - if n.Class == ir.PAUTO && n.Esc() == ir.EscNever { - break - } - - // If a closure reference escapes, mark the outer variable as escaping. - if n.IsClosureVar() { - addrescapes(n.Defn) - break - } - - if n.Class != ir.PPARAM && n.Class != ir.PPARAMOUT && n.Class != ir.PAUTO { - break - } - - // This is a plain parameter or local variable that needs to move to the heap, - // but possibly for the function outside the one we're compiling. - // That is, if we have: - // - // func f(x int) { - // func() { - // global = &x - // } - // } - // - // then we're analyzing the inner closure but we need to move x to the - // heap in f, not in the inner closure. Flip over to f before calling moveToHeap. - oldfn := ir.CurFunc - ir.CurFunc = n.Curfn - ln := base.Pos - base.Pos = ir.CurFunc.Pos() - moveToHeap(n) - ir.CurFunc = oldfn - base.Pos = ln - - // ODOTPTR has already been introduced, - // so these are the non-pointer ODOT and OINDEX. - // In &x[0], if x is a slice, then x does not - // escape--the pointer inside x does, but that - // is always a heap pointer anyway. - case ir.ODOT: - n := n.(*ir.SelectorExpr) - addrescapes(n.X) - case ir.OINDEX: - n := n.(*ir.IndexExpr) - if !n.X.Type().IsSlice() { - addrescapes(n.X) - } - case ir.OPAREN: - n := n.(*ir.ParenExpr) - addrescapes(n.X) - case ir.OCONVNOP: - n := n.(*ir.ConvExpr) - addrescapes(n.X) - } -} - -// moveToHeap records the parameter or local variable n as moved to the heap. -func moveToHeap(n *ir.Name) { - if base.Flag.LowerR != 0 { - ir.Dump("MOVE", n) - } - if base.Flag.CompilingRuntime { - base.Errorf("%v escapes to heap, not allowed in runtime", n) - } - if n.Class == ir.PAUTOHEAP { - ir.Dump("n", n) - base.Fatalf("double move to heap") - } - - // Allocate a local stack variable to hold the pointer to the heap copy. - // temp will add it to the function declaration list automatically. - heapaddr := typecheck.Temp(types.NewPtr(n.Type())) - heapaddr.SetSym(typecheck.Lookup("&" + n.Sym().Name)) - heapaddr.SetPos(n.Pos()) - - // Unset AutoTemp to persist the &foo variable name through SSA to - // liveness analysis. - // TODO(mdempsky/drchase): Cleaner solution? - heapaddr.SetAutoTemp(false) - - // Parameters have a local stack copy used at function start/end - // in addition to the copy in the heap that may live longer than - // the function. 
- if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT { - if n.FrameOffset() == types.BADWIDTH { - base.Fatalf("addrescapes before param assignment") - } - - // We rewrite n below to be a heap variable (indirection of heapaddr). - // Preserve a copy so we can still write code referring to the original, - // and substitute that copy into the function declaration list - // so that analyses of the local (on-stack) variables use it. - stackcopy := typecheck.NewName(n.Sym()) - stackcopy.SetType(n.Type()) - stackcopy.SetFrameOffset(n.FrameOffset()) - stackcopy.Class = n.Class - stackcopy.Heapaddr = heapaddr - if n.Class == ir.PPARAMOUT { - // Make sure the pointer to the heap copy is kept live throughout the function. - // The function could panic at any point, and then a defer could recover. - // Thus, we need the pointer to the heap copy always available so the - // post-deferreturn code can copy the return value back to the stack. - // See issue 16095. - heapaddr.SetIsOutputParamHeapAddr(true) - } - n.Stackcopy = stackcopy - - // Substitute the stackcopy into the function variable list so that - // liveness and other analyses use the underlying stack slot - // and not the now-pseudo-variable n. - found := false - for i, d := range ir.CurFunc.Dcl { - if d == n { - ir.CurFunc.Dcl[i] = stackcopy - found = true - break - } - // Parameters are before locals, so can stop early. - // This limits the search even in functions with many local variables. - if d.Class == ir.PAUTO { - break - } - } - if !found { - base.Fatalf("cannot find %v in local variable list", n) - } - ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n) - } - - // Modify n in place so that uses of n now mean indirection of the heapaddr. - n.Class = ir.PAUTOHEAP - n.SetFrameOffset(0) - n.Heapaddr = heapaddr - n.SetEsc(ir.EscHeap) - if base.Flag.LowerM != 0 { - base.WarnfAt(n.Pos(), "moved to heap: %v", n) - } -} - // This special tag is applied to uintptr variables // that we believe may hold unsafe.Pointers for // calls into assembly functions. diff --git a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go index 410b3e90ea551..a8a0106320e94 100644 --- a/src/cmd/compile/internal/gc/compile.go +++ b/src/cmd/compile/internal/gc/compile.go @@ -90,15 +90,12 @@ func prepareFunc(fn *ir.Func) { // because symbols must be allocated before the parallel // phase of the compiler. for _, n := range fn.Dcl { - switch n.Class { - case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO: - if liveness.ShouldTrack(n) && n.Addrtaken() { - reflectdata.WriteType(n.Type()) - // Also make sure we allocate a linker symbol - // for the stack object data, for the same reason. - if fn.LSym.Func().StackObjects == nil { - fn.LSym.Func().StackObjects = base.Ctxt.Lookup(fn.LSym.Name + ".stkobj") - } + if liveness.ShouldTrack(n) && n.Addrtaken() { + reflectdata.WriteType(n.Type()) + // Also make sure we allocate a linker symbol + // for the stack object data, for the same reason. + if fn.LSym.Func().StackObjects == nil { + fn.LSym.Func().StackObjects = base.Ctxt.Lookup(fn.LSym.Name + ".stkobj") } } } diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 6f5f6499ced51..1811feebe9801 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -762,13 +762,6 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b if ln.Class == ir.PPARAMOUT { // return values handled below. 
continue } - if ir.IsParamStackCopy(ln) { // ignore the on-stack copy of a parameter that moved to the heap - // TODO(mdempsky): Remove once I'm confident - // this never actually happens. We currently - // perform inlining before escape analysis, so - // nothing should have moved to the heap yet. - base.Fatalf("impossible: %v", ln) - } inlf := typecheck.Expr(inlvar(ln)).(*ir.Name) inlvars[ln] = inlf if base.Flag.GenDwarfInl > 0 { diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 514b303893f5f..d19b0440e6808 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -58,9 +58,6 @@ type Name struct { Ntype Ntype Heapaddr *Name // temp holding heap address of param - // ONAME PAUTOHEAP - Stackcopy *Name // the PPARAM/PPARAMOUT on-stack slot (moved func params only) - // ONAME closure linkage // Consider: // @@ -150,12 +147,7 @@ func (n *Name) TypeDefn() *types.Type { // RecordFrameOffset records the frame offset for the name. // It is used by package types when laying out function arguments. func (n *Name) RecordFrameOffset(offset int64) { - if n.Stackcopy != nil { - n.Stackcopy.SetFrameOffset(offset) - n.SetFrameOffset(0) - } else { - n.SetFrameOffset(offset) - } + n.SetFrameOffset(offset) } // NewNameAt returns a new ONAME Node associated with symbol s at position pos. @@ -292,6 +284,22 @@ func (n *Name) SetInlLocal(b bool) { n.flags.set(nameInlLocal, b) } func (n *Name) SetOpenDeferSlot(b bool) { n.flags.set(nameOpenDeferSlot, b) } func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtraCounter, b) } +// OnStack reports whether variable n may reside on the stack. +func (n *Name) OnStack() bool { + if n.Op() != ONAME || n.Class == PFUNC { + base.Fatalf("%v is not a variable", n) + } + switch n.Class { + case PPARAM, PPARAMOUT, PAUTO: + return n.Esc() != EscHeap + case PEXTERN, PAUTOHEAP: + return false + default: + base.FatalfAt(n.Pos(), "%v has unknown class %v", n, n.Class) + panic("unreachable") + } +} + // MarkReadonly indicates that n is an ONAME with readonly contents. func (n *Name) MarkReadonly() { if n.Op() != ONAME { @@ -501,24 +509,4 @@ func NewPkgName(pos src.XPos, sym *types.Sym, pkg *types.Pkg) *PkgName { return p } -// IsParamStackCopy reports whether this is the on-stack copy of a -// function parameter that moved to the heap. -func IsParamStackCopy(n Node) bool { - if n.Op() != ONAME { - return false - } - name := n.(*Name) - return (name.Class == PPARAM || name.Class == PPARAMOUT) && name.Heapaddr != nil -} - -// IsParamHeapCopy reports whether this is the on-heap copy of -// a function parameter that moved to the heap. 
-func IsParamHeapCopy(n Node) bool { - if n.Op() != ONAME { - return false - } - name := n.(*Name) - return name.Class == PAUTOHEAP && name.Stackcopy != nil -} - var RegFP *Name diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go index 553dc5376098b..d8c1518b90f34 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -21,7 +21,7 @@ func TestSizeof(t *testing.T) { _64bit uintptr // size on 64bit platforms }{ {Func{}, 188, 328}, - {Name{}, 116, 208}, + {Name{}, 112, 200}, } for _, tt := range tests { diff --git a/src/cmd/compile/internal/liveness/plive.go b/src/cmd/compile/internal/liveness/plive.go index 8d1754c81380b..abc9583d5ac14 100644 --- a/src/cmd/compile/internal/liveness/plive.go +++ b/src/cmd/compile/internal/liveness/plive.go @@ -181,7 +181,7 @@ type progeffectscache struct { // nor do we care about empty structs (handled by the pointer check), // nor do we care about the fake PAUTOHEAP variables. func ShouldTrack(n *ir.Name) bool { - return (n.Class == ir.PAUTO || n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT) && n.Type().HasPointers() + return (n.Class == ir.PAUTO && n.Esc() != ir.EscHeap || n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT) && n.Type().HasPointers() } // getvariables returns the list of on-stack variables that we need to track @@ -788,7 +788,7 @@ func (lv *liveness) epilogue() { if n.Class == ir.PPARAM { continue // ok } - base.Fatalf("bad live variable at entry of %v: %L", lv.fn.Nname, n) + base.FatalfAt(n.Pos(), "bad live variable at entry of %v: %L", lv.fn.Nname, n) } // Record live variables. diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 3b542cf92a379..ab2e21bea064c 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -399,11 +399,20 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { } if s.hasOpenDefers && len(s.curfn.Exit) > 0 { // Skip doing open defers if there is any extra exit code (likely - // copying heap-allocated return values or race detection), since - // we will not generate that code in the case of the extra - // deferreturn/ret segment. + // race detection), since we will not generate that code in the + // case of the extra deferreturn/ret segment. s.hasOpenDefers = false } + if s.hasOpenDefers { + // Similarly, skip if there are any heap-allocated result + // parameters that need to be copied back to their stack slots. + for _, f := range s.curfn.Type().Results().FieldSlice() { + if !f.Nname.(*ir.Name).OnStack() { + s.hasOpenDefers = false + break + } + } + } if s.hasOpenDefers && s.curfn.NumReturns*s.curfn.NumDefers > 15 { // Since we are generating defer calls at every exit for @@ -450,19 +459,9 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { case ir.PPARAMOUT: s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem) results = append(results, ssa.Param{Type: n.Type(), Offset: int32(n.FrameOffset())}) - if s.canSSA(n) { - // Save ssa-able PPARAMOUT variables so we can - // store them back to the stack at the end of - // the function. - s.returns = append(s.returns, n) - } case ir.PAUTO: // processed at each use, to prevent Addr coming // before the decl. 
- case ir.PAUTOHEAP: - // moved to heap - already handled by frontend - case ir.PFUNC: - // local function - already handled by frontend default: s.Fatalf("local variable with class %v unimplemented", n.Class) } @@ -488,38 +487,28 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { } offset = types.Rnd(offset, typ.Alignment()) - r := s.newValue1I(ssa.OpOffPtr, types.NewPtr(typ), offset, clo) + ptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(typ), offset, clo) offset += typ.Size() if n.Byval() && TypeOK(n.Type()) { // If it is a small variable captured by value, downgrade it to PAUTO. - r = s.load(n.Type(), r) - n.Class = ir.PAUTO - } else { - if !n.Byval() { - r = s.load(typ, r) - } - - // Declare variable holding address taken from closure. - addr := ir.NewNameAt(fn.Pos(), &types.Sym{Name: "&" + n.Sym().Name, Pkg: types.LocalPkg}) - addr.SetType(types.NewPtr(n.Type())) - addr.Class = ir.PAUTO - addr.SetUsed(true) - addr.Curfn = fn - types.CalcSize(addr.Type()) - - n.Heapaddr = addr - n = addr + fn.Dcl = append(fn.Dcl, n) + s.assign(n, s.load(n.Type(), ptr), false, 0) + continue } - fn.Dcl = append(fn.Dcl, n) - s.assign(n, r, false, 0) + if !n.Byval() { + ptr = s.load(typ, ptr) + } + s.setHeapaddr(fn.Pos(), n, ptr) } } // Convert the AST-based IR to the SSA-based IR s.stmtList(fn.Enter) + s.zeroResults() + s.paramsToHeap() s.stmtList(fn.Body) // fallthrough to exit @@ -547,6 +536,100 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { return s.f } +// zeroResults zeros the return values at the start of the function. +// We need to do this very early in the function. Defer might stop a +// panic and show the return values as they exist at the time of +// panic. For precise stacks, the garbage collector assumes results +// are always live, so we need to zero them before any allocations, +// even allocations to move params/results to the heap. +func (s *state) zeroResults() { + for _, f := range s.curfn.Type().Results().FieldSlice() { + n := f.Nname.(*ir.Name) + if !n.OnStack() { + // The local which points to the return value is the + // thing that needs zeroing. This is already handled + // by a Needzero annotation in plive.go:(*liveness).epilogue. + continue + } + // Zero the stack location containing f. + if typ := n.Type(); TypeOK(typ) { + s.assign(n, s.zeroVal(typ), false, 0) + } else { + s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem()) + s.zero(n.Type(), s.decladdrs[n]) + } + } +} + +// paramsToHeap produces code to allocate memory for heap-escaped parameters +// and to copy non-result parameters' values from the stack. +func (s *state) paramsToHeap() { + do := func(params *types.Type) { + for _, f := range params.FieldSlice() { + if f.Nname == nil { + continue // anonymous or blank parameter + } + n := f.Nname.(*ir.Name) + if ir.IsBlank(n) || n.OnStack() { + continue + } + s.newHeapaddr(n) + if n.Class == ir.PPARAM { + s.move(n.Type(), s.expr(n.Heapaddr), s.decladdrs[n]) + } + } + } + + typ := s.curfn.Type() + do(typ.Recvs()) + do(typ.Params()) + do(typ.Results()) +} + +// newHeapaddr allocates heap memory for n and sets its heap address. +func (s *state) newHeapaddr(n *ir.Name) { + s.setHeapaddr(n.Pos(), n, s.newObject(n.Type())) +} + +// setHeapaddr allocates a new PAUTO variable to store ptr (which must be non-nil) +// and then sets it as n's heap address. 
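+// The address variable is appended to s.curfn.Dcl so it is laid out
+// like any other local, and result parameters are additionally marked
+// with SetIsOutputParamHeapAddr so later passes can identify them.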
+func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) { + if !ptr.Type.IsPtr() || !types.Identical(n.Type(), ptr.Type.Elem()) { + base.FatalfAt(n.Pos(), "setHeapaddr %L with type %v", n, ptr.Type) + } + + // Declare variable to hold address. + addr := ir.NewNameAt(pos, &types.Sym{Name: "&" + n.Sym().Name, Pkg: types.LocalPkg}) + addr.SetType(types.NewPtr(n.Type())) + addr.Class = ir.PAUTO + addr.SetUsed(true) + addr.Curfn = s.curfn + s.curfn.Dcl = append(s.curfn.Dcl, addr) + types.CalcSize(addr.Type()) + + if n.Class == ir.PPARAMOUT { + addr.SetIsOutputParamHeapAddr(true) + } + + n.Heapaddr = addr + s.assign(addr, ptr, false, 0) +} + +// newObject returns an SSA value denoting new(typ). +func (s *state) newObject(typ *types.Type) *ssa.Value { + if typ.Size() == 0 { + return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb) + } + return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, s.reflectType(typ))[0] +} + +// reflectType returns an SSA value representing a pointer to typ's +// reflection type descriptor. +func (s *state) reflectType(typ *types.Type) *ssa.Value { + lsym := reflectdata.TypeLinksym(typ) + return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(types.Types[types.TUINT8]), lsym, s.sb) +} + func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Func) { // Read sources of target function fn. fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename() @@ -682,7 +765,7 @@ type state struct { // all defined variables at the end of each block. Indexed by block ID. defvars []map[ir.Node]*ssa.Value - // addresses of PPARAM and PPARAMOUT variables. + // addresses of PPARAM and PPARAMOUT variables on the stack. decladdrs map[*ir.Name]*ssa.Value // starting values. Memory, stack pointer, and globals pointer @@ -702,9 +785,6 @@ type state struct { // Used to deduplicate panic calls. panics map[funcLine]*ssa.Block - // list of PPARAMOUT (return) variables. - returns []*ir.Name - cgoUnsafeArgs bool hasdefer bool // whether the function contains a defer statement softFloat bool @@ -1290,8 +1370,8 @@ func (s *state) stmt(n ir.Node) { case ir.ODCL: n := n.(*ir.Decl) - if n.X.Class == ir.PAUTOHEAP { - s.Fatalf("DCL %v", n) + if v := n.X; v.Esc() == ir.EscHeap { + s.newHeapaddr(v) } case ir.OLABEL: @@ -1727,21 +1807,25 @@ func (s *state) exit() *ssa.Block { } } - // Run exit code. Typically, this code copies heap-allocated PPARAMOUT - // variables back to the stack. - s.stmtList(s.curfn.Exit) - - // Store SSAable PPARAMOUT variables back to stack locations. - for _, n := range s.returns { - addr := s.decladdrs[n] - val := s.variable(n, n.Type()) - s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem()) - s.store(n.Type(), addr, val) + // Store SSAable and heap-escaped PPARAMOUT variables back to stack locations. + for _, f := range s.curfn.Type().Results().FieldSlice() { + n := f.Nname.(*ir.Name) + if s.canSSA(n) { + val := s.variable(n, n.Type()) + s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem()) + s.store(n.Type(), s.decladdrs[n], val) + } else if !n.OnStack() { + s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem()) + s.move(n.Type(), s.decladdrs[n], s.expr(n.Heapaddr)) + } // TODO: if val is ever spilled, we'd like to use the // PPARAMOUT slot for spilling it. That won't happen // currently. } + // Run exit code. Today, this is just raceexit, in -race mode. + s.stmtList(s.curfn.Exit) + // Do actual return. 
m := s.mem() b := s.endBlock() @@ -2945,12 +3029,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { case ir.ONEWOBJ: n := n.(*ir.UnaryExpr) - if n.Type().Elem().Size() == 0 { - return s.newValue1A(ssa.OpAddr, n.Type(), ir.Syms.Zerobase, s.sb) - } - typ := s.expr(n.X) - vv := s.rtcall(ir.Syms.Newobject, true, []*types.Type{n.Type()}, typ) - return vv[0] + return s.newObject(n.Type().Elem()) default: s.Fatalf("unhandled expr %v", n.Op()) @@ -3267,7 +3346,7 @@ func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask // If this assignment clobbers an entire local variable, then emit // OpVarDef so liveness analysis knows the variable is redefined. - if base, ok := clobberBase(left).(*ir.Name); ok && base.Op() == ir.ONAME && base.Class != ir.PEXTERN && base.Class != ir.PAUTOHEAP && skip == 0 { + if base, ok := clobberBase(left).(*ir.Name); ok && base.OnStack() && skip == 0 { s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !ir.IsAutoTmp(base)) } @@ -5011,6 +5090,9 @@ func (s *state) addr(n ir.Node) *ssa.Value { fallthrough case ir.ONAME: n := n.(*ir.Name) + if n.Heapaddr != nil { + return s.expr(n.Heapaddr) + } switch n.Class { case ir.PEXTERN: // global variable @@ -5039,8 +5121,6 @@ func (s *state) addr(n ir.Node) *ssa.Value { // ensure that we reuse symbols for out parameters so // that cse works on their addresses return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true) - case ir.PAUTOHEAP: - return s.expr(n.Heapaddr) default: s.Fatalf("variable address class %v not implemented", n.Class) return nil @@ -5141,15 +5221,10 @@ func (s *state) canSSA(n ir.Node) bool { } func (s *state) canSSAName(name *ir.Name) bool { - if name.Addrtaken() { - return false - } - if ir.IsParamHeapCopy(name) { + if name.Addrtaken() || !name.OnStack() { return false } switch name.Class { - case ir.PEXTERN, ir.PAUTOHEAP: - return false case ir.PPARAMOUT: if s.hasdefer { // TODO: handle this case? Named return values must be @@ -6399,7 +6474,7 @@ func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func emitStackObjects(e *ssafn, pp *objw.Progs) { var vars []*ir.Name for _, n := range e.curfn.Dcl { - if liveness.ShouldTrack(n) && n.Addrtaken() { + if liveness.ShouldTrack(n) && n.Addrtaken() && n.Esc() != ir.EscHeap { vars = append(vars, n) } } diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go index 3fe810ac4ea48..4043d7574adc1 100644 --- a/src/cmd/compile/internal/walk/assign.go +++ b/src/cmd/compile/internal/walk/assign.go @@ -392,11 +392,7 @@ func ascompatee(op ir.Op, nl, nr []ir.Node) []ir.Node { appendWalkStmt(&late, convas(ir.NewAssignStmt(base.Pos, lorig, r), &late)) - if name == nil || name.Addrtaken() || name.Class == ir.PEXTERN || name.Class == ir.PAUTOHEAP { - memWrite = true - continue - } - if ir.IsBlank(name) { + if name != nil && ir.IsBlank(name) { // We can ignore assignments to blank. continue } @@ -405,7 +401,12 @@ func ascompatee(op ir.Op, nl, nr []ir.Node) []ir.Node { // parameters. These can't appear in expressions anyway. continue } - assigned.Add(name) + + if name != nil && name.OnStack() && !name.Addrtaken() { + assigned.Add(name) + } else { + memWrite = true + } } early.Append(late.Take()...) 
@@ -418,7 +419,10 @@ func readsMemory(n ir.Node) bool { switch n.Op() { case ir.ONAME: n := n.(*ir.Name) - return n.Class == ir.PEXTERN || n.Class == ir.PAUTOHEAP || n.Addrtaken() + if n.Class == ir.PFUNC { + return false + } + return n.Addrtaken() || !n.OnStack() case ir.OADD, ir.OAND, diff --git a/src/cmd/compile/internal/walk/complit.go b/src/cmd/compile/internal/walk/complit.go index 8a77bba2ad9fb..f82ef69ca9919 100644 --- a/src/cmd/compile/internal/walk/complit.go +++ b/src/cmd/compile/internal/walk/complit.go @@ -64,11 +64,11 @@ func readonlystaticname(t *types.Type) *ir.Name { } func isSimpleName(nn ir.Node) bool { - if nn.Op() != ir.ONAME { + if nn.Op() != ir.ONAME || ir.IsBlank(nn) { return false } n := nn.(*ir.Name) - return n.Class != ir.PAUTOHEAP && n.Class != ir.PEXTERN + return n.OnStack() } func litas(l ir.Node, r ir.Node, init *ir.Nodes) { diff --git a/src/cmd/compile/internal/walk/stmt.go b/src/cmd/compile/internal/walk/stmt.go index 1df491bd4e9d7..d892b2413f138 100644 --- a/src/cmd/compile/internal/walk/stmt.go +++ b/src/cmd/compile/internal/walk/stmt.go @@ -86,6 +86,7 @@ func walkStmt(n ir.Node) ir.Node { ir.OFALL, ir.OGOTO, ir.OLABEL, + ir.ODCL, ir.ODCLCONST, ir.ODCLTYPE, ir.OCHECKNIL, @@ -94,10 +95,6 @@ func walkStmt(n ir.Node) ir.Node { ir.OVARLIVE: return n - case ir.ODCL: - n := n.(*ir.Decl) - return walkDecl(n) - case ir.OBLOCK: n := n.(*ir.BlockStmt) walkStmtList(n.List) @@ -173,20 +170,6 @@ func walkStmtList(s []ir.Node) { } } -// walkDecl walks an ODCL node. -func walkDecl(n *ir.Decl) ir.Node { - v := n.X - if v.Class == ir.PAUTOHEAP { - if base.Flag.CompilingRuntime { - base.Errorf("%v escapes to heap, not allowed in runtime", v) - } - nn := ir.NewAssignStmt(base.Pos, v.Heapaddr, callnew(v.Type())) - nn.Def = true - return walkStmt(typecheck.Stmt(nn)) - } - return n -} - // walkFor walks an OFOR or OFORUNTIL node. func walkFor(n *ir.ForStmt) ir.Node { if n.Cond != nil { diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go index e780a9066031d..71f018fe3e3e6 100644 --- a/src/cmd/compile/internal/walk/walk.go +++ b/src/cmd/compile/internal/walk/walk.go @@ -7,7 +7,6 @@ package walk import ( "errors" "fmt" - "strings" "cmd/compile/internal/base" "cmd/compile/internal/ir" @@ -47,35 +46,11 @@ func Walk(fn *ir.Func) { ir.DumpList(s, ir.CurFunc.Body) } - zeroResults() - heapmoves() - if base.Flag.W != 0 && len(ir.CurFunc.Enter) > 0 { - s := fmt.Sprintf("enter %v", ir.CurFunc.Sym()) - ir.DumpList(s, ir.CurFunc.Enter) - } - if base.Flag.Cfg.Instrumenting { instrument(fn) } } -func paramoutheap(fn *ir.Func) bool { - for _, ln := range fn.Dcl { - switch ln.Class { - case ir.PPARAMOUT: - if ir.IsParamStackCopy(ln) || ln.Addrtaken() { - return true - } - - case ir.PAUTO: - // stop early - parameters are over - return false - } - } - - return false -} - // walkRecv walks an ORECV node. func walkRecv(n *ir.UnaryExpr) ir.Node { if n.Typecheck() == 0 { @@ -122,92 +97,6 @@ func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt { var stop = errors.New("stop") -// paramstoheap returns code to allocate memory for heap-escaped parameters -// and to copy non-result parameters' values from the stack. 
-func paramstoheap(params *types.Type) []ir.Node {
-	var nn []ir.Node
-	for _, t := range params.Fields().Slice() {
-		v := ir.AsNode(t.Nname)
-		if v != nil && v.Sym() != nil && strings.HasPrefix(v.Sym().Name, "~r") { // unnamed result
-			v = nil
-		}
-		if v == nil {
-			continue
-		}
-
-		if stackcopy := v.Name().Stackcopy; stackcopy != nil {
-			nn = append(nn, walkStmt(ir.NewDecl(base.Pos, ir.ODCL, v.(*ir.Name))))
-			if stackcopy.Class == ir.PPARAM {
-				nn = append(nn, walkStmt(typecheck.Stmt(ir.NewAssignStmt(base.Pos, v, stackcopy))))
-			}
-		}
-	}
-
-	return nn
-}
-
-// zeroResults zeros the return values at the start of the function.
-// We need to do this very early in the function. Defer might stop a
-// panic and show the return values as they exist at the time of
-// panic. For precise stacks, the garbage collector assumes results
-// are always live, so we need to zero them before any allocations,
-// even allocations to move params/results to the heap.
-// The generated code is added to Curfn's Enter list.
-func zeroResults() {
-	for _, f := range ir.CurFunc.Type().Results().Fields().Slice() {
-		v := ir.AsNode(f.Nname)
-		if v != nil && v.Name().Heapaddr != nil {
-			// The local which points to the return value is the
-			// thing that needs zeroing. This is already handled
-			// by a Needzero annotation in plive.go:livenessepilogue.
-			continue
-		}
-		if ir.IsParamHeapCopy(v) {
-			// TODO(josharian/khr): Investigate whether we can switch to "continue" here,
-			// and document more in either case.
-			// In the review of CL 114797, Keith wrote (roughly):
-			// I don't think the zeroing below matters.
-			// The stack return value will never be marked as live anywhere in the function.
-			// It is not written to until deferreturn returns.
-			v = v.Name().Stackcopy
-		}
-		// Zero the stack location containing f.
-		ir.CurFunc.Enter.Append(ir.NewAssignStmt(ir.CurFunc.Pos(), v, nil))
-	}
-}
-
-// returnsfromheap returns code to copy values for heap-escaped parameters
-// back to the stack.
-func returnsfromheap(params *types.Type) []ir.Node {
-	var nn []ir.Node
-	for _, t := range params.Fields().Slice() {
-		v := ir.AsNode(t.Nname)
-		if v == nil {
-			continue
-		}
-		if stackcopy := v.Name().Stackcopy; stackcopy != nil && stackcopy.Class == ir.PPARAMOUT {
-			nn = append(nn, walkStmt(typecheck.Stmt(ir.NewAssignStmt(base.Pos, stackcopy, v))))
-		}
-	}
-
-	return nn
-}
-
-// heapmoves generates code to handle migrating heap-escaped parameters
-// between the stack and the heap. The generated code is added to Curfn's
-// Enter and Exit lists.
-func heapmoves() {
-	lno := base.Pos
-	base.Pos = ir.CurFunc.Pos()
-	nn := paramstoheap(ir.CurFunc.Type().Recvs())
-	nn = append(nn, paramstoheap(ir.CurFunc.Type().Params())...)
-	nn = append(nn, paramstoheap(ir.CurFunc.Type().Results())...)
-	ir.CurFunc.Enter.Append(nn...)
-	base.Pos = ir.CurFunc.Endlineno
-	ir.CurFunc.Exit.Append(returnsfromheap(ir.CurFunc.Type().Results())...)
-	base.Pos = lno
-}
-
 func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) *ir.CallExpr {
 	if fn.Type() == nil || fn.Type().Kind() != types.TFUNC {
 		base.Fatalf("mkcall %v %v", fn, fn.Type())

From 9734fd482d32528c5ec0e516f79af253871beb77 Mon Sep 17 00:00:00 2001
From: Cuong Manh Le
Date: Thu, 14 Jan 2021 13:41:35 +0700
Subject: [PATCH 382/474] [dev.regabi] cmd/compile: use node walked flag to
 prevent double walk for walkSwitch

CL 283672 added a flag to prevent double walking; use that flag instead
of checking the SwitchStmt.Compiled field.

Passes toolstash -cmp.
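To make the pattern concrete, a minimal sketch follows; switchStmt and
its plain bool field are illustrative stand-ins for the real
ir.SwitchStmt, whose flag lives in the node's flag bitset:

	package walk

	// switchStmt stands in for ir.SwitchStmt.
	type switchStmt struct {
		walked bool
		// ... cases, compiled statements, etc.
	}

	func (s *switchStmt) Walked() bool     { return s.walked }
	func (s *switchStmt) SetWalked(b bool) { s.walked = b }

	// walkSwitch is idempotent: the first call lowers the switch,
	// and any later call returns immediately instead of rewriting
	// an already-lowered node.
	func walkSwitch(s *switchStmt) {
		if s.Walked() {
			return
		}
		s.SetWalked(true)
		// ... lower the switch into compiled statements ...
	}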
Change-Id: Idb8f9078412fb789f51ed4fc4206638011e38a93
Reviewed-on: https://go-review.googlesource.com/c/go/+/283733
Trust: Cuong Manh Le
Run-TryBot: Cuong Manh Le
TryBot-Result: Go Bot
Reviewed-by: Matthew Dempsky
---
 src/cmd/compile/internal/walk/switch.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/cmd/compile/internal/walk/switch.go b/src/cmd/compile/internal/walk/switch.go
index 59446ef3dbab4..0cc1830d3fc47 100644
--- a/src/cmd/compile/internal/walk/switch.go
+++ b/src/cmd/compile/internal/walk/switch.go
@@ -19,9 +19,10 @@ import (
 // walkSwitch walks a switch statement.
 func walkSwitch(sw *ir.SwitchStmt) {
 	// Guard against double walk, see #25776.
-	if len(sw.Cases) == 0 && len(sw.Compiled) > 0 {
+	if sw.Walked() {
 		return // Was fatal, but eliminating every possible source of double-walking is hard
 	}
+	sw.SetWalked(true)

 	if sw.Tag != nil && sw.Tag.Op() == ir.OTYPESW {
 		walkSwitchType(sw)

From 35b9c666012dcc5203a1362f10fe5279df163a1a Mon Sep 17 00:00:00 2001
From: Than McIntosh
Date: Tue, 22 Dec 2020 16:48:13 -0500
Subject: [PATCH 383/474] [dev.regabi] cmd/compile,cmd/link: additional code
 review suggestions for CL 270863

This patch pulls in a few additional changes requested by code
reviewers for CL 270863 that were accidentally left out. Specifically,
guarding use of ORETJMP to ensure it is not used when building dynlink
on ppc64le, and tweaking the command-line flags used to control
wrapper generation.

Change-Id: I4f96462e570180887eb8693e11badd83d142710a
Reviewed-on: https://go-review.googlesource.com/c/go/+/279527
Run-TryBot: Than McIntosh
TryBot-Result: Go Bot
Reviewed-by: Matthew Dempsky
Trust: Than McIntosh
---
 src/cmd/compile/internal/ssagen/abi.go | 3 ++-
 src/cmd/link/internal/ld/main.go       | 5 +----
 src/cmd/link/internal/ld/symtab.go     | 2 +-
 3 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/src/cmd/compile/internal/ssagen/abi.go b/src/cmd/compile/internal/ssagen/abi.go
index f1226f6a47f32..7ff8e21a48cfb 100644
--- a/src/cmd/compile/internal/ssagen/abi.go
+++ b/src/cmd/compile/internal/ssagen/abi.go
@@ -301,7 +301,8 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) {
 	// extra work in typecheck/walk/ssa, might want to add a new node
 	// OTAILCALL or something to this effect.
 	var tail ir.Node
-	if tfn.Type().NumResults() == 0 && tfn.Type().NumParams() == 0 && tfn.Type().NumRecvs() == 0 {
+	if tfn.Type().NumResults() == 0 && tfn.Type().NumParams() == 0 && tfn.Type().NumRecvs() == 0 &&
+		!(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) {
 		tail = ir.NewBranchStmt(base.Pos, ir.ORETJMP, f.Nname.Sym())
 	} else {
 		call := ir.NewCallExpr(base.Pos, ir.OCALL, f.Nname, nil)
diff --git a/src/cmd/link/internal/ld/main.go b/src/cmd/link/internal/ld/main.go
index 1420030eec41e..133308e5f4dae 100644
--- a/src/cmd/link/internal/ld/main.go
+++ b/src/cmd/link/internal/ld/main.go
@@ -95,7 +95,7 @@ var (
 	cpuprofile     = flag.String("cpuprofile", "", "write cpu profile to `file`")
 	memprofile     = flag.String("memprofile", "", "write memory profile to `file`")
 	memprofilerate = flag.Int64("memprofilerate", 0, "set runtime.MemProfileRate to `rate`")
-	flagAbiWrap    = false
+	flagAbiWrap    = flag.Bool("abiwrap", objabi.Regabi_enabled != 0, "support ABI wrapper functions")
 	benchmarkFlag  = flag.String("benchmark", "", "set to 'mem' or 'cpu' to enable phase benchmarking")
 	benchmarkFileFlag = flag.String("benchmarkprofile", "", "emit phase profiles to `base`_phase.{cpu,mem}prof")
 )
@@ -134,9 +134,6 @@ func Main(arch *sys.Arch, theArch Arch) {
 	objabi.Flagfn1("X", "add string value `definition` of the form importpath.name=value", func(s string) { addstrdata1(ctxt, s) })
 	objabi.Flagcount("v", "print link trace", &ctxt.Debugvlog)
 	objabi.Flagfn1("importcfg", "read import configuration from `file`", ctxt.readImportCfg)
-	if objabi.Regabi_enabled != 0 {
-		flag.BoolVar(&flagAbiWrap, "abiwrap", true, "support ABI wrapper functions")
-	}

 	objabi.Flagparse(usage)

diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go
index 3b709baf758bd..85a8ff42ad0d0 100644
--- a/src/cmd/link/internal/ld/symtab.go
+++ b/src/cmd/link/internal/ld/symtab.go
@@ -120,7 +120,7 @@ func putelfsym(ctxt *Link, x loader.Sym, typ elf.SymType, curbind elf.SymBind) {
 	// sym or marker relocation to associate the wrapper with the
 	// wrapped function.
 	//
-	if flagAbiWrap {
+	if *flagAbiWrap {
 		if !ldr.IsExternal(x) && ldr.SymType(x) == sym.STEXT {
 			// First case
 			if ldr.SymVersion(x) == sym.SymVerABIInternal {

From 4be7af23f97fe8d1b4210acde6789cf621564ec6 Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Thu, 14 Jan 2021 19:40:07 -0800
Subject: [PATCH 384/474] [dev.regabi] cmd/compile: fix ICE during ir.Dump

fmt.go:dumpNodeHeader uses reflection to call all "func() bool"-typed
methods on Nodes during printing, but the OnStack method that I added
in CL 283233 isn't meant to be called on non-variables.

dumpNodeHeader does already guard against panics, as happens in some
other accessors, but not against Fatalf, as I was using in OnStack. So
simply change OnStack to use panic too.

Thanks to drchase@ for the report.

Change-Id: I0cfac84a96292193401a32fc5e7fd3c48773e008
Reviewed-on: https://go-review.googlesource.com/c/go/+/284074
Trust: Matthew Dempsky
Run-TryBot: Matthew Dempsky
Reviewed-by: David Chase
TryBot-Result: Go Bot
---
 src/cmd/compile/internal/ir/name.go | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go
index d19b0440e6808..64de42382e80c 100644
--- a/src/cmd/compile/internal/ir/name.go
+++ b/src/cmd/compile/internal/ir/name.go
@@ -286,18 +286,17 @@ func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtra

 // OnStack reports whether variable n may reside on the stack.
func (n *Name) OnStack() bool { - if n.Op() != ONAME || n.Class == PFUNC { - base.Fatalf("%v is not a variable", n) - } - switch n.Class { - case PPARAM, PPARAMOUT, PAUTO: - return n.Esc() != EscHeap - case PEXTERN, PAUTOHEAP: - return false - default: - base.FatalfAt(n.Pos(), "%v has unknown class %v", n, n.Class) - panic("unreachable") + if n.Op() == ONAME { + switch n.Class { + case PPARAM, PPARAMOUT, PAUTO: + return n.Esc() != EscHeap + case PEXTERN, PAUTOHEAP: + return false + } } + // Note: fmt.go:dumpNodeHeader calls all "func() bool"-typed + // methods, but it can only recover from panics, not Fatalf. + panic(fmt.Sprintf("%v: not a variable: %v", base.FmtPos(n.Pos()), n)) } // MarkReadonly indicates that n is an ONAME with readonly contents. From b7a698c73fc61bf60e2e61db0c98f16b0bfc8652 Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 14 Jan 2021 20:10:35 -0500 Subject: [PATCH 385/474] [dev.regabi] test: disable test on windows because expected contains path separators. The feature being tested is insensitive to the OS anyway. Change-Id: Ieac9bfaafc6a54c00017afcc0b87bd8bbe80af7b Reviewed-on: https://go-review.googlesource.com/c/go/+/284032 Trust: David Chase Run-TryBot: David Chase TryBot-Result: Go Bot Reviewed-by: Than McIntosh --- test/abi/regabipragma.go | 1 + 1 file changed, 1 insertion(+) diff --git a/test/abi/regabipragma.go b/test/abi/regabipragma.go index 93cdb6abbb1e9..6a1b1938ea038 100644 --- a/test/abi/regabipragma.go +++ b/test/abi/regabipragma.go @@ -1,4 +1,5 @@ // runindir +// +build !windows // Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style From ab523fc510aadb82dc39dec89741fcbb90093ff0 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Fri, 15 Jan 2021 00:39:24 -0800 Subject: [PATCH 386/474] [dev.regabi] cmd/compile: don't promote Byval CaptureVars if Addrtaken We decide during escape analysis whether to pass closure variables by value or reference. One of the factors that's considered is whether a variable has had its address taken. However, this analysis is based only on the user-written source code, whereas order+walk may introduce rewrites that take the address of a variable (e.g., passing a uint16 key by reference to the size-generic map runtime builtins). Typically this would be harmless, albeit suboptimal. But in #43701 it manifested as needing a stack object for a function where we didn't realize we needed one up front when we generate symbols. Probably we should just generate symbols on demand, now that those routines are all concurrent-safe, but this is a first fix. Thanks to Alberto Donizetti for reporting the issue, and Cuong Manh Le for initial investigation. Fixes #43701. 
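The program shape that triggers the bug, mirroring the regression test
added below (the comments are explanatory and not part of the test):

	package p

	func f() {
		var st struct {
			s string
			i int16
		}
		_ = func() {
			var m map[int16]int
			// st is captured by value: the closure only reads st.i.
			// But walking this assignment introduces a size-generic
			// runtime map call that passes the key by reference,
			// taking the captured copy's address after escape
			// analysis has already run.
			m[st.i] = 0
		}
	}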
Change-Id: I16d87e9150723dcb16de7b43f2a8f3cd807a9437 Reviewed-on: https://go-review.googlesource.com/c/go/+/284075 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/ssagen/ssa.go | 11 +++++++++-- test/fixedbugs/issue43701.go | 18 ++++++++++++++++++ 2 files changed, 27 insertions(+), 2 deletions(-) create mode 100644 test/fixedbugs/issue43701.go diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index ab2e21bea064c..fe9a1f617bb00 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -490,8 +490,15 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { ptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(typ), offset, clo) offset += typ.Size() - if n.Byval() && TypeOK(n.Type()) { - // If it is a small variable captured by value, downgrade it to PAUTO. + // If n is a small variable captured by value, promote + // it to PAUTO so it can be converted to SSA. + // + // Note: While we never capture a variable by value if + // the user took its address, we may have generated + // runtime calls that did (#43701). Since we don't + // convert Addrtaken variables to SSA anyway, no point + // in promoting them either. + if n.Byval() && !n.Addrtaken() && TypeOK(n.Type()) { n.Class = ir.PAUTO fn.Dcl = append(fn.Dcl, n) s.assign(n, s.load(n.Type(), ptr), false, 0) diff --git a/test/fixedbugs/issue43701.go b/test/fixedbugs/issue43701.go new file mode 100644 index 0000000000000..6e161800466bb --- /dev/null +++ b/test/fixedbugs/issue43701.go @@ -0,0 +1,18 @@ +// compile + +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +func f() { + var st struct { + s string + i int16 + } + _ = func() { + var m map[int16]int + m[st.i] = 0 + } +} From 14537e6e5410b403add59bb41d3954bdab0ade3e Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Fri, 15 Jan 2021 00:56:02 -0800 Subject: [PATCH 387/474] [dev.regabi] cmd/compile: move stkobj symbol generation to SSA The code for allocating linksyms and recording that we need runtime type descriptors is now concurrent-safe, so move it to where those symbols are actually needed to reduce complexity and risk of failing to generate all needed symbols in advance. For #43701. Change-Id: I759d2508213ac9a4e0b504b51a75fa10dfa37a8d Reviewed-on: https://go-review.googlesource.com/c/go/+/284076 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le Trust: Matthew Dempsky --- src/cmd/compile/internal/gc/compile.go | 16 ---------------- src/cmd/compile/internal/ssagen/ssa.go | 8 +++----- 2 files changed, 3 insertions(+), 21 deletions(-) diff --git a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go index a8a0106320e94..6e347bf0f119f 100644 --- a/src/cmd/compile/internal/gc/compile.go +++ b/src/cmd/compile/internal/gc/compile.go @@ -13,7 +13,6 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/liveness" - "cmd/compile/internal/reflectdata" "cmd/compile/internal/ssagen" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" @@ -84,21 +83,6 @@ func prepareFunc(fn *ir.Func) { walk.Walk(fn) ir.CurFunc = nil // enforce no further uses of CurFunc typecheck.DeclContext = ir.PEXTERN - - // Make sure type syms are declared for all types that might - // be types of stack objects. 
We need to do this here - // because symbols must be allocated before the parallel - // phase of the compiler. - for _, n := range fn.Dcl { - if liveness.ShouldTrack(n) && n.Addrtaken() { - reflectdata.WriteType(n.Type()) - // Also make sure we allocate a linker symbol - // for the stack object data, for the same reason. - if fn.LSym.Func().StackObjects == nil { - fn.LSym.Func().StackObjects = base.Ctxt.Lookup(fn.LSym.Name + ".stkobj") - } - } - } } // compileFunctions compiles all functions in compilequeue. diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index fe9a1f617bb00..c48ac22d2a1fb 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -6494,7 +6494,8 @@ func emitStackObjects(e *ssafn, pp *objw.Progs) { // Populate the stack object data. // Format must match runtime/stack.go:stackObjectRecord. - x := e.curfn.LSym.Func().StackObjects + x := base.Ctxt.Lookup(e.curfn.LSym.Name + ".stkobj") + e.curfn.LSym.Func().StackObjects = x off := 0 off = objw.Uintptr(x, off, uint64(len(vars))) for _, v := range vars { @@ -6502,10 +6503,7 @@ func emitStackObjects(e *ssafn, pp *objw.Progs) { // in which case the offset is relative to argp. // Locals have a negative Xoffset, in which case the offset is relative to varp. off = objw.Uintptr(x, off, uint64(v.FrameOffset())) - if !types.TypeSym(v.Type()).Siggen() { - e.Fatalf(v.Pos(), "stack object's type symbol not generated for type %s", v.Type()) - } - off = objw.SymPtr(x, off, reflectdata.WriteType(v.Type()), 0) + off = objw.SymPtr(x, off, reflectdata.TypeLinksym(v.Type()), 0) } // Emit a funcdata pointing at the stack object data. From 03a875137ff8a496e3e7e06de711ce286679dcba Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Fri, 15 Jan 2021 00:58:03 -0800 Subject: [PATCH 388/474] [dev.regabi] cmd/compile: unexport reflectdata.WriteType WriteType isn't safe for direct concurrent use, and users should instead use TypeLinksym or another higher-level API provided by reflectdata. After the previous CL, there are no remaining uses of WriteType elsewhere in the compiler, so unexport it to keep it that way. For #43701. 
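The resulting layering, with writeType unexported behind safe entry
points, can be sketched as follows. Names and signatures here are
illustrative only; the real writeType operates on *types.Type, and the
compiler's concurrency safety comes from the surrounding passes rather
than a mutex inside reflectdata:

	package reflectdata

	import "sync"

	var typeMu sync.Mutex

	// writeType is the worker that is not safe for direct concurrent
	// use; keeping it unexported prevents callers outside the package
	// from reaching it.
	func writeType(name string) string {
		return "type." + name // emit the descriptor, return its symbol
	}

	// TypeLinksym is the supported entry point; callers such as
	// emitStackObjects go through it instead of writeType.
	func TypeLinksym(name string) string {
		typeMu.Lock()
		defer typeMu.Unlock()
		return writeType(name)
	}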
[git-generate] cd src/cmd/compile/internal/reflectdata rf ' mv WriteType writeType ' Change-Id: I294a78be570a47feb38a1ad4eaae7723653d5991 Reviewed-on: https://go-review.googlesource.com/c/go/+/284077 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- .../compile/internal/reflectdata/reflect.go | 64 +++++++++---------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go index 30857fff6da72..989bcf9ab9a63 100644 --- a/src/cmd/compile/internal/reflectdata/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -562,7 +562,7 @@ func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int { } for _, a := range m { - WriteType(a.type_) + writeType(a.type_) } ot = dgopkgpathOff(lsym, ot, typePkg(t)) @@ -613,7 +613,7 @@ func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int { nsym := dname(a.name.Name, "", pkg, exported) ot = objw.SymPtrOff(lsym, ot, nsym) - ot = dmethodptrOff(lsym, ot, WriteType(a.mtype)) + ot = dmethodptrOff(lsym, ot, writeType(a.mtype)) ot = dmethodptrOff(lsym, ot, a.isym) ot = dmethodptrOff(lsym, ot, a.tsym) } @@ -690,7 +690,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { if t.Sym() != nil || methods(tptr) != nil { sptrWeak = false } - sptr = WriteType(tptr) + sptr = writeType(tptr) } gcsym, useGCProg, ptrdata := dgcsym(t) @@ -933,7 +933,7 @@ func formalType(t *types.Type) *types.Type { return t } -func WriteType(t *types.Type) *obj.LSym { +func writeType(t *types.Type) *obj.LSym { t = formalType(t) if t.IsUntyped() { base.Fatalf("dtypesym %v", t) @@ -983,9 +983,9 @@ func WriteType(t *types.Type) *obj.LSym { case types.TARRAY: // ../../../../runtime/type.go:/arrayType - s1 := WriteType(t.Elem()) + s1 := writeType(t.Elem()) t2 := types.NewSlice(t.Elem()) - s2 := WriteType(t2) + s2 := writeType(t2) ot = dcommontype(lsym, t) ot = objw.SymPtr(lsym, ot, s1, 0) ot = objw.SymPtr(lsym, ot, s2, 0) @@ -994,14 +994,14 @@ func WriteType(t *types.Type) *obj.LSym { case types.TSLICE: // ../../../../runtime/type.go:/sliceType - s1 := WriteType(t.Elem()) + s1 := writeType(t.Elem()) ot = dcommontype(lsym, t) ot = objw.SymPtr(lsym, ot, s1, 0) ot = dextratype(lsym, ot, t, 0) case types.TCHAN: // ../../../../runtime/type.go:/chanType - s1 := WriteType(t.Elem()) + s1 := writeType(t.Elem()) ot = dcommontype(lsym, t) ot = objw.SymPtr(lsym, ot, s1, 0) ot = objw.Uintptr(lsym, ot, uint64(t.ChanDir())) @@ -1009,15 +1009,15 @@ func WriteType(t *types.Type) *obj.LSym { case types.TFUNC: for _, t1 := range t.Recvs().Fields().Slice() { - WriteType(t1.Type) + writeType(t1.Type) } isddd := false for _, t1 := range t.Params().Fields().Slice() { isddd = t1.IsDDD() - WriteType(t1.Type) + writeType(t1.Type) } for _, t1 := range t.Results().Fields().Slice() { - WriteType(t1.Type) + writeType(t1.Type) } ot = dcommontype(lsym, t) @@ -1037,20 +1037,20 @@ func WriteType(t *types.Type) *obj.LSym { // Array of rtype pointers follows funcType. 
for _, t1 := range t.Recvs().Fields().Slice() { - ot = objw.SymPtr(lsym, ot, WriteType(t1.Type), 0) + ot = objw.SymPtr(lsym, ot, writeType(t1.Type), 0) } for _, t1 := range t.Params().Fields().Slice() { - ot = objw.SymPtr(lsym, ot, WriteType(t1.Type), 0) + ot = objw.SymPtr(lsym, ot, writeType(t1.Type), 0) } for _, t1 := range t.Results().Fields().Slice() { - ot = objw.SymPtr(lsym, ot, WriteType(t1.Type), 0) + ot = objw.SymPtr(lsym, ot, writeType(t1.Type), 0) } case types.TINTER: m := imethods(t) n := len(m) for _, a := range m { - WriteType(a.type_) + writeType(a.type_) } // ../../../../runtime/type.go:/interfaceType @@ -1078,14 +1078,14 @@ func WriteType(t *types.Type) *obj.LSym { nsym := dname(a.name.Name, "", pkg, exported) ot = objw.SymPtrOff(lsym, ot, nsym) - ot = objw.SymPtrOff(lsym, ot, WriteType(a.type_)) + ot = objw.SymPtrOff(lsym, ot, writeType(a.type_)) } // ../../../../runtime/type.go:/mapType case types.TMAP: - s1 := WriteType(t.Key()) - s2 := WriteType(t.Elem()) - s3 := WriteType(MapBucketType(t)) + s1 := writeType(t.Key()) + s2 := writeType(t.Elem()) + s3 := writeType(MapBucketType(t)) hasher := genhash(t.Key()) ot = dcommontype(lsym, t) @@ -1132,7 +1132,7 @@ func WriteType(t *types.Type) *obj.LSym { } // ../../../../runtime/type.go:/ptrType - s1 := WriteType(t.Elem()) + s1 := writeType(t.Elem()) ot = dcommontype(lsym, t) ot = objw.SymPtr(lsym, ot, s1, 0) @@ -1143,7 +1143,7 @@ func WriteType(t *types.Type) *obj.LSym { case types.TSTRUCT: fields := t.Fields().Slice() for _, t1 := range fields { - WriteType(t1.Type) + writeType(t1.Type) } // All non-exported struct field names within a struct @@ -1171,7 +1171,7 @@ func WriteType(t *types.Type) *obj.LSym { for _, f := range fields { // ../../../../runtime/type.go:/structField ot = dnameField(lsym, ot, spkg, f) - ot = objw.SymPtr(lsym, ot, WriteType(f.Type), 0) + ot = objw.SymPtr(lsym, ot, writeType(f.Type), 0) offsetAnon := uint64(f.Offset) << 1 if offsetAnon>>1 != uint64(f.Offset) { base.Fatalf("%v: bad field offset for %s", t, f.Sym.Name) @@ -1326,9 +1326,9 @@ func WriteRuntimeTypes() { sort.Sort(typesByString(signats)) for _, ts := range signats { t := ts.t - WriteType(t) + writeType(t) if t.Sym() != nil { - WriteType(types.NewPtr(t)) + writeType(types.NewPtr(t)) } } } @@ -1345,8 +1345,8 @@ func WriteTabs() { // _ [4]byte // fun [1]uintptr // variable sized // } - o := objw.SymPtr(i.lsym, 0, WriteType(i.itype), 0) - o = objw.SymPtr(i.lsym, o, WriteType(i.t), 0) + o := objw.SymPtr(i.lsym, 0, writeType(i.itype), 0) + o = objw.SymPtr(i.lsym, o, writeType(i.t), 0) o = objw.Uint32(i.lsym, o, types.TypeHash(i.t)) // copy of type hash o += 4 // skip unused field for _, fn := range genfun(i.t, i.itype) { @@ -1373,7 +1373,7 @@ func WriteTabs() { if p.Class != ir.PFUNC { t = types.NewPtr(t) } - tsym := WriteType(t) + tsym := writeType(t) ot = objw.SymPtrOff(s, ot, nsym) ot = objw.SymPtrOff(s, ot, tsym) // Plugin exports symbols as interfaces. Mark their types @@ -1407,16 +1407,16 @@ func WriteBasicTypes() { // but using runtime means fewer copies in object files. if base.Ctxt.Pkgpath == "runtime" { for i := types.Kind(1); i <= types.TBOOL; i++ { - WriteType(types.NewPtr(types.Types[i])) + writeType(types.NewPtr(types.Types[i])) } - WriteType(types.NewPtr(types.Types[types.TSTRING])) - WriteType(types.NewPtr(types.Types[types.TUNSAFEPTR])) + writeType(types.NewPtr(types.Types[types.TSTRING])) + writeType(types.NewPtr(types.Types[types.TUNSAFEPTR])) // emit type structs for error and func(error) string. 
 		// The latter is the type of an auto-generated wrapper.
-		WriteType(types.NewPtr(types.ErrorType))
+		writeType(types.NewPtr(types.ErrorType))

-		WriteType(types.NewSignature(types.NoPkg, nil, []*types.Field{
+		writeType(types.NewSignature(types.NoPkg, nil, []*types.Field{
 			types.NewField(base.Pos, nil, types.ErrorType),
 		}, []*types.Field{
 			types.NewField(base.Pos, nil, types.Types[types.TSTRING]),

From c9b1445ac830891e2ebb7a4c3ce278309bdcc764 Mon Sep 17 00:00:00 2001
From: Cuong Manh Le
Date: Fri, 15 Jan 2021 22:21:33 +0700
Subject: [PATCH 389/474] [dev.regabi] cmd/compile: remove TypeAssertExpr
 {Src,Dst}Type fields

CL 283233 added a reflectType method to ssagen.state, which we can use
to set up the type addresses in the SSA backend instead of in the
frontend. However, this changes the order of symbol generation, so it
is not safe for toolstash.

Change-Id: Ib6932ec42a9d28c3fd7a1c055596e75494c29843
Reviewed-on: https://go-review.googlesource.com/c/go/+/284115
Trust: Cuong Manh Le
Run-TryBot: Cuong Manh Le
TryBot-Result: Go Bot
Reviewed-by: Matthew Dempsky
---
 src/cmd/compile/internal/ir/expr.go    | 8 +++-----
 src/cmd/compile/internal/ssagen/ssa.go | 6 +++---
 src/cmd/compile/internal/walk/expr.go  | 5 -----
 3 files changed, 6 insertions(+), 13 deletions(-)

diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
index 39659c45c01b0..5b1be7fc0f3ec 100644
--- a/src/cmd/compile/internal/ir/expr.go
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -615,11 +615,9 @@ type TypeAssertExpr struct {
 	X     Node
 	Ntype Ntype

-	// Runtime type information provided by walkDotType.
-	// Caution: These aren't always populated; see walkDotType.
-	SrcType *AddrExpr `mknode:"-"` // *runtime._type for X's type
-	DstType *AddrExpr `mknode:"-"` // *runtime._type for Type
-	Itab    *AddrExpr `mknode:"-"` // *runtime.itab for Type implementing X's type
+	// Runtime type information provided by walkDotType for
+	// assertions from non-empty interface to concrete type.
+	Itab *AddrExpr `mknode:"-"` // *runtime.itab for Type implementing X's type
 }

 func NewTypeAssertExpr(pos src.XPos, x Node, typ Ntype) *TypeAssertExpr {
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index c48ac22d2a1fb..48942e01d6ed2 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -6110,8 +6110,8 @@ func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *
 // commaok indicates whether to panic or return a bool.
 // If commaok is false, resok will be nil.
 func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
-	iface := s.expr(n.X)        // input interface
-	target := s.expr(n.DstType) // target type
+	iface := s.expr(n.X)              // input interface
+	target := s.reflectType(n.Type()) // target type
 	byteptr := s.f.Config.Types.BytePtr

 	if n.Type().IsInterface() {
@@ -6245,7 +6245,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
 		if !commaok {
 			// on failure, panic by calling panicdottype
 			s.startBlock(bFail)
-			taddr := s.expr(n.SrcType)
+			taddr := s.reflectType(n.X.Type())
 			if n.X.Type().IsEmptyInterface() {
 				s.rtcall(ir.Syms.PanicdottypeE, false, nil, itab, target, taddr)
 			} else {
diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go
index 449f8ea3ec0fd..c9b7c0704e888 100644
--- a/src/cmd/compile/internal/walk/expr.go
+++ b/src/cmd/compile/internal/walk/expr.go
@@ -619,11 +619,6 @@ func walkDot(n *ir.SelectorExpr, init *ir.Nodes) ir.Node {
 func walkDotType(n *ir.TypeAssertExpr, init *ir.Nodes) ir.Node {
 	n.X = walkExpr(n.X, init)
 	// Set up interface type addresses for back end.
-
-	n.DstType = reflectdata.TypePtr(n.Type())
-	if n.Op() == ir.ODOTTYPE {
-		n.SrcType = reflectdata.TypePtr(n.X.Type())
-	}
 	if !n.Type().IsInterface() && !n.X.Type().IsEmptyInterface() {
 		n.Itab = reflectdata.ITabAddr(n.Type(), n.X.Type())
 	}

From ab3b67abfd9bff30fc001c966ab121bacff3de9b Mon Sep 17 00:00:00 2001
From: Cuong Manh Le
Date: Fri, 15 Jan 2021 23:20:13 +0700
Subject: [PATCH 390/474] [dev.regabi] cmd/compile: remove ONEWOBJ

After CL 283233, SSA can now handle new(typ) without the frontend
having to generate the type address, so we can remove ONEWOBJ in favor
of ONEW only. This is also not safe for toolstash, for the same reason
as CL 284115.

Change-Id: Ie03ea36b3b6f95fc7ce080376c6f7afc402d51a3
Reviewed-on: https://go-review.googlesource.com/c/go/+/284117
Trust: Cuong Manh Le
Run-TryBot: Cuong Manh Le
TryBot-Result: Go Bot
Reviewed-by: Matthew Dempsky
---
 src/cmd/compile/internal/ir/expr.go      |   2 +-
 src/cmd/compile/internal/ir/node.go      |   1 -
 src/cmd/compile/internal/ir/op_string.go | 143 +++++++++++------------
 src/cmd/compile/internal/ssagen/ssa.go   |   2 +-
 src/cmd/compile/internal/walk/builtin.go |  20 ++--
 src/cmd/compile/internal/walk/convert.go |   6 +-
 src/cmd/compile/internal/walk/expr.go    |   2 +-
 src/cmd/compile/internal/walk/walk.go    |   2 +-
 8 files changed, 87 insertions(+), 91 deletions(-)

diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
index 5b1be7fc0f3ec..dd91e347bd480 100644
--- a/src/cmd/compile/internal/ir/expr.go
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -657,7 +657,7 @@ func (n *UnaryExpr) SetOp(op Op) {
 	case OBITNOT, ONEG, ONOT, OPLUS, ORECV,
 		OALIGNOF, OCAP, OCLOSE, OIMAG, OLEN, ONEW,
 		OOFFSETOF, OPANIC, OREAL, OSIZEOF,
-		OCHECKNIL, OCFUNC, OIDATA, OITAB, ONEWOBJ, OSPTR, OVARDEF, OVARKILL, OVARLIVE:
+		OCHECKNIL, OCFUNC, OIDATA, OITAB, OSPTR, OVARDEF, OVARKILL, OVARLIVE:
 		n.op = op
 	}
 }
diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go
index a1b09b38ccd54..de03800da24b3 100644
--- a/src/cmd/compile/internal/ir/node.go
+++ b/src/cmd/compile/internal/ir/node.go
@@ -216,7 +216,6 @@ const (
 	OAND    // Left & Right
 	OANDNOT // Left &^ Right
 	ONEW    // new(Left); corresponds to calls to new in source code
-	ONEWOBJ // runtime.newobject(n.Type); introduced by walk; Left is type descriptor
 	ONOT    // !Left
 	OBITNOT // ^Left
 	OPLUS   // +Left
diff --git a/src/cmd/compile/internal/ir/op_string.go
b/src/cmd/compile/internal/ir/op_string.go index b54b4785a2391..9538599c38c46 100644 --- a/src/cmd/compile/internal/ir/op_string.go +++ b/src/cmd/compile/internal/ir/op_string.go @@ -91,81 +91,80 @@ func _() { _ = x[OAND-80] _ = x[OANDNOT-81] _ = x[ONEW-82] - _ = x[ONEWOBJ-83] - _ = x[ONOT-84] - _ = x[OBITNOT-85] - _ = x[OPLUS-86] - _ = x[ONEG-87] - _ = x[OOROR-88] - _ = x[OPANIC-89] - _ = x[OPRINT-90] - _ = x[OPRINTN-91] - _ = x[OPAREN-92] - _ = x[OSEND-93] - _ = x[OSLICE-94] - _ = x[OSLICEARR-95] - _ = x[OSLICESTR-96] - _ = x[OSLICE3-97] - _ = x[OSLICE3ARR-98] - _ = x[OSLICEHEADER-99] - _ = x[ORECOVER-100] - _ = x[ORECV-101] - _ = x[ORUNESTR-102] - _ = x[OSELRECV2-103] - _ = x[OIOTA-104] - _ = x[OREAL-105] - _ = x[OIMAG-106] - _ = x[OCOMPLEX-107] - _ = x[OALIGNOF-108] - _ = x[OOFFSETOF-109] - _ = x[OSIZEOF-110] - _ = x[OMETHEXPR-111] - _ = x[OSTMTEXPR-112] - _ = x[OBLOCK-113] - _ = x[OBREAK-114] - _ = x[OCASE-115] - _ = x[OCONTINUE-116] - _ = x[ODEFER-117] - _ = x[OFALL-118] - _ = x[OFOR-119] - _ = x[OFORUNTIL-120] - _ = x[OGOTO-121] - _ = x[OIF-122] - _ = x[OLABEL-123] - _ = x[OGO-124] - _ = x[ORANGE-125] - _ = x[ORETURN-126] - _ = x[OSELECT-127] - _ = x[OSWITCH-128] - _ = x[OTYPESW-129] - _ = x[OTCHAN-130] - _ = x[OTMAP-131] - _ = x[OTSTRUCT-132] - _ = x[OTINTER-133] - _ = x[OTFUNC-134] - _ = x[OTARRAY-135] - _ = x[OTSLICE-136] - _ = x[OINLCALL-137] - _ = x[OEFACE-138] - _ = x[OITAB-139] - _ = x[OIDATA-140] - _ = x[OSPTR-141] - _ = x[OCFUNC-142] - _ = x[OCHECKNIL-143] - _ = x[OVARDEF-144] - _ = x[OVARKILL-145] - _ = x[OVARLIVE-146] - _ = x[ORESULT-147] - _ = x[OINLMARK-148] - _ = x[ONAMEOFFSET-149] - _ = x[ORETJMP-150] - _ = x[OGETG-151] - _ = x[OEND-152] + _ = x[ONOT-83] + _ = x[OBITNOT-84] + _ = x[OPLUS-85] + _ = x[ONEG-86] + _ = x[OOROR-87] + _ = x[OPANIC-88] + _ = x[OPRINT-89] + _ = x[OPRINTN-90] + _ = x[OPAREN-91] + _ = x[OSEND-92] + _ = x[OSLICE-93] + _ = x[OSLICEARR-94] + _ = x[OSLICESTR-95] + _ = x[OSLICE3-96] + _ = x[OSLICE3ARR-97] + _ = x[OSLICEHEADER-98] + _ = x[ORECOVER-99] + _ = x[ORECV-100] + _ = x[ORUNESTR-101] + _ = x[OSELRECV2-102] + _ = x[OIOTA-103] + _ = x[OREAL-104] + _ = x[OIMAG-105] + _ = x[OCOMPLEX-106] + _ = x[OALIGNOF-107] + _ = x[OOFFSETOF-108] + _ = x[OSIZEOF-109] + _ = x[OMETHEXPR-110] + _ = x[OSTMTEXPR-111] + _ = x[OBLOCK-112] + _ = x[OBREAK-113] + _ = x[OCASE-114] + _ = x[OCONTINUE-115] + _ = x[ODEFER-116] + _ = x[OFALL-117] + _ = x[OFOR-118] + _ = x[OFORUNTIL-119] + _ = x[OGOTO-120] + _ = x[OIF-121] + _ = x[OLABEL-122] + _ = x[OGO-123] + _ = x[ORANGE-124] + _ = x[ORETURN-125] + _ = x[OSELECT-126] + _ = x[OSWITCH-127] + _ = x[OTYPESW-128] + _ = x[OTCHAN-129] + _ = x[OTMAP-130] + _ = x[OTSTRUCT-131] + _ = x[OTINTER-132] + _ = x[OTFUNC-133] + _ = x[OTARRAY-134] + _ = x[OTSLICE-135] + _ = x[OINLCALL-136] + _ = x[OEFACE-137] + _ = x[OITAB-138] + _ = x[OIDATA-139] + _ = x[OSPTR-140] + _ = x[OCFUNC-141] + _ = x[OCHECKNIL-142] + _ = x[OVARDEF-143] + _ = x[OVARKILL-144] + _ = x[OVARLIVE-145] + _ = x[ORESULT-146] + _ = x[OINLMARK-147] + _ = x[ONAMEOFFSET-148] + _ = x[ORETJMP-149] + _ = x[OGETG-150] + _ = x[OEND-151] } -const _Op_name = 
"XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKNAMEOFFSETRETJMPGETGEND" +const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKNAMEOFFSETRETJMPGETGEND" -var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 477, 480, 486, 490, 493, 497, 502, 507, 513, 518, 522, 527, 535, 543, 549, 558, 569, 576, 580, 587, 595, 599, 603, 607, 614, 621, 629, 635, 643, 651, 656, 661, 665, 673, 678, 682, 685, 693, 697, 699, 704, 706, 711, 717, 723, 729, 735, 740, 744, 751, 757, 762, 768, 774, 781, 786, 790, 795, 799, 804, 812, 818, 825, 832, 838, 845, 855, 861, 865, 868} +var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 474, 480, 484, 487, 491, 496, 501, 507, 512, 516, 521, 529, 537, 543, 552, 563, 570, 574, 581, 589, 593, 597, 601, 608, 615, 623, 629, 637, 645, 650, 655, 659, 667, 672, 676, 679, 687, 691, 693, 698, 700, 705, 711, 717, 723, 729, 734, 738, 745, 751, 756, 762, 768, 775, 780, 784, 789, 793, 798, 806, 812, 819, 826, 832, 839, 849, 855, 859, 862} func (i Op) String() string { if i >= Op(len(_Op_index)-1) { diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 48942e01d6ed2..097cfacc23d53 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ 
b/src/cmd/compile/internal/ssagen/ssa.go @@ -3034,7 +3034,7 @@ func (s *state) expr(n ir.Node) *ssa.Value { } return s.zeroVal(n.Type()) - case ir.ONEWOBJ: + case ir.ONEW: n := n.(*ir.UnaryExpr) return s.newObject(n.Type().Elem()) diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go index a061181e2fb77..18ff70224830a 100644 --- a/src/cmd/compile/internal/walk/builtin.go +++ b/src/cmd/compile/internal/walk/builtin.go @@ -501,18 +501,21 @@ func walkMakeSliceCopy(n *ir.MakeExpr, init *ir.Nodes) ir.Node { // walkNew walks an ONEW node. func walkNew(n *ir.UnaryExpr, init *ir.Nodes) ir.Node { - if n.Type().Elem().NotInHeap() { + t := n.Type().Elem() + if t.NotInHeap() { base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type().Elem()) } if n.Esc() == ir.EscNone { - if n.Type().Elem().Width >= ir.MaxImplicitStackVarSize { + if t.Size() >= ir.MaxImplicitStackVarSize { base.Fatalf("large ONEW with EscNone: %v", n) } - r := typecheck.Temp(n.Type().Elem()) + r := typecheck.Temp(t) init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, nil))) // zero temp return typecheck.Expr(typecheck.NodAddr(r)) } - return callnew(n.Type().Elem()) + types.CalcSize(t) + n.MarkNonNil() + return n } // generate code for print @@ -678,15 +681,6 @@ func badtype(op ir.Op, tl, tr *types.Type) { base.Errorf("illegal types for operand: %v%s", op, s) } -func callnew(t *types.Type) ir.Node { - types.CalcSize(t) - n := ir.NewUnaryExpr(base.Pos, ir.ONEWOBJ, reflectdata.TypePtr(t)) - n.SetType(types.NewPtr(t)) - n.SetTypecheck(1) - n.MarkNonNil() - return n -} - func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node { fn := typecheck.LookupRuntime(name) fn = typecheck.SubstArgTypes(fn, l, r) diff --git a/src/cmd/compile/internal/walk/convert.go b/src/cmd/compile/internal/walk/convert.go index 85459fd92f7ee..848aee3938516 100644 --- a/src/cmd/compile/internal/walk/convert.go +++ b/src/cmd/compile/internal/walk/convert.go @@ -248,7 +248,11 @@ func walkStringToBytes(n *ir.ConvExpr, init *ir.Nodes) ir.Node { if n.Esc() == ir.EscNone && len(sc) <= int(ir.MaxImplicitStackVarSize) { a = typecheck.NodAddr(typecheck.Temp(t)) } else { - a = callnew(t) + types.CalcSize(t) + a = ir.NewUnaryExpr(base.Pos, ir.ONEW, nil) + a.SetType(types.NewPtr(t)) + a.SetTypecheck(1) + a.MarkNonNil() } p := typecheck.Temp(t.PtrTo()) // *[n]byte init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, p, a))) diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index c9b7c0704e888..253634a60f7fa 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -84,7 +84,7 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node { base.Fatalf("walkexpr: switch 1 unknown op %+v", n.Op()) panic("unreachable") - case ir.ONONAME, ir.OGETG, ir.ONEWOBJ: + case ir.ONONAME, ir.OGETG: return n case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET: diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go index 71f018fe3e3e6..4ba81b82fef27 100644 --- a/src/cmd/compile/internal/walk/walk.go +++ b/src/cmd/compile/internal/walk/walk.go @@ -358,7 +358,7 @@ func calcHasCall(n ir.Node) bool { case ir.OBITNOT, ir.ONOT, ir.OPLUS, ir.ORECV, ir.OALIGNOF, ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.ONEW, ir.OOFFSETOF, ir.OPANIC, ir.OREAL, ir.OSIZEOF, - ir.OCHECKNIL, ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.ONEWOBJ, ir.OSPTR, ir.OVARDEF, ir.OVARKILL, ir.OVARLIVE: + ir.OCHECKNIL, ir.OCFUNC, 
ir.OIDATA, ir.OITAB, ir.OSPTR, ir.OVARDEF, ir.OVARKILL, ir.OVARLIVE: n := n.(*ir.UnaryExpr) return n.X.HasCall() case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER: From a956a0e909e1d60c8d55339e5e591a9d1db885c4 Mon Sep 17 00:00:00 2001 From: Dan Scales Date: Fri, 15 Jan 2021 14:12:35 -0800 Subject: [PATCH 391/474] [dev.regabi] cmd/compile, runtime: fix up comments/error messages from recent renames Went in a semi-automated way through the clearest renames of functions, and updated comments and error messages where it made sense. Change-Id: Ied8e152b562b705da7f52f715991a77dab60da35 Reviewed-on: https://go-review.googlesource.com/c/go/+/284216 Trust: Dan Scales Run-TryBot: Dan Scales TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/asm/internal/asm/parse.go | 2 +- src/cmd/compile/internal/base/flag.go | 2 +- src/cmd/compile/internal/base/print.go | 2 +- src/cmd/compile/internal/bitvec/bv.go | 2 +- src/cmd/compile/internal/escape/escape.go | 4 +- src/cmd/compile/internal/gc/compile.go | 2 +- src/cmd/compile/internal/gc/main.go | 8 ++-- src/cmd/compile/internal/gc/obj.go | 2 +- src/cmd/compile/internal/inline/inl.go | 10 ++--- src/cmd/compile/internal/ir/const.go | 2 +- src/cmd/compile/internal/ir/func.go | 2 +- src/cmd/compile/internal/ir/stmt.go | 4 +- src/cmd/compile/internal/liveness/bvset.go | 2 +- src/cmd/compile/internal/liveness/plive.go | 2 +- src/cmd/compile/internal/noder/import.go | 2 +- src/cmd/compile/internal/noder/noder.go | 8 ++-- src/cmd/compile/internal/objw/prog.go | 2 +- src/cmd/compile/internal/pkginit/init.go | 4 +- src/cmd/compile/internal/reflectdata/alg.go | 2 +- .../compile/internal/reflectdata/reflect.go | 20 ++++----- src/cmd/compile/internal/ssagen/abi.go | 2 +- src/cmd/compile/internal/ssagen/nowb.go | 4 +- src/cmd/compile/internal/ssagen/pgen.go | 2 +- src/cmd/compile/internal/ssagen/ssa.go | 8 ++-- src/cmd/compile/internal/staticdata/data.go | 30 +++++++------- src/cmd/compile/internal/staticdata/embed.go | 2 +- src/cmd/compile/internal/staticinit/sched.go | 2 +- .../compile/internal/test/abiutilsaux_test.go | 2 +- .../test/testdata/reproducible/issue38068.go | 2 +- src/cmd/compile/internal/typebits/typebits.go | 12 +++--- src/cmd/compile/internal/typecheck/const.go | 2 +- src/cmd/compile/internal/typecheck/dcl.go | 12 +++--- src/cmd/compile/internal/typecheck/expr.go | 6 +-- src/cmd/compile/internal/typecheck/func.go | 20 ++++----- src/cmd/compile/internal/typecheck/iimport.go | 4 +- src/cmd/compile/internal/typecheck/stmt.go | 8 ++-- src/cmd/compile/internal/typecheck/subr.go | 10 ++--- src/cmd/compile/internal/typecheck/syms.go | 4 +- .../compile/internal/typecheck/typecheck.go | 8 ++-- src/cmd/compile/internal/types/alg.go | 4 +- src/cmd/compile/internal/types/fmt.go | 2 +- src/cmd/compile/internal/types/size.go | 41 +++++++++---------- src/cmd/compile/internal/types/type.go | 4 +- src/cmd/compile/internal/walk/builtin.go | 6 +-- src/cmd/compile/internal/walk/closure.go | 2 +- src/cmd/compile/internal/walk/compare.go | 4 +- src/cmd/compile/internal/walk/convert.go | 4 +- src/cmd/compile/internal/walk/expr.go | 14 +++---- src/cmd/compile/internal/walk/order.go | 6 +-- src/cmd/compile/internal/walk/range.go | 8 ++-- src/cmd/compile/internal/walk/select.go | 4 +- src/cmd/compile/internal/walk/switch.go | 4 +- src/cmd/compile/internal/walk/walk.go | 10 ++--- src/cmd/internal/goobj/mkbuiltin.go | 4 +- src/cmd/internal/obj/textflag.go | 2 +- src/embed/embed.go | 4 +- src/reflect/type.go | 2 +- src/runtime/runtime2.go | 2 +- src/runtime/type.go | 2 +- 59 files 
changed, 176 insertions(+), 177 deletions(-) diff --git a/src/cmd/asm/internal/asm/parse.go b/src/cmd/asm/internal/asm/parse.go index 154cf9c7a7854..f1d37bc2c8d72 100644 --- a/src/cmd/asm/internal/asm/parse.go +++ b/src/cmd/asm/internal/asm/parse.go @@ -305,7 +305,7 @@ func (p *Parser) pseudo(word string, operands [][]lex.Token) bool { // references and writes symabis information to w. // // The symabis format is documented at -// cmd/compile/internal/gc.readSymABIs. +// cmd/compile/internal/ssagen.ReadSymABIs. func (p *Parser) symDefRef(w io.Writer, word string, operands [][]lex.Token) { switch word { case "TEXT": diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go index d35b8452f938a..c38bbe627210d 100644 --- a/src/cmd/compile/internal/base/flag.go +++ b/src/cmd/compile/internal/base/flag.go @@ -174,7 +174,7 @@ func ParseFlags() { if (*Flag.Shared || *Flag.Dynlink || *Flag.LinkShared) && !Ctxt.Arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X) { log.Fatalf("%s/%s does not support -shared", objabi.GOOS, objabi.GOARCH) } - parseSpectre(Flag.Spectre) // left as string for recordFlags + parseSpectre(Flag.Spectre) // left as string for RecordFlags Ctxt.Flag_shared = Ctxt.Flag_dynlink || Ctxt.Flag_shared Ctxt.Flag_optimize = Flag.N == 0 diff --git a/src/cmd/compile/internal/base/print.go b/src/cmd/compile/internal/base/print.go index 9855dfdad0627..668c600d31774 100644 --- a/src/cmd/compile/internal/base/print.go +++ b/src/cmd/compile/internal/base/print.go @@ -121,7 +121,7 @@ func ErrorfAt(pos src.XPos, format string, args ...interface{}) { lasterror.syntax = pos } else { // only one of multiple equal non-syntax errors per line - // (flusherrors shows only one of them, so we filter them + // (FlushErrors shows only one of them, so we filter them // here as best as we can (they may not appear in order) // so that we don't count them here and exit early, and // then have nothing to show for.) diff --git a/src/cmd/compile/internal/bitvec/bv.go b/src/cmd/compile/internal/bitvec/bv.go index 1e084576d1301..bcac1fe351fac 100644 --- a/src/cmd/compile/internal/bitvec/bv.go +++ b/src/cmd/compile/internal/bitvec/bv.go @@ -37,7 +37,7 @@ func NewBulk(nbit int32, count int32) Bulk { nword := (nbit + wordBits - 1) / wordBits size := int64(nword) * int64(count) if int64(int32(size*4)) != size*4 { - base.Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size) + base.Fatalf("NewBulk too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size) } return Bulk{ words: make([]uint32, size), diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index 79e5a98c91539..96c2e02146dcb 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -856,7 +856,7 @@ func (e *escape) discards(l ir.Nodes) { } } -// addr evaluates an addressable expression n and returns an EscHole +// addr evaluates an addressable expression n and returns a hole // that represents storing into the represented location. func (e *escape) addr(n ir.Node) hole { if n == nil || ir.IsBlank(n) { @@ -1785,7 +1785,7 @@ func (l leaks) Encode() string { return s } -// parseLeaks parses a binary string representing an EscLeaks. 
+// parseLeaks parses a binary string representing a leaks func parseLeaks(s string) leaks { var l leaks if !strings.HasPrefix(s, "esc:") { diff --git a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go index 6e347bf0f119f..ba67c58c45587 100644 --- a/src/cmd/compile/internal/gc/compile.go +++ b/src/cmd/compile/internal/gc/compile.go @@ -72,7 +72,7 @@ func enqueueFunc(fn *ir.Func) { func prepareFunc(fn *ir.Func) { // Set up the function's LSym early to avoid data races with the assemblers. // Do this before walk, as walk needs the LSym to set attributes/relocations - // (e.g. in markTypeUsedInInterface). + // (e.g. in MarkTypeUsedInInterface). ssagen.InitLSym(fn, true) // Calculate parameter offsets. diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 9ecdd510b18d9..e9ac24352779b 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -121,7 +121,7 @@ func Main(archInit func(*ssagen.ArchInfo)) { log.Fatalf("compiler not built with support for -t") } - // Enable inlining (after recordFlags, to avoid recording the rewritten -l). For now: + // Enable inlining (after RecordFlags, to avoid recording the rewritten -l). For now: // default: inlining on. (Flag.LowerL == 1) // -l: inlining off (Flag.LowerL == 0) // -l=2, -l=3: inlining on again, with extra debugging (Flag.LowerL > 1) @@ -193,7 +193,7 @@ func Main(archInit func(*ssagen.ArchInfo)) { typecheck.Target = new(ir.Package) typecheck.NeedITab = func(t, iface *types.Type) { reflectdata.ITabAddr(t, iface) } - typecheck.NeedRuntimeType = reflectdata.NeedRuntimeType // TODO(rsc): typenamesym for lock? + typecheck.NeedRuntimeType = reflectdata.NeedRuntimeType // TODO(rsc): TypeSym for lock? base.AutogeneratedPos = makePos(src.NewFileBase("", ""), 1, 0) @@ -261,7 +261,7 @@ func Main(archInit func(*ssagen.ArchInfo)) { escape.Funcs(typecheck.Target.Decls) // Collect information for go:nowritebarrierrec - // checking. This must happen before transformclosure. + // checking. This must happen before transforming closures during Walk // We'll do the final check after write barriers are // inserted. if base.Flag.CompilingRuntime { @@ -269,7 +269,7 @@ func Main(archInit func(*ssagen.ArchInfo)) { } // Prepare for SSA compilation. - // This must be before peekitabs, because peekitabs + // This must be before CompileITabs, because CompileITabs // can trigger function compilation. typecheck.InitRuntime() ssagen.InitConfig() diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 3e55b7688e8c9..847d84966646e 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -121,7 +121,7 @@ func dumpdata() { reflectdata.WriteBasicTypes() dumpembeds() - // Calls to dumpsignats can generate functions, + // Calls to WriteRuntimeTypes can generate functions, // like method wrappers and hash and equality routines. // Compile any generated functions, process any new resulting types, repeat. // This can't loop forever, because there is no way to generate an infinite diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 1811feebe9801..4bb849cdaee81 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -4,7 +4,7 @@ // // The inlining facility makes 2 passes: first caninl determines which // functions are suitable for inlining, and for those that are it -// saves a copy of the body. 
Then inlcalls walks each function body to +// saves a copy of the body. Then InlineCalls walks each function body to // expand calls to inlinable functions. // // The Debug.l flag controls the aggressiveness. Note that main() swaps level 0 and 1, @@ -79,7 +79,7 @@ func InlinePackage() { // fn and ->nbody will already have been typechecked. func CanInline(fn *ir.Func) { if fn.Nname == nil { - base.Fatalf("caninl no nname %+v", fn) + base.Fatalf("CanInline no nname %+v", fn) } var reason string // reason, if any, that the function was not inlined @@ -144,7 +144,7 @@ func CanInline(fn *ir.Func) { } if fn.Typecheck() == 0 { - base.Fatalf("caninl on non-typechecked function %v", fn) + base.Fatalf("CanInline on non-typechecked function %v", fn) } n := fn.Nname @@ -200,11 +200,11 @@ func Inline_Flood(n *ir.Name, exportsym func(*ir.Name)) { return } if n.Op() != ir.ONAME || n.Class != ir.PFUNC { - base.Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op(), n.Class) + base.Fatalf("Inline_Flood: unexpected %v, %v, %v", n, n.Op(), n.Class) } fn := n.Func if fn == nil { - base.Fatalf("inlFlood: missing Func on %v", n) + base.Fatalf("Inline_Flood: missing Func on %v", n) } if fn.Inl == nil { return diff --git a/src/cmd/compile/internal/ir/const.go b/src/cmd/compile/internal/ir/const.go index bfa013623255c..eaa4d5b6b15ca 100644 --- a/src/cmd/compile/internal/ir/const.go +++ b/src/cmd/compile/internal/ir/const.go @@ -77,7 +77,7 @@ func ConstOverflow(v constant.Value, t *types.Type) bool { ft := types.FloatForComplex(t) return ConstOverflow(constant.Real(v), ft) || ConstOverflow(constant.Imag(v), ft) } - base.Fatalf("doesoverflow: %v, %v", v, t) + base.Fatalf("ConstOverflow: %v, %v", v, t) panic("unreachable") } diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 30cddd298ef52..4afdadf57b8bc 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -63,7 +63,7 @@ type Func struct { Exit Nodes // ONAME nodes for all params/locals for this func/closure, does NOT - // include closurevars until transformclosure runs. + // include closurevars until transforming closures during walk. // Names must be listed PPARAMs, PPARAMOUTs, then PAUTOs, // with PPARAMs and PPARAMOUTs in order corresponding to the function signature. // However, as anonymous or blank PPARAMs are not actually declared, diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index b13c6b7795ec2..4e4c0df993b59 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -343,7 +343,7 @@ type SelectStmt struct { HasBreak bool // TODO(rsc): Instead of recording here, replace with a block? - Compiled Nodes // compiled form, after walkswitch + Compiled Nodes // compiled form, after walkSwitch } func NewSelectStmt(pos src.XPos, cases []*CommClause) *SelectStmt { @@ -376,7 +376,7 @@ type SwitchStmt struct { HasBreak bool // TODO(rsc): Instead of recording here, replace with a block? 
- Compiled Nodes // compiled form, after walkswitch + Compiled Nodes // compiled form, after walkSwitch } func NewSwitchStmt(pos src.XPos, tag Node, cases []*CaseClause) *SwitchStmt { diff --git a/src/cmd/compile/internal/liveness/bvset.go b/src/cmd/compile/internal/liveness/bvset.go index 21bc1fee4d62b..3431f54ede84e 100644 --- a/src/cmd/compile/internal/liveness/bvset.go +++ b/src/cmd/compile/internal/liveness/bvset.go @@ -47,7 +47,7 @@ func (m *bvecSet) grow() { m.index = newIndex } -// add adds bv to the set and returns its index in m.extractUniqe. +// add adds bv to the set and returns its index in m.extractUnique. // The caller must not modify bv after this. func (m *bvecSet) add(bv bitvec.BitVec) int { if len(m.uniq)*4 >= len(m.index) { diff --git a/src/cmd/compile/internal/liveness/plive.go b/src/cmd/compile/internal/liveness/plive.go index abc9583d5ac14..c70db6ed18468 100644 --- a/src/cmd/compile/internal/liveness/plive.go +++ b/src/cmd/compile/internal/liveness/plive.go @@ -1060,7 +1060,7 @@ func (lv *liveness) printDebug() { func (lv *liveness) emit() (argsSym, liveSym *obj.LSym) { // Size args bitmaps to be just large enough to hold the largest pointer. // First, find the largest Xoffset node we care about. - // (Nodes without pointers aren't in lv.vars; see livenessShouldTrack.) + // (Nodes without pointers aren't in lv.vars; see ShouldTrack.) var maxArgNode *ir.Name for _, n := range lv.vars { switch n.Class { diff --git a/src/cmd/compile/internal/noder/import.go b/src/cmd/compile/internal/noder/import.go index 08f19a4028466..ca041a156c145 100644 --- a/src/cmd/compile/internal/noder/import.go +++ b/src/cmd/compile/internal/noder/import.go @@ -418,7 +418,7 @@ func clearImports() { if types.IsDotAlias(s) { // throw away top-level name left over // from previous import . "x" - // We'll report errors after type checking in checkDotImports. + // We'll report errors after type checking in CheckDotImports. s.Def = nil continue } diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index edd30a1fc1200..99c0e4addeb85 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -86,7 +86,7 @@ func ParseFiles(filenames []string) uint { if base.SyntaxErrors() != 0 { base.ErrorExit() } - // Always run testdclstack here, even when debug_dclstack is not set, as a sanity measure. + // Always run CheckDclstack here, even when debug_dclstack is not set, as a sanity measure. types.CheckDclstack() } @@ -638,7 +638,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node { } } else { f.Shortname = name - name = ir.BlankNode.Sym() // filled in by typecheckfunc + name = ir.BlankNode.Sym() // filled in by tcFunc } f.Nname = ir.NewNameAt(p.pos(fun.Name), name) @@ -1084,7 +1084,7 @@ func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []ir.Node { if s == nil { } else if s.Op() == ir.OBLOCK && len(s.(*ir.BlockStmt).List) > 0 { // Inline non-empty block. - // Empty blocks must be preserved for checkreturn. + // Empty blocks must be preserved for CheckReturn. nodes = append(nodes, s.(*ir.BlockStmt).List...) 
} else { nodes = append(nodes, s) @@ -1860,7 +1860,7 @@ func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node { fn := ir.NewFunc(p.pos(expr)) fn.SetIsHiddenClosure(ir.CurFunc != nil) - fn.Nname = ir.NewNameAt(p.pos(expr), ir.BlankNode.Sym()) // filled in by typecheckclosure + fn.Nname = ir.NewNameAt(p.pos(expr), ir.BlankNode.Sym()) // filled in by tcClosure fn.Nname.Func = fn fn.Nname.Ntype = xtype fn.Nname.Defn = fn diff --git a/src/cmd/compile/internal/objw/prog.go b/src/cmd/compile/internal/objw/prog.go index 8d24f94aa5660..b5ac4dda1eb97 100644 --- a/src/cmd/compile/internal/objw/prog.go +++ b/src/cmd/compile/internal/objw/prog.go @@ -205,7 +205,7 @@ func (pp *Progs) Append(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, func (pp *Progs) SetText(fn *ir.Func) { if pp.Text != nil { - base.Fatalf("Progs.settext called twice") + base.Fatalf("Progs.SetText called twice") } ptxt := pp.Prog(obj.ATEXT) pp.Text = ptxt diff --git a/src/cmd/compile/internal/pkginit/init.go b/src/cmd/compile/internal/pkginit/init.go index 5bc66c7e1be7d..7cad2622146d0 100644 --- a/src/cmd/compile/internal/pkginit/init.go +++ b/src/cmd/compile/internal/pkginit/init.go @@ -60,10 +60,10 @@ func Task() *ir.Name { fns = append(fns, fn.Linksym()) } if typecheck.InitTodoFunc.Dcl != nil { - // We only generate temps using initTodo if there + // We only generate temps using InitTodoFunc if there // are package-scope initialization statements, so // something's weird if we get here. - base.Fatalf("initTodo still has declarations") + base.Fatalf("InitTodoFunc still has declarations") } typecheck.InitTodoFunc = nil diff --git a/src/cmd/compile/internal/reflectdata/alg.go b/src/cmd/compile/internal/reflectdata/alg.go index d576053753bb6..fcd824f164837 100644 --- a/src/cmd/compile/internal/reflectdata/alg.go +++ b/src/cmd/compile/internal/reflectdata/alg.go @@ -689,7 +689,7 @@ func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) { // eqtab must be evaluated before eqdata, and shortcircuiting is required. func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) { if !types.Identical(s.Type(), t.Type()) { - base.Fatalf("eqinterface %v %v", s.Type(), t.Type()) + base.Fatalf("EqInterface %v %v", s.Type(), t.Type()) } // func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool) // func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool) diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go index 989bcf9ab9a63..efe863cc3fa39 100644 --- a/src/cmd/compile/internal/reflectdata/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -32,7 +32,7 @@ type itabEntry struct { // symbols of each method in // the itab, sorted by byte offset; - // filled in by peekitabs + // filled in by CompileITabs entries []*obj.LSym } @@ -401,7 +401,7 @@ func dimportpath(p *types.Pkg) { } // If we are compiling the runtime package, there are two runtime packages around - // -- localpkg and Runtimepkg. We don't want to produce import path symbols for + // -- localpkg and Pkgs.Runtime. We don't want to produce import path symbols for // both of them, so just produce one for localpkg. 
if base.Ctxt.Pkgpath == "runtime" && p == ir.Pkgs.Runtime { return @@ -811,7 +811,7 @@ func TypeSymPrefix(prefix string, t *types.Type) *types.Sym { func TypeSym(t *types.Type) *types.Sym { if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() { - base.Fatalf("typenamesym %v", t) + base.Fatalf("TypeSym %v", t) } if t.Kind() == types.TFUNC && t.Recv() != nil { base.Fatalf("misuse of method type: %v", t) @@ -853,7 +853,7 @@ func TypePtr(t *types.Type) *ir.AddrExpr { func ITabAddr(t, itype *types.Type) *ir.AddrExpr { if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() { - base.Fatalf("itabname(%v, %v)", t, itype) + base.Fatalf("ITabAddr(%v, %v)", t, itype) } s := ir.Pkgs.Itab.Lookup(t.ShortString() + "," + itype.ShortString()) if s.Def == nil { @@ -936,7 +936,7 @@ func formalType(t *types.Type) *types.Type { func writeType(t *types.Type) *obj.LSym { t = formalType(t) if t.IsUntyped() { - base.Fatalf("dtypesym %v", t) + base.Fatalf("writeType %v", t) } s := types.TypeSym(t) @@ -1275,7 +1275,7 @@ func genfun(t, it *types.Type) []*obj.LSym { } // ITabSym uses the information gathered in -// peekitabs to de-virtualize interface methods. +// CompileITabs to de-virtualize interface methods. // Since this is called by the SSA backend, it shouldn't // generate additional Nodes, Syms, etc. func ITabSym(it *obj.LSym, offset int64) *obj.LSym { @@ -1312,7 +1312,7 @@ func NeedRuntimeType(t *types.Type) { } func WriteRuntimeTypes() { - // Process signatset. Use a loop, as dtypesym adds + // Process signatset. Use a loop, as writeType adds // entries to signatset while it is being processed. signats := make([]typeAndStr, len(signatslice)) for len(signatslice) > 0 { @@ -1617,13 +1617,13 @@ func (p *gcProg) emit(t *types.Type, offset int64) { } switch t.Kind() { default: - base.Fatalf("GCProg.emit: unexpected type %v", t) + base.Fatalf("gcProg.emit: unexpected type %v", t) case types.TSTRING: p.w.Ptr(offset / int64(types.PtrSize)) case types.TINTER: - // Note: the first word isn't a pointer. See comment in plive.go:onebitwalktype1. + // Note: the first word isn't a pointer. See comment in typebits.Set p.w.Ptr(offset/int64(types.PtrSize) + 1) case types.TSLICE: @@ -1632,7 +1632,7 @@ func (p *gcProg) emit(t *types.Type, offset int64) { case types.TARRAY: if t.NumElem() == 0 { // should have been handled by haspointers check above - base.Fatalf("GCProg.emit: empty array") + base.Fatalf("gcProg.emit: empty array") } // Flatten array-of-array-of-array to just a big array by multiplying counts. diff --git a/src/cmd/compile/internal/ssagen/abi.go b/src/cmd/compile/internal/ssagen/abi.go index 7ff8e21a48cfb..274c543ca553a 100644 --- a/src/cmd/compile/internal/ssagen/abi.go +++ b/src/cmd/compile/internal/ssagen/abi.go @@ -154,7 +154,7 @@ func InitLSym(f *ir.Func, hasBody bool) { // makes calls to helpers to create ABI wrappers if needed. func selectLSym(f *ir.Func, hasBody bool) { if f.LSym != nil { - base.FatalfAt(f.Pos(), "Func.initLSym called twice on %v", f) + base.FatalfAt(f.Pos(), "InitLSym called twice on %v", f) } if nam := f.Nname; !ir.IsBlank(nam) { diff --git a/src/cmd/compile/internal/ssagen/nowb.go b/src/cmd/compile/internal/ssagen/nowb.go index 60cfb2f698885..a2434366a022c 100644 --- a/src/cmd/compile/internal/ssagen/nowb.go +++ b/src/cmd/compile/internal/ssagen/nowb.go @@ -45,7 +45,7 @@ type nowritebarrierrecCall struct { } // newNowritebarrierrecChecker creates a nowritebarrierrecChecker. 
It -// must be called before transformclosure and walk. +// must be called before walk func newNowritebarrierrecChecker() *nowritebarrierrecChecker { c := &nowritebarrierrecChecker{ extraCalls: make(map[*ir.Func][]nowritebarrierrecCall), @@ -54,7 +54,7 @@ func newNowritebarrierrecChecker() *nowritebarrierrecChecker { // Find all systemstack calls and record their targets. In // general, flow analysis can't see into systemstack, but it's // important to handle it for this check, so we model it - // directly. This has to happen before transformclosure since + // directly. This has to happen before transforming closures in walk since // it's a lot harder to work out the argument after. for _, n := range typecheck.Target.Decls { if n.Op() != ir.ODCLFUNC { diff --git a/src/cmd/compile/internal/ssagen/pgen.go b/src/cmd/compile/internal/ssagen/pgen.go index bbd319d73546b..182f8408cfb40 100644 --- a/src/cmd/compile/internal/ssagen/pgen.go +++ b/src/cmd/compile/internal/ssagen/pgen.go @@ -96,7 +96,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { if n, ok := v.Aux.(*ir.Name); ok { switch n.Class { case ir.PPARAM, ir.PPARAMOUT: - // Don't modify nodfp; it is a global. + // Don't modify RegFP; it is a global. if n != ir.RegFP { n.SetUsed(true) } diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 097cfacc23d53..7726ecac55775 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -1508,10 +1508,10 @@ func (s *state) stmt(n ir.Node) { // Currently doesn't really work because (*p)[:len(*p)] appears here as: // tmp = len(*p) // (*p)[:tmp] - //if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) { + //if j != nil && (j.Op == OLEN && SameSafeExpr(j.Left, n.Left)) { // j = nil //} - //if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) { + //if k != nil && (k.Op == OCAP && SameSafeExpr(k.Left, n.Left)) { // k = nil //} if i == nil { @@ -6462,7 +6462,7 @@ func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) { // in the generated code. if p.IsStmt() != src.PosIsStmt { p = p.WithNotStmt() - // Calls use the pos attached to v, but copy the statement mark from SSAGenState + // Calls use the pos attached to v, but copy the statement mark from State } s.SetPos(p) } else { @@ -7260,7 +7260,7 @@ func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot if n.Type().IsEmptyInterface() { f = ".type" } - c := e.SplitSlot(&name, f, 0, u) // see comment in plive.go:onebitwalktype1. + c := e.SplitSlot(&name, f, 0, u) // see comment in typebits.Set d := e.SplitSlot(&name, ".data", u.Size(), t) return c, d } diff --git a/src/cmd/compile/internal/staticdata/data.go b/src/cmd/compile/internal/staticdata/data.go index 4b12590fde4e7..4dbc11c3c4af1 100644 --- a/src/cmd/compile/internal/staticdata/data.go +++ b/src/cmd/compile/internal/staticdata/data.go @@ -29,13 +29,13 @@ import ( // Neither n nor a is modified. func InitAddr(n *ir.Name, noff int64, a *ir.Name, aoff int64) { if n.Op() != ir.ONAME { - base.Fatalf("addrsym n op %v", n.Op()) + base.Fatalf("InitAddr n op %v", n.Op()) } if n.Sym() == nil { - base.Fatalf("addrsym nil n sym") + base.Fatalf("InitAddr nil n sym") } if a.Op() != ir.ONAME { - base.Fatalf("addrsym a op %v", a.Op()) + base.Fatalf("InitAddr a op %v", a.Op()) } s := n.Linksym() s.WriteAddr(base.Ctxt, noff, types.PtrSize, a.Linksym(), aoff) @@ -45,13 +45,13 @@ func InitAddr(n *ir.Name, noff int64, a *ir.Name, aoff int64) { // Neither n nor f is modified. 
func InitFunc(n *ir.Name, noff int64, f *ir.Name) { if n.Op() != ir.ONAME { - base.Fatalf("pfuncsym n op %v", n.Op()) + base.Fatalf("InitFunc n op %v", n.Op()) } if n.Sym() == nil { - base.Fatalf("pfuncsym nil n sym") + base.Fatalf("InitFunc nil n sym") } if f.Class != ir.PFUNC { - base.Fatalf("pfuncsym class not PFUNC %d", f.Class) + base.Fatalf("InitFunc class not PFUNC %d", f.Class) } s := n.Linksym() s.WriteAddr(base.Ctxt, noff, types.PtrSize, FuncLinksym(f), 0) @@ -62,7 +62,7 @@ func InitFunc(n *ir.Name, noff int64, f *ir.Name) { func InitSlice(n *ir.Name, noff int64, arr *ir.Name, lencap int64) { s := n.Linksym() if arr.Op() != ir.ONAME { - base.Fatalf("slicesym non-name arr %v", arr) + base.Fatalf("InitSlice non-name arr %v", arr) } s.WriteAddr(base.Ctxt, noff, types.PtrSize, arr.Linksym(), 0) s.WriteInt(base.Ctxt, noff+types.SliceLenOffset, types.PtrSize, lencap) @@ -71,7 +71,7 @@ func InitSlice(n *ir.Name, noff int64, arr *ir.Name, lencap int64) { func InitSliceBytes(nam *ir.Name, off int64, s string) { if nam.Op() != ir.ONAME { - base.Fatalf("slicebytes %v", nam) + base.Fatalf("InitSliceBytes %v", nam) } InitSlice(nam, off, slicedata(nam.Pos(), s), int64(len(s))) } @@ -243,14 +243,14 @@ func FuncSym(s *types.Sym) *types.Sym { // except for the types package, which is protected separately. // Reusing funcsymsmu to also cover this package lookup // avoids a general, broader, expensive package lookup mutex. - // Note makefuncsym also does package look-up of func sym names, + // Note NeedFuncSym also does package look-up of func sym names, // but that it is only called serially, from the front end. funcsymsmu.Lock() sf, existed := s.Pkg.LookupOK(ir.FuncSymName(s)) // Don't export s·f when compiling for dynamic linking. // When dynamically linking, the necessary function - // symbols will be created explicitly with makefuncsym. - // See the makefuncsym comment for details. + // symbols will be created explicitly with NeedFuncSym. + // See the NeedFuncSym comment for details. if !base.Ctxt.Flag_dynlink && !existed { funcsyms = append(funcsyms, s) } @@ -310,16 +310,16 @@ func WriteFuncSyms() { // Neither n nor c is modified. func InitConst(n *ir.Name, noff int64, c ir.Node, wid int) { if n.Op() != ir.ONAME { - base.Fatalf("litsym n op %v", n.Op()) + base.Fatalf("InitConst n op %v", n.Op()) } if n.Sym() == nil { - base.Fatalf("litsym nil n sym") + base.Fatalf("InitConst nil n sym") } if c.Op() == ir.ONIL { return } if c.Op() != ir.OLITERAL { - base.Fatalf("litsym c op %v", c.Op()) + base.Fatalf("InitConst c op %v", c.Op()) } s := n.Linksym() switch u := c.Val(); u.Kind() { @@ -358,6 +358,6 @@ func InitConst(n *ir.Name, noff int64, c ir.Node, wid int) { s.WriteInt(base.Ctxt, noff+int64(types.PtrSize), types.PtrSize, int64(len(i))) default: - base.Fatalf("litsym unhandled OLITERAL %v", c) + base.Fatalf("InitConst unhandled OLITERAL %v", c) } } diff --git a/src/cmd/compile/internal/staticdata/embed.go b/src/cmd/compile/internal/staticdata/embed.go index 2e551f0b2c6e7..2e15841fe2d77 100644 --- a/src/cmd/compile/internal/staticdata/embed.go +++ b/src/cmd/compile/internal/staticdata/embed.go @@ -82,7 +82,7 @@ func embedKindApprox(typ ir.Node) int { // These are not guaranteed to match only string and []byte - // maybe the local package has redefined one of those words. // But it's the best we can do now during the noder. - // The stricter check happens later, in initEmbed calling embedKind. + // The stricter check happens later, in WriteEmbed calling embedKind. 
if typ.Sym() != nil && typ.Sym().Name == "string" && typ.Sym().Pkg == types.LocalPkg { return embedString } diff --git a/src/cmd/compile/internal/staticinit/sched.go b/src/cmd/compile/internal/staticinit/sched.go index 64946ad2476d8..8c195742e6925 100644 --- a/src/cmd/compile/internal/staticinit/sched.go +++ b/src/cmd/compile/internal/staticinit/sched.go @@ -455,7 +455,7 @@ var statuniqgen int // name generator for static temps // StaticName returns a name backed by a (writable) static data symbol. // Use readonlystaticname for read-only node. func StaticName(t *types.Type) *ir.Name { - // Don't use lookupN; it interns the resulting string, but these are all unique. + // Don't use LookupNum; it interns the resulting string, but these are all unique. n := typecheck.NewName(typecheck.Lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen))) statuniqgen++ typecheck.Declare(n, ir.PEXTERN) diff --git a/src/cmd/compile/internal/test/abiutilsaux_test.go b/src/cmd/compile/internal/test/abiutilsaux_test.go index 7b84e73947ea2..10fb66874578a 100644 --- a/src/cmd/compile/internal/test/abiutilsaux_test.go +++ b/src/cmd/compile/internal/test/abiutilsaux_test.go @@ -127,7 +127,7 @@ func abitest(t *testing.T, ft *types.Type, exp expectedDump) { emptyResString := emptyRes.String() // Walk the results and make sure the offsets assigned match - // up with those assiged by dowidth. This checks to make sure that + // up with those assiged by CalcSize. This checks to make sure that // when we have no available registers the ABI assignment degenerates // back to the original ABI0. diff --git a/src/cmd/compile/internal/test/testdata/reproducible/issue38068.go b/src/cmd/compile/internal/test/testdata/reproducible/issue38068.go index db5ca7dcbe7f6..b87daed8e9882 100644 --- a/src/cmd/compile/internal/test/testdata/reproducible/issue38068.go +++ b/src/cmd/compile/internal/test/testdata/reproducible/issue38068.go @@ -53,7 +53,7 @@ func G(x *A, n int) { return } // Address-taken local of type A, which will insure that the - // compiler's dtypesym() routine will create a method wrapper. + // compiler's writeType() routine will create a method wrapper. var a, b A a.next = x a.prev = &b diff --git a/src/cmd/compile/internal/typebits/typebits.go b/src/cmd/compile/internal/typebits/typebits.go index 63a2bb3ffa4b4..1c1b077423dc9 100644 --- a/src/cmd/compile/internal/typebits/typebits.go +++ b/src/cmd/compile/internal/typebits/typebits.go @@ -15,7 +15,7 @@ import ( // on future calls with the same type t. 
func Set(t *types.Type, off int64, bv bitvec.BitVec) { if t.Align > 0 && off&int64(t.Align-1) != 0 { - base.Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off) + base.Fatalf("typebits.Set: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off) } if !t.HasPointers() { // Note: this case ensures that pointers to go:notinheap types @@ -26,14 +26,14 @@ func Set(t *types.Type, off int64, bv bitvec.BitVec) { switch t.Kind() { case types.TPTR, types.TUNSAFEPTR, types.TFUNC, types.TCHAN, types.TMAP: if off&int64(types.PtrSize-1) != 0 { - base.Fatalf("onebitwalktype1: invalid alignment, %v", t) + base.Fatalf("typebits.Set: invalid alignment, %v", t) } bv.Set(int32(off / int64(types.PtrSize))) // pointer case types.TSTRING: // struct { byte *str; intgo len; } if off&int64(types.PtrSize-1) != 0 { - base.Fatalf("onebitwalktype1: invalid alignment, %v", t) + base.Fatalf("typebits.Set: invalid alignment, %v", t) } bv.Set(int32(off / int64(types.PtrSize))) //pointer in first slot @@ -42,7 +42,7 @@ func Set(t *types.Type, off int64, bv bitvec.BitVec) { // or, when isnilinter(t)==true: // struct { Type *type; void *data; } if off&int64(types.PtrSize-1) != 0 { - base.Fatalf("onebitwalktype1: invalid alignment, %v", t) + base.Fatalf("typebits.Set: invalid alignment, %v", t) } // The first word of an interface is a pointer, but we don't // treat it as such. @@ -61,7 +61,7 @@ func Set(t *types.Type, off int64, bv bitvec.BitVec) { case types.TSLICE: // struct { byte *array; uintgo len; uintgo cap; } if off&int64(types.PtrSize-1) != 0 { - base.Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t) + base.Fatalf("typebits.Set: invalid TARRAY alignment, %v", t) } bv.Set(int32(off / int64(types.PtrSize))) // pointer in first slot (BitsPointer) @@ -82,6 +82,6 @@ func Set(t *types.Type, off int64, bv bitvec.BitVec) { } default: - base.Fatalf("onebitwalktype1: unexpected type, %v", t) + base.Fatalf("typebits.Set: unexpected type, %v", t) } } diff --git a/src/cmd/compile/internal/typecheck/const.go b/src/cmd/compile/internal/typecheck/const.go index d6bf1019748de..1a8e58383ad34 100644 --- a/src/cmd/compile/internal/typecheck/const.go +++ b/src/cmd/compile/internal/typecheck/const.go @@ -623,7 +623,7 @@ func OrigInt(n ir.Node, v int64) ir.Node { return OrigConst(n, constant.MakeInt64(v)) } -// defaultlit on both nodes simultaneously; +// DefaultLit on both nodes simultaneously; // if they're both ideal going in they better // get the same type going out. // force means must assign concrete (non-ideal) type. diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go index c7d7506fd1da0..c324238bf1ef0 100644 --- a/src/cmd/compile/internal/typecheck/dcl.go +++ b/src/cmd/compile/internal/typecheck/dcl.go @@ -41,7 +41,7 @@ func Declare(n *ir.Name, ctxt ir.Class) { s := n.Sym() - // kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later. + // kludgy: TypecheckAllowed means we're past parsing. Eg reflectdata.methodWrapper may declare out of package names later. 
if !inimport && !TypecheckAllowed && s.Pkg != types.LocalPkg { base.ErrorfAt(n.Pos(), "cannot declare name %v", s) } @@ -308,7 +308,7 @@ func fakeRecvField() *types.Field { return types.NewField(src.NoXPos, nil, types.FakeRecvType()) } -var funcStack []funcStackEnt // stack of previous values of Curfn/dclcontext +var funcStack []funcStackEnt // stack of previous values of ir.CurFunc/DeclContext type funcStackEnt struct { curfn *ir.Func @@ -398,14 +398,14 @@ func Temp(t *types.Type) *ir.Name { // make a new Node off the books func TempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name { if curfn == nil { - base.Fatalf("no curfn for tempAt") + base.Fatalf("no curfn for TempAt") } if curfn.Op() == ir.OCLOSURE { - ir.Dump("tempAt", curfn) - base.Fatalf("adding tempAt to wrong closure function") + ir.Dump("TempAt", curfn) + base.Fatalf("adding TempAt to wrong closure function") } if t == nil { - base.Fatalf("tempAt called with nil type") + base.Fatalf("TempAt called with nil type") } if t.Kind() == types.TFUNC && t.Recv() != nil { base.Fatalf("misuse of method type: %v", t) diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go index 12bfae67a865e..339fb00aa4496 100644 --- a/src/cmd/compile/internal/typecheck/expr.go +++ b/src/cmd/compile/internal/typecheck/expr.go @@ -68,7 +68,7 @@ func tcShift(n, l, r ir.Node) (ir.Node, ir.Node, *types.Type) { return l, r, nil } - // no defaultlit for left + // no DefaultLit for left // the outer context gives the type t = l.Type() if (l.Type() == types.UntypedFloat || l.Type() == types.UntypedComplex) && r.Op() == ir.OLITERAL { @@ -201,7 +201,7 @@ func tcArith(n ir.Node, op ir.Op, l, r ir.Node) (ir.Node, ir.Node, *types.Type) // n.Left = tcCompLit(n.Left) func tcCompLit(n *ir.CompLitExpr) (res ir.Node) { if base.EnableTrace && base.Flag.LowerT { - defer tracePrint("typecheckcomplit", n)(&res) + defer tracePrint("tcCompLit", n)(&res) } lno := base.Pos @@ -838,7 +838,7 @@ func tcStar(n *ir.StarExpr, top int) ir.Node { } if l.Op() == ir.OTYPE { n.SetOTYPE(types.NewPtr(l.Type())) - // Ensure l.Type gets dowidth'd for the backend. Issue 20174. + // Ensure l.Type gets CalcSize'd for the backend. Issue 20174. types.CheckSize(l.Type()) return n } diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index 03a10f594ab24..c832d9700f370 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -100,7 +100,7 @@ func PartialCallType(n *ir.SelectorExpr) *types.Type { return t } -// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck +// Lazy typechecking of imported bodies. For local functions, CanInline will set ->typecheck // because they're a copy of an already checked body. func ImportedBody(fn *ir.Func) { lno := ir.SetPos(fn.Nname) @@ -122,14 +122,14 @@ func ImportedBody(fn *ir.Func) { ImportBody(fn) - // typecheckinl is only for imported functions; + // Stmts(fn.Inl.Body) below is only for imported functions; // their bodies may refer to unsafe as long as the package // was marked safe during import (which was checked then). - // the ->inl of a local function has been typechecked before caninl copied it. + // the ->inl of a local function has been typechecked before CanInline copied it. 
pkg := fnpkg(fn.Nname) if pkg == types.LocalPkg || pkg == nil { - return // typecheckinl on local function + return // ImportedBody on local function } if base.Flag.LowerM > 2 || base.Debug.Export != 0 { @@ -141,10 +141,10 @@ func ImportedBody(fn *ir.Func) { Stmts(fn.Inl.Body) ir.CurFunc = savefn - // During expandInline (which imports fn.Func.Inl.Body), - // declarations are added to fn.Func.Dcl by funcHdr(). Move them + // During ImportBody (which imports fn.Func.Inl.Body), + // declarations are added to fn.Func.Dcl by funcBody(). Move them // to fn.Func.Inl.Dcl for consistency with how local functions - // behave. (Append because typecheckinl may be called multiple + // behave. (Append because ImportedBody may be called multiple // times.) fn.Inl.Dcl = append(fn.Inl.Dcl, fn.Dcl...) fn.Dcl = nil @@ -296,7 +296,7 @@ func tcClosure(clo *ir.ClosureExpr, top int) { fn.SetClosureCalled(top&ctxCallee != 0) // Do not typecheck fn twice, otherwise, we will end up pushing - // fn to Target.Decls multiple times, causing initLSym called twice. + // fn to Target.Decls multiple times, causing InitLSym called twice. // See #30709 if fn.Typecheck() == 1 { clo.SetType(fn.Type()) @@ -343,10 +343,10 @@ func tcClosure(clo *ir.ClosureExpr, top int) { // type check function definition // To be called by typecheck, not directly. -// (Call typecheckFunc instead.) +// (Call typecheck.Func instead.) func tcFunc(n *ir.Func) { if base.EnableTrace && base.Flag.LowerT { - defer tracePrint("typecheckfunc", n)(nil) + defer tracePrint("tcFunc", n)(nil) } n.Nname = AssignExpr(n.Nname).(*ir.Name) diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go index 396d09263a428..c2610229ec5b2 100644 --- a/src/cmd/compile/internal/typecheck/iimport.go +++ b/src/cmd/compile/internal/typecheck/iimport.go @@ -37,7 +37,7 @@ var ( // and offset where that identifier's declaration can be read. DeclImporter = map[*types.Sym]iimporterAndOffset{} - // inlineImporter is like declImporter, but for inline bodies + // inlineImporter is like DeclImporter, but for inline bodies // for function and method symbols. inlineImporter = map[*types.Sym]iimporterAndOffset{} ) @@ -334,7 +334,7 @@ func (r *importReader) doDecl(sym *types.Sym) *ir.Name { recv := r.param() mtyp := r.signature(recv) - // methodSym already marked m.Sym as a function. + // MethodSym already marked m.Sym as a function. m := ir.NewNameAt(mpos, ir.MethodSym(recv.Type, msym)) m.Class = ir.PFUNC m.SetType(mtyp) diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go index 8baa5dda78c07..14ed175be9c97 100644 --- a/src/cmd/compile/internal/typecheck/stmt.go +++ b/src/cmd/compile/internal/typecheck/stmt.go @@ -25,7 +25,7 @@ func typecheckrangeExpr(n *ir.RangeStmt) { } t := RangeExprType(n.X.Type()) - // delicate little dance. see typecheckas2 + // delicate little dance. see tcAssignList if n.Key != nil && !ir.DeclaredBy(n.Key, n) { n.Key = AssignExpr(n.Key) } @@ -90,7 +90,7 @@ func typecheckrangeExpr(n *ir.RangeStmt) { // fill in the var's type. 
func tcAssign(n *ir.AssignStmt) { if base.EnableTrace && base.Flag.LowerT { - defer tracePrint("typecheckas", n)(nil) + defer tracePrint("tcAssign", n)(nil) } if n.Y == nil { @@ -110,7 +110,7 @@ func tcAssign(n *ir.AssignStmt) { func tcAssignList(n *ir.AssignListStmt) { if base.EnableTrace && base.Flag.LowerT { - defer tracePrint("typecheckas2", n)(nil) + defer tracePrint("tcAssignList", n)(nil) } assign(n, n.Lhs, n.Rhs) @@ -119,7 +119,7 @@ func tcAssignList(n *ir.AssignListStmt) { func assign(stmt ir.Node, lhs, rhs []ir.Node) { // delicate little dance. // the definition of lhs may refer to this assignment - // as its definition, in which case it will call typecheckas. + // as its definition, in which case it will call tcAssign. // in that case, do not call typecheck back, or it will cycle. // if the variable has a type (ntype) then typechecking // will not look at defn, so it is okay (and desirable, diff --git a/src/cmd/compile/internal/typecheck/subr.go b/src/cmd/compile/internal/typecheck/subr.go index 569075d684ad3..b6a0870672f40 100644 --- a/src/cmd/compile/internal/typecheck/subr.go +++ b/src/cmd/compile/internal/typecheck/subr.go @@ -81,7 +81,7 @@ func markAddrOf(n ir.Node) ir.Node { // main typecheck has completed. // The argument to OADDR needs to be typechecked because &x[i] takes // the address of x if x is an array, but not if x is a slice. - // Note: outervalue doesn't work correctly until n is typechecked. + // Note: OuterValue doesn't work correctly until n is typechecked. n = typecheck(n, ctxExpr) if x := ir.OuterValue(n); x.Op() == ir.ONAME { x.Name().SetAddrtaken(true) @@ -368,10 +368,10 @@ func assignop(src, dst *types.Type) (ir.Op, string) { var missing, have *types.Field var ptr int if implements(src, dst, &missing, &have, &ptr) { - // Call itabname so that (src, dst) + // Call NeedITab/ITabAddr so that (src, dst) // gets added to itabs early, which allows // us to de-virtualize calls through this - // type/interface pair later. See peekitabs in reflect.go + // type/interface pair later. See CompileITabs in reflect.go if types.IsDirectIface(src) && !dst.IsEmptyInterface() { NeedITab(src, dst) } @@ -441,7 +441,7 @@ func assignop(src, dst *types.Type) (ir.Op, string) { } } - // 6. rule about untyped constants - already converted by defaultlit. + // 6. rule about untyped constants - already converted by DefaultLit. // 7. Any typed value can be assigned to the blank identifier. if dst.Kind() == types.TBLANK { @@ -835,7 +835,7 @@ func lookdot0(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) var slist []symlink // Code to help generate trampoline functions for methods on embedded -// types. These are approx the same as the corresponding adddot +// types. These are approx the same as the corresponding AddImplicitDots // routines except that they expect to be called with unique tasks and // they return the actual methods. 
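A user-level sketch of the embedded-method promotion that the trampoline/AddImplicitDots comment above refers to (an illustration only; the type names are invented and nothing below is code from this CL):

	package main

	import "fmt"

	type inner struct{}

	func (inner) Hello() string { return "hello" }

	type outer struct{ inner } // inner is an embedded field

	func main() {
		var o outer
		// o.Hello() is shorthand for o.inner.Hello(): the compiler inserts
		// the implicit ".inner" selector (AddImplicitDots), and when outer
		// has to satisfy an interface through the promoted method, it
		// generates a small trampoline wrapper method for outer.
		fmt.Println(o.Hello())
	}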
diff --git a/src/cmd/compile/internal/typecheck/syms.go b/src/cmd/compile/internal/typecheck/syms.go index 28db40db91f1e..f6ff2ee5da2ee 100644 --- a/src/cmd/compile/internal/typecheck/syms.go +++ b/src/cmd/compile/internal/typecheck/syms.go @@ -15,7 +15,7 @@ import ( func LookupRuntime(name string) *ir.Name { s := ir.Pkgs.Runtime.Lookup(name) if s == nil || s.Def == nil { - base.Fatalf("syslook: can't find runtime.%s", name) + base.Fatalf("LookupRuntime: can't find runtime.%s", name) } return ir.AsNode(s.Def).(*ir.Name) } @@ -33,7 +33,7 @@ func SubstArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name { n.Class = old.Class n.SetType(types.SubstAny(old.Type(), &types_)) if len(types_) > 0 { - base.Fatalf("substArgTypes: too many argument types") + base.Fatalf("SubstArgTypes: too many argument types") } return n } diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index 814af59772b12..3530e76972571 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -456,7 +456,7 @@ func typecheck(n ir.Node, top int) (res ir.Node) { } // indexlit implements typechecking of untyped values as -// array/slice indexes. It is almost equivalent to defaultlit +// array/slice indexes. It is almost equivalent to DefaultLit // but also accepts untyped numeric values representable as // value of type int (see also checkmake for comparison). // The result of indexlit MUST be assigned back to n, e.g. @@ -938,7 +938,7 @@ func typecheckargs(n ir.InitNode) { // If we're outside of function context, then this call will // be executed during the generated init function. However, // init.go hasn't yet created it. Instead, associate the - // temporary variables with initTodo for now, and init.go + // temporary variables with InitTodoFunc for now, and init.go // will reassociate them later when it's appropriate. static := ir.CurFunc == nil if static { @@ -1890,7 +1890,7 @@ func checkmake(t *types.Type, arg string, np *ir.Node) bool { return false } - // Do range checks for constants before defaultlit + // Do range checks for constants before DefaultLit // to avoid redundant "constant NNN overflows int" errors. if n.Op() == ir.OLITERAL { v := toint(n.Val()) @@ -1904,7 +1904,7 @@ } } - // defaultlit is necessary for non-constants too: n might be 1.1<<k. + // DefaultLit is necessary for non-constants too: n might be 1.1<<k. n = DefaultLit(n, types.Types[types.TINT]) *np = n diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ ... @@ func walkExpr(n ir.Node, init *ir.Nodes) ir.Node { // not okay to use n->ninit when walking n, // because we might replace n with some other node // and would lose the init list. - base.Fatalf("walkexpr init == &n->ninit") + base.Fatalf("walkExpr init == &n->ninit") } if len(n.Init()) != 0 { @@ -81,7 +81,7 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node { switch n.Op() { default: ir.Dump("walk", n) - base.Fatalf("walkexpr: switch 1 unknown op %+v", n.Op()) + base.Fatalf("walkExpr: switch 1 unknown op %+v", n.Op()) panic("unreachable") case ir.ONONAME, ir.OGETG: @@ -91,7 +91,7 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node { // TODO(mdempsky): Just return n; see discussion on CL 38655. // Perhaps refactor to use Node.mayBeShared for these instead. // If these return early, make sure to still call - // stringsym for constant strings. + // StringSym for constant strings.
return n case ir.OMETHEXPR: @@ -221,7 +221,7 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node { return walkIndexMap(n, init) case ir.ORECV: - base.Fatalf("walkexpr ORECV") // should see inside OAS only + base.Fatalf("walkExpr ORECV") // should see inside OAS only panic("unreachable") case ir.OSLICEHEADER: @@ -413,7 +413,7 @@ func safeExpr(n ir.Node, init *ir.Nodes) ir.Node { // make a copy; must not be used as an lvalue if ir.IsAddressable(n) { - base.Fatalf("missing lvalue case in safeexpr: %v", n) + base.Fatalf("missing lvalue case in safeExpr: %v", n) } return cheapExpr(n, init) } @@ -428,7 +428,7 @@ func walkAddString(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { c := len(n.List) if c < 2 { - base.Fatalf("addstr count %d too small", c) + base.Fatalf("walkAddString count %d too small", c) } buf := typecheck.NodNil() @@ -534,7 +534,7 @@ func walkCall1(n *ir.CallExpr, init *ir.Nodes) { // Determine param type. t := params.Field(i).Type if base.Flag.Cfg.Instrumenting || fncall(arg, t) { - // make assignment of fncall to tempAt + // make assignment of fncall to Temp tmp := typecheck.Temp(t) a := convas(ir.NewAssignStmt(base.Pos, tmp, arg), init) tempAssigns = append(tempAssigns, a) diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index 38a9bec6e362b..78063c4db2e9c 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -849,7 +849,7 @@ func (o *orderState) stmt(n ir.Node) { n.X = o.copyExpr(r) // n.Prealloc is the temp for the iterator. - // hiter contains pointers and needs to be zeroed. + // MapIterType contains pointers and needs to be zeroed. n.Prealloc = o.newTemp(reflectdata.MapIterType(xt), true) } n.Key = o.exprInPlace(n.Key) @@ -962,7 +962,7 @@ func (o *orderState) stmt(n ir.Node) { cas.Body.Prepend(o.cleanTempNoPop(t)...) // TODO(mdempsky): Is this actually necessary? - // walkselect appears to walk Ninit. + // walkSelect appears to walk Ninit. cas.Body.Prepend(ir.TakeInit(cas)...) } @@ -986,7 +986,7 @@ func (o *orderState) stmt(n ir.Node) { o.cleanTemp(t) // TODO(rsc): Clean temporaries more aggressively. - // Note that because walkswitch will rewrite some of the + // Note that because walkSwitch will rewrite some of the // switch into a binary search, this is not as easy as it looks. // (If we ran that code here we could invoke order.stmt on // the if-else chain instead.) diff --git a/src/cmd/compile/internal/walk/range.go b/src/cmd/compile/internal/walk/range.go index 9225c429f02f6..2b28e7442dbb3 100644 --- a/src/cmd/compile/internal/walk/range.go +++ b/src/cmd/compile/internal/walk/range.go @@ -71,7 +71,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { } if v1 == nil && v2 != nil { - base.Fatalf("walkrange: v2 != nil while v1 == nil") + base.Fatalf("walkRange: v2 != nil while v1 == nil") } var ifGuard *ir.IfStmt @@ -80,7 +80,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { var init []ir.Node switch t.Kind() { default: - base.Fatalf("walkrange") + base.Fatalf("walkRange") case types.TARRAY, types.TSLICE: if nn := arrayClear(nrange, v1, v2, a); nn != nil { @@ -168,7 +168,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { hit := nrange.Prealloc th := hit.Type() - keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:hiter + keysym := th.Field(0).Sym // depends on layout of iterator struct. 
See reflect.go:MapIterType elemsym := th.Field(1).Sym // ditto fn := typecheck.LookupRuntime("mapiterinit") @@ -388,7 +388,7 @@ func mapClear(m ir.Node) ir.Node { // // in which the evaluation of a is side-effect-free. // -// Parameters are as in walkrange: "for v1, v2 = range a". +// Parameters are as in walkRange: "for v1, v2 = range a". func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node { if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting { return nil diff --git a/src/cmd/compile/internal/walk/select.go b/src/cmd/compile/internal/walk/select.go index 776b020155605..56ba0fa758455 100644 --- a/src/cmd/compile/internal/walk/select.go +++ b/src/cmd/compile/internal/walk/select.go @@ -14,7 +14,7 @@ import ( func walkSelect(sel *ir.SelectStmt) { lno := ir.SetPos(sel) if len(sel.Compiled) != 0 { - base.Fatalf("double walkselect") + base.Fatalf("double walkSelect") } init := ir.TakeInit(sel) @@ -218,7 +218,7 @@ func walkSelectCases(cases []*ir.CommClause) []ir.Node { } } if nsends+nrecvs != ncas { - base.Fatalf("walkselectcases: miscount: %v + %v != %v", nsends, nrecvs, ncas) + base.Fatalf("walkSelectCases: miscount: %v + %v != %v", nsends, nrecvs, ncas) } // run the select diff --git a/src/cmd/compile/internal/walk/switch.go b/src/cmd/compile/internal/walk/switch.go index 0cc1830d3fc47..162de018f637e 100644 --- a/src/cmd/compile/internal/walk/switch.go +++ b/src/cmd/compile/internal/walk/switch.go @@ -49,8 +49,8 @@ func walkSwitchExpr(sw *ir.SwitchStmt) { // Given "switch string(byteslice)", // with all cases being side-effect free, // use a zero-cost alias of the byte slice. - // Do this before calling walkexpr on cond, - // because walkexpr will lower the string + // Do this before calling walkExpr on cond, + // because walkExpr will lower the string // conversion into a runtime call. // See issue 24937 for more discussion. if cond.Op() == ir.OBYTES2STR && allCaseExprsAreSideEffectFree(sw) { diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go index 4ba81b82fef27..f95440d60d535 100644 --- a/src/cmd/compile/internal/walk/walk.go +++ b/src/cmd/compile/internal/walk/walk.go @@ -208,7 +208,7 @@ func mapfast(t *types.Type) int { func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) { walkExprListSafe(n.Args, init) - // walkexprlistsafe will leave OINDEX (s[n]) alone if both s + // walkExprListSafe will leave OINDEX (s[n]) alone if both s // and n are name or literal, but those may index the slice we're // modifying here. Fix explicitly. ls := n.Args @@ -240,8 +240,8 @@ func appendWalkStmt(init *ir.Nodes, stmt ir.Node) { op := stmt.Op() n := typecheck.Stmt(stmt) if op == ir.OAS || op == ir.OAS2 { - // If the assignment has side effects, walkexpr will append them - // directly to init for us, while walkstmt will wrap it in an OBLOCK. + // If the assignment has side effects, walkExpr will append them + // directly to init for us, while walkStmt will wrap it in an OBLOCK. // We need to append them directly. // TODO(rsc): Clean this up. n = walkExpr(n, init) @@ -256,7 +256,7 @@ func appendWalkStmt(init *ir.Nodes, stmt ir.Node) { const maxOpenDefers = 8 // backingArrayPtrLen extracts the pointer and length from a slice or string. -// This constructs two nodes referring to n, so n must be a cheapexpr. +// This constructs two nodes referring to n, so n must be a cheapExpr. 
func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) { var init ir.Nodes c := cheapExpr(n, &init) @@ -423,7 +423,7 @@ func runtimeField(name string, offset int64, typ *types.Type) *types.Field { // ifaceData loads the data field from an interface. // The concrete type must be known to have type t. -// It follows the pointer if !isdirectiface(t). +// It follows the pointer if !IsDirectIface(t). func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node { if t.IsInterface() { base.Fatalf("ifaceData interface: %v", t) diff --git a/src/cmd/internal/goobj/mkbuiltin.go b/src/cmd/internal/goobj/mkbuiltin.go index 07c340668138d..22608e7e6995e 100644 --- a/src/cmd/internal/goobj/mkbuiltin.go +++ b/src/cmd/internal/goobj/mkbuiltin.go @@ -118,8 +118,8 @@ func mkbuiltin(w io.Writer) { // addBasicTypes returns the symbol names for basic types that are // defined in the runtime and referenced in other packages. -// Needs to be kept in sync with reflect.go:dumpbasictypes() and -// reflect.go:dtypesym() in the compiler. +// Needs to be kept in sync with reflect.go:WriteBasicTypes() and +// reflect.go:writeType() in the compiler. func enumerateBasicTypes() []extra { names := [...]string{ "int8", "uint8", "int16", "uint16", diff --git a/src/cmd/internal/obj/textflag.go b/src/cmd/internal/obj/textflag.go index fcc4014aa26f4..2f55793285e2e 100644 --- a/src/cmd/internal/obj/textflag.go +++ b/src/cmd/internal/obj/textflag.go @@ -33,7 +33,7 @@ const ( // This function uses its incoming context register. NEEDCTXT = 64 - // When passed to ggloblsym, causes Local to be set to true on the LSym it creates. + // When passed to objw.Global, causes Local to be set to true on the LSym it creates. LOCAL = 128 // Allocate a word of thread local storage and store the offset from the diff --git a/src/embed/embed.go b/src/embed/embed.go index 29e0adf1a63d1..5f35cd13b6557 100644 --- a/src/embed/embed.go +++ b/src/embed/embed.go @@ -133,7 +133,7 @@ import ( // See the package documentation for more details about initializing an FS. type FS struct { // The compiler knows the layout of this struct. - // See cmd/compile/internal/gc's initEmbed. + // See cmd/compile/internal/staticdata's WriteEmbed. // // The files list is sorted by name but not by simple string comparison. // Instead, each file's name takes the form "dir/elem" or "dir/elem/". @@ -203,7 +203,7 @@ var ( // It implements fs.FileInfo and fs.DirEntry. type file struct { // The compiler knows the layout of this struct. - // See cmd/compile/internal/gc's initEmbed. + // See cmd/compile/internal/staticdata's WriteEmbed. name string data string hash [16]byte // truncated SHA256 hash diff --git a/src/reflect/type.go b/src/reflect/type.go index 1f1e70d485c84..13e3d71228fd6 100644 --- a/src/reflect/type.go +++ b/src/reflect/type.go @@ -1890,7 +1890,7 @@ func MapOf(key, elem Type) Type { // Make a map type. // Note: flag values must match those used in the TMAP case - // in ../cmd/compile/internal/gc/reflect.go:dtypesym. + // in ../cmd/compile/internal/gc/reflect.go:writeType. 
var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil) mt := **(**mapType)(unsafe.Pointer(&imap)) mt.str = resolveReflectName(newName(s, "", false)) diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index c9376827da14a..9c3ceabd181b0 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -853,7 +853,7 @@ type funcinl struct { // layout of Itab known to compilers // allocated in non-garbage-collected memory // Needs to be in sync with -// ../cmd/compile/internal/gc/reflect.go:/^func.dumptabs. +// ../cmd/compile/internal/gc/reflect.go:/^func.WriteTabs. type itab struct { inter *interfacetype _type *_type diff --git a/src/runtime/type.go b/src/runtime/type.go index 81455f3532db8..18fc4bbfad754 100644 --- a/src/runtime/type.go +++ b/src/runtime/type.go @@ -383,7 +383,7 @@ type maptype struct { } // Note: flag values must match those used in the TMAP case -// in ../cmd/compile/internal/gc/reflect.go:dtypesym. +// in ../cmd/compile/internal/gc/reflect.go:writeType. func (mt *maptype) indirectkey() bool { // store ptr to key instead of key itself return mt.flags&1 != 0 } From 6de9423445840351a4cc7b17d732f0b5e922ef1a Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 16 Jan 2021 03:27:17 -0800 Subject: [PATCH 392/474] [dev.regabi] cmd/compile: cleanup OAS2FUNC ordering Currently, to ensure OAS2FUNC results are assigned in the correct order, they're always assigned to temporary variables. However, these temporary variables are typed based on the destination type, which may require an interface conversion. This means walk may have to then introduce a second set of temporaries to ensure result parameters are all copied out of the results area, before it emits calls to runtime conversion functions. That's just silly. Instead, this CL changes order to allocate the result temporaries with the same type as the function returns in the first place, and then assign them one at a time to their destinations, with conversions as needed. While here, also fix an order-of-evaluation issue with has-ok assignments that I almost added to multi-value function call assignments, and add tests for each. Change-Id: I9f4e962425fe3c5e3305adbbfeae2c7f253ec365 Reviewed-on: https://go-review.googlesource.com/c/go/+/284220 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/walk/assign.go | 10 +-- src/cmd/compile/internal/walk/order.go | 83 +++++++++++-------------- test/reorder.go | 16 +++++ 3 files changed, 57 insertions(+), 52 deletions(-) diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go index 4043d7574adc1..320a3464ccff1 100644 --- a/src/cmd/compile/internal/walk/assign.go +++ b/src/cmd/compile/internal/walk/assign.go @@ -268,7 +268,7 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { base.Fatalf("ascompatet: assignment count mismatch: %d = %d", len(nl), nr.NumFields()) } - var nn, mm ir.Nodes + var nn ir.Nodes for i, l := range nl { if ir.IsBlank(l) { continue @@ -278,11 +278,7 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { // Any assignment to an lvalue that might cause a function call must be // deferred until all the returned values have been read. 
if fncall(l, r.Type) { - tmp := ir.Node(typecheck.Temp(r.Type)) - tmp = typecheck.Expr(tmp) - a := convas(ir.NewAssignStmt(base.Pos, l, tmp), &mm) - mm.Append(a) - l = tmp + base.FatalfAt(l.Pos(), "assigning %v to %+v", r.Type, l) } res := ir.NewResultExpr(base.Pos, nil, types.BADWIDTH) @@ -299,7 +295,7 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { nn.Append(a) } - return append(nn, mm...) + return nn } // check assign expression list to diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index 78063c4db2e9c..d34c58009acbe 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -555,10 +555,6 @@ func (o *orderState) mapAssign(n ir.Node) { n.Y = o.safeMapRHS(n.Y) } o.out = append(o.out, n) - - case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2MAPR, ir.OAS2FUNC: - n := n.(*ir.AssignListStmt) - o.out = append(o.out, n) } } @@ -637,7 +633,7 @@ func (o *orderState) stmt(n ir.Node) { t := o.markTemp() o.exprList(n.Lhs) o.exprList(n.Rhs) - o.mapAssign(n) + o.out = append(o.out, n) o.cleanTemp(t) // Special: avoid copy of func call n.Right @@ -647,7 +643,7 @@ func (o *orderState) stmt(n ir.Node) { o.exprList(n.Lhs) o.init(n.Rhs[0]) o.call(n.Rhs[0]) - o.as2(n) + o.as2func(n) o.cleanTemp(t) // Special: use temporary variables to hold result, @@ -679,7 +675,7 @@ func (o *orderState) stmt(n ir.Node) { base.Fatalf("order.stmt: %v", r.Op()) } - o.okAs2(n) + o.as2ok(n) o.cleanTemp(t) // Special: does not save n onto out. @@ -1390,57 +1386,54 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node { // No return - type-assertions above. Each case must return for itself. } -// as2 orders OAS2XXXX nodes. It creates temporaries to ensure left-to-right assignment. -// The caller should order the right-hand side of the assignment before calling order.as2. +// as2func orders OAS2FUNC nodes. It creates temporaries to ensure left-to-right assignment. +// The caller should order the right-hand side of the assignment before calling order.as2func. // It rewrites, -// a, b, a = ... +// a, b, a = ... // as // tmp1, tmp2, tmp3 = ... -// a, b, a = tmp1, tmp2, tmp3 +// a, b, a = tmp1, tmp2, tmp3 // This is necessary to ensure left to right assignment order. -func (o *orderState) as2(n *ir.AssignListStmt) { - tmplist := []ir.Node{} - left := []ir.Node{} - for ni, l := range n.Lhs { - if !ir.IsBlank(l) { - tmp := o.newTemp(l.Type(), l.Type().HasPointers()) - n.Lhs[ni] = tmp - tmplist = append(tmplist, tmp) - left = append(left, l) +func (o *orderState) as2func(n *ir.AssignListStmt) { + results := n.Rhs[0].Type() + as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil) + for i, nl := range n.Lhs { + if !ir.IsBlank(nl) { + typ := results.Field(i).Type + tmp := o.newTemp(typ, typ.HasPointers()) + n.Lhs[i] = tmp + as.Lhs = append(as.Lhs, nl) + as.Rhs = append(as.Rhs, tmp) } } o.out = append(o.out, n) - - as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - as.Lhs = left - as.Rhs = tmplist o.stmt(typecheck.Stmt(as)) } -// okAs2 orders OAS2XXX with ok. -// Just like as2, this also adds temporaries to ensure left-to-right assignment. -func (o *orderState) okAs2(n *ir.AssignListStmt) { - var tmp1, tmp2 ir.Node - if !ir.IsBlank(n.Lhs[0]) { - typ := n.Rhs[0].Type() - tmp1 = o.newTemp(typ, typ.HasPointers()) +// as2ok orders OAS2XXX with ok. +// Just like as2func, this also adds temporaries to ensure left-to-right assignment. 
+func (o *orderState) as2ok(n *ir.AssignListStmt) { + as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil) + + do := func(i int, typ *types.Type) { + if nl := n.Lhs[i]; !ir.IsBlank(nl) { + var tmp ir.Node = o.newTemp(typ, typ.HasPointers()) + n.Lhs[i] = tmp + as.Lhs = append(as.Lhs, nl) + if i == 1 { + // The "ok" result is an untyped boolean according to the Go + // spec. We need to explicitly convert it to the LHS type in + // case the latter is a defined boolean type (#8475). + tmp = typecheck.Conv(tmp, nl.Type()) + } + as.Rhs = append(as.Rhs, tmp) + } } - if !ir.IsBlank(n.Lhs[1]) { - tmp2 = o.newTemp(types.Types[types.TBOOL], false) - } + do(0, n.Rhs[0].Type()) + do(1, types.Types[types.TBOOL]) o.out = append(o.out, n) - - if tmp1 != nil { - r := ir.NewAssignStmt(base.Pos, n.Lhs[0], tmp1) - o.mapAssign(typecheck.Stmt(r)) - n.Lhs[0] = tmp1 - } - if tmp2 != nil { - r := ir.NewAssignStmt(base.Pos, n.Lhs[1], typecheck.Conv(tmp2, n.Lhs[1].Type())) - o.mapAssign(typecheck.Stmt(r)) - n.Lhs[1] = tmp2 - } + o.stmt(typecheck.Stmt(as)) } diff --git a/test/reorder.go b/test/reorder.go index 3a87d025c2a95..57892f882fa5b 100644 --- a/test/reorder.go +++ b/test/reorder.go @@ -20,6 +20,8 @@ func main() { p7() p8() p9() + p10() + p11() } var gx []int @@ -149,3 +151,17 @@ func checkOAS2XXX(x bool, s string) { panic("failed") } } + +//go:noinline +func fp() (*int, int) { return nil, 42 } + +func p10() { + p := new(int) + p, *p = fp() +} + +func p11() { + var i interface{} + p := new(bool) + p, *p = i.(*bool) +} From 78e5aabcdb8aeae58a6437a3051fde3555ee0bf2 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 16 Jan 2021 16:59:19 -0800 Subject: [PATCH 393/474] [dev.regabi] cmd/compile: replace Node.HasCall with walk.mayCall After CL 284220, we now only need to detect expressions that contain function calls in the arguments list of further function calls. So we can simplify Node.HasCall/fncall/etc a lot. Instead of incrementally tracking whether an expression contains function calls all throughout walk, simply check once at the point of using an expression as a function call argument. Since any expression checked here will itself become a function call argument, it won't be checked again because we'll short circuit at the enclosing function call. Also, restructure the recursive walk code to use mayCall, and trim down the list of acceptable expressions. It should be okay to be stricter, since we'll now only see function call arguments and after they've already been walked. It's possible I was overly aggressive removing Ops here. But if so, we'll get an ICE, and it'll be easy to re-add them. I think this is better than the alternative of accidentally allowing expressions through that risk silently clobbering the stack. Passes toolstash -cmp. 
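For example, as an illustrative sketch (f, g, and x are hypothetical
names, not from this CL):

	// Only the g() argument needs a temporary here: evaluating a
	// call can clobber the argument area already being marshaled
	// for f, so walk spills g()'s result first. mayCall reports
	// false for x+1 (an integer OADD), so that argument is
	// evaluated in place.
	f(g(), x+1)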
Change-Id: I585ef35dcccd9f4018e4bf2c3f9ccb1514a826f3 Reviewed-on: https://go-review.googlesource.com/c/go/+/284223 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/ir/expr.go | 5 +- src/cmd/compile/internal/ir/mini.go | 5 +- src/cmd/compile/internal/ir/node.go | 3 - src/cmd/compile/internal/ir/stmt.go | 8 +- src/cmd/compile/internal/walk/assign.go | 27 +---- src/cmd/compile/internal/walk/expr.go | 18 +-- src/cmd/compile/internal/walk/walk.go | 155 +++++++----------------- 7 files changed, 65 insertions(+), 156 deletions(-) diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index dd91e347bd480..46314769733e7 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -32,8 +32,7 @@ type miniExpr struct { } const ( - miniExprHasCall = 1 << iota - miniExprNonNil + miniExprNonNil = 1 << iota miniExprTransient miniExprBounded miniExprImplicit // for use by implementations; not supported by every Expr @@ -44,8 +43,6 @@ func (*miniExpr) isExpr() {} func (n *miniExpr) Type() *types.Type { return n.typ } func (n *miniExpr) SetType(x *types.Type) { n.typ = x } -func (n *miniExpr) HasCall() bool { return n.flags&miniExprHasCall != 0 } -func (n *miniExpr) SetHasCall(b bool) { n.flags.set(miniExprHasCall, b) } func (n *miniExpr) NonNil() bool { return n.flags&miniExprNonNil != 0 } func (n *miniExpr) MarkNonNil() { n.flags |= miniExprNonNil } func (n *miniExpr) Transient() bool { return n.flags&miniExprTransient != 0 } diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go index 429f4ed360bb0..a7ff4ac9c77a1 100644 --- a/src/cmd/compile/internal/ir/mini.go +++ b/src/cmd/compile/internal/ir/mini.go @@ -57,8 +57,7 @@ const ( miniWalkdefShift = 0 // TODO(mdempsky): Move to Name.flags. miniTypecheckShift = 2 miniDiag = 1 << 4 - miniHasCall = 1 << 5 // for miniStmt - miniWalked = 1 << 6 // to prevent/catch re-walking + miniWalked = 1 << 5 // to prevent/catch re-walking ) func (n *miniNode) Typecheck() uint8 { return n.bits.get2(miniTypecheckShift) } @@ -89,7 +88,5 @@ func (n *miniNode) Name() *Name { return nil } func (n *miniNode) Sym() *types.Sym { return nil } func (n *miniNode) Val() constant.Value { panic(n.no("Val")) } func (n *miniNode) SetVal(v constant.Value) { panic(n.no("SetVal")) } -func (n *miniNode) HasCall() bool { return false } -func (n *miniNode) SetHasCall(bool) { panic(n.no("SetHasCall")) } func (n *miniNode) NonNil() bool { return false } func (n *miniNode) MarkNonNil() { panic(n.no("MarkNonNil")) } diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index de03800da24b3..a44bf42e781de 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -52,8 +52,6 @@ type Node interface { SetTypecheck(x uint8) NonNil() bool MarkNonNil() - HasCall() bool - SetHasCall(x bool) } // Line returns n's position as a string. If n has been inlined, @@ -544,7 +542,6 @@ func InitExpr(init []Node, expr Node) Node { } n.PtrInit().Prepend(init...) 
- n.SetHasCall(true) return n } diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index 4e4c0df993b59..0358569a1f6ee 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -50,11 +50,9 @@ type miniStmt struct { func (*miniStmt) isStmt() {} -func (n *miniStmt) Init() Nodes { return n.init } -func (n *miniStmt) SetInit(x Nodes) { n.init = x } -func (n *miniStmt) PtrInit() *Nodes { return &n.init } -func (n *miniStmt) HasCall() bool { return n.bits&miniHasCall != 0 } -func (n *miniStmt) SetHasCall(b bool) { n.bits.set(miniHasCall, b) } +func (n *miniStmt) Init() Nodes { return n.init } +func (n *miniStmt) SetInit(x Nodes) { n.init = x } +func (n *miniStmt) PtrInit() *Nodes { return &n.init } // An AssignListStmt is an assignment statement with // more than one item on at least one side: Lhs = Rhs. diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go index 320a3464ccff1..6e8075a35fb9e 100644 --- a/src/cmd/compile/internal/walk/assign.go +++ b/src/cmd/compile/internal/walk/assign.go @@ -248,18 +248,6 @@ func walkReturn(n *ir.ReturnStmt) ir.Node { return n } -// fncall reports whether assigning an rvalue of type rt to an lvalue l might involve a function call. -func fncall(l ir.Node, rt *types.Type) bool { - if l.HasCall() || l.Op() == ir.OINDEXMAP { - return true - } - if types.Identical(l.Type(), rt) { - return false - } - // There might be a conversion required, which might involve a runtime call. - return true -} - // check assign type list to // an expression list. called in // expr-list = func() @@ -275,9 +263,9 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { } r := nr.Field(i) - // Any assignment to an lvalue that might cause a function call must be - // deferred until all the returned values have been read. - if fncall(l, r.Type) { + // Order should have created autotemps of the appropriate type for + // us to store results into. + if tmp, ok := l.(*ir.Name); !ok || !tmp.AutoTemp() || !types.Identical(tmp.Type(), r.Type) { base.FatalfAt(l.Pos(), "assigning %v to %+v", r.Type, l) } @@ -286,14 +274,7 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { res.SetType(r.Type) res.SetTypecheck(1) - a := convas(ir.NewAssignStmt(base.Pos, l, res), &nn) - updateHasCall(a) - if a.HasCall() { - ir.Dump("ascompatet ucount", a) - base.Fatalf("ascompatet: too many function calls evaluating parameters") - } - - nn.Append(a) + nn.Append(ir.NewAssignStmt(base.Pos, l, res)) } return nn } diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index 510f56857654e..a1e8e6378517c 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -67,8 +67,6 @@ func walkExpr(n ir.Node, init *ir.Nodes) ir.Node { _ = staticdata.StringSym(n.Pos(), constant.StringVal(n.Val())) } - updateHasCall(n) - if base.Flag.LowerW != 0 && n != nil { ir.Dump("after walk expr", n) } @@ -527,15 +525,17 @@ func walkCall1(n *ir.CallExpr, init *ir.Nodes) { // For any argument whose evaluation might require a function call, // store that argument into a temporary variable, // to prevent that calls from clobbering arguments already on the stack. - // When instrumenting, all arguments might require function calls. var tempAssigns []ir.Node for i, arg := range args { - updateHasCall(arg) - // Determine param type. 
- t := params.Field(i).Type - if base.Flag.Cfg.Instrumenting || fncall(arg, t) { - // make assignment of fncall to Temp - tmp := typecheck.Temp(t) + // Validate argument and parameter types match. + param := params.Field(i) + if !types.Identical(arg.Type(), param.Type) { + base.FatalfAt(n.Pos(), "assigning %L to parameter %v (type %v)", arg, param.Sym, param.Type) + } + + if mayCall(arg) { + // assignment of arg to Temp + tmp := typecheck.Temp(param.Type) a := convas(ir.NewAssignStmt(base.Pos, tmp, arg), init) tempAssigns = append(tempAssigns, a) // replace arg with temp diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go index f95440d60d535..a9672a261b83e 100644 --- a/src/cmd/compile/internal/walk/walk.go +++ b/src/cmd/compile/internal/walk/walk.go @@ -67,8 +67,6 @@ func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt { if n.Op() != ir.OAS { base.Fatalf("convas: not OAS %v", n.Op()) } - defer updateHasCall(n) - n.SetTypecheck(1) if n.X == nil || n.Y == nil { @@ -274,123 +272,64 @@ func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) { return ptr, length } -// updateHasCall checks whether expression n contains any function -// calls and sets the n.HasCall flag if so. -func updateHasCall(n ir.Node) { - if n == nil { - return - } - n.SetHasCall(calcHasCall(n)) -} - -func calcHasCall(n ir.Node) bool { - if len(n.Init()) != 0 { - // TODO(mdempsky): This seems overly conservative. +// mayCall reports whether evaluating expression n may require +// function calls, which could clobber function call arguments/results +// currently on the stack. +func mayCall(n ir.Node) bool { + // When instrumenting, any expression might require function calls. + if base.Flag.Cfg.Instrumenting { return true } - switch n.Op() { - default: - base.Fatalf("calcHasCall %+v", n) - panic("unreachable") + isSoftFloat := func(typ *types.Type) bool { + return types.IsFloat[typ.Kind()] || types.IsComplex[typ.Kind()] + } - case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OTYPE, ir.ONAMEOFFSET: - if n.HasCall() { - base.Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n) - } - return false - case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: - return true - case ir.OANDAND, ir.OOROR: - // hard with instrumented code - n := n.(*ir.LogicalExpr) - if base.Flag.Cfg.Instrumenting { - return true + return ir.Any(n, func(n ir.Node) bool { + // walk should have already moved any Init blocks off of + // expressions. + if len(n.Init()) != 0 { + base.FatalfAt(n.Pos(), "mayCall %+v", n) } - return n.X.HasCall() || n.Y.HasCall() - case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR, - ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODIV, ir.OMOD: - // These ops might panic, make sure they are done - // before we start marshaling args for a call. See issue 16760. - return true - // When using soft-float, these ops might be rewritten to function calls - // so we ensure they are evaluated first. 
- case ir.OADD, ir.OSUB, ir.OMUL: - n := n.(*ir.BinaryExpr) - if ssagen.Arch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) { - return true - } - return n.X.HasCall() || n.Y.HasCall() - case ir.ONEG: - n := n.(*ir.UnaryExpr) - if ssagen.Arch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) { - return true - } - return n.X.HasCall() - case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT: - n := n.(*ir.BinaryExpr) - if ssagen.Arch.SoftFloat && (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()]) { + switch n.Op() { + default: + base.FatalfAt(n.Pos(), "mayCall %+v", n) + + case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: return true - } - return n.X.HasCall() || n.Y.HasCall() - case ir.OCONV: - n := n.(*ir.ConvExpr) - if ssagen.Arch.SoftFloat && ((types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) || (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()])) { + + case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR, + ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODIV, ir.OMOD: + // These ops might panic, make sure they are done + // before we start marshaling args for a call. See issue 16760. return true + + // When using soft-float, these ops might be rewritten to function calls + // so we ensure they are evaluated first. + case ir.OADD, ir.OSUB, ir.OMUL, ir.ONEG: + return ssagen.Arch.SoftFloat && isSoftFloat(n.Type()) + case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT: + n := n.(*ir.BinaryExpr) + return ssagen.Arch.SoftFloat && isSoftFloat(n.X.Type()) + case ir.OCONV: + n := n.(*ir.ConvExpr) + return ssagen.Arch.SoftFloat && (isSoftFloat(n.Type()) || isSoftFloat(n.X.Type())) + + case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.ONAMEOFFSET, ir.OMETHEXPR, + ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOMPLEX, ir.OEFACE, + ir.OANDAND, ir.OOROR, + ir.OADDR, ir.OBITNOT, ir.ONOT, ir.OPLUS, + ir.OCAP, ir.OIMAG, ir.OLEN, ir.OREAL, + ir.OCONVNOP, ir.ODOT, + ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.OSPTR, + ir.OBYTES2STRTMP, ir.OGETG, ir.OSLICEHEADER: + // ok: operations that don't require function calls. + // Expand as needed. } - return n.X.HasCall() - - case ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOPY, ir.OCOMPLEX, ir.OEFACE: - n := n.(*ir.BinaryExpr) - return n.X.HasCall() || n.Y.HasCall() - - case ir.OAS: - n := n.(*ir.AssignStmt) - return n.X.HasCall() || n.Y != nil && n.Y.HasCall() - - case ir.OADDR: - n := n.(*ir.AddrExpr) - return n.X.HasCall() - case ir.OPAREN: - n := n.(*ir.ParenExpr) - return n.X.HasCall() - case ir.OBITNOT, ir.ONOT, ir.OPLUS, ir.ORECV, - ir.OALIGNOF, ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.ONEW, - ir.OOFFSETOF, ir.OPANIC, ir.OREAL, ir.OSIZEOF, - ir.OCHECKNIL, ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.OSPTR, ir.OVARDEF, ir.OVARKILL, ir.OVARLIVE: - n := n.(*ir.UnaryExpr) - return n.X.HasCall() - case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER: - n := n.(*ir.SelectorExpr) - return n.X.HasCall() - - case ir.OGETG, ir.OMETHEXPR: - return false - // TODO(rsc): These look wrong in various ways but are what calcHasCall has always done. - case ir.OADDSTR: - // TODO(rsc): This used to check left and right, which are not part of OADDSTR. return false - case ir.OBLOCK: - // TODO(rsc): Surely the block's statements matter. 
- return false - case ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.OBYTES2STRTMP, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2BYTESTMP, ir.OSTR2RUNES, ir.ORUNESTR: - // TODO(rsc): Some conversions are themselves calls, no? - n := n.(*ir.ConvExpr) - return n.X.HasCall() - case ir.ODOTTYPE2: - // TODO(rsc): Shouldn't this be up with ODOTTYPE above? - n := n.(*ir.TypeAssertExpr) - return n.X.HasCall() - case ir.OSLICEHEADER: - // TODO(rsc): What about len and cap? - n := n.(*ir.SliceHeaderExpr) - return n.Ptr.HasCall() - case ir.OAS2DOTTYPE, ir.OAS2FUNC: - // TODO(rsc): Surely we need to check List and Rlist. - return false - } + }) } // itabType loads the _type field from a runtime.itab struct. From ba0e8a92fa74768feaccb8c3e4e5791b2dbc382f Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sat, 16 Jan 2021 18:25:00 -0800 Subject: [PATCH 394/474] [dev.regabi] cmd/compile: refactor temp construction in walk This CL adds a few new helper functions for constructing and initializing temporary variables during walk. Passes toolstash -cmp. Change-Id: I54965d992cd8dfef7cb7dc92a17c88372e52a0d6 Reviewed-on: https://go-review.googlesource.com/c/go/+/284224 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/walk/builtin.go | 13 ++----- src/cmd/compile/internal/walk/complit.go | 31 ++++------------ src/cmd/compile/internal/walk/convert.go | 14 +++---- src/cmd/compile/internal/walk/expr.go | 3 +- src/cmd/compile/internal/walk/temp.go | 47 ++++++++++++++++++++++++ 5 files changed, 63 insertions(+), 45 deletions(-) create mode 100644 src/cmd/compile/internal/walk/temp.go diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go index 283c85629b260..97f9de9c1df33 100644 --- a/src/cmd/compile/internal/walk/builtin.go +++ b/src/cmd/compile/internal/walk/builtin.go @@ -277,10 +277,8 @@ func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node { // Allocate hmap on stack. // var hv hmap - hv := typecheck.Temp(hmapType) - init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, hv, nil))) // h = &hv - h = typecheck.NodAddr(hv) + h = stackTempAddr(init, hmapType) // Allocate one bucket pointed to by hmap.buckets on stack if hint // is not larger than BUCKETSIZE. 
In case hint is larger than @@ -303,11 +301,8 @@ func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node { nif.Likely = true // var bv bmap - bv := typecheck.Temp(reflectdata.MapBucketType(t)) - nif.Body.Append(ir.NewAssignStmt(base.Pos, bv, nil)) - // b = &bv - b := typecheck.NodAddr(bv) + b := stackTempAddr(&nif.Body, reflectdata.MapBucketType(t)) // h.buckets = b bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap @@ -509,9 +504,7 @@ func walkNew(n *ir.UnaryExpr, init *ir.Nodes) ir.Node { if t.Size() >= ir.MaxImplicitStackVarSize { base.Fatalf("large ONEW with EscNone: %v", n) } - r := typecheck.Temp(t) - init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, nil))) // zero temp - return typecheck.Expr(typecheck.NodAddr(r)) + return stackTempAddr(init, t) } types.CalcSize(t) n.MarkNonNil() diff --git a/src/cmd/compile/internal/walk/complit.go b/src/cmd/compile/internal/walk/complit.go index f82ef69ca9919..a7db453550137 100644 --- a/src/cmd/compile/internal/walk/complit.go +++ b/src/cmd/compile/internal/walk/complit.go @@ -344,30 +344,14 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) if !types.Identical(t, x.Type()) { panic("dotdotdot base type does not match order's assigned type") } - - if vstat == nil { - a = ir.NewAssignStmt(base.Pos, x, nil) - a = typecheck.Stmt(a) - init.Append(a) // zero new temp - } else { - // Declare that we're about to initialize all of x. - // (Which happens at the *vauto = vstat below.) - init.Append(ir.NewUnaryExpr(base.Pos, ir.OVARDEF, x)) - } - - a = typecheck.NodAddr(x) + a = initStackTemp(init, x, vstat != nil) } else if n.Esc() == ir.EscNone { - a = typecheck.Temp(t) if vstat == nil { - a = ir.NewAssignStmt(base.Pos, typecheck.Temp(t), nil) - a = typecheck.Stmt(a) - init.Append(a) // zero new temp - a = a.(*ir.AssignStmt).X - } else { - init.Append(ir.NewUnaryExpr(base.Pos, ir.OVARDEF, a)) + // TODO(mdempsky): Remove this useless temporary. + // It's only needed to keep toolstash happy. + typecheck.Temp(t) } - - a = typecheck.NodAddr(a) + a = initStackTemp(init, typecheck.Temp(t), vstat != nil) } else { a = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(t)) } @@ -550,9 +534,8 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { var r ir.Node if n.Prealloc != nil { - // n.Right is stack temporary used as backing store. - appendWalkStmt(init, ir.NewAssignStmt(base.Pos, n.Prealloc, nil)) // zero backing store, just in case (#18410) - r = typecheck.NodAddr(n.Prealloc) + // n.Prealloc is stack temporary used as backing store. + r = initStackTemp(init, n.Prealloc, false) } else { r = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(n.X.Type())) r.SetEsc(n.Esc()) diff --git a/src/cmd/compile/internal/walk/convert.go b/src/cmd/compile/internal/walk/convert.go index b47bb917c3a16..d143c1084fd2c 100644 --- a/src/cmd/compile/internal/walk/convert.go +++ b/src/cmd/compile/internal/walk/convert.go @@ -198,8 +198,7 @@ func walkBytesRunesToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node { a := typecheck.NodNil() if n.Esc() == ir.EscNone { // Create temporary buffer for string on stack. 
- t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize) - a = typecheck.NodAddr(typecheck.Temp(t)) + a = stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8]) } if n.Op() == ir.ORUNES2STR { // slicerunetostring(*[32]byte, []rune) string @@ -229,8 +228,7 @@ func walkBytesToStringTemp(n *ir.ConvExpr, init *ir.Nodes) ir.Node { func walkRuneToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node { a := typecheck.NodNil() if n.Esc() == ir.EscNone { - t := types.NewArray(types.Types[types.TUINT8], 4) - a = typecheck.NodAddr(typecheck.Temp(t)) + a = stackBufAddr(4, types.Types[types.TUINT8]) } // intstring(*[4]byte, rune) return mkcall("intstring", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TINT64])) @@ -246,7 +244,7 @@ func walkStringToBytes(n *ir.ConvExpr, init *ir.Nodes) ir.Node { t := types.NewArray(types.Types[types.TUINT8], int64(len(sc))) var a ir.Node if n.Esc() == ir.EscNone && len(sc) <= int(ir.MaxImplicitStackVarSize) { - a = typecheck.NodAddr(typecheck.Temp(t)) + a = stackBufAddr(t.NumElem(), t.Elem()) } else { types.CalcSize(t) a = ir.NewUnaryExpr(base.Pos, ir.ONEW, nil) @@ -273,8 +271,7 @@ func walkStringToBytes(n *ir.ConvExpr, init *ir.Nodes) ir.Node { a := typecheck.NodNil() if n.Esc() == ir.EscNone { // Create temporary buffer for slice on stack. - t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize) - a = typecheck.NodAddr(typecheck.Temp(t)) + a = stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8]) } // stringtoslicebyte(*32[byte], string) []byte return mkcall("stringtoslicebyte", n.Type(), init, a, typecheck.Conv(s, types.Types[types.TSTRING])) @@ -298,8 +295,7 @@ func walkStringToRunes(n *ir.ConvExpr, init *ir.Nodes) ir.Node { a := typecheck.NodNil() if n.Esc() == ir.EscNone { // Create temporary buffer for slice on stack. - t := types.NewArray(types.Types[types.TINT32], tmpstringbufsize) - a = typecheck.NodAddr(typecheck.Temp(t)) + a = stackBufAddr(tmpstringbufsize, types.Types[types.TINT32]) } // stringtoslicerune(*[32]rune, string) []rune return mkcall("stringtoslicerune", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TSTRING])) diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index a1e8e6378517c..8a13f6a9238ff 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -441,8 +441,7 @@ func walkAddString(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { // Don't allocate the buffer if the result won't fit. if sz < tmpstringbufsize { // Create temporary buffer for result string on stack. - t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize) - buf = typecheck.NodAddr(typecheck.Temp(t)) + buf = stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8]) } } diff --git a/src/cmd/compile/internal/walk/temp.go b/src/cmd/compile/internal/walk/temp.go new file mode 100644 index 0000000000000..901cb770f362a --- /dev/null +++ b/src/cmd/compile/internal/walk/temp.go @@ -0,0 +1,47 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package walk + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" +) + +// initStackTemp appends statements to init to initialize the given +// temporary variable, and then returns the expression &tmp. 
If vardef
+// is true, then the variable is initialized with OVARDEF, and the
+// caller must ensure the variable is later assigned before use;
+// otherwise, it's zero initialized.
+//
+// TODO(mdempsky): Change callers to provide tmp's initial value,
+// rather than just vardef, to make this safer/easier to use.
+func initStackTemp(init *ir.Nodes, tmp *ir.Name, vardef bool) *ir.AddrExpr {
+	if vardef {
+		init.Append(ir.NewUnaryExpr(base.Pos, ir.OVARDEF, tmp))
+	} else {
+		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmp, nil))
+	}
+	return typecheck.Expr(typecheck.NodAddr(tmp)).(*ir.AddrExpr)
+}
+
+// stackTempAddr returns the expression &tmp, where tmp is a newly
+// allocated temporary variable of the given type. Statements to
+// zero-initialize tmp are appended to init.
+func stackTempAddr(init *ir.Nodes, typ *types.Type) *ir.AddrExpr {
+	return initStackTemp(init, typecheck.Temp(typ), false)
+}
+
+// stackBufAddr returns the expression &tmp, where tmp is a newly
+// allocated temporary variable of type [len]elem. This variable is
+// uninitialized, and elem must not contain pointers.
+func stackBufAddr(len int64, elem *types.Type) *ir.AddrExpr {
+	if elem.HasPointers() {
+		base.FatalfAt(base.Pos, "%v has pointers", elem)
+	}
+	tmp := typecheck.Temp(types.NewArray(elem, len))
+	return typecheck.Expr(typecheck.NodAddr(tmp)).(*ir.AddrExpr)
+}

From 7ce2a8383d154ca1860286a9b5c8a1e6cf151a90 Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Sat, 16 Jan 2021 19:35:39 -0800
Subject: [PATCH 395/474] [dev.regabi] cmd/compile: simplify stack temp initialization

This CL simplifies the previous one a little bit further, by combining
and reordering stack-temporary initialization and getting rid of an
unneeded temporary variable.

(Does not pass toolstash -cmp.)

Change-Id: I17799dfe368484f33a8ddd0ab4f68647d6262147
Reviewed-on: https://go-review.googlesource.com/c/go/+/284225
Trust: Matthew Dempsky
Run-TryBot: Matthew Dempsky
TryBot-Result: Go Bot
Reviewed-by: Cuong Manh Le
---
 src/cmd/compile/internal/walk/complit.go | 17 +++++++----------
 src/cmd/compile/internal/walk/temp.go    | 19 ++++++-------------
 2 files changed, 13 insertions(+), 23 deletions(-)

diff --git a/src/cmd/compile/internal/walk/complit.go b/src/cmd/compile/internal/walk/complit.go
index a7db453550137..97e820238bb04 100644
--- a/src/cmd/compile/internal/walk/complit.go
+++ b/src/cmd/compile/internal/walk/complit.go
@@ -344,21 +344,18 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes)
 		if !types.Identical(t, x.Type()) {
 			panic("dotdotdot base type does not match order's assigned type")
 		}
-		a = initStackTemp(init, x, vstat != nil)
+		a = initStackTemp(init, x, vstat)
 	} else if n.Esc() == ir.EscNone {
-		if vstat == nil {
-			// TODO(mdempsky): Remove this useless temporary.
-			// It's only needed to keep toolstash happy.
-			typecheck.Temp(t)
-		}
-		a = initStackTemp(init, typecheck.Temp(t), vstat != nil)
+		a = initStackTemp(init, typecheck.Temp(t), vstat)
 	} else {
 		a = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(t))
 	}
 	appendWalkStmt(init, ir.NewAssignStmt(base.Pos, vauto, a))
-	if vstat != nil {
-		// copy static to heap (4)
+	if vstat != nil && n.Prealloc == nil && n.Esc() != ir.EscNone {
+		// If we allocated on the heap with ONEW, copy the static to the
+		// heap (4). We skip this for stack temporaries, because
+		// initStackTemp already handled the copy.
a = ir.NewStarExpr(base.Pos, vauto)
 		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, a, vstat))
 	}
@@ -535,7 +532,7 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
 	var r ir.Node
 	if n.Prealloc != nil {
 		// n.Prealloc is stack temporary used as backing store.
-		r = initStackTemp(init, n.Prealloc, false)
+		r = initStackTemp(init, n.Prealloc, nil)
 	} else {
 		r = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(n.X.Type()))
 		r.SetEsc(n.Esc())
diff --git a/src/cmd/compile/internal/walk/temp.go b/src/cmd/compile/internal/walk/temp.go
index 901cb770f362a..9879a6c69d7c6 100644
--- a/src/cmd/compile/internal/walk/temp.go
+++ b/src/cmd/compile/internal/walk/temp.go
@@ -12,19 +12,12 @@ import (
 )
 
 // initStackTemp appends statements to init to initialize the given
-// temporary variable, and then returns the expression &tmp. If vardef
-// is true, then the variable is initialized with OVARDEF, and the
-// caller must ensure the variable is later assigned before use;
-// otherwise, it's zero initialized.
-//
-// TODO(mdempsky): Change callers to provide tmp's initial value,
-// rather than just vardef, to make this safer/easier to use.
-func initStackTemp(init *ir.Nodes, tmp *ir.Name, vardef bool) *ir.AddrExpr {
-	if vardef {
-		init.Append(ir.NewUnaryExpr(base.Pos, ir.OVARDEF, tmp))
-	} else {
-		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmp, nil))
+// temporary variable to val, and then returns the expression &tmp.
+func initStackTemp(init *ir.Nodes, tmp *ir.Name, val ir.Node) *ir.AddrExpr {
+	if val != nil && !types.Identical(tmp.Type(), val.Type()) {
+		base.Fatalf("bad initial value for %L: %L", tmp, val)
 	}
+	appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmp, val))
 	return typecheck.Expr(typecheck.NodAddr(tmp)).(*ir.AddrExpr)
 }
 
@@ -32,7 +25,7 @@ func initStackTemp(init *ir.Nodes, tmp *ir.Name, vardef bool) *ir.AddrExpr {
 // allocated temporary variable of the given type. Statements to
 // zero-initialize tmp are appended to init.
 func stackTempAddr(init *ir.Nodes, typ *types.Type) *ir.AddrExpr {
-	return initStackTemp(init, typecheck.Temp(typ), false)
+	return initStackTemp(init, typecheck.Temp(typ), nil)
 }
 
 // stackBufAddr returns the expression &tmp, where tmp is a newly

From 88956fc4b1a44efe847fa07a8ebc21a49ff811e1 Mon Sep 17 00:00:00 2001
From: Cuong Manh Le
Date: Sun, 17 Jan 2021 00:17:59 +0700
Subject: [PATCH 396/474] [dev.regabi] cmd/compile: stop analyzing NameOffsetExpr.Name_ in escape analysis

It is always used with global variables, so we can skip analyzing it,
the same as we do for ONAME/PEXTERN nodes.

While at it, add a Fatalf check to ensure NewNameOffsetExpr is only
called for global variables.
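As an illustrative sketch (hypothetical names, not code from this CL):

	var g [4]*int // a global, i.e. an ONAME node with class PEXTERN

	// A NameOffsetExpr denoting "g plus 16 bytes" names part of g.
	// Escape analysis already models every global as heap memory, so
	// a value stored through such a node escapes regardless, and
	// visiting the underlying Name_ adds no information.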
For #43737

Change-Id: Iac444ed8d583baba5042bea096531301843b1e8f
Reviewed-on: https://go-review.googlesource.com/c/go/+/284118
Trust: Cuong Manh Le
Run-TryBot: Cuong Manh Le
TryBot-Result: Go Bot
Reviewed-by: Matthew Dempsky
---
 src/cmd/compile/internal/escape/escape.go | 9 ++-----
 src/cmd/compile/internal/ir/expr.go       | 4 ++--
 2 files changed, 4 insertions(+), 9 deletions(-)

diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go
index 96c2e02146dcb..356fbc75f8daa 100644
--- a/src/cmd/compile/internal/escape/escape.go
+++ b/src/cmd/compile/internal/escape/escape.go
@@ -585,7 +585,7 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) {
 	default:
 		base.Fatalf("unexpected expr: %v", n)
 
-	case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OTYPE, ir.OMETHEXPR:
+	case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OTYPE, ir.OMETHEXPR, ir.ONAMEOFFSET:
 		// nop
 
 	case ir.ONAME:
@@ -598,10 +598,6 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) {
 		}
 		e.flow(k, e.oldLoc(n))
 
-	case ir.ONAMEOFFSET:
-		n := n.(*ir.NameOffsetExpr)
-		e.expr(k, n.Name_)
-
 	case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
 		n := n.(*ir.UnaryExpr)
 		e.discard(n.X)
@@ -876,8 +872,7 @@ func (e *escape) addr(n ir.Node) hole {
 		}
 		k = e.oldLoc(n).asHole()
 	case ir.ONAMEOFFSET:
-		n := n.(*ir.NameOffsetExpr)
-		k = e.addr(n.Name_)
+		break
 	case ir.ODOT:
 		n := n.(*ir.SelectorExpr)
 		k = e.addr(n.X)
diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
index 46314769733e7..e24b2d5b2cb41 100644
--- a/src/cmd/compile/internal/ir/expr.go
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -470,8 +470,8 @@ type NameOffsetExpr struct {
 }
 
 func NewNameOffsetExpr(pos src.XPos, name *Name, offset int64, typ *types.Type) *NameOffsetExpr {
-	if name == nil || IsBlank(name) {
-		base.FatalfAt(pos, "cannot take offset of nil or blank name: %v", name)
+	if name == nil || IsBlank(name) || !(name.Op() == ONAME && name.Class == PEXTERN) {
+		base.FatalfAt(pos, "cannot take offset of nil, blank name or non-global variable: %v", name)
 	}
 	n := &NameOffsetExpr{Name_: name, Offset_: offset}
 	n.typ = typ

From 82b9cae700d844857b24b31f40a51283fbdd6dd5 Mon Sep 17 00:00:00 2001
From: Cuong Manh Le
Date: Sun, 17 Jan 2021 00:38:54 +0700
Subject: [PATCH 397/474] [dev.regabi] cmd/compile: change ir.NameOffsetExpr to use *obj.LSym instead of *Name

This is possible because NameOffsetExpr is always used with global
variables, and the SSA backend only needs (*Name).Linksym() to
generate values for them.

Passes toolstash -cmp.

Updates #43737

Change-Id: I17209e21383edb766070c0accd1fa4660659caef
Reviewed-on: https://go-review.googlesource.com/c/go/+/284119
Trust: Cuong Manh Le
Run-TryBot: Cuong Manh Le
TryBot-Result: Go Bot
Reviewed-by: Matthew Dempsky
---
 src/cmd/compile/internal/ir/expr.go     | 17 ++++++++++-----
 src/cmd/compile/internal/ir/fmt.go      |  2 +-
 src/cmd/compile/internal/ir/node_gen.go |  6 -----
 src/cmd/compile/internal/ssagen/ssa.go  | 29 ++++++++++---------------
 4 files changed, 24 insertions(+), 30 deletions(-)

diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
index e24b2d5b2cb41..a3356d432af2b 100644
--- a/src/cmd/compile/internal/ir/expr.go
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -8,6 +8,7 @@ import (
 	"bytes"
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/types"
+	"cmd/internal/obj"
 	"cmd/internal/src"
 	"fmt"
 	"go/constant"
@@ -461,22 +462,26 @@ func NewResultExpr(pos src.XPos, typ *types.Type, offset int64) *ResultExpr {
 	return n
 }
 
-// A NameOffsetExpr refers to an offset within a variable.
+// A NameOffsetExpr refers to an offset within a global variable. // It is like a SelectorExpr but without the field name. type NameOffsetExpr struct { miniExpr - Name_ *Name + Linksym *obj.LSym Offset_ int64 } +func NewLinksymOffsetExpr(pos src.XPos, lsym *obj.LSym, offset int64, typ *types.Type) *NameOffsetExpr { + n := &NameOffsetExpr{Linksym: lsym, Offset_: offset} + n.typ = typ + n.op = ONAMEOFFSET + return n +} + func NewNameOffsetExpr(pos src.XPos, name *Name, offset int64, typ *types.Type) *NameOffsetExpr { if name == nil || IsBlank(name) || !(name.Op() == ONAME && name.Class == PEXTERN) { base.FatalfAt(pos, "cannot take offset of nil, blank name or non-global variable: %v", name) } - n := &NameOffsetExpr{Name_: name, Offset_: offset} - n.typ = typ - n.op = ONAMEOFFSET - return n + return NewLinksymOffsetExpr(pos, name.Linksym(), offset, typ) } // A SelectorExpr is a selector expression X.Sel. diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index a4e769f508b09..dfb8e42270170 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -634,7 +634,7 @@ func exprFmt(n Node, s fmt.State, prec int) { case ONAMEOFFSET: n := n.(*NameOffsetExpr) - fmt.Fprintf(s, "(%v)(%v@%d)", n.Type(), n.Name_, n.Offset_) + fmt.Fprintf(s, "(%v)(%s@%d)", n.Type(), n.Linksym.Name, n.Offset_) case OTYPE: if n.Type() == nil && n.Sym() != nil { diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index f1b0a21628373..7db9517b2c57a 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -825,16 +825,10 @@ func (n *NameOffsetExpr) doChildren(do func(Node) bool) bool { if doNodes(n.init, do) { return true } - if n.Name_ != nil && do(n.Name_) { - return true - } return false } func (n *NameOffsetExpr) editChildren(edit func(Node) Node) { editNodes(n.init, edit) - if n.Name_ != nil { - n.Name_ = edit(n.Name_).(*Name) - } } func (n *NilExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 7726ecac55775..fce02f475a3ad 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -2257,15 +2257,10 @@ func (s *state) expr(n ir.Node) *ssa.Value { if s.canSSA(n) { return s.variable(n, n.Type()) } - addr := s.addr(n) - return s.load(n.Type(), addr) + return s.load(n.Type(), s.addr(n)) case ir.ONAMEOFFSET: n := n.(*ir.NameOffsetExpr) - if s.canSSAName(n.Name_) && TypeOK(n.Type()) { - return s.variable(n, n.Type()) - } - addr := s.addr(n) - return s.load(n.Type(), addr) + return s.load(n.Type(), s.addr(n)) case ir.ONIL: n := n.(*ir.NilExpr) t := n.Type() @@ -5088,13 +5083,18 @@ func (s *state) addr(n ir.Node) *ssa.Value { } t := types.NewPtr(n.Type()) - var offset int64 + linksymOffset := func(lsym *obj.LSym, offset int64) *ssa.Value { + v := s.entryNewValue1A(ssa.OpAddr, t, lsym, s.sb) + // TODO: Make OpAddr use AuxInt as well as Aux. 
+ if offset != 0 { + v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, offset, v) + } + return v + } switch n.Op() { case ir.ONAMEOFFSET: no := n.(*ir.NameOffsetExpr) - offset = no.Offset_ - n = no.Name_ - fallthrough + return linksymOffset(no.Linksym, no.Offset_) case ir.ONAME: n := n.(*ir.Name) if n.Heapaddr != nil { @@ -5103,12 +5103,7 @@ func (s *state) addr(n ir.Node) *ssa.Value { switch n.Class { case ir.PEXTERN: // global variable - v := s.entryNewValue1A(ssa.OpAddr, t, n.Linksym(), s.sb) - // TODO: Make OpAddr use AuxInt as well as Aux. - if offset != 0 { - v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, offset, v) - } - return v + return linksymOffset(n.Linksym(), 0) case ir.PPARAM: // parameter slot v := s.decladdrs[n] From 59ff93fe645320c7d6a434ea7794546e89b12d45 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Sun, 17 Jan 2021 00:47:12 +0700 Subject: [PATCH 398/474] [dev.regabi] cmd/compile: rename NameOffsetExpr to LinksymOffsetExpr Updates #43737 [git-generate] cd src/cmd/compile/internal/ir rf ' mv NameOffsetExpr LinksymOffsetExpr mv ONAMEOFFSET OLINKSYMOFFSET ' go generate Change-Id: I8c6b8aa576e88278c0320d16bb2e8e424a15b907 Reviewed-on: https://go-review.googlesource.com/c/go/+/284120 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/escape/escape.go | 4 +-- src/cmd/compile/internal/ir/expr.go | 14 ++++---- src/cmd/compile/internal/ir/fmt.go | 4 +-- src/cmd/compile/internal/ir/node.go | 26 +++++++-------- src/cmd/compile/internal/ir/node_gen.go | 32 +++++++++---------- src/cmd/compile/internal/ir/op_string.go | 6 ++-- src/cmd/compile/internal/ssagen/ssa.go | 8 ++--- .../compile/internal/typecheck/typecheck.go | 2 +- src/cmd/compile/internal/walk/expr.go | 4 +-- src/cmd/compile/internal/walk/walk.go | 2 +- 10 files changed, 51 insertions(+), 51 deletions(-) diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index 356fbc75f8daa..26420b820a968 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -585,7 +585,7 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) { default: base.Fatalf("unexpected expr: %v", n) - case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OTYPE, ir.OMETHEXPR, ir.ONAMEOFFSET: + case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OTYPE, ir.OMETHEXPR, ir.OLINKSYMOFFSET: // nop case ir.ONAME: @@ -871,7 +871,7 @@ func (e *escape) addr(n ir.Node) hole { break } k = e.oldLoc(n).asHole() - case ir.ONAMEOFFSET: + case ir.OLINKSYMOFFSET: break case ir.ODOT: n := n.(*ir.SelectorExpr) diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index a3356d432af2b..8aad25d625ec3 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -462,22 +462,22 @@ func NewResultExpr(pos src.XPos, typ *types.Type, offset int64) *ResultExpr { return n } -// A NameOffsetExpr refers to an offset within a global variable. +// A LinksymOffsetExpr refers to an offset within a global variable. // It is like a SelectorExpr but without the field name. 
-type NameOffsetExpr struct { +type LinksymOffsetExpr struct { miniExpr Linksym *obj.LSym Offset_ int64 } -func NewLinksymOffsetExpr(pos src.XPos, lsym *obj.LSym, offset int64, typ *types.Type) *NameOffsetExpr { - n := &NameOffsetExpr{Linksym: lsym, Offset_: offset} +func NewLinksymOffsetExpr(pos src.XPos, lsym *obj.LSym, offset int64, typ *types.Type) *LinksymOffsetExpr { + n := &LinksymOffsetExpr{Linksym: lsym, Offset_: offset} n.typ = typ - n.op = ONAMEOFFSET + n.op = OLINKSYMOFFSET return n } -func NewNameOffsetExpr(pos src.XPos, name *Name, offset int64, typ *types.Type) *NameOffsetExpr { +func NewNameOffsetExpr(pos src.XPos, name *Name, offset int64, typ *types.Type) *LinksymOffsetExpr { if name == nil || IsBlank(name) || !(name.Op() == ONAME && name.Class == PEXTERN) { base.FatalfAt(pos, "cannot take offset of nil, blank name or non-global variable: %v", name) } @@ -731,7 +731,7 @@ func IsAddressable(n Node) bool { } return true - case ONAMEOFFSET: + case OLINKSYMOFFSET: return true } diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index dfb8e42270170..68e1bc156997f 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -632,8 +632,8 @@ func exprFmt(n Node, s fmt.State, prec int) { case OPACK, ONONAME: fmt.Fprint(s, n.Sym()) - case ONAMEOFFSET: - n := n.(*NameOffsetExpr) + case OLINKSYMOFFSET: + n := n.(*LinksymOffsetExpr) fmt.Fprintf(s, "(%v)(%s@%d)", n.Type(), n.Linksym.Name, n.Offset_) case OTYPE: diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index a44bf42e781de..a725307c2c64e 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -291,19 +291,19 @@ const ( OTSLICE // []int // misc - OINLCALL // intermediary representation of an inlined call. - OEFACE // itable and data words of an empty-interface value. - OITAB // itable word of an interface value. - OIDATA // data word of an interface value in Left - OSPTR // base pointer of a slice or string. - OCFUNC // reference to c function pointer (not go func value) - OCHECKNIL // emit code to ensure pointer/interface not nil - OVARDEF // variable is about to be fully initialized - OVARKILL // variable is dead - OVARLIVE // variable is alive - ORESULT // result of a function call; Xoffset is stack offset - OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree. - ONAMEOFFSET // offset within a name + OINLCALL // intermediary representation of an inlined call. + OEFACE // itable and data words of an empty-interface value. + OITAB // itable word of an interface value. + OIDATA // data word of an interface value in Left + OSPTR // base pointer of a slice or string. + OCFUNC // reference to c function pointer (not go func value) + OCHECKNIL // emit code to ensure pointer/interface not nil + OVARDEF // variable is about to be fully initialized + OVARKILL // variable is dead + OVARLIVE // variable is alive + ORESULT // result of a function call; Xoffset is stack offset + OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree. 
+ OLINKSYMOFFSET // offset within a name // arch-specific opcodes ORETJMP // return to other function diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 7db9517b2c57a..8f89c67748658 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -734,6 +734,22 @@ func (n *LabelStmt) editChildren(edit func(Node) Node) { editNodes(n.init, edit) } +func (n *LinksymOffsetExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *LinksymOffsetExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *LinksymOffsetExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + return false +} +func (n *LinksymOffsetExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) +} + func (n *LogicalExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *LogicalExpr) copy() Node { c := *n @@ -815,22 +831,6 @@ func (n *MapType) editChildren(edit func(Node) Node) { func (n *Name) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } -func (n *NameOffsetExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } -func (n *NameOffsetExpr) copy() Node { - c := *n - c.init = copyNodes(c.init) - return &c -} -func (n *NameOffsetExpr) doChildren(do func(Node) bool) bool { - if doNodes(n.init, do) { - return true - } - return false -} -func (n *NameOffsetExpr) editChildren(edit func(Node) Node) { - editNodes(n.init, edit) -} - func (n *NilExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *NilExpr) copy() Node { c := *n diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go index 9538599c38c46..35196b01aee83 100644 --- a/src/cmd/compile/internal/ir/op_string.go +++ b/src/cmd/compile/internal/ir/op_string.go @@ -156,15 +156,15 @@ func _() { _ = x[OVARLIVE-145] _ = x[ORESULT-146] _ = x[OINLMARK-147] - _ = x[ONAMEOFFSET-148] + _ = x[OLINKSYMOFFSET-148] _ = x[ORETJMP-149] _ = x[OGETG-150] _ = x[OEND-151] } -const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKNAMEOFFSETRETJMPGETGEND" +const _Op_name = 
"XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKLINKSYMOFFSETRETJMPGETGEND" -var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 474, 480, 484, 487, 491, 496, 501, 507, 512, 516, 521, 529, 537, 543, 552, 563, 570, 574, 581, 589, 593, 597, 601, 608, 615, 623, 629, 637, 645, 650, 655, 659, 667, 672, 676, 679, 687, 691, 693, 698, 700, 705, 711, 717, 723, 729, 734, 738, 745, 751, 756, 762, 768, 775, 780, 784, 789, 793, 798, 806, 812, 819, 826, 832, 839, 849, 855, 859, 862} +var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 474, 480, 484, 487, 491, 496, 501, 507, 512, 516, 521, 529, 537, 543, 552, 563, 570, 574, 581, 589, 593, 597, 601, 608, 615, 623, 629, 637, 645, 650, 655, 659, 667, 672, 676, 679, 687, 691, 693, 698, 700, 705, 711, 717, 723, 729, 734, 738, 745, 751, 756, 762, 768, 775, 780, 784, 789, 793, 798, 806, 812, 819, 826, 832, 839, 852, 858, 862, 865} func (i Op) String() string { if i >= Op(len(_Op_index)-1) { diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index fce02f475a3ad..1cd49a487ec5a 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -2258,8 +2258,8 @@ func (s *state) expr(n ir.Node) *ssa.Value { return s.variable(n, n.Type()) } return s.load(n.Type(), s.addr(n)) - case ir.ONAMEOFFSET: - n := n.(*ir.NameOffsetExpr) + case ir.OLINKSYMOFFSET: + n := n.(*ir.LinksymOffsetExpr) return s.load(n.Type(), s.addr(n)) case ir.ONIL: n := n.(*ir.NilExpr) @@ -5092,8 +5092,8 @@ func (s *state) addr(n ir.Node) *ssa.Value { return v } switch n.Op() { - case ir.ONAMEOFFSET: - no := n.(*ir.NameOffsetExpr) + case ir.OLINKSYMOFFSET: + no := n.(*ir.LinksymOffsetExpr) return linksymOffset(no.Linksym, no.Offset_) case ir.ONAME: n := n.(*ir.Name) diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index 3530e76972571..5b44a5743fc3a 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ 
b/src/cmd/compile/internal/typecheck/typecheck.go
@@ -521,7 +521,7 @@ func typecheck1(n ir.Node, top int) ir.Node {
 		}
 		return n
 
-	case ir.ONAMEOFFSET:
+	case ir.OLINKSYMOFFSET:
 		// type already set
 		return n
 
diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go
index 8a13f6a9238ff..82a76dc239e11 100644
--- a/src/cmd/compile/internal/walk/expr.go
+++ b/src/cmd/compile/internal/walk/expr.go
@@ -85,7 +85,7 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node {
 	case ir.ONONAME, ir.OGETG:
 		return n
 
-	case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET:
+	case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL, ir.OLINKSYMOFFSET:
 		// TODO(mdempsky): Just return n; see discussion on CL 38655.
 		// Perhaps refactor to use Node.mayBeShared for these instead.
 		// If these return early, make sure to still call
@@ -357,7 +357,7 @@ func safeExpr(n ir.Node, init *ir.Nodes) ir.Node {
 	}
 
 	switch n.Op() {
-	case ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET:
+	case ir.ONAME, ir.OLITERAL, ir.ONIL, ir.OLINKSYMOFFSET:
 		return n
 
 	case ir.OLEN, ir.OCAP:
diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go
index a9672a261b83e..f214551617129 100644
--- a/src/cmd/compile/internal/walk/walk.go
+++ b/src/cmd/compile/internal/walk/walk.go
@@ -316,7 +316,7 @@ func mayCall(n ir.Node) bool {
 			n := n.(*ir.ConvExpr)
 			return ssagen.Arch.SoftFloat && (isSoftFloat(n.Type()) || isSoftFloat(n.X.Type()))
 
-		case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.ONAMEOFFSET, ir.OMETHEXPR,
+		case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OLINKSYMOFFSET, ir.OMETHEXPR,
 			ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOMPLEX, ir.OEFACE,
 			ir.OANDAND, ir.OOROR,
 			ir.OADDR, ir.OBITNOT, ir.ONOT, ir.OPLUS,
 			ir.OCAP, ir.OIMAG, ir.OLEN, ir.OREAL,
 			ir.OCONVNOP, ir.ODOT,

From e3027c6828230d01089afec0ab958040ba326abc Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Sat, 16 Jan 2021 22:27:23 -0800
Subject: [PATCH 399/474] [dev.regabi] cmd/compile: fix linux-amd64-noopt builder

CL 284223 tightened down the allowed expressions in mayCall, but
evidently a little too tight. The linux-amd64-noopt builder does in
fact see expressions with non-empty Init lists in argument lists.

Since I believe these can only appear on the RHS of LogicalExpr
expressions, this CL relaxes that one case.

Change-Id: I1e6bbd0449778c40ed2610b3e1ef6a825a84ada7
Reviewed-on: https://go-review.googlesource.com/c/go/+/284226
Trust: Matthew Dempsky
Run-TryBot: Matthew Dempsky
TryBot-Result: Go Bot
Reviewed-by: Cuong Manh Le
---
 src/cmd/compile/internal/walk/walk.go | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go
index f214551617129..399fb2462b9f7 100644
--- a/src/cmd/compile/internal/walk/walk.go
+++ b/src/cmd/compile/internal/walk/walk.go
@@ -305,6 +305,14 @@ func mayCall(n ir.Node) bool {
 			// before we start marshaling args for a call. See issue 16760.
 			return true
 
+		case ir.OANDAND, ir.OOROR:
+			n := n.(*ir.LogicalExpr)
+			// The RHS expression may have init statements that
+			// should only execute conditionally, and so cannot be
+			// pulled out to the top-level init list. We could try
+			// to be more precise here.
+			return len(n.Y.Init()) != 0
+
 			// When using soft-float, these ops might be rewritten to function calls
 			// so we ensure they are evaluated first.
case ir.OADD, ir.OSUB, ir.OMUL, ir.ONEG: @@ -318,7 +326,6 @@ func mayCall(n ir.Node) bool { case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OLINKSYMOFFSET, ir.OMETHEXPR, ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOMPLEX, ir.OEFACE, - ir.OANDAND, ir.OOROR, ir.OADDR, ir.OBITNOT, ir.ONOT, ir.OPLUS, ir.OCAP, ir.OIMAG, ir.OLEN, ir.OREAL, ir.OCONVNOP, ir.ODOT, From 87845d14f9822c104cc192c8f7858a2a24d0029f Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 17 Jan 2021 00:30:32 -0800 Subject: [PATCH 400/474] [dev.regabi] cmd/compile: add ir.TailCallStmt This CL splits out ORETJMP as a new TailCallStmt node, separate from the other BranchStmt nodes. In doing so, this allows us to change it from identifying a function by *types.Sym to identifying one by directly pointing to the *ir.Func. While here, also rename the operation to OTAILCALL. Passes toolstash -cmp. Change-Id: I273e6ea5d92bf3005ae02fb59b3240a190a6cf1b Reviewed-on: https://go-review.googlesource.com/c/go/+/284227 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/deadcode/deadcode.go | 2 +- src/cmd/compile/internal/escape/escape.go | 4 ++-- src/cmd/compile/internal/inline/inl.go | 2 +- src/cmd/compile/internal/ir/fmt.go | 6 ++--- src/cmd/compile/internal/ir/node.go | 4 ++-- src/cmd/compile/internal/ir/node_gen.go | 22 +++++++++++++++++++ src/cmd/compile/internal/ir/op_string.go | 6 ++--- src/cmd/compile/internal/ir/stmt.go | 22 +++++++++++++++---- .../compile/internal/reflectdata/reflect.go | 2 +- src/cmd/compile/internal/ssagen/abi.go | 2 +- src/cmd/compile/internal/ssagen/ssa.go | 6 ++--- .../compile/internal/typecheck/typecheck.go | 6 ++--- src/cmd/compile/internal/walk/order.go | 2 +- src/cmd/compile/internal/walk/stmt.go | 4 ++-- 14 files changed, 63 insertions(+), 27 deletions(-) diff --git a/src/cmd/compile/internal/deadcode/deadcode.go b/src/cmd/compile/internal/deadcode/deadcode.go index c409320fc4894..520203787f02d 100644 --- a/src/cmd/compile/internal/deadcode/deadcode.go +++ b/src/cmd/compile/internal/deadcode/deadcode.go @@ -75,7 +75,7 @@ func stmts(nn *ir.Nodes) { // might be the target of a goto. See issue 28616. if body := body; len(body) != 0 { switch body[(len(body) - 1)].Op() { - case ir.ORETURN, ir.ORETJMP, ir.OPANIC: + case ir.ORETURN, ir.OTAILCALL, ir.OPANIC: if i > lastLabel { cut = true } diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index 26420b820a968..5ee6d4f498dd4 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -534,8 +534,8 @@ func (e *escape) stmt(n ir.Node) { e.stmts(n.Call.Init()) e.call(nil, n.Call, n) - case ir.ORETJMP: - // TODO(mdempsky): What do? esc.go just ignores it. + case ir.OTAILCALL: + // TODO(mdempsky): Treat like a normal call? esc.go used to just ignore it. 
} } diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 4bb849cdaee81..143fbe9efe1b9 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -359,7 +359,7 @@ func (v *hairyVisitor) doNode(n ir.Node) error { ir.OGO, ir.ODEFER, ir.ODCLTYPE, // can't print yet - ir.ORETJMP: + ir.OTAILCALL: return errors.New("unhandled op " + n.Op().String()) case ir.OAPPEND: diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 68e1bc156997f..ee6a62625afda 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -378,9 +378,9 @@ func stmtFmt(n Node, s fmt.State) { n := n.(*ReturnStmt) fmt.Fprintf(s, "return %.v", n.Results) - case ORETJMP: - n := n.(*BranchStmt) - fmt.Fprintf(s, "retjmp %v", n.Label) + case OTAILCALL: + n := n.(*TailCallStmt) + fmt.Fprintf(s, "tailcall %v", n.Target) case OINLMARK: n := n.(*InlineMarkStmt) diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index a725307c2c64e..291e1286bb8f1 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -306,8 +306,8 @@ const ( OLINKSYMOFFSET // offset within a name // arch-specific opcodes - ORETJMP // return to other function - OGETG // runtime.getg() (read g pointer) + OTAILCALL // tail call to another function + OGETG // runtime.getg() (read g pointer) OEND ) diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 8f89c67748658..af9ee8d86ecd5 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -1227,6 +1227,28 @@ func (n *SwitchStmt) editChildren(edit func(Node) Node) { editNodes(n.Compiled, edit) } +func (n *TailCallStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *TailCallStmt) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *TailCallStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.Target != nil && do(n.Target) { + return true + } + return false +} +func (n *TailCallStmt) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Target != nil { + n.Target = edit(n.Target).(*Name) + } +} + func (n *TypeAssertExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *TypeAssertExpr) copy() Node { c := *n diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go index 35196b01aee83..15c60baf449d9 100644 --- a/src/cmd/compile/internal/ir/op_string.go +++ b/src/cmd/compile/internal/ir/op_string.go @@ -157,14 +157,14 @@ func _() { _ = x[ORESULT-146] _ = x[OINLMARK-147] _ = x[OLINKSYMOFFSET-148] - _ = x[ORETJMP-149] + _ = x[OTAILCALL-149] _ = x[OGETG-150] _ = x[OEND-151] } -const _Op_name = 
"XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKLINKSYMOFFSETRETJMPGETGEND" +const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKLINKSYMOFFSETTAILCALLGETGEND" -var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 474, 480, 484, 487, 491, 496, 501, 507, 512, 516, 521, 529, 537, 543, 552, 563, 570, 574, 581, 589, 593, 597, 601, 608, 615, 623, 629, 637, 645, 650, 655, 659, 667, 672, 676, 679, 687, 691, 693, 698, 700, 705, 711, 717, 723, 729, 734, 738, 745, 751, 756, 762, 768, 775, 780, 784, 789, 793, 798, 806, 812, 819, 826, 832, 839, 852, 858, 862, 865} +var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 474, 480, 484, 487, 491, 496, 501, 507, 512, 516, 521, 529, 537, 543, 552, 563, 570, 574, 581, 589, 593, 597, 601, 608, 615, 623, 629, 637, 645, 650, 655, 659, 667, 672, 676, 679, 687, 691, 693, 698, 700, 705, 711, 717, 723, 729, 734, 738, 745, 751, 756, 762, 768, 775, 780, 784, 789, 793, 798, 806, 812, 819, 826, 832, 839, 852, 860, 864, 867} func (i Op) String() string { if i >= Op(len(_Op_index)-1) { diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index 0358569a1f6ee..c304867e1d9a9 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ 
b/src/cmd/compile/internal/ir/stmt.go @@ -144,9 +144,6 @@ func NewBlockStmt(pos src.XPos, list []Node) *BlockStmt { } // A BranchStmt is a break, continue, fallthrough, or goto statement. -// -// For back-end code generation, Op may also be RETJMP (return+jump), -// in which case the label names another function entirely. type BranchStmt struct { miniStmt Label *types.Sym // label if present @@ -154,7 +151,7 @@ type BranchStmt struct { func NewBranchStmt(pos src.XPos, op Op, label *types.Sym) *BranchStmt { switch op { - case OBREAK, OCONTINUE, OFALL, OGOTO, ORETJMP: + case OBREAK, OCONTINUE, OFALL, OGOTO: // ok default: panic("NewBranch " + op.String()) @@ -384,6 +381,23 @@ func NewSwitchStmt(pos src.XPos, tag Node, cases []*CaseClause) *SwitchStmt { return n } +// A TailCallStmt is a tail call statement, which is used for back-end +// code generation to jump directly to another function entirely. +type TailCallStmt struct { + miniStmt + Target *Name +} + +func NewTailCallStmt(pos src.XPos, target *Name) *TailCallStmt { + if target.Op() != ONAME || target.Class != PFUNC { + base.FatalfAt(pos, "tail call to non-func %v", target) + } + n := &TailCallStmt{Target: target} + n.pos = pos + n.op = OTAILCALL + return n +} + // A TypeSwitchGuard is the [Name :=] X.(type) in a type switch. type TypeSwitchGuard struct { miniNode diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go index efe863cc3fa39..fd3e6beaa3bca 100644 --- a/src/cmd/compile/internal/reflectdata/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -1794,7 +1794,7 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym { } as := ir.NewAssignStmt(base.Pos, nthis, typecheck.ConvNop(left, rcvr)) fn.Body.Append(as) - fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.ORETJMP, ir.MethodSym(methodrcvr, method.Sym))) + fn.Body.Append(ir.NewTailCallStmt(base.Pos, method.Nname.(*ir.Name))) } else { fn.SetWrapper(true) // ignore frame for panic+recover matching call := ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil) diff --git a/src/cmd/compile/internal/ssagen/abi.go b/src/cmd/compile/internal/ssagen/abi.go index 274c543ca553a..b5da42087251b 100644 --- a/src/cmd/compile/internal/ssagen/abi.go +++ b/src/cmd/compile/internal/ssagen/abi.go @@ -303,7 +303,7 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { var tail ir.Node if tfn.Type().NumResults() == 0 && tfn.Type().NumParams() == 0 && tfn.Type().NumRecvs() == 0 && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) { - tail = ir.NewBranchStmt(base.Pos, ir.ORETJMP, f.Nname.Sym()) + tail = ir.NewTailCallStmt(base.Pos, f.Nname) } else { call := ir.NewCallExpr(base.Pos, ir.OCALL, f.Nname, nil) call.Args = ir.ParamNames(tfn.Type()) diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 1cd49a487ec5a..beef0d8234668 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -1580,11 +1580,11 @@ func (s *state) stmt(n ir.Node) { b := s.exit() b.Pos = s.lastPos.WithIsStmt() - case ir.ORETJMP: - n := n.(*ir.BranchStmt) + case ir.OTAILCALL: + n := n.(*ir.TailCallStmt) b := s.exit() b.Kind = ssa.BlockRetJmp // override BlockRet - b.Aux = callTargetLSym(n.Label, s.curfn.LSym) + b.Aux = callTargetLSym(n.Target.Sym(), s.curfn.LSym) case ir.OCONTINUE, ir.OBREAK: n := n.(*ir.BranchStmt) diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index 5b44a5743fc3a..7881ea308dacb 
100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -857,8 +857,8 @@ func typecheck1(n ir.Node, top int) ir.Node { n := n.(*ir.ReturnStmt) return tcReturn(n) - case ir.ORETJMP: - n := n.(*ir.BranchStmt) + case ir.OTAILCALL: + n := n.(*ir.TailCallStmt) return n case ir.OSELECT: @@ -2023,7 +2023,7 @@ func isTermNode(n ir.Node) bool { n := n.(*ir.BlockStmt) return isTermNodes(n.List) - case ir.OGOTO, ir.ORETURN, ir.ORETJMP, ir.OPANIC, ir.OFALL: + case ir.OGOTO, ir.ORETURN, ir.OTAILCALL, ir.OPANIC, ir.OFALL: return true case ir.OFOR, ir.OFORUNTIL: diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index d34c58009acbe..e1e9f168bbc4b 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -692,7 +692,7 @@ func (o *orderState) stmt(n ir.Node) { ir.OFALL, ir.OGOTO, ir.OLABEL, - ir.ORETJMP: + ir.OTAILCALL: o.out = append(o.out, n) // Special: handle call arguments. diff --git a/src/cmd/compile/internal/walk/stmt.go b/src/cmd/compile/internal/walk/stmt.go index d892b2413f138..46a621c2ba74d 100644 --- a/src/cmd/compile/internal/walk/stmt.go +++ b/src/cmd/compile/internal/walk/stmt.go @@ -136,8 +136,8 @@ func walkStmt(n ir.Node) ir.Node { n := n.(*ir.ReturnStmt) return walkReturn(n) - case ir.ORETJMP: - n := n.(*ir.BranchStmt) + case ir.OTAILCALL: + n := n.(*ir.TailCallStmt) return n case ir.OINLMARK: From 99a5db11acc48794b703bee395a08848d49da41c Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 17 Jan 2021 01:13:34 -0800 Subject: [PATCH 401/474] [dev.regabi] cmd/compile: use LinksymOffsetExpr in walkConvInterface This CL updates walkConvInterface to use LinksymOffsetExpr for referencing runtime.staticuint64s and runtime.zerobase. Passes toolstash -cmp (surprisingly). Change-Id: Iad7e30371f89c8a5e176b5ddbc53faf57012ba0d Reviewed-on: https://go-review.googlesource.com/c/go/+/284229 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/ir/expr.go | 7 +++++++ src/cmd/compile/internal/ir/symtab.go | 7 +------ src/cmd/compile/internal/ssagen/ssa.go | 1 + src/cmd/compile/internal/walk/convert.go | 18 +++++------------- 4 files changed, 14 insertions(+), 19 deletions(-) diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 8aad25d625ec3..e944a0b1550eb 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -477,6 +477,13 @@ func NewLinksymOffsetExpr(pos src.XPos, lsym *obj.LSym, offset int64, typ *types return n } +// NewLinksymExpr is NewLinksymOffsetExpr, but with offset fixed at 0. +func NewLinksymExpr(pos src.XPos, lsym *obj.LSym, typ *types.Type) *LinksymOffsetExpr { + return NewLinksymOffsetExpr(pos, lsym, 0, typ) +} + +// NewNameOffsetExpr is NewLinksymOffsetExpr, but taking a *Name +// representing a global variable instead of an *obj.LSym directly. 
func NewNameOffsetExpr(pos src.XPos, name *Name, offset int64, typ *types.Type) *LinksymOffsetExpr { if name == nil || IsBlank(name) || !(name.Op() == ONAME && name.Class == PEXTERN) { base.FatalfAt(pos, "cannot take offset of nil, blank name or non-global variable: %v", name) diff --git a/src/cmd/compile/internal/ir/symtab.go b/src/cmd/compile/internal/ir/symtab.go index df694f6c848ea..80e457176490c 100644 --- a/src/cmd/compile/internal/ir/symtab.go +++ b/src/cmd/compile/internal/ir/symtab.go @@ -9,12 +9,6 @@ import ( "cmd/internal/obj" ) -// Names holds known names. -var Names struct { - Staticuint64s *Name - Zerobase *Name -} - // Syms holds known symbols. var Syms struct { AssertE2I *obj.LSym @@ -46,6 +40,7 @@ var Syms struct { Racewriterange *obj.LSym // Wasm SigPanic *obj.LSym + Staticuint64s *obj.LSym Typedmemclr *obj.LSym Typedmemmove *obj.LSym Udiv *obj.LSym diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index beef0d8234668..02aff7a8cf1fc 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -124,6 +124,7 @@ func InitConfig() { ir.Syms.X86HasFMA = typecheck.LookupRuntimeVar("x86HasFMA") // bool ir.Syms.ARMHasVFPv4 = typecheck.LookupRuntimeVar("armHasVFPv4") // bool ir.Syms.ARM64HasATOMICS = typecheck.LookupRuntimeVar("arm64HasATOMICS") // bool + ir.Syms.Staticuint64s = typecheck.LookupRuntimeVar("staticuint64s") ir.Syms.Typedmemclr = typecheck.LookupRuntimeFunc("typedmemclr") ir.Syms.Typedmemmove = typecheck.LookupRuntimeFunc("typedmemmove") ir.Syms.Udiv = typecheck.LookupRuntimeVar("udiv") // asm func with special ABI diff --git a/src/cmd/compile/internal/walk/convert.go b/src/cmd/compile/internal/walk/convert.go index d143c1084fd2c..fa8e2c0bb8db4 100644 --- a/src/cmd/compile/internal/walk/convert.go +++ b/src/cmd/compile/internal/walk/convert.go @@ -66,17 +66,6 @@ func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node { return l } - if ir.Names.Staticuint64s == nil { - ir.Names.Staticuint64s = typecheck.NewName(ir.Pkgs.Runtime.Lookup("staticuint64s")) - ir.Names.Staticuint64s.Class = ir.PEXTERN - // The actual type is [256]uint64, but we use [256*8]uint8 so we can address - // individual bytes. - ir.Names.Staticuint64s.SetType(types.NewArray(types.Types[types.TUINT8], 256*8)) - ir.Names.Zerobase = typecheck.NewName(ir.Pkgs.Runtime.Lookup("zerobase")) - ir.Names.Zerobase.Class = ir.PEXTERN - ir.Names.Zerobase.SetType(types.Types[types.TUINTPTR]) - } - // Optimize convT2{E,I} for many cases in which T is not pointer-shaped, // by using an existing addressable value identical to n.Left // or creating one on the stack. @@ -85,7 +74,7 @@ func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node { case fromType.Size() == 0: // n.Left is zero-sized. Use zerobase. cheapExpr(n.X, init) // Evaluate n.Left for side-effects. See issue 19246. - value = ir.Names.Zerobase + value = ir.NewLinksymExpr(base.Pos, ir.Syms.Zerobase, types.Types[types.TUINTPTR]) case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()): // n.Left is a bool/byte. Use staticuint64s[n.Left * 8] on little-endian // and staticuint64s[n.Left * 8 + 7] on big-endian. 
@@ -95,7 +84,10 @@ func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node { if ssagen.Arch.LinkArch.ByteOrder == binary.BigEndian { index = ir.NewBinaryExpr(base.Pos, ir.OADD, index, ir.NewInt(7)) } - xe := ir.NewIndexExpr(base.Pos, ir.Names.Staticuint64s, index) + // The actual type is [256]uint64, but we use [256*8]uint8 so we can address + // individual bytes. + staticuint64s := ir.NewLinksymExpr(base.Pos, ir.Syms.Staticuint64s, types.NewArray(types.Types[types.TUINT8], 256*8)) + xe := ir.NewIndexExpr(base.Pos, staticuint64s, index) xe.SetBounded(true) value = xe case n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class == ir.PEXTERN && n.X.(*ir.Name).Readonly(): From 7e0fa38aad7bb402fcd08a66adc6492818c79dcf Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 17 Jan 2021 02:16:58 -0800 Subject: [PATCH 402/474] [dev.regabi] cmd/compile: remove unneeded packages from ir.Pkgs

ir.Pkgs.Itablink isn't used anymore. (I don't recall what it was ever used for.)

ir.Pkgs.Race and ir.Pkgs.Msan are only needed in exactly one place, so just create them on demand there, the same way that we create "main" on demand.

Change-Id: I3474bb949f71cd40c7a462b9f4a369adeacde0d6 Reviewed-on: https://go-review.googlesource.com/c/go/+/284230 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/cmd/compile/internal/gc/main.go | 9 --------- src/cmd/compile/internal/ir/symtab.go | 15 ++++++--------- src/cmd/compile/internal/reflectdata/reflect.go | 5 +++-- 3 files changed, 9 insertions(+), 20 deletions(-) diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index e9ac24352779b..f758933d79e39 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -96,9 +96,6 @@ func Main(archInit func(*ssagen.ArchInfo)) { ir.Pkgs.Itab = types.NewPkg("go.itab", "go.itab") ir.Pkgs.Itab.Prefix = "go.itab" // not go%2eitab - ir.Pkgs.Itablink = types.NewPkg("go.itablink", "go.itablink") - ir.Pkgs.Itablink.Prefix = "go.itablink" // not go%2eitablink - ir.Pkgs.Track = types.NewPkg("go.track", "go.track") ir.Pkgs.Track.Prefix = "go.track" // not go%2etrack @@ -160,12 +157,6 @@ func Main(archInit func(*ssagen.ArchInfo)) { ssagen.Arch.LinkArch.Init(base.Ctxt) startProfile() - if base.Flag.Race { - ir.Pkgs.Race = types.NewPkg("runtime/race", "") - } - if base.Flag.MSan { - ir.Pkgs.Msan = types.NewPkg("runtime/msan", "") - } if base.Flag.Race || base.Flag.MSan { base.Flag.Cfg.Instrumenting = true } diff --git a/src/cmd/compile/internal/ir/symtab.go b/src/cmd/compile/internal/ir/symtab.go index 80e457176490c..0968efbf5cb3e 100644 --- a/src/cmd/compile/internal/ir/symtab.go +++ b/src/cmd/compile/internal/ir/symtab.go @@ -65,13 +65,10 @@ var Syms struct { // Pkgs holds known packages.
var Pkgs struct { - Go *types.Pkg - Itab *types.Pkg - Itablink *types.Pkg - Map *types.Pkg - Msan *types.Pkg - Race *types.Pkg - Runtime *types.Pkg - Track *types.Pkg - Unsafe *types.Pkg + Go *types.Pkg + Itab *types.Pkg + Map *types.Pkg + Runtime *types.Pkg + Track *types.Pkg + Unsafe *types.Pkg } diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go index fd3e6beaa3bca..fe0bd269272b6 100644 --- a/src/cmd/compile/internal/reflectdata/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -1426,11 +1426,12 @@ func WriteBasicTypes() { dimportpath(ir.Pkgs.Runtime) if base.Flag.Race { - dimportpath(ir.Pkgs.Race) + dimportpath(types.NewPkg("runtime/race", "")) } if base.Flag.MSan { - dimportpath(ir.Pkgs.Msan) + dimportpath(types.NewPkg("runtime/msan", "")) } + dimportpath(types.NewPkg("main", "")) } } From 0ffa1ead6e281932697154d4ea45413b2ba8fa53 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Sun, 17 Jan 2021 16:41:19 +0700 Subject: [PATCH 403/474] [dev.regabi] cmd/compile: use *obj.LSym instead of *ir.Name for staticdata functions

Those functions only use (*ir.Name).Linksym(), so just change them to get an *obj.LSym directly. This helps get rid of unnecessary validations that their callers have already done.

Passes toolstash -cmp.

For #43737.

Change-Id: Ifd6c2525e472f8e790940bc167665f9d74dd1bc5 Reviewed-on: https://go-review.googlesource.com/c/go/+/284121 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/staticdata/data.go | 48 ++++++++------------ src/cmd/compile/internal/staticinit/sched.go | 25 +++++----- src/cmd/compile/internal/walk/complit.go | 6 +-- 3 files changed, 34 insertions(+), 45 deletions(-) diff --git a/src/cmd/compile/internal/staticdata/data.go b/src/cmd/compile/internal/staticdata/data.go index 4dbc11c3c4af1..6ef99b50c7800 100644 --- a/src/cmd/compile/internal/staticdata/data.go +++ b/src/cmd/compile/internal/staticdata/data.go @@ -25,46 +25,29 @@ import ( "cmd/internal/src" ) -// InitAddr writes the static address of a to n. a must be an ONAME. -// Neither n nor a is modified. -func InitAddr(n *ir.Name, noff int64, a *ir.Name, aoff int64) { +// InitAddrOffset writes the static name symbol lsym to n, it does not modify n. +// It's the caller responsibility to make sure lsym is from ONAME/PEXTERN node. +func InitAddrOffset(n *ir.Name, noff int64, lsym *obj.LSym, off int64) { if n.Op() != ir.ONAME { base.Fatalf("InitAddr n op %v", n.Op()) } if n.Sym() == nil { base.Fatalf("InitAddr nil n sym") } - if a.Op() != ir.ONAME { - base.Fatalf("InitAddr a op %v", a.Op()) - } s := n.Linksym() - s.WriteAddr(base.Ctxt, noff, types.PtrSize, a.Linksym(), aoff) + s.WriteAddr(base.Ctxt, noff, types.PtrSize, lsym, off) } -// InitFunc writes the static address of f to n. f must be a global function. -// Neither n nor f is modified. -func InitFunc(n *ir.Name, noff int64, f *ir.Name) { - if n.Op() != ir.ONAME { - base.Fatalf("InitFunc n op %v", n.Op()) - } - if n.Sym() == nil { - base.Fatalf("InitFunc nil n sym") - } - if f.Class != ir.PFUNC { - base.Fatalf("InitFunc class not PFUNC %d", f.Class) - } - s := n.Linksym() - s.WriteAddr(base.Ctxt, noff, types.PtrSize, FuncLinksym(f), 0) +// InitAddr is InitAddrOffset, with offset fixed to 0. +func InitAddr(n *ir.Name, noff int64, lsym *obj.LSym) { + InitAddrOffset(n, noff, lsym, 0) } -// InitSlice writes a static slice symbol {&arr, lencap, lencap} to n+noff. -// InitSlice does not modify n.
-func InitSlice(n *ir.Name, noff int64, arr *ir.Name, lencap int64) { +// InitSlice writes a static slice symbol {lsym, lencap, lencap} to n+noff, it does not modify n. +// It's the caller responsibility to make sure lsym is from ONAME node. +func InitSlice(n *ir.Name, noff int64, lsym *obj.LSym, lencap int64) { s := n.Linksym() - if arr.Op() != ir.ONAME { - base.Fatalf("InitSlice non-name arr %v", arr) - } - s.WriteAddr(base.Ctxt, noff, types.PtrSize, arr.Linksym(), 0) + s.WriteAddr(base.Ctxt, noff, types.PtrSize, lsym, 0) s.WriteInt(base.Ctxt, noff+types.SliceLenOffset, types.PtrSize, lencap) s.WriteInt(base.Ctxt, noff+types.SliceCapOffset, types.PtrSize, lencap) } @@ -73,7 +56,7 @@ func InitSliceBytes(nam *ir.Name, off int64, s string) { if nam.Op() != ir.ONAME { base.Fatalf("InitSliceBytes %v", nam) } - InitSlice(nam, off, slicedata(nam.Pos(), s), int64(len(s))) + InitSlice(nam, off, slicedata(nam.Pos(), s).Linksym(), int64(len(s))) } const ( @@ -265,6 +248,13 @@ func FuncLinksym(n *ir.Name) *obj.LSym { return FuncSym(n.Sym()).Linksym() } +func GlobalLinksym(n *ir.Name) *obj.LSym { + if n.Op() != ir.ONAME || n.Class != ir.PEXTERN { + base.Fatalf("expected global variable: %v", n) + } + return n.Linksym() +} + // NeedFuncSym ensures that s·f is exported, if needed. // It is only used with -dynlink. // When not compiling for dynamic linking, diff --git a/src/cmd/compile/internal/staticinit/sched.go b/src/cmd/compile/internal/staticinit/sched.go index 8c195742e6925..cf1b416462770 100644 --- a/src/cmd/compile/internal/staticinit/sched.go +++ b/src/cmd/compile/internal/staticinit/sched.go @@ -81,7 +81,7 @@ func (s *Schedule) tryStaticInit(nn ir.Node) bool { func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Type) bool { if rn.Class == ir.PFUNC { // TODO if roff != 0 { panic } - staticdata.InitFunc(l, loff, rn) + staticdata.InitAddr(l, loff, staticdata.FuncLinksym(rn)) return true } if rn.Class != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg { @@ -138,9 +138,8 @@ func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Ty case ir.OADDR: r := r.(*ir.AddrExpr) - if a := r.X; a.Op() == ir.ONAME { - a := a.(*ir.Name) - staticdata.InitAddr(l, loff, a, 0) + if a, ok := r.X.(*ir.Name); ok && a.Op() == ir.ONAME { + staticdata.InitAddr(l, loff, staticdata.GlobalLinksym(a)) return true } @@ -149,14 +148,14 @@ func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Ty switch r.X.Op() { case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT: // copy pointer - staticdata.InitAddr(l, loff, s.Temps[r], 0) + staticdata.InitAddr(l, loff, staticdata.GlobalLinksym(s.Temps[r])) return true } case ir.OSLICELIT: r := r.(*ir.CompLitExpr) // copy slice - staticdata.InitSlice(l, loff, s.Temps[r], r.Len) + staticdata.InitSlice(l, loff, staticdata.GlobalLinksym(s.Temps[r]), r.Len) return true case ir.OARRAYLIT, ir.OSTRUCTLIT: @@ -235,8 +234,8 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty case ir.OADDR: r := r.(*ir.AddrExpr) - if name, offset, ok := StaticLoc(r.X); ok { - staticdata.InitAddr(l, loff, name, offset) + if name, offset, ok := StaticLoc(r.X); ok && name.Class == ir.PEXTERN { + staticdata.InitAddrOffset(l, loff, name.Linksym(), offset) return true } fallthrough @@ -249,7 +248,7 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty a := StaticName(r.X.Type()) s.Temps[r] = a - staticdata.InitAddr(l, loff, a, 0) + staticdata.InitAddr(l, loff, a.Linksym()) // Init 
underlying literal. assign(base.Pos, a, 0, r.X) @@ -273,7 +272,7 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty ta.SetNoalg(true) a := StaticName(ta) s.Temps[r] = a - staticdata.InitSlice(l, loff, a, r.Len) + staticdata.InitSlice(l, loff, a.Linksym(), r.Len) // Fall through to init underlying array. l = a loff = 0 @@ -308,7 +307,7 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty // Closures with no captured variables are globals, // so the assignment can be done at link time. // TODO if roff != 0 { panic } - staticdata.InitFunc(l, loff, r.Func.Nname) + staticdata.InitAddr(l, loff, staticdata.FuncLinksym(r.Func.Nname)) return true } ir.ClosureDebugRuntimeCheck(r) @@ -345,7 +344,7 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty // Create a copy of l to modify while we emit data. // Emit itab, advance offset. - staticdata.InitAddr(l, loff, itab.X.(*ir.Name), 0) + staticdata.InitAddr(l, loff, itab.X.(*ir.Name).Linksym()) // Emit data. if types.IsDirectIface(val.Type()) { @@ -361,7 +360,7 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty a := StaticName(val.Type()) s.Temps[val] = a assign(base.Pos, a, 0, val) - staticdata.InitAddr(l, loff+int64(types.PtrSize), a, 0) + staticdata.InitAddr(l, loff+int64(types.PtrSize), a.Linksym()) } return true diff --git a/src/cmd/compile/internal/walk/complit.go b/src/cmd/compile/internal/walk/complit.go index 97e820238bb04..73442dc404cf6 100644 --- a/src/cmd/compile/internal/walk/complit.go +++ b/src/cmd/compile/internal/walk/complit.go @@ -297,7 +297,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) if !ok || name.Class != ir.PEXTERN { base.Fatalf("slicelit: %v", var_) } - staticdata.InitSlice(name, offset, vstat, t.NumElem()) + staticdata.InitSlice(name, offset, vstat.Linksym(), t.NumElem()) return } @@ -647,7 +647,7 @@ func genAsStatic(as *ir.AssignStmt) { return case ir.OMETHEXPR: r := r.(*ir.SelectorExpr) - staticdata.InitFunc(name, offset, r.FuncName()) + staticdata.InitAddr(name, offset, staticdata.FuncLinksym(r.FuncName())) return case ir.ONAME: r := r.(*ir.Name) @@ -655,7 +655,7 @@ func genAsStatic(as *ir.AssignStmt) { base.Fatalf("genAsStatic %+v", as) } if r.Class == ir.PFUNC { - staticdata.InitFunc(name, offset, r) + staticdata.InitAddr(name, offset, staticdata.FuncLinksym(r)) return } } From 4c835f9169e2b1f98a9755724d1f46bf50566003 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Mon, 18 Jan 2021 09:42:53 +0700 Subject: [PATCH 404/474] [dev.regabi] cmd/compile: use LinksymOffsetExpr in TypePtr/ItabAddr Passes toolstash -cmp. 
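For illustration, TypePtr no longer synthesizes a placeholder ONAME for the type's symbol before taking its address; it now addresses the linker symbol directly. A minimal sketch of the new shape (mirroring the diff below; ITabAddr gets the same treatment):

    n := ir.NewLinksymExpr(base.Pos, TypeLinksym(t), types.Types[types.TUINT8])
    return typecheck.Expr(typecheck.NodAddr(n)).(*ir.AddrExpr)
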
Fixes #43737 Change-Id: I2d5228c0213b5f8742e3cea6fac9bc985b19d78c Reviewed-on: https://go-review.googlesource.com/c/go/+/284122 Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Trust: Cuong Manh Le Reviewed-by: Matthew Dempsky --- .../compile/internal/reflectdata/reflect.go | 37 +++++-------------- src/cmd/compile/internal/staticinit/sched.go | 2 +- 2 files changed, 11 insertions(+), 28 deletions(-) diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go index fe0bd269272b6..bd89b62ff5b8b 100644 --- a/src/cmd/compile/internal/reflectdata/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -836,39 +836,22 @@ func TypeLinksym(t *types.Type) *obj.LSym { } func TypePtr(t *types.Type) *ir.AddrExpr { - s := TypeSym(t) - if s.Def == nil { - n := ir.NewNameAt(src.NoXPos, s) - n.SetType(types.Types[types.TUINT8]) - n.Class = ir.PEXTERN - n.SetTypecheck(1) - s.Def = n - } - - n := typecheck.NodAddr(ir.AsNode(s.Def)) - n.SetType(types.NewPtr(s.Def.Type())) - n.SetTypecheck(1) - return n + n := ir.NewLinksymExpr(base.Pos, TypeLinksym(t), types.Types[types.TUINT8]) + return typecheck.Expr(typecheck.NodAddr(n)).(*ir.AddrExpr) } func ITabAddr(t, itype *types.Type) *ir.AddrExpr { if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() { base.Fatalf("ITabAddr(%v, %v)", t, itype) } - s := ir.Pkgs.Itab.Lookup(t.ShortString() + "," + itype.ShortString()) - if s.Def == nil { - n := typecheck.NewName(s) - n.SetType(types.Types[types.TUINT8]) - n.Class = ir.PEXTERN - n.SetTypecheck(1) - s.Def = n - itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: n.Linksym()}) - } - - n := typecheck.NodAddr(ir.AsNode(s.Def)) - n.SetType(types.NewPtr(s.Def.Type())) - n.SetTypecheck(1) - return n + s, existed := ir.Pkgs.Itab.LookupOK(t.ShortString() + "," + itype.ShortString()) + if !existed { + itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()}) + } + + lsym := s.Linksym() + n := ir.NewLinksymExpr(base.Pos, lsym, types.Types[types.TUINT8]) + return typecheck.Expr(typecheck.NodAddr(n)).(*ir.AddrExpr) } // needkeyupdate reports whether map updates with t as a key diff --git a/src/cmd/compile/internal/staticinit/sched.go b/src/cmd/compile/internal/staticinit/sched.go index cf1b416462770..f3ad82e7b60a1 100644 --- a/src/cmd/compile/internal/staticinit/sched.go +++ b/src/cmd/compile/internal/staticinit/sched.go @@ -344,7 +344,7 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty // Create a copy of l to modify while we emit data. // Emit itab, advance offset. - staticdata.InitAddr(l, loff, itab.X.(*ir.Name).Linksym()) + staticdata.InitAddr(l, loff, itab.X.(*ir.LinksymOffsetExpr).Linksym) // Emit data. if types.IsDirectIface(val.Type()) { From 6113db0bb47706b8b5f65b67b87f8277432ca4d2 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 17 Jan 2021 16:14:48 -0800 Subject: [PATCH 405/474] [dev.regabi] cmd/compile: convert OPANIC argument to interface{} during typecheck Currently, typecheck leaves arguments to OPANIC as their original type. This CL changes it to insert implicit OCONVIFACE operations to convert arguments to `interface{}` like how any other function call would be handled. No immediate benefits, other than getting to remove a tiny bit of special-case logic in order.go's handling of OPANICs. Instead, the generic code path for handling OCONVIFACE is used, if necessary. 
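For example (an illustrative sketch, not code from this CL), tcPanic now gives panic arguments the same implicit conversion any call to a func(interface{}) parameter would get:

    panic(42)              // as written
    panic(interface{}(42)) // as typechecked after this CL; the inserted
                           // OCONVIFACE is marked Implicit()
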
Longer term, this should be marginally helpful for #43753, as it reduces the number of cases where we need values to be addressable for runtime calls.

However, this does require adding some hacks to appease existing tests:

1. We need yet another kludge in inline budgeting, to ensure that reflect.flag.mustBe stays inlinable for cmd/compile/internal/test's TestIntendedInlining.

2. Since the OCONVIFACE expressions are now being introduced during typecheck, they're now visible to escape analysis. So expressions like "panic(1)" are now seen as "panic(interface{}(1))", and escape analysis warns that the "interface{}(1)" escapes to the heap. These have always escaped to the heap; it's just that we now report it accurately. (Also, unfortunately, fmt.go hides implicit conversions by default in diagnostic messages, so instead of reporting "interface{}(1) escapes to heap", it actually reports "1 escapes to heap", which is confusing. However, this confusing messaging also isn't new.)

Change-Id: Icedf60e1d2e464e219441b8d1233a313770272af Reviewed-on: https://go-review.googlesource.com/c/go/+/284412 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le Trust: Matthew Dempsky --- src/cmd/compile/internal/inline/inl.go | 7 +++++++ src/cmd/compile/internal/typecheck/func.go | 2 +- src/cmd/compile/internal/walk/order.go | 6 ++---- test/closure3.dir/main.go | 2 +- test/escape2.go | 2 +- test/escape2n.go | 2 +- test/escape4.go | 6 +++--- test/fixedbugs/issue13799.go | 12 ++++++------ test/fixedbugs/issue7921.go | 2 +- 9 files changed, 23 insertions(+), 18 deletions(-) diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 143fbe9efe1b9..aa194ebab2b6e 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -346,6 +346,13 @@ func (v *hairyVisitor) doNode(n ir.Node) error { v.budget -= v.extraCallCost case ir.OPANIC: + n := n.(*ir.UnaryExpr) + if n.X.Op() == ir.OCONVIFACE && n.X.(*ir.ConvExpr).Implicit() { + // Hack to keep reflect.flag.mustBe inlinable for TestIntendedInlining. + // Before CL 284412, these conversions were introduced later in the + // compiler, so they didn't count against inlining budget. + v.budget++ + } v.budget -= inlineExtraPanicCost case ir.ORECOVER: diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index c832d9700f370..b576590d4d92b 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -896,7 +896,7 @@ func tcNew(n *ir.UnaryExpr) ir.Node { // tcPanic typechecks an OPANIC node. func tcPanic(n *ir.UnaryExpr) ir.Node { n.X = Expr(n.X) - n.X = DefaultLit(n.X, types.Types[types.TINTER]) + n.X = AssignConv(n.X, types.Types[types.TINTER], "argument to panic") if n.X.Type() == nil { n.SetType(nil) return n } diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index e1e9f168bbc4b..fe0b6a0eff4ec 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -768,14 +768,12 @@ func (o *orderState) stmt(n ir.Node) { orderBlock(&n.Else, o.free) o.out = append(o.out, n) - // Special: argument will be converted to interface using convT2E - // so make sure it is an addressable temporary.
case ir.OPANIC: n := n.(*ir.UnaryExpr) t := o.markTemp() n.X = o.expr(n.X, nil) - if !n.X.Type().IsInterface() { - n.X = o.addrTemp(n.X) + if !n.X.Type().IsEmptyInterface() { + base.FatalfAt(n.Pos(), "bad argument to panic: %L", n.X) } o.out = append(o.out, n) o.cleanTemp(t) diff --git a/test/closure3.dir/main.go b/test/closure3.dir/main.go index 5694673f1ec64..e8e1e99860f5c 100644 --- a/test/closure3.dir/main.go +++ b/test/closure3.dir/main.go @@ -285,5 +285,5 @@ func main() { //go:noinline func ppanic(s string) { // ERROR "leaking param: s" - panic(s) + panic(s) // ERROR "s escapes to heap" } diff --git a/test/escape2.go b/test/escape2.go index 5c6eb559faf06..b9b723d866686 100644 --- a/test/escape2.go +++ b/test/escape2.go @@ -1547,7 +1547,7 @@ func foo153(v interface{}) *int { // ERROR "v does not escape" case int: // ERROR "moved to heap: x$" return &x } - panic(0) + panic(0) // ERROR "0 escapes to heap" } // issue 8185 - &result escaping into result diff --git a/test/escape2n.go b/test/escape2n.go index 46e58f85661a7..7c8208aa73ca1 100644 --- a/test/escape2n.go +++ b/test/escape2n.go @@ -1547,7 +1547,7 @@ func foo153(v interface{}) *int { // ERROR "v does not escape" case int: // ERROR "moved to heap: x$" return &x } - panic(0) + panic(0) // ERROR "0 escapes to heap" } // issue 8185 - &result escaping into result diff --git a/test/escape4.go b/test/escape4.go index a4a9c14a3e0cc..4e50231bf959b 100644 --- a/test/escape4.go +++ b/test/escape4.go @@ -35,14 +35,14 @@ func f1() { func f2() {} // ERROR "can inline f2" // No inline for recover; panic now allowed to inline. -func f3() { panic(1) } // ERROR "can inline f3" +func f3() { panic(1) } // ERROR "can inline f3" "1 escapes to heap" func f4() { recover() } func f5() *byte { type T struct { x [1]byte } - t := new(T) // ERROR "new.T. escapes to heap" + t := new(T) // ERROR "new.T. escapes to heap" return &t.x[0] } @@ -52,6 +52,6 @@ func f6() *byte { y byte } } - t := new(T) // ERROR "new.T. escapes to heap" + t := new(T) // ERROR "new.T. escapes to heap" return &t.x.y } diff --git a/test/fixedbugs/issue13799.go b/test/fixedbugs/issue13799.go index fbdd4c32bc85a..c8ecfc54e4464 100644 --- a/test/fixedbugs/issue13799.go +++ b/test/fixedbugs/issue13799.go @@ -60,7 +60,7 @@ func test1(iter int) { } if len(m) != maxI { - panic(fmt.Sprintf("iter %d: maxI = %d, len(m) = %d", iter, maxI, len(m))) // ERROR "iter escapes to heap$" "len\(m\) escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" + panic(fmt.Sprintf("iter %d: maxI = %d, len(m) = %d", iter, maxI, len(m))) // ERROR "iter escapes to heap$" "len\(m\) escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap" } } @@ -84,7 +84,7 @@ func test2(iter int) { } if len(m) != maxI { - panic(fmt.Sprintf("iter %d: maxI = %d, len(m) = %d", iter, maxI, len(m))) // ERROR "iter escapes to heap$" "len\(m\) escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" + panic(fmt.Sprintf("iter %d: maxI = %d, len(m) = %d", iter, maxI, len(m))) // ERROR "iter escapes to heap$" "len\(m\) escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap" } } @@ -110,7 +110,7 @@ func test3(iter int) { } if *m != maxI { - panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "maxI escapes to heap$" "... 
argument does not escape$" + panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap" } } @@ -136,7 +136,7 @@ func test4(iter int) { } if *m != maxI { - panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" + panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap" } } @@ -167,7 +167,7 @@ func test5(iter int) { } if *m != maxI { - panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" + panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap" } } @@ -185,6 +185,6 @@ func test6(iter int) { } if *m != maxI { - panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" + panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap" } } diff --git a/test/fixedbugs/issue7921.go b/test/fixedbugs/issue7921.go index a4e7b246d4911..65be4b5bbee8d 100644 --- a/test/fixedbugs/issue7921.go +++ b/test/fixedbugs/issue7921.go @@ -41,7 +41,7 @@ func bufferNoEscape3(xs []string) string { // ERROR "xs does not escape$" func bufferNoEscape4() []byte { var b bytes.Buffer - b.Grow(64) // ERROR "bufferNoEscape4 ignoring self-assignment in bytes.b.buf = bytes.b.buf\[:bytes.m\]$" "inlining call to bytes.\(\*Buffer\).Grow$" + b.Grow(64) // ERROR "bufferNoEscape4 ignoring self-assignment in bytes.b.buf = bytes.b.buf\[:bytes.m\]$" "inlining call to bytes.\(\*Buffer\).Grow$" "string\(.*\) escapes to heap" useBuffer(&b) return b.Bytes() // ERROR "inlining call to bytes.\(\*Buffer\).Bytes$" } From 422f38fb6c8d673eaa13669a22768f4fdd91642b Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 17 Jan 2021 22:05:50 -0800 Subject: [PATCH 406/474] [dev.regabi] cmd/compile: move stack objects to liveness Calculating and emitting stack objects are essentially part of liveness analysis, so move the code from ssagen to liveness. Allows unexporting liveness.ShouldTrack. Passes toolstash -cmp. 
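For reference, the moved emitStackObjects writes a uintptr count followed by one record per tracked, address-taken stack variable. A rough, illustrative mirror of each record follows; the authoritative definition is runtime/stack.go:stackObjectRecord, and the field names and types here are approximations, not an exact copy:

    type stackObjectRecord struct {
        off int            // frame offset: argp-relative for params/results,
                           // varp-relative for locals
        typ unsafe.Pointer // the object's type descriptor, emitted via
                           // reflectdata.TypeLinksym and objw.SymPtr
    }
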
Change-Id: I88b5b2e75b8dfb46b8b03a2fa09a9236865cbf3e Reviewed-on: https://go-review.googlesource.com/c/go/+/284413 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky Reviewed-by: Cuong Manh Le TryBot-Result: Go Bot --- src/cmd/compile/internal/liveness/plive.go | 53 ++++++++++++++++++++-- src/cmd/compile/internal/ssagen/ssa.go | 50 -------------------- 2 files changed, 50 insertions(+), 53 deletions(-) diff --git a/src/cmd/compile/internal/liveness/plive.go b/src/cmd/compile/internal/liveness/plive.go index c70db6ed18468..53ae797fce18f 100644 --- a/src/cmd/compile/internal/liveness/plive.go +++ b/src/cmd/compile/internal/liveness/plive.go @@ -17,12 +17,14 @@ package liveness import ( "crypto/md5" "fmt" + "sort" "strings" "cmd/compile/internal/base" "cmd/compile/internal/bitvec" "cmd/compile/internal/ir" "cmd/compile/internal/objw" + "cmd/compile/internal/reflectdata" "cmd/compile/internal/ssa" "cmd/compile/internal/typebits" "cmd/compile/internal/types" @@ -174,13 +176,13 @@ type progeffectscache struct { initialized bool } -// ShouldTrack reports whether the liveness analysis +// shouldTrack reports whether the liveness analysis // should track the variable n. // We don't care about variables that have no pointers, // nor do we care about non-local variables, // nor do we care about empty structs (handled by the pointer check), // nor do we care about the fake PAUTOHEAP variables. -func ShouldTrack(n *ir.Name) bool { +func shouldTrack(n *ir.Name) bool { return (n.Class == ir.PAUTO && n.Esc() != ir.EscHeap || n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT) && n.Type().HasPointers() } @@ -189,7 +191,7 @@ func ShouldTrack(n *ir.Name) bool { func getvariables(fn *ir.Func) ([]*ir.Name, map[*ir.Name]int32) { var vars []*ir.Name for _, n := range fn.Dcl { - if ShouldTrack(n) { + if shouldTrack(n) { vars = append(vars, n) } } @@ -1179,9 +1181,54 @@ func Compute(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *objw.Progs) Map p.To.Name = obj.NAME_EXTERN p.To.Sym = fninfo.GCLocals + if x := lv.emitStackObjects(); x != nil { + p := pp.Prog(obj.AFUNCDATA) + p.From.SetConst(objabi.FUNCDATA_StackObjects) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = x + } + return lv.livenessMap } +func (lv *liveness) emitStackObjects() *obj.LSym { + var vars []*ir.Name + for _, n := range lv.fn.Dcl { + if shouldTrack(n) && n.Addrtaken() && n.Esc() != ir.EscHeap { + vars = append(vars, n) + } + } + if len(vars) == 0 { + return nil + } + + // Sort variables from lowest to highest address. + sort.Slice(vars, func(i, j int) bool { return vars[i].FrameOffset() < vars[j].FrameOffset() }) + + // Populate the stack object data. + // Format must match runtime/stack.go:stackObjectRecord. + x := base.Ctxt.Lookup(lv.fn.LSym.Name + ".stkobj") + lv.fn.LSym.Func().StackObjects = x + off := 0 + off = objw.Uintptr(x, off, uint64(len(vars))) + for _, v := range vars { + // Note: arguments and return values have non-negative Xoffset, + // in which case the offset is relative to argp. + // Locals have a negative Xoffset, in which case the offset is relative to varp. + off = objw.Uintptr(x, off, uint64(v.FrameOffset())) + off = objw.SymPtr(x, off, reflectdata.TypeLinksym(v.Type()), 0) + } + + if base.Flag.Live != 0 { + for _, v := range vars { + base.WarnfAt(v.Pos(), "stack object %v %v", v, v.Type()) + } + } + + return x +} + // isfat reports whether a variable of type t needs multiple assignments to initialize. 
// For example: // diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 02aff7a8cf1fc..0a1a7aed8481c 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -6467,55 +6467,6 @@ func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) { } } -// byXoffset implements sort.Interface for []*ir.Name using Xoffset as the ordering. -type byXoffset []*ir.Name - -func (s byXoffset) Len() int { return len(s) } -func (s byXoffset) Less(i, j int) bool { return s[i].FrameOffset() < s[j].FrameOffset() } -func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -func emitStackObjects(e *ssafn, pp *objw.Progs) { - var vars []*ir.Name - for _, n := range e.curfn.Dcl { - if liveness.ShouldTrack(n) && n.Addrtaken() && n.Esc() != ir.EscHeap { - vars = append(vars, n) - } - } - if len(vars) == 0 { - return - } - - // Sort variables from lowest to highest address. - sort.Sort(byXoffset(vars)) - - // Populate the stack object data. - // Format must match runtime/stack.go:stackObjectRecord. - x := base.Ctxt.Lookup(e.curfn.LSym.Name + ".stkobj") - e.curfn.LSym.Func().StackObjects = x - off := 0 - off = objw.Uintptr(x, off, uint64(len(vars))) - for _, v := range vars { - // Note: arguments and return values have non-negative Xoffset, - // in which case the offset is relative to argp. - // Locals have a negative Xoffset, in which case the offset is relative to varp. - off = objw.Uintptr(x, off, uint64(v.FrameOffset())) - off = objw.SymPtr(x, off, reflectdata.TypeLinksym(v.Type()), 0) - } - - // Emit a funcdata pointing at the stack object data. - p := pp.Prog(obj.AFUNCDATA) - p.From.SetConst(objabi.FUNCDATA_StackObjects) - p.To.Type = obj.TYPE_MEM - p.To.Name = obj.NAME_EXTERN - p.To.Sym = x - - if base.Flag.Live != 0 { - for _, v := range vars { - base.WarnfAt(v.Pos(), "stack object %v %s", v, v.Type().String()) - } - } -} - // genssa appends entries to pp for each instruction in f. func genssa(f *ssa.Func, pp *objw.Progs) { var s State @@ -6523,7 +6474,6 @@ func genssa(f *ssa.Func, pp *objw.Progs) { e := f.Frontend().(*ssafn) s.livenessMap = liveness.Compute(e.curfn, f, e.stkptrsize, pp) - emitStackObjects(e, pp) openDeferInfo := e.curfn.LSym.Func().OpenCodedDeferInfo if openDeferInfo != nil { From 4f5c603c0f4375d7612feedfd4d5bef41a4060ee Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 17 Jan 2021 00:46:42 -0800 Subject: [PATCH 407/474] [dev.regabi] cmd/compile: cleanup callTargetLSym Now that TailCallStmt carries an *ir.Name instead of a *types.Sym, callTargetLSym can be similarly updated to take the target function as an *ir.Name. This inches us closer towards being able to move Linksym and other properties from *types.Sym to *ir.Name, where they belong. Passes toolstash -cmp w/ -gcflags=all=-abiwrap. 
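In short, the signature changes from taking a symbol to taking the callee's name node (before/after sketch, per the diff below):

    func callTargetLSym(callee *types.Sym, callerLSym *obj.LSym) *obj.LSym // before
    func callTargetLSym(callee *ir.Name, callerLSym *obj.LSym) *obj.LSym   // after

so the callee's *ir.Func is reachable directly as callee.Func rather than via ir.AsNode(callee.Def).Name().Defn.
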
Change-Id: I091da290751970eba8ed0438f66d6cca88b665a8 Reviewed-on: https://go-review.googlesource.com/c/go/+/284228 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Than McIntosh --- src/cmd/compile/internal/ssagen/ssa.go | 33 +++++++++++--------------- test/abi/regabipragma.out | 8 +++---- 2 files changed, 18 insertions(+), 23 deletions(-) diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 0a1a7aed8481c..72db4430a5079 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -361,7 +361,7 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { if strings.Contains(name, ".") { base.ErrorfAt(fn.Pos(), "Calls to //go:registerparams method %s won't work, remove the pragma from the declaration.", name) } - s.f.Warnl(fn.Pos(), "Declared function %s has register params", name) + s.f.Warnl(fn.Pos(), "declared function %v has register params", fn) } s.panics = map[funcLine]*ssa.Block{} @@ -1585,7 +1585,7 @@ func (s *state) stmt(n ir.Node) { n := n.(*ir.TailCallStmt) b := s.exit() b.Kind = ssa.BlockRetJmp // override BlockRet - b.Aux = callTargetLSym(n.Target.Sym(), s.curfn.LSym) + b.Aux = callTargetLSym(n.Target, s.curfn.LSym) case ir.OCONTINUE, ir.OBREAK: n := n.(*ir.BranchStmt) @@ -4756,7 +4756,7 @@ func (s *state) callAddr(n *ir.CallExpr, k callKind) *ssa.Value { // Returns the address of the return value (or nil if none). func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Value { s.prevCall = nil - var sym *types.Sym // target symbol (if static) + var callee *ir.Name // target function (if static) var closure *ssa.Value // ptr to closure to run (if dynamic) var codeptr *ssa.Value // ptr to target code (if dynamic) var rcvr *ssa.Value // receiver to set @@ -4781,13 +4781,13 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f) if k == callNormal && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC { fn := fn.(*ir.Name) - sym = fn.Sym() + callee = fn // TODO remove after register abi is working inRegistersImported := fn.Pragma()&ir.RegisterParams != 0 inRegistersSamePackage := fn.Func != nil && fn.Func.Pragma&ir.RegisterParams != 0 inRegisters = inRegistersImported || inRegistersSamePackage if inRegisters { - s.f.Warnl(n.Pos(), "Called function %s has register params", sym.Linksym().Name) + s.f.Warnl(n.Pos(), "called function %v has register params", callee) } break } @@ -4982,13 +4982,13 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val } else { call = s.newValue2A(ssa.OpInterCall, types.TypeMem, ssa.InterfaceAuxCall(ACArgs, ACResults), codeptr, s.mem()) } - case sym != nil: + case callee != nil: if testLateExpansion { - aux := ssa.StaticAuxCall(callTargetLSym(sym, s.curfn.LSym), ACArgs, ACResults) + aux := ssa.StaticAuxCall(callTargetLSym(callee, s.curfn.LSym), ACArgs, ACResults) call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) call.AddArgs(callArgs...) } else { - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(callTargetLSym(sym, s.curfn.LSym), ACArgs, ACResults), s.mem()) + call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(callTargetLSym(callee, s.curfn.LSym), ACArgs, ACResults), s.mem()) } default: s.Fatalf("bad call type %v %v", n.Op(), n) @@ -7386,31 +7386,26 @@ func clobberBase(n ir.Node) ir.Node { // // 3. 
in all other cases, want the regular ABIInternal linksym // -func callTargetLSym(callee *types.Sym, callerLSym *obj.LSym) *obj.LSym { +func callTargetLSym(callee *ir.Name, callerLSym *obj.LSym) *obj.LSym { lsym := callee.Linksym() if !base.Flag.ABIWrap { return lsym } - if ir.AsNode(callee.Def) == nil { + fn := callee.Func + if fn == nil { return lsym } - defn := ir.AsNode(callee.Def).Name().Defn - if defn == nil { - return lsym - } - ndclfunc := defn.(*ir.Func) // check for case 1 above if callerLSym.ABIWrapper() { - if nlsym := ndclfunc.LSym; nlsym != nil { + if nlsym := fn.LSym; nlsym != nil { lsym = nlsym } } else { // check for case 2 above - nam := ndclfunc.Nname - defABI, hasDefABI := symabiDefs[nam.Sym().LinksymName()] + defABI, hasDefABI := symabiDefs[callee.Sym().LinksymName()] if hasDefABI && defABI == obj.ABI0 { - lsym = nam.Sym().LinksymABI0() + lsym = callee.Sym().LinksymABI0() } } return lsym diff --git a/test/abi/regabipragma.out b/test/abi/regabipragma.out index 7803613351720..321b1adfccc1a 100644 --- a/test/abi/regabipragma.out +++ b/test/abi/regabipragma.out @@ -1,6 +1,6 @@ # regabipragma.dir/tmp -tmp/foo.go:17:6: Declared function F has register params +tmp/foo.go:17:6: declared function F has register params # regabipragma.dir -./main.go:21:6: Declared function f has register params -./main.go:32:9: Called function "".f has register params -./main.go:33:13: Called function regabipragma.dir/tmp.F has register params +./main.go:21:6: declared function f has register params +./main.go:32:9: called function f has register params +./main.go:33:13: called function tmp.F has register params From 4a4212c0e59dee4458be2f5c85262e54f127c500 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 17 Jan 2021 02:38:41 -0800 Subject: [PATCH 408/474] [dev.regabi] cmd/compile: refactor Linksym creation Currently there's a lot of logic within package types for creating Linksyms. This CL pulls it out into base, where it can be more easily reused by other compiler code that shouldn't need to depend on package types. Package base probably isn't the best place for this, but it's convenient because it's a package that types already depends on. It's also where the Ctxt object lives, which these functions depend upon. Passes toolstash -cmp w/ -gcflags=all=-abiwrap. 
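A minimal sketch of the intended call sites (the particular symbols below are illustrative, not taken from this CL):

    // ABIInternal linker symbol for a function in package runtime:
    lsym := base.PkgLinksym("runtime", "typedmemmove", obj.ABIInternal)

    // Symbol named by a //go:linkname directive:
    zerobase := base.Linkname("runtime.zerobase", obj.ABI0)
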
Change-Id: I50d8b7e4596955205036969eab24d7dab053b363 Reviewed-on: https://go-review.googlesource.com/c/go/+/284231 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Than McIntosh Trust: Matthew Dempsky --- src/cmd/compile/internal/base/base.go | 4 -- src/cmd/compile/internal/base/link.go | 36 ++++++++++++++++ src/cmd/compile/internal/dwarfgen/dwarf.go | 2 +- src/cmd/compile/internal/ir/func.go | 7 +-- src/cmd/compile/internal/ir/name.go | 3 +- src/cmd/compile/internal/ssagen/abi.go | 4 +- src/cmd/compile/internal/ssagen/ssa.go | 4 +- src/cmd/compile/internal/staticdata/data.go | 2 +- src/cmd/compile/internal/typecheck/syms.go | 11 +++-- src/cmd/compile/internal/types/sym.go | 47 ++++++--------------- 10 files changed, 67 insertions(+), 53 deletions(-) create mode 100644 src/cmd/compile/internal/base/link.go diff --git a/src/cmd/compile/internal/base/base.go b/src/cmd/compile/internal/base/base.go index 5a30fa6a334e6..3b9bc3a8af2d5 100644 --- a/src/cmd/compile/internal/base/base.go +++ b/src/cmd/compile/internal/base/base.go @@ -6,12 +6,8 @@ package base import ( "os" - - "cmd/internal/obj" ) -var Ctxt *obj.Link - var atExitFuncs []func() func AtExit(f func()) { diff --git a/src/cmd/compile/internal/base/link.go b/src/cmd/compile/internal/base/link.go new file mode 100644 index 0000000000000..49fe4352b2f65 --- /dev/null +++ b/src/cmd/compile/internal/base/link.go @@ -0,0 +1,36 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package base + +import ( + "cmd/internal/obj" +) + +var Ctxt *obj.Link + +// TODO(mdempsky): These should probably be obj.Link methods. + +// PkgLinksym returns the linker symbol for name within the given +// package prefix. For user packages, prefix should be the package +// path encoded with objabi.PathToPrefix. +func PkgLinksym(prefix, name string, abi obj.ABI) *obj.LSym { + if name == "_" { + // TODO(mdempsky): Cleanup callers and Fatalf instead. + return linksym(prefix, "_", abi) + } + return linksym(prefix, prefix+"."+name, abi) +} + +// Linkname returns the linker symbol for the given name as it might +// appear within a //go:linkname directive. +func Linkname(name string, abi obj.ABI) *obj.LSym { + return linksym("_", name, abi) +} + +// linksym is an internal helper function for implementing the above +// exported APIs. 
+func linksym(pkg, name string, abi obj.ABI) *obj.LSym { + return Ctxt.LookupABIInit(name, abi, func(r *obj.LSym) { r.Pkg = pkg }) +} diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go index 2440e3c8d3c59..bf039c8fbbe6f 100644 --- a/src/cmd/compile/internal/dwarfgen/dwarf.go +++ b/src/cmd/compile/internal/dwarfgen/dwarf.go @@ -28,7 +28,7 @@ func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, if fn.Nname != nil { expect := fn.Linksym() if fnsym.ABI() == obj.ABI0 { - expect = fn.Sym().LinksymABI0() + expect = fn.LinksymABI(obj.ABI0) } if fnsym != expect { base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect) diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 4afdadf57b8bc..0a9db92d96713 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -133,9 +133,10 @@ func (n *Func) copy() Node { panic(n.no("copy")) } func (n *Func) doChildren(do func(Node) bool) bool { return doNodes(n.Body, do) } func (n *Func) editChildren(edit func(Node) Node) { editNodes(n.Body, edit) } -func (f *Func) Type() *types.Type { return f.Nname.Type() } -func (f *Func) Sym() *types.Sym { return f.Nname.Sym() } -func (f *Func) Linksym() *obj.LSym { return f.Nname.Linksym() } +func (f *Func) Type() *types.Type { return f.Nname.Type() } +func (f *Func) Sym() *types.Sym { return f.Nname.Sym() } +func (f *Func) Linksym() *obj.LSym { return f.Nname.Linksym() } +func (f *Func) LinksymABI(abi obj.ABI) *obj.LSym { return f.Nname.LinksymABI(abi) } // An Inline holds fields used for function bodies that can be inlined. type Inline struct { diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 64de42382e80c..fa0639600ce0a 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -226,7 +226,8 @@ func (n *Name) SetWalkdef(x uint8) { n.bits.set2(miniWalkdefShift, x) } -func (n *Name) Linksym() *obj.LSym { return n.sym.Linksym() } +func (n *Name) Linksym() *obj.LSym { return n.sym.Linksym() } +func (n *Name) LinksymABI(abi obj.ABI) *obj.LSym { return n.sym.LinksymABI(abi) } func (*Name) CanBeNtype() {} func (*Name) CanBeAnSSASym() {} diff --git a/src/cmd/compile/internal/ssagen/abi.go b/src/cmd/compile/internal/ssagen/abi.go index b5da42087251b..5bebce1db58df 100644 --- a/src/cmd/compile/internal/ssagen/abi.go +++ b/src/cmd/compile/internal/ssagen/abi.go @@ -161,11 +161,11 @@ func selectLSym(f *ir.Func, hasBody bool) { var wrapperABI obj.ABI needABIWrapper := false - defABI, hasDefABI := symabiDefs[nam.Sym().LinksymName()] + defABI, hasDefABI := symabiDefs[nam.Linksym().Name] if hasDefABI && defABI == obj.ABI0 { // Symbol is defined as ABI0. Create an // Internal -> ABI0 wrapper. 
- f.LSym = nam.Sym().LinksymABI0() + f.LSym = nam.LinksymABI(obj.ABI0) needABIWrapper, wrapperABI = true, obj.ABIInternal } else { f.LSym = nam.Linksym() diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 72db4430a5079..8ed0e6101c3fe 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -7403,9 +7403,9 @@ func callTargetLSym(callee *ir.Name, callerLSym *obj.LSym) *obj.LSym { } } else { // check for case 2 above - defABI, hasDefABI := symabiDefs[callee.Sym().LinksymName()] + defABI, hasDefABI := symabiDefs[lsym.Name] if hasDefABI && defABI == obj.ABI0 { - lsym = callee.Sym().LinksymABI0() + lsym = callee.LinksymABI(obj.ABI0) } } return lsym diff --git a/src/cmd/compile/internal/staticdata/data.go b/src/cmd/compile/internal/staticdata/data.go index 6ef99b50c7800..b06fd7aa4b246 100644 --- a/src/cmd/compile/internal/staticdata/data.go +++ b/src/cmd/compile/internal/staticdata/data.go @@ -287,7 +287,7 @@ func NeedFuncSym(s *types.Sym) { func WriteFuncSyms() { sort.Slice(funcsyms, func(i, j int) bool { - return funcsyms[i].LinksymName() < funcsyms[j].LinksymName() + return funcsyms[i].Linksym().Name < funcsyms[j].Linksym().Name }) for _, s := range funcsyms { sf := s.Pkg.Lookup(ir.FuncSymName(s)).Linksym() diff --git a/src/cmd/compile/internal/typecheck/syms.go b/src/cmd/compile/internal/typecheck/syms.go index f6ff2ee5da2ee..202a932e6c96b 100644 --- a/src/cmd/compile/internal/typecheck/syms.go +++ b/src/cmd/compile/internal/typecheck/syms.go @@ -86,14 +86,17 @@ func InitRuntime() { // LookupRuntimeFunc looks up Go function name in package runtime. This function // must follow the internal calling convention. func LookupRuntimeFunc(name string) *obj.LSym { - s := ir.Pkgs.Runtime.Lookup(name) - s.SetFunc(true) - return s.Linksym() + return LookupRuntimeABI(name, obj.ABIInternal) } // LookupRuntimeVar looks up a variable (or assembly function) name in package // runtime. If this is a function, it may have a special calling // convention. func LookupRuntimeVar(name string) *obj.LSym { - return ir.Pkgs.Runtime.Lookup(name).Linksym() + return LookupRuntimeABI(name, obj.ABI0) +} + +// LookupRuntimeABI looks up a name in package runtime using the given ABI. +func LookupRuntimeABI(name string, abi obj.ABI) *obj.LSym { + return base.PkgLinksym("runtime", name, abi) } diff --git a/src/cmd/compile/internal/types/sym.go b/src/cmd/compile/internal/types/sym.go index 2914e2ed3fa05..0e66ed348bfcf 100644 --- a/src/cmd/compile/internal/types/sym.go +++ b/src/cmd/compile/internal/types/sym.go @@ -64,53 +64,30 @@ func (sym *Sym) IsBlank() bool { return sym != nil && sym.Name == "_" } -func (sym *Sym) LinksymName() string { - if sym.IsBlank() { - return "_" - } - if sym.Linkname != "" { - return sym.Linkname - } - return sym.Pkg.Prefix + "." + sym.Name -} - // Deprecated: This method should not be used directly. Instead, use a // higher-level abstraction that directly returns the linker symbol // for a named object. For example, reflectdata.TypeLinksym(t) instead // of reflectdata.TypeSym(t).Linksym(). func (sym *Sym) Linksym() *obj.LSym { - if sym == nil { - return nil - } - initPkg := func(r *obj.LSym) { - if sym.Linkname != "" { - r.Pkg = "_" - } else { - r.Pkg = sym.Pkg.Prefix - } - } + abi := obj.ABI0 if sym.Func() { - // This is a function symbol. Mark it as "internal ABI". 
- return base.Ctxt.LookupABIInit(sym.LinksymName(), obj.ABIInternal, initPkg) + abi = obj.ABIInternal } - return base.Ctxt.LookupInit(sym.LinksymName(), initPkg) + return sym.LinksymABI(abi) } -// LinksymABI0 looks up or creates an ABI0 linker symbol for "sym", -// in cases where we want to specifically select the ABI0 version of -// a symbol (typically used only for ABI wrappers). -func (sym *Sym) LinksymABI0() *obj.LSym { +// Deprecated: This method should not be used directly. Instead, use a +// higher-level abstraction that directly returns the linker symbol +// for a named object. For example, (*ir.Name).LinksymABI(abi) instead +// of (*ir.Name).Sym().LinksymABI(abi). +func (sym *Sym) LinksymABI(abi obj.ABI) *obj.LSym { if sym == nil { - return nil + base.Fatalf("nil symbol") } - initPkg := func(r *obj.LSym) { - if sym.Linkname != "" { - r.Pkg = "_" - } else { - r.Pkg = sym.Pkg.Prefix - } + if sym.Linkname != "" { + return base.Linkname(sym.Linkname, abi) } - return base.Ctxt.LookupABIInit(sym.LinksymName(), obj.ABI0, initPkg) + return base.PkgLinksym(sym.Pkg.Prefix, sym.Name, abi) } // Less reports whether symbol a is ordered before symbol b. From a2f825c542bc62b9d4341080302ed309cd3daa97 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 17 Jan 2021 02:53:18 -0800 Subject: [PATCH 409/474] [dev.regabi] cmd/compile: directly create go.map and go.track symbols These symbols are implementation details and don't correspond to Go source symbols, so directly create them as linker symbols and get rid of their pseudo packages. Passes toolstash -cmp w/ -gcflags=all=-abiwrap. Change-Id: I2e97374c21f3e909f6d350f15e7a5ed3574cadf4 Reviewed-on: https://go-review.googlesource.com/c/go/+/284372 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le Trust: Matthew Dempsky --- src/cmd/compile/internal/gc/main.go | 7 ------- src/cmd/compile/internal/gc/obj.go | 2 +- src/cmd/compile/internal/ir/symtab.go | 2 -- src/cmd/compile/internal/reflectdata/reflect.go | 17 ++++------------- 4 files changed, 5 insertions(+), 23 deletions(-) diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index f758933d79e39..726a0685d57ff 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -96,13 +96,6 @@ func Main(archInit func(*ssagen.ArchInfo)) { ir.Pkgs.Itab = types.NewPkg("go.itab", "go.itab") ir.Pkgs.Itab.Prefix = "go.itab" // not go%2eitab - ir.Pkgs.Track = types.NewPkg("go.track", "go.track") - ir.Pkgs.Track.Prefix = "go.track" // not go%2etrack - - // pseudo-package used for map zero values - ir.Pkgs.Map = types.NewPkg("go.map", "go.map") - ir.Pkgs.Map.Prefix = "go.map" - // pseudo-package used for methods with anonymous receivers ir.Pkgs.Go = types.NewPkg("go", "") diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 847d84966646e..0472af74419e5 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -146,7 +146,7 @@ func dumpdata() { dumpglobls(typecheck.Target.Externs[numExterns:]) if reflectdata.ZeroSize > 0 { - zero := ir.Pkgs.Map.Lookup("zero").Linksym() + zero := base.PkgLinksym("go.map", "zero", obj.ABI0) objw.Global(zero, int32(reflectdata.ZeroSize), obj.DUPOK|obj.RODATA) } diff --git a/src/cmd/compile/internal/ir/symtab.go b/src/cmd/compile/internal/ir/symtab.go index 0968efbf5cb3e..61727fb1c4b00 100644 --- a/src/cmd/compile/internal/ir/symtab.go +++ b/src/cmd/compile/internal/ir/symtab.go @@ -67,8 +67,6 @@ var Syms struct { var 
Pkgs struct { Go *types.Pkg Itab *types.Pkg - Map *types.Pkg Runtime *types.Pkg - Track *types.Pkg Unsafe *types.Pkg } diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go index bd89b62ff5b8b..1ec92e3dd073e 100644 --- a/src/cmd/compile/internal/reflectdata/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -791,7 +791,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { // TrackSym returns the symbol for tracking use of field/method f, assumed // to be a member of struct/interface type t. func TrackSym(t *types.Type, f *types.Field) *obj.LSym { - return ir.Pkgs.Track.Lookup(t.ShortString() + "." + f.Sym.Name).Linksym() + return base.PkgLinksym("go.track", t.ShortString() + "." + f.Sym.Name, obj.ABI0) } func TypeSymPrefix(prefix string, t *types.Type) *types.Sym { @@ -1654,18 +1654,9 @@ func ZeroAddr(size int64) ir.Node { if ZeroSize < size { ZeroSize = size } - s := ir.Pkgs.Map.Lookup("zero") - if s.Def == nil { - x := typecheck.NewName(s) - x.SetType(types.Types[types.TUINT8]) - x.Class = ir.PEXTERN - x.SetTypecheck(1) - s.Def = x - } - z := typecheck.NodAddr(ir.AsNode(s.Def)) - z.SetType(types.NewPtr(types.Types[types.TUINT8])) - z.SetTypecheck(1) - return z + lsym := base.PkgLinksym("go.map", "zero", obj.ABI0) + x := ir.NewLinksymExpr(base.Pos, lsym, types.Types[types.TUINT8]) + return typecheck.Expr(typecheck.NodAddr(x)) } func CollectPTabs() { From 9423d50d53f132d7d00f5126144736bfe65627b6 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Tue, 19 Jan 2021 22:57:45 +0700 Subject: [PATCH 410/474] [dev.regabi] cmd/compile: use '%q' for printing rune values less than 128 Fixes #43762 Change-Id: I51734c9b4ee2366a5dae53b2d27b363f4d5fe6c1 Reviewed-on: https://go-review.googlesource.com/c/go/+/284592 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/fmt.go | 14 +++++++------- test/fixedbugs/issue43762.go | 11 +++++++++++ 2 files changed, 18 insertions(+), 7 deletions(-) create mode 100644 test/fixedbugs/issue43762.go diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index ee6a62625afda..0ebfb842867b9 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -589,20 +589,20 @@ func exprFmt(n Node, s fmt.State, prec int) { } if n.Type() == types.UntypedRune { - switch x, ok := constant.Int64Val(n.Val()); { + switch x, ok := constant.Uint64Val(n.Val()); { case !ok: fallthrough default: fmt.Fprintf(s, "('\\x00' + %v)", n.Val()) - case ' ' <= x && x < utf8.RuneSelf && x != '\\' && x != '\'': - fmt.Fprintf(s, "'%c'", int(x)) + case x < utf8.RuneSelf: + fmt.Fprintf(s, "%q", x) - case 0 <= x && x < 1<<16: - fmt.Fprintf(s, "'\\u%04x'", uint(int(x))) + case x < 1<<16: + fmt.Fprintf(s, "'\\u%04x'", x) - case 0 <= x && x <= utf8.MaxRune: - fmt.Fprintf(s, "'\\U%08x'", uint64(x)) + case x <= utf8.MaxRune: + fmt.Fprintf(s, "'\\U%08x'", x) } } else { fmt.Fprint(s, types.FmtConst(n.Val(), s.Flag('#'))) diff --git a/test/fixedbugs/issue43762.go b/test/fixedbugs/issue43762.go new file mode 100644 index 0000000000000..4544b6e4963d8 --- /dev/null +++ b/test/fixedbugs/issue43762.go @@ -0,0 +1,11 @@ +// errorcheck + +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
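+
+// The rune operands below are all less than 128, so the compiler
+// should quote them as character literals (%q) in the "invalid
+// operation" errors, rather than printing raw integer values.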
+ +package p + +var _ = true == '\\' // ERROR "invalid operation: true == '\\\\'" +var _ = true == '\'' // ERROR "invalid operation: true == '\\''" +var _ = true == '\n' // ERROR "invalid operation: true == '\\n'" From 92cb157cf3aa51d28e441dbb2b671795f22140f8 Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 29 Dec 2020 22:44:30 -0500 Subject: [PATCH 411/474] [dev.regabi] cmd/compile: late expansion of return values By-hand rebase of earlier CL, because that was easier than letting git try to figure things out. This will naively insert self-moves; in the case that these involve memory, the expander detects these and removes them and their vardefs. Change-Id: Icf72575eb7ae4a186b0de462bc8cf0bedc84d3e9 Reviewed-on: https://go-review.googlesource.com/c/go/+/279519 Trust: David Chase Reviewed-by: Jeremy Faller --- src/cmd/compile/internal/ir/fmt.go | 5 ++ src/cmd/compile/internal/ssa/expand_calls.go | 78 ++++++++++++++++---- src/cmd/compile/internal/ssa/func.go | 6 +- src/cmd/compile/internal/ssa/op.go | 10 ++- src/cmd/compile/internal/ssagen/ssa.go | 77 ++++++++++++++----- 5 files changed, 138 insertions(+), 38 deletions(-) diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 0ebfb842867b9..01197ad272418 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -1119,6 +1119,11 @@ func dumpNode(w io.Writer, n Node, depth int) { return } + if n == nil { + fmt.Fprint(w, "NilIrNode") + return + } + if len(n.Init()) != 0 { fmt.Fprintf(w, "%+v-init", n.Op()) dumpNodes(w, n.Init(), depth+1) diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go index e1c657d4a4758..66ef1b3515b19 100644 --- a/src/cmd/compile/internal/ssa/expand_calls.go +++ b/src/cmd/compile/internal/ssa/expand_calls.go @@ -24,6 +24,10 @@ type offsetKey struct { pt *types.Type } +func isBlockMultiValueExit(b *Block) bool { + return (b.Kind == BlockRet || b.Kind == BlockRetJmp) && len(b.Controls) > 0 && b.Controls[0].Op == OpMakeResult +} + // expandCalls converts LE (Late Expansion) calls that act like they receive value args into a lower-level form // that is more oriented to a platform's ABI. The SelectN operations that extract results are rewritten into // more appropriate forms, and any StructMake or ArrayMake inputs are decomposed until non-struct values are @@ -624,6 +628,24 @@ func expandCalls(f *Func) { return x } + rewriteDereference := func(b *Block, base, a, mem *Value, offset, size int64, typ *types.Type, pos src.XPos) *Value { + source := a.Args[0] + dst := offsetFrom(base, offset, source.Type) + if a.Uses == 1 && a.Block == b { + a.reset(OpMove) + a.Pos = pos + a.Type = types.TypeMem + a.Aux = typ + a.AuxInt = size + a.SetArgs3(dst, source, mem) + mem = a + } else { + mem = b.NewValue3A(pos, OpMove, types.TypeMem, typ, dst, source, mem) + mem.AuxInt = size + } + return mem + } + // rewriteArgs removes all the Args from a call and converts the call args into appropriate // stores (or later, register movement). Extra args for interface and closure calls are ignored, // but removed. 
@@ -631,7 +653,7 @@ func expandCalls(f *Func) { // Thread the stores on the memory arg aux := v.Aux.(*AuxCall) pos := v.Pos.WithNotStmt() - m0 := v.Args[len(v.Args)-1] + m0 := v.MemoryArg() mem := m0 for i, a := range v.Args { if i < firstArg { @@ -647,20 +669,7 @@ func expandCalls(f *Func) { } // "Dereference" of addressed (probably not-SSA-eligible) value becomes Move // TODO this will be more complicated with registers in the picture. - source := a.Args[0] - dst := f.ConstOffPtrSP(source.Type, aux.OffsetOfArg(auxI), sp) - if a.Uses == 1 && a.Block == v.Block { - a.reset(OpMove) - a.Pos = pos - a.Type = types.TypeMem - a.Aux = aux.TypeOfArg(auxI) - a.AuxInt = aux.SizeOfArg(auxI) - a.SetArgs3(dst, source, mem) - mem = a - } else { - mem = v.Block.NewValue3A(pos, OpMove, types.TypeMem, aux.TypeOfArg(auxI), dst, source, mem) - mem.AuxInt = aux.SizeOfArg(auxI) - } + mem = rewriteDereference(v.Block, sp, a, mem, aux.OffsetOfArg(auxI), aux.SizeOfArg(auxI), aux.TypeOfArg(auxI), pos) } else { if debug { fmt.Printf("storeArg %s, %v, %d\n", a.LongString(), aux.TypeOfArg(auxI), aux.OffsetOfArg(auxI)) @@ -692,6 +701,45 @@ func expandCalls(f *Func) { v.SetArgs2(code, mem) } } + if isBlockMultiValueExit(b) { + // Very similar to code in rewriteArgs, but results instead of args. + v := b.Controls[0] + m0 := v.MemoryArg() + mem := m0 + aux := f.OwnAux + pos := v.Pos.WithNotStmt() + for j, a := range v.Args { + i := int64(j) + if a == m0 { + break + } + auxType := aux.TypeOfResult(i) + auxBase := b.NewValue2A(v.Pos, OpLocalAddr, types.NewPtr(auxType), aux.results[i].Name, sp, mem) + auxOffset := int64(0) + auxSize := aux.SizeOfResult(i) + if a.Op == OpDereference { + // Avoid a self-move, and if one is detected try to remove the already-inserted VarDef for the assignment that won't happen. + if dAddr, dMem := a.Args[0], a.Args[1]; dAddr.Op == OpLocalAddr && dAddr.Args[0].Op == OpSP && + dAddr.Args[1] == dMem && dAddr.Aux == aux.results[i].Name { + if dMem.Op == OpVarDef && dMem.Aux == dAddr.Aux { + dMem.copyOf(dMem.MemoryArg()) // elide the VarDef + } + continue + } + mem = rewriteDereference(v.Block, auxBase, a, mem, auxOffset, auxSize, auxType, pos) + } else { + if a.Op == OpLoad && a.Args[0].Op == OpLocalAddr { + addr := a.Args[0] + if addr.MemoryArg() == a.MemoryArg() && addr.Aux == aux.results[i].Name { + continue + } + } + mem = storeArgOrLoad(v.Pos, b, auxBase, a, mem, aux.TypeOfResult(i), auxOffset) + } + } + b.SetControl(mem) + v.reset(OpInvalid) // otherwise it can have a mem operand which will fail check(), even though it is dead. + } } for i, name := range f.Names { diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go index f753b4407bd9d..de99a8d4af9cb 100644 --- a/src/cmd/compile/internal/ssa/func.go +++ b/src/cmd/compile/internal/ssa/func.go @@ -60,6 +60,8 @@ type Func struct { // RegArgs is a slice of register-memory pairs that must be spilled and unspilled in the uncommon path of function entry. RegArgs []ArgPair + // AuxCall describing parameters and results for this function. + OwnAux *AuxCall // WBLoads is a list of Blocks that branch on the write // barrier flag. Safe-points are disabled from the OpLoad that @@ -774,7 +776,7 @@ func DebugNameMatch(evname, name string) bool { } func (f *Func) spSb() (sp, sb *Value) { - initpos := f.Entry.Pos + initpos := src.NoXPos // These are originally created with no position in ssa.go; if they are optimized out then recreated, should be the same. 
for _, v := range f.Entry.Values { if v.Op == OpSB { sb = v @@ -783,7 +785,7 @@ func (f *Func) spSb() (sp, sb *Value) { sp = v } if sb != nil && sp != nil { - break + return } } if sb == nil { diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go index 5e6ce2b5088d5..c64b145107e23 100644 --- a/src/cmd/compile/internal/ssa/op.go +++ b/src/cmd/compile/internal/ssa/op.go @@ -5,6 +5,7 @@ package ssa import ( + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/obj" "fmt" @@ -70,7 +71,8 @@ type auxType int8 type Param struct { Type *types.Type - Offset int32 // TODO someday this will be a register + Offset int32 // Offset of Param if not in a register. + Name *ir.Name // For OwnAux, need to prepend stores with Vardefs } type AuxCall struct { @@ -199,6 +201,12 @@ func ClosureAuxCall(args []Param, results []Param) *AuxCall { func (*AuxCall) CanBeAnSSAAux() {} +// OwnAuxCall returns a function's own AuxCall +func OwnAuxCall(args []Param, results []Param) *AuxCall { + // TODO if this remains identical to ClosureAuxCall above after new ABI is done, should deduplicate. + return &AuxCall{Fn: nil, args: args, results: results} +} + const ( auxNone auxType = iota auxBool // auxInt is 0/1 for false/true diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 8ed0e6101c3fe..5ba8579f6a7d7 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -459,7 +459,7 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { args = append(args, ssa.Param{Type: n.Type(), Offset: int32(n.FrameOffset())}) case ir.PPARAMOUT: s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem) - results = append(results, ssa.Param{Type: n.Type(), Offset: int32(n.FrameOffset())}) + results = append(results, ssa.Param{Type: n.Type(), Offset: int32(n.FrameOffset()), Name: n}) case ir.PAUTO: // processed at each use, to prevent Addr coming // before the decl. @@ -467,6 +467,7 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { s.Fatalf("local variable with class %v unimplemented", n.Class) } } + s.f.OwnAux = ssa.OwnAuxCall(args, results) // Populate SSAable arguments. for _, n := range fn.Dcl { @@ -532,6 +533,8 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { } } + s.f.HTMLWriter.WritePhase("before insert phis", "before insert phis") + s.insertPhis() // Main call to ssa package to compile function @@ -1799,6 +1802,7 @@ const shareDeferExits = false // It returns a BlockRet block that ends the control flow. Its control value // will be set to the final memory state. func (s *state) exit() *ssa.Block { + lateResultLowering := s.f.DebugTest && ssa.LateCallExpansionEnabledWithin(s.f) if s.hasdefer { if s.hasOpenDefers { if shareDeferExits && s.lastDeferExit != nil && len(s.openDefers) == s.lastDeferCount { @@ -1815,28 +1819,61 @@ func (s *state) exit() *ssa.Block { } } - // Store SSAable and heap-escaped PPARAMOUT variables back to stack locations. - for _, f := range s.curfn.Type().Results().FieldSlice() { - n := f.Nname.(*ir.Name) - if s.canSSA(n) { - val := s.variable(n, n.Type()) - s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem()) - s.store(n.Type(), s.decladdrs[n], val) - } else if !n.OnStack() { + var b *ssa.Block + var m *ssa.Value + // Do actual return. + // These currently turn into self-copies (in many cases). 
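+	// When lateResultLowering is in effect, the results (plus the
+	// final memory state as the last argument) are collected into a
+	// single OpMakeResult value, which then becomes the control value
+	// of the BlockRet; otherwise each result is stored back to its
+	// stack slot individually, as before.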
+ if lateResultLowering { + resultFields := s.curfn.Type().Results().FieldSlice() + results := make([]*ssa.Value, len(resultFields)+1, len(resultFields)+1) + m = s.newValue0(ssa.OpMakeResult, s.f.OwnAux.LateExpansionResultType()) + // Store SSAable and heap-escaped PPARAMOUT variables back to stack locations. + for i, f := range resultFields { + n := f.Nname.(*ir.Name) s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem()) - s.move(n.Type(), s.decladdrs[n], s.expr(n.Heapaddr)) + if s.canSSA(n) { // result is in some SSA variable + results[i] = s.variable(n, n.Type()) + } else if !n.OnStack() { // result is actually heap allocated + ha := s.expr(n.Heapaddr) + s.instrumentFields(n.Type(), ha, instrumentRead) + results[i] = s.newValue2(ssa.OpDereference, n.Type(), ha, s.mem()) + } else { // result is not SSA-able; not escaped, so not on heap, but too large for SSA. + // Before register ABI this ought to be a self-move, home=dest, + // With register ABI, it's still a self-move if parameter is on stack (i.e., too big or overflowed) + results[i] = s.newValue2(ssa.OpDereference, n.Type(), s.addr(n), s.mem()) + } } - // TODO: if val is ever spilled, we'd like to use the - // PPARAMOUT slot for spilling it. That won't happen - // currently. - } - // Run exit code. Today, this is just raceexit, in -race mode. - s.stmtList(s.curfn.Exit) + // Run exit code. Today, this is just racefuncexit, in -race mode. + // TODO this seems risky here with a register-ABI, but not clear it is right to do it earlier either. + // Spills in register allocation might just fix it. + s.stmtList(s.curfn.Exit) - // Do actual return. - m := s.mem() - b := s.endBlock() + results[len(results)-1] = s.mem() + m.AddArgs(results...) + } else { + // Store SSAable and heap-escaped PPARAMOUT variables back to stack locations. + for _, f := range s.curfn.Type().Results().FieldSlice() { + n := f.Nname.(*ir.Name) + if s.canSSA(n) { + val := s.variable(n, n.Type()) + s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem()) + s.store(n.Type(), s.decladdrs[n], val) + } else if !n.OnStack() { + s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem()) + s.move(n.Type(), s.decladdrs[n], s.expr(n.Heapaddr)) + } // else, on stack but too large to SSA, the result is already in its destination by construction, so no store needed. + + // TODO: if (SSA) val is ever spilled, we'd like to use the PPARAMOUT slot for spilling it. That won't happen currently. + } + + // Run exit code. Today, this is just racefuncexit, in -race mode. + s.stmtList(s.curfn.Exit) + + // Do actual return. + m = s.mem() + } + b = s.endBlock() b.Kind = ssa.BlockRet b.SetControl(m) if s.hasdefer && s.hasOpenDefers { @@ -5253,7 +5290,7 @@ func (s *state) canSSAName(name *ir.Name) bool { // TODO: try to make more variables SSAable? } -// canSSA reports whether variables of type t are SSA-able. +// TypeOK reports whether variables of type t are SSA-able. func TypeOK(t *types.Type) bool { types.CalcSize(t) if t.Width > int64(4*types.PtrSize) { From 1760d736f61265b3c78a6a48f2e1904341806643 Mon Sep 17 00:00:00 2001 From: Dan Scales Date: Tue, 1 Dec 2020 14:48:03 -0800 Subject: [PATCH 412/474] [dev.regabi] cmd/compile: exporting, importing, and inlining functions with OCLOSURE I have exporting, importing, and inlining of functions with closures working in all cases (issue #28727). all.bash runs successfully without errors. Approach: - Write out the Func type, Dcls, ClosureVars, and Body when exporting an OCLOSURE. 
- When importing an OCLOSURE, read in the type, dcls, closure vars, and body, and then do roughly equivalent code to (*noder).funcLit - During inlining of a closure within inlined function, create new nodes for all params and local variables (including closure variables), so they can have a new Curfn and some other field values. Must substitute not only on the Nbody of the closure, but also the Type, Cvars, and Dcl fields. Fixes #28727 Change-Id: I4da1e2567c3fa31a5121afbe82dc4e5ee32b3170 Reviewed-on: https://go-review.googlesource.com/c/go/+/283112 Run-TryBot: Dan Scales TryBot-Result: Go Bot Reviewed-by: Keith Randall Reviewed-by: Matthew Dempsky Trust: Dan Scales --- src/cmd/compile/internal/escape/escape.go | 4 + src/cmd/compile/internal/inline/inl.go | 264 ++++++++++++++++-- src/cmd/compile/internal/ir/fmt.go | 21 ++ src/cmd/compile/internal/ir/node.go | 4 + src/cmd/compile/internal/noder/noder.go | 8 + src/cmd/compile/internal/typecheck/func.go | 22 +- src/cmd/compile/internal/typecheck/iexport.go | 49 +++- src/cmd/compile/internal/typecheck/iimport.go | 85 ++++-- test/closure3.dir/main.go | 44 +-- test/closure5.dir/a.go | 11 + test/closure5.dir/main.go | 15 + test/closure5.go | 10 + test/inline.go | 22 +- 13 files changed, 472 insertions(+), 87 deletions(-) create mode 100644 test/closure5.dir/a.go create mode 100644 test/closure5.dir/main.go create mode 100644 test/closure5.go diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index 5ee6d4f498dd4..883e68a730c1c 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -218,6 +218,10 @@ func Batch(fns []*ir.Func, recursive bool) { // Construct data-flow graph from syntax trees. for _, fn := range fns { + if base.Flag.W > 1 { + s := fmt.Sprintf("\nbefore escape %v", fn) + ir.Dump(s, fn) + } b.initFunc(fn) } for _, fn := range fns { diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index aa194ebab2b6e..7778bc56c4cdd 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -180,7 +180,7 @@ func CanInline(fn *ir.Func) { n.Func.Inl = &ir.Inline{ Cost: inlineMaxBudget - visitor.budget, Dcl: pruneUnusedAutos(n.Defn.(*ir.Func).Dcl, &visitor), - Body: ir.DeepCopyList(src.NoXPos, fn.Body), + Body: inlcopylist(fn.Body), } if base.Flag.LowerM > 1 { @@ -217,10 +217,8 @@ func Inline_Flood(n *ir.Name, exportsym func(*ir.Name)) { typecheck.ImportedBody(fn) - // Recursively identify all referenced functions for - // reexport. We want to include even non-called functions, - // because after inlining they might be callable. - ir.VisitList(ir.Nodes(fn.Inl.Body), func(n ir.Node) { + var doFlood func(n ir.Node) + doFlood = func(n ir.Node) { switch n.Op() { case ir.OMETHEXPR, ir.ODOTMETH: Inline_Flood(ir.MethodExprName(n), exportsym) @@ -239,15 +237,16 @@ func Inline_Flood(n *ir.Name, exportsym func(*ir.Name)) { // Okay, because we don't yet inline indirect // calls to method values. case ir.OCLOSURE: - // If the closure is inlinable, we'll need to - // flood it too. But today we don't support - // inlining functions that contain closures. - // - // When we do, we'll probably want: - // inlFlood(n.Func.Closure.Func.Nname) - base.Fatalf("unexpected closure in inlinable function") + // VisitList doesn't visit closure bodies, so force a + // recursive call to VisitList on the body of the closure. 
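+			// (The closure body hangs off the ClosureExpr's *ir.Func,
+			// which the generic walk does not treat as a child.)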
+ ir.VisitList(n.(*ir.ClosureExpr).Func.Body, doFlood) } - }) + } + + // Recursively identify all referenced functions for + // reexport. We want to include even non-called functions, + // because after inlining they might be callable. + ir.VisitList(ir.Nodes(fn.Inl.Body), doFlood) } // hairyVisitor visits a function body to determine its inlining @@ -360,8 +359,13 @@ func (v *hairyVisitor) doNode(n ir.Node) error { // the right panic value, so it needs an argument frame. return errors.New("call to recover") - case ir.OCLOSURE, - ir.ORANGE, + case ir.OCLOSURE: + // TODO(danscales) - fix some bugs when budget is lowered below 30 + // Maybe make budget proportional to number of closure variables, e.g.: + //v.budget -= int32(len(n.(*ir.ClosureExpr).Func.ClosureVars) * 3) + v.budget -= 30 + + case ir.ORANGE, ir.OSELECT, ir.OGO, ir.ODEFER, @@ -449,6 +453,52 @@ func isBigFunc(fn *ir.Func) bool { }) } +// inlcopylist (together with inlcopy) recursively copies a list of nodes, except +// that it keeps the same ONAME, OTYPE, and OLITERAL nodes. It is used for copying +// the body and dcls of an inlineable function. +func inlcopylist(ll []ir.Node) []ir.Node { + s := make([]ir.Node, len(ll)) + for i, n := range ll { + s[i] = inlcopy(n) + } + return s +} + +// inlcopy is like DeepCopy(), but does extra work to copy closures. +func inlcopy(n ir.Node) ir.Node { + var edit func(ir.Node) ir.Node + edit = func(x ir.Node) ir.Node { + switch x.Op() { + case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.ONIL: + return x + } + m := ir.Copy(x) + ir.EditChildren(m, edit) + if x.Op() == ir.OCLOSURE { + x := x.(*ir.ClosureExpr) + // Need to save/duplicate x.Func.Nname, + // x.Func.Nname.Ntype, x.Func.Dcl, x.Func.ClosureVars, and + // x.Func.Body for iexport and local inlining. + oldfn := x.Func + newfn := ir.NewFunc(oldfn.Pos()) + if oldfn.ClosureCalled() { + newfn.SetClosureCalled(true) + } + m.(*ir.ClosureExpr).Func = newfn + newfn.Nname = ir.NewNameAt(oldfn.Nname.Pos(), oldfn.Nname.Sym()) + // XXX OK to share fn.Type() ?? + newfn.Nname.SetType(oldfn.Nname.Type()) + newfn.Nname.Ntype = inlcopy(oldfn.Nname.Ntype).(ir.Ntype) + newfn.Body = inlcopylist(oldfn.Body) + // Make shallow copy of the Dcl and ClosureVar slices + newfn.Dcl = append([]*ir.Name(nil), oldfn.Dcl...) + newfn.ClosureVars = append([]*ir.Name(nil), oldfn.ClosureVars...) + } + return m + } + return edit(n) +} + // Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any // calls made to inlineable functions. This is the external entry point. func InlineCalls(fn *ir.Func) { @@ -925,6 +975,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b inlvars: inlvars, bases: make(map[*src.PosBase]*src.PosBase), newInlIndex: newIndex, + fn: fn, } subst.edit = subst.node @@ -1031,6 +1082,12 @@ type inlsubst struct { newInlIndex int edit func(ir.Node) ir.Node // cached copy of subst.node method value closure + + // If non-nil, we are inside a closure inside the inlined function, and + // newclofn is the Func of the new inlined closure. + newclofn *ir.Func + + fn *ir.Func // For debug -- the func that is being inlined } // list inlines a list of nodes. @@ -1042,6 +1099,157 @@ func (subst *inlsubst) list(ll ir.Nodes) []ir.Node { return s } +// fields returns a list of the fields of a struct type representing receiver, +// params, or results, after duplicating the field nodes and substituting the +// Nname nodes inside the field nodes. 
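+//
+// The substitution matters because these Nname pointers reach into the
+// function body: after inlining they must refer to the fresh per-call
+// copies made by clovar, not back into the pristine saved body.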
+func (subst *inlsubst) fields(oldt *types.Type) []*types.Field { + oldfields := oldt.FieldSlice() + newfields := make([]*types.Field, len(oldfields)) + for i := range oldfields { + newfields[i] = oldfields[i].Copy() + if oldfields[i].Nname != nil { + newfields[i].Nname = subst.node(oldfields[i].Nname.(*ir.Name)) + } + } + return newfields +} + +// clovar creates a new ONAME node for a local variable or param of a closure +// inside a function being inlined. +func (subst *inlsubst) clovar(n *ir.Name) *ir.Name { + // TODO(danscales): want to get rid of this shallow copy, with code like the + // following, but it is hard to copy all the necessary flags in a maintainable way. + // m := ir.NewNameAt(n.Pos(), n.Sym()) + // m.Class = n.Class + // m.SetType(n.Type()) + // m.SetTypecheck(1) + //if n.IsClosureVar() { + // m.SetIsClosureVar(true) + //} + m := &ir.Name{} + *m = *n + m.Curfn = subst.newclofn + if n.Defn != nil && n.Defn.Op() == ir.ONAME { + if !n.IsClosureVar() { + base.FatalfAt(n.Pos(), "want closure variable, got: %+v", n) + } + if n.Sym().Pkg != types.LocalPkg { + // If the closure came from inlining a function from + // another package, must change package of captured + // variable to localpkg, so that the fields of the closure + // struct are local package and can be accessed even if + // name is not exported. If you disable this code, you can + // reproduce the problem by running 'go test + // go/internal/srcimporter'. TODO(mdempsky) - maybe change + // how we create closure structs? + m.SetSym(types.LocalPkg.Lookup(n.Sym().Name)) + } + // Make sure any inlvar which is the Defn + // of an ONAME closure var is rewritten + // during inlining. Don't substitute + // if Defn node is outside inlined function. + if subst.inlvars[n.Defn.(*ir.Name)] != nil { + m.Defn = subst.node(n.Defn) + } + } + if n.Outer != nil { + // Either the outer variable is defined in function being inlined, + // and we will replace it with the substituted variable, or it is + // defined outside the function being inlined, and we should just + // skip the outer variable (the closure variable of the function + // being inlined). + s := subst.node(n.Outer).(*ir.Name) + if s == n.Outer { + s = n.Outer.Outer + } + m.Outer = s + } + return m +} + +// closure does the necessary substitions for a ClosureExpr n and returns the new +// closure node. +func (subst *inlsubst) closure(n *ir.ClosureExpr) ir.Node { + m := ir.Copy(n) + m.SetPos(subst.updatedPos(m.Pos())) + ir.EditChildren(m, subst.edit) + + //fmt.Printf("Inlining func %v with closure into %v\n", subst.fn, ir.FuncName(ir.CurFunc)) + + // The following is similar to funcLit + oldfn := n.Func + newfn := ir.NewFunc(oldfn.Pos()) + // These three lines are not strictly necessary, but just to be clear + // that new function needs to redo typechecking and inlinability. + newfn.SetTypecheck(0) + newfn.SetInlinabilityChecked(false) + newfn.Inl = nil + newfn.SetIsHiddenClosure(true) + newfn.Nname = ir.NewNameAt(n.Pos(), ir.BlankNode.Sym()) + newfn.Nname.Func = newfn + newfn.Nname.Ntype = subst.node(oldfn.Nname.Ntype).(ir.Ntype) + newfn.Nname.Defn = newfn + + m.(*ir.ClosureExpr).Func = newfn + newfn.OClosure = m.(*ir.ClosureExpr) + + if subst.newclofn != nil { + //fmt.Printf("Inlining a closure with a nested closure\n") + } + prevxfunc := subst.newclofn + + // Mark that we are now substituting within a closure (within the + // inlined function), and create new nodes for all the local + // vars/params inside this closure. 
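+	// The old->new pairs are entered into subst.inlvars so that
+	// subst.node rewrites references inside the closure body; they are
+	// deleted again after the body has been copied, so that enclosing
+	// scopes see only their own substitutions.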
+ subst.newclofn = newfn + newfn.Dcl = nil + newfn.ClosureVars = nil + for _, oldv := range oldfn.Dcl { + newv := subst.clovar(oldv) + subst.inlvars[oldv] = newv + newfn.Dcl = append(newfn.Dcl, newv) + } + for _, oldv := range oldfn.ClosureVars { + newv := subst.clovar(oldv) + subst.inlvars[oldv] = newv + newfn.ClosureVars = append(newfn.ClosureVars, newv) + } + + // Need to replace ONAME nodes in + // newfn.Type().FuncType().Receiver/Params/Results.FieldSlice().Nname + oldt := oldfn.Type() + newrecvs := subst.fields(oldt.Recvs()) + var newrecv *types.Field + if len(newrecvs) > 0 { + newrecv = newrecvs[0] + } + newt := types.NewSignature(oldt.Pkg(), newrecv, + subst.fields(oldt.Params()), subst.fields(oldt.Results())) + + newfn.Nname.SetType(newt) + newfn.Body = subst.list(oldfn.Body) + + // Remove the nodes for the current closure from subst.inlvars + for _, oldv := range oldfn.Dcl { + delete(subst.inlvars, oldv) + } + for _, oldv := range oldfn.ClosureVars { + delete(subst.inlvars, oldv) + } + // Go back to previous closure func + subst.newclofn = prevxfunc + + // Actually create the named function for the closure, now that + // the closure is inlined in a specific function. + m.SetTypecheck(0) + if oldfn.ClosureCalled() { + typecheck.Callee(m) + } else { + typecheck.Expr(m) + } + return m +} + // node recursively copies a node from the saved pristine body of the // inlined function, substituting references to input/output // parameters with ones to the tmpnames, and substituting returns with @@ -1056,13 +1264,17 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { n := n.(*ir.Name) // Handle captured variables when inlining closures. - if n.IsClosureVar() { + if n.IsClosureVar() && subst.newclofn == nil { o := n.Outer + // Deal with case where sequence of closures are inlined. + // TODO(danscales) - write test case to see if we need to + // go up multiple levels. + if o.Curfn != ir.CurFunc { + o = o.Outer + } + // make sure the outer param matches the inlining location - // NB: if we enabled inlining of functions containing OCLOSURE or refined - // the reassigned check via some sort of copy propagation this would most - // likely need to be changed to a loop to walk up to the correct Param if o == nil || o.Curfn != ir.CurFunc { base.Fatalf("%v: unresolvable capture %v\n", ir.Line(n), n) } @@ -1098,6 +1310,10 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { } case ir.ORETURN: + if subst.newclofn != nil { + // Don't do special substitutions if inside a closure + break + } // Since we don't handle bodies with closures, // this return is guaranteed to belong to the current inlined function. 
n := n.(*ir.ReturnStmt) @@ -1136,6 +1352,10 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { return m case ir.OLABEL: + if subst.newclofn != nil { + // Don't do special substitutions if inside a closure + break + } n := n.(*ir.LabelStmt) m := ir.Copy(n).(*ir.LabelStmt) m.SetPos(subst.updatedPos(m.Pos())) @@ -1143,10 +1363,10 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { p := fmt.Sprintf("%s·%d", n.Label.Name, inlgen) m.Label = typecheck.Lookup(p) return m - } - if n.Op() == ir.OCLOSURE { - base.Fatalf("cannot inline function containing closure: %+v", n) + case ir.OCLOSURE: + return subst.closure(n.(*ir.ClosureExpr)) + } m := ir.Copy(n) diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 01197ad272418..1a05079dac8ee 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -1020,6 +1020,15 @@ func dumpNodeHeader(w io.Writer, n Node) { fmt.Fprintf(w, " defn(%p)", n.Name().Defn) } + if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Curfn != nil { + // Useful to see where Defn is set and what node it points to + fmt.Fprintf(w, " curfn(%p)", n.Name().Curfn) + } + if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Outer != nil { + // Useful to see where Defn is set and what node it points to + fmt.Fprintf(w, " outer(%p)", n.Name().Outer) + } + if EscFmt != nil { if esc := EscFmt(n); esc != "" { fmt.Fprintf(w, " %s", esc) @@ -1187,6 +1196,18 @@ func dumpNode(w io.Writer, n Node, depth int) { dumpNode(w, dcl, depth+1) } } + if len(fn.ClosureVars) > 0 { + indent(w, depth) + fmt.Fprintf(w, "%+v-ClosureVars", n.Op()) + for _, cv := range fn.ClosureVars { + dumpNode(w, cv, depth+1) + } + } + if len(fn.Enter) > 0 { + indent(w, depth) + fmt.Fprintf(w, "%+v-Enter", n.Op()) + dumpNodes(w, fn.Enter, depth+1) + } if len(fn.Body) > 0 { indent(w, depth) fmt.Fprintf(w, "%+v-body", n.Op()) diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 291e1286bb8f1..ffa7daf6b2be7 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -291,6 +291,10 @@ const ( OTSLICE // []int // misc + // intermediate representation of an inlined call. Uses Init (assignments + // for the captured variables, parameters, retvars, & INLMARK op), + // Body (body of the inlined function), and ReturnVars (list of + // return values) OINLCALL // intermediary representation of an inlined call. OEFACE // itable and data words of an empty-interface value. OITAB // itable word of an interface value. diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index 99c0e4addeb85..0ea72a28dcf9c 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -142,7 +142,15 @@ func Package() { for i := 0; i < len(typecheck.Target.Decls); i++ { n := typecheck.Target.Decls[i] if n.Op() == ir.ODCLFUNC { + if base.Flag.W > 1 { + s := fmt.Sprintf("\nbefore typecheck %v", n) + ir.Dump(s, n) + } typecheck.FuncBody(n.(*ir.Func)) + if base.Flag.W > 1 { + s := fmt.Sprintf("\nafter typecheck %v", n) + ir.Dump(s, n) + } fcount++ } } diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index b576590d4d92b..f624773c8f0fe 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -145,7 +145,7 @@ func ImportedBody(fn *ir.Func) { // declarations are added to fn.Func.Dcl by funcBody(). 
Move them // to fn.Func.Inl.Dcl for consistency with how local functions // behave. (Append because ImportedBody may be called multiple - // times.) + // times on same fn.) fn.Inl.Dcl = append(fn.Inl.Dcl, fn.Dcl...) fn.Dcl = nil @@ -303,8 +303,15 @@ func tcClosure(clo *ir.ClosureExpr, top int) { return } - fn.Nname.SetSym(closurename(ir.CurFunc)) - ir.MarkFunc(fn.Nname) + // Don't give a name and add to xtop if we are typechecking an inlined + // body in ImportedBody(), since we only want to create the named function + // when the closure is actually inlined (and then we force a typecheck + // explicitly in (*inlsubst).node()). + inTypeCheckInl := ir.CurFunc != nil && ir.CurFunc.Body == nil + if !inTypeCheckInl { + fn.Nname.SetSym(closurename(ir.CurFunc)) + ir.MarkFunc(fn.Nname) + } Func(fn) clo.SetType(fn.Type()) @@ -338,7 +345,14 @@ func tcClosure(clo *ir.ClosureExpr, top int) { } fn.ClosureVars = fn.ClosureVars[:out] - Target.Decls = append(Target.Decls, fn) + if base.Flag.W > 1 { + s := fmt.Sprintf("New closure func: %s", ir.FuncName(fn)) + ir.Dump(s, fn) + } + if !inTypeCheckInl { + // Add function to xtop once only when we give it a name + Target.Decls = append(Target.Decls, fn) + } } // type check function definition diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index 1ba8771139880..be4a689836cec 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -423,9 +423,13 @@ type exportWriter struct { prevLine int64 prevColumn int64 - // dclIndex maps function-scoped declarations to their index - // within their respective Func's Dcl list. - dclIndex map[*ir.Name]int + // dclIndex maps function-scoped declarations to an int used to refer to + // them later in the function. For local variables/params, the int is + // non-negative and in order of the appearance in the Func's Dcl list. For + // closure variables, the index is negative starting at -2. + dclIndex map[*ir.Name]int + maxDclIndex int + maxClosureVarIndex int } func (p *iexporter) doDecl(n *ir.Name) { @@ -1038,14 +1042,19 @@ func (w *exportWriter) typeExt(t *types.Type) { // Inline bodies. -func (w *exportWriter) funcBody(fn *ir.Func) { - w.int64(int64(len(fn.Inl.Dcl))) - for i, n := range fn.Inl.Dcl { +func (w *exportWriter) writeNames(dcl []*ir.Name) { + w.int64(int64(len(dcl))) + for i, n := range dcl { w.pos(n.Pos()) w.localIdent(n.Sym()) w.typ(n.Type()) - w.dclIndex[n] = i + w.dclIndex[n] = w.maxDclIndex + i } + w.maxDclIndex += len(dcl) +} + +func (w *exportWriter) funcBody(fn *ir.Func) { + w.writeNames(fn.Inl.Dcl) w.stmtList(fn.Inl.Body) } @@ -1315,8 +1324,30 @@ func (w *exportWriter) expr(n ir.Node) { // case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC: // should have been resolved by typechecking - handled by default case - // case OCLOSURE: - // unimplemented - handled by default case + case ir.OCLOSURE: + n := n.(*ir.ClosureExpr) + w.op(ir.OCLOSURE) + w.pos(n.Pos()) + w.signature(n.Type()) + + // Write out id for the Outer of each conditional variable. The + // conditional variable itself for this closure will be re-created + // during import. + w.int64(int64(len(n.Func.ClosureVars))) + for i, cv := range n.Func.ClosureVars { + w.pos(cv.Pos()) + w.localName(cv.Outer) + // Closure variable (which will be re-created during + // import) is given via a negative id, starting at -2, + // which is used to refer to it later in the function + // during export. -1 represents blanks. 
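+			// For example, the first closure variable written out for
+			// a function gets id -2 and the next -3; a subsequent
+			// closure's variables keep counting down from
+			// -(maxClosureVarIndex + 2), so ids stay unique across
+			// closures in the same function.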
+ w.dclIndex[cv] = -(i + 2) - w.maxClosureVarIndex + } + w.maxClosureVarIndex += len(n.Func.ClosureVars) + + // like w.funcBody(n.Func), but not for .Inl + w.writeNames(n.Func.Dcl) + w.stmtList(n.Func.Body) // case OCOMPLIT: // should have been resolved by typechecking - handled by default case diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go index c2610229ec5b2..f2682257f3456 100644 --- a/src/cmd/compile/internal/typecheck/iimport.go +++ b/src/cmd/compile/internal/typecheck/iimport.go @@ -265,6 +265,9 @@ type importReader struct { // curfn is the current function we're importing into. curfn *ir.Func + // Slice of all dcls for function, including any interior closures + allDcls []*ir.Name + allClosureVars []*ir.Name } func (p *iimporter) newReader(off uint64, pkg *types.Pkg) *importReader { @@ -721,6 +724,7 @@ func (r *importReader) doInline(fn *ir.Func) { base.Fatalf("%v already has inline body", fn) } + //fmt.Printf("Importing %v\n", n) r.funcBody(fn) importlist = append(importlist, fn) @@ -754,6 +758,24 @@ func (r *importReader) funcBody(fn *ir.Func) { r.curfn = fn // Import local declarations. + fn.Inl.Dcl = r.readFuncDcls(fn) + + // Import function body. + body := r.stmtList() + if body == nil { + // Make sure empty body is not interpreted as + // no inlineable body (see also parser.fnbody) + // (not doing so can cause significant performance + // degradation due to unnecessary calls to empty + // functions). + body = []ir.Node{} + } + fn.Inl.Body = body + + r.curfn = outerfn +} + +func (r *importReader) readNames(fn *ir.Func) []*ir.Name { dcls := make([]*ir.Name, r.int64()) for i := range dcls { n := ir.NewDeclNameAt(r.pos(), ir.ONAME, r.localIdent()) @@ -762,7 +784,12 @@ func (r *importReader) funcBody(fn *ir.Func) { n.SetType(r.typ()) dcls[i] = n } - fn.Inl.Dcl = dcls + r.allDcls = append(r.allDcls, dcls...) + return dcls +} + +func (r *importReader) readFuncDcls(fn *ir.Func) []*ir.Name { + dcls := r.readNames(fn) // Fixup parameter classes and associate with their // signature's type fields. @@ -787,28 +814,18 @@ func (r *importReader) funcBody(fn *ir.Func) { for _, f := range typ.Results().FieldSlice() { fix(f, ir.PPARAMOUT) } - - // Import function body. - body := r.stmtList() - if body == nil { - // Make sure empty body is not interpreted as - // no inlineable body (see also parser.fnbody) - // (not doing so can cause significant performance - // degradation due to unnecessary calls to empty - // functions). 
- body = []ir.Node{} - } - fn.Inl.Body = body - - r.curfn = outerfn + return dcls } func (r *importReader) localName() *ir.Name { i := r.int64() - if i < 0 { + if i == -1 { return ir.BlankNode.(*ir.Name) } - return r.curfn.Inl.Dcl[i] + if i < 0 { + return r.allClosureVars[-i-2] + } + return r.allDcls[i] } func (r *importReader) stmtList() []ir.Node { @@ -924,8 +941,38 @@ func (r *importReader) node() ir.Node { // case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC: // unreachable - should have been resolved by typechecking - // case OCLOSURE: - // unimplemented + case ir.OCLOSURE: + //println("Importing CLOSURE") + pos := r.pos() + typ := r.signature(nil) + + // All the remaining code below is similar to (*noder).funcLit(), but + // with Dcls and ClosureVars lists already set up + fn := ir.NewFunc(pos) + fn.SetIsHiddenClosure(true) + fn.Nname = ir.NewNameAt(pos, ir.BlankNode.Sym()) + fn.Nname.Func = fn + fn.Nname.Ntype = ir.TypeNode(typ) + fn.Nname.Defn = fn + fn.Nname.SetType(typ) + + cvars := make([]*ir.Name, r.int64()) + for i := range cvars { + cvars[i] = ir.CaptureName(r.pos(), fn, r.localName().Canonical()) + } + fn.ClosureVars = cvars + r.allClosureVars = append(r.allClosureVars, cvars...) + + fn.Dcl = r.readFuncDcls(fn) + body := r.stmtList() + ir.FinishCaptureNames(pos, r.curfn, fn) + + clo := ir.NewClosureExpr(pos, fn) + fn.OClosure = clo + + fn.Body = body + + return clo // case OPTRLIT: // unreachable - mapped to case OADDR below by exporter diff --git a/test/closure3.dir/main.go b/test/closure3.dir/main.go index e8e1e99860f5c..2fc33753ed7f8 100644 --- a/test/closure3.dir/main.go +++ b/test/closure3.dir/main.go @@ -93,11 +93,11 @@ func main() { y := func(x int) int { // ERROR "can inline main.func11" "func literal does not escape" return x + 2 } - y, sink = func() (func(int) int, int) { // ERROR "func literal does not escape" - return func(x int) int { // ERROR "can inline main.func12" "func literal escapes" + y, sink = func() (func(int) int, int) { // ERROR "can inline main.func12" + return func(x int) int { // ERROR "can inline main.func12" return x + 1 }, 42 - }() + }() // ERROR "func literal does not escape" "inlining call to main.func12" if y(40) != 41 { ppanic("y(40) != 41") } @@ -105,14 +105,14 @@ func main() { { func() { // ERROR "func literal does not escape" - y := func(x int) int { // ERROR "can inline main.func13.1" "func literal does not escape" + y := func(x int) int { // ERROR "func literal does not escape" "can inline main.func13.1" return x + 2 } - y, sink = func() (func(int) int, int) { // ERROR "func literal does not escape" - return func(x int) int { // ERROR "can inline main.func13.2" "func literal escapes" + y, sink = func() (func(int) int, int) { // ERROR "can inline main.func13.2" + return func(x int) int { // ERROR "can inline main.func13.2" return x + 1 }, 42 - }() + }() // ERROR "inlining call to main.func13.2" "func literal does not escape" if y(40) != 41 { ppanic("y(40) != 41") } @@ -187,29 +187,29 @@ func main() { { x := 42 - if z := func(y int) int { // ERROR "func literal does not escape" - return func() int { // ERROR "can inline main.func22.1" + if z := func(y int) int { // ERROR "can inline main.func22" + return func() int { // ERROR "can inline main.func22.1" "can inline main.func30" return x + y }() // ERROR "inlining call to main.func22.1" - }(1); z != 43 { + }(1); z != 43 { // ERROR "inlining call to main.func22" "inlining call to main.func30" ppanic("z != 43") } - if z := func(y int) int { // ERROR "func literal does not escape" - 
return func() int { // ERROR "can inline main.func23.1" + if z := func(y int) int { // ERROR "func literal does not escape" "can inline main.func23" + return func() int { // ERROR "can inline main.func23.1" "can inline main.func31" return x + y }() // ERROR "inlining call to main.func23.1" - }; z(1) != 43 { + }; z(1) != 43 { // ERROR "inlining call to main.func23" "inlining call to main.func31" ppanic("z(1) != 43") } } { a := 1 - func() { // ERROR "func literal does not escape" - func() { // ERROR "can inline main.func24" + func() { // ERROR "can inline main.func24" + func() { // ERROR "can inline main.func24" "can inline main.func32" a = 2 }() // ERROR "inlining call to main.func24" - }() + }() // ERROR "inlining call to main.func24" "inlining call to main.func32" if a != 2 { ppanic("a != 2") } @@ -250,12 +250,12 @@ func main() { a := 2 if r := func(x int) int { // ERROR "func literal does not escape" b := 3 - return func(y int) int { // ERROR "func literal does not escape" + return func(y int) int { // ERROR "can inline main.func27.1" c := 5 - return func(z int) int { // ERROR "can inline main.func27.1.1" + return func(z int) int { // ERROR "can inline main.func27.1.1" "can inline main.func27.2" return a*x + b*y + c*z }(10) // ERROR "inlining call to main.func27.1.1" - }(100) + }(100) // ERROR "inlining call to main.func27.1" "inlining call to main.func27.2" }(1000); r != 2350 { ppanic("r != 2350") } @@ -265,15 +265,15 @@ func main() { a := 2 if r := func(x int) int { // ERROR "func literal does not escape" b := 3 - return func(y int) int { // ERROR "func literal does not escape" + return func(y int) int { // ERROR "can inline main.func28.1" c := 5 - func(z int) { // ERROR "can inline main.func28.1.1" + func(z int) { // ERROR "can inline main.func28.1.1" "can inline main.func28.2" a = a * x b = b * y c = c * z }(10) // ERROR "inlining call to main.func28.1.1" return a + c - }(100) + b + }(100) + b // ERROR "inlining call to main.func28.1" "inlining call to main.func28.2" }(1000); r != 2350 { ppanic("r != 2350") } diff --git a/test/closure5.dir/a.go b/test/closure5.dir/a.go new file mode 100644 index 0000000000000..de8082b7b17a7 --- /dev/null +++ b/test/closure5.dir/a.go @@ -0,0 +1,11 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Check correctness of various closure corner cases +// that are expected to be inlined + +package a + +func f() bool { return true } +func G() func() func() bool { return func() func() bool { return f } } diff --git a/test/closure5.dir/main.go b/test/closure5.dir/main.go new file mode 100644 index 0000000000000..ee5dba648156c --- /dev/null +++ b/test/closure5.dir/main.go @@ -0,0 +1,15 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Check correctness of various closure corner cases +// that are expected to be inlined +package main + +import "a" + +func main() { + if !a.G()()() { + panic("FAIL") + } +} diff --git a/test/closure5.go b/test/closure5.go new file mode 100644 index 0000000000000..a7022b27a68f7 --- /dev/null +++ b/test/closure5.go @@ -0,0 +1,10 @@ +// compiledir + +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Check correctness of various closure corner cases +// that are expected to be inlined + +package ignored diff --git a/test/inline.go b/test/inline.go index d754f06e0340c..37965c0d9dc8a 100644 --- a/test/inline.go +++ b/test/inline.go @@ -58,7 +58,7 @@ func _() int { // ERROR "can inline _" var somethingWrong error // local closures can be inlined -func l(x, y int) (int, int, error) { +func l(x, y int) (int, int, error) { // ERROR "can inline l" e := func(err error) (int, int, error) { // ERROR "can inline l.func1" "func literal does not escape" "leaking param: err to result" return 0, 0, err } @@ -90,19 +90,19 @@ func n() int { // make sure assignment inside closure is detected func o() int { foo := func() int { return 1 } // ERROR "can inline o.func1" "func literal does not escape" - func(x int) { // ERROR "func literal does not escape" + func(x int) { // ERROR "can inline o.func2" if x > 10 { - foo = func() int { return 2 } // ERROR "can inline o.func2" "func literal escapes" + foo = func() int { return 2 } // ERROR "can inline o.func2" } - }(11) + }(11) // ERROR "func literal does not escape" "inlining call to o.func2" return foo() } -func p() int { +func p() int { // ERROR "can inline p" return func() int { return 42 }() // ERROR "can inline p.func1" "inlining call to p.func1" } -func q(x int) int { +func q(x int) int { // ERROR "can inline q" foo := func() int { return x * 2 } // ERROR "can inline q.func1" "func literal does not escape" return foo() // ERROR "inlining call to q.func1" } @@ -111,15 +111,15 @@ func r(z int) int { foo := func(x int) int { // ERROR "can inline r.func1" "func literal does not escape" return x + z } - bar := func(x int) int { // ERROR "func literal does not escape" - return x + func(y int) int { // ERROR "can inline r.func2.1" + bar := func(x int) int { // ERROR "func literal does not escape" "can inline r.func2" + return x + func(y int) int { // ERROR "can inline r.func2.1" "can inline r.func3" return 2*y + x*z }(x) // ERROR "inlining call to r.func2.1" } - return foo(42) + bar(42) // ERROR "inlining call to r.func1" + return foo(42) + bar(42) // ERROR "inlining call to r.func1" "inlining call to r.func2" "inlining call to r.func3" } -func s0(x int) int { +func s0(x int) int { // ERROR "can inline s0" foo := func() { // ERROR "can inline s0.func1" "func literal does not escape" x = x + 1 } @@ -127,7 +127,7 @@ func s0(x int) int { return x } -func s1(x int) int { +func s1(x int) int { // ERROR "can inline s1" foo := func() int { // ERROR "can inline s1.func1" "func literal does not escape" return x } From 213c3905e9eb4fcc4847d3f7e55ce6a0d3087318 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Thu, 21 Jan 2021 02:35:03 +0700 Subject: [PATCH 413/474] [dev.regabi] cmd/compile: use node walked flag to prevent double walk for walkSelect Same as CL 283733, but for walkSelect. Passes toolstash -cmp. 
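The guard follows the same pattern as the walkSwitch change in CL
283733: fail loudly on re-entry and mark the statement up front,

    if sel.Walked() {
        base.Fatalf("double walkSelect")
    }
    sel.SetWalked(true)

so the sentinel no longer depends on sel.Compiled happening to be
non-empty at the point of the check.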
Change-Id: I3ecb8d6eafd395379191c15fc58c95f75809fec9 Reviewed-on: https://go-review.googlesource.com/c/go/+/284895 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/walk/select.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/walk/select.go b/src/cmd/compile/internal/walk/select.go index 56ba0fa758455..c6069d0ba288c 100644 --- a/src/cmd/compile/internal/walk/select.go +++ b/src/cmd/compile/internal/walk/select.go @@ -13,9 +13,10 @@ import ( func walkSelect(sel *ir.SelectStmt) { lno := ir.SetPos(sel) - if len(sel.Compiled) != 0 { + if sel.Walked() { base.Fatalf("double walkSelect") } + sel.SetWalked(true) init := ir.TakeInit(sel) From 9f036844db39acad54ab2b45bab39fa376c78003 Mon Sep 17 00:00:00 2001 From: Baokun Lee Date: Thu, 7 Jan 2021 11:17:57 +0800 Subject: [PATCH 414/474] [dev.regabi] cmd/compile: use ir.DoChildren directly in inlining Passes toolstash -cmp. Change-Id: Ie35e8163fa0e61ed9e1b259929c8cbe82ee5301e Reviewed-on: https://go-review.googlesource.com/c/go/+/282212 Run-TryBot: Baokun Lee TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le Trust: Baokun Lee --- src/cmd/compile/internal/inline/inl.go | 66 ++++++++++---------------- 1 file changed, 25 insertions(+), 41 deletions(-) diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 7778bc56c4cdd..46f093b1f8fb3 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -27,7 +27,6 @@ package inline import ( - "errors" "fmt" "go/constant" "strings" @@ -256,17 +255,12 @@ type hairyVisitor struct { reason string extraCallCost int32 usedLocals map[*ir.Name]bool - do func(ir.Node) error + do func(ir.Node) bool } -var errBudget = errors.New("too expensive") - func (v *hairyVisitor) tooHairy(fn *ir.Func) bool { v.do = v.doNode // cache closure - - err := errChildren(fn, v.do) - if err != nil { - v.reason = err.Error() + if ir.DoChildren(fn, v.do) { return true } if v.budget < 0 { @@ -276,11 +270,10 @@ func (v *hairyVisitor) tooHairy(fn *ir.Func) bool { return false } -func (v *hairyVisitor) doNode(n ir.Node) error { +func (v *hairyVisitor) doNode(n ir.Node) bool { if n == nil { - return nil + return false } - switch n.Op() { // Call is okay if inlinable and we have the budget for the body. case ir.OCALLFUNC: @@ -294,7 +287,8 @@ func (v *hairyVisitor) doNode(n ir.Node) error { if name.Class == ir.PFUNC && types.IsRuntimePkg(name.Sym().Pkg) { fn := name.Sym().Name if fn == "getcallerpc" || fn == "getcallersp" { - return errors.New("call to " + fn) + v.reason = "call to " + fn + return true } if fn == "throw" { v.budget -= inlineExtraThrowCost @@ -357,7 +351,8 @@ func (v *hairyVisitor) doNode(n ir.Node) error { case ir.ORECOVER: // recover matches the argument frame pointer to find // the right panic value, so it needs an argument frame. - return errors.New("call to recover") + v.reason = "call to recover" + return true case ir.OCLOSURE: // TODO(danscales) - fix some bugs when budget is lowered below 30 @@ -371,24 +366,27 @@ func (v *hairyVisitor) doNode(n ir.Node) error { ir.ODEFER, ir.ODCLTYPE, // can't print yet ir.OTAILCALL: - return errors.New("unhandled op " + n.Op().String()) + v.reason = "unhandled op " + n.Op().String() + return true case ir.OAPPEND: v.budget -= inlineExtraAppendCost case ir.ODCLCONST, ir.OFALL: // These nodes don't produce code; omit from inlining budget. 
-		return nil
+		return false

 	case ir.OFOR, ir.OFORUNTIL:
 		n := n.(*ir.ForStmt)
 		if n.Label != nil {
-			return errors.New("labeled control")
+			v.reason = "labeled control"
+			return true
 		}

 	case ir.OSWITCH:
 		n := n.(*ir.SwitchStmt)
 		if n.Label != nil {
-			return errors.New("labeled control")
+			v.reason = "labeled control"
+			return true
 		}

 	// case ir.ORANGE, ir.OSELECT in "unhandled" above
@@ -404,16 +402,9 @@
 		if ir.IsConst(n.Cond, constant.Bool) {
 			// This if and the condition cost nothing.
 			// TODO(rsc): It seems strange that we visit the dead branch.
-			if err := errList(n.Init(), v.do); err != nil {
-				return err
-			}
-			if err := errList(n.Body, v.do); err != nil {
-				return err
-			}
-			if err := errList(n.Else, v.do); err != nil {
-				return err
-			}
-			return nil
+			return doList(n.Init(), v.do) ||
+				doList(n.Body, v.do) ||
+				doList(n.Else, v.do)
 		}

 	case ir.ONAME:
@@ -439,10 +430,11 @@

 	// When debugging, don't stop early, to get full cost of inlining this function
 	if v.budget < 0 && base.Flag.LowerM < 2 && !logopt.Enabled() {
-		return errBudget
+		v.reason = "too expensive"
+		return true
 	}

-	return errChildren(n, v.do)
+	return ir.DoChildren(n, v.do)
 }

 func isBigFunc(fn *ir.Func) bool {
@@ -1411,21 +1403,13 @@ func numNonClosures(list []*ir.Func) int {
 	return count
 }

-// TODO(mdempsky): Update inl.go to use ir.DoChildren directly.
-func errChildren(n ir.Node, do func(ir.Node) error) (err error) {
-	ir.DoChildren(n, func(x ir.Node) bool {
-		err = do(x)
-		return err != nil
-	})
-	return
-}
-func errList(list []ir.Node, do func(ir.Node) error) error {
+func doList(list []ir.Node, do func(ir.Node) bool) bool {
 	for _, x := range list {
 		if x != nil {
-			if err := do(x); err != nil {
-				return err
+			if do(x) {
+				return true
 			}
 		}
 	}
-	return nil
+	return false
 }

From 19a6db6b63fd53d36b2eef5823e107a25a8062c0 Mon Sep 17 00:00:00 2001
From: Cuong Manh Le
Date: Wed, 20 Jan 2021 14:26:42 +0700
Subject: [PATCH 415/474] [dev.regabi] cmd/compile: make sure mkcall* is
 passed a non-nil init

So the next CL can push the temporary assignments for function
arguments into init instead of CallExpr.Rargs.

Passes toolstash -cmp.
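The statement-only helpers collect the init nodes themselves, so no
caller can pass a nil init; mkcallstmt1 wraps any init into a block
(shown in full in the walk.go hunk below):

	func mkcallstmt1(fn ir.Node, args ...ir.Node) ir.Node {
		var init ir.Nodes
		n := vmkcall(fn, nil, &init, args)
		if len(init) == 0 {
			return n
		}
		init.Append(n)
		return ir.NewBlockStmt(n.Pos(), init)
	}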
Change-Id: I2c3cb6a63e8bf9d0418052b39c1db58050f71305 Reviewed-on: https://go-review.googlesource.com/c/go/+/284893 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/walk/race.go | 9 ++++----- src/cmd/compile/internal/walk/range.go | 16 +++++++++------- src/cmd/compile/internal/walk/select.go | 8 +++++--- src/cmd/compile/internal/walk/walk.go | 17 +++++++++++++++++ 4 files changed, 35 insertions(+), 15 deletions(-) diff --git a/src/cmd/compile/internal/walk/race.go b/src/cmd/compile/internal/walk/race.go index 77cabe50c6a0a..47cd2fdc222a6 100644 --- a/src/cmd/compile/internal/walk/race.go +++ b/src/cmd/compile/internal/walk/race.go @@ -26,10 +26,9 @@ func instrument(fn *ir.Func) { if base.Flag.Race { lno := base.Pos base.Pos = src.NoXPos - if ssagen.Arch.LinkArch.Arch.Family != sys.AMD64 { - fn.Enter.Prepend(mkcall("racefuncenterfp", nil, nil)) - fn.Exit.Append(mkcall("racefuncexit", nil, nil)) + fn.Enter.Prepend(mkcallstmt("racefuncenterfp")) + fn.Exit.Append(mkcallstmt("racefuncexit")) } else { // nodpc is the PC of the caller as extracted by @@ -44,8 +43,8 @@ func instrument(fn *ir.Func) { nodpc.SetType(types.Types[types.TUINTPTR]) nodpc.SetFrameOffset(int64(-types.PtrSize)) fn.Dcl = append(fn.Dcl, nodpc) - fn.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc)) - fn.Exit.Append(mkcall("racefuncexit", nil, nil)) + fn.Enter.Prepend(mkcallstmt("racefuncenter", nodpc)) + fn.Exit.Append(mkcallstmt("racefuncexit")) } base.Pos = lno } diff --git a/src/cmd/compile/internal/walk/range.go b/src/cmd/compile/internal/walk/range.go index 2b28e7442dbb3..5ab24b2188404 100644 --- a/src/cmd/compile/internal/walk/range.go +++ b/src/cmd/compile/internal/walk/range.go @@ -174,12 +174,12 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { fn := typecheck.LookupRuntime("mapiterinit") fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), th) - init = append(init, mkcall1(fn, nil, nil, reflectdata.TypePtr(t), ha, typecheck.NodAddr(hit))) + init = append(init, mkcallstmt1(fn, reflectdata.TypePtr(t), ha, typecheck.NodAddr(hit))) nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), typecheck.NodNil()) fn = typecheck.LookupRuntime("mapiternext") fn = typecheck.SubstArgTypes(fn, th) - nfor.Post = mkcall1(fn, nil, nil, typecheck.NodAddr(hit)) + nfor.Post = mkcallstmt1(fn, typecheck.NodAddr(hit)) key := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym)) if v1 == nil { @@ -269,12 +269,14 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { // } else { eif := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - nif.Else = []ir.Node{eif} // hv2, hv1 = decoderune(ha, hv1) eif.Lhs = []ir.Node{hv2, hv1} fn := typecheck.LookupRuntime("decoderune") - eif.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), nil, ha, hv1)} + var fnInit ir.Nodes + eif.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), &fnInit, ha, hv1)} + fnInit.Append(eif) + nif.Else = fnInit body = append(body, nif) @@ -374,7 +376,7 @@ func mapClear(m ir.Node) ir.Node { // instantiate mapclear(typ *type, hmap map[any]any) fn := typecheck.LookupRuntime("mapclear") fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem()) - n := mkcall1(fn, nil, nil, reflectdata.TypePtr(t), m) + n := mkcallstmt1(fn, reflectdata.TypePtr(t), m) return walkStmt(typecheck.Stmt(n)) } @@ -449,10 +451,10 @@ func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node { if a.Type().Elem().HasPointers() { // memclrHasPointers(hp, hn) 
ir.CurFunc.SetWBPos(stmt.Pos()) - fn = mkcall("memclrHasPointers", nil, nil, hp, hn) + fn = mkcallstmt("memclrHasPointers", hp, hn) } else { // memclrNoHeapPointers(hp, hn) - fn = mkcall("memclrNoHeapPointers", nil, nil, hp, hn) + fn = mkcallstmt("memclrNoHeapPointers", hp, hn) } n.Body.Append(fn) diff --git a/src/cmd/compile/internal/walk/select.go b/src/cmd/compile/internal/walk/select.go index c6069d0ba288c..873be289dcc6a 100644 --- a/src/cmd/compile/internal/walk/select.go +++ b/src/cmd/compile/internal/walk/select.go @@ -35,7 +35,7 @@ func walkSelectCases(cases []*ir.CommClause) []ir.Node { // optimization: zero-case select if ncas == 0 { - return []ir.Node{mkcall("block", nil, nil)} + return []ir.Node{mkcallstmt("block")} } // optimization: one-case select: single op. @@ -214,7 +214,7 @@ func walkSelectCases(cases []*ir.CommClause) []ir.Node { // TODO(mdempsky): There should be a cleaner way to // handle this. if base.Flag.Race { - r := mkcall("selectsetpc", nil, nil, typecheck.NodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(int64(i))))) + r := mkcallstmt("selectsetpc", typecheck.NodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(int64(i))))) init = append(init, r) } } @@ -229,7 +229,9 @@ func walkSelectCases(cases []*ir.CommClause) []ir.Node { r := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) r.Lhs = []ir.Node{chosen, recvOK} fn := typecheck.LookupRuntime("selectgo") - r.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, ir.NewInt(int64(nsends)), ir.NewInt(int64(nrecvs)), ir.NewBool(dflt == nil))} + var fnInit ir.Nodes + r.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), &fnInit, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, ir.NewInt(int64(nsends)), ir.NewInt(int64(nrecvs)), ir.NewBool(dflt == nil))} + init = append(init, fnInit...) init = append(init, typecheck.Stmt(r)) // selv and order are no longer alive after selectgo. diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go index 399fb2462b9f7..4273a62fe564e 100644 --- a/src/cmd/compile/internal/walk/walk.go +++ b/src/cmd/compile/internal/walk/walk.go @@ -96,6 +96,9 @@ func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt { var stop = errors.New("stop") func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) *ir.CallExpr { + if init == nil { + base.Fatalf("mkcall with nil init: %v", fn) + } if fn.Type() == nil || fn.Type().Kind() != types.TFUNC { base.Fatalf("mkcall %v %v", fn, fn.Type()) } @@ -115,10 +118,24 @@ func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.Cal return vmkcall(typecheck.LookupRuntime(name), t, init, args) } +func mkcallstmt(name string, args ...ir.Node) ir.Node { + return mkcallstmt1(typecheck.LookupRuntime(name), args...) +} + func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr { return vmkcall(fn, t, init, args) } +func mkcallstmt1(fn ir.Node, args ...ir.Node) ir.Node { + var init ir.Nodes + n := vmkcall(fn, nil, &init, args) + if len(init) == 0 { + return n + } + init.Append(n) + return ir.NewBlockStmt(n.Pos(), init) +} + func chanfn(name string, n int, t *types.Type) ir.Node { if !t.IsChan() { base.Fatalf("chanfn %v", t) From fd9a391cdd08385cead816b41bed381d694859f6 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Wed, 20 Jan 2021 14:46:38 +0700 Subject: [PATCH 416/474] [dev.regabi] cmd/compile: remove CallExpr.Rargs Instead, push the temps assignments to init. 
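Schematically, for a call f(g()) whose argument may itself contain a
function call (the temporary name here is illustrative):

	// before: n.Args  = [.autotmp_1 = g()]  (temp assignments)
	//         n.Rargs = [.autotmp_1]        (the real arguments)
	// after:  init    = [.autotmp_1 = g()]  (pushed to init)
	//         n.Args  = [.autotmp_1]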
This does not pass toolstash, since when before this, the temps were evaluated after function callee, now we evaluate them before. Change-Id: Icb9cb10e036925b56c1ef3eec468416a11f4932f Reviewed-on: https://go-review.googlesource.com/c/go/+/284894 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ir/expr.go | 1 - src/cmd/compile/internal/ir/node_gen.go | 5 --- src/cmd/compile/internal/ssagen/ssa.go | 44 +++---------------------- src/cmd/compile/internal/walk/expr.go | 6 ++-- 4 files changed, 8 insertions(+), 48 deletions(-) diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index e944a0b1550eb..b32ed71260d3e 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -159,7 +159,6 @@ type CallExpr struct { origNode X Node Args Nodes - Rargs Nodes // TODO(rsc): Delete. KeepAlive []*Name // vars to be kept alive until call returns IsDDD bool Use CallUse diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index af9ee8d86ecd5..fe436867b2c3d 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -250,7 +250,6 @@ func (n *CallExpr) copy() Node { c := *n c.init = copyNodes(c.init) c.Args = copyNodes(c.Args) - c.Rargs = copyNodes(c.Rargs) c.KeepAlive = copyNames(c.KeepAlive) return &c } @@ -264,9 +263,6 @@ func (n *CallExpr) doChildren(do func(Node) bool) bool { if doNodes(n.Args, do) { return true } - if doNodes(n.Rargs, do) { - return true - } if doNames(n.KeepAlive, do) { return true } @@ -278,7 +274,6 @@ func (n *CallExpr) editChildren(edit func(Node) Node) { n.X = edit(n.X).(Node) } editNodes(n.Args, edit) - editNodes(n.Rargs, edit) editNames(n.KeepAlive, edit) } diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 5ba8579f6a7d7..ecf3294082861 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -4492,30 +4492,8 @@ func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value { // intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them. func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value { - // Construct map of temps; see comments in s.call about the structure of n. - temps := map[ir.Node]*ssa.Value{} - for _, a := range n.Args { - if a.Op() != ir.OAS { - s.Fatalf("non-assignment as a temp function argument %v", a.Op()) - } - a := a.(*ir.AssignStmt) - l, r := a.X, a.Y - if l.Op() != ir.ONAME { - s.Fatalf("non-ONAME temp function argument %v", a.Op()) - } - // Evaluate and store to "temporary". - // Walk ensures these temporaries are dead outside of n. - temps[l] = s.expr(r) - } - args := make([]*ssa.Value, len(n.Rargs)) - for i, n := range n.Rargs { - // Store a value to an argument slot. - if x, ok := temps[n]; ok { - // This is a previously computed temporary. - args[i] = x - continue - } - // This is an explicit value; evaluate it. + args := make([]*ssa.Value, len(n.Args)) + for i, n := range n.Args { args[i] = s.expr(n) } return args @@ -4528,13 +4506,6 @@ func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value { // (as well as the deferBits variable), and this will enable us to run the proper // defer calls during panics. func (s *state) openDeferRecord(n *ir.CallExpr) { - // Do any needed expression evaluation for the args (including the - // receiver, if any). This may be evaluating something like 'autotmp_3 = - // once.mutex'. 
Such a statement will create a mapping in s.vars[] from - // the autotmp name to the evaluated SSA arg value, but won't do any - // stores to the stack. - s.stmtList(n.Args) - var args []*ssa.Value var argNodes []*ir.Name @@ -4567,7 +4538,7 @@ func (s *state) openDeferRecord(n *ir.CallExpr) { opendefer.closureNode = opendefer.closure.Aux.(*ir.Name) opendefer.rcvrNode = opendefer.rcvr.Aux.(*ir.Name) } - for _, argn := range n.Rargs { + for _, argn := range n.Args { var v *ssa.Value if TypeOK(argn.Type()) { v = s.openDeferSave(nil, argn.Type(), s.expr(argn)) @@ -4853,11 +4824,6 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val types.CalcSize(fn.Type()) stksize := fn.Type().ArgWidth() // includes receiver, args, and results - // Run all assignments of temps. - // The temps are introduced to avoid overwriting argument - // slots when arguments themselves require function calls. - s.stmtList(n.Args) - var call *ssa.Value if k == callDeferStack { testLateExpansion = ssa.LateCallExpansionEnabledWithin(s.f) @@ -4891,7 +4857,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val // Then, store all the arguments of the defer call. ft := fn.Type() off := t.FieldOff(12) - args := n.Rargs + args := n.Args // Set receiver (for interface calls). Always a pointer. if rcvr != nil { @@ -4966,7 +4932,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val // Write args. t := n.X.Type() - args := n.Rargs + args := n.Args if n.Op() == ir.OCALLMETH { base.Fatalf("OCALLMETH missed by walkCall") } diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index 82a76dc239e11..bc4ae23759cfc 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -535,15 +535,15 @@ func walkCall1(n *ir.CallExpr, init *ir.Nodes) { if mayCall(arg) { // assignment of arg to Temp tmp := typecheck.Temp(param.Type) - a := convas(ir.NewAssignStmt(base.Pos, tmp, arg), init) + a := convas(typecheck.Stmt(ir.NewAssignStmt(base.Pos, tmp, arg)).(*ir.AssignStmt), init) tempAssigns = append(tempAssigns, a) // replace arg with temp args[i] = tmp } } - n.Args = tempAssigns - n.Rargs = args + init.Append(tempAssigns...) + n.Args = args } // walkDivMod walks an ODIV or OMOD node. From 68a46644752b6bc8de8d2b82b7f2354f3b52b50a Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Thu, 21 Jan 2021 12:08:46 +0700 Subject: [PATCH 417/474] [dev.regabi] cmd/compile: remove tempAssigns in walkCall1 Passes toolstash -cmp. Change-Id: I588c663324443e02b901cda461b999ff192e150c Reviewed-on: https://go-review.googlesource.com/c/go/+/284896 Run-TryBot: Cuong Manh Le Trust: Cuong Manh Le Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/walk/expr.go | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index bc4ae23759cfc..d7a20206c89f0 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -521,10 +521,6 @@ func walkCall1(n *ir.CallExpr, init *ir.Nodes) { n.X = walkExpr(n.X, init) walkExprList(args, init) - // For any argument whose evaluation might require a function call, - // store that argument into a temporary variable, - // to prevent that calls from clobbering arguments already on the stack. - var tempAssigns []ir.Node for i, arg := range args { // Validate argument and parameter types match. 
param := params.Field(i) @@ -532,17 +528,18 @@ func walkCall1(n *ir.CallExpr, init *ir.Nodes) { base.FatalfAt(n.Pos(), "assigning %L to parameter %v (type %v)", arg, param.Sym, param.Type) } + // For any argument whose evaluation might require a function call, + // store that argument into a temporary variable, + // to prevent that calls from clobbering arguments already on the stack. if mayCall(arg) { // assignment of arg to Temp tmp := typecheck.Temp(param.Type) - a := convas(typecheck.Stmt(ir.NewAssignStmt(base.Pos, tmp, arg)).(*ir.AssignStmt), init) - tempAssigns = append(tempAssigns, a) + init.Append(convas(typecheck.Stmt(ir.NewAssignStmt(base.Pos, tmp, arg)).(*ir.AssignStmt), init)) // replace arg with temp args[i] = tmp } } - init.Append(tempAssigns...) n.Args = args } From 970d8b6cb2ca5302f09a4eb8bfe90c4baea9cf88 Mon Sep 17 00:00:00 2001 From: Baokun Lee Date: Thu, 21 Jan 2021 14:13:36 +0800 Subject: [PATCH 418/474] [dev.regabi] cmd/compile: replace ir.Name map with ir.NameSet in inlining As CL 282212 mentioned, we should clean all map[*ir.Name]bool with ir.NameSet. Passes toolstash -cmp. Updates #43819 Change-Id: I1ce5d2055f88539f807dc021cd8e3941b425bc4e Reviewed-on: https://go-review.googlesource.com/c/go/+/284897 Run-TryBot: Baokun Lee TryBot-Result: Go Bot Trust: Baokun Lee Reviewed-by: Cuong Manh Le Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/inline/inl.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 46f093b1f8fb3..83f6740a48d1d 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -73,7 +73,7 @@ func InlinePackage() { }) } -// Caninl determines whether fn is inlineable. +// CanInline determines whether fn is inlineable. // If so, CanInline saves fn->nbody in fn->inl and substitutes it with a copy. // fn and ->nbody will already have been typechecked. func CanInline(fn *ir.Func) { @@ -169,7 +169,6 @@ func CanInline(fn *ir.Func) { visitor := hairyVisitor{ budget: inlineMaxBudget, extraCallCost: cc, - usedLocals: make(map[*ir.Name]bool), } if visitor.tooHairy(fn) { reason = visitor.reason @@ -254,7 +253,7 @@ type hairyVisitor struct { budget int32 reason string extraCallCost int32 - usedLocals map[*ir.Name]bool + usedLocals ir.NameSet do func(ir.Node) bool } @@ -410,7 +409,7 @@ func (v *hairyVisitor) doNode(n ir.Node) bool { case ir.ONAME: n := n.(*ir.Name) if n.Class == ir.PAUTO { - v.usedLocals[n] = true + v.usedLocals.Add(n) } case ir.OBLOCK: @@ -1383,7 +1382,7 @@ func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name { s := make([]*ir.Name, 0, len(ll)) for _, n := range ll { if n.Class == ir.PAUTO { - if _, found := vis.usedLocals[n]; !found { + if !vis.usedLocals.Has(n) { continue } } From 5248f59a224e390cc59c9850f7795479f07757a7 Mon Sep 17 00:00:00 2001 From: Baokun Lee Date: Thu, 21 Jan 2021 15:07:25 +0800 Subject: [PATCH 419/474] [dev.regabi] cmd/compile: replace ir.Name map with ir.NameSet for SSA Same as CL 284897, but for SSA. Passes toolstash -cmp. 
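The rewrite is the same mechanical pattern in each of these CLs
(a sketch; used stands for any of the replaced maps):

	// before
	used := make(map[*ir.Name]bool)
	used[n] = true
	if used[n] {
		// ...
	}

	// after: the zero value NameSet is ready to use
	var used ir.NameSet
	used.Add(n)
	if used.Has(n) {
		// ...
	}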
Updates #43819 Change-Id: I3c500ad635a3192d95d16fdc36f154ba3ea5df69 Reviewed-on: https://go-review.googlesource.com/c/go/+/284898 Run-TryBot: Baokun Lee Reviewed-by: Cuong Manh Le TryBot-Result: Go Bot Trust: Baokun Lee --- src/cmd/compile/internal/ssa/deadstore.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go index 530918da4d61d..0cf9931dbcd83 100644 --- a/src/cmd/compile/internal/ssa/deadstore.go +++ b/src/cmd/compile/internal/ssa/deadstore.go @@ -139,7 +139,7 @@ func dse(f *Func) { func elimDeadAutosGeneric(f *Func) { addr := make(map[*Value]*ir.Name) // values that the address of the auto reaches elim := make(map[*Value]*ir.Name) // values that could be eliminated if the auto is - used := make(map[*ir.Name]bool) // used autos that must be kept + var used ir.NameSet // used autos that must be kept // visit the value and report whether any of the maps are updated visit := func(v *Value) (changed bool) { @@ -178,8 +178,8 @@ func elimDeadAutosGeneric(f *Func) { if !ok || n.Class != ir.PAUTO { return } - if !used[n] { - used[n] = true + if !used.Has(n) { + used.Add(n) changed = true } return @@ -212,8 +212,8 @@ func elimDeadAutosGeneric(f *Func) { if v.Type.IsMemory() || v.Type.IsFlags() || v.Op == OpPhi || v.MemoryArg() != nil { for _, a := range args { if n, ok := addr[a]; ok { - if !used[n] { - used[n] = true + if !used.Has(n) { + used.Add(n) changed = true } } @@ -224,7 +224,7 @@ func elimDeadAutosGeneric(f *Func) { // Propagate any auto addresses through v. var node *ir.Name for _, a := range args { - if n, ok := addr[a]; ok && !used[n] { + if n, ok := addr[a]; ok && !used.Has(n) { if node == nil { node = n } else if node != n { @@ -233,7 +233,7 @@ func elimDeadAutosGeneric(f *Func) { // multiple pointers (e.g. NeqPtr, Phi etc.). // This is rare, so just propagate the first // value to keep things simple. - used[n] = true + used.Add(n) changed = true } } @@ -249,7 +249,7 @@ func elimDeadAutosGeneric(f *Func) { } if addr[v] != node { // This doesn't happen in practice, but catch it just in case. - used[node] = true + used.Add(node) changed = true } return @@ -269,8 +269,8 @@ func elimDeadAutosGeneric(f *Func) { } // keep the auto if its address reaches a control value for _, c := range b.ControlValues() { - if n, ok := addr[c]; ok && !used[n] { - used[n] = true + if n, ok := addr[c]; ok && !used.Has(n) { + used.Add(n) changed = true } } @@ -282,7 +282,7 @@ func elimDeadAutosGeneric(f *Func) { // Eliminate stores to unread autos. for v, n := range elim { - if used[n] { + if used.Has(n) { continue } // replace with OpCopy From d7e71c01ad1c8edd568380ce9276c265dfd3635b Mon Sep 17 00:00:00 2001 From: Baokun Lee Date: Thu, 21 Jan 2021 15:24:38 +0800 Subject: [PATCH 420/474] [dev.regabi] cmd/compile: replace ir.Name map with ir.NameSet for dwarf Same as CL 284897, but for dwarf. Passes toolstash -cmp. 
Fixes #43819

Change-Id: Icbe43aa2e3cb96e6a6c318523c643247da8e4c74
Reviewed-on: https://go-review.googlesource.com/c/go/+/284899
Run-TryBot: Baokun Lee
Trust: Baokun Lee
TryBot-Result: Go Bot
Reviewed-by: Cuong Manh Le
Reviewed-by: Matthew Dempsky
---
 src/cmd/compile/internal/dwarfgen/dwarf.go | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go
index bf039c8fbbe6f..dd22c033ccd57 100644
--- a/src/cmd/compile/internal/dwarfgen/dwarf.go
+++ b/src/cmd/compile/internal/dwarfgen/dwarf.go
@@ -136,7 +136,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
 	// Collect a raw list of DWARF vars.
 	var vars []*dwarf.Var
 	var decls []*ir.Name
-	var selected map[*ir.Name]bool
+	var selected ir.NameSet
 	if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK {
 		decls, vars, selected = createComplexVars(fnsym, fn)
 	} else {
@@ -161,7 +161,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
 	// For non-SSA-able arguments, however, the correct information
 	// is known -- they have a single home on the stack.
 	for _, n := range dcl {
-		if _, found := selected[n]; found {
+		if selected.Has(n) {
 			continue
 		}
 		c := n.Sym().Name[0]
@@ -244,10 +244,10 @@ func preInliningDcls(fnsym *obj.LSym) []*ir.Name {

 // createSimpleVars creates a DWARF entry for every variable declared in the
 // function, claiming that they are permanently on the stack.
-func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) {
+func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, ir.NameSet) {
 	var vars []*dwarf.Var
 	var decls []*ir.Name
-	selected := make(map[*ir.Name]bool)
+	var selected ir.NameSet
 	for _, n := range apDecls {
 		if ir.IsAutoTmp(n) {
 			continue
@@ -255,7 +255,7 @@ func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf

 		decls = append(decls, n)
 		vars = append(vars, createSimpleVar(fnsym, n))
-		selected[n] = true
+		selected.Add(n)
 	}
 	return decls, vars, selected
 }
@@ -312,19 +312,19 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var {

 // createComplexVars creates recomposed DWARF vars with location lists,
 // suitable for describing optimized code.
-func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) {
+func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, ir.NameSet) {
 	debugInfo := fn.DebugInfo.(*ssa.FuncDebug)

 	// Produce a DWARF variable entry for each user variable.
 	var decls []*ir.Name
 	var vars []*dwarf.Var
-	ssaVars := make(map[*ir.Name]bool)
+	var ssaVars ir.NameSet

 	for varID, dvar := range debugInfo.Vars {
 		n := dvar
-		ssaVars[n] = true
+		ssaVars.Add(n)

 		for _, slot := range debugInfo.VarSlots[varID] {
-			ssaVars[debugInfo.Slots[slot].N] = true
+			ssaVars.Add(debugInfo.Slots[slot].N)
 		}

 		if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {

From 51e1819a8d2ecb6ed292ca363cbb8edfea4aea65 Mon Sep 17 00:00:00 2001
From: Dan Scales
Date: Fri, 22 Jan 2021 14:32:06 -0800
Subject: [PATCH 421/474] [dev.regabi] cmd/compile: scan body of closure in
 tooHairy to check for disallowed nodes

Several of the bugs in #43818 are because we were not scanning the
body of a possibly inlined closure in tooHairy(). I think this
scanning got lost in the rebase past some of the ir changes.
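For example (a hypothetical reduced case, not one of the new tests):
without scanning the closure body, the select below was never seen by
the visitor, even though OSELECT is one of the disallowed ops:

	func f(c chan int) func() int {
		return func() int { // body skipped by ir.DoChildren
			select {
			case v := <-c:
				return v
			}
		}
	}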
This fixes the issue related to SELRECV2 and the bug reported by
cuonglm. There is at least one other bug related to escape analysis,
which I'll fix in another change.

Change-Id: I8f38cd12a287881155403bbabbc540ed5fc2248e
Reviewed-on: https://go-review.googlesource.com/c/go/+/285676
Trust: Dan Scales
Run-TryBot: Dan Scales
TryBot-Result: Go Bot
Reviewed-by: Matthew Dempsky
---
 src/cmd/compile/internal/inline/inl.go | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go
index 83f6740a48d1d..9f9bb87dd5397 100644
--- a/src/cmd/compile/internal/inline/inl.go
+++ b/src/cmd/compile/internal/inline/inl.go
@@ -354,10 +354,16 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
 		return true

 	case ir.OCLOSURE:
-		// TODO(danscales) - fix some bugs when budget is lowered below 30
+		// TODO(danscales) - fix some bugs when budget is lowered below 15
 		// Maybe make budget proportional to number of closure variables, e.g.:
 		//v.budget -= int32(len(n.(*ir.ClosureExpr).Func.ClosureVars) * 3)
-		v.budget -= 30
+		v.budget -= 15
+		// Scan body of closure (which DoChildren doesn't automatically
+		// do) to check for disallowed ops in the body and include the
+		// body in the budget.
+		if doList(n.(*ir.ClosureExpr).Func.Body, v.do) {
+			return true
+		}

 	case ir.ORANGE,
 		ir.OSELECT,

From 48badc5fa863ce5e7e8ac9f268f13955483070e3 Mon Sep 17 00:00:00 2001
From: Dan Scales
Date: Fri, 22 Jan 2021 16:07:00 -0800
Subject: [PATCH 422/474] [dev.regabi] cmd/compile: fix escape analysis
 problem with closures

In reflectdata.methodWrapper, we call escape analysis without
including the full batch of dependent functions, in particular the
closure functions. Because of this, we have not created locations for
the params/local variables of a closure when we are processing a
function that inlines that closure. (Whereas in the normal compilation
of the function, we do call escape analysis with the full batch.)

To deal with this, I am creating locations for the params/local
variables of a closure when needed. Without this fix, the new test
closure6.go would fail.

Updates #43818

Change-Id: I5f91cfb6f35efe2937ef88cbcc468e403e0da9ad
Reviewed-on: https://go-review.googlesource.com/c/go/+/285677
Run-TryBot: Dan Scales
TryBot-Result: Go Bot
Trust: Dan Scales
Reviewed-by: Matthew Dempsky
---
 src/cmd/compile/internal/escape/escape.go | 10 ++++++++++
 test/closure6.go                          | 18 ++++++++++++++++++
 2 files changed, 28 insertions(+)
 create mode 100644 test/closure6.go

diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go
index 883e68a730c1c..58cad73c76230 100644
--- a/src/cmd/compile/internal/escape/escape.go
+++ b/src/cmd/compile/internal/escape/escape.go
@@ -781,6 +781,16 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) {
 			}
 		}

+		for _, n := range fn.Dcl {
+			// Add locations for local variables of the
+			// closure, if needed, in case we're not including
+			// the closure func in the batch for escape
+			// analysis (happens for escape analysis called
+			// from reflectdata.methodWrapper)
+			if n.Op() == ir.ONAME && n.Opt == nil {
+				e.with(fn).newLoc(n, false)
+			}
+		}
 		e.walkFunc(fn)
 	}

diff --git a/test/closure6.go b/test/closure6.go
new file mode 100644
index 0000000000000..b5592ad3d3e07
--- /dev/null
+++ b/test/closure6.go
@@ -0,0 +1,18 @@
+// compile
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package p + +type Float64Slice []float64 + +func (a Float64Slice) Search1(x float64) int { + f := func(q int) bool { return a[q] >= x } + i := 0 + if !f(3) { + i = 5 + } + return i +} From d05d6fab32cb3d47f8682d19ca11085430f39164 Mon Sep 17 00:00:00 2001 From: Baokun Lee Date: Sat, 23 Jan 2021 17:05:01 +0800 Subject: [PATCH 423/474] [dev.regabi] cmd/compile: replace ir.Name map with ir.NameSet for SSA 2 Same as CL 284897, the last one. Passes toolstash -cmp. Updates #43819 Change-Id: I0bd8958b3717fb58a5a6576f1819a85f33b76e2d Reviewed-on: https://go-review.googlesource.com/c/go/+/285913 Run-TryBot: Baokun Lee TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky Trust: Baokun Lee --- src/cmd/compile/internal/ssa/deadstore.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go index 0cf9931dbcd83..31d3f62d4e7ce 100644 --- a/src/cmd/compile/internal/ssa/deadstore.go +++ b/src/cmd/compile/internal/ssa/deadstore.go @@ -299,7 +299,7 @@ func elimUnreadAutos(f *Func) { // Loop over all ops that affect autos taking note of which // autos we need and also stores that we might be able to // eliminate. - seen := make(map[*ir.Name]bool) + var seen ir.NameSet var stores []*Value for _, b := range f.Blocks { for _, v := range b.Values { @@ -317,7 +317,7 @@ func elimUnreadAutos(f *Func) { // If we haven't seen the auto yet // then this might be a store we can // eliminate. - if !seen[n] { + if !seen.Has(n) { stores = append(stores, v) } default: @@ -327,7 +327,7 @@ func elimUnreadAutos(f *Func) { // because dead loads haven't been // eliminated yet. if v.Uses > 0 { - seen[n] = true + seen.Add(n) } } } @@ -336,7 +336,7 @@ func elimUnreadAutos(f *Func) { // Eliminate stores to unread autos. for _, store := range stores { n, _ := store.Aux.(*ir.Name) - if seen[n] { + if seen.Has(n) { continue } From 063c72f06d8673f3a2a03fd549c61935ca3e5cc5 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 24 Jan 2021 10:52:00 -0800 Subject: [PATCH 424/474] [dev.regabi] cmd/compile: backport changes from dev.typeparams (9456804) This CL backports a bunch of changes that landed on dev.typeparams, but are not dependent on types2 or generics. By backporting, we reduce the divergence between development branches, hopefully improving test coverage and reducing risk of merge conflicts. Updates #43866. 
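Of the backported pieces, the new dwarfgen.ScopeMarker replaces
noder's hand-rolled scope bookkeeping; roughly (a usage sketch based
on the noder changes below; pos is a src.XPos):

	var fs funcState      // per-function noder state
	fs.marker.Push(pos)   // enter a child scope
	// ... declarations ...
	fs.marker.Pop(pos)    // scope had declarations; back to parent
	// or fs.marker.Unpush() to retract an empty scope
	fs.marker.WriteTo(fn) // record Parents and Marks on the ir.Func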
Change-Id: I382510855c9b5fac52b17066e44a00bd07fe86f5 Reviewed-on: https://go-review.googlesource.com/c/go/+/286172 Trust: Matthew Dempsky Trust: Robert Griesemer Run-TryBot: Matthew Dempsky Reviewed-by: Robert Griesemer --- src/cmd/compile/internal/dwarfgen/marker.go | 94 ++++++ src/cmd/compile/internal/noder/import.go | 239 ++++++++-------- src/cmd/compile/internal/noder/noder.go | 268 ++++++------------ src/cmd/compile/internal/noder/posmap.go | 83 ++++++ .../compile/internal/reflectdata/reflect.go | 2 +- src/cmd/compile/internal/typecheck/dcl.go | 5 +- src/cmd/compile/internal/typecheck/func.go | 4 +- src/cmd/compile/internal/typecheck/subr.go | 7 +- .../compile/internal/typecheck/typecheck.go | 8 +- src/cmd/compile/internal/types/pkg.go | 3 +- src/cmd/compile/internal/types/scope.go | 2 +- src/cmd/compile/internal/walk/walk.go | 5 + src/cmd/internal/obj/link.go | 6 +- test/fixedbugs/issue11362.go | 2 +- 14 files changed, 398 insertions(+), 330 deletions(-) create mode 100644 src/cmd/compile/internal/dwarfgen/marker.go create mode 100644 src/cmd/compile/internal/noder/posmap.go diff --git a/src/cmd/compile/internal/dwarfgen/marker.go b/src/cmd/compile/internal/dwarfgen/marker.go new file mode 100644 index 0000000000000..ec6ce45a900bc --- /dev/null +++ b/src/cmd/compile/internal/dwarfgen/marker.go @@ -0,0 +1,94 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dwarfgen + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/internal/src" +) + +// A ScopeMarker tracks scope nesting and boundaries for later use +// during DWARF generation. +type ScopeMarker struct { + parents []ir.ScopeID + marks []ir.Mark +} + +// checkPos validates the given position and returns the current scope. +func (m *ScopeMarker) checkPos(pos src.XPos) ir.ScopeID { + if !pos.IsKnown() { + base.Fatalf("unknown scope position") + } + + if len(m.marks) == 0 { + return 0 + } + + last := &m.marks[len(m.marks)-1] + if xposBefore(pos, last.Pos) { + base.FatalfAt(pos, "non-monotonic scope positions\n\t%v: previous scope position", base.FmtPos(last.Pos)) + } + return last.Scope +} + +// Push records a transition to a new child scope of the current scope. +func (m *ScopeMarker) Push(pos src.XPos) { + current := m.checkPos(pos) + + m.parents = append(m.parents, current) + child := ir.ScopeID(len(m.parents)) + + m.marks = append(m.marks, ir.Mark{Pos: pos, Scope: child}) +} + +// Pop records a transition back to the current scope's parent. +func (m *ScopeMarker) Pop(pos src.XPos) { + current := m.checkPos(pos) + + parent := m.parents[current-1] + + m.marks = append(m.marks, ir.Mark{Pos: pos, Scope: parent}) +} + +// Unpush removes the current scope, which must be empty. +func (m *ScopeMarker) Unpush() { + i := len(m.marks) - 1 + current := m.marks[i].Scope + + if current != ir.ScopeID(len(m.parents)) { + base.FatalfAt(m.marks[i].Pos, "current scope is not empty") + } + + m.parents = m.parents[:current-1] + m.marks = m.marks[:i] +} + +// WriteTo writes the recorded scope marks to the given function, +// and resets the marker for reuse. 
+func (m *ScopeMarker) WriteTo(fn *ir.Func) { + m.compactMarks() + + fn.Parents = make([]ir.ScopeID, len(m.parents)) + copy(fn.Parents, m.parents) + m.parents = m.parents[:0] + + fn.Marks = make([]ir.Mark, len(m.marks)) + copy(fn.Marks, m.marks) + m.marks = m.marks[:0] +} + +func (m *ScopeMarker) compactMarks() { + n := 0 + for _, next := range m.marks { + if n > 0 && next.Pos == m.marks[n-1].Pos { + m.marks[n-1].Scope = next.Scope + continue + } + m.marks[n] = next + n++ + } + m.marks = m.marks[:n] +} diff --git a/src/cmd/compile/internal/noder/import.go b/src/cmd/compile/internal/noder/import.go index ca041a156c145..747c30e6ff103 100644 --- a/src/cmd/compile/internal/noder/import.go +++ b/src/cmd/compile/internal/noder/import.go @@ -5,18 +5,20 @@ package noder import ( + "errors" "fmt" - "go/constant" "os" - "path" + pathpkg "path" "runtime" "sort" + "strconv" "strings" "unicode" "unicode/utf8" "cmd/compile/internal/base" "cmd/compile/internal/ir" + "cmd/compile/internal/syntax" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/archive" @@ -38,160 +40,157 @@ func islocalname(name string) bool { strings.HasPrefix(name, "../") || name == ".." } -func findpkg(name string) (file string, ok bool) { - if islocalname(name) { +func openPackage(path string) (*os.File, error) { + if islocalname(path) { if base.Flag.NoLocalImports { - return "", false + return nil, errors.New("local imports disallowed") } if base.Flag.Cfg.PackageFile != nil { - file, ok = base.Flag.Cfg.PackageFile[name] - return file, ok + return os.Open(base.Flag.Cfg.PackageFile[path]) } - // try .a before .6. important for building libraries: - // if there is an array.6 in the array.a library, - // want to find all of array.a, not just array.6. - file = fmt.Sprintf("%s.a", name) - if _, err := os.Stat(file); err == nil { - return file, true + // try .a before .o. important for building libraries: + // if there is an array.o in the array.a library, + // want to find all of array.a, not just array.o. + if file, err := os.Open(fmt.Sprintf("%s.a", path)); err == nil { + return file, nil } - file = fmt.Sprintf("%s.o", name) - if _, err := os.Stat(file); err == nil { - return file, true + if file, err := os.Open(fmt.Sprintf("%s.o", path)); err == nil { + return file, nil } - return "", false + return nil, errors.New("file not found") } // local imports should be canonicalized already. // don't want to see "encoding/../encoding/base64" // as different from "encoding/base64". 
- if q := path.Clean(name); q != name { - base.Errorf("non-canonical import path %q (should be %q)", name, q) - return "", false + if q := pathpkg.Clean(path); q != path { + return nil, fmt.Errorf("non-canonical import path %q (should be %q)", path, q) } if base.Flag.Cfg.PackageFile != nil { - file, ok = base.Flag.Cfg.PackageFile[name] - return file, ok + return os.Open(base.Flag.Cfg.PackageFile[path]) } for _, dir := range base.Flag.Cfg.ImportDirs { - file = fmt.Sprintf("%s/%s.a", dir, name) - if _, err := os.Stat(file); err == nil { - return file, true + if file, err := os.Open(fmt.Sprintf("%s/%s.a", dir, path)); err == nil { + return file, nil } - file = fmt.Sprintf("%s/%s.o", dir, name) - if _, err := os.Stat(file); err == nil { - return file, true + if file, err := os.Open(fmt.Sprintf("%s/%s.o", dir, path)); err == nil { + return file, nil } } if objabi.GOROOT != "" { suffix := "" - suffixsep := "" if base.Flag.InstallSuffix != "" { - suffixsep = "_" - suffix = base.Flag.InstallSuffix + suffix = "_" + base.Flag.InstallSuffix } else if base.Flag.Race { - suffixsep = "_" - suffix = "race" + suffix = "_race" } else if base.Flag.MSan { - suffixsep = "_" - suffix = "msan" + suffix = "_msan" } - file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.a", objabi.GOROOT, objabi.GOOS, objabi.GOARCH, suffixsep, suffix, name) - if _, err := os.Stat(file); err == nil { - return file, true + if file, err := os.Open(fmt.Sprintf("%s/pkg/%s_%s%s/%s.a", objabi.GOROOT, objabi.GOOS, objabi.GOARCH, suffix, path)); err == nil { + return file, nil } - file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.o", objabi.GOROOT, objabi.GOOS, objabi.GOARCH, suffixsep, suffix, name) - if _, err := os.Stat(file); err == nil { - return file, true + if file, err := os.Open(fmt.Sprintf("%s/pkg/%s_%s%s/%s.o", objabi.GOROOT, objabi.GOOS, objabi.GOARCH, suffix, path)); err == nil { + return file, nil } } - - return "", false + return nil, errors.New("file not found") } // myheight tracks the local package's height based on packages // imported so far. var myheight int -func importfile(f constant.Value) *types.Pkg { - if f.Kind() != constant.String { - base.Errorf("import path must be a string") - return nil - } - - path_ := constant.StringVal(f) - if len(path_) == 0 { - base.Errorf("import path is empty") - return nil - } - - if isbadimport(path_, false) { - return nil - } - +// resolveImportPath resolves an import path as it appears in a Go +// source file to the package's full path. +func resolveImportPath(path string) (string, error) { // The package name main is no longer reserved, // but we reserve the import path "main" to identify // the main package, just as we reserve the import // path "math" to identify the standard math package. 
- if path_ == "main" { - base.Errorf("cannot import \"main\"") - base.ErrorExit() - } - - if base.Ctxt.Pkgpath != "" && path_ == base.Ctxt.Pkgpath { - base.Errorf("import %q while compiling that package (import cycle)", path_) - base.ErrorExit() + if path == "main" { + return "", errors.New("cannot import \"main\"") } - if mapped, ok := base.Flag.Cfg.ImportMap[path_]; ok { - path_ = mapped + if base.Ctxt.Pkgpath != "" && path == base.Ctxt.Pkgpath { + return "", fmt.Errorf("import %q while compiling that package (import cycle)", path) } - if path_ == "unsafe" { - return ir.Pkgs.Unsafe + if mapped, ok := base.Flag.Cfg.ImportMap[path]; ok { + path = mapped } - if islocalname(path_) { - if path_[0] == '/' { - base.Errorf("import path cannot be absolute path") - return nil + if islocalname(path) { + if path[0] == '/' { + return "", errors.New("import path cannot be absolute path") } - prefix := base.Ctxt.Pathname - if base.Flag.D != "" { - prefix = base.Flag.D + prefix := base.Flag.D + if prefix == "" { + // Questionable, but when -D isn't specified, historically we + // resolve local import paths relative to the directory the + // compiler's current directory, not the respective source + // file's directory. + prefix = base.Ctxt.Pathname } - path_ = path.Join(prefix, path_) + path = pathpkg.Join(prefix, path) - if isbadimport(path_, true) { - return nil + if err := checkImportPath(path, true); err != nil { + return "", err } } - file, found := findpkg(path_) - if !found { - base.Errorf("can't find import: %q", path_) - base.ErrorExit() + return path, nil +} + +// TODO(mdempsky): Return an error instead. +func importfile(decl *syntax.ImportDecl) *types.Pkg { + if decl.Path.Kind != syntax.StringLit { + base.Errorf("import path must be a string") + return nil } - importpkg := types.NewPkg(path_, "") - if importpkg.Imported { - return importpkg + path, err := strconv.Unquote(decl.Path.Value) + if err != nil { + base.Errorf("import path must be a string") + return nil + } + + if err := checkImportPath(path, false); err != nil { + base.Errorf("%s", err.Error()) + return nil } - importpkg.Imported = true + path, err = resolveImportPath(path) + if err != nil { + base.Errorf("%s", err) + return nil + } + + importpkg := types.NewPkg(path, "") + if importpkg.Direct { + return importpkg // already fully loaded + } + importpkg.Direct = true + typecheck.Target.Imports = append(typecheck.Target.Imports, importpkg) + + if path == "unsafe" { + return importpkg // initialized with universe + } - imp, err := bio.Open(file) + f, err := openPackage(path) if err != nil { - base.Errorf("can't open import: %q: %v", path_, err) + base.Errorf("could not import %q: %v", path, err) base.ErrorExit() } + imp := bio.NewReader(f) defer imp.Close() + file := f.Name() // check object header p, err := imp.ReadString('\n') @@ -261,12 +260,12 @@ func importfile(f constant.Value) *types.Pkg { var fingerprint goobj.FingerprintType switch c { case '\n': - base.Errorf("cannot import %s: old export format no longer supported (recompile library)", path_) + base.Errorf("cannot import %s: old export format no longer supported (recompile library)", path) return nil case 'B': if base.Debug.Export != 0 { - fmt.Printf("importing %s (%s)\n", path_, file) + fmt.Printf("importing %s (%s)\n", path, file) } imp.ReadByte() // skip \n after $$B @@ -285,17 +284,17 @@ func importfile(f constant.Value) *types.Pkg { fingerprint = typecheck.ReadImports(importpkg, imp) default: - base.Errorf("no import in %q", path_) + base.Errorf("no import in %q", 
path) base.ErrorExit() } // assume files move (get installed) so don't record the full path if base.Flag.Cfg.PackageFile != nil { // If using a packageFile map, assume path_ can be recorded directly. - base.Ctxt.AddImport(path_, fingerprint) + base.Ctxt.AddImport(path, fingerprint) } else { // For file "/Users/foo/go/pkg/darwin_amd64/math.a" record "math.a". - base.Ctxt.AddImport(file[len(file)-len(path_)-len(".a"):], fingerprint) + base.Ctxt.AddImport(file[len(file)-len(path)-len(".a"):], fingerprint) } if importpkg.Height >= myheight { @@ -315,47 +314,37 @@ var reservedimports = []string{ "type", } -func isbadimport(path string, allowSpace bool) bool { +func checkImportPath(path string, allowSpace bool) error { + if path == "" { + return errors.New("import path is empty") + } + if strings.Contains(path, "\x00") { - base.Errorf("import path contains NUL") - return true + return errors.New("import path contains NUL") } for _, ri := range reservedimports { if path == ri { - base.Errorf("import path %q is reserved and cannot be used", path) - return true + return fmt.Errorf("import path %q is reserved and cannot be used", path) } } for _, r := range path { - if r == utf8.RuneError { - base.Errorf("import path contains invalid UTF-8 sequence: %q", path) - return true - } - - if r < 0x20 || r == 0x7f { - base.Errorf("import path contains control character: %q", path) - return true - } - - if r == '\\' { - base.Errorf("import path contains backslash; use slash: %q", path) - return true - } - - if !allowSpace && unicode.IsSpace(r) { - base.Errorf("import path contains space character: %q", path) - return true - } - - if strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r) { - base.Errorf("import path contains invalid character '%c': %q", r, path) - return true + switch { + case r == utf8.RuneError: + return fmt.Errorf("import path contains invalid UTF-8 sequence: %q", path) + case r < 0x20 || r == 0x7f: + return fmt.Errorf("import path contains control character: %q", path) + case r == '\\': + return fmt.Errorf("import path contains backslash; use slash: %q", path) + case !allowSpace && unicode.IsSpace(r): + return fmt.Errorf("import path contains space character: %q", path) + case strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r): + return fmt.Errorf("import path contains invalid character '%c': %q", r, path) } } - return false + return nil } func pkgnotused(lineno src.XPos, path string, name string) { diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index 5bb01895cc9ea..6aab18549aefc 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -17,6 +17,7 @@ import ( "unicode/utf8" "cmd/compile/internal/base" + "cmd/compile/internal/dwarfgen" "cmd/compile/internal/ir" "cmd/compile/internal/syntax" "cmd/compile/internal/typecheck" @@ -27,40 +28,26 @@ import ( func LoadPackage(filenames []string) { base.Timer.Start("fe", "parse") - lines := ParseFiles(filenames) - base.Timer.Stop() - base.Timer.AddEvent(int64(lines), "lines") - // Typecheck. - Package() + mode := syntax.CheckBranches - // With all user code typechecked, it's now safe to verify unused dot imports. - CheckDotImports() - base.ExitIfErrors() -} - -// ParseFiles concurrently parses files into *syntax.File structures. -// Each declaration in every *syntax.File is converted to a syntax tree -// and its root represented by *Node is appended to Target.Decls. -// Returns the total count of parsed lines. 
-func ParseFiles(filenames []string) uint { - noders := make([]*noder, 0, len(filenames)) // Limit the number of simultaneously open files. sem := make(chan struct{}, runtime.GOMAXPROCS(0)+10) - for _, filename := range filenames { - p := &noder{ - basemap: make(map[*syntax.PosBase]*src.PosBase), + noders := make([]*noder, len(filenames)) + for i, filename := range filenames { + p := noder{ err: make(chan syntax.Error), trackScopes: base.Flag.Dwarf, } - noders = append(noders, p) + noders[i] = &p - go func(filename string) { + filename := filename + go func() { sem <- struct{}{} defer func() { <-sem }() defer close(p.err) - base := syntax.NewFileBase(filename) + fbase := syntax.NewFileBase(filename) f, err := os.Open(filename) if err != nil { @@ -69,8 +56,8 @@ func ParseFiles(filenames []string) uint { } defer f.Close() - p.file, _ = syntax.Parse(base, f, p.error, p.pragma, syntax.CheckBranches) // errors are tracked via p.error - }(filename) + p.file, _ = syntax.Parse(fbase, f, p.error, p.pragma, mode) // errors are tracked via p.error + }() } var lines uint @@ -78,30 +65,27 @@ func ParseFiles(filenames []string) uint { for e := range p.err { p.errorAt(e.Pos, "%s", e.Msg) } + lines += p.file.Lines + } + base.Timer.AddEvent(int64(lines), "lines") + for _, p := range noders { p.node() - lines += p.file.Lines p.file = nil // release memory + } - if base.SyntaxErrors() != 0 { - base.ErrorExit() - } - // Always run CheckDclstack here, even when debug_dclstack is not set, as a sanity measure. - types.CheckDclstack() + if base.SyntaxErrors() != 0 { + base.ErrorExit() } + types.CheckDclstack() for _, p := range noders { p.processPragmas() } + // Typecheck. types.LocalPkg.Height = myheight - - return lines -} - -func Package() { typecheck.DeclareUniverse() - typecheck.TypecheckAllowed = true // Process top-level declarations in phases. @@ -166,44 +150,10 @@ func Package() { } // Phase 5: With all user code type-checked, it's now safe to verify map keys. + // With all user code typechecked, it's now safe to verify unused dot imports. typecheck.CheckMapKeys() - -} - -// makeSrcPosBase translates from a *syntax.PosBase to a *src.PosBase. -func (p *noder) makeSrcPosBase(b0 *syntax.PosBase) *src.PosBase { - // fast path: most likely PosBase hasn't changed - if p.basecache.last == b0 { - return p.basecache.base - } - - b1, ok := p.basemap[b0] - if !ok { - fn := b0.Filename() - if b0.IsFileBase() { - b1 = src.NewFileBase(fn, absFilename(fn)) - } else { - // line directive base - p0 := b0.Pos() - p0b := p0.Base() - if p0b == b0 { - panic("infinite recursion in makeSrcPosBase") - } - p1 := src.MakePos(p.makeSrcPosBase(p0b), p0.Line(), p0.Col()) - b1 = src.NewLinePragmaBase(p1, fn, fileh(fn), b0.Line(), b0.Col()) - } - p.basemap[b0] = b1 - } - - // update cache - p.basecache.last = b0 - p.basecache.base = b1 - - return b1 -} - -func (p *noder) makeXPos(pos syntax.Pos) (_ src.XPos) { - return base.Ctxt.PosTable.XPos(src.MakePos(p.makeSrcPosBase(pos.Base()), pos.Line(), pos.Col())) + CheckDotImports() + base.ExitIfErrors() } func (p *noder) errorAt(pos syntax.Pos, format string, args ...interface{}) { @@ -221,31 +171,33 @@ func absFilename(name string) string { // noder transforms package syntax's AST into a Node tree. 
type noder struct { - basemap map[*syntax.PosBase]*src.PosBase - basecache struct { - last *syntax.PosBase - base *src.PosBase - } + posMap file *syntax.File linknames []linkname pragcgobuf [][]string err chan syntax.Error - scope ir.ScopeID importedUnsafe bool importedEmbed bool + trackScopes bool - // scopeVars is a stack tracking the number of variables declared in the - // current function at the moment each open scope was opened. - trackScopes bool - scopeVars []int + funcState *funcState +} + +// funcState tracks all per-function state to make handling nested +// functions easier. +type funcState struct { + // scopeVars is a stack tracking the number of variables declared in + // the current function at the moment each open scope was opened. + scopeVars []int + marker dwarfgen.ScopeMarker lastCloseScopePos syntax.Pos } func (p *noder) funcBody(fn *ir.Func, block *syntax.BlockStmt) { - oldScope := p.scope - p.scope = 0 + outerFuncState := p.funcState + p.funcState = new(funcState) typecheck.StartFuncBody(fn) if block != nil { @@ -260,62 +212,34 @@ func (p *noder) funcBody(fn *ir.Func, block *syntax.BlockStmt) { } typecheck.FinishFuncBody() - p.scope = oldScope + p.funcState.marker.WriteTo(fn) + p.funcState = outerFuncState } func (p *noder) openScope(pos syntax.Pos) { + fs := p.funcState types.Markdcl() if p.trackScopes { - ir.CurFunc.Parents = append(ir.CurFunc.Parents, p.scope) - p.scopeVars = append(p.scopeVars, len(ir.CurFunc.Dcl)) - p.scope = ir.ScopeID(len(ir.CurFunc.Parents)) - - p.markScope(pos) + fs.scopeVars = append(fs.scopeVars, len(ir.CurFunc.Dcl)) + fs.marker.Push(p.makeXPos(pos)) } } func (p *noder) closeScope(pos syntax.Pos) { - p.lastCloseScopePos = pos + fs := p.funcState + fs.lastCloseScopePos = pos types.Popdcl() if p.trackScopes { - scopeVars := p.scopeVars[len(p.scopeVars)-1] - p.scopeVars = p.scopeVars[:len(p.scopeVars)-1] + scopeVars := fs.scopeVars[len(fs.scopeVars)-1] + fs.scopeVars = fs.scopeVars[:len(fs.scopeVars)-1] if scopeVars == len(ir.CurFunc.Dcl) { // no variables were declared in this scope, so we can retract it. - - if int(p.scope) != len(ir.CurFunc.Parents) { - base.Fatalf("scope tracking inconsistency, no variables declared but scopes were not retracted") - } - - p.scope = ir.CurFunc.Parents[p.scope-1] - ir.CurFunc.Parents = ir.CurFunc.Parents[:len(ir.CurFunc.Parents)-1] - - nmarks := len(ir.CurFunc.Marks) - ir.CurFunc.Marks[nmarks-1].Scope = p.scope - prevScope := ir.ScopeID(0) - if nmarks >= 2 { - prevScope = ir.CurFunc.Marks[nmarks-2].Scope - } - if ir.CurFunc.Marks[nmarks-1].Scope == prevScope { - ir.CurFunc.Marks = ir.CurFunc.Marks[:nmarks-1] - } - return + fs.marker.Unpush() + } else { + fs.marker.Pop(p.makeXPos(pos)) } - - p.scope = ir.CurFunc.Parents[p.scope-1] - - p.markScope(pos) - } -} - -func (p *noder) markScope(pos syntax.Pos) { - xpos := p.makeXPos(pos) - if i := len(ir.CurFunc.Marks); i > 0 && ir.CurFunc.Marks[i-1].Pos == xpos { - ir.CurFunc.Marks[i-1].Scope = p.scope - } else { - ir.CurFunc.Marks = append(ir.CurFunc.Marks, ir.Mark{Pos: xpos, Scope: p.scope}) } } @@ -324,7 +248,7 @@ func (p *noder) markScope(pos syntax.Pos) { // "if" statements, as their implicit blocks always end at the same // position as an explicit block. func (p *noder) closeAnotherScope() { - p.closeScope(p.lastCloseScopePos) + p.closeScope(p.funcState.lastCloseScopePos) } // linkname records a //go:linkname directive. 
@@ -335,7 +259,6 @@ type linkname struct { } func (p *noder) node() { - types.Block = 1 p.importedUnsafe = false p.importedEmbed = false @@ -404,7 +327,7 @@ func (p *noder) decls(decls []syntax.Decl) (l []ir.Node) { } func (p *noder) importDecl(imp *syntax.ImportDecl) { - if imp.Path.Bad { + if imp.Path == nil || imp.Path.Bad { return // avoid follow-on errors if there was a syntax error } @@ -412,7 +335,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) { p.checkUnused(pragma) } - ipkg := importfile(p.basicLit(imp.Path)) + ipkg := importfile(imp) if ipkg == nil { if base.Errors() == 0 { base.Fatalf("phase error in import") @@ -427,11 +350,6 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) { p.importedEmbed = true } - if !ipkg.Direct { - typecheck.Target.Imports = append(typecheck.Target.Imports, ipkg) - } - ipkg.Direct = true - var my *types.Sym if imp.LocalPkgName != nil { my = p.name(imp.LocalPkgName) @@ -465,20 +383,7 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node { exprs := p.exprList(decl.Values) if pragma, ok := decl.Pragma.(*pragmas); ok { - if len(pragma.Embeds) > 0 { - if !p.importedEmbed { - // This check can't be done when building the list pragma.Embeds - // because that list is created before the noder starts walking over the file, - // so at that point it hasn't seen the imports. - // We're left to check now, just before applying the //go:embed lines. - for _, e := range pragma.Embeds { - p.errorAt(e.Pos, "//go:embed only allowed in Go files that import \"embed\"") - } - } else { - varEmbed(p, names, typ, exprs, pragma.Embeds) - } - pragma.Embeds = nil - } + varEmbed(p.makeXPos, names[0], decl, pragma, p.importedEmbed) p.checkUnused(pragma) } @@ -1126,9 +1031,16 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node { case *syntax.DeclStmt: return ir.NewBlockStmt(src.NoXPos, p.decls(stmt.DeclList)) case *syntax.AssignStmt: + if stmt.Rhs == syntax.ImplicitOne { + one := constant.MakeInt64(1) + pos := p.pos(stmt) + n := ir.NewAssignOpStmt(pos, p.binOp(stmt.Op), p.expr(stmt.Lhs), ir.NewBasicLit(pos, one)) + n.IncDec = true + return n + } + if stmt.Op != 0 && stmt.Op != syntax.Def { n := ir.NewAssignOpStmt(p.pos(stmt), p.binOp(stmt.Op), p.expr(stmt.Lhs), p.expr(stmt.Rhs)) - n.IncDec = stmt.Rhs == syntax.ImplicitOne return n } @@ -1588,15 +1500,6 @@ func (p *noder) wrapname(n syntax.Node, x ir.Node) ir.Node { return x } -func (p *noder) pos(n syntax.Node) src.XPos { - // TODO(gri): orig.Pos() should always be known - fix package syntax - xpos := base.Pos - if pos := n.Pos(); pos.IsKnown() { - xpos = p.makeXPos(pos) - } - return xpos -} - func (p *noder) setlineno(n syntax.Node) { if n != nil { base.Pos = p.pos(n) @@ -1923,48 +1826,41 @@ func oldname(s *types.Sym) ir.Node { return n } -func varEmbed(p *noder, names []*ir.Name, typ ir.Ntype, exprs []ir.Node, embeds []pragmaEmbed) { - haveEmbed := false - for _, decl := range p.file.DeclList { - imp, ok := decl.(*syntax.ImportDecl) - if !ok { - // imports always come first - break - } - path, _ := strconv.Unquote(imp.Path.Value) - if path == "embed" { - haveEmbed = true - break - } +func varEmbed(makeXPos func(syntax.Pos) src.XPos, name *ir.Name, decl *syntax.VarDecl, pragma *pragmas, haveEmbed bool) { + if pragma.Embeds == nil { + return } - pos := embeds[0].Pos + pragmaEmbeds := pragma.Embeds + pragma.Embeds = nil + pos := makeXPos(pragmaEmbeds[0].Pos) + if !haveEmbed { - p.errorAt(pos, "invalid go:embed: missing import \"embed\"") + base.ErrorfAt(pos, "go:embed only allowed in Go files that 
import \"embed\"") return } - if len(names) > 1 { - p.errorAt(pos, "go:embed cannot apply to multiple vars") + if len(decl.NameList) > 1 { + base.ErrorfAt(pos, "go:embed cannot apply to multiple vars") return } - if len(exprs) > 0 { - p.errorAt(pos, "go:embed cannot apply to var with initializer") + if decl.Values != nil { + base.ErrorfAt(pos, "go:embed cannot apply to var with initializer") return } - if typ == nil { - // Should not happen, since len(exprs) == 0 now. - p.errorAt(pos, "go:embed cannot apply to var without type") + if decl.Type == nil { + // Should not happen, since Values == nil now. + base.ErrorfAt(pos, "go:embed cannot apply to var without type") return } if typecheck.DeclContext != ir.PEXTERN { - p.errorAt(pos, "go:embed cannot apply to var inside func") + base.ErrorfAt(pos, "go:embed cannot apply to var inside func") return } - v := names[0] - typecheck.Target.Embeds = append(typecheck.Target.Embeds, v) - v.Embed = new([]ir.Embed) - for _, e := range embeds { - *v.Embed = append(*v.Embed, ir.Embed{Pos: p.makeXPos(e.Pos), Patterns: e.Patterns}) + var embeds []ir.Embed + for _, e := range pragmaEmbeds { + embeds = append(embeds, ir.Embed{Pos: makeXPos(e.Pos), Patterns: e.Patterns}) } + typecheck.Target.Embeds = append(typecheck.Target.Embeds, name) + name.Embed = &embeds } diff --git a/src/cmd/compile/internal/noder/posmap.go b/src/cmd/compile/internal/noder/posmap.go new file mode 100644 index 0000000000000..a6d3e2d7ef4d9 --- /dev/null +++ b/src/cmd/compile/internal/noder/posmap.go @@ -0,0 +1,83 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package noder + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/syntax" + "cmd/internal/src" +) + +// A posMap handles mapping from syntax.Pos to src.XPos. +type posMap struct { + bases map[*syntax.PosBase]*src.PosBase + cache struct { + last *syntax.PosBase + base *src.PosBase + } +} + +type poser interface{ Pos() syntax.Pos } +type ender interface{ End() syntax.Pos } + +func (m *posMap) pos(p poser) src.XPos { return m.makeXPos(p.Pos()) } +func (m *posMap) end(p ender) src.XPos { return m.makeXPos(p.End()) } + +func (m *posMap) makeXPos(pos syntax.Pos) src.XPos { + if !pos.IsKnown() { + // TODO(mdempsky): Investigate restoring base.Fatalf. + return src.NoXPos + } + + posBase := m.makeSrcPosBase(pos.Base()) + return base.Ctxt.PosTable.XPos(src.MakePos(posBase, pos.Line(), pos.Col())) +} + +// makeSrcPosBase translates from a *syntax.PosBase to a *src.PosBase. 
+func (m *posMap) makeSrcPosBase(b0 *syntax.PosBase) *src.PosBase { + // fast path: most likely PosBase hasn't changed + if m.cache.last == b0 { + return m.cache.base + } + + b1, ok := m.bases[b0] + if !ok { + fn := b0.Filename() + if b0.IsFileBase() { + b1 = src.NewFileBase(fn, absFilename(fn)) + } else { + // line directive base + p0 := b0.Pos() + p0b := p0.Base() + if p0b == b0 { + panic("infinite recursion in makeSrcPosBase") + } + p1 := src.MakePos(m.makeSrcPosBase(p0b), p0.Line(), p0.Col()) + b1 = src.NewLinePragmaBase(p1, fn, fileh(fn), b0.Line(), b0.Col()) + } + if m.bases == nil { + m.bases = make(map[*syntax.PosBase]*src.PosBase) + } + m.bases[b0] = b1 + } + + // update cache + m.cache.last = b0 + m.cache.base = b1 + + return b1 +} + +func (m *posMap) join(other *posMap) { + if m.bases == nil { + m.bases = make(map[*syntax.PosBase]*src.PosBase) + } + for k, v := range other.bases { + if m.bases[k] != nil { + base.Fatalf("duplicate posmap bases") + } + m.bases[k] = v + } +} diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go index 1ec92e3dd073e..3ff14c87f4fc7 100644 --- a/src/cmd/compile/internal/reflectdata/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -791,7 +791,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { // TrackSym returns the symbol for tracking use of field/method f, assumed // to be a member of struct/interface type t. func TrackSym(t *types.Type, f *types.Field) *obj.LSym { - return base.PkgLinksym("go.track", t.ShortString() + "." + f.Sym.Name, obj.ABI0) + return base.PkgLinksym("go.track", t.ShortString()+"."+f.Sym.Name, obj.ABI0) } func TypeSymPrefix(prefix string, t *types.Type) *types.Sym { diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go index c324238bf1ef0..eab0bb09b2950 100644 --- a/src/cmd/compile/internal/typecheck/dcl.go +++ b/src/cmd/compile/internal/typecheck/dcl.go @@ -304,10 +304,13 @@ func checkembeddedtype(t *types.Type) { } } -func fakeRecvField() *types.Field { +// TODO(mdempsky): Move to package types. +func FakeRecv() *types.Field { return types.NewField(src.NoXPos, nil, types.FakeRecvType()) } +var fakeRecvField = FakeRecv + var funcStack []funcStackEnt // stack of previous values of ir.CurFunc/DeclContext type funcStackEnt struct { diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index f624773c8f0fe..7ab5f68ce301b 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -174,7 +174,7 @@ func fnpkg(fn *ir.Name) *types.Pkg { // closurename generates a new unique name for a closure within // outerfunc. -func closurename(outerfunc *ir.Func) *types.Sym { +func ClosureName(outerfunc *ir.Func) *types.Sym { outer := "glob." prefix := "func" gen := &globClosgen @@ -309,7 +309,7 @@ func tcClosure(clo *ir.ClosureExpr, top int) { // explicitly in (*inlsubst).node()). 
inTypeCheckInl := ir.CurFunc != nil && ir.CurFunc.Body == nil if !inTypeCheckInl { - fn.Nname.SetSym(closurename(ir.CurFunc)) + fn.Nname.SetSym(ClosureName(ir.CurFunc)) ir.MarkFunc(fn.Nname) } Func(fn) diff --git a/src/cmd/compile/internal/typecheck/subr.go b/src/cmd/compile/internal/typecheck/subr.go index b6a0870672f40..b88a9f22839e7 100644 --- a/src/cmd/compile/internal/typecheck/subr.go +++ b/src/cmd/compile/internal/typecheck/subr.go @@ -127,10 +127,9 @@ func NodNil() ir.Node { return n } -// in T.field -// find missing fields that -// will give shortest unique addressing. -// modify the tree with missing type names. +// AddImplicitDots finds missing fields in obj.field that +// will give the shortest unique addressing and +// modifies the tree with missing field names. func AddImplicitDots(n *ir.SelectorExpr) *ir.SelectorExpr { n.X = typecheck(n.X, ctxType|ctxExpr) if n.X.Diag() { diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index 7881ea308dacb..cb434578dd37e 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -1674,10 +1674,10 @@ func CheckMapKeys() { mapqueue = nil } -// typegen tracks the number of function-scoped defined types that +// TypeGen tracks the number of function-scoped defined types that // have been declared. It's used to generate unique linker symbols for // their runtime type descriptors. -var typegen int32 +var TypeGen int32 func typecheckdeftype(n *ir.Name) { if base.EnableTrace && base.Flag.LowerT { @@ -1686,8 +1686,8 @@ func typecheckdeftype(n *ir.Name) { t := types.NewNamed(n) if n.Curfn != nil { - typegen++ - t.Vargen = typegen + TypeGen++ + t.Vargen = TypeGen } if n.Pragma()&ir.NotInHeap != 0 { diff --git a/src/cmd/compile/internal/types/pkg.go b/src/cmd/compile/internal/types/pkg.go index de45d32bfa30e..a6d2e2007b042 100644 --- a/src/cmd/compile/internal/types/pkg.go +++ b/src/cmd/compile/internal/types/pkg.go @@ -31,8 +31,7 @@ type Pkg struct { // height of their imported packages. Height int - Imported bool // export data of this package was parsed - Direct bool // imported directly + Direct bool // imported directly } // NewPkg returns a new Pkg for the given package path and name. diff --git a/src/cmd/compile/internal/types/scope.go b/src/cmd/compile/internal/types/scope.go index a9669ffafcbc6..d7c454f3795e4 100644 --- a/src/cmd/compile/internal/types/scope.go +++ b/src/cmd/compile/internal/types/scope.go @@ -12,7 +12,7 @@ import ( // Declaration stack & operations var blockgen int32 = 1 // max block number -var Block int32 // current block number +var Block int32 = 1 // current block number // A dsym stores a symbol's shadowed declaration so that it can be // restored once the block scope ends. diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go index 4273a62fe564e..b47d96dc4c933 100644 --- a/src/cmd/compile/internal/walk/walk.go +++ b/src/cmd/compile/internal/walk/walk.go @@ -49,6 +49,11 @@ func Walk(fn *ir.Func) { if base.Flag.Cfg.Instrumenting { instrument(fn) } + + // Eagerly compute sizes of all variables for SSA. + for _, n := range fn.Dcl { + types.CalcSize(n.Type()) + } } // walkRecv walks an ORECV node. 
diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go
index 7ba8c6d317d1d..35cb53cbf6cc8 100644
--- a/src/cmd/internal/obj/link.go
+++ b/src/cmd/internal/obj/link.go
@@ -809,9 +809,9 @@ type Link struct {
 	Errors int
 	RegArgs []RegArg
 
-	InParallel    bool // parallel backend phase in effect
-	UseBASEntries bool // use Base Address Selection Entries in location lists and PC ranges
-	IsAsm         bool // is the source assembly language, which may contain surprising idioms (e.g., call tables)
+	InParallel    bool // parallel backend phase in effect
+	UseBASEntries bool // use Base Address Selection Entries in location lists and PC ranges
+	IsAsm         bool // is the source assembly language, which may contain surprising idioms (e.g., call tables)
 
 	// state for writing objects
 	Text []*LSym
diff --git a/test/fixedbugs/issue11362.go b/test/fixedbugs/issue11362.go
index 964e5fdf6b7ce..9492ec12739af 100644
--- a/test/fixedbugs/issue11362.go
+++ b/test/fixedbugs/issue11362.go
@@ -8,7 +8,7 @@
 
 package main
 
-import _ "unicode//utf8" // GC_ERROR "non-canonical import path .unicode//utf8. \(should be .unicode/utf8.\)" "can't find import: .unicode//utf8."
+import _ "unicode//utf8" // GC_ERROR "non-canonical import path .unicode//utf8. \(should be .unicode/utf8.\)"
 
 func main() {
 }

From 5a76c3d5485e5c5714a147e10a6bc55738ab0b90 Mon Sep 17 00:00:00 2001
From: David Chase
Date: Thu, 21 Jan 2021 12:02:39 -0500
Subject: [PATCH 425/474] [dev.regabi] cmd/compile: modify abiutils for
 recently updated ABI

Discovered difficulties posed by the earlier design; these
modifications should work better. Updated tests; also added some
helper functions for use in call lowering.

Change-Id: I459f0f71ad8a6730c571244925c3f395e1df28de
Reviewed-on: https://go-review.googlesource.com/c/go/+/285392
Trust: David Chase
Run-TryBot: David Chase
TryBot-Result: Go Bot
Reviewed-by: Than McIntosh
---
 src/cmd/compile/internal/abi/abiutils.go      | 146 +++++++++---
 .../compile/internal/test/abiutils_test.go    | 214 ++++++++++--------
 .../compile/internal/test/abiutilsaux_test.go |  18 +-
 3 files changed, 244 insertions(+), 134 deletions(-)

diff --git a/src/cmd/compile/internal/abi/abiutils.go b/src/cmd/compile/internal/abi/abiutils.go
index 3ac59e6f756b0..e935821802dd2 100644
--- a/src/cmd/compile/internal/abi/abiutils.go
+++ b/src/cmd/compile/internal/abi/abiutils.go
@@ -25,9 +25,8 @@ import (
 type ABIParamResultInfo struct {
 	inparams []ABIParamAssignment // Includes receiver for method calls. Does NOT include hidden closure pointer.
 	outparams []ABIParamAssignment
-	intSpillSlots int
-	floatSpillSlots int
 	offsetToSpillArea int64
+	spillAreaSize int64
 	config *ABIConfig // to enable String() method
 }
 
@@ -47,18 +46,14 @@ func (a *ABIParamResultInfo) OutParam(i int) ABIParamAssignment {
 	return a.outparams[i]
 }
 
-func (a *ABIParamResultInfo) IntSpillCount() int {
-	return a.intSpillSlots
-}
-
-func (a *ABIParamResultInfo) FloatSpillCount() int {
-	return a.floatSpillSlots
-}
-
 func (a *ABIParamResultInfo) SpillAreaOffset() int64 {
 	return a.offsetToSpillArea
 }
 
+func (a *ABIParamResultInfo) SpillAreaSize() int64 {
+	return a.spillAreaSize
+}
+
 // RegIndex stores the index into the set of machine registers used by
 // the ABI on a specific architecture for parameter passing.
RegIndex // values 0 through N-1 (where N is the number of integer registers @@ -78,7 +73,27 @@ type RegIndex uint8 type ABIParamAssignment struct { Type *types.Type Registers []RegIndex - Offset int32 + offset int32 +} + +// Offset returns the stack offset for addressing the parameter that "a" describes. +// This will panic if "a" describes a register-allocated parameter. +func (a *ABIParamAssignment) Offset() int32 { + if len(a.Registers) > 0 { + panic("Register allocated parameters have no offset") + } + return a.offset +} + +// SpillOffset returns the offset *within the spill area* for the parameter that "a" describes. +// Registers will be spilled here; if a memory home is needed (for a pointer method e.g.) +// then that will be the address. +// This will panic if "a" describes a stack-allocated parameter. +func (a *ABIParamAssignment) SpillOffset() int32 { + if len(a.Registers) == 0 { + panic("Stack-allocated parameters have no spill offset") + } + return a.offset } // RegAmounts holds a specified number of integer/float registers. @@ -91,20 +106,58 @@ type RegAmounts struct { // by the ABI rules for parameter passing and result returning. type ABIConfig struct { // Do we need anything more than this? - regAmounts RegAmounts + regAmounts RegAmounts + regsForTypeCache map[*types.Type]int } // NewABIConfig returns a new ABI configuration for an architecture with // iRegsCount integer/pointer registers and fRegsCount floating point registers. func NewABIConfig(iRegsCount, fRegsCount int) *ABIConfig { - return &ABIConfig{RegAmounts{iRegsCount, fRegsCount}} + return &ABIConfig{regAmounts: RegAmounts{iRegsCount, fRegsCount}, regsForTypeCache: make(map[*types.Type]int)} +} + +// NumParamRegs returns the number of parameter registers used for a given type, +// without regard for the number available. +func (a *ABIConfig) NumParamRegs(t *types.Type) int { + if n, ok := a.regsForTypeCache[t]; ok { + return n + } + + if t.IsScalar() || t.IsPtrShaped() { + var n int + if t.IsComplex() { + n = 2 + } else { + n = (int(t.Size()) + types.RegSize - 1) / types.RegSize + } + a.regsForTypeCache[t] = n + return n + } + typ := t.Kind() + n := 0 + switch typ { + case types.TARRAY: + n = a.NumParamRegs(t.Elem()) * int(t.NumElem()) + case types.TSTRUCT: + for _, f := range t.FieldSlice() { + n += a.NumParamRegs(f.Type) + } + case types.TSLICE: + n = a.NumParamRegs(synthSlice) + case types.TSTRING: + n = a.NumParamRegs(synthString) + case types.TINTER: + n = a.NumParamRegs(synthIface) + } + a.regsForTypeCache[t] = n + return n } // ABIAnalyze takes a function type 't' and an ABI rules description // 'config' and analyzes the function to determine how its parameters // and results will be passed (in registers or on the stack), returning // an ABIParamResultInfo object that holds the results of the analysis. 
-func ABIAnalyze(t *types.Type, config *ABIConfig) ABIParamResultInfo { +func (config *ABIConfig) ABIAnalyze(t *types.Type) ABIParamResultInfo { setup() s := assignState{ rTotal: config.regAmounts, @@ -116,28 +169,27 @@ func ABIAnalyze(t *types.Type, config *ABIConfig) ABIParamResultInfo { if t.NumRecvs() != 0 { rfsl := ft.Receiver.FieldSlice() result.inparams = append(result.inparams, - s.assignParamOrReturn(rfsl[0].Type)) + s.assignParamOrReturn(rfsl[0].Type, false)) } // Inputs ifsl := ft.Params.FieldSlice() for _, f := range ifsl { result.inparams = append(result.inparams, - s.assignParamOrReturn(f.Type)) + s.assignParamOrReturn(f.Type, false)) } s.stackOffset = types.Rnd(s.stackOffset, int64(types.RegSize)) - // Record number of spill slots needed. - result.intSpillSlots = s.rUsed.intRegs - result.floatSpillSlots = s.rUsed.floatRegs - // Outputs s.rUsed = RegAmounts{} ofsl := ft.Results.FieldSlice() for _, f := range ofsl { - result.outparams = append(result.outparams, s.assignParamOrReturn(f.Type)) + result.outparams = append(result.outparams, s.assignParamOrReturn(f.Type, true)) } - result.offsetToSpillArea = s.stackOffset + // The spill area is at a register-aligned offset and its size is rounded up to a register alignment. + // TODO in theory could align offset only to minimum required by spilled data types. + result.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize) + result.spillAreaSize = alignTo(s.spillOffset, types.RegSize) return result } @@ -160,10 +212,14 @@ func (c *RegAmounts) regString(r RegIndex) string { // form, suitable for debugging or unit testing. func (ri *ABIParamAssignment) toString(config *ABIConfig) string { regs := "R{" + offname := "spilloffset" // offset is for spill for register(s) + if len(ri.Registers) == 0 { + offname = "offset" // offset is for memory arg + } for _, r := range ri.Registers { regs += " " + config.regAmounts.regString(r) } - return fmt.Sprintf("%s } offset: %d typ: %v", regs, ri.Offset, ri.Type) + return fmt.Sprintf("%s } %s: %d typ: %v", regs, offname, ri.offset, ri.Type) } // toString method renders an ABIParamResultInfo in human-readable @@ -176,8 +232,8 @@ func (ri *ABIParamResultInfo) String() string { for k, r := range ri.outparams { res += fmt.Sprintf("OUT %d: %s\n", k, r.toString(ri.config)) } - res += fmt.Sprintf("intspill: %d floatspill: %d offsetToSpillArea: %d", - ri.intSpillSlots, ri.floatSpillSlots, ri.offsetToSpillArea) + res += fmt.Sprintf("offsetToSpillArea: %d spillAreaSize: %d", + ri.offsetToSpillArea, ri.spillAreaSize) return res } @@ -188,16 +244,27 @@ type assignState struct { rUsed RegAmounts // regs used by params completely assigned so far pUsed RegAmounts // regs used by the current param (or pieces therein) stackOffset int64 // current stack offset + spillOffset int64 // current spill offset +} + +// align returns a rounded up to t's alignment +func align(a int64, t *types.Type) int64 { + return alignTo(a, int(t.Align)) +} + +// alignTo returns a rounded up to t, where t must be 0 or a power of 2. +func alignTo(a int64, t int) int64 { + if t == 0 { + return a + } + return types.Rnd(a, int64(t)) } // stackSlot returns a stack offset for a param or result of the // specified type. 
func (state *assignState) stackSlot(t *types.Type) int64 { - if t.Align > 0 { - state.stackOffset = types.Rnd(state.stackOffset, int64(t.Align)) - } - rv := state.stackOffset - state.stackOffset += t.Width + rv := align(state.stackOffset, t) + state.stackOffset = rv + t.Width return rv } @@ -225,11 +292,17 @@ func (state *assignState) allocateRegs() []RegIndex { // regAllocate creates a register ABIParamAssignment object for a param // or result with the specified type, as a final step (this assumes // that all of the safety/suitability analysis is complete). -func (state *assignState) regAllocate(t *types.Type) ABIParamAssignment { +func (state *assignState) regAllocate(t *types.Type, isReturn bool) ABIParamAssignment { + spillLoc := int64(-1) + if !isReturn { + // Spill for register-resident t must be aligned for storage of a t. + spillLoc = align(state.spillOffset, t) + state.spillOffset = spillLoc + t.Size() + } return ABIParamAssignment{ Type: t, Registers: state.allocateRegs(), - Offset: -1, + offset: int32(spillLoc), } } @@ -239,7 +312,7 @@ func (state *assignState) regAllocate(t *types.Type) ABIParamAssignment { func (state *assignState) stackAllocate(t *types.Type) ABIParamAssignment { return ABIParamAssignment{ Type: t, - Offset: int32(state.stackSlot(t)), + offset: int32(state.stackSlot(t)), } } @@ -261,6 +334,9 @@ func (state *assignState) floatUsed() int { // accordingly). func (state *assignState) regassignIntegral(t *types.Type) bool { regsNeeded := int(types.Rnd(t.Width, int64(types.PtrSize)) / int64(types.PtrSize)) + if t.IsComplex() { + regsNeeded = 2 + } // Floating point and complex. if t.IsFloat() || t.IsComplex() { @@ -371,14 +447,14 @@ func (state *assignState) regassign(pt *types.Type) bool { // of type 'pt' to determine whether it can be register assigned. // The result of the analysis is recorded in the result // ABIParamResultInfo held in 'state'. 
-func (state *assignState) assignParamOrReturn(pt *types.Type) ABIParamAssignment { +func (state *assignState) assignParamOrReturn(pt *types.Type, isReturn bool) ABIParamAssignment { state.pUsed = RegAmounts{} if pt.Width == types.BADWIDTH { panic("should never happen") } else if pt.Width == 0 { return state.stackAllocate(pt) } else if state.regassign(pt) { - return state.regAllocate(pt) + return state.regAllocate(pt, isReturn) } else { return state.stackAllocate(pt) } diff --git a/src/cmd/compile/internal/test/abiutils_test.go b/src/cmd/compile/internal/test/abiutils_test.go index ae7d484062968..decc29667e7d3 100644 --- a/src/cmd/compile/internal/test/abiutils_test.go +++ b/src/cmd/compile/internal/test/abiutils_test.go @@ -21,7 +21,7 @@ import ( // AMD64 registers available: // - integer: RAX, RBX, RCX, RDI, RSI, R8, R9, r10, R11 // - floating point: X0 - X14 -var configAMD64 = abi.NewABIConfig(9,15) +var configAMD64 = abi.NewABIConfig(9, 15) func TestMain(m *testing.M) { ssagen.Arch.LinkArch = &x86.Linkamd64 @@ -46,9 +46,9 @@ func TestABIUtilsBasic1(t *testing.T) { // expected results exp := makeExpectedDump(` - IN 0: R{ I0 } offset: -1 typ: int32 - OUT 0: R{ I0 } offset: -1 typ: int32 - intspill: 1 floatspill: 0 offsetToSpillArea: 0 + IN 0: R{ I0 } spilloffset: 0 typ: int32 + OUT 0: R{ I0 } spilloffset: -1 typ: int32 + offsetToSpillArea: 0 spillAreaSize: 8 `) abitest(t, ft, exp) @@ -75,39 +75,39 @@ func TestABIUtilsBasic2(t *testing.T) { i8, i16, i32, i64}, []*types.Type{i32, f64, f64}) exp := makeExpectedDump(` - IN 0: R{ I0 } offset: -1 typ: int8 - IN 1: R{ I1 } offset: -1 typ: int16 - IN 2: R{ I2 } offset: -1 typ: int32 - IN 3: R{ I3 } offset: -1 typ: int64 - IN 4: R{ F0 } offset: -1 typ: float32 - IN 5: R{ F1 } offset: -1 typ: float32 - IN 6: R{ F2 } offset: -1 typ: float64 - IN 7: R{ F3 } offset: -1 typ: float64 - IN 8: R{ I4 } offset: -1 typ: int8 - IN 9: R{ I5 } offset: -1 typ: int16 - IN 10: R{ I6 } offset: -1 typ: int32 - IN 11: R{ I7 } offset: -1 typ: int64 - IN 12: R{ F4 } offset: -1 typ: float32 - IN 13: R{ F5 } offset: -1 typ: float32 - IN 14: R{ F6 } offset: -1 typ: float64 - IN 15: R{ F7 } offset: -1 typ: float64 - IN 16: R{ F8 F9 } offset: -1 typ: complex128 - IN 17: R{ F10 F11 } offset: -1 typ: complex128 - IN 18: R{ F12 F13 } offset: -1 typ: complex128 - IN 19: R{ } offset: 0 typ: complex128 - IN 20: R{ F14 } offset: -1 typ: complex64 - IN 21: R{ I8 } offset: -1 typ: int8 - IN 22: R{ } offset: 16 typ: int16 - IN 23: R{ } offset: 20 typ: int32 - IN 24: R{ } offset: 24 typ: int64 - IN 25: R{ } offset: 32 typ: int8 - IN 26: R{ } offset: 34 typ: int16 - IN 27: R{ } offset: 36 typ: int32 - IN 28: R{ } offset: 40 typ: int64 - OUT 0: R{ I0 } offset: -1 typ: int32 - OUT 1: R{ F0 } offset: -1 typ: float64 - OUT 2: R{ F1 } offset: -1 typ: float64 - intspill: 9 floatspill: 15 offsetToSpillArea: 48 + IN 0: R{ I0 } spilloffset: 0 typ: int8 + IN 1: R{ I1 } spilloffset: 2 typ: int16 + IN 2: R{ I2 } spilloffset: 4 typ: int32 + IN 3: R{ I3 } spilloffset: 8 typ: int64 + IN 4: R{ F0 } spilloffset: 16 typ: float32 + IN 5: R{ F1 } spilloffset: 20 typ: float32 + IN 6: R{ F2 } spilloffset: 24 typ: float64 + IN 7: R{ F3 } spilloffset: 32 typ: float64 + IN 8: R{ I4 } spilloffset: 40 typ: int8 + IN 9: R{ I5 } spilloffset: 42 typ: int16 + IN 10: R{ I6 } spilloffset: 44 typ: int32 + IN 11: R{ I7 } spilloffset: 48 typ: int64 + IN 12: R{ F4 } spilloffset: 56 typ: float32 + IN 13: R{ F5 } spilloffset: 60 typ: float32 + IN 14: R{ F6 } spilloffset: 64 typ: float64 + IN 15: R{ F7 } spilloffset: 72 
typ: float64 + IN 16: R{ F8 F9 } spilloffset: 80 typ: complex128 + IN 17: R{ F10 F11 } spilloffset: 96 typ: complex128 + IN 18: R{ F12 F13 } spilloffset: 112 typ: complex128 + IN 19: R{ } offset: 0 typ: complex128 + IN 20: R{ } offset: 16 typ: complex64 + IN 21: R{ I8 } spilloffset: 128 typ: int8 + IN 22: R{ } offset: 24 typ: int16 + IN 23: R{ } offset: 28 typ: int32 + IN 24: R{ } offset: 32 typ: int64 + IN 25: R{ } offset: 40 typ: int8 + IN 26: R{ } offset: 42 typ: int16 + IN 27: R{ } offset: 44 typ: int32 + IN 28: R{ } offset: 48 typ: int64 + OUT 0: R{ I0 } spilloffset: -1 typ: int32 + OUT 1: R{ F0 } spilloffset: -1 typ: float64 + OUT 2: R{ F1 } spilloffset: -1 typ: float64 + offsetToSpillArea: 56 spillAreaSize: 136 `) abitest(t, ft, exp) @@ -123,15 +123,15 @@ func TestABIUtilsArrays(t *testing.T) { []*types.Type{a2, a1, ae, aa1}) exp := makeExpectedDump(` - IN 0: R{ I0 } offset: -1 typ: [1]int32 - IN 1: R{ } offset: 0 typ: [0]int32 - IN 2: R{ I1 } offset: -1 typ: [1][1]int32 - IN 3: R{ } offset: 0 typ: [2]int32 - OUT 0: R{ } offset: 8 typ: [2]int32 - OUT 1: R{ I0 } offset: -1 typ: [1]int32 - OUT 2: R{ } offset: 16 typ: [0]int32 - OUT 3: R{ I1 } offset: -1 typ: [1][1]int32 - intspill: 2 floatspill: 0 offsetToSpillArea: 16 + IN 0: R{ I0 } spilloffset: 0 typ: [1]int32 + IN 1: R{ } offset: 0 typ: [0]int32 + IN 2: R{ I1 } spilloffset: 4 typ: [1][1]int32 + IN 3: R{ } offset: 0 typ: [2]int32 + OUT 0: R{ } offset: 8 typ: [2]int32 + OUT 1: R{ I0 } spilloffset: -1 typ: [1]int32 + OUT 2: R{ } offset: 16 typ: [0]int32 + OUT 3: R{ I1 } spilloffset: -1 typ: [1][1]int32 + offsetToSpillArea: 16 spillAreaSize: 8 `) abitest(t, ft, exp) @@ -147,13 +147,13 @@ func TestABIUtilsStruct1(t *testing.T) { []*types.Type{s, i8, i32}) exp := makeExpectedDump(` - IN 0: R{ I0 } offset: -1 typ: int8 - IN 1: R{ I1 I2 I3 I4 } offset: -1 typ: struct { int8; int8; struct {}; int8; int16 } - IN 2: R{ I5 } offset: -1 typ: int64 - OUT 0: R{ I0 I1 I2 I3 } offset: -1 typ: struct { int8; int8; struct {}; int8; int16 } - OUT 1: R{ I4 } offset: -1 typ: int8 - OUT 2: R{ I5 } offset: -1 typ: int32 - intspill: 6 floatspill: 0 offsetToSpillArea: 0 + IN 0: R{ I0 } spilloffset: 0 typ: int8 + IN 1: R{ I1 I2 I3 I4 } spilloffset: 2 typ: struct { int8; int8; struct {}; int8; int16 } + IN 2: R{ I5 } spilloffset: 8 typ: int64 + OUT 0: R{ I0 I1 I2 I3 } spilloffset: -1 typ: struct { int8; int8; struct {}; int8; int16 } + OUT 1: R{ I4 } spilloffset: -1 typ: int8 + OUT 2: R{ I5 } spilloffset: -1 typ: int32 + offsetToSpillArea: 0 spillAreaSize: 16 `) abitest(t, ft, exp) @@ -168,12 +168,12 @@ func TestABIUtilsStruct2(t *testing.T) { []*types.Type{fs, fs}) exp := makeExpectedDump(` - IN 0: R{ I0 } offset: -1 typ: struct { int64; struct {} } - IN 1: R{ I1 } offset: -1 typ: struct { int64; struct {} } - IN 2: R{ I2 F0 } offset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} } - OUT 0: R{ I0 F0 } offset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} } - OUT 1: R{ I1 F1 } offset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} } - intspill: 3 floatspill: 1 offsetToSpillArea: 0 + IN 0: R{ I0 } spilloffset: 0 typ: struct { int64; struct {} } + IN 1: R{ I1 } spilloffset: 16 typ: struct { int64; struct {} } + IN 2: R{ I2 F0 } spilloffset: 32 typ: struct { float64; struct { int64; struct {} }; struct {} } + OUT 0: R{ I0 F0 } spilloffset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} } + OUT 1: R{ I1 F1 } spilloffset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} } + 
offsetToSpillArea: 0 spillAreaSize: 64 `) abitest(t, ft, exp) @@ -189,19 +189,19 @@ func TestABIUtilsSliceString(t *testing.T) { []*types.Type{str, i64, str, sli32}) exp := makeExpectedDump(` - IN 0: R{ I0 I1 I2 } offset: -1 typ: []int32 - IN 1: R{ I3 } offset: -1 typ: int8 - IN 2: R{ I4 I5 I6 } offset: -1 typ: []int32 - IN 3: R{ I7 } offset: -1 typ: int8 - IN 4: R{ } offset: 0 typ: string - IN 5: R{ I8 } offset: -1 typ: int8 - IN 6: R{ } offset: 16 typ: int64 - IN 7: R{ } offset: 24 typ: []int32 - OUT 0: R{ I0 I1 } offset: -1 typ: string - OUT 1: R{ I2 } offset: -1 typ: int64 - OUT 2: R{ I3 I4 } offset: -1 typ: string - OUT 3: R{ I5 I6 I7 } offset: -1 typ: []int32 - intspill: 9 floatspill: 0 offsetToSpillArea: 48 + IN 0: R{ I0 I1 I2 } spilloffset: 0 typ: []int32 + IN 1: R{ I3 } spilloffset: 24 typ: int8 + IN 2: R{ I4 I5 I6 } spilloffset: 32 typ: []int32 + IN 3: R{ I7 } spilloffset: 56 typ: int8 + IN 4: R{ } offset: 0 typ: string + IN 5: R{ I8 } spilloffset: 57 typ: int8 + IN 6: R{ } offset: 16 typ: int64 + IN 7: R{ } offset: 24 typ: []int32 + OUT 0: R{ I0 I1 } spilloffset: -1 typ: string + OUT 1: R{ I2 } spilloffset: -1 typ: int64 + OUT 2: R{ I3 I4 } spilloffset: -1 typ: string + OUT 3: R{ I5 I6 I7 } spilloffset: -1 typ: []int32 + offsetToSpillArea: 48 spillAreaSize: 64 `) abitest(t, ft, exp) @@ -219,17 +219,17 @@ func TestABIUtilsMethod(t *testing.T) { []*types.Type{a7, f64, i64}) exp := makeExpectedDump(` - IN 0: R{ I0 I1 I2 } offset: -1 typ: struct { int16; int16; int16 } - IN 1: R{ I3 } offset: -1 typ: *struct { int16; int16; int16 } - IN 2: R{ } offset: 0 typ: [7]*struct { int16; int16; int16 } - IN 3: R{ F0 } offset: -1 typ: float64 - IN 4: R{ I4 } offset: -1 typ: int16 - IN 5: R{ I5 } offset: -1 typ: int16 - IN 6: R{ I6 } offset: -1 typ: int16 - OUT 0: R{ } offset: 56 typ: [7]*struct { int16; int16; int16 } - OUT 1: R{ F0 } offset: -1 typ: float64 - OUT 2: R{ I0 } offset: -1 typ: int64 - intspill: 7 floatspill: 1 offsetToSpillArea: 112 + IN 0: R{ I0 I1 I2 } spilloffset: 0 typ: struct { int16; int16; int16 } + IN 1: R{ I3 } spilloffset: 8 typ: *struct { int16; int16; int16 } + IN 2: R{ } offset: 0 typ: [7]*struct { int16; int16; int16 } + IN 3: R{ F0 } spilloffset: 16 typ: float64 + IN 4: R{ I4 } spilloffset: 24 typ: int16 + IN 5: R{ I5 } spilloffset: 26 typ: int16 + IN 6: R{ I6 } spilloffset: 28 typ: int16 + OUT 0: R{ } offset: 56 typ: [7]*struct { int16; int16; int16 } + OUT 1: R{ F0 } spilloffset: -1 typ: float64 + OUT 2: R{ I0 } spilloffset: -1 typ: int64 + offsetToSpillArea: 112 spillAreaSize: 32 `) abitest(t, ft, exp) @@ -252,18 +252,44 @@ func TestABIUtilsInterfaces(t *testing.T) { []*types.Type{ei, nei, pei}) exp := makeExpectedDump(` - IN 0: R{ I0 I1 I2 } offset: -1 typ: struct { int16; int16; bool } - IN 1: R{ I3 I4 } offset: -1 typ: interface {} - IN 2: R{ I5 I6 } offset: -1 typ: interface {} - IN 3: R{ I7 I8 } offset: -1 typ: interface { () untyped string } - IN 4: R{ } offset: 0 typ: *interface {} - IN 5: R{ } offset: 8 typ: interface { () untyped string } - IN 6: R{ } offset: 24 typ: int16 - OUT 0: R{ I0 I1 } offset: -1 typ: interface {} - OUT 1: R{ I2 I3 } offset: -1 typ: interface { () untyped string } - OUT 2: R{ I4 } offset: -1 typ: *interface {} - intspill: 9 floatspill: 0 offsetToSpillArea: 32 + IN 0: R{ I0 I1 I2 } spilloffset: 0 typ: struct { int16; int16; bool } + IN 1: R{ I3 I4 } spilloffset: 8 typ: interface {} + IN 2: R{ I5 I6 } spilloffset: 24 typ: interface {} + IN 3: R{ I7 I8 } spilloffset: 40 typ: interface { () untyped string } + IN 4: R{ } offset: 0 
typ: *interface {}
+	IN 5: R{ } offset: 8 typ: interface { () untyped string }
+	IN 6: R{ } offset: 24 typ: int16
+	OUT 0: R{ I0 I1 } spilloffset: -1 typ: interface {}
+	OUT 1: R{ I2 I3 } spilloffset: -1 typ: interface { () untyped string }
+	OUT 2: R{ I4 } spilloffset: -1 typ: *interface {}
+	offsetToSpillArea: 32 spillAreaSize: 56
 `)
 
 	abitest(t, ft, exp)
 }
+
+func TestABINumParamRegs(t *testing.T) {
+	i8 := types.Types[types.TINT8]
+	i16 := types.Types[types.TINT16]
+	i32 := types.Types[types.TINT32]
+	i64 := types.Types[types.TINT64]
+	f32 := types.Types[types.TFLOAT32]
+	f64 := types.Types[types.TFLOAT64]
+	c64 := types.Types[types.TCOMPLEX64]
+	c128 := types.Types[types.TCOMPLEX128]
+
+	s := mkstruct([]*types.Type{i8, i8, mkstruct([]*types.Type{}), i8, i16})
+	a := types.NewArray(s, 3)
+
+	nrtest(t, i8, 1)
+	nrtest(t, i16, 1)
+	nrtest(t, i32, 1)
+	nrtest(t, i64, 1)
+	nrtest(t, f32, 1)
+	nrtest(t, f64, 1)
+	nrtest(t, c64, 2)
+	nrtest(t, c128, 2)
+	nrtest(t, s, 4)
+	nrtest(t, a, 12)
+
+}
\ No newline at end of file
diff --git a/src/cmd/compile/internal/test/abiutilsaux_test.go b/src/cmd/compile/internal/test/abiutilsaux_test.go
index 10fb66874578a..19dd3a51fd079 100644
--- a/src/cmd/compile/internal/test/abiutilsaux_test.go
+++ b/src/cmd/compile/internal/test/abiutilsaux_test.go
@@ -78,9 +78,9 @@ func tokenize(src string) []string {
 
 func verifyParamResultOffset(t *testing.T, f *types.Field, r abi.ABIParamAssignment, which string, idx int) int {
 	n := ir.AsNode(f.Nname).(*ir.Name)
-	if n.FrameOffset() != int64(r.Offset) {
+	if n.FrameOffset() != int64(r.Offset()) {
 		t.Errorf("%s %d: got offset %d wanted %d t=%v",
-			which, idx, r.Offset, n.Offset_, f.Type)
+			which, idx, r.Offset(), n.Offset_, f.Type)
 		return 1
 	}
 	return 0
@@ -106,12 +106,20 @@ func difftokens(atoks []string, etoks []string) string {
 	return ""
 }
 
+func nrtest(t *testing.T, ft *types.Type, expected int) {
+	types.CalcSize(ft)
+	got := configAMD64.NumParamRegs(ft)
+	if got != expected {
+		t.Errorf("\nexpected num regs = %d, got %d, type %v", expected, got, ft)
+	}
+}
+
 func abitest(t *testing.T, ft *types.Type, exp expectedDump) {
 
 	types.CalcSize(ft)
 
 	// Analyze with full set of registers.
-	regRes := abi.ABIAnalyze(ft, configAMD64)
+	regRes := configAMD64.ABIAnalyze(ft)
 	regResString := strings.TrimSpace(regRes.String())
 
 	// Check results.
@@ -122,8 +130,8 @@ func abitest(t *testing.T, ft *types.Type, exp expectedDump) {
 	}
 
 	// Analyze again with empty register set.
-	empty := &abi.ABIConfig{}
-	emptyRes := abi.ABIAnalyze(ft, empty)
+	empty := abi.NewABIConfig(0, 0)
+	emptyRes := empty.ABIAnalyze(ft)
 	emptyResString := emptyRes.String()
 
 	// Walk the results and make sure the offsets assigned match

From 8ee3d398383170e21ba2a63b3a45e1577f97c329 Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Sun, 24 Jan 2021 12:26:47 -0800
Subject: [PATCH 426/474] [dev.regabi] cmd/go: workaround -race issue on
 ppc64le

The race detector on ppc64le corrupts command-line argument lists if
they contain an empty string, and cmd/go often generates compiler
argument lists containing `-D ""`. Since this is equivalent to not
specifying the `-D` flag at all, just do that.

This allows using a race-detector-enabled cmd/compile on ppc64le.

Updates #43883.

Change-Id: Ifac5cd9a44932129438b9b0b3ecc6101ad3716b2
Reviewed-on: https://go-review.googlesource.com/c/go/+/286173
Trust: Matthew Dempsky
Run-TryBot: Matthew Dempsky
Reviewed-by: Bryan C. Mills
Reviewed-by: Jay Conrod
TryBot-Result: Go Bot
---
 src/cmd/go/internal/work/gc.go | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/cmd/go/internal/work/gc.go b/src/cmd/go/internal/work/gc.go
index cc4e2b2b2b965..3205fcbffc8d2 100644
--- a/src/cmd/go/internal/work/gc.go
+++ b/src/cmd/go/internal/work/gc.go
@@ -129,7 +129,11 @@ func (gcToolchain) gc(b *Builder, a *Action, archive string, importcfg, embedcfg
 		}
 	}
 
-	args := []interface{}{cfg.BuildToolexec, base.Tool("compile"), "-o", ofile, "-trimpath", a.trimpath(), gcflags, gcargs, "-D", p.Internal.LocalPrefix}
+	args := []interface{}{cfg.BuildToolexec, base.Tool("compile"), "-o", ofile, "-trimpath", a.trimpath(), gcflags, gcargs}
+	if p.Internal.LocalPrefix != "" {
+		// Workaround #43883.
+		args = append(args, "-D", p.Internal.LocalPrefix)
+	}
 	if importcfg != nil {
 		if err := b.writeFile(objdir+"importcfg", importcfg); err != nil {
 			return "", nil, err

From be9612a832186637173e35a2aa83ae193cf8d957 Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Sun, 24 Jan 2021 21:26:14 -0800
Subject: [PATCH 427/474] [dev.regabi] os: disable TestDirFS until #42637 is
 fixed

This test is causing nearly every trybot run on dev.regabi and
dev.typeparams to fail. It's already a release blocker for Go 1.16, so
the failures on the development branches are entirely noise; and
because it causes the trybots to short-circuit, it risks masking
actual Windows-specific failures.

This CL disables the test until a proper solution is decided upon and
implemented for Go 1.16.

Updates #42637.

Change-Id: Ibc85edaed591f1c125cf0b210867aa89d2b0a4b6
Reviewed-on: https://go-review.googlesource.com/c/go/+/286213
Run-TryBot: Matthew Dempsky
Trust: Matthew Dempsky
Trust: Robert Griesemer
TryBot-Result: Go Bot
Reviewed-by: Than McIntosh
Reviewed-by: Robert Griesemer
---
 src/os/os_test.go | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/os/os_test.go b/src/os/os_test.go
index 698dbca91e4a3..c02dc2c375255 100644
--- a/src/os/os_test.go
+++ b/src/os/os_test.go
@@ -2689,6 +2689,9 @@ func TestOpenFileKeepsPermissions(t *testing.T) {
 }
 
 func TestDirFS(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("workaround for dev.regabi/dev.typeparams until #42637 is fixed")
+	}
 	if err := fstest.TestFS(DirFS("./testdata/dirfs"), "a", "b", "dir/x"); err != nil {
 		t.Fatal(err)
 	}

From 6a4739ccc5198449d58d2e90a040c4fb908b3cb0 Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Sun, 24 Jan 2021 23:39:16 -0800
Subject: [PATCH 428/474] [dev.regabi] cmd/compile: enable rational constant
 arithmetic

This allows more precision and matches types2's behavior.

For backwards compatibility with gcimporter, for now we still need to
write out declared constants as limited-precision floating-point
values. To ensure consistent behavior of constant arithmetic whether
it spans package boundaries or not, we include the full-precision
rational representation in the compiler's extension section of the
export data.

Also, this CL simply uses the math/big.Rat.String text representation
as the encoding. This is inefficient, but because it's only in the
compiler's extension section, we can easily revisit this in the
future.

Declaring exported untyped float and complex constants isn't very
common anyway. Within the standard library, only package math declares
any at all, containing just 15. And those 15 are only imported a total
of 12 times elsewhere in the standard library.
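As an illustration of the precision difference (a standalone sketch
using only the standard library's go/constant and math/big packages,
not the compiler code touched by this CL):

    package main

    import (
    	"fmt"
    	"go/constant"
    	"go/token"
    	"math/big"
    )

    func main() {
    	// Backed by a *big.Rat, 1/3 is represented exactly.
    	third := constant.Make(big.NewRat(1, 3))

    	// Rational arithmetic stays exact: 1/3 + 1/3 + 1/3 == 1.
    	sum := constant.BinaryOp(third, token.ADD, third)
    	sum = constant.BinaryOp(sum, token.ADD, third)
    	fmt.Println(constant.Compare(sum, token.EQL, constant.MakeInt64(1))) // true

    	// A fixed-precision float has to round 1/3, so the same sum
    	// would only be approximately 1.
    	f := new(big.Float).SetPrec(512).Quo(big.NewFloat(1), big.NewFloat(3))
    	fmt.Println(f.Text('g', 10)) // 0.3333333333
    }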
Change-Id: I85ea23ab712e93fd3b68e52d60cbedce9be696a0 Reviewed-on: https://go-review.googlesource.com/c/go/+/286215 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Trust: Matthew Dempsky Trust: Robert Griesemer Reviewed-by: Robert Griesemer --- src/cmd/compile/internal/noder/noder.go | 8 --- src/cmd/compile/internal/typecheck/iexport.go | 51 +++++++++++++++++-- src/cmd/compile/internal/typecheck/iimport.go | 27 +++++++++- test/fixedbugs/issue7740.go | 2 +- test/float_lit3.go | 5 +- 5 files changed, 76 insertions(+), 17 deletions(-) diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index 6aab18549aefc..5b5b09cb2df93 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -1455,14 +1455,6 @@ func (p *noder) basicLit(lit *syntax.BasicLit) constant.Value { p.errorAt(lit.Pos(), "malformed constant: %s", lit.Value) } - // go/constant uses big.Rat by default, which is more precise, but - // causes toolstash -cmp and some tests to fail. For now, convert - // to big.Float to match cmd/compile's historical precision. - // TODO(mdempsky): Remove. - if v.Kind() == constant.Float { - v = constant.Make(ir.BigFloat(v)) - } - return v } diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index be4a689836cec..6fab74e61fe77 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -462,12 +462,16 @@ func (p *iexporter) doDecl(n *ir.Name) { } case ir.OLITERAL: + // TODO(mdempsky): Extend check to all declarations. + if n.Typecheck() == 0 { + base.FatalfAt(n.Pos(), "missed typecheck: %v", n) + } + // Constant. - // TODO(mdempsky): Do we still need this typecheck? If so, why? - n = Expr(n).(*ir.Name) w.tag('C') w.pos(n.Pos()) w.value(n.Type(), n.Val()) + w.constExt(n) case ir.OTYPE: if types.IsDotAlias(n.Sym()) { @@ -956,6 +960,17 @@ func (w *exportWriter) mpfloat(v constant.Value, typ *types.Type) { } } +func (w *exportWriter) mprat(v constant.Value) { + r, ok := constant.Val(v).(*big.Rat) + if !w.bool(ok) { + return + } + // TODO(mdempsky): Come up with a more efficient binary + // encoding before bumping iexportVersion to expose to + // gcimporter. + w.string(r.String()) +} + func (w *exportWriter) bool(b bool) bool { var x uint64 if b { @@ -971,7 +986,37 @@ func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } // Compiler-specific extensions. -func (w *exportWriter) varExt(n ir.Node) { +func (w *exportWriter) constExt(n *ir.Name) { + // Internally, we now represent untyped float and complex + // constants with infinite-precision rational numbers using + // go/constant, but the "public" export data format known to + // gcimporter only supports 512-bit floating point constants. + // In case rationals turn out to be a bad idea and we want to + // switch back to fixed-precision constants, for now we + // continue writing out the 512-bit truncation in the public + // data section, and write the exact, rational constant in the + // compiler's extension data. Also, we only need to worry + // about exporting rationals for declared constants, because + // constants that appear in an expression will already have + // been coerced to a concrete, fixed-precision type. + // + // Eventually, assuming we stick with using rationals, we + // should bump iexportVersion to support rationals, and do the + // whole gcimporter update song-and-dance. + // + // TODO(mdempsky): Prepare vocals for that. 
+ + switch n.Type() { + case types.UntypedFloat: + w.mprat(n.Val()) + case types.UntypedComplex: + v := n.Val() + w.mprat(constant.Real(v)) + w.mprat(constant.Imag(v)) + } +} + +func (w *exportWriter) varExt(n *ir.Name) { w.linkname(n.Sym()) w.symIdx(n.Sym()) } diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go index f2682257f3456..b73ef5176b958 100644 --- a/src/cmd/compile/internal/typecheck/iimport.go +++ b/src/cmd/compile/internal/typecheck/iimport.go @@ -303,7 +303,9 @@ func (r *importReader) doDecl(sym *types.Sym) *ir.Name { typ := r.typ() val := r.value(typ) - return importconst(r.p.ipkg, pos, sym, typ, val) + n := importconst(r.p.ipkg, pos, sym, typ, val) + r.constExt(n) + return n case 'F': typ := r.signature(nil) @@ -440,6 +442,15 @@ func (p *importReader) float(typ *types.Type) constant.Value { return constant.Make(&f) } +func (p *importReader) mprat(orig constant.Value) constant.Value { + if !p.bool() { + return orig + } + var rat big.Rat + rat.SetString(p.string()) + return constant.Make(&rat) +} + func (r *importReader) ident(selector bool) *types.Sym { name := r.string() if name == "" { @@ -641,7 +652,19 @@ func (r *importReader) byte() byte { // Compiler-specific extensions. -func (r *importReader) varExt(n ir.Node) { +func (r *importReader) constExt(n *ir.Name) { + switch n.Type() { + case types.UntypedFloat: + n.SetVal(r.mprat(n.Val())) + case types.UntypedComplex: + v := n.Val() + re := r.mprat(constant.Real(v)) + im := r.mprat(constant.Imag(v)) + n.SetVal(makeComplex(re, im)) + } +} + +func (r *importReader) varExt(n *ir.Name) { r.linkname(n.Sym()) r.symIdx(n.Sym()) } diff --git a/test/fixedbugs/issue7740.go b/test/fixedbugs/issue7740.go index 8f1afe86dae5e..6bc6249d7e0b6 100644 --- a/test/fixedbugs/issue7740.go +++ b/test/fixedbugs/issue7740.go @@ -21,7 +21,7 @@ func main() { var prec float64 switch runtime.Compiler { case "gc": - prec = 512 + prec = math.Inf(1) // exact precision using rational arithmetic case "gccgo": prec = 256 default: diff --git a/test/float_lit3.go b/test/float_lit3.go index c4d1aa567cef2..850d02c9c7f97 100644 --- a/test/float_lit3.go +++ b/test/float_lit3.go @@ -37,12 +37,11 @@ var x = []interface{}{ // If the compiler's internal floating point representation // is shorter than 1024 bits, it cannot distinguish max64+ulp64/2-1 and max64+ulp64/2. - // gc uses fewer than 1024 bits, so allow it to print the overflow error for the -1 case. float64(max64 + ulp64/2 - two1024/two256), // ok - float64(max64 + ulp64/2 - 1), // GC_ERROR "constant 1\.79769e\+308 overflows float64" + float64(max64 + ulp64/2 - 1), // ok float64(max64 + ulp64/2), // ERROR "constant 1\.79769e\+308 overflows float64" float64(-max64 - ulp64/2 + two1024/two256), // ok - float64(-max64 - ulp64/2 + 1), // GC_ERROR "constant -1\.79769e\+308 overflows float64" + float64(-max64 - ulp64/2 + 1), // ok float64(-max64 - ulp64/2), // ERROR "constant -1\.79769e\+308 overflows float64" } From cabffc199d6d71611c589fb21da27c61d683194d Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Thu, 21 Jan 2021 10:19:21 -0500 Subject: [PATCH 429/474] [dev.regabi] cmd/compile/internal: add internal ABI specification This adds a document specifying the internal ABI (specifically the calling convention). This document lives in the Go tree (rather than the proposal repository) because the intent is for it to track the reality in the compiler. Updates #40724. 
Change-Id: I583190080cd7d8cb1084f616fd1384d0f1f25725 Reviewed-on: https://go-review.googlesource.com/c/go/+/285292 Trust: Austin Clements Reviewed-by: Michael Knyszek Reviewed-by: Cherry Zhang --- src/cmd/compile/internal-abi.md | 539 ++++++++++++++++++++++++++++++++ 1 file changed, 539 insertions(+) create mode 100644 src/cmd/compile/internal-abi.md diff --git a/src/cmd/compile/internal-abi.md b/src/cmd/compile/internal-abi.md new file mode 100644 index 0000000000000..6f1fddd57ac4a --- /dev/null +++ b/src/cmd/compile/internal-abi.md @@ -0,0 +1,539 @@ +# Go internal ABI specification + +This document describes Go’s internal application binary interface +(ABI), known as ABIInternal. +This ABI is *unstable* and will change between Go versions. +If you’re writing assembly code, please instead refer to Go’s +[assembly documentation](/doc/asm.html), which describes Go’s stable +ABI, known as ABI0. + +All functions defined in Go source follow ABIInternal. +However, ABIInternal and ABI0 functions are able to call each other +through transparent *ABI wrappers*, described in the [internal calling +convention proposal](https://golang.org/design/27539-internal-abi). + +Go uses a common ABI design across all architectures. +We first describe the common ABI, and then cover per-architecture +specifics. + +*Rationale*: For the reasoning behind using a common ABI across +architectures instead of the platform ABI, see the [register-based Go +calling convention proposal](https://golang.org/design/40724-register-calling). + +## Argument and result passing + +Function calls pass arguments and results using a combination of the +stack and machine registers. +Each argument or result is passed either entirely in registers or +entirely on the stack. +Because access to registers is generally faster than access to the +stack, arguments and results are preferentially passed in registers. +However, any argument or result that contains a non-trivial array or +does not fit entirely in the remaining available registers is passed +on the stack. + +Each architecture defines a sequence of integer registers and a +sequence of floating-point registers. +At a high level, arguments and results are recursively broken down +into values of base types and these base values are assigned to +registers from these sequences. + +Arguments and results can share the same registers, but do not share +the same stack space. +Beyond the arguments and results passed on the stack, the caller also +reserves spill space on the stack for all register-based arguments +(but does not populate this space). + +The receiver, arguments, and results of function or method F are +assigned to registers using the following algorithm: + +1. Start with the full integer and floating-point register sequences + and an empty stack frame. +1. If F is a method, assign F’s receiver. +1. For each argument A of F, assign A. +1. Align the stack frame offset to the architecture’s pointer size. +1. Reset to the full integer and floating-point register sequences + (but do not reset the stack frame). +1. For each result R of F, assign R. +1. Align the stack frame offset to the architecture’s pointer size. +1. For each register-assigned receiver and argument of F, let T be its + type and stack-assign an empty value of type T. + This is the argument's (or receiver's) spill space. +1. Align the stack frame offset to the architecture’s pointer size. + +Assigning a receiver, argument, or result V works as follows: + +1. Register-assign V. +1. 
If step 1 failed, undo all register and stack assignments it + performed and stack-assign V. + +Register-assignment of a value V of underlying type T works as follows: + +1. If T is a boolean or integral type that fits in an integer + register, assign V to the next available integer register. +1. If T is an integral type that fits in two integer registers, assign + the least significant and most significant halves of V to the next + two available integer registers, respectively. +1. If T is a floating-point type and can be represented without loss + of precision in a floating-point register, assign V to the next + available floating-point register. +1. If T is a complex type, recursively register-assign its real and + imaginary parts. +1. If T is a pointer type, map type, channel type, or function type, + assign V to the next available integer register. +1. If T is a string type, interface type, or slice type, recursively + register-assign V’s components (2 for strings and interfaces, 3 for + slices). +1. If T is a struct type, recursively register-assign each field of V. +1. If T is an array type of length 0, do nothing. +1. If T is an array type of length 1, recursively register-assign its + one element. +1. If T is an array type of length > 1, fail. +1. If there is no available integer or floating-point register + available above, fail. +1. If any recursive assignment above fails, this register-assign fails. + +Stack-assignment of a value V of underlying type T works as follows: + +1. Align the current stack frame offset to T’s alignment. +1. Append V to the stack frame. + +(Note that any non-zero-sized struct type that ends in a zero-sized +field is implicitly padded with 1 byte to prevent past-the-end +pointers. +This applies to all structs, not just those passed as arguments.) + +The following diagram shows what the resulting argument frame looks +like on the stack: + + +------------------------------+ + | . . . | + | 2nd reg argument spill space | + | 1st reg argument spill space | + | | + | . . . | + | 2nd stack-assigned result | + | 1st stack-assigned result | + | | + | . . . | + | 2nd stack-assigned argument | + | 1st stack-assigned argument | + | stack-assigned receiver | + +------------------------------+ ↓ lower addresses + +(Note that, while stack diagrams conventionally have address 0 at the +bottom, if this were expressed as a Go struct the fields would appear +in the opposite order, starting with the stack-assigned receiver.) + +To perform a call, the caller reserves space starting at the lowest +address in its stack frame for the call stack frame, stores arguments +in the registers and argument stack slots determined by the above +algorithm, and performs the call. +At the time of a call, spill slots, result stack slots, and result +registers are assumed to be uninitialized. +Upon return, the callee must have stored results to all result +registers and result stack slots determined by the above algorithm. + +There are no callee-save registers, so a call may overwrite any +register that doesn’t have a fixed meaning, including argument +registers. 
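The abiutils patch earlier in this series implements this algorithm
against the compiler's `*types.Type`. As a rough, runnable sketch of
just the recursion above, using a deliberately simplified toy type
model with made-up names rather than the compiler's real data
structures:

    package main

    import "fmt"

    type Kind int

    const (
    	KInt    Kind = iota // bool, integer, or pointer-shaped: one integer register
    	KFloat              // floating-point: one floating-point register
    	KStruct             // recurse over the fields
    	KArray              // fails unless the length is 0 or 1
    )

    type Type struct {
    	Kind   Kind
    	Fields []*Type // struct fields
    	Elem   *Type   // array element
    	Len    int     // array length
    }

    type state struct{ ints, floats, maxInts, maxFloats int }

    // regAssign consumes registers for t, reporting failure if t needs
    // a register that isn't available or contains an array of length > 1.
    func (s *state) regAssign(t *Type) bool {
    	switch t.Kind {
    	case KInt:
    		s.ints++
    		return s.ints <= s.maxInts
    	case KFloat:
    		s.floats++
    		return s.floats <= s.maxFloats
    	case KStruct:
    		for _, f := range t.Fields {
    			if !s.regAssign(f) {
    				return false
    			}
    		}
    		return true
    	case KArray:
    		switch t.Len {
    		case 0:
    			return true
    		case 1:
    			return s.regAssign(t.Elem)
    		}
    		return false // length > 1: always stack-assigned
    	}
    	return false
    }

    // assign mirrors the two-step rule above: try registers first, and
    // on failure undo the partial register assignment and use the stack.
    func (s *state) assign(t *Type) string {
    	saved := *s
    	if s.regAssign(t) {
    		return "registers"
    	}
    	*s = saved // undo
    	return "stack"
    }

    func main() {
    	s := &state{maxInts: 9, maxFloats: 15} // amd64's register counts
    	pair := &Type{Kind: KStruct, Fields: []*Type{{Kind: KInt}, {Kind: KFloat}}}
    	arr := &Type{Kind: KArray, Len: 2, Elem: &Type{Kind: KInt}}
    	fmt.Println(s.assign(pair)) // registers
    	fmt.Println(s.assign(arr))  // stack
    }

(With maxInts and maxFloats set to zero, every value falls through to
the stack; that is the ABI0 equivalence noted in the rationale below,
and it is exactly what the abiutilsaux_test.go change above exercises
via abi.NewABIConfig(0, 0).)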
+ +### Example + +The function `func f(a1 uint8, a2 [2]uintptr, a3 uint8) (r1 struct { x +uintptr; y [2]uintptr }, r2 string)` has the following argument frame +layout on a 64-bit host with hypothetical integer registers R0–R9: + + +-------------------+ 48 + | alignment padding | 42 + | a3 argument spill | 41 + | a1 argument spill | 40 + | r1 result | 16 + | a2 argument | 0 + +-------------------+ + On entry: R0=a1, R1=a3 + On exit: R0=r2.base, R1=r2.len + +There are several things to note in this example. +First, a2 and r1 are stack-assigned because they contain arrays. +The other arguments and results are register-assigned. +Result r2 is decomposed into its components, which are individually +register-assigned. +On the stack, the stack-assigned arguments appear below the +stack-assigned results, which appear below the argument spill area. +Only arguments, not results, are assigned a spill area. + +### Rationale + +Each base value is assigned to its own register to optimize +construction and access. +An alternative would be to pack multiple sub-word values into +registers, or to simply map an argument's in-memory layout to +registers (this is common in C ABIs), but this typically adds cost to +pack and unpack these values. +Modern architectures have more than enough registers to pass all +arguments and results this way for nearly all functions (see the +appendix), so there’s little downside to spreading base values across +registers. + +Arguments that can’t be fully assigned to registers are passed +entirely on the stack in case the callee takes the address of that +argument. +If an argument could be split across the stack and registers and the +callee took its address, it would need to be reconstructed in memory, +a process that would be proportional to the size of the argument. + +Non-trivial arrays are always passed on the stack because indexing +into an array typically requires a computed offset, which generally +isn’t possible with registers. +Arrays in general are rare in function signatures (only 0.7% of +functions in the Go 1.15 standard library and 0.2% in kubelet). +We considered allowing array fields to be passed on the stack while +the rest of an argument’s fields are passed in registers, but this +creates the same problems as other large structs if the callee takes +the address of an argument, and would benefit <0.1% of functions in +kubelet (and even these very little). + +We make exceptions for 0 and 1-element arrays because these don’t +require computed offsets, and 1-element arrays are already decomposed +in the compiler’s SSA. + +The stack assignment algorithm above is equivalent to Go’s stack-based +ABI0 calling convention if there are zero architecture registers. +This is intended to ease the transition to the register-based internal +ABI and make it easy for the compiler to generate either calling +convention. +An architecture may still define register meanings that aren’t +compatible with ABI0, but these differences should be easy to account +for in the compiler. + +The algorithm reserves spill space for arguments in the caller’s frame +so that the compiler can generate a stack growth path that spills into +this reserved space. +If the callee has to grow the stack, it may not be able to reserve +enough additional stack space in its own frame to spill these, which +is why it’s important that the caller do so. +These slots also act as the home location if these arguments need to +be spilled for any other reason, which simplifies traceback printing. 
+ +There are several options for how to lay out the argument spill space. +We chose to lay out each argument in its type's usual memory layout +but to separate the spill space from the regular argument space. +Using the usual memory layout simplifies the compiler because it +already understands this layout. +Also, if a function takes the address of a register-assigned argument, +the compiler must spill that argument to memory in its usual in-memory +layout and it's more convenient to use the argument spill space for +this purpose. + +Alternatively, the spill space could be structured around argument +registers. +In this approach, the stack growth spill path would spill each +argument register to a register-sized stack word. +However, if the function takes the address of a register-assigned +argument, the compiler would have to reconstruct it in memory layout +elsewhere on the stack. + +The spill space could also be interleaved with the stack-assigned +arguments so the arguments appear in order whether they are register- +or stack-assigned. +This would be close to ABI0, except that register-assigned arguments +would be uninitialized on the stack and there's no need to reserve +stack space for register-assigned results. +We expect separating the spill space to perform better because of +memory locality. +Separating the space is also potentially simpler for `reflect` calls +because this allows `reflect` to summarize the spill space as a single +number. +Finally, the long-term intent is to remove reserved spill slots +entirely – allowing most functions to be called without any stack +setup and easing the introduction of callee-save registers – and +separating the spill space makes that transition easier. + +## Closures + +A func value (e.g., `var x func()`) is a pointer to a closure object. +A closure object begins with a pointer-sized program counter +representing the entry point of the function, followed by zero or more +bytes containing the closed-over environment. + +Closure calls follow the same conventions as static function and +method calls, with one addition. Each architecture specifies a +*closure context pointer* register and calls to closures store the +address of the closure object in the closure context pointer register +prior to the call. + +## Software floating-point mode + +In "softfloat" mode, the ABI simply treats the hardware as having zero +floating-point registers. +As a result, any arguments containing floating-point values will be +passed on the stack. + +*Rationale*: Softfloat mode is about compatibility over performance +and is not commonly used. +Hence, we keep the ABI as simple as possible in this case, rather than +adding additional rules for passing floating-point values in integer +registers. + +## Architecture specifics + +This section describes per-architecture register mappings, as well as +other per-architecture special cases. + +### amd64 architecture + +The amd64 architecture uses the following sequence of 9 registers for +integer arguments and results: + + RAX, RBX, RCX, RDI, RSI, R8, R9, R10, R11 + +It uses X0 – X14 for floating-point arguments and results. + +*Rationale*: These sequences are chosen from the available registers +to be relatively easy to remember. + +Registers R12 and R13 are permanent scratch registers. +R15 is a scratch register except in dynamically linked binaries. 
+ +*Rationale*: Some operations such as stack growth and reflection calls +need dedicated scratch registers in order to manipulate call frames +without corrupting arguments or results. + +Special-purpose registers are as follows: + +| Register | Call meaning | Body meaning | +| --- | --- | --- | +| RSP | Stack pointer | Fixed | +| RBP | Frame pointer | Fixed | +| RDX | Closure context pointer | Scratch | +| R12 | None | Scratch | +| R13 | None | Scratch | +| R14 | Current goroutine | Scratch | +| R15 | GOT reference temporary | Fixed if dynlink | +| X15 | Zero value | Fixed | + +TODO: We may start with the existing TLS-based g and move to R14 +later. + +*Rationale*: These register meanings are compatible with Go’s +stack-based calling convention except for R14 and X15, which will have +to be restored on transitions from ABI0 code to ABIInternal code. +In ABI0, these are undefined, so transitions from ABIInternal to ABI0 +can ignore these registers. + +*Rationale*: For the current goroutine pointer, we chose a register +that requires an additional REX byte. +While this adds one byte to every function prologue, it is hardly ever +accessed outside the function prologue and we expect making more +single-byte registers available to be a net win. + +*Rationale*: We designate X15 as a fixed zero register because +functions often have to bulk zero their stack frames, and this is more +efficient with a designated zero register. + +#### Stack layout + +The stack pointer, RSP, grows down and is always aligned to 8 bytes. + +The amd64 architecture does not use a link register. + +A function's stack frame is laid out as follows: + + +------------------------------+ + | return PC | + | RBP on entry | + | ... locals ... | + | ... outgoing arguments ... | + +------------------------------+ ↓ lower addresses + +The "return PC" is pushed as part of the standard amd64 `CALL` +operation. +On entry, a function subtracts from RSP to open its stack frame and +saves the value of RBP directly below the return PC. +A leaf function that does not require any stack space may omit the +saved RBP. + +The Go ABI's use of RBP as a frame pointer register is compatible with +amd64 platform conventions so that Go can inter-operate with platform +debuggers and profilers. + +#### Flags + +The direction flag (D) is always cleared (set to the “forward” +direction) at a call. +The arithmetic status flags are treated like scratch registers and not +preserved across calls. +All other bits in RFLAGS are system flags. + +The CPU is always in MMX technology state (not x87 mode). + +*Rationale*: Go on amd64 uses the XMM registers and never uses the x87 +registers, so it makes sense to assume the CPU is in MMX mode. +Otherwise, any function that used the XMM registers would have to +execute an EMMS instruction before calling another function or +returning (this is the case in the SysV ABI). + +At calls, the MXCSR control bits are always set as follows: + +| Flag | Bit | Value | Meaning | +| --- | --- | --- | --- | +| FZ | 15 | 0 | Do not flush to zero | +| RC | 14/13 | 0 (RN) | Round to nearest | +| PM | 12 | 1 | Precision masked | +| UM | 11 | 1 | Underflow masked | +| OM | 10 | 1 | Overflow masked | +| ZM | 9 | 1 | Divide-by-zero masked | +| DM | 8 | 1 | Denormal operations masked | +| IM | 7 | 1 | Invalid operations masked | +| DAZ | 6 | 0 | Do not zero de-normals | + +The MXCSR status bits are callee-save. 
+ +*Rationale*: Having a fixed MXCSR control configuration allows Go +functions to use SSE operations without modifying or saving the MXCSR. +Functions are allowed to modify it between calls (as long as they +restore it), but as of this writing Go code never does. +The above fixed configuration matches the process initialization +control bits specified by the ELF AMD64 ABI. + +The x87 floating-point control word is not used by Go on amd64. + +## Future directions + +### Spill path improvements + +The ABI currently reserves spill space for argument registers so the +compiler can statically generate an argument spill path before calling +into `runtime.morestack` to grow the stack. +This ensures there will be sufficient spill space even when the stack +is nearly exhausted and keeps stack growth and stack scanning +essentially unchanged from ABI0. + +However, this wastes stack space (the median wastage is 16 bytes per +call), resulting in larger stacks and increased cache footprint. +A better approach would be to reserve stack space only when spilling. +One way to ensure enough space is available to spill would be for +every function to ensure there is enough space for the function's own +frame *as well as* the spill space of all functions it calls. +For most functions, this would change the threshold for the prologue +stack growth check. +For `nosplit` functions, this would change the threshold used in the +linker's static stack size check. + +Allocating spill space in the callee rather than the caller may also +allow for faster reflection calls in the common case where a function +takes only register arguments, since it would allow reflection to make +these calls directly without allocating any frame. + +The statically-generated spill path also increases code size. +It is possible to instead have a generic spill path in the runtime, as +part of `morestack`. +However, this complicates reserving the spill space, since spilling +all possible register arguments would, in most cases, take +significantly more space than spilling only those used by a particular +function. +Some options are to spill to a temporary space and copy back only the +registers used by the function, or to grow the stack if necessary +before spilling to it (using a temporary space if necessary), or to +use a heap-allocated space if insufficient stack space is available. +These options all add enough complexity that we will have to make this +decision based on the actual code size growth caused by the static +spill paths. + +### Clobber sets + +As defined, the ABI does not use callee-save registers. +This significantly simplifies the garbage collector and the compiler's +register allocator, but at some performance cost. +A potentially better balance for Go code would be to use *clobber +sets*: for each function, the compiler records the set of registers it +clobbers (including those clobbered by functions it calls) and any +register not clobbered by function F can remain live across calls to +F. + +This is generally a good fit for Go because Go's package DAG allows +function metadata like the clobber set to flow up the call graph, even +across package boundaries. +Clobber sets would require relatively little change to the garbage +collector, unlike general callee-save registers. +One disadvantage of clobber sets over callee-save registers is that +they don't help with indirect function calls or interface method +calls, since static information isn't available in these cases. 
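+
+As a rough sketch of the idea (hypothetical types and names; nothing
+like this exists in the compiler today), a clobber set could be
+computed bottom-up over the call graph, with indirect calls falling
+back to the conservative "all registers" answer:
+
+    type regMask uint64
+
+    type fn struct {
+        ownClobbers regMask // registers written directly by the body
+        callees     []*fn   // statically known callees
+        indirect    bool    // makes an indirect or interface call
+    }
+
+    // clobberSet returns the registers a call to f may clobber.
+    // It assumes an acyclic call graph; recursion would require
+    // computing a fixed point instead.
+    func clobberSet(f *fn, allRegs regMask) regMask {
+        if f.indirect {
+            return allRegs // no static callee information
+        }
+        m := f.ownClobbers
+        for _, c := range f.callees {
+            m |= clobberSet(c, allRegs)
+        }
+        return m
+    }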
+ +### Large aggregates + +Go encourages passing composite values by value, and this simplifies +reasoning about mutation and races. +However, this comes at a performance cost for large composite values. +It may be possible to instead transparently pass large composite +values by reference and delay copying until it is actually necessary. + +## Appendix: Register usage analysis + +In order to understand the impacts of the above design on register +usage, we +[analyzed](https://github.com/aclements/go-misc/tree/master/abi) the +impact of the above ABI on a large code base: cmd/kubelet from +[Kubernetes](https://github.com/kubernetes/kubernetes) at tag v1.18.8. + +The following table shows the impact of different numbers of available +integer and floating-point registers on argument assignment: + +``` +| | | | stack args | spills | stack total | +| ints | floats | % fit | p50 | p95 | p99 | p50 | p95 | p99 | p50 | p95 | p99 | +| 0 | 0 | 6.3% | 32 | 152 | 256 | 0 | 0 | 0 | 32 | 152 | 256 | +| 0 | 8 | 6.4% | 32 | 152 | 256 | 0 | 0 | 0 | 32 | 152 | 256 | +| 1 | 8 | 21.3% | 24 | 144 | 248 | 8 | 8 | 8 | 32 | 152 | 256 | +| 2 | 8 | 38.9% | 16 | 128 | 224 | 8 | 16 | 16 | 24 | 136 | 240 | +| 3 | 8 | 57.0% | 0 | 120 | 224 | 16 | 24 | 24 | 24 | 136 | 240 | +| 4 | 8 | 73.0% | 0 | 120 | 216 | 16 | 32 | 32 | 24 | 136 | 232 | +| 5 | 8 | 83.3% | 0 | 112 | 216 | 16 | 40 | 40 | 24 | 136 | 232 | +| 6 | 8 | 87.5% | 0 | 112 | 208 | 16 | 48 | 48 | 24 | 136 | 232 | +| 7 | 8 | 89.8% | 0 | 112 | 208 | 16 | 48 | 56 | 24 | 136 | 232 | +| 8 | 8 | 91.3% | 0 | 112 | 200 | 16 | 56 | 64 | 24 | 136 | 232 | +| 9 | 8 | 92.1% | 0 | 112 | 192 | 16 | 56 | 72 | 24 | 136 | 232 | +| 10 | 8 | 92.6% | 0 | 104 | 192 | 16 | 56 | 72 | 24 | 136 | 232 | +| 11 | 8 | 93.1% | 0 | 104 | 184 | 16 | 56 | 80 | 24 | 128 | 232 | +| 12 | 8 | 93.4% | 0 | 104 | 176 | 16 | 56 | 88 | 24 | 128 | 232 | +| 13 | 8 | 94.0% | 0 | 88 | 176 | 16 | 56 | 96 | 24 | 128 | 232 | +| 14 | 8 | 94.4% | 0 | 80 | 152 | 16 | 64 | 104 | 24 | 128 | 232 | +| 15 | 8 | 94.6% | 0 | 80 | 152 | 16 | 64 | 112 | 24 | 128 | 232 | +| 16 | 8 | 94.9% | 0 | 16 | 152 | 16 | 64 | 112 | 24 | 128 | 232 | +| ∞ | 8 | 99.8% | 0 | 0 | 0 | 24 | 112 | 216 | 24 | 120 | 216 | +``` + +The first two columns show the number of available integer and +floating-point registers. +The first row shows the results for 0 integer and 0 floating-point +registers, which is equivalent to ABI0. +We found that any reasonable number of floating-point registers has +the same effect, so we fixed it at 8 for all other rows. + +The “% fit” column gives the fraction of functions where all arguments +and results are register-assigned and no arguments are passed on the +stack. +The three “stack args” columns give the median, 95th and 99th +percentile number of bytes of stack arguments. +The “spills” columns likewise summarize the number of bytes in +on-stack spill space. +And “stack total” summarizes the sum of stack arguments and on-stack +spill slots. +Note that these are three different distributions; for example, +there’s no single function that takes 0 stack argument bytes, 16 spill +bytes, and 24 total stack bytes. + +From this, we can see that the fraction of functions that fit entirely +in registers grows very slowly once it reaches about 90%, though +curiously there is a small minority of functions that could benefit +from a huge number of registers. +Making 9 integer registers available on amd64 puts it in this realm. +We also see that the stack space required for most functions is fairly +small. 
+While the increasing space required for spills largely balances out +the decreasing space required for stack arguments as the number of +available registers increases, there is a general reduction in the +total stack space required with more available registers. +This does, however, suggest that eliminating spill slots in the future +would noticeably reduce stack requirements. From 6f5e79f470e8956e1c01cb93802d52aee5c307b5 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Sat, 23 Jan 2021 16:58:34 -0500 Subject: [PATCH 430/474] [dev.regabi] cmd/compile/internal: specify memory layout This CL expands internal-abi.md to cover Go's memory layout rules and then uses this to specify the calling convention more precisely. Change-Id: Ifeef9e49d9ccc8c7333dec81bdd47b511b028469 Reviewed-on: https://go-review.googlesource.com/c/go/+/286073 Trust: Austin Clements Reviewed-by: David Chase Reviewed-by: Michael Knyszek Reviewed-by: Than McIntosh Reviewed-by: Cherry Zhang --- src/cmd/compile/internal-abi.md | 223 ++++++++++++++++++++++---------- 1 file changed, 156 insertions(+), 67 deletions(-) diff --git a/src/cmd/compile/internal-abi.md b/src/cmd/compile/internal-abi.md index 6f1fddd57ac4a..f4ef2cc86949f 100644 --- a/src/cmd/compile/internal-abi.md +++ b/src/cmd/compile/internal-abi.md @@ -2,6 +2,8 @@ This document describes Go’s internal application binary interface (ABI), known as ABIInternal. +Go's ABI defines the layout of data in memory and the conventions for +calling between Go functions. This ABI is *unstable* and will change between Go versions. If you’re writing assembly code, please instead refer to Go’s [assembly documentation](/doc/asm.html), which describes Go’s stable @@ -20,7 +22,89 @@ specifics. architectures instead of the platform ABI, see the [register-based Go calling convention proposal](https://golang.org/design/40724-register-calling). -## Argument and result passing +## Memory layout + +Go's built-in types have the following sizes and alignments. +Many, though not all, of these sizes are guaranteed by the [language +specification](/doc/go_spec.html#Size_and_alignment_guarantees). +Those that aren't guaranteed may change in future versions of Go (for +example, we've considered changing the alignment of int64 on 32-bit). + +| Type | 64-bit | | 32-bit | | +| --- | --- | --- | --- | --- | +| | Size | Align | Size | Align | +| bool, uint8, int8 | 1 | 1 | 1 | 1 | +| uint16, int16 | 2 | 2 | 2 | 2 | +| uint32, int32 | 4 | 4 | 4 | 4 | +| uint64, int64 | 8 | 8 | 8 | 4 | +| int, uint | 8 | 8 | 4 | 4 | +| float32 | 4 | 4 | 4 | 4 | +| float64 | 8 | 8 | 8 | 4 | +| complex64 | 8 | 4 | 8 | 4 | +| complex128 | 16 | 8 | 16 | 4 | +| uintptr, *T, unsafe.Pointer | 8 | 8 | 4 | 4 | + +The types `byte` and `rune` are aliases for `uint8` and `int32`, +respectively, and hence have the same size and alignment as these +types. + +The layout of `map`, `chan`, and `func` types is equivalent to *T. + +To describe the layout of the remaining composite types, we first +define the layout of a *sequence* S of N fields with types +t1, t2, ..., tN. 
+We define the byte offset at which each field begins relative to a
+base address of 0, as well as the size and alignment of the sequence
+as follows:
+
+```
+offset(S, i) = 0  if i = 1
+             = align(offset(S, i-1) + sizeof(t_(i-1)), alignof(t_i))
+alignof(S)   = 1  if N = 0
+             = max(alignof(t_i) | 1 <= i <= N)
+sizeof(S)    = 0  if N = 0
+             = align(offset(S, N) + sizeof(t_N), alignof(S))
+```
+
+Where sizeof(T) and alignof(T) are the size and alignment of type T,
+respectively, and align(x, y) rounds x up to a multiple of y.
+
+The `interface{}` type is a sequence of 1. a pointer to the runtime type
+description for the interface's dynamic type and 2. an `unsafe.Pointer`
+data field.
+Any other interface type (besides the empty interface) is a sequence
+of 1. a pointer to the runtime "itab" that gives the method pointers and
+the type of the data field and 2. an `unsafe.Pointer` data field.
+An interface can be "direct" or "indirect" depending on the dynamic
+type: a direct interface stores the value directly in the data field,
+and an indirect interface stores a pointer to the value in the data
+field.
+An interface can only be direct if the value consists of a single
+pointer word.
+
+An array type `[N]T` is a sequence of N fields of type T.
+
+The slice type `[]T` is a sequence of a `*[cap]T` pointer to the slice
+backing store, an `int` giving the `len` of the slice, and an `int`
+giving the `cap` of the slice.
+
+The `string` type is a sequence of a `*[len]byte` pointer to the
+string backing store, and an `int` giving the `len` of the string.
+
+A struct type `struct { f1 t1; ...; fM tM }` is laid out as the
+sequence t1, ..., tM, tP, where tP is either:
+
+- Type `byte` if sizeof(tM) = 0 and any of sizeof(t*i*) ≠ 0.
+- Empty (size 0 and align 1) otherwise.
+
+The padding byte prevents creating a past-the-end pointer by taking
+the address of the final, empty fM field.
+
+Note that user-written assembly code should generally not depend on Go
+type layout and should instead use the constants defined in
+[`go_asm.h`](/doc/asm.html#data-offsets).
+
+## Function call argument and result passing
 
 Function calls pass arguments and results using a combination of the
 stack and machine registers.
@@ -45,42 +129,48 @@ reserves spill space on the stack for all register-based arguments
 (but does not populate this space).
 
 The receiver, arguments, and results of function or method F are
-assigned to registers using the following algorithm:
+assigned to registers or the stack using the following algorithm:
 
-1. Start with the full integer and floating-point register sequences
-   and an empty stack frame.
+1. Let NI and NFP be the length of integer and floating-point register
+   sequences defined by the architecture.
+   Let I and FP be 0; these are the indexes of the next integer and
+   floating-point register.
+   Let S, the type sequence defining the stack frame, be empty.
 1. If F is a method, assign F’s receiver.
 1. For each argument A of F, assign A.
-1. Align the stack frame offset to the architecture’s pointer size.
-1. Reset to the full integer and floating-point register sequences
-   (but do not reset the stack frame).
+1. Add a pointer-alignment field to S. This has size 0 and the same
+   alignment as `uintptr`.
+1. Reset I and FP to 0.
 1. For each result R of F, assign R.
-1. Align the stack frame offset to the architecture’s pointer size.
+1. Add a pointer-alignment field to S.
 1. For each register-assigned receiver and argument of F, let T be its
-   type and stack-assign an empty value of type T.
-   This is the argument's (or receiver's) spill space.
-1. Align the stack frame offset to the architecture’s pointer size.
+   type and add T to the stack sequence S.
+   This is the argument's (or receiver's) spill space and will be
+   uninitialized at the call.
+1. Add a pointer-alignment field to S.
 
-Assigning a receiver, argument, or result V works as follows:
+Assigning a receiver, argument, or result V of underlying type T works
+as follows:
 
-1. Register-assign V.
-1. If step 1 failed, undo all register and stack assignments it
-   performed and stack-assign V.
+1. Remember I and FP.
+1. Try to register-assign V.
+1. If step 2 failed, reset I and FP to the values from step 1, add T
+   to the stack sequence S, and assign V to this field in S.
 
 Register-assignment of a value V of underlying type T works as follows:
 
 1. If T is a boolean or integral type that fits in an integer
-   register, assign V to the next available integer register.
+   register, assign V to register I and increment I.
 1. If T is an integral type that fits in two integer registers, assign
-   the least significant and most significant halves of V to the next
-   two available integer registers, respectively.
+   the least significant and most significant halves of V to registers
+   I and I+1, respectively, and increment I by 2.
 1. If T is a floating-point type and can be represented without loss
-   of precision in a floating-point register, assign V to the next
-   available floating-point register.
+   of precision in a floating-point register, assign V to register FP
+   and increment FP.
 1. If T is a complex type, recursively register-assign its real and
    imaginary parts.
 1. If T is a pointer type, map type, channel type, or function type,
-   assign V to the next available integer register.
+   assign V to register I and increment I.
 1. If T is a string type, interface type, or slice type, recursively
    register-assign V’s components (2 for strings and interfaces, 3 for
    slices).
@@ -89,22 +179,17 @@ Register-assignment of a value V of underlying type T works as follows:
 1. If T is an array type of length 1, recursively register-assign its
    one element.
 1. If T is an array type of length > 1, fail.
-1. If there is no available integer or floating-point register
-   available above, fail.
-1. If any recursive assignment above fails, this register-assign fails.
-
-Stack-assignment of a value V of underlying type T works as follows:
-
-1. Align the current stack frame offset to T’s alignment.
-1. Append V to the stack frame.
-
-(Note that any non-zero-sized struct type that ends in a zero-sized
-field is implicitly padded with 1 byte to prevent past-the-end
-pointers.
-This applies to all structs, not just those passed as arguments.)
-
-The following diagram shows what the resulting argument frame looks
-like on the stack:
+1. If I > NI or FP > NFP, fail.
+1. If any recursive assignment above fails, fail.
+
+The above algorithm produces an assignment of each receiver, argument,
+and result to registers or to a field in the stack sequence.
+The final stack sequence looks like: stack-assigned receiver,
+stack-assigned arguments, pointer-alignment, stack-assigned results,
+pointer-alignment, spill space for each register-assigned argument,
+pointer-alignment.
+The following diagram shows what this stack frame looks like on the
+stack, using the typical convention where address 0 is at the bottom:
 
     +------------------------------+
     | . . .                        |
@@ -121,18 +206,14 @@ like on the stack:
     | stack-assigned receiver      |
     +------------------------------+ ↓ lower addresses
 
-(Note that, while stack diagrams conventionally have address 0 at the
-bottom, if this were expressed as a Go struct the fields would appear
-in the opposite order, starting with the stack-assigned receiver.)
-
 To perform a call, the caller reserves space starting at the lowest
 address in its stack frame for the call stack frame, stores arguments
-in the registers and argument stack slots determined by the above
+in the registers and argument stack fields determined by the above
 algorithm, and performs the call.
-At the time of a call, spill slots, result stack slots, and result
-registers are assumed to be uninitialized.
+At the time of a call, spill space, result stack fields, and result
+registers are left uninitialized.
 Upon return, the callee must have stored results to all result
-registers and result stack slots determined by the above algorithm.
+registers and result stack fields determined by the above algorithm.
 
 There are no callee-save registers, so a call may overwrite any
 register that doesn’t have a fixed meaning, including argument
@@ -140,28 +221,35 @@ registers.
 
 ### Example
 
-The function `func f(a1 uint8, a2 [2]uintptr, a3 uint8) (r1 struct { x
-uintptr; y [2]uintptr }, r2 string)` has the following argument frame
-layout on a 64-bit host with hypothetical integer registers R0–R9:
+Consider the function `func f(a1 uint8, a2 [2]uintptr, a3 uint8) (r1
+struct { x uintptr; y [2]uintptr }, r2 string)` on a 64-bit
+architecture with hypothetical integer registers R0–R9.
+
+On entry, `a1` is assigned to `R0`, `a3` is assigned to `R1`, and the
+stack frame is laid out in the following sequence:
+
+	a2      [2]uintptr
+	r1.x    uintptr
+	r1.y    [2]uintptr
+	a1Spill uint8
+	a3Spill uint8
+	_       [6]uint8 // alignment padding
+
+In the stack frame, only the `a2` field is initialized on entry; the
+rest of the frame is left uninitialized.
 
-    +-------------------+ 48
-    | alignment padding | 42
-    | a3 argument spill | 41
-    | a1 argument spill | 40
-    | r1 result         | 16
-    | a2 argument       | 0
-    +-------------------+
-    On entry: R0=a1, R1=a3
-    On exit: R0=r2.base, R1=r2.len
+On exit, `r2.base` is assigned to `R0`, `r2.len` is assigned to `R1`,
+and `r1.x` and `r1.y` are initialized in the stack frame.
 
 There are several things to note in this example.
-First, a2 and r1 are stack-assigned because they contain arrays.
+First, `a2` and `r1` are stack-assigned because they contain arrays.
 The other arguments and results are register-assigned.
-Result r2 is decomposed into its components, which are individually
+Result `r2` is decomposed into its components, which are individually
 register-assigned.
-On the stack, the stack-assigned arguments appear below the
-stack-assigned results, which appear below the argument spill area.
-Only arguments, not results, are assigned a spill area.
+On the stack, the stack-assigned arguments appear at lower addresses
+than the stack-assigned results, which appear at lower addresses than
+the argument spill area.
+Only arguments, not results, are assigned a spill area on the stack.
 
 ### Rationale
 
@@ -196,9 +284,9 @@ kubelet (and even these very little).
 
 We make exceptions for 0 and 1-element arrays because these don’t
 require computed offsets, and 1-element arrays are already decomposed
-in the compiler’s SSA.
+in the compiler’s SSA representation.
 
-The stack assignment algorithm above is equivalent to Go’s stack-based
+The ABI assignment algorithm above is equivalent to Go’s stack-based
 ABI0 calling convention if there are zero architecture registers.
 This is intended to ease the transition to the register-based internal
 ABI and make it easy for the compiler to generate either calling
@@ -217,12 +305,13 @@ These slots also act as the home location if these arguments need to
 be spilled for any other reason, which simplifies traceback printing.
 
 There are several options for how to lay out the argument spill space.
-We chose to lay out each argument in its type's usual memory layout
-but to separate the spill space from the regular argument space.
+We chose to lay out each argument according to its type's usual memory
+layout but to separate the spill space from the regular argument
+space.
 Using the usual memory layout simplifies the compiler because it
 already understands this layout.
 Also, if a function takes the address of a register-assigned argument,
-the compiler must spill that argument to memory in its usual in-memory
+the compiler must spill that argument to memory in its usual memory
 layout and it's more convenient to use the argument spill space for
 this purpose.
 

From 7eaaf28caee0442f2376735ac28de252c7f4baae Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Mon, 25 Jan 2021 14:14:10 -0800
Subject: [PATCH 431/474] [dev.regabi] cmd/compile: disallow taking address of SSA'd values

Adds some extra validation that the frontend is setting flags like
Addrtaken correctly.

Change-Id: Iffde83e32ba1c4c917ab8cb3fe410a4f623cf635
Reviewed-on: https://go-review.googlesource.com/c/go/+/286434
Run-TryBot: Matthew Dempsky
TryBot-Result: Go Bot
Trust: Matthew Dempsky
Reviewed-by: Keith Randall
---
 src/cmd/compile/internal/ssagen/ssa.go | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index ecf3294082861..e49a9716fead5 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -434,6 +434,7 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
 	// bitmask showing which of the open-coded defers in this function
 	// have been activated.
 	deferBitsTemp := typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT8])
+	deferBitsTemp.SetAddrtaken(true)
 	s.deferBitsTemp = deferBitsTemp
 	// For this value, AuxInt is initialized to zero by default
 	startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[types.TUINT8])
@@ -5086,6 +5087,10 @@ func (s *state) addr(n ir.Node) *ssa.Value {
 		defer s.popLine()
 	}
 
+	if s.canSSA(n) {
+		s.Fatalf("addr of canSSA expression: %+v", n)
+	}
+
 	t := types.NewPtr(n.Type())
 	linksymOffset := func(lsym *obj.LSym, offset int64) *ssa.Value {
 		v := s.entryNewValue1A(ssa.OpAddr, t, lsym, s.sb)

From f7dad5eae43d5feb77c16fbd892a5a24a4d309ae Mon Sep 17 00:00:00 2001
From: David Chase
Date: Fri, 22 Jan 2021 22:53:47 -0500
Subject: [PATCH 432/474] [dev.regabi] cmd/compile: remove leftover code from late call lowering work

It's no longer conditional.
Change-Id: I697bb0e9ffe9644ec4d2766f7e8be8b82d3b0638 Reviewed-on: https://go-review.googlesource.com/c/go/+/286013 Trust: David Chase Run-TryBot: David Chase TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ssa/compile.go | 1 - src/cmd/compile/internal/ssa/config.go | 8 - src/cmd/compile/internal/ssa/decompose.go | 4 - src/cmd/compile/internal/ssa/expand_calls.go | 3 - src/cmd/compile/internal/ssa/gen/dec64.rules | 8 +- .../compile/internal/ssa/gen/decArgs.rules | 58 ---- .../compile/internal/ssa/gen/decArgsOps.go | 20 -- src/cmd/compile/internal/ssa/rewritedec64.go | 16 +- .../compile/internal/ssa/rewritedecArgs.go | 247 ------------------ src/cmd/compile/internal/ssagen/ssa.go | 211 ++++----------- 10 files changed, 63 insertions(+), 513 deletions(-) delete mode 100644 src/cmd/compile/internal/ssa/gen/decArgs.rules delete mode 100644 src/cmd/compile/internal/ssa/gen/decArgsOps.go delete mode 100644 src/cmd/compile/internal/ssa/rewritedecArgs.go diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go index 63994d1778ed0..c267274366b35 100644 --- a/src/cmd/compile/internal/ssa/compile.go +++ b/src/cmd/compile/internal/ssa/compile.go @@ -431,7 +431,6 @@ var passes = [...]pass{ {name: "early copyelim", fn: copyelim}, {name: "early deadcode", fn: deadcode}, // remove generated dead code to avoid doing pointless work during opt {name: "short circuit", fn: shortcircuit}, - {name: "decompose args", fn: decomposeArgs, required: !go116lateCallExpansion, disabled: go116lateCallExpansion}, // handled by late call lowering {name: "decompose user", fn: decomposeUser, required: true}, {name: "pre-opt deadcode", fn: deadcode}, {name: "opt", fn: opt, required: true}, // NB: some generic rules know the name of the opt pass. TODO: split required rules and optimizing rules diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go index 8dc2ee8213bda..e952c73d9b487 100644 --- a/src/cmd/compile/internal/ssa/config.go +++ b/src/cmd/compile/internal/ssa/config.go @@ -179,14 +179,6 @@ type Frontend interface { MyImportPath() string } -const go116lateCallExpansion = true - -// LateCallExpansionEnabledWithin returns true if late call expansion should be tested -// within compilation of a function/method. -func LateCallExpansionEnabledWithin(f *Func) bool { - return go116lateCallExpansion -} - // NewConfig returns a new configuration object for the given architecture. func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config { c := &Config{arch: arch, Types: types} diff --git a/src/cmd/compile/internal/ssa/decompose.go b/src/cmd/compile/internal/ssa/decompose.go index bf7f1e826b17e..ea988e44f61a6 100644 --- a/src/cmd/compile/internal/ssa/decompose.go +++ b/src/cmd/compile/internal/ssa/decompose.go @@ -219,10 +219,6 @@ func decomposeInterfacePhi(v *Value) { v.AddArg(data) } -func decomposeArgs(f *Func) { - applyRewrite(f, rewriteBlockdecArgs, rewriteValuedecArgs, removeDeadValues) -} - func decomposeUser(f *Func) { for _, b := range f.Blocks { for _, v := range b.Values { diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go index af994d4b5b6e2..d89d74370376c 100644 --- a/src/cmd/compile/internal/ssa/expand_calls.go +++ b/src/cmd/compile/internal/ssa/expand_calls.go @@ -42,9 +42,6 @@ func expandCalls(f *Func) { // With the current ABI, the outputs need to be converted to loads, which will all use the call's // memory output as their input. 
- if !LateCallExpansionEnabledWithin(f) { - return - } debug := f.pass.debug > 0 if debug { diff --git a/src/cmd/compile/internal/ssa/gen/dec64.rules b/src/cmd/compile/internal/ssa/gen/dec64.rules index 9297ed8d2e0a0..b0f10d0a0f4e9 100644 --- a/src/cmd/compile/internal/ssa/gen/dec64.rules +++ b/src/cmd/compile/internal/ssa/gen/dec64.rules @@ -42,20 +42,20 @@ (Store {hi.Type} dst hi mem)) // These are not enabled during decomposeBuiltin if late call expansion, but they are always enabled for softFloat -(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") => +(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") => (Int64Make (Arg {n} [off+4]) (Arg {n} [off])) -(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") => +(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") => (Int64Make (Arg {n} [off+4]) (Arg {n} [off])) -(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") => +(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") => (Int64Make (Arg {n} [off]) (Arg {n} [off+4])) -(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") => +(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") => (Int64Make (Arg {n} [off]) (Arg {n} [off+4])) diff --git a/src/cmd/compile/internal/ssa/gen/decArgs.rules b/src/cmd/compile/internal/ssa/gen/decArgs.rules deleted file mode 100644 index 1c9a0bb23de24..0000000000000 --- a/src/cmd/compile/internal/ssa/gen/decArgs.rules +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Decompose compound argument values -// Do this early to simplify tracking names for debugging. 
- -(Arg {n} [off]) && v.Type.IsString() => - (StringMake - (Arg {n} [off]) - (Arg {n} [off+int32(config.PtrSize)])) - -(Arg {n} [off]) && v.Type.IsSlice() => - (SliceMake - (Arg {n} [off]) - (Arg {n} [off+int32(config.PtrSize)]) - (Arg {n} [off+2*int32(config.PtrSize)])) - -(Arg {n} [off]) && v.Type.IsInterface() => - (IMake - (Arg {n} [off]) - (Arg {n} [off+int32(config.PtrSize)])) - -(Arg {n} [off]) && v.Type.IsComplex() && v.Type.Size() == 16 => - (ComplexMake - (Arg {n} [off]) - (Arg {n} [off+8])) - -(Arg {n} [off]) && v.Type.IsComplex() && v.Type.Size() == 8 => - (ComplexMake - (Arg {n} [off]) - (Arg {n} [off+4])) - -(Arg ) && t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t) => - (StructMake0) -(Arg {n} [off]) && t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t) => - (StructMake1 - (Arg {n} [off+int32(t.FieldOff(0))])) -(Arg {n} [off]) && t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t) => - (StructMake2 - (Arg {n} [off+int32(t.FieldOff(0))]) - (Arg {n} [off+int32(t.FieldOff(1))])) -(Arg {n} [off]) && t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t) => - (StructMake3 - (Arg {n} [off+int32(t.FieldOff(0))]) - (Arg {n} [off+int32(t.FieldOff(1))]) - (Arg {n} [off+int32(t.FieldOff(2))])) -(Arg {n} [off]) && t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t) => - (StructMake4 - (Arg {n} [off+int32(t.FieldOff(0))]) - (Arg {n} [off+int32(t.FieldOff(1))]) - (Arg {n} [off+int32(t.FieldOff(2))]) - (Arg {n} [off+int32(t.FieldOff(3))])) - -(Arg ) && t.IsArray() && t.NumElem() == 0 => - (ArrayMake0) -(Arg {n} [off]) && t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t) => - (ArrayMake1 (Arg {n} [off])) diff --git a/src/cmd/compile/internal/ssa/gen/decArgsOps.go b/src/cmd/compile/internal/ssa/gen/decArgsOps.go deleted file mode 100644 index b73d9d3976102..0000000000000 --- a/src/cmd/compile/internal/ssa/gen/decArgsOps.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -package main - -var decArgsOps = []opData{} - -var decArgsBlocks = []blockData{} - -func init() { - archs = append(archs, arch{ - name: "decArgs", - ops: decArgsOps, - blocks: decArgsBlocks, - generic: true, - }) -} diff --git a/src/cmd/compile/internal/ssa/rewritedec64.go b/src/cmd/compile/internal/ssa/rewritedec64.go index c49bc8043e7f2..60b727f45fdcb 100644 --- a/src/cmd/compile/internal/ssa/rewritedec64.go +++ b/src/cmd/compile/internal/ssa/rewritedec64.go @@ -184,12 +184,12 @@ func rewriteValuedec64_OpArg(v *Value) bool { config := b.Func.Config typ := &b.Func.Config.Types // match: (Arg {n} [off]) - // cond: is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") + // cond: is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") // result: (Int64Make (Arg {n} [off+4]) (Arg {n} [off])) for { off := auxIntToInt32(v.AuxInt) n := auxToSym(v.Aux) - if !(is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")) { + if !(is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) { break } v.reset(OpInt64Make) @@ -203,12 +203,12 @@ func rewriteValuedec64_OpArg(v *Value) bool { return true } // match: (Arg {n} [off]) - // cond: is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") + // cond: is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") // result: (Int64Make (Arg {n} [off+4]) (Arg {n} [off])) for { off := auxIntToInt32(v.AuxInt) n := auxToSym(v.Aux) - if !(is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")) { + if !(is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) { break } v.reset(OpInt64Make) @@ -222,12 +222,12 @@ func rewriteValuedec64_OpArg(v *Value) bool { return true } // match: (Arg {n} [off]) - // cond: is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") + // cond: is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") // result: (Int64Make (Arg {n} [off]) (Arg {n} [off+4])) for { off := auxIntToInt32(v.AuxInt) n := auxToSym(v.Aux) - if !(is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")) { + if !(is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) { break } v.reset(OpInt64Make) @@ -241,12 +241,12 @@ func rewriteValuedec64_OpArg(v *Value) bool { return true } // match: (Arg {n} [off]) - // cond: is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") + // cond: is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") // result: (Int64Make (Arg {n} [off]) (Arg {n} [off+4])) for { off := auxIntToInt32(v.AuxInt) n := auxToSym(v.Aux) - if !(is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")) { + if !(is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && 
!(b.Func.pass.name == "decompose builtin")) { break } v.reset(OpInt64Make) diff --git a/src/cmd/compile/internal/ssa/rewritedecArgs.go b/src/cmd/compile/internal/ssa/rewritedecArgs.go deleted file mode 100644 index 23ff417eee930..0000000000000 --- a/src/cmd/compile/internal/ssa/rewritedecArgs.go +++ /dev/null @@ -1,247 +0,0 @@ -// Code generated from gen/decArgs.rules; DO NOT EDIT. -// generated with: cd gen; go run *.go - -package ssa - -func rewriteValuedecArgs(v *Value) bool { - switch v.Op { - case OpArg: - return rewriteValuedecArgs_OpArg(v) - } - return false -} -func rewriteValuedecArgs_OpArg(v *Value) bool { - b := v.Block - config := b.Func.Config - fe := b.Func.fe - typ := &b.Func.Config.Types - // match: (Arg {n} [off]) - // cond: v.Type.IsString() - // result: (StringMake (Arg {n} [off]) (Arg {n} [off+int32(config.PtrSize)])) - for { - off := auxIntToInt32(v.AuxInt) - n := auxToSym(v.Aux) - if !(v.Type.IsString()) { - break - } - v.reset(OpStringMake) - v0 := b.NewValue0(v.Pos, OpArg, typ.BytePtr) - v0.AuxInt = int32ToAuxInt(off) - v0.Aux = symToAux(n) - v1 := b.NewValue0(v.Pos, OpArg, typ.Int) - v1.AuxInt = int32ToAuxInt(off + int32(config.PtrSize)) - v1.Aux = symToAux(n) - v.AddArg2(v0, v1) - return true - } - // match: (Arg {n} [off]) - // cond: v.Type.IsSlice() - // result: (SliceMake (Arg {n} [off]) (Arg {n} [off+int32(config.PtrSize)]) (Arg {n} [off+2*int32(config.PtrSize)])) - for { - off := auxIntToInt32(v.AuxInt) - n := auxToSym(v.Aux) - if !(v.Type.IsSlice()) { - break - } - v.reset(OpSliceMake) - v0 := b.NewValue0(v.Pos, OpArg, v.Type.Elem().PtrTo()) - v0.AuxInt = int32ToAuxInt(off) - v0.Aux = symToAux(n) - v1 := b.NewValue0(v.Pos, OpArg, typ.Int) - v1.AuxInt = int32ToAuxInt(off + int32(config.PtrSize)) - v1.Aux = symToAux(n) - v2 := b.NewValue0(v.Pos, OpArg, typ.Int) - v2.AuxInt = int32ToAuxInt(off + 2*int32(config.PtrSize)) - v2.Aux = symToAux(n) - v.AddArg3(v0, v1, v2) - return true - } - // match: (Arg {n} [off]) - // cond: v.Type.IsInterface() - // result: (IMake (Arg {n} [off]) (Arg {n} [off+int32(config.PtrSize)])) - for { - off := auxIntToInt32(v.AuxInt) - n := auxToSym(v.Aux) - if !(v.Type.IsInterface()) { - break - } - v.reset(OpIMake) - v0 := b.NewValue0(v.Pos, OpArg, typ.Uintptr) - v0.AuxInt = int32ToAuxInt(off) - v0.Aux = symToAux(n) - v1 := b.NewValue0(v.Pos, OpArg, typ.BytePtr) - v1.AuxInt = int32ToAuxInt(off + int32(config.PtrSize)) - v1.Aux = symToAux(n) - v.AddArg2(v0, v1) - return true - } - // match: (Arg {n} [off]) - // cond: v.Type.IsComplex() && v.Type.Size() == 16 - // result: (ComplexMake (Arg {n} [off]) (Arg {n} [off+8])) - for { - off := auxIntToInt32(v.AuxInt) - n := auxToSym(v.Aux) - if !(v.Type.IsComplex() && v.Type.Size() == 16) { - break - } - v.reset(OpComplexMake) - v0 := b.NewValue0(v.Pos, OpArg, typ.Float64) - v0.AuxInt = int32ToAuxInt(off) - v0.Aux = symToAux(n) - v1 := b.NewValue0(v.Pos, OpArg, typ.Float64) - v1.AuxInt = int32ToAuxInt(off + 8) - v1.Aux = symToAux(n) - v.AddArg2(v0, v1) - return true - } - // match: (Arg {n} [off]) - // cond: v.Type.IsComplex() && v.Type.Size() == 8 - // result: (ComplexMake (Arg {n} [off]) (Arg {n} [off+4])) - for { - off := auxIntToInt32(v.AuxInt) - n := auxToSym(v.Aux) - if !(v.Type.IsComplex() && v.Type.Size() == 8) { - break - } - v.reset(OpComplexMake) - v0 := b.NewValue0(v.Pos, OpArg, typ.Float32) - v0.AuxInt = int32ToAuxInt(off) - v0.Aux = symToAux(n) - v1 := b.NewValue0(v.Pos, OpArg, typ.Float32) - v1.AuxInt = int32ToAuxInt(off + 4) - v1.Aux = symToAux(n) - v.AddArg2(v0, v1) - return true 
- } - // match: (Arg ) - // cond: t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t) - // result: (StructMake0) - for { - t := v.Type - if !(t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)) { - break - } - v.reset(OpStructMake0) - return true - } - // match: (Arg {n} [off]) - // cond: t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t) - // result: (StructMake1 (Arg {n} [off+int32(t.FieldOff(0))])) - for { - t := v.Type - off := auxIntToInt32(v.AuxInt) - n := auxToSym(v.Aux) - if !(t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)) { - break - } - v.reset(OpStructMake1) - v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0)) - v0.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(0))) - v0.Aux = symToAux(n) - v.AddArg(v0) - return true - } - // match: (Arg {n} [off]) - // cond: t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t) - // result: (StructMake2 (Arg {n} [off+int32(t.FieldOff(0))]) (Arg {n} [off+int32(t.FieldOff(1))])) - for { - t := v.Type - off := auxIntToInt32(v.AuxInt) - n := auxToSym(v.Aux) - if !(t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)) { - break - } - v.reset(OpStructMake2) - v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0)) - v0.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(0))) - v0.Aux = symToAux(n) - v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1)) - v1.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(1))) - v1.Aux = symToAux(n) - v.AddArg2(v0, v1) - return true - } - // match: (Arg {n} [off]) - // cond: t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t) - // result: (StructMake3 (Arg {n} [off+int32(t.FieldOff(0))]) (Arg {n} [off+int32(t.FieldOff(1))]) (Arg {n} [off+int32(t.FieldOff(2))])) - for { - t := v.Type - off := auxIntToInt32(v.AuxInt) - n := auxToSym(v.Aux) - if !(t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)) { - break - } - v.reset(OpStructMake3) - v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0)) - v0.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(0))) - v0.Aux = symToAux(n) - v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1)) - v1.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(1))) - v1.Aux = symToAux(n) - v2 := b.NewValue0(v.Pos, OpArg, t.FieldType(2)) - v2.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(2))) - v2.Aux = symToAux(n) - v.AddArg3(v0, v1, v2) - return true - } - // match: (Arg {n} [off]) - // cond: t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t) - // result: (StructMake4 (Arg {n} [off+int32(t.FieldOff(0))]) (Arg {n} [off+int32(t.FieldOff(1))]) (Arg {n} [off+int32(t.FieldOff(2))]) (Arg {n} [off+int32(t.FieldOff(3))])) - for { - t := v.Type - off := auxIntToInt32(v.AuxInt) - n := auxToSym(v.Aux) - if !(t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)) { - break - } - v.reset(OpStructMake4) - v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0)) - v0.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(0))) - v0.Aux = symToAux(n) - v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1)) - v1.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(1))) - v1.Aux = symToAux(n) - v2 := b.NewValue0(v.Pos, OpArg, t.FieldType(2)) - v2.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(2))) - v2.Aux = symToAux(n) - v3 := b.NewValue0(v.Pos, OpArg, t.FieldType(3)) - v3.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(3))) - v3.Aux = symToAux(n) - v.AddArg4(v0, v1, v2, v3) - return true - } - // match: (Arg ) - // cond: t.IsArray() && t.NumElem() == 0 - // result: (ArrayMake0) - for { - t := v.Type - if !(t.IsArray() && t.NumElem() == 0) { - break - } - v.reset(OpArrayMake0) - return true - } - // match: (Arg {n} [off]) - // cond: t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t) - // result: 
(ArrayMake1 (Arg {n} [off])) - for { - t := v.Type - off := auxIntToInt32(v.AuxInt) - n := auxToSym(v.Aux) - if !(t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)) { - break - } - v.reset(OpArrayMake1) - v0 := b.NewValue0(v.Pos, OpArg, t.Elem()) - v0.AuxInt = int32ToAuxInt(off) - v0.Aux = symToAux(n) - v.AddArg(v0) - return true - } - return false -} -func rewriteBlockdecArgs(b *Block) bool { - switch b.Kind { - } - return false -} diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index e49a9716fead5..99e081264507b 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -1803,7 +1803,7 @@ const shareDeferExits = false // It returns a BlockRet block that ends the control flow. Its control value // will be set to the final memory state. func (s *state) exit() *ssa.Block { - lateResultLowering := s.f.DebugTest && ssa.LateCallExpansionEnabledWithin(s.f) + lateResultLowering := s.f.DebugTest if s.hasdefer { if s.hasOpenDefers { if shareDeferExits && s.lastDeferExit != nil && len(s.openDefers) == s.lastDeferCount { @@ -4628,7 +4628,6 @@ func (s *state) openDeferExit() { s.lastDeferExit = deferExit s.lastDeferCount = len(s.openDefers) zeroval := s.constInt8(types.Types[types.TUINT8], 0) - testLateExpansion := ssa.LateCallExpansionEnabledWithin(s.f) // Test for and run defers in reverse order for i := len(s.openDefers) - 1; i >= 0; i-- { r := s.openDefers[i] @@ -4670,35 +4669,19 @@ func (s *state) openDeferExit() { if r.rcvr != nil { // rcvr in case of OCALLINTER v := s.load(r.rcvr.Type.Elem(), r.rcvr) - addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart) ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart)}) - if testLateExpansion { - callArgs = append(callArgs, v) - } else { - s.store(types.Types[types.TUINTPTR], addr, v) - } + callArgs = append(callArgs, v) } for j, argAddrVal := range r.argVals { f := getParam(r.n, j) - pt := types.NewPtr(f.Type) ACArgs = append(ACArgs, ssa.Param{Type: f.Type, Offset: int32(argStart + f.Offset)}) - if testLateExpansion { - var a *ssa.Value - if !TypeOK(f.Type) { - a = s.newValue2(ssa.OpDereference, f.Type, argAddrVal, s.mem()) - } else { - a = s.load(f.Type, argAddrVal) - } - callArgs = append(callArgs, a) + var a *ssa.Value + if !TypeOK(f.Type) { + a = s.newValue2(ssa.OpDereference, f.Type, argAddrVal, s.mem()) } else { - addr := s.constOffPtrSP(pt, argStart+f.Offset) - if !TypeOK(f.Type) { - s.move(f.Type, addr, argAddrVal) - } else { - argVal := s.load(f.Type, argAddrVal) - s.storeType(f.Type, addr, argVal, 0, false) - } + a = s.load(f.Type, argAddrVal) } + callArgs = append(callArgs, a) } var call *ssa.Value if r.closure != nil { @@ -4706,30 +4689,17 @@ func (s *state) openDeferExit() { s.maybeNilCheckClosure(v, callDefer) codeptr := s.rawLoad(types.Types[types.TUINTPTR], v) aux := ssa.ClosureAuxCall(ACArgs, ACResults) - if testLateExpansion { - callArgs = append(callArgs, s.mem()) - call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v) - call.AddArgs(callArgs...) - } else { - call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, aux, codeptr, v, s.mem()) - } + callArgs = append(callArgs, s.mem()) + call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v) + call.AddArgs(callArgs...) 
} else { aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), ACArgs, ACResults) - if testLateExpansion { - callArgs = append(callArgs, s.mem()) - call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) - call.AddArgs(callArgs...) - } else { - // Do a static call if the original call was a static function or method - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem()) - } + callArgs = append(callArgs, s.mem()) + call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) + call.AddArgs(callArgs...) } call.AuxInt = stksize - if testLateExpansion { - s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call) - } else { - s.vars[memVar] = call - } + s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call) // Make sure that the stack slots with pointers are kept live // through the call (which is a pre-emption point). Also, we will // use the first call of the last defer exit to compute liveness @@ -4782,12 +4752,10 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val } } - testLateExpansion := false inRegisters := false switch n.Op() { case ir.OCALLFUNC: - testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f) if k == callNormal && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC { fn := fn.(*ir.Name) callee = fn @@ -4813,7 +4781,6 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op()) } fn := fn.(*ir.SelectorExpr) - testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f) var iclosure *ssa.Value iclosure, rcvr = s.getClosureAndRcvr(fn) if k == callNormal { @@ -4827,7 +4794,6 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val var call *ssa.Value if k == callDeferStack { - testLateExpansion = ssa.LateCallExpansionEnabledWithin(s.f) // Make a defer struct d on the stack. t := deferstruct(stksize) d := typecheck.TempAt(n.Pos(), s.curfn, t) @@ -4878,15 +4844,9 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val // Call runtime.deferprocStack with pointer to _defer record. ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(base.Ctxt.FixedFrameSize())}) aux := ssa.StaticAuxCall(ir.Syms.DeferprocStack, ACArgs, ACResults) - if testLateExpansion { - callArgs = append(callArgs, addr, s.mem()) - call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) - call.AddArgs(callArgs...) - } else { - arg0 := s.constOffPtrSP(types.Types[types.TUINTPTR], base.Ctxt.FixedFrameSize()) - s.store(types.Types[types.TUINTPTR], arg0, addr) - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem()) - } + callArgs = append(callArgs, addr, s.mem()) + call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) + call.AddArgs(callArgs...) if stksize < int64(types.PtrSize) { // We need room for both the call to deferprocStack and the call to // the deferred function. @@ -4903,32 +4863,17 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val // Write argsize and closure (args to newproc/deferproc). 
argsize := s.constInt32(types.Types[types.TUINT32], int32(stksize)) ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINT32], Offset: int32(argStart)}) - if testLateExpansion { - callArgs = append(callArgs, argsize) - } else { - addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart) - s.store(types.Types[types.TUINT32], addr, argsize) - } + callArgs = append(callArgs, argsize) ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart) + int32(types.PtrSize)}) - if testLateExpansion { - callArgs = append(callArgs, closure) - } else { - addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(types.PtrSize)) - s.store(types.Types[types.TUINTPTR], addr, closure) - } + callArgs = append(callArgs, closure) stksize += 2 * int64(types.PtrSize) argStart += 2 * int64(types.PtrSize) } // Set receiver (for interface calls). if rcvr != nil { - addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart) ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart)}) - if testLateExpansion { - callArgs = append(callArgs, rcvr) - } else { - s.store(types.Types[types.TUINTPTR], addr, rcvr) - } + callArgs = append(callArgs, rcvr) } // Write args. @@ -4939,7 +4884,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val } for i, n := range args { f := t.Params().Field(i) - ACArg, arg := s.putArg(n, f.Type, argStart+f.Offset, testLateExpansion) + ACArg, arg := s.putArg(n, f.Type, argStart+f.Offset) ACArgs = append(ACArgs, ACArg) callArgs = append(callArgs, arg) } @@ -4950,20 +4895,12 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val switch { case k == callDefer: aux := ssa.StaticAuxCall(ir.Syms.Deferproc, ACArgs, ACResults) - if testLateExpansion { - call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) - call.AddArgs(callArgs...) - } else { - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem()) - } + call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) + call.AddArgs(callArgs...) case k == callGo: aux := ssa.StaticAuxCall(ir.Syms.Newproc, ACArgs, ACResults) - if testLateExpansion { - call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) - call.AddArgs(callArgs...) - } else { - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem()) - } + call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) + call.AddArgs(callArgs...) case closure != nil: // rawLoad because loading the code pointer from a // closure is always safe, but IsSanitizerSafeAddr @@ -4971,40 +4908,24 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val // critical that we not clobber any arguments already // stored onto the stack. codeptr = s.rawLoad(types.Types[types.TUINTPTR], closure) - if testLateExpansion { - aux := ssa.ClosureAuxCall(ACArgs, ACResults) - call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure) - call.AddArgs(callArgs...) - } else { - call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(ACArgs, ACResults), codeptr, closure, s.mem()) - } + aux := ssa.ClosureAuxCall(ACArgs, ACResults) + call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure) + call.AddArgs(callArgs...) 
case codeptr != nil: - if testLateExpansion { - aux := ssa.InterfaceAuxCall(ACArgs, ACResults) - call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr) - call.AddArgs(callArgs...) - } else { - call = s.newValue2A(ssa.OpInterCall, types.TypeMem, ssa.InterfaceAuxCall(ACArgs, ACResults), codeptr, s.mem()) - } + aux := ssa.InterfaceAuxCall(ACArgs, ACResults) + call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr) + call.AddArgs(callArgs...) case callee != nil: - if testLateExpansion { - aux := ssa.StaticAuxCall(callTargetLSym(callee, s.curfn.LSym), ACArgs, ACResults) - call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) - call.AddArgs(callArgs...) - } else { - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(callTargetLSym(callee, s.curfn.LSym), ACArgs, ACResults), s.mem()) - } + aux := ssa.StaticAuxCall(callTargetLSym(callee, s.curfn.LSym), ACArgs, ACResults) + call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) + call.AddArgs(callArgs...) default: s.Fatalf("bad call type %v %v", n.Op(), n) } call.AuxInt = stksize // Call operations carry the argsize of the callee along with them } - if testLateExpansion { - s.prevCall = call - s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call) - } else { - s.vars[memVar] = call - } + s.prevCall = call + s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call) // Insert OVARLIVE nodes for _, name := range n.KeepAlive { s.stmt(ir.NewUnaryExpr(n.Pos(), ir.OVARLIVE, name)) @@ -5033,16 +4954,10 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val fp := res.Field(0) if returnResultAddr { pt := types.NewPtr(fp.Type) - if testLateExpansion { - return s.newValue1I(ssa.OpSelectNAddr, pt, 0, call) - } - return s.constOffPtrSP(pt, fp.Offset+base.Ctxt.FixedFrameSize()) + return s.newValue1I(ssa.OpSelectNAddr, pt, 0, call) } - if testLateExpansion { - return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call) - } - return s.load(n.Type(), s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+base.Ctxt.FixedFrameSize())) + return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call) } // maybeNilCheckClosure checks if a nil check of a closure is needed in some @@ -5458,7 +5373,6 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args . s.prevCall = nil // Write args to the stack off := base.Ctxt.FixedFrameSize() - testLateExpansion := ssa.LateCallExpansionEnabledWithin(s.f) var ACArgs []ssa.Param var ACResults []ssa.Param var callArgs []*ssa.Value @@ -5468,12 +5382,7 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args . off = types.Rnd(off, t.Alignment()) size := t.Size() ACArgs = append(ACArgs, ssa.Param{Type: t, Offset: int32(off)}) - if testLateExpansion { - callArgs = append(callArgs, arg) - } else { - ptr := s.constOffPtrSP(t.PtrTo(), off) - s.store(t, ptr, arg) - } + callArgs = append(callArgs, arg) off += size } off = types.Rnd(off, int64(types.RegSize)) @@ -5489,15 +5398,10 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args . // Issue call var call *ssa.Value aux := ssa.StaticAuxCall(fn, ACArgs, ACResults) - if testLateExpansion { callArgs = append(callArgs, s.mem()) call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) call.AddArgs(callArgs...) 
s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call) - } else { - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem()) - s.vars[memVar] = call - } if !returns { // Finish block @@ -5513,24 +5417,15 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args . // Load results res := make([]*ssa.Value, len(results)) - if testLateExpansion { - for i, t := range results { - off = types.Rnd(off, t.Alignment()) - if TypeOK(t) { - res[i] = s.newValue1I(ssa.OpSelectN, t, int64(i), call) - } else { - addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), int64(i), call) - res[i] = s.rawLoad(t, addr) - } - off += t.Size() - } - } else { - for i, t := range results { - off = types.Rnd(off, t.Alignment()) - ptr := s.constOffPtrSP(types.NewPtr(t), off) - res[i] = s.load(t, ptr) - off += t.Size() + for i, t := range results { + off = types.Rnd(off, t.Alignment()) + if TypeOK(t) { + res[i] = s.newValue1I(ssa.OpSelectN, t, int64(i), call) + } else { + addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), int64(i), call) + res[i] = s.rawLoad(t, addr) } + off += t.Size() } off = types.Rnd(off, int64(types.PtrSize)) @@ -5653,16 +5548,12 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) { // putArg evaluates n for the purpose of passing it as an argument to a function and returns the corresponding Param for the call. // If forLateExpandedCall is true, it returns the argument value to pass to the call operation. // If forLateExpandedCall is false, then the value is stored at the specified stack offset, and the returned value is nil. -func (s *state) putArg(n ir.Node, t *types.Type, off int64, forLateExpandedCall bool) (ssa.Param, *ssa.Value) { +func (s *state) putArg(n ir.Node, t *types.Type, off int64) (ssa.Param, *ssa.Value) { var a *ssa.Value - if forLateExpandedCall { - if !TypeOK(t) { - a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem()) - } else { - a = s.expr(n) - } + if !TypeOK(t) { + a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem()) } else { - s.storeArgWithBase(n, t, s.sp, off) + a = s.expr(n) } return ssa.Param{Type: t, Offset: int32(off)}, a } From 9b636feafeecd627a72d95ba1fa637e162143027 Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 26 Jan 2021 14:04:02 -0500 Subject: [PATCH 433/474] [dev.regabi] cmd/compile: missing last patch set for cl286013 Forgot to mail last patch set before committing, repair that. Change-Id: I1ef72d0d7df56e89369e6fb4d6e5690f254e6aa8 Reviewed-on: https://go-review.googlesource.com/c/go/+/286912 Trust: David Chase Run-TryBot: David Chase TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/ssagen/ssa.go | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 99e081264507b..b042c132d542e 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -4689,15 +4689,13 @@ func (s *state) openDeferExit() { s.maybeNilCheckClosure(v, callDefer) codeptr := s.rawLoad(types.Types[types.TUINTPTR], v) aux := ssa.ClosureAuxCall(ACArgs, ACResults) - callArgs = append(callArgs, s.mem()) call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v) - call.AddArgs(callArgs...) 
} else { aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), ACArgs, ACResults) - callArgs = append(callArgs, s.mem()) call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) - call.AddArgs(callArgs...) } + callArgs = append(callArgs, s.mem()) + call.AddArgs(callArgs...) call.AuxInt = stksize s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call) // Make sure that the stack slots with pointers are kept live @@ -4896,11 +4894,9 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val case k == callDefer: aux := ssa.StaticAuxCall(ir.Syms.Deferproc, ACArgs, ACResults) call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) - call.AddArgs(callArgs...) case k == callGo: aux := ssa.StaticAuxCall(ir.Syms.Newproc, ACArgs, ACResults) call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) - call.AddArgs(callArgs...) case closure != nil: // rawLoad because loading the code pointer from a // closure is always safe, but IsSanitizerSafeAddr @@ -4910,18 +4906,16 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val codeptr = s.rawLoad(types.Types[types.TUINTPTR], closure) aux := ssa.ClosureAuxCall(ACArgs, ACResults) call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure) - call.AddArgs(callArgs...) case codeptr != nil: aux := ssa.InterfaceAuxCall(ACArgs, ACResults) call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr) - call.AddArgs(callArgs...) case callee != nil: aux := ssa.StaticAuxCall(callTargetLSym(callee, s.curfn.LSym), ACArgs, ACResults) call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) - call.AddArgs(callArgs...) default: s.Fatalf("bad call type %v %v", n.Op(), n) } + call.AddArgs(callArgs...) call.AuxInt = stksize // Call operations carry the argsize of the callee along with them } s.prevCall = call @@ -5398,10 +5392,10 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args . // Issue call var call *ssa.Value aux := ssa.StaticAuxCall(fn, ACArgs, ACResults) - callArgs = append(callArgs, s.mem()) - call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) - call.AddArgs(callArgs...) - s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call) + callArgs = append(callArgs, s.mem()) + call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) + call.AddArgs(callArgs...) + s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call) if !returns { // Finish block @@ -5545,9 +5539,7 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) { } } -// putArg evaluates n for the purpose of passing it as an argument to a function and returns the corresponding Param for the call. -// If forLateExpandedCall is true, it returns the argument value to pass to the call operation. -// If forLateExpandedCall is false, then the value is stored at the specified stack offset, and the returned value is nil. +// putArg evaluates n for the purpose of passing it as an argument to a function and returns the corresponding Param and value for the call. 
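+// The returned value is the evaluated expression for SSA-able types, or an
+// OpDereference of the argument's address for types that are not SSA-able.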
func (s *state) putArg(n ir.Node, t *types.Type, off int64) (ssa.Param, *ssa.Value) { var a *ssa.Value if !TypeOK(t) { From 667e08ba8ccce4c00b0cde4a777030167295faf9 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 14 Oct 2020 13:05:33 -0400 Subject: [PATCH 434/474] [dev.regabi] cmd/go: Use GOMAXPROCS to limit default build, compile parallelism When people want deterministic/single-process builds, they probably assume that GOMAXPROCS=1 will do that. It currently does not, neither for build parallelism nor for compiler internal parallelism. (Current incantation for that is "go build -p=1 -gcflags=all=-c=1 ... ") This CL makes "GOMAXPROCS=1 go build ..." behave like "go build -p=1 -gcflags=all=-c=1 ... " RELNOTE=yes Change-Id: I9cfe50b7deee7334d2f1057b58385f6c98547b9f Reviewed-on: https://go-review.googlesource.com/c/go/+/284695 Trust: David Chase Run-TryBot: David Chase TryBot-Result: Go Bot Reviewed-by: Jeremy Faller --- src/cmd/go/alldocs.go | 2 +- src/cmd/go/internal/cfg/cfg.go | 24 ++++++++++++------------ src/cmd/go/internal/work/build.go | 2 +- src/cmd/go/internal/work/gc.go | 17 ++++++++++------- 4 files changed, 24 insertions(+), 21 deletions(-) diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go index 49d390297cdc5..da06e831aeeee 100644 --- a/src/cmd/go/alldocs.go +++ b/src/cmd/go/alldocs.go @@ -111,7 +111,7 @@ // -p n // the number of programs, such as build commands or // test binaries, that can be run in parallel. -// The default is the number of CPUs available. +// The default is GOMAXPROCS, normally the number of CPUs available. // -race // enable data race detection. // Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, diff --git a/src/cmd/go/internal/cfg/cfg.go b/src/cmd/go/internal/cfg/cfg.go index c48904eacceab..322247962f895 100644 --- a/src/cmd/go/internal/cfg/cfg.go +++ b/src/cmd/go/internal/cfg/cfg.go @@ -28,18 +28,18 @@ var ( BuildA bool // -a flag BuildBuildmode string // -buildmode flag BuildContext = defaultContext() - BuildMod string // -mod flag - BuildModExplicit bool // whether -mod was set explicitly - BuildModReason string // reason -mod was set, if set by default - BuildI bool // -i flag - BuildLinkshared bool // -linkshared flag - BuildMSan bool // -msan flag - BuildN bool // -n flag - BuildO string // -o flag - BuildP = runtime.NumCPU() // -p flag - BuildPkgdir string // -pkgdir flag - BuildRace bool // -race flag - BuildToolexec []string // -toolexec flag + BuildMod string // -mod flag + BuildModExplicit bool // whether -mod was set explicitly + BuildModReason string // reason -mod was set, if set by default + BuildI bool // -i flag + BuildLinkshared bool // -linkshared flag + BuildMSan bool // -msan flag + BuildN bool // -n flag + BuildO string // -o flag + BuildP = runtime.GOMAXPROCS(0) // -p flag + BuildPkgdir string // -pkgdir flag + BuildRace bool // -race flag + BuildToolexec []string // -toolexec flag BuildToolchainName string BuildToolchainCompiler func() string BuildToolchainLinker func() string diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go index 780d639c5d9f3..0e7af6d33f570 100644 --- a/src/cmd/go/internal/work/build.go +++ b/src/cmd/go/internal/work/build.go @@ -71,7 +71,7 @@ and test commands: -p n the number of programs, such as build commands or test binaries, that can be run in parallel. - The default is the number of CPUs available. + The default is GOMAXPROCS, normally the number of CPUs available. -race enable data race detection. 
Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, diff --git a/src/cmd/go/internal/work/gc.go b/src/cmd/go/internal/work/gc.go index 3205fcbffc8d2..2087855b3c04a 100644 --- a/src/cmd/go/internal/work/gc.go +++ b/src/cmd/go/internal/work/gc.go @@ -239,16 +239,19 @@ CheckFlags: // - it has no successor packages to compile (usually package main) // - all paths through the build graph pass through it // - critical path scheduling says it is high priority - // and in such a case, set c to runtime.NumCPU. + // and in such a case, set c to runtime.GOMAXPROCS(0). + // By default this is the same as runtime.NumCPU. // We do this now when p==1. + // To limit parallelism, set GOMAXPROCS below numCPU; this may be useful + // on a low-memory builder, or if a deterministic build order is required. + c := runtime.GOMAXPROCS(0) if cfg.BuildP == 1 { - // No process parallelism. Max out c. - return runtime.NumCPU() + // No process parallelism, do not cap compiler parallelism. + return c } - // Some process parallelism. Set c to min(4, numcpu). - c := 4 - if ncpu := runtime.NumCPU(); ncpu < c { - c = ncpu + // Some process parallelism. Set c to min(4, maxprocs). + if c > 4 { + c = 4 } return c } From aca22bddf231c862a1d6c9d8af8eed804c329d22 Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 26 Jan 2021 19:33:34 -0500 Subject: [PATCH 435/474] [dev.regabi] cmd/compile: remove nested functions from expands_calls.go Replace nested function spaghetti with state object and methods. Still somewhat complex, but a bit more explicit. Change-Id: I21987c8e4be75821faa5a248af05d2095cdfb0d9 Reviewed-on: https://go-review.googlesource.com/c/go/+/287132 Trust: David Chase Run-TryBot: David Chase TryBot-Result: Go Bot Reviewed-by: Cherry Zhang --- src/cmd/compile/internal/ssa/expand_calls.go | 1225 +++++++++--------- 1 file changed, 618 insertions(+), 607 deletions(-) diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go index d89d74370376c..579818e4f3cf7 100644 --- a/src/cmd/compile/internal/ssa/expand_calls.go +++ b/src/cmd/compile/internal/ssa/expand_calls.go @@ -28,655 +28,666 @@ func isBlockMultiValueExit(b *Block) bool { return (b.Kind == BlockRet || b.Kind == BlockRetJmp) && len(b.Controls) > 0 && b.Controls[0].Op == OpMakeResult } -// expandCalls converts LE (Late Expansion) calls that act like they receive value args into a lower-level form -// that is more oriented to a platform's ABI. The SelectN operations that extract results are rewritten into -// more appropriate forms, and any StructMake or ArrayMake inputs are decomposed until non-struct values are -// reached. On the callee side, OpArg nodes are not decomposed until this phase is run. -// TODO results should not be lowered until this phase. -func expandCalls(f *Func) { - // Calls that need lowering have some number of inputs, including a memory input, - // and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able. - - // With the current ABI those inputs need to be converted into stores to memory, - // rethreading the call's memory input to the first, and the new call now receiving the last. - - // With the current ABI, the outputs need to be converted to loads, which will all use the call's - // memory output as their input. 
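The core of this CL is a mechanical pattern: hoist the locals that the nested
closures captured into a struct, and turn each closure into a method on that
struct. A minimal, self-contained sketch of the pattern (sumClosure and summer
are invented names for illustration, not taken from the compiler):

	package main

	import "fmt"

	// Before: helper state is captured implicitly by a closure.
	func sumClosure(xs []int) int {
		total := 0
		add := func(v int) { total += v } // captures total
		for _, v := range xs {
			add(v)
		}
		return total
	}

	// After: the same state lives in an explicit struct and the helper
	// becomes a method, so its dependencies show in its declaration.
	type summer struct{ total int }

	func (s *summer) add(v int) { s.total += v }

	func sumState(xs []int) int {
		s := &summer{}
		for _, v := range xs {
			s.add(v)
		}
		return s.total
	}

	func main() {
		fmt.Println(sumClosure([]int{1, 2, 3}), sumState([]int{1, 2, 3})) // 6 6
	}

In expand_calls.go the struct is expandState; forward-declared, mutually
recursive closures such as rewriteSelect and storeArgOrLoad become ordinary
methods, as the diff below shows.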
- debug := f.pass.debug > 0 - - if debug { - fmt.Printf("\nexpandsCalls(%s)\n", f.Name) +// removeTrivialWrapperTypes unwraps layers of +// struct { singleField SomeType } and [1]SomeType +// until a non-wrapper type is reached. This is useful +// for working with assignments to/from interface data +// fields (either second operand to OpIMake or OpIData) +// where the wrapping or type conversion can be elided +// because of type conversions/assertions in source code +// that do not appear in SSA. +func removeTrivialWrapperTypes(t *types.Type) *types.Type { + for { + if t.IsStruct() && t.NumFields() == 1 { + t = t.Field(0).Type + continue + } + if t.IsArray() && t.NumElem() == 1 { + t = t.Elem() + continue + } + break } + return t +} - canSSAType := f.fe.CanSSA - regSize := f.Config.RegSize - sp, _ := f.spSb() - typ := &f.Config.Types - ptrSize := f.Config.PtrSize +type expandState struct { + f *Func + debug bool + canSSAType func(*types.Type) bool + regSize int64 + sp *Value + typs *Types + ptrSize int64 + hiOffset int64 + lowOffset int64 + namedSelects map[*Value][]namedVal + sdom SparseTree + common map[selKey]*Value + offsets map[offsetKey]*Value +} - // For 32-bit, need to deal with decomposition of 64-bit integers, which depends on endianness. - var hiOffset, lowOffset int64 - if f.Config.BigEndian { - lowOffset = 4 - } else { - hiOffset = 4 +// intPairTypes returns the pair of 32-bit int types needed to encode a 64-bit integer type on a target +// that has no 64-bit integer registers. +func (x *expandState) intPairTypes(et types.Kind) (tHi, tLo *types.Type) { + tHi = x.typs.UInt32 + if et == types.TINT64 { + tHi = x.typs.Int32 } + tLo = x.typs.UInt32 + return +} - namedSelects := make(map[*Value][]namedVal) - - sdom := f.Sdom() - - common := make(map[selKey]*Value) - - // intPairTypes returns the pair of 32-bit int types needed to encode a 64-bit integer type on a target - // that has no 64-bit integer registers. - intPairTypes := func(et types.Kind) (tHi, tLo *types.Type) { - tHi = typ.UInt32 - if et == types.TINT64 { - tHi = typ.Int32 - } - tLo = typ.UInt32 - return +// isAlreadyExpandedAggregateType returns whether a type is an SSA-able "aggregate" (multiple register) type +// that was expanded in an earlier phase (currently, expand_calls is intended to run after decomposeBuiltin, +// so this is all aggregate types -- small struct and array, complex, interface, string, slice, and 64-bit +// integer on 32-bit). +func (x *expandState) isAlreadyExpandedAggregateType(t *types.Type) bool { + if !x.canSSAType(t) { + return false } + return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice() || + t.Size() > x.regSize && t.IsInteger() +} - // isAlreadyExpandedAggregateType returns whether a type is an SSA-able "aggregate" (multiple register) type - // that was expanded in an earlier phase (currently, expand_calls is intended to run after decomposeBuiltin, - // so this is all aggregate types -- small struct and array, complex, interface, string, slice, and 64-bit - // integer on 32-bit). - isAlreadyExpandedAggregateType := func(t *types.Type) bool { - if !canSSAType(t) { - return false - } - return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice() || - t.Size() > regSize && t.IsInteger() +// offsetFrom creates an offset from a pointer, simplifying chained offsets and offsets from SP +// TODO should also optimize offsets from SB? 
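+// For example, an offset of 8 from an OpOffPtr [8] of p folds into a single
+// OpOffPtr [16] of p; offsets from SP go through f.ConstOffPtrSP, and other
+// results are memoized in x.offsets so each is materialized only once.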
+func (x *expandState) offsetFrom(from *Value, offset int64, pt *types.Type) *Value { + if offset == 0 && from.Type == pt { // this is not actually likely + return from } - - offsets := make(map[offsetKey]*Value) - - // offsetFrom creates an offset from a pointer, simplifying chained offsets and offsets from SP - // TODO should also optimize offsets from SB? - offsetFrom := func(from *Value, offset int64, pt *types.Type) *Value { - if offset == 0 && from.Type == pt { // this is not actually likely - return from - } - // Simplify, canonicalize - for from.Op == OpOffPtr { - offset += from.AuxInt - from = from.Args[0] - } - if from == sp { - return f.ConstOffPtrSP(pt, offset, sp) - } - key := offsetKey{from, offset, pt} - v := offsets[key] - if v != nil { - return v - } - v = from.Block.NewValue1I(from.Pos.WithNotStmt(), OpOffPtr, pt, offset, from) - offsets[key] = v + // Simplify, canonicalize + for from.Op == OpOffPtr { + offset += from.AuxInt + from = from.Args[0] + } + if from == x.sp { + return x.f.ConstOffPtrSP(pt, offset, x.sp) + } + key := offsetKey{from, offset, pt} + v := x.offsets[key] + if v != nil { return v } + v = from.Block.NewValue1I(from.Pos.WithNotStmt(), OpOffPtr, pt, offset, from) + x.offsets[key] = v + return v +} - // splitSlots splits one "field" (specified by sfx, offset, and ty) out of the LocalSlots in ls and returns the new LocalSlots this generates. - splitSlots := func(ls []LocalSlot, sfx string, offset int64, ty *types.Type) []LocalSlot { - var locs []LocalSlot - for i := range ls { - locs = append(locs, f.fe.SplitSlot(&ls[i], sfx, offset, ty)) - } - return locs +// splitSlots splits one "field" (specified by sfx, offset, and ty) out of the LocalSlots in ls and returns the new LocalSlots this generates. +func (x *expandState) splitSlots(ls []LocalSlot, sfx string, offset int64, ty *types.Type) []LocalSlot { + var locs []LocalSlot + for i := range ls { + locs = append(locs, x.f.fe.SplitSlot(&ls[i], sfx, offset, ty)) } + return locs +} - // removeTrivialWrapperTypes unwraps layers of - // struct { singleField SomeType } and [1]SomeType - // until a non-wrapper type is reached. This is useful - // for working with assignments to/from interface data - // fields (either second operand to OpIMake or OpIData) - // where the wrapping or type conversion can be elided - // because of type conversions/assertions in source code - // that do not appear in SSA. - removeTrivialWrapperTypes := func(t *types.Type) *types.Type { - for { - if t.IsStruct() && t.NumFields() == 1 { - t = t.Field(0).Type - continue - } - if t.IsArray() && t.NumElem() == 1 { - t = t.Elem() - continue - } - break - } - return t +// Calls that need lowering have some number of inputs, including a memory input, +// and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able. + +// With the current ABI those inputs need to be converted into stores to memory, +// rethreading the call's memory input to the first, and the new call now receiving the last. + +// With the current ABI, the outputs need to be converted to loads, which will all use the call's +// memory output as their input. + +// rewriteSelect recursively walks from leaf selector to a root (OpSelectN, OpLoad, OpArg) +// through a chain of Struct/Array/builtin Select operations. If the chain of selectors does not +// end in an expected root, it does nothing (this can happen depending on compiler phase ordering). 
+// The "leaf" provides the type, the root supplies the container, and the leaf-to-root path +// accumulates the offset. +// It emits the code necessary to implement the leaf select operation that leads to the root. +// +// TODO when registers really arrive, must also decompose anything split across two registers or registers and memory. +func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64) []LocalSlot { + if x.debug { + fmt.Printf("rewriteSelect(%s, %s, %d)\n", leaf.LongString(), selector.LongString(), offset) } - - // Calls that need lowering have some number of inputs, including a memory input, - // and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able. - - // With the current ABI those inputs need to be converted into stores to memory, - // rethreading the call's memory input to the first, and the new call now receiving the last. - - // With the current ABI, the outputs need to be converted to loads, which will all use the call's - // memory output as their input. - - // rewriteSelect recursively walks from leaf selector to a root (OpSelectN, OpLoad, OpArg) - // through a chain of Struct/Array/builtin Select operations. If the chain of selectors does not - // end in an expected root, it does nothing (this can happen depending on compiler phase ordering). - // The "leaf" provides the type, the root supplies the container, and the leaf-to-root path - // accumulates the offset. - // It emits the code necessary to implement the leaf select operation that leads to the root. - // - // TODO when registers really arrive, must also decompose anything split across two registers or registers and memory. - var rewriteSelect func(leaf *Value, selector *Value, offset int64) []LocalSlot - rewriteSelect = func(leaf *Value, selector *Value, offset int64) []LocalSlot { - if debug { - fmt.Printf("rewriteSelect(%s, %s, %d)\n", leaf.LongString(), selector.LongString(), offset) - } - var locs []LocalSlot - leafType := leaf.Type - if len(selector.Args) > 0 { - w := selector.Args[0] - if w.Op == OpCopy { - for w.Op == OpCopy { - w = w.Args[0] - } - selector.SetArg(0, w) + var locs []LocalSlot + leafType := leaf.Type + if len(selector.Args) > 0 { + w := selector.Args[0] + if w.Op == OpCopy { + for w.Op == OpCopy { + w = w.Args[0] } + selector.SetArg(0, w) } - switch selector.Op { - case OpArg: - if !isAlreadyExpandedAggregateType(selector.Type) { - if leafType == selector.Type { // OpIData leads us here, sometimes. - leaf.copyOf(selector) - } else { - f.Fatalf("Unexpected OpArg type, selector=%s, leaf=%s\n", selector.LongString(), leaf.LongString()) - } - if debug { - fmt.Printf("\tOpArg, break\n") - } - break - } - switch leaf.Op { - case OpIData, OpStructSelect, OpArraySelect: - leafType = removeTrivialWrapperTypes(leaf.Type) - } - aux := selector.Aux - auxInt := selector.AuxInt + offset - if leaf.Block == selector.Block { - leaf.reset(OpArg) - leaf.Aux = aux - leaf.AuxInt = auxInt - leaf.Type = leafType + } + switch selector.Op { + case OpArg: + if !x.isAlreadyExpandedAggregateType(selector.Type) { + if leafType == selector.Type { // OpIData leads us here, sometimes. 
+ leaf.copyOf(selector) } else { - w := selector.Block.NewValue0IA(leaf.Pos, OpArg, leafType, auxInt, aux) - leaf.copyOf(w) - if debug { - fmt.Printf("\tnew %s\n", w.LongString()) - } + x.f.Fatalf("Unexpected OpArg type, selector=%s, leaf=%s\n", selector.LongString(), leaf.LongString()) } - for _, s := range namedSelects[selector] { - locs = append(locs, f.Names[s.locIndex]) + if x.debug { + fmt.Printf("\tOpArg, break\n") } - - case OpLoad: // We end up here because of IData of immediate structures. - // Failure case: - // (note the failure case is very rare; w/o this case, make.bash and run.bash both pass, as well as - // the hard cases of building {syscall,math,math/cmplx,math/bits,go/constant} on ppc64le and mips-softfloat). - // - // GOSSAFUNC='(*dumper).dump' go build -gcflags=-l -tags=math_big_pure_go cmd/compile/internal/gc - // cmd/compile/internal/gc/dump.go:136:14: internal compiler error: '(*dumper).dump': not lowered: v827, StructSelect PTR PTR - // b2: ← b1 - // v20 (+142) = StaticLECall {AuxCall{reflect.Value.Interface([reflect.Value,0])[interface {},24]}} [40] v8 v1 - // v21 (142) = SelectN [1] v20 - // v22 (142) = SelectN [0] v20 - // b15: ← b8 - // v71 (+143) = IData v22 (v[Nodes]) - // v73 (+146) = StaticLECall <[]*Node,mem> {AuxCall{"".Nodes.Slice([Nodes,0])[[]*Node,8]}} [32] v71 v21 - // - // translates (w/o the "case OpLoad:" above) to: - // - // b2: ← b1 - // v20 (+142) = StaticCall {AuxCall{reflect.Value.Interface([reflect.Value,0])[interface {},24]}} [40] v715 - // v23 (142) = Load <*uintptr> v19 v20 - // v823 (142) = IsNonNil v23 - // v67 (+143) = Load <*[]*Node> v880 v20 - // b15: ← b8 - // v827 (146) = StructSelect <*[]*Node> [0] v67 - // v846 (146) = Store {*[]*Node} v769 v827 v20 - // v73 (+146) = StaticCall {AuxCall{"".Nodes.Slice([Nodes,0])[[]*Node,8]}} [32] v846 - // i.e., the struct select is generated and remains in because it is not applied to an actual structure. - // The OpLoad was created to load the single field of the IData - // This case removes that StructSelect. - if leafType != selector.Type { - f.Fatalf("Unexpected Load as selector, leaf=%s, selector=%s\n", leaf.LongString(), selector.LongString()) - } - leaf.copyOf(selector) - for _, s := range namedSelects[selector] { - locs = append(locs, f.Names[s.locIndex]) + break + } + switch leaf.Op { + case OpIData, OpStructSelect, OpArraySelect: + leafType = removeTrivialWrapperTypes(leaf.Type) + } + aux := selector.Aux + auxInt := selector.AuxInt + offset + if leaf.Block == selector.Block { + leaf.reset(OpArg) + leaf.Aux = aux + leaf.AuxInt = auxInt + leaf.Type = leafType + } else { + w := selector.Block.NewValue0IA(leaf.Pos, OpArg, leafType, auxInt, aux) + leaf.copyOf(w) + if x.debug { + fmt.Printf("\tnew %s\n", w.LongString()) } + } + for _, s := range x.namedSelects[selector] { + locs = append(locs, x.f.Names[s.locIndex]) + } - case OpSelectN: - // TODO these may be duplicated. Should memoize. Intermediate selectors will go dead, no worries there. - call := selector.Args[0] - aux := call.Aux.(*AuxCall) - which := selector.AuxInt - if which == aux.NResults() { // mem is after the results. - // rewrite v as a Copy of call -- the replacement call will produce a mem. - leaf.copyOf(call) - } else { - leafType := removeTrivialWrapperTypes(leaf.Type) - if canSSAType(leafType) { - pt := types.NewPtr(leafType) - off := offsetFrom(sp, offset+aux.OffsetOfResult(which), pt) - // Any selection right out of the arg area/registers has to be same Block as call, use call as mem input. 
- if leaf.Block == call.Block { - leaf.reset(OpLoad) - leaf.SetArgs2(off, call) - leaf.Type = leafType - } else { - w := call.Block.NewValue2(leaf.Pos, OpLoad, leafType, off, call) - leaf.copyOf(w) - if debug { - fmt.Printf("\tnew %s\n", w.LongString()) - } - } - for _, s := range namedSelects[selector] { - locs = append(locs, f.Names[s.locIndex]) - } + case OpLoad: // We end up here because of IData of immediate structures. + // Failure case: + // (note the failure case is very rare; w/o this case, make.bash and run.bash both pass, as well as + // the hard cases of building {syscall,math,math/cmplx,math/bits,go/constant} on ppc64le and mips-softfloat). + // + // GOSSAFUNC='(*dumper).dump' go build -gcflags=-l -tags=math_big_pure_go cmd/compile/internal/gc + // cmd/compile/internal/gc/dump.go:136:14: internal compiler error: '(*dumper).dump': not lowered: v827, StructSelect PTR PTR + // b2: ← b1 + // v20 (+142) = StaticLECall {AuxCall{reflect.Value.Interface([reflect.Value,0])[interface {},24]}} [40] v8 v1 + // v21 (142) = SelectN [1] v20 + // v22 (142) = SelectN [0] v20 + // b15: ← b8 + // v71 (+143) = IData v22 (v[Nodes]) + // v73 (+146) = StaticLECall <[]*Node,mem> {AuxCall{"".Nodes.Slice([Nodes,0])[[]*Node,8]}} [32] v71 v21 + // + // translates (w/o the "case OpLoad:" above) to: + // + // b2: ← b1 + // v20 (+142) = StaticCall {AuxCall{reflect.Value.Interface([reflect.Value,0])[interface {},24]}} [40] v715 + // v23 (142) = Load <*uintptr> v19 v20 + // v823 (142) = IsNonNil v23 + // v67 (+143) = Load <*[]*Node> v880 v20 + // b15: ← b8 + // v827 (146) = StructSelect <*[]*Node> [0] v67 + // v846 (146) = Store {*[]*Node} v769 v827 v20 + // v73 (+146) = StaticCall {AuxCall{"".Nodes.Slice([Nodes,0])[[]*Node,8]}} [32] v846 + // i.e., the struct select is generated and remains in because it is not applied to an actual structure. + // The OpLoad was created to load the single field of the IData + // This case removes that StructSelect. + if leafType != selector.Type { + x.f.Fatalf("Unexpected Load as selector, leaf=%s, selector=%s\n", leaf.LongString(), selector.LongString()) + } + leaf.copyOf(selector) + for _, s := range x.namedSelects[selector] { + locs = append(locs, x.f.Names[s.locIndex]) + } + + case OpSelectN: + // TODO these may be duplicated. Should memoize. Intermediate selectors will go dead, no worries there. + call := selector.Args[0] + aux := call.Aux.(*AuxCall) + which := selector.AuxInt + if which == aux.NResults() { // mem is after the results. + // rewrite v as a Copy of call -- the replacement call will produce a mem. + leaf.copyOf(call) + } else { + leafType := removeTrivialWrapperTypes(leaf.Type) + if x.canSSAType(leafType) { + pt := types.NewPtr(leafType) + off := x.offsetFrom(x.sp, offset+aux.OffsetOfResult(which), pt) + // Any selection right out of the arg area/registers has to be same Block as call, use call as mem input. 
+ if leaf.Block == call.Block { + leaf.reset(OpLoad) + leaf.SetArgs2(off, call) + leaf.Type = leafType } else { - f.Fatalf("Should not have non-SSA-able OpSelectN, selector=%s", selector.LongString()) + w := call.Block.NewValue2(leaf.Pos, OpLoad, leafType, off, call) + leaf.copyOf(w) + if x.debug { + fmt.Printf("\tnew %s\n", w.LongString()) + } + } + for _, s := range x.namedSelects[selector] { + locs = append(locs, x.f.Names[s.locIndex]) } + } else { + x.f.Fatalf("Should not have non-SSA-able OpSelectN, selector=%s", selector.LongString()) } + } - case OpStructSelect: - w := selector.Args[0] - var ls []LocalSlot - if w.Type.Kind() != types.TSTRUCT { // IData artifact - ls = rewriteSelect(leaf, w, offset) - } else { - ls = rewriteSelect(leaf, w, offset+w.Type.FieldOff(int(selector.AuxInt))) - if w.Op != OpIData { - for _, l := range ls { - locs = append(locs, f.fe.SplitStruct(l, int(selector.AuxInt))) - } + case OpStructSelect: + w := selector.Args[0] + var ls []LocalSlot + if w.Type.Kind() != types.TSTRUCT { // IData artifact + ls = x.rewriteSelect(leaf, w, offset) + } else { + ls = x.rewriteSelect(leaf, w, offset+w.Type.FieldOff(int(selector.AuxInt))) + if w.Op != OpIData { + for _, l := range ls { + locs = append(locs, x.f.fe.SplitStruct(l, int(selector.AuxInt))) } } + } - case OpArraySelect: - w := selector.Args[0] - rewriteSelect(leaf, w, offset+selector.Type.Size()*selector.AuxInt) - - case OpInt64Hi: - w := selector.Args[0] - ls := rewriteSelect(leaf, w, offset+hiOffset) - locs = splitSlots(ls, ".hi", hiOffset, leafType) - - case OpInt64Lo: - w := selector.Args[0] - ls := rewriteSelect(leaf, w, offset+lowOffset) - locs = splitSlots(ls, ".lo", lowOffset, leafType) - - case OpStringPtr: - ls := rewriteSelect(leaf, selector.Args[0], offset) - locs = splitSlots(ls, ".ptr", 0, typ.BytePtr) - - case OpSlicePtr: - w := selector.Args[0] - ls := rewriteSelect(leaf, w, offset) - locs = splitSlots(ls, ".ptr", 0, types.NewPtr(w.Type.Elem())) - - case OpITab: - w := selector.Args[0] - ls := rewriteSelect(leaf, w, offset) - sfx := ".itab" - if w.Type.IsEmptyInterface() { - sfx = ".type" - } - locs = splitSlots(ls, sfx, 0, typ.Uintptr) + case OpArraySelect: + w := selector.Args[0] + x.rewriteSelect(leaf, w, offset+selector.Type.Size()*selector.AuxInt) + + case OpInt64Hi: + w := selector.Args[0] + ls := x.rewriteSelect(leaf, w, offset+x.hiOffset) + locs = x.splitSlots(ls, ".hi", x.hiOffset, leafType) + + case OpInt64Lo: + w := selector.Args[0] + ls := x.rewriteSelect(leaf, w, offset+x.lowOffset) + locs = x.splitSlots(ls, ".lo", x.lowOffset, leafType) + + case OpStringPtr: + ls := x.rewriteSelect(leaf, selector.Args[0], offset) + locs = x.splitSlots(ls, ".ptr", 0, x.typs.BytePtr) + + case OpSlicePtr: + w := selector.Args[0] + ls := x.rewriteSelect(leaf, w, offset) + locs = x.splitSlots(ls, ".ptr", 0, types.NewPtr(w.Type.Elem())) + + case OpITab: + w := selector.Args[0] + ls := x.rewriteSelect(leaf, w, offset) + sfx := ".itab" + if w.Type.IsEmptyInterface() { + sfx = ".type" + } + locs = x.splitSlots(ls, sfx, 0, x.typs.Uintptr) - case OpComplexReal: - ls := rewriteSelect(leaf, selector.Args[0], offset) - locs = splitSlots(ls, ".real", 0, leafType) + case OpComplexReal: + ls := x.rewriteSelect(leaf, selector.Args[0], offset) + locs = x.splitSlots(ls, ".real", 0, leafType) - case OpComplexImag: - ls := rewriteSelect(leaf, selector.Args[0], offset+leafType.Width) // result is FloatNN, width of result is offset of imaginary part. 
- locs = splitSlots(ls, ".imag", leafType.Width, leafType) + case OpComplexImag: + ls := x.rewriteSelect(leaf, selector.Args[0], offset+leafType.Width) // result is FloatNN, width of result is offset of imaginary part. + locs = x.splitSlots(ls, ".imag", leafType.Width, leafType) - case OpStringLen, OpSliceLen: - ls := rewriteSelect(leaf, selector.Args[0], offset+ptrSize) - locs = splitSlots(ls, ".len", ptrSize, leafType) + case OpStringLen, OpSliceLen: + ls := x.rewriteSelect(leaf, selector.Args[0], offset+x.ptrSize) + locs = x.splitSlots(ls, ".len", x.ptrSize, leafType) - case OpIData: - ls := rewriteSelect(leaf, selector.Args[0], offset+ptrSize) - locs = splitSlots(ls, ".data", ptrSize, leafType) + case OpIData: + ls := x.rewriteSelect(leaf, selector.Args[0], offset+x.ptrSize) + locs = x.splitSlots(ls, ".data", x.ptrSize, leafType) - case OpSliceCap: - ls := rewriteSelect(leaf, selector.Args[0], offset+2*ptrSize) - locs = splitSlots(ls, ".cap", 2*ptrSize, leafType) - - case OpCopy: // If it's an intermediate result, recurse - locs = rewriteSelect(leaf, selector.Args[0], offset) - for _, s := range namedSelects[selector] { - // this copy may have had its own name, preserve that, too. - locs = append(locs, f.Names[s.locIndex]) - } + case OpSliceCap: + ls := x.rewriteSelect(leaf, selector.Args[0], offset+2*x.ptrSize) + locs = x.splitSlots(ls, ".cap", 2*x.ptrSize, leafType) - default: - // Ignore dead ends. These can occur if this phase is run before decompose builtin (which is not intended, but allowed). + case OpCopy: // If it's an intermediate result, recurse + locs = x.rewriteSelect(leaf, selector.Args[0], offset) + for _, s := range x.namedSelects[selector] { + // this copy may have had its own name, preserve that, too. + locs = append(locs, x.f.Names[s.locIndex]) } - return locs + default: + // Ignore dead ends. These can occur if this phase is run before decompose builtin (which is not intended, but allowed). } - // storeArgOrLoad converts stores of SSA-able aggregate arguments (passed to a call) into a series of primitive-typed - // stores of non-aggregate types. It recursively walks up a chain of selectors until it reaches a Load or an Arg. - // If it does not reach a Load or an Arg, nothing happens; this allows a little freedom in phase ordering. - var storeArgOrLoad func(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offset int64) *Value + return locs +} - // decomposeArgOrLoad is a helper for storeArgOrLoad. - // It decomposes a Load or an Arg into smaller parts, parameterized by the decomposeOne and decomposeTwo functions - // passed to it, and returns the new mem. If the type does not match one of the expected aggregate types, it returns nil instead. 
- decomposeArgOrLoad := func(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offset int64, - decomposeOne func(pos src.XPos, b *Block, base, source, mem *Value, t1 *types.Type, offArg, offStore int64) *Value, - decomposeTwo func(pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value) *Value { - u := source.Type - switch u.Kind() { - case types.TARRAY: - elem := u.Elem() - for i := int64(0); i < u.NumElem(); i++ { - elemOff := i * elem.Size() - mem = decomposeOne(pos, b, base, source, mem, elem, source.AuxInt+elemOff, offset+elemOff) - pos = pos.WithNotStmt() - } - return mem - case types.TSTRUCT: - for i := 0; i < u.NumFields(); i++ { - fld := u.Field(i) - mem = decomposeOne(pos, b, base, source, mem, fld.Type, source.AuxInt+fld.Offset, offset+fld.Offset) - pos = pos.WithNotStmt() - } - return mem - case types.TINT64, types.TUINT64: - if t.Width == regSize { - break - } - tHi, tLo := intPairTypes(t.Kind()) - mem = decomposeOne(pos, b, base, source, mem, tHi, source.AuxInt+hiOffset, offset+hiOffset) +func (x *expandState) rewriteDereference(b *Block, base, a, mem *Value, offset, size int64, typ *types.Type, pos src.XPos) *Value { + source := a.Args[0] + dst := x.offsetFrom(base, offset, source.Type) + if a.Uses == 1 && a.Block == b { + a.reset(OpMove) + a.Pos = pos + a.Type = types.TypeMem + a.Aux = typ + a.AuxInt = size + a.SetArgs3(dst, source, mem) + mem = a + } else { + mem = b.NewValue3A(pos, OpMove, types.TypeMem, typ, dst, source, mem) + mem.AuxInt = size + } + return mem +} + +// decomposeArgOrLoad is a helper for storeArgOrLoad. +// It decomposes a Load or an Arg into smaller parts, parameterized by the decomposeOne and decomposeTwo functions +// passed to it, and returns the new mem. If the type does not match one of the expected aggregate types, it returns nil instead. +func (x *expandState) decomposeArgOrLoad(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offset int64, + decomposeOne func(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t1 *types.Type, offArg, offStore int64) *Value, + decomposeTwo func(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value) *Value { + u := source.Type + switch u.Kind() { + case types.TARRAY: + elem := u.Elem() + for i := int64(0); i < u.NumElem(); i++ { + elemOff := i * elem.Size() + mem = decomposeOne(x, pos, b, base, source, mem, elem, source.AuxInt+elemOff, offset+elemOff) pos = pos.WithNotStmt() - return decomposeOne(pos, b, base, source, mem, tLo, source.AuxInt+lowOffset, offset+lowOffset) - case types.TINTER: - return decomposeTwo(pos, b, base, source, mem, typ.Uintptr, typ.BytePtr, source.AuxInt, offset) - case types.TSTRING: - return decomposeTwo(pos, b, base, source, mem, typ.BytePtr, typ.Int, source.AuxInt, offset) - case types.TCOMPLEX64: - return decomposeTwo(pos, b, base, source, mem, typ.Float32, typ.Float32, source.AuxInt, offset) - case types.TCOMPLEX128: - return decomposeTwo(pos, b, base, source, mem, typ.Float64, typ.Float64, source.AuxInt, offset) - case types.TSLICE: - mem = decomposeTwo(pos, b, base, source, mem, typ.BytePtr, typ.Int, source.AuxInt, offset) - return decomposeOne(pos, b, base, source, mem, typ.Int, source.AuxInt+2*ptrSize, offset+2*ptrSize) - } - return nil - } - - // storeOneArg creates a decomposed (one step) arg that is then stored. 
- // pos and b locate the store instruction, base is the base of the store target, source is the "base" of the value input, - // mem is the input mem, t is the type in question, and offArg and offStore are the offsets from the respective bases. - storeOneArg := func(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offArg, offStore int64) *Value { - w := common[selKey{source, offArg, t.Width, t}] - if w == nil { - w = source.Block.NewValue0IA(source.Pos, OpArg, t, offArg, source.Aux) - common[selKey{source, offArg, t.Width, t}] = w - } - return storeArgOrLoad(pos, b, base, w, mem, t, offStore) - } - - // storeOneLoad creates a decomposed (one step) load that is then stored. - storeOneLoad := func(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offArg, offStore int64) *Value { - from := offsetFrom(source.Args[0], offArg, types.NewPtr(t)) - w := source.Block.NewValue2(source.Pos, OpLoad, t, from, mem) - return storeArgOrLoad(pos, b, base, w, mem, t, offStore) - } - - storeTwoArg := func(pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value { - mem = storeOneArg(pos, b, base, source, mem, t1, offArg, offStore) + } + return mem + case types.TSTRUCT: + for i := 0; i < u.NumFields(); i++ { + fld := u.Field(i) + mem = decomposeOne(x, pos, b, base, source, mem, fld.Type, source.AuxInt+fld.Offset, offset+fld.Offset) + pos = pos.WithNotStmt() + } + return mem + case types.TINT64, types.TUINT64: + if t.Width == x.regSize { + break + } + tHi, tLo := x.intPairTypes(t.Kind()) + mem = decomposeOne(x, pos, b, base, source, mem, tHi, source.AuxInt+x.hiOffset, offset+x.hiOffset) pos = pos.WithNotStmt() - t1Size := t1.Size() - return storeOneArg(pos, b, base, source, mem, t2, offArg+t1Size, offStore+t1Size) + return decomposeOne(x, pos, b, base, source, mem, tLo, source.AuxInt+x.lowOffset, offset+x.lowOffset) + case types.TINTER: + return decomposeTwo(x, pos, b, base, source, mem, x.typs.Uintptr, x.typs.BytePtr, source.AuxInt, offset) + case types.TSTRING: + return decomposeTwo(x, pos, b, base, source, mem, x.typs.BytePtr, x.typs.Int, source.AuxInt, offset) + case types.TCOMPLEX64: + return decomposeTwo(x, pos, b, base, source, mem, x.typs.Float32, x.typs.Float32, source.AuxInt, offset) + case types.TCOMPLEX128: + return decomposeTwo(x, pos, b, base, source, mem, x.typs.Float64, x.typs.Float64, source.AuxInt, offset) + case types.TSLICE: + mem = decomposeTwo(x, pos, b, base, source, mem, x.typs.BytePtr, x.typs.Int, source.AuxInt, offset) + return decomposeOne(x, pos, b, base, source, mem, x.typs.Int, source.AuxInt+2*x.ptrSize, offset+2*x.ptrSize) } + return nil +} - storeTwoLoad := func(pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value { - mem = storeOneLoad(pos, b, base, source, mem, t1, offArg, offStore) - pos = pos.WithNotStmt() - t1Size := t1.Size() - return storeOneLoad(pos, b, base, source, mem, t2, offArg+t1Size, offStore+t1Size) +// storeOneArg creates a decomposed (one step) arg that is then stored. +// pos and b locate the store instruction, base is the base of the store target, source is the "base" of the value input, +// mem is the input mem, t is the type in question, and offArg and offStore are the offsets from the respective bases. 
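+// Decomposed args are memoized in x.common, keyed by (source, offset, width,
+// type), so repeated decompositions of the same argument reuse one OpArg value.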
+func storeOneArg(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offArg, offStore int64) *Value { + w := x.common[selKey{source, offArg, t.Width, t}] + if w == nil { + w = source.Block.NewValue0IA(source.Pos, OpArg, t, offArg, source.Aux) + x.common[selKey{source, offArg, t.Width, t}] = w } + return x.storeArgOrLoad(pos, b, base, w, mem, t, offStore) +} - storeArgOrLoad = func(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offset int64) *Value { - if debug { - fmt.Printf("\tstoreArgOrLoad(%s; %s; %s; %s; %d)\n", base.LongString(), source.LongString(), mem.String(), t.String(), offset) - } +// storeOneLoad creates a decomposed (one step) load that is then stored. +func storeOneLoad(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offArg, offStore int64) *Value { + from := x.offsetFrom(source.Args[0], offArg, types.NewPtr(t)) + w := source.Block.NewValue2(source.Pos, OpLoad, t, from, mem) + return x.storeArgOrLoad(pos, b, base, w, mem, t, offStore) +} - switch source.Op { - case OpCopy: - return storeArgOrLoad(pos, b, base, source.Args[0], mem, t, offset) +func storeTwoArg(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value { + mem = storeOneArg(x, pos, b, base, source, mem, t1, offArg, offStore) + pos = pos.WithNotStmt() + t1Size := t1.Size() + return storeOneArg(x, pos, b, base, source, mem, t2, offArg+t1Size, offStore+t1Size) +} - case OpLoad: - ret := decomposeArgOrLoad(pos, b, base, source, mem, t, offset, storeOneLoad, storeTwoLoad) - if ret != nil { - return ret - } +func storeTwoLoad(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value { + mem = storeOneLoad(x, pos, b, base, source, mem, t1, offArg, offStore) + pos = pos.WithNotStmt() + t1Size := t1.Size() + return storeOneLoad(x, pos, b, base, source, mem, t2, offArg+t1Size, offStore+t1Size) +} - case OpArg: - ret := decomposeArgOrLoad(pos, b, base, source, mem, t, offset, storeOneArg, storeTwoArg) - if ret != nil { - return ret - } +// storeArgOrLoad converts stores of SSA-able aggregate arguments (passed to a call) into a series of primitive-typed +// stores of non-aggregate types. It recursively walks up a chain of selectors until it reaches a Load or an Arg. +// If it does not reach a Load or an Arg, nothing happens; this allows a little freedom in phase ordering. 
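+// For example (assuming a 64-bit target), a string argument at offset 16
+// decomposes via the TSTRING case below into two primitive stores: the data
+// pointer at offset 16 and the length at offset 24.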
+func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offset int64) *Value { + if x.debug { + fmt.Printf("\tstoreArgOrLoad(%s; %s; %s; %s; %d)\n", base.LongString(), source.LongString(), mem.String(), t.String(), offset) + } - case OpArrayMake0, OpStructMake0: - return mem + switch source.Op { + case OpCopy: + return x.storeArgOrLoad(pos, b, base, source.Args[0], mem, t, offset) - case OpStructMake1, OpStructMake2, OpStructMake3, OpStructMake4: - for i := 0; i < t.NumFields(); i++ { - fld := t.Field(i) - mem = storeArgOrLoad(pos, b, base, source.Args[i], mem, fld.Type, offset+fld.Offset) - pos = pos.WithNotStmt() - } - return mem + case OpLoad: + ret := x.decomposeArgOrLoad(pos, b, base, source, mem, t, offset, storeOneLoad, storeTwoLoad) + if ret != nil { + return ret + } - case OpArrayMake1: - return storeArgOrLoad(pos, b, base, source.Args[0], mem, t.Elem(), offset) + case OpArg: + ret := x.decomposeArgOrLoad(pos, b, base, source, mem, t, offset, storeOneArg, storeTwoArg) + if ret != nil { + return ret + } - case OpInt64Make: - tHi, tLo := intPairTypes(t.Kind()) - mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, tHi, offset+hiOffset) - pos = pos.WithNotStmt() - return storeArgOrLoad(pos, b, base, source.Args[1], mem, tLo, offset+lowOffset) + case OpArrayMake0, OpStructMake0: + return mem - case OpComplexMake: - tPart := typ.Float32 - wPart := t.Width / 2 - if wPart == 8 { - tPart = typ.Float64 - } - mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, tPart, offset) + case OpStructMake1, OpStructMake2, OpStructMake3, OpStructMake4: + for i := 0; i < t.NumFields(); i++ { + fld := t.Field(i) + mem = x.storeArgOrLoad(pos, b, base, source.Args[i], mem, fld.Type, offset+fld.Offset) pos = pos.WithNotStmt() - return storeArgOrLoad(pos, b, base, source.Args[1], mem, tPart, offset+wPart) + } + return mem - case OpIMake: - mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, typ.Uintptr, offset) - pos = pos.WithNotStmt() - return storeArgOrLoad(pos, b, base, source.Args[1], mem, typ.BytePtr, offset+ptrSize) + case OpArrayMake1: + return x.storeArgOrLoad(pos, b, base, source.Args[0], mem, t.Elem(), offset) - case OpStringMake: - mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, typ.BytePtr, offset) - pos = pos.WithNotStmt() - return storeArgOrLoad(pos, b, base, source.Args[1], mem, typ.Int, offset+ptrSize) + case OpInt64Make: + tHi, tLo := x.intPairTypes(t.Kind()) + mem = x.storeArgOrLoad(pos, b, base, source.Args[0], mem, tHi, offset+x.hiOffset) + pos = pos.WithNotStmt() + return x.storeArgOrLoad(pos, b, base, source.Args[1], mem, tLo, offset+x.lowOffset) - case OpSliceMake: - mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, typ.BytePtr, offset) - pos = pos.WithNotStmt() - mem = storeArgOrLoad(pos, b, base, source.Args[1], mem, typ.Int, offset+ptrSize) - return storeArgOrLoad(pos, b, base, source.Args[2], mem, typ.Int, offset+2*ptrSize) - } - - // For nodes that cannot be taken apart -- OpSelectN, other structure selectors. 
- switch t.Kind() { - case types.TARRAY: - elt := t.Elem() - if source.Type != t && t.NumElem() == 1 && elt.Width == t.Width && t.Width == regSize { - t = removeTrivialWrapperTypes(t) - // it could be a leaf type, but the "leaf" could be complex64 (for example) - return storeArgOrLoad(pos, b, base, source, mem, t, offset) - } - for i := int64(0); i < t.NumElem(); i++ { - sel := source.Block.NewValue1I(pos, OpArraySelect, elt, i, source) - mem = storeArgOrLoad(pos, b, base, sel, mem, elt, offset+i*elt.Width) - pos = pos.WithNotStmt() - } - return mem - - case types.TSTRUCT: - if source.Type != t && t.NumFields() == 1 && t.Field(0).Type.Width == t.Width && t.Width == regSize { - // This peculiar test deals with accesses to immediate interface data. - // It works okay because everything is the same size. - // Example code that triggers this can be found in go/constant/value.go, function ToComplex - // v119 (+881) = IData v6 - // v121 (+882) = StaticLECall {AuxCall{"".itof([intVal,0])[floatVal,8]}} [16] v119 v1 - // This corresponds to the generic rewrite rule "(StructSelect [0] (IData x)) => (IData x)" - // Guard against "struct{struct{*foo}}" - // Other rewriting phases create minor glitches when they transform IData, for instance the - // interface-typed Arg "x" of ToFloat in go/constant/value.go - // v6 (858) = Arg {x} (x[Value], x[Value]) - // is rewritten by decomposeArgs into - // v141 (858) = Arg {x} - // v139 (858) = Arg <*uint8> {x} [8] - // because of a type case clause on line 862 of go/constant/value.go - // case intVal: - // return itof(x) - // v139 is later stored as an intVal == struct{val *big.Int} which naively requires the fields of - // of a *uint8, which does not succeed. - t = removeTrivialWrapperTypes(t) - // it could be a leaf type, but the "leaf" could be complex64 (for example) - return storeArgOrLoad(pos, b, base, source, mem, t, offset) - } + case OpComplexMake: + tPart := x.typs.Float32 + wPart := t.Width / 2 + if wPart == 8 { + tPart = x.typs.Float64 + } + mem = x.storeArgOrLoad(pos, b, base, source.Args[0], mem, tPart, offset) + pos = pos.WithNotStmt() + return x.storeArgOrLoad(pos, b, base, source.Args[1], mem, tPart, offset+wPart) - for i := 0; i < t.NumFields(); i++ { - fld := t.Field(i) - sel := source.Block.NewValue1I(pos, OpStructSelect, fld.Type, int64(i), source) - mem = storeArgOrLoad(pos, b, base, sel, mem, fld.Type, offset+fld.Offset) - pos = pos.WithNotStmt() - } - return mem + case OpIMake: + mem = x.storeArgOrLoad(pos, b, base, source.Args[0], mem, x.typs.Uintptr, offset) + pos = pos.WithNotStmt() + return x.storeArgOrLoad(pos, b, base, source.Args[1], mem, x.typs.BytePtr, offset+x.ptrSize) - case types.TINT64, types.TUINT64: - if t.Width == regSize { - break - } - tHi, tLo := intPairTypes(t.Kind()) - sel := source.Block.NewValue1(pos, OpInt64Hi, tHi, source) - mem = storeArgOrLoad(pos, b, base, sel, mem, tHi, offset+hiOffset) - pos = pos.WithNotStmt() - sel = source.Block.NewValue1(pos, OpInt64Lo, tLo, source) - return storeArgOrLoad(pos, b, base, sel, mem, tLo, offset+lowOffset) + case OpStringMake: + mem = x.storeArgOrLoad(pos, b, base, source.Args[0], mem, x.typs.BytePtr, offset) + pos = pos.WithNotStmt() + return x.storeArgOrLoad(pos, b, base, source.Args[1], mem, x.typs.Int, offset+x.ptrSize) - case types.TINTER: - sel := source.Block.NewValue1(pos, OpITab, typ.BytePtr, source) - mem = storeArgOrLoad(pos, b, base, sel, mem, typ.BytePtr, offset) - pos = pos.WithNotStmt() - sel = source.Block.NewValue1(pos, OpIData, typ.BytePtr, source) - return 
storeArgOrLoad(pos, b, base, sel, mem, typ.BytePtr, offset+ptrSize) + case OpSliceMake: + mem = x.storeArgOrLoad(pos, b, base, source.Args[0], mem, x.typs.BytePtr, offset) + pos = pos.WithNotStmt() + mem = x.storeArgOrLoad(pos, b, base, source.Args[1], mem, x.typs.Int, offset+x.ptrSize) + return x.storeArgOrLoad(pos, b, base, source.Args[2], mem, x.typs.Int, offset+2*x.ptrSize) + } - case types.TSTRING: - sel := source.Block.NewValue1(pos, OpStringPtr, typ.BytePtr, source) - mem = storeArgOrLoad(pos, b, base, sel, mem, typ.BytePtr, offset) + // For nodes that cannot be taken apart -- OpSelectN, other structure selectors. + switch t.Kind() { + case types.TARRAY: + elt := t.Elem() + if source.Type != t && t.NumElem() == 1 && elt.Width == t.Width && t.Width == x.regSize { + t = removeTrivialWrapperTypes(t) + // it could be a leaf type, but the "leaf" could be complex64 (for example) + return x.storeArgOrLoad(pos, b, base, source, mem, t, offset) + } + for i := int64(0); i < t.NumElem(); i++ { + sel := source.Block.NewValue1I(pos, OpArraySelect, elt, i, source) + mem = x.storeArgOrLoad(pos, b, base, sel, mem, elt, offset+i*elt.Width) pos = pos.WithNotStmt() - sel = source.Block.NewValue1(pos, OpStringLen, typ.Int, source) - return storeArgOrLoad(pos, b, base, sel, mem, typ.Int, offset+ptrSize) + } + return mem - case types.TSLICE: - et := types.NewPtr(t.Elem()) - sel := source.Block.NewValue1(pos, OpSlicePtr, et, source) - mem = storeArgOrLoad(pos, b, base, sel, mem, et, offset) - pos = pos.WithNotStmt() - sel = source.Block.NewValue1(pos, OpSliceLen, typ.Int, source) - mem = storeArgOrLoad(pos, b, base, sel, mem, typ.Int, offset+ptrSize) - sel = source.Block.NewValue1(pos, OpSliceCap, typ.Int, source) - return storeArgOrLoad(pos, b, base, sel, mem, typ.Int, offset+2*ptrSize) - - case types.TCOMPLEX64: - sel := source.Block.NewValue1(pos, OpComplexReal, typ.Float32, source) - mem = storeArgOrLoad(pos, b, base, sel, mem, typ.Float32, offset) - pos = pos.WithNotStmt() - sel = source.Block.NewValue1(pos, OpComplexImag, typ.Float32, source) - return storeArgOrLoad(pos, b, base, sel, mem, typ.Float32, offset+4) + case types.TSTRUCT: + if source.Type != t && t.NumFields() == 1 && t.Field(0).Type.Width == t.Width && t.Width == x.regSize { + // This peculiar test deals with accesses to immediate interface data. + // It works okay because everything is the same size. + // Example code that triggers this can be found in go/constant/value.go, function ToComplex + // v119 (+881) = IData v6 + // v121 (+882) = StaticLECall {AuxCall{"".itof([intVal,0])[floatVal,8]}} [16] v119 v1 + // This corresponds to the generic rewrite rule "(StructSelect [0] (IData x)) => (IData x)" + // Guard against "struct{struct{*foo}}" + // Other rewriting phases create minor glitches when they transform IData, for instance the + // interface-typed Arg "x" of ToFloat in go/constant/value.go + // v6 (858) = Arg {x} (x[Value], x[Value]) + // is rewritten by decomposeArgs into + // v141 (858) = Arg {x} + // v139 (858) = Arg <*uint8> {x} [8] + // because of a type case clause on line 862 of go/constant/value.go + // case intVal: + // return itof(x) + // v139 is later stored as an intVal == struct{val *big.Int} which naively requires the fields of + // of a *uint8, which does not succeed. 
+ t = removeTrivialWrapperTypes(t) + // it could be a leaf type, but the "leaf" could be complex64 (for example) + return x.storeArgOrLoad(pos, b, base, source, mem, t, offset) + } - case types.TCOMPLEX128: - sel := source.Block.NewValue1(pos, OpComplexReal, typ.Float64, source) - mem = storeArgOrLoad(pos, b, base, sel, mem, typ.Float64, offset) + for i := 0; i < t.NumFields(); i++ { + fld := t.Field(i) + sel := source.Block.NewValue1I(pos, OpStructSelect, fld.Type, int64(i), source) + mem = x.storeArgOrLoad(pos, b, base, sel, mem, fld.Type, offset+fld.Offset) pos = pos.WithNotStmt() - sel = source.Block.NewValue1(pos, OpComplexImag, typ.Float64, source) - return storeArgOrLoad(pos, b, base, sel, mem, typ.Float64, offset+8) } + return mem - dst := offsetFrom(base, offset, types.NewPtr(t)) - x := b.NewValue3A(pos, OpStore, types.TypeMem, t, dst, source, mem) - if debug { - fmt.Printf("\t\tstoreArg returns %s\n", x.LongString()) + case types.TINT64, types.TUINT64: + if t.Width == x.regSize { + break } - return x + tHi, tLo := x.intPairTypes(t.Kind()) + sel := source.Block.NewValue1(pos, OpInt64Hi, tHi, source) + mem = x.storeArgOrLoad(pos, b, base, sel, mem, tHi, offset+x.hiOffset) + pos = pos.WithNotStmt() + sel = source.Block.NewValue1(pos, OpInt64Lo, tLo, source) + return x.storeArgOrLoad(pos, b, base, sel, mem, tLo, offset+x.lowOffset) + + case types.TINTER: + sel := source.Block.NewValue1(pos, OpITab, x.typs.BytePtr, source) + mem = x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.BytePtr, offset) + pos = pos.WithNotStmt() + sel = source.Block.NewValue1(pos, OpIData, x.typs.BytePtr, source) + return x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.BytePtr, offset+x.ptrSize) + + case types.TSTRING: + sel := source.Block.NewValue1(pos, OpStringPtr, x.typs.BytePtr, source) + mem = x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.BytePtr, offset) + pos = pos.WithNotStmt() + sel = source.Block.NewValue1(pos, OpStringLen, x.typs.Int, source) + return x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Int, offset+x.ptrSize) + + case types.TSLICE: + et := types.NewPtr(t.Elem()) + sel := source.Block.NewValue1(pos, OpSlicePtr, et, source) + mem = x.storeArgOrLoad(pos, b, base, sel, mem, et, offset) + pos = pos.WithNotStmt() + sel = source.Block.NewValue1(pos, OpSliceLen, x.typs.Int, source) + mem = x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Int, offset+x.ptrSize) + sel = source.Block.NewValue1(pos, OpSliceCap, x.typs.Int, source) + return x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Int, offset+2*x.ptrSize) + + case types.TCOMPLEX64: + sel := source.Block.NewValue1(pos, OpComplexReal, x.typs.Float32, source) + mem = x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Float32, offset) + pos = pos.WithNotStmt() + sel = source.Block.NewValue1(pos, OpComplexImag, x.typs.Float32, source) + return x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Float32, offset+4) + + case types.TCOMPLEX128: + sel := source.Block.NewValue1(pos, OpComplexReal, x.typs.Float64, source) + mem = x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Float64, offset) + pos = pos.WithNotStmt() + sel = source.Block.NewValue1(pos, OpComplexImag, x.typs.Float64, source) + return x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Float64, offset+8) } - rewriteDereference := func(b *Block, base, a, mem *Value, offset, size int64, typ *types.Type, pos src.XPos) *Value { - source := a.Args[0] - dst := offsetFrom(base, offset, source.Type) - if a.Uses == 1 && a.Block == b { - a.reset(OpMove) - a.Pos = pos - a.Type = types.TypeMem - a.Aux = typ - 
a.AuxInt = size - a.SetArgs3(dst, source, mem) - mem = a - } else { - mem = b.NewValue3A(pos, OpMove, types.TypeMem, typ, dst, source, mem) - mem.AuxInt = size - } - return mem + dst := x.offsetFrom(base, offset, types.NewPtr(t)) + s := b.NewValue3A(pos, OpStore, types.TypeMem, t, dst, source, mem) + if x.debug { + fmt.Printf("\t\tstoreArg returns %s\n", s.LongString()) } + return s +} - // rewriteArgs removes all the Args from a call and converts the call args into appropriate - // stores (or later, register movement). Extra args for interface and closure calls are ignored, - // but removed. - rewriteArgs := func(v *Value, firstArg int) *Value { - // Thread the stores on the memory arg - aux := v.Aux.(*AuxCall) - pos := v.Pos.WithNotStmt() - m0 := v.MemoryArg() - mem := m0 - for i, a := range v.Args { - if i < firstArg { - continue - } - if a == m0 { // mem is last. - break +// rewriteArgs removes all the Args from a call and converts the call args into appropriate +// stores (or later, register movement). Extra args for interface and closure calls are ignored, +// but removed. +func (x *expandState) rewriteArgs(v *Value, firstArg int) *Value { + // Thread the stores on the memory arg + aux := v.Aux.(*AuxCall) + pos := v.Pos.WithNotStmt() + m0 := v.MemoryArg() + mem := m0 + for i, a := range v.Args { + if i < firstArg { + continue + } + if a == m0 { // mem is last. + break + } + auxI := int64(i - firstArg) + if a.Op == OpDereference { + if a.MemoryArg() != m0 { + x.f.Fatalf("Op...LECall and OpDereference have mismatched mem, %s and %s", v.LongString(), a.LongString()) } - auxI := int64(i - firstArg) - if a.Op == OpDereference { - if a.MemoryArg() != m0 { - f.Fatalf("Op...LECall and OpDereference have mismatched mem, %s and %s", v.LongString(), a.LongString()) - } - // "Dereference" of addressed (probably not-SSA-eligible) value becomes Move - // TODO this will be more complicated with registers in the picture. - mem = rewriteDereference(v.Block, sp, a, mem, aux.OffsetOfArg(auxI), aux.SizeOfArg(auxI), aux.TypeOfArg(auxI), pos) - } else { - if debug { - fmt.Printf("storeArg %s, %v, %d\n", a.LongString(), aux.TypeOfArg(auxI), aux.OffsetOfArg(auxI)) - } - mem = storeArgOrLoad(pos, v.Block, sp, a, mem, aux.TypeOfArg(auxI), aux.OffsetOfArg(auxI)) + // "Dereference" of addressed (probably not-SSA-eligible) value becomes Move + // TODO this will be more complicated with registers in the picture. + mem = x.rewriteDereference(v.Block, x.sp, a, mem, aux.OffsetOfArg(auxI), aux.SizeOfArg(auxI), aux.TypeOfArg(auxI), pos) + } else { + if x.debug { + fmt.Printf("storeArg %s, %v, %d\n", a.LongString(), aux.TypeOfArg(auxI), aux.OffsetOfArg(auxI)) } + mem = x.storeArgOrLoad(pos, v.Block, x.sp, a, mem, aux.TypeOfArg(auxI), aux.OffsetOfArg(auxI)) } - v.resetArgs() - return mem + } + v.resetArgs() + return mem +} + +// expandCalls converts LE (Late Expansion) calls that act like they receive value args into a lower-level form +// that is more oriented to a platform's ABI. The SelectN operations that extract results are rewritten into +// more appropriate forms, and any StructMake or ArrayMake inputs are decomposed until non-struct values are +// reached. On the callee side, OpArg nodes are not decomposed until this phase is run. +// TODO results should not be lowered until this phase. +func expandCalls(f *Func) { + // Calls that need lowering have some number of inputs, including a memory input, + // and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able. 
+ + // With the current ABI those inputs need to be converted into stores to memory, + // rethreading the call's memory input to the first, and the new call now receiving the last. + + // With the current ABI, the outputs need to be converted to loads, which will all use the call's + // memory output as their input. + sp, _ := f.spSb() + x := &expandState{ + f: f, + debug: f.pass.debug > 0, + canSSAType: f.fe.CanSSA, + regSize: f.Config.RegSize, + sp: sp, + typs: &f.Config.Types, + ptrSize: f.Config.PtrSize, + namedSelects: make(map[*Value][]namedVal), + sdom: f.Sdom(), + common: make(map[selKey]*Value), + offsets: make(map[offsetKey]*Value), + } + + // For 32-bit, need to deal with decomposition of 64-bit integers, which depends on endianness. + if f.Config.BigEndian { + x.lowOffset = 4 + } else { + x.hiOffset = 4 + } + + if x.debug { + fmt.Printf("\nexpandsCalls(%s)\n", f.Name) } // TODO if too slow, whole program iteration can be replaced w/ slices of appropriate values, accumulated in first loop here. @@ -686,16 +697,16 @@ func expandCalls(f *Func) { for _, v := range b.Values { switch v.Op { case OpStaticLECall: - mem := rewriteArgs(v, 0) + mem := x.rewriteArgs(v, 0) v.SetArgs1(mem) case OpClosureLECall: code := v.Args[0] context := v.Args[1] - mem := rewriteArgs(v, 2) + mem := x.rewriteArgs(v, 2) v.SetArgs3(code, context, mem) case OpInterLECall: code := v.Args[0] - mem := rewriteArgs(v, 1) + mem := x.rewriteArgs(v, 1) v.SetArgs2(code, mem) } } @@ -712,7 +723,7 @@ func expandCalls(f *Func) { break } auxType := aux.TypeOfResult(i) - auxBase := b.NewValue2A(v.Pos, OpLocalAddr, types.NewPtr(auxType), aux.results[i].Name, sp, mem) + auxBase := b.NewValue2A(v.Pos, OpLocalAddr, types.NewPtr(auxType), aux.results[i].Name, x.sp, mem) auxOffset := int64(0) auxSize := aux.SizeOfResult(i) if a.Op == OpDereference { @@ -724,7 +735,7 @@ func expandCalls(f *Func) { } continue } - mem = rewriteDereference(v.Block, auxBase, a, mem, auxOffset, auxSize, auxType, pos) + mem = x.rewriteDereference(v.Block, auxBase, a, mem, auxOffset, auxSize, auxType, pos) } else { if a.Op == OpLoad && a.Args[0].Op == OpLocalAddr { addr := a.Args[0] @@ -732,7 +743,7 @@ func expandCalls(f *Func) { continue } } - mem = storeArgOrLoad(v.Pos, b, auxBase, a, mem, aux.TypeOfResult(i), auxOffset) + mem = x.storeArgOrLoad(v.Pos, b, auxBase, a, mem, aux.TypeOfResult(i), auxOffset) } } b.SetControl(mem) @@ -742,11 +753,11 @@ func expandCalls(f *Func) { for i, name := range f.Names { t := name.Type - if isAlreadyExpandedAggregateType(t) { + if x.isAlreadyExpandedAggregateType(t) { for j, v := range f.NamedValues[name] { - if v.Op == OpSelectN || v.Op == OpArg && isAlreadyExpandedAggregateType(v.Type) { - ns := namedSelects[v] - namedSelects[v] = append(ns, namedVal{locIndex: i, valIndex: j}) + if v.Op == OpSelectN || v.Op == OpArg && x.isAlreadyExpandedAggregateType(v.Type) { + ns := x.namedSelects[v] + x.namedSelects[v] = append(ns, namedVal{locIndex: i, valIndex: j}) } } } @@ -760,22 +771,22 @@ func expandCalls(f *Func) { t := v.Aux.(*types.Type) source := v.Args[1] tSrc := source.Type - iAEATt := isAlreadyExpandedAggregateType(t) + iAEATt := x.isAlreadyExpandedAggregateType(t) if !iAEATt { // guarding against store immediate struct into interface data field -- store type is *uint8 // TODO can this happen recursively? 
- iAEATt = isAlreadyExpandedAggregateType(tSrc) + iAEATt = x.isAlreadyExpandedAggregateType(tSrc) if iAEATt { t = tSrc } } if iAEATt { - if debug { + if x.debug { fmt.Printf("Splitting store %s\n", v.LongString()) } dst, mem := v.Args[0], v.Args[2] - mem = storeArgOrLoad(v.Pos, b, dst, source, mem, t, 0) + mem = x.storeArgOrLoad(v.Pos, b, dst, source, mem, t, 0) v.copyOf(mem) } } @@ -804,7 +815,7 @@ func expandCalls(f *Func) { switch w.Op { case OpStructSelect, OpArraySelect, OpSelectN, OpArg: val2Preds[w] += 1 - if debug { + if x.debug { fmt.Printf("v2p[%s] = %d\n", w.LongString(), val2Preds[w]) } } @@ -813,18 +824,18 @@ func expandCalls(f *Func) { case OpSelectN: if _, ok := val2Preds[v]; !ok { val2Preds[v] = 0 - if debug { + if x.debug { fmt.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v]) } } case OpArg: - if !isAlreadyExpandedAggregateType(v.Type) { + if !x.isAlreadyExpandedAggregateType(v.Type) { continue } if _, ok := val2Preds[v]; !ok { val2Preds[v] = 0 - if debug { + if x.debug { fmt.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v]) } } @@ -835,7 +846,7 @@ func expandCalls(f *Func) { which := v.AuxInt aux := call.Aux.(*AuxCall) pt := v.Type - off := offsetFrom(sp, aux.OffsetOfResult(which), pt) + off := x.offsetFrom(x.sp, aux.OffsetOfResult(which), pt) v.copyOf(off) } } @@ -857,7 +868,7 @@ func expandCalls(f *Func) { if bi == bj { return vi.ID < vj.ID } - return sdom.domorder(bi) > sdom.domorder(bj) // reverse the order to put dominators last. + return x.sdom.domorder(bi) > x.sdom.domorder(bj) // reverse the order to put dominators last. } // Accumulate order in allOrdered @@ -891,7 +902,7 @@ func expandCalls(f *Func) { } } - common = make(map[selKey]*Value) + x.common = make(map[selKey]*Value) // Rewrite duplicate selectors as copies where possible. for i := len(allOrdered) - 1; i >= 0; i-- { v := allOrdered[i] @@ -923,26 +934,26 @@ func expandCalls(f *Func) { case OpSelectN: offset = w.Aux.(*AuxCall).OffsetOfResult(v.AuxInt) case OpInt64Hi: - offset = hiOffset + offset = x.hiOffset case OpInt64Lo: - offset = lowOffset + offset = x.lowOffset case OpStringLen, OpSliceLen, OpIData: - offset = ptrSize + offset = x.ptrSize case OpSliceCap: - offset = 2 * ptrSize + offset = 2 * x.ptrSize case OpComplexImag: offset = size } sk := selKey{from: w, size: size, offset: offset, typ: typ} - dupe := common[sk] + dupe := x.common[sk] if dupe == nil { - common[sk] = v - } else if sdom.IsAncestorEq(dupe.Block, v.Block) { + x.common[sk] = v + } else if x.sdom.IsAncestorEq(dupe.Block, v.Block) { v.copyOf(dupe) } else { // Because values are processed in dominator order, the old common[s] will never dominate after a miss is seen. // Installing the new value might match some future values. - common[sk] = v + x.common[sk] = v } } @@ -951,7 +962,7 @@ func expandCalls(f *Func) { // Rewrite selectors. for i, v := range allOrdered { - if debug { + if x.debug { b := v.Block fmt.Printf("allOrdered[%d] = b%d, %s, uses=%d\n", i, b.ID, v.LongString(), v.Uses) } @@ -962,13 +973,13 @@ func expandCalls(f *Func) { if v.Op == OpCopy { continue } - locs := rewriteSelect(v, v, 0) + locs := x.rewriteSelect(v, v, 0) // Install new names. if v.Type.IsMemory() { continue } // Leaf types may have debug locations - if !isAlreadyExpandedAggregateType(v.Type) { + if !x.isAlreadyExpandedAggregateType(v.Type) { for _, l := range locs { f.NamedValues[l] = append(f.NamedValues[l], v) } @@ -976,7 +987,7 @@ func expandCalls(f *Func) { continue } // Not-leaf types that had debug locations need to lose them. 
-		if ns, ok := namedSelects[v]; ok {
+		if ns, ok := x.namedSelects[v]; ok {
 			toDelete = append(toDelete, ns...)
 		}
 	}

From ca6999e27c395a30edb277dbda9c5b3c5854aace Mon Sep 17 00:00:00 2001
From: Dan Scales
Date: Sun, 31 Jan 2021 10:05:03 -0800
Subject: [PATCH 436/474] [dev.regabi] test: add a test for inlining closures

Add a test case for issue 43818. We don't want to mark as inlinable a
function with a closure that has an operation (such as OSELRECV2) that
we don't currently support for exporting.

This test case fails to compile without the fix for #43818.

Updates #43818

Change-Id: Ief322a14aefaefc6913c40a6b8505214bd622fda
Reviewed-on: https://go-review.googlesource.com/c/go/+/288392
Run-TryBot: Dan Scales
Reviewed-by: Cuong Manh Le
TryBot-Result: Go Bot
Trust: Dan Scales
---
 test/closure7.go | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)
 create mode 100644 test/closure7.go

diff --git a/test/closure7.go b/test/closure7.go
new file mode 100644
index 0000000000000..823333f45fd25
--- /dev/null
+++ b/test/closure7.go
@@ -0,0 +1,28 @@
+// run
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func g(f func()) {
+}
+
+// Must have exportable name
+func F() {
+	g(func() {
+		ch := make(chan int)
+		for {
+			select {
+			case <-ch:
+				return
+			default:
+			}
+		}
+	})
+}
+
+func main() {
+	F()
+}

From bfc7418e6d3a505fe348718fd113473c9d92b135 Mon Sep 17 00:00:00 2001
From: Cherry Zhang
Date: Fri, 29 Jan 2021 12:03:32 -0500
Subject: [PATCH 437/474] [dev.regabi] runtime, syscall, etc.: mark Darwin syscall wrappers as ABIInternal

Mark the syscall wrappers as ABIInternal: their addresses are taken
from Go code, so it is important that they can be called without ABI
wrappers. Previously each wrapper was just a single JMP instruction,
so this did not matter. In the next CL we'll make the wrappers
actually have a frame, and a real ABI wrapper would then mess up
things such as stack alignment for the C ABI.

This doesn't look really nice, but I don't know how we can do
better...

TODO: other OSes.
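(An illustrative sketch of the address-taking pattern this CL protects,
following the shape of src/runtime/sys_darwin.go at this point in the
tree; the exact names and signatures below are an approximation, not
part of the change. Go takes the trampoline's raw entry PC with funcPC
and hands it to libcCall, so an ABI wrapper interposed at that PC would
run with a frame layout the C calling convention does not expect.)

	//go:nosplit
	//go:cgo_unsafe_args
	func pthread_self() (t pthread) {
		// funcPC yields the entry PC of the assembly trampoline;
		// libcCall jumps straight to that PC, so no ABI wrapper may
		// sit between it and the real body.
		libcCall(unsafe.Pointer(funcPC(pthread_self_trampoline)), unsafe.Pointer(&t))
		return
	}

	func pthread_self_trampoline()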
Change-Id: Ifb3920494990a7775e3e6902fbcaf137df3cc653
Reviewed-on: https://go-review.googlesource.com/c/go/+/288092
Trust: Cherry Zhang
Run-TryBot: Cherry Zhang
TryBot-Result: Go Bot
Reviewed-by: Than McIntosh
---
 src/cmd/dist/build.go                         |   2 +
 src/cmd/internal/objabi/path.go               |   2 +
 .../x509/internal/macos/corefoundation.s      |  21 +-
 src/crypto/x509/internal/macos/security.s     |  11 +-
 src/runtime/sys_darwin_amd64.s                |  96 +++----
 src/syscall/mkasm.go                          |   3 +-
 src/syscall/zsyscall_darwin_amd64.s           | 250 +++++++++---------
 src/syscall/zsyscall_darwin_arm64.s           | 250 +++++++++---------
 src/syscall/zsyscall_openbsd_amd64.s          | 230 ++++++++--------
 src/syscall/zsyscall_openbsd_arm64.s          | 230 ++++++++--------
 10 files changed, 554 insertions(+), 541 deletions(-)

diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go
index c8c3212d1661f..332f2fab5816f 100644
--- a/src/cmd/dist/build.go
+++ b/src/cmd/dist/build.go
@@ -1765,6 +1765,8 @@ func IsRuntimePackagePath(pkgpath string) bool {
 		rval = true
 	case "syscall":
 		rval = true
+	case "crypto/x509/internal/macos": // libc function wrappers need to be ABIInternal
+		rval = true
 	default:
 		rval = strings.HasPrefix(pkgpath, "runtime/internal")
 	}
diff --git a/src/cmd/internal/objabi/path.go b/src/cmd/internal/objabi/path.go
index fd1c9981c69d0..1a0784cf7f0d2 100644
--- a/src/cmd/internal/objabi/path.go
+++ b/src/cmd/internal/objabi/path.go
@@ -56,6 +56,8 @@ func IsRuntimePackagePath(pkgpath string) bool {
 		rval = true
 	case "syscall":
 		rval = true
+	case "crypto/x509/internal/macos": // libc function wrappers need to be ABIInternal
+		rval = true
 	default:
 		rval = strings.HasPrefix(pkgpath, "runtime/internal")
 	}
diff --git a/src/crypto/x509/internal/macos/corefoundation.s b/src/crypto/x509/internal/macos/corefoundation.s
index a4495d68dd357..1ce39fac9d752 100644
--- a/src/crypto/x509/internal/macos/corefoundation.s
+++ b/src/crypto/x509/internal/macos/corefoundation.s
@@ -6,21 +6,24 @@

 #include "textflag.h"

-TEXT ·x509_CFArrayGetCount_trampoline(SB),NOSPLIT,$0-0
+// The trampolines are ABIInternal as they are address-taken in
+// Go code.
+
+TEXT ·x509_CFArrayGetCount_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_CFArrayGetCount(SB)
-TEXT ·x509_CFArrayGetValueAtIndex_trampoline(SB),NOSPLIT,$0-0
+TEXT ·x509_CFArrayGetValueAtIndex_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_CFArrayGetValueAtIndex(SB)
-TEXT ·x509_CFDataGetBytePtr_trampoline(SB),NOSPLIT,$0-0
+TEXT ·x509_CFDataGetBytePtr_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_CFDataGetBytePtr(SB)
-TEXT ·x509_CFDataGetLength_trampoline(SB),NOSPLIT,$0-0
+TEXT ·x509_CFDataGetLength_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_CFDataGetLength(SB)
-TEXT ·x509_CFStringCreateWithBytes_trampoline(SB),NOSPLIT,$0-0
+TEXT ·x509_CFStringCreateWithBytes_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_CFStringCreateWithBytes(SB)
-TEXT ·x509_CFRelease_trampoline(SB),NOSPLIT,$0-0
+TEXT ·x509_CFRelease_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_CFRelease(SB)
-TEXT ·x509_CFDictionaryGetValueIfPresent_trampoline(SB),NOSPLIT,$0-0
+TEXT ·x509_CFDictionaryGetValueIfPresent_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_CFDictionaryGetValueIfPresent(SB)
-TEXT ·x509_CFNumberGetValue_trampoline(SB),NOSPLIT,$0-0
+TEXT ·x509_CFNumberGetValue_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_CFNumberGetValue(SB)
-TEXT ·x509_CFEqual_trampoline(SB),NOSPLIT,$0-0
+TEXT ·x509_CFEqual_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_CFEqual(SB)
diff --git a/src/crypto/x509/internal/macos/security.s b/src/crypto/x509/internal/macos/security.s
index bd446dbcbe260..bea265a5ef916 100644
--- a/src/crypto/x509/internal/macos/security.s
+++ b/src/crypto/x509/internal/macos/security.s
@@ -6,11 +6,14 @@

 #include "textflag.h"

-TEXT ·x509_SecTrustSettingsCopyCertificates_trampoline(SB),NOSPLIT,$0-0
+// The trampolines are ABIInternal as they are address-taken in
+// Go code.
+
+TEXT ·x509_SecTrustSettingsCopyCertificates_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_SecTrustSettingsCopyCertificates(SB)
-TEXT ·x509_SecItemExport_trampoline(SB),NOSPLIT,$0-0
+TEXT ·x509_SecItemExport_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_SecItemExport(SB)
-TEXT ·x509_SecTrustSettingsCopyTrustSettings_trampoline(SB),NOSPLIT,$0-0
+TEXT ·x509_SecTrustSettingsCopyTrustSettings_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_SecTrustSettingsCopyTrustSettings(SB)
-TEXT ·x509_SecPolicyCopyProperties_trampoline(SB),NOSPLIT,$0-0
+TEXT ·x509_SecPolicyCopyProperties_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_SecPolicyCopyProperties(SB)
diff --git a/src/runtime/sys_darwin_amd64.s b/src/runtime/sys_darwin_amd64.s
index 630fb5df64fd2..0fe8c7e172f5b 100644
--- a/src/runtime/sys_darwin_amd64.s
+++ b/src/runtime/sys_darwin_amd64.s
@@ -5,6 +5,8 @@
 // System calls and other sys.stuff for AMD64, Darwin
 // System calls are implemented in libSystem, this file contains
 // trampolines that convert from Go to C calling convention.
+// The trampolines are ABIInternal as they are referenced from
+// Go code with funcPC.
#include "go_asm.h" #include "go_tls.h" @@ -13,7 +15,7 @@ #define CLOCK_REALTIME 0 // Exit the entire program (like C exit) -TEXT runtime·exit_trampoline(SB),NOSPLIT,$0 +TEXT runtime·exit_trampoline(SB),NOSPLIT,$0 PUSHQ BP MOVQ SP, BP MOVL 0(DI), DI // arg 1 exit status @@ -22,7 +24,7 @@ TEXT runtime·exit_trampoline(SB),NOSPLIT,$0 POPQ BP RET -TEXT runtime·open_trampoline(SB),NOSPLIT,$0 +TEXT runtime·open_trampoline(SB),NOSPLIT,$0 PUSHQ BP MOVQ SP, BP MOVL 8(DI), SI // arg 2 flags @@ -33,7 +35,7 @@ TEXT runtime·open_trampoline(SB),NOSPLIT,$0 POPQ BP RET -TEXT runtime·close_trampoline(SB),NOSPLIT,$0 +TEXT runtime·close_trampoline(SB),NOSPLIT,$0 PUSHQ BP MOVQ SP, BP MOVL 0(DI), DI // arg 1 fd @@ -41,7 +43,7 @@ TEXT runtime·close_trampoline(SB),NOSPLIT,$0 POPQ BP RET -TEXT runtime·read_trampoline(SB),NOSPLIT,$0 +TEXT runtime·read_trampoline(SB),NOSPLIT,$0 PUSHQ BP MOVQ SP, BP MOVQ 8(DI), SI // arg 2 buf @@ -57,7 +59,7 @@ noerr: POPQ BP RET -TEXT runtime·write_trampoline(SB),NOSPLIT,$0 +TEXT runtime·write_trampoline(SB),NOSPLIT,$0 PUSHQ BP MOVQ SP, BP MOVQ 8(DI), SI // arg 2 buf @@ -73,7 +75,7 @@ noerr: POPQ BP RET -TEXT runtime·pipe_trampoline(SB),NOSPLIT,$0 +TEXT runtime·pipe_trampoline(SB),NOSPLIT,$0 PUSHQ BP MOVQ SP, BP CALL libc_pipe(SB) // pointer already in DI @@ -84,7 +86,7 @@ TEXT runtime·pipe_trampoline(SB),NOSPLIT,$0 POPQ BP RET -TEXT runtime·setitimer_trampoline(SB),NOSPLIT,$0 +TEXT runtime·setitimer_trampoline(SB),NOSPLIT,$0 PUSHQ BP MOVQ SP, BP MOVQ 8(DI), SI // arg 2 new @@ -94,7 +96,7 @@ TEXT runtime·setitimer_trampoline(SB),NOSPLIT,$0 POPQ BP RET -TEXT runtime·madvise_trampoline(SB), NOSPLIT, $0 +TEXT runtime·madvise_trampoline(SB), NOSPLIT, $0 PUSHQ BP MOVQ SP, BP MOVQ 8(DI), SI // arg 2 len @@ -105,12 +107,12 @@ TEXT runtime·madvise_trampoline(SB), NOSPLIT, $0 POPQ BP RET -TEXT runtime·mlock_trampoline(SB), NOSPLIT, $0 +TEXT runtime·mlock_trampoline(SB), NOSPLIT, $0 UNDEF // unimplemented GLOBL timebase<>(SB),NOPTR,$(machTimebaseInfo__size) -TEXT runtime·nanotime_trampoline(SB),NOSPLIT,$0 +TEXT runtime·nanotime_trampoline(SB),NOSPLIT,$0 PUSHQ BP MOVQ SP, BP MOVQ DI, BX @@ -139,7 +141,7 @@ initialized: POPQ BP RET -TEXT runtime·walltime_trampoline(SB),NOSPLIT,$0 +TEXT runtime·walltime_trampoline(SB),NOSPLIT,$0 PUSHQ BP // make a frame; keep stack aligned MOVQ SP, BP MOVQ DI, SI // arg 2 timespec @@ -148,7 +150,7 @@ TEXT runtime·walltime_trampoline(SB),NOSPLIT,$0 POPQ BP RET -TEXT runtime·sigaction_trampoline(SB),NOSPLIT,$0 +TEXT runtime·sigaction_trampoline(SB),NOSPLIT,$0 PUSHQ BP MOVQ SP, BP MOVQ 8(DI), SI // arg 2 new @@ -161,7 +163,7 @@ TEXT runtime·sigaction_trampoline(SB),NOSPLIT,$0 POPQ BP RET -TEXT runtime·sigprocmask_trampoline(SB),NOSPLIT,$0 +TEXT runtime·sigprocmask_trampoline(SB),NOSPLIT,$0 PUSHQ BP MOVQ SP, BP MOVQ 8(DI), SI // arg 2 new @@ -174,7 +176,7 @@ TEXT runtime·sigprocmask_trampoline(SB),NOSPLIT,$0 POPQ BP RET -TEXT runtime·sigaltstack_trampoline(SB),NOSPLIT,$0 +TEXT runtime·sigaltstack_trampoline(SB),NOSPLIT,$0 PUSHQ BP MOVQ SP, BP MOVQ 8(DI), SI // arg 2 old @@ -186,7 +188,7 @@ TEXT runtime·sigaltstack_trampoline(SB),NOSPLIT,$0 POPQ BP RET -TEXT runtime·raiseproc_trampoline(SB),NOSPLIT,$0 +TEXT runtime·raiseproc_trampoline(SB),NOSPLIT,$0 PUSHQ BP MOVQ SP, BP MOVL 0(DI), BX // signal @@ -212,7 +214,7 @@ TEXT runtime·sigfwd(SB),NOSPLIT,$0-32 // This is the function registered during sigaction and is invoked when // a signal is received. It just redirects to the Go function sigtrampgo. 
-TEXT runtime·sigtramp(SB),NOSPLIT,$0
+TEXT runtime·sigtramp<ABIInternal>(SB),NOSPLIT,$0
 	// This runs on the signal stack, so we have lots of stack available.
 	// We allocate our own stack space, because if we tell the linker
 	// how much we're using, the NOSPLIT check fails.
@@ -246,7 +248,7 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$0

 // Used instead of sigtramp in programs that use cgo.
 // Arguments from kernel are in DI, SI, DX.
-TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0
+TEXT runtime·cgoSigtramp<ABIInternal>(SB),NOSPLIT,$0
 	// If no traceback function, do usual sigtramp.
 	MOVQ	runtime·cgoTraceback(SB), AX
 	TESTQ	AX, AX
@@ -289,12 +291,12 @@ TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0
 	// The first three arguments, and the fifth, are already in registers.
 	// Set the two remaining arguments now.
 	MOVQ	runtime·cgoTraceback(SB), CX
-	MOVQ	$runtime·sigtramp(SB), R9
+	MOVQ	$runtime·sigtramp<ABIInternal>(SB), R9
 	MOVQ	_cgo_callers(SB), AX
 	JMP	AX

 sigtramp:
-	JMP	runtime·sigtramp(SB)
+	JMP	runtime·sigtramp<ABIInternal>(SB)

 sigtrampnog:
 	// Signal arrived on a non-Go thread. If this is SIGPROF, get a
@@ -320,7 +322,7 @@ sigtrampnog:
 	MOVQ	_cgo_callers(SB), AX
 	JMP	AX

-TEXT runtime·mmap_trampoline(SB),NOSPLIT,$0
+TEXT runtime·mmap_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP	// make a frame; keep stack aligned
 	MOVQ	SP, BP
 	MOVQ	DI, BX
@@ -343,7 +345,7 @@ ok:
 	POPQ	BP
 	RET

-TEXT runtime·munmap_trampoline(SB),NOSPLIT,$0
+TEXT runtime·munmap_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI	// arg 2 len
@@ -355,7 +357,7 @@ TEXT runtime·munmap_trampoline(SB),NOSPLIT,$0
 	POPQ	BP
 	RET

-TEXT runtime·usleep_trampoline(SB),NOSPLIT,$0
+TEXT runtime·usleep_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVL	0(DI), DI	// arg 1 usec
@@ -367,7 +369,7 @@ TEXT runtime·settls(SB),NOSPLIT,$32
 	// Nothing to do on Darwin, pthread already set thread-local storage up.
 	RET

-TEXT runtime·sysctl_trampoline(SB),NOSPLIT,$0
+TEXT runtime·sysctl_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVL	8(DI), SI	// arg 2 miblen
@@ -380,7 +382,7 @@ TEXT runtime·sysctl_trampoline(SB),NOSPLIT,$0
 	POPQ	BP
 	RET

-TEXT runtime·sysctlbyname_trampoline(SB),NOSPLIT,$0
+TEXT runtime·sysctlbyname_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI	// arg 2 oldp
@@ -392,14 +394,14 @@ TEXT runtime·sysctlbyname_trampoline(SB),NOSPLIT,$0
 	POPQ	BP
 	RET

-TEXT runtime·kqueue_trampoline(SB),NOSPLIT,$0
+TEXT runtime·kqueue_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	CALL	libc_kqueue(SB)
 	POPQ	BP
 	RET

-TEXT runtime·kevent_trampoline(SB),NOSPLIT,$0
+TEXT runtime·kevent_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI	// arg 2 keventt
@@ -418,7 +420,7 @@ ok:
 	POPQ	BP
 	RET

-TEXT runtime·fcntl_trampoline(SB),NOSPLIT,$0
+TEXT runtime·fcntl_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVL	4(DI), SI	// arg 2 cmd
@@ -475,7 +477,7 @@ TEXT runtime·mstart_stub(SB),NOSPLIT,$0
 // A pointer to the arguments is passed in DI.
 // A single int32 result is returned in AX.
 // (For more results, make an args/results structure.)
-TEXT runtime·pthread_attr_init_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_attr_init_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP	// make frame, keep stack 16-byte aligned.
 	MOVQ	SP, BP
 	MOVQ	0(DI), DI // arg 1 attr
@@ -483,7 +485,7 @@ TEXT runtime·pthread_attr_init_trampoline(SB),NOSPLIT,$0
 	POPQ	BP
 	RET

-TEXT runtime·pthread_attr_getstacksize_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_attr_getstacksize_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI	// arg 2 size
@@ -492,7 +494,7 @@ TEXT runtime·pthread_attr_getstacksize_trampoline(SB),NOSPLIT,$0
 	POPQ	BP
 	RET

-TEXT runtime·pthread_attr_setdetachstate_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_attr_setdetachstate_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI	// arg 2 state
@@ -501,7 +503,7 @@ TEXT runtime·pthread_attr_setdetachstate_trampoline(SB),NOSPLIT,$0
 	POPQ	BP
 	RET

-TEXT runtime·pthread_create_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_create_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	SUBQ	$16, SP
@@ -514,7 +516,7 @@ TEXT runtime·pthread_create_trampoline(SB),NOSPLIT,$0
 	POPQ	BP
 	RET

-TEXT runtime·raise_trampoline(SB),NOSPLIT,$0
+TEXT runtime·raise_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVL	0(DI), DI	// arg 1 signal
@@ -522,7 +524,7 @@ TEXT runtime·raise_trampoline(SB),NOSPLIT,$0
 	POPQ	BP
 	RET

-TEXT runtime·pthread_mutex_init_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_mutex_init_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI	// arg 2 attr
@@ -531,7 +533,7 @@ TEXT runtime·pthread_mutex_init_trampoline(SB),NOSPLIT,$0
 	POPQ	BP
 	RET

-TEXT runtime·pthread_mutex_lock_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_mutex_lock_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	0(DI), DI	// arg 1 mutex
@@ -539,7 +541,7 @@ TEXT runtime·pthread_mutex_lock_trampoline(SB),NOSPLIT,$0
 	POPQ	BP
 	RET

-TEXT runtime·pthread_mutex_unlock_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_mutex_unlock_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	0(DI), DI	// arg 1 mutex
@@ -547,7 +549,7 @@ TEXT runtime·pthread_mutex_unlock_trampoline(SB),NOSPLIT,$0
 	POPQ	BP
 	RET

-TEXT runtime·pthread_cond_init_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_cond_init_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI	// arg 2 attr
@@ -556,7 +558,7 @@ TEXT runtime·pthread_cond_init_trampoline(SB),NOSPLIT,$0
 	POPQ	BP
 	RET

-TEXT runtime·pthread_cond_wait_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_cond_wait_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI	// arg 2 mutex
@@ -565,7 +567,7 @@ TEXT runtime·pthread_cond_wait_trampoline(SB),NOSPLIT,$0
 	POPQ	BP
 	RET

-TEXT runtime·pthread_cond_timedwait_relative_np_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_cond_timedwait_relative_np_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI	// arg 2 mutex
@@ -575,7 +577,7 @@ TEXT runtime·pthread_cond_timedwait_relative_np_trampoline(SB),NOSPLIT,$0
 	POPQ	BP
 	RET

-TEXT runtime·pthread_cond_signal_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_cond_signal_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	0(DI), DI	// arg 1 cond
@@ -583,7 +585,7 @@ TEXT runtime·pthread_cond_signal_trampoline(SB),NOSPLIT,$0
 	POPQ	BP
 	RET

-TEXT runtime·pthread_self_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_self_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	DI, BX		// BX is caller-save
@@ -592,7 +594,7 @@ TEXT runtime·pthread_self_trampoline(SB),NOSPLIT,$0
 	POPQ	BP
 	RET

-TEXT runtime·pthread_kill_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_kill_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI	// arg 2 sig
@@ -617,7 +619,7 @@ TEXT runtime·pthread_kill_trampoline(SB),NOSPLIT,$0
 //
 // syscall expects a 32-bit result and tests for 32-bit -1
 // to decide there was an error.
-TEXT runtime·syscall(SB),NOSPLIT,$0
+TEXT runtime·syscall<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	SUBQ	$16, SP
@@ -667,7 +669,7 @@ ok:
 //
 // syscallX is like syscall but expects a 64-bit result
 // and tests for 64-bit -1 to decide there was an error.
-TEXT runtime·syscallX(SB),NOSPLIT,$0
+TEXT runtime·syscallX<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	SUBQ	$16, SP
@@ -703,7 +705,7 @@ ok:

 // syscallPtr is like syscallX except that the libc function reports an
 // error by returning NULL and setting errno.
-TEXT runtime·syscallPtr(SB),NOSPLIT,$0
+TEXT runtime·syscallPtr<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	SUBQ	$16, SP
@@ -756,7 +758,7 @@ ok:
 //
 // syscall6 expects a 32-bit result and tests for 32-bit -1
 // to decide there was an error.
-TEXT runtime·syscall6(SB),NOSPLIT,$0
+TEXT runtime·syscall6<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	SUBQ	$16, SP
@@ -809,7 +811,7 @@ ok:
 //
 // syscall6X is like syscall6 but expects a 64-bit result
 // and tests for 64-bit -1 to decide there was an error.
-TEXT runtime·syscall6X(SB),NOSPLIT,$0
+TEXT runtime·syscall6X<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	SUBQ	$16, SP
@@ -845,7 +847,7 @@ ok:

 // syscallNoErr is like syscall6 but does not check for errors, and
 // only returns one value, for use with standard C ABI library functions.
-TEXT runtime·syscallNoErr(SB),NOSPLIT,$0
+TEXT runtime·syscallNoErr<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	SUBQ	$16, SP
diff --git a/src/syscall/mkasm.go b/src/syscall/mkasm.go
index 2ebaf8d3513e5..e53d14bed10b2 100644
--- a/src/syscall/mkasm.go
+++ b/src/syscall/mkasm.go
@@ -53,7 +53,8 @@ func main() {
 		fn := line[5 : len(line)-13]
 		if !trampolines[fn] {
 			trampolines[fn] = true
-			fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn)
+			// The trampolines are ABIInternal as they are address-taken in Go code.
+			fmt.Fprintf(&out, "TEXT ·%s_trampoline<ABIInternal>(SB),NOSPLIT,$0-0\n", fn)
 			fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn)
 		}
 	}
diff --git a/src/syscall/zsyscall_darwin_amd64.s b/src/syscall/zsyscall_darwin_amd64.s
index 492f947855487..5eb48cee447a3 100644
--- a/src/syscall/zsyscall_darwin_amd64.s
+++ b/src/syscall/zsyscall_darwin_amd64.s
@@ -1,253 +1,253 @@
 // go run mkasm.go darwin amd64
 // Code generated by the command above; DO NOT EDIT.
#include "textflag.h" -TEXT ·libc_getfsstat_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getfsstat_trampoline(SB),NOSPLIT,$0-0 JMP libc_getfsstat(SB) -TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_setattrlist(SB) -TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 JMP libc_fdopendir(SB) -TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) -TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) -TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) -TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) -TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0 JMP libc_accept(SB) -TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0 JMP libc_bind(SB) -TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0 JMP libc_connect(SB) -TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0 JMP libc_socket(SB) -TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) -TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) -TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) -TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) -TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) -TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) -TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) -TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) -TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) -TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) -TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) -TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) -TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) -TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 JMP libc_kill(SB) -TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 JMP libc_access(SB) -TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) -TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0 JMP 
libc_chdir(SB) -TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) -TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) -TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 JMP libc_chown(SB) -TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) -TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) -TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 JMP libc_closedir(SB) -TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 JMP libc_dup(SB) -TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) -TEXT ·libc_exchangedata_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_exchangedata_trampoline(SB),NOSPLIT,$0-0 JMP libc_exchangedata(SB) -TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) -TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) -TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) -TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) -TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 JMP libc_flock(SB) -TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) -TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) -TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) -TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) -TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) -TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) -TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) -TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) -TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) -TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) -TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) -TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) -TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) -TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) -TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) -TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) -TEXT 
·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) -TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) -TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) -TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0 JMP libc_link(SB) -TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0 JMP libc_listen(SB) -TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) -TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) -TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) -TEXT ·libc_mlock_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_mlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) -TEXT ·libc_mlockall_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_mlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) -TEXT ·libc_mprotect_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_mprotect_trampoline(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) -TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) -TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0 JMP libc_open(SB) -TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) -TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0 JMP libc_pread(SB) -TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) -TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0 JMP libc_read(SB) -TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 JMP libc_readdir_r(SB) -TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) -TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0 JMP libc_rename(SB) -TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) -TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) -TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) -TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0 JMP libc_select(SB) -TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) -TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) -TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) -TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) -TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) -TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0 
JMP libc_setpriority(SB) -TEXT ·libc_setprivexec_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setprivexec_trampoline(SB),NOSPLIT,$0-0 JMP libc_setprivexec(SB) -TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) -TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) -TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0 JMP libc_setrlimit(SB) -TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) -TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) -TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) -TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) -TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0 JMP libc_sync(SB) -TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) -TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0 JMP libc_umask(SB) -TEXT ·libc_undelete_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_undelete_trampoline(SB),NOSPLIT,$0-0 JMP libc_undelete(SB) -TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) -TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) -TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0 JMP libc_write(SB) -TEXT ·libc_writev_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_writev_trampoline(SB),NOSPLIT,$0-0 JMP libc_writev(SB) -TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) -TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) -TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0 JMP libc_fork(SB) -TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) -TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0 JMP libc_execve(SB) -TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0 JMP libc_exit(SB) -TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) -TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) -TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0 JMP libc_openat(SB) -TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) -TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstat64(SB) -TEXT ·libc_fstatfs64_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fstatfs64_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstatfs64(SB) -TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) -TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_lstat64(SB) -TEXT 
·libc_stat64_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_stat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_stat64(SB) -TEXT ·libc_statfs64_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_statfs64_trampoline(SB),NOSPLIT,$0-0 JMP libc_statfs64(SB) -TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstatat64(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) diff --git a/src/syscall/zsyscall_darwin_arm64.s b/src/syscall/zsyscall_darwin_arm64.s index b606c6e49e35b..73e4a3fd8dff2 100644 --- a/src/syscall/zsyscall_darwin_arm64.s +++ b/src/syscall/zsyscall_darwin_arm64.s @@ -1,253 +1,253 @@ // go run mkasm.go darwin arm64 // Code generated by the command above; DO NOT EDIT. #include "textflag.h" -TEXT ·libc_getfsstat_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getfsstat_trampoline(SB),NOSPLIT,$0-0 JMP libc_getfsstat(SB) -TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_setattrlist(SB) -TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 JMP libc_fdopendir(SB) -TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) -TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) -TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) -TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) -TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0 JMP libc_accept(SB) -TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0 JMP libc_bind(SB) -TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0 JMP libc_connect(SB) -TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0 JMP libc_socket(SB) -TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) -TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) -TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) -TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) -TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) -TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) -TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) -TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) -TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) -TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) -TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP 
libc_utimes(SB) -TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) -TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) -TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 JMP libc_kill(SB) -TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 JMP libc_access(SB) -TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) -TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) -TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) -TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) -TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 JMP libc_chown(SB) -TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) -TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) -TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 JMP libc_closedir(SB) -TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 JMP libc_dup(SB) -TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) -TEXT ·libc_exchangedata_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_exchangedata_trampoline(SB),NOSPLIT,$0-0 JMP libc_exchangedata(SB) -TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) -TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) -TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) -TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) -TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 JMP libc_flock(SB) -TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) -TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) -TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) -TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) -TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) -TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) -TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) -TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) -TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) -TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0 +TEXT 
·libc_getpid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) -TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) -TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) -TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) -TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) -TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) -TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) -TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) -TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) -TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) -TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0 JMP libc_link(SB) -TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0 JMP libc_listen(SB) -TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) -TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) -TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) -TEXT ·libc_mlock_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_mlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) -TEXT ·libc_mlockall_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_mlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) -TEXT ·libc_mprotect_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_mprotect_trampoline(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) -TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) -TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0 JMP libc_open(SB) -TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) -TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0 JMP libc_pread(SB) -TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) -TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0 JMP libc_read(SB) -TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 JMP libc_readdir_r(SB) -TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) -TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0 JMP libc_rename(SB) -TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) -TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) -TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) -TEXT 
·libc_select_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0 JMP libc_select(SB) -TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) -TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) -TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) -TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) -TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) -TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) -TEXT ·libc_setprivexec_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setprivexec_trampoline(SB),NOSPLIT,$0-0 JMP libc_setprivexec(SB) -TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) -TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) -TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0 JMP libc_setrlimit(SB) -TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) -TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) -TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) -TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) -TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0 JMP libc_sync(SB) -TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) -TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0 JMP libc_umask(SB) -TEXT ·libc_undelete_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_undelete_trampoline(SB),NOSPLIT,$0-0 JMP libc_undelete(SB) -TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) -TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) -TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0 JMP libc_write(SB) -TEXT ·libc_writev_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_writev_trampoline(SB),NOSPLIT,$0-0 JMP libc_writev(SB) -TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) -TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) -TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0 JMP libc_fork(SB) -TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) -TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0 JMP libc_execve(SB) -TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0 JMP libc_exit(SB) -TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) -TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0 +TEXT 
·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) -TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0 JMP libc_openat(SB) -TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) -TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) -TEXT ·libc_fstatfs_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fstatfs_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstatfs(SB) -TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) -TEXT ·libc_lstat_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_lstat_trampoline(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) -TEXT ·libc_stat_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_stat_trampoline(SB),NOSPLIT,$0-0 JMP libc_stat(SB) -TEXT ·libc_statfs_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_statfs_trampoline(SB),NOSPLIT,$0-0 JMP libc_statfs(SB) -TEXT ·libc_fstatat_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fstatat_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstatat(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) diff --git a/src/syscall/zsyscall_openbsd_amd64.s b/src/syscall/zsyscall_openbsd_amd64.s index e5c5dde930bb6..8256a451f5f86 100644 --- a/src/syscall/zsyscall_openbsd_amd64.s +++ b/src/syscall/zsyscall_openbsd_amd64.s @@ -1,233 +1,233 @@ // go run mkasm.go openbsd amd64 // Code generated by the command above; DO NOT EDIT. #include "textflag.h" -TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) -TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) -TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) -TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0 JMP libc_accept(SB) -TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0 JMP libc_bind(SB) -TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0 JMP libc_connect(SB) -TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0 JMP libc_socket(SB) -TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) -TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) -TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) -TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) -TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) -TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) -TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) -TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) -TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) -TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 +TEXT 
·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) -TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) -TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) -TEXT ·libc_pipe2_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_pipe2_trampoline(SB),NOSPLIT,$0-0 JMP libc_pipe2(SB) -TEXT ·libc_accept4_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_accept4_trampoline(SB),NOSPLIT,$0-0 JMP libc_accept4(SB) -TEXT ·libc_getdents_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getdents_trampoline(SB),NOSPLIT,$0-0 JMP libc_getdents(SB) -TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 JMP libc_access(SB) -TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) -TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) -TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) -TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) -TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 JMP libc_chown(SB) -TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) -TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) -TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 JMP libc_dup(SB) -TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) -TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) -TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) -TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) -TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) -TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 JMP libc_flock(SB) -TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) -TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) -TEXT ·libc_fstatfs_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fstatfs_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstatfs(SB) -TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) -TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) -TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) -TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) -TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) -TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0 +TEXT 
·libc_getpgid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) -TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) -TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) -TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) -TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) -TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) -TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) -TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) -TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) -TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) -TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) -TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 JMP libc_kill(SB) -TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) -TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) -TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0 JMP libc_link(SB) -TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0 JMP libc_listen(SB) -TEXT ·libc_lstat_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_lstat_trampoline(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) -TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) -TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) -TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) -TEXT ·libc_nanosleep_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_nanosleep_trampoline(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) -TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0 JMP libc_open(SB) -TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) -TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0 JMP libc_pread(SB) -TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) -TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0 JMP libc_read(SB) -TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) -TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0 JMP libc_rename(SB) -TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) -TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) -TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0 JMP libc_select(SB) -TEXT 
·libc_setegid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) -TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) -TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) -TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) -TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) -TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) -TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) -TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) -TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0 JMP libc_setrlimit(SB) -TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) -TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) -TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) -TEXT ·libc_stat_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_stat_trampoline(SB),NOSPLIT,$0-0 JMP libc_stat(SB) -TEXT ·libc_statfs_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_statfs_trampoline(SB),NOSPLIT,$0-0 JMP libc_statfs(SB) -TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) -TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0 JMP libc_sync(SB) -TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) -TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0 JMP libc_umask(SB) -TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) -TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) -TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0 JMP libc_write(SB) -TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) -TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) -TEXT ·libc_utimensat_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_utimensat_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) -TEXT ·libc_syscall_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_syscall_trampoline(SB),NOSPLIT,$0-0 JMP libc_syscall(SB) -TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) -TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) -TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) -TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0 JMP libc_fork(SB) -TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) -TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0 +TEXT 
·libc_execve_trampoline(SB),NOSPLIT,$0-0 JMP libc_execve(SB) -TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0 JMP libc_exit(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) -TEXT ·libc_getentropy_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getentropy_trampoline(SB),NOSPLIT,$0-0 JMP libc_getentropy(SB) -TEXT ·libc_fstatat_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fstatat_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstatat(SB) -TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) -TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0 JMP libc_openat(SB) diff --git a/src/syscall/zsyscall_openbsd_arm64.s b/src/syscall/zsyscall_openbsd_arm64.s index 37778b1db5417..f6e0a8da94c4f 100644 --- a/src/syscall/zsyscall_openbsd_arm64.s +++ b/src/syscall/zsyscall_openbsd_arm64.s @@ -1,233 +1,233 @@ // go run mkasm.go openbsd arm64 // Code generated by the command above; DO NOT EDIT. #include "textflag.h" -TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) -TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) -TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) -TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0 JMP libc_accept(SB) -TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0 JMP libc_bind(SB) -TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0 JMP libc_connect(SB) -TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0 JMP libc_socket(SB) -TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) -TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) -TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) -TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) -TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) -TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) -TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) -TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) -TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) -TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) -TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) -TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 +TEXT 
·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) -TEXT ·libc_pipe2_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_pipe2_trampoline(SB),NOSPLIT,$0-0 JMP libc_pipe2(SB) -TEXT ·libc_accept4_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_accept4_trampoline(SB),NOSPLIT,$0-0 JMP libc_accept4(SB) -TEXT ·libc_getdents_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getdents_trampoline(SB),NOSPLIT,$0-0 JMP libc_getdents(SB) -TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 JMP libc_access(SB) -TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) -TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) -TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) -TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) -TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 JMP libc_chown(SB) -TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) -TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) -TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 JMP libc_dup(SB) -TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) -TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) -TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) -TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) -TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) -TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 JMP libc_flock(SB) -TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) -TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) -TEXT ·libc_fstatfs_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fstatfs_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstatfs(SB) -TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) -TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) -TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) -TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) -TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) -TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) -TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) -TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) -TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) -TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0 +TEXT 
·libc_getpriority_trampoline(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) -TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) -TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) -TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) -TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) -TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) -TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) -TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 JMP libc_kill(SB) -TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) -TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) -TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0 JMP libc_link(SB) -TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0 JMP libc_listen(SB) -TEXT ·libc_lstat_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_lstat_trampoline(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) -TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) -TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) -TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) -TEXT ·libc_nanosleep_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_nanosleep_trampoline(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) -TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0 JMP libc_open(SB) -TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) -TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0 JMP libc_pread(SB) -TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) -TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0 JMP libc_read(SB) -TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) -TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0 JMP libc_rename(SB) -TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) -TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) -TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0 JMP libc_select(SB) -TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) -TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) -TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) -TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) -TEXT 
·libc_setpgid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) -TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) -TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) -TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) -TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0 JMP libc_setrlimit(SB) -TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) -TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) -TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) -TEXT ·libc_stat_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_stat_trampoline(SB),NOSPLIT,$0-0 JMP libc_stat(SB) -TEXT ·libc_statfs_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_statfs_trampoline(SB),NOSPLIT,$0-0 JMP libc_statfs(SB) -TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) -TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0 JMP libc_sync(SB) -TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) -TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0 JMP libc_umask(SB) -TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) -TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) -TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0 JMP libc_write(SB) -TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) -TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) -TEXT ·libc_utimensat_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_utimensat_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) -TEXT ·libc_syscall_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_syscall_trampoline(SB),NOSPLIT,$0-0 JMP libc_syscall(SB) -TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) -TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) -TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) -TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0 JMP libc_fork(SB) -TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) -TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0 JMP libc_execve(SB) -TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0 JMP libc_exit(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) -TEXT ·libc_getentropy_trampoline(SB),NOSPLIT,$0-0 +TEXT ·libc_getentropy_trampoline(SB),NOSPLIT,$0-0 JMP libc_getentropy(SB) -TEXT ·libc_fstatat_trampoline(SB),NOSPLIT,$0-0 +TEXT 
·libc_fstatat_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_fstatat(SB)
-TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_unlinkat(SB)
-TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_openat(SB)

From 401d7e5a242f1007c2637a25111a6fa985728c08 Mon Sep 17 00:00:00 2001
From: Cherry Zhang
Date: Fri, 29 Jan 2021 13:46:34 -0500
Subject: [PATCH 438/474] [dev.regabi] cmd/compile: reserve X15 as zero register on AMD64

In ABIInternal, reserve X15 as constant zero, and use it to zero
memory. (Maybe there can be more use of it?)

The register is zeroed when transition to ABIInternal from ABI0.

Caveat: using X15 generates longer instructions than using X0.
Maybe we want to use X0?

Change-Id: I12d5ee92a01fc0b59dad4e5ab023ac71bc2a8b7d
Reviewed-on: https://go-review.googlesource.com/c/go/+/288093
Trust: Cherry Zhang
Run-TryBot: Cherry Zhang
TryBot-Result: Go Bot
Reviewed-by: David Chase
---
 src/cmd/compile/internal/amd64/ggen.go       |   4 +-
 src/cmd/compile/internal/amd64/ssa.go        |  43 ++-
 src/cmd/compile/internal/ssa/config.go       |   1 +
 src/cmd/compile/internal/ssa/gen/AMD64.rules |  26 +-
 src/cmd/compile/internal/ssa/gen/AMD64Ops.go |  41 +--
 src/cmd/compile/internal/ssa/op.go           |   4 +-
 src/cmd/compile/internal/ssa/opGen.go        | 281 ++++++++++---------
 src/cmd/compile/internal/ssa/rewriteAMD64.go |  96 +++----
 src/cmd/compile/internal/ssagen/abi.go       |  15 +-
 src/cmd/compile/internal/ssagen/ssa.go       |   5 +-
 src/runtime/duff_amd64.s                     | 128 ++++-----
 src/runtime/mkduff.go                        |  14 +-
 test/codegen/structs.go                      |   4 +-
 13 files changed, 347 insertions(+), 315 deletions(-)

diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go
index dacdb07a3837d..aefdb14a69b12 100644
--- a/src/cmd/compile/internal/amd64/ggen.go
+++ b/src/cmd/compile/internal/amd64/ggen.go
@@ -22,8 +22,8 @@ var isPlan9 = objabi.GOOS == "plan9"
 const (
 	dzBlocks    = 16 // number of MOV/ADD blocks
 	dzBlockLen  = 4  // number of clears per block
-	dzBlockSize = 19 // size of instructions in a single block
-	dzMovSize   = 4  // size of single MOV instruction w/ offset
+	dzBlockSize = 23 // size of instructions in a single block
+	dzMovSize   = 5  // size of single MOV instruction w/ offset
 	dzLeaqSize  = 4  // size of single LEAQ instruction
 	dzClearStep = 16 // number of bytes cleared by each MOV instruction
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index da355c49d1e2a..d9c97183fd5d0 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -813,6 +813,20 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
 		ssagen.AddAux2(&p.To, v, sc.Off())
+	case ssa.OpAMD64MOVOstorezero:
+		if s.ABI != obj.ABIInternal {
+			v.Fatalf("MOVOstorezero can be only used in ABIInternal functions")
+		}
+		if !base.Flag.ABIWrap {
+			// zeroing X15 manually if wrappers are not used
+			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+		}
+		p := s.Prog(v.Op.Asm())
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = x86.REG_X15
+		p.To.Type = obj.TYPE_MEM
+		p.To.Reg = v.Args[0].Reg()
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1,
 		ssa.OpAMD64ADDLconstmodifyidx1, ssa.OpAMD64ADDLconstmodifyidx4, ssa.OpAMD64ADDLconstmodifyidx8, ssa.OpAMD64ADDQconstmodifyidx1,
 		ssa.OpAMD64ADDQconstmodifyidx8, ssa.OpAMD64ANDLconstmodifyidx1, ssa.OpAMD64ANDLconstmodifyidx4, ssa.OpAMD64ANDLconstmodifyidx8, ssa.OpAMD64ANDQconstmodifyidx1, ssa.OpAMD64ANDQconstmodifyidx8,
@@ -900,6 +914,13 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 			v.Fatalf("input[0] and output not in same register %s", v.LongString())
 		}
 	case ssa.OpAMD64DUFFZERO:
+		if s.ABI != obj.ABIInternal {
+			v.Fatalf("MOVOconst can be only used in ABIInternal functions")
+		}
+		if !base.Flag.ABIWrap {
+			// zeroing X15 manually if wrappers are not used
+			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+		}
 		off := duffStart(v.AuxInt)
 		adj := duffAdj(v.AuxInt)
 		var p *obj.Prog
@@ -915,12 +936,6 @@
 		p.To.Type = obj.TYPE_ADDR
 		p.To.Sym = ir.Syms.Duffzero
 		p.To.Offset = off
-	case ssa.OpAMD64MOVOconst:
-		if v.AuxInt != 0 {
-			v.Fatalf("MOVOconst can only do constant=0")
-		}
-		r := v.Reg()
-		opregreg(s, x86.AXORPS, r, r)
 	case ssa.OpAMD64DUFFCOPY:
 		p := s.Prog(obj.ADUFFCOPY)
 		p.To.Type = obj.TYPE_ADDR
@@ -1000,7 +1015,17 @@
 			q.To.Type = obj.TYPE_REG
 			q.To.Reg = r
 		}
-	case ssa.OpAMD64CALLstatic, ssa.OpAMD64CALLclosure, ssa.OpAMD64CALLinter:
+	case ssa.OpAMD64CALLstatic:
+		if s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal {
+			// zeroing X15 when entering ABIInternal from ABI0
+			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+		}
+		s.Call(v)
+		if s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 {
+			// zeroing X15 when entering ABIInternal from ABI0
+			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+		}
+	case ssa.OpAMD64CALLclosure, ssa.OpAMD64CALLinter:
 		s.Call(v)

 	case ssa.OpAMD64LoweredGetCallerPC:
@@ -1297,6 +1322,10 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
 	case ssa.BlockRet:
 		s.Prog(obj.ARET)
 	case ssa.BlockRetJmp:
+		if s.ABI == obj.ABI0 && b.Aux.(*obj.LSym).ABI() == obj.ABIInternal {
+			// zeroing X15 when entering ABIInternal from ABI0
+			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+		}
 		p := s.Prog(obj.ARET)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
index e952c73d9b487..32cfd7e61eaad 100644
--- a/src/cmd/compile/internal/ssa/config.go
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -194,6 +194,7 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
 		c.registers = registersAMD64[:]
 		c.gpRegMask = gpRegMaskAMD64
 		c.fpRegMask = fpRegMaskAMD64
+		c.specialRegMask = specialRegMaskAMD64
 		c.FPReg = framepointerRegAMD64
 		c.LinkReg = linkRegAMD64
 		c.hasGReg = false
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index 7d46266411fce..706336289e189 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -361,31 +361,31 @@
 // Adjust zeros to be a multiple of 16 bytes.
 (Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE =>
 	(Zero [s-s%16] (OffPtr destptr [s%16])
-		(MOVOstore destptr (MOVOconst [0]) mem))
+		(MOVOstorezero destptr mem))
 (Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE =>
 	(Zero [s-s%16] (OffPtr destptr [s%16])
 		(MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))
 (Zero [16] destptr mem) && config.useSSE =>
-	(MOVOstore destptr (MOVOconst [0]) mem)
+	(MOVOstorezero destptr mem)
 (Zero [32] destptr mem) && config.useSSE =>
-	(MOVOstore (OffPtr destptr [16]) (MOVOconst [0])
-		(MOVOstore destptr (MOVOconst [0]) mem))
+	(MOVOstorezero (OffPtr destptr [16])
+		(MOVOstorezero destptr mem))
 (Zero [48] destptr mem) && config.useSSE =>
-	(MOVOstore (OffPtr destptr [32]) (MOVOconst [0])
-		(MOVOstore (OffPtr destptr [16]) (MOVOconst [0])
-			(MOVOstore destptr (MOVOconst [0]) mem)))
+	(MOVOstorezero (OffPtr destptr [32])
+		(MOVOstorezero (OffPtr destptr [16])
+			(MOVOstorezero destptr mem)))
 (Zero [64] destptr mem) && config.useSSE =>
-	(MOVOstore (OffPtr destptr [48]) (MOVOconst [0])
-		(MOVOstore (OffPtr destptr [32]) (MOVOconst [0])
-			(MOVOstore (OffPtr destptr [16]) (MOVOconst [0])
-				(MOVOstore destptr (MOVOconst [0]) mem))))
+	(MOVOstorezero (OffPtr destptr [48])
+		(MOVOstorezero (OffPtr destptr [32])
+			(MOVOstorezero (OffPtr destptr [16])
+				(MOVOstorezero destptr mem))))

 // Medium zeroing uses a duff device.
 (Zero [s] destptr mem)
 	&& s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice =>
-	(DUFFZERO [s] destptr (MOVOconst [0]) mem)
+	(DUFFZERO [s] destptr mem)

 // Large zeroing uses REP STOSQ.
 (Zero [s] destptr mem)
@@ -1900,7 +1900,7 @@
 	&& c.Val() == 0
 	&& c2.Val() == 0
 	&& clobber(x)
-	=> (MOVOstore [c2.Off32()] {s} p (MOVOconst [0]) mem)
+	=> (MOVOstorezero [c2.Off32()] {s} p mem)

 // Combine stores into larger (unaligned) stores. Little endian.
 (MOVBstore [i] {s} p (SHR(W|L|Q)const [8] w) x:(MOVBstore [i-1] {s} p w mem))
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
index de5372670b365..0a411bbdca0b3 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
@@ -61,7 +61,7 @@ var regNamesAMD64 = []string{
 	"X12",
 	"X13",
 	"X14",
-	"X15",
+	"X15", // constant 0 in ABIInternal

 	// If you add registers, update asyncPreempt in runtime
@@ -97,7 +97,8 @@ func init() {
 	dx = buildReg("DX")
 	bx = buildReg("BX")
 	gp = buildReg("AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15")
-	fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15")
+	fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14")
+	x15 = buildReg("X15")
 	gpsp = gp | buildReg("SP")
 	gpspsb = gpsp | buildReg("SB")
 	callerSave = gp | fp
@@ -684,19 +685,20 @@ func init() {
 	// Note: LEAx{1,2,4,8} must not have OpSB as either argument.

 	// auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
-	{name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVBLZX", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load byte from arg0+auxint+aux. arg1=mem. Zero extend.
-	{name: "MOVBQSXload", argLength: 2, reg: gpload, asm: "MOVBQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64
-	{name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVWLZX", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
- {name: "MOVWQSXload", argLength: 2, reg: gpload, asm: "MOVWQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64 - {name: "MOVLload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Zero extend. - {name: "MOVLQSXload", argLength: 2, reg: gpload, asm: "MOVLQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64 - {name: "MOVQload", argLength: 2, reg: gpload, asm: "MOVQ", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load 8 bytes from arg0+auxint+aux. arg1=mem - {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store byte in arg1 to arg0+auxint+aux. arg2=mem - {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem - {name: "MOVLstore", argLength: 3, reg: gpstore, asm: "MOVL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem - {name: "MOVQstore", argLength: 3, reg: gpstore, asm: "MOVQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem - {name: "MOVOload", argLength: 2, reg: fpload, asm: "MOVUPS", aux: "SymOff", typ: "Int128", faultOnNilArg0: true, symEffect: "Read"}, // load 16 bytes from arg0+auxint+aux. arg1=mem - {name: "MOVOstore", argLength: 3, reg: fpstore, asm: "MOVUPS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 16 bytes in arg1 to arg0+auxint+aux. arg2=mem + {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVBLZX", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load byte from arg0+auxint+aux. arg1=mem. Zero extend. + {name: "MOVBQSXload", argLength: 2, reg: gpload, asm: "MOVBQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64 + {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVWLZX", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Zero extend. + {name: "MOVWQSXload", argLength: 2, reg: gpload, asm: "MOVWQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64 + {name: "MOVLload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Zero extend. + {name: "MOVLQSXload", argLength: 2, reg: gpload, asm: "MOVLQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64 + {name: "MOVQload", argLength: 2, reg: gpload, asm: "MOVQ", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load 8 bytes from arg0+auxint+aux. arg1=mem + {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store byte in arg1 to arg0+auxint+aux. arg2=mem + {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes in arg1 to arg0+auxint+aux. 
arg2=mem + {name: "MOVLstore", argLength: 3, reg: gpstore, asm: "MOVL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem + {name: "MOVQstore", argLength: 3, reg: gpstore, asm: "MOVQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem + {name: "MOVOload", argLength: 2, reg: fpload, asm: "MOVUPS", aux: "SymOff", typ: "Int128", faultOnNilArg0: true, symEffect: "Read"}, // load 16 bytes from arg0+auxint+aux. arg1=mem + {name: "MOVOstore", argLength: 3, reg: fpstore, asm: "MOVUPS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 16 bytes in arg1 to arg0+auxint+aux. arg2=mem + {name: "MOVOstorezero", argLength: 2, reg: regInfo{inputs: []regMask{gpspsb, 0}}, asm: "MOVUPS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 16 bytes of zero to arg0+auxint+aux. arg1=mem // indexed loads/stores {name: "MOVBloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBLZX", scale: 1, aux: "SymOff", typ: "UInt8", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem @@ -735,22 +737,20 @@ func init() { {name: "MOVQstoreconstidx8", argLength: 3, reg: gpstoreconstidx, asm: "MOVQ", scale: 8, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store 8 bytes of ... 8*arg1 ... // arg0 = pointer to start of memory to zero - // arg1 = value to store (will always be zero) - // arg2 = mem + // arg1 = mem // auxint = # of bytes to zero // returns mem { name: "DUFFZERO", aux: "Int64", - argLength: 3, + argLength: 2, reg: regInfo{ - inputs: []regMask{buildReg("DI"), buildReg("X0")}, + inputs: []regMask{buildReg("DI")}, clobbers: buildReg("DI"), }, faultOnNilArg0: true, unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts }, - {name: "MOVOconst", reg: regInfo{nil, 0, []regMask{fp}}, typ: "Int128", aux: "Int128", rematerializeable: true}, // arg0 = address of memory to zero // arg1 = # of 8-byte words to zero @@ -935,6 +935,7 @@ func init() { regnames: regNamesAMD64, gpregmask: gp, fpregmask: fp, + specialregmask: x15, framepointerreg: int8(num["BP"]), linkreg: -1, // not used }) diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go index c64b145107e23..f41d014d4138d 100644 --- a/src/cmd/compile/internal/ssa/op.go +++ b/src/cmd/compile/internal/ssa/op.go @@ -202,9 +202,9 @@ func ClosureAuxCall(args []Param, results []Param) *AuxCall { func (*AuxCall) CanBeAnSSAAux() {} // OwnAuxCall returns a function's own AuxCall -func OwnAuxCall(args []Param, results []Param) *AuxCall { +func OwnAuxCall(fn *obj.LSym, args []Param, results []Param) *AuxCall { // TODO if this remains identical to ClosureAuxCall above after new ABI is done, should deduplicate. 
- return &AuxCall{Fn: nil, args: args, results: results} + return &AuxCall{Fn: fn, args: args, results: results} } const ( diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index e590f6ba5d990..9ad4c2f305f9e 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -970,6 +970,7 @@ const ( OpAMD64MOVQstore OpAMD64MOVOload OpAMD64MOVOstore + OpAMD64MOVOstorezero OpAMD64MOVBloadidx1 OpAMD64MOVWloadidx1 OpAMD64MOVWloadidx2 @@ -998,7 +999,6 @@ const ( OpAMD64MOVQstoreconstidx1 OpAMD64MOVQstoreconstidx8 OpAMD64DUFFZERO - OpAMD64MOVOconst OpAMD64REPSTOSQ OpAMD64CALLstatic OpAMD64CALLclosure @@ -6162,11 +6162,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDSS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6178,11 +6178,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6193,11 +6193,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBSS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6208,11 +6208,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6224,11 +6224,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AMULSS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6240,11 +6240,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AMULSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6255,11 +6255,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ADIVSS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6270,11 +6270,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ADIVSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6290,7 +6290,7 @@ var opcodeTable = [...]opInfo{ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6306,7 +6306,7 @@ var opcodeTable = [...]opInfo{ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6318,7 +6318,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVSS, reg: regInfo{ outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6330,7 +6330,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVSD, reg: regInfo{ outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6347,7 +6347,7 @@ var opcodeTable = [...]opInfo{ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6364,7 +6364,7 @@ var opcodeTable = [...]opInfo{ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6381,7 +6381,7 @@ var opcodeTable = [...]opInfo{ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6398,7 +6398,7 @@ var opcodeTable = [...]opInfo{ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6411,7 +6411,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVSS, reg: regInfo{ inputs: []inputInfo{ - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, }, @@ -6425,7 +6425,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVSD, reg: regInfo{ inputs: []inputInfo{ - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, }, @@ -6439,8 +6439,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, }, @@ -6454,8 +6454,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, }, @@ -6469,8 +6469,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, }, @@ -6484,8 +6484,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, }, @@ -6500,11 +6500,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDSS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6518,11 +6518,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDSD, reg: regInfo{ inputs: []inputInfo{ - {0, 
4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6536,11 +6536,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBSS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6554,11 +6554,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6572,11 +6572,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AMULSS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6590,11 +6590,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AMULSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6608,11 +6608,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ADIVSS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6626,11 +6626,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ADIVSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6644,12 +6644,12 @@ var opcodeTable = [...]opInfo{ scale: 
1, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6663,12 +6663,12 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6682,12 +6682,12 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6701,12 +6701,12 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6720,12 +6720,12 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6739,12 +6739,12 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6758,12 +6758,12 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 
4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6777,12 +6777,12 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6796,12 +6796,12 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6815,12 +6815,12 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6834,12 +6834,12 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6853,12 +6853,12 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6872,12 +6872,12 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6891,12 +6891,12 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6910,12 +6910,12 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -6929,12 +6929,12 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -8245,8 +8245,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AUCOMISS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -8256,8 +8256,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AUCOMISD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -11628,10 +11628,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ASQRTSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -11642,10 +11642,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AROUNDSD, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -11656,12 +11656,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADD231SD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -12097,7 +12097,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACVTTSD2SL, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -12110,7 +12110,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACVTTSD2SQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -12123,7 +12123,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACVTTSS2SL, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -12136,7 +12136,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACVTTSS2SQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -12152,7 +12152,7 @@ var opcodeTable = [...]opInfo{ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -12165,7 +12165,7 @@ var opcodeTable = [...]opInfo{ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -12178,7 +12178,7 @@ var opcodeTable = [...]opInfo{ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -12191,7 +12191,7 @@ var opcodeTable = [...]opInfo{ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, outputs: []outputInfo{ - {0, 
4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -12201,10 +12201,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ACVTSD2SS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -12214,10 +12214,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ACVTSS2SD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -12229,7 +12229,7 @@ var opcodeTable = [...]opInfo{ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -12238,7 +12238,7 @@ var opcodeTable = [...]opInfo{ argLen: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -12253,7 +12253,7 @@ var opcodeTable = [...]opInfo{ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -12262,7 +12262,7 @@ var opcodeTable = [...]opInfo{ argLen: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -12277,11 +12277,11 @@ var opcodeTable = [...]opInfo{ asm: x86.APXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -12720,7 +12720,7 @@ var opcodeTable = [...]opInfo{ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -12733,7 +12733,20 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVUPS, reg: regInfo{ inputs: []inputInfo{ - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 
R14 R15 SB + }, + }, + }, + { + name: "MOVOstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVUPS, + reg: regInfo{ + inputs: []inputInfo{ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, }, @@ -13159,28 +13172,16 @@ var opcodeTable = [...]opInfo{ { name: "DUFFZERO", auxType: auxInt64, - argLen: 3, + argLen: 2, faultOnNilArg0: true, unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 128}, // DI - {1, 65536}, // X0 + {0, 128}, // DI }, clobbers: 128, // DI }, }, - { - name: "MOVOconst", - auxType: auxInt128, - argLen: 0, - rematerializeable: true, - reg: regInfo{ - outputs: []outputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - }, - }, - }, { name: "REPSTOSQ", argLen: 4, @@ -13201,7 +13202,7 @@ var opcodeTable = [...]opInfo{ clobberFlags: true, call: true, reg: regInfo{ - clobbers: 4294967279, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, { @@ -13215,7 +13216,7 @@ var opcodeTable = [...]opInfo{ {1, 4}, // DX {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, - clobbers: 4294967279, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, { @@ -13228,7 +13229,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, - clobbers: 4294967279, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, { @@ -13328,7 +13329,7 @@ var opcodeTable = [...]opInfo{ {0, 128}, // DI {1, 879}, // AX CX DX BX BP SI R8 R9 }, - clobbers: 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + clobbers: 2147418112, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, { @@ -36193,8 +36194,8 @@ var registersAMD64 = [...]Register{ {32, 0, -1, "SB"}, } var gpRegMaskAMD64 = regMask(65519) -var fpRegMaskAMD64 = regMask(4294901760) -var specialRegMaskAMD64 = regMask(0) +var fpRegMaskAMD64 = regMask(2147418112) +var specialRegMaskAMD64 = regMask(2147483648) var framepointerRegAMD64 = int8(5) var linkRegAMD64 = int8(-1) var registersARM = [...]Register{ diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index db2dc7a004049..6087874fa9270 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -14226,7 +14226,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { } // match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem)) // cond: config.useSSE && x.Uses == 1 && c2.Off() + 8 == c.Off() && c.Val() == 0 && c2.Val() == 0 && clobber(x) - // result: (MOVOstore [c2.Off32()] {s} p (MOVOconst [0]) mem) + // result: (MOVOstorezero [c2.Off32()] {s} p mem) for { c := auxIntToValAndOff(v.AuxInt) s := auxToSym(v.Aux) @@ -14243,12 +14243,10 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { if p != x.Args[0] || !(config.useSSE && x.Uses == 1 && c2.Off()+8 == c.Off() && c.Val() == 0 && c2.Val() 
== 0 && clobber(x)) { break } - v.reset(OpAMD64MOVOstore) + v.reset(OpAMD64MOVOstorezero) v.AuxInt = int32ToAuxInt(c2.Off32()) v.Aux = symToAux(s) - v0 := b.NewValue0(x.Pos, OpAMD64MOVOconst, types.TypeInt128) - v0.AuxInt = int128ToAuxInt(0) - v.AddArg3(p, v0, mem) + v.AddArg2(p, mem) return true } // match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) @@ -34163,7 +34161,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool { } // match: (Zero [s] destptr mem) // cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE - // result: (Zero [s-s%16] (OffPtr destptr [s%16]) (MOVOstore destptr (MOVOconst [0]) mem)) + // result: (Zero [s-s%16] (OffPtr destptr [s%16]) (MOVOstorezero destptr mem)) for { s := auxIntToInt64(v.AuxInt) destptr := v_0 @@ -34176,10 +34174,8 @@ func rewriteValueAMD64_OpZero(v *Value) bool { v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v0.AuxInt = int64ToAuxInt(s % 16) v0.AddArg(destptr) - v1 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) - v2.AuxInt = int128ToAuxInt(0) - v1.AddArg3(destptr, v2, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem) + v1.AddArg2(destptr, mem) v.AddArg2(v0, v1) return true } @@ -34206,7 +34202,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool { } // match: (Zero [16] destptr mem) // cond: config.useSSE - // result: (MOVOstore destptr (MOVOconst [0]) mem) + // result: (MOVOstorezero destptr mem) for { if auxIntToInt64(v.AuxInt) != 16 { break @@ -34216,15 +34212,13 @@ func rewriteValueAMD64_OpZero(v *Value) bool { if !(config.useSSE) { break } - v.reset(OpAMD64MOVOstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) - v0.AuxInt = int128ToAuxInt(0) - v.AddArg3(destptr, v0, mem) + v.reset(OpAMD64MOVOstorezero) + v.AddArg2(destptr, mem) return true } // match: (Zero [32] destptr mem) // cond: config.useSSE - // result: (MOVOstore (OffPtr destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem)) + // result: (MOVOstorezero (OffPtr destptr [16]) (MOVOstorezero destptr mem)) for { if auxIntToInt64(v.AuxInt) != 32 { break @@ -34234,20 +34228,18 @@ func rewriteValueAMD64_OpZero(v *Value) bool { if !(config.useSSE) { break } - v.reset(OpAMD64MOVOstore) + v.reset(OpAMD64MOVOstorezero) v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v0.AuxInt = int64ToAuxInt(16) v0.AddArg(destptr) - v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) - v1.AuxInt = int128ToAuxInt(0) - v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v2.AddArg3(destptr, v1, mem) - v.AddArg3(v0, v1, v2) + v1 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem) + v1.AddArg2(destptr, mem) + v.AddArg2(v0, v1) return true } // match: (Zero [48] destptr mem) // cond: config.useSSE - // result: (MOVOstore (OffPtr destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))) + // result: (MOVOstorezero (OffPtr destptr [32]) (MOVOstorezero (OffPtr destptr [16]) (MOVOstorezero destptr mem))) for { if auxIntToInt64(v.AuxInt) != 48 { break @@ -34257,25 +34249,23 @@ func rewriteValueAMD64_OpZero(v *Value) bool { if !(config.useSSE) { break } - v.reset(OpAMD64MOVOstore) + v.reset(OpAMD64MOVOstorezero) v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v0.AuxInt = int64ToAuxInt(32) v0.AddArg(destptr) - v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) - v1.AuxInt = int128ToAuxInt(0) - v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v3 := b.NewValue0(v.Pos, OpOffPtr, 
destptr.Type) - v3.AuxInt = int64ToAuxInt(16) - v3.AddArg(destptr) - v4 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v4.AddArg3(destptr, v1, mem) - v2.AddArg3(v3, v1, v4) - v.AddArg3(v0, v1, v2) + v1 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) + v2.AuxInt = int64ToAuxInt(16) + v2.AddArg(destptr) + v3 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem) + v3.AddArg2(destptr, mem) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) return true } // match: (Zero [64] destptr mem) // cond: config.useSSE - // result: (MOVOstore (OffPtr destptr [48]) (MOVOconst [0]) (MOVOstore (OffPtr destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem)))) + // result: (MOVOstorezero (OffPtr destptr [48]) (MOVOstorezero (OffPtr destptr [32]) (MOVOstorezero (OffPtr destptr [16]) (MOVOstorezero destptr mem)))) for { if auxIntToInt64(v.AuxInt) != 64 { break @@ -34285,30 +34275,28 @@ func rewriteValueAMD64_OpZero(v *Value) bool { if !(config.useSSE) { break } - v.reset(OpAMD64MOVOstore) + v.reset(OpAMD64MOVOstorezero) v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) v0.AuxInt = int64ToAuxInt(48) v0.AddArg(destptr) - v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) - v1.AuxInt = int128ToAuxInt(0) - v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) - v3.AuxInt = int64ToAuxInt(32) - v3.AddArg(destptr) - v4 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v5 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) - v5.AuxInt = int64ToAuxInt(16) - v5.AddArg(destptr) - v6 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v6.AddArg3(destptr, v1, mem) - v4.AddArg3(v5, v1, v6) - v2.AddArg3(v3, v1, v4) - v.AddArg3(v0, v1, v2) + v1 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) + v2.AuxInt = int64ToAuxInt(32) + v2.AddArg(destptr) + v3 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) + v4.AuxInt = int64ToAuxInt(16) + v4.AddArg(destptr) + v5 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem) + v5.AddArg2(destptr, mem) + v3.AddArg2(v4, v5) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) return true } // match: (Zero [s] destptr mem) // cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice - // result: (DUFFZERO [s] destptr (MOVOconst [0]) mem) + // result: (DUFFZERO [s] destptr mem) for { s := auxIntToInt64(v.AuxInt) destptr := v_0 @@ -34318,9 +34306,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool { } v.reset(OpAMD64DUFFZERO) v.AuxInt = int64ToAuxInt(s) - v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) - v0.AuxInt = int128ToAuxInt(0) - v.AddArg3(destptr, v0, mem) + v.AddArg2(destptr, mem) return true } // match: (Zero [s] destptr mem) diff --git a/src/cmd/compile/internal/ssagen/abi.go b/src/cmd/compile/internal/ssagen/abi.go index 5bebce1db58df..7180b3816ceea 100644 --- a/src/cmd/compile/internal/ssagen/abi.go +++ b/src/cmd/compile/internal/ssagen/abi.go @@ -300,9 +300,20 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { // to allocate any stack space). Doing this will require some // extra work in typecheck/walk/ssa, might want to add a new node // OTAILCALL or something to this effect. 
- var tail ir.Node - if tfn.Type().NumResults() == 0 && tfn.Type().NumParams() == 0 && tfn.Type().NumRecvs() == 0 && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) { + tailcall := tfn.Type().NumResults() == 0 && tfn.Type().NumParams() == 0 && tfn.Type().NumRecvs() == 0 + if base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink { + // cannot tailcall on PPC64 with dynamic linking, as we need + // to restore R2 after call. + tailcall = false + } + if base.Ctxt.Arch.Name == "amd64" && wrapperABI == obj.ABIInternal { + // cannot tailcall from ABIInternal to ABI0 on AMD64, as we need + // to set special registers (X15) when returning to ABIInternal. + tailcall = false + } + var tail ir.Node + if tailcall { tail = ir.NewTailCallStmt(base.Pos, f.Nname) } else { call := ir.NewCallExpr(base.Pos, ir.OCALL, f.Nname, nil) diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index b042c132d542e..6b1ddebd32aae 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -468,7 +468,7 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { s.Fatalf("local variable with class %v unimplemented", n.Class) } } - s.f.OwnAux = ssa.OwnAuxCall(args, results) + s.f.OwnAux = ssa.OwnAuxCall(fn.LSym, args, results) // Populate SSAable arguments. for _, n := range fn.Dcl { @@ -6266,6 +6266,8 @@ type Branch struct { // State contains state needed during Prog generation. type State struct { + ABI obj.ABI + pp *objw.Progs // Branches remembers all the branch instructions we've seen @@ -6361,6 +6363,7 @@ func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) { // genssa appends entries to pp for each instruction in f. func genssa(f *ssa.Func, pp *objw.Progs) { var s State + s.ABI = f.OwnAux.Fn.ABI() e := f.Frontend().(*ssafn) diff --git a/src/runtime/duff_amd64.s b/src/runtime/duff_amd64.s index 2ff5bf6dbcd66..df010f58533be 100644 --- a/src/runtime/duff_amd64.s +++ b/src/runtime/duff_amd64.s @@ -5,100 +5,100 @@ #include "textflag.h" TEXT runtime·duffzero(SB), NOSPLIT, $0-0 - MOVUPS X0,(DI) - MOVUPS X0,16(DI) - MOVUPS X0,32(DI) - MOVUPS X0,48(DI) + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) LEAQ 64(DI),DI - MOVUPS X0,(DI) - MOVUPS X0,16(DI) - MOVUPS X0,32(DI) - MOVUPS X0,48(DI) + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) LEAQ 64(DI),DI - MOVUPS X0,(DI) - MOVUPS X0,16(DI) - MOVUPS X0,32(DI) - MOVUPS X0,48(DI) + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) LEAQ 64(DI),DI - MOVUPS X0,(DI) - MOVUPS X0,16(DI) - MOVUPS X0,32(DI) - MOVUPS X0,48(DI) + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) LEAQ 64(DI),DI - MOVUPS X0,(DI) - MOVUPS X0,16(DI) - MOVUPS X0,32(DI) - MOVUPS X0,48(DI) + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) LEAQ 64(DI),DI - MOVUPS X0,(DI) - MOVUPS X0,16(DI) - MOVUPS X0,32(DI) - MOVUPS X0,48(DI) + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) LEAQ 64(DI),DI - MOVUPS X0,(DI) - MOVUPS X0,16(DI) - MOVUPS X0,32(DI) - MOVUPS X0,48(DI) + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) LEAQ 64(DI),DI - MOVUPS X0,(DI) - MOVUPS X0,16(DI) - MOVUPS X0,32(DI) - MOVUPS X0,48(DI) + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) LEAQ 64(DI),DI - MOVUPS X0,(DI) - MOVUPS X0,16(DI) - MOVUPS X0,32(DI) - MOVUPS X0,48(DI) + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS
X15,48(DI) LEAQ 64(DI),DI - MOVUPS X0,(DI) - MOVUPS X0,16(DI) - MOVUPS X0,32(DI) - MOVUPS X0,48(DI) + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) LEAQ 64(DI),DI - MOVUPS X0,(DI) - MOVUPS X0,16(DI) - MOVUPS X0,32(DI) - MOVUPS X0,48(DI) + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) LEAQ 64(DI),DI - MOVUPS X0,(DI) - MOVUPS X0,16(DI) - MOVUPS X0,32(DI) - MOVUPS X0,48(DI) + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) LEAQ 64(DI),DI - MOVUPS X0,(DI) - MOVUPS X0,16(DI) - MOVUPS X0,32(DI) - MOVUPS X0,48(DI) + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) LEAQ 64(DI),DI - MOVUPS X0,(DI) - MOVUPS X0,16(DI) - MOVUPS X0,32(DI) - MOVUPS X0,48(DI) + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) LEAQ 64(DI),DI - MOVUPS X0,(DI) - MOVUPS X0,16(DI) - MOVUPS X0,32(DI) - MOVUPS X0,48(DI) + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) LEAQ 64(DI),DI - MOVUPS X0,(DI) - MOVUPS X0,16(DI) - MOVUPS X0,32(DI) - MOVUPS X0,48(DI) + MOVUPS X15,(DI) + MOVUPS X15,16(DI) + MOVUPS X15,32(DI) + MOVUPS X15,48(DI) LEAQ 64(DI),DI RET diff --git a/src/runtime/mkduff.go b/src/runtime/mkduff.go index 94ae75fbfe5ff..ef297f073e099 100644 --- a/src/runtime/mkduff.go +++ b/src/runtime/mkduff.go @@ -62,15 +62,15 @@ func gen(arch string, tags, zero, copy func(io.Writer)) { func notags(w io.Writer) { fmt.Fprintln(w) } func zeroAMD64(w io.Writer) { - // X0: zero + // X15: zero // DI: ptr to memory to be zeroed // DI is updated as a side effect. - fmt.Fprintln(w, "TEXT runtime·duffzero(SB), NOSPLIT, $0-0") + fmt.Fprintln(w, "TEXT runtime·duffzero(SB), NOSPLIT, $0-0") for i := 0; i < 16; i++ { - fmt.Fprintln(w, "\tMOVUPS\tX0,(DI)") - fmt.Fprintln(w, "\tMOVUPS\tX0,16(DI)") - fmt.Fprintln(w, "\tMOVUPS\tX0,32(DI)") - fmt.Fprintln(w, "\tMOVUPS\tX0,48(DI)") + fmt.Fprintln(w, "\tMOVUPS\tX15,(DI)") + fmt.Fprintln(w, "\tMOVUPS\tX15,16(DI)") + fmt.Fprintln(w, "\tMOVUPS\tX15,32(DI)") + fmt.Fprintln(w, "\tMOVUPS\tX15,48(DI)") fmt.Fprintln(w, "\tLEAQ\t64(DI),DI") // We use lea instead of add, to avoid clobbering flags fmt.Fprintln(w) } @@ -84,7 +84,7 @@ func copyAMD64(w io.Writer) { // // This is equivalent to a sequence of MOVSQ but // for some reason that is 3.5x slower than this code. - fmt.Fprintln(w, "TEXT runtime·duffcopy(SB), NOSPLIT, $0-0") + fmt.Fprintln(w, "TEXT runtime·duffcopy(SB), NOSPLIT, $0-0") for i := 0; i < 64; i++ { fmt.Fprintln(w, "\tMOVUPS\t(SI), X0") fmt.Fprintln(w, "\tADDQ\t$16, SI") diff --git a/test/codegen/structs.go b/test/codegen/structs.go index 9eddc5b16ec3e..c4bcb55c637cd 100644 --- a/test/codegen/structs.go +++ b/test/codegen/structs.go @@ -18,7 +18,7 @@ type Z1 struct { } func Zero1(t *Z1) { // Issue #18370 - // amd64:`XORPS\tX., X`,`MOVUPS\tX., \(.*\)`,`MOVQ\t\$0, 16\(.*\)` + // amd64:`MOVUPS\tX[0-9]+, \(.*\)`,`MOVQ\t\$0, 16\(.*\)` *t = Z1{} } @@ -27,7 +27,7 @@ type Z2 struct { } func Zero2(t *Z2) { - // amd64:`XORPS\tX., X`,`MOVUPS\tX., \(.*\)`,`MOVQ\t\$0, 16\(.*\)` + // amd64:`MOVUPS\tX[0-9]+, \(.*\)`,`MOVQ\t\$0, 16\(.*\)` // amd64:`.*runtime[.]gcWriteBarrier.*\(SB\)` *t = Z2{} } From afd67f333466fc67cd37433e45ecdb190efc8f51 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Thu, 4 Feb 2021 10:27:41 -0500 Subject: [PATCH 439/474] [dev.regabi] go/types: no "declared but not used" errors for invalid var decls This is a port of CL 274615, adapted to go/types. 
The only change was in the positioning of expected errors in vardecl.src: in go/types they are positioned on the identifier. Change-Id: Iab03265a7c4287749373e4380c6db6a95f262f30 Reviewed-on: https://go-review.googlesource.com/c/go/+/289712 Trust: Robert Findley Run-TryBot: Robert Findley TryBot-Result: Go Bot Reviewed-by: Robert Griesemer --- src/go/types/assignments.go | 1 + src/go/types/decl.go | 14 ++++++++++++++ src/go/types/testdata/vardecl.src | 14 +++++++++++++- 3 files changed, 28 insertions(+), 1 deletion(-) diff --git a/src/go/types/assignments.go b/src/go/types/assignments.go index 616564b567d73..d6f18c9bee1e9 100644 --- a/src/go/types/assignments.go +++ b/src/go/types/assignments.go @@ -120,6 +120,7 @@ func (check *Checker) initVar(lhs *Var, x *operand, context string) Type { if lhs.typ == nil { lhs.typ = Typ[Invalid] } + lhs.used = true return nil } diff --git a/src/go/types/decl.go b/src/go/types/decl.go index 1f0bc358a26e9..df01e92530603 100644 --- a/src/go/types/decl.go +++ b/src/go/types/decl.go @@ -504,6 +504,20 @@ func (check *Checker) constDecl(obj *Const, typ, init ast.Expr, inherited bool) func (check *Checker) varDecl(obj *Var, lhs []*Var, typ, init ast.Expr) { assert(obj.typ == nil) + // If we have undefined variable types due to errors, + // mark variables as used to avoid follow-on errors. + // Matches compiler behavior. + defer func() { + if obj.typ == Typ[Invalid] { + obj.used = true + } + for _, lhs := range lhs { + if lhs.typ == Typ[Invalid] { + lhs.used = true + } + } + }() + // determine type, if any if typ != nil { obj.typ = check.typ(typ) diff --git a/src/go/types/testdata/vardecl.src b/src/go/types/testdata/vardecl.src index 54f5ef1e10d09..6e2d1b5bd5e3b 100644 --- a/src/go/types/testdata/vardecl.src +++ b/src/go/types/testdata/vardecl.src @@ -158,6 +158,18 @@ func _() { } } + +// Invalid variable declarations must not lead to "declared but not used errors". +func _() { + var a x // ERROR undeclared name: x + var b = x // ERROR undeclared name: x + var c int = x // ERROR undeclared name: x + var d, e, f x /* ERROR x */ /* ERROR x */ /* ERROR x */ + var g, h, i = x /* ERROR x */, x /* ERROR x */, x /* ERROR x */ + var j, k, l float32 = x /* ERROR x */, x /* ERROR x */, x /* ERROR x */ + // but no "declared but not used" errors +} + // Invalid (unused) expressions must not lead to spurious "declared but not used errors" func _() { var a, b, c int @@ -203,4 +215,4 @@ func _() { _, _, _ = x, y, z } -// TODO(gri) consolidate other var decl checks in this file \ No newline at end of file +// TODO(gri) consolidate other var decl checks in this file From bc451b5770dc99b6a74934c26fd11a8cdc172bb1 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Thu, 4 Feb 2021 11:06:15 -0500 Subject: [PATCH 440/474] [dev.regabi] go/types: port check_test.go ergonomics from dev.typeparams On the dev.typeparams and dev.go2go branches, check_test.go has been updated to automatically discover test data. This is convenient, so port it to dev.regabi. 
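For reference, the discovered layout groups each multi-file package into its own subdirectory, which runs as a single subtest, and selected files can still be checked by hand through the -files flag of TestCheck below. A hypothetical invocation, assuming the usual go test flag passthrough (the flag and test names are taken from the diff; the exact command-line form is an assumption): go test go/types -run TestCheck -files "testdata/decls2/decls2a.src testdata/decls2/decls2b.src". All files given together are type-checked as one package.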
Change-Id: I5da9a9a5139c35a2693e64364eb9928ece1cd7c6 Reviewed-on: https://go-review.googlesource.com/c/go/+/289713 Trust: Robert Findley Run-TryBot: Robert Findley TryBot-Result: Go Bot Reviewed-by: Robert Griesemer --- src/go/types/check_test.go | 121 +++++++----------- .../types/testdata/{ => decls2}/decls2a.src | 0 .../types/testdata/{ => decls2}/decls2b.src | 0 .../{ => importdecl0}/importdecl0a.src | 0 .../{ => importdecl0}/importdecl0b.src | 0 .../{ => importdecl1}/importdecl1a.src | 0 .../{ => importdecl1}/importdecl1b.src | 0 .../testdata/{ => issue25008}/issue25008a.src | 0 .../testdata/{ => issue25008}/issue25008b.src | 0 9 files changed, 45 insertions(+), 76 deletions(-) rename src/go/types/testdata/{ => decls2}/decls2a.src (100%) rename src/go/types/testdata/{ => decls2}/decls2b.src (100%) rename src/go/types/testdata/{ => importdecl0}/importdecl0a.src (100%) rename src/go/types/testdata/{ => importdecl0}/importdecl0b.src (100%) rename src/go/types/testdata/{ => importdecl1}/importdecl1a.src (100%) rename src/go/types/testdata/{ => importdecl1}/importdecl1b.src (100%) rename src/go/types/testdata/{ => issue25008}/issue25008a.src (100%) rename src/go/types/testdata/{ => issue25008}/issue25008b.src (100%) diff --git a/src/go/types/check_test.go b/src/go/types/check_test.go index ce31dab68bb18..47d749b3a331b 100644 --- a/src/go/types/check_test.go +++ b/src/go/types/check_test.go @@ -27,12 +27,14 @@ package types_test import ( "flag" + "fmt" "go/ast" "go/importer" "go/parser" "go/scanner" "go/token" "internal/testenv" + "io/ioutil" "os" "path/filepath" "regexp" @@ -48,54 +50,6 @@ var ( testFiles = flag.String("files", "", "space-separated list of test files") ) -// The test filenames do not end in .go so that they are invisible -// to gofmt since they contain comments that must not change their -// positions relative to surrounding tokens. - -// Each tests entry is list of files belonging to the same package. -var tests = [][]string{ - {"testdata/errors.src"}, - {"testdata/importdecl0a.src", "testdata/importdecl0b.src"}, - {"testdata/importdecl1a.src", "testdata/importdecl1b.src"}, - {"testdata/importC.src"}, // special handling in checkFiles - {"testdata/cycles.src"}, - {"testdata/cycles1.src"}, - {"testdata/cycles2.src"}, - {"testdata/cycles3.src"}, - {"testdata/cycles4.src"}, - {"testdata/cycles5.src"}, - {"testdata/init0.src"}, - {"testdata/init1.src"}, - {"testdata/init2.src"}, - {"testdata/decls0.src"}, - {"testdata/decls1.src"}, - {"testdata/decls2a.src", "testdata/decls2b.src"}, - {"testdata/decls3.src"}, - {"testdata/decls4.src"}, - {"testdata/decls5.src"}, - {"testdata/const0.src"}, - {"testdata/const1.src"}, - {"testdata/constdecl.src"}, - {"testdata/vardecl.src"}, - {"testdata/expr0.src"}, - {"testdata/expr1.src"}, - {"testdata/expr2.src"}, - {"testdata/expr3.src"}, - {"testdata/methodsets.src"}, - {"testdata/shifts.src"}, - {"testdata/builtins.src"}, - {"testdata/conversions.src"}, - {"testdata/conversions2.src"}, - {"testdata/stmt0.src"}, - {"testdata/stmt1.src"}, - {"testdata/gotos.src"}, - {"testdata/labels.src"}, - {"testdata/literals.src"}, - {"testdata/issues.src"}, - {"testdata/blank.src"}, - {"testdata/issue25008b.src", "testdata/issue25008a.src"}, // order (b before a) is crucial! -} - var fset = token.NewFileSet() // Positioned errors are of the form filename:line:column: message . 
@@ -236,9 +190,13 @@ func eliminate(t *testing.T, errmap map[string][]string, errlist []error) { } } -func checkFiles(t *testing.T, testfiles []string) { +func checkFiles(t *testing.T, sources []string) { + if len(sources) == 0 { + t.Fatal("no source files") + } + // parse files and collect parser errors - files, errlist := parseFiles(t, testfiles) + files, errlist := parseFiles(t, sources) pkgName := "" if len(files) > 0 { @@ -254,10 +212,13 @@ func checkFiles(t *testing.T, testfiles []string) { // typecheck and collect typechecker errors var conf Config + // special case for importC.src - if len(testfiles) == 1 && strings.HasSuffix(testfiles[0], "importC.src") { + if len(sources) == 1 && strings.HasSuffix(sources[0], "importC.src") { conf.FakeImportC = true } + // TODO(rFindley) we may need to use the source importer when adding generics + // tests. conf.Importer = importer.Default() conf.Error = func(err error) { if *haltOnError { @@ -306,44 +267,52 @@ func checkFiles(t *testing.T, testfiles []string) { } } +// TestCheck is for manual testing of selected input files, provided with -files. func TestCheck(t *testing.T) { - testenv.MustHaveGoBuild(t) - - // Declare builtins for testing. - DefPredeclaredTestFuncs() - - // If explicit test files are specified, only check those. - if files := *testFiles; files != "" { - checkFiles(t, strings.Split(files, " ")) + if *testFiles == "" { return } - - // Otherwise, run all the tests. - for _, files := range tests { - checkFiles(t, files) - } + testenv.MustHaveGoBuild(t) + DefPredeclaredTestFuncs() + checkFiles(t, strings.Split(*testFiles, " ")) } -func TestFixedBugs(t *testing.T) { testDir(t, "fixedbugs") } +func TestTestdata(t *testing.T) { DefPredeclaredTestFuncs(); testDir(t, "testdata") } +func TestFixedbugs(t *testing.T) { testDir(t, "fixedbugs") } func testDir(t *testing.T, dir string) { testenv.MustHaveGoBuild(t) - dirs, err := os.ReadDir(dir) + fis, err := os.ReadDir(dir) if err != nil { - t.Fatal(err) + t.Error(err) + return } - for _, d := range dirs { - testname := filepath.Base(d.Name()) - testname = strings.TrimSuffix(testname, filepath.Ext(testname)) - t.Run(testname, func(t *testing.T) { - filename := filepath.Join(dir, d.Name()) - if d.IsDir() { - t.Errorf("skipped directory %q", filename) - return + for _, fi := range fis { + path := filepath.Join(dir, fi.Name()) + + // if fi is a directory, its files make up a single package + var files []string + if fi.IsDir() { + fis, err := ioutil.ReadDir(path) + if err != nil { + t.Error(err) + continue + } + files = make([]string, len(fis)) + for i, fi := range fis { + // if fi is a directory, checkFiles below will complain + files[i] = filepath.Join(path, fi.Name()) + if testing.Verbose() { + fmt.Printf("\t%s\n", files[i]) + } } - checkFiles(t, []string{filename}) + } else { + files = []string{path} + } + t.Run(filepath.Base(path), func(t *testing.T) { + checkFiles(t, files) }) } } diff --git a/src/go/types/testdata/decls2a.src b/src/go/types/testdata/decls2/decls2a.src similarity index 100% rename from src/go/types/testdata/decls2a.src rename to src/go/types/testdata/decls2/decls2a.src diff --git a/src/go/types/testdata/decls2b.src b/src/go/types/testdata/decls2/decls2b.src similarity index 100% rename from src/go/types/testdata/decls2b.src rename to src/go/types/testdata/decls2/decls2b.src diff --git a/src/go/types/testdata/importdecl0a.src b/src/go/types/testdata/importdecl0/importdecl0a.src similarity index 100% rename from src/go/types/testdata/importdecl0a.src rename to 
src/go/types/testdata/importdecl0/importdecl0a.src diff --git a/src/go/types/testdata/importdecl0b.src b/src/go/types/testdata/importdecl0/importdecl0b.src similarity index 100% rename from src/go/types/testdata/importdecl0b.src rename to src/go/types/testdata/importdecl0/importdecl0b.src diff --git a/src/go/types/testdata/importdecl1a.src b/src/go/types/testdata/importdecl1/importdecl1a.src similarity index 100% rename from src/go/types/testdata/importdecl1a.src rename to src/go/types/testdata/importdecl1/importdecl1a.src diff --git a/src/go/types/testdata/importdecl1b.src b/src/go/types/testdata/importdecl1/importdecl1b.src similarity index 100% rename from src/go/types/testdata/importdecl1b.src rename to src/go/types/testdata/importdecl1/importdecl1b.src diff --git a/src/go/types/testdata/issue25008a.src b/src/go/types/testdata/issue25008/issue25008a.src similarity index 100% rename from src/go/types/testdata/issue25008a.src rename to src/go/types/testdata/issue25008/issue25008a.src diff --git a/src/go/types/testdata/issue25008b.src b/src/go/types/testdata/issue25008/issue25008b.src similarity index 100% rename from src/go/types/testdata/issue25008b.src rename to src/go/types/testdata/issue25008/issue25008b.src From 52d5cb2822966c00ce2ef97eb08bec4850d76fb2 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Wed, 3 Feb 2021 18:10:04 -0500 Subject: [PATCH 441/474] [dev.regabi] cmd/internal/obj: access Attribute atomically Symbol's Attributes and ABI are in the same word. In the concurrent backend, we may read one symbol's ABI (the callee) while setting its attributes in another goroutine. Fix racecompile build. Change-Id: I500e869bafdd72080119ab243db94eee3afcf926 Reviewed-on: https://go-review.googlesource.com/c/go/+/289290 Trust: Cherry Zhang Run-TryBot: Cherry Zhang TryBot-Result: Go Bot Reviewed-by: Than McIntosh --- src/cmd/internal/obj/link.go | 64 ++++++++++++++++++++++-------------- 1 file changed, 40 insertions(+), 24 deletions(-) diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index 35cb53cbf6cc8..8206902328077 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -39,6 +39,7 @@ import ( "cmd/internal/sys" "fmt" "sync" + "sync/atomic" ) // An Addr is an argument to an instruction. 
@@ -647,37 +648,52 @@ const ( attrABIBase ) -func (a Attribute) DuplicateOK() bool { return a&AttrDuplicateOK != 0 } -func (a Attribute) MakeTypelink() bool { return a&AttrMakeTypelink != 0 } -func (a Attribute) CFunc() bool { return a&AttrCFunc != 0 } -func (a Attribute) NoSplit() bool { return a&AttrNoSplit != 0 } -func (a Attribute) Leaf() bool { return a&AttrLeaf != 0 } -func (a Attribute) OnList() bool { return a&AttrOnList != 0 } -func (a Attribute) ReflectMethod() bool { return a&AttrReflectMethod != 0 } -func (a Attribute) Local() bool { return a&AttrLocal != 0 } -func (a Attribute) Wrapper() bool { return a&AttrWrapper != 0 } -func (a Attribute) NeedCtxt() bool { return a&AttrNeedCtxt != 0 } -func (a Attribute) NoFrame() bool { return a&AttrNoFrame != 0 } -func (a Attribute) Static() bool { return a&AttrStatic != 0 } -func (a Attribute) WasInlined() bool { return a&AttrWasInlined != 0 } -func (a Attribute) TopFrame() bool { return a&AttrTopFrame != 0 } -func (a Attribute) Indexed() bool { return a&AttrIndexed != 0 } -func (a Attribute) UsedInIface() bool { return a&AttrUsedInIface != 0 } -func (a Attribute) ContentAddressable() bool { return a&AttrContentAddressable != 0 } -func (a Attribute) ABIWrapper() bool { return a&AttrABIWrapper != 0 } +func (a *Attribute) load() Attribute { return Attribute(atomic.LoadUint32((*uint32)(a))) } + +func (a *Attribute) DuplicateOK() bool { return a.load()&AttrDuplicateOK != 0 } +func (a *Attribute) MakeTypelink() bool { return a.load()&AttrMakeTypelink != 0 } +func (a *Attribute) CFunc() bool { return a.load()&AttrCFunc != 0 } +func (a *Attribute) NoSplit() bool { return a.load()&AttrNoSplit != 0 } +func (a *Attribute) Leaf() bool { return a.load()&AttrLeaf != 0 } +func (a *Attribute) OnList() bool { return a.load()&AttrOnList != 0 } +func (a *Attribute) ReflectMethod() bool { return a.load()&AttrReflectMethod != 0 } +func (a *Attribute) Local() bool { return a.load()&AttrLocal != 0 } +func (a *Attribute) Wrapper() bool { return a.load()&AttrWrapper != 0 } +func (a *Attribute) NeedCtxt() bool { return a.load()&AttrNeedCtxt != 0 } +func (a *Attribute) NoFrame() bool { return a.load()&AttrNoFrame != 0 } +func (a *Attribute) Static() bool { return a.load()&AttrStatic != 0 } +func (a *Attribute) WasInlined() bool { return a.load()&AttrWasInlined != 0 } +func (a *Attribute) TopFrame() bool { return a.load()&AttrTopFrame != 0 } +func (a *Attribute) Indexed() bool { return a.load()&AttrIndexed != 0 } +func (a *Attribute) UsedInIface() bool { return a.load()&AttrUsedInIface != 0 } +func (a *Attribute) ContentAddressable() bool { return a.load()&AttrContentAddressable != 0 } +func (a *Attribute) ABIWrapper() bool { return a.load()&AttrABIWrapper != 0 } func (a *Attribute) Set(flag Attribute, value bool) { - if value { - *a |= flag - } else { - *a &^= flag + for { + v0 := a.load() + v := v0 + if value { + v |= flag + } else { + v &^= flag + } + if atomic.CompareAndSwapUint32((*uint32)(a), uint32(v0), uint32(v)) { + break + } } } -func (a Attribute) ABI() ABI { return ABI(a / attrABIBase) } +func (a *Attribute) ABI() ABI { return ABI(a.load() / attrABIBase) } func (a *Attribute) SetABI(abi ABI) { const mask = 1 // Only one ABI bit for now. 
- *a = (*a &^ (mask * attrABIBase)) | Attribute(abi)*attrABIBase + for { + v0 := a.load() + v := (v0 &^ (mask * attrABIBase)) | Attribute(abi)*attrABIBase + if atomic.CompareAndSwapUint32((*uint32)(a), uint32(v0), uint32(v)) { + break + } + } } var textAttrStrings = [...]struct { From 120b819f45d1c109a1c2ef380edde9e826862a5c Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Thu, 4 Feb 2021 11:16:25 -0500 Subject: [PATCH 442/474] [dev.regabi] go/types: report error for invalid main function signature This is a port of CL 279424, which didn't make it into master in time for go1.16. Move it to dev.regabi so that it may be merged. Notably, this port no longer removes the _InvalidInitSig error code, instead opting to deprecate it. Now that error codes are 'locked in' for go1.16, even if their API may not yet be exposed, we should follow the practice of not changing their values. In the future, code generation can make it easier to keep error code values constant. For #43308 Change-Id: I5260b93fd063393d38d6458e45a67e7f9b7426ed Reviewed-on: https://go-review.googlesource.com/c/go/+/289714 Trust: Robert Findley Run-TryBot: Robert Findley TryBot-Result: Go Bot Reviewed-by: Robert Griesemer --- src/go/types/decl.go | 8 ++++++-- src/go/types/errorcodes.go | 7 +++++-- src/go/types/testdata/main.src | 9 +++++++++ 3 files changed, 20 insertions(+), 4 deletions(-) create mode 100644 src/go/types/testdata/main.src diff --git a/src/go/types/decl.go b/src/go/types/decl.go index df01e92530603..571e172351bef 100644 --- a/src/go/types/decl.go +++ b/src/go/types/decl.go @@ -751,8 +751,12 @@ func (check *Checker) funcDecl(obj *Func, decl *declInfo) { obj.typ = sig // guard against cycles fdecl := decl.fdecl check.funcType(sig, fdecl.Recv, fdecl.Type) - if sig.recv == nil && obj.name == "init" && (sig.params.Len() > 0 || sig.results.Len() > 0) { - check.errorf(fdecl, _InvalidInitSig, "func init must have no arguments and no return values") + if sig.recv == nil { + if obj.name == "init" && (sig.params.Len() > 0 || sig.results.Len() > 0) { + check.errorf(fdecl, _InvalidInitDecl, "func init must have no arguments and no return values") + } else if obj.name == "main" && check.pkg.name == "main" && (sig.params.Len() > 0 || sig.results.Len() > 0) { + check.errorf(fdecl, _InvalidMainDecl, "func main must have no arguments and no return values") + } // ok to continue } diff --git a/src/go/types/errorcodes.go b/src/go/types/errorcodes.go index c01a12c346d3e..d27abdf4d48bf 100644 --- a/src/go/types/errorcodes.go +++ b/src/go/types/errorcodes.go @@ -386,8 +386,8 @@ const ( // _InvalidInitSig occurs when an init function declares parameters or // results. // - // Example: - // func init() int { return 1 } + // Deprecated: no longer emitted by the type checker. _InvalidInitDecl is + // used instead. _InvalidInitSig // _InvalidInitDecl occurs when init is declared as anything other than a @@ -395,6 +395,9 @@ const ( // // Example: // var init = 1 + // + // Example: + // func init() int { return 1 } _InvalidInitDecl // _InvalidMainDecl occurs when main is declared as anything other than a diff --git a/src/go/types/testdata/main.src b/src/go/types/testdata/main.src new file mode 100644 index 0000000000000..f892938d4acfa --- /dev/null +++ b/src/go/types/testdata/main.src @@ -0,0 +1,9 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main +func main() +func /* ERROR "no arguments and no return values" */ main /* ERROR redeclared */ (int) +func /* ERROR "no arguments and no return values" */ main /* ERROR redeclared */ () int From 63de2110148eec432c4954dced7ff674a4942115 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Tue, 2 Feb 2021 17:26:57 -0500 Subject: [PATCH 443/474] [dev.regabi] runtime/cgo: call setg_gcc in crosscall_amd64 Currently, when using cgo, the g pointer is set via a separate call to setg_gcc or with inline assembly in threadentry. This CL changes it to call setg_gcc in crosscall_amd64, like other g-register platforms. When we have an actual g register on AMD64, we'll need to set the register immediately before calling into Go. Change-Id: Ib1171e05cd0dabba3b7d12e072084d141051cf3d Reviewed-on: https://go-review.googlesource.com/c/go/+/289192 Trust: Cherry Zhang Reviewed-by: Ian Lance Taylor Reviewed-by: Than McIntosh --- src/runtime/cgo/gcc_amd64.S | 7 ++++++- src/runtime/cgo/gcc_darwin_amd64.c | 11 +++++------ src/runtime/cgo/gcc_dragonfly_amd64.c | 7 +------ src/runtime/cgo/gcc_freebsd_amd64.c | 7 +------ src/runtime/cgo/gcc_linux_amd64.c | 7 +------ src/runtime/cgo/gcc_netbsd_amd64.c | 7 +------ src/runtime/cgo/gcc_openbsd_amd64.c | 7 +------ src/runtime/cgo/gcc_solaris_amd64.c | 7 +------ src/runtime/cgo/gcc_windows_amd64.c | 10 +++++----- src/runtime/cgo/libcgo.h | 2 +- 10 files changed, 23 insertions(+), 49 deletions(-) diff --git a/src/runtime/cgo/gcc_amd64.S b/src/runtime/cgo/gcc_amd64.S index 17d9d47ef40cb..d75f8646663ab 100644 --- a/src/runtime/cgo/gcc_amd64.S +++ b/src/runtime/cgo/gcc_amd64.S @@ -30,9 +30,14 @@ EXT(crosscall_amd64): pushq %r15 #if defined(_WIN64) + movq %r8, %rdi /* arg of setg_gcc */ + call *%rdx /* setg_gcc */ call *%rcx /* fn */ #else - call *%rdi /* fn */ + movq %rdi, %rbx + movq %rdx, %rdi /* arg of setg_gcc */ + call *%rsi /* setg_gcc */ + call *%rbx /* fn */ #endif popq %r15 diff --git a/src/runtime/cgo/gcc_darwin_amd64.c b/src/runtime/cgo/gcc_darwin_amd64.c index 51410d50269da..d5b7fd8fd802b 100644 --- a/src/runtime/cgo/gcc_darwin_amd64.c +++ b/src/runtime/cgo/gcc_darwin_amd64.c @@ -9,13 +9,16 @@ #include "libcgo_unix.h" static void* threadentry(void*); +static void (*setg_gcc)(void*); void -x_cgo_init(G *g) +x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase) { pthread_attr_t attr; size_t size; + setg_gcc = setg; + pthread_attr_init(&attr); pthread_attr_getstacksize(&attr, &size); g->stacklo = (uintptr)&attr - size + 4096; @@ -57,10 +60,6 @@ threadentry(void *v) ts = *(ThreadStart*)v; free(v); - // Move the g pointer into the slot reserved in thread local storage. - // Constant must match the one in cmd/link/internal/ld/sym.go. - asm volatile("movq %0, %%gs:0x30" :: "r"(ts.g)); - - crosscall_amd64(ts.fn); + crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g); return nil; } diff --git a/src/runtime/cgo/gcc_dragonfly_amd64.c b/src/runtime/cgo/gcc_dragonfly_amd64.c index d25db91900071..0003414bf8665 100644 --- a/src/runtime/cgo/gcc_dragonfly_amd64.c +++ b/src/runtime/cgo/gcc_dragonfly_amd64.c @@ -61,11 +61,6 @@ threadentry(void *v) ts = *(ThreadStart*)v; free(v); - /* - * Set specific keys.
- */ - setg_gcc((void*)ts.g); - - crosscall_amd64(ts.fn); + crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g); return nil; } diff --git a/src/runtime/cgo/gcc_freebsd_amd64.c b/src/runtime/cgo/gcc_freebsd_amd64.c index 514a2f8a235ee..6071ec3909378 100644 --- a/src/runtime/cgo/gcc_freebsd_amd64.c +++ b/src/runtime/cgo/gcc_freebsd_amd64.c @@ -69,11 +69,6 @@ threadentry(void *v) free(v); _cgo_tsan_release(); - /* - * Set specific keys. - */ - setg_gcc((void*)ts.g); - - crosscall_amd64(ts.fn); + crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g); return nil; } diff --git a/src/runtime/cgo/gcc_linux_amd64.c b/src/runtime/cgo/gcc_linux_amd64.c index f2bf6482cb5f7..c25e7e769ba23 100644 --- a/src/runtime/cgo/gcc_linux_amd64.c +++ b/src/runtime/cgo/gcc_linux_amd64.c @@ -89,11 +89,6 @@ threadentry(void *v) free(v); _cgo_tsan_release(); - /* - * Set specific keys. - */ - setg_gcc((void*)ts.g); - - crosscall_amd64(ts.fn); + crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g); return nil; } diff --git a/src/runtime/cgo/gcc_netbsd_amd64.c b/src/runtime/cgo/gcc_netbsd_amd64.c index dc966fc45b4ba..9f4b031a08954 100644 --- a/src/runtime/cgo/gcc_netbsd_amd64.c +++ b/src/runtime/cgo/gcc_netbsd_amd64.c @@ -62,11 +62,6 @@ threadentry(void *v) ts = *(ThreadStart*)v; free(v); - /* - * Set specific keys. - */ - setg_gcc((void*)ts.g); - // On NetBSD, a new thread inherits the signal stack of the // creating thread. That confuses minit, so we remove that // signal stack here before calling the regular mstart. It's @@ -78,6 +73,6 @@ threadentry(void *v) ss.ss_flags = SS_DISABLE; sigaltstack(&ss, nil); - crosscall_amd64(ts.fn); + crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g); return nil; } diff --git a/src/runtime/cgo/gcc_openbsd_amd64.c b/src/runtime/cgo/gcc_openbsd_amd64.c index 34319fb0b80e3..09d2750f3ad8f 100644 --- a/src/runtime/cgo/gcc_openbsd_amd64.c +++ b/src/runtime/cgo/gcc_openbsd_amd64.c @@ -60,11 +60,6 @@ threadentry(void *v) ts = *(ThreadStart*)v; free(v); - /* - * Set specific keys. - */ - setg_gcc((void*)ts.g); - - crosscall_amd64(ts.fn); + crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g); return nil; } diff --git a/src/runtime/cgo/gcc_solaris_amd64.c b/src/runtime/cgo/gcc_solaris_amd64.c index 079bd12898e25..e89e844b1e0ad 100644 --- a/src/runtime/cgo/gcc_solaris_amd64.c +++ b/src/runtime/cgo/gcc_solaris_amd64.c @@ -72,11 +72,6 @@ threadentry(void *v) ts = *(ThreadStart*)v; free(v); - /* - * Set specific keys. 
- */ - setg_gcc((void*)ts.g); - - crosscall_amd64(ts.fn); + crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g); return nil; } diff --git a/src/runtime/cgo/gcc_windows_amd64.c b/src/runtime/cgo/gcc_windows_amd64.c index 0f8c817f0e4d1..25cfd086ddc33 100644 --- a/src/runtime/cgo/gcc_windows_amd64.c +++ b/src/runtime/cgo/gcc_windows_amd64.c @@ -12,10 +12,12 @@ #include "libcgo_windows.h" static void threadentry(void*); +static void (*setg_gcc)(void*); void -x_cgo_init(G *g) +x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase) { + setg_gcc = setg; } @@ -46,10 +48,8 @@ threadentry(void *v) */ asm volatile ( "movq %0, %%gs:0x28\n" // MOVL tls0, 0x28(GS) - "movq %%gs:0x28, %%rax\n" // MOVQ 0x28(GS), tmp - "movq %1, 0(%%rax)\n" // MOVQ g, 0(GS) - :: "r"(ts.tls), "r"(ts.g) : "%rax" + :: "r"(ts.tls) ); - crosscall_amd64(ts.fn); + crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g); } diff --git a/src/runtime/cgo/libcgo.h b/src/runtime/cgo/libcgo.h index aba500a301c66..af4960e7e9522 100644 --- a/src/runtime/cgo/libcgo.h +++ b/src/runtime/cgo/libcgo.h @@ -66,7 +66,7 @@ uintptr_t _cgo_wait_runtime_init_done(void); /* * Call fn in the 6c world. */ -void crosscall_amd64(void (*fn)(void)); +void crosscall_amd64(void (*fn)(void), void (*setg_gcc)(void*), void *g); /* * Call fn in the 8c world. From 7cc6de59f25911ff786a4d54420f2ddbf21c00f2 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Wed, 3 Feb 2021 11:53:35 -0500 Subject: [PATCH 444/474] [dev.regabi] runtime: don't mark rt0_go ABIInternal rt0_go is not actually ABIInternal, and it actually has callers (e.g. _rt0_amd64). Change-Id: Id730176e620ad9f443e6bfca6ded81a1367531ba Reviewed-on: https://go-review.googlesource.com/c/go/+/289193 Trust: Cherry Zhang Reviewed-by: Than McIntosh --- src/runtime/asm_amd64.s | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s index b5d01ba73ce03..aece84bde8c00 100644 --- a/src/runtime/asm_amd64.s +++ b/src/runtime/asm_amd64.s @@ -84,9 +84,7 @@ GLOBL _rt0_amd64_lib_argc<>(SB),NOPTR, $8 DATA _rt0_amd64_lib_argv<>(SB)/8, $0 GLOBL _rt0_amd64_lib_argv<>(SB),NOPTR, $8 -// Defined as ABIInternal since it does not use the stack-based Go ABI (and -// in addition there are no calls to this entry point from Go code). -TEXT runtime·rt0_go(SB),NOSPLIT,$0 +TEXT runtime·rt0_go(SB),NOSPLIT,$0 // copy arguments forward on an even stack MOVQ DI, AX // argc MOVQ SI, BX // argv From e79c2fd428372f64e6183bed9f765c1556816111 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Tue, 2 Feb 2021 18:09:03 -0500 Subject: [PATCH 445/474] [dev.regabi] runtime: mark racecallbackthunk as ABIInternal racecallbackthunk is called from C, and it needs to follow C ABI. The assembly code preserves C callee-save registers. It must not be called via wrappers, which may not preserve those registers. Change-Id: Icd72c399f4424d73c4882860d85057fe2671f6aa Reviewed-on: https://go-review.googlesource.com/c/go/+/289194 Trust: Cherry Zhang Reviewed-by: Than McIntosh --- src/runtime/race_amd64.s | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/runtime/race_amd64.s b/src/runtime/race_amd64.s index 9818bc6ddff97..cf0a51462fd55 100644 --- a/src/runtime/race_amd64.s +++ b/src/runtime/race_amd64.s @@ -419,7 +419,9 @@ call: // The overall effect of Go->C->Go call chain is similar to that of mcall. // RARG0 contains command code. RARG1 contains command-specific context. // See racecallback for command codes. 
-TEXT runtime·racecallbackthunk(SB), NOSPLIT, $56-8 +// Defined as ABIInternal so as to avoid introducing a wrapper, +// because its address is passed to C via funcPC. +TEXT runtime·racecallbackthunk(SB), NOSPLIT, $56-8 // Handle command raceGetProcCmd (0) here. // First, code below assumes that we are on curg, while raceGetProcCmd // can be executed on g0. Second, it is called frequently, so will From 397a46a10a2cc8557e965af269915909cb5c0a80 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Wed, 3 Feb 2021 12:09:53 -0500 Subject: [PATCH 446/474] [dev.regabi] cmd/asm: define g register on AMD64 Define g register as R14 on AMD64. It is not used now, but will be in later CLs. The name "R14" is still recognized. Change-Id: I9a066b15bf1051113db8c6640605e350cea397b9 Reviewed-on: https://go-review.googlesource.com/c/go/+/289195 Trust: Cherry Zhang Reviewed-by: Than McIntosh --- src/cmd/asm/internal/arch/arch.go | 4 ++++ src/cmd/asm/internal/asm/operand_test.go | 1 + src/cmd/internal/obj/x86/a.out.go | 1 + 3 files changed, 6 insertions(+) diff --git a/src/cmd/asm/internal/arch/arch.go b/src/cmd/asm/internal/arch/arch.go index a62e55191e689..026d8abf81305 100644 --- a/src/cmd/asm/internal/arch/arch.go +++ b/src/cmd/asm/internal/arch/arch.go @@ -109,6 +109,10 @@ func archX86(linkArch *obj.LinkArch) *Arch { register["SB"] = RSB register["FP"] = RFP register["PC"] = RPC + if linkArch == &x86.Linkamd64 { + // Alias g to R14 + register["g"] = x86.REGG + } // Register prefix not used on this architecture. instructions := make(map[string]obj.As) diff --git a/src/cmd/asm/internal/asm/operand_test.go b/src/cmd/asm/internal/asm/operand_test.go index 2e83e176b297c..c6def15e20eac 100644 --- a/src/cmd/asm/internal/asm/operand_test.go +++ b/src/cmd/asm/internal/asm/operand_test.go @@ -259,6 +259,7 @@ var amd64OperandTests = []operandTest{ {"R15", "R15"}, {"R8", "R8"}, {"R9", "R9"}, + {"g", "R14"}, {"SI", "SI"}, {"SP", "SP"}, {"X0", "X0"}, diff --git a/src/cmd/internal/obj/x86/a.out.go b/src/cmd/internal/obj/x86/a.out.go index 30c1a6a44547d..3be4b59da461f 100644 --- a/src/cmd/internal/obj/x86/a.out.go +++ b/src/cmd/internal/obj/x86/a.out.go @@ -263,6 +263,7 @@ const ( FREGRET = REG_X0 REGSP = REG_SP REGCTXT = REG_DX + REGG = REG_R14 // g register in ABIInternal REGEXT = REG_R15 // compiler allocates external registers R15 down FREGMIN = REG_X0 + 5 // first register variable FREGEXT = REG_X0 + 15 // first external register From 946351d5a27d7dc5550f579ddfec926790903fc5 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Tue, 2 Feb 2021 18:25:39 -0500 Subject: [PATCH 447/474] [dev.regabi] runtime: zero X15 in racecall racecall can be called in ABIInternal context (e.g. raceread calling racecalladdr calling racecall) without wrapper. racecall calls C code, which doesn't preserve our special registers. Set them explicitly in racecall upon returning from C. Change-Id: Ic990479c1fca6bb8a3b151325c7a89be8331a530 Reviewed-on: https://go-review.googlesource.com/c/go/+/289709 Trust: Cherry Zhang Run-TryBot: Cherry Zhang TryBot-Result: Go Bot Reviewed-by: Michael Knyszek Reviewed-by: Jeremy Faller --- src/runtime/race_amd64.s | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/runtime/race_amd64.s b/src/runtime/race_amd64.s index cf0a51462fd55..fd41b5690a64c 100644 --- a/src/runtime/race_amd64.s +++ b/src/runtime/race_amd64.s @@ -412,6 +412,9 @@ call: ANDQ $~15, SP // alignment for gcc ABI CALL AX MOVQ R12, SP + // Back to Go world, set special registers. + // The g register (R14) is preserved in C. 
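+	// X15 is used as the fixed zero register under ABIInternal, but it
+	// is caller-saved in the C ABI, so the C side may have clobbered it.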
+ XORPS X15, X15 RET // C->Go callback thunk that allows to call runtime·racesymbolize from C code. From 8fa84772ba035b74975572fbc9df0330523cc388 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Thu, 4 Feb 2021 12:59:06 -0500 Subject: [PATCH 448/474] [dev.regabi] runtime: delete gosave function The runtime.gosave function is not used anywhere. Delete. Note: there is also a gosave<> function, which is actually used and not deleted. Change-Id: I64149a7afdd217de26d1e6396233f2becfad7153 Reviewed-on: https://go-review.googlesource.com/c/go/+/289719 Trust: Cherry Zhang Run-TryBot: Cherry Zhang TryBot-Result: Go Bot Reviewed-by: Michael Knyszek --- src/runtime/asm_386.s | 19 ------------------- src/runtime/asm_amd64.s | 20 -------------------- src/runtime/asm_arm.s | 17 ----------------- src/runtime/asm_arm64.s | 17 ----------------- src/runtime/asm_mips64x.s | 15 --------------- src/runtime/asm_mipsx.s | 15 --------------- src/runtime/asm_ppc64x.s | 17 ----------------- src/runtime/asm_riscv64.s | 15 --------------- src/runtime/asm_s390x.s | 15 --------------- src/runtime/stubs.go | 1 - 10 files changed, 151 deletions(-) diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s index fa3b1be339259..429f3fef82916 100644 --- a/src/runtime/asm_386.s +++ b/src/runtime/asm_386.s @@ -273,25 +273,6 @@ TEXT runtime·asminit(SB),NOSPLIT,$0-0 * go-routine */ -// void gosave(Gobuf*) -// save state in Gobuf; setjmp -TEXT runtime·gosave(SB), NOSPLIT, $0-4 - MOVL buf+0(FP), AX // gobuf - LEAL buf+0(FP), BX // caller's SP - MOVL BX, gobuf_sp(AX) - MOVL 0(SP), BX // caller's PC - MOVL BX, gobuf_pc(AX) - MOVL $0, gobuf_ret(AX) - // Assert ctxt is zero. See func save. - MOVL gobuf_ctxt(AX), BX - TESTL BX, BX - JZ 2(PC) - CALL runtime·badctxt(SB) - get_tls(CX) - MOVL g(CX), BX - MOVL BX, gobuf_g(AX) - RET - // void gogo(Gobuf*) // restore state from Gobuf; longjmp TEXT runtime·gogo(SB), NOSPLIT, $8-4 diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s index aece84bde8c00..a9456dc9ff43d 100644 --- a/src/runtime/asm_amd64.s +++ b/src/runtime/asm_amd64.s @@ -254,26 +254,6 @@ TEXT runtime·asminit(SB),NOSPLIT,$0-0 * go-routine */ -// func gosave(buf *gobuf) -// save state in Gobuf; setjmp -TEXT runtime·gosave(SB), NOSPLIT, $0-8 - MOVQ buf+0(FP), AX // gobuf - LEAQ buf+0(FP), BX // caller's SP - MOVQ BX, gobuf_sp(AX) - MOVQ 0(SP), BX // caller's PC - MOVQ BX, gobuf_pc(AX) - MOVQ $0, gobuf_ret(AX) - MOVQ BP, gobuf_bp(AX) - // Assert ctxt is zero. See func save. - MOVQ gobuf_ctxt(AX), BX - TESTQ BX, BX - JZ 2(PC) - CALL runtime·badctxt(SB) - get_tls(CX) - MOVQ g(CX), BX - MOVQ BX, gobuf_g(AX) - RET - // func gogo(buf *gobuf) // restore state from Gobuf; longjmp TEXT runtime·gogo(SB), NOSPLIT, $16-8 diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s index c54b4eb0061e6..8eec84d3f2e64 100644 --- a/src/runtime/asm_arm.s +++ b/src/runtime/asm_arm.s @@ -206,23 +206,6 @@ TEXT runtime·asminit(SB),NOSPLIT,$0-0 * go-routine */ -// void gosave(Gobuf*) -// save state in Gobuf; setjmp -TEXT runtime·gosave(SB),NOSPLIT|NOFRAME,$0-4 - MOVW buf+0(FP), R0 - MOVW R13, gobuf_sp(R0) - MOVW LR, gobuf_pc(R0) - MOVW g, gobuf_g(R0) - MOVW $0, R11 - MOVW R11, gobuf_lr(R0) - MOVW R11, gobuf_ret(R0) - // Assert ctxt is zero. See func save. 
- MOVW gobuf_ctxt(R0), R0 - CMP R0, R11 - B.EQ 2(PC) - CALL runtime·badctxt(SB) - RET - // void gogo(Gobuf*) // restore state from Gobuf; longjmp TEXT runtime·gogo(SB),NOSPLIT,$8-4 diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s index a09172f0c964d..8e4a1f74f9a17 100644 --- a/src/runtime/asm_arm64.s +++ b/src/runtime/asm_arm64.s @@ -113,23 +113,6 @@ TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0 * go-routine */ -// void gosave(Gobuf*) -// save state in Gobuf; setjmp -TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8 - MOVD buf+0(FP), R3 - MOVD RSP, R0 - MOVD R0, gobuf_sp(R3) - MOVD R29, gobuf_bp(R3) - MOVD LR, gobuf_pc(R3) - MOVD g, gobuf_g(R3) - MOVD ZR, gobuf_lr(R3) - MOVD ZR, gobuf_ret(R3) - // Assert ctxt is zero. See func save. - MOVD gobuf_ctxt(R3), R0 - CBZ R0, 2(PC) - CALL runtime·badctxt(SB) - RET - // void gogo(Gobuf*) // restore state from Gobuf; longjmp TEXT runtime·gogo(SB), NOSPLIT, $24-8 diff --git a/src/runtime/asm_mips64x.s b/src/runtime/asm_mips64x.s index 19781f7885fda..054a89dc37eee 100644 --- a/src/runtime/asm_mips64x.s +++ b/src/runtime/asm_mips64x.s @@ -89,21 +89,6 @@ TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0 * go-routine */ -// void gosave(Gobuf*) -// save state in Gobuf; setjmp -TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8 - MOVV buf+0(FP), R1 - MOVV R29, gobuf_sp(R1) - MOVV R31, gobuf_pc(R1) - MOVV g, gobuf_g(R1) - MOVV R0, gobuf_lr(R1) - MOVV R0, gobuf_ret(R1) - // Assert ctxt is zero. See func save. - MOVV gobuf_ctxt(R1), R1 - BEQ R1, 2(PC) - JAL runtime·badctxt(SB) - RET - // void gogo(Gobuf*) // restore state from Gobuf; longjmp TEXT runtime·gogo(SB), NOSPLIT, $16-8 diff --git a/src/runtime/asm_mipsx.s b/src/runtime/asm_mipsx.s index ee87d81436304..f57437d590279 100644 --- a/src/runtime/asm_mipsx.s +++ b/src/runtime/asm_mipsx.s @@ -90,21 +90,6 @@ TEXT runtime·asminit(SB),NOSPLIT,$0-0 * go-routine */ -// void gosave(Gobuf*) -// save state in Gobuf; setjmp -TEXT runtime·gosave(SB),NOSPLIT|NOFRAME,$0-4 - MOVW buf+0(FP), R1 - MOVW R29, gobuf_sp(R1) - MOVW R31, gobuf_pc(R1) - MOVW g, gobuf_g(R1) - MOVW R0, gobuf_lr(R1) - MOVW R0, gobuf_ret(R1) - // Assert ctxt is zero. See func save. - MOVW gobuf_ctxt(R1), R1 - BEQ R1, 2(PC) - JAL runtime·badctxt(SB) - RET - // void gogo(Gobuf*) // restore state from Gobuf; longjmp TEXT runtime·gogo(SB),NOSPLIT,$8-4 diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s index dc34c0e4c876b..763a92adf1864 100644 --- a/src/runtime/asm_ppc64x.s +++ b/src/runtime/asm_ppc64x.s @@ -128,23 +128,6 @@ TEXT runtime·reginit(SB),NOSPLIT|NOFRAME,$0-0 * go-routine */ -// void gosave(Gobuf*) -// save state in Gobuf; setjmp -TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8 - MOVD buf+0(FP), R3 - MOVD R1, gobuf_sp(R3) - MOVD LR, R31 - MOVD R31, gobuf_pc(R3) - MOVD g, gobuf_g(R3) - MOVD R0, gobuf_lr(R3) - MOVD R0, gobuf_ret(R3) - // Assert ctxt is zero. See func save. 
- MOVD gobuf_ctxt(R3), R3 - CMP R0, R3 - BEQ 2(PC) - BL runtime·badctxt(SB) - RET - // void gogo(Gobuf*) // restore state from Gobuf; longjmp TEXT runtime·gogo(SB), NOSPLIT, $16-8 diff --git a/src/runtime/asm_riscv64.s b/src/runtime/asm_riscv64.s index 01b42dc3debe6..cf460d1586c83 100644 --- a/src/runtime/asm_riscv64.s +++ b/src/runtime/asm_riscv64.s @@ -297,21 +297,6 @@ TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8 JALR RA, T1 JMP runtime·badmcall2(SB) -// func gosave(buf *gobuf) -// save state in Gobuf; setjmp -TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8 - MOV buf+0(FP), T1 - MOV X2, gobuf_sp(T1) - MOV RA, gobuf_pc(T1) - MOV g, gobuf_g(T1) - MOV ZERO, gobuf_lr(T1) - MOV ZERO, gobuf_ret(T1) - // Assert ctxt is zero. See func save. - MOV gobuf_ctxt(T1), T1 - BEQ T1, ZERO, 2(PC) - CALL runtime·badctxt(SB) - RET - // Save state of caller into g->sched. Smashes X31. TEXT gosave<>(SB),NOSPLIT|NOFRAME,$0 MOV X1, (g_sched+gobuf_pc)(g) diff --git a/src/runtime/asm_s390x.s b/src/runtime/asm_s390x.s index 7baef37324755..1cd5eca06f8c3 100644 --- a/src/runtime/asm_s390x.s +++ b/src/runtime/asm_s390x.s @@ -174,21 +174,6 @@ TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0 * go-routine */ -// void gosave(Gobuf*) -// save state in Gobuf; setjmp -TEXT runtime·gosave(SB), NOSPLIT, $-8-8 - MOVD buf+0(FP), R3 - MOVD R15, gobuf_sp(R3) - MOVD LR, gobuf_pc(R3) - MOVD g, gobuf_g(R3) - MOVD $0, gobuf_lr(R3) - MOVD $0, gobuf_ret(R3) - // Assert ctxt is zero. See func save. - MOVD gobuf_ctxt(R3), R3 - CMPBEQ R3, $0, 2(PC) - BL runtime·badctxt(SB) - RET - // void gogo(Gobuf*) // restore state from Gobuf; longjmp TEXT runtime·gogo(SB), NOSPLIT, $16-8 diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go index 2ee2c74dfe07e..36bbc8991ac0f 100644 --- a/src/runtime/stubs.go +++ b/src/runtime/stubs.go @@ -167,7 +167,6 @@ func noescape(p unsafe.Pointer) unsafe.Pointer { // pointer-declared arguments. func cgocallback(fn, frame, ctxt uintptr) func gogo(buf *gobuf) -func gosave(buf *gobuf) //go:noescape func jmpdefer(fv *funcval, argp uintptr) From a21de9ec73b8a433cafd336448dc8111a4e4571e Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Wed, 3 Feb 2021 15:07:33 -0500 Subject: [PATCH 449/474] [dev.regabi] cmd/link: resolve symbol ABI in shared linkage In shared build mode and linkage, currently we assume all function symbols are ABI0 (except for generated type algorithm functions), and alias them to ABIInternal. When the two ABIs actually differ (as it is now), this is not actually correct. This CL resolves symbol ABI based on their mangled names. If the symbol's name has a ".abi0" or ".abiinternal" suffix, it is of the corresponding ABI. The symbol without the suffix is the other ABI. For functions without ABI wrapper generated, only one ABI exists but we don't know what it is, so we still use alias (for now). Change-Id: I2165f149bc83d513e81eb1eb4ee95464335b0e75 Reviewed-on: https://go-review.googlesource.com/c/go/+/289289 Trust: Cherry Zhang Run-TryBot: Cherry Zhang TryBot-Result: Go Bot Reviewed-by: Than McIntosh --- src/cmd/link/internal/ld/lib.go | 44 +++++++++++++++++++++++++++++++-- 1 file changed, 42 insertions(+), 2 deletions(-) diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index 17d5040827c0e..71cef0b774ff8 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -2091,6 +2091,26 @@ func ldshlibsyms(ctxt *Link, shlib string) { Errorf(nil, "cannot read symbols from shared library: %s", libpath) return } + + // collect text symbol ABI versions. 
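+	// putelfsym appends ".abi0" or ".abiinternal" to the non-primary
+	// symbol's name, so seeing either suffix tells us which ABI the
+	// unmangled (primary) symbol carries.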
+ symabi := make(map[string]int) // map (unmangled) symbol name to version + if *flagAbiWrap { + for _, elfsym := range syms { + if elf.ST_TYPE(elfsym.Info) != elf.STT_FUNC { + continue + } + // Demangle the name. Keep in sync with symtab.go:putelfsym. + if strings.HasSuffix(elfsym.Name, ".abiinternal") { + // ABIInternal symbol has mangled name, so the primary symbol is ABI0. + symabi[strings.TrimSuffix(elfsym.Name, ".abiinternal")] = 0 + } + if strings.HasSuffix(elfsym.Name, ".abi0") { + // ABI0 symbol has mangled name, so the primary symbol is ABIInternal. + symabi[strings.TrimSuffix(elfsym.Name, ".abi0")] = sym.SymVerABIInternal + } + } + } + for _, elfsym := range syms { if elf.ST_TYPE(elfsym.Info) == elf.STT_NOTYPE || elf.ST_TYPE(elfsym.Info) == elf.STT_SECTION { continue @@ -2099,12 +2119,23 @@ func ldshlibsyms(ctxt *Link, shlib string) { // Symbols whose names start with "type." are compiler // generated, so make functions with that prefix internal. ver := 0 + symname := elfsym.Name // (unmangled) symbol name if elf.ST_TYPE(elfsym.Info) == elf.STT_FUNC && strings.HasPrefix(elfsym.Name, "type.") { ver = sym.SymVerABIInternal + } else if *flagAbiWrap && elf.ST_TYPE(elfsym.Info) == elf.STT_FUNC { + if strings.HasSuffix(elfsym.Name, ".abiinternal") { + ver = sym.SymVerABIInternal + symname = strings.TrimSuffix(elfsym.Name, ".abiinternal") + } else if strings.HasSuffix(elfsym.Name, ".abi0") { + ver = 0 + symname = strings.TrimSuffix(elfsym.Name, ".abi0") + } else if abi, ok := symabi[elfsym.Name]; ok { + ver = abi + } } l := ctxt.loader - s := l.LookupOrCreateSym(elfsym.Name, ver) + s := l.LookupOrCreateSym(symname, ver) // Because loadlib above loads all .a files before loading // any shared libraries, any non-dynimport symbols we find @@ -2129,6 +2160,10 @@ func ldshlibsyms(ctxt *Link, shlib string) { } } + if symname != elfsym.Name { + l.SetSymExtname(s, elfsym.Name) + } + // For function symbols, we don't know what ABI is // available, so alias it under both ABIs. // @@ -2137,7 +2172,12 @@ func ldshlibsyms(ctxt *Link, shlib string) { // mangle Go function names in the .so to include the // ABI. if elf.ST_TYPE(elfsym.Info) == elf.STT_FUNC && ver == 0 { - alias := ctxt.loader.LookupOrCreateSym(elfsym.Name, sym.SymVerABIInternal) + if *flagAbiWrap { + if _, ok := symabi[symname]; ok { + continue // only use alias for functions w/o ABI wrappers + } + } + alias := ctxt.loader.LookupOrCreateSym(symname, sym.SymVerABIInternal) if l.SymType(alias) != 0 { continue } From 5d7dc53888c3c91ef4122d584a064bc24b6f7540 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Tue, 2 Feb 2021 18:20:16 -0500 Subject: [PATCH 450/474] [dev.regabi] cmd/compile, runtime: reserve R14 as g registers on AMD64 This is a proof-of-concept change for using the g register on AMD64. getg is now lowered to R14 in the new ABI. The g register is not yet used in all places where it can be used (e.g. stack bounds check, runtime assembly code). 
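
Concretely, the lowering of getg is now conditional (see the AMD64.rules
hunk below):

    (GetG mem) && !base.Flag.ABIWrap => (LoweredGetG mem)

Under the new ABI no TLS load is emitted at all: hasGReg is enabled for
AMD64 when -abiwrap is in effect, so the register allocator satisfies
GetG directly from R14, and getgFromTLS re-establishes R14 from TLS at
each ABI0/ABIInternal call boundary.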
Change-Id: I10123ddf38e31782cf58bafcdff170aee0ff0d1b Reviewed-on: https://go-review.googlesource.com/c/go/+/289196 Trust: Cherry Zhang Run-TryBot: Cherry Zhang TryBot-Result: Go Bot Reviewed-by: Than McIntosh Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/ssa.go | 63 +- src/cmd/compile/internal/ssa/config.go | 3 +- src/cmd/compile/internal/ssa/gen/AMD64.rules | 2 +- src/cmd/compile/internal/ssa/gen/AMD64Ops.go | 42 +- src/cmd/compile/internal/ssa/gen/rulegen.go | 1 + src/cmd/compile/internal/ssa/opGen.go | 2014 +++++++++--------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 20 +- src/runtime/asm_amd64.s | 35 +- src/runtime/race_amd64.s | 7 +- src/runtime/sys_linux_amd64.s | 1 + 10 files changed, 1113 insertions(+), 1075 deletions(-) diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index d9c97183fd5d0..4938e4b0e3519 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -166,6 +166,34 @@ func duff(size int64) (int64, int64) { return off, adj } +func getgFromTLS(s *ssagen.State, r int16) { + // See the comments in cmd/internal/obj/x86/obj6.go + // near CanUse1InsnTLS for a detailed explanation of these instructions. + if x86.CanUse1InsnTLS(base.Ctxt) { + // MOVQ (TLS), r + p := s.Prog(x86.AMOVQ) + p.From.Type = obj.TYPE_MEM + p.From.Reg = x86.REG_TLS + p.To.Type = obj.TYPE_REG + p.To.Reg = r + } else { + // MOVQ TLS, r + // MOVQ (r)(TLS*1), r + p := s.Prog(x86.AMOVQ) + p.From.Type = obj.TYPE_REG + p.From.Reg = x86.REG_TLS + p.To.Type = obj.TYPE_REG + p.To.Reg = r + q := s.Prog(x86.AMOVQ) + q.From.Type = obj.TYPE_MEM + q.From.Reg = r + q.From.Index = x86.REG_TLS + q.From.Scale = 1 + q.To.Type = obj.TYPE_REG + q.To.Reg = r + } +} + func ssaGenValue(s *ssagen.State, v *ssa.Value) { switch v.Op { case ssa.OpAMD64VFMADD231SD: @@ -989,41 +1017,24 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // Closure pointer is DX. ssagen.CheckLoweredGetClosurePtr(v) case ssa.OpAMD64LoweredGetG: - r := v.Reg() - // See the comments in cmd/internal/obj/x86/obj6.go - // near CanUse1InsnTLS for a detailed explanation of these instructions. 
- if x86.CanUse1InsnTLS(base.Ctxt) { - // MOVQ (TLS), r - p := s.Prog(x86.AMOVQ) - p.From.Type = obj.TYPE_MEM - p.From.Reg = x86.REG_TLS - p.To.Type = obj.TYPE_REG - p.To.Reg = r - } else { - // MOVQ TLS, r - // MOVQ (r)(TLS*1), r - p := s.Prog(x86.AMOVQ) - p.From.Type = obj.TYPE_REG - p.From.Reg = x86.REG_TLS - p.To.Type = obj.TYPE_REG - p.To.Reg = r - q := s.Prog(x86.AMOVQ) - q.From.Type = obj.TYPE_MEM - q.From.Reg = r - q.From.Index = x86.REG_TLS - q.From.Scale = 1 - q.To.Type = obj.TYPE_REG - q.To.Reg = r + if base.Flag.ABIWrap { + v.Fatalf("LoweredGetG should not appear in new ABI") } + r := v.Reg() + getgFromTLS(s, r) case ssa.OpAMD64CALLstatic: if s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal { // zeroing X15 when entering ABIInternal from ABI0 opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15) + // set G register from TLS + getgFromTLS(s, x86.REG_R14) } s.Call(v) if s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 { // zeroing X15 when entering ABIInternal from ABI0 opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15) + // set G register from TLS + getgFromTLS(s, x86.REG_R14) } case ssa.OpAMD64CALLclosure, ssa.OpAMD64CALLinter: s.Call(v) @@ -1325,6 +1336,8 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { if s.ABI == obj.ABI0 && b.Aux.(*obj.LSym).ABI() == obj.ABIInternal { // zeroing X15 when entering ABIInternal from ABI0 opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15) + // set G register from TLS + getgFromTLS(s, x86.REG_R14) } p := s.Prog(obj.ARET) p.To.Type = obj.TYPE_MEM diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go index 32cfd7e61eaad..c29bc8fae6f70 100644 --- a/src/cmd/compile/internal/ssa/config.go +++ b/src/cmd/compile/internal/ssa/config.go @@ -5,6 +5,7 @@ package ssa import ( + "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/obj" @@ -197,7 +198,7 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config c.specialRegMask = specialRegMaskAMD64 c.FPReg = framepointerRegAMD64 c.LinkReg = linkRegAMD64 - c.hasGReg = false + c.hasGReg = base.Flag.ABIWrap case "386": c.PtrSize = 4 c.RegSize = 4 diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index 706336289e189..3c75bcfa05f61 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -459,7 +459,7 @@ (IsInBounds idx len) => (SETB (CMPQ idx len)) (IsSliceInBounds idx len) => (SETBE (CMPQ idx len)) (NilCheck ...) => (LoweredNilCheck ...) -(GetG ...) => (LoweredGetG ...) +(GetG mem) && !base.Flag.ABIWrap => (LoweredGetG mem) // only lower in old ABI. in new ABI we have a G register. (GetClosurePtr ...) => (LoweredGetClosurePtr ...) (GetCallerPC ...) => (LoweredGetCallerPC ...) (GetCallerSP ...) => (LoweredGetCallerSP ...) diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go index 0a411bbdca0b3..043162e544e5e 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go @@ -44,7 +44,7 @@ var regNamesAMD64 = []string{ "R11", "R12", "R13", - "R14", + "g", // a.k.a. 
R14 "R15", "X0", "X1", @@ -96,12 +96,14 @@ func init() { cx = buildReg("CX") dx = buildReg("DX") bx = buildReg("BX") - gp = buildReg("AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15") + gp = buildReg("AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15") + g = buildReg("g") fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14") x15 = buildReg("X15") gpsp = gp | buildReg("SP") gpspsb = gpsp | buildReg("SB") - callerSave = gp | fp + gpspsbg = gpspsb | g + callerSave = gp | fp | g // runtime.setg (and anything calling it) may clobber g ) // Common slices of register masks var ( @@ -114,10 +116,10 @@ func init() { gp01 = regInfo{inputs: nil, outputs: gponly} gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly} gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly} - gp11sb = regInfo{inputs: []regMask{gpspsb}, outputs: gponly} + gp11sb = regInfo{inputs: []regMask{gpspsbg}, outputs: gponly} gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly} gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly} - gp21sb = regInfo{inputs: []regMask{gpspsb, gpsp}, outputs: gponly} + gp21sb = regInfo{inputs: []regMask{gpspsbg, gpsp}, outputs: gponly} gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}} gp11div = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{ax, dx}} gp21hmul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx}, clobbers: ax} @@ -126,9 +128,9 @@ func init() { gp2flags = regInfo{inputs: []regMask{gpsp, gpsp}} gp1flags = regInfo{inputs: []regMask{gpsp}} - gp0flagsLoad = regInfo{inputs: []regMask{gpspsb, 0}} - gp1flagsLoad = regInfo{inputs: []regMask{gpspsb, gpsp, 0}} - gp2flagsLoad = regInfo{inputs: []regMask{gpspsb, gpsp, gpsp, 0}} + gp0flagsLoad = regInfo{inputs: []regMask{gpspsbg, 0}} + gp1flagsLoad = regInfo{inputs: []regMask{gpspsbg, gpsp, 0}} + gp2flagsLoad = regInfo{inputs: []regMask{gpspsbg, gpsp, gpsp, 0}} flagsgp = regInfo{inputs: nil, outputs: gponly} gp11flags = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp, 0}} @@ -137,24 +139,24 @@ func init() { readflags = regInfo{inputs: nil, outputs: gponly} flagsgpax = regInfo{inputs: nil, clobbers: ax, outputs: []regMask{gp &^ ax}} - gpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: gponly} - gp21load = regInfo{inputs: []regMask{gp, gpspsb, 0}, outputs: gponly} - gploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: gponly} - gp21loadidx = regInfo{inputs: []regMask{gp, gpspsb, gpsp, 0}, outputs: gponly} + gpload = regInfo{inputs: []regMask{gpspsbg, 0}, outputs: gponly} + gp21load = regInfo{inputs: []regMask{gp, gpspsbg, 0}, outputs: gponly} + gploadidx = regInfo{inputs: []regMask{gpspsbg, gpsp, 0}, outputs: gponly} + gp21loadidx = regInfo{inputs: []regMask{gp, gpspsbg, gpsp, 0}, outputs: gponly} gp21pax = regInfo{inputs: []regMask{gp &^ ax, gp}, outputs: []regMask{gp &^ ax}, clobbers: ax} - gpstore = regInfo{inputs: []regMask{gpspsb, gpsp, 0}} - gpstoreconst = regInfo{inputs: []regMask{gpspsb, 0}} - gpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, gpsp, 0}} - gpstoreconstidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}} - gpstorexchg = regInfo{inputs: []regMask{gp, gpspsb, 0}, outputs: []regMask{gp}} + gpstore = regInfo{inputs: []regMask{gpspsbg, gpsp, 0}} + gpstoreconst = regInfo{inputs: []regMask{gpspsbg, 0}} + gpstoreidx = regInfo{inputs: []regMask{gpspsbg, gpsp, gpsp, 0}} + gpstoreconstidx = regInfo{inputs: []regMask{gpspsbg, gpsp, 0}} + gpstorexchg = regInfo{inputs: []regMask{gp, gpspsbg, 0}, outputs: []regMask{gp}} cmpxchg = regInfo{inputs: 
[]regMask{gp, ax, gp, 0}, outputs: []regMask{gp, 0}, clobbers: ax} fp01 = regInfo{inputs: nil, outputs: fponly} fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: fponly} fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly} - fp21load = regInfo{inputs: []regMask{fp, gpspsb, 0}, outputs: fponly} - fp21loadidx = regInfo{inputs: []regMask{fp, gpspsb, gpspsb, 0}, outputs: fponly} + fp21load = regInfo{inputs: []regMask{fp, gpspsbg, 0}, outputs: fponly} + fp21loadidx = regInfo{inputs: []regMask{fp, gpspsbg, gpspsb, 0}, outputs: fponly} fpgp = regInfo{inputs: fponly, outputs: gponly} gpfp = regInfo{inputs: gponly, outputs: fponly} fp11 = regInfo{inputs: fponly, outputs: fponly} @@ -830,7 +832,7 @@ func init() { {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true}, // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier // It saves all GP registers if necessary, but may clobber others. - {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("DI"), buildReg("AX CX DX BX BP SI R8 R9")}, clobbers: callerSave &^ gp}, clobberFlags: true, aux: "Sym", symEffect: "None"}, + {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("DI"), buildReg("AX CX DX BX BP SI R8 R9")}, clobbers: callerSave &^ (gp | g)}, clobberFlags: true, aux: "Sym", symEffect: "None"}, {name: "LoweredHasCPUFeature", argLength: 0, reg: gp01, rematerializeable: true, typ: "UInt64", aux: "Sym", symEffect: "None"}, diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go index aaf9101368fcf..6388aab3621b9 100644 --- a/src/cmd/compile/internal/ssa/gen/rulegen.go +++ b/src/cmd/compile/internal/ssa/gen/rulegen.go @@ -582,6 +582,7 @@ func fprint(w io.Writer, n Node) { "math", "cmd/internal/obj", "cmd/internal/objabi", + "cmd/compile/internal/base", "cmd/compile/internal/types", }, n.Arch.imports...) 
{ fmt.Fprintf(w, "import %q\n", path) diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 9ad4c2f305f9e..ccfed93475ddb 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -6287,7 +6287,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVSS, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6303,7 +6303,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6343,8 +6343,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6360,8 +6360,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6377,8 +6377,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6394,8 +6394,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6412,7 +6412,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -6426,7 +6426,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, 
@@ -6439,9 +6439,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -6454,9 +6454,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -6469,9 +6469,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -6484,9 +6484,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -6501,7 +6501,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6519,7 +6519,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6537,7 +6537,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6555,7 +6555,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6573,7 +6573,7 @@ var opcodeTable 
= [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6591,7 +6591,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6609,7 +6609,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6627,7 +6627,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6645,8 +6645,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB - {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6664,8 +6664,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB - {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6683,8 +6683,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB - {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6702,8 +6702,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB - {2, 4295032831}, // 
AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6721,8 +6721,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB - {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6740,8 +6740,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB - {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6759,8 +6759,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB - {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6778,8 +6778,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB - {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6797,8 +6797,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB - {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6816,8 +6816,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB - {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP 
BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6835,8 +6835,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB - {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6854,8 +6854,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB - {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6873,8 +6873,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB - {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6892,8 +6892,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB - {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6911,8 +6911,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB - {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6930,8 +6930,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB - {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6946,11 +6946,11 @@ var 
opcodeTable = [...]opInfo{ asm: x86.AADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -6962,11 +6962,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -6978,10 +6978,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -6993,10 +6993,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7010,7 +7010,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -7024,7 +7024,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -7036,11 +7036,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7052,11 +7052,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7069,10 +7069,10 @@ var opcodeTable = [...]opInfo{ asm: 
x86.ASUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7085,10 +7085,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7101,11 +7101,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AIMULQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7118,11 +7118,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AIMULL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7134,10 +7134,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AIMUL3Q, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7149,10 +7149,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AIMUL3L, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7165,7 +7165,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 1}, // AX - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, clobbers: 4, // DX outputs: []outputInfo{ @@ -7183,7 +7183,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 1}, // AX - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, clobbers: 4, // DX outputs: []outputInfo{ @@ -7200,7 +7200,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 1}, // AX - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, clobbers: 1, // AX outputs: []outputInfo{ @@ -7216,7 +7216,7 @@ var opcodeTable = [...]opInfo{ reg: 
regInfo{ inputs: []inputInfo{ {0, 1}, // AX - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, clobbers: 1, // AX outputs: []outputInfo{ @@ -7232,7 +7232,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 1}, // AX - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, clobbers: 1, // AX outputs: []outputInfo{ @@ -7248,7 +7248,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 1}, // AX - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, clobbers: 1, // AX outputs: []outputInfo{ @@ -7264,11 +7264,11 @@ var opcodeTable = [...]opInfo{ clobberFlags: true, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7281,7 +7281,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 1}, // AX - {1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {0, 1}, // AX @@ -7298,7 +7298,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 1}, // AX - {1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {0, 1}, // AX @@ -7315,7 +7315,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 1}, // AX - {1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {0, 1}, // AX @@ -7331,7 +7331,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 1}, // AX - {1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {0, 1}, // AX @@ -7347,7 +7347,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 1}, // AX - {1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {0, 1}, // AX @@ -7363,7 +7363,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 1}, // AX - {1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {0, 1}, // AX @@ -7378,11 +7378,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ANEGL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {1, 0}, - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7394,12 +7394,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 
R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {1, 0}, - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7411,12 +7411,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AADCQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {1, 0}, - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7428,11 +7428,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {1, 0}, - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7444,11 +7444,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AADCQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {1, 0}, - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7459,12 +7459,12 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {1, 0}, - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7475,12 +7475,12 @@ var opcodeTable = [...]opInfo{ asm: x86.ASBBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {1, 0}, - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7492,11 +7492,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {1, 0}, - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7508,11 +7508,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ASBBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {1, 0}, - {0, 65519}, // AX 
CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7525,7 +7525,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 1}, // AX - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {0, 4}, // DX @@ -7542,7 +7542,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {0, 4}, // DX {1, 1}, // AX - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {0, 1}, // AX @@ -7559,11 +7559,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7576,11 +7576,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7593,10 +7593,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7609,10 +7609,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7626,7 +7626,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -7640,7 +7640,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -7653,11 +7653,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 
R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7670,11 +7670,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7687,10 +7687,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7703,10 +7703,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7720,7 +7720,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -7734,7 +7734,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -7747,11 +7747,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7764,11 +7764,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7781,10 +7781,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7797,10 +7797,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - 
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7814,7 +7814,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -7828,7 +7828,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -7838,8 +7838,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7849,8 +7849,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7860,8 +7860,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7871,8 +7871,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7883,7 +7883,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7894,7 +7894,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7905,7 +7905,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7916,7 +7916,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -7929,8 +7929,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPQ, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 
R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -7943,8 +7943,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPL, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -7957,8 +7957,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -7971,8 +7971,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPB, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -7985,7 +7985,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -7998,7 +7998,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8011,7 +8011,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8024,7 +8024,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8037,9 +8037,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8053,9 +8053,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 
4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8068,9 +8068,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8084,9 +8084,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8099,9 +8099,9 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8115,9 +8115,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8131,9 +8131,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8146,8 +8146,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8161,8 +8161,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 
R13 g R15 SB }, }, }, @@ -8175,8 +8175,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8190,8 +8190,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8204,8 +8204,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8219,8 +8219,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8234,8 +8234,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8267,8 +8267,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8278,8 +8278,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8291,11 +8291,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTCL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8307,11 +8307,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTCQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 
R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8323,11 +8323,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTRL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8339,11 +8339,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTRQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8355,11 +8355,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTSL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8371,11 +8371,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8386,7 +8386,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8397,7 +8397,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8410,10 +8410,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTCL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8426,10 +8426,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTCQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 
49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8442,10 +8442,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTRL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8458,10 +8458,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTRQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8474,10 +8474,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTSL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8490,10 +8490,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8507,8 +8507,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTCQ, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8522,8 +8522,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTCL, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8537,8 +8537,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTSQ, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8552,8 +8552,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTSL, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8567,8 +8567,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTRQ, reg: 
regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8582,8 +8582,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTRL, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8597,7 +8597,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTCQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8611,7 +8611,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTCL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8625,7 +8625,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8639,7 +8639,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTSL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8653,7 +8653,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTRQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8667,7 +8667,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTRL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8678,8 +8678,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ATESTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8690,8 +8690,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ATESTL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8702,8 +8702,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ATESTW, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8714,8 +8714,8 @@ var 
opcodeTable = [...]opInfo{ asm: x86.ATESTB, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8726,7 +8726,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ATESTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8737,7 +8737,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ATESTL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8748,7 +8748,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ATESTW, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8759,7 +8759,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ATESTB, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8772,10 +8772,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8788,10 +8788,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8804,10 +8804,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8820,10 +8820,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHLL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8836,10 +8836,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8852,10 +8852,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 
R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8868,10 +8868,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8884,10 +8884,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8900,10 +8900,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHRQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8916,10 +8916,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHRL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8932,10 +8932,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHRW, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8948,10 +8948,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHRB, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8964,10 +8964,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8980,10 +8980,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -8996,10 +8996,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: 
[]inputInfo{ {1, 2}, // CX - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9012,10 +9012,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9028,10 +9028,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ASARQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9044,10 +9044,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ASARL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9060,10 +9060,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ASARW, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9076,10 +9076,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ASARB, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9092,10 +9092,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9108,10 +9108,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9124,10 +9124,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, 
 	},
@@ -9140,10 +9140,10 @@ var opcodeTable = [...]opInfo{
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2}, // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9156,10 +9156,10 @@ var opcodeTable = [...]opInfo{
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2}, // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9172,10 +9172,10 @@ var opcodeTable = [...]opInfo{
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2}, // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9188,10 +9188,10 @@ var opcodeTable = [...]opInfo{
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2}, // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9204,10 +9204,10 @@ var opcodeTable = [...]opInfo{
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2}, // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9220,10 +9220,10 @@ var opcodeTable = [...]opInfo{
 		asm: x86.AROLQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9236,10 +9236,10 @@ var opcodeTable = [...]opInfo{
 		asm: x86.AROLL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9252,10 +9252,10 @@ var opcodeTable = [...]opInfo{
 		asm: x86.AROLW,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9268,10 +9268,10 @@ var opcodeTable = [...]opInfo{
 		asm: x86.AROLB,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9286,11 +9286,11 @@ var opcodeTable = [...]opInfo{
 		asm: x86.AADDL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9305,11 +9305,11 @@ var opcodeTable = [...]opInfo{
 		asm: x86.AADDQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9324,11 +9324,11 @@ var opcodeTable = [...]opInfo{
 		asm: x86.ASUBQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9343,11 +9343,11 @@ var opcodeTable = [...]opInfo{
 		asm: x86.ASUBL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9362,11 +9362,11 @@ var opcodeTable = [...]opInfo{
 		asm: x86.AANDL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9381,11 +9381,11 @@ var opcodeTable = [...]opInfo{
 		asm: x86.AANDQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9400,11 +9400,11 @@ var opcodeTable = [...]opInfo{
 		asm: x86.AORQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9419,11 +9419,11 @@ var opcodeTable = [...]opInfo{
 		asm: x86.AORL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9438,11 +9438,11 @@ var opcodeTable = [...]opInfo{
 		asm: x86.AXORQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9457,11 +9457,11 @@ var opcodeTable = [...]opInfo{
 		asm: x86.AXORL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9476,12 +9476,12 @@ var opcodeTable = [...]opInfo{
 		scale: 1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9496,12 +9496,12 @@ var opcodeTable = [...]opInfo{
 		scale: 4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9516,12 +9516,12 @@ var opcodeTable = [...]opInfo{
 		scale: 8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9536,12 +9536,12 @@ var opcodeTable = [...]opInfo{
 		scale: 1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9556,12 +9556,12 @@ var opcodeTable = [...]opInfo{
 		scale: 8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9576,12 +9576,12 @@ var opcodeTable = [...]opInfo{
 		scale: 1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9596,12 +9596,12 @@ var opcodeTable = [...]opInfo{
 		scale: 4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9616,12 +9616,12 @@ var opcodeTable = [...]opInfo{
 		scale: 8,
 		reg: regInfo{
 			inputs: []inputInfo{
-
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9636,12 +9636,12 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9656,12 +9656,12 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9676,12 +9676,12 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9696,12 +9696,12 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9716,12 +9716,12 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX 
DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9736,12 +9736,12 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9756,12 +9756,12 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9776,12 +9776,12 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9796,12 +9796,12 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9816,12 +9816,12 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP 
SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9836,12 +9836,12 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9856,12 +9856,12 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9876,12 +9876,12 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9896,12 +9896,12 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9916,12 +9916,12 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 
R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9936,12 +9936,12 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9956,12 +9956,12 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -9975,8 +9975,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -9990,8 +9990,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBQ, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10005,8 +10005,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10020,8 +10020,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10035,8 +10035,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX 
SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10050,8 +10050,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10065,8 +10065,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBL, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10080,8 +10080,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10095,8 +10095,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10110,8 +10110,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10125,9 +10125,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10141,9 +10141,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10157,9 +10157,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 
R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10173,9 +10173,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10189,9 +10189,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10205,9 +10205,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10221,9 +10221,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10237,9 +10237,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10253,9 +10253,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, 
}, @@ -10269,9 +10269,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10285,9 +10285,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10301,9 +10301,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10317,9 +10317,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10333,9 +10333,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10349,9 +10349,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10365,9 +10365,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 
R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10381,9 +10381,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10397,9 +10397,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10413,9 +10413,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10429,9 +10429,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10445,9 +10445,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10461,9 +10461,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10477,9 +10477,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: 
regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10493,9 +10493,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10509,9 +10509,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10525,8 +10525,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10540,8 +10540,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10555,8 +10555,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10570,8 +10570,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10585,8 +10585,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g 
R15 SB }, }, }, @@ -10600,8 +10600,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10615,8 +10615,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10630,8 +10630,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10645,8 +10645,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10660,8 +10660,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10675,8 +10675,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10690,8 +10690,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10705,8 +10705,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10720,8 +10720,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 
R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10735,8 +10735,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10750,8 +10750,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10765,8 +10765,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10780,8 +10780,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10795,8 +10795,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10810,8 +10810,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10823,10 +10823,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ANEGQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -10838,10 +10838,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ANEGL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -10853,10 +10853,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ANOTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 
R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -10868,10 +10868,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ANOTL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -10881,11 +10881,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ABSFQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {1, 0}, - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -10896,10 +10896,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ABSFL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -10909,11 +10909,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ABSRQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {1, 0}, - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -10924,10 +10924,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ABSRL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -10938,11 +10938,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVQEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -10953,11 +10953,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVQNE, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -10968,11 +10968,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVQLT, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 
R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -10983,11 +10983,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVQGT, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -10998,11 +10998,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVQLE, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11013,11 +11013,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVQGE, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11028,11 +11028,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVQLS, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11043,11 +11043,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVQHI, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11058,11 +11058,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVQCC, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ 
-11073,11 +11073,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVQCS, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11088,11 +11088,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVLEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11103,11 +11103,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVLNE, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11118,11 +11118,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVLLT, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11133,11 +11133,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVLGT, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11148,11 +11148,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVLLE, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11163,11 +11163,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVLGE, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 
R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11178,11 +11178,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVLLS, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11193,11 +11193,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVLHI, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11208,11 +11208,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVLCC, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11223,11 +11223,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVLCS, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11238,11 +11238,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVWEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11253,11 +11253,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVWNE, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11268,11 +11268,11 @@ var opcodeTable = [...]opInfo{ asm: 
x86.ACMOVWLT, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11283,11 +11283,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVWGT, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11298,11 +11298,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVWLE, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11313,11 +11313,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVWGE, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11328,11 +11328,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVWLS, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11343,11 +11343,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVWHI, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11358,11 +11358,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVWCC, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 
R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11373,11 +11373,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVWCS, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11388,12 +11388,12 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVQNE, reg: regInfo{ inputs: []inputInfo{ - {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, clobbers: 1, // AX outputs: []outputInfo{ - {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11404,11 +11404,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVQNE, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11419,11 +11419,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVQHI, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11434,11 +11434,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVQCC, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11449,12 +11449,12 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVLNE, reg: regInfo{ inputs: []inputInfo{ - {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, clobbers: 1, // AX outputs: []outputInfo{ - {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11465,11 +11465,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVLNE, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11480,11 +11480,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVLHI, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11495,11 +11495,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVLCC, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11510,12 +11510,12 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVWNE, reg: regInfo{ inputs: []inputInfo{ - {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, clobbers: 1, // AX outputs: []outputInfo{ - {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11526,11 +11526,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVWNE, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11541,11 +11541,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVWHI, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11556,11 +11556,11 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMOVWCC, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: 
[]outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11572,10 +11572,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ABSWAPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11587,10 +11587,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ABSWAPL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11601,10 +11601,10 @@ var opcodeTable = [...]opInfo{ asm: x86.APOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11615,10 +11615,10 @@ var opcodeTable = [...]opInfo{ asm: x86.APOPCNTL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11671,7 +11671,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASBBQ, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11681,7 +11681,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASBBL, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11691,7 +11691,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETEQ, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11701,7 +11701,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETNE, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11711,7 +11711,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETLT, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11721,7 +11721,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETLE, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11731,7 +11731,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETGT, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11741,7 
+11741,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETGE, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11751,7 +11751,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETCS, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11761,7 +11761,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETLS, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11771,7 +11771,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETHI, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11781,7 +11781,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETCC, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11791,7 +11791,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETOS, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11804,7 +11804,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11817,7 +11817,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETNE, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11830,7 +11830,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETLT, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11843,7 +11843,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETLE, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11856,7 +11856,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETGT, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11869,7 +11869,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETGE, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11882,7 +11882,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETCS, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11895,7 +11895,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETLS, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 
R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11908,7 +11908,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETHI, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11921,7 +11921,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETCC, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11933,7 +11933,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ clobbers: 1, // AX outputs: []outputInfo{ - {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11945,7 +11945,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ clobbers: 1, // AX outputs: []outputInfo{ - {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11955,7 +11955,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETPC, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11965,7 +11965,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETPS, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11975,7 +11975,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETHI, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11985,7 +11985,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETCC, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -11995,10 +11995,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBQSX, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12008,10 +12008,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBLZX, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12021,10 +12021,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVWQSX, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12034,10 +12034,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVWLZX, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12047,10 +12047,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVLQSX, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12060,10 +12060,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12075,7 +12075,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVL, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12087,7 +12087,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVQ, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12100,7 +12100,7 @@ var opcodeTable = [...]opInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12113,7 +12113,7 @@ var opcodeTable = [...]opInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12126,7 +12126,7 @@ var opcodeTable = [...]opInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12139,7 +12139,7 @@ var opcodeTable = [...]opInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12149,7 +12149,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACVTSL2SS, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -12162,7 +12162,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACVTSL2SD, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -12175,7 +12175,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACVTSQ2SS, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 
R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -12188,7 +12188,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACVTSQ2SD, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -12226,7 +12226,7 @@ var opcodeTable = [...]opInfo{ argLen: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -12241,7 +12241,7 @@ var opcodeTable = [...]opInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12250,7 +12250,7 @@ var opcodeTable = [...]opInfo{ argLen: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -12265,7 +12265,7 @@ var opcodeTable = [...]opInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12294,10 +12294,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ALEAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12310,10 +12310,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ALEAL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12326,10 +12326,10 @@ var opcodeTable = [...]opInfo{ asm: x86.ALEAW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12343,11 +12343,11 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, 
// AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12361,11 +12361,11 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12379,11 +12379,11 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12396,11 +12396,11 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12413,11 +12413,11 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12430,11 +12430,11 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12447,11 +12447,11 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12464,11 +12464,11 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, 
// AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12481,11 +12481,11 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12498,11 +12498,11 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12515,11 +12515,11 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12532,11 +12532,11 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12549,10 +12549,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBLZX, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12565,10 +12565,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBQSX, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 
R15 }, }, }, @@ -12581,10 +12581,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVWLZX, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12597,10 +12597,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVWQSX, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12613,10 +12613,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12629,10 +12629,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVLQSX, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12645,10 +12645,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12661,8 +12661,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12675,8 +12675,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12689,8 +12689,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12703,8 +12703,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 
R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12717,7 +12717,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVUPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -12734,7 +12734,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -12747,7 +12747,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVUPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -12761,11 +12761,11 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12779,11 +12779,11 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12796,11 +12796,11 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12814,11 +12814,11 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12831,11 +12831,11 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: 
[]inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12848,11 +12848,11 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12866,11 +12866,11 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12883,11 +12883,11 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -12901,9 +12901,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12917,9 +12917,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12932,9 +12932,9 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 
R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12948,9 +12948,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12963,9 +12963,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12978,9 +12978,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12994,9 +12994,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13009,9 +13009,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13024,7 +13024,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13037,7 +13037,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13050,7 +13050,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVL, reg: 
regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13063,7 +13063,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13077,8 +13077,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13092,8 +13092,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13106,8 +13106,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13121,8 +13121,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13135,8 +13135,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13150,8 +13150,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13164,8 +13164,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13202,7 +13202,7 @@ var opcodeTable = [...]opInfo{ clobberFlags: true, call: true, reg: regInfo{ - clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + clobbers: 2147483631, // AX CX DX BX 
BP SI DI R8 R9 R10 R11 R12 R13 g R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, { @@ -13214,9 +13214,9 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 4}, // DX - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, - clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 g R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, { @@ -13227,9 +13227,9 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, - clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 g R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, { @@ -13272,7 +13272,7 @@ var opcodeTable = [...]opInfo{ argLen: 1, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -13292,7 +13292,7 @@ var opcodeTable = [...]opInfo{ rematerializeable: true, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -13302,7 +13302,7 @@ var opcodeTable = [...]opInfo{ rematerializeable: true, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -13314,7 +13314,7 @@ var opcodeTable = [...]opInfo{ faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -13340,7 +13340,7 @@ var opcodeTable = [...]opInfo{ symEffect: SymNone, reg: regInfo{ outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -13414,10 +13414,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -13430,10 +13430,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -13446,10 +13446,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX 
BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -13464,11 +13464,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AXCHGB, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -13483,11 +13483,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AXCHGL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -13502,11 +13502,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AXCHGQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -13522,11 +13522,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AXADDL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -13542,11 +13542,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AXADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -13572,13 +13572,13 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 1}, // AX - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, clobbers: 1, // AX outputs: []outputInfo{ {1, 0}, - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -13594,13 +13594,13 @@ var opcodeTable = [...]opInfo{ 
reg: regInfo{ inputs: []inputInfo{ {1, 1}, // AX - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {2, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, clobbers: 1, // AX outputs: []outputInfo{ {1, 0}, - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, @@ -13615,8 +13615,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDB, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13631,8 +13631,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13647,8 +13647,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORB, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13663,8 +13663,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -36173,8 +36173,8 @@ var registersAMD64 = [...]Register{ {11, x86.REG_R11, 10, "R11"}, {12, x86.REG_R12, 11, "R12"}, {13, x86.REG_R13, 12, "R13"}, - {14, x86.REG_R14, 13, "R14"}, - {15, x86.REG_R15, 14, "R15"}, + {14, x86.REGG, -1, "g"}, + {15, x86.REG_R15, 13, "R15"}, {16, x86.REG_X0, -1, "X0"}, {17, x86.REG_X1, -1, "X1"}, {18, x86.REG_X2, -1, "X2"}, @@ -36193,7 +36193,7 @@ var registersAMD64 = [...]Register{ {31, x86.REG_X15, -1, "X15"}, {32, 0, -1, "SB"}, } -var gpRegMaskAMD64 = regMask(65519) +var gpRegMaskAMD64 = regMask(49135) var fpRegMaskAMD64 = regMask(2147418112) var specialRegMaskAMD64 = regMask(2147483648) var framepointerRegAMD64 = int8(5) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 6087874fa9270..03498c719c095 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -4,6 +4,7 @@ package ssa import "math" +import "cmd/compile/internal/base" import "cmd/compile/internal/types" func rewriteValueAMD64(v *Value) bool { @@ -767,8 +768,7 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64LoweredGetClosurePtr return true case OpGetG: - v.Op = OpAMD64LoweredGetG - return true + return rewriteValueAMD64_OpGetG(v) case OpHasCPUFeature: return rewriteValueAMD64_OpHasCPUFeature(v) case OpHmul32: @@ -30126,6 +30126,22 @@ func rewriteValueAMD64_OpFloor(v *Value) 
bool { return true } } +func rewriteValueAMD64_OpGetG(v *Value) bool { + v_0 := v.Args[0] + // match: (GetG mem) + // cond: !base.Flag.ABIWrap + // result: (LoweredGetG mem) + for { + mem := v_0 + if !(!base.Flag.ABIWrap) { + break + } + v.reset(OpAMD64LoweredGetG) + v.AddArg(mem) + return true + } + return false +} func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool { b := v.Block typ := &b.Func.Config.Types diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s index a9456dc9ff43d..9f15990b137cd 100644 --- a/src/runtime/asm_amd64.s +++ b/src/runtime/asm_amd64.s @@ -262,6 +262,7 @@ TEXT runtime·gogo(SB), NOSPLIT, $16-8 MOVQ 0(DX), CX // make sure g != nil get_tls(CX) MOVQ DX, g(CX) + MOVQ DX, R14 // set the g register MOVQ gobuf_sp(BX), SP // restore SP MOVQ gobuf_ret(BX), AX MOVQ gobuf_ctxt(BX), DX @@ -298,6 +299,7 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-8 MOVQ $runtime·badmcall(SB), AX JMP AX MOVQ SI, g(CX) // g = m->g0 + MOVQ SI, R14 // set the g register MOVQ (g_sched+gobuf_sp)(SI), SP // sp = m->g0->sched.sp PUSHQ AX MOVQ DI, DX @@ -344,6 +346,7 @@ TEXT runtime·systemstack(SB), NOSPLIT, $0-8 // switch to g0 MOVQ DX, g(CX) + MOVQ DX, R14 // set the g register MOVQ (g_sched+gobuf_sp)(DX), BX // make it look like mstart called systemstack on g0, to stop traceback SUBQ $8, BX @@ -824,6 +827,7 @@ settls: TEXT setg_gcc<>(SB),NOSPLIT,$0 get_tls(AX) MOVQ DI, g(AX) + MOVQ DI, R14 // set the g register RET TEXT runtime·abort(SB),NOSPLIT,$0-0 @@ -1368,24 +1372,24 @@ TEXT runtime·addmoduledata(SB),NOSPLIT,$0-0 // It clobbers FLAGS. It does not clobber any general-purpose registers, // but may clobber others (e.g., SSE registers). // Defined as ABIInternal since it does not use the stack-based Go ABI. -TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$120 +TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$112 // Save the registers clobbered by the fast path. This is slightly // faster than having the caller spill these. - MOVQ R14, 104(SP) - MOVQ R13, 112(SP) + MOVQ R12, 96(SP) + MOVQ R13, 104(SP) // TODO: Consider passing g.m.p in as an argument so they can be shared // across a sequence of write barriers. get_tls(R13) MOVQ g(R13), R13 MOVQ g_m(R13), R13 MOVQ m_p(R13), R13 - MOVQ (p_wbBuf+wbBuf_next)(R13), R14 + MOVQ (p_wbBuf+wbBuf_next)(R13), R12 // Increment wbBuf.next position. - LEAQ 16(R14), R14 - MOVQ R14, (p_wbBuf+wbBuf_next)(R13) - CMPQ R14, (p_wbBuf+wbBuf_end)(R13) + LEAQ 16(R12), R12 + MOVQ R12, (p_wbBuf+wbBuf_next)(R13) + CMPQ R12, (p_wbBuf+wbBuf_end)(R13) // Record the write. - MOVQ AX, -16(R14) // Record value + MOVQ AX, -16(R12) // Record value // Note: This turns bad pointer writes into bad // pointer reads, which could be confusing. We could avoid // reading from obviously bad pointers, which would @@ -1393,12 +1397,12 @@ TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$120 // patch this up in the signal handler, or use XCHG to // combine the read and the write. MOVQ (DI), R13 - MOVQ R13, -8(R14) // Record *slot + MOVQ R13, -8(R12) // Record *slot // Is the buffer full? (flags set in CMPQ above) JEQ flush ret: - MOVQ 104(SP), R14 - MOVQ 112(SP), R13 + MOVQ 96(SP), R12 + MOVQ 104(SP), R13 // Do the write. 
	MOVQ	AX, (DI)
	RET

@@ -1428,10 +1432,10 @@ flush:
	MOVQ	R9, 64(SP)
	MOVQ	R10, 72(SP)
	MOVQ	R11, 80(SP)
-	MOVQ	R12, 88(SP)
+	// R12 already saved
	// R13 already saved
-	// R14 already saved
-	MOVQ	R15, 96(SP)
+	// R14 is g
+	MOVQ	R15, 88(SP)

	// This takes arguments DI and AX
	CALL	runtime·wbBufFlush(SB)
@@ -1447,8 +1451,7 @@ flush:
	MOVQ	64(SP), R9
	MOVQ	72(SP), R10
	MOVQ	80(SP), R11
-	MOVQ	88(SP), R12
-	MOVQ	96(SP), R15
+	MOVQ	88(SP), R15
	JMP	ret

// gcWriteBarrierCX is gcWriteBarrier, but with args in DI and CX.
diff --git a/src/runtime/race_amd64.s b/src/runtime/race_amd64.s
index fd41b5690a64c..7f97025c1add8 100644
--- a/src/runtime/race_amd64.s
+++ b/src/runtime/race_amd64.s
@@ -452,12 +452,13 @@ rest:
	PUSHQ	R15
	// Set g = g0.
	get_tls(R12)
-	MOVQ	g(R12), R13
-	MOVQ	g_m(R13), R14
-	MOVQ	m_g0(R14), R15
+	MOVQ	g(R12), R14
+	MOVQ	g_m(R14), R13
+	MOVQ	m_g0(R13), R15
	CMPQ	R13, R15
	JEQ	noswitch	// branch if already on g0
	MOVQ	R15, g(R12)	// g = m->g0
+	MOVQ	R15, R14	// set g register
	PUSHQ	RARG1	// func arg
	PUSHQ	RARG0	// func arg
	CALL	runtime·racecallback(SB)
diff --git a/src/runtime/sys_linux_amd64.s b/src/runtime/sys_linux_amd64.s
index 37cb8dad0369e..b0a201fc6f70c 100644
--- a/src/runtime/sys_linux_amd64.s
+++ b/src/runtime/sys_linux_amd64.s
@@ -632,6 +632,7 @@ nog1:
	get_tls(CX)
	MOVQ	R13, g_m(R9)
	MOVQ	R9, g(CX)
+	MOVQ	R9, R14	// set g register
	CALL	runtime·stackcheck(SB)
nog2:
From 22f9e1ccbc9db9a1d9ecbadca972264e5ad2f169 Mon Sep 17 00:00:00 2001
From: Cherry Zhang
Date: Thu, 4 Feb 2021 11:41:34 -0500
Subject: [PATCH 451/474] [dev.regabi] runtime: initialize special registers before sigpanic

In case we are panicking in ABI0 context or external code, special
registers are not initialized. Initialize them in injected code before
calling sigpanic.

TODO: Windows, Plan 9.

Change-Id: I0919b80e7cc55463f3dd94f1f63cba305717270a
Reviewed-on: https://go-review.googlesource.com/c/go/+/289710
Trust: Cherry Zhang
Run-TryBot: Cherry Zhang
TryBot-Result: Go Bot
Reviewed-by: Jeremy Faller
Reviewed-by: Michael Knyszek
---
 src/runtime/asm.s           |  5 +++++
 src/runtime/asm_amd64.s     | 12 ++++++++++++
 src/runtime/signal_amd64.go |  7 +++++--
 src/runtime/stubs.go        |  4 ++++
 4 files changed, 26 insertions(+), 2 deletions(-)

diff --git a/src/runtime/asm.s b/src/runtime/asm.s
index 27d8df9e063ec..72c744925d89a 100644
--- a/src/runtime/asm.s
+++ b/src/runtime/asm.s
@@ -11,3 +11,8 @@ DATA runtime·no_pointers_stackmap+0x00(SB)/4, $2
 DATA runtime·no_pointers_stackmap+0x04(SB)/4, $0
 GLOBL runtime·no_pointers_stackmap(SB),RODATA, $8
+
+#ifndef GOARCH_amd64
+TEXT ·sigpanic0(SB),NOSPLIT,$0-0
+	JMP	·sigpanic(SB)
+#endif
diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s
index 9f15990b137cd..83c08a52f742c 100644
--- a/src/runtime/asm_amd64.s
+++ b/src/runtime/asm_amd64.s
@@ -1364,6 +1364,18 @@ TEXT runtime·addmoduledata(SB),NOSPLIT,$0-0
	POPQ	R15
	RET

+// Initialize special registers then jump to sigpanic.
+// This function is injected from the signal handler for panicking
+// signals. It is quite painful to set X15 in the signal context,
+// so we do it here.
+TEXT ·sigpanic0(SB),NOSPLIT,$0-0
+#ifdef GOEXPERIMENT_REGABI
+	get_tls(R14)
+	MOVQ	g(R14), R14
+	XORPS	X15, X15
+#endif
+	JMP	·sigpanic(SB)
+
// gcWriteBarrier performs a heap pointer write and informs the GC.
//
// gcWriteBarrier does NOT follow the Go ABI.
It takes two arguments: diff --git a/src/runtime/signal_amd64.go b/src/runtime/signal_amd64.go index 6ab1f758c2293..3eeb5e044ff0f 100644 --- a/src/runtime/signal_amd64.go +++ b/src/runtime/signal_amd64.go @@ -65,11 +65,14 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) { pc := uintptr(c.rip()) sp := uintptr(c.rsp()) + // In case we are panicking from external code, we need to initialize + // Go special registers. We inject sigpanic0 (instead of sigpanic), + // which takes care of that. if shouldPushSigpanic(gp, pc, *(*uintptr)(unsafe.Pointer(sp))) { - c.pushCall(funcPC(sigpanic), pc) + c.pushCall(funcPC(sigpanic0), pc) } else { // Not safe to push the call. Just clobber the frame. - c.set_rip(uint64(funcPC(sigpanic))) + c.set_rip(uint64(funcPC(sigpanic0))) } } diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go index 36bbc8991ac0f..3d1e0c0bb4dd7 100644 --- a/src/runtime/stubs.go +++ b/src/runtime/stubs.go @@ -356,3 +356,7 @@ func duffcopy() // Called from linker-generated .initarray; declared for go vet; do NOT call from Go. func addmoduledata() + +// Injected by the signal handler for panicking signals. On many platforms it just +// jumps to sigpanic. +func sigpanic0() From 2e60c00f56cdab9bb02e649e089b2ee5761acf26 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Thu, 4 Feb 2021 11:43:24 -0500 Subject: [PATCH 452/474] [dev.regabi] cmd/internal/obj/x86: use g register in stack bounds check In ABIInternal context, we can directly use the g register for stack bounds check. Change-Id: I8b1073a3343984a6cd76cf5734ddc4a8cd5dc73f Reviewed-on: https://go-review.googlesource.com/c/go/+/289711 Trust: Cherry Zhang Run-TryBot: Cherry Zhang TryBot-Result: Go Bot Reviewed-by: David Chase --- src/cmd/internal/obj/x86/obj6.go | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go index 1674db626fbb1..84de58a4c4b87 100644 --- a/src/cmd/internal/obj/x86/obj6.go +++ b/src/cmd/internal/obj/x86/obj6.go @@ -637,13 +637,19 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } } + var regg int16 if !p.From.Sym.NoSplit() || (p.From.Sym.Wrapper() && !p.From.Sym.ABIWrapper()) { - p = obj.Appendp(p, newprog) - p = load_g_cx(ctxt, p, newprog) // load g into CX + if ctxt.Arch.Family == sys.AMD64 && objabi.Regabi_enabled != 0 && cursym.ABI() == obj.ABIInternal { + regg = REGG // use the g register directly in ABIInternal + } else { + p = obj.Appendp(p, newprog) + p = load_g_cx(ctxt, p, newprog) // load g into CX + regg = REG_CX + } } if !cursym.Func().Text.From.Sym.NoSplit() { - p = stacksplit(ctxt, cursym, p, newprog, autoffset, int32(textarg)) // emit split check + p = stacksplit(ctxt, cursym, p, newprog, autoffset, int32(textarg), regg) // emit split check } // Delve debugger would like the next instruction to be noted as the end of the function prologue. 
@@ -695,7 +701,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
		// g._panic.argp = bottom-of-frame
		// }
		//
-		//	MOVQ	g_panic(CX), BX
+		//	MOVQ	g_panic(g), BX
		//	TESTQ	BX, BX
		//	JNE	checkargp
		// end:
@@ -718,7 +724,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
			p = obj.Appendp(p, newprog)
			p.As = AMOVQ
			p.From.Type = obj.TYPE_MEM
-			p.From.Reg = REG_CX
+			p.From.Reg = regg
			p.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // g_panic
			p.To.Type = obj.TYPE_REG
			p.To.Reg = REG_BX
@@ -969,9 +975,9 @@ func load_g_cx(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) *obj.Prog {
 // Append code to p to check for stack split.
 // Appends to (does not overwrite) p.
-// Assumes g is in CX.
+// Assumes g is in rg.
 // Returns last new instruction.
-func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgAlloc, framesize int32, textarg int32) *obj.Prog {
+func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgAlloc, framesize int32, textarg int32, rg int16) *obj.Prog {
	cmp := ACMPQ
	lea := ALEAQ
	mov := AMOVQ
@@ -993,7 +999,8 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA
		p.As = cmp
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SP
-		indir_cx(ctxt, &p.To)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Reg = rg
		p.To.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
		if cursym.CFunc() {
			p.To.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
@@ -1021,7 +1028,8 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA
		p.As = cmp
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_AX
-		indir_cx(ctxt, &p.To)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Reg = rg
		p.To.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
		if cursym.CFunc() {
			p.To.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
@@ -1047,7 +1055,8 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA
	p = obj.Appendp(p, newprog)

	p.As = mov
-	indir_cx(ctxt, &p.From)
+	p.From.Type = obj.TYPE_MEM
+	p.From.Reg = rg
	p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
	if cursym.CFunc() {
		p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
From 7b0dfb177f3ae81641af898bb5479256fb21fd5d Mon Sep 17 00:00:00 2001
From: Cherry Zhang
Date: Thu, 4 Feb 2021 12:40:04 -0500
Subject: [PATCH 453/474] [dev.regabi] runtime: use g register in some assembly functions on AMD64

Now that we have a g register, just use it.

Note: functions that can be called from ABI0 context (e.g. morestack)
are unchanged. Functions that switch g are also unchanged, because we
need to set the new g in both the register and TLS.

TODO: other OSes.

Change-Id: I692a82a7caa8417ff620a59676a6275f56747b94
Reviewed-on: https://go-review.googlesource.com/c/go/+/289718
Trust: Cherry Zhang
Run-TryBot: Cherry Zhang
TryBot-Result: Go Bot
Reviewed-by: Michael Knyszek
---
 src/runtime/asm_amd64.s       | 22 ++++++++++++++--------
 src/runtime/race_amd64.s      | 12 ++++++++++++
 src/runtime/sys_linux_amd64.s | 16 ++++++++++++++++
 3 files changed, 42 insertions(+), 8 deletions(-)

diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s
index 83c08a52f742c..93280eee4a6c5 100644
--- a/src/runtime/asm_amd64.s
+++ b/src/runtime/asm_amd64.s
@@ -585,18 +585,20 @@ TEXT runtime·jmpdefer(SB), NOSPLIT, $0-16
	MOVQ	0(DX), BX
	JMP	BX	// but first run the deferred function

-// Save state of caller into g->sched. Smashes R8, R9.
+// Save state of caller into g->sched. Smashes R9.
TEXT gosave<>(SB),NOSPLIT,$0 - get_tls(R8) - MOVQ g(R8), R8 +#ifndef GOEXPERIMENT_REGABI + get_tls(R14) + MOVQ g(R14), R14 +#endif MOVQ 0(SP), R9 - MOVQ R9, (g_sched+gobuf_pc)(R8) + MOVQ R9, (g_sched+gobuf_pc)(R14) LEAQ 8(SP), R9 - MOVQ R9, (g_sched+gobuf_sp)(R8) - MOVQ $0, (g_sched+gobuf_ret)(R8) - MOVQ BP, (g_sched+gobuf_bp)(R8) + MOVQ R9, (g_sched+gobuf_sp)(R14) + MOVQ $0, (g_sched+gobuf_ret)(R14) + MOVQ BP, (g_sched+gobuf_bp)(R14) // Assert ctxt is zero. See func save. - MOVQ (g_sched+gobuf_ctxt)(R8), R9 + MOVQ (g_sched+gobuf_ctxt)(R14), R9 TESTQ R9, R9 JZ 2(PC) CALL runtime·badctxt(SB) @@ -1391,9 +1393,13 @@ TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$112 MOVQ R13, 104(SP) // TODO: Consider passing g.m.p in as an argument so they can be shared // across a sequence of write barriers. +#ifdef GOEXPERIMENT_REGABI + MOVQ g_m(R14), R13 +#else get_tls(R13) MOVQ g(R13), R13 MOVQ g_m(R13), R13 +#endif MOVQ m_p(R13), R13 MOVQ (p_wbBuf+wbBuf_next)(R13), R12 // Increment wbBuf.next position. diff --git a/src/runtime/race_amd64.s b/src/runtime/race_amd64.s index 7f97025c1add8..c3b7bbfbfe2e8 100644 --- a/src/runtime/race_amd64.s +++ b/src/runtime/race_amd64.s @@ -146,8 +146,10 @@ TEXT runtime·racewriterangepc1(SB), NOSPLIT, $0-24 // If addr (RARG1) is out of range, do nothing. // Otherwise, setup goroutine context and invoke racecall. Other arguments already set. TEXT racecalladdr<>(SB), NOSPLIT, $0-0 +#ifndef GOEXPERIMENT_REGABI get_tls(R12) MOVQ g(R12), R14 +#endif MOVQ g_racectx(R14), RARG0 // goroutine context // Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend). CMPQ RARG1, runtime·racearenastart(SB) @@ -183,8 +185,10 @@ TEXT runtime·racefuncenter(SB), NOSPLIT, $0-8 // R11 = caller's return address TEXT racefuncenter<>(SB), NOSPLIT, $0-0 MOVQ DX, R15 // save function entry context (for closures) +#ifndef GOEXPERIMENT_REGABI get_tls(R12) MOVQ g(R12), R14 +#endif MOVQ g_racectx(R14), RARG0 // goroutine context MOVQ R11, RARG1 // void __tsan_func_enter(ThreadState *thr, void *pc); @@ -197,8 +201,10 @@ TEXT racefuncenter<>(SB), NOSPLIT, $0-0 // func runtime·racefuncexit() // Called from instrumented code. TEXT runtime·racefuncexit(SB), NOSPLIT, $0-0 +#ifndef GOEXPERIMENT_REGABI get_tls(R12) MOVQ g(R12), R14 +#endif MOVQ g_racectx(R14), RARG0 // goroutine context // void __tsan_func_exit(ThreadState *thr); MOVQ $__tsan_func_exit(SB), AX @@ -357,8 +363,10 @@ racecallatomic_data: JAE racecallatomic_ignore racecallatomic_ok: // Addr is within the good range, call the atomic function. +#ifndef GOEXPERIMENT_REGABI get_tls(R12) MOVQ g(R12), R14 +#endif MOVQ g_racectx(R14), RARG0 // goroutine context MOVQ 8(SP), RARG1 // caller pc MOVQ (SP), RARG2 // pc @@ -370,8 +378,10 @@ racecallatomic_ignore: // An attempt to synchronize on the address would cause crash. MOVQ AX, R15 // remember the original function MOVQ $__tsan_go_ignore_sync_begin(SB), AX +#ifndef GOEXPERIMENT_REGABI get_tls(R12) MOVQ g(R12), R14 +#endif MOVQ g_racectx(R14), RARG0 // goroutine context CALL racecall<>(SB) MOVQ R15, AX // restore the original function @@ -399,8 +409,10 @@ TEXT runtime·racecall(SB), NOSPLIT, $0-0 // Switches SP to g0 stack and calls (AX). Arguments already set. TEXT racecall<>(SB), NOSPLIT, $0-0 +#ifndef GOEXPERIMENT_REGABI get_tls(R12) MOVQ g(R12), R14 +#endif MOVQ g_m(R14), R13 // Switch to g0 stack. 
MOVQ SP, R12 // callee-saved, preserved across the CALL diff --git a/src/runtime/sys_linux_amd64.s b/src/runtime/sys_linux_amd64.s index b0a201fc6f70c..d48573c2c5922 100644 --- a/src/runtime/sys_linux_amd64.s +++ b/src/runtime/sys_linux_amd64.s @@ -215,9 +215,13 @@ TEXT runtime·walltime1(SB),NOSPLIT,$16-12 MOVQ SP, R12 // Save old SP; R12 unchanged by C code. +#ifdef GOEXPERIMENT_REGABI + MOVQ g_m(R14), BX // BX unchanged by C code. +#else get_tls(CX) MOVQ g(CX), AX MOVQ g_m(AX), BX // BX unchanged by C code. +#endif // Set vdsoPC and vdsoSP for SIGPROF traceback. // Save the old values on stack and restore them on exit, @@ -232,7 +236,11 @@ TEXT runtime·walltime1(SB),NOSPLIT,$16-12 MOVQ CX, m_vdsoPC(BX) MOVQ DX, m_vdsoSP(BX) +#ifdef GOEXPERIMENT_REGABI + CMPQ R14, m_curg(BX) // Only switch if on curg. +#else CMPQ AX, m_curg(BX) // Only switch if on curg. +#endif JNE noswitch MOVQ m_g0(BX), DX @@ -275,9 +283,13 @@ TEXT runtime·nanotime1(SB),NOSPLIT,$16-8 MOVQ SP, R12 // Save old SP; R12 unchanged by C code. +#ifdef GOEXPERIMENT_REGABI + MOVQ g_m(R14), BX // BX unchanged by C code. +#else get_tls(CX) MOVQ g(CX), AX MOVQ g_m(AX), BX // BX unchanged by C code. +#endif // Set vdsoPC and vdsoSP for SIGPROF traceback. // Save the old values on stack and restore them on exit, @@ -292,7 +304,11 @@ TEXT runtime·nanotime1(SB),NOSPLIT,$16-8 MOVQ CX, m_vdsoPC(BX) MOVQ DX, m_vdsoSP(BX) +#ifdef GOEXPERIMENT_REGABI + CMPQ R14, m_curg(BX) // Only switch if on curg. +#else CMPQ AX, m_curg(BX) // Only switch if on curg. +#endif JNE noswitch MOVQ m_g0(BX), DX From 618e3c15bdb5c031ac037e7ad5c1b3791a913226 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Thu, 4 Feb 2021 11:44:21 -0500 Subject: [PATCH 454/474] [dev.regabi] go/types: consistently report nil type as "untyped nil" This is a port of CL 284052 to go/types. The port is not entirely faithful, as untyped conversion has been refactored in go/types. Additionally, a comment was added to reference issue #13061 in the implicitType method. For #13061 Change-Id: Iec17611f6432c988624023d1d74121ff34eb0c83 Reviewed-on: https://go-review.googlesource.com/c/go/+/289715 Run-TryBot: Robert Findley TryBot-Result: Go Bot Trust: Robert Findley Trust: Robert Griesemer Reviewed-by: Robert Griesemer --- src/go/types/api_test.go | 64 +++++++++++++++++++++++++++++++------ src/go/types/conversions.go | 4 +-- src/go/types/expr.go | 2 ++ 3 files changed, 59 insertions(+), 11 deletions(-) diff --git a/src/go/types/api_test.go b/src/go/types/api_test.go index 75cebc9826112..dde451ee3cf76 100644 --- a/src/go/types/api_test.go +++ b/src/go/types/api_test.go @@ -42,7 +42,7 @@ func mustTypecheck(t *testing.T, path, source string, info *Info) string { return pkg.Name() } -func mayTypecheck(t *testing.T, path, source string, info *Info) string { +func mayTypecheck(t *testing.T, path, source string, info *Info) (string, error) { fset := token.NewFileSet() f, err := parser.ParseFile(fset, path, source, 0) if f == nil { // ignore errors unless f is nil @@ -52,8 +52,8 @@ func mayTypecheck(t *testing.T, path, source string, info *Info) string { Error: func(err error) {}, Importer: importer.Default(), } - pkg, _ := conf.Check(f.Name.Name, fset, []*ast.File{f}, info) - return pkg.Name() + pkg, err := conf.Check(f.Name.Name, fset, []*ast.File{f}, info) + return pkg.Name(), err } func TestValuesInfo(t *testing.T) { @@ -175,6 +175,9 @@ func TestValuesInfo(t *testing.T) { } func TestTypesInfo(t *testing.T) { + // Test sources that are not expected to typecheck must start with the broken prefix. 
+ const broken = "package broken_" + var tests = []struct { src string expr string // expression @@ -187,6 +190,39 @@ func TestTypesInfo(t *testing.T) { {`package b3; var x interface{} = 0i`, `0i`, `complex128`}, {`package b4; var x interface{} = "foo"`, `"foo"`, `string`}, + // uses of nil + {`package n0; var _ *int = nil`, `nil`, `untyped nil`}, + {`package n1; var _ func() = nil`, `nil`, `untyped nil`}, + {`package n2; var _ []byte = nil`, `nil`, `untyped nil`}, + {`package n3; var _ map[int]int = nil`, `nil`, `untyped nil`}, + {`package n4; var _ chan int = nil`, `nil`, `untyped nil`}, + {`package n5; var _ interface{} = nil`, `nil`, `untyped nil`}, + {`package n6; import "unsafe"; var _ unsafe.Pointer = nil`, `nil`, `untyped nil`}, + + {`package n10; var (x *int; _ = x == nil)`, `nil`, `untyped nil`}, + {`package n11; var (x func(); _ = x == nil)`, `nil`, `untyped nil`}, + {`package n12; var (x []byte; _ = x == nil)`, `nil`, `untyped nil`}, + {`package n13; var (x map[int]int; _ = x == nil)`, `nil`, `untyped nil`}, + {`package n14; var (x chan int; _ = x == nil)`, `nil`, `untyped nil`}, + {`package n15; var (x interface{}; _ = x == nil)`, `nil`, `untyped nil`}, + {`package n15; import "unsafe"; var (x unsafe.Pointer; _ = x == nil)`, `nil`, `untyped nil`}, + + {`package n20; var _ = (*int)(nil)`, `nil`, `untyped nil`}, + {`package n21; var _ = (func())(nil)`, `nil`, `untyped nil`}, + {`package n22; var _ = ([]byte)(nil)`, `nil`, `untyped nil`}, + {`package n23; var _ = (map[int]int)(nil)`, `nil`, `untyped nil`}, + {`package n24; var _ = (chan int)(nil)`, `nil`, `untyped nil`}, + {`package n25; var _ = (interface{})(nil)`, `nil`, `untyped nil`}, + {`package n26; import "unsafe"; var _ = unsafe.Pointer(nil)`, `nil`, `untyped nil`}, + + {`package n30; func f(*int) { f(nil) }`, `nil`, `untyped nil`}, + {`package n31; func f(func()) { f(nil) }`, `nil`, `untyped nil`}, + {`package n32; func f([]byte) { f(nil) }`, `nil`, `untyped nil`}, + {`package n33; func f(map[int]int) { f(nil) }`, `nil`, `untyped nil`}, + {`package n34; func f(chan int) { f(nil) }`, `nil`, `untyped nil`}, + {`package n35; func f(interface{}) { f(nil) }`, `nil`, `untyped nil`}, + {`package n35; import "unsafe"; func f(unsafe.Pointer) { f(nil) }`, `nil`, `untyped nil`}, + // comma-ok expressions {`package p0; var x interface{}; var _, _ = x.(int)`, `x.(int)`, @@ -268,17 +304,27 @@ func TestTypesInfo(t *testing.T) { }, // tests for broken code that doesn't parse or type-check - {`package x0; func _() { var x struct {f string}; x.f := 0 }`, `x.f`, `string`}, - {`package x1; func _() { var z string; type x struct {f string}; y := &x{q: z}}`, `z`, `string`}, - {`package x2; func _() { var a, b string; type x struct {f string}; z := &x{f: a; f: b;}}`, `b`, `string`}, - {`package x3; var x = panic("");`, `panic`, `func(interface{})`}, + {broken + `x0; func _() { var x struct {f string}; x.f := 0 }`, `x.f`, `string`}, + {broken + `x1; func _() { var z string; type x struct {f string}; y := &x{q: z}}`, `z`, `string`}, + {broken + `x2; func _() { var a, b string; type x struct {f string}; z := &x{f: a; f: b;}}`, `b`, `string`}, + {broken + `x3; var x = panic("");`, `panic`, `func(interface{})`}, {`package x4; func _() { panic("") }`, `panic`, `func(interface{})`}, - {`package x5; func _() { var x map[string][...]int; x = map[string][...]int{"": {1,2,3}} }`, `x`, `map[string][-1]int`}, + {broken + `x5; func _() { var x map[string][...]int; x = map[string][...]int{"": {1,2,3}} }`, `x`, `map[string][-1]int`}, } for _, test := range 
tests { info := Info{Types: make(map[ast.Expr]TypeAndValue)} - name := mayTypecheck(t, "TypesInfo", test.src, &info) + var name string + if strings.HasPrefix(test.src, broken) { + var err error + name, err = mayTypecheck(t, "TypesInfo", test.src, &info) + if err == nil { + t.Errorf("package %s: expected to fail but passed", name) + continue + } + } else { + name = mustTypecheck(t, "TypesInfo", test.src, &info) + } // look for expression type var typ Type diff --git a/src/go/types/conversions.go b/src/go/types/conversions.go index 1cab1cc70f0e8..c634d27aa9499 100644 --- a/src/go/types/conversions.go +++ b/src/go/types/conversions.go @@ -55,8 +55,8 @@ func (check *Checker) conversion(x *operand, T Type) { // - Keep untyped nil for untyped nil arguments. // - For integer to string conversions, keep the argument type. // (See also the TODO below.) - if IsInterface(T) || constArg && !isConstType(T) { - final = Default(x.typ) + if IsInterface(T) || constArg && !isConstType(T) || x.isNil() { + final = Default(x.typ) // default type of untyped nil is untyped nil } else if isInteger(x.typ) && isString(T) { final = x.typ } diff --git a/src/go/types/expr.go b/src/go/types/expr.go index eb2056125a7b7..f7fb0caeddb0e 100644 --- a/src/go/types/expr.go +++ b/src/go/types/expr.go @@ -579,6 +579,8 @@ func (check *Checker) implicitType(x *operand, target Type) Type { if !hasNil(target) { return nil } + // Preserve the type of nil as UntypedNil: see #13061. + return Typ[UntypedNil] default: return nil } From 11d15c171bd25337c1dde25a0f7ce4892cb894bb Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Thu, 4 Feb 2021 12:03:53 -0500 Subject: [PATCH 455/474] [dev.regabi] go/types: convert untyped arguments to delete This is a port of CL 285059 to go/types. The error assertion is updated to match go/types error for assignment, which has been improved. Change-Id: Icdd2751edea0abef7c84feadcbf9265d71239ade Reviewed-on: https://go-review.googlesource.com/c/go/+/289716 Run-TryBot: Robert Findley TryBot-Result: Go Bot Trust: Robert Findley Reviewed-by: Robert Griesemer --- src/go/types/builtins.go | 4 ++-- src/go/types/testdata/builtins.src | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/go/types/builtins.go b/src/go/types/builtins.go index fd35f7867696d..078ed4488d11b 100644 --- a/src/go/types/builtins.go +++ b/src/go/types/builtins.go @@ -353,8 +353,8 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b return } - if ok, code := x.assignableTo(check, m.key, nil); !ok { - check.invalidArg(x, code, "%s is not assignable to %s", x, m.key) + check.assignment(x, m.key, "argument to delete") + if x.mode == invalid { return } diff --git a/src/go/types/testdata/builtins.src b/src/go/types/testdata/builtins.src index 98830eb08c601..a7613adc35e1e 100644 --- a/src/go/types/testdata/builtins.src +++ b/src/go/types/testdata/builtins.src @@ -283,7 +283,7 @@ func delete1() { delete() // ERROR not enough arguments delete(1) // ERROR not enough arguments delete(1, 2, 3) // ERROR too many arguments - delete(m, 0 /* ERROR not assignable */) + delete(m, 0 /* ERROR cannot use */) delete(m, s) _ = delete /* ERROR used as value */ (m, s) From 813958f13cee9b2e7587f173e7a5e6cc9ff51850 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Thu, 4 Feb 2021 12:10:02 -0500 Subject: [PATCH 456/474] [dev.regabi] go/types: factor out sorting of methods This is a port of CL 285993 to go/types. 
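For illustration (not part of this CL), the sorted order is observable
through the public go/types API: methods passed to NewInterfaceType in
any order come back sorted by unique name.

    package main

    import (
    	"fmt"
    	"go/token"
    	"go/types"
    )

    func main() {
    	sig := types.NewSignature(nil, nil, nil, false)
    	b := types.NewFunc(token.NoPos, nil, "B", sig)
    	a := types.NewFunc(token.NoPos, nil, "A", sig)

    	// Construction order is B, A ...
    	iface := types.NewInterfaceType([]*types.Func{b, a}, nil)
    	iface.Complete()

    	// ... but the methods come back sorted: A, then B.
    	for i := 0; i < iface.NumMethods(); i++ {
    		fmt.Println(iface.Method(i).Name())
    	}
    }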
Change-Id: I7560cf1176fea5de2c54786a086e547c73294a60 Reviewed-on: https://go-review.googlesource.com/c/go/+/289717 Trust: Robert Findley Trust: Robert Griesemer Run-TryBot: Robert Findley TryBot-Result: Go Bot Reviewed-by: Robert Griesemer --- src/go/types/predicates.go | 6 ++---- src/go/types/type.go | 8 +++----- src/go/types/typexpr.go | 21 +++++++++++++++++++-- 3 files changed, 24 insertions(+), 11 deletions(-) diff --git a/src/go/types/predicates.go b/src/go/types/predicates.go index 148edbfb767a4..954a7ca987a8f 100644 --- a/src/go/types/predicates.go +++ b/src/go/types/predicates.go @@ -6,8 +6,6 @@ package types -import "sort" - func isNamed(typ Type) bool { if _, ok := typ.(*Basic); ok { return ok @@ -273,8 +271,8 @@ func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool { p = p.prev } if debug { - assert(sort.IsSorted(byUniqueMethodName(a))) - assert(sort.IsSorted(byUniqueMethodName(b))) + assertSortedMethods(a) + assertSortedMethods(b) } for i, f := range a { g := b[i] diff --git a/src/go/types/type.go b/src/go/types/type.go index 087cda429d689..66e194e967920 100644 --- a/src/go/types/type.go +++ b/src/go/types/type.go @@ -4,8 +4,6 @@ package types -import "sort" - // A Type represents a type of Go. // All types implement the Type interface. type Type interface { @@ -301,8 +299,8 @@ func NewInterfaceType(methods []*Func, embeddeds []Type) *Interface { } // sort for API stability - sort.Sort(byUniqueMethodName(methods)) - sort.Stable(byUniqueTypeName(embeddeds)) + sortMethods(methods) + sortTypes(embeddeds) typ.methods = methods typ.embeddeds = embeddeds @@ -396,7 +394,7 @@ func (t *Interface) Complete() *Interface { } if methods != nil { - sort.Sort(byUniqueMethodName(methods)) + sortMethods(methods) t.allMethods = methods } diff --git a/src/go/types/typexpr.go b/src/go/types/typexpr.go index 2b398010f4d04..311a970051ad4 100644 --- a/src/go/types/typexpr.go +++ b/src/go/types/typexpr.go @@ -518,8 +518,8 @@ func (check *Checker) interfaceType(ityp *Interface, iface *ast.InterfaceType, d } // sort for API stability - sort.Sort(byUniqueMethodName(ityp.methods)) - sort.Stable(byUniqueTypeName(ityp.embeddeds)) + sortMethods(ityp.methods) + sortTypes(ityp.embeddeds) check.later(func() { check.completeInterface(ityp) }) } @@ -613,6 +613,10 @@ func (check *Checker) completeInterface(ityp *Interface) { } } +func sortTypes(list []Type) { + sort.Stable(byUniqueTypeName(list)) +} + // byUniqueTypeName named type lists can be sorted by their unique type names. type byUniqueTypeName []Type @@ -627,6 +631,19 @@ func sortName(t Type) string { return "" } +func sortMethods(list []*Func) { + sort.Sort(byUniqueMethodName(list)) +} + +func assertSortedMethods(list []*Func) { + if !debug { + panic("internal error: assertSortedMethods called outside debug mode") + } + if !sort.IsSorted(byUniqueMethodName(list)) { + panic("internal error: methods not sorted") + } +} + // byUniqueMethodName method lists can be sorted by their unique method names. type byUniqueMethodName []*Func From c48d1503ba5d0f74bbc5cae5036bf225c6823a44 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Thu, 4 Feb 2021 12:24:10 -0500 Subject: [PATCH 457/474] [dev.regabi] go/types: report unused packages in source order This is a port of CL 287072 to go/types. 
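For illustration (not part of this CL), a hypothetical file like the
one below now reports its three errors in source order, and a renamed
import is called out with its local name:

    package p

    import (
    	"fmt"       // error: "fmt" imported but not used
    	m "math"    // error: "math" imported but not used as m
    	. "strings" // error: "strings" imported but not used
    )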
Change-Id: I08f56995f0323c1f238d1b44703a481d393471d5 Reviewed-on: https://go-review.googlesource.com/c/go/+/289720 Run-TryBot: Robert Findley TryBot-Result: Go Bot Trust: Robert Findley Trust: Robert Griesemer Reviewed-by: Robert Griesemer --- src/go/types/check.go | 36 +++++----- src/go/types/resolver.go | 67 +++++++++---------- .../testdata/importdecl0/importdecl0b.src | 2 +- .../testdata/importdecl1/importdecl1b.src | 2 +- src/go/types/typexpr.go | 8 +-- 5 files changed, 54 insertions(+), 61 deletions(-) diff --git a/src/go/types/check.go b/src/go/types/check.go index 280792e838ef2..03798587e74fe 100644 --- a/src/go/types/check.go +++ b/src/go/types/check.go @@ -69,6 +69,12 @@ type importKey struct { path, dir string } +// A dotImportKey describes a dot-imported object in the given scope. +type dotImportKey struct { + scope *Scope + obj Object +} + // A Checker maintains the state of the type checker. // It must be created with NewChecker. type Checker struct { @@ -86,8 +92,9 @@ type Checker struct { // information collected during type-checking of a set of package files // (initialized by Files, valid only for the duration of check.Files; // maps and lists are allocated on demand) - files []*ast.File // package files - unusedDotImports map[*Scope]map[*Package]*ast.ImportSpec // unused dot-imported packages + files []*ast.File // package files + imports []*PkgName // list of imported packages + dotImportMap map[dotImportKey]*PkgName // maps dot-imported objects to the package they were dot-imported through firstErr error // first error encountered methods map[*TypeName][]*Func // maps package scope type names to associated non-blank (non-interface) methods @@ -104,22 +111,6 @@ type Checker struct { indent int // indentation for tracing } -// addUnusedImport adds the position of a dot-imported package -// pkg to the map of dot imports for the given file scope. -func (check *Checker) addUnusedDotImport(scope *Scope, pkg *Package, spec *ast.ImportSpec) { - mm := check.unusedDotImports - if mm == nil { - mm = make(map[*Scope]map[*Package]*ast.ImportSpec) - check.unusedDotImports = mm - } - m := mm[scope] - if m == nil { - m = make(map[*Package]*ast.ImportSpec) - mm[scope] = m - } - m[pkg] = spec -} - // addDeclDep adds the dependency edge (check.decl -> to) if check.decl exists func (check *Checker) addDeclDep(to Object) { from := check.decl @@ -202,7 +193,8 @@ func NewChecker(conf *Config, fset *token.FileSet, pkg *Package, info *Info) *Ch func (check *Checker) initFiles(files []*ast.File) { // start with a clean slate (check.Files may be called multiple times) check.files = nil - check.unusedDotImports = nil + check.imports = nil + check.dotImportMap = nil check.firstErr = nil check.methods = nil @@ -272,10 +264,16 @@ func (check *Checker) checkFiles(files []*ast.File) (err error) { if !check.conf.DisableUnusedImportCheck { check.unusedImports() } + // no longer needed - release memory + check.imports = nil + check.dotImportMap = nil check.recordUntyped() check.pkg.complete = true + + // TODO(rFindley) There's more memory we should release at this point. 
+ return } diff --git a/src/go/types/resolver.go b/src/go/types/resolver.go index b637f8b8cacf7..cb66871883d90 100644 --- a/src/go/types/resolver.go +++ b/src/go/types/resolver.go @@ -275,21 +275,26 @@ func (check *Checker) collectObjects() { } } - obj := NewPkgName(d.spec.Pos(), pkg, name, imp) + pkgName := NewPkgName(d.spec.Pos(), pkg, name, imp) if d.spec.Name != nil { // in a dot-import, the dot represents the package - check.recordDef(d.spec.Name, obj) + check.recordDef(d.spec.Name, pkgName) } else { - check.recordImplicit(d.spec, obj) + check.recordImplicit(d.spec, pkgName) } if path == "C" { // match cmd/compile (not prescribed by spec) - obj.used = true + pkgName.used = true } // add import to file scope + check.imports = append(check.imports, pkgName) if name == "." { + // dot-import + if check.dotImportMap == nil { + check.dotImportMap = make(map[dotImportKey]*PkgName) + } // merge imported scope with file scope for _, obj := range imp.scope.elems { // A package scope may contain non-exported objects, @@ -303,16 +308,15 @@ func (check *Checker) collectObjects() { if alt := fileScope.Insert(obj); alt != nil { check.errorf(d.spec.Name, _DuplicateDecl, "%s redeclared in this block", obj.Name()) check.reportAltDecl(alt) + } else { + check.dotImportMap[dotImportKey{fileScope, obj}] = pkgName } } } - // add position to set of dot-import positions for this file - // (this is only needed for "imported but not used" errors) - check.addUnusedDotImport(fileScope, imp, d.spec) } else { // declare imported package object in file scope // (no need to provide s.Name since we called check.recordDef earlier) - check.declare(fileScope, nil, obj, token.NoPos) + check.declare(fileScope, nil, pkgName, token.NoPos) } case constDecl: // declare all constants @@ -566,39 +570,30 @@ func (check *Checker) unusedImports() { // any of its exported identifiers. To import a package solely for its side-effects // (initialization), use the blank identifier as explicit package name." - // check use of regular imported packages - for _, scope := range check.pkg.scope.children /* file scopes */ { - for _, obj := range scope.elems { - if obj, ok := obj.(*PkgName); ok { - // Unused "blank imports" are automatically ignored - // since _ identifiers are not entered into scopes. - if !obj.used { - path := obj.imported.path - base := pkgName(path) - if obj.name == base { - check.softErrorf(obj, _UnusedImport, "%q imported but not used", path) - } else { - check.softErrorf(obj, _UnusedImport, "%q imported but not used as %s", path, obj.name) - } - } - } - } - } - - // check use of dot-imported packages - for _, unusedDotImports := range check.unusedDotImports { - for pkg, pos := range unusedDotImports { - check.softErrorf(pos, _UnusedImport, "%q imported but not used", pkg.path) + for _, obj := range check.imports { + if !obj.used && obj.name != "_" { + check.errorUnusedPkg(obj) } } } -// pkgName returns the package name (last element) of an import path. -func pkgName(path string) string { - if i := strings.LastIndex(path, "/"); i >= 0 { - path = path[i+1:] +func (check *Checker) errorUnusedPkg(obj *PkgName) { + // If the package was imported with a name other than the final + // import path element, show it explicitly in the error message. + // Note that this handles both renamed imports and imports of + // packages containing unconventional package declarations. + // Note that this uses / always, even on Windows, because Go import + // paths always use forward slashes. 
+ path := obj.imported.path + elem := path + if i := strings.LastIndex(elem, "/"); i >= 0 { + elem = elem[i+1:] + } + if obj.name == "" || obj.name == "." || obj.name == elem { + check.softErrorf(obj, _UnusedImport, "%q imported but not used", path) + } else { + check.softErrorf(obj, _UnusedImport, "%q imported but not used as %s", path, obj.name) } - return path } // dir makes a good-faith attempt to return the directory diff --git a/src/go/types/testdata/importdecl0/importdecl0b.src b/src/go/types/testdata/importdecl0/importdecl0b.src index 6844e7098233e..55690423b66ec 100644 --- a/src/go/types/testdata/importdecl0/importdecl0b.src +++ b/src/go/types/testdata/importdecl0/importdecl0b.src @@ -8,7 +8,7 @@ import "math" import m "math" import . "testing" // declares T in file scope -import . /* ERROR "imported but not used" */ "unsafe" +import . /* ERROR .unsafe. imported but not used */ "unsafe" import . "fmt" // declares Println in file scope import ( diff --git a/src/go/types/testdata/importdecl1/importdecl1b.src b/src/go/types/testdata/importdecl1/importdecl1b.src index ee70bbd8e73f7..43a7bcd75396c 100644 --- a/src/go/types/testdata/importdecl1/importdecl1b.src +++ b/src/go/types/testdata/importdecl1/importdecl1b.src @@ -4,7 +4,7 @@ package importdecl1 -import . /* ERROR "imported but not used" */ "unsafe" +import . /* ERROR .unsafe. imported but not used */ "unsafe" type B interface { A diff --git a/src/go/types/typexpr.go b/src/go/types/typexpr.go index 311a970051ad4..6e89ccb02743b 100644 --- a/src/go/types/typexpr.go +++ b/src/go/types/typexpr.go @@ -51,12 +51,12 @@ func (check *Checker) ident(x *operand, e *ast.Ident, def *Named, wantType bool) } assert(typ != nil) - // The object may be dot-imported: If so, remove its package from - // the map of unused dot imports for the respective file scope. + // The object may have been dot-imported. + // If so, mark the respective package as used. // (This code is only needed for dot-imports. Without them, // we only have to mark variables, see *Var case below). - if pkg := obj.Pkg(); pkg != check.pkg && pkg != nil { - delete(check.unusedDotImports[scope], pkg) + if pkgName := check.dotImportMap[dotImportKey{scope, obj}]; pkgName != nil { + pkgName.used = true } switch obj := obj.(type) { From 493363ccff354ab5ed133f6d5fac942ba6cc034a Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Mon, 8 Feb 2021 18:24:13 -0500 Subject: [PATCH 458/474] [dev.regabi] go/types: must not import a package called "init" This is a port of CL 287494 to go/types. The additional checks in test/fixedbugs are included, though they won't be executed by go/types. Support for errorcheckdir checks will be added to go/types in a later CL. 
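For illustration (not part of this CL), the renamed-import form of the
error looks like this; importing a package that declares itself as
"package init" is exercised by the new test/fixedbugs/issue43962 files
below:

    package p

    import init "fmt" // error: cannot import package as init - init must be a func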
Change-Id: I37e202ea5daf7d7b8fc6ae93a4c4dbd11762480f Reviewed-on: https://go-review.googlesource.com/c/go/+/290570 Reviewed-by: Robert Griesemer Trust: Robert Findley Run-TryBot: Robert Findley TryBot-Result: Go Bot --- src/go/types/resolver.go | 25 ++++++++++--------- .../testdata/importdecl0/importdecl0a.src | 2 +- test/fixedbugs/issue43962.dir/a.go | 5 ++++ test/fixedbugs/issue43962.dir/b.go | 7 ++++++ test/fixedbugs/issue43962.go | 9 +++++++ 5 files changed, 35 insertions(+), 13 deletions(-) create mode 100644 test/fixedbugs/issue43962.dir/a.go create mode 100644 test/fixedbugs/issue43962.dir/b.go create mode 100644 test/fixedbugs/issue43962.go diff --git a/src/go/types/resolver.go b/src/go/types/resolver.go index cb66871883d90..47e165db368a1 100644 --- a/src/go/types/resolver.go +++ b/src/go/types/resolver.go @@ -252,14 +252,6 @@ func (check *Checker) collectObjects() { return } - // add package to list of explicit imports - // (this functionality is provided as a convenience - // for clients; it is not needed for type-checking) - if !pkgImports[imp] { - pkgImports[imp] = true - pkg.imports = append(pkg.imports, imp) - } - // local name overrides imported package name name := imp.name if d.spec.Name != nil { @@ -269,10 +261,19 @@ func (check *Checker) collectObjects() { check.errorf(d.spec.Name, _ImportCRenamed, `cannot rename import "C"`) return } - if name == "init" { - check.errorf(d.spec.Name, _InvalidInitDecl, "cannot declare init - must be func") - return - } + } + + if name == "init" { + check.errorf(d.spec.Name, _InvalidInitDecl, "cannot import package as init - init must be a func") + return + } + + // add package to list of explicit imports + // (this functionality is provided as a convenience + // for clients; it is not needed for type-checking) + if !pkgImports[imp] { + pkgImports[imp] = true + pkg.imports = append(pkg.imports, imp) } pkgName := NewPkgName(d.spec.Pos(), pkg, name, imp) diff --git a/src/go/types/testdata/importdecl0/importdecl0a.src b/src/go/types/testdata/importdecl0/importdecl0a.src index e96fca3cdd56f..5ceb96e1fada6 100644 --- a/src/go/types/testdata/importdecl0/importdecl0a.src +++ b/src/go/types/testdata/importdecl0/importdecl0a.src @@ -10,7 +10,7 @@ import ( // we can have multiple blank imports (was bug) _ "math" _ "net/rpc" - init /* ERROR "cannot declare init" */ "fmt" + init /* ERROR "cannot import package as init" */ "fmt" // reflect defines a type "flag" which shows up in the gc export data "reflect" . /* ERROR "imported but not used" */ "reflect" diff --git a/test/fixedbugs/issue43962.dir/a.go b/test/fixedbugs/issue43962.dir/a.go new file mode 100644 index 0000000000000..168b2063b4872 --- /dev/null +++ b/test/fixedbugs/issue43962.dir/a.go @@ -0,0 +1,5 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package init diff --git a/test/fixedbugs/issue43962.dir/b.go b/test/fixedbugs/issue43962.dir/b.go new file mode 100644 index 0000000000000..f55fea11c1934 --- /dev/null +++ b/test/fixedbugs/issue43962.dir/b.go @@ -0,0 +1,7 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package b + +import "./a" // ERROR "cannot import package as init" diff --git a/test/fixedbugs/issue43962.go b/test/fixedbugs/issue43962.go new file mode 100644 index 0000000000000..dca4d077d5133 --- /dev/null +++ b/test/fixedbugs/issue43962.go @@ -0,0 +1,9 @@ +// errorcheckdir + +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue 43962: Importing a package called "init" is an error. + +package ignored From 1c58fcf7ed917f66e2b7f77f251e7e63ca9630e2 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Mon, 8 Feb 2021 18:04:58 -0500 Subject: [PATCH 459/474] [dev.regabi] go/types: handle untyped constant arithmetic overflow This is a port of CL 287832 for go/types. It differs from that CL in its handling of position data. Unlike the syntax package, which has a unified Operation node, go/types checks operations for ast.UnaryExpr, IncDecStmt, and BinaryExpr. It was simpler to keep the existing position logic. Notably, this correctly puts the errors on the operator. Change-Id: Id1e3aefe863da225eb0a9b3628cfc8a5684c0c4f Reviewed-on: https://go-review.googlesource.com/c/go/+/290569 Run-TryBot: Robert Findley TryBot-Result: Go Bot Reviewed-by: Robert Griesemer Trust: Robert Findley --- src/go/types/expr.go | 133 +++++++++++++++++++------------ src/go/types/stdlib_test.go | 1 - src/go/types/testdata/const0.src | 7 ++ 3 files changed, 88 insertions(+), 53 deletions(-) diff --git a/src/go/types/expr.go b/src/go/types/expr.go index f7fb0caeddb0e..2741cc635dfab 100644 --- a/src/go/types/expr.go +++ b/src/go/types/expr.go @@ -78,13 +78,60 @@ func (check *Checker) op(m opPredicates, x *operand, op token.Token) bool { return true } +// overflow checks that the constant x is representable by its type. +// For untyped constants, it checks that the value doesn't become +// arbitrarily large. +func (check *Checker) overflow(x *operand, op token.Token, opPos token.Pos) { + assert(x.mode == constant_) + + what := "" // operator description, if any + if int(op) < len(op2str) { + what = op2str[op] + } + + if x.val.Kind() == constant.Unknown { + // TODO(gri) We should report exactly what went wrong. At the + // moment we don't have the (go/constant) API for that. + // See also TODO in go/constant/value.go. + check.errorf(atPos(opPos), _InvalidConstVal, "constant result is not representable") + return + } + + // Typed constants must be representable in + // their type after each constant operation. + if typ, ok := x.typ.Underlying().(*Basic); ok && isTyped(typ) { + check.representable(x, typ) + return + } + + // Untyped integer values must not grow arbitrarily. + const limit = 4 * 512 // 512 is the constant precision - we need more because old tests had no limits + if x.val.Kind() == constant.Int && constant.BitLen(x.val) > limit { + check.errorf(atPos(opPos), _InvalidConstVal, "constant %s overflow", what) + x.val = constant.MakeUnknown() + } +} + +// This is only used for operations that may cause overflow. +var op2str = [...]string{ + token.ADD: "addition", + token.SUB: "subtraction", + token.XOR: "bitwise XOR", + token.MUL: "multiplication", + token.SHL: "shift", +} + // The unary expression e may be nil. It's passed in for better error messages only. 
-func (check *Checker) unary(x *operand, e *ast.UnaryExpr, op token.Token) { - switch op { +func (check *Checker) unary(x *operand, e *ast.UnaryExpr) { + check.expr(x, e.X) + if x.mode == invalid { + return + } + switch e.Op { case token.AND: // spec: "As an exception to the addressability // requirement x may also be a composite literal." - if _, ok := unparen(x.expr).(*ast.CompositeLit); !ok && x.mode != variable { + if _, ok := unparen(e.X).(*ast.CompositeLit); !ok && x.mode != variable { check.invalidOp(x, _UnaddressableOperand, "cannot take address of %s", x) x.mode = invalid return @@ -111,26 +158,23 @@ func (check *Checker) unary(x *operand, e *ast.UnaryExpr, op token.Token) { return } - if !check.op(unaryOpPredicates, x, op) { + if !check.op(unaryOpPredicates, x, e.Op) { x.mode = invalid return } if x.mode == constant_ { - typ := x.typ.Underlying().(*Basic) - var prec uint - if isUnsigned(typ) { - prec = uint(check.conf.sizeof(typ) * 8) + if x.val.Kind() == constant.Unknown { + // nothing to do (and don't cause an error below in the overflow check) + return } - x.val = constant.UnaryOp(op, x.val, prec) - // Typed constants must be representable in - // their type after each constant operation. - if isTyped(typ) { - if e != nil { - x.expr = e // for better error message - } - check.representable(x, typ) + var prec uint + if isUnsigned(x.typ) { + prec = uint(check.conf.sizeof(x.typ) * 8) } + x.val = constant.UnaryOp(e.Op, x.val, prec) + x.expr = e + check.overflow(x, e.Op, x.Pos()) return } @@ -667,7 +711,8 @@ func (check *Checker) comparison(x, y *operand, op token.Token) { x.typ = Typ[UntypedBool] } -func (check *Checker) shift(x, y *operand, e *ast.BinaryExpr, op token.Token) { +// If e != nil, it must be the shift expression; it may be nil for non-constant shifts. +func (check *Checker) shift(x, y *operand, e ast.Expr, op token.Token) { untypedx := isUntyped(x.typ) var xval constant.Value @@ -735,14 +780,12 @@ func (check *Checker) shift(x, y *operand, e *ast.BinaryExpr, op token.Token) { } // x is a constant so xval != nil and it must be of Int kind. x.val = constant.Shift(xval, op, uint(s)) - // Typed constants must be representable in - // their type after each constant operation. - if isTyped(x.typ) { - if e != nil { - x.expr = e // for better error message - } - check.representable(x, x.typ.Underlying().(*Basic)) + x.expr = e + opPos := x.Pos() + if b, _ := e.(*ast.BinaryExpr); b != nil { + opPos = b.OpPos } + check.overflow(x, op, opPos) return } @@ -803,8 +846,9 @@ var binaryOpPredicates = opPredicates{ token.LOR: isBoolean, } -// The binary expression e may be nil. It's passed in for better error messages only. -func (check *Checker) binary(x *operand, e *ast.BinaryExpr, lhs, rhs ast.Expr, op token.Token, opPos token.Pos) { +// If e != nil, it must be the binary expression; it may be nil for non-constant expressions +// (when invoked for an assignment operation where the binary expression is implicit). 
+func (check *Checker) binary(x *operand, e ast.Expr, lhs, rhs ast.Expr, op token.Token, opPos token.Pos) { var y operand check.expr(x, lhs) @@ -879,30 +923,19 @@ func (check *Checker) binary(x *operand, e *ast.BinaryExpr, lhs, rhs ast.Expr, o } if x.mode == constant_ && y.mode == constant_ { - xval := x.val - yval := y.val - typ := x.typ.Underlying().(*Basic) + // if either x or y has an unknown value, the result is unknown + if x.val.Kind() == constant.Unknown || y.val.Kind() == constant.Unknown { + x.val = constant.MakeUnknown() + // x.typ is unchanged + return + } // force integer division of integer operands - if op == token.QUO && isInteger(typ) { + if op == token.QUO && isInteger(x.typ) { op = token.QUO_ASSIGN } - x.val = constant.BinaryOp(xval, op, yval) - // report error if valid operands lead to an invalid result - if xval.Kind() != constant.Unknown && yval.Kind() != constant.Unknown && x.val.Kind() == constant.Unknown { - // TODO(gri) We should report exactly what went wrong. At the - // moment we don't have the (go/constant) API for that. - // See also TODO in go/constant/value.go. - check.errorf(atPos(opPos), _InvalidConstVal, "constant result is not representable") - // TODO(gri) Should we mark operands with unknown values as invalid? - } - // Typed constants must be representable in - // their type after each constant operation. - if isTyped(typ) { - if e != nil { - x.expr = e // for better error message - } - check.representable(x, typ) - } + x.val = constant.BinaryOp(x.val, op, y.val) + x.expr = e + check.overflow(x, op, opPos) return } @@ -1538,11 +1571,7 @@ func (check *Checker) exprInternal(x *operand, e ast.Expr, hint Type) exprKind { } case *ast.UnaryExpr: - check.expr(x, e.X) - if x.mode == invalid { - goto Error - } - check.unary(x, e, e.Op) + check.unary(x, e) if x.mode == invalid { goto Error } diff --git a/src/go/types/stdlib_test.go b/src/go/types/stdlib_test.go index 63e31f3d74721..8a1e2905a7e1f 100644 --- a/src/go/types/stdlib_test.go +++ b/src/go/types/stdlib_test.go @@ -171,7 +171,6 @@ func TestStdFixed(t *testing.T) { testTestDir(t, filepath.Join(runtime.GOROOT(), "test", "fixedbugs"), "bug248.go", "bug302.go", "bug369.go", // complex test instructions - ignore "issue6889.go", // gc-specific test - "issue7746.go", // large constants - consumes too much memory "issue11362.go", // canonical import path check "issue16369.go", // go/types handles this correctly - not an issue "issue18459.go", // go/types doesn't check validity of //go:xxx directives diff --git a/src/go/types/testdata/const0.src b/src/go/types/testdata/const0.src index adbbf2863b03e..2916af54906ee 100644 --- a/src/go/types/testdata/const0.src +++ b/src/go/types/testdata/const0.src @@ -348,3 +348,10 @@ const _ = unsafe.Sizeof(func() { assert(one == 1) assert(iota == 0) }) + +// untyped constants must not get arbitrarily large +const ( + huge = 1<<1000 + _ = huge * huge * /* ERROR constant multiplication overflow */ huge + _ = huge << 1000 << /* ERROR constant shift overflow */ 1000 +) From 0a62067708938020e10b8142b4017edeac1b1f52 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Mon, 8 Feb 2021 21:53:29 -0500 Subject: [PATCH 460/474] [dev.regabi] go/types: adjust importer to match compiler importer This is an exact port of CL 288632 to go/types. 
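As a rough standalone sketch (not code from this CL) of the pattern the
importer now uses: an exact constant is built from a big.Int mantissa
and a base-2 exponent, then handed to go/constant:

    package main

    import (
    	"fmt"
    	"go/constant"
    	"math/big"
    )

    func main() {
    	var mant big.Int
    	mant.SetInt64(5) // the real importer decodes this from export data

    	var f big.Float
    	f.SetInt(&mant)
    	f.SetMantExp(&f, 10) // scale by 2**10: f == 5120

    	v := constant.Make(&f) // exact constant.Value backed by *big.Float
    	fmt.Println(v.Kind() == constant.Float, v) // true 5120
    }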
Change-Id: Ie46e13355bdd0713b392e042844bab8491a16504 Reviewed-on: https://go-review.googlesource.com/c/go/+/290629 Trust: Robert Findley Run-TryBot: Robert Findley TryBot-Result: Go Bot Reviewed-by: Robert Griesemer --- src/go/internal/gcimporter/iimport.go | 52 +++++++++++---------------- 1 file changed, 20 insertions(+), 32 deletions(-) diff --git a/src/go/internal/gcimporter/iimport.go b/src/go/internal/gcimporter/iimport.go index c59dd16533173..a3184e7641aa0 100644 --- a/src/go/internal/gcimporter/iimport.go +++ b/src/go/internal/gcimporter/iimport.go @@ -15,6 +15,7 @@ import ( "go/token" "go/types" "io" + "math/big" "sort" ) @@ -320,7 +321,9 @@ func (r *importReader) value() (typ types.Type, val constant.Value) { val = constant.MakeString(r.string()) case types.IsInteger: - val = r.mpint(b) + var x big.Int + r.mpint(&x, b) + val = constant.Make(&x) case types.IsFloat: val = r.mpfloat(b) @@ -365,8 +368,8 @@ func intSize(b *types.Basic) (signed bool, maxBytes uint) { return } -func (r *importReader) mpint(b *types.Basic) constant.Value { - signed, maxBytes := intSize(b) +func (r *importReader) mpint(x *big.Int, typ *types.Basic) { + signed, maxBytes := intSize(typ) maxSmall := 256 - maxBytes if signed { @@ -385,7 +388,8 @@ func (r *importReader) mpint(b *types.Basic) constant.Value { v = ^v } } - return constant.MakeInt64(v) + x.SetInt64(v) + return } v := -n @@ -395,39 +399,23 @@ func (r *importReader) mpint(b *types.Basic) constant.Value { if v < 1 || uint(v) > maxBytes { errorf("weird decoding: %v, %v => %v", n, signed, v) } - - buf := make([]byte, v) - io.ReadFull(&r.declReader, buf) - - // convert to little endian - // TODO(gri) go/constant should have a more direct conversion function - // (e.g., once it supports a big.Float based implementation) - for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 { - buf[i], buf[j] = buf[j], buf[i] - } - - x := constant.MakeFromBytes(buf) + b := make([]byte, v) + io.ReadFull(&r.declReader, b) + x.SetBytes(b) if signed && n&1 != 0 { - x = constant.UnaryOp(token.SUB, x, 0) + x.Neg(x) } - return x } -func (r *importReader) mpfloat(b *types.Basic) constant.Value { - x := r.mpint(b) - if constant.Sign(x) == 0 { - return x - } - - exp := r.int64() - switch { - case exp > 0: - x = constant.Shift(x, token.SHL, uint(exp)) - case exp < 0: - d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) - x = constant.BinaryOp(x, token.QUO, d) +func (r *importReader) mpfloat(typ *types.Basic) constant.Value { + var mant big.Int + r.mpint(&mant, typ) + var f big.Float + f.SetInt(&mant) + if f.Sign() != 0 { + f.SetMantExp(&f, int(r.int64())) } - return x + return constant.Make(&f) } func (r *importReader) ident() string { From 168d6a49a5ecbdd6a1eb039b2398c2821b3d3865 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Mon, 8 Feb 2021 22:37:48 -0500 Subject: [PATCH 461/474] [dev.regabi] go/types: use 512 bits as max. integer precision This is a port of CL 288633 to go/types. It differs from that CL in the implementation of opName, which now uses ast Exprs. Additionally, a couple tests had to be updated: + TestEvalArith is updated to not overflow. + stmt0.src is updated to have an error positioned on the '<<' operator. 
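For illustration (not part of this CL), the effect of the new limit on
a hypothetical source file:

    package p

    const big1 = 1 << 500 // ok: still within the 512-bit internal precision

    const (
    	_ = big1 * big1 // error: constant multiplication overflow (needs ~1000 bits)
    	_ = 1 << 1000   // error: constant shift overflow
    )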
Change-Id: I628357c33a1e7b0bb5bb7de5736f1fb10ce404e4 Reviewed-on: https://go-review.googlesource.com/c/go/+/290630 Trust: Robert Findley Run-TryBot: Robert Findley TryBot-Result: Go Bot Reviewed-by: Robert Griesemer --- src/go/types/eval_test.go | 2 +- src/go/types/expr.go | 46 +++++++++++++++++++++++------- src/go/types/stdlib_test.go | 1 - src/go/types/testdata/builtins.src | 10 +++---- src/go/types/testdata/const0.src | 16 +++++++---- src/go/types/testdata/const1.src | 18 ++++++++++-- src/go/types/testdata/stmt0.src | 2 +- 7 files changed, 69 insertions(+), 26 deletions(-) diff --git a/src/go/types/eval_test.go b/src/go/types/eval_test.go index d940bf0e80a9c..3a97ac0471620 100644 --- a/src/go/types/eval_test.go +++ b/src/go/types/eval_test.go @@ -76,7 +76,7 @@ func TestEvalArith(t *testing.T) { `false == false`, `12345678 + 87654321 == 99999999`, `10 * 20 == 200`, - `(1<<1000)*2 >> 100 == 2<<900`, + `(1<<500)*2 >> 100 == 2<<400`, `"foo" + "bar" == "foobar"`, `"abc" <= "bcd"`, `len([10]struct{}{}) == 2*5`, diff --git a/src/go/types/expr.go b/src/go/types/expr.go index 2741cc635dfab..5e1fe28a43aae 100644 --- a/src/go/types/expr.go +++ b/src/go/types/expr.go @@ -84,11 +84,6 @@ func (check *Checker) op(m opPredicates, x *operand, op token.Token) bool { func (check *Checker) overflow(x *operand, op token.Token, opPos token.Pos) { assert(x.mode == constant_) - what := "" // operator description, if any - if int(op) < len(op2str) { - what = op2str[op] - } - if x.val.Kind() == constant.Unknown { // TODO(gri) We should report exactly what went wrong. At the // moment we don't have the (go/constant) API for that. @@ -105,15 +100,37 @@ func (check *Checker) overflow(x *operand, op token.Token, opPos token.Pos) { } // Untyped integer values must not grow arbitrarily. - const limit = 4 * 512 // 512 is the constant precision - we need more because old tests had no limits - if x.val.Kind() == constant.Int && constant.BitLen(x.val) > limit { - check.errorf(atPos(opPos), _InvalidConstVal, "constant %s overflow", what) + const prec = 512 // 512 is the constant precision + if x.val.Kind() == constant.Int && constant.BitLen(x.val) > prec { + check.errorf(atPos(opPos), _InvalidConstVal, "constant %s overflow", opName(x.expr)) x.val = constant.MakeUnknown() } } +// opName returns the name of an operation, or the empty string. +// For now, only operations that might overflow are handled. +// TODO(gri) Expand this to a general mechanism giving names to +// nodes? +func opName(e ast.Expr) string { + switch e := e.(type) { + case *ast.BinaryExpr: + if int(e.Op) < len(op2str2) { + return op2str2[e.Op] + } + case *ast.UnaryExpr: + if int(e.Op) < len(op2str1) { + return op2str1[e.Op] + } + } + return "" +} + +var op2str1 = [...]string{ + token.XOR: "bitwise complement", +} + // This is only used for operations that may cause overflow. 
-var op2str = [...]string{ +var op2str2 = [...]string{ token.ADD: "addition", token.SUB: "subtraction", token.XOR: "bitwise XOR", @@ -763,8 +780,17 @@ func (check *Checker) shift(x, y *operand, e ast.Expr, op token.Token) { if x.mode == constant_ { if y.mode == constant_ { + // if either x or y has an unknown value, the result is unknown + if x.val.Kind() == constant.Unknown || y.val.Kind() == constant.Unknown { + x.val = constant.MakeUnknown() + // ensure the correct type - see comment below + if !isInteger(x.typ) { + x.typ = Typ[UntypedInt] + } + return + } // rhs must be within reasonable bounds in constant shifts - const shiftBound = 1023 - 1 + 52 // so we can express smallestFloat64 + const shiftBound = 1023 - 1 + 52 // so we can express smallestFloat64 (see issue #44057) s, ok := constant.Uint64Val(yval) if !ok || s > shiftBound { check.invalidOp(y, _InvalidShiftCount, "invalid shift count %s", y) diff --git a/src/go/types/stdlib_test.go b/src/go/types/stdlib_test.go index 8a1e2905a7e1f..71e14b85e5c98 100644 --- a/src/go/types/stdlib_test.go +++ b/src/go/types/stdlib_test.go @@ -175,7 +175,6 @@ func TestStdFixed(t *testing.T) { "issue16369.go", // go/types handles this correctly - not an issue "issue18459.go", // go/types doesn't check validity of //go:xxx directives "issue18882.go", // go/types doesn't check validity of //go:xxx directives - "issue20232.go", // go/types handles larger constants than gc "issue20529.go", // go/types does not have constraints on stack size "issue22200.go", // go/types does not have constraints on stack size "issue22200b.go", // go/types does not have constraints on stack size diff --git a/src/go/types/testdata/builtins.src b/src/go/types/testdata/builtins.src index a7613adc35e1e..6ee28f13b4291 100644 --- a/src/go/types/testdata/builtins.src +++ b/src/go/types/testdata/builtins.src @@ -514,7 +514,7 @@ func panic1() { panic("foo") panic(false) panic(1<<10) - panic(1 /* ERROR overflows */ <<1000) + panic(1 << /* ERROR constant shift overflow */ 1000) _ = panic /* ERROR used as value */ (0) var s []byte @@ -538,7 +538,7 @@ func print1() { print(2.718281828) print(false) print(1<<10) - print(1 /* ERROR overflows */ <<1000) + print(1 << /* ERROR constant shift overflow */ 1000) println(nil /* ERROR untyped nil */ ) var s []int @@ -564,7 +564,7 @@ func println1() { println(2.718281828) println(false) println(1<<10) - println(1 /* ERROR overflows */ <<1000) + println(1 << /* ERROR constant shift overflow */ 1000) println(nil /* ERROR untyped nil */ ) var s []int @@ -695,7 +695,7 @@ func Alignof1() { _ = unsafe.Alignof(42) _ = unsafe.Alignof(new(struct{})) _ = unsafe.Alignof(1<<10) - _ = unsafe.Alignof(1 /* ERROR overflows */ <<1000) + _ = unsafe.Alignof(1 << /* ERROR constant shift overflow */ 1000) _ = unsafe.Alignof(nil /* ERROR "untyped nil */ ) unsafe /* ERROR not used */ .Alignof(x) @@ -783,7 +783,7 @@ func Sizeof1() { _ = unsafe.Sizeof(42) _ = unsafe.Sizeof(new(complex128)) _ = unsafe.Sizeof(1<<10) - _ = unsafe.Sizeof(1 /* ERROR overflows */ <<1000) + _ = unsafe.Sizeof(1 << /* ERROR constant shift overflow */ 1000) _ = unsafe.Sizeof(nil /* ERROR untyped nil */ ) unsafe /* ERROR not used */ .Sizeof(x) diff --git a/src/go/types/testdata/const0.src b/src/go/types/testdata/const0.src index 2916af54906ee..5608b1549ba8d 100644 --- a/src/go/types/testdata/const0.src +++ b/src/go/types/testdata/const0.src @@ -350,8 +350,14 @@ const _ = unsafe.Sizeof(func() { }) // untyped constants must not get arbitrarily large -const ( - huge = 1<<1000 - _ = huge * huge * /* 
ERROR constant multiplication overflow */ huge - _ = huge << 1000 << /* ERROR constant shift overflow */ 1000 -) +const prec = 512 // internal maximum precision for integers +const maxInt = (1<<(prec/2) - 1) * (1<<(prec/2) + 1) // == 1<<prec - 1 From: Cherry Zhang Date: Fri, 5 Feb 2021 18:07:46 -0500 Subject: [PATCH 462/474] [dev.regabi] cmd/link: stop using ABI aliases if wrapper is enabled If ABI wrappers are enabled, we should not see ABI aliases at link time. Stop resolving them. One exception is shared linkage, where we still use ABI aliases as we don't always know the ABI for symbols from shared libraries. Change-Id: Ia89a788094382adeb4c4ef9b0312aa6e8c2f79ef Reviewed-on: https://go-review.googlesource.com/c/go/+/290032 TryBot-Result: Go Bot Reviewed-by: Than McIntosh Trust: Cherry Zhang Run-TryBot: Cherry Zhang --- src/cmd/link/internal/ld/lib.go | 8 +++++++- src/cmd/link/internal/loader/loader.go | 4 ++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index 71cef0b774ff8..314896824a03f 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -489,10 +489,16 @@ func (ctxt *Link) loadlib() { case 0: // nothing to do case 1, 2: - flags = loader.FlagStrictDups + flags |= loader.FlagStrictDups default: log.Fatalf("invalid -strictdups flag value %d", *FlagStrictDups) } + if !*flagAbiWrap || ctxt.linkShared { + // Use ABI aliases if ABI wrappers are not used. + // TODO: for now we still use ABI aliases in shared linkage, even if + // the wrapper is enabled. + flags |= loader.FlagUseABIAlias + } elfsetstring1 := func(str string, off int) { elfsetstring(ctxt, 0, str, off) } ctxt.loader = loader.NewLoader(flags, elfsetstring1, &ctxt.ErrorReporter.ErrorReporter) ctxt.ErrorReporter.SymName = func(s loader.Sym) string { diff --git a/src/cmd/link/internal/loader/loader.go b/src/cmd/link/internal/loader/loader.go index 971cc432ff54b..98c2131c2b43f 100644 --- a/src/cmd/link/internal/loader/loader.go +++ b/src/cmd/link/internal/loader/loader.go @@ -322,6 +322,7 @@ type extSymPayload struct { const ( // Loader.flags FlagStrictDups = 1 << iota + FlagUseABIAlias ) func NewLoader(flags uint32, elfsetstring elfsetstringFunc, reporter *ErrorReporter) *Loader { @@ -2270,6 +2271,9 @@ func abiToVer(abi uint16, localSymVersion int) int { // symbol. If the sym in question is not an alias, the sym itself is // returned. func (l *Loader) ResolveABIAlias(s Sym) Sym { + if l.flags&FlagUseABIAlias == 0 { + return s + } if s == 0 { return 0 } From baa6c75dcef23aa51e95bf7818b7ded5262fbaa8 Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Thu, 22 Oct 2020 16:02:14 +0000 Subject: [PATCH 463/474] [dev.regabi] internal/abi: add new internal/abi package for ABI constants This change creates a new internal std package internal/abi which is intended to hold constants with platform-specific values related to our ABI that are useful to different std packages, such as runtime and reflect. For #40724.
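As a rough sketch of how a std package might consume these constants (illustrative only: internal/... packages are importable only from inside the standard library, and regSpace is a hypothetical helper, not part of this CL):

	import "internal/abi"

	// regSpace reports how many argument registers the ABI provides.
	// Built without GOEXPERIMENT=regabi, abi_generic.go pins both counts
	// to zero, so everything degrades gracefully to the stack-based ABI0;
	// on amd64 with the experiment enabled it reports 9 and 15.
	func regSpace() (ints, floats int) {
		var regs abi.RegArgs
		return len(regs.Ints), len(regs.Floats)
	}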
Change-Id: Ie7ae7f687629cd3d613ba603e9371f0887601fe6 Reviewed-on: https://go-review.googlesource.com/c/go/+/272567 Trust: Michael Knyszek Run-TryBot: Michael Knyszek TryBot-Result: Go Bot Reviewed-by: Cherry Zhang Reviewed-by: David Chase Reviewed-by: Than McIntosh Reviewed-by: Austin Clements --- src/go/build/deps_test.go | 4 ++-- src/internal/abi/abi.go | 12 +++++++++++ src/internal/abi/abi_amd64.go | 20 +++++++++++++++++ src/internal/abi/abi_generic.go | 38 +++++++++++++++++++++++++++++++++ 4 files changed, 72 insertions(+), 2 deletions(-) create mode 100644 src/internal/abi/abi.go create mode 100644 src/internal/abi/abi_amd64.go create mode 100644 src/internal/abi/abi_generic.go diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go index c97c668cc4922..02b29f498a664 100644 --- a/src/go/build/deps_test.go +++ b/src/go/build/deps_test.go @@ -71,13 +71,13 @@ var depsRules = ` # No dependencies allowed for any of these packages. NONE < container/list, container/ring, - internal/cfg, internal/cpu, + internal/abi, internal/cfg, internal/cpu, internal/goversion, internal/nettrace, unicode/utf8, unicode/utf16, unicode, unsafe; # RUNTIME is the core runtime group of packages, all of them very light-weight. - internal/cpu, unsafe + internal/abi, internal/cpu, unsafe < internal/bytealg < internal/unsafeheader < runtime/internal/sys diff --git a/src/internal/abi/abi.go b/src/internal/abi/abi.go new file mode 100644 index 0000000000000..07ea51df8f614 --- /dev/null +++ b/src/internal/abi/abi.go @@ -0,0 +1,12 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package abi + +// RegArgs is a struct that has space for each argument +// and return value register on the current architecture. +type RegArgs struct { + Ints [IntArgRegs]uintptr + Floats [FloatArgRegs]uint64 +} diff --git a/src/internal/abi/abi_amd64.go b/src/internal/abi/abi_amd64.go new file mode 100644 index 0000000000000..6574d4216de3f --- /dev/null +++ b/src/internal/abi/abi_amd64.go @@ -0,0 +1,20 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build goexperiment.regabi + +package abi + +const ( + // See abi_generic.go. + + // RAX, RBX, RCX, RDI, RSI, R8, R9, R10, R11. + IntArgRegs = 9 + + // X0 -> X14. + FloatArgRegs = 15 + + // We use SSE2 registers which support 64-bit float operations. + EffectiveFloatRegSize = 8 +) diff --git a/src/internal/abi/abi_generic.go b/src/internal/abi/abi_generic.go new file mode 100644 index 0000000000000..5ef9883dc6e66 --- /dev/null +++ b/src/internal/abi/abi_generic.go @@ -0,0 +1,38 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !goexperiment.regabi + +package abi + +const ( + // ABI-related constants. + // + // In the generic case, these are all zero + // which lets them gracefully degrade to ABI0. + + // IntArgRegs is the number of registers dedicated + // to passing integer argument values. Result registers are identical + // to argument registers, so this number is used for those too. + IntArgRegs = 0 + + // FloatArgRegs is the number of registers dedicated + // to passing floating-point argument values. Result registers are + // identical to argument registers, so this number is used for + // those too. 
+ FloatArgRegs = 0 + + // EffectiveFloatRegSize describes the width of floating point + // registers on the current platform from the ABI's perspective. + // + // Since Go only supports 32-bit and 64-bit floating point primitives, + // this number should be either 0, 4, or 8. 0 indicates no floating + // point registers for the ABI or that floating point values will be + // passed via the softfloat ABI. + // + // For platforms that support larger floating point register widths, + // such as x87's 80-bit "registers" (not that we support x87 currently), + // use 8. + EffectiveFloatRegSize = 0 +) From 060fa49bd23d758a9062f4cb50e65960ec9662f1 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Wed, 10 Feb 2021 12:04:31 -0500 Subject: [PATCH 464/474] [dev.regabi] go/types: refuse excessively long constants This is a port of CL 289049 to go/types. In that CL, tests were written using the ability of tests/run.go to generate test packages dynamically. For this CL, similar functionality is added to the go/types errmap tests: tests are refactored to decouple the loading of source code from the filesystem, so that tests for long constants may be generated dynamically rather than checked-in as a large testdata file. Change-Id: I92c7cb61a8d42c6593570ef7ae0af86b501fa34e Reviewed-on: https://go-review.googlesource.com/c/go/+/290949 Trust: Robert Findley Trust: Robert Griesemer Run-TryBot: Robert Findley TryBot-Result: Go Bot Reviewed-by: Robert Griesemer --- src/go/types/check_test.go | 74 ++++++++++++++++++++++---------------- src/go/types/expr.go | 17 +++++++++ 2 files changed, 60 insertions(+), 31 deletions(-) diff --git a/src/go/types/check_test.go b/src/go/types/check_test.go index 47d749b3a331b..7292f7bcb2c3f 100644 --- a/src/go/types/check_test.go +++ b/src/go/types/check_test.go @@ -68,11 +68,11 @@ func splitError(err error) (pos, msg string) { return } -func parseFiles(t *testing.T, filenames []string) ([]*ast.File, []error) { +func parseFiles(t *testing.T, filenames []string, srcs [][]byte) ([]*ast.File, []error) { var files []*ast.File var errlist []error - for _, filename := range filenames { - file, err := parser.ParseFile(fset, filename, nil, parser.AllErrors) + for i, filename := range filenames { + file, err := parser.ParseFile(fset, filename, srcs[i], parser.AllErrors) if file == nil { t.Fatalf("%s: %s", filename, err) } @@ -101,19 +101,17 @@ var errRx = regexp.MustCompile(`^ *ERROR *(HERE)? *"?([^"]*)"?`) // errMap collects the regular expressions of ERROR comments found // in files and returns them as a map of error positions to error messages. // -func errMap(t *testing.T, testname string, files []*ast.File) map[string][]string { +// srcs must be a slice of the same length as files, containing the original +// source for the parsed AST. 
+func errMap(t *testing.T, files []*ast.File, srcs [][]byte) map[string][]string { // map of position strings to lists of error message patterns errmap := make(map[string][]string) - for _, file := range files { - filename := fset.Position(file.Package).Filename - src, err := os.ReadFile(filename) - if err != nil { - t.Fatalf("%s: could not read %s", testname, filename) - } - + for i, file := range files { + tok := fset.File(file.Package) + src := srcs[i] var s scanner.Scanner - s.Init(fset.AddFile(filename, -1, len(src)), src, nil, scanner.ScanComments) + s.Init(tok, src, nil, scanner.ScanComments) var prev token.Pos // position of last non-comment, non-semicolon token var here token.Pos // position immediately after the token at position prev @@ -190,13 +188,13 @@ func eliminate(t *testing.T, errmap map[string][]string, errlist []error) { } } -func checkFiles(t *testing.T, sources []string) { - if len(sources) == 0 { +func checkFiles(t *testing.T, filenames []string, srcs [][]byte) { + if len(filenames) == 0 { t.Fatal("no source files") } // parse files and collect parser errors - files, errlist := parseFiles(t, sources) + files, errlist := parseFiles(t, filenames, srcs) pkgName := "" if len(files) > 0 { @@ -214,11 +212,12 @@ func checkFiles(t *testing.T, sources []string) { var conf Config // special case for importC.src - if len(sources) == 1 && strings.HasSuffix(sources[0], "importC.src") { - conf.FakeImportC = true + if len(filenames) == 1 { + if strings.HasSuffix(filenames[0], "importC.src") { + conf.FakeImportC = true + } } - // TODO(rFindley) we may need to use the source importer when adding generics - // tests. + conf.Importer = importer.Default() conf.Error = func(err error) { if *haltOnError { @@ -253,7 +252,7 @@ func checkFiles(t *testing.T, sources []string) { // match and eliminate errors; // we are expecting the following errors - errmap := errMap(t, pkgName, files) + errmap := errMap(t, files, srcs) eliminate(t, errmap, errlist) // there should be no expected errors left @@ -274,7 +273,13 @@ func TestCheck(t *testing.T) { } testenv.MustHaveGoBuild(t) DefPredeclaredTestFuncs() - checkFiles(t, strings.Split(*testFiles, " ")) + testPkg(t, strings.Split(*testFiles, " ")) +} + +func TestLongConstants(t *testing.T) { + format := "package longconst\n\nconst _ = %s\nconst _ = %s // ERROR excessively long constant" + src := fmt.Sprintf(format, strings.Repeat("1", 9999), strings.Repeat("1", 10001)) + checkFiles(t, []string{"longconst.go"}, [][]byte{[]byte(src)}) } func TestTestdata(t *testing.T) { DefPredeclaredTestFuncs(); testDir(t, "testdata") } @@ -293,26 +298,33 @@ func testDir(t *testing.T, dir string) { path := filepath.Join(dir, fi.Name()) // if fi is a directory, its files make up a single package - var files []string + var filenames []string if fi.IsDir() { fis, err := ioutil.ReadDir(path) if err != nil { t.Error(err) continue } - files = make([]string, len(fis)) - for i, fi := range fis { - // if fi is a directory, checkFiles below will complain - files[i] = filepath.Join(path, fi.Name()) - if testing.Verbose() { - fmt.Printf("\t%s\n", files[i]) - } + for _, fi := range fis { + filenames = append(filenames, filepath.Join(path, fi.Name())) } } else { - files = []string{path} + filenames = []string{path} } t.Run(filepath.Base(path), func(t *testing.T) { - checkFiles(t, files) + testPkg(t, filenames) }) } } + +func testPkg(t *testing.T, filenames []string) { + srcs := make([][]byte, len(filenames)) + for i, filename := range filenames { + src, err := os.ReadFile(filename) + 
if err != nil { + t.Fatalf("could not read %s: %v", filename, err) + } + srcs[i] = src + } + checkFiles(t, filenames, srcs) +} diff --git a/src/go/types/expr.go b/src/go/types/expr.go index 5e1fe28a43aae..1a3c486af7ee8 100644 --- a/src/go/types/expr.go +++ b/src/go/types/expr.go @@ -1140,6 +1140,23 @@ func (check *Checker) exprInternal(x *operand, e ast.Expr, hint Type) exprKind { goto Error case *ast.BasicLit: + switch e.Kind { + case token.INT, token.FLOAT, token.IMAG: + // The max. mantissa precision for untyped numeric values + // is 512 bits, or 4048 bits for each of the two integer + // parts of a fraction for floating-point numbers that are + // represented accurately in the go/constant package. + // Constant literals that are longer than this many bits + // are not meaningful; and excessively long constants may + // consume a lot of space and time for a useless conversion. + // Cap constant length with a generous upper limit that also + // allows for separators between all digits. + const limit = 10000 + if len(e.Value) > limit { + check.errorf(e, _InvalidConstVal, "excessively long constant: %s... (%d chars)", e.Value[:10], len(e.Value)) + goto Error + } + } x.setConst(e.Kind, e.Value) if x.mode == invalid { // The parser already establishes syntactic correctness. From a7e9b4b94804a1fbefc0c012ec510f4ee0837ffa Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Thu, 11 Feb 2021 10:17:39 -0500 Subject: [PATCH 465/474] [dev.regabi] go/types: untyped shift counts must fit into uint This is a port of CL 283872 to go/types. It differs from that CL only in added error codes. For #43697 Change-Id: I62277834cef1c0359bcf2c6ee4388731babbc855 Reviewed-on: https://go-review.googlesource.com/c/go/+/291316 Trust: Robert Findley Trust: Robert Griesemer Run-TryBot: Robert Findley TryBot-Result: Go Bot Reviewed-by: Robert Griesemer --- src/go/types/expr.go | 26 ++++++++++++++++++-------- src/go/types/testdata/shifts.src | 12 +++++++----- 2 files changed, 25 insertions(+), 13 deletions(-) diff --git a/src/go/types/expr.go b/src/go/types/expr.go index 1a3c486af7ee8..7f8aaed411c03 100644 --- a/src/go/types/expr.go +++ b/src/go/types/expr.go @@ -730,14 +730,14 @@ func (check *Checker) comparison(x, y *operand, op token.Token) { // If e != nil, it must be the shift expression; it may be nil for non-constant shifts. func (check *Checker) shift(x, y *operand, e ast.Expr, op token.Token) { - untypedx := isUntyped(x.typ) + // TODO(gri) This function seems overly complex. Revisit. var xval constant.Value if x.mode == constant_ { xval = constant.ToInt(x.val) } - if isInteger(x.typ) || untypedx && xval != nil && xval.Kind() == constant.Int { + if isInteger(x.typ) || isUntyped(x.typ) && xval != nil && xval.Kind() == constant.Int { // The lhs is of integer type or an untyped constant representable // as an integer. Nothing to do. } else { @@ -749,16 +749,26 @@ func (check *Checker) shift(x, y *operand, e ast.Expr, op token.Token) { // spec: "The right operand in a shift expression must have integer type // or be an untyped constant representable by a value of type uint." - switch { - case isInteger(y.typ): - // nothing to do - case isUntyped(y.typ): + + // Provide a good error message for negative shift counts. 
+ if y.mode == constant_ { + yval := constant.ToInt(y.val) // consider -1, 1.0, but not -1.1 + if yval.Kind() == constant.Int && constant.Sign(yval) < 0 { + check.invalidOp(y, _InvalidShiftCount, "negative shift count %s", y) + x.mode = invalid + return + } + } + + // Caution: Check for isUntyped first because isInteger includes untyped + // integers (was bug #43697). + if isUntyped(y.typ) { check.convertUntyped(y, Typ[Uint]) if y.mode == invalid { x.mode = invalid return } - default: + } else if !isInteger(y.typ) { check.invalidOp(y, _InvalidShiftCount, "shift count %s must be integer", y) x.mode = invalid return @@ -816,7 +826,7 @@ func (check *Checker) shift(x, y *operand, e ast.Expr, op token.Token) { } // non-constant shift with constant lhs - if untypedx { + if isUntyped(x.typ) { // spec: "If the left operand of a non-constant shift // expression is an untyped constant, the type of the // constant is what it would be if the shift expression diff --git a/src/go/types/testdata/shifts.src b/src/go/types/testdata/shifts.src index c9a38ae169ea0..4d3c59a50fd9b 100644 --- a/src/go/types/testdata/shifts.src +++ b/src/go/types/testdata/shifts.src @@ -20,7 +20,7 @@ func shifts0() { // This depends on the exact spec wording which is not // done yet. // TODO(gri) revisit and adjust when spec change is done - _ = 1<<- /* ERROR "truncated to uint" */ 1.0 + _ = 1<<- /* ERROR "negative shift count" */ 1.0 _ = 1<<1075 /* ERROR "invalid shift" */ _ = 2.0<<1 _ = 1<<1.0 @@ -60,11 +60,13 @@ func shifts1() { _ uint = 1 << u _ float32 = 1 /* ERROR "must be integer" */ << u - // for issue 14822 + // issue #14822 + _ = 1<<( /* ERROR "overflows uint" */ 1<<64) _ = 1<<( /* ERROR "invalid shift count" */ 1<<64-1) - _ = 1<<( /* ERROR "invalid shift count" */ 1<<64) - _ = u<<(1<<63) // valid - _ = u<<(1<<64) // valid + + // issue #43697 + _ = u<<( /* ERROR "overflows uint" */ 1<<64) + _ = u<<(1<<64-1) ) } From b81efb7ec4348951211058cf4fdfc045c75255d6 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Thu, 11 Feb 2021 10:23:41 -0500 Subject: [PATCH 466/474] [dev.regabi] go/types: add support for language version checking This is a port of CL 289509 to go/types. 
It differs from that CL in codes added to errors, to fit the new factoring of check_test.go, and to allow go/types to import regexp in deps_test.go For #31793 Change-Id: Ia9e4c7f5aac1493001189184227c2ebc79a76e77 Reviewed-on: https://go-review.googlesource.com/c/go/+/291317 Trust: Robert Findley Run-TryBot: Robert Findley TryBot-Result: Go Bot Reviewed-by: Robert Griesemer --- src/go/build/deps_test.go | 2 +- src/go/types/api.go | 7 +++ src/go/types/check.go | 32 ++++++++----- src/go/types/check_test.go | 36 +++++++++++--- src/go/types/expr.go | 5 ++ src/go/types/stdlib_test.go | 10 ++-- src/go/types/testdata/go1_12.src | 35 ++++++++++++++ src/go/types/version.go | 82 ++++++++++++++++++++++++++++++++ 8 files changed, 186 insertions(+), 23 deletions(-) create mode 100644 src/go/types/testdata/go1_12.src create mode 100644 src/go/types/version.go diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go index 02b29f498a664..3fea5ecf0d375 100644 --- a/src/go/build/deps_test.go +++ b/src/go/build/deps_test.go @@ -285,7 +285,7 @@ var depsRules = ` math/big, go/token < go/constant; - container/heap, go/constant, go/parser + container/heap, go/constant, go/parser, regexp < go/types; FMT diff --git a/src/go/types/api.go b/src/go/types/api.go index d625959817203..b5bbb2d97dc7e 100644 --- a/src/go/types/api.go +++ b/src/go/types/api.go @@ -101,6 +101,13 @@ type ImporterFrom interface { // A Config specifies the configuration for type checking. // The zero value for Config is a ready-to-use default configuration. type Config struct { + // GoVersion describes the accepted Go language version. The string + // must follow the format "go%d.%d" (e.g. "go1.12") or it must be + // empty; an empty string indicates the latest language version. + // If the format is invalid, invoking the type checker will cause a + // panic. + GoVersion string + // If IgnoreFuncBodies is set, function bodies are not // type-checked. 
IgnoreFuncBodies bool diff --git a/src/go/types/check.go b/src/go/types/check.go index 03798587e74fe..3bc8ee067c2b1 100644 --- a/src/go/types/check.go +++ b/src/go/types/check.go @@ -8,6 +8,7 @@ package types import ( "errors" + "fmt" "go/ast" "go/constant" "go/token" @@ -84,10 +85,11 @@ type Checker struct { fset *token.FileSet pkg *Package *Info - objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info - impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package - posMap map[*Interface][]token.Pos // maps interface types to lists of embedded interface positions - pkgCnt map[string]int // counts number of imported packages with a given name (for better error messages) + version version // accepted language version + objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info + impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package + posMap map[*Interface][]token.Pos // maps interface types to lists of embedded interface positions + pkgCnt map[string]int // counts number of imported packages with a given name (for better error messages) // information collected during type-checking of a set of package files // (initialized by Files, valid only for the duration of check.Files; @@ -176,15 +178,21 @@ func NewChecker(conf *Config, fset *token.FileSet, pkg *Package, info *Info) *Ch info = new(Info) } + version, err := parseGoVersion(conf.GoVersion) + if err != nil { + panic(fmt.Sprintf("invalid Go version %q (%v)", conf.GoVersion, err)) + } + return &Checker{ - conf: conf, - fset: fset, - pkg: pkg, - Info: info, - objMap: make(map[Object]*declInfo), - impMap: make(map[importKey]*Package), - posMap: make(map[*Interface][]token.Pos), - pkgCnt: make(map[string]int), + conf: conf, + fset: fset, + pkg: pkg, + Info: info, + version: version, + objMap: make(map[Object]*declInfo), + impMap: make(map[importKey]*Package), + posMap: make(map[*Interface][]token.Pos), + pkgCnt: make(map[string]int), } } diff --git a/src/go/types/check_test.go b/src/go/types/check_test.go index 7292f7bcb2c3f..ca7d926ca9039 100644 --- a/src/go/types/check_test.go +++ b/src/go/types/check_test.go @@ -47,7 +47,8 @@ import ( var ( haltOnError = flag.Bool("halt", false, "halt on error") listErrors = flag.Bool("errlist", false, "list errors") - testFiles = flag.String("files", "", "space-separated list of test files") + testFiles = flag.String("files", "", "comma-separated list of test files") + goVersion = flag.String("lang", "", "Go language version (e.g. \"go1.12\"") ) var fset = token.NewFileSet() @@ -188,7 +189,21 @@ func eliminate(t *testing.T, errmap map[string][]string, errlist []error) { } } -func checkFiles(t *testing.T, filenames []string, srcs [][]byte) { +// goVersionRx matches a Go version string using '_', e.g. "go1_12". +var goVersionRx = regexp.MustCompile(`^go[1-9][0-9]*_(0|[1-9][0-9]*)$`) + +// asGoVersion returns a regular Go language version string +// if s is a Go version string using '_' rather than '.' to +// separate the major and minor version numbers (e.g. "go1_12"). +// Otherwise it returns the empty string. 
+func asGoVersion(s string) string { + if goVersionRx.MatchString(s) { + return strings.Replace(s, "_", ".", 1) + } + return "" +} + +func checkFiles(t *testing.T, goVersion string, filenames []string, srcs [][]byte) { if len(filenames) == 0 { t.Fatal("no source files") } @@ -201,6 +216,11 @@ func checkFiles(t *testing.T, filenames []string, srcs [][]byte) { pkgName = files[0].Name.Name } + // if no Go version is given, consider the package name + if goVersion == "" { + goVersion = asGoVersion(pkgName) + } + if *listErrors && len(errlist) > 0 { t.Errorf("--- %s:", pkgName) for _, err := range errlist { @@ -210,6 +230,7 @@ func checkFiles(t *testing.T, filenames []string, srcs [][]byte) { // typecheck and collect typechecker errors var conf Config + conf.GoVersion = goVersion // special case for importC.src if len(filenames) == 1 { @@ -267,19 +288,20 @@ func checkFiles(t *testing.T, filenames []string, srcs [][]byte) { } // TestCheck is for manual testing of selected input files, provided with -files. +// The accepted Go language version can be controlled with the -lang flag. func TestCheck(t *testing.T) { if *testFiles == "" { return } testenv.MustHaveGoBuild(t) DefPredeclaredTestFuncs() - testPkg(t, strings.Split(*testFiles, " ")) + testPkg(t, strings.Split(*testFiles, ","), *goVersion) } func TestLongConstants(t *testing.T) { format := "package longconst\n\nconst _ = %s\nconst _ = %s // ERROR excessively long constant" src := fmt.Sprintf(format, strings.Repeat("1", 9999), strings.Repeat("1", 10001)) - checkFiles(t, []string{"longconst.go"}, [][]byte{[]byte(src)}) + checkFiles(t, "", []string{"longconst.go"}, [][]byte{[]byte(src)}) } func TestTestdata(t *testing.T) { DefPredeclaredTestFuncs(); testDir(t, "testdata") } @@ -312,12 +334,12 @@ func testDir(t *testing.T, dir string) { filenames = []string{path} } t.Run(filepath.Base(path), func(t *testing.T) { - testPkg(t, filenames) + testPkg(t, filenames, "") }) } } -func testPkg(t *testing.T, filenames []string) { +func testPkg(t *testing.T, filenames []string, goVersion string) { srcs := make([][]byte, len(filenames)) for i, filename := range filenames { src, err := os.ReadFile(filename) @@ -326,5 +348,5 @@ func testPkg(t *testing.T, filenames []string) { } srcs[i] = src } - checkFiles(t, filenames, srcs) + checkFiles(t, goVersion, filenames, srcs) } diff --git a/src/go/types/expr.go b/src/go/types/expr.go index 7f8aaed411c03..aec3172327877 100644 --- a/src/go/types/expr.go +++ b/src/go/types/expr.go @@ -772,6 +772,10 @@ func (check *Checker) shift(x, y *operand, e ast.Expr, op token.Token) { check.invalidOp(y, _InvalidShiftCount, "shift count %s must be integer", y) x.mode = invalid return + } else if !isUnsigned(y.typ) && !check.allowVersion(check.pkg, 1, 13) { + check.invalidOp(y, _InvalidShiftCount, "signed shift count %s requires go1.13 or later", y) + x.mode = invalid + return } var yval constant.Value @@ -1152,6 +1156,7 @@ func (check *Checker) exprInternal(x *operand, e ast.Expr, hint Type) exprKind { case *ast.BasicLit: switch e.Kind { case token.INT, token.FLOAT, token.IMAG: + check.langCompat(e) // The max. 
mantissa precision for untyped numeric values // is 512 bits, or 4048 bits for each of the two integer // parts of a fraction for floating-point numbers that are diff --git a/src/go/types/stdlib_test.go b/src/go/types/stdlib_test.go index 71e14b85e5c98..979785de956dd 100644 --- a/src/go/types/stdlib_test.go +++ b/src/go/types/stdlib_test.go @@ -106,6 +106,7 @@ func testTestDir(t *testing.T, path string, ignore ...string) { // get per-file instructions expectErrors := false filename := filepath.Join(path, f.Name()) + goVersion := "" if comment := firstComment(filename); comment != "" { fields := strings.Fields(comment) switch fields[0] { @@ -115,13 +116,17 @@ func testTestDir(t *testing.T, path string, ignore ...string) { expectErrors = true for _, arg := range fields[1:] { if arg == "-0" || arg == "-+" || arg == "-std" { - // Marked explicitly as not expected errors (-0), + // Marked explicitly as not expecting errors (-0), // or marked as compiling runtime/stdlib, which is only done // to trigger runtime/stdlib-only error output. // In both cases, the code should typecheck. expectErrors = false break } + const prefix = "-lang=" + if strings.HasPrefix(arg, prefix) { + goVersion = arg[len(prefix):] + } } } } @@ -129,7 +134,7 @@ func testTestDir(t *testing.T, path string, ignore ...string) { // parse and type-check file file, err := parser.ParseFile(fset, filename, nil, 0) if err == nil { - conf := Config{Importer: stdLibImporter} + conf := Config{GoVersion: goVersion, Importer: stdLibImporter} _, err = conf.Check(filename, fset, []*ast.File{file}, nil) } @@ -180,7 +185,6 @@ func TestStdFixed(t *testing.T) { "issue22200b.go", // go/types does not have constraints on stack size "issue25507.go", // go/types does not have constraints on stack size "issue20780.go", // go/types does not have constraints on stack size - "issue31747.go", // go/types does not have constraints on language level (-lang=go1.12) (see #31793) "issue34329.go", // go/types does not have constraints on language level (-lang=go1.13) (see #31793) "bug251.go", // issue #34333 which was exposed with fix for #34151 "issue42058a.go", // go/types does not have constraints on channel element size diff --git a/src/go/types/testdata/go1_12.src b/src/go/types/testdata/go1_12.src new file mode 100644 index 0000000000000..1e529f18be831 --- /dev/null +++ b/src/go/types/testdata/go1_12.src @@ -0,0 +1,35 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Check Go language version-specific errors. 
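+// +// The test harness derives the language version from the package name: +// a package named go1_12 is type-checked as Go 1.12 (see asGoVersion +// in check_test.go).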
+ +package go1_12 // go1.12 + +// numeric literals +const ( + _ = 1_000 // ERROR "underscores in numeric literals requires go1.13 or later" + _ = 0b111 // ERROR "binary literals requires go1.13 or later" + _ = 0o567 // ERROR "0o/0O-style octal literals requires go1.13 or later" + _ = 0xabc // ok + _ = 0x0p1 // ERROR "hexadecimal floating-point literals requires go1.13 or later" + + _ = 0B111 // ERROR "binary" + _ = 0O567 // ERROR "octal" + _ = 0Xabc // ok + _ = 0X0P1 // ERROR "hexadecimal floating-point" + + _ = 1_000i // ERROR "underscores" + _ = 0b111i // ERROR "binary" + _ = 0o567i // ERROR "octal" + _ = 0xabci // ERROR "hexadecimal floating-point" + _ = 0x0p1i // ERROR "hexadecimal floating-point" +) + +// signed shift counts +var ( + s int + _ = 1 << s // ERROR "invalid operation: signed shift count s \(variable of type int\) requires go1.13 or later" + _ = 1 >> s // ERROR "signed shift count" +) + diff --git a/src/go/types/version.go b/src/go/types/version.go new file mode 100644 index 0000000000000..154694169b49d --- /dev/null +++ b/src/go/types/version.go @@ -0,0 +1,82 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types + +import ( + "fmt" + "go/ast" + "go/token" + "regexp" + "strconv" + "strings" +) + +// langCompat reports an error if the representation of a numeric +// literal is not compatible with the current language version. +func (check *Checker) langCompat(lit *ast.BasicLit) { + s := lit.Value + if len(s) <= 2 || check.allowVersion(check.pkg, 1, 13) { + return + } + // len(s) > 2 + if strings.Contains(s, "_") { + check.errorf(lit, _InvalidLit, "underscores in numeric literals requires go1.13 or later") + return + } + if s[0] != '0' { + return + } + radix := s[1] + if radix == 'b' || radix == 'B' { + check.errorf(lit, _InvalidLit, "binary literals requires go1.13 or later") + return + } + if radix == 'o' || radix == 'O' { + check.errorf(lit, _InvalidLit, "0o/0O-style octal literals requires go1.13 or later") + return + } + if lit.Kind != token.INT && (radix == 'x' || radix == 'X') { + check.errorf(lit, _InvalidLit, "hexadecimal floating-point literals requires go1.13 or later") + } +} + +// allowVersion reports whether the given package +// is allowed to use version major.minor. +func (check *Checker) allowVersion(pkg *Package, major, minor int) bool { + // We assume that imported packages have all been checked, + // so we only have to check for the local package. + if pkg != check.pkg { + return true + } + ma, mi := check.version.major, check.version.minor + return ma == 0 && mi == 0 || ma > major || ma == major && mi >= minor +} + +type version struct { + major, minor int +} + +// parseGoVersion parses a Go version string (such as "go1.12") +// and returns the version, or an error. If s is the empty +// string, the version is 0.0. +func parseGoVersion(s string) (v version, err error) { + if s == "" { + return + } + matches := goVersionRx.FindStringSubmatch(s) + if matches == nil { + err = fmt.Errorf(`should be something like "go1.12"`) + return + } + v.major, err = strconv.Atoi(matches[1]) + if err != nil { + return + } + v.minor, err = strconv.Atoi(matches[2]) + return +} + +// goVersionRx matches a Go version string, e.g. "go1.12". 
+var goVersionRx = regexp.MustCompile(`^go([1-9][0-9]*)\.(0|[1-9][0-9]*)$`) From e0215315f51c62f6d2c5ea5ed7008b7e7963dd5d Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Thu, 22 Oct 2020 16:29:04 +0000 Subject: [PATCH 467/474] [dev.regabi] reflect: support for register ABI on amd64 for reflect.(Value).Call This change adds support for the new register ABI on amd64 to reflect.(Value).Call. If internal/abi's register counts are non-zero, reflect will try to set up arguments in registers on the Call path. Note that because the register ABI becomes ABI0 with zero registers available, this should keep working as it did before. This change does not add any tests for the register ABI case because there's no way to do so at the moment. For #40724. Change-Id: I8aa089a5aa5a31b72e56b3d9388dd3f82203985b Reviewed-on: https://go-review.googlesource.com/c/go/+/272568 Trust: Michael Knyszek Run-TryBot: Michael Knyszek TryBot-Result: Go Bot Reviewed-by: Cherry Zhang Reviewed-by: Than McIntosh --- src/go/build/deps_test.go | 6 +- src/internal/abi/abi.go | 45 +++- src/reflect/abi.go | 403 +++++++++++++++++++++++++++++++++ src/reflect/export_test.go | 12 +- src/reflect/makefunc.go | 10 +- src/reflect/type.go | 61 ++--- src/reflect/value.go | 249 +++++++++++++++----- src/runtime/asm_386.s | 23 +- src/runtime/asm_amd64.s | 100 ++++++-- src/runtime/asm_arm.s | 24 +- src/runtime/asm_arm64.s | 23 +- src/runtime/asm_mips64x.s | 23 +- src/runtime/asm_mipsx.s | 23 +- src/runtime/asm_ppc64x.s | 23 +- src/runtime/asm_riscv64.s | 27 +-- src/runtime/asm_s390x.s | 23 +- src/runtime/asm_wasm.s | 23 +- src/runtime/mbarrier.go | 10 +- src/runtime/mfinal.go | 7 +- src/runtime/panic.go | 13 +- src/runtime/stubs.go | 56 ++++- src/runtime/syscall_windows.go | 7 +- 22 files changed, 950 insertions(+), 241 deletions(-) create mode 100644 src/reflect/abi.go diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go index 3fea5ecf0d375..e5c849e8f53ea 100644 --- a/src/go/build/deps_test.go +++ b/src/go/build/deps_test.go @@ -71,11 +71,15 @@ var depsRules = ` # No dependencies allowed for any of these packages. NONE < container/list, container/ring, - internal/abi, internal/cfg, internal/cpu, + internal/cfg, internal/cpu, internal/goversion, internal/nettrace, unicode/utf8, unicode/utf16, unicode, unsafe; + # These packages depend only on unsafe. + unsafe + < internal/abi; + # RUNTIME is the core runtime group of packages, all of them very light-weight. internal/abi, internal/cpu, unsafe < internal/bytealg diff --git a/src/internal/abi/abi.go b/src/internal/abi/abi.go index 07ea51df8f614..6700facc04300 100644 --- a/src/internal/abi/abi.go +++ b/src/internal/abi/abi.go @@ -4,9 +4,50 @@ package abi +import "unsafe" + // RegArgs is a struct that has space for each argument // and return value register on the current architecture. +// +// Assembly code knows the layout of the first two fields +// of RegArgs. +// +// RegArgs also contains additional space to hold pointers +// when it may not be safe to keep them only in the integer +// register space otherwise. type RegArgs struct { - Ints [IntArgRegs]uintptr - Floats [FloatArgRegs]uint64 + Ints [IntArgRegs]uintptr // untyped integer registers + Floats [FloatArgRegs]uint64 // untyped float registers + + // Fields above this point are known to assembly. + + // Ptrs is a space that duplicates Ints but with pointer type, + // used to make pointers passed or returned in registers + // visible to the GC by making the type unsafe.Pointer. 
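+ // For example, when reflect assigns a pointer argument to integer + // register i, it stores the word in Ints[i] and duplicates it in + // Ptrs[i], so that entry may be the reference that keeps the + // pointee visible to stack scanning.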
+ Ptrs [IntArgRegs]unsafe.Pointer + + // ReturnIsPtr is a bitmap that indicates which registers + // contain or will contain pointers on the return path from + // a reflectcall. The i'th bit indicates whether the i'th + // register contains or will contain a valid Go pointer. + ReturnIsPtr IntArgRegBitmap +} + +// IntArgRegBitmap is a bitmap large enough to hold one bit per +// integer argument/return register. +type IntArgRegBitmap [(IntArgRegs + 7) / 8]uint8 + +// Set sets the i'th bit of the bitmap to 1. +func (b *IntArgRegBitmap) Set(i int) { + b[i/8] |= uint8(1) << (i % 8) +} + +// Get returns whether the i'th bit of the bitmap is set. +// +// nosplit because it's called in extremely sensitive contexts, like +// on the reflectcall return path. +// +//go:nosplit +func (b *IntArgRegBitmap) Get(i int) bool { + return b[i/8]&(uint8(1)<<(i%8)) != 0 } diff --git a/src/reflect/abi.go b/src/reflect/abi.go new file mode 100644 index 0000000000000..88af212717d23 --- /dev/null +++ b/src/reflect/abi.go @@ -0,0 +1,403 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package reflect + +import ( + "internal/abi" + "unsafe" +) + +// abiStep represents an ABI "instruction." Each instruction +// describes one part of how to translate between a Go value +// in memory and a call frame. +type abiStep struct { + kind abiStepKind + + // offset and size together describe a part of a Go value + // in memory. + offset uintptr + size uintptr // size in bytes of the part + + // These fields describe the ABI side of the translation. + stkOff uintptr // stack offset, used if kind == abiStepStack + ireg int // integer register index, used if kind == abiStepIntReg or kind == abiStepPointer + freg int // FP register index, used if kind == abiStepFloatReg +} + +// abiStepKind is the "op-code" for an abiStep instruction. +type abiStepKind int + +const ( + abiStepBad abiStepKind = iota + abiStepStack // copy to/from stack + abiStepIntReg // copy to/from integer register + abiStepPointer // copy pointer to/from integer register + abiStepFloatReg // copy to/from FP register +) + +// abiSeq represents a sequence of ABI instructions for copying +// from a series of reflect.Values to a call frame (for call arguments) +// or vice-versa (for call results). +// +// An abiSeq should be populated by calling its addArg method. +type abiSeq struct { + // steps is the set of instructions. + // + // The instructions are grouped together by whole arguments, + // with the starting index for the instructions + // of the i'th Go value available in valueStart. + // + // For instance, if this abiSeq represents 3 arguments + // passed to a function, then the 2nd argument's steps + // begin at steps[valueStart[1]]. + // + // Because reflect accepts Go arguments in distinct + // Values and each Value is stored separately, each abiStep + // that begins a new argument will have its offset + // field == 0. 
+ steps []abiStep + valueStart []int + + stackBytes uintptr // stack space used + iregs, fregs int // registers used +} + +func (a *abiSeq) dump() { + for i, p := range a.steps { + println("part", i, p.kind, p.offset, p.size, p.stkOff, p.ireg, p.freg) + } + print("values ") + for _, i := range a.valueStart { + print(i, " ") + } + println() + println("stack", a.stackBytes) + println("iregs", a.iregs) + println("fregs", a.fregs) +} + +// stepsForValue returns the ABI instructions for translating +// the i'th Go argument or return value represented by this +// abiSeq to the Go ABI. +func (a *abiSeq) stepsForValue(i int) []abiStep { + s := a.valueStart[i] + var e int + if i == len(a.valueStart)-1 { + e = len(a.steps) + } else { + e = a.valueStart[i+1] + } + return a.steps[s:e] +} + +// addArg extends the abiSeq with a new Go value of type t. +// +// If the value was stack-assigned, returns the single +// abiStep describing that translation, and nil otherwise. +func (a *abiSeq) addArg(t *rtype) *abiStep { + pStart := len(a.steps) + a.valueStart = append(a.valueStart, pStart) + if !a.regAssign(t, 0) { + a.steps = a.steps[:pStart] + a.stackAssign(t.size, uintptr(t.align)) + return &a.steps[len(a.steps)-1] + } + return nil +} + +// addRcvr extends the abiSeq with a new method call +// receiver according to the interface calling convention. +// +// If the receiver was stack-assigned, returns the single +// abiStep describing that translation, and nil otherwise. +// Returns true if the receiver is a pointer. +func (a *abiSeq) addRcvr(rcvr *rtype) (*abiStep, bool) { + // The receiver is always one word. + a.valueStart = append(a.valueStart, len(a.steps)) + var ok, ptr bool + if ifaceIndir(rcvr) || rcvr.pointers() { + ok = a.assignIntN(0, ptrSize, 1, 0b1) + ptr = true + } else { + // TODO(mknyszek): Is this case even possible? + // The interface data word never contains a non-pointer + // value. This case was copied over from older code + // in the reflect package which only conditionally added + // a pointer bit to the reflect.(Value).Call stack frame's + // GC bitmap. + ok = a.assignIntN(0, ptrSize, 1, 0b0) + ptr = false + } + if !ok { + a.stackAssign(ptrSize, ptrSize) + return &a.steps[len(a.steps)-1], ptr + } + return nil, ptr +} + +// regAssign attempts to reserve argument registers for a value of +// type t, stored at some offset. +// +// It returns whether or not the assignment succeeded, but +// leaves any changes it made to a.steps behind, so the caller +// must undo that work by adjusting a.steps if it fails. +// +// This method along with the assign* methods represent the +// complete register-assignment algorithm for the Go ABI.
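+// +// For example, a string argument consumes two integer registers (data +// pointer and length, with ptrMap bit 0 marking the pointer word) and a +// slice consumes three (data, len, cap); if too few registers remain, +// addArg undoes the partial assignment and places the whole value on +// the stack instead.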
+func (a *abiSeq) regAssign(t *rtype, offset uintptr) bool { + switch t.Kind() { + case UnsafePointer, Ptr, Chan, Map, Func: + return a.assignIntN(offset, t.size, 1, 0b1) + case Bool, Int, Uint, Int8, Uint8, Int16, Uint16, Int32, Uint32, Uintptr: + return a.assignIntN(offset, t.size, 1, 0b0) + case Int64, Uint64: + switch ptrSize { + case 4: + return a.assignIntN(offset, 4, 2, 0b0) + case 8: + return a.assignIntN(offset, 8, 1, 0b0) + } + case Float32, Float64: + return a.assignFloatN(offset, t.size, 1) + case Complex64: + return a.assignFloatN(offset, 4, 2) + case Complex128: + return a.assignFloatN(offset, 8, 2) + case String: + return a.assignIntN(offset, ptrSize, 2, 0b01) + case Interface: + return a.assignIntN(offset, ptrSize, 2, 0b10) + case Slice: + return a.assignIntN(offset, ptrSize, 3, 0b001) + case Array: + tt := (*arrayType)(unsafe.Pointer(t)) + switch tt.len { + case 0: + // There's nothing to assign, so don't modify + // a.steps but succeed so the caller doesn't + // try to stack-assign this value. + return true + case 1: + return a.regAssign(tt.elem, offset) + default: + return false + } + case Struct: + if t.size == 0 { + // There's nothing to assign, so don't modify + // a.steps but succeed so the caller doesn't + // try to stack-assign this value. + return true + } + st := (*structType)(unsafe.Pointer(t)) + for i := range st.fields { + f := &st.fields[i] + if f.typ.Size() == 0 { + // Ignore zero-sized fields. + continue + } + if !a.regAssign(f.typ, offset+f.offset()) { + return false + } + } + return true + default: + print("t.Kind == ", t.Kind(), "\n") + panic("unknown type kind") + } + panic("unhandled register assignment path") +} + +// assignIntN assigns n values to registers, each "size" bytes large, +// from the data at [offset, offset+n*size) in memory. Each value at +// [offset+i*size, offset+(i+1)*size) for i < n is assigned to the +// next n integer registers. +// +// Bit i in ptrMap indicates whether the i'th value is a pointer. +// n must be <= 8. +// +// Returns whether assignment succeeded. +func (a *abiSeq) assignIntN(offset, size uintptr, n int, ptrMap uint8) bool { + if n > 8 || n < 0 { + panic("invalid n") + } + if ptrMap != 0 && size != ptrSize { + panic("non-empty pointer map passed for non-pointer-size values") + } + if a.iregs+n > abi.IntArgRegs { + return false + } + for i := 0; i < n; i++ { + kind := abiStepIntReg + if ptrMap&(uint8(1)<<i) != 0 { + kind = abiStepPointer + } + a.steps = append(a.steps, abiStep{ + kind: kind, + offset: offset + uintptr(i)*size, + size: size, + ireg: a.iregs, + }) + a.iregs++ + } + return true +} + +// assignFloatN assigns n values to registers, each "size" bytes large, +// from the data at [offset, offset+n*size) in memory. Each value at +// [offset+i*size, offset+(i+1)*size) for i < n is assigned to the +// next n floating-point registers. +// +// Returns whether assignment succeeded. +func (a *abiSeq) assignFloatN(offset, size uintptr, n int) bool { + if n < 0 { + panic("invalid n") + } + if a.fregs+n > abi.FloatArgRegs || abi.EffectiveFloatRegSize < size { + return false + } + for i := 0; i < n; i++ { + a.steps = append(a.steps, abiStep{ + kind: abiStepFloatReg, + offset: offset + uintptr(i)*size, + size: size, + freg: a.fregs, + }) + a.fregs++ + } + return true +} + +// stackAssign reserves space for one value that is "size" bytes +// large with alignment "alignment" to the stack. +// +// Should not be called directly; use addArg instead. +func (a *abiSeq) stackAssign(size, alignment uintptr) { + a.stackBytes = align(a.stackBytes, alignment) + a.steps = append(a.steps, abiStep{ + kind: abiStepStack, + offset: 0, // Only used for whole arguments, so the memory offset is 0. + size: size, + stkOff: a.stackBytes, + }) + a.stackBytes += size +} + +// abiDesc describes the ABI for a function or method. +type abiDesc struct { + // call and ret represent the translation steps for + // the call and return paths of a Go function. + call, ret abiSeq + + // These fields describe the stack space allocated + // for the call. stackCallArgsSize is the amount of space + // reserved for arguments but not return values.
retOffset + // is the offset at which return values begin, and + // spill is the size in bytes of additional space reserved + // to spill argument registers into in case of preemption in + // reflectcall's stack frame. + stackCallArgsSize, retOffset, spill uintptr + + // stackPtrs is a bitmap that indicates whether + // each word in the ABI stack space (stack-assigned + // args + return values) is a pointer. Used + // as the heap pointer bitmap for stack space + // passed to reflectcall. + stackPtrs *bitVector + + // outRegPtrs is a bitmap whose i'th bit indicates + // whether the i'th integer result register contains + // a pointer. Used by reflectcall to make result + // pointers visible to the GC. + outRegPtrs abi.IntArgRegBitmap +} + +func (a *abiDesc) dump() { + println("ABI") + println("call") + a.call.dump() + println("ret") + a.ret.dump() + println("stackCallArgsSize", a.stackCallArgsSize) + println("retOffset", a.retOffset) + println("spill", a.spill) +} + +func newAbiDesc(t *funcType, rcvr *rtype) abiDesc { + // We need to add space for this argument to + // the frame so that it can spill args into it. + // + // The size of this space is just the sum of the sizes + // of each register-allocated type. + // + // TODO(mknyszek): Remove this when we no longer have + // caller reserved spill space. + spillInt := uintptr(0) + spillFloat := uintptr(0) + + // Compute gc program & stack bitmap for stack arguments + stackPtrs := new(bitVector) + + // Compute abiSeq for input parameters. + var in abiSeq + if rcvr != nil { + stkStep, isPtr := in.addRcvr(rcvr) + if stkStep != nil { + if isPtr { + stackPtrs.append(1) + } else { + stackPtrs.append(0) + } + } else { + spillInt += ptrSize + } + } + for _, arg := range t.in() { + i, f := in.iregs, in.fregs + stkStep := in.addArg(arg) + if stkStep != nil { + addTypeBits(stackPtrs, stkStep.stkOff, arg) + } else { + i, f = in.iregs-i, in.fregs-f + spillInt += uintptr(i) * ptrSize + spillFloat += uintptr(f) * abi.EffectiveFloatRegSize + } + } + spill := align(spillInt+spillFloat, ptrSize) + + // From the input parameters alone, we now know + // the stackCallArgsSize and retOffset. + stackCallArgsSize := in.stackBytes + retOffset := align(in.stackBytes, ptrSize) + + // Compute the stack frame pointer bitmap and register + // pointer bitmap for return values. + outRegPtrs := abi.IntArgRegBitmap{} + + // Compute abiSeq for output parameters. + var out abiSeq + // Stack-assigned return values do not share + // space with arguments like they do with registers, + // so we need to inject a stack offset here. + // Fake it by artificially extending stackBytes by + // the return offset. + out.stackBytes = retOffset + for i, res := range t.out() { + stkStep := out.addArg(res) + if stkStep != nil { + addTypeBits(stackPtrs, stkStep.stkOff, res) + } else { + for _, st := range out.stepsForValue(i) { + if st.kind == abiStepPointer { + outRegPtrs.Set(st.ireg) + } + } + } + } + // Undo the faking from earlier so that stackBytes + // is accurate.
+ out.stackBytes -= retOffset + return abiDesc{in, out, stackCallArgsSize, retOffset, spill, stackPtrs, outRegPtrs} +} diff --git a/src/reflect/export_test.go b/src/reflect/export_test.go index de426b58a8a2c..ddcfca9dee45c 100644 --- a/src/reflect/export_test.go +++ b/src/reflect/export_test.go @@ -23,15 +23,17 @@ const PtrSize = ptrSize func FuncLayout(t Type, rcvr Type) (frametype Type, argSize, retOffset uintptr, stack []byte, gc []byte, ptrs bool) { var ft *rtype - var s *bitVector + var abi abiDesc if rcvr != nil { - ft, argSize, retOffset, s, _ = funcLayout((*funcType)(unsafe.Pointer(t.(*rtype))), rcvr.(*rtype)) + ft, _, abi = funcLayout((*funcType)(unsafe.Pointer(t.(*rtype))), rcvr.(*rtype)) } else { - ft, argSize, retOffset, s, _ = funcLayout((*funcType)(unsafe.Pointer(t.(*rtype))), nil) + ft, _, abi = funcLayout((*funcType)(unsafe.Pointer(t.(*rtype))), nil) } + argSize = abi.stackCallArgsSize + retOffset = abi.retOffset frametype = ft - for i := uint32(0); i < s.n; i++ { - stack = append(stack, s.data[i/8]>>(i%8)&1) + for i := uint32(0); i < abi.stackPtrs.n; i++ { + stack = append(stack, abi.stackPtrs.data[i/8]>>(i%8)&1) } if ft.kind&kindGCProg != 0 { panic("can't handle gc programs") diff --git a/src/reflect/makefunc.go b/src/reflect/makefunc.go index 67dc4859b9736..e17d4ea758c3c 100644 --- a/src/reflect/makefunc.go +++ b/src/reflect/makefunc.go @@ -60,9 +60,9 @@ func MakeFunc(typ Type, fn func(args []Value) (results []Value)) Value { code := **(**uintptr)(unsafe.Pointer(&dummy)) // makeFuncImpl contains a stack map for use by the runtime - _, argLen, _, stack, _ := funcLayout(ftyp, nil) + _, _, abi := funcLayout(ftyp, nil) - impl := &makeFuncImpl{code: code, stack: stack, argLen: argLen, ftyp: ftyp, fn: fn} + impl := &makeFuncImpl{code: code, stack: abi.stackPtrs, argLen: abi.stackCallArgsSize, ftyp: ftyp, fn: fn} return Value{t, unsafe.Pointer(impl), flag(Func)} } @@ -112,12 +112,12 @@ func makeMethodValue(op string, v Value) Value { code := **(**uintptr)(unsafe.Pointer(&dummy)) // methodValue contains a stack map for use by the runtime - _, argLen, _, stack, _ := funcLayout(ftyp, nil) + _, _, abi := funcLayout(ftyp, nil) fv := &methodValue{ fn: code, - stack: stack, - argLen: argLen, + stack: abi.stackPtrs, + argLen: abi.stackCallArgsSize, method: int(v.flag) >> flagMethodShift, rcvr: rcvr, } diff --git a/src/reflect/type.go b/src/reflect/type.go index d323828c74eda..d52816628f0d1 100644 --- a/src/reflect/type.go +++ b/src/reflect/type.go @@ -2984,21 +2984,20 @@ type layoutKey struct { type layoutType struct { t *rtype - argSize uintptr // size of arguments - retOffset uintptr // offset of return values. - stack *bitVector framePool *sync.Pool + abi abiDesc } var layoutCache sync.Map // map[layoutKey]layoutType // funcLayout computes a struct type representing the layout of the -// function arguments and return values for the function type t. +// stack-assigned function arguments and return values for the function +// type t. // If rcvr != nil, rcvr specifies the type of the receiver. // The returned type exists only for GC, so we only fill out GC relevant info. // Currently, that's just size and the GC program. We also fill in // the name for possible debugging use. 
-func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, argSize, retOffset uintptr, stk *bitVector, framePool *sync.Pool) { +func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, framePool *sync.Pool, abi abiDesc) { if t.Kind() != Func { panic("reflect: funcLayout of non-func type " + t.String()) } @@ -3008,46 +3007,24 @@ func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, argSize, retOffset k := layoutKey{t, rcvr} if lti, ok := layoutCache.Load(k); ok { lt := lti.(layoutType) - return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool + return lt.t, lt.framePool, lt.abi } - // compute gc program & stack bitmap for arguments - ptrmap := new(bitVector) - var offset uintptr - if rcvr != nil { - // Reflect uses the "interface" calling convention for - // methods, where receivers take one word of argument - // space no matter how big they actually are. - if ifaceIndir(rcvr) || rcvr.pointers() { - ptrmap.append(1) - } else { - ptrmap.append(0) - } - offset += ptrSize - } - for _, arg := range t.in() { - offset += -offset & uintptr(arg.align-1) - addTypeBits(ptrmap, offset, arg) - offset += arg.size - } - argSize = offset - offset += -offset & (ptrSize - 1) - retOffset = offset - for _, res := range t.out() { - offset += -offset & uintptr(res.align-1) - addTypeBits(ptrmap, offset, res) - offset += res.size - } - offset += -offset & (ptrSize - 1) + // Compute the ABI layout. + abi = newAbiDesc(t, rcvr) // build dummy rtype holding gc program x := &rtype{ - align: ptrSize, - size: offset, - ptrdata: uintptr(ptrmap.n) * ptrSize, + align: ptrSize, + // Don't add spill space here; it's only necessary in + // reflectcall's frame, not in the allocated frame. + // TODO(mknyszek): Remove this comment when register + // spill space in the frame is no longer required. + size: align(abi.retOffset+abi.ret.stackBytes, ptrSize), + ptrdata: uintptr(abi.stackPtrs.n) * ptrSize, } - if ptrmap.n > 0 { - x.gcdata = &ptrmap.data[0] + if abi.stackPtrs.n > 0 { + x.gcdata = &abi.stackPtrs.data[0] } var s string @@ -3064,13 +3041,11 @@ func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, argSize, retOffset }} lti, _ := layoutCache.LoadOrStore(k, layoutType{ t: x, - argSize: argSize, - retOffset: retOffset, - stack: ptrmap, framePool: framePool, + abi: abi, }) lt := lti.(layoutType) - return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool + return lt.t, lt.framePool, lt.abi } // ifaceIndir reports whether t is stored indirectly in an interface value. diff --git a/src/reflect/value.go b/src/reflect/value.go index 1f185b52e49e3..eae1b9bf294b7 100644 --- a/src/reflect/value.go +++ b/src/reflect/value.go @@ -5,6 +5,7 @@ package reflect import ( + "internal/abi" "internal/unsafeheader" "math" "runtime" @@ -352,6 +353,8 @@ func (v Value) CallSlice(in []Value) []Value { var callGC bool // for testing; see TestCallMethodJump +const debugReflectCall = false + func (v Value) call(op string, in []Value) []Value { // Get function pointer, type. t := (*funcType)(unsafe.Pointer(v.typ)) @@ -430,50 +433,112 @@ func (v Value) call(op string, in []Value) []Value { } nout := t.NumOut() + // Register argument space. + var regArgs abi.RegArgs + // Compute frame type. - frametype, _, retOffset, _, framePool := funcLayout(t, rcvrtype) + frametype, framePool, abi := funcLayout(t, rcvrtype) - // Allocate a chunk of memory for frame. - var args unsafe.Pointer - if nout == 0 { - args = framePool.Get().(unsafe.Pointer) - } else { - // Can't use pool if the function has return values. 
- // We will leak pointer to args in ret, so its lifetime is not scoped. - args = unsafe_New(frametype) + // Allocate a chunk of memory for frame if needed. + var stackArgs unsafe.Pointer + if frametype.size != 0 { + if nout == 0 { + stackArgs = framePool.Get().(unsafe.Pointer) + } else { + // Can't use pool if the function has return values. + // We will leak pointer to args in ret, so its lifetime is not scoped. + stackArgs = unsafe_New(frametype) + } + } + frameSize := frametype.size + + if debugReflectCall { + println("reflect.call", t.String()) + abi.dump() } - off := uintptr(0) // Copy inputs into args. + + // Handle receiver. + inStart := 0 if rcvrtype != nil { - storeRcvr(rcvr, args) - off = ptrSize + // Guaranteed to only be one word in size, + // so it will only take up exactly 1 abiStep (either + // in a register or on the stack). + switch st := abi.call.steps[0]; st.kind { + case abiStepStack: + storeRcvr(rcvr, stackArgs) + case abiStepIntReg, abiStepPointer: + // Even pointers can go into the uintptr slot because + // they'll be kept alive by the Values referenced by + // this frame. Reflection forces these to be heap-allocated, + // so we don't need to worry about stack copying. + storeRcvr(rcvr, unsafe.Pointer(®Args.Ints[st.ireg])) + case abiStepFloatReg: + storeRcvr(rcvr, unsafe.Pointer(®Args.Floats[st.freg])) + default: + panic("unknown ABI parameter kind") + } + inStart = 1 } + + // Handle arguments. for i, v := range in { v.mustBeExported() targ := t.In(i).(*rtype) - a := uintptr(targ.align) - off = (off + a - 1) &^ (a - 1) - n := targ.size - if n == 0 { - // Not safe to compute args+off pointing at 0 bytes, - // because that might point beyond the end of the frame, - // but we still need to call assignTo to check assignability. - v.assignTo("reflect.Value.Call", targ, nil) - continue - } - addr := add(args, off, "n > 0") - v = v.assignTo("reflect.Value.Call", targ, addr) - if v.flag&flagIndir != 0 { - typedmemmove(targ, addr, v.ptr) - } else { - *(*unsafe.Pointer)(addr) = v.ptr + // TODO(mknyszek): Figure out if it's possible to get some + // scratch space for this assignment check. Previously, it + // was possible to use space in the argument frame. + v = v.assignTo("reflect.Value.Call", targ, nil) + stepsLoop: + for _, st := range abi.call.stepsForValue(i + inStart) { + switch st.kind { + case abiStepStack: + // Copy values to the "stack." + addr := add(stackArgs, st.stkOff, "precomputed stack arg offset") + if v.flag&flagIndir != 0 { + typedmemmove(targ, addr, v.ptr) + } else { + *(*unsafe.Pointer)(addr) = v.ptr + } + // There's only one step for a stack-allocated value. + break stepsLoop + case abiStepIntReg, abiStepPointer: + // Copy values to "integer registers." + if v.flag&flagIndir != 0 { + offset := add(v.ptr, st.offset, "precomputed value offset") + memmove(unsafe.Pointer(®Args.Ints[st.ireg]), offset, st.size) + } else { + if st.kind == abiStepPointer { + // Duplicate this pointer in the pointer area of the + // register space. Otherwise, there's the potential for + // this to be the last reference to v.ptr. + regArgs.Ptrs[st.ireg] = v.ptr + } + regArgs.Ints[st.ireg] = uintptr(v.ptr) + } + case abiStepFloatReg: + // Copy values to "float registers." 
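+					// A Value without flagIndir carries its pointer directly in
+					// v.ptr, and the ABI never assigns pointers to floating
+					// point registers, so a direct value here indicates a bad
+					// ABI computation.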
+ if v.flag&flagIndir == 0 { + panic("attempted to copy pointer to FP register") + } + offset := add(v.ptr, st.offset, "precomputed value offset") + memmove(unsafe.Pointer(®Args.Floats[st.freg]), offset, st.size) + default: + panic("unknown ABI part kind") + } } - off += n } + // TODO(mknyszek): Remove this when we no longer have + // caller reserved spill space. + frameSize = align(frameSize, ptrSize) + frameSize += abi.spill + + // Mark pointers in registers for the return path. + regArgs.ReturnIsPtr = abi.outRegPtrs // Call. - call(frametype, fn, args, uint32(frametype.size), uint32(retOffset)) + call(frametype, fn, stackArgs, uint32(frametype.size), uint32(abi.retOffset), uint32(frameSize), ®Args) // For testing; see TestCallMethodJump. if callGC { @@ -482,34 +547,82 @@ func (v Value) call(op string, in []Value) []Value { var ret []Value if nout == 0 { - typedmemclr(frametype, args) - framePool.Put(args) + if stackArgs != nil { + typedmemclr(frametype, stackArgs) + framePool.Put(stackArgs) + } } else { - // Zero the now unused input area of args, - // because the Values returned by this function contain pointers to the args object, - // and will thus keep the args object alive indefinitely. - typedmemclrpartial(frametype, args, 0, retOffset) + if stackArgs != nil { + // Zero the now unused input area of args, + // because the Values returned by this function contain pointers to the args object, + // and will thus keep the args object alive indefinitely. + typedmemclrpartial(frametype, stackArgs, 0, abi.retOffset) + } // Wrap Values around return values in args. ret = make([]Value, nout) - off = retOffset for i := 0; i < nout; i++ { tv := t.Out(i) - a := uintptr(tv.Align()) - off = (off + a - 1) &^ (a - 1) - if tv.Size() != 0 { + if tv.Size() == 0 { + // For zero-sized return value, args+off may point to the next object. + // In this case, return the zero value instead. + ret[i] = Zero(tv) + continue + } + steps := abi.ret.stepsForValue(i) + if st := steps[0]; st.kind == abiStepStack { + // This value is on the stack. If part of a value is stack + // allocated, the entire value is according to the ABI. So + // just make an indirection into the allocated frame. fl := flagIndir | flag(tv.Kind()) - ret[i] = Value{tv.common(), add(args, off, "tv.Size() != 0"), fl} + ret[i] = Value{tv.common(), add(stackArgs, st.stkOff, "tv.Size() != 0"), fl} // Note: this does introduce false sharing between results - // if any result is live, they are all live. // (And the space for the args is live as well, but as we've // cleared that space it isn't as big a deal.) - } else { - // For zero-sized return value, args+off may point to the next object. - // In this case, return the zero value instead. - ret[i] = Zero(tv) + continue + } + + // Handle pointers passed in registers. + if !ifaceIndir(tv.common()) { + // Pointer-valued data gets put directly + // into v.ptr. + if steps[0].kind != abiStepPointer { + print("kind=", steps[0].kind, ", type=", tv.String(), "\n") + panic("mismatch between ABI description and types") + } + ret[i] = Value{tv.common(), regArgs.Ptrs[steps[0].ireg], flag(t.Kind())} + continue + } + + // All that's left is values passed in registers that we need to + // create space for and copy values back into. + // + // TODO(mknyszek): We make a new allocation for each register-allocated + // value, but previously we could always point into the heap-allocated + // stack frame. 
This is a regression that could be fixed by adding + // additional space to the allocated stack frame and storing the + // register-allocated return values into the allocated stack frame and + // referring there in the resulting Value. + s := unsafe_New(tv.common()) + for _, st := range steps { + switch st.kind { + case abiStepIntReg: + offset := add(s, st.offset, "precomputed value offset") + memmove(offset, unsafe.Pointer(®Args.Ints[st.ireg]), st.size) + case abiStepPointer: + s := add(s, st.offset, "precomputed value offset") + *((*unsafe.Pointer)(s)) = regArgs.Ptrs[st.ireg] + case abiStepFloatReg: + offset := add(s, st.offset, "precomputed value offset") + memmove(offset, unsafe.Pointer(®Args.Floats[st.freg]), st.size) + case abiStepStack: + panic("register-based return value has stack component") + default: + panic("unknown ABI part kind") + } } - off += tv.Size() + ret[i] = Value{tv.common(), s, flagIndir | flag(tv.Kind())} } } @@ -709,7 +822,8 @@ func align(x, n uintptr) uintptr { func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool) { rcvr := ctxt.rcvr rcvrtype, t, fn := methodReceiver("call", rcvr, ctxt.method) - frametype, argSize, retOffset, _, framePool := funcLayout(t, rcvrtype) + frametype, framePool, abid := funcLayout(t, rcvrtype) + argSize, retOffset := abid.stackCallArgsSize, abid.retOffset // Make a new frame that is one word bigger so we can store the receiver. // This space is used for both arguments and return values. @@ -727,10 +841,19 @@ func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool) { typedmemmovepartial(frametype, add(scratch, argOffset, "argSize > argOffset"), frame, argOffset, argSize-argOffset) } + frameSize := frametype.size + // TODO(mknyszek): Remove this when we no longer have + // caller reserved spill space. + frameSize = align(frameSize, ptrSize) + frameSize += abid.spill + // Call. // Call copies the arguments from scratch to the stack, calls fn, // and then copies the results back into scratch. - call(frametype, fn, scratch, uint32(frametype.size), uint32(retOffset)) + // + // TODO(mknyszek): Have this actually support the register-based ABI. + var regs abi.RegArgs + call(frametype, fn, scratch, uint32(frametype.size), uint32(retOffset), uint32(frameSize), ®s) // Copy return values. // Ignore any changes to args and just copy return values. @@ -2802,14 +2925,32 @@ func mapiternext(it unsafe.Pointer) //go:noescape func maplen(m unsafe.Pointer) int -// call calls fn with a copy of the n argument bytes pointed at by arg. -// After fn returns, reflectcall copies n-retoffset result bytes -// back into arg+retoffset before returning. If copying result bytes back, -// the caller must pass the argument frame type as argtype, so that -// call can execute appropriate write barriers during the copy. +// call calls fn with "stackArgsSize" bytes of stack arguments laid out +// at stackArgs and register arguments laid out in regArgs. frameSize is +// the total amount of stack space that will be reserved by call, so this +// should include enough space to spill register arguments to the stack in +// case of preemption. +// +// After fn returns, call copies stackArgsSize-stackRetOffset result bytes +// back into stackArgs+stackRetOffset before returning, for any return +// values passed on the stack. Register-based return values will be found +// in the same regArgs structure. 
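+// (With the amd64 assignments in this patch, for example, an integer result
+// in AX comes back in regArgs.Ints[0] and a float result in X0 in
+// regArgs.Floats[0]; see the spillArgs layout in asm_amd64.s.)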
+// +// regArgs must also be prepared with an appropriate ReturnIsPtr bitmap +// indicating which registers will contain pointer-valued return values. The +// purpose of this bitmap is to keep pointers visible to the GC between +// returning from reflectcall and actually using them. // +// If copying result bytes back from the stack, the caller must pass the +// argument frame type as stackArgsType, so that call can execute appropriate +// write barriers during the copy. +// +// Arguments passed through to call do not escape. The type is used only in a +// very limited callee of call, the stackArgs are copied, and regArgs is only +// used in the call frame. +//go:noescape //go:linkname call runtime.reflectcall -func call(argtype *rtype, fn, arg unsafe.Pointer, n uint32, retoffset uint32) +func call(stackArgsType *rtype, f, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs) func ifaceE2I(t *rtype, src interface{}, dst unsafe.Pointer) diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s index 429f3fef82916..471451df282c0 100644 --- a/src/runtime/asm_386.s +++ b/src/runtime/asm_386.s @@ -458,7 +458,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0-0 JMP runtime·morestack(SB) // reflectcall: call a function with the given argument list -// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32). +// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs). // we don't have variable-sized frames, so we use a small number // of constant-sized-frame functions to encode a few bits of size in the pc. // Caution: ugly multiline assembly macros in your future! @@ -470,8 +470,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0-0 JMP AX // Note: can't just "JMP NAME(SB)" - bad inlining results. -TEXT ·reflectcall(SB), NOSPLIT, $0-20 - MOVL argsize+12(FP), CX +TEXT ·reflectcall(SB), NOSPLIT, $0-28 + MOVL frameSize+20(FP), CX DISPATCH(runtime·call16, 16) DISPATCH(runtime·call32, 32) DISPATCH(runtime·call64, 64) @@ -503,11 +503,11 @@ TEXT ·reflectcall(SB), NOSPLIT, $0-20 JMP AX #define CALLFN(NAME,MAXSIZE) \ -TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \ +TEXT NAME(SB), WRAPPER, $MAXSIZE-28; \ NO_LOCAL_POINTERS; \ /* copy arguments to stack */ \ - MOVL argptr+8(FP), SI; \ - MOVL argsize+12(FP), CX; \ + MOVL stackArgs+8(FP), SI; \ + MOVL stackArgsSize+12(FP), CX; \ MOVL SP, DI; \ REP;MOVSB; \ /* call function */ \ @@ -516,10 +516,10 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \ PCDATA $PCDATA_StackMapIndex, $0; \ CALL AX; \ /* copy return values back */ \ - MOVL argtype+0(FP), DX; \ - MOVL argptr+8(FP), DI; \ - MOVL argsize+12(FP), CX; \ - MOVL retoffset+16(FP), BX; \ + MOVL stackArgsType+0(FP), DX; \ + MOVL stackArgs+8(FP), DI; \ + MOVL stackArgsSize+12(FP), CX; \ + MOVL stackRetOffset+16(FP), BX; \ MOVL SP, SI; \ ADDL BX, DI; \ ADDL BX, SI; \ @@ -531,11 +531,12 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \ // separate function so it can allocate stack space for the arguments // to reflectcallmove. It does not follow the Go ABI; it expects its // arguments in registers. 
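Each callRet stub in this and the following assembly files grows by one word
because reflectcallmove, which it wraps, gains a register-arguments parameter
(see the mbarrier.go hunk below):

	func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs)

Ports that do not pass arguments in registers simply zero the new slot.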
-TEXT callRet<>(SB), NOSPLIT, $16-0 +TEXT callRet<>(SB), NOSPLIT, $20-0 MOVL DX, 0(SP) MOVL DI, 4(SP) MOVL SI, 8(SP) MOVL CX, 12(SP) + MOVL $0, 16(SP) CALL runtime·reflectcallmove(SB) RET diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s index 93280eee4a6c5..5e1ed9b2ad8e8 100644 --- a/src/runtime/asm_amd64.s +++ b/src/runtime/asm_amd64.s @@ -445,8 +445,74 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0 MOVL $0, DX JMP runtime·morestack(SB) +#ifdef GOEXPERIMENT_REGABI +// spillArgs stores return values from registers to a *internal/abi.RegArgs in R12. +TEXT spillArgs<>(SB),NOSPLIT,$0-0 + MOVQ AX, 0(R12) + MOVQ BX, 8(R12) + MOVQ CX, 16(R12) + MOVQ DI, 24(R12) + MOVQ SI, 32(R12) + MOVQ R8, 40(R12) + MOVQ R9, 48(R12) + MOVQ R10, 56(R12) + MOVQ R11, 64(R12) + MOVQ X0, 72(R12) + MOVQ X1, 80(R12) + MOVQ X2, 88(R12) + MOVQ X3, 96(R12) + MOVQ X4, 104(R12) + MOVQ X5, 112(R12) + MOVQ X6, 120(R12) + MOVQ X7, 128(R12) + MOVQ X8, 136(R12) + MOVQ X9, 144(R12) + MOVQ X10, 152(R12) + MOVQ X11, 160(R12) + MOVQ X12, 168(R12) + MOVQ X13, 176(R12) + MOVQ X14, 184(R12) + RET + +// unspillArgs loads args into registers from a *internal/abi.RegArgs in R12. +TEXT unspillArgs<>(SB),NOSPLIT,$0-0 + MOVQ 0(R12), AX + MOVQ 8(R12), BX + MOVQ 16(R12), CX + MOVQ 24(R12), DI + MOVQ 32(R12), SI + MOVQ 40(R12), R8 + MOVQ 48(R12), R9 + MOVQ 56(R12), R10 + MOVQ 64(R12), R11 + MOVQ 72(R12), X0 + MOVQ 80(R12), X1 + MOVQ 88(R12), X2 + MOVQ 96(R12), X3 + MOVQ 104(R12), X4 + MOVQ 112(R12), X5 + MOVQ 120(R12), X6 + MOVQ 128(R12), X7 + MOVQ 136(R12), X8 + MOVQ 144(R12), X9 + MOVQ 152(R12), X10 + MOVQ 160(R12), X11 + MOVQ 168(R12), X12 + MOVQ 176(R12), X13 + MOVQ 184(R12), X14 + RET +#else +// spillArgs stores return values from registers to a pointer in R12. +TEXT spillArgs<>(SB),NOSPLIT,$0-0 + RET + +// unspillArgs loads args into registers from a pointer in R12. +TEXT unspillArgs<>(SB),NOSPLIT,$0-0 + RET +#endif + // reflectcall: call a function with the given argument list -// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32). +// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs). // we don't have variable-sized frames, so we use a small number // of constant-sized-frame functions to encode a few bits of size in the pc. // Caution: ugly multiline assembly macros in your future! @@ -458,8 +524,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0 JMP AX // Note: can't just "JMP NAME(SB)" - bad inlining results. 
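spillArgs and unspillArgs above treat R12 as a *internal/abi.RegArgs, storing
the nine integer registers at byte offsets 0-64 and the fifteen X registers at
72-184. A sketch of the layout those offsets imply — inferred from this file
and from the field uses in reflect, not the authoritative definition in
internal/abi:

	type RegArgs struct {
		Ints   [IntArgRegs]uintptr        // 0-64: AX, BX, CX, DI, SI, R8-R11
		Floats [FloatArgRegs]uint64       // 72-184: X0-X14
		Ptrs   [IntArgRegs]unsafe.Pointer // GC-visible copies of pointers in Ints
		ReturnIsPtr IntArgRegBitmap       // which Ints slots hold pointer results
	}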
-TEXT ·reflectcall(SB), NOSPLIT, $0-32 - MOVLQZX argsize+24(FP), CX +TEXT ·reflectcall(SB), NOSPLIT, $0-48 + MOVLQZX frameSize+32(FP), CX DISPATCH(runtime·call16, 16) DISPATCH(runtime·call32, 32) DISPATCH(runtime·call64, 64) @@ -491,23 +557,28 @@ TEXT ·reflectcall(SB), NOSPLIT, $0-32 JMP AX #define CALLFN(NAME,MAXSIZE) \ -TEXT NAME(SB), WRAPPER, $MAXSIZE-32; \ +TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \ NO_LOCAL_POINTERS; \ /* copy arguments to stack */ \ - MOVQ argptr+16(FP), SI; \ - MOVLQZX argsize+24(FP), CX; \ + MOVQ stackArgs+16(FP), SI; \ + MOVLQZX stackArgsSize+24(FP), CX; \ MOVQ SP, DI; \ REP;MOVSB; \ + /* set up argument registers */ \ + MOVQ regArgs+40(FP), R12; \ + CALL unspillArgs<>(SB); \ /* call function */ \ MOVQ f+8(FP), DX; \ PCDATA $PCDATA_StackMapIndex, $0; \ - MOVQ (DX), AX; \ - CALL AX; \ - /* copy return values back */ \ - MOVQ argtype+0(FP), DX; \ - MOVQ argptr+16(FP), DI; \ - MOVLQZX argsize+24(FP), CX; \ - MOVLQZX retoffset+28(FP), BX; \ + MOVQ (DX), R12; \ + CALL R12; \ + /* copy register return values back */ \ + MOVQ regArgs+40(FP), R12; \ + CALL spillArgs<>(SB); \ + MOVLQZX stackArgsSize+24(FP), CX; \ + MOVLQZX stackRetOffset+28(FP), BX; \ + MOVQ stackArgs+16(FP), DI; \ + MOVQ stackArgsType+0(FP), DX; \ MOVQ SP, SI; \ ADDQ BX, DI; \ ADDQ BX, SI; \ @@ -519,12 +590,13 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-32; \ // separate function so it can allocate stack space for the arguments // to reflectcallmove. It does not follow the Go ABI; it expects its // arguments in registers. -TEXT callRet<>(SB), NOSPLIT, $32-0 +TEXT callRet<>(SB), NOSPLIT, $40-0 NO_LOCAL_POINTERS MOVQ DX, 0(SP) MOVQ DI, 8(SP) MOVQ SI, 16(SP) MOVQ CX, 24(SP) + MOVQ R12, 32(SP) CALL runtime·reflectcallmove(SB) RET diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s index 8eec84d3f2e64..23619b1408496 100644 --- a/src/runtime/asm_arm.s +++ b/src/runtime/asm_arm.s @@ -404,7 +404,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 B runtime·morestack(SB) // reflectcall: call a function with the given argument list -// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32). +// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs). // we don't have variable-sized frames, so we use a small number // of constant-sized-frame functions to encode a few bits of size in the pc. // Caution: ugly multiline assembly macros in your future! 
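Note that reflectcall on every port now dispatches on frameSize rather than on
the stack-argument size: the callNN frame chosen must also hold the register
spill space. The reflect caller computes it as in the value.go hunk above,
roughly:

	frameSize := frametype.size           // stack arguments plus stack results
	frameSize = align(frameSize, ptrSize) // round up to a pointer boundary
	frameSize += abi.spill                // caller-reserved spill space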
@@ -415,8 +415,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 MOVW $NAME(SB), R1; \ B (R1) -TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-20 - MOVW argsize+12(FP), R0 +TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-28 + MOVW frameSize+20(FP), R0 DISPATCH(runtime·call16, 16) DISPATCH(runtime·call32, 32) DISPATCH(runtime·call64, 64) @@ -448,11 +448,11 @@ TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-20 B (R1) #define CALLFN(NAME,MAXSIZE) \ -TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \ +TEXT NAME(SB), WRAPPER, $MAXSIZE-28; \ NO_LOCAL_POINTERS; \ /* copy arguments to stack */ \ - MOVW argptr+8(FP), R0; \ - MOVW argsize+12(FP), R2; \ + MOVW stackArgs+8(FP), R0; \ + MOVW stackArgsSize+12(FP), R2; \ ADD $4, R13, R1; \ CMP $0, R2; \ B.EQ 5(PC); \ @@ -466,10 +466,10 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \ PCDATA $PCDATA_StackMapIndex, $0; \ BL (R0); \ /* copy return values back */ \ - MOVW argtype+0(FP), R4; \ - MOVW argptr+8(FP), R0; \ - MOVW argsize+12(FP), R2; \ - MOVW retoffset+16(FP), R3; \ + MOVW stackArgsType+0(FP), R4; \ + MOVW stackArgs+8(FP), R0; \ + MOVW stackArgsSize+12(FP), R2; \ + MOVW stackArgsRetOffset+16(FP), R3; \ ADD $4, R13, R1; \ ADD R3, R1; \ ADD R3, R0; \ @@ -481,11 +481,13 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \ // separate function so it can allocate stack space for the arguments // to reflectcallmove. It does not follow the Go ABI; it expects its // arguments in registers. -TEXT callRet<>(SB), NOSPLIT, $16-0 +TEXT callRet<>(SB), NOSPLIT, $20-0 MOVW R4, 4(R13) MOVW R0, 8(R13) MOVW R1, 12(R13) MOVW R2, 16(R13) + MOVW $0, R7 + MOVW R7, 20(R13) BL runtime·reflectcallmove(SB) RET diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s index 8e4a1f74f9a17..0ab92be1e4b95 100644 --- a/src/runtime/asm_arm64.s +++ b/src/runtime/asm_arm64.s @@ -312,7 +312,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 B runtime·morestack(SB) // reflectcall: call a function with the given argument list -// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32). +// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs). // we don't have variable-sized frames, so we use a small number // of constant-sized-frame functions to encode a few bits of size in the pc. // Caution: ugly multiline assembly macros in your future! @@ -325,8 +325,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 B (R27) // Note: can't just "B NAME(SB)" - bad inlining results. 
-TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32 - MOVWU argsize+24(FP), R16 +TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48 + MOVWU frameSize+32(FP), R16 DISPATCH(runtime·call16, 16) DISPATCH(runtime·call32, 32) DISPATCH(runtime·call64, 64) @@ -358,11 +358,11 @@ TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32 B (R0) #define CALLFN(NAME,MAXSIZE) \ -TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \ +TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \ NO_LOCAL_POINTERS; \ /* copy arguments to stack */ \ - MOVD arg+16(FP), R3; \ - MOVWU argsize+24(FP), R4; \ + MOVD stackArgs+16(FP), R3; \ + MOVWU stackArgsSize+24(FP), R4; \ ADD $8, RSP, R5; \ BIC $0xf, R4, R6; \ CBZ R6, 6(PC); \ @@ -388,10 +388,10 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \ PCDATA $PCDATA_StackMapIndex, $0; \ BL (R0); \ /* copy return values back */ \ - MOVD argtype+0(FP), R7; \ - MOVD arg+16(FP), R3; \ - MOVWU n+24(FP), R4; \ - MOVWU retoffset+28(FP), R6; \ + MOVD stackArgsType+0(FP), R7; \ + MOVD stackArgs+16(FP), R3; \ + MOVWU stackArgsSize+24(FP), R4; \ + MOVWU stackRetOffset+28(FP), R6; \ ADD $8, RSP, R5; \ ADD R6, R5; \ ADD R6, R3; \ @@ -403,11 +403,12 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \ // separate function so it can allocate stack space for the arguments // to reflectcallmove. It does not follow the Go ABI; it expects its // arguments in registers. -TEXT callRet<>(SB), NOSPLIT, $40-0 +TEXT callRet<>(SB), NOSPLIT, $48-0 MOVD R7, 8(RSP) MOVD R3, 16(RSP) MOVD R5, 24(RSP) MOVD R4, 32(RSP) + MOVD $0, 40(RSP) BL runtime·reflectcallmove(SB) RET diff --git a/src/runtime/asm_mips64x.s b/src/runtime/asm_mips64x.s index 054a89dc37eee..694950663a920 100644 --- a/src/runtime/asm_mips64x.s +++ b/src/runtime/asm_mips64x.s @@ -264,7 +264,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 JMP runtime·morestack(SB) // reflectcall: call a function with the given argument list -// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32). +// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs). // we don't have variable-sized frames, so we use a small number // of constant-sized-frame functions to encode a few bits of size in the pc. // Caution: ugly multiline assembly macros in your future! @@ -277,8 +277,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 JMP (R4) // Note: can't just "BR NAME(SB)" - bad inlining results. 
-TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32 - MOVWU argsize+24(FP), R1 +TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48 + MOVWU frameSize+32(FP), R1 DISPATCH(runtime·call16, 16) DISPATCH(runtime·call32, 32) DISPATCH(runtime·call64, 64) @@ -310,11 +310,11 @@ TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32 JMP (R4) #define CALLFN(NAME,MAXSIZE) \ -TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \ +TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \ NO_LOCAL_POINTERS; \ /* copy arguments to stack */ \ - MOVV arg+16(FP), R1; \ - MOVWU argsize+24(FP), R2; \ + MOVV stackArgs+16(FP), R1; \ + MOVWU stackArgsSize+24(FP), R2; \ MOVV R29, R3; \ ADDV $8, R3; \ ADDV R3, R2; \ @@ -330,10 +330,10 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \ PCDATA $PCDATA_StackMapIndex, $0; \ JAL (R4); \ /* copy return values back */ \ - MOVV argtype+0(FP), R5; \ - MOVV arg+16(FP), R1; \ - MOVWU n+24(FP), R2; \ - MOVWU retoffset+28(FP), R4; \ + MOVV stackArgsType+0(FP), R5; \ + MOVV stackArgs+16(FP), R1; \ + MOVWU stackArgsSize+24(FP), R2; \ + MOVWU stackRetOffset+28(FP), R4; \ ADDV $8, R29, R3; \ ADDV R4, R3; \ ADDV R4, R1; \ @@ -345,11 +345,12 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \ // separate function so it can allocate stack space for the arguments // to reflectcallmove. It does not follow the Go ABI; it expects its // arguments in registers. -TEXT callRet<>(SB), NOSPLIT, $32-0 +TEXT callRet<>(SB), NOSPLIT, $40-0 MOVV R5, 8(R29) MOVV R1, 16(R29) MOVV R3, 24(R29) MOVV R2, 32(R29) + MOVV $0, 40(R29) JAL runtime·reflectcallmove(SB) RET diff --git a/src/runtime/asm_mipsx.s b/src/runtime/asm_mipsx.s index f57437d590279..8e5753d255728 100644 --- a/src/runtime/asm_mipsx.s +++ b/src/runtime/asm_mipsx.s @@ -265,7 +265,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0-0 JMP runtime·morestack(SB) // reflectcall: call a function with the given argument list -// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32). +// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs). // we don't have variable-sized frames, so we use a small number // of constant-sized-frame functions to encode a few bits of size in the pc. @@ -276,8 +276,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0-0 MOVW $NAME(SB), R4; \ JMP (R4) -TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-20 - MOVW argsize+12(FP), R1 +TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-28 + MOVW frameSize+20(FP), R1 DISPATCH(runtime·call16, 16) DISPATCH(runtime·call32, 32) @@ -310,11 +310,11 @@ TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-20 JMP (R4) #define CALLFN(NAME,MAXSIZE) \ -TEXT NAME(SB),WRAPPER,$MAXSIZE-20; \ +TEXT NAME(SB),WRAPPER,$MAXSIZE-28; \ NO_LOCAL_POINTERS; \ /* copy arguments to stack */ \ - MOVW arg+8(FP), R1; \ - MOVW argsize+12(FP), R2; \ + MOVW stackArgs+8(FP), R1; \ + MOVW stackArgsSize+12(FP), R2; \ MOVW R29, R3; \ ADDU $4, R3; \ ADDU R3, R2; \ @@ -330,10 +330,10 @@ TEXT NAME(SB),WRAPPER,$MAXSIZE-20; \ PCDATA $PCDATA_StackMapIndex, $0; \ JAL (R4); \ /* copy return values back */ \ - MOVW argtype+0(FP), R5; \ - MOVW arg+8(FP), R1; \ - MOVW n+12(FP), R2; \ - MOVW retoffset+16(FP), R4; \ + MOVW stackArgsType+0(FP), R5; \ + MOVW stackArgs+8(FP), R1; \ + MOVW stackArgsSize+12(FP), R2; \ + MOVW stackRetOffset+16(FP), R4; \ ADDU $4, R29, R3; \ ADDU R4, R3; \ ADDU R4, R1; \ @@ -345,11 +345,12 @@ TEXT NAME(SB),WRAPPER,$MAXSIZE-20; \ // separate function so it can allocate stack space for the arguments // to reflectcallmove. It does not follow the Go ABI; it expects its // arguments in registers. 
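As on the other ports that do not pass arguments in registers, callRet below
stores a zero word for the new regs parameter, so reflectcallmove receives a
nil *abi.RegArgs. That is safe only because IntArgRegs is zero on these ports,
making the RegArgs arrays zero length, so the pointer-fixup loop in
reflectcallmove (mbarrier.go, below)

	for i := range regs.Ints {
		if regs.ReturnIsPtr.Get(i) {
			regs.Ptrs[i] = unsafe.Pointer(regs.Ints[i])
		}
	}

runs zero iterations and never dereferences regs.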
-TEXT callRet<>(SB), NOSPLIT, $16-0 +TEXT callRet<>(SB), NOSPLIT, $20-0 MOVW R5, 4(R29) MOVW R1, 8(R29) MOVW R3, 12(R29) MOVW R2, 16(R29) + MOVW $0, 20(R29) JAL runtime·reflectcallmove(SB) RET diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s index 763a92adf1864..834023cce19fb 100644 --- a/src/runtime/asm_ppc64x.s +++ b/src/runtime/asm_ppc64x.s @@ -339,7 +339,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 BR runtime·morestack(SB) // reflectcall: call a function with the given argument list -// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32). +// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs). // we don't have variable-sized frames, so we use a small number // of constant-sized-frame functions to encode a few bits of size in the pc. // Caution: ugly multiline assembly macros in your future! @@ -353,8 +353,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 BR (CTR) // Note: can't just "BR NAME(SB)" - bad inlining results. -TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32 - MOVWZ argsize+24(FP), R3 +TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48 + MOVWZ frameSize+32(FP), R3 DISPATCH(runtime·call16, 16) DISPATCH(runtime·call32, 32) DISPATCH(runtime·call64, 64) @@ -387,11 +387,11 @@ TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32 BR (CTR) #define CALLFN(NAME,MAXSIZE) \ -TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \ +TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \ NO_LOCAL_POINTERS; \ /* copy arguments to stack */ \ - MOVD arg+16(FP), R3; \ - MOVWZ argsize+24(FP), R4; \ + MOVD stackArgs+16(FP), R3; \ + MOVWZ stackArgsSize+24(FP), R4; \ MOVD R1, R5; \ CMP R4, $8; \ BLT tailsetup; \ @@ -439,10 +439,10 @@ callfn: \ MOVD 24(R1), R2; \ #endif \ /* copy return values back */ \ - MOVD argtype+0(FP), R7; \ - MOVD arg+16(FP), R3; \ - MOVWZ n+24(FP), R4; \ - MOVWZ retoffset+28(FP), R6; \ + MOVD stackArgsType+0(FP), R7; \ + MOVD stackArgs+16(FP), R3; \ + MOVWZ stackArgsSize+24(FP), R4; \ + MOVWZ stackRetOffset+28(FP), R6; \ ADD $FIXED_FRAME, R1, R5; \ ADD R6, R5; \ ADD R6, R3; \ @@ -454,11 +454,12 @@ callfn: \ // separate function so it can allocate stack space for the arguments // to reflectcallmove. It does not follow the Go ABI; it expects its // arguments in registers. -TEXT callRet<>(SB), NOSPLIT, $32-0 +TEXT callRet<>(SB), NOSPLIT, $40-0 MOVD R7, FIXED_FRAME+0(R1) MOVD R3, FIXED_FRAME+8(R1) MOVD R5, FIXED_FRAME+16(R1) MOVD R4, FIXED_FRAME+24(R1) + MOVD $0, FIXED_FRAME+32(R1) BL runtime·reflectcallmove(SB) RET diff --git a/src/runtime/asm_riscv64.s b/src/runtime/asm_riscv64.s index cf460d1586c83..31e324d677592 100644 --- a/src/runtime/asm_riscv64.s +++ b/src/runtime/asm_riscv64.s @@ -359,7 +359,7 @@ TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0 RET // reflectcall: call a function with the given argument list -// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32). +// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs). // we don't have variable-sized frames, so we use a small number // of constant-sized-frame functions to encode a few bits of size in the pc. // Caution: ugly multiline assembly macros in your future! @@ -371,13 +371,13 @@ TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0 JALR ZERO, T2 // Note: can't just "BR NAME(SB)" - bad inlining results. 
-// func call(argtype *rtype, fn, arg unsafe.Pointer, n uint32, retoffset uint32) +// func call(stackArgsType *rtype, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs). TEXT reflect·call(SB), NOSPLIT, $0-0 JMP ·reflectcall(SB) -// func reflectcall(argtype *_type, fn, arg unsafe.Pointer, argsize uint32, retoffset uint32) -TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32 - MOVWU argsize+24(FP), T0 +// func call(stackArgsType *_type, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs). +TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48 + MOVWU frameSize+32(FP), T0 DISPATCH(runtime·call16, 16) DISPATCH(runtime·call32, 32) DISPATCH(runtime·call64, 64) @@ -409,11 +409,11 @@ TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32 JALR ZERO, T2 #define CALLFN(NAME,MAXSIZE) \ -TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \ +TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \ NO_LOCAL_POINTERS; \ /* copy arguments to stack */ \ - MOV arg+16(FP), A1; \ - MOVWU argsize+24(FP), A2; \ + MOV stackArgs+16(FP), A1; \ + MOVWU stackArgsSize+24(FP), A2; \ MOV X2, A3; \ ADD $8, A3; \ ADD A3, A2; \ @@ -429,10 +429,10 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \ PCDATA $PCDATA_StackMapIndex, $0; \ JALR RA, A4; \ /* copy return values back */ \ - MOV argtype+0(FP), A5; \ - MOV arg+16(FP), A1; \ - MOVWU n+24(FP), A2; \ - MOVWU retoffset+28(FP), A4; \ + MOV stackArgsType+0(FP), A5; \ + MOV stackArgs+16(FP), A1; \ + MOVWU stackArgsSize+24(FP), A2; \ + MOVWU stackRetOffset+28(FP), A4; \ ADD $8, X2, A3; \ ADD A4, A3; \ ADD A4, A1; \ @@ -444,11 +444,12 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \ // separate function so it can allocate stack space for the arguments // to reflectcallmove. It does not follow the Go ABI; it expects its // arguments in registers. -TEXT callRet<>(SB), NOSPLIT, $32-0 +TEXT callRet<>(SB), NOSPLIT, $40-0 MOV A5, 8(X2) MOV A1, 16(X2) MOV A3, 24(X2) MOV A2, 32(X2) + MOV $0, 40(X2) CALL runtime·reflectcallmove(SB) RET diff --git a/src/runtime/asm_s390x.s b/src/runtime/asm_s390x.s index 1cd5eca06f8c3..fbd185c3533d1 100644 --- a/src/runtime/asm_s390x.s +++ b/src/runtime/asm_s390x.s @@ -353,7 +353,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 BR runtime·morestack(SB) // reflectcall: call a function with the given argument list -// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32). +// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs). // we don't have variable-sized frames, so we use a small number // of constant-sized-frame functions to encode a few bits of size in the pc. // Caution: ugly multiline assembly macros in your future! @@ -366,8 +366,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 BR (R5) // Note: can't just "BR NAME(SB)" - bad inlining results. 
-TEXT ·reflectcall(SB), NOSPLIT, $-8-32 - MOVWZ argsize+24(FP), R3 +TEXT ·reflectcall(SB), NOSPLIT, $-8-48 + MOVWZ frameSize+32(FP), R3 DISPATCH(runtime·call16, 16) DISPATCH(runtime·call32, 32) DISPATCH(runtime·call64, 64) @@ -399,11 +399,11 @@ TEXT ·reflectcall(SB), NOSPLIT, $-8-32 BR (R5) #define CALLFN(NAME,MAXSIZE) \ -TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \ +TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \ NO_LOCAL_POINTERS; \ /* copy arguments to stack */ \ - MOVD arg+16(FP), R4; \ - MOVWZ argsize+24(FP), R5; \ + MOVD stackArgs+16(FP), R4; \ + MOVWZ stackArgsSize+24(FP), R5; \ MOVD $stack-MAXSIZE(SP), R6; \ loopArgs: /* copy 256 bytes at a time */ \ CMP R5, $256; \ @@ -424,11 +424,11 @@ callFunction: \ PCDATA $PCDATA_StackMapIndex, $0; \ BL (R8); \ /* copy return values back */ \ - MOVD argtype+0(FP), R7; \ - MOVD arg+16(FP), R6; \ - MOVWZ n+24(FP), R5; \ + MOVD stackArgsType+0(FP), R7; \ + MOVD stackArgs+16(FP), R6; \ + MOVWZ stackArgsSize+24(FP), R5; \ MOVD $stack-MAXSIZE(SP), R4; \ - MOVWZ retoffset+28(FP), R1; \ + MOVWZ stackRetOffset+28(FP), R1; \ ADD R1, R4; \ ADD R1, R6; \ SUB R1, R5; \ @@ -439,11 +439,12 @@ callFunction: \ // separate function so it can allocate stack space for the arguments // to reflectcallmove. It does not follow the Go ABI; it expects its // arguments in registers. -TEXT callRet<>(SB), NOSPLIT, $32-0 +TEXT callRet<>(SB), NOSPLIT, $40-0 MOVD R7, 8(R15) MOVD R6, 16(R15) MOVD R4, 24(R15) MOVD R5, 32(R15) + MOVD $0, 40(R15) BL runtime·reflectcallmove(SB) RET diff --git a/src/runtime/asm_wasm.s b/src/runtime/asm_wasm.s index fcb780f1dc830..cf3d961b749bd 100644 --- a/src/runtime/asm_wasm.s +++ b/src/runtime/asm_wasm.s @@ -296,14 +296,14 @@ TEXT ·asmcgocall(SB), NOSPLIT, $0-0 JMP NAME(SB); \ End -TEXT ·reflectcall(SB), NOSPLIT, $0-32 +TEXT ·reflectcall(SB), NOSPLIT, $0-48 I64Load fn+8(FP) I64Eqz If CALLNORESUME runtime·sigpanic(SB) End - MOVW argsize+24(FP), R0 + MOVW frameSize+32(FP), R0 DISPATCH(runtime·call16, 16) DISPATCH(runtime·call32, 32) @@ -335,18 +335,18 @@ TEXT ·reflectcall(SB), NOSPLIT, $0-32 JMP runtime·badreflectcall(SB) #define CALLFN(NAME, MAXSIZE) \ -TEXT NAME(SB), WRAPPER, $MAXSIZE-32; \ +TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \ NO_LOCAL_POINTERS; \ - MOVW argsize+24(FP), R0; \ + MOVW stackArgsSize+24(FP), R0; \ \ Get R0; \ I64Eqz; \ Not; \ If; \ Get SP; \ - I64Load argptr+16(FP); \ + I64Load stackArgs+16(FP); \ I32WrapI64; \ - I64Load argsize+24(FP); \ + I64Load stackArgsSize+24(FP); \ I64Const $3; \ I64ShrU; \ I32WrapI64; \ @@ -359,12 +359,12 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-32; \ I64Load $0; \ CALL; \ \ - I64Load32U retoffset+28(FP); \ + I64Load32U stackRetOffset+28(FP); \ Set R0; \ \ - MOVD argtype+0(FP), RET0; \ + MOVD stackArgsType+0(FP), RET0; \ \ - I64Load argptr+16(FP); \ + I64Load stackArgs+16(FP); \ Get R0; \ I64Add; \ Set RET1; \ @@ -375,7 +375,7 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-32; \ I64Add; \ Set RET2; \ \ - I64Load32U argsize+24(FP); \ + I64Load32U stackArgsSize+24(FP); \ Get R0; \ I64Sub; \ Set RET3; \ @@ -387,12 +387,13 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-32; \ // separate function so it can allocate stack space for the arguments // to reflectcallmove. It does not follow the Go ABI; it expects its // arguments in registers. 
-TEXT callRet<>(SB), NOSPLIT, $32-0 +TEXT callRet<>(SB), NOSPLIT, $40-0 NO_LOCAL_POINTERS MOVD RET0, 0(SP) MOVD RET1, 8(SP) MOVD RET2, 16(SP) MOVD RET3, 24(SP) + MOVD $0, 32(SP) CALL runtime·reflectcallmove(SB) RET diff --git a/src/runtime/mbarrier.go b/src/runtime/mbarrier.go index 2b5affce5219a..4994347bdeb35 100644 --- a/src/runtime/mbarrier.go +++ b/src/runtime/mbarrier.go @@ -14,6 +14,7 @@ package runtime import ( + "internal/abi" "runtime/internal/sys" "unsafe" ) @@ -223,11 +224,18 @@ func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size // stack map of reflectcall is wrong. // //go:nosplit -func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr) { +func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) { if writeBarrier.needed && typ != nil && typ.ptrdata != 0 && size >= sys.PtrSize { bulkBarrierPreWrite(uintptr(dst), uintptr(src), size) } memmove(dst, src, size) + + // Move pointers returned in registers to a place where the GC can see them. + for i := range regs.Ints { + if regs.ReturnIsPtr.Get(i) { + regs.Ptrs[i] = unsafe.Pointer(regs.Ints[i]) + } + } } //go:nosplit diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go index f4dbd77252890..7d0313be12026 100644 --- a/src/runtime/mfinal.go +++ b/src/runtime/mfinal.go @@ -7,6 +7,7 @@ package runtime import ( + "internal/abi" "runtime/internal/atomic" "runtime/internal/sys" "unsafe" @@ -219,7 +220,11 @@ func runfinq() { throw("bad kind in runfinq") } fingRunning = true - reflectcall(nil, unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz)) + // Pass a dummy RegArgs for now. + // + // TODO(mknyszek): Pass arguments in registers. + var regs abi.RegArgs + reflectcall(nil, unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz), uint32(framesz), ®s) fingRunning = false // Drop finalizer queue heap references diff --git a/src/runtime/panic.go b/src/runtime/panic.go index 5b2ccdd8740f8..e320eaa59666d 100644 --- a/src/runtime/panic.go +++ b/src/runtime/panic.go @@ -5,6 +5,7 @@ package runtime import ( + "internal/abi" "runtime/internal/atomic" "runtime/internal/sys" "unsafe" @@ -874,7 +875,13 @@ func reflectcallSave(p *_panic, fn, arg unsafe.Pointer, argsize uint32) { p.pc = getcallerpc() p.sp = unsafe.Pointer(getcallersp()) } - reflectcall(nil, fn, arg, argsize, argsize) + // Pass a dummy RegArgs for now since no function actually implements + // the register-based ABI. + // + // TODO(mknyszek): Implement this properly, setting up arguments in + // registers as necessary in the caller. + var regs abi.RegArgs + reflectcall(nil, fn, arg, argsize, argsize, argsize, ®s) if p != nil { p.pc = 0 p.sp = unsafe.Pointer(nil) @@ -968,7 +975,9 @@ func gopanic(e interface{}) { } } else { p.argp = unsafe.Pointer(getargp(0)) - reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz)) + + var regs abi.RegArgs + reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz), uint32(d.siz), ®s) } p.argp = nil diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go index 3d1e0c0bb4dd7..c0cc95ec65f45 100644 --- a/src/runtime/stubs.go +++ b/src/runtime/stubs.go @@ -4,7 +4,10 @@ package runtime -import "unsafe" +import ( + "internal/abi" + "unsafe" +) // Should be a built-in for unsafe.Pointer? //go:nosplit @@ -174,19 +177,50 @@ func asminit() func setg(gg *g) func breakpoint() -// reflectcall calls fn with a copy of the n argument bytes pointed at by arg. 
-// After fn returns, reflectcall copies n-retoffset result bytes -// back into arg+retoffset before returning. If copying result bytes back, -// the caller should pass the argument frame type as argtype, so that -// call can execute appropriate write barriers during the copy. +// reflectcall calls fn with arguments described by stackArgs, stackArgsSize, +// frameSize, and regArgs. // -// Package reflect always passes a frame type. In package runtime, -// Windows callbacks are the only use of this that copies results -// back, and those cannot have pointers in their results, so runtime -// passes nil for the frame type. +// Arguments passed on the stack and space for return values passed on the stack +// must be laid out at the space pointed to by stackArgs (with total length +// stackArgsSize) according to the ABI. +// +// stackRetOffset must be some value <= stackArgsSize that indicates the +// offset within stackArgs where the return value space begins. +// +// frameSize is the total size of the argument frame at stackArgs and must +// therefore be >= stackArgsSize. It must include additional space for spilling +// register arguments for stack growth and preemption. +// +// TODO(mknyszek): Once we don't need the additional spill space, remove frameSize, +// since frameSize will be redundant with stackArgsSize. +// +// Arguments passed in registers must be laid out in regArgs according to the ABI. +// regArgs will hold any return values passed in registers after the call. +// +// reflectcall copies stack arguments from stackArgs to the goroutine stack, and +// then copies back stackArgsSize-stackRetOffset bytes back to the return space +// in stackArgs once fn has completed. It also "unspills" argument registers from +// regArgs before calling fn, and spills them back into regArgs immediately +// following the call to fn. If there are results being returned on the stack, +// the caller should pass the argument frame type as stackArgsType so that +// reflectcall can execute appropriate write barriers during the copy. +// +// reflectcall expects regArgs.ReturnIsPtr to be populated indicating which +// registers on the return path will contain Go pointers. It will then store +// these pointers in regArgs.Ptrs such that they are visible to the GC. +// +// Package reflect passes a frame type. In package runtime, there is only +// one call that copies results back, in callbackWrap in syscall_windows.go, and it +// does NOT pass a frame type, meaning there are no write barriers invoked. See that +// call site for justification. // // Package reflect accesses this symbol through a linkname. -func reflectcall(argtype *_type, fn, arg unsafe.Pointer, argsize uint32, retoffset uint32) +// +// Arguments passed through to reflectcall do not escape. The type is used +// only in a very limited callee of reflectcall, the stackArgs are copied, and +// regArgs is only used in the reflectcall frame. 
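+//
+// Runtime callers that do not yet set up register arguments pass a dummy
+// RegArgs, as in the panic.go and mfinal.go hunks above:
+//
+//	var regs abi.RegArgs
+//	reflectcall(nil, fn, arg, argsize, argsize, argsize, &regs)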
+//go:noescape +func reflectcall(stackArgsType *_type, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs) func procyield(cycles uint32) diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go index 7835b492f7814..add40bb0b345b 100644 --- a/src/runtime/syscall_windows.go +++ b/src/runtime/syscall_windows.go @@ -5,6 +5,7 @@ package runtime import ( + "internal/abi" "runtime/internal/sys" "unsafe" ) @@ -242,7 +243,11 @@ func callbackWrap(a *callbackArgs) { // Even though this is copying back results, we can pass a nil // type because those results must not require write barriers. - reflectcall(nil, unsafe.Pointer(c.fn), noescape(goArgs), uint32(c.retOffset)+sys.PtrSize, uint32(c.retOffset)) + // + // Pass a dummy RegArgs for now. + // TODO(mknyszek): Pass arguments in registers. + var regs abi.RegArgs + reflectcall(nil, unsafe.Pointer(c.fn), noescape(goArgs), uint32(c.retOffset)+sys.PtrSize, uint32(c.retOffset), uint32(c.retOffset)+sys.PtrSize, ®s) // Extract the result. a.result = *(*uintptr)(unsafe.Pointer(&frame[c.retOffset])) From d28aae26b00ec047da1c27192d7eb4b64e30db45 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Tue, 16 Feb 2021 12:58:48 -0500 Subject: [PATCH 468/474] [dev.regabi] cmd/link: recognize internal/abi as runtime package The runtime imports the internal/abi package. Recognize internal/abi as a runtime dependent, to make trampoline generation algorithm work. Fix ARM build. Change-Id: I26b6778aa41dcb959bc226ff04abe08a5a82c4f6 Reviewed-on: https://go-review.googlesource.com/c/go/+/292610 Reviewed-by: Than McIntosh Trust: Cherry Zhang Run-TryBot: Cherry Zhang TryBot-Result: Go Bot --- src/cmd/link/internal/ld/data.go | 1 + 1 file changed, 1 insertion(+) diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go index 6013e0ab0a11b..2fb790a6ea540 100644 --- a/src/cmd/link/internal/ld/data.go +++ b/src/cmd/link/internal/ld/data.go @@ -55,6 +55,7 @@ func isRuntimeDepPkg(pkg string) bool { switch pkg { case "runtime", "sync/atomic", // runtime may call to sync/atomic, due to go:linkname + "internal/abi", // used by reflectcall (and maybe more) "internal/bytealg", // for IndexByte "internal/cpu": // for cpu features return true From 8cfbf34dd956125524ea63469342cf8a319b5bd1 Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Tue, 16 Feb 2021 18:29:18 +0000 Subject: [PATCH 469/474] internal/abi: set register count constants to zero for regabi experiment This change sets the register count constants to zero for the GOEXPERIMENT regabi because currently the users of it (i.e. reflect) will be broken, since they expect Go functions that implement the new ABI. Change-Id: Id3e874c61821a36605eb4e1cccdee36a2759f303 Reviewed-on: https://go-review.googlesource.com/c/go/+/292649 Reviewed-by: Cherry Zhang TryBot-Result: Go Bot Trust: Michael Knyszek Run-TryBot: Michael Knyszek --- src/internal/abi/abi_amd64.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/internal/abi/abi_amd64.go b/src/internal/abi/abi_amd64.go index 6574d4216de3f..70e2ed1feb945 100644 --- a/src/internal/abi/abi_amd64.go +++ b/src/internal/abi/abi_amd64.go @@ -9,12 +9,16 @@ package abi const ( // See abi_generic.go. + // Currently these values are zero because whatever uses + // them will expect the register ABI, which isn't ready + // yet. + // RAX, RBX, RCX, RDI, RSI, R8, R9, R10, R11. - IntArgRegs = 9 + IntArgRegs = 0 // 9 // X0 -> X14. 
- FloatArgRegs = 15 + FloatArgRegs = 0 // 15 // We use SSE2 registers which support 64-bit float operations. - EffectiveFloatRegSize = 8 + EffectiveFloatRegSize = 0 // 8 ) From c2358a1ae77d7bd09fb8b728d25641b5757a7a58 Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Tue, 16 Feb 2021 20:15:13 +0000 Subject: [PATCH 470/474] [dev.regabi] runtime: stub out spillArgs and unspillArgs Currently these two functions assume that constants in internal/abi are set correctly, but we actually just made them zero if GOEXPERIMENT_REGABI is set. This means reflectcall is broken. Fix it by stubbing out these routines even if GOEXPERIMENT_REGABI is set. Change-Id: I4c8df6d6af28562c5bb7b85f48c03d37daa9ee0d Reviewed-on: https://go-review.googlesource.com/c/go/+/292650 Reviewed-by: Cherry Zhang TryBot-Result: Go Bot Trust: Michael Knyszek Run-TryBot: Michael Knyszek --- src/runtime/asm_amd64.s | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s index 5e1ed9b2ad8e8..05422c9699c45 100644 --- a/src/runtime/asm_amd64.s +++ b/src/runtime/asm_amd64.s @@ -445,7 +445,10 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0 MOVL $0, DX JMP runtime·morestack(SB) -#ifdef GOEXPERIMENT_REGABI +// REFLECTCALL_USE_REGABI is not defined. It must be defined in conjunction with the +// register constants in the internal/abi package. + +#ifdef REFLECTCALL_USE_REGABI // spillArgs stores return values from registers to a *internal/abi.RegArgs in R12. TEXT spillArgs<>(SB),NOSPLIT,$0-0 MOVQ AX, 0(R12) From 7696c9433406c3f5b9f127cb557120b74e3c3952 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Thu, 11 Feb 2021 10:45:49 -0500 Subject: [PATCH 471/474] [dev.regabi] go/types: type alias decl requires go1.9 This is a port of CL 289570 to go/types. It has some notable differences with that CL: + A new _BadDecl error code is added, to indicate declarations with bad syntax. + declInfo is updated hold not an 'alias' bool, but an aliasPos token.Pos to identify the location of the type aliasing '=' token. This allows for error messages to be accurately placed on the '=' For #31793 Change-Id: Ib15969f9cd5be30228b7a4c6406f978d6fc58018 Reviewed-on: https://go-review.googlesource.com/c/go/+/291318 Trust: Robert Findley Trust: Robert Griesemer Run-TryBot: Robert Findley TryBot-Result: Go Bot Reviewed-by: Robert Griesemer --- src/go/types/decl.go | 18 ++++++++++++------ src/go/types/errorcodes.go | 3 +++ src/go/types/resolver.go | 8 ++++---- src/go/types/testdata/go1_8.src | 11 +++++++++++ 4 files changed, 30 insertions(+), 10 deletions(-) create mode 100644 src/go/types/testdata/go1_8.src diff --git a/src/go/types/decl.go b/src/go/types/decl.go index 571e172351bef..b861cde496490 100644 --- a/src/go/types/decl.go +++ b/src/go/types/decl.go @@ -189,7 +189,7 @@ func (check *Checker) objDecl(obj Object, def *Named) { check.varDecl(obj, d.lhs, d.typ, d.init) case *TypeName: // invalid recursive types are detected via path - check.typeDecl(obj, d.typ, def, d.alias) + check.typeDecl(obj, d.typ, def, d.aliasPos) case *Func: // functions may be recursive - no need to track dependencies check.funcDecl(obj, d) @@ -234,7 +234,7 @@ func (check *Checker) cycle(obj Object) (isCycle bool) { // this information explicitly in the object. 
var alias bool if d := check.objMap[obj]; d != nil { - alias = d.alias // package-level object + alias = d.aliasPos.IsValid() // package-level object } else { alias = obj.IsAlias() // function local object } @@ -640,14 +640,17 @@ func (n *Named) setUnderlying(typ Type) { } } -func (check *Checker) typeDecl(obj *TypeName, typ ast.Expr, def *Named, alias bool) { +func (check *Checker) typeDecl(obj *TypeName, typ ast.Expr, def *Named, aliasPos token.Pos) { assert(obj.typ == nil) check.later(func() { check.validType(obj.typ, nil) }) - if alias { + if aliasPos.IsValid() { + if !check.allowVersion(obj.pkg, 1, 9) { + check.errorf(atPos(aliasPos), _BadDecl, "type aliases requires go1.9 or later") + } obj.typ = Typ[Invalid] obj.typ = check.typ(typ) @@ -678,9 +681,12 @@ func (check *Checker) typeDecl(obj *TypeName, typ ast.Expr, def *Named, alias bo } + // TODO(rFindley): move to the callsite, as this is only needed for top-level + // decls. check.addMethodDecls(obj) } +// TODO(rFindley): rename to collectMethods, to be consistent with types2. func (check *Checker) addMethodDecls(obj *TypeName) { // get associated methods // (Checker.collectObjects only collects methods with non-blank names; @@ -691,7 +697,7 @@ func (check *Checker) addMethodDecls(obj *TypeName) { return } delete(check.methods, obj) - assert(!check.objMap[obj].alias) // don't use TypeName.IsAlias (requires fully set up object) + assert(!check.objMap[obj].aliasPos.IsValid()) // don't use TypeName.IsAlias (requires fully set up object) // use an objset to check for name conflicts var mset objset @@ -864,7 +870,7 @@ func (check *Checker) declStmt(d ast.Decl) { check.declare(check.scope, d.spec.Name, obj, scopePos) // mark and unmark type before calling typeDecl; its type is still nil (see Checker.objDecl) obj.setColor(grey + color(check.push(obj))) - check.typeDecl(obj, d.spec.Type, nil, d.spec.Assign.IsValid()) + check.typeDecl(obj, d.spec.Type, nil, d.spec.Assign) check.pop().setColor(black) default: check.invalidAST(d.node(), "unknown ast.Decl node %T", d.node()) diff --git a/src/go/types/errorcodes.go b/src/go/types/errorcodes.go index d27abdf4d48bf..ac28c3bd13443 100644 --- a/src/go/types/errorcodes.go +++ b/src/go/types/errorcodes.go @@ -1366,4 +1366,7 @@ const ( // return i // } _InvalidGo + + // _BadDecl occurs when a declaration has invalid syntax. + _BadDecl ) diff --git a/src/go/types/resolver.go b/src/go/types/resolver.go index 47e165db368a1..e4411592e82a0 100644 --- a/src/go/types/resolver.go +++ b/src/go/types/resolver.go @@ -23,7 +23,7 @@ type declInfo struct { init ast.Expr // init/orig expression, or nil inherited bool // if set, the init expression is inherited from a previous constant declaration fdecl *ast.FuncDecl // func declaration, or nil - alias bool // type alias declaration + aliasPos token.Pos // If valid, the decl is a type alias and aliasPos is the position of '='. // The deps field tracks initialization expression dependencies. 
deps map[Object]bool // lazily initialized @@ -366,7 +366,7 @@ func (check *Checker) collectObjects() { } case typeDecl: obj := NewTypeName(d.spec.Name.Pos(), pkg, d.spec.Name.Name, nil) - check.declarePkgObj(d.spec.Name, obj, &declInfo{file: fileScope, typ: d.spec.Type, alias: d.spec.Assign.IsValid()}) + check.declarePkgObj(d.spec.Name, obj, &declInfo{file: fileScope, typ: d.spec.Type, aliasPos: d.spec.Assign}) case funcDecl: info := &declInfo{file: fileScope, fdecl: d.decl} name := d.decl.Name.Name @@ -493,7 +493,7 @@ func (check *Checker) resolveBaseTypeName(typ ast.Expr) (ptr bool, base *TypeNam // we're done if tdecl defined tname as a new type // (rather than an alias) tdecl := check.objMap[tname] // must exist for objects in package scope - if !tdecl.alias { + if !tdecl.aliasPos.IsValid() { return ptr, tname } @@ -534,7 +534,7 @@ func (check *Checker) packageObjects() { // phase 1 for _, obj := range objList { // If we have a type alias, collect it for the 2nd phase. - if tname, _ := obj.(*TypeName); tname != nil && check.objMap[tname].alias { + if tname, _ := obj.(*TypeName); tname != nil && check.objMap[tname].aliasPos.IsValid() { aliasList = append(aliasList, tname) continue } diff --git a/src/go/types/testdata/go1_8.src b/src/go/types/testdata/go1_8.src new file mode 100644 index 0000000000000..3ead1e981b71f --- /dev/null +++ b/src/go/types/testdata/go1_8.src @@ -0,0 +1,11 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Check Go language version-specific errors. + +package go1_8 // go1.8 + +// type alias declarations +type any = /* ERROR type aliases requires go1.9 or later */ interface{} + From ed55da46ab994abb4ea1b20aaab3cff6b650959f Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Thu, 11 Feb 2021 10:51:52 -0500 Subject: [PATCH 472/474] [dev.regabi] go/types: overlapping embedded interfaces requires go1.14 This is an exact port of CL 290911 to go/types. For #31793 Change-Id: I28c42727735f467a5984594b455ca58ab3375591 Reviewed-on: https://go-review.googlesource.com/c/go/+/291319 Trust: Robert Findley Run-TryBot: Robert Findley TryBot-Result: Go Bot Reviewed-by: Robert Griesemer --- src/go/types/stdlib_test.go | 1 - src/go/types/testdata/go1_13.src | 22 ++++++++++++++++++++++ src/go/types/typexpr.go | 8 ++++++-- 3 files changed, 28 insertions(+), 3 deletions(-) create mode 100644 src/go/types/testdata/go1_13.src diff --git a/src/go/types/stdlib_test.go b/src/go/types/stdlib_test.go index 979785de956dd..29f71137df839 100644 --- a/src/go/types/stdlib_test.go +++ b/src/go/types/stdlib_test.go @@ -185,7 +185,6 @@ func TestStdFixed(t *testing.T) { "issue22200b.go", // go/types does not have constraints on stack size "issue25507.go", // go/types does not have constraints on stack size "issue20780.go", // go/types does not have constraints on stack size - "issue34329.go", // go/types does not have constraints on language level (-lang=go1.13) (see #31793) "bug251.go", // issue #34333 which was exposed with fix for #34151 "issue42058a.go", // go/types does not have constraints on channel element size "issue42058b.go", // go/types does not have constraints on channel element size diff --git a/src/go/types/testdata/go1_13.src b/src/go/types/testdata/go1_13.src new file mode 100644 index 0000000000000..6aa1364e8a473 --- /dev/null +++ b/src/go/types/testdata/go1_13.src @@ -0,0 +1,22 @@ +// Copyright 2021 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Check Go language version-specific errors. + +package go1_13 // go1.13 + +// interface embedding + +type I interface { m() } + +type _ interface { + m() + I // ERROR "duplicate method m" +} + +type _ interface { + I + I // ERROR "duplicate method m" +} + diff --git a/src/go/types/typexpr.go b/src/go/types/typexpr.go index 6e89ccb02743b..b9249494fa16d 100644 --- a/src/go/types/typexpr.go +++ b/src/go/types/typexpr.go @@ -578,9 +578,13 @@ func (check *Checker) completeInterface(ityp *Interface) { check.errorf(atPos(pos), _DuplicateDecl, "duplicate method %s", m.name) check.errorf(atPos(mpos[other.(*Func)]), _DuplicateDecl, "\tother declaration of %s", m.name) // secondary error, \t indented default: - // check method signatures after all types are computed (issue #33656) + // We have a duplicate method name in an embedded (not explicitly declared) method. + // Check method signatures after all types are computed (issue #33656). + // If we're pre-go1.14 (overlapping embeddings are not permitted), report that + // error here as well (even though we could do it eagerly) because it's the same + // error message. check.atEnd(func() { - if !check.identical(m.typ, other.Type()) { + if !check.allowVersion(m.pkg, 1, 14) || !check.identical(m.typ, other.Type()) { check.errorf(atPos(pos), _DuplicateDecl, "duplicate method %s", m.name) check.errorf(atPos(mpos[other.(*Func)]), _DuplicateDecl, "\tother declaration of %s", m.name) // secondary error, \t indented } From b8fb049c7ad4940901613d16629a88b38c6a82da Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Tue, 16 Feb 2021 15:55:54 -0500 Subject: [PATCH 473/474] [dev.regabi] cmd/go: copy internal/abi in TestNewReleaseRebuildsStalePackagesInGOPATH The internal/abi package is used by runtime and needs to be copied. Fix longtest builders. Change-Id: I7a962df3db2c6bf68cc6a7da74b579f381920009 Reviewed-on: https://go-review.googlesource.com/c/go/+/292592 Reviewed-by: Michael Knyszek TryBot-Result: Go Bot Trust: Cherry Zhang Run-TryBot: Cherry Zhang --- src/cmd/go/go_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go index 3ce32388d05dd..d14b2328bfaa0 100644 --- a/src/cmd/go/go_test.go +++ b/src/cmd/go/go_test.go @@ -811,6 +811,7 @@ func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) { // so that we can change files. for _, copydir := range []string{ "src/runtime", + "src/internal/abi", "src/internal/bytealg", "src/internal/cpu", "src/math/bits", From d3cd4830adf45ce53c586a83f9d78421484737fd Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Thu, 11 Feb 2021 19:55:07 -0500 Subject: [PATCH 474/474] [dev.regabi] test: run abi/regabipragma test with -c=1 Currently, we call Warnl in SSA backend when we see a function (defined or called) with regparams pragma. Calling Warnl in concurrent environment is racy. As the debugging output is temporary, for testing purposes we just pass -c=1. We'll remove the pragma and the debugging print some time soon. 
Change-Id: I6f925a665b953259453fc458490c5ff91f67c91a Reviewed-on: https://go-review.googlesource.com/c/go/+/291710 TryBot-Result: Go Bot Reviewed-by: Jeremy Faller Trust: Cherry Zhang Run-TryBot: Cherry Zhang --- test/abi/regabipragma.go | 2 +- test/run.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/test/abi/regabipragma.go b/test/abi/regabipragma.go index 6a1b1938ea038..e7ecd58fc8937 100644 --- a/test/abi/regabipragma.go +++ b/test/abi/regabipragma.go @@ -1,4 +1,4 @@ -// runindir +// runindir -gcflags=-c=1 // +build !windows // Copyright 2021 The Go Authors. All rights reserved. diff --git a/test/run.go b/test/run.go index 116f983a97a15..dba4d16d63e6e 100644 --- a/test/run.go +++ b/test/run.go @@ -902,6 +902,7 @@ func (t *test) run() { if *linkshared { cmd = append(cmd, "-linkshared") } + cmd = append(cmd, flags...) cmd = append(cmd, ".") out, err := runcmd(cmd...) if err != nil {